recipes-kernel: Update Aspeed drivers to SDK 9.05
All of these driver patches come from the Aspeed SDK 9.05 release. For details, refer to
SDK_User_Guide_v09.05.pdf (6.3 Kernel Driver List and Usage, ast2700).
The following drivers are not used (a quick spot-check is sketched after the list):
```
PWM & Fan Tacho
UFS
KVM Video
LPC-KCS
LPC-BT
LPC-SNOOP
PECI
Graphics Display
OTP
CAN
```
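As a sanity check that these classes stay disabled, one can grep the generated kernel
.config for the related keywords. This is a minimal sketch under our own assumptions;
the keyword list and the .config location are illustrative, not taken from the SDK document:
```
# Hypothetical spot-check: list any of the unused driver classes that are
# still enabled in the generated kernel config (run from the kernel build dir).
grep -iE 'pwm|tach|ufs|kcs|bt_bmc|snoop|peci|otp|canbus' .config | grep -vE '^#' \
    || echo "none of the unused driver classes are enabled"
```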
Tested:
We reviewed the generated .config and each driver's source file, then ran the
resulting kernel on an ast2700 A1 machine.
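For anyone reproducing this, the patch files under recipes-kernel/linux/files/ are
expected to be pulled into the kernel build by the layer's kernel recipe. The following
is a minimal bbappend sketch; the linux-aspeed recipe name and the FILESEXTRAPATHS
layout are assumptions, not part of this change:
```
# Hypothetical linux-aspeed_%.bbappend fragment showing how one of the
# SDK 9.05 patches added here would be applied to the kernel source.
FILESEXTRAPATHS:prepend := "${THISDIR}/files:"
SRC_URI += "file://0001-Add-arch-files-for-ast2700.patch"
```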
Fusion-Link: NA
Google-Bug-Id: 402600438
Change-Id: I8cbb005e83fb43f5add1849917c13a36f19dcb4c
Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
diff --git a/recipes-kernel/linux/files/0001-Add-arch-files-for-ast2700.patch b/recipes-kernel/linux/files/0001-Add-arch-files-for-ast2700.patch
new file mode 100644
index 0000000..216cefe
--- /dev/null
+++ b/recipes-kernel/linux/files/0001-Add-arch-files-for-ast2700.patch
@@ -0,0 +1,5823 @@
+From 5b246399e998a6d19c4121867b2240d2dee50537 Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Mon, 10 Mar 2025 13:59:18 +0800
+Subject: [PATCH] Add arch files for ast2700
+
+This is based on Aspeed SDK 9.05.
+
+Source:
+AspeedTech-BMC github:
+https://github.com/AspeedTech-BMC/linux/blob/aspeed-master-v6.6/
+(cherry picked from commit 769f62b7baa84d6998723b0ea60280e380183553)
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
+---
+ arch/arm64/Kconfig.platforms | 6 +
+ arch/arm64/boot/dts/Makefile | 1 +
+ arch/arm64/boot/dts/aspeed/Makefile | 17 +
+ .../aspeed/aspeed-evb-flash-layout-128.dtsi | 32 +
+ .../boot/dts/aspeed/aspeed-g7-pinctrl.dtsi | 1250 ++++++++
+ arch/arm64/boot/dts/aspeed/aspeed-g7.dtsi | 2774 +++++++++++++++++
+ arch/arm64/boot/dts/aspeed/ast2700-evb.dts | 1155 +++++++
+ .../boot/dts/aspeed/ast2700-reserved-mem.dtsi | 85 +
+ arch/arm64/configs/aspeed_g7_defconfig | 407 +++
+ 9 files changed, 5727 insertions(+)
+ create mode 100644 arch/arm64/boot/dts/aspeed/Makefile
+ create mode 100644 arch/arm64/boot/dts/aspeed/aspeed-evb-flash-layout-128.dtsi
+ create mode 100644 arch/arm64/boot/dts/aspeed/aspeed-g7-pinctrl.dtsi
+ create mode 100644 arch/arm64/boot/dts/aspeed/aspeed-g7.dtsi
+ create mode 100644 arch/arm64/boot/dts/aspeed/ast2700-evb.dts
+ create mode 100644 arch/arm64/boot/dts/aspeed/ast2700-reserved-mem.dtsi
+ create mode 100644 arch/arm64/configs/aspeed_g7_defconfig
+
+diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
+index 606912019..40d1900fe 100644
+--- a/arch/arm64/Kconfig.platforms
++++ b/arch/arm64/Kconfig.platforms
+@@ -33,6 +33,12 @@ config ARCH_APPLE
+ This enables support for Apple's in-house ARM SoC family, starting
+ with the Apple M1.
+
++config ARCH_ASPEED
++ bool "Aspeed SoC family"
++ help
++ Say yes if you intend to run on an Aspeed ast2700 or similar
++ seventh generation Aspeed BMCs.
++
+ menuconfig ARCH_BCM
+ bool "Broadcom SoC Support"
+
+diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
+index 30dd6347a..f71b77b1c 100644
+--- a/arch/arm64/boot/dts/Makefile
++++ b/arch/arm64/boot/dts/Makefile
+@@ -33,3 +33,4 @@ subdir-y += tesla
+ subdir-y += ti
+ subdir-y += toshiba
+ subdir-y += xilinx
++subdir-y += aspeed
+diff --git a/arch/arm64/boot/dts/aspeed/Makefile b/arch/arm64/boot/dts/aspeed/Makefile
+new file mode 100644
+index 000000000..0f1cc2cc6
+--- /dev/null
++++ b/arch/arm64/boot/dts/aspeed/Makefile
+@@ -0,0 +1,17 @@
++# SPDX-License-Identifier: GPL-2.0
++
++dtb-$(CONFIG_ARCH_ASPEED) += \
++ ast2700a0-evb.dtb \
++ ast2700-evb.dtb \
++ ast2700-evb-s0.dtb \
++ ast2700-evb-s1.dtb \
++ ast2700a0-evb_ast1700a0-evb.dtb \
++ ast2700a0-ncsi.dtb \
++ ast2700-ncsi.dtb \
++ ast2700a0-dcscm.dtb \
++ ast2700-dcscm.dtb \
++ ast2700a0-dcscm_ast1700a0-demo.dtb \
++ ast2700-dcscm_ast1700-evb.dtb \
++ ast2700a0-evb-256-abr.dtb \
++ ast2700-evb-256-abr.dtb \
++ ast2700-slt.dtb
+diff --git a/arch/arm64/boot/dts/aspeed/aspeed-evb-flash-layout-128.dtsi b/arch/arm64/boot/dts/aspeed/aspeed-evb-flash-layout-128.dtsi
+new file mode 100644
+index 000000000..46e8b7ae4
+--- /dev/null
++++ b/arch/arm64/boot/dts/aspeed/aspeed-evb-flash-layout-128.dtsi
+@@ -0,0 +1,32 @@
++// SPDX-License-Identifier: GPL-2.0+
++
++partitions {
++ compatible = "fixed-partitions";
++ #address-cells = <1>;
++ #size-cells = <1>;
++
++ u-boot@0 {
++ reg = <0x0 0x400000>; // 4MB
++ label = "u-boot";
++ };
++
++ u-boot-env@400000 {
++ reg = <0x400000 0x20000>; // 128KB
++ label = "u-boot-env";
++ };
++
++ kernel@420000 {
++ reg = <0x420000 0x900000>; // 9MB
++ label = "kernel";
++ };
++
++ rofs@d20000 {
++ reg = <0xd20000 0x52E0000>; // 82.875MB
++ label = "rofs";
++ };
++
++ rwfs@6000000 {
++ reg = <0x6000000 0x2000000>; // 32MB
++ label = "rwfs";
++ };
++};
+diff --git a/arch/arm64/boot/dts/aspeed/aspeed-g7-pinctrl.dtsi b/arch/arm64/boot/dts/aspeed/aspeed-g7-pinctrl.dtsi
+new file mode 100644
+index 000000000..a3b8f0339
+--- /dev/null
++++ b/arch/arm64/boot/dts/aspeed/aspeed-g7-pinctrl.dtsi
+@@ -0,0 +1,1250 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++
++&pinctrl0 {
++ pinctrl_emmc_default: emmc_default {
++ function = "EMMC";
++ groups = "EMMCG1";
++ };
++
++	pinctrl_emmcg4_default: emmcg4_default {
++ function = "EMMC";
++ groups = "EMMCG4";
++ };
++
++ pinctrl_emmcg8_default: emmcg8_default {
++ function = "EMMC";
++ groups = "EMMCG8";
++ };
++
++ pinctrl_emmcwpn_default: emmcwpn_default {
++ function = "EMMC";
++ groups = "EMMCWPN";
++ };
++
++ pinctrl_emmccdn_default: emmccdn_default {
++ function = "EMMC";
++ groups = "EMMCCDN";
++ };
++
++ pinctrl_vgaddc_default: vgaddc_default {
++ function = "VGADDC";
++ groups = "VGADDC";
++ };
++
++ pinctrl_usb3axhd_default: usb3axhd_default {
++ function = "USB3A";
++ groups = "USB3AXHD";
++ };
++
++ pinctrl_usb3axhpd_default: usb3axhpd_default {
++ function = "USB3A";
++ groups = "USB3AXHPD";
++ };
++
++ pinctrl_usb3axh_default: usb3axh_default {
++ function = "USB3A";
++ groups = "USB3AXH";
++ };
++
++ pinctrl_usb3axhp_default: usb3axhp_default {
++ function = "USB3A";
++ groups = "USB3AXHP";
++ };
++
++ pinctrl_usb3axh2b_default: usb3axh2b_default {
++ function = "USB3A";
++ groups = "USB3AXH2B";
++ };
++
++ pinctrl_usb3axhp2b_default: usb3axhp2b_default {
++ function = "USB3A";
++ groups = "USB3AXHP2B";
++ };
++
++ pinctrl_usb2axhd1_default: usb2axhd1_default {
++ function = "USB2A";
++ groups = "USB2AXHD1";
++ };
++
++ pinctrl_usb2axhpd1_default: usb2axhpd1_default {
++ function = "USB2A";
++ groups = "USB2AXHPD1";
++ };
++
++ pinctrl_usb2ad1_default: usb2ad1_default {
++ function = "USB2A";
++ groups = "USB2AD1";
++ };
++
++ pinctrl_usb2axh_default: usb2axh_default {
++ function = "USB2A";
++ groups = "USB2AXH";
++ };
++
++ pinctrl_usb2axhp_default: usb2axhp_default {
++ function = "USB2A";
++ groups = "USB2AXHP";
++ };
++
++ pinctrl_usb2axh2b_default: usb2axh2b_default {
++ function = "USB2A";
++ groups = "USB2AXH2B";
++ };
++
++ pinctrl_usb2axhp2b_default: usb2axhp2b_default {
++ function = "USB2A";
++ groups = "USB2AXHP2B";
++ };
++
++ pinctrl_usb2ahpd0_default: usb2ahpd0_default {
++ function = "USB2A";
++ groups = "USB2AHPD0";
++ };
++
++ pinctrl_usb2ad0_default: usb2ad0_default {
++ function = "USB2A";
++ groups = "USB2AD0";
++ };
++
++ pinctrl_usb2ah_default: usb2ah_default {
++ function = "USB2A";
++ groups = "USB2AH";
++ };
++
++ pinctrl_usb2ahp_default: usb2ahp_default {
++ function = "USB2A";
++ groups = "USB2AHP";
++ };
++
++ pinctrl_usb3bxhd_default: usb3bxhd_default {
++ function = "USB3B";
++ groups = "USB3BXHD";
++ };
++
++ pinctrl_usb3bxhpd_default: usb3bxhpd_default {
++ function = "USB3B";
++ groups = "USB3BXHPD";
++ };
++
++ pinctrl_usb3bxh_default: usb3bxh_default {
++ function = "USB3B";
++ groups = "USB3BXH";
++ };
++
++ pinctrl_usb3bxhp_default: usb3bxhp_default {
++ function = "USB3B";
++ groups = "USB3BXHP";
++ };
++
++ pinctrl_usb3bxh2a_default: usb3bxh2a_default {
++ function = "USB3B";
++ groups = "USB3BXH2A";
++ };
++
++ pinctrl_usb3bxhp2a_default: usb3bxhp2a_default {
++ function = "USB3B";
++ groups = "USB3BXHP2A";
++ };
++
++ pinctrl_usb2bxhd1_default: usb2bxhd1_default {
++ function = "USB2B";
++ groups = "USB2BXHD1";
++ };
++
++ pinctrl_usb2bxhpd1_default: usb2bxhpd1_default {
++ function = "USB2B";
++ groups = "USB2BXHPD1";
++ };
++
++ pinctrl_usb2bd1_default: usb2bd1_default {
++ function = "USB2B";
++ groups = "USB2BD1";
++ };
++
++ pinctrl_usb2bxh_default: usb2bxh_default {
++ function = "USB2B";
++ groups = "USB2BXH";
++ };
++
++ pinctrl_usb2bxhp_default: usb2bxhp_default {
++ function = "USB2B";
++ groups = "USB2BXHP";
++ };
++
++ pinctrl_usb2bxh2a_default: usb2bxh2a_default {
++ function = "USB2B";
++ groups = "USB2BXH2A";
++ };
++
++ pinctrl_usb2bxhp2a_default: usb2bxhp2a_default {
++ function = "USB2B";
++ groups = "USB2BXHP2A";
++ };
++
++ pinctrl_usb2bhpd0_default: usb2bhpd0_default {
++ function = "USB2B";
++ groups = "USB2BHPD0";
++ };
++
++ pinctrl_usb2bd0_default: usb2bd0_default {
++ function = "USB2B";
++ groups = "USB2BD0";
++ };
++
++ pinctrl_usb2bh_default: usb2bh_default {
++ function = "USB2B";
++ groups = "USB2BH";
++ };
++
++ pinctrl_usb2bhp_default: usb2bhp_default {
++ function = "USB2B";
++ groups = "USB2BHP";
++ };
++
++ pinctrl_jtagm0_default: jtagm0_default {
++ function = "JTAG0";
++ groups = "JTAGM0";
++ };
++
++ pinctrl_jtag_psp_default: jtag_psp_default {
++ function = "JTAG0";
++ groups = "PSP";
++ };
++
++ pinctrl_jtag_ssp_default: jtag_ssp_default {
++ function = "JTAG0";
++ groups = "SSP";
++ };
++
++ pinctrl_jtag_tsp_default: jtag_tsp_default {
++ function = "JTAG0";
++ groups = "TSP";
++ };
++
++ pinctrl_jtag_ddr_default: jtag_ddr_default {
++ function = "JTAG0";
++ groups = "DDR";
++ };
++
++ pinctrl_jtag_usb3a_default: jtag_usb3a_default {
++ function = "JTAG0";
++ groups = "USB3A";
++ };
++
++ pinctrl_jtag_usb3b_default: jtag_usb3b_default {
++ function = "JTAG0";
++ groups = "USB3B";
++ };
++
++ pinctrl_jtag_pciea_default: jtag_pciea_default {
++ function = "JTAG0";
++ groups = "PCIEA";
++ };
++
++ pinctrl_jtag_pcieb_default: jtag_pcieb_default {
++ function = "JTAG0";
++ groups = "PCIEB";
++ };
++
++ pinctrl_pcierc0_perst_default: pcierc0_perst_default {
++ function = "PCIERC";
++ groups = "PCIERC0PERST";
++ };
++
++ pinctrl_pcierc1_perst_default: pcierc1_perst_default {
++ function = "PCIERC";
++ groups = "PCIERC1PERST";
++ };
++};
++
++&pinctrl1 {
++ pinctrl_sgpm0_default: sgpm0_default {
++ function = "SGPM0";
++ groups = "SGPM0";
++ };
++
++ pinctrl_sgpm1_default: sgpm1_default {
++ function = "SGPM1";
++ groups = "SGPM1";
++ };
++
++ pinctrl_sgps_default: sgps_default {
++ function = "SGPS";
++ groups = "SGPS";
++ };
++
++ pinctrl_adc0_default: adc0_default {
++ function = "ADC0";
++ groups = "ADC0";
++ };
++
++ pinctrl_adc1_default: adc1_default {
++ function = "ADC1";
++ groups = "ADC1";
++ };
++
++ pinctrl_adc2_default: adc2_default {
++ function = "ADC2";
++ groups = "ADC2";
++ };
++
++ pinctrl_adc3_default: adc3_default {
++ function = "ADC3";
++ groups = "ADC3";
++ };
++
++ pinctrl_adc4_default: adc4_default {
++ function = "ADC4";
++ groups = "ADC4";
++ };
++
++ pinctrl_adc5_default: adc5_default {
++ function = "ADC5";
++ groups = "ADC5";
++ };
++
++ pinctrl_adc6_default: adc6_default {
++ function = "ADC6";
++ groups = "ADC6";
++ };
++
++ pinctrl_adc7_default: adc7_default {
++ function = "ADC7";
++ groups = "ADC7";
++ };
++
++ pinctrl_adc8_default: adc8_default {
++ function = "ADC8";
++ groups = "ADC8";
++ };
++
++ pinctrl_adc9_default: adc9_default {
++ function = "ADC9";
++ groups = "ADC9";
++ };
++
++ pinctrl_adc10_default: adc10_default {
++ function = "ADC10";
++ groups = "ADC10";
++ };
++
++ pinctrl_adc11_default: adc11_default {
++ function = "ADC11";
++ groups = "ADC11";
++ };
++
++ pinctrl_adc12_default: adc12_default {
++ function = "ADC12";
++ groups = "ADC12";
++ };
++
++ pinctrl_adc13_default: adc13_default {
++ function = "ADC13";
++ groups = "ADC13";
++ };
++
++ pinctrl_adc14_default: adc14_default {
++ function = "ADC14";
++ groups = "ADC14";
++ };
++
++ pinctrl_adc15_default: adc15_default {
++ function = "ADC15";
++ groups = "ADC15";
++ };
++
++ pinctrl_pwm0_default: pwm0_default {
++ function = "PWM0";
++ groups = "PWM0";
++ };
++
++ pinctrl_pwm1_default: pwm1_default {
++ function = "PWM1";
++ groups = "PWM1";
++ };
++
++ pinctrl_pwm2_default: pwm2_default {
++ function = "PWM2";
++ groups = "PWM2";
++ };
++
++ pinctrl_pwm3_default: pwm3_default {
++ function = "PWM3";
++ groups = "PWM3";
++ };
++
++ pinctrl_pwm4_default: pwm4_default {
++ function = "PWM4";
++ groups = "PWM4";
++ };
++
++ pinctrl_pwm5_default: pwm5_default {
++ function = "PWM5";
++ groups = "PWM5";
++ };
++
++ pinctrl_pwm6_default: pwm6_default {
++ function = "PWM6";
++ groups = "PWM6";
++ };
++
++ pinctrl_pwm7_default: pwm7_default {
++ function = "PWM7";
++ groups = "PWM7";
++ };
++
++ pinctrl_pwm8_default: pwm8_default {
++ function = "PWM8";
++ groups = "PWM8";
++ };
++
++ pinctrl_pwm9_default: pwm9_default {
++ function = "PWM9";
++ groups = "PWM9";
++ };
++
++ pinctrl_pwm10_default: pwm10_default {
++ function = "PWM10";
++ groups = "PWM10";
++ };
++
++ pinctrl_pwm11_default: pwm11_default {
++ function = "PWM11";
++ groups = "PWM11";
++ };
++
++ pinctrl_pwm12_default: pwm12_default {
++ function = "PWM12";
++ groups = "PWM12";
++ };
++
++ pinctrl_pwm13_default: pwm13_default {
++ function = "PWM13";
++ groups = "PWM13";
++ };
++
++ pinctrl_pwm14_default: pwm14_default {
++ function = "PWM14";
++ groups = "PWM14";
++ };
++
++ pinctrl_pwm15_default: pwm15_default {
++ function = "PWM15";
++ groups = "PWM15";
++ };
++
++ pinctrl_tach0_default: tach0_default {
++ function = "TACH0";
++ groups = "TACH0";
++ };
++
++ pinctrl_tach1_default: tach1_default {
++ function = "TACH1";
++ groups = "TACH1";
++ };
++
++ pinctrl_tach2_default: tach2_default {
++ function = "TACH2";
++ groups = "TACH2";
++ };
++
++ pinctrl_tach3_default: tach3_default {
++ function = "TACH3";
++ groups = "TACH3";
++ };
++
++ pinctrl_tach4_default: tach4_default {
++ function = "TACH4";
++ groups = "TACH4";
++ };
++
++ pinctrl_tach5_default: tach5_default {
++ function = "TACH5";
++ groups = "TACH5";
++ };
++
++ pinctrl_tach6_default: tach6_default {
++ function = "TACH6";
++ groups = "TACH6";
++ };
++
++ pinctrl_tach7_default: tach7_default {
++ function = "TACH7";
++ groups = "TACH7";
++ };
++
++ pinctrl_tach8_default: tach8_default {
++ function = "TACH8";
++ groups = "TACH8";
++ };
++
++ pinctrl_tach9_default: tach9_default {
++ function = "TACH9";
++ groups = "TACH9";
++ };
++
++ pinctrl_tach10_default: tach10_default {
++ function = "TACH10";
++ groups = "TACH10";
++ };
++
++ pinctrl_tach11_default: tach11_default {
++ function = "TACH11";
++ groups = "TACH11";
++ };
++
++ pinctrl_tach12_default: tach12_default {
++ function = "TACH12";
++ groups = "TACH12";
++ };
++
++ pinctrl_tach13_default: tach13_default {
++ function = "TACH13";
++ groups = "TACH13";
++ };
++
++ pinctrl_tach14_default: tach14_default {
++ function = "TACH14";
++ groups = "TACH14";
++ };
++
++ pinctrl_tach15_default: tach15_default {
++ function = "TACH15";
++ groups = "TACH15";
++ };
++
++ pinctrl_jtagm1_default: jtagm1_default {
++ function = "JTAGM1";
++ groups = "JTAGM1";
++ };
++
++ pinctrl_mdio0_default: mdio0_default {
++ function = "MDIO0";
++ groups = "MDIO0";
++ };
++
++ pinctrl_mdio1_default: mdio1_default {
++ function = "MDIO1";
++ groups = "MDIO1";
++ };
++
++ pinctrl_mdio2_default: mdio2_default {
++ function = "MDIO2";
++ groups = "MDIO2";
++ };
++
++ pinctrl_rgmii0_default: rgmii0_default {
++ function = "RGMII0";
++ groups = "RGMII0";
++ };
++
++ pinctrl_rgmii1_default: rgmii1_default {
++ function = "RGMII1";
++ groups = "RGMII1";
++ };
++
++ pinctrl_rmii0_default: rmii0_default {
++ function = "RMII0";
++ groups = "RMII0";
++ };
++
++ pinctrl_rmii1_default: rmii1_default {
++ function = "RMII1";
++ groups = "RMII1";
++ };
++
++ pinctrl_sgmii_default: sgmii_default {
++ function = "SGMII";
++ groups = "SGMII";
++ };
++
++ pinctrl_fwspi_quad_default: fwspi_quad_default {
++ function = "FWQSPI";
++ groups = "FWQSPI";
++ };
++
++ pinctrl_fsi0_default: fsi0_default {
++ function = "FSI0";
++ groups = "FSI0";
++ };
++
++ pinctrl_fsi1_default: fsi1_default {
++ function = "FSI1";
++ groups = "FSI1";
++ };
++
++ pinctrl_fsi2_default: fsi2_default {
++ function = "FSI2";
++ groups = "FSI2";
++ };
++
++ pinctrl_fsi3_default: fsi3_default {
++ function = "FSI3";
++ groups = "FSI3";
++ };
++
++ pinctrl_spi0_default: spi0_default {
++ function = "SPI0";
++ groups = "SPI0";
++ };
++
++ pinctrl_spi0_quad_default: spi0_quad_default {
++ function = "QSPI0";
++ groups = "QSPI0";
++ };
++
++ pinctrl_spi0_cs1_default: spi0_cs1_default {
++ function = "SPI0CS1";
++ groups = "SPI0CS1";
++ };
++
++ pinctrl_spi1_default: spi1_default {
++ function = "SPI1";
++ groups = "SPI1";
++ };
++
++ pinctrl_spi1_quad_default: spi1_quad_default {
++ function = "QSPI1";
++ groups = "QSPI1";
++ };
++
++ pinctrl_spi1_cs1_default: spi1_cs1_default {
++ function = "SPI1CS1";
++ groups = "SPI1CS1";
++ };
++
++ pinctrl_spi2_default: spi2_default {
++ function = "SPI2";
++ groups = "SPI2";
++ };
++
++ pinctrl_spi2_quad_default: spi2_quad_default {
++ function = "QSPI2";
++ groups = "QSPI2";
++ };
++
++ pinctrl_spi2_cs1_default: spi2_cs1_default {
++ function = "SPI2CS1";
++ groups = "SPI2CS1";
++ };
++
++ pinctrl_espi0_default: espi0_default {
++ function = "ESPI0";
++ groups = "ESPI0";
++ };
++
++ pinctrl_espi1_default: espi1_default {
++ function = "ESPI1";
++ groups = "ESPI1";
++ };
++
++ pinctrl_lpc0_default: lpc0_default {
++ function = "LPC0";
++ groups = "LPC0";
++ };
++
++ pinctrl_lpc1_default: lpc1_default {
++ function = "LPC1";
++ groups = "LPC1";
++ };
++
++ pinctrl_vpi_default: vpi_default {
++ function = "VPI";
++ groups = "VPI";
++ };
++
++ pinctrl_sd_default: sd_default {
++ function = "SD";
++ groups = "SD";
++ };
++
++ pinctrl_hvi3c0_default: hvi3c0_default {
++ function = "I3C0";
++ groups = "HVI3C0";
++ };
++
++ pinctrl_hvi3c1_default: hvi3c1_default {
++ function = "I3C1";
++ groups = "HVI3C1";
++ };
++
++ pinctrl_hvi3c2_default: hvi3c2_default {
++ function = "I3C2";
++ groups = "HVI3C2";
++ };
++
++ pinctrl_hvi3c3_default: hvi3c3_default {
++ function = "I3C3";
++ groups = "HVI3C3";
++ };
++
++ pinctrl_i3c4_default: i3c4_default {
++ function = "I3C4";
++ groups = "I3C4";
++ };
++
++ pinctrl_i3c5_default: i3c5_default {
++ function = "I3C5";
++ groups = "I3C5";
++ };
++
++ pinctrl_i3c6_default: i3c6_default {
++ function = "I3C6";
++ groups = "I3C6";
++ };
++
++ pinctrl_i3c7_default: i3c7_default {
++ function = "I3C7";
++ groups = "I3C7";
++ };
++
++ pinctrl_i3c8_default: i3c8_default {
++ function = "I3C8";
++ groups = "I3C8";
++ };
++
++ pinctrl_i3c9_default: i3c9_default {
++ function = "I3C9";
++ groups = "I3C9";
++ };
++
++ pinctrl_i3c10_default: i3c10_default {
++ function = "I3C10";
++ groups = "I3C10";
++ };
++
++ pinctrl_i3c11_default: i3c11_default {
++ function = "I3C11";
++ groups = "I3C11";
++ };
++
++ pinctrl_hvi3c12_default: hvi3c12_default {
++ function = "I3C12";
++ groups = "HVI3C12";
++ };
++
++ pinctrl_hvi3c13_default: hvi3c13_default {
++ function = "I3C13";
++ groups = "HVI3C13";
++ };
++
++ pinctrl_hvi3c14_default: hvi3c14_default {
++ function = "I3C14";
++ groups = "HVI3C14";
++ };
++
++ pinctrl_hvi3c15_default: hvi3c15_default {
++ function = "I3C15";
++ groups = "HVI3C15";
++ };
++
++ pinctrl_tach0_default: tach0_default {
++ function = "TACH0";
++ groups = "TACH0";
++ };
++
++ pinctrl_tach1_default: tach1_default {
++ function = "TACH1";
++ groups = "TACH1";
++ };
++
++ pinctrl_tach2_default: tach2_default {
++ function = "TACH2";
++ groups = "TACH2";
++ };
++
++ pinctrl_tach3_default: tach3_default {
++ function = "TACH3";
++ groups = "TACH3";
++ };
++
++ pinctrl_tach4_default: tach4_default {
++ function = "TACH4";
++ groups = "TACH4";
++ };
++
++ pinctrl_tach5_default: tach5_default {
++ function = "TACH5";
++ groups = "TACH5";
++ };
++
++ pinctrl_tach6_default: tach6_default {
++ function = "TACH6";
++ groups = "TACH6";
++ };
++
++ pinctrl_tach7_default: tach7_default {
++ function = "TACH7";
++ groups = "TACH7";
++ };
++
++ pinctrl_tach8_default: tach8_default {
++ function = "TACH8";
++ groups = "TACH8";
++ };
++
++ pinctrl_tach9_default: tach9_default {
++ function = "TACH9";
++ groups = "TACH9";
++ };
++
++ pinctrl_tach10_default: tach10_default {
++ function = "TACH10";
++ groups = "TACH10";
++ };
++
++ pinctrl_tach11_default: tach11_default {
++ function = "TACH11";
++ groups = "TACH11";
++ };
++
++ pinctrl_tach12_default: tach12_default {
++ function = "TACH12";
++ groups = "TACH12";
++ };
++
++ pinctrl_tach13_default: tach13_default {
++ function = "TACH13";
++ groups = "TACH13";
++ };
++
++ pinctrl_tach14_default: tach14_default {
++ function = "TACH14";
++ groups = "TACH14";
++ };
++
++ pinctrl_tach15_default: tach15_default {
++ function = "TACH15";
++ groups = "TACH15";
++ };
++
++ pinctrl_thru0_default: thru0_default {
++ function = "THRU0";
++ groups = "THRU0";
++ };
++
++ pinctrl_thru1_default: thru1_default {
++ function = "THRU1";
++ groups = "THRU1";
++ };
++
++ pinctrl_thru2_default: thru2_default {
++ function = "THRU2";
++ groups = "THRU2";
++ };
++
++ pinctrl_thru3_default: thru3_default {
++ function = "THRU3";
++ groups = "THRU3";
++ };
++
++ pinctrl_ncts5_default: ncts5_default {
++ function = "NCTS5";
++ groups = "NCTS5";
++ };
++
++ pinctrl_ndcd5_default: ndcd5_default {
++ function = "NDCD5";
++ groups = "NDCD5";
++ };
++
++ pinctrl_ndsr5_default: ndsr5_default {
++ function = "NDSR5";
++ groups = "NDSR5";
++ };
++
++ pinctrl_nri5_default: nri5_default {
++ function = "NRI5";
++ groups = "NRI5";
++ };
++
++ pinctrl_i2c0_default: i2c0_default {
++ function = "I2C0";
++ groups = "I2C0";
++ };
++
++ pinctrl_i2c1_default: i2c1_default {
++ function = "I2C1";
++ groups = "I2C1";
++ };
++
++ pinctrl_i2c2_default: i2c2_default {
++ function = "I2C2";
++ groups = "I2C2";
++ };
++
++ pinctrl_i2c3_default: i2c3_default {
++ function = "I2C3";
++ groups = "I2C3";
++ };
++
++ pinctrl_i2c4_default: i2c4_default {
++ function = "I2C4";
++ groups = "I2C4";
++ };
++
++ pinctrl_i2c5_default: i2c5_default {
++ function = "I2C5";
++ groups = "I2C5";
++ };
++
++ pinctrl_i2c6_default: i2c6_default {
++ function = "I2C6";
++ groups = "I2C6";
++ };
++
++ pinctrl_i2c7_default: i2c7_default {
++ function = "I2C7";
++ groups = "I2C7";
++ };
++
++ pinctrl_i2c8_default: i2c8_default {
++ function = "I2C8";
++ groups = "I2C8";
++ };
++
++ pinctrl_i2c9_default: i2c9_default {
++ function = "I2C9";
++ groups = "I2C9";
++ };
++
++ pinctrl_i2c10_default: i2c10_default {
++ function = "I2C10";
++ groups = "I2C10";
++ };
++
++ pinctrl_i2c11_default: i2c11_default {
++ function = "I2C11";
++ groups = "I2C11";
++ };
++
++ pinctrl_i2c12_default: i2c12_default {
++ function = "I2C12";
++ groups = "I2C12";
++ };
++
++ pinctrl_i2c13_default: i2c13_default {
++ function = "I2C13";
++ groups = "I2C13";
++ };
++
++ pinctrl_i2c14_default: i2c14_default {
++ function = "I2C14";
++ groups = "I2C14";
++ };
++
++ pinctrl_i2c15_default: i2c15_default {
++ function = "I2C15";
++ groups = "I2C15";
++ };
++
++ pinctrl_can_default: can_default {
++ function = "CANBUS";
++ groups = "CANBUS";
++ };
++
++ pinctrl_di2c8_default: di2c8_default {
++ function = "DI2C8";
++ groups = "DI2C8";
++ };
++
++ pinctrl_di2c9_default: di2c9_default {
++ function = "DI2C9";
++ groups = "DI2C9";
++ };
++
++ pinctrl_di2c10_default: di2c10_default {
++ function = "DI2C10";
++ groups = "DI2C10";
++ };
++
++ pinctrl_di2c11_default: di2c11_default {
++ function = "DI2C11";
++ groups = "DI2C11";
++ };
++
++	pinctrl_di2c12_default: di2c12_default {
++ function = "DI2C12";
++ groups = "DI2C12";
++ };
++
++ pinctrl_di2c13_default: di2c13_default {
++ function = "DI2C13";
++ groups = "DI2C13";
++ };
++
++ pinctrl_di2c14_default: di2c14_default {
++ function = "DI2C14";
++ groups = "DI2C14";
++ };
++
++ pinctrl_di2c15_default: di2c15_default {
++ function = "DI2C15";
++ groups = "DI2C15";
++ };
++
++ pinctrl_ncts0_default: ncts0_default {
++ function = "UART0";
++ groups = "NCTS0";
++ };
++
++ pinctrl_ndcd0_default: ndcd0_default {
++ function = "UART0";
++ groups = "NDCD0";
++ };
++
++ pinctrl_ndsr0_default: ndsr0_default {
++ function = "UART0";
++ groups = "NDSR0";
++ };
++
++ pinctrl_nri0_default: nri0_default {
++ function = "UART0";
++ groups = "NRI0";
++ };
++
++ pinctrl_ndtr0_default: ndtr0_default {
++ function = "UART0";
++ groups = "NDTR0";
++ };
++
++ pinctrl_nrts0_default: nrts0_default {
++ function = "UART0";
++ groups = "NRTS0";
++ };
++
++ pinctrl_txd0_default: txd0_default {
++ function = "UART0";
++ groups = "TXD0";
++ };
++
++ pinctrl_rxd0_default: rxd0_default {
++ function = "UART0";
++ groups = "RXD0";
++ };
++
++ pinctrl_ncts1_default: ncts1_default {
++ function = "UART1";
++ groups = "NCTS1";
++ };
++
++ pinctrl_ndcd1_default: ndcd1_default {
++ function = "UART1";
++ groups = "NDCD1";
++ };
++
++ pinctrl_ndsr1_default: ndsr1_default {
++ function = "UART1";
++ groups = "NDSR1";
++ };
++
++ pinctrl_nri1_default: nri1_default {
++ function = "UART1";
++ groups = "NRI1";
++ };
++
++ pinctrl_ndtr1_default: ndtr1_default {
++ function = "UART1";
++ groups = "NDTR1";
++ };
++
++ pinctrl_nrts1_default: nrts1_default {
++ function = "UART1";
++ groups = "NRTS1";
++ };
++
++ pinctrl_txd1_default: txd1_default {
++ function = "UART1";
++ groups = "TXD1";
++ };
++
++ pinctrl_rxd1_default: rxd1_default {
++ function = "UART1";
++ groups = "RXD1";
++ };
++
++ pinctrl_txd2_default: txd2_default {
++ function = "UART2";
++ groups = "TXD2";
++ };
++
++ pinctrl_rxd2_default: rxd2_default {
++ function = "UART2";
++ groups = "RXD2";
++ };
++
++ pinctrl_txd3_default: txd3_default {
++ function = "UART3";
++ groups = "TXD3";
++ };
++
++ pinctrl_rxd3_default: rxd3_default {
++ function = "UART3";
++ groups = "RXD3";
++ };
++
++ pinctrl_ncts5_default: ncts5_default {
++ function = "UART5";
++ groups = "NCTS5";
++ };
++
++ pinctrl_ndcd5_default: ndcd5_default {
++ function = "UART5";
++ groups = "NDCD5";
++ };
++
++ pinctrl_ndsr5_default: ndsr5_default {
++ function = "UART5";
++ groups = "NDSR5";
++ };
++
++ pinctrl_nri5_default: nri5_default {
++ function = "UART5";
++ groups = "NRI5";
++ };
++
++ pinctrl_ndtr5_default: ndtr5_default {
++ function = "UART5";
++ groups = "NDTR5";
++ };
++
++ pinctrl_nrts5_default: nrts5_default {
++ function = "UART5";
++ groups = "NRTS5";
++ };
++
++ pinctrl_txd5_default: txd5_default {
++ function = "UART5";
++ groups = "TXD5";
++ };
++
++ pinctrl_rxd5_default: rxd5_default {
++ function = "UART5";
++ groups = "RXD5";
++ };
++
++ pinctrl_ncts6_default: ncts6_default {
++ function = "UART6";
++ groups = "NCTS6";
++ };
++
++ pinctrl_ndcd6_default: ndcd6_default {
++ function = "UART6";
++ groups = "NDCD6";
++ };
++
++ pinctrl_ndsr6_default: ndsr6_default {
++ function = "UART6";
++ groups = "NDSR6";
++ };
++
++ pinctrl_nri6_default: nri6_default {
++ function = "UART6";
++ groups = "NRI6";
++ };
++
++ pinctrl_ndtr6_default: ndtr6_default {
++ function = "UART6";
++ groups = "NDTR6";
++ };
++
++ pinctrl_nrts6_default: nrts6_default {
++ function = "UART6";
++ groups = "NRTS6";
++ };
++
++ pinctrl_txd6_default: txd6_default {
++ function = "UART6";
++ groups = "TXD6";
++ };
++
++ pinctrl_rxd6_default: rxd6_default {
++ function = "UART6";
++ groups = "RXD6";
++ };
++
++ pinctrl_txd7_default: txd7_default {
++ function = "UART7";
++ groups = "TXD7";
++ };
++
++ pinctrl_rxd7_default: rxd7_default {
++ function = "UART7";
++ groups = "RXD7";
++ };
++
++ pinctrl_txd8_default: txd8_default {
++ function = "UART8";
++ groups = "TXD8";
++ };
++
++ pinctrl_rxd8_default: rxd8_default {
++ function = "UART8";
++ groups = "RXD8";
++ };
++
++ pinctrl_txd9_default: txd9_default {
++ function = "UART9";
++ groups = "TXD9";
++ };
++
++ pinctrl_rxd9_default: rxd9_default {
++ function = "UART9";
++ groups = "RXD9";
++ };
++
++ pinctrl_txd10_default: txd10_default {
++ function = "UART10";
++ groups = "TXD10";
++ };
++
++ pinctrl_rxd10_default: rxd10_default {
++ function = "UART10";
++ groups = "RXD10";
++ };
++
++ pinctrl_txd11_default: txd11_default {
++ function = "UART11";
++ groups = "TXD11";
++ };
++
++ pinctrl_rxd11_default: rxd11_default {
++ function = "UART11";
++ groups = "RXD11";
++ };
++
++ pinctrl_pcierc2_perst_default: pcierc2_perst_default {
++ function = "PCIERC";
++ groups = "PE2SGRSTN";
++ };
++
++ pinctrl_usb2cud_default: usb2cud_default {
++ function = "USB2C";
++ groups = "USB2CUD";
++ };
++
++ pinctrl_usb2cd_default: usb2cd_default {
++ function = "USB2C";
++ groups = "USB2CD";
++ };
++
++ pinctrl_usb2ch_default: usb2ch_default {
++ function = "USB2C";
++ groups = "USB2CH";
++ };
++
++ pinctrl_usb2cu_default: usb2cu_default {
++ function = "USB2C";
++ groups = "USB2CU";
++ };
++
++ pinctrl_usb2dd_default: usb2dd_default {
++ function = "USB2D";
++ groups = "USB2DD";
++ };
++
++ pinctrl_usb2dh_default: usb2dh_default {
++ function = "USB2D";
++ groups = "USB2DH";
++ };
++};
+diff --git a/arch/arm64/boot/dts/aspeed/aspeed-g7.dtsi b/arch/arm64/boot/dts/aspeed/aspeed-g7.dtsi
+new file mode 100644
+index 000000000..dfa5b622b
+--- /dev/null
++++ b/arch/arm64/boot/dts/aspeed/aspeed-g7.dtsi
+@@ -0,0 +1,2774 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++#include <dt-bindings/clock/aspeed,ast2700-scu.h>
++#include <dt-bindings/reset/aspeed,ast2700-scu.h>
++#include <dt-bindings/interrupt-controller/arm-gic.h>
++#include <dt-bindings/interrupt-controller/aspeed-scu-ic.h>
++#include <dt-bindings/interrupt-controller/aspeed-e2m-ic.h>
++
++/ {
++ model = "Aspeed BMC";
++ compatible = "aspeed,ast2700";
++ #address-cells = <2>;
++ #size-cells = <2>;
++ interrupt-parent = <&gic>;
++
++ aliases {
++ serial0 = &uart0;
++ serial1 = &uart1;
++ serial2 = &uart2;
++ serial3 = &uart3;
++ serial4 = &uart4;
++ serial5 = &uart5;
++ serial6 = &uart6;
++ serial7 = &uart7;
++ serial8 = &uart8;
++ serial9 = &uart9;
++ serial10 = &uart10;
++ serial11 = &uart11;
++ serial12 = &uart12;
++ serial13 = &uart13;
++ serial14 = &uart14;
++ serial15 = &vuart0;
++ serial16 = &vuart1;
++ serial17 = &vuart2;
++ serial18 = &vuart3;
++ serial19 = &pcie_vuart0;
++ serial20 = &pcie_vuart1;
++ serial21 = &pcie_vuart2;
++ serial22 = &pcie_vuart3;
++ i2c0 = &i2c0;
++ i2c1 = &i2c1;
++ i2c2 = &i2c2;
++ i2c3 = &i2c3;
++ i2c4 = &i2c4;
++ i2c5 = &i2c5;
++ i2c6 = &i2c6;
++ i2c7 = &i2c7;
++ i2c8 = &i2c8;
++ i2c9 = &i2c9;
++ i2c10 = &i2c10;
++ i2c11 = &i2c11;
++ i2c12 = &i2c12;
++ i2c13 = &i2c13;
++ i2c14 = &i2c14;
++ i2c15 = &i2c15;
++ i3c0 = &i3c0;
++ i3c1 = &i3c1;
++ i3c2 = &i3c2;
++ i3c3 = &i3c3;
++ i3c4 = &i3c4;
++ i3c5 = &i3c5;
++ i3c6 = &i3c6;
++ i3c7 = &i3c7;
++ i3c8 = &i3c8;
++ i3c9 = &i3c9;
++ i3c10 = &i3c10;
++ i3c11 = &i3c11;
++ i3c12 = &i3c12;
++ i3c13 = &i3c13;
++ i3c14 = &i3c14;
++ i3c15 = &i3c15;
++ mdio0 = &mdio0;
++ mdio1 = &mdio1;
++ mdio2 = &mdio2;
++ mctp0 = &mctp0;
++ mctp1 = &mctp1;
++ mctp2 = &mctp2;
++ video0 = &video0;
++ video1 = &video1;
++ rvas0 = &rvas0;
++ rvas1 = &rvas1;
++ xdma0 = &xdma0;
++ xdma1 = &xdma1;
++ pcie_mmbi0 = &pcie0_mmbi0;
++ pcie_mmbi1 = &pcie0_mmbi1;
++ pcie_mmbi2 = &pcie0_mmbi2;
++ pcie_mmbi3 = &pcie0_mmbi3;
++ pcie_mmbi4 = &pcie1_mmbi4;
++ pcie_mmbi5 = &pcie1_mmbi5;
++ pcie_mmbi6 = &pcie1_mmbi6;
++ pcie_mmbi7 = &pcie1_mmbi7;
++ pcie_mmbi8 = &pcie2_mmbi0;
++ pcie_mmbi9 = &pcie2_mmbi1;
++ pcie_mmbi10 = &pcie2_mmbi2;
++ pcie_mmbi11 = &pcie2_mmbi3;
++ pcie_mmbi12 = &pcie2_mmbi4;
++ pcie_mmbi13 = &pcie2_mmbi5;
++ };
++
++ cpus {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ cpu@0 {
++ compatible = "arm,cortex-a35";
++ enable-method = "psci";
++ device_type = "cpu";
++ reg = <0>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0x8000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <256>;
++ next-level-cache = <&l2>;
++ };
++
++ cpu@1 {
++ compatible = "arm,cortex-a35";
++ enable-method = "psci";
++ device_type = "cpu";
++ reg = <1>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0x8000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <256>;
++ next-level-cache = <&l2>;
++ };
++
++ cpu@2 {
++ compatible = "arm,cortex-a35";
++ enable-method = "psci";
++ device_type = "cpu";
++ reg = <2>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0x8000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <256>;
++ next-level-cache = <&l2>;
++ };
++
++ cpu@3 {
++ compatible = "arm,cortex-a35";
++ enable-method = "psci";
++ device_type = "cpu";
++ reg = <3>;
++ d-cache-size = <0x8000>;
++ d-cache-line-size = <64>;
++ d-cache-sets = <128>;
++ i-cache-size = <0x8000>;
++ i-cache-line-size = <64>;
++ i-cache-sets = <256>;
++ next-level-cache = <&l2>;
++ };
++
++ l2: l2-cache0 {
++ compatible = "cache";
++ cache-size = <0x80000>;
++ cache-line-size = <64>;
++ cache-sets = <1024>;
++ cache-level = <2>;
++ };
++ };
++
++ pmu {
++ compatible = "arm,cortex-a35-pmu";
++ interrupt-parent = <&gic>;
++ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ psci {
++ compatible = "arm,psci-1.0";
++ method = "smc";
++ };
++
++ gic: interrupt-controller@12200000 {
++ compatible = "arm,gic-v3";
++ interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
++ #interrupt-cells = <3>;
++ interrupt-controller;
++ interrupt-parent = <&gic>;
++ #redistributor-regions = <1>;
++ reg = <0 0x12200000 0 0x10000>, //GICD
++ <0 0x12280000 0 0x80000>, //GICR
++ <0 0x40440000 0 0x1000>; //GICC
++ };
++
++ timer {
++ compatible = "arm,armv8-timer";
++ interrupt-parent = <&gic>;
++ interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>,
++ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>,
++ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>,
++ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>;
++ arm,cpu-registers-not-fw-configured;
++ always-on;
++ };
++
++ soc0: soc@10000000 {
++ compatible = "simple-bus";
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++
++ sram0: sram@10000000 {
++ compatible = "mmio-sram";
++ reg = <0x0 0x10000000 0x0 0x20000>;
++ ranges = <0x0 0x0 0x0 0x10000000 0x0 0x20000>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ no-memory-wc;
++
++ exported@0 {
++ reg = <0 0x0 0 0x20000>;
++ export;
++ };
++ };
++
++ uphy3a: usb-phy3@12010000 {
++ compatible = "aspeed,ast2700-uphy3a";
++ reg = <0x0 0x12010000 0x0 0xBC>;
++ clocks = <&syscon0 SCU0_CLK_GATE_PORTAUSB2CLK>;
++ resets = <&syscon0 SCU0_RESET_PORTA_PHY3>;
++ aspeed,scu = <&syscon0>;
++ #phy-cells = <0>;
++ status = "disabled";
++ };
++
++ vhuba1: usb-vhub@12011000 {
++ compatible = "aspeed,ast2700-usb-vhuba1";
++ reg = <0x0 0x12011000 0x0 0x820>;
++ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&syscon0 SCU0_CLK_GATE_PORTAUSB2CLK>;
++ resets = <&syscon0 SCU0_RESET_PORTA_VHUB>;
++ aspeed,vhub-downstream-ports = <7>;
++ aspeed,vhub-generic-endpoints = <21>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usb2axhpd1_default>;
++ aspeed,device = <&pcie_device0>;
++ aspeed,scu = <&syscon0>;
++ status = "disabled";
++ };
++
++ uphy2a: usb-phy2@12011800 {
++ compatible = "aspeed,ast2700-uphy2a";
++ reg = <0x0 0x12011800 0x0 0x10>;
++ ctrl = <0x4 0xbc001e00>; /* xHCI to vHub1 clock rate: 60MHz */
++ aspeed,scu = <&syscon0>;
++ status = "disabled";
++ };
++
++ uphy3b: usb-phy3@12020000 {
++ compatible = "aspeed,ast2700-uphy3b";
++ reg = <0x0 0x12020000 0x0 0xBC>;
++ clocks = <&syscon0 SCU0_CLK_GATE_PORTBUSB2CLK>;
++ resets = <&syscon0 SCU0_RESET_PORTB_PHY3>;
++ aspeed,scu = <&syscon0>;
++ #phy-cells = <0>;
++ status = "disabled";
++ };
++
++ vhubb1: usb-vhub@12021000 {
++ compatible = "aspeed,ast2700-usb-vhubb1";
++ reg = <0x0 0x12021000 0x0 0x820>;
++ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&syscon0 SCU0_CLK_GATE_PORTBUSB2CLK>;
++ resets = <&syscon0 SCU0_RESET_PORTB_VHUB>;
++ aspeed,vhub-downstream-ports = <7>;
++ aspeed,vhub-generic-endpoints = <21>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usb2bxhpd1_default>;
++ aspeed,device = <&pcie_device1>;
++ aspeed,scu = <&syscon0>;
++ status = "disabled";
++ };
++
++ uphy2b: usb-phy2@12021800 {
++ compatible = "aspeed,ast2700-uphy2b";
++ reg = <0x0 0x12021800 0x0 0x10>;
++ ctrl = <0x4 0xbc001e00>; /* xHCI to vHub1 clock rate: 60MHz */
++ aspeed,scu = <&syscon0>;
++ status = "disabled";
++ };
++
++ xhci0: usb@12030000 {
++ compatible = "aspeed,ast2700-xhci", "snps,dwc3";
++ reg = <0x0 0x12030000 0x0 0x10000>;
++ interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&syscon0 SCU0_CLK_GATE_PORTAUSB2CLK>,
++ <&syscon0 SCU0_CLK_U2PHY_REFCLK>,
++ <&syscon0 SCU0_CLK_U2PHY_CLK12M>;
++ clock-names = "bus_early", "ref", "suspend";
++ resets = <&syscon0 SCU0_RESET_PORTA_XHCI>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usb3axh_default &pinctrl_usb2axh_default>;
++ phys = <&uphy3a>;
++ phy-names = "usb3-phy";
++ dr_mode = "host";
++ status = "disabled";
++ };
++
++ usb3ahp: usb3ahp {
++ compatible = "aspeed,ast2700-usb3ahp";
++ clocks = <&syscon0 SCU0_CLK_GATE_PORTAUSB2CLK>;
++ resets = <&syscon0 SCU0_RESET_PORTA_XHCI>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usb3axhp_default>;
++ aspeed,device = <&pcie_device0>;
++ phys = <&uphy3a>;
++ phy-names = "usb3-phy";
++ status = "disabled";
++ };
++
++ uhci0: usb@12040000 {
++ compatible = "aspeed,ast2700-uhci", "generic-uhci";
++ reg = <0x0 0x12040000 0x0 0x100>;
++ interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
++ #ports = <2>;
++ clocks = <&syscon0 SCU0_CLK_GATE_UHCICLK>;
++ resets = <&syscon0 SCU0_RESET_UHCI>;
++ status = "disabled";
++ };
++
++ xhci1: usb@12050000 {
++ compatible = "aspeed,ast2700-xhci", "snps,dwc3";
++ reg = <0x0 0x12050000 0x0 0x10000>;
++ interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&syscon0 SCU0_CLK_GATE_PORTBUSB2CLK>,
++ <&syscon0 SCU0_CLK_U2PHY_REFCLK>,
++ <&syscon0 SCU0_CLK_U2PHY_CLK12M>;
++ clock-names = "bus_early", "ref", "suspend";
++ resets = <&syscon0 SCU0_RESET_PORTB_XHCI>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usb3bxh_default &pinctrl_usb2bxh_default>;
++ phys = <&uphy3b>;
++ phy-names = "usb3-phy";
++ dr_mode = "host";
++ status = "disabled";
++ };
++
++ usb3bhp: usb3bhp {
++ compatible = "aspeed,ast2700-usb3bhp";
++ clocks = <&syscon0 SCU0_CLK_GATE_PORTBUSB2CLK>;
++ resets = <&syscon0 SCU0_RESET_PORTB_XHCI>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usb3bxhp_default>;
++ aspeed,device = <&pcie_device1>;
++ phys = <&uphy3b>;
++ phy-names = "usb3-phy";
++ status = "disabled";
++ };
++
++ vhuba0: usb-vhub@12060000 {
++ compatible = "aspeed,ast2700-usb-vhuba0";
++ reg = <0x0 0x12060000 0x0 0x820>;
++ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&syscon0 SCU0_CLK_GATE_PORTAUSB2CLK>;
++ resets = <&syscon0 SCU0_RESET_PORTA_VHUB_EHCI>;
++ aspeed,vhub-downstream-ports = <7>;
++ aspeed,vhub-generic-endpoints = <21>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usb2ad0_default>;
++ aspeed,device = <&pcie_device0>;
++ aspeed,scu = <&syscon0>;
++ status = "disabled";
++ };
++
++ ehci0: usb@12061000 {
++ compatible = "aspeed,ast2700-ehci", "generic-ehci";
++ reg = <0x0 0x12061000 0x0 0x100>;
++ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&syscon0 SCU0_CLK_GATE_PORTAUSB2CLK>;
++ resets = <&syscon0 SCU0_RESET_PORTA_VHUB_EHCI>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usb2ah_default>;
++ status = "disabled";
++ };
++
++ usb2ahp: usb2ahp {
++ compatible = "aspeed,ast2700-usb2ahp";
++ clocks = <&syscon0 SCU0_CLK_GATE_PORTAUSB2CLK>;
++ resets = <&syscon0 SCU0_RESET_PORTA_VHUB_EHCI>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usb2ahp_default>;
++ aspeed,device = <&pcie_device0>;
++ status = "disabled";
++ };
++
++ vhubb0: usb-vhub@12062000 {
++ compatible = "aspeed,ast2700-usb-vhubb0";
++ reg = <0x0 0x12062000 0x0 0x820>;
++ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&syscon0 SCU0_CLK_GATE_PORTBUSB2CLK>;
++ resets = <&syscon0 SCU0_RESET_PORTB_VHUB_EHCI>;
++ aspeed,vhub-downstream-ports = <7>;
++ aspeed,vhub-generic-endpoints = <21>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usb2bd0_default>;
++ aspeed,device = <&pcie_device1>;
++ aspeed,scu = <&syscon0>;
++ status = "disabled";
++ };
++
++ ehci1: usb@12063000 {
++ compatible = "aspeed,ast2700-ehci", "generic-ehci";
++ reg = <0x0 0x12063000 0x0 0x100>;
++ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&syscon0 SCU0_CLK_GATE_PORTBUSB2CLK>;
++ resets = <&syscon0 SCU0_RESET_PORTB_VHUB_EHCI>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usb2bh_default>;
++ status = "disabled";
++ };
++
++ usb2bhp: usb2bhp {
++ compatible = "aspeed,ast2700-usb2bhp";
++ clocks = <&syscon0 SCU0_CLK_GATE_PORTBUSB2CLK>;
++ resets = <&syscon0 SCU0_RESET_PORTB_VHUB_EHCI>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usb2bhp_default>;
++ aspeed,device = <&pcie_device1>;
++ status = "disabled";
++ };
++
++ rsss: crypto@12080000 {
++ compatible = "aspeed,ast2700-rsss";
++ reg = <0x0 0x12080000 0 0x1000>;
++ clocks = <&syscon0 SCU0_CLK_GATE_RSACLK>;
++ interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
++ resets = <&syscon0 SCU0_RESET_RSA>,
++ <&syscon0 SCU0_RESET_SHA3>,
++ <&syscon0 SCU0_RESET_SM3>,
++ <&syscon0 SCU0_RESET_SM4>;
++ reset-names = "rsa", "sha3", "sm3", "sm4";
++ status = "disabled";
++ };
++
++ emmc_controller: sdc@12090000 {
++ compatible = "aspeed,ast2600-sd-controller";
++ reg = <0 0x12090000 0 0x100>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges = <0 0 0 0x12090000 0 0x10000>;
++ clocks = <&syscon0 SCU0_CLK_GATE_EMMCCLK>;
++ resets = <&syscon0 SCU0_RESET_EMMC>;
++ status = "disabled";
++
++ emmc: sdhci@12090100 {
++ compatible = "aspeed,ast2600-emmc";
++ reg = <0 0x100 0 0x100>;
++ sdhci,auto-cmd12;
++ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&syscon0 SCU0_CLK_GATE_EMMCCLK>;
++ pinctrl-names = "default";
++ status = "disabled";
++ };
++ };
++
++ ufs_controller: cnr@12c08000 {
++ compatible = "aspeed,ast2700-ufscnr";
++ reg = <0 0x12c08000 0 0x100>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges = <0 0 0 0x12c08000 0 0x300>;
++ clocks = <&syscon0 SCU0_CLK_GATE_UFSCLK>;
++ resets = <&syscon0 SCU0_RESET_UFS>;
++ status = "disabled";
++
++ ufs: ufshc@12c08200 {
++ compatible = "aspeed,ast2700-ufshc";
++ reg = <0 0x200 0 0x100>;
++ interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&syscon0 SCU0_CLK_AXI1>;
++ clock-names = "core_clk";
++ freq-table-hz = <0 0>;
++ status = "disabled";
++ };
++ };
++
++ intc0: interrupt-controller@12100000 {
++ compatible = "simple-mfd";
++ reg = <0 0x12100000 0 0x4000>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges = <0x0 0x0 0x0 0x12100000 0x0 0x4000>;
++
++ intc0_11: interrupt-controller@1b00 {
++ #interrupt-cells = <1>;
++ interrupt-controller;
++ compatible = "aspeed,ast2700-intc-ic";
++ reg = <0x0 0x1b00 0x0 0x10>;
++ interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>;
++ };
++ };
++
++ bmc_dev0: bmc-dev@12110000 {
++ compatible = "aspeed,ast2700-bmc-device";
++ reg = <0x0 0x12110000 0x0 0xb000>;
++ interrupts-extended = <&gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
++ <&scu_ic0 ASPEED_AST2700_SCU_IC0_PCIE_PERST_LO_TO_HI>;
++ aspeed,config = <&pcie_config0>;
++ aspeed,device = <&pcie_device0>;
++ aspeed,e2m = <&e2m_config0>;
++ aspeed,scu = <&syscon0>;
++ pcie2lpc;
++ status = "disabled";
++ };
++
++ bmc_dev1: bmc-dev@12120000 {
++ compatible = "aspeed,ast2700-bmc-device";
++ reg = <0x0 0x12120000 0x0 0xb000>;
++ interrupts-extended = <&gic GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
++ <&scu_ic1 ASPEED_AST2700_SCU_IC1_PCIE_RCRST_LO_TO_HI>;
++ aspeed,config = <&pcie_config1>;
++ aspeed,device = <&pcie_device1>;
++ aspeed,e2m = <&e2m_config1>;
++ aspeed,scu = <&syscon0>;
++ pcie2lpc;
++ status = "disabled";
++ };
++
++ syscon0: syscon@12c02000 {
++ compatible = "aspeed,ast2700-scu0", "syscon", "simple-mfd";
++ reg = <0x0 0x12c02000 0x0 0x1000>;
++ ranges = <0x0 0x0 0 0x12c02000 0 0x1000>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ #clock-cells = <1>;
++ #reset-cells = <1>;
++
++ silicon-id@0 {
++ compatible = "aspeed,ast2700-silicon-id", "aspeed,silicon-id";
++ reg = <0 0x0 0 0x4>;
++ };
++
++ scu_ic0: interrupt-controller@1D0 {
++ #interrupt-cells = <1>;
++ compatible = "aspeed,ast2700-scu-ic0";
++ reg = <0 0x1d0 0 0xc>;
++ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-controller;
++ };
++
++ scu_ic1: interrupt-controller@1E0 {
++ #interrupt-cells = <1>;
++ compatible = "aspeed,ast2700-scu-ic1";
++ reg = <0 0x1e0 0 0xc>;
++ interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-controller;
++ };
++
++ pinctrl0: pinctrl@400 {
++ compatible = "aspeed,ast2700-soc0-pinctrl";
++ reg = <0 0x400 0 0x600>;
++ };
++ };
++
++ pcie_config0: pcie-config@12c02960 {
++ compatible = "syscon", "simple-mfd";
++ reg = <0 0x12c02960 0 0x40>;
++ };
++
++ pcie_config1: pcie-config@12c029a0 {
++ compatible = "syscon", "simple-mfd";
++ reg = <0 0x12c029a0 0 0x40>;
++ };
++
++ pcie_device0: pcie-device@12c02a00 {
++ compatible = "syscon", "simple-mfd";
++ reg = <0 0x12c02a00 0 0x80>;
++ };
++
++ pcie_device1: pcie-device@12c02a80 {
++ compatible = "syscon", "simple-mfd";
++ reg = <0 0x12c02a80 0 0x80>;
++ };
++
++ mctp0: mctp0@12c06000 {
++ compatible = "aspeed,ast2700-mctp0";
++ reg = <0x0 0x12c06000 0x0 0x40>;
++ interrupts-extended = <&gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>, <&scu_ic0 ASPEED_AST2700_SCU_IC0_PCIE_PERST_LO_TO_HI>;
++ interrupt-names = "mctp", "pcie";
++ resets = <&syscon0 SCU0_RESET_MCTP0>;
++ aspeed,scu = <&syscon0>;
++ aspeed,pcieh = <&pcie_phy0>;
++ status = "disabled";
++ };
++
++ mctp1: mctp1@12c07000 {
++ compatible = "aspeed,ast2700-mctp1";
++ reg = <0x0 0x12c07000 0x0 0x40>;
++ interrupts-extended = <&gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>, <&scu_ic1 ASPEED_AST2700_SCU_IC1_PCIE_RCRST_LO_TO_HI>;
++ interrupt-names = "mctp", "pcie";
++ resets = <&syscon0 SCU0_RESET_MCTP1>;
++ aspeed,scu = <&syscon0>;
++ aspeed,pcieh = <&pcie_phy1>;
++ status = "disabled";
++ };
++
++ display_port: dp@12c0a000 {
++ compatible = "aspeed,ast2700-displayport", "syscon";
++ reg = <0x0 0x12c0a000 0x0 0x200>;
++ status = "disabled";
++ };
++
++ display_port_mcu: dpmcu@11000000 {
++ compatible = "aspeed,ast2700-displayport-mcu", "syscon";
++ reg = <0x0 0x11000000 0x0 0xf00>;
++ status = "disabled";
++ };
++
++ gfx: display@12c09000 {
++ compatible = "aspeed,ast2700-gfx", "syscon";
++ reg = <0 0x12c09000 0 0x100>;
++ reg-io-width = <4>;
++ clocks = <&syscon0 SCU0_CLK_CRT1>;
++ resets = <&syscon0 SCU0_RESET_CRT0>;
++ syscon = <&syscon0>;
++ status = "disabled";
++ interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>;
++ };
++
++ disp_intf: disp-intf {
++ compatible = "aspeed,ast2700-disp-intf", "syscon";
++ reg = <0x0 0x12c1d000 0x0 0x40>;
++ syscon = <&syscon0>;
++ status = "disabled";
++ };
++
++ rtc: rtc@12c0f000 {
++ compatible = "aspeed,ast2700-rtc";
++ reg = <0 0x12c0f000 0 0x18>;
++ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ gpio1: gpio@12c11000 {
++ #gpio-cells = <2>;
++ gpio-controller;
++ compatible = "aspeed,ast2700-gpio";
++ reg = <0x0 0x12c11000 0x0 0x1000>;
++ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
++ gpio-ranges = <&pinctrl0 0 0 12>;
++ ngpios = <12>;
++ clocks = <&syscon0 SCU0_CLK_APB>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ pcie_phy0: pcie-phy@12c15000 {
++ compatible = "aspeed,ast2700-pcie-phy", "syscon";
++ reg = <0x0 0x12c15000 0x0 0x800>;
++ };
++
++ pcie_phy1: rc-bridge@12c15800 {
++ compatible = "aspeed,ast2700-pcie-phy", "syscon";
++ reg = <0x0 0x12c15800 0x0 0x800>;
++ };
++
++ pcie_vuart0: serial@12c18000 {
++ compatible = "aspeed,ast2600-uart";
++ reg = <0x0 0x12c18000 0x0 0x40>;
++ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&syscon0 SCU0_CLK_APB>;
++ virtual;
++ status = "disabled";
++ };
++
++ pcie_vuart1: serial@12c18100 {
++ compatible = "aspeed,ast2600-uart";
++ reg = <0x0 0x12c18100 0x0 0x40>;
++ interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&syscon0 SCU0_CLK_APB>;
++ virtual;
++ status = "disabled";
++ };
++
++ pcie_vuart2: serial@12c18200 {
++ compatible = "aspeed,ast2600-uart";
++ reg = <0x0 0x12c18200 0x0 0x40>;
++ interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&syscon0 SCU0_CLK_APB>;
++ virtual;
++ status = "disabled";
++ };
++
++ pcie_vuart3: serial@12c18300 {
++ compatible = "aspeed,ast2600-uart";
++ reg = <0x0 0x12c18300 0x0 0x40>;
++ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&syscon0 SCU0_CLK_APB>;
++ virtual;
++ status = "disabled";
++ };
++
++ pcie_lpc0: pcie-lpc@12c19000 {
++ compatible = "aspeed,ast2700-lpc", "simple-mfd", "syscon";
++ reg = <0x0 0x12c19000 0x0 0x800>;
++
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0x0 0x0 0x12c19000 0x800>;
++
++ pcie_lpc0_kcs0: pcie-kcs@24 {
++ compatible = "aspeed,ast2600-kcs-bmc";
++ reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>;
++ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_lpc0_kcs1: pcie-kcs@28 {
++ compatible = "aspeed,ast2600-kcs-bmc";
++ reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>;
++ interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_lpc0_kcs2: pcie-kcs@2c {
++ compatible = "aspeed,ast2600-kcs-bmc";
++ reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>;
++ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_lpc0_kcs3: pcie-kcs@114 {
++ compatible = "aspeed,ast2600-kcs-bmc";
++ reg = <0x114 0x1>, <0x118 0x1>, <0x11c 0x1>;
++ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_lpc0_snoop: pcie-snoop@80 {
++ compatible = "aspeed,ast2600-lpc-snoop";
++ reg = <0x80 0x80>;
++ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_lpc0_pcc: pcie-pcc@0 {
++ compatible = "aspeed,ast2600-lpc-pcc";
++ reg = <0x0 0x140>;
++ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_lpc0_ibt: pcie-ibt@140 {
++ compatible = "aspeed,ast2600-ibt-bmc";
++ reg = <0x140 0x18>;
++ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ };
++
++ pcie_lpc1: pcie-lpc@12c19800 {
++ compatible = "aspeed,ast2700-lpc", "simple-mfd", "syscon";
++ reg = <0x0 0x12c19800 0x0 0x800>;
++
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0x0 0x0 0x12c19800 0x800>;
++
++ pcie_lpc1_kcs0: pcie-kcs@24 {
++ compatible = "aspeed,ast2600-kcs-bmc";
++ reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>;
++ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_lpc1_kcs1: pcie-kcs@28 {
++ compatible = "aspeed,ast2600-kcs-bmc";
++ reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>;
++ interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_lpc1_kcs2: pcie-kcs@2c {
++ compatible = "aspeed,ast2600-kcs-bmc";
++ reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>;
++ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_lpc1_kcs3: pcie-kcs@114 {
++ compatible = "aspeed,ast2600-kcs-bmc";
++ reg = <0x114 0x1>, <0x118 0x1>, <0x11c 0x1>;
++ interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_lpc1_snoop: pcie-snoop@80 {
++ compatible = "aspeed,ast2600-lpc-snoop";
++ reg = <0x80 0x80>;
++ interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_lpc1_pcc: pcie-pcc@0 {
++ compatible = "aspeed,ast2600-lpc-pcc";
++ reg = <0x0 0x140>;
++ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ pcie_lpc1_ibt: pcie-ibt@140 {
++ compatible = "aspeed,ast2600-ibt-bmc";
++ reg = <0x140 0x18>;
++ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
++ status = "disabled";
++ };
++
++ };
++
++ pcie0: pcie@120e0000 {
++ compatible = "aspeed,ast2700-pcie";
++ device_type = "pci";
++ reg = <0x0 0x120e0000 0x0 0x100>;
++ linux,pci-domain = <0>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ bus-range = <0x00 0xff>;
++ ranges = <0x01000000 0 0x00000000 0x0 0x00000000 0x0 0x00008000>, /* I/O */
++ <0x02000000 0 0x60000000 0x0 0x60000000 0x0 0x10000000>; /* memory */
++ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
++ resets = <&syscon0 SCU0_RESET_H2X0>,
++ <&syscon0 SCU0_RESET_PCIE0RST>;
++ reset-names = "h2x", "perst";
++ status = "disabled";
++
++ pinctrl-0 = <&pinctrl_pcierc0_perst_default>;
++ pinctrl-names = "default";
++
++ #interrupt-cells = <1>;
++ msi-parent = <&pcie0>;
++ msi_address = <0x000000F0>;
++ pciephy = <&pcie_phy0>;
++ aspeed,device = <&pcie_device0>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0 0 0 1 &pcie_intc0 0>,
++ <0 0 0 2 &pcie_intc0 1>,
++ <0 0 0 3 &pcie_intc0 2>,
++ <0 0 0 4 &pcie_intc0 3>;
++ pcie_intc0: interrupt-controller {
++ interrupt-controller;
++ #address-cells = <0>;
++ #interrupt-cells = <1>;
++ };
++ };
++
++ pcie1: pcie@120f0000 {
++ compatible = "aspeed,ast2700-pcie";
++ device_type = "pci";
++ reg = <0x0 0x120f0000 0x0 0x100>;
++ linux,pci-domain = <1>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ bus-range = <0x00 0xff>;
++ ranges = <0x01000000 0 0x00000000 0x0 0x00000000 0x0 0x00008000>,
++ <0x02000000 0 0x80000000 0x0 0x80000000 0x0 0x10000000>; /* memory */
++ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
++ resets = <&syscon0 SCU0_RESET_H2X1>,
++ <&syscon0 SCU0_RESET_PCIE1RST>;
++ reset-names = "h2x", "perst";
++ status = "disabled";
++
++ pinctrl-0 = <&pinctrl_pcierc1_perst_default>;
++ pinctrl-names = "default";
++
++ #interrupt-cells = <1>;
++ msi-parent = <&pcie1>;
++ msi_address = <0x000000F0>;
++ pciephy = <&pcie_phy1>;
++ aspeed,device = <&pcie_device1>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0 0 0 1 &pcie_intc1 0>,
++ <0 0 0 2 &pcie_intc1 1>,
++ <0 0 0 3 &pcie_intc1 2>,
++ <0 0 0 4 &pcie_intc1 3>;
++ pcie_intc1: interrupt-controller {
++ interrupt-controller;
++ #address-cells = <0>;
++ #interrupt-cells = <1>;
++ };
++ };
++
++ uart4: serial@12c1a000 {
++ compatible = "aspeed,ast2700-uart";
++ reg = <0x0 0x12c1a000 0x0 0x1000>;
++ reg-shift = <2>;
++ reg-io-width = <4>;
++ clocks = <&syscon0 SCU0_CLK_GATE_UART4CLK>;
++ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
++ no-loopback-test;
++ status = "disabled";
++ };
++
++ ecdsa: crypto@12c1e000 {
++ compatible = "aspeed,ast2700-ecdsa";
++ reg = <0x0 0x12c1e000 0x0 0x1000>;
++ interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&syscon0 SCU0_CLK_GATE_ECDSACLK>;
++ resets = <&syscon0 SCU0_RESET_ECC>;
++ status = "disabled";
++ };
++
++ jtag0: jtag@12c20000 {
++ compatible = "aspeed,ast2700-jtag";
++ reg= <0x0 0x12c20000 0x0 0x40>;
++ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&syscon0 SCU0_CLK_APB>;
++ resets = <&syscon0 SCU0_RESET_JTAG>;
++ status = "disabled";
++ };
++
++ e2m_config0: e2m-config@12c21000 {
++ compatible = "syscon", "simple-mfd";
++ reg = <0 0x12c21000 0 0x300>;
++ ranges = <0 0 0 0x12c21000 0 0x300>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ aspeed,device = <&pcie_device0>;
++
++ e2m_ic0: interrupt-controller@14 {
++ #interrupt-cells = <1>;
++ compatible = "aspeed,ast2700-e2m-ic";
++ reg = <0 0x14 0 0x8>;
++ interrupts-extended = <&gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-names = "e2m";
++ interrupt-controller;
++ };
++
++ pcie0_mmbi0: pcie0-mmbi@0 {
++ compatible = "aspeed,ast2700-pcie-mmbi";
++ interrupts-extended = <&e2m_ic0 ASPEED_AST2700_E2M_MMBI_H2B_INT0>;
++ status = "disabled";
++ };
++
++ pcie0_mmbi1: pcie0-mmbi@1 {
++ compatible = "aspeed,ast2700-pcie-mmbi";
++ interrupts-extended = <&e2m_ic0 ASPEED_AST2700_E2M_MMBI_H2B_INT1>;
++ status = "disabled";
++ };
++
++ pcie0_mmbi2: pcie0-mmbi@2 {
++ compatible = "aspeed,ast2700-pcie-mmbi";
++ interrupts-extended = <&e2m_ic0 ASPEED_AST2700_E2M_MMBI_H2B_INT2>;
++ status = "disabled";
++ };
++
++ pcie0_mmbi3: pcie0-mmbi@3 {
++ compatible = "aspeed,ast2700-pcie-mmbi";
++ interrupts-extended = <&e2m_ic0 ASPEED_AST2700_E2M_MMBI_H2B_INT3>;
++ status = "disabled";
++ };
++ };
++
++ e2m_config1: e2m-config@12c22000 {
++ compatible = "syscon", "simple-mfd";
++ reg = <0 0x12c22000 0 0x300>;
++ ranges = <0x0 0x0 0 0x12c22000 0 0x300>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ aspeed,device = <&pcie_device1>;
++
++ e2m_ic1: interrupt-controller@14 {
++ #interrupt-cells = <1>;
++ compatible = "aspeed,ast2700-e2m-ic";
++ reg = <0 0x14 0 0x8>;
++ interrupts-extended = <&gic GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-controller;
++ };
++
++ pcie1_mmbi4: pcie1-mmbi@4 {
++ compatible = "aspeed,ast2700-pcie-mmbi";
++ interrupts-extended = <&e2m_ic1 ASPEED_AST2700_E2M_MMBI_H2B_INT4>;
++ status = "disabled";
++ };
++
++ pcie1_mmbi5: pcie1-mmbi@5 {
++ compatible = "aspeed,ast2700-pcie-mmbi";
++ interrupts-extended = <&e2m_ic1 ASPEED_AST2700_E2M_MMBI_H2B_INT5>;
++ status = "disabled";
++ };
++
++ pcie1_mmbi6: pcie1-mmbi@6 {
++ compatible = "aspeed,ast2700-pcie-mmbi";
++ interrupts-extended = <&e2m_ic1 ASPEED_AST2700_E2M_MMBI_H2B_INT6>;
++ status = "disabled";
++ };
++
++ pcie1_mmbi7: pcie1-mmbi@7 {
++ compatible = "aspeed,ast2700-pcie-mmbi";
++ interrupts-extended = <&e2m_ic1 ASPEED_AST2700_E2M_MMBI_H2B_INT7>;
++ status = "disabled";
++ };
++ };
++
++ hace: crypto@12070000 {
++ compatible = "aspeed,ast2700-hace";
++ reg = <0x0 0x12070000 0x0 0x200>;
++ interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&syscon0 SCU0_CLK_GATE_HACCLK>;
++ resets = <&syscon0 SCU0_RESET_HACE>;
++ status = "disabled";
++ };
++
++ video0: video@120a0000 {
++ compatible = "aspeed,ast2700-video-engine", "syscon";
++ reg = <0x0 0x120a0000 0x0 0x400>;
++ clocks = <&syscon0 SCU0_CLK_GATE_VCLK>,
++ <&syscon0 SCU0_CLK_GATE_ECLK>,
++ <&syscon0 SCU0_CLK_GATE_CRT1CLK>;
++ clock-names = "vclk", "eclk", "crt2clk";
++ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
++ resets = <&syscon0 SCU0_RESET_VIDEO>;
++ aspeed,scu = <&syscon0>;
++ status = "disabled";
++ };
++
++ video1: video@120a1000 {
++ compatible = "aspeed,ast2700-video-engine";
++ reg = <0x0 0x120a1000 0x0 0x400>;
++ clocks = <&syscon0 SCU0_CLK_GATE_VCLK>,
++ <&syscon0 SCU0_CLK_GATE_ECLK>,
++ <&syscon0 SCU0_CLK_GATE_CRT1CLK>;
++ clock-names = "vclk", "eclk", "crt2clk";
++ interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
++ resets = <&syscon0 SCU0_RESET_VIDEO>;
++ aspeed,scu = <&syscon0>;
++ status = "disabled";
++ };
++
++ rvas0: rvas@120b8000 {
++ compatible = "aspeed,ast2700-rvas";
++ reg = <0x0 0x120b8000 0x0 0x800>,
++ <0x0 0x12c14000 0x0 0x800>,
++ <0x0 0x120a0000 0x0 0x1000>;
++ clocks = <&syscon0 SCU0_CLK_GATE_RVAS0CLK>,
++ <&syscon0 SCU0_CLK_GATE_VCLK>,
++ <&syscon0 SCU0_CLK_GATE_ECLK>;
++ clock-names = "rvasclk", "vclk", "eclk";
++ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
++ resets = <&syscon0 SCU0_RESET_RVAS0>, <&syscon0 SCU0_RESET_VIDEO>;
++ status = "disabled";
++ };
++
++ rvas1: rvas@120bc000 {
++ compatible = "aspeed,ast2700-rvas";
++ reg = <0x0 0x120bc000 0x0 0x800>,
++ <0x0 0x12c14800 0x0 0x800>,
++ <0x0 0x120a1000 0x0 0x1000>;
++ clocks = <&syscon0 SCU0_CLK_GATE_RVAS1CLK>,
++ <&syscon0 SCU0_CLK_GATE_VCLK>,
++ <&syscon0 SCU0_CLK_GATE_ECLK>;
++ clock-names = "rvas2clk", "vclk", "eclk";
++ interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
++ resets = <&syscon0 SCU0_RESET_RVAS1>, <&syscon0 SCU0_RESET_VIDEO>;
++ status = "disabled";
++ };
++
++ xdma0: xdma@12c04000 {
++ compatible = "aspeed,ast2700-xdma0";
++ reg = <0x0 0x12c04000 0x0 0x100>;
++ clocks = <&syscon0 SCU0_CLK_GATE_BCLK>;
++ resets = <&syscon0 SCU0_RESET_XDMA0>;
++ reset-names = "device";
++ interrupts-extended = <&gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
++ <&scu_ic0 ASPEED_AST2700_SCU_IC0_PCIE_PERST_LO_TO_HI>;
++ aspeed,pcie-device = "bmc";
++ aspeed,scu = <&syscon0>;
++ status = "disabled";
++ };
++
++ xdma1: xdma@12c05000 {
++ compatible = "aspeed,ast2700-xdma1";
++ reg = <0x0 0x12c05000 0x0 0x100>;
++ clocks = <&syscon0 SCU0_CLK_GATE_BCLK>;
++ resets = <&syscon0 SCU0_RESET_XDMA1>;
++ reset-names = "device";
++ interrupts-extended = <&gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
++ <&scu_ic1 ASPEED_AST2700_SCU_IC1_PCIE_RCRST_LO_TO_HI>;
++ aspeed,pcie-device = "bmc";
++ aspeed,scu = <&syscon0>;
++ status = "disabled";
++ };
++ };
++
++ soc1: soc@14000000 {
++ compatible = "simple-bus";
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++
++ fmc: spi@14000000 {
++ reg = <0x0 0x14000000 0x0 0xc4>, <0x1 0x00000000 0x0 0x80000000>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "aspeed,ast2700-fmc";
++ status = "disabled";
++ clocks = <&syscon1 SCU1_CLK_AHB>;
++ num-cs = <3>;
++
++ flash@0 {
++ reg = <0>;
++ compatible = "jedec,spi-nor";
++ spi-max-frequency = <50000000>;
++ spi-rx-bus-width = <2>;
++ status = "disabled";
++ };
++
++ flash@1 {
++ reg = <1>;
++ compatible = "jedec,spi-nor";
++ spi-max-frequency = <50000000>;
++ spi-rx-bus-width = <2>;
++ status = "disabled";
++ };
++
++ flash@2 {
++ reg = <2>;
++ compatible = "jedec,spi-nor";
++ spi-max-frequency = <50000000>;
++ spi-rx-bus-width = <2>;
++ status = "disabled";
++ };
++ };
++
++ spi0: spi@14010000 {
++ reg = <0x0 0x14010000 0x0 0xc4>, <0x1 0x80000000 0x0 0x80000000>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "aspeed,ast2700-spi";
++ clocks = <&syscon1 SCU1_CLK_AHB>;
++ status = "disabled";
++ num-cs = <2>;
++
++ flash@0 {
++ reg = <0>;
++ compatible = "jedec,spi-nor";
++ spi-max-frequency = <50000000>;
++ spi-rx-bus-width = <2>;
++ status = "disabled";
++ };
++
++ flash@1 {
++ reg = <1>;
++ compatible = "jedec,spi-nor";
++ spi-max-frequency = <50000000>;
++ spi-rx-bus-width = <2>;
++ status = "disabled";
++ };
++ };
++
++ spi1: spi@14020000 {
++ reg = <0x0 0x14020000 0x0 0xc4>, <0x2 0x00000000 0x0 0x80000000>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ compatible = "aspeed,ast2700-spi";
++ clocks = <&syscon1 SCU1_CLK_AHB>;
++ status = "disabled";
++ num-cs = <2>;
++
++ flash@0 {
++ reg = <0>;
++ compatible = "jedec,spi-nor";
++ spi-max-frequency = <50000000>;
++ spi-rx-bus-width = <2>;
++ status = "disabled";
++ };
++
++ flash@1 {
++ reg = <1>;
++ compatible = "jedec,spi-nor";
++ spi-max-frequency = <50000000>;
++ spi-rx-bus-width = <2>;
++ status = "disabled";
++ };
++ };
++
++ spi2: spi@14030000 {
++ reg = <0x0 0x14030000 0x0 0x1f0>, <0x2 0x80000000 0x0 0x80000000>;
++ #address-cells = <1>;
++ #size-cells = <0>;
++ clocks = <&syscon1 SCU1_CLK_AHB>;
++ resets = <&syscon1 SCU1_RESET_SPI2>;
++ num-cs = <2>;
++ status = "disabled";
++ };
++
++ can0: can@14c3e000 {
++ reg = <0x0 0x14c3e000 0x0 0x2000>;
++ compatible = "aspeed,canfd";
++ status = "disabled";
++ clocks = <&syscon1 SCU1_CLK_GATE_CANCLK>;
++ resets = <&syscon1 SCU1_RESET_CAN>;
++ interrupts-extended = <&intc1_3 31>;
++ pinctrl-0 = <&pinctrl_can_default>;
++ pinctrl-names = "default";
++ };
++
++ mdio: bus@14040000 {
++ compatible = "simple-bus";
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges = <0 0 0 0x14040000 0 0x100>;
++
++ mdio0: mdio@0 {
++ compatible = "aspeed,ast2600-mdio";
++ reg = <0 0 0 0x8>;
++ resets = <&syscon1 SCU1_RESET_MII>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_mdio0_default>;
++ status = "disabled";
++ };
++
++ mdio1: mdio@8 {
++ compatible = "aspeed,ast2600-mdio";
++ reg = <0 0x8 0 0x8>;
++ resets = <&syscon1 SCU1_RESET_MII>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_mdio1_default>;
++ status = "disabled";
++ };
++
++ mdio2: mdio@10 {
++ compatible = "aspeed,ast2600-mdio";
++ reg = <0 0x10 0 0x8>;
++ resets = <&syscon1 SCU1_RESET_MII>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_mdio2_default>;
++ status = "disabled";
++ };
++ };
++
++ plda: plda@14c1c000 {
++ compatible = "aspeed,plda", "syscon", "simple-mfd";
++ reg = <0x0 0x14c1c000 0x0 0x400>;
++ };
++
++ sgmii: phy@14c01000 {
++ compatible = "aspeed,ast2700-sgmii";
++ reg = <0x0 0x14c01000 0x0 0x40>;
++
++ aspeed,plda = <&plda>;
++
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_sgmii_default>;
++ #phy-cells = <0>;
++ status = "disabled";
++ };
++
++ mac0: ftgmac@14050000 {
++ compatible = "aspeed,ast2700-mac", "faraday,ftgmac100";
++ reg = <0x0 0x14050000 0x0 0x200>;
++ interrupts-extended = <&intc1_4 0>;
++
++ clocks = <&syscon1 SCU1_CLK_GATE_MAC0CLK>;
++ resets = <&syscon1 SCU1_RESET_MAC0>;
++ status = "disabled";
++ };
++
++ mac1: ftgmac@14060000 {
++ compatible = "aspeed,ast2700-mac", "faraday,ftgmac100";
++ reg = <0x0 0x14060000 0x0 0x200>;
++ interrupts-extended = <&intc1_4 1>;
++
++ clocks = <&syscon1 SCU1_CLK_GATE_MAC1CLK>;
++ resets = <&syscon1 SCU1_RESET_MAC1>;
++ status = "disabled";
++ };
++
++ mac2: ftgmac@14070000 {
++ compatible = "aspeed,ast2700-mac", "faraday,ftgmac100";
++ reg = <0x0 0x14070000 0x0 0x200>;
++ interrupts-extended = <&intc1_4 2>;
++
++ phys = <&sgmii>;
++ phy-names = "sgmii";
++
++ clocks = <&syscon1 SCU1_CLK_GATE_MAC2CLK>;
++ resets = <&syscon1 SCU1_RESET_MAC2>;
++ status = "disabled";
++ };
++
++ sdio_controller: sdc@14080000 {
++ compatible = "aspeed,ast2600-sd-controller";
++ reg = <0 0x14080000 0 0x100>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges = <0 0 0 0x14080000 0 0x10000>;
++ clocks = <&syscon1 SCU1_CLK_GATE_SDCLK>;
++ resets = <&syscon1 SCU1_RESET_SD>;
++ status = "disabled";
++
++ sdhci: sdhci@14080100 {
++ compatible = "aspeed,ast2600-sdhci", "sdhci";
++ reg = <0 0x100 0 0x100>;
++ sdhci,auto-cmd12;
++ interrupts-extended = <&intc1_5 1>;
++ clocks = <&syscon1 SCU1_CLK_GATE_SDCLK>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_sd_default>;
++ status = "disabled";
++ };
++ };
++
++ pwm_tach: pwm-tach-controller@140c0000 {
++ compatible = "aspeed,ast2700-pwm-tach";
++ reg = <0x0 0x140c0000 0 0x100>;
++ clocks = <&syscon1 SCU1_CLK_AHB>;
++ resets = <&syscon1 SCU1_RESET_PWM>;
++ #pwm-cells = <3>;
++ status = "disabled";
++ };
++
++ uhci1: usb@14110000 {
++ compatible = "aspeed,ast2700-uhci", "generic-uhci";
++ reg = <0x0 0x14110000 0x0 0x100>;
++ interrupts-extended = <&intc1_4 27>;
++ #ports = <2>;
++ clocks = <&syscon1 SCU1_CLK_GATE_UHCICLK>;
++ resets = <&syscon1 SCU1_RESET_UHCI>;
++ status = "disabled";
++ };
++
++ vhubc: usb-vhub@14120000 {
++ compatible = "aspeed,ast2700-usb-vhubc";
++ reg = <0x0 0x14120000 0x0 0x820>;
++ interrupts-extended = <&intc1_4 28>;
++ clocks = <&syscon1 SCU1_CLK_GATE_PORTCUSB2CLK>;
++ resets = <&syscon1 SCU1_RESET_PORTC_VHUB_EHCI>;
++ aspeed,vhub-downstream-ports = <7>;
++ aspeed,vhub-generic-endpoints = <21>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usb2cd_default>;
++ aspeed,scu = <&syscon0>;
++ status = "disabled";
++ };
++
++ ehci2: usb@14121000 {
++ compatible = "aspeed,ast2700-ehci", "generic-ehci";
++ reg = <0x0 0x14121000 0x0 0x100>;
++ interrupts-extended = <&intc1_4 28>;
++ clocks = <&syscon1 SCU1_CLK_GATE_PORTCUSB2CLK>;
++ resets = <&syscon1 SCU1_RESET_PORTC_VHUB_EHCI>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usb2ch_default>;
++ status = "disabled";
++ };
++
++ vhubd: usb-vhub@14122000 {
++ compatible = "aspeed,ast2700-usb-vhubd";
++ reg = <0x0 0x14122000 0x0 0x820>;
++ interrupts-extended = <&intc1_4 29>;
++ clocks = <&syscon1 SCU1_CLK_GATE_PORTDUSB2CLK>;
++ resets = <&syscon1 SCU1_RESET_PORTD_VHUB_EHCI>;
++ aspeed,vhub-downstream-ports = <7>;
++ aspeed,vhub-generic-endpoints = <21>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usb2dd_default>;
++ aspeed,scu = <&syscon0>;
++ status = "disabled";
++ };
++
++ ehci3: usb@14123000 {
++ compatible = "aspeed,ast2700-ehci", "generic-ehci";
++ reg = <0x0 0x14123000 0x0 0x100>;
++ interrupts-extended = <&intc1_4 29>;
++ clocks = <&syscon1 SCU1_CLK_GATE_PORTDUSB2CLK>;
++ resets = <&syscon1 SCU1_RESET_PORTD_VHUB_EHCI>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_usb2dh_default>;
++ status = "disabled";
++ };
++
++ adc0: adc@14c00000 {
++ compatible = "aspeed,ast2700-adc0";
++ reg = <0x0 0x14c00000 0 0x100>;
++ clocks = <&syscon1 SCU1_CLK_AHB>;
++ resets = <&syscon1 SCU1_RESET_ADC>;
++ interrupts-extended = <&intc1_2 16>;
++ #io-channel-cells = <1>;
++ aspeed,scu = <&syscon1>;
++ status = "disabled";
++ };
++
++ adc1: adc@14c00100 {
++ compatible = "aspeed,ast2700-adc1";
++ reg = <0x0 0x14c00100 0x0 0x100>;
++ clocks = <&syscon1 SCU1_CLK_AHB>;
++ resets = <&syscon1 SCU1_RESET_ADC>;
++ interrupts-extended = <&intc1_2 16>;
++ #io-channel-cells = <1>;
++ aspeed,scu = <&syscon1>;
++ status = "disabled";
++ };
++
++ sram1: sram@14b80000 {
++ compatible = "mmio-sram";
++ reg = <0x0 0x14b80000 0x0 0x40000>;
++ ranges = <0x0 0x0 0x0 0x14b80000 0x0 0x40000>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ no-memory-wc;
++
++ exported@0 {
++ reg = <0 0x0 0 0x40000>;
++ export;
++ };
++ };
++
++ syscon1: syscon@14c02000 {
++ compatible = "aspeed,ast2700-scu1", "syscon", "simple-mfd";
++ reg = <0x0 0x14c02000 0x0 0x1000>;
++ ranges = <0x0 0x0 0x0 0x14c02000 0x0 0x1000>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ #clock-cells = <1>;
++ #reset-cells = <1>;
++
++ scu_ic2: interrupt-controller@100 {
++ #interrupt-cells = <1>;
++ compatible = "aspeed,ast2700-scu-ic2";
++ reg = <0 0x100 0 0x8>;
++ interrupts-extended = <&intc1_5 0>;
++ interrupt-controller;
++ };
++
++ scu_ic3: interrupt-controller@108 {
++ #interrupt-cells = <1>;
++ compatible = "aspeed,ast2700-scu-ic3";
++ reg = <0 0x108 0 0x8>;
++ interrupts-extended = <&intc1_5 26>;
++ interrupt-controller;
++ };
++
++ pinctrl1: pinctrl@400 {
++ compatible = "aspeed,ast2700-soc1-pinctrl";
++ reg = <0x0 0x400 0x0 0x100>;
++ };
++ };
++
++ rng: hwrng@14c020f0 {
++ compatible = "aspeed,ast2700-trng";
++ reg = <0x0 0x14c020f0 0x0 0x8>;
++ status = "disabled";
++ };
++
++ chassis: chassis@14c04010 {
++ compatible = "aspeed,ast2600-chassis";
++ reg = <0 0x14c04010 0 0x4>;
++ interrupts-extended = <&intc1_5 5>;
++ status = "disabled";
++ };
++
++ backed_sram0: sram@14c04100 {
++ compatible = "mmio-sram";
++ reg = <0x0 0x14c04100 0x0 0x80>;
++ ranges = <0x0 0x0 0x0 0x14c04100 0x0 0x80>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ no-memory-wc;
++
++ exported@0 {
++ reg = <0 0x0 0 0x80>;
++ export;
++ };
++ };
++
++ backed_sram1: sram@14c04300 {
++ compatible = "mmio-sram";
++ reg = <0x0 0x14c04300 0x0 0x80>;
++ ranges = <0x0 0x0 0x0 0x14c04300 0x0 0x80>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ no-memory-wc;
++
++ exported@0 {
++ reg = <0 0x0 0 0x80>;
++ export;
++ };
++ };
++
++ backed_sram2: sram@14c04500 {
++ compatible = "mmio-sram";
++ reg = <0x0 0x14c04500 0x0 0x40>;
++ ranges = <0x0 0x0 0x0 0x14c04500 0x0 0x40>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ no-memory-wc;
++
++ exported@0 {
++ reg = <0 0x0 0 0x40>;
++ export;
++ };
++ };
++
++ espi0: espi@14c05000 {
++ compatible = "aspeed,ast2700-espi";
++ reg = <0 0x14c05000 0 0x1000>;
++ interrupts-extended = <&intc1_0 10>, <&intc1_0 16>;
++ clocks = <&syscon1 SCU1_CLK_GATE_ESPI0CLK>;
++ resets = <&syscon1 SCU1_RESET_ESPI0>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_espi0_default>;
++ status = "disabled";
++ };
++
++ espi1: espi@14c06000 {
++ compatible = "aspeed,ast2700-espi";
++ reg = <0 0x14c06000 0 0x1000>;
++ interrupts-extended = <&intc1_1 10>, <&intc1_1 16>;
++ clocks = <&syscon1 SCU1_CLK_GATE_ESPI1CLK>;
++ resets = <&syscon1 SCU1_RESET_ESPI1>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_espi1_default>;
++ status = "disabled";
++ };
++
++ otp: otp@14c07000 {
++ compatible = "aspeed,ast2700-otp";
++ reg = <0 0x14c07000 0 0x1000>;
++ interrupts-extended = <&intc1_5 8>;
++ aspeed,scu0 = <&syscon0>;
++ aspeed,scu1 = <&syscon1>;
++ status = "disabled";
++ };
++
++ jtag1: jtag@14c09000 {
++ compatible = "aspeed,ast2700-jtag";
++ reg = <0x0 0x14c09000 0x0 0x40>;
++ interrupts-extended = <&intc1_5 2>;
++ clocks = <&syscon1 SCU1_CLK_AHB>;
++ resets = <&syscon1 SCU1_RESET_JTAG1>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_jtagm1_default>;
++ status = "disabled";
++ };
++
++ gpio0: gpio@14c0b000 {
++ #gpio-cells = <2>;
++ gpio-controller;
++ compatible = "aspeed,ast2700-gpio";
++ reg = <0x0 0x14c0b000 0x0 0x1000>;
++ interrupts-extended = <&intc1_2 18>;
++ gpio-ranges = <&pinctrl1 0 0 216>;
++ ngpios = <216>;
++ clocks = <&syscon1 SCU1_CLK_AHB>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ };
++
++ sgpiom0: sgpiom@14c0c000 {
++ #gpio-cells = <2>;
++ gpio-controller;
++ compatible = "aspeed,ast2700-sgpiom";
++ reg = <0x0 0x14c0c000 0x0 0x100>;
++ interrupts-extended = <&intc1_2 21>;
++ clocks = <&syscon1 SCU1_CLK_APB>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ ngpios = <256>;
++ bus-frequency = <12000000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_sgpm0_default>;
++ status = "disabled";
++ };
++
++ sgpiom1: sgpiom@14c0d000 {
++ #gpio-cells = <2>;
++ gpio-controller;
++ compatible = "aspeed,ast2700-sgpiom";
++ reg = <0x0 0x14c0d000 0x0 0x100>;
++ interrupts-extended = <&intc1_2 24>;
++ #interrupt-cells = <2>;
++ clocks = <&syscon1 SCU1_CLK_APB>;
++ interrupt-controller;
++ ngpios = <256>;
++ bus-frequency = <12000000>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_sgpm1_default>;
++ status = "disabled";
++ };
++
++ i2c: bus@14c0f000 {
++ compatible = "simple-bus";
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0x0 0x0 0x14c0f000 0x1100>;
++ };
++
++ udma: uart-dma@14c12000 {
++ compatible = "aspeed,ast2700-udma";
++ reg = <0x0 0x14c12000 0x0 0x1000>;
++ interrupts-extended = <&intc1_4 19>;
++ };
++
++ intc1: interrupt-controller@14c18000 {
++ compatible = "simple-mfd";
++ reg = <0 0x14c18000 0 0x400>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges = <0x0 0x0 0x0 0x14c18000 0x0 0x400>;
++
++ intc1_0: interrupt-controller@100 {
++ #interrupt-cells = <1>;
++ interrupt-controller;
++ compatible = "aspeed,ast2700-intc-ic";
++ reg = <0x0 0x100 0x0 0x10>;
++ interrupts-extended = <&intc0_11 0>;
++ };
++
++ intc1_1: interrupt-controller@110 {
++ #interrupt-cells = <1>;
++ interrupt-controller;
++ compatible = "aspeed,ast2700-intc-ic";
++ reg = <0x0 0x110 0x0 0x10>;
++ interrupts-extended = <&intc0_11 1>;
++ };
++
++ intc1_2: interrupt-controller@120 {
++ #interrupt-cells = <1>;
++ interrupt-controller;
++ compatible = "aspeed,ast2700-intc-ic";
++ reg = <0x0 0x120 0x0 0x10>;
++ interrupts-extended = <&intc0_11 2>;
++ };
++
++ intc1_3: interrupt-controller@130 {
++ #interrupt-cells = <1>;
++ interrupt-controller;
++ compatible = "aspeed,ast2700-intc-ic";
++ reg = <0x0 0x130 0x0 0x10>;
++ interrupts-extended = <&intc0_11 3>;
++ };
++
++ intc1_4: interrupt-controller@140 {
++ #interrupt-cells = <1>;
++ interrupt-controller;
++ compatible = "aspeed,ast2700-intc-ic";
++ reg = <0x0 0x140 0x0 0x10>;
++ interrupts-extended = <&intc0_11 4>;
++ };
++
++ intc1_5: interrupt-controller@150 {
++ #interrupt-cells = <1>;
++ interrupt-controller;
++ compatible = "aspeed,ast2700-intc-ic";
++ reg = <0x0 0x150 0x0 0x10>;
++ interrupts-extended = <&intc0_11 5>;
++ };
++ };
++
++ peci0: peci-controller@14c1f000 {
++ compatible = "aspeed,ast2600-peci";
++ reg = <0x0 0x14c1f000 0x0 0x100>;
++ interrupts-extended = <&intc1_5 4>;
++ clocks = <&syscon1 SCU1_CLK_GATE_IPEREFCLK>;
++ resets = <&syscon1 SCU1_RESET_PECI>;
++ cmd-timeout-ms = <1000>;
++ clock-frequency = <1000000>;
++ status = "disabled";
++ };
++
++ i3c0: i3c0@14c20000 {
++ compatible = "aspeed-i3c-hci";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x14c20000 0x0 0x1000>;
++ interrupts-extended = <&intc1_3 0>;
++ clocks = <&syscon1 SCU1_CLK_GATE_I3C0CLK>;
++ resets = <&syscon1 SCU1_RESET_I3C0>, <&syscon1 SCU1_RESET_I3CDMA>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hvi3c0_default>;
++ status = "disabled";
++ };
++
++ i3c1: i3c1@14c21000 {
++ compatible = "aspeed-i3c-hci";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x14c21000 0x0 0x1000>;
++ interrupts-extended = <&intc1_3 1>;
++ clocks = <&syscon1 SCU1_CLK_GATE_I3C1CLK>;
++ resets = <&syscon1 SCU1_RESET_I3C1>, <&syscon1 SCU1_RESET_I3CDMA>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hvi3c1_default>;
++ status = "disabled";
++ };
++
++ i3c2: i3c2@14c22000 {
++ compatible = "aspeed-i3c-hci";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x14c22000 0x0 0x1000>;
++ interrupts-extended = <&intc1_3 2>;
++ clocks = <&syscon1 SCU1_CLK_GATE_I3C2CLK>;
++ resets = <&syscon1 SCU1_RESET_I3C2>, <&syscon1 SCU1_RESET_I3CDMA>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hvi3c2_default>;
++ status = "disabled";
++ };
++
++ i3c3: i3c3@14c23000 {
++ compatible = "aspeed-i3c-hci";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x14c23000 0x0 0x1000>;
++ interrupts-extended = <&intc1_3 3>;
++ clocks = <&syscon1 SCU1_CLK_GATE_I3C3CLK>;
++ resets = <&syscon1 SCU1_RESET_I3C3>, <&syscon1 SCU1_RESET_I3CDMA>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hvi3c3_default>;
++ status = "disabled";
++ };
++
++ i3c4: i3c4@14c24000 {
++ compatible = "aspeed-i3c-hci";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x14c24000 0x0 0x1000>;
++ interrupts-extended = <&intc1_3 4>;
++ clocks = <&syscon1 SCU1_CLK_GATE_I3C4CLK>;
++ resets = <&syscon1 SCU1_RESET_I3C4>, <&syscon1 SCU1_RESET_I3CDMA>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i3c4_default>;
++ status = "disabled";
++ };
++
++ i3c5: i3c5@14c25000 {
++ compatible = "aspeed-i3c-hci";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x14c25000 0x0 0x1000>;
++ interrupts-extended = <&intc1_3 5>;
++ clocks = <&syscon1 SCU1_CLK_GATE_I3C5CLK>;
++ resets = <&syscon1 SCU1_RESET_I3C5>, <&syscon1 SCU1_RESET_I3CDMA>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i3c5_default>;
++ status = "disabled";
++ };
++
++ i3c6: i3c6@14c26000 {
++ compatible = "aspeed-i3c-hci";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x14c26000 0x0 0x1000>;
++ interrupts-extended = <&intc1_3 6>;
++ clocks = <&syscon1 SCU1_CLK_GATE_I3C6CLK>;
++ resets = <&syscon1 SCU1_RESET_I3C6>, <&syscon1 SCU1_RESET_I3CDMA>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i3c6_default>;
++ status = "disabled";
++ };
++
++ i3c7: i3c7@14c27000 {
++ compatible = "aspeed-i3c-hci";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x14c27000 0x0 0x1000>;
++ interrupts-extended = <&intc1_3 7>;
++ clocks = <&syscon1 SCU1_CLK_GATE_I3C7CLK>;
++ resets = <&syscon1 SCU1_RESET_I3C7>, <&syscon1 SCU1_RESET_I3CDMA>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i3c7_default>;
++ status = "disabled";
++ };
++
++ i3c8: i3c8@14c28000 {
++ compatible = "aspeed-i3c-hci";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x14c28000 0x0 0x1000>;
++ interrupts-extended = <&intc1_3 8>;
++ clocks = <&syscon1 SCU1_CLK_GATE_I3C8CLK>;
++ resets = <&syscon1 SCU1_RESET_I3C8>, <&syscon1 SCU1_RESET_I3CDMA>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i3c8_default>;
++ status = "disabled";
++ };
++
++ i3c9: i3c9@14c29000 {
++ compatible = "aspeed-i3c-hci";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x14c29000 0x0 0x1000>;
++ interrupts-extended = <&intc1_3 9>;
++ clocks = <&syscon1 SCU1_CLK_GATE_I3C9CLK>;
++ resets = <&syscon1 SCU1_RESET_I3C9>, <&syscon1 SCU1_RESET_I3CDMA>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i3c9_default>;
++ status = "disabled";
++ };
++
++ i3c10: i3c10@14c2a000 {
++ compatible = "aspeed-i3c-hci";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x14c2a000 0x0 0x1000>;
++ interrupts-extended = <&intc1_3 10>;
++ clocks = <&syscon1 SCU1_CLK_GATE_I3C10CLK>;
++ resets = <&syscon1 SCU1_RESET_I3C10>, <&syscon1 SCU1_RESET_I3CDMA>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i3c10_default>;
++ status = "disabled";
++ };
++
++ i3c11: i3c11@14c2b000 {
++ compatible = "aspeed-i3c-hci";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x14c2b000 0x0 0x1000>;
++ interrupts-extended = <&intc1_3 11>;
++ clocks = <&syscon1 SCU1_CLK_GATE_I3C11CLK>;
++ resets = <&syscon1 SCU1_RESET_I3C11>, <&syscon1 SCU1_RESET_I3CDMA>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i3c11_default>;
++ status = "disabled";
++ };
++
++ i3c12: i3c12@14c2c000 {
++ compatible = "aspeed-i3c-hci";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x14c2c000 0x0 0x1000>;
++ interrupts-extended = <&intc1_3 12>;
++ clocks = <&syscon1 SCU1_CLK_GATE_I3C12CLK>;
++ resets = <&syscon1 SCU1_RESET_I3C12>, <&syscon1 SCU1_RESET_I3CDMA>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hvi3c12_default>;
++ status = "disabled";
++ };
++
++ i3c13: i3c13@14c2d000 {
++ compatible = "aspeed-i3c-hci";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x14c2d000 0x0 0x1000>;
++ interrupts-extended = <&intc1_3 13>;
++ clocks = <&syscon1 SCU1_CLK_GATE_I3C13CLK>;
++ resets = <&syscon1 SCU1_RESET_I3C13>, <&syscon1 SCU1_RESET_I3CDMA>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hvi3c13_default>;
++ status = "disabled";
++ };
++
++ i3c14: i3c14@14c2e000 {
++ compatible = "aspeed-i3c-hci";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x14c2e000 0x0 0x1000>;
++ interrupts-extended = <&intc1_3 14>;
++ clocks = <&syscon1 SCU1_CLK_GATE_I3C14CLK>;
++ resets = <&syscon1 SCU1_RESET_I3C14>, <&syscon1 SCU1_RESET_I3CDMA>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hvi3c14_default>;
++ status = "disabled";
++ };
++
++ i3c15: i3c15@14c2f000 {
++ compatible = "aspeed-i3c-hci";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x14c2f000 0x0 0x1000>;
++ interrupts-extended = <&intc1_3 15>;
++ clocks = <&syscon1 SCU1_CLK_GATE_I3C15CLK>;
++ resets = <&syscon1 SCU1_RESET_I3C15>, <&syscon1 SCU1_RESET_I3CDMA>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_hvi3c15_default>;
++ status = "disabled";
++ };
++
++ vuart0: serial@14c30000 {
++ compatible = "aspeed,ast2600-uart";
++ reg = <0x0 0x14c30000 0x0 0x40>;
++ interrupts-extended = <&intc1_0 17>;
++ clocks = <&syscon0 SCU0_CLK_APB>;
++ virtual;
++ status = "disabled";
++ };
++
++ vuart1: serial@14c30100 {
++ compatible = "aspeed,ast2600-uart";
++ reg = <0x0 0x14c30100 0x0 0x40>;
++ interrupts-extended = <&intc1_0 18>;
++ clocks = <&syscon0 SCU0_CLK_APB>;
++ virtual;
++ status = "disabled";
++ };
++
++ vuart2: serial@14c30200 {
++ compatible = "aspeed,ast2600-uart";
++ reg = <0x0 0x14c30200 0x0 0x40>;
++ interrupts-extended = <&intc1_1 17>;
++ clocks = <&syscon0 SCU0_CLK_APB>;
++ virtual;
++ status = "disabled";
++ };
++
++ vuart3: serial@14c30300 {
++ compatible = "aspeed,ast2600-uart";
++ reg = <0x0 0x14c30300 0x0 0x40>;
++ interrupts-extended = <&intc1_1 18>;
++ clocks = <&syscon0 SCU0_CLK_APB>;
++ virtual;
++ status = "disabled";
++ };
++
++ lpc0: lpc@14c31000 {
++ compatible = "aspeed,ast2700-lpc", "simple-mfd", "syscon";
++ reg = <0x0 0x14c31000 0x0 0x1000>;
++ reg-io-width = <4>;
++
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0x0 0x0 0x14c31000 0x1000>;
++
++ lpc0_kcs0: lpc-kcs@24 {
++ compatible = "aspeed,ast2600-kcs-bmc";
++ reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>;
++ interrupts-extended = <&intc1_0 4>;
++ status = "disabled";
++ };
++
++ lpc0_kcs1: lpc-kcs@28 {
++ compatible = "aspeed,ast2600-kcs-bmc";
++ reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>;
++ interrupts-extended = <&intc1_0 5>;
++ status = "disabled";
++ };
++
++ lpc0_kcs2: lpc-kcs@2c {
++ compatible = "aspeed,ast2600-kcs-bmc";
++ reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>;
++ interrupts-extended = <&intc1_0 6>;
++ status = "disabled";
++ };
++
++ lpc0_kcs3: lpc-kcs@114 {
++ compatible = "aspeed,ast2600-kcs-bmc";
++ reg = <0x114 0x1>, <0x118 0x1>, <0x11c 0x1>;
++ interrupts-extended = <&intc1_0 7>;
++ status = "disabled";
++ };
++
++ lpc0_snoop: lpc-snoop@80 {
++ compatible = "aspeed,ast2600-lpc-snoop";
++ reg = <0x80 0x80>;
++ interrupts-extended = <&intc1_0 1>;
++ status = "disabled";
++ };
++
++ lpc0_pcc: lpc-pcc@0 {
++ compatible = "aspeed,ast2600-lpc-pcc";
++ reg = <0x0 0x140>;
++ interrupts-extended = <&intc1_0 3>;
++ status = "disabled";
++ };
++
++ lpc0_reset: reset-controller@98 {
++ compatible = "aspeed,ast2600-lpc-reset";
++ reg = <0x98 0x4>;
++ #reset-cells = <1>;
++ };
++
++ lpc0_uart_routing: uart-routing@98 {
++ compatible = "aspeed,ast2700n0-uart-routing";
++ reg = <0x98 0x8>;
++ status = "disabled";
++ };
++
++ lpc0_ibt: ibt@140 {
++ compatible = "aspeed,ast2600-ibt-bmc";
++ reg = <0x140 0x18>;
++ interrupts-extended = <&intc1_0 2>;
++ status = "disabled";
++ };
++
++ lpc0_mbox: mbox@200 {
++ compatible = "aspeed,ast2700-mbox";
++ reg = <0x200 0xc0>;
++ interrupts-extended = <&intc1_0 20>;
++ status = "disabled";
++ };
++ };
++
++ lpc1: lpc@14c32000 {
++ compatible = "aspeed,ast2700-lpc", "simple-mfd", "syscon";
++ reg = <0x0 0x14c32000 0x0 0x1000>;
++ reg-io-width = <4>;
++
++ #address-cells = <1>;
++ #size-cells = <1>;
++ ranges = <0x0 0x0 0x14c32000 0x1000>;
++
++ lpc1_kcs0: kcs@24 {
++ compatible = "aspeed,ast2600-kcs-bmc";
++ reg = <0x24 0x1>, <0x30 0x1>, <0x3c 0x1>;
++ interrupts-extended = <&intc1_1 4>;
++ status = "disabled";
++ };
++
++ lpc1_kcs1: kcs@28 {
++ compatible = "aspeed,ast2600-kcs-bmc";
++ reg = <0x28 0x1>, <0x34 0x1>, <0x40 0x1>;
++ interrupts-extended = <&intc1_1 5>;
++ status = "disabled";
++ };
++
++ lpc1_kcs2: kcs@2c {
++ compatible = "aspeed,ast2600-kcs-bmc";
++ reg = <0x2c 0x1>, <0x38 0x1>, <0x44 0x1>;
++ interrupts-extended = <&intc1_1 6>;
++ status = "disabled";
++ };
++
++ lpc1_kcs3: kcs@114 {
++ compatible = "aspeed,ast2600-kcs-bmc";
++ reg = <0x114 0x1>, <0x118 0x1>, <0x11c 0x1>;
++ interrupts-extended = <&intc1_1 7>;
++ status = "disabled";
++ };
++
++ lpc1_snoop: lpc-snoop@80 {
++ compatible = "aspeed,ast2600-lpc-snoop";
++ reg = <0x80 0x80>;
++ interrupts-extended = <&intc1_1 1>;
++ status = "disabled";
++ };
++
++ lpc1_pcc: lpc-pcc@0 {
++ compatible = "aspeed,ast2600-lpc-pcc";
++ reg = <0x0 0x140>;
++ interrupts-extended = <&intc1_1 3>;
++ status = "disabled";
++ };
++
++ lpc1_reset: reset-controller@98 {
++ compatible = "aspeed,ast2600-lpc-reset";
++ reg = <0x98 0x4>;
++ #reset-cells = <1>;
++ };
++
++ lpc1_uart_routing: uart-routing@98 {
++ compatible = "aspeed,ast2700n1-uart-routing";
++ reg = <0x98 0x8>;
++ status = "disabled";
++ };
++
++ lpc1_ibt: ibt@140 {
++ compatible = "aspeed,ast2600-ibt-bmc";
++ reg = <0x140 0x18>;
++ interrupts-extended = <&intc1_1 2>;
++ status = "disabled";
++ };
++
++ lpc1_mbox: mbox@200 {
++ compatible = "aspeed,ast2700-mbox";
++ reg = <0x200 0xc0>;
++ interrupts-extended = <&intc1_1 20>;
++ status = "disabled";
++ };
++ };
++
++ uart0: serial@14c33000 {
++ compatible = "aspeed,ast2700-uart";
++ reg = <0x0 0x14c33000 0x0 0x100>;
++ reg-shift = <2>;
++ reg-io-width = <4>;
++ clocks = <&syscon1 SCU1_CLK_GATE_UART0CLK>;
++ resets = <&lpc0_reset 4>;
++ interrupts-extended = <&intc1_4 7>;
++ no-loopback-test;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_txd0_default &pinctrl_rxd0_default>;
++ status = "disabled";
++ };
++
++ uart1: serial@14c33100 {
++ compatible = "aspeed,ast2700-uart";
++ reg = <0x0 0x14c33100 0x0 0x100>;
++ reg-shift = <2>;
++ reg-io-width = <4>;
++ clocks = <&syscon1 SCU1_CLK_GATE_UART1CLK>;
++ resets = <&lpc0_reset 5>;
++ interrupts-extended = <&intc1_4 8>;
++ no-loopback-test;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_txd1_default &pinctrl_rxd1_default>;
++ status = "disabled";
++ };
++
++ uart2: serial@14c33200 {
++ compatible = "aspeed,ast2700-uart";
++ reg = <0x0 0x14c33200 0x0 0x100>;
++ reg-shift = <2>;
++ reg-io-width = <4>;
++ clocks = <&syscon1 SCU1_CLK_GATE_UART2CLK>;
++ resets = <&lpc0_reset 6>;
++ interrupts-extended = <&intc1_4 9>;
++ no-loopback-test;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_txd2_default &pinctrl_rxd2_default>;
++ status = "disabled";
++ };
++
++ uart3: serial@14c33300 {
++ compatible = "aspeed,ast2700-uart";
++ reg = <0x0 0x14c33300 0x0 0x100>;
++ reg-shift = <2>;
++ reg-io-width = <4>;
++ clocks = <&syscon1 SCU1_CLK_GATE_UART3CLK>;
++ resets = <&lpc0_reset 7>;
++ interrupts-extended = <&intc1_4 10>;
++ no-loopback-test;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_txd3_default &pinctrl_rxd3_default>;
++ status = "disabled";
++ };
++
++ uart5: serial@14c33400 {
++ compatible = "aspeed,ast2700-uart";
++ reg = <0x0 0x14c33400 0x0 0x100>;
++ reg-shift = <2>;
++ reg-io-width = <4>;
++ clocks = <&syscon1 SCU1_CLK_GATE_UART5CLK>;
++ resets = <&lpc1_reset 4>;
++ interrupts-extended = <&intc1_4 11>;
++ no-loopback-test;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_txd5_default &pinctrl_rxd5_default>;
++ status = "disabled";
++ };
++
++ uart6: serial@14c33500 {
++ compatible = "aspeed,ast2700-uart";
++ reg = <0x0 0x14c33500 0x0 0x100>;
++ reg-shift = <2>;
++ reg-io-width = <4>;
++ clocks = <&syscon1 SCU1_CLK_GATE_UART6CLK>;
++ resets = <&lpc1_reset 5>;
++ interrupts-extended = <&intc1_4 12>;
++ no-loopback-test;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_txd6_default &pinctrl_rxd6_default>;
++ status = "disabled";
++ };
++
++ uart7: serial@14c33600 {
++ compatible = "aspeed,ast2700-uart";
++ reg = <0x0 0x14c33600 0x0 0x100>;
++ reg-shift = <2>;
++ reg-io-width = <4>;
++ clocks = <&syscon1 SCU1_CLK_GATE_UART7CLK>;
++ resets = <&lpc1_reset 6>;
++ interrupts-extended = <&intc1_4 13>;
++ no-loopback-test;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_txd7_default &pinctrl_rxd7_default>;
++ status = "disabled";
++ };
++
++ uart8: serial@14c33700 {
++ compatible = "aspeed,ast2700-uart";
++ reg = <0x0 0x14c33700 0x0 0x100>;
++ reg-shift = <2>;
++ reg-io-width = <4>;
++ clocks = <&syscon1 SCU1_CLK_GATE_UART8CLK>;
++ resets = <&lpc1_reset 7>;
++ interrupts-extended = <&intc1_4 14>;
++ no-loopback-test;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_txd8_default &pinctrl_rxd8_default>;
++ status = "disabled";
++ };
++
++ uart9: serial@14c33800 {
++ compatible = "aspeed,ast2700-uart";
++ reg = <0x0 0x14c33800 0x0 0x100>;
++ reg-shift = <2>;
++ reg-io-width = <4>;
++ clocks = <&syscon1 SCU1_CLK_GATE_UART9CLK>;
++ interrupts-extended = <&intc1_4 15>;
++ no-loopback-test;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_txd9_default &pinctrl_rxd9_default>;
++ status = "disabled";
++ };
++
++ uart10: serial@14c33900 {
++ compatible = "aspeed,ast2700-uart";
++ reg = <0x0 0x14c33900 0x0 0x100>;
++ reg-shift = <2>;
++ reg-io-width = <4>;
++ clocks = <&syscon1 SCU1_CLK_GATE_UART10CLK>;
++ interrupts-extended = <&intc1_4 16>;
++ no-loopback-test;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_txd10_default &pinctrl_rxd10_default>;
++ status = "disabled";
++ };
++
++ uart11: serial@14c33a00 {
++ compatible = "aspeed,ast2700-uart";
++ reg = <0x0 0x14c33a00 0x0 0x100>;
++ reg-shift = <2>;
++ reg-io-width = <4>;
++ clocks = <&syscon1 SCU1_CLK_GATE_UART11CLK>;
++ interrupts-extended = <&intc1_4 17>;
++ no-loopback-test;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_txd11_default &pinctrl_rxd11_default>;
++ status = "disabled";
++ };
++
++ uart12: serial@14c33b00 {
++ compatible = "aspeed,ast2700-uart";
++ reg = <0x0 0x14c33b00 0x0 0x100>;
++ reg-shift = <2>;
++ reg-io-width = <4>;
++ clocks = <&syscon1 SCU1_CLK_GATE_UART12CLK>;
++ interrupts-extended = <&intc1_4 18>;
++ no-loopback-test;
++ pinctrl-names = "default";
++ };
++
++ uart13: serial@14c33c00 {
++ compatible = "ns16550a";
++ reg = <0x0 0x14c33c00 0x0 0x100>;
++ reg-shift = <2>;
++ reg-io-width = <4>;
++ interrupts-extended = <&intc1_0 23>;
++ clock-frequency = <1846154>;
++ no-loopback-test;
++ pinctrl-names = "default";
++ status = "disabled";
++ };
++
++ uart14: serial@14c33d00 {
++ compatible = "ns16550a";
++ reg = <0x0 0x14c33d00 0x0 0x100>;
++ reg-shift = <2>;
++ reg-io-width = <4>;
++ interrupts-extended = <&intc1_1 23>;
++ clock-frequency = <1846154>;
++ no-loopback-test;
++ pinctrl-names = "default";
++ status = "disabled";
++ };
++
++ ltpi0: ltpi@14c34000 {
++ compatible = "aspeed-ltpi";
++ reg = <0x0 0x14c34000 0x0 0x100>;
++ clocks = <&syscon1 SCU1_CLK_GATE_LTPICLK>,
++ <&syscon1 SCU1_CLK_GATE_LTPIPHYCLK>;
++ clock-names = "ahb", "phy";
++ resets = <&syscon1 SCU1_RESET_LTPI0>;
++ interrupts-extended = <&intc1_5 12>;
++ status = "disabled";
++ };
++
++ ltpi0_gpio: ltpi0-gpio@14c34c00 {
++ #gpio-cells = <2>;
++ gpio-controller;
++ compatible = "aspeed,ast2700-ltpi-gpio";
++ reg = <0x0 0x14c34c00 0x0 0x300>;
++ interrupts-extended = <&intc1_0 22>;
++ ngpios = <112>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ status = "disabled";
++ };
++
++ ltpi1: ltpi@14c35000 {
++ compatible = "aspeed-ltpi";
++ reg = <0x0 0x14c35000 0x0 0x100>;
++ clocks = <&syscon1 SCU1_CLK_GATE_LTPICLK>,
++ <&syscon1 SCU1_CLK_GATE_LTPI1TXCLK>;
++ clock-names = "ahb", "phy";
++ resets = <&syscon1 SCU1_RESET_LTPI1>;
++ interrupts-extended = <&intc1_5 13>;
++ status = "disabled";
++ };
++
++ ltpi1_gpio: ltpi1-gpio@14c35c00 {
++ #gpio-cells = <2>;
++ gpio-controller;
++ compatible = "aspeed,ast2700-ltpi-gpio";
++ reg = <0x0 0x14c35c00 0x0 0x300>;
++ interrupts-extended = <&intc1_1 22>;
++ ngpios = <112>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ status = "disabled";
++ };
++
++ wdt0: watchdog@14c37000 {
++ compatible = "aspeed,ast2700-wdt";
++ reg = <0x0 0x14c37000 0x0 0x80>;
++ };
++
++ wdt1: watchdog@14c37080 {
++ compatible = "aspeed,ast2700-wdt";
++ reg = <0x0 0x14c37080 0x0 0x80>;
++ };
++
++ wdt2: watchdog@14c37100 {
++ compatible = "aspeed,ast2700-wdt";
++ reg = <0x0 0x14c37100 0x0 0x80>;
++ status = "disabled";
++ };
++
++ wdt3: watchdog@14c37180 {
++ compatible = "aspeed,ast2700-wdt";
++ reg = <0x0 0x14c37180 0x0 0x80>;
++ status = "disabled";
++ };
++
++ sgpios: sgpios@14c3c000 {
++ #gpio-cells = <2>;
++ gpio-controller;
++ compatible = "aspeed,ast2700-sgpios";
++ reg = <0x0 0x14c3c000 0x0 0x100>;
++ interrupts-extended = <&intc1_2 29>;
++ clocks = <&syscon1 SCU1_CLK_APB>;
++ interrupt-controller;
++ #interrupt-cells = <2>;
++ ngpios = <136>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_sgps_default>;
++ status = "disabled";
++ };
++
++ fsim0: fsi@21800000 {
++ compatible = "aspeed,ast2700-fsi-master", "fsi-master";
++ reg = <0x0 0x21800000 0x0 0x94>;
++ interrupts-extended = <&intc1_5 6>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_fsi0_default>;
++ clocks = <&syscon1 SCU1_CLK_GATE_FSICLK>;
++ resets = <&syscon1 SCU1_RESET_FSI>;
++ status = "disabled";
++ };
++
++ fsim1: fsi@23800000 {
++ compatible = "aspeed,ast2700-fsi-master", "fsi-master";
++ reg = <0x0 0x23800000 0x0 0x94>;
++ interrupts-extended = <&intc1_5 7>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_fsi1_default>;
++ clocks = <&syscon1 SCU1_CLK_GATE_FSICLK>;
++ resets = <&syscon1 SCU1_RESET_FSI>;
++ status = "disabled";
++ };
++
++ pcie_device2: pcie-device@14c02a80 {
++ compatible = "syscon", "simple-mfd";
++ reg = <0 0x14c02a80 0 0x80>;
++ };
++
++ mctp2: mctp2@14c1a000 {
++ compatible = "aspeed,ast2700-mctp1";
++ reg = <0x0 0x14c1a000 0x0 0x40>;
++ interrupts-extended = <&intc1_4 3>, <&scu_ic2 ASPEED_AST2700_SCU_IC2_PCIE_PERST_LO_TO_HI>;
++ interrupt-names = "mctp", "pcie";
++ resets = <&syscon1 SCU1_RESET_MCTP>;
++ aspeed,scu = <&syscon1>;
++ aspeed,pcieh = <&pcie_phy2>;
++ status = "disabled";
++ };
++
++ pcie_phy2: rc-bridge@14c1c000 {
++ compatible = "aspeed,ast2700-pcie-phy", "syscon";
++ reg = <0x0 0x14c1c000 0x0 0x800>;
++ };
++
++ pcie2: pcie@140d0000 {
++ compatible = "aspeed,ast2700-pcie";
++ device_type = "pci";
++ reg = <0x0 0x140d0000 0x0 0x100>;
++ linux,pci-domain = <2>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ bus-range = <0x00 0xff>;
++ ranges = <0x01000000 0 0x00000000 0x0 0x00000000 0x0 0x00008000>,
++ <0x02000000 0 0xa0000000 0x0 0xa0000000 0x0 0x10000000>; /* memory */
++ interrupts-extended = <&intc1_4 31>;
++ resets = <&syscon1 SCU1_RESET_H2X>,
++ <&syscon1 SCU1_RESET_PCIE2RST>;
++ reset-names = "h2x", "perst";
++ status = "disabled";
++
++ pinctrl-0 = <&pinctrl_pcierc2_perst_default>;
++ pinctrl-names = "default";
++
++ #interrupt-cells = <1>;
++ msi-parent = <&pcie2>;
++ msi_address = <0x000000F0>;
++ pciephy = <&pcie_phy2>;
++ aspeed,device = <&pcie_device2>;
++ interrupt-map-mask = <0 0 0 7>;
++ interrupt-map = <0 0 0 1 &pcie_intc2 0>,
++ <0 0 0 2 &pcie_intc2 1>,
++ <0 0 0 3 &pcie_intc2 2>,
++ <0 0 0 4 &pcie_intc2 3>;
++ pcie_intc2: interrupt-controller {
++ interrupt-controller;
++ #address-cells = <0>;
++ #interrupt-cells = <1>;
++ };
++ };
++
++ e2m_config2: e2m-config@14c1d000 {
++ compatible = "syscon", "simple-mfd";
++ reg = <0 0x14c1d000 0 0x300>;
++ ranges = <0x0 0x0 0 0x14c1d000 0 0x300>;
++ #address-cells = <2>;
++ #size-cells = <2>;
++ aspeed,device = <&pcie_device2>;
++
++ e2m_ic2: interrupt-controller@14 {
++ #interrupt-cells = <1>;
++ compatible = "aspeed,ast2700-e2m-ic";
++ reg = <0 0x14 0 0x8>;
++ interrupts-extended = <&intc1_4 20>;
++ interrupt-controller;
++ };
++
++ pcie2_mmbi0: pcie2-mmbi@0 {
++ compatible = "aspeed,ast2700-pcie-mmbi";
++ interrupts-extended = <&e2m_ic2 ASPEED_AST2700_E2M_MMBI_H2B_INT0>;
++ status = "disabled";
++ };
++
++ pcie2_mmbi1: pcie2-mmbi@1 {
++ compatible = "aspeed,ast2700-pcie-mmbi";
++ interrupts-extended = <&e2m_ic2 ASPEED_AST2700_E2M_MMBI_H2B_INT1>;
++ status = "disabled";
++ };
++
++ pcie2_mmbi2: pcie2-mmbi@2 {
++ compatible = "aspeed,ast2700-pcie-mmbi";
++ interrupts-extended = <&e2m_ic2 ASPEED_AST2700_E2M_MMBI_H2B_INT2>;
++ status = "disabled";
++ };
++
++ pcie2_mmbi3: pcie2-mmbi@3 {
++ compatible = "aspeed,ast2700-pcie-mmbi";
++ interrupts-extended = <&e2m_ic2 ASPEED_AST2700_E2M_MMBI_H2B_INT3>;
++ status = "disabled";
++ };
++
++ pcie2_mmbi4: pcie2-mmbi@4 {
++ compatible = "aspeed,ast2700-pcie-mmbi";
++ interrupts-extended = <&e2m_ic2 ASPEED_AST2700_E2M_MMBI_H2B_INT4>;
++ status = "disabled";
++ };
++
++ pcie2_mmbi5: pcie2-mmbi@5 {
++ compatible = "aspeed,ast2700-pcie-mmbi";
++ interrupts-extended = <&e2m_ic2 ASPEED_AST2700_E2M_MMBI_H2B_INT5>;
++ status = "disabled";
++ };
++ };
++ };
++};
++
++#include "aspeed-g7-pinctrl.dtsi"
++
++&i2c {
++ i2c_global: i2c-global-regs@0 {
++ compatible = "aspeed,i2c-global", "simple-mfd", "syscon";
++ reg = <0x0 0x100>;
++ };
++
++ i2c0: i2c-bus@100 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #interrupt-cells = <1>;
++ reg = <0x100 0x80>, <0x1A0 0x20>;
++ compatible = "aspeed,ast2700-i2c";
++ aspeed,global-regs = <&i2c_global>;
++ aspeed,enable-dma;
++ clocks = <&syscon1 SCU1_CLK_APB>;
++ resets = <&syscon1 SCU1_RESET_I2C>;
++ interrupts-extended = <&intc1_2 0>;
++ clock-frequency = <100000>;
++ debounce-level = <2>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c0_default>;
++ status = "disabled";
++ };
++
++ i2c1: i2c-bus@200 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #interrupt-cells = <1>;
++ reg = <0x200 0x80>, <0x2A0 0x20>;
++ compatible = "aspeed,ast2700-i2c";
++ aspeed,global-regs = <&i2c_global>;
++ aspeed,enable-dma;
++ clocks = <&syscon1 SCU1_CLK_APB>;
++ resets = <&syscon1 SCU1_RESET_I2C>;
++ interrupts-extended = <&intc1_2 1>;
++ clock-frequency = <100000>;
++ debounce-level = <2>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c1_default>;
++ status = "disabled";
++ };
++
++ i2c2: i2c-bus@300 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #interrupt-cells = <1>;
++ reg = <0x300 0x80>, <0x3A0 0x20>;
++ compatible = "aspeed,ast2700-i2c";
++ aspeed,global-regs = <&i2c_global>;
++ aspeed,enable-dma;
++ clocks = <&syscon1 SCU1_CLK_APB>;
++ resets = <&syscon1 SCU1_RESET_I2C>;
++ interrupts-extended = <&intc1_2 2>;
++ clock-frequency = <100000>;
++ debounce-level = <2>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c2_default>;
++ status = "disabled";
++ };
++
++ i2c3: i2c-bus@400 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #interrupt-cells = <1>;
++ reg = <0x400 0x80>, <0x4A0 0x20>;
++ compatible = "aspeed,ast2700-i2c";
++ aspeed,global-regs = <&i2c_global>;
++ clocks = <&syscon1 SCU1_CLK_APB>;
++ resets = <&syscon1 SCU1_RESET_I2C>;
++ interrupts-extended = <&intc1_2 3>;
++ clock-frequency = <100000>;
++ debounce-level = <2>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c3_default>;
++ status = "disabled";
++ };
++
++ i2c4: i2c-bus@500 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #interrupt-cells = <1>;
++ reg = <0x500 0x80>, <0x5A0 0x20>;
++ compatible = "aspeed,ast2700-i2c";
++ aspeed,global-regs = <&i2c_global>;
++ aspeed,enable-dma;
++ clocks = <&syscon1 SCU1_CLK_APB>;
++ resets = <&syscon1 SCU1_RESET_I2C>;
++ clock-frequency = <100000>;
++ debounce-level = <2>;
++ interrupts-extended = <&intc1_2 4>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c4_default>;
++ status = "disabled";
++ };
++
++ i2c5: i2c-bus@600 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #interrupt-cells = <1>;
++ reg = <0x600 0x80>, <0x6A0 0x20>;
++ compatible = "aspeed,ast2700-i2c";
++ aspeed,global-regs = <&i2c_global>;
++ aspeed,enable-dma;
++ clocks = <&syscon1 SCU1_CLK_APB>;
++ resets = <&syscon1 SCU1_RESET_I2C>;
++ interrupts-extended = <&intc1_2 5>;
++ clock-frequency = <100000>;
++ debounce-level = <2>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c5_default>;
++ status = "disabled";
++ };
++
++ i2c6: i2c-bus@700 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #interrupt-cells = <1>;
++ reg = <0x700 0x80>, <0x7A0 0x20>;
++ compatible = "aspeed,ast2700-i2c";
++ aspeed,global-regs = <&i2c_global>;
++ aspeed,enable-dma;
++ clocks = <&syscon1 SCU1_CLK_APB>;
++ resets = <&syscon1 SCU1_RESET_I2C>;
++ interrupts-extended = <&intc1_2 6>;
++ clock-frequency = <100000>;
++ debounce-level = <2>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c6_default>;
++ status = "disabled";
++ };
++
++ i2c7: i2c-bus@800 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #interrupt-cells = <1>;
++ reg = <0x800 0x80>, <0x8A0 0x20>;
++ compatible = "aspeed,ast2700-i2c";
++ aspeed,global-regs = <&i2c_global>;
++ aspeed,enable-dma;
++ clocks = <&syscon1 SCU1_CLK_APB>;
++ resets = <&syscon1 SCU1_RESET_I2C>;
++ interrupts-extended = <&intc1_2 7>;
++ clock-frequency = <100000>;
++ debounce-level = <2>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c7_default>;
++ status = "disabled";
++ };
++
++ i2c8: i2c-bus@900 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #interrupt-cells = <1>;
++ reg = <0x900 0x80>, <0x9A0 0x20>;
++ compatible = "aspeed,ast2700-i2c";
++ aspeed,global-regs = <&i2c_global>;
++ aspeed,enable-dma;
++ clocks = <&syscon1 SCU1_CLK_APB>;
++ resets = <&syscon1 SCU1_RESET_I2C>;
++ interrupts-extended = <&intc1_2 8>;
++ clock-frequency = <100000>;
++ debounce-level = <2>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c8_default>;
++ status = "disabled";
++ };
++
++ i2c9: i2c-bus@a00 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #interrupt-cells = <1>;
++ reg = <0xA00 0x80>, <0xAA0 0x20>;
++ compatible = "aspeed,ast2700-i2c";
++ aspeed,global-regs = <&i2c_global>;
++ aspeed,enable-dma;
++ clocks = <&syscon1 SCU1_CLK_APB>;
++ resets = <&syscon1 SCU1_RESET_I2C>;
++ interrupts-extended = <&intc1_2 9>;
++ clock-frequency = <100000>;
++ debounce-level = <2>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c9_default>;
++ status = "disabled";
++ };
++
++ i2c10: i2c-bus@b00 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #interrupt-cells = <1>;
++ reg = <0xB00 0x80>, <0xBA0 0x20>;
++ compatible = "aspeed,ast2700-i2c";
++ aspeed,global-regs = <&i2c_global>;
++ aspeed,enable-dma;
++ clocks = <&syscon1 SCU1_CLK_APB>;
++ resets = <&syscon1 SCU1_RESET_I2C>;
++ interrupts-extended = <&intc1_2 10>;
++ clock-frequency = <100000>;
++ debounce-level = <2>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c10_default>;
++ status = "disabled";
++ };
++
++ i2c11: i2c-bus@c00 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #interrupt-cells = <1>;
++ reg = <0xC00 0x80>, <0xCA0 0x20>;
++ compatible = "aspeed,ast2700-i2c";
++ aspeed,global-regs = <&i2c_global>;
++ aspeed,enable-dma;
++ clocks = <&syscon1 SCU1_CLK_APB>;
++ resets = <&syscon1 SCU1_RESET_I2C>;
++ interrupts-extended = <&intc1_2 11>;
++ clock-frequency = <100000>;
++ debounce-level = <2>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c11_default>;
++ status = "disabled";
++ };
++
++ i2c12: i2c-bus@d00 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #interrupt-cells = <1>;
++ reg = <0xD00 0x80>, <0xDA0 0x20>;
++ compatible = "aspeed,ast2700-i2c";
++ aspeed,global-regs = <&i2c_global>;
++ aspeed,enable-dma;
++ clocks = <&syscon1 SCU1_CLK_APB>;
++ resets = <&syscon1 SCU1_RESET_I2C>;
++ interrupts-extended = <&intc1_2 12>;
++ clock-frequency = <100000>;
++ debounce-level = <2>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c12_default>;
++ status = "disabled";
++ };
++
++ i2c13: i2c-bus@e00 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #interrupt-cells = <1>;
++ reg = <0xE00 0x80>, <0xEA0 0x20>;
++ compatible = "aspeed,ast2700-i2c";
++ aspeed,global-regs = <&i2c_global>;
++ aspeed,enable-dma;
++ clocks = <&syscon1 SCU1_CLK_APB>;
++ resets = <&syscon1 SCU1_RESET_I2C>;
++ interrupts-extended = <&intc1_2 13>;
++ clock-frequency = <100000>;
++ debounce-level = <2>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c13_default>;
++ status = "disabled";
++ };
++
++ i2c14: i2c-bus@f00 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #interrupt-cells = <1>;
++ reg = <0xF00 0x80>, <0xFA0 0x20>;
++ compatible = "aspeed,ast2700-i2c";
++ aspeed,global-regs = <&i2c_global>;
++ aspeed,enable-dma;
++ clocks = <&syscon1 SCU1_CLK_APB>;
++ resets = <&syscon1 SCU1_RESET_I2C>;
++ interrupts-extended = <&intc1_2 14>;
++ clock-frequency = <100000>;
++ debounce-level = <2>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c14_default>;
++ status = "disabled";
++ };
++
++ i2c15: i2c-bus@1000 {
++ #address-cells = <1>;
++ #size-cells = <0>;
++ #interrupt-cells = <1>;
++ reg = <0x1000 0x80>, <0x10A0 0x20>;
++ compatible = "aspeed,ast2700-i2c";
++ aspeed,global-regs = <&i2c_global>;
++ aspeed,enable-dma;
++ clocks = <&syscon1 SCU1_CLK_APB>;
++ resets = <&syscon1 SCU1_RESET_I2C>;
++ interrupts-extended = <&intc1_2 15>;
++ clock-frequency = <100000>;
++ debounce-level = <2>;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_i2c15_default>;
++ status = "disabled";
++ };
++};
+diff --git a/arch/arm64/boot/dts/aspeed/ast2700-evb.dts b/arch/arm64/boot/dts/aspeed/ast2700-evb.dts
+new file mode 100644
+index 000000000..b94bf77a2
+--- /dev/null
++++ b/arch/arm64/boot/dts/aspeed/ast2700-evb.dts
+@@ -0,0 +1,1155 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++
++/dts-v1/;
++
++#include "aspeed-g7.dtsi"
++#include <dt-bindings/gpio/aspeed-gpio.h>
++
++/ {
++ model = "AST2700-EVB";
++ compatible = "aspeed,ast2700-evb", "aspeed,ast2700";
++
++ chosen {
++ stdout-path = &uart12;
++ };
++
++ firmware {
++ optee: optee {
++ compatible = "linaro,optee-tz";
++ method = "smc";
++ };
++ };
++
++ memory@400000000 {
++ device_type = "memory";
++ reg = <0x4 0x00000000 0x0 0x40000000>;
++ };
++
++ reserved-memory {
++ #address-cells = <2>;
++ #size-cells = <2>;
++ ranges;
++
++ #include "ast2700-reserved-mem.dtsi"
++
++ video_engine_memory0: video0 {
++ size = <0x0 0x04000000>;
++ alignment = <0x0 0x00010000>;
++ compatible = "shared-dma-pool";
++ reusable;
++ };
++
++ video_engine_memory1: video1 {
++ size = <0x0 0x04000000>;
++ alignment = <0x0 0x00010000>;
++ compatible = "shared-dma-pool";
++ reusable;
++ };
++
++ gfx_memory: framebuffer {
++ size = <0x0 0x01000000>;
++ alignment = <0x0 0x01000000>;
++ compatible = "shared-dma-pool";
++ reusable;
++ };
++
++ xdma_memory0: xdma0 {
++ size = <0x0 0x01000000>;
++ alignment = <0x0 0x01000000>;
++ compatible = "shared-dma-pool";
++ no-map;
++ };
++
++ xdma_memory1: xdma1 {
++ size = <0x0 0x01000000>;
++ alignment = <0x0 0x01000000>;
++ compatible = "shared-dma-pool";
++ no-map;
++ };
++ };
++
++ fan0: pwm-fan0 {
++ compatible = "pwm-fan";
++ pwms = <&pwm_tach 0 40000 0>; /* Target freq:25 kHz */
++ cooling-min-state = <0>;
++ cooling-max-state = <3>;
++ #cooling-cells = <2>;
++ cooling-levels = <0 15 128 255>;
++ };
++
++ fan1: pwm-fan1 {
++ compatible = "pwm-fan";
++ pwms = <&pwm_tach 1 40000 0>; /* Target freq:25 kHz */
++ cooling-min-state = <0>;
++ cooling-max-state = <3>;
++ #cooling-cells = <2>;
++ cooling-levels = <0 15 128 255>;
++ };
++
++ fan2: pwm-fan2 {
++ compatible = "pwm-fan";
++ pwms = <&pwm_tach 2 40000 0>; /* Target freq:25 kHz */
++ cooling-min-state = <0>;
++ cooling-max-state = <3>;
++ #cooling-cells = <2>;
++ cooling-levels = <0 15 128 255>;
++ };
++
++ fan3: pwm-fan3 {
++ compatible = "pwm-fan";
++ pwms = <&pwm_tach 3 40000 0>; /* Target freq:25 kHz */
++ cooling-min-state = <0>;
++ cooling-max-state = <3>;
++ #cooling-cells = <2>;
++ cooling-levels = <0 15 128 255>;
++ };
++
++ fan4: pwm-fan4 {
++ compatible = "pwm-fan";
++ pwms = <&pwm_tach 4 40000 0>; /* Target freq:25 kHz */
++ cooling-min-state = <0>;
++ cooling-max-state = <3>;
++ #cooling-cells = <2>;
++ cooling-levels = <0 15 128 255>;
++ };
++
++ fan5: pwm-fan5 {
++ compatible = "pwm-fan";
++ pwms = <&pwm_tach 5 40000 0>; /* Target freq:25 kHz */
++ cooling-min-state = <0>;
++ cooling-max-state = <3>;
++ #cooling-cells = <2>;
++ cooling-levels = <0 15 128 255>;
++ };
++
++ fan6: pwm-fan6 {
++ compatible = "pwm-fan";
++ pwms = <&pwm_tach 6 40000 0>; /* Target freq:25 kHz */
++ cooling-min-state = <0>;
++ cooling-max-state = <3>;
++ #cooling-cells = <2>;
++ cooling-levels = <0 15 128 255>;
++ };
++
++ fan7: pwm-fan7 {
++ compatible = "pwm-fan";
++ pwms = <&pwm_tach 7 40000 0>; /* Target freq:25 kHz */
++ cooling-min-state = <0>;
++ cooling-max-state = <3>;
++ #cooling-cells = <2>;
++ cooling-levels = <0 15 128 255>;
++ };
++
++ fan8: pwm-fan8 {
++ compatible = "pwm-fan";
++ pwms = <&pwm_tach 8 40000 0>; /* Target freq:25 kHz */
++ cooling-min-state = <0>;
++ cooling-max-state = <3>;
++ #cooling-cells = <2>;
++ cooling-levels = <0 15 128 255>;
++ };
++
++ iio-hwmon {
++ compatible = "iio-hwmon";
++ status = "okay";
++ io-channels = <&adc0 0>, <&adc0 1>, <&adc0 2>, <&adc0 3>,
++ <&adc0 4>, <&adc0 5>, <&adc0 6>, <&adc0 7>,
++ <&adc1 0>, <&adc1 1>, <&adc1 2>, <&adc1 3>,
++ <&adc1 4>, <&adc1 5>, <&adc1 6>, <&adc1 7>;
++ };
++};
++
++&pwm_tach {
++ status = "okay";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_pwm0_default &pinctrl_pwm1_default
++ &pinctrl_pwm2_default &pinctrl_pwm3_default
++ &pinctrl_pwm4_default &pinctrl_pwm5_default
++ &pinctrl_pwm6_default &pinctrl_pwm7_default
++ &pinctrl_pwm8_default
++ &pinctrl_tach0_default &pinctrl_tach1_default
++ &pinctrl_tach2_default &pinctrl_tach3_default
++ &pinctrl_tach4_default &pinctrl_tach5_default
++ &pinctrl_tach6_default &pinctrl_tach7_default
++ &pinctrl_tach8_default &pinctrl_tach9_default
++ &pinctrl_tach10_default &pinctrl_tach11_default
++ &pinctrl_tach12_default &pinctrl_tach13_default
++ &pinctrl_tach14_default &pinctrl_tach15_default>;
++ fan-0 {
++ tach-ch = /bits/ 8 <0x0>;
++ };
++ fan-1 {
++ tach-ch = /bits/ 8 <0x1>;
++ };
++ fan-2 {
++ tach-ch = /bits/ 8 <0x2>;
++ };
++ fan-3 {
++ tach-ch = /bits/ 8 <0x3>;
++ };
++ fan-4 {
++ tach-ch = /bits/ 8 <0x4>;
++ };
++ fan-5 {
++ tach-ch = /bits/ 8 <0x5>;
++ };
++ fan-6 {
++ tach-ch = /bits/ 8 <0x6>;
++ };
++ fan-7 {
++ tach-ch = /bits/ 8 <0x7>;
++ };
++ fan-8 {
++ tach-ch = /bits/ 8 <0x8>;
++ };
++ fan-9 {
++ tach-ch = /bits/ 8 <0x9>;
++ };
++ fan-10 {
++ tach-ch = /bits/ 8 <0xA>;
++ };
++ fan-11 {
++ tach-ch = /bits/ 8 <0xB>;
++ };
++ fan-12 {
++ tach-ch = /bits/ 8 <0xC>;
++ };
++ fan-13 {
++ tach-ch = /bits/ 8 <0xD>;
++ };
++ fan-14 {
++ tach-ch = /bits/ 8 <0xE>;
++ };
++ fan-15 {
++ tach-ch = /bits/ 8 <0xF>;
++ };
++};
++
++&mctp0 {
++ status = "okay";
++ memory-region = <&mctp0_reserved>;
++};
++
++&mctp1 {
++ status = "okay";
++ memory-region = <&mctp1_reserved>;
++};
++
++&mctp2 {
++ status = "okay";
++ memory-region = <&mctp2_reserved>;
++};
++
++&sgpiom0 {
++ status = "okay";
++};
++
++&sgpiom1 {
++ status = "okay";
++};
++
++&jtag1 {
++ status = "okay";
++};
++
++&adc0 {
++ aspeed,int-vref-microvolt = <2500000>;
++ status = "okay";
++
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_adc0_default &pinctrl_adc1_default
++ &pinctrl_adc2_default &pinctrl_adc3_default
++ &pinctrl_adc4_default &pinctrl_adc5_default
++ &pinctrl_adc6_default &pinctrl_adc7_default>;
++};
++
++&adc1 {
++ aspeed,int-vref-microvolt = <2500000>;
++ status = "okay";
++
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_adc8_default &pinctrl_adc9_default
++ &pinctrl_adc10_default &pinctrl_adc11_default
++ &pinctrl_adc12_default &pinctrl_adc13_default
++ &pinctrl_adc14_default &pinctrl_adc15_default>;
++};
++
++&pinctrl1 {
++ pinctrl_i3c0_3_hv_voltage: i3chv-voltage {
++ pins = "U25";
++ power-source = <1800>;
++ };
++
++ pinctrl_i3c0_driving: i3c0-driving {
++ pins = "U25", "U26";
++ drive-strength = <2>;
++ };
++
++ pinctrl_i3c1_driving: i3c1-driving {
++ pins = "Y26", "AA24";
++ drive-strength = <2>;
++ };
++
++ pinctrl_i3c2_driving: i3c2-driving {
++ pins = "R25", "AA26";
++ drive-strength = <2>;
++ };
++
++ pinctrl_i3c3_driving: i3c3-driving {
++ pins = "R26", "Y25";
++ drive-strength = <2>;
++ };
++
++ pinctrl_i3c12_15_hv_voltage: i3c12-15-hv-voltage {
++ pins = "W25";
++ power-source = <1800>;
++ };
++
++ pinctrl_i3c12_driving: i3c12-driving {
++ pins = "W25", "Y23";
++ drive-strength = <2>;
++ };
++
++ pinctrl_i3c13_driving: i3c13-driving {
++ pins = "Y24", "W21";
++ drive-strength = <2>;
++ };
++
++ pinctrl_i3c14_driving: i3c14-driving {
++ pins = "AA23", "AC22";
++ drive-strength = <2>;
++ };
++
++ pinctrl_i3c15_driving: i3c15-driving {
++ pins = "AB22", "Y21";
++ drive-strength = <2>;
++ };
++
++ pinctrl_rgmii0_driving: rgmii0-driving {
++ pins = "C20", "C19", "A8", "R14", "A7", "P14",
++ "D20", "A6", "B6", "N14", "B7", "B8";
++ drive-strength = <1>;
++ };
++};
++
++&gpio0 {
++ pinctrl-0 = <&pinctrl_i3c0_3_hv_voltage &pinctrl_i3c12_15_hv_voltage
++ &pinctrl_i3c0_driving &pinctrl_i3c1_driving
++ &pinctrl_i3c2_driving &pinctrl_i3c3_driving
++ &pinctrl_i3c12_driving &pinctrl_i3c13_driving
++ &pinctrl_i3c14_driving &pinctrl_i3c15_driving>;
++ pinctrl-names = "default";
++};
++
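++/* The sixteen I3C controllers come up in pairs: even-numbered instances start as targets with a fixed PID/DCR, odd-numbered ones as primary masters. */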
++&i3c0 {
++ initial-role = "target";
++ pid = <0x000007ec 0x06010000>;
++ dcr = /bits/ 8 <0xcc>;
++ status = "okay";
++};
++
++&i3c1 {
++ initial-role = "primary";
++ status = "okay";
++};
++
++&i3c2 {
++ initial-role = "target";
++ pid = <0x000007ec 0x06012000>;
++ dcr = /bits/ 8 <0xcc>;
++ status = "okay";
++};
++
++&i3c3 {
++ initial-role = "primary";
++ status = "okay";
++};
++
++&i3c4 {
++ initial-role = "target";
++ pid = <0x000007ec 0x06014000>;
++ dcr = /bits/ 8 <0xcc>;
++ status = "okay";
++};
++
++&i3c5 {
++ initial-role = "primary";
++ status = "okay";
++};
++
++&i3c6 {
++ initial-role = "target";
++ pid = <0x000007ec 0x06016000>;
++ dcr = /bits/ 8 <0xcc>;
++ status = "okay";
++};
++
++&i3c7 {
++ initial-role = "primary";
++ status = "okay";
++};
++
++&i3c8 {
++ initial-role = "target";
++ pid = <0x000007ec 0x06018000>;
++ dcr = /bits/ 8 <0xcc>;
++ status = "okay";
++};
++
++&i3c9 {
++ initial-role = "primary";
++ status = "okay";
++};
++
++&i3c10 {
++ initial-role = "target";
++ pid = <0x000007ec 0x0601A000>;
++ dcr = /bits/ 8 <0xcc>;
++ status = "okay";
++};
++
++&i3c11 {
++ initial-role = "primary";
++ status = "okay";
++};
++
++&i3c12 {
++ initial-role = "target";
++ pid = <0x000007ec 0x0601C000>;
++ dcr = /bits/ 8 <0xcc>;
++ status = "okay";
++};
++
++&i3c13 {
++ initial-role = "primary";
++ status = "okay";
++};
++
++&i3c14 {
++ initial-role = "target";
++ pid = <0x000007ec 0x0601E000>;
++ dcr = /bits/ 8 <0xcc>;
++ status = "okay";
++};
++
++&i3c15 {
++ initial-role = "primary";
++ status = "okay";
++};
++
++&uart12 {
++ status = "okay";
++};
++
++&fmc {
++ status = "okay";
++ pinctrl-0 = <&pinctrl_fwspi_quad_default>;
++ pinctrl-names = "default";
++
++ flash@0 {
++ status = "okay";
++ m25p,fast-read;
++ label = "bmc";
++ spi-max-frequency = <50000000>;
++ spi-tx-bus-width = <4>;
++ spi-rx-bus-width = <4>;
++#include "aspeed-evb-flash-layout-128.dtsi"
++ };
++
++ flash@1 {
++ status = "okay";
++ m25p,fast-read;
++ label = "fmc0:1";
++ spi-max-frequency = <50000000>;
++ spi-tx-bus-width = <4>;
++ spi-rx-bus-width = <4>;
++ };
++
++ flash@2 {
++ status = "disabled";
++ m25p,fast-read;
++ label = "fmc0:2";
++ spi-max-frequency = <50000000>;
++ spi-tx-bus-width = <4>;
++ spi-rx-bus-width = <4>;
++ };
++};
++
++&spi0 {
++ status = "okay";
++ pinctrl-0 = <&pinctrl_spi0_default &pinctrl_spi0_cs1_default>;
++ pinctrl-names = "default";
++
++ flash@0 {
++ status = "okay";
++ m25p,fast-read;
++ label = "spi0:0";
++ spi-max-frequency = <50000000>;
++ spi-tx-bus-width = <2>;
++ spi-rx-bus-width = <2>;
++ };
++
++ flash@1 {
++ status = "disabled";
++ m25p,fast-read;
++ label = "spi0:1";
++ spi-max-frequency = <50000000>;
++ spi-tx-bus-width = <2>;
++ spi-rx-bus-width = <2>;
++ };
++};
++
++&spi1 {
++ status = "okay";
++ pinctrl-0 = <&pinctrl_spi1_default &pinctrl_spi1_cs1_default>;
++ pinctrl-names = "default";
++
++ flash@0 {
++ status = "okay";
++ m25p,fast-read;
++ label = "spi1:0";
++ spi-max-frequency = <50000000>;
++ spi-tx-bus-width = <2>;
++ spi-rx-bus-width = <2>;
++ };
++
++ flash@1 {
++ status = "disabled";
++ m25p,fast-read;
++ label = "spi1:1";
++ spi-max-frequency = <50000000>;
++ spi-tx-bus-width = <2>;
++ spi-rx-bus-width = <2>;
++ };
++};
++
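++/* SPI2 is built either as a generic full-duplex controller carrying a SPI TPM (the #if 1 branch) or as an Aspeed SMC with two NOR flash chip selects (the #else branch). */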
++#if 1
++&spi2 {
++ compatible = "aspeed,ast2700-spi-txrx";
++ pinctrl-0 = <&pinctrl_spi2_default>;
++ pinctrl-names = "default";
++ status = "okay";
++
++ spi-aspeed-full-duplex;
++
++ tpm0: tpmdev@0 {
++ compatible = "tcg,tpm_tis-spi";
++ spi-max-frequency = <34000000>;
++ reg = <0>;
++ status = "okay";
++ };
++};
++#else
++&spi2 {
++ compatible = "aspeed,ast2700-spi";
++ pinctrl-0 = <&pinctrl_spi2_default &pinctrl_spi2_cs1_default>;
++ pinctrl-names = "default";
++ status = "okay";
++
++ flash@0 {
++ status = "okay";
++		reg = <0>;
++ compatible = "jedec,spi-nor";
++ m25p,fast-read;
++ label = "spi2:0";
++ spi-max-frequency = <50000000>;
++ spi-tx-bus-width = <2>;
++ spi-rx-bus-width = <2>;
++ };
++
++ flash@1 {
++ status = "okay";
++		reg = <1>;
++ compatible = "jedec,spi-nor";
++ m25p,fast-read;
++ label = "spi2:1";
++ spi-max-frequency = <50000000>;
++ spi-tx-bus-width = <2>;
++ spi-rx-bus-width = <2>;
++ };
++};
++#endif
++
++&can0 {
++ status = "okay";
++};
++
++&emmc_controller {
++ status = "okay";
++ mmc-hs200-1_8v;
++};
++
++&emmc {
++ status = "okay";
++#if 1
++ bus-width = <4>;
++#else
++ bus-width = <8>;
++ pinctrl-0 = <&pinctrl_emmc_default
++ &pinctrl_emmcg8_default>;
++#endif
++ non-removable;
++ max-frequency = <200000000>;
++};
++
++&ufs_controller {
++ status = "okay";
++};
++
++&ufs {
++ status = "okay";
++ lanes-per-direction = <2>;
++ ref-clk-freq = <26000000>;
++};
++
++&chassis {
++ status = "okay";
++};
++
++&mdio0 {
++ status = "okay";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ ethphy0: ethernet-phy@0 {
++ compatible = "ethernet-phy-ieee802.3-c22";
++ reg = <0>;
++ };
++};
++
++&mdio1 {
++ status = "okay";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ ethphy1: ethernet-phy@0 {
++ compatible = "ethernet-phy-ieee802.3-c22";
++ reg = <0>;
++ };
++};
++
++&mac0 {
++ status = "okay";
++
++ phy-mode = "rgmii";
++	phy-handle = <&ethphy0>;
++
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_rgmii0_default &pinctrl_rgmii0_driving>;
++};
++
++&mac1 {
++ status = "okay";
++
++ phy-mode = "rgmii";
++	phy-handle = <&ethphy1>;
++
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_rgmii1_default>;
++};
++
++#if 0
++&mdio2 {
++ status = "okay";
++ #address-cells = <1>;
++ #size-cells = <0>;
++
++ ethphy2: ethernet-phy@0 {
++ compatible = "ethernet-phy-ieee802.3-c22";
++ reg = <0>;
++ };
++};
++
++&sgmii {
++ status = "okay";
++};
++
++&mac2 {
++ status = "okay";
++
++ phy-mode = "sgmii";
++	phy-handle = <&ethphy2>;
++};
++
++&pcie2 {
++ status = "okay";
++};
++#endif
++
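++/* SCU1 glue: per-MAC RGMII clock-delay tuning and the MAC/RGMII/RMII clock rates (200/125/50 MHz). */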
++&syscon1 {
++ mac0-clk-delay = <0x33 0x30
++ 0x10 0x10
++ 0x10 0x10>;
++ mac1-clk-delay = <0x31 0x31
++ 0x10 0x10
++ 0x10 0x10>;
++ assigned-clocks = <&syscon1 SCU1_CLK_MACHCLK>,
++ <&syscon1 SCU1_CLK_RGMII>,
++ <&syscon1 SCU1_CLK_RMII>;
++ assigned-clock-rates = <200000000>, <125000000>, <50000000>;
++};
++
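++/* eSPI0 with DMA on all channels, a host MMBI window at 0xa8000000 and a 64 KB memory-cycle decode at 0x98000000. */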
++&espi0 {
++ status = "okay";
++ perif-dma-mode;
++ perif-mmbi-enable;
++ perif-mmbi-src-addr = <0x0 0xa8000000>;
++ perif-mmbi-tgt-memory = <&espi0_mmbi_memory>;
++ perif-mmbi-instance-num = <0x1>;
++ perif-mcyc-enable;
++ perif-mcyc-src-addr = <0x0 0x98000000>;
++ perif-mcyc-size = <0x0 0x10000>;
++ oob-dma-mode;
++ flash-dma-mode;
++};
++
++&lpc0_kcs0 {
++ status = "okay";
++ kcs-io-addr = <0xca0>;
++ kcs-channel = <0>;
++};
++
++&lpc0_kcs1 {
++ status = "okay";
++ kcs-io-addr = <0xca8>;
++ kcs-channel = <1>;
++};
++
++&lpc0_kcs2 {
++ status = "okay";
++ kcs-io-addr = <0xca2>;
++ kcs-channel = <2>;
++};
++
++&lpc0_kcs3 {
++ status = "okay";
++ kcs-io-addr = <0xca4>;
++ kcs-channel = <3>;
++};
++
++&lpc0_ibt {
++ status = "okay";
++};
++
++&lpc0_mbox {
++ status = "okay";
++};
++
++&lpc0_snoop {
++ status = "okay";
++ snoop-ports = <0x80>, <0x81>;
++};
++
++&lpc0_uart_routing {
++ status = "okay";
++};
++
++&lpc1_kcs0 {
++ status = "okay";
++ kcs-io-addr = <0xca0>;
++ kcs-channel = <4>;
++};
++
++&lpc1_kcs1 {
++ status = "okay";
++ kcs-io-addr = <0xca8>;
++ kcs-channel = <5>;
++};
++
++&lpc1_kcs2 {
++ status = "okay";
++ kcs-io-addr = <0xca2>;
++ kcs-channel = <6>;
++};
++
++&lpc1_kcs3 {
++ status = "okay";
++ kcs-io-addr = <0xca4>;
++ kcs-channel = <7>;
++};
++
++&lpc1_ibt {
++ status = "okay";
++};
++
++&lpc1_mbox {
++ status = "okay";
++};
++
++&lpc1_snoop {
++ status = "okay";
++ snoop-ports = <0x80>, <0x81>;
++};
++
++&lpc1_uart_routing {
++ status = "okay";
++};
++
++&video0 {
++ status = "okay";
++ memory-region = <&video_engine_memory0>;
++};
++
++&video1 {
++ status = "okay";
++ memory-region = <&video_engine_memory1>;
++};
++
++&disp_intf {
++ status = "okay";
++};
++
++&rtc {
++ status = "okay";
++};
++
++&rsss {
++ status = "okay";
++};
++
++&ecdsa {
++ status = "okay";
++};
++
++&hace {
++ status = "okay";
++};
++
++#if 1
++&pcie0_mmbi0 {
++ status = "okay";
++ memory-region = <&pcie0_mmbi0_memory>;
++
++ mmbi-bmc-int-value = /bits/ 8 <0x00>;
++ mmbi-bmc-int-offset = <0x100000>;
++};
++
++&pcie1_mmbi4 {
++ status = "okay";
++ memory-region = <&pcie1_mmbi4_memory>;
++
++ mmbi-bmc-int-value = /bits/ 8 <0x44>;
++ mmbi-bmc-int-offset = <0x100400>;
++};
++#endif
++
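++/* PCIe0 is used in device mode here (BMC device, XDMA, virtual UARTs, PCIe KCS/BT); take the #else branch to enable pcie0 instead. */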
++#if 1
++&bmc_dev0 {
++ status = "okay";
++ memory-region = <&bmc_dev0_memory>;
++};
++
++&xdma0 {
++ status = "okay";
++ memory-region = <&xdma_memory0>;
++};
++
++&pcie_vuart0 {
++ port = <0x3f8>;
++ sirq = <4>;
++ sirq-polarity = <0>;
++
++ status = "okay";
++};
++
++&pcie_vuart1 {
++ port = <0x2f8>;
++ sirq = <3>;
++ sirq-polarity = <0>;
++
++ status = "okay";
++};
++
++&pcie_lpc0_kcs0 {
++ status = "okay";
++ kcs-io-addr = <0x3a0>;
++ kcs-channel = <8>;
++};
++
++&pcie_lpc0_kcs1 {
++ status = "okay";
++ kcs-io-addr = <0x3a8>;
++ kcs-channel = <9>;
++};
++
++&pcie_lpc0_kcs2 {
++ status = "okay";
++ kcs-io-addr = <0x3a2>;
++ kcs-channel = <10>;
++};
++
++&pcie_lpc0_kcs3 {
++ status = "okay";
++ kcs-io-addr = <0x3a4>;
++ kcs-channel = <11>;
++};
++
++&pcie_lpc0_ibt {
++ status = "okay";
++ bt-channel = <2>;
++};
++#else
++&pcie0 {
++ status = "okay";
++};
++#endif
++
++#if 1
++&bmc_dev1 {
++ status = "okay";
++ memory-region = <&bmc_dev1_memory>;
++};
++
++&xdma1 {
++ status = "okay";
++ memory-region = <&xdma_memory1>;
++};
++
++&pcie_vuart2 {
++ port = <0x3f8>;
++ sirq = <4>;
++ sirq-polarity = <0>;
++
++ status = "okay";
++};
++
++&pcie_vuart3 {
++ port = <0x2f8>;
++ sirq = <3>;
++ sirq-polarity = <0>;
++
++ status = "okay";
++};
++
++&pcie_lpc1_kcs0 {
++ status = "okay";
++ kcs-io-addr = <0x3a0>;
++ kcs-channel = <12>;
++};
++
++&pcie_lpc1_kcs1 {
++ status = "okay";
++ kcs-io-addr = <0x3a8>;
++ kcs-channel = <13>;
++};
++
++&pcie_lpc1_kcs2 {
++ status = "okay";
++ kcs-io-addr = <0x3a2>;
++ kcs-channel = <14>;
++};
++
++&pcie_lpc1_kcs3 {
++ status = "okay";
++ kcs-io-addr = <0x3a4>;
++ kcs-channel = <15>;
++};
++
++&pcie_lpc1_ibt {
++ status = "okay";
++ bt-channel = <3>;
++};
++#else
++&pcie1 {
++ status = "okay";
++};
++#endif
++
++#if 0
++&i3c0 {
++ status = "okay";
++};
++
++&jtag0 {
++ status = "okay";
++};
++#endif
++
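++/* SD card power: fixed 3.3 V Vcc plus a GPIO-switched 1.8/3.3 V VccQ so the SDHCI below can negotiate UHS signalling. */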
++&sdio_controller {
++ status = "okay";
++ mmc-hs200-1_8v;
++
++ vcc_sdhci0: regulator-vcc-sdhci0 {
++ compatible = "regulator-fixed";
++ regulator-name = "SDHCI0 Vcc";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ gpios = <&gpio0 ASPEED_GPIO(G, 6) GPIO_ACTIVE_HIGH>;
++ enable-active-high;
++ };
++
++ vccq_sdhci0: regulator-vccq-sdhci0 {
++ compatible = "regulator-gpio";
++ regulator-name = "SDHCI0 VccQ";
++ regulator-min-microvolt = <1800000>;
++ regulator-max-microvolt = <3300000>;
++ gpios = <&gpio0 ASPEED_GPIO(G, 7) GPIO_ACTIVE_HIGH>;
++ gpios-states = <1>;
++ states = <3300000 1>,
++ <1800000 0>;
++ };
++
++};
++
++&sdhci {
++ status = "okay";
++ bus-width = <4>;
++ max-frequency = <100000000>;
++ /* DDR50 bits in CAPA2 are not supported */
++ sdhci-caps-mask = <0x6 0x0>;
++ sdhci-drive-type = /bits/ 8 <3>;
++ sdhci,wp-inverted;
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_sd_default>;
++ vmmc-supply = <&vcc_sdhci0>;
++ vqmmc-supply = <&vccq_sdhci0>;
++ sd-uhs-sdr104; /* enable sdr104 to execute tuning */
++};
++
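++/* I2C buses 0-8 and 11-13 are enabled; buses 9 and 10 are left disabled. */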
++#if 1
++&i2c0 {
++ status = "okay";
++};
++
++&i2c1 {
++ status = "okay";
++};
++
++&i2c2 {
++ status = "okay";
++};
++
++&i2c3 {
++ status = "okay";
++};
++
++&i2c4 {
++ status = "okay";
++};
++
++&i2c5 {
++ status = "okay";
++};
++
++&i2c6 {
++ status = "okay";
++};
++
++&i2c7 {
++ status = "okay";
++};
++
++&i2c8 {
++ status = "okay";
++};
++
++&i2c11 {
++ status = "okay";
++};
++
++&i2c12 {
++ status = "okay";
++};
++
++&i2c13 {
++ status = "okay";
++};
++#endif
++
++#if 0
++&ehci0 {
++ status = "okay";
++};
++
++&ehci1 {
++ status = "okay";
++};
++
++&uhci0 {
++ status = "okay";
++};
++
++#endif
++
++#if 0
++&uphy3a {
++ status = "okay";
++};
++
++&uphy3b {
++ status = "okay";
++};
++#endif
++
++#if 0
++&xhci0 {
++ status = "okay";
++};
++
++&xhci1 {
++ status = "okay";
++};
++#endif
++
++#if 0
++&vhuba1 {
++ status = "okay";
++};
++
++&vhubb1 {
++ status = "okay";
++};
++
++#endif
++
++#if 1
++&vhuba0 {
++ status = "okay";
++};
++
++&vhubb0 {
++ status = "okay";
++};
++#endif
++
++#if 0
++&usb3ahp {
++ status = "okay";
++};
++
++&usb3bhp {
++ status = "okay";
++};
++
++&uphy2a {
++ status = "okay";
++};
++
++&uphy2b {
++ status = "okay";
++};
++#endif
++
++&vhubc {
++ status = "okay";
++};
++
++&ehci3 {
++ status = "okay";
++};
++
++&uhci1 {
++ status = "okay";
++};
++
++&wdt0 {
++ status = "okay";
++};
++
++&wdt1 {
++ status = "okay";
++};
++
++&otp {
++ status = "okay";
++};
+diff --git a/arch/arm64/boot/dts/aspeed/ast2700-reserved-mem.dtsi b/arch/arm64/boot/dts/aspeed/ast2700-reserved-mem.dtsi
+new file mode 100644
+index 000000000..22c67ff40
+--- /dev/null
++++ b/arch/arm64/boot/dts/aspeed/ast2700-reserved-mem.dtsi
+@@ -0,0 +1,85 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++
++/* AST2700 reserved memory regions at fixed addresses */
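++/* Every region below sits at a fixed 0x4_xxxxxxxx address and is marked no-map so the kernel never allocates from it. */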
++bmc_dev0_memory: bmc-dev0-memory@423800000 {
++ reg = <0x4 0x23800000 0x0 0x100000>;
++ no-map;
++};
++
++bmc_dev1_memory: bmc-dev1-memory@423900000 {
++ reg = <0x4 0x23900000 0x0 0x100000>;
++ no-map;
++};
++
++pcie0_mmbi0_memory: pcie0-mmbi0-memory@423a00000 {
++ reg = <0x4 0x23a00000 0x0 0x200000>;
++ no-map;
++};
++
++pcie1_mmbi4_memory: pcie1-mmbi4-memory@423c00000 {
++ reg = <0x4 0x23c00000 0x0 0x200000>;
++ no-map;
++};
++
++pcie2_mmbi0_memory: pcie2-mmbi0-memory@423e00000 {
++ reg = <0x4 0x23e00000 0x0 0x200000>;
++ no-map;
++};
++
++espi0_mmbi_memory: espi0-mmbi-memory@424000000 {
++ reg = <0x4 0x24000000 0x0 0x4000000>;
++ no-map;
++};
++
++espi1_mmbi_memory: espi1-mmbi-memory@428000000 {
++ reg = <0x4 0x28000000 0x0 0x4000000>;
++ no-map;
++};
++
++ssp_memory: ssp-memory@42c000000 {
++ reg = <0x4 0x2c000000 0x0 0x2000000>;
++ no-map;
++};
++
++tsp_memory: tsp-memory@42e000000 {
++ reg = <0x4 0x2e000000 0x0 0x2000000>;
++ no-map;
++};
++
++atf: trusted-firmware-a@430000000 {
++ reg = <0x4 0x30000000 0x0 0x80000>;
++ no-map;
++};
++
++optee_core: optee-core@430080000 {
++ reg = <0x4 0x30080000 0x0 0x1000000>;
++ no-map;
++};
++
++vbios_base0: vbios-base0@431bb0000 {
++ reg = <0x4 0x31bb0000 0x0 0x10000>;
++ no-map;
++};
++
++vbios_base1: vbios-base1@431bc0000 {
++ reg = <0x4 0x31bc0000 0x0 0x10000>;
++ no-map;
++};
++
++mctp0_reserved: mctp0-reserved@431bd0000 {
++ reg = <0x4 0x31bd0000 0x0 0x10000>;
++ compatible = "shared-dma-pool";
++ no-map;
++};
++
++mctp1_reserved: mctp1-reserved@431be0000 {
++ reg = <0x4 0x31be0000 0x0 0x10000>;
++ compatible = "shared-dma-pool";
++ no-map;
++};
++
++mctp2_reserved: mctp2-reserved@431bf0000 {
++ reg = <0x4 0x31bf0000 0x0 0x10000>;
++ compatible = "shared-dma-pool";
++ no-map;
++};
+diff --git a/arch/arm64/configs/aspeed_g7_defconfig b/arch/arm64/configs/aspeed_g7_defconfig
+new file mode 100644
+index 000000000..82287972e
+--- /dev/null
++++ b/arch/arm64/configs/aspeed_g7_defconfig
+@@ -0,0 +1,407 @@
++CONFIG_SYSVIPC=y
++CONFIG_POSIX_MQUEUE=y
++CONFIG_AUDIT=y
++CONFIG_NO_HZ_IDLE=y
++CONFIG_HIGH_RES_TIMERS=y
++CONFIG_BPF_SYSCALL=y
++CONFIG_PREEMPT=y
++CONFIG_IRQ_TIME_ACCOUNTING=y
++CONFIG_BSD_PROCESS_ACCT=y
++CONFIG_BSD_PROCESS_ACCT_V3=y
++CONFIG_TASKSTATS=y
++CONFIG_TASK_XACCT=y
++CONFIG_TASK_IO_ACCOUNTING=y
++CONFIG_IKCONFIG=y
++CONFIG_IKCONFIG_PROC=y
++CONFIG_LOG_BUF_SHIFT=21
++CONFIG_CGROUPS=y
++CONFIG_BLK_DEV_INITRD=y
++CONFIG_EXPERT=y
++CONFIG_PERF_EVENTS=y
++CONFIG_ARCH_ASPEED=y
++CONFIG_SCHED_SMT=y
++CONFIG_NR_CPUS=4
++# CONFIG_ARM64_HW_AFDBM is not set
++# CONFIG_ARM64_PAN is not set
++# CONFIG_ARM64_RAS_EXTN is not set
++# CONFIG_ARM64_CNP is not set
++# CONFIG_ARM64_PTR_AUTH is not set
++# CONFIG_ARM64_AMU_EXTN is not set
++# CONFIG_ARM64_TLB_RANGE is not set
++# CONFIG_ARM64_BTI is not set
++# CONFIG_ARM64_E0PD is not set
++# CONFIG_EFI is not set
++CONFIG_MODULES=y
++CONFIG_MODULE_FORCE_LOAD=y
++CONFIG_MODULE_UNLOAD=y
++CONFIG_MODULE_FORCE_UNLOAD=y
++# CONFIG_SWAP is not set
++# CONFIG_ZONE_DMA is not set
++# CONFIG_ZONE_DMA32 is not set
++CONFIG_NET=y
++CONFIG_PACKET=y
++CONFIG_PACKET_DIAG=y
++CONFIG_UNIX=y
++CONFIG_UNIX_DIAG=y
++CONFIG_INET=y
++# CONFIG_INET_DIAG is not set
++CONFIG_IPV6_ROUTER_PREF=y
++CONFIG_IPV6_ROUTE_INFO=y
++CONFIG_IPV6_OPTIMISTIC_DAD=y
++# CONFIG_IPV6_SIT is not set
++CONFIG_IPV6_MULTIPLE_TABLES=y
++CONFIG_NETFILTER=y
++# CONFIG_NETFILTER_ADVANCED is not set
++CONFIG_BRIDGE=y
++CONFIG_VLAN_8021Q=y
++CONFIG_NET_NCSI=y
++CONFIG_CAN=y
++CONFIG_MCTP=y
++# CONFIG_WIRELESS is not set
++CONFIG_PCI=y
++CONFIG_PCIEPORTBUS=y
++CONFIG_PCI_DEBUG=y
++CONFIG_PCI_STUB=y
++CONFIG_PCI_IOV=y
++CONFIG_PCI_PRI=y
++CONFIG_PCIE_BUS_SAFE=y
++CONFIG_PCIE_ASPEED=y
++CONFIG_DEVTMPFS=y
++CONFIG_DEVTMPFS_MOUNT=y
++CONFIG_DEVTMPFS_SAFE=y
++# CONFIG_PREVENT_FIRMWARE_BUILD is not set
++CONFIG_ASPEED_LTPI=y
++# CONFIG_ARM_SMCCC_SOC_ID is not set
++CONFIG_MTD=y
++CONFIG_MTD_BLOCK=y
++CONFIG_MTD_PARTITIONED_MASTER=y
++CONFIG_MTD_SPI_NOR=y
++# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set
++CONFIG_MTD_UBI=y
++CONFIG_MTD_UBI_FASTMAP=y
++CONFIG_MTD_UBI_BLOCK=y
++CONFIG_BLK_DEV_LOOP=y
++CONFIG_BLK_DEV_NBD=y
++CONFIG_BLK_DEV_RAM=y
++CONFIG_BLK_DEV_RAM_SIZE=32768
++CONFIG_SMPRO_ERRMON=y
++CONFIG_SMPRO_MISC=y
++CONFIG_SRAM=y
++CONFIG_EEPROM_AT24=y
++CONFIG_EEPROM_AT25=y
++CONFIG_SCSI=y
++CONFIG_BLK_DEV_SD=y
++CONFIG_NETDEVICES=y
++# CONFIG_NET_VENDOR_ALACRITECH is not set
++# CONFIG_NET_VENDOR_AMAZON is not set
++# CONFIG_NET_VENDOR_AMD is not set
++# CONFIG_NET_VENDOR_AQUANTIA is not set
++# CONFIG_NET_VENDOR_ARC is not set
++# CONFIG_NET_VENDOR_ASIX is not set
++# CONFIG_NET_VENDOR_BROADCOM is not set
++# CONFIG_NET_VENDOR_CADENCE is not set
++# CONFIG_NET_VENDOR_CAVIUM is not set
++# CONFIG_NET_VENDOR_CORTINA is not set
++# CONFIG_NET_VENDOR_DAVICOM is not set
++# CONFIG_NET_VENDOR_ENGLEDER is not set
++# CONFIG_NET_VENDOR_EZCHIP is not set
++CONFIG_FTGMAC100=y
++# CONFIG_NET_VENDOR_FUNGIBLE is not set
++# CONFIG_NET_VENDOR_GOOGLE is not set
++# CONFIG_NET_VENDOR_HISILICON is not set
++# CONFIG_NET_VENDOR_HUAWEI is not set
++# CONFIG_NET_VENDOR_INTEL is not set
++# CONFIG_NET_VENDOR_ADI is not set
++# CONFIG_NET_VENDOR_LITEX is not set
++# CONFIG_NET_VENDOR_MARVELL is not set
++# CONFIG_NET_VENDOR_MELLANOX is not set
++# CONFIG_NET_VENDOR_MICREL is not set
++# CONFIG_NET_VENDOR_MICROCHIP is not set
++# CONFIG_NET_VENDOR_MICROSEMI is not set
++# CONFIG_NET_VENDOR_MICROSOFT is not set
++# CONFIG_NET_VENDOR_NI is not set
++# CONFIG_NET_VENDOR_NATSEMI is not set
++# CONFIG_NET_VENDOR_NETRONOME is not set
++# CONFIG_NET_VENDOR_PENSANDO is not set
++# CONFIG_NET_VENDOR_QUALCOMM is not set
++# CONFIG_NET_VENDOR_RENESAS is not set
++# CONFIG_NET_VENDOR_ROCKER is not set
++# CONFIG_NET_VENDOR_SAMSUNG is not set
++# CONFIG_NET_VENDOR_SEEQ is not set
++# CONFIG_NET_VENDOR_SOLARFLARE is not set
++# CONFIG_NET_VENDOR_SMSC is not set
++# CONFIG_NET_VENDOR_SOCIONEXT is not set
++# CONFIG_NET_VENDOR_STMICRO is not set
++# CONFIG_NET_VENDOR_SYNOPSYS is not set
++# CONFIG_NET_VENDOR_VERTEXCOM is not set
++# CONFIG_NET_VENDOR_VIA is not set
++# CONFIG_NET_VENDOR_WANGXUN is not set
++# CONFIG_NET_VENDOR_WIZNET is not set
++# CONFIG_NET_VENDOR_XILINX is not set
++CONFIG_AIROHA_AN8801_PHY=y
++CONFIG_BROADCOM_PHY=y
++CONFIG_REALTEK_PHY=y
++CONFIG_DP83867_PHY=y
++CONFIG_CAN_ASPEED=y
++CONFIG_CAN_DEBUG_DEVICES=y
++CONFIG_MCTP_SERIAL=y
++CONFIG_MCTP_TRANSPORT_I2C=y
++CONFIG_MCTP_TRANSPORT_I3C=y
++# CONFIG_USB_NET_DRIVERS is not set
++# CONFIG_WLAN is not set
++CONFIG_INPUT_EVDEV=y
++# CONFIG_KEYBOARD_ATKBD is not set
++CONFIG_KEYBOARD_GPIO=y
++CONFIG_KEYBOARD_GPIO_POLLED=y
++# CONFIG_INPUT_MOUSE is not set
++CONFIG_INPUT_MISC=y
++CONFIG_INPUT_IBM_PANEL=y
++CONFIG_SERIO_RAW=y
++# CONFIG_VT is not set
++# CONFIG_LEGACY_PTYS is not set
++CONFIG_SERIAL_8250=y
++# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
++CONFIG_SERIAL_8250_CONSOLE=y
++CONFIG_SERIAL_8250_NR_UARTS=23
++CONFIG_SERIAL_8250_RUNTIME_UARTS=23
++CONFIG_SERIAL_8250_EXTENDED=y
++CONFIG_SERIAL_8250_ASPEED=y
++CONFIG_SERIAL_8250_ASPEED_VUART=y
++CONFIG_SERIAL_8250_SHARE_IRQ=y
++CONFIG_SERIAL_8250_DW=y
++CONFIG_SERIAL_OF_PLATFORM=y
++CONFIG_IPMI_HANDLER=y
++CONFIG_IPMI_DEVICE_INTERFACE=y
++CONFIG_IPMI_SI=m
++CONFIG_IPMI_SSIF=y
++CONFIG_IPMI_IPMB=y
++CONFIG_ASPEED_KCS_IPMI_BMC=y
++CONFIG_IPMI_KCS_BMC_CDEV_IPMI=y
++CONFIG_ASPEED_BT_IPMI_BMC=y
++CONFIG_SSIF_IPMI_BMC=y
++CONFIG_IPMB_DEVICE_INTERFACE=y
++CONFIG_HW_RANDOM=y
++# CONFIG_HW_RANDOM_OPTEE is not set
++# CONFIG_HW_RANDOM_ARM_SMCCC_TRNG is not set
++# CONFIG_HW_RANDOM_CN10K is not set
++CONFIG_HW_RANDOM_DWC=y
++CONFIG_TCG_TPM=y
++CONFIG_TCG_TIS_SPI=y
++# CONFIG_I2C_COMPAT is not set
++CONFIG_I2C_CHARDEV=y
++CONFIG_I2C_AST2600=y
++CONFIG_I2C_SLAVE=y
++CONFIG_I2C_SLAVE_EEPROM=y
++CONFIG_I3C=y
++CONFIG_I3CDEV=y
++CONFIG_I3C_MCTP=y
++CONFIG_I3C_TARGET_MCTP=y
++CONFIG_MIPI_I3C_HCI=y
++CONFIG_SPI=y
++CONFIG_SPI_ASPEED_SMC=y
++CONFIG_SPI_ASPEED_TXRX=y
++CONFIG_PINCTRL=y
++CONFIG_PINCTRL_ASPEED_G7=y
++CONFIG_GPIOLIB=y
++CONFIG_GPIO_SYSFS=y
++CONFIG_GPIO_ASPEED=y
++CONFIG_GPIO_ASPEED_SGPIO=y
++CONFIG_GPIO_PCA953X=y
++CONFIG_GPIO_PCA953X_IRQ=y
++CONFIG_W1=y
++CONFIG_W1_MASTER_GPIO=y
++CONFIG_W1_SLAVE_THERM=y
++CONFIG_SENSORS_SMPRO=y
++CONFIG_SENSORS_ADT7475=y
++CONFIG_SENSORS_ASPEED=y
++CONFIG_SENSORS_ASPEED_G6=y
++CONFIG_SENSORS_ASPEED_CHASSIS=y
++CONFIG_SENSORS_IIO_HWMON=y
++CONFIG_SENSORS_LM75=y
++CONFIG_SENSORS_PECI_CPUTEMP=y
++CONFIG_SENSORS_PECI_DIMMTEMP=y
++CONFIG_PMBUS=y
++CONFIG_SENSORS_ADM1275=y
++CONFIG_SENSORS_IBM_CFFPS=y
++CONFIG_SENSORS_IR35221=y
++CONFIG_SENSORS_IR38064=y
++CONFIG_SENSORS_ISL68137=y
++CONFIG_SENSORS_LM25066=y
++CONFIG_SENSORS_MAX31785=y
++CONFIG_SENSORS_MP5023=y
++CONFIG_SENSORS_UCD9000=y
++CONFIG_SENSORS_UCD9200=y
++CONFIG_SENSORS_PWM_FAN=y
++CONFIG_SENSORS_SBTSI=y
++CONFIG_SENSORS_TMP421=y
++CONFIG_SENSORS_W83773G=y
++CONFIG_WATCHDOG=y
++CONFIG_WATCHDOG_SYSFS=y
++CONFIG_ASPEED_WATCHDOG=y
++CONFIG_MFD_SMPRO=y
++CONFIG_REGULATOR=y
++CONFIG_REGULATOR_FIXED_VOLTAGE=y
++CONFIG_REGULATOR_GPIO=y
++CONFIG_MEDIA_SUPPORT=y
++CONFIG_MEDIA_SUPPORT_FILTER=y
++CONFIG_MEDIA_PLATFORM_SUPPORT=y
++CONFIG_V4L_PLATFORM_DRIVERS=y
++CONFIG_VIDEO_ASPEED=y
++CONFIG_DRM=y
++CONFIG_DRM_AST=y
++CONFIG_DRM_ASPEED_GFX=y
++CONFIG_USB=y
++CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
++CONFIG_USB_DYNAMIC_MINORS=y
++CONFIG_USB_XHCI_HCD=y
++CONFIG_USB_EHCI_HCD=y
++CONFIG_USB_EHCI_ROOT_HUB_TT=y
++CONFIG_USB_EHCI_HCD_PLATFORM=y
++CONFIG_USB_UHCI_HCD=y
++CONFIG_USB_ACM=y
++CONFIG_USB_STORAGE=y
++CONFIG_USB_UAS=y
++CONFIG_USB_DWC3=y
++CONFIG_USB_DWC3_HOST=y
++# CONFIG_USB_DWC3_HAPS is not set
++# CONFIG_USB_DWC3_OF_SIMPLE is not set
++CONFIG_USB_SERIAL=y
++CONFIG_USB_SERIAL_PL2303=y
++CONFIG_USB_GADGET=y
++CONFIG_USB_ASPEED_VHUB=y
++CONFIG_USB_CONFIGFS=y
++CONFIG_USB_CONFIGFS_ACM=y
++CONFIG_USB_CONFIGFS_NCM=y
++CONFIG_USB_CONFIGFS_ECM=y
++CONFIG_USB_CONFIGFS_ECM_SUBSET=y
++CONFIG_USB_CONFIGFS_RNDIS=y
++CONFIG_USB_CONFIGFS_EEM=y
++CONFIG_USB_CONFIGFS_MASS_STORAGE=y
++CONFIG_USB_CONFIGFS_F_HID=y
++CONFIG_MMC=y
++CONFIG_MMC_SDHCI=y
++CONFIG_MMC_SDHCI_PLTFM=y
++CONFIG_MMC_SDHCI_OF_ASPEED=y
++CONFIG_SCSI_UFSHCD=y
++CONFIG_SCSI_UFSHCD_PLATFORM=y
++CONFIG_SCSI_UFS_ASPEED=y
++CONFIG_NEW_LEDS=y
++CONFIG_LEDS_CLASS=y
++CONFIG_LEDS_CLASS_FLASH=y
++CONFIG_LEDS_GPIO=y
++CONFIG_LEDS_PCA955X=y
++CONFIG_LEDS_PCA955X_GPIO=y
++CONFIG_LEDS_TRIGGERS=y
++CONFIG_LEDS_TRIGGER_TIMER=y
++CONFIG_LEDS_TRIGGER_HEARTBEAT=y
++CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
++CONFIG_EDAC=y
++CONFIG_RTC_CLASS=y
++CONFIG_RTC_DRV_DS1307=y
++CONFIG_RTC_DRV_PCF8523=y
++CONFIG_RTC_DRV_S35390A=y
++CONFIG_RTC_DRV_RV8803=y
++CONFIG_RTC_DRV_ASPEED=y
++# CONFIG_VIRTIO_MENU is not set
++CONFIG_COMMON_CLK_AST2700=y
++CONFIG_COMMON_CLK_AST1700=y
++# CONFIG_IOMMU_SUPPORT is not set
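++# Aspeed BMC SoC drivers: host/BMC device, MCTP, XDMA, eSPI, LPC PCC, OTP and PCIe MMBI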
++CONFIG_ASPEED_BMC_DEV=y
++CONFIG_ASPEED_HOST_BMC_DEV=y
++CONFIG_ASPEED_MCTP=y
++CONFIG_ASPEED_XDMA=y
++CONFIG_AST2700_ESPI=y
++CONFIG_ASPEED_LPC_PCC=y
++CONFIG_AST2700_OTP=y
++CONFIG_ASPEED_PCIE_MMBI=y
++CONFIG_IIO=y
++CONFIG_ASPEED_ADC=y
++CONFIG_MAX1363=y
++CONFIG_SI7020=y
++CONFIG_BMP280=y
++CONFIG_DPS310=y
++CONFIG_PWM=y
++CONFIG_RESET_ASPEED=y
++CONFIG_PHY_ASPEED_SGMII=y
++CONFIG_PHY_ASPEED_USB3=y
++CONFIG_RAS=y
++CONFIG_FSI=y
++CONFIG_FSI_MASTER_GPIO=y
++CONFIG_FSI_MASTER_HUB=y
++CONFIG_FSI_MASTER_ASPEED=y
++CONFIG_FSI_SCOM=y
++CONFIG_FSI_SBEFIFO=y
++CONFIG_FSI_OCC=y
++CONFIG_TEE=y
++CONFIG_OPTEE=y
++CONFIG_PECI=y
++CONFIG_PECI_ASPEED=y
++CONFIG_JTAG=y
++CONFIG_JTAG_ASPEED=y
++CONFIG_EXT4_FS=y
++CONFIG_FS_ENCRYPTION=y
++CONFIG_FANOTIFY=y
++CONFIG_OVERLAY_FS=y
++CONFIG_VFAT_FS=y
++CONFIG_EXFAT_FS=y
++CONFIG_TMPFS=y
++CONFIG_JFFS2_FS=y
++# CONFIG_JFFS2_FS_WRITEBUFFER is not set
++CONFIG_JFFS2_SUMMARY=y
++CONFIG_JFFS2_FS_XATTR=y
++CONFIG_UBIFS_FS=y
++CONFIG_SQUASHFS=y
++CONFIG_SQUASHFS_XZ=y
++CONFIG_SQUASHFS_ZSTD=y
++CONFIG_PSTORE=y
++CONFIG_PSTORE_CONSOLE=y
++CONFIG_PSTORE_PMSG=y
++CONFIG_PSTORE_RAM=y
++# CONFIG_NETWORK_FILESYSTEMS is not set
++CONFIG_NLS_CODEPAGE_437=y
++CONFIG_NLS_ISO8859_1=y
++# CONFIG_SECURITYFS is not set
++CONFIG_HARDENED_USERCOPY=y
++CONFIG_FORTIFY_SOURCE=y
++CONFIG_INIT_STACK_NONE=y
++CONFIG_CRYPTO_USER=y
++# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
++CONFIG_CRYPTO_TEST=m
++CONFIG_CRYPTO_USER_API_HASH=y
++CONFIG_CRYPTO_USER_API_SKCIPHER=y
++CONFIG_CRYPTO_USER_API_AEAD=y
++CONFIG_CRYPTO_USER_API_AKCIPHER=y
++CONFIG_CRYPTO_SHA1_ARM64_CE=y
++CONFIG_CRYPTO_SHA2_ARM64_CE=y
++CONFIG_CRYPTO_SHA512_ARM64=y
++CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
++CONFIG_CRYPTO_DEV_ASPEED=y
++CONFIG_CRYPTO_DEV_ASPEED_HACE=y
++CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH=y
++CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO=y
++CONFIG_CRYPTO_DEV_ASPEED_RSSS=y
++CONFIG_CRYPTO_DEV_ASPEED_ECDSA=y
++# CONFIG_XZ_DEC_X86 is not set
++# CONFIG_XZ_DEC_POWERPC is not set
++# CONFIG_XZ_DEC_IA64 is not set
++# CONFIG_XZ_DEC_SPARC is not set
++CONFIG_PRINTK_TIME=y
++CONFIG_DYNAMIC_DEBUG=y
++CONFIG_DEBUG_INFO_DWARF4=y
++CONFIG_DEBUG_INFO_REDUCED=y
++CONFIG_GDB_SCRIPTS=y
++CONFIG_STRIP_ASM_SYMS=y
++CONFIG_DEBUG_FS=y
++CONFIG_DEBUG_WX=y
++CONFIG_SCHED_STACK_END_CHECK=y
++CONFIG_PANIC_ON_OOPS=y
++CONFIG_PANIC_TIMEOUT=-1
++CONFIG_SOFTLOCKUP_DETECTOR=y
++CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
++CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
++CONFIG_WQ_WATCHDOG=y
++# CONFIG_SCHED_DEBUG is not set
++CONFIG_DEBUG_LIST=y
++CONFIG_FUNCTION_TRACER=y
++# CONFIG_STRICT_DEVMEM is not set
++# CONFIG_RUNTIME_TESTING_MENU is not set
+--
+2.34.1
+
diff --git a/recipes-kernel/linux/files/0001-Add-dts-dtsi-config-for-2700-A0-A1.patch b/recipes-kernel/linux/files/0001-Add-dts-dtsi-config-for-2700-A0-A1.patch
deleted file mode 100644
index 6d429e1..0000000
--- a/recipes-kernel/linux/files/0001-Add-dts-dtsi-config-for-2700-A0-A1.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From 496c0f5fecd22f4eb546e0bcf021a559b66a3882 Mon Sep 17 00:00:00 2001
-From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
-Date: Mon, 18 Nov 2024 13:54:14 +0800
-Subject: [PATCH] Add dts, dtsi, config for 2700 A0 & A1
-
-from linux-aspeed:
-a769cc67850759a3952f7a40f5f5798c3d0f7bfd
-
-Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
----
- arch/arm64/Kconfig.platforms | 14 +
- arch/arm64/boot/dts/Makefile | 1 +
- arch/arm64/boot/dts/aspeed/Makefile | 5 +
- 10 files changed, 9119 insertions(+)
-
-diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
-index 6069120199bb..d1c7d7764818 100644
---- a/arch/arm64/Kconfig.platforms
-+++ b/arch/arm64/Kconfig.platforms
-@@ -33,6 +33,20 @@ config ARCH_APPLE
- This enables support for Apple's in-house ARM SoC family, starting
- with the Apple M1.
-
-+config ARCH_ASPEED
-+ bool "Aspeed SoC family"
-+ select MACH_ASPEED_G7
-+ help
-+ Say yes if you intend to run on an Aspeed ast2700 or similar
-+ seventh generation Aspeed BMCs.
-+
-+config MACH_ASPEED_G7
-+ bool "Aspeed SoC AST2700"
-+ help
-+ Say yes if you intend to run on an Aspeed ast2700
-+ seventh generation Aspeed BMCs.
-+ Aspeed ast2700 BMC based on the Cortex A35.
-+
- menuconfig ARCH_BCM
- bool "Broadcom SoC Support"
-
-diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
-index 30dd6347a929..f71b77b1c691 100644
---- a/arch/arm64/boot/dts/Makefile
-+++ b/arch/arm64/boot/dts/Makefile
-@@ -33,3 +33,4 @@ subdir-y += tesla
- subdir-y += ti
- subdir-y += toshiba
- subdir-y += xilinx
-+subdir-y += aspeed
---
-2.34.1
-
diff --git a/recipes-kernel/linux/files/0002-Add-dt-bindings-head-files-for-ast2700.patch b/recipes-kernel/linux/files/0002-Add-dt-bindings-head-files-for-ast2700.patch
new file mode 100644
index 0000000..0e433cf
--- /dev/null
+++ b/recipes-kernel/linux/files/0002-Add-dt-bindings-head-files-for-ast2700.patch
@@ -0,0 +1,810 @@
+From 2a263fe6d39e81cb2a9016a75fb465725faa20db Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Fri, 7 Mar 2025 08:45:20 +0800
+Subject: [PATCH] Add dt-bindings header files for ast2700
+
+1.clock
+2.dma
+3.i3c
+4.interrupt-controller
+5.reset
+6.watchdog
+
+Source:
+AspeedTech-BMC github:
+https://github.com/AspeedTech-BMC/linux/blob/aspeed-master-v6.6/
+(cherry picked from commit 769f62b7baa84d6998723b0ea60280e380183553)
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
+---
+ .../dt-bindings/clock/aspeed,ast1700-clk.h | 97 +++++++++++
+ .../dt-bindings/clock/aspeed,ast2700-scu.h | 164 ++++++++++++++++++
+ include/dt-bindings/clock/aspeed-clock.h | 1 +
+ include/dt-bindings/clock/ast2600-clock.h | 20 ++-
+ include/dt-bindings/dma/ast2600-udma.h | 40 +++++
+ include/dt-bindings/gpio/aspeed-gpio.h | 2 +
+ include/dt-bindings/i3c/i3c.h | 17 ++
+ .../interrupt-controller/aspeed-e2m-ic.h | 21 +++
+ .../interrupt-controller/aspeed-scu-ic.h | 14 ++
+ .../dt-bindings/reset/aspeed,ast1700-reset.h | 69 ++++++++
+ .../dt-bindings/reset/aspeed,ast2700-scu.h | 124 +++++++++++++
+ include/dt-bindings/watchdog/aspeed-wdt.h | 92 ++++++++++
+ 12 files changed, 656 insertions(+), 5 deletions(-)
+ create mode 100644 include/dt-bindings/clock/aspeed,ast1700-clk.h
+ create mode 100644 include/dt-bindings/clock/aspeed,ast2700-scu.h
+ create mode 100644 include/dt-bindings/dma/ast2600-udma.h
+ create mode 100644 include/dt-bindings/i3c/i3c.h
+ create mode 100644 include/dt-bindings/interrupt-controller/aspeed-e2m-ic.h
+ create mode 100644 include/dt-bindings/reset/aspeed,ast1700-reset.h
+ create mode 100644 include/dt-bindings/reset/aspeed,ast2700-scu.h
+ create mode 100644 include/dt-bindings/watchdog/aspeed-wdt.h
+
+diff --git a/include/dt-bindings/clock/aspeed,ast1700-clk.h b/include/dt-bindings/clock/aspeed,ast1700-clk.h
+new file mode 100644
+index 000000000..d6613d16a
+--- /dev/null
++++ b/include/dt-bindings/clock/aspeed,ast1700-clk.h
+@@ -0,0 +1,97 @@
++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
++/*
++ * Device Tree binding constants for AST2700 clock controller.
++ *
++ * Copyright (c) 2023 Aspeed Technology Inc.
++ */
++
++#ifndef __DT_BINDINGS_CLOCK_AST1700_H
++#define __DT_BINDINGS_CLOCK_AST1700_H
++
++/* io die clk gate */
++#define AST1700_CLK_GATE_LCLK0 (0)
++#define AST1700_CLK_GATE_LCLK1 (1)
++#define AST1700_CLK_GATE_ESPI0CLK (2)
++#define AST1700_CLK_GATE_ESPI1CLK (3)
++#define AST1700_CLK_GATE_SDCLK (4)
++#define AST1700_CLK_GATE_REFCLK (5)
++#define AST1700_CLK_GATE_RSV5CLK (6)
++#define AST1700_CLK_GATE_LPCHCLK (7)
++#define AST1700_CLK_GATE_MAC0CLK (8)
++#define AST1700_CLK_GATE_MAC1CLK (9)
++#define AST1700_CLK_GATE_MAC2CLK (10)
++#define AST1700_CLK_GATE_UART0CLK (11)
++#define AST1700_CLK_GATE_UART1CLK (12)
++#define AST1700_CLK_GATE_UART2CLK (13)
++#define AST1700_CLK_GATE_UART3CLK (14)
++/* reserved bit 15 */
++#define AST1700_CLK_GATE_I3C0CLK (16)
++#define AST1700_CLK_GATE_I3C1CLK (17)
++#define AST1700_CLK_GATE_I3C2CLK (18)
++#define AST1700_CLK_GATE_I3C3CLK (19)
++#define AST1700_CLK_GATE_I3C4CLK (20)
++#define AST1700_CLK_GATE_I3C5CLK (21)
++#define AST1700_CLK_GATE_I3C6CLK (22)
++#define AST1700_CLK_GATE_I3C7CLK (23)
++#define AST1700_CLK_GATE_I3C8CLK (24)
++#define AST1700_CLK_GATE_I3C9CLK (25)
++#define AST1700_CLK_GATE_I3C10CLK (26)
++#define AST1700_CLK_GATE_I3C11CLK (27)
++#define AST1700_CLK_GATE_I3C12CLK (28)
++#define AST1700_CLK_GATE_I3C13CLK (29)
++#define AST1700_CLK_GATE_I3C14CLK (30)
++#define AST1700_CLK_GATE_I3C15CLK (31)
++
++#define AST1700_CLK_GATE_UART5CLK (32 + 0)
++#define AST1700_CLK_GATE_UART6CLK (32 + 1)
++#define AST1700_CLK_GATE_UART7CLK (32 + 2)
++#define AST1700_CLK_GATE_UART8CLK (32 + 3)
++#define AST1700_CLK_GATE_UART9CLK (32 + 4)
++#define AST1700_CLK_GATE_UART10CLK (32 + 5)
++#define AST1700_CLK_GATE_UART11CLK (32 + 6)
++#define AST1700_CLK_GATE_UART12CLK (32 + 7)
++#define AST1700_CLK_GATE_FSICLK (32 + 8)
++#define AST1700_CLK_GATE_LTPIPHYCLK (32 + 9)
++#define AST1700_CLK_GATE_LTPICLK (32 + 10)
++#define AST1700_CLK_GATE_VGALCLK (32 + 11)
++#define AST1700_CLK_GATE_USBUARTCLK (32 + 12)
++#define AST1700_CLK_GATE_CANCLK (32 + 13)
++#define AST1700_CLK_GATE_PCICLK (32 + 14)
++#define AST1700_CLK_GATE_SLICLK (32 + 15)
++
++#define AST1700_CLK_GATE_NUM (AST1700_CLK_GATE_SLICLK + 1)
++
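++/* Non-gate clock IDs are offset by AST1700_CLK_GATE_NUM so gates and clocks share one ID space. */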
++/* io die clk */
++#define AST1700_CLKIN (AST1700_CLK_GATE_NUM + 0)
++#define AST1700_CLK_HPLL (AST1700_CLK_GATE_NUM + 1)
++#define AST1700_CLK_APLL (AST1700_CLK_GATE_NUM + 2)
++#define AST1700_CLK_APLL_DIV2 (AST1700_CLK_GATE_NUM + 3)
++#define AST1700_CLK_APLL_DIV4 (AST1700_CLK_GATE_NUM + 4)
++#define AST1700_CLK_DPLL (AST1700_CLK_GATE_NUM + 5)
++#define AST1700_CLK_UXCLK (AST1700_CLK_GATE_NUM + 6)
++#define AST1700_CLK_HUXCLK (AST1700_CLK_GATE_NUM + 7)
++#define AST1700_CLK_UARTX (AST1700_CLK_GATE_NUM + 8)
++#define AST1700_CLK_HUARTX (AST1700_CLK_GATE_NUM + 9)
++#define AST1700_CLK_AHB (AST1700_CLK_GATE_NUM + 10)
++#define AST1700_CLK_APB (AST1700_CLK_GATE_NUM + 11)
++#define AST1700_CLK_UART0 (AST1700_CLK_GATE_NUM + 12)
++#define AST1700_CLK_UART1 (AST1700_CLK_GATE_NUM + 13)
++#define AST1700_CLK_UART2 (AST1700_CLK_GATE_NUM + 14)
++#define AST1700_CLK_UART3 (AST1700_CLK_GATE_NUM + 15)
++#define AST1700_CLK_UART5 (AST1700_CLK_GATE_NUM + 16)
++#define AST1700_CLK_UART6 (AST1700_CLK_GATE_NUM + 17)
++#define AST1700_CLK_UART7 (AST1700_CLK_GATE_NUM + 18)
++#define AST1700_CLK_UART8 (AST1700_CLK_GATE_NUM + 19)
++#define AST1700_CLK_UART9 (AST1700_CLK_GATE_NUM + 20)
++#define AST1700_CLK_UART10 (AST1700_CLK_GATE_NUM + 21)
++#define AST1700_CLK_UART11 (AST1700_CLK_GATE_NUM + 22)
++#define AST1700_CLK_UART12 (AST1700_CLK_GATE_NUM + 23)
++#define AST1700_CLK_HPLL_DIVN (AST1700_CLK_GATE_NUM + 24)
++#define AST1700_CLK_APLL_DIVN (AST1700_CLK_GATE_NUM + 25)
++#define AST1700_CLK_SDCLK (AST1700_CLK_GATE_NUM + 26)
++#define AST1700_CLK_RMII (AST1700_CLK_GATE_NUM + 27)
++#define AST1700_CLK_RGMII (AST1700_CLK_GATE_NUM + 28)
++#define AST1700_CLK_MACHCLK (AST1700_CLK_GATE_NUM + 29)
++
++#define AST1700_NUM_CLKS (AST1700_CLK_MACHCLK + 1)
++#endif
+diff --git a/include/dt-bindings/clock/aspeed,ast2700-scu.h b/include/dt-bindings/clock/aspeed,ast2700-scu.h
+new file mode 100644
+index 000000000..cecd8c9e0
+--- /dev/null
++++ b/include/dt-bindings/clock/aspeed,ast2700-scu.h
+@@ -0,0 +1,164 @@
++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
++/*
++ * Device Tree binding constants for AST2700 clock controller.
++ *
++ * Copyright (c) 2024 Aspeed Technology Inc.
++ */
++
++#ifndef __DT_BINDINGS_CLOCK_AST2700_H
++#define __DT_BINDINGS_CLOCK_AST2700_H
++
++/* SOC0 clk */
++#define SCU0_CLKIN 0
++#define SCU0_CLK_24M 1
++#define SCU0_CLK_192M 2
++#define SCU0_CLK_UART 3
++#define SCU0_CLK_PSP 4
++#define SCU0_CLK_HPLL 5
++#define SCU0_CLK_HPLL_DIV2 6
++#define SCU0_CLK_HPLL_DIV4 7
++#define SCU0_CLK_DPLL 8
++#define SCU0_CLK_MPLL 9
++#define SCU0_CLK_MPLL_DIV2 10
++#define SCU0_CLK_MPLL_DIV4 11
++#define SCU0_CLK_MPLL_DIV8 12
++#define SCU0_CLK_D0 13
++#define SCU0_CLK_D1 14
++#define SCU0_CLK_CRT0 15
++#define SCU0_CLK_CRT1 16
++#define SCU0_CLK_MPHY 17
++#define SCU0_CLK_AXI0 18
++#define SCU0_CLK_AXI1 19
++#define SCU0_CLK_AHBMUX 20
++#define SCU0_CLK_AHB 21
++#define SCU0_CLK_APB 22
++#define SCU0_CLK_UART4 23
++#define SCU0_CLK_EMMCMUX 24
++#define SCU0_CLK_EMMC 25
++#define SCU0_CLK_U2PHY_CLK12M 26
++#define SCU0_CLK_U2PHY_REFCLK 27
++
++/* SOC0 clk-gate */
++#define SCU0_CLK_GATE_MCLK 28
++#define SCU0_CLK_GATE_ECLK 29
++#define SCU0_CLK_GATE_2DCLK 30
++#define SCU0_CLK_GATE_VCLK 31
++#define SCU0_CLK_GATE_BCLK 32
++#define SCU0_CLK_GATE_VGA0CLK 33
++#define SCU0_CLK_GATE_REFCLK 34
++#define SCU0_CLK_GATE_PORTBUSB2CLK 35
++#define SCU0_CLK_GATE_UHCICLK 36
++#define SCU0_CLK_GATE_VGA1CLK 37
++#define SCU0_CLK_GATE_DDRPHYCLK 38
++#define SCU0_CLK_GATE_E2M0CLK 39
++#define SCU0_CLK_GATE_HACCLK 40
++#define SCU0_CLK_GATE_PORTAUSB2CLK 41
++#define SCU0_CLK_GATE_UART4CLK 42
++#define SCU0_CLK_GATE_SLICLK 43
++#define SCU0_CLK_GATE_DACCLK 44
++#define SCU0_CLK_GATE_DP 45
++#define SCU0_CLK_GATE_E2M1CLK 46
++#define SCU0_CLK_GATE_CRT0CLK 47
++#define SCU0_CLK_GATE_CRT1CLK 48
++#define SCU0_CLK_GATE_ECDSACLK 49
++#define SCU0_CLK_GATE_RSACLK 50
++#define SCU0_CLK_GATE_RVAS0CLK 51
++#define SCU0_CLK_GATE_UFSCLK 52
++#define SCU0_CLK_GATE_EMMCCLK 53
++#define SCU0_CLK_GATE_RVAS1CLK 54
++#define SCU0_CLK_MPHYSRC 55
++#define SCU0_CLK_U2PHY_REFCLKSRC 56
++
++/* SOC1 clk */
++#define SCU1_CLKIN 0
++#define SCU1_CLK_HPLL 1
++#define SCU1_CLK_APLL 2
++#define SCU1_CLK_APLL_DIV2 3
++#define SCU1_CLK_APLL_DIV4 4
++#define SCU1_CLK_DPLL 5
++#define SCU1_CLK_UXCLK 6
++#define SCU1_CLK_HUXCLK 7
++#define SCU1_CLK_UARTX 8
++#define SCU1_CLK_HUARTX 9
++#define SCU1_CLK_AHB 10
++#define SCU1_CLK_APB 11
++#define SCU1_CLK_UART0 12
++#define SCU1_CLK_UART1 13
++#define SCU1_CLK_UART2 14
++#define SCU1_CLK_UART3 15
++#define SCU1_CLK_UART5 16
++#define SCU1_CLK_UART6 17
++#define SCU1_CLK_UART7 18
++#define SCU1_CLK_UART8 19
++#define SCU1_CLK_UART9 20
++#define SCU1_CLK_UART10 21
++#define SCU1_CLK_UART11 22
++#define SCU1_CLK_UART12 23
++#define SCU1_CLK_UART13 24
++#define SCU1_CLK_UART14 25
++#define SCU1_CLK_APLL_DIVN 26
++#define SCU1_CLK_SDMUX 27
++#define SCU1_CLK_SDCLK 28
++#define SCU1_CLK_RMII 29
++#define SCU1_CLK_RGMII 30
++#define SCU1_CLK_MACHCLK 31
++#define SCU1_CLK_MAC0RCLK 32
++#define SCU1_CLK_MAC1RCLK 33
++#define SCU1_CLK_CAN 34
++
++/* SOC1 clk gate */
++#define SCU1_CLK_GATE_LCLK0 35
++#define SCU1_CLK_GATE_LCLK1 36
++#define SCU1_CLK_GATE_ESPI0CLK 37
++#define SCU1_CLK_GATE_ESPI1CLK 38
++#define SCU1_CLK_GATE_SDCLK 39
++#define SCU1_CLK_GATE_IPEREFCLK 40
++#define SCU1_CLK_GATE_REFCLK 41
++#define SCU1_CLK_GATE_LPCHCLK 42
++#define SCU1_CLK_GATE_MAC0CLK 43
++#define SCU1_CLK_GATE_MAC1CLK 44
++#define SCU1_CLK_GATE_MAC2CLK 45
++#define SCU1_CLK_GATE_UART0CLK 46
++#define SCU1_CLK_GATE_UART1CLK 47
++#define SCU1_CLK_GATE_UART2CLK 48
++#define SCU1_CLK_GATE_UART3CLK 49
++#define SCU1_CLK_GATE_I2CCLK 50
++#define SCU1_CLK_GATE_I3C0CLK 51
++#define SCU1_CLK_GATE_I3C1CLK 52
++#define SCU1_CLK_GATE_I3C2CLK 53
++#define SCU1_CLK_GATE_I3C3CLK 54
++#define SCU1_CLK_GATE_I3C4CLK 55
++#define SCU1_CLK_GATE_I3C5CLK 56
++#define SCU1_CLK_GATE_I3C6CLK 57
++#define SCU1_CLK_GATE_I3C7CLK 58
++#define SCU1_CLK_GATE_I3C8CLK 59
++#define SCU1_CLK_GATE_I3C9CLK 60
++#define SCU1_CLK_GATE_I3C10CLK 61
++#define SCU1_CLK_GATE_I3C11CLK 62
++#define SCU1_CLK_GATE_I3C12CLK 63
++#define SCU1_CLK_GATE_I3C13CLK 64
++#define SCU1_CLK_GATE_I3C14CLK 65
++#define SCU1_CLK_GATE_I3C15CLK 66
++#define SCU1_CLK_GATE_UART5CLK 67
++#define SCU1_CLK_GATE_UART6CLK 68
++#define SCU1_CLK_GATE_UART7CLK 69
++#define SCU1_CLK_GATE_UART8CLK 70
++#define SCU1_CLK_GATE_UART9CLK 71
++#define SCU1_CLK_GATE_UART10CLK 72
++#define SCU1_CLK_GATE_UART11CLK 73
++#define SCU1_CLK_GATE_UART12CLK 74
++#define SCU1_CLK_GATE_FSICLK 75
++#define SCU1_CLK_GATE_LTPIPHYCLK 76
++#define SCU1_CLK_GATE_LTPICLK 77
++#define SCU1_CLK_GATE_VGALCLK 78
++#define SCU1_CLK_GATE_UHCICLK 79
++#define SCU1_CLK_GATE_CANCLK 80
++#define SCU1_CLK_GATE_PCICLK 81
++#define SCU1_CLK_GATE_SLICLK 82
++#define SCU1_CLK_GATE_E2MCLK 83
++#define SCU1_CLK_GATE_PORTCUSB2CLK 84
++#define SCU1_CLK_GATE_PORTDUSB2CLK 85
++#define SCU1_CLK_GATE_LTPI1TXCLK 86
++
++#define SCU1_CLK_I3C 87
++#endif
+diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
+index 06d568382..421ca577c 100644
+--- a/include/dt-bindings/clock/aspeed-clock.h
++++ b/include/dt-bindings/clock/aspeed-clock.h
+@@ -53,5 +53,6 @@
+ #define ASPEED_RESET_AHB 8
+ #define ASPEED_RESET_CRT1 9
+ #define ASPEED_RESET_HACE 10
++#define ASPEED_RESET_VIDEO 21
+
+ #endif
+diff --git a/include/dt-bindings/clock/ast2600-clock.h b/include/dt-bindings/clock/ast2600-clock.h
+index 712782177..ace3db51f 100644
+--- a/include/dt-bindings/clock/ast2600-clock.h
++++ b/include/dt-bindings/clock/ast2600-clock.h
+@@ -72,7 +72,7 @@
+ #define ASPEED_CLK_D1CLK 55
+ #define ASPEED_CLK_VCLK 56
+ #define ASPEED_CLK_LHCLK 57
+-#define ASPEED_CLK_UART 58
++#define ASPEED_CLK_UART5 58
+ #define ASPEED_CLK_UARTX 59
+ #define ASPEED_CLK_SDIO 60
+ #define ASPEED_CLK_EMMC 61
+@@ -86,8 +86,12 @@
+ #define ASPEED_CLK_MAC3RCLK 69
+ #define ASPEED_CLK_MAC4RCLK 70
+ #define ASPEED_CLK_I3C 71
++#define ASPEED_CLK_HUARTX 72
++#define ASPEED_CLK_UXCLK 73
++#define ASPEED_CLK_HUXCLK 74
+
+ /* Only list resets here that are not part of a clock gate + reset pair */
++#define ASPEED_RESET_ESPI 57
+ #define ASPEED_RESET_ADC 55
+ #define ASPEED_RESET_JTAG_MASTER2 54
+
+@@ -117,10 +121,16 @@
+ #define ASPEED_RESET_DEV_MCTP 24
+ #define ASPEED_RESET_RC_MCTP 23
+ #define ASPEED_RESET_JTAG_MASTER 22
+-#define ASPEED_RESET_PCIE_DEV_O 21
+-#define ASPEED_RESET_PCIE_DEV_OEN 20
+-#define ASPEED_RESET_PCIE_RC_O 19
+-#define ASPEED_RESET_PCIE_RC_OEN 18
++#define ASPEED_RESET_PCIE_DEV_OE 21
++#define ASPEED_RESET_PCIE_DEV_O 20
++#define ASPEED_RESET_PCIE_RC_OE 19
++#define ASPEED_RESET_PCIE_RC_O 18
++#define ASPEED_RESET_EMMC 16
++#define ASPEED_RESET_CRT 13
++#define ASPEED_RESET_MAC2 12
++#define ASPEED_RESET_MAC1 11
++#define ASPEED_RESET_RVAS 9
++#define ASPEED_RESET_VIDEO 6
+ #define ASPEED_RESET_PCI_DP 5
+ #define ASPEED_RESET_HACE 4
+ #define ASPEED_RESET_AHB 1
+diff --git a/include/dt-bindings/dma/ast2600-udma.h b/include/dt-bindings/dma/ast2600-udma.h
+new file mode 100644
+index 000000000..0b92035b9
+--- /dev/null
++++ b/include/dt-bindings/dma/ast2600-udma.h
+@@ -0,0 +1,40 @@
++/* SPDX-License-Identifier: GPL-2.0-only */
++/*
++ * This header provides macros for Aspeed AST2600 UDMA bindings
++ *
++ * Copyright (c) 2023 Aspeed Technology Inc.
++ */
++
++#ifndef __DT_BINDINGS_DMA_AST2600_UDMA_H__
++#define __DT_BINDINGS_DMA_AST2600_UDMA_H__
++
++#define AST2600_UDMA_UART1_TX 0
++#define AST2600_UDMA_UART1_RX 1
++#define AST2600_UDMA_UART2_TX 2
++#define AST2600_UDMA_UART2_RX 3
++#define AST2600_UDMA_UART3_TX 4
++#define AST2600_UDMA_UART3_RX 5
++#define AST2600_UDMA_UART4_TX 6
++#define AST2600_UDMA_UART4_RX 7
++#define AST2600_UDMA_UART6_TX 8
++#define AST2600_UDMA_UART6_RX 9
++#define AST2600_UDMA_UART7_TX 10
++#define AST2600_UDMA_UART7_RX 11
++#define AST2600_UDMA_UART8_TX 12
++#define AST2600_UDMA_UART8_RX 13
++#define AST2600_UDMA_UART9_TX 14
++#define AST2600_UDMA_UART9_RX 15
++#define AST2600_UDMA_UART10_TX 16
++#define AST2600_UDMA_UART10_RX 17
++#define AST2600_UDMA_UART11_TX 18
++#define AST2600_UDMA_UART11_RX 19
++#define AST2600_UDMA_UART12_TX 20
++#define AST2600_UDMA_UART12_RX 21
++#define AST2600_UDMA_UART13_TX 22
++#define AST2600_UDMA_UART13_RX 23
++#define AST2600_UDMA_VUART1_TX 24
++#define AST2600_UDMA_VUART1_RX 25
++#define AST2600_UDMA_VUART2_TX 26
++#define AST2600_UDMA_VUART2_RX 27
++
++#endif /* __DT_BINDINGS_DMA_AST2600_UDMA_H__ */
+diff --git a/include/dt-bindings/gpio/aspeed-gpio.h b/include/dt-bindings/gpio/aspeed-gpio.h
+index 56fc4889b..5b5013b0f 100644
+--- a/include/dt-bindings/gpio/aspeed-gpio.h
++++ b/include/dt-bindings/gpio/aspeed-gpio.h
+@@ -42,6 +42,8 @@
+ #define ASPEED_GPIO_PORT_AA 26
+ #define ASPEED_GPIO_PORT_AB 27
+ #define ASPEED_GPIO_PORT_AC 28
++#define ASPEED_GPIO_PORT_AD 29
++#define ASPEED_GPIO_PORT_AE 30
+
+ #define ASPEED_GPIO(port, offset) \
+ ((ASPEED_GPIO_PORT_##port * 8) + offset)
+diff --git a/include/dt-bindings/i3c/i3c.h b/include/dt-bindings/i3c/i3c.h
+new file mode 100644
+index 000000000..aa7dfcd5e
+--- /dev/null
++++ b/include/dt-bindings/i3c/i3c.h
+@@ -0,0 +1,17 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * This header provides constants for I3C bindings
++ *
++ * Copyright 2023 Aspeed Technology Inc.
++ */
++#ifndef _DT_BINDINGS_I3C_I3C_H
++#define _DT_BINDINGS_I3C_I3C_H
++
++#define I3C_BUS_CONTEXT_MIPI_BASIC_V1_0_0 0x10
++#define I3C_BUS_CONTEXT_MIPI_BASIC_V1_1_0 0x11
++#define I3C_BUS_CONTEXT_MIPI_BASIC_V1_1_1 0x31
++#define I3C_BUS_CONTEXT_MIPI_BASIC_V1_2_0 0x12
++#define I3C_BUS_CONTEXT_JESD403 0x80
++#define I3C_BUS_CONTEXT_MCTP 0x81
++
++#endif
+diff --git a/include/dt-bindings/interrupt-controller/aspeed-e2m-ic.h b/include/dt-bindings/interrupt-controller/aspeed-e2m-ic.h
+new file mode 100644
+index 000000000..9449945b1
+--- /dev/null
++++ b/include/dt-bindings/interrupt-controller/aspeed-e2m-ic.h
+@@ -0,0 +1,21 @@
++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
++/*
++ * Device Tree binding constants for AST2700 E2M interrupt
++ * controller.
++ *
++ * Copyright (c) 2024 Aspeed Technology Inc.
++ */
++
++#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_E2M_IC_H_
++#define _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_E2M_IC_H_
++
++#define ASPEED_AST2700_E2M_MMBI_H2B_INT0 0
++#define ASPEED_AST2700_E2M_MMBI_H2B_INT1 1
++#define ASPEED_AST2700_E2M_MMBI_H2B_INT2 2
++#define ASPEED_AST2700_E2M_MMBI_H2B_INT3 3
++#define ASPEED_AST2700_E2M_MMBI_H2B_INT4 4
++#define ASPEED_AST2700_E2M_MMBI_H2B_INT5 5
++#define ASPEED_AST2700_E2M_MMBI_H2B_INT6 6
++#define ASPEED_AST2700_E2M_MMBI_H2B_INT7 7
++
++#endif /* _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_E2M_IC_H_ */
+diff --git a/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h b/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
+index f315d5a7f..7dd04424a 100644
+--- a/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
++++ b/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
+@@ -20,4 +20,18 @@
+ #define ASPEED_AST2600_SCU_IC1_LPC_RESET_LO_TO_HI 0
+ #define ASPEED_AST2600_SCU_IC1_LPC_RESET_HI_TO_LO 1
+
++#define ASPEED_AST2700_SCU_IC0_PCIE_PERST_LO_TO_HI 3
++#define ASPEED_AST2700_SCU_IC0_PCIE_PERST_HI_TO_LO 2
++
++#define ASPEED_AST2700_SCU_IC1_PCIE_RCRST_LO_TO_HI 3
++#define ASPEED_AST2700_SCU_IC1_PCIE_RCRST_HI_TO_LO 2
++
++#define ASPEED_AST2700_SCU_IC2_PCIE_PERST_LO_TO_HI 3
++#define ASPEED_AST2700_SCU_IC2_PCIE_PERST_HI_TO_LO 2
++#define ASPEED_AST2700_SCU_IC2_LPC_RESET_LO_TO_HI 1
++#define ASPEED_AST2700_SCU_IC2_LPC_RESET_HI_TO_LO 0
++
++#define ASPEED_AST2700_SCU_IC3_LPC_RESET_LO_TO_HI 1
++#define ASPEED_AST2700_SCU_IC3_LPC_RESET_HI_TO_LO 0
++
+ #endif /* _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_SCU_IC_H_ */
+diff --git a/include/dt-bindings/reset/aspeed,ast1700-reset.h b/include/dt-bindings/reset/aspeed,ast1700-reset.h
+new file mode 100644
+index 000000000..9a1eb7832
+--- /dev/null
++++ b/include/dt-bindings/reset/aspeed,ast1700-reset.h
+@@ -0,0 +1,69 @@
++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
++/*
++ * Device Tree binding constants for AST2700 reset controller.
++ *
++ * Copyright (c) 2023 Aspeed Technology Inc.
++ */
++
++#ifndef _MACH_ASPEED_AST1700_RESET_H_
++#define _MACH_ASPEED_AST1700_RESET_H_
++
++#define AST1700_RESET_LPC0 (0)
++#define AST1700_RESET_LPC1 (1)
++#define AST1700_RESET_MII (2)
++#define AST1700_RESET_PECI (3)
++#define AST1700_RESET_PWM (4)
++#define AST1700_RESET_MAC0 (5)
++#define AST1700_RESET_MAC1 (6)
++#define AST1700_RESET_MAC2 (7)
++#define AST1700_RESET_ADC (8)
++#define AST1700_RESET_SD (9)
++#define AST1700_RESET_ESPI0 (10)
++#define AST1700_RESET_ESPI1 (11)
++#define AST1700_RESET_JTAG1 (12)
++#define AST1700_RESET_SPI0 (13)
++#define AST1700_RESET_SPI1 (14)
++#define AST1700_RESET_SPI2 (15)
++#define AST1700_RESET_I3C0 (16)
++#define AST1700_RESET_I3C1 (17)
++#define AST1700_RESET_I3C2 (18)
++#define AST1700_RESET_I3C3 (19)
++#define AST1700_RESET_I3C4 (20)
++#define AST1700_RESET_I3C5 (21)
++#define AST1700_RESET_I3C6 (22)
++#define AST1700_RESET_I3C7 (23)
++#define AST1700_RESET_I3C8 (24)
++#define AST1700_RESET_I3C9 (25)
++#define AST1700_RESET_I3C10 (26)
++#define AST1700_RESET_I3C11 (27)
++#define AST1700_RESET_I3C12 (28)
++#define AST1700_RESET_I3C13 (29)
++#define AST1700_RESET_I3C14 (30)
++#define AST1700_RESET_I3C15 (31)
++/* reserved 32 */
++#define AST1700_RESET_IOMCU (33)
++#define AST1700_RESET_H2A_SPI1 (34)
++#define AST1700_RESET_H2A_SPI2 (35)
++#define AST1700_RESET_UART0 (36)
++#define AST1700_RESET_UART1 (37)
++#define AST1700_RESET_UART2 (38)
++#define AST1700_RESET_UART3 (39)
++#define AST1700_RESET_I2C_FILTER (40)
++#define AST1700_RESET_CALIPTRA (41)
++/* reserved 42:43 */
++#define AST1700_RESET_FSI (44)
++#define AST1700_RESET_CAN (45)
++#define AST1700_RESET_MCTP (46)
++#define AST1700_RESET_I2C (47)
++#define AST1700_RESET_UART6 (48)
++#define AST1700_RESET_UART7 (49)
++#define AST1700_RESET_UART8 (50)
++#define AST1700_RESET_UART9 (51)
++#define AST1700_RESET_LTPI (52)
++#define AST1700_RESET_VGAL (53)
++/* reserved 54:62 */
++#define AST1700_RESET_I3CDMA (63)
++
++#define AST1700_RESET_NUMS (AST1700_RESET_I3CDMA + 1)
++
++#endif /* _MACH_ASPEED_AST1700_RESET_H_ */
+diff --git a/include/dt-bindings/reset/aspeed,ast2700-scu.h b/include/dt-bindings/reset/aspeed,ast2700-scu.h
+new file mode 100644
+index 000000000..d53c719b7
+--- /dev/null
++++ b/include/dt-bindings/reset/aspeed,ast2700-scu.h
+@@ -0,0 +1,124 @@
++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
++/*
++ * Device Tree binding constants for AST2700 reset controller.
++ *
++ * Copyright (c) 2024 Aspeed Technology Inc.
++ */
++
++#ifndef _MACH_ASPEED_AST2700_RESET_H_
++#define _MACH_ASPEED_AST2700_RESET_H_
++
++/* SOC0 */
++#define SCU0_RESET_SDRAM 0
++#define SCU0_RESET_DDRPHY 1
++#define SCU0_RESET_RSA 2
++#define SCU0_RESET_SHA3 3
++#define SCU0_RESET_HACE 4
++#define SCU0_RESET_SOC 5
++#define SCU0_RESET_VIDEO 6
++#define SCU0_RESET_2D 7
++#define SCU0_RESET_PCIS 8
++#define SCU0_RESET_RVAS0 9
++#define SCU0_RESET_RVAS1 10
++#define SCU0_RESET_SM3 11
++#define SCU0_RESET_SM4 12
++#define SCU0_RESET_CRT0 13
++#define SCU0_RESET_ECC 14
++#define SCU0_RESET_DP_PCI 15
++#define SCU0_RESET_UFS 16
++#define SCU0_RESET_EMMC 17
++#define SCU0_RESET_PCIE1RST 18
++#define SCU0_RESET_PCIE1RSTOE 19
++#define SCU0_RESET_PCIE0RST 20
++#define SCU0_RESET_PCIE0RSTOE 21
++#define SCU0_RESET_JTAG 22
++#define SCU0_RESET_MCTP0 23
++#define SCU0_RESET_MCTP1 24
++#define SCU0_RESET_XDMA0 25
++#define SCU0_RESET_XDMA1 26
++#define SCU0_RESET_H2X1 27
++#define SCU0_RESET_DP 28
++#define SCU0_RESET_DP_MCU 29
++#define SCU0_RESET_SSP 30
++#define SCU0_RESET_H2X0 31
++#define SCU0_RESET_PORTA_VHUB 32
++#define SCU0_RESET_PORTA_PHY3 33
++#define SCU0_RESET_PORTA_XHCI 34
++#define SCU0_RESET_PORTB_VHUB 35
++#define SCU0_RESET_PORTB_PHY3 36
++#define SCU0_RESET_PORTB_XHCI 37
++#define SCU0_RESET_PORTA_VHUB_EHCI 38
++#define SCU0_RESET_PORTB_VHUB_EHCI 39
++#define SCU0_RESET_UHCI 40
++#define SCU0_RESET_TSP 41
++#define SCU0_RESET_E2M0 42
++#define SCU0_RESET_E2M1 43
++#define SCU0_RESET_VLINK 44
++
++/* SOC1 */
++#define SCU1_RESET_LPC0 0
++#define SCU1_RESET_LPC1 1
++#define SCU1_RESET_MII 2
++#define SCU1_RESET_PECI 3
++#define SCU1_RESET_PWM 4
++#define SCU1_RESET_MAC0 5
++#define SCU1_RESET_MAC1 6
++#define SCU1_RESET_MAC2 7
++#define SCU1_RESET_ADC 8
++#define SCU1_RESET_SD 9
++#define SCU1_RESET_ESPI0 10
++#define SCU1_RESET_ESPI1 11
++#define SCU1_RESET_JTAG1 12
++#define SCU1_RESET_SPI0 13
++#define SCU1_RESET_SPI1 14
++#define SCU1_RESET_SPI2 15
++#define SCU1_RESET_I3C0 16
++#define SCU1_RESET_I3C1 17
++#define SCU1_RESET_I3C2 18
++#define SCU1_RESET_I3C3 19
++#define SCU1_RESET_I3C4 20
++#define SCU1_RESET_I3C5 21
++#define SCU1_RESET_I3C6 22
++#define SCU1_RESET_I3C7 23
++#define SCU1_RESET_I3C8 24
++#define SCU1_RESET_I3C9 25
++#define SCU1_RESET_I3C10 26
++#define SCU1_RESET_I3C11 27
++#define SCU1_RESET_I3C12 28
++#define SCU1_RESET_I3C13 29
++#define SCU1_RESET_I3C14 30
++#define SCU1_RESET_I3C15 31
++#define SCU1_RESET_MCU0 32
++#define SCU1_RESET_MCU1 33
++#define SCU1_RESET_H2A_SPI1 34
++#define SCU1_RESET_H2A_SPI2 35
++#define SCU1_RESET_UART0 36
++#define SCU1_RESET_UART1 37
++#define SCU1_RESET_UART2 38
++#define SCU1_RESET_UART3 39
++#define SCU1_RESET_I2C_FILTER 40
++#define SCU1_RESET_CALIPTRA 41
++#define SCU1_RESET_XDMA 42
++#define SCU1_RESET_FSI 43
++#define SCU1_RESET_CAN 44
++#define SCU1_RESET_MCTP 45
++#define SCU1_RESET_I2C 46
++#define SCU1_RESET_UART6 47
++#define SCU1_RESET_UART7 48
++#define SCU1_RESET_UART8 49
++#define SCU1_RESET_UART9 50
++#define SCU1_RESET_LTPI0 51
++#define SCU1_RESET_VGAL 52
++#define SCU1_RESET_LTPI1 53
++#define SCU1_RESET_ACE 54
++#define SCU1_RESET_E2M 55
++#define SCU1_RESET_UHCI 56
++#define SCU1_RESET_PORTC_USB2UART 57
++#define SCU1_RESET_PORTC_VHUB_EHCI 58
++#define SCU1_RESET_PORTD_USB2UART 59
++#define SCU1_RESET_PORTD_VHUB_EHCI 60
++#define SCU1_RESET_H2X 61
++#define SCU1_RESET_I3CDMA 62
++#define SCU1_RESET_PCIE2RST 63
++
++#endif /* _MACH_ASPEED_AST2700_RESET_H_ */
+diff --git a/include/dt-bindings/watchdog/aspeed-wdt.h b/include/dt-bindings/watchdog/aspeed-wdt.h
+new file mode 100644
+index 000000000..7ae6d84b2
+--- /dev/null
++++ b/include/dt-bindings/watchdog/aspeed-wdt.h
+@@ -0,0 +1,92 @@
++/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
++
++#ifndef DT_BINDINGS_ASPEED_WDT_H
++#define DT_BINDINGS_ASPEED_WDT_H
++
++#define AST2500_WDT_RESET_CPU (1 << 0)
++#define AST2500_WDT_RESET_COPROC (1 << 1)
++#define AST2500_WDT_RESET_SDRAM (1 << 2)
++#define AST2500_WDT_RESET_AHB (1 << 3)
++#define AST2500_WDT_RESET_I2C (1 << 4)
++#define AST2500_WDT_RESET_MAC0 (1 << 5)
++#define AST2500_WDT_RESET_MAC1 (1 << 6)
++#define AST2500_WDT_RESET_GRAPHICS (1 << 7)
++#define AST2500_WDT_RESET_USB2_HOST_HUB (1 << 8)
++#define AST2500_WDT_RESET_USB_HOST (1 << 9)
++#define AST2500_WDT_RESET_HID_EHCI (1 << 10)
++#define AST2500_WDT_RESET_VIDEO (1 << 11)
++#define AST2500_WDT_RESET_HAC (1 << 12)
++#define AST2500_WDT_RESET_LPC (1 << 13)
++#define AST2500_WDT_RESET_SDIO (1 << 14)
++#define AST2500_WDT_RESET_MIC (1 << 15)
++#define AST2500_WDT_RESET_CRT (1 << 16)
++#define AST2500_WDT_RESET_PWM (1 << 17)
++#define AST2500_WDT_RESET_PECI (1 << 18)
++#define AST2500_WDT_RESET_JTAG (1 << 19)
++#define AST2500_WDT_RESET_ADC (1 << 20)
++#define AST2500_WDT_RESET_GPIO (1 << 21)
++#define AST2500_WDT_RESET_MCTP (1 << 22)
++#define AST2500_WDT_RESET_XDMA (1 << 23)
++#define AST2500_WDT_RESET_SPI (1 << 24)
++#define AST2500_WDT_RESET_SOC_MISC (1 << 25)
++
++#define AST2500_WDT_RESET_DEFAULT 0x023ffff3
++
++#define AST2600_WDT_RESET1_CPU (1 << 0)
++#define AST2600_WDT_RESET1_SDRAM (1 << 1)
++#define AST2600_WDT_RESET1_AHB (1 << 2)
++#define AST2600_WDT_RESET1_SLI (1 << 3)
++#define AST2600_WDT_RESET1_SOC_MISC0 (1 << 4)
++#define AST2600_WDT_RESET1_COPROC (1 << 5)
++#define AST2600_WDT_RESET1_USB_A (1 << 6)
++#define AST2600_WDT_RESET1_USB_B (1 << 7)
++#define AST2600_WDT_RESET1_UHCI (1 << 8)
++#define AST2600_WDT_RESET1_GRAPHICS (1 << 9)
++#define AST2600_WDT_RESET1_CRT (1 << 10)
++#define AST2600_WDT_RESET1_VIDEO (1 << 11)
++#define AST2600_WDT_RESET1_HAC (1 << 12)
++#define AST2600_WDT_RESET1_DP (1 << 13)
++#define AST2600_WDT_RESET1_DP_MCU (1 << 14)
++#define AST2600_WDT_RESET1_GP_MCU (1 << 15)
++#define AST2600_WDT_RESET1_MAC0 (1 << 16)
++#define AST2600_WDT_RESET1_MAC1 (1 << 17)
++#define AST2600_WDT_RESET1_SDIO0 (1 << 18)
++#define AST2600_WDT_RESET1_JTAG0 (1 << 19)
++#define AST2600_WDT_RESET1_MCTP0 (1 << 20)
++#define AST2600_WDT_RESET1_MCTP1 (1 << 21)
++#define AST2600_WDT_RESET1_XDMA0 (1 << 22)
++#define AST2600_WDT_RESET1_XDMA1 (1 << 23)
++#define AST2600_WDT_RESET1_GPIO0 (1 << 24)
++#define AST2600_WDT_RESET1_RVAS (1 << 25)
++
++#define AST2600_WDT_RESET1_DEFAULT 0x030f1ff1
++
++#define AST2600_WDT_RESET2_CPU (1 << 0)
++#define AST2600_WDT_RESET2_SPI (1 << 1)
++#define AST2600_WDT_RESET2_AHB2 (1 << 2)
++#define AST2600_WDT_RESET2_SLI2 (1 << 3)
++#define AST2600_WDT_RESET2_SOC_MISC1 (1 << 4)
++#define AST2600_WDT_RESET2_MAC2 (1 << 5)
++#define AST2600_WDT_RESET2_MAC3 (1 << 6)
++#define AST2600_WDT_RESET2_SDIO1 (1 << 7)
++#define AST2600_WDT_RESET2_JTAG1 (1 << 8)
++#define AST2600_WDT_RESET2_GPIO1 (1 << 9)
++#define AST2600_WDT_RESET2_MDIO (1 << 10)
++#define AST2600_WDT_RESET2_LPC (1 << 11)
++#define AST2600_WDT_RESET2_PECI (1 << 12)
++#define AST2600_WDT_RESET2_PWM (1 << 13)
++#define AST2600_WDT_RESET2_ADC (1 << 14)
++#define AST2600_WDT_RESET2_FSI (1 << 15)
++#define AST2600_WDT_RESET2_I2C (1 << 16)
++#define AST2600_WDT_RESET2_I3C_GLOBAL (1 << 17)
++#define AST2600_WDT_RESET2_I3C0 (1 << 18)
++#define AST2600_WDT_RESET2_I3C1 (1 << 19)
++#define AST2600_WDT_RESET2_I3C2 (1 << 20)
++#define AST2600_WDT_RESET2_I3C3 (1 << 21)
++#define AST2600_WDT_RESET2_I3C4 (1 << 22)
++#define AST2600_WDT_RESET2_I3C5 (1 << 23)
++#define AST2600_WDT_RESET2_ESPI (1 << 26)
++
++#define AST2600_WDT_RESET2_DEFAULT 0x03fffff1
++
++#endif
+--
+2.34.1
+
diff --git a/recipes-kernel/linux/files/0002-dt-bings-head-file-for-2700-A0-A1.patch b/recipes-kernel/linux/files/0002-dt-bings-head-file-for-2700-A0-A1.patch
deleted file mode 100644
index f3360be..0000000
--- a/recipes-kernel/linux/files/0002-dt-bings-head-file-for-2700-A0-A1.patch
+++ /dev/null
@@ -1,397 +0,0 @@
-From 5d987e19b91d7c63f882e7f2986e6401598c591c Mon Sep 17 00:00:00 2001
-From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
-Date: Thu, 14 Nov 2024 15:47:35 +0800
-Subject: [PATCH] dt-bings head file for 2700 A0 A1
-
-This is from aspeed kernel:
-a769cc67850759a3952f7a40f5f5798c3d0f7bfd
-
-Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
----
- .../dt-bindings/clock/aspeed,ast2700-scu.h | 163 ++++++++++++++++++
- include/dt-bindings/clock/aspeed-clock.h | 1 +
- include/dt-bindings/gpio/aspeed-gpio.h | 2 +
- .../interrupt-controller/aspeed-e2m-ic.h | 21 +++
- .../interrupt-controller/aspeed-scu-ic.h | 14 ++
- .../dt-bindings/reset/aspeed,ast2700-scu.h | 124 +++++++++++++
- 6 files changed, 325 insertions(+)
- create mode 100644 include/dt-bindings/clock/aspeed,ast2700-scu.h
- create mode 100644 include/dt-bindings/interrupt-controller/aspeed-e2m-ic.h
- create mode 100644 include/dt-bindings/reset/aspeed,ast2700-scu.h
-
-diff --git a/include/dt-bindings/clock/aspeed,ast2700-scu.h b/include/dt-bindings/clock/aspeed,ast2700-scu.h
-new file mode 100644
-index 000000000000..63021af3caf5
---- /dev/null
-+++ b/include/dt-bindings/clock/aspeed,ast2700-scu.h
-@@ -0,0 +1,163 @@
-+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
-+/*
-+ * Device Tree binding constants for AST2700 clock controller.
-+ *
-+ * Copyright (c) 2024 Aspeed Technology Inc.
-+ */
-+
-+#ifndef __DT_BINDINGS_CLOCK_AST2700_H
-+#define __DT_BINDINGS_CLOCK_AST2700_H
-+
-+/* SOC0 clk */
-+#define SCU0_CLKIN 0
-+#define SCU0_CLK_24M 1
-+#define SCU0_CLK_192M 2
-+#define SCU0_CLK_UART 3
-+#define SCU0_CLK_UART_DIV13 3
-+#define SCU0_CLK_PSP 4
-+#define SCU0_CLK_HPLL 5
-+#define SCU0_CLK_HPLL_DIV2 6
-+#define SCU0_CLK_HPLL_DIV4 7
-+#define SCU0_CLK_HPLL_DIV_AHB 8
-+#define SCU0_CLK_DPLL 9
-+#define SCU0_CLK_MPLL 10
-+#define SCU0_CLK_MPLL_DIV2 11
-+#define SCU0_CLK_MPLL_DIV4 12
-+#define SCU0_CLK_MPLL_DIV8 13
-+#define SCU0_CLK_MPLL_DIV_AHB 14
-+#define SCU0_CLK_D0 15
-+#define SCU0_CLK_D1 16
-+#define SCU0_CLK_CRT0 17
-+#define SCU0_CLK_CRT1 18
-+#define SCU0_CLK_MPHY 19
-+#define SCU0_CLK_AXI0 20
-+#define SCU0_CLK_AXI1 21
-+#define SCU0_CLK_AHB 22
-+#define SCU0_CLK_APB 23
-+#define SCU0_CLK_UART4 24
-+#define SCU0_CLK_EMMCMUX 25
-+#define SCU0_CLK_EMMC 26
-+#define SCU0_CLK_U2PHY_CLK12M 27
-+#define SCU0_CLK_U2PHY_REFCLK 28
-+
-+/* SOC0 clk-gate */
-+#define SCU0_CLK_GATE_MCLK 29
-+#define SCU0_CLK_GATE_ECLK 30
-+#define SCU0_CLK_GATE_2DCLK 31
-+#define SCU0_CLK_GATE_VCLK 32
-+#define SCU0_CLK_GATE_BCLK 33
-+#define SCU0_CLK_GATE_VGA0CLK 34
-+#define SCU0_CLK_GATE_REFCLK 35
-+#define SCU0_CLK_GATE_PORTBUSB2CLK 36
-+#define SCU0_CLK_GATE_UHCICLK 37
-+#define SCU0_CLK_GATE_VGA1CLK 38
-+#define SCU0_CLK_GATE_DDRPHYCLK 39
-+#define SCU0_CLK_GATE_E2M0CLK 40
-+#define SCU0_CLK_GATE_HACCLK 41
-+#define SCU0_CLK_GATE_PORTAUSB2CLK 42
-+#define SCU0_CLK_GATE_UART4CLK 43
-+#define SCU0_CLK_GATE_SLICLK 44
-+#define SCU0_CLK_GATE_DACCLK 45
-+#define SCU0_CLK_GATE_DP 46
-+#define SCU0_CLK_GATE_E2M1CLK 47
-+#define SCU0_CLK_GATE_CRT0CLK 48
-+#define SCU0_CLK_GATE_CRT1CLK 49
-+#define SCU0_CLK_GATE_ECDSACLK 50
-+#define SCU0_CLK_GATE_RSACLK 51
-+#define SCU0_CLK_GATE_RVAS0CLK 52
-+#define SCU0_CLK_GATE_UFSCLK 53
-+#define SCU0_CLK_GATE_EMMCCLK 54
-+#define SCU0_CLK_GATE_RVAS1CLK 55
-+
-+/* SOC1 clk */
-+#define SCU1_CLKIN 0
-+#define SCU1_CLK_HPLL 1
-+#define SCU1_CLK_APLL 2
-+#define SCU1_CLK_APLL_DIV2 3
-+#define SCU1_CLK_APLL_DIV4 4
-+#define SCU1_CLK_DPLL 5
-+#define SCU1_CLK_UXCLK 6
-+#define SCU1_CLK_HUXCLK 7
-+#define SCU1_CLK_UARTX 8
-+#define SCU1_CLK_HUARTX 9
-+#define SCU1_CLK_AHB 10
-+#define SCU1_CLK_APB 11
-+#define SCU1_CLK_UART0 12
-+#define SCU1_CLK_UART1 13
-+#define SCU1_CLK_UART2 14
-+#define SCU1_CLK_UART3 15
-+#define SCU1_CLK_UART5 16
-+#define SCU1_CLK_UART6 17
-+#define SCU1_CLK_UART7 18
-+#define SCU1_CLK_UART8 19
-+#define SCU1_CLK_UART9 20
-+#define SCU1_CLK_UART10 21
-+#define SCU1_CLK_UART11 22
-+#define SCU1_CLK_UART12 23
-+#define SCU1_CLK_UART13 24
-+#define SCU1_CLK_UART14 25
-+#define SCU1_CLK_APLL_DIVN 26
-+#define SCU1_CLK_SDMUX 27
-+#define SCU1_CLK_SDCLK 28
-+#define SCU1_CLK_RMII 29
-+#define SCU1_CLK_RGMII 30
-+#define SCU1_CLK_MACHCLK 31
-+#define SCU1_CLK_MAC0RCLK 32
-+#define SCU1_CLK_MAC1RCLK 33
-+#define SCU1_CLK_CAN 34
-+
-+/* SOC1 clk gate */
-+#define SCU1_CLK_GATE_LCLK0 35
-+#define SCU1_CLK_GATE_LCLK1 36
-+#define SCU1_CLK_GATE_ESPI0CLK 37
-+#define SCU1_CLK_GATE_ESPI1CLK 38
-+#define SCU1_CLK_GATE_SDCLK 39
-+#define SCU1_CLK_GATE_IPEREFCLK 40
-+#define SCU1_CLK_GATE_REFCLK 41
-+#define SCU1_CLK_GATE_LPCHCLK 42
-+#define SCU1_CLK_GATE_MAC0CLK 43
-+#define SCU1_CLK_GATE_MAC1CLK 44
-+#define SCU1_CLK_GATE_MAC2CLK 45
-+#define SCU1_CLK_GATE_UART0CLK 46
-+#define SCU1_CLK_GATE_UART1CLK 47
-+#define SCU1_CLK_GATE_UART2CLK 48
-+#define SCU1_CLK_GATE_UART3CLK 49
-+#define SCU1_CLK_GATE_I2CCLK 50
-+#define SCU1_CLK_GATE_I3C0CLK 51
-+#define SCU1_CLK_GATE_I3C1CLK 52
-+#define SCU1_CLK_GATE_I3C2CLK 53
-+#define SCU1_CLK_GATE_I3C3CLK 54
-+#define SCU1_CLK_GATE_I3C4CLK 55
-+#define SCU1_CLK_GATE_I3C5CLK 56
-+#define SCU1_CLK_GATE_I3C6CLK 57
-+#define SCU1_CLK_GATE_I3C7CLK 58
-+#define SCU1_CLK_GATE_I3C8CLK 59
-+#define SCU1_CLK_GATE_I3C9CLK 60
-+#define SCU1_CLK_GATE_I3C10CLK 61
-+#define SCU1_CLK_GATE_I3C11CLK 62
-+#define SCU1_CLK_GATE_I3C12CLK 63
-+#define SCU1_CLK_GATE_I3C13CLK 64
-+#define SCU1_CLK_GATE_I3C14CLK 65
-+#define SCU1_CLK_GATE_I3C15CLK 66
-+#define SCU1_CLK_GATE_UART5CLK 67
-+#define SCU1_CLK_GATE_UART6CLK 68
-+#define SCU1_CLK_GATE_UART7CLK 69
-+#define SCU1_CLK_GATE_UART8CLK 70
-+#define SCU1_CLK_GATE_UART9CLK 71
-+#define SCU1_CLK_GATE_UART10CLK 72
-+#define SCU1_CLK_GATE_UART11CLK 73
-+#define SCU1_CLK_GATE_UART12CLK 74
-+#define SCU1_CLK_GATE_FSICLK 75
-+#define SCU1_CLK_GATE_LTPIPHYCLK 76
-+#define SCU1_CLK_GATE_LTPICLK 77
-+#define SCU1_CLK_GATE_VGALCLK 78
-+#define SCU1_CLK_GATE_UHCICLK 79
-+#define SCU1_CLK_GATE_CANCLK 80
-+#define SCU1_CLK_GATE_PCICLK 81
-+#define SCU1_CLK_GATE_SLICLK 82
-+#define SCU1_CLK_GATE_E2MCLK 83
-+#define SCU1_CLK_GATE_PORTCUSB2CLK 84
-+#define SCU1_CLK_GATE_PORTDUSB2CLK 85
-+#define SCU1_CLK_GATE_LTPI1TXCLK 86
-+
-+#endif
-diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
-index 06d568382c77..421ca577c1b2 100644
---- a/include/dt-bindings/clock/aspeed-clock.h
-+++ b/include/dt-bindings/clock/aspeed-clock.h
-@@ -53,5 +53,6 @@
- #define ASPEED_RESET_AHB 8
- #define ASPEED_RESET_CRT1 9
- #define ASPEED_RESET_HACE 10
-+#define ASPEED_RESET_VIDEO 21
-
- #endif
-diff --git a/include/dt-bindings/gpio/aspeed-gpio.h b/include/dt-bindings/gpio/aspeed-gpio.h
-index 56fc4889b2c4..5b5013b0f239 100644
---- a/include/dt-bindings/gpio/aspeed-gpio.h
-+++ b/include/dt-bindings/gpio/aspeed-gpio.h
-@@ -42,6 +42,8 @@
- #define ASPEED_GPIO_PORT_AA 26
- #define ASPEED_GPIO_PORT_AB 27
- #define ASPEED_GPIO_PORT_AC 28
-+#define ASPEED_GPIO_PORT_AD 29
-+#define ASPEED_GPIO_PORT_AE 30
-
- #define ASPEED_GPIO(port, offset) \
- ((ASPEED_GPIO_PORT_##port * 8) + offset)
-diff --git a/include/dt-bindings/interrupt-controller/aspeed-e2m-ic.h b/include/dt-bindings/interrupt-controller/aspeed-e2m-ic.h
-new file mode 100644
-index 000000000000..9449945b1bc8
---- /dev/null
-+++ b/include/dt-bindings/interrupt-controller/aspeed-e2m-ic.h
-@@ -0,0 +1,21 @@
-+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
-+/*
-+ * Device Tree binding constants for AST2700 E2M interrupt
-+ * controller.
-+ *
-+ * Copyright (c) 2024 Aspeed Technology Inc.
-+ */
-+
-+#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_E2M_IC_H_
-+#define _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_E2M_IC_H_
-+
-+#define ASPEED_AST2700_E2M_MMBI_H2B_INT0 0
-+#define ASPEED_AST2700_E2M_MMBI_H2B_INT1 1
-+#define ASPEED_AST2700_E2M_MMBI_H2B_INT2 2
-+#define ASPEED_AST2700_E2M_MMBI_H2B_INT3 3
-+#define ASPEED_AST2700_E2M_MMBI_H2B_INT4 4
-+#define ASPEED_AST2700_E2M_MMBI_H2B_INT5 5
-+#define ASPEED_AST2700_E2M_MMBI_H2B_INT6 6
-+#define ASPEED_AST2700_E2M_MMBI_H2B_INT7 7
-+
-+#endif /* _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_E2M_IC_H_ */
-diff --git a/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h b/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
-index f315d5a7f5ee..7dd04424afcc 100644
---- a/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
-+++ b/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
-@@ -20,4 +20,18 @@
- #define ASPEED_AST2600_SCU_IC1_LPC_RESET_LO_TO_HI 0
- #define ASPEED_AST2600_SCU_IC1_LPC_RESET_HI_TO_LO 1
-
-+#define ASPEED_AST2700_SCU_IC0_PCIE_PERST_LO_TO_HI 3
-+#define ASPEED_AST2700_SCU_IC0_PCIE_PERST_HI_TO_LO 2
-+
-+#define ASPEED_AST2700_SCU_IC1_PCIE_RCRST_LO_TO_HI 3
-+#define ASPEED_AST2700_SCU_IC1_PCIE_RCRST_HI_TO_LO 2
-+
-+#define ASPEED_AST2700_SCU_IC2_PCIE_PERST_LO_TO_HI 3
-+#define ASPEED_AST2700_SCU_IC2_PCIE_PERST_HI_TO_LO 2
-+#define ASPEED_AST2700_SCU_IC2_LPC_RESET_LO_TO_HI 1
-+#define ASPEED_AST2700_SCU_IC2_LPC_RESET_HI_TO_LO 0
-+
-+#define ASPEED_AST2700_SCU_IC3_LPC_RESET_LO_TO_HI 1
-+#define ASPEED_AST2700_SCU_IC3_LPC_RESET_HI_TO_LO 0
-+
- #endif /* _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_SCU_IC_H_ */
-diff --git a/include/dt-bindings/reset/aspeed,ast2700-scu.h b/include/dt-bindings/reset/aspeed,ast2700-scu.h
-new file mode 100644
-index 000000000000..d53c719b7a66
---- /dev/null
-+++ b/include/dt-bindings/reset/aspeed,ast2700-scu.h
-@@ -0,0 +1,124 @@
-+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
-+/*
-+ * Device Tree binding constants for AST2700 reset controller.
-+ *
-+ * Copyright (c) 2024 Aspeed Technology Inc.
-+ */
-+
-+#ifndef _MACH_ASPEED_AST2700_RESET_H_
-+#define _MACH_ASPEED_AST2700_RESET_H_
-+
-+/* SOC0 */
-+#define SCU0_RESET_SDRAM 0
-+#define SCU0_RESET_DDRPHY 1
-+#define SCU0_RESET_RSA 2
-+#define SCU0_RESET_SHA3 3
-+#define SCU0_RESET_HACE 4
-+#define SCU0_RESET_SOC 5
-+#define SCU0_RESET_VIDEO 6
-+#define SCU0_RESET_2D 7
-+#define SCU0_RESET_PCIS 8
-+#define SCU0_RESET_RVAS0 9
-+#define SCU0_RESET_RVAS1 10
-+#define SCU0_RESET_SM3 11
-+#define SCU0_RESET_SM4 12
-+#define SCU0_RESET_CRT0 13
-+#define SCU0_RESET_ECC 14
-+#define SCU0_RESET_DP_PCI 15
-+#define SCU0_RESET_UFS 16
-+#define SCU0_RESET_EMMC 17
-+#define SCU0_RESET_PCIE1RST 18
-+#define SCU0_RESET_PCIE1RSTOE 19
-+#define SCU0_RESET_PCIE0RST 20
-+#define SCU0_RESET_PCIE0RSTOE 21
-+#define SCU0_RESET_JTAG 22
-+#define SCU0_RESET_MCTP0 23
-+#define SCU0_RESET_MCTP1 24
-+#define SCU0_RESET_XDMA0 25
-+#define SCU0_RESET_XDMA1 26
-+#define SCU0_RESET_H2X1 27
-+#define SCU0_RESET_DP 28
-+#define SCU0_RESET_DP_MCU 29
-+#define SCU0_RESET_SSP 30
-+#define SCU0_RESET_H2X0 31
-+#define SCU0_RESET_PORTA_VHUB 32
-+#define SCU0_RESET_PORTA_PHY3 33
-+#define SCU0_RESET_PORTA_XHCI 34
-+#define SCU0_RESET_PORTB_VHUB 35
-+#define SCU0_RESET_PORTB_PHY3 36
-+#define SCU0_RESET_PORTB_XHCI 37
-+#define SCU0_RESET_PORTA_VHUB_EHCI 38
-+#define SCU0_RESET_PORTB_VHUB_EHCI 39
-+#define SCU0_RESET_UHCI 40
-+#define SCU0_RESET_TSP 41
-+#define SCU0_RESET_E2M0 42
-+#define SCU0_RESET_E2M1 43
-+#define SCU0_RESET_VLINK 44
-+
-+/* SOC1 */
-+#define SCU1_RESET_LPC0 0
-+#define SCU1_RESET_LPC1 1
-+#define SCU1_RESET_MII 2
-+#define SCU1_RESET_PECI 3
-+#define SCU1_RESET_PWM 4
-+#define SCU1_RESET_MAC0 5
-+#define SCU1_RESET_MAC1 6
-+#define SCU1_RESET_MAC2 7
-+#define SCU1_RESET_ADC 8
-+#define SCU1_RESET_SD 9
-+#define SCU1_RESET_ESPI0 10
-+#define SCU1_RESET_ESPI1 11
-+#define SCU1_RESET_JTAG1 12
-+#define SCU1_RESET_SPI0 13
-+#define SCU1_RESET_SPI1 14
-+#define SCU1_RESET_SPI2 15
-+#define SCU1_RESET_I3C0 16
-+#define SCU1_RESET_I3C1 17
-+#define SCU1_RESET_I3C2 18
-+#define SCU1_RESET_I3C3 19
-+#define SCU1_RESET_I3C4 20
-+#define SCU1_RESET_I3C5 21
-+#define SCU1_RESET_I3C6 22
-+#define SCU1_RESET_I3C7 23
-+#define SCU1_RESET_I3C8 24
-+#define SCU1_RESET_I3C9 25
-+#define SCU1_RESET_I3C10 26
-+#define SCU1_RESET_I3C11 27
-+#define SCU1_RESET_I3C12 28
-+#define SCU1_RESET_I3C13 29
-+#define SCU1_RESET_I3C14 30
-+#define SCU1_RESET_I3C15 31
-+#define SCU1_RESET_MCU0 32
-+#define SCU1_RESET_MCU1 33
-+#define SCU1_RESET_H2A_SPI1 34
-+#define SCU1_RESET_H2A_SPI2 35
-+#define SCU1_RESET_UART0 36
-+#define SCU1_RESET_UART1 37
-+#define SCU1_RESET_UART2 38
-+#define SCU1_RESET_UART3 39
-+#define SCU1_RESET_I2C_FILTER 40
-+#define SCU1_RESET_CALIPTRA 41
-+#define SCU1_RESET_XDMA 42
-+#define SCU1_RESET_FSI 43
-+#define SCU1_RESET_CAN 44
-+#define SCU1_RESET_MCTP 45
-+#define SCU1_RESET_I2C 46
-+#define SCU1_RESET_UART6 47
-+#define SCU1_RESET_UART7 48
-+#define SCU1_RESET_UART8 49
-+#define SCU1_RESET_UART9 50
-+#define SCU1_RESET_LTPI0 51
-+#define SCU1_RESET_VGAL 52
-+#define SCU1_RESET_LTPI1 53
-+#define SCU1_RESET_ACE 54
-+#define SCU1_RESET_E2M 55
-+#define SCU1_RESET_UHCI 56
-+#define SCU1_RESET_PORTC_USB2UART 57
-+#define SCU1_RESET_PORTC_VHUB_EHCI 58
-+#define SCU1_RESET_PORTD_USB2UART 59
-+#define SCU1_RESET_PORTD_VHUB_EHCI 60
-+#define SCU1_RESET_H2X 61
-+#define SCU1_RESET_I3CDMA 62
-+#define SCU1_RESET_PCIE2RST 63
-+
-+#endif /* _MACH_ASPEED_AST2700_RESET_H_ */
---
-2.34.1
-
diff --git a/recipes-kernel/linux/files/0003-Add-include-head-files-for-ast2700.patch b/recipes-kernel/linux/files/0003-Add-include-head-files-for-ast2700.patch
new file mode 100644
index 0000000..dda1278
--- /dev/null
+++ b/recipes-kernel/linux/files/0003-Add-include-head-files-for-ast2700.patch
@@ -0,0 +1,2017 @@
+From 4e1297cb680872cce8384bba799cbd8e6a780680 Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Fri, 7 Mar 2025 09:50:13 +0800
+Subject: [PATCH] Add include head files for ast2700
+
+Add header files for ast2700 driver use.
+
+Source:
+AspeedTech-BMC github:
+https://github.com/AspeedTech-BMC/linux/blob/aspeed-master-v6.6/
+(cherry picked from commit 769f62b7baa84d6998723b0ea60280e380183553)
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
+---
+ include/crypto/if_alg.h | 7 +-
+ include/linux/aspeed-mctp.h | 155 +++++++++++
+ include/linux/brcmphy.h | 10 +
+ include/linux/i2c.h | 42 +++
+ include/linux/i3c/ccc.h | 21 ++
+ include/linux/i3c/device.h | 95 ++++++-
+ include/linux/i3c/master.h | 62 ++++-
+ include/linux/i3c/mctp/i3c-mctp.h | 50 ++++
+ include/linux/i3c/target.h | 30 ++
+ include/linux/jtag.h | 49 ++++
+ include/linux/mmc/host.h | 2 +
+ include/linux/mtd/spi-nor.h | 6 +
+ include/linux/soc/aspeed/aspeed-otp.h | 11 +
+ include/linux/soc/aspeed/aspeed-udma.h | 30 ++
+ include/linux/usb/chipidea.h | 1 +
+ include/soc/aspeed/reset-aspeed.h | 21 ++
+ include/trace/events/xdma.h | 139 ++++++++++
+ include/uapi/linux/aspeed-mctp.h | 136 +++++++++
+ include/uapi/linux/aspeed-otp.h | 50 ++++
+ include/uapi/linux/aspeed-video.h | 9 +
+ include/uapi/linux/aspeed-xdma.h | 42 +++
+ include/uapi/linux/i3c/i3cdev.h | 37 +++
+ include/uapi/linux/if_alg.h | 3 +
+ include/uapi/linux/jtag.h | 370 +++++++++++++++++++++++++
+ include/uapi/linux/npcm-video.h | 41 +++
+ include/uapi/linux/v4l2-controls.h | 6 +
+ include/uapi/linux/videodev2.h | 2 +
+ 27 files changed, 1421 insertions(+), 6 deletions(-)
+ create mode 100644 include/linux/aspeed-mctp.h
+ create mode 100644 include/linux/i3c/mctp/i3c-mctp.h
+ create mode 100644 include/linux/i3c/target.h
+ create mode 100644 include/linux/jtag.h
+ create mode 100644 include/linux/soc/aspeed/aspeed-otp.h
+ create mode 100644 include/linux/soc/aspeed/aspeed-udma.h
+ create mode 100644 include/soc/aspeed/reset-aspeed.h
+ create mode 100644 include/trace/events/xdma.h
+ create mode 100644 include/uapi/linux/aspeed-mctp.h
+ create mode 100644 include/uapi/linux/aspeed-otp.h
+ create mode 100644 include/uapi/linux/aspeed-xdma.h
+ create mode 100644 include/uapi/linux/i3c/i3cdev.h
+ create mode 100644 include/uapi/linux/jtag.h
+ create mode 100644 include/uapi/linux/npcm-video.h
+
+diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
+index 08b803a4f..9004a83ab 100644
+--- a/include/crypto/if_alg.h
++++ b/include/crypto/if_alg.h
+@@ -18,6 +18,7 @@
+
+ #include <crypto/aead.h>
+ #include <crypto/skcipher.h>
++#include <crypto/akcipher.h>
+
+ #define ALG_MAX_PAGES 16
+
+@@ -44,6 +45,7 @@ struct af_alg_type {
+ void *(*bind)(const char *name, u32 type, u32 mask);
+ void (*release)(void *private);
+ int (*setkey)(void *private, const u8 *key, unsigned int keylen);
++ int (*setpubkey)(void *private, const u8 *key, unsigned int keylen);
+ int (*setentropy)(void *private, sockptr_t entropy, unsigned int len);
+ int (*accept)(void *private, struct sock *sk);
+ int (*accept_nokey)(void *private, struct sock *sk);
+@@ -108,6 +110,7 @@ struct af_alg_async_req {
+ union {
+ struct aead_request aead_req;
+ struct skcipher_request skcipher_req;
++ struct akcipher_request akcipher_req;
+ } cra_u;
+
+ /* req ctx trails this struct */
+@@ -132,7 +135,7 @@ struct af_alg_async_req {
+ * @more: More data to be expected from user space?
+ * @merge: Shall new data from user space be merged into existing
+ * SG?
+- * @enc: Cryptographic operation to be performed when
++ * @op: Cryptographic operation to be performed when
+ * recvmsg is invoked.
+ * @init: True if metadata has been sent.
+ * @len: Length of memory allocated for this data structure.
+@@ -151,8 +154,8 @@ struct af_alg_ctx {
+
+ bool more;
+ bool merge;
+- bool enc;
+ bool init;
++ int op;
+
+ unsigned int len;
+
+diff --git a/include/linux/aspeed-mctp.h b/include/linux/aspeed-mctp.h
+new file mode 100644
+index 000000000..7fbbf301b
+--- /dev/null
++++ b/include/linux/aspeed-mctp.h
+@@ -0,0 +1,155 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
++/* Copyright (c) 2020 Intel Corporation */
++
++#ifndef __LINUX_ASPEED_MCTP_H
++#define __LINUX_ASPEED_MCTP_H
++
++#include <linux/types.h>
++
++struct mctp_client;
++struct aspeed_mctp;
++
++struct pcie_transport_hdr {
++ u8 fmt_type;
++ u8 mbz;
++ u8 mbz_attr_len_hi;
++ u8 len_lo;
++ u16 requester;
++ u8 tag;
++ u8 code;
++ u16 target;
++ u16 vendor;
++} __packed;
++
++struct mctp_protocol_hdr {
++ u8 ver;
++ u8 dest;
++ u8 src;
++ u8 flags_seq_tag;
++} __packed;
++
++#define PCIE_VDM_HDR_SIZE 16
++#define MCTP_BTU_SIZE 64
++/* The MTU of the ASPEED MCTP can be 64/128/256 */
++#define ASPEED_MCTP_MTU MCTP_BTU_SIZE
++#define PCIE_VDM_DATA_SIZE_DW (ASPEED_MCTP_MTU / 4)
++#define PCIE_VDM_HDR_SIZE_DW (PCIE_VDM_HDR_SIZE / 4)
++
++#define PCIE_MCTP_MIN_PACKET_SIZE (PCIE_VDM_HDR_SIZE + 4)
++
++struct mctp_pcie_packet_data_2500 {
++ u32 data[32];
++};
++
++struct mctp_pcie_packet_data {
++ u32 hdr[PCIE_VDM_HDR_SIZE_DW];
++ u32 payload[PCIE_VDM_DATA_SIZE_DW];
++};
++
++struct mctp_pcie_packet {
++ struct mctp_pcie_packet_data data;
++ u32 size;
++};
++
++/**
++ * aspeed_mctp_add_type_handler() - register for the given MCTP message type
++ * @client: pointer to the existing mctp_client context
++ * @mctp_type: message type code according to DMTF DSP0239 spec.
++ * @pci_vendor_id: vendor ID (non-zero if mctp_type is Vendor Defined PCI,
++ * otherwise it should be set to 0)
++ * @vdm_type: vendor defined message type (it should be set to 0 for non-Vendor
++ * Defined PCI message type)
++ * @vdm_mask: vendor defined message mask (it should be set to 0 for non-Vendor
++ * Defined PCI message type)
++ *
++ * Return:
++ * * 0 - success,
++ * * -EINVAL - arguments passed are incorrect,
++ * * -ENOMEM - cannot alloc a new handler,
++ * * -EBUSY - given message has already registered handler.
++ */
++
++int aspeed_mctp_add_type_handler(struct mctp_client *client, u8 mctp_type,
++ u16 pci_vendor_id, u16 vdm_type, u16 vdm_mask);
++
++/**
++ * aspeed_mctp_create_client() - create mctp_client context
++ * @priv: pointer to the aspeed-mctp context
++ *
++ * Returns struct mctp_client or NULL.
++ */
++struct mctp_client *aspeed_mctp_create_client(struct aspeed_mctp *priv);
++
++/**
++ * aspeed_mctp_delete_client()- delete mctp_client context
++ * @client: pointer to existing mctp_client context
++ */
++void aspeed_mctp_delete_client(struct mctp_client *client);
++
++/**
++ * aspeed_mctp_send_packet() - send mctp_packet
++ * @client: pointer to existing mctp_client context
++ * @tx_packet: the allocated packet that needs to be sent via aspeed-mctp
++ *
++ * After the function returns success, the packet is no longer owned by the
++ * caller, and as such, the caller should not attempt to free it.
++ *
++ * Return:
++ * * 0 - success,
++ * * -ENOSPC - failed to send packet due to lack of available space.
++ */
++int aspeed_mctp_send_packet(struct mctp_client *client,
++ struct mctp_pcie_packet *tx_packet);
++
++/**
++ * aspeed_mctp_receive_packet() - receive mctp_packet
++ * @client: pointer to existing mctp_client context
++ * @timeout: timeout, in jiffies
++ *
++ * The function will sleep for up to @timeout if no packet is ready to read.
++ *
++ * After the function returns valid packet, the caller takes its ownership and
++ * is responsible for freeing it.
++ *
++ * Returns a struct mctp_pcie_packet, or ERR_PTR in case of error or when the
++ * @timeout has elapsed.
++ */
++struct mctp_pcie_packet *aspeed_mctp_receive_packet(struct mctp_client *client,
++ unsigned long timeout);
++
++/**
++ * aspeed_mctp_flush_rx_queue() - remove all mctp_packets from rx queue
++ * @client: pointer to existing mctp_client context
++ */
++void aspeed_mctp_flush_rx_queue(struct mctp_client *client);
++
++/**
++ * aspeed_mctp_get_eid_bdf() - return PCIe address for requested endpoint ID
++ * @client: pointer to existing mctp_client context
++ * @eid: requested eid
++ * @bdf: pointer to store BDF value
++ *
++ * Return:
++ * * 0 - success,
++ * * -ENOENT - there is no record for requested endpoint id.
++ */
++int aspeed_mctp_get_eid_bdf(struct mctp_client *client, u8 eid, u16 *bdf);
++
++/**
++ * aspeed_mctp_get_eid() - return EID for requested BDF and domainId.
++ * @client: pointer to existing mctp_client context
++ * @bdf: requested BDF value
++ * @domain_id: requested domainId
++ * @eid: pointer to store EID value
++ *
++ * Return:
++ * * 0 - success,
++ * * -ENOENT - there is no record for requested bdf/domainId.
++ */
++int aspeed_mctp_get_eid(struct mctp_client *client, u16 bdf,
++ u8 domain_id, u8 *eid);
++
++void *aspeed_mctp_packet_alloc(gfp_t flags);
++void aspeed_mctp_packet_free(void *packet);
++
++#endif /* __LINUX_ASPEED_MCTP_H */
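+
For reference only (not part of the cherry-picked patch): a minimal sketch of an in-kernel consumer of the MCTP client API declared above. The way the aspeed_mctp handle is obtained, the function names and the timeout are assumptions.
```
/*
 * Hypothetical consumer: register for MCTP control messages (type 0x00)
 * and read a single packet. Vendor fields must be 0 for non-VDM types.
 */
#include <linux/aspeed-mctp.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int example_mctp_read_one(struct aspeed_mctp *priv)
{
        struct mctp_client *client;
        struct mctp_pcie_packet *pkt;
        int ret;

        client = aspeed_mctp_create_client(priv);
        if (!client)
                return -ENOMEM;

        ret = aspeed_mctp_add_type_handler(client, 0x00, 0, 0, 0);
        if (ret)
                goto out;

        pkt = aspeed_mctp_receive_packet(client, msecs_to_jiffies(1000));
        if (IS_ERR(pkt)) {
                ret = PTR_ERR(pkt);
                goto out;
        }

        /* The caller owns a received packet and must free it. */
        aspeed_mctp_packet_free(pkt);
out:
        aspeed_mctp_delete_client(client);
        return ret;
}
```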
+diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
+index c55810a43..006fb9996 100644
+--- a/include/linux/brcmphy.h
++++ b/include/linux/brcmphy.h
+@@ -11,6 +11,7 @@
+
+ #define PHY_ID_BCM50610 0x0143bd60
+ #define PHY_ID_BCM50610M 0x0143bd70
++#define PHY_ID_BCM5221 0x004061e0
+ #define PHY_ID_BCM5241 0x0143bc30
+ #define PHY_ID_BCMAC131 0x0143bc70
+ #define PHY_ID_BCM5481 0x0143bca0
+@@ -420,4 +421,13 @@
+ #define BCM54XX_EXP_ECD_PAIR_D_LENGTH_RESULTS (MII_BCM54XX_EXP_SEL_ER + 0xc5)
+ #define BCM54XX_ECD_LENGTH_RESULTS_INVALID 0xffff
+
++/* MII BCM5221 Additional registers */
++#define MII_BCM5221_TEST 0x1f
++#define MII_BCM5221_TEST_ENABLE_SHADOWS 0x0080
++#define MII_BCM5221_SHDOW_AUX_STAT2 0x1b
++#define MII_BCM5221_SHDOW_AUX_STAT2_APD 0x0020
++#define MII_BCM5221_SHDOW_AUX_MODE4 0x1a
++#define MII_BCM5221_SHDOW_AUX_MODE4_IDDQMODE 0x0001
++#define MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR 0x0004
++
+ #endif /* _LINUX_BRCMPHY_H */
+diff --git a/include/linux/i2c.h b/include/linux/i2c.h
+index 32cf5708d..e131ab9db 100644
+--- a/include/linux/i2c.h
++++ b/include/linux/i2c.h
+@@ -582,6 +582,26 @@ struct i2c_lock_operations {
+ void (*unlock_bus)(struct i2c_adapter *adapter, unsigned int flags);
+ };
+
++/**
++ * struct i2c_mux_root_operations - represent operations to lock and select
++ * the adapter's mux channel (if a mux is present)
++ * @lock_select: Get exclusive access to the root I2C bus adapter with the
++ * correct mux channel selected for the adapter
++ * @unlock_deselect: Release exclusive access to the root I2C bus adapter and
++ * deselect the mux channel for the adapter
++ *
++ * Some I2C clients need the ability to control the root I2C bus even if the
++ * endpoint device is behind a mux. For example, a driver for a chip that
++ * can't handle any I2C traffic on the bus while coming out of reset (including
++ * an I2C-driven mux switching channels) may need to lock the root bus with
++ * the mux selection fixed for the entire time the device is in reset.
++ * These operations are for such a purpose.
++ */
++struct i2c_mux_root_operations {
++ struct i2c_adapter *(*lock_select)(struct i2c_adapter *adapter);
++ void (*unlock_deselect)(struct i2c_adapter *adapter);
++};
++
+ /**
+ * struct i2c_timings - I2C timing information
+ * @bus_freq_hz: the bus frequency in Hz
+@@ -724,6 +744,7 @@ struct i2c_adapter {
+
+ /* data fields that are valid for all devices */
+ const struct i2c_lock_operations *lock_ops;
++ const struct i2c_mux_root_operations *mux_root_ops;
+ struct rt_mutex bus_lock;
+ struct rt_mutex mux_lock;
+
+@@ -816,6 +837,27 @@ i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags)
+ adapter->lock_ops->unlock_bus(adapter, flags);
+ }
+
++/**
++ * i2c_lock_select_bus - Get exclusive access to the root I2C bus with the
++ * target's mux channel (if a mux is present) selected.
++ * @adapter: Target I2C bus
++ *
++ * Return the root I2C bus if mux selection succeeds, an ERR_PTR otherwise
++ */
++static inline struct i2c_adapter *i2c_lock_select_bus(struct i2c_adapter *adapter)
++{
++ return adapter->mux_root_ops->lock_select(adapter);
++}
++
++/**
++ * i2c_unlock_deselect_bus - Release exclusive access to the root I2C bus
++ * @adapter: Target I2C bus
++ */
++static inline void i2c_unlock_deselect_bus(struct i2c_adapter *adapter)
++{
++ adapter->mux_root_ops->unlock_deselect(adapter);
++}
++
+ /**
+ * i2c_mark_adapter_suspended - Report suspended state of the adapter to the core
+ * @adap: Adapter to mark as suspended
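+
For reference only (not part of the cherry-picked patch): a minimal sketch of how a client driver might use the new mux-root locking helpers. It assumes the adapter actually provides mux_root_ops; the sleep stands in for the device-specific reset sequence.
```
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/i2c.h>

static int example_reset_behind_mux(struct i2c_client *client)
{
        struct i2c_adapter *root;

        root = i2c_lock_select_bus(client->adapter);
        if (IS_ERR(root))
                return PTR_ERR(root);

        /*
         * Critical section: nothing else, including an I2C-driven mux
         * switching channels, can touch the root bus here.
         */
        usleep_range(1000, 2000);       /* stand-in for the real reset sequence */

        i2c_unlock_deselect_bus(client->adapter);
        return 0;
}
```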
+diff --git a/include/linux/i3c/ccc.h b/include/linux/i3c/ccc.h
+index ad59a4ae6..9e413cfa4 100644
+--- a/include/linux/i3c/ccc.h
++++ b/include/linux/i3c/ccc.h
+@@ -26,12 +26,16 @@
+ #define I3C_CCC_SETMRL(broadcast) I3C_CCC_ID(0xa, broadcast)
+ #define I3C_CCC_SETXTIME(broadcast) ((broadcast) ? 0x28 : 0x98)
+ #define I3C_CCC_VENDOR(id, broadcast) ((id) + ((broadcast) ? 0x61 : 0xe0))
++#define I3C_CCC_DBGACTION(broadcast) I3C_CCC_ID(0x58, broadcast)
+
+ /* Broadcast-only commands */
+ #define I3C_CCC_ENTDAA I3C_CCC_ID(0x7, true)
+ #define I3C_CCC_DEFSLVS I3C_CCC_ID(0x8, true)
+ #define I3C_CCC_ENTTM I3C_CCC_ID(0xb, true)
+ #define I3C_CCC_ENTHDR(x) I3C_CCC_ID(0x20 + (x), true)
++#define I3C_CCC_SETAASA I3C_CCC_ID(0x29, true)
++#define I3C_CCC_SETHID I3C_CCC_ID(0x61, true)
++#define I3C_CCC_DEVCTRL I3C_CCC_ID(0x62, true)
+
+ /* Unicast-only commands */
+ #define I3C_CCC_SETDASA I3C_CCC_ID(0x7, false)
+@@ -47,6 +51,7 @@
+ #define I3C_CCC_GETMXDS I3C_CCC_ID(0x14, false)
+ #define I3C_CCC_GETHDRCAP I3C_CCC_ID(0x15, false)
+ #define I3C_CCC_GETXTIME I3C_CCC_ID(0x19, false)
++#define I3C_CCC_DBGOPCODE I3C_CCC_ID(0x57, false)
+
+ #define I3C_CCC_EVENT_SIR BIT(0)
+ #define I3C_CCC_EVENT_MR BIT(1)
+@@ -243,6 +248,15 @@ struct i3c_ccc_setbrgtgt {
+ struct i3c_ccc_bridged_slave_desc bslaves[];
+ } __packed;
+
++/**
++ * struct i3c_ccc_sethid - payload passed to SETHID CCC
++ *
++ * @hid: 3-bit HID
++ */
++struct i3c_ccc_sethid {
++ u8 hid;
++};
++
+ /**
+ * enum i3c_sdr_max_data_rate - max data rate values for private SDR transfers
+ */
+@@ -291,6 +305,9 @@ struct i3c_ccc_getmxds {
+ */
+ struct i3c_ccc_gethdrcap {
+ u8 modes;
++ u8 caps;
++ u8 sup;
++ u8 reserved;
+ } __packed;
+
+ /**
+@@ -369,6 +386,8 @@ struct i3c_ccc_cmd_dest {
+ * @rnw: true if the CCC should retrieve data from the device. Only valid for
+ * unicast commands
+ * @id: CCC command id
++ * @dbp: true if the defining byte is present
++ * @db: the defining byte
+ * @ndests: number of destinations. Should always be one for broadcast commands
+ * @dests: array of destinations and associated payload for this CCC. Most of
+ * the time, only one destination is provided
+@@ -377,6 +396,8 @@ struct i3c_ccc_cmd_dest {
+ struct i3c_ccc_cmd {
+ u8 rnw;
+ u8 id;
++ u8 dbp;
++ u8 db;
+ unsigned int ndests;
+ struct i3c_ccc_cmd_dest *dests;
+ enum i3c_error_code err;
+diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
+index ef6217da8..c23de58e2 100644
+--- a/include/linux/i3c/device.h
++++ b/include/linux/i3c/device.h
+@@ -43,11 +43,32 @@ enum i3c_error_code {
+ * @I3C_HDR_DDR: DDR mode
+ * @I3C_HDR_TSP: TSP mode
+ * @I3C_HDR_TSL: TSL mode
++ * @I3C_HDR_BT: BT mode
+ */
+ enum i3c_hdr_mode {
+ I3C_HDR_DDR,
+ I3C_HDR_TSP,
+ I3C_HDR_TSL,
++ I3C_HDR_BT,
++};
++
++/**
++ * struct i3c_hdr_cmd - I3C HDR command
++ * @mode: HDR mode selected for this command
++ * @code: command opcode
++ * @ndatawords: number of data words (a word is 16bits wide)
++ * @data: input/output buffer
++ * @err: I3C error code
++ */
++struct i3c_hdr_cmd {
++ enum i3c_hdr_mode mode;
++ u8 code;
++ int ndatawords;
++ union {
++ void *in;
++ const void *out;
++ } data;
++ enum i3c_error_code err;
+ };
+
+ /**
+@@ -77,6 +98,17 @@ struct i3c_priv_xfer {
+ */
+ enum i3c_dcr {
+ I3C_DCR_GENERIC_DEVICE = 0,
++ I3C_DCR_HUB = 194,
++ I3C_DCR_JESD403_BEGIN = 208,
++ I3C_DCR_THERMAL_SENSOR_FIRST = 210,
++ I3C_DCR_THERMAL_SENSOR_SECOND = 214,
++ I3C_DCR_PMIC_SECOND = 216,
++ I3C_DCR_PMIC_FIRST = 217,
++ I3C_DCR_SPD_HUB = 218,
++ I3C_DCR_RCD = 219,
++ I3C_DCR_PMIC_THIRD = 220,
++ I3C_DCR_JESD403_END = 223,
++ I3C_DCR_MAX = 228,
+ };
+
+ #define I3C_PID_MANUF_ID(pid) (((pid) & GENMASK_ULL(47, 33)) >> 33)
+@@ -98,7 +130,7 @@ enum i3c_dcr {
+
+ /**
+ * struct i3c_device_info - I3C device information
+- * @pid: Provisional ID
++ * @pid: Provisioned ID
+ * @bcr: Bus Characteristic Register
+ * @dcr: Device Characteristic Register
+ * @static_addr: static/I2C address
+@@ -110,6 +142,8 @@ enum i3c_dcr {
+ * @max_read_turnaround: max read turn-around time in micro-seconds
+ * @max_read_len: max private SDR read length in bytes
+ * @max_write_len: max private SDR write length in bytes
++ * @pec: flag telling whether PEC (Packet Error Check) generation and verification for read
++ * and write transactions is enabled
+ *
+ * These are all basic information that should be advertised by an I3C device.
+ * Some of them are optional depending on the device type and device
+@@ -131,6 +165,8 @@ struct i3c_device_info {
+ u32 max_read_turnaround;
+ u16 max_read_len;
+ u16 max_write_len;
++ u8 pec;
++ __be16 status;
+ };
+
+ /*
+@@ -181,6 +217,7 @@ struct i3c_driver {
+ int (*probe)(struct i3c_device *dev);
+ void (*remove)(struct i3c_device *dev);
+ const struct i3c_device_id *id_table;
++ bool target;
+ };
+
+ static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv)
+@@ -306,6 +343,19 @@ int i3c_device_do_priv_xfers(struct i3c_device *dev,
+
+ int i3c_device_do_setdasa(struct i3c_device *dev);
+
++int i3c_device_getstatus_ccc(struct i3c_device *dev, struct i3c_device_info *info);
++
++int i3c_device_send_hdr_cmds(struct i3c_device *dev, struct i3c_hdr_cmd *cmds,
++ int ncmds);
++
++int i3c_device_generate_ibi(struct i3c_device *dev, const u8 *data, int len);
++
++int i3c_device_pending_read_notify(struct i3c_device *dev,
++ struct i3c_priv_xfer *pending_read,
++ struct i3c_priv_xfer *ibi_notify);
++
++bool i3c_device_is_ibi_enabled(struct i3c_device *dev);
++
+ void i3c_device_get_info(const struct i3c_device *dev, struct i3c_device_info *info);
+
+ struct i3c_ibi_payload {
+@@ -344,5 +394,48 @@ int i3c_device_request_ibi(struct i3c_device *dev,
+ void i3c_device_free_ibi(struct i3c_device *dev);
+ int i3c_device_enable_ibi(struct i3c_device *dev);
+ int i3c_device_disable_ibi(struct i3c_device *dev);
++int i3c_device_setmrl_ccc(struct i3c_device *dev, struct i3c_device_info *info, u16 read_len,
++ u8 ibi_len);
++int i3c_device_setmwl_ccc(struct i3c_device *dev, struct i3c_device_info *info, u16 write_len);
++int i3c_device_getmrl_ccc(struct i3c_device *dev, struct i3c_device_info *info);
++int i3c_device_getmwl_ccc(struct i3c_device *dev, struct i3c_device_info *info);
++int i3c_device_setaasa_ccc(struct i3c_device *dev);
++int i3c_device_sethid_ccc(struct i3c_device *dev);
++
++int i3c_device_dbgaction_wr_ccc(struct i3c_device *dev, struct i3c_device_info *info,
++ u8 *data, u8 len);
++int i3c_device_dbgopcode_wr_ccc(struct i3c_device *dev, struct i3c_device_info *info,
++ u8 *data, u8 len);
++int i3c_device_dbgopcode_rd_ccc(struct i3c_device *dev, struct i3c_device_info *info,
++ u8 *data, u8 len);
++
++struct i3c_target_read_setup {
++ void (*handler)(struct i3c_device *dev, const u8 *data, size_t len);
++};
++
++int i3c_target_read_register(struct i3c_device *dev, const struct i3c_target_read_setup *setup);
++
++int i3c_device_control_pec(struct i3c_device *dev, bool pec);
++
++/**
++ * enum i3c_event - List of possible events that can be sent/published to
++ * registered devices.
++ * @i3c_event_prepare_for_rescan: Event sent when the controller driver is going
++ * to run bus discovery again.
++ * @i3c_event_rescan_done: Event sent when the controller driver has run bus
++ * discovery again.
++ */
++enum i3c_event {
++ i3c_event_prepare_for_rescan = 0,
++ i3c_event_rescan_done,
++};
++
++/**
++ * i3c_event_cb - callback registered by device driver and used by controller
++ * driver to publish event.
++ */
++typedef void (*i3c_event_cb)(struct i3c_device *dev, enum i3c_event event);
++
++void i3c_device_register_event_cb(struct i3c_device *dev, i3c_event_cb cb);
+
+ #endif /* I3C_DEV_H */
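+
For reference only (not part of the cherry-picked patch): a minimal sketch of issuing one HDR-DDR write through the i3c_hdr_cmd extension added above. The command code 0x20 and payload are made up; real codes are device specific.
```
#include <linux/i3c/device.h>

static int example_hdr_ddr_write(struct i3c_device *dev,
                                 const u16 *words, int nwords)
{
        struct i3c_hdr_cmd cmd = {
                .mode = I3C_HDR_DDR,
                .code = 0x20,           /* made-up write command code */
                .ndatawords = nwords,   /* 16-bit words, per the struct doc */
                .data.out = words,
        };

        return i3c_device_send_hdr_cmds(dev, &cmd, 1);
}
```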
+diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
+index 0b52da4f2..f50311c69 100644
+--- a/include/linux/i3c/master.h
++++ b/include/linux/i3c/master.h
+@@ -24,6 +24,13 @@
+
+ struct i2c_client;
+
++struct i3c_target_ops;
++/* notifier actions. notifier call data is the struct i3c_bus */
++enum {
++ I3C_NOTIFY_BUS_ADD,
++ I3C_NOTIFY_BUS_REMOVE,
++};
++
+ struct i3c_master_controller;
+ struct i3c_bus;
+ struct i3c_device;
+@@ -129,6 +136,7 @@ struct i3c_ibi_slot {
+ * rejected by the master
+ * @num_slots: number of IBI slots reserved for this device
+ * @enabled: reflect the IBI status
++ * @wq: workqueue used to execute IBI handlers.
+ * @handler: IBI handler specified at i3c_device_request_ibi() call time. This
+ * handler will be called from the controller workqueue, and as such
+ * is allowed to sleep (though it is recommended to process the IBI
+@@ -151,6 +159,7 @@ struct i3c_device_ibi_info {
+ unsigned int max_payload_len;
+ unsigned int num_slots;
+ unsigned int enabled;
++ struct workqueue_struct *wq;
+ void (*handler)(struct i3c_device *dev,
+ const struct i3c_ibi_payload *payload);
+ };
+@@ -166,7 +175,7 @@ struct i3c_device_ibi_info {
+ * assigned a dynamic address by the master. Will be used during
+ * bus initialization to assign it a specific dynamic address
+ * before starting DAA (Dynamic Address Assignment)
+- * @pid: I3C Provisional ID exposed by the device. This is a unique identifier
++ * @pid: I3C Provisioned ID exposed by the device. This is a unique identifier
+ * that may be used to attach boardinfo to i3c_dev_desc when the device
+ * does not have a static address
+ * @of_node: optional DT node in case the device has been described in the DT
+@@ -181,14 +190,26 @@ struct i3c_dev_boardinfo {
+ u8 init_dyn_addr;
+ u8 static_addr;
+ u64 pid;
++ u8 bcr;
++ u8 dcr;
+ struct device_node *of_node;
+ };
+
++/**
++ * struct i3c_target_info - target information attached to a specific device
++ * @read_handler: handler specified at i3c_target_read_register() call time.
++ */
++
++struct i3c_target_info {
++ void (*read_handler)(struct i3c_device *dev, const u8 *data, size_t len);
++};
++
+ /**
+ * struct i3c_dev_desc - I3C device descriptor
+ * @common: common part of the I3C device descriptor
+ * @info: I3C device information. Will be automatically filled when you create
+ * your device with i3c_master_add_i3c_dev_locked()
++ * @target_info: I3C target information.
+ * @ibi_lock: lock used to protect the &struct_i3c_device->ibi
+ * @ibi: IBI info attached to a device. Should be NULL until
+ * i3c_device_request_ibi() is called
+@@ -197,6 +218,8 @@ struct i3c_dev_boardinfo {
+ * code should manipulate it in when updating the dev <-> desc link or
+ * when propagating IBI events to the driver
+ * @boardinfo: pointer to the boardinfo attached to this I3C device
++ * @event_cb: I3C framework event callback used to publish events to registered
++ * devices' drivers
+ *
+ * Internal representation of an I3C device. This object is only used by the
+ * core and passed to I3C master controller drivers when they're requested to
+@@ -207,10 +230,12 @@ struct i3c_dev_boardinfo {
+ struct i3c_dev_desc {
+ struct i3c_i2c_dev_desc common;
+ struct i3c_device_info info;
++ struct i3c_target_info target_info;
+ struct mutex ibi_lock;
+ struct i3c_device_ibi_info *ibi;
+ struct i3c_device *dev;
+ const struct i3c_dev_boardinfo *boardinfo;
++ i3c_event_cb event_cb;
+ };
+
+ /**
+@@ -246,6 +271,7 @@ struct i3c_device {
+ #define I3C_BUS_I2C_FM_PLUS_SCL_RATE 1000000
+ #define I3C_BUS_I2C_FM_SCL_RATE 400000
+ #define I3C_BUS_TLOW_OD_MIN_NS 200
++#define I3C_BUS_THIGH_MAX_NS 41
+
+ /**
+ * enum i3c_bus_mode - I3C bus mode
+@@ -343,6 +369,7 @@ struct i3c_bus {
+ struct list_head i2c;
+ } devs;
+ struct rw_semaphore lock;
++ u8 context;
+ };
+
+ /**
+@@ -384,6 +411,9 @@ struct i3c_bus {
+ * all CCC commands are supported.
+ * @send_ccc_cmd: send a CCC command
+ * This method is mandatory.
++ * @send_hdr_cmds: send one or several HDR commands. If there is more than one
++ * command, they should ideally be sent in the same HDR
++ * transaction
+ * @priv_xfers: do one or several private I3C SDR transfers
+ * This method is mandatory.
+ * @attach_i2c_dev: called every time an I2C device is attached to the bus.
+@@ -430,6 +460,7 @@ struct i3c_bus {
+ struct i3c_master_controller_ops {
+ int (*bus_init)(struct i3c_master_controller *master);
+ void (*bus_cleanup)(struct i3c_master_controller *master);
++ int (*bus_reset)(struct i3c_master_controller *master);
+ int (*attach_i3c_dev)(struct i3c_dev_desc *dev);
+ int (*reattach_i3c_dev)(struct i3c_dev_desc *dev, u8 old_dyn_addr);
+ void (*detach_i3c_dev)(struct i3c_dev_desc *dev);
+@@ -438,6 +469,8 @@ struct i3c_master_controller_ops {
+ const struct i3c_ccc_cmd *cmd);
+ int (*send_ccc_cmd)(struct i3c_master_controller *master,
+ struct i3c_ccc_cmd *cmd);
++ int (*send_hdr_cmds)(struct i3c_dev_desc *dev,
++ struct i3c_hdr_cmd *cmds, int ncmds);
+ int (*priv_xfers)(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers);
+@@ -452,6 +485,8 @@ struct i3c_master_controller_ops {
+ int (*disable_ibi)(struct i3c_dev_desc *dev);
+ void (*recycle_ibi_slot)(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot);
++ int (*enable_hotjoin)(struct i3c_master_controller *master);
++ int (*disable_hotjoin)(struct i3c_master_controller *master);
+ };
+
+ /**
+@@ -463,13 +498,15 @@ struct i3c_master_controller_ops {
+ * registered to the I2C subsystem to be as transparent as possible to
+ * existing I2C drivers
+ * @ops: master operations. See &struct i3c_master_controller_ops
++ * @target_ops: target operations. See &struct i3c_target_ops
++ * @target: true if the underlying I3C device acts as a target on I3C bus
+ * @secondary: true if the master is a secondary master
+ * @init_done: true when the bus initialization is done
+ * @boardinfo.i3c: list of I3C boardinfo objects
+ * @boardinfo.i2c: list of I2C boardinfo objects
+ * @boardinfo: board-level information attached to devices connected on the bus
+ * @bus: I3C bus exposed by this master
+- * @wq: workqueue used to execute IBI handlers. Can also be used by master
++ * @wq: workqueue which can be used by master
+ * drivers if they need to postpone operations that need to take place
+ * in a thread context. Typical examples are Hot Join processing which
+ * requires taking the bus lock in maintenance, which in turn, can only
+@@ -485,8 +522,12 @@ struct i3c_master_controller {
+ struct i3c_dev_desc *this;
+ struct i2c_adapter i2c;
+ const struct i3c_master_controller_ops *ops;
++ const struct i3c_target_ops *target_ops;
++ unsigned int pec_supported : 1;
++ unsigned int target : 1;
+ unsigned int secondary : 1;
+ unsigned int init_done : 1;
++ unsigned int hotjoin: 1;
+ struct {
+ struct list_head i3c;
+ struct list_head i2c;
+@@ -525,7 +566,12 @@ int i3c_master_disec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts);
+ int i3c_master_enec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts);
++int i3c_master_setmrl_locked(struct i3c_master_controller *master,
++ struct i3c_device_info *info, u16 read_len,
++ u8 ibi_len);
+ int i3c_master_entdaa_locked(struct i3c_master_controller *master);
++int i3c_master_setaasa_locked(struct i3c_master_controller *master);
++int i3c_master_sethid_locked(struct i3c_master_controller *master);
+ int i3c_master_defslvs_locked(struct i3c_master_controller *master);
+
+ int i3c_master_get_free_addr(struct i3c_master_controller *master,
+@@ -543,7 +589,12 @@ int i3c_master_register(struct i3c_master_controller *master,
+ const struct i3c_master_controller_ops *ops,
+ bool secondary);
+ void i3c_master_unregister(struct i3c_master_controller *master);
+-
++int i3c_register(struct i3c_master_controller *master,
++ struct device *parent,
++ const struct i3c_master_controller_ops *master_ops,
++ const struct i3c_target_ops *target_ops,
++ bool secondary);
++int i3c_unregister(struct i3c_master_controller *master);
+ /**
+ * i3c_dev_get_master_data() - get master private data attached to an I3C
+ * device descriptor
+@@ -652,4 +703,9 @@ void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot);
+
+ struct i3c_ibi_slot *i3c_master_get_free_ibi_slot(struct i3c_dev_desc *dev);
+
++void i3c_for_each_bus_locked(int (*fn)(struct i3c_bus *bus, void *data),
++ void *data);
++int i3c_register_notifier(struct notifier_block *nb);
++int i3c_unregister_notifier(struct notifier_block *nb);
++
+ #endif /* I3C_MASTER_H */
+diff --git a/include/linux/i3c/mctp/i3c-mctp.h b/include/linux/i3c/mctp/i3c-mctp.h
+new file mode 100644
+index 000000000..dd20750d7
+--- /dev/null
++++ b/include/linux/i3c/mctp/i3c-mctp.h
+@@ -0,0 +1,50 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/* Copyright (C) 2022 Intel Corporation.*/
++
++#ifndef I3C_MCTP_H
++#define I3C_MCTP_H
++
++#define I3C_MCTP_PACKET_SIZE 68
++#define I3C_MCTP_PAYLOAD_SIZE 64
++#define I3C_MCTP_HDR_SIZE 4
++
++/* PECI MCTP Intel VDM definitions */
++#define MCTP_MSG_TYPE_VDM_PCI 0x7E
++#define MCTP_VDM_PCI_INTEL_VENDOR_ID 0x8086
++#define MCTP_VDM_PCI_INTEL_PECI 0x2
++
++/* MCTP message header offsets */
++#define MCTP_MSG_HDR_MSG_TYPE_OFFSET 0
++#define MCTP_MSG_HDR_VENDOR_OFFSET 1
++#define MCTP_MSG_HDR_OPCODE_OFFSET 4
++
++struct i3c_mctp_client;
++
++struct mctp_protocol_hdr {
++ u8 ver;
++ u8 dest;
++ u8 src;
++ u8 flags_seq_tag;
++} __packed;
++
++struct i3c_mctp_packet_data {
++ u8 protocol_hdr[I3C_MCTP_HDR_SIZE];
++ u8 payload[I3C_MCTP_PAYLOAD_SIZE];
++};
++
++struct i3c_mctp_packet {
++ struct i3c_mctp_packet_data data;
++ u32 size;
++};
++
++void *i3c_mctp_packet_alloc(gfp_t flags);
++void i3c_mctp_packet_free(void *packet);
++
++int i3c_mctp_get_eid(struct i3c_mctp_client *client, u8 domain_id, u8 *eid);
++int i3c_mctp_send_packet(struct i3c_device *i3c, struct i3c_mctp_packet *tx_packet);
++struct i3c_mctp_packet *i3c_mctp_receive_packet(struct i3c_mctp_client *client,
++ unsigned long timeout);
++struct i3c_mctp_client *i3c_mctp_add_peci_client(struct i3c_device *i3c);
++void i3c_mctp_remove_peci_client(struct i3c_mctp_client *client);
++
++#endif /* I3C_MCTP_H */
+diff --git a/include/linux/i3c/target.h b/include/linux/i3c/target.h
+new file mode 100644
+index 000000000..6025b8c2f
+--- /dev/null
++++ b/include/linux/i3c/target.h
+@@ -0,0 +1,30 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/* Copyright (c) 2022, Intel Corporation */
++
++#ifndef I3C_TARGET_H
++#define I3C_TARGET_H
++
++#include <linux/device.h>
++#include <linux/i3c/device.h>
++
++struct i3c_master_controller;
++
++struct i3c_target_ops {
++ int (*bus_init)(struct i3c_master_controller *master);
++ void (*bus_cleanup)(struct i3c_master_controller *master);
++ int (*hj_req)(struct i3c_dev_desc *dev);
++ int (*priv_xfers)(struct i3c_dev_desc *dev, struct i3c_priv_xfer *xfers, int nxfers);
++ int (*generate_ibi)(struct i3c_dev_desc *dev, const u8 *data, int len);
++ int (*pending_read_notify)(struct i3c_dev_desc *dev,
++ struct i3c_priv_xfer *pending_read,
++ struct i3c_priv_xfer *ibi_notify);
++ bool (*is_ibi_enabled)(struct i3c_dev_desc *dev);
++ bool (*is_hj_enabled)(struct i3c_dev_desc *dev);
++ u8 (*get_dyn_addr)(struct i3c_master_controller *master);
++};
++
++int i3c_target_register(struct i3c_master_controller *master, struct device *parent,
++ const struct i3c_target_ops *ops);
++int i3c_target_unregister(struct i3c_master_controller *master);
++
++#endif
+diff --git a/include/linux/jtag.h b/include/linux/jtag.h
+new file mode 100644
+index 000000000..3b7157df3
+--- /dev/null
++++ b/include/linux/jtag.h
+@@ -0,0 +1,49 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/* Copyright (c) 2018 Mellanox Technologies. All rights reserved. */
++/* Copyright (c) 2018 Oleksandr Shamray <oleksandrs@mellanox.com> */
++/* Copyright (c) 2019 Intel Corporation */
++
++#ifndef __LINUX_JTAG_H
++#define __LINUX_JTAG_H
++
++#include <linux/types.h>
++#include <uapi/linux/jtag.h>
++
++#define JTAG_MAX_XFER_DATA_LEN (0xFFFFFFFF)
++
++struct jtag;
++/**
++ * struct jtag_ops - callbacks for JTAG control functions:
++ *
++ * @freq_get: get frequency function. Filled by dev driver
++ * @freq_set: set frequency function. Filled by dev driver
++ * @status_get: get JTAG TAPC state function. Mandatory, Filled by dev driver
++ * @status_set: set JTAG TAPC state function. Mandatory, Filled by dev driver
++ * @xfer: send JTAG xfer function. Mandatory func. Filled by dev driver
++ * @mode_set: set specific work mode for JTAG. Filled by dev driver
++ * @trst_set: set TRST pin active(pull low) for JTAG. Filled by dev driver
++ * @bitbang: set low level bitbang operations. Filled by dev driver
++ * @enable: enables JTAG interface in master mode. Filled by dev driver
++ * @disable: disables JTAG interface master mode. Filled by dev driver
++ */
++struct jtag_ops {
++ int (*freq_get)(struct jtag *jtag, u32 *freq);
++ int (*freq_set)(struct jtag *jtag, u32 freq);
++ int (*status_get)(struct jtag *jtag, u32 *state);
++ int (*status_set)(struct jtag *jtag, struct jtag_tap_state *endst);
++ int (*xfer)(struct jtag *jtag, struct jtag_xfer *xfer, u8 *xfer_data);
++ int (*mode_set)(struct jtag *jtag, struct jtag_mode *jtag_mode);
++ int (*trst_set)(struct jtag *jtag, u32 active);
++ int (*bitbang)(struct jtag *jtag, struct bitbang_packet *bitbang,
++ struct tck_bitbang *bitbang_data);
++ int (*enable)(struct jtag *jtag);
++ int (*disable)(struct jtag *jtag);
++};
++
++void *jtag_priv(struct jtag *jtag);
++int devm_jtag_register(struct device *dev, struct jtag *jtag);
++struct jtag *jtag_alloc(struct device *host, size_t priv_size,
++ const struct jtag_ops *ops);
++void jtag_free(struct jtag *jtag);
++
++#endif /* __LINUX_JTAG_H */
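+
For reference only (not part of the cherry-picked patch): a sketch of the registration flow a JTAG controller driver follows with the API above. The ops are stubbed; status_get, status_set and xfer are the mandatory callbacks, and the probe/driver names are hypothetical.
```
#include <linux/err.h>
#include <linux/io.h>
#include <linux/jtag.h>
#include <linux/platform_device.h>

struct example_jtag {
        void __iomem *regs;
};

static int example_status_get(struct jtag *jtag, u32 *state)
{
        *state = 0;                     /* stub */
        return 0;
}

static int example_status_set(struct jtag *jtag, struct jtag_tap_state *endst)
{
        return 0;                       /* stub */
}

static int example_xfer(struct jtag *jtag, struct jtag_xfer *xfer, u8 *data)
{
        return 0;                       /* stub */
}

static const struct jtag_ops example_jtag_ops = {
        .status_get = example_status_get,
        .status_set = example_status_set,
        .xfer = example_xfer,
};

static int example_jtag_probe(struct platform_device *pdev)
{
        struct example_jtag *priv;
        struct jtag *jtag;
        int ret;

        jtag = jtag_alloc(&pdev->dev, sizeof(*priv), &example_jtag_ops);
        if (!jtag)
                return -ENOMEM;

        priv = jtag_priv(jtag);         /* driver-private area of the instance */
        priv->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(priv->regs)) {
                ret = PTR_ERR(priv->regs);
                goto err_free;
        }

        ret = devm_jtag_register(&pdev->dev, jtag);
        if (ret)
                goto err_free;
        return 0;

err_free:
        jtag_free(jtag);
        return ret;
}
```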
+diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
+index 62a6847a3..9836e3923 100644
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -82,7 +82,9 @@ struct mmc_ios {
+
+ struct mmc_clk_phase {
+ bool valid;
++ bool inv_in_deg;
+ u16 in_deg;
++ bool inv_out_deg;
+ u16 out_deg;
+ };
+
+diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
+index cdcfe0fd2..7dc2eea25 100644
+--- a/include/linux/mtd/spi-nor.h
++++ b/include/linux/mtd/spi-nor.h
+@@ -21,6 +21,7 @@
+ /* Flash opcodes. */
+ #define SPINOR_OP_WRDI 0x04 /* Write disable */
+ #define SPINOR_OP_WREN 0x06 /* Write enable */
++#define SPINOR_OP_VSR_WREN 0x50 /* Write enable for volatile register */
+ #define SPINOR_OP_RDSR 0x05 /* Read status register */
+ #define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */
+ #define SPINOR_OP_RDSR2 0x3f /* Read status register 2 */
+@@ -80,6 +81,9 @@
+ /* Used for SST flashes only. */
+ #define SPINOR_OP_BP 0x02 /* Byte program */
+ #define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */
++#define SPINOR_OP_SST_RDNVCR 0xB5 /* Read nonvolatile configuration register */
++#define SPINOR_OP_SST_WRNVCR 0xB1 /* Write nonvolatile configuration register */
++#define SPINOR_SST_RST_HOLD_CTRL BIT(4) /* Nonvolatile configuration register bit 4*/
+
+ /* Used for Macronix and Winbond flashes. */
+ #define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */
+@@ -450,4 +454,6 @@ static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
+ int spi_nor_scan(struct spi_nor *nor, const char *name,
+ const struct spi_nor_hwcaps *hwcaps);
+
++u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr);
++
+ #endif
+diff --git a/include/linux/soc/aspeed/aspeed-otp.h b/include/linux/soc/aspeed/aspeed-otp.h
+new file mode 100644
+index 000000000..17695ea40
+--- /dev/null
++++ b/include/linux/soc/aspeed/aspeed-otp.h
+@@ -0,0 +1,11 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */
++/*
++ * Copyright (C) 2021 ASPEED Technology Inc.
++ */
++
++#ifndef _LINUX_ASPEED_OTP_H
++#define _LINUX_ASPEED_OTP_H
++
++void otp_read_data_buf(u32 offset, u32 *buf, u32 len);
++
++#endif /* _LINUX_ASPEED_OTP_H */
+diff --git a/include/linux/soc/aspeed/aspeed-udma.h b/include/linux/soc/aspeed/aspeed-udma.h
+new file mode 100644
+index 000000000..33acea745
+--- /dev/null
++++ b/include/linux/soc/aspeed/aspeed-udma.h
+@@ -0,0 +1,30 @@
++#ifndef __ASPEED_UDMA_H__
++#define __ASPEED_UDMA_H__
++
++#include <linux/circ_buf.h>
++
++typedef void (*aspeed_udma_cb_t)(int rb_rwptr, void *id);
++
++enum aspeed_udma_ops {
++ ASPEED_UDMA_OP_ENABLE,
++ ASPEED_UDMA_OP_DISABLE,
++ ASPEED_UDMA_OP_RESET,
++};
++
++void aspeed_udma_set_tx_wptr(u32 ch_no, u32 wptr);
++void aspeed_udma_set_rx_rptr(u32 ch_no, u32 rptr);
++
++void aspeed_udma_tx_chan_ctrl(u32 ch_no, enum aspeed_udma_ops op);
++void aspeed_udma_rx_chan_ctrl(u32 ch_no, enum aspeed_udma_ops op);
++
++int aspeed_udma_request_tx_chan(u32 ch_no, dma_addr_t addr,
++ struct circ_buf *rb, u32 rb_sz,
++ aspeed_udma_cb_t cb, void *id, bool en_tmout);
++int aspeed_udma_request_rx_chan(u32 ch_no, dma_addr_t addr,
++ struct circ_buf *rb, u32 rb_sz,
++ aspeed_udma_cb_t cb, void *id, bool en_tmout);
++
++int aspeed_udma_free_tx_chan(u32 ch_no);
++int aspeed_udma_free_rx_chan(u32 ch_no);
++
++#endif
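+
For reference only (not part of the cherry-picked patch): a sketch of claiming a UDMA TX channel with the API above. The channel number, ring size and completion handler are made up; the ring buffer must outlive the channel.
```
#include <linux/circ_buf.h>
#include <linux/dma-mapping.h>
#include <linux/soc/aspeed/aspeed-udma.h>

static struct circ_buf example_rb;      /* must outlive the channel */

static void example_tx_done(int rb_rwptr, void *id)
{
        /* rb_rwptr reflects how far the hardware has consumed the ring */
}

static int example_udma_tx_setup(struct device *dev)
{
        const u32 ch_no = 0, rb_sz = 4096;
        dma_addr_t dma;
        int ret;

        example_rb.buf = dma_alloc_coherent(dev, rb_sz, &dma, GFP_KERNEL);
        if (!example_rb.buf)
                return -ENOMEM;
        example_rb.head = example_rb.tail = 0;

        ret = aspeed_udma_request_tx_chan(ch_no, dma, &example_rb, rb_sz,
                                          example_tx_done, dev, false);
        if (ret) {
                dma_free_coherent(dev, rb_sz, example_rb.buf, dma);
                return ret;
        }

        aspeed_udma_tx_chan_ctrl(ch_no, ASPEED_UDMA_OP_ENABLE);
        return 0;
}
```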
+diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
+index 0b4f2d5fa..5a7f96684 100644
+--- a/include/linux/usb/chipidea.h
++++ b/include/linux/usb/chipidea.h
+@@ -64,6 +64,7 @@ struct ci_hdrc_platform_data {
+ #define CI_HDRC_PMQOS BIT(15)
+ #define CI_HDRC_PHY_VBUS_CONTROL BIT(16)
+ #define CI_HDRC_HAS_PORTSC_PEC_MISSED BIT(17)
++#define CI_HDRC_FORCE_VBUS_ACTIVE_ALWAYS BIT(18)
+ enum usb_dr_mode dr_mode;
+ #define CI_HDRC_CONTROLLER_RESET_EVENT 0
+ #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
+diff --git a/include/soc/aspeed/reset-aspeed.h b/include/soc/aspeed/reset-aspeed.h
+new file mode 100644
+index 000000000..54ac18551
+--- /dev/null
++++ b/include/soc/aspeed/reset-aspeed.h
+@@ -0,0 +1,21 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright (c) 2024 ASPEED Technology Inc.
++ * Author: Ryan Chen <ryan_chen@aspeedtech.com>
++ */
++
++#ifndef __RESET_ASPEED_H__
++#define __RESET_ASPEED_H__
++
++#if IS_ENABLED(CONFIG_RESET_ASPEED)
++int aspeed_reset_controller_register(struct device *clk_dev, void __iomem *base,
++ const char *adev_name);
++#else
++static inline int aspeed_reset_controller_register(struct device *clk_dev, void __iomem *base,
++ const char *adev_name)
++{
++ return -ENODEV;
++}
++#endif /* if IS_ENABLED(CONFIG_RESET_ASPEED) */
++
++#endif /* __RESET_ASPEED_H__ */
+diff --git a/include/trace/events/xdma.h b/include/trace/events/xdma.h
+new file mode 100644
+index 000000000..bf515ad3d
+--- /dev/null
++++ b/include/trace/events/xdma.h
+@@ -0,0 +1,139 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++
++#undef TRACE_SYSTEM
++#define TRACE_SYSTEM xdma
++
++#if !defined(_TRACE_XDMA_H) || defined(TRACE_HEADER_MULTI_READ)
++#define _TRACE_XDMA_H
++
++#include <linux/tracepoint.h>
++
++TRACE_EVENT(xdma_start,
++ TP_PROTO(const struct aspeed_xdma *ctx, const struct aspeed_xdma_cmd *cmd),
++ TP_ARGS(ctx, cmd),
++ TP_STRUCT__entry(
++ __field(bool, dir_upstream)
++ __field(unsigned int, index)
++ __field(__u64, host)
++ __field(__u64, pitch)
++ __field(__u64, cmd)
++ ),
++ TP_fast_assign(
++ __entry->dir_upstream = ctx->upstream;
++ __entry->index = ctx->cmd_idx;
++ __entry->host = cmd->host_addr;
++ __entry->pitch = cmd->pitch;
++ __entry->cmd = cmd->cmd;
++ ),
++ TP_printk("%s cmd:%u [%08llx %016llx %016llx]",
++ __entry->dir_upstream ? "upstream" : "downstream",
++ __entry->index,
++ __entry->host,
++ __entry->pitch,
++ __entry->cmd
++ )
++);
++
++TRACE_EVENT(xdma_irq,
++ TP_PROTO(u32 sts),
++ TP_ARGS(sts),
++ TP_STRUCT__entry(
++ __field(__u32, status)
++ ),
++ TP_fast_assign(
++ __entry->status = sts;
++ ),
++ TP_printk("sts:%08x",
++ __entry->status
++ )
++);
++
++TRACE_EVENT(xdma_reset,
++ TP_PROTO(const struct aspeed_xdma *ctx),
++ TP_ARGS(ctx),
++ TP_STRUCT__entry(
++ __field(bool, dir_upstream)
++ __field(bool, in_progress)
++ ),
++ TP_fast_assign(
++ __entry->dir_upstream = ctx->upstream;
++ __entry->in_progress =
++ ctx->current_client ? ctx->current_client->in_progress : false;
++ ),
++ TP_printk("%sin progress%s",
++ __entry->in_progress ? "" : "not ",
++ __entry->in_progress ? (__entry->dir_upstream ? " upstream" : " downstream") : ""
++ )
++);
++
++TRACE_EVENT(xdma_perst,
++ TP_PROTO(const struct aspeed_xdma *ctx),
++ TP_ARGS(ctx),
++ TP_STRUCT__entry(
++ __field(bool, in_reset)
++ ),
++ TP_fast_assign(
++ __entry->in_reset = ctx->in_reset;
++ ),
++ TP_printk("%s",
++ __entry->in_reset ? "in reset" : ""
++ )
++);
++
++TRACE_EVENT(xdma_unmap,
++ TP_PROTO(const struct aspeed_xdma_client *client),
++ TP_ARGS(client),
++ TP_STRUCT__entry(
++ __field(__u32, phys)
++ __field(__u32, size)
++ ),
++ TP_fast_assign(
++ __entry->phys = client->phys;
++ __entry->size = client->size;
++ ),
++ TP_printk("p:%08x s:%08x",
++ __entry->phys,
++ __entry->size
++ )
++);
++
++TRACE_EVENT(xdma_mmap_error,
++ TP_PROTO(const struct aspeed_xdma_client *client, unsigned long vm_start),
++ TP_ARGS(client, vm_start),
++ TP_STRUCT__entry(
++ __field(__u32, phys)
++ __field(__u32, size)
++ __field(unsigned long, vm_start)
++ ),
++ TP_fast_assign(
++ __entry->phys = client->phys;
++ __entry->size = client->size;
++ __entry->vm_start = vm_start;
++ ),
++ TP_printk("p:%08x s:%08x v:%08lx",
++ __entry->phys,
++ __entry->size,
++ __entry->vm_start
++ )
++);
++
++TRACE_EVENT(xdma_mmap,
++ TP_PROTO(const struct aspeed_xdma_client *client),
++ TP_ARGS(client),
++ TP_STRUCT__entry(
++ __field(__u32, phys)
++ __field(__u32, size)
++ ),
++ TP_fast_assign(
++ __entry->phys = client->phys;
++ __entry->size = client->size;
++ ),
++ TP_printk("p:%08x s:%08x",
++ __entry->phys,
++ __entry->size
++ )
++);
++
++#endif /* _TRACE_XDMA_H */
++
++#include <trace/define_trace.h>
+diff --git a/include/uapi/linux/aspeed-mctp.h b/include/uapi/linux/aspeed-mctp.h
+new file mode 100644
+index 000000000..3bac75294
+--- /dev/null
++++ b/include/uapi/linux/aspeed-mctp.h
+@@ -0,0 +1,136 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
++/* Copyright (c) 2020 Intel Corporation */
++
++#ifndef _UAPI_LINUX_ASPEED_MCTP_H
++#define _UAPI_LINUX_ASPEED_MCTP_H
++
++#include <linux/ioctl.h>
++#include <linux/types.h>
++
++/*
++ * aspeed-mctp is a simple device driver exposing a read/write interface:
++ * +----------------------+
++ * | PCIe VDM Header | 16 bytes (Big Endian)
++ * +----------------------+
++ * | MCTP Message Payload | 64/128/256/512 bytes (Big Endian)
++ * +----------------------+
++ *
++ * MCTP packet description can be found in DMTF DSP0238,
++ * MCTP PCIe VDM Transport Specification.
++ */
++
++#define ASPEED_MCTP_PCIE_VDM_HDR_SIZE 16
++
++/*
++ * uevents generated by aspeed-mctp driver
++ */
++#define ASPEED_MCTP_READY "PCIE_READY"
++
++/*
++ * maximum possible number of struct eid_info elements stored in list
++ */
++#define ASPEED_MCTP_EID_INFO_MAX 256
++
++/*
++ * MCTP operations
++ * @ASPEED_MCTP_IOCTL_FILTER_EID: enable/disable filtering of incoming packets
++ * based on Endpoint ID (BROKEN)
++ * @ASPEED_MCTP_IOCTL_GET_BDF: read PCI bus/device/function of MCTP Controller
++ * @ASPEED_MCTP_IOCTL_GET_MEDIUM_ID: read MCTP physical medium identifier
++ * related to PCIe revision
++ * @ASPEED_MCTP_IOCTL_GET_MTU: read max transmission unit (in bytes)
++ * @ASPEED_MCTP_IOCTL_REGISTER_DEFAULT_HANDLER: Register client as the default
++ * handler that receives all MCTP messages that were not dispatched to other
++ * clients
++ * @ASPEED_MCTP_IOCTL_REGISTER_TYPE_HANDLER: Register client to receive all
++ * messages of a specified MCTP type or PCI vendor defined type
++ * @ASPEED_MCTP_IOCTL_UNREGISTER_TYPE_HANDLER: Unregister client as the handler
++ * for a specified MCTP type or PCI vendor defined message type
++ * @ASPEED_MCTP_GET_EID_INFO: deprecated, use ASPEED_MCTP_GET_EID_EXT instead
++ * @ASPEED_MCTP_SET_EID_INFO: deprecated, use ASPEED_MCTP_SET_EID_EXT instead
++ * @ASPEED_MCTP_GET_EID_EXT_INFO: read the list of existing CPU EID and Domain ID
++ * mappings and return a count that is the lesser of the requested and existing counts
++ * @ASPEED_MCTP_SET_EID_EXT_INFO: write or overwrite already existing list of
++ * CPU EID and Domain ID mappings
++ * @ASPEED_MCTP_SET_OWN_EID: write/overwrite own EID information
++ */
++
++struct aspeed_mctp_filter_eid {
++ __u8 eid;
++ bool enable;
++};
++
++struct aspeed_mctp_get_bdf {
++ __u16 bdf;
++};
++
++struct aspeed_mctp_get_medium_id {
++ __u8 medium_id;
++};
++
++struct aspeed_mctp_get_mtu {
++ __u16 mtu;
++};
++
++struct aspeed_mctp_type_handler_ioctl {
++ __u8 mctp_type; /* MCTP message type as per DSP0239 */
++ /* Below params must be 0 if mctp_type is not Vendor Defined PCI */
++ __u16 pci_vendor_id; /* PCI Vendor ID */
++ __u16 vendor_type; /* Vendor specific type */
++ __u16 vendor_type_mask; /* Mask applied to vendor type */
++};
++
++struct aspeed_mctp_eid_info {
++ __u8 eid;
++ __u16 bdf;
++};
++
++struct aspeed_mctp_eid_ext_info {
++ __u8 eid;
++ __u16 bdf;
++ __u8 domain_id;
++};
++
++struct aspeed_mctp_get_eid_info {
++ __u64 ptr;
++ __u16 count;
++ __u8 start_eid;
++};
++
++struct aspeed_mctp_set_eid_info {
++ __u64 ptr;
++ __u16 count;
++};
++
++struct aspeed_mctp_set_own_eid {
++ __u8 eid;
++};
++
++#define ASPEED_MCTP_IOCTL_BASE 0x4d
++
++#define ASPEED_MCTP_IOCTL_FILTER_EID \
++ _IOW(ASPEED_MCTP_IOCTL_BASE, 0, struct aspeed_mctp_filter_eid)
++#define ASPEED_MCTP_IOCTL_GET_BDF \
++ _IOR(ASPEED_MCTP_IOCTL_BASE, 1, struct aspeed_mctp_get_bdf)
++#define ASPEED_MCTP_IOCTL_GET_MEDIUM_ID \
++ _IOR(ASPEED_MCTP_IOCTL_BASE, 2, struct aspeed_mctp_get_medium_id)
++#define ASPEED_MCTP_IOCTL_GET_MTU \
++ _IOR(ASPEED_MCTP_IOCTL_BASE, 3, struct aspeed_mctp_get_mtu)
++#define ASPEED_MCTP_IOCTL_REGISTER_DEFAULT_HANDLER \
++ _IO(ASPEED_MCTP_IOCTL_BASE, 4)
++#define ASPEED_MCTP_IOCTL_REGISTER_TYPE_HANDLER \
++ _IOW(ASPEED_MCTP_IOCTL_BASE, 6, struct aspeed_mctp_type_handler_ioctl)
++#define ASPEED_MCTP_IOCTL_UNREGISTER_TYPE_HANDLER \
++ _IOW(ASPEED_MCTP_IOCTL_BASE, 7, struct aspeed_mctp_type_handler_ioctl)
++#define ASPEED_MCTP_IOCTL_GET_EID_INFO \
++ _IOWR(ASPEED_MCTP_IOCTL_BASE, 8, struct aspeed_mctp_get_eid_info)
++#define ASPEED_MCTP_IOCTL_SET_EID_INFO \
++ _IOW(ASPEED_MCTP_IOCTL_BASE, 9, struct aspeed_mctp_set_eid_info)
++#define ASPEED_MCTP_IOCTL_GET_EID_EXT_INFO \
++ _IOW(ASPEED_MCTP_IOCTL_BASE, 10, struct aspeed_mctp_get_eid_info)
++#define ASPEED_MCTP_IOCTL_SET_EID_EXT_INFO \
++ _IOW(ASPEED_MCTP_IOCTL_BASE, 11, struct aspeed_mctp_set_eid_info)
++#define ASPEED_MCTP_IOCTL_SET_OWN_EID \
++ _IOW(ASPEED_MCTP_IOCTL_BASE, 12, struct aspeed_mctp_set_own_eid)
++
++#endif /* _UAPI_LINUX_ASPEED_MCTP_H */
+diff --git a/include/uapi/linux/aspeed-otp.h b/include/uapi/linux/aspeed-otp.h
+new file mode 100644
+index 000000000..e713a07f3
+--- /dev/null
++++ b/include/uapi/linux/aspeed-otp.h
+@@ -0,0 +1,50 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */
++/*
++ * Copyright (C) 2021 ASPEED Technology Inc.
++ */
++
++#ifndef _UAPI_LINUX_ASPEED_OTP_H
++#define _UAPI_LINUX_ASPEED_OTP_H
++
++#include <linux/ioctl.h>
++#include <linux/types.h>
++
++struct otp_read {
++ unsigned int offset;
++ unsigned int len;
++ uint8_t *data;
++};
++
++struct otp_prog {
++ unsigned int dw_offset;
++ unsigned int bit_offset;
++ unsigned int value;
++ unsigned int w_offset;
++ unsigned int len;
++ uint8_t *data;
++};
++
++struct otp_revid {
++ uint32_t revid0;
++ uint32_t revid1;
++};
++
++#define OTP_A0 0
++#define OTP_A1 1
++#define OTP_A2 2
++#define OTP_A3 3
++
++#define OTPIOC_BASE 'O'
++
++#define ASPEED_OTP_READ_DATA _IOR(OTPIOC_BASE, 0, struct otp_read)
++#define ASPEED_OTP_READ_CONF _IOR(OTPIOC_BASE, 1, struct otp_read)
++#define ASPEED_OTP_PROG_DATA _IOW(OTPIOC_BASE, 2, struct otp_prog)
++#define ASPEED_OTP_PROG_CONF _IOW(OTPIOC_BASE, 3, struct otp_prog)
++#define ASPEED_OTP_VER _IOR(OTPIOC_BASE, 4, unsigned int)
++#define ASPEED_OTP_SW_RID _IOR(OTPIOC_BASE, 5, u32 *)
++#define ASPEED_SEC_KEY_NUM _IOR(OTPIOC_BASE, 6, u32 *)
++#define ASPEED_OTP_GET_ECC _IOR(OTPIOC_BASE, 7, uint32_t)
++#define ASPEED_OTP_SET_ECC _IO(OTPIOC_BASE, 8)
++#define ASPEED_OTP_GET_REVID _IOR(OTPIOC_BASE, 9, struct otp_revid)
++
++#endif /* _UAPI_LINUX_ASPEED_OTP_H */
+diff --git a/include/uapi/linux/aspeed-video.h b/include/uapi/linux/aspeed-video.h
+index 6586a6554..f55299d60 100644
+--- a/include/uapi/linux/aspeed-video.h
++++ b/include/uapi/linux/aspeed-video.h
+@@ -8,6 +8,15 @@
+
+ #include <linux/v4l2-controls.h>
+
++// enum for aspeed video's v4l2 s_input
++enum aspeed_video_input {
++ VIDEO_INPUT_VGA = 0,
++ VIDEO_INPUT_GFX,
++ VIDEO_INPUT_MEM,
++ VIDEO_INPUT_DVI,
++ VIDEO_INPUT_MAX
++};
++
+ #define V4L2_CID_ASPEED_HQ_MODE (V4L2_CID_USER_ASPEED_BASE + 1)
+ #define V4L2_CID_ASPEED_HQ_JPEG_QUALITY (V4L2_CID_USER_ASPEED_BASE + 2)
+
+diff --git a/include/uapi/linux/aspeed-xdma.h b/include/uapi/linux/aspeed-xdma.h
+new file mode 100644
+index 000000000..3a3646fd1
+--- /dev/null
++++ b/include/uapi/linux/aspeed-xdma.h
+@@ -0,0 +1,42 @@
++/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
++/* Copyright IBM Corp 2019 */
++
++#ifndef _UAPI_LINUX_ASPEED_XDMA_H_
++#define _UAPI_LINUX_ASPEED_XDMA_H_
++
++#include <linux/ioctl.h>
++#include <linux/types.h>
++
++#define __ASPEED_XDMA_IOCTL_MAGIC 0xb7
++#define ASPEED_XDMA_IOCTL_RESET _IO(__ASPEED_XDMA_IOCTL_MAGIC, 0)
++
++/*
++ * aspeed_xdma_direction
++ *
++ * ASPEED_XDMA_DIRECTION_DOWNSTREAM: transfers data from the host to the BMC
++ *
++ * ASPEED_XDMA_DIRECTION_UPSTREAM: transfers data from the BMC to the host
++ */
++enum aspeed_xdma_direction {
++ ASPEED_XDMA_DIRECTION_DOWNSTREAM = 0,
++ ASPEED_XDMA_DIRECTION_UPSTREAM,
++};
++
++/*
++ * aspeed_xdma_op
++ *
++ * host_addr: the DMA address on the host side, typically configured by PCI
++ * subsystem
++ *
++ * len: the size of the transfer in bytes
++ *
++ * direction: an enumerator indicating the direction of the DMA operation; see
++ * enum aspeed_xdma_direction
++ */
++struct aspeed_xdma_op {
++ __u64 host_addr;
++ __u32 len;
++ __u32 direction;
++};
++
++#endif /* _UAPI_LINUX_ASPEED_XDMA_H_ */
+diff --git a/include/uapi/linux/i3c/i3cdev.h b/include/uapi/linux/i3c/i3cdev.h
+new file mode 100644
+index 000000000..5adc1e3e7
+--- /dev/null
++++ b/include/uapi/linux/i3c/i3cdev.h
+@@ -0,0 +1,37 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
++/*
++ * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates.
++ *
++ * Author: Vitor Soares <vitor.soares@synopsys.com>
++ */
++
++#ifndef _UAPI_I3C_DEV_H_
++#define _UAPI_I3C_DEV_H_
++
++#include <linux/types.h>
++#include <linux/ioctl.h>
++
++/* IOCTL commands */
++#define I3C_DEV_IOC_MAGIC 0x07
++
++/**
++ * struct i3c_ioc_priv_xfer - I3C SDR ioctl private transfer
++ * @data: Holds pointer to userspace buffer with transmit data.
++ * @len: Length of the data buffer, in bytes.
++ * @rnw: encodes the transfer direction. true for a read, false for a write
++ */
++struct i3c_ioc_priv_xfer {
++ __u64 data;
++ __u16 len;
++ __u8 rnw;
++ __u8 pad[5];
++};
++
++#define I3C_PRIV_XFER_SIZE(N) \
++ ((((sizeof(struct i3c_ioc_priv_xfer)) * (N)) < (1 << _IOC_SIZEBITS)) \
++ ? ((sizeof(struct i3c_ioc_priv_xfer)) * (N)) : 0)
++
++#define I3C_IOC_PRIV_XFER(N) \
++ _IOC(_IOC_READ|_IOC_WRITE, I3C_DEV_IOC_MAGIC, 30, I3C_PRIV_XFER_SIZE(N))
++
++#endif
+diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h
+index 0824fbc02..7c40be624 100644
+--- a/include/uapi/linux/if_alg.h
++++ b/include/uapi/linux/if_alg.h
+@@ -53,9 +53,12 @@ struct af_alg_iv {
+ #define ALG_SET_AEAD_AUTHSIZE 5
+ #define ALG_SET_DRBG_ENTROPY 6
+ #define ALG_SET_KEY_BY_KEY_SERIAL 7
++#define ALG_SET_PUBKEY 8
+
+ /* Operations */
+ #define ALG_OP_DECRYPT 0
+ #define ALG_OP_ENCRYPT 1
++#define ALG_OP_SIGN 2
++#define ALG_OP_VERIFY 3
+
+ #endif /* _LINUX_IF_ALG_H */
+diff --git a/include/uapi/linux/jtag.h b/include/uapi/linux/jtag.h
+new file mode 100644
+index 000000000..77d0b471e
+--- /dev/null
++++ b/include/uapi/linux/jtag.h
+@@ -0,0 +1,370 @@
++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
++/* Copyright (c) 2018 Mellanox Technologies. All rights reserved. */
++/* Copyright (c) 2018 Oleksandr Shamray <oleksandrs@mellanox.com> */
++/* Copyright (c) 2019 Intel Corporation */
++
++#ifndef __UAPI_LINUX_JTAG_H
++#define __UAPI_LINUX_JTAG_H
++
++#include <linux/types.h>
++#include <linux/ioctl.h>
++
++/*
++ * JTAG_XFER_MODE: JTAG transfer mode. Used to set JTAG controller transfer mode
++ * This is bitmask for feature param in jtag_mode for ioctl JTAG_SIOCMODE
++ */
++#define JTAG_XFER_MODE 0
++/*
++ * JTAG_CONTROL_MODE: JTAG controller mode. Used to set JTAG controller mode
++ * This is bitmask for feature param in jtag_mode for ioctl JTAG_SIOCMODE
++ */
++#define JTAG_CONTROL_MODE 1
++/*
++ * JTAG_MASTER_OUTPUT_DISABLE: JTAG master mode output disable, it is used to
++ * enable other devices to own the JTAG bus.
++ * This is bitmask for mode param in jtag_mode for ioctl JTAG_SIOCMODE
++ */
++#define JTAG_MASTER_OUTPUT_DISABLE 0
++/*
++ * JTAG_MASTER_MODE: JTAG master mode. Used to set JTAG controller master mode
++ * This is bitmask for mode param in jtag_mode for ioctl JTAG_SIOCMODE
++ */
++#define JTAG_MASTER_MODE 1
++/*
++ * JTAG_XFER_HW_MODE: JTAG hardware mode. Used to set HW driven or bitbang
++ * mode. This is bitmask for mode param in jtag_mode for ioctl JTAG_SIOCMODE
++ */
++#define JTAG_XFER_HW_MODE 1
++/*
++ * JTAG_XFER_SW_MODE: JTAG software mode. Used to set SW driven or bitbang
++ * mode. This is bitmask for mode param in jtag_mode for ioctl JTAG_SIOCMODE
++ */
++#define JTAG_XFER_SW_MODE 0
++
++/**
++ * enum jtag_tapstate:
++ *
++ * @JTAG_STATE_TLRESET: JTAG state machine Test Logic Reset state
++ * @JTAG_STATE_IDLE: JTAG state machine IDLE state
++ * @JTAG_STATE_SELECTDR: JTAG state machine SELECT_DR state
++ * @JTAG_STATE_CAPTUREDR: JTAG state machine CAPTURE_DR state
++ * @JTAG_STATE_SHIFTDR: JTAG state machine SHIFT_DR state
++ * @JTAG_STATE_EXIT1DR: JTAG state machine EXIT-1 DR state
++ * @JTAG_STATE_PAUSEDR: JTAG state machine PAUSE_DR state
++ * @JTAG_STATE_EXIT2DR: JTAG state machine EXIT-2 DR state
++ * @JTAG_STATE_UPDATEDR: JTAG state machine UPDATE DR state
++ * @JTAG_STATE_SELECTIR: JTAG state machine SELECT_IR state
++ * @JTAG_STATE_CAPTUREIR: JTAG state machine CAPTURE_IR state
++ * @JTAG_STATE_SHIFTIR: JTAG state machine SHIFT_IR state
++ * @JTAG_STATE_EXIT1IR: JTAG state machine EXIT-1 IR state
++ * @JTAG_STATE_PAUSEIR: JTAG state machine PAUSE_IR state
++ * @JTAG_STATE_EXIT2IR: JTAG state machine EXIT-2 IR state
++ * @JTAG_STATE_UPDATEIR: JTAG state machine UPDATE IR state
++ * @JTAG_STATE_CURRENT: JTAG current state, saved by driver
++ */
++enum jtag_tapstate {
++ JTAG_STATE_TLRESET,
++ JTAG_STATE_IDLE,
++ JTAG_STATE_SELECTDR,
++ JTAG_STATE_CAPTUREDR,
++ JTAG_STATE_SHIFTDR,
++ JTAG_STATE_EXIT1DR,
++ JTAG_STATE_PAUSEDR,
++ JTAG_STATE_EXIT2DR,
++ JTAG_STATE_UPDATEDR,
++ JTAG_STATE_SELECTIR,
++ JTAG_STATE_CAPTUREIR,
++ JTAG_STATE_SHIFTIR,
++ JTAG_STATE_EXIT1IR,
++ JTAG_STATE_PAUSEIR,
++ JTAG_STATE_EXIT2IR,
++ JTAG_STATE_UPDATEIR,
++ JTAG_STATE_CURRENT
++};
++
++/**
++ * enum jtag_reset:
++ *
++ * @JTAG_NO_RESET: JTAG run TAP from current state
++ * @JTAG_FORCE_RESET: JTAG force TAP to reset state
++ */
++enum jtag_reset {
++ JTAG_NO_RESET = 0,
++ JTAG_FORCE_RESET = 1,
++};
++
++/**
++ * enum jtag_xfer_type:
++ *
++ * @JTAG_SIR_XFER: SIR transfer
++ * @JTAG_SDR_XFER: SDR transfer
++ */
++enum jtag_xfer_type {
++ JTAG_SIR_XFER = 0,
++ JTAG_SDR_XFER = 1,
++};
++
++/**
++ * enum jtag_xfer_direction:
++ *
++ * @JTAG_READ_XFER: read transfer
++ * @JTAG_WRITE_XFER: write transfer
++ * @JTAG_READ_WRITE_XFER: read & write transfer
++ */
++enum jtag_xfer_direction {
++ JTAG_READ_XFER = 1,
++ JTAG_WRITE_XFER = 2,
++ JTAG_READ_WRITE_XFER = 3,
++};
++
++/**
++ * struct jtag_tap_state - forces JTAG state machine to go into a TAPC
++ * state
++ *
++ * @reset: 0 - run IDLE/PAUSE from current state
++ * 1 - go through TEST_LOGIC/RESET state before IDLE/PAUSE
++ * @from: TAP state to transition from
++ * @endstate: target TAP state; @tck: clock counter
++ *
++ * Structure provides an interface to the JTAG device for JTAG set state execution.
++ */
++struct jtag_tap_state {
++ __u8 reset;
++ __u8 from;
++ __u8 endstate;
++ __u8 tck;
++};
++
++/**
++ * union pad_config - Padding Configuration:
++ *
++ * @pre_pad_number: Number of pre-padding bits, bit[11:0]
++ * @post_pad_number: Number of post-padding bits, bit[23:12]
++ * @pad_data: Bit value to be used by pre and post padding, bit[24]
++ * @rsvd: Reserved, bit[31:25]
++ * @int_value: Packed padding configuration value, bit[31:0]
++ *
++ * Structure provides the pre- and post-padding configuration in a single __u32.
++ */
++union pad_config {
++ struct {
++ __u32 pre_pad_number : 12;
++ __u32 post_pad_number : 12;
++ __u32 pad_data : 1;
++ __u32 rsvd : 7;
++ };
++ __u32 int_value;
++};
++
++/**
++ * struct jtag_xfer - jtag xfer:
++ *
++ * @type: transfer type
++ * @direction: xfer direction
++ * @from: xfer current state
++ * @endstate: xfer end state
++ * @padding: xfer padding
++ * @length: xfer bits length
++ * @tdio: xfer data array
++ *
++ * Structure provides an interface to the JTAG device for JTAG SDR/SIR xfer execution.
++ */
++struct jtag_xfer {
++ __u8 type;
++ __u8 direction;
++ __u8 from;
++ __u8 endstate;
++ __u32 padding;
++ __u32 length;
++ __u64 tdio;
++};
++
++/**
++ * struct bitbang_packet - jtag bitbang array packet:
++ *
++ * @data: JTAG bitbang struct array pointer (input/output)
++ * @length: array size (input)
++ *
++ * Structure provides an interface to the JTAG device for JTAG bitbang bundle execution.
++ */
++struct bitbang_packet {
++ struct tck_bitbang *data;
++ __u32 length;
++} __attribute__((__packed__));
++
++/**
++ * struct tck_bitbang - jtag bitbang:
++ *
++ * @tms: JTAG TMS
++ * @tdi: JTAG TDI (input)
++ * @tdo: JTAG TDO (output)
++ *
++ * Structure provides an interface to the JTAG device for JTAG bitbang execution.
++ */
++struct tck_bitbang {
++ __u8 tms;
++ __u8 tdi;
++ __u8 tdo;
++} __attribute__((__packed__));
++
++/**
++ * struct jtag_mode - jtag mode:
++ *
++ * @feature: 0 - JTAG feature setting selector for JTAG controller HW/SW
++ * 1 - JTAG feature setting selector for controller bus master
++ * mode output (enable / disable).
++ * @mode: (0 - SW / 1 - HW) for JTAG_XFER_MODE feature(0)
++ * (0 - output disable / 1 - output enable) for JTAG_CONTROL_MODE
++ * feature(1)
++ *
++ * Structure provides configuration modes to the JTAG device.
++ */
++struct jtag_mode {
++ __u32 feature;
++ __u32 mode;
++};
++
++/* ioctl interface */
++#define __JTAG_IOCTL_MAGIC 0xb2
++
++#define JTAG_SIOCSTATE _IOW(__JTAG_IOCTL_MAGIC, 0, struct jtag_tap_state)
++#define JTAG_SIOCFREQ _IOW(__JTAG_IOCTL_MAGIC, 1, unsigned int)
++#define JTAG_GIOCFREQ _IOR(__JTAG_IOCTL_MAGIC, 2, unsigned int)
++#define JTAG_IOCXFER _IOWR(__JTAG_IOCTL_MAGIC, 3, struct jtag_xfer)
++#define JTAG_GIOCSTATUS _IOWR(__JTAG_IOCTL_MAGIC, 4, enum jtag_tapstate)
++#define JTAG_SIOCMODE _IOW(__JTAG_IOCTL_MAGIC, 5, unsigned int)
++#define JTAG_IOCBITBANG _IOW(__JTAG_IOCTL_MAGIC, 6, unsigned int)
++#define JTAG_SIOCTRST _IOW(__JTAG_IOCTL_MAGIC, 7, unsigned int)
++
++/**
++ * struct tms_cycle - This structure represents a tms cycle state.
++ *
++ * @tmsbits: bitwise representation of the TMS transitions needed to
++ * move from one state to another.
++ * @count: number of TMS clock cycles needed to reach the target state.
++ *
++ */
++struct tms_cycle {
++ unsigned char tmsbits;
++ unsigned char count;
++};
++
++/*
++ * This is the complete set of TMS cycles for going from any TAP state to any
++ * other TAP state, following a "shortest path" rule.
++ */
++static const struct tms_cycle _tms_cycle_lookup[][16] = {
++/* TLR RTI SelDR CapDR SDR Ex1DR*/
++/* TLR */{{0x00, 0}, {0x00, 1}, {0x02, 2}, {0x02, 3}, {0x02, 4}, {0x0a, 4},
++/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
++ {0x0a, 5}, {0x2a, 6}, {0x1a, 5}, {0x06, 3}, {0x06, 4}, {0x06, 5},
++/* Ex1IR PIR Ex2IR UpdIR*/
++ {0x16, 5}, {0x16, 6}, {0x56, 7}, {0x36, 6} },
++
++/* TLR RTI SelDR CapDR SDR Ex1DR*/
++/* RTI */{{0x07, 3}, {0x00, 0}, {0x01, 1}, {0x01, 2}, {0x01, 3}, {0x05, 3},
++/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
++ {0x05, 4}, {0x15, 5}, {0x0d, 4}, {0x03, 2}, {0x03, 3}, {0x03, 4},
++/* Ex1IR PIR Ex2IR UpdIR*/
++ {0x0b, 4}, {0x0b, 5}, {0x2b, 6}, {0x1b, 5} },
++
++/* TLR RTI SelDR CapDR SDR Ex1DR*/
++/* SelDR*/{{0x03, 2}, {0x03, 3}, {0x00, 0}, {0x00, 1}, {0x00, 2}, {0x02, 2},
++/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
++ {0x02, 3}, {0x0a, 4}, {0x06, 3}, {0x01, 1}, {0x01, 2}, {0x01, 3},
++/* Ex1IR PIR Ex2IR UpdIR*/
++ {0x05, 3}, {0x05, 4}, {0x15, 5}, {0x0d, 4} },
++
++/* TLR RTI SelDR CapDR SDR Ex1DR*/
++/* CapDR*/{{0x1f, 5}, {0x03, 3}, {0x07, 3}, {0x00, 0}, {0x00, 1}, {0x01, 1},
++/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
++ {0x01, 2}, {0x05, 3}, {0x03, 2}, {0x0f, 4}, {0x0f, 5}, {0x0f, 6},
++/* Ex1IR PIR Ex2IR UpdIR*/
++ {0x2f, 6}, {0x2f, 7}, {0xaf, 8}, {0x6f, 7} },
++
++/* TLR RTI SelDR CapDR SDR Ex1DR*/
++/* SDR */{{0x1f, 5}, {0x03, 3}, {0x07, 3}, {0x07, 4}, {0x00, 0}, {0x01, 1},
++/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
++ {0x01, 2}, {0x05, 3}, {0x03, 2}, {0x0f, 4}, {0x0f, 5}, {0x0f, 6},
++/* Ex1IR PIR Ex2IR UpdIR*/
++ {0x2f, 6}, {0x2f, 7}, {0xaf, 8}, {0x6f, 7} },
++
++/* TLR RTI SelDR CapDR SDR Ex1DR*/
++/* Ex1DR*/{{0x0f, 4}, {0x01, 2}, {0x03, 2}, {0x03, 3}, {0x02, 3}, {0x00, 0},
++/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
++ {0x00, 1}, {0x02, 2}, {0x01, 1}, {0x07, 3}, {0x07, 4}, {0x07, 5},
++/* Ex1IR PIR Ex2IR UpdIR*/
++ {0x17, 5}, {0x17, 6}, {0x57, 7}, {0x37, 6} },
++
++/* TLR RTI SelDR CapDR SDR Ex1DR*/
++/* PDR */{{0x1f, 5}, {0x03, 3}, {0x07, 3}, {0x07, 4}, {0x01, 2}, {0x05, 3},
++/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
++ {0x00, 0}, {0x01, 1}, {0x03, 2}, {0x0f, 4}, {0x0f, 5}, {0x0f, 6},
++/* Ex1IR PIR Ex2IR UpdIR*/
++ {0x2f, 6}, {0x2f, 7}, {0xaf, 8}, {0x6f, 7} },
++
++/* TLR RTI SelDR CapDR SDR Ex1DR*/
++/* Ex2DR*/{{0x0f, 4}, {0x01, 2}, {0x03, 2}, {0x03, 3}, {0x00, 1}, {0x02, 2},
++/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
++ {0x02, 3}, {0x00, 0}, {0x01, 1}, {0x07, 3}, {0x07, 4}, {0x07, 5},
++/* Ex1IR PIR Ex2IR UpdIR*/
++ {0x17, 5}, {0x17, 6}, {0x57, 7}, {0x37, 6} },
++
++/* TLR RTI SelDR CapDR SDR Ex1DR*/
++/* UpdDR*/{{0x07, 3}, {0x00, 1}, {0x01, 1}, {0x01, 2}, {0x01, 3}, {0x05, 3},
++/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
++ {0x05, 4}, {0x15, 5}, {0x00, 0}, {0x03, 2}, {0x03, 3}, {0x03, 4},
++/* Ex1IR PIR Ex2IR UpdIR*/
++ {0x0b, 4}, {0x0b, 5}, {0x2b, 6}, {0x1b, 5} },
++
++/* TLR RTI SelDR CapDR SDR Ex1DR*/
++/* SelIR*/{{0x01, 1}, {0x01, 2}, {0x05, 3}, {0x05, 4}, {0x05, 5}, {0x15, 5},
++/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
++ {0x15, 6}, {0x55, 7}, {0x35, 6}, {0x00, 0}, {0x00, 1}, {0x00, 2},
++/* Ex1IR PIR Ex2IR UpdIR*/
++ {0x02, 2}, {0x02, 3}, {0x0a, 4}, {0x06, 3} },
++
++/* TLR RTI SelDR CapDR SDR Ex1DR*/
++/* CapIR*/{{0x1f, 5}, {0x03, 3}, {0x07, 3}, {0x07, 4}, {0x07, 5}, {0x17, 5},
++/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
++ {0x17, 6}, {0x57, 7}, {0x37, 6}, {0x0f, 4}, {0x00, 0}, {0x00, 1},
++/* Ex1IR PIR Ex2IR UpdIR*/
++ {0x01, 1}, {0x01, 2}, {0x05, 3}, {0x03, 2} },
++
++/* TLR RTI SelDR CapDR SDR Ex1DR*/
++/* SIR */{{0x1f, 5}, {0x03, 3}, {0x07, 3}, {0x07, 4}, {0x07, 5}, {0x17, 5},
++/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
++ {0x17, 6}, {0x57, 7}, {0x37, 6}, {0x0f, 4}, {0x0f, 5}, {0x00, 0},
++/* Ex1IR PIR Ex2IR UpdIR*/
++ {0x01, 1}, {0x01, 2}, {0x05, 3}, {0x03, 2} },
++
++/* TLR RTI SelDR CapDR SDR Ex1DR*/
++/* Ex1IR*/{{0x0f, 4}, {0x01, 2}, {0x03, 2}, {0x03, 3}, {0x03, 4}, {0x0b, 4},
++/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
++ {0x0b, 5}, {0x2b, 6}, {0x1b, 5}, {0x07, 3}, {0x07, 4}, {0x02, 3},
++/* Ex1IR PIR Ex2IR UpdIR*/
++ {0x00, 0}, {0x00, 1}, {0x02, 2}, {0x01, 1} },
++
++/* TLR RTI SelDR CapDR SDR Ex1DR*/
++/* PIR */{{0x1f, 5}, {0x03, 3}, {0x07, 3}, {0x07, 4}, {0x07, 5}, {0x17, 5},
++/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
++ {0x17, 6}, {0x57, 7}, {0x37, 6}, {0x0f, 4}, {0x0f, 5}, {0x01, 2},
++/* Ex1IR PIR Ex2IR UpdIR*/
++ {0x05, 3}, {0x00, 0}, {0x01, 1}, {0x03, 2} },
++
++/* TLR RTI SelDR CapDR SDR Ex1DR*/
++/* Ex2IR*/{{0x0f, 4}, {0x01, 2}, {0x03, 2}, {0x03, 3}, {0x03, 4}, {0x0b, 4},
++/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
++ {0x0b, 5}, {0x2b, 6}, {0x1b, 5}, {0x07, 3}, {0x07, 4}, {0x00, 1},
++/* Ex1IR PIR Ex2IR UpdIR*/
++ {0x02, 2}, {0x02, 3}, {0x00, 0}, {0x01, 1} },
++
++/* TLR RTI SelDR CapDR SDR Ex1DR*/
++/* UpdIR*/{{0x07, 3}, {0x00, 1}, {0x01, 1}, {0x01, 2}, {0x01, 3}, {0x05, 3},
++/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
++ {0x05, 4}, {0x15, 5}, {0x0d, 4}, {0x03, 2}, {0x03, 3}, {0x03, 4},
++/* Ex1IR PIR Ex2IR UpdIR*/
++ {0x0b, 4}, {0x0b, 5}, {0x2b, 6}, {0x00, 0} },
++};
++
++#endif /* __UAPI_LINUX_JTAG_H */
+diff --git a/include/uapi/linux/npcm-video.h b/include/uapi/linux/npcm-video.h
+new file mode 100644
+index 000000000..1d39f6f38
+--- /dev/null
++++ b/include/uapi/linux/npcm-video.h
+@@ -0,0 +1,41 @@
++/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
++/*
++ * Controls header for NPCM video driver
++ *
++ * Copyright (C) 2022 Nuvoton Technologies
++ */
++
++#ifndef _UAPI_LINUX_NPCM_VIDEO_H
++#define _UAPI_LINUX_NPCM_VIDEO_H
++
++#include <linux/v4l2-controls.h>
++
++/*
++ * Check Documentation/userspace-api/media/drivers/npcm-video.rst for control
++ * details.
++ */
++
++/*
++ * This control is meant to set the mode of NPCM Video Capture/Differentiation
++ * (VCD) engine.
++ *
++ * The VCD engine supports two modes:
++ * COMPLETE - Capture the next complete frame into memory.
++ * DIFF - Compare the incoming frame with the frame stored in memory, and
++ * update the differentiated frame in memory.
++ */
++#define V4L2_CID_NPCM_CAPTURE_MODE (V4L2_CID_USER_NPCM_BASE + 0)
++
++enum v4l2_npcm_capture_mode {
++ V4L2_NPCM_CAPTURE_MODE_COMPLETE = 0, /* COMPLETE mode */
++ V4L2_NPCM_CAPTURE_MODE_DIFF = 1, /* DIFF mode */
++};
++
++/*
++ * This control is meant to get the count of compressed HEXTILE rectangles,
++ * which corresponds to the number of differentiated frames when the VCD is in
++ * DIFF mode. The count is always 1 when the VCD is in COMPLETE mode.
++ */
++#define V4L2_CID_NPCM_RECT_COUNT (V4L2_CID_USER_NPCM_BASE + 1)
++
++#endif /* _UAPI_LINUX_NPCM_VIDEO_H */
+diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
+index c3604a0a3..68db66d4a 100644
+--- a/include/uapi/linux/v4l2-controls.h
++++ b/include/uapi/linux/v4l2-controls.h
+@@ -203,6 +203,12 @@ enum v4l2_colorfx {
+ */
+ #define V4L2_CID_USER_ASPEED_BASE (V4L2_CID_USER_BASE + 0x11a0)
+
++/*
++ * The base for Nuvoton NPCM driver controls.
++ * We reserve 16 controls for this driver.
++ */
++#define V4L2_CID_USER_NPCM_BASE (V4L2_CID_USER_BASE + 0x11b0)
++
+ /* MPEG-class control IDs */
+ /* The MPEG controls are applicable to all codec controls
+ * and the 'MPEG' part of the define is historical */
+diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
+index 78260e5d9..e5ee05b8c 100644
+--- a/include/uapi/linux/videodev2.h
++++ b/include/uapi/linux/videodev2.h
+@@ -804,6 +804,7 @@ struct v4l2_pix_format {
+ #define V4L2_PIX_FMT_QC08C v4l2_fourcc('Q', '0', '8', 'C') /* Qualcomm 8-bit compressed */
+ #define V4L2_PIX_FMT_QC10C v4l2_fourcc('Q', '1', '0', 'C') /* Qualcomm 10-bit compressed */
+ #define V4L2_PIX_FMT_AJPG v4l2_fourcc('A', 'J', 'P', 'G') /* Aspeed JPEG */
++#define V4L2_PIX_FMT_HEXTILE v4l2_fourcc('H', 'X', 'T', 'L') /* Hextile compressed */
+
+ /* 10bit raw packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */
+ #define V4L2_PIX_FMT_IPU3_SBGGR10 v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */
+@@ -844,6 +845,7 @@ struct v4l2_pix_format {
+ /* Flags */
+ #define V4L2_PIX_FMT_FLAG_PREMUL_ALPHA 0x00000001
+ #define V4L2_PIX_FMT_FLAG_SET_CSC 0x00000002
++#define V4L2_PIX_FMT_FLAG_PARTIAL_JPG 0x00000004
+
+ /*
+ * F O R M A T E N U M E R A T I O N
+--
+2.34.1
+
diff --git a/recipes-kernel/linux/files/0003-Add-socX_intc-driver.patch b/recipes-kernel/linux/files/0003-Add-socX_intc-driver.patch
deleted file mode 100644
index 663f32c..0000000
--- a/recipes-kernel/linux/files/0003-Add-socX_intc-driver.patch
+++ /dev/null
@@ -1,237 +0,0 @@
-From e1ba6551e0db2da696a8d75eae8af6d3bb4fb55c Mon Sep 17 00:00:00 2001
-From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
-Date: Thu, 14 Nov 2024 16:06:21 +0800
-Subject: [PATCH] Add socX_intc driver
-
-Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
----
- drivers/irqchip/Makefile | 3 +-
- drivers/irqchip/irq-aspeed-intc.c | 202 ++++++++++++++++++++++++++++++
- 2 files changed, 204 insertions(+), 1 deletion(-)
- create mode 100644 drivers/irqchip/irq-aspeed-intc.c
-
-diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
-index ffd945fe71aa..9a5b5e0c993e 100644
---- a/drivers/irqchip/Makefile
-+++ b/drivers/irqchip/Makefile
-@@ -83,7 +83,8 @@ obj-$(CONFIG_MVEBU_PIC) += irq-mvebu-pic.o
- obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o
- obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o
- obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
--obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o
-+obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o irq-aspeed-intc.o
-+obj-$(CONFIG_MACH_ASPEED_G7) += irq-aspeed-intc.o
- obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
- obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
- obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
-diff --git a/drivers/irqchip/irq-aspeed-intc.c b/drivers/irqchip/irq-aspeed-intc.c
-new file mode 100644
-index 000000000000..da9a7763d242
---- /dev/null
-+++ b/drivers/irqchip/irq-aspeed-intc.c
-@@ -0,0 +1,202 @@
-+// SPDX-License-Identifier: GPL-2.0-only
-+/*
-+ * Aspeed Interrupt Controller.
-+ *
-+ * Copyright (C) 2023 ASPEED Technology Inc.
-+ */
-+
-+#include <linux/bitops.h>
-+#include <linux/irq.h>
-+#include <linux/irqchip.h>
-+#include <linux/irqchip/chained_irq.h>
-+#include <linux/irqdomain.h>
-+#include <linux/of_address.h>
-+#include <linux/of_irq.h>
-+#include <linux/io.h>
-+#include <linux/spinlock.h>
-+
-+#define INTC_INT_ENABLE_REG 0x00
-+#define INTC_INT_STATUS_REG 0x04
-+
-+struct aspeed_intc_ic {
-+ void __iomem *base;
-+ raw_spinlock_t gic_lock;
-+ raw_spinlock_t intc_lock;
-+ struct irq_domain *irq_domain;
-+};
-+
-+static void aspeed_intc_ic_irq_handler(struct irq_desc *desc)
-+{
-+ struct aspeed_intc_ic *intc_ic = irq_desc_get_handler_data(desc);
-+ struct irq_chip *chip = irq_desc_get_chip(desc);
-+ unsigned long bit, status, flags;
-+
-+ chained_irq_enter(chip, desc);
-+
-+ raw_spin_lock_irqsave(&intc_ic->gic_lock, flags);
-+ status = readl(intc_ic->base + INTC_INT_STATUS_REG);
-+ for_each_set_bit(bit, &status, 32) {
-+ generic_handle_domain_irq(intc_ic->irq_domain, bit);
-+ writel(BIT(bit), intc_ic->base + INTC_INT_STATUS_REG);
-+ }
-+ raw_spin_unlock_irqrestore(&intc_ic->gic_lock, flags);
-+
-+ chained_irq_exit(chip, desc);
-+}
-+
-+static void aspeed_intc_irq_mask(struct irq_data *data)
-+{
-+ struct aspeed_intc_ic *intc_ic = irq_data_get_irq_chip_data(data);
-+ unsigned int mask = readl(intc_ic->base + INTC_INT_ENABLE_REG) & ~BIT(data->hwirq);
-+ unsigned long flags;
-+
-+ raw_spin_lock_irqsave(&intc_ic->intc_lock, flags);
-+ writel(mask, intc_ic->base + INTC_INT_ENABLE_REG);
-+ raw_spin_unlock_irqrestore(&intc_ic->intc_lock, flags);
-+}
-+
-+static void aspeed_intc_irq_unmask(struct irq_data *data)
-+{
-+ struct aspeed_intc_ic *intc_ic = irq_data_get_irq_chip_data(data);
-+ unsigned int unmask = readl(intc_ic->base + INTC_INT_ENABLE_REG) | BIT(data->hwirq);
-+ unsigned long flags;
-+
-+ raw_spin_lock_irqsave(&intc_ic->intc_lock, flags);
-+ writel(unmask, intc_ic->base + INTC_INT_ENABLE_REG);
-+ raw_spin_unlock_irqrestore(&intc_ic->intc_lock, flags);
-+}
-+
-+static int aspeed_intc_irq_set_affinity(struct irq_data *data,
-+ const struct cpumask *dest,
-+ bool force)
-+{
-+ return -EINVAL;
-+}
-+
-+static struct irq_chip aspeed_intc_chip = {
-+ .name = "ASPEED INTC",
-+ .irq_mask = aspeed_intc_irq_mask,
-+ .irq_unmask = aspeed_intc_irq_unmask,
-+ .irq_set_affinity = aspeed_intc_irq_set_affinity,
-+};
-+
-+static int aspeed_intc_ic_map_irq_domain(struct irq_domain *domain,
-+ unsigned int irq, irq_hw_number_t hwirq)
-+{
-+ irq_set_chip_and_handler(irq, &aspeed_intc_chip, handle_level_irq);
-+ irq_set_chip_data(irq, domain->host_data);
-+
-+ return 0;
-+}
-+
-+static const struct irq_domain_ops aspeed_intc_ic_irq_domain_ops = {
-+ .map = aspeed_intc_ic_map_irq_domain,
-+};
-+
-+static int __init aspeed_intc_ic_of_init(struct device_node *node,
-+ struct device_node *parent)
-+{
-+ struct aspeed_intc_ic *intc_ic;
-+ int ret = 0;
-+ int irq;
-+
-+ intc_ic = kzalloc(sizeof(*intc_ic), GFP_KERNEL);
-+ if (!intc_ic)
-+ return -ENOMEM;
-+
-+ intc_ic->base = of_iomap(node, 0);
-+ if (!intc_ic->base) {
-+ pr_err("Failed to iomap intc_ic base\n");
-+ ret = -ENOMEM;
-+ goto err_free_ic;
-+ }
-+ writel(0xffffffff, intc_ic->base + INTC_INT_STATUS_REG);
-+ writel(0x0, intc_ic->base + INTC_INT_ENABLE_REG);
-+
-+ irq = irq_of_parse_and_map(node, 0);
-+ if (!irq) {
-+ pr_err("Failed to get irq number\n");
-+ ret = -EINVAL;
-+ goto err_iounmap;
-+ }
-+
-+ intc_ic->irq_domain = irq_domain_add_linear(node, 32,
-+ &aspeed_intc_ic_irq_domain_ops,
-+ intc_ic);
-+ if (!intc_ic->irq_domain) {
-+ ret = -ENOMEM;
-+ goto err_iounmap;
-+ }
-+
-+ raw_spin_lock_init(&intc_ic->gic_lock);
-+ raw_spin_lock_init(&intc_ic->intc_lock);
-+
-+ intc_ic->irq_domain->name = "aspeed-intc-domain";
-+
-+ irq_set_chained_handler_and_data(irq,
-+ aspeed_intc_ic_irq_handler, intc_ic);
-+
-+ return 0;
-+
-+err_iounmap:
-+ iounmap(intc_ic->base);
-+err_free_ic:
-+ kfree(intc_ic);
-+ return ret;
-+}
-+
-+static int __init aspeed_intc_ic_of_init_v2(struct device_node *node,
-+ struct device_node *parent)
-+{
-+ struct aspeed_intc_ic *intc_ic;
-+ int ret = 0;
-+ int irq, i;
-+
-+ intc_ic = kzalloc(sizeof(*intc_ic), GFP_KERNEL);
-+ if (!intc_ic)
-+ return -ENOMEM;
-+
-+ intc_ic->base = of_iomap(node, 0);
-+ if (!intc_ic->base) {
-+ pr_err("Failed to iomap intc_ic base\n");
-+ ret = -ENOMEM;
-+ goto err_free_ic;
-+ }
-+ writel(0xffffffff, intc_ic->base + INTC_INT_STATUS_REG);
-+ writel(0x0, intc_ic->base + INTC_INT_ENABLE_REG);
-+
-+ intc_ic->irq_domain = irq_domain_add_linear(node, 32,
-+ &aspeed_intc_ic_irq_domain_ops,
-+ intc_ic);
-+ if (!intc_ic->irq_domain) {
-+ ret = -ENOMEM;
-+ goto err_iounmap;
-+ }
-+
-+ raw_spin_lock_init(&intc_ic->gic_lock);
-+ raw_spin_lock_init(&intc_ic->intc_lock);
-+
-+ intc_ic->irq_domain->name = "aspeed-intc-domain";
-+
-+ for (i = 0; i < of_irq_count(node); i++) {
-+ irq = irq_of_parse_and_map(node, i);
-+ if (!irq) {
-+ pr_err("Failed to get irq number\n");
-+ ret = -EINVAL;
-+ goto err_iounmap;
-+ } else {
-+ irq_set_chained_handler_and_data(irq, aspeed_intc_ic_irq_handler, intc_ic);
-+ }
-+ }
-+
-+ return 0;
-+
-+err_iounmap:
-+ iounmap(intc_ic->base);
-+err_free_ic:
-+ kfree(intc_ic);
-+ return ret;
-+}
-+
-+IRQCHIP_DECLARE(ast2700_intc_ic, "aspeed,ast2700-intc-ic", aspeed_intc_ic_of_init);
-+IRQCHIP_DECLARE(ast2700_intc_icv2, "aspeed,ast2700-intc-icv2", aspeed_intc_ic_of_init_v2);
---
-2.34.1
-
diff --git a/recipes-kernel/linux/files/0004-Add-e2m_icX-for-A1.patch b/recipes-kernel/linux/files/0004-Add-e2m_icX-for-A1.patch
deleted file mode 100644
index 703d712..0000000
--- a/recipes-kernel/linux/files/0004-Add-e2m_icX-for-A1.patch
+++ /dev/null
@@ -1,212 +0,0 @@
-From 0681f1b9b92ec9520f772921c8b482850d935f02 Mon Sep 17 00:00:00 2001
-From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
-Date: Thu, 14 Nov 2024 16:15:10 +0800
-Subject: [PATCH] Add e2m_icX for A1
-
-Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
----
- drivers/irqchip/Makefile | 2 +-
- drivers/irqchip/irq-aspeed-e2m-ic.c | 178 ++++++++++++++++++++++++++++
- 2 files changed, 179 insertions(+), 1 deletion(-)
- create mode 100644 drivers/irqchip/irq-aspeed-e2m-ic.c
-
-diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
-index 9a5b5e0c993e..60d0aca020a4 100644
---- a/drivers/irqchip/Makefile
-+++ b/drivers/irqchip/Makefile
-@@ -84,7 +84,7 @@ obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o
- obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o
- obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
- obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o irq-aspeed-intc.o
--obj-$(CONFIG_MACH_ASPEED_G7) += irq-aspeed-intc.o
-+obj-$(CONFIG_MACH_ASPEED_G7) += irq-aspeed-intc.o irq-aspeed-e2m-ic.o
- obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
- obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
- obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
-diff --git a/drivers/irqchip/irq-aspeed-e2m-ic.c b/drivers/irqchip/irq-aspeed-e2m-ic.c
-new file mode 100644
-index 000000000000..e072925f72dc
---- /dev/null
-+++ b/drivers/irqchip/irq-aspeed-e2m-ic.c
-@@ -0,0 +1,178 @@
-+// SPDX-License-Identifier: GPL-2.0-or-later
-+/*
-+ * Aspeed AST27XX E2M Interrupt Controller
-+ * Copyright (C) 2023 ASPEED Technology Inc.
-+ *
-+ */
-+
-+#include <linux/bitops.h>
-+#include <linux/irq.h>
-+#include <linux/irqchip.h>
-+#include <linux/irqchip/chained_irq.h>
-+#include <linux/irqdomain.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/of_irq.h>
-+#include <linux/regmap.h>
-+
-+#define ASPEED_AST2700_E2M_IC_SHIFT 0
-+#define ASPEED_AST2700_E2M_IC_ENABLE \
-+ GENMASK(7, ASPEED_AST2700_E2M_IC_SHIFT)
-+#define ASPEED_AST2700_E2M_IC_NUM_IRQS 8
-+#define ASPEED_AST2700_E2M_IC_EN_REG 0x14
-+#define ASPEED_AST2700_E2M_IC_STS_REG 0x18
-+
-+struct aspeed_e2m_ic {
-+ unsigned long irq_enable;
-+ unsigned long irq_shift;
-+ unsigned int num_irqs;
-+ unsigned int reg;
-+ unsigned int en_reg;
-+ unsigned int sts_reg;
-+ struct regmap *e2m;
-+ struct irq_domain *irq_domain;
-+};
-+
-+static void aspeed_e2m_ic_irq_handler(struct irq_desc *desc)
-+{
-+ unsigned int val;
-+ unsigned long bit;
-+ unsigned long enabled;
-+ unsigned long max;
-+ unsigned long status;
-+ struct aspeed_e2m_ic *e2m_ic = irq_desc_get_handler_data(desc);
-+ struct irq_chip *chip = irq_desc_get_chip(desc);
-+ unsigned int mask;
-+
-+ chained_irq_enter(chip, desc);
-+
-+ mask = e2m_ic->irq_enable;
-+ regmap_read(e2m_ic->e2m, e2m_ic->en_reg, &val);
-+ enabled = val & e2m_ic->irq_enable;
-+ regmap_read(e2m_ic->e2m, e2m_ic->sts_reg, &val);
-+ status = val & enabled;
-+
-+ bit = e2m_ic->irq_shift;
-+ max = e2m_ic->num_irqs + bit;
-+
-+ for_each_set_bit_from(bit, &status, max) {
-+ generic_handle_domain_irq(e2m_ic->irq_domain, bit - e2m_ic->irq_shift);
-+
-+ regmap_write_bits(e2m_ic->e2m, e2m_ic->sts_reg, mask, BIT(bit));
-+ }
-+
-+ chained_irq_exit(chip, desc);
-+}
-+
-+static void aspeed_e2m_ic_irq_mask(struct irq_data *data)
-+{
-+ struct aspeed_e2m_ic *e2m_ic = irq_data_get_irq_chip_data(data);
-+ unsigned int mask;
-+
-+ mask = BIT(data->hwirq + e2m_ic->irq_shift);
-+ regmap_update_bits(e2m_ic->e2m, e2m_ic->en_reg, mask, 0);
-+}
-+
-+static void aspeed_e2m_ic_irq_unmask(struct irq_data *data)
-+{
-+ struct aspeed_e2m_ic *e2m_ic = irq_data_get_irq_chip_data(data);
-+ unsigned int bit = BIT(data->hwirq + e2m_ic->irq_shift);
-+ unsigned int mask;
-+
-+ mask = bit;
-+ regmap_update_bits(e2m_ic->e2m, e2m_ic->en_reg, mask, bit);
-+}
-+
-+static int aspeed_e2m_ic_irq_set_affinity(struct irq_data *data,
-+ const struct cpumask *dest,
-+ bool force)
-+{
-+ return -EINVAL;
-+}
-+
-+static struct irq_chip aspeed_scu_ic_chip = {
-+ .name = "aspeed-e2m-ic",
-+ .irq_mask = aspeed_e2m_ic_irq_mask,
-+ .irq_unmask = aspeed_e2m_ic_irq_unmask,
-+ .irq_set_affinity = aspeed_e2m_ic_irq_set_affinity,
-+};
-+
-+static int aspeed_e2m_ic_map(struct irq_domain *domain, unsigned int irq,
-+ irq_hw_number_t hwirq)
-+{
-+ irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip, handle_level_irq);
-+ irq_set_chip_data(irq, domain->host_data);
-+
-+ return 0;
-+}
-+
-+static const struct irq_domain_ops aspeed_e2m_ic_domain_ops = {
-+ .map = aspeed_e2m_ic_map,
-+};
-+
-+static int aspeed_e2m_ic_of_init_common(struct aspeed_e2m_ic *e2m_ic,
-+ struct device_node *node)
-+{
-+ int irq;
-+ int rc = 0;
-+
-+ if (!node->parent) {
-+ rc = -ENODEV;
-+ goto err;
-+ }
-+
-+ e2m_ic->e2m = syscon_node_to_regmap(node->parent);
-+ if (IS_ERR(e2m_ic->e2m)) {
-+ rc = PTR_ERR(e2m_ic->e2m);
-+ goto err;
-+ }
-+
-+ /* Clear status and disable all interrupt */
-+ regmap_write_bits(e2m_ic->e2m, e2m_ic->sts_reg,
-+ e2m_ic->irq_enable, e2m_ic->irq_enable);
-+ regmap_write_bits(e2m_ic->e2m, e2m_ic->en_reg,
-+ e2m_ic->irq_enable, 0);
-+
-+ irq = irq_of_parse_and_map(node, 0);
-+ if (!irq) {
-+ rc = -EINVAL;
-+ goto err;
-+ }
-+
-+ e2m_ic->irq_domain = irq_domain_add_linear(node, e2m_ic->num_irqs,
-+ &aspeed_e2m_ic_domain_ops,
-+ e2m_ic);
-+ if (!e2m_ic->irq_domain) {
-+ rc = -ENOMEM;
-+ goto err;
-+ }
-+
-+ irq_set_chained_handler_and_data(irq, aspeed_e2m_ic_irq_handler,
-+ e2m_ic);
-+
-+ return 0;
-+
-+err:
-+ kfree(e2m_ic);
-+
-+ return rc;
-+}
-+
-+static int __init aspeed_ast2700_e2m_ic_of_init(struct device_node *node,
-+ struct device_node *parent)
-+{
-+ struct aspeed_e2m_ic *e2m_ic = kzalloc(sizeof(*e2m_ic), GFP_KERNEL);
-+
-+ if (!e2m_ic)
-+ return -ENOMEM;
-+
-+ e2m_ic->irq_enable = ASPEED_AST2700_E2M_IC_ENABLE;
-+ e2m_ic->irq_shift = ASPEED_AST2700_E2M_IC_SHIFT;
-+ e2m_ic->num_irqs = ASPEED_AST2700_E2M_IC_NUM_IRQS;
-+ e2m_ic->en_reg = ASPEED_AST2700_E2M_IC_EN_REG;
-+ e2m_ic->sts_reg = ASPEED_AST2700_E2M_IC_STS_REG;
-+
-+ return aspeed_e2m_ic_of_init_common(e2m_ic, node);
-+}
-+
-+IRQCHIP_DECLARE(ast2700_e2m_ic, "aspeed,ast2700-e2m-ic",
-+ aspeed_ast2700_e2m_ic_of_init);
---
-2.34.1
-
diff --git a/recipes-kernel/linux/files/0004-Add-irqchip-driver-for-ast2700.patch b/recipes-kernel/linux/files/0004-Add-irqchip-driver-for-ast2700.patch
new file mode 100644
index 0000000..57a7f4c
--- /dev/null
+++ b/recipes-kernel/linux/files/0004-Add-irqchip-driver-for-ast2700.patch
@@ -0,0 +1,667 @@
+From 7b50e5835c0ed2188bfc1a3749cd690f7ab0da5a Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Fri, 7 Mar 2025 09:59:16 +0800
+Subject: [PATCH] Add irqchip driver for ast2700
+
+This is based on aspeed SDK 9.05.
+From linux-aspeed:
+769f62b7baa84d6998723b0ea60280e380183553
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
+---
+ drivers/irqchip/Makefile | 2 +-
+ drivers/irqchip/irq-aspeed-e2m-ic.c | 178 ++++++++++++++++++++
+ drivers/irqchip/irq-aspeed-intc.c | 140 ++++++++++++++++
+ drivers/irqchip/irq-aspeed-scu-ic.c | 241 ++++++++++++++++++++++------
+ 4 files changed, 515 insertions(+), 46 deletions(-)
+ create mode 100644 drivers/irqchip/irq-aspeed-e2m-ic.c
+ create mode 100644 drivers/irqchip/irq-aspeed-intc.c
+
+diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
+index ffd945fe7..950f3ce4e 100644
+--- a/drivers/irqchip/Makefile
++++ b/drivers/irqchip/Makefile
+@@ -83,7 +83,7 @@ obj-$(CONFIG_MVEBU_PIC) += irq-mvebu-pic.o
+ obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o
+ obj-$(CONFIG_LS_EXTIRQ) += irq-ls-extirq.o
+ obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
+-obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o
++obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o irq-aspeed-scu-ic.o irq-aspeed-intc.o irq-aspeed-e2m-ic.o
+ obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
+ obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
+ obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
+diff --git a/drivers/irqchip/irq-aspeed-e2m-ic.c b/drivers/irqchip/irq-aspeed-e2m-ic.c
+new file mode 100644
+index 000000000..e072925f7
+--- /dev/null
++++ b/drivers/irqchip/irq-aspeed-e2m-ic.c
+@@ -0,0 +1,178 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Aspeed AST27XX E2M Interrupt Controller
++ * Copyright (C) 2023 ASPEED Technology Inc.
++ *
++ */
++
++#include <linux/bitops.h>
++#include <linux/irq.h>
++#include <linux/irqchip.h>
++#include <linux/irqchip/chained_irq.h>
++#include <linux/irqdomain.h>
++#include <linux/mfd/syscon.h>
++#include <linux/of_irq.h>
++#include <linux/regmap.h>
++
++#define ASPEED_AST2700_E2M_IC_SHIFT 0
++#define ASPEED_AST2700_E2M_IC_ENABLE \
++ GENMASK(7, ASPEED_AST2700_E2M_IC_SHIFT)
++#define ASPEED_AST2700_E2M_IC_NUM_IRQS 8
++#define ASPEED_AST2700_E2M_IC_EN_REG 0x14
++#define ASPEED_AST2700_E2M_IC_STS_REG 0x18
++
++struct aspeed_e2m_ic {
++ unsigned long irq_enable;
++ unsigned long irq_shift;
++ unsigned int num_irqs;
++ unsigned int reg;
++ unsigned int en_reg;
++ unsigned int sts_reg;
++ struct regmap *e2m;
++ struct irq_domain *irq_domain;
++};
++
++static void aspeed_e2m_ic_irq_handler(struct irq_desc *desc)
++{
++ unsigned int val;
++ unsigned long bit;
++ unsigned long enabled;
++ unsigned long max;
++ unsigned long status;
++ struct aspeed_e2m_ic *e2m_ic = irq_desc_get_handler_data(desc);
++ struct irq_chip *chip = irq_desc_get_chip(desc);
++ unsigned int mask;
++
++ chained_irq_enter(chip, desc);
++
++ mask = e2m_ic->irq_enable;
++ regmap_read(e2m_ic->e2m, e2m_ic->en_reg, &val);
++ enabled = val & e2m_ic->irq_enable;
++ regmap_read(e2m_ic->e2m, e2m_ic->sts_reg, &val);
++ status = val & enabled;
++
++ bit = e2m_ic->irq_shift;
++ max = e2m_ic->num_irqs + bit;
++
++ for_each_set_bit_from(bit, &status, max) {
++ generic_handle_domain_irq(e2m_ic->irq_domain, bit - e2m_ic->irq_shift);
++
++ regmap_write_bits(e2m_ic->e2m, e2m_ic->sts_reg, mask, BIT(bit));
++ }
++
++ chained_irq_exit(chip, desc);
++}
++
++static void aspeed_e2m_ic_irq_mask(struct irq_data *data)
++{
++ struct aspeed_e2m_ic *e2m_ic = irq_data_get_irq_chip_data(data);
++ unsigned int mask;
++
++ mask = BIT(data->hwirq + e2m_ic->irq_shift);
++ regmap_update_bits(e2m_ic->e2m, e2m_ic->en_reg, mask, 0);
++}
++
++static void aspeed_e2m_ic_irq_unmask(struct irq_data *data)
++{
++ struct aspeed_e2m_ic *e2m_ic = irq_data_get_irq_chip_data(data);
++ unsigned int bit = BIT(data->hwirq + e2m_ic->irq_shift);
++ unsigned int mask;
++
++ mask = bit;
++ regmap_update_bits(e2m_ic->e2m, e2m_ic->en_reg, mask, bit);
++}
++
++static int aspeed_e2m_ic_irq_set_affinity(struct irq_data *data,
++ const struct cpumask *dest,
++ bool force)
++{
++ return -EINVAL;
++}
++
++static struct irq_chip aspeed_scu_ic_chip = {
++ .name = "aspeed-e2m-ic",
++ .irq_mask = aspeed_e2m_ic_irq_mask,
++ .irq_unmask = aspeed_e2m_ic_irq_unmask,
++ .irq_set_affinity = aspeed_e2m_ic_irq_set_affinity,
++};
++
++static int aspeed_e2m_ic_map(struct irq_domain *domain, unsigned int irq,
++ irq_hw_number_t hwirq)
++{
++ irq_set_chip_and_handler(irq, &aspeed_scu_ic_chip, handle_level_irq);
++ irq_set_chip_data(irq, domain->host_data);
++
++ return 0;
++}
++
++static const struct irq_domain_ops aspeed_e2m_ic_domain_ops = {
++ .map = aspeed_e2m_ic_map,
++};
++
++static int aspeed_e2m_ic_of_init_common(struct aspeed_e2m_ic *e2m_ic,
++ struct device_node *node)
++{
++ int irq;
++ int rc = 0;
++
++ if (!node->parent) {
++ rc = -ENODEV;
++ goto err;
++ }
++
++ e2m_ic->e2m = syscon_node_to_regmap(node->parent);
++ if (IS_ERR(e2m_ic->e2m)) {
++ rc = PTR_ERR(e2m_ic->e2m);
++ goto err;
++ }
++
++ /* Clear status and disable all interrupts */
++ regmap_write_bits(e2m_ic->e2m, e2m_ic->sts_reg,
++ e2m_ic->irq_enable, e2m_ic->irq_enable);
++ regmap_write_bits(e2m_ic->e2m, e2m_ic->en_reg,
++ e2m_ic->irq_enable, 0);
++
++ irq = irq_of_parse_and_map(node, 0);
++ if (!irq) {
++ rc = -EINVAL;
++ goto err;
++ }
++
++ e2m_ic->irq_domain = irq_domain_add_linear(node, e2m_ic->num_irqs,
++ &aspeed_e2m_ic_domain_ops,
++ e2m_ic);
++ if (!e2m_ic->irq_domain) {
++ rc = -ENOMEM;
++ goto err;
++ }
++
++ irq_set_chained_handler_and_data(irq, aspeed_e2m_ic_irq_handler,
++ e2m_ic);
++
++ return 0;
++
++err:
++ kfree(e2m_ic);
++
++ return rc;
++}
++
++static int __init aspeed_ast2700_e2m_ic_of_init(struct device_node *node,
++ struct device_node *parent)
++{
++ struct aspeed_e2m_ic *e2m_ic = kzalloc(sizeof(*e2m_ic), GFP_KERNEL);
++
++ if (!e2m_ic)
++ return -ENOMEM;
++
++ e2m_ic->irq_enable = ASPEED_AST2700_E2M_IC_ENABLE;
++ e2m_ic->irq_shift = ASPEED_AST2700_E2M_IC_SHIFT;
++ e2m_ic->num_irqs = ASPEED_AST2700_E2M_IC_NUM_IRQS;
++ e2m_ic->en_reg = ASPEED_AST2700_E2M_IC_EN_REG;
++ e2m_ic->sts_reg = ASPEED_AST2700_E2M_IC_STS_REG;
++
++ return aspeed_e2m_ic_of_init_common(e2m_ic, node);
++}
++
++IRQCHIP_DECLARE(ast2700_e2m_ic, "aspeed,ast2700-e2m-ic",
++ aspeed_ast2700_e2m_ic_of_init);
+diff --git a/drivers/irqchip/irq-aspeed-intc.c b/drivers/irqchip/irq-aspeed-intc.c
+new file mode 100644
+index 000000000..faf18a675
+--- /dev/null
++++ b/drivers/irqchip/irq-aspeed-intc.c
+@@ -0,0 +1,140 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Aspeed Interrupt Controller.
++ *
++ * Copyright (C) 2023 ASPEED Technology Inc.
++ */
++
++#include <linux/bitops.h>
++#include <linux/irq.h>
++#include <linux/irqchip.h>
++#include <linux/irqchip/chained_irq.h>
++#include <linux/irqdomain.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
++#include <linux/io.h>
++#include <linux/spinlock.h>
++
++#define INTC_INT_ENABLE_REG 0x00
++#define INTC_INT_STATUS_REG 0x04
++#define INTC_IRQS_PER_WORD 32
++
++struct aspeed_intc_ic {
++ void __iomem *base;
++ raw_spinlock_t gic_lock;
++ raw_spinlock_t intc_lock;
++ struct irq_domain *irq_domain;
++};
++
++static void aspeed_intc_ic_irq_handler(struct irq_desc *desc)
++{
++ struct aspeed_intc_ic *intc_ic = irq_desc_get_handler_data(desc);
++ struct irq_chip *chip = irq_desc_get_chip(desc);
++
++ chained_irq_enter(chip, desc);
++
++ scoped_guard(raw_spinlock, &intc_ic->gic_lock) {
++ unsigned long bit, status;
++
++ status = readl(intc_ic->base + INTC_INT_STATUS_REG);
++ for_each_set_bit(bit, &status, INTC_IRQS_PER_WORD) {
++ generic_handle_domain_irq(intc_ic->irq_domain, bit);
++ writel(BIT(bit), intc_ic->base + INTC_INT_STATUS_REG);
++ }
++ }
++
++ chained_irq_exit(chip, desc);
++}
++
++static void aspeed_intc_irq_mask(struct irq_data *data)
++{
++ struct aspeed_intc_ic *intc_ic = irq_data_get_irq_chip_data(data);
++ unsigned int mask = readl(intc_ic->base + INTC_INT_ENABLE_REG) & ~BIT(data->hwirq);
++
++ guard(raw_spinlock)(&intc_ic->intc_lock);
++ writel(mask, intc_ic->base + INTC_INT_ENABLE_REG);
++}
++
++static void aspeed_intc_irq_unmask(struct irq_data *data)
++{
++ struct aspeed_intc_ic *intc_ic = irq_data_get_irq_chip_data(data);
++ unsigned int unmask = readl(intc_ic->base + INTC_INT_ENABLE_REG) | BIT(data->hwirq);
++
++ guard(raw_spinlock)(&intc_ic->intc_lock);
++ writel(unmask, intc_ic->base + INTC_INT_ENABLE_REG);
++}
++
++static struct irq_chip aspeed_intc_chip = {
++ .name = "ASPEED INTC",
++ .irq_mask = aspeed_intc_irq_mask,
++ .irq_unmask = aspeed_intc_irq_unmask,
++};
++
++static int aspeed_intc_ic_map_irq_domain(struct irq_domain *domain, unsigned int irq,
++ irq_hw_number_t hwirq)
++{
++ irq_set_chip_and_handler(irq, &aspeed_intc_chip, handle_level_irq);
++ irq_set_chip_data(irq, domain->host_data);
++
++ return 0;
++}
++
++static const struct irq_domain_ops aspeed_intc_ic_irq_domain_ops = {
++ .map = aspeed_intc_ic_map_irq_domain,
++};
++
++static int __init aspeed_intc_ic_of_init(struct device_node *node,
++ struct device_node *parent)
++{
++ struct aspeed_intc_ic *intc_ic;
++ int ret = 0;
++ int irq, i;
++
++ intc_ic = kzalloc(sizeof(*intc_ic), GFP_KERNEL);
++ if (!intc_ic)
++ return -ENOMEM;
++
++ intc_ic->base = of_iomap(node, 0);
++ if (!intc_ic->base) {
++ pr_err("Failed to iomap intc_ic base\n");
++ ret = -ENOMEM;
++ goto err_free_ic;
++ }
++ writel(0xffffffff, intc_ic->base + INTC_INT_STATUS_REG);
++ writel(0x0, intc_ic->base + INTC_INT_ENABLE_REG);
++
++ intc_ic->irq_domain = irq_domain_add_linear(node, INTC_IRQS_PER_WORD,
++ &aspeed_intc_ic_irq_domain_ops, intc_ic);
++ if (!intc_ic->irq_domain) {
++ ret = -ENOMEM;
++ goto err_iounmap;
++ }
++
++ raw_spin_lock_init(&intc_ic->gic_lock);
++ raw_spin_lock_init(&intc_ic->intc_lock);
++
++ /* Check that all the IRQ numbers are valid. If not, unmap the base and free the data. */
++ for (i = 0; i < of_irq_count(node); i++) {
++ irq = irq_of_parse_and_map(node, i);
++ if (!irq) {
++ pr_err("Failed to get irq number\n");
++ ret = -EINVAL;
++ goto err_iounmap;
++ }
++ }
++
++ for (i = 0; i < of_irq_count(node); i++) {
++ irq = irq_of_parse_and_map(node, i);
++ irq_set_chained_handler_and_data(irq, aspeed_intc_ic_irq_handler, intc_ic);
++ }
++
++ return 0;
++
++err_iounmap:
++ iounmap(intc_ic->base);
++err_free_ic:
++ kfree(intc_ic);
++ return ret;
++}
++
++IRQCHIP_DECLARE(ast2700_intc_ic, "aspeed,ast2700-intc-ic", aspeed_intc_ic_of_init);
+diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
+index 94a7223e9..94b4ea51a 100644
+--- a/drivers/irqchip/irq-aspeed-scu-ic.c
++++ b/drivers/irqchip/irq-aspeed-scu-ic.c
+@@ -34,51 +34,100 @@
+ GENMASK(5, ASPEED_AST2600_SCU_IC1_SHIFT)
+ #define ASPEED_AST2600_SCU_IC1_NUM_IRQS 2
+
++#define ASPEED_AST2700_SCU_IC0_EN_REG 0x1d0
++#define ASPEED_AST2700_SCU_IC0_STS_REG 0x1d4
++#define ASPEED_AST2700_SCU_IC0_SHIFT 0
++#define ASPEED_AST2700_SCU_IC0_ENABLE \
++ GENMASK(3, ASPEED_AST2700_SCU_IC0_SHIFT)
++#define ASPEED_AST2700_SCU_IC0_NUM_IRQS 4
++
++#define ASPEED_AST2700_SCU_IC1_EN_REG 0x1e0
++#define ASPEED_AST2700_SCU_IC1_STS_REG 0x1e4
++#define ASPEED_AST2700_SCU_IC1_SHIFT 0
++#define ASPEED_AST2700_SCU_IC1_ENABLE \
++ GENMASK(3, ASPEED_AST2700_SCU_IC1_SHIFT)
++#define ASPEED_AST2700_SCU_IC1_NUM_IRQS 4
++
++#define ASPEED_AST2700_SCU_IC2_EN_REG 0x104
++#define ASPEED_AST2700_SCU_IC2_STS_REG 0x100
++#define ASPEED_AST2700_SCU_IC2_SHIFT 0
++#define ASPEED_AST2700_SCU_IC2_ENABLE \
++ GENMASK(3, ASPEED_AST2700_SCU_IC2_SHIFT)
++#define ASPEED_AST2700_SCU_IC2_NUM_IRQS 4
++
++#define ASPEED_AST2700_SCU_IC3_EN_REG 0x10c
++#define ASPEED_AST2700_SCU_IC3_STS_REG 0x108
++#define ASPEED_AST2700_SCU_IC3_SHIFT 0
++#define ASPEED_AST2700_SCU_IC3_ENABLE \
++ GENMASK(1, ASPEED_AST2700_SCU_IC3_SHIFT)
++#define ASPEED_AST2700_SCU_IC3_NUM_IRQS 2
++
+ struct aspeed_scu_ic {
+ unsigned long irq_enable;
+ unsigned long irq_shift;
+ unsigned int num_irqs;
++ bool en_sts_split;
+ unsigned int reg;
++ unsigned int en_reg;
++ unsigned int sts_reg;
+ struct regmap *scu;
+ struct irq_domain *irq_domain;
+ };
+
+ static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
+ {
+- unsigned int sts;
++ unsigned int val;
+ unsigned long bit;
+ unsigned long enabled;
+ unsigned long max;
+ unsigned long status;
+ struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+- unsigned int mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT;
++ unsigned int mask;
+
+ chained_irq_enter(chip, desc);
+
+- /*
+- * The SCU IC has just one register to control its operation and read
+- * status. The interrupt enable bits occupy the lower 16 bits of the
+- * register, while the interrupt status bits occupy the upper 16 bits.
+- * The status bit for a given interrupt is always 16 bits shifted from
+- * the enable bit for the same interrupt.
+- * Therefore, perform the IRQ operations in the enable bit space by
+- * shifting the status down to get the mapping and then back up to
+- * clear the bit.
+- */
+- regmap_read(scu_ic->scu, scu_ic->reg, &sts);
+- enabled = sts & scu_ic->irq_enable;
+- status = (sts >> ASPEED_SCU_IC_STATUS_SHIFT) & enabled;
+-
+- bit = scu_ic->irq_shift;
+- max = scu_ic->num_irqs + bit;
+-
+- for_each_set_bit_from(bit, &status, max) {
+- generic_handle_domain_irq(scu_ic->irq_domain,
+- bit - scu_ic->irq_shift);
+-
+- regmap_write_bits(scu_ic->scu, scu_ic->reg, mask,
+- BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
++ if (!scu_ic->en_sts_split) {
++ mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT;
++ /*
++ * The SCU IC has just one register to control its operation and read
++ * status. The interrupt enable bits occupy the lower 16 bits of the
++ * register, while the interrupt status bits occupy the upper 16 bits.
++ * The status bit for a given interrupt is always 16 bits shifted from
++ * the enable bit for the same interrupt.
++ * Therefore, perform the IRQ operations in the enable bit space by
++ * shifting the status down to get the mapping and then back up to
++ * clear the bit.
++ */
++ regmap_read(scu_ic->scu, scu_ic->reg, &val);
++ enabled = val & scu_ic->irq_enable;
++ status = (val >> ASPEED_SCU_IC_STATUS_SHIFT) & enabled;
++
++ bit = scu_ic->irq_shift;
++ max = scu_ic->num_irqs + bit;
++
++ for_each_set_bit_from(bit, &status, max) {
++ generic_handle_domain_irq(scu_ic->irq_domain,
++ bit - scu_ic->irq_shift);
++
++ regmap_write_bits(scu_ic->scu, scu_ic->reg, mask,
++ BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
++ }
++ } else {
++ mask = scu_ic->irq_enable;
++ regmap_read(scu_ic->scu, scu_ic->en_reg, &val);
++ enabled = val & scu_ic->irq_enable;
++ regmap_read(scu_ic->scu, scu_ic->sts_reg, &val);
++ status = val & enabled;
++
++ bit = scu_ic->irq_shift;
++ max = scu_ic->num_irqs + bit;
++
++ for_each_set_bit_from(bit, &status, max) {
++ generic_handle_domain_irq(scu_ic->irq_domain, bit - scu_ic->irq_shift);
++
++ regmap_write_bits(scu_ic->scu, scu_ic->sts_reg, mask, BIT(bit));
++ }
+ }
+
+ chained_irq_exit(chip, desc);
+@@ -87,30 +136,41 @@ static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
+ static void aspeed_scu_ic_irq_mask(struct irq_data *data)
+ {
+ struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
+- unsigned int mask = BIT(data->hwirq + scu_ic->irq_shift) |
+- (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
+-
+- /*
+- * Status bits are cleared by writing 1. In order to prevent the mask
+- * operation from clearing the status bits, they should be under the
+- * mask and written with 0.
+- */
+- regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, 0);
++ unsigned int mask;
++
++ if (!scu_ic->en_sts_split) {
++ mask = BIT(data->hwirq + scu_ic->irq_shift) |
++ (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
++ /*
++ * Status bits are cleared by writing 1. In order to prevent the mask
++ * operation from clearing the status bits, they should be under the
++ * mask and written with 0.
++ */
++ regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, 0);
++ } else {
++ mask = BIT(data->hwirq + scu_ic->irq_shift);
++ regmap_update_bits(scu_ic->scu, scu_ic->en_reg, mask, 0);
++ }
+ }
+
+ static void aspeed_scu_ic_irq_unmask(struct irq_data *data)
+ {
+ struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
+ unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift);
+- unsigned int mask = bit |
+- (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
+-
+- /*
+- * Status bits are cleared by writing 1. In order to prevent the unmask
+- * operation from clearing the status bits, they should be under the
+- * mask and written with 0.
+- */
+- regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, bit);
++ unsigned int mask;
++
++ if (!scu_ic->en_sts_split) {
++ mask = bit | (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
++ /*
++ * Status bits are cleared by writing 1. In order to prevent the unmask
++ * operation from clearing the status bits, they should be under the
++ * mask and written with 0.
++ */
++ regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, bit);
++ } else {
++ mask = bit;
++ regmap_update_bits(scu_ic->scu, scu_ic->en_reg, mask, bit);
++ }
+ }
+
+ static int aspeed_scu_ic_irq_set_affinity(struct irq_data *data,
+@@ -156,8 +216,19 @@ static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
+ rc = PTR_ERR(scu_ic->scu);
+ goto err;
+ }
+- regmap_write_bits(scu_ic->scu, scu_ic->reg, ASPEED_SCU_IC_STATUS, ASPEED_SCU_IC_STATUS);
+- regmap_write_bits(scu_ic->scu, scu_ic->reg, ASPEED_SCU_IC_ENABLE, 0);
++
++ /* Clear status and disable all interrupts */
++ if (!scu_ic->en_sts_split) {
++ regmap_write_bits(scu_ic->scu, scu_ic->reg,
++ ASPEED_SCU_IC_STATUS, ASPEED_SCU_IC_STATUS);
++ regmap_write_bits(scu_ic->scu, scu_ic->reg,
++ ASPEED_SCU_IC_ENABLE, 0);
++ } else {
++ regmap_write_bits(scu_ic->scu, scu_ic->sts_reg,
++ scu_ic->irq_enable, scu_ic->irq_enable);
++ regmap_write_bits(scu_ic->scu, scu_ic->en_reg,
++ scu_ic->irq_enable, 0);
++ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq) {
+@@ -232,9 +303,89 @@ static int __init aspeed_ast2600_scu_ic1_of_init(struct device_node *node,
+ return aspeed_scu_ic_of_init_common(scu_ic, node);
+ }
+
++static int __init aspeed_ast2700_scu_ic0_of_init(struct device_node *node,
++ struct device_node *parent)
++{
++ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
++
++ if (!scu_ic)
++ return -ENOMEM;
++
++ scu_ic->irq_enable = ASPEED_AST2700_SCU_IC0_ENABLE;
++ scu_ic->irq_shift = ASPEED_AST2700_SCU_IC0_SHIFT;
++ scu_ic->num_irqs = ASPEED_AST2700_SCU_IC0_NUM_IRQS;
++ scu_ic->en_sts_split = true;
++ scu_ic->en_reg = ASPEED_AST2700_SCU_IC0_EN_REG;
++ scu_ic->sts_reg = ASPEED_AST2700_SCU_IC0_STS_REG;
++
++ return aspeed_scu_ic_of_init_common(scu_ic, node);
++}
++
++static int __init aspeed_ast2700_scu_ic1_of_init(struct device_node *node,
++ struct device_node *parent)
++{
++ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
++
++ if (!scu_ic)
++ return -ENOMEM;
++
++ scu_ic->irq_enable = ASPEED_AST2700_SCU_IC1_ENABLE;
++ scu_ic->irq_shift = ASPEED_AST2700_SCU_IC1_SHIFT;
++ scu_ic->num_irqs = ASPEED_AST2700_SCU_IC1_NUM_IRQS;
++ scu_ic->en_sts_split = true;
++ scu_ic->en_reg = ASPEED_AST2700_SCU_IC1_EN_REG;
++ scu_ic->sts_reg = ASPEED_AST2700_SCU_IC1_STS_REG;
++
++ return aspeed_scu_ic_of_init_common(scu_ic, node);
++}
++
++static int __init aspeed_ast2700_scu_ic2_of_init(struct device_node *node,
++ struct device_node *parent)
++{
++ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
++
++ if (!scu_ic)
++ return -ENOMEM;
++
++ scu_ic->irq_enable = ASPEED_AST2700_SCU_IC2_ENABLE;
++ scu_ic->irq_shift = ASPEED_AST2700_SCU_IC2_SHIFT;
++ scu_ic->num_irqs = ASPEED_AST2700_SCU_IC2_NUM_IRQS;
++ scu_ic->en_sts_split = true;
++ scu_ic->en_reg = ASPEED_AST2700_SCU_IC2_EN_REG;
++ scu_ic->sts_reg = ASPEED_AST2700_SCU_IC2_STS_REG;
++
++ return aspeed_scu_ic_of_init_common(scu_ic, node);
++}
++
++static int __init aspeed_ast2700_scu_ic3_of_init(struct device_node *node,
++ struct device_node *parent)
++{
++ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
++
++ if (!scu_ic)
++ return -ENOMEM;
++
++ scu_ic->irq_enable = ASPEED_AST2700_SCU_IC3_ENABLE;
++ scu_ic->irq_shift = ASPEED_AST2700_SCU_IC3_SHIFT;
++ scu_ic->num_irqs = ASPEED_AST2700_SCU_IC3_NUM_IRQS;
++ scu_ic->en_sts_split = true;
++ scu_ic->en_reg = ASPEED_AST2700_SCU_IC3_EN_REG;
++ scu_ic->sts_reg = ASPEED_AST2700_SCU_IC3_STS_REG;
++
++ return aspeed_scu_ic_of_init_common(scu_ic, node);
++}
++
+ IRQCHIP_DECLARE(ast2400_scu_ic, "aspeed,ast2400-scu-ic", aspeed_scu_ic_of_init);
+ IRQCHIP_DECLARE(ast2500_scu_ic, "aspeed,ast2500-scu-ic", aspeed_scu_ic_of_init);
+ IRQCHIP_DECLARE(ast2600_scu_ic0, "aspeed,ast2600-scu-ic0",
+ aspeed_ast2600_scu_ic0_of_init);
+ IRQCHIP_DECLARE(ast2600_scu_ic1, "aspeed,ast2600-scu-ic1",
+ aspeed_ast2600_scu_ic1_of_init);
++IRQCHIP_DECLARE(ast2700_scu_ic0, "aspeed,ast2700-scu-ic0",
++ aspeed_ast2700_scu_ic0_of_init);
++IRQCHIP_DECLARE(ast2700_scu_ic1, "aspeed,ast2700-scu-ic1",
++ aspeed_ast2700_scu_ic1_of_init);
++IRQCHIP_DECLARE(ast2700_scu_ic2, "aspeed,ast2700-scu-ic2",
++ aspeed_ast2700_scu_ic2_of_init);
++IRQCHIP_DECLARE(ast2700_scu_ic3, "aspeed,ast2700-scu-ic3",
++ aspeed_ast2700_scu_ic3_of_init);
+--
+2.34.1
+
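Note on the irqchip change above: on the AST2400/2500/2600 the SCU interrupt controller packs the enable bits into the lower 16 bits and the write-1-to-clear status bits into the upper 16 bits of a single register, while the AST2700 instances added here (the `en_sts_split` path) use separate enable and status registers indexed by the same bit position. The snippet below is only a minimal, standalone model of those two layouts for readers following the handler logic; it is not code from the patch, and the register values, bit choices, and helper names are invented for illustration.

```c
/* Standalone model of the two SCU IC register layouts handled in the patch.
 * Build with: gcc -std=c99 -o scu_ic_model scu_ic_model.c
 * Register values and bit choices are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define STATUS_SHIFT 16 /* combined layout: status bit = enable bit + 16 */

/* Combined register: enable in bits [15:0], W1C status in bits [31:16]. */
static void handle_combined(uint32_t *reg, uint32_t irq_enable)
{
	uint32_t enabled = *reg & irq_enable;
	uint32_t status = (*reg >> STATUS_SHIFT) & enabled;

	for (unsigned int bit = 0; bit < 16; bit++) {
		if (!(status & (1u << bit)))
			continue;
		printf("combined: dispatch hwirq %u\n", bit);
		/* model the effect of the W1C write: only this status bit drops */
		*reg &= ~(1u << (bit + STATUS_SHIFT));
	}
}

/* Split layout: separate enable and W1C status registers, same bit index. */
static void handle_split(uint32_t en, uint32_t *sts, uint32_t irq_enable)
{
	uint32_t status = *sts & en & irq_enable;

	for (unsigned int bit = 0; bit < 16; bit++) {
		if (!(status & (1u << bit)))
			continue;
		printf("split: dispatch hwirq %u\n", bit);
		*sts &= ~(1u << bit); /* W1C modelled as a plain clear */
	}
}

int main(void)
{
	uint32_t combined = (1u << (3 + STATUS_SHIFT)) | (1u << 3); /* irq 3 enabled and pending */
	uint32_t sts = 1u << 5;                                     /* irq 5 pending */

	handle_combined(&combined, 0xffff);
	handle_split(1u << 5, &sts, 0xffff);
	return 0;
}
```

In the driver itself the same clears are performed with regmap_write_bits() against the hardware registers, as in the hunks above.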
diff --git a/recipes-kernel/linux/files/0005-Add-pinctrl-driver-for-ast2700.patch b/recipes-kernel/linux/files/0005-Add-pinctrl-driver-for-ast2700.patch
new file mode 100644
index 0000000..a3edf7d
--- /dev/null
+++ b/recipes-kernel/linux/files/0005-Add-pinctrl-driver-for-ast2700.patch
@@ -0,0 +1,6506 @@
+From e18c3cdbe593524396c5b51f3a0828ab8e2b80cb Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Fri, 7 Mar 2025 10:09:00 +0800
+Subject: [PATCH] Add pinctrl driver for ast2700
+
+This is based on aspeed SDK 9.05.
+From linux-aspeed:
+769f62b7baa84d6998723b0ea60280e380183553
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
+---
+ drivers/pinctrl/aspeed/Kconfig | 8 +
+ drivers/pinctrl/aspeed/Makefile | 1 +
+ drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c | 19 +-
+ drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c | 19 +-
+ drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c | 525 ++--
+ .../pinctrl/aspeed/pinctrl-aspeed-g7-soc0.c | 458 ++++
+ .../pinctrl/aspeed/pinctrl-aspeed-g7-soc1.c | 2292 ++++++++++++++++
+ .../pinctrl/aspeed/pinctrl-aspeed-g7a0-soc1.c | 2304 +++++++++++++++++
+ drivers/pinctrl/aspeed/pinctrl-aspeed.c | 47 +
+ drivers/pinctrl/aspeed/pinctrl-aspeed.h | 5 +
+ drivers/pinctrl/aspeed/pinmux-aspeed.h | 31 +
+ 11 files changed, 5513 insertions(+), 196 deletions(-)
+ create mode 100644 drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc0.c
+ create mode 100644 drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc1.c
+ create mode 100644 drivers/pinctrl/aspeed/pinctrl-aspeed-g7a0-soc1.c
+
+diff --git a/drivers/pinctrl/aspeed/Kconfig b/drivers/pinctrl/aspeed/Kconfig
+index 1a4e5b9ed..16743091a 100644
+--- a/drivers/pinctrl/aspeed/Kconfig
++++ b/drivers/pinctrl/aspeed/Kconfig
+@@ -31,3 +31,11 @@ config PINCTRL_ASPEED_G6
+ help
+ Say Y here to enable pin controller support for Aspeed's 6th
+ generation SoCs. GPIO is provided by a separate GPIO driver.
++
++config PINCTRL_ASPEED_G7
++ bool "Aspeed G7 SoC pin control"
++ depends on (ARCH_ASPEED || COMPILE_TEST) && OF
++ select PINCTRL_ASPEED
++ help
++ Say Y here to enable pin controller support for Aspeed's 7th
++ generation SoCs. GPIO is provided by a separate GPIO driver.
+diff --git a/drivers/pinctrl/aspeed/Makefile b/drivers/pinctrl/aspeed/Makefile
+index 489ea1778..64770d020 100644
+--- a/drivers/pinctrl/aspeed/Makefile
++++ b/drivers/pinctrl/aspeed/Makefile
+@@ -6,3 +6,4 @@ obj-$(CONFIG_PINCTRL_ASPEED) += pinctrl-aspeed.o pinmux-aspeed.o
+ obj-$(CONFIG_PINCTRL_ASPEED_G4) += pinctrl-aspeed-g4.o
+ obj-$(CONFIG_PINCTRL_ASPEED_G5) += pinctrl-aspeed-g5.o
+ obj-$(CONFIG_PINCTRL_ASPEED_G6) += pinctrl-aspeed-g6.o
++obj-$(CONFIG_PINCTRL_ASPEED_G7) += pinctrl-aspeed-g7-soc0.o pinctrl-aspeed-g7-soc1.o pinctrl-aspeed-g7a0-soc1.o
+diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
+index bfed0e274..774f8d051 100644
+--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c
+@@ -2563,15 +2563,20 @@ static int aspeed_g4_sig_expr_set(struct aspeed_pinmux_data *ctx,
+ * deconfigured and is the reason we re-evaluate after writing
+ * all descriptor bits.
+ *
+- * Port D and port E GPIO loopback modes are the only exception
+- * as those are commonly used with front-panel buttons to allow
+- * normal operation of the host when the BMC is powered off or
+- * fails to boot. Once the BMC has booted, the loopback mode
+- * must be disabled for the BMC to control host power-on and
+- * reset.
++ * We make two exceptions to the read-only rule:
++ *
++ * - The passthrough mode of GPIO ports D and E are commonly
++ * used with front-panel buttons to allow normal operation
++ * of the host if the BMC is powered off or fails to boot.
++ * Once the BMC has booted, the loopback mode must be
++ * disabled for the BMC to control host power-on and reset.
++ *
++ * - The operating mode of the SPI1 interface is simply
++ * strapped incorrectly on some systems and requires a
++ * software fixup, which we allow to be done via pinctrl.
+ */
+ if (desc->ip == ASPEED_IP_SCU && desc->reg == HW_STRAP1 &&
+- !(desc->mask & (BIT(21) | BIT(22))))
++ !(desc->mask & (BIT(22) | BIT(21) | BIT(13) | BIT(12))))
+ continue;
+
+ if (desc->ip == ASPEED_IP_SCU && desc->reg == HW_STRAP2)
+diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+index 4c0d26606..5bb8fd0d1 100644
+--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+@@ -2742,15 +2742,20 @@ static int aspeed_g5_sig_expr_set(struct aspeed_pinmux_data *ctx,
+ * deconfigured and is the reason we re-evaluate after writing
+ * all descriptor bits.
+ *
+- * Port D and port E GPIO loopback modes are the only exception
+- * as those are commonly used with front-panel buttons to allow
+- * normal operation of the host when the BMC is powered off or
+- * fails to boot. Once the BMC has booted, the loopback mode
+- * must be disabled for the BMC to control host power-on and
+- * reset.
++ * We make two exceptions to the read-only rule:
++ *
++ * - The passthrough mode of GPIO ports D and E are commonly
++ * used with front-panel buttons to allow normal operation
++ * of the host if the BMC is powered off or fails to boot.
++ * Once the BMC has booted, the loopback mode must be
++ * disabled for the BMC to control host power-on and reset.
++ *
++ * - The operating mode of the SPI1 interface is simply
++ * strapped incorrectly on some systems and requires a
++ * software fixup, which we allow to be done via pinctrl.
+ */
+ if (desc->ip == ASPEED_IP_SCU && desc->reg == HW_STRAP1 &&
+- !(desc->mask & (BIT(21) | BIT(22))))
++ !(desc->mask & (BIT(22) | BIT(21) | BIT(13) | BIT(12))))
+ continue;
+
+ if (desc->ip == ASPEED_IP_SCU && desc->reg == HW_STRAP2)
+diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+index 793874113..b169e709f 100644
+--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
+@@ -17,6 +17,8 @@
+ #include "../pinctrl-utils.h"
+ #include "pinctrl-aspeed.h"
+
++#define SCU040 0x040 /* Reset Control Set 1 */
++#define SCU0C8 0x0C8 /* Debug Control */
+ #define SCU400 0x400 /* Multi-function Pin Control #1 */
+ #define SCU404 0x404 /* Multi-function Pin Control #2 */
+ #define SCU40C 0x40C /* Multi-function Pin Control #3 */
+@@ -31,6 +33,7 @@
+ #define SCU450 0x450 /* Multi-function Pin Control #14 */
+ #define SCU454 0x454 /* Multi-function Pin Control #15 */
+ #define SCU458 0x458 /* Multi-function Pin Control #16 */
++#define SCU470 0x470
+ #define SCU4B0 0x4B0 /* Multi-function Pin Control #17 */
+ #define SCU4B4 0x4B4 /* Multi-function Pin Control #18 */
+ #define SCU4B8 0x4B8 /* Multi-function Pin Control #19 */
+@@ -46,13 +49,16 @@
+ #define SCU630 0x630 /* Disable GPIO Internal Pull-Down #4 */
+ #define SCU634 0x634 /* Disable GPIO Internal Pull-Down #5 */
+ #define SCU638 0x638 /* Disable GPIO Internal Pull-Down #6 */
++#define SCU650 0x650 /* Driving Strength */
+ #define SCU690 0x690 /* Multi-function Pin Control #24 */
+ #define SCU694 0x694 /* Multi-function Pin Control #25 */
++#define SCU698 0x698 /* Multi-function Pin Control #26 */
+ #define SCU69C 0x69C /* Multi-function Pin Control #27 */
+ #define SCU6D0 0x6D0 /* Multi-function Pin Control #29 */
+ #define SCUC20 0xC20 /* PCIE configuration Setting Control */
++#define SCUC24 0xC24 /* BMC MMIO Decode Setting */
+
+-#define ASPEED_G6_NR_PINS 256
++#define ASPEED_G6_NR_PINS 258
+
+ #define M24 0
+ SIG_EXPR_LIST_DECL_SESG(M24, MDC3, MDIO3, SIG_DESC_SET(SCU410, 0));
+@@ -171,85 +177,99 @@ FUNC_GROUP_DECL(LPCHC, J26, K23, H26, J25, J23, G26, H25, J24);
+
+ #define H24 16
+ SIG_EXPR_LIST_DECL_SESG(H24, RGMII3TXCK, RGMII3, SIG_DESC_SET(SCU410, 16),
+- SIG_DESC_SET(SCU510, 0));
++ SIG_DESC_SET(SCU510, 0));
+ SIG_EXPR_LIST_DECL_SESG(H24, RMII3RCLKO, RMII3, SIG_DESC_SET(SCU410, 16),
+- SIG_DESC_CLEAR(SCU510, 0));
+-PIN_DECL_2(H24, GPIOC0, RGMII3TXCK, RMII3RCLKO);
++ SIG_DESC_CLEAR(SCU510, 0));
++SIG_EXPR_LIST_DECL_SESG(H24, VPA_B0, VPA, SIG_DESC_CLEAR(SCU410, 16));
++PIN_DECL_3(H24, GPIOC0, RGMII3TXCK, RMII3RCLKO, VPA_B0);
+
+ #define J22 17
+ SIG_EXPR_LIST_DECL_SESG(J22, RGMII3TXCTL, RGMII3, SIG_DESC_SET(SCU410, 17),
+- SIG_DESC_SET(SCU510, 0));
++ SIG_DESC_SET(SCU510, 0));
+ SIG_EXPR_LIST_DECL_SESG(J22, RMII3TXEN, RMII3, SIG_DESC_SET(SCU410, 17),
+- SIG_DESC_CLEAR(SCU510, 0));
+-PIN_DECL_2(J22, GPIOC1, RGMII3TXCTL, RMII3TXEN);
++ SIG_DESC_CLEAR(SCU510, 0));
++SIG_EXPR_LIST_DECL_SESG(J22, VPA_B1, VPA, SIG_DESC_CLEAR(SCU410, 17));
++PIN_DECL_3(J22, GPIOC1, RGMII3TXCTL, RMII3TXEN, VPA_B1);
+
+ #define H22 18
+ SIG_EXPR_LIST_DECL_SESG(H22, RGMII3TXD0, RGMII3, SIG_DESC_SET(SCU410, 18),
+- SIG_DESC_SET(SCU510, 0));
++ SIG_DESC_SET(SCU510, 0));
+ SIG_EXPR_LIST_DECL_SESG(H22, RMII3TXD0, RMII3, SIG_DESC_SET(SCU410, 18),
+- SIG_DESC_CLEAR(SCU510, 0));
+-PIN_DECL_2(H22, GPIOC2, RGMII3TXD0, RMII3TXD0);
++ SIG_DESC_CLEAR(SCU510, 0));
++SIG_EXPR_LIST_DECL_SESG(H22, VPA_B2, VPA, SIG_DESC_CLEAR(SCU410, 18));
++PIN_DECL_3(H22, GPIOC2, RGMII3TXD0, RMII3TXD0, VPA_B2);
+
+ #define H23 19
+ SIG_EXPR_LIST_DECL_SESG(H23, RGMII3TXD1, RGMII3, SIG_DESC_SET(SCU410, 19),
+- SIG_DESC_SET(SCU510, 0));
++ SIG_DESC_SET(SCU510, 0));
+ SIG_EXPR_LIST_DECL_SESG(H23, RMII3TXD1, RMII3, SIG_DESC_SET(SCU410, 19),
+- SIG_DESC_CLEAR(SCU510, 0));
+-PIN_DECL_2(H23, GPIOC3, RGMII3TXD1, RMII3TXD1);
++ SIG_DESC_CLEAR(SCU510, 0));
++SIG_EXPR_LIST_DECL_SESG(H23, VPA_B3, VPA, SIG_DESC_CLEAR(SCU410, 19));
++PIN_DECL_3(H23, GPIOC3, RGMII3TXD1, RMII3TXD1, VPA_B3);
+
+ #define G22 20
+ SIG_EXPR_LIST_DECL_SESG(G22, RGMII3TXD2, RGMII3, SIG_DESC_SET(SCU410, 20),
+- SIG_DESC_SET(SCU510, 0));
+-PIN_DECL_1(G22, GPIOC4, RGMII3TXD2);
++ SIG_DESC_SET(SCU510, 0));
++SIG_EXPR_LIST_DECL_SESG(G22, VPA_B4, VPA, SIG_DESC_CLEAR(SCU410, 20));
++PIN_DECL_2(G22, GPIOC4, RGMII3TXD2, VPA_B4);
+
+ #define F22 21
+ SIG_EXPR_LIST_DECL_SESG(F22, RGMII3TXD3, RGMII3, SIG_DESC_SET(SCU410, 21),
+- SIG_DESC_SET(SCU510, 0));
+-PIN_DECL_1(F22, GPIOC5, RGMII3TXD3);
++ SIG_DESC_SET(SCU510, 0));
++SIG_EXPR_LIST_DECL_SESG(F22, VPA_B5, VPA, SIG_DESC_CLEAR(SCU410, 21));
++PIN_DECL_2(F22, GPIOC5, RGMII3TXD3, VPA_B5);
+
+ #define G23 22
+ SIG_EXPR_LIST_DECL_SESG(G23, RGMII3RXCK, RGMII3, SIG_DESC_SET(SCU410, 22),
+- SIG_DESC_SET(SCU510, 0));
++ SIG_DESC_SET(SCU510, 0));
+ SIG_EXPR_LIST_DECL_SESG(G23, RMII3RCLKI, RMII3, SIG_DESC_SET(SCU410, 22),
+- SIG_DESC_CLEAR(SCU510, 0));
+-PIN_DECL_2(G23, GPIOC6, RGMII3RXCK, RMII3RCLKI);
++ SIG_DESC_CLEAR(SCU510, 0));
++SIG_EXPR_LIST_DECL_SESG(G23, VPAPCLK, VPA, SIG_DESC_CLEAR(SCU410, 22));
++PIN_DECL_3(G23, GPIOC6, RGMII3RXCK, RMII3RCLKI, VPAPCLK);
+
+ #define G24 23
+ SIG_EXPR_LIST_DECL_SESG(G24, RGMII3RXCTL, RGMII3, SIG_DESC_SET(SCU410, 23),
+- SIG_DESC_SET(SCU510, 0));
+-PIN_DECL_1(G24, GPIOC7, RGMII3RXCTL);
++ SIG_DESC_SET(SCU510, 0));
++SIG_EXPR_LIST_DECL_SESG(G24, VPA_B6, VPA, SIG_DESC_CLEAR(SCU410, 23));
++PIN_DECL_2(G24, GPIOC7, RGMII3RXCTL, VPA_B6);
+
+ #define F23 24
+ SIG_EXPR_LIST_DECL_SESG(F23, RGMII3RXD0, RGMII3, SIG_DESC_SET(SCU410, 24),
+- SIG_DESC_SET(SCU510, 0));
++ SIG_DESC_SET(SCU510, 0));
+ SIG_EXPR_LIST_DECL_SESG(F23, RMII3RXD0, RMII3, SIG_DESC_SET(SCU410, 24),
+- SIG_DESC_CLEAR(SCU510, 0));
+-PIN_DECL_2(F23, GPIOD0, RGMII3RXD0, RMII3RXD0);
++ SIG_DESC_CLEAR(SCU510, 0));
++SIG_EXPR_LIST_DECL_SESG(F23, VPA_B7, VPA, SIG_DESC_CLEAR(SCU410, 24));
++PIN_DECL_3(F23, GPIOD0, RGMII3RXD0, RMII3RXD0, VPA_B7);
+
+ #define F26 25
+ SIG_EXPR_LIST_DECL_SESG(F26, RGMII3RXD1, RGMII3, SIG_DESC_SET(SCU410, 25),
+- SIG_DESC_SET(SCU510, 0));
++ SIG_DESC_SET(SCU510, 0));
+ SIG_EXPR_LIST_DECL_SESG(F26, RMII3RXD1, RMII3, SIG_DESC_SET(SCU410, 25),
+- SIG_DESC_CLEAR(SCU510, 0));
+-PIN_DECL_2(F26, GPIOD1, RGMII3RXD1, RMII3RXD1);
++ SIG_DESC_CLEAR(SCU510, 0));
++SIG_EXPR_LIST_DECL_SESG(F26, VPA_G0, VPA, SIG_DESC_CLEAR(SCU410, 25));
++PIN_DECL_3(F26, GPIOD1, RGMII3RXD1, RMII3RXD1, VPA_G0);
+
+ #define F25 26
+ SIG_EXPR_LIST_DECL_SESG(F25, RGMII3RXD2, RGMII3, SIG_DESC_SET(SCU410, 26),
+- SIG_DESC_SET(SCU510, 0));
++ SIG_DESC_SET(SCU510, 0));
+ SIG_EXPR_LIST_DECL_SESG(F25, RMII3CRSDV, RMII3, SIG_DESC_SET(SCU410, 26),
+- SIG_DESC_CLEAR(SCU510, 0));
+-PIN_DECL_2(F25, GPIOD2, RGMII3RXD2, RMII3CRSDV);
++ SIG_DESC_CLEAR(SCU510, 0));
++SIG_EXPR_LIST_DECL_SESG(F25, VPA_G1, VPA, SIG_DESC_CLEAR(SCU410, 26));
++PIN_DECL_3(F25, GPIOD2, RGMII3RXD2, RMII3CRSDV, VPA_G1);
+
+ #define E26 27
+ SIG_EXPR_LIST_DECL_SESG(E26, RGMII3RXD3, RGMII3, SIG_DESC_SET(SCU410, 27),
+- SIG_DESC_SET(SCU510, 0));
++ SIG_DESC_SET(SCU510, 0));
+ SIG_EXPR_LIST_DECL_SESG(E26, RMII3RXER, RMII3, SIG_DESC_SET(SCU410, 27),
+- SIG_DESC_CLEAR(SCU510, 0));
+-PIN_DECL_2(E26, GPIOD3, RGMII3RXD3, RMII3RXER);
++ SIG_DESC_CLEAR(SCU510, 0));
++SIG_EXPR_LIST_DECL_SESG(E26, VPA_G2, VPA, SIG_DESC_CLEAR(SCU410, 27));
++PIN_DECL_3(E26, GPIOD3, RGMII3RXD3, RMII3RXER, VPA_G2);
+
+ FUNC_GROUP_DECL(RGMII3, H24, J22, H22, H23, G22, F22, G23, G24, F23, F26, F25,
+ E26);
+-FUNC_GROUP_DECL(RMII3, H24, J22, H22, H23, G23, F23, F26, F25, E26);
++GROUP_DECL(RMII3, H24, J22, H22, H23, G23, F23, F26, F25, E26);
++GROUP_DECL(NCSI3, J22, H22, H23, G23, F23, F26, F25, E26);
++FUNC_DECL_2(RMII3, RMII3, NCSI3);
+
+ #define F24 28
+ SIG_EXPR_LIST_DECL_SESG(F24, NCTS3, NCTS3, SIG_DESC_SET(SCU410, 28));
+@@ -257,7 +277,9 @@ SIG_EXPR_LIST_DECL_SESG(F24, RGMII4TXCK, RGMII4, SIG_DESC_SET(SCU4B0, 28),
+ SIG_DESC_SET(SCU510, 1));
+ SIG_EXPR_LIST_DECL_SESG(F24, RMII4RCLKO, RMII4, SIG_DESC_SET(SCU4B0, 28),
+ SIG_DESC_CLEAR(SCU510, 1));
+-PIN_DECL_3(F24, GPIOD4, NCTS3, RGMII4TXCK, RMII4RCLKO);
++SIG_EXPR_LIST_DECL_SESG(F24, VPA_G3, VPA, SIG_DESC_CLEAR(SCU410, 28),
++ SIG_DESC_CLEAR(SCU4B0, 28));
++PIN_DECL_4(F24, GPIOD4, NCTS3, RGMII4TXCK, RMII4RCLKO, VPA_G3);
+ FUNC_GROUP_DECL(NCTS3, F24);
+
+ #define E23 29
+@@ -266,7 +288,9 @@ SIG_EXPR_LIST_DECL_SESG(E23, RGMII4TXCTL, RGMII4, SIG_DESC_SET(SCU4B0, 29),
+ SIG_DESC_SET(SCU510, 1));
+ SIG_EXPR_LIST_DECL_SESG(E23, RMII4TXEN, RMII4, SIG_DESC_SET(SCU4B0, 29),
+ SIG_DESC_CLEAR(SCU510, 1));
+-PIN_DECL_3(E23, GPIOD5, NDCD3, RGMII4TXCTL, RMII4TXEN);
++SIG_EXPR_LIST_DECL_SESG(E23, VPA_G4, VPA, SIG_DESC_CLEAR(SCU410, 29),
++ SIG_DESC_CLEAR(SCU4B0, 29));
++PIN_DECL_4(E23, GPIOD5, NDCD3, RGMII4TXCTL, RMII4TXEN, VPA_G4);
+ FUNC_GROUP_DECL(NDCD3, E23);
+
+ #define E24 30
+@@ -275,7 +299,9 @@ SIG_EXPR_LIST_DECL_SESG(E24, RGMII4TXD0, RGMII4, SIG_DESC_SET(SCU4B0, 30),
+ SIG_DESC_SET(SCU510, 1));
+ SIG_EXPR_LIST_DECL_SESG(E24, RMII4TXD0, RMII4, SIG_DESC_SET(SCU4B0, 30),
+ SIG_DESC_CLEAR(SCU510, 1));
+-PIN_DECL_3(E24, GPIOD6, NDSR3, RGMII4TXD0, RMII4TXD0);
++SIG_EXPR_LIST_DECL_SESG(E24, VPA_G5, VPA, SIG_DESC_CLEAR(SCU410, 30),
++ SIG_DESC_CLEAR(SCU4B0, 30));
++PIN_DECL_4(E24, GPIOD6, NDSR3, RGMII4TXD0, RMII4TXD0, VPA_G5);
+ FUNC_GROUP_DECL(NDSR3, E24);
+
+ #define E25 31
+@@ -284,103 +310,143 @@ SIG_EXPR_LIST_DECL_SESG(E25, RGMII4TXD1, RGMII4, SIG_DESC_SET(SCU4B0, 31),
+ SIG_DESC_SET(SCU510, 1));
+ SIG_EXPR_LIST_DECL_SESG(E25, RMII4TXD1, RMII4, SIG_DESC_SET(SCU4B0, 31),
+ SIG_DESC_CLEAR(SCU510, 1));
+-PIN_DECL_3(E25, GPIOD7, NRI3, RGMII4TXD1, RMII4TXD1);
++SIG_EXPR_LIST_DECL_SESG(E25, VPA_G6, VPA, SIG_DESC_CLEAR(SCU410, 31),
++ SIG_DESC_CLEAR(SCU4B0, 31));
++PIN_DECL_4(E25, GPIOD7, NRI3, RGMII4TXD1, RMII4TXD1, VPA_G6);
+ FUNC_GROUP_DECL(NRI3, E25);
+
+ #define D26 32
+-SIG_EXPR_LIST_DECL_SESG(D26, NDTR3, NDTR3, SIG_DESC_SET(SCU414, 0));
++SIG_EXPR_LIST_DECL_SESG(D26, NDTR3, NDTR3, SIG_DESC_SET(SCU414, 0),
++ SIG_DESC_CLEAR(SCU470, 16));
+ SIG_EXPR_LIST_DECL_SESG(D26, RGMII4TXD2, RGMII4, SIG_DESC_SET(SCU4B4, 0),
+- SIG_DESC_SET(SCU510, 1));
+-PIN_DECL_2(D26, GPIOE0, NDTR3, RGMII4TXD2);
++ SIG_DESC_CLEAR(SCU470, 16), SIG_DESC_SET(SCU510, 1));
++SIG_EXPR_LIST_DECL_SESG(D26, VPA_G7, VPA, SIG_DESC_CLEAR(SCU414, 0),
++ SIG_DESC_CLEAR(SCU4B4, 0));
++PIN_DECL_3(D26, GPIOE0, NDTR3, RGMII4TXD2, VPA_G7);
+ FUNC_GROUP_DECL(NDTR3, D26);
+
+ #define D24 33
+-SIG_EXPR_LIST_DECL_SESG(D24, NRTS3, NRTS3, SIG_DESC_SET(SCU414, 1));
++SIG_EXPR_LIST_DECL_SESG(D24, NRTS3, NRTS3, SIG_DESC_SET(SCU414, 1),
++ SIG_DESC_CLEAR(SCU470, 17));
+ SIG_EXPR_LIST_DECL_SESG(D24, RGMII4TXD3, RGMII4, SIG_DESC_SET(SCU4B4, 1),
+- SIG_DESC_SET(SCU510, 1));
+-PIN_DECL_2(D24, GPIOE1, NRTS3, RGMII4TXD3);
++ SIG_DESC_CLEAR(SCU470, 17), SIG_DESC_SET(SCU510, 1));
++SIG_EXPR_LIST_DECL_SESG(D24, VPA_R0, VPA, SIG_DESC_CLEAR(SCU414, 1),
++ SIG_DESC_CLEAR(SCU4B4, 1));
++PIN_DECL_3(D24, GPIOE1, NRTS3, RGMII4TXD3, VPA_R0);
+ FUNC_GROUP_DECL(NRTS3, D24);
+
+ #define C25 34
+-SIG_EXPR_LIST_DECL_SESG(C25, NCTS4, NCTS4, SIG_DESC_SET(SCU414, 2));
++SIG_EXPR_LIST_DECL_SESG(C25, NCTS4, NCTS4, SIG_DESC_SET(SCU414, 2),
++ SIG_DESC_CLEAR(SCU470, 18));
+ SIG_EXPR_LIST_DECL_SESG(C25, RGMII4RXCK, RGMII4, SIG_DESC_SET(SCU4B4, 2),
+- SIG_DESC_SET(SCU510, 1));
++ SIG_DESC_CLEAR(SCU470, 18), SIG_DESC_SET(SCU510, 1));
+ SIG_EXPR_LIST_DECL_SESG(C25, RMII4RCLKI, RMII4, SIG_DESC_SET(SCU4B4, 2),
+- SIG_DESC_CLEAR(SCU510, 1));
+-PIN_DECL_3(C25, GPIOE2, NCTS4, RGMII4RXCK, RMII4RCLKI);
++ SIG_DESC_CLEAR(SCU470, 18), SIG_DESC_CLEAR(SCU510, 1));
++SIG_EXPR_LIST_DECL_SESG(C25, VPA_R1, VPA, SIG_DESC_CLEAR(SCU414, 2),
++ SIG_DESC_CLEAR(SCU4B4, 2));
++PIN_DECL_4(C25, GPIOE2, NCTS4, RGMII4RXCK, RMII4RCLKI, VPA_R1);
+ FUNC_GROUP_DECL(NCTS4, C25);
+
+ #define C26 35
+-SIG_EXPR_LIST_DECL_SESG(C26, NDCD4, NDCD4, SIG_DESC_SET(SCU414, 3));
++SIG_EXPR_LIST_DECL_SESG(C26, NDCD4, NDCD4, SIG_DESC_SET(SCU414, 3),
++ SIG_DESC_CLEAR(SCU470, 19));
+ SIG_EXPR_LIST_DECL_SESG(C26, RGMII4RXCTL, RGMII4, SIG_DESC_SET(SCU4B4, 3),
+- SIG_DESC_SET(SCU510, 1));
+-PIN_DECL_2(C26, GPIOE3, NDCD4, RGMII4RXCTL);
++ SIG_DESC_CLEAR(SCU470, 19), SIG_DESC_SET(SCU510, 1));
++SIG_EXPR_LIST_DECL_SESG(C26, VPA_R2, VPA, SIG_DESC_CLEAR(SCU414, 3),
++ SIG_DESC_CLEAR(SCU4B4, 3));
++PIN_DECL_3(C26, GPIOE3, NDCD4, RGMII4RXCTL, VPA_R2);
+ FUNC_GROUP_DECL(NDCD4, C26);
+
+ #define C24 36
+-SIG_EXPR_LIST_DECL_SESG(C24, NDSR4, NDSR4, SIG_DESC_SET(SCU414, 4));
++SIG_EXPR_LIST_DECL_SESG(C24, NDSR4, NDSR4, SIG_DESC_SET(SCU414, 4),
++ SIG_DESC_CLEAR(SCU470, 20));
+ SIG_EXPR_LIST_DECL_SESG(C24, RGMII4RXD0, RGMII4, SIG_DESC_SET(SCU4B4, 4),
+- SIG_DESC_SET(SCU510, 1));
++ SIG_DESC_CLEAR(SCU470, 20), SIG_DESC_SET(SCU510, 1));
+ SIG_EXPR_LIST_DECL_SESG(C24, RMII4RXD0, RMII4, SIG_DESC_SET(SCU4B4, 4),
+- SIG_DESC_CLEAR(SCU510, 1));
+-PIN_DECL_3(C24, GPIOE4, NDSR4, RGMII4RXD0, RMII4RXD0);
++ SIG_DESC_CLEAR(SCU470, 20), SIG_DESC_CLEAR(SCU510, 1));
++SIG_EXPR_LIST_DECL_SESG(C24, VPA_R3, VPA, SIG_DESC_CLEAR(SCU414, 4),
++ SIG_DESC_CLEAR(SCU4B4, 4));
++PIN_DECL_4(C24, GPIOE4, NDSR4, RGMII4RXD0, RMII4RXD0, VPA_R3);
+ FUNC_GROUP_DECL(NDSR4, C24);
+
+ #define B26 37
+-SIG_EXPR_LIST_DECL_SESG(B26, NRI4, NRI4, SIG_DESC_SET(SCU414, 5));
++SIG_EXPR_LIST_DECL_SESG(B26, NRI4, NRI4, SIG_DESC_SET(SCU414, 5),
++ SIG_DESC_CLEAR(SCU470, 21));
+ SIG_EXPR_LIST_DECL_SESG(B26, RGMII4RXD1, RGMII4, SIG_DESC_SET(SCU4B4, 5),
+- SIG_DESC_SET(SCU510, 1));
++ SIG_DESC_CLEAR(SCU470, 21), SIG_DESC_SET(SCU510, 1));
+ SIG_EXPR_LIST_DECL_SESG(B26, RMII4RXD1, RMII4, SIG_DESC_SET(SCU4B4, 5),
+- SIG_DESC_CLEAR(SCU510, 1));
+-PIN_DECL_3(B26, GPIOE5, NRI4, RGMII4RXD1, RMII4RXD1);
++ SIG_DESC_CLEAR(SCU470, 21), SIG_DESC_CLEAR(SCU510, 1));
++SIG_EXPR_LIST_DECL_SESG(B26, VPA_R4, VPA, SIG_DESC_CLEAR(SCU414, 5),
++ SIG_DESC_CLEAR(SCU4B4, 5));
++PIN_DECL_4(B26, GPIOE5, NRI4, RGMII4RXD1, RMII4RXD1, VPA_R4);
+ FUNC_GROUP_DECL(NRI4, B26);
+
+ #define B25 38
+-SIG_EXPR_LIST_DECL_SESG(B25, NDTR4, NDTR4, SIG_DESC_SET(SCU414, 6));
++SIG_EXPR_LIST_DECL_SESG(B25, NDTR4, NDTR4, SIG_DESC_SET(SCU414, 6),
++ SIG_DESC_CLEAR(SCU470, 22));
+ SIG_EXPR_LIST_DECL_SESG(B25, RGMII4RXD2, RGMII4, SIG_DESC_SET(SCU4B4, 6),
+- SIG_DESC_SET(SCU510, 1));
++ SIG_DESC_CLEAR(SCU470, 22), SIG_DESC_SET(SCU510, 1));
+ SIG_EXPR_LIST_DECL_SESG(B25, RMII4CRSDV, RMII4, SIG_DESC_SET(SCU4B4, 6),
+- SIG_DESC_CLEAR(SCU510, 1));
+-PIN_DECL_3(B25, GPIOE6, NDTR4, RGMII4RXD2, RMII4CRSDV);
++ SIG_DESC_CLEAR(SCU470, 22), SIG_DESC_CLEAR(SCU510, 1));
++SIG_EXPR_LIST_DECL_SESG(B25, VPA_R5, VPA, SIG_DESC_CLEAR(SCU414, 6),
++ SIG_DESC_CLEAR(SCU4B4, 6));
++PIN_DECL_4(B25, GPIOE6, NDTR4, RGMII4RXD2, RMII4CRSDV, VPA_R5);
+ FUNC_GROUP_DECL(NDTR4, B25);
+
+ #define B24 39
+-SIG_EXPR_LIST_DECL_SESG(B24, NRTS4, NRTS4, SIG_DESC_SET(SCU414, 7));
++SIG_EXPR_LIST_DECL_SESG(B24, NRTS4, NRTS4, SIG_DESC_SET(SCU414, 7),
++ SIG_DESC_CLEAR(SCU470, 23));
+ SIG_EXPR_LIST_DECL_SESG(B24, RGMII4RXD3, RGMII4, SIG_DESC_SET(SCU4B4, 7),
+- SIG_DESC_SET(SCU510, 1));
++ SIG_DESC_CLEAR(SCU470, 23), SIG_DESC_SET(SCU510, 1));
+ SIG_EXPR_LIST_DECL_SESG(B24, RMII4RXER, RMII4, SIG_DESC_SET(SCU4B4, 7),
+- SIG_DESC_CLEAR(SCU510, 1));
+-PIN_DECL_3(B24, GPIOE7, NRTS4, RGMII4RXD3, RMII4RXER);
++ SIG_DESC_CLEAR(SCU470, 23), SIG_DESC_CLEAR(SCU510, 1));
++SIG_EXPR_LIST_DECL_SESG(B24, VPA_R6, VPA, SIG_DESC_CLEAR(SCU414, 7),
++ SIG_DESC_CLEAR(SCU4B4, 7));
++PIN_DECL_4(B24, GPIOE7, NRTS4, RGMII4RXD3, RMII4RXER, VPA_R6);
+ FUNC_GROUP_DECL(NRTS4, B24);
+
+ FUNC_GROUP_DECL(RGMII4, F24, E23, E24, E25, D26, D24, C25, C26, C24, B26, B25,
+ B24);
+-FUNC_GROUP_DECL(RMII4, F24, E23, E24, E25, C25, C24, B26, B25, B24);
++GROUP_DECL(RMII4, F24, E23, E24, E25, C25, C24, B26, B25, B24);
++GROUP_DECL(NCSI4, E23, E24, E25, C25, C24, B26, B25, B24);
++FUNC_DECL_2(RMII4, RMII4, NCSI4);
+
+ #define D22 40
+ SIG_EXPR_LIST_DECL_SESG(D22, SD1CLK, SD1, SIG_DESC_SET(SCU414, 8));
+ SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU4B4, 8));
+-PIN_DECL_2(D22, GPIOF0, SD1CLK, PWM8);
++SIG_EXPR_LIST_DECL_SESG(D22, VPA_R7, VPA, SIG_DESC_CLEAR(SCU414, 8),
++ SIG_DESC_CLEAR(SCU4B4, 8));
++PIN_DECL_3(D22, GPIOF0, SD1CLK, PWM8, VPA_R7);
+ GROUP_DECL(PWM8G0, D22);
+
+ #define E22 41
+ SIG_EXPR_LIST_DECL_SESG(E22, SD1CMD, SD1, SIG_DESC_SET(SCU414, 9));
+ SIG_EXPR_LIST_DECL_SEMG(E22, PWM9, PWM9G0, PWM9, SIG_DESC_SET(SCU4B4, 9));
+-PIN_DECL_2(E22, GPIOF1, SD1CMD, PWM9);
++SIG_EXPR_LIST_DECL_SESG(E22, VPAHS, VPA, SIG_DESC_CLEAR(SCU414, 9),
++ SIG_DESC_CLEAR(SCU4B4, 9));
++PIN_DECL_3(E22, GPIOF1, SD1CMD, PWM9, VPAHS);
+ GROUP_DECL(PWM9G0, E22);
+
+ #define D23 42
+ SIG_EXPR_LIST_DECL_SESG(D23, SD1DAT0, SD1, SIG_DESC_SET(SCU414, 10));
+ SIG_EXPR_LIST_DECL_SEMG(D23, PWM10, PWM10G0, PWM10, SIG_DESC_SET(SCU4B4, 10));
+-PIN_DECL_2(D23, GPIOF2, SD1DAT0, PWM10);
++SIG_EXPR_LIST_DECL_SESG(D23, VPAVS, VPA, SIG_DESC_CLEAR(SCU414, 10),
++ SIG_DESC_CLEAR(SCU4B4, 10));
++PIN_DECL_3(D23, GPIOF2, SD1DAT0, PWM10, VPAVS);
+ GROUP_DECL(PWM10G0, D23);
+
+ #define C23 43
+ SIG_EXPR_LIST_DECL_SESG(C23, SD1DAT1, SD1, SIG_DESC_SET(SCU414, 11));
+ SIG_EXPR_LIST_DECL_SEMG(C23, PWM11, PWM11G0, PWM11, SIG_DESC_SET(SCU4B4, 11));
+-PIN_DECL_2(C23, GPIOF3, SD1DAT1, PWM11);
++SIG_EXPR_LIST_DECL_SESG(C23, VPADE, VPA, SIG_DESC_CLEAR(SCU414, 11),
++ SIG_DESC_CLEAR(SCU4B4, 11));
++PIN_DECL_3(C23, GPIOF3, SD1DAT1, PWM11, VPADE);
+ GROUP_DECL(PWM11G0, C23);
+
++FUNC_GROUP_DECL(VPA, H24, J22, H22, H23, G22, F22, G23, G24, F23, F26, F25,
++ E26, F24, E23, E24, E25, D26, D24, C25, C26, C24, B26, B25,
++ B24, D22, E22, D23, C23);
++
+ #define C22 44
+ SIG_EXPR_LIST_DECL_SESG(C22, SD1DAT2, SD1, SIG_DESC_SET(SCU414, 12));
+ SIG_EXPR_LIST_DECL_SEMG(C22, PWM12, PWM12G0, PWM12, SIG_DESC_SET(SCU4B4, 12));
+@@ -410,7 +476,7 @@ FUNC_GROUP_DECL(SD1, D22, E22, D23, C23, C22, A25, A24, A23);
+ #define E21 48
+ SIG_EXPR_LIST_DECL_SESG(E21, TXD6, UART6, SIG_DESC_SET(SCU414, 16));
+ SIG_EXPR_LIST_DECL_SESG(E21, SD2CLK, SD2, SIG_DESC_SET(SCU4B4, 16),
+- SIG_DESC_SET(SCU450, 1));
++ SIG_DESC_SET(SCU450, 1));
+ SIG_EXPR_LIST_DECL_SEMG(E21, SALT9, SALT9G0, SALT9, SIG_DESC_SET(SCU694, 16));
+ PIN_DECL_3(E21, GPIOG0, TXD6, SD2CLK, SALT9);
+ GROUP_DECL(SALT9G0, E21);
+@@ -418,7 +484,7 @@ GROUP_DECL(SALT9G0, E21);
+ #define B22 49
+ SIG_EXPR_LIST_DECL_SESG(B22, RXD6, UART6, SIG_DESC_SET(SCU414, 17));
+ SIG_EXPR_LIST_DECL_SESG(B22, SD2CMD, SD2, SIG_DESC_SET(SCU4B4, 17),
+- SIG_DESC_SET(SCU450, 1));
++ SIG_DESC_SET(SCU450, 1));
+ SIG_EXPR_LIST_DECL_SEMG(B22, SALT10, SALT10G0, SALT10,
+ SIG_DESC_SET(SCU694, 17));
+ PIN_DECL_3(B22, GPIOG1, RXD6, SD2CMD, SALT10);
+@@ -429,7 +495,7 @@ FUNC_GROUP_DECL(UART6, E21, B22);
+ #define C21 50
+ SIG_EXPR_LIST_DECL_SESG(C21, TXD7, UART7, SIG_DESC_SET(SCU414, 18));
+ SIG_EXPR_LIST_DECL_SESG(C21, SD2DAT0, SD2, SIG_DESC_SET(SCU4B4, 18),
+- SIG_DESC_SET(SCU450, 1));
++ SIG_DESC_SET(SCU450, 1));
+ SIG_EXPR_LIST_DECL_SEMG(C21, SALT11, SALT11G0, SALT11,
+ SIG_DESC_SET(SCU694, 18));
+ PIN_DECL_3(C21, GPIOG2, TXD7, SD2DAT0, SALT11);
+@@ -438,7 +504,7 @@ GROUP_DECL(SALT11G0, C21);
+ #define A22 51
+ SIG_EXPR_LIST_DECL_SESG(A22, RXD7, UART7, SIG_DESC_SET(SCU414, 19));
+ SIG_EXPR_LIST_DECL_SESG(A22, SD2DAT1, SD2, SIG_DESC_SET(SCU4B4, 19),
+- SIG_DESC_SET(SCU450, 1));
++ SIG_DESC_SET(SCU450, 1));
+ SIG_EXPR_LIST_DECL_SEMG(A22, SALT12, SALT12G0, SALT12,
+ SIG_DESC_SET(SCU694, 19));
+ PIN_DECL_3(A22, GPIOG3, RXD7, SD2DAT1, SALT12);
+@@ -449,7 +515,7 @@ FUNC_GROUP_DECL(UART7, C21, A22);
+ #define A21 52
+ SIG_EXPR_LIST_DECL_SESG(A21, TXD8, UART8, SIG_DESC_SET(SCU414, 20));
+ SIG_EXPR_LIST_DECL_SESG(A21, SD2DAT2, SD2, SIG_DESC_SET(SCU4B4, 20),
+- SIG_DESC_SET(SCU450, 1));
++ SIG_DESC_SET(SCU450, 1));
+ SIG_EXPR_LIST_DECL_SEMG(A21, SALT13, SALT13G0, SALT13,
+ SIG_DESC_SET(SCU694, 20));
+ PIN_DECL_3(A21, GPIOG4, TXD8, SD2DAT2, SALT13);
+@@ -458,7 +524,7 @@ GROUP_DECL(SALT13G0, A21);
+ #define E20 53
+ SIG_EXPR_LIST_DECL_SESG(E20, RXD8, UART8, SIG_DESC_SET(SCU414, 21));
+ SIG_EXPR_LIST_DECL_SESG(E20, SD2DAT3, SD2, SIG_DESC_SET(SCU4B4, 21),
+- SIG_DESC_SET(SCU450, 1));
++ SIG_DESC_SET(SCU450, 1));
+ SIG_EXPR_LIST_DECL_SEMG(E20, SALT14, SALT14G0, SALT14,
+ SIG_DESC_SET(SCU694, 21));
+ PIN_DECL_3(E20, GPIOG5, RXD8, SD2DAT3, SALT14);
+@@ -469,7 +535,7 @@ FUNC_GROUP_DECL(UART8, A21, E20);
+ #define D21 54
+ SIG_EXPR_LIST_DECL_SESG(D21, TXD9, UART9, SIG_DESC_SET(SCU414, 22));
+ SIG_EXPR_LIST_DECL_SESG(D21, SD2CD, SD2, SIG_DESC_SET(SCU4B4, 22),
+- SIG_DESC_SET(SCU450, 1));
++ SIG_DESC_SET(SCU450, 1));
+ SIG_EXPR_LIST_DECL_SEMG(D21, SALT15, SALT15G0, SALT15,
+ SIG_DESC_SET(SCU694, 22));
+ PIN_DECL_3(D21, GPIOG6, TXD9, SD2CD, SALT15);
+@@ -579,116 +645,150 @@ FUNC_GROUP_DECL(BMCINT, A15);
+ FUNC_GROUP_DECL(SIOSCI, A15);
+
+ #define B20 72
+-SIG_EXPR_LIST_DECL_SEMG(B20, I3C3SCL, HVI3C3, I3C3, SIG_DESC_SET(SCU418, 8));
++SIG_EXPR_LIST_DECL_SEMG(B20, I3C3SCL, HVI3C3, I3C3, SIG_DESC_SET(SCU418, 8),
++ SIG_DESC_CLEAR(SCU438, 20));
+ SIG_EXPR_LIST_DECL_SESG(B20, SCL1, I2C1, SIG_DESC_SET(SCU4B8, 8));
+-PIN_DECL_2(B20, GPIOJ0, I3C3SCL, SCL1);
++SIG_EXPR_LIST_DECL_SESG(B20, SSCL1, SI2C1, SIG_DESC_SET(SCU698, 8));
++PIN_DECL_3(B20, GPIOJ0, I3C3SCL, SCL1, SSCL1);
+
+ #define A20 73
+-SIG_EXPR_LIST_DECL_SEMG(A20, I3C3SDA, HVI3C3, I3C3, SIG_DESC_SET(SCU418, 9));
++SIG_EXPR_LIST_DECL_SEMG(A20, I3C3SDA, HVI3C3, I3C3, SIG_DESC_SET(SCU418, 9),
++ SIG_DESC_CLEAR(SCU438, 21));
+ SIG_EXPR_LIST_DECL_SESG(A20, SDA1, I2C1, SIG_DESC_SET(SCU4B8, 9));
+-PIN_DECL_2(A20, GPIOJ1, I3C3SDA, SDA1);
++SIG_EXPR_LIST_DECL_SESG(A20, SSDA1, SI2C1, SIG_DESC_SET(SCU698, 9));
++PIN_DECL_3(A20, GPIOJ1, I3C3SDA, SDA1, SSDA1);
+
+ GROUP_DECL(HVI3C3, B20, A20);
+ FUNC_GROUP_DECL(I2C1, B20, A20);
++FUNC_GROUP_DECL(SI2C1, B20, A20);
+
+ #define E19 74
+-SIG_EXPR_LIST_DECL_SEMG(E19, I3C4SCL, HVI3C4, I3C4, SIG_DESC_SET(SCU418, 10));
++SIG_EXPR_LIST_DECL_SEMG(E19, I3C4SCL, HVI3C4, I3C4, SIG_DESC_SET(SCU418, 10),
++ SIG_DESC_CLEAR(SCU438, 22));
+ SIG_EXPR_LIST_DECL_SESG(E19, SCL2, I2C2, SIG_DESC_SET(SCU4B8, 10));
+-PIN_DECL_2(E19, GPIOJ2, I3C4SCL, SCL2);
++SIG_EXPR_LIST_DECL_SESG(E19, SSCL2, SI2C2, SIG_DESC_SET(SCU698, 10));
++PIN_DECL_3(E19, GPIOJ2, I3C4SCL, SCL2, SSCL2);
+
+ #define D20 75
+-SIG_EXPR_LIST_DECL_SEMG(D20, I3C4SDA, HVI3C4, I3C4, SIG_DESC_SET(SCU418, 11));
++SIG_EXPR_LIST_DECL_SEMG(D20, I3C4SDA, HVI3C4, I3C4, SIG_DESC_SET(SCU418, 11),
++ SIG_DESC_CLEAR(SCU438, 23));
+ SIG_EXPR_LIST_DECL_SESG(D20, SDA2, I2C2, SIG_DESC_SET(SCU4B8, 11));
+-PIN_DECL_2(D20, GPIOJ3, I3C4SDA, SDA2);
++SIG_EXPR_LIST_DECL_SESG(D20, SSDA2, SI2C2, SIG_DESC_SET(SCU698, 11));
++PIN_DECL_3(D20, GPIOJ3, I3C4SDA, SDA2, SSDA2);
+
+ GROUP_DECL(HVI3C4, E19, D20);
+ FUNC_GROUP_DECL(I2C2, E19, D20);
++FUNC_GROUP_DECL(SI2C2, E19, D20);
+
+ #define C19 76
+ SIG_EXPR_LIST_DECL_SESG(C19, I3C5SCL, I3C5, SIG_DESC_SET(SCU418, 12));
+ SIG_EXPR_LIST_DECL_SESG(C19, SCL3, I2C3, SIG_DESC_SET(SCU4B8, 12));
+-PIN_DECL_2(C19, GPIOJ4, I3C5SCL, SCL3);
++SIG_EXPR_LIST_DECL_SESG(C19, SSCL3, SI2C3, SIG_DESC_SET(SCU698, 12));
++PIN_DECL_3(C19, GPIOJ4, I3C5SCL, SCL3, SSCL3);
+
+ #define A19 77
+ SIG_EXPR_LIST_DECL_SESG(A19, I3C5SDA, I3C5, SIG_DESC_SET(SCU418, 13));
+ SIG_EXPR_LIST_DECL_SESG(A19, SDA3, I2C3, SIG_DESC_SET(SCU4B8, 13));
+-PIN_DECL_2(A19, GPIOJ5, I3C5SDA, SDA3);
++SIG_EXPR_LIST_DECL_SESG(A19, SSDA3, SI2C3, SIG_DESC_SET(SCU698, 13));
++PIN_DECL_3(A19, GPIOJ5, I3C5SDA, SDA3, SSDA3);
+
+ FUNC_GROUP_DECL(I3C5, C19, A19);
+ FUNC_GROUP_DECL(I2C3, C19, A19);
++FUNC_GROUP_DECL(SI2C3, C19, A19);
+
+ #define C20 78
+ SIG_EXPR_LIST_DECL_SESG(C20, I3C6SCL, I3C6, SIG_DESC_SET(SCU418, 14));
+ SIG_EXPR_LIST_DECL_SESG(C20, SCL4, I2C4, SIG_DESC_SET(SCU4B8, 14));
+-PIN_DECL_2(C20, GPIOJ6, I3C6SCL, SCL4);
++SIG_EXPR_LIST_DECL_SESG(C20, SSCL4, SI2C4, SIG_DESC_SET(SCU698, 14));
++PIN_DECL_3(C20, GPIOJ6, I3C6SCL, SCL4, SSCL4);
+
+ #define D19 79
+ SIG_EXPR_LIST_DECL_SESG(D19, I3C6SDA, I3C6, SIG_DESC_SET(SCU418, 15));
+ SIG_EXPR_LIST_DECL_SESG(D19, SDA4, I2C4, SIG_DESC_SET(SCU4B8, 15));
+-PIN_DECL_2(D19, GPIOJ7, I3C6SDA, SDA4);
++SIG_EXPR_LIST_DECL_SESG(D19, SSDA4, SI2C4, SIG_DESC_SET(SCU698, 15));
++PIN_DECL_3(D19, GPIOJ7, I3C6SDA, SDA4, SSDA4);
+
+ FUNC_GROUP_DECL(I3C6, C20, D19);
+ FUNC_GROUP_DECL(I2C4, C20, D19);
++FUNC_GROUP_DECL(SI2C4, C20, D19);
+
+ #define A11 80
+ SIG_EXPR_LIST_DECL_SESG(A11, SCL5, I2C5, SIG_DESC_SET(SCU418, 16));
+-PIN_DECL_1(A11, GPIOK0, SCL5);
++SIG_EXPR_LIST_DECL_SESG(A11, SSCL5, SI2C5, SIG_DESC_SET(SCU4B8, 16));
++PIN_DECL_2(A11, GPIOK0, SCL5, SSCL5);
+
+ #define C11 81
+ SIG_EXPR_LIST_DECL_SESG(C11, SDA5, I2C5, SIG_DESC_SET(SCU418, 17));
+-PIN_DECL_1(C11, GPIOK1, SDA5);
++SIG_EXPR_LIST_DECL_SESG(C11, SSDA5, SI2C5, SIG_DESC_SET(SCU4B8, 17));
++PIN_DECL_2(C11, GPIOK1, SDA5, SSDA5);
+
+ FUNC_GROUP_DECL(I2C5, A11, C11);
++FUNC_GROUP_DECL(SI2C5, A11, C11);
+
+ #define D12 82
+ SIG_EXPR_LIST_DECL_SESG(D12, SCL6, I2C6, SIG_DESC_SET(SCU418, 18));
+-PIN_DECL_1(D12, GPIOK2, SCL6);
++SIG_EXPR_LIST_DECL_SESG(D12, SSCL6, SI2C6, SIG_DESC_SET(SCU4B8, 18));
++PIN_DECL_2(D12, GPIOK2, SCL6, SSCL6);
+
+ #define E13 83
+ SIG_EXPR_LIST_DECL_SESG(E13, SDA6, I2C6, SIG_DESC_SET(SCU418, 19));
+-PIN_DECL_1(E13, GPIOK3, SDA6);
++SIG_EXPR_LIST_DECL_SESG(E13, SSDA6, SI2C6, SIG_DESC_SET(SCU4B8, 19));
++PIN_DECL_2(E13, GPIOK3, SDA6, SSDA6);
+
+ FUNC_GROUP_DECL(I2C6, D12, E13);
++FUNC_GROUP_DECL(SI2C6, D12, E13);
+
+ #define D11 84
+ SIG_EXPR_LIST_DECL_SESG(D11, SCL7, I2C7, SIG_DESC_SET(SCU418, 20));
+-PIN_DECL_1(D11, GPIOK4, SCL7);
++SIG_EXPR_LIST_DECL_SESG(D11, SSCL7, SI2C7, SIG_DESC_SET(SCU4B8, 20));
++PIN_DECL_2(D11, GPIOK4, SCL7, SSCL7);
+
+ #define E11 85
+ SIG_EXPR_LIST_DECL_SESG(E11, SDA7, I2C7, SIG_DESC_SET(SCU418, 21));
+-PIN_DECL_1(E11, GPIOK5, SDA7);
++SIG_EXPR_LIST_DECL_SESG(E11, SSDA7, SI2C7, SIG_DESC_SET(SCU4B8, 21));
++PIN_DECL_2(E11, GPIOK5, SDA7, SSDA7);
+
+ FUNC_GROUP_DECL(I2C7, D11, E11);
++FUNC_GROUP_DECL(SI2C7, D11, E11);
+
+ #define F13 86
+ SIG_EXPR_LIST_DECL_SESG(F13, SCL8, I2C8, SIG_DESC_SET(SCU418, 22));
+-PIN_DECL_1(F13, GPIOK6, SCL8);
++SIG_EXPR_LIST_DECL_SESG(F13, SSCL8, SI2C8, SIG_DESC_SET(SCU4B8, 22));
++PIN_DECL_2(F13, GPIOK6, SCL8, SSCL8);
+
+ #define E12 87
+ SIG_EXPR_LIST_DECL_SESG(E12, SDA8, I2C8, SIG_DESC_SET(SCU418, 23));
+-PIN_DECL_1(E12, GPIOK7, SDA8);
++SIG_EXPR_LIST_DECL_SESG(E12, SSDA8, SI2C8, SIG_DESC_SET(SCU4B8, 23));
++PIN_DECL_2(E12, GPIOK7, SDA8, SSDA8);
+
+ FUNC_GROUP_DECL(I2C8, F13, E12);
++FUNC_GROUP_DECL(SI2C8, F13, E12);
+
+ #define D15 88
+ SIG_EXPR_LIST_DECL_SESG(D15, SCL9, I2C9, SIG_DESC_SET(SCU418, 24));
+-PIN_DECL_1(D15, GPIOL0, SCL9);
++SIG_EXPR_LIST_DECL_SESG(D15, SSCL9, SI2C9, SIG_DESC_SET(SCU4B8, 24));
++PIN_DECL_2(D15, GPIOL0, SCL9, SSCL9);
+
+ #define A14 89
+ SIG_EXPR_LIST_DECL_SESG(A14, SDA9, I2C9, SIG_DESC_SET(SCU418, 25));
+-PIN_DECL_1(A14, GPIOL1, SDA9);
++SIG_EXPR_LIST_DECL_SESG(A14, SSDA9, SI2C9, SIG_DESC_SET(SCU4B8, 25));
++PIN_DECL_2(A14, GPIOL1, SDA9, SSDA9);
+
+ FUNC_GROUP_DECL(I2C9, D15, A14);
++FUNC_GROUP_DECL(SI2C9, D15, A14);
+
+ #define E15 90
+ SIG_EXPR_LIST_DECL_SESG(E15, SCL10, I2C10, SIG_DESC_SET(SCU418, 26));
+-PIN_DECL_1(E15, GPIOL2, SCL10);
++SIG_EXPR_LIST_DECL_SESG(E15, SSCL10, SI2C10, SIG_DESC_SET(SCU4B8, 26));
++PIN_DECL_2(E15, GPIOL2, SCL10, SSCL10);
+
+ #define A13 91
+ SIG_EXPR_LIST_DECL_SESG(A13, SDA10, I2C10, SIG_DESC_SET(SCU418, 27));
+-PIN_DECL_1(A13, GPIOL3, SDA10);
++SIG_EXPR_LIST_DECL_SESG(A13, SSDA10, SI2C10, SIG_DESC_SET(SCU4B8, 27));
++PIN_DECL_2(A13, GPIOL3, SDA10, SSDA10);
+
+ FUNC_GROUP_DECL(I2C10, E15, A13);
++FUNC_GROUP_DECL(SI2C10, E15, A13);
+
+ #define C15 92
+ SSSF_PIN_DECL(C15, GPIOL4, TXD3, SIG_DESC_SET(SCU418, 28));
+@@ -983,9 +1083,8 @@ FUNC_GROUP_DECL(ADC7, AE18);
+
+ #define AB16 160
+ SIG_EXPR_LIST_DECL_SEMG(AB16, SALT9, SALT9G1, SALT9, SIG_DESC_SET(SCU434, 0),
+- SIG_DESC_CLEAR(SCU694, 16));
+-SIG_EXPR_LIST_DECL_SESG(AB16, GPIU0, GPIU0, SIG_DESC_SET(SCU434, 0),
+- SIG_DESC_SET(SCU694, 16));
++ SIG_DESC_CLEAR(SCU694, 16), SIG_DESC_SET(SCU4D4, 0));
++SIG_EXPR_LIST_DECL_SESG(AB16, GPIU0, GPIU0, SIG_DESC_SET(SCU434, 0));
+ SIG_EXPR_LIST_DECL_SESG(AB16, ADC8, ADC8);
+ PIN_DECL_(AB16, SIG_EXPR_LIST_PTR(AB16, SALT9), SIG_EXPR_LIST_PTR(AB16, GPIU0),
+ SIG_EXPR_LIST_PTR(AB16, ADC8));
+@@ -996,9 +1095,8 @@ FUNC_GROUP_DECL(ADC8, AB16);
+
+ #define AA17 161
+ SIG_EXPR_LIST_DECL_SEMG(AA17, SALT10, SALT10G1, SALT10, SIG_DESC_SET(SCU434, 1),
+- SIG_DESC_CLEAR(SCU694, 17));
+-SIG_EXPR_LIST_DECL_SESG(AA17, GPIU1, GPIU1, SIG_DESC_SET(SCU434, 1),
+- SIG_DESC_SET(SCU694, 17));
++ SIG_DESC_CLEAR(SCU694, 17), SIG_DESC_SET(SCU4D4, 1));
++SIG_EXPR_LIST_DECL_SESG(AA17, GPIU1, GPIU1, SIG_DESC_SET(SCU434, 1));
+ SIG_EXPR_LIST_DECL_SESG(AA17, ADC9, ADC9);
+ PIN_DECL_(AA17, SIG_EXPR_LIST_PTR(AA17, SALT10), SIG_EXPR_LIST_PTR(AA17, GPIU1),
+ SIG_EXPR_LIST_PTR(AA17, ADC9));
+@@ -1009,9 +1107,8 @@ FUNC_GROUP_DECL(ADC9, AA17);
+
+ #define AB17 162
+ SIG_EXPR_LIST_DECL_SEMG(AB17, SALT11, SALT11G1, SALT11, SIG_DESC_SET(SCU434, 2),
+- SIG_DESC_CLEAR(SCU694, 18));
+-SIG_EXPR_LIST_DECL_SESG(AB17, GPIU2, GPIU2, SIG_DESC_SET(SCU434, 2),
+- SIG_DESC_SET(SCU694, 18));
++ SIG_DESC_CLEAR(SCU694, 18), SIG_DESC_SET(SCU4D4, 2));
++SIG_EXPR_LIST_DECL_SESG(AB17, GPIU2, GPIU2, SIG_DESC_SET(SCU434, 2));
+ SIG_EXPR_LIST_DECL_SESG(AB17, ADC10, ADC10);
+ PIN_DECL_(AB17, SIG_EXPR_LIST_PTR(AB17, SALT11), SIG_EXPR_LIST_PTR(AB17, GPIU2),
+ SIG_EXPR_LIST_PTR(AB17, ADC10));
+@@ -1022,9 +1119,8 @@ FUNC_GROUP_DECL(ADC10, AB17);
+
+ #define AE16 163
+ SIG_EXPR_LIST_DECL_SEMG(AE16, SALT12, SALT12G1, SALT12, SIG_DESC_SET(SCU434, 3),
+- SIG_DESC_CLEAR(SCU694, 19));
+-SIG_EXPR_LIST_DECL_SESG(AE16, GPIU3, GPIU3, SIG_DESC_SET(SCU434, 3),
+- SIG_DESC_SET(SCU694, 19));
++ SIG_DESC_CLEAR(SCU694, 19), SIG_DESC_SET(SCU4D4, 3));
++SIG_EXPR_LIST_DECL_SESG(AE16, GPIU3, GPIU3, SIG_DESC_SET(SCU434, 3));
+ SIG_EXPR_LIST_DECL_SESG(AE16, ADC11, ADC11);
+ PIN_DECL_(AE16, SIG_EXPR_LIST_PTR(AE16, SALT12), SIG_EXPR_LIST_PTR(AE16, GPIU3),
+ SIG_EXPR_LIST_PTR(AE16, ADC11));
+@@ -1035,9 +1131,8 @@ FUNC_GROUP_DECL(ADC11, AE16);
+
+ #define AC16 164
+ SIG_EXPR_LIST_DECL_SEMG(AC16, SALT13, SALT13G1, SALT13, SIG_DESC_SET(SCU434, 4),
+- SIG_DESC_CLEAR(SCU694, 20));
+-SIG_EXPR_LIST_DECL_SESG(AC16, GPIU4, GPIU4, SIG_DESC_SET(SCU434, 4),
+- SIG_DESC_SET(SCU694, 20));
++ SIG_DESC_CLEAR(SCU694, 20), SIG_DESC_SET(SCU4D4, 4));
++SIG_EXPR_LIST_DECL_SESG(AC16, GPIU4, GPIU4, SIG_DESC_SET(SCU434, 4));
+ SIG_EXPR_LIST_DECL_SESG(AC16, ADC12, ADC12);
+ PIN_DECL_(AC16, SIG_EXPR_LIST_PTR(AC16, SALT13), SIG_EXPR_LIST_PTR(AC16, GPIU4),
+ SIG_EXPR_LIST_PTR(AC16, ADC12));
+@@ -1048,9 +1143,8 @@ FUNC_GROUP_DECL(ADC12, AC16);
+
+ #define AA16 165
+ SIG_EXPR_LIST_DECL_SEMG(AA16, SALT14, SALT14G1, SALT14, SIG_DESC_SET(SCU434, 5),
+- SIG_DESC_CLEAR(SCU694, 21));
+-SIG_EXPR_LIST_DECL_SESG(AA16, GPIU5, GPIU5, SIG_DESC_SET(SCU434, 5),
+- SIG_DESC_SET(SCU694, 21));
++ SIG_DESC_CLEAR(SCU694, 21), SIG_DESC_SET(SCU4D4, 5));
++SIG_EXPR_LIST_DECL_SESG(AA16, GPIU5, GPIU5, SIG_DESC_SET(SCU434, 5));
+ SIG_EXPR_LIST_DECL_SESG(AA16, ADC13, ADC13);
+ PIN_DECL_(AA16, SIG_EXPR_LIST_PTR(AA16, SALT14), SIG_EXPR_LIST_PTR(AA16, GPIU5),
+ SIG_EXPR_LIST_PTR(AA16, ADC13));
+@@ -1061,9 +1155,8 @@ FUNC_GROUP_DECL(ADC13, AA16);
+
+ #define AD16 166
+ SIG_EXPR_LIST_DECL_SEMG(AD16, SALT15, SALT15G1, SALT15, SIG_DESC_SET(SCU434, 6),
+- SIG_DESC_CLEAR(SCU694, 22));
+-SIG_EXPR_LIST_DECL_SESG(AD16, GPIU6, GPIU6, SIG_DESC_SET(SCU434, 6),
+- SIG_DESC_SET(SCU694, 22));
++ SIG_DESC_CLEAR(SCU694, 22), SIG_DESC_SET(SCU4D4, 6));
++SIG_EXPR_LIST_DECL_SESG(AD16, GPIU6, GPIU6, SIG_DESC_SET(SCU434, 6));
+ SIG_EXPR_LIST_DECL_SESG(AD16, ADC14, ADC14);
+ PIN_DECL_(AD16, SIG_EXPR_LIST_PTR(AD16, SALT15), SIG_EXPR_LIST_PTR(AD16, GPIU6),
+ SIG_EXPR_LIST_PTR(AD16, ADC14));
+@@ -1074,9 +1167,8 @@ FUNC_GROUP_DECL(ADC14, AD16);
+
+ #define AC17 167
+ SIG_EXPR_LIST_DECL_SEMG(AC17, SALT16, SALT16G1, SALT16, SIG_DESC_SET(SCU434, 7),
+- SIG_DESC_CLEAR(SCU694, 23));
+-SIG_EXPR_LIST_DECL_SESG(AC17, GPIU7, GPIU7, SIG_DESC_SET(SCU434, 7),
+- SIG_DESC_SET(SCU694, 23));
++ SIG_DESC_CLEAR(SCU694, 23), SIG_DESC_SET(SCU4D4, 7));
++SIG_EXPR_LIST_DECL_SESG(AC17, GPIU7, GPIU7, SIG_DESC_SET(SCU434, 7));
+ SIG_EXPR_LIST_DECL_SESG(AC17, ADC15, ADC15);
+ PIN_DECL_(AC17, SIG_EXPR_LIST_PTR(AC17, SALT16), SIG_EXPR_LIST_PTR(AC17, GPIU7),
+ SIG_EXPR_LIST_PTR(AC17, ADC15));
+@@ -1201,7 +1293,7 @@ SIG_EXPR_LIST_DECL_SEMG(AB10, RXD12, UART12G1, UART12,
+ SIG_DESC_SET(SCU4D4, 31));
+ PIN_DECL_2(AB10, GPIOX7, SPI2DQ3, RXD12);
+
+-GROUP_DECL(QSPI2, AE8, AF8, AB9, AD9, AF9, AB10);
++GROUP_DECL(QSPI2, AF9, AB10);
+ FUNC_DECL_2(SPI2, SPI2, QSPI2);
+
+ GROUP_DECL(UART12G1, AF9, AB10);
+@@ -1236,15 +1328,21 @@ FUNC_GROUP_DECL(SALT8, AA12);
+ FUNC_GROUP_DECL(WDTRST4, AA12);
+
+ #define AE12 196
++SIG_EXPR_LIST_DECL_SEMG(AE12, FWSPIDQ2, FWQSPID, FWSPID,
++ SIG_DESC_SET(SCU438, 4));
+ SIG_EXPR_LIST_DECL_SESG(AE12, FWSPIQ2, FWQSPI, SIG_DESC_SET(SCU438, 4));
+ SIG_EXPR_LIST_DECL_SESG(AE12, GPIOY4, GPIOY4);
+-PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIQ2),
++PIN_DECL_(AE12, SIG_EXPR_LIST_PTR(AE12, FWSPIDQ2),
++ SIG_EXPR_LIST_PTR(AE12, FWSPIQ2),
+ SIG_EXPR_LIST_PTR(AE12, GPIOY4));
+
+ #define AF12 197
++SIG_EXPR_LIST_DECL_SEMG(AF12, FWSPIDQ3, FWQSPID, FWSPID,
++ SIG_DESC_SET(SCU438, 5));
+ SIG_EXPR_LIST_DECL_SESG(AF12, FWSPIQ3, FWQSPI, SIG_DESC_SET(SCU438, 5));
+ SIG_EXPR_LIST_DECL_SESG(AF12, GPIOY5, GPIOY5);
+-PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIQ3),
++PIN_DECL_(AF12, SIG_EXPR_LIST_PTR(AF12, FWSPIDQ3),
++ SIG_EXPR_LIST_PTR(AF12, FWSPIQ3),
+ SIG_EXPR_LIST_PTR(AF12, GPIOY5));
+ FUNC_GROUP_DECL(FWQSPI, AE12, AF12);
+
+@@ -1289,7 +1387,7 @@ SIG_EXPR_LIST_DECL_SEMG(AF10, RXD13, UART13G1, UART13,
+ SIG_DESC_CLEAR(SCU4B8, 3), SIG_DESC_SET(SCU4D8, 15));
+ PIN_DECL_2(AF10, GPIOZ7, SPI1DQ3, RXD13);
+
+-GROUP_DECL(QSPI1, AB11, AC11, AA11, AD11, AF10);
++GROUP_DECL(QSPI1, AD11, AF10);
+ FUNC_DECL_2(SPI1, SPI1, QSPI1);
+
+ GROUP_DECL(UART13G1, AD11, AF10);
+@@ -1297,80 +1395,80 @@ FUNC_DECL_2(UART13, UART13G0, UART13G1);
+
+ #define C6 208
+ SIG_EXPR_LIST_DECL_SESG(C6, RGMII1TXCK, RGMII1, SIG_DESC_SET(SCU400, 0),
+- SIG_DESC_SET(SCU500, 6));
++ SIG_DESC_SET(SCU500, 6));
+ SIG_EXPR_LIST_DECL_SESG(C6, RMII1RCLKO, RMII1, SIG_DESC_SET(SCU400, 0),
+- SIG_DESC_CLEAR(SCU500, 6));
++ SIG_DESC_CLEAR(SCU500, 6));
+ PIN_DECL_2(C6, GPIO18A0, RGMII1TXCK, RMII1RCLKO);
+
+ #define D6 209
+ SIG_EXPR_LIST_DECL_SESG(D6, RGMII1TXCTL, RGMII1, SIG_DESC_SET(SCU400, 1),
+- SIG_DESC_SET(SCU500, 6));
++ SIG_DESC_SET(SCU500, 6));
+ SIG_EXPR_LIST_DECL_SESG(D6, RMII1TXEN, RMII1, SIG_DESC_SET(SCU400, 1),
+- SIG_DESC_CLEAR(SCU500, 6));
++ SIG_DESC_CLEAR(SCU500, 6));
+ PIN_DECL_2(D6, GPIO18A1, RGMII1TXCTL, RMII1TXEN);
+
+ #define D5 210
+ SIG_EXPR_LIST_DECL_SESG(D5, RGMII1TXD0, RGMII1, SIG_DESC_SET(SCU400, 2),
+- SIG_DESC_SET(SCU500, 6));
++ SIG_DESC_SET(SCU500, 6));
+ SIG_EXPR_LIST_DECL_SESG(D5, RMII1TXD0, RMII1, SIG_DESC_SET(SCU400, 2),
+- SIG_DESC_CLEAR(SCU500, 6));
++ SIG_DESC_CLEAR(SCU500, 6));
+ PIN_DECL_2(D5, GPIO18A2, RGMII1TXD0, RMII1TXD0);
+
+ #define A3 211
+ SIG_EXPR_LIST_DECL_SESG(A3, RGMII1TXD1, RGMII1, SIG_DESC_SET(SCU400, 3),
+- SIG_DESC_SET(SCU500, 6));
++ SIG_DESC_SET(SCU500, 6));
+ SIG_EXPR_LIST_DECL_SESG(A3, RMII1TXD1, RMII1, SIG_DESC_SET(SCU400, 3),
+- SIG_DESC_CLEAR(SCU500, 6));
++ SIG_DESC_CLEAR(SCU500, 6));
+ PIN_DECL_2(A3, GPIO18A3, RGMII1TXD1, RMII1TXD1);
+
+ #define C5 212
+ SIG_EXPR_LIST_DECL_SESG(C5, RGMII1TXD2, RGMII1, SIG_DESC_SET(SCU400, 4),
+- SIG_DESC_SET(SCU500, 6));
++ SIG_DESC_SET(SCU500, 6));
+ PIN_DECL_1(C5, GPIO18A4, RGMII1TXD2);
+
+ #define E6 213
+ SIG_EXPR_LIST_DECL_SESG(E6, RGMII1TXD3, RGMII1, SIG_DESC_SET(SCU400, 5),
+- SIG_DESC_SET(SCU500, 6));
++ SIG_DESC_SET(SCU500, 6));
+ PIN_DECL_1(E6, GPIO18A5, RGMII1TXD3);
+
+ #define B3 214
+ SIG_EXPR_LIST_DECL_SESG(B3, RGMII1RXCK, RGMII1, SIG_DESC_SET(SCU400, 6),
+- SIG_DESC_SET(SCU500, 6));
++ SIG_DESC_SET(SCU500, 6));
+ SIG_EXPR_LIST_DECL_SESG(B3, RMII1RCLKI, RMII1, SIG_DESC_SET(SCU400, 6),
+- SIG_DESC_CLEAR(SCU500, 6));
++ SIG_DESC_CLEAR(SCU500, 6));
+ PIN_DECL_2(B3, GPIO18A6, RGMII1RXCK, RMII1RCLKI);
+
+ #define A2 215
+ SIG_EXPR_LIST_DECL_SESG(A2, RGMII1RXCTL, RGMII1, SIG_DESC_SET(SCU400, 7),
+- SIG_DESC_SET(SCU500, 6));
++ SIG_DESC_SET(SCU500, 6));
+ PIN_DECL_1(A2, GPIO18A7, RGMII1RXCTL);
+
+ #define B2 216
+ SIG_EXPR_LIST_DECL_SESG(B2, RGMII1RXD0, RGMII1, SIG_DESC_SET(SCU400, 8),
+- SIG_DESC_SET(SCU500, 6));
++ SIG_DESC_SET(SCU500, 6));
+ SIG_EXPR_LIST_DECL_SESG(B2, RMII1RXD0, RMII1, SIG_DESC_SET(SCU400, 8),
+- SIG_DESC_CLEAR(SCU500, 6));
++ SIG_DESC_CLEAR(SCU500, 6));
+ PIN_DECL_2(B2, GPIO18B0, RGMII1RXD0, RMII1RXD0);
+
+ #define B1 217
+ SIG_EXPR_LIST_DECL_SESG(B1, RGMII1RXD1, RGMII1, SIG_DESC_SET(SCU400, 9),
+- SIG_DESC_SET(SCU500, 6));
++ SIG_DESC_SET(SCU500, 6));
+ SIG_EXPR_LIST_DECL_SESG(B1, RMII1RXD1, RMII1, SIG_DESC_SET(SCU400, 9),
+- SIG_DESC_CLEAR(SCU500, 6));
++ SIG_DESC_CLEAR(SCU500, 6));
+ PIN_DECL_2(B1, GPIO18B1, RGMII1RXD1, RMII1RXD1);
+
+ #define C4 218
+ SIG_EXPR_LIST_DECL_SESG(C4, RGMII1RXD2, RGMII1, SIG_DESC_SET(SCU400, 10),
+- SIG_DESC_SET(SCU500, 6));
++ SIG_DESC_SET(SCU500, 6));
+ SIG_EXPR_LIST_DECL_SESG(C4, RMII1CRSDV, RMII1, SIG_DESC_SET(SCU400, 10),
+- SIG_DESC_CLEAR(SCU500, 6));
++ SIG_DESC_CLEAR(SCU500, 6));
+ PIN_DECL_2(C4, GPIO18B2, RGMII1RXD2, RMII1CRSDV);
+
+ #define E5 219
+ SIG_EXPR_LIST_DECL_SESG(E5, RGMII1RXD3, RGMII1, SIG_DESC_SET(SCU400, 11),
+- SIG_DESC_SET(SCU500, 6));
++ SIG_DESC_SET(SCU500, 6));
+ SIG_EXPR_LIST_DECL_SESG(E5, RMII1RXER, RMII1, SIG_DESC_SET(SCU400, 11),
+- SIG_DESC_CLEAR(SCU500, 6));
++ SIG_DESC_CLEAR(SCU500, 6));
+ PIN_DECL_2(E5, GPIO18B3, RGMII1RXD3, RMII1RXER);
+
+ FUNC_GROUP_DECL(RGMII1, C6, D6, D5, A3, C5, E6, B3, A2, B2, B1, C4, E5);
+@@ -1378,80 +1476,80 @@ FUNC_GROUP_DECL(RMII1, C6, D6, D5, A3, B3, B2, B1, C4, E5);
+
+ #define D4 220
+ SIG_EXPR_LIST_DECL_SESG(D4, RGMII2TXCK, RGMII2, SIG_DESC_SET(SCU400, 12),
+- SIG_DESC_SET(SCU500, 7));
++ SIG_DESC_SET(SCU500, 7));
+ SIG_EXPR_LIST_DECL_SESG(D4, RMII2RCLKO, RMII2, SIG_DESC_SET(SCU400, 12),
+- SIG_DESC_CLEAR(SCU500, 7));
++ SIG_DESC_CLEAR(SCU500, 7));
+ PIN_DECL_2(D4, GPIO18B4, RGMII2TXCK, RMII2RCLKO);
+
+ #define C2 221
+ SIG_EXPR_LIST_DECL_SESG(C2, RGMII2TXCTL, RGMII2, SIG_DESC_SET(SCU400, 13),
+- SIG_DESC_SET(SCU500, 7));
++ SIG_DESC_SET(SCU500, 7));
+ SIG_EXPR_LIST_DECL_SESG(C2, RMII2TXEN, RMII2, SIG_DESC_SET(SCU400, 13),
+- SIG_DESC_CLEAR(SCU500, 7));
++ SIG_DESC_CLEAR(SCU500, 7));
+ PIN_DECL_2(C2, GPIO18B5, RGMII2TXCTL, RMII2TXEN);
+
+ #define C1 222
+ SIG_EXPR_LIST_DECL_SESG(C1, RGMII2TXD0, RGMII2, SIG_DESC_SET(SCU400, 14),
+- SIG_DESC_SET(SCU500, 7));
++ SIG_DESC_SET(SCU500, 7));
+ SIG_EXPR_LIST_DECL_SESG(C1, RMII2TXD0, RMII2, SIG_DESC_SET(SCU400, 14),
+- SIG_DESC_CLEAR(SCU500, 7));
++ SIG_DESC_CLEAR(SCU500, 7));
+ PIN_DECL_2(C1, GPIO18B6, RGMII2TXD0, RMII2TXD0);
+
+ #define D3 223
+ SIG_EXPR_LIST_DECL_SESG(D3, RGMII2TXD1, RGMII2, SIG_DESC_SET(SCU400, 15),
+- SIG_DESC_SET(SCU500, 7));
++ SIG_DESC_SET(SCU500, 7));
+ SIG_EXPR_LIST_DECL_SESG(D3, RMII2TXD1, RMII2, SIG_DESC_SET(SCU400, 15),
+- SIG_DESC_CLEAR(SCU500, 7));
++ SIG_DESC_CLEAR(SCU500, 7));
+ PIN_DECL_2(D3, GPIO18B7, RGMII2TXD1, RMII2TXD1);
+
+ #define E4 224
+ SIG_EXPR_LIST_DECL_SESG(E4, RGMII2TXD2, RGMII2, SIG_DESC_SET(SCU400, 16),
+- SIG_DESC_SET(SCU500, 7));
++ SIG_DESC_SET(SCU500, 7));
+ PIN_DECL_1(E4, GPIO18C0, RGMII2TXD2);
+
+ #define F5 225
+ SIG_EXPR_LIST_DECL_SESG(F5, RGMII2TXD3, RGMII2, SIG_DESC_SET(SCU400, 17),
+- SIG_DESC_SET(SCU500, 7));
++ SIG_DESC_SET(SCU500, 7));
+ PIN_DECL_1(F5, GPIO18C1, RGMII2TXD3);
+
+ #define D2 226
+ SIG_EXPR_LIST_DECL_SESG(D2, RGMII2RXCK, RGMII2, SIG_DESC_SET(SCU400, 18),
+- SIG_DESC_SET(SCU500, 7));
++ SIG_DESC_SET(SCU500, 7));
+ SIG_EXPR_LIST_DECL_SESG(D2, RMII2RCLKI, RMII2, SIG_DESC_SET(SCU400, 18),
+- SIG_DESC_CLEAR(SCU500, 7));
++ SIG_DESC_CLEAR(SCU500, 7));
+ PIN_DECL_2(D2, GPIO18C2, RGMII2RXCK, RMII2RCLKI);
+
+ #define E3 227
+ SIG_EXPR_LIST_DECL_SESG(E3, RGMII2RXCTL, RGMII2, SIG_DESC_SET(SCU400, 19),
+- SIG_DESC_SET(SCU500, 7));
++ SIG_DESC_SET(SCU500, 7));
+ PIN_DECL_1(E3, GPIO18C3, RGMII2RXCTL);
+
+ #define D1 228
+ SIG_EXPR_LIST_DECL_SESG(D1, RGMII2RXD0, RGMII2, SIG_DESC_SET(SCU400, 20),
+- SIG_DESC_SET(SCU500, 7));
++ SIG_DESC_SET(SCU500, 7));
+ SIG_EXPR_LIST_DECL_SESG(D1, RMII2RXD0, RMII2, SIG_DESC_SET(SCU400, 20),
+- SIG_DESC_CLEAR(SCU500, 7));
++ SIG_DESC_CLEAR(SCU500, 7));
+ PIN_DECL_2(D1, GPIO18C4, RGMII2RXD0, RMII2RXD0);
+
+ #define F4 229
+ SIG_EXPR_LIST_DECL_SESG(F4, RGMII2RXD1, RGMII2, SIG_DESC_SET(SCU400, 21),
+- SIG_DESC_SET(SCU500, 7));
++ SIG_DESC_SET(SCU500, 7));
+ SIG_EXPR_LIST_DECL_SESG(F4, RMII2RXD1, RMII2, SIG_DESC_SET(SCU400, 21),
+- SIG_DESC_CLEAR(SCU500, 7));
++ SIG_DESC_CLEAR(SCU500, 7));
+ PIN_DECL_2(F4, GPIO18C5, RGMII2RXD1, RMII2RXD1);
+
+ #define E2 230
+ SIG_EXPR_LIST_DECL_SESG(E2, RGMII2RXD2, RGMII2, SIG_DESC_SET(SCU400, 22),
+- SIG_DESC_SET(SCU500, 7));
++ SIG_DESC_SET(SCU500, 7));
+ SIG_EXPR_LIST_DECL_SESG(E2, RMII2CRSDV, RMII2, SIG_DESC_SET(SCU400, 22),
+- SIG_DESC_CLEAR(SCU500, 7));
++ SIG_DESC_CLEAR(SCU500, 7));
+ PIN_DECL_2(E2, GPIO18C6, RGMII2RXD2, RMII2CRSDV);
+
+ #define E1 231
+ SIG_EXPR_LIST_DECL_SESG(E1, RGMII2RXD3, RGMII2, SIG_DESC_SET(SCU400, 23),
+- SIG_DESC_SET(SCU500, 7));
++ SIG_DESC_SET(SCU500, 7));
+ SIG_EXPR_LIST_DECL_SESG(E1, RMII2RXER, RMII2, SIG_DESC_SET(SCU400, 23),
+- SIG_DESC_CLEAR(SCU500, 7));
++ SIG_DESC_CLEAR(SCU500, 7));
+ PIN_DECL_2(E1, GPIO18C7, RGMII2RXD3, RMII2RXER);
+
+ FUNC_GROUP_DECL(RGMII2, D4, C2, C1, D3, E4, F5, D2, E3, D1, F4, E2, E1);
+@@ -1519,8 +1617,9 @@ SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3));
+ PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7);
+
+ GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4);
++GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12);
+ GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4);
+-FUNC_DECL_1(FWSPID, FWSPID);
++FUNC_DECL_2(FWSPID, FWSPID, FWQSPID);
+ FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4);
+ FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
+ /*
+@@ -1528,13 +1627,15 @@ FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
+ * following 4 pins
+ */
+ #define AF25 244
+-SIG_EXPR_LIST_DECL_SEMG(AF25, I3C3SCL, I3C3, I3C3, SIG_DESC_SET(SCU438, 20));
++SIG_EXPR_LIST_DECL_SEMG(AF25, I3C3SCL, I3C3, I3C3, SIG_DESC_SET(SCU438, 20),
++ SIG_DESC_CLEAR(SCU418, 8));
+ SIG_EXPR_LIST_DECL_SESG(AF25, FSI1CLK, FSI1, SIG_DESC_SET(SCU4D8, 20));
+ PIN_DECL_(AF25, SIG_EXPR_LIST_PTR(AF25, I3C3SCL),
+ SIG_EXPR_LIST_PTR(AF25, FSI1CLK));
+
+ #define AE26 245
+-SIG_EXPR_LIST_DECL_SEMG(AE26, I3C3SDA, I3C3, I3C3, SIG_DESC_SET(SCU438, 21));
++SIG_EXPR_LIST_DECL_SEMG(AE26, I3C3SDA, I3C3, I3C3, SIG_DESC_SET(SCU438, 21),
++ SIG_DESC_CLEAR(SCU418, 9));
+ SIG_EXPR_LIST_DECL_SESG(AE26, FSI1DATA, FSI1, SIG_DESC_SET(SCU4D8, 21));
+ PIN_DECL_(AE26, SIG_EXPR_LIST_PTR(AE26, I3C3SDA),
+ SIG_EXPR_LIST_PTR(AE26, FSI1DATA));
+@@ -1544,13 +1645,15 @@ FUNC_DECL_2(I3C3, HVI3C3, I3C3);
+ FUNC_GROUP_DECL(FSI1, AF25, AE26);
+
+ #define AE25 246
+-SIG_EXPR_LIST_DECL_SEMG(AE25, I3C4SCL, I3C4, I3C4, SIG_DESC_SET(SCU438, 22));
++SIG_EXPR_LIST_DECL_SEMG(AE25, I3C4SCL, I3C4, I3C4, SIG_DESC_SET(SCU438, 22),
++ SIG_DESC_CLEAR(SCU418, 10));
+ SIG_EXPR_LIST_DECL_SESG(AE25, FSI2CLK, FSI2, SIG_DESC_SET(SCU4D8, 22));
+ PIN_DECL_(AE25, SIG_EXPR_LIST_PTR(AE25, I3C4SCL),
+ SIG_EXPR_LIST_PTR(AE25, FSI2CLK));
+
+ #define AF24 247
+-SIG_EXPR_LIST_DECL_SEMG(AF24, I3C4SDA, I3C4, I3C4, SIG_DESC_SET(SCU438, 23));
++SIG_EXPR_LIST_DECL_SEMG(AF24, I3C4SDA, I3C4, I3C4, SIG_DESC_SET(SCU438, 23),
++ SIG_DESC_CLEAR(SCU418, 11));
+ SIG_EXPR_LIST_DECL_SESG(AF24, FSI2DATA, FSI2, SIG_DESC_SET(SCU4D8, 23));
+ PIN_DECL_(AF24, SIG_EXPR_LIST_PTR(AF24, I3C4SDA),
+ SIG_EXPR_LIST_PTR(AF24, FSI2DATA));
+@@ -1592,9 +1695,10 @@ SIG_EXPR_LIST_DECL_SEMG(A4, USB2ADPDP, USBA, USB2ADP, USB2ADP_DESC,
+ SIG_DESC_SET(SCUC20, 16));
+ SIG_EXPR_LIST_DECL_SEMG(A4, USB2ADDP, USBA, USB2AD, USB2AD_DESC);
+ SIG_EXPR_LIST_DECL_SEMG(A4, USB2AHDP, USBA, USB2AH, USB2AH_DESC);
+-SIG_EXPR_LIST_DECL_SEMG(A4, USB2AHPDP, USBA, USB2AHP, USB2AHP_DESC);
++SIG_EXPR_LIST_DECL_SEMG(A4, USB2AHPDP, USBA, USB2AHP, USB2AHP_DESC,
++ SIG_DESC_SET(SCUC20, 16));
+ PIN_DECL_(A4, SIG_EXPR_LIST_PTR(A4, USB2ADPDP), SIG_EXPR_LIST_PTR(A4, USB2ADDP),
+- SIG_EXPR_LIST_PTR(A4, USB2AHDP));
++ SIG_EXPR_LIST_PTR(A4, USB2AHDP), SIG_EXPR_LIST_PTR(A4, USB2AHPDP));
+
+ #define B4 253
+ SIG_EXPR_LIST_DECL_SEMG(B4, USB2ADPDN, USBA, USB2ADP, USB2ADP_DESC);
+@@ -1602,7 +1706,7 @@ SIG_EXPR_LIST_DECL_SEMG(B4, USB2ADDN, USBA, USB2AD, USB2AD_DESC);
+ SIG_EXPR_LIST_DECL_SEMG(B4, USB2AHDN, USBA, USB2AH, USB2AH_DESC);
+ SIG_EXPR_LIST_DECL_SEMG(B4, USB2AHPDN, USBA, USB2AHP, USB2AHP_DESC);
+ PIN_DECL_(B4, SIG_EXPR_LIST_PTR(B4, USB2ADPDN), SIG_EXPR_LIST_PTR(B4, USB2ADDN),
+- SIG_EXPR_LIST_PTR(B4, USB2AHDN));
++ SIG_EXPR_LIST_PTR(B4, USB2AHDN), SIG_EXPR_LIST_PTR(B4, USB2AHPDN));
+
+ GROUP_DECL(USBA, A4, B4);
+
+@@ -1631,6 +1735,23 @@ FUNC_DECL_1(USB11BHID, USBB);
+ FUNC_DECL_1(USB2BD, USBB);
+ FUNC_DECL_1(USB2BH, USBB);
+
++/* bit19: Enable RC-L DMA mode
++ * bit23: Enable RC-L DMA decode
++ */
++#define PCIERC0_DESC { ASPEED_IP_SCU, SCUC24, GENMASK(23, 19), 0x1f, 0 }
++
++#define A7 256
++SIG_EXPR_LIST_DECL_SESG(A7, PERST, PCIERC0, SIG_DESC_SET(SCU040, 21),
++ SIG_DESC_CLEAR(SCU0C8, 6), PCIERC0_DESC);
++PIN_DECL_(A7, SIG_EXPR_LIST_PTR(A7, PERST));
++FUNC_GROUP_DECL(PCIERC0, A7);
++
++#define D7 257
++SIG_EXPR_LIST_DECL_SESG(D7, RCRST, PCIERC1, SIG_DESC_SET(SCU040, 19),
++ SIG_DESC_SET(SCU500, 24));
++PIN_DECL_(D7, SIG_EXPR_LIST_PTR(D7, RCRST));
++FUNC_GROUP_DECL(PCIERC1, D7);
++
+ /* Pins, groups and functions are sort(1):ed alphabetically for sanity */
+
+ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
+@@ -1653,6 +1774,7 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
+ ASPEED_PINCTRL_PIN(A3),
+ ASPEED_PINCTRL_PIN(A4),
+ ASPEED_PINCTRL_PIN(A6),
++ ASPEED_PINCTRL_PIN(A7),
+ ASPEED_PINCTRL_PIN(AA11),
+ ASPEED_PINCTRL_PIN(AA12),
+ ASPEED_PINCTRL_PIN(AA16),
+@@ -1801,6 +1923,7 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
+ ASPEED_PINCTRL_PIN(D4),
+ ASPEED_PINCTRL_PIN(D5),
+ ASPEED_PINCTRL_PIN(D6),
++ ASPEED_PINCTRL_PIN(D7),
+ ASPEED_PINCTRL_PIN(E1),
+ ASPEED_PINCTRL_PIN(E11),
+ ASPEED_PINCTRL_PIN(E12),
+@@ -1916,6 +2039,7 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
+ ASPEED_PINCTRL_GROUP(FSI2),
+ ASPEED_PINCTRL_GROUP(FWSPIABR),
+ ASPEED_PINCTRL_GROUP(FWSPID),
++ ASPEED_PINCTRL_GROUP(FWQSPID),
+ ASPEED_PINCTRL_GROUP(FWQSPI),
+ ASPEED_PINCTRL_GROUP(FWSPIWP),
+ ASPEED_PINCTRL_GROUP(GPIT0),
+@@ -1953,6 +2077,16 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
+ ASPEED_PINCTRL_GROUP(I2C7),
+ ASPEED_PINCTRL_GROUP(I2C8),
+ ASPEED_PINCTRL_GROUP(I2C9),
++ ASPEED_PINCTRL_GROUP(SI2C1),
++ ASPEED_PINCTRL_GROUP(SI2C2),
++ ASPEED_PINCTRL_GROUP(SI2C3),
++ ASPEED_PINCTRL_GROUP(SI2C4),
++ ASPEED_PINCTRL_GROUP(SI2C5),
++ ASPEED_PINCTRL_GROUP(SI2C6),
++ ASPEED_PINCTRL_GROUP(SI2C7),
++ ASPEED_PINCTRL_GROUP(SI2C8),
++ ASPEED_PINCTRL_GROUP(SI2C9),
++ ASPEED_PINCTRL_GROUP(SI2C10),
+ ASPEED_PINCTRL_GROUP(I3C1),
+ ASPEED_PINCTRL_GROUP(I3C2),
+ ASPEED_PINCTRL_GROUP(I3C3),
+@@ -1976,6 +2110,8 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
+ ASPEED_PINCTRL_GROUP(MDIO2),
+ ASPEED_PINCTRL_GROUP(MDIO3),
+ ASPEED_PINCTRL_GROUP(MDIO4),
++ ASPEED_PINCTRL_GROUP(NCSI3),
++ ASPEED_PINCTRL_GROUP(NCSI4),
+ ASPEED_PINCTRL_GROUP(NCTS1),
+ ASPEED_PINCTRL_GROUP(NCTS2),
+ ASPEED_PINCTRL_GROUP(NCTS3),
+@@ -2066,6 +2202,8 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
+ ASPEED_PINCTRL_GROUP(SALT9G1),
+ ASPEED_PINCTRL_GROUP(SD1),
+ ASPEED_PINCTRL_GROUP(SD2),
++ ASPEED_PINCTRL_GROUP(PCIERC0),
++ ASPEED_PINCTRL_GROUP(PCIERC1),
+ ASPEED_PINCTRL_GROUP(EMMCG1),
+ ASPEED_PINCTRL_GROUP(EMMCG4),
+ ASPEED_PINCTRL_GROUP(EMMCG8),
+@@ -2125,6 +2263,7 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
+ ASPEED_PINCTRL_GROUP(USBA),
+ ASPEED_PINCTRL_GROUP(USBB),
+ ASPEED_PINCTRL_GROUP(VB),
++ ASPEED_PINCTRL_GROUP(VPA),
+ ASPEED_PINCTRL_GROUP(VGAHS),
+ ASPEED_PINCTRL_GROUP(VGAVS),
+ ASPEED_PINCTRL_GROUP(WDTRST1),
+@@ -2193,6 +2332,16 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
+ ASPEED_PINCTRL_FUNC(I2C7),
+ ASPEED_PINCTRL_FUNC(I2C8),
+ ASPEED_PINCTRL_FUNC(I2C9),
++ ASPEED_PINCTRL_FUNC(SI2C1),
++ ASPEED_PINCTRL_FUNC(SI2C2),
++ ASPEED_PINCTRL_FUNC(SI2C3),
++ ASPEED_PINCTRL_FUNC(SI2C4),
++ ASPEED_PINCTRL_FUNC(SI2C5),
++ ASPEED_PINCTRL_FUNC(SI2C6),
++ ASPEED_PINCTRL_FUNC(SI2C7),
++ ASPEED_PINCTRL_FUNC(SI2C8),
++ ASPEED_PINCTRL_FUNC(SI2C9),
++ ASPEED_PINCTRL_FUNC(SI2C10),
+ ASPEED_PINCTRL_FUNC(I3C1),
+ ASPEED_PINCTRL_FUNC(I3C2),
+ ASPEED_PINCTRL_FUNC(I3C3),
+@@ -2307,6 +2456,8 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
+ ASPEED_PINCTRL_FUNC(SPI2),
+ ASPEED_PINCTRL_FUNC(SPI2CS1),
+ ASPEED_PINCTRL_FUNC(SPI2CS2),
++ ASPEED_PINCTRL_FUNC(PCIERC0),
++ ASPEED_PINCTRL_FUNC(PCIERC1),
+ ASPEED_PINCTRL_FUNC(TACH0),
+ ASPEED_PINCTRL_FUNC(TACH1),
+ ASPEED_PINCTRL_FUNC(TACH10),
+@@ -2347,6 +2498,7 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
+ ASPEED_PINCTRL_FUNC(USB2BD),
+ ASPEED_PINCTRL_FUNC(USB2BH),
+ ASPEED_PINCTRL_FUNC(VB),
++ ASPEED_PINCTRL_FUNC(VPA),
+ ASPEED_PINCTRL_FUNC(VGAHS),
+ ASPEED_PINCTRL_FUNC(VGAVS),
+ ASPEED_PINCTRL_FUNC(WDTRST1),
+@@ -2600,6 +2752,10 @@ static struct aspeed_pin_config aspeed_g6_configs[] = {
+ { PIN_CONFIG_DRIVE_STRENGTH, { AB8, AB8 }, SCU454, GENMASK(27, 26)},
+ /* LAD0 */
+ { PIN_CONFIG_DRIVE_STRENGTH, { AB7, AB7 }, SCU454, GENMASK(25, 24)},
++ /* GPIOF */
++ { PIN_CONFIG_DRIVE_STRENGTH, { D22, A23 }, SCU458, GENMASK(9, 8)},
++ /* GPIOG */
++ { PIN_CONFIG_DRIVE_STRENGTH, { E21, B21 }, SCU458, GENMASK(11, 10)},
+
+ /* MAC3 */
+ { PIN_CONFIG_POWER_SOURCE, { H24, E26 }, SCU458, BIT_MASK(4)},
+@@ -2608,6 +2764,11 @@ static struct aspeed_pin_config aspeed_g6_configs[] = {
+ { PIN_CONFIG_POWER_SOURCE, { F24, B24 }, SCU458, BIT_MASK(5)},
+ { PIN_CONFIG_DRIVE_STRENGTH, { F24, B24 }, SCU458, GENMASK(3, 2)},
+
++ /* GPIOJ */
++ { PIN_CONFIG_DRIVE_STRENGTH, { B20, A20 }, SCU650, BIT_MASK(12)},
++ { PIN_CONFIG_DRIVE_STRENGTH, { E19, D20 }, SCU650, BIT_MASK(13)},
++ { PIN_CONFIG_DRIVE_STRENGTH, { C19, A19 }, SCU650, BIT_MASK(14)},
++ { PIN_CONFIG_DRIVE_STRENGTH, { C20, D19 }, SCU650, BIT_MASK(15)},
+ /* GPIO18E */
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_PULL_DOWN, Y1, Y4, SCU40C, 4),
+ ASPEED_SB_PINCONF(PIN_CONFIG_BIAS_DISABLE, Y1, Y4, SCU40C, 4),
+diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc0.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc0.c
+new file mode 100644
+index 000000000..ce4eeac09
+--- /dev/null
++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc0.c
+@@ -0,0 +1,458 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/bits.h>
++#include <linux/device.h>
++#include <linux/gpio/driver.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/mfd/syscon.h>
++#include <linux/module.h>
++#include <linux/mod_devicetable.h>
++#include <linux/pinctrl/machine.h>
++#include <linux/pinctrl/pinconf.h>
++#include <linux/pinctrl/pinconf-generic.h>
++#include <linux/pinctrl/pinctrl.h>
++#include <linux/pinctrl/pinmux.h>
++#include <linux/platform_device.h>
++#include <linux/property.h>
++#include <linux/regmap.h>
++#include "pinctrl-aspeed.h"
++#include "../pinctrl-utils.h"
++
++#define SCU200 0x200 /* System Reset Control #1 */
++
++#define SCU400 0x400 /* Multi-function Pin Control #1 */
++#define SCU404 0x404 /* Multi-function Pin Control #2 */
++#define SCU408 0x408 /* Multi-function Pin Control #3 */
++#define SCU40C 0x40C /* Multi-function Pin Control #4 */
++#define SCU410 0x410 /* USB Multi-function Control Register */
++#define SCU414 0x414 /* VGA Function Control Register */
++
++#define SCU480 0x480 /* GPIO18D0 IO Control Register */
++#define SCU484 0x484 /* GPIO18D1 IO Control Register */
++#define SCU488 0x488 /* GPIO18D2 IO Control Register */
++#define SCU48C 0x48c /* GPIO18D3 IO Control Register */
++#define SCU490 0x490 /* GPIO18D4 IO Control Register */
++#define SCU494 0x494 /* GPIO18D5 IO Control Register */
++#define SCU498 0x498 /* GPIO18D6 IO Control Register */
++#define SCU49C 0x49c /* GPIO18D7 IO Control Register */
++
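++/*
++ * Pin indices: AC14..AE13 are package balls; the PORT*_U2/U3, *_XHCI,
++ * *_MODE, *_PHY, JTAG_PORT and PCIERC*_PERST entries are SCU mux/routing
++ * selectors exposed as pins (see the SCU410/SCU408/SCU200 references in
++ * the FUNCFG_DESCL tables below).
++ */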
++enum {
++ AC14,
++ AE15,
++ AD14,
++ AE14,
++ AF14,
++ AB13,
++ AB14,
++ AF15,
++ AF13,
++ AC13,
++ AD13,
++ AE13,
++ PORTA_U3, // SCU410[1:0]
++ PORTA_U2, // SCU410[3:2]
++ PORTB_U3, // SCU410[5:4]
++ PORTB_U2, // SCU410[7:6]
++ PORTA_U3_XHCI, // SCU410[9]
++ PORTA_U2_XHCI, // SCU410[9]
++ PORTB_U3_XHCI, // SCU410[10]
++ PORTB_U2_XHCI, // SCU410[10]
++ PORTA_MODE, // SCU410[25:24]
++ PORTB_MODE, // SCU410[29:28]
++ PORTA_U2_PHY,
++ PORTA_U3_PHY,
++ PORTB_U2_PHY,
++ PORTB_U3_PHY,
++ JTAG_PORT,
++ PCIERC0_PERST,
++ PCIERC1_PERST,
++};
++
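++/* Signal groups: each GROUP_DECL() lists the pin indices muxed together. */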
++GROUP_DECL(EMMCG1, AC14, AE15, AD14);
++GROUP_DECL(EMMCG4, AC14, AE15, AD14, AE14, AF14, AB13);
++GROUP_DECL(EMMCG8, AC14, AE15, AD14, AE14, AF14, AB13, AF13, AC13, AD13, AE13);
++GROUP_DECL(EMMCWPN, AF15);
++GROUP_DECL(EMMCCDN, AB14);
++GROUP_DECL(VGADDC, AD13, AE13);
++GROUP_DECL(VB1, AC14, AE15, AD14, AE14);
++GROUP_DECL(VB0, AF15, AB14, AF13, AC13);
++//USB3A
++//xhci: BMC/PCIE, vHub/PHY/EXT port
++GROUP_DECL(USB3AXHD, PORTA_U3, PORTA_U3_XHCI);
++GROUP_DECL(USB3AXHPD, PORTA_U3, PORTA_U3_XHCI);
++GROUP_DECL(USB3AXH, PORTA_U3, PORTA_U3_XHCI, PORTA_U3_PHY);
++GROUP_DECL(USB3AXHP, PORTA_U3, PORTA_U3_XHCI, PORTA_U3_PHY);
++GROUP_DECL(USB3AXH2B, PORTA_U3, PORTA_U3_XHCI, PORTB_U3_PHY);
++GROUP_DECL(USB3AXHP2B, PORTA_U3, PORTA_U3_XHCI, PORTB_U3_PHY);
++
++//USB2A
++//xhci: BMC/PCIE, vHub/PHY/EXT port
++GROUP_DECL(USB2AXHD1, PORTA_U2, PORTA_U2_XHCI);
++GROUP_DECL(USB2AXHPD1, PORTA_U2, PORTA_U2_XHCI);
++GROUP_DECL(USB2AXH, PORTA_U2, PORTA_U2_XHCI, PORTA_U2_PHY);
++GROUP_DECL(USB2AXHP, PORTA_U2, PORTA_U2_XHCI, PORTA_U2_PHY);
++GROUP_DECL(USB2AXH2B, PORTA_U2, PORTA_U2_XHCI, PORTB_U2_PHY);
++GROUP_DECL(USB2AXHP2B, PORTA_U2, PORTA_U2_XHCI, PORTB_U2_PHY);
++// vhub to phy
++GROUP_DECL(USB2AD1, PORTA_U2, PORTA_U2_PHY);
++//ehci
++GROUP_DECL(USB2AHPD0, PORTA_MODE);
++GROUP_DECL(USB2AH, PORTA_MODE, PORTA_U2_PHY);
++GROUP_DECL(USB2AHP, PORTA_MODE, PORTA_U2_PHY);
++GROUP_DECL(USB2AD0, PORTA_MODE, PORTA_U2_PHY);
++
++//USB3B
++//xhci: BMC/PCIE, vHub/PHY/EXT port
++GROUP_DECL(USB3BXHD, PORTB_U3, PORTB_U3_XHCI);
++GROUP_DECL(USB3BXHPD, PORTB_U3, PORTB_U3_XHCI);
++GROUP_DECL(USB3BXH, PORTB_U3, PORTB_U3_XHCI, PORTB_U3_PHY);
++GROUP_DECL(USB3BXHP, PORTB_U3, PORTB_U3_XHCI, PORTB_U3_PHY);
++GROUP_DECL(USB3BXH2A, PORTB_U3, PORTB_U3_XHCI, PORTA_U3_PHY);
++GROUP_DECL(USB3BXHP2A, PORTB_U3, PORTB_U3_XHCI, PORTA_U3_PHY);
++
++//USB2B
++//xhci: BMC/PCIE, vHub/PHY/EXT port
++GROUP_DECL(USB2BXHD1, PORTB_U2, PORTB_U2_XHCI);
++GROUP_DECL(USB2BXHPD1, PORTB_U2, PORTB_U2_XHCI);
++GROUP_DECL(USB2BXH, PORTB_U2, PORTB_U2_XHCI, PORTB_U2_PHY);
++GROUP_DECL(USB2BXHP, PORTB_U2, PORTB_U2_XHCI, PORTB_U2_PHY);
++GROUP_DECL(USB2BXH2A, PORTB_U2, PORTB_U2_XHCI, PORTA_U2_PHY);
++GROUP_DECL(USB2BXHP2A, PORTB_U2, PORTB_U2_XHCI, PORTA_U2_PHY);
++// vhub to phy
++GROUP_DECL(USB2BD1, PORTB_U2, PORTB_U2_PHY);
++//ehci
++GROUP_DECL(USB2BHPD0, PORTB_MODE);
++GROUP_DECL(USB2BH, PORTB_MODE, PORTB_U2_PHY);
++GROUP_DECL(USB2BHP, PORTB_MODE, PORTB_U2_PHY);
++GROUP_DECL(USB2BD0, PORTB_MODE, PORTB_U2_PHY);
++//JTAG port
++GROUP_DECL(PSP, JTAG_PORT);
++GROUP_DECL(SSP, JTAG_PORT);
++GROUP_DECL(TSP, JTAG_PORT);
++GROUP_DECL(DDR, JTAG_PORT);
++GROUP_DECL(USB3A, JTAG_PORT);
++GROUP_DECL(USB3B, JTAG_PORT);
++GROUP_DECL(PCIEA, JTAG_PORT);
++GROUP_DECL(PCIEB, JTAG_PORT);
++GROUP_DECL(JTAGM0, JTAG_PORT);
++//PCIE RC PERST
++GROUP_DECL(PCIERC0PERST, PCIERC0_PERST);
++GROUP_DECL(PCIERC1PERST, PCIERC1_PERST);
++
++static struct aspeed_pin_group aspeed_g7_soc0_pingroups[] = {
++ ASPEED_PINCTRL_GROUP(EMMCG1),
++ ASPEED_PINCTRL_GROUP(EMMCG4),
++ ASPEED_PINCTRL_GROUP(EMMCG8),
++ ASPEED_PINCTRL_GROUP(EMMCWPN),
++ ASPEED_PINCTRL_GROUP(EMMCCDN),
++ ASPEED_PINCTRL_GROUP(VGADDC),
++ ASPEED_PINCTRL_GROUP(VB1),
++ ASPEED_PINCTRL_GROUP(VB0),
++ ASPEED_PINCTRL_GROUP(USB3AXHD),
++ ASPEED_PINCTRL_GROUP(USB3AXHPD),
++ ASPEED_PINCTRL_GROUP(USB3AXH),
++ ASPEED_PINCTRL_GROUP(USB3AXHP),
++ ASPEED_PINCTRL_GROUP(USB3AXH2B),
++ ASPEED_PINCTRL_GROUP(USB3AXHP2B),
++ ASPEED_PINCTRL_GROUP(USB2AXHD1),
++ ASPEED_PINCTRL_GROUP(USB2AXHPD1),
++ ASPEED_PINCTRL_GROUP(USB2AXH),
++ ASPEED_PINCTRL_GROUP(USB2AXHP),
++ ASPEED_PINCTRL_GROUP(USB2AXH2B),
++ ASPEED_PINCTRL_GROUP(USB2AXHP2B),
++ ASPEED_PINCTRL_GROUP(USB2AD1),
++ ASPEED_PINCTRL_GROUP(USB2AHPD0),
++ ASPEED_PINCTRL_GROUP(USB2AH),
++ ASPEED_PINCTRL_GROUP(USB2AHP),
++ ASPEED_PINCTRL_GROUP(USB2AD0),
++ ASPEED_PINCTRL_GROUP(USB3BXHD),
++ ASPEED_PINCTRL_GROUP(USB3BXHPD),
++ ASPEED_PINCTRL_GROUP(USB3BXH),
++ ASPEED_PINCTRL_GROUP(USB3BXHP),
++ ASPEED_PINCTRL_GROUP(USB3BXH2A),
++ ASPEED_PINCTRL_GROUP(USB3BXHP2A),
++ ASPEED_PINCTRL_GROUP(USB2BXHD1),
++ ASPEED_PINCTRL_GROUP(USB2BXHPD1),
++ ASPEED_PINCTRL_GROUP(USB2BXH),
++ ASPEED_PINCTRL_GROUP(USB2BXHP),
++ ASPEED_PINCTRL_GROUP(USB2BXH2A),
++ ASPEED_PINCTRL_GROUP(USB2BXHP2A),
++ ASPEED_PINCTRL_GROUP(USB2BD1),
++ ASPEED_PINCTRL_GROUP(USB2BHPD0),
++ ASPEED_PINCTRL_GROUP(USB2BH),
++ ASPEED_PINCTRL_GROUP(USB2BHP),
++ ASPEED_PINCTRL_GROUP(USB2BD0),
++ ASPEED_PINCTRL_GROUP(PSP),
++ ASPEED_PINCTRL_GROUP(SSP),
++ ASPEED_PINCTRL_GROUP(TSP),
++ ASPEED_PINCTRL_GROUP(DDR),
++ ASPEED_PINCTRL_GROUP(USB3A),
++ ASPEED_PINCTRL_GROUP(USB3B),
++ ASPEED_PINCTRL_GROUP(PCIEA),
++ ASPEED_PINCTRL_GROUP(PCIEB),
++ ASPEED_PINCTRL_GROUP(JTAGM0),
++ ASPEED_PINCTRL_GROUP(PCIERC0PERST),
++ ASPEED_PINCTRL_GROUP(PCIERC1PERST),
++};
++
++FUNC_DECL_(EMMC, "EMMCG1", "EMMCG4", "EMMCG8", "EMMCWPN", "EMMCCDN");
++FUNC_DECL_(VGADDC, "VGADDC");
++FUNC_DECL_(VB, "VB0", "VB1");
++FUNC_DECL_(USB3A, "USB3AXHD", "USB3AXHPD", "USB3AXH", "USB3AXHP", "USB3AXH2B",
++ "USB3AXHP2B");
++FUNC_DECL_(USB2A, "USB2AXHD1", "USB2AXHPD1", "USB2AXH", "USB2AXHP", "USB2AXH2B",
++ "USB2AXHP2B", "USB2AD1", "USB2AHPD0", "USB2AH", "USB2AHP",
++ "USB2AD0");
++FUNC_DECL_(USB3B, "USB3BXHD", "USB3BXHPD", "USB3BXH", "USB3BXHP", "USB3BXH2A",
++ "USB3BXHP2A");
++FUNC_DECL_(USB2B, "USB2BXHD1", "USB2BXHPD1", "USB2BXH", "USB2BXHP", "USB2BXH2A",
++ "USB2BXHP2A", "USB2BD1", "USB2BHPD0", "USB2BH", "USB2BHP",
++ "USB2BD0");
++FUNC_DECL_(JTAG0, "PSP", "SSP", "TSP", "DDR", "USB3A", "USB3B",
++ "PCIEA", "PCIEB", "JTAGM0");
++FUNC_DECL_(PCIERC, "PCIERC0PERST", "PCIERC1PERST");
++
++static struct aspeed_pin_function aspeed_g7_soc0_funcs[] = {
++ ASPEED_PINCTRL_FUNC(EMMC),
++ ASPEED_PINCTRL_FUNC(VGADDC),
++ ASPEED_PINCTRL_FUNC(VB),
++ ASPEED_PINCTRL_FUNC(USB3A),
++ ASPEED_PINCTRL_FUNC(USB2A),
++ ASPEED_PINCTRL_FUNC(USB3B),
++ ASPEED_PINCTRL_FUNC(USB2B),
++ ASPEED_PINCTRL_FUNC(JTAG0),
++ ASPEED_PINCTRL_FUNC(PCIERC),
++};
++
++static const struct pinctrl_pin_desc aspeed_g7_soc0_pins[] = {
++ PINCTRL_PIN(AC14, "AC14"),
++ PINCTRL_PIN(AE15, "AE15"),
++ PINCTRL_PIN(AD14, "AD14"),
++ PINCTRL_PIN(AE14, "AE14"),
++ PINCTRL_PIN(AF14, "AF14"),
++ PINCTRL_PIN(AB13, "AB13"),
++ PINCTRL_PIN(AF15, "AF15"),
++ PINCTRL_PIN(AB14, "AB14"),
++ PINCTRL_PIN(AF13, "AF13"),
++ PINCTRL_PIN(AC13, "AC13"),
++ PINCTRL_PIN(AD13, "AD13"),
++ PINCTRL_PIN(AE13, "AE13"),
++ PINCTRL_PIN(PORTA_U3, "PORTA_U3"),
++ PINCTRL_PIN(PORTA_U2, "PORTA_U2"),
++ PINCTRL_PIN(PORTB_U3, "PORTB_U3"),
++ PINCTRL_PIN(PORTB_U2, "PORTB_U2"),
++ PINCTRL_PIN(PORTA_U3_XHCI, "PORTA_U3_XHCI"),
++ PINCTRL_PIN(PORTA_U2_XHCI, "PORTA_U2_XHCI"),
++ PINCTRL_PIN(PORTB_U3_XHCI, "PORTB_U3_XHCI"),
++ PINCTRL_PIN(PORTB_U2_XHCI, "PORTB_U2_XHCI"),
++ PINCTRL_PIN(PORTA_MODE, "PORTA_MODE"),
++ PINCTRL_PIN(PORTA_U3_PHY, "PORTA_U3_PHY"),
++ PINCTRL_PIN(PORTA_U2_PHY, "PORTA_U2_PHY"),
++ PINCTRL_PIN(PORTB_MODE, "PORTB_MODE"),
++ PINCTRL_PIN(PORTB_U3_PHY, "PORTB_U3_PHY"),
++ PINCTRL_PIN(PORTB_U2_PHY, "PORTB_U2_PHY"),
++ PINCTRL_PIN(JTAG_PORT, "JTAG_PORT"),
++ PINCTRL_PIN(PCIERC0_PERST, "PCIERC0_PERST"),
++ PINCTRL_PIN(PCIERC1_PERST, "PCIERC1_PERST"),
++};
++
++FUNCFG_DESCL(AC14, PIN_CFG(EMMCG1, SCU400, BIT_MASK(0), BIT(0)),
++ PIN_CFG(EMMCG4, SCU400, BIT_MASK(0), BIT(0)),
++ PIN_CFG(EMMCG8, SCU400, BIT_MASK(0), BIT(0)),
++ PIN_CFG(VB1, SCU404, BIT_MASK(0), BIT(0)));
++FUNCFG_DESCL(AE15, PIN_CFG(EMMCG1, SCU400, BIT_MASK(1), BIT(1)),
++ PIN_CFG(EMMCG4, SCU400, BIT_MASK(1), BIT(1)),
++ PIN_CFG(EMMCG8, SCU400, BIT_MASK(1), BIT(1)),
++ PIN_CFG(VB1, SCU404, BIT_MASK(1), BIT(1)));
++FUNCFG_DESCL(AD14, PIN_CFG(EMMCG1, SCU400, BIT_MASK(2), BIT(2)),
++ PIN_CFG(EMMCG4, SCU400, BIT_MASK(2), BIT(2)),
++ PIN_CFG(EMMCG8, SCU400, BIT_MASK(2), BIT(2)),
++ PIN_CFG(VB1, SCU404, BIT_MASK(2), BIT(2)));
++FUNCFG_DESCL(AE14, PIN_CFG(EMMCG4, SCU400, BIT_MASK(3), BIT(3)),
++ PIN_CFG(EMMCG8, SCU400, BIT_MASK(3), BIT(3)),
++ PIN_CFG(VB1, SCU404, BIT_MASK(3), BIT(3)));
++FUNCFG_DESCL(AF14, PIN_CFG(EMMCG4, SCU400, BIT_MASK(4), BIT(4)),
++ PIN_CFG(EMMCG8, SCU400, BIT_MASK(4), BIT(4)));
++FUNCFG_DESCL(AB13, PIN_CFG(EMMCG4, SCU400, BIT_MASK(5), BIT(5)),
++ PIN_CFG(EMMCG8, SCU400, BIT_MASK(5), BIT(5)));
++FUNCFG_DESCL(AB14, PIN_CFG(EMMCCDN, SCU400, BIT_MASK(6), BIT(6)),
++ PIN_CFG(VB0, SCU404, BIT_MASK(6), BIT(6)));
++FUNCFG_DESCL(AF15, PIN_CFG(EMMCWPN, SCU400, BIT_MASK(7), BIT(7)),
++ PIN_CFG(VB0, SCU404, BIT_MASK(7), BIT(7)));
++FUNCFG_DESCL(AF13, PIN_CFG(EMMCG8, SCU400, BIT_MASK(8), BIT(8)),
++ PIN_CFG(VB0, SCU404, BIT_MASK(8), BIT(8)));
++FUNCFG_DESCL(AC13, PIN_CFG(EMMCG8, SCU400, BIT_MASK(9), BIT(9)),
++ PIN_CFG(VB0, SCU404, BIT_MASK(9), BIT(9)));
++FUNCFG_DESCL(AD13, PIN_CFG(EMMCG8, SCU400, BIT_MASK(10), BIT(10)),
++ PIN_CFG(VGADDC, SCU404, BIT_MASK(10), BIT(10)));
++FUNCFG_DESCL(AE13, PIN_CFG(EMMCG8, SCU400, BIT_MASK(11), BIT(11)),
++ PIN_CFG(VGADDC, SCU404, BIT_MASK(11), BIT(11)));
++FUNCFG_DESCL(PORTA_U3, PIN_CFG(USB3AXHD, SCU410, GENMASK(1, 0), 0),
++ PIN_CFG(USB3AXHPD, SCU410, GENMASK(1, 0), 0),
++ PIN_CFG(USB3AXH, SCU410, GENMASK(1, 0), 2),
++ PIN_CFG(USB3AXHP, SCU410, GENMASK(1, 0), 2),
++ PIN_CFG(USB3AXH2B, SCU410, GENMASK(1, 0), 3),
++ PIN_CFG(USB3AXHP2B, SCU410, GENMASK(1, 0), 3));
++FUNCFG_DESCL(PORTA_U2, PIN_CFG(USB2AXHD1, SCU410, GENMASK(3, 2), 0),
++ PIN_CFG(USB2AXHPD1, SCU410, GENMASK(3, 2), 0),
++ PIN_CFG(USB2AXH, SCU410, GENMASK(3, 2), 2 << 2),
++ PIN_CFG(USB2AXHP, SCU410, GENMASK(3, 2), 2 << 2),
++ PIN_CFG(USB2AXH2B, SCU410, GENMASK(3, 2), 3 << 2),
++ PIN_CFG(USB2AXHP2B, SCU410, GENMASK(3, 2), 3 << 2),
++ PIN_CFG(USB2AD1, SCU410, GENMASK(3, 2), 1 << 2));
++FUNCFG_DESCL(PORTB_U3, PIN_CFG(USB3BXHD, SCU410, GENMASK(5, 4), 0),
++ PIN_CFG(USB3BXHPD, SCU410, GENMASK(5, 4), 0),
++ PIN_CFG(USB3BXH, SCU410, GENMASK(5, 4), 2 << 4),
++ PIN_CFG(USB3BXHP, SCU410, GENMASK(5, 4), 2 << 4),
++ PIN_CFG(USB3BXH2A, SCU410, GENMASK(5, 4), 3 << 4),
++ PIN_CFG(USB3BXHP2A, SCU410, GENMASK(5, 4), 3 << 4));
++FUNCFG_DESCL(PORTB_U2, PIN_CFG(USB2BXHD1, SCU410, GENMASK(7, 6), 0),
++ PIN_CFG(USB2BXHPD1, SCU410, GENMASK(7, 6), 0),
++ PIN_CFG(USB2BXH, SCU410, GENMASK(7, 6), 2 << 6),
++ PIN_CFG(USB2BXHP, SCU410, GENMASK(7, 6), 2 << 6),
++ PIN_CFG(USB2BXH2A, SCU410, GENMASK(7, 6), 3 << 6),
++ PIN_CFG(USB2BXHP2A, SCU410, GENMASK(7, 6), 3 << 6),
++ PIN_CFG(USB2BD1, SCU410, GENMASK(7, 6), 1 << 6));
++FUNCFG_DESCL(PORTA_U3_XHCI, PIN_CFG(USB3AXHD, SCU410, BIT_MASK(9), 1 << 9),
++ PIN_CFG(USB3AXHPD, SCU410, BIT_MASK(9), 0),
++ PIN_CFG(USB3AXH, SCU410, BIT_MASK(9), 1 << 9),
++ PIN_CFG(USB3AXHP, SCU410, BIT_MASK(9), 0),
++ PIN_CFG(USB3AXH2B, SCU410, BIT_MASK(9), 1 << 9),
++ PIN_CFG(USB3AXHP2B, SCU410, BIT_MASK(9), 0));
++FUNCFG_DESCL(PORTA_U2_XHCI, PIN_CFG(USB2AXHD1, SCU410, BIT_MASK(9), 1 << 9),
++ PIN_CFG(USB2AXHPD1, SCU410, BIT_MASK(9), 0),
++ PIN_CFG(USB2AXH, SCU410, BIT_MASK(9), 1 << 9),
++ PIN_CFG(USB2AXHP, SCU410, BIT_MASK(9), 0),
++ PIN_CFG(USB2AXH2B, SCU410, BIT_MASK(9), 1 << 9),
++ PIN_CFG(USB2AXHP2B, SCU410, BIT_MASK(9), 0));
++FUNCFG_DESCL(PORTB_U3_XHCI, PIN_CFG(USB3BXHD, SCU410, BIT_MASK(10), 1 << 10),
++ PIN_CFG(USB3BXHPD, SCU410, BIT_MASK(10), 0),
++ PIN_CFG(USB3BXH, SCU410, BIT_MASK(10), 1 << 10),
++ PIN_CFG(USB3BXHP, SCU410, BIT_MASK(10), 0),
++ PIN_CFG(USB3BXH2A, SCU410, BIT_MASK(10), 1 << 10),
++ PIN_CFG(USB3BXHP2A, SCU410, BIT_MASK(10), 0));
++FUNCFG_DESCL(PORTB_U2_XHCI, PIN_CFG(USB2BXHD1, SCU410, BIT_MASK(10), 1 << 10),
++ PIN_CFG(USB2BXHPD1, SCU410, BIT_MASK(10), 0),
++ PIN_CFG(USB2BXH, SCU410, BIT_MASK(10), 1 << 10),
++ PIN_CFG(USB2BXHP, SCU410, BIT_MASK(10), 0),
++ PIN_CFG(USB2BXH2A, SCU410, BIT_MASK(10), 1 << 10),
++ PIN_CFG(USB2BXHP2A, SCU410, BIT_MASK(10), 0));
++FUNCFG_DESCL(PORTA_MODE, PIN_CFG(USB2AHPD0, SCU410, GENMASK(25, 24), 0),
++ PIN_CFG(USB2AH, SCU410, GENMASK(25, 24), 2 << 24),
++ PIN_CFG(USB2AHP, SCU410, GENMASK(25, 24), 3 << 24),
++ PIN_CFG(USB2AD0, SCU410, GENMASK(25, 24), 1 << 24));
++FUNCFG_DESCL(PORTB_MODE, PIN_CFG(USB2BHPD0, SCU410, GENMASK(29, 28), 0),
++ PIN_CFG(USB2BH, SCU410, GENMASK(29, 28), 2 << 28),
++ PIN_CFG(USB2BHP, SCU410, GENMASK(29, 28), 3 << 28),
++ PIN_CFG(USB2BD0, SCU410, GENMASK(29, 28), 1 << 28));
++FUNCFG_DESCL(PORTA_U3_PHY);
++FUNCFG_DESCL(PORTA_U2_PHY);
++FUNCFG_DESCL(PORTB_U3_PHY);
++FUNCFG_DESCL(PORTB_U2_PHY);
++FUNCFG_DESCL(JTAG_PORT, PIN_CFG(PSP, SCU408, GENMASK(12, 5), 0x0 << 5),
++ PIN_CFG(SSP, SCU408, GENMASK(12, 5), 0x41 << 5),
++ PIN_CFG(TSP, SCU408, GENMASK(12, 5), 0x42 << 5),
++ PIN_CFG(DDR, SCU408, GENMASK(12, 5), 0x43 << 5),
++ PIN_CFG(USB3A, SCU408, GENMASK(12, 5), 0x44 << 5),
++ PIN_CFG(USB3B, SCU408, GENMASK(12, 5), 0x45 << 5),
++ PIN_CFG(PCIEA, SCU408, GENMASK(12, 5), 0x46 << 5),
++ PIN_CFG(PCIEB, SCU408, GENMASK(12, 5), 0x47 << 5),
++ PIN_CFG(JTAGM0, SCU408, GENMASK(12, 5), 0x8 << 5));
++FUNCFG_DESCL(PCIERC0_PERST, PIN_CFG(PCIERC0PERST, SCU200, BIT_MASK(21), 1 << 21));
++FUNCFG_DESCL(PCIERC1_PERST, PIN_CFG(PCIERC1PERST, SCU200, BIT_MASK(19), 1 << 19));
++
++static const struct aspeed_g7_pincfg pin_cfg[] = {
++ PINCFG_PIN(AC14), PINCFG_PIN(AE15),
++ PINCFG_PIN(AD14), PINCFG_PIN(AE14),
++ PINCFG_PIN(AF14), PINCFG_PIN(AB13),
++ PINCFG_PIN(AB14), PINCFG_PIN(AF15),
++ PINCFG_PIN(AF13), PINCFG_PIN(AC13),
++ PINCFG_PIN(AD13), PINCFG_PIN(AE13),
++ PINCFG_PIN(PORTA_U3), PINCFG_PIN(PORTA_U2),
++ PINCFG_PIN(PORTB_U3), PINCFG_PIN(PORTB_U2),
++ PINCFG_PIN(PORTA_U3_XHCI), PINCFG_PIN(PORTA_U2_XHCI),
++ PINCFG_PIN(PORTB_U3_XHCI), PINCFG_PIN(PORTB_U2_XHCI),
++ PINCFG_PIN(PORTA_MODE), PINCFG_PIN(PORTB_MODE),
++ PINCFG_PIN(PORTA_U3_PHY), PINCFG_PIN(PORTA_U2_PHY),
++ PINCFG_PIN(PORTB_U3_PHY), PINCFG_PIN(PORTB_U2_PHY),
++ PINCFG_PIN(JTAG_PORT), PINCFG_PIN(PCIERC0_PERST),
++ PINCFG_PIN(PCIERC1_PERST),
++};
++
++static const struct pinctrl_ops aspeed_g7_soc0_pinctrl_ops = {
++ .get_groups_count = aspeed_pinctrl_get_groups_count,
++ .get_group_name = aspeed_pinctrl_get_group_name,
++ .get_group_pins = aspeed_pinctrl_get_group_pins,
++ .pin_dbg_show = aspeed_pinctrl_pin_dbg_show,
++ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
++ .dt_free_map = pinctrl_utils_free_map,
++};
++
++static const struct pinmux_ops aspeed_g7_soc0_pinmux_ops = {
++ .get_functions_count = aspeed_pinmux_get_fn_count,
++ .get_function_name = aspeed_pinmux_get_fn_name,
++ .get_function_groups = aspeed_pinmux_get_fn_groups,
++ .set_mux = aspeed_g7_pinmux_set_mux,
++ .gpio_request_enable = aspeed_g7_gpio_request_enable,
++ .strict = true,
++};
++
++static const struct pinconf_ops aspeed_g7_soc0_pinconf_ops = {
++ .is_generic = true,
++ .pin_config_get = aspeed_pin_config_get,
++ .pin_config_set = aspeed_pin_config_set,
++ .pin_config_group_get = aspeed_pin_config_group_get,
++ .pin_config_group_set = aspeed_pin_config_group_set,
++};
++
++/* pinctrl_desc */
++static struct pinctrl_desc aspeed_g7_soc0_pinctrl_desc = {
++ .name = "aspeed-g7-soc0-pinctrl",
++ .pins = aspeed_g7_soc0_pins,
++ .npins = ARRAY_SIZE(aspeed_g7_soc0_pins),
++ .pctlops = &aspeed_g7_soc0_pinctrl_ops,
++ .pmxops = &aspeed_g7_soc0_pinmux_ops,
++ .confops = &aspeed_g7_soc0_pinconf_ops,
++ .owner = THIS_MODULE,
++};
++
++static struct aspeed_pinctrl_data aspeed_g7_pinctrl_data = {
++ .pins = aspeed_g7_soc0_pins,
++ .npins = ARRAY_SIZE(aspeed_g7_soc0_pins),
++ .pinmux = {
++ .groups = aspeed_g7_soc0_pingroups,
++ .ngroups = ARRAY_SIZE(aspeed_g7_soc0_pingroups),
++ .functions = aspeed_g7_soc0_funcs,
++ .nfunctions = ARRAY_SIZE(aspeed_g7_soc0_funcs),
++ .configs_g7 = pin_cfg,
++ .nconfigs_g7 = ARRAY_SIZE(pin_cfg),
++ },
++};
++
++static int aspeed_g7_soc0_pinctrl_probe(struct platform_device *pdev)
++{
++ return aspeed_pinctrl_probe(pdev, &aspeed_g7_soc0_pinctrl_desc,
++ &aspeed_g7_pinctrl_data);
++}
++
++static const struct of_device_id aspeed_g7_soc0_pinctrl_match[] = {
++ { .compatible = "aspeed,ast2700-soc0-pinctrl" },
++ {}
++};
++MODULE_DEVICE_TABLE(of, aspeed_g7_soc0_pinctrl_match);
++
++static struct platform_driver aspeed_g7_soc0_pinctrl_driver = {
++ .probe = aspeed_g7_soc0_pinctrl_probe,
++ .driver = {
++ .name = "aspeed-g7-soc0-pinctrl",
++ .of_match_table = aspeed_g7_soc0_pinctrl_match,
++ .suppress_bind_attrs = true,
++ },
++};
++
++static int __init aspeed_g7_soc0_pinctrl_register(void)
++{
++ return platform_driver_register(&aspeed_g7_soc0_pinctrl_driver);
++}
++arch_initcall(aspeed_g7_soc0_pinctrl_register);
+diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc1.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc1.c
+new file mode 100644
+index 000000000..e150b14bb
+--- /dev/null
++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc1.c
+@@ -0,0 +1,2292 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/bits.h>
++#include <linux/device.h>
++#include <linux/gpio/driver.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/mfd/syscon.h>
++#include <linux/module.h>
++#include <linux/mod_devicetable.h>
++#include <linux/pinctrl/machine.h>
++#include <linux/pinctrl/pinconf.h>
++#include <linux/pinctrl/pinconf-generic.h>
++#include <linux/pinctrl/pinctrl.h>
++#include <linux/pinctrl/pinmux.h>
++#include <linux/platform_device.h>
++#include <linux/property.h>
++#include "pinctrl-aspeed.h"
++
++#define SCU3B0 0x3B0 /* USB Controller Register */
++#define SCU3B4 0x3B4 /* USB Controller Lock Register */
++#define SCU3B8 0x3B8 /* USB Controller Secure Register #1 */
++#define SCU3BC 0x3BC /* USB Controller Secure Register #2 */
++#define SCU3C0 0x3C0 /* USB Controller Secure Register #3 */
++#define SCU400 0x400 /* Multi-function Pin Control #1 */
++#define SCU404 0x404 /* Multi-function Pin Control #2 */
++#define SCU408 0x408 /* Multi-function Pin Control #3 */
++#define SCU40C 0x40C /* Multi-function Pin Control #4 */
++#define SCU410 0x410 /* Multi-function Pin Control #5 */
++#define SCU414 0x414 /* Multi-function Pin Control #6 */
++#define SCU418 0x418 /* Multi-function Pin Control #7 */
++#define SCU41C 0x41C /* Multi-function Pin Control #8 */
++#define SCU420 0x420 /* Multi-function Pin Control #9 */
++#define SCU424 0x424 /* Multi-function Pin Control #10 */
++#define SCU428 0x428 /* Multi-function Pin Control #11 */
++#define SCU42C 0x42C /* Multi-function Pin Control #12 */
++#define SCU430 0x430 /* Multi-function Pin Control #13 */
++#define SCU434 0x434 /* Multi-function Pin Control #14 */
++#define SCU438 0x438 /* Multi-function Pin Control #15 */
++#define SCU43C 0x43C /* Multi-function Pin Control #16 */
++#define SCU440 0x440 /* Multi-function Pin Control #17 */
++#define SCU444 0x444 /* Multi-function Pin Control #18 */
++#define SCU448 0x448 /* Multi-function Pin Control #19 */
++#define SCU44C 0x44C /* Multi-function Pin Control #20 */
++#define SCU450 0x450 /* Multi-function Pin Control #21 */
++#define SCU454 0x454 /* Multi-function Pin Control #22 */
++#define SCU458 0x458 /* Multi-function Pin Control #23 */
++#define SCU45C 0x45C /* Multi-function Pin Control #24 */
++#define SCU460 0x460 /* Multi-function Pin Control #25 */
++#define SCU464 0x464 /* Multi-function Pin Control #26 */
++#define SCU468 0x468 /* Multi-function Pin Control #27 */
++#define SCU46C 0x46C /* Multi-function Pin Control #28 */
++#define SCU470 0x470 /* Multi-function Pin Control #29 */
++#define SCU474 0x474 /* Multi-function Pin Control #30 */
++#define SCU478 0x478 /* Multi-function Pin Control #31 */
++#define SCU47C 0x47C
++#define SCU4A0 0x4A0 /* Voltage Selection */
++#define SCU4C0 0x4C0 /* Driving Strength #0 A-I */
++#define SCU4C4 0x4C4 /* Driving Strength #1 J-K */
++#define SCU4C8 0x4C8 /* Driving Strength #2 L-M */
++#define SCU4CC 0x4CC /* Driving Strength #3 N-O */
++#define SCU4D0 0x4D0 /* Driving Strength #4 P-Q */
++#define SCU4D4 0x4D4 /* Driving Strength #5 R-S */
++#define SCU4D8 0x4D8 /* Driving Strength #6 T-U */
++#define SCU4DC 0x4DC /* Driving Strength #7 W */
++
++#define SCU908 0x908 /* PCIe RC PERST Pin Control */
++
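++/*
++ * SOC1 pin indices: package balls plus SGMII0, PCIERC2_PERST and the
++ * PORTC/PORTD mode selectors (SCU3B0 fields). The HOLE0-HOLE3 entries
++ * appear to be placeholder slots with no corresponding ball.
++ */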
++enum {
++ C16,
++ C14,
++ C11,
++ D9,
++ F14,
++ D10,
++ C12,
++ C13,
++ AC26,
++ AA25,
++ AB23,
++ U22,
++ V21,
++ N26,
++ P25,
++ N25,
++ V23,
++ W22,
++ AB26,
++ AD26,
++ P26,
++ AE26,
++ AF26,
++ AF25,
++ AE25,
++ AD25,
++ AF23,
++ AF20,
++ AF21,
++ AE21,
++ AE23,
++ AD22,
++ AF17,
++ AA16,
++ Y16,
++ V17,
++ J13,
++ AB16,
++ AC16,
++ AF16,
++ AA15,
++ AB15,
++ AC15,
++ AD15,
++ Y15,
++ AA14,
++ W16,
++ V16,
++ AB18,
++ AC18,
++ K13,
++ AA17,
++ AB17,
++ AD16,
++ AC17,
++ AD17,
++ AE16,
++ AE17,
++ AB24,
++ W26,
++ HOLE0,
++ HOLE1,
++ HOLE2,
++ HOLE3,
++ W25,
++ Y23,
++ Y24,
++ W21,
++ AA23,
++ AC22,
++ AB22,
++ Y21,
++ AE20,
++ AF19,
++ Y22,
++ AA20,
++ AA22,
++ AB20,
++ AF18,
++ AE19,
++ AD20,
++ AC20,
++ AA21,
++ AB21,
++ AC19,
++ AE18,
++ AD19,
++ AD18,
++ U25,
++ U26,
++ Y26,
++ AA24,
++ R25,
++ AA26,
++ R26,
++ Y25,
++ B16,
++ D14,
++ B15,
++ B14,
++ C17,
++ B13,
++ E14,
++ C15,
++ D24,
++ B23,
++ B22,
++ C23,
++ B18,
++ B21,
++ M15,
++ B19,
++ B26,
++ A25,
++ A24,
++ B24,
++ E26,
++ A21,
++ A19,
++ A18,
++ D26,
++ C26,
++ A23,
++ A22,
++ B25,
++ F26,
++ A26,
++ A14,
++ E10,
++ E13,
++ D12,
++ F10,
++ E11,
++ F11,
++ F13,
++ N15,
++ C20,
++ C19,
++ A8,
++ R14,
++ A7,
++ P14,
++ D20,
++ A6,
++ B6,
++ N14,
++ B7,
++ B8,
++ B9,
++ M14,
++ J11,
++ E7,
++ D19,
++ B11,
++ D15,
++ B12,
++ B10,
++ P13,
++ C18,
++ C6,
++ C7,
++ D7,
++ N13,
++ C8,
++ C9,
++ C10,
++ M16,
++ A15,
++ G11,
++ H7,
++ H8,
++ H9,
++ H10,
++ H11,
++ J9,
++ J10,
++ E9,
++ F9,
++ F8,
++ M13,
++ F7,
++ D8,
++ E8,
++ L12,
++ F12,
++ E12,
++ J12,
++ G7,
++ G8,
++ G9,
++ G10,
++ K12,
++ W17,
++ V18,
++ W18,
++ Y17,
++ AA18,
++ AA13,
++ Y18,
++ AA12,
++ W20,
++ V20,
++ Y11,
++ V14,
++ V19,
++ W14,
++ Y20,
++ AB19,
++ U21,
++ T24,
++ V24,
++ V22,
++ T23,
++ AC25,
++ AB25,
++ AC24,
++ SGMII0,
++ PCIERC2_PERST,
++ PORTC_MODE, // SCU3B0[1:0]
++ PORTD_MODE, // SCU3B0[3:2]
++};
++
++GROUP_DECL(ESPI0, B16, D14, B15, B14, C17, B13, E14, C15);
++GROUP_DECL(ESPI1, C16, C14, C11, D9, F14, D10, C12, C13);
++GROUP_DECL(LPC0, AF26, AF25, B16, D14, B15, B14, C17, B13, E14, C15);
++GROUP_DECL(LPC1, C16, C14, C11, D9, F14, D10, C12, C13, AE16, AE17);
++GROUP_DECL(SD, C16, C14, C11, D9, F14, D10, C12, C13);
++GROUP_DECL(VPI, C16, C14, C11, D9, F14, D10, C12, C13, AC26, AA25, AB23, U22,
++ V21, N26, P25, N25, V23, W22, AB26, AD26, P26, AE26, AF26, AF25,
++ AE25, AD25, AF23, AF20, AF21, AE21);
++GROUP_DECL(OSCCLK, C17);
++GROUP_DECL(TACH0, AC26);
++GROUP_DECL(TACH1, AA25);
++GROUP_DECL(TACH2, AB23);
++GROUP_DECL(TACH3, U22);
++GROUP_DECL(THRU0, AC26, AA25);
++GROUP_DECL(THRU1, AB23, U22);
++GROUP_DECL(TACH4, V21);
++GROUP_DECL(TACH5, N26);
++GROUP_DECL(TACH6, P25);
++GROUP_DECL(TACH7, N25);
++GROUP_DECL(NTCS5, V21);
++GROUP_DECL(NDCD5, N26);
++GROUP_DECL(NDSR5, P25);
++GROUP_DECL(NRI5, N25);
++GROUP_DECL(SALT12, AB26);
++GROUP_DECL(SALT13, AD26);
++GROUP_DECL(SALT14, P26);
++GROUP_DECL(SALT15, AE26);
++GROUP_DECL(NDTR5, V23);
++GROUP_DECL(NRTS5, W22);
++GROUP_DECL(NCTS6, AB26);
++GROUP_DECL(NDCD6, AD26);
++GROUP_DECL(NDSR6, P26);
++GROUP_DECL(NRI6, AE26);
++GROUP_DECL(NDTR6, AF26);
++GROUP_DECL(NRTS6, AF25);
++GROUP_DECL(TACH8, V23);
++GROUP_DECL(TACH9, W22);
++GROUP_DECL(TACH10, AB26);
++GROUP_DECL(TACH11, AD26);
++GROUP_DECL(TACH12, P26);
++GROUP_DECL(TACH13, AE26);
++GROUP_DECL(TACH14, AF26);
++GROUP_DECL(TACH15, AF25);
++GROUP_DECL(SPIM0, AE25, AD25, AF23, AF20, AF21, AE21, AE23);
++GROUP_DECL(PWM0, AE25);
++GROUP_DECL(PWM1, AD25);
++GROUP_DECL(PWM2, AF23);
++GROUP_DECL(PWM3, AF20);
++GROUP_DECL(PWM4, AF21);
++GROUP_DECL(PWM5, AE21);
++GROUP_DECL(PWM6, AE23);
++GROUP_DECL(PWM7, AD22);
++GROUP_DECL(SIOPBON0, AE25);
++GROUP_DECL(SIOPBIN0, AD25);
++GROUP_DECL(SIOSCIN0, AF23);
++GROUP_DECL(SIOS3N0, AF20);
++GROUP_DECL(SIOS5N0, AF21);
++GROUP_DECL(SIOPWREQN0, AE21);
++GROUP_DECL(SIOONCTRLN0, AE23);
++GROUP_DECL(SIOPWRGD0, AD22);
++GROUP_DECL(NCTS0, AF17);
++GROUP_DECL(NDCD0, AA16);
++GROUP_DECL(NDSR0, Y16);
++GROUP_DECL(NRI0, V17);
++GROUP_DECL(NDTR0, J13);
++GROUP_DECL(NRTS0, AB16);
++GROUP_DECL(TXD0, AC16);
++GROUP_DECL(RXD0, AF16);
++GROUP_DECL(NCTS1, AA15);
++GROUP_DECL(NDCD1, AB15);
++GROUP_DECL(NDSR1, AC15);
++GROUP_DECL(NRI1, AD15);
++GROUP_DECL(NDTR1, Y15);
++GROUP_DECL(NRTS1, AA14);
++GROUP_DECL(TXD1, W16);
++GROUP_DECL(RXD1, V16);
++GROUP_DECL(TXD2, AB18);
++GROUP_DECL(RXD2, AC18);
++GROUP_DECL(TXD3, K13);
++GROUP_DECL(RXD3, AA17);
++GROUP_DECL(NCTS5, V21);
++GROUP_DECL(TXD5, AB17);
++GROUP_DECL(RXD5, AD16);
++GROUP_DECL(TXD6, AC17);
++GROUP_DECL(RXD6, AD17);
++GROUP_DECL(TXD7, AE16);
++GROUP_DECL(RXD7, AE17);
++GROUP_DECL(TXD8, M15);
++GROUP_DECL(RXD8, B19);
++GROUP_DECL(TXD9, B26);
++GROUP_DECL(RXD9, A25);
++GROUP_DECL(TXD10, A24);
++GROUP_DECL(RXD10, B24);
++GROUP_DECL(TXD11, E26);
++GROUP_DECL(RXD11, A21);
++GROUP_DECL(SPIM1, K13, AA17, AB17, AD16, AC17, AD17, AE16, AE17);
++GROUP_DECL(WDTRST0N, K13);
++GROUP_DECL(WDTRST1N, AA17);
++GROUP_DECL(WDTRST2N, AB17);
++GROUP_DECL(WDTRST3N, AD16);
++GROUP_DECL(WDTRST4N, AC25);
++GROUP_DECL(WDTRST5N, AB25);
++GROUP_DECL(WDTRST6N, AC24);
++GROUP_DECL(WDTRST7N, AB24);
++GROUP_DECL(PWM8, K13);
++GROUP_DECL(PWM9, AA17);
++GROUP_DECL(PWM10, AB17);
++GROUP_DECL(PWM11, AD16);
++GROUP_DECL(PWM12, AC17);
++GROUP_DECL(PWM13, AD17);
++GROUP_DECL(PWM14, AE16);
++GROUP_DECL(PWM15, AE17);
++GROUP_DECL(SALT0, AC17);
++GROUP_DECL(SALT1, AD17);
++GROUP_DECL(SALT2, AC15);
++GROUP_DECL(SALT3, AD15);
++GROUP_DECL(FSI0, AD20, AC20);
++GROUP_DECL(FSI1, AA21, AB21);
++GROUP_DECL(FSI2, AC19, AE18);
++GROUP_DECL(FSI3, AD19, AD18);
++GROUP_DECL(SPIM2, W25, Y23, Y24, W21, AA23, AC22, AB22, Y21);
++GROUP_DECL(SALT4, W17);
++GROUP_DECL(SALT5, V18);
++GROUP_DECL(SALT6, W18);
++GROUP_DECL(SALT7, Y17);
++GROUP_DECL(SALT8, AA18);
++GROUP_DECL(SALT9, AA13);
++GROUP_DECL(SALT10, Y18);
++GROUP_DECL(SALT11, AA12);
++GROUP_DECL(ADC0, W17);
++GROUP_DECL(ADC1, V18);
++GROUP_DECL(ADC2, W18);
++GROUP_DECL(ADC3, Y17);
++GROUP_DECL(ADC4, AA18);
++GROUP_DECL(ADC5, AA13);
++GROUP_DECL(ADC6, Y18);
++GROUP_DECL(ADC7, AA12);
++GROUP_DECL(ADC8, W20);
++GROUP_DECL(ADC9, V20);
++GROUP_DECL(ADC10, Y11);
++GROUP_DECL(ADC11, V14);
++GROUP_DECL(ADC12, V19);
++GROUP_DECL(ADC13, W14);
++GROUP_DECL(ADC14, Y20);
++GROUP_DECL(ADC15, AB19);
++GROUP_DECL(AUXPWRGOOD0, W14);
++GROUP_DECL(AUXPWRGOOD1, Y20);
++GROUP_DECL(SGPM0, U21, T24, V22, T23);
++GROUP_DECL(SGPM1, AC25, AB25, AB24, W26);
++GROUP_DECL(I2C0, G11, H7);
++GROUP_DECL(I2C1, H8, H9);
++GROUP_DECL(I2C2, H10, H11);
++GROUP_DECL(I2C3, J9, J10);
++GROUP_DECL(I2C4, E9, F9);
++GROUP_DECL(I2C5, F8, M13);
++GROUP_DECL(I2C6, F7, D8);
++GROUP_DECL(I2C7, E8, L12);
++GROUP_DECL(I2C8, F12, E12);
++GROUP_DECL(I2C9, J12, G7);
++GROUP_DECL(I2C10, G8, G9);
++GROUP_DECL(I2C11, G10, K12);
++GROUP_DECL(I2C12, AC18, AA17);
++GROUP_DECL(I2C13, AB17, AD16);
++GROUP_DECL(I2C14, AC17, AD17);
++GROUP_DECL(I2C15, AE16, AE17);
++GROUP_DECL(DI2C0, C16, D9);
++GROUP_DECL(DI2C1, C14, F14);
++GROUP_DECL(DI2C2, D10, C12);
++GROUP_DECL(DI2C3, C11, C13);
++GROUP_DECL(DI2C8, U25, U26);
++GROUP_DECL(DI2C9, Y26, AA24);
++GROUP_DECL(DI2C10, R25, AA26);
++GROUP_DECL(DI2C11, R26, Y25);
++GROUP_DECL(DI2C12, W25, Y23);
++GROUP_DECL(DI2C13, Y24, W21);
++GROUP_DECL(DI2C14, AA23, AC22);
++GROUP_DECL(DI2C15, AB22, Y21);
++GROUP_DECL(LTPI_I2C0, G11, H7);
++GROUP_DECL(LTPI_I2C1, H8, H9);
++GROUP_DECL(LTPI_I2C2, H10, H11);
++GROUP_DECL(LTPI_I2C3, J9, J10);
++GROUP_DECL(SIOPBON1, AF17);
++GROUP_DECL(SIOPBIN1, AA16);
++GROUP_DECL(SIOSCIN1, Y16);
++GROUP_DECL(SIOS3N1, V17);
++GROUP_DECL(SIOS5N1, J13);
++GROUP_DECL(SIOPWREQN1, AB16);
++GROUP_DECL(SIOONCTRLN1, AA15);
++GROUP_DECL(SIOPWRGD1, AB15);
++GROUP_DECL(HVI3C12, W25, Y23);
++GROUP_DECL(HVI3C13, Y24, W21);
++GROUP_DECL(HVI3C14, AA23, AC22);
++GROUP_DECL(HVI3C15, AB22, Y21);
++GROUP_DECL(I3C4, AE20, AF19);
++GROUP_DECL(I3C5, Y22, AA20);
++GROUP_DECL(I3C6, AA22, AB20);
++GROUP_DECL(I3C7, AF18, AE19);
++GROUP_DECL(I3C8, AD20, AC20);
++GROUP_DECL(I3C9, AA21, AB21);
++GROUP_DECL(I3C10, AC19, AE18);
++GROUP_DECL(I3C11, AD19, AD18);
++GROUP_DECL(HVI3C0, U25, U26);
++GROUP_DECL(HVI3C1, Y26, AA24);
++GROUP_DECL(HVI3C2, R25, AA26);
++GROUP_DECL(HVI3C3, R26, Y25);
++GROUP_DECL(LTPI, U25, U26, Y26, AA24);
++GROUP_DECL(SPI0, D24, B23, B22);
++GROUP_DECL(QSPI0, C23, B18);
++GROUP_DECL(SPI0CS1, B21);
++GROUP_DECL(SPI0ABR, M15);
++GROUP_DECL(SPI0WPN, B19);
++GROUP_DECL(SPI1, B26, A25, A24);
++GROUP_DECL(QSPI1, B24, E26);
++GROUP_DECL(SPI1CS1, A21);
++GROUP_DECL(SPI1ABR, A19);
++GROUP_DECL(SPI1WPN, A18);
++GROUP_DECL(SPI2, D26, C26, A23, A22);
++GROUP_DECL(QSPI2, B25, F26);
++GROUP_DECL(SPI2CS1, A26);
++GROUP_DECL(THRU2, A19, A18);
++GROUP_DECL(THRU3, B25, F26);
++GROUP_DECL(JTAGM1, D12, F10, E11, F11, F13);
++GROUP_DECL(MDIO0, B9, M14);
++GROUP_DECL(MDIO1, C9, C10);
++GROUP_DECL(MDIO2, E10, E13);
++GROUP_DECL(FWQSPI, M16, A15);
++GROUP_DECL(FWSPIABR, A14);
++GROUP_DECL(FWSPIWPN, N15);
++GROUP_DECL(RGMII0, C20, C19, A8, R14, A7, P14, D20, A6, B6, N14, B7, B8);
++GROUP_DECL(RGMII1, D19, B11, D15, B12, B10, P13, C18, C6, C7, D7, N13, C8);
++GROUP_DECL(RMII0, C20, A8, R14, A7, P14, D20, A6, B6, N14);
++GROUP_DECL(RMII1, D19, D15, B12, B10, P13, C18, C6, C7, D7);
++GROUP_DECL(VGA, J11, E7);
++GROUP_DECL(DSGPM1, D19, B10, C7, D7);
++GROUP_DECL(SGPS, B11, C18, N13, C8);
++GROUP_DECL(I2CF0, F12, E12, J12, G7);
++GROUP_DECL(I2CF1, E9, F9, F8, M13);
++GROUP_DECL(I2CF2, F7, D8, E8, L12);
++GROUP_DECL(CANBUS, G7, G8, G9);
++GROUP_DECL(USBUART, G10, K12);
++GROUP_DECL(HBLED, V24);
++GROUP_DECL(MACLINK0, U21);
++GROUP_DECL(MACLINK1, T24);
++GROUP_DECL(MACLINK2, AC24);
++GROUP_DECL(NCTS2, U21);
++GROUP_DECL(NDCD2, T24);
++GROUP_DECL(NDSR2, V22);
++GROUP_DECL(NRI2, T23);
++GROUP_DECL(NDTR2, AC25);
++GROUP_DECL(NRTS2, AB25);
++GROUP_DECL(SMON0, U21, T24, V22, T23);
++GROUP_DECL(SMON1, AB24, W26, AC25, AB25);
++GROUP_DECL(SGMII, SGMII0);
++//PCIE RC PERST
++GROUP_DECL(PE2SGRSTN, PCIERC2_PERST, E10);
++GROUP_DECL(USB2CUD, PORTC_MODE);
++GROUP_DECL(USB2CD, PORTC_MODE);
++GROUP_DECL(USB2CH, PORTC_MODE);
++GROUP_DECL(USB2CU, PORTC_MODE);
++GROUP_DECL(USB2DD, PORTD_MODE);
++GROUP_DECL(USB2DH, PORTD_MODE);
++
++static struct aspeed_pin_group aspeed_g7_soc1_pingroups[] = {
++ ASPEED_PINCTRL_GROUP(ESPI0),
++ ASPEED_PINCTRL_GROUP(ESPI1),
++ ASPEED_PINCTRL_GROUP(LPC0),
++ ASPEED_PINCTRL_GROUP(LPC1),
++ ASPEED_PINCTRL_GROUP(SD),
++ ASPEED_PINCTRL_GROUP(VPI),
++ ASPEED_PINCTRL_GROUP(OSCCLK),
++ ASPEED_PINCTRL_GROUP(TACH0),
++ ASPEED_PINCTRL_GROUP(TACH1),
++ ASPEED_PINCTRL_GROUP(TACH2),
++ ASPEED_PINCTRL_GROUP(TACH3),
++ ASPEED_PINCTRL_GROUP(THRU0),
++ ASPEED_PINCTRL_GROUP(THRU1),
++ ASPEED_PINCTRL_GROUP(TACH4),
++ ASPEED_PINCTRL_GROUP(TACH5),
++ ASPEED_PINCTRL_GROUP(TACH6),
++ ASPEED_PINCTRL_GROUP(TACH7),
++ ASPEED_PINCTRL_GROUP(NTCS5),
++ ASPEED_PINCTRL_GROUP(NDCD5),
++ ASPEED_PINCTRL_GROUP(NDSR5),
++ ASPEED_PINCTRL_GROUP(NRI5),
++ ASPEED_PINCTRL_GROUP(SALT12),
++ ASPEED_PINCTRL_GROUP(SALT13),
++ ASPEED_PINCTRL_GROUP(SALT14),
++ ASPEED_PINCTRL_GROUP(SALT15),
++ ASPEED_PINCTRL_GROUP(NDTR5),
++ ASPEED_PINCTRL_GROUP(NRTS5),
++ ASPEED_PINCTRL_GROUP(NCTS6),
++ ASPEED_PINCTRL_GROUP(NDCD6),
++ ASPEED_PINCTRL_GROUP(NDSR6),
++ ASPEED_PINCTRL_GROUP(NRI6),
++ ASPEED_PINCTRL_GROUP(NDTR6),
++ ASPEED_PINCTRL_GROUP(NRTS6),
++ ASPEED_PINCTRL_GROUP(TACH8),
++ ASPEED_PINCTRL_GROUP(TACH9),
++ ASPEED_PINCTRL_GROUP(TACH10),
++ ASPEED_PINCTRL_GROUP(TACH11),
++ ASPEED_PINCTRL_GROUP(TACH12),
++ ASPEED_PINCTRL_GROUP(TACH13),
++ ASPEED_PINCTRL_GROUP(TACH14),
++ ASPEED_PINCTRL_GROUP(TACH15),
++ ASPEED_PINCTRL_GROUP(SPIM0),
++ ASPEED_PINCTRL_GROUP(PWM0),
++ ASPEED_PINCTRL_GROUP(PWM1),
++ ASPEED_PINCTRL_GROUP(PWM2),
++ ASPEED_PINCTRL_GROUP(PWM3),
++ ASPEED_PINCTRL_GROUP(PWM4),
++ ASPEED_PINCTRL_GROUP(PWM5),
++ ASPEED_PINCTRL_GROUP(PWM6),
++ ASPEED_PINCTRL_GROUP(PWM7),
++ ASPEED_PINCTRL_GROUP(SIOPBON0),
++ ASPEED_PINCTRL_GROUP(SIOPBIN0),
++ ASPEED_PINCTRL_GROUP(SIOSCIN0),
++ ASPEED_PINCTRL_GROUP(SIOS3N0),
++ ASPEED_PINCTRL_GROUP(SIOS5N0),
++ ASPEED_PINCTRL_GROUP(SIOPWREQN0),
++ ASPEED_PINCTRL_GROUP(SIOONCTRLN0),
++ ASPEED_PINCTRL_GROUP(SIOPWRGD0),
++ ASPEED_PINCTRL_GROUP(NCTS0),
++ ASPEED_PINCTRL_GROUP(NDCD0),
++ ASPEED_PINCTRL_GROUP(NDSR0),
++ ASPEED_PINCTRL_GROUP(NRI0),
++ ASPEED_PINCTRL_GROUP(NDTR0),
++ ASPEED_PINCTRL_GROUP(NRTS0),
++ ASPEED_PINCTRL_GROUP(TXD0),
++ ASPEED_PINCTRL_GROUP(RXD0),
++ ASPEED_PINCTRL_GROUP(NCTS1),
++ ASPEED_PINCTRL_GROUP(NDCD1),
++ ASPEED_PINCTRL_GROUP(NDSR1),
++ ASPEED_PINCTRL_GROUP(NRI1),
++ ASPEED_PINCTRL_GROUP(NDTR1),
++ ASPEED_PINCTRL_GROUP(NRTS1),
++ ASPEED_PINCTRL_GROUP(TXD1),
++ ASPEED_PINCTRL_GROUP(RXD1),
++ ASPEED_PINCTRL_GROUP(TXD2),
++ ASPEED_PINCTRL_GROUP(RXD2),
++ ASPEED_PINCTRL_GROUP(TXD3),
++ ASPEED_PINCTRL_GROUP(RXD3),
++ ASPEED_PINCTRL_GROUP(NCTS5),
++ ASPEED_PINCTRL_GROUP(NDCD5),
++ ASPEED_PINCTRL_GROUP(NDSR5),
++ ASPEED_PINCTRL_GROUP(NRI5),
++ ASPEED_PINCTRL_GROUP(NDTR5),
++ ASPEED_PINCTRL_GROUP(NRTS5),
++ ASPEED_PINCTRL_GROUP(TXD5),
++ ASPEED_PINCTRL_GROUP(RXD5),
++ ASPEED_PINCTRL_GROUP(NCTS6),
++ ASPEED_PINCTRL_GROUP(NDCD6),
++ ASPEED_PINCTRL_GROUP(NDSR6),
++ ASPEED_PINCTRL_GROUP(NRI6),
++ ASPEED_PINCTRL_GROUP(NDTR6),
++ ASPEED_PINCTRL_GROUP(NRTS6),
++ ASPEED_PINCTRL_GROUP(TXD6),
++ ASPEED_PINCTRL_GROUP(RXD6),
++ ASPEED_PINCTRL_GROUP(TXD6),
++ ASPEED_PINCTRL_GROUP(RXD6),
++ ASPEED_PINCTRL_GROUP(TXD7),
++ ASPEED_PINCTRL_GROUP(RXD7),
++ ASPEED_PINCTRL_GROUP(TXD8),
++ ASPEED_PINCTRL_GROUP(RXD8),
++ ASPEED_PINCTRL_GROUP(TXD9),
++ ASPEED_PINCTRL_GROUP(RXD9),
++ ASPEED_PINCTRL_GROUP(TXD10),
++ ASPEED_PINCTRL_GROUP(RXD10),
++ ASPEED_PINCTRL_GROUP(TXD11),
++ ASPEED_PINCTRL_GROUP(RXD11),
++ ASPEED_PINCTRL_GROUP(SPIM1),
++ ASPEED_PINCTRL_GROUP(WDTRST0N),
++ ASPEED_PINCTRL_GROUP(WDTRST1N),
++ ASPEED_PINCTRL_GROUP(WDTRST2N),
++ ASPEED_PINCTRL_GROUP(WDTRST3N),
++ ASPEED_PINCTRL_GROUP(WDTRST4N),
++ ASPEED_PINCTRL_GROUP(WDTRST5N),
++ ASPEED_PINCTRL_GROUP(WDTRST6N),
++ ASPEED_PINCTRL_GROUP(WDTRST7N),
++ ASPEED_PINCTRL_GROUP(PWM8),
++ ASPEED_PINCTRL_GROUP(PWM9),
++ ASPEED_PINCTRL_GROUP(PWM10),
++ ASPEED_PINCTRL_GROUP(PWM11),
++ ASPEED_PINCTRL_GROUP(PWM12),
++ ASPEED_PINCTRL_GROUP(PWM13),
++ ASPEED_PINCTRL_GROUP(PWM14),
++ ASPEED_PINCTRL_GROUP(PWM15),
++ ASPEED_PINCTRL_GROUP(SALT0),
++ ASPEED_PINCTRL_GROUP(SALT1),
++ ASPEED_PINCTRL_GROUP(SALT2),
++ ASPEED_PINCTRL_GROUP(SALT3),
++ ASPEED_PINCTRL_GROUP(FSI0),
++ ASPEED_PINCTRL_GROUP(FSI1),
++ ASPEED_PINCTRL_GROUP(FSI2),
++ ASPEED_PINCTRL_GROUP(FSI3),
++ ASPEED_PINCTRL_GROUP(SPIM2),
++ ASPEED_PINCTRL_GROUP(SALT4),
++ ASPEED_PINCTRL_GROUP(SALT5),
++ ASPEED_PINCTRL_GROUP(SALT6),
++ ASPEED_PINCTRL_GROUP(SALT7),
++ ASPEED_PINCTRL_GROUP(SALT8),
++ ASPEED_PINCTRL_GROUP(SALT9),
++ ASPEED_PINCTRL_GROUP(SALT10),
++ ASPEED_PINCTRL_GROUP(SALT11),
++ ASPEED_PINCTRL_GROUP(ADC0),
++ ASPEED_PINCTRL_GROUP(ADC1),
++ ASPEED_PINCTRL_GROUP(ADC2),
++ ASPEED_PINCTRL_GROUP(ADC3),
++ ASPEED_PINCTRL_GROUP(ADC4),
++ ASPEED_PINCTRL_GROUP(ADC5),
++ ASPEED_PINCTRL_GROUP(ADC6),
++ ASPEED_PINCTRL_GROUP(ADC7),
++ ASPEED_PINCTRL_GROUP(ADC8),
++ ASPEED_PINCTRL_GROUP(ADC9),
++ ASPEED_PINCTRL_GROUP(ADC10),
++ ASPEED_PINCTRL_GROUP(ADC11),
++ ASPEED_PINCTRL_GROUP(ADC12),
++ ASPEED_PINCTRL_GROUP(ADC13),
++ ASPEED_PINCTRL_GROUP(ADC14),
++ ASPEED_PINCTRL_GROUP(ADC15),
++ ASPEED_PINCTRL_GROUP(AUXPWRGOOD0),
++ ASPEED_PINCTRL_GROUP(AUXPWRGOOD1),
++ ASPEED_PINCTRL_GROUP(SGPM0),
++ ASPEED_PINCTRL_GROUP(SGPM1),
++ ASPEED_PINCTRL_GROUP(I2C0),
++ ASPEED_PINCTRL_GROUP(I2C1),
++ ASPEED_PINCTRL_GROUP(I2C2),
++ ASPEED_PINCTRL_GROUP(I2C3),
++ ASPEED_PINCTRL_GROUP(I2C4),
++ ASPEED_PINCTRL_GROUP(I2C5),
++ ASPEED_PINCTRL_GROUP(I2C6),
++ ASPEED_PINCTRL_GROUP(I2C7),
++ ASPEED_PINCTRL_GROUP(I2C8),
++ ASPEED_PINCTRL_GROUP(I2C9),
++ ASPEED_PINCTRL_GROUP(I2C10),
++ ASPEED_PINCTRL_GROUP(I2C11),
++ ASPEED_PINCTRL_GROUP(I2C12),
++ ASPEED_PINCTRL_GROUP(I2C13),
++ ASPEED_PINCTRL_GROUP(I2C14),
++ ASPEED_PINCTRL_GROUP(I2C15),
++ ASPEED_PINCTRL_GROUP(DI2C8),
++ ASPEED_PINCTRL_GROUP(DI2C9),
++ ASPEED_PINCTRL_GROUP(DI2C10),
++ ASPEED_PINCTRL_GROUP(DI2C11),
++ ASPEED_PINCTRL_GROUP(DI2C13),
++ ASPEED_PINCTRL_GROUP(DI2C14),
++ ASPEED_PINCTRL_GROUP(DI2C15),
++ ASPEED_PINCTRL_GROUP(SIOPBON1),
++ ASPEED_PINCTRL_GROUP(SIOPBIN1),
++ ASPEED_PINCTRL_GROUP(SIOSCIN1),
++ ASPEED_PINCTRL_GROUP(SIOS3N1),
++ ASPEED_PINCTRL_GROUP(SIOS5N1),
++ ASPEED_PINCTRL_GROUP(SIOPWREQN1),
++ ASPEED_PINCTRL_GROUP(SIOONCTRLN1),
++ ASPEED_PINCTRL_GROUP(SIOPWRGD1),
++ ASPEED_PINCTRL_GROUP(HVI3C12),
++ ASPEED_PINCTRL_GROUP(HVI3C13),
++ ASPEED_PINCTRL_GROUP(HVI3C14),
++ ASPEED_PINCTRL_GROUP(HVI3C15),
++ ASPEED_PINCTRL_GROUP(I3C4),
++ ASPEED_PINCTRL_GROUP(I3C5),
++ ASPEED_PINCTRL_GROUP(I3C6),
++ ASPEED_PINCTRL_GROUP(I3C7),
++ ASPEED_PINCTRL_GROUP(I3C8),
++ ASPEED_PINCTRL_GROUP(I3C9),
++ ASPEED_PINCTRL_GROUP(I3C10),
++ ASPEED_PINCTRL_GROUP(I3C11),
++ ASPEED_PINCTRL_GROUP(HVI3C0),
++ ASPEED_PINCTRL_GROUP(HVI3C1),
++ ASPEED_PINCTRL_GROUP(HVI3C2),
++ ASPEED_PINCTRL_GROUP(HVI3C3),
++ ASPEED_PINCTRL_GROUP(LTPI),
++ ASPEED_PINCTRL_GROUP(SPI0),
++ ASPEED_PINCTRL_GROUP(QSPI0),
++ ASPEED_PINCTRL_GROUP(SPI0CS1),
++ ASPEED_PINCTRL_GROUP(SPI0ABR),
++ ASPEED_PINCTRL_GROUP(SPI0WPN),
++ ASPEED_PINCTRL_GROUP(SPI1),
++ ASPEED_PINCTRL_GROUP(QSPI1),
++ ASPEED_PINCTRL_GROUP(SPI1CS1),
++ ASPEED_PINCTRL_GROUP(SPI1ABR),
++ ASPEED_PINCTRL_GROUP(SPI1WPN),
++ ASPEED_PINCTRL_GROUP(SPI2),
++ ASPEED_PINCTRL_GROUP(QSPI2),
++ ASPEED_PINCTRL_GROUP(SPI2CS1),
++ ASPEED_PINCTRL_GROUP(THRU2),
++ ASPEED_PINCTRL_GROUP(THRU3),
++ ASPEED_PINCTRL_GROUP(JTAGM1),
++ ASPEED_PINCTRL_GROUP(MDIO0),
++ ASPEED_PINCTRL_GROUP(MDIO1),
++ ASPEED_PINCTRL_GROUP(MDIO2),
++ ASPEED_PINCTRL_GROUP(FWQSPI),
++ ASPEED_PINCTRL_GROUP(FWSPIABR),
++ ASPEED_PINCTRL_GROUP(FWSPIWPN),
++ ASPEED_PINCTRL_GROUP(RGMII0),
++ ASPEED_PINCTRL_GROUP(RGMII1),
++ ASPEED_PINCTRL_GROUP(RMII0),
++ ASPEED_PINCTRL_GROUP(RMII1),
++ ASPEED_PINCTRL_GROUP(VGA),
++ ASPEED_PINCTRL_GROUP(DSGPM1),
++ ASPEED_PINCTRL_GROUP(SGPS),
++ ASPEED_PINCTRL_GROUP(I2CF0),
++ ASPEED_PINCTRL_GROUP(I2CF1),
++ ASPEED_PINCTRL_GROUP(I2CF2),
++ ASPEED_PINCTRL_GROUP(CANBUS),
++ ASPEED_PINCTRL_GROUP(USBUART),
++ ASPEED_PINCTRL_GROUP(HBLED),
++ ASPEED_PINCTRL_GROUP(MACLINK0),
++ ASPEED_PINCTRL_GROUP(MACLINK1),
++ ASPEED_PINCTRL_GROUP(MACLINK2),
++ ASPEED_PINCTRL_GROUP(NCTS2),
++ ASPEED_PINCTRL_GROUP(NDCD2),
++ ASPEED_PINCTRL_GROUP(NDSR2),
++ ASPEED_PINCTRL_GROUP(NRI2),
++ ASPEED_PINCTRL_GROUP(NDTR2),
++ ASPEED_PINCTRL_GROUP(NRTS2),
++ ASPEED_PINCTRL_GROUP(SMON0),
++ ASPEED_PINCTRL_GROUP(SMON1),
++ ASPEED_PINCTRL_GROUP(SGMII),
++ ASPEED_PINCTRL_GROUP(PE2SGRSTN),
++ ASPEED_PINCTRL_GROUP(USB2CUD),
++ ASPEED_PINCTRL_GROUP(USB2CD),
++ ASPEED_PINCTRL_GROUP(USB2CH),
++ ASPEED_PINCTRL_GROUP(USB2CU),
++ ASPEED_PINCTRL_GROUP(USB2DD),
++ ASPEED_PINCTRL_GROUP(USB2DH),
++};
++
++FUNC_DECL_(ESPI0, "ESPI0");
++FUNC_DECL_(ESPI1, "ESPI1");
++FUNC_DECL_(LPC0, "LPC0");
++FUNC_DECL_(LPC1, "LPC1");
++FUNC_DECL_(VPI, "VPI");
++FUNC_DECL_(SD, "SD");
++FUNC_DECL_(OSCCLK, "OSCCLK");
++FUNC_DECL_(TACH0, "TACH0");
++FUNC_DECL_(TACH1, "TACH1");
++FUNC_DECL_(TACH2, "TACH2");
++FUNC_DECL_(TACH3, "TACH3");
++FUNC_DECL_(TACH4, "TACH4");
++FUNC_DECL_(TACH5, "TACH5");
++FUNC_DECL_(TACH6, "TACH6");
++FUNC_DECL_(TACH7, "TACH7");
++FUNC_DECL_(THRU0, "THRU0");
++FUNC_DECL_(THRU1, "THRU1");
++FUNC_DECL_(NTCS5, "NTCS5");
++FUNC_DECL_(NDSR5, "NDSR5");
++FUNC_DECL_(NRI5, "NRI5");
++FUNC_DECL_(TACH8, "TACH8");
++FUNC_DECL_(TACH9, "TACH9");
++FUNC_DECL_(TACH10, "TACH10");
++FUNC_DECL_(TACH11, "TACH11");
++FUNC_DECL_(TACH12, "TACH12");
++FUNC_DECL_(TACH13, "TACH13");
++FUNC_DECL_(TACH14, "TACH14");
++FUNC_DECL_(TACH15, "TACH15");
++FUNC_DECL_(SALT12, "SALT12");
++FUNC_DECL_(SALT13, "SALT13");
++FUNC_DECL_(SALT14, "SALT14");
++FUNC_DECL_(SALT15, "SALT15");
++FUNC_DECL_(SPIM0, "SPIM0");
++FUNC_DECL_(PWM0, "PWM0");
++FUNC_DECL_(PWM1, "PWM1");
++FUNC_DECL_(PWM2, "PWM2");
++FUNC_DECL_(PWM3, "PWM3");
++FUNC_DECL_(PWM4, "PWM4");
++FUNC_DECL_(PWM5, "PWM5");
++FUNC_DECL_(PWM6, "PWM6");
++FUNC_DECL_(PWM7, "PWM7");
++FUNC_DECL_(SIOPBON0, "SIOPBON0");
++FUNC_DECL_(SIOPBIN0, "SIOPBIN0");
++FUNC_DECL_(SIOSCIN0, "SIOSCIN0");
++FUNC_DECL_(SIOS3N0, "SIOS3N0");
++FUNC_DECL_(SIOS5N0, "SIOS5N0");
++FUNC_DECL_(SIOPWREQN0, "SIOPWREQN0");
++FUNC_DECL_(SIOONCTRLN0, "SIOONCTRLN0");
++FUNC_DECL_(SIOPWRGD0, "SIOPWRGD0");
++FUNC_DECL_(UART0, "NCTS0", "NDCD0", "NDSR0", "NRI0", "NDTR0", "NRTS0", "TXD0", "RXD0");
++FUNC_DECL_(UART1, "NCTS1", "NDCD1", "NDSR1", "NRI1", "NDTR1", "NRTS1", "TXD1", "RXD1");
++FUNC_DECL_(UART2, "TXD2", "RXD2");
++FUNC_DECL_(UART3, "TXD3", "RXD3");
++FUNC_DECL_(UART5, "NCTS5", "NDCD5", "NDSR5", "NRI5", "NDTR5", "NRTS5", "TXD5", "RXD5");
++FUNC_DECL_(UART6, "NCTS6", "NDCD6", "NDSR6", "NRI6", "NDTR6", "NRTS6", "TXD6", "RXD6");
++FUNC_DECL_(UART7, "TXD7", "RXD7");
++FUNC_DECL_(UART8, "TXD8", "RXD8");
++FUNC_DECL_(UART9, "TXD9", "RXD9");
++FUNC_DECL_(UART10, "TXD10", "RXD10");
++FUNC_DECL_(UART11, "TXD11", "RXD11");
++FUNC_DECL_(SPIM1, "SPIM1");
++FUNC_DECL_(SPIM2, "SPIM2");
++FUNC_DECL_(PWM8, "PWM8");
++FUNC_DECL_(PWM9, "PWM9");
++FUNC_DECL_(PWM10, "PWM10");
++FUNC_DECL_(PWM11, "PWM11");
++FUNC_DECL_(PWM12, "PWM12");
++FUNC_DECL_(PWM13, "PWM13");
++FUNC_DECL_(PWM14, "PWM14");
++FUNC_DECL_(PWM15, "PWM15");
++FUNC_DECL_(WDTRST0N, "WDTRST0N");
++FUNC_DECL_(WDTRST1N, "WDTRST1N");
++FUNC_DECL_(WDTRST2N, "WDTRST2N");
++FUNC_DECL_(WDTRST3N, "WDTRST3N");
++FUNC_DECL_(WDTRST4N, "WDTRST4N");
++FUNC_DECL_(WDTRST5N, "WDTRST5N");
++FUNC_DECL_(WDTRST6N, "WDTRST6N");
++FUNC_DECL_(WDTRST7N, "WDTRST7N");
++FUNC_DECL_(FSI0, "FSI0");
++FUNC_DECL_(FSI1, "FSI1");
++FUNC_DECL_(FSI2, "FSI2");
++FUNC_DECL_(FSI3, "FSI3");
++FUNC_DECL_(SALT4, "SALT4");
++FUNC_DECL_(SALT5, "SALT5");
++FUNC_DECL_(SALT6, "SALT6");
++FUNC_DECL_(SALT7, "SALT7");
++FUNC_DECL_(SALT8, "SALT8");
++FUNC_DECL_(SALT9, "SALT9");
++FUNC_DECL_(SALT10, "SALT10");
++FUNC_DECL_(SALT11, "SALT11");
++FUNC_DECL_(ADC0, "ADC0");
++FUNC_DECL_(ADC1, "ADC1");
++FUNC_DECL_(ADC2, "ADC2");
++FUNC_DECL_(ADC3, "ADC3");
++FUNC_DECL_(ADC4, "ADC4");
++FUNC_DECL_(ADC5, "ADC5");
++FUNC_DECL_(ADC6, "ADC6");
++FUNC_DECL_(ADC7, "ADC7");
++FUNC_DECL_(ADC8, "ADC8");
++FUNC_DECL_(ADC9, "ADC9");
++FUNC_DECL_(ADC10, "ADC10");
++FUNC_DECL_(ADC11, "ADC11");
++FUNC_DECL_(ADC12, "ADC12");
++FUNC_DECL_(ADC13, "ADC13");
++FUNC_DECL_(ADC14, "ADC14");
++FUNC_DECL_(ADC15, "ADC15");
++FUNC_DECL_(AUXPWRGOOD0, "AUXPWRGOOD0");
++FUNC_DECL_(AUXPWRGOOD1, "AUXPWRGOOD1");
++FUNC_DECL_(SGPM0, "SGPM0");
++FUNC_DECL_(SGPM1, "SGPM1");
++FUNC_DECL_(I2C0, "I2C0");
++FUNC_DECL_(I2C1, "I2C1");
++FUNC_DECL_(I2C2, "I2C2");
++FUNC_DECL_(I2C3, "I2C3");
++FUNC_DECL_(I2C4, "I2C4");
++FUNC_DECL_(I2C5, "I2C5");
++FUNC_DECL_(I2C6, "I2C6");
++FUNC_DECL_(I2C7, "I2C7");
++FUNC_DECL_(I2C8, "I2C8");
++FUNC_DECL_(I2C9, "I2C9");
++FUNC_DECL_(I2C10, "I2C10");
++FUNC_DECL_(I2C11, "I2C11");
++FUNC_DECL_(I2C12, "I2C12");
++FUNC_DECL_(I2C13, "I2C13");
++FUNC_DECL_(I2C14, "I2C14");
++FUNC_DECL_(I2C15, "I2C15");
++FUNC_DECL_(DI2C8, "DI2C8");
++FUNC_DECL_(DI2C9, "DI2C9");
++FUNC_DECL_(DI2C10, "DI2C10");
++FUNC_DECL_(DI2C11, "DI2C11");
++FUNC_DECL_(DI2C13, "DI2C13");
++FUNC_DECL_(DI2C14, "DI2C14");
++FUNC_DECL_(DI2C15, "DI2C15");
++FUNC_DECL_(SIOPBON1, "SIOPBON1");
++FUNC_DECL_(SIOPBIN1, "SIOPBIN1");
++FUNC_DECL_(SIOSCIN1, "SIOSCIN1");
++FUNC_DECL_(SIOS3N1, "SIOS3N1");
++FUNC_DECL_(SIOS5N1, "SIOS5N1");
++FUNC_DECL_(SIOPWREQN1, "SIOPWREQN1");
++FUNC_DECL_(SIOONCTRLN1, "SIOONCTRLN1");
++FUNC_DECL_(SIOPWRGD1, "SIOPWRGD1");
++FUNC_DECL_(I3C0, "HVI3C0");
++FUNC_DECL_(I3C1, "HVI3C1");
++FUNC_DECL_(I3C2, "HVI3C2");
++FUNC_DECL_(I3C3, "HVI3C3");
++FUNC_DECL_(I3C4, "I3C4");
++FUNC_DECL_(I3C5, "I3C5");
++FUNC_DECL_(I3C6, "I3C6");
++FUNC_DECL_(I3C7, "I3C7");
++FUNC_DECL_(I3C8, "I3C8");
++FUNC_DECL_(I3C9, "I3C9");
++FUNC_DECL_(I3C10, "I3C10");
++FUNC_DECL_(I3C11, "I3C11");
++FUNC_DECL_(I3C12, "HVI3C12");
++FUNC_DECL_(I3C13, "HVI3C13");
++FUNC_DECL_(I3C14, "HVI3C14");
++FUNC_DECL_(I3C15, "HVI3C15");
++FUNC_DECL_(LTPI, "LTPI");
++FUNC_DECL_(SPI0, "SPI0");
++FUNC_DECL_(QSPI0, "QSPI0");
++FUNC_DECL_(SPI0CS1, "SPI0CS1");
++FUNC_DECL_(SPI0ABR, "SPI0ABR");
++FUNC_DECL_(SPI0WPN, "SPI0WPN");
++FUNC_DECL_(SPI1, "SPI1");
++FUNC_DECL_(QSPI1, "QSPI1");
++FUNC_DECL_(SPI1CS1, "SPI1CS1");
++FUNC_DECL_(SPI1ABR, "SPI1ABR");
++FUNC_DECL_(SPI1WPN, "SPI1WPN");
++FUNC_DECL_(SPI2, "SPI2");
++FUNC_DECL_(QSPI2, "QSPI2");
++FUNC_DECL_(SPI2CS1, "SPI2CS1");
++FUNC_DECL_(THRU2, "THRU2");
++FUNC_DECL_(THRU3, "THRU3");
++FUNC_DECL_(JTAGM1, "JTAGM1");
++FUNC_DECL_(MDIO0, "MDIO0");
++FUNC_DECL_(MDIO1, "MDIO1");
++FUNC_DECL_(MDIO2, "MDIO2");
++FUNC_DECL_(FWQSPI, "FWQSPI");
++FUNC_DECL_(FWSPIABR, "FWSPIABR");
++FUNC_DECL_(FWSPIWPN, "FWSPIWPN");
++FUNC_DECL_(RGMII0, "RGMII0");
++FUNC_DECL_(RGMII1, "RGMII1");
++FUNC_DECL_(RMII0, "RMII0");
++FUNC_DECL_(RMII1, "RMII1");
++FUNC_DECL_(VGA, "VGA");
++FUNC_DECL_(DSGPM1, "DSGPM1");
++FUNC_DECL_(SGPS, "SGPS");
++FUNC_DECL_(I2CF0, "I2CF0");
++FUNC_DECL_(I2CF1, "I2CF1");
++FUNC_DECL_(I2CF2, "I2CF2");
++FUNC_DECL_(CANBUS, "CANBUS");
++FUNC_DECL_(USBUART, "USBUART");
++FUNC_DECL_(HBLED, "HBLED");
++FUNC_DECL_(MACLINK0, "MACLINK0");
++FUNC_DECL_(MACLINK1, "MACLINK1");
++FUNC_DECL_(MACLINK2, "MACLINK2");
++FUNC_DECL_(SMON0, "SMON0");
++FUNC_DECL_(SMON1, "SMON1");
++FUNC_DECL_(SGMII, "SGMII");
++FUNC_DECL_(PCIERC, "PE2SGRSTN");
++FUNC_DECL_(USB2C, "USB2CUD", "USB2CD", "USB2CH", "USB2CU");
++FUNC_DECL_(USB2D, "USB2DD", "USB2DH");
++
++static struct aspeed_pin_function aspeed_g7_soc1_funcs[] = {
++ ASPEED_PINCTRL_FUNC(ESPI0),
++ ASPEED_PINCTRL_FUNC(ESPI1),
++ ASPEED_PINCTRL_FUNC(LPC0),
++ ASPEED_PINCTRL_FUNC(LPC1),
++ ASPEED_PINCTRL_FUNC(VPI),
++ ASPEED_PINCTRL_FUNC(SD),
++ ASPEED_PINCTRL_FUNC(OSCCLK),
++ ASPEED_PINCTRL_FUNC(TACH0),
++ ASPEED_PINCTRL_FUNC(TACH1),
++ ASPEED_PINCTRL_FUNC(TACH2),
++ ASPEED_PINCTRL_FUNC(TACH3),
++ ASPEED_PINCTRL_FUNC(TACH4),
++ ASPEED_PINCTRL_FUNC(TACH5),
++ ASPEED_PINCTRL_FUNC(TACH6),
++ ASPEED_PINCTRL_FUNC(TACH7),
++ ASPEED_PINCTRL_FUNC(THRU0),
++ ASPEED_PINCTRL_FUNC(THRU1),
++ ASPEED_PINCTRL_FUNC(NTCS5),
++ ASPEED_PINCTRL_FUNC(NTCS5),
++ ASPEED_PINCTRL_FUNC(NDSR5),
++ ASPEED_PINCTRL_FUNC(NRI5),
++ ASPEED_PINCTRL_FUNC(NRI5),
++ ASPEED_PINCTRL_FUNC(SALT12),
++ ASPEED_PINCTRL_FUNC(SALT13),
++ ASPEED_PINCTRL_FUNC(SALT14),
++ ASPEED_PINCTRL_FUNC(SALT15),
++ ASPEED_PINCTRL_FUNC(TACH8),
++ ASPEED_PINCTRL_FUNC(TACH9),
++ ASPEED_PINCTRL_FUNC(TACH10),
++ ASPEED_PINCTRL_FUNC(TACH11),
++ ASPEED_PINCTRL_FUNC(TACH12),
++ ASPEED_PINCTRL_FUNC(TACH13),
++ ASPEED_PINCTRL_FUNC(TACH14),
++ ASPEED_PINCTRL_FUNC(TACH15),
++ ASPEED_PINCTRL_FUNC(SPIM0),
++ ASPEED_PINCTRL_FUNC(PWM0),
++ ASPEED_PINCTRL_FUNC(PWM1),
++ ASPEED_PINCTRL_FUNC(PWM2),
++ ASPEED_PINCTRL_FUNC(PWM3),
++ ASPEED_PINCTRL_FUNC(PWM4),
++ ASPEED_PINCTRL_FUNC(PWM5),
++ ASPEED_PINCTRL_FUNC(PWM6),
++ ASPEED_PINCTRL_FUNC(PWM7),
++ ASPEED_PINCTRL_FUNC(SIOPBON0),
++ ASPEED_PINCTRL_FUNC(SIOPBIN0),
++ ASPEED_PINCTRL_FUNC(SIOSCIN0),
++ ASPEED_PINCTRL_FUNC(SIOS3N0),
++ ASPEED_PINCTRL_FUNC(SIOS5N0),
++ ASPEED_PINCTRL_FUNC(SIOPWREQN0),
++ ASPEED_PINCTRL_FUNC(SIOONCTRLN0),
++ ASPEED_PINCTRL_FUNC(SIOPWRGD0),
++ ASPEED_PINCTRL_FUNC(UART0),
++ ASPEED_PINCTRL_FUNC(UART1),
++ ASPEED_PINCTRL_FUNC(UART2),
++ ASPEED_PINCTRL_FUNC(UART3),
++ ASPEED_PINCTRL_FUNC(UART5),
++ ASPEED_PINCTRL_FUNC(UART6),
++ ASPEED_PINCTRL_FUNC(UART7),
++ ASPEED_PINCTRL_FUNC(UART8),
++ ASPEED_PINCTRL_FUNC(UART9),
++ ASPEED_PINCTRL_FUNC(UART10),
++ ASPEED_PINCTRL_FUNC(UART11),
++ ASPEED_PINCTRL_FUNC(SPIM1),
++ ASPEED_PINCTRL_FUNC(PWM7),
++ ASPEED_PINCTRL_FUNC(PWM8),
++ ASPEED_PINCTRL_FUNC(PWM9),
++ ASPEED_PINCTRL_FUNC(PWM10),
++ ASPEED_PINCTRL_FUNC(PWM11),
++ ASPEED_PINCTRL_FUNC(PWM12),
++ ASPEED_PINCTRL_FUNC(PWM13),
++ ASPEED_PINCTRL_FUNC(PWM14),
++ ASPEED_PINCTRL_FUNC(PWM15),
++ ASPEED_PINCTRL_FUNC(WDTRST0N),
++ ASPEED_PINCTRL_FUNC(WDTRST1N),
++ ASPEED_PINCTRL_FUNC(WDTRST2N),
++ ASPEED_PINCTRL_FUNC(WDTRST3N),
++ ASPEED_PINCTRL_FUNC(WDTRST4N),
++ ASPEED_PINCTRL_FUNC(WDTRST5N),
++ ASPEED_PINCTRL_FUNC(WDTRST6N),
++ ASPEED_PINCTRL_FUNC(WDTRST7N),
++ ASPEED_PINCTRL_FUNC(FSI0),
++ ASPEED_PINCTRL_FUNC(FSI1),
++ ASPEED_PINCTRL_FUNC(FSI2),
++ ASPEED_PINCTRL_FUNC(FSI3),
++ ASPEED_PINCTRL_FUNC(SALT4),
++ ASPEED_PINCTRL_FUNC(SALT5),
++ ASPEED_PINCTRL_FUNC(SALT6),
++ ASPEED_PINCTRL_FUNC(SALT7),
++ ASPEED_PINCTRL_FUNC(SALT8),
++ ASPEED_PINCTRL_FUNC(SALT9),
++ ASPEED_PINCTRL_FUNC(SALT10),
++ ASPEED_PINCTRL_FUNC(SALT11),
++ ASPEED_PINCTRL_FUNC(ADC0),
++ ASPEED_PINCTRL_FUNC(ADC1),
++ ASPEED_PINCTRL_FUNC(ADC2),
++ ASPEED_PINCTRL_FUNC(ADC3),
++ ASPEED_PINCTRL_FUNC(ADC4),
++ ASPEED_PINCTRL_FUNC(ADC5),
++ ASPEED_PINCTRL_FUNC(ADC6),
++ ASPEED_PINCTRL_FUNC(ADC7),
++ ASPEED_PINCTRL_FUNC(ADC8),
++ ASPEED_PINCTRL_FUNC(ADC9),
++ ASPEED_PINCTRL_FUNC(ADC10),
++ ASPEED_PINCTRL_FUNC(ADC11),
++ ASPEED_PINCTRL_FUNC(ADC12),
++ ASPEED_PINCTRL_FUNC(ADC13),
++ ASPEED_PINCTRL_FUNC(ADC14),
++ ASPEED_PINCTRL_FUNC(ADC15),
++ ASPEED_PINCTRL_FUNC(AUXPWRGOOD0),
++ ASPEED_PINCTRL_FUNC(AUXPWRGOOD1),
++ ASPEED_PINCTRL_FUNC(SGPM0),
++ ASPEED_PINCTRL_FUNC(SGPM1),
++ ASPEED_PINCTRL_FUNC(SPIM2),
++ ASPEED_PINCTRL_FUNC(I2C0),
++ ASPEED_PINCTRL_FUNC(I2C1),
++ ASPEED_PINCTRL_FUNC(I2C2),
++ ASPEED_PINCTRL_FUNC(I2C3),
++ ASPEED_PINCTRL_FUNC(I2C4),
++ ASPEED_PINCTRL_FUNC(I2C5),
++ ASPEED_PINCTRL_FUNC(I2C6),
++ ASPEED_PINCTRL_FUNC(I2C7),
++ ASPEED_PINCTRL_FUNC(I2C8),
++ ASPEED_PINCTRL_FUNC(I2C9),
++ ASPEED_PINCTRL_FUNC(I2C10),
++ ASPEED_PINCTRL_FUNC(I2C11),
++ ASPEED_PINCTRL_FUNC(I2C12),
++ ASPEED_PINCTRL_FUNC(I2C13),
++ ASPEED_PINCTRL_FUNC(I2C14),
++ ASPEED_PINCTRL_FUNC(I2C15),
++ ASPEED_PINCTRL_FUNC(DI2C8),
++ ASPEED_PINCTRL_FUNC(DI2C9),
++ ASPEED_PINCTRL_FUNC(DI2C10),
++ ASPEED_PINCTRL_FUNC(DI2C11),
++ ASPEED_PINCTRL_FUNC(DI2C13),
++ ASPEED_PINCTRL_FUNC(DI2C14),
++ ASPEED_PINCTRL_FUNC(DI2C15),
++ ASPEED_PINCTRL_FUNC(SIOPBON1),
++ ASPEED_PINCTRL_FUNC(SIOPBIN1),
++ ASPEED_PINCTRL_FUNC(SIOSCIN1),
++ ASPEED_PINCTRL_FUNC(SIOS3N1),
++ ASPEED_PINCTRL_FUNC(SIOS5N1),
++ ASPEED_PINCTRL_FUNC(SIOPWREQN1),
++ ASPEED_PINCTRL_FUNC(SIOONCTRLN1),
++ ASPEED_PINCTRL_FUNC(SIOPWRGD1),
++ ASPEED_PINCTRL_FUNC(I3C0),
++ ASPEED_PINCTRL_FUNC(I3C1),
++ ASPEED_PINCTRL_FUNC(I3C2),
++ ASPEED_PINCTRL_FUNC(I3C3),
++ ASPEED_PINCTRL_FUNC(I3C4),
++ ASPEED_PINCTRL_FUNC(I3C5),
++ ASPEED_PINCTRL_FUNC(I3C6),
++ ASPEED_PINCTRL_FUNC(I3C7),
++ ASPEED_PINCTRL_FUNC(I3C8),
++ ASPEED_PINCTRL_FUNC(I3C9),
++ ASPEED_PINCTRL_FUNC(I3C10),
++ ASPEED_PINCTRL_FUNC(I3C11),
++ ASPEED_PINCTRL_FUNC(I3C12),
++ ASPEED_PINCTRL_FUNC(I3C13),
++ ASPEED_PINCTRL_FUNC(I3C14),
++ ASPEED_PINCTRL_FUNC(I3C15),
++ ASPEED_PINCTRL_FUNC(LTPI),
++ ASPEED_PINCTRL_FUNC(SPI0),
++ ASPEED_PINCTRL_FUNC(QSPI0),
++ ASPEED_PINCTRL_FUNC(SPI0CS1),
++ ASPEED_PINCTRL_FUNC(SPI0ABR),
++ ASPEED_PINCTRL_FUNC(SPI0WPN),
++ ASPEED_PINCTRL_FUNC(SPI1),
++ ASPEED_PINCTRL_FUNC(QSPI1),
++ ASPEED_PINCTRL_FUNC(SPI1CS1),
++ ASPEED_PINCTRL_FUNC(SPI1ABR),
++ ASPEED_PINCTRL_FUNC(SPI1WPN),
++ ASPEED_PINCTRL_FUNC(SPI2),
++ ASPEED_PINCTRL_FUNC(QSPI2),
++ ASPEED_PINCTRL_FUNC(SPI2CS1),
++ ASPEED_PINCTRL_FUNC(THRU2),
++ ASPEED_PINCTRL_FUNC(THRU3),
++ ASPEED_PINCTRL_FUNC(JTAGM1),
++ ASPEED_PINCTRL_FUNC(MDIO0),
++ ASPEED_PINCTRL_FUNC(MDIO1),
++ ASPEED_PINCTRL_FUNC(MDIO2),
++ ASPEED_PINCTRL_FUNC(FWQSPI),
++ ASPEED_PINCTRL_FUNC(FWSPIABR),
++ ASPEED_PINCTRL_FUNC(FWSPIWPN),
++ ASPEED_PINCTRL_FUNC(RGMII0),
++ ASPEED_PINCTRL_FUNC(RGMII1),
++ ASPEED_PINCTRL_FUNC(RMII0),
++ ASPEED_PINCTRL_FUNC(RMII1),
++ ASPEED_PINCTRL_FUNC(VGA),
++ ASPEED_PINCTRL_FUNC(DSGPM1),
++ ASPEED_PINCTRL_FUNC(SGPS),
++ ASPEED_PINCTRL_FUNC(I2CF0),
++ ASPEED_PINCTRL_FUNC(I2CF1),
++ ASPEED_PINCTRL_FUNC(I2CF2),
++ ASPEED_PINCTRL_FUNC(CANBUS),
++ ASPEED_PINCTRL_FUNC(USBUART),
++ ASPEED_PINCTRL_FUNC(HBLED),
++ ASPEED_PINCTRL_FUNC(MACLINK0),
++ ASPEED_PINCTRL_FUNC(MACLINK1),
++ ASPEED_PINCTRL_FUNC(MACLINK2),
++ ASPEED_PINCTRL_FUNC(SMON0),
++ ASPEED_PINCTRL_FUNC(SMON1),
++ ASPEED_PINCTRL_FUNC(SGMII),
++ ASPEED_PINCTRL_FUNC(PCIERC),
++ ASPEED_PINCTRL_FUNC(USB2C),
++ ASPEED_PINCTRL_FUNC(USB2D),
++};
++
++/* number, name, drv_data */
++static const struct pinctrl_pin_desc aspeed_g7_soc1_pins[] = {
++ PINCTRL_PIN(C16, "C16"),
++ PINCTRL_PIN(C14, "C14"),
++ PINCTRL_PIN(C11, "C11"),
++ PINCTRL_PIN(D9, "D9"),
++ PINCTRL_PIN(F14, "F14"),
++ PINCTRL_PIN(D10, "D10"),
++ PINCTRL_PIN(C12, "C12"),
++ PINCTRL_PIN(C13, "C13"),
++ PINCTRL_PIN(AC26, "AC26"),
++ PINCTRL_PIN(AA25, "AA25"),
++ PINCTRL_PIN(AB23, "AB23"),
++ PINCTRL_PIN(U22, "U22"),
++ PINCTRL_PIN(V21, "V21"),
++ PINCTRL_PIN(N26, "N26"),
++ PINCTRL_PIN(P25, "P25"),
++ PINCTRL_PIN(N25, "N25"),
++ PINCTRL_PIN(V23, "V23"),
++ PINCTRL_PIN(W22, "W22"),
++ PINCTRL_PIN(AB26, "AB26"),
++ PINCTRL_PIN(AD26, "AD26"),
++ PINCTRL_PIN(P26, "P26"),
++ PINCTRL_PIN(AE26, "AE26"),
++ PINCTRL_PIN(AF26, "AF26"),
++ PINCTRL_PIN(AF25, "AF25"),
++ PINCTRL_PIN(AE25, "AE25"),
++ PINCTRL_PIN(AD25, "AD25"),
++ PINCTRL_PIN(AF23, "AF23"),
++ PINCTRL_PIN(AF20, "AF20"),
++ PINCTRL_PIN(AF21, "AF21"),
++ PINCTRL_PIN(AE21, "AE21"),
++ PINCTRL_PIN(AE23, "AE23"),
++ PINCTRL_PIN(AD22, "AD22"),
++ PINCTRL_PIN(AF17, "AF17"),
++ PINCTRL_PIN(AA16, "AA16"),
++ PINCTRL_PIN(Y16, "Y16"),
++ PINCTRL_PIN(V17, "V17"),
++ PINCTRL_PIN(J13, "J13"),
++ PINCTRL_PIN(AB16, "AB16"),
++ PINCTRL_PIN(AC16, "AC16"),
++ PINCTRL_PIN(AF16, "AF16"),
++ PINCTRL_PIN(AA15, "AA15"),
++ PINCTRL_PIN(AB15, "AB15"),
++ PINCTRL_PIN(AC15, "AC15"),
++ PINCTRL_PIN(AD15, "AD15"),
++ PINCTRL_PIN(Y15, "Y15"),
++ PINCTRL_PIN(AA14, "AA14"),
++ PINCTRL_PIN(W16, "W16"),
++ PINCTRL_PIN(V16, "V16"),
++ PINCTRL_PIN(AB18, "AB18"),
++ PINCTRL_PIN(AC18, "AC18"),
++ PINCTRL_PIN(K13, "K13"),
++ PINCTRL_PIN(AA17, "AA17"),
++ PINCTRL_PIN(AB17, "AB17"),
++ PINCTRL_PIN(AD16, "AD16"),
++ PINCTRL_PIN(AC17, "AC17"),
++ PINCTRL_PIN(AD17, "AD17"),
++ PINCTRL_PIN(AE16, "AE16"),
++ PINCTRL_PIN(AE17, "AE17"),
++ PINCTRL_PIN(AB24, "AB24"),
++ PINCTRL_PIN(W26, "W26"),
++ PINCTRL_PIN(HOLE0, "HOLE0"),
++ PINCTRL_PIN(HOLE1, "HOLE1"),
++ PINCTRL_PIN(HOLE2, "HOLE2"),
++ PINCTRL_PIN(HOLE3, "HOLE3"),
++ PINCTRL_PIN(W25, "W25"),
++ PINCTRL_PIN(Y23, "Y23"),
++ PINCTRL_PIN(Y24, "Y24"),
++ PINCTRL_PIN(W21, "W21"),
++ PINCTRL_PIN(AA23, "AA23"),
++ PINCTRL_PIN(AC22, "AC22"),
++ PINCTRL_PIN(AB22, "AB22"),
++ PINCTRL_PIN(Y21, "Y21"),
++ PINCTRL_PIN(AE20, "AE20"),
++ PINCTRL_PIN(AF19, "AF19"),
++ PINCTRL_PIN(Y22, "Y22"),
++ PINCTRL_PIN(AA20, "AA20"),
++ PINCTRL_PIN(AA22, "AA22"),
++ PINCTRL_PIN(AB20, "AB20"),
++ PINCTRL_PIN(AF18, "AF18"),
++ PINCTRL_PIN(AE19, "AE19"),
++ PINCTRL_PIN(AD20, "AD20"),
++ PINCTRL_PIN(AC20, "AC20"),
++ PINCTRL_PIN(AA21, "AA21"),
++ PINCTRL_PIN(AB21, "AB21"),
++ PINCTRL_PIN(AC19, "AC19"),
++ PINCTRL_PIN(AE18, "AE18"),
++ PINCTRL_PIN(AD19, "AD19"),
++ PINCTRL_PIN(AD18, "AD18"),
++ PINCTRL_PIN(U25, "U25"),
++ PINCTRL_PIN(U26, "U26"),
++ PINCTRL_PIN(Y26, "Y26"),
++ PINCTRL_PIN(AA24, "AA24"),
++ PINCTRL_PIN(R25, "R25"),
++ PINCTRL_PIN(AA26, "AA26"),
++ PINCTRL_PIN(R26, "R26"),
++ PINCTRL_PIN(Y25, "Y25"),
++ PINCTRL_PIN(B16, "B16"),
++ PINCTRL_PIN(D14, "D14"),
++ PINCTRL_PIN(B15, "B15"),
++ PINCTRL_PIN(B14, "B14"),
++ PINCTRL_PIN(C17, "C17"),
++ PINCTRL_PIN(B13, "B13"),
++ PINCTRL_PIN(E14, "E14"),
++ PINCTRL_PIN(C15, "C15"),
++ PINCTRL_PIN(D24, "D24"),
++ PINCTRL_PIN(B23, "B23"),
++ PINCTRL_PIN(B22, "B22"),
++ PINCTRL_PIN(C23, "C23"),
++ PINCTRL_PIN(B18, "B18"),
++ PINCTRL_PIN(B21, "B21"),
++ PINCTRL_PIN(M15, "M15"),
++ PINCTRL_PIN(B19, "B19"),
++ PINCTRL_PIN(B26, "B26"),
++ PINCTRL_PIN(A25, "A25"),
++ PINCTRL_PIN(A24, "A24"),
++ PINCTRL_PIN(B24, "B24"),
++ PINCTRL_PIN(E26, "E26"),
++ PINCTRL_PIN(A21, "A21"),
++ PINCTRL_PIN(A19, "A19"),
++ PINCTRL_PIN(A18, "A18"),
++ PINCTRL_PIN(D26, "D26"),
++ PINCTRL_PIN(C26, "C26"),
++ PINCTRL_PIN(A23, "A23"),
++ PINCTRL_PIN(A22, "A22"),
++ PINCTRL_PIN(B25, "B25"),
++ PINCTRL_PIN(F26, "F26"),
++ PINCTRL_PIN(A26, "A26"),
++ PINCTRL_PIN(A14, "A14"),
++ PINCTRL_PIN(E10, "E10"),
++ PINCTRL_PIN(E13, "E13"),
++ PINCTRL_PIN(D12, "D12"),
++ PINCTRL_PIN(F10, "F10"),
++ PINCTRL_PIN(E11, "E11"),
++ PINCTRL_PIN(F11, "F11"),
++ PINCTRL_PIN(F13, "F13"),
++ PINCTRL_PIN(N15, "N15"),
++ PINCTRL_PIN(C20, "C20"),
++ PINCTRL_PIN(C19, "C19"),
++ PINCTRL_PIN(A8, "A8"),
++ PINCTRL_PIN(R14, "R14"),
++ PINCTRL_PIN(A7, "A7"),
++ PINCTRL_PIN(P14, "P14"),
++ PINCTRL_PIN(D20, "D20"),
++ PINCTRL_PIN(A6, "A6"),
++ PINCTRL_PIN(B6, "B6"),
++ PINCTRL_PIN(N14, "N14"),
++ PINCTRL_PIN(B7, "B7"),
++ PINCTRL_PIN(B8, "B8"),
++ PINCTRL_PIN(B9, "B9"),
++ PINCTRL_PIN(M14, "M14"),
++ PINCTRL_PIN(J11, "J11"),
++ PINCTRL_PIN(E7, "E7"),
++ PINCTRL_PIN(D19, "D19"),
++ PINCTRL_PIN(B11, "B11"),
++ PINCTRL_PIN(D15, "D15"),
++ PINCTRL_PIN(B12, "B12"),
++ PINCTRL_PIN(B10, "B10"),
++ PINCTRL_PIN(P13, "P13"),
++ PINCTRL_PIN(C18, "C18"),
++ PINCTRL_PIN(C6, "C6"),
++ PINCTRL_PIN(C7, "C7"),
++ PINCTRL_PIN(D7, "D7"),
++ PINCTRL_PIN(N13, "N13"),
++ PINCTRL_PIN(C8, "C8"),
++ PINCTRL_PIN(C9, "C9"),
++ PINCTRL_PIN(C10, "C10"),
++ PINCTRL_PIN(M16, "M16"),
++ PINCTRL_PIN(A15, "A15"),
++ PINCTRL_PIN(G11, "G11"),
++ PINCTRL_PIN(H7, "H7"),
++ PINCTRL_PIN(H8, "H8"),
++ PINCTRL_PIN(H9, "H9"),
++ PINCTRL_PIN(H10, "H10"),
++ PINCTRL_PIN(H11, "H11"),
++ PINCTRL_PIN(J9, "J9"),
++ PINCTRL_PIN(J10, "J10"),
++ PINCTRL_PIN(E9, "E9"),
++ PINCTRL_PIN(F9, "F9"),
++ PINCTRL_PIN(F8, "F8"),
++ PINCTRL_PIN(M13, "M13"),
++ PINCTRL_PIN(F7, "F7"),
++ PINCTRL_PIN(D8, "D8"),
++ PINCTRL_PIN(E8, "E8"),
++ PINCTRL_PIN(L12, "L12"),
++ PINCTRL_PIN(F12, "F12"),
++ PINCTRL_PIN(E12, "E12"),
++ PINCTRL_PIN(J12, "J12"),
++ PINCTRL_PIN(G7, "G7"),
++ PINCTRL_PIN(G8, "G8"),
++ PINCTRL_PIN(G9, "G9"),
++ PINCTRL_PIN(G10, "G10"),
++ PINCTRL_PIN(K12, "K12"),
++ PINCTRL_PIN(W17, "W17"),
++ PINCTRL_PIN(V18, "V18"),
++ PINCTRL_PIN(W18, "W18"),
++ PINCTRL_PIN(Y17, "Y17"),
++ PINCTRL_PIN(AA18, "AA18"),
++ PINCTRL_PIN(AA13, "AA13"),
++ PINCTRL_PIN(Y18, "Y18"),
++ PINCTRL_PIN(AA12, "AA12"),
++ PINCTRL_PIN(W20, "W20"),
++ PINCTRL_PIN(V20, "V20"),
++ PINCTRL_PIN(Y11, "Y11"),
++ PINCTRL_PIN(V14, "V14"),
++ PINCTRL_PIN(V19, "V19"),
++ PINCTRL_PIN(W14, "W14"),
++ PINCTRL_PIN(Y20, "Y20"),
++ PINCTRL_PIN(AB19, "AB19"),
++ PINCTRL_PIN(U21, "U21"),
++ PINCTRL_PIN(T24, "T24"),
++ PINCTRL_PIN(V24, "V24"),
++ PINCTRL_PIN(V22, "V22"),
++ PINCTRL_PIN(T23, "T23"),
++ PINCTRL_PIN(AC25, "AC25"),
++ PINCTRL_PIN(AB25, "AB25"),
++ PINCTRL_PIN(AC24, "AC24"),
++ PINCTRL_PIN(SGMII0, "SGMII0"),
++ PINCTRL_PIN(PCIERC2_PERST, "PCIERC2_PERST"),
++ PINCTRL_PIN(PORTC_MODE, "PORTC_MODE"),
++ PINCTRL_PIN(PORTD_MODE, "PORTD_MODE"),
++};
++
++FUNCFG_DESCL(C16, PIN_CFG(ESPI1, SCU400, GENMASK(2, 0), 1),
++ PIN_CFG(LPC1, SCU400, GENMASK(2, 0), 2),
++ PIN_CFG(SD, SCU400, GENMASK(2, 0), 3),
++ PIN_CFG(DI2C0, SCU400, GENMASK(2, 0), 4),
++ PIN_CFG(VPI, SCU400, GENMASK(2, 0), 5));
++FUNCFG_DESCL(C14, PIN_CFG(ESPI1, SCU400, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(LPC1, SCU400, GENMASK(6, 4), (2 << 4)),
++ PIN_CFG(SD, SCU400, GENMASK(6, 4), (3 << 4)),
++ PIN_CFG(DI2C1, SCU400, GENMASK(6, 4), (4 << 4)),
++ PIN_CFG(VPI, SCU400, GENMASK(6, 4), (5 << 4)));
++FUNCFG_DESCL(C11, PIN_CFG(ESPI1, SCU400, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(LPC1, SCU400, GENMASK(10, 8), (2 << 8)),
++ PIN_CFG(SD, SCU400, GENMASK(10, 8), (3 << 8)),
++ PIN_CFG(DI2C3, SCU400, GENMASK(10, 8), (4 << 8)),
++ PIN_CFG(VPI, SCU400, GENMASK(10, 8), (5 << 8)));
++FUNCFG_DESCL(D9, PIN_CFG(ESPI1, SCU400, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(LPC1, SCU400, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(SD, SCU400, GENMASK(14, 12), (3 << 12)),
++ PIN_CFG(DI2C0, SCU400, GENMASK(14, 12), (4 << 12)),
++ PIN_CFG(VPI, SCU400, GENMASK(14, 12), (5 << 12)));
++FUNCFG_DESCL(F14, PIN_CFG(ESPI1, SCU400, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(LPC1, SCU400, GENMASK(18, 16), (2 << 16)),
++ PIN_CFG(SD, SCU400, GENMASK(18, 16), (3 << 16)),
++ PIN_CFG(DI2C1, SCU400, GENMASK(18, 16), (4 << 16)),
++ PIN_CFG(VPI, SCU400, GENMASK(18, 16), (5 << 16)));
++FUNCFG_DESCL(D10, PIN_CFG(ESPI1, SCU400, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(LPC1, SCU400, GENMASK(22, 20), (2 << 20)),
++ PIN_CFG(SD, SCU400, GENMASK(22, 20), (3 << 20)),
++ PIN_CFG(DI2C2, SCU400, GENMASK(22, 20), (4 << 20)),
++ PIN_CFG(VPI, SCU400, GENMASK(22, 20), (5 << 20)));
++FUNCFG_DESCL(C12, PIN_CFG(ESPI1, SCU400, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(LPC1, SCU400, GENMASK(26, 24), (2 << 24)),
++ PIN_CFG(SD, SCU400, GENMASK(26, 24), (3 << 24)),
++		   PIN_CFG(DI2C2, SCU400, GENMASK(26, 24), (4 << 24)));
++FUNCFG_DESCL(C13, PIN_CFG(ESPI1, SCU400, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(LPC1, SCU400, GENMASK(30, 28), (2 << 28)),
++ PIN_CFG(SD, SCU400, GENMASK(30, 28), (3 << 28)),
++ PIN_CFG(DI2C3, SCU400, GENMASK(30, 28), (4 << 28)));
++FUNCFG_DESCL(AC26, PIN_CFG(TACH0, SCU404, GENMASK(2, 0), 1),
++ PIN_CFG(THRU0, SCU404, GENMASK(2, 0), 2),
++ PIN_CFG(VPI, SCU404, GENMASK(2, 0), 3));
++FUNCFG_DESCL(AA25, PIN_CFG(TACH1, SCU404, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(THRU0, SCU404, GENMASK(6, 4), (2 << 4)),
++ PIN_CFG(VPI, SCU404, GENMASK(6, 4), (3 << 4)));
++FUNCFG_DESCL(AB23, PIN_CFG(TACH2, SCU404, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(THRU1, SCU404, GENMASK(10, 8), (2 << 8)),
++ PIN_CFG(VPI, SCU404, GENMASK(10, 8), (3 << 8)));
++FUNCFG_DESCL(U22, PIN_CFG(TACH3, SCU404, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(THRU1, SCU404, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(VPI, SCU404, GENMASK(14, 12), (3 << 12)));
++FUNCFG_DESCL(V21, PIN_CFG(TACH4, SCU404, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(VPI, SCU404, GENMASK(18, 16), (3 << 16)),
++ PIN_CFG(NCTS5, SCU404, GENMASK(18, 16), (4 << 16)));
++FUNCFG_DESCL(N26, PIN_CFG(TACH5, SCU404, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(VPI, SCU404, GENMASK(22, 20), (3 << 20)),
++ PIN_CFG(NDCD5, SCU404, GENMASK(22, 20), (4 << 20)));
++FUNCFG_DESCL(P25, PIN_CFG(TACH6, SCU404, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(VPI, SCU404, GENMASK(26, 24), (3 << 24)),
++ PIN_CFG(NDSR5, SCU404, GENMASK(26, 24), (4 << 24)));
++FUNCFG_DESCL(N25, PIN_CFG(TACH7, SCU404, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(VPI, SCU404, GENMASK(30, 28), (3 << 28)),
++ PIN_CFG(NRI5, SCU404, GENMASK(30, 28), (4 << 28)));
++FUNCFG_DESCL(V23, PIN_CFG(TACH8, SCU408, GENMASK(2, 0), 1),
++ PIN_CFG(VPI, SCU408, GENMASK(2, 0), 3),
++ PIN_CFG(NDTR5, SCU408, GENMASK(2, 0), 4));
++FUNCFG_DESCL(W22, PIN_CFG(TACH9, SCU408, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(VPI, SCU408, GENMASK(6, 4), (3 << 4)),
++ PIN_CFG(NRTS5, SCU408, GENMASK(6, 4), (4 << 4)));
++FUNCFG_DESCL(AB26, PIN_CFG(TACH10, SCU408, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(SALT12, SCU408, GENMASK(10, 8), (2 << 8)),
++ PIN_CFG(VPI, SCU408, GENMASK(10, 8), (3 << 8)),
++ PIN_CFG(NCTS6, SCU408, GENMASK(10, 8), (4 << 8)));
++FUNCFG_DESCL(AD26, PIN_CFG(TACH11, SCU408, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(SALT13, SCU408, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(VPI, SCU408, GENMASK(14, 12), (3 << 12)),
++ PIN_CFG(NDCD6, SCU408, GENMASK(14, 12), (4 << 12)));
++FUNCFG_DESCL(P26, PIN_CFG(TACH12, SCU408, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(SALT14, SCU408, GENMASK(18, 16), (2 << 16)),
++ PIN_CFG(VPI, SCU408, GENMASK(18, 16), (3 << 16)),
++ PIN_CFG(NDSR6, SCU408, GENMASK(18, 16), (4 << 16)));
++FUNCFG_DESCL(AE26, PIN_CFG(TACH13, SCU408, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(SALT15, SCU408, GENMASK(22, 20), (2 << 20)),
++ PIN_CFG(VPI, SCU408, GENMASK(22, 20), (3 << 20)),
++ PIN_CFG(NRI6, SCU408, GENMASK(22, 20), (4 << 20)));
++FUNCFG_DESCL(AF26, PIN_CFG(TACH14, SCU408, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(LPC0, SCU408, GENMASK(26, 24), (2 << 24)),
++ PIN_CFG(VPI, SCU408, GENMASK(26, 24), (3 << 24)),
++ PIN_CFG(NDTR6, SCU408, GENMASK(26, 24), (4 << 24)));
++FUNCFG_DESCL(AF25, PIN_CFG(TACH15, SCU408, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(LPC0, SCU408, GENMASK(30, 28), (2 << 28)),
++ PIN_CFG(VPI, SCU408, GENMASK(30, 28), (3 << 28)),
++ PIN_CFG(NRTS6, SCU408, GENMASK(30, 28), (4 << 28)));
++FUNCFG_DESCL(AE25, PIN_CFG(PWM0, SCU40C, GENMASK(2, 0), 1),
++ PIN_CFG(SIOPBON0, SCU40C, GENMASK(2, 0), 2),
++ PIN_CFG(VPI, SCU40C, GENMASK(2, 0), 3),
++ PIN_CFG(SPIM0, SCU40C, GENMASK(2, 0), 4));
++FUNCFG_DESCL(AD25, PIN_CFG(PWM1, SCU40C, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(SIOPBIN0, SCU40C, GENMASK(6, 4), (2 << 4)),
++ PIN_CFG(VPI, SCU40C, GENMASK(6, 4), (3 << 4)),
++ PIN_CFG(SPIM0, SCU40C, GENMASK(6, 4), (4 << 4)));
++FUNCFG_DESCL(AF23, PIN_CFG(PWM2, SCU40C, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(SIOSCIN0, SCU40C, GENMASK(10, 8), (2 << 8)),
++ PIN_CFG(VPI, SCU40C, GENMASK(10, 8), (3 << 8)),
++ PIN_CFG(SPIM0, SCU40C, GENMASK(10, 8), (4 << 8)));
++FUNCFG_DESCL(AF20, PIN_CFG(PWM3, SCU40C, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(SIOS3N0, SCU40C, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(VPI, SCU40C, GENMASK(14, 12), (3 << 12)),
++ PIN_CFG(SPIM0, SCU40C, GENMASK(14, 12), (4 << 12)));
++FUNCFG_DESCL(AF21, PIN_CFG(PWM4, SCU40C, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(SIOS5N0, SCU40C, GENMASK(18, 16), (2 << 16)),
++ PIN_CFG(VPI, SCU40C, GENMASK(18, 16), (3 << 16)),
++ PIN_CFG(SPIM0, SCU40C, GENMASK(18, 16), (4 << 16)));
++FUNCFG_DESCL(AE21, PIN_CFG(PWM5, SCU40C, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(SIOPWREQN0, SCU40C, GENMASK(22, 20), (2 << 20)),
++ PIN_CFG(VPI, SCU40C, GENMASK(22, 20), (3 << 20)),
++ PIN_CFG(SPIM0, SCU40C, GENMASK(22, 20), (4 << 20)));
++FUNCFG_DESCL(AE23, PIN_CFG(PWM6, SCU40C, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(SIOONCTRLN0, SCU40C, GENMASK(26, 24), (2 << 24)),
++ PIN_CFG(SPIM0, SCU40C, GENMASK(26, 24), (4 << 24)));
++FUNCFG_DESCL(AD22, PIN_CFG(PWM7, SCU40C, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(SPIM0, SCU40C, GENMASK(30, 28), (4 << 28)));
++FUNCFG_DESCL(AF17, PIN_CFG(NCTS0, SCU410, GENMASK(2, 0), 1),
++ PIN_CFG(SIOPBON1, SCU410, GENMASK(2, 0), 2));
++FUNCFG_DESCL(AA16, PIN_CFG(NDCD0, SCU410, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(SIOPBIN1, SCU410, GENMASK(6, 4), (2 << 4)));
++FUNCFG_DESCL(Y16, PIN_CFG(NDSR0, SCU410, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(SIOSCIN1, SCU410, GENMASK(10, 8), (2 << 8)));
++FUNCFG_DESCL(V17, PIN_CFG(NRI0, SCU410, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(SIOS3N1, SCU410, GENMASK(14, 12), (2 << 12)));
++FUNCFG_DESCL(J13, PIN_CFG(NDTR0, SCU410, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(SIOS5N1, SCU410, GENMASK(18, 16), (2 << 16)));
++FUNCFG_DESCL(AB16, PIN_CFG(NRTS0, SCU410, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(SIOPWREQN1, SCU410, GENMASK(22, 20), (2 << 20)));
++FUNCFG_DESCL(AC16, PIN_CFG(TXD0, SCU410, GENMASK(26, 24), (1 << 24)));
++FUNCFG_DESCL(AF16, PIN_CFG(RXD0, SCU410, GENMASK(30, 28), (1 << 28)));
++FUNCFG_DESCL(AA15, PIN_CFG(NCTS1, SCU414, GENMASK(2, 0), 1),
++ PIN_CFG(SIOONCTRLN1, SCU414, GENMASK(2, 0), 2));
++FUNCFG_DESCL(AB15, PIN_CFG(NDCD1, SCU414, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(SIOPWRGD1, SCU414, GENMASK(6, 4), (2 << 4)));
++FUNCFG_DESCL(AC15, PIN_CFG(NDSR1, SCU414, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(SALT2, SCU414, GENMASK(10, 8), (2 << 8)));
++FUNCFG_DESCL(AD15, PIN_CFG(NRI1, SCU414, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(SALT3, SCU414, GENMASK(14, 12), (2 << 12)));
++FUNCFG_DESCL(Y15, PIN_CFG(NDTR1, SCU414, GENMASK(18, 16), (1 << 16)));
++FUNCFG_DESCL(AA14, PIN_CFG(NRTS1, SCU414, GENMASK(22, 20), (1 << 20)));
++FUNCFG_DESCL(W16, PIN_CFG(TXD1, SCU414, GENMASK(26, 24), (1 << 24)));
++FUNCFG_DESCL(V16, PIN_CFG(RXD1, SCU414, GENMASK(30, 28), (1 << 28)));
++FUNCFG_DESCL(AB18, PIN_CFG(TXD2, SCU418, GENMASK(2, 0), 1));
++FUNCFG_DESCL(AC18, PIN_CFG(RXD2, SCU418, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(I2C12, SCU418, GENMASK(6, 4), (4 << 4)));
++FUNCFG_DESCL(K13, PIN_CFG(TXD3, SCU418, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(WDTRST0N, SCU418, GENMASK(10, 8), (2 << 8)),
++ PIN_CFG(PWM8, SCU418, GENMASK(10, 8), (3 << 8)),
++ PIN_CFG(SPIM1, SCU418, GENMASK(10, 8), (5 << 8)));
++FUNCFG_DESCL(AA17, PIN_CFG(RXD3, SCU418, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(WDTRST1N, SCU418, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(PWM9, SCU418, GENMASK(14, 12), (3 << 12)),
++ PIN_CFG(I2C12, SCU418, GENMASK(14, 12), (4 << 12)),
++ PIN_CFG(SPIM1, SCU418, GENMASK(14, 12), (5 << 12)));
++FUNCFG_DESCL(AB17, PIN_CFG(TXD5, SCU418, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(WDTRST2N, SCU418, GENMASK(18, 16), (2 << 16)),
++ PIN_CFG(PWM10, SCU418, GENMASK(18, 16), (3 << 16)),
++ PIN_CFG(I2C13, SCU418, GENMASK(18, 16), (4 << 16)),
++ PIN_CFG(SPIM1, SCU418, GENMASK(18, 16), (5 << 16)));
++FUNCFG_DESCL(AD16, PIN_CFG(RXD5, SCU418, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(WDTRST3N, SCU418, GENMASK(22, 20), (2 << 20)),
++ PIN_CFG(PWM11, SCU418, GENMASK(22, 20), (3 << 20)),
++ PIN_CFG(I2C13, SCU418, GENMASK(22, 20), (4 << 20)),
++ PIN_CFG(SPIM1, SCU418, GENMASK(22, 20), (5 << 20)));
++FUNCFG_DESCL(AC17, PIN_CFG(TXD6, SCU418, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(SALT0, SCU418, GENMASK(26, 24), (2 << 24)),
++ PIN_CFG(PWM12, SCU418, GENMASK(26, 24), (3 << 24)),
++ PIN_CFG(I2C14, SCU418, GENMASK(26, 24), (4 << 24)),
++ PIN_CFG(SPIM1, SCU418, GENMASK(26, 24), (5 << 24)));
++FUNCFG_DESCL(AD17, PIN_CFG(RXD6, SCU418, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(SALT1, SCU418, GENMASK(30, 28), (2 << 28)),
++ PIN_CFG(PWM13, SCU418, GENMASK(30, 28), (3 << 28)),
++ PIN_CFG(I2C14, SCU418, GENMASK(30, 28), (4 << 28)),
++ PIN_CFG(SPIM1, SCU418, GENMASK(30, 28), (5 << 28)));
++FUNCFG_DESCL(AE16, PIN_CFG(TXD7, SCU41C, GENMASK(2, 0), 1),
++ PIN_CFG(I2C15, SCU41C, GENMASK(2, 0), 2),
++ PIN_CFG(PWM14, SCU41C, GENMASK(2, 0), 3),
++ PIN_CFG(LPC1, SCU41C, GENMASK(2, 0), 4),
++ PIN_CFG(SPIM1, SCU41C, GENMASK(2, 0), 5));
++FUNCFG_DESCL(AE17, PIN_CFG(RXD7, SCU41C, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(I2C15, SCU41C, GENMASK(6, 4), (2 << 4)),
++ PIN_CFG(PWM15, SCU41C, GENMASK(6, 4), (3 << 4)),
++ PIN_CFG(LPC1, SCU41C, GENMASK(6, 4), (4 << 4)),
++ PIN_CFG(SPIM1, SCU41C, GENMASK(6, 4), (5 << 4)));
++FUNCFG_DESCL(AB24, PIN_CFG(SGPM1, SCU41C, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(WDTRST7N, SCU41C, GENMASK(10, 8), (2 << 8)),
++ PIN_CFG(PESGWAKEN, SCU41C, GENMASK(10, 8), (3 << 8)),
++ PIN_CFG(SMON1, SCU41C, GENMASK(10, 8), (5 << 8)));
++FUNCFG_DESCL(W26, PIN_CFG(SGPM1, SCU41C, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(SMON1, SCU41C, GENMASK(14, 12), (5 << 12)));
++FUNCFG_DESCL(HOLE0);
++FUNCFG_DESCL(HOLE1);
++FUNCFG_DESCL(HOLE2);
++FUNCFG_DESCL(HOLE3);
++FUNCFG_DESCL(W25, PIN_CFG(HVI3C12, SCU420, GENMASK(2, 0), 1),
++ PIN_CFG(I2C12, SCU420, GENMASK(2, 0), 2));
++FUNCFG_DESCL(Y23, PIN_CFG(HVI3C12, SCU420, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(I2C12, SCU420, GENMASK(6, 4), (2 << 4)));
++FUNCFG_DESCL(Y24, PIN_CFG(HVI3C13, SCU420, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(I2C13, SCU420, GENMASK(10, 8), (2 << 8)));
++FUNCFG_DESCL(W21, PIN_CFG(HVI3C13, SCU420, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(I2C13, SCU420, GENMASK(14, 12), (2 << 12)));
++FUNCFG_DESCL(AA23, PIN_CFG(HVI3C14, SCU420, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(I2C14, SCU420, GENMASK(18, 16), (2 << 16)));
++FUNCFG_DESCL(AC22, PIN_CFG(HVI3C14, SCU420, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(I2C14, SCU420, GENMASK(22, 20), (2 << 20)));
++FUNCFG_DESCL(AB22, PIN_CFG(HVI3C15, SCU420, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(I2C15, SCU420, GENMASK(26, 24), (2 << 24)));
++FUNCFG_DESCL(Y21, PIN_CFG(HVI3C15, SCU420, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(I2C15, SCU420, GENMASK(30, 28), (2 << 28)));
++FUNCFG_DESCL(AE20, PIN_CFG(I3C4, SCU424, GENMASK(2, 0), 1));
++FUNCFG_DESCL(AF19, PIN_CFG(I3C4, SCU424, GENMASK(6, 4), (1 << 4)));
++FUNCFG_DESCL(Y22, PIN_CFG(I3C5, SCU424, GENMASK(10, 8), (1 << 8)));
++FUNCFG_DESCL(AA20, PIN_CFG(I3C5, SCU424, GENMASK(14, 12), (1 << 12)));
++FUNCFG_DESCL(AA22, PIN_CFG(I3C6, SCU424, GENMASK(18, 16), (1 << 16)));
++FUNCFG_DESCL(AB20, PIN_CFG(I3C6, SCU424, GENMASK(22, 20), (1 << 20)));
++FUNCFG_DESCL(AF18, PIN_CFG(I3C7, SCU424, GENMASK(26, 24), (1 << 24)));
++FUNCFG_DESCL(AE19, PIN_CFG(I3C7, SCU424, GENMASK(30, 28), (1 << 28)));
++FUNCFG_DESCL(AD20, PIN_CFG(I3C8, SCU428, GENMASK(2, 0), 1),
++ PIN_CFG(FSI0, SCU428, GENMASK(2, 0), 2));
++FUNCFG_DESCL(AC20, PIN_CFG(I3C8, SCU428, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(FSI0, SCU428, GENMASK(6, 4), (2 << 4)));
++FUNCFG_DESCL(AA21, PIN_CFG(I3C9, SCU428, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(FSI1, SCU428, GENMASK(10, 8), (2 << 8)));
++FUNCFG_DESCL(AB21, PIN_CFG(I3C9, SCU428, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(FSI1, SCU428, GENMASK(14, 12), (2 << 12)));
++FUNCFG_DESCL(AC19, PIN_CFG(I3C10, SCU428, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(FSI2, SCU428, GENMASK(18, 16), (2 << 16)));
++FUNCFG_DESCL(AE18, PIN_CFG(I3C10, SCU428, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(FSI2, SCU428, GENMASK(22, 20), (2 << 20)));
++FUNCFG_DESCL(AD19, PIN_CFG(I3C11, SCU428, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(FSI3, SCU428, GENMASK(26, 24), (2 << 24)));
++FUNCFG_DESCL(AD18, PIN_CFG(I3C11, SCU428, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(FSI3, SCU428, GENMASK(30, 28), (2 << 28)));
++FUNCFG_DESCL(U25, PIN_CFG(HVI3C0, SCU42C, GENMASK(2, 0), 1),
++ PIN_CFG(DI2C8, SCU42C, GENMASK(2, 0), 2));
++FUNCFG_DESCL(U26, PIN_CFG(HVI3C0, SCU42C, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(DI2C8, SCU42C, GENMASK(6, 4), (2 << 4)));
++FUNCFG_DESCL(Y26, PIN_CFG(HVI3C1, SCU42C, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(DI2C9, SCU42C, GENMASK(10, 8), (2 << 8)));
++FUNCFG_DESCL(AA24, PIN_CFG(HVI3C1, SCU42C, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(DI2C9, SCU42C, GENMASK(14, 12), (2 << 12)));
++FUNCFG_DESCL(R25, PIN_CFG(HVI3C2, SCU42C, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(DI2C10, SCU42C, GENMASK(18, 16), (2 << 16)));
++FUNCFG_DESCL(AA26, PIN_CFG(HVI3C2, SCU42C, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(DI2C10, SCU42C, GENMASK(22, 20), (2 << 20)));
++FUNCFG_DESCL(R26, PIN_CFG(HVI3C3, SCU42C, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(DI2C11, SCU42C, GENMASK(26, 24), (2 << 24)));
++FUNCFG_DESCL(Y25, PIN_CFG(HVI3C3, SCU42C, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(DI2C11, SCU42C, GENMASK(30, 28), (2 << 28)));
++FUNCFG_DESCL(B16, PIN_CFG(ESPI0, SCU430, GENMASK(2, 0), 1),
++ PIN_CFG(LPC0, SCU430, GENMASK(2, 0), 2));
++FUNCFG_DESCL(D14, PIN_CFG(ESPI0, SCU430, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(LPC0, SCU430, GENMASK(6, 4), (2 << 4)));
++FUNCFG_DESCL(B15, PIN_CFG(ESPI0, SCU430, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(LPC0, SCU430, GENMASK(10, 8), (2 << 8)));
++FUNCFG_DESCL(B14, PIN_CFG(ESPI0, SCU430, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(LPC0, SCU430, GENMASK(14, 12), (2 << 12)));
++FUNCFG_DESCL(C17, PIN_CFG(ESPI0, SCU430, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(LPC0, SCU430, GENMASK(18, 16), (2 << 16)),
++ PIN_CFG(OSCCLK, SCU430, GENMASK(18, 16), (3 << 16)));
++FUNCFG_DESCL(B13, PIN_CFG(ESPI0, SCU430, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(LPC0, SCU430, GENMASK(22, 20), (2 << 20)));
++FUNCFG_DESCL(E14, PIN_CFG(ESPI0, SCU430, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(LPC0, SCU430, GENMASK(26, 24), (2 << 24)));
++FUNCFG_DESCL(C15, PIN_CFG(ESPI0, SCU430, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(LPC0, SCU430, GENMASK(30, 28), (2 << 28)));
++FUNCFG_DESCL(D24, PIN_CFG(SPI0, SCU434, GENMASK(2, 0), 1));
++FUNCFG_DESCL(B23, PIN_CFG(SPI0, SCU434, GENMASK(6, 4), (1 << 4)));
++FUNCFG_DESCL(B22, PIN_CFG(SPI0, SCU434, GENMASK(10, 8), (1 << 8)));
++FUNCFG_DESCL(C23, PIN_CFG(QSPI0, SCU434, GENMASK(14, 12), (1 << 12)));
++FUNCFG_DESCL(B18, PIN_CFG(QSPI0, SCU434, GENMASK(18, 16), (1 << 16)));
++FUNCFG_DESCL(B21, PIN_CFG(SPI0CS1, SCU434, GENMASK(22, 20), (1 << 20)));
++FUNCFG_DESCL(M15, PIN_CFG(SPI0ABR, SCU434, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(TXD8, SCU434, GENMASK(26, 24), (3 << 24)));
++FUNCFG_DESCL(B19, PIN_CFG(SPI0WPN, SCU434, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(RXD8, SCU434, GENMASK(30, 28), (3 << 28)));
++FUNCFG_DESCL(B26, PIN_CFG(SPI1, SCU438, GENMASK(2, 0), 1),
++ PIN_CFG(TXD9, SCU438, GENMASK(2, 0), 3));
++FUNCFG_DESCL(A25, PIN_CFG(SPI1, SCU438, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(RXD9, SCU438, GENMASK(6, 4), (3 << 4)));
++FUNCFG_DESCL(A24, PIN_CFG(SPI1, SCU438, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(TXD10, SCU438, GENMASK(10, 8), (3 << 8)));
++FUNCFG_DESCL(B24, PIN_CFG(QSPI1, SCU438, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(RXD10, SCU438, GENMASK(14, 12), (3 << 12)));
++FUNCFG_DESCL(E26, PIN_CFG(QSPI1, SCU438, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(TXD11, SCU438, GENMASK(18, 16), (3 << 16)));
++FUNCFG_DESCL(A21, PIN_CFG(SPI1CS1, SCU438, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(RXD11, SCU438, GENMASK(22, 20), (3 << 20)));
++FUNCFG_DESCL(A19, PIN_CFG(SPI1ABR, SCU438, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(THRU2, SCU438, GENMASK(26, 24), (4 << 24)));
++FUNCFG_DESCL(A18, PIN_CFG(SPI1WPN, SCU438, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(THRU2, SCU438, GENMASK(30, 28), (4 << 28)));
++FUNCFG_DESCL(D26, PIN_CFG(SPI2, SCU43C, GENMASK(2, 0), 1));
++FUNCFG_DESCL(C26, PIN_CFG(SPI2, SCU43C, GENMASK(6, 4), (1 << 4)));
++FUNCFG_DESCL(A23, PIN_CFG(SPI2, SCU43C, GENMASK(10, 8), (1 << 8)));
++FUNCFG_DESCL(A22, PIN_CFG(SPI2, SCU43C, GENMASK(14, 12), (1 << 12)));
++FUNCFG_DESCL(B25, PIN_CFG(QSPI2, SCU43C, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(THRU3, SCU43C, GENMASK(18, 16), (4 << 16)));
++FUNCFG_DESCL(F26, PIN_CFG(QSPI2, SCU43C, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(THRU3, SCU43C, GENMASK(22, 20), (4 << 20)));
++FUNCFG_DESCL(A26, PIN_CFG(SPI2CS1, SCU43C, GENMASK(26, 24), (1 << 24)));
++FUNCFG_DESCL(A14, PIN_CFG(FWSPIABR, SCU43C, GENMASK(30, 28), (1 << 28)));
++FUNCFG_DESCL(E10, PIN_CFG(MDIO2, SCU440, GENMASK(2, 0), 1),
++ PIN_CFG(PE2SGRSTN, SCU440, GENMASK(2, 0), 2));
++FUNCFG_DESCL(E13, PIN_CFG(MDIO2, SCU440, GENMASK(6, 4), (1 << 4)));
++FUNCFG_DESCL(D12, PIN_CFG(JTAGM1, SCU440, GENMASK(10, 8), (1 << 8)));
++FUNCFG_DESCL(F10, PIN_CFG(JTAGM1, SCU440, GENMASK(14, 12), (1 << 12)));
++FUNCFG_DESCL(E11, PIN_CFG(JTAGM1, SCU440, GENMASK(18, 16), (1 << 16)));
++FUNCFG_DESCL(F11, PIN_CFG(JTAGM1, SCU440, GENMASK(22, 20), (1 << 20)));
++FUNCFG_DESCL(F13, PIN_CFG(JTAGM1, SCU440, GENMASK(26, 24), (1 << 24)));
++FUNCFG_DESCL(N15, PIN_CFG(FWSPIWPEN, SCU440, GENMASK(30, 28), (1 << 28)));
++FUNCFG_DESCL(C20, PIN_CFG(RGMII0, SCU444, GENMASK(2, 0), 1),
++ PIN_CFG(RMII0R, SCU444, GENMASK(2, 0), 2));
++FUNCFG_DESCL(C19, PIN_CFG(RGMII0, SCU444, GENMASK(6, 4), (1 << 4)));
++FUNCFG_DESCL(A8, PIN_CFG(RGMII0, SCU444, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(RMII0R, SCU444, GENMASK(10, 8), (2 << 8)));
++FUNCFG_DESCL(R14, PIN_CFG(RGMII0, SCU444, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(RMII0R, SCU444, GENMASK(14, 12), (2 << 12)));
++FUNCFG_DESCL(A7, PIN_CFG(RGMII0, SCU444, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(RMII0C, SCU444, GENMASK(18, 16), (2 << 16)));
++FUNCFG_DESCL(P14, PIN_CFG(RGMII0, SCU444, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(RMII0, SCU444, GENMASK(22, 20), (2 << 20)));
++FUNCFG_DESCL(D20, PIN_CFG(RGMII0, SCU444, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(RMII0, SCU444, GENMASK(26, 24), (2 << 24)));
++FUNCFG_DESCL(A6, PIN_CFG(RGMII0, SCU444, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(RMII0, SCU444, GENMASK(30, 28), (2 << 28)));
++FUNCFG_DESCL(B6, PIN_CFG(RGMII0, SCU448, GENMASK(2, 0), 1),
++ PIN_CFG(RMII0, SCU448, GENMASK(2, 0), 2));
++FUNCFG_DESCL(N14, PIN_CFG(RGMII0, SCU448, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(RMII0, SCU448, GENMASK(6, 4), (2 << 4)));
++FUNCFG_DESCL(B7, PIN_CFG(RGMII0, SCU448, GENMASK(10, 8), (1 << 8)));
++FUNCFG_DESCL(B8, PIN_CFG(RGMII0, SCU448, GENMASK(14, 12), (1 << 12)));
++FUNCFG_DESCL(B9, PIN_CFG(MDIO0, SCU448, GENMASK(18, 16), (1 << 16)));
++FUNCFG_DESCL(M14, PIN_CFG(MDIO0, SCU448, GENMASK(22, 20), (1 << 20)));
++FUNCFG_DESCL(J11, PIN_CFG(VGA, SCU448, GENMASK(26, 24), (1 << 24)));
++FUNCFG_DESCL(E7, PIN_CFG(VGA, SCU448, GENMASK(30, 28), (1 << 28)));
++FUNCFG_DESCL(D19, PIN_CFG(RGMII1, SCU44C, GENMASK(2, 0), 1),
++ PIN_CFG(RMII1, SCU44C, GENMASK(2, 0), 2),
++ PIN_CFG(DSGPM1, SCU44C, GENMASK(2, 0), 4));
++FUNCFG_DESCL(B11, PIN_CFG(RGMII1, SCU44C, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(SGPS, SCU44C, GENMASK(6, 4), (5 << 4)));
++FUNCFG_DESCL(D15, PIN_CFG(RGMII1, SCU44C, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(RMII1, SCU44C, GENMASK(10, 8), (2 << 8)),
++ PIN_CFG(TXD3, SCU44C, GENMASK(10, 8), (4 << 8)));
++FUNCFG_DESCL(B12, PIN_CFG(RGMII1, SCU44C, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(RMII1, SCU44C, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(RXD3, SCU44C, GENMASK(14, 12), (4 << 12)));
++FUNCFG_DESCL(B10, PIN_CFG(RGMII1, SCU44C, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(RMII1, SCU44C, GENMASK(18, 16), (2 << 16)),
++ PIN_CFG(DSGPM1, SCU44C, GENMASK(18, 16), (4 << 16)));
++FUNCFG_DESCL(P13, PIN_CFG(RGMII1, SCU44C, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(RMII1, SCU44C, GENMASK(22, 20), (2 << 20)));
++FUNCFG_DESCL(C18, PIN_CFG(RGMII1, SCU44C, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(RMII1, SCU44C, GENMASK(26, 24), (2 << 24)),
++ PIN_CFG(SGPS, SCU44C, GENMASK(26, 24), (5 << 24)));
++FUNCFG_DESCL(C6, PIN_CFG(RGMII1, SCU44C, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(RMII1, SCU44C, GENMASK(30, 28), (2 << 28)));
++FUNCFG_DESCL(C7, PIN_CFG(RGMII1, SCU450, GENMASK(2, 0), 1),
++ PIN_CFG(RMII1, SCU450, GENMASK(2, 0), 2),
++ PIN_CFG(DSGPM1, SCU450, GENMASK(2, 0), 4));
++FUNCFG_DESCL(D7, PIN_CFG(RGMII1, SCU450, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(RMII1, SCU450, GENMASK(6, 4), (2 << 4)),
++ PIN_CFG(DSGPM1, SCU450, GENMASK(6, 4), (4 << 4)));
++FUNCFG_DESCL(N13, PIN_CFG(RGMII1, SCU450, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(SGPS, SCU450, GENMASK(10, 8), (5 << 8)));
++FUNCFG_DESCL(C8, PIN_CFG(RGMII1, SCU450, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(SGPS, SCU450, GENMASK(14, 12), (5 << 12)));
++FUNCFG_DESCL(C9, PIN_CFG(MDIO1, SCU450, GENMASK(18, 16), (1 << 16)));
++FUNCFG_DESCL(C10, PIN_CFG(MDIO1, SCU450, GENMASK(22, 20), (1 << 20)));
++FUNCFG_DESCL(M16, PIN_CFG(FWQSPI, SCU450, GENMASK(26, 24), (1 << 24)));
++FUNCFG_DESCL(A15, PIN_CFG(FWQSPI, SCU450, GENMASK(30, 28), (1 << 28)));
++FUNCFG_DESCL(G11, PIN_CFG(I2C0, SCU454, GENMASK(2, 0), 1),
++ PIN_CFG(LTPI_I2C0, SCU454, GENMASK(2, 0), 2));
++FUNCFG_DESCL(H7, PIN_CFG(I2C0, SCU454, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(LTPI_I2C0, SCU454, GENMASK(6, 4), (2 << 4)));
++FUNCFG_DESCL(H8, PIN_CFG(I2C1, SCU454, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(LTPI_I2C1, SCU454, GENMASK(10, 8), (2 << 8)));
++FUNCFG_DESCL(H9, PIN_CFG(I2C1, SCU454, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(LTPI_I2C1, SCU454, GENMASK(14, 12), (2 << 12)));
++FUNCFG_DESCL(H10, PIN_CFG(I2C2, SCU454, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(LTPI_I2C2, SCU454, GENMASK(18, 16), (2 << 16)));
++FUNCFG_DESCL(H11, PIN_CFG(I2C2, SCU454, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(LTPI_I2C2, SCU454, GENMASK(22, 20), (2 << 20)));
++FUNCFG_DESCL(J9, PIN_CFG(I2C3, SCU454, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(LTPI_I2C3, SCU454, GENMASK(26, 24), (2 << 24)));
++FUNCFG_DESCL(J10, PIN_CFG(I2C3, SCU454, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(LTPI_I2C3, SCU454, GENMASK(30, 28), (2 << 28)));
++FUNCFG_DESCL(E9, PIN_CFG(I2C4, SCU458, GENMASK(2, 0), 1),
++ PIN_CFG(I2CF1, SCU458, GENMASK(2, 0), 5));
++FUNCFG_DESCL(F9, PIN_CFG(I2C4, SCU458, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(I2CF1, SCU458, GENMASK(6, 4), (5 << 4)));
++FUNCFG_DESCL(F8, PIN_CFG(I2C5, SCU458, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(I2CF1, SCU458, GENMASK(10, 8), (5 << 8)));
++FUNCFG_DESCL(M13, PIN_CFG(I2C5, SCU458, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(I2CF1, SCU458, GENMASK(14, 12), (5 << 12)));
++FUNCFG_DESCL(F7, PIN_CFG(I2C6, SCU458, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(I2CF2, SCU458, GENMASK(18, 16), (5 << 16)));
++FUNCFG_DESCL(D8, PIN_CFG(I2C6, SCU458, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(I2CF2, SCU458, GENMASK(22, 20), (5 << 20)));
++FUNCFG_DESCL(E8, PIN_CFG(I2C7, SCU458, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(I2CF2, SCU458, GENMASK(26, 24), (5 << 24)));
++FUNCFG_DESCL(L12, PIN_CFG(I2C7, SCU458, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(I2CF2, SCU458, GENMASK(30, 28), (5 << 28)));
++FUNCFG_DESCL(F12, PIN_CFG(I2C8, SCU45C, GENMASK(2, 0), 1),
++ PIN_CFG(I2CF0, SCU45C, GENMASK(2, 0), 5));
++FUNCFG_DESCL(E12, PIN_CFG(I2C8, SCU45C, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(I2CF0, SCU45C, GENMASK(6, 4), (5 << 4)));
++FUNCFG_DESCL(J12, PIN_CFG(I2C9, SCU45C, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(I2CF0, SCU45C, GENMASK(10, 8), (5 << 8)));
++FUNCFG_DESCL(G7, PIN_CFG(I2C9, SCU45C, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(CANBUS, SCU45C, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(I2CF0, SCU45C, GENMASK(14, 12), (5 << 12)));
++FUNCFG_DESCL(G8, PIN_CFG(I2C10, SCU45C, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(CANBUS, SCU45C, GENMASK(18, 16), (2 << 16)));
++FUNCFG_DESCL(G9, PIN_CFG(I2C10, SCU45C, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(CANBUS, SCU45C, GENMASK(22, 20), (2 << 20)));
++FUNCFG_DESCL(G10, PIN_CFG(I2C11, SCU45C, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(USBUART, SCU45C, GENMASK(26, 24), (2 << 24)));
++FUNCFG_DESCL(K12, PIN_CFG(I2C11, SCU45C, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(USBUART, SCU45C, GENMASK(30, 28), (2 << 28)));
++FUNCFG_DESCL(W17, PIN_CFG(ADC0, SCU460, GENMASK(2, 0), 0),
++ PIN_CFG(GPIY0, SCU460, GENMASK(2, 0), 1),
++ PIN_CFG(SALT4, SCU460, GENMASK(2, 0), 2));
++FUNCFG_DESCL(V18, PIN_CFG(ADC1, SCU460, GENMASK(6, 4), 0),
++ PIN_CFG(GPIY1, SCU460, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(SALT5, SCU460, GENMASK(6, 4), (2 << 4)));
++FUNCFG_DESCL(W18, PIN_CFG(ADC2, SCU460, GENMASK(10, 8), 0),
++ PIN_CFG(GPIY2, SCU460, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(SALT6, SCU460, GENMASK(10, 8), (2 << 8)));
++FUNCFG_DESCL(Y17, PIN_CFG(ADC3, SCU460, GENMASK(14, 12), 0),
++ PIN_CFG(GPIY3, SCU460, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(SALT7, SCU460, GENMASK(14, 12), (2 << 12)));
++FUNCFG_DESCL(AA18, PIN_CFG(ADC4, SCU460, GENMASK(18, 16), 0),
++ PIN_CFG(GPIY4, SCU460, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(SALT8, SCU460, GENMASK(18, 16), (2 << 16)));
++FUNCFG_DESCL(AA13, PIN_CFG(ADC5, SCU460, GENMASK(22, 20), 0),
++ PIN_CFG(GPIY5, SCU460, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(SALT9, SCU460, GENMASK(22, 20), (2 << 20)));
++FUNCFG_DESCL(Y18, PIN_CFG(ADC6, SCU460, GENMASK(26, 24), 0),
++ PIN_CFG(GPIY6, SCU460, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(SALT10, SCU460, GENMASK(26, 24), (2 << 24)));
++FUNCFG_DESCL(AA12, PIN_CFG(ADC7, SCU460, GENMASK(30, 28), 0),
++ PIN_CFG(GPIY7, SCU460, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(SALT11, SCU460, GENMASK(30, 28), (2 << 28)));
++FUNCFG_DESCL(W20, PIN_CFG(ADC8, SCU464, GENMASK(2, 0), 0),
++ PIN_CFG(GPIZ0, SCU464, GENMASK(2, 0), 1));
++FUNCFG_DESCL(V20, PIN_CFG(ADC9, SCU464, GENMASK(6, 4), 0),
++ PIN_CFG(GPIZ1, SCU464, GENMASK(6, 4), (1 << 4)));
++FUNCFG_DESCL(Y11, PIN_CFG(ADC10, SCU464, GENMASK(10, 8), 0),
++ PIN_CFG(GPIZ2, SCU464, GENMASK(10, 8), (1 << 8)));
++FUNCFG_DESCL(V14, PIN_CFG(ADC11, SCU464, GENMASK(14, 12), 0),
++ PIN_CFG(GPIZ3, SCU464, GENMASK(14, 12), (1 << 12)));
++FUNCFG_DESCL(V19, PIN_CFG(ADC12, SCU464, GENMASK(18, 16), 0),
++ PIN_CFG(GPIZ4, SCU464, GENMASK(18, 16), (1 << 16)));
++FUNCFG_DESCL(W14, PIN_CFG(ADC13, SCU464, GENMASK(22, 20), 0),
++ PIN_CFG(GPIZ5, SCU464, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(AUXPWRGOOD0, SCU464, GENMASK(22, 20), (2 << 20)));
++FUNCFG_DESCL(Y20, PIN_CFG(ADC14, SCU464, GENMASK(26, 24), 0),
++ PIN_CFG(GPIZ6, SCU464, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(AUXPWRGOOD1, SCU464, GENMASK(26, 24), (2 << 24)));
++FUNCFG_DESCL(AB19, PIN_CFG(ADC15, SCU464, GENMASK(30, 28), 0),
++ PIN_CFG(GPIZ7, SCU464, GENMASK(30, 28), (1 << 28)));
++FUNCFG_DESCL(U21, PIN_CFG(SGPM0, SCU468, GENMASK(2, 0), 1),
++ PIN_CFG(SMON0, SCU468, GENMASK(2, 0), 2),
++ PIN_CFG(NCTS2, SCU468, GENMASK(2, 0), 3),
++ PIN_CFG(MACLINK0, SCU468, GENMASK(2, 0), 4));
++FUNCFG_DESCL(T24, PIN_CFG(SGPM0, SCU468, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(SMON0, SCU468, GENMASK(6, 4), (2 << 4)),
++ PIN_CFG(NDCD2, SCU468, GENMASK(6, 4), (3 << 4)),
++ PIN_CFG(MACLINK2, SCU468, GENMASK(6, 4), (4 << 4)));
++FUNCFG_DESCL(V24, PIN_CFG(SGPM0LD_R, SCU468, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(HBLED, SCU468, GENMASK(10, 8), (2 << 8)));
++FUNCFG_DESCL(V22, PIN_CFG(SGPM0, SCU468, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(SMON0, SCU468, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(NDSR2, SCU468, GENMASK(14, 12), (3 << 12)));
++FUNCFG_DESCL(T23, PIN_CFG(SGPM0, SCU468, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(SMON0, SCU468, GENMASK(18, 16), (2 << 16)),
++ PIN_CFG(NRI2, SCU468, GENMASK(18, 16), (3 << 16)));
++FUNCFG_DESCL(AC25, PIN_CFG(SGPM1, SCU468, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(WDTRST4N, SCU468, GENMASK(22, 20), (2 << 20)),
++ PIN_CFG(NDTR2, SCU468, GENMASK(22, 20), (3 << 20)),
++ PIN_CFG(SMON1, SCU468, GENMASK(22, 20), (4 << 20)));
++FUNCFG_DESCL(AB25, PIN_CFG(SGPM1, SCU468, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(WDTRST5N, SCU468, GENMASK(26, 24), (2 << 24)),
++ PIN_CFG(NRTS2, SCU468, GENMASK(26, 24), (3 << 24)),
++ PIN_CFG(SMON1, SCU468, GENMASK(26, 24), (4 << 24)));
++FUNCFG_DESCL(AC24, PIN_CFG(SGPM1LD_R, SCU468, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(WDTRST6N, SCU468, GENMASK(30, 28), (2 << 28)),
++ PIN_CFG(MACLINK1, SCU468, GENMASK(30, 28), (3 << 28)));
++FUNCFG_DESCL(SGMII0, PIN_CFG(SGMII, SCU47C, BIT(0), 1 << 0));
++FUNCFG_DESCL(PCIERC2_PERST, PIN_CFG(PE2SGRSTN, SCU908, BIT(1), 1 << 1));
++FUNCFG_DESCL(PORTC_MODE, PIN_CFG(USB2CUD, SCU3B0, GENMASK(1, 0), 0),
++ PIN_CFG(USB2CD, SCU3B0, GENMASK(1, 0), 1 << 0),
++ PIN_CFG(USB2CH, SCU3B0, GENMASK(1, 0), 2 << 0),
++ PIN_CFG(USB2CU, SCU3B0, GENMASK(1, 0), 3 << 0));
++FUNCFG_DESCL(PORTD_MODE, PIN_CFG(USB2DD, SCU3B0, GENMASK(3, 2), 1 << 2),
++ PIN_CFG(USB2DH, SCU3B0, GENMASK(3, 2), 2 << 2));
++
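++/*
++ * Each FUNCFG_DESCL() above describes one ball: every PIN_CFG() entry names
++ * a signal, the SCU multi-function pin control register that muxes it, the
++ * bit field inside that register (GENMASK) and the field value selecting the
++ * signal.  Most SCU4xx mux registers pack eight balls per 32-bit word, one
++ * 3-bit function field per nibble; a few controls (SGMII, PCIe PERST, USB
++ * port mode) use dedicated bits instead.  For example, per the C16
++ * descriptor, routing LPC1 to ball C16 means writing 2 to SCU400[2:0].
++ *
++ * pin_cfg[] below gathers these per-ball descriptors (PINCFG_PIN) into the
++ * table handed to the core via .configs_g7.
++ */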
++static const struct aspeed_g7_pincfg pin_cfg[] = {
++ PINCFG_PIN(C16), PINCFG_PIN(C14), PINCFG_PIN(C11),
++ PINCFG_PIN(D9), PINCFG_PIN(F14), PINCFG_PIN(D10),
++ PINCFG_PIN(C12), PINCFG_PIN(C13), PINCFG_PIN(AC26),
++ PINCFG_PIN(AA25), PINCFG_PIN(AB23), PINCFG_PIN(U22),
++ PINCFG_PIN(V21), PINCFG_PIN(N26), PINCFG_PIN(P25),
++ PINCFG_PIN(N25), PINCFG_PIN(V23), PINCFG_PIN(W22),
++ PINCFG_PIN(AB26), PINCFG_PIN(AD26), PINCFG_PIN(P26),
++ PINCFG_PIN(AE26), PINCFG_PIN(AF26), PINCFG_PIN(AF25),
++ PINCFG_PIN(AE25), PINCFG_PIN(AD25), PINCFG_PIN(AF23),
++ PINCFG_PIN(AF20), PINCFG_PIN(AF21), PINCFG_PIN(AE21),
++ PINCFG_PIN(AE23), PINCFG_PIN(AD22), PINCFG_PIN(AF17),
++ PINCFG_PIN(AA16), PINCFG_PIN(Y16), PINCFG_PIN(V17),
++ PINCFG_PIN(J13), PINCFG_PIN(AB16), PINCFG_PIN(AC16),
++ PINCFG_PIN(AF16), PINCFG_PIN(AA15), PINCFG_PIN(AB15),
++ PINCFG_PIN(AC15), PINCFG_PIN(AD15), PINCFG_PIN(Y15),
++ PINCFG_PIN(AA14), PINCFG_PIN(W16), PINCFG_PIN(V16),
++ PINCFG_PIN(AB18), PINCFG_PIN(AC18), PINCFG_PIN(K13),
++ PINCFG_PIN(AA17), PINCFG_PIN(AB17), PINCFG_PIN(AD16),
++ PINCFG_PIN(AC17), PINCFG_PIN(AD17), PINCFG_PIN(AE16),
++ PINCFG_PIN(AE17), PINCFG_PIN(AB24), PINCFG_PIN(W26),
++ PINCFG_PIN(HOLE0), PINCFG_PIN(HOLE1), PINCFG_PIN(HOLE2),
++ PINCFG_PIN(HOLE3), PINCFG_PIN(W25), PINCFG_PIN(Y23),
++ PINCFG_PIN(Y24), PINCFG_PIN(W21), PINCFG_PIN(AA23),
++ PINCFG_PIN(AC22), PINCFG_PIN(AB22), PINCFG_PIN(Y21),
++ PINCFG_PIN(AE20), PINCFG_PIN(AF19), PINCFG_PIN(Y22),
++ PINCFG_PIN(AA20), PINCFG_PIN(AA22), PINCFG_PIN(AB20),
++ PINCFG_PIN(AF18), PINCFG_PIN(AE19), PINCFG_PIN(AD20),
++ PINCFG_PIN(AC20), PINCFG_PIN(AA21), PINCFG_PIN(AB21),
++ PINCFG_PIN(AC19), PINCFG_PIN(AE18), PINCFG_PIN(AD19),
++ PINCFG_PIN(AD18), PINCFG_PIN(U25), PINCFG_PIN(U26),
++ PINCFG_PIN(Y26), PINCFG_PIN(AA24), PINCFG_PIN(R25),
++ PINCFG_PIN(AA26), PINCFG_PIN(R26), PINCFG_PIN(Y25),
++ PINCFG_PIN(B16), PINCFG_PIN(D14), PINCFG_PIN(B15),
++ PINCFG_PIN(B14), PINCFG_PIN(C17), PINCFG_PIN(B13),
++ PINCFG_PIN(E14), PINCFG_PIN(C15), PINCFG_PIN(D24),
++ PINCFG_PIN(B23), PINCFG_PIN(B22), PINCFG_PIN(C23),
++ PINCFG_PIN(B18), PINCFG_PIN(B21), PINCFG_PIN(M15),
++ PINCFG_PIN(B19), PINCFG_PIN(B26), PINCFG_PIN(A25),
++ PINCFG_PIN(A24), PINCFG_PIN(B24), PINCFG_PIN(E26),
++ PINCFG_PIN(A21), PINCFG_PIN(A19), PINCFG_PIN(A18),
++ PINCFG_PIN(D26), PINCFG_PIN(C26), PINCFG_PIN(A23),
++ PINCFG_PIN(A22), PINCFG_PIN(B25), PINCFG_PIN(F26),
++ PINCFG_PIN(A26), PINCFG_PIN(A14), PINCFG_PIN(E10),
++ PINCFG_PIN(E13), PINCFG_PIN(D12), PINCFG_PIN(F10),
++ PINCFG_PIN(E11), PINCFG_PIN(F11), PINCFG_PIN(F13),
++ PINCFG_PIN(N15), PINCFG_PIN(C20), PINCFG_PIN(C19),
++ PINCFG_PIN(A8), PINCFG_PIN(R14), PINCFG_PIN(A7),
++ PINCFG_PIN(P14), PINCFG_PIN(D20), PINCFG_PIN(A6),
++ PINCFG_PIN(B6), PINCFG_PIN(N14), PINCFG_PIN(B7),
++ PINCFG_PIN(B8), PINCFG_PIN(B9), PINCFG_PIN(M14),
++ PINCFG_PIN(J11), PINCFG_PIN(E7), PINCFG_PIN(D19),
++ PINCFG_PIN(B11), PINCFG_PIN(D15), PINCFG_PIN(B12),
++ PINCFG_PIN(B10), PINCFG_PIN(P13), PINCFG_PIN(C18),
++ PINCFG_PIN(C6), PINCFG_PIN(C7), PINCFG_PIN(D7),
++ PINCFG_PIN(N13), PINCFG_PIN(C8), PINCFG_PIN(C9),
++ PINCFG_PIN(C10), PINCFG_PIN(M16), PINCFG_PIN(A15),
++ PINCFG_PIN(G11), PINCFG_PIN(H7), PINCFG_PIN(H8),
++ PINCFG_PIN(H9), PINCFG_PIN(H10), PINCFG_PIN(H11),
++ PINCFG_PIN(J9), PINCFG_PIN(J10), PINCFG_PIN(E9),
++ PINCFG_PIN(F9), PINCFG_PIN(F8), PINCFG_PIN(M13),
++ PINCFG_PIN(F7), PINCFG_PIN(D8), PINCFG_PIN(E8),
++ PINCFG_PIN(L12), PINCFG_PIN(F12), PINCFG_PIN(E12),
++ PINCFG_PIN(J12), PINCFG_PIN(G7), PINCFG_PIN(G8),
++ PINCFG_PIN(G9), PINCFG_PIN(G10), PINCFG_PIN(K12),
++ PINCFG_PIN(W17), PINCFG_PIN(V18), PINCFG_PIN(W18),
++ PINCFG_PIN(Y17), PINCFG_PIN(AA18), PINCFG_PIN(AA13),
++ PINCFG_PIN(Y18), PINCFG_PIN(AA12), PINCFG_PIN(W20),
++ PINCFG_PIN(V20), PINCFG_PIN(Y11), PINCFG_PIN(V14),
++ PINCFG_PIN(V19), PINCFG_PIN(W14), PINCFG_PIN(Y20),
++ PINCFG_PIN(AB19), PINCFG_PIN(U21), PINCFG_PIN(T24),
++ PINCFG_PIN(V24), PINCFG_PIN(V22), PINCFG_PIN(T23),
++ PINCFG_PIN(AC25), PINCFG_PIN(AB25), PINCFG_PIN(AC24),
++ PINCFG_PIN(SGMII0), PINCFG_PIN(PCIERC2_PERST),
++ PINCFG_PIN(PORTC_MODE), PINCFG_PIN(PORTD_MODE),
++};
++
++static int aspeed_g7_soc1_dt_node_to_map(struct pinctrl_dev *pctldev,
++ struct device_node *np_config,
++ struct pinctrl_map **map, u32 *num_maps)
++{
++ return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
++ PIN_MAP_TYPE_INVALID);
++}
++
++static void aspeed_g7_soc1_dt_free_map(struct pinctrl_dev *pctldev,
++ struct pinctrl_map *map, u32 num_maps)
++{
++ kfree(map);
++}
++
++static const struct pinctrl_ops aspeed_g7_soc1_pinctrl_ops = {
++ .get_groups_count = aspeed_pinctrl_get_groups_count,
++ .get_group_name = aspeed_pinctrl_get_group_name,
++ .get_group_pins = aspeed_pinctrl_get_group_pins,
++ .pin_dbg_show = aspeed_pinctrl_pin_dbg_show,
++ .dt_node_to_map = aspeed_g7_soc1_dt_node_to_map,
++ .dt_free_map = aspeed_g7_soc1_dt_free_map,
++};
++
++static const struct pinmux_ops aspeed_g7_soc1_pinmux_ops = {
++ .get_functions_count = aspeed_pinmux_get_fn_count,
++ .get_function_name = aspeed_pinmux_get_fn_name,
++ .get_function_groups = aspeed_pinmux_get_fn_groups,
++ .set_mux = aspeed_g7_pinmux_set_mux,
++ .gpio_request_enable = aspeed_g7_gpio_request_enable,
++ .strict = true,
++};
++
++static const struct pinconf_ops aspeed_g7_soc1_pinconf_ops = {
++ .is_generic = true,
++ .pin_config_get = aspeed_pin_config_get,
++ .pin_config_set = aspeed_pin_config_set,
++ .pin_config_group_get = aspeed_pin_config_group_get,
++ .pin_config_group_set = aspeed_pin_config_group_set,
++};
++
++/* pinctrl_desc */
++static struct pinctrl_desc aspeed_g7_soc1_pinctrl_desc = {
++ .name = "aspeed-g7-soc1-pinctrl",
++ .pins = aspeed_g7_soc1_pins,
++ .npins = ARRAY_SIZE(aspeed_g7_soc1_pins),
++ .pctlops = &aspeed_g7_soc1_pinctrl_ops,
++ .pmxops = &aspeed_g7_soc1_pinmux_ops,
++ .confops = &aspeed_g7_soc1_pinconf_ops,
++ .owner = THIS_MODULE,
++};
++
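++/*
++ * Pin configuration table: each entry is
++ *   { parameter, { first ball, last ball }, SCU register, bit field }.
++ * Drive strength uses a 2-bit field per ball in SCU4C0..SCU4DC, while the
++ * 1.8 V / 3.3 V power-source selection uses a single SCU4A0 bit per GPIO
++ * bank, so those entries cover a range of balls.
++ */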
++static struct aspeed_pin_config aspeed_g7_configs[] = {
++ /* GPIOA */
++ { PIN_CONFIG_DRIVE_STRENGTH, { C16, C16 }, SCU4C0, GENMASK(1, 0) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C14, C14 }, SCU4C0, GENMASK(3, 2) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C11, C11 }, SCU4C0, GENMASK(5, 4) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { D9, D9 }, SCU4C0, GENMASK(7, 6) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { F14, F14 }, SCU4C0, GENMASK(9, 8) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { D10, D10 }, SCU4C0, GENMASK(11, 10) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C12, C12 }, SCU4C0, GENMASK(13, 12) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C13, C13 }, SCU4C0, GENMASK(15, 14) },
++ { PIN_CONFIG_POWER_SOURCE, { C16, C13 }, SCU4A0, BIT_MASK(4) },
++ /* GPIOI */
++ { PIN_CONFIG_DRIVE_STRENGTH, { W25, W25 }, SCU4C0, GENMASK(17, 16) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { Y23, Y23 }, SCU4C0, GENMASK(19, 18) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { Y24, Y24 }, SCU4C0, GENMASK(21, 20) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { W21, W21 }, SCU4C0, GENMASK(23, 22) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AA23, AA23 }, SCU4C0, GENMASK(25, 24) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AC22, AC22 }, SCU4C0, GENMASK(27, 26) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AB22, AB22 }, SCU4C0, GENMASK(29, 28) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { Y21, Y21 }, SCU4C0, GENMASK(31, 30) },
++ { PIN_CONFIG_POWER_SOURCE, { W25, Y21 }, SCU4A0, BIT_MASK(12) },
++ /* GPIOJ */
++ { PIN_CONFIG_DRIVE_STRENGTH, { AE20, AE20 }, SCU4C4, GENMASK(1, 0) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AF19, AF19 }, SCU4C4, GENMASK(3, 2) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { Y22, Y22 }, SCU4C4, GENMASK(5, 4) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AA20, AA20 }, SCU4C4, GENMASK(7, 6) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AA22, AA22 }, SCU4C4, GENMASK(9, 8) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AB20, AB20 }, SCU4C4, GENMASK(11, 10) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AF18, AF18 }, SCU4C4, GENMASK(13, 12) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AE19, AE19 }, SCU4C4, GENMASK(15, 14) },
++ /* GPIOK */
++ { PIN_CONFIG_DRIVE_STRENGTH, { AD20, AD20 }, SCU4C4, GENMASK(17, 16) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AC20, AC20 }, SCU4C4, GENMASK(19, 18) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AA21, AA21 }, SCU4C4, GENMASK(21, 20) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AB21, AB21 }, SCU4C4, GENMASK(23, 22) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AC19, AC19 }, SCU4C4, GENMASK(25, 24) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AE18, AE18 }, SCU4C4, GENMASK(27, 26) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AD19, AD19 }, SCU4C4, GENMASK(29, 28) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AD18, AD18 }, SCU4C4, GENMASK(31, 30) },
++ /* GPIOL */
++ { PIN_CONFIG_DRIVE_STRENGTH, { U25, U25 }, SCU4C8, GENMASK(1, 0) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { U26, U26 }, SCU4C8, GENMASK(3, 2) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { Y26, Y26 }, SCU4C8, GENMASK(5, 4) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AA24, AA24 }, SCU4C8, GENMASK(7, 6) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { R25, R25 }, SCU4C8, GENMASK(9, 8) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AA26, AA26 }, SCU4C8, GENMASK(11, 10) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { R26, R26 }, SCU4C8, GENMASK(13, 12) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { Y25, Y25 }, SCU4C8, GENMASK(15, 14) },
++ { PIN_CONFIG_POWER_SOURCE, { U25, Y25 }, SCU4A0, BIT_MASK(15) },
++ /* GPIOM */
++ { PIN_CONFIG_DRIVE_STRENGTH, { B16, B16 }, SCU4C8, GENMASK(17, 16) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { D14, D14 }, SCU4C8, GENMASK(19, 18) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B15, B15 }, SCU4C8, GENMASK(21, 20) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B14, B14 }, SCU4C8, GENMASK(23, 22) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C17, C17 }, SCU4C8, GENMASK(25, 24) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B13, B13 }, SCU4C8, GENMASK(27, 26) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { E14, E14 }, SCU4C8, GENMASK(29, 28) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C15, C15 }, SCU4C8, GENMASK(31, 30) },
++ { PIN_CONFIG_POWER_SOURCE, { B16, C15 }, SCU4A0, BIT_MASK(16) },
++ /* GPION */
++ { PIN_CONFIG_DRIVE_STRENGTH, { D24, D24 }, SCU4CC, GENMASK(1, 0) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B23, B23 }, SCU4CC, GENMASK(3, 2) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B22, B22 }, SCU4CC, GENMASK(5, 4) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C23, C23 }, SCU4CC, GENMASK(7, 6) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B18, B18 }, SCU4CC, GENMASK(9, 8) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B21, B21 }, SCU4CC, GENMASK(11, 10) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { M15, M15 }, SCU4CC, GENMASK(13, 12) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B19, B19 }, SCU4CC, GENMASK(15, 14) },
++ { PIN_CONFIG_POWER_SOURCE, { D24, B19 }, SCU4A0, BIT_MASK(17) },
++ /* GPIOO */
++ { PIN_CONFIG_DRIVE_STRENGTH, { B26, B26 }, SCU4CC, GENMASK(17, 16) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A25, A25 }, SCU4CC, GENMASK(19, 18) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A24, A24 }, SCU4CC, GENMASK(21, 20) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B24, B24 }, SCU4CC, GENMASK(23, 22) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { E26, E26 }, SCU4CC, GENMASK(25, 24) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A21, A21 }, SCU4CC, GENMASK(27, 26) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A19, A19 }, SCU4CC, GENMASK(29, 28) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A18, A18 }, SCU4CC, GENMASK(31, 30) },
++ { PIN_CONFIG_POWER_SOURCE, { B26, A18 }, SCU4A0, BIT_MASK(18) },
++ /* GPIOP */
++ { PIN_CONFIG_DRIVE_STRENGTH, { D26, D26 }, SCU4D0, GENMASK(1, 0) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C26, C26 }, SCU4D0, GENMASK(3, 2) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A23, A23 }, SCU4D0, GENMASK(5, 4) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A22, A22 }, SCU4D0, GENMASK(7, 6) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B25, B25 }, SCU4D0, GENMASK(9, 8) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { F26, F26 }, SCU4D0, GENMASK(11, 10) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A26, A26 }, SCU4D0, GENMASK(13, 12) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A14, A14 }, SCU4D0, GENMASK(15, 14) },
++ { PIN_CONFIG_POWER_SOURCE, { D26, A14 }, SCU4A0, BIT_MASK(19) },
++ /* GPIOQ */
++ { PIN_CONFIG_DRIVE_STRENGTH, { E10, E10 }, SCU4D0, GENMASK(17, 16) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { E13, E13 }, SCU4D0, GENMASK(19, 18) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { D12, D12 }, SCU4D0, GENMASK(21, 20) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { F10, F10 }, SCU4D0, GENMASK(23, 22) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { E11, E11 }, SCU4D0, GENMASK(25, 24) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { F11, F11 }, SCU4D0, GENMASK(27, 26) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { F13, F13 }, SCU4D0, GENMASK(29, 28) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { N15, N15 }, SCU4D0, GENMASK(31, 30) },
++ /* GPIOR */
++ { PIN_CONFIG_DRIVE_STRENGTH, { C20, C20 }, SCU4D4, GENMASK(1, 0) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C19, C19 }, SCU4D4, GENMASK(3, 2) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A8, A8 }, SCU4D4, GENMASK(5, 4) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { R14, R14 }, SCU4D4, GENMASK(7, 6) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A7, A7 }, SCU4D4, GENMASK(9, 8) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { P14, P14 }, SCU4D4, GENMASK(11, 10) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { D20, D20 }, SCU4D4, GENMASK(13, 12) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A6, A6 }, SCU4D4, GENMASK(15, 14) },
++ { PIN_CONFIG_POWER_SOURCE, { C20, A6 }, SCU4A0, BIT_MASK(21) },
++ /* GPIOS */
++ { PIN_CONFIG_DRIVE_STRENGTH, { B6, B6 }, SCU4D4, GENMASK(17, 16) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { N14, N14 }, SCU4D4, GENMASK(19, 18) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B7, B7 }, SCU4D4, GENMASK(21, 20) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B8, B8 }, SCU4D4, GENMASK(23, 22) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B9, B9 }, SCU4D4, GENMASK(25, 24) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { M14, M14 }, SCU4D4, GENMASK(27, 26) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { J11, J11 }, SCU4D4, GENMASK(29, 28) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { E7, E7 }, SCU4D4, GENMASK(31, 30) },
++ { PIN_CONFIG_POWER_SOURCE, { B6, E7 }, SCU4A0, BIT_MASK(22) },
++ /* GPIOT */
++ { PIN_CONFIG_DRIVE_STRENGTH, { D19, D19 }, SCU4D8, GENMASK(1, 0) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B11, B11 }, SCU4D8, GENMASK(3, 2) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { D15, D15 }, SCU4D8, GENMASK(5, 4) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B12, B12 }, SCU4D8, GENMASK(7, 6) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B10, B10 }, SCU4D8, GENMASK(9, 8) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { P13, P13 }, SCU4D8, GENMASK(11, 10) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C18, C18 }, SCU4D8, GENMASK(13, 12) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C6, C6 }, SCU4D8, GENMASK(15, 14) },
++ { PIN_CONFIG_POWER_SOURCE, { D19, C6 }, SCU4A0, BIT_MASK(23) },
++ /* GPIOU */
++ { PIN_CONFIG_DRIVE_STRENGTH, { C7, C7 }, SCU4D8, GENMASK(17, 16) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { D7, D7 }, SCU4D8, GENMASK(19, 18) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { N13, N13 }, SCU4D8, GENMASK(21, 20) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C8, C8 }, SCU4D8, GENMASK(23, 22) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C9, C9 }, SCU4D8, GENMASK(25, 24) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C10, C10 }, SCU4D8, GENMASK(27, 26) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { M16, M16 }, SCU4D8, GENMASK(29, 28) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A15, A15 }, SCU4D8, GENMASK(31, 30) },
++ { PIN_CONFIG_POWER_SOURCE, { C7, A15 }, SCU4A0, BIT_MASK(24) },
++ /* GPIOW */
++ { PIN_CONFIG_DRIVE_STRENGTH, { E9, E9 }, SCU4DC, GENMASK(1, 0) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { F9, F9 }, SCU4DC, GENMASK(3, 2) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { F8, F8 }, SCU4DC, GENMASK(5, 4) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { M13, M13 }, SCU4DC, GENMASK(7, 6) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { F7, F7 }, SCU4DC, GENMASK(9, 8) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { D8, D8 }, SCU4DC, GENMASK(11, 10) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { E8, E8 }, SCU4DC, GENMASK(13, 12) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { L12, L12 }, SCU4DC, GENMASK(15, 14) },
++ { PIN_CONFIG_POWER_SOURCE, { E9, L12 }, SCU4A0, BIT_MASK(26) },
++};
++
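++/*
++ * Translation from generic pinconf arguments to raw field values (consumed
++ * through .confmaps below): drive-strength arguments 0..3 map directly onto
++ * the 2-bit field, and power-source 3300 mV / 1800 mV map to 0 / 1.
++ */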
++static const struct aspeed_pin_config_map aspeed_g7_pin_config_map[] = {
++ { PIN_CONFIG_BIAS_PULL_DOWN, 0, 1, BIT_MASK(0)},
++ { PIN_CONFIG_BIAS_PULL_DOWN, -1, 0, BIT_MASK(0)},
++ { PIN_CONFIG_BIAS_PULL_UP, 0, 1, BIT_MASK(0)},
++ { PIN_CONFIG_BIAS_PULL_UP, -1, 0, BIT_MASK(0)},
++ { PIN_CONFIG_BIAS_DISABLE, -1, 1, BIT_MASK(0)},
++ { PIN_CONFIG_DRIVE_STRENGTH, 0, 0, GENMASK(1, 0)},
++ { PIN_CONFIG_DRIVE_STRENGTH, 1, 1, GENMASK(1, 0)},
++ { PIN_CONFIG_DRIVE_STRENGTH, 2, 2, GENMASK(1, 0)},
++ { PIN_CONFIG_DRIVE_STRENGTH, 3, 3, GENMASK(1, 0)},
++ { PIN_CONFIG_POWER_SOURCE, 3300, 0, BIT_MASK(0)},
++ { PIN_CONFIG_POWER_SOURCE, 1800, 1, BIT_MASK(0)},
++};
++
++static struct aspeed_pinctrl_data aspeed_g7_pinctrl_data = {
++ .pins = aspeed_g7_soc1_pins,
++ .npins = ARRAY_SIZE(aspeed_g7_soc1_pins),
++ .pinmux = {
++ .groups = aspeed_g7_soc1_pingroups,
++ .ngroups = ARRAY_SIZE(aspeed_g7_soc1_pingroups),
++ .functions = aspeed_g7_soc1_funcs,
++ .nfunctions = ARRAY_SIZE(aspeed_g7_soc1_funcs),
++ .configs_g7 = pin_cfg,
++ .nconfigs_g7 = ARRAY_SIZE(pin_cfg),
++ },
++ .configs = aspeed_g7_configs,
++ .nconfigs = ARRAY_SIZE(aspeed_g7_configs),
++ .confmaps = aspeed_g7_pin_config_map,
++ .nconfmaps = ARRAY_SIZE(aspeed_g7_pin_config_map),
++};
++
++static int aspeed_g7_soc1_pinctrl_probe(struct platform_device *pdev)
++{
++ return aspeed_pinctrl_probe(pdev, &aspeed_g7_soc1_pinctrl_desc,
++ &aspeed_g7_pinctrl_data);
++}
++
++static const struct of_device_id aspeed_g7_soc1_pinctrl_match[] = {
++ { .compatible = "aspeed,ast2700-soc1-pinctrl" },
++ {}
++};
++MODULE_DEVICE_TABLE(of, aspeed_g7_soc1_pinctrl_match);
++
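++/*
++ * Boards select signals from this controller by name through the generic
++ * pinctrl bindings (see aspeed_g7_soc1_dt_node_to_map() above), via child
++ * nodes of the "aspeed,ast2700-soc1-pinctrl" node, for example (node and
++ * label names here are illustrative only):
++ *
++ *	pinctrl_i2c0_default: i2c0-default {
++ *		function = "I2C0";
++ *		groups = "I2C0";
++ *	};
++ */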
++static struct platform_driver aspeed_g7_soc1_pinctrl_driver = {
++ .probe = aspeed_g7_soc1_pinctrl_probe,
++ .driver = {
++ .name = "aspeed-g7-soc1-pinctrl",
++ .of_match_table = aspeed_g7_soc1_pinctrl_match,
++ .suppress_bind_attrs = true,
++ },
++};
++
++static int __init aspeed_g7_soc1_pinctrl_register(void)
++{
++ return platform_driver_register(&aspeed_g7_soc1_pinctrl_driver);
++}
++arch_initcall(aspeed_g7_soc1_pinctrl_register);
+diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g7a0-soc1.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g7a0-soc1.c
+new file mode 100644
+index 000000000..7a50a54a4
+--- /dev/null
++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g7a0-soc1.c
+@@ -0,0 +1,2304 @@
++// SPDX-License-Identifier: GPL-2.0
++
++#include <linux/bits.h>
++#include <linux/device.h>
++#include <linux/gpio/driver.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++#include <linux/mfd/syscon.h>
++#include <linux/module.h>
++#include <linux/mod_devicetable.h>
++#include <linux/pinctrl/machine.h>
++#include <linux/pinctrl/pinconf.h>
++#include <linux/pinctrl/pinconf-generic.h>
++#include <linux/pinctrl/pinctrl.h>
++#include <linux/pinctrl/pinmux.h>
++#include <linux/platform_device.h>
++#include <linux/property.h>
++#include "pinctrl-aspeed.h"
++
++#define SCU3B0 0x3B0 /* USB Controller Register */
++#define SCU3B4 0x3B4 /* USB Controller Lock Register */
++#define SCU3B8 0x3B8 /* USB Controller Secure Register #1 */
++#define SCU3BC 0x3BC /* USB Controller Secure Register #2 */
++#define SCU3C0 0x3C0 /* USB Controller Secure Register #3 */
++#define SCU400 0x400 /* Multi-function Pin Control #1 */
++#define SCU404 0x404 /* Multi-function Pin Control #2 */
++#define SCU408 0x408 /* Multi-function Pin Control #3 */
++#define SCU40C 0x40C /* Multi-function Pin Control #4 */
++#define SCU410 0x410 /* Multi-function Pin Control #5 */
++#define SCU414 0x414 /* Multi-function Pin Control #6 */
++#define SCU418 0x418 /* Multi-function Pin Control #7 */
++#define SCU41C 0x41C /* Multi-function Pin Control #8 */
++#define SCU420 0x420 /* Multi-function Pin Control #9 */
++#define SCU424 0x424 /* Multi-function Pin Control #10 */
++#define SCU428 0x428 /* Multi-function Pin Control #11 */
++#define SCU42C 0x42C /* Multi-function Pin Control #12 */
++#define SCU430 0x430 /* Multi-function Pin Control #13 */
++#define SCU434 0x434 /* Multi-function Pin Control #14 */
++#define SCU438 0x438 /* Multi-function Pin Control #15 */
++#define SCU43C 0x43C /* Multi-function Pin Control #16 */
++#define SCU440 0x440 /* Multi-function Pin Control #17 */
++#define SCU444 0x444 /* Multi-function Pin Control #18 */
++#define SCU448 0x448 /* Multi-function Pin Control #19 */
++#define SCU44C 0x44C /* Multi-function Pin Control #20 */
++#define SCU450 0x450 /* Multi-function Pin Control #21 */
++#define SCU454 0x454 /* Multi-function Pin Control #22 */
++#define SCU458 0x458 /* Multi-function Pin Control #23 */
++#define SCU45C 0x45C /* Multi-function Pin Control #24 */
++#define SCU460 0x460 /* Multi-function Pin Control #25 */
++#define SCU464 0x464 /* Multi-function Pin Control #26 */
++#define SCU468 0x468 /* Multi-function Pin Control #27 */
++#define SCU46C 0x46C /* Multi-function Pin Control #28 */
++#define SCU470 0x470 /* Multi-function Pin Control #29 */
++#define SCU474 0x474 /* Multi-function Pin Control #30 */
++#define SCU478 0x478 /* Multi-function Pin Control #31 */
++#define SCU47C 0x47C
++#define SCU4A0 0x4A0 /* Voltage Selection */
++#define SCU4C0 0x4C0 /* Driving Strength #0 A-I */
++#define SCU4C4 0x4C4 /* Driving Strength #1 J-K */
++#define SCU4C8 0x4C8 /* Driving Strength #2 L-M */
++#define SCU4CC 0x4CC /* Driving Strength #3 N-O */
++#define SCU4D0 0x4D0 /* Driving Strength #4 P-Q */
++#define SCU4D4 0x4D4 /* Driving Strength #5 R-S */
++#define SCU4D8 0x4D8 /* Driving Strength #6 T-U */
++#define SCU4DC 0x4DC /* Driving Strength #7 W */
++
++#define SCU908 0x908 /* PCIe RC PERST Pin Control */
++
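++/*
++ * Pin index space. Most entries are named after the BGA ball they map
++ * to; HOLE0-HOLE3 appear only to keep the numbering contiguous and
++ * have no functions attached, while SGMII0, PCIERC2_PERST, PORTC_MODE
++ * and PORTD_MODE stand for controls without a dedicated ball (see the
++ * SCU3B0 and SCU908 definitions above).
++ */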
++enum {
++ C16,
++ C14,
++ C11,
++ D9,
++ F14,
++ D10,
++ C12,
++ C13,
++ AA24,
++ AB24,
++ AB23,
++ AC22,
++ AD22,
++ AE21,
++ AF20,
++ AE20,
++ AD20,
++ Y23,
++ W23,
++ AD19,
++ AC20,
++ AA23,
++ AB22,
++ AB21,
++ AA22,
++ Y22,
++ W22,
++ AF18,
++ AE18,
++ AD18,
++ AC19,
++ AB20,
++ AF17,
++ AA16,
++ Y16,
++ V17,
++ J13,
++ AB16,
++ AC16,
++ AF16,
++ AA15,
++ AB15,
++ AC15,
++ AD15,
++ Y15,
++ AA14,
++ W16,
++ V16,
++ AB18,
++ AC18,
++ K13,
++ AA17,
++ AB17,
++ AD16,
++ AC17,
++ AD17,
++ AE16,
++ AE17,
++ U23,
++ T24,
++ HOLE0,
++ HOLE1,
++ HOLE2,
++ HOLE3,
++ AC24,
++ AD24,
++ AE23,
++ AE19,
++ AF23,
++ Y25,
++ AA25,
++ AF19,
++ AB25,
++ AC25,
++ AD25,
++ V22,
++ AE25,
++ V21,
++ AF21,
++ AF25,
++ AF26,
++ AE26,
++ W21,
++ AD26,
++ Y21,
++ AC26,
++ AA26,
++ AB26,
++ T26,
++ AA20,
++ V23,
++ W24,
++ R26,
++ AA21,
++ P26,
++ Y24,
++ B16,
++ D14,
++ B15,
++ B14,
++ C17,
++ B13,
++ E14,
++ C15,
++ D24,
++ B23,
++ B22,
++ C23,
++ B18,
++ B21,
++ B17,
++ B19,
++ B26,
++ A25,
++ A24,
++ B24,
++ E26,
++ A21,
++ A19,
++ A18,
++ D26,
++ C26,
++ A23,
++ B25,
++ A22,
++ F26,
++ A26,
++ A14,
++ E10,
++ E13,
++ D12,
++ F10,
++ E11,
++ F11,
++ F13,
++ N15,
++ C20,
++ C19,
++ A8,
++ R14,
++ A7,
++ P14,
++ D20,
++ A6,
++ B6,
++ N14,
++ B7,
++ B8,
++ B9,
++ M14,
++ J11,
++ E7,
++ D19,
++ B11,
++ D15,
++ B12,
++ B10,
++ P13,
++ C18,
++ C6,
++ C7,
++ D7,
++ N13,
++ C8,
++ C9,
++ C10,
++ M16,
++ A15,
++ G11,
++ H7,
++ H8,
++ H9,
++ H10,
++ H11,
++ J9,
++ J10,
++ E9,
++ F9,
++ F8,
++ M13,
++ F7,
++ D8,
++ E8,
++ L12,
++ F12,
++ E12,
++ J12,
++ G7,
++ G8,
++ G9,
++ G10,
++ K12,
++ W17,
++ V18,
++ W18,
++ Y17,
++ AA18,
++ AA13,
++ Y18,
++ AA12,
++ W20,
++ V20,
++ Y11,
++ V14,
++ V19,
++ W14,
++ Y20,
++ AB19,
++ U26,
++ U25,
++ V26,
++ W26,
++ Y26,
++ W25,
++ V24,
++ U24,
++ SGMII0,
++ PCIERC2_PERST,
++ PORTC_MODE, // SCU3B0[1:0]
++ PORTD_MODE, // SCU3B0[3:2]
++};
++
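++/*
++ * Signal groups. GROUP_DECL(<group>, <balls...>) names a mux group
++ * and lists the balls it occupies; the same ball may appear in
++ * several groups (e.g. the ESPI1, LPC1, SD and VPI alternatives all
++ * share C16...C13).
++ */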
++GROUP_DECL(ESPI0, B16, D14, B15, B14, C17, B13, E14, C15);
++GROUP_DECL(ESPI1, C16, C14, C11, D9, F14, D10, C12, C13);
++GROUP_DECL(LPC0, AB22, AB21, B16, D14, B15, B14, C17, B13, E14, C15);
++GROUP_DECL(LPC1, C16, C14, C11, D9, F14, D10, C12, C13);
++GROUP_DECL(SD, C16, C14, C11, D9, F14, D10, C12, C13);
++GROUP_DECL(VPI, C16, C14, C11, D9, F14, D10, C12, C13, AA24, AB24, AB23, AC22,
++ AD22, AE21, AF20, AE20, AD20, Y23, W23, AD19, AC20, AA23, AB22, AB21,
++ AA22, Y22, W22, AF18, AE18, AD18);
++GROUP_DECL(OSCCLK, C17);
++GROUP_DECL(TACH0, AA24);
++GROUP_DECL(TACH1, AB24);
++GROUP_DECL(TACH2, AB23);
++GROUP_DECL(TACH3, AC22);
++GROUP_DECL(THRU0, AA24, AB24);
++GROUP_DECL(THRU1, AB23, AC22);
++GROUP_DECL(TACH4, AD22);
++GROUP_DECL(TACH5, AE21);
++GROUP_DECL(TACH6, AF20);
++GROUP_DECL(TACH7, AE20);
++GROUP_DECL(NTCS5, AD22);
++GROUP_DECL(NDCD5, AE21);
++GROUP_DECL(NDSR5, AF20);
++GROUP_DECL(NRI5, AE20);
++GROUP_DECL(SALT12, W23);
++GROUP_DECL(SALT13, AD19);
++GROUP_DECL(SALT14, AC20);
++GROUP_DECL(SALT15, AA23);
++GROUP_DECL(NDTR5, AD20);
++GROUP_DECL(NRTS5, Y23);
++GROUP_DECL(NCTS6, W23);
++GROUP_DECL(NDCD6, AD19);
++GROUP_DECL(NDSR6, AC20);
++GROUP_DECL(NRI6, AA23);
++GROUP_DECL(NDTR6, AB22);
++GROUP_DECL(NRTS6, AB21);
++GROUP_DECL(TACH8, AD20);
++GROUP_DECL(TACH9, Y23);
++GROUP_DECL(TACH10, W23);
++GROUP_DECL(TACH11, AD19);
++GROUP_DECL(TACH12, AC20);
++GROUP_DECL(TACH13, AA23);
++GROUP_DECL(TACH14, AB22);
++GROUP_DECL(TACH15, AB21);
++GROUP_DECL(SPIM0, AB21, AA22, Y22, W22, AF18, AE18, AD18, AC19);
++GROUP_DECL(PWM0, AA22);
++GROUP_DECL(PWM1, Y22);
++GROUP_DECL(PWM2, W22);
++GROUP_DECL(PWM3, AF18);
++GROUP_DECL(PWM4, AE18);
++GROUP_DECL(PWM5, AD18);
++GROUP_DECL(PWM6, AC19);
++GROUP_DECL(PWM7, AB20);
++GROUP_DECL(SIOPBON0, AA22);
++GROUP_DECL(SIOPBIN0, Y22);
++GROUP_DECL(SIOSCIN0, W22);
++GROUP_DECL(SIOS3N0, AF18);
++GROUP_DECL(SIOS5N0, AE18);
++GROUP_DECL(SIOPWREQN0, AD18);
++GROUP_DECL(SIOONCTRLN0, AC19);
++GROUP_DECL(SIOPWRGD0, AB20);
++GROUP_DECL(NCTS0, AF17);
++GROUP_DECL(NDCD0, AA16);
++GROUP_DECL(NDSR0, Y16);
++GROUP_DECL(NRI0, V17);
++GROUP_DECL(NDTR0, J13);
++GROUP_DECL(NRTS0, AB16);
++GROUP_DECL(TXD0, AC16);
++GROUP_DECL(RXD0, AF16);
++GROUP_DECL(NCTS1, AA15);
++GROUP_DECL(NDCD1, AB15);
++GROUP_DECL(NDSR1, AC15);
++GROUP_DECL(NRI1, AD15);
++GROUP_DECL(NDTR1, Y15);
++GROUP_DECL(NRTS1, AA14);
++GROUP_DECL(TXD1, W16);
++GROUP_DECL(RXD1, V16);
++GROUP_DECL(TXD2, AB18);
++GROUP_DECL(RXD2, AC18);
++GROUP_DECL(TXD3, K13);
++GROUP_DECL(RXD3, AA17);
++GROUP_DECL(NCTS5, AD22);
++GROUP_DECL(TXD5, AB17);
++GROUP_DECL(RXD5, AD16);
++GROUP_DECL(TXD6, AC17);
++GROUP_DECL(RXD6, AD17);
++GROUP_DECL(TXD7, AE16);
++GROUP_DECL(RXD7, AE17);
++GROUP_DECL(TXD8, B17);
++GROUP_DECL(RXD8, B19);
++GROUP_DECL(TXD9, B26);
++GROUP_DECL(RXD9, A25);
++GROUP_DECL(TXD10, A24);
++GROUP_DECL(RXD10, B24);
++GROUP_DECL(TXD11, E26);
++GROUP_DECL(RXD11, A21);
++GROUP_DECL(SPIM1, K13, AA17, AB17, AD16, AC17, AD17, AE16, AE17);
++GROUP_DECL(WDTRST0N, K13);
++GROUP_DECL(WDTRST1N, AA17);
++GROUP_DECL(WDTRST2N, AB17);
++GROUP_DECL(WDTRST3N, AD16);
++GROUP_DECL(WDTRST4N, W25);
++GROUP_DECL(WDTRST5N, V24);
++GROUP_DECL(WDTRST6N, U24);
++GROUP_DECL(WDTRST7N, U23);
++GROUP_DECL(PWM8, K13);
++GROUP_DECL(PWM9, AA17);
++GROUP_DECL(PWM10, AB17);
++GROUP_DECL(PWM11, AD16);
++GROUP_DECL(PWM12, AC17);
++GROUP_DECL(PWM13, AD17);
++GROUP_DECL(PWM14, AE16);
++GROUP_DECL(PWM15, AE17);
++GROUP_DECL(SALT0, AC17);
++GROUP_DECL(SALT1, AD17);
++GROUP_DECL(SALT2, AE16);
++GROUP_DECL(SALT3, AE17);
++GROUP_DECL(FSI0, T26, AA20);
++GROUP_DECL(FSI1, V23, W24);
++GROUP_DECL(FSI2, R26, AA21);
++GROUP_DECL(FSI3, P26, Y24);
++GROUP_DECL(SPIM2, AC24, AD24, AE23, AE19, AF23, Y25, AA25, AF19);
++GROUP_DECL(SALT4, W17);
++GROUP_DECL(SALT5, V18);
++GROUP_DECL(SALT6, W18);
++GROUP_DECL(SALT7, Y17);
++GROUP_DECL(SALT8, AA18);
++GROUP_DECL(SALT9, AA13);
++GROUP_DECL(SALT10, Y18);
++GROUP_DECL(SALT11, AA12);
++GROUP_DECL(ADC0, W17);
++GROUP_DECL(ADC1, V18);
++GROUP_DECL(ADC2, W18);
++GROUP_DECL(ADC3, Y17);
++GROUP_DECL(ADC4, AA18);
++GROUP_DECL(ADC5, AA13);
++GROUP_DECL(ADC6, Y18);
++GROUP_DECL(ADC7, AA12);
++GROUP_DECL(ADC8, W20);
++GROUP_DECL(ADC9, V20);
++GROUP_DECL(ADC10, Y11);
++GROUP_DECL(ADC11, V14);
++GROUP_DECL(ADC12, V19);
++GROUP_DECL(ADC13, W14);
++GROUP_DECL(ADC14, Y20);
++GROUP_DECL(ADC15, AB19);
++GROUP_DECL(AUXPWRGOOD0, W14);
++GROUP_DECL(AUXPWRGOOD1, Y20);
++GROUP_DECL(SGPM0, U26, U25, W26, Y26);
++GROUP_DECL(SGPM1, W25, V24, U23, T24);
++GROUP_DECL(I2C0, G11, H7);
++GROUP_DECL(I2C1, H8, H9);
++GROUP_DECL(I2C2, H10, H11);
++GROUP_DECL(I2C3, J9, J10);
++GROUP_DECL(I2C4, E9, F9);
++GROUP_DECL(I2C5, F8, M13);
++GROUP_DECL(I2C6, F7, D8);
++GROUP_DECL(I2C7, E8, L12);
++GROUP_DECL(I2C8, F12, E12);
++GROUP_DECL(I2C9, J12, G7);
++GROUP_DECL(I2C10, G8, G9);
++GROUP_DECL(I2C11, G10, K12);
++GROUP_DECL(I2C12, AC24, AD24);
++GROUP_DECL(I2C13, AE23, AE19);
++GROUP_DECL(I2C14, AF23, Y25);
++GROUP_DECL(I2C15, AA25, AF19);
++GROUP_DECL(DI2C8, D19, B10);
++GROUP_DECL(DI2C9, D15, B12);
++GROUP_DECL(DI2C10, C7, D7);
++GROUP_DECL(DI2C13, D26, C26);
++GROUP_DECL(DI2C14, A23, B25);
++GROUP_DECL(DI2C15, A22, F26);
++GROUP_DECL(SIOPBON1, AC24);
++GROUP_DECL(SIOPBIN1, AD24);
++GROUP_DECL(SIOSCIN1, AE23);
++GROUP_DECL(SIOS3N1, AE19);
++GROUP_DECL(SIOS5N1, AF23);
++GROUP_DECL(SIOPWREQN1, Y25);
++GROUP_DECL(SIOONCTRLN1, AA25);
++GROUP_DECL(SIOPWRGD1, AF19);
++GROUP_DECL(HVI3C12, AC24, AD24);
++GROUP_DECL(HVI3C13, AE23, AE19);
++GROUP_DECL(HVI3C14, AF23, Y25);
++GROUP_DECL(HVI3C15, AA25, AF19);
++GROUP_DECL(HVI3C4, C16, C14);
++GROUP_DECL(HVI3C5, C11, D9);
++GROUP_DECL(HVI3C6, F14, D10);
++GROUP_DECL(HVI3C7, D26, C26);
++GROUP_DECL(HVI3C10, A23, B25);
++GROUP_DECL(HVI3C11, A22, F26);
++GROUP_DECL(I3C4, AB25, AC25);
++GROUP_DECL(I3C5, AD25, V22);
++GROUP_DECL(I3C6, AE25, V21);
++GROUP_DECL(I3C7, AF21, AF25);
++GROUP_DECL(I3C8, AF26, AE26);
++GROUP_DECL(I3C9, W21, AD26);
++GROUP_DECL(I3C10, Y21, AC26);
++GROUP_DECL(I3C11, AA26, AB26);
++GROUP_DECL(I3C0, T26, AA20);
++GROUP_DECL(I3C1, V23, W24);
++GROUP_DECL(I3C2, R26, AA21);
++GROUP_DECL(I3C3, P26, Y24);
++GROUP_DECL(LTPI, T26, AA20, V23, W24);
++GROUP_DECL(SPI0, D24, B23, B22);
++GROUP_DECL(QSPI0, C23, B18);
++GROUP_DECL(SPI0CS1, B21);
++GROUP_DECL(SPI0ABR, B17);
++GROUP_DECL(SPI0WPN, B19);
++GROUP_DECL(SPI1, B26, A25, A24);
++GROUP_DECL(QSPI1, B24, E26);
++GROUP_DECL(SPI1CS1, A21);
++GROUP_DECL(SPI1ABR, A19);
++GROUP_DECL(SPI1WPN, A18);
++GROUP_DECL(SPI2, D26, C26, A23, B25);
++GROUP_DECL(QSPI2, A22, F26);
++GROUP_DECL(SPI2CS1, A26);
++GROUP_DECL(THRU2, A19, A18);
++GROUP_DECL(THRU3, A22, F26);
++GROUP_DECL(JTAGM1, D12, F10, E11, F11, F13);
++GROUP_DECL(MDIO0, B9, M14);
++GROUP_DECL(MDIO1, C9, C10);
++GROUP_DECL(MDIO2, E10, E13);
++GROUP_DECL(FWQSPI, M16, A15);
++GROUP_DECL(FWSPIABR, A14);
++GROUP_DECL(FWSPIWPN, N15);
++GROUP_DECL(RGMII0, C20, C19, A8, R14, A7, P14, D20, A6, B6, N14, B7, B8);
++GROUP_DECL(RGMII1, D19, B11, D15, B12, B10, P13, C18, C6, C7, D7, N13, C8);
++GROUP_DECL(RMII0, C20, A8, R14, A7, P14, D20, A6, B6, N14);
++GROUP_DECL(RMII1, D19, D15, B12, B10, P13, C18, C6, C7, D7);
++GROUP_DECL(VGA, J11, E7);
++GROUP_DECL(DSGPM1, D19, B10, C7, D7);
++GROUP_DECL(SGPS, B11, C18, N13, C8);
++GROUP_DECL(I2CF0, F12, E12, J12, G7);
++GROUP_DECL(I2CF1, E9, F9, F8, M13);
++GROUP_DECL(I2CF2, F7, D8, E8, L12);
++GROUP_DECL(CANBUS, G7, G8, G9);
++GROUP_DECL(USBUART, G8, G9);
++GROUP_DECL(HBLED, V26);
++GROUP_DECL(MACLINK0, U26);
++GROUP_DECL(MACLINK1, U25);
++GROUP_DECL(MACLINK2, U24);
++GROUP_DECL(NCTS2, U26);
++GROUP_DECL(NDCD2, U25);
++GROUP_DECL(NDSR2, W26);
++GROUP_DECL(NRI2, Y26);
++GROUP_DECL(NDTR2, W25);
++GROUP_DECL(NRTS2, V24);
++GROUP_DECL(SMON0, U26, U25, W26, Y26);
++GROUP_DECL(SMON1, U23, T24, W25, V24);
++GROUP_DECL(SGMII, SGMII0);
++/* PCIe RC PERST */
++GROUP_DECL(PCIERC2PERST, PCIERC2_PERST);
++GROUP_DECL(USB2CUD, PORTC_MODE);
++GROUP_DECL(USB2CD, PORTC_MODE);
++GROUP_DECL(USB2CH, PORTC_MODE);
++GROUP_DECL(USB2CU, PORTC_MODE);
++GROUP_DECL(USB2DD, PORTD_MODE);
++GROUP_DECL(USB2DH, PORTD_MODE);
++
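++/* Pin group table exposed to the pinctrl core. */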
++static struct aspeed_pin_group aspeed_g7_soc1_pingroups[] = {
++ ASPEED_PINCTRL_GROUP(ESPI0),
++ ASPEED_PINCTRL_GROUP(ESPI1),
++ ASPEED_PINCTRL_GROUP(LPC0),
++ ASPEED_PINCTRL_GROUP(LPC1),
++ ASPEED_PINCTRL_GROUP(SD),
++ ASPEED_PINCTRL_GROUP(VPI),
++ ASPEED_PINCTRL_GROUP(OSCCLK),
++ ASPEED_PINCTRL_GROUP(TACH0),
++ ASPEED_PINCTRL_GROUP(TACH1),
++ ASPEED_PINCTRL_GROUP(TACH2),
++ ASPEED_PINCTRL_GROUP(TACH3),
++ ASPEED_PINCTRL_GROUP(THRU0),
++ ASPEED_PINCTRL_GROUP(THRU1),
++ ASPEED_PINCTRL_GROUP(TACH4),
++ ASPEED_PINCTRL_GROUP(TACH5),
++ ASPEED_PINCTRL_GROUP(TACH6),
++ ASPEED_PINCTRL_GROUP(TACH7),
++ ASPEED_PINCTRL_GROUP(NTCS5),
++ ASPEED_PINCTRL_GROUP(NDCD5),
++ ASPEED_PINCTRL_GROUP(NDSR5),
++ ASPEED_PINCTRL_GROUP(NRI5),
++ ASPEED_PINCTRL_GROUP(SALT12),
++ ASPEED_PINCTRL_GROUP(SALT13),
++ ASPEED_PINCTRL_GROUP(SALT14),
++ ASPEED_PINCTRL_GROUP(SALT15),
++ ASPEED_PINCTRL_GROUP(NDTR5),
++ ASPEED_PINCTRL_GROUP(NRTS5),
++ ASPEED_PINCTRL_GROUP(NCTS6),
++ ASPEED_PINCTRL_GROUP(NDCD6),
++ ASPEED_PINCTRL_GROUP(NDSR6),
++ ASPEED_PINCTRL_GROUP(NRI6),
++ ASPEED_PINCTRL_GROUP(NDTR6),
++ ASPEED_PINCTRL_GROUP(NRTS6),
++ ASPEED_PINCTRL_GROUP(TACH8),
++ ASPEED_PINCTRL_GROUP(TACH9),
++ ASPEED_PINCTRL_GROUP(TACH10),
++ ASPEED_PINCTRL_GROUP(TACH11),
++ ASPEED_PINCTRL_GROUP(TACH12),
++ ASPEED_PINCTRL_GROUP(TACH13),
++ ASPEED_PINCTRL_GROUP(TACH14),
++ ASPEED_PINCTRL_GROUP(TACH15),
++ ASPEED_PINCTRL_GROUP(SPIM0),
++ ASPEED_PINCTRL_GROUP(PWM0),
++ ASPEED_PINCTRL_GROUP(PWM1),
++ ASPEED_PINCTRL_GROUP(PWM2),
++ ASPEED_PINCTRL_GROUP(PWM3),
++ ASPEED_PINCTRL_GROUP(PWM4),
++ ASPEED_PINCTRL_GROUP(PWM5),
++ ASPEED_PINCTRL_GROUP(PWM6),
++ ASPEED_PINCTRL_GROUP(PWM7),
++ ASPEED_PINCTRL_GROUP(SIOPBON0),
++ ASPEED_PINCTRL_GROUP(SIOPBIN0),
++ ASPEED_PINCTRL_GROUP(SIOSCIN0),
++ ASPEED_PINCTRL_GROUP(SIOS3N0),
++ ASPEED_PINCTRL_GROUP(SIOS5N0),
++ ASPEED_PINCTRL_GROUP(SIOPWREQN0),
++ ASPEED_PINCTRL_GROUP(SIOONCTRLN0),
++ ASPEED_PINCTRL_GROUP(SIOPWRGD0),
++ ASPEED_PINCTRL_GROUP(NCTS0),
++ ASPEED_PINCTRL_GROUP(NDCD0),
++ ASPEED_PINCTRL_GROUP(NDSR0),
++ ASPEED_PINCTRL_GROUP(NRI0),
++ ASPEED_PINCTRL_GROUP(NDTR0),
++ ASPEED_PINCTRL_GROUP(NRTS0),
++ ASPEED_PINCTRL_GROUP(TXD0),
++ ASPEED_PINCTRL_GROUP(RXD0),
++ ASPEED_PINCTRL_GROUP(NCTS1),
++ ASPEED_PINCTRL_GROUP(NDCD1),
++ ASPEED_PINCTRL_GROUP(NDSR1),
++ ASPEED_PINCTRL_GROUP(NRI1),
++ ASPEED_PINCTRL_GROUP(NDTR1),
++ ASPEED_PINCTRL_GROUP(NRTS1),
++ ASPEED_PINCTRL_GROUP(TXD1),
++ ASPEED_PINCTRL_GROUP(RXD1),
++ ASPEED_PINCTRL_GROUP(TXD2),
++ ASPEED_PINCTRL_GROUP(RXD2),
++ ASPEED_PINCTRL_GROUP(TXD3),
++ ASPEED_PINCTRL_GROUP(RXD3),
++ ASPEED_PINCTRL_GROUP(NCTS5),
++ ASPEED_PINCTRL_GROUP(NDCD5),
++ ASPEED_PINCTRL_GROUP(NDSR5),
++ ASPEED_PINCTRL_GROUP(NRI5),
++ ASPEED_PINCTRL_GROUP(NDTR5),
++ ASPEED_PINCTRL_GROUP(NRTS5),
++ ASPEED_PINCTRL_GROUP(TXD5),
++ ASPEED_PINCTRL_GROUP(RXD5),
++ ASPEED_PINCTRL_GROUP(NCTS6),
++ ASPEED_PINCTRL_GROUP(NDCD6),
++ ASPEED_PINCTRL_GROUP(NDSR6),
++ ASPEED_PINCTRL_GROUP(NRI6),
++ ASPEED_PINCTRL_GROUP(NDTR6),
++ ASPEED_PINCTRL_GROUP(NRTS6),
++ ASPEED_PINCTRL_GROUP(TXD6),
++ ASPEED_PINCTRL_GROUP(RXD6),
++ ASPEED_PINCTRL_GROUP(TXD7),
++ ASPEED_PINCTRL_GROUP(RXD7),
++ ASPEED_PINCTRL_GROUP(TXD8),
++ ASPEED_PINCTRL_GROUP(RXD8),
++ ASPEED_PINCTRL_GROUP(TXD9),
++ ASPEED_PINCTRL_GROUP(RXD9),
++ ASPEED_PINCTRL_GROUP(TXD10),
++ ASPEED_PINCTRL_GROUP(RXD10),
++ ASPEED_PINCTRL_GROUP(TXD11),
++ ASPEED_PINCTRL_GROUP(RXD11),
++ ASPEED_PINCTRL_GROUP(SPIM1),
++ ASPEED_PINCTRL_GROUP(WDTRST0N),
++ ASPEED_PINCTRL_GROUP(WDTRST1N),
++ ASPEED_PINCTRL_GROUP(WDTRST2N),
++ ASPEED_PINCTRL_GROUP(WDTRST3N),
++ ASPEED_PINCTRL_GROUP(WDTRST4N),
++ ASPEED_PINCTRL_GROUP(WDTRST5N),
++ ASPEED_PINCTRL_GROUP(WDTRST6N),
++ ASPEED_PINCTRL_GROUP(WDTRST7N),
++ ASPEED_PINCTRL_GROUP(PWM8),
++ ASPEED_PINCTRL_GROUP(PWM9),
++ ASPEED_PINCTRL_GROUP(PWM10),
++ ASPEED_PINCTRL_GROUP(PWM11),
++ ASPEED_PINCTRL_GROUP(PWM12),
++ ASPEED_PINCTRL_GROUP(PWM13),
++ ASPEED_PINCTRL_GROUP(PWM14),
++ ASPEED_PINCTRL_GROUP(PWM15),
++ ASPEED_PINCTRL_GROUP(SALT0),
++ ASPEED_PINCTRL_GROUP(SALT1),
++ ASPEED_PINCTRL_GROUP(SALT2),
++ ASPEED_PINCTRL_GROUP(SALT3),
++ ASPEED_PINCTRL_GROUP(FSI0),
++ ASPEED_PINCTRL_GROUP(FSI1),
++ ASPEED_PINCTRL_GROUP(FSI2),
++ ASPEED_PINCTRL_GROUP(FSI3),
++ ASPEED_PINCTRL_GROUP(SPIM2),
++ ASPEED_PINCTRL_GROUP(SALT4),
++ ASPEED_PINCTRL_GROUP(SALT5),
++ ASPEED_PINCTRL_GROUP(SALT6),
++ ASPEED_PINCTRL_GROUP(SALT7),
++ ASPEED_PINCTRL_GROUP(SALT8),
++ ASPEED_PINCTRL_GROUP(SALT9),
++ ASPEED_PINCTRL_GROUP(SALT10),
++ ASPEED_PINCTRL_GROUP(SALT11),
++ ASPEED_PINCTRL_GROUP(ADC0),
++ ASPEED_PINCTRL_GROUP(ADC1),
++ ASPEED_PINCTRL_GROUP(ADC2),
++ ASPEED_PINCTRL_GROUP(ADC3),
++ ASPEED_PINCTRL_GROUP(ADC4),
++ ASPEED_PINCTRL_GROUP(ADC5),
++ ASPEED_PINCTRL_GROUP(ADC6),
++ ASPEED_PINCTRL_GROUP(ADC7),
++ ASPEED_PINCTRL_GROUP(ADC8),
++ ASPEED_PINCTRL_GROUP(ADC9),
++ ASPEED_PINCTRL_GROUP(ADC10),
++ ASPEED_PINCTRL_GROUP(ADC11),
++ ASPEED_PINCTRL_GROUP(ADC12),
++ ASPEED_PINCTRL_GROUP(ADC13),
++ ASPEED_PINCTRL_GROUP(ADC14),
++ ASPEED_PINCTRL_GROUP(ADC15),
++ ASPEED_PINCTRL_GROUP(AUXPWRGOOD0),
++ ASPEED_PINCTRL_GROUP(AUXPWRGOOD1),
++ ASPEED_PINCTRL_GROUP(SGPM0),
++ ASPEED_PINCTRL_GROUP(SGPM1),
++ ASPEED_PINCTRL_GROUP(I2C0),
++ ASPEED_PINCTRL_GROUP(I2C1),
++ ASPEED_PINCTRL_GROUP(I2C2),
++ ASPEED_PINCTRL_GROUP(I2C3),
++ ASPEED_PINCTRL_GROUP(I2C4),
++ ASPEED_PINCTRL_GROUP(I2C5),
++ ASPEED_PINCTRL_GROUP(I2C6),
++ ASPEED_PINCTRL_GROUP(I2C7),
++ ASPEED_PINCTRL_GROUP(I2C8),
++ ASPEED_PINCTRL_GROUP(I2C9),
++ ASPEED_PINCTRL_GROUP(I2C10),
++ ASPEED_PINCTRL_GROUP(I2C11),
++ ASPEED_PINCTRL_GROUP(I2C12),
++ ASPEED_PINCTRL_GROUP(I2C13),
++ ASPEED_PINCTRL_GROUP(I2C14),
++ ASPEED_PINCTRL_GROUP(I2C15),
++ ASPEED_PINCTRL_GROUP(DI2C8),
++ ASPEED_PINCTRL_GROUP(DI2C9),
++ ASPEED_PINCTRL_GROUP(DI2C10),
++ ASPEED_PINCTRL_GROUP(DI2C13),
++ ASPEED_PINCTRL_GROUP(DI2C14),
++ ASPEED_PINCTRL_GROUP(DI2C15),
++ ASPEED_PINCTRL_GROUP(SIOPBON1),
++ ASPEED_PINCTRL_GROUP(SIOPBIN1),
++ ASPEED_PINCTRL_GROUP(SIOSCIN1),
++ ASPEED_PINCTRL_GROUP(SIOS3N1),
++ ASPEED_PINCTRL_GROUP(SIOS5N1),
++ ASPEED_PINCTRL_GROUP(SIOPWREQN1),
++ ASPEED_PINCTRL_GROUP(SIOONCTRLN1),
++ ASPEED_PINCTRL_GROUP(SIOPWRGD1),
++ ASPEED_PINCTRL_GROUP(HVI3C12),
++ ASPEED_PINCTRL_GROUP(HVI3C13),
++ ASPEED_PINCTRL_GROUP(HVI3C14),
++ ASPEED_PINCTRL_GROUP(HVI3C15),
++ ASPEED_PINCTRL_GROUP(HVI3C4),
++ ASPEED_PINCTRL_GROUP(HVI3C5),
++ ASPEED_PINCTRL_GROUP(HVI3C6),
++ ASPEED_PINCTRL_GROUP(HVI3C7),
++ ASPEED_PINCTRL_GROUP(HVI3C10),
++ ASPEED_PINCTRL_GROUP(HVI3C11),
++ ASPEED_PINCTRL_GROUP(I3C4),
++ ASPEED_PINCTRL_GROUP(I3C5),
++ ASPEED_PINCTRL_GROUP(I3C6),
++ ASPEED_PINCTRL_GROUP(I3C7),
++ ASPEED_PINCTRL_GROUP(I3C8),
++ ASPEED_PINCTRL_GROUP(I3C9),
++ ASPEED_PINCTRL_GROUP(I3C10),
++ ASPEED_PINCTRL_GROUP(I3C11),
++ ASPEED_PINCTRL_GROUP(I3C0),
++ ASPEED_PINCTRL_GROUP(I3C1),
++ ASPEED_PINCTRL_GROUP(I3C2),
++ ASPEED_PINCTRL_GROUP(I3C3),
++ ASPEED_PINCTRL_GROUP(LTPI),
++ ASPEED_PINCTRL_GROUP(SPI0),
++ ASPEED_PINCTRL_GROUP(QSPI0),
++ ASPEED_PINCTRL_GROUP(SPI0CS1),
++ ASPEED_PINCTRL_GROUP(SPI0ABR),
++ ASPEED_PINCTRL_GROUP(SPI0WPN),
++ ASPEED_PINCTRL_GROUP(SPI1),
++ ASPEED_PINCTRL_GROUP(QSPI1),
++ ASPEED_PINCTRL_GROUP(SPI1CS1),
++ ASPEED_PINCTRL_GROUP(SPI1ABR),
++ ASPEED_PINCTRL_GROUP(SPI1WPN),
++ ASPEED_PINCTRL_GROUP(SPI2),
++ ASPEED_PINCTRL_GROUP(QSPI2),
++ ASPEED_PINCTRL_GROUP(SPI2CS1),
++ ASPEED_PINCTRL_GROUP(THRU2),
++ ASPEED_PINCTRL_GROUP(THRU3),
++ ASPEED_PINCTRL_GROUP(JTAGM1),
++ ASPEED_PINCTRL_GROUP(MDIO0),
++ ASPEED_PINCTRL_GROUP(MDIO1),
++ ASPEED_PINCTRL_GROUP(MDIO2),
++ ASPEED_PINCTRL_GROUP(FWQSPI),
++ ASPEED_PINCTRL_GROUP(FWSPIABR),
++ ASPEED_PINCTRL_GROUP(FWSPIWPN),
++ ASPEED_PINCTRL_GROUP(RGMII0),
++ ASPEED_PINCTRL_GROUP(RGMII1),
++ ASPEED_PINCTRL_GROUP(RMII0),
++ ASPEED_PINCTRL_GROUP(RMII1),
++ ASPEED_PINCTRL_GROUP(VGA),
++ ASPEED_PINCTRL_GROUP(DSGPM1),
++ ASPEED_PINCTRL_GROUP(SGPS),
++ ASPEED_PINCTRL_GROUP(I2CF0),
++ ASPEED_PINCTRL_GROUP(I2CF1),
++ ASPEED_PINCTRL_GROUP(I2CF2),
++ ASPEED_PINCTRL_GROUP(CANBUS),
++ ASPEED_PINCTRL_GROUP(USBUART),
++ ASPEED_PINCTRL_GROUP(HBLED),
++ ASPEED_PINCTRL_GROUP(MACLINK0),
++ ASPEED_PINCTRL_GROUP(MACLINK1),
++ ASPEED_PINCTRL_GROUP(MACLINK2),
++ ASPEED_PINCTRL_GROUP(NCTS2),
++ ASPEED_PINCTRL_GROUP(NDCD2),
++ ASPEED_PINCTRL_GROUP(NDSR2),
++ ASPEED_PINCTRL_GROUP(NRI2),
++ ASPEED_PINCTRL_GROUP(NDTR2),
++ ASPEED_PINCTRL_GROUP(NRTS2),
++ ASPEED_PINCTRL_GROUP(SMON0),
++ ASPEED_PINCTRL_GROUP(SMON1),
++ ASPEED_PINCTRL_GROUP(SGMII),
++ ASPEED_PINCTRL_GROUP(PCIERC2PERST),
++ ASPEED_PINCTRL_GROUP(USB2CUD),
++ ASPEED_PINCTRL_GROUP(USB2CD),
++ ASPEED_PINCTRL_GROUP(USB2CH),
++ ASPEED_PINCTRL_GROUP(USB2CU),
++ ASPEED_PINCTRL_GROUP(USB2DD),
++ ASPEED_PINCTRL_GROUP(USB2DH),
++};
++
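++/*
++ * Functions. FUNC_DECL_(<function>, <group names...>) maps each
++ * selectable function to the group name(s) that can carry it, e.g.
++ * UART0 spans the eight single-pin NCTS0/NDCD0/.../RXD0 groups.
++ */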
++FUNC_DECL_(ESPI0, "ESPI0");
++FUNC_DECL_(ESPI1, "ESPI1");
++FUNC_DECL_(LPC0, "LPC0");
++FUNC_DECL_(LPC1, "LPC1");
++FUNC_DECL_(VPI, "VPI");
++FUNC_DECL_(SD, "SD");
++FUNC_DECL_(OSCCLK, "OSCCLK");
++FUNC_DECL_(TACH0, "TACH0");
++FUNC_DECL_(TACH1, "TACH1");
++FUNC_DECL_(TACH2, "TACH2");
++FUNC_DECL_(TACH3, "TACH3");
++FUNC_DECL_(TACH4, "TACH4");
++FUNC_DECL_(TACH5, "TACH5");
++FUNC_DECL_(TACH6, "TACH6");
++FUNC_DECL_(TACH7, "TACH7");
++FUNC_DECL_(THRU0, "THRU0");
++FUNC_DECL_(THRU1, "THRU1");
++FUNC_DECL_(NTCS5, "NTCS5");
++FUNC_DECL_(NDSR5, "NDSR5");
++FUNC_DECL_(NRI5, "NRI5");
++FUNC_DECL_(TACH8, "TACH8");
++FUNC_DECL_(TACH9, "TACH9");
++FUNC_DECL_(TACH10, "TACH10");
++FUNC_DECL_(TACH11, "TACH11");
++FUNC_DECL_(TACH12, "TACH12");
++FUNC_DECL_(TACH13, "TACH13");
++FUNC_DECL_(TACH14, "TACH14");
++FUNC_DECL_(TACH15, "TACH15");
++FUNC_DECL_(SALT12, "SALT12");
++FUNC_DECL_(SALT13, "SALT13");
++FUNC_DECL_(SALT14, "SALT14");
++FUNC_DECL_(SALT15, "SALT15");
++FUNC_DECL_(SPIM0, "SPIM0");
++FUNC_DECL_(PWM0, "PWM0");
++FUNC_DECL_(PWM1, "PWM1");
++FUNC_DECL_(PWM2, "PWM2");
++FUNC_DECL_(PWM3, "PWM3");
++FUNC_DECL_(PWM4, "PWM4");
++FUNC_DECL_(PWM5, "PWM5");
++FUNC_DECL_(PWM6, "PWM6");
++FUNC_DECL_(PWM7, "PWM7");
++FUNC_DECL_(SIOPBON0, "SIOPBON0");
++FUNC_DECL_(SIOPBIN0, "SIOPBIN0");
++FUNC_DECL_(SIOSCIN0, "SIOSCIN0");
++FUNC_DECL_(SIOS3N0, "SIOS3N0");
++FUNC_DECL_(SIOS5N0, "SIOS5N0");
++FUNC_DECL_(SIOPWREQN0, "SIOPWREQN0");
++FUNC_DECL_(SIOONCTRLN0, "SIOONCTRLN0");
++FUNC_DECL_(SIOPWRGD0, "SIOPWRGD0");
++FUNC_DECL_(UART0, "NCTS0", "NDCD0", "NDSR0", "NRI0", "NDTR0", "NRTS0", "TXD0", "RXD0");
++FUNC_DECL_(UART1, "NCTS1", "NDCD1", "NDSR1", "NRI1", "NDTR1", "NRTS1", "TXD1", "RXD1");
++FUNC_DECL_(UART2, "TXD2", "RXD2");
++FUNC_DECL_(UART3, "TXD3", "RXD3");
++FUNC_DECL_(UART5, "NCTS5", "NDCD5", "NDSR5", "NRI5", "NDTR5", "NRTS5", "TXD5", "RXD5");
++FUNC_DECL_(UART6, "NCTS6", "NDCD6", "NDSR6", "NRI6", "NDTR6", "NRTS6", "TXD6", "RXD6");
++FUNC_DECL_(UART7, "TXD7", "RXD7");
++FUNC_DECL_(UART8, "TXD8", "RXD8");
++FUNC_DECL_(UART9, "TXD9", "RXD9");
++FUNC_DECL_(UART10, "TXD10", "RXD10");
++FUNC_DECL_(UART11, "TXD11", "RXD11");
++FUNC_DECL_(SPIM1, "SPIM1");
++FUNC_DECL_(SPIM2, "SPIM2");
++FUNC_DECL_(PWM8, "PWM8");
++FUNC_DECL_(PWM9, "PWM9");
++FUNC_DECL_(PWM10, "PWM10");
++FUNC_DECL_(PWM11, "PWM11");
++FUNC_DECL_(PWM12, "PWM12");
++FUNC_DECL_(PWM13, "PWM13");
++FUNC_DECL_(PWM14, "PWM14");
++FUNC_DECL_(PWM15, "PWM15");
++FUNC_DECL_(WDTRST0N, "WDTRST0N");
++FUNC_DECL_(WDTRST1N, "WDTRST1N");
++FUNC_DECL_(WDTRST2N, "WDTRST2N");
++FUNC_DECL_(WDTRST3N, "WDTRST3N");
++FUNC_DECL_(WDTRST4N, "WDTRST4N");
++FUNC_DECL_(WDTRST5N, "WDTRST5N");
++FUNC_DECL_(WDTRST6N, "WDTRST6N");
++FUNC_DECL_(WDTRST7N, "WDTRST7N");
++FUNC_DECL_(FSI0, "FSI0");
++FUNC_DECL_(FSI1, "FSI1");
++FUNC_DECL_(FSI2, "FSI2");
++FUNC_DECL_(FSI3, "FSI3");
++FUNC_DECL_(SALT4, "SALT4");
++FUNC_DECL_(SALT5, "SALT5");
++FUNC_DECL_(SALT6, "SALT6");
++FUNC_DECL_(SALT7, "SALT7");
++FUNC_DECL_(SALT8, "SALT8");
++FUNC_DECL_(SALT9, "SALT9");
++FUNC_DECL_(SALT10, "SALT10");
++FUNC_DECL_(SALT11, "SALT11");
++FUNC_DECL_(ADC0, "ADC0");
++FUNC_DECL_(ADC1, "ADC1");
++FUNC_DECL_(ADC2, "ADC2");
++FUNC_DECL_(ADC3, "ADC3");
++FUNC_DECL_(ADC4, "ADC4");
++FUNC_DECL_(ADC5, "ADC5");
++FUNC_DECL_(ADC6, "ADC6");
++FUNC_DECL_(ADC7, "ADC7");
++FUNC_DECL_(ADC8, "ADC8");
++FUNC_DECL_(ADC9, "ADC9");
++FUNC_DECL_(ADC10, "ADC10");
++FUNC_DECL_(ADC11, "ADC11");
++FUNC_DECL_(ADC12, "ADC12");
++FUNC_DECL_(ADC13, "ADC13");
++FUNC_DECL_(ADC14, "ADC14");
++FUNC_DECL_(ADC15, "ADC15");
++FUNC_DECL_(AUXPWRGOOD0, "AUXPWRGOOD0");
++FUNC_DECL_(AUXPWRGOOD1, "AUXPWRGOOD1");
++FUNC_DECL_(SGPM0, "SGPM0");
++FUNC_DECL_(SGPM1, "SGPM1");
++FUNC_DECL_(I2C0, "I2C0");
++FUNC_DECL_(I2C1, "I2C1");
++FUNC_DECL_(I2C2, "I2C2");
++FUNC_DECL_(I2C3, "I2C3");
++FUNC_DECL_(I2C4, "I2C4");
++FUNC_DECL_(I2C5, "I2C5");
++FUNC_DECL_(I2C6, "I2C6");
++FUNC_DECL_(I2C7, "I2C7");
++FUNC_DECL_(I2C8, "I2C8");
++FUNC_DECL_(I2C9, "I2C9");
++FUNC_DECL_(I2C10, "I2C10");
++FUNC_DECL_(I2C11, "I2C11");
++FUNC_DECL_(I2C12, "I2C12");
++FUNC_DECL_(I2C13, "I2C13");
++FUNC_DECL_(I2C14, "I2C14");
++FUNC_DECL_(I2C15, "I2C15");
++FUNC_DECL_(DI2C8, "DI2C8");
++FUNC_DECL_(DI2C9, "DI2C9");
++FUNC_DECL_(DI2C10, "DI2C10");
++FUNC_DECL_(DI2C13, "DI2C13");
++FUNC_DECL_(DI2C14, "DI2C14");
++FUNC_DECL_(DI2C15, "DI2C15");
++FUNC_DECL_(SIOPBON1, "SIOPBON1");
++FUNC_DECL_(SIOPBIN1, "SIOPBIN1");
++FUNC_DECL_(SIOSCIN1, "SIOSCIN1");
++FUNC_DECL_(SIOS3N1, "SIOS3N1");
++FUNC_DECL_(SIOS5N1, "SIOS5N1");
++FUNC_DECL_(SIOPWREQN1, "SIOPWREQN1");
++FUNC_DECL_(SIOONCTRLN1, "SIOONCTRLN1");
++FUNC_DECL_(SIOPWRGD1, "SIOPWRGD1");
++FUNC_DECL_(I3C0, "I3C0");
++FUNC_DECL_(I3C1, "I3C1");
++FUNC_DECL_(I3C2, "I3C2");
++FUNC_DECL_(I3C3, "I3C3");
++FUNC_DECL_(I3C4, "I3C4", "HVI3C4");
++FUNC_DECL_(I3C5, "I3C5", "HVI3C5");
++FUNC_DECL_(I3C6, "I3C6", "HVI3C6");
++FUNC_DECL_(I3C7, "I3C7", "HVI3C7");
++FUNC_DECL_(I3C8, "I3C8");
++FUNC_DECL_(I3C9, "I3C9");
++FUNC_DECL_(I3C10, "I3C10", "HVI3C10");
++FUNC_DECL_(I3C11, "I3C11", "HVI3C11");
++FUNC_DECL_(I3C12, "HVI3C12");
++FUNC_DECL_(I3C13, "HVI3C13");
++FUNC_DECL_(I3C14, "HVI3C14");
++FUNC_DECL_(I3C15, "HVI3C15");
++FUNC_DECL_(LTPI, "LTPI");
++FUNC_DECL_(SPI0, "SPI0");
++FUNC_DECL_(QSPI0, "QSPI0");
++FUNC_DECL_(SPI0CS1, "SPI0CS1");
++FUNC_DECL_(SPI0ABR, "SPI0ABR");
++FUNC_DECL_(SPI0WPN, "SPI0WPN");
++FUNC_DECL_(SPI1, "SPI1");
++FUNC_DECL_(QSPI1, "QSPI1");
++FUNC_DECL_(SPI1CS1, "SPI1CS1");
++FUNC_DECL_(SPI1ABR, "SPI1ABR");
++FUNC_DECL_(SPI1WPN, "SPI1WPN");
++FUNC_DECL_(SPI2, "SPI2");
++FUNC_DECL_(QSPI2, "QSPI2");
++FUNC_DECL_(SPI2CS1, "SPI2CS1");
++FUNC_DECL_(THRU2, "THRU2");
++FUNC_DECL_(THRU3, "THRU3");
++FUNC_DECL_(JTAGM1, "JTAGM1");
++FUNC_DECL_(MDIO0, "MDIO0");
++FUNC_DECL_(MDIO1, "MDIO1");
++FUNC_DECL_(MDIO2, "MDIO2");
++FUNC_DECL_(FWQSPI, "FWQSPI");
++FUNC_DECL_(FWSPIABR, "FWSPIABR");
++FUNC_DECL_(FWSPIWPN, "FWSPIWPN");
++FUNC_DECL_(RGMII0, "RGMII0");
++FUNC_DECL_(RGMII1, "RGMII1");
++FUNC_DECL_(RMII0, "RMII0");
++FUNC_DECL_(RMII1, "RMII1");
++FUNC_DECL_(VGA, "VGA");
++FUNC_DECL_(DSGPM1, "DSGPM1");
++FUNC_DECL_(SGPS, "SGPS");
++FUNC_DECL_(I2CF0, "I2CF0");
++FUNC_DECL_(I2CF1, "I2CF1");
++FUNC_DECL_(I2CF2, "I2CF2");
++FUNC_DECL_(CANBUS, "CANBUS");
++FUNC_DECL_(USBUART, "USBUART");
++FUNC_DECL_(HBLED, "HBLED");
++FUNC_DECL_(MACLINK0, "MACLINK0");
++FUNC_DECL_(MACLINK1, "MACLINK1");
++FUNC_DECL_(MACLINK2, "MACLINK2");
++FUNC_DECL_(SMON0, "SMON0");
++FUNC_DECL_(SMON1, "SMON1");
++FUNC_DECL_(SGMII, "SGMII");
++FUNC_DECL_(PCIERC, "PCIERC2PERST");
++FUNC_DECL_(USB2C, "USB2CUD", "USB2CD", "USB2CH", "USB2CU");
++FUNC_DECL_(USB2D, "USB2DD", "USB2DH");
++
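++/* Pin function table exposed to the pinctrl core. */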
++static struct aspeed_pin_function aspeed_g7_soc1_funcs[] = {
++ ASPEED_PINCTRL_FUNC(ESPI0),
++ ASPEED_PINCTRL_FUNC(ESPI1),
++ ASPEED_PINCTRL_FUNC(LPC0),
++ ASPEED_PINCTRL_FUNC(LPC1),
++ ASPEED_PINCTRL_FUNC(VPI),
++ ASPEED_PINCTRL_FUNC(SD),
++ ASPEED_PINCTRL_FUNC(OSCCLK),
++ ASPEED_PINCTRL_FUNC(TACH0),
++ ASPEED_PINCTRL_FUNC(TACH1),
++ ASPEED_PINCTRL_FUNC(TACH2),
++ ASPEED_PINCTRL_FUNC(TACH3),
++ ASPEED_PINCTRL_FUNC(TACH4),
++ ASPEED_PINCTRL_FUNC(TACH5),
++ ASPEED_PINCTRL_FUNC(TACH6),
++ ASPEED_PINCTRL_FUNC(TACH7),
++ ASPEED_PINCTRL_FUNC(THRU0),
++ ASPEED_PINCTRL_FUNC(THRU1),
++ ASPEED_PINCTRL_FUNC(NTCS5),
++ ASPEED_PINCTRL_FUNC(NDSR5),
++ ASPEED_PINCTRL_FUNC(NRI5),
++ ASPEED_PINCTRL_FUNC(SALT12),
++ ASPEED_PINCTRL_FUNC(SALT13),
++ ASPEED_PINCTRL_FUNC(SALT14),
++ ASPEED_PINCTRL_FUNC(SALT15),
++ ASPEED_PINCTRL_FUNC(TACH8),
++ ASPEED_PINCTRL_FUNC(TACH9),
++ ASPEED_PINCTRL_FUNC(TACH10),
++ ASPEED_PINCTRL_FUNC(TACH11),
++ ASPEED_PINCTRL_FUNC(TACH12),
++ ASPEED_PINCTRL_FUNC(TACH13),
++ ASPEED_PINCTRL_FUNC(TACH14),
++ ASPEED_PINCTRL_FUNC(TACH15),
++ ASPEED_PINCTRL_FUNC(SPIM0),
++ ASPEED_PINCTRL_FUNC(PWM0),
++ ASPEED_PINCTRL_FUNC(PWM1),
++ ASPEED_PINCTRL_FUNC(PWM2),
++ ASPEED_PINCTRL_FUNC(PWM3),
++ ASPEED_PINCTRL_FUNC(PWM4),
++ ASPEED_PINCTRL_FUNC(PWM5),
++ ASPEED_PINCTRL_FUNC(PWM6),
++ ASPEED_PINCTRL_FUNC(PWM7),
++ ASPEED_PINCTRL_FUNC(SIOPBON0),
++ ASPEED_PINCTRL_FUNC(SIOPBIN0),
++ ASPEED_PINCTRL_FUNC(SIOSCIN0),
++ ASPEED_PINCTRL_FUNC(SIOS3N0),
++ ASPEED_PINCTRL_FUNC(SIOS5N0),
++ ASPEED_PINCTRL_FUNC(SIOPWREQN0),
++ ASPEED_PINCTRL_FUNC(SIOONCTRLN0),
++ ASPEED_PINCTRL_FUNC(SIOPWRGD0),
++ ASPEED_PINCTRL_FUNC(UART0),
++ ASPEED_PINCTRL_FUNC(UART1),
++ ASPEED_PINCTRL_FUNC(UART2),
++ ASPEED_PINCTRL_FUNC(UART3),
++ ASPEED_PINCTRL_FUNC(UART5),
++ ASPEED_PINCTRL_FUNC(UART6),
++ ASPEED_PINCTRL_FUNC(UART7),
++ ASPEED_PINCTRL_FUNC(UART8),
++ ASPEED_PINCTRL_FUNC(UART9),
++ ASPEED_PINCTRL_FUNC(UART10),
++ ASPEED_PINCTRL_FUNC(UART11),
++ ASPEED_PINCTRL_FUNC(SPIM1),
++ ASPEED_PINCTRL_FUNC(PWM8),
++ ASPEED_PINCTRL_FUNC(PWM9),
++ ASPEED_PINCTRL_FUNC(PWM10),
++ ASPEED_PINCTRL_FUNC(PWM11),
++ ASPEED_PINCTRL_FUNC(PWM12),
++ ASPEED_PINCTRL_FUNC(PWM13),
++ ASPEED_PINCTRL_FUNC(PWM14),
++ ASPEED_PINCTRL_FUNC(PWM15),
++ ASPEED_PINCTRL_FUNC(WDTRST0N),
++ ASPEED_PINCTRL_FUNC(WDTRST1N),
++ ASPEED_PINCTRL_FUNC(WDTRST2N),
++ ASPEED_PINCTRL_FUNC(WDTRST3N),
++ ASPEED_PINCTRL_FUNC(WDTRST4N),
++ ASPEED_PINCTRL_FUNC(WDTRST5N),
++ ASPEED_PINCTRL_FUNC(WDTRST6N),
++ ASPEED_PINCTRL_FUNC(WDTRST7N),
++ ASPEED_PINCTRL_FUNC(FSI0),
++ ASPEED_PINCTRL_FUNC(FSI1),
++ ASPEED_PINCTRL_FUNC(FSI2),
++ ASPEED_PINCTRL_FUNC(FSI3),
++ ASPEED_PINCTRL_FUNC(SALT4),
++ ASPEED_PINCTRL_FUNC(SALT5),
++ ASPEED_PINCTRL_FUNC(SALT6),
++ ASPEED_PINCTRL_FUNC(SALT7),
++ ASPEED_PINCTRL_FUNC(SALT8),
++ ASPEED_PINCTRL_FUNC(SALT9),
++ ASPEED_PINCTRL_FUNC(SALT10),
++ ASPEED_PINCTRL_FUNC(SALT11),
++ ASPEED_PINCTRL_FUNC(ADC0),
++ ASPEED_PINCTRL_FUNC(ADC1),
++ ASPEED_PINCTRL_FUNC(ADC2),
++ ASPEED_PINCTRL_FUNC(ADC3),
++ ASPEED_PINCTRL_FUNC(ADC4),
++ ASPEED_PINCTRL_FUNC(ADC5),
++ ASPEED_PINCTRL_FUNC(ADC6),
++ ASPEED_PINCTRL_FUNC(ADC7),
++ ASPEED_PINCTRL_FUNC(ADC8),
++ ASPEED_PINCTRL_FUNC(ADC9),
++ ASPEED_PINCTRL_FUNC(ADC10),
++ ASPEED_PINCTRL_FUNC(ADC11),
++ ASPEED_PINCTRL_FUNC(ADC12),
++ ASPEED_PINCTRL_FUNC(ADC13),
++ ASPEED_PINCTRL_FUNC(ADC14),
++ ASPEED_PINCTRL_FUNC(ADC15),
++ ASPEED_PINCTRL_FUNC(AUXPWRGOOD0),
++ ASPEED_PINCTRL_FUNC(AUXPWRGOOD1),
++ ASPEED_PINCTRL_FUNC(SGPM0),
++ ASPEED_PINCTRL_FUNC(SGPM1),
++ ASPEED_PINCTRL_FUNC(SPIM2),
++ ASPEED_PINCTRL_FUNC(I2C0),
++ ASPEED_PINCTRL_FUNC(I2C1),
++ ASPEED_PINCTRL_FUNC(I2C2),
++ ASPEED_PINCTRL_FUNC(I2C3),
++ ASPEED_PINCTRL_FUNC(I2C4),
++ ASPEED_PINCTRL_FUNC(I2C5),
++ ASPEED_PINCTRL_FUNC(I2C6),
++ ASPEED_PINCTRL_FUNC(I2C7),
++ ASPEED_PINCTRL_FUNC(I2C8),
++ ASPEED_PINCTRL_FUNC(I2C9),
++ ASPEED_PINCTRL_FUNC(I2C10),
++ ASPEED_PINCTRL_FUNC(I2C11),
++ ASPEED_PINCTRL_FUNC(I2C12),
++ ASPEED_PINCTRL_FUNC(I2C13),
++ ASPEED_PINCTRL_FUNC(I2C14),
++ ASPEED_PINCTRL_FUNC(I2C15),
++ ASPEED_PINCTRL_FUNC(DI2C8),
++ ASPEED_PINCTRL_FUNC(DI2C9),
++ ASPEED_PINCTRL_FUNC(DI2C10),
++ ASPEED_PINCTRL_FUNC(DI2C13),
++ ASPEED_PINCTRL_FUNC(DI2C14),
++ ASPEED_PINCTRL_FUNC(DI2C15),
++ ASPEED_PINCTRL_FUNC(SIOPBON1),
++ ASPEED_PINCTRL_FUNC(SIOPBIN1),
++ ASPEED_PINCTRL_FUNC(SIOSCIN1),
++ ASPEED_PINCTRL_FUNC(SIOS3N1),
++ ASPEED_PINCTRL_FUNC(SIOS5N1),
++ ASPEED_PINCTRL_FUNC(SIOPWREQN1),
++ ASPEED_PINCTRL_FUNC(SIOONCTRLN1),
++ ASPEED_PINCTRL_FUNC(SIOPWRGD1),
++ ASPEED_PINCTRL_FUNC(I3C0),
++ ASPEED_PINCTRL_FUNC(I3C1),
++ ASPEED_PINCTRL_FUNC(I3C2),
++ ASPEED_PINCTRL_FUNC(I3C3),
++ ASPEED_PINCTRL_FUNC(I3C4),
++ ASPEED_PINCTRL_FUNC(I3C5),
++ ASPEED_PINCTRL_FUNC(I3C6),
++ ASPEED_PINCTRL_FUNC(I3C7),
++ ASPEED_PINCTRL_FUNC(I3C8),
++ ASPEED_PINCTRL_FUNC(I3C9),
++ ASPEED_PINCTRL_FUNC(I3C10),
++ ASPEED_PINCTRL_FUNC(I3C11),
++ ASPEED_PINCTRL_FUNC(I3C12),
++ ASPEED_PINCTRL_FUNC(I3C13),
++ ASPEED_PINCTRL_FUNC(I3C14),
++ ASPEED_PINCTRL_FUNC(I3C15),
++ ASPEED_PINCTRL_FUNC(LTPI),
++ ASPEED_PINCTRL_FUNC(SPI0),
++ ASPEED_PINCTRL_FUNC(QSPI0),
++ ASPEED_PINCTRL_FUNC(SPI0CS1),
++ ASPEED_PINCTRL_FUNC(SPI0ABR),
++ ASPEED_PINCTRL_FUNC(SPI0WPN),
++ ASPEED_PINCTRL_FUNC(SPI1),
++ ASPEED_PINCTRL_FUNC(QSPI1),
++ ASPEED_PINCTRL_FUNC(SPI1CS1),
++ ASPEED_PINCTRL_FUNC(SPI1ABR),
++ ASPEED_PINCTRL_FUNC(SPI1WPN),
++ ASPEED_PINCTRL_FUNC(SPI2),
++ ASPEED_PINCTRL_FUNC(QSPI2),
++ ASPEED_PINCTRL_FUNC(SPI2CS1),
++ ASPEED_PINCTRL_FUNC(THRU2),
++ ASPEED_PINCTRL_FUNC(THRU3),
++ ASPEED_PINCTRL_FUNC(JTAGM1),
++ ASPEED_PINCTRL_FUNC(MDIO0),
++ ASPEED_PINCTRL_FUNC(MDIO1),
++ ASPEED_PINCTRL_FUNC(MDIO2),
++ ASPEED_PINCTRL_FUNC(FWQSPI),
++ ASPEED_PINCTRL_FUNC(FWSPIABR),
++ ASPEED_PINCTRL_FUNC(FWSPIWPN),
++ ASPEED_PINCTRL_FUNC(RGMII0),
++ ASPEED_PINCTRL_FUNC(RGMII1),
++ ASPEED_PINCTRL_FUNC(RMII0),
++ ASPEED_PINCTRL_FUNC(RMII1),
++ ASPEED_PINCTRL_FUNC(VGA),
++ ASPEED_PINCTRL_FUNC(DSGPM1),
++ ASPEED_PINCTRL_FUNC(SGPS),
++ ASPEED_PINCTRL_FUNC(I2CF0),
++ ASPEED_PINCTRL_FUNC(I2CF1),
++ ASPEED_PINCTRL_FUNC(I2CF2),
++ ASPEED_PINCTRL_FUNC(CANBUS),
++ ASPEED_PINCTRL_FUNC(USBUART),
++ ASPEED_PINCTRL_FUNC(HBLED),
++ ASPEED_PINCTRL_FUNC(MACLINK0),
++ ASPEED_PINCTRL_FUNC(MACLINK1),
++ ASPEED_PINCTRL_FUNC(MACLINK2),
++ ASPEED_PINCTRL_FUNC(SMON0),
++ ASPEED_PINCTRL_FUNC(SMON1),
++ ASPEED_PINCTRL_FUNC(SGMII),
++ ASPEED_PINCTRL_FUNC(PCIERC),
++ ASPEED_PINCTRL_FUNC(USB2C),
++ ASPEED_PINCTRL_FUNC(USB2D),
++};
++
++/* number, name, drv_data */
++static const struct pinctrl_pin_desc aspeed_g7_soc1_pins[] = {
++ PINCTRL_PIN(C16, "C16"),
++ PINCTRL_PIN(C14, "C14"),
++ PINCTRL_PIN(C11, "C11"),
++ PINCTRL_PIN(D9, "D9"),
++ PINCTRL_PIN(F14, "F14"),
++ PINCTRL_PIN(D10, "D10"),
++ PINCTRL_PIN(C12, "C12"),
++ PINCTRL_PIN(C13, "C13"),
++ PINCTRL_PIN(AA24, "AA24"),
++ PINCTRL_PIN(AB24, "AB24"),
++ PINCTRL_PIN(AB23, "AB23"),
++ PINCTRL_PIN(AC22, "AC22"),
++ PINCTRL_PIN(AD22, "AD22"),
++ PINCTRL_PIN(AE21, "AE21"),
++ PINCTRL_PIN(AF20, "AF20"),
++ PINCTRL_PIN(AE20, "AE20"),
++ PINCTRL_PIN(AD20, "AD20"),
++ PINCTRL_PIN(Y23, "Y23"),
++ PINCTRL_PIN(W23, "W23"),
++ PINCTRL_PIN(AD19, "AD19"),
++ PINCTRL_PIN(AC20, "AC20"),
++ PINCTRL_PIN(AA23, "AA23"),
++ PINCTRL_PIN(AB22, "AB22"),
++ PINCTRL_PIN(AB21, "AB21"),
++ PINCTRL_PIN(AA22, "AA22"),
++ PINCTRL_PIN(Y22, "Y22"),
++ PINCTRL_PIN(W22, "W22"),
++ PINCTRL_PIN(AF18, "AF18"),
++ PINCTRL_PIN(AE18, "AE18"),
++ PINCTRL_PIN(AD18, "AD18"),
++ PINCTRL_PIN(AC19, "AC19"),
++ PINCTRL_PIN(AB20, "AB20"),
++ PINCTRL_PIN(AF17, "AF17"),
++ PINCTRL_PIN(AA16, "AA16"),
++ PINCTRL_PIN(Y16, "Y16"),
++ PINCTRL_PIN(V17, "V17"),
++ PINCTRL_PIN(J13, "J13"),
++ PINCTRL_PIN(AB16, "AB16"),
++ PINCTRL_PIN(AC16, "AC16"),
++ PINCTRL_PIN(AF16, "AF16"),
++ PINCTRL_PIN(AA15, "AA15"),
++ PINCTRL_PIN(AB15, "AB15"),
++ PINCTRL_PIN(AC15, "AC15"),
++ PINCTRL_PIN(AD15, "AD15"),
++ PINCTRL_PIN(Y15, "Y15"),
++ PINCTRL_PIN(AA14, "AA14"),
++ PINCTRL_PIN(W16, "W16"),
++ PINCTRL_PIN(V16, "V16"),
++ PINCTRL_PIN(AB18, "AB18"),
++ PINCTRL_PIN(AC18, "AC18"),
++ PINCTRL_PIN(K13, "K13"),
++ PINCTRL_PIN(AA17, "AA17"),
++ PINCTRL_PIN(AB17, "AB17"),
++ PINCTRL_PIN(AD16, "AD16"),
++ PINCTRL_PIN(AC17, "AC17"),
++ PINCTRL_PIN(AD17, "AD17"),
++ PINCTRL_PIN(AE16, "AE16"),
++ PINCTRL_PIN(AE17, "AE17"),
++ PINCTRL_PIN(U23, "U23"),
++ PINCTRL_PIN(T24, "T24"),
++ PINCTRL_PIN(HOLE0, "HOLE0"),
++ PINCTRL_PIN(HOLE1, "HOLE1"),
++ PINCTRL_PIN(HOLE2, "HOLE2"),
++ PINCTRL_PIN(HOLE3, "HOLE3"),
++ PINCTRL_PIN(AC24, "AC24"),
++ PINCTRL_PIN(AD24, "AD24"),
++ PINCTRL_PIN(AE23, "AE23"),
++ PINCTRL_PIN(AE19, "AE19"),
++ PINCTRL_PIN(AF23, "AF23"),
++ PINCTRL_PIN(Y25, "Y25"),
++ PINCTRL_PIN(AA25, "AA25"),
++ PINCTRL_PIN(AF19, "AF19"),
++ PINCTRL_PIN(AB25, "AB25"),
++ PINCTRL_PIN(AC25, "AC25"),
++ PINCTRL_PIN(AD25, "AD25"),
++ PINCTRL_PIN(V22, "V22"),
++ PINCTRL_PIN(AE25, "AE25"),
++ PINCTRL_PIN(V21, "V21"),
++ PINCTRL_PIN(AF21, "AF21"),
++ PINCTRL_PIN(AF25, "AF25"),
++ PINCTRL_PIN(AF26, "AF26"),
++ PINCTRL_PIN(AE26, "AE26"),
++ PINCTRL_PIN(W21, "W21"),
++ PINCTRL_PIN(AD26, "AD26"),
++ PINCTRL_PIN(Y21, "Y21"),
++ PINCTRL_PIN(AC26, "AC26"),
++ PINCTRL_PIN(AA26, "AA26"),
++ PINCTRL_PIN(AB26, "AB26"),
++ PINCTRL_PIN(T26, "T26"),
++ PINCTRL_PIN(AA20, "AA20"),
++ PINCTRL_PIN(V23, "V23"),
++ PINCTRL_PIN(W24, "W24"),
++ PINCTRL_PIN(R26, "R26"),
++ PINCTRL_PIN(AA21, "AA21"),
++ PINCTRL_PIN(P26, "P26"),
++ PINCTRL_PIN(Y24, "Y24"),
++ PINCTRL_PIN(B16, "B16"),
++ PINCTRL_PIN(D14, "D14"),
++ PINCTRL_PIN(B15, "B15"),
++ PINCTRL_PIN(B14, "B14"),
++ PINCTRL_PIN(C17, "C17"),
++ PINCTRL_PIN(B13, "B13"),
++ PINCTRL_PIN(E14, "E14"),
++ PINCTRL_PIN(C15, "C15"),
++ PINCTRL_PIN(D24, "D24"),
++ PINCTRL_PIN(B23, "B23"),
++ PINCTRL_PIN(B22, "B22"),
++ PINCTRL_PIN(C23, "C23"),
++ PINCTRL_PIN(B18, "B18"),
++ PINCTRL_PIN(B21, "B21"),
++ PINCTRL_PIN(B17, "B17"),
++ PINCTRL_PIN(B19, "B19"),
++ PINCTRL_PIN(B26, "B26"),
++ PINCTRL_PIN(A25, "A25"),
++ PINCTRL_PIN(A24, "A24"),
++ PINCTRL_PIN(B24, "B24"),
++ PINCTRL_PIN(E26, "E26"),
++ PINCTRL_PIN(A21, "A21"),
++ PINCTRL_PIN(A19, "A19"),
++ PINCTRL_PIN(A18, "A18"),
++ PINCTRL_PIN(D26, "D26"),
++ PINCTRL_PIN(C26, "C26"),
++ PINCTRL_PIN(A23, "A23"),
++ PINCTRL_PIN(B25, "B25"),
++ PINCTRL_PIN(A22, "A22"),
++ PINCTRL_PIN(F26, "F26"),
++ PINCTRL_PIN(A26, "A26"),
++ PINCTRL_PIN(A14, "A14"),
++ PINCTRL_PIN(E10, "E10"),
++ PINCTRL_PIN(E13, "E13"),
++ PINCTRL_PIN(D12, "D12"),
++ PINCTRL_PIN(F10, "F10"),
++ PINCTRL_PIN(E11, "E11"),
++ PINCTRL_PIN(F11, "F11"),
++ PINCTRL_PIN(F13, "F13"),
++ PINCTRL_PIN(N15, "N15"),
++ PINCTRL_PIN(C20, "C20"),
++ PINCTRL_PIN(C19, "C19"),
++ PINCTRL_PIN(A8, "A8"),
++ PINCTRL_PIN(R14, "R14"),
++ PINCTRL_PIN(A7, "A7"),
++ PINCTRL_PIN(P14, "P14"),
++ PINCTRL_PIN(D20, "D20"),
++ PINCTRL_PIN(A6, "A6"),
++ PINCTRL_PIN(B6, "B6"),
++ PINCTRL_PIN(N14, "N14"),
++ PINCTRL_PIN(B7, "B7"),
++ PINCTRL_PIN(B8, "B8"),
++ PINCTRL_PIN(B9, "B9"),
++ PINCTRL_PIN(M14, "M14"),
++ PINCTRL_PIN(J11, "J11"),
++ PINCTRL_PIN(E7, "E7"),
++ PINCTRL_PIN(D19, "D19"),
++ PINCTRL_PIN(B11, "B11"),
++ PINCTRL_PIN(D15, "D15"),
++ PINCTRL_PIN(B12, "B12"),
++ PINCTRL_PIN(B10, "B10"),
++ PINCTRL_PIN(P13, "P13"),
++ PINCTRL_PIN(C18, "C18"),
++ PINCTRL_PIN(C6, "C6"),
++ PINCTRL_PIN(C7, "C7"),
++ PINCTRL_PIN(D7, "D7"),
++ PINCTRL_PIN(N13, "N13"),
++ PINCTRL_PIN(C8, "C8"),
++ PINCTRL_PIN(C9, "C9"),
++ PINCTRL_PIN(C10, "C10"),
++ PINCTRL_PIN(M16, "M16"),
++ PINCTRL_PIN(A15, "A15"),
++ PINCTRL_PIN(G11, "G11"),
++ PINCTRL_PIN(H7, "H7"),
++ PINCTRL_PIN(H8, "H8"),
++ PINCTRL_PIN(H9, "H9"),
++ PINCTRL_PIN(H10, "H10"),
++ PINCTRL_PIN(H11, "H11"),
++ PINCTRL_PIN(J9, "J9"),
++ PINCTRL_PIN(J10, "J10"),
++ PINCTRL_PIN(E9, "E9"),
++ PINCTRL_PIN(F9, "F9"),
++ PINCTRL_PIN(F8, "F8"),
++ PINCTRL_PIN(M13, "M13"),
++ PINCTRL_PIN(F7, "F7"),
++ PINCTRL_PIN(D8, "D8"),
++ PINCTRL_PIN(E8, "E8"),
++ PINCTRL_PIN(L12, "L12"),
++ PINCTRL_PIN(F12, "F12"),
++ PINCTRL_PIN(E12, "E12"),
++ PINCTRL_PIN(J12, "J12"),
++ PINCTRL_PIN(G7, "G7"),
++ PINCTRL_PIN(G8, "G8"),
++ PINCTRL_PIN(G9, "G9"),
++ PINCTRL_PIN(G10, "G10"),
++ PINCTRL_PIN(K12, "K12"),
++ PINCTRL_PIN(W17, "W17"),
++ PINCTRL_PIN(V18, "V18"),
++ PINCTRL_PIN(W18, "W18"),
++ PINCTRL_PIN(Y17, "Y17"),
++ PINCTRL_PIN(AA18, "AA18"),
++ PINCTRL_PIN(AA13, "AA13"),
++ PINCTRL_PIN(Y18, "Y18"),
++ PINCTRL_PIN(AA12, "AA12"),
++ PINCTRL_PIN(W20, "W20"),
++ PINCTRL_PIN(V20, "V20"),
++ PINCTRL_PIN(Y11, "Y11"),
++ PINCTRL_PIN(V14, "V14"),
++ PINCTRL_PIN(V19, "V19"),
++ PINCTRL_PIN(W14, "W14"),
++ PINCTRL_PIN(Y20, "Y20"),
++ PINCTRL_PIN(AB19, "AB19"),
++ PINCTRL_PIN(U26, "U26"),
++ PINCTRL_PIN(U25, "U25"),
++ PINCTRL_PIN(V26, "V26"),
++ PINCTRL_PIN(W26, "W26"),
++ PINCTRL_PIN(Y26, "Y26"),
++ PINCTRL_PIN(W25, "W25"),
++ PINCTRL_PIN(V24, "V24"),
++ PINCTRL_PIN(U24, "U24"),
++ PINCTRL_PIN(SGMII0, "SGMII0"),
++ PINCTRL_PIN(PCIERC2_PERST, "PCIERC2_PERST"),
++ PINCTRL_PIN(PORTC_MODE, "PORTC_MODE"),
++ PINCTRL_PIN(PORTD_MODE, "PORTD_MODE"),
++};
++
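++/*
++ * Per-ball function selection. FUNCFG_DESCL(<ball>, PIN_CFG(<func>,
++ * <reg>, <mask>, <val>)...) records, for each ball, the SCU register,
++ * the nibble-aligned 3-bit field and the field value that selects
++ * each alternate function (eight balls per 32-bit SCU4xx register);
++ * the empty FUNCFG_DESCL() entries for the HOLEx pins declare no
++ * selectable function.
++ */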
++FUNCFG_DESCL(C16, PIN_CFG(ESPI1, SCU400, GENMASK(2, 0), 1),
++ PIN_CFG(LPC1, SCU400, GENMASK(2, 0), 2),
++ PIN_CFG(SD, SCU400, GENMASK(2, 0), 3),
++ PIN_CFG(HVI3C4, SCU400, GENMASK(2, 0), 4),
++ PIN_CFG(VPI, SCU400, GENMASK(2, 0), 5));
++FUNCFG_DESCL(C14, PIN_CFG(ESPI1, SCU400, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(LPC1, SCU400, GENMASK(6, 4), (2 << 4)),
++ PIN_CFG(SD, SCU400, GENMASK(6, 4), (3 << 4)),
++ PIN_CFG(HVI3C4, SCU400, GENMASK(6, 4), (4 << 4)),
++ PIN_CFG(VPI, SCU400, GENMASK(6, 4), (5 << 4)));
++FUNCFG_DESCL(C11, PIN_CFG(ESPI1, SCU400, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(LPC1, SCU400, GENMASK(10, 8), (2 << 8)),
++ PIN_CFG(SD, SCU400, GENMASK(10, 8), (3 << 8)),
++ PIN_CFG(HVI3C5, SCU400, GENMASK(10, 8), (4 << 8)),
++ PIN_CFG(VPI, SCU400, GENMASK(10, 8), (5 << 8)));
++FUNCFG_DESCL(D9, PIN_CFG(ESPI1, SCU400, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(LPC1, SCU400, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(SD, SCU400, GENMASK(14, 12), (3 << 12)),
++ PIN_CFG(HVI3C5, SCU400, GENMASK(14, 12), (4 << 12)),
++ PIN_CFG(VPI, SCU400, GENMASK(14, 12), (5 << 12)));
++FUNCFG_DESCL(F14, PIN_CFG(ESPI1, SCU400, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(LPC1, SCU400, GENMASK(18, 16), (2 << 16)),
++ PIN_CFG(SD, SCU400, GENMASK(18, 16), (3 << 16)),
++ PIN_CFG(HVI3C6, SCU400, GENMASK(18, 16), (4 << 16)),
++ PIN_CFG(VPI, SCU400, GENMASK(18, 16), (5 << 16)));
++FUNCFG_DESCL(D10, PIN_CFG(ESPI1, SCU400, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(LPC1, SCU400, GENMASK(22, 20), (2 << 20)),
++ PIN_CFG(SD, SCU400, GENMASK(22, 20), (3 << 20)),
++ PIN_CFG(HVI3C6, SCU400, GENMASK(22, 20), (4 << 20)),
++ PIN_CFG(VPI, SCU400, GENMASK(22, 20), (5 << 20)));
++FUNCFG_DESCL(C12, PIN_CFG(ESPI1, SCU400, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(LPC1, SCU400, GENMASK(26, 24), (2 << 24)),
++ PIN_CFG(SD, SCU400, GENMASK(26, 24), (3 << 24)));
++FUNCFG_DESCL(C13, PIN_CFG(ESPI1, SCU400, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(LPC1, SCU400, GENMASK(30, 28), (2 << 28)),
++ PIN_CFG(SD, SCU400, GENMASK(30, 28), (3 << 28)));
++FUNCFG_DESCL(AA24, PIN_CFG(TACH0, SCU404, GENMASK(2, 0), 1),
++ PIN_CFG(THRU0, SCU404, GENMASK(2, 0), 2),
++ PIN_CFG(VPI, SCU404, GENMASK(2, 0), 3));
++FUNCFG_DESCL(AB24, PIN_CFG(TACH1, SCU404, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(THRU0, SCU404, GENMASK(6, 4), (2 << 4)),
++ PIN_CFG(VPI, SCU404, GENMASK(6, 4), (3 << 4)));
++FUNCFG_DESCL(AB23, PIN_CFG(TACH2, SCU404, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(THRU1, SCU404, GENMASK(10, 8), (2 << 8)),
++ PIN_CFG(VPI, SCU404, GENMASK(10, 8), (3 << 8)));
++FUNCFG_DESCL(AC22, PIN_CFG(TACH3, SCU404, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(THRU1, SCU404, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(VPI, SCU404, GENMASK(14, 12), (3 << 12)));
++FUNCFG_DESCL(AD22, PIN_CFG(TACH4, SCU404, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(VPI, SCU404, GENMASK(18, 16), (3 << 16)),
++ PIN_CFG(NCTS5, SCU404, GENMASK(18, 16), (4 << 16)));
++FUNCFG_DESCL(AE21, PIN_CFG(TACH5, SCU404, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(VPI, SCU404, GENMASK(22, 20), (3 << 20)),
++ PIN_CFG(NDCD5, SCU404, GENMASK(22, 20), (4 << 20)));
++FUNCFG_DESCL(AF20, PIN_CFG(TACH6, SCU404, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(VPI, SCU404, GENMASK(26, 24), (3 << 24)),
++ PIN_CFG(NDSR5, SCU404, GENMASK(26, 24), (4 << 24)));
++FUNCFG_DESCL(AE20, PIN_CFG(TACH7, SCU404, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(VPI, SCU404, GENMASK(30, 28), (3 << 28)),
++ PIN_CFG(NRI5, SCU404, GENMASK(30, 28), (4 << 28)));
++FUNCFG_DESCL(AD20, PIN_CFG(TACH8, SCU408, GENMASK(2, 0), 1),
++ PIN_CFG(VPI, SCU408, GENMASK(2, 0), 3),
++ PIN_CFG(NDTR5, SCU408, GENMASK(2, 0), 4));
++FUNCFG_DESCL(Y23, PIN_CFG(TACH9, SCU408, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(VPI, SCU408, GENMASK(6, 4), (3 << 4)),
++ PIN_CFG(NRTS5, SCU408, GENMASK(6, 4), (4 << 4)));
++FUNCFG_DESCL(W23, PIN_CFG(TACH10, SCU408, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(SALT12, SCU408, GENMASK(10, 8), (2 << 8)),
++ PIN_CFG(VPI, SCU408, GENMASK(10, 8), (3 << 8)),
++ PIN_CFG(NCTS6, SCU408, GENMASK(10, 8), (4 << 8)));
++FUNCFG_DESCL(AD19, PIN_CFG(TACH11, SCU408, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(SALT13, SCU408, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(VPI, SCU408, GENMASK(14, 12), (3 << 12)),
++ PIN_CFG(NDCD6, SCU408, GENMASK(14, 12), (4 << 12)));
++FUNCFG_DESCL(AC20, PIN_CFG(TACH12, SCU408, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(SALT14, SCU408, GENMASK(18, 16), (2 << 16)),
++ PIN_CFG(VPI, SCU408, GENMASK(18, 16), (3 << 16)),
++ PIN_CFG(NDSR6, SCU408, GENMASK(18, 16), (4 << 16)));
++FUNCFG_DESCL(AA23, PIN_CFG(TACH13, SCU408, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(SALT15, SCU408, GENMASK(22, 20), (2 << 20)),
++ PIN_CFG(VPI, SCU408, GENMASK(22, 20), (3 << 20)),
++ PIN_CFG(NRI6, SCU408, GENMASK(22, 20), (4 << 20)));
++FUNCFG_DESCL(AB22, PIN_CFG(TACH14, SCU408, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(LPCPME0, SCU408, GENMASK(26, 24), (2 << 24)),
++ PIN_CFG(VPI, SCU408, GENMASK(26, 24), (3 << 24)),
++ PIN_CFG(NDTR6, SCU408, GENMASK(26, 24), (4 << 24)));
++FUNCFG_DESCL(AB21, PIN_CFG(TACH15, SCU408, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(LPCSMIN0, SCU408, GENMASK(30, 28), (2 << 28)),
++ PIN_CFG(VPI, SCU408, GENMASK(30, 28), (3 << 28)),
++ PIN_CFG(NRTS6, SCU408, GENMASK(30, 28), (4 << 28)),
++ PIN_CFG(SPIM0, SCU408, GENMASK(30, 28), (5 << 28)));
++FUNCFG_DESCL(AA22, PIN_CFG(PWM0, SCU40C, GENMASK(2, 0), 1),
++ PIN_CFG(SIOPBON0, SCU40C, GENMASK(2, 0), 2),
++ PIN_CFG(VPI, SCU40C, GENMASK(2, 0), 3),
++ PIN_CFG(SPIM0, SCU40C, GENMASK(2, 0), 5));
++FUNCFG_DESCL(Y22, PIN_CFG(PWM1, SCU40C, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(SIOPBIN0, SCU40C, GENMASK(6, 4), (2 << 4)),
++ PIN_CFG(VPI, SCU40C, GENMASK(6, 4), (3 << 4)),
++ PIN_CFG(SPIM0, SCU40C, GENMASK(6, 4), (5 << 4)));
++FUNCFG_DESCL(W22, PIN_CFG(PWM2, SCU40C, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(SIOSCIN0, SCU40C, GENMASK(10, 8), (2 << 8)),
++ PIN_CFG(VPI, SCU40C, GENMASK(10, 8), (3 << 8)),
++ PIN_CFG(SPIM0, SCU40C, GENMASK(10, 8), (5 << 8)));
++FUNCFG_DESCL(AF18, PIN_CFG(PWM3, SCU40C, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(SIOS3N0, SCU40C, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(VPI, SCU40C, GENMASK(14, 12), (3 << 12)),
++ PIN_CFG(SPIM0, SCU40C, GENMASK(14, 12), (5 << 12)));
++FUNCFG_DESCL(AE18, PIN_CFG(PWM4, SCU40C, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(SIOS5N0, SCU40C, GENMASK(18, 16), (2 << 16)),
++ PIN_CFG(VPI, SCU40C, GENMASK(18, 16), (3 << 16)),
++ PIN_CFG(SPIM0, SCU40C, GENMASK(18, 16), (5 << 16)));
++FUNCFG_DESCL(AD18, PIN_CFG(PWM5, SCU40C, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(SIOPWREQN0, SCU40C, GENMASK(22, 20), (2 << 20)),
++ PIN_CFG(VPI, SCU40C, GENMASK(22, 20), (3 << 20)),
++ PIN_CFG(SPIM0, SCU40C, GENMASK(22, 20), (5 << 20)));
++FUNCFG_DESCL(AC19, PIN_CFG(PWM6, SCU40C, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(SIOONCTRLN0, SCU40C, GENMASK(26, 24), (2 << 24)),
++ PIN_CFG(SPIM0, SCU40C, GENMASK(26, 24), (5 << 24)));
++FUNCFG_DESCL(AB20, PIN_CFG(PWM7, SCU40C, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(SPIM0, SCU40C, GENMASK(30, 28), (2 << 28)));
++FUNCFG_DESCL(AF17, PIN_CFG(NCTS0, SCU410, GENMASK(2, 0), 1));
++FUNCFG_DESCL(AA16, PIN_CFG(NDCD0, SCU410, GENMASK(6, 4), (1 << 4)));
++FUNCFG_DESCL(Y16, PIN_CFG(NDSR0, SCU410, GENMASK(10, 8), (1 << 8)));
++FUNCFG_DESCL(V17, PIN_CFG(NRI0, SCU410, GENMASK(14, 12), (1 << 12)));
++FUNCFG_DESCL(J13, PIN_CFG(NDTR0, SCU410, GENMASK(18, 16), (1 << 16)));
++FUNCFG_DESCL(AB16, PIN_CFG(NRTS0, SCU410, GENMASK(22, 20), (1 << 20)));
++FUNCFG_DESCL(AC16, PIN_CFG(TXD0, SCU410, GENMASK(26, 24), (1 << 24)));
++FUNCFG_DESCL(AF16, PIN_CFG(RXD0, SCU410, GENMASK(30, 28), (1 << 28)));
++FUNCFG_DESCL(AA15, PIN_CFG(NCTS1, SCU414, GENMASK(2, 0), 1));
++FUNCFG_DESCL(AB15, PIN_CFG(NDCD1, SCU414, GENMASK(6, 4), (1 << 4)));
++FUNCFG_DESCL(AC15, PIN_CFG(NDSR1, SCU414, GENMASK(10, 8), (1 << 8)));
++FUNCFG_DESCL(AD15, PIN_CFG(NRI1, SCU414, GENMASK(14, 12), (1 << 12)));
++FUNCFG_DESCL(Y15, PIN_CFG(NDTR1, SCU414, GENMASK(18, 16), (1 << 16)));
++FUNCFG_DESCL(AA14, PIN_CFG(NRTS1, SCU414, GENMASK(22, 20), (1 << 20)));
++FUNCFG_DESCL(W16, PIN_CFG(TXD1, SCU414, GENMASK(26, 24), (1 << 24)));
++FUNCFG_DESCL(V16, PIN_CFG(RXD1, SCU414, GENMASK(30, 28), (1 << 28)));
++FUNCFG_DESCL(AB18, PIN_CFG(TXD2, SCU418, GENMASK(2, 0), 1));
++FUNCFG_DESCL(AC18, PIN_CFG(RXD2, SCU418, GENMASK(6, 4), (1 << 4)));
++FUNCFG_DESCL(K13, PIN_CFG(TXD3, SCU418, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(WDTRST0N, SCU418, GENMASK(10, 8), (2 << 8)),
++ PIN_CFG(PWM8, SCU418, GENMASK(10, 8), (3 << 8)),
++ PIN_CFG(SPIM1, SCU418, GENMASK(10, 8), (5 << 8)));
++FUNCFG_DESCL(AA17, PIN_CFG(RXD3, SCU418, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(WDTRST1N, SCU418, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(PWM9, SCU418, GENMASK(14, 12), (3 << 12)),
++ PIN_CFG(SPIM1, SCU418, GENMASK(14, 12), (5 << 12)));
++FUNCFG_DESCL(AB17, PIN_CFG(TXD5, SCU418, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(WDTRST2N, SCU418, GENMASK(18, 16), (2 << 16)),
++ PIN_CFG(PWM10, SCU418, GENMASK(18, 16), (3 << 16)),
++ PIN_CFG(SPIM1, SCU418, GENMASK(18, 16), (5 << 16)));
++FUNCFG_DESCL(AD16, PIN_CFG(RXD5, SCU418, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(WDTRST3N, SCU418, GENMASK(22, 20), (2 << 20)),
++ PIN_CFG(PWM11, SCU418, GENMASK(22, 20), (3 << 20)),
++ PIN_CFG(SPIM1, SCU418, GENMASK(22, 20), (5 << 20)));
++FUNCFG_DESCL(AC17, PIN_CFG(TXD6, SCU418, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(SALT0, SCU418, GENMASK(26, 24), (2 << 24)),
++ PIN_CFG(PWM12, SCU418, GENMASK(26, 24), (3 << 24)),
++ PIN_CFG(SPIM1, SCU418, GENMASK(26, 24), (5 << 24)));
++FUNCFG_DESCL(AD17, PIN_CFG(RXD6, SCU418, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(SALT1, SCU418, GENMASK(30, 28), (2 << 28)),
++ PIN_CFG(PWM13, SCU418, GENMASK(30, 28), (3 << 28)),
++ PIN_CFG(SPIM1, SCU418, GENMASK(30, 28), (5 << 28)));
++FUNCFG_DESCL(AE16, PIN_CFG(TXD7, SCU41C, GENMASK(2, 0), 1),
++ PIN_CFG(SALT2, SCU41C, GENMASK(2, 0), 2),
++ PIN_CFG(PWM14, SCU41C, GENMASK(2, 0), 3),
++ PIN_CFG(SPIM1, SCU41C, GENMASK(2, 0), 5));
++FUNCFG_DESCL(AE17, PIN_CFG(RXD7, SCU41C, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(SALT3, SCU41C, GENMASK(6, 4), (2 << 4)),
++ PIN_CFG(PWM15, SCU41C, GENMASK(6, 4), (3 << 4)),
++ PIN_CFG(SPIM1, SCU41C, GENMASK(6, 4), (5 << 4)));
++FUNCFG_DESCL(U23, PIN_CFG(SGPM1, SCU41C, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(WDTRST7N, SCU41C, GENMASK(10, 8), (2 << 8)),
++ PIN_CFG(PESGWAKEN, SCU41C, GENMASK(10, 8), (3 << 8)),
++ PIN_CFG(SMON1, SCU41C, GENMASK(10, 8), (5 << 8)));
++FUNCFG_DESCL(T24, PIN_CFG(SGPM1, SCU41C, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(SMON1, SCU41C, GENMASK(14, 12), (5 << 12)));
++FUNCFG_DESCL(HOLE0);
++FUNCFG_DESCL(HOLE1);
++FUNCFG_DESCL(HOLE2);
++FUNCFG_DESCL(HOLE3);
++FUNCFG_DESCL(AC24, PIN_CFG(HVI3C12, SCU420, GENMASK(2, 0), 1),
++ PIN_CFG(I2C12, SCU420, GENMASK(2, 0), 2),
++ PIN_CFG(SIOPBON1, SCU420, GENMASK(2, 0), 3),
++ PIN_CFG(SPIM2, SCU420, GENMASK(2, 0), 5));
++FUNCFG_DESCL(AD24, PIN_CFG(HVI3C12, SCU420, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(I2C12, SCU420, GENMASK(6, 4), (2 << 4)),
++ PIN_CFG(SIOPBIN1, SCU420, GENMASK(6, 4), (3 << 4)),
++ PIN_CFG(SPIM2, SCU420, GENMASK(6, 4), (5 << 4)));
++FUNCFG_DESCL(AE23, PIN_CFG(HVI3C13, SCU420, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(I2C13, SCU420, GENMASK(10, 8), (2 << 8)),
++ PIN_CFG(SIOSCIN1, SCU420, GENMASK(10, 8), (3 << 8)),
++ PIN_CFG(SPIM2, SCU420, GENMASK(10, 8), (5 << 8)));
++FUNCFG_DESCL(AE19, PIN_CFG(HVI3C13, SCU420, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(I2C13, SCU420, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(SIOS3N1, SCU420, GENMASK(14, 12), (3 << 12)),
++ PIN_CFG(SPIM2, SCU420, GENMASK(14, 12), (5 << 12)));
++FUNCFG_DESCL(AF23, PIN_CFG(HVI3C14, SCU420, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(I2C14, SCU420, GENMASK(18, 16), (2 << 16)),
++ PIN_CFG(SIOS5N1, SCU420, GENMASK(18, 16), (3 << 16)),
++ PIN_CFG(SPIM2, SCU420, GENMASK(18, 16), (5 << 16)));
++FUNCFG_DESCL(Y25, PIN_CFG(HVI3C14, SCU420, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(I2C14, SCU420, GENMASK(22, 20), (2 << 20)),
++ PIN_CFG(SIOPWREQN1, SCU420, GENMASK(22, 20), (3 << 20)),
++ PIN_CFG(SPIM2, SCU420, GENMASK(22, 20), (5 << 20)));
++FUNCFG_DESCL(AA25, PIN_CFG(HVI3C15, SCU420, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(I2C15, SCU420, GENMASK(26, 24), (2 << 24)),
++ PIN_CFG(SIOONCTRLN1, SCU420, GENMASK(26, 24), (3 << 24)),
++ PIN_CFG(SPIM2, SCU420, GENMASK(26, 24), (5 << 24)));
++FUNCFG_DESCL(AF19, PIN_CFG(HVI3C15, SCU420, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(I2C15, SCU420, GENMASK(30, 28), (2 << 28)),
++ PIN_CFG(SIOPWRGD1, SCU420, GENMASK(30, 28), (3 << 28)),
++ PIN_CFG(SPIM2, SCU420, GENMASK(30, 28), (5 << 28)));
++FUNCFG_DESCL(AB25, PIN_CFG(I3C4, SCU424, GENMASK(2, 0), 1));
++FUNCFG_DESCL(AC25, PIN_CFG(I3C4, SCU424, GENMASK(6, 4), (1 << 4)));
++FUNCFG_DESCL(AD25, PIN_CFG(I3C5, SCU424, GENMASK(10, 8), (1 << 8)));
++FUNCFG_DESCL(V22, PIN_CFG(I3C5, SCU424, GENMASK(14, 12), (1 << 12)));
++FUNCFG_DESCL(AE25, PIN_CFG(I3C6, SCU424, GENMASK(18, 16), (1 << 16)));
++FUNCFG_DESCL(V21, PIN_CFG(I3C6, SCU424, GENMASK(22, 20), (1 << 20)));
++FUNCFG_DESCL(AF21, PIN_CFG(I3C7, SCU424, GENMASK(26, 24), (1 << 24)));
++FUNCFG_DESCL(AF25, PIN_CFG(I3C7, SCU424, GENMASK(30, 28), (1 << 28)));
++FUNCFG_DESCL(AF26, PIN_CFG(I3C8, SCU428, GENMASK(2, 0), 1));
++FUNCFG_DESCL(AE26, PIN_CFG(I3C8, SCU428, GENMASK(6, 4), (1 << 4)));
++FUNCFG_DESCL(W21, PIN_CFG(I3C9, SCU428, GENMASK(10, 8), (1 << 8)));
++FUNCFG_DESCL(AD26, PIN_CFG(I3C9, SCU428, GENMASK(14, 12), (1 << 12)));
++FUNCFG_DESCL(Y21, PIN_CFG(I3C10, SCU428, GENMASK(18, 16), (1 << 16)));
++FUNCFG_DESCL(AC26, PIN_CFG(I3C10, SCU428, GENMASK(22, 20), (1 << 20)));
++FUNCFG_DESCL(AA26, PIN_CFG(I3C11, SCU428, GENMASK(26, 24), (1 << 24)));
++FUNCFG_DESCL(AB26, PIN_CFG(I3C11, SCU428, GENMASK(30, 28), (1 << 28)));
++FUNCFG_DESCL(T26, PIN_CFG(I3C0, SCU42C, GENMASK(2, 0), 1),
++ PIN_CFG(FSI0, SCU42C, GENMASK(2, 0), 2),
++ PIN_CFG(LTPI, SCU42C, GENMASK(2, 0), 3));
++FUNCFG_DESCL(AA20, PIN_CFG(I3C0, SCU42C, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(FSI0, SCU42C, GENMASK(6, 4), (2 << 4)),
++ PIN_CFG(LTPI, SCU42C, GENMASK(6, 4), (3 << 4)));
++FUNCFG_DESCL(V23, PIN_CFG(I3C1, SCU42C, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(FSI1, SCU42C, GENMASK(10, 8), (2 << 8)),
++ PIN_CFG(LTPI, SCU42C, GENMASK(10, 8), (3 << 8)));
++FUNCFG_DESCL(W24, PIN_CFG(I3C1, SCU42C, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(FSI1, SCU42C, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(LTPI, SCU42C, GENMASK(14, 12), (3 << 12)));
++FUNCFG_DESCL(R26, PIN_CFG(I3C2, SCU42C, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(FSI2, SCU42C, GENMASK(18, 16), (2 << 16)));
++FUNCFG_DESCL(AA21, PIN_CFG(I3C2, SCU42C, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(FSI2, SCU42C, GENMASK(22, 20), (2 << 20)));
++FUNCFG_DESCL(P26, PIN_CFG(I3C3, SCU42C, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(FSI3, SCU42C, GENMASK(26, 24), (2 << 24)));
++FUNCFG_DESCL(Y24, PIN_CFG(I3C3, SCU42C, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(FSI3, SCU42C, GENMASK(30, 28), (2 << 28)));
++FUNCFG_DESCL(B16, PIN_CFG(ESPI0, SCU430, GENMASK(2, 0), 1),
++ PIN_CFG(LPC0, SCU430, GENMASK(2, 0), 2));
++FUNCFG_DESCL(D14, PIN_CFG(ESPI0, SCU430, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(LPC0, SCU430, GENMASK(6, 4), (2 << 4)));
++FUNCFG_DESCL(B15, PIN_CFG(ESPI0, SCU430, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(LPC0, SCU430, GENMASK(10, 8), (2 << 8)));
++FUNCFG_DESCL(B14, PIN_CFG(ESPI0, SCU430, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(LPC0, SCU430, GENMASK(14, 12), (2 << 12)));
++FUNCFG_DESCL(C17, PIN_CFG(ESPI0, SCU430, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(LPC0, SCU430, GENMASK(18, 16), (2 << 16)),
++ PIN_CFG(OSCCLK, SCU430, GENMASK(18, 16), (3 << 16)));
++FUNCFG_DESCL(B13, PIN_CFG(ESPI0, SCU430, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(LPC0, SCU430, GENMASK(22, 20), (2 << 20)));
++FUNCFG_DESCL(E14, PIN_CFG(ESPI0, SCU430, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(LPC0, SCU430, GENMASK(26, 24), (2 << 24)));
++FUNCFG_DESCL(C15, PIN_CFG(ESPI0, SCU430, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(LPC0, SCU430, GENMASK(30, 28), (2 << 28)));
++FUNCFG_DESCL(D24, PIN_CFG(SPI0, SCU434, GENMASK(2, 0), 1));
++FUNCFG_DESCL(B23, PIN_CFG(SPI0, SCU434, GENMASK(6, 4), (1 << 4)));
++FUNCFG_DESCL(B22, PIN_CFG(SPI0, SCU434, GENMASK(10, 8), (1 << 8)));
++FUNCFG_DESCL(C23, PIN_CFG(QSPI0, SCU434, GENMASK(14, 12), (1 << 12)));
++FUNCFG_DESCL(B18, PIN_CFG(QSPI0, SCU434, GENMASK(18, 16), (1 << 16)));
++FUNCFG_DESCL(B21, PIN_CFG(SPI0CS1, SCU434, GENMASK(22, 20), (1 << 20)));
++FUNCFG_DESCL(B17, PIN_CFG(SPI0ABR, SCU434, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(TXD8, SCU434, GENMASK(26, 24), (3 << 24)));
++FUNCFG_DESCL(B19, PIN_CFG(SPI0WPN, SCU434, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(RXD8, SCU434, GENMASK(30, 28), (3 << 28)));
++FUNCFG_DESCL(B26, PIN_CFG(SPI1, SCU438, GENMASK(2, 0), 1),
++ PIN_CFG(TXD9, SCU438, GENMASK(2, 0), 3));
++FUNCFG_DESCL(A25, PIN_CFG(SPI1, SCU438, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(RXD9, SCU438, GENMASK(6, 4), (3 << 4)));
++FUNCFG_DESCL(A24, PIN_CFG(SPI1, SCU438, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(TXD10, SCU438, GENMASK(10, 8), (3 << 8)));
++FUNCFG_DESCL(B24, PIN_CFG(QSPI1, SCU438, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(RXD10, SCU438, GENMASK(14, 12), (3 << 12)));
++FUNCFG_DESCL(E26, PIN_CFG(QSPI1, SCU438, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(TXD11, SCU438, GENMASK(18, 16), (3 << 16)));
++FUNCFG_DESCL(A21, PIN_CFG(SPI1CS1, SCU438, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(RXD11, SCU438, GENMASK(22, 20), (3 << 20)));
++FUNCFG_DESCL(A19, PIN_CFG(SPI1ABR, SCU438, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(THRU2, SCU438, GENMASK(26, 24), (4 << 24)));
++FUNCFG_DESCL(A18, PIN_CFG(SPI1WPN, SCU438, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(THRU2, SCU438, GENMASK(30, 28), (4 << 28)));
++FUNCFG_DESCL(D26, PIN_CFG(SPI2, SCU43C, GENMASK(2, 0), 1),
++ PIN_CFG(DI2C13, SCU43C, GENMASK(2, 0), 2),
++ PIN_CFG(HVI3C7, SCU43C, GENMASK(2, 0), 3));
++FUNCFG_DESCL(C26, PIN_CFG(SPI2, SCU43C, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(DI2C13, SCU43C, GENMASK(6, 4), (2 << 4)),
++ PIN_CFG(HVI3C7, SCU43C, GENMASK(6, 4), (3 << 4)),
++ PIN_CFG(EM_SPICK, SCU43C, GENMASK(6, 4), (5 << 4)));
++FUNCFG_DESCL(A23, PIN_CFG(SPI2, SCU43C, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(DI2C14, SCU43C, GENMASK(10, 8), (2 << 8)),
++ PIN_CFG(HVI3C10, SCU43C, GENMASK(10, 8), (3 << 8)),
++ PIN_CFG(EM_SPIMOSI, SCU43C, GENMASK(10, 8), (5 << 8)));
++FUNCFG_DESCL(B25, PIN_CFG(SPI2, SCU43C, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(DI2C14, SCU43C, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(HVI3C10, SCU43C, GENMASK(14, 12), (3 << 12)),
++ PIN_CFG(EM_SPIMISO, SCU43C, GENMASK(14, 12), (5 << 12)));
++FUNCFG_DESCL(A22, PIN_CFG(QSPI2, SCU43C, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(DI2C15, SCU43C, GENMASK(18, 16), (2 << 16)),
++ PIN_CFG(HVI3C11, SCU43C, GENMASK(18, 16), (3 << 16)),
++ PIN_CFG(THRU3, SCU43C, GENMASK(18, 16), (4 << 16)));
++FUNCFG_DESCL(F26, PIN_CFG(QSPI2, SCU43C, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(DI2C15, SCU43C, GENMASK(22, 20), (2 << 20)),
++ PIN_CFG(HVI3C11, SCU43C, GENMASK(22, 20), (3 << 20)),
++ PIN_CFG(THRU3, SCU43C, GENMASK(22, 20), (4 << 20)));
++FUNCFG_DESCL(A26, PIN_CFG(SPI2CS1, SCU43C, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(EM_SPICSN, SCU43C, GENMASK(26, 24), (5 << 24)));
++FUNCFG_DESCL(A14, PIN_CFG(FWSPIABR, SCU43C, GENMASK(30, 28), (1 << 28)));
++FUNCFG_DESCL(E10, PIN_CFG(MDIO2, SCU440, GENMASK(2, 0), 1),
++ PIN_CFG(PE2SGRSTN, SCU440, GENMASK(2, 0), 2));
++FUNCFG_DESCL(E13, PIN_CFG(MDIO2, SCU440, GENMASK(6, 4), (1 << 4)));
++FUNCFG_DESCL(D12, PIN_CFG(JTAGM1, SCU440, GENMASK(10, 8), (1 << 8)));
++FUNCFG_DESCL(F10, PIN_CFG(JTAGM1, SCU440, GENMASK(14, 12), (1 << 12)));
++FUNCFG_DESCL(E11, PIN_CFG(JTAGM1, SCU440, GENMASK(18, 16), (1 << 16)));
++FUNCFG_DESCL(F11, PIN_CFG(JTAGM1, SCU440, GENMASK(22, 20), (1 << 20)));
++FUNCFG_DESCL(F13, PIN_CFG(JTAGM1, SCU440, GENMASK(26, 24), (1 << 24)));
++FUNCFG_DESCL(N15, PIN_CFG(FWSPIWPEN, SCU440, GENMASK(30, 28), (1 << 28)));
++FUNCFG_DESCL(C20, PIN_CFG(RGMII0, SCU444, GENMASK(2, 0), 1),
++ PIN_CFG(RMII0R, SCU444, GENMASK(2, 0), 2));
++FUNCFG_DESCL(C19, PIN_CFG(RGMII0, SCU444, GENMASK(6, 4), (1 << 4)));
++FUNCFG_DESCL(A8, PIN_CFG(RGMII0, SCU444, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(RMII0R, SCU444, GENMASK(10, 8), (2 << 8)));
++FUNCFG_DESCL(R14, PIN_CFG(RGMII0, SCU444, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(RMII0R, SCU444, GENMASK(14, 12), (2 << 12)));
++FUNCFG_DESCL(A7, PIN_CFG(RGMII0, SCU444, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(RMII0C, SCU444, GENMASK(18, 16), (2 << 16)));
++FUNCFG_DESCL(P14, PIN_CFG(RGMII0, SCU444, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(RMII0, SCU444, GENMASK(22, 20), (2 << 20)));
++FUNCFG_DESCL(D20, PIN_CFG(RGMII0, SCU444, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(RMII0, SCU444, GENMASK(26, 24), (2 << 24)));
++FUNCFG_DESCL(A6, PIN_CFG(RGMII0, SCU444, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(RMII0, SCU444, GENMASK(30, 28), (2 << 28)));
++FUNCFG_DESCL(B6, PIN_CFG(RGMII0, SCU448, GENMASK(2, 0), 1),
++ PIN_CFG(RMII0, SCU448, GENMASK(2, 0), 2));
++FUNCFG_DESCL(N14, PIN_CFG(RGMII0, SCU448, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(RMII0, SCU448, GENMASK(6, 4), (2 << 4)));
++FUNCFG_DESCL(B7, PIN_CFG(RGMII0, SCU448, GENMASK(10, 8), (1 << 8)));
++FUNCFG_DESCL(B8, PIN_CFG(RGMII0, SCU448, GENMASK(14, 12), (1 << 12)));
++FUNCFG_DESCL(B9, PIN_CFG(MDIO0, SCU448, GENMASK(18, 16), (1 << 16)));
++FUNCFG_DESCL(M14, PIN_CFG(MDIO0, SCU448, GENMASK(22, 20), (1 << 20)));
++FUNCFG_DESCL(J11, PIN_CFG(VGA, SCU448, GENMASK(26, 24), (1 << 24)));
++FUNCFG_DESCL(E7, PIN_CFG(VGA, SCU448, GENMASK(30, 28), (1 << 28)));
++FUNCFG_DESCL(D19, PIN_CFG(RGMII1, SCU44C, GENMASK(2, 0), 1),
++ PIN_CFG(RMII1, SCU44C, GENMASK(2, 0), 2),
++ PIN_CFG(DI2C8, SCU44C, GENMASK(2, 0), 3),
++ PIN_CFG(DSGPM1, SCU44C, GENMASK(2, 0), 4));
++FUNCFG_DESCL(B11, PIN_CFG(RGMII1, SCU44C, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(SGPS, SCU44C, GENMASK(6, 4), (5 << 4)));
++FUNCFG_DESCL(D15, PIN_CFG(RGMII1, SCU44C, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(RMII1, SCU44C, GENMASK(10, 8), (2 << 8)),
++ PIN_CFG(DI2C9, SCU44C, GENMASK(10, 8), (3 << 8)),
++ PIN_CFG(TXD3, SCU44C, GENMASK(10, 8), (4 << 8)));
++FUNCFG_DESCL(B12, PIN_CFG(RGMII1, SCU44C, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(RMII1, SCU44C, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(DI2C9, SCU44C, GENMASK(14, 12), (3 << 12)),
++ PIN_CFG(RXD3, SCU44C, GENMASK(14, 12), (4 << 12)));
++FUNCFG_DESCL(B10, PIN_CFG(RGMII1, SCU44C, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(RMII1, SCU44C, GENMASK(18, 16), (2 << 16)),
++ PIN_CFG(DI2C8, SCU44C, GENMASK(18, 16), (3 << 16)),
++ PIN_CFG(DSGPM1, SCU44C, GENMASK(18, 16), (4 << 16)));
++FUNCFG_DESCL(P13, PIN_CFG(RGMII1, SCU44C, GENMASK(22, 20), (1 << 20)));
++FUNCFG_DESCL(C18, PIN_CFG(RGMII1, SCU44C, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(RMII1, SCU44C, GENMASK(26, 24), (2 << 24)),
++ PIN_CFG(SGPS, SCU44C, GENMASK(26, 24), (5 << 24)));
++FUNCFG_DESCL(C6, PIN_CFG(RGMII1, SCU44C, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(RMII1, SCU44C, GENMASK(30, 28), (2 << 28)));
++FUNCFG_DESCL(C7, PIN_CFG(RGMII1, SCU450, GENMASK(2, 0), 1),
++ PIN_CFG(RMII1, SCU450, GENMASK(2, 0), 2),
++ PIN_CFG(DI2C10, SCU450, GENMASK(2, 0), 3),
++ PIN_CFG(DSGPM1, SCU450, GENMASK(2, 0), 4));
++FUNCFG_DESCL(D7, PIN_CFG(RGMII1, SCU450, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(RMII1, SCU450, GENMASK(6, 4), (2 << 4)),
++ PIN_CFG(DI2C10, SCU450, GENMASK(6, 4), (3 << 4)),
++ PIN_CFG(DSGPM1, SCU450, GENMASK(6, 4), (4 << 4)));
++FUNCFG_DESCL(N13, PIN_CFG(RGMII1, SCU450, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(SGPS, SCU450, GENMASK(10, 8), (5 << 8)));
++FUNCFG_DESCL(C8, PIN_CFG(RGMII1, SCU450, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(SGPS, SCU450, GENMASK(14, 12), (5 << 12)));
++FUNCFG_DESCL(C9, PIN_CFG(MDIO1, SCU450, GENMASK(18, 16), (1 << 16)));
++FUNCFG_DESCL(C10, PIN_CFG(MDIO1, SCU450, GENMASK(22, 20), (1 << 20)));
++FUNCFG_DESCL(M16, PIN_CFG(FWQSPI, SCU450, GENMASK(26, 24), (1 << 24)));
++FUNCFG_DESCL(A15, PIN_CFG(FWQSPI, SCU450, GENMASK(30, 28), (1 << 28)));
++FUNCFG_DESCL(G11, PIN_CFG(I2C0, SCU454, GENMASK(2, 0), 1));
++FUNCFG_DESCL(H7, PIN_CFG(I2C0, SCU454, GENMASK(6, 4), (1 << 4)));
++FUNCFG_DESCL(H8, PIN_CFG(I2C1, SCU454, GENMASK(10, 8), (1 << 8)));
++FUNCFG_DESCL(H9, PIN_CFG(I2C1, SCU454, GENMASK(14, 12), (1 << 12)));
++FUNCFG_DESCL(H10, PIN_CFG(I2C2, SCU454, GENMASK(18, 16), (1 << 16)));
++FUNCFG_DESCL(H11, PIN_CFG(I2C2, SCU454, GENMASK(22, 20), (1 << 20)));
++FUNCFG_DESCL(J9, PIN_CFG(I2C3, SCU454, GENMASK(26, 24), (1 << 24)));
++FUNCFG_DESCL(J10, PIN_CFG(I2C3, SCU454, GENMASK(30, 28), (1 << 28)));
++FUNCFG_DESCL(E9, PIN_CFG(I2C4, SCU458, GENMASK(2, 0), 1),
++ PIN_CFG(ESPI1, SCU458, GENMASK(2, 0), 2),
++ PIN_CFG(I2CF1, SCU458, GENMASK(2, 0), 5));
++FUNCFG_DESCL(F9, PIN_CFG(I2C4, SCU458, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(ESPI1, SCU458, GENMASK(6, 4), (2 << 4)),
++ PIN_CFG(I2CF1, SCU458, GENMASK(6, 4), (5 << 4)));
++FUNCFG_DESCL(F8, PIN_CFG(I2C5, SCU458, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(ESPI1, SCU458, GENMASK(10, 8), (2 << 8)),
++ PIN_CFG(I2CF1, SCU458, GENMASK(10, 8), (5 << 8)));
++FUNCFG_DESCL(M13, PIN_CFG(I2C5, SCU458, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(ESPI1, SCU458, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(I2CF1, SCU458, GENMASK(14, 12), (5 << 12)));
++FUNCFG_DESCL(F7, PIN_CFG(I2C6, SCU458, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(ESPI1, SCU458, GENMASK(18, 16), (2 << 16)),
++ PIN_CFG(I2CF2, SCU458, GENMASK(18, 16), (5 << 16)));
++FUNCFG_DESCL(D8, PIN_CFG(I2C6, SCU458, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(ESPI1, SCU458, GENMASK(22, 20), (2 << 20)),
++ PIN_CFG(I2CF2, SCU458, GENMASK(22, 20), (5 << 20)));
++FUNCFG_DESCL(E8, PIN_CFG(I2C7, SCU458, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(ESPI1, SCU458, GENMASK(26, 24), (2 << 24)),
++ PIN_CFG(I2CF2, SCU458, GENMASK(26, 24), (5 << 24)));
++FUNCFG_DESCL(L12, PIN_CFG(I2C7, SCU458, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(ESPI1, SCU458, GENMASK(30, 28), (2 << 28)),
++ PIN_CFG(I2CF2, SCU458, GENMASK(30, 28), (5 << 28)));
++FUNCFG_DESCL(F12, PIN_CFG(I2C8, SCU45C, GENMASK(2, 0), 1),
++ PIN_CFG(I2CF0, SCU45C, GENMASK(2, 0), 5));
++FUNCFG_DESCL(E12, PIN_CFG(I2C8, SCU45C, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(I2CF0, SCU45C, GENMASK(6, 4), (5 << 4)));
++FUNCFG_DESCL(J12, PIN_CFG(I2C9, SCU45C, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(I2CF0, SCU45C, GENMASK(10, 8), (5 << 8)));
++FUNCFG_DESCL(G7, PIN_CFG(I2C9, SCU45C, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(CANBUS, SCU45C, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(I2CF0, SCU45C, GENMASK(14, 12), (5 << 12)));
++FUNCFG_DESCL(G8, PIN_CFG(I2C10, SCU45C, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(CANBUS, SCU45C, GENMASK(18, 16), (2 << 16)));
++FUNCFG_DESCL(G9, PIN_CFG(I2C10, SCU45C, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(CANBUS, SCU45C, GENMASK(22, 20), (2 << 20)));
++FUNCFG_DESCL(G10, PIN_CFG(I2C11, SCU45C, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(USBUART, SCU45C, GENMASK(26, 24), (2 << 24)));
++FUNCFG_DESCL(K12, PIN_CFG(I2C11, SCU45C, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(USBUART, SCU45C, GENMASK(30, 28), (2 << 28)));
++FUNCFG_DESCL(W17, PIN_CFG(ADC0, SCU460, GENMASK(2, 0), 0),
++ PIN_CFG(GPIY0, SCU460, GENMASK(2, 0), 1),
++ PIN_CFG(SALT4, SCU460, GENMASK(2, 0), 2));
++FUNCFG_DESCL(V18, PIN_CFG(ADC1, SCU460, GENMASK(6, 4), 0),
++ PIN_CFG(GPIY1, SCU460, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(SALT5, SCU460, GENMASK(6, 4), (2 << 4)));
++FUNCFG_DESCL(W18, PIN_CFG(ADC2, SCU460, GENMASK(10, 8), 0),
++ PIN_CFG(GPIY2, SCU460, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(SALT6, SCU460, GENMASK(10, 8), (2 << 8)));
++FUNCFG_DESCL(Y17, PIN_CFG(ADC3, SCU460, GENMASK(14, 12), 0),
++ PIN_CFG(GPIY3, SCU460, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(SALT7, SCU460, GENMASK(14, 12), (2 << 12)));
++FUNCFG_DESCL(AA18, PIN_CFG(ADC4, SCU460, GENMASK(18, 16), 0),
++ PIN_CFG(GPIY4, SCU460, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(SALT8, SCU460, GENMASK(18, 16), (2 << 16)));
++FUNCFG_DESCL(AA13, PIN_CFG(ADC5, SCU460, GENMASK(22, 20), 0),
++ PIN_CFG(GPIY5, SCU460, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(SALT9, SCU460, GENMASK(22, 20), (2 << 20)));
++FUNCFG_DESCL(Y18, PIN_CFG(ADC6, SCU460, GENMASK(26, 24), 0),
++ PIN_CFG(GPIY6, SCU460, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(SALT10, SCU460, GENMASK(26, 24), (2 << 24)));
++FUNCFG_DESCL(AA12, PIN_CFG(ADC7, SCU460, GENMASK(30, 28), 0),
++ PIN_CFG(GPIY7, SCU460, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(SALT11, SCU460, GENMASK(30, 28), (2 << 28)));
++FUNCFG_DESCL(W20, PIN_CFG(ADC15, SCU464, GENMASK(2, 0), 0),
++ PIN_CFG(GPIZ7, SCU464, GENMASK(2, 0), 1));
++FUNCFG_DESCL(V20, PIN_CFG(ADC14, SCU464, GENMASK(6, 4), 0),
++ PIN_CFG(GPIZ6, SCU464, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(AUXPWRGOOD1, SCU464, GENMASK(6, 4), (2 << 4)));
++FUNCFG_DESCL(Y11, PIN_CFG(ADC13, SCU464, GENMASK(10, 8), 0),
++ PIN_CFG(GPIZ5, SCU464, GENMASK(10, 8), (1 << 8)),
++ PIN_CFG(AUXPWRGOOD0, SCU464, GENMASK(10, 8), (2 << 8)));
++FUNCFG_DESCL(V14, PIN_CFG(ADC12, SCU464, GENMASK(14, 12), 0),
++ PIN_CFG(GPIZ4, SCU464, GENMASK(14, 12), (1 << 12)));
++FUNCFG_DESCL(V19, PIN_CFG(ADC11, SCU464, GENMASK(18, 16), 0),
++ PIN_CFG(GPIZ3, SCU464, GENMASK(18, 16), (1 << 16)));
++FUNCFG_DESCL(W14, PIN_CFG(ADC10, SCU464, GENMASK(22, 20), 0),
++ PIN_CFG(GPIZ2, SCU464, GENMASK(22, 20), (1 << 20)));
++FUNCFG_DESCL(Y20, PIN_CFG(ADC9, SCU464, GENMASK(26, 24), 0),
++ PIN_CFG(GPIZ1, SCU464, GENMASK(26, 24), (1 << 24)));
++FUNCFG_DESCL(AB19, PIN_CFG(ADC8, SCU464, GENMASK(30, 28), 0),
++ PIN_CFG(GPIZ0, SCU464, GENMASK(30, 28), (1 << 28)));
++FUNCFG_DESCL(U26, PIN_CFG(SGPM0, SCU468, GENMASK(2, 0), 1),
++ PIN_CFG(SMON0, SCU468, GENMASK(2, 0), 2),
++ PIN_CFG(NCTS2, SCU468, GENMASK(2, 0), 3),
++ PIN_CFG(MACLINK0, SCU468, GENMASK(2, 0), 4));
++FUNCFG_DESCL(U25, PIN_CFG(SGPM0, SCU468, GENMASK(6, 4), (1 << 4)),
++ PIN_CFG(SMON0, SCU468, GENMASK(6, 4), (2 << 4)),
++ PIN_CFG(NDCD2, SCU468, GENMASK(6, 4), (3 << 4)),
++ PIN_CFG(MACLINK2, SCU468, GENMASK(6, 4), (4 << 4)));
++FUNCFG_DESCL(V26, PIN_CFG(SGPM0LD_R, SCU468, GENMASK(10, 8), (2 << 8)),
++ PIN_CFG(HBLED, SCU468, GENMASK(10, 8), (2 << 8)));
++FUNCFG_DESCL(W26, PIN_CFG(SGPM0, SCU468, GENMASK(14, 12), (1 << 12)),
++ PIN_CFG(SMON0, SCU468, GENMASK(14, 12), (2 << 12)),
++ PIN_CFG(NDSR2, SCU468, GENMASK(14, 12), (3 << 12)));
++FUNCFG_DESCL(Y26, PIN_CFG(SGPM0, SCU468, GENMASK(18, 16), (1 << 16)),
++ PIN_CFG(SMON0, SCU468, GENMASK(18, 16), (2 << 16)),
++ PIN_CFG(NRI2, SCU468, GENMASK(18, 16), (3 << 16)));
++FUNCFG_DESCL(W25, PIN_CFG(SGPM1, SCU468, GENMASK(22, 20), (1 << 20)),
++ PIN_CFG(WDTRST4N, SCU468, GENMASK(22, 20), (2 << 20)),
++ PIN_CFG(NDTR2, SCU468, GENMASK(22, 20), (3 << 20)),
++ PIN_CFG(SMON1, SCU468, GENMASK(22, 20), (4 << 20)));
++FUNCFG_DESCL(V24, PIN_CFG(SGPM1, SCU468, GENMASK(26, 24), (1 << 24)),
++ PIN_CFG(WDTRST5N, SCU468, GENMASK(26, 24), (2 << 24)),
++ PIN_CFG(NRTS2, SCU468, GENMASK(26, 24), (3 << 24)),
++ PIN_CFG(SMON1, SCU468, GENMASK(26, 24), (4 << 24)));
++FUNCFG_DESCL(U24, PIN_CFG(SGPM1LD_R, SCU468, GENMASK(30, 28), (1 << 28)),
++ PIN_CFG(WDTRST6N, SCU468, GENMASK(30, 28), (2 << 28)),
++ PIN_CFG(MACLINK1, SCU468, GENMASK(30, 28), (3 << 28)));
++FUNCFG_DESCL(SGMII0, PIN_CFG(SGMII, SCU47C, BIT(0), 1 << 0));
++FUNCFG_DESCL(PCIERC2_PERST, PIN_CFG(PCIERC2PERST, SCU908, BIT(1), 1 << 1));
++FUNCFG_DESCL(PORTC_MODE, PIN_CFG(USB2CUD, SCU3B0, GENMASK(1, 0), 0),
++ PIN_CFG(USB2CD, SCU3B0, GENMASK(1, 0), 1 << 0),
++ PIN_CFG(USB2CH, SCU3B0, GENMASK(1, 0), 2 << 0),
++ PIN_CFG(USB2CU, SCU3B0, GENMASK(1, 0), 3 << 0));
++FUNCFG_DESCL(PORTD_MODE, PIN_CFG(USB2DD, SCU3B0, GENMASK(3, 2), 1 << 2),
++ PIN_CFG(USB2DH, SCU3B0, GENMASK(3, 2), 2 << 2));
++
++static const struct aspeed_g7_pincfg pin_cfg[] = {
++ PINCFG_PIN(C16), PINCFG_PIN(C14), PINCFG_PIN(C11),
++ PINCFG_PIN(D9), PINCFG_PIN(F14), PINCFG_PIN(D10),
++ PINCFG_PIN(C12), PINCFG_PIN(C13), PINCFG_PIN(AA24),
++ PINCFG_PIN(AB24), PINCFG_PIN(AB23), PINCFG_PIN(AC22),
++ PINCFG_PIN(AD22), PINCFG_PIN(AE21), PINCFG_PIN(AF20),
++ PINCFG_PIN(AE20), PINCFG_PIN(AD20), PINCFG_PIN(Y23),
++ PINCFG_PIN(W23), PINCFG_PIN(AD19), PINCFG_PIN(AC20),
++ PINCFG_PIN(AA23), PINCFG_PIN(AB22), PINCFG_PIN(AB21),
++ PINCFG_PIN(AA22), PINCFG_PIN(Y22), PINCFG_PIN(W22),
++ PINCFG_PIN(AF18), PINCFG_PIN(AE18), PINCFG_PIN(AD18),
++ PINCFG_PIN(AC19), PINCFG_PIN(AB20), PINCFG_PIN(AF17),
++ PINCFG_PIN(AA16), PINCFG_PIN(Y16), PINCFG_PIN(V17),
++ PINCFG_PIN(J13), PINCFG_PIN(AB16), PINCFG_PIN(AC16),
++ PINCFG_PIN(AF16), PINCFG_PIN(AA15), PINCFG_PIN(AB15),
++ PINCFG_PIN(AC15), PINCFG_PIN(AD15), PINCFG_PIN(Y15),
++ PINCFG_PIN(AA14), PINCFG_PIN(W16), PINCFG_PIN(V16),
++ PINCFG_PIN(AB18), PINCFG_PIN(AC18), PINCFG_PIN(K13),
++ PINCFG_PIN(AA17), PINCFG_PIN(AB17), PINCFG_PIN(AD16),
++ PINCFG_PIN(AC17), PINCFG_PIN(AD17), PINCFG_PIN(AE16),
++ PINCFG_PIN(AE17), PINCFG_PIN(U23), PINCFG_PIN(T24),
++ PINCFG_PIN(HOLE0), PINCFG_PIN(HOLE1), PINCFG_PIN(HOLE2),
++ PINCFG_PIN(HOLE3), PINCFG_PIN(AC24), PINCFG_PIN(AD24),
++ PINCFG_PIN(AE23), PINCFG_PIN(AE19), PINCFG_PIN(AF23),
++ PINCFG_PIN(Y25), PINCFG_PIN(AA25), PINCFG_PIN(AF19),
++ PINCFG_PIN(AB25), PINCFG_PIN(AC25), PINCFG_PIN(AD25),
++ PINCFG_PIN(V22), PINCFG_PIN(AE25), PINCFG_PIN(V21),
++ PINCFG_PIN(AF21), PINCFG_PIN(AF25), PINCFG_PIN(AF26),
++ PINCFG_PIN(AE26), PINCFG_PIN(W21), PINCFG_PIN(AD26),
++ PINCFG_PIN(Y21), PINCFG_PIN(AC26), PINCFG_PIN(AA26),
++ PINCFG_PIN(AB26), PINCFG_PIN(T26), PINCFG_PIN(AA20),
++ PINCFG_PIN(V23), PINCFG_PIN(W24), PINCFG_PIN(R26),
++ PINCFG_PIN(AA21), PINCFG_PIN(P26), PINCFG_PIN(Y24),
++ PINCFG_PIN(B16), PINCFG_PIN(D14), PINCFG_PIN(B15),
++ PINCFG_PIN(B14), PINCFG_PIN(C17), PINCFG_PIN(B13),
++ PINCFG_PIN(E14), PINCFG_PIN(C15), PINCFG_PIN(D24),
++ PINCFG_PIN(B23), PINCFG_PIN(B22), PINCFG_PIN(C23),
++ PINCFG_PIN(B18), PINCFG_PIN(B21), PINCFG_PIN(B17),
++ PINCFG_PIN(B19), PINCFG_PIN(B26), PINCFG_PIN(A25),
++ PINCFG_PIN(A24), PINCFG_PIN(B24), PINCFG_PIN(E26),
++ PINCFG_PIN(A21), PINCFG_PIN(A19), PINCFG_PIN(A18),
++ PINCFG_PIN(D26), PINCFG_PIN(C26), PINCFG_PIN(A23),
++ PINCFG_PIN(B25), PINCFG_PIN(A22), PINCFG_PIN(F26),
++ PINCFG_PIN(A26), PINCFG_PIN(A14), PINCFG_PIN(E10),
++ PINCFG_PIN(E13), PINCFG_PIN(D12), PINCFG_PIN(F10),
++ PINCFG_PIN(E11), PINCFG_PIN(F11), PINCFG_PIN(F13),
++ PINCFG_PIN(N15), PINCFG_PIN(C20), PINCFG_PIN(C19),
++ PINCFG_PIN(A8), PINCFG_PIN(R14), PINCFG_PIN(A7),
++ PINCFG_PIN(P14), PINCFG_PIN(D20), PINCFG_PIN(A6),
++ PINCFG_PIN(B6), PINCFG_PIN(N14), PINCFG_PIN(B7),
++ PINCFG_PIN(B8), PINCFG_PIN(B9), PINCFG_PIN(M14),
++ PINCFG_PIN(J11), PINCFG_PIN(E7), PINCFG_PIN(D19),
++ PINCFG_PIN(B11), PINCFG_PIN(D15), PINCFG_PIN(B12),
++ PINCFG_PIN(B10), PINCFG_PIN(P13), PINCFG_PIN(C18),
++ PINCFG_PIN(C6), PINCFG_PIN(C7), PINCFG_PIN(D7),
++ PINCFG_PIN(N13), PINCFG_PIN(C8), PINCFG_PIN(C9),
++ PINCFG_PIN(C10), PINCFG_PIN(M16), PINCFG_PIN(A15),
++ PINCFG_PIN(G11), PINCFG_PIN(H7), PINCFG_PIN(H8),
++ PINCFG_PIN(H9), PINCFG_PIN(H10), PINCFG_PIN(H11),
++ PINCFG_PIN(J9), PINCFG_PIN(J10), PINCFG_PIN(E9),
++ PINCFG_PIN(F9), PINCFG_PIN(F8), PINCFG_PIN(M13),
++ PINCFG_PIN(F7), PINCFG_PIN(D8), PINCFG_PIN(E8),
++ PINCFG_PIN(L12), PINCFG_PIN(F12), PINCFG_PIN(E12),
++ PINCFG_PIN(J12), PINCFG_PIN(G7), PINCFG_PIN(G8),
++ PINCFG_PIN(G9), PINCFG_PIN(G10), PINCFG_PIN(K12),
++ PINCFG_PIN(W17), PINCFG_PIN(V18), PINCFG_PIN(W18),
++ PINCFG_PIN(Y17), PINCFG_PIN(AA18), PINCFG_PIN(AA13),
++ PINCFG_PIN(Y18), PINCFG_PIN(AA12), PINCFG_PIN(W20),
++ PINCFG_PIN(V20), PINCFG_PIN(Y11), PINCFG_PIN(V14),
++ PINCFG_PIN(V19), PINCFG_PIN(W14), PINCFG_PIN(Y20),
++ PINCFG_PIN(AB19), PINCFG_PIN(U26), PINCFG_PIN(U25),
++ PINCFG_PIN(V26), PINCFG_PIN(W26), PINCFG_PIN(Y26),
++ PINCFG_PIN(W25), PINCFG_PIN(V24), PINCFG_PIN(U24),
++ PINCFG_PIN(SGMII0), PINCFG_PIN(PCIERC2_PERST),
++ PINCFG_PIN(PORTC_MODE), PINCFG_PIN(PORTD_MODE),
++};
++
++static int aspeed_g7_soc1_dt_node_to_map(struct pinctrl_dev *pctldev,
++ struct device_node *np_config,
++ struct pinctrl_map **map, u32 *num_maps)
++{
++ return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
++ PIN_MAP_TYPE_INVALID);
++}
++
++static void aspeed_g7_soc1_dt_free_map(struct pinctrl_dev *pctldev,
++ struct pinctrl_map *map, u32 num_maps)
++{
++ kfree(map);
++}
++
++static const struct pinctrl_ops aspeed_g7_soc1_pinctrl_ops = {
++ .get_groups_count = aspeed_pinctrl_get_groups_count,
++ .get_group_name = aspeed_pinctrl_get_group_name,
++ .get_group_pins = aspeed_pinctrl_get_group_pins,
++ .pin_dbg_show = aspeed_pinctrl_pin_dbg_show,
++ .dt_node_to_map = aspeed_g7_soc1_dt_node_to_map,
++ .dt_free_map = aspeed_g7_soc1_dt_free_map,
++};
++
++static const struct pinmux_ops aspeed_g7_soc1_pinmux_ops = {
++ .get_functions_count = aspeed_pinmux_get_fn_count,
++ .get_function_name = aspeed_pinmux_get_fn_name,
++ .get_function_groups = aspeed_pinmux_get_fn_groups,
++ .set_mux = aspeed_g7_pinmux_set_mux,
++ .gpio_request_enable = aspeed_g7_gpio_request_enable,
++ .strict = true,
++};
++
++static const struct pinconf_ops aspeed_g7_soc1_pinconf_ops = {
++ .is_generic = true,
++ .pin_config_get = aspeed_pin_config_get,
++ .pin_config_set = aspeed_pin_config_set,
++ .pin_config_group_get = aspeed_pin_config_group_get,
++ .pin_config_group_set = aspeed_pin_config_group_set,
++};
++
++/* pinctrl_desc */
++static struct pinctrl_desc aspeed_g7_soc1_pinctrl_desc = {
++ .name = "aspeed-g7-soc1-pinctrl",
++ .pins = aspeed_g7_soc1_pins,
++ .npins = ARRAY_SIZE(aspeed_g7_soc1_pins),
++ .pctlops = &aspeed_g7_soc1_pinctrl_ops,
++ .pmxops = &aspeed_g7_soc1_pinmux_ops,
++ .confops = &aspeed_g7_soc1_pinconf_ops,
++ .owner = THIS_MODULE,
++};
++
++static struct aspeed_pin_config aspeed_g7_configs[] = {
++ /* GPIOA */
++ { PIN_CONFIG_DRIVE_STRENGTH, { C16, C16 }, SCU4C0, GENMASK(1, 0) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C14, C14 }, SCU4C0, GENMASK(3, 2) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C11, C11 }, SCU4C0, GENMASK(5, 4) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { D9, D9 }, SCU4C0, GENMASK(7, 6) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { F14, F14 }, SCU4C0, GENMASK(9, 8) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { D10, D10 }, SCU4C0, GENMASK(11, 10) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C12, C12 }, SCU4C0, GENMASK(13, 12) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C13, C13 }, SCU4C0, GENMASK(15, 14) },
++ { PIN_CONFIG_POWER_SOURCE, { C16, C13 }, SCU4A0, BIT_MASK(4) },
++ /* GPIOI */
++ { PIN_CONFIG_DRIVE_STRENGTH, { AC24, AC24 }, SCU4C0, GENMASK(17, 16) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AD24, AD24 }, SCU4C0, GENMASK(19, 18) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AE23, AE23 }, SCU4C0, GENMASK(21, 20) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AE19, AE19 }, SCU4C0, GENMASK(23, 22) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AF23, AF23 }, SCU4C0, GENMASK(25, 24) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { Y25, Y25 }, SCU4C0, GENMASK(27, 26) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AA25, AA25 }, SCU4C0, GENMASK(29, 28) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AF19, AF19 }, SCU4C0, GENMASK(31, 30) },
++ { PIN_CONFIG_POWER_SOURCE, { AC24, AF19 }, SCU4A0, BIT_MASK(12) },
++ /* GPIOJ */
++ { PIN_CONFIG_DRIVE_STRENGTH, { AB25, AB25 }, SCU4C4, GENMASK(1, 0) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AC25, AC25 }, SCU4C4, GENMASK(3, 2) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AD25, AD25 }, SCU4C4, GENMASK(5, 4) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { V22, V22 }, SCU4C4, GENMASK(7, 6) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AE25, AE25 }, SCU4C4, GENMASK(9, 8) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { V21, V21 }, SCU4C4, GENMASK(11, 10) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AF21, AF21 }, SCU4C4, GENMASK(13, 12) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AF25, AF25 }, SCU4C4, GENMASK(15, 14) },
++ /* GPIOK */
++ { PIN_CONFIG_DRIVE_STRENGTH, { AF26, AF26 }, SCU4C4, GENMASK(17, 16) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AE26, AE26 }, SCU4C4, GENMASK(19, 18) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { W21, W21 }, SCU4C4, GENMASK(21, 20) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AD26, AD26 }, SCU4C4, GENMASK(23, 22) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { Y21, Y21 }, SCU4C4, GENMASK(25, 24) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AC26, AC26 }, SCU4C4, GENMASK(27, 26) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AA26, AA26 }, SCU4C4, GENMASK(29, 28) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AB26, AB26 }, SCU4C4, GENMASK(31, 30) },
++ /* GPIOL */
++ { PIN_CONFIG_DRIVE_STRENGTH, { T26, T26 }, SCU4C8, GENMASK(1, 0) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AA20, AA20 }, SCU4C8, GENMASK(3, 2) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { V23, V23 }, SCU4C8, GENMASK(5, 4) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { W24, W24 }, SCU4C8, GENMASK(7, 6) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { R26, R26 }, SCU4C8, GENMASK(9, 8) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { AA21, AA21 }, SCU4C8, GENMASK(11, 10) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { P26, P26 }, SCU4C8, GENMASK(13, 12) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { Y24, Y24 }, SCU4C8, GENMASK(15, 14) },
++ /* GPIOM */
++ { PIN_CONFIG_DRIVE_STRENGTH, { B16, B16 }, SCU4C8, GENMASK(17, 16) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { D14, D14 }, SCU4C8, GENMASK(19, 18) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B15, B15 }, SCU4C8, GENMASK(21, 20) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B14, B14 }, SCU4C8, GENMASK(23, 22) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C17, C17 }, SCU4C8, GENMASK(25, 24) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B13, B13 }, SCU4C8, GENMASK(27, 26) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { E14, E14 }, SCU4C8, GENMASK(29, 28) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C15, C15 }, SCU4C8, GENMASK(31, 30) },
++ { PIN_CONFIG_POWER_SOURCE, { B16, C15 }, SCU4A0, BIT_MASK(16) },
++ /* GPION */
++ { PIN_CONFIG_DRIVE_STRENGTH, { D24, D24 }, SCU4CC, GENMASK(1, 0) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B23, B23 }, SCU4CC, GENMASK(3, 2) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B22, B22 }, SCU4CC, GENMASK(5, 4) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C23, C23 }, SCU4CC, GENMASK(7, 6) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B18, B18 }, SCU4CC, GENMASK(9, 8) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B21, B21 }, SCU4CC, GENMASK(11, 10) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B17, B17 }, SCU4CC, GENMASK(13, 12) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B19, B19 }, SCU4CC, GENMASK(15, 14) },
++ { PIN_CONFIG_POWER_SOURCE, { D24, B19 }, SCU4A0, BIT_MASK(17) },
++ /* GPIOO */
++ { PIN_CONFIG_DRIVE_STRENGTH, { B26, B26 }, SCU4CC, GENMASK(17, 16) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A25, A25 }, SCU4CC, GENMASK(19, 18) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A24, A24 }, SCU4CC, GENMASK(21, 20) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B24, B24 }, SCU4CC, GENMASK(23, 22) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { E26, E26 }, SCU4CC, GENMASK(25, 24) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A21, A21 }, SCU4CC, GENMASK(27, 26) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A19, A19 }, SCU4CC, GENMASK(29, 28) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A18, A18 }, SCU4CC, GENMASK(31, 30) },
++ { PIN_CONFIG_POWER_SOURCE, { B26, A18 }, SCU4A0, BIT_MASK(18) },
++ /* GPIOP */
++ { PIN_CONFIG_DRIVE_STRENGTH, { D26, D26 }, SCU4D0, GENMASK(1, 0) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C26, C26 }, SCU4D0, GENMASK(3, 2) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A23, A23 }, SCU4D0, GENMASK(5, 4) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B25, B25 }, SCU4D0, GENMASK(7, 6) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A22, A22 }, SCU4D0, GENMASK(9, 8) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { F26, F26 }, SCU4D0, GENMASK(11, 10) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A26, A26 }, SCU4D0, GENMASK(13, 12) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A14, A14 }, SCU4D0, GENMASK(15, 14) },
++ { PIN_CONFIG_POWER_SOURCE, { D26, A14 }, SCU4A0, BIT_MASK(19) },
++ /* GPIOQ */
++ { PIN_CONFIG_DRIVE_STRENGTH, { E10, E10 }, SCU4D0, GENMASK(17, 16) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { E13, E13 }, SCU4D0, GENMASK(19, 18) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { D12, D12 }, SCU4D0, GENMASK(21, 20) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { F10, F10 }, SCU4D0, GENMASK(23, 22) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { E11, E11 }, SCU4D0, GENMASK(25, 24) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { F11, F11 }, SCU4D0, GENMASK(27, 26) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { F13, F13 }, SCU4D0, GENMASK(29, 28) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { N15, N15 }, SCU4D0, GENMASK(31, 30) },
++ /* GPIOR */
++ { PIN_CONFIG_DRIVE_STRENGTH, { C20, C20 }, SCU4D4, GENMASK(1, 0) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C19, C19 }, SCU4D4, GENMASK(3, 2) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A8, A8 }, SCU4D4, GENMASK(5, 4) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { R14, R14 }, SCU4D4, GENMASK(7, 6) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A7, A7 }, SCU4D4, GENMASK(9, 8) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { P14, P14 }, SCU4D4, GENMASK(11, 10) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { D20, D20 }, SCU4D4, GENMASK(13, 12) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A6, A6 }, SCU4D4, GENMASK(15, 14) },
++ { PIN_CONFIG_POWER_SOURCE, { C20, A6 }, SCU4A0, BIT_MASK(21) },
++ /* GPIOS */
++ { PIN_CONFIG_DRIVE_STRENGTH, { B6, B6 }, SCU4D4, GENMASK(17, 16) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { N14, N14 }, SCU4D4, GENMASK(19, 18) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B7, B7 }, SCU4D4, GENMASK(21, 20) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B8, B8 }, SCU4D4, GENMASK(23, 22) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B9, B9 }, SCU4D4, GENMASK(25, 24) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { M14, M14 }, SCU4D4, GENMASK(27, 26) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { J11, J11 }, SCU4D4, GENMASK(29, 28) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { E7, E7 }, SCU4D4, GENMASK(31, 30) },
++ { PIN_CONFIG_POWER_SOURCE, { B6, E7 }, SCU4A0, BIT_MASK(22) },
++ /* GPIOT */
++ { PIN_CONFIG_DRIVE_STRENGTH, { D19, D19 }, SCU4D8, GENMASK(1, 0) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B11, B11 }, SCU4D8, GENMASK(3, 2) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { D15, D15 }, SCU4D8, GENMASK(5, 4) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B12, B12 }, SCU4D8, GENMASK(7, 6) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { B10, B10 }, SCU4D8, GENMASK(9, 8) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { P13, P13 }, SCU4D8, GENMASK(11, 10) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C18, C18 }, SCU4D8, GENMASK(13, 12) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C6, C6 }, SCU4D8, GENMASK(15, 14) },
++ { PIN_CONFIG_POWER_SOURCE, { D19, C6 }, SCU4A0, BIT_MASK(23) },
++ /* GPIOU */
++ { PIN_CONFIG_DRIVE_STRENGTH, { C7, C7 }, SCU4D8, GENMASK(17, 16) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { D7, D7 }, SCU4D8, GENMASK(19, 18) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { N13, N13 }, SCU4D8, GENMASK(21, 20) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C8, C8 }, SCU4D8, GENMASK(23, 22) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C9, C9 }, SCU4D8, GENMASK(25, 24) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { C10, C10 }, SCU4D8, GENMASK(27, 26) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { M16, M16 }, SCU4D8, GENMASK(29, 28) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { A15, A15 }, SCU4D8, GENMASK(31, 30) },
++ { PIN_CONFIG_POWER_SOURCE, { C7, A15 }, SCU4A0, BIT_MASK(24) },
++ /* GPIOW */
++ { PIN_CONFIG_DRIVE_STRENGTH, { E9, E9 }, SCU4DC, GENMASK(1, 0) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { F9, F9 }, SCU4DC, GENMASK(3, 2) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { F8, F8 }, SCU4DC, GENMASK(5, 4) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { M13, M13 }, SCU4DC, GENMASK(7, 6) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { F7, F7 }, SCU4DC, GENMASK(9, 8) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { D8, D8 }, SCU4DC, GENMASK(11, 10) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { E8, E8 }, SCU4DC, GENMASK(13, 12) },
++ { PIN_CONFIG_DRIVE_STRENGTH, { L12, L12 }, SCU4DC, GENMASK(15, 14) },
++ { PIN_CONFIG_POWER_SOURCE, { E9, L12 }, SCU4A0, BIT_MASK(26) },
++};
++
++static const struct aspeed_pin_config_map aspeed_g7_pin_config_map[] = {
++ { PIN_CONFIG_BIAS_PULL_DOWN, 0, 1, BIT_MASK(0)},
++ { PIN_CONFIG_BIAS_PULL_DOWN, -1, 0, BIT_MASK(0)},
++ { PIN_CONFIG_BIAS_PULL_UP, 0, 1, BIT_MASK(0)},
++ { PIN_CONFIG_BIAS_PULL_UP, -1, 0, BIT_MASK(0)},
++ { PIN_CONFIG_BIAS_DISABLE, -1, 1, BIT_MASK(0)},
++ { PIN_CONFIG_DRIVE_STRENGTH, 0, 0, GENMASK(1, 0)},
++ { PIN_CONFIG_DRIVE_STRENGTH, 1, 1, GENMASK(1, 0)},
++ { PIN_CONFIG_DRIVE_STRENGTH, 2, 2, GENMASK(1, 0)},
++ { PIN_CONFIG_DRIVE_STRENGTH, 3, 3, GENMASK(1, 0)},
++ { PIN_CONFIG_POWER_SOURCE, 3300, 0, BIT_MASK(0)},
++ { PIN_CONFIG_POWER_SOURCE, 1800, 1, BIT_MASK(0)},
++};
++
++static struct aspeed_pinctrl_data aspeed_g7_pinctrl_data = {
++ .pins = aspeed_g7_soc1_pins,
++ .npins = ARRAY_SIZE(aspeed_g7_soc1_pins),
++ .pinmux = {
++ .groups = aspeed_g7_soc1_pingroups,
++ .ngroups = ARRAY_SIZE(aspeed_g7_soc1_pingroups),
++ .functions = aspeed_g7_soc1_funcs,
++ .nfunctions = ARRAY_SIZE(aspeed_g7_soc1_funcs),
++ .configs_g7 = pin_cfg,
++ .nconfigs_g7 = ARRAY_SIZE(pin_cfg),
++ },
++ .configs = aspeed_g7_configs,
++ .nconfigs = ARRAY_SIZE(aspeed_g7_configs),
++ .confmaps = aspeed_g7_pin_config_map,
++ .nconfmaps = ARRAY_SIZE(aspeed_g7_pin_config_map),
++};
++
++static int aspeed_g7_soc1_pinctrl_probe(struct platform_device *pdev)
++{
++ return aspeed_pinctrl_probe(pdev, &aspeed_g7_soc1_pinctrl_desc,
++ &aspeed_g7_pinctrl_data);
++}
++
++static const struct of_device_id aspeed_g7_soc1_pinctrl_match[] = {
++ { .compatible = "aspeed,ast2700a0-soc1-pinctrl" },
++ {}
++};
++MODULE_DEVICE_TABLE(of, aspeed_g7_soc1_pinctrl_match);
++
++static struct platform_driver aspeed_g7_soc1_pinctrl_driver = {
++ .probe = aspeed_g7_soc1_pinctrl_probe,
++ .driver = {
++ .name = "aspeed-g7a0-soc1-pinctrl",
++ .of_match_table = aspeed_g7_soc1_pinctrl_match,
++ .suppress_bind_attrs = true,
++ },
++};
++
++static int __init aspeed_g7_soc1_pinctrl_register(void)
++{
++ return platform_driver_register(&aspeed_g7_soc1_pinctrl_driver);
++}
++arch_initcall(aspeed_g7_soc1_pinctrl_register);
+diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+index 9c6ee46ac..83d62506e 100644
+--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
+@@ -285,6 +285,32 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
+ return 0;
+ }
+
++int aspeed_g7_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
++ unsigned int group)
++{
++ int i, j;
++ int pin;
++ const struct aspeed_g7_funcfg *funcfg;
++ struct aspeed_pinctrl_data *pinctrl = pinctrl_dev_get_drvdata(pctldev);
++ const struct aspeed_pin_group *pingroup =
++ &pinctrl->pinmux.groups[group];
++ const struct aspeed_g7_pincfg *pin_cfg = pinctrl->pinmux.configs_g7;
++
++ for (i = 0; i < pingroup->npins; i++) {
++ pin = pingroup->pins[i];
++ funcfg = pin_cfg[pin].funcfg;
++
++ for (j = 0; j < pin_cfg[pin].nfuncfg; j++) {
++ if (strcmp(funcfg[j].name, pingroup->name) == 0) {
++ regmap_update_bits(pinctrl->scu, funcfg[j].reg,
++ funcfg[j].mask,
++ funcfg[j].val);
++ }
++ }
++ }
++ return 0;
++}
++
+ static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr)
+ {
+ /*
+@@ -440,6 +466,27 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
+ return 0;
+ }
+
++int aspeed_g7_gpio_request_enable(struct pinctrl_dev *pctldev,
++ struct pinctrl_gpio_range *range,
++ unsigned int offset)
++{
++ int i;
++ struct aspeed_pinctrl_data *pinctrl = pinctrl_dev_get_drvdata(pctldev);
++ const struct aspeed_g7_pincfg *pin_cfg = pinctrl->pinmux.configs_g7;
++ const struct aspeed_g7_funcfg *funcfg = pin_cfg[offset].funcfg;
++
++ for (i = 0; i < pin_cfg[offset].nfuncfg; i++) {
++ if (!strncmp(funcfg[i].name, "GPI", 3)) {
++ regmap_update_bits(pinctrl->scu, funcfg[i].reg,
++ funcfg[i].mask, funcfg[i].val);
++ break;
++ }
++ regmap_update_bits(pinctrl->scu, funcfg[i].reg, funcfg[i].mask,
++ 0);
++ }
++ return 0;
++}
++
+ int aspeed_pinctrl_probe(struct platform_device *pdev,
+ struct pinctrl_desc *pdesc,
+ struct aspeed_pinctrl_data *pdata)
+diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.h b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
+index 4dcde3bc2..0d7c35af1 100644
+--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.h
++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
+@@ -98,9 +98,14 @@ int aspeed_pinmux_get_fn_groups(struct pinctrl_dev *pctldev,
+ unsigned int * const num_groups);
+ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
+ unsigned int group);
++int aspeed_g7_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
++ unsigned int group);
+ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset);
++int aspeed_g7_gpio_request_enable(struct pinctrl_dev *pctldev,
++ struct pinctrl_gpio_range *range,
++ unsigned int offset);
+ int aspeed_pinctrl_probe(struct platform_device *pdev,
+ struct pinctrl_desc *pdesc,
+ struct aspeed_pinctrl_data *pdata);
+diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h
+index aaa78a613..09d1658b0 100644
+--- a/drivers/pinctrl/aspeed/pinmux-aspeed.h
++++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h
+@@ -792,6 +792,33 @@ struct aspeed_pinmux_ops {
+ const struct aspeed_sig_expr *expr, bool enabled);
+ };
+
++struct aspeed_g7_funcfg {
++ char *name;
++ u32 reg;
++ u32 mask;
++ int val;
++};
++
++struct aspeed_g7_pincfg {
++ struct aspeed_g7_funcfg *funcfg;
++ unsigned int nfuncfg;
++};
++
++#define PIN_CFG(cfg_name, cfg_reg, cfg_mask, cfg_val) \
++ { \
++ .name = #cfg_name, .reg = cfg_reg, .mask = cfg_mask, \
++ .val = cfg_val \
++ }
++#define FUNCFG_SYM(pin) funcfg_ ## pin
++#define FUNCFG_PTR(pin) (&FUNCFG_SYM(pin))
++
++#define FUNCFG_DESCL(pin, ...) \
++ static const struct aspeed_g7_funcfg FUNCFG_SYM(pin)[] = { __VA_ARGS__ }
++
++#define PINCFG_PIN(pin) \
++ [pin] = { .funcfg = (struct aspeed_g7_funcfg *)FUNCFG_PTR(pin), \
++ .nfuncfg = ARRAY_SIZE(FUNCFG_SYM(pin)) }
++
+ struct aspeed_pinmux_data {
+ struct device *dev;
+ struct regmap *maps[ASPEED_NR_PINMUX_IPS];
+@@ -803,6 +830,10 @@ struct aspeed_pinmux_data {
+
+ const struct aspeed_pin_function *functions;
+ const unsigned int nfunctions;
++
++ const struct aspeed_g7_pincfg *configs_g7;
++ const unsigned int nconfigs_g7;
++
+ };
+
+ int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc, bool enabled,
+--
+2.34.1
+
diff --git a/recipes-kernel/linux/files/0005-pinctrl-for-2700.patch b/recipes-kernel/linux/files/0005-pinctrl-for-2700.patch
deleted file mode 100644
index 2176c1c..0000000
--- a/recipes-kernel/linux/files/0005-pinctrl-for-2700.patch
+++ /dev/null
@@ -1,2941 +0,0 @@
-From 550a4821779bfc63d563f2923d8315d02e43a396 Mon Sep 17 00:00:00 2001
-From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
-Date: Thu, 14 Nov 2024 16:25:54 +0800
-Subject: [PATCH] pinctrl for 2700
-
-Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
----
- drivers/pinctrl/aspeed/Kconfig | 8 +
- drivers/pinctrl/aspeed/Makefile | 1 +
- .../pinctrl/aspeed/pinctrl-aspeed-g7-soc0.c | 458 ++++
- .../pinctrl/aspeed/pinctrl-aspeed-g7-soc1.c | 2292 +++++++++++++++++
- drivers/pinctrl/aspeed/pinctrl-aspeed.c | 47 +
- drivers/pinctrl/aspeed/pinctrl-aspeed.h | 5 +
- drivers/pinctrl/aspeed/pinmux-aspeed.h | 31 +
- 7 files changed, 2842 insertions(+)
- create mode 100644 drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc0.c
- create mode 100644 drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc1.c
-
-diff --git a/drivers/pinctrl/aspeed/Kconfig b/drivers/pinctrl/aspeed/Kconfig
-index 1a4e5b9ed471..9fd020528917 100644
---- a/drivers/pinctrl/aspeed/Kconfig
-+++ b/drivers/pinctrl/aspeed/Kconfig
-@@ -31,3 +31,11 @@ config PINCTRL_ASPEED_G6
- help
- Say Y here to enable pin controller support for Aspeed's 6th
- generation SoCs. GPIO is provided by a separate GPIO driver.
-+
-+config PINCTRL_ASPEED_G7
-+ bool "Aspeed G7 SoC pin control"
-+ depends on (MACH_ASPEED_G7 || COMPILE_TEST) && OF
-+ select PINCTRL_ASPEED
-+ help
-+ Say Y here to enable pin controller support for Aspeed's 7th
-+ generation SoCs. GPIO is provided by a separate GPIO driver.
-diff --git a/drivers/pinctrl/aspeed/Makefile b/drivers/pinctrl/aspeed/Makefile
-index 489ea1778353..012e62e4e311 100644
---- a/drivers/pinctrl/aspeed/Makefile
-+++ b/drivers/pinctrl/aspeed/Makefile
-@@ -6,3 +6,4 @@ obj-$(CONFIG_PINCTRL_ASPEED) += pinctrl-aspeed.o pinmux-aspeed.o
- obj-$(CONFIG_PINCTRL_ASPEED_G4) += pinctrl-aspeed-g4.o
- obj-$(CONFIG_PINCTRL_ASPEED_G5) += pinctrl-aspeed-g5.o
- obj-$(CONFIG_PINCTRL_ASPEED_G6) += pinctrl-aspeed-g6.o
-+obj-$(CONFIG_PINCTRL_ASPEED_G7) += pinctrl-aspeed-g7-soc0.o pinctrl-aspeed-g7-soc1.o
-diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc0.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc0.c
-new file mode 100644
-index 000000000000..ce4eeac09920
---- /dev/null
-+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc0.c
-@@ -0,0 +1,458 @@
-+// SPDX-License-Identifier: GPL-2.0
-+
-+#include <linux/bits.h>
-+#include <linux/device.h>
-+#include <linux/gpio/driver.h>
-+#include <linux/interrupt.h>
-+#include <linux/irq.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/module.h>
-+#include <linux/mod_devicetable.h>
-+#include <linux/pinctrl/machine.h>
-+#include <linux/pinctrl/pinconf.h>
-+#include <linux/pinctrl/pinconf-generic.h>
-+#include <linux/pinctrl/pinctrl.h>
-+#include <linux/pinctrl/pinmux.h>
-+#include <linux/platform_device.h>
-+#include <linux/property.h>
-+#include <linux/regmap.h>
-+#include "pinctrl-aspeed.h"
-+#include "../pinctrl-utils.h"
-+
-+#define SCU200 0x200 /* System Reset Control #1 */
-+
-+#define SCU400 0x400 /* Multi-function Pin Control #1 */
-+#define SCU404 0x404 /* Multi-function Pin Control #2 */
-+#define SCU408 0x408 /* Multi-function Pin Control #3 */
-+#define SCU40C 0x40C /* Multi-function Pin Control #3 */
-+#define SCU410 0x410 /* USB Multi-function Control Register */
-+#define SCU414 0x414 /* VGA Function Control Register */
-+
-+#define SCU480 0x480 /* GPIO18D0 IO Control Register */
-+#define SCU484 0x484 /* GPIO18D1 IO Control Register */
-+#define SCU488 0x488 /* GPIO18D2 IO Control Register */
-+#define SCU48C 0x48c /* GPIO18D3 IO Control Register */
-+#define SCU490 0x490 /* GPIO18D4 IO Control Register */
-+#define SCU494 0x494 /* GPIO18D5 IO Control Register */
-+#define SCU498 0x498 /* GPIO18D6 IO Control Register */
-+#define SCU49C 0x49c /* GPIO18D7 IO Control Register */
-+
-+enum {
-+ AC14,
-+ AE15,
-+ AD14,
-+ AE14,
-+ AF14,
-+ AB13,
-+ AB14,
-+ AF15,
-+ AF13,
-+ AC13,
-+ AD13,
-+ AE13,
-+ PORTA_U3, // SCU410[1:0]
-+ PORTA_U2, // SCU410[3:2]
-+ PORTB_U3, // SCU410[5:4]
-+ PORTB_U2, // SCU410[7:6]
-+ PORTA_U3_XHCI, // SCU410[9]
-+ PORTA_U2_XHCI, // SCU410[9]
-+ PORTB_U3_XHCI, // SCU410[10]
-+ PORTB_U2_XHCI, // SCU410[10]
-+ PORTA_MODE, // SCU410[25:24]
-+ PORTB_MODE, // SCU410[29:28]
-+ PORTA_U2_PHY,
-+ PORTA_U3_PHY,
-+ PORTB_U2_PHY,
-+ PORTB_U3_PHY,
-+ JTAG_PORT,
-+ PCIERC0_PERST,
-+ PCIERC1_PERST,
-+};
-+
-+GROUP_DECL(EMMCG1, AC14, AE15, AD14);
-+GROUP_DECL(EMMCG4, AC14, AE15, AD14, AE14, AF14, AB13);
-+GROUP_DECL(EMMCG8, AC14, AE15, AD14, AE14, AF14, AB13, AF13, AC13, AD13, AE13);
-+GROUP_DECL(EMMCWPN, AF15);
-+GROUP_DECL(EMMCCDN, AB14);
-+GROUP_DECL(VGADDC, AD13, AE13);
-+GROUP_DECL(VB1, AC14, AE15, AD14, AE14);
-+GROUP_DECL(VB0, AF15, AB14, AF13, AC13);
-+//USB3A
-+//xhci: BMC/PCIE, vHub/PHY/EXT port
-+GROUP_DECL(USB3AXHD, PORTA_U3, PORTA_U3_XHCI);
-+GROUP_DECL(USB3AXHPD, PORTA_U3, PORTA_U3_XHCI);
-+GROUP_DECL(USB3AXH, PORTA_U3, PORTA_U3_XHCI, PORTA_U3_PHY);
-+GROUP_DECL(USB3AXHP, PORTA_U3, PORTA_U3_XHCI, PORTA_U3_PHY);
-+GROUP_DECL(USB3AXH2B, PORTA_U3, PORTA_U3_XHCI, PORTB_U3_PHY);
-+GROUP_DECL(USB3AXHP2B, PORTA_U3, PORTA_U3_XHCI, PORTB_U3_PHY);
-+
-+//USB2A
-+//xhci: BMC/PCIE, vHub/PHY/EXT port
-+GROUP_DECL(USB2AXHD1, PORTA_U2, PORTA_U2_XHCI);
-+GROUP_DECL(USB2AXHPD1, PORTA_U2, PORTA_U2_XHCI);
-+GROUP_DECL(USB2AXH, PORTA_U2, PORTA_U2_XHCI, PORTA_U2_PHY);
-+GROUP_DECL(USB2AXHP, PORTA_U2, PORTA_U2_XHCI, PORTA_U2_PHY);
-+GROUP_DECL(USB2AXH2B, PORTA_U2, PORTA_U2_XHCI, PORTB_U2_PHY);
-+GROUP_DECL(USB2AXHP2B, PORTA_U2, PORTA_U2_XHCI, PORTB_U2_PHY);
-+// vhub to phy
-+GROUP_DECL(USB2AD1, PORTA_U2, PORTA_U2_PHY);
-+//ehci
-+GROUP_DECL(USB2AHPD0, PORTA_MODE);
-+GROUP_DECL(USB2AH, PORTA_MODE, PORTA_U2_PHY);
-+GROUP_DECL(USB2AHP, PORTA_MODE, PORTA_U2_PHY);
-+GROUP_DECL(USB2AD0, PORTA_MODE, PORTA_U2_PHY);
-+
-+//USB3B
-+//xhci: BMC/PCIE, vHub/PHY/EXT port
-+GROUP_DECL(USB3BXHD, PORTB_U3, PORTB_U3_XHCI);
-+GROUP_DECL(USB3BXHPD, PORTB_U3, PORTB_U3_XHCI);
-+GROUP_DECL(USB3BXH, PORTB_U3, PORTB_U3_XHCI, PORTB_U3_PHY);
-+GROUP_DECL(USB3BXHP, PORTB_U3, PORTB_U3_XHCI, PORTB_U3_PHY);
-+GROUP_DECL(USB3BXH2A, PORTB_U3, PORTB_U3_XHCI, PORTA_U3_PHY);
-+GROUP_DECL(USB3BXHP2A, PORTB_U3, PORTB_U3_XHCI, PORTA_U3_PHY);
-+
-+//USB2B
-+//xhci: BMC/PCIE, vHub/PHY/EXT port
-+GROUP_DECL(USB2BXHD1, PORTB_U2, PORTB_U2_XHCI);
-+GROUP_DECL(USB2BXHPD1, PORTB_U2, PORTB_U2_XHCI);
-+GROUP_DECL(USB2BXH, PORTB_U2, PORTB_U2_XHCI, PORTB_U2_PHY);
-+GROUP_DECL(USB2BXHP, PORTB_U2, PORTB_U2_XHCI, PORTB_U2_PHY);
-+GROUP_DECL(USB2BXH2A, PORTB_U2, PORTB_U2_XHCI, PORTA_U2_PHY);
-+GROUP_DECL(USB2BXHP2A, PORTB_U2, PORTB_U2_XHCI, PORTA_U2_PHY);
-+// vhub to phy
-+GROUP_DECL(USB2BD1, PORTB_U2, PORTB_U2_PHY);
-+//ehci
-+GROUP_DECL(USB2BHPD0, PORTB_MODE);
-+GROUP_DECL(USB2BH, PORTB_MODE, PORTB_U2_PHY);
-+GROUP_DECL(USB2BHP, PORTB_MODE, PORTB_U2_PHY);
-+GROUP_DECL(USB2BD0, PORTB_MODE, PORTB_U2_PHY);
-+//JTAG port
-+GROUP_DECL(PSP, JTAG_PORT);
-+GROUP_DECL(SSP, JTAG_PORT);
-+GROUP_DECL(TSP, JTAG_PORT);
-+GROUP_DECL(DDR, JTAG_PORT);
-+GROUP_DECL(USB3A, JTAG_PORT);
-+GROUP_DECL(USB3B, JTAG_PORT);
-+GROUP_DECL(PCIEA, JTAG_PORT);
-+GROUP_DECL(PCIEB, JTAG_PORT);
-+GROUP_DECL(JTAGM0, JTAG_PORT);
-+//PCIE RC PERST
-+GROUP_DECL(PCIERC0PERST, PCIERC0_PERST);
-+GROUP_DECL(PCIERC1PERST, PCIERC1_PERST);
-+
-+static struct aspeed_pin_group aspeed_g7_soc0_pingroups[] = {
-+ ASPEED_PINCTRL_GROUP(EMMCG1),
-+ ASPEED_PINCTRL_GROUP(EMMCG4),
-+ ASPEED_PINCTRL_GROUP(EMMCG8),
-+ ASPEED_PINCTRL_GROUP(EMMCWPN),
-+ ASPEED_PINCTRL_GROUP(EMMCCDN),
-+ ASPEED_PINCTRL_GROUP(VGADDC),
-+ ASPEED_PINCTRL_GROUP(VB1),
-+ ASPEED_PINCTRL_GROUP(VB0),
-+ ASPEED_PINCTRL_GROUP(USB3AXHD),
-+ ASPEED_PINCTRL_GROUP(USB3AXHPD),
-+ ASPEED_PINCTRL_GROUP(USB3AXH),
-+ ASPEED_PINCTRL_GROUP(USB3AXHP),
-+ ASPEED_PINCTRL_GROUP(USB3AXH2B),
-+ ASPEED_PINCTRL_GROUP(USB3AXHP2B),
-+ ASPEED_PINCTRL_GROUP(USB2AXHD1),
-+ ASPEED_PINCTRL_GROUP(USB2AXHPD1),
-+ ASPEED_PINCTRL_GROUP(USB2AXH),
-+ ASPEED_PINCTRL_GROUP(USB2AXHP),
-+ ASPEED_PINCTRL_GROUP(USB2AXH2B),
-+ ASPEED_PINCTRL_GROUP(USB2AXHP2B),
-+ ASPEED_PINCTRL_GROUP(USB2AD1),
-+ ASPEED_PINCTRL_GROUP(USB2AHPD0),
-+ ASPEED_PINCTRL_GROUP(USB2AH),
-+ ASPEED_PINCTRL_GROUP(USB2AHP),
-+ ASPEED_PINCTRL_GROUP(USB2AD0),
-+ ASPEED_PINCTRL_GROUP(USB3BXHD),
-+ ASPEED_PINCTRL_GROUP(USB3BXHPD),
-+ ASPEED_PINCTRL_GROUP(USB3BXH),
-+ ASPEED_PINCTRL_GROUP(USB3BXHP),
-+ ASPEED_PINCTRL_GROUP(USB3BXH2A),
-+ ASPEED_PINCTRL_GROUP(USB3BXHP2A),
-+ ASPEED_PINCTRL_GROUP(USB2BXHD1),
-+ ASPEED_PINCTRL_GROUP(USB2BXHPD1),
-+ ASPEED_PINCTRL_GROUP(USB2BXH),
-+ ASPEED_PINCTRL_GROUP(USB2BXHP),
-+ ASPEED_PINCTRL_GROUP(USB2BXH2A),
-+ ASPEED_PINCTRL_GROUP(USB2BXHP2A),
-+ ASPEED_PINCTRL_GROUP(USB2BD1),
-+ ASPEED_PINCTRL_GROUP(USB2BHPD0),
-+ ASPEED_PINCTRL_GROUP(USB2BH),
-+ ASPEED_PINCTRL_GROUP(USB2BHP),
-+ ASPEED_PINCTRL_GROUP(USB2BD0),
-+ ASPEED_PINCTRL_GROUP(PSP),
-+ ASPEED_PINCTRL_GROUP(SSP),
-+ ASPEED_PINCTRL_GROUP(TSP),
-+ ASPEED_PINCTRL_GROUP(DDR),
-+ ASPEED_PINCTRL_GROUP(USB3A),
-+ ASPEED_PINCTRL_GROUP(USB3B),
-+ ASPEED_PINCTRL_GROUP(PCIEA),
-+ ASPEED_PINCTRL_GROUP(PCIEB),
-+ ASPEED_PINCTRL_GROUP(JTAGM0),
-+ ASPEED_PINCTRL_GROUP(PCIERC0PERST),
-+ ASPEED_PINCTRL_GROUP(PCIERC1PERST),
-+};
-+
-+FUNC_DECL_(EMMC, "EMMCG1", "EMMCG4", "EMMCG8", "EMMCWPN", "EMMCCDN");
-+FUNC_DECL_(VGADDC, "VGADDC");
-+FUNC_DECL_(VB, "VB0", "VB1");
-+FUNC_DECL_(USB3A, "USB3AXHD", "USB3AXHPD", "USB3AXH", "USB3AXHP", "USB3AXH2B",
-+ "USB3AXHP2B");
-+FUNC_DECL_(USB2A, "USB2AXHD1", "USB2AXHPD1", "USB2AXH", "USB2AXHP", "USB2AXH2B",
-+ "USB2AXHP2B", "USB2AD1", "USB2AHPD0", "USB2AH", "USB2AHP",
-+ "USB2AD0");
-+FUNC_DECL_(USB3B, "USB3BXHD", "USB3BXHPD", "USB3BXH", "USB3BXHP", "USB3BXH2A",
-+ "USB3BXHP2A");
-+FUNC_DECL_(USB2B, "USB2BXHD1", "USB2BXHPD1", "USB2BXH", "USB2BXHP", "USB2BXH2A",
-+ "USB2BXHP2A", "USB2BD1", "USB2BHPD0", "USB2BH", "USB2BHP",
-+ "USB2BD0");
-+FUNC_DECL_(JTAG0, "PSP", "SSP", "TSP", "DDR", "USB3A", "USB3B",
-+ "PCIEA", "PCIEB", "JTAGM0");
-+FUNC_DECL_(PCIERC, "PCIERC0PERST", "PCIERC1PERST");
-+
-+static struct aspeed_pin_function aspeed_g7_soc0_funcs[] = {
-+ ASPEED_PINCTRL_FUNC(EMMC),
-+ ASPEED_PINCTRL_FUNC(VGADDC),
-+ ASPEED_PINCTRL_FUNC(VB),
-+ ASPEED_PINCTRL_FUNC(USB3A),
-+ ASPEED_PINCTRL_FUNC(USB2A),
-+ ASPEED_PINCTRL_FUNC(USB3B),
-+ ASPEED_PINCTRL_FUNC(USB2B),
-+ ASPEED_PINCTRL_FUNC(JTAG0),
-+ ASPEED_PINCTRL_FUNC(PCIERC),
-+};
-+
-+static const struct pinctrl_pin_desc aspeed_g7_soc0_pins[] = {
-+ PINCTRL_PIN(AC14, "AC14"),
-+ PINCTRL_PIN(AE15, "AE15"),
-+ PINCTRL_PIN(AD14, "AD14"),
-+ PINCTRL_PIN(AE14, "AE14"),
-+ PINCTRL_PIN(AF14, "AF14"),
-+ PINCTRL_PIN(AB13, "AB13"),
-+ PINCTRL_PIN(AF15, "AF15"),
-+ PINCTRL_PIN(AB14, "AB14"),
-+ PINCTRL_PIN(AF13, "AF13"),
-+ PINCTRL_PIN(AC13, "AC13"),
-+ PINCTRL_PIN(AD13, "AD13"),
-+ PINCTRL_PIN(AE13, "AE13"),
-+ PINCTRL_PIN(PORTA_U3, "PORTA_U3"),
-+ PINCTRL_PIN(PORTA_U2, "PORTA_U2"),
-+ PINCTRL_PIN(PORTB_U3, "PORTB_U3"),
-+ PINCTRL_PIN(PORTB_U2, "PORTB_U2"),
-+ PINCTRL_PIN(PORTA_U3_XHCI, "PORTA_U3_XHCI"),
-+ PINCTRL_PIN(PORTA_U2_XHCI, "PORTA_U2_XHCI"),
-+ PINCTRL_PIN(PORTB_U3_XHCI, "PORTB_U3_XHCI"),
-+ PINCTRL_PIN(PORTB_U2_XHCI, "PORTB_U2_XHCI"),
-+ PINCTRL_PIN(PORTA_MODE, "PORTA_MODE"),
-+ PINCTRL_PIN(PORTA_U3_PHY, "PORTA_U3_PHY"),
-+ PINCTRL_PIN(PORTA_U2_PHY, "PORTA_U2_PHY"),
-+ PINCTRL_PIN(PORTB_MODE, "PORTB_MODE"),
-+ PINCTRL_PIN(PORTB_U3_PHY, "PORTB_U3_PHY"),
-+ PINCTRL_PIN(PORTB_U2_PHY, "PORTB_U2_PHY"),
-+ PINCTRL_PIN(JTAG_PORT, "JTAG_PORT"),
-+ PINCTRL_PIN(PCIERC0_PERST, "PCIERC0_PERST"),
-+ PINCTRL_PIN(PCIERC1_PERST, "PCIERC1_PERST"),
-+};
-+
-+FUNCFG_DESCL(AC14, PIN_CFG(EMMCG1, SCU400, BIT_MASK(0), BIT(0)),
-+ PIN_CFG(EMMCG4, SCU400, BIT_MASK(0), BIT(0)),
-+ PIN_CFG(EMMCG8, SCU400, BIT_MASK(0), BIT(0)),
-+ PIN_CFG(VB1, SCU404, BIT_MASK(0), BIT(0)));
-+FUNCFG_DESCL(AE15, PIN_CFG(EMMCG1, SCU400, BIT_MASK(1), BIT(1)),
-+ PIN_CFG(EMMCG4, SCU400, BIT_MASK(1), BIT(1)),
-+ PIN_CFG(EMMCG8, SCU400, BIT_MASK(1), BIT(1)),
-+ PIN_CFG(VB1, SCU404, BIT_MASK(1), BIT(1)));
-+FUNCFG_DESCL(AD14, PIN_CFG(EMMCG1, SCU400, BIT_MASK(2), BIT(2)),
-+ PIN_CFG(EMMCG4, SCU400, BIT_MASK(2), BIT(2)),
-+ PIN_CFG(EMMCG8, SCU400, BIT_MASK(2), BIT(2)),
-+ PIN_CFG(VB1, SCU404, BIT_MASK(2), BIT(2)));
-+FUNCFG_DESCL(AE14, PIN_CFG(EMMCG4, SCU400, BIT_MASK(3), BIT(3)),
-+ PIN_CFG(EMMCG8, SCU400, BIT_MASK(3), BIT(3)),
-+ PIN_CFG(VB1, SCU404, BIT_MASK(3), BIT(3)));
-+FUNCFG_DESCL(AF14, PIN_CFG(EMMCG4, SCU400, BIT_MASK(4), BIT(4)),
-+ PIN_CFG(EMMCG8, SCU400, BIT_MASK(4), BIT(4)));
-+FUNCFG_DESCL(AB13, PIN_CFG(EMMCG4, SCU400, BIT_MASK(5), BIT(5)),
-+ PIN_CFG(EMMCG8, SCU400, BIT_MASK(5), BIT(5)));
-+FUNCFG_DESCL(AB14, PIN_CFG(EMMCCDN, SCU400, BIT_MASK(6), BIT(6)),
-+ PIN_CFG(VB0, SCU404, BIT_MASK(6), BIT(6)));
-+FUNCFG_DESCL(AF15, PIN_CFG(EMMCWPN, SCU400, BIT_MASK(7), BIT(7)),
-+ PIN_CFG(VB0, SCU404, BIT_MASK(7), BIT(7)));
-+FUNCFG_DESCL(AF13, PIN_CFG(EMMCG8, SCU400, BIT_MASK(8), BIT(8)),
-+ PIN_CFG(VB0, SCU404, BIT_MASK(8), BIT(8)));
-+FUNCFG_DESCL(AC13, PIN_CFG(EMMCG8, SCU400, BIT_MASK(9), BIT(9)),
-+ PIN_CFG(VB0, SCU404, BIT_MASK(9), BIT(9)));
-+FUNCFG_DESCL(AD13, PIN_CFG(EMMCG8, SCU400, BIT_MASK(10), BIT(10)),
-+ PIN_CFG(VGADDC, SCU404, BIT_MASK(10), BIT(10)));
-+FUNCFG_DESCL(AE13, PIN_CFG(EMMCG8, SCU400, BIT_MASK(11), BIT(11)),
-+ PIN_CFG(VGADDC, SCU404, BIT_MASK(11), BIT(11)));
-+FUNCFG_DESCL(PORTA_U3, PIN_CFG(USB3AXHD, SCU410, GENMASK(1, 0), 0),
-+ PIN_CFG(USB3AXHPD, SCU410, GENMASK(1, 0), 0),
-+ PIN_CFG(USB3AXH, SCU410, GENMASK(1, 0), 2),
-+ PIN_CFG(USB3AXHP, SCU410, GENMASK(1, 0), 2),
-+ PIN_CFG(USB3AXH2B, SCU410, GENMASK(1, 0), 3),
-+ PIN_CFG(USB3AXHP2B, SCU410, GENMASK(1, 0), 3));
-+FUNCFG_DESCL(PORTA_U2, PIN_CFG(USB2AXHD1, SCU410, GENMASK(3, 2), 0),
-+ PIN_CFG(USB2AXHPD1, SCU410, GENMASK(3, 2), 0),
-+ PIN_CFG(USB2AXH, SCU410, GENMASK(3, 2), 2 << 2),
-+ PIN_CFG(USB2AXHP, SCU410, GENMASK(3, 2), 2 << 2),
-+ PIN_CFG(USB2AXH2B, SCU410, GENMASK(3, 2), 3 << 2),
-+ PIN_CFG(USB2AXHP2B, SCU410, GENMASK(3, 2), 3 << 2),
-+ PIN_CFG(USB2AD1, SCU410, GENMASK(3, 2), 1 << 2));
-+FUNCFG_DESCL(PORTB_U3, PIN_CFG(USB3BXHD, SCU410, GENMASK(5, 4), 0),
-+ PIN_CFG(USB3BXHPD, SCU410, GENMASK(5, 4), 0),
-+ PIN_CFG(USB3BXH, SCU410, GENMASK(5, 4), 2 << 4),
-+ PIN_CFG(USB3BXHP, SCU410, GENMASK(5, 4), 2 << 4),
-+ PIN_CFG(USB3BXH2A, SCU410, GENMASK(5, 4), 3 << 4),
-+ PIN_CFG(USB3BXHP2A, SCU410, GENMASK(5, 4), 3 << 4));
-+FUNCFG_DESCL(PORTB_U2, PIN_CFG(USB2BXHD1, SCU410, GENMASK(7, 6), 0),
-+ PIN_CFG(USB2BXHPD1, SCU410, GENMASK(7, 6), 0),
-+ PIN_CFG(USB2BXH, SCU410, GENMASK(7, 6), 2 << 6),
-+ PIN_CFG(USB2BXHP, SCU410, GENMASK(7, 6), 2 << 6),
-+ PIN_CFG(USB2BXH2A, SCU410, GENMASK(7, 6), 3 << 6),
-+ PIN_CFG(USB2BXHP2A, SCU410, GENMASK(7, 6), 3 << 6),
-+ PIN_CFG(USB2BD1, SCU410, GENMASK(7, 6), 1 << 6));
-+FUNCFG_DESCL(PORTA_U3_XHCI, PIN_CFG(USB3AXHD, SCU410, BIT_MASK(9), 1 << 9),
-+ PIN_CFG(USB3AXHPD, SCU410, BIT_MASK(9), 0),
-+ PIN_CFG(USB3AXH, SCU410, BIT_MASK(9), 1 << 9),
-+ PIN_CFG(USB3AXHP, SCU410, BIT_MASK(9), 0),
-+ PIN_CFG(USB3AXH2B, SCU410, BIT_MASK(9), 1 << 9),
-+ PIN_CFG(USB3AXHP2B, SCU410, BIT_MASK(9), 0));
-+FUNCFG_DESCL(PORTA_U2_XHCI, PIN_CFG(USB2AXHD1, SCU410, BIT_MASK(9), 1 << 9),
-+ PIN_CFG(USB2AXHPD1, SCU410, BIT_MASK(9), 0),
-+ PIN_CFG(USB2AXH, SCU410, BIT_MASK(9), 1 << 9),
-+ PIN_CFG(USB2AXHP, SCU410, BIT_MASK(9), 0),
-+ PIN_CFG(USB2AXH2B, SCU410, BIT_MASK(9), 1 << 9),
-+ PIN_CFG(USB2AXHP2B, SCU410, BIT_MASK(9), 0));
-+FUNCFG_DESCL(PORTB_U3_XHCI, PIN_CFG(USB3BXHD, SCU410, BIT_MASK(10), 1 << 10),
-+ PIN_CFG(USB3BXHPD, SCU410, BIT_MASK(10), 0),
-+ PIN_CFG(USB3BXH, SCU410, BIT_MASK(10), 1 << 10),
-+ PIN_CFG(USB3BXHP, SCU410, BIT_MASK(10), 0),
-+ PIN_CFG(USB3BXH2A, SCU410, BIT_MASK(10), 1 << 10),
-+ PIN_CFG(USB3BXHP2A, SCU410, BIT_MASK(10), 0));
-+FUNCFG_DESCL(PORTB_U2_XHCI, PIN_CFG(USB2BXHD1, SCU410, BIT_MASK(10), 1 << 10),
-+ PIN_CFG(USB2BXHPD1, SCU410, BIT_MASK(10), 0),
-+ PIN_CFG(USB2BXH, SCU410, BIT_MASK(10), 1 << 10),
-+ PIN_CFG(USB2BXHP, SCU410, BIT_MASK(10), 0),
-+ PIN_CFG(USB2BXH2A, SCU410, BIT_MASK(10), 1 << 10),
-+ PIN_CFG(USB2BXHP2A, SCU410, BIT_MASK(10), 0));
-+FUNCFG_DESCL(PORTA_MODE, PIN_CFG(USB2AHPD0, SCU410, GENMASK(25, 24), 0),
-+ PIN_CFG(USB2AH, SCU410, GENMASK(25, 24), 2 << 24),
-+ PIN_CFG(USB2AHP, SCU410, GENMASK(25, 24), 3 << 24),
-+ PIN_CFG(USB2AD0, SCU410, GENMASK(25, 24), 1 << 24));
-+FUNCFG_DESCL(PORTB_MODE, PIN_CFG(USB2BHPD0, SCU410, GENMASK(29, 28), 0),
-+ PIN_CFG(USB2BH, SCU410, GENMASK(29, 28), 2 << 28),
-+ PIN_CFG(USB2BHP, SCU410, GENMASK(29, 28), 3 << 28),
-+ PIN_CFG(USB2BD0, SCU410, GENMASK(29, 28), 1 << 28));
-+FUNCFG_DESCL(PORTA_U3_PHY);
-+FUNCFG_DESCL(PORTA_U2_PHY);
-+FUNCFG_DESCL(PORTB_U3_PHY);
-+FUNCFG_DESCL(PORTB_U2_PHY);
-+FUNCFG_DESCL(JTAG_PORT, PIN_CFG(PSP, SCU408, GENMASK(12, 5), 0x0 << 5),
-+ PIN_CFG(SSP, SCU408, GENMASK(12, 5), 0x41 << 5),
-+ PIN_CFG(TSP, SCU408, GENMASK(12, 5), 0x42 << 5),
-+ PIN_CFG(DDR, SCU408, GENMASK(12, 5), 0x43 << 5),
-+ PIN_CFG(USB3A, SCU408, GENMASK(12, 5), 0x44 << 5),
-+ PIN_CFG(USB3B, SCU408, GENMASK(12, 5), 0x45 << 5),
-+ PIN_CFG(PCIEA, SCU408, GENMASK(12, 5), 0x46 << 5),
-+ PIN_CFG(PCIEB, SCU408, GENMASK(12, 5), 0x47 << 5),
-+ PIN_CFG(JTAGM0, SCU408, GENMASK(12, 5), 0x8 << 5));
-+FUNCFG_DESCL(PCIERC0_PERST, PIN_CFG(PCIERC0PERST, SCU200, BIT_MASK(21), 1 << 21));
-+FUNCFG_DESCL(PCIERC1_PERST, PIN_CFG(PCIERC1PERST, SCU200, BIT_MASK(19), 1 << 19));
-+
-+static const struct aspeed_g7_pincfg pin_cfg[] = {
-+ PINCFG_PIN(AC14), PINCFG_PIN(AE15),
-+ PINCFG_PIN(AD14), PINCFG_PIN(AE14),
-+ PINCFG_PIN(AF14), PINCFG_PIN(AB13),
-+ PINCFG_PIN(AB14), PINCFG_PIN(AF15),
-+ PINCFG_PIN(AF13), PINCFG_PIN(AC13),
-+ PINCFG_PIN(AD13), PINCFG_PIN(AE13),
-+ PINCFG_PIN(PORTA_U3), PINCFG_PIN(PORTA_U2),
-+ PINCFG_PIN(PORTB_U3), PINCFG_PIN(PORTB_U2),
-+ PINCFG_PIN(PORTA_U3_XHCI), PINCFG_PIN(PORTA_U2_XHCI),
-+ PINCFG_PIN(PORTB_U3_XHCI), PINCFG_PIN(PORTB_U2_XHCI),
-+ PINCFG_PIN(PORTA_MODE), PINCFG_PIN(PORTB_MODE),
-+ PINCFG_PIN(PORTA_U3_PHY), PINCFG_PIN(PORTA_U2_PHY),
-+ PINCFG_PIN(PORTB_U3_PHY), PINCFG_PIN(PORTB_U2_PHY),
-+ PINCFG_PIN(JTAG_PORT), PINCFG_PIN(PCIERC0_PERST),
-+ PINCFG_PIN(PCIERC1_PERST),
-+};
-+
-+static const struct pinctrl_ops aspeed_g7_soc0_pinctrl_ops = {
-+ .get_groups_count = aspeed_pinctrl_get_groups_count,
-+ .get_group_name = aspeed_pinctrl_get_group_name,
-+ .get_group_pins = aspeed_pinctrl_get_group_pins,
-+ .pin_dbg_show = aspeed_pinctrl_pin_dbg_show,
-+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
-+ .dt_free_map = pinctrl_utils_free_map,
-+};
-+
-+static const struct pinmux_ops aspeed_g7_soc0_pinmux_ops = {
-+ .get_functions_count = aspeed_pinmux_get_fn_count,
-+ .get_function_name = aspeed_pinmux_get_fn_name,
-+ .get_function_groups = aspeed_pinmux_get_fn_groups,
-+ .set_mux = aspeed_g7_pinmux_set_mux,
-+ .gpio_request_enable = aspeed_g7_gpio_request_enable,
-+ .strict = true,
-+};
-+
-+static const struct pinconf_ops aspeed_g7_soc0_pinconf_ops = {
-+ .is_generic = true,
-+ .pin_config_get = aspeed_pin_config_get,
-+ .pin_config_set = aspeed_pin_config_set,
-+ .pin_config_group_get = aspeed_pin_config_group_get,
-+ .pin_config_group_set = aspeed_pin_config_group_set,
-+};
-+
-+/* pinctrl_desc */
-+static struct pinctrl_desc aspeed_g7_soc0_pinctrl_desc = {
-+ .name = "aspeed-g7-soc0-pinctrl",
-+ .pins = aspeed_g7_soc0_pins,
-+ .npins = ARRAY_SIZE(aspeed_g7_soc0_pins),
-+ .pctlops = &aspeed_g7_soc0_pinctrl_ops,
-+ .pmxops = &aspeed_g7_soc0_pinmux_ops,
-+ .confops = &aspeed_g7_soc0_pinconf_ops,
-+ .owner = THIS_MODULE,
-+};
-+
-+static struct aspeed_pinctrl_data aspeed_g7_pinctrl_data = {
-+ .pins = aspeed_g7_soc0_pins,
-+ .npins = ARRAY_SIZE(aspeed_g7_soc0_pins),
-+ .pinmux = {
-+ .groups = aspeed_g7_soc0_pingroups,
-+ .ngroups = ARRAY_SIZE(aspeed_g7_soc0_pingroups),
-+ .functions = aspeed_g7_soc0_funcs,
-+ .nfunctions = ARRAY_SIZE(aspeed_g7_soc0_funcs),
-+ .configs_g7 = pin_cfg,
-+ .nconfigs_g7 = ARRAY_SIZE(pin_cfg),
-+ },
-+};
-+
-+static int aspeed_g7_soc0_pinctrl_probe(struct platform_device *pdev)
-+{
-+ return aspeed_pinctrl_probe(pdev, &aspeed_g7_soc0_pinctrl_desc,
-+ &aspeed_g7_pinctrl_data);
-+}
-+
-+static const struct of_device_id aspeed_g7_soc0_pinctrl_match[] = {
-+ { .compatible = "aspeed,ast2700-soc0-pinctrl" },
-+ {}
-+};
-+MODULE_DEVICE_TABLE(of, aspeed_g7_soc0_pinctrl_match);
-+
-+static struct platform_driver aspeed_g7_soc0_pinctrl_driver = {
-+ .probe = aspeed_g7_soc0_pinctrl_probe,
-+ .driver = {
-+ .name = "aspeed-g7-soc0-pinctrl",
-+ .of_match_table = aspeed_g7_soc0_pinctrl_match,
-+ .suppress_bind_attrs = true,
-+ },
-+};
-+
-+static int __init aspeed_g7_soc0_pinctrl_register(void)
-+{
-+ return platform_driver_register(&aspeed_g7_soc0_pinctrl_driver);
-+}
-+arch_initcall(aspeed_g7_soc0_pinctrl_register);
-diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc1.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc1.c
-new file mode 100644
-index 000000000000..f5e766eb6393
---- /dev/null
-+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g7-soc1.c
-@@ -0,0 +1,2292 @@
-+// SPDX-License-Identifier: GPL-2.0
-+
-+#include <linux/bits.h>
-+#include <linux/device.h>
-+#include <linux/gpio/driver.h>
-+#include <linux/interrupt.h>
-+#include <linux/irq.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/module.h>
-+#include <linux/mod_devicetable.h>
-+#include <linux/pinctrl/machine.h>
-+#include <linux/pinctrl/pinconf.h>
-+#include <linux/pinctrl/pinconf-generic.h>
-+#include <linux/pinctrl/pinctrl.h>
-+#include <linux/pinctrl/pinmux.h>
-+#include <linux/platform_device.h>
-+#include <linux/property.h>
-+#include "pinctrl-aspeed.h"
-+
-+#define SCU3B0 0x3B0 /* USB Controller Register */
-+#define SCU3B4 0x3B4 /* USB Controller Lock Register */
-+#define SCU3B8 0x3B8 /* USB Controller Secure Register #1 */
-+#define SCU3BC 0x3BC /* USB Controller Secure Register #2 */
-+#define SCU3C0 0x3C0 /* USB Controller Secure Register #3 */
-+#define SCU400 0x400 /* Multi-function Pin Control #1 */
-+#define SCU404 0x404 /* Multi-function Pin Control #2 */
-+#define SCU408 0x408 /* Multi-function Pin Control #3 */
-+#define SCU40C 0x40C /* Multi-function Pin Control #4 */
-+#define SCU410 0x410 /* Multi-function Pin Control #5 */
-+#define SCU414 0x414 /* Multi-function Pin Control #6 */
-+#define SCU418 0x418 /* Multi-function Pin Control #7 */
-+#define SCU41C 0x41C /* Multi-function Pin Control #8 */
-+#define SCU420 0x420 /* Multi-function Pin Control #9 */
-+#define SCU424 0x424 /* Multi-function Pin Control #10 */
-+#define SCU428 0x428 /* Multi-function Pin Control #11 */
-+#define SCU42C 0x42C /* Multi-function Pin Control #12 */
-+#define SCU430 0x430 /* Multi-function Pin Control #13 */
-+#define SCU434 0x434 /* Multi-function Pin Control #14 */
-+#define SCU438 0x438 /* Multi-function Pin Control #15 */
-+#define SCU43C 0x43C /* Multi-function Pin Control #16 */
-+#define SCU440 0x440 /* Multi-function Pin Control #17 */
-+#define SCU444 0x444 /* Multi-function Pin Control #18 */
-+#define SCU448 0x448 /* Multi-function Pin Control #19 */
-+#define SCU44C 0x44C /* Multi-function Pin Control #20 */
-+#define SCU450 0x450 /* Multi-function Pin Control #21 */
-+#define SCU454 0x454 /* Multi-function Pin Control #22 */
-+#define SCU458 0x458 /* Multi-function Pin Control #23 */
-+#define SCU45C 0x45C /* Multi-function Pin Control #24 */
-+#define SCU460 0x460 /* Multi-function Pin Control #25 */
-+#define SCU464 0x464 /* Multi-function Pin Control #26 */
-+#define SCU468 0x468 /* Multi-function Pin Control #27 */
-+#define SCU46C 0x46C /* Multi-function Pin Control #28 */
-+#define SCU470 0x470 /* Multi-function Pin Control #29 */
-+#define SCU474 0x474 /* Multi-function Pin Control #30 */
-+#define SCU478 0x478 /* Multi-function Pin Control #31 */
-+#define SCU47C 0x47C
-+#define SCU4C0 0x4C0 /* Driving Strength #0 A-I */
-+#define SCU4C4 0x4C4 /* Driving Strength #1 J-K */
-+#define SCU4C8 0x4C8 /* Driving Strength #2 L-M */
-+#define SCU4CC 0x4CC /* Driving Strength #3 N-O */
-+#define SCU4D0 0x4D0 /* Driving Strength #4 P-Q */
-+#define SCU4D4 0x4D4 /* Driving Strength #5 R-S */
-+#define SCU4D8 0x4D8 /* Driving Strength #6 T-U */
-+#define SCU4DC 0x4DC /* Driving Strength #7 W */
-+
-+#define SCU908 0x908 /* PCIe RC PERST Pin Control */
-+
-+enum {
-+ C16,
-+ C14,
-+ C11,
-+ D9,
-+ F14,
-+ D10,
-+ C12,
-+ C13,
-+ AA24,
-+ AB24,
-+ AB23,
-+ AC22,
-+ AD22,
-+ AE21,
-+ AF20,
-+ AE20,
-+ AD20,
-+ Y23,
-+ W23,
-+ AD19,
-+ AC20,
-+ AA23,
-+ AB22,
-+ AB21,
-+ AA22,
-+ Y22,
-+ W22,
-+ AF18,
-+ AE18,
-+ AD18,
-+ AC19,
-+ AB20,
-+ AF17,
-+ AA16,
-+ Y16,
-+ V17,
-+ J13,
-+ AB16,
-+ AC16,
-+ AF16,
-+ AA15,
-+ AB15,
-+ AC15,
-+ AD15,
-+ Y15,
-+ AA14,
-+ W16,
-+ V16,
-+ AB18,
-+ AC18,
-+ K13,
-+ AA17,
-+ AB17,
-+ AD16,
-+ AC17,
-+ AD17,
-+ AE16,
-+ AE17,
-+ U23,
-+ T24,
-+ HOLE0,
-+ HOLE1,
-+ HOLE2,
-+ HOLE3,
-+ AC24,
-+ AD24,
-+ AE23,
-+ AE19,
-+ AF23,
-+ Y25,
-+ AA25,
-+ AF19,
-+ AB25,
-+ AC25,
-+ AD25,
-+ V22,
-+ AE25,
-+ V21,
-+ AF21,
-+ AF25,
-+ AF26,
-+ AE26,
-+ W21,
-+ AD26,
-+ Y21,
-+ AC26,
-+ AA26,
-+ AB26,
-+ T26,
-+ AA20,
-+ V23,
-+ W24,
-+ R26,
-+ AA21,
-+ P26,
-+ Y24,
-+ B16,
-+ D14,
-+ B15,
-+ B14,
-+ C17,
-+ B13,
-+ E14,
-+ C15,
-+ D24,
-+ B23,
-+ B22,
-+ C23,
-+ B18,
-+ B21,
-+ B17,
-+ B19,
-+ B26,
-+ A25,
-+ A24,
-+ B24,
-+ E26,
-+ A21,
-+ A19,
-+ A18,
-+ D26,
-+ C26,
-+ A23,
-+ B25,
-+ A22,
-+ F26,
-+ A26,
-+ A14,
-+ E10,
-+ E13,
-+ D12,
-+ F10,
-+ E11,
-+ F11,
-+ F13,
-+ N15,
-+ C20,
-+ C19,
-+ A8,
-+ R14,
-+ A7,
-+ P14,
-+ D20,
-+ A6,
-+ B6,
-+ N14,
-+ B7,
-+ B8,
-+ B9,
-+ M14,
-+ J11,
-+ E7,
-+ D19,
-+ B11,
-+ D15,
-+ B12,
-+ B10,
-+ P13,
-+ C18,
-+ C6,
-+ C7,
-+ D7,
-+ N13,
-+ C8,
-+ C9,
-+ C10,
-+ M16,
-+ A15,
-+ G11,
-+ H7,
-+ H8,
-+ H9,
-+ H10,
-+ H11,
-+ J9,
-+ J10,
-+ E9,
-+ F9,
-+ F8,
-+ M13,
-+ F7,
-+ D8,
-+ E8,
-+ L12,
-+ F12,
-+ E12,
-+ J12,
-+ G7,
-+ G8,
-+ G9,
-+ G10,
-+ K12,
-+ W17,
-+ V18,
-+ W18,
-+ Y17,
-+ AA18,
-+ AA13,
-+ Y18,
-+ AA12,
-+ W20,
-+ V20,
-+ Y11,
-+ V14,
-+ V19,
-+ W14,
-+ Y20,
-+ AB19,
-+ U26,
-+ U25,
-+ V26,
-+ W26,
-+ Y26,
-+ W25,
-+ V24,
-+ U24,
-+ SGMII0,
-+ PCIERC2_PERST,
-+ PORTC_MODE, // SCU3B0[1:0]
-+ PORTD_MODE, // SCU3B0[3:2]
-+};
-+
-+GROUP_DECL(ESPI0, B16, D14, B15, B14, C17, B13, E14, C15);
-+GROUP_DECL(ESPI1, C16, C14, C11, D9, F14, D10, C12, C13);
-+GROUP_DECL(LPC0, AB22, AB21, B16, D14, B15, B14, C17, B13, E14, C15);
-+GROUP_DECL(LPC1, C16, C14, C11, D9, F14, D10, C12, C13);
-+GROUP_DECL(SD, C16, C14, C11, D9, F14, D10, C12, C13);
-+GROUP_DECL(VPI, C16, C14, C11, D9, F14, D10, C12, C13, AA24, AB24, AB23, AC22,
-+ AD22, AE21, AF20, AE20, AD20, Y23, W23, AD19, AC20, AA23, AB22, AB21,
-+ AA22, Y22, W22, AF18, AE18, AD18);
-+GROUP_DECL(OSCCLK, C17);
-+GROUP_DECL(TACH0, AA24);
-+GROUP_DECL(TACH1, AB24);
-+GROUP_DECL(TACH2, AB23);
-+GROUP_DECL(TACH3, AC22);
-+GROUP_DECL(THRU0, AA24, AB24);
-+GROUP_DECL(THRU1, AB23, AC22);
-+GROUP_DECL(TACH4, AD22);
-+GROUP_DECL(TACH5, AE21);
-+GROUP_DECL(TACH6, AF20);
-+GROUP_DECL(TACH7, AE20);
-+GROUP_DECL(NTCS5, AD22);
-+GROUP_DECL(NDCD5, AE21);
-+GROUP_DECL(NDSR5, AF20);
-+GROUP_DECL(NRI5, AE20);
-+GROUP_DECL(SALT12, W23);
-+GROUP_DECL(SALT13, AD19);
-+GROUP_DECL(SALT14, AC20);
-+GROUP_DECL(SALT15, AA23);
-+GROUP_DECL(NDTR5, AD20);
-+GROUP_DECL(NRTS5, Y23);
-+GROUP_DECL(NCTS6, W23);
-+GROUP_DECL(NDCD6, AD19);
-+GROUP_DECL(NDSR6, AC20);
-+GROUP_DECL(NRI6, AA23);
-+GROUP_DECL(NDTR6, AB22);
-+GROUP_DECL(NRTS6, AB21);
-+GROUP_DECL(TACH8, AD20);
-+GROUP_DECL(TACH9, Y23);
-+GROUP_DECL(TACH10, W23);
-+GROUP_DECL(TACH11, AD19);
-+GROUP_DECL(TACH12, AC20);
-+GROUP_DECL(TACH13, AA23);
-+GROUP_DECL(TACH14, AB22);
-+GROUP_DECL(TACH15, AB21);
-+GROUP_DECL(SPIM0, AB21, AA22, Y22, W22, AF18, AE18, AD18, AC19);
-+GROUP_DECL(PWM0, AA22);
-+GROUP_DECL(PWM1, Y22);
-+GROUP_DECL(PWM2, W22);
-+GROUP_DECL(PWM3, AF18);
-+GROUP_DECL(PWM4, AE18);
-+GROUP_DECL(PWM5, AD18);
-+GROUP_DECL(PWM6, AC19);
-+GROUP_DECL(PWM7, AB20);
-+GROUP_DECL(SIOPBON0, AA22);
-+GROUP_DECL(SIOPBIN0, Y22);
-+GROUP_DECL(SIOSCIN0, W22);
-+GROUP_DECL(SIOS3N0, AF18);
-+GROUP_DECL(SIOS5N0, AE18);
-+GROUP_DECL(SIOPWREQN0, AD18);
-+GROUP_DECL(SIOONCTRLN0, AC19);
-+GROUP_DECL(SIOPWRGD0, AB20);
-+GROUP_DECL(NCTS0, AF17);
-+GROUP_DECL(NDCD0, AA16);
-+GROUP_DECL(NDSR0, Y16);
-+GROUP_DECL(NRI0, V17);
-+GROUP_DECL(NDTR0, J13);
-+GROUP_DECL(NRTS0, AB16);
-+GROUP_DECL(TXD0, AC16);
-+GROUP_DECL(RXD0, AF16);
-+GROUP_DECL(NCTS1, AA15);
-+GROUP_DECL(NDCD1, AB15);
-+GROUP_DECL(NDSR1, AC15);
-+GROUP_DECL(NRI1, AD15);
-+GROUP_DECL(NDTR1, Y15);
-+GROUP_DECL(NRTS1, AA14);
-+GROUP_DECL(TXD1, W16);
-+GROUP_DECL(RXD1, V16);
-+GROUP_DECL(TXD2, AB18);
-+GROUP_DECL(RXD2, AC18);
-+GROUP_DECL(TXD3, K13);
-+GROUP_DECL(RXD3, AA17);
-+GROUP_DECL(NCTS5, AD22);
-+GROUP_DECL(TXD5, AB17);
-+GROUP_DECL(RXD5, AD16);
-+GROUP_DECL(TXD6, AC17);
-+GROUP_DECL(RXD6, AD17);
-+GROUP_DECL(TXD7, AE16);
-+GROUP_DECL(RXD7, AE17);
-+GROUP_DECL(TXD8, B17);
-+GROUP_DECL(RXD8, B19);
-+GROUP_DECL(TXD9, B26);
-+GROUP_DECL(RXD9, A25);
-+GROUP_DECL(TXD10, A24);
-+GROUP_DECL(RXD10, B24);
-+GROUP_DECL(TXD11, E26);
-+GROUP_DECL(RXD11, A21);
-+GROUP_DECL(SPIM1, K13, AA17, AB17, AD16, AC17, AD17, AE16, AE17);
-+GROUP_DECL(WDTRST0N, K13);
-+GROUP_DECL(WDTRST1N, AA17);
-+GROUP_DECL(WDTRST2N, AB17);
-+GROUP_DECL(WDTRST3N, AD16);
-+GROUP_DECL(WDTRST4N, W25);
-+GROUP_DECL(WDTRST5N, V24);
-+GROUP_DECL(WDTRST6N, U24);
-+GROUP_DECL(WDTRST7N, U23);
-+GROUP_DECL(PWM8, K13);
-+GROUP_DECL(PWM9, AA17);
-+GROUP_DECL(PWM10, AB17);
-+GROUP_DECL(PWM11, AD16);
-+GROUP_DECL(PWM12, AC17);
-+GROUP_DECL(PWM13, AD17);
-+GROUP_DECL(PWM14, AE16);
-+GROUP_DECL(PWM15, AE17);
-+GROUP_DECL(SALT0, AC17);
-+GROUP_DECL(SALT1, AD17);
-+GROUP_DECL(SALT2, AE16);
-+GROUP_DECL(SALT3, AE17);
-+GROUP_DECL(FSI0, T26, AA20);
-+GROUP_DECL(FSI1, V23, W24);
-+GROUP_DECL(FSI2, R26, AA21);
-+GROUP_DECL(FSI3, P26, Y24);
-+GROUP_DECL(SPIM2, AC24, AD24, AE23, AE19, AF23, Y25, AA25, AF19);
-+GROUP_DECL(SALT4, W17);
-+GROUP_DECL(SALT5, V18);
-+GROUP_DECL(SALT6, W18);
-+GROUP_DECL(SALT7, Y17);
-+GROUP_DECL(SALT8, AA18);
-+GROUP_DECL(SALT9, AA13);
-+GROUP_DECL(SALT10, Y18);
-+GROUP_DECL(SALT11, AA12);
-+GROUP_DECL(ADC0, W17);
-+GROUP_DECL(ADC1, V18);
-+GROUP_DECL(ADC2, W18);
-+GROUP_DECL(ADC3, Y17);
-+GROUP_DECL(ADC4, AA18);
-+GROUP_DECL(ADC5, AA13);
-+GROUP_DECL(ADC6, Y18);
-+GROUP_DECL(ADC7, AA12);
-+GROUP_DECL(ADC8, W20);
-+GROUP_DECL(ADC9, V20);
-+GROUP_DECL(ADC10, Y11);
-+GROUP_DECL(ADC11, V14);
-+GROUP_DECL(ADC12, V19);
-+GROUP_DECL(ADC13, W14);
-+GROUP_DECL(ADC14, Y20);
-+GROUP_DECL(ADC15, AB19);
-+GROUP_DECL(AUXPWRGOOD0, W14);
-+GROUP_DECL(AUXPWRGOOD1, Y20);
-+GROUP_DECL(SGPM0, U26, U25, W26, Y26);
-+GROUP_DECL(SGPM1, W25, V24, U23, T24);
-+GROUP_DECL(I2C0, G11, H7);
-+GROUP_DECL(I2C1, H8, H9);
-+GROUP_DECL(I2C2, H10, H11);
-+GROUP_DECL(I2C3, J9, J10);
-+GROUP_DECL(I2C4, E9, F9);
-+GROUP_DECL(I2C5, F8, M13);
-+GROUP_DECL(I2C6, F7, D8);
-+GROUP_DECL(I2C7, E8, L12);
-+GROUP_DECL(I2C8, F12, E12);
-+GROUP_DECL(I2C9, J12, G7);
-+GROUP_DECL(I2C10, G8, G9);
-+GROUP_DECL(I2C11, G10, K12);
-+GROUP_DECL(I2C12, AC24, AD24);
-+GROUP_DECL(I2C13, AE23, AE19);
-+GROUP_DECL(I2C14, AF23, Y25);
-+GROUP_DECL(I2C15, AA25, AF19);
-+GROUP_DECL(DI2C8, D19, B10);
-+GROUP_DECL(DI2C9, D15, B12);
-+GROUP_DECL(DI2C10, C7, D7);
-+GROUP_DECL(DI2C13, D26, C26);
-+GROUP_DECL(DI2C14, A23, B25);
-+GROUP_DECL(DI2C15, A22, F26);
-+GROUP_DECL(SIOPBON1, AC24);
-+GROUP_DECL(SIOPBIN1, AD24);
-+GROUP_DECL(SIOSCIN1, AE23);
-+GROUP_DECL(SIOS3N1, AE19);
-+GROUP_DECL(SIOS5N1, AF23);
-+GROUP_DECL(SIOPWREQN1, Y25);
-+GROUP_DECL(SIOONCTRLN1, AA25);
-+GROUP_DECL(SIOPWRGD1, AF19);
-+GROUP_DECL(HVI3C12, AC24, AD24);
-+GROUP_DECL(HVI3C13, AE23, AE19);
-+GROUP_DECL(HVI3C14, AF23, Y25);
-+GROUP_DECL(HVI3C15, AA25, AF19);
-+GROUP_DECL(HVI3C4, C16, C14);
-+GROUP_DECL(HVI3C5, C11, D9);
-+GROUP_DECL(HVI3C6, F14, D10);
-+GROUP_DECL(HVI3C7, D26, C26);
-+GROUP_DECL(HVI3C10, A23, B25);
-+GROUP_DECL(HVI3C11, A22, F26);
-+GROUP_DECL(I3C4, AB25, AC25);
-+GROUP_DECL(I3C5, AD25, V22);
-+GROUP_DECL(I3C6, AE25, V21);
-+GROUP_DECL(I3C7, AF21, AF25);
-+GROUP_DECL(I3C8, AF26, AE26);
-+GROUP_DECL(I3C9, W21, AD26);
-+GROUP_DECL(I3C10, Y21, AC26);
-+GROUP_DECL(I3C11, AA26, AB26);
-+GROUP_DECL(I3C0, T26, AA20);
-+GROUP_DECL(I3C1, V23, W24);
-+GROUP_DECL(I3C2, R26, AA21);
-+GROUP_DECL(I3C3, P26, Y24);
-+GROUP_DECL(LTPI, T26, AA20, V23, W24);
-+GROUP_DECL(SPI0, D24, B23, B22);
-+GROUP_DECL(QSPI0, C23, B18);
-+GROUP_DECL(SPI0CS1, B21);
-+GROUP_DECL(SPI0ABR, B17);
-+GROUP_DECL(SPI0WPN, B19);
-+GROUP_DECL(SPI1, B26, A25, A24);
-+GROUP_DECL(QSPI1, B24, E26);
-+GROUP_DECL(SPI1CS1, A21);
-+GROUP_DECL(SPI1ABR, A19);
-+GROUP_DECL(SPI1WPN, A18);
-+GROUP_DECL(SPI2, D26, C26, A23, B25);
-+GROUP_DECL(QSPI2, A22, F26);
-+GROUP_DECL(SPI2CS1, A26);
-+GROUP_DECL(THRU2, A19, A18);
-+GROUP_DECL(THRU3, A22, F26);
-+GROUP_DECL(JTAGM1, D12, F10, E11, F11, F13);
-+GROUP_DECL(MDIO0, B9, M14);
-+GROUP_DECL(MDIO1, C9, C10);
-+GROUP_DECL(MDIO2, E10, E13);
-+GROUP_DECL(FWQSPI, M16, A15);
-+GROUP_DECL(FWSPIABR, A14);
-+GROUP_DECL(FWSPIWPN, N15);
-+GROUP_DECL(RGMII0, C20, C19, A8, R14, A7, P14, D20, A6, B6, N14, B7, B8);
-+GROUP_DECL(RGMII1, D19, B11, D15, B12, B10, P13, C18, C6, C7, D7, N13, C8);
-+GROUP_DECL(RMII0, C20, A8, R14, A7, P14, D20, A6, B6, N14);
-+GROUP_DECL(RMII1, D19, D15, B12, B10, P13, C18, C6, C7, D7);
-+GROUP_DECL(VGA, J11, E7);
-+GROUP_DECL(DSGPM1, D19, B10, C7, D7);
-+GROUP_DECL(SGPS, B11, C18, N13, C8);
-+GROUP_DECL(I2CF0, F12, E12, J12, G7);
-+GROUP_DECL(I2CF1, E9, F9, F8, M13);
-+GROUP_DECL(I2CF2, F7, D8, E8, L12);
-+GROUP_DECL(CANBUS, G7, G8, G9);
-+GROUP_DECL(USBUART, G8, G9);
-+GROUP_DECL(HBLED, V26);
-+GROUP_DECL(MACLINK0, U26);
-+GROUP_DECL(MACLINK1, U25);
-+GROUP_DECL(MACLINK2, U24);
-+GROUP_DECL(NCTS2, U26);
-+GROUP_DECL(NDCD2, U25);
-+GROUP_DECL(NDSR2, W26);
-+GROUP_DECL(NRI2, Y26);
-+GROUP_DECL(NDTR2, W25);
-+GROUP_DECL(NRTS2, V24);
-+GROUP_DECL(SMON0, U26, U25, W26, Y26);
-+GROUP_DECL(SMON1, U23, T24, W25, V24);
-+GROUP_DECL(SGMII, SGMII0);
-+//PCIE RC PERST
-+GROUP_DECL(PCIERC2PERST, PCIERC2_PERST);
-+GROUP_DECL(USB2CUD, PORTC_MODE);
-+GROUP_DECL(USB2CD, PORTC_MODE);
-+GROUP_DECL(USB2CH, PORTC_MODE);
-+GROUP_DECL(USB2CU, PORTC_MODE);
-+GROUP_DECL(USB2DD, PORTD_MODE);
-+GROUP_DECL(USB2DH, PORTD_MODE);
-+
-+static struct aspeed_pin_group aspeed_g7_soc1_pingroups[] = {
-+ ASPEED_PINCTRL_GROUP(ESPI0),
-+ ASPEED_PINCTRL_GROUP(ESPI1),
-+ ASPEED_PINCTRL_GROUP(LPC0),
-+ ASPEED_PINCTRL_GROUP(LPC1),
-+ ASPEED_PINCTRL_GROUP(SD),
-+ ASPEED_PINCTRL_GROUP(VPI),
-+ ASPEED_PINCTRL_GROUP(OSCCLK),
-+ ASPEED_PINCTRL_GROUP(TACH0),
-+ ASPEED_PINCTRL_GROUP(TACH1),
-+ ASPEED_PINCTRL_GROUP(TACH2),
-+ ASPEED_PINCTRL_GROUP(TACH3),
-+ ASPEED_PINCTRL_GROUP(THRU0),
-+ ASPEED_PINCTRL_GROUP(THRU1),
-+ ASPEED_PINCTRL_GROUP(TACH4),
-+ ASPEED_PINCTRL_GROUP(TACH5),
-+ ASPEED_PINCTRL_GROUP(TACH6),
-+ ASPEED_PINCTRL_GROUP(TACH7),
-+ ASPEED_PINCTRL_GROUP(NTCS5),
-+ ASPEED_PINCTRL_GROUP(NDCD5),
-+ ASPEED_PINCTRL_GROUP(NDSR5),
-+ ASPEED_PINCTRL_GROUP(NRI5),
-+ ASPEED_PINCTRL_GROUP(SALT12),
-+ ASPEED_PINCTRL_GROUP(SALT13),
-+ ASPEED_PINCTRL_GROUP(SALT14),
-+ ASPEED_PINCTRL_GROUP(SALT15),
-+ ASPEED_PINCTRL_GROUP(NDTR5),
-+ ASPEED_PINCTRL_GROUP(NRTS5),
-+ ASPEED_PINCTRL_GROUP(NCTS6),
-+ ASPEED_PINCTRL_GROUP(NDCD6),
-+ ASPEED_PINCTRL_GROUP(NDSR6),
-+ ASPEED_PINCTRL_GROUP(NRI6),
-+ ASPEED_PINCTRL_GROUP(NDTR6),
-+ ASPEED_PINCTRL_GROUP(NRTS6),
-+ ASPEED_PINCTRL_GROUP(TACH8),
-+ ASPEED_PINCTRL_GROUP(TACH9),
-+ ASPEED_PINCTRL_GROUP(TACH10),
-+ ASPEED_PINCTRL_GROUP(TACH11),
-+ ASPEED_PINCTRL_GROUP(TACH12),
-+ ASPEED_PINCTRL_GROUP(TACH13),
-+ ASPEED_PINCTRL_GROUP(TACH14),
-+ ASPEED_PINCTRL_GROUP(TACH15),
-+ ASPEED_PINCTRL_GROUP(SPIM0),
-+ ASPEED_PINCTRL_GROUP(PWM0),
-+ ASPEED_PINCTRL_GROUP(PWM1),
-+ ASPEED_PINCTRL_GROUP(PWM2),
-+ ASPEED_PINCTRL_GROUP(PWM3),
-+ ASPEED_PINCTRL_GROUP(PWM4),
-+ ASPEED_PINCTRL_GROUP(PWM5),
-+ ASPEED_PINCTRL_GROUP(PWM6),
-+ ASPEED_PINCTRL_GROUP(PWM7),
-+ ASPEED_PINCTRL_GROUP(SIOPBON0),
-+ ASPEED_PINCTRL_GROUP(SIOPBIN0),
-+ ASPEED_PINCTRL_GROUP(SIOSCIN0),
-+ ASPEED_PINCTRL_GROUP(SIOS3N0),
-+ ASPEED_PINCTRL_GROUP(SIOS5N0),
-+ ASPEED_PINCTRL_GROUP(SIOPWREQN0),
-+ ASPEED_PINCTRL_GROUP(SIOONCTRLN0),
-+ ASPEED_PINCTRL_GROUP(SIOPWRGD0),
-+ ASPEED_PINCTRL_GROUP(NCTS0),
-+ ASPEED_PINCTRL_GROUP(NDCD0),
-+ ASPEED_PINCTRL_GROUP(NDSR0),
-+ ASPEED_PINCTRL_GROUP(NRI0),
-+ ASPEED_PINCTRL_GROUP(NDTR0),
-+ ASPEED_PINCTRL_GROUP(NRTS0),
-+ ASPEED_PINCTRL_GROUP(TXD0),
-+ ASPEED_PINCTRL_GROUP(RXD0),
-+ ASPEED_PINCTRL_GROUP(NCTS1),
-+ ASPEED_PINCTRL_GROUP(NDCD1),
-+ ASPEED_PINCTRL_GROUP(NDSR1),
-+ ASPEED_PINCTRL_GROUP(NRI1),
-+ ASPEED_PINCTRL_GROUP(NDTR1),
-+ ASPEED_PINCTRL_GROUP(NRTS1),
-+ ASPEED_PINCTRL_GROUP(TXD1),
-+ ASPEED_PINCTRL_GROUP(RXD1),
-+ ASPEED_PINCTRL_GROUP(TXD2),
-+ ASPEED_PINCTRL_GROUP(RXD2),
-+ ASPEED_PINCTRL_GROUP(TXD3),
-+ ASPEED_PINCTRL_GROUP(RXD3),
-+ ASPEED_PINCTRL_GROUP(NCTS5),
-+ ASPEED_PINCTRL_GROUP(NDCD5),
-+ ASPEED_PINCTRL_GROUP(NDSR5),
-+ ASPEED_PINCTRL_GROUP(NRI5),
-+ ASPEED_PINCTRL_GROUP(NDTR5),
-+ ASPEED_PINCTRL_GROUP(NRTS5),
-+ ASPEED_PINCTRL_GROUP(TXD5),
-+ ASPEED_PINCTRL_GROUP(RXD5),
-+ ASPEED_PINCTRL_GROUP(NCTS6),
-+ ASPEED_PINCTRL_GROUP(NDCD6),
-+ ASPEED_PINCTRL_GROUP(NDSR6),
-+ ASPEED_PINCTRL_GROUP(NRI6),
-+ ASPEED_PINCTRL_GROUP(NDTR6),
-+ ASPEED_PINCTRL_GROUP(NRTS6),
-+ ASPEED_PINCTRL_GROUP(TXD6),
-+ ASPEED_PINCTRL_GROUP(RXD6),
-+ ASPEED_PINCTRL_GROUP(TXD6),
-+ ASPEED_PINCTRL_GROUP(RXD6),
-+ ASPEED_PINCTRL_GROUP(TXD7),
-+ ASPEED_PINCTRL_GROUP(RXD7),
-+ ASPEED_PINCTRL_GROUP(TXD8),
-+ ASPEED_PINCTRL_GROUP(RXD8),
-+ ASPEED_PINCTRL_GROUP(TXD9),
-+ ASPEED_PINCTRL_GROUP(RXD9),
-+ ASPEED_PINCTRL_GROUP(TXD10),
-+ ASPEED_PINCTRL_GROUP(RXD10),
-+ ASPEED_PINCTRL_GROUP(TXD11),
-+ ASPEED_PINCTRL_GROUP(RXD11),
-+ ASPEED_PINCTRL_GROUP(SPIM1),
-+ ASPEED_PINCTRL_GROUP(WDTRST0N),
-+ ASPEED_PINCTRL_GROUP(WDTRST1N),
-+ ASPEED_PINCTRL_GROUP(WDTRST2N),
-+ ASPEED_PINCTRL_GROUP(WDTRST3N),
-+ ASPEED_PINCTRL_GROUP(WDTRST4N),
-+ ASPEED_PINCTRL_GROUP(WDTRST5N),
-+ ASPEED_PINCTRL_GROUP(WDTRST6N),
-+ ASPEED_PINCTRL_GROUP(WDTRST7N),
-+ ASPEED_PINCTRL_GROUP(PWM8),
-+ ASPEED_PINCTRL_GROUP(PWM9),
-+ ASPEED_PINCTRL_GROUP(PWM10),
-+ ASPEED_PINCTRL_GROUP(PWM11),
-+ ASPEED_PINCTRL_GROUP(PWM12),
-+ ASPEED_PINCTRL_GROUP(PWM13),
-+ ASPEED_PINCTRL_GROUP(PWM14),
-+ ASPEED_PINCTRL_GROUP(PWM15),
-+ ASPEED_PINCTRL_GROUP(SALT0),
-+ ASPEED_PINCTRL_GROUP(SALT1),
-+ ASPEED_PINCTRL_GROUP(SALT2),
-+ ASPEED_PINCTRL_GROUP(SALT3),
-+ ASPEED_PINCTRL_GROUP(FSI0),
-+ ASPEED_PINCTRL_GROUP(FSI1),
-+ ASPEED_PINCTRL_GROUP(FSI2),
-+ ASPEED_PINCTRL_GROUP(FSI3),
-+ ASPEED_PINCTRL_GROUP(SPIM2),
-+ ASPEED_PINCTRL_GROUP(SALT4),
-+ ASPEED_PINCTRL_GROUP(SALT5),
-+ ASPEED_PINCTRL_GROUP(SALT6),
-+ ASPEED_PINCTRL_GROUP(SALT7),
-+ ASPEED_PINCTRL_GROUP(SALT8),
-+ ASPEED_PINCTRL_GROUP(SALT9),
-+ ASPEED_PINCTRL_GROUP(SALT10),
-+ ASPEED_PINCTRL_GROUP(SALT11),
-+ ASPEED_PINCTRL_GROUP(ADC0),
-+ ASPEED_PINCTRL_GROUP(ADC1),
-+ ASPEED_PINCTRL_GROUP(ADC2),
-+ ASPEED_PINCTRL_GROUP(ADC3),
-+ ASPEED_PINCTRL_GROUP(ADC4),
-+ ASPEED_PINCTRL_GROUP(ADC5),
-+ ASPEED_PINCTRL_GROUP(ADC6),
-+ ASPEED_PINCTRL_GROUP(ADC7),
-+ ASPEED_PINCTRL_GROUP(ADC8),
-+ ASPEED_PINCTRL_GROUP(ADC9),
-+ ASPEED_PINCTRL_GROUP(ADC10),
-+ ASPEED_PINCTRL_GROUP(ADC11),
-+ ASPEED_PINCTRL_GROUP(ADC12),
-+ ASPEED_PINCTRL_GROUP(ADC13),
-+ ASPEED_PINCTRL_GROUP(ADC14),
-+ ASPEED_PINCTRL_GROUP(ADC15),
-+ ASPEED_PINCTRL_GROUP(AUXPWRGOOD0),
-+ ASPEED_PINCTRL_GROUP(AUXPWRGOOD1),
-+ ASPEED_PINCTRL_GROUP(SGPM0),
-+ ASPEED_PINCTRL_GROUP(SGPM1),
-+ ASPEED_PINCTRL_GROUP(I2C0),
-+ ASPEED_PINCTRL_GROUP(I2C1),
-+ ASPEED_PINCTRL_GROUP(I2C2),
-+ ASPEED_PINCTRL_GROUP(I2C3),
-+ ASPEED_PINCTRL_GROUP(I2C4),
-+ ASPEED_PINCTRL_GROUP(I2C5),
-+ ASPEED_PINCTRL_GROUP(I2C6),
-+ ASPEED_PINCTRL_GROUP(I2C7),
-+ ASPEED_PINCTRL_GROUP(I2C8),
-+ ASPEED_PINCTRL_GROUP(I2C9),
-+ ASPEED_PINCTRL_GROUP(I2C10),
-+ ASPEED_PINCTRL_GROUP(I2C11),
-+ ASPEED_PINCTRL_GROUP(I2C12),
-+ ASPEED_PINCTRL_GROUP(I2C13),
-+ ASPEED_PINCTRL_GROUP(I2C14),
-+ ASPEED_PINCTRL_GROUP(I2C15),
-+ ASPEED_PINCTRL_GROUP(DI2C8),
-+ ASPEED_PINCTRL_GROUP(DI2C9),
-+ ASPEED_PINCTRL_GROUP(DI2C10),
-+ ASPEED_PINCTRL_GROUP(DI2C13),
-+ ASPEED_PINCTRL_GROUP(DI2C14),
-+ ASPEED_PINCTRL_GROUP(DI2C15),
-+ ASPEED_PINCTRL_GROUP(SIOPBON1),
-+ ASPEED_PINCTRL_GROUP(SIOPBIN1),
-+ ASPEED_PINCTRL_GROUP(SIOSCIN1),
-+ ASPEED_PINCTRL_GROUP(SIOS3N1),
-+ ASPEED_PINCTRL_GROUP(SIOS5N1),
-+ ASPEED_PINCTRL_GROUP(SIOPWREQN1),
-+ ASPEED_PINCTRL_GROUP(SIOONCTRLN1),
-+ ASPEED_PINCTRL_GROUP(SIOPWRGD1),
-+ ASPEED_PINCTRL_GROUP(HVI3C12),
-+ ASPEED_PINCTRL_GROUP(HVI3C13),
-+ ASPEED_PINCTRL_GROUP(HVI3C14),
-+ ASPEED_PINCTRL_GROUP(HVI3C15),
-+ ASPEED_PINCTRL_GROUP(HVI3C4),
-+ ASPEED_PINCTRL_GROUP(HVI3C5),
-+ ASPEED_PINCTRL_GROUP(HVI3C6),
-+ ASPEED_PINCTRL_GROUP(HVI3C7),
-+ ASPEED_PINCTRL_GROUP(HVI3C10),
-+ ASPEED_PINCTRL_GROUP(HVI3C11),
-+ ASPEED_PINCTRL_GROUP(I3C4),
-+ ASPEED_PINCTRL_GROUP(I3C5),
-+ ASPEED_PINCTRL_GROUP(I3C6),
-+ ASPEED_PINCTRL_GROUP(I3C7),
-+ ASPEED_PINCTRL_GROUP(I3C8),
-+ ASPEED_PINCTRL_GROUP(I3C9),
-+ ASPEED_PINCTRL_GROUP(I3C10),
-+ ASPEED_PINCTRL_GROUP(I3C11),
-+ ASPEED_PINCTRL_GROUP(I3C0),
-+ ASPEED_PINCTRL_GROUP(I3C1),
-+ ASPEED_PINCTRL_GROUP(I3C2),
-+ ASPEED_PINCTRL_GROUP(I3C3),
-+ ASPEED_PINCTRL_GROUP(LTPI),
-+ ASPEED_PINCTRL_GROUP(SPI0),
-+ ASPEED_PINCTRL_GROUP(QSPI0),
-+ ASPEED_PINCTRL_GROUP(SPI0CS1),
-+ ASPEED_PINCTRL_GROUP(SPI0ABR),
-+ ASPEED_PINCTRL_GROUP(SPI0WPN),
-+ ASPEED_PINCTRL_GROUP(SPI1),
-+ ASPEED_PINCTRL_GROUP(QSPI1),
-+ ASPEED_PINCTRL_GROUP(SPI1CS1),
-+ ASPEED_PINCTRL_GROUP(SPI1ABR),
-+ ASPEED_PINCTRL_GROUP(SPI1WPN),
-+ ASPEED_PINCTRL_GROUP(SPI2),
-+ ASPEED_PINCTRL_GROUP(QSPI2),
-+ ASPEED_PINCTRL_GROUP(SPI2CS1),
-+ ASPEED_PINCTRL_GROUP(THRU2),
-+ ASPEED_PINCTRL_GROUP(THRU3),
-+ ASPEED_PINCTRL_GROUP(JTAGM1),
-+ ASPEED_PINCTRL_GROUP(MDIO0),
-+ ASPEED_PINCTRL_GROUP(MDIO1),
-+ ASPEED_PINCTRL_GROUP(MDIO2),
-+ ASPEED_PINCTRL_GROUP(FWQSPI),
-+ ASPEED_PINCTRL_GROUP(FWSPIABR),
-+ ASPEED_PINCTRL_GROUP(FWSPIWPN),
-+ ASPEED_PINCTRL_GROUP(RGMII0),
-+ ASPEED_PINCTRL_GROUP(RGMII1),
-+ ASPEED_PINCTRL_GROUP(RMII0),
-+ ASPEED_PINCTRL_GROUP(RMII1),
-+ ASPEED_PINCTRL_GROUP(VGA),
-+ ASPEED_PINCTRL_GROUP(DSGPM1),
-+ ASPEED_PINCTRL_GROUP(SGPS),
-+ ASPEED_PINCTRL_GROUP(I2CF0),
-+ ASPEED_PINCTRL_GROUP(I2CF1),
-+ ASPEED_PINCTRL_GROUP(I2CF2),
-+ ASPEED_PINCTRL_GROUP(CANBUS),
-+ ASPEED_PINCTRL_GROUP(USBUART),
-+ ASPEED_PINCTRL_GROUP(HBLED),
-+ ASPEED_PINCTRL_GROUP(MACLINK0),
-+ ASPEED_PINCTRL_GROUP(MACLINK1),
-+ ASPEED_PINCTRL_GROUP(MACLINK2),
-+ ASPEED_PINCTRL_GROUP(NCTS2),
-+ ASPEED_PINCTRL_GROUP(NDCD2),
-+ ASPEED_PINCTRL_GROUP(NDSR2),
-+ ASPEED_PINCTRL_GROUP(NRI2),
-+ ASPEED_PINCTRL_GROUP(NDTR2),
-+ ASPEED_PINCTRL_GROUP(NRTS2),
-+ ASPEED_PINCTRL_GROUP(SMON0),
-+ ASPEED_PINCTRL_GROUP(SMON1),
-+ ASPEED_PINCTRL_GROUP(SGMII),
-+ ASPEED_PINCTRL_GROUP(PCIERC2PERST),
-+ ASPEED_PINCTRL_GROUP(USB2CUD),
-+ ASPEED_PINCTRL_GROUP(USB2CD),
-+ ASPEED_PINCTRL_GROUP(USB2CH),
-+ ASPEED_PINCTRL_GROUP(USB2CU),
-+ ASPEED_PINCTRL_GROUP(USB2DD),
-+ ASPEED_PINCTRL_GROUP(USB2DH),
-+};
-+
-+FUNC_DECL_(ESPI0, "ESPI0");
-+FUNC_DECL_(ESPI1, "ESPI1");
-+FUNC_DECL_(LPC0, "LPC0");
-+FUNC_DECL_(LPC1, "LPC1");
-+FUNC_DECL_(VPI, "VPI");
-+FUNC_DECL_(SD, "SD");
-+FUNC_DECL_(OSCCLK, "OSCCLK");
-+FUNC_DECL_(TACH0, "TACH0");
-+FUNC_DECL_(TACH1, "TACH1");
-+FUNC_DECL_(TACH2, "TACH2");
-+FUNC_DECL_(TACH3, "TACH3");
-+FUNC_DECL_(TACH4, "TACH4");
-+FUNC_DECL_(TACH5, "TACH5");
-+FUNC_DECL_(TACH6, "TACH6");
-+FUNC_DECL_(TACH7, "TACH7");
-+FUNC_DECL_(THRU0, "THRU0");
-+FUNC_DECL_(THRU1, "THRU1");
-+FUNC_DECL_(NTCS5, "NTCS5");
-+FUNC_DECL_(NDSR5, "NDSR5");
-+FUNC_DECL_(NRI5, "NRI5");
-+FUNC_DECL_(TACH8, "TACH8");
-+FUNC_DECL_(TACH9, "TACH9");
-+FUNC_DECL_(TACH10, "TACH10");
-+FUNC_DECL_(TACH11, "TACH11");
-+FUNC_DECL_(TACH12, "TACH12");
-+FUNC_DECL_(TACH13, "TACH13");
-+FUNC_DECL_(TACH14, "TACH14");
-+FUNC_DECL_(TACH15, "TACH15");
-+FUNC_DECL_(SALT12, "SALT12");
-+FUNC_DECL_(SALT13, "SALT13");
-+FUNC_DECL_(SALT14, "SALT14");
-+FUNC_DECL_(SALT15, "SALT15");
-+FUNC_DECL_(SPIM0, "SPIM0");
-+FUNC_DECL_(PWM0, "PWM0");
-+FUNC_DECL_(PWM1, "PWM1");
-+FUNC_DECL_(PWM2, "PWM2");
-+FUNC_DECL_(PWM3, "PWM3");
-+FUNC_DECL_(PWM4, "PWM4");
-+FUNC_DECL_(PWM5, "PWM5");
-+FUNC_DECL_(PWM6, "PWM6");
-+FUNC_DECL_(PWM7, "PWM7");
-+FUNC_DECL_(SIOPBON0, "SIOPBON0");
-+FUNC_DECL_(SIOPBIN0, "SIOPBIN0");
-+FUNC_DECL_(SIOSCIN0, "SIOSCIN0");
-+FUNC_DECL_(SIOS3N0, "SIOS3N0");
-+FUNC_DECL_(SIOS5N0, "SIOS5N0");
-+FUNC_DECL_(SIOPWREQN0, "SIOPWREQN0");
-+FUNC_DECL_(SIOONCTRLN0, "SIOONCTRLN0");
-+FUNC_DECL_(SIOPWRGD0, "SIOPWRGD0");
-+FUNC_DECL_(UART0, "NCTS0", "NDCD0", "NDSR0", "NRI0", "NDTR0", "NRTS0", "TXD0", "RXD0");
-+FUNC_DECL_(UART1, "NCTS1", "NDCD1", "NDSR1", "NRI1", "NDTR1", "NRTS1", "TXD1", "RXD1");
-+FUNC_DECL_(UART2, "TXD2", "RXD2");
-+FUNC_DECL_(UART3, "TXD3", "RXD3");
-+FUNC_DECL_(UART5, "NCTS5", "NDCD5", "NDSR5", "NRI5", "NDTR5", "NRTS5", "TXD5", "RXD5");
-+FUNC_DECL_(UART6, "NCTS6", "NDCD6", "NDSR6", "NRI6", "NDTR6", "NRTS6", "TXD6", "RXD6");
-+FUNC_DECL_(UART7, "TXD7", "RXD7");
-+FUNC_DECL_(UART8, "TXD8", "RXD8");
-+FUNC_DECL_(UART9, "TXD9", "RXD9");
-+FUNC_DECL_(UART10, "TXD10", "RXD10");
-+FUNC_DECL_(UART11, "TXD11", "RXD11");
-+FUNC_DECL_(SPIM1, "SPIM1");
-+FUNC_DECL_(SPIM2, "SPIM2");
-+FUNC_DECL_(PWM8, "PWM8");
-+FUNC_DECL_(PWM9, "PWM9");
-+FUNC_DECL_(PWM10, "PWM10");
-+FUNC_DECL_(PWM11, "PWM11");
-+FUNC_DECL_(PWM12, "PWM12");
-+FUNC_DECL_(PWM13, "PWM13");
-+FUNC_DECL_(PWM14, "PWM14");
-+FUNC_DECL_(PWM15, "PWM15");
-+FUNC_DECL_(WDTRST0N, "WDTRST0N");
-+FUNC_DECL_(WDTRST1N, "WDTRST1N");
-+FUNC_DECL_(WDTRST2N, "WDTRST2N");
-+FUNC_DECL_(WDTRST3N, "WDTRST3N");
-+FUNC_DECL_(WDTRST4N, "WDTRST4N");
-+FUNC_DECL_(WDTRST5N, "WDTRST5N");
-+FUNC_DECL_(WDTRST6N, "WDTRST6N");
-+FUNC_DECL_(WDTRST7N, "WDTRST7N");
-+FUNC_DECL_(FSI0, "FSI0");
-+FUNC_DECL_(FSI1, "FSI1");
-+FUNC_DECL_(FSI2, "FSI2");
-+FUNC_DECL_(FSI3, "FSI3");
-+FUNC_DECL_(SALT4, "ASLT4");
-+FUNC_DECL_(SALT5, "ASLT5");
-+FUNC_DECL_(SALT6, "ASLT6");
-+FUNC_DECL_(SALT7, "ASLT7");
-+FUNC_DECL_(SALT8, "ASLT8");
-+FUNC_DECL_(SALT9, "ASLT9");
-+FUNC_DECL_(SALT10, "ASLT10");
-+FUNC_DECL_(SALT11, "ASLT11");
-+FUNC_DECL_(ADC0, "ADC0");
-+FUNC_DECL_(ADC1, "ADC1");
-+FUNC_DECL_(ADC2, "ADC2");
-+FUNC_DECL_(ADC3, "ADC3");
-+FUNC_DECL_(ADC4, "ADC4");
-+FUNC_DECL_(ADC5, "ADC5");
-+FUNC_DECL_(ADC6, "ADC6");
-+FUNC_DECL_(ADC7, "ADC7");
-+FUNC_DECL_(ADC8, "ADC8");
-+FUNC_DECL_(ADC9, "ADC9");
-+FUNC_DECL_(ADC10, "ADC10");
-+FUNC_DECL_(ADC11, "ADC11");
-+FUNC_DECL_(ADC12, "ADC12");
-+FUNC_DECL_(ADC13, "ADC13");
-+FUNC_DECL_(ADC14, "ADC14");
-+FUNC_DECL_(ADC15, "ADC15");
-+FUNC_DECL_(AUXPWRGOOD0, "AUXPWRGOOD0");
-+FUNC_DECL_(AUXPWRGOOD1, "AUXPWRGOOD1");
-+FUNC_DECL_(SGPM0, "SGPM0");
-+FUNC_DECL_(SGPM1, "SGPM1");
-+FUNC_DECL_(I2C0, "I2C0");
-+FUNC_DECL_(I2C1, "I2C1");
-+FUNC_DECL_(I2C2, "I2C2");
-+FUNC_DECL_(I2C3, "I2C3");
-+FUNC_DECL_(I2C4, "I2C4");
-+FUNC_DECL_(I2C5, "I2C5");
-+FUNC_DECL_(I2C6, "I2C6");
-+FUNC_DECL_(I2C7, "I2C7");
-+FUNC_DECL_(I2C8, "I2C8");
-+FUNC_DECL_(I2C9, "I2C9");
-+FUNC_DECL_(I2C10, "I2C10");
-+FUNC_DECL_(I2C11, "I2C11");
-+FUNC_DECL_(I2C12, "I2C12");
-+FUNC_DECL_(I2C13, "I2C13");
-+FUNC_DECL_(I2C14, "I2C14");
-+FUNC_DECL_(I2C15, "I2C15");
-+FUNC_DECL_(DI2C8, "DI2C8");
-+FUNC_DECL_(DI2C9, "DI2C9");
-+FUNC_DECL_(DI2C10, "DI2C10");
-+FUNC_DECL_(DI2C13, "DI2C13");
-+FUNC_DECL_(DI2C14, "DI2C14");
-+FUNC_DECL_(DI2C15, "DI2C15");
-+FUNC_DECL_(SIOPBON1, "SIOPBON1");
-+FUNC_DECL_(SIOPBIN1, "SIOPBIN1");
-+FUNC_DECL_(SIOSCIN1, "SIOSCIN1");
-+FUNC_DECL_(SIOS3N1, "SIOS3N1");
-+FUNC_DECL_(SIOS5N1, "SIOS5N1");
-+FUNC_DECL_(SIOPWREQN1, "SIOPWREQN1");
-+FUNC_DECL_(SIOONCTRLN1, "SIOONCTRLN1");
-+FUNC_DECL_(SIOPWRGD1, "SIOPWRGD1");
-+FUNC_DECL_(I3C0, "I3C0");
-+FUNC_DECL_(I3C1, "I3C1");
-+FUNC_DECL_(I3C2, "I3C2");
-+FUNC_DECL_(I3C3, "I3C3");
-+FUNC_DECL_(I3C4, "I3C4", "HVI3C4");
-+FUNC_DECL_(I3C5, "I3C5", "HVI3C5");
-+FUNC_DECL_(I3C6, "I3C6", "HVI3C6");
-+FUNC_DECL_(I3C7, "I3C7", "HVI3C7");
-+FUNC_DECL_(I3C8, "I3C8");
-+FUNC_DECL_(I3C9, "I3C9");
-+FUNC_DECL_(I3C10, "I3C10", "HVI3C10");
-+FUNC_DECL_(I3C11, "I3C11", "HVI3C11");
-+FUNC_DECL_(I3C12, "HVI3C12");
-+FUNC_DECL_(I3C13, "HVI3C13");
-+FUNC_DECL_(I3C14, "HVI3C14");
-+FUNC_DECL_(I3C15, "HVI3C15");
-+FUNC_DECL_(LTPI, "LTPI");
-+FUNC_DECL_(SPI0, "SPI0");
-+FUNC_DECL_(QSPI0, "QSPI0");
-+FUNC_DECL_(SPI0CS1, "SPI0CS1");
-+FUNC_DECL_(SPI0ABR, "SPI0ABR");
-+FUNC_DECL_(SPI0WPN, "SPI0WPN");
-+FUNC_DECL_(SPI1, "SPI1");
-+FUNC_DECL_(QSPI1, "QSPI1");
-+FUNC_DECL_(SPI1CS1, "SPI1CS1");
-+FUNC_DECL_(SPI1ABR, "SPI1ABR");
-+FUNC_DECL_(SPI1WPN, "SPI1WPN");
-+FUNC_DECL_(SPI2, "SPI2");
-+FUNC_DECL_(QSPI2, "QSPI2");
-+FUNC_DECL_(SPI2CS1, "SPI2CS1");
-+FUNC_DECL_(THRU2, "THRU2");
-+FUNC_DECL_(THRU3, "THRU3");
-+FUNC_DECL_(JTAGM1, "JTAGM1");
-+FUNC_DECL_(MDIO0, "MDIO0");
-+FUNC_DECL_(MDIO1, "MDIO1");
-+FUNC_DECL_(MDIO2, "MDIO2");
-+FUNC_DECL_(FWQSPI, "FWQSPI");
-+FUNC_DECL_(FWSPIABR, "FWSPIABR");
-+FUNC_DECL_(FWSPIWPN, "FWSPIWPN");
-+FUNC_DECL_(RGMII0, "RGMII0");
-+FUNC_DECL_(RGMII1, "RGMII1");
-+FUNC_DECL_(RMII0, "RMII0");
-+FUNC_DECL_(RMII1, "RMII1");
-+FUNC_DECL_(VGA, "VGA");
-+FUNC_DECL_(DSGPM1, "DSGPM1");
-+FUNC_DECL_(SGPS, "SGPS");
-+FUNC_DECL_(I2CF0, "I2CF0");
-+FUNC_DECL_(I2CF1, "I2CF1");
-+FUNC_DECL_(I2CF2, "I2CF2");
-+FUNC_DECL_(CANBUS, "CANBUS");
-+FUNC_DECL_(USBUART, "USBUART");
-+FUNC_DECL_(HBLED, "HBLED");
-+FUNC_DECL_(MACLINK0, "MACLINK0");
-+FUNC_DECL_(MACLINK1, "MACLINK1");
-+FUNC_DECL_(MACLINK2, "MACLINK2");
-+FUNC_DECL_(SMON0, "SMON0");
-+FUNC_DECL_(SMON1, "SMON1");
-+FUNC_DECL_(SGMII, "SGMII");
-+FUNC_DECL_(PCIERC, "PCIERC2PERST");
-+FUNC_DECL_(USB2C, "USB2CUD", "USB2CD", "USB2CH", "USB2CU");
-+FUNC_DECL_(USB2D, "USB2DD", "USB2DH");
-+
-+static struct aspeed_pin_function aspeed_g7_soc1_funcs[] = {
-+ ASPEED_PINCTRL_FUNC(ESPI0),
-+ ASPEED_PINCTRL_FUNC(ESPI1),
-+ ASPEED_PINCTRL_FUNC(LPC0),
-+ ASPEED_PINCTRL_FUNC(LPC1),
-+ ASPEED_PINCTRL_FUNC(VPI),
-+ ASPEED_PINCTRL_FUNC(SD),
-+ ASPEED_PINCTRL_FUNC(OSCCLK),
-+ ASPEED_PINCTRL_FUNC(TACH0),
-+ ASPEED_PINCTRL_FUNC(TACH1),
-+ ASPEED_PINCTRL_FUNC(TACH2),
-+ ASPEED_PINCTRL_FUNC(TACH3),
-+ ASPEED_PINCTRL_FUNC(TACH4),
-+ ASPEED_PINCTRL_FUNC(TACH5),
-+ ASPEED_PINCTRL_FUNC(TACH6),
-+ ASPEED_PINCTRL_FUNC(TACH7),
-+ ASPEED_PINCTRL_FUNC(THRU0),
-+ ASPEED_PINCTRL_FUNC(THRU1),
-+ ASPEED_PINCTRL_FUNC(NTCS5),
-+ ASPEED_PINCTRL_FUNC(NTCS5),
-+ ASPEED_PINCTRL_FUNC(NDSR5),
-+ ASPEED_PINCTRL_FUNC(NRI5),
-+ ASPEED_PINCTRL_FUNC(NRI5),
-+ ASPEED_PINCTRL_FUNC(SALT12),
-+ ASPEED_PINCTRL_FUNC(SALT13),
-+ ASPEED_PINCTRL_FUNC(SALT14),
-+ ASPEED_PINCTRL_FUNC(SALT15),
-+ ASPEED_PINCTRL_FUNC(TACH8),
-+ ASPEED_PINCTRL_FUNC(TACH9),
-+ ASPEED_PINCTRL_FUNC(TACH10),
-+ ASPEED_PINCTRL_FUNC(TACH11),
-+ ASPEED_PINCTRL_FUNC(TACH12),
-+ ASPEED_PINCTRL_FUNC(TACH13),
-+ ASPEED_PINCTRL_FUNC(TACH14),
-+ ASPEED_PINCTRL_FUNC(TACH15),
-+ ASPEED_PINCTRL_FUNC(SPIM0),
-+ ASPEED_PINCTRL_FUNC(PWM0),
-+ ASPEED_PINCTRL_FUNC(PWM1),
-+ ASPEED_PINCTRL_FUNC(PWM2),
-+ ASPEED_PINCTRL_FUNC(PWM3),
-+ ASPEED_PINCTRL_FUNC(PWM4),
-+ ASPEED_PINCTRL_FUNC(PWM5),
-+ ASPEED_PINCTRL_FUNC(PWM6),
-+ ASPEED_PINCTRL_FUNC(PWM7),
-+ ASPEED_PINCTRL_FUNC(SIOPBON0),
-+ ASPEED_PINCTRL_FUNC(SIOPBIN0),
-+ ASPEED_PINCTRL_FUNC(SIOSCIN0),
-+ ASPEED_PINCTRL_FUNC(SIOS3N0),
-+ ASPEED_PINCTRL_FUNC(SIOS5N0),
-+ ASPEED_PINCTRL_FUNC(SIOPWREQN0),
-+ ASPEED_PINCTRL_FUNC(SIOONCTRLN0),
-+ ASPEED_PINCTRL_FUNC(SIOPWRGD0),
-+ ASPEED_PINCTRL_FUNC(UART0),
-+ ASPEED_PINCTRL_FUNC(UART1),
-+ ASPEED_PINCTRL_FUNC(UART2),
-+ ASPEED_PINCTRL_FUNC(UART3),
-+ ASPEED_PINCTRL_FUNC(UART5),
-+ ASPEED_PINCTRL_FUNC(UART6),
-+ ASPEED_PINCTRL_FUNC(UART7),
-+ ASPEED_PINCTRL_FUNC(UART8),
-+ ASPEED_PINCTRL_FUNC(UART9),
-+ ASPEED_PINCTRL_FUNC(UART10),
-+ ASPEED_PINCTRL_FUNC(UART11),
-+ ASPEED_PINCTRL_FUNC(SPIM1),
-+ ASPEED_PINCTRL_FUNC(PWM7),
-+ ASPEED_PINCTRL_FUNC(PWM8),
-+ ASPEED_PINCTRL_FUNC(PWM9),
-+ ASPEED_PINCTRL_FUNC(PWM10),
-+ ASPEED_PINCTRL_FUNC(PWM11),
-+ ASPEED_PINCTRL_FUNC(PWM12),
-+ ASPEED_PINCTRL_FUNC(PWM13),
-+ ASPEED_PINCTRL_FUNC(PWM14),
-+ ASPEED_PINCTRL_FUNC(PWM15),
-+ ASPEED_PINCTRL_FUNC(WDTRST0N),
-+ ASPEED_PINCTRL_FUNC(WDTRST1N),
-+ ASPEED_PINCTRL_FUNC(WDTRST2N),
-+ ASPEED_PINCTRL_FUNC(WDTRST3N),
-+ ASPEED_PINCTRL_FUNC(WDTRST4N),
-+ ASPEED_PINCTRL_FUNC(WDTRST5N),
-+ ASPEED_PINCTRL_FUNC(WDTRST6N),
-+ ASPEED_PINCTRL_FUNC(WDTRST7N),
-+ ASPEED_PINCTRL_FUNC(FSI0),
-+ ASPEED_PINCTRL_FUNC(FSI1),
-+ ASPEED_PINCTRL_FUNC(FSI2),
-+ ASPEED_PINCTRL_FUNC(FSI3),
-+ ASPEED_PINCTRL_FUNC(SALT4),
-+ ASPEED_PINCTRL_FUNC(SALT5),
-+ ASPEED_PINCTRL_FUNC(SALT6),
-+ ASPEED_PINCTRL_FUNC(SALT7),
-+ ASPEED_PINCTRL_FUNC(SALT8),
-+ ASPEED_PINCTRL_FUNC(SALT9),
-+ ASPEED_PINCTRL_FUNC(SALT10),
-+ ASPEED_PINCTRL_FUNC(SALT11),
-+ ASPEED_PINCTRL_FUNC(ADC0),
-+ ASPEED_PINCTRL_FUNC(ADC1),
-+ ASPEED_PINCTRL_FUNC(ADC2),
-+ ASPEED_PINCTRL_FUNC(ADC3),
-+ ASPEED_PINCTRL_FUNC(ADC4),
-+ ASPEED_PINCTRL_FUNC(ADC5),
-+ ASPEED_PINCTRL_FUNC(ADC6),
-+ ASPEED_PINCTRL_FUNC(ADC7),
-+ ASPEED_PINCTRL_FUNC(ADC8),
-+ ASPEED_PINCTRL_FUNC(ADC9),
-+ ASPEED_PINCTRL_FUNC(ADC10),
-+ ASPEED_PINCTRL_FUNC(ADC11),
-+ ASPEED_PINCTRL_FUNC(ADC12),
-+ ASPEED_PINCTRL_FUNC(ADC13),
-+ ASPEED_PINCTRL_FUNC(ADC14),
-+ ASPEED_PINCTRL_FUNC(ADC15),
-+ ASPEED_PINCTRL_FUNC(AUXPWRGOOD0),
-+ ASPEED_PINCTRL_FUNC(AUXPWRGOOD1),
-+ ASPEED_PINCTRL_FUNC(SGPM0),
-+ ASPEED_PINCTRL_FUNC(SGPM1),
-+ ASPEED_PINCTRL_FUNC(SPIM2),
-+ ASPEED_PINCTRL_FUNC(I2C0),
-+ ASPEED_PINCTRL_FUNC(I2C1),
-+ ASPEED_PINCTRL_FUNC(I2C2),
-+ ASPEED_PINCTRL_FUNC(I2C3),
-+ ASPEED_PINCTRL_FUNC(I2C4),
-+ ASPEED_PINCTRL_FUNC(I2C5),
-+ ASPEED_PINCTRL_FUNC(I2C6),
-+ ASPEED_PINCTRL_FUNC(I2C7),
-+ ASPEED_PINCTRL_FUNC(I2C8),
-+ ASPEED_PINCTRL_FUNC(I2C9),
-+ ASPEED_PINCTRL_FUNC(I2C10),
-+ ASPEED_PINCTRL_FUNC(I2C11),
-+ ASPEED_PINCTRL_FUNC(I2C12),
-+ ASPEED_PINCTRL_FUNC(I2C13),
-+ ASPEED_PINCTRL_FUNC(I2C14),
-+ ASPEED_PINCTRL_FUNC(I2C15),
-+ ASPEED_PINCTRL_FUNC(DI2C8),
-+ ASPEED_PINCTRL_FUNC(DI2C9),
-+ ASPEED_PINCTRL_FUNC(DI2C10),
-+ ASPEED_PINCTRL_FUNC(DI2C13),
-+ ASPEED_PINCTRL_FUNC(DI2C14),
-+ ASPEED_PINCTRL_FUNC(DI2C15),
-+ ASPEED_PINCTRL_FUNC(SIOPBON1),
-+ ASPEED_PINCTRL_FUNC(SIOPBIN1),
-+ ASPEED_PINCTRL_FUNC(SIOSCIN1),
-+ ASPEED_PINCTRL_FUNC(SIOS3N1),
-+ ASPEED_PINCTRL_FUNC(SIOS5N1),
-+ ASPEED_PINCTRL_FUNC(SIOPWREQN1),
-+ ASPEED_PINCTRL_FUNC(SIOONCTRLN1),
-+ ASPEED_PINCTRL_FUNC(SIOPWRGD1),
-+ ASPEED_PINCTRL_FUNC(I3C0),
-+ ASPEED_PINCTRL_FUNC(I3C1),
-+ ASPEED_PINCTRL_FUNC(I3C2),
-+ ASPEED_PINCTRL_FUNC(I3C3),
-+ ASPEED_PINCTRL_FUNC(I3C4),
-+ ASPEED_PINCTRL_FUNC(I3C5),
-+ ASPEED_PINCTRL_FUNC(I3C6),
-+ ASPEED_PINCTRL_FUNC(I3C7),
-+ ASPEED_PINCTRL_FUNC(I3C8),
-+ ASPEED_PINCTRL_FUNC(I3C9),
-+ ASPEED_PINCTRL_FUNC(I3C10),
-+ ASPEED_PINCTRL_FUNC(I3C11),
-+ ASPEED_PINCTRL_FUNC(I3C12),
-+ ASPEED_PINCTRL_FUNC(I3C13),
-+ ASPEED_PINCTRL_FUNC(I3C14),
-+ ASPEED_PINCTRL_FUNC(I3C15),
-+ ASPEED_PINCTRL_FUNC(LTPI),
-+ ASPEED_PINCTRL_FUNC(SPI0),
-+ ASPEED_PINCTRL_FUNC(QSPI0),
-+ ASPEED_PINCTRL_FUNC(SPI0CS1),
-+ ASPEED_PINCTRL_FUNC(SPI0ABR),
-+ ASPEED_PINCTRL_FUNC(SPI0WPN),
-+ ASPEED_PINCTRL_FUNC(SPI1),
-+ ASPEED_PINCTRL_FUNC(QSPI1),
-+ ASPEED_PINCTRL_FUNC(SPI1CS1),
-+ ASPEED_PINCTRL_FUNC(SPI1ABR),
-+ ASPEED_PINCTRL_FUNC(SPI1WPN),
-+ ASPEED_PINCTRL_FUNC(SPI2),
-+ ASPEED_PINCTRL_FUNC(QSPI2),
-+ ASPEED_PINCTRL_FUNC(SPI2CS1),
-+ ASPEED_PINCTRL_FUNC(THRU2),
-+ ASPEED_PINCTRL_FUNC(THRU3),
-+ ASPEED_PINCTRL_FUNC(JTAGM1),
-+ ASPEED_PINCTRL_FUNC(MDIO0),
-+ ASPEED_PINCTRL_FUNC(MDIO1),
-+ ASPEED_PINCTRL_FUNC(MDIO2),
-+ ASPEED_PINCTRL_FUNC(FWQSPI),
-+ ASPEED_PINCTRL_FUNC(FWSPIABR),
-+ ASPEED_PINCTRL_FUNC(FWSPIWPN),
-+ ASPEED_PINCTRL_FUNC(RGMII0),
-+ ASPEED_PINCTRL_FUNC(RGMII1),
-+ ASPEED_PINCTRL_FUNC(RMII0),
-+ ASPEED_PINCTRL_FUNC(RMII1),
-+ ASPEED_PINCTRL_FUNC(VGA),
-+ ASPEED_PINCTRL_FUNC(DSGPM1),
-+ ASPEED_PINCTRL_FUNC(SGPS),
-+ ASPEED_PINCTRL_FUNC(I2CF0),
-+ ASPEED_PINCTRL_FUNC(I2CF1),
-+ ASPEED_PINCTRL_FUNC(I2CF2),
-+ ASPEED_PINCTRL_FUNC(CANBUS),
-+ ASPEED_PINCTRL_FUNC(USBUART),
-+ ASPEED_PINCTRL_FUNC(HBLED),
-+ ASPEED_PINCTRL_FUNC(MACLINK0),
-+ ASPEED_PINCTRL_FUNC(MACLINK1),
-+ ASPEED_PINCTRL_FUNC(MACLINK2),
-+ ASPEED_PINCTRL_FUNC(SMON0),
-+ ASPEED_PINCTRL_FUNC(SMON1),
-+ ASPEED_PINCTRL_FUNC(SGMII),
-+ ASPEED_PINCTRL_FUNC(PCIERC),
-+ ASPEED_PINCTRL_FUNC(USB2C),
-+ ASPEED_PINCTRL_FUNC(USB2D),
-+};
-+
-+/* number, name, drv_data */
-+static const struct pinctrl_pin_desc aspeed_g7_soc1_pins[] = {
-+ PINCTRL_PIN(C16, "C16"),
-+ PINCTRL_PIN(C14, "C14"),
-+ PINCTRL_PIN(C11, "C11"),
-+ PINCTRL_PIN(D9, "D9"),
-+ PINCTRL_PIN(F14, "F14"),
-+ PINCTRL_PIN(D10, "D10"),
-+ PINCTRL_PIN(C12, "C12"),
-+ PINCTRL_PIN(C13, "C13"),
-+ PINCTRL_PIN(AA24, "AA24"),
-+ PINCTRL_PIN(AB24, "AB24"),
-+ PINCTRL_PIN(AB23, "AB23"),
-+ PINCTRL_PIN(AC22, "AC22"),
-+ PINCTRL_PIN(AD22, "AD22"),
-+ PINCTRL_PIN(AE21, "AE21"),
-+ PINCTRL_PIN(AF20, "AF20"),
-+ PINCTRL_PIN(AE20, "AE20"),
-+ PINCTRL_PIN(AD20, "AD20"),
-+ PINCTRL_PIN(Y23, "Y23"),
-+ PINCTRL_PIN(W23, "W23"),
-+ PINCTRL_PIN(AD19, "AD19"),
-+ PINCTRL_PIN(AC20, "AC20"),
-+ PINCTRL_PIN(AA23, "AA23"),
-+ PINCTRL_PIN(AB22, "AB22"),
-+ PINCTRL_PIN(AB21, "AB21"),
-+ PINCTRL_PIN(AA22, "AA22"),
-+ PINCTRL_PIN(Y22, "Y22"),
-+ PINCTRL_PIN(W22, "W22"),
-+ PINCTRL_PIN(AF18, "AF18"),
-+ PINCTRL_PIN(AE18, "AE18"),
-+ PINCTRL_PIN(AD18, "AD18"),
-+ PINCTRL_PIN(AC19, "AC19"),
-+ PINCTRL_PIN(AB20, "AB20"),
-+ PINCTRL_PIN(AF17, "AF17"),
-+ PINCTRL_PIN(AA16, "AA16"),
-+ PINCTRL_PIN(Y16, "Y16"),
-+ PINCTRL_PIN(V17, "V17"),
-+ PINCTRL_PIN(J13, "J13"),
-+ PINCTRL_PIN(AB16, "AB16"),
-+ PINCTRL_PIN(AC16, "AC16"),
-+ PINCTRL_PIN(AF16, "AF16"),
-+ PINCTRL_PIN(AA15, "AA15"),
-+ PINCTRL_PIN(AB15, "AB15"),
-+ PINCTRL_PIN(AC15, "AC15"),
-+ PINCTRL_PIN(AD15, "AD15"),
-+ PINCTRL_PIN(Y15, "Y15"),
-+ PINCTRL_PIN(AA14, "AA14"),
-+ PINCTRL_PIN(W16, "W16"),
-+ PINCTRL_PIN(V16, "V16"),
-+ PINCTRL_PIN(AB18, "AB18"),
-+ PINCTRL_PIN(AC18, "AC18"),
-+ PINCTRL_PIN(K13, "K13"),
-+ PINCTRL_PIN(AA17, "AA17"),
-+ PINCTRL_PIN(AB17, "AB17"),
-+ PINCTRL_PIN(AD16, "AD16"),
-+ PINCTRL_PIN(AC17, "AC17"),
-+ PINCTRL_PIN(AD17, "AD17"),
-+ PINCTRL_PIN(AE16, "AE16"),
-+ PINCTRL_PIN(AE17, "AE17"),
-+ PINCTRL_PIN(U23, "U23"),
-+ PINCTRL_PIN(T24, "T24"),
-+ PINCTRL_PIN(HOLE0, "HOLE0"),
-+ PINCTRL_PIN(HOLE1, "HOLE1"),
-+ PINCTRL_PIN(HOLE2, "HOLE2"),
-+ PINCTRL_PIN(HOLE3, "HOLE3"),
-+ PINCTRL_PIN(AC24, "AC24"),
-+ PINCTRL_PIN(AD24, "AD24"),
-+ PINCTRL_PIN(AE23, "AE23"),
-+ PINCTRL_PIN(AE19, "AE19"),
-+ PINCTRL_PIN(AF23, "AF23"),
-+ PINCTRL_PIN(Y25, "Y25"),
-+ PINCTRL_PIN(AA25, "AA25"),
-+ PINCTRL_PIN(AF19, "AF19"),
-+ PINCTRL_PIN(AB25, "AB25"),
-+ PINCTRL_PIN(AC25, "AC25"),
-+ PINCTRL_PIN(AD25, "AD25"),
-+ PINCTRL_PIN(V22, "V22"),
-+ PINCTRL_PIN(AE25, "AE25"),
-+ PINCTRL_PIN(V21, "V21"),
-+ PINCTRL_PIN(AF21, "AF21"),
-+ PINCTRL_PIN(AF25, "AF25"),
-+ PINCTRL_PIN(AF26, "AF26"),
-+ PINCTRL_PIN(AE26, "AE26"),
-+ PINCTRL_PIN(W21, "W21"),
-+ PINCTRL_PIN(AD26, "AD26"),
-+ PINCTRL_PIN(Y21, "Y21"),
-+ PINCTRL_PIN(AC26, "AC26"),
-+ PINCTRL_PIN(AA26, "AA26"),
-+ PINCTRL_PIN(AB26, "AB26"),
-+ PINCTRL_PIN(T26, "T26"),
-+ PINCTRL_PIN(AA20, "AA20"),
-+ PINCTRL_PIN(V23, "V23"),
-+ PINCTRL_PIN(W24, "W24"),
-+ PINCTRL_PIN(R26, "R26"),
-+ PINCTRL_PIN(AA21, "AA21"),
-+ PINCTRL_PIN(P26, "P26"),
-+ PINCTRL_PIN(Y24, "Y24"),
-+ PINCTRL_PIN(B16, "B16"),
-+ PINCTRL_PIN(D14, "D14"),
-+ PINCTRL_PIN(B15, "B15"),
-+ PINCTRL_PIN(B14, "B14"),
-+ PINCTRL_PIN(C17, "C17"),
-+ PINCTRL_PIN(B13, "B13"),
-+ PINCTRL_PIN(E14, "E14"),
-+ PINCTRL_PIN(C15, "C15"),
-+ PINCTRL_PIN(D24, "D24"),
-+ PINCTRL_PIN(B23, "B23"),
-+ PINCTRL_PIN(B22, "B22"),
-+ PINCTRL_PIN(C23, "C23"),
-+ PINCTRL_PIN(B18, "B18"),
-+ PINCTRL_PIN(B21, "B21"),
-+ PINCTRL_PIN(B17, "B17"),
-+ PINCTRL_PIN(B19, "B19"),
-+ PINCTRL_PIN(B26, "B26"),
-+ PINCTRL_PIN(A25, "A25"),
-+ PINCTRL_PIN(A24, "A24"),
-+ PINCTRL_PIN(B24, "B24"),
-+ PINCTRL_PIN(E26, "E26"),
-+ PINCTRL_PIN(A21, "A21"),
-+ PINCTRL_PIN(A19, "A19"),
-+ PINCTRL_PIN(A18, "A18"),
-+ PINCTRL_PIN(D26, "D26"),
-+ PINCTRL_PIN(C26, "C26"),
-+ PINCTRL_PIN(A23, "A23"),
-+ PINCTRL_PIN(B25, "B25"),
-+ PINCTRL_PIN(A22, "A22"),
-+ PINCTRL_PIN(F26, "F26"),
-+ PINCTRL_PIN(A26, "A26"),
-+ PINCTRL_PIN(A14, "A14"),
-+ PINCTRL_PIN(E10, "E10"),
-+ PINCTRL_PIN(E13, "E13"),
-+ PINCTRL_PIN(D12, "D12"),
-+ PINCTRL_PIN(F10, "F10"),
-+ PINCTRL_PIN(E11, "E11"),
-+ PINCTRL_PIN(F11, "F11"),
-+ PINCTRL_PIN(F13, "F13"),
-+ PINCTRL_PIN(N15, "N15"),
-+ PINCTRL_PIN(C20, "C20"),
-+ PINCTRL_PIN(C19, "C19"),
-+ PINCTRL_PIN(A8, "A8"),
-+ PINCTRL_PIN(R14, "R14"),
-+ PINCTRL_PIN(A7, "A7"),
-+ PINCTRL_PIN(P14, "P14"),
-+ PINCTRL_PIN(D20, "D20"),
-+ PINCTRL_PIN(A6, "A6"),
-+ PINCTRL_PIN(B6, "B6"),
-+ PINCTRL_PIN(N14, "N14"),
-+ PINCTRL_PIN(B7, "B7"),
-+ PINCTRL_PIN(B8, "B8"),
-+ PINCTRL_PIN(B9, "B9"),
-+ PINCTRL_PIN(M14, "M14"),
-+ PINCTRL_PIN(J11, "J11"),
-+ PINCTRL_PIN(E7, "E7"),
-+ PINCTRL_PIN(D19, "D19"),
-+ PINCTRL_PIN(B11, "B11"),
-+ PINCTRL_PIN(D15, "D15"),
-+ PINCTRL_PIN(B12, "B12"),
-+ PINCTRL_PIN(B10, "B10"),
-+ PINCTRL_PIN(P13, "P13"),
-+ PINCTRL_PIN(C18, "C18"),
-+ PINCTRL_PIN(C6, "C6"),
-+ PINCTRL_PIN(C7, "C7"),
-+ PINCTRL_PIN(D7, "D7"),
-+ PINCTRL_PIN(N13, "N13"),
-+ PINCTRL_PIN(C8, "C8"),
-+ PINCTRL_PIN(C9, "C9"),
-+ PINCTRL_PIN(C10, "C10"),
-+ PINCTRL_PIN(M16, "M16"),
-+ PINCTRL_PIN(A15, "A15"),
-+ PINCTRL_PIN(G11, "G11"),
-+ PINCTRL_PIN(H7, "H7"),
-+ PINCTRL_PIN(H8, "H8"),
-+ PINCTRL_PIN(H9, "H9"),
-+ PINCTRL_PIN(H10, "H10"),
-+ PINCTRL_PIN(H11, "H11"),
-+ PINCTRL_PIN(J9, "J9"),
-+ PINCTRL_PIN(J10, "J10"),
-+ PINCTRL_PIN(E9, "E9"),
-+ PINCTRL_PIN(F9, "F9"),
-+ PINCTRL_PIN(F8, "F8"),
-+ PINCTRL_PIN(M13, "M13"),
-+ PINCTRL_PIN(F7, "F7"),
-+ PINCTRL_PIN(D8, "D8"),
-+ PINCTRL_PIN(E8, "E8"),
-+ PINCTRL_PIN(L12, "L12"),
-+ PINCTRL_PIN(F12, "F12"),
-+ PINCTRL_PIN(E12, "E12"),
-+ PINCTRL_PIN(J12, "J12"),
-+ PINCTRL_PIN(G7, "G7"),
-+ PINCTRL_PIN(G8, "G8"),
-+ PINCTRL_PIN(G9, "G9"),
-+ PINCTRL_PIN(G10, "G10"),
-+ PINCTRL_PIN(K12, "K12"),
-+ PINCTRL_PIN(W17, "W17"),
-+ PINCTRL_PIN(V18, "V18"),
-+ PINCTRL_PIN(W18, "W18"),
-+ PINCTRL_PIN(Y17, "Y17"),
-+ PINCTRL_PIN(AA18, "AA18"),
-+ PINCTRL_PIN(AA13, "AA13"),
-+ PINCTRL_PIN(Y18, "Y18"),
-+ PINCTRL_PIN(AA12, "AA12"),
-+ PINCTRL_PIN(W20, "W20"),
-+ PINCTRL_PIN(V20, "V20"),
-+ PINCTRL_PIN(Y11, "Y11"),
-+ PINCTRL_PIN(V14, "V14"),
-+ PINCTRL_PIN(V19, "V19"),
-+ PINCTRL_PIN(W14, "W14"),
-+ PINCTRL_PIN(Y20, "Y20"),
-+ PINCTRL_PIN(AB19, "AB19"),
-+ PINCTRL_PIN(U26, "U26"),
-+ PINCTRL_PIN(U25, "U25"),
-+ PINCTRL_PIN(V26, "V26"),
-+ PINCTRL_PIN(W26, "W26"),
-+ PINCTRL_PIN(Y26, "Y26"),
-+ PINCTRL_PIN(W25, "W25"),
-+ PINCTRL_PIN(V24, "V24"),
-+ PINCTRL_PIN(U24, "U24"),
-+ PINCTRL_PIN(SGMII0, "SGMII0"),
-+ PINCTRL_PIN(PCIERC2_PERST, "PCIERC2_PERST"),
-+ PINCTRL_PIN(PORTC_MODE, "PORTC_MODE"),
-+ PINCTRL_PIN(PORTD_MODE, "PORTD_MODE"),
-+};
-+
-+FUNCFG_DESCL(C16, PIN_CFG(ESPI1, SCU400, GENMASK(2, 0), 1),
-+ PIN_CFG(LPC1, SCU400, GENMASK(2, 0), 2),
-+ PIN_CFG(SD, SCU400, GENMASK(2, 0), 3),
-+ PIN_CFG(HVI3C4, SCU400, GENMASK(2, 0), 4),
-+ PIN_CFG(VPI, SCU400, GENMASK(2, 0), 5));
-+FUNCFG_DESCL(C14, PIN_CFG(ESPI1, SCU400, GENMASK(6, 4), (1 << 4)),
-+ PIN_CFG(LPC1, SCU400, GENMASK(6, 4), (2 << 4)),
-+ PIN_CFG(SD, SCU400, GENMASK(6, 4), (3 << 4)),
-+ PIN_CFG(HVI3C4, SCU400, GENMASK(6, 4), (4 << 4)),
-+ PIN_CFG(VPI, SCU400, GENMASK(6, 4), (5 << 4)));
-+FUNCFG_DESCL(C11, PIN_CFG(ESPI1, SCU400, GENMASK(10, 8), (1 << 8)),
-+ PIN_CFG(LPC1, SCU400, GENMASK(10, 8), (2 << 8)),
-+ PIN_CFG(SD, SCU400, GENMASK(10, 8), (3 << 8)),
-+ PIN_CFG(HVI3C5, SCU400, GENMASK(10, 8), (4 << 8)),
-+ PIN_CFG(VPI, SCU400, GENMASK(10, 8), (5 << 8)));
-+FUNCFG_DESCL(D9, PIN_CFG(ESPI1, SCU400, GENMASK(14, 12), (1 << 12)),
-+ PIN_CFG(LPC1, SCU400, GENMASK(14, 12), (2 << 12)),
-+ PIN_CFG(SD, SCU400, GENMASK(14, 12), (3 << 12)),
-+ PIN_CFG(HVI3C5, SCU400, GENMASK(14, 12), (4 << 12)),
-+ PIN_CFG(VPI, SCU400, GENMASK(14, 12), (5 << 12)));
-+FUNCFG_DESCL(F14, PIN_CFG(ESPI1, SCU400, GENMASK(18, 16), (1 << 16)),
-+ PIN_CFG(LPC1, SCU400, GENMASK(18, 16), (2 << 16)),
-+ PIN_CFG(SD, SCU400, GENMASK(18, 16), (3 << 16)),
-+ PIN_CFG(HVI3C6, SCU400, GENMASK(18, 16), (4 << 16)),
-+ PIN_CFG(VPI, SCU400, GENMASK(18, 16), (5 << 16)));
-+FUNCFG_DESCL(D10, PIN_CFG(ESPI1, SCU400, GENMASK(22, 20), (1 << 20)),
-+ PIN_CFG(LPC1, SCU400, GENMASK(22, 20), (2 << 20)),
-+ PIN_CFG(SD, SCU400, GENMASK(22, 20), (3 << 20)),
-+ PIN_CFG(HVI3C6, SCU400, GENMASK(22, 20), (4 << 20)),
-+ PIN_CFG(VPI, SCU400, GENMASK(22, 20), (5 << 20)));
-+FUNCFG_DESCL(C12, PIN_CFG(ESPI1, SCU400, GENMASK(26, 24), (1 << 24)),
-+ PIN_CFG(LPC1, SCU400, GENMASK(26, 24), (2 << 24)),
-+ PIN_CFG(SD, SCU400, GENMASK(26, 24), (3 << 24)));
-+FUNCFG_DESCL(C13, PIN_CFG(ESPI1, SCU400, GENMASK(30, 28), (1 << 28)),
-+ PIN_CFG(LPC1, SCU400, GENMASK(30, 28), (2 << 28)),
-+ PIN_CFG(SD, SCU400, GENMASK(30, 28), (3 << 28)));
-+FUNCFG_DESCL(AA24, PIN_CFG(TACH0, SCU404, GENMASK(2, 0), 1),
-+ PIN_CFG(THRU0, SCU404, GENMASK(2, 0), 2),
-+ PIN_CFG(VPI, SCU404, GENMASK(2, 0), 3));
-+FUNCFG_DESCL(AB24, PIN_CFG(TACH1, SCU404, GENMASK(6, 4), (1 << 4)),
-+ PIN_CFG(THRU0, SCU404, GENMASK(6, 4), (2 << 4)),
-+ PIN_CFG(VPI, SCU404, GENMASK(6, 4), (3 << 4)));
-+FUNCFG_DESCL(AB23, PIN_CFG(TACH2, SCU404, GENMASK(10, 8), (1 << 8)),
-+ PIN_CFG(THRU1, SCU404, GENMASK(10, 8), (2 << 8)),
-+ PIN_CFG(VPI, SCU404, GENMASK(10, 8), (3 << 8)));
-+FUNCFG_DESCL(AC22, PIN_CFG(TACH3, SCU404, GENMASK(14, 12), (1 << 12)),
-+ PIN_CFG(THRU1, SCU404, GENMASK(14, 12), (2 << 12)),
-+ PIN_CFG(VPI, SCU404, GENMASK(14, 12), (3 << 12)));
-+FUNCFG_DESCL(AD22, PIN_CFG(TACH4, SCU404, GENMASK(18, 16), (1 << 16)),
-+ PIN_CFG(VPI, SCU404, GENMASK(18, 16), (3 << 16)),
-+ PIN_CFG(NCTS5, SCU404, GENMASK(18, 16), (4 << 16)));
-+FUNCFG_DESCL(AE21, PIN_CFG(TACH5, SCU404, GENMASK(22, 20), (1 << 20)),
-+ PIN_CFG(VPI, SCU404, GENMASK(22, 20), (3 << 20)),
-+ PIN_CFG(NDCD5, SCU404, GENMASK(22, 20), (4 << 20)));
-+FUNCFG_DESCL(AF20, PIN_CFG(TACH6, SCU404, GENMASK(26, 24), (1 << 24)),
-+ PIN_CFG(VPI, SCU404, GENMASK(26, 24), (3 << 24)),
-+ PIN_CFG(NDSR5, SCU404, GENMASK(26, 24), (4 << 24)));
-+FUNCFG_DESCL(AE20, PIN_CFG(TACH7, SCU404, GENMASK(30, 28), (1 << 28)),
-+ PIN_CFG(VPI, SCU404, GENMASK(30, 28), (3 << 28)),
-+ PIN_CFG(NRI5, SCU404, GENMASK(30, 28), (4 << 28)));
-+FUNCFG_DESCL(AD20, PIN_CFG(TACH8, SCU408, GENMASK(2, 0), 1),
-+ PIN_CFG(VPI, SCU408, GENMASK(2, 0), 3),
-+ PIN_CFG(NDTR5, SCU408, GENMASK(2, 0), 4));
-+FUNCFG_DESCL(Y23, PIN_CFG(TACH9, SCU408, GENMASK(6, 4), (1 << 4)),
-+ PIN_CFG(VPI, SCU408, GENMASK(6, 4), (3 << 4)),
-+ PIN_CFG(NRTS5, SCU408, GENMASK(6, 4), (4 << 4)));
-+FUNCFG_DESCL(W23, PIN_CFG(TACH10, SCU408, GENMASK(10, 8), (1 << 8)),
-+ PIN_CFG(SALT12, SCU408, GENMASK(10, 8), (2 << 8)),
-+ PIN_CFG(VPI, SCU408, GENMASK(10, 8), (3 << 8)),
-+ PIN_CFG(NCTS6, SCU408, GENMASK(10, 8), (4 << 8)));
-+FUNCFG_DESCL(AD19, PIN_CFG(TACH11, SCU408, GENMASK(14, 12), (1 << 12)),
-+ PIN_CFG(SALT13, SCU408, GENMASK(14, 12), (2 << 12)),
-+ PIN_CFG(VPI, SCU408, GENMASK(14, 12), (3 << 12)),
-+ PIN_CFG(NDCD6, SCU408, GENMASK(14, 12), (4 << 12)));
-+FUNCFG_DESCL(AC20, PIN_CFG(TACH12, SCU408, GENMASK(18, 16), (1 << 16)),
-+ PIN_CFG(SALT14, SCU408, GENMASK(18, 16), (2 << 16)),
-+ PIN_CFG(VPI, SCU408, GENMASK(18, 16), (3 << 16)),
-+ PIN_CFG(NDSR6, SCU408, GENMASK(18, 16), (4 << 16)));
-+FUNCFG_DESCL(AA23, PIN_CFG(TACH13, SCU408, GENMASK(22, 20), (1 << 20)),
-+ PIN_CFG(SALT15, SCU408, GENMASK(22, 20), (2 << 20)),
-+ PIN_CFG(VPI, SCU408, GENMASK(22, 20), (3 << 20)),
-+ PIN_CFG(NRI6, SCU408, GENMASK(22, 20), (4 << 20)));
-+FUNCFG_DESCL(AB22, PIN_CFG(TACH14, SCU408, GENMASK(26, 24), (1 << 24)),
-+ PIN_CFG(LPCPME0, SCU408, GENMASK(26, 24), (2 << 24)),
-+ PIN_CFG(VPI, SCU408, GENMASK(26, 24), (3 << 24)),
-+ PIN_CFG(NDTR6, SCU408, GENMASK(26, 24), (4 << 24)));
-+FUNCFG_DESCL(AB21, PIN_CFG(TACH15, SCU408, GENMASK(30, 28), (1 << 28)),
-+ PIN_CFG(LPCSMIN0, SCU408, GENMASK(30, 28), (2 << 28)),
-+ PIN_CFG(VPI, SCU408, GENMASK(30, 28), (3 << 28)),
-+ PIN_CFG(NRTS6, SCU408, GENMASK(30, 28), (4 << 28)),
-+ PIN_CFG(SPIM0, SCU408, GENMASK(30, 28), (5 << 28)));
-+FUNCFG_DESCL(AA22, PIN_CFG(PWM0, SCU40C, GENMASK(2, 0), 1),
-+ PIN_CFG(SIOPBON0, SCU40C, GENMASK(2, 0), 2),
-+ PIN_CFG(VPI, SCU40C, GENMASK(2, 0), 3),
-+ PIN_CFG(SPIM0, SCU40C, GENMASK(2, 0), 5));
-+FUNCFG_DESCL(Y22, PIN_CFG(PWM1, SCU40C, GENMASK(6, 4), (1 << 4)),
-+ PIN_CFG(SIOPBIN0, SCU40C, GENMASK(6, 4), (2 << 4)),
-+ PIN_CFG(VPI, SCU40C, GENMASK(6, 4), (3 << 4)),
-+ PIN_CFG(SPIM0, SCU40C, GENMASK(6, 4), (5 << 4)));
-+FUNCFG_DESCL(W22, PIN_CFG(PWM2, SCU40C, GENMASK(10, 8), (1 << 8)),
-+ PIN_CFG(SIOSCIN0, SCU40C, GENMASK(10, 8), (2 << 8)),
-+ PIN_CFG(VPI, SCU40C, GENMASK(10, 8), (3 << 8)),
-+ PIN_CFG(SPIM0, SCU40C, GENMASK(10, 8), (5 << 8)));
-+FUNCFG_DESCL(AF18, PIN_CFG(PWM3, SCU40C, GENMASK(14, 12), (1 << 12)),
-+ PIN_CFG(SIOS3N0, SCU40C, GENMASK(14, 12), (2 << 12)),
-+ PIN_CFG(VPI, SCU40C, GENMASK(14, 12), (3 << 12)),
-+ PIN_CFG(SPIM0, SCU40C, GENMASK(14, 12), (5 << 12)));
-+FUNCFG_DESCL(AE18, PIN_CFG(PWM4, SCU40C, GENMASK(18, 16), (1 << 16)),
-+ PIN_CFG(SIOS5N0, SCU40C, GENMASK(18, 16), (2 << 16)),
-+ PIN_CFG(VPI, SCU40C, GENMASK(18, 16), (3 << 16)),
-+ PIN_CFG(SPIM0, SCU40C, GENMASK(18, 16), (5 << 16)));
-+FUNCFG_DESCL(AD18, PIN_CFG(PWM5, SCU40C, GENMASK(22, 20), (1 << 20)),
-+ PIN_CFG(SIOPWREQN0, SCU40C, GENMASK(22, 20), (2 << 20)),
-+ PIN_CFG(VPI, SCU40C, GENMASK(22, 20), (3 << 20)),
-+ PIN_CFG(SPIM0, SCU40C, GENMASK(22, 20), (5 << 20)));
-+FUNCFG_DESCL(AC19, PIN_CFG(PWM6, SCU40C, GENMASK(26, 24), (1 << 24)),
-+ PIN_CFG(SIOONCTRLN0, SCU40C, GENMASK(26, 24), (2 << 24)),
-+ PIN_CFG(SPIM0, SCU40C, GENMASK(26, 24), (5 << 24)));
-+FUNCFG_DESCL(AB20, PIN_CFG(PWM7, SCU40C, GENMASK(30, 28), (1 << 28)),
-+ PIN_CFG(SPIM0, SCU40C, GENMASK(30, 28), (2 << 28)));
-+FUNCFG_DESCL(AF17, PIN_CFG(NCTS0, SCU410, GENMASK(2, 0), 1));
-+FUNCFG_DESCL(AA16, PIN_CFG(NDCD0, SCU410, GENMASK(6, 4), (1 << 4)));
-+FUNCFG_DESCL(Y16, PIN_CFG(NDSR0, SCU410, GENMASK(10, 8), (1 << 8)));
-+FUNCFG_DESCL(V17, PIN_CFG(NRI0, SCU410, GENMASK(14, 12), (1 << 12)));
-+FUNCFG_DESCL(J13, PIN_CFG(NDTR0, SCU410, GENMASK(18, 16), (1 << 16)));
-+FUNCFG_DESCL(AB16, PIN_CFG(NRTS0, SCU410, GENMASK(22, 20), (1 << 20)));
-+FUNCFG_DESCL(AC16, PIN_CFG(TXD0, SCU410, GENMASK(26, 24), (1 << 24)));
-+FUNCFG_DESCL(AF16, PIN_CFG(RXD0, SCU410, GENMASK(30, 28), (1 << 28)));
-+FUNCFG_DESCL(AA15, PIN_CFG(NCTS1, SCU414, GENMASK(2, 0), 1));
-+FUNCFG_DESCL(AB15, PIN_CFG(NDCD1, SCU414, GENMASK(6, 4), (1 << 4)));
-+FUNCFG_DESCL(AC15, PIN_CFG(NDSR1, SCU414, GENMASK(10, 8), (1 << 8)));
-+FUNCFG_DESCL(AD15, PIN_CFG(NRI1, SCU414, GENMASK(14, 12), (1 << 12)));
-+FUNCFG_DESCL(Y15, PIN_CFG(NDTR1, SCU414, GENMASK(18, 16), (1 << 16)));
-+FUNCFG_DESCL(AA14, PIN_CFG(NRTS1, SCU414, GENMASK(22, 20), (1 << 20)));
-+FUNCFG_DESCL(W16, PIN_CFG(TXD1, SCU414, GENMASK(26, 24), (1 << 24)));
-+FUNCFG_DESCL(V16, PIN_CFG(RXD1, SCU414, GENMASK(30, 28), (1 << 28)));
-+FUNCFG_DESCL(AB18, PIN_CFG(TXD2, SCU418, GENMASK(2, 0), 1));
-+FUNCFG_DESCL(AC18, PIN_CFG(RXD2, SCU418, GENMASK(6, 4), (1 << 4)));
-+FUNCFG_DESCL(K13, PIN_CFG(TXD3, SCU418, GENMASK(10, 8), (1 << 8)),
-+ PIN_CFG(WDTRST0N, SCU418, GENMASK(10, 8), (2 << 8)),
-+ PIN_CFG(PWM8, SCU418, GENMASK(10, 8), (3 << 8)),
-+ PIN_CFG(SPIM1, SCU418, GENMASK(10, 8), (5 << 8)));
-+FUNCFG_DESCL(AA17, PIN_CFG(RXD3, SCU418, GENMASK(14, 12), (1 << 12)),
-+ PIN_CFG(WDTRST1N, SCU418, GENMASK(14, 12), (2 << 12)),
-+ PIN_CFG(PWM9, SCU418, GENMASK(14, 12), (3 << 12)),
-+ PIN_CFG(SPIM1, SCU418, GENMASK(14, 12), (5 << 12)));
-+FUNCFG_DESCL(AB17, PIN_CFG(TXD5, SCU418, GENMASK(18, 16), (1 << 16)),
-+ PIN_CFG(WDTRST2N, SCU418, GENMASK(18, 16), (2 << 16)),
-+ PIN_CFG(PWM10, SCU418, GENMASK(18, 16), (3 << 16)),
-+ PIN_CFG(SPIM1, SCU418, GENMASK(18, 16), (5 << 16)));
-+FUNCFG_DESCL(AD16, PIN_CFG(RXD5, SCU418, GENMASK(22, 20), (1 << 20)),
-+ PIN_CFG(WDTRST3N, SCU418, GENMASK(22, 20), (2 << 20)),
-+ PIN_CFG(PWM11, SCU418, GENMASK(22, 20), (3 << 20)),
-+ PIN_CFG(SPIM1, SCU418, GENMASK(22, 20), (5 << 20)));
-+FUNCFG_DESCL(AC17, PIN_CFG(TXD6, SCU418, GENMASK(26, 24), (1 << 24)),
-+ PIN_CFG(SALT0, SCU418, GENMASK(26, 24), (2 << 24)),
-+ PIN_CFG(PWM12, SCU418, GENMASK(26, 24), (3 << 24)),
-+ PIN_CFG(SPIM1, SCU418, GENMASK(26, 24), (5 << 24)));
-+FUNCFG_DESCL(AD17, PIN_CFG(RXD6, SCU418, GENMASK(30, 28), (1 << 28)),
-+ PIN_CFG(SALT1, SCU418, GENMASK(30, 28), (2 << 28)),
-+ PIN_CFG(PWM13, SCU418, GENMASK(30, 28), (3 << 28)),
-+ PIN_CFG(SPIM1, SCU418, GENMASK(30, 28), (5 << 28)));
-+FUNCFG_DESCL(AE16, PIN_CFG(TXD7, SCU41C, GENMASK(2, 0), 1),
-+ PIN_CFG(SALT2, SCU41C, GENMASK(2, 0), 2),
-+ PIN_CFG(PWM14, SCU41C, GENMASK(2, 0), 3),
-+ PIN_CFG(SPIM1, SCU41C, GENMASK(2, 0), 5));
-+FUNCFG_DESCL(AE17, PIN_CFG(RXD7, SCU41C, GENMASK(6, 4), (1 << 4)),
-+ PIN_CFG(SALT3, SCU41C, GENMASK(6, 4), (2 << 4)),
-+ PIN_CFG(PWM15, SCU41C, GENMASK(6, 4), (3 << 4)),
-+ PIN_CFG(SPIM1, SCU41C, GENMASK(6, 4), (5 << 4)));
-+FUNCFG_DESCL(U23, PIN_CFG(SGPM1, SCU41C, GENMASK(10, 8), (1 << 8)),
-+ PIN_CFG(WDTRST7N, SCU41C, GENMASK(10, 8), (2 << 8)),
-+ PIN_CFG(PESGWAKEN, SCU41C, GENMASK(10, 8), (3 << 8)),
-+ PIN_CFG(SMON1, SCU41C, GENMASK(10, 8), (5 << 8)));
-+FUNCFG_DESCL(T24, PIN_CFG(SGPM1, SCU41C, GENMASK(14, 12), (1 << 12)),
-+ PIN_CFG(SMON1, SCU41C, GENMASK(14, 12), (5 << 12)));
-+FUNCFG_DESCL(HOLE0);
-+FUNCFG_DESCL(HOLE1);
-+FUNCFG_DESCL(HOLE2);
-+FUNCFG_DESCL(HOLE3);
-+FUNCFG_DESCL(AC24, PIN_CFG(HVI3C12, SCU420, GENMASK(2, 0), 1),
-+ PIN_CFG(I2C12, SCU420, GENMASK(2, 0), 2),
-+ PIN_CFG(SIOPBON1, SCU420, GENMASK(2, 0), 3),
-+ PIN_CFG(SPIM2, SCU420, GENMASK(2, 0), 5));
-+FUNCFG_DESCL(AD24, PIN_CFG(HVI3C12, SCU420, GENMASK(6, 4), (1 << 4)),
-+ PIN_CFG(I2C12, SCU420, GENMASK(6, 4), (2 << 4)),
-+ PIN_CFG(SIOPBIN1, SCU420, GENMASK(6, 4), (3 << 4)),
-+ PIN_CFG(SPIM2, SCU420, GENMASK(6, 4), (5 << 4)));
-+FUNCFG_DESCL(AE23, PIN_CFG(HVI3C13, SCU420, GENMASK(10, 8), (1 << 8)),
-+ PIN_CFG(I2C13, SCU420, GENMASK(10, 8), (2 << 8)),
-+ PIN_CFG(SIOSCIN1, SCU420, GENMASK(10, 8), (3 << 8)),
-+ PIN_CFG(SPIM2, SCU420, GENMASK(10, 8), (5 << 8)));
-+FUNCFG_DESCL(AE19, PIN_CFG(HVI3C13, SCU420, GENMASK(14, 12), (1 << 12)),
-+ PIN_CFG(I2C13, SCU420, GENMASK(14, 12), (2 << 12)),
-+ PIN_CFG(SIOS3N1, SCU420, GENMASK(14, 12), (3 << 12)),
-+ PIN_CFG(SPIM2, SCU420, GENMASK(14, 12), (5 << 12)));
-+FUNCFG_DESCL(AF23, PIN_CFG(HVI3C14, SCU420, GENMASK(18, 16), (1 << 16)),
-+ PIN_CFG(I2C14, SCU420, GENMASK(18, 16), (2 << 16)),
-+ PIN_CFG(SIOS5N1, SCU420, GENMASK(18, 16), (3 << 16)),
-+ PIN_CFG(SPIM2, SCU420, GENMASK(18, 16), (5 << 16)));
-+FUNCFG_DESCL(Y25, PIN_CFG(HVI3C14, SCU420, GENMASK(22, 20), (1 << 20)),
-+ PIN_CFG(I2C14, SCU420, GENMASK(22, 20), (2 << 20)),
-+ PIN_CFG(SIOPWREQN1, SCU420, GENMASK(22, 20), (3 << 20)),
-+ PIN_CFG(SPIM2, SCU420, GENMASK(22, 20), (5 << 20)));
-+FUNCFG_DESCL(AA25, PIN_CFG(HVI3C15, SCU420, GENMASK(26, 24), (1 << 24)),
-+ PIN_CFG(I2C15, SCU420, GENMASK(26, 24), (2 << 24)),
-+ PIN_CFG(SIOONCTRLN1, SCU420, GENMASK(26, 24), (3 << 24)),
-+ PIN_CFG(SPIM2, SCU420, GENMASK(26, 24), (5 << 24)));
-+FUNCFG_DESCL(AF19, PIN_CFG(HVI3C15, SCU420, GENMASK(30, 28), (1 << 28)),
-+ PIN_CFG(I2C15, SCU420, GENMASK(30, 28), (2 << 28)),
-+ PIN_CFG(SIOPWRGD1, SCU420, GENMASK(30, 28), (3 << 28)),
-+ PIN_CFG(SPIM2, SCU420, GENMASK(30, 28), (5 << 28)));
-+FUNCFG_DESCL(AB25, PIN_CFG(I3C4, SCU424, GENMASK(2, 0), 1));
-+FUNCFG_DESCL(AC25, PIN_CFG(I3C4, SCU424, GENMASK(6, 4), (1 << 4)));
-+FUNCFG_DESCL(AD25, PIN_CFG(I3C5, SCU424, GENMASK(10, 8), (1 << 8)));
-+FUNCFG_DESCL(V22, PIN_CFG(I3C5, SCU424, GENMASK(14, 12), (1 << 12)));
-+FUNCFG_DESCL(AE25, PIN_CFG(I3C6, SCU424, GENMASK(18, 16), (1 << 16)));
-+FUNCFG_DESCL(V21, PIN_CFG(I3C6, SCU424, GENMASK(22, 20), (1 << 20)));
-+FUNCFG_DESCL(AF21, PIN_CFG(I3C7, SCU424, GENMASK(26, 24), (1 << 24)));
-+FUNCFG_DESCL(AF25, PIN_CFG(I3C7, SCU424, GENMASK(30, 28), (1 << 28)));
-+FUNCFG_DESCL(AF26, PIN_CFG(I3C8, SCU428, GENMASK(2, 0), 1));
-+FUNCFG_DESCL(AE26, PIN_CFG(I3C8, SCU428, GENMASK(6, 4), (1 << 4)));
-+FUNCFG_DESCL(W21, PIN_CFG(I3C9, SCU428, GENMASK(10, 8), (1 << 8)));
-+FUNCFG_DESCL(AD26, PIN_CFG(I3C9, SCU428, GENMASK(14, 12), (1 << 12)));
-+FUNCFG_DESCL(Y21, PIN_CFG(I3C10, SCU428, GENMASK(18, 16), (1 << 16)));
-+FUNCFG_DESCL(AC26, PIN_CFG(I3C10, SCU428, GENMASK(22, 20), (1 << 20)));
-+FUNCFG_DESCL(AA26, PIN_CFG(I3C11, SCU428, GENMASK(26, 24), (1 << 24)));
-+FUNCFG_DESCL(AB26, PIN_CFG(I3C11, SCU428, GENMASK(30, 28), (1 << 28)));
-+FUNCFG_DESCL(T26, PIN_CFG(I3C0, SCU42C, GENMASK(2, 0), 1),
-+ PIN_CFG(FSI0, SCU42C, GENMASK(2, 0), 2),
-+ PIN_CFG(LTPI, SCU42C, GENMASK(2, 0), 3));
-+FUNCFG_DESCL(AA20, PIN_CFG(I3C0, SCU42C, GENMASK(6, 4), (1 << 4)),
-+ PIN_CFG(FSI0, SCU42C, GENMASK(6, 4), (2 << 4)),
-+ PIN_CFG(LTPI, SCU42C, GENMASK(6, 4), (3 << 4)));
-+FUNCFG_DESCL(V23, PIN_CFG(I3C1, SCU42C, GENMASK(10, 8), (1 << 8)),
-+ PIN_CFG(FSI1, SCU42C, GENMASK(10, 8), (2 << 8)),
-+ PIN_CFG(LTPI, SCU42C, GENMASK(10, 8), (3 << 8)));
-+FUNCFG_DESCL(W24, PIN_CFG(I3C1, SCU42C, GENMASK(14, 12), (1 << 12)),
-+ PIN_CFG(FSI1, SCU42C, GENMASK(14, 12), (2 << 12)),
-+ PIN_CFG(LTPI, SCU42C, GENMASK(14, 12), (3 << 12)));
-+FUNCFG_DESCL(R26, PIN_CFG(I3C2, SCU42C, GENMASK(18, 16), (1 << 16)),
-+ PIN_CFG(FSI2, SCU42C, GENMASK(18, 16), (2 << 16)));
-+FUNCFG_DESCL(AA21, PIN_CFG(I3C2, SCU42C, GENMASK(22, 20), (1 << 20)),
-+ PIN_CFG(FSI2, SCU42C, GENMASK(22, 20), (2 << 20)));
-+FUNCFG_DESCL(P26, PIN_CFG(I3C3, SCU42C, GENMASK(26, 24), (1 << 24)),
-+ PIN_CFG(FSI3, SCU42C, GENMASK(26, 24), (2 << 24)));
-+FUNCFG_DESCL(Y24, PIN_CFG(I3C3, SCU42C, GENMASK(30, 28), (1 << 28)),
-+ PIN_CFG(FSI3, SCU42C, GENMASK(30, 28), (2 << 28)));
-+FUNCFG_DESCL(B16, PIN_CFG(ESPI0, SCU430, GENMASK(2, 0), 1),
-+ PIN_CFG(LPC0, SCU430, GENMASK(2, 0), 2));
-+FUNCFG_DESCL(D14, PIN_CFG(ESPI0, SCU430, GENMASK(6, 4), (1 << 4)),
-+ PIN_CFG(LPC0, SCU430, GENMASK(6, 4), (2 << 4)));
-+FUNCFG_DESCL(B15, PIN_CFG(ESPI0, SCU430, GENMASK(10, 8), (1 << 8)),
-+ PIN_CFG(LPC0, SCU430, GENMASK(10, 8), (2 << 8)));
-+FUNCFG_DESCL(B14, PIN_CFG(ESPI0, SCU430, GENMASK(14, 12), (1 << 12)),
-+ PIN_CFG(LPC0, SCU430, GENMASK(14, 12), (2 << 12)));
-+FUNCFG_DESCL(C17, PIN_CFG(ESPI0, SCU430, GENMASK(18, 16), (1 << 16)),
-+ PIN_CFG(LPC0, SCU430, GENMASK(18, 16), (2 << 16)),
-+ PIN_CFG(OSCCLK, SCU430, GENMASK(18, 16), (3 << 16)));
-+FUNCFG_DESCL(B13, PIN_CFG(ESPI0, SCU430, GENMASK(22, 20), (1 << 20)),
-+ PIN_CFG(LPC0, SCU430, GENMASK(22, 20), (2 << 20)));
-+FUNCFG_DESCL(E14, PIN_CFG(ESPI0, SCU430, GENMASK(26, 24), (1 << 24)),
-+ PIN_CFG(LPC0, SCU430, GENMASK(26, 24), (2 << 24)));
-+FUNCFG_DESCL(C15, PIN_CFG(ESPI0, SCU430, GENMASK(30, 28), (1 << 28)),
-+ PIN_CFG(LPC0, SCU430, GENMASK(30, 28), (2 << 28)));
-+FUNCFG_DESCL(D24, PIN_CFG(SPI0, SCU434, GENMASK(2, 0), 1));
-+FUNCFG_DESCL(B23, PIN_CFG(SPI0, SCU434, GENMASK(6, 4), (1 << 4)));
-+FUNCFG_DESCL(B22, PIN_CFG(SPI0, SCU434, GENMASK(10, 8), (1 << 8)));
-+FUNCFG_DESCL(C23, PIN_CFG(QSPI0, SCU434, GENMASK(14, 12), (1 << 12)));
-+FUNCFG_DESCL(B18, PIN_CFG(QSPI0, SCU434, GENMASK(18, 16), (1 << 16)));
-+FUNCFG_DESCL(B21, PIN_CFG(SPI0CS1, SCU434, GENMASK(22, 20), (1 << 20)));
-+FUNCFG_DESCL(B17, PIN_CFG(SPI0ABR, SCU434, GENMASK(26, 24), (1 << 24)),
-+ PIN_CFG(TXD8, SCU434, GENMASK(26, 24), (3 << 24)));
-+FUNCFG_DESCL(B19, PIN_CFG(SPI0WPN, SCU434, GENMASK(30, 28), (1 << 28)),
-+ PIN_CFG(RXD8, SCU434, GENMASK(30, 28), (3 << 28)));
-+FUNCFG_DESCL(B26, PIN_CFG(SPI1, SCU438, GENMASK(2, 0), 1),
-+ PIN_CFG(TXD9, SCU438, GENMASK(2, 0), 3));
-+FUNCFG_DESCL(A25, PIN_CFG(SPI1, SCU438, GENMASK(6, 4), (1 << 4)),
-+ PIN_CFG(RXD9, SCU438, GENMASK(6, 4), (3 << 4)));
-+FUNCFG_DESCL(A24, PIN_CFG(SPI1, SCU438, GENMASK(10, 8), (1 << 8)),
-+ PIN_CFG(TXD10, SCU438, GENMASK(10, 8), (3 << 8)));
-+FUNCFG_DESCL(B24, PIN_CFG(QSPI1, SCU438, GENMASK(14, 12), (1 << 12)),
-+ PIN_CFG(RXD10, SCU438, GENMASK(14, 12), (3 << 12)));
-+FUNCFG_DESCL(E26, PIN_CFG(QSPI1, SCU438, GENMASK(18, 16), (1 << 16)),
-+ PIN_CFG(TXD11, SCU438, GENMASK(18, 16), (3 << 16)));
-+FUNCFG_DESCL(A21, PIN_CFG(SPI1CS1, SCU438, GENMASK(22, 20), (1 << 20)),
-+ PIN_CFG(RXD11, SCU438, GENMASK(22, 20), (3 << 20)));
-+FUNCFG_DESCL(A19, PIN_CFG(SPI1ABR, SCU438, GENMASK(26, 24), (1 << 24)),
-+ PIN_CFG(THRU2, SCU438, GENMASK(26, 24), (4 << 24)));
-+FUNCFG_DESCL(A18, PIN_CFG(SPI1WPN, SCU438, GENMASK(30, 28), (1 << 28)),
-+ PIN_CFG(THRU2, SCU438, GENMASK(30, 28), (4 << 28)));
-+FUNCFG_DESCL(D26, PIN_CFG(SPI2, SCU43C, GENMASK(2, 0), 1),
-+ PIN_CFG(DI2C13, SCU43C, GENMASK(2, 0), 2),
-+ PIN_CFG(HVI3C7, SCU43C, GENMASK(2, 0), 3));
-+FUNCFG_DESCL(C26, PIN_CFG(SPI2, SCU43C, GENMASK(6, 4), (1 << 4)),
-+ PIN_CFG(DI2C13, SCU43C, GENMASK(6, 4), (2 << 4)),
-+ PIN_CFG(HVI3C7, SCU43C, GENMASK(6, 4), (3 << 4)),
-+ PIN_CFG(EM_SPICK, SCU43C, GENMASK(6, 4), (5 << 4)));
-+FUNCFG_DESCL(A23, PIN_CFG(SPI2, SCU43C, GENMASK(10, 8), (1 << 8)),
-+ PIN_CFG(DI2C14, SCU43C, GENMASK(10, 8), (2 << 8)),
-+ PIN_CFG(HVI3C10, SCU43C, GENMASK(10, 8), (3 << 8)),
-+ PIN_CFG(EM_SPIMOSI, SCU43C, GENMASK(10, 8), (5 << 8)));
-+FUNCFG_DESCL(B25, PIN_CFG(SPI2, SCU43C, GENMASK(14, 12), (1 << 12)),
-+ PIN_CFG(DI2C14, SCU43C, GENMASK(14, 12), (2 << 12)),
-+ PIN_CFG(HVI3C10, SCU43C, GENMASK(14, 12), (3 << 12)),
-+ PIN_CFG(EM_SPIMISO, SCU43C, GENMASK(14, 12), (5 << 12)));
-+FUNCFG_DESCL(A22, PIN_CFG(QSPI2, SCU43C, GENMASK(18, 16), (1 << 16)),
-+ PIN_CFG(DI2C15, SCU43C, GENMASK(18, 16), (2 << 16)),
-+ PIN_CFG(HVI3C11, SCU43C, GENMASK(18, 16), (3 << 16)),
-+ PIN_CFG(THRU3, SCU43C, GENMASK(18, 16), (4 << 16)));
-+FUNCFG_DESCL(F26, PIN_CFG(QSPI2, SCU43C, GENMASK(22, 20), (1 << 20)),
-+ PIN_CFG(DI2C15, SCU43C, GENMASK(22, 20), (2 << 20)),
-+ PIN_CFG(HVI3C11, SCU43C, GENMASK(22, 20), (3 << 20)),
-+ PIN_CFG(THRU3, SCU43C, GENMASK(22, 20), (4 << 20)));
-+FUNCFG_DESCL(A26, PIN_CFG(SPI2CS1, SCU43C, GENMASK(26, 24), (1 << 24)),
-+ PIN_CFG(EM_SPICSN, SCU43C, GENMASK(26, 24), (5 << 24)));
-+FUNCFG_DESCL(A14, PIN_CFG(FWSPIABR, SCU43C, GENMASK(30, 28), (1 << 28)));
-+FUNCFG_DESCL(E10, PIN_CFG(MDIO2, SCU440, GENMASK(2, 0), 1),
-+ PIN_CFG(PE2SGRSTN, SCU440, GENMASK(2, 0), 2));
-+FUNCFG_DESCL(E13, PIN_CFG(MDIO2, SCU440, GENMASK(6, 4), (1 << 4)));
-+FUNCFG_DESCL(D12, PIN_CFG(JTAGM1, SCU440, GENMASK(10, 8), (1 << 8)));
-+FUNCFG_DESCL(F10, PIN_CFG(JTAGM1, SCU440, GENMASK(14, 12), (1 << 12)));
-+FUNCFG_DESCL(E11, PIN_CFG(JTAGM1, SCU440, GENMASK(18, 16), (1 << 16)));
-+FUNCFG_DESCL(F11, PIN_CFG(JTAGM1, SCU440, GENMASK(22, 20), (1 << 20)));
-+FUNCFG_DESCL(F13, PIN_CFG(JTAGM1, SCU440, GENMASK(26, 24), (1 << 24)));
-+FUNCFG_DESCL(N15, PIN_CFG(FWSPIWPEN, SCU440, GENMASK(30, 28), (1 << 28)));
-+FUNCFG_DESCL(C20, PIN_CFG(RGMII0, SCU444, GENMASK(2, 0), 1),
-+ PIN_CFG(RMII0R, SCU444, GENMASK(2, 0), 2));
-+FUNCFG_DESCL(C19, PIN_CFG(RGMII0, SCU444, GENMASK(6, 4), (1 << 4)));
-+FUNCFG_DESCL(A8, PIN_CFG(RGMII0, SCU444, GENMASK(10, 8), (1 << 8)),
-+ PIN_CFG(RMII0R, SCU444, GENMASK(10, 8), (2 << 8)));
-+FUNCFG_DESCL(R14, PIN_CFG(RGMII0, SCU444, GENMASK(14, 12), (1 << 12)),
-+ PIN_CFG(RMII0R, SCU444, GENMASK(14, 12), (2 << 12)));
-+FUNCFG_DESCL(A7, PIN_CFG(RGMII0, SCU444, GENMASK(18, 16), (1 << 16)),
-+ PIN_CFG(RMII0C, SCU444, GENMASK(18, 16), (2 << 16)));
-+FUNCFG_DESCL(P14, PIN_CFG(RGMII0, SCU444, GENMASK(22, 20), (1 << 20)),
-+ PIN_CFG(RMII0, SCU444, GENMASK(22, 20), (2 << 20)));
-+FUNCFG_DESCL(D20, PIN_CFG(RGMII0, SCU444, GENMASK(26, 24), (1 << 24)),
-+ PIN_CFG(RMII0, SCU444, GENMASK(26, 24), (2 << 24)));
-+FUNCFG_DESCL(A6, PIN_CFG(RGMII0, SCU444, GENMASK(30, 28), (1 << 28)),
-+ PIN_CFG(RMII0, SCU444, GENMASK(30, 28), (2 << 28)));
-+FUNCFG_DESCL(B6, PIN_CFG(RGMII0, SCU448, GENMASK(2, 0), 1),
-+ PIN_CFG(RMII0, SCU448, GENMASK(2, 0), 2));
-+FUNCFG_DESCL(N14, PIN_CFG(RGMII0, SCU448, GENMASK(6, 4), (1 << 4)),
-+ PIN_CFG(RMII0, SCU448, GENMASK(6, 4), (2 << 4)));
-+FUNCFG_DESCL(B7, PIN_CFG(RGMII0, SCU448, GENMASK(10, 8), (1 << 8)));
-+FUNCFG_DESCL(B8, PIN_CFG(RGMII0, SCU448, GENMASK(14, 12), (1 << 12)));
-+FUNCFG_DESCL(B9, PIN_CFG(MDIO0, SCU448, GENMASK(18, 16), (1 << 16)));
-+FUNCFG_DESCL(M14, PIN_CFG(MDIO0, SCU448, GENMASK(22, 20), (1 << 20)));
-+FUNCFG_DESCL(J11, PIN_CFG(VGA, SCU448, GENMASK(26, 24), (1 << 24)));
-+FUNCFG_DESCL(E7, PIN_CFG(VGA, SCU448, GENMASK(30, 28), (1 << 28)));
-+FUNCFG_DESCL(D19, PIN_CFG(RGMII1, SCU44C, GENMASK(2, 0), 1),
-+ PIN_CFG(RMII1, SCU44C, GENMASK(2, 0), 2),
-+ PIN_CFG(DI2C8, SCU44C, GENMASK(2, 0), 3),
-+ PIN_CFG(DSGPM1, SCU44C, GENMASK(2, 0), 4));
-+FUNCFG_DESCL(B11, PIN_CFG(RGMII1, SCU44C, GENMASK(6, 4), (1 << 4)),
-+ PIN_CFG(SGPS, SCU44C, GENMASK(6, 4), (5 << 4)));
-+FUNCFG_DESCL(D15, PIN_CFG(RGMII1, SCU44C, GENMASK(10, 8), (1 << 8)),
-+ PIN_CFG(RMII1, SCU44C, GENMASK(10, 8), (2 << 8)),
-+ PIN_CFG(DI2C9, SCU44C, GENMASK(10, 8), (3 << 8)),
-+ PIN_CFG(TXD3, SCU44C, GENMASK(10, 8), (4 << 8)));
-+FUNCFG_DESCL(B12, PIN_CFG(RGMII1, SCU44C, GENMASK(14, 12), (1 << 12)),
-+ PIN_CFG(RMII1, SCU44C, GENMASK(14, 12), (2 << 12)),
-+ PIN_CFG(DI2C9, SCU44C, GENMASK(14, 12), (3 << 12)),
-+ PIN_CFG(RXD3, SCU44C, GENMASK(14, 12), (4 << 12)));
-+FUNCFG_DESCL(B10, PIN_CFG(RGMII1, SCU44C, GENMASK(18, 16), (1 << 16)),
-+ PIN_CFG(RMII1, SCU44C, GENMASK(18, 16), (2 << 16)),
-+ PIN_CFG(DI2C8, SCU44C, GENMASK(18, 16), (3 << 16)),
-+ PIN_CFG(DSGPM1, SCU44C, GENMASK(18, 16), (4 << 16)));
-+FUNCFG_DESCL(P13, PIN_CFG(RGMII1, SCU44C, GENMASK(22, 20), (1 << 20)));
-+FUNCFG_DESCL(C18, PIN_CFG(RGMII1, SCU44C, GENMASK(26, 24), (1 << 24)),
-+ PIN_CFG(RMII1, SCU44C, GENMASK(26, 24), (2 << 24)),
-+ PIN_CFG(SGPS, SCU44C, GENMASK(26, 24), (5 << 24)));
-+FUNCFG_DESCL(C6, PIN_CFG(RGMII1, SCU44C, GENMASK(30, 28), (1 << 28)),
-+ PIN_CFG(RMII1, SCU44C, GENMASK(30, 28), (2 << 28)));
-+FUNCFG_DESCL(C7, PIN_CFG(RGMII1, SCU450, GENMASK(2, 0), 1),
-+ PIN_CFG(RMII1, SCU450, GENMASK(2, 0), 2),
-+ PIN_CFG(DI2C10, SCU450, GENMASK(2, 0), 3),
-+ PIN_CFG(DSGPM1, SCU450, GENMASK(2, 0), 4));
-+FUNCFG_DESCL(D7, PIN_CFG(RGMII1, SCU450, GENMASK(6, 4), (1 << 4)),
-+ PIN_CFG(RMII1, SCU450, GENMASK(6, 4), (2 << 4)),
-+ PIN_CFG(DI2C10, SCU450, GENMASK(6, 4), (3 << 4)),
-+ PIN_CFG(DSGPM1, SCU450, GENMASK(6, 4), (4 << 4)));
-+FUNCFG_DESCL(N13, PIN_CFG(RGMII1, SCU450, GENMASK(10, 8), (1 << 8)),
-+ PIN_CFG(SGPS, SCU450, GENMASK(10, 8), (5 << 8)));
-+FUNCFG_DESCL(C8, PIN_CFG(RGMII1, SCU450, GENMASK(14, 12), (1 << 12)),
-+ PIN_CFG(SGPS, SCU450, GENMASK(14, 12), (5 << 12)));
-+FUNCFG_DESCL(C9, PIN_CFG(MDIO1, SCU450, GENMASK(18, 16), (1 << 16)));
-+FUNCFG_DESCL(C10, PIN_CFG(MDIO1, SCU450, GENMASK(22, 20), (1 << 20)));
-+FUNCFG_DESCL(M16, PIN_CFG(FWQSPI, SCU450, GENMASK(26, 24), (1 << 24)));
-+FUNCFG_DESCL(A15, PIN_CFG(FWQSPI, SCU450, GENMASK(30, 28), (1 << 28)));
-+FUNCFG_DESCL(G11, PIN_CFG(I2C0, SCU454, GENMASK(2, 0), 1));
-+FUNCFG_DESCL(H7, PIN_CFG(I2C0, SCU454, GENMASK(6, 4), (1 << 4)));
-+FUNCFG_DESCL(H8, PIN_CFG(I2C1, SCU454, GENMASK(10, 8), (1 << 8)));
-+FUNCFG_DESCL(H9, PIN_CFG(I2C1, SCU454, GENMASK(14, 12), (1 << 12)));
-+FUNCFG_DESCL(H10, PIN_CFG(I2C2, SCU454, GENMASK(18, 16), (1 << 16)));
-+FUNCFG_DESCL(H11, PIN_CFG(I2C2, SCU454, GENMASK(22, 20), (1 << 20)));
-+FUNCFG_DESCL(J9, PIN_CFG(I2C3, SCU454, GENMASK(26, 24), (1 << 24)));
-+FUNCFG_DESCL(J10, PIN_CFG(I2C3, SCU454, GENMASK(30, 28), (1 << 28)));
-+FUNCFG_DESCL(E9, PIN_CFG(I2C4, SCU458, GENMASK(2, 0), 1),
-+ PIN_CFG(ESPI1, SCU458, GENMASK(2, 0), 2),
-+ PIN_CFG(I2CF1, SCU458, GENMASK(2, 0), 5));
-+FUNCFG_DESCL(F9, PIN_CFG(I2C4, SCU458, GENMASK(6, 4), (1 << 4)),
-+ PIN_CFG(ESPI1, SCU458, GENMASK(6, 4), (2 << 4)),
-+ PIN_CFG(I2CF1, SCU458, GENMASK(6, 4), (5 << 4)));
-+FUNCFG_DESCL(F8, PIN_CFG(I2C5, SCU458, GENMASK(10, 8), (1 << 8)),
-+ PIN_CFG(ESPI1, SCU458, GENMASK(10, 8), (2 << 8)),
-+ PIN_CFG(I2CF1, SCU458, GENMASK(10, 8), (5 << 8)));
-+FUNCFG_DESCL(M13, PIN_CFG(I2C5, SCU458, GENMASK(14, 12), (1 << 12)),
-+ PIN_CFG(ESPI1, SCU458, GENMASK(14, 12), (2 << 12)),
-+ PIN_CFG(I2CF1, SCU458, GENMASK(14, 12), (5 << 12)));
-+FUNCFG_DESCL(F7, PIN_CFG(I2C6, SCU458, GENMASK(18, 16), (1 << 16)),
-+ PIN_CFG(ESPI1, SCU458, GENMASK(18, 16), (2 << 16)),
-+ PIN_CFG(I2CF2, SCU458, GENMASK(18, 16), (5 << 16)));
-+FUNCFG_DESCL(D8, PIN_CFG(I2C6, SCU458, GENMASK(22, 20), (1 << 20)),
-+ PIN_CFG(ESPI1, SCU458, GENMASK(22, 20), (2 << 20)),
-+ PIN_CFG(I2CF2, SCU458, GENMASK(22, 20), (5 << 20)));
-+FUNCFG_DESCL(E8, PIN_CFG(I2C7, SCU458, GENMASK(26, 24), (1 << 24)),
-+ PIN_CFG(ESPI1, SCU458, GENMASK(26, 24), (2 << 24)),
-+ PIN_CFG(I2CF2, SCU458, GENMASK(26, 24), (5 << 24)));
-+FUNCFG_DESCL(L12, PIN_CFG(I2C7, SCU458, GENMASK(30, 28), (1 << 28)),
-+ PIN_CFG(ESPI1, SCU458, GENMASK(30, 28), (2 << 28)),
-+ PIN_CFG(I2CF2, SCU458, GENMASK(30, 28), (5 << 28)));
-+FUNCFG_DESCL(F12, PIN_CFG(I2C8, SCU45C, GENMASK(2, 0), 1),
-+ PIN_CFG(I2CF0, SCU45C, GENMASK(2, 0), 5));
-+FUNCFG_DESCL(E12, PIN_CFG(I2C8, SCU45C, GENMASK(6, 4), (1 << 4)),
-+ PIN_CFG(I2CF0, SCU45C, GENMASK(6, 4), (5 << 4)));
-+FUNCFG_DESCL(J12, PIN_CFG(I2C9, SCU45C, GENMASK(10, 8), (1 << 8)),
-+ PIN_CFG(I2CF0, SCU45C, GENMASK(10, 8), (5 << 8)));
-+FUNCFG_DESCL(G7, PIN_CFG(I2C9, SCU45C, GENMASK(14, 12), (1 << 12)),
-+ PIN_CFG(CANBUS, SCU45C, GENMASK(14, 12), (2 << 12)),
-+ PIN_CFG(I2CF0, SCU45C, GENMASK(14, 12), (5 << 12)));
-+FUNCFG_DESCL(G8, PIN_CFG(I2C10, SCU45C, GENMASK(18, 16), (1 << 16)),
-+ PIN_CFG(CANBUS, SCU45C, GENMASK(18, 16), (2 << 16)));
-+FUNCFG_DESCL(G9, PIN_CFG(I2C10, SCU45C, GENMASK(22, 20), (1 << 20)),
-+ PIN_CFG(CANBUS, SCU45C, GENMASK(22, 20), (2 << 20)));
-+FUNCFG_DESCL(G10, PIN_CFG(I2C11, SCU45C, GENMASK(26, 24), (1 << 24)),
-+ PIN_CFG(USBUART, SCU45C, GENMASK(26, 24), (2 << 24)));
-+FUNCFG_DESCL(K12, PIN_CFG(I2C11, SCU45C, GENMASK(30, 28), (1 << 28)),
-+ PIN_CFG(USBUART, SCU45C, GENMASK(30, 28), (2 << 28)));
-+FUNCFG_DESCL(W17, PIN_CFG(ADC0, SCU460, GENMASK(2, 0), 0),
-+ PIN_CFG(GPIY0, SCU460, GENMASK(2, 0), 1),
-+ PIN_CFG(SALT4, SCU460, GENMASK(2, 0), 2));
-+FUNCFG_DESCL(V18, PIN_CFG(ADC1, SCU460, GENMASK(6, 4), 0),
-+ PIN_CFG(GPIY1, SCU460, GENMASK(6, 4), (1 << 4)),
-+ PIN_CFG(SALT5, SCU460, GENMASK(6, 4), (2 << 4)));
-+FUNCFG_DESCL(W18, PIN_CFG(ADC2, SCU460, GENMASK(10, 8), 0),
-+ PIN_CFG(GPIY2, SCU460, GENMASK(10, 8), (1 << 8)),
-+ PIN_CFG(SALT6, SCU460, GENMASK(10, 8), (2 << 8)));
-+FUNCFG_DESCL(Y17, PIN_CFG(ADC3, SCU460, GENMASK(14, 12), 0),
-+ PIN_CFG(GPIY3, SCU460, GENMASK(14, 12), (1 << 12)),
-+ PIN_CFG(SALT7, SCU460, GENMASK(14, 12), (2 << 12)));
-+FUNCFG_DESCL(AA18, PIN_CFG(ADC4, SCU460, GENMASK(18, 16), 0),
-+ PIN_CFG(GPIY4, SCU460, GENMASK(18, 16), (1 << 16)),
-+ PIN_CFG(SALT8, SCU460, GENMASK(18, 16), (2 << 16)));
-+FUNCFG_DESCL(AA13, PIN_CFG(ADC5, SCU460, GENMASK(22, 20), 0),
-+ PIN_CFG(GPIY5, SCU460, GENMASK(22, 20), (1 << 20)),
-+ PIN_CFG(SALT9, SCU460, GENMASK(22, 20), (2 << 20)));
-+FUNCFG_DESCL(Y18, PIN_CFG(ADC6, SCU460, GENMASK(26, 24), 0),
-+ PIN_CFG(GPIY6, SCU460, GENMASK(26, 24), (1 << 24)),
-+ PIN_CFG(SALT10, SCU460, GENMASK(26, 24), (2 << 24)));
-+FUNCFG_DESCL(AA12, PIN_CFG(ADC7, SCU460, GENMASK(30, 28), 0),
-+ PIN_CFG(GPIY7, SCU460, GENMASK(30, 28), (1 << 28)),
-+ PIN_CFG(SALT11, SCU460, GENMASK(30, 28), (2 << 28)));
-+FUNCFG_DESCL(W20, PIN_CFG(ADC15, SCU464, GENMASK(2, 0), 0),
-+ PIN_CFG(GPIZ7, SCU464, GENMASK(2, 0), 1));
-+FUNCFG_DESCL(V20, PIN_CFG(ADC14, SCU464, GENMASK(6, 4), 0),
-+ PIN_CFG(GPIZ6, SCU464, GENMASK(6, 4), (1 << 4)),
-+ PIN_CFG(AUXPWRGOOD1, SCU464, GENMASK(6, 4), (2 << 4)));
-+FUNCFG_DESCL(Y11, PIN_CFG(ADC13, SCU464, GENMASK(10, 8), 0),
-+ PIN_CFG(GPIZ5, SCU464, GENMASK(10, 8), (1 << 8)),
-+ PIN_CFG(AUXPWRGOOD0, SCU464, GENMASK(10, 8), (2 << 8)));
-+FUNCFG_DESCL(V14, PIN_CFG(ADC12, SCU464, GENMASK(14, 12), 0),
-+ PIN_CFG(GPIZ4, SCU464, GENMASK(14, 12), (1 << 12)));
-+FUNCFG_DESCL(V19, PIN_CFG(ADC11, SCU464, GENMASK(18, 16), 0),
-+ PIN_CFG(GPIZ3, SCU464, GENMASK(18, 16), (1 << 16)));
-+FUNCFG_DESCL(W14, PIN_CFG(ADC10, SCU464, GENMASK(22, 20), 0),
-+ PIN_CFG(GPIZ2, SCU464, GENMASK(22, 20), (1 << 20)));
-+FUNCFG_DESCL(Y20, PIN_CFG(ADC9, SCU464, GENMASK(26, 24), 0),
-+ PIN_CFG(GPIZ1, SCU464, GENMASK(26, 24), (1 << 24)));
-+FUNCFG_DESCL(AB19, PIN_CFG(ADC8, SCU464, GENMASK(30, 28), 0),
-+ PIN_CFG(GPIZ0, SCU464, GENMASK(30, 28), (1 << 28)));
-+FUNCFG_DESCL(U26, PIN_CFG(SGPM0, SCU468, GENMASK(2, 0), 1),
-+ PIN_CFG(SMON0, SCU468, GENMASK(2, 0), 2),
-+ PIN_CFG(NCTS2, SCU468, GENMASK(2, 0), 3),
-+ PIN_CFG(MACLINK0, SCU468, GENMASK(2, 0), 4));
-+FUNCFG_DESCL(U25, PIN_CFG(SGPM0, SCU468, GENMASK(6, 4), (1 << 4)),
-+ PIN_CFG(SMON0, SCU468, GENMASK(6, 4), (2 << 4)),
-+ PIN_CFG(NDCD2, SCU468, GENMASK(6, 4), (3 << 4)),
-+ PIN_CFG(MACLINK2, SCU468, GENMASK(6, 4), (4 << 4)));
-+FUNCFG_DESCL(V26, PIN_CFG(SGPM0LD_R, SCU468, GENMASK(10, 8), (2 << 8)),
-+ PIN_CFG(HBLED, SCU468, GENMASK(10, 8), (2 << 8)));
-+FUNCFG_DESCL(W26, PIN_CFG(SGPM0, SCU468, GENMASK(14, 12), (1 << 12)),
-+ PIN_CFG(SMON0, SCU468, GENMASK(14, 12), (2 << 12)),
-+ PIN_CFG(NDSR2, SCU468, GENMASK(14, 12), (3 << 12)));
-+FUNCFG_DESCL(Y26, PIN_CFG(SGPM0, SCU468, GENMASK(18, 16), (1 << 16)),
-+ PIN_CFG(SMON0, SCU468, GENMASK(18, 16), (2 << 16)),
-+ PIN_CFG(NRI2, SCU468, GENMASK(18, 16), (3 << 16)));
-+FUNCFG_DESCL(W25, PIN_CFG(SGPM1, SCU468, GENMASK(22, 20), (1 << 20)),
-+ PIN_CFG(WDTRST4N, SCU468, GENMASK(22, 20), (2 << 20)),
-+ PIN_CFG(NDTR2, SCU468, GENMASK(22, 20), (3 << 20)),
-+ PIN_CFG(SMON1, SCU468, GENMASK(22, 20), (4 << 20)));
-+FUNCFG_DESCL(V24, PIN_CFG(SGPM1, SCU468, GENMASK(26, 24), (1 << 24)),
-+ PIN_CFG(WDTRST5N, SCU468, GENMASK(26, 24), (2 << 24)),
-+ PIN_CFG(NRTS2, SCU468, GENMASK(26, 24), (3 << 24)),
-+ PIN_CFG(SMON1, SCU468, GENMASK(26, 24), (4 << 24)));
-+FUNCFG_DESCL(U24, PIN_CFG(SGPM1LD_R, SCU468, GENMASK(30, 28), (1 << 28)),
-+ PIN_CFG(WDTRST6N, SCU468, GENMASK(30, 28), (2 << 28)),
-+ PIN_CFG(MACLINK1, SCU468, GENMASK(30, 28), (3 << 28)));
-+FUNCFG_DESCL(SGMII0, PIN_CFG(SGMII, SCU47C, BIT(0), 1 << 0));
-+FUNCFG_DESCL(PCIERC2_PERST, PIN_CFG(PCIERC2PERST, SCU908, BIT(1), 1 << 1));
-+FUNCFG_DESCL(PORTC_MODE, PIN_CFG(USB2CUD, SCU3B0, GENMASK(1, 0), 0),
-+ PIN_CFG(USB2CD, SCU3B0, GENMASK(1, 0), 1 << 0),
-+ PIN_CFG(USB2CH, SCU3B0, GENMASK(1, 0), 2 << 0),
-+ PIN_CFG(USB2CU, SCU3B0, GENMASK(1, 0), 3 << 0));
-+FUNCFG_DESCL(PORTD_MODE, PIN_CFG(USB2DD, SCU3B0, GENMASK(3, 2), 1 << 2),
-+ PIN_CFG(USB2DH, SCU3B0, GENMASK(3, 2), 2 << 2));
-+
-+static const struct aspeed_g7_pincfg pin_cfg[] = {
-+ PINCFG_PIN(C16), PINCFG_PIN(C14), PINCFG_PIN(C11),
-+ PINCFG_PIN(D9), PINCFG_PIN(F14), PINCFG_PIN(D10),
-+ PINCFG_PIN(C12), PINCFG_PIN(C13), PINCFG_PIN(AA24),
-+ PINCFG_PIN(AB24), PINCFG_PIN(AB23), PINCFG_PIN(AC22),
-+ PINCFG_PIN(AD22), PINCFG_PIN(AE21), PINCFG_PIN(AF20),
-+ PINCFG_PIN(AE20), PINCFG_PIN(AD20), PINCFG_PIN(Y23),
-+ PINCFG_PIN(W23), PINCFG_PIN(AD19), PINCFG_PIN(AC20),
-+ PINCFG_PIN(AA23), PINCFG_PIN(AB22), PINCFG_PIN(AB21),
-+ PINCFG_PIN(AA22), PINCFG_PIN(Y22), PINCFG_PIN(W22),
-+ PINCFG_PIN(AF18), PINCFG_PIN(AE18), PINCFG_PIN(AD18),
-+ PINCFG_PIN(AC19), PINCFG_PIN(AB20), PINCFG_PIN(AF17),
-+ PINCFG_PIN(AA16), PINCFG_PIN(Y16), PINCFG_PIN(V17),
-+ PINCFG_PIN(J13), PINCFG_PIN(AB16), PINCFG_PIN(AC16),
-+ PINCFG_PIN(AF16), PINCFG_PIN(AA15), PINCFG_PIN(AB15),
-+ PINCFG_PIN(AC15), PINCFG_PIN(AD15), PINCFG_PIN(Y15),
-+ PINCFG_PIN(AA14), PINCFG_PIN(W16), PINCFG_PIN(V16),
-+ PINCFG_PIN(AB18), PINCFG_PIN(AC18), PINCFG_PIN(K13),
-+ PINCFG_PIN(AA17), PINCFG_PIN(AB17), PINCFG_PIN(AD16),
-+ PINCFG_PIN(AC17), PINCFG_PIN(AD17), PINCFG_PIN(AE16),
-+ PINCFG_PIN(AE17), PINCFG_PIN(U23), PINCFG_PIN(T24),
-+ PINCFG_PIN(HOLE0), PINCFG_PIN(HOLE1), PINCFG_PIN(HOLE2),
-+ PINCFG_PIN(HOLE3), PINCFG_PIN(AC24), PINCFG_PIN(AD24),
-+ PINCFG_PIN(AE23), PINCFG_PIN(AE19), PINCFG_PIN(AF23),
-+ PINCFG_PIN(Y25), PINCFG_PIN(AA25), PINCFG_PIN(AF19),
-+ PINCFG_PIN(AB25), PINCFG_PIN(AC25), PINCFG_PIN(AD25),
-+ PINCFG_PIN(V22), PINCFG_PIN(AE25), PINCFG_PIN(V21),
-+ PINCFG_PIN(AF21), PINCFG_PIN(AF25), PINCFG_PIN(AF26),
-+ PINCFG_PIN(AE26), PINCFG_PIN(W21), PINCFG_PIN(AD26),
-+ PINCFG_PIN(Y21), PINCFG_PIN(AC26), PINCFG_PIN(AA26),
-+ PINCFG_PIN(AB26), PINCFG_PIN(T26), PINCFG_PIN(AA20),
-+ PINCFG_PIN(V23), PINCFG_PIN(W24), PINCFG_PIN(R26),
-+ PINCFG_PIN(AA21), PINCFG_PIN(P26), PINCFG_PIN(Y24),
-+ PINCFG_PIN(B16), PINCFG_PIN(D14), PINCFG_PIN(B15),
-+ PINCFG_PIN(B14), PINCFG_PIN(C17), PINCFG_PIN(B13),
-+ PINCFG_PIN(E14), PINCFG_PIN(C15), PINCFG_PIN(D24),
-+ PINCFG_PIN(B23), PINCFG_PIN(B22), PINCFG_PIN(C23),
-+ PINCFG_PIN(B18), PINCFG_PIN(B21), PINCFG_PIN(B17),
-+ PINCFG_PIN(B19), PINCFG_PIN(B26), PINCFG_PIN(A25),
-+ PINCFG_PIN(A24), PINCFG_PIN(B24), PINCFG_PIN(E26),
-+ PINCFG_PIN(A21), PINCFG_PIN(A19), PINCFG_PIN(A18),
-+ PINCFG_PIN(D26), PINCFG_PIN(C26), PINCFG_PIN(A23),
-+ PINCFG_PIN(B25), PINCFG_PIN(A22), PINCFG_PIN(F26),
-+ PINCFG_PIN(A26), PINCFG_PIN(A14), PINCFG_PIN(E10),
-+ PINCFG_PIN(E13), PINCFG_PIN(D12), PINCFG_PIN(F10),
-+ PINCFG_PIN(E11), PINCFG_PIN(F11), PINCFG_PIN(F13),
-+ PINCFG_PIN(N15), PINCFG_PIN(C20), PINCFG_PIN(C19),
-+ PINCFG_PIN(A8), PINCFG_PIN(R14), PINCFG_PIN(A7),
-+ PINCFG_PIN(P14), PINCFG_PIN(D20), PINCFG_PIN(A6),
-+ PINCFG_PIN(B6), PINCFG_PIN(N14), PINCFG_PIN(B7),
-+ PINCFG_PIN(B8), PINCFG_PIN(B9), PINCFG_PIN(M14),
-+ PINCFG_PIN(J11), PINCFG_PIN(E7), PINCFG_PIN(D19),
-+ PINCFG_PIN(B11), PINCFG_PIN(D15), PINCFG_PIN(B12),
-+ PINCFG_PIN(B10), PINCFG_PIN(P13), PINCFG_PIN(C18),
-+ PINCFG_PIN(C6), PINCFG_PIN(C7), PINCFG_PIN(D7),
-+ PINCFG_PIN(N13), PINCFG_PIN(C8), PINCFG_PIN(C9),
-+ PINCFG_PIN(C10), PINCFG_PIN(M16), PINCFG_PIN(A15),
-+ PINCFG_PIN(G11), PINCFG_PIN(H7), PINCFG_PIN(H8),
-+ PINCFG_PIN(H9), PINCFG_PIN(H10), PINCFG_PIN(H11),
-+ PINCFG_PIN(J9), PINCFG_PIN(J10), PINCFG_PIN(E9),
-+ PINCFG_PIN(F9), PINCFG_PIN(F8), PINCFG_PIN(M13),
-+ PINCFG_PIN(F7), PINCFG_PIN(D8), PINCFG_PIN(E8),
-+ PINCFG_PIN(L12), PINCFG_PIN(F12), PINCFG_PIN(E12),
-+ PINCFG_PIN(J12), PINCFG_PIN(G7), PINCFG_PIN(G8),
-+ PINCFG_PIN(G9), PINCFG_PIN(G10), PINCFG_PIN(K12),
-+ PINCFG_PIN(W17), PINCFG_PIN(V18), PINCFG_PIN(W18),
-+ PINCFG_PIN(Y17), PINCFG_PIN(AA18), PINCFG_PIN(AA13),
-+ PINCFG_PIN(Y18), PINCFG_PIN(AA12), PINCFG_PIN(W20),
-+ PINCFG_PIN(V20), PINCFG_PIN(Y11), PINCFG_PIN(V14),
-+ PINCFG_PIN(V19), PINCFG_PIN(W14), PINCFG_PIN(Y20),
-+ PINCFG_PIN(AB19), PINCFG_PIN(U26), PINCFG_PIN(U25),
-+ PINCFG_PIN(V26), PINCFG_PIN(W26), PINCFG_PIN(Y26),
-+ PINCFG_PIN(W25), PINCFG_PIN(V24), PINCFG_PIN(U24),
-+ PINCFG_PIN(SGMII0), PINCFG_PIN(PCIERC2_PERST),
-+ PINCFG_PIN(PORTC_MODE), PINCFG_PIN(PORTD_MODE),
-+};
-+
-+static int aspeed_g7_soc1_dt_node_to_map(struct pinctrl_dev *pctldev,
-+ struct device_node *np_config,
-+ struct pinctrl_map **map, u32 *num_maps)
-+{
-+ return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
-+ PIN_MAP_TYPE_INVALID);
-+}
-+
-+static void aspeed_g7_soc1_dt_free_map(struct pinctrl_dev *pctldev,
-+ struct pinctrl_map *map, u32 num_maps)
-+{
-+ kfree(map);
-+}
-+
-+static const struct pinctrl_ops aspeed_g7_soc1_pinctrl_ops = {
-+ .get_groups_count = aspeed_pinctrl_get_groups_count,
-+ .get_group_name = aspeed_pinctrl_get_group_name,
-+ .get_group_pins = aspeed_pinctrl_get_group_pins,
-+ .pin_dbg_show = aspeed_pinctrl_pin_dbg_show,
-+ .dt_node_to_map = aspeed_g7_soc1_dt_node_to_map,
-+ .dt_free_map = aspeed_g7_soc1_dt_free_map,
-+};
-+
-+static const struct pinmux_ops aspeed_g7_soc1_pinmux_ops = {
-+ .get_functions_count = aspeed_pinmux_get_fn_count,
-+ .get_function_name = aspeed_pinmux_get_fn_name,
-+ .get_function_groups = aspeed_pinmux_get_fn_groups,
-+ .set_mux = aspeed_g7_pinmux_set_mux,
-+ .gpio_request_enable = aspeed_g7_gpio_request_enable,
-+ .strict = true,
-+};
-+
-+static const struct pinconf_ops aspeed_g7_soc1_pinconf_ops = {
-+ .is_generic = true,
-+ .pin_config_get = aspeed_pin_config_get,
-+ .pin_config_set = aspeed_pin_config_set,
-+ .pin_config_group_get = aspeed_pin_config_group_get,
-+ .pin_config_group_set = aspeed_pin_config_group_set,
-+};
-+
-+/* pinctrl_desc */
-+static struct pinctrl_desc aspeed_g7_soc1_pinctrl_desc = {
-+ .name = "aspeed-g7-soc1-pinctrl",
-+ .pins = aspeed_g7_soc1_pins,
-+ .npins = ARRAY_SIZE(aspeed_g7_soc1_pins),
-+ .pctlops = &aspeed_g7_soc1_pinctrl_ops,
-+ .pmxops = &aspeed_g7_soc1_pinmux_ops,
-+ .confops = &aspeed_g7_soc1_pinconf_ops,
-+ .owner = THIS_MODULE,
-+};
-+
-+static struct aspeed_pin_config aspeed_g7_configs[] = {
-+ /* GPIOA */
-+ { PIN_CONFIG_DRIVE_STRENGTH, { C16, C16 }, SCU4C0, GENMASK(1, 0) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { C14, C14 }, SCU4C0, GENMASK(3, 2) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { C11, C11 }, SCU4C0, GENMASK(5, 4) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { D9, D9 }, SCU4C0, GENMASK(7, 6) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { F14, F14 }, SCU4C0, GENMASK(9, 8) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { D10, D10 }, SCU4C0, GENMASK(11, 10) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { C12, C12 }, SCU4C0, GENMASK(13, 12) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { C13, C13 }, SCU4C0, GENMASK(15, 14) },
-+ /* GPIOI */
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AC24, AC24 }, SCU4C0, GENMASK(17, 16) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AD24, AD24 }, SCU4C0, GENMASK(19, 18) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AE23, AE23 }, SCU4C0, GENMASK(21, 20) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AE19, AE19 }, SCU4C0, GENMASK(23, 22) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AF23, AF23 }, SCU4C0, GENMASK(25, 24) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { Y25, Y25 }, SCU4C0, GENMASK(27, 26) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AA25, AA25 }, SCU4C0, GENMASK(29, 28) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AF19, AF19 }, SCU4C0, GENMASK(31, 30) },
-+ /* GPIOJ */
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AB25, AB25 }, SCU4C4, GENMASK(1, 0) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AC25, AC25 }, SCU4C4, GENMASK(3, 2) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AD25, AD25 }, SCU4C4, GENMASK(5, 4) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { V22, V22 }, SCU4C4, GENMASK(7, 6) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AE25, AE25 }, SCU4C4, GENMASK(9, 8) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { V21, V21 }, SCU4C4, GENMASK(11, 10) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AF21, AF21 }, SCU4C4, GENMASK(13, 12) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AF25, AF25 }, SCU4C4, GENMASK(15, 14) },
-+ /* GPIOK */
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AF26, AF26 }, SCU4C4, GENMASK(17, 16) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AE26, AE26 }, SCU4C4, GENMASK(19, 18) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { W21, W21 }, SCU4C4, GENMASK(21, 20) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AD26, AD26 }, SCU4C4, GENMASK(23, 22) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { Y21, Y21 }, SCU4C4, GENMASK(25, 24) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AC26, AC26 }, SCU4C4, GENMASK(27, 26) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AA26, AA26 }, SCU4C4, GENMASK(29, 28) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AB26, AB26 }, SCU4C4, GENMASK(31, 30) },
-+ /* GPIOL */
-+ { PIN_CONFIG_DRIVE_STRENGTH, { T26, T26 }, SCU4C8, GENMASK(1, 0) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AA20, AA20 }, SCU4C8, GENMASK(3, 2) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { V23, V23 }, SCU4C8, GENMASK(5, 4) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { W24, W24 }, SCU4C8, GENMASK(7, 6) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { R26, R26 }, SCU4C8, GENMASK(9, 8) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { AA21, AA21 }, SCU4C8, GENMASK(11, 10) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { P26, P26 }, SCU4C8, GENMASK(13, 12) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { Y24, Y24 }, SCU4C8, GENMASK(15, 14) },
-+ /* GPIOM */
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B16, B16 }, SCU4C8, GENMASK(17, 16) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { D14, D14 }, SCU4C8, GENMASK(19, 18) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B15, B15 }, SCU4C8, GENMASK(21, 20) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B14, B14 }, SCU4C8, GENMASK(23, 22) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { C17, C17 }, SCU4C8, GENMASK(25, 24) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B13, B13 }, SCU4C8, GENMASK(27, 26) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { E14, E14 }, SCU4C8, GENMASK(29, 28) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { C15, C15 }, SCU4C8, GENMASK(31, 30) },
-+ /* GPION */
-+ { PIN_CONFIG_DRIVE_STRENGTH, { D24, D24 }, SCU4CC, GENMASK(1, 0) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B23, B23 }, SCU4CC, GENMASK(3, 2) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B22, B22 }, SCU4CC, GENMASK(5, 4) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { C23, C23 }, SCU4CC, GENMASK(7, 6) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B18, B18 }, SCU4CC, GENMASK(9, 8) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B21, B21 }, SCU4CC, GENMASK(11, 10) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B17, B17 }, SCU4CC, GENMASK(13, 12) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B19, B19 }, SCU4CC, GENMASK(15, 14) },
-+ /* GPIOO */
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B26, B26 }, SCU4CC, GENMASK(17, 16) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { A25, A25 }, SCU4CC, GENMASK(19, 18) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { A24, A24 }, SCU4CC, GENMASK(21, 20) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B24, B24 }, SCU4CC, GENMASK(23, 22) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { E26, E26 }, SCU4CC, GENMASK(25, 24) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { A21, A21 }, SCU4CC, GENMASK(27, 26) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { A19, A19 }, SCU4CC, GENMASK(29, 28) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { A18, A18 }, SCU4CC, GENMASK(31, 30) },
-+ /* GPIOP */
-+ { PIN_CONFIG_DRIVE_STRENGTH, { D26, D26 }, SCU4D0, GENMASK(1, 0) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { C26, C26 }, SCU4D0, GENMASK(3, 2) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { A23, A23 }, SCU4D0, GENMASK(5, 4) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B25, B25 }, SCU4D0, GENMASK(7, 6) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { A22, A22 }, SCU4D0, GENMASK(9, 8) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { F26, F26 }, SCU4D0, GENMASK(11, 10) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { A26, A26 }, SCU4D0, GENMASK(13, 12) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { A14, A14 }, SCU4D0, GENMASK(15, 14) },
-+ /* GPIOQ */
-+ { PIN_CONFIG_DRIVE_STRENGTH, { E10, E10 }, SCU4D0, GENMASK(17, 16) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { E13, E13 }, SCU4D0, GENMASK(19, 18) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { D12, D12 }, SCU4D0, GENMASK(21, 20) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { F10, F10 }, SCU4D0, GENMASK(23, 22) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { E11, E11 }, SCU4D0, GENMASK(25, 24) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { F11, F11 }, SCU4D0, GENMASK(27, 26) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { F13, F13 }, SCU4D0, GENMASK(29, 28) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { N15, N15 }, SCU4D0, GENMASK(31, 30) },
-+ /* GPIOR */
-+ { PIN_CONFIG_DRIVE_STRENGTH, { C20, C20 }, SCU4D4, GENMASK(1, 0) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { C19, C19 }, SCU4D4, GENMASK(3, 2) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { A8, A8 }, SCU4D4, GENMASK(5, 4) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { R14, R14 }, SCU4D4, GENMASK(7, 6) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { A7, A7 }, SCU4D4, GENMASK(9, 8) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { P14, P14 }, SCU4D4, GENMASK(11, 10) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { D20, D20 }, SCU4D4, GENMASK(13, 12) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { A6, A6 }, SCU4D4, GENMASK(15, 14) },
-+ /* GPIOS */
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B6, B6 }, SCU4D4, GENMASK(17, 16) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { N14, N14 }, SCU4D4, GENMASK(19, 18) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B7, B7 }, SCU4D4, GENMASK(21, 20) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B8, B8 }, SCU4D4, GENMASK(23, 22) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B9, B9 }, SCU4D4, GENMASK(25, 24) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { M14, M14 }, SCU4D4, GENMASK(27, 26) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { J11, J11 }, SCU4D4, GENMASK(29, 28) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { E7, E7 }, SCU4D4, GENMASK(31, 30) },
-+ /* GPIOT */
-+ { PIN_CONFIG_DRIVE_STRENGTH, { D19, D19 }, SCU4D8, GENMASK(1, 0) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B11, B11 }, SCU4D8, GENMASK(3, 2) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { D15, D15 }, SCU4D8, GENMASK(5, 4) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B12, B12 }, SCU4D8, GENMASK(7, 6) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { B10, B10 }, SCU4D8, GENMASK(9, 8) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { P13, P13 }, SCU4D8, GENMASK(11, 10) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { C18, C18 }, SCU4D8, GENMASK(13, 12) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { C6, C6 }, SCU4D8, GENMASK(15, 14) },
-+ /* GPIOU */
-+ { PIN_CONFIG_DRIVE_STRENGTH, { C7, C7 }, SCU4D8, GENMASK(17, 16) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { D7, D7 }, SCU4D8, GENMASK(19, 18) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { N13, N13 }, SCU4D8, GENMASK(21, 20) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { C8, C8 }, SCU4D8, GENMASK(23, 22) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { C9, C9 }, SCU4D8, GENMASK(25, 24) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { C10, C10 }, SCU4D8, GENMASK(27, 26) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { M16, M16 }, SCU4D8, GENMASK(29, 28) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { A15, A15 }, SCU4D8, GENMASK(31, 30) },
-+ /* GPIOW */
-+ { PIN_CONFIG_DRIVE_STRENGTH, { E9, E9 }, SCU4DC, GENMASK(1, 0) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { F9, F9 }, SCU4DC, GENMASK(3, 2) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { F8, F8 }, SCU4DC, GENMASK(5, 4) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { M13, M13 }, SCU4DC, GENMASK(7, 6) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { F7, F7 }, SCU4DC, GENMASK(9, 8) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { D8, D8 }, SCU4DC, GENMASK(11, 10) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { E8, E8 }, SCU4DC, GENMASK(13, 12) },
-+ { PIN_CONFIG_DRIVE_STRENGTH, { L12, L12 }, SCU4DC, GENMASK(15, 14) },
-+};
-+
-+static const struct aspeed_pin_config_map aspeed_g7_pin_config_map[] = {
-+ { PIN_CONFIG_BIAS_PULL_DOWN, 0, 1, BIT_MASK(0)},
-+ { PIN_CONFIG_BIAS_PULL_DOWN, -1, 0, BIT_MASK(0)},
-+ { PIN_CONFIG_BIAS_PULL_UP, 0, 1, BIT_MASK(0)},
-+ { PIN_CONFIG_BIAS_PULL_UP, -1, 0, BIT_MASK(0)},
-+ { PIN_CONFIG_BIAS_DISABLE, -1, 1, BIT_MASK(0)},
-+ { PIN_CONFIG_DRIVE_STRENGTH, 0, 0, GENMASK(1, 0)},
-+ { PIN_CONFIG_DRIVE_STRENGTH, 1, 1, GENMASK(1, 0)},
-+ { PIN_CONFIG_DRIVE_STRENGTH, 2, 2, GENMASK(1, 0)},
-+ { PIN_CONFIG_DRIVE_STRENGTH, 3, 3, GENMASK(1, 0)},
-+ { PIN_CONFIG_POWER_SOURCE, 3300, 0, BIT_MASK(0)},
-+ { PIN_CONFIG_POWER_SOURCE, 1800, 1, BIT_MASK(0)},
-+};
-+
-+static struct aspeed_pinctrl_data aspeed_g7_pinctrl_data = {
-+ .pins = aspeed_g7_soc1_pins,
-+ .npins = ARRAY_SIZE(aspeed_g7_soc1_pins),
-+ .pinmux = {
-+ .groups = aspeed_g7_soc1_pingroups,
-+ .ngroups = ARRAY_SIZE(aspeed_g7_soc1_pingroups),
-+ .functions = aspeed_g7_soc1_funcs,
-+ .nfunctions = ARRAY_SIZE(aspeed_g7_soc1_funcs),
-+ .configs_g7 = pin_cfg,
-+ .nconfigs_g7 = ARRAY_SIZE(pin_cfg),
-+ },
-+ .configs = aspeed_g7_configs,
-+ .nconfigs = ARRAY_SIZE(aspeed_g7_configs),
-+ .confmaps = aspeed_g7_pin_config_map,
-+ .nconfmaps = ARRAY_SIZE(aspeed_g7_pin_config_map),
-+};
-+
-+static int aspeed_g7_soc1_pinctrl_probe(struct platform_device *pdev)
-+{
-+ return aspeed_pinctrl_probe(pdev, &aspeed_g7_soc1_pinctrl_desc,
-+ &aspeed_g7_pinctrl_data);
-+}
-+
-+static const struct of_device_id aspeed_g7_soc1_pinctrl_match[] = {
-+ { .compatible = "aspeed,ast2700-soc1-pinctrl" },
-+ {}
-+};
-+MODULE_DEVICE_TABLE(of, aspeed_g7_soc1_pinctrl_match);
-+
-+static struct platform_driver aspeed_g7_soc1_pinctrl_driver = {
-+ .probe = aspeed_g7_soc1_pinctrl_probe,
-+ .driver = {
-+ .name = "aspeed-g7-soc1-pinctrl",
-+ .of_match_table = aspeed_g7_soc1_pinctrl_match,
-+ .suppress_bind_attrs = true,
-+ },
-+};
-+
-+static int __init aspeed_g7_soc1_pinctrl_register(void)
-+{
-+ return platform_driver_register(&aspeed_g7_soc1_pinctrl_driver);
-+}
-+arch_initcall(aspeed_g7_soc1_pinctrl_register);
-diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
-index 9c6ee46ac7a0..83d62506e279 100644
---- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
-+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
-@@ -285,6 +285,32 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
- return 0;
- }
-
-+int aspeed_g7_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
-+ unsigned int group)
-+{
-+ int i, j;
-+ int pin;
-+ const struct aspeed_g7_funcfg *funcfg;
-+ struct aspeed_pinctrl_data *pinctrl = pinctrl_dev_get_drvdata(pctldev);
-+ const struct aspeed_pin_group *pingroup =
-+ &pinctrl->pinmux.groups[group];
-+ const struct aspeed_g7_pincfg *pin_cfg = pinctrl->pinmux.configs_g7;
-+
-+ for (i = 0; i < pingroup->npins; i++) {
-+ pin = pingroup->pins[i];
-+ funcfg = pin_cfg[pin].funcfg;
-+
-+ for (j = 0; j < pin_cfg[pin].nfuncfg; j++) {
-+ if (strcmp(funcfg[j].name, pingroup->name) == 0) {
-+ regmap_update_bits(pinctrl->scu, funcfg[j].reg,
-+ funcfg[j].mask,
-+ funcfg[j].val);
-+ }
-+ }
-+ }
-+ return 0;
-+}
-+
- static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr)
- {
- /*
-@@ -440,6 +466,27 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
- return 0;
- }
-
-+int aspeed_g7_gpio_request_enable(struct pinctrl_dev *pctldev,
-+ struct pinctrl_gpio_range *range,
-+ unsigned int offset)
-+{
-+ int i;
-+ struct aspeed_pinctrl_data *pinctrl = pinctrl_dev_get_drvdata(pctldev);
-+ const struct aspeed_g7_pincfg *pin_cfg = pinctrl->pinmux.configs_g7;
-+ const struct aspeed_g7_funcfg *funcfg = pin_cfg[offset].funcfg;
-+
-+ for (i = 0; i < pin_cfg[offset].nfuncfg; i++) {
-+ if (!strncmp(funcfg[i].name, "GPI", 3)) {
-+ regmap_update_bits(pinctrl->scu, funcfg[i].reg,
-+ funcfg[i].mask, funcfg[i].val);
-+ break;
-+ }
-+ regmap_update_bits(pinctrl->scu, funcfg[i].reg, funcfg[i].mask,
-+ 0);
-+ }
-+ return 0;
-+}
-+
- int aspeed_pinctrl_probe(struct platform_device *pdev,
- struct pinctrl_desc *pdesc,
- struct aspeed_pinctrl_data *pdata)
-diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.h b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
-index 4dcde3bc29c8..0d7c35af11d0 100644
---- a/drivers/pinctrl/aspeed/pinctrl-aspeed.h
-+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.h
-@@ -98,9 +98,14 @@ int aspeed_pinmux_get_fn_groups(struct pinctrl_dev *pctldev,
- unsigned int * const num_groups);
- int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
- unsigned int group);
-+int aspeed_g7_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
-+ unsigned int group);
- int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev,
- struct pinctrl_gpio_range *range,
- unsigned int offset);
-+int aspeed_g7_gpio_request_enable(struct pinctrl_dev *pctldev,
-+ struct pinctrl_gpio_range *range,
-+ unsigned int offset);
- int aspeed_pinctrl_probe(struct platform_device *pdev,
- struct pinctrl_desc *pdesc,
- struct aspeed_pinctrl_data *pdata);
-diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h
-index aaa78a613196..09d1658b097a 100644
---- a/drivers/pinctrl/aspeed/pinmux-aspeed.h
-+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h
-@@ -792,6 +792,33 @@ struct aspeed_pinmux_ops {
- const struct aspeed_sig_expr *expr, bool enabled);
- };
-
-+struct aspeed_g7_funcfg {
-+ char *name;
-+ u32 reg;
-+ u32 mask;
-+ int val;
-+};
-+
-+struct aspeed_g7_pincfg {
-+ struct aspeed_g7_funcfg *funcfg;
-+ unsigned int nfuncfg;
-+};
-+
-+#define PIN_CFG(cfg_name, cfg_reg, cfg_mask, cfg_val) \
-+ { \
-+ .name = #cfg_name, .reg = cfg_reg, .mask = cfg_mask, \
-+ .val = cfg_val \
-+ }
-+#define FUNCFG_SYM(pin) funcfg_ ## pin
-+#define FUNCFG_PTR(pin) (&FUNCFG_SYM(pin))
-+
-+#define FUNCFG_DESCL(pin, ...) \
-+ static const struct aspeed_g7_funcfg FUNCFG_SYM(pin)[] = { __VA_ARGS__ }
-+
-+#define PINCFG_PIN(pin) \
-+ [pin] = { .funcfg = (struct aspeed_g7_funcfg *)FUNCFG_PTR(pin), \
-+ .nfuncfg = ARRAY_SIZE(FUNCFG_SYM(pin)) }
-+
- struct aspeed_pinmux_data {
- struct device *dev;
- struct regmap *maps[ASPEED_NR_PINMUX_IPS];
-@@ -803,6 +830,10 @@ struct aspeed_pinmux_data {
-
- const struct aspeed_pin_function *functions;
- const unsigned int nfunctions;
-+
-+ const struct aspeed_g7_pincfg *configs_g7;
-+ const unsigned int nconfigs_g7;
-+
- };
-
- int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc, bool enabled,
---
-2.34.1
-
diff --git a/recipes-kernel/linux/files/0006-Add-clk-driver-for-ast2700.patch b/recipes-kernel/linux/files/0006-Add-clk-driver-for-ast2700.patch
new file mode 100644
index 0000000..c524042
--- /dev/null
+++ b/recipes-kernel/linux/files/0006-Add-clk-driver-for-ast2700.patch
@@ -0,0 +1,2970 @@
+From 0569fe144b9936685ea8e0f5a24b87c1cf51240d Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Fri, 7 Mar 2025 10:13:43 +0800
+Subject: [PATCH] Add clk driver for ast2700
+
+This is based on aspeed SDK 9.05.
+From linux-aspeed:
+769f62b7baa84d6998723b0ea60280e380183553
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
+---
+ drivers/clk/Kconfig | 22 +
+ drivers/clk/Makefile | 3 +
+ drivers/clk/clk-aspeed.c | 1 +
+ drivers/clk/clk-ast1700.c | 805 +++++++++++++++++++
+ drivers/clk/clk-ast2600.c | 375 +++++++--
+ drivers/clk/clk-ast2700.c | 1527 +++++++++++++++++++++++++++++++++++++
+ 6 files changed, 2654 insertions(+), 79 deletions(-)
+ create mode 100644 drivers/clk/clk-ast1700.c
+ create mode 100644 drivers/clk/clk-ast2700.c
+
+diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
+index c30099866..0b7a73039 100644
+--- a/drivers/clk/Kconfig
++++ b/drivers/clk/Kconfig
+@@ -269,6 +269,20 @@ config COMMON_CLK_ASPEED
+ The G4 and G5 series, including the ast2400 and ast2500, are supported
+ by this driver.
+
++config COMMON_CLK_AST2700
++ bool "Clock driver for AST2700 SoC"
++ depends on ARCH_ASPEED || COMPILE_TEST
++ help
++	  This driver provides support for the clocks on the AST2700 SoC.
++	  The driver is responsible for managing the various clocks required
++	  by the peripherals and cores within the AST2700.
++
++config COMMON_CLK_AST1700
++ bool "Clock driver for AST1700"
++ depends on ARCH_ASPEED || COMPILE_TEST
++ help
++ This driver supports the AST1700 clocks on the Aspeed BMC platforms.
++
+ config COMMON_CLK_S2MPS11
+ tristate "Clock driver for S2MPS1X/S5M8767 MFD"
+ depends on MFD_SEC_CORE || COMPILE_TEST
+@@ -325,6 +339,14 @@ config COMMON_CLK_LOCHNAGAR
+ This driver supports the clocking features of the Cirrus Logic
+ Lochnagar audio development board.
+
++config COMMON_CLK_NPCM8XX
++ tristate "Clock driver for the NPCM8XX SoC Family"
++ depends on ARCH_NPCM || COMPILE_TEST
++ help
++ This driver supports the clocks on the Nuvoton BMC NPCM8XX SoC Family,
++ all the clocks are initialized by the bootloader, so this driver
++ allows only reading of current settings directly from the hardware.
++
+ config COMMON_CLK_LOONGSON2
+ bool "Clock driver for Loongson-2 SoC"
+ depends on LOONGARCH || COMPILE_TEST
+diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
+index 18969cbd4..0db16bf26 100644
+--- a/drivers/clk/Makefile
++++ b/drivers/clk/Makefile
+@@ -37,6 +37,8 @@ obj-$(CONFIG_COMMON_CLK_FSL_SAI) += clk-fsl-sai.o
+ obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o
+ obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o
+ obj-$(CONFIG_MACH_ASPEED_G6) += clk-ast2600.o
++obj-$(CONFIG_COMMON_CLK_AST2700) += clk-ast2700.o
++obj-$(CONFIG_COMMON_CLK_AST1700) += clk-ast1700.o
+ obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
+ obj-$(CONFIG_CLK_HSDK) += clk-hsdk-pll.o
+ obj-$(CONFIG_COMMON_CLK_K210) += clk-k210.o
+@@ -51,6 +53,7 @@ obj-$(CONFIG_ARCH_MILBEAUT_M10V) += clk-milbeaut.o
+ obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
+ obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
+ obj-$(CONFIG_ARCH_NPCM7XX) += clk-npcm7xx.o
++obj-$(CONFIG_COMMON_CLK_NPCM8XX) += clk-npcm8xx.o
+ obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
+ obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o
+ obj-$(CONFIG_CLK_LS1028A_PLLDIG) += clk-plldig.o
+diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
+index ff84191d0..27b99cdfa 100644
+--- a/drivers/clk/clk-aspeed.c
++++ b/drivers/clk/clk-aspeed.c
+@@ -278,6 +278,7 @@ static const u8 aspeed_resets[] = {
+ [ASPEED_RESET_PECI] = 10,
+ [ASPEED_RESET_I2C] = 2,
+ [ASPEED_RESET_AHB] = 1,
++ [ASPEED_RESET_VIDEO] = 6,
+
+ /*
+ * SCUD4 resets start at an offset to separate them from
+diff --git a/drivers/clk/clk-ast1700.c b/drivers/clk/clk-ast1700.c
+new file mode 100644
+index 000000000..49caa687c
+--- /dev/null
++++ b/drivers/clk/clk-ast1700.c
+@@ -0,0 +1,805 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++// Copyright ASPEED Technology
++
++#include <linux/clk-provider.h>
++#include <linux/of_address.h>
++#include <linux/of_device.h>
++#include <linux/reset-controller.h>
++
++#include <dt-bindings/clock/aspeed,ast1700-clk.h>
++#include <dt-bindings/reset/aspeed,ast1700-reset.h>
++
++#define AST1700_CLK_25MHZ 25000000
++#define AST1700_CLK_24MHZ 24000000
++#define AST1700_CLK_192MHZ 192000000
++/* IO Die */
++#define AST1700_CLK_STOP 0x240
++#define AST1700_CLK_STOP2 0x260
++#define AST1700_CLK_SEL1 0x280
++#define AST1700_CLK_SEL2 0x284
++#define UXCLK_MASK GENMASK(1, 0)
++#define HUXCLK_MASK GENMASK(4, 3)
++#define AST1700_HPLL_PARAM 0x300
++#define AST1700_APLL_PARAM 0x310
++#define AST1700_DPLL_PARAM 0x320
++#define AST1700_UXCLK_CTRL 0x330
++#define AST1700_HUXCLK_CTRL 0x334
++
++static DEFINE_IDA(ast1700_clk_ida);
++
++/* Globally visible clocks */
++static DEFINE_SPINLOCK(ast1700_clk_lock);
++
++/* Division of RGMII Clock */
++static const struct clk_div_table ast1700_rgmii_div_table[] = {
++ { 0x0, 4 },
++ { 0x1, 4 },
++ { 0x2, 6 },
++ { 0x3, 8 },
++ { 0x4, 10 },
++ { 0x5, 12 },
++ { 0x6, 14 },
++ { 0x7, 16 },
++ { 0 }
++};
++
++/* Division of RMII Clock */
++static const struct clk_div_table ast1700_rmii_div_table[] = {
++ { 0x0, 8 },
++ { 0x1, 8 },
++ { 0x2, 12 },
++ { 0x3, 16 },
++ { 0x4, 20 },
++ { 0x5, 24 },
++ { 0x6, 28 },
++ { 0x7, 32 },
++ { 0 }
++};
++
++/* Division of HCLK/SDIO/MAC/apll_divn CLK */
++static const struct clk_div_table ast1700_clk_div_table[] = {
++ { 0x0, 2 },
++ { 0x1, 2 },
++ { 0x2, 3 },
++ { 0x3, 4 },
++ { 0x4, 5 },
++ { 0x5, 6 },
++ { 0x6, 7 },
++ { 0x7, 8 },
++ { 0 }
++};
++
++/* Division of PCLK/EMMC CLK */
++static const struct clk_div_table ast1700_clk_div_table2[] = {
++ { 0x0, 2 },
++ { 0x1, 4 },
++ { 0x2, 6 },
++ { 0x3, 8 },
++ { 0x4, 10 },
++ { 0x5, 12 },
++ { 0x6, 14 },
++ { 0x7, 16 },
++ { 0 }
++};
++
++static struct clk_hw *AST1700_calc_uclk(const char *name, u32 val)
++{
++ unsigned int mult, div;
++
++ /* UARTCLK = UXCLK * R / (N * 2) */
++ u32 r = val & 0xff;
++ u32 n = (val >> 8) & 0x3ff;
++
++ mult = r;
++ div = n * 2;
++
++ return clk_hw_register_fixed_factor(NULL, name, "ast1700-uxclk", 0, mult, div);
++};
++
++static struct clk_hw *AST1700_calc_huclk(const char *name, u32 val)
++{
++ unsigned int mult, div;
++
++ /* UARTCLK = UXCLK * R / (N * 2) */
++ u32 r = val & 0xff;
++ u32 n = (val >> 8) & 0x3ff;
++
++ mult = r;
++ div = n * 2;
++
++ return clk_hw_register_fixed_factor(NULL, name, "ast1700-huxclk", 0, mult, div);
++};
++
++struct clk_hw *AST1700_calc_pll(const char *name, const char *parent_name, u32 val)
++{
++ unsigned int mult, div;
++
++ if (val & BIT(24)) {
++ /* Pass through mode */
++ mult = 1;
++ div = 1;
++ } else {
++ /* F = 25Mhz * [(M + 1) / (n + 1)] / (p + 1) */
++ u32 m = val & 0x1fff;
++ u32 n = (val >> 13) & 0x3f;
++ u32 p = (val >> 19) & 0xf;
++
++ mult = (m + 1) / (n + 1);
++ div = (p + 1);
++ }
++ return clk_hw_register_fixed_factor(NULL, name, parent_name, 0, mult, div);
++};
++
++static int AST1700_clk_is_enabled(struct clk_hw *hw)
++{
++ struct clk_gate *gate = to_clk_gate(hw);
++ u32 clk = BIT(gate->bit_idx);
++ u32 reg;
++
++ reg = readl(gate->reg);
++
++ return !(reg & clk);
++}
++
++static int AST1700_clk_enable(struct clk_hw *hw)
++{
++ struct clk_gate *gate = to_clk_gate(hw);
++ u32 clk = BIT(gate->bit_idx);
++
++ if (readl(gate->reg) & clk)
++ writel(clk, gate->reg + 0x04);
++
++ return 0;
++}
++
++static void AST1700_clk_disable(struct clk_hw *hw)
++{
++ struct clk_gate *gate = to_clk_gate(hw);
++ u32 clk = BIT(gate->bit_idx);
++
++	/* Writing the bit to the set register stops (gates) the clock */
++ writel(clk, gate->reg);
++}
++
++static const struct clk_ops AST1700_clk_gate_ops = {
++ .enable = AST1700_clk_enable,
++ .disable = AST1700_clk_disable,
++ .is_enabled = AST1700_clk_is_enabled,
++};
++
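++/*
++ * Like clk_hw_register_gate(), but the gate uses the set/clear based
++ * AST1700_clk_gate_ops above instead of the generic read-modify-write ops.
++ */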
++static struct clk_hw *AST1700_clk_hw_register_gate(struct device *dev, const char *name,
++ const char *parent_name, unsigned long flags,
++ void __iomem *reg, u8 clock_idx,
++ u8 clk_gate_flags, spinlock_t *lock)
++{
++ struct clk_gate *gate;
++ struct clk_hw *hw;
++ struct clk_init_data init;
++ int ret = -EINVAL;
++
++ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
++ if (!gate)
++ return ERR_PTR(-ENOMEM);
++
++ init.name = name;
++ init.ops = &AST1700_clk_gate_ops;
++ init.flags = flags;
++ init.parent_names = parent_name ? &parent_name : NULL;
++ init.num_parents = parent_name ? 1 : 0;
++
++ gate->reg = reg;
++ gate->bit_idx = clock_idx;
++ gate->flags = clk_gate_flags;
++ gate->lock = lock;
++ gate->hw.init = &init;
++
++ hw = &gate->hw;
++ ret = clk_hw_register(dev, hw);
++ if (ret) {
++ kfree(gate);
++ hw = ERR_PTR(ret);
++ }
++
++ return hw;
++}
++
++struct ast1700_reset {
++ void __iomem *base;
++ struct reset_controller_dev rcdev;
++};
++
++#define to_rc_data(p) container_of(p, struct ast1700_reset, rcdev)
++
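++/*
++ * Reset lines 0-31 are controlled through the set/clear register pair at
++ * 0x200/0x204 and lines 32 and above through the pair at 0x220/0x224:
++ * writing the bit to the base register asserts the reset and writing it to
++ * the +0x04 register releases it.
++ */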
++static int ast1700_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
++{
++ struct ast1700_reset *rc = to_rc_data(rcdev);
++ u32 rst = BIT(id % 32);
++ u32 reg = id >= 32 ? 0x220 : 0x200;
++
++ writel(rst, rc->base + reg);
++ return 0;
++}
++
++static int ast1700_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
++{
++ struct ast1700_reset *rc = to_rc_data(rcdev);
++ u32 rst = BIT(id % 32);
++ u32 reg = id >= 32 ? 0x220 : 0x200;
++
++ /* Use set to clear register */
++ writel(rst, rc->base + reg + 0x04);
++ return 0;
++}
++
++static int ast1700_reset_status(struct reset_controller_dev *rcdev, unsigned long id)
++{
++ struct ast1700_reset *rc = to_rc_data(rcdev);
++ u32 rst = BIT(id % 32);
++ u32 reg = id >= 32 ? 0x220 : 0x200;
++
++ return (readl(rc->base + reg) & rst);
++}
++
++static const struct reset_control_ops ast1700_reset_ops = {
++ .assert = ast1700_reset_assert,
++ .deassert = ast1700_reset_deassert,
++ .status = ast1700_reset_status,
++};
++
++static const char *const sdclk_sel0[] = {
++ "ast1700_0-hpll_divn",
++ "ast1700_0-apll_divn",
++};
++
++static const char *const sdclk_sel1[] = {
++ "ast1700_1-hpll_divn",
++ "ast1700_1-apll_divn",
++};
++
++static const char *const uartclk_sel0[] = {
++ "ast1700_0-uartxclk",
++ "ast1700_0-huartxclk",
++};
++
++static const char *const uartclk_sel1[] = {
++ "ast1700_1-uartxclk",
++ "ast1700_1-huartxclk",
++};
++
++static const char *const uxclk_sel0[] = {
++ "ast1700_0-apll_div4",
++ "ast1700_0-apll_div2",
++ "ast1700_0-apll",
++ "ast1700_0-hpll",
++};
++
++static const char *const uxclk_sel1[] = {
++ "ast1700_1-apll_div4",
++ "ast1700_1-apll_div2",
++ "ast1700_1-apll",
++ "ast1700_1-hpll",
++};
++
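++/*
++ * Each AST1700 instance is assigned an index from ast1700_clk_ida, so its
++ * clocks are registered as "ast1700_<id>-<suffix>" (for example
++ * "ast1700_0-hpll" and "ast1700_1-hpll").
++ */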
++#define CREATE_CLK_NAME(id, suffix) kasprintf(GFP_KERNEL, "ast1700_%d-%s", id, suffix)
++
++static int AST1700_clk_init(struct device_node *ast1700_node)
++{
++ struct clk_hw_onecell_data *clk_data;
++ struct ast1700_reset *reset;
++ u32 uart_clk_source = 0;
++ void __iomem *clk_base;
++ struct clk_hw **clks;
++ struct clk_hw *hw;
++ u32 val;
++	int i, ret;
++
++ int id = ida_simple_get(&ast1700_clk_ida, 0, 0, GFP_KERNEL);
++
++ clk_base = of_iomap(ast1700_node, 0);
++ WARN_ON(!clk_base);
++
++ clk_data = kzalloc(struct_size(clk_data, hws, AST1700_NUM_CLKS), GFP_KERNEL);
++ if (!clk_data)
++ return -ENOMEM;
++
++ clk_data->num = AST1700_NUM_CLKS;
++ clks = clk_data->hws;
++
++ reset = kzalloc(sizeof(*reset), GFP_KERNEL);
++ if (!reset)
++ return -ENOMEM;
++
++ reset->base = clk_base;
++
++ reset->rcdev.owner = THIS_MODULE;
++ reset->rcdev.nr_resets = AST1700_RESET_NUMS;
++ reset->rcdev.ops = &ast1700_reset_ops;
++ reset->rcdev.of_node = ast1700_node;
++
++ ret = reset_controller_register(&reset->rcdev);
++ if (ret) {
++ pr_err("soc1 failed to register reset controller\n");
++ return ret;
++ }
++ /*
++	 * AST1700 A0 workaround:
++	 * Assert the resets of all I3C controllers simultaneously;
++	 * otherwise, accesses to the I3C registers may fail.
++ */
++	for (i = AST1700_RESET_I3C0; i <= AST1700_RESET_I3C15; i++)
++		ast1700_reset_assert(&reset->rcdev, i);
++
++ hw = clk_hw_register_fixed_rate(NULL, CREATE_CLK_NAME(id, "clkin"), NULL, 0, AST1700_CLK_25MHZ);
++ if (IS_ERR(hw))
++ return PTR_ERR(hw);
++ clks[AST1700_CLKIN] = hw;
++
++ /* HPLL 1000Mhz */
++ val = readl(clk_base + AST1700_HPLL_PARAM);
++ clks[AST1700_CLK_HPLL] = AST1700_calc_pll(CREATE_CLK_NAME(id, "hpll"), CREATE_CLK_NAME(id, "clkin"), val);
++
++	/* APLL 800Mhz */
++ val = readl(clk_base + AST1700_APLL_PARAM);
++ clks[AST1700_CLK_APLL] = AST1700_calc_pll(CREATE_CLK_NAME(id, "apll"), CREATE_CLK_NAME(id, "clkin"), val);
++
++ clks[AST1700_CLK_APLL_DIV2] =
++ clk_hw_register_fixed_factor(NULL, CREATE_CLK_NAME(id, "apll_div2"), CREATE_CLK_NAME(id, "apll"), 0, 1, 2);
++
++ clks[AST1700_CLK_APLL_DIV4] =
++ clk_hw_register_fixed_factor(NULL, CREATE_CLK_NAME(id, "apll_div4"), CREATE_CLK_NAME(id, "apll"), 0, 1, 4);
++
++ val = readl(clk_base + AST1700_DPLL_PARAM);
++ clks[AST1700_CLK_DPLL] = AST1700_calc_pll(CREATE_CLK_NAME(id, "dpll"), CREATE_CLK_NAME(id, "clkin"), val);
++
++ /* uxclk mux selection */
++ clks[AST1700_CLK_UXCLK] =
++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uxclk"),
++ (id == 0) ? uxclk_sel0 : uxclk_sel1,
++ (id == 0) ? ARRAY_SIZE(uxclk_sel0) : ARRAY_SIZE(uxclk_sel1),
++ 0, clk_base + AST1700_CLK_SEL2,
++ 0, 2, 0, &ast1700_clk_lock);
++
++ val = readl(clk_base + AST1700_UXCLK_CTRL);
++ clks[AST1700_CLK_UARTX] = AST1700_calc_uclk(CREATE_CLK_NAME(id, "uartxclk"), val);
++
++ /* huxclk mux selection */
++ clks[AST1700_CLK_HUXCLK] =
++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "huxclk"),
++ (id == 0) ? uxclk_sel0 : uxclk_sel1,
++ (id == 0) ? ARRAY_SIZE(uxclk_sel0) : ARRAY_SIZE(uxclk_sel1),
++ 0, clk_base + AST1700_CLK_SEL2,
++ 3, 2, 0, &ast1700_clk_lock);
++
++ val = readl(clk_base + AST1700_HUXCLK_CTRL);
++ clks[AST1700_CLK_HUARTX] = AST1700_calc_huclk(CREATE_CLK_NAME(id, "huartxclk"), val);
++
++ /* AHB CLK = 200Mhz */
++ clks[AST1700_CLK_AHB] =
++ clk_hw_register_divider_table(NULL, CREATE_CLK_NAME(id, "ahb"),
++ CREATE_CLK_NAME(id, "hpll"),
++ 0, clk_base + AST1700_CLK_SEL2,
++ 20, 3, 0, ast1700_clk_div_table, &ast1700_clk_lock);
++
++ /* APB CLK = 100Mhz */
++ clks[AST1700_CLK_APB] =
++ clk_hw_register_divider_table(NULL, CREATE_CLK_NAME(id, "apb"),
++ CREATE_CLK_NAME(id, "hpll"),
++ 0, clk_base + AST1700_CLK_SEL1,
++ 18, 3, 0, ast1700_clk_div_table2, &ast1700_clk_lock);
++
++ //rmii
++ clks[AST1700_CLK_RMII] =
++ clk_hw_register_divider_table(NULL, CREATE_CLK_NAME(id, "rmii"),
++ CREATE_CLK_NAME(id, "hpll"),
++ 0, clk_base + AST1700_CLK_SEL2,
++ 21, 3, 0, ast1700_rmii_div_table, &ast1700_clk_lock);
++
++ //rgmii
++ clks[AST1700_CLK_RGMII] =
++ clk_hw_register_divider_table(NULL, CREATE_CLK_NAME(id, "rgmii"),
++ CREATE_CLK_NAME(id, "hpll"),
++ 0, clk_base + AST1700_CLK_SEL2,
++ 25, 3, 0, ast1700_rgmii_div_table, &ast1700_clk_lock);
++
++ //mac hclk
++ clks[AST1700_CLK_MACHCLK] =
++ clk_hw_register_divider_table(NULL, CREATE_CLK_NAME(id, "machclk"),
++ CREATE_CLK_NAME(id, "hpll"),
++ 0, clk_base + AST1700_CLK_SEL2,
++ 29, 3, 0, ast1700_clk_div_table, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_LCLK0] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "lclk0-gate"), NULL,
++ CLK_IS_CRITICAL, clk_base + AST1700_CLK_STOP,
++ 0, 0, &ast1700_clk_lock);
++
++	clks[AST1700_CLK_GATE_LCLK1] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "lclk1-gate"), NULL,
++ CLK_IS_CRITICAL, clk_base + AST1700_CLK_STOP,
++ 1, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_ESPI0CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "espi0clk-gate"), NULL,
++ CLK_IS_CRITICAL, clk_base + AST1700_CLK_STOP,
++ 2, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_ESPI1CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "espi1clk-gate"), NULL,
++ CLK_IS_CRITICAL, clk_base + AST1700_CLK_STOP,
++ 3, 0, &ast1700_clk_lock);
++
++ //sd pll divn
++ clks[AST1700_CLK_HPLL_DIVN] =
++ clk_hw_register_divider_table(NULL, CREATE_CLK_NAME(id, "hpll_divn"),
++ CREATE_CLK_NAME(id, "hpll"),
++ 0, clk_base + AST1700_CLK_SEL2,
++ 20, 3, 0, ast1700_clk_div_table, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_APLL_DIVN] =
++ clk_hw_register_divider_table(NULL, CREATE_CLK_NAME(id, "apll_divn"),
++ CREATE_CLK_NAME(id, "apll"),
++ 0, clk_base + AST1700_CLK_SEL2,
++ 8, 3, 0, ast1700_clk_div_table, &ast1700_clk_lock);
++
++ //sd clk
++ clks[AST1700_CLK_SDCLK] =
++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "sdclk"),
++ (id == 0) ? sdclk_sel0 : sdclk_sel1,
++ (id == 0) ? ARRAY_SIZE(sdclk_sel0) : ARRAY_SIZE(sdclk_sel1),
++ 0, clk_base + AST1700_CLK_SEL1,
++ 13, 1, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_SDCLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "sdclk-gate"),
++ CREATE_CLK_NAME(id, "sdclk"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 4, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_REFCLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "io-refclk-gate"), NULL,
++ CLK_IS_CRITICAL, clk_base + AST1700_CLK_STOP,
++ 6, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_LPCHCLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "lpchclk-gate"), NULL,
++ CLK_IS_CRITICAL, clk_base + AST1700_CLK_STOP,
++ 7, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_MAC0CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "mac0clk-gate"), NULL,
++ 0, clk_base + AST1700_CLK_STOP,
++ 8, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_MAC1CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "mac1clk-gate"), NULL,
++ 0, clk_base + AST1700_CLK_STOP,
++ 9, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_MAC2CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "mac2clk-gate"), NULL,
++ 0, clk_base + AST1700_CLK_STOP,
++ 10, 0, &ast1700_clk_lock);
++
++ of_property_read_u32(ast1700_node, "uart-clk-source", &uart_clk_source);
++
++ if (uart_clk_source) {
++ val = readl(clk_base + AST1700_CLK_SEL1) & ~GENMASK(12, 0);
++ uart_clk_source &= GENMASK(12, 0);
++ writel(val | uart_clk_source, clk_base + AST1700_CLK_SEL1);
++ }
++
++ //UART0
++ clks[AST1700_CLK_UART0] =
++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart0clk"),
++ (id == 0) ? uartclk_sel0 : uartclk_sel1,
++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1),
++ 0, clk_base + AST1700_CLK_SEL1,
++ 0, 1, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_UART0CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart0clk-gate"),
++ CREATE_CLK_NAME(id, "uart0clk"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 11, 0, &ast1700_clk_lock);
++
++ //UART1
++ clks[AST1700_CLK_UART1] =
++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart1clk"),
++ (id == 0) ? uartclk_sel0 : uartclk_sel1,
++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1),
++ 0, clk_base + AST1700_CLK_SEL1,
++ 1, 1, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_UART1CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart1clk-gate"),
++ CREATE_CLK_NAME(id, "uart1clk"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 12, 0, &ast1700_clk_lock);
++
++ //UART2
++ clks[AST1700_CLK_UART2] =
++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart2clk"),
++ (id == 0) ? uartclk_sel0 : uartclk_sel1,
++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1),
++ 0, clk_base + AST1700_CLK_SEL1,
++ 2, 1, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_UART2CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart2clk-gate"),
++ CREATE_CLK_NAME(id, "uart2clk"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 13, 0, &ast1700_clk_lock);
++
++ //UART3
++ clks[AST1700_CLK_UART3] =
++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart3clk"),
++ (id == 0) ? uartclk_sel0 : uartclk_sel1,
++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1),
++ 0, clk_base + AST1700_CLK_SEL1,
++ 3, 1, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_UART3CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart3clk-gate"),
++ CREATE_CLK_NAME(id, "uart3clk"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 14, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_I3C0CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c0clk-gate"),
++ CREATE_CLK_NAME(id, "ahb"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 16, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_I3C1CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c1clk-gate"),
++ CREATE_CLK_NAME(id, "ahb"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 17, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_I3C2CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c2clk-gate"),
++ CREATE_CLK_NAME(id, "ahb"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 18, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_I3C3CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c3clk-gate"),
++ CREATE_CLK_NAME(id, "ahb"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 19, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_I3C4CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c4clk-gate"),
++ CREATE_CLK_NAME(id, "ahb"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 20, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_I3C5CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c5clk-gate"),
++ CREATE_CLK_NAME(id, "ahb"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 21, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_I3C6CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c6clk-gate"),
++ CREATE_CLK_NAME(id, "ahb"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 22, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_I3C7CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c7clk-gate"),
++ CREATE_CLK_NAME(id, "ahb"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 23, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_I3C8CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c8clk-gate"),
++ CREATE_CLK_NAME(id, "ahb"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 24, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_I3C9CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c9clk-gate"),
++ CREATE_CLK_NAME(id, "ahb"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 25, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_I3C10CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c10clk-gate"),
++ CREATE_CLK_NAME(id, "ahb"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 26, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_I3C11CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c11clk-gate"),
++ CREATE_CLK_NAME(id, "ahb"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 27, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_I3C12CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c12clk-gate"),
++ CREATE_CLK_NAME(id, "ahb"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 28, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_I3C13CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c13clk-gate"),
++ CREATE_CLK_NAME(id, "ahb"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 29, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_I3C14CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c14clk-gate"),
++ CREATE_CLK_NAME(id, "ahb"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 30, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_I3C15CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "i3c15clk-gate"),
++ CREATE_CLK_NAME(id, "ahb"),
++ 0, clk_base + AST1700_CLK_STOP,
++ 31, 0, &ast1700_clk_lock);
++
++ /* clk stop 2 */
++ //UART5
++ clks[AST1700_CLK_UART5] =
++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart5clk"),
++ (id == 0) ? uartclk_sel0 : uartclk_sel1,
++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1),
++ 0, clk_base + AST1700_CLK_SEL1,
++ 5, 1, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_UART5CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart5clk-gate"),
++ CREATE_CLK_NAME(id, "uart5clk"),
++ 0, clk_base + AST1700_CLK_STOP2,
++ 0, 0, &ast1700_clk_lock);
++
++ //UART6
++ clks[AST1700_CLK_UART6] =
++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart6clk"),
++ (id == 0) ? uartclk_sel0 : uartclk_sel1,
++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1),
++ 0, clk_base + AST1700_CLK_SEL1,
++ 6, 1, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_UART6CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart6clk-gate"),
++ CREATE_CLK_NAME(id, "uart6clk"),
++ 0, clk_base + AST1700_CLK_STOP2,
++ 1, 0, &ast1700_clk_lock);
++
++ //UART7
++ clks[AST1700_CLK_UART7] =
++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart7clk"),
++ (id == 0) ? uartclk_sel0 : uartclk_sel1,
++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1),
++ 0, clk_base + AST1700_CLK_SEL1,
++ 7, 1, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_UART7CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart7clk-gate"),
++ CREATE_CLK_NAME(id, "uart7clk"),
++ 0, clk_base + AST1700_CLK_STOP2,
++ 2, 0, &ast1700_clk_lock);
++
++ //UART8
++ clks[AST1700_CLK_UART8] =
++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart8clk"),
++ (id == 0) ? uartclk_sel0 : uartclk_sel1,
++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1),
++ 0, clk_base + AST1700_CLK_SEL1,
++ 8, 1, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_UART8CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart8clk-gate"),
++ CREATE_CLK_NAME(id, "uart8clk"),
++ 0, clk_base + AST1700_CLK_STOP2,
++ 3, 0, &ast1700_clk_lock);
++
++ //UART9
++ clks[AST1700_CLK_UART9] =
++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart9clk"),
++ (id == 0) ? uartclk_sel0 : uartclk_sel1,
++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1),
++ 0, clk_base + AST1700_CLK_SEL1,
++ 9, 1, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_UART9CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart9clk-gate"),
++ CREATE_CLK_NAME(id, "uart9clk"),
++ 0, clk_base + AST1700_CLK_STOP2,
++ 4, 0, &ast1700_clk_lock);
++
++ //UART10
++ clks[AST1700_CLK_UART10] =
++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart10clk"),
++ (id == 0) ? uartclk_sel0 : uartclk_sel1,
++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1),
++ 0, clk_base + AST1700_CLK_SEL1,
++ 10, 1, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_UART10CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart10clk-gate"),
++ CREATE_CLK_NAME(id, "uart10clk"),
++ 0, clk_base + AST1700_CLK_STOP2,
++ 5, 0, &ast1700_clk_lock);
++
++ //UART11
++ clks[AST1700_CLK_UART11] =
++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart11clk"),
++ (id == 0) ? uartclk_sel0 : uartclk_sel1,
++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1),
++ 0, clk_base + AST1700_CLK_SEL1,
++ 11, 1, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_UART11CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart11clk-gate"),
++ CREATE_CLK_NAME(id, "uart11clk"),
++ 0, clk_base + AST1700_CLK_STOP2,
++ 6, 0, &ast1700_clk_lock);
++
++ //uart12: also called the BMC UART
++ clks[AST1700_CLK_UART12] =
++ clk_hw_register_mux(NULL, CREATE_CLK_NAME(id, "uart12clk"),
++ (id == 0) ? uartclk_sel0 : uartclk_sel1,
++ (id == 0) ? ARRAY_SIZE(uartclk_sel0) : ARRAY_SIZE(uartclk_sel1),
++ 0, clk_base + AST1700_CLK_SEL1,
++ 12, 1, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_UART12CLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "uart12clk-gate"),
++ CREATE_CLK_NAME(id, "uart12clk"),
++ 0, clk_base + AST1700_CLK_STOP2,
++ 7, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_FSICLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "fsiclk-gate"), NULL,
++ 0, clk_base + AST1700_CLK_STOP2,
++ 8, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_LTPIPHYCLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "ltpiphyclk-gate"), NULL,
++ CLK_IS_CRITICAL, clk_base + AST1700_CLK_STOP2,
++ 9, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_LTPICLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "ltpiclk-gate"), NULL,
++ CLK_IS_CRITICAL, clk_base + AST1700_CLK_STOP2,
++ 10, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_VGALCLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "vgalclk-gate"), NULL,
++ 0, clk_base + AST1700_CLK_STOP2,
++ 11, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_USBUARTCLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "usbuartclk-gate"), NULL,
++ 0, clk_base + AST1700_CLK_STOP2,
++ 12, 0, &ast1700_clk_lock);
++
++ clk_hw_register_fixed_factor(NULL, CREATE_CLK_NAME(id, "canclk"), CREATE_CLK_NAME(id, "apll"), 0, 1, 10);
++
++ clks[AST1700_CLK_GATE_CANCLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "canclk-gate"),
++ CREATE_CLK_NAME(id, "canclk"),
++ 0, clk_base + AST1700_CLK_STOP2,
++ 13, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_PCICLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "pciclk-gate"), NULL,
++ 0, clk_base + AST1700_CLK_STOP2,
++ 14, 0, &ast1700_clk_lock);
++
++ clks[AST1700_CLK_GATE_SLICLK] =
++ AST1700_clk_hw_register_gate(NULL, CREATE_CLK_NAME(id, "sliclk-gate"), NULL,
++ 0, clk_base + AST1700_CLK_STOP2,
++ 15, 0, &ast1700_clk_lock);
++
++ of_clk_add_hw_provider(ast1700_node, of_clk_hw_onecell_get, clk_data);
++
++ return 0;
++};
++
++CLK_OF_DECLARE_DRIVER(ast1700, "aspeed,ast1700-scu", AST1700_clk_init);
++
+diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
+index 909c3137c..3b59b2172 100644
+--- a/drivers/clk/clk-ast2600.c
++++ b/drivers/clk/clk-ast2600.c
+@@ -19,7 +19,7 @@
+ * This includes the gates (configured from aspeed_g6_gates), plus the
+ * explicitly-configured clocks (ASPEED_CLK_HPLL and up).
+ */
+-#define ASPEED_G6_NUM_CLKS 72
++#define ASPEED_G6_NUM_CLKS 75
+
+ #define ASPEED_G6_SILICON_REV 0x014
+ #define CHIP_REVISION_ID GENMASK(23, 16)
+@@ -59,8 +59,24 @@
+
+ #define ASPEED_G6_STRAP1 0x500
+
++#define ASPEED_UARTCLK_FROM_UXCLK 0x338
++
+ #define ASPEED_MAC12_CLK_DLY 0x340
++#define ASPEED_MAC12_CLK_DLY_100M 0x348
++#define ASPEED_MAC12_CLK_DLY_10M 0x34C
++
+ #define ASPEED_MAC34_CLK_DLY 0x350
++#define ASPEED_MAC34_CLK_DLY_100M 0x358
++#define ASPEED_MAC34_CLK_DLY_10M 0x35C
++
++#define ASPEED_G6_MAC34_DRIVING_CTRL 0x458
++
++#define ASPEED_G6_DEF_MAC12_DELAY_1G 0x0028a410
++#define ASPEED_G6_DEF_MAC12_DELAY_100M 0x00410410
++#define ASPEED_G6_DEF_MAC12_DELAY_10M 0x00410410
++#define ASPEED_G6_DEF_MAC34_DELAY_1G 0x00104208
++#define ASPEED_G6_DEF_MAC34_DELAY_100M 0x00104208
++#define ASPEED_G6_DEF_MAC34_DELAY_10M 0x00104208
+
+ /* Globally visible clocks */
+ static DEFINE_SPINLOCK(aspeed_g6_clk_lock);
+@@ -72,6 +88,45 @@ static void __iomem *scu_g6_base;
+ /* AST2600 revision: A0, A1, A2, etc */
+ static u8 soc_rev;
+
++struct mac_delay_config {
++ u32 tx_delay_1000;
++ u32 rx_delay_1000;
++ u32 tx_delay_100;
++ u32 rx_delay_100;
++ u32 tx_delay_10;
++ u32 rx_delay_10;
++};
++
++union mac_delay_1g {
++ u32 w;
++ struct {
++ unsigned int tx_delay_1 : 6; /* bit[5:0] */
++ unsigned int tx_delay_2 : 6; /* bit[11:6] */
++ unsigned int rx_delay_1 : 6; /* bit[17:12] */
++ unsigned int rx_delay_2 : 6; /* bit[23:18] */
++ unsigned int rx_clk_inv_1 : 1; /* bit[24] */
++ unsigned int rx_clk_inv_2 : 1; /* bit[25] */
++ unsigned int rmii_tx_data_at_falling_1 : 1; /* bit[26] */
++ unsigned int rmii_tx_data_at_falling_2 : 1; /* bit[27] */
++ unsigned int rgmiick_pad_dir : 1; /* bit[28] */
++ unsigned int rmii_50m_oe_1 : 1; /* bit[29] */
++ unsigned int rmii_50m_oe_2 : 1; /* bit[30] */
++ unsigned int rgmii_125m_o_sel : 1; /* bit[31] */
++ } b;
++};
++
++union mac_delay_100_10 {
++ u32 w;
++ struct {
++ unsigned int tx_delay_1 : 6; /* bit[5:0] */
++ unsigned int tx_delay_2 : 6; /* bit[11:6] */
++ unsigned int rx_delay_1 : 6; /* bit[17:12] */
++ unsigned int rx_delay_2 : 6; /* bit[23:18] */
++ unsigned int rx_clk_inv_1 : 1; /* bit[24] */
++ unsigned int rx_clk_inv_2 : 1; /* bit[25] */
++ unsigned int reserved_0 : 6; /* bit[31:26] */
++ } b;
++};
+ /*
+ * The majority of the clocks in the system are gates paired with a reset
+ * controller that holds the IP in reset; this is represented by the @reset_idx
+@@ -99,14 +154,14 @@ static u8 soc_rev;
+ * ref0 and ref1 are essential for the SoC to operate
+ * mpll is required if SDRAM is used
+ */
+-static const struct aspeed_gate_data aspeed_g6_gates[] = {
++static struct aspeed_gate_data aspeed_g6_gates[] = {
+ /* clk rst name parent flags */
+ [ASPEED_CLK_GATE_MCLK] = { 0, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
+ [ASPEED_CLK_GATE_ECLK] = { 1, 6, "eclk-gate", "eclk", 0 }, /* Video Engine */
+ [ASPEED_CLK_GATE_GCLK] = { 2, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
+ /* vclk parent - dclk/d1clk/hclk/mclk */
+ [ASPEED_CLK_GATE_VCLK] = { 3, -1, "vclk-gate", NULL, 0 }, /* Video Capture */
+- [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */
++ [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */
+ /* From dpll */
+ [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */
+ [ASPEED_CLK_GATE_REF0CLK] = { 6, -1, "ref0clk-gate", "clkin", CLK_IS_CRITICAL },
+@@ -128,8 +183,8 @@ static const struct aspeed_gate_data aspeed_g6_gates[] = {
+ /* Reserved 26 */
+ [ASPEED_CLK_GATE_EMMCCLK] = { 27, 16, "emmcclk-gate", NULL, 0 }, /* For card clk */
+ /* Reserved 28/29/30 */
+- [ASPEED_CLK_GATE_LCLK] = { 32, 32, "lclk-gate", NULL, 0 }, /* LPC */
+- [ASPEED_CLK_GATE_ESPICLK] = { 33, -1, "espiclk-gate", NULL, 0 }, /* eSPI */
++ [ASPEED_CLK_GATE_LCLK] = { 32, 32, "lclk-gate", NULL, CLK_IS_CRITICAL }, /* LPC */
++ [ASPEED_CLK_GATE_ESPICLK] = { 33, -1, "espiclk-gate", NULL, CLK_IS_CRITICAL }, /* eSPI */
+ [ASPEED_CLK_GATE_REF1CLK] = { 34, -1, "ref1clk-gate", "clkin", CLK_IS_CRITICAL },
+ /* Reserved 35 */
+ [ASPEED_CLK_GATE_SDCLK] = { 36, 56, "sdclk-gate", NULL, 0 }, /* SDIO/SD */
+@@ -143,20 +198,20 @@ static const struct aspeed_gate_data aspeed_g6_gates[] = {
+ [ASPEED_CLK_GATE_I3C4CLK] = { 44, 44, "i3c4clk-gate", "i3cclk", 0 }, /* I3C4 */
+ [ASPEED_CLK_GATE_I3C5CLK] = { 45, 45, "i3c5clk-gate", "i3cclk", 0 }, /* I3C5 */
+ /* Reserved: 46 & 47 */
+- [ASPEED_CLK_GATE_UART1CLK] = { 48, -1, "uart1clk-gate", "uart", 0 }, /* UART1 */
+- [ASPEED_CLK_GATE_UART2CLK] = { 49, -1, "uart2clk-gate", "uart", 0 }, /* UART2 */
+- [ASPEED_CLK_GATE_UART3CLK] = { 50, -1, "uart3clk-gate", "uart", 0 }, /* UART3 */
+- [ASPEED_CLK_GATE_UART4CLK] = { 51, -1, "uart4clk-gate", "uart", 0 }, /* UART4 */
++ [ASPEED_CLK_GATE_UART1CLK] = { 48, -1, "uart1clk-gate", "uxclk", CLK_IS_CRITICAL }, /* UART1 */
++ [ASPEED_CLK_GATE_UART2CLK] = { 49, -1, "uart2clk-gate", "uxclk", CLK_IS_CRITICAL }, /* UART2 */
++ [ASPEED_CLK_GATE_UART3CLK] = { 50, -1, "uart3clk-gate", "uxclk", 0 }, /* UART3 */
++ [ASPEED_CLK_GATE_UART4CLK] = { 51, -1, "uart4clk-gate", "uxclk", 0 }, /* UART4 */
+ [ASPEED_CLK_GATE_MAC3CLK] = { 52, 52, "mac3clk-gate", "mac34", 0 }, /* MAC3 */
+ [ASPEED_CLK_GATE_MAC4CLK] = { 53, 53, "mac4clk-gate", "mac34", 0 }, /* MAC4 */
+- [ASPEED_CLK_GATE_UART6CLK] = { 54, -1, "uart6clk-gate", "uartx", 0 }, /* UART6 */
+- [ASPEED_CLK_GATE_UART7CLK] = { 55, -1, "uart7clk-gate", "uartx", 0 }, /* UART7 */
+- [ASPEED_CLK_GATE_UART8CLK] = { 56, -1, "uart8clk-gate", "uartx", 0 }, /* UART8 */
+- [ASPEED_CLK_GATE_UART9CLK] = { 57, -1, "uart9clk-gate", "uartx", 0 }, /* UART9 */
+- [ASPEED_CLK_GATE_UART10CLK] = { 58, -1, "uart10clk-gate", "uartx", 0 }, /* UART10 */
+- [ASPEED_CLK_GATE_UART11CLK] = { 59, -1, "uart11clk-gate", "uartx", 0 }, /* UART11 */
+- [ASPEED_CLK_GATE_UART12CLK] = { 60, -1, "uart12clk-gate", "uartx", 0 }, /* UART12 */
+- [ASPEED_CLK_GATE_UART13CLK] = { 61, -1, "uart13clk-gate", "uartx", 0 }, /* UART13 */
++ [ASPEED_CLK_GATE_UART6CLK] = { 54, -1, "uart6clk-gate", "uxclk", 0 }, /* UART6 */
++ [ASPEED_CLK_GATE_UART7CLK] = { 55, -1, "uart7clk-gate", "uxclk", 0 }, /* UART7 */
++ [ASPEED_CLK_GATE_UART8CLK] = { 56, -1, "uart8clk-gate", "uxclk", 0 }, /* UART8 */
++ [ASPEED_CLK_GATE_UART9CLK] = { 57, -1, "uart9clk-gate", "uxclk", 0 }, /* UART9 */
++ [ASPEED_CLK_GATE_UART10CLK] = { 58, -1, "uart10clk-gate", "uxclk", 0 }, /* UART10 */
++ [ASPEED_CLK_GATE_UART11CLK] = { 59, -1, "uart11clk-gate", "uxclk", CLK_IS_CRITICAL }, /* UART11 */
++ [ASPEED_CLK_GATE_UART12CLK] = { 60, -1, "uart12clk-gate", "uxclk", 0 }, /* UART12 */
++ [ASPEED_CLK_GATE_UART13CLK] = { 61, -1, "uart13clk-gate", "uxclk", 0 }, /* UART13 */
+ [ASPEED_CLK_GATE_FSICLK] = { 62, 59, "fsiclk-gate", NULL, 0 }, /* FSI */
+ };
+
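In each row of the gate table the first two numbers are the clock-enable bit and the paired reset index; -1 means the gate has no reset line, and indices of 32 and above land in the second stop/reset register, the same split the reset callbacks later in this file use. A small stand-alone decoder for a row (a sketch, not driver code):

```
#include <stdio.h>

static void describe_gate(const char *name, int clk, int reset_idx)
{
	printf("%-16s enable bit %d of the %s clock-stop register", name,
	       clk & 31, clk >= 32 ? "second" : "first");
	if (reset_idx < 0)
		printf(", no paired reset\n");
	else
		printf(", reset bit %d of the %s reset register\n",
		       reset_idx & 31, reset_idx >= 32 ? "second" : "first");
}

int main(void)
{
	describe_gate("uart1clk-gate", 48, -1);	/* values from the table above */
	describe_gate("fsiclk-gate", 62, 59);
	return 0;
}
```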
+@@ -184,6 +239,18 @@ static const struct clk_div_table ast2600_emmc_extclk_div_table[] = {
+ { 0 }
+ };
+
++static const struct clk_div_table ast2600_sd_div_table[] = {
++ { 0x0, 2 },
++ { 0x1, 4 },
++ { 0x2, 6 },
++ { 0x3, 8 },
++ { 0x4, 10 },
++ { 0x5, 12 },
++ { 0x6, 14 },
++ { 0x7, 1 },
++ { 0 }
++};
++
+ static const struct clk_div_table ast2600_mac_div_table[] = {
+ { 0x0, 4 },
+ { 0x1, 4 },
+@@ -384,9 +451,14 @@ static int aspeed_g6_reset_deassert(struct reset_controller_dev *rcdev,
+ struct aspeed_reset *ar = to_aspeed_reset(rcdev);
+ u32 rst = get_bit(id);
+ u32 reg = id >= 32 ? ASPEED_G6_RESET_CTRL2 : ASPEED_G6_RESET_CTRL;
++ u32 val;
++ int ret;
+
+ /* Use set to clear register */
+- return regmap_write(ar->map, reg + 0x04, rst);
++ ret = regmap_write(ar->map, reg + 0x04, rst);
++ /* Add dummy read to ensure the write transfer is finished */
++ regmap_read(ar->map, reg + 4, &val);
++ return ret;
+ }
+
+ static int aspeed_g6_reset_assert(struct reset_controller_dev *rcdev,
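The dummy read added to aspeed_g6_reset_deassert() is the usual way to flush a posted register write so the reset line is really released before the caller continues. A generic stand-alone form of the same idiom (the "register" here is just a local variable):

```
#include <stdint.h>
#include <stdio.h>

static void reset_deassert(volatile uint32_t *clr_reg, uint32_t rst_bit)
{
	*clr_reg = rst_bit;	/* write-1-to-clear the reset bit */
	(void)*clr_reg;		/* read back: forces the posted write to complete */
}

int main(void)
{
	uint32_t fake_reg = 0;

	reset_deassert(&fake_reg, 1u << 5);
	printf("reg = 0x%08x\n", (unsigned int)fake_reg);
	return 0;
}
```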
+@@ -458,11 +530,6 @@ static struct clk_hw *aspeed_g6_clk_hw_register_gate(struct device *dev,
+ return hw;
+ }
+
+-static const char *const emmc_extclk_parent_names[] = {
+- "emmc_extclk_hpll_in",
+- "mpll",
+-};
+-
+ static const char * const vclk_parent_names[] = {
+ "dpll",
+ "d1pll",
+@@ -484,7 +551,7 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
+ struct aspeed_reset *ar;
+ struct regmap *map;
+ struct clk_hw *hw;
+- u32 val, rate;
++ u32 val;
+ int i, ret;
+
+ map = syscon_node_to_regmap(dev->of_node);
+@@ -510,70 +577,50 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- /* UART clock div13 setting */
+- regmap_read(map, ASPEED_G6_MISC_CTRL, &val);
+- if (val & UART_DIV13_EN)
+- rate = 24000000 / 13;
+- else
+- rate = 24000000;
+- hw = clk_hw_register_fixed_rate(dev, "uart", NULL, 0, rate);
+- if (IS_ERR(hw))
+- return PTR_ERR(hw);
+- aspeed_g6_clk_data->hws[ASPEED_CLK_UART] = hw;
++ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION1, GENMASK(14, 11), BIT(11));
+
+- /* UART6~13 clock div13 setting */
+- regmap_read(map, 0x80, &val);
+- if (val & BIT(31))
+- rate = 24000000 / 13;
+- else
+- rate = 24000000;
+- hw = clk_hw_register_fixed_rate(dev, "uartx", NULL, 0, rate);
++ /* EMMC ext clock divider */
++ hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "mpll", 0,
++ scu_g6_base + ASPEED_G6_CLK_SELECTION1, 15, 0,
++ &aspeed_g6_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+- aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = hw;
+
+- /* EMMC ext clock */
+- hw = clk_hw_register_fixed_factor(dev, "emmc_extclk_hpll_in", "hpll",
+- 0, 1, 2);
+- if (IS_ERR(hw))
+- return PTR_ERR(hw);
+-
+- hw = clk_hw_register_mux(dev, "emmc_extclk_mux",
+- emmc_extclk_parent_names,
+- ARRAY_SIZE(emmc_extclk_parent_names), 0,
+- scu_g6_base + ASPEED_G6_CLK_SELECTION1, 11, 1,
+- 0, &aspeed_g6_clk_lock);
+- if (IS_ERR(hw))
+- return PTR_ERR(hw);
+-
+- hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "emmc_extclk_mux",
+- 0, scu_g6_base + ASPEED_G6_CLK_SELECTION1,
+- 15, 0, &aspeed_g6_clk_lock);
+- if (IS_ERR(hw))
+- return PTR_ERR(hw);
+-
+- hw = clk_hw_register_divider_table(dev, "emmc_extclk",
+- "emmc_extclk_gate", 0,
+- scu_g6_base +
+- ASPEED_G6_CLK_SELECTION1, 12,
+- 3, 0, ast2600_emmc_extclk_div_table,
++ //ast2600 emmc clk should be under 200MHz
++ hw = clk_hw_register_divider_table(dev, "emmc_extclk", "emmc_extclk_gate", 0,
++ scu_g6_base + ASPEED_G6_CLK_SELECTION1, 12, 3, 0,
++ ast2600_emmc_extclk_div_table,
+ &aspeed_g6_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+ aspeed_g6_clk_data->hws[ASPEED_CLK_EMMC] = hw;
+
+- /* SD/SDIO clock divider and gate */
+- hw = clk_hw_register_gate(dev, "sd_extclk_gate", "hpll", 0,
+- scu_g6_base + ASPEED_G6_CLK_SELECTION4, 31, 0,
+- &aspeed_g6_clk_lock);
+- if (IS_ERR(hw))
+- return PTR_ERR(hw);
++ clk_hw_register_fixed_rate(NULL, "hclk", NULL, 0, 200000000);
++
++ regmap_read(map, 0x310, &val);
++ if (val & BIT(8)) {
++ /* SD/SDIO clock divider and gate */
++ hw = clk_hw_register_gate(dev, "sd_extclk_gate", "apll", 0,
++ scu_g6_base + ASPEED_G6_CLK_SELECTION4, 31, 0,
++ &aspeed_g6_clk_lock);
++ if (IS_ERR(hw))
++ return PTR_ERR(hw);
++ } else {
++ /* SD/SDIO clock divider and gate */
++ hw = clk_hw_register_gate(dev, "sd_extclk_gate", "hclk", 0,
++ scu_g6_base + ASPEED_G6_CLK_SELECTION4, 31, 0,
++ &aspeed_g6_clk_lock);
++ if (IS_ERR(hw))
++ return PTR_ERR(hw);
++ }
++
+ hw = clk_hw_register_divider_table(dev, "sd_extclk", "sd_extclk_gate",
+- 0, scu_g6_base + ASPEED_G6_CLK_SELECTION4, 28, 3, 0,
+- ast2600_div_table,
+- &aspeed_g6_clk_lock);
++ 0, scu_g6_base + ASPEED_G6_CLK_SELECTION4, 28, 3, 0,
++ ast2600_sd_div_table,
++ &aspeed_g6_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
++
+ aspeed_g6_clk_data->hws[ASPEED_CLK_SDIO] = hw;
+
+ /* MAC1/2 RMII 50MHz RCLK */
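The 3-bit field at SELECTION4[30:28] indexes ast2600_sd_div_table to derive sd_extclk from its parent (apll or the fixed 200 MHz hclk, depending on SCU 0x310 bit 8). A plain-C sketch of that lookup with an example parent rate:

```
#include <stdint.h>
#include <stdio.h>

/* same mapping as ast2600_sd_div_table above */
static const unsigned int sd_div_table[8] = { 2, 4, 6, 8, 10, 12, 14, 1 };

static unsigned long sd_extclk_rate(unsigned long parent_rate, uint32_t selection4)
{
	unsigned int field = (selection4 >> 28) & 0x7;	/* divider select */

	return parent_rate / sd_div_table[field];
}

int main(void)
{
	/* e.g. 200 MHz parent with field value 1 -> divide by 4 -> 50 MHz */
	printf("sd_extclk = %lu Hz\n", sd_extclk_rate(200000000UL, 0x1u << 28));
	return 0;
}
```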
+@@ -645,8 +692,8 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
+ return PTR_ERR(hw);
+ aspeed_g6_clk_data->hws[ASPEED_CLK_LHCLK] = hw;
+
+- /* gfx d1clk : use dp clk */
+- regmap_update_bits(map, ASPEED_G6_CLK_SELECTION1, GENMASK(10, 8), BIT(10));
++ /* gfx d1clk : use usb phy */
++ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION1, GENMASK(10, 8), BIT(9));
+ /* SoC Display clock selection */
+ hw = clk_hw_register_mux(dev, "d1clk", d1clk_parent_names,
+ ARRAY_SIZE(d1clk_parent_names), 0,
+@@ -677,6 +724,8 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
+ return PTR_ERR(hw);
+ aspeed_g6_clk_data->hws[ASPEED_CLK_VCLK] = hw;
+
++ //vclk: force disable dynamic slow down and fix vclk = eclk / 2
++ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION1, GENMASK(31, 28), 0);
+ /* Video Engine clock divider */
+ hw = clk_hw_register_divider_table(dev, "eclk", NULL, 0,
+ scu_g6_base + ASPEED_G6_CLK_SELECTION1, 28, 3, 0,
+@@ -686,6 +735,26 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
+ return PTR_ERR(hw);
+ aspeed_g6_clk_data->hws[ASPEED_CLK_ECLK] = hw;
+
++ /* uartx parent assign */
++ for (i = 0; i < 13; i++) {
++ if (i < 6 && i != 4) {
++ regmap_read(map, 0x310, &val);
++ if (val & BIT(i))
++ aspeed_g6_gates[ASPEED_CLK_GATE_UART1CLK + i].parent_name = "huxclk";
++ else
++ aspeed_g6_gates[ASPEED_CLK_GATE_UART1CLK + i].parent_name = "uxclk";
++ }
++ if (i == 4)
++ aspeed_g6_gates[ASPEED_CLK_GATE_UART1CLK + i].parent_name = "uart5";
++ if (i > 5 && i != 4) {
++ regmap_read(map, 0x314, &val);
++ if (val & BIT(i))
++ aspeed_g6_gates[ASPEED_CLK_GATE_UART1CLK + i].parent_name = "huxclk";
++ else
++ aspeed_g6_gates[ASPEED_CLK_GATE_UART1CLK + i].parent_name = "uxclk";
++ }
++ }
++
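As the loop above is written, bits of SCU 0x310 pick the parent for UART1-UART4 and UART6, bits of SCU 0x314 do the same for UART7-UART13, UART5 keeps its dedicated "uart5" clock, and a set bit selects "huxclk" while a clear bit selects "uxclk". A compilable restatement of that selection with invented register values:

```
#include <stdint.h>
#include <stdio.h>

static const char *uart_parent(unsigned int i /* 0 = UART1 ... 12 = UART13 */,
			       uint32_t scu310, uint32_t scu314)
{
	if (i == 4)
		return "uart5";	/* UART5 keeps its own fixed-rate clock */
	if (i < 6)
		return (scu310 & (1u << i)) ? "huxclk" : "uxclk";
	return (scu314 & (1u << i)) ? "huxclk" : "uxclk";
}

int main(void)
{
	uint32_t scu310 = 0x00000003;	/* invented: UART1/UART2 on huxclk */
	uint32_t scu314 = 0x00000000;

	for (unsigned int i = 0; i < 13; i++)
		printf("UART%u -> %s\n", i + 1, uart_parent(i, scu310, scu314));
	return 0;
}
```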
+ for (i = 0; i < ARRAY_SIZE(aspeed_g6_gates); i++) {
+ const struct aspeed_gate_data *gd = &aspeed_g6_gates[i];
+ u32 gate_flags;
+@@ -749,7 +818,8 @@ static const u32 ast2600_a1_axi_ahb200_tbl[] = {
+ static void __init aspeed_g6_cc(struct regmap *map)
+ {
+ struct clk_hw *hw;
+- u32 val, div, divbits, axi_div, ahb_div;
++ u32 val, freq, div, divbits, axi_div, ahb_div;
++ u32 mult;
+
+ clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, 25000000);
+
+@@ -814,6 +884,55 @@ static void __init aspeed_g6_cc(struct regmap *map)
+ hw = clk_hw_register_fixed_rate(NULL, "usb-phy-40m", NULL, 0, 40000000);
+ aspeed_g6_clk_data->hws[ASPEED_CLK_USBPHY_40M] = hw;
+
++ /* uart5 clock selection */
++ regmap_read(map, ASPEED_G6_MISC_CTRL, &val);
++ if (val & UART_DIV13_EN)
++ div = 13;
++ else
++ div = 1;
++ regmap_read(map, ASPEED_G6_CLK_SELECTION2, &val);
++ if (val & BIT(14))
++ freq = 192000000;
++ else
++ freq = 24000000;
++ freq = freq / div;
++
++ aspeed_g6_clk_data->hws[ASPEED_CLK_UART5] = clk_hw_register_fixed_rate(NULL, "uart5", NULL, 0, freq);
++
++ /* UART1~13 clock div13 setting except uart5 */
++ regmap_read(map, ASPEED_G6_CLK_SELECTION5, &val);
++
++ switch (val & 0x3) {
++ case 0:
++ aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = clk_hw_register_fixed_factor(NULL, "uartx", "apll", 0, 1, 4);
++ break;
++ case 1:
++ aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = clk_hw_register_fixed_factor(NULL, "uartx", "apll", 0, 1, 2);
++ break;
++ case 2:
++ aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = clk_hw_register_fixed_factor(NULL, "uartx", "apll", 0, 1, 1);
++ break;
++ case 3:
++ aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = clk_hw_register_fixed_factor(NULL, "uartx", "ahb", 0, 1, 1);
++ break;
++ }
++
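UART5's base clock is just 24 MHz or 192 MHz (SELECTION2 bit 14), optionally divided by 13 when MISC_CTRL's div13 bit is set. A compilable restatement with example inputs:

```
#include <stdbool.h>
#include <stdio.h>

static unsigned long uart5_rate(bool sel_192m, bool div13_en)
{
	unsigned long freq = sel_192m ? 192000000UL : 24000000UL;

	return div13_en ? freq / 13 : freq;
}

int main(void)
{
	/* 24 MHz divided by 13 */
	printf("uart5 = %lu Hz\n", uart5_rate(false, true));
	return 0;
}
```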
++ /* uxclk */
++ regmap_read(map, ASPEED_UARTCLK_FROM_UXCLK, &val);
++ div = ((val >> 8) & 0x3ff) * 2;
++ mult = val & 0xff;
++
++ hw = clk_hw_register_fixed_factor(NULL, "uxclk", "uartx", 0, mult, div);
++ aspeed_g6_clk_data->hws[ASPEED_CLK_UXCLK] = hw;
++
++ /* huxclk */
++ regmap_read(map, 0x33c, &val);
++ div = ((val >> 8) & 0x3ff) * 2;
++ mult = val & 0xff;
++
++ hw = clk_hw_register_fixed_factor(NULL, "huxclk", "uartx", 0, mult, div);
++ aspeed_g6_clk_data->hws[ASPEED_CLK_HUXCLK] = hw;
++
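SCU 0x338 (and 0x33c for huxclk) packs a multiplier in bits [7:0] and half the divisor in bits [17:8], so the fixed-factor rate is uartx * mult / (2 * field). A stand-alone sketch of that arithmetic with an invented register value and uartx rate:

```
#include <stdint.h>
#include <stdio.h>

static unsigned long uxclk_rate(unsigned long uartx_rate, uint32_t reg)
{
	unsigned int mult = reg & 0xff;
	unsigned int div = ((reg >> 8) & 0x3ff) * 2;

	if (!div)
		return 0;	/* guard against an unprogrammed register */
	return (unsigned long)((uint64_t)uartx_rate * mult / div);
}

int main(void)
{
	/* e.g. uartx = 24 MHz, mult = 6, divisor field = 13 -> 24 MHz * 6 / 26 */
	printf("uxclk = %lu Hz\n", uxclk_rate(24000000UL, (13u << 8) | 6u));
	return 0;
}
```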
+ /* i3c clock: source from apll, divide by 8 */
+ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION5,
+ I3C_CLK_SELECTION | APLL_DIV_SELECTION,
+@@ -826,6 +945,10 @@ static void __init aspeed_g6_cc(struct regmap *map)
+ static void __init aspeed_g6_cc_init(struct device_node *np)
+ {
+ struct regmap *map;
++ struct mac_delay_config mac_cfg;
++ union mac_delay_1g reg_1g;
++ union mac_delay_100_10 reg_100, reg_10;
++ u32 uart_clk_source = 0;
+ int ret;
+ int i;
+
+@@ -860,6 +983,100 @@ static void __init aspeed_g6_cc_init(struct device_node *np)
+ return;
+ }
+
++ of_property_read_u32(np, "uart-clk-source", &uart_clk_source);
++
++ if (uart_clk_source) {
++ if (uart_clk_source & GENMASK(5, 0))
++ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION4, GENMASK(5, 0), uart_clk_source & GENMASK(5, 0));
++
++ if (uart_clk_source & GENMASK(12, 6))
++ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION5, GENMASK(12, 6), uart_clk_source & GENMASK(12, 6));
++ }
++
++ /* fixed settings for RGMII/RMII clock generator */
++ /* MAC1/2 RGMII 125MHz = EPLL / 8 */
++ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION2, GENMASK(23, 20),
++ (0x7 << 20));
++
++ /* MAC3/4 RMII 50MHz = HCLK / 4 */
++ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION4, GENMASK(18, 16),
++ (0x3 << 16));
++
++ /* BIT[31]: MAC1/2 RGMII 125M source = internal PLL
++ * BIT[28]: RGMIICK pad direction = output
++ */
++ regmap_write(map, ASPEED_MAC12_CLK_DLY,
++ BIT(31) | BIT(28) | ASPEED_G6_DEF_MAC12_DELAY_1G);
++ regmap_write(map, ASPEED_MAC12_CLK_DLY_100M,
++ ASPEED_G6_DEF_MAC12_DELAY_100M);
++ regmap_write(map, ASPEED_MAC12_CLK_DLY_10M,
++ ASPEED_G6_DEF_MAC12_DELAY_10M);
++
++ /* MAC3/4 RGMII 125M source = RGMIICK pad */
++ regmap_write(map, ASPEED_MAC34_CLK_DLY,
++ ASPEED_G6_DEF_MAC34_DELAY_1G);
++ regmap_write(map, ASPEED_MAC34_CLK_DLY_100M,
++ ASPEED_G6_DEF_MAC34_DELAY_100M);
++ regmap_write(map, ASPEED_MAC34_CLK_DLY_10M,
++ ASPEED_G6_DEF_MAC34_DELAY_10M);
++
++ /* MAC3/4 default pad driving strength */
++ regmap_write(map, ASPEED_G6_MAC34_DRIVING_CTRL, 0x0000000f);
++
++ regmap_read(map, ASPEED_MAC12_CLK_DLY, &reg_1g.w);
++ regmap_read(map, ASPEED_MAC12_CLK_DLY_100M, &reg_100.w);
++ regmap_read(map, ASPEED_MAC12_CLK_DLY_10M, &reg_10.w);
++ ret = of_property_read_u32_array(np, "mac0-clk-delay", (u32 *)&mac_cfg, 6);
++ if (!ret) {
++ reg_1g.b.tx_delay_1 = mac_cfg.tx_delay_1000;
++ reg_1g.b.rx_delay_1 = mac_cfg.rx_delay_1000;
++ reg_100.b.tx_delay_1 = mac_cfg.tx_delay_100;
++ reg_100.b.rx_delay_1 = mac_cfg.rx_delay_100;
++ reg_10.b.tx_delay_1 = mac_cfg.tx_delay_10;
++ reg_10.b.rx_delay_1 = mac_cfg.rx_delay_10;
++ }
++ ret = of_property_read_u32_array(np, "mac1-clk-delay", (u32 *)&mac_cfg, 6);
++ if (!ret) {
++ reg_1g.b.tx_delay_2 = mac_cfg.tx_delay_1000;
++ reg_1g.b.rx_delay_2 = mac_cfg.rx_delay_1000;
++ reg_100.b.tx_delay_2 = mac_cfg.tx_delay_100;
++ reg_100.b.rx_delay_2 = mac_cfg.rx_delay_100;
++ reg_10.b.tx_delay_2 = mac_cfg.tx_delay_10;
++ reg_10.b.rx_delay_2 = mac_cfg.rx_delay_10;
++ }
++ regmap_write(map, ASPEED_MAC12_CLK_DLY, reg_1g.w);
++ regmap_write(map, ASPEED_MAC12_CLK_DLY_100M, reg_100.w);
++ regmap_write(map, ASPEED_MAC12_CLK_DLY_10M, reg_10.w);
++
++ regmap_read(map, ASPEED_MAC34_CLK_DLY, &reg_1g.w);
++ regmap_read(map, ASPEED_MAC34_CLK_DLY_100M, &reg_100.w);
++ regmap_read(map, ASPEED_MAC34_CLK_DLY_10M, &reg_10.w);
++ ret = of_property_read_u32_array(np, "mac2-clk-delay", (u32 *)&mac_cfg, 6);
++ if (!ret) {
++ reg_1g.b.tx_delay_1 = mac_cfg.tx_delay_1000;
++ reg_1g.b.rx_delay_1 = mac_cfg.rx_delay_1000;
++ reg_100.b.tx_delay_1 = mac_cfg.tx_delay_100;
++ reg_100.b.rx_delay_1 = mac_cfg.rx_delay_100;
++ reg_10.b.tx_delay_1 = mac_cfg.tx_delay_10;
++ reg_10.b.rx_delay_1 = mac_cfg.rx_delay_10;
++ }
++ ret = of_property_read_u32_array(np, "mac3-clk-delay", (u32 *)&mac_cfg, 6);
++ if (!ret) {
++ reg_1g.b.tx_delay_2 = mac_cfg.tx_delay_1000;
++ reg_1g.b.rx_delay_2 = mac_cfg.rx_delay_1000;
++ reg_100.b.tx_delay_2 = mac_cfg.tx_delay_100;
++ reg_100.b.rx_delay_2 = mac_cfg.rx_delay_100;
++ reg_10.b.tx_delay_2 = mac_cfg.tx_delay_10;
++ reg_10.b.rx_delay_2 = mac_cfg.rx_delay_10;
++ }
++ regmap_write(map, ASPEED_MAC34_CLK_DLY, reg_1g.w);
++ regmap_write(map, ASPEED_MAC34_CLK_DLY_100M, reg_100.w);
++ regmap_write(map, ASPEED_MAC34_CLK_DLY_10M, reg_10.w);
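Below is a stand-alone illustration of how a six-cell "macN-clk-delay" property (tx/rx delay at 1G, 100M and 10M) lands in the 1G delay word through the union defined earlier in this file; it assumes the usual little-endian GCC bit-field ordering, shows only the MAC1 (_1) fields, and uses invented delay values:

```
#include <stdint.h>
#include <stdio.h>

/* same layout as union mac_delay_1g above (bit-field order per little-endian GCC) */
union mac_delay_1g {
	uint32_t w;
	struct {
		unsigned int tx_delay_1 : 6;	/* bit[5:0] */
		unsigned int tx_delay_2 : 6;	/* bit[11:6] */
		unsigned int rx_delay_1 : 6;	/* bit[17:12] */
		unsigned int rx_delay_2 : 6;	/* bit[23:18] */
		unsigned int rx_clk_inv_1 : 1;
		unsigned int rx_clk_inv_2 : 1;
		unsigned int rmii_tx_data_at_falling_1 : 1;
		unsigned int rmii_tx_data_at_falling_2 : 1;
		unsigned int rgmiick_pad_dir : 1;
		unsigned int rmii_50m_oe_1 : 1;
		unsigned int rmii_50m_oe_2 : 1;
		unsigned int rgmii_125m_o_sel : 1;
	} b;
};

int main(void)
{
	union mac_delay_1g reg = { .w = 0x0028a410 };	/* the MAC1/2 1G default from this file */

	reg.b.tx_delay_1 = 0x10;	/* mac0-clk-delay cell 0: tx delay @ 1G (invented) */
	reg.b.rx_delay_1 = 0x08;	/* mac0-clk-delay cell 1: rx delay @ 1G (invented) */
	printf("MAC12 1G delay register: 0x%08x\n", (unsigned int)reg.w);
	return 0;
}
```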
++
++ /* A0/A1 need to change the RSA clock to HPLL/3; A2/A3 are already set by ROM code */
++ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION1, BIT(19), BIT(19));
++ regmap_update_bits(map, ASPEED_G6_CLK_SELECTION1, GENMASK(27, 26), (2 << 26));
++
+ aspeed_g6_cc(map);
+ ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, aspeed_g6_clk_data);
+ if (ret)
+diff --git a/drivers/clk/clk-ast2700.c b/drivers/clk/clk-ast2700.c
+new file mode 100644
+index 000000000..8223e7fd0
+--- /dev/null
++++ b/drivers/clk/clk-ast2700.c
+@@ -0,0 +1,1527 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (c) 2024 ASPEED Technology Inc.
++ * Author: Ryan Chen <ryan_chen@aspeedtech.com>
++ */
++
++#include <linux/clk-provider.h>
++#include <linux/io.h>
++#include <linux/mod_devicetable.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/units.h>
++#include <soc/aspeed/reset-aspeed.h>
++
++#include <dt-bindings/clock/aspeed,ast2700-scu.h>
++
++#define SCU_CLK_12MHZ (12 * HZ_PER_MHZ)
++#define SCU_CLK_24MHZ (24 * HZ_PER_MHZ)
++#define SCU_CLK_25MHZ (25 * HZ_PER_MHZ)
++#define SCU_CLK_192MHZ (192 * HZ_PER_MHZ)
++
++/* SOC0 */
++#define SCU0_HWSTRAP1 0x010
++#define SCU0_CLK_STOP 0x240
++#define SCU0_CLK_SEL1 0x280
++#define SCU0_CLK_SEL2 0x284
++#define GET_USB_REFCLK_DIV(x) ((GENMASK(23, 20) & (x)) >> 20)
++#define UART_DIV13_EN BIT(30)
++#define SCU0_HPLL_PARAM 0x300
++#define SCU0_DPLL_PARAM 0x308
++#define SCU0_MPLL_PARAM 0x310
++#define SCU0_D0CLK_PARAM 0x320
++#define SCU0_D1CLK_PARAM 0x330
++#define SCU0_CRT0CLK_PARAM 0x340
++#define SCU0_CRT1CLK_PARAM 0x350
++#define SCU0_MPHYCLK_PARAM 0x360
++
++/* SOC1 */
++#define SCU1_REVISION_ID 0x0
++#define REVISION_ID GENMASK(23, 16)
++#define SCU1_CLK_STOP 0x240
++#define SCU1_CLK_STOP2 0x260
++#define SCU1_CLK_SEL1 0x280
++#define SCU1_CLK_SEL2 0x284
++#define SCU1_CLK_I3C_DIV_MASK GENMASK(25, 23)
++#define SCU1_CLK_I3C_DIV(n) ((n) - 1)
++#define UXCLK_MASK GENMASK(1, 0)
++#define HUXCLK_MASK GENMASK(4, 3)
++#define SCU1_HPLL_PARAM 0x300
++#define SCU1_APLL_PARAM 0x310
++#define SCU1_DPLL_PARAM 0x320
++#define SCU1_UXCLK_CTRL 0x330
++#define SCU1_HUXCLK_CTRL 0x334
++#define SCU1_MAC12_CLK_DLY 0x390
++#define SCU1_MAC12_CLK_DLY_100M 0x394
++#define SCU1_MAC12_CLK_DLY_10M 0x398
++
++/*
++ * MAC Clock Delay settings
++ */
++#define MAC_CLK_RMII1_50M_RCLK_O_CTRL BIT(30)
++#define MAC_CLK_RMII1_50M_RCLK_O_DIS 0
++#define MAC_CLK_RMII1_50M_RCLK_O_EN 1
++#define MAC_CLK_RMII0_50M_RCLK_O_CTRL BIT(29)
++#define MAC_CLK_RMII0_5M_RCLK_O_DIS 0
++#define MAC_CLK_RMII0_5M_RCLK_O_EN 1
++#define MAC_CLK_RMII_TXD_FALLING_2 BIT(27)
++#define MAC_CLK_RMII_TXD_FALLING_1 BIT(26)
++#define MAC_CLK_RXCLK_INV_2 BIT(25)
++#define MAC_CLK_RXCLK_INV_1 BIT(24)
++#define MAC_CLK_1G_INPUT_DELAY_2 GENMASK(23, 18)
++#define MAC_CLK_1G_INPUT_DELAY_1 GENMASK(17, 12)
++#define MAC_CLK_1G_OUTPUT_DELAY_2 GENMASK(11, 6)
++#define MAC_CLK_1G_OUTPUT_DELAY_1 GENMASK(5, 0)
++
++#define MAC_CLK_100M_10M_RESERVED GENMASK(31, 26)
++#define MAC_CLK_100M_10M_RXCLK_INV_2 BIT(25)
++#define MAC_CLK_100M_10M_RXCLK_INV_1 BIT(24)
++#define MAC_CLK_100M_10M_INPUT_DELAY_2 GENMASK(23, 18)
++#define MAC_CLK_100M_10M_INPUT_DELAY_1 GENMASK(17, 12)
++#define MAC_CLK_100M_10M_OUTPUT_DELAY_2 GENMASK(11, 6)
++#define MAC_CLK_100M_10M_OUTPUT_DELAY_1 GENMASK(5, 0)
++
++#define AST2700_DEF_MAC12_DELAY_1G_A0 0x00CF4D75
++#define AST2700_DEF_MAC12_DELAY_1G_A1 0x00C70C73
++#define AST2700_DEF_MAC12_DELAY_100M 0x00410410
++#define AST2700_DEF_MAC12_DELAY_10M 0x00410410
++
++struct mac_delay_config {
++ u32 tx_delay_1000;
++ u32 rx_delay_1000;
++ u32 tx_delay_100;
++ u32 rx_delay_100;
++ u32 tx_delay_10;
++ u32 rx_delay_10;
++};
++
++enum ast2700_clk_type {
++ CLK_MUX,
++ CLK_PLL,
++ CLK_HPLL,
++ CLK_GATE,
++ CLK_MISC,
++ CLK_FIXED,
++ DCLK_FIXED,
++ CLK_DIVIDER,
++ CLK_UART_PLL,
++ CLK_FIXED_FACTOR,
++ CLK_GATE_ASPEED,
++};
++
++struct ast2700_clk_fixed_factor_data {
++ const struct clk_parent_data *parent;
++ unsigned int mult;
++ unsigned int div;
++};
++
++struct ast2700_clk_gate_data {
++ const struct clk_parent_data *parent;
++ u32 flags;
++ u32 reg;
++ u8 bit;
++};
++
++struct ast2700_clk_mux_data {
++ const struct clk_parent_data *parents;
++ unsigned int num_parents;
++ u8 bit_shift;
++ u8 bit_width;
++ u32 reg;
++};
++
++struct ast2700_clk_div_data {
++ const struct clk_div_table *div_table;
++ const struct clk_parent_data *parent;
++ u8 bit_shift;
++ u8 bit_width;
++ u32 reg;
++};
++
++struct ast2700_clk_pll_data {
++ const struct clk_parent_data *parent;
++ u32 reg;
++};
++
++struct ast2700_clk_fixed_rate_data {
++ unsigned long fixed_rate;
++};
++
++struct ast2700_clk_info {
++ const char *name;
++ u8 clk_idx;
++ u32 reg;
++ u32 type;
++ union {
++ struct ast2700_clk_fixed_factor_data factor;
++ struct ast2700_clk_fixed_rate_data rate;
++ struct ast2700_clk_gate_data gate;
++ struct ast2700_clk_div_data div;
++ struct ast2700_clk_pll_data pll;
++ struct ast2700_clk_mux_data mux;
++ } data;
++};
++
++struct ast2700_clk_data {
++ struct ast2700_clk_info const *clk_info;
++ unsigned int nr_clks;
++ const int scu;
++};
++
++struct ast2700_clk_ctrl {
++ const struct ast2700_clk_data *clk_data;
++ struct device *dev;
++ void __iomem *base;
++ spinlock_t lock; /* clk lock */
++};
++
++static const struct clk_div_table ast2700_rgmii_div_table[] = {
++ { 0x0, 4 },
++ { 0x1, 4 },
++ { 0x2, 6 },
++ { 0x3, 8 },
++ { 0x4, 10 },
++ { 0x5, 12 },
++ { 0x6, 14 },
++ { 0x7, 16 },
++ { 0 }
++};
++
++static const struct clk_div_table ast2700_rmii_div_table[] = {
++ { 0x0, 8 },
++ { 0x1, 8 },
++ { 0x2, 12 },
++ { 0x3, 16 },
++ { 0x4, 20 },
++ { 0x5, 24 },
++ { 0x6, 28 },
++ { 0x7, 32 },
++ { 0 }
++};
++
++static const struct clk_div_table ast2700_clk_div_table[] = {
++ { 0x0, 2 },
++ { 0x1, 2 },
++ { 0x2, 3 },
++ { 0x3, 4 },
++ { 0x4, 5 },
++ { 0x5, 6 },
++ { 0x6, 7 },
++ { 0x7, 8 },
++ { 0 }
++};
++
++static const struct clk_div_table ast2700_clk_div_table2[] = {
++ { 0x0, 2 },
++ { 0x1, 4 },
++ { 0x2, 6 },
++ { 0x3, 8 },
++ { 0x4, 10 },
++ { 0x5, 12 },
++ { 0x6, 14 },
++ { 0x7, 16 },
++ { 0 }
++};
++
++static const struct clk_div_table ast2700_hclk_div_table[] = {
++ { 0x0, 6 },
++ { 0x1, 5 },
++ { 0x2, 4 },
++ { 0x3, 7 },
++ { 0 }
++};
++
++static const struct clk_div_table ast2700a0_hclk_div_table[] = {
++ { 0x0, 4 },
++ { 0x1, 4 },
++ { 0x2, 6 },
++ { 0x3, 8 },
++ { 0 }
++};
++
++static const struct clk_div_table ast2700_clk_uart_div_table[] = {
++ { 0x0, 1 },
++ { 0x1, 13 },
++ { 0 }
++};
++
++static const struct clk_parent_data soc0_clkin[] = {
++ { .fw_name = "soc0-clkin", .name = "soc0-clkin" },
++};
++
++static const struct clk_parent_data pspclk[] = {
++ { .fw_name = "pspclk", .name = "pspclk" },
++};
++
++static const struct clk_parent_data soc0_mpll_div8[] = {
++ { .fw_name = "soc0-mpll_div8", .name = "soc0-mpll_div8" },
++};
++
++static const struct clk_parent_data mphysrc[] = {
++ { .fw_name = "mphysrc", .name = "mphysrc" },
++};
++
++static const struct clk_parent_data u2phy_refclksrc[] = {
++ { .fw_name = "u2phy_refclksrc", .name = "u2phy_refclksrc" },
++};
++
++static const struct clk_parent_data soc0_hpll[] = {
++ { .fw_name = "soc0-hpll", .name = "soc0-hpll" },
++};
++
++static const struct clk_parent_data soc0_mpll[] = {
++ { .fw_name = "soc0-mpll", .name = "soc0-mpll" },
++};
++
++static const struct clk_parent_data axi0clk[] = {
++ { .fw_name = "axi0clk", .name = "axi0clk" },
++};
++
++static const struct clk_parent_data soc0_ahbmux[] = {
++ { .fw_name = "soc0-ahbmux", .name = "soc0-ahbmux" },
++};
++
++static const struct clk_parent_data soc0_ahb[] = {
++ { .fw_name = "soc0-ahb", .name = "soc0-ahb" },
++};
++
++static const struct clk_parent_data soc0_uartclk[] = {
++ { .fw_name = "soc0-uartclk", .name = "soc0-uartclk" },
++};
++
++static const struct clk_parent_data emmcclk[] = {
++ { .fw_name = "emmcclk", .name = "emmcclk" },
++};
++
++static const struct clk_parent_data emmcsrc_mux[] = {
++ { .fw_name = "emmcsrc-mux", .name = "emmcsrc-mux" },
++};
++
++static const struct clk_parent_data soc1_clkin[] = {
++ { .fw_name = "soc1-clkin", .name = "soc1-clkin" },
++};
++
++static const struct clk_parent_data soc1_hpll[] = {
++ { .fw_name = "soc1-hpll", .name = "soc1-hpll" },
++};
++
++static const struct clk_parent_data soc1_apll[] = {
++ { .fw_name = "soc1-apll", .name = "soc1-apll" },
++};
++
++static const struct clk_parent_data sdclk[] = {
++ { .fw_name = "sdclk", .name = "sdclk" },
++};
++
++static const struct clk_parent_data sdclk_mux[] = {
++ { .fw_name = "sdclk-mux", .name = "sdclk-mux" },
++};
++
++static const struct clk_parent_data huartxclk[] = {
++ { .fw_name = "huartxclk", .name = "huartxclk" },
++};
++
++static const struct clk_parent_data uxclk[] = {
++ { .fw_name = "uxclk", .name = "uxclk" },
++};
++
++static const struct clk_parent_data huxclk[] = {
++ { .fw_name = "huxclk", .name = "huxclk" },
++};
++
++static const struct clk_parent_data uart0clk[] = {
++ { .fw_name = "uart0clk", .name = "uart0clk" },
++};
++
++static const struct clk_parent_data uart1clk[] = {
++ { .fw_name = "uart1clk", .name = "uart1clk" },
++};
++
++static const struct clk_parent_data uart2clk[] = {
++ { .fw_name = "uart2clk", .name = "uart2clk" },
++};
++
++static const struct clk_parent_data uart3clk[] = {
++ { .fw_name = "uart3clk", .name = "uart3clk" },
++};
++
++static const struct clk_parent_data uart5clk[] = {
++ { .fw_name = "uart5clk", .name = "uart5clk" },
++};
++
++static const struct clk_parent_data uart4clk[] = {
++ { .fw_name = "uart4clk", .name = "uart4clk" },
++};
++
++static const struct clk_parent_data uart6clk[] = {
++ { .fw_name = "uart6clk", .name = "uart6clk" },
++};
++
++static const struct clk_parent_data uart7clk[] = {
++ { .fw_name = "uart7clk", .name = "uart7clk" },
++};
++
++static const struct clk_parent_data uart8clk[] = {
++ { .fw_name = "uart8clk", .name = "uart8clk" },
++};
++
++static const struct clk_parent_data uart9clk[] = {
++ { .fw_name = "uart9clk", .name = "uart9clk" },
++};
++
++static const struct clk_parent_data uart10clk[] = {
++ { .fw_name = "uart10clk", .name = "uart10clk" },
++};
++
++static const struct clk_parent_data uart11clk[] = {
++ { .fw_name = "uart11clk", .name = "uart11clk" },
++};
++
++static const struct clk_parent_data uart12clk[] = {
++ { .fw_name = "uart12clk", .name = "uart12clk" },
++};
++
++static const struct clk_parent_data uart13clk[] = {
++ { .fw_name = "uart13clk", .name = "uart13clk" },
++};
++
++static const struct clk_parent_data uart14clk[] = {
++ { .fw_name = "uart14clk", .name = "uart14clk" },
++};
++
++static const struct clk_parent_data uart15clk[] = {
++ { .fw_name = "uart15clk", .name = "uart15clk" },
++};
++
++static const struct clk_parent_data uart16clk[] = {
++ { .fw_name = "uart16clk", .name = "uart16clk" },
++};
++
++static const struct clk_parent_data soc1_ahb[] = {
++ { .fw_name = "soc1-ahb", .name = "soc1-ahb" },
++};
++
++static const struct clk_parent_data soc1_i3c[] = {
++ { .fw_name = "soc1-i3c", .name = "soc1-i3c" },
++};
++
++static const struct clk_parent_data canclk[] = {
++ { .fw_name = "canclk", .name = "canclk" },
++};
++
++static const struct clk_parent_data rmii[] = {
++ { .fw_name = "rmii", .name = "rmii" },
++};
++
++static const struct clk_parent_data d_clk_sels[] = {
++ { .fw_name = "soc0-hpll_div2", .name = "soc0-hpll_div2" },
++ { .fw_name = "soc0-mpll_div2", .name = "soc0-mpll_div2" },
++};
++
++static const struct clk_parent_data hclk_clk_sels[] = {
++ { .fw_name = "soc0-hpll", .name = "soc0-hpll" },
++ { .fw_name = "soc0-mpll", .name = "soc0-mpll" },
++};
++
++static const struct clk_parent_data mhpll_clk_sels[] = {
++ { .fw_name = "soc0-mpll", .name = "soc0-mpll" },
++ { .fw_name = "soc0-hpll", .name = "soc0-hpll" },
++};
++
++static const struct clk_parent_data mphy_clk_sels[] = {
++ { .fw_name = "soc0-mpll", .name = "soc0-mpll" },
++ { .fw_name = "soc0-hpll", .name = "soc0-hpll" },
++ { .fw_name = "soc0-dpll", .name = "soc0-dpll" },
++ { .fw_name = "soc0-clk192Mhz", .name = "soc0-clk192Mhz" },
++};
++
++static const struct clk_parent_data psp_clk_sels[] = {
++ { .fw_name = "soc0-mpll", .name = "soc0-mpll" },
++ { .fw_name = "soc0-hpll", .name = "soc0-hpll" },
++ { .fw_name = "soc0-hpll", .name = "soc0-hpll" },
++ { .fw_name = "soc0-hpll", .name = "soc0-hpll" },
++ { .fw_name = "soc0-mpll_div2", .name = "soc0-mpll_div2" },
++ { .fw_name = "soc0-hpll_div2", .name = "soc0-hpll_div2" },
++ { .fw_name = "soc0-hpll", .name = "soc0-hpll" },
++ { .fw_name = "soc0-hpll", .name = "soc0-hpll" },
++};
++
++static const struct clk_parent_data uart_clk_sels[] = {
++ { .fw_name = "soc0-clk24Mhz", .name = "soc0-clk24Mhz" },
++ { .fw_name = "soc0-clk192Mhz", .name = "soc0-clk192Mhz" },
++};
++
++static const struct clk_parent_data emmc_clk_sels[] = {
++ { .fw_name = "soc0-mpll_div4", .name = "soc0-mpll_div4" },
++ { .fw_name = "soc0-hpll_div4", .name = "soc0-hpll_div4" },
++};
++
++static const struct clk_parent_data sdio_clk_sels[] = {
++ { .fw_name = "soc1-hpll", .name = "soc1-hpll" },
++ { .fw_name = "soc1-apll", .name = "soc1-apll" },
++};
++
++static const struct clk_parent_data ux_clk_sels[] = {
++ { .fw_name = "soc1-apll_div4", .name = "soc1-apll_div4" },
++ { .fw_name = "soc1-apll_div2", .name = "soc1-apll_div2" },
++ { .fw_name = "soc1-apll", .name = "soc1-apll" },
++ { .fw_name = "soc1-hpll", .name = "soc1-hpll" },
++};
++
++static const struct clk_parent_data uartx_clk_sels[] = {
++ { .fw_name = "uartxclk", .name = "uartxclk" },
++ { .fw_name = "huartxclk", .name = "huartxclk" },
++};
++
++#define FIXED_CLK(_id, _name, _rate) \
++ [_id] = { \
++ .type = CLK_FIXED, \
++ .name = _name, \
++ .data = { .rate = { .fixed_rate = _rate, } }, \
++ }
++
++#define PLL_CLK(_id, _type, _name, _parent, _reg) \
++ [_id] = { \
++ .type = _type, \
++ .name = _name, \
++ .data = { .pll = { .parent = _parent, .reg = _reg, } }, \
++ }
++
++#define MUX_CLK(_id, _name, _parents, _num_parents, _reg, _shift, _width) \
++ [_id] = { \
++ .type = CLK_MUX, \
++ .name = _name, \
++ .data = { \
++ .mux = { \
++ .parents = _parents, \
++ .num_parents = _num_parents, \
++ .reg = _reg, \
++ .bit_shift = _shift, \
++ .bit_width = _width, \
++ }, \
++ }, \
++ }
++
++#define DIVIDER_CLK(_id, _name, _parent, _reg, _shift, _width, _div_table) \
++ [_id] = { \
++ .type = CLK_DIVIDER, \
++ .name = _name, \
++ .data = { \
++ .div = { \
++ .parent = _parent, \
++ .reg = _reg, \
++ .bit_shift = _shift, \
++ .bit_width = _width, \
++ .div_table = _div_table, \
++ }, \
++ }, \
++ }
++
++#define FIXED_FACTOR_CLK(_id, _name, _parent, _mult, _div) \
++ [_id] = { \
++ .type = CLK_FIXED_FACTOR, \
++ .name = _name, \
++ .data = { .factor = { .parent = _parent, .mult = _mult, .div = _div, } }, \
++ }
++
++#define GATE_CLK(_id, _type, _name, _parent, _reg, _bit, _flags) \
++ [_id] = { \
++ .type = _type, \
++ .name = _name, \
++ .data = { \
++ .gate = { \
++ .parent = _parent, \
++ .reg = _reg, \
++ .bit = _bit, \
++ .flags = _flags, \
++ }, \
++ }, \
++ }
++
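The clock tables below are built entirely from these macros: each one expands to a designated initializer at index _id, so a single row fully describes how to construct that clock (type, name, parents, register, bit or divider). A stripped-down, compilable illustration of the same table pattern, using a simplified struct and a hypothetical DEMO_GATE_CLK macro rather than the driver's real types:

```
#include <stdint.h>
#include <stdio.h>

enum demo_clk_id { DEMO_CLK_GATE_MCLK, DEMO_NUM_CLKS };

struct demo_clk_info {
	const char *name;
	uint32_t reg;	/* SCU offset of the clock-stop register */
	uint8_t bit;	/* gate bit inside that register */
};

#define DEMO_GATE_CLK(_id, _name, _reg, _bit) \
	[_id] = { .name = _name, .reg = _reg, .bit = _bit }

static const struct demo_clk_info demo_clk_info[DEMO_NUM_CLKS] = {
	DEMO_GATE_CLK(DEMO_CLK_GATE_MCLK, "mclk-gate", 0x240, 0),
};

int main(void)
{
	const struct demo_clk_info *c = &demo_clk_info[DEMO_CLK_GATE_MCLK];

	printf("%s: gate bit %u in SCU + 0x%03x\n", c->name,
	       (unsigned int)c->bit, (unsigned int)c->reg);
	return 0;
}
```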
++static const struct ast2700_clk_info ast2700_scu0_clk_info[] __initconst = {
++ FIXED_CLK(SCU0_CLKIN, "soc0-clkin", SCU_CLK_25MHZ),
++ FIXED_CLK(SCU0_CLK_24M, "soc0-clk24Mhz", SCU_CLK_24MHZ),
++ FIXED_CLK(SCU0_CLK_192M, "soc0-clk192Mhz", SCU_CLK_192MHZ),
++ FIXED_CLK(SCU0_CLK_U2PHY_CLK12M, "u2phy_clk12m", SCU_CLK_12MHZ),
++ PLL_CLK(SCU0_CLK_HPLL, CLK_HPLL, "soc0-hpll", soc0_clkin, SCU0_HPLL_PARAM),
++ PLL_CLK(SCU0_CLK_DPLL, CLK_PLL, "soc0-dpll", soc0_clkin, SCU0_DPLL_PARAM),
++ PLL_CLK(SCU0_CLK_MPLL, CLK_PLL, "soc0-mpll", soc0_clkin, SCU0_MPLL_PARAM),
++ PLL_CLK(SCU0_CLK_D0, DCLK_FIXED, "d0clk", NULL, SCU0_D0CLK_PARAM),
++ PLL_CLK(SCU0_CLK_D1, DCLK_FIXED, "d1clk", NULL, SCU0_D1CLK_PARAM),
++ PLL_CLK(SCU0_CLK_CRT0, DCLK_FIXED, "crt0clk", NULL, SCU0_CRT0CLK_PARAM),
++ PLL_CLK(SCU0_CLK_CRT1, DCLK_FIXED, "crt1clk", NULL, SCU0_CRT1CLK_PARAM),
++ PLL_CLK(SCU0_CLK_MPHY, CLK_MISC, "mphyclk", mphysrc, SCU0_MPHYCLK_PARAM),
++ PLL_CLK(SCU0_CLK_U2PHY_REFCLK, CLK_MISC, "u2phy_refclk", u2phy_refclksrc, SCU0_CLK_SEL2),
++ FIXED_FACTOR_CLK(SCU0_CLK_HPLL_DIV2, "soc0-hpll_div2", soc0_hpll, 1, 2),
++ FIXED_FACTOR_CLK(SCU0_CLK_HPLL_DIV4, "soc0-hpll_div4", soc0_hpll, 1, 4),
++ FIXED_FACTOR_CLK(SCU0_CLK_MPLL_DIV2, "soc0-mpll_div2", soc0_mpll, 1, 2),
++ FIXED_FACTOR_CLK(SCU0_CLK_MPLL_DIV4, "soc0-mpll_div4", soc0_mpll, 1, 4),
++ FIXED_FACTOR_CLK(SCU0_CLK_MPLL_DIV8, "soc0-mpll_div8", soc0_mpll, 1, 8),
++ FIXED_FACTOR_CLK(SCU0_CLK_AXI0, "axi0clk", pspclk, 1, 2),
++ FIXED_FACTOR_CLK(SCU0_CLK_AXI1, "axi1clk", soc0_mpll, 1, 4),
++ DIVIDER_CLK(SCU0_CLK_AHB, "soc0-ahb", soc0_ahbmux,
++ SCU0_HWSTRAP1, 5, 2, ast2700_hclk_div_table),
++ DIVIDER_CLK(SCU0_CLK_EMMC, "emmcclk", emmcsrc_mux,
++ SCU0_CLK_SEL1, 12, 3, ast2700_clk_div_table2),
++ DIVIDER_CLK(SCU0_CLK_APB, "soc0-apb", axi0clk,
++ SCU0_CLK_SEL1, 23, 3, ast2700_clk_div_table2),
++ DIVIDER_CLK(SCU0_CLK_UART4, "uart4clk", soc0_uartclk,
++ SCU0_CLK_SEL2, 30, 1, ast2700_clk_uart_div_table),
++ MUX_CLK(SCU0_CLK_PSP, "pspclk", psp_clk_sels, ARRAY_SIZE(psp_clk_sels),
++ SCU0_HWSTRAP1, 2, 3),
++ MUX_CLK(SCU0_CLK_AHBMUX, "soc0-ahbmux", hclk_clk_sels, ARRAY_SIZE(hclk_clk_sels),
++ SCU0_HWSTRAP1, 7, 1),
++ MUX_CLK(SCU0_CLK_EMMCMUX, "emmcsrc-mux", emmc_clk_sels, ARRAY_SIZE(emmc_clk_sels),
++ SCU0_CLK_SEL1, 11, 1),
++ MUX_CLK(SCU0_CLK_MPHYSRC, "mphysrc", mphy_clk_sels, ARRAY_SIZE(mphy_clk_sels),
++ SCU0_CLK_SEL2, 18, 2),
++ MUX_CLK(SCU0_CLK_U2PHY_REFCLKSRC, "u2phy_refclksrc", mhpll_clk_sels,
++ ARRAY_SIZE(mhpll_clk_sels), SCU0_CLK_SEL2, 23, 1),
++ MUX_CLK(SCU0_CLK_UART, "soc0-uartclk", uart_clk_sels, ARRAY_SIZE(uart_clk_sels),
++ SCU0_CLK_SEL2, 14, 1),
++ GATE_CLK(SCU0_CLK_GATE_MCLK, CLK_GATE_ASPEED, "mclk-gate", soc0_mpll,
++ SCU0_CLK_STOP, 0, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_ECLK, CLK_GATE_ASPEED, "eclk-gate", NULL, SCU0_CLK_STOP, 1, 0),
++ GATE_CLK(SCU0_CLK_GATE_2DCLK, CLK_GATE_ASPEED, "gclk-gate", NULL, SCU0_CLK_STOP, 2, 0),
++ GATE_CLK(SCU0_CLK_GATE_VCLK, CLK_GATE_ASPEED, "vclk-gate", NULL, SCU0_CLK_STOP, 3, 0),
++ GATE_CLK(SCU0_CLK_GATE_BCLK, CLK_GATE_ASPEED, "bclk-gate", NULL,
++ SCU0_CLK_STOP, 4, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_VGA0CLK, CLK_GATE_ASPEED, "vga0clk-gate", NULL,
++ SCU0_CLK_STOP, 5, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_REFCLK, CLK_GATE_ASPEED, "soc0-refclk-gate", soc0_clkin,
++ SCU0_CLK_STOP, 6, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_PORTBUSB2CLK, CLK_GATE_ASPEED, "portb-usb2clk-gate", NULL,
++ SCU0_CLK_STOP, 7, 0),
++ GATE_CLK(SCU0_CLK_GATE_UHCICLK, CLK_GATE_ASPEED, "uhciclk-gate", NULL, SCU0_CLK_STOP, 9, 0),
++ GATE_CLK(SCU0_CLK_GATE_VGA1CLK, CLK_GATE_ASPEED, "vga1clk-gate", NULL,
++ SCU0_CLK_STOP, 10, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_DDRPHYCLK, CLK_GATE_ASPEED, "ddrphy-gate", NULL,
++ SCU0_CLK_STOP, 11, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_E2M0CLK, CLK_GATE_ASPEED, "e2m0clk-gate", NULL,
++ SCU0_CLK_STOP, 12, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_HACCLK, CLK_GATE_ASPEED, "hacclk-gate", NULL, SCU0_CLK_STOP, 13, 0),
++ GATE_CLK(SCU0_CLK_GATE_PORTAUSB2CLK, CLK_GATE_ASPEED, "porta-usb2clk-gate", NULL,
++ SCU0_CLK_STOP, 14, 0),
++ GATE_CLK(SCU0_CLK_GATE_UART4CLK, CLK_GATE_ASPEED, "uart4clk-gate", uart4clk,
++ SCU0_CLK_STOP, 15, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_SLICLK, CLK_GATE_ASPEED, "soc0-sliclk-gate", NULL,
++ SCU0_CLK_STOP, 16, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_DACCLK, CLK_GATE_ASPEED, "dacclk-gate", NULL,
++ SCU0_CLK_STOP, 17, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_DP, CLK_GATE_ASPEED, "dpclk-gate", NULL,
++ SCU0_CLK_STOP, 18, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_E2M1CLK, CLK_GATE_ASPEED, "e2m1clk-gate", NULL,
++ SCU0_CLK_STOP, 19, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_CRT0CLK, CLK_GATE_ASPEED, "crt0clk-gate", NULL,
++ SCU0_CLK_STOP, 20, 0),
++ GATE_CLK(SCU0_CLK_GATE_CRT1CLK, CLK_GATE_ASPEED, "crt1clk-gate", NULL,
++ SCU0_CLK_STOP, 21, 0),
++ GATE_CLK(SCU0_CLK_GATE_ECDSACLK, CLK_GATE_ASPEED, "eccclk-gate", NULL,
++ SCU0_CLK_STOP, 23, 0),
++ GATE_CLK(SCU0_CLK_GATE_RSACLK, CLK_GATE_ASPEED, "rsaclk-gate", NULL,
++ SCU0_CLK_STOP, 24, 0),
++ GATE_CLK(SCU0_CLK_GATE_RVAS0CLK, CLK_GATE_ASPEED, "rvas0clk-gate", NULL,
++ SCU0_CLK_STOP, 25, 0),
++ GATE_CLK(SCU0_CLK_GATE_UFSCLK, CLK_GATE_ASPEED, "ufsclk-gate", NULL,
++ SCU0_CLK_STOP, 26, 0),
++ GATE_CLK(SCU0_CLK_GATE_EMMCCLK, CLK_GATE_ASPEED, "emmcclk-gate", emmcclk,
++ SCU0_CLK_STOP, 27, 0),
++ GATE_CLK(SCU0_CLK_GATE_RVAS1CLK, CLK_GATE_ASPEED, "rvas1clk-gate", NULL,
++ SCU0_CLK_STOP, 28, 0),
++};
++
++static const struct ast2700_clk_info ast2700a0_scu0_clk_info[] __initconst = {
++ FIXED_CLK(SCU0_CLKIN, "soc0-clkin", SCU_CLK_25MHZ),
++ FIXED_CLK(SCU0_CLK_24M, "soc0-clk24Mhz", SCU_CLK_24MHZ),
++ FIXED_CLK(SCU0_CLK_192M, "soc0-clk192Mhz", SCU_CLK_192MHZ),
++ FIXED_CLK(SCU0_CLK_U2PHY_CLK12M, "u2phy_clk12m", SCU_CLK_12MHZ),
++ PLL_CLK(SCU0_CLK_HPLL, CLK_HPLL, "soc0-hpll", soc0_clkin, SCU0_HPLL_PARAM),
++ PLL_CLK(SCU0_CLK_DPLL, CLK_PLL, "soc0-dpll", soc0_clkin, SCU0_DPLL_PARAM),
++ PLL_CLK(SCU0_CLK_MPLL, CLK_PLL, "soc0-mpll", soc0_clkin, SCU0_MPLL_PARAM),
++ PLL_CLK(SCU0_CLK_D0, DCLK_FIXED, "d0clk", NULL, SCU0_D0CLK_PARAM),
++ PLL_CLK(SCU0_CLK_D1, DCLK_FIXED, "d1clk", NULL, SCU0_D1CLK_PARAM),
++ PLL_CLK(SCU0_CLK_CRT0, DCLK_FIXED, "crt0clk", NULL, SCU0_CRT0CLK_PARAM),
++ PLL_CLK(SCU0_CLK_CRT1, DCLK_FIXED, "crt1clk", NULL, SCU0_CRT1CLK_PARAM),
++ PLL_CLK(SCU0_CLK_MPHY, CLK_MISC, "mphyclk", soc0_hpll, SCU0_MPHYCLK_PARAM),
++ PLL_CLK(SCU0_CLK_U2PHY_REFCLK, CLK_MISC, "u2phy_refclk", soc0_mpll_div8, SCU0_CLK_SEL2),
++ FIXED_FACTOR_CLK(SCU0_CLK_HPLL_DIV2, "soc0-hpll_div2", soc0_hpll, 1, 2),
++ FIXED_FACTOR_CLK(SCU0_CLK_HPLL_DIV4, "soc0-hpll_div4", soc0_hpll, 1, 4),
++ FIXED_FACTOR_CLK(SCU0_CLK_MPLL_DIV2, "soc0-mpll_div2", soc0_mpll, 1, 2),
++ FIXED_FACTOR_CLK(SCU0_CLK_MPLL_DIV4, "soc0-mpll_div4", soc0_mpll, 1, 4),
++ FIXED_FACTOR_CLK(SCU0_CLK_MPLL_DIV8, "soc0-mpll_div8", soc0_mpll, 1, 8),
++ FIXED_FACTOR_CLK(SCU0_CLK_AXI0, "axi0clk", pspclk, 1, 2),
++ FIXED_FACTOR_CLK(SCU0_CLK_AXI1, "axi1clk", soc0_ahb, 1, 1),
++ DIVIDER_CLK(SCU0_CLK_AHB, "soc0-ahb", soc0_ahbmux,
++ SCU0_HWSTRAP1, 5, 2, ast2700a0_hclk_div_table),
++ DIVIDER_CLK(SCU0_CLK_APB, "soc0-apb", axi0clk,
++ SCU0_CLK_SEL1, 23, 3, ast2700_clk_div_table2),
++ DIVIDER_CLK(SCU0_CLK_UART4, "uart4clk", soc0_uartclk,
++ SCU0_CLK_SEL2, 30, 1, ast2700_clk_uart_div_table),
++ DIVIDER_CLK(SCU0_CLK_EMMC, "emmcclk", emmcsrc_mux,
++ SCU0_CLK_SEL1, 12, 3, ast2700_clk_div_table2),
++ MUX_CLK(SCU0_CLK_PSP, "pspclk", mhpll_clk_sels, ARRAY_SIZE(mhpll_clk_sels),
++ SCU0_HWSTRAP1, 4, 1),
++ MUX_CLK(SCU0_CLK_AHBMUX, "soc0-ahbmux", mhpll_clk_sels, ARRAY_SIZE(mhpll_clk_sels),
++ SCU0_HWSTRAP1, 7, 1),
++ MUX_CLK(SCU0_CLK_UART, "soc0-uartclk", uart_clk_sels, ARRAY_SIZE(uart_clk_sels),
++ SCU0_CLK_SEL2, 14, 1),
++ MUX_CLK(SCU0_CLK_EMMCMUX, "emmcsrc-mux", emmc_clk_sels, ARRAY_SIZE(emmc_clk_sels),
++ SCU0_CLK_SEL1, 11, 1),
++ GATE_CLK(SCU0_CLK_GATE_MCLK, CLK_GATE_ASPEED, "mclk-gate", soc0_mpll,
++ SCU0_CLK_STOP, 0, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_ECLK, CLK_GATE_ASPEED, "eclk-gate", NULL, SCU0_CLK_STOP, 1, 0),
++ GATE_CLK(SCU0_CLK_GATE_2DCLK, CLK_GATE_ASPEED, "gclk-gate", NULL, SCU0_CLK_STOP, 2, 0),
++ GATE_CLK(SCU0_CLK_GATE_VCLK, CLK_GATE_ASPEED, "vclk-gate", NULL, SCU0_CLK_STOP, 3, 0),
++ GATE_CLK(SCU0_CLK_GATE_BCLK, CLK_GATE_ASPEED, "bclk-gate", NULL,
++ SCU0_CLK_STOP, 4, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_VGA0CLK, CLK_GATE_ASPEED, "vga0clk-gate", NULL,
++ SCU0_CLK_STOP, 5, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_REFCLK, CLK_GATE_ASPEED, "soc0-refclk-gate", soc0_clkin,
++ SCU0_CLK_STOP, 6, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_PORTBUSB2CLK, CLK_GATE_ASPEED, "portb-usb2clk-gate", NULL,
++ SCU0_CLK_STOP, 7, 0),
++ GATE_CLK(SCU0_CLK_GATE_UHCICLK, CLK_GATE_ASPEED, "uhciclk-gate", NULL, SCU0_CLK_STOP, 9, 0),
++ GATE_CLK(SCU0_CLK_GATE_VGA1CLK, CLK_GATE_ASPEED, "vga1clk-gate", NULL,
++ SCU0_CLK_STOP, 10, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_DDRPHYCLK, CLK_GATE_ASPEED, "ddrphy-gate", NULL,
++ SCU0_CLK_STOP, 11, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_E2M0CLK, CLK_GATE_ASPEED, "e2m0clk-gate", NULL,
++ SCU0_CLK_STOP, 12, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_HACCLK, CLK_GATE_ASPEED, "hacclk-gate", NULL, SCU0_CLK_STOP, 13, 0),
++ GATE_CLK(SCU0_CLK_GATE_PORTAUSB2CLK, CLK_GATE_ASPEED, "porta-usb2clk-gate", NULL,
++ SCU0_CLK_STOP, 14, 0),
++ GATE_CLK(SCU0_CLK_GATE_UART4CLK, CLK_GATE_ASPEED, "uart4clk-gate", uart4clk,
++ SCU0_CLK_STOP, 15, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_SLICLK, CLK_GATE_ASPEED, "soc0-sliclk-gate", NULL,
++ SCU0_CLK_STOP, 16, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_DACCLK, CLK_GATE_ASPEED, "dacclk-gate", NULL,
++ SCU0_CLK_STOP, 17, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_DP, CLK_GATE_ASPEED, "dpclk-gate", NULL,
++ SCU0_CLK_STOP, 18, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_E2M1CLK, CLK_GATE_ASPEED, "e2m1clk-gate", NULL,
++ SCU0_CLK_STOP, 19, CLK_IS_CRITICAL),
++ GATE_CLK(SCU0_CLK_GATE_CRT0CLK, CLK_GATE_ASPEED, "crt0clk-gate", NULL,
++ SCU0_CLK_STOP, 20, 0),
++ GATE_CLK(SCU0_CLK_GATE_CRT1CLK, CLK_GATE_ASPEED, "crt1clk-gate", NULL,
++ SCU0_CLK_STOP, 21, 0),
++ GATE_CLK(SCU0_CLK_GATE_ECDSACLK, CLK_GATE_ASPEED, "eccclk-gate", NULL,
++ SCU0_CLK_STOP, 23, 0),
++ GATE_CLK(SCU0_CLK_GATE_RSACLK, CLK_GATE_ASPEED, "rsaclk-gate", NULL,
++ SCU0_CLK_STOP, 24, 0),
++ GATE_CLK(SCU0_CLK_GATE_RVAS0CLK, CLK_GATE_ASPEED, "rvas0clk-gate", NULL,
++ SCU0_CLK_STOP, 25, 0),
++ GATE_CLK(SCU0_CLK_GATE_UFSCLK, CLK_GATE_ASPEED, "ufsclk-gate", NULL,
++ SCU0_CLK_STOP, 26, 0),
++ GATE_CLK(SCU0_CLK_GATE_EMMCCLK, CLK_GATE_ASPEED, "emmcclk-gate", emmcclk,
++ SCU0_CLK_STOP, 27, 0),
++ GATE_CLK(SCU0_CLK_GATE_RVAS1CLK, CLK_GATE_ASPEED, "rvas1clk-gate", NULL,
++ SCU0_CLK_STOP, 28, 0),
++};
++
++static const struct ast2700_clk_info ast2700_scu1_clk_info[] __initconst = {
++ FIXED_CLK(SCU1_CLKIN, "soc1-clkin", SCU_CLK_25MHZ),
++ PLL_CLK(SCU1_CLK_HPLL, CLK_PLL, "soc1-hpll", soc1_clkin, SCU1_HPLL_PARAM),
++ PLL_CLK(SCU1_CLK_APLL, CLK_PLL, "soc1-apll", soc1_clkin, SCU1_APLL_PARAM),
++ PLL_CLK(SCU1_CLK_DPLL, CLK_PLL, "soc1-dpll", soc1_clkin, SCU1_DPLL_PARAM),
++ PLL_CLK(SCU1_CLK_UARTX, CLK_UART_PLL, "uartxclk", uxclk, SCU1_UXCLK_CTRL),
++ PLL_CLK(SCU1_CLK_HUARTX, CLK_UART_PLL, "huartxclk", huxclk, SCU1_HUXCLK_CTRL),
++ FIXED_FACTOR_CLK(SCU1_CLK_APLL_DIV2, "soc1-apll_div2", soc1_apll, 1, 2),
++ FIXED_FACTOR_CLK(SCU1_CLK_APLL_DIV4, "soc1-apll_div4", soc1_apll, 1, 4),
++ FIXED_FACTOR_CLK(SCU1_CLK_UART13, "uart13clk", huartxclk, 1, 1),
++ FIXED_FACTOR_CLK(SCU1_CLK_UART14, "uart14clk", huartxclk, 1, 1),
++ FIXED_FACTOR_CLK(SCU1_CLK_CAN, "canclk", soc1_apll, 1, 10),
++ DIVIDER_CLK(SCU1_CLK_SDCLK, "sdclk", sdclk_mux,
++ SCU1_CLK_SEL1, 14, 3, ast2700_clk_div_table),
++ DIVIDER_CLK(SCU1_CLK_APB, "soc1-apb", soc1_hpll,
++ SCU1_CLK_SEL1, 18, 3, ast2700_clk_div_table2),
++ DIVIDER_CLK(SCU1_CLK_RMII, "rmii", soc1_hpll,
++ SCU1_CLK_SEL1, 21, 3, ast2700_rmii_div_table),
++ DIVIDER_CLK(SCU1_CLK_RGMII, "rgmii", soc1_hpll,
++ SCU1_CLK_SEL1, 25, 3, ast2700_rgmii_div_table),
++ DIVIDER_CLK(SCU1_CLK_MACHCLK, "machclk", soc1_hpll,
++ SCU1_CLK_SEL1, 29, 3, ast2700_clk_div_table),
++ DIVIDER_CLK(SCU1_CLK_APLL_DIVN, "soc1-apll_divn", soc1_apll,
++ SCU1_CLK_SEL2, 8, 3, ast2700_clk_div_table),
++ DIVIDER_CLK(SCU1_CLK_AHB, "soc1-ahb", soc1_hpll,
++ SCU1_CLK_SEL2, 20, 3, ast2700_clk_div_table),
++ DIVIDER_CLK(SCU1_CLK_I3C, "soc1-i3c", soc1_hpll,
++ SCU1_CLK_SEL2, 23, 3, ast2700_clk_div_table),
++ MUX_CLK(SCU1_CLK_UART0, "uart0clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 0, 1),
++ MUX_CLK(SCU1_CLK_UART1, "uart1clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 1, 1),
++ MUX_CLK(SCU1_CLK_UART2, "uart2clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 2, 1),
++ MUX_CLK(SCU1_CLK_UART3, "uart3clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 3, 1),
++ MUX_CLK(SCU1_CLK_UART5, "uart5clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 5, 1),
++ MUX_CLK(SCU1_CLK_UART6, "uart6clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 6, 1),
++ MUX_CLK(SCU1_CLK_UART7, "uart7clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 7, 1),
++ MUX_CLK(SCU1_CLK_UART8, "uart8clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 8, 1),
++ MUX_CLK(SCU1_CLK_UART9, "uart9clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 9, 1),
++ MUX_CLK(SCU1_CLK_UART10, "uart10clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 10, 1),
++ MUX_CLK(SCU1_CLK_UART11, "uart11clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 11, 1),
++ MUX_CLK(SCU1_CLK_UART12, "uart12clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 12, 1),
++ MUX_CLK(SCU1_CLK_SDMUX, "sdclk-mux", sdio_clk_sels, ARRAY_SIZE(sdio_clk_sels),
++ SCU1_CLK_SEL1, 13, 1),
++ MUX_CLK(SCU1_CLK_UXCLK, "uxclk", ux_clk_sels, ARRAY_SIZE(ux_clk_sels),
++ SCU1_CLK_SEL2, 0, 2),
++ MUX_CLK(SCU1_CLK_HUXCLK, "huxclk", ux_clk_sels, ARRAY_SIZE(ux_clk_sels),
++ SCU1_CLK_SEL2, 3, 2),
++ GATE_CLK(SCU1_CLK_MAC0RCLK, CLK_GATE, "mac0rclk-gate", rmii, SCU1_MAC12_CLK_DLY, 29, 0),
++ GATE_CLK(SCU1_CLK_MAC1RCLK, CLK_GATE, "mac1rclk-gate", rmii, SCU1_MAC12_CLK_DLY, 30, 0),
++ GATE_CLK(SCU1_CLK_GATE_LCLK0, CLK_GATE_ASPEED, "lclk0-gate", NULL,
++ SCU1_CLK_STOP, 0, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_LCLK1, CLK_GATE_ASPEED, "lclk1-gate", NULL,
++ SCU1_CLK_STOP, 1, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_ESPI0CLK, CLK_GATE_ASPEED, "espi0clk-gate", NULL,
++ SCU1_CLK_STOP, 2, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_ESPI1CLK, CLK_GATE_ASPEED, "espi1clk-gate", NULL,
++ SCU1_CLK_STOP, 3, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_SDCLK, CLK_GATE_ASPEED, "sdclk-gate", sdclk,
++ SCU1_CLK_STOP, 4, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_IPEREFCLK, CLK_GATE_ASPEED, "soc1-iperefclk-gate", NULL,
++ SCU1_CLK_STOP, 5, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_REFCLK, CLK_GATE_ASPEED, "soc1-refclk-gate", NULL,
++ SCU1_CLK_STOP, 6, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_LPCHCLK, CLK_GATE_ASPEED, "lpchclk-gate", NULL,
++ SCU1_CLK_STOP, 7, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_MAC0CLK, CLK_GATE_ASPEED, "mac0clk-gate", NULL,
++ SCU1_CLK_STOP, 8, 0),
++ GATE_CLK(SCU1_CLK_GATE_MAC1CLK, CLK_GATE_ASPEED, "mac1clk-gate", NULL,
++ SCU1_CLK_STOP, 9, 0),
++ GATE_CLK(SCU1_CLK_GATE_MAC2CLK, CLK_GATE_ASPEED, "mac2clk-gate", NULL,
++ SCU1_CLK_STOP, 10, 0),
++ GATE_CLK(SCU1_CLK_GATE_UART0CLK, CLK_GATE_ASPEED, "uart0clk-gate", uart0clk,
++ SCU1_CLK_STOP, 11, 0),
++ GATE_CLK(SCU1_CLK_GATE_UART1CLK, CLK_GATE_ASPEED, "uart1clk-gate", uart1clk,
++ SCU1_CLK_STOP, 12, 0),
++ GATE_CLK(SCU1_CLK_GATE_UART2CLK, CLK_GATE_ASPEED, "uart2clk-gate", uart2clk,
++ SCU1_CLK_STOP, 13, 0),
++ GATE_CLK(SCU1_CLK_GATE_UART3CLK, CLK_GATE_ASPEED, "uart3clk-gate", uart3clk,
++ SCU1_CLK_STOP, 14, 0),
++ GATE_CLK(SCU1_CLK_GATE_I2CCLK, CLK_GATE_ASPEED, "i2cclk-gate", NULL, SCU1_CLK_STOP, 15, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C0CLK, CLK_GATE_ASPEED, "i3c0clk-gate", soc1_i3c,
++ SCU1_CLK_STOP, 16, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C1CLK, CLK_GATE_ASPEED, "i3c1clk-gate", soc1_i3c,
++ SCU1_CLK_STOP, 17, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C2CLK, CLK_GATE_ASPEED, "i3c2clk-gate", soc1_i3c,
++ SCU1_CLK_STOP, 18, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C3CLK, CLK_GATE_ASPEED, "i3c3clk-gate", soc1_i3c,
++ SCU1_CLK_STOP, 19, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C4CLK, CLK_GATE_ASPEED, "i3c4clk-gate", soc1_i3c,
++ SCU1_CLK_STOP, 20, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C5CLK, CLK_GATE_ASPEED, "i3c5clk-gate", soc1_i3c,
++ SCU1_CLK_STOP, 21, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C6CLK, CLK_GATE_ASPEED, "i3c6clk-gate", soc1_i3c,
++ SCU1_CLK_STOP, 22, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C7CLK, CLK_GATE_ASPEED, "i3c7clk-gate", soc1_i3c,
++ SCU1_CLK_STOP, 23, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C8CLK, CLK_GATE_ASPEED, "i3c8clk-gate", soc1_i3c,
++ SCU1_CLK_STOP, 24, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C9CLK, CLK_GATE_ASPEED, "i3c9clk-gate", soc1_i3c,
++ SCU1_CLK_STOP, 25, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C10CLK, CLK_GATE_ASPEED, "i3c10clk-gate", soc1_i3c,
++ SCU1_CLK_STOP, 26, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C11CLK, CLK_GATE_ASPEED, "i3c11clk-gate", soc1_i3c,
++ SCU1_CLK_STOP, 27, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C12CLK, CLK_GATE_ASPEED, "i3c12clk-gate", soc1_i3c,
++ SCU1_CLK_STOP, 28, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C13CLK, CLK_GATE_ASPEED, "i3c13clk-gate", soc1_i3c,
++ SCU1_CLK_STOP, 29, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C14CLK, CLK_GATE_ASPEED, "i3c14clk-gate", soc1_i3c,
++ SCU1_CLK_STOP, 30, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C15CLK, CLK_GATE_ASPEED, "i3c15clk-gate", soc1_i3c,
++ SCU1_CLK_STOP, 31, 0),
++ GATE_CLK(SCU1_CLK_GATE_UART5CLK, CLK_GATE_ASPEED, "uart5clk-gate", uart5clk,
++ SCU1_CLK_STOP2, 0, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_UART6CLK, CLK_GATE_ASPEED, "uart6clk-gate", uart6clk,
++ SCU1_CLK_STOP2, 1, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_UART7CLK, CLK_GATE_ASPEED, "uart7clk-gate", uart7clk,
++ SCU1_CLK_STOP2, 2, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_UART8CLK, CLK_GATE_ASPEED, "uart8clk-gate", uart8clk,
++ SCU1_CLK_STOP2, 3, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_UART9CLK, CLK_GATE_ASPEED, "uart9clk-gate", uart9clk,
++ SCU1_CLK_STOP2, 4, 0),
++ GATE_CLK(SCU1_CLK_GATE_UART10CLK, CLK_GATE_ASPEED, "uart10clk-gate", uart10clk,
++ SCU1_CLK_STOP2, 5, 0),
++ GATE_CLK(SCU1_CLK_GATE_UART11CLK, CLK_GATE_ASPEED, "uart11clk-gate", uart11clk,
++ SCU1_CLK_STOP2, 6, 0),
++ GATE_CLK(SCU1_CLK_GATE_UART12CLK, CLK_GATE_ASPEED, "uart12clk-gate", uart12clk,
++ SCU1_CLK_STOP2, 7, 0),
++ GATE_CLK(SCU1_CLK_GATE_FSICLK, CLK_GATE_ASPEED, "fsiclk-gate", NULL, SCU1_CLK_STOP2, 8, 0),
++ GATE_CLK(SCU1_CLK_GATE_LTPIPHYCLK, CLK_GATE_ASPEED, "ltpiphyclk-gate", NULL,
++ SCU1_CLK_STOP2, 9, 0),
++ GATE_CLK(SCU1_CLK_GATE_LTPICLK, CLK_GATE_ASPEED, "ltpiclk-gate", NULL,
++ SCU1_CLK_STOP2, 10, 0),
++ GATE_CLK(SCU1_CLK_GATE_VGALCLK, CLK_GATE_ASPEED, "vgalclk-gate", NULL,
++ SCU1_CLK_STOP2, 11, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_UHCICLK, CLK_GATE_ASPEED, "usbuartclk-gate", NULL,
++ SCU1_CLK_STOP2, 12, 0),
++ GATE_CLK(SCU1_CLK_GATE_CANCLK, CLK_GATE_ASPEED, "canclk-gate", canclk,
++ SCU1_CLK_STOP2, 13, 0),
++ GATE_CLK(SCU1_CLK_GATE_PCICLK, CLK_GATE_ASPEED, "pciclk-gate", NULL,
++ SCU1_CLK_STOP2, 14, 0),
++ GATE_CLK(SCU1_CLK_GATE_SLICLK, CLK_GATE_ASPEED, "soc1-sliclk-gate", NULL,
++ SCU1_CLK_STOP2, 15, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_E2MCLK, CLK_GATE_ASPEED, "soc1-e2m-gate", NULL,
++ SCU1_CLK_STOP2, 16, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_PORTCUSB2CLK, CLK_GATE_ASPEED, "portcusb2-gate", NULL,
++ SCU1_CLK_STOP2, 17, 0),
++ GATE_CLK(SCU1_CLK_GATE_PORTDUSB2CLK, CLK_GATE_ASPEED, "portdusb2-gate", NULL,
++ SCU1_CLK_STOP2, 18, 0),
++ GATE_CLK(SCU1_CLK_GATE_LTPI1TXCLK, CLK_GATE_ASPEED, "ltp1tx-gate", NULL,
++ SCU1_CLK_STOP2, 19, 0),
++};
++
++static const struct ast2700_clk_info ast2700a0_scu1_clk_info[] __initconst = {
++ FIXED_CLK(SCU1_CLKIN, "soc1-clkin", SCU_CLK_25MHZ),
++ PLL_CLK(SCU1_CLK_HPLL, CLK_PLL, "soc1-hpll", soc1_clkin, SCU1_HPLL_PARAM),
++ PLL_CLK(SCU1_CLK_APLL, CLK_PLL, "soc1-apll", soc1_clkin, SCU1_APLL_PARAM),
++ PLL_CLK(SCU1_CLK_DPLL, CLK_PLL, "soc1-dpll", soc1_clkin, SCU1_DPLL_PARAM),
++ PLL_CLK(SCU1_CLK_UARTX, CLK_UART_PLL, "uartxclk", uxclk, SCU1_UXCLK_CTRL),
++ PLL_CLK(SCU1_CLK_HUARTX, CLK_UART_PLL, "huartxclk", huxclk, SCU1_HUXCLK_CTRL),
++ FIXED_FACTOR_CLK(SCU1_CLK_APLL_DIV2, "soc1-apll_div2", soc1_apll, 1, 2),
++ FIXED_FACTOR_CLK(SCU1_CLK_APLL_DIV4, "soc1-apll_div4", soc1_apll, 1, 4),
++ FIXED_FACTOR_CLK(SCU1_CLK_UART13, "uart13clk", huartxclk, 1, 1),
++ FIXED_FACTOR_CLK(SCU1_CLK_UART14, "uart14clk", huartxclk, 1, 1),
++ FIXED_FACTOR_CLK(SCU1_CLK_CAN, "canclk", soc1_apll, 1, 10),
++ DIVIDER_CLK(SCU1_CLK_SDCLK, "sdclk", sdclk_mux,
++ SCU1_CLK_SEL1, 14, 3, ast2700_clk_div_table),
++ DIVIDER_CLK(SCU1_CLK_APB, "soc1-apb", soc1_hpll,
++ SCU1_CLK_SEL1, 18, 3, ast2700_clk_div_table2),
++ DIVIDER_CLK(SCU1_CLK_RMII, "rmii", soc1_hpll,
++ SCU1_CLK_SEL1, 21, 3, ast2700_rmii_div_table),
++ DIVIDER_CLK(SCU1_CLK_RGMII, "rgmii", soc1_hpll,
++ SCU1_CLK_SEL1, 25, 3, ast2700_rgmii_div_table),
++ DIVIDER_CLK(SCU1_CLK_MACHCLK, "machclk", soc1_hpll,
++ SCU1_CLK_SEL1, 29, 3, ast2700_clk_div_table),
++ DIVIDER_CLK(SCU1_CLK_APLL_DIVN, "soc1-apll_divn", soc1_apll,
++ SCU1_CLK_SEL2, 8, 3, ast2700_clk_div_table),
++ DIVIDER_CLK(SCU1_CLK_AHB, "soc1-ahb", soc1_hpll,
++ SCU1_CLK_SEL2, 20, 3, ast2700_clk_div_table),
++ MUX_CLK(SCU1_CLK_UART0, "uart0clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 0, 1),
++ MUX_CLK(SCU1_CLK_UART1, "uart1clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 1, 1),
++ MUX_CLK(SCU1_CLK_UART2, "uart2clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 2, 1),
++ MUX_CLK(SCU1_CLK_UART3, "uart3clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 3, 1),
++ MUX_CLK(SCU1_CLK_UART5, "uart5clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 5, 1),
++ MUX_CLK(SCU1_CLK_UART6, "uart6clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 6, 1),
++ MUX_CLK(SCU1_CLK_UART7, "uart7clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 7, 1),
++ MUX_CLK(SCU1_CLK_UART8, "uart8clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 8, 1),
++ MUX_CLK(SCU1_CLK_UART9, "uart9clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 9, 1),
++ MUX_CLK(SCU1_CLK_UART10, "uart10clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 10, 1),
++ MUX_CLK(SCU1_CLK_UART11, "uart11clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 11, 1),
++ MUX_CLK(SCU1_CLK_UART12, "uart12clk", uartx_clk_sels, ARRAY_SIZE(uartx_clk_sels),
++ SCU1_CLK_SEL1, 12, 1),
++ MUX_CLK(SCU1_CLK_SDMUX, "sdclk-mux", sdio_clk_sels, ARRAY_SIZE(sdio_clk_sels),
++ SCU1_CLK_SEL1, 13, 1),
++ MUX_CLK(SCU1_CLK_UXCLK, "uxclk", ux_clk_sels, ARRAY_SIZE(ux_clk_sels),
++ SCU1_CLK_SEL2, 0, 2),
++ MUX_CLK(SCU1_CLK_HUXCLK, "huxclk", ux_clk_sels, ARRAY_SIZE(ux_clk_sels),
++ SCU1_CLK_SEL2, 3, 2),
++ GATE_CLK(SCU1_CLK_MAC0RCLK, CLK_GATE, "mac0rclk-gate", rmii, SCU1_MAC12_CLK_DLY, 29, 0),
++ GATE_CLK(SCU1_CLK_MAC1RCLK, CLK_GATE, "mac1rclk-gate", rmii, SCU1_MAC12_CLK_DLY, 30, 0),
++ GATE_CLK(SCU1_CLK_GATE_LCLK0, CLK_GATE_ASPEED, "lclk0-gate", NULL,
++ SCU1_CLK_STOP, 0, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_LCLK1, CLK_GATE_ASPEED, "lclk1-gate", NULL,
++ SCU1_CLK_STOP, 1, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_ESPI0CLK, CLK_GATE_ASPEED, "espi0clk-gate", NULL,
++ SCU1_CLK_STOP, 2, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_ESPI1CLK, CLK_GATE_ASPEED, "espi1clk-gate", NULL,
++ SCU1_CLK_STOP, 3, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_SDCLK, CLK_GATE_ASPEED, "sdclk-gate", sdclk,
++ SCU1_CLK_STOP, 4, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_IPEREFCLK, CLK_GATE_ASPEED, "soc1-iperefclk-gate", NULL,
++ SCU1_CLK_STOP, 5, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_REFCLK, CLK_GATE_ASPEED, "soc1-refclk-gate", NULL,
++ SCU1_CLK_STOP, 6, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_LPCHCLK, CLK_GATE_ASPEED, "lpchclk-gate", NULL,
++ SCU1_CLK_STOP, 7, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_MAC0CLK, CLK_GATE_ASPEED, "mac0clk-gate", NULL,
++ SCU1_CLK_STOP, 8, 0),
++ GATE_CLK(SCU1_CLK_GATE_MAC1CLK, CLK_GATE_ASPEED, "mac1clk-gate", NULL,
++ SCU1_CLK_STOP, 9, 0),
++ GATE_CLK(SCU1_CLK_GATE_MAC2CLK, CLK_GATE_ASPEED, "mac2clk-gate", NULL,
++ SCU1_CLK_STOP, 10, 0),
++ GATE_CLK(SCU1_CLK_GATE_UART0CLK, CLK_GATE_ASPEED, "uart0clk-gate", uart0clk,
++ SCU1_CLK_STOP, 11, 0),
++ GATE_CLK(SCU1_CLK_GATE_UART1CLK, CLK_GATE_ASPEED, "uart1clk-gate", uart1clk,
++ SCU1_CLK_STOP, 12, 0),
++ GATE_CLK(SCU1_CLK_GATE_UART2CLK, CLK_GATE_ASPEED, "uart2clk-gate", uart2clk,
++ SCU1_CLK_STOP, 13, 0),
++ GATE_CLK(SCU1_CLK_GATE_UART3CLK, CLK_GATE_ASPEED, "uart3clk-gate", uart3clk,
++ SCU1_CLK_STOP, 14, 0),
++ GATE_CLK(SCU1_CLK_GATE_I2CCLK, CLK_GATE_ASPEED, "i2cclk-gate", NULL, SCU1_CLK_STOP, 15, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C0CLK, CLK_GATE_ASPEED, "i3c0clk-gate", soc1_ahb,
++ SCU1_CLK_STOP, 16, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C1CLK, CLK_GATE_ASPEED, "i3c1clk-gate", soc1_ahb,
++ SCU1_CLK_STOP, 17, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C2CLK, CLK_GATE_ASPEED, "i3c2clk-gate", soc1_ahb,
++ SCU1_CLK_STOP, 18, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C3CLK, CLK_GATE_ASPEED, "i3c3clk-gate", soc1_ahb,
++ SCU1_CLK_STOP, 19, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C4CLK, CLK_GATE_ASPEED, "i3c4clk-gate", soc1_ahb,
++ SCU1_CLK_STOP, 20, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C5CLK, CLK_GATE_ASPEED, "i3c5clk-gate", soc1_ahb,
++ SCU1_CLK_STOP, 21, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C6CLK, CLK_GATE_ASPEED, "i3c6clk-gate", soc1_ahb,
++ SCU1_CLK_STOP, 22, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C7CLK, CLK_GATE_ASPEED, "i3c7clk-gate", soc1_ahb,
++ SCU1_CLK_STOP, 23, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C8CLK, CLK_GATE_ASPEED, "i3c8clk-gate", soc1_ahb,
++ SCU1_CLK_STOP, 24, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C9CLK, CLK_GATE_ASPEED, "i3c9clk-gate", soc1_ahb,
++ SCU1_CLK_STOP, 25, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C10CLK, CLK_GATE_ASPEED, "i3c10clk-gate", soc1_ahb,
++ SCU1_CLK_STOP, 26, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C11CLK, CLK_GATE_ASPEED, "i3c11clk-gate", soc1_ahb,
++ SCU1_CLK_STOP, 27, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C12CLK, CLK_GATE_ASPEED, "i3c12clk-gate", soc1_ahb,
++ SCU1_CLK_STOP, 28, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C13CLK, CLK_GATE_ASPEED, "i3c13clk-gate", soc1_ahb,
++ SCU1_CLK_STOP, 29, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C14CLK, CLK_GATE_ASPEED, "i3c14clk-gate", soc1_ahb,
++ SCU1_CLK_STOP, 30, 0),
++ GATE_CLK(SCU1_CLK_GATE_I3C15CLK, CLK_GATE_ASPEED, "i3c15clk-gate", soc1_ahb,
++ SCU1_CLK_STOP, 31, 0),
++ GATE_CLK(SCU1_CLK_GATE_UART5CLK, CLK_GATE_ASPEED, "uart5clk-gate", uart5clk,
++ SCU1_CLK_STOP2, 0, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_UART6CLK, CLK_GATE_ASPEED, "uart6clk-gate", uart6clk,
++ SCU1_CLK_STOP2, 1, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_UART7CLK, CLK_GATE_ASPEED, "uart7clk-gate", uart7clk,
++ SCU1_CLK_STOP2, 2, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_UART8CLK, CLK_GATE_ASPEED, "uart8clk-gate", uart8clk,
++ SCU1_CLK_STOP2, 3, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_UART9CLK, CLK_GATE_ASPEED, "uart9clk-gate", uart9clk,
++ SCU1_CLK_STOP2, 4, 0),
++ GATE_CLK(SCU1_CLK_GATE_UART10CLK, CLK_GATE_ASPEED, "uart10clk-gate", uart10clk,
++ SCU1_CLK_STOP2, 5, 0),
++ GATE_CLK(SCU1_CLK_GATE_UART11CLK, CLK_GATE_ASPEED, "uart11clk-gate", uart11clk,
++ SCU1_CLK_STOP2, 6, 0),
++ GATE_CLK(SCU1_CLK_GATE_UART12CLK, CLK_GATE_ASPEED, "uart12clk-gate", uart12clk,
++ SCU1_CLK_STOP2, 7, 0),
++ GATE_CLK(SCU1_CLK_GATE_FSICLK, CLK_GATE_ASPEED, "fsiclk-gate", NULL, SCU1_CLK_STOP2, 8, 0),
++ GATE_CLK(SCU1_CLK_GATE_LTPIPHYCLK, CLK_GATE_ASPEED, "ltpiphyclk-gate", NULL,
++ SCU1_CLK_STOP2, 9, 0),
++ GATE_CLK(SCU1_CLK_GATE_LTPICLK, CLK_GATE_ASPEED, "ltpiclk-gate", NULL,
++ SCU1_CLK_STOP2, 10, 0),
++ GATE_CLK(SCU1_CLK_GATE_VGALCLK, CLK_GATE_ASPEED, "vgalclk-gate", NULL,
++ SCU1_CLK_STOP2, 11, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_UHCICLK, CLK_GATE_ASPEED, "usbuartclk-gate", NULL,
++ SCU1_CLK_STOP2, 12, 0),
++ GATE_CLK(SCU1_CLK_GATE_CANCLK, CLK_GATE_ASPEED, "canclk-gate", canclk,
++ SCU1_CLK_STOP2, 13, 0),
++ GATE_CLK(SCU1_CLK_GATE_PCICLK, CLK_GATE_ASPEED, "pciclk-gate", canclk,
++ SCU1_CLK_STOP2, 14, 0),
++ GATE_CLK(SCU1_CLK_GATE_SLICLK, CLK_GATE_ASPEED, "soc1-sliclk-gate", canclk,
++ SCU1_CLK_STOP2, 15, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_E2MCLK, CLK_GATE_ASPEED, "soc1-e2m-gate", NULL,
++ SCU1_CLK_STOP2, 16, CLK_IS_CRITICAL),
++ GATE_CLK(SCU1_CLK_GATE_PORTCUSB2CLK, CLK_GATE_ASPEED, "portcusb2-gate", NULL,
++ SCU1_CLK_STOP2, 17, 0),
++ GATE_CLK(SCU1_CLK_GATE_PORTDUSB2CLK, CLK_GATE_ASPEED, "portdusb2-gate", NULL,
++ SCU1_CLK_STOP2, 18, 0),
++ GATE_CLK(SCU1_CLK_GATE_LTPI1TXCLK, CLK_GATE_ASPEED, "ltp1tx-gate", NULL,
++ SCU1_CLK_STOP2, 19, 0),
++};
++
++static struct clk_hw *ast2700_clk_hw_register_hpll(void __iomem *reg,
++ const char *name, const char *parent_name,
++ struct ast2700_clk_ctrl *clk_ctrl)
++{
++ unsigned int mult, div;
++ u32 val;
++
++ val = readl(clk_ctrl->base + SCU0_HWSTRAP1);
++ if ((readl(clk_ctrl->base) & REVISION_ID) && (val & BIT(3))) {
++ switch ((val & GENMASK(4, 2)) >> 2) {
++ case 2:
++ return devm_clk_hw_register_fixed_rate(clk_ctrl->dev, name, NULL,
++ 0, 1800 * HZ_PER_MHZ);
++ case 3:
++ return devm_clk_hw_register_fixed_rate(clk_ctrl->dev, name, NULL,
++ 0, 1700 * HZ_PER_MHZ);
++ case 6:
++ return devm_clk_hw_register_fixed_rate(clk_ctrl->dev, name, NULL,
++ 0, 1200 * HZ_PER_MHZ);
++ case 7:
++ return devm_clk_hw_register_fixed_rate(clk_ctrl->dev, name, NULL,
++ 0, 800 * HZ_PER_MHZ);
++ default:
++ return ERR_PTR(-EINVAL);
++ }
++ } else if ((val & GENMASK(3, 2)) != 0) {
++ switch ((val & GENMASK(3, 2)) >> 2) {
++ case 1:
++ return devm_clk_hw_register_fixed_rate(clk_ctrl->dev, name, NULL,
++ 0, 1900 * HZ_PER_MHZ);
++ case 2:
++ return devm_clk_hw_register_fixed_rate(clk_ctrl->dev, name, NULL,
++ 0, 1800 * HZ_PER_MHZ);
++ case 3:
++ return devm_clk_hw_register_fixed_rate(clk_ctrl->dev, name, NULL,
++ 0, 1700 * HZ_PER_MHZ);
++ default:
++ return ERR_PTR(-EINVAL);
++ }
++ } else {
++ val = readl(reg);
++
++ if (val & BIT(24)) {
++ /* Pass through mode */
++ mult = 1;
++ div = 1;
++ } else {
++ u32 m = val & 0x1fff;
++ u32 n = (val >> 13) & 0x3f;
++ u32 p = (val >> 19) & 0xf;
++
++ mult = (m + 1) / (2 * (n + 1));
++ div = (p + 1);
++ }
++ }
++
++ return devm_clk_hw_register_fixed_factor(clk_ctrl->dev, name, parent_name, 0, mult, div);
++}
++
++static struct clk_hw *ast2700_clk_hw_register_pll(int clk_idx, void __iomem *reg,
++ const char *name, const char *parent_name,
++ struct ast2700_clk_ctrl *clk_ctrl)
++{
++ int scu = clk_ctrl->clk_data->scu;
++ unsigned int mult, div;
++ u32 val = readl(reg);
++
++ if (val & BIT(24)) {
++ /* Pass through mode */
++ mult = 1;
++ div = 1;
++ } else {
++ u32 m = val & 0x1fff;
++ u32 n = (val >> 13) & 0x3f;
++ u32 p = (val >> 19) & 0xf;
++
++ if (scu) {
++ mult = (m + 1) / (n + 1);
++ div = (p + 1);
++ } else {
++ if (clk_idx == SCU0_CLK_MPLL) {
++ mult = m / (n + 1);
++ div = (p + 1);
++ } else {
++ mult = (m + 1) / (2 * (n + 1));
++ div = (p + 1);
++ }
++ }
++ }
++
++ return devm_clk_hw_register_fixed_factor(clk_ctrl->dev, name, parent_name, 0, mult, div);
++}
++
++static struct clk_hw *ast2700_clk_hw_register_dclk(void __iomem *reg, const char *name,
++ struct ast2700_clk_ctrl *clk_ctrl)
++{
++ unsigned int mult, div, r, n;
++ u32 xdclk;
++ u32 val;
++
++ val = readl(clk_ctrl->base + 0x284);
++ if (val & BIT(29))
++ xdclk = 800 * HZ_PER_MHZ;
++ else
++ xdclk = 1000 * HZ_PER_MHZ;
++
++ val = readl(reg);
++ r = val & GENMASK(15, 0);
++ n = (val >> 16) & GENMASK(15, 0);
++ mult = r;
++ div = 2 * n;
++
++ return devm_clk_hw_register_fixed_rate(clk_ctrl->dev, name, NULL, 0, (xdclk * mult) / div);
++}
++
++static struct clk_hw *ast2700_clk_hw_register_uartpll(void __iomem *reg,
++ const char *name, const char *parent_name,
++ struct ast2700_clk_ctrl *clk_ctrl)
++{
++ unsigned int mult, div;
++ u32 val = readl(reg);
++ u32 r = val & 0xff;
++ u32 n = (val >> 8) & 0x3ff;
++
++ mult = r;
++ div = n * 2;
++
++ return devm_clk_hw_register_fixed_factor(clk_ctrl->dev, name,
++ parent_name, 0, mult, div);
++}
++
++static struct clk_hw *ast2700_clk_hw_register_misc(int clk_idx, void __iomem *reg,
++ const char *name, const char *parent_name,
++ struct ast2700_clk_ctrl *clk_ctrl)
++{
++ u32 div = 0;
++
++ if (clk_idx == SCU0_CLK_MPHY) {
++ div = readl(reg) + 1;
++ } else if (clk_idx == SCU0_CLK_U2PHY_REFCLK) {
++ if (readl(clk_ctrl->base) & REVISION_ID)
++ div = (GET_USB_REFCLK_DIV(readl(reg)) + 1) << 4;
++ else
++ div = (GET_USB_REFCLK_DIV(readl(reg)) + 1) << 1;
++ } else {
++ return ERR_PTR(-EINVAL);
++ }
++
++ return devm_clk_hw_register_fixed_factor(clk_ctrl->dev, name,
++ parent_name, 0, 1, div);
++}
++
++static int ast2700_clk_is_enabled(struct clk_hw *hw)
++{
++ struct clk_gate *gate = to_clk_gate(hw);
++ u32 clk = BIT(gate->bit_idx);
++ u32 reg;
++
++ reg = readl(gate->reg);
++
++ return !(reg & clk);
++}
++
++static int ast2700_clk_enable(struct clk_hw *hw)
++{
++ struct clk_gate *gate = to_clk_gate(hw);
++ u32 clk = BIT(gate->bit_idx);
++
++ if (readl(gate->reg) & clk)
++ writel(clk, gate->reg + 0x04);
++
++ return 0;
++}
++
++static void ast2700_clk_disable(struct clk_hw *hw)
++{
++ struct clk_gate *gate = to_clk_gate(hw);
++ u32 clk = BIT(gate->bit_idx);
++
++ /* Clock is set to enable, so use write to set register */
++ writel(clk, gate->reg);
++}
++
++static const struct clk_ops ast2700_clk_gate_ops = {
++ .enable = ast2700_clk_enable,
++ .disable = ast2700_clk_disable,
++ .is_enabled = ast2700_clk_is_enabled,
++};
++
++static struct clk_hw *ast2700_clk_hw_register_gate(struct device *dev, const char *name,
++ const struct clk_parent_data *parent,
++ void __iomem *reg, u8 clock_idx,
++ unsigned long clk_gate_flags, spinlock_t *lock)
++{
++ struct clk_gate *gate;
++ struct clk_hw *hw;
++ struct clk_init_data init;
++ int ret = -EINVAL;
++
++ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
++ if (!gate)
++ return ERR_PTR(-ENOMEM);
++
++ init.name = name;
++ init.ops = &ast2700_clk_gate_ops;
++ init.flags = clk_gate_flags;
++ init.parent_names = parent ? &parent->name : NULL;
++ init.num_parents = parent ? 1 : 0;
++
++ gate->reg = reg;
++ gate->bit_idx = clock_idx;
++ gate->flags = 0;
++ gate->lock = lock;
++ gate->hw.init = &init;
++
++ hw = &gate->hw;
++ ret = clk_hw_register(dev, hw);
++ if (ret) {
++ kfree(gate);
++ hw = ERR_PTR(ret);
++ }
++
++ return hw;
++}
++
++static void ast2700_soc1_configure_mac01_clk(struct ast2700_clk_ctrl *clk_ctrl)
++{
++ struct device_node *np = clk_ctrl->dev->of_node;
++ struct mac_delay_config mac_cfg;
++ u32 reg[3];
++ int ret;
++
++ if (readl(clk_ctrl->base + SCU1_REVISION_ID) & REVISION_ID)
++ reg[0] = AST2700_DEF_MAC12_DELAY_1G_A1;
++ else
++ reg[0] = AST2700_DEF_MAC12_DELAY_1G_A0;
++ reg[1] = AST2700_DEF_MAC12_DELAY_100M;
++ reg[2] = AST2700_DEF_MAC12_DELAY_10M;
++
++ ret = of_property_read_u32_array(np, "mac0-clk-delay", (u32 *)&mac_cfg,
++ sizeof(mac_cfg) / sizeof(u32));
++ if (!ret) {
++ reg[0] &= ~(MAC_CLK_1G_INPUT_DELAY_1 | MAC_CLK_1G_OUTPUT_DELAY_1);
++ reg[0] |= FIELD_PREP(MAC_CLK_1G_INPUT_DELAY_1, mac_cfg.rx_delay_1000) |
++ FIELD_PREP(MAC_CLK_1G_OUTPUT_DELAY_1, mac_cfg.tx_delay_1000);
++
++ reg[1] &= ~(MAC_CLK_100M_10M_INPUT_DELAY_1 | MAC_CLK_100M_10M_OUTPUT_DELAY_1);
++ reg[1] |= FIELD_PREP(MAC_CLK_100M_10M_INPUT_DELAY_1, mac_cfg.rx_delay_100) |
++ FIELD_PREP(MAC_CLK_100M_10M_OUTPUT_DELAY_1, mac_cfg.tx_delay_100);
++
++ reg[2] &= ~(MAC_CLK_100M_10M_INPUT_DELAY_1 | MAC_CLK_100M_10M_OUTPUT_DELAY_1);
++ reg[2] |= FIELD_PREP(MAC_CLK_100M_10M_INPUT_DELAY_1, mac_cfg.rx_delay_10) |
++ FIELD_PREP(MAC_CLK_100M_10M_OUTPUT_DELAY_1, mac_cfg.tx_delay_10);
++ }
++
++ ret = of_property_read_u32_array(np, "mac1-clk-delay", (u32 *)&mac_cfg,
++ sizeof(mac_cfg) / sizeof(u32));
++ if (!ret) {
++ reg[0] &= ~(MAC_CLK_1G_INPUT_DELAY_2 | MAC_CLK_1G_OUTPUT_DELAY_2);
++ reg[0] |= FIELD_PREP(MAC_CLK_1G_INPUT_DELAY_2, mac_cfg.rx_delay_1000) |
++ FIELD_PREP(MAC_CLK_1G_OUTPUT_DELAY_2, mac_cfg.tx_delay_1000);
++
++ reg[1] &= ~(MAC_CLK_100M_10M_INPUT_DELAY_2 | MAC_CLK_100M_10M_OUTPUT_DELAY_2);
++ reg[1] |= FIELD_PREP(MAC_CLK_100M_10M_INPUT_DELAY_2, mac_cfg.rx_delay_100) |
++ FIELD_PREP(MAC_CLK_100M_10M_OUTPUT_DELAY_2, mac_cfg.tx_delay_100);
++
++ reg[2] &= ~(MAC_CLK_100M_10M_INPUT_DELAY_2 | MAC_CLK_100M_10M_OUTPUT_DELAY_2);
++ reg[2] |= FIELD_PREP(MAC_CLK_100M_10M_INPUT_DELAY_2, mac_cfg.rx_delay_10) |
++ FIELD_PREP(MAC_CLK_100M_10M_OUTPUT_DELAY_2, mac_cfg.tx_delay_10);
++ }
++
++ reg[0] |= (readl(clk_ctrl->base + SCU1_MAC12_CLK_DLY) & ~GENMASK(25, 0));
++ writel(reg[0], clk_ctrl->base + SCU1_MAC12_CLK_DLY);
++ writel(reg[1], clk_ctrl->base + SCU1_MAC12_CLK_DLY_100M);
++ writel(reg[2], clk_ctrl->base + SCU1_MAC12_CLK_DLY_10M);
++}
++
++static void ast2700_soc1_configure_i3c_clk(struct ast2700_clk_ctrl *clk_ctrl)
++{
++ if (readl(clk_ctrl->base + SCU1_REVISION_ID) & REVISION_ID)
++ /* I3C 250MHz = HPLL/4 */
++ writel((readl(clk_ctrl->base + SCU1_CLK_SEL2) &
++ ~SCU1_CLK_I3C_DIV_MASK) |
++ FIELD_PREP(SCU1_CLK_I3C_DIV_MASK,
++ SCU1_CLK_I3C_DIV(4)),
++ clk_ctrl->base + SCU1_CLK_SEL2);
++}
++
++static int ast2700_soc_clk_probe(struct platform_device *pdev)
++{
++ struct ast2700_clk_data *clk_data;
++ struct ast2700_clk_ctrl *clk_ctrl;
++ struct clk_hw_onecell_data *clk_hw_data;
++ struct device *dev = &pdev->dev;
++ u32 uart_clk_source = 0;
++ void __iomem *clk_base;
++ struct clk_hw **hws;
++ char *reset_name;
++ int ret;
++ int i;
++
++ clk_ctrl = devm_kzalloc(dev, sizeof(*clk_ctrl), GFP_KERNEL);
++ if (!clk_ctrl)
++ return -ENOMEM;
++ clk_ctrl->dev = dev;
++ dev_set_drvdata(&pdev->dev, clk_ctrl);
++
++ spin_lock_init(&clk_ctrl->lock);
++
++ clk_base = devm_platform_ioremap_resource(pdev, 0);
++ if (IS_ERR(clk_base))
++ return PTR_ERR(clk_base);
++
++ clk_ctrl->base = clk_base;
++
++ clk_data = (struct ast2700_clk_data *)device_get_match_data(dev);
++ if (!clk_data)
++ return -ENODEV;
++
++ clk_ctrl->clk_data = clk_data;
++ reset_name = devm_kasprintf(dev, GFP_KERNEL, "reset%d", clk_data->scu);
++
++ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, clk_data->nr_clks),
++ GFP_KERNEL);
++ if (!clk_hw_data)
++ return -ENOMEM;
++
++ clk_hw_data->num = clk_data->nr_clks;
++ hws = clk_hw_data->hws;
++
++ if (clk_data->scu) {
++ of_property_read_u32(dev->of_node, "uart-clk-source", &uart_clk_source);
++ if (uart_clk_source) {
++ u32 val = readl(clk_base + SCU1_CLK_SEL1) & ~GENMASK(12, 0);
++
++ uart_clk_source &= GENMASK(12, 0);
++ writel(val | uart_clk_source, clk_base + SCU1_CLK_SEL1);
++ }
++
++ ast2700_soc1_configure_mac01_clk(clk_ctrl);
++ ast2700_soc1_configure_i3c_clk(clk_ctrl);
++ }
++
++ for (i = 0; i < clk_data->nr_clks; i++) {
++ const struct ast2700_clk_info *clk = &clk_data->clk_info[i];
++ void __iomem *reg = clk_ctrl->base + clk->reg;
++
++ if (clk->type == CLK_FIXED) {
++ const struct ast2700_clk_fixed_rate_data *fixed_rate = &clk->data.rate;
++
++ hws[i] = devm_clk_hw_register_fixed_rate(dev, clk->name, NULL, 0,
++ fixed_rate->fixed_rate);
++ } else if (clk->type == CLK_FIXED_FACTOR) {
++ const struct ast2700_clk_fixed_factor_data *factor = &clk->data.factor;
++
++ hws[i] = devm_clk_hw_register_fixed_factor(dev, clk->name,
++ factor->parent->name,
++ 0, factor->mult, factor->div);
++ } else if (clk->type == DCLK_FIXED) {
++ const struct ast2700_clk_pll_data *pll = &clk->data.pll;
++
++ reg = clk_ctrl->base + pll->reg;
++ hws[i] = ast2700_clk_hw_register_dclk(reg, clk->name, clk_ctrl);
++ } else if (clk->type == CLK_HPLL) {
++ const struct ast2700_clk_pll_data *pll = &clk->data.pll;
++
++ reg = clk_ctrl->base + pll->reg;
++ hws[i] = ast2700_clk_hw_register_hpll(reg, clk->name,
++ pll->parent->name, clk_ctrl);
++ } else if (clk->type == CLK_PLL) {
++ const struct ast2700_clk_pll_data *pll = &clk->data.pll;
++
++ reg = clk_ctrl->base + pll->reg;
++ hws[i] = ast2700_clk_hw_register_pll(i, reg, clk->name,
++ pll->parent->name, clk_ctrl);
++ } else if (clk->type == CLK_UART_PLL) {
++ const struct ast2700_clk_pll_data *pll = &clk->data.pll;
++
++ reg = clk_ctrl->base + pll->reg;
++ hws[i] = ast2700_clk_hw_register_uartpll(reg, clk->name,
++ pll->parent->name, clk_ctrl);
++ } else if (clk->type == CLK_MUX) {
++ const struct ast2700_clk_mux_data *mux = &clk->data.mux;
++
++ reg = clk_ctrl->base + mux->reg;
++ hws[i] = devm_clk_hw_register_mux_parent_data_table(dev, clk->name,
++ mux->parents,
++ mux->num_parents, 0,
++ reg, mux->bit_shift,
++ mux->bit_width, 0,
++ NULL, &clk_ctrl->lock);
++ } else if (clk->type == CLK_MISC) {
++ const struct ast2700_clk_pll_data *misc = &clk->data.pll;
++
++ reg = clk_ctrl->base + misc->reg;
++ hws[i] = ast2700_clk_hw_register_misc(i, reg, clk->name,
++ misc->parent->name, clk_ctrl);
++ } else if (clk->type == CLK_DIVIDER) {
++ const struct ast2700_clk_div_data *div = &clk->data.div;
++
++ reg = clk_ctrl->base + div->reg;
++ hws[i] = devm_clk_hw_register_divider_table(dev, clk->name,
++ div->parent->name, 0,
++ reg, div->bit_shift,
++ div->bit_width, 0,
++ div->div_table,
++ &clk_ctrl->lock);
++ } else if (clk->type == CLK_GATE_ASPEED) {
++ const struct ast2700_clk_gate_data *gate = &clk->data.gate;
++
++ reg = clk_ctrl->base + gate->reg;
++ hws[i] = ast2700_clk_hw_register_gate(dev, clk->name, gate->parent,
++ reg, gate->bit, gate->flags, 0);
++
++ } else {
++ const struct ast2700_clk_gate_data *gate = &clk->data.gate;
++
++ reg = clk_ctrl->base + gate->reg;
++ hws[i] = devm_clk_hw_register_gate_parent_data(dev, clk->name, gate->parent,
++ 0, reg, clk->clk_idx, 0,
++ &clk_ctrl->lock);
++ }
++
++ if (IS_ERR(hws[i]))
++ return PTR_ERR(hws[i]);
++ }
++
++ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_hw_data);
++ if (ret)
++ return ret;
++
++ return aspeed_reset_controller_register(dev, clk_base, reset_name);
++}
++
++static const struct ast2700_clk_data ast2700_clk0_data = {
++ .scu = 0,
++ .nr_clks = ARRAY_SIZE(ast2700_scu0_clk_info),
++ .clk_info = ast2700_scu0_clk_info,
++};
++
++static const struct ast2700_clk_data ast2700a0_clk0_data = {
++ .scu = 0,
++ .nr_clks = ARRAY_SIZE(ast2700a0_scu0_clk_info),
++ .clk_info = ast2700a0_scu0_clk_info,
++};
++
++static const struct ast2700_clk_data ast2700_clk1_data = {
++ .scu = 1,
++ .nr_clks = ARRAY_SIZE(ast2700_scu1_clk_info),
++ .clk_info = ast2700_scu1_clk_info,
++};
++
++static const struct ast2700_clk_data ast2700a0_clk1_data = {
++ .scu = 1,
++ .nr_clks = ARRAY_SIZE(ast2700a0_scu1_clk_info),
++ .clk_info = ast2700a0_scu1_clk_info,
++};
++
++static const struct of_device_id ast2700_scu_match[] = {
++ { .compatible = "aspeed,ast2700-scu0", .data = &ast2700_clk0_data },
++ { .compatible = "aspeed,ast2700a0-scu0", .data = &ast2700a0_clk0_data },
++ { .compatible = "aspeed,ast2700-scu1", .data = &ast2700_clk1_data },
++ { .compatible = "aspeed,ast2700a0-scu1", .data = &ast2700a0_clk1_data },
++ { /* sentinel */ }
++};
++
++MODULE_DEVICE_TABLE(of, ast2700_scu_match);
++
++static struct platform_driver ast2700_scu_driver = {
++ .probe = ast2700_soc_clk_probe,
++ .driver = {
++ .name = "clk-ast2700",
++ .of_match_table = ast2700_scu_match,
++ },
++};
++
++static int __init clk_ast2700_init(void)
++{
++ return platform_driver_register(&ast2700_scu_driver);
++}
++arch_initcall(clk_ast2700_init);
+--
+2.34.1
+
diff --git a/recipes-kernel/linux/files/0006-syscon0_syscon1_A0_A1.patch b/recipes-kernel/linux/files/0006-syscon0_syscon1_A0_A1.patch
deleted file mode 100644
index 999e51c..0000000
--- a/recipes-kernel/linux/files/0006-syscon0_syscon1_A0_A1.patch
+++ /dev/null
@@ -1,2052 +0,0 @@
-From a0c01a54bf1206a5f5d6b43c068151f9d98e6a75 Mon Sep 17 00:00:00 2001
-From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
-Date: Thu, 14 Nov 2024 16:48:50 +0800
-Subject: [PATCH] syscon0_syscon1_A0_A1
-
-clk, silicon-id, scu_ic0-scu_ic3,
-
-Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
----
- drivers/clk/Makefile | 1 +
- drivers/clk/clk-aspeed.c | 1 +
- drivers/clk/clk-ast2700.c | 1652 +++++++++++++++++++++++++++
- drivers/irqchip/irq-aspeed-scu-ic.c | 241 +++-
- drivers/soc/aspeed/aspeed-socinfo.c | 16 +-
- 5 files changed, 1864 insertions(+), 47 deletions(-)
- create mode 100644 drivers/clk/clk-ast2700.c
-
-diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
-index e2cbc6cceb8c..39667668a38f 100644
---- a/drivers/clk/Makefile
-+++ b/drivers/clk/Makefile
-@@ -37,6 +37,7 @@ obj-$(CONFIG_COMMON_CLK_FSL_SAI) += clk-fsl-sai.o
- obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o
- obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o
- obj-$(CONFIG_MACH_ASPEED_G6) += clk-ast2600.o
-+obj-$(CONFIG_MACH_ASPEED_G7) += clk-ast2700.o
- obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
- obj-$(CONFIG_CLK_HSDK) += clk-hsdk-pll.o
- obj-$(CONFIG_COMMON_CLK_K210) += clk-k210.o
-diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
-index ff84191d0fe8..27b99cdfad41 100644
---- a/drivers/clk/clk-aspeed.c
-+++ b/drivers/clk/clk-aspeed.c
-@@ -278,6 +278,7 @@ static const u8 aspeed_resets[] = {
- [ASPEED_RESET_PECI] = 10,
- [ASPEED_RESET_I2C] = 2,
- [ASPEED_RESET_AHB] = 1,
-+ [ASPEED_RESET_VIDEO] = 6,
-
- /*
- * SCUD4 resets start at an offset to separate them from
-diff --git a/drivers/clk/clk-ast2700.c b/drivers/clk/clk-ast2700.c
-new file mode 100644
-index 000000000000..66bb1dbb92c6
---- /dev/null
-+++ b/drivers/clk/clk-ast2700.c
-@@ -0,0 +1,1652 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (c) 2024 ASPEED Technology Inc.
-+ * Author: Ryan Chen <ryan_chen@aspeedtech.com>
-+ */
-+
-+#include <linux/auxiliary_bus.h>
-+#include <linux/clk-provider.h>
-+#include <linux/of_address.h>
-+#include <linux/of_device.h>
-+#include <linux/slab.h>
-+
-+#include <dt-bindings/clock/aspeed,ast2700-scu.h>
-+
-+#define SCU_CLK_12MHZ 12000000
-+#define SCU_CLK_24MHZ 24000000
-+#define SCU_CLK_25MHZ 25000000
-+#define SCU_CLK_192MHZ 192000000
-+
-+/* SOC0 */
-+#define SCU0_HWSTRAP1 0x010
-+#define SCU0_CLK_STOP 0x240
-+#define SCU0_CLK_SEL1 0x280
-+#define SCU0_CLK_SEL2 0x284
-+#define GET_USB_REFCLK_DIV(x) ((GENMASK(23, 20) & (x)) >> 20)
-+#define UART_DIV13_EN BIT(30)
-+#define SCU0_HPLL_PARAM 0x300
-+#define SCU0_DPLL_PARAM 0x308
-+#define SCU0_MPLL_PARAM 0x310
-+#define SCU0_D0CLK_PARAM 0x320
-+#define SCU0_D1CLK_PARAM 0x330
-+#define SCU0_CRT0CLK_PARAM 0x340
-+#define SCU0_CRT1CLK_PARAM 0x350
-+#define SCU0_MPHYCLK_PARAM 0x360
-+
-+/* SOC1 */
-+#define SCU1_REVISION_ID 0x0
-+#define REVISION_ID GENMASK(23, 16)
-+#define SCU1_CLK_STOP 0x240
-+#define SCU1_CLK_STOP2 0x260
-+#define SCU1_CLK_SEL1 0x280
-+#define SCU1_CLK_SEL2 0x284
-+#define UXCLK_MASK GENMASK(1, 0)
-+#define HUXCLK_MASK GENMASK(4, 3)
-+#define SCU1_HPLL_PARAM 0x300
-+#define SCU1_APLL_PARAM 0x310
-+#define SCU1_DPLL_PARAM 0x320
-+#define SCU1_UXCLK_CTRL 0x330
-+#define SCU1_HUXCLK_CTRL 0x334
-+#define SCU1_MAC12_CLK_DLY 0x390
-+#define SCU1_MAC12_CLK_DLY_100M 0x394
-+#define SCU1_MAC12_CLK_DLY_10M 0x398
-+
-+/*
-+ * MAC Clock Delay settings
-+ */
-+#define MAC_CLK_RMII1_50M_RCLK_O_CTRL BIT(30)
-+#define MAC_CLK_RMII1_50M_RCLK_O_DIS 0
-+#define MAC_CLK_RMII1_50M_RCLK_O_EN 1
-+#define MAC_CLK_RMII0_50M_RCLK_O_CTRL BIT(29)
-+#define MAC_CLK_RMII0_5M_RCLK_O_DIS 0
-+#define MAC_CLK_RMII0_5M_RCLK_O_EN 1
-+#define MAC_CLK_RMII_TXD_FALLING_2 BIT(27)
-+#define MAC_CLK_RMII_TXD_FALLING_1 BIT(26)
-+#define MAC_CLK_RXCLK_INV_2 BIT(25)
-+#define MAC_CLK_RXCLK_INV_1 BIT(24)
-+#define MAC_CLK_1G_INPUT_DELAY_2 GENMASK(23, 18)
-+#define MAC_CLK_1G_INPUT_DELAY_1 GENMASK(17, 12)
-+#define MAC_CLK_1G_OUTPUT_DELAY_2 GENMASK(11, 6)
-+#define MAC_CLK_1G_OUTPUT_DELAY_1 GENMASK(5, 0)
-+
-+#define MAC_CLK_100M_10M_RESERVED GENMASK(31, 26)
-+#define MAC_CLK_100M_10M_RXCLK_INV_2 BIT(25)
-+#define MAC_CLK_100M_10M_RXCLK_INV_1 BIT(24)
-+#define MAC_CLK_100M_10M_INPUT_DELAY_2 GENMASK(23, 18)
-+#define MAC_CLK_100M_10M_INPUT_DELAY_1 GENMASK(17, 12)
-+#define MAC_CLK_100M_10M_OUTPUT_DELAY_2 GENMASK(11, 6)
-+#define MAC_CLK_100M_10M_OUTPUT_DELAY_1 GENMASK(5, 0)
-+
-+#define AST2700_DEF_MAC12_DELAY_1G 0x00CF4D75
-+#define AST2700_DEF_MAC12_DELAY_100M 0x00410410
-+#define AST2700_DEF_MAC12_DELAY_10M 0x00410410
-+
-+struct mac_delay_config {
-+ u32 tx_delay_1000;
-+ u32 rx_delay_1000;
-+ u32 tx_delay_100;
-+ u32 rx_delay_100;
-+ u32 tx_delay_10;
-+ u32 rx_delay_10;
-+};
-+
-+enum {
-+ CLK_MUX,
-+ CLK_PLL,
-+ CLK_GATE,
-+ CLK_MISC,
-+ CLK_FIXED,
-+ CLK_DIVIDER,
-+ CLK_UART_PLL,
-+ CLK_DIV_TABLE,
-+ CLK_FIXED_FACTOR,
-+};
-+
-+struct ast2700_clk_info {
-+ const char *name;
-+ const char * const *parent_names;
-+ const struct clk_div_table *div_table;
-+ unsigned long fixed_rate;
-+ unsigned int mult;
-+ unsigned int div;
-+ u32 reg;
-+ u32 flags;
-+ u32 type;
-+ u8 clk_idx;
-+ u8 bit_shift;
-+ u8 bit_width;
-+ u8 num_parents;
-+};
-+
-+struct ast2700_clk_data {
-+ struct ast2700_clk_info const *clk_info;
-+ unsigned int nr_clks;
-+ const int scu;
-+};
-+
-+struct ast2700_clk_ctrl {
-+ const struct ast2700_clk_data *clk_data;
-+ struct device *dev;
-+ void __iomem *base;
-+ spinlock_t lock; /* clk lock */
-+};
-+
-+static const struct clk_div_table ast2700_rgmii_div_table[] = {
-+ { 0x0, 4 },
-+ { 0x1, 4 },
-+ { 0x2, 6 },
-+ { 0x3, 8 },
-+ { 0x4, 10 },
-+ { 0x5, 12 },
-+ { 0x6, 14 },
-+ { 0x7, 16 },
-+ { 0 }
-+};
-+
-+static const struct clk_div_table ast2700_rmii_div_table[] = {
-+ { 0x0, 8 },
-+ { 0x1, 8 },
-+ { 0x2, 12 },
-+ { 0x3, 16 },
-+ { 0x4, 20 },
-+ { 0x5, 24 },
-+ { 0x6, 28 },
-+ { 0x7, 32 },
-+ { 0 }
-+};
-+
-+static const struct clk_div_table ast2700_clk_div_table[] = {
-+ { 0x0, 2 },
-+ { 0x1, 2 },
-+ { 0x2, 3 },
-+ { 0x3, 4 },
-+ { 0x4, 5 },
-+ { 0x5, 6 },
-+ { 0x6, 7 },
-+ { 0x7, 8 },
-+ { 0 }
-+};
-+
-+static const struct clk_div_table ast2700_clk_div_table2[] = {
-+ { 0x0, 2 },
-+ { 0x1, 4 },
-+ { 0x2, 6 },
-+ { 0x3, 8 },
-+ { 0x4, 10 },
-+ { 0x5, 12 },
-+ { 0x6, 14 },
-+ { 0x7, 16 },
-+ { 0 }
-+};
-+
-+static const struct clk_div_table ast2700_clk_uart_div_table[] = {
-+ { 0x0, 1 },
-+ { 0x1, 13 },
-+ { 0 }
-+};
-+
-+static const struct ast2700_clk_info ast2700_scu0_clk_info[] __initconst = {
-+ [SCU0_CLKIN] = {
-+ .type = CLK_FIXED,
-+ .name = "soc0-clkin",
-+ .fixed_rate = SCU_CLK_25MHZ,
-+ },
-+ [SCU0_CLK_24M] = {
-+ .type = CLK_FIXED,
-+ .name = "soc0-clk24Mhz",
-+ .fixed_rate = SCU_CLK_24MHZ,
-+ },
-+ [SCU0_CLK_192M] = {
-+ .type = CLK_FIXED,
-+ .name = "soc0-clk192Mhz",
-+ .fixed_rate = SCU_CLK_192MHZ,
-+ },
-+ [SCU0_CLK_HPLL] = {
-+ .type = CLK_PLL,
-+ .name = "soc0-hpll",
-+ .parent_names = (const char *[]){ "soc0-clkin", },
-+ .reg = SCU0_HPLL_PARAM,
-+ },
-+ [SCU0_CLK_HPLL_DIV2] = {
-+ .type = CLK_FIXED_FACTOR,
-+ .name = "soc0-hpll_div2",
-+ .parent_names = (const char *[]){ "soc0-hpll", },
-+ .mult = 1,
-+ .div = 2,
-+ },
-+ [SCU0_CLK_HPLL_DIV4] = {
-+ .type = CLK_FIXED_FACTOR,
-+ .name = "soc0-hpll_div4",
-+ .parent_names = (const char *[]){ "soc0-hpll", },
-+ .mult = 1,
-+ .div = 4,
-+ },
-+ [SCU0_CLK_HPLL_DIV_AHB] = {
-+ .type = CLK_DIV_TABLE,
-+ .name = "soc0-hpll_div_ahb",
-+ .parent_names = (const char *[]){ "soc0-hpll", },
-+ .reg = SCU0_HWSTRAP1,
-+ .bit_shift = 5,
-+ .bit_width = 2,
-+ .div_table = ast2700_clk_div_table,
-+ },
-+ [SCU0_CLK_DPLL] = {
-+ .type = CLK_PLL,
-+ .name = "dpll",
-+ .parent_names = (const char *[]){ "soc0-clkin", },
-+ .reg = SCU0_DPLL_PARAM,
-+ },
-+ [SCU0_CLK_MPLL] = {
-+ .type = CLK_PLL,
-+ .name = "soc0-mpll",
-+ .parent_names = (const char *[]){ "soc0-clkin", },
-+ .reg = SCU0_MPLL_PARAM,
-+ },
-+ [SCU0_CLK_MPLL_DIV2] = {
-+ .type = CLK_FIXED_FACTOR,
-+ .name = "soc0-mpll_div2",
-+ .parent_names = (const char *[]){ "soc0-mpll", },
-+ .mult = 1,
-+ .div = 2,
-+ },
-+ [SCU0_CLK_MPLL_DIV4] = {
-+ .type = CLK_FIXED_FACTOR,
-+ .name = "soc0-mpll_div4",
-+ .parent_names = (const char *[]){ "soc0-mpll", },
-+ .mult = 1,
-+ .div = 4,
-+ },
-+ [SCU0_CLK_MPLL_DIV8] = {
-+ .type = CLK_FIXED_FACTOR,
-+ .name = "soc0-mpll_div8",
-+ .parent_names = (const char *[]){ "soc0-mpll", },
-+ .mult = 1,
-+ .div = 8,
-+ },
-+ [SCU0_CLK_MPLL_DIV_AHB] = {
-+ .type = CLK_DIV_TABLE,
-+ .name = "soc0-mpll_div_ahb",
-+ .parent_names = (const char *[]){ "soc0-mpll", },
-+ .reg = SCU0_HWSTRAP1,
-+ .bit_shift = 5,
-+ .bit_width = 2,
-+ .div_table = ast2700_clk_div_table,
-+ },
-+ [SCU0_CLK_D0] = {
-+ .type = CLK_PLL,
-+ .name = "d0clk",
-+ .parent_names = (const char *[]){ "soc0-clkin", },
-+ .reg = SCU0_D0CLK_PARAM,
-+ },
-+ [SCU0_CLK_D1] = {
-+ .type = CLK_PLL,
-+ .name = "d1clk",
-+ .parent_names = (const char *[]){ "soc0-clkin", },
-+ .reg = SCU0_D1CLK_PARAM,
-+ },
-+ [SCU0_CLK_CRT0] = {
-+ .type = CLK_PLL,
-+ .name = "crt0clk",
-+ .parent_names = (const char *[]){ "soc0-clkin", },
-+ .reg = SCU0_CRT0CLK_PARAM,
-+ },
-+ [SCU0_CLK_CRT1] = {
-+ .type = CLK_PLL,
-+ .name = "crt1clk",
-+ .parent_names = (const char *[]){ "soc0-clkin", },
-+ .reg = SCU0_CRT1CLK_PARAM,
-+ },
-+ [SCU0_CLK_MPHY] = {
-+ .type = CLK_MISC,
-+ .name = "mphyclk",
-+ .parent_names = (const char *[]){ "soc0-hpll", },
-+ .reg = SCU0_MPHYCLK_PARAM,
-+ },
-+ [SCU0_CLK_PSP] = {
-+ .type = CLK_MUX,
-+ .name = "pspclk",
-+ .parent_names = (const char *[]){"soc0-mpll", "soc0-hpll", },
-+ .num_parents = 2,
-+ .reg = SCU0_HWSTRAP1,
-+ .bit_shift = 4,
-+ .bit_width = 1,
-+ },
-+ [SCU0_CLK_AXI0] = {
-+ .type = CLK_FIXED_FACTOR,
-+ .name = "axi0clk",
-+ .parent_names = (const char *[]){"pspclk", },
-+ .mult = 1,
-+ .div = 2,
-+ },
-+ [SCU0_CLK_AHB] = {
-+ .type = CLK_MUX,
-+ .name = "soc0-ahb",
-+ .parent_names = (const char *[]){"soc0-mpll_div_ahb", "soc0-hspll_div_ahb", },
-+ .num_parents = 2,
-+ .reg = SCU0_HWSTRAP1,
-+ .bit_shift = 7,
-+ .bit_width = 1,
-+ },
-+ [SCU0_CLK_AXI1] = {
-+ .type = CLK_FIXED_FACTOR,
-+ .name = "axi1clk",
-+ .parent_names = (const char *[]){ "soc0-ahb", },
-+ .mult = 1,
-+ .div = 2,
-+ },
-+ [SCU0_CLK_APB] = {
-+ .type = CLK_DIV_TABLE,
-+ .name = "soc0-apb",
-+ .parent_names = (const char *[]){ "axi0clk", },
-+ .reg = SCU0_CLK_SEL1,
-+ .bit_shift = 23,
-+ .bit_width = 3,
-+ .div_table = ast2700_clk_div_table2,
-+ },
-+ [SCU0_CLK_GATE_MCLK] = {
-+ .type = CLK_GATE,
-+ .name = "mclk",
-+ .parent_names = (const char *[]){ "soc0-mpll", },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 0,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU0_CLK_GATE_ECLK] = {
-+ .type = CLK_GATE,
-+ .name = "eclk",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 1,
-+ },
-+ [SCU0_CLK_GATE_2DCLK] = {
-+ .type = CLK_GATE,
-+ .name = "gclk",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 2,
-+ },
-+ [SCU0_CLK_GATE_VCLK] = {
-+ .type = CLK_GATE,
-+ .name = "vclk",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 3,
-+ },
-+ [SCU0_CLK_GATE_BCLK] = {
-+ .type = CLK_GATE,
-+ .name = "bclk",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 4,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU0_CLK_GATE_VGA0CLK] = {
-+ .type = CLK_GATE,
-+ .name = "d1clk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 5,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU0_CLK_GATE_REFCLK] = {
-+ .type = CLK_GATE,
-+ .name = "soc0-refclk-gate",
-+ .parent_names = (const char *[]){ "soc0-clkin", },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 6,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU0_CLK_U2PHY_REFCLK] = {
-+ .type = CLK_MISC,
-+ .name = "xhci_ref_clk",
-+ .parent_names = (const char *[]){ "soc0-mpll_div8", },
-+ .reg = SCU0_CLK_SEL2,
-+ },
-+ [SCU0_CLK_U2PHY_CLK12M] = {
-+ .type = CLK_FIXED,
-+ .name = "xhci_suspend_clk",
-+ .parent_names = (const char *[]){ },
-+ .fixed_rate = SCU_CLK_12MHZ,
-+ },
-+ [SCU0_CLK_GATE_PORTBUSB2CLK] = {
-+ .type = CLK_GATE,
-+ .name = "portb-usb2clk",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 7,
-+ },
-+ [SCU0_CLK_GATE_UHCICLK] = {
-+ .type = CLK_GATE,
-+ .name = "uhciclk",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 9,
-+ },
-+ [SCU0_CLK_GATE_VGA1CLK] = {
-+ .type = CLK_GATE,
-+ .name = "d2clk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 10,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU0_CLK_GATE_DDRPHYCLK] = {
-+ .type = CLK_GATE,
-+ .name = "ddrphy-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 11,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU0_CLK_GATE_E2M0CLK] = {
-+ .type = CLK_GATE,
-+ .name = "e2m0clk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 12,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU0_CLK_GATE_HACCLK] = {
-+ .type = CLK_GATE,
-+ .name = "hac-clk",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 13,
-+ },
-+ [SCU0_CLK_GATE_PORTAUSB2CLK] = {
-+ .type = CLK_GATE,
-+ .name = "porta-usb2clk",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 14,
-+ },
-+ [SCU0_CLK_UART] = {
-+ .type = CLK_MUX,
-+ .name = "soc0-uartclk",
-+ .parent_names = (const char *[]){"soc0-clk24Mhz", "soc0-clk192Mhz", },
-+ .num_parents = 2,
-+ .reg = SCU0_CLK_SEL2,
-+ .bit_shift = 14,
-+ .bit_width = 1,
-+ },
-+ [SCU0_CLK_UART4] = {
-+ .type = CLK_DIV_TABLE,
-+ .name = "uart4clk",
-+ .parent_names = (const char *[]){ "soc0-uartclk", },
-+ .reg = SCU0_CLK_SEL2,
-+ .bit_shift = 30,
-+ .bit_width = 1,
-+ .div_table = ast2700_clk_uart_div_table,
-+ },
-+ [SCU0_CLK_GATE_UART4CLK] = {
-+ .type = CLK_GATE,
-+ .name = "uart4clk-gate",
-+ .parent_names = (const char *[]){"uart4clk" },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 15,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU0_CLK_GATE_SLICLK] = {
-+ .type = CLK_GATE,
-+ .name = "soc0-sliclk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 16,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU0_CLK_GATE_DACCLK] = {
-+ .type = CLK_GATE,
-+ .name = "dacclk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 17,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU0_CLK_GATE_DP] = {
-+ .type = CLK_GATE,
-+ .name = "dpclk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 18,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU0_CLK_GATE_E2M1CLK] = {
-+ .type = CLK_GATE,
-+ .name = "e2m1clk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 19,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU0_CLK_GATE_CRT0CLK] = {
-+ .type = CLK_GATE,
-+ .name = "crt0clk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 20,
-+ },
-+ [SCU0_CLK_GATE_CRT1CLK] = {
-+ .type = CLK_GATE,
-+ .name = "crt1clk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 21,
-+ },
-+ [SCU0_CLK_GATE_ECDSACLK] = {
-+ .type = CLK_GATE,
-+ .name = "eccclk",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 23,
-+ },
-+ [SCU0_CLK_GATE_RSACLK] = {
-+ .type = CLK_GATE,
-+ .name = "rsaclk",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 24,
-+ },
-+ [SCU0_CLK_GATE_RVAS0CLK] = {
-+ .type = CLK_GATE,
-+ .name = "rvasclk",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 25,
-+ },
-+ [SCU0_CLK_GATE_UFSCLK] = {
-+ .type = CLK_GATE,
-+ .name = "ufsclk",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 26,
-+ },
-+ [SCU0_CLK_EMMCMUX] = {
-+ .type = CLK_MUX,
-+ .name = "emmcsrc-mux",
-+ .parent_names = (const char *[]){"soc0-mpll_div4", "soc0-hpll_div4", },
-+ .num_parents = 2,
-+ .reg = SCU0_CLK_SEL1,
-+ .bit_shift = 11,
-+ .bit_width = 1,
-+ },
-+ [SCU0_CLK_EMMC] = {
-+ .type = CLK_DIV_TABLE,
-+ .name = "emmcclk",
-+ .parent_names = (const char *[]){ "emmcsrc-mux", },
-+ .reg = SCU0_CLK_SEL1,
-+ .bit_shift = 12,
-+ .bit_width = 3,
-+ .div_table = ast2700_clk_div_table2,
-+ },
-+ [SCU0_CLK_GATE_EMMCCLK] = {
-+ .type = CLK_GATE,
-+ .name = "emmcclk-gate",
-+ .parent_names = (const char *[]){ "emmcclk", },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 27,
-+ },
-+ [SCU0_CLK_GATE_RVAS1CLK] = {
-+ .type = CLK_GATE,
-+ .name = "rvas2clk",
-+ .parent_names = (const char *[]){ "emmcclk", },
-+ .reg = SCU0_CLK_STOP,
-+ .clk_idx = 28,
-+ },
-+};
-+
-+static const struct ast2700_clk_info ast2700_scu1_clk_info[] __initconst = {
-+ [SCU1_CLKIN] = {
-+ .type = CLK_FIXED,
-+ .name = "soc1-clkin",
-+ .fixed_rate = SCU_CLK_25MHZ,
-+ },
-+ [SCU1_CLK_HPLL] = {
-+ .type = CLK_PLL,
-+ .name = "soc1-hpll",
-+ .parent_names = (const char *[]){ "soc1-clkin", },
-+ .reg = SCU1_HPLL_PARAM,
-+ },
-+ [SCU1_CLK_APLL] = {
-+ .type = CLK_PLL,
-+ .name = "soc1-apll",
-+ .parent_names = (const char *[]){ "soc1-clkin", },
-+ .reg = SCU1_APLL_PARAM,
-+ },
-+ [SCU1_CLK_APLL_DIV2] = {
-+ .type = CLK_FIXED_FACTOR,
-+ .name = "soc1-apll_div2",
-+ .parent_names = (const char *[]){ "soc1-apll", },
-+ .mult = 1,
-+ .div = 2,
-+ },
-+ [SCU1_CLK_APLL_DIV4] = {
-+ .type = CLK_FIXED_FACTOR,
-+ .name = "soc1-apll_div4",
-+ .parent_names = (const char *[]){ "soc1-apll", },
-+ .mult = 1,
-+ .div = 4,
-+ },
-+ [SCU1_CLK_DPLL] = {
-+ .type = CLK_PLL,
-+ .name = "soc1-dpll",
-+ .parent_names = (const char *[]){ "soc1-clkin", },
-+ .reg = SCU1_DPLL_PARAM,
-+ },
-+ [SCU1_CLK_UXCLK] = {
-+ .type = CLK_MUX,
-+ .name = "uxclk",
-+ .parent_names = (const char *[]){ "soc1-apll_div4", "soc1-apll_div2",
-+ "soc1-apll", "soc1-hpll",},
-+ .num_parents = 4,
-+ .reg = SCU1_CLK_SEL2,
-+ .bit_shift = 0,
-+ .bit_width = 2,
-+ },
-+ [SCU1_CLK_UARTX] = {
-+ .type = CLK_UART_PLL,
-+ .name = "uartxclk",
-+ .parent_names = (const char *[]){ "uxclk", },
-+ .reg = SCU1_UXCLK_CTRL,
-+ },
-+ [SCU1_CLK_HUXCLK] = {
-+ .type = CLK_MUX,
-+ .name = "huxclk",
-+ .parent_names = (const char *[]){"soc1-apll_div4", "soc1-apll_div2",
-+ "soc1-apll", "soc1-hpll",},
-+ .num_parents = 4,
-+ .reg = SCU1_CLK_SEL2,
-+ .bit_shift = 3,
-+ .bit_width = 2,
-+ },
-+ [SCU1_CLK_HUARTX] = {
-+ .type = CLK_UART_PLL,
-+ .name = "huartxclk",
-+ .parent_names = (const char *[]){ "huxclk", },
-+ .reg = SCU1_HUXCLK_CTRL,
-+ },
-+ [SCU1_CLK_AHB] = {
-+ .type = CLK_DIV_TABLE,
-+ .name = "soc1-ahb",
-+ .parent_names = (const char *[]){"soc1-hpll", },
-+ .reg = SCU1_CLK_SEL2,
-+ .bit_shift = 20,
-+ .bit_width = 3,
-+ .div_table = ast2700_clk_div_table,
-+ },
-+ [SCU1_CLK_APB] = {
-+ .type = CLK_DIV_TABLE,
-+ .name = "soc1-apb",
-+ .parent_names = (const char *[]){"soc1-hpll", },
-+ .reg = SCU1_CLK_SEL1,
-+ .bit_shift = 18,
-+ .bit_width = 3,
-+ .div_table = ast2700_clk_div_table2,
-+ },
-+ [SCU1_CLK_RMII] = {
-+ .type = CLK_DIV_TABLE,
-+ .name = "rmii",
-+ .parent_names = (const char *[]){"soc1-hpll", },
-+ .reg = SCU1_CLK_SEL1,
-+ .bit_shift = 21,
-+ .bit_width = 3,
-+ .div_table = ast2700_rmii_div_table,
-+ },
-+ [SCU1_CLK_MAC0RCLK] = {
-+ .type = CLK_GATE,
-+ .name = "mac0rclk",
-+ .parent_names = (const char *[]){ "rmii", },
-+ .reg = SCU1_MAC12_CLK_DLY,
-+ .clk_idx = 29,
-+ },
-+ [SCU1_CLK_MAC1RCLK] = {
-+ .type = CLK_GATE,
-+ .name = "mac1rclk",
-+ .parent_names = (const char *[]){ "rmii", },
-+ .reg = SCU1_MAC12_CLK_DLY,
-+ .clk_idx = 30,
-+ },
-+ [SCU1_CLK_RGMII] = {
-+ .type = CLK_DIV_TABLE,
-+ .name = "rgmii",
-+ .parent_names = (const char *[]){"soc1-hpll", },
-+ .reg = SCU1_CLK_SEL1,
-+ .bit_shift = 25,
-+ .bit_width = 3,
-+ .div_table = ast2700_rgmii_div_table,
-+ },
-+ [SCU1_CLK_MACHCLK] = {
-+ .type = CLK_DIV_TABLE,
-+ .name = "machclk",
-+ .parent_names = (const char *[]){"soc1-hpll", },
-+ .reg = SCU1_CLK_SEL1,
-+ .bit_shift = 29,
-+ .bit_width = 3,
-+ .div_table = ast2700_clk_div_table,
-+ },
-+ [SCU1_CLK_GATE_LCLK0] = {
-+ .type = CLK_GATE,
-+ .name = "lclk0-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 0,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU1_CLK_GATE_LCLK1] = {
-+ .type = CLK_GATE,
-+ .name = "lclk1-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 1,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU1_CLK_GATE_ESPI0CLK] = {
-+ .type = CLK_GATE,
-+ .name = "espi0clk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 2,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU1_CLK_GATE_ESPI1CLK] = {
-+ .type = CLK_GATE,
-+ .name = "espi1clk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 3,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU1_CLK_APLL_DIVN] = {
-+ .type = CLK_DIV_TABLE,
-+ .name = "soc1-apll_divn",
-+ .parent_names = (const char *[]){"soc1-apll", },
-+ .reg = SCU1_CLK_SEL2,
-+ .bit_shift = 8,
-+ .bit_width = 3,
-+ .div_table = ast2700_clk_div_table,
-+ },
-+ [SCU1_CLK_SDMUX] = {
-+ .type = CLK_MUX,
-+ .name = "sdclk-mux",
-+ .parent_names = (const char *[]){ "soc1-hpll", "soc1-apll", },
-+ .num_parents = 2,
-+ .reg = SCU1_CLK_SEL1,
-+ .bit_shift = 13,
-+ .bit_width = 1,
-+ },
-+ [SCU1_CLK_SDCLK] = {
-+ .type = CLK_DIV_TABLE,
-+ .name = "sdclk",
-+ .parent_names = (const char *[]){"sdclk-mux", },
-+ .reg = SCU1_CLK_SEL1,
-+ .bit_shift = 14,
-+ .bit_width = 3,
-+ .div_table = ast2700_clk_div_table,
-+ },
-+ [SCU1_CLK_GATE_SDCLK] = {
-+ .type = CLK_GATE,
-+ .name = "sdclk-gate",
-+ .parent_names = (const char *[]){"sdclk", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 4,
-+ },
-+ [SCU1_CLK_GATE_IPEREFCLK] = {
-+ .type = CLK_GATE,
-+ .name = "soc1-iperefclk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 5,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU1_CLK_GATE_REFCLK] = {
-+ .type = CLK_GATE,
-+ .name = "soc1-refclk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 6,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU1_CLK_GATE_LPCHCLK] = {
-+ .type = CLK_GATE,
-+ .name = "lpchclk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 7,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU1_CLK_GATE_MAC0CLK] = {
-+ .type = CLK_GATE,
-+ .name = "mac0clk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 8,
-+ },
-+ [SCU1_CLK_GATE_MAC1CLK] = {
-+ .type = CLK_GATE,
-+ .name = "mac1clk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 9,
-+ },
-+ [SCU1_CLK_GATE_MAC2CLK] = {
-+ .type = CLK_GATE,
-+ .name = "mac2clk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 10,
-+ },
-+ [SCU1_CLK_UART0] = {
-+ .type = CLK_MUX,
-+ .name = "uart0clk",
-+ .parent_names = (const char *[]){"uartxclk", "huartxclk", },
-+ .num_parents = 2,
-+ .reg = SCU1_CLK_SEL1,
-+ .bit_shift = 0,
-+ .bit_width = 1,
-+ },
-+ [SCU1_CLK_GATE_UART0CLK] = {
-+ .type = CLK_GATE,
-+ .name = "uart0clk-gate",
-+ .parent_names = (const char *[]){ "uart0clk", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 11,
-+ },
-+ [SCU1_CLK_UART1] = {
-+ .type = CLK_MUX,
-+ .name = "uart1clk",
-+ .parent_names = (const char *[]){"uartxclk", "huartxclk", },
-+ .num_parents = 2,
-+ .reg = SCU1_CLK_SEL1,
-+ .bit_shift = 1,
-+ .bit_width = 1,
-+ },
-+ [SCU1_CLK_GATE_UART1CLK] = {
-+ .type = CLK_GATE,
-+ .name = "uart1clk-gate",
-+ .parent_names = (const char *[]){ "uart1clk", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 12,
-+ },
-+ [SCU1_CLK_UART2] = {
-+ .type = CLK_MUX,
-+ .name = "uart2clk",
-+ .parent_names = (const char *[]){"uartxclk", "huartxclk", },
-+ .num_parents = 2,
-+ .reg = SCU1_CLK_SEL1,
-+ .bit_shift = 2,
-+ .bit_width = 1,
-+ },
-+ [SCU1_CLK_GATE_UART2CLK] = {
-+ .type = CLK_GATE,
-+ .name = "uart2clk-gate",
-+ .parent_names = (const char *[]){ "uart2clk", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 13,
-+ },
-+ [SCU1_CLK_UART3] = {
-+ .type = CLK_MUX,
-+ .name = "uart3clk",
-+ .parent_names = (const char *[]){"uartxclk", "huartxclk", },
-+ .num_parents = 2,
-+ .reg = SCU1_CLK_SEL1,
-+ .bit_shift = 3,
-+ .bit_width = 1,
-+ },
-+ [SCU1_CLK_GATE_UART3CLK] = {
-+ .type = CLK_GATE,
-+ .name = "uart3clk-gate",
-+ .parent_names = (const char *[]){ "uart3clk", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 14,
-+ },
-+ [SCU1_CLK_GATE_I2CCLK] = {
-+ .type = CLK_GATE,
-+ .name = "i2cclk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 15,
-+ },
-+ [SCU1_CLK_GATE_I3C0CLK] = {
-+ .type = CLK_GATE,
-+ .name = "i3c0clk-gate",
-+ .parent_names = (const char *[]){ "soc1-ahb", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 16,
-+ },
-+ [SCU1_CLK_GATE_I3C1CLK] = {
-+ .type = CLK_GATE,
-+ .name = "i3c1clk-gate",
-+ .parent_names = (const char *[]){ "soc1-ahb", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 17,
-+ },
-+ [SCU1_CLK_GATE_I3C2CLK] = {
-+ .type = CLK_GATE,
-+ .name = "i3c2clk-gate",
-+ .parent_names = (const char *[]){ "soc1-ahb", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 18,
-+ },
-+ [SCU1_CLK_GATE_I3C3CLK] = {
-+ .type = CLK_GATE,
-+ .name = "i3c3clk-gate",
-+ .parent_names = (const char *[]){ "soc1-ahb", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 19,
-+ },
-+ [SCU1_CLK_GATE_I3C4CLK] = {
-+ .type = CLK_GATE,
-+ .name = "i3c4clk-gate",
-+ .parent_names = (const char *[]){ "soc1-ahb", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 20,
-+ },
-+ [SCU1_CLK_GATE_I3C5CLK] = {
-+ .type = CLK_GATE,
-+ .name = "i3c5clk-gate",
-+ .parent_names = (const char *[]){ "soc1-ahb", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 21,
-+ },
-+ [SCU1_CLK_GATE_I3C6CLK] = {
-+ .type = CLK_GATE,
-+ .name = "i3c6clk-gate",
-+ .parent_names = (const char *[]){ "soc1-ahb", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 22,
-+ },
-+ [SCU1_CLK_GATE_I3C7CLK] = {
-+ .type = CLK_GATE,
-+ .name = "i3c7clk-gate",
-+ .parent_names = (const char *[]){ "soc1-ahb", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 23,
-+ },
-+ [SCU1_CLK_GATE_I3C8CLK] = {
-+ .type = CLK_GATE,
-+ .name = "i3c8clk-gate",
-+ .parent_names = (const char *[]){ "soc1-ahb", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 24,
-+ },
-+ [SCU1_CLK_GATE_I3C9CLK] = {
-+ .type = CLK_GATE,
-+ .name = "i3c9clk-gate",
-+ .parent_names = (const char *[]){ "soc1-ahb", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 25,
-+ },
-+ [SCU1_CLK_GATE_I3C10CLK] = {
-+ .type = CLK_GATE,
-+ .name = "i3c10clk-gate",
-+ .parent_names = (const char *[]){ "soc1-ahb", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 26,
-+ },
-+ [SCU1_CLK_GATE_I3C11CLK] = {
-+ .type = CLK_GATE,
-+ .name = "i3c11clk-gate",
-+ .parent_names = (const char *[]){ "soc1-ahb", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 27,
-+ },
-+ [SCU1_CLK_GATE_I3C12CLK] = {
-+ .type = CLK_GATE,
-+ .name = "i3c12clk-gate",
-+ .parent_names = (const char *[]){ "soc1-ahb", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 28,
-+ },
-+ [SCU1_CLK_GATE_I3C13CLK] = {
-+ .type = CLK_GATE,
-+ .name = "i3c13clk-gate",
-+ .parent_names = (const char *[]){ "soc1-ahb", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 29,
-+ },
-+ [SCU1_CLK_GATE_I3C14CLK] = {
-+ .type = CLK_GATE,
-+ .name = "i3c14clk-gate",
-+ .parent_names = (const char *[]){ "soc1-ahb", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 30,
-+ },
-+ [SCU1_CLK_GATE_I3C15CLK] = {
-+ .type = CLK_GATE,
-+ .name = "i3c15clk-gate",
-+ .parent_names = (const char *[]){ "soc1-ahb", },
-+ .reg = SCU1_CLK_STOP,
-+ .clk_idx = 31,
-+ },
-+ [SCU1_CLK_UART5] = {
-+ .type = CLK_MUX,
-+ .name = "uart5clk",
-+ .parent_names = (const char *[]){"uartxclk", "huartxclk", },
-+ .num_parents = 2,
-+ .reg = SCU1_CLK_SEL1,
-+ .bit_shift = 5,
-+ .bit_width = 1,
-+ },
-+ [SCU1_CLK_GATE_UART5CLK] = {
-+ .type = CLK_GATE,
-+ .name = "uart5clk-gate",
-+ .parent_names = (const char *[]){ "uart5clk", },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 0,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU1_CLK_UART6] = {
-+ .type = CLK_MUX,
-+ .name = "uart6clk",
-+ .parent_names = (const char *[]){"uartxclk", "huartxclk", },
-+ .num_parents = 2,
-+ .reg = SCU1_CLK_SEL1,
-+ .bit_shift = 6,
-+ .bit_width = 1,
-+ },
-+ [SCU1_CLK_GATE_UART6CLK] = {
-+ .type = CLK_GATE,
-+ .name = "uart6clk-gate",
-+ .parent_names = (const char *[]){ "uart6clk", },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 1,
-+ },
-+ [SCU1_CLK_UART7] = {
-+ .type = CLK_MUX,
-+ .name = "uart7clk",
-+ .parent_names = (const char *[]){"uartxclk", "huartxclk", },
-+ .num_parents = 2,
-+ .reg = SCU1_CLK_SEL1,
-+ .bit_shift = 7,
-+ .bit_width = 1,
-+ },
-+ [SCU1_CLK_GATE_UART7CLK] = {
-+ .type = CLK_GATE,
-+ .name = "uart7clk-gate",
-+ .parent_names = (const char *[]){ "uart7clk", },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 2,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU1_CLK_UART8] = {
-+ .type = CLK_MUX,
-+ .name = "uart8clk",
-+ .parent_names = (const char *[]){"uartxclk", "huartxclk", },
-+ .num_parents = 2,
-+ .reg = SCU1_CLK_SEL1,
-+ .bit_shift = 8,
-+ .bit_width = 1,
-+ },
-+ [SCU1_CLK_GATE_UART8CLK] = {
-+ .type = CLK_GATE,
-+ .name = "uart8clk-gate",
-+ .parent_names = (const char *[]){ "uart8clk", },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 3,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU1_CLK_UART9] = {
-+ .type = CLK_MUX,
-+ .name = "uart9clk",
-+ .parent_names = (const char *[]){"uartxclk", "huartxclk", },
-+ .num_parents = 2,
-+ .reg = SCU1_CLK_SEL1,
-+ .bit_shift = 9,
-+ .bit_width = 1,
-+ },
-+ [SCU1_CLK_GATE_UART9CLK] = {
-+ .type = CLK_GATE,
-+ .name = "uart9clk-gate",
-+ .parent_names = (const char *[]){ "uart9clk", },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 4,
-+ },
-+ [SCU1_CLK_UART10] = {
-+ .type = CLK_MUX,
-+ .name = "uart10clk",
-+ .parent_names = (const char *[]){"uartxclk", "huartxclk", },
-+ .num_parents = 2,
-+ .reg = SCU1_CLK_SEL1,
-+ .bit_shift = 10,
-+ .bit_width = 1,
-+ },
-+ [SCU1_CLK_GATE_UART10CLK] = {
-+ .type = CLK_GATE,
-+ .name = "uart10clk-gate",
-+ .parent_names = (const char *[]){ "uart10clk", },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 5,
-+ },
-+ [SCU1_CLK_UART11] = {
-+ .type = CLK_MUX,
-+ .name = "uart11clk",
-+ .parent_names = (const char *[]){"uartxclk", "huartxclk", },
-+ .num_parents = 2,
-+ .reg = SCU1_CLK_SEL1,
-+ .bit_shift = 11,
-+ .bit_width = 1,
-+ },
-+ [SCU1_CLK_GATE_UART11CLK] = {
-+ .type = CLK_GATE,
-+ .name = "uart11clk-gate",
-+ .parent_names = (const char *[]){ "uart11clk", },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 6,
-+ },
-+ [SCU1_CLK_UART12] = {
-+ .type = CLK_MUX,
-+ .name = "uart12clk",
-+ .parent_names = (const char *[]){"uartxclk", "huartxclk", },
-+ .num_parents = 2,
-+ .reg = SCU1_CLK_SEL1,
-+ .bit_shift = 12,
-+ .bit_width = 1,
-+ },
-+ [SCU1_CLK_GATE_UART12CLK] = {
-+ .type = CLK_GATE,
-+ .name = "uart12clk-gate",
-+ .parent_names = (const char *[]){ "uart12clk", },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 7,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU1_CLK_UART13] = {
-+ .type = CLK_FIXED_FACTOR,
-+ .name = "uart13clk",
-+ .parent_names = (const char *[]){ "huartxclk", },
-+ .mult = 1,
-+ .div = 1,
-+ },
-+ [SCU1_CLK_UART14] = {
-+ .type = CLK_FIXED_FACTOR,
-+ .name = "uart14clk",
-+ .parent_names = (const char *[]){ "huartxclk", },
-+ .mult = 1,
-+ .div = 1,
-+ },
-+ [SCU1_CLK_GATE_FSICLK] = {
-+ .type = CLK_GATE,
-+ .name = "fsiclk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 8,
-+ },
-+ [SCU1_CLK_GATE_LTPIPHYCLK] = {
-+ .type = CLK_GATE,
-+ .name = "ltpiphyclk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 9,
-+ },
-+ [SCU1_CLK_GATE_LTPICLK] = {
-+ .type = CLK_GATE,
-+ .name = "ltpiclk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 10,
-+ },
-+ [SCU1_CLK_GATE_VGALCLK] = {
-+ .type = CLK_GATE,
-+ .name = "vgalclk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 11,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU1_CLK_GATE_UHCICLK] = {
-+ .type = CLK_GATE,
-+ .name = "usbuartclk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 12,
-+ },
-+ [SCU1_CLK_CAN] = {
-+ .type = CLK_FIXED_FACTOR,
-+ .name = "canclk",
-+ .parent_names = (const char *[]){ "soc1-apll", },
-+ .mult = 1,
-+ .div = 10,
-+ },
-+ [SCU1_CLK_GATE_CANCLK] = {
-+ .type = CLK_GATE,
-+ .name = "canclk-gate",
-+ .parent_names = (const char *[]){ "canclk", },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 13,
-+ },
-+ [SCU1_CLK_GATE_PCICLK] = {
-+ .type = CLK_GATE,
-+ .name = "pciclk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 14,
-+ },
-+ [SCU1_CLK_GATE_SLICLK] = {
-+ .type = CLK_GATE,
-+ .name = "soc1-sliclk-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 15,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU1_CLK_GATE_E2MCLK] = {
-+ .type = CLK_GATE,
-+ .name = "soc1-e2m-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 16,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU1_CLK_GATE_PORTCUSB2CLK] = {
-+ .type = CLK_GATE,
-+ .name = "portcusb2-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 17,
-+ .flags = CLK_IS_CRITICAL,
-+ },
-+ [SCU1_CLK_GATE_PORTDUSB2CLK] = {
-+ .type = CLK_GATE,
-+ .name = "portdusb2-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 18,
-+ },
-+ [SCU1_CLK_GATE_LTPI1TXCLK] = {
-+ .type = CLK_GATE,
-+ .name = "ltp1tx-gate",
-+ .parent_names = (const char *[]){ },
-+ .reg = SCU1_CLK_STOP2,
-+ .clk_idx = 19,
-+ },
-+};
-+
-+static struct clk_hw *ast2700_clk_hw_register_pll(int clk_idx, void __iomem *reg,
-+ const struct ast2700_clk_info *clk,
-+ struct ast2700_clk_ctrl *clk_ctrl)
-+{
-+ int scu = clk_ctrl->clk_data->scu;
-+ unsigned int mult, div;
-+ u32 val;
-+
-+ if (!scu && clk_idx == SCU0_CLK_HPLL) {
-+ val = readl(clk_ctrl->base + SCU0_HWSTRAP1);
-+ if ((val & GENMASK(3, 2)) != 0) {
-+ switch ((val & GENMASK(3, 2)) >> 2) {
-+ case 1:
-+ return devm_clk_hw_register_fixed_rate(clk_ctrl->dev, "soc0-hpll",
-+ NULL, 0, 1900000000);
-+ case 2:
-+ return devm_clk_hw_register_fixed_rate(clk_ctrl->dev, "soc0-hpll",
-+ NULL, 0, 1800000000);
-+ case 3:
-+ return devm_clk_hw_register_fixed_rate(clk_ctrl->dev, "soc0-hpll",
-+ NULL, 0, 1700000000);
-+ default:
-+ return ERR_PTR(-EINVAL);
-+ }
-+ }
-+ }
-+
-+ val = readl(reg);
-+
-+ if (val & BIT(24)) {
-+ /* Pass through mode */
-+ mult = 1;
-+ div = 1;
-+ } else {
-+ u32 m = val & 0x1fff;
-+ u32 n = (val >> 13) & 0x3f;
-+ u32 p = (val >> 19) & 0xf;
-+
-+ if (scu) {
-+ mult = (m + 1) / (n + 1);
-+ div = (p + 1);
-+ } else {
-+ if (clk_idx == SCU0_CLK_MPLL) {
-+ mult = m / (n + 1);
-+ div = (p + 1);
-+ } else {
-+ mult = (m + 1) / (2 * (n + 1));
-+ div = (p + 1);
-+ }
-+ }
-+ }
-+
-+ return devm_clk_hw_register_fixed_factor(clk_ctrl->dev, clk->name,
-+ clk->parent_names[0], 0, mult, div);
-+}
-+
-+static struct clk_hw *ast2700_clk_hw_register_uartpll(int clk_idx, void __iomem *reg,
-+ const struct ast2700_clk_info *clk,
-+ struct ast2700_clk_ctrl *clk_ctrl)
-+{
-+ unsigned int mult, div;
-+ u32 val = readl(reg);
-+ u32 r = val & 0xff;
-+ u32 n = (val >> 8) & 0x3ff;
-+
-+ mult = r;
-+ div = n * 2;
-+
-+ return devm_clk_hw_register_fixed_factor(clk_ctrl->dev, clk->name,
-+ clk->parent_names[0], 0, mult, div);
-+}
-+
-+static struct clk_hw *ast2700_clk_hw_register_misc(int clk_idx, void __iomem *reg,
-+ const struct ast2700_clk_info *clk,
-+ struct ast2700_clk_ctrl *clk_ctrl)
-+{
-+ u32 div = 0;
-+
-+ if (clk_idx == SCU0_CLK_MPHY)
-+ div = readl(reg) + 1;
-+ else if (clk_idx == SCU0_CLK_U2PHY_REFCLK)
-+ div = (GET_USB_REFCLK_DIV(readl(reg)) + 1) << 1;
-+ else
-+ return ERR_PTR(-EINVAL);
-+
-+ return devm_clk_hw_register_fixed_factor(clk_ctrl->dev, clk->name,
-+ clk->parent_names[0], clk->flags,
-+ 1, div);
-+}
-+
-+static int ast2700_clk_is_enabled(struct clk_hw *hw)
-+{
-+ struct clk_gate *gate = to_clk_gate(hw);
-+ u32 clk = BIT(gate->bit_idx);
-+ u32 reg;
-+
-+ reg = readl(gate->reg);
-+
-+ return !(reg & clk);
-+}
-+
-+static int ast2700_clk_enable(struct clk_hw *hw)
-+{
-+ struct clk_gate *gate = to_clk_gate(hw);
-+ u32 clk = BIT(gate->bit_idx);
-+
-+ if (readl(gate->reg) & clk)
-+ writel(clk, gate->reg + 0x04);
-+
-+ return 0;
-+}
-+
-+static void ast2700_clk_disable(struct clk_hw *hw)
-+{
-+ struct clk_gate *gate = to_clk_gate(hw);
-+ u32 clk = BIT(gate->bit_idx);
-+
-+ /* Clock is set to enable, so use write to set register */
-+ writel(clk, gate->reg);
-+}
-+
-+static const struct clk_ops ast2700_clk_gate_ops = {
-+ .enable = ast2700_clk_enable,
-+ .disable = ast2700_clk_disable,
-+ .is_enabled = ast2700_clk_is_enabled,
-+};
-+
-+static struct clk_hw *ast2700_clk_hw_register_gate(struct device *dev, const char *name,
-+ const char *parent_name, unsigned long flags,
-+ void __iomem *reg, u8 clock_idx,
-+ u8 clk_gate_flags, spinlock_t *lock)
-+{
-+ struct clk_gate *gate;
-+ struct clk_hw *hw;
-+ struct clk_init_data init;
-+ int ret = -EINVAL;
-+
-+ gate = kzalloc(sizeof(*gate), GFP_KERNEL);
-+ if (!gate)
-+ return ERR_PTR(-ENOMEM);
-+
-+ init.name = name;
-+ init.ops = &ast2700_clk_gate_ops;
-+ init.flags = flags;
-+ init.parent_names = parent_name ? &parent_name : NULL;
-+ init.num_parents = parent_name ? 1 : 0;
-+
-+ gate->reg = reg;
-+ gate->bit_idx = clock_idx;
-+ gate->flags = clk_gate_flags;
-+ gate->lock = lock;
-+ gate->hw.init = &init;
-+
-+ hw = &gate->hw;
-+ ret = clk_hw_register(dev, hw);
-+ if (ret) {
-+ kfree(gate);
-+ hw = ERR_PTR(ret);
-+ }
-+
-+ return hw;
-+}
-+
-+static void ast2700_soc1_configure_mac01_clk(struct ast2700_clk_ctrl *clk_ctrl)
-+{
-+ struct device_node *np = clk_ctrl->dev->of_node;
-+ struct mac_delay_config mac_cfg;
-+ u32 reg[3];
-+ int ret;
-+
-+ reg[0] = AST2700_DEF_MAC12_DELAY_1G;
-+ reg[1] = AST2700_DEF_MAC12_DELAY_100M;
-+ reg[2] = AST2700_DEF_MAC12_DELAY_10M;
-+
-+ ret = of_property_read_u32_array(np, "mac0-clk-delay", (u32 *)&mac_cfg,
-+ sizeof(mac_cfg) / sizeof(u32));
-+ if (!ret) {
-+ reg[0] &= ~(MAC_CLK_1G_INPUT_DELAY_1 | MAC_CLK_1G_OUTPUT_DELAY_1);
-+ reg[0] |= FIELD_PREP(MAC_CLK_1G_INPUT_DELAY_1, mac_cfg.rx_delay_1000) |
-+ FIELD_PREP(MAC_CLK_1G_OUTPUT_DELAY_1, mac_cfg.tx_delay_1000);
-+
-+ reg[1] &= ~(MAC_CLK_100M_10M_INPUT_DELAY_1 | MAC_CLK_100M_10M_OUTPUT_DELAY_1);
-+ reg[1] |= FIELD_PREP(MAC_CLK_100M_10M_INPUT_DELAY_1, mac_cfg.rx_delay_100) |
-+ FIELD_PREP(MAC_CLK_100M_10M_OUTPUT_DELAY_1, mac_cfg.tx_delay_100);
-+
-+ reg[2] &= ~(MAC_CLK_100M_10M_INPUT_DELAY_1 | MAC_CLK_100M_10M_OUTPUT_DELAY_1);
-+ reg[2] |= FIELD_PREP(MAC_CLK_100M_10M_INPUT_DELAY_1, mac_cfg.rx_delay_10) |
-+ FIELD_PREP(MAC_CLK_100M_10M_OUTPUT_DELAY_1, mac_cfg.tx_delay_10);
-+ }
-+
-+ ret = of_property_read_u32_array(np, "mac1-clk-delay", (u32 *)&mac_cfg,
-+ sizeof(mac_cfg) / sizeof(u32));
-+ if (!ret) {
-+ reg[0] &= ~(MAC_CLK_1G_INPUT_DELAY_2 | MAC_CLK_1G_OUTPUT_DELAY_2);
-+ reg[0] |= FIELD_PREP(MAC_CLK_1G_INPUT_DELAY_2, mac_cfg.rx_delay_1000) |
-+ FIELD_PREP(MAC_CLK_1G_OUTPUT_DELAY_2, mac_cfg.tx_delay_1000);
-+
-+ reg[1] &= ~(MAC_CLK_100M_10M_INPUT_DELAY_2 | MAC_CLK_100M_10M_OUTPUT_DELAY_2);
-+ reg[1] |= FIELD_PREP(MAC_CLK_100M_10M_INPUT_DELAY_2, mac_cfg.rx_delay_100) |
-+ FIELD_PREP(MAC_CLK_100M_10M_OUTPUT_DELAY_2, mac_cfg.tx_delay_100);
-+
-+ reg[2] &= ~(MAC_CLK_100M_10M_INPUT_DELAY_2 | MAC_CLK_100M_10M_OUTPUT_DELAY_2);
-+ reg[2] |= FIELD_PREP(MAC_CLK_100M_10M_INPUT_DELAY_2, mac_cfg.rx_delay_10) |
-+ FIELD_PREP(MAC_CLK_100M_10M_OUTPUT_DELAY_2, mac_cfg.tx_delay_10);
-+ }
-+
-+ reg[0] |= (readl(clk_ctrl->base + SCU1_MAC12_CLK_DLY) & ~GENMASK(25, 0));
-+ writel(reg[0], clk_ctrl->base + SCU1_MAC12_CLK_DLY);
-+ writel(reg[1], clk_ctrl->base + SCU1_MAC12_CLK_DLY_100M);
-+ writel(reg[2], clk_ctrl->base + SCU1_MAC12_CLK_DLY_10M);
-+}
-+
-+static void aspeed_reset_unregister_adev(void *_adev)
-+{
-+ struct auxiliary_device *adev = _adev;
-+
-+ auxiliary_device_delete(adev);
-+ auxiliary_device_uninit(adev);
-+}
-+
-+static void aspeed_reset_adev_release(struct device *dev)
-+{
-+ struct auxiliary_device *adev = to_auxiliary_dev(dev);
-+
-+ kfree(adev);
-+}
-+
-+static int aspeed_reset_controller_register(struct device *clk_dev,
-+ void __iomem *base, const char *adev_name)
-+{
-+ struct auxiliary_device *adev;
-+ int ret;
-+
-+ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
-+ if (!adev)
-+ return -ENOMEM;
-+
-+ adev->name = adev_name;
-+ adev->dev.parent = clk_dev;
-+ adev->dev.release = aspeed_reset_adev_release;
-+ adev->id = 666u;
-+
-+ ret = auxiliary_device_init(adev);
-+ if (ret) {
-+ kfree(adev);
-+ return ret;
-+ }
-+
-+ ret = auxiliary_device_add(adev);
-+ if (ret) {
-+ auxiliary_device_uninit(adev);
-+ return ret;
-+ }
-+
-+ adev->dev.platform_data = (__force void *)base;
-+
-+ return devm_add_action_or_reset(clk_dev, aspeed_reset_unregister_adev, adev);
-+}
-+
-+static int ast2700_soc_clk_probe(struct platform_device *pdev)
-+{
-+ struct ast2700_clk_data *clk_data;
-+ struct ast2700_clk_ctrl *clk_ctrl;
-+ struct clk_hw_onecell_data *clk_hw_data;
-+ struct device *dev = &pdev->dev;
-+ u32 uart_clk_source = 0;
-+ void __iomem *clk_base;
-+ struct clk_hw **hws;
-+ char *reset_name;
-+ int ret;
-+ int i;
-+
-+ clk_ctrl = devm_kzalloc(dev, sizeof(*clk_ctrl), GFP_KERNEL);
-+ if (!clk_ctrl)
-+ return -ENOMEM;
-+ clk_ctrl->dev = dev;
-+ dev_set_drvdata(&pdev->dev, clk_ctrl);
-+
-+ spin_lock_init(&clk_ctrl->lock);
-+
-+ clk_base = devm_platform_ioremap_resource(pdev, 0);
-+ if (IS_ERR(clk_base))
-+ return PTR_ERR(clk_base);
-+
-+ clk_ctrl->base = clk_base;
-+
-+ clk_data = (struct ast2700_clk_data *)of_device_get_match_data(dev);
-+ if (!clk_data)
-+ return devm_of_platform_populate(dev);
-+
-+ clk_ctrl->clk_data = clk_data;
-+ reset_name = devm_kasprintf(dev, GFP_KERNEL, "reset%d", clk_data->scu);
-+
-+ clk_hw_data = devm_kzalloc(dev, struct_size(clk_hw_data, hws, clk_data->nr_clks),
-+ GFP_KERNEL);
-+ if (!clk_hw_data)
-+ return -ENOMEM;
-+
-+ clk_hw_data->num = clk_data->nr_clks;
-+ hws = clk_hw_data->hws;
-+
-+ if (clk_data->scu) {
-+ of_property_read_u32(dev->of_node, "uart-clk-source", &uart_clk_source);
-+ if (uart_clk_source) {
-+ u32 val = readl(clk_base + SCU1_CLK_SEL1) & ~GENMASK(12, 0);
-+
-+ uart_clk_source &= GENMASK(12, 0);
-+ writel(val | uart_clk_source, clk_base + SCU1_CLK_SEL1);
-+ }
-+
-+ ast2700_soc1_configure_mac01_clk(clk_ctrl);
-+ }
-+
-+ for (i = 0; i < clk_data->nr_clks; i++) {
-+ const struct ast2700_clk_info *clk = &clk_data->clk_info[i];
-+ void __iomem *reg = clk_ctrl->base + clk->reg;
-+
-+ if (clk->type == CLK_FIXED) {
-+ hws[i] = devm_clk_hw_register_fixed_rate(dev, clk->name, NULL,
-+ clk->flags, clk->fixed_rate);
-+ } else if (clk->type == CLK_FIXED_FACTOR) {
-+ hws[i] = devm_clk_hw_register_fixed_factor(dev, clk->name,
-+ clk->parent_names[0], clk->flags,
-+ clk->mult, clk->div);
-+ } else if (clk->type == CLK_PLL) {
-+ hws[i] = ast2700_clk_hw_register_pll(i, reg, clk, clk_ctrl);
-+ } else if (clk->type == CLK_UART_PLL) {
-+ hws[i] = ast2700_clk_hw_register_uartpll(i, reg, clk, clk_ctrl);
-+ } else if (clk->type == CLK_MUX) {
-+ hws[i] = devm_clk_hw_register_mux(dev, clk->name, clk->parent_names,
-+ clk->num_parents, clk->flags, reg,
-+ clk->bit_shift, clk->bit_width,
-+ 0, &clk_ctrl->lock);
-+ } else if (clk->type == CLK_MISC) {
-+ hws[i] = ast2700_clk_hw_register_misc(i, reg, clk, clk_ctrl);
-+ } else if (clk->type == CLK_DIVIDER) {
-+ hws[i] = devm_clk_hw_register_divider(dev, clk->name, clk->parent_names[0],
-+ clk->flags, reg, clk->bit_shift,
-+ clk->bit_width, 0,
-+ &clk_ctrl->lock);
-+ } else if (clk->type == CLK_DIV_TABLE) {
-+ hws[i] = clk_hw_register_divider_table(dev, clk->name, clk->parent_names[0],
-+ clk->flags, reg, clk->bit_shift,
-+ clk->bit_width, 0,
-+ clk->div_table, &clk_ctrl->lock);
-+ } else {
-+ hws[i] = ast2700_clk_hw_register_gate(dev, clk->name, clk->parent_names[0],
-+ clk->flags, reg, clk->clk_idx,
-+ clk->flags, &clk_ctrl->lock);
-+ }
-+
-+ if (IS_ERR(hws[i]))
-+ return PTR_ERR(hws[i]);
-+ }
-+
-+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_hw_data);
-+ if (ret)
-+ return ret;
-+
-+ return aspeed_reset_controller_register(dev, clk_base, reset_name);
-+}
-+
-+static const struct ast2700_clk_data ast2700_clk0_data = {
-+ .scu = 0,
-+ .nr_clks = ARRAY_SIZE(ast2700_scu0_clk_info),
-+ .clk_info = ast2700_scu0_clk_info,
-+};
-+
-+static const struct ast2700_clk_data ast2700_clk1_data = {
-+ .scu = 1,
-+ .nr_clks = ARRAY_SIZE(ast2700_scu1_clk_info),
-+ .clk_info = ast2700_scu1_clk_info,
-+};
-+
-+static const struct of_device_id ast2700_scu_match[] = {
-+ { .compatible = "aspeed,ast2700-scu0", .data = &ast2700_clk0_data },
-+ { .compatible = "aspeed,ast2700-scu1", .data = &ast2700_clk1_data },
-+ { /* sentinel */ }
-+};
-+
-+MODULE_DEVICE_TABLE(of, ast2700_scu_match);
-+
-+static struct platform_driver ast2700_scu_driver = {
-+ .driver = {
-+ .name = "clk-ast2700",
-+ .of_match_table = ast2700_scu_match,
-+ },
-+};
-+
-+builtin_platform_driver_probe(ast2700_scu_driver, ast2700_soc_clk_probe);
-diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
-index 94a7223e95df..94b4ea51a225 100644
---- a/drivers/irqchip/irq-aspeed-scu-ic.c
-+++ b/drivers/irqchip/irq-aspeed-scu-ic.c
-@@ -34,51 +34,100 @@
- GENMASK(5, ASPEED_AST2600_SCU_IC1_SHIFT)
- #define ASPEED_AST2600_SCU_IC1_NUM_IRQS 2
-
-+#define ASPEED_AST2700_SCU_IC0_EN_REG 0x1d0
-+#define ASPEED_AST2700_SCU_IC0_STS_REG 0x1d4
-+#define ASPEED_AST2700_SCU_IC0_SHIFT 0
-+#define ASPEED_AST2700_SCU_IC0_ENABLE \
-+ GENMASK(3, ASPEED_AST2700_SCU_IC0_SHIFT)
-+#define ASPEED_AST2700_SCU_IC0_NUM_IRQS 4
-+
-+#define ASPEED_AST2700_SCU_IC1_EN_REG 0x1e0
-+#define ASPEED_AST2700_SCU_IC1_STS_REG 0x1e4
-+#define ASPEED_AST2700_SCU_IC1_SHIFT 0
-+#define ASPEED_AST2700_SCU_IC1_ENABLE \
-+ GENMASK(3, ASPEED_AST2700_SCU_IC1_SHIFT)
-+#define ASPEED_AST2700_SCU_IC1_NUM_IRQS 4
-+
-+#define ASPEED_AST2700_SCU_IC2_EN_REG 0x104
-+#define ASPEED_AST2700_SCU_IC2_STS_REG 0x100
-+#define ASPEED_AST2700_SCU_IC2_SHIFT 0
-+#define ASPEED_AST2700_SCU_IC2_ENABLE \
-+ GENMASK(3, ASPEED_AST2700_SCU_IC2_SHIFT)
-+#define ASPEED_AST2700_SCU_IC2_NUM_IRQS 4
-+
-+#define ASPEED_AST2700_SCU_IC3_EN_REG 0x10c
-+#define ASPEED_AST2700_SCU_IC3_STS_REG 0x108
-+#define ASPEED_AST2700_SCU_IC3_SHIFT 0
-+#define ASPEED_AST2700_SCU_IC3_ENABLE \
-+ GENMASK(1, ASPEED_AST2700_SCU_IC3_SHIFT)
-+#define ASPEED_AST2700_SCU_IC3_NUM_IRQS 2
-+
- struct aspeed_scu_ic {
- unsigned long irq_enable;
- unsigned long irq_shift;
- unsigned int num_irqs;
-+ bool en_sts_split;
- unsigned int reg;
-+ unsigned int en_reg;
-+ unsigned int sts_reg;
- struct regmap *scu;
- struct irq_domain *irq_domain;
- };
-
- static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
- {
-- unsigned int sts;
-+ unsigned int val;
- unsigned long bit;
- unsigned long enabled;
- unsigned long max;
- unsigned long status;
- struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
-- unsigned int mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT;
-+ unsigned int mask;
-
- chained_irq_enter(chip, desc);
-
-- /*
-- * The SCU IC has just one register to control its operation and read
-- * status. The interrupt enable bits occupy the lower 16 bits of the
-- * register, while the interrupt status bits occupy the upper 16 bits.
-- * The status bit for a given interrupt is always 16 bits shifted from
-- * the enable bit for the same interrupt.
-- * Therefore, perform the IRQ operations in the enable bit space by
-- * shifting the status down to get the mapping and then back up to
-- * clear the bit.
-- */
-- regmap_read(scu_ic->scu, scu_ic->reg, &sts);
-- enabled = sts & scu_ic->irq_enable;
-- status = (sts >> ASPEED_SCU_IC_STATUS_SHIFT) & enabled;
--
-- bit = scu_ic->irq_shift;
-- max = scu_ic->num_irqs + bit;
--
-- for_each_set_bit_from(bit, &status, max) {
-- generic_handle_domain_irq(scu_ic->irq_domain,
-- bit - scu_ic->irq_shift);
--
-- regmap_write_bits(scu_ic->scu, scu_ic->reg, mask,
-- BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
-+ if (!scu_ic->en_sts_split) {
-+ mask = scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT;
-+ /*
-+ * The SCU IC has just one register to control its operation and read
-+ * status. The interrupt enable bits occupy the lower 16 bits of the
-+ * register, while the interrupt status bits occupy the upper 16 bits.
-+ * The status bit for a given interrupt is always 16 bits shifted from
-+ * the enable bit for the same interrupt.
-+ * Therefore, perform the IRQ operations in the enable bit space by
-+ * shifting the status down to get the mapping and then back up to
-+ * clear the bit.
-+ */
-+ regmap_read(scu_ic->scu, scu_ic->reg, &val);
-+ enabled = val & scu_ic->irq_enable;
-+ status = (val >> ASPEED_SCU_IC_STATUS_SHIFT) & enabled;
-+
-+ bit = scu_ic->irq_shift;
-+ max = scu_ic->num_irqs + bit;
-+
-+ for_each_set_bit_from(bit, &status, max) {
-+ generic_handle_domain_irq(scu_ic->irq_domain,
-+ bit - scu_ic->irq_shift);
-+
-+ regmap_write_bits(scu_ic->scu, scu_ic->reg, mask,
-+ BIT(bit + ASPEED_SCU_IC_STATUS_SHIFT));
-+ }
-+ } else {
-+ mask = scu_ic->irq_enable;
-+ regmap_read(scu_ic->scu, scu_ic->en_reg, &val);
-+ enabled = val & scu_ic->irq_enable;
-+ regmap_read(scu_ic->scu, scu_ic->sts_reg, &val);
-+ status = val & enabled;
-+
-+ bit = scu_ic->irq_shift;
-+ max = scu_ic->num_irqs + bit;
-+
-+ for_each_set_bit_from(bit, &status, max) {
-+ generic_handle_domain_irq(scu_ic->irq_domain, bit - scu_ic->irq_shift);
-+
-+ regmap_write_bits(scu_ic->scu, scu_ic->sts_reg, mask, BIT(bit));
-+ }
- }
-
- chained_irq_exit(chip, desc);
-@@ -87,30 +136,41 @@ static void aspeed_scu_ic_irq_handler(struct irq_desc *desc)
- static void aspeed_scu_ic_irq_mask(struct irq_data *data)
- {
- struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
-- unsigned int mask = BIT(data->hwirq + scu_ic->irq_shift) |
-- (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
--
-- /*
-- * Status bits are cleared by writing 1. In order to prevent the mask
-- * operation from clearing the status bits, they should be under the
-- * mask and written with 0.
-- */
-- regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, 0);
-+ unsigned int mask;
-+
-+ if (!scu_ic->en_sts_split) {
-+ mask = BIT(data->hwirq + scu_ic->irq_shift) |
-+ (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
-+ /*
-+ * Status bits are cleared by writing 1. In order to prevent the mask
-+ * operation from clearing the status bits, they should be under the
-+ * mask and written with 0.
-+ */
-+ regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, 0);
-+ } else {
-+ mask = BIT(data->hwirq + scu_ic->irq_shift);
-+ regmap_update_bits(scu_ic->scu, scu_ic->en_reg, mask, 0);
-+ }
- }
-
- static void aspeed_scu_ic_irq_unmask(struct irq_data *data)
- {
- struct aspeed_scu_ic *scu_ic = irq_data_get_irq_chip_data(data);
- unsigned int bit = BIT(data->hwirq + scu_ic->irq_shift);
-- unsigned int mask = bit |
-- (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
--
-- /*
-- * Status bits are cleared by writing 1. In order to prevent the unmask
-- * operation from clearing the status bits, they should be under the
-- * mask and written with 0.
-- */
-- regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, bit);
-+ unsigned int mask;
-+
-+ if (!scu_ic->en_sts_split) {
-+ mask = bit | (scu_ic->irq_enable << ASPEED_SCU_IC_STATUS_SHIFT);
-+ /*
-+ * Status bits are cleared by writing 1. In order to prevent the unmask
-+ * operation from clearing the status bits, they should be under the
-+ * mask and written with 0.
-+ */
-+ regmap_update_bits(scu_ic->scu, scu_ic->reg, mask, bit);
-+ } else {
-+ mask = bit;
-+ regmap_update_bits(scu_ic->scu, scu_ic->en_reg, mask, bit);
-+ }
- }
-
- static int aspeed_scu_ic_irq_set_affinity(struct irq_data *data,
-@@ -156,8 +216,19 @@ static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
- rc = PTR_ERR(scu_ic->scu);
- goto err;
- }
-- regmap_write_bits(scu_ic->scu, scu_ic->reg, ASPEED_SCU_IC_STATUS, ASPEED_SCU_IC_STATUS);
-- regmap_write_bits(scu_ic->scu, scu_ic->reg, ASPEED_SCU_IC_ENABLE, 0);
-+
-+ /* Clear status and disable all interrupt */
-+ if (!scu_ic->en_sts_split) {
-+ regmap_write_bits(scu_ic->scu, scu_ic->reg,
-+ ASPEED_SCU_IC_STATUS, ASPEED_SCU_IC_STATUS);
-+ regmap_write_bits(scu_ic->scu, scu_ic->reg,
-+ ASPEED_SCU_IC_ENABLE, 0);
-+ } else {
-+ regmap_write_bits(scu_ic->scu, scu_ic->sts_reg,
-+ scu_ic->irq_enable, scu_ic->irq_enable);
-+ regmap_write_bits(scu_ic->scu, scu_ic->en_reg,
-+ scu_ic->irq_enable, 0);
-+ }
-
- irq = irq_of_parse_and_map(node, 0);
- if (!irq) {
-@@ -232,9 +303,89 @@ static int __init aspeed_ast2600_scu_ic1_of_init(struct device_node *node,
- return aspeed_scu_ic_of_init_common(scu_ic, node);
- }
-
-+static int __init aspeed_ast2700_scu_ic0_of_init(struct device_node *node,
-+ struct device_node *parent)
-+{
-+ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
-+
-+ if (!scu_ic)
-+ return -ENOMEM;
-+
-+ scu_ic->irq_enable = ASPEED_AST2700_SCU_IC0_ENABLE;
-+ scu_ic->irq_shift = ASPEED_AST2700_SCU_IC0_SHIFT;
-+ scu_ic->num_irqs = ASPEED_AST2700_SCU_IC0_NUM_IRQS;
-+ scu_ic->en_sts_split = true;
-+ scu_ic->en_reg = ASPEED_AST2700_SCU_IC0_EN_REG;
-+ scu_ic->sts_reg = ASPEED_AST2700_SCU_IC0_STS_REG;
-+
-+ return aspeed_scu_ic_of_init_common(scu_ic, node);
-+}
-+
-+static int __init aspeed_ast2700_scu_ic1_of_init(struct device_node *node,
-+ struct device_node *parent)
-+{
-+ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
-+
-+ if (!scu_ic)
-+ return -ENOMEM;
-+
-+ scu_ic->irq_enable = ASPEED_AST2700_SCU_IC1_ENABLE;
-+ scu_ic->irq_shift = ASPEED_AST2700_SCU_IC1_SHIFT;
-+ scu_ic->num_irqs = ASPEED_AST2700_SCU_IC1_NUM_IRQS;
-+ scu_ic->en_sts_split = true;
-+ scu_ic->en_reg = ASPEED_AST2700_SCU_IC1_EN_REG;
-+ scu_ic->sts_reg = ASPEED_AST2700_SCU_IC1_STS_REG;
-+
-+ return aspeed_scu_ic_of_init_common(scu_ic, node);
-+}
-+
-+static int __init aspeed_ast2700_scu_ic2_of_init(struct device_node *node,
-+ struct device_node *parent)
-+{
-+ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
-+
-+ if (!scu_ic)
-+ return -ENOMEM;
-+
-+ scu_ic->irq_enable = ASPEED_AST2700_SCU_IC2_ENABLE;
-+ scu_ic->irq_shift = ASPEED_AST2700_SCU_IC2_SHIFT;
-+ scu_ic->num_irqs = ASPEED_AST2700_SCU_IC2_NUM_IRQS;
-+ scu_ic->en_sts_split = true;
-+ scu_ic->en_reg = ASPEED_AST2700_SCU_IC2_EN_REG;
-+ scu_ic->sts_reg = ASPEED_AST2700_SCU_IC2_STS_REG;
-+
-+ return aspeed_scu_ic_of_init_common(scu_ic, node);
-+}
-+
-+static int __init aspeed_ast2700_scu_ic3_of_init(struct device_node *node,
-+ struct device_node *parent)
-+{
-+ struct aspeed_scu_ic *scu_ic = kzalloc(sizeof(*scu_ic), GFP_KERNEL);
-+
-+ if (!scu_ic)
-+ return -ENOMEM;
-+
-+ scu_ic->irq_enable = ASPEED_AST2700_SCU_IC3_ENABLE;
-+ scu_ic->irq_shift = ASPEED_AST2700_SCU_IC3_SHIFT;
-+ scu_ic->num_irqs = ASPEED_AST2700_SCU_IC3_NUM_IRQS;
-+ scu_ic->en_sts_split = true;
-+ scu_ic->en_reg = ASPEED_AST2700_SCU_IC3_EN_REG;
-+ scu_ic->sts_reg = ASPEED_AST2700_SCU_IC3_STS_REG;
-+
-+ return aspeed_scu_ic_of_init_common(scu_ic, node);
-+}
-+
- IRQCHIP_DECLARE(ast2400_scu_ic, "aspeed,ast2400-scu-ic", aspeed_scu_ic_of_init);
- IRQCHIP_DECLARE(ast2500_scu_ic, "aspeed,ast2500-scu-ic", aspeed_scu_ic_of_init);
- IRQCHIP_DECLARE(ast2600_scu_ic0, "aspeed,ast2600-scu-ic0",
- aspeed_ast2600_scu_ic0_of_init);
- IRQCHIP_DECLARE(ast2600_scu_ic1, "aspeed,ast2600-scu-ic1",
- aspeed_ast2600_scu_ic1_of_init);
-+IRQCHIP_DECLARE(ast2700_scu_ic0, "aspeed,ast2700-scu-ic0",
-+ aspeed_ast2700_scu_ic0_of_init);
-+IRQCHIP_DECLARE(ast2700_scu_ic1, "aspeed,ast2700-scu-ic1",
-+ aspeed_ast2700_scu_ic1_of_init);
-+IRQCHIP_DECLARE(ast2700_scu_ic2, "aspeed,ast2700-scu-ic2",
-+ aspeed_ast2700_scu_ic2_of_init);
-+IRQCHIP_DECLARE(ast2700_scu_ic3, "aspeed,ast2700-scu-ic3",
-+ aspeed_ast2700_scu_ic3_of_init);
-diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c
-index 3f759121dc00..7ee7f291109e 100644
---- a/drivers/soc/aspeed/aspeed-socinfo.c
-+++ b/drivers/soc/aspeed/aspeed-socinfo.c
-@@ -12,7 +12,9 @@
- static struct {
- const char *name;
- const u32 id;
--} const rev_table[] = {
-+}
-+
-+const rev_table[] = {
- /* AST2400 */
- { "AST2400", 0x02000303 },
- { "AST1400", 0x02010103 },
-@@ -27,6 +29,10 @@ static struct {
- { "AST2620", 0x05010203 },
- { "AST2605", 0x05030103 },
- { "AST2625", 0x05030403 },
-+ /* AST2700 */
-+ { "AST2750", 0x06000003 },
-+ { "AST2700", 0x06000103 },
-+ { "AST2720", 0x06000203 },
- };
-
- static const char *siliconid_to_name(u32 siliconid)
-@@ -57,7 +63,7 @@ static const char *siliconid_to_rev(u32 siliconid)
- case 3:
- return "A2";
- }
-- } else {
-+ } else if (gen == 0x6) {
- /* AST2600 */
- switch (rev) {
- case 0:
-@@ -69,6 +75,12 @@ static const char *siliconid_to_rev(u32 siliconid)
- case 3:
- return "A3";
- }
-+ } else {
-+ /* AST2700 */
-+ switch (rev) {
-+ case 0:
-+ return "A0";
-+ }
- }
-
- return "??";
---
-2.34.1
-
diff --git a/recipes-kernel/linux/files/0007-Add-SOC-driver-for-ast2700.patch b/recipes-kernel/linux/files/0007-Add-SOC-driver-for-ast2700.patch
new file mode 100644
index 0000000..69b8444
--- /dev/null
+++ b/recipes-kernel/linux/files/0007-Add-SOC-driver-for-ast2700.patch
@@ -0,0 +1,24513 @@
+From ae40c5c4a7045140ee165194e5fd7bd61c499d6d Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Fri, 7 Mar 2025 10:16:40 +0800
+Subject: [PATCH] Add SOC driver for ast2700
+
+This is based on Aspeed SDK 9.05.
+From linux-aspeed:
+769f62b7baa84d6998723b0ea60280e380183553
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
+---
+ drivers/soc/aspeed/Kconfig | 122 +
+ drivers/soc/aspeed/Makefile | 19 +
+ drivers/soc/aspeed/aspeed-bmc-dev.c | 727 ++++++
+ drivers/soc/aspeed/aspeed-disp-intf.c | 255 ++
+ drivers/soc/aspeed/aspeed-espi-comm.h | 196 ++
+ drivers/soc/aspeed/aspeed-host-bmc-dev.c | 791 ++++++
+ drivers/soc/aspeed/aspeed-lpc-ctrl.c | 6 +-
+ drivers/soc/aspeed/aspeed-lpc-mbox.c | 439 ++++
+ drivers/soc/aspeed/aspeed-lpc-pcc.c | 507 ++++
+ drivers/soc/aspeed/aspeed-lpc-snoop.c | 251 +-
+ drivers/soc/aspeed/aspeed-mctp.c | 2523 ++++++++++++++++++++
+ drivers/soc/aspeed/aspeed-p2a-ctrl.c | 6 +-
+ drivers/soc/aspeed/aspeed-pcie-mmbi.c | 389 +++
+ drivers/soc/aspeed/aspeed-pcie-mmbi.h | 20 +
+ drivers/soc/aspeed/aspeed-sbc.c | 73 +
+ drivers/soc/aspeed/aspeed-socinfo.c | 16 +-
+ drivers/soc/aspeed/aspeed-ssp.c | 277 +++
+ drivers/soc/aspeed/aspeed-uart-routing.c | 430 +++-
+ drivers/soc/aspeed/aspeed-udma.c | 433 ++++
+ drivers/soc/aspeed/aspeed-usb-hp.c | 138 ++
+ drivers/soc/aspeed/aspeed-usb-phy.c | 113 +
+ drivers/soc/aspeed/aspeed-xdma.c | 1433 +++++++++++
+ drivers/soc/aspeed/ast2500-espi.c | 1739 ++++++++++++++
+ drivers/soc/aspeed/ast2500-espi.h | 250 ++
+ drivers/soc/aspeed/ast2600-espi.c | 2141 +++++++++++++++++
+ drivers/soc/aspeed/ast2600-espi.h | 297 +++
+ drivers/soc/aspeed/ast2600-otp.c | 640 +++++
+ drivers/soc/aspeed/ast2700-espi.c | 2216 +++++++++++++++++
+ drivers/soc/aspeed/ast2700-espi.h | 275 +++
+ drivers/soc/aspeed/ast2700-otp.c | 567 +++++
+ drivers/soc/aspeed/rvas/Kconfig | 9 +
+ drivers/soc/aspeed/rvas/Makefile | 3 +
+ drivers/soc/aspeed/rvas/hardware_engines.c | 2203 +++++++++++++++++
+ drivers/soc/aspeed/rvas/hardware_engines.h | 551 +++++
+ drivers/soc/aspeed/rvas/video.h | 41 +
+ drivers/soc/aspeed/rvas/video_debug.h | 35 +
+ drivers/soc/aspeed/rvas/video_engine.c | 1339 +++++++++++
+ drivers/soc/aspeed/rvas/video_engine.h | 270 +++
+ drivers/soc/aspeed/rvas/video_ioctl.h | 275 +++
+ drivers/soc/aspeed/rvas/video_main.c | 1851 ++++++++++++++
+ 40 files changed, 23713 insertions(+), 153 deletions(-)
+ create mode 100644 drivers/soc/aspeed/aspeed-bmc-dev.c
+ create mode 100644 drivers/soc/aspeed/aspeed-disp-intf.c
+ create mode 100644 drivers/soc/aspeed/aspeed-espi-comm.h
+ create mode 100644 drivers/soc/aspeed/aspeed-host-bmc-dev.c
+ create mode 100644 drivers/soc/aspeed/aspeed-lpc-mbox.c
+ create mode 100644 drivers/soc/aspeed/aspeed-lpc-pcc.c
+ create mode 100644 drivers/soc/aspeed/aspeed-mctp.c
+ create mode 100644 drivers/soc/aspeed/aspeed-pcie-mmbi.c
+ create mode 100644 drivers/soc/aspeed/aspeed-pcie-mmbi.h
+ create mode 100644 drivers/soc/aspeed/aspeed-sbc.c
+ create mode 100644 drivers/soc/aspeed/aspeed-ssp.c
+ create mode 100644 drivers/soc/aspeed/aspeed-udma.c
+ create mode 100644 drivers/soc/aspeed/aspeed-usb-hp.c
+ create mode 100644 drivers/soc/aspeed/aspeed-usb-phy.c
+ create mode 100644 drivers/soc/aspeed/aspeed-xdma.c
+ create mode 100644 drivers/soc/aspeed/ast2500-espi.c
+ create mode 100644 drivers/soc/aspeed/ast2500-espi.h
+ create mode 100644 drivers/soc/aspeed/ast2600-espi.c
+ create mode 100644 drivers/soc/aspeed/ast2600-espi.h
+ create mode 100644 drivers/soc/aspeed/ast2600-otp.c
+ create mode 100644 drivers/soc/aspeed/ast2700-espi.c
+ create mode 100644 drivers/soc/aspeed/ast2700-espi.h
+ create mode 100644 drivers/soc/aspeed/ast2700-otp.c
+ create mode 100644 drivers/soc/aspeed/rvas/Kconfig
+ create mode 100644 drivers/soc/aspeed/rvas/Makefile
+ create mode 100644 drivers/soc/aspeed/rvas/hardware_engines.c
+ create mode 100644 drivers/soc/aspeed/rvas/hardware_engines.h
+ create mode 100644 drivers/soc/aspeed/rvas/video.h
+ create mode 100644 drivers/soc/aspeed/rvas/video_debug.h
+ create mode 100644 drivers/soc/aspeed/rvas/video_engine.c
+ create mode 100644 drivers/soc/aspeed/rvas/video_engine.h
+ create mode 100644 drivers/soc/aspeed/rvas/video_ioctl.h
+ create mode 100644 drivers/soc/aspeed/rvas/video_main.c
+
+diff --git a/drivers/soc/aspeed/Kconfig b/drivers/soc/aspeed/Kconfig
+index f579ee0b5..98149a48a 100644
+--- a/drivers/soc/aspeed/Kconfig
++++ b/drivers/soc/aspeed/Kconfig
+@@ -4,6 +4,12 @@ if ARCH_ASPEED || COMPILE_TEST
+
+ menu "ASPEED SoC drivers"
+
++config ASPEED_BMC_DEV
++ tristate "ASPEED BMC Device"
++
++config ASPEED_HOST_BMC_DEV
++ tristate "ASPEED Host BMC Device"
++
+ config ASPEED_LPC_CTRL
+ tristate "ASPEED LPC firmware cycle control"
+ select REGMAP
+@@ -24,6 +30,20 @@ config ASPEED_LPC_SNOOP
+ allows the BMC to listen on and save the data written by
+ the host to an arbitrary LPC I/O port.
+
++config ASPEED_SSP
++ tristate "ASPEED SSP loader"
++ default n
++ help
++ Driver for loading secondary-service-processor binary
++
++config ASPEED_MCTP
++ tristate "Aspeed ast2600 MCTP Controller support"
++ depends on REGMAP && MFD_SYSCON
++ help
++ Enable support for the ast2600 MCTP Controller.
++ The MCTP controller allows the BMC to communicate with devices on
++ the host PCIe network.
++
+ config ASPEED_UART_ROUTING
+ tristate "ASPEED uart routing control"
+ select REGMAP
+@@ -34,6 +54,16 @@ config ASPEED_UART_ROUTING
+ users to perform runtime configuration of the RX muxes among
+ the UART controllers and I/O pins.
+
++config ASPEED_LPC_MAILBOX
++ tristate "ASPEED LPC mailbox support"
++ select REGMAP
++ select MFD_SYSCON
++ default ARCH_ASPEED
++ help
++ Provides a driver to control the LPC mailbox which possesses
++ up to 32 data registers for the communication between the Host
++ and the BMC over LPC.
++
+ config ASPEED_P2A_CTRL
+ tristate "ASPEED P2A (VGA MMIO to BMC) bridge control"
+ select REGMAP
+@@ -52,6 +82,98 @@ config ASPEED_SOCINFO
+ help
+ Say yes to support decoding of ASPEED BMC information.
+
++config ASPEED_XDMA
++ tristate "ASPEED XDMA Engine Driver"
++ select REGMAP
++ select MFD_SYSCON
++ depends on HAS_DMA
++ help
++ Enable support for the XDMA Engine found on the ASPEED BMC
++ SoCs. The XDMA engine can perform PCIe DMA operations between the BMC
++ and a host processor.
++
++config ASPEED_SBC
++ bool "ASPEED Secure Boot Controller driver"
++ default MACH_ASPEED_G6
++ help
++ Say yes to provide information about the secure boot controller in
++ debugfs.
++
++config AST2500_ESPI
++ tristate "ASPEED AST2500 eSPI slave driver"
++ help
++ Enable driver support for the Aspeed AST2500 eSPI engine. The eSPI
++ engine acts as a slave device in the BMC, communicating with the Host
++ over the eSPI interface. The four eSPI channels, namely peripheral,
++ virtual wire, out-of-band, and flash, are supported.
++
++config AST2600_ESPI
++ tristate "ASPEED AST2600 eSPI slave driver"
++ help
++ Enable driver support for the Aspeed AST2600 eSPI engine. The eSPI
++ engine acts as a slave device in the BMC, communicating with the Host
++ over the eSPI interface. The four eSPI channels, namely peripheral,
++ virtual wire, out-of-band, and flash, are supported.
++
++config AST2700_ESPI
++ tristate "ASPEED AST2700 eSPI slave driver"
++ help
++ Enable driver support for the Aspeed AST2700 eSPI engine. The eSPI
++ engine acts as a slave device in the BMC, communicating with the Host
++ over the eSPI interface. The four eSPI channels, namely peripheral,
++ virtual wire, out-of-band, and flash, are supported.
++
++config ASPEED_LPC_PCC
++ tristate "Aspeed Post Code Capture support"
++ depends on ARCH_ASPEED && REGMAP && MFD_SYSCON
++ help
++ Provides a driver to control the LPC PCC interface,
++ allowing the BMC to snoop data bytes written by the
++ host to an arbitrary LPC I/O port.
++
++config ASPEED_UDMA
++ tristate "Aspeed UDMA Engine Driver"
++ depends on ARCH_ASPEED && REGMAP && MFD_SYSCON && HAS_DMA
++ help
++ Enable support for the Aspeed UDMA Engine found on the Aspeed AST2XXX
++ SOCs. The UDMA engine can perform UART DMA operations between the memory
++ buffer and the UART/VUART devices.
++
++config ASPEED_OTP
++ tristate
++ help
++ Enable driver support for Aspeed OTP driver. Each bit in One
++ Time Programmable (OTP) memory can be programmed only once.
++ The OTP driver performs basic read/program operations of
++ OTP memory.
++
++config AST2600_OTP
++ tristate "AST2600 OTP Driver"
++ select ASPEED_OTP
++ depends on ARCH_ASPEED
++ help
++ Enable driver support for Aspeed AST2600 OTP driver.
++
++config AST2700_OTP
++ tristate "AST2700 OTP Driver"
++ select ASPEED_OTP
++ depends on ARCH_ASPEED
++ help
++ Enable driver support for Aspeed AST2700 OTP driver.
++
++config ASPEED_DISP_INTF
++ bool "ASPEED Display Interface driver"
++ select REGMAP
++ select MFD_SYSCON
++ default ARCH_ASPEED
++ help
++ Say yes to support controlling the display interface of the ASPEED BMC.
++
++config ASPEED_PCIE_MMBI
++ tristate "ASPEED PCIE MMBI"
++
++source "drivers/soc/aspeed/rvas/Kconfig"
++
+ endmenu
+
+ endif
+diff --git a/drivers/soc/aspeed/Makefile b/drivers/soc/aspeed/Makefile
+index b35d74592..0dcc9ac2a 100644
+--- a/drivers/soc/aspeed/Makefile
++++ b/drivers/soc/aspeed/Makefile
+@@ -1,6 +1,25 @@
+ # SPDX-License-Identifier: GPL-2.0-only
++obj-$(CONFIG_ASPEED_BMC_DEV) += aspeed-bmc-dev.o
++obj-$(CONFIG_ASPEED_HOST_BMC_DEV) += aspeed-host-bmc-dev.o
+ obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
+ obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
+ obj-$(CONFIG_ASPEED_UART_ROUTING) += aspeed-uart-routing.o
++obj-$(CONFIG_ASPEED_SSP) += aspeed-ssp.o
+ obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o
+ obj-$(CONFIG_ASPEED_SOCINFO) += aspeed-socinfo.o
++obj-$(CONFIG_ASPEED_SBC) += aspeed-sbc.o
++obj-$(CONFIG_ASPEED_XDMA) += aspeed-xdma.o
++obj-$(CONFIG_AST2500_ESPI) += ast2500-espi.o
++obj-$(CONFIG_AST2600_ESPI) += ast2600-espi.o
++obj-$(CONFIG_AST2700_ESPI) += ast2700-espi.o
++obj-$(CONFIG_ASPEED_LPC_MAILBOX) += aspeed-lpc-mbox.o
++obj-$(CONFIG_ASPEED_LPC_PCC) += aspeed-lpc-pcc.o
++obj-$(CONFIG_ASPEED_RVAS) += rvas/
++obj-$(CONFIG_ASPEED_UDMA) += aspeed-udma.o
++obj-$(CONFIG_ARCH_ASPEED) += aspeed-usb-phy.o
++obj-$(CONFIG_ARCH_ASPEED) += aspeed-usb-hp.o
++obj-$(CONFIG_ASPEED_MCTP) += aspeed-mctp.o
++obj-$(CONFIG_AST2600_OTP) += ast2600-otp.o
++obj-$(CONFIG_AST2700_OTP) += ast2700-otp.o
++obj-$(CONFIG_ASPEED_DISP_INTF) += aspeed-disp-intf.o
++obj-$(CONFIG_ASPEED_PCIE_MMBI) += aspeed-pcie-mmbi.o
+diff --git a/drivers/soc/aspeed/aspeed-bmc-dev.c b/drivers/soc/aspeed/aspeed-bmc-dev.c
+new file mode 100644
+index 000000000..60d99239c
+--- /dev/null
++++ b/drivers/soc/aspeed/aspeed-bmc-dev.c
+@@ -0,0 +1,727 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++// Copyright (C) ASPEED Technology Inc.
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
++#include <linux/of.h>
++#include <linux/of_platform.h>
++#include <linux/of_reserved_mem.h>
++#include <linux/platform_device.h>
++
++#include <linux/wait.h>
++#include <linux/workqueue.h>
++
++#include <linux/regmap.h>
++#include <linux/interrupt.h>
++#include <linux/mfd/syscon.h>
++#include <linux/dma-mapping.h>
++#include <linux/miscdevice.h>
++
++static DEFINE_IDA(bmc_device_ida);
++
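++/*
++ * With SCU_TRIGGER_MSI defined, the PCI device INTx/MSI used to signal the
++ * host is routed through the SCU (SCUC24/SCU560) instead of the Host2BMC
++ * controller; see the #ifdef block in aspeed_ast2600_init().
++ */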
++#define SCU_TRIGGER_MSI
++
++/* AST2600 SCU */
++#define ASPEED_SCU04 0x04
++#define AST2600A3_SCU04 0x05030303
++#define ASPEED_SCUC20 0xC20
++#define ASPEED_SCUC24 0xC24
++#define MSI_ROUTING_MASK GENMASK(11, 10)
++#define PCIDEV1_INTX_MSI_HOST2BMC_EN BIT(18)
++#define MSI_ROUTING_PCIe2LPC_PCIDEV0 (0x1 << 10)
++#define MSI_ROUTING_PCIe2LPC_PCIDEV1 (0x2 << 10)
++/* AST2700 SCU */
++#define SCU0_REVISION_ID 0x0
++#define REVISION_ID GENMASK(23, 16)
++/* Host2BMC */
++#define ASPEED_BMC_MEM_BAR 0xF10
++#define PCIE2PCI_MEM_BAR_ENABLE BIT(1)
++#define HOST2BMC_MEM_BAR_ENABLE BIT(0)
++#define ASPEED_BMC_MEM_BAR_REMAP 0xF18
++
++#define ASPEED_BMC_SHADOW_CTRL 0xF50
++#define READ_ONLY_MASK BIT(31)
++#define MASK_BAR1 BIT(2)
++#define MASK_BAR0 BIT(1)
++#define SHADOW_CFG BIT(0)
++
++#define ASPEED_BMC_HOST2BMC_Q1 0xA000
++#define ASPEED_BMC_HOST2BMC_Q2 0xA010
++#define ASPEED_BMC_BMC2HOST_Q1 0xA020
++#define ASPEED_BMC_BMC2HOST_Q2 0xA030
++#define ASPEED_BMC_BMC2HOST_STS 0xA040
++#define BMC2HOST_INT_STS_DOORBELL BIT(31)
++#define BMC2HOST_ENABLE_INTB BIT(30)
++#define BMC2HOST_Q1_FULL BIT(27)
++#define BMC2HOST_Q1_EMPTY BIT(26)
++#define BMC2HOST_Q2_FULL BIT(25)
++#define BMC2HOST_Q2_EMPTY BIT(24)
++#define BMC2HOST_Q1_FULL_UNMASK BIT(23)
++#define BMC2HOST_Q1_EMPTY_UNMASK BIT(22)
++#define BMC2HOST_Q2_FULL_UNMASK BIT(21)
++#define BMC2HOST_Q2_EMPTY_UNMASK BIT(20)
++
++#define ASPEED_BMC_HOST2BMC_STS 0xA044
++#define HOST2BMC_INT_STS_DOORBELL BIT(31)
++#define HOST2BMC_ENABLE_INTB BIT(30)
++#define HOST2BMC_Q1_FULL BIT(27)
++#define HOST2BMC_Q1_EMPTY BIT(26)
++#define HOST2BMC_Q2_FULL BIT(25)
++#define HOST2BMC_Q2_EMPTY BIT(24)
++#define HOST2BMC_Q1_FULL_UNMASK BIT(23)
++#define HOST2BMC_Q1_EMPTY_UNMASK BIT(22)
++#define HOST2BMC_Q2_FULL_UNMASK BIT(21)
++#define HOST2BMC_Q2_EMPTY_UNMASK BIT(20)
++
++#define ASPEED_SCU_PCIE_CONF_CTRL 0xC20
++#define SCU_PCIE_CONF_BMC_DEV_EN BIT(8)
++#define SCU_PCIE_CONF_BMC_DEV_EN_MMIO BIT(9)
++#define SCU_PCIE_CONF_BMC_DEV_EN_MSI BIT(11)
++#define SCU_PCIE_CONF_BMC_DEV_EN_IRQ BIT(13)
++#define SCU_PCIE_CONF_BMC_DEV_EN_DMA BIT(14)
++#define SCU_PCIE_CONF_BMC_DEV_EN_E2L BIT(15)
++#define SCU_PCIE_CONF_BMC_DEV_EN_LPC_DECODE BIT(21)
++
++#define ASPEED_SCU_BMC_DEV_CLASS 0xC68
++
++#define ASPEED_QUEUE_NUM 2
++enum queue_index {
++ QUEUE1 = 0,
++ QUEUE2,
++};
++
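++/* Per-SoC hooks: controller init plus the sysfs queue read/write handlers. */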
++struct aspeed_platform {
++ int (*init)(struct platform_device *pdev);
++ ssize_t (*queue_rx)(struct file *filp, struct kobject *kobj, struct bin_attribute *attr,
++ char *buf, loff_t off, size_t count);
++ ssize_t (*queue_tx)(struct file *filp, struct kobject *kobj, struct bin_attribute *attr,
++ char *buf, loff_t off, size_t count);
++};
++
++struct aspeed_queue_message {
++ /* Queue waiters for idle engine */
++ wait_queue_head_t tx_wait;
++ wait_queue_head_t rx_wait;
++ struct kernfs_node *kn;
++ struct bin_attribute bin;
++ int index;
++ struct aspeed_bmc_device *bmc_device;
++};
++
++struct aspeed_bmc_device {
++ unsigned char *host2bmc_base_virt;
++ struct device *dev;
++ struct miscdevice miscdev;
++ int id;
++ void __iomem *reg_base;
++ void __iomem *bmc_mem_virt;
++ dma_addr_t bmc_mem_phy;
++ phys_addr_t bmc_mem_size;
++
++ int pcie2lpc;
++ int irq;
++
++ struct aspeed_queue_message queue[ASPEED_QUEUE_NUM];
++
++ const struct aspeed_platform *platform;
++
++ /* AST2700 */
++ struct regmap *config;
++ struct regmap *device;
++ struct regmap *e2m;
++ /*AST2600*/
++ struct regmap *scu;
++ int pcie_irq;
++};
++
++static struct aspeed_bmc_device *file_aspeed_bmc_device(struct file *file)
++{
++ return container_of(file->private_data, struct aspeed_bmc_device,
++ miscdev);
++}
++
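++/* Map the reserved BMC shared-memory region into userspace as non-cached memory. */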
++static int aspeed_bmc_device_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ struct aspeed_bmc_device *bmc_device = file_aspeed_bmc_device(file);
++ unsigned long vsize = vma->vm_end - vma->vm_start;
++ pgprot_t prot = vma->vm_page_prot;
++
++ if (((vma->vm_pgoff << PAGE_SHIFT) + vsize) > bmc_device->bmc_mem_size)
++ return -EINVAL;
++
++ prot = pgprot_noncached(prot);
++
++ if (remap_pfn_range(vma, vma->vm_start,
++ (bmc_device->bmc_mem_phy >> PAGE_SHIFT) + vma->vm_pgoff, vsize, prot))
++ return -EAGAIN;
++
++ return 0;
++}
++
++static const struct file_operations aspeed_bmc_device_fops = {
++ .owner = THIS_MODULE,
++ .mmap = aspeed_bmc_device_mmap,
++};
++
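++/*
++ * Sysfs queue read/write handlers: block until the selected queue is
++ * non-empty (rx) or non-full (tx), transfer one 32-bit word, then notify
++ * the host via the doorbell or the SCU560 toggle on pre-A3 silicon.
++ */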
++static ssize_t aspeed_ast2600_queue_rx(struct file *filp, struct kobject *kobj,
++ struct bin_attribute *attr, char *buf, loff_t off,
++ size_t count)
++{
++ struct aspeed_queue_message *queue = attr->private;
++ struct aspeed_bmc_device *bmc_device = queue->bmc_device;
++ int index = queue->index;
++ u32 *data = (u32 *)buf;
++ u32 scu_id;
++ int ret;
++
++ ret = wait_event_interruptible(queue->rx_wait,
++ !(readl(bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS) &
++ ((index == QUEUE1) ? HOST2BMC_Q1_EMPTY : HOST2BMC_Q2_EMPTY)));
++ if (ret)
++ return -EINTR;
++
++ data[0] = readl(bmc_device->reg_base +
++ ((index == QUEUE1) ? ASPEED_BMC_HOST2BMC_Q1 : ASPEED_BMC_HOST2BMC_Q2));
++
++ regmap_read(bmc_device->scu, ASPEED_SCU04, &scu_id);
++ if (scu_id == AST2600A3_SCU04) {
++ writel(BMC2HOST_INT_STS_DOORBELL | BMC2HOST_ENABLE_INTB,
++ bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS);
++ } else {
++ //A0 : BIT(12) A1 : BIT(15)
++ regmap_update_bits(bmc_device->scu, 0x560, BIT(15), BIT(15));
++ regmap_update_bits(bmc_device->scu, 0x560, BIT(15), 0);
++ }
++
++ return sizeof(u32);
++}
++
++static ssize_t aspeed_ast2600_queue_tx(struct file *filp, struct kobject *kobj,
++ struct bin_attribute *attr, char *buf, loff_t off,
++ size_t count)
++{
++ struct aspeed_queue_message *queue = attr->private;
++ struct aspeed_bmc_device *bmc_device = queue->bmc_device;
++ int index = queue->index;
++ u32 tx_buff;
++ u32 scu_id;
++ int ret;
++
++ if (count != sizeof(u32))
++ return -EINVAL;
++
++ ret = wait_event_interruptible(queue->tx_wait,
++ !(readl(bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS) &
++ ((index == QUEUE1) ? BMC2HOST_Q1_FULL : BMC2HOST_Q2_FULL)));
++ if (ret)
++ return -EINTR;
++
++ memcpy(&tx_buff, buf, 4);
++ writel(tx_buff, bmc_device->reg_base + ((index == QUEUE1) ? ASPEED_BMC_BMC2HOST_Q1 :
++ ASPEED_BMC_BMC2HOST_Q2));
++
++ /*
++ * Trigger an interrupt to the host. Doorbell MSI is only
++ * supported on AST2600 A3 and later.
++ */
++ regmap_read(bmc_device->scu, ASPEED_SCU04, &scu_id);
++ if (scu_id == AST2600A3_SCU04) {
++ writel(BMC2HOST_INT_STS_DOORBELL | BMC2HOST_ENABLE_INTB,
++ bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS);
++ } else {
++ //A0 : BIT(12) A1 : BIT(15)
++ regmap_update_bits(bmc_device->scu, 0x560, BIT(15), BIT(15));
++ regmap_update_bits(bmc_device->scu, 0x560, BIT(15), 0);
++ }
++
++ return sizeof(u32);
++}
++
++static ssize_t aspeed_ast2700_queue_rx(struct file *filp, struct kobject *kobj,
++ struct bin_attribute *attr, char *buf, loff_t off,
++ size_t count)
++{
++ struct aspeed_queue_message *queue = attr->private;
++ struct aspeed_bmc_device *bmc_device = queue->bmc_device;
++ int index = queue->index;
++ u32 *data = (u32 *)buf;
++ int ret;
++
++ ret = wait_event_interruptible(queue->rx_wait,
++ !(readl(bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS) &
++ ((index == QUEUE1) ? HOST2BMC_Q1_EMPTY : HOST2BMC_Q2_EMPTY)));
++ if (ret)
++ return -EINTR;
++
++ data[0] = readl(bmc_device->reg_base +
++ ((index == QUEUE1) ? ASPEED_BMC_HOST2BMC_Q1 : ASPEED_BMC_HOST2BMC_Q2));
++
++ writel(BMC2HOST_INT_STS_DOORBELL | BMC2HOST_ENABLE_INTB,
++ bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS);
++
++ return sizeof(u32);
++}
++
++static ssize_t aspeed_ast2700_queue_tx(struct file *filp, struct kobject *kobj,
++ struct bin_attribute *attr, char *buf, loff_t off,
++ size_t count)
++{
++ struct aspeed_queue_message *queue = attr->private;
++ struct aspeed_bmc_device *bmc_device = queue->bmc_device;
++ int index = queue->index;
++ u32 tx_buff;
++ int ret;
++
++ if (count != sizeof(u32))
++ return -EINVAL;
++
++ ret = wait_event_interruptible(queue->tx_wait,
++ !(readl(bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS) &
++ ((index == QUEUE1) ? BMC2HOST_Q1_FULL : BMC2HOST_Q2_FULL)));
++ if (ret)
++ return -EINTR;
++
++ memcpy(&tx_buff, buf, 4);
++ writel(tx_buff, bmc_device->reg_base + ((index == QUEUE1) ? ASPEED_BMC_BMC2HOST_Q1 :
++ ASPEED_BMC_BMC2HOST_Q2));
++
++ writel(BMC2HOST_INT_STS_DOORBELL | BMC2HOST_ENABLE_INTB,
++ bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS);
++
++ return sizeof(u32);
++}
++
++/* AST2600 */
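++/* PCIe ISR: drain any stale entries left in both host-to-BMC queues. */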
++static irqreturn_t aspeed_bmc_dev_pcie_isr(int irq, void *dev_id)
++{
++ struct aspeed_bmc_device *bmc_device = dev_id;
++
++ while (!(readl(bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS) & HOST2BMC_Q1_EMPTY))
++ readl(bmc_device->reg_base + ASPEED_BMC_HOST2BMC_Q1);
++
++ while (!(readl(bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS) & HOST2BMC_Q2_EMPTY))
++ readl(bmc_device->reg_base + ASPEED_BMC_HOST2BMC_Q2);
++
++ return IRQ_HANDLED;
++}
++
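++/*
++ * Doorbell ISR: acknowledge the interrupt status bits and wake any readers
++ * or writers waiting on the two message queues.
++ */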
++static irqreturn_t aspeed_bmc_dev_isr(int irq, void *dev_id)
++{
++ struct aspeed_bmc_device *bmc_device = dev_id;
++ u32 host2bmc_q_sts = readl(bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS);
++
++ if (host2bmc_q_sts & HOST2BMC_INT_STS_DOORBELL)
++ writel(HOST2BMC_INT_STS_DOORBELL, bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS);
++
++ if (host2bmc_q_sts & HOST2BMC_ENABLE_INTB)
++ writel(HOST2BMC_ENABLE_INTB, bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS);
++
++ if (host2bmc_q_sts & HOST2BMC_Q1_FULL)
++ dev_info(bmc_device->dev, "Q1 Full\n");
++
++ if (host2bmc_q_sts & HOST2BMC_Q2_FULL)
++ dev_info(bmc_device->dev, "Q2 Full\n");
++
++ if (!(readl(bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS) & BMC2HOST_Q1_FULL))
++ wake_up_interruptible(&bmc_device->queue[QUEUE1].tx_wait);
++
++ if (!(readl(bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS) & HOST2BMC_Q1_EMPTY))
++ wake_up_interruptible(&bmc_device->queue[QUEUE1].rx_wait);
++
++ if (!(readl(bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS) & BMC2HOST_Q2_FULL))
++ wake_up_interruptible(&bmc_device->queue[QUEUE2].tx_wait);
++
++ if (!(readl(bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS) & HOST2BMC_Q2_EMPTY))
++ wake_up_interruptible(&bmc_device->queue[QUEUE2].rx_wait);
++
++ return IRQ_HANDLED;
++}
++
++static int aspeed_ast2600_init(struct platform_device *pdev)
++{
++ struct aspeed_bmc_device *bmc_device = platform_get_drvdata(pdev);
++ struct device *dev = &pdev->dev;
++ u32 pcie_config_ctl = SCU_PCIE_CONF_BMC_DEV_EN_IRQ |
++ SCU_PCIE_CONF_BMC_DEV_EN_MMIO | SCU_PCIE_CONF_BMC_DEV_EN;
++ u32 scu_id;
++
++ bmc_device->scu = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,scu");
++ if (IS_ERR(bmc_device->scu)) {
++ dev_err(&pdev->dev, "failed to find SCU regmap\n");
++ return PTR_ERR(bmc_device->scu);
++ }
++
++ if (bmc_device->pcie2lpc)
++ pcie_config_ctl |= SCU_PCIE_CONF_BMC_DEV_EN_E2L |
++ SCU_PCIE_CONF_BMC_DEV_EN_LPC_DECODE;
++
++ regmap_update_bits(bmc_device->scu, ASPEED_SCU_PCIE_CONF_CTRL,
++ pcie_config_ctl, pcie_config_ctl);
++
++ /* Update the class code (0xFF, unassigned) as this is an MFD device */
++ regmap_write(bmc_device->scu, ASPEED_SCU_BMC_DEV_CLASS, 0xff000000);
++
++#ifdef SCU_TRIGGER_MSI
++ //SCUC24[17]: Enable PCI device 1 INTx/MSI from SCU560[15]. Will be added in next version
++ regmap_update_bits(bmc_device->scu, ASPEED_SCUC20, BIT(11) | BIT(14), BIT(11) | BIT(14));
++
++ regmap_read(bmc_device->scu, ASPEED_SCU04, &scu_id);
++ if (scu_id == AST2600A3_SCU04)
++ regmap_update_bits(bmc_device->scu, ASPEED_SCUC24,
++ PCIDEV1_INTX_MSI_HOST2BMC_EN | MSI_ROUTING_MASK,
++ PCIDEV1_INTX_MSI_HOST2BMC_EN | MSI_ROUTING_PCIe2LPC_PCIDEV1);
++ else
++ regmap_update_bits(bmc_device->scu, ASPEED_SCUC24,
++ BIT(17) | BIT(14) | BIT(11), BIT(17) | BIT(14) | BIT(11));
++#else
++ //SCUC24[18]: Enable PCI device 1 INTx/MSI from Host-to-BMC controller.
++ regmap_update_bits(bmc_device->scu, 0xc24, BIT(18) | BIT(14), BIT(18) | BIT(14));
++#endif
++
++ writel((~(bmc_device->bmc_mem_size - 1) & 0xFFFFFFFF) | HOST2BMC_MEM_BAR_ENABLE,
++ bmc_device->reg_base + ASPEED_BMC_MEM_BAR);
++ writel(bmc_device->bmc_mem_phy, bmc_device->reg_base + ASPEED_BMC_MEM_BAR_REMAP);
++
++ //Setting BMC to Host Q register
++ writel(BMC2HOST_Q2_FULL_UNMASK | BMC2HOST_Q1_FULL_UNMASK | BMC2HOST_ENABLE_INTB,
++ bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS);
++ writel(HOST2BMC_Q2_FULL_UNMASK | HOST2BMC_Q1_FULL_UNMASK | HOST2BMC_ENABLE_INTB,
++ bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS);
++
++ return 0;
++}
++
++static int aspeed_ast2700_init(struct platform_device *pdev)
++{
++ struct aspeed_bmc_device *bmc_device = platform_get_drvdata(pdev);
++ struct device *dev = &pdev->dev;
++ u32 pcie_config_ctl = SCU_PCIE_CONF_BMC_DEV_EN_IRQ |
++ SCU_PCIE_CONF_BMC_DEV_EN_MMIO | SCU_PCIE_CONF_BMC_DEV_EN;
++ u32 scu_id;
++ int i;
++
++ bmc_device->config = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,config");
++ if (IS_ERR(bmc_device->config)) {
++ dev_err(&pdev->dev, "failed to find config regmap\n");
++ return PTR_ERR(bmc_device->config);
++ }
++
++ bmc_device->device = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,device");
++ if (IS_ERR(bmc_device->device)) {
++ dev_err(&pdev->dev, "failed to find device regmap\n");
++ return PTR_ERR(bmc_device->device);
++ }
++
++ bmc_device->e2m = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,e2m");
++ if (IS_ERR(bmc_device->e2m)) {
++ dev_err(&pdev->dev, "failed to find e2m regmap\n");
++ return PTR_ERR(bmc_device->e2m);
++ }
++
++ bmc_device->scu = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,scu");
++ if (IS_ERR(bmc_device->scu)) {
++ dev_err(&pdev->dev, "failed to find SCU regmap\n");
++ return PTR_ERR(bmc_device->scu);
++ }
++
++ if (bmc_device->pcie2lpc)
++ pcie_config_ctl |= SCU_PCIE_CONF_BMC_DEV_EN_E2L |
++ SCU_PCIE_CONF_BMC_DEV_EN_LPC_DECODE;
++
++ regmap_update_bits(bmc_device->config, 0x10, pcie_config_ctl, pcie_config_ctl);
++
++ /* Update the class code (0xFF, unassigned) as this is an MFD device */
++ regmap_write(bmc_device->device, 0x18, 0xff000027);
++
++ //MSI
++ regmap_update_bits(bmc_device->device, 0x74, GENMASK(7, 4), BIT(7) | (5 << 4));
++
++ //EnPCIaMSI_EnPCIaIntA_EnPCIaMst_EnPCIaDev
++ //On AST2700 A0 (revision 0) do not enable MSI (bit 25); use INTx only
++ regmap_read(bmc_device->scu, SCU0_REVISION_ID, &scu_id);
++ if (scu_id & REVISION_ID)
++ regmap_update_bits(bmc_device->device, 0x70,
++ BIT(25) | BIT(17) | BIT(9) | BIT(1),
++ BIT(25) | BIT(17) | BIT(9) | BIT(1));
++ else
++ regmap_update_bits(bmc_device->device, 0x70,
++ BIT(17) | BIT(9) | BIT(1),
++ BIT(25) | BIT(17) | BIT(9) | BIT(1));
++
++	//check that the BAR size is a power-of-two multiple of 4K
++ for (i = 1; i < 16; i++) {
++ if ((bmc_device->bmc_mem_size / 4096) == (1 << (i - 1)))
++ break;
++ }
++ if (i == 16) {
++ i = 0;
++ dev_warn(bmc_device->dev,
++			 "BAR size not aligned to 4K: %dK\n", (u32)bmc_device->bmc_mem_size / 1024);
++ }
++
++ /*
++ * BAR assign in scu
++ * ((bar_mem / 4k) << 8) | per_size
++ */
++ regmap_write(bmc_device->device, 0x1c, ((bmc_device->bmc_mem_phy) >> 4) | i);
++
++ /*
++ * BAR assign in e2m
++ * e2m0:12c21000
++ * 108:host2bmc-0 for pcie0
++ * 128:host2bmc-1 for pcie0
++ * e2m1:12c22000
++ * 108:host2bmc-0 for pcie1
++ * 128:host2bmc-1 for pcie1
++ */
++ if (bmc_device->id)
++ regmap_write(bmc_device->e2m, 0x128, ((bmc_device->bmc_mem_phy) >> 4) | i);
++ else
++ regmap_write(bmc_device->e2m, 0x108, ((bmc_device->bmc_mem_phy) >> 4) | i);
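++	/*
++	 * Worked example of the encoding above (illustrative, assuming a
++	 * 64 KiB region at physical address 0xB0000000): 64 KiB / 4 KiB is
++	 * 16 == 1 << 4, so the alignment loop yields i = 5 and the value
++	 * programmed into both the SCU and e2m registers is
++	 * (0xB0000000 >> 4) | 5 = 0x0B000005.
++	 */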
++
++ //Setting BMC to Host Q register
++ writel(BMC2HOST_Q2_FULL_UNMASK | BMC2HOST_Q1_FULL_UNMASK | BMC2HOST_ENABLE_INTB,
++ bmc_device->reg_base + ASPEED_BMC_BMC2HOST_STS);
++ writel(HOST2BMC_Q2_FULL_UNMASK | HOST2BMC_Q1_FULL_UNMASK | HOST2BMC_ENABLE_INTB,
++ bmc_device->reg_base + ASPEED_BMC_HOST2BMC_STS);
++
++ return 0;
++}
++
++static int aspeed_bmc_device_setup_queue(struct platform_device *pdev)
++{
++ struct aspeed_bmc_device *bmc_device = platform_get_drvdata(pdev);
++ struct device *dev = &pdev->dev;
++ int ret, i;
++
++ for (i = 0; i < ASPEED_QUEUE_NUM; i++) {
++ struct aspeed_queue_message *queue = &bmc_device->queue[i];
++
++ init_waitqueue_head(&queue->tx_wait);
++ init_waitqueue_head(&queue->rx_wait);
++
++ sysfs_bin_attr_init(&queue->bin);
++
++ /* Queue name index starts from 1 */
++ queue->bin.attr.name =
++ devm_kasprintf(dev, GFP_KERNEL, "bmc-dev-queue%d", (i + 1));
++ queue->bin.attr.mode = 0600;
++ queue->bin.read = bmc_device->platform->queue_rx;
++ queue->bin.write = bmc_device->platform->queue_tx;
++ queue->bin.size = 4;
++ queue->bin.private = queue;
++
++ ret = sysfs_create_bin_file(&pdev->dev.kobj, &queue->bin);
++ if (ret) {
++			dev_err(dev, "failed to create bin file %d\n", i);
++ return ret;
++ }
++
++ queue->kn = kernfs_find_and_get(dev->kobj.sd, queue->bin.attr.name);
++ if (!queue->kn) {
++ sysfs_remove_bin_file(&dev->kobj, &queue->bin);
++ return ret;
++ }
++
++ queue->index = i;
++ queue->bmc_device = bmc_device;
++ }
++
++ return 0;
++}
++
++static int aspeed_bmc_device_setup_memory_mapping(struct platform_device *pdev)
++{
++ struct aspeed_bmc_device *bmc_device = platform_get_drvdata(pdev);
++ struct device *dev = &pdev->dev;
++ int ret;
++
++ bmc_device->miscdev.minor = MISC_DYNAMIC_MINOR;
++ bmc_device->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "bmc-device%d", bmc_device->id);
++ bmc_device->miscdev.fops = &aspeed_bmc_device_fops;
++ bmc_device->miscdev.parent = dev;
++ ret = misc_register(&bmc_device->miscdev);
++ if (ret) {
++ dev_err(dev, "Unable to register device\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++static struct aspeed_platform ast2600_platform = {
++ .init = aspeed_ast2600_init,
++ .queue_rx = aspeed_ast2600_queue_rx,
++ .queue_tx = aspeed_ast2600_queue_tx
++};
++
++static struct aspeed_platform ast2700_platform = {
++ .init = aspeed_ast2700_init,
++ .queue_rx = aspeed_ast2700_queue_rx,
++ .queue_tx = aspeed_ast2700_queue_tx
++};
++
++static const struct of_device_id aspeed_bmc_device_of_matches[] = {
++	{ .compatible = "aspeed,ast2600-bmc-device", .data = &ast2600_platform },
++	{ .compatible = "aspeed,ast2700-bmc-device", .data = &ast2700_platform },
++ {},
++};
++MODULE_DEVICE_TABLE(of, aspeed_bmc_device_of_matches);
++
++static int aspeed_bmc_device_probe(struct platform_device *pdev)
++{
++ struct aspeed_bmc_device *bmc_device;
++ struct device *dev = &pdev->dev;
++ struct resource res;
++ const void *md = of_device_get_match_data(dev);
++ struct device_node *np;
++ int ret = 0, i;
++
++ if (!md)
++ return -ENODEV;
++
++ bmc_device = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_bmc_device), GFP_KERNEL);
++ if (!bmc_device)
++ return -ENOMEM;
++ dev_set_drvdata(dev, bmc_device);
++
++ bmc_device->platform = md;
++
++ bmc_device->id = ida_simple_get(&bmc_device_ida, 0, 0, GFP_KERNEL);
++ if (bmc_device->id < 0)
++ goto out_region;
++
++ bmc_device->dev = dev;
++ bmc_device->reg_base = devm_platform_ioremap_resource(pdev, 0);
++ if (IS_ERR(bmc_device->reg_base))
++ goto out_region;
++
++ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
++ if (ret) {
++		dev_err(dev, "cannot set 64-bit DMA mask\n");
++ goto out_region;
++ }
++
++ np = of_parse_phandle(dev->of_node, "memory-region", 0);
++ if (!np || of_address_to_resource(np, 0, &res)) {
++ dev_err(dev, "Failed to find memory-region.\n");
++ ret = -ENOMEM;
++ goto out_region;
++ }
++
++ of_node_put(np);
++
++ bmc_device->bmc_mem_phy = res.start;
++ bmc_device->bmc_mem_size = resource_size(&res);
++ bmc_device->bmc_mem_virt = devm_ioremap_resource(dev, &res);
++ if (!bmc_device->bmc_mem_virt) {
++ dev_err(dev, "cannot map bmc dev memory region\n");
++ ret = -ENOMEM;
++ goto out_region;
++ }
++
++ bmc_device->irq = platform_get_irq(pdev, 0);
++ if (bmc_device->irq < 0) {
++ dev_err(&pdev->dev, "platform get of irq[=%d] failed!\n", bmc_device->irq);
++ goto out_unmap;
++ }
++ ret = devm_request_irq(&pdev->dev, bmc_device->irq, aspeed_bmc_dev_isr, 0,
++ dev_name(&pdev->dev), bmc_device);
++ if (ret) {
++ dev_err(dev, "aspeed bmc device Unable to get IRQ");
++ goto out_unmap;
++ }
++
++ ret = aspeed_bmc_device_setup_queue(pdev);
++ if (ret) {
++ dev_err(dev, "Cannot setup queue message");
++ goto out_irq;
++ }
++
++ ret = aspeed_bmc_device_setup_memory_mapping(pdev);
++ if (ret) {
++ dev_err(dev, "Cannot setup memory mapping misc");
++ goto out_free_queue;
++ }
++
++ if (of_property_read_bool(dev->of_node, "pcie2lpc"))
++ bmc_device->pcie2lpc = 1;
++
++ ret = bmc_device->platform->init(pdev);
++ if (ret) {
++ dev_err(dev, "Initialize bmc device failed\n");
++ goto out_free_misc;
++ }
++
++ bmc_device->pcie_irq = platform_get_irq(pdev, 1);
++ if (bmc_device->pcie_irq < 0) {
++ dev_warn(&pdev->dev,
++ "platform get of pcie irq[=%d] failed!\n", bmc_device->pcie_irq);
++ } else {
++ ret = devm_request_irq(&pdev->dev, bmc_device->pcie_irq,
++ aspeed_bmc_dev_pcie_isr, IRQF_SHARED,
++ dev_name(&pdev->dev), bmc_device);
++ if (ret < 0) {
++ dev_warn(dev, "Failed to request PCI-E IRQ %d.\n", ret);
++ bmc_device->pcie_irq = -1;
++ }
++ }
++
++ dev_info(dev, "aspeed bmc device: driver successfully loaded.\n");
++
++ return 0;
++
++out_free_misc:
++ misc_deregister(&bmc_device->miscdev);
++out_free_queue:
++ for (i = 0; i < ASPEED_QUEUE_NUM; i++)
++ sysfs_remove_bin_file(&pdev->dev.kobj, &bmc_device->queue[i].bin);
++out_irq:
++ devm_free_irq(&pdev->dev, bmc_device->irq, bmc_device);
++out_unmap:
++ iounmap(bmc_device->reg_base);
++ devm_iounmap(&pdev->dev, bmc_device->bmc_mem_virt);
++out_region:
++ devm_kfree(&pdev->dev, bmc_device);
++ dev_warn(dev, "aspeed bmc device: driver init failed (ret=%d)!\n", ret);
++ return ret;
++}
++
++static int aspeed_bmc_device_remove(struct platform_device *pdev)
++{
++ struct aspeed_bmc_device *bmc_device = platform_get_drvdata(pdev);
++ int i;
++
++ for (i = 0; i < ASPEED_QUEUE_NUM; i++)
++ sysfs_remove_bin_file(&pdev->dev.kobj, &bmc_device->queue[i].bin);
++ misc_deregister(&bmc_device->miscdev);
++ devm_free_irq(&pdev->dev, bmc_device->irq, bmc_device);
++ devm_free_irq(&pdev->dev, bmc_device->pcie_irq, bmc_device);
++
++ iounmap(bmc_device->reg_base);
++
++ devm_iounmap(&pdev->dev, bmc_device->bmc_mem_virt);
++
++ devm_kfree(&pdev->dev, bmc_device);
++
++ return 0;
++}
++
++static struct platform_driver aspeed_bmc_device_driver = {
++ .probe = aspeed_bmc_device_probe,
++ .remove = aspeed_bmc_device_remove,
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .of_match_table = aspeed_bmc_device_of_matches,
++ },
++};
++
++module_platform_driver(aspeed_bmc_device_driver);
++
++MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>");
++MODULE_DESCRIPTION("ASPEED BMC DEVICE Driver");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/soc/aspeed/aspeed-disp-intf.c b/drivers/soc/aspeed/aspeed-disp-intf.c
+new file mode 100644
+index 000000000..a2fc811a5
+--- /dev/null
++++ b/drivers/soc/aspeed/aspeed-disp-intf.c
+@@ -0,0 +1,255 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++// Copyright (C) ASPEED Technology Inc.
++#include <linux/bitfield.h>
++#include <linux/clk.h>
++#include <linux/log2.h>
++#include <linux/mfd/syscon.h>
++#include <linux/miscdevice.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/of_address.h>
++#include <linux/platform_device.h>
++#include <linux/poll.h>
++#include <linux/regmap.h>
++
++#define DEVICE_NAME "aspeed-disp-intf"
++
++#define AST2700_SCU_CHIP_ID 0x0
++#define SCU_CPU_REVISION_ID_HW GENMASK(23, 16)
++
++#define AST2700_SCU_PIN_SEL 0x414
++#define AST2700_SCU_D1PLL_SEL GENMASK(13, 12)
++#define AST2700_SCU_DAC_SRC_SEL GENMASK(11, 10)
++#define AST2700_SCU_DP_SRC_SEL GENMASK(9, 8)
++
++#define AST2600_SCU_PIN_SEL 0x0C0
++#define AST2600_SCU_DP_SRC_SEL BIT(18)
++#define AST2600_SCU_DAC_SRC_SEL BIT(16)
++
++struct aspeed_disp_intf_config {
++ u8 version;
++ u32 dac_src_sel;
++ u32 dac_src_max;
++ u32 dac_src_min;
++ u32 dp_src_sel;
++ u32 dp_src_max;
++ u32 dp_src_min;
++};
++
++struct aspeed_disp_intf {
++ struct device *dev;
++ struct miscdevice miscdev;
++ struct regmap *scu;
++ const struct aspeed_disp_intf_config *config;
++};
++
++static int dac_src, dp_src;
++
++static const struct aspeed_disp_intf_config ast2600_config = {
++ .version = 6,
++ .dac_src_sel = AST2600_SCU_PIN_SEL,
++ .dac_src_max = 1,
++ .dac_src_min = 0,
++ .dp_src_sel = AST2600_SCU_PIN_SEL,
++ .dp_src_max = 1,
++ .dp_src_min = 0,
++};
++
++static const struct aspeed_disp_intf_config ast2700_config = {
++ .version = 7,
++ .dac_src_sel = AST2700_SCU_PIN_SEL,
++ .dac_src_max = 2,
++ .dac_src_min = 0,
++ .dp_src_sel = AST2700_SCU_PIN_SEL,
++ .dp_src_max = 2,
++ .dp_src_min = 0,
++};
++
++static ssize_t dac_src_show(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct aspeed_disp_intf *intf = dev_get_drvdata(dev);
++ const struct aspeed_disp_intf_config *config = intf->config;
++ u32 val;
++
++ regmap_read(intf->scu, config->dac_src_sel, &val);
++ dac_src = (config->version == 6)
++ ? FIELD_GET(AST2600_SCU_DAC_SRC_SEL, val)
++ : FIELD_GET(AST2700_SCU_DAC_SRC_SEL, val);
++ return sysfs_emit(buf, "%d\n", dac_src);
++}
++
++static ssize_t dac_src_store(struct device *dev,
++ struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct aspeed_disp_intf *intf = dev_get_drvdata(dev);
++ const struct aspeed_disp_intf_config *config = intf->config;
++ int src, res;
++
++ res = kstrtoint(buf, 0, &src);
++ if (res)
++ return res;
++
++ if (src < config->dac_src_min || src > config->dac_src_max) {
++ dev_err(intf->dev, "Invalid dac_src(max:%d, min:%d)\n",
++ config->dac_src_max, config->dac_src_min);
++		return -EINVAL;
++ }
++
++ dac_src = src;
++ if (config->version == 6) {
++ regmap_update_bits(intf->scu, config->dac_src_sel, AST2600_SCU_DAC_SRC_SEL,
++ FIELD_PREP(AST2600_SCU_DAC_SRC_SEL, src));
++ } else {
++ u32 id;
++ u32 mask = AST2700_SCU_DAC_SRC_SEL;
++ u32 val = FIELD_PREP(AST2700_SCU_DAC_SRC_SEL, src);
++
++ // D1PLL used in A0 only
++ regmap_read(intf->scu, AST2700_SCU_CHIP_ID, &id);
++ if (FIELD_GET(SCU_CPU_REVISION_ID_HW, id) != 0) {
++ mask |= AST2700_SCU_D1PLL_SEL;
++ val |= FIELD_PREP(AST2700_SCU_D1PLL_SEL, src);
++ }
++
++ regmap_update_bits(intf->scu, config->dac_src_sel, mask, val);
++ }
++ return count;
++}
++
++static DEVICE_ATTR_RW(dac_src);
++
++static ssize_t dp_src_show(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct aspeed_disp_intf *intf = dev_get_drvdata(dev);
++ const struct aspeed_disp_intf_config *config = intf->config;
++ u32 val;
++
++ regmap_read(intf->scu, config->dp_src_sel, &val);
++ dp_src = (config->version == 6)
++ ? FIELD_GET(AST2600_SCU_DP_SRC_SEL, val)
++ : FIELD_GET(AST2700_SCU_DP_SRC_SEL, val);
++ return sysfs_emit(buf, "%d\n", dp_src);
++}
++
++static ssize_t dp_src_store(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct aspeed_disp_intf *intf = dev_get_drvdata(dev);
++ const struct aspeed_disp_intf_config *config = intf->config;
++ int src, res;
++
++ res = kstrtoint(buf, 0, &src);
++ if (res)
++ return res;
++
++ if (src < config->dp_src_min || src > config->dp_src_max) {
++ dev_err(intf->dev, "Invalid dp_src(max:%d, min:%d)\n",
++ config->dp_src_max, config->dp_src_min);
++		return -EINVAL;
++ }
++
++ dp_src = src;
++ if (config->version == 6) {
++ regmap_update_bits(intf->scu, config->dp_src_sel, AST2600_SCU_DP_SRC_SEL,
++ FIELD_PREP(AST2600_SCU_DP_SRC_SEL, src));
++ } else {
++ u32 val;
++
++ regmap_update_bits(intf->scu, config->dp_src_sel, AST2700_SCU_DP_SRC_SEL,
++ FIELD_PREP(AST2700_SCU_DP_SRC_SEL, src));
++
++ // D1PLL used in A0 only
++ regmap_read(intf->scu, AST2700_SCU_CHIP_ID, &val);
++ if (FIELD_GET(SCU_CPU_REVISION_ID_HW, val) == 0) {
++ regmap_update_bits(intf->scu, config->dp_src_sel, AST2700_SCU_D1PLL_SEL,
++ FIELD_PREP(AST2700_SCU_D1PLL_SEL, src));
++ }
++ }
++
++ return count;
++}
++
++static DEVICE_ATTR_RW(dp_src);
++
++static struct attribute *aspeed_disp_intf_attrs[] = {
++ &dev_attr_dac_src.attr,
++ &dev_attr_dp_src.attr,
++ NULL,
++};
++
++static const struct attribute_group aspeed_disp_intf_attgrp = {
++ .name = NULL,
++ .attrs = aspeed_disp_intf_attrs,
++};
++
++static int aspeed_disp_intf_probe(struct platform_device *pdev)
++{
++ struct aspeed_disp_intf *intf;
++ struct device *dev = &pdev->dev;
++ int ret;
++
++ intf = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_disp_intf), GFP_KERNEL);
++ if (!intf)
++ return -ENOMEM;
++
++ dev_set_drvdata(&pdev->dev, intf);
++
++ intf->config = of_device_get_match_data(&pdev->dev);
++ if (!intf->config)
++ return -ENODEV;
++
++ intf->dev = dev;
++ intf->scu = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
++ if (IS_ERR(intf->scu)) {
++ dev_err(dev, "failed to find SCU regmap\n");
++ return PTR_ERR(intf->scu);
++ }
++
++ intf->miscdev.minor = MISC_DYNAMIC_MINOR;
++ intf->miscdev.name = DEVICE_NAME;
++ intf->miscdev.parent = dev;
++ ret = misc_register(&intf->miscdev);
++ if (ret) {
++ dev_err(dev, "Unable to register device\n");
++ return ret;
++ }
++
++ ret = sysfs_create_group(&dev->kobj, &aspeed_disp_intf_attgrp);
++ if (ret != 0)
++ dev_warn(dev, "failed to register attributes\n");
++
++ return 0;
++}
++
++static void aspeed_disp_intf_remove(struct platform_device *pdev)
++{
++ struct aspeed_disp_intf *intf = platform_get_drvdata(pdev);
++
++ sysfs_remove_group(&intf->dev->kobj, &aspeed_disp_intf_attgrp);
++ misc_deregister(&intf->miscdev);
++ devm_kfree(&pdev->dev, intf);
++}
++
++static const struct of_device_id aspeed_disp_intf_of_matches[] = {
++ { .compatible = "aspeed,ast2600-disp-intf", .data = &ast2600_config },
++ { .compatible = "aspeed,ast2700-disp-intf", .data = &ast2700_config },
++ {},
++};
++
++static struct platform_driver aspeed_disp_intf_driver = {
++ .probe = aspeed_disp_intf_probe,
++ .remove_new = aspeed_disp_intf_remove,
++ .driver = {
++ .name = DEVICE_NAME,
++ .of_match_table = aspeed_disp_intf_of_matches,
++ },
++};
++
++module_platform_driver(aspeed_disp_intf_driver);
++
++MODULE_DEVICE_TABLE(of, aspeed_disp_intf_of_matches);
++MODULE_AUTHOR("Jammy Huang <jammy_huang@aspeedtech.com>");
++MODULE_DESCRIPTION("ASPEED Display Interface Driver");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/soc/aspeed/aspeed-espi-comm.h b/drivers/soc/aspeed/aspeed-espi-comm.h
+new file mode 100644
+index 000000000..b50393c80
+--- /dev/null
++++ b/drivers/soc/aspeed/aspeed-espi-comm.h
+@@ -0,0 +1,196 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Copyright 2023 Aspeed Technology Inc.
++ */
++#ifndef __ASPEED_ESPI_COMM_H__
++#define __ASPEED_ESPI_COMM_H__
++
++#include <linux/ioctl.h>
++#include <linux/types.h>
++
++/*
++ * eSPI cycle type encoding
++ *
++ * Section 5.1 Cycle Types and Packet Format,
++ * Intel eSPI Interface Base Specification, Rev 1.0, Jan. 2016.
++ */
++#define ESPI_PERIF_MEMRD32 0x00
++#define ESPI_PERIF_MEMRD64 0x02
++#define ESPI_PERIF_MEMWR32 0x01
++#define ESPI_PERIF_MEMWR64 0x03
++#define ESPI_PERIF_MSG 0x10
++#define ESPI_PERIF_MSG_D 0x11
++#define ESPI_PERIF_SUC_CMPLT 0x06
++#define ESPI_PERIF_SUC_CMPLT_D_MIDDLE 0x09
++#define ESPI_PERIF_SUC_CMPLT_D_FIRST 0x0b
++#define ESPI_PERIF_SUC_CMPLT_D_LAST 0x0d
++#define ESPI_PERIF_SUC_CMPLT_D_ONLY 0x0f
++#define ESPI_PERIF_UNSUC_CMPLT 0x0c
++#define ESPI_OOB_MSG 0x21
++#define ESPI_FLASH_READ 0x00
++#define ESPI_FLASH_WRITE 0x01
++#define ESPI_FLASH_ERASE 0x02
++#define ESPI_FLASH_SUC_CMPLT 0x06
++#define ESPI_FLASH_SUC_CMPLT_D_MIDDLE 0x09
++#define ESPI_FLASH_SUC_CMPLT_D_FIRST 0x0b
++#define ESPI_FLASH_SUC_CMPLT_D_LAST 0x0d
++#define ESPI_FLASH_SUC_CMPLT_D_ONLY 0x0f
++#define ESPI_FLASH_UNSUC_CMPLT 0x0c
++
++/*
++ * eSPI packet format structure
++ *
++ * Section 5.1 Cycle Types and Packet Format,
++ * Intel eSPI Interface Base Specification, Rev 1.0, Jan. 2016.
++ */
++struct espi_comm_hdr {
++ uint8_t cyc;
++ uint8_t len_h : 4;
++ uint8_t tag : 4;
++ uint8_t len_l;
++};
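++
++/*
++ * The payload length is split across the header; it can be reconstructed
++ * from (and packed back into) the two fields with, for example:
++ *
++ *	len = (hdr->len_h << 8) | hdr->len_l;
++ *	hdr->len_h = len >> 8;
++ *	hdr->len_l = len & 0xff;
++ */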
++
++struct espi_perif_mem32 {
++ uint8_t cyc;
++ uint8_t len_h : 4;
++ uint8_t tag : 4;
++ uint8_t len_l;
++ uint32_t addr_be;
++ uint8_t data[];
++} __packed;
++
++struct espi_perif_mem64 {
++ uint8_t cyc;
++ uint8_t len_h : 4;
++ uint8_t tag : 4;
++ uint8_t len_l;
++ uint32_t addr_be;
++ uint8_t data[];
++} __packed;
++
++struct espi_perif_msg {
++ uint8_t cyc;
++ uint8_t len_h : 4;
++ uint8_t tag : 4;
++ uint8_t len_l;
++ uint8_t msg_code;
++ uint8_t msg_byte[4];
++ uint8_t data[];
++} __packed;
++
++struct espi_perif_cmplt {
++ uint8_t cyc;
++ uint8_t len_h : 4;
++ uint8_t tag : 4;
++ uint8_t len_l;
++ uint8_t data[];
++} __packed;
++
++struct espi_oob_msg {
++ uint8_t cyc;
++ uint8_t len_h : 4;
++ uint8_t tag : 4;
++ uint8_t len_l;
++ uint8_t data[];
++};
++
++struct espi_flash_rwe {
++ uint8_t cyc;
++ uint8_t len_h : 4;
++ uint8_t tag : 4;
++ uint8_t len_l;
++ uint32_t addr_be;
++ uint8_t data[];
++} __packed;
++
++struct espi_flash_cmplt {
++ uint8_t cyc;
++ uint8_t len_h : 4;
++ uint8_t tag : 4;
++ uint8_t len_l;
++ uint8_t data[];
++} __packed;
++
++#define ESPI_MAX_PLD_LEN BIT(12)
++
++/*
++ * Aspeed IOCTL for eSPI raw packet send/receive
++ *
++ * This IOCTL interface works in the eSPI packet in/out paradigm.
++ *
++ * Only the virtual wire IOCTLs are a special case, in that they do not
++ * send or receive an eSPI packet. However, to keep usage from userspace
++ * consistent, all four channel drivers are served through the
++ * IOCTL interface.
++ *
++ * For the eSPI packet format, refer to
++ * Section 5.1 Cycle Types and Packet Format,
++ * Intel eSPI Interface Base Specification, Rev 1.0, Jan. 2016.
++ *
++ * For the example user apps using these IOCTL, refer to
++ * https://github.com/AspeedTech-BMC/aspeed_app/tree/master/espi_test
++ */
++#define __ASPEED_ESPI_IOCTL_MAGIC 0xb8
++
++/*
++ * we choose the longest header and the max payload size
++ * based on the Intel specification to define the maximum
++ * eSPI packet length
++ */
++#define ESPI_MAX_PKT_LEN (sizeof(struct espi_perif_msg) + ESPI_MAX_PLD_LEN)
++
++struct aspeed_espi_ioc {
++ uint32_t pkt_len;
++ uint8_t *pkt;
++};
++
++/*
++ * Peripheral Channel (CH0)
++ * - ASPEED_ESPI_PERIF_PC_GET_RX
++ * Receive an eSPI Posted/Completion packet
++ * - ASPEED_ESPI_PERIF_PC_PUT_TX
++ * Transmit an eSPI Posted/Completion packet
++ * - ASPEED_ESPI_PERIF_NP_PUT_TX
++ * Transmit an eSPI Non-Posted packet
++ */
++#define ASPEED_ESPI_PERIF_PC_GET_RX _IOR(__ASPEED_ESPI_IOCTL_MAGIC, \
++ 0x00, struct aspeed_espi_ioc)
++#define ASPEED_ESPI_PERIF_PC_PUT_TX _IOW(__ASPEED_ESPI_IOCTL_MAGIC, \
++ 0x01, struct aspeed_espi_ioc)
++#define ASPEED_ESPI_PERIF_NP_PUT_TX _IOW(__ASPEED_ESPI_IOCTL_MAGIC, \
++ 0x02, struct aspeed_espi_ioc)
++/*
++ * Virtual Wire Channel (CH1)
++ * - ASPEED_ESPI_VW_GET_GPIO_VAL
++ * Read the input value of GPIO over the VW channel
++ * - ASPEED_ESPI_VW_PUT_GPIO_VAL
++ * Write the output value of GPIO over the VW channel
++ */
++#define ASPEED_ESPI_VW_GET_GPIO_VAL _IOR(__ASPEED_ESPI_IOCTL_MAGIC, \
++ 0x10, uint8_t)
++#define ASPEED_ESPI_VW_PUT_GPIO_VAL _IOW(__ASPEED_ESPI_IOCTL_MAGIC, \
++ 0x11, uint8_t)
++/*
++ * Out-of-band Channel (CH2)
++ * - ASPEED_ESPI_OOB_GET_RX
++ * Receive an eSPI OOB packet
++ * - ASPEED_ESPI_OOB_PUT_TX
++ * Transmit an eSPI OOB packet
++ */
++#define ASPEED_ESPI_OOB_GET_RX _IOR(__ASPEED_ESPI_IOCTL_MAGIC, \
++ 0x20, struct aspeed_espi_ioc)
++#define ASPEED_ESPI_OOB_PUT_TX _IOW(__ASPEED_ESPI_IOCTL_MAGIC, \
++ 0x21, struct aspeed_espi_ioc)
++/*
++ * Flash Channel (CH3)
++ * - ASPEED_ESPI_FLASH_GET_RX
++ * Receive an eSPI flash packet
++ * - ASPEED_ESPI_FLASH_PUT_TX
++ * Transmit an eSPI flash packet
++ */
++#define ASPEED_ESPI_FLASH_GET_RX _IOR(__ASPEED_ESPI_IOCTL_MAGIC, \
++ 0x30, struct aspeed_espi_ioc)
++#define ASPEED_ESPI_FLASH_PUT_TX _IOW(__ASPEED_ESPI_IOCTL_MAGIC, \
++ 0x31, struct aspeed_espi_ioc)
++
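++/*
++ * Minimal usage sketch (illustrative only; the device node name below is
++ * an assumption, not something defined in this header): receive one
++ * peripheral channel packet from userspace.
++ *
++ *	uint8_t pkt[ESPI_MAX_PKT_LEN];
++ *	struct aspeed_espi_ioc ioc = { .pkt_len = sizeof(pkt), .pkt = pkt };
++ *	int fd = open("/dev/aspeed-espi-peripheral", O_RDWR);
++ *
++ *	if (fd >= 0 && ioctl(fd, ASPEED_ESPI_PERIF_PC_GET_RX, &ioc) == 0)
++ *		printf("cycle type 0x%02x\n", pkt[0]);
++ */
++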
++#endif
+diff --git a/drivers/soc/aspeed/aspeed-host-bmc-dev.c b/drivers/soc/aspeed/aspeed-host-bmc-dev.c
+new file mode 100644
+index 000000000..b96d6281f
+--- /dev/null
++++ b/drivers/soc/aspeed/aspeed-host-bmc-dev.c
+@@ -0,0 +1,791 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++// Copyright (C) ASPEED Technology Inc.
++
++#include <linux/init.h>
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/pci.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/interrupt.h>
++#include <linux/wait.h>
++#include <linux/workqueue.h>
++#include <linux/miscdevice.h>
++#include <linux/module.h>
++#include <linux/serial_core.h>
++#include <linux/serial_8250.h>
++#include <linux/poll.h>
++
++#define PCI_BMC_HOST2BMC_Q1 0x30000
++#define PCI_BMC_HOST2BMC_Q2 0x30010
++#define PCI_BMC_BMC2HOST_Q1 0x30020
++#define PCI_BMC_BMC2HOST_Q2 0x30030
++#define PCI_BMC_BMC2HOST_STS 0x30040
++#define BMC2HOST_INT_STS_DOORBELL BIT(31)
++#define BMC2HOST_ENABLE_INTB BIT(30)
++
++#define BMC2HOST_Q1_FULL BIT(27)
++#define BMC2HOST_Q1_EMPTY BIT(26)
++#define BMC2HOST_Q2_FULL BIT(25)
++#define BMC2HOST_Q2_EMPTY BIT(24)
++#define BMC2HOST_Q1_FULL_UNMASK BIT(23)
++#define BMC2HOST_Q1_EMPTY_UNMASK BIT(22)
++#define BMC2HOST_Q2_FULL_UNMASK BIT(21)
++#define BMC2HOST_Q2_EMPTY_UNMASK BIT(20)
++
++#define PCI_BMC_HOST2BMC_STS 0x30044
++#define HOST2BMC_INT_STS_DOORBELL BIT(31)
++#define HOST2BMC_ENABLE_INTB BIT(30)
++
++#define HOST2BMC_Q1_FULL BIT(27)
++#define HOST2BMC_Q1_EMPTY BIT(26)
++#define HOST2BMC_Q2_FULL BIT(25)
++#define HOST2BMC_Q2_EMPTY BIT(24)
++#define HOST2BMC_Q1_FULL_UNMASK BIT(23)
++#define HOST2BMC_Q1_EMPTY_UNMASK BIT(22)
++#define HOST2BMC_Q2_FULL_UNMASK BIT(21)
++#define HOST2BMC_Q2_EMPTY_UNMASK BIT(20)
++
++static DEFINE_IDA(bmc_device_ida);
++
++#define MMBI_MAX_INST 6
++#define VUART_MAX_PARMS 2
++#define ASPEED_QUEUE_NUM 2
++#define MAX_MSI_NUM 8
++
++enum aspeed_platform_id {
++ ASPEED,
++ ASPEED_AST2700_SOC1,
++};
++
++enum queue_index {
++ QUEUE1 = 0,
++ QUEUE2,
++};
++
++enum msi_index {
++ BMC_MSI,
++ MBX_MSI,
++ VUART0_MSI,
++ VUART1_MSI,
++ MMBI0_MSI,
++ MMBI1_MSI,
++ MMBI2_MSI,
++ MMBI3_MSI,
++};
++
++/* Match msi_index */
++static int ast2600_msi_idx_table[MAX_MSI_NUM] = { 4, 21, 16, 15 };
++static int ast2700_soc0_msi_idx_table[MAX_MSI_NUM] = { 0, 11, 6, 5, 28, 29, 30, 31 };
++/* ARRAY = MMBI0_MSI, MMBI1_MSI, MMBI2_MSI, MMBI3_MSI, MMBI4_MSI, MMBI5_MSI */
++static int ast2700_soc1_msi_idx_table[MAX_MSI_NUM] = { 0, 1, 2, 3, 4, 5 };
++
++struct aspeed_platform {
++ int (*setup)(struct pci_dev *pdev);
++};
++struct aspeed_queue_message {
++ /* Queue waiters for idle engine */
++ wait_queue_head_t tx_wait;
++ wait_queue_head_t rx_wait;
++ struct kernfs_node *kn;
++ struct bin_attribute bin;
++ int index;
++ struct aspeed_pci_bmc_dev *pci_bmc_device;
++};
++
++struct aspeed_pci_mmbi {
++ unsigned long base;
++ unsigned long size;
++ void __iomem *mem;
++ struct miscdevice mdev;
++ bool bmc_rwp_update;
++ wait_queue_head_t wq;
++ u32 segment_size;
++ int irq;
++};
++
++struct aspeed_pci_bmc_dev {
++ struct device *dev;
++ struct miscdevice miscdev;
++ struct aspeed_platform *platform;
++ kernel_ulong_t driver_data;
++ int id;
++
++ unsigned long mem_bar_base;
++ unsigned long mem_bar_size;
++ void __iomem *mem_bar_reg;
++
++ unsigned long message_bar_base;
++ unsigned long message_bar_size;
++ void __iomem *msg_bar_reg;
++
++ void __iomem *pcie_sio_decode_addr;
++
++ struct aspeed_queue_message queue[ASPEED_QUEUE_NUM];
++
++ void __iomem *sio_mbox_reg;
++ struct uart_8250_port uart[VUART_MAX_PARMS];
++ int uart_line[VUART_MAX_PARMS];
++
++	/*
++	 * Interrupt: the array below is indexed by enum msi_index
++	 */
++ int *msi_idx_table;
++
++ bool ast2700_soc1;
++
++ /* AST2700 MMBI */
++ struct aspeed_pci_mmbi mmbi[MMBI_MAX_INST];
++ int mmbi_start_msi;
++};
++
++#define PCIE_DEVICE_SIO_ADDR (0x2E * 4)
++#define BMC_MULTI_MSI 32
++
++#define DRIVER_NAME "aspeed-host-bmc-dev"
++
++static int aspeed_pci_mmbi_mmap(struct file *fp, struct vm_area_struct *vma)
++{
++ struct aspeed_pci_mmbi *mmbi;
++ unsigned long vm_size;
++ pgprot_t prot;
++
++ mmbi = container_of(fp->private_data, struct aspeed_pci_mmbi, mdev);
++
++ vm_size = vma->vm_end - vma->vm_start;
++ prot = vma->vm_page_prot;
++
++ if (((vma->vm_pgoff << PAGE_SHIFT) + vm_size) > mmbi->size)
++ return -EINVAL;
++
++ prot = pgprot_noncached(prot);
++
++ if (remap_pfn_range(vma, vma->vm_start, (mmbi->base >> PAGE_SHIFT) + vma->vm_pgoff, vm_size,
++ prot))
++ return -EAGAIN;
++
++ return 0;
++}
++
++static __poll_t aspeed_pci_mmbi_poll(struct file *fp, struct poll_table_struct *pt)
++{
++ struct aspeed_pci_mmbi *mmbi;
++
++ mmbi = container_of(fp->private_data, struct aspeed_pci_mmbi, mdev);
++
++ poll_wait(fp, &mmbi->wq, pt);
++
++ if (!mmbi->bmc_rwp_update)
++ return 0;
++
++ mmbi->bmc_rwp_update = false;
++
++ return EPOLLIN;
++}
++
++static struct aspeed_pci_bmc_dev *file_aspeed_bmc_device(struct file *file)
++{
++ return container_of(file->private_data, struct aspeed_pci_bmc_dev, miscdev);
++}
++
++static int aspeed_pci_bmc_dev_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ struct aspeed_pci_bmc_dev *pci_bmc_dev = file_aspeed_bmc_device(file);
++ unsigned long vsize = vma->vm_end - vma->vm_start;
++ pgprot_t prot = vma->vm_page_prot;
++
++ if (vma->vm_pgoff + vsize > pci_bmc_dev->mem_bar_base + 0x100000)
++ return -EINVAL;
++
++ prot = pgprot_noncached(prot);
++
++ if (remap_pfn_range(vma, vma->vm_start,
++ (pci_bmc_dev->mem_bar_base >> PAGE_SHIFT) + vma->vm_pgoff,
++ vsize, prot))
++ return -EAGAIN;
++
++ return 0;
++}
++
++static const struct file_operations aspeed_pci_bmc_dev_fops = {
++ .owner = THIS_MODULE,
++ .mmap = aspeed_pci_bmc_dev_mmap,
++};
++
++static const struct file_operations aspeed_pci_mmbi_fops = {
++ .owner = THIS_MODULE,
++ .mmap = aspeed_pci_mmbi_mmap,
++ .poll = aspeed_pci_mmbi_poll,
++};
++
++static ssize_t aspeed_queue_rx(struct file *filp, struct kobject *kobj, struct bin_attribute *attr,
++ char *buf, loff_t off, size_t count)
++{
++ struct aspeed_queue_message *queue = attr->private;
++ struct aspeed_pci_bmc_dev *pci_bmc_device = queue->pci_bmc_device;
++ int index = queue->index;
++ u32 *data = (u32 *)buf;
++ int ret;
++
++ ret = wait_event_interruptible(queue->rx_wait,
++ !(readl(pci_bmc_device->msg_bar_reg + PCI_BMC_BMC2HOST_STS) &
++ ((index == QUEUE1) ? BMC2HOST_Q1_EMPTY : BMC2HOST_Q2_EMPTY)));
++ if (ret)
++ return -EINTR;
++
++ data[0] = readl(pci_bmc_device->msg_bar_reg +
++ ((index == QUEUE1) ? PCI_BMC_BMC2HOST_Q1 : PCI_BMC_BMC2HOST_Q2));
++
++ writel(HOST2BMC_INT_STS_DOORBELL | HOST2BMC_ENABLE_INTB,
++ pci_bmc_device->msg_bar_reg + PCI_BMC_HOST2BMC_STS);
++
++ return sizeof(u32);
++}
++
++static ssize_t aspeed_queue_tx(struct file *filp, struct kobject *kobj, struct bin_attribute *attr,
++ char *buf, loff_t off, size_t count)
++{
++ struct aspeed_queue_message *queue = attr->private;
++ struct aspeed_pci_bmc_dev *pci_bmc_device = queue->pci_bmc_device;
++ int index = queue->index;
++ u32 tx_buff;
++ int ret;
++
++ if (count != sizeof(u32))
++ return -EINVAL;
++
++ ret = wait_event_interruptible(queue->tx_wait,
++ !(readl(pci_bmc_device->msg_bar_reg + PCI_BMC_HOST2BMC_STS) &
++ ((index == QUEUE1) ? HOST2BMC_Q1_FULL : HOST2BMC_Q2_FULL)));
++ if (ret)
++ return -EINTR;
++
++ memcpy(&tx_buff, buf, 4);
++ writel(tx_buff, pci_bmc_device->msg_bar_reg +
++ ((index == QUEUE1) ? PCI_BMC_HOST2BMC_Q1 : PCI_BMC_HOST2BMC_Q2));
++ //trigger to host
++ writel(HOST2BMC_INT_STS_DOORBELL | HOST2BMC_ENABLE_INTB,
++ pci_bmc_device->msg_bar_reg + PCI_BMC_HOST2BMC_STS);
++
++ return sizeof(u32);
++}
++
++static irqreturn_t aspeed_pci_host_bmc_device_interrupt(int irq, void *dev_id)
++{
++ struct aspeed_pci_bmc_dev *pci_bmc_device = dev_id;
++ u32 bmc2host_q_sts = readl(pci_bmc_device->msg_bar_reg + PCI_BMC_BMC2HOST_STS);
++
++ if (bmc2host_q_sts & BMC2HOST_INT_STS_DOORBELL)
++ writel(BMC2HOST_INT_STS_DOORBELL,
++ pci_bmc_device->msg_bar_reg + PCI_BMC_BMC2HOST_STS);
++
++ if (bmc2host_q_sts & BMC2HOST_ENABLE_INTB)
++ writel(BMC2HOST_ENABLE_INTB, pci_bmc_device->msg_bar_reg + PCI_BMC_BMC2HOST_STS);
++
++ if (bmc2host_q_sts & BMC2HOST_Q1_FULL)
++ dev_info(pci_bmc_device->dev, "Q1 Full\n");
++
++ if (bmc2host_q_sts & BMC2HOST_Q2_FULL)
++ dev_info(pci_bmc_device->dev, "Q2 Full\n");
++
++ //check q1
++ if (!(readl(pci_bmc_device->msg_bar_reg + PCI_BMC_HOST2BMC_STS) & HOST2BMC_Q1_FULL))
++ wake_up_interruptible(&pci_bmc_device->queue[QUEUE1].tx_wait);
++
++ if (!(readl(pci_bmc_device->msg_bar_reg + PCI_BMC_BMC2HOST_STS) & BMC2HOST_Q1_EMPTY))
++ wake_up_interruptible(&pci_bmc_device->queue[QUEUE1].rx_wait);
++	//check q2
++ if (!(readl(pci_bmc_device->msg_bar_reg + PCI_BMC_HOST2BMC_STS) & HOST2BMC_Q2_FULL))
++ wake_up_interruptible(&pci_bmc_device->queue[QUEUE2].tx_wait);
++
++ if (!(readl(pci_bmc_device->msg_bar_reg + PCI_BMC_BMC2HOST_STS) & BMC2HOST_Q2_EMPTY))
++ wake_up_interruptible(&pci_bmc_device->queue[QUEUE2].rx_wait);
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t aspeed_pci_host_mbox_interrupt(int irq, void *dev_id)
++{
++ struct aspeed_pci_bmc_dev *pci_bmc_device = dev_id;
++ u32 isr = readl(pci_bmc_device->sio_mbox_reg + 0x94);
++
++ if (isr & BIT(7))
++ writel(BIT(7), pci_bmc_device->sio_mbox_reg + 0x94);
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t aspeed_pci_mmbi_isr(int irq, void *dev_id)
++{
++ struct aspeed_pci_mmbi *mmbi = dev_id;
++
++ mmbi->bmc_rwp_update = true;
++ wake_up_interruptible(&mmbi->wq);
++
++ return IRQ_HANDLED;
++}
++
++static void aspeed_pci_setup_irq_resource(struct pci_dev *pdev)
++{
++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev);
++
++ /* Assign static msi index table by platform */
++ if (pdev->revision == 0x27) {
++ if (pci_bmc_dev->driver_data == ASPEED) {
++ pci_bmc_dev->msi_idx_table = ast2700_soc0_msi_idx_table;
++ } else {
++ pci_bmc_dev->msi_idx_table = ast2700_soc1_msi_idx_table;
++ pci_bmc_dev->ast2700_soc1 = true;
++ }
++ } else {
++ pci_bmc_dev->msi_idx_table = ast2600_msi_idx_table;
++ }
++
++ if (pci_alloc_irq_vectors(pdev, 1, BMC_MULTI_MSI, PCI_IRQ_LEGACY | PCI_IRQ_MSI) <= 1)
++ /* Set all msi index to the first vector */
++ memset(pci_bmc_dev->msi_idx_table, 0, sizeof(int) * MAX_MSI_NUM);
++}
++
++static int aspeed_pci_bmc_device_setup_queue(struct pci_dev *pdev)
++{
++ struct aspeed_pci_bmc_dev *pci_bmc_device = pci_get_drvdata(pdev);
++ struct device *dev = &pdev->dev;
++ int ret, i;
++
++ for (i = 0; i < ASPEED_QUEUE_NUM; i++) {
++ struct aspeed_queue_message *queue = &pci_bmc_device->queue[i];
++
++ init_waitqueue_head(&queue->tx_wait);
++ init_waitqueue_head(&queue->rx_wait);
++
++ sysfs_bin_attr_init(&queue->bin);
++
++ /* Queue name index starts from 1 */
++ queue->bin.attr.name =
++ devm_kasprintf(dev, GFP_KERNEL, "pci-bmc-dev-queue%d", (i + 1));
++ queue->bin.attr.mode = 0600;
++ queue->bin.read = aspeed_queue_rx;
++ queue->bin.write = aspeed_queue_tx;
++ queue->bin.size = 4;
++ queue->bin.private = queue;
++
++ ret = sysfs_create_bin_file(&pdev->dev.kobj, &queue->bin);
++ if (ret) {
++			dev_err(dev, "failed to create bin file %d\n", i);
++ return ret;
++ }
++
++ queue->kn = kernfs_find_and_get(dev->kobj.sd, queue->bin.attr.name);
++ if (!queue->kn) {
++ sysfs_remove_bin_file(&dev->kobj, &queue->bin);
++ return ret;
++ }
++
++ queue->index = i;
++ queue->pci_bmc_device = pci_bmc_device;
++ }
++
++ return 0;
++}
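++
++/*
++ * Userspace usage sketch (illustrative; the sysfs path depends on the PCI
++ * topology and is only an assumption here). Each queue is exposed as a
++ * 4-byte binary attribute under the PCI device directory:
++ *
++ *	uint32_t msg = 0x12345678;
++ *	int fd = open("/sys/bus/pci/devices/0000:01:00.0/pci-bmc-dev-queue1",
++ *		      O_RDWR);
++ *
++ *	write(fd, &msg, sizeof(msg));	// push one word to the BMC
++ *	read(fd, &msg, sizeof(msg));	// pop one word from the BMC
++ */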
++
++static int aspeed_pci_bmc_device_setup_vuart(struct pci_dev *pdev)
++{
++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev);
++ struct device *dev = &pdev->dev;
++ u16 vuart_ioport;
++ int ret, i;
++
++ for (i = 0; i < VUART_MAX_PARMS; i++) {
++ /* Assign the line to non-exist device */
++ pci_bmc_dev->uart_line[i] = -ENOENT;
++ vuart_ioport = 0x3F8 - (i * 0x100);
++ pci_bmc_dev->uart[i].port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
++ pci_bmc_dev->uart[i].port.uartclk = 115200 * 16;
++ pci_bmc_dev->uart[i].port.irq =
++ pci_irq_vector(pdev, pci_bmc_dev->msi_idx_table[VUART0_MSI + i]);
++ pci_bmc_dev->uart[i].port.dev = dev;
++ pci_bmc_dev->uart[i].port.iotype = UPIO_MEM32;
++ pci_bmc_dev->uart[i].port.iobase = 0;
++ pci_bmc_dev->uart[i].port.mapbase =
++ pci_bmc_dev->message_bar_base + (vuart_ioport << 2);
++ pci_bmc_dev->uart[i].port.membase = 0;
++ pci_bmc_dev->uart[i].port.type = PORT_16550A;
++ pci_bmc_dev->uart[i].port.flags |= (UPF_IOREMAP | UPF_FIXED_PORT | UPF_FIXED_TYPE);
++ pci_bmc_dev->uart[i].port.regshift = 2;
++ ret = serial8250_register_8250_port(&pci_bmc_dev->uart[i]);
++ if (ret < 0) {
++ dev_err_probe(dev, ret, "Can't setup PCIe VUART\n");
++ return ret;
++ }
++ pci_bmc_dev->uart_line[i] = ret;
++ }
++ return 0;
++}
++
++static int aspeed_pci_bmc_device_setup_memory_mapping(struct pci_dev *pdev)
++{
++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev);
++ struct device *dev = &pdev->dev;
++ int ret;
++
++ pci_bmc_dev->miscdev.minor = MISC_DYNAMIC_MINOR;
++ pci_bmc_dev->miscdev.name =
++ devm_kasprintf(dev, GFP_KERNEL, "%s%d", DRIVER_NAME, pci_bmc_dev->id);
++ pci_bmc_dev->miscdev.fops = &aspeed_pci_bmc_dev_fops;
++ pci_bmc_dev->miscdev.parent = dev;
++
++ ret = misc_register(&pci_bmc_dev->miscdev);
++ if (ret) {
++ pr_err("host bmc register fail %d\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++static int aspeed_pci_bmc_device_setup_mbox(struct pci_dev *pdev)
++{
++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev);
++ struct device *dev = &pdev->dev;
++ int ret;
++
++ /* setup mbox */
++ pci_bmc_dev->pcie_sio_decode_addr = pci_bmc_dev->msg_bar_reg + PCIE_DEVICE_SIO_ADDR;
++ writel(0xaa, pci_bmc_dev->pcie_sio_decode_addr);
++ writel(0xa5, pci_bmc_dev->pcie_sio_decode_addr);
++ writel(0xa5, pci_bmc_dev->pcie_sio_decode_addr);
++ writel(0x07, pci_bmc_dev->pcie_sio_decode_addr);
++ writel(0x0e, pci_bmc_dev->pcie_sio_decode_addr + 0x04);
++ /* disable */
++ writel(0x30, pci_bmc_dev->pcie_sio_decode_addr);
++ writel(0x00, pci_bmc_dev->pcie_sio_decode_addr + 0x04);
++ /* set decode address 0x100 */
++ writel(0x60, pci_bmc_dev->pcie_sio_decode_addr);
++ writel(0x01, pci_bmc_dev->pcie_sio_decode_addr + 0x04);
++ writel(0x61, pci_bmc_dev->pcie_sio_decode_addr);
++ writel(0x00, pci_bmc_dev->pcie_sio_decode_addr + 0x04);
++ /* enable */
++ writel(0x30, pci_bmc_dev->pcie_sio_decode_addr);
++ writel(0x01, pci_bmc_dev->pcie_sio_decode_addr + 0x04);
++ pci_bmc_dev->sio_mbox_reg = pci_bmc_dev->msg_bar_reg + 0x400;
++
++ ret = devm_request_irq(dev,
++ pci_irq_vector(pdev, pci_bmc_dev->msi_idx_table[MBX_MSI]),
++ aspeed_pci_host_mbox_interrupt, IRQF_SHARED,
++ devm_kasprintf(dev, GFP_KERNEL, "aspeed-sio-mbox%d", pci_bmc_dev->id),
++ pci_bmc_dev);
++ if (ret) {
++ pr_err("host bmc device Unable to get IRQ %d\n", ret);
++ return ret;
++ }
++
++ return 0;
++}
++
++/* AST2700 PCIe MMBI
++ * SoC : | 0 | 1 |
++ * BAR : | 2 3 4 5 | 0 1 2 3 4 5 |
++ * MMBI: | 0 1 2 3 | 0 1 2 3 4 5 |
++ */
++static void aspeed_pci_bmc_device_setup_mmbi(struct pci_dev *pdev)
++{
++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev);
++ struct aspeed_pci_mmbi *mmbi;
++ u32 start_bar = 2, mmbi_max_inst = 4, start_msi = MMBI0_MSI; /* AST2700 SoC0 */
++ int i, rc = 0;
++
++ if (pdev->revision != 0x27)
++ return;
++
++ if (pci_bmc_dev->ast2700_soc1) {
++ /* AST2700 SoC1 */
++ start_bar = 0;
++ mmbi_max_inst = 6;
++ start_msi = 0;
++ }
++
++ for (i = 0; i < mmbi_max_inst; i++) {
++ mmbi = &pci_bmc_dev->mmbi[i];
++
++ /* Get MMBI BAR resource */
++ mmbi->base = pci_resource_start(pdev, start_bar + i);
++ mmbi->size = pci_resource_len(pdev, start_bar + i);
++
++ if (mmbi->size == 0)
++ continue;
++
++ mmbi->mem = pci_ioremap_bar(pdev, start_bar + i);
++ if (!mmbi->mem) {
++ mmbi->size = 0;
++ continue;
++ }
++
++ mmbi->mdev.parent = &pdev->dev;
++ mmbi->mdev.minor = MISC_DYNAMIC_MINOR;
++ mmbi->mdev.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
++ "aspeed-pcie%d-mmbi%d",
++ pci_bmc_dev->id, i);
++ mmbi->mdev.fops = &aspeed_pci_mmbi_fops;
++ rc = misc_register(&mmbi->mdev);
++ if (rc) {
++ dev_err(&pdev->dev, "Cannot register device %s (err=%d)\n",
++ mmbi->mdev.name, rc);
++ mmbi->size = 0;
++ iounmap(mmbi->mem);
++ continue;
++ }
++
++ mmbi->irq = pci_irq_vector(pdev, pci_bmc_dev->msi_idx_table[start_msi + i]);
++ rc = devm_request_irq(&pdev->dev, mmbi->irq, aspeed_pci_mmbi_isr, IRQF_SHARED,
++ mmbi->mdev.name, mmbi);
++ if (rc) {
++ pr_err("MMBI device %s unable to get IRQ %d\n", mmbi->mdev.name, rc);
++ misc_deregister(&mmbi->mdev);
++ mmbi->size = 0;
++ iounmap(mmbi->mem);
++ continue;
++ }
++
++ mmbi->bmc_rwp_update = false;
++ init_waitqueue_head(&mmbi->wq);
++ }
++}
++
++static void aspeed_pci_host_bmc_device_release_queue(struct pci_dev *pdev)
++{
++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev);
++ int i;
++
++ for (i = 0; i < ASPEED_QUEUE_NUM; i++)
++ sysfs_remove_bin_file(&pdev->dev.kobj, &pci_bmc_dev->queue[i].bin);
++}
++
++static void aspeed_pci_host_bmc_device_release_vuart(struct pci_dev *pdev)
++{
++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev);
++ int i;
++
++ for (i = 0; i < VUART_MAX_PARMS; i++) {
++ if (pci_bmc_dev->uart_line[i] >= 0)
++ serial8250_unregister_port(pci_bmc_dev->uart_line[i]);
++ }
++}
++
++static void aspeed_pci_host_bmc_device_release_memory_mapping(struct pci_dev *pdev)
++{
++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev);
++
++ if (!list_empty(&pci_bmc_dev->miscdev.list))
++ misc_deregister(&pci_bmc_dev->miscdev);
++}
++
++static void aspeed_pci_release_mmbi(struct pci_dev *pdev)
++{
++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev);
++ struct aspeed_pci_mmbi *mmbi;
++ int i;
++
++ if (pdev->revision != 0x27)
++ return;
++
++ for (i = 0; i < MMBI_MAX_INST; i++) {
++ mmbi = &pci_bmc_dev->mmbi[i];
++
++ if (mmbi->size == 0)
++ continue;
++ misc_deregister(&mmbi->mdev);
++ devm_free_irq(&pdev->dev, mmbi->irq, mmbi);
++ }
++}
++
++static int aspeed_pci_host_setup(struct pci_dev *pdev)
++{
++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev);
++ int rc = 0;
++
++ /* Get share memory BAR */
++ pci_bmc_dev->mem_bar_base = pci_resource_start(pdev, 0);
++ pci_bmc_dev->mem_bar_size = pci_resource_len(pdev, 0);
++ pci_bmc_dev->mem_bar_reg = pci_ioremap_bar(pdev, 0);
++ if (!pci_bmc_dev->mem_bar_reg)
++ return -ENOMEM;
++
++ /* Get Message BAR */
++ pci_bmc_dev->message_bar_base = pci_resource_start(pdev, 1);
++ pci_bmc_dev->message_bar_size = pci_resource_len(pdev, 1);
++ pci_bmc_dev->msg_bar_reg = pci_ioremap_bar(pdev, 1);
++ if (!pci_bmc_dev->msg_bar_reg) {
++ rc = -ENOMEM;
++ goto out_free0;
++ }
++
++	/* AST2600 ERRATA 40: dummy read */
++ if (pdev->revision < 0x27)
++ (void)__raw_readl((void __iomem *)pci_bmc_dev->msg_bar_reg);
++
++ rc = aspeed_pci_bmc_device_setup_queue(pdev);
++ if (rc) {
++ pr_err("Cannot setup Queue Message");
++ goto out_free1;
++ }
++
++ rc = aspeed_pci_bmc_device_setup_memory_mapping(pdev);
++ if (rc) {
++ pr_err("Cannot setup Memory Mapping");
++ goto out_free_queue;
++ }
++
++ rc = aspeed_pci_bmc_device_setup_mbox(pdev);
++ if (rc) {
++		pr_err("Cannot setup Mailbox");
++ goto out_free_mmapping;
++ }
++
++ rc = aspeed_pci_bmc_device_setup_vuart(pdev);
++ if (rc) {
++ pr_err("Cannot setup Virtual UART");
++ goto out_free_mbox;
++ }
++
++ rc = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, pci_bmc_dev->msi_idx_table[BMC_MSI]),
++ aspeed_pci_host_bmc_device_interrupt, IRQF_SHARED,
++ pci_bmc_dev->miscdev.name, pci_bmc_dev);
++ if (rc) {
++ pr_err("Get BMC DEVICE IRQ failed. (err=%d)\n", rc);
++ goto out_free_uart;
++ }
++
++ /* Setup AST2700 SoC0 MMBI device */
++ aspeed_pci_bmc_device_setup_mmbi(pdev);
++
++ return 0;
++
++out_free_uart:
++ aspeed_pci_host_bmc_device_release_vuart(pdev);
++out_free_mbox:
++ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, pci_bmc_dev->msi_idx_table[MBX_MSI]),
++ pci_bmc_dev);
++out_free_mmapping:
++ aspeed_pci_host_bmc_device_release_memory_mapping(pdev);
++out_free_queue:
++ aspeed_pci_host_bmc_device_release_queue(pdev);
++out_free1:
++ iounmap(pci_bmc_dev->msg_bar_reg);
++out_free0:
++ iounmap(pci_bmc_dev->mem_bar_reg);
++
++ pci_release_regions(pdev);
++ return rc;
++}
++
++static int aspeed_pci_host_mmbi_device_setup(struct pci_dev *pdev)
++{
++ aspeed_pci_bmc_device_setup_mmbi(pdev);
++ return 0;
++}
++
++static struct aspeed_platform aspeed_pcie_host[] = {
++ { .setup = aspeed_pci_host_setup },
++ { .setup = aspeed_pci_host_mmbi_device_setup },
++ { 0 }
++};
++
++static int aspeed_pci_host_bmc_device_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
++{
++ struct aspeed_pci_bmc_dev *pci_bmc_dev;
++ int rc = 0;
++
++ pr_info("ASPEED BMC PCI ID %04x:%04x, IRQ=%u\n", pdev->vendor, pdev->device, pdev->irq);
++
++ pci_bmc_dev = devm_kzalloc(&pdev->dev, sizeof(*pci_bmc_dev), GFP_KERNEL);
++ if (!pci_bmc_dev)
++ return -ENOMEM;
++
++ /* Get platform id */
++ pci_bmc_dev->driver_data = ent->driver_data;
++ pci_bmc_dev->platform = &aspeed_pcie_host[ent->driver_data];
++
++ pci_bmc_dev->id = ida_simple_get(&bmc_device_ida, 0, 0, GFP_KERNEL);
++ if (pci_bmc_dev->id < 0)
++ return pci_bmc_dev->id;
++
++ rc = pci_enable_device(pdev);
++ if (rc) {
++ dev_err(&pdev->dev, "pci_enable_device() returned error %d\n", rc);
++ return rc;
++ }
++
++ pci_set_master(pdev);
++ pci_set_drvdata(pdev, pci_bmc_dev);
++
++ /* Prepare IRQ resource */
++ aspeed_pci_setup_irq_resource(pdev);
++
++ /* Setup BMC PCI device */
++ rc = pci_bmc_dev->platform->setup(pdev);
++ if (rc) {
++ dev_err(&pdev->dev, "ASPEED PCIe Host device returned error %d\n", rc);
++ pci_free_irq_vectors(pdev);
++ pci_disable_device(pdev);
++ return rc;
++ }
++
++ return 0;
++}
++
++static void aspeed_pci_host_bmc_device_remove(struct pci_dev *pdev)
++{
++ struct aspeed_pci_bmc_dev *pci_bmc_dev = pci_get_drvdata(pdev);
++
++ if (pci_bmc_dev->driver_data == ASPEED) {
++ aspeed_pci_host_bmc_device_release_queue(pdev);
++ aspeed_pci_host_bmc_device_release_memory_mapping(pdev);
++ aspeed_pci_host_bmc_device_release_vuart(pdev);
++
++ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, pci_bmc_dev->msi_idx_table[BMC_MSI]),
++ pci_bmc_dev);
++ devm_free_irq(&pdev->dev, pci_irq_vector(pdev, pci_bmc_dev->msi_idx_table[MBX_MSI]),
++ pci_bmc_dev);
++ }
++
++ aspeed_pci_release_mmbi(pdev);
++
++ ida_simple_remove(&bmc_device_ida, pci_bmc_dev->id);
++
++ pci_free_irq_vectors(pdev);
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++}
++
++/* The (VendorID, DeviceID) pairs supported by this driver */
++static struct pci_device_id aspeed_host_bmc_dev_pci_ids[] = {
++ /* ASPEED BMC Device */
++ { PCI_DEVICE(0x1A03, 0x2402), .class = 0xFF0000, .class_mask = 0xFFFF00,
++ .driver_data = ASPEED },
++ /* AST2700 SoC1 MMBI device */
++ { PCI_DEVICE(0x1A03, 0x2402), .class = 0x0C0C00, .class_mask = (0xFFFF00),
++ .driver_data = ASPEED_AST2700_SOC1 },
++ {
++ 0,
++ }
++};
++
++MODULE_DEVICE_TABLE(pci, aspeed_host_bmc_dev_pci_ids);
++
++static struct pci_driver aspeed_host_bmc_dev_driver = {
++ .name = DRIVER_NAME,
++ .id_table = aspeed_host_bmc_dev_pci_ids,
++ .probe = aspeed_pci_host_bmc_device_probe,
++ .remove = aspeed_pci_host_bmc_device_remove,
++};
++
++module_pci_driver(aspeed_host_bmc_dev_driver);
++
++MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>");
++MODULE_DESCRIPTION("ASPEED Host BMC DEVICE Driver");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
+index 258894ed2..e87038009 100644
+--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
++++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
+@@ -332,14 +332,12 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
+ return rc;
+ }
+
+-static int aspeed_lpc_ctrl_remove(struct platform_device *pdev)
++static void aspeed_lpc_ctrl_remove(struct platform_device *pdev)
+ {
+ struct aspeed_lpc_ctrl *lpc_ctrl = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&lpc_ctrl->miscdev);
+ clk_disable_unprepare(lpc_ctrl->clk);
+-
+- return 0;
+ }
+
+ static const struct of_device_id aspeed_lpc_ctrl_match[] = {
+@@ -355,7 +353,7 @@ static struct platform_driver aspeed_lpc_ctrl_driver = {
+ .of_match_table = aspeed_lpc_ctrl_match,
+ },
+ .probe = aspeed_lpc_ctrl_probe,
+- .remove = aspeed_lpc_ctrl_remove,
++ .remove_new = aspeed_lpc_ctrl_remove,
+ };
+
+ module_platform_driver(aspeed_lpc_ctrl_driver);
+diff --git a/drivers/soc/aspeed/aspeed-lpc-mbox.c b/drivers/soc/aspeed/aspeed-lpc-mbox.c
+new file mode 100644
+index 000000000..4b6a1693e
+--- /dev/null
++++ b/drivers/soc/aspeed/aspeed-lpc-mbox.c
+@@ -0,0 +1,439 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright 2017 IBM Corporation
++ * Copyright 2021 Aspeed Technology Inc.
++ */
++#include <linux/interrupt.h>
++#include <linux/mfd/syscon.h>
++#include <linux/miscdevice.h>
++#include <linux/module.h>
++#include <linux/of_irq.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include <linux/poll.h>
++#include <linux/regmap.h>
++#include <linux/slab.h>
++
++#define DEVICE_NAME "aspeed-mbox"
++
++static DEFINE_IDA(aspeed_mbox_ida);
++
++#define ASPEED_MBOX_DR(dr, n) (dr + (n * 4))
++#define ASPEED_MBOX_STR(str, n) (str + (n / 8) * 4)
++#define ASPEED_MBOX_BIE(bie, n) (bie + (n / 8) * 4)
++#define ASPEED_MBOX_HIE(hie, n) (hie + (n / 8) * 4)
++
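++/*
++ * Offset arithmetic example (illustrative), using the ast2600 layout
++ * declared further below (dr = 0x0, str = 0x80): data register 5 sits at
++ * ASPEED_MBOX_DR(0x0, 5) = 0x14 and its status bit lives in
++ * ASPEED_MBOX_STR(0x80, 5) = 0x80, since the status bits are grouped
++ * eight registers per word.
++ */
++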
++#define ASPEED_MBOX_BCR_RECV BIT(7)
++#define ASPEED_MBOX_BCR_MASK BIT(1)
++#define ASPEED_MBOX_BCR_SEND BIT(0)
++
++/* ioctl code */
++#define ASPEED_MBOX_IOCTL 0xA3
++#define ASPEED_MBOX_IOCTL_GET_SIZE \
++ _IOR(ASPEED_MBOX_IOCTL, 0, struct aspeed_mbox_ioctl_data)
++
++struct aspeed_mbox_ioctl_data {
++ unsigned int data;
++};
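++
++/*
++ * Userspace sketch (illustrative; the /dev node name is derived from
++ * DEVICE_NAME plus the allocated ID and is an assumption here):
++ *
++ *	struct aspeed_mbox_ioctl_data d;
++ *	int fd = open("/dev/aspeed-mbox0", O_RDWR);
++ *
++ *	if (fd >= 0 && ioctl(fd, ASPEED_MBOX_IOCTL_GET_SIZE, &d) == 0)
++ *		printf("%u data registers\n", d.data);
++ */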
++
++struct aspeed_mbox_model {
++ unsigned int dr_num;
++
++ /* offsets to the MBOX registers */
++ unsigned int dr;
++ unsigned int str;
++ unsigned int bcr;
++ unsigned int hcr;
++ unsigned int bie;
++ unsigned int hie;
++};
++
++struct aspeed_mbox {
++ int mdev_id;
++ struct miscdevice mdev;
++ struct regmap *map;
++ unsigned int base;
++ wait_queue_head_t queue;
++ struct mutex mutex;
++ const struct aspeed_mbox_model *model;
++};
++
++static atomic_t aspeed_mbox_open_count = ATOMIC_INIT(0);
++
++static u8 aspeed_mbox_inb(struct aspeed_mbox *mbox, int reg)
++{
++ /*
++ * The mbox registers are actually only one byte but are addressed
++ * four bytes apart. The other three bytes are marked 'reserved',
++	 * they *should* be zero but let's not rely on it.
++ * I am going to rely on the fact we can casually read/write to them...
++ */
++ unsigned int val = 0xff; /* If regmap throws an error return 0xff */
++ int rc = regmap_read(mbox->map, mbox->base + reg, &val);
++
++ if (rc)
++ dev_err(mbox->mdev.parent, "regmap_read() failed with "
++ "%d (reg: 0x%08x)\n", rc, reg);
++
++ return val & 0xff;
++}
++
++static void aspeed_mbox_outb(struct aspeed_mbox *mbox, u8 data, int reg)
++{
++ int rc = regmap_write(mbox->map, mbox->base + reg, data);
++
++ if (rc)
++ dev_err(mbox->mdev.parent, "regmap_write() failed with "
++ "%d (data: %u reg: 0x%08x)\n", rc, data, reg);
++}
++
++static struct aspeed_mbox *file_mbox(struct file *file)
++{
++ return container_of(file->private_data, struct aspeed_mbox, mdev);
++}
++
++static int aspeed_mbox_open(struct inode *inode, struct file *file)
++{
++ struct aspeed_mbox *mbox = file_mbox(file);
++ const struct aspeed_mbox_model *model = mbox->model;
++
++ if (atomic_inc_return(&aspeed_mbox_open_count) == 1) {
++ /*
++ * Clear the interrupt status bit if it was left on and unmask
++ * interrupts.
++ * ASPEED_MBOX_BCR_RECV bit is W1C, this also unmasks in 1 step
++ */
++ aspeed_mbox_outb(mbox, ASPEED_MBOX_BCR_RECV, model->bcr);
++ return 0;
++ }
++
++ atomic_dec(&aspeed_mbox_open_count);
++ return -EBUSY;
++}
++
++static ssize_t aspeed_mbox_read(struct file *file, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct aspeed_mbox *mbox = file_mbox(file);
++ const struct aspeed_mbox_model *model = mbox->model;
++ char __user *p = buf;
++ ssize_t ret;
++ int i;
++
++ if (!access_ok(buf, count))
++ return -EFAULT;
++
++ if (count + *ppos > model->dr_num)
++ return -EINVAL;
++
++ if (file->f_flags & O_NONBLOCK) {
++ if (!(aspeed_mbox_inb(mbox, model->bcr) &
++ ASPEED_MBOX_BCR_RECV))
++ return -EAGAIN;
++ } else if (wait_event_interruptible(mbox->queue,
++ aspeed_mbox_inb(mbox, model->bcr) &
++ ASPEED_MBOX_BCR_RECV)) {
++ return -ERESTARTSYS;
++ }
++
++ mutex_lock(&mbox->mutex);
++
++ for (i = *ppos; count > 0 && i < model->dr_num; i++) {
++ uint8_t reg = aspeed_mbox_inb(mbox, ASPEED_MBOX_DR(model->dr, i));
++
++ ret = __put_user(reg, p);
++ if (ret)
++ goto out_unlock;
++
++ p++;
++ count--;
++ }
++
++ /* ASPEED_MBOX_BCR_RECV bit is write to clear, this also unmasks in 1 step */
++ aspeed_mbox_outb(mbox, ASPEED_MBOX_BCR_RECV, model->bcr);
++ ret = p - buf;
++
++out_unlock:
++ mutex_unlock(&mbox->mutex);
++ return ret;
++}
++
++static ssize_t aspeed_mbox_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct aspeed_mbox *mbox = file_mbox(file);
++ const struct aspeed_mbox_model *model = mbox->model;
++ const char __user *p = buf;
++ ssize_t ret;
++ char c;
++ int i;
++
++ if (!access_ok(buf, count))
++ return -EFAULT;
++
++ if (count + *ppos > model->dr_num)
++ return -EINVAL;
++
++ mutex_lock(&mbox->mutex);
++
++ for (i = *ppos; count > 0 && i < model->dr_num; i++) {
++ ret = __get_user(c, p);
++ if (ret)
++ goto out_unlock;
++
++ aspeed_mbox_outb(mbox, c, ASPEED_MBOX_DR(model->dr, i));
++ p++;
++ count--;
++ }
++
++ aspeed_mbox_outb(mbox, ASPEED_MBOX_BCR_SEND, model->bcr);
++ ret = p - buf;
++
++out_unlock:
++ mutex_unlock(&mbox->mutex);
++ return ret;
++}
++
++static __poll_t aspeed_mbox_poll(struct file *file, poll_table *wait)
++{
++ struct aspeed_mbox *mbox = file_mbox(file);
++ const struct aspeed_mbox_model *model = mbox->model;
++ __poll_t mask = 0;
++
++ poll_wait(file, &mbox->queue, wait);
++
++ if (aspeed_mbox_inb(mbox, model->bcr) & ASPEED_MBOX_BCR_RECV)
++ mask |= POLLIN;
++
++ return mask;
++}
++
++static int aspeed_mbox_release(struct inode *inode, struct file *file)
++{
++ atomic_dec(&aspeed_mbox_open_count);
++ return 0;
++}
++
++static long aspeed_mbox_ioctl(struct file *file, unsigned int cmd,
++ unsigned long param)
++{
++ long ret = 0;
++ struct aspeed_mbox *mbox = file_mbox(file);
++ const struct aspeed_mbox_model *model = mbox->model;
++ struct aspeed_mbox_ioctl_data data;
++
++ switch (cmd) {
++ case ASPEED_MBOX_IOCTL_GET_SIZE:
++ data.data = model->dr_num;
++ if (copy_to_user((void __user *)param, &data, sizeof(data)))
++ ret = -EFAULT;
++ break;
++ default:
++ ret = -ENOTTY;
++ break;
++ }
++
++ return ret;
++}
++
++static const struct file_operations aspeed_mbox_fops = {
++ .owner = THIS_MODULE,
++ .llseek = no_seek_end_llseek,
++ .read = aspeed_mbox_read,
++ .write = aspeed_mbox_write,
++ .open = aspeed_mbox_open,
++ .release = aspeed_mbox_release,
++ .poll = aspeed_mbox_poll,
++ .unlocked_ioctl = aspeed_mbox_ioctl,
++};
++
++static irqreturn_t aspeed_mbox_irq(int irq, void *arg)
++{
++ struct aspeed_mbox *mbox = arg;
++ const struct aspeed_mbox_model *model = mbox->model;
++
++ if (!(aspeed_mbox_inb(mbox, model->bcr) & ASPEED_MBOX_BCR_RECV))
++ return IRQ_NONE;
++
++ /*
++ * Leave the status bit set so that we know the data is for us,
++ * clear it once it has been read.
++ */
++
++	/* Mask it off; we'll clear it when the data gets read */
++ aspeed_mbox_outb(mbox, ASPEED_MBOX_BCR_MASK, model->bcr);
++
++ wake_up(&mbox->queue);
++ return IRQ_HANDLED;
++}
++
++static int aspeed_mbox_config_irq(struct aspeed_mbox *mbox,
++ struct platform_device *pdev)
++{
++ const struct aspeed_mbox_model *model = mbox->model;
++ struct device *dev = &pdev->dev;
++ int i, rc, irq;
++
++ irq = irq_of_parse_and_map(dev->of_node, 0);
++ if (!irq)
++ return -ENODEV;
++
++ rc = devm_request_irq(dev, irq, aspeed_mbox_irq,
++ IRQF_SHARED, DEVICE_NAME, mbox);
++ if (rc < 0) {
++ dev_err(dev, "Unable to request IRQ %d\n", irq);
++ return rc;
++ }
++
++ /*
++ * Disable all register based interrupts.
++ */
++ for (i = 0; i < model->dr_num / 8; ++i)
++ aspeed_mbox_outb(mbox, 0x00, ASPEED_MBOX_BIE(model->bie, i));
++
++ /* These registers are write one to clear. Clear them. */
++ for (i = 0; i < model->dr_num / 8; ++i)
++ aspeed_mbox_outb(mbox, 0xff, ASPEED_MBOX_STR(model->str, i));
++
++ aspeed_mbox_outb(mbox, ASPEED_MBOX_BCR_RECV, model->bcr);
++ return 0;
++}
++
++static int aspeed_mbox_probe(struct platform_device *pdev)
++{
++ struct aspeed_mbox *mbox;
++ struct device *dev;
++ int rc;
++
++ dev = &pdev->dev;
++
++ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
++ if (!mbox)
++ return -ENOMEM;
++
++ dev_set_drvdata(&pdev->dev, mbox);
++
++ rc = of_property_read_u32(dev->of_node, "reg", &mbox->base);
++ if (rc) {
++ dev_err(dev, "Couldn't read reg device tree property\n");
++ return rc;
++ }
++
++ mbox->model = of_device_get_match_data(dev);
++ if (IS_ERR(mbox->model)) {
++ dev_err(dev, "Couldn't get model data\n");
++ return -ENODEV;
++ }
++
++	mbox->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
++ if (IS_ERR(mbox->map)) {
++ dev_err(dev, "Couldn't get regmap\n");
++ return -ENODEV;
++ }
++
++ mutex_init(&mbox->mutex);
++ init_waitqueue_head(&mbox->queue);
++
++ mbox->mdev_id = ida_alloc(&aspeed_mbox_ida, GFP_KERNEL);
++ if (mbox->mdev_id < 0) {
++ dev_err(dev, "cannot allocate device ID\n");
++ return mbox->mdev_id;
++ }
++
++ mbox->mdev.minor = MISC_DYNAMIC_MINOR;
++ mbox->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%d", DEVICE_NAME, mbox->mdev_id);
++ mbox->mdev.fops = &aspeed_mbox_fops;
++ mbox->mdev.parent = dev;
++ rc = misc_register(&mbox->mdev);
++ if (rc) {
++ dev_err(dev, "Unable to register device\n");
++ return rc;
++ }
++
++ rc = aspeed_mbox_config_irq(mbox, pdev);
++ if (rc) {
++ dev_err(dev, "Failed to configure IRQ\n");
++ misc_deregister(&mbox->mdev);
++ return rc;
++ }
++
++ return 0;
++}
++
++static int aspeed_mbox_remove(struct platform_device *pdev)
++{
++ struct aspeed_mbox *mbox = dev_get_drvdata(&pdev->dev);
++
++ misc_deregister(&mbox->mdev);
++
++ return 0;
++}
++
++static const struct aspeed_mbox_model ast2400_model = {
++ .dr_num = 16,
++ .dr = 0x0,
++ .str = 0x40,
++ .bcr = 0x48,
++ .hcr = 0x4c,
++ .bie = 0x50,
++ .hie = 0x58,
++};
++
++static const struct aspeed_mbox_model ast2500_model = {
++ .dr_num = 16,
++ .dr = 0x0,
++ .str = 0x40,
++ .bcr = 0x48,
++ .hcr = 0x4c,
++ .bie = 0x50,
++ .hie = 0x58,
++};
++
++static const struct aspeed_mbox_model ast2600_model = {
++ .dr_num = 32,
++ .dr = 0x0,
++ .str = 0x80,
++ .bcr = 0x90,
++ .hcr = 0x94,
++ .bie = 0xa0,
++ .hie = 0xb0,
++};
++
++static const struct aspeed_mbox_model ast2700_model = {
++ .dr_num = 32,
++ .dr = 0x0,
++ .str = 0x80,
++ .bcr = 0x90,
++ .hcr = 0x94,
++ .bie = 0xa0,
++ .hie = 0xb0,
++};
++
++static const struct of_device_id aspeed_mbox_match[] = {
++ { .compatible = "aspeed,ast2400-mbox",
++ .data = &ast2400_model },
++ { .compatible = "aspeed,ast2500-mbox",
++ .data = &ast2500_model },
++ { .compatible = "aspeed,ast2600-mbox",
++ .data = &ast2600_model },
++ { .compatible = "aspeed,ast2700-mbox",
++ .data = &ast2700_model },
++ { },
++};
++
++static struct platform_driver aspeed_mbox_driver = {
++ .driver = {
++ .name = DEVICE_NAME,
++ .of_match_table = aspeed_mbox_match,
++ },
++ .probe = aspeed_mbox_probe,
++ .remove = aspeed_mbox_remove,
++};
++
++module_platform_driver(aspeed_mbox_driver);
++MODULE_DEVICE_TABLE(of, aspeed_mbox_match);
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Cyril Bur <cyrilbur@gmail.com>");
++MODULE_AUTHOR("Chia-Wei Wang <chiawei_wang@aspeedtech.com");
++MODULE_DESCRIPTION("Aspeed mailbox device driver");
+diff --git a/drivers/soc/aspeed/aspeed-lpc-pcc.c b/drivers/soc/aspeed/aspeed-lpc-pcc.c
+new file mode 100644
+index 000000000..61bb47747
+--- /dev/null
++++ b/drivers/soc/aspeed/aspeed-lpc-pcc.c
+@@ -0,0 +1,507 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (C) ASPEED Technology Inc.
++ */
++#include <linux/bitops.h>
++#include <linux/interrupt.h>
++#include <linux/fs.h>
++#include <linux/idr.h>
++#include <linux/kfifo.h>
++#include <linux/mfd/syscon.h>
++#include <linux/miscdevice.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/of_address.h>
++#include <linux/platform_device.h>
++#include <linux/poll.h>
++#include <linux/regmap.h>
++#include <linux/dma-mapping.h>
++#include <linux/sizes.h>
++
++#define DEVICE_NAME "aspeed-lpc-pcc"
++
++static DEFINE_IDA(aspeed_pcc_ida);
++
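++/*
++ * Register offsets below are within the LPC controller block; they are
++ * accessed through the syscon regmap of the parent device node.
++ */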
++#define HICR6 0x084
++#define HICR6_EN2BMODE BIT(19)
++#define SNPWADR 0x090
++#define PCCR6 0x0c4
++#define PCCR6_DMA_CUR_ADDR GENMASK(27, 0)
++#define PCCR4 0x0d0
++#define PCCR4_DMA_ADDRL_MASK GENMASK(31, 0)
++#define PCCR4_DMA_ADDRL_SHIFT 0
++#define PCCR5 0x0d4
++#define PCCR5_DMA_ADDRH_MASK GENMASK(27, 24)
++#define PCCR5_DMA_ADDRH_SHIFT 24
++#define PCCR5_DMA_LEN_MASK GENMASK(23, 0)
++#define PCCR5_DMA_LEN_SHIFT 0
++#define HICRB 0x100
++#define HICRB_ENSNP0D BIT(14)
++#define HICRB_ENSNP1D BIT(15)
++#define PCCR0 0x130
++#define PCCR0_EN_DMA_INT BIT(31)
++#define PCCR0_EN_DMA_MODE BIT(14)
++#define PCCR0_ADDR_SEL_MASK GENMASK(13, 12)
++#define PCCR0_ADDR_SEL_SHIFT 12
++#define PCCR0_RX_TRIG_LVL_MASK GENMASK(10, 8)
++#define PCCR0_RX_TRIG_LVL_SHIFT 8
++#define PCCR0_CLR_RX_FIFO BIT(7)
++#define PCCR0_MODE_SEL_MASK GENMASK(5, 4)
++#define PCCR0_MODE_SEL_SHIFT 4
++#define PCCR0_EN_RX_TMOUT_INT BIT(2)
++#define PCCR0_EN_RX_AVAIL_INT BIT(1)
++#define PCCR0_EN BIT(0)
++#define PCCR1 0x134
++#define PCCR1_BASE_ADDR_MASK GENMASK(15, 0)
++#define PCCR1_BASE_ADDR_SHIFT 0
++#define PCCR1_DONT_CARE_BITS_MASK GENMASK(21, 16)
++#define PCCR1_DONT_CARE_BITS_SHIFT 16
++#define PCCR2 0x138
++#define PCCR2_INT_STATUS_PATTERN_B BIT(16)
++#define PCCR2_INT_STATUS_PATTERN_A BIT(8)
++#define PCCR2_INT_STATUS_DMA_DONE BIT(4)
++#define PCCR2_INT_STATUS_DATA_RDY PCCR2_INT_STATUS_DMA_DONE
++#define PCCR2_INT_STATUS_RX_OVER BIT(3)
++#define PCCR2_INT_STATUS_RX_TMOUT BIT(2)
++#define PCCR2_INT_STATUS_RX_AVAIL BIT(1)
++#define PCCR3 0x13c
++#define PCCR3_FIFO_DATA_MASK GENMASK(7, 0)
++
++#define PCC_DMA_BUFSZ (256 * SZ_1K)
++
++enum pcc_fifo_threshold {
++ PCC_FIFO_THR_1_BYTE,
++ PCC_FIFO_THR_1_EIGHTH,
++ PCC_FIFO_THR_2_EIGHTH,
++ PCC_FIFO_THR_3_EIGHTH,
++ PCC_FIFO_THR_4_EIGHTH,
++ PCC_FIFO_THR_5_EIGHTH,
++ PCC_FIFO_THR_6_EIGHTH,
++ PCC_FIFO_THR_7_EIGHTH,
++ PCC_FIFO_THR_8_EIGHTH,
++};
++
++enum pcc_record_mode {
++ PCC_REC_1B,
++ PCC_REC_2B,
++ PCC_REC_4B,
++ PCC_REC_FULL,
++};
++
++enum pcc_port_hbits_select {
++ PCC_PORT_HBITS_SEL_NONE,
++ PCC_PORT_HBITS_SEL_45,
++ PCC_PORT_HBITS_SEL_67,
++ PCC_PORT_HBITS_SEL_89,
++};
++
++struct aspeed_pcc_dma {
++ uint32_t rptr;
++ uint8_t *virt;
++ dma_addr_t addr;
++ uint32_t size;
++};
++
++struct aspeed_pcc {
++ struct device *dev;
++ struct regmap *regmap;
++ int irq;
++ uint32_t rec_mode;
++ uint32_t port;
++ uint32_t port_xbits;
++ uint32_t port_hbits_select;
++ uint32_t dma_mode;
++ struct aspeed_pcc_dma dma;
++ struct kfifo fifo;
++ wait_queue_head_t wq;
++ struct miscdevice mdev;
++ int mdev_id;
++ bool a2600_15;
++};
++
++static inline bool is_valid_rec_mode(uint32_t mode)
++{
++ return (mode > PCC_REC_FULL) ? false : true;
++}
++
++static inline bool is_valid_high_bits_select(uint32_t sel)
++{
++ return (sel > PCC_PORT_HBITS_SEL_89) ? false : true;
++}
++
++static ssize_t aspeed_pcc_file_read(struct file *file, char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ int rc;
++ unsigned int copied;
++ struct aspeed_pcc *pcc = container_of(file->private_data,
++ struct aspeed_pcc,
++ mdev);
++
++ if (kfifo_is_empty(&pcc->fifo)) {
++ if (file->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++
++ rc = wait_event_interruptible(pcc->wq,
++ !kfifo_is_empty(&pcc->fifo));
++ if (rc == -ERESTARTSYS)
++ return -EINTR;
++ }
++
++ rc = kfifo_to_user(&pcc->fifo, buffer, count, &copied);
++
++ return rc ? rc : copied;
++}
++
++static __poll_t aspeed_pcc_file_poll(struct file *file,
++ struct poll_table_struct *pt)
++{
++ struct aspeed_pcc *pcc = container_of(
++ file->private_data,
++ struct aspeed_pcc,
++ mdev);
++
++ poll_wait(file, &pcc->wq, pt);
++
++	return !kfifo_is_empty(&pcc->fifo) ? EPOLLIN : 0;
++}
++
++static const struct file_operations pcc_fops = {
++ .owner = THIS_MODULE,
++ .read = aspeed_pcc_file_read,
++ .poll = aspeed_pcc_file_poll,
++};
++
++static irqreturn_t aspeed_pcc_dma_isr(int irq, void *arg)
++{
++ uint32_t reg, rptr, wptr;
++	struct aspeed_pcc *pcc = (struct aspeed_pcc *)arg;
++ struct kfifo *fifo = &pcc->fifo;
++
++ regmap_write_bits(pcc->regmap, PCCR2, PCCR2_INT_STATUS_DMA_DONE, PCCR2_INT_STATUS_DMA_DONE);
++
++	regmap_read(pcc->regmap, PCCR6, &reg);
++ wptr = (reg & PCCR6_DMA_CUR_ADDR) - (pcc->dma.addr & PCCR6_DMA_CUR_ADDR);
++ rptr = pcc->dma.rptr;
++
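++	/*
++	 * Copy bytes from the software read pointer up to (but not including)
++	 * the hardware write offset into the kfifo, dropping the oldest byte
++	 * whenever the kfifo is full.
++	 */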
++ do {
++ if (kfifo_is_full(fifo))
++ kfifo_skip(fifo);
++
++ kfifo_put(fifo, pcc->dma.virt[rptr]);
++
++ rptr = (rptr + 1) % pcc->dma.size;
++ } while (rptr != wptr);
++
++ pcc->dma.rptr = rptr;
++
++ wake_up_interruptible(&pcc->wq);
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t aspeed_pcc_isr(int irq, void *arg)
++{
++ uint32_t sts, reg;
++	struct aspeed_pcc *pcc = (struct aspeed_pcc *)arg;
++ struct kfifo *fifo = &pcc->fifo;
++
++ regmap_read(pcc->regmap, PCCR2, &sts);
++
++ if (!(sts & (PCCR2_INT_STATUS_RX_TMOUT | PCCR2_INT_STATUS_RX_AVAIL | PCCR2_INT_STATUS_DMA_DONE)))
++ return IRQ_NONE;
++
++ if (pcc->dma_mode)
++ return aspeed_pcc_dma_isr(irq, arg);
++
++ while (sts & PCCR2_INT_STATUS_DATA_RDY) {
++		regmap_read(pcc->regmap, PCCR3, &reg);
++
++ if (kfifo_is_full(fifo))
++ kfifo_skip(fifo);
++
++ kfifo_put(fifo, reg & PCCR3_FIFO_DATA_MASK);
++
++ regmap_read(pcc->regmap, PCCR2, &sts);
++ }
++
++ wake_up_interruptible(&pcc->wq);
++
++ return IRQ_HANDLED;
++}
++
++/*
++ * A2600-15 AP note
++ *
++ * SW workaround to prevent generating Non-Fatal-Error (NFE)
++ * eSPI response when PCC is used for port I/O byte snooping
++ * over eSPI.
++ */
++static int aspeed_a2600_15(struct aspeed_pcc *pcc, struct device *dev)
++{
++ struct device_node *np;
++ u32 hicrb_en;
++
++ /* abort if snoop is enabled */
++ np = of_find_compatible_node(dev->parent->of_node, NULL, "aspeed,ast2600-lpc-snoop");
++ if (np) {
++ if (of_device_is_available(np)) {
++ dev_err(dev, "A2600-15 should be applied with snoop disabled\n");
++ return -EPERM;
++ }
++ }
++
++ /* abort if port is not 4-bytes continuous range */
++ if (pcc->port_xbits != 0x3) {
++ dev_err(dev, "A2600-15 should be applied on 4-bytes continuous I/O address range\n");
++ return -EINVAL;
++ }
++
++ /* set SNPWADR of snoop device */
++ regmap_write(pcc->regmap, SNPWADR, pcc->port | ((pcc->port + 2) << 16));
++
++ /* set HICRB[15:14]=11b to enable ACCEPT response for SNPWADR */
++ hicrb_en = HICRB_ENSNP0D | HICRB_ENSNP1D;
++ regmap_update_bits(pcc->regmap, HICRB, hicrb_en, hicrb_en);
++
++ /* set HICR6[19] to extend SNPWADR to 2x range */
++ regmap_update_bits(pcc->regmap, HICR6, HICR6_EN2BMODE, HICR6_EN2BMODE);
++
++ return 0;
++}
++
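++/*
++ * Apply the A2600-15 workaround if requested, program the snooping
++ * configuration (record mode, port address, high/don't-care address bits),
++ * then enable the capture engine in either DMA or FIFO-interrupt mode.
++ */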
++static int aspeed_pcc_enable(struct aspeed_pcc *pcc, struct device *dev)
++{
++ int rc;
++
++ if (pcc->a2600_15) {
++ rc = aspeed_a2600_15(pcc, dev);
++ if (rc)
++ return rc;
++ }
++
++ /* record mode */
++ regmap_update_bits(pcc->regmap, PCCR0,
++ PCCR0_MODE_SEL_MASK,
++ pcc->rec_mode << PCCR0_MODE_SEL_SHIFT);
++
++ /* port address */
++ regmap_update_bits(pcc->regmap, PCCR1,
++ PCCR1_BASE_ADDR_MASK,
++ pcc->port << PCCR1_BASE_ADDR_SHIFT);
++
++ /* port address high bits selection or parser control */
++ regmap_update_bits(pcc->regmap, PCCR0,
++ PCCR0_ADDR_SEL_MASK,
++ pcc->port_hbits_select << PCCR0_ADDR_SEL_SHIFT);
++
++ /* port address dont care bits */
++ regmap_update_bits(pcc->regmap, PCCR1,
++ PCCR1_DONT_CARE_BITS_MASK,
++ pcc->port_xbits << PCCR1_DONT_CARE_BITS_SHIFT);
++
++ /* set DMA ring buffer size and enable interrupts */
++ if (pcc->dma_mode) {
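++		/*
++		 * PCCR4 takes the lower 32 bits of the DMA base address;
++		 * PCCR5 takes the upper address bits and the ring length,
++		 * which is programmed here as dma.size / 4.
++		 */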
++ regmap_write(pcc->regmap, PCCR4, pcc->dma.addr & 0xffffffff);
++ regmap_update_bits(pcc->regmap, PCCR5, PCCR5_DMA_ADDRH_MASK,
++ (pcc->dma.addr >> 32) << PCCR5_DMA_ADDRH_SHIFT);
++ regmap_update_bits(pcc->regmap, PCCR5, PCCR5_DMA_LEN_MASK,
++ (pcc->dma.size / 4) << PCCR5_DMA_LEN_SHIFT);
++ regmap_update_bits(pcc->regmap, PCCR0,
++ PCCR0_EN_DMA_INT | PCCR0_EN_DMA_MODE,
++ PCCR0_EN_DMA_INT | PCCR0_EN_DMA_MODE);
++ } else {
++ regmap_update_bits(pcc->regmap, PCCR0, PCCR0_RX_TRIG_LVL_MASK,
++ PCC_FIFO_THR_4_EIGHTH << PCCR0_RX_TRIG_LVL_SHIFT);
++ regmap_update_bits(pcc->regmap, PCCR0,
++ PCCR0_EN_RX_TMOUT_INT | PCCR0_EN_RX_AVAIL_INT,
++ PCCR0_EN_RX_TMOUT_INT | PCCR0_EN_RX_AVAIL_INT);
++ }
++
++ regmap_update_bits(pcc->regmap, PCCR0, PCCR0_EN, PCCR0_EN);
++
++ return 0;
++}
++
++static int aspeed_pcc_probe(struct platform_device *pdev)
++{
++ int rc;
++ struct aspeed_pcc *pcc;
++ struct device *dev = &pdev->dev;
++ uint32_t fifo_size = PAGE_SIZE;
++
++ pcc = devm_kzalloc(&pdev->dev, sizeof(*pcc), GFP_KERNEL);
++ if (!pcc)
++ return -ENOMEM;
++
++ pcc->dev = dev;
++ rc = of_property_read_u32(dev->of_node, "port-addr", &pcc->port);
++ if (rc) {
++ dev_err(dev, "cannot get port address\n");
++ return -ENODEV;
++ }
++
++ /* optional, by default: 0 -> 1-Byte mode */
++ of_property_read_u32(dev->of_node, "rec-mode", &pcc->rec_mode);
++ if (!is_valid_rec_mode(pcc->rec_mode)) {
++ dev_err(dev, "invalid record mode: %u\n",
++ pcc->rec_mode);
++ return -EINVAL;
++ }
++
++ /* optional, by default: 0 -> no don't care bits */
++ of_property_read_u32(dev->of_node, "port-addr-xbits", &pcc->port_xbits);
++
++ /*
++ * optional, by default: 0 -> no high address bits
++ *
++ * Note that when record mode is set to 1-Byte, this
++ * property is ignored and the corresponding HW bits
++ * behave as read/write cycle parser control with the
++ * value set to 0b11
++ */
++ if (pcc->rec_mode) {
++ of_property_read_u32(dev->of_node, "port-addr-hbits-select",
++ &pcc->port_hbits_select);
++ if (!is_valid_high_bits_select(pcc->port_hbits_select)) {
++ dev_err(dev, "invalid high address bits selection: %u\n",
++ pcc->port_hbits_select);
++ return -EINVAL;
++ }
++	} else {
++		pcc->port_hbits_select = 0x3;
++	}
++
++ /* AP note A2600-15 */
++ pcc->a2600_15 = of_property_read_bool(dev->of_node, "A2600-15");
++ if (pcc->a2600_15)
++ dev_info(dev, "A2600-15 AP note patch is selected\n");
++
++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
++ if (rc) {
++		dev_err(dev, "cannot set 64-bit DMA mask\n");
++ return rc;
++ }
++
++ pcc->dma_mode = of_property_read_bool(dev->of_node, "dma-mode");
++ if (pcc->dma_mode) {
++ pcc->dma.size = PCC_DMA_BUFSZ;
++ pcc->dma.virt = dmam_alloc_coherent(dev,
++ pcc->dma.size,
++ &pcc->dma.addr,
++ GFP_KERNEL);
++ if (!pcc->dma.virt) {
++ dev_err(dev, "cannot allocate DMA buffer\n");
++ return -ENOMEM;
++ }
++
++ fifo_size = roundup(pcc->dma.size, PAGE_SIZE);
++ }
++
++ rc = kfifo_alloc(&pcc->fifo, fifo_size, GFP_KERNEL);
++ if (rc) {
++ dev_err(dev, "cannot allocate kFIFO\n");
++ return -ENOMEM;
++ }
++
++ pcc->regmap = syscon_node_to_regmap(pdev->dev.parent->of_node);
++ if (IS_ERR(pcc->regmap)) {
++		dev_err(dev, "cannot map registers\n");
++		rc = -ENODEV;
++		goto err_free_kfifo;
++ }
++
++ /* Disable PCC and DMA Mode for safety */
++ regmap_update_bits(pcc->regmap, PCCR0, PCCR0_EN | PCCR0_EN_DMA_MODE, 0);
++
++ /* Clear Rx FIFO. */
++ regmap_update_bits(pcc->regmap, PCCR0, PCCR0_CLR_RX_FIFO, 1);
++
++ /* Clear All interrupts status. */
++ regmap_write(pcc->regmap, PCCR2,
++ PCCR2_INT_STATUS_RX_OVER | PCCR2_INT_STATUS_DMA_DONE |
++ PCCR2_INT_STATUS_PATTERN_A | PCCR2_INT_STATUS_PATTERN_B);
++
++ pcc->irq = platform_get_irq(pdev, 0);
++ if (pcc->irq < 0) {
++ dev_err(dev, "cannot get IRQ\n");
++ rc = -ENODEV;
++ goto err_free_kfifo;
++ }
++
++ rc = devm_request_irq(dev, pcc->irq, aspeed_pcc_isr, 0, DEVICE_NAME, pcc);
++ if (rc < 0) {
++ dev_err(dev, "cannot request IRQ handler\n");
++ goto err_free_kfifo;
++ }
++
++ init_waitqueue_head(&pcc->wq);
++
++ pcc->mdev_id = ida_alloc(&aspeed_pcc_ida, GFP_KERNEL);
++ if (pcc->mdev_id < 0) {
++ dev_err(dev, "cannot allocate ID\n");
++		rc = pcc->mdev_id;
++		goto err_free_kfifo;
++ }
++
++ pcc->mdev.parent = dev;
++ pcc->mdev.minor = MISC_DYNAMIC_MINOR;
++ pcc->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%d", DEVICE_NAME,
++ pcc->mdev_id);
++ pcc->mdev.fops = &pcc_fops;
++ rc = misc_register(&pcc->mdev);
++ if (rc) {
++ dev_err(dev, "cannot register misc device\n");
++ goto err_free_kfifo;
++ }
++
++ rc = aspeed_pcc_enable(pcc, dev);
++ if (rc) {
++ dev_err(dev, "cannot enable PCC\n");
++ goto err_dereg_mdev;
++ }
++
++ dev_set_drvdata(&pdev->dev, pcc);
++
++ dev_info(dev, "module loaded\n");
++
++ return 0;
++
++err_dereg_mdev:
++ misc_deregister(&pcc->mdev);
++
++err_free_kfifo:
++ kfifo_free(&pcc->fifo);
++
++ return rc;
++}
++
++static int aspeed_pcc_remove(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct aspeed_pcc *pcc = dev_get_drvdata(dev);
++
++ kfifo_free(&pcc->fifo);
++ misc_deregister(&pcc->mdev);
++
++ return 0;
++}
++
++static const struct of_device_id aspeed_pcc_table[] = {
++ { .compatible = "aspeed,ast2500-lpc-pcc" },
++ { .compatible = "aspeed,ast2600-lpc-pcc" },
++ { },
++};
++
++static struct platform_driver aspeed_pcc_driver = {
++ .driver = {
++ .name = "aspeed-pcc",
++ .of_match_table = aspeed_pcc_table,
++ },
++ .probe = aspeed_pcc_probe,
++ .remove = aspeed_pcc_remove,
++};
++
++module_platform_driver(aspeed_pcc_driver);
++
++MODULE_AUTHOR("Chia-Wei Wang <chiawei_wang@aspeedtech.com>");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Driver for Aspeed Post Code Capture");
+diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
+index 773dbcbc0..48b6cbda2 100644
+--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
++++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
+@@ -1,17 +1,10 @@
+ // SPDX-License-Identifier: GPL-2.0-or-later
+ /*
+ * Copyright 2017 Google Inc
+- *
+- * Provides a simple driver to control the ASPEED LPC snoop interface which
+- * allows the BMC to listen on and save the data written by
+- * the host to an arbitrary LPC I/O port.
+- *
+- * Typically used by the BMC to "watch" host boot progress via port
+- * 0x80 writes made by the BIOS during the boot process.
++ * Copyright 2023 Aspeed Technology Inc
+ */
+
+ #include <linux/bitops.h>
+-#include <linux/clk.h>
+ #include <linux/interrupt.h>
+ #include <linux/fs.h>
+ #include <linux/kfifo.h>
+@@ -22,11 +15,14 @@
+ #include <linux/platform_device.h>
+ #include <linux/poll.h>
+ #include <linux/regmap.h>
++#include <linux/idr.h>
+
+ #define DEVICE_NAME "aspeed-lpc-snoop"
+
+-#define NUM_SNOOP_CHANNELS 2
+-#define SNOOP_FIFO_SIZE 2048
++static DEFINE_IDA(aspeed_lpc_snoop_ida);
++
++#define SNOOP_HW_CHANNEL_NUM 2
++#define SNOOP_FIFO_SIZE 2048
+
+ #define HICR5 0x80
+ #define HICR5_EN_SNP0W BIT(0)
+@@ -51,30 +47,32 @@
+ #define HICRB_ENSNP1D BIT(15)
+
+ struct aspeed_lpc_snoop_model_data {
+- /* The ast2400 has bits 14 and 15 as reserved, whereas the ast2500
+- * can use them.
++ /*
++ * The ast2400 has bits 14 and 15 as reserved, whereas the ast2500 and later
++	 * can use them. These two bits are for the eSPI interface to respond
++	 * ACCEPT when snooping host port I/O writes.
+ */
+ unsigned int has_hicrb_ensnp;
+ };
+
+ struct aspeed_lpc_snoop_channel {
+- struct kfifo fifo;
+- wait_queue_head_t wq;
+- struct miscdevice miscdev;
++ int id;
++ struct miscdevice mdev;
++ wait_queue_head_t wq;
++ struct kfifo fifo;
+ };
+
+ struct aspeed_lpc_snoop {
+- struct regmap *regmap;
+- int irq;
+- struct clk *clk;
+- struct aspeed_lpc_snoop_channel chan[NUM_SNOOP_CHANNELS];
++ struct aspeed_lpc_snoop_channel chan[SNOOP_HW_CHANNEL_NUM];
++ struct regmap *regmap;
++ int irq;
+ };
+
+ static struct aspeed_lpc_snoop_channel *snoop_file_to_chan(struct file *file)
+ {
+ return container_of(file->private_data,
+ struct aspeed_lpc_snoop_channel,
+- miscdev);
++ mdev);
+ }
+
+ static ssize_t snoop_file_read(struct file *file, char __user *buffer,
+@@ -87,11 +85,13 @@ static ssize_t snoop_file_read(struct file *file, char __user *buffer,
+ if (kfifo_is_empty(&chan->fifo)) {
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
++
+ ret = wait_event_interruptible(chan->wq,
+- !kfifo_is_empty(&chan->fifo));
++ !kfifo_is_empty(&chan->fifo));
+ if (ret == -ERESTARTSYS)
+ return -EINTR;
+ }
++
+ ret = kfifo_to_user(&chan->fifo, buffer, count, &copied);
+ if (ret)
+ return ret;
+@@ -100,11 +100,12 @@ static ssize_t snoop_file_read(struct file *file, char __user *buffer,
+ }
+
+ static __poll_t snoop_file_poll(struct file *file,
+- struct poll_table_struct *pt)
++ struct poll_table_struct *pt)
+ {
+ struct aspeed_lpc_snoop_channel *chan = snoop_file_to_chan(file);
+
+ poll_wait(file, &chan->wq, pt);
++
+ return !kfifo_is_empty(&chan->fifo) ? EPOLLIN : 0;
+ }
+
+@@ -120,18 +121,22 @@ static void put_fifo_with_discard(struct aspeed_lpc_snoop_channel *chan, u8 val)
+ {
+ if (!kfifo_initialized(&chan->fifo))
+ return;
++
+ if (kfifo_is_full(&chan->fifo))
+ kfifo_skip(&chan->fifo);
++
+ kfifo_put(&chan->fifo, val);
++
+ wake_up_interruptible(&chan->wq);
+ }
+
+ static irqreturn_t aspeed_lpc_snoop_irq(int irq, void *arg)
+ {
+- struct aspeed_lpc_snoop *lpc_snoop = arg;
++ struct aspeed_lpc_snoop *snoop = arg;
+ u32 reg, data;
++ u8 val;
+
+-	if (regmap_read(lpc_snoop->regmap, HICR6, &reg))
++	if (regmap_read(snoop->regmap, HICR6, &reg))
+ return IRQ_NONE;
+
+ /* Check if one of the snoop channels is interrupting */
+@@ -140,74 +145,64 @@ static irqreturn_t aspeed_lpc_snoop_irq(int irq, void *arg)
+ return IRQ_NONE;
+
+ /* Ack pending IRQs */
+- regmap_write(lpc_snoop->regmap, HICR6, reg);
++ regmap_write(snoop->regmap, HICR6, reg);
+
+ /* Read and save most recent snoop'ed data byte to FIFO */
+- regmap_read(lpc_snoop->regmap, SNPWDR, &data);
++ regmap_read(snoop->regmap, SNPWDR, &data);
+
+ if (reg & HICR6_STR_SNP0W) {
+- u8 val = (data & SNPWDR_CH0_MASK) >> SNPWDR_CH0_SHIFT;
+-
+- put_fifo_with_discard(&lpc_snoop->chan[0], val);
++ val = (data & SNPWDR_CH0_MASK) >> SNPWDR_CH0_SHIFT;
++ put_fifo_with_discard(&snoop->chan[0], val);
+ }
+- if (reg & HICR6_STR_SNP1W) {
+- u8 val = (data & SNPWDR_CH1_MASK) >> SNPWDR_CH1_SHIFT;
+
+- put_fifo_with_discard(&lpc_snoop->chan[1], val);
++ if (reg & HICR6_STR_SNP1W) {
++ val = (data & SNPWDR_CH1_MASK) >> SNPWDR_CH1_SHIFT;
++ put_fifo_with_discard(&snoop->chan[1], val);
+ }
+
+ return IRQ_HANDLED;
+ }
+
+-static int aspeed_lpc_snoop_config_irq(struct aspeed_lpc_snoop *lpc_snoop,
+- struct platform_device *pdev)
++static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *snoop,
++ struct device *dev, int hw_channel, u16 port)
+ {
+- struct device *dev = &pdev->dev;
+- int rc;
+-
+- lpc_snoop->irq = platform_get_irq(pdev, 0);
+- if (!lpc_snoop->irq)
+- return -ENODEV;
++ const struct aspeed_lpc_snoop_model_data *model_data;
++ u32 hicr5_en, snpwadr_mask, snpwadr_shift, hicrb_en;
++ struct aspeed_lpc_snoop_channel *chan;
++ int rc = 0;
+
+- rc = devm_request_irq(dev, lpc_snoop->irq,
+- aspeed_lpc_snoop_irq, IRQF_SHARED,
+- DEVICE_NAME, lpc_snoop);
+- if (rc < 0) {
+- dev_warn(dev, "Unable to request IRQ %d\n", lpc_snoop->irq);
+- lpc_snoop->irq = 0;
+- return rc;
+- }
++ model_data = of_device_get_match_data(dev);
+
+- return 0;
+-}
++ chan = &snoop->chan[hw_channel];
+
+-static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+- struct device *dev,
+- int channel, u16 lpc_port)
+-{
+- int rc = 0;
+- u32 hicr5_en, snpwadr_mask, snpwadr_shift, hicrb_en;
+- const struct aspeed_lpc_snoop_model_data *model_data =
+- of_device_get_match_data(dev);
++ init_waitqueue_head(&chan->wq);
+
+- init_waitqueue_head(&lpc_snoop->chan[channel].wq);
+ /* Create FIFO datastructure */
+- rc = kfifo_alloc(&lpc_snoop->chan[channel].fifo,
+- SNOOP_FIFO_SIZE, GFP_KERNEL);
+- if (rc)
++ rc = kfifo_alloc(&chan->fifo, SNOOP_FIFO_SIZE, GFP_KERNEL);
++ if (rc) {
++ dev_err(dev, "cannot allocate kFIFO\n");
+ return rc;
++ }
+
+- lpc_snoop->chan[channel].miscdev.minor = MISC_DYNAMIC_MINOR;
+- lpc_snoop->chan[channel].miscdev.name =
+- devm_kasprintf(dev, GFP_KERNEL, "%s%d", DEVICE_NAME, channel);
+- lpc_snoop->chan[channel].miscdev.fops = &snoop_fops;
+- lpc_snoop->chan[channel].miscdev.parent = dev;
+- rc = misc_register(&lpc_snoop->chan[channel].miscdev);
+- if (rc)
++ chan->id = ida_alloc(&aspeed_lpc_snoop_ida, GFP_KERNEL);
++ if (chan->id < 0) {
++ dev_err(dev, "cannot allocate ID\n");
++ return chan->id;
++ }
++
++ chan->mdev.parent = dev;
++ chan->mdev.minor = MISC_DYNAMIC_MINOR;
++ chan->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%d",
++ DEVICE_NAME, chan->id);
++ chan->mdev.fops = &snoop_fops;
++ rc = misc_register(&chan->mdev);
++ if (rc) {
++ dev_err(dev, "cannot register misc device\n");
+ return rc;
++ }
+
+ /* Enable LPC snoop channel at requested port */
+- switch (channel) {
++ switch (hw_channel) {
+ case 0:
+ hicr5_en = HICR5_EN_SNP0W | HICR5_ENINT_SNP0W;
+ snpwadr_mask = SNPWADR_CH0_MASK;
+@@ -224,27 +219,26 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+ return -EINVAL;
+ }
+
+- regmap_update_bits(lpc_snoop->regmap, HICR5, hicr5_en, hicr5_en);
+- regmap_update_bits(lpc_snoop->regmap, SNPWADR, snpwadr_mask,
+- lpc_port << snpwadr_shift);
++ regmap_update_bits(snoop->regmap, HICR5, hicr5_en, hicr5_en);
++ regmap_update_bits(snoop->regmap, SNPWADR, snpwadr_mask, port << snpwadr_shift);
++
+ if (model_data->has_hicrb_ensnp)
+- regmap_update_bits(lpc_snoop->regmap, HICRB,
+- hicrb_en, hicrb_en);
++ regmap_update_bits(snoop->regmap, HICRB, hicrb_en, hicrb_en);
+
+ return rc;
+ }
+
+-static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+- int channel)
++static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *snoop,
++ int hw_channel)
+ {
+- switch (channel) {
++ switch (hw_channel) {
+ case 0:
+- regmap_update_bits(lpc_snoop->regmap, HICR5,
++ regmap_update_bits(snoop->regmap, HICR5,
+ HICR5_EN_SNP0W | HICR5_ENINT_SNP0W,
+ 0);
+ break;
+ case 1:
+- regmap_update_bits(lpc_snoop->regmap, HICR5,
++ regmap_update_bits(snoop->regmap, HICR5,
+ HICR5_EN_SNP1W | HICR5_ENINT_SNP1W,
+ 0);
+ break;
+@@ -252,96 +246,74 @@ static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop,
+ return;
+ }
+
+- kfifo_free(&lpc_snoop->chan[channel].fifo);
+- misc_deregister(&lpc_snoop->chan[channel].miscdev);
++ kfifo_free(&snoop->chan[hw_channel].fifo);
++ misc_deregister(&snoop->chan[hw_channel].mdev);
+ }
+
+ static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
+ {
+- struct aspeed_lpc_snoop *lpc_snoop;
++	u32 ports[SNOOP_HW_CHANNEL_NUM];
++	int port_num;
++ struct aspeed_lpc_snoop *snoop;
+ struct device *dev;
+- struct device_node *np;
+- u32 port;
+- int rc;
++ int i, rc;
+
+ dev = &pdev->dev;
+
+- lpc_snoop = devm_kzalloc(dev, sizeof(*lpc_snoop), GFP_KERNEL);
+- if (!lpc_snoop)
++ snoop = devm_kzalloc(dev, sizeof(*snoop), GFP_KERNEL);
++ if (!snoop)
+ return -ENOMEM;
+
+- np = pdev->dev.parent->of_node;
+- if (!of_device_is_compatible(np, "aspeed,ast2400-lpc-v2") &&
+- !of_device_is_compatible(np, "aspeed,ast2500-lpc-v2") &&
+- !of_device_is_compatible(np, "aspeed,ast2600-lpc-v2")) {
+- dev_err(dev, "unsupported LPC device binding\n");
+- return -ENODEV;
+- }
+-
+- lpc_snoop->regmap = syscon_node_to_regmap(np);
+- if (IS_ERR(lpc_snoop->regmap)) {
++ snoop->regmap = syscon_node_to_regmap(pdev->dev.parent->of_node);
++ if (IS_ERR(snoop->regmap)) {
+ dev_err(dev, "Couldn't get regmap\n");
+ return -ENODEV;
+ }
+
+- dev_set_drvdata(&pdev->dev, lpc_snoop);
++ dev_set_drvdata(&pdev->dev, snoop);
+
+- rc = of_property_read_u32_index(dev->of_node, "snoop-ports", 0, &port);
+- if (rc) {
+- dev_err(dev, "no snoop ports configured\n");
+- return -ENODEV;
++ snoop->irq = platform_get_irq(pdev, 0);
++ if (snoop->irq < 0) {
++ dev_err(dev, "cannot get IRQ\n");
++ return snoop->irq;
+ }
+
+- lpc_snoop->clk = devm_clk_get(dev, NULL);
+- if (IS_ERR(lpc_snoop->clk)) {
+- rc = PTR_ERR(lpc_snoop->clk);
+- if (rc != -EPROBE_DEFER)
+- dev_err(dev, "couldn't get clock\n");
+- return rc;
+- }
+- rc = clk_prepare_enable(lpc_snoop->clk);
+- if (rc) {
+- dev_err(dev, "couldn't enable clock\n");
++ rc = devm_request_irq(dev, snoop->irq, aspeed_lpc_snoop_irq,
++ IRQF_SHARED, DEVICE_NAME, snoop);
++ if (rc < 0) {
++ dev_err(dev, "cannot request IRQ %d\n", snoop->irq);
+ return rc;
+ }
+
+- rc = aspeed_lpc_snoop_config_irq(lpc_snoop, pdev);
+- if (rc)
+- goto err;
+-
+- rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 0, port);
+- if (rc)
+- goto err;
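++	/*
++	 * "snoop-ports" may carry one or two I/O port addresses; the call
++	 * below returns the number of entries read, or a negative errno.
++	 */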
++ port_num = of_property_read_variable_u32_array(dev->of_node,
++ "snoop-ports",
++ ports, 1, SNOOP_HW_CHANNEL_NUM);
++ if (port_num < 0) {
++ dev_err(dev, "no snoop ports configured\n");
++ return -ENODEV;
++ }
+
+- /* Configuration of 2nd snoop channel port is optional */
+- if (of_property_read_u32_index(dev->of_node, "snoop-ports",
+- 1, &port) == 0) {
+- rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 1, port);
+- if (rc) {
+- aspeed_lpc_disable_snoop(lpc_snoop, 0);
++ for (i = 0; i < port_num; ++i) {
++ rc = aspeed_lpc_enable_snoop(snoop, dev, i, ports[i]);
++ if (rc)
+ goto err;
+- }
++
++ dev_info(dev, "Initialised channel %d to snoop IO address 0x%x\n",
++ snoop->chan[i].id, ports[i]);
+ }
+
+ return 0;
+
+ err:
+- clk_disable_unprepare(lpc_snoop->clk);
+-
+ return rc;
+ }
+
+-static int aspeed_lpc_snoop_remove(struct platform_device *pdev)
++static void aspeed_lpc_snoop_remove(struct platform_device *pdev)
+ {
+- struct aspeed_lpc_snoop *lpc_snoop = dev_get_drvdata(&pdev->dev);
++ struct aspeed_lpc_snoop *snoop = dev_get_drvdata(&pdev->dev);
++ int i;
+
+- /* Disable both snoop channels */
+- aspeed_lpc_disable_snoop(lpc_snoop, 0);
+- aspeed_lpc_disable_snoop(lpc_snoop, 1);
+-
+- clk_disable_unprepare(lpc_snoop->clk);
+-
+- return 0;
++ for (i = 0; i < SNOOP_HW_CHANNEL_NUM; ++i)
++ aspeed_lpc_disable_snoop(snoop, i);
+ }
+
+ static const struct aspeed_lpc_snoop_model_data ast2400_model_data = {
+@@ -368,7 +340,7 @@ static struct platform_driver aspeed_lpc_snoop_driver = {
+ .of_match_table = aspeed_lpc_snoop_match,
+ },
+ .probe = aspeed_lpc_snoop_probe,
+- .remove = aspeed_lpc_snoop_remove,
++ .remove_new = aspeed_lpc_snoop_remove,
+ };
+
+ module_platform_driver(aspeed_lpc_snoop_driver);
+@@ -376,4 +348,5 @@ module_platform_driver(aspeed_lpc_snoop_driver);
+ MODULE_DEVICE_TABLE(of, aspeed_lpc_snoop_match);
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Robert Lippert <rlippert@google.com>");
++MODULE_AUTHOR("Chia-Wei Wang <chiawei_wang@aspeedtech.com>");
+ MODULE_DESCRIPTION("Linux driver to control Aspeed LPC snoop functionality");
+diff --git a/drivers/soc/aspeed/aspeed-mctp.c b/drivers/soc/aspeed/aspeed-mctp.c
+new file mode 100644
+index 000000000..68b22d1df
+--- /dev/null
++++ b/drivers/soc/aspeed/aspeed-mctp.c
+@@ -0,0 +1,2523 @@
++// SPDX-License-Identifier: GPL-2.0
++// Copyright (c) 2020, Intel Corporation.
++
++#include <linux/aspeed-mctp.h>
++#include <linux/bitfield.h>
++#include <linux/dma-mapping.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/io.h>
++#include <linux/kernel.h>
++#include <linux/list.h>
++#include <linux/list_sort.h>
++#include <linux/mfd/syscon.h>
++#include <linux/miscdevice.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/of_platform.h>
++#include <linux/pci.h>
++#include <linux/poll.h>
++#include <linux/ptr_ring.h>
++#include <linux/regmap.h>
++#include <linux/reset.h>
++#include <linux/slab.h>
++#include <linux/swab.h>
++#include <linux/uaccess.h>
++#include <linux/workqueue.h>
++#include <linux/of_reserved_mem.h>
++
++#include <uapi/linux/aspeed-mctp.h>
++
++/* AST2600 MCTP Controller registers */
++#define ASPEED_MCTP_CTRL 0x000
++#define TX_CMD_TRIGGER BIT(0)
++#define RX_CMD_READY BIT(4)
++#define MATCHING_EID BIT(9)
++
++#define ASPEED_MCTP_TX_CMD 0x004
++#define ASPEED_MCTP_RX_CMD 0x008
++
++#define ASPEED_MCTP_INT_STS 0x00c
++#define ASPEED_MCTP_INT_EN 0x010
++#define TX_CMD_SENT_INT BIT(0)
++#define TX_CMD_LAST_INT BIT(1)
++#define TX_CMD_WRONG_INT BIT(2)
++#define RX_CMD_RECEIVE_INT BIT(8)
++#define RX_CMD_NO_MORE_INT BIT(9)
++
++#define ASPEED_MCTP_EID 0x014
++#define MEMORY_SPACE_MAPPING GENMASK(31, 28)
++#define ASPEED_MCTP_OBFF_CTRL 0x018
++
++#define ASPEED_MCTP_ENGINE_CTRL 0x01c
++#define TX_MAX_PAYLOAD_SIZE_SHIFT 0
++#define TX_MAX_PAYLOAD_SIZE_MASK GENMASK(1, TX_MAX_PAYLOAD_SIZE_SHIFT)
++#define TX_MAX_PAYLOAD_SIZE(x) \
++ (((x) << TX_MAX_PAYLOAD_SIZE_SHIFT) & TX_MAX_PAYLOAD_SIZE_MASK)
++#define RX_MAX_PAYLOAD_SIZE_SHIFT 4
++#define RX_MAX_PAYLOAD_SIZE_MASK GENMASK(5, RX_MAX_PAYLOAD_SIZE_SHIFT)
++#define RX_MAX_PAYLOAD_SIZE(x) \
++ (((x) << RX_MAX_PAYLOAD_SIZE_SHIFT) & RX_MAX_PAYLOAD_SIZE_MASK)
++#define FIFO_LAYOUT_SHIFT 8
++#define FIFO_LAYOUT_MASK GENMASK(9, FIFO_LAYOUT_SHIFT)
++#define FIFO_LAYOUT(x) \
++ (((x) << FIFO_LAYOUT_SHIFT) & FIFO_LAYOUT_MASK)
++
++#define ASPEED_MCTP_RX_BUF_ADDR 0x08
++#define ASPEED_MCTP_RX_BUF_HI_ADDR 0x020
++#define ASPEED_MCTP_RX_BUF_SIZE 0x024
++#define ASPEED_MCTP_RX_BUF_RD_PTR 0x028
++#define UPDATE_RX_RD_PTR BIT(31)
++#define RX_BUF_RD_PTR_MASK GENMASK(11, 0)
++#define ASPEED_MCTP_RX_BUF_WR_PTR 0x02c
++#define RX_BUF_WR_PTR_MASK GENMASK(11, 0)
++
++#define ASPEED_MCTP_TX_BUF_ADDR 0x04
++#define ASPEED_MCTP_TX_BUF_HI_ADDR 0x030
++#define ASPEED_MCTP_TX_BUF_SIZE 0x034
++#define ASPEED_MCTP_TX_BUF_RD_PTR 0x038
++#define UPDATE_TX_RD_PTR BIT(31)
++#define TX_BUF_RD_PTR_MASK GENMASK(11, 0)
++#define ASPEED_MCTP_TX_BUF_WR_PTR 0x03c
++#define TX_BUF_WR_PTR_MASK GENMASK(11, 0)
++#define ASPEED_G7_MCTP_PCIE_BDF 0x04c
++
++#define ADDR_LEN GENMASK(26, 0)
++#define DATA_ADDR(x) (((x) >> 4) & ADDR_LEN)
++
++/* TX command */
++#define TX_LAST_CMD BIT(31)
++#define TX_DATA_ADDR_SHIFT 4
++#define TX_DATA_ADDR_MASK GENMASK(30, TX_DATA_ADDR_SHIFT)
++#define TX_DATA_ADDR(x) \
++ ((DATA_ADDR(x) << TX_DATA_ADDR_SHIFT) & TX_DATA_ADDR_MASK)
++#define TX_RESERVED_1_MASK GENMASK(1, 0) /* must be 1 */
++#define TX_RESERVED_1 1
++#define TX_STOP_AFTER_CMD BIT(16)
++#define TX_INTERRUPT_AFTER_CMD BIT(15)
++#define TX_PACKET_SIZE_SHIFT 2
++#define TX_PACKET_SIZE_MASK GENMASK(12, TX_PACKET_SIZE_SHIFT)
++#define TX_PACKET_SIZE(x) \
++ (((x) << TX_PACKET_SIZE_SHIFT) & TX_PACKET_SIZE_MASK)
++#define TX_RESERVED_0_MASK GENMASK(1, 0) /* MBZ */
++#define TX_RESERVED_0 0
++
++/* RX command */
++#define RX_INTERRUPT_AFTER_CMD BIT(2)
++#define RX_DATA_ADDR_SHIFT 4
++#define RX_DATA_ADDR_MASK GENMASK(30, RX_DATA_ADDR_SHIFT)
++#define RX_DATA_ADDR(x) \
++ ((DATA_ADDR(x) << RX_DATA_ADDR_SHIFT) & RX_DATA_ADDR_MASK)
++
++#define ADDR_LEN_2500 GENMASK(23, 0)
++#define DATA_ADDR_2500(x) (((x) >> 7) & ADDR_LEN_2500)
++
++/* TX command for ast2500 */
++#define TX_DATA_ADDR_MASK_2500 GENMASK(30, 8)
++#define TX_DATA_ADDR_2500(x) \
++ FIELD_PREP(TX_DATA_ADDR_MASK_2500, DATA_ADDR_2500(x))
++#define TX_PACKET_SIZE_2500(x) \
++ FIELD_PREP(GENMASK(11, 2), x)
++#define TX_PACKET_DEST_EID GENMASK(7, 0)
++#define TX_PACKET_TARGET_ID GENMASK(31, 16)
++#define TX_PACKET_ROUTING_TYPE BIT(14)
++#define TX_PACKET_TAG_OWNER BIT(13)
++#define TX_PACKET_PADDING_LEN GENMASK(1, 0)
++
++/* Rx command for ast2500 */
++#define RX_LAST_CMD BIT(31)
++#define RX_DATA_ADDR_MASK_2500 GENMASK(29, 7)
++#define RX_DATA_ADDR_2500(x) \
++ FIELD_PREP(RX_DATA_ADDR_MASK_2500, DATA_ADDR_2500(x))
++#define RX_PACKET_SIZE GENMASK(30, 24)
++#define RX_PACKET_SRC_EID GENMASK(23, 16)
++#define RX_PACKET_ROUTING_TYPE GENMASK(15, 14)
++#define RX_PACKET_TAG_OWNER BIT(13)
++#define RX_PACKET_SEQ_NUMBER GENMASK(12, 11)
++#define RX_PACKET_MSG_TAG GENMASK(10, 8)
++#define RX_PACKET_SOM BIT(7)
++#define RX_PACKET_EOM BIT(6)
++#define RX_PACKET_PADDING_LEN GENMASK(5, 4)
++
++/* HW buffer sizes */
++#define TX_PACKET_COUNT 48
++#define RX_PACKET_COUNT 96
++#if (RX_PACKET_COUNT % 4 != 0)
++#error The Rx buffer size must be a multiple of 4:
++#error 1. so the runaway wrap boundary can be determined on AST2600 A1/A2, and
++#error 2. to fix the runaway read pointer bug on AST2600 A3.
++#endif
++#define TX_MAX_PACKET_COUNT (TX_BUF_RD_PTR_MASK + 1)
++#define RX_MAX_PACKET_COUNT (RX_BUF_RD_PTR_MASK + 1)
++
++/* Per client packet cache sizes */
++#define RX_RING_COUNT 64
++#define TX_RING_COUNT 64
++
++/* PCIe Host Controller registers */
++#define ASPEED_PCIE_LINK 0x0c0
++#define PCIE_LINK_STS BIT(5)
++#define ASPEED_PCIE_MISC_STS_1 0x0c4
++
++/* PCIe Host Controller registers */
++#define ASPEED_G7_PCIE_LOCATE 0x300
++#define PCIE_LOCATE_IO BIT(0)
++#define ASPEED_G7_PCIE_LINK 0x358
++#define PCIE_G7_LINK_STS BIT(8)
++#define ASPEED_G7_IO_PCIE_LINK 0x344
++#define PCIE_G7_IO_LINK_STS BIT(18)
++
++/* PCI address definitions */
++#define PCI_DEV_NUM_MASK GENMASK(4, 0)
++#define PCI_BUS_NUM_SHIFT 5
++#define PCI_BUS_NUM_MASK GENMASK(12, PCI_BUS_NUM_SHIFT)
++#define GET_PCI_DEV_NUM(x) ((x) & PCI_DEV_NUM_MASK)
++#define GET_PCI_BUS_NUM(x) (((x) & PCI_BUS_NUM_MASK) >> PCI_BUS_NUM_SHIFT)
++
++/* MCTP header definitions */
++#define MCTP_HDR_SRC_EID_OFFSET 14
++#define MCTP_HDR_TAG_OFFSET 15
++#define MCTP_HDR_SOM BIT(7)
++#define MCTP_HDR_EOM BIT(6)
++#define MCTP_HDR_SOM_EOM (MCTP_HDR_SOM | MCTP_HDR_EOM)
++#define MCTP_HDR_TYPE_OFFSET 16
++#define MCTP_HDR_TYPE_CONTROL 0
++#define MCTP_HDR_TYPE_VDM_PCI 0x7e
++#define MCTP_HDR_TYPE_SPDM 0x5
++#define MCTP_HDR_TYPE_BASE_LAST MCTP_HDR_TYPE_SPDM
++#define MCTP_HDR_VENDOR_OFFSET 17
++#define MCTP_HDR_VDM_TYPE_OFFSET 19
++
++/* MCTP header DW little endian mask definitions */
++/* 0th DW */
++#define MCTP_HDR_DW_LE_ROUTING_TYPE GENMASK(26, 24)
++#define MCTP_HDR_DW_LE_PACKET_SIZE GENMASK(9, 0)
++/* 1st DW */
++#define MCTP_HDR_DW_LE_PADDING_LEN GENMASK(13, 12)
++/* 2nd DW */
++#define MCTP_HDR_DW_LE_TARGET_ID GENMASK(31, 16)
++/* 3rd DW */
++#define MCTP_HDR_DW_LE_TAG_OWNER BIT(3)
++#define MCTP_HDR_DW_LE_DEST_EID GENMASK(23, 16)
++
++#define ASPEED_MCTP_2600 0
++#define ASPEED_MCTP_2600A3 1
++
++#define ASPEED_REVISION_ID0 0x04
++#define ASPEED_REVISION_ID1 0x14
++#define ID0_AST2600A0 0x05000303
++#define ID1_AST2600A0 0x05000303
++#define ID0_AST2600A1 0x05010303
++#define ID1_AST2600A1 0x05010303
++#define ID0_AST2600A2 0x05010303
++#define ID1_AST2600A2 0x05020303
++#define ID0_AST2600A3 0x05030303
++#define ID1_AST2600A3 0x05030303
++#define ID0_AST2620A1 0x05010203
++#define ID1_AST2620A1 0x05010203
++#define ID0_AST2620A2 0x05010203
++#define ID1_AST2620A2 0x05020203
++#define ID0_AST2620A3 0x05030203
++#define ID1_AST2620A3 0x05030203
++#define ID0_AST2605A2 0x05010103
++#define ID1_AST2605A2 0x05020103
++#define ID0_AST2605A3 0x05030103
++#define ID1_AST2605A3 0x05030103
++#define ID0_AST2625A3 0x05030403
++#define ID1_AST2625A3 0x05030403
++
++#define ASPEED_G7_SCU_PCIE0_CTRL_OFFSET 0xa60
++#define ASPEED_G7_SCU_PCIE1_CTRL_OFFSET 0xae0
++#define ASPEED_G7_SCU_PCIE_CTRL_VDM_EN BIT(1)
++
++struct aspeed_mctp_match_data {
++ u32 rx_cmd_size;
++ u32 tx_cmd_size;
++ u32 packet_unit_size;
++ bool need_address_mapping;
++ bool vdm_hdr_direct_xfer;
++ bool fifo_auto_surround;
++ bool dma_need_64bits_width;
++ u32 scu_pcie_ctrl_offset;
++};
++
++struct aspeed_mctp_rx_cmd {
++ u32 rx_lo;
++ u32 rx_hi;
++};
++
++struct aspeed_mctp_tx_cmd {
++ u32 tx_lo;
++ u32 tx_hi;
++};
++
++struct aspeed_g7_mctp_tx_cmd {
++ u32 tx_lo;
++ u32 tx_mid;
++ u32 tx_hi;
++ u32 reserved;
++};
++
++struct mctp_buffer {
++ void *vaddr;
++ dma_addr_t dma_handle;
++};
++
++struct mctp_channel {
++ struct mctp_buffer data;
++ struct mctp_buffer cmd;
++ struct tasklet_struct tasklet;
++ u32 buffer_count;
++ u32 rd_ptr;
++ u32 wr_ptr;
++ bool stopped;
++};
++
++struct aspeed_mctp {
++ struct device *dev;
++ struct miscdevice mctp_miscdev;
++ const struct aspeed_mctp_match_data *match_data;
++ struct regmap *map;
++ struct reset_control *reset;
++ /*
++ * The reset of the dma block in the MCTP-RC is connected to
++ * another reset pin.
++ */
++ struct reset_control *reset_dma;
++ struct mctp_channel tx;
++ struct mctp_channel rx;
++ struct list_head clients;
++ struct mctp_client *default_client;
++ struct list_head mctp_type_handlers;
++ /*
++ * clients_lock protects list of clients, list of type handlers
++ * and default client
++ */
++ spinlock_t clients_lock;
++ struct list_head endpoints;
++ size_t endpoints_count;
++ /*
++ * endpoints_lock protects list of endpoints
++ */
++ struct mutex endpoints_lock;
++ struct {
++ struct regmap *map;
++ struct delayed_work rst_dwork;
++ bool need_uevent;
++ } pcie;
++ struct {
++ bool enable;
++ bool first_loop;
++ int packet_counter;
++ } rx_runaway_wa;
++ bool rx_warmup;
++ u8 eid;
++ struct platform_device *peci_mctp;
++ /* Use the flag to identify RC or EP */
++ bool rc_f;
++ /* Use the flag to identify the support of MCTP interrupt */
++ bool miss_mctp_int;
++ /* Rx hardware buffer size */
++ u32 rx_packet_count;
++ /* Rx pointer ring size */
++ u32 rx_ring_count;
++ /* Tx pointer ring size */
++ u32 tx_ring_count;
++ /* Delayed work for periodic detection of Rx packets */
++ struct delayed_work rx_det_dwork;
++ u32 rx_det_period_us;
++};
++
++struct mctp_client {
++ struct kref ref;
++ struct aspeed_mctp *priv;
++ struct ptr_ring tx_queue;
++ struct ptr_ring rx_queue;
++ struct list_head link;
++ wait_queue_head_t wait_queue;
++};
++
++struct mctp_type_handler {
++ u8 mctp_type;
++ u16 pci_vendor_id;
++ u16 vdm_type;
++ u16 vdm_mask;
++ struct mctp_client *client;
++ struct list_head link;
++};
++
++union aspeed_mctp_eid_data_info {
++ struct aspeed_mctp_eid_info eid_info;
++ struct aspeed_mctp_eid_ext_info eid_ext_info;
++};
++
++enum mctp_address_type {
++ ASPEED_MCTP_GENERIC_ADDR_FORMAT = 0,
++ ASPEED_MCTP_EXTENDED_ADDR_FORMAT = 1
++};
++
++struct aspeed_mctp_endpoint {
++ union aspeed_mctp_eid_data_info data;
++ struct list_head link;
++};
++
++struct kmem_cache *packet_cache;
++
++void data_dump(struct aspeed_mctp *priv, struct mctp_pcie_packet_data *data)
++{
++ int i;
++
++ dev_dbg(priv->dev, "Address %zu", (size_t)data);
++ dev_dbg(priv->dev, "VDM header:");
++ for (i = 0; i < PCIE_VDM_HDR_SIZE_DW; i++) {
++ dev_dbg(priv->dev, "%02x %02x %02x %02x", data->hdr[i] & 0xff,
++ (data->hdr[i] >> 8) & 0xff, (data->hdr[i] >> 16) & 0xff,
++ (data->hdr[i] >> 24) & 0xff);
++ }
++ dev_dbg(priv->dev, "Data payload:");
++ for (i = 0; i < PCIE_VDM_DATA_SIZE_DW; i++) {
++ dev_dbg(priv->dev, "%02x %02x %02x %02x",
++ data->payload[i] & 0xff, (data->payload[i] >> 8) & 0xff,
++ (data->payload[i] >> 16) & 0xff,
++ (data->payload[i] >> 24) & 0xff);
++ }
++}
++
++void *aspeed_mctp_packet_alloc(gfp_t flags)
++{
++ return kmem_cache_alloc(packet_cache, flags);
++}
++EXPORT_SYMBOL_GPL(aspeed_mctp_packet_alloc);
++
++void aspeed_mctp_packet_free(void *packet)
++{
++ kmem_cache_free(packet_cache, packet);
++}
++EXPORT_SYMBOL_GPL(aspeed_mctp_packet_free);
++
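++/*
++ * Return the PCIe bus/device/function number assigned to the MCTP endpoint,
++ * or -ENETDOWN when the PCIe link is not up.
++ */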
++static int _get_bdf(struct aspeed_mctp *priv)
++{
++ u32 reg;
++ u16 bdf, devfn;
++
++ if (priv->match_data->dma_need_64bits_width) {
++		regmap_read(priv->pcie.map, ASPEED_G7_PCIE_LOCATE, &reg);
++ if (!(reg & PCIE_LOCATE_IO)) {
++			regmap_read(priv->pcie.map, ASPEED_G7_PCIE_LINK, &reg);
++ if (!(reg & PCIE_G7_LINK_STS))
++ return -ENETDOWN;
++			regmap_read(priv->map, ASPEED_G7_MCTP_PCIE_BDF, &reg);
++ bdf = PCI_DEVID(PCI_BUS_NUM(reg), reg & 0xff);
++ } else {
++ regmap_read(priv->pcie.map, ASPEED_G7_IO_PCIE_LINK,
++				    &reg);
++ if (!(reg & PCIE_G7_IO_LINK_STS))
++ return -ENETDOWN;
++			regmap_read(priv->map, ASPEED_G7_MCTP_PCIE_BDF, &reg);
++ bdf = PCI_DEVID(PCI_BUS_NUM(reg), reg & 0xff);
++ }
++ } else {
++		regmap_read(priv->pcie.map, ASPEED_PCIE_LINK, &reg);
++ if (!(reg & PCIE_LINK_STS))
++ return -ENETDOWN;
++		regmap_read(priv->pcie.map, ASPEED_PCIE_MISC_STS_1, &reg);
++
++ reg = reg & (PCI_BUS_NUM_MASK | PCI_DEV_NUM_MASK);
++ /* only support function 0 */
++ devfn = GET_PCI_DEV_NUM(reg) << 3;
++ bdf = PCI_DEVID(GET_PCI_BUS_NUM(reg), devfn);
++ }
++
++ return bdf;
++}
++
++static uint32_t chip_version(struct device *dev)
++{
++ struct regmap *scu;
++ u32 revid0, revid1;
++
++ scu = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,scu");
++ if (IS_ERR(scu)) {
++ dev_err(dev, "failed to find 2600 SCU regmap\n");
++ return PTR_ERR(scu);
++ }
++ regmap_read(scu, ASPEED_REVISION_ID0, &revid0);
++ regmap_read(scu, ASPEED_REVISION_ID1, &revid1);
++ if (revid0 == ID0_AST2600A3 && revid1 == ID1_AST2600A3) {
++ /* AST2600-A3 */
++ return ASPEED_MCTP_2600A3;
++ } else if (revid0 == ID0_AST2620A3 && revid1 == ID1_AST2620A3) {
++ /* AST2620-A3 */
++ return ASPEED_MCTP_2600A3;
++ } else if (revid0 == ID0_AST2605A3 && revid1 == ID1_AST2605A3) {
++ /* AST2605-A3 */
++ return ASPEED_MCTP_2600A3;
++ } else if (revid0 == ID0_AST2625A3 && revid1 == ID1_AST2625A3) {
++		/* AST2625-A3 */
++ return ASPEED_MCTP_2600A3;
++ }
++ return ASPEED_MCTP_2600;
++}
++
++static int pcie_vdm_enable(struct device *dev)
++{
++ int ret = 0;
++ struct regmap *scu;
++ const struct aspeed_mctp_match_data *match_data =
++ of_device_get_match_data(dev);
++
++ scu = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,scu");
++ if (IS_ERR(scu)) {
++ dev_err(dev, "failed to find SCU regmap\n");
++ return PTR_ERR(scu);
++ }
++ ret = regmap_update_bits(scu, match_data->scu_pcie_ctrl_offset,
++ ASPEED_G7_SCU_PCIE_CTRL_VDM_EN,
++ ASPEED_G7_SCU_PCIE_CTRL_VDM_EN);
++ return ret;
++}
++
++/*
++ * HW produces and expects VDM header in little endian and payload in network order.
++ * To allow userspace to use network order for the whole packet, PCIe VDM header needs
++ * to be swapped.
++ */
++static void aspeed_mctp_swap_pcie_vdm_hdr(struct mctp_pcie_packet_data *data)
++{
++ int i;
++
++ for (i = 0; i < PCIE_VDM_HDR_SIZE_DW; i++)
++ data->hdr[i] = swab32(data->hdr[i]);
++}
++
++static void aspeed_mctp_rx_trigger(struct mctp_channel *rx)
++{
++ struct aspeed_mctp *priv = container_of(rx, typeof(*priv), rx);
++ u32 reg;
++
++ /*
++ * Even though rx_buf_addr doesn't change, if we don't do the write
++	 * here, the HW doesn't trigger RX. We also clear the RX_CMD_READY bit
++	 * first; otherwise, in rare cases, the trigger isn't registered by the
++	 * HW and it ends up stuck (not reacting to wr_ptr writes).
++	 * Also note that we write 0 as wr_ptr; writing any other value makes
++	 * the HW behave in a bizarre way that's hard to explain.
++ */
++ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL, RX_CMD_READY, 0);
++ if (priv->match_data->fifo_auto_surround) {
++ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_ADDR,
++ rx->cmd.dma_handle);
++ if (priv->match_data->dma_need_64bits_width)
++ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_HI_ADDR,
++ upper_32_bits(rx->cmd.dma_handle));
++ } else {
++		regmap_read(priv->map, ASPEED_MCTP_RX_BUF_ADDR, &reg);
++ if (!reg) {
++ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_ADDR,
++ rx->cmd.dma_handle);
++ } else if (reg == (rx->cmd.dma_handle & GENMASK(28, 3))) {
++ dev_info(priv->dev,
++ "Already initialized - skipping rx dma set\n");
++ } else {
++ dev_err(priv->dev,
++ "The memory of rx dma can't be changed after the controller is activated\n");
++ return;
++ }
++ }
++ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_WR_PTR, 0);
++
++ /* After re-enabling RX we need to restart WA logic */
++ if (priv->rx_runaway_wa.enable)
++ priv->rx.buffer_count = priv->rx_packet_count;
++ /*
++	 * During Rx warmup the MCTP controller may store the first packet in
++	 * any of the 0th to 3rd cmds. On AST2600 A3, if the packet isn't stored
++	 * in the 0th cmd we need to change the rx buffer size to avoid rx
++	 * runaway in the first loop. On AST2600 A1/A2, after the first loop the
++	 * hardware is guaranteed to use (RX_PACKET_COUNT - 4) buffers.
++ */
++ priv->rx_warmup = true;
++ priv->rx_runaway_wa.first_loop = true;
++ priv->rx_runaway_wa.packet_counter = 0;
++
++ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL, RX_CMD_READY,
++ RX_CMD_READY);
++}
++
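++/*
++ * Kick the TX engine and poll for the trigger bit to clear; on timeout, dump
++ * the ring pointers and clear the trigger bit manually.
++ */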
++static void aspeed_mctp_tx_trigger(struct mctp_channel *tx, bool notify)
++{
++ struct aspeed_mctp *priv = container_of(tx, typeof(*priv), tx);
++ u32 ctrl_val;
++ int ret;
++
++ if (notify) {
++ if (priv->match_data->dma_need_64bits_width) {
++ struct aspeed_g7_mctp_tx_cmd *last_cmd;
++
++ last_cmd = (struct aspeed_g7_mctp_tx_cmd *)tx->cmd.vaddr +
++ (tx->wr_ptr - 1) % TX_PACKET_COUNT;
++ last_cmd->tx_lo |= TX_INTERRUPT_AFTER_CMD;
++ } else {
++ struct aspeed_mctp_tx_cmd *last_cmd;
++
++ last_cmd = (struct aspeed_mctp_tx_cmd *)tx->cmd.vaddr +
++ (tx->wr_ptr - 1) % TX_PACKET_COUNT;
++ last_cmd->tx_lo |= TX_INTERRUPT_AFTER_CMD;
++ }
++ }
++ if (priv->match_data->fifo_auto_surround)
++ regmap_write(priv->map, ASPEED_MCTP_TX_BUF_WR_PTR, tx->wr_ptr);
++ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL, TX_CMD_TRIGGER,
++ TX_CMD_TRIGGER);
++ ret = regmap_read_poll_timeout_atomic(priv->map, ASPEED_MCTP_CTRL,
++ ctrl_val,
++ !(ctrl_val & TX_CMD_TRIGGER), 0,
++ 1000000);
++ if (ret) {
++ u32 rd_ptr, wr_ptr;
++
++ regmap_write(priv->map, ASPEED_MCTP_TX_BUF_RD_PTR, UPDATE_RX_RD_PTR);
++ regmap_read(priv->map, ASPEED_MCTP_TX_BUF_RD_PTR, &rd_ptr);
++ rd_ptr &= RX_BUF_RD_PTR_MASK;
++ regmap_read(priv->map, ASPEED_MCTP_TX_BUF_WR_PTR, &wr_ptr);
++ wr_ptr &= TX_BUF_RD_PTR_MASK;
++ dev_warn(priv->dev,
++ "Wait tx completed timeout rd_ptr = %x, wr_ptr = %x\n",
++ rd_ptr, wr_ptr);
++ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL, TX_CMD_TRIGGER,
++ 0);
++ }
++}
++
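++/*
++ * Translate the little-endian PCIe VDM header words of a TX packet into the
++ * packed TX descriptor fields used on the AST2500.
++ */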
++static void aspeed_mctp_tx_cmd_prep(u32 *tx_hdr, struct aspeed_mctp_tx_cmd *tx_cmd)
++{
++ u32 packet_size, target_id;
++ u8 dest_eid, padding_len, routing_type, tag_owner;
++
++ packet_size = FIELD_GET(MCTP_HDR_DW_LE_PACKET_SIZE, tx_hdr[0]);
++ routing_type = FIELD_GET(MCTP_HDR_DW_LE_ROUTING_TYPE, tx_hdr[0]);
++ routing_type = routing_type ? routing_type - 1 : 0;
++ padding_len = FIELD_GET(MCTP_HDR_DW_LE_PADDING_LEN, tx_hdr[1]);
++ target_id = FIELD_GET(MCTP_HDR_DW_LE_TARGET_ID, tx_hdr[2]);
++ tag_owner = FIELD_GET(MCTP_HDR_DW_LE_TAG_OWNER, tx_hdr[3]);
++ dest_eid = FIELD_GET(MCTP_HDR_DW_LE_DEST_EID, tx_hdr[3]);
++
++ tx_cmd->tx_hi = FIELD_PREP(TX_PACKET_DEST_EID, dest_eid);
++ tx_cmd->tx_lo = FIELD_PREP(TX_PACKET_TARGET_ID, target_id) |
++ TX_INTERRUPT_AFTER_CMD |
++ FIELD_PREP(TX_PACKET_ROUTING_TYPE, routing_type) |
++ FIELD_PREP(TX_PACKET_TAG_OWNER, tag_owner) |
++ TX_PACKET_SIZE_2500(packet_size) |
++ FIELD_PREP(TX_PACKET_PADDING_LEN, padding_len);
++}
++
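++/*
++ * Copy the packet into the TX data buffer slot selected by wr_ptr and fill in
++ * the matching TX descriptor. The descriptor layout differs between the
++ * 64-bit DMA (four-word) case, the two-word case, and the AST2500 case where
++ * only the payload is DMA'd and the header is encoded in the command itself.
++ */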
++static void aspeed_mctp_emit_tx_cmd(struct mctp_channel *tx,
++ struct mctp_pcie_packet *packet)
++{
++ struct aspeed_mctp *priv = container_of(tx, typeof(*priv), tx);
++ struct aspeed_mctp_tx_cmd *tx_cmd =
++ (struct aspeed_mctp_tx_cmd *)tx->cmd.vaddr + tx->wr_ptr;
++ struct aspeed_g7_mctp_tx_cmd *tx_cmd_g7 =
++ (struct aspeed_g7_mctp_tx_cmd *)tx->cmd.vaddr + tx->wr_ptr;
++ u32 packet_sz_dw = packet->size / sizeof(u32) -
++ sizeof(packet->data.hdr) / sizeof(u32);
++ u32 offset;
++
++ data_dump(priv, &packet->data);
++ aspeed_mctp_swap_pcie_vdm_hdr(&packet->data);
++
++ if (priv->match_data->vdm_hdr_direct_xfer) {
++ offset = tx->wr_ptr * sizeof(packet->data);
++ memcpy((u8 *)tx->data.vaddr + offset, &packet->data,
++ sizeof(packet->data));
++ if (priv->match_data->dma_need_64bits_width) {
++ tx_cmd_g7->tx_lo = TX_PACKET_SIZE(packet_sz_dw);
++ tx_cmd_g7->tx_mid = TX_RESERVED_1;
++ tx_cmd_g7->tx_mid |= ((tx->data.dma_handle + offset) &
++ GENMASK(31, 4));
++ tx_cmd_g7->tx_hi = upper_32_bits((tx->data.dma_handle + offset));
++ } else {
++ tx_cmd->tx_lo = TX_PACKET_SIZE(packet_sz_dw);
++ tx_cmd->tx_hi = TX_RESERVED_1;
++ tx_cmd->tx_hi |= TX_DATA_ADDR(tx->data.dma_handle + offset);
++ }
++ } else {
++ offset = tx->wr_ptr * sizeof(struct mctp_pcie_packet_data_2500);
++ memcpy((u8 *)tx->data.vaddr + offset, packet->data.payload,
++ sizeof(packet->data.payload));
++ aspeed_mctp_tx_cmd_prep(packet->data.hdr, tx_cmd);
++ tx_cmd->tx_hi |= TX_DATA_ADDR_2500(tx->data.dma_handle + offset);
++ if (tx->wr_ptr == TX_PACKET_COUNT - 1)
++ tx_cmd->tx_hi |= TX_LAST_CMD;
++ }
++ dev_dbg(priv->dev, "tx->wr_prt: %d, tx_cmd: hi:%08x lo:%08x\n",
++ tx->wr_ptr, tx_cmd->tx_hi, tx_cmd->tx_lo);
++
++ tx->wr_ptr = (tx->wr_ptr + 1) % TX_PACKET_COUNT;
++}
++
++static struct mctp_client *aspeed_mctp_client_alloc(struct aspeed_mctp *priv)
++{
++ struct mctp_client *client;
++
++ client = kzalloc(sizeof(*client), GFP_KERNEL);
++ if (!client)
++ goto out;
++
++ kref_init(&client->ref);
++ client->priv = priv;
++ ptr_ring_init(&client->tx_queue, priv->tx_ring_count, GFP_KERNEL);
++ ptr_ring_init(&client->rx_queue, priv->rx_ring_count, GFP_ATOMIC);
++
++out:
++ return client;
++}
++
++static void aspeed_mctp_client_free(struct kref *ref)
++{
++ struct mctp_client *client = container_of(ref, typeof(*client), ref);
++
++ ptr_ring_cleanup(&client->rx_queue, &aspeed_mctp_packet_free);
++ ptr_ring_cleanup(&client->tx_queue, &aspeed_mctp_packet_free);
++
++ kfree(client);
++}
++
++static void aspeed_mctp_client_get(struct mctp_client *client)
++{
++ lockdep_assert_held(&client->priv->clients_lock);
++
++ kref_get(&client->ref);
++}
++
++static void aspeed_mctp_client_put(struct mctp_client *client)
++{
++ kref_put(&client->ref, &aspeed_mctp_client_free);
++}
++
++static struct mctp_client *
++aspeed_mctp_find_handler(struct aspeed_mctp *priv,
++ struct mctp_pcie_packet *packet)
++{
++ struct mctp_type_handler *handler;
++ u8 *hdr = (u8 *)packet->data.hdr;
++ struct mctp_client *client = NULL;
++ u8 mctp_type, som_eom;
++ u16 vendor = 0;
++ u16 vdm_type = 0;
++
++ lockdep_assert_held(&priv->clients_lock);
++
++ /*
++ * Middle and EOM fragments cannot be matched to MCTP type.
++ * For consistency do not match type for any fragmented messages.
++ */
++ som_eom = hdr[MCTP_HDR_TAG_OFFSET] & MCTP_HDR_SOM_EOM;
++ if (som_eom != MCTP_HDR_SOM_EOM)
++ return NULL;
++
++ mctp_type = hdr[MCTP_HDR_TYPE_OFFSET];
++ if (mctp_type == MCTP_HDR_TYPE_VDM_PCI) {
++ vendor = *((u16 *)&hdr[MCTP_HDR_VENDOR_OFFSET]);
++ vdm_type = *((u16 *)&hdr[MCTP_HDR_VDM_TYPE_OFFSET]);
++ }
++
++ list_for_each_entry(handler, &priv->mctp_type_handlers, link) {
++ if (handler->mctp_type == mctp_type &&
++ handler->pci_vendor_id == vendor &&
++ handler->vdm_type == (vdm_type & handler->vdm_mask)) {
++ dev_dbg(priv->dev, "Found client for type %x vdm %x\n",
++ mctp_type, handler->vdm_type);
++ client = handler->client;
++ break;
++ }
++ }
++ return client;
++}
++
++static void aspeed_mctp_dispatch_packet(struct aspeed_mctp *priv,
++ struct mctp_pcie_packet *packet)
++{
++ struct mctp_client *client;
++ int ret;
++
++ spin_lock(&priv->clients_lock);
++
++ client = aspeed_mctp_find_handler(priv, packet);
++
++ if (!client)
++ client = priv->default_client;
++
++ if (client)
++ aspeed_mctp_client_get(client);
++
++ spin_unlock(&priv->clients_lock);
++
++ if (client) {
++ ret = ptr_ring_produce(&client->rx_queue, packet);
++ if (ret) {
++ /*
++ * This can happen if client process does not
++ * consume packets fast enough
++ */
++ dev_dbg(priv->dev, "Failed to store packet in client RX queue\n");
++ aspeed_mctp_packet_free(packet);
++ } else {
++ wake_up_all(&client->wait_queue);
++ }
++ aspeed_mctp_client_put(client);
++ } else {
++ dev_dbg(priv->dev, "Failed to dispatch RX packet\n");
++ aspeed_mctp_packet_free(packet);
++ }
++}
++
++static void aspeed_mctp_tx_tasklet(unsigned long data)
++{
++ struct mctp_channel *tx = (struct mctp_channel *)data;
++ struct aspeed_mctp *priv = container_of(tx, typeof(*priv), tx);
++ struct mctp_client *client;
++ bool trigger = false;
++ bool full = false;
++ u32 rd_ptr;
++
++ if (priv->match_data->fifo_auto_surround) {
++ regmap_write(priv->map, ASPEED_MCTP_TX_BUF_RD_PTR, UPDATE_RX_RD_PTR);
++ regmap_read(priv->map, ASPEED_MCTP_TX_BUF_RD_PTR, &rd_ptr);
++ rd_ptr &= TX_BUF_RD_PTR_MASK;
++ } else {
++ rd_ptr = tx->rd_ptr;
++ }
++
++ spin_lock(&priv->clients_lock);
++
++ list_for_each_entry(client, &priv->clients, link) {
++ while (!(full = (tx->wr_ptr + 1) % TX_PACKET_COUNT == rd_ptr)) {
++ struct mctp_pcie_packet *packet;
++
++ packet = ptr_ring_consume(&client->tx_queue);
++ if (!packet)
++ break;
++
++ aspeed_mctp_emit_tx_cmd(tx, packet);
++ aspeed_mctp_packet_free(packet);
++ trigger = true;
++ }
++ }
++
++ spin_unlock(&priv->clients_lock);
++
++ if (trigger)
++ aspeed_mctp_tx_trigger(tx, full);
++}
++
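++/*
++ * Rebuild a PCIe VDM header from the RX descriptor for the path where the
++ * hardware does not transfer the header itself; fields the hardware does not
++ * report (e.g. the PCI requester ID) are filled with defaults, as noted in
++ * the TODOs below.
++ */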
++void aspeed_mctp_rx_hdr_prep(struct aspeed_mctp *priv, u8 *hdr, u32 rx_lo)
++{
++ u16 bdf;
++ u8 routing_type;
++
++ /*
++	 * The MCTP controller encodes the routing type with one bit fewer:
++ * 0 (Route to RC) -> 0,
++ * 2 (Route by ID) -> 1,
++ * 3 (Broadcast from RC) -> 2
++ */
++ routing_type = FIELD_GET(RX_PACKET_ROUTING_TYPE, rx_lo);
++ routing_type = routing_type ? routing_type + 1 : 0;
++ bdf = _get_bdf(priv);
++ /* Length[7:0] */
++ hdr[0] = FIELD_GET(RX_PACKET_SIZE, rx_lo);
++ /* TD:EP:ATTR[1:0]:R or AT[1:0]:Length[9:8] */
++ hdr[1] = 0;
++ /* R or T9:TC[2:0]:R[3:0] */
++ hdr[2] = 0;
++ /* R or Fmt[2]:Fmt[1:0]=b'11:Type[4:3]=b'10:Type[2:0] */
++ hdr[3] = 0x70 | routing_type;
++ /* VDM message code = 0x7f */
++ hdr[4] = 0x7f;
++ /* R[1:0]:Pad len[1:0]:MCTP VDM Code[3:0] */
++ hdr[5] = FIELD_GET(RX_PACKET_PADDING_LEN, rx_lo) << 4;
++ /* TODO: PCI Requester ID: HW didn't get this information */
++ hdr[6] = 0;
++ hdr[7] = 5;
++ /* Vendor ID: 0x1AB4 */
++ hdr[8] = 0xb4;
++ hdr[9] = 0x1a;
++ /* PCI Target ID */
++ hdr[10] = bdf & 0xff;
++ hdr[11] = bdf >> 8 & 0xff;
++ /* SOM:EOM:Pkt Seq#[1:0]:TO:Msg Tag[2:0]*/
++ hdr[12] = FIELD_GET(RX_PACKET_SOM, rx_lo) << 7 |
++ FIELD_GET(RX_PACKET_EOM, rx_lo) << 6 |
++ FIELD_GET(RX_PACKET_SEQ_NUMBER, rx_lo) << 4 |
++ FIELD_GET(RX_PACKET_TAG_OWNER, rx_lo) << 3 |
++ FIELD_GET(RX_PACKET_MSG_TAG, rx_lo);
++ /* Source Endpoint ID */
++ hdr[13] = FIELD_GET(RX_PACKET_SRC_EID, rx_lo);
++ /* Destination Endpoint ID: HW didn't get this information*/
++ hdr[14] = priv->eid;
++ /* TODO: R[3:0]: header version[3:0] */
++ hdr[15] = 1;
++}
++
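++/*
++ * Consume received packets from the RX ring. The read pointer reported by the
++ * HW is unreliable (see comments below), so the ring is scanned by checking
++ * for a non-zero first header word, which is cleared after each packet is
++ * consumed.
++ */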
++static void aspeed_mctp_rx_tasklet(unsigned long data)
++{
++ struct mctp_channel *rx = (struct mctp_channel *)data;
++ struct aspeed_mctp *priv = container_of(rx, typeof(*priv), rx);
++ struct mctp_pcie_packet *rx_packet;
++ struct aspeed_mctp_rx_cmd *rx_cmd;
++ u32 hw_read_ptr;
++ u32 *hdr, *payload;
++
++ if (priv->match_data->vdm_hdr_direct_xfer && priv->match_data->fifo_auto_surround) {
++ struct mctp_pcie_packet_data *rx_buf;
++ u32 residual_cmds = 0;
++
++ /* Trigger HW read pointer update, must be done before RX loop */
++ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_RD_PTR, UPDATE_RX_RD_PTR);
++
++ /*
++ * XXX: Using rd_ptr obtained from HW is unreliable so we need to
++ * maintain the state of buffer on our own by peeking into the buffer
++ * and checking where the packet was written.
++ */
++ rx_buf = (struct mctp_pcie_packet_data *)rx->data.vaddr;
++ hdr = (u32 *)&rx_buf[rx->wr_ptr];
++ if (!*hdr && priv->rx_warmup) {
++ u32 tmp_wr_ptr = rx->wr_ptr;
++
++ /*
++ * HACK: Right after start the RX hardware can put received
++ * packet into an unexpected offset - in order to locate
++ * received packet driver has to scan all RX data buffers.
++ */
++ do {
++ tmp_wr_ptr = (tmp_wr_ptr + 1) % priv->rx_packet_count;
++
++ hdr = (u32 *)&rx_buf[tmp_wr_ptr];
++ } while (!*hdr && tmp_wr_ptr != rx->wr_ptr);
++
++ if (tmp_wr_ptr != rx->wr_ptr) {
++ dev_warn(priv->dev,
++ "Runaway RX packet found %d -> %d\n",
++ rx->wr_ptr, tmp_wr_ptr);
++ residual_cmds = abs(tmp_wr_ptr - rx->wr_ptr);
++ rx->wr_ptr = tmp_wr_ptr;
++ if (!priv->rx_runaway_wa.enable &&
++ priv->rx_warmup)
++ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_SIZE,
++ rx->buffer_count - residual_cmds);
++ priv->rx_warmup = false;
++ }
++ } else {
++ priv->rx_warmup = false;
++ }
++
++ if (priv->rx_runaway_wa.packet_counter > priv->rx_packet_count &&
++ priv->rx_runaway_wa.first_loop) {
++ if (priv->rx_runaway_wa.enable)
++ /*
++ * Once we receive RX_PACKET_COUNT packets, hardware is
++ * guaranteed to use (RX_PACKET_COUNT - 4) buffers. Decrease
++ * buffer count by 4, then we can turn off scanning of RX
++ * buffers. RX buffer scanning should be enabled every time
++ * RX hardware is started.
++ * This is just a performance optimization - we could keep
++ * scanning RX buffers forever, but under heavy traffic it is
++ * fairly common that rx_tasklet is executed while RX buffer
++ * ring is empty.
++ */
++ rx->buffer_count = priv->rx_packet_count - 4;
++ else
++ /*
++ * Once we receive RX_PACKET_COUNT packets, we need to restore the
++ * RX buffer size to 4 byte aligned value to avoid rx runaway.
++ */
++ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_SIZE,
++ rx->buffer_count);
++ priv->rx_runaway_wa.first_loop = false;
++ }
++
++ while (*hdr != 0) {
++ if (FIELD_GET(MCTP_HDR_DW_LE_PACKET_SIZE, hdr[0]) * 4 >
++ ASPEED_MCTP_MTU)
++ dev_warn(priv->dev,
++ "Rx length %ld > MTU size %d\n",
++ FIELD_GET(MCTP_HDR_DW_LE_PACKET_SIZE,
++ hdr[0]) *
++ 4,
++ ASPEED_MCTP_MTU);
++ rx_packet = aspeed_mctp_packet_alloc(GFP_ATOMIC);
++ if (rx_packet) {
++ memcpy(&rx_packet->data, hdr, sizeof(rx_packet->data));
++
++			aspeed_mctp_swap_pcie_vdm_hdr(&rx_packet->data);
++
++			/* Dump before dispatch, which may free the packet */
++			data_dump(priv, &rx_packet->data);
++
++			aspeed_mctp_dispatch_packet(priv, rx_packet);
++		} else {
++			dev_dbg(priv->dev, "Failed to allocate RX packet\n");
++		}
++ *hdr = 0;
++ rx->wr_ptr = (rx->wr_ptr + 1) % rx->buffer_count;
++ hdr = (u32 *)&rx_buf[rx->wr_ptr];
++
++ priv->rx_runaway_wa.packet_counter++;
++ }
++
++ /*
++		 * Update the HW write pointer; this can be done only after the driver
++		 * consumes packets from the RX ring.
++ */
++ regmap_read(priv->map, ASPEED_MCTP_RX_BUF_RD_PTR, &hw_read_ptr);
++ hw_read_ptr &= RX_BUF_RD_PTR_MASK;
++ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_WR_PTR, (hw_read_ptr));
++
++ dev_dbg(priv->dev, "RX hw ptr %02d, sw ptr %2d\n",
++ hw_read_ptr, rx->wr_ptr);
++ } else {
++ struct mctp_pcie_packet_data_2500 *rx_buf;
++
++ rx_buf = (struct mctp_pcie_packet_data_2500 *)rx->data.vaddr;
++ payload = (u32 *)&rx_buf[rx->wr_ptr];
++ rx_cmd = (struct aspeed_mctp_rx_cmd *)rx->cmd.vaddr;
++ hdr = (u32 *)&((rx_cmd + rx->wr_ptr)->rx_lo);
++
++ if (!*hdr) {
++ u32 tmp_wr_ptr = rx->wr_ptr;
++
++ /*
++			 * HACK: Right after start, the RX hardware can place a received
++			 * packet at an unexpected offset; to locate it, the driver has
++			 * to scan all RX data buffers.
++ */
++ do {
++ tmp_wr_ptr = (tmp_wr_ptr + 1) % rx->buffer_count;
++
++ hdr = (u32 *)&((rx_cmd + tmp_wr_ptr)->rx_lo);
++ } while (!*hdr && tmp_wr_ptr != rx->wr_ptr);
++
++ if (tmp_wr_ptr != rx->wr_ptr) {
++ dev_warn(priv->dev,
++ "Runaway RX packet found %d -> %d\n",
++ rx->wr_ptr, tmp_wr_ptr);
++ rx->wr_ptr = tmp_wr_ptr;
++ }
++ }
++
++ while (*hdr != 0) {
++ rx_packet = aspeed_mctp_packet_alloc(GFP_ATOMIC);
++ if (rx_packet) {
++ memcpy(rx_packet->data.payload, payload,
++ sizeof(rx_packet->data.payload));
++
++				aspeed_mctp_rx_hdr_prep(priv, (u8 *)rx_packet->data.hdr, *hdr);
++
++				aspeed_mctp_swap_pcie_vdm_hdr(&rx_packet->data);
++
++				data_dump(priv, &rx_packet->data);
++				aspeed_mctp_dispatch_packet(priv, rx_packet);
++			} else {
++				dev_dbg(priv->dev, "Failed to allocate RX packet\n");
++			}
++			dev_dbg(priv->dev,
++				"rx->wr_ptr = %d, rx_cmd->rx_lo = %08x",
++				rx->wr_ptr, *hdr);
++ *hdr = 0;
++ rx->wr_ptr = (rx->wr_ptr + 1) % rx->buffer_count;
++ payload = (u32 *)&rx_buf[rx->wr_ptr];
++ hdr = (u32 *)&((rx_cmd + rx->wr_ptr)->rx_lo);
++ }
++ }
++
++ /* Kick RX if it was stopped due to ring full condition */
++ if (rx->stopped) {
++ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL, RX_CMD_READY,
++ RX_CMD_READY);
++ rx->stopped = false;
++ }
++}
++
++static void aspeed_mctp_rx_chan_init(struct mctp_channel *rx)
++{
++ struct aspeed_mctp *priv = container_of(rx, typeof(*priv), rx);
++ u32 *rx_cmd = (u32 *)rx->cmd.vaddr;
++ struct aspeed_mctp_rx_cmd *rx_cmd_64 =
++ (struct aspeed_mctp_rx_cmd *)rx->cmd.vaddr;
++ u32 data_size = priv->match_data->packet_unit_size;
++ u32 hw_rx_count = priv->rx_packet_count;
++ struct mctp_pcie_packet_data *rx_buf = (struct mctp_pcie_packet_data *)rx->data.vaddr;
++ int i;
++
++ if (priv->match_data->vdm_hdr_direct_xfer) {
++ if (priv->match_data->dma_need_64bits_width) {
++ for (i = 0; i < priv->rx_packet_count; i++) {
++ rx_cmd_64->rx_hi =
++ upper_32_bits((rx->data.dma_handle + data_size * i));
++ rx_cmd_64->rx_lo =
++ (rx->data.dma_handle + data_size * i) &
++ GENMASK(31, 4);
++ rx_cmd_64->rx_lo |= RX_INTERRUPT_AFTER_CMD;
++ rx_cmd_64++;
++ }
++ } else {
++ for (i = 0; i < priv->rx_packet_count; i++) {
++ *rx_cmd = RX_DATA_ADDR(rx->data.dma_handle + data_size * i);
++ *rx_cmd |= RX_INTERRUPT_AFTER_CMD;
++ rx_cmd++;
++ }
++ }
++ } else {
++ for (i = 0; i < priv->rx_packet_count; i++) {
++ rx_cmd_64->rx_hi = RX_DATA_ADDR_2500(rx->data.dma_handle + data_size * i);
++ rx_cmd_64->rx_lo = 0;
++ if (i == priv->rx_packet_count - 1)
++ rx_cmd_64->rx_hi |= RX_LAST_CMD;
++ rx_cmd_64++;
++ }
++ }
++ /* Clear the header of rx data */
++ for (i = 0; i < priv->rx_packet_count; i++)
++ *(u32 *)&rx_buf[i] = 0;
++ rx->wr_ptr = 0;
++ rx->buffer_count = priv->rx_packet_count;
++ if (priv->match_data->fifo_auto_surround) {
++ /*
++		 * TODO: Once the read-pointer runaway bug is fixed in a future AST2x00
++		 * stepping, add chip revision detection and turn this workaround on
++		 * only when needed.
++ */
++ if (priv->match_data->dma_need_64bits_width)
++ priv->rx_runaway_wa.enable = false;
++ else
++ priv->rx_runaway_wa.enable =
++ (chip_version(priv->dev) == ASPEED_MCTP_2600) ?
++ true :
++ false;
++
++ /*
++		 * The hardware does not wrap around ASPEED_MCTP_RX_BUF_SIZE
++		 * correctly - we have to set the number of buffers to n/4 - 1.
++ */
++ if (priv->rx_runaway_wa.enable)
++ hw_rx_count = (priv->rx_packet_count / 4 - 1);
++
++ regmap_write(priv->map, ASPEED_MCTP_RX_BUF_SIZE, hw_rx_count);
++ }
++}
++
++static void aspeed_mctp_tx_chan_init(struct mctp_channel *tx)
++{
++ struct aspeed_mctp *priv = container_of(tx, typeof(*priv), tx);
++
++ tx->wr_ptr = 0;
++ tx->rd_ptr = 0;
++ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL, TX_CMD_TRIGGER, 0);
++ regmap_write(priv->map, ASPEED_MCTP_TX_BUF_ADDR, tx->cmd.dma_handle);
++ if (priv->match_data->dma_need_64bits_width)
++ regmap_write(priv->map, ASPEED_MCTP_TX_BUF_HI_ADDR,
++ upper_32_bits(tx->cmd.dma_handle));
++ if (priv->match_data->fifo_auto_surround) {
++ regmap_write(priv->map, ASPEED_MCTP_TX_BUF_SIZE, TX_PACKET_COUNT);
++ regmap_write(priv->map, ASPEED_MCTP_TX_BUF_WR_PTR, 0);
++ }
++}
++
++struct mctp_client *aspeed_mctp_create_client(struct aspeed_mctp *priv)
++{
++ struct mctp_client *client;
++
++ client = aspeed_mctp_client_alloc(priv);
++ if (!client)
++ return NULL;
++
++ init_waitqueue_head(&client->wait_queue);
++
++ spin_lock_bh(&priv->clients_lock);
++ list_add_tail(&client->link, &priv->clients);
++ spin_unlock_bh(&priv->clients_lock);
++
++ return client;
++}
++EXPORT_SYMBOL_GPL(aspeed_mctp_create_client);
++
++static int aspeed_mctp_open(struct inode *inode, struct file *file)
++{
++ struct miscdevice *misc = file->private_data;
++ struct platform_device *pdev = to_platform_device(misc->parent);
++ struct aspeed_mctp *priv = platform_get_drvdata(pdev);
++ struct mctp_client *client;
++
++ client = aspeed_mctp_create_client(priv);
++ if (!client)
++ return -ENOMEM;
++
++ file->private_data = client;
++
++ return 0;
++}
++
++void aspeed_mctp_delete_client(struct mctp_client *client)
++{
++ struct aspeed_mctp *priv = client->priv;
++ struct mctp_type_handler *handler, *tmp;
++
++ spin_lock_bh(&priv->clients_lock);
++
++ list_del(&client->link);
++
++ if (priv->default_client == client)
++ priv->default_client = NULL;
++
++ list_for_each_entry_safe(handler, tmp, &priv->mctp_type_handlers,
++ link) {
++ if (handler->client == client) {
++ list_del(&handler->link);
++ kfree(handler);
++ }
++ }
++ spin_unlock_bh(&priv->clients_lock);
++
++ /* Disable the tasklet to appease lockdep */
++ local_bh_disable();
++ aspeed_mctp_client_put(client);
++ local_bh_enable();
++}
++EXPORT_SYMBOL_GPL(aspeed_mctp_delete_client);
++
++static int aspeed_mctp_release(struct inode *inode, struct file *file)
++{
++ struct mctp_client *client = file->private_data;
++
++ aspeed_mctp_delete_client(client);
++
++ return 0;
++}
++
++#define LEN_MASK_HI GENMASK(9, 8)
++#define LEN_MASK_LO GENMASK(7, 0)
++#define PCI_VDM_HDR_LEN_MASK_LO GENMASK(31, 24)
++#define PCI_VDM_HDR_LEN_MASK_HI GENMASK(17, 16)
++#define PCIE_VDM_HDR_REQUESTER_BDF_MASK GENMASK(31, 16)
++
++int aspeed_mctp_send_packet(struct mctp_client *client,
++ struct mctp_pcie_packet *packet)
++{
++ struct aspeed_mctp *priv = client->priv;
++ u32 *hdr_dw = (u32 *)packet->data.hdr;
++ u8 *hdr = (u8 *)packet->data.hdr;
++ u16 packet_data_sz_dw;
++ u16 pci_data_len_dw;
++ int ret;
++ u16 bdf;
++
++ ret = _get_bdf(priv);
++ if (ret < 0)
++ return ret;
++ bdf = ret;
++
++ /*
++	 * If the data size differs from the length carried in the PCIe VDM
++	 * header, aspeed_mctp_tx_cmd will be programmed incorrectly. This may
++	 * cause the MCTP HW to stop working.
++ */
++ pci_data_len_dw = FIELD_PREP(LEN_MASK_LO, FIELD_GET(PCI_VDM_HDR_LEN_MASK_LO, hdr_dw[0])) |
++ FIELD_PREP(LEN_MASK_HI, FIELD_GET(PCI_VDM_HDR_LEN_MASK_HI, hdr_dw[0]));
++ if (pci_data_len_dw == 0) /* According to PCIe Spec, 0 means 1024 DW */
++ pci_data_len_dw = SZ_1K;
++
++ packet_data_sz_dw = packet->size / sizeof(u32) - sizeof(packet->data.hdr) / sizeof(u32);
++ if (packet_data_sz_dw != pci_data_len_dw)
++ return -EINVAL;
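++	/*
++	 * Worked example (assuming the 4-DW/16-byte PCIe VDM header used here):
++	 * for a 64-byte MCTP payload the PCIe Length field is 16 DW, so hdr_dw[0]
++	 * must encode 16 across LEN_MASK_HI/LO and packet->size must be 80 bytes,
++	 * which gives packet_data_sz_dw == 16 and passes the check above.
++	 */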
++
++ be32p_replace_bits(&hdr_dw[1], bdf, PCIE_VDM_HDR_REQUESTER_BDF_MASK);
++
++ /*
++ * XXX Don't update EID for MCTP Control messages - old EID may
++ * interfere with MCTP discovery flow.
++ */
++ if (priv->eid && hdr[MCTP_HDR_TYPE_OFFSET] != MCTP_HDR_TYPE_CONTROL)
++ hdr[MCTP_HDR_SRC_EID_OFFSET] = priv->eid;
++
++ ret = ptr_ring_produce_bh(&client->tx_queue, packet);
++ if (!ret)
++ tasklet_hi_schedule(&priv->tx.tasklet);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(aspeed_mctp_send_packet);
++
++struct mctp_pcie_packet *aspeed_mctp_receive_packet(struct mctp_client *client,
++ unsigned long timeout)
++{
++ struct aspeed_mctp *priv = client->priv;
++ int ret;
++
++ ret = _get_bdf(priv);
++ if (ret < 0)
++ return ERR_PTR(ret);
++
++ ret = wait_event_interruptible_timeout(client->wait_queue,
++ __ptr_ring_peek(&client->rx_queue),
++ timeout);
++ if (ret < 0)
++ return ERR_PTR(ret);
++ else if (ret == 0)
++ return ERR_PTR(-ETIME);
++
++ return ptr_ring_consume_bh(&client->rx_queue);
++}
++EXPORT_SYMBOL_GPL(aspeed_mctp_receive_packet);
++
++void aspeed_mctp_flush_rx_queue(struct mctp_client *client)
++{
++ struct mctp_pcie_packet *packet;
++
++ while ((packet = ptr_ring_consume_bh(&client->rx_queue)))
++ aspeed_mctp_packet_free(packet);
++}
++EXPORT_SYMBOL_GPL(aspeed_mctp_flush_rx_queue);
++
++static ssize_t aspeed_mctp_read(struct file *file, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct mctp_client *client = file->private_data;
++ struct aspeed_mctp *priv = client->priv;
++ struct mctp_pcie_packet *rx_packet;
++ u32 mctp_ctrl;
++ u32 mctp_int_sts;
++
++ if (count < PCIE_MCTP_MIN_PACKET_SIZE)
++ return -EINVAL;
++
++ if (count > sizeof(rx_packet->data))
++ count = sizeof(rx_packet->data);
++
++ if (priv->miss_mctp_int) {
++ regmap_read(priv->map, ASPEED_MCTP_CTRL, &mctp_ctrl);
++ if (!(mctp_ctrl & RX_CMD_READY))
++ priv->rx.stopped = true;
++ /* Polling the RX_CMD_RECEIVE_INT to ensure rx_tasklet can find the data */
++ regmap_read(priv->map, ASPEED_MCTP_INT_STS, &mctp_int_sts);
++ if (mctp_int_sts & RX_CMD_RECEIVE_INT)
++ regmap_write(priv->map, ASPEED_MCTP_INT_STS,
++ mctp_int_sts);
++ }
++
++ tasklet_hi_schedule(&priv->rx.tasklet);
++ rx_packet = ptr_ring_consume_bh(&client->rx_queue);
++ if (!rx_packet)
++ return -EAGAIN;
++
++ if (copy_to_user(buf, &rx_packet->data, count)) {
++ dev_err(priv->dev, "copy to user failed\n");
++ count = -EFAULT;
++ }
++
++ aspeed_mctp_packet_free(rx_packet);
++
++ return count;
++}
++
++static void aspeed_mctp_flush_tx_queue(struct mctp_client *client)
++{
++ struct mctp_pcie_packet *packet;
++
++ while ((packet = ptr_ring_consume_bh(&client->tx_queue)))
++ aspeed_mctp_packet_free(packet);
++}
++
++static void aspeed_mctp_flush_all_tx_queues(struct aspeed_mctp *priv)
++{
++ struct mctp_client *client;
++
++ spin_lock_bh(&priv->clients_lock);
++ list_for_each_entry(client, &priv->clients, link)
++ aspeed_mctp_flush_tx_queue(client);
++ spin_unlock_bh(&priv->clients_lock);
++}
++
++static ssize_t aspeed_mctp_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct mctp_client *client = file->private_data;
++ struct aspeed_mctp *priv = client->priv;
++ struct mctp_pcie_packet *tx_packet;
++ int ret;
++
++ if (count < PCIE_MCTP_MIN_PACKET_SIZE)
++ return -EINVAL;
++
++ if (count > sizeof(tx_packet->data))
++ return -ENOSPC;
++
++ tx_packet = aspeed_mctp_packet_alloc(GFP_KERNEL);
++ if (!tx_packet) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ if (copy_from_user(&tx_packet->data, buf, count)) {
++ dev_err(priv->dev, "copy from user failed\n");
++ ret = -EFAULT;
++ goto out_packet;
++ }
++
++ tx_packet->size = count;
++
++ ret = aspeed_mctp_send_packet(client, tx_packet);
++ if (ret)
++ goto out_packet;
++
++ return count;
++
++out_packet:
++ aspeed_mctp_packet_free(tx_packet);
++out:
++ return ret;
++}
++
++int aspeed_mctp_add_type_handler(struct mctp_client *client, u8 mctp_type,
++ u16 pci_vendor_id, u16 vdm_type, u16 vdm_mask)
++{
++ struct aspeed_mctp *priv = client->priv;
++ struct mctp_type_handler *handler, *new_handler;
++ int ret = 0;
++
++ if (mctp_type <= MCTP_HDR_TYPE_BASE_LAST) {
++ /* Vendor, type and type mask must be zero for types 0-5 */
++ if (pci_vendor_id != 0 || vdm_type != 0 || vdm_mask != 0)
++ return -EINVAL;
++ } else if (mctp_type == MCTP_HDR_TYPE_VDM_PCI) {
++		/* For the Vendor Defined PCI type, the vendor ID must be nonzero */
++ if (pci_vendor_id == 0 || pci_vendor_id == 0xffff)
++ return -EINVAL;
++ } else {
++ return -EINVAL;
++ }
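++	/*
++	 * Handlers are keyed by (mctp_type, pci_vendor_id, vdm_type & vdm_mask);
++	 * registering an already-claimed key from another client fails with
++	 * -EBUSY below.
++	 */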
++
++ new_handler = kzalloc(sizeof(*new_handler), GFP_KERNEL);
++ if (!new_handler)
++ return -ENOMEM;
++ new_handler->mctp_type = mctp_type;
++ new_handler->pci_vendor_id = pci_vendor_id;
++ new_handler->vdm_type = vdm_type & vdm_mask;
++ new_handler->vdm_mask = vdm_mask;
++ new_handler->client = client;
++
++ spin_lock_bh(&priv->clients_lock);
++ list_for_each_entry(handler, &priv->mctp_type_handlers, link) {
++ if (handler->mctp_type == new_handler->mctp_type &&
++ handler->pci_vendor_id == new_handler->pci_vendor_id &&
++ handler->vdm_type == new_handler->vdm_type) {
++ if (handler->client != new_handler->client)
++ ret = -EBUSY;
++ kfree(new_handler);
++ goto out_unlock;
++ }
++ }
++ list_add_tail(&new_handler->link, &priv->mctp_type_handlers);
++out_unlock:
++ spin_unlock_bh(&priv->clients_lock);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(aspeed_mctp_add_type_handler);
++
++int aspeed_mctp_remove_type_handler(struct mctp_client *client,
++ u8 mctp_type, u16 pci_vendor_id,
++ u16 vdm_type, u16 vdm_mask)
++{
++ struct aspeed_mctp *priv = client->priv;
++ struct mctp_type_handler *handler, *tmp;
++ int ret = -EINVAL;
++
++ vdm_type &= vdm_mask;
++
++ spin_lock_bh(&priv->clients_lock);
++ list_for_each_entry_safe(handler, tmp, &priv->mctp_type_handlers,
++ link) {
++ if (handler->client == client &&
++ handler->mctp_type == mctp_type &&
++ handler->pci_vendor_id == pci_vendor_id &&
++ handler->vdm_type == vdm_type) {
++ list_del(&handler->link);
++ kfree(handler);
++ ret = 0;
++ break;
++ }
++ }
++ spin_unlock_bh(&priv->clients_lock);
++ return ret;
++}
++
++static int aspeed_mctp_register_default_handler(struct mctp_client *client)
++{
++ struct aspeed_mctp *priv = client->priv;
++ int ret = 0;
++
++ spin_lock_bh(&priv->clients_lock);
++
++ if (!priv->default_client)
++ priv->default_client = client;
++ else if (priv->default_client != client)
++ ret = -EBUSY;
++
++ spin_unlock_bh(&priv->clients_lock);
++
++ return ret;
++}
++
++static int
++aspeed_mctp_register_type_handler(struct mctp_client *client,
++ void __user *userbuf)
++{
++ struct aspeed_mctp *priv = client->priv;
++ struct aspeed_mctp_type_handler_ioctl handler;
++
++ if (copy_from_user(&handler, userbuf, sizeof(handler))) {
++ dev_err(priv->dev, "copy from user failed\n");
++ return -EFAULT;
++ }
++
++ return aspeed_mctp_add_type_handler(client, handler.mctp_type,
++ handler.pci_vendor_id,
++ handler.vendor_type,
++ handler.vendor_type_mask);
++}
++
++static int
++aspeed_mctp_unregister_type_handler(struct mctp_client *client,
++ void __user *userbuf)
++{
++ struct aspeed_mctp *priv = client->priv;
++ struct aspeed_mctp_type_handler_ioctl handler;
++
++ if (copy_from_user(&handler, userbuf, sizeof(handler))) {
++ dev_err(priv->dev, "copy from user failed\n");
++ return -EFAULT;
++ }
++
++ return aspeed_mctp_remove_type_handler(client, handler.mctp_type,
++ handler.pci_vendor_id,
++ handler.vendor_type,
++ handler.vendor_type_mask);
++}
++
++static int
++aspeed_mctp_filter_eid(struct aspeed_mctp *priv, void __user *userbuf)
++{
++ struct aspeed_mctp_filter_eid eid;
++
++ if (copy_from_user(&eid, userbuf, sizeof(eid))) {
++ dev_err(priv->dev, "copy from user failed\n");
++ return -EFAULT;
++ }
++
++ if (eid.enable) {
++ regmap_write(priv->map, ASPEED_MCTP_EID, eid.eid);
++ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL,
++ MATCHING_EID, MATCHING_EID);
++ } else {
++ regmap_update_bits(priv->map, ASPEED_MCTP_CTRL,
++ MATCHING_EID, 0);
++ }
++ return 0;
++}
++
++static int aspeed_mctp_get_bdf(struct aspeed_mctp *priv, void __user *userbuf)
++{
++ struct aspeed_mctp_get_bdf bdf = { _get_bdf(priv) };
++
++ if (copy_to_user(userbuf, &bdf, sizeof(bdf))) {
++ dev_err(priv->dev, "copy to user failed\n");
++ return -EFAULT;
++ }
++ return 0;
++}
++
++static int
++aspeed_mctp_get_medium_id(struct aspeed_mctp *priv, void __user *userbuf)
++{
++ struct aspeed_mctp_get_medium_id id = { 0x09 }; /* PCIe revision 2.0 */
++
++ if (copy_to_user(userbuf, &id, sizeof(id))) {
++ dev_err(priv->dev, "copy to user failed\n");
++ return -EFAULT;
++ }
++ return 0;
++}
++
++static int
++aspeed_mctp_get_mtu(struct aspeed_mctp *priv, void __user *userbuf)
++{
++ struct aspeed_mctp_get_mtu id = { ASPEED_MCTP_MTU };
++
++ if (copy_to_user(userbuf, &id, sizeof(id))) {
++ dev_err(priv->dev, "copy to user failed\n");
++ return -EFAULT;
++ }
++ return 0;
++}
++
++int aspeed_mctp_get_eid_bdf(struct mctp_client *client, u8 eid, u16 *bdf)
++{
++ struct aspeed_mctp_endpoint *endpoint;
++ int ret = -ENOENT;
++
++ mutex_lock(&client->priv->endpoints_lock);
++ list_for_each_entry(endpoint, &client->priv->endpoints, link) {
++ if (endpoint->data.eid_info.eid == eid) {
++ *bdf = endpoint->data.eid_info.bdf;
++ ret = 0;
++ break;
++ }
++ }
++ mutex_unlock(&client->priv->endpoints_lock);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(aspeed_mctp_get_eid_bdf);
++
++int aspeed_mctp_get_eid(struct mctp_client *client, u16 bdf,
++ u8 domain_id, u8 *eid)
++{
++ struct aspeed_mctp_endpoint *endpoint;
++ int ret = -ENOENT;
++
++ mutex_lock(&client->priv->endpoints_lock);
++
++ list_for_each_entry(endpoint, &client->priv->endpoints, link) {
++ if (endpoint->data.eid_ext_info.domain_id == domain_id &&
++ endpoint->data.eid_ext_info.bdf == bdf) {
++ *eid = endpoint->data.eid_ext_info.eid;
++ ret = 0;
++ break;
++ }
++ }
++
++ mutex_unlock(&client->priv->endpoints_lock);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(aspeed_mctp_get_eid);
++
++static int
++aspeed_mctp_get_eid_info(struct aspeed_mctp *priv, void __user *userbuf,
++ enum mctp_address_type addr_format)
++{
++ int count = 0;
++ int ret = 0;
++ struct aspeed_mctp_get_eid_info get_eid;
++ struct aspeed_mctp_endpoint *endpoint;
++ void *user_ptr;
++ size_t count_to_copy;
++
++ if (copy_from_user(&get_eid, userbuf, sizeof(get_eid))) {
++ dev_err(priv->dev, "copy from user failed\n");
++ return -EFAULT;
++ }
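++	/*
++	 * Calling convention, as implemented below: userspace may pass count == 0
++	 * to query how many endpoints are known, then call again with a buffer
++	 * sized for that many entries; on return, count holds the number of
++	 * entries actually copied.
++	 */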
++
++ mutex_lock(&priv->endpoints_lock);
++
++ if (get_eid.count == 0) {
++ count = priv->endpoints_count;
++ goto out_unlock;
++ }
++
++ user_ptr = u64_to_user_ptr(get_eid.ptr);
++ count_to_copy = get_eid.count > priv->endpoints_count ?
++ priv->endpoints_count : get_eid.count;
++ list_for_each_entry(endpoint, &priv->endpoints, link) {
++ if (endpoint->data.eid_info.eid < get_eid.start_eid)
++ continue;
++ if (count >= count_to_copy)
++ break;
++
++ if (addr_format == ASPEED_MCTP_EXTENDED_ADDR_FORMAT)
++ ret = copy_to_user(&(((struct aspeed_mctp_eid_ext_info *)
++ user_ptr)[count]),
++ &endpoint->data,
++ sizeof(struct aspeed_mctp_eid_ext_info));
++ else
++ ret = copy_to_user(&(((struct aspeed_mctp_eid_info *)
++ user_ptr)[count]),
++ &endpoint->data,
++ sizeof(struct aspeed_mctp_eid_info));
++
++ if (ret) {
++ dev_err(priv->dev, "copy to user failed\n");
++ ret = -EFAULT;
++ goto out_unlock;
++ }
++ count++;
++ }
++
++out_unlock:
++ get_eid.count = count;
++ if (copy_to_user(userbuf, &get_eid, sizeof(get_eid))) {
++ dev_err(priv->dev, "copy to user failed\n");
++ ret = -EFAULT;
++ }
++
++ mutex_unlock(&priv->endpoints_lock);
++ return ret;
++}
++
++static int
++eid_info_cmp(void *priv, const struct list_head *a, const struct list_head *b)
++{
++ struct aspeed_mctp_endpoint *endpoint_a;
++ struct aspeed_mctp_endpoint *endpoint_b;
++
++ if (a == b)
++ return 0;
++
++ endpoint_a = list_entry(a, typeof(*endpoint_a), link);
++ endpoint_b = list_entry(b, typeof(*endpoint_b), link);
++
++ if (endpoint_a->data.eid_info.eid < endpoint_b->data.eid_info.eid)
++ return -1;
++ else if (endpoint_a->data.eid_info.eid > endpoint_b->data.eid_info.eid)
++ return 1;
++
++ return 0;
++}
++
++static void aspeed_mctp_eid_info_list_remove(struct list_head *list)
++{
++ struct aspeed_mctp_endpoint *endpoint;
++ struct aspeed_mctp_endpoint *tmp;
++
++ list_for_each_entry_safe(endpoint, tmp, list, link) {
++ list_del(&endpoint->link);
++ kfree(endpoint);
++ }
++}
++
++static bool
++aspeed_mctp_eid_info_list_valid(struct list_head *list)
++{
++ struct aspeed_mctp_endpoint *endpoint;
++ struct aspeed_mctp_endpoint *next;
++
++ list_for_each_entry(endpoint, list, link) {
++ next = list_next_entry(endpoint, link);
++ if (&next->link == list)
++ break;
++
++		/* duplicated EIDs */
++ if (next->data.eid_info.eid == endpoint->data.eid_info.eid)
++ return false;
++ }
++
++ return true;
++}
++
++static int
++aspeed_mctp_set_eid_info(struct aspeed_mctp *priv, void __user *userbuf,
++ enum mctp_address_type addr_format)
++{
++ struct list_head list = LIST_HEAD_INIT(list);
++ struct aspeed_mctp_set_eid_info set_eid;
++ void *user_ptr;
++ struct aspeed_mctp_endpoint *endpoint;
++ int ret = 0;
++ u8 eid = 0;
++ size_t i;
++
++ if (copy_from_user(&set_eid, userbuf, sizeof(set_eid))) {
++ dev_err(priv->dev, "copy from user failed\n");
++ return -EFAULT;
++ }
++
++ if (set_eid.count > ASPEED_MCTP_EID_INFO_MAX)
++ return -EINVAL;
++
++ user_ptr = u64_to_user_ptr(set_eid.ptr);
++ for (i = 0; i < set_eid.count; i++) {
++ endpoint = kzalloc(sizeof(*endpoint), GFP_KERNEL);
++ if (!endpoint) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ memset(endpoint, 0, sizeof(*endpoint));
++
++ if (addr_format == ASPEED_MCTP_EXTENDED_ADDR_FORMAT)
++ ret = copy_from_user(&endpoint->data,
++ &(((struct aspeed_mctp_eid_ext_info *)
++ user_ptr)[i]),
++ sizeof(struct aspeed_mctp_eid_ext_info));
++ else
++ ret = copy_from_user(&endpoint->data,
++ &(((struct aspeed_mctp_eid_info *)
++ user_ptr)[i]),
++ sizeof(struct aspeed_mctp_eid_info));
++
++ if (ret) {
++ dev_err(priv->dev, "copy from user failed\n");
++ kfree(endpoint);
++ ret = -EFAULT;
++ goto out;
++ }
++
++ /* Detect self EID */
++ if (_get_bdf(priv) == endpoint->data.eid_info.bdf) {
++ /*
++			 * XXX Use the smallest EID with a matching BDF.
++			 * On some platforms there could be multiple endpoints
++			 * with the same BDF in the routing table.
++ */
++ if (eid == 0 || endpoint->data.eid_info.eid < eid)
++ eid = endpoint->data.eid_info.eid;
++ }
++
++ list_add_tail(&endpoint->link, &list);
++ }
++
++ list_sort(NULL, &list, eid_info_cmp);
++ if (!aspeed_mctp_eid_info_list_valid(&list)) {
++ ret = -EINVAL;
++ goto out;
++ }
++
++ mutex_lock(&priv->endpoints_lock);
++ if (list_empty(&priv->endpoints))
++ list_splice_init(&list, &priv->endpoints);
++ else
++ list_swap(&list, &priv->endpoints);
++ priv->endpoints_count = set_eid.count;
++ priv->eid = eid;
++ mutex_unlock(&priv->endpoints_lock);
++out:
++ aspeed_mctp_eid_info_list_remove(&list);
++ return ret;
++}
++
++static int aspeed_mctp_set_own_eid(struct aspeed_mctp *priv, void __user *userbuf)
++{
++ struct aspeed_mctp_set_own_eid data;
++
++ if (copy_from_user(&data, userbuf, sizeof(data))) {
++ dev_err(priv->dev, "copy from user failed\n");
++ return -EFAULT;
++ }
++
++ priv->eid = data.eid;
++
++ return 0;
++}
++
++static long
++aspeed_mctp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ struct mctp_client *client = file->private_data;
++ struct aspeed_mctp *priv = client->priv;
++ void __user *userbuf = (void __user *)arg;
++ int ret;
++
++ switch (cmd) {
++ case ASPEED_MCTP_IOCTL_FILTER_EID:
++ ret = aspeed_mctp_filter_eid(priv, userbuf);
++ break;
++
++ case ASPEED_MCTP_IOCTL_GET_BDF:
++ ret = aspeed_mctp_get_bdf(priv, userbuf);
++ break;
++
++ case ASPEED_MCTP_IOCTL_GET_MEDIUM_ID:
++ ret = aspeed_mctp_get_medium_id(priv, userbuf);
++ break;
++
++ case ASPEED_MCTP_IOCTL_GET_MTU:
++ ret = aspeed_mctp_get_mtu(priv, userbuf);
++ break;
++
++ case ASPEED_MCTP_IOCTL_REGISTER_DEFAULT_HANDLER:
++ ret = aspeed_mctp_register_default_handler(client);
++ break;
++
++ case ASPEED_MCTP_IOCTL_REGISTER_TYPE_HANDLER:
++ ret = aspeed_mctp_register_type_handler(client, userbuf);
++ break;
++
++ case ASPEED_MCTP_IOCTL_UNREGISTER_TYPE_HANDLER:
++ ret = aspeed_mctp_unregister_type_handler(client, userbuf);
++ break;
++
++ case ASPEED_MCTP_IOCTL_GET_EID_INFO:
++ ret = aspeed_mctp_get_eid_info(priv, userbuf, ASPEED_MCTP_GENERIC_ADDR_FORMAT);
++ break;
++
++ case ASPEED_MCTP_IOCTL_GET_EID_EXT_INFO:
++ ret = aspeed_mctp_get_eid_info(priv, userbuf, ASPEED_MCTP_EXTENDED_ADDR_FORMAT);
++ break;
++
++ case ASPEED_MCTP_IOCTL_SET_EID_INFO:
++ ret = aspeed_mctp_set_eid_info(priv, userbuf, ASPEED_MCTP_GENERIC_ADDR_FORMAT);
++ break;
++
++ case ASPEED_MCTP_IOCTL_SET_EID_EXT_INFO:
++ ret = aspeed_mctp_set_eid_info(priv, userbuf, ASPEED_MCTP_EXTENDED_ADDR_FORMAT);
++ break;
++
++ case ASPEED_MCTP_IOCTL_SET_OWN_EID:
++ ret = aspeed_mctp_set_own_eid(priv, userbuf);
++ break;
++
++ default:
++ dev_err(priv->dev, "Command not found\n");
++ ret = -ENOTTY;
++ }
++
++ return ret;
++}
++
++static __poll_t aspeed_mctp_poll(struct file *file,
++ struct poll_table_struct *pt)
++{
++ struct mctp_client *client = file->private_data;
++ __poll_t ret = 0;
++ struct aspeed_mctp *priv = client->priv;
++ struct mctp_channel *rx = &priv->rx;
++ u32 mctp_ctrl;
++ u32 mctp_int_sts;
++
++ if (priv->miss_mctp_int) {
++ regmap_read(priv->map, ASPEED_MCTP_CTRL, &mctp_ctrl);
++ if (!(mctp_ctrl & RX_CMD_READY))
++ rx->stopped = true;
++ /* Polling the RX_CMD_RECEIVE_INT to ensure rx_tasklet can find the data */
++ regmap_read(priv->map, ASPEED_MCTP_INT_STS, &mctp_int_sts);
++ if (mctp_int_sts & RX_CMD_RECEIVE_INT)
++ regmap_write(priv->map, ASPEED_MCTP_INT_STS,
++ mctp_int_sts);
++ }
++
++ tasklet_hi_schedule(&priv->rx.tasklet);
++ poll_wait(file, &client->wait_queue, pt);
++
++ if (!ptr_ring_full_bh(&client->tx_queue))
++ ret |= EPOLLOUT;
++
++ if (__ptr_ring_peek(&client->rx_queue))
++ ret |= EPOLLIN;
++
++ return ret;
++}
++
++static const struct file_operations aspeed_mctp_fops = {
++ .owner = THIS_MODULE,
++ .open = aspeed_mctp_open,
++ .release = aspeed_mctp_release,
++ .read = aspeed_mctp_read,
++ .write = aspeed_mctp_write,
++ .unlocked_ioctl = aspeed_mctp_ioctl,
++ .poll = aspeed_mctp_poll,
++};
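++/*
++ * Illustrative user-space flow (a sketch only; ioctl argument structs are
++ * omitted and /dev/aspeed-mctp0 assumes alias id 0):
++ *
++ *   int fd = open("/dev/aspeed-mctp0", O_RDWR);
++ *   ioctl(fd, ASPEED_MCTP_IOCTL_REGISTER_DEFAULT_HANDLER);
++ *   struct pollfd pfd = { .fd = fd, .events = POLLIN };
++ *   poll(&pfd, 1, -1);                       // wait for a received VDM
++ *   ssize_t n = read(fd, buf, sizeof(buf));  // one full packet per read()
++ *   write(fd, buf, n);                       // one full packet per write()
++ */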
++
++static const struct regmap_config aspeed_mctp_regmap_cfg = {
++ .reg_bits = 32,
++ .reg_stride = 4,
++ .val_bits = 32,
++ .max_register = ASPEED_G7_MCTP_PCIE_BDF,
++};
++
++struct device_type aspeed_mctp_type = {
++ .name = "aspeed-mctp",
++};
++
++static void aspeed_mctp_send_pcie_uevent(struct kobject *kobj, bool ready)
++{
++ char *pcie_not_ready_event[] = { ASPEED_MCTP_READY "=0", NULL };
++ char *pcie_ready_event[] = { ASPEED_MCTP_READY "=1", NULL };
++
++ kobject_uevent_env(kobj, KOBJ_CHANGE,
++ ready ? pcie_ready_event : pcie_not_ready_event);
++}
++
++static void aspeed_mctp_irq_enable(struct aspeed_mctp *priv)
++{
++ u32 enable = TX_CMD_SENT_INT | TX_CMD_WRONG_INT |
++ RX_CMD_RECEIVE_INT | RX_CMD_NO_MORE_INT;
++
++ regmap_write(priv->map, ASPEED_MCTP_INT_EN, enable);
++}
++
++static void aspeed_mctp_irq_disable(struct aspeed_mctp *priv)
++{
++ regmap_write(priv->map, ASPEED_MCTP_INT_EN, 0);
++}
++
++static void aspeed_mctp_pcie_setup(struct aspeed_mctp *priv)
++{
++ int ret;
++ u8 tx_max_payload_size;
++ struct kobject *kobj = &priv->mctp_miscdev.this_device->kobj;
++
++ ret = _get_bdf(priv);
++
++ if (ret >= 0) {
++ cancel_delayed_work(&priv->pcie.rst_dwork);
++ if (priv->match_data->need_address_mapping)
++ regmap_update_bits(priv->map, ASPEED_MCTP_EID,
++ MEMORY_SPACE_MAPPING, BIT(31));
++ if (priv->match_data->dma_need_64bits_width)
++ tx_max_payload_size =
++ FIELD_GET(TX_MAX_PAYLOAD_SIZE_MASK,
++ ilog2(ASPEED_MCTP_MTU >> 6));
++ else
++ /*
++			 * On the AST2600, the TX SOM and EOM bits may not match the
++			 * expected result. For example, with the Maximum Transmission
++			 * Unit (MTU) set to 64 bytes and a transfer size between 61 and
++			 * 124 bytes (MTU-3 to 2*MTU-4), the engine sets EOM to 1 in the
++			 * VDM header of every packet regardless of what was programmed.
++			 * To work around this, the driver sets the MTU one level higher
++			 * (e.g. 64 to 128).
++ */
++ tx_max_payload_size =
++ FIELD_GET(TX_MAX_PAYLOAD_SIZE_MASK,
++ fls(ASPEED_MCTP_MTU >> 6));
++ regmap_update_bits(priv->map, ASPEED_MCTP_ENGINE_CTRL,
++ TX_MAX_PAYLOAD_SIZE_MASK,
++ tx_max_payload_size);
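++		/*
++		 * Example: if ASPEED_MCTP_MTU were 64, MTU >> 6 == 1; the AST2700
++		 * path above uses ilog2(1) == 0 while the AST2600 path uses
++		 * fls(1) == 1, one payload-size step larger, which implements the
++		 * workaround described in that comment.
++		 */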
++ aspeed_mctp_flush_all_tx_queues(priv);
++ if (!priv->miss_mctp_int) {
++ aspeed_mctp_irq_enable(priv);
++ } else {
++ if (priv->rx_det_period_us)
++ schedule_delayed_work(&priv->rx_det_dwork,
++ usecs_to_jiffies(priv->rx_det_period_us));
++ }
++ aspeed_mctp_rx_trigger(&priv->rx);
++ aspeed_mctp_send_pcie_uevent(kobj, true);
++ } else {
++ schedule_delayed_work(&priv->pcie.rst_dwork,
++ msecs_to_jiffies(1000));
++ }
++}
++
++static void aspeed_mctp_reset_work(struct work_struct *work)
++{
++ struct aspeed_mctp *priv = container_of(work, typeof(*priv),
++ pcie.rst_dwork.work);
++ struct kobject *kobj = &priv->mctp_miscdev.this_device->kobj;
++
++ if (priv->pcie.need_uevent) {
++ aspeed_mctp_send_pcie_uevent(kobj, false);
++ priv->pcie.need_uevent = false;
++ }
++
++ aspeed_mctp_pcie_setup(priv);
++}
++
++static void aspeed_mctp_rx_detect_work(struct work_struct *work)
++{
++ struct aspeed_mctp *priv =
++ container_of(work, typeof(*priv), rx_det_dwork.work);
++
++ tasklet_hi_schedule(&priv->rx.tasklet);
++ schedule_delayed_work(&priv->rx_det_dwork,
++ usecs_to_jiffies(priv->rx_det_period_us));
++}
++
++static void aspeed_mctp_channels_init(struct aspeed_mctp *priv)
++{
++ aspeed_mctp_rx_chan_init(&priv->rx);
++ aspeed_mctp_tx_chan_init(&priv->tx);
++}
++
++static irqreturn_t aspeed_mctp_irq_handler(int irq, void *arg)
++{
++ struct aspeed_mctp *priv = arg;
++ u32 handled = 0;
++ u32 status;
++
++ regmap_read(priv->map, ASPEED_MCTP_INT_STS, &status);
++ regmap_write(priv->map, ASPEED_MCTP_INT_STS, status);
++
++ if (status & TX_CMD_SENT_INT) {
++ tasklet_hi_schedule(&priv->tx.tasklet);
++ if (!priv->match_data->fifo_auto_surround)
++			priv->tx.rd_ptr = (priv->tx.rd_ptr + 1) % TX_PACKET_COUNT;
++ handled |= TX_CMD_SENT_INT;
++ }
++
++ if (status & TX_CMD_WRONG_INT) {
++ /* TODO: print the actual command */
++ dev_warn(priv->dev, "TX wrong");
++
++ handled |= TX_CMD_WRONG_INT;
++ }
++
++ if (status & RX_CMD_RECEIVE_INT) {
++ tasklet_hi_schedule(&priv->rx.tasklet);
++
++ handled |= RX_CMD_RECEIVE_INT;
++ }
++
++ if (status & RX_CMD_NO_MORE_INT) {
++ dev_dbg(priv->dev, "RX full");
++ priv->rx.stopped = true;
++ tasklet_hi_schedule(&priv->rx.tasklet);
++
++ handled |= RX_CMD_NO_MORE_INT;
++ }
++
++ if (!handled)
++ return IRQ_NONE;
++
++ return IRQ_HANDLED;
++}
++
++static irqreturn_t aspeed_mctp_pcie_rst_irq_handler(int irq, void *arg)
++{
++ struct aspeed_mctp *priv = arg;
++
++ aspeed_mctp_channels_init(priv);
++
++ priv->pcie.need_uevent = true;
++ priv->eid = 0;
++
++ schedule_delayed_work(&priv->pcie.rst_dwork, 0);
++
++ return IRQ_HANDLED;
++}
++
++static void aspeed_mctp_drv_init(struct aspeed_mctp *priv)
++{
++ INIT_LIST_HEAD(&priv->clients);
++ INIT_LIST_HEAD(&priv->mctp_type_handlers);
++ INIT_LIST_HEAD(&priv->endpoints);
++
++ spin_lock_init(&priv->clients_lock);
++ mutex_init(&priv->endpoints_lock);
++
++ INIT_DELAYED_WORK(&priv->pcie.rst_dwork, aspeed_mctp_reset_work);
++
++ tasklet_init(&priv->tx.tasklet, aspeed_mctp_tx_tasklet,
++ (unsigned long)&priv->tx);
++ tasklet_init(&priv->rx.tasklet, aspeed_mctp_rx_tasklet,
++ (unsigned long)&priv->rx);
++}
++
++static void aspeed_mctp_drv_fini(struct aspeed_mctp *priv)
++{
++ aspeed_mctp_eid_info_list_remove(&priv->endpoints);
++ tasklet_disable(&priv->tx.tasklet);
++ tasklet_kill(&priv->tx.tasklet);
++ tasklet_disable(&priv->rx.tasklet);
++ tasklet_kill(&priv->rx.tasklet);
++
++ cancel_delayed_work_sync(&priv->pcie.rst_dwork);
++ if (priv->miss_mctp_int)
++ cancel_delayed_work_sync(&priv->rx_det_dwork);
++}
++
++static int aspeed_mctp_resources_init(struct aspeed_mctp *priv)
++{
++ struct platform_device *pdev = to_platform_device(priv->dev);
++ void __iomem *regs;
++
++ regs = devm_platform_ioremap_resource(pdev, 0);
++ if (IS_ERR(regs)) {
++ dev_err(priv->dev, "Failed to get regmap!\n");
++ return PTR_ERR(regs);
++ }
++
++ priv->map = devm_regmap_init_mmio(priv->dev, regs,
++ &aspeed_mctp_regmap_cfg);
++ if (IS_ERR(priv->map))
++ return PTR_ERR(priv->map);
++
++ priv->reset =
++ priv->rc_f ?
++ devm_reset_control_get_by_index(priv->dev, 0) :
++ devm_reset_control_get_shared_by_index(priv->dev, 0);
++ if (IS_ERR(priv->reset)) {
++ dev_err(priv->dev, "Failed to get reset!\n");
++ return PTR_ERR(priv->reset);
++ }
++
++ if (priv->rc_f) {
++ priv->reset_dma = devm_reset_control_get_shared_by_index(priv->dev, 1);
++ if (IS_ERR(priv->reset_dma)) {
++ dev_err(priv->dev, "Failed to get ep reset!\n");
++ return PTR_ERR(priv->reset_dma);
++ }
++ }
++ priv->pcie.map =
++ syscon_regmap_lookup_by_phandle(priv->dev->of_node,
++ "aspeed,pcieh");
++ if (IS_ERR(priv->pcie.map)) {
++ dev_err(priv->dev, "Failed to find PCIe Host regmap!\n");
++ return PTR_ERR(priv->pcie.map);
++ }
++
++ platform_set_drvdata(pdev, priv);
++
++ return 0;
++}
++
++static void aspeed_release_rmem(void *d)
++{
++ of_reserved_mem_device_release(d);
++}
++
++static int aspeed_mctp_dma_init(struct aspeed_mctp *priv)
++{
++ struct mctp_channel *tx = &priv->tx;
++ struct mctp_channel *rx = &priv->rx;
++ size_t alloc_size;
++ int ret = -ENOMEM;
++
++ BUILD_BUG_ON(TX_PACKET_COUNT >= TX_MAX_PACKET_COUNT);
++ BUILD_BUG_ON(RX_PACKET_COUNT >= RX_MAX_PACKET_COUNT);
++
++ ret = of_reserved_mem_device_init(priv->dev);
++ if (ret) {
++ dev_err(priv->dev, "device does not have specific DMA pool: %d\n",
++ ret);
++ return ret;
++ }
++
++ ret = devm_add_action_or_reset(priv->dev, aspeed_release_rmem,
++ priv->dev);
++ if (ret)
++ return ret;
++
++ ret = dma_set_mask_and_coherent(priv->dev, DMA_BIT_MASK(64));
++ if (ret) {
++ dev_err(priv->dev, "cannot set 64-bits DMA mask\n");
++ return ret;
++ }
++
++ alloc_size = PAGE_ALIGN(priv->rx_packet_count * priv->match_data->packet_unit_size);
++ rx->data.vaddr =
++ dma_alloc_coherent(priv->dev, alloc_size, &rx->data.dma_handle, GFP_KERNEL);
++
++ if (!rx->data.vaddr)
++ return ret;
++
++ alloc_size = PAGE_ALIGN(priv->rx_packet_count * priv->match_data->rx_cmd_size);
++ rx->cmd.vaddr = dma_alloc_coherent(priv->dev, alloc_size, &rx->cmd.dma_handle, GFP_KERNEL);
++
++ if (!rx->cmd.vaddr)
++ goto out_rx_cmd;
++
++ alloc_size = PAGE_ALIGN(TX_PACKET_COUNT * priv->match_data->packet_unit_size);
++ tx->data.vaddr =
++ dma_alloc_coherent(priv->dev, alloc_size, &tx->data.dma_handle, GFP_KERNEL);
++
++ if (!tx->data.vaddr)
++ goto out_tx_data;
++ alloc_size = PAGE_ALIGN(TX_PACKET_COUNT * priv->match_data->tx_cmd_size);
++ tx->cmd.vaddr = dma_alloc_coherent(priv->dev, alloc_size, &tx->cmd.dma_handle, GFP_KERNEL);
++
++ if (!tx->cmd.vaddr)
++ goto out_tx_cmd;
++
++ return 0;
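++	/*
++	 * Error unwind: each label below is named after the allocation that
++	 * failed and frees the allocation made in the step before it, then
++	 * falls through to free the earlier ones.
++	 */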
++out_tx_cmd:
++ alloc_size = PAGE_ALIGN(TX_PACKET_COUNT *
++ priv->match_data->packet_unit_size);
++ dma_free_coherent(priv->dev, alloc_size, tx->data.vaddr,
++ tx->data.dma_handle);
++
++out_tx_data:
++ alloc_size = PAGE_ALIGN(priv->rx_packet_count *
++ priv->match_data->rx_cmd_size);
++ dma_free_coherent(priv->dev, alloc_size, rx->cmd.vaddr,
++ rx->cmd.dma_handle);
++
++out_rx_cmd:
++ alloc_size = PAGE_ALIGN(priv->rx_packet_count *
++ priv->match_data->packet_unit_size);
++ dma_free_coherent(priv->dev, alloc_size, rx->data.vaddr,
++ rx->data.dma_handle);
++
++ return ret;
++}
++
++static void aspeed_mctp_dma_fini(struct aspeed_mctp *priv)
++{
++ struct mctp_channel *tx = &priv->tx;
++ struct mctp_channel *rx = &priv->rx;
++ size_t free_size;
++
++ free_size = PAGE_ALIGN(TX_PACKET_COUNT * priv->match_data->tx_cmd_size);
++ dma_free_coherent(priv->dev, free_size, tx->cmd.vaddr,
++ tx->cmd.dma_handle);
++
++ free_size = PAGE_ALIGN(priv->rx_packet_count *
++ priv->match_data->rx_cmd_size);
++ dma_free_coherent(priv->dev, free_size, rx->cmd.vaddr,
++ rx->cmd.dma_handle);
++
++ free_size = PAGE_ALIGN(TX_PACKET_COUNT *
++ priv->match_data->packet_unit_size);
++ dma_free_coherent(priv->dev, free_size, tx->data.vaddr,
++ tx->data.dma_handle);
++
++ free_size = PAGE_ALIGN(priv->rx_packet_count *
++ priv->match_data->packet_unit_size);
++ dma_free_coherent(priv->dev, free_size, rx->data.vaddr,
++ rx->data.dma_handle);
++}
++
++static int aspeed_mctp_irq_init(struct aspeed_mctp *priv)
++{
++ struct platform_device *pdev = to_platform_device(priv->dev);
++ int irq, ret;
++
++ irq = platform_get_irq_byname_optional(pdev, "mctp");
++ if (irq < 0) {
++		/* The mctp IRQ is optional */
++ priv->miss_mctp_int = 1;
++ INIT_DELAYED_WORK(&priv->rx_det_dwork, aspeed_mctp_rx_detect_work);
++ } else {
++ ret = devm_request_irq(priv->dev, irq, aspeed_mctp_irq_handler,
++ IRQF_SHARED, dev_name(&pdev->dev), priv);
++ if (ret)
++ return ret;
++ aspeed_mctp_irq_enable(priv);
++ }
++ irq = platform_get_irq_byname(pdev, "pcie");
++	if (irq < 0)
++		return irq;
++
++ ret = devm_request_irq(priv->dev, irq, aspeed_mctp_pcie_rst_irq_handler,
++ IRQF_SHARED, dev_name(&pdev->dev), priv);
++ if (ret)
++ return ret;
++
++ return 0;
++}
++
++static int aspeed_mctp_hw_reset(struct aspeed_mctp *priv)
++{
++ int ret = 0;
++
++ ret = reset_control_deassert(priv->reset);
++ if (ret) {
++ dev_warn(priv->dev, "Failed to deassert reset\n");
++ return ret;
++ }
++
++ if (priv->rc_f) {
++ ret = reset_control_deassert(priv->reset_dma);
++ if (ret) {
++ dev_warn(priv->dev, "Failed to deassert ep reset\n");
++ return ret;
++ }
++ }
++
++ if (priv->match_data->dma_need_64bits_width)
++ ret = pcie_vdm_enable(priv->dev);
++
++ return ret;
++}
++
++static int aspeed_mctp_probe(struct platform_device *pdev)
++{
++ struct aspeed_mctp *priv;
++ int ret, id;
++ const char *name;
++
++ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
++ if (!priv) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ priv->dev = &pdev->dev;
++ priv->rc_f =
++ of_find_property(priv->dev->of_node, "pcie_rc", NULL) ? 1 : 0;
++ priv->match_data = of_device_get_match_data(priv->dev);
++
++ ret = device_property_read_u32(priv->dev, "aspeed,rx-packet-count",
++ &priv->rx_packet_count);
++ if (ret) {
++ priv->rx_packet_count = RX_PACKET_COUNT;
++ } else if (priv->rx_packet_count % 4 ||
++ priv->rx_packet_count >= RX_MAX_PACKET_COUNT) {
++ dev_err(priv->dev,
++			"aspeed,rx-packet-count (%d) must be a multiple of 4 and less than %ld",
++ priv->rx_packet_count, RX_MAX_PACKET_COUNT);
++ ret = -EINVAL;
++ goto out;
++ }
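++	/*
++	 * Illustrative devicetree usage (node label and values are examples
++	 * only):
++	 *
++	 *   &mctp0 {
++	 *           aspeed,rx-packet-count = <64>;
++	 *           aspeed,rx-det-period-us = <1000>;
++	 *   };
++	 */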
++
++ ret = device_property_read_u32(priv->dev, "aspeed,rx-ring-count",
++ &priv->rx_ring_count);
++ if (ret)
++ priv->rx_ring_count = RX_RING_COUNT;
++
++ ret = device_property_read_u32(priv->dev, "aspeed,tx-ring-count",
++ &priv->tx_ring_count);
++ if (ret)
++ priv->tx_ring_count = TX_RING_COUNT;
++
++ ret = device_property_read_u32(priv->dev, "aspeed,rx-det-period-us",
++ &priv->rx_det_period_us);
++ if (ret)
++ priv->rx_det_period_us = 1000;
++
++ aspeed_mctp_drv_init(priv);
++
++ ret = aspeed_mctp_resources_init(priv);
++ if (ret) {
++ dev_err(priv->dev, "Failed to init resources\n");
++ goto out_drv;
++ }
++
++ ret = aspeed_mctp_dma_init(priv);
++ if (ret) {
++ dev_err(priv->dev, "Failed to init DMA\n");
++ goto out_drv;
++ }
++
++ ret = aspeed_mctp_hw_reset(priv);
++ if (ret)
++ goto out_drv;
++
++ aspeed_mctp_channels_init(priv);
++
++ id = of_alias_get_id(priv->dev->of_node, "mctp");
++	if (id < 0) {
++		ret = id;
++		goto out_dma;
++	}
++ priv->mctp_miscdev.parent = priv->dev;
++ priv->mctp_miscdev.minor = MISC_DYNAMIC_MINOR;
++ priv->mctp_miscdev.name = kasprintf(GFP_KERNEL, "aspeed-mctp%d", id);
++ priv->mctp_miscdev.fops = &aspeed_mctp_fops;
++ ret = misc_register(&priv->mctp_miscdev);
++ if (ret) {
++ dev_err(priv->dev, "Failed to register miscdev\n");
++ goto out_dma;
++ }
++ priv->mctp_miscdev.this_device->type = &aspeed_mctp_type;
++
++ ret = aspeed_mctp_irq_init(priv);
++ if (ret) {
++ dev_err(priv->dev, "Failed to init IRQ!\n");
++ goto out_dma;
++ }
++ aspeed_mctp_pcie_setup(priv);
++
++ name = kasprintf(GFP_KERNEL, "peci-mctp%d", id);
++ priv->peci_mctp =
++ platform_device_register_data(priv->dev, name, PLATFORM_DEVID_NONE, NULL, 0);
++ if (IS_ERR(priv->peci_mctp))
++ dev_err(priv->dev, "Failed to register peci-mctp device\n");
++
++ return 0;
++
++out_dma:
++ aspeed_mctp_dma_fini(priv);
++out_drv:
++ aspeed_mctp_drv_fini(priv);
++out:
++ dev_err(&pdev->dev, "Failed to probe Aspeed MCTP: %d\n", ret);
++ return ret;
++}
++
++static int aspeed_mctp_remove(struct platform_device *pdev)
++{
++ struct aspeed_mctp *priv = platform_get_drvdata(pdev);
++
++ platform_device_unregister(priv->peci_mctp);
++
++ misc_deregister(&priv->mctp_miscdev);
++
++ aspeed_mctp_irq_disable(priv);
++
++ aspeed_mctp_dma_fini(priv);
++
++ aspeed_mctp_drv_fini(priv);
++
++ return 0;
++}
++
++static const struct aspeed_mctp_match_data ast2500_mctp_match_data = {
++ .rx_cmd_size = sizeof(struct aspeed_mctp_rx_cmd),
++ .tx_cmd_size = sizeof(struct aspeed_mctp_tx_cmd),
++ .packet_unit_size = 128,
++ .need_address_mapping = true,
++ .vdm_hdr_direct_xfer = false,
++ .fifo_auto_surround = false,
++};
++
++static const struct aspeed_mctp_match_data ast2600_mctp_match_data = {
++ .rx_cmd_size = sizeof(u32),
++ .tx_cmd_size = sizeof(struct aspeed_mctp_tx_cmd),
++ .packet_unit_size = sizeof(struct mctp_pcie_packet_data),
++ .need_address_mapping = false,
++ .vdm_hdr_direct_xfer = true,
++ .fifo_auto_surround = true,
++};
++
++static const struct aspeed_mctp_match_data ast2700_mctp0_match_data = {
++ .rx_cmd_size = sizeof(struct aspeed_mctp_rx_cmd),
++ .tx_cmd_size = sizeof(struct aspeed_g7_mctp_tx_cmd),
++ .packet_unit_size = sizeof(struct mctp_pcie_packet_data),
++ .need_address_mapping = false,
++ .vdm_hdr_direct_xfer = true,
++ .fifo_auto_surround = true,
++ .dma_need_64bits_width = true,
++ .scu_pcie_ctrl_offset = ASPEED_G7_SCU_PCIE0_CTRL_OFFSET,
++};
++
++static const struct aspeed_mctp_match_data ast2700_mctp1_match_data = {
++ .rx_cmd_size = sizeof(struct aspeed_mctp_rx_cmd),
++ .tx_cmd_size = sizeof(struct aspeed_g7_mctp_tx_cmd),
++ .packet_unit_size = sizeof(struct mctp_pcie_packet_data),
++ .need_address_mapping = false,
++ .vdm_hdr_direct_xfer = true,
++ .fifo_auto_surround = true,
++ .dma_need_64bits_width = true,
++ .scu_pcie_ctrl_offset = ASPEED_G7_SCU_PCIE1_CTRL_OFFSET,
++};
++
++static const struct of_device_id aspeed_mctp_match_table[] = {
++ { .compatible = "aspeed,ast2500-mctp", .data = &ast2500_mctp_match_data},
++ { .compatible = "aspeed,ast2600-mctp", .data = &ast2600_mctp_match_data},
++ { .compatible = "aspeed,ast2700-mctp0", .data = &ast2700_mctp0_match_data},
++ { .compatible = "aspeed,ast2700-mctp1", .data = &ast2700_mctp1_match_data},
++ { }
++};
++
++static struct platform_driver aspeed_mctp_driver = {
++ .driver = {
++ .name = "aspeed-mctp",
++ .of_match_table = of_match_ptr(aspeed_mctp_match_table),
++ },
++ .probe = aspeed_mctp_probe,
++ .remove = aspeed_mctp_remove,
++};
++
++static int __init aspeed_mctp_init(void)
++{
++ packet_cache =
++ kmem_cache_create_usercopy("mctp-packet",
++ sizeof(struct mctp_pcie_packet),
++ 0, 0, 0,
++ sizeof(struct mctp_pcie_packet),
++ NULL);
++ if (!packet_cache)
++ return -ENOMEM;
++
++ return platform_driver_register(&aspeed_mctp_driver);
++}
++
++static void __exit aspeed_mctp_exit(void)
++{
++ platform_driver_unregister(&aspeed_mctp_driver);
++ kmem_cache_destroy(packet_cache);
++}
++
++module_init(aspeed_mctp_init)
++module_exit(aspeed_mctp_exit)
++
++MODULE_DEVICE_TABLE(of, aspeed_mctp_match_table);
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Iwona Winiarska <iwona.winiarska@intel.com>");
++MODULE_DESCRIPTION("Aspeed MCTP driver");
+diff --git a/drivers/soc/aspeed/aspeed-p2a-ctrl.c b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
+index 548f44da2..8610ddacc 100644
+--- a/drivers/soc/aspeed/aspeed-p2a-ctrl.c
++++ b/drivers/soc/aspeed/aspeed-p2a-ctrl.c
+@@ -383,13 +383,11 @@ static int aspeed_p2a_ctrl_probe(struct platform_device *pdev)
+ return rc;
+ }
+
+-static int aspeed_p2a_ctrl_remove(struct platform_device *pdev)
++static void aspeed_p2a_ctrl_remove(struct platform_device *pdev)
+ {
+ struct aspeed_p2a_ctrl *p2a_ctrl = dev_get_drvdata(&pdev->dev);
+
+ misc_deregister(&p2a_ctrl->miscdev);
+-
+- return 0;
+ }
+
+ #define SCU2C_DRAM BIT(25)
+@@ -433,7 +431,7 @@ static struct platform_driver aspeed_p2a_ctrl_driver = {
+ .of_match_table = aspeed_p2a_ctrl_match,
+ },
+ .probe = aspeed_p2a_ctrl_probe,
+- .remove = aspeed_p2a_ctrl_remove,
++ .remove_new = aspeed_p2a_ctrl_remove,
+ };
+
+ module_platform_driver(aspeed_p2a_ctrl_driver);
+diff --git a/drivers/soc/aspeed/aspeed-pcie-mmbi.c b/drivers/soc/aspeed/aspeed-pcie-mmbi.c
+new file mode 100644
+index 000000000..7428d4dd1
+--- /dev/null
++++ b/drivers/soc/aspeed/aspeed-pcie-mmbi.c
+@@ -0,0 +1,389 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++// Copyright (C) ASPEED Technology Inc.
++
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
++#include <linux/of.h>
++#include <linux/of_platform.h>
++#include <linux/of_reserved_mem.h>
++#include <linux/platform_device.h>
++
++#include <linux/wait.h>
++#include <linux/workqueue.h>
++
++#include <linux/regmap.h>
++#include <linux/interrupt.h>
++#include <linux/mfd/syscon.h>
++#include <linux/dma-mapping.h>
++#include <linux/miscdevice.h>
++#include <linux/poll.h>
++
++#include "aspeed-pcie-mmbi.h"
++
++/* AST2700 E2M */
++#define ASPEED_E2M_EVENT 0x0D0
++#define ASPEED_E2M_EVENT_SET 0x0D4
++#define ASPEED_E2M_EVENT_CLR 0x0D8
++#define ASPEED_E2M_EVENT_EN 0x0DC
++#define ASPEED_E2M_ADRMAP00 0x100
++#define ASPEED_E2M_WIRQA0 0x180
++#define ASPEED_E2M_WIRQV0 0x1C0
++#define ASPEED_E2M_SPROT_SIDG0 0x210
++#define ASPEED_E2M_SPROT_CTL0 0x280
++#define ASPEED_E2M_SPROT_ADR0 0x2C0
++struct aspeed_platform {
++ int (*mmbi_init)(struct platform_device *pdev);
++};
++
++struct aspeed_pcie_mmbi {
++ struct device *dev;
++ struct regmap *device;
++ struct regmap *e2m;
++ int irq;
++ const struct aspeed_platform *platform;
++ int id;
++ int e2m_index;
++ int soc_index;
++
++ /* MISC */
++ struct miscdevice mdev;
++ wait_queue_head_t wq;
++
++ /* Memory Mapping */
++ void __iomem *mem_virt;
++ dma_addr_t mem_phy;
++ phys_addr_t mem_size;
++
++ /* BMC Interrupt */
++ bool bmc_int_en;
++ u8 bmc_int_byte;
++ u32 bmc_int_offset;
++ bool bmc_int_update;
++ wait_queue_head_t bmc_int_wq;
++};
++
++static struct aspeed_pcie_mmbi *file_aspeed_pcie_mmbi(struct file *file)
++{
++ return container_of(file->private_data, struct aspeed_pcie_mmbi, mdev);
++}
++
++static int aspeed_pcie_mmbi_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ struct aspeed_pcie_mmbi *mmbi = file_aspeed_pcie_mmbi(file);
++ unsigned long vsize = vma->vm_end - vma->vm_start;
++ pgprot_t prot = vma->vm_page_prot;
++
++ if (((vma->vm_pgoff << PAGE_SHIFT) + vsize) > mmbi->mem_size)
++ return -EINVAL;
++
++ prot = pgprot_noncached(prot);
++
++ if (remap_pfn_range(vma, vma->vm_start,
++ (mmbi->mem_phy >> PAGE_SHIFT) + vma->vm_pgoff, vsize, prot))
++ return -EAGAIN;
++
++ return 0;
++}
++
++static __poll_t aspeed_pcie_mmbi_poll(struct file *file, struct poll_table_struct *pt)
++{
++ struct aspeed_pcie_mmbi *mmbi = file_aspeed_pcie_mmbi(file);
++
++ poll_wait(file, &mmbi->bmc_int_wq, pt);
++
++ if (!mmbi->bmc_int_update)
++ return 0;
++
++ mmbi->bmc_int_update = false;
++
++ return EPOLLIN;
++}
++
++static long aspeed_pcie_mmib_host_int(struct file *file, struct aspeed_pcie_mmbi *mmbi)
++{
++ u32 event;
++
++	regmap_read(mmbi->e2m, ASPEED_E2M_EVENT, &event);
++ if (!(event & BIT(mmbi->e2m_index)))
++ regmap_write(mmbi->e2m, ASPEED_E2M_EVENT_SET, BIT(mmbi->e2m_index));
++ else
++ return -EBUSY;
++
++ return 0;
++}
++
++static long aspeed_pcie_mmbi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ struct aspeed_pcie_mmbi *mmbi = file_aspeed_pcie_mmbi(file);
++
++ switch (cmd) {
++ case ASPEED_PCIE_MMBI_HOST_INT:
++ return aspeed_pcie_mmib_host_int(file, mmbi);
++ default:
++ break;
++	}
++
++ return -EINVAL;
++}
++
++static const struct file_operations aspeed_pcie_mmbi_fops = {
++ .owner = THIS_MODULE,
++ .mmap = aspeed_pcie_mmbi_mmap,
++ .poll = aspeed_pcie_mmbi_poll,
++ .unlocked_ioctl = aspeed_pcie_mmbi_ioctl,
++};
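++/*
++ * Illustrative user-space usage (a sketch): mmap() the shared MMBI window,
++ * poll() for host-to-BMC write notifications, and issue the
++ * ASPEED_PCIE_MMBI_HOST_INT ioctl (see aspeed-pcie-mmbi.h) to raise an
++ * event/MSI toward the host.
++ */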
++
++static irqreturn_t aspeed_pcie_mmbi_isr(int irq, void *dev_id)
++{
++ struct aspeed_pcie_mmbi *mmbi = dev_id;
++
++ mmbi->bmc_int_update = true;
++ wake_up_interruptible(&mmbi->bmc_int_wq);
++
++ return IRQ_HANDLED;
++}
++
++/*
++ * AST2700 PCIe MMBI (SCU & E2M)
++ * SoC | 0 | 1 |
++ * PCI class| MFD (0xFF_00_00) | MMBI (0x0C_0C_00) |
++ * Node | 0 1 | 0 |
++ * Alias id | 0 1 2 3 4 5 6 7 | 8 9 10 11 12 13 |
++ * PID | 3 4 5 6 11 12 13 14 | 2 3 4 5 6 7 |
++ * E2M index| 0 1 2 3 4 5 6 7 | 0 1 2 3 4 5 |
++ * BAR index| 2 3 4 5 2 3 4 5 | 0 1 2 3 4 5 |
++ * SCU BAR | 3c 4c 5c 6c 3c 4c 5c 6c | 1c 50 3c 4c 5c 6c |
++ */
++static u32 ast2700_scu_bar_offset[] = { 0x3c, 0x4c, 0x5c, 0x6c, 0x3c, 0x4c, 0x5c,
++ 0x6c, 0x1c, 0x50, 0x3c, 0x4c, 0x5c, 0x6c };
++static u32 ast2700_e2m_pid[] = { 3, 4, 5, 6, 11, 12, 13, 14, 2, 3, 4, 5, 6, 7 };
++
++static int aspeed_ast2700_pcie_mmbi_init(struct platform_device *pdev)
++{
++ struct aspeed_pcie_mmbi *mmbi = platform_get_drvdata(pdev);
++ struct device *dev = &pdev->dev;
++ u32 value, sprot_size, e2m_index, pid;
++ int ret, i;
++
++ mmbi->e2m_index = mmbi->id % 8;
++ e2m_index = mmbi->e2m_index;
++ pid = ast2700_e2m_pid[mmbi->id];
++ if (mmbi->id < 8) {
++ regmap_write(mmbi->device, 0x18, 0xFF000027);
++ mmbi->soc_index = 0;
++ } else {
++ regmap_write(mmbi->device, 0x18, 0x0C0C0027);
++ mmbi->soc_index = 1;
++ }
++
++ /* MSI */
++ regmap_update_bits(mmbi->device, 0x74, GENMASK(7, 4), BIT(7) | (5 << 4));
++
++ regmap_update_bits(mmbi->device, 0x70,
++ BIT(25) | BIT(17) | BIT(9) | BIT(1),
++ BIT(25) | BIT(17) | BIT(9) | BIT(1));
++
++ /* Create MISC device for MMBI */
++ mmbi->mdev.parent = dev;
++ mmbi->mdev.minor = MISC_DYNAMIC_MINOR;
++ mmbi->mdev.name =
++ devm_kasprintf(dev, GFP_KERNEL, "pcie%d-mmbi%d", mmbi->soc_index, e2m_index);
++	mmbi->mdev.fops = &aspeed_pcie_mmbi_fops;
++ ret = misc_register(&mmbi->mdev);
++ if (ret) {
++ dev_err(dev, "cannot register device %s\n", mmbi->mdev.name);
++ return ret;
++ }
++ init_waitqueue_head(&mmbi->wq);
++
++ /* Calculate the BAR Size */
++ for (i = 1; i < 16; i++) {
++ /* bar size check for 4k align */
++		/* Check that the BAR size is a power-of-two multiple of 4K */
++ break;
++ }
++ if (i == 16) {
++ i = 0;
++		dev_warn(mmbi->dev, "BAR size is not a power-of-two multiple of 4K: %dK\n",
++ (u32)mmbi->mem_size / 1024);
++ }
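++	/*
++	 * Example: a 64KB window gives 64K/4K == 16 == 1 << 4, so i == 5; the
++	 * writes below encode the physical base (>> 4) in the upper bits and
++	 * this size code in the low bits of the SCU BAR and E2M ADRMAP values.
++	 */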
++ regmap_write(mmbi->device, ast2700_scu_bar_offset[mmbi->id], (mmbi->mem_phy >> 4) | i);
++ regmap_write(mmbi->e2m, ASPEED_E2M_ADRMAP00 + (4 * pid), (mmbi->mem_phy >> 4) | i);
++
++ /* BMC Interrupt */
++ if (mmbi->bmc_int_en) {
++ value = mmbi->mem_phy + mmbi->bmc_int_offset;
++ regmap_write(mmbi->e2m, ASPEED_E2M_WIRQA0 + (4 * e2m_index), value);
++ value = (BIT(16) << pid) | mmbi->bmc_int_byte;
++ regmap_write(mmbi->e2m, ASPEED_E2M_WIRQV0 + (4 * e2m_index), value);
++ }
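++	/*
++	 * With this programmed, a host write of bmc_int_byte at bmc_int_offset
++	 * in the shared window presumably raises the E2M interrupt serviced by
++	 * aspeed_pcie_mmbi_isr(), which wakes any poll()ers.
++	 */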
++
++ /* HOST Interrupt: MSI */
++ regmap_read(mmbi->e2m, ASPEED_E2M_EVENT_EN, &value);
++ value |= BIT(e2m_index);
++ regmap_write(mmbi->e2m, ASPEED_E2M_EVENT_EN, value);
++
++ /* B2H Write Protect */
++ sprot_size = (mmbi->mem_size / 2) / SZ_1M;
++ value = (sprot_size << 16) | (mmbi->mem_phy >> 20);
++ regmap_write(mmbi->e2m, ASPEED_E2M_SPROT_ADR0 + (4 * e2m_index), value);
++	/* Enable read & disable write */
++ value = 1 << (8 + e2m_index);
++ regmap_write(mmbi->e2m, ASPEED_E2M_SPROT_CTL0 + (4 * e2m_index), value);
++ /* Set PID */
++ regmap_read(mmbi->e2m, ASPEED_E2M_SPROT_SIDG0 + (4 * (e2m_index / 4)), &value);
++ value |= pid << (8 * (e2m_index % 4));
++ regmap_write(mmbi->e2m, ASPEED_E2M_SPROT_SIDG0 + (4 * (e2m_index / 4)), value);
++
++ return 0;
++}
++
++struct aspeed_platform ast2700_platform = {
++ .mmbi_init = aspeed_ast2700_pcie_mmbi_init,
++};
++
++static const struct of_device_id aspeed_pcie_mmbi_of_matches[] = {
++ { .compatible = "aspeed,ast2700-pcie-mmbi", .data = &ast2700_platform },
++ {},
++};
++MODULE_DEVICE_TABLE(of, aspeed_pcie_mmbi_of_matches);
++
++static int aspeed_pcie_mmbi_probe(struct platform_device *pdev)
++{
++ struct aspeed_pcie_mmbi *mmbi;
++ struct device *dev = &pdev->dev;
++ struct resource res;
++ struct device_node *np;
++ const void *md;
++ int ret = 0;
++
++ md = of_device_get_match_data(dev);
++ if (!md)
++ return -ENODEV;
++
++ mmbi = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_pcie_mmbi), GFP_KERNEL);
++ if (!mmbi)
++ return -ENOMEM;
++ dev_set_drvdata(dev, mmbi);
++
++ mmbi->dev = dev;
++ mmbi->platform = md;
++
++	/* Get register map */
++ mmbi->e2m = syscon_node_to_regmap(dev->of_node->parent);
++ if (IS_ERR(mmbi->e2m)) {
++ dev_err(&pdev->dev, "failed to find e2m regmap\n");
++ ret = PTR_ERR(mmbi->e2m);
++ goto out_region;
++ }
++
++ mmbi->device = syscon_regmap_lookup_by_phandle(dev->of_node->parent, "aspeed,device");
++ if (IS_ERR(mmbi->device)) {
++ dev_err(&pdev->dev, "failed to find device regmap\n");
++ ret = PTR_ERR(mmbi->device);
++ goto out_region;
++ }
++
++ /* Get MMBI memory size */
++ np = of_parse_phandle(dev->of_node, "memory-region", 0);
++ if (!np || of_address_to_resource(np, 0, &res)) {
++ dev_err(dev, "Failed to find memory-region.\n");
++ ret = -ENOMEM;
++ goto out_region;
++ }
++
++ of_node_put(np);
++
++ mmbi->mem_phy = res.start;
++ mmbi->mem_size = resource_size(&res);
++ mmbi->mem_virt = devm_ioremap_resource(dev, &res);
++	if (IS_ERR(mmbi->mem_virt)) {
++		dev_err(dev, "cannot map mmbi memory region\n");
++		ret = PTR_ERR(mmbi->mem_virt);
++ goto out_region;
++ }
++
++ /* Get IRQ */
++ mmbi->irq = platform_get_irq(pdev, 0);
++ if (mmbi->irq < 0) {
++		dev_err(&pdev->dev, "failed to get IRQ: %d\n", mmbi->irq);
++		ret = mmbi->irq;
++		goto out_unmap;
++ }
++ ret = devm_request_irq(&pdev->dev, mmbi->irq, aspeed_pcie_mmbi_isr, 0,
++ dev_name(&pdev->dev), mmbi);
++ if (ret) {
++ dev_err(dev, "pcie mmbi unable to get IRQ");
++ goto out_unmap;
++ }
++
++ init_waitqueue_head(&mmbi->bmc_int_wq);
++
++ mmbi->id = of_alias_get_id(dev->of_node, "pcie_mmbi");
++ if (mmbi->id < 0) {
++		dev_err(dev, "cannot get a valid E2M index value\n");
++		ret = mmbi->id;
++		goto out_irq;
++ }
++
++ mmbi->bmc_int_en = true;
++ /* H2B Interrupt */
++ ret = of_property_read_u8(dev->of_node, "mmbi-bmc-int-value", &mmbi->bmc_int_byte);
++ if (ret) {
++ dev_err(dev, "cannot get valid MMBI H2B interrupt byte\n");
++ mmbi->bmc_int_en = false;
++ }
++ ret = of_property_read_u32(dev->of_node, "mmbi-bmc-int-offset", &mmbi->bmc_int_offset);
++ if (ret) {
++ dev_err(dev, "cannot get valid MMBI H2B interrupt offset\n");
++ mmbi->bmc_int_en = false;
++ }
++
++ ret = mmbi->platform->mmbi_init(pdev);
++ if (ret) {
++ dev_err(dev, "Initialize pcie mmbi failed\n");
++ goto out_irq;
++ }
++
++ dev_info(dev, "ASPEED PCIe MMBI Dev %d: driver successfully loaded.\n", mmbi->id);
++
++ return 0;
++out_irq:
++ devm_free_irq(dev, mmbi->irq, mmbi);
++out_unmap:
++ devm_iounmap(dev, mmbi->mem_virt);
++out_region:
++ devm_kfree(dev, mmbi);
++	dev_warn(dev, "aspeed pcie mmbi: driver init failed (ret=%d)!\n", ret);
++ return ret;
++}
++
++static int aspeed_pcie_mmbi_remove(struct platform_device *pdev)
++{
++ struct aspeed_pcie_mmbi *mmbi = platform_get_drvdata(pdev);
++
++ misc_deregister(&mmbi->mdev);
++ devm_free_irq(&pdev->dev, mmbi->irq, mmbi);
++ devm_iounmap(&pdev->dev, mmbi->mem_virt);
++ devm_kfree(&pdev->dev, mmbi);
++
++ return 0;
++}
++
++static struct platform_driver aspeed_pcie_mmbi_driver = {
++ .probe = aspeed_pcie_mmbi_probe,
++ .remove = aspeed_pcie_mmbi_remove,
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .of_match_table = aspeed_pcie_mmbi_of_matches,
++ },
++};
++
++module_platform_driver(aspeed_pcie_mmbi_driver);
++
++MODULE_AUTHOR("Jacky Chou <jacky_chou@aspeedtech.com>");
++MODULE_DESCRIPTION("ASPEED PCI-E MMBI Driver");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/soc/aspeed/aspeed-pcie-mmbi.h b/drivers/soc/aspeed/aspeed-pcie-mmbi.h
+new file mode 100644
+index 000000000..0685c9796
+--- /dev/null
++++ b/drivers/soc/aspeed/aspeed-pcie-mmbi.h
+@@ -0,0 +1,20 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Copyright 2024 Aspeed Technology Inc.
++ */
++#ifndef __ASPEED_PCIE_MMBI_H__
++#define __ASPEED_PCIE_MMBI_H__
++
++#include <linux/ioctl.h>
++#include <linux/types.h>
++
++#define __ASPEED_PCIE_MMBI_MAGIC 0xb8
++
++/*
++ * - ASPEED_PCIE_MMBI_HOST_INT
++ *   Trigger a Host interrupt
++ */
++#define ASPEED_PCIE_MMBI_HOST_INT _IO(__ASPEED_PCIE_MMBI_MAGIC, \
++ 0x00)
++
++#endif
+diff --git a/drivers/soc/aspeed/aspeed-sbc.c b/drivers/soc/aspeed/aspeed-sbc.c
+new file mode 100644
+index 000000000..be4497b41
+--- /dev/null
++++ b/drivers/soc/aspeed/aspeed-sbc.c
+@@ -0,0 +1,73 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/* Copyright 2022 IBM Corp. */
++
++#include <linux/io.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/of_platform.h>
++#include <linux/debugfs.h>
++
++#define SEC_STATUS 0x14
++#define ABR_IMAGE_SOURCE BIT(13)
++#define OTP_PROTECTED BIT(8)
++#define LOW_SEC_KEY BIT(7)
++#define SECURE_BOOT BIT(6)
++#define UART_BOOT BIT(5)
++
++struct sbe {
++ u8 abr_image;
++ u8 low_security_key;
++ u8 otp_protected;
++ u8 secure_boot;
++ u8 invert;
++ u8 uart_boot;
++};
++
++static struct sbe sbe;
++
++static int __init aspeed_sbc_init(void)
++{
++ struct device_node *np;
++ void __iomem *base;
++ struct dentry *sbc_dir;
++ u32 security_status;
++
++ /* AST2600 only */
++ np = of_find_compatible_node(NULL, NULL, "aspeed,ast2600-sbc");
++	if (!of_device_is_available(np)) {
++		of_node_put(np);
++		return -ENODEV;
++	}
++
++ base = of_iomap(np, 0);
++ if (!base) {
++ of_node_put(np);
++ return -ENODEV;
++ }
++
++ security_status = readl(base + SEC_STATUS);
++
++ iounmap(base);
++ of_node_put(np);
++
++ sbe.abr_image = !!(security_status & ABR_IMAGE_SOURCE);
++ sbe.low_security_key = !!(security_status & LOW_SEC_KEY);
++ sbe.otp_protected = !!(security_status & OTP_PROTECTED);
++ sbe.secure_boot = !!(security_status & SECURE_BOOT);
++ /* Invert the bit, as 1 is boot from SPI/eMMC */
++ sbe.uart_boot = !(security_status & UART_BOOT);
++
++ pr_info("AST2600 secure boot %s\n", sbe.secure_boot ? "enabled" : "disabled");
++
++ sbc_dir = debugfs_create_dir("sbc", arch_debugfs_dir);
++ if (IS_ERR(sbc_dir))
++ return PTR_ERR(sbc_dir);
++
++ debugfs_create_u8("abr_image", 0444, sbc_dir, &sbe.abr_image);
++ debugfs_create_u8("low_security_key", 0444, sbc_dir, &sbe.low_security_key);
++ debugfs_create_u8("otp_protected", 0444, sbc_dir, &sbe.otp_protected);
++ debugfs_create_u8("uart_boot", 0444, sbc_dir, &sbe.uart_boot);
++ debugfs_create_u8("secure_boot", 0444, sbc_dir, &sbe.secure_boot);
++
++ return 0;
++}
++
++subsys_initcall(aspeed_sbc_init);
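aspeed-sbc.c publishes its state only through debugfs, so consumers simply read the u8 files it creates. A sketch is below; the path assumes debugfs is mounted at /sys/kernel/debug and that the "sbc" directory lands at the debugfs root (the actual parent is the architecture's arch_debugfs_dir, which varies).
```
/* Illustrative only: report the secure-boot flag exported by aspeed-sbc.c. */
#include <stdio.h>

int main(void)
{
	unsigned int secure_boot = 0;
	/* Path is an assumption; adjust for your debugfs mount and arch dir. */
	FILE *f = fopen("/sys/kernel/debug/sbc/secure_boot", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%u", &secure_boot) == 1)
		printf("secure boot %s\n", secure_boot ? "enabled" : "disabled");
	fclose(f);
	return 0;
}
```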
+diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c
+index 3f759121d..7ee7f2911 100644
+--- a/drivers/soc/aspeed/aspeed-socinfo.c
++++ b/drivers/soc/aspeed/aspeed-socinfo.c
+@@ -12,7 +12,9 @@
+ static struct {
+ const char *name;
+ const u32 id;
+-} const rev_table[] = {
++}
++
++const rev_table[] = {
+ /* AST2400 */
+ { "AST2400", 0x02000303 },
+ { "AST1400", 0x02010103 },
+@@ -27,6 +29,10 @@ static struct {
+ { "AST2620", 0x05010203 },
+ { "AST2605", 0x05030103 },
+ { "AST2625", 0x05030403 },
++ /* AST2700 */
++ { "AST2750", 0x06000003 },
++ { "AST2700", 0x06000103 },
++ { "AST2720", 0x06000203 },
+ };
+
+ static const char *siliconid_to_name(u32 siliconid)
+@@ -57,7 +63,7 @@ static const char *siliconid_to_rev(u32 siliconid)
+ case 3:
+ return "A2";
+ }
+- } else {
++	} else if (gen == 0x5) {
+ /* AST2600 */
+ switch (rev) {
+ case 0:
+@@ -69,6 +75,12 @@ static const char *siliconid_to_rev(u32 siliconid)
+ case 3:
+ return "A3";
+ }
++ } else {
++ /* AST2700 */
++ switch (rev) {
++ case 0:
++ return "A0";
++ }
+ }
+
+ return "??";
+diff --git a/drivers/soc/aspeed/aspeed-ssp.c b/drivers/soc/aspeed/aspeed-ssp.c
+new file mode 100644
+index 000000000..359b3ae85
+--- /dev/null
++++ b/drivers/soc/aspeed/aspeed-ssp.c
+@@ -0,0 +1,277 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++// Copyright (C) ASPEED Technology Inc.
++
++#include <linux/io.h>
++#include <linux/fs.h>
++#include <linux/mod_devicetable.h>
++#include <linux/module.h>
++#include <linux/miscdevice.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/firmware.h>
++#include <linux/platform_device.h>
++#include <linux/interrupt.h>
++#include <linux/of.h>
++#include <linux/of_reserved_mem.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
++#include <linux/mfd/syscon.h>
++#include <linux/regmap.h>
++#include <linux/dma-direct.h>
++
++#define SSP_FILE_NAME "ast2600_ssp.bin"
++#define AST2600_CVIC_TRIGGER 0x28
++#define AST2600_CVIC_PENDING_STATUS 0x18
++#define AST2600_CVIC_PENDING_CLEAR 0x1C
++
++#define SSP_CTRL_REG 0xa00
++#define SSP_CTRL_RESET_ASSERT BIT(1)
++#define SSP_CTRL_EN BIT(0)
++
++#define SSP_MEM_BASE_REG 0xa04
++#define SSP_IMEM_LIMIT_REG 0xa08
++#define SSP_DMEM_LIMIT_REG 0xa0c
++#define SSP_CACHE_RANGE_REG 0xa40
++#define SSP_CACHE_INVALID_REG 0xa44
++#define SSP_CACHE_CTRL_REG 0xa48
++#define SSP_CACHE_CLEAR_ICACHE BIT(2)
++#define SSP_CACHE_CLEAR_DCACHE BIT(1)
++#define SSP_CACHE_EN BIT(0)
++
++#define SSP_TOTAL_MEM_SZ (32 * 1024 * 1024)
++#define SSP_CACHED_MEM_SZ (16 * 1024 * 1024)
++#define SSP_UNCACHED_MEM_SZ (SSP_TOTAL_MEM_SZ - SSP_CACHED_MEM_SZ)
++#define SSP_CACHE_1ST_16MB_ENABLE BIT(0)
++
++struct ast2600_ssp {
++ struct device *dev;
++ struct regmap *scu;
++ dma_addr_t ssp_mem_phy_addr;
++ void __iomem *ssp_mem_vir_addr;
++ dma_addr_t ssp_shared_mem_phy_addr;
++ void __iomem *ssp_shared_mem_vir_addr;
++	u32 ssp_shared_mem_size;
++ void __iomem *cvic;
++ int irq[16];
++ int n_irq;
++};
++
++static int ast_ssp_open(struct inode *inode, struct file *file)
++{
++ return 0;
++}
++
++static int ast_ssp_release(struct inode *inode, struct file *file)
++{
++ return 0;
++}
++
++static const struct file_operations ast_ssp_fops = {
++ .owner = THIS_MODULE,
++ .open = ast_ssp_open,
++ .release = ast_ssp_release,
++ .llseek = no_llseek,
++};
++
++static struct miscdevice ast_ssp_misc = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = "ast-ssp",
++ .fops = &ast_ssp_fops,
++};
++
++static irqreturn_t ast2600_ssp_interrupt(int irq, void *dev_id)
++{
++ u32 i;
++ struct ast2600_ssp *priv = dev_id;
++ u32 isr = readl(priv->cvic + AST2600_CVIC_PENDING_STATUS);
++ u32 ssp_shared_rx_tx_size = priv->ssp_shared_mem_size / 2;
++ u32 *ssp_shared_mem_tx = priv->ssp_shared_mem_vir_addr;
++ u32 *ssp_shared_mem_rx = priv->ssp_shared_mem_vir_addr + ssp_shared_rx_tx_size;
++
++ dev_info(priv->dev, "isr %x\n", isr);
++ writel(isr, priv->cvic + AST2600_CVIC_PENDING_CLEAR);
++
++ dev_info(priv->dev, "[CA7] rx addr:%08x, tx addr:%08x\n",
++ (u32)ssp_shared_mem_rx, (u32)ssp_shared_mem_tx);
++
++ /* Check the CA7 RX data from CM3 TX data. */
++ dev_info(priv->dev, "CA7 RX data from CM3 TX data: ");
++ for (i = 0; i < ssp_shared_rx_tx_size / 4; i++) {
++ if (readl(ssp_shared_mem_rx + i) != 0) {
++ dev_info(priv->dev, "[%08x] %08x ",
++ (u32)(ssp_shared_mem_rx + i), readl(ssp_shared_mem_rx + i));
++ } else {
++ break;
++ }
++ }
++
++ return IRQ_HANDLED;
++}
++
++static int ast_ssp_probe(struct platform_device *pdev)
++{
++ struct device_node *np, *mnode = dev_of_node(&pdev->dev);
++ const struct firmware *firmware;
++ struct ast2600_ssp *priv;
++ struct reserved_mem *rmem;
++ int i, ret;
++
++ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++ if (!priv) {
++ ret = -ENOMEM;
++ goto finish;
++ }
++
++ priv->dev = &pdev->dev;
++ priv->scu = syscon_regmap_lookup_by_phandle(priv->dev->of_node, "aspeed,scu");
++ if (IS_ERR(priv->scu)) {
++ dev_err(priv->dev, "failed to find SCU regmap\n");
++ ret = -EINVAL;
++ goto finish;
++ }
++ platform_set_drvdata(pdev, priv);
++
++ ret = misc_register(&ast_ssp_misc);
++ if (ret) {
++		dev_err(priv->dev, "failed to register misc device (%d)\n", ret);
++ goto finish;
++ }
++ dev_set_drvdata(ast_ssp_misc.this_device, pdev);
++
++ ret = of_reserved_mem_device_init(&pdev->dev);
++ if (ret) {
++ dev_err(priv->dev,
++ "failed to initialize reserved mem: %d\n", ret);
++ ret = -ENOMEM;
++ goto finish;
++ }
++
++ np = of_parse_phandle(priv->dev->of_node, "memory-region", 0);
++ if (!np) {
++ dev_err(priv->dev, "can't find memory-region node\n");
++ ret = -ENOMEM;
++ goto finish;
++ }
++
++ rmem = of_reserved_mem_lookup(np);
++ of_node_put(np);
++ if (!rmem) {
++ dev_err(priv->dev, "can't find reserved memory.\n");
++ ret = -ENOMEM;
++ goto finish;
++ } else {
++ priv->ssp_mem_phy_addr = rmem->base;
++ priv->ssp_mem_vir_addr = devm_ioremap(priv->dev, priv->ssp_mem_phy_addr, SSP_TOTAL_MEM_SZ);
++ if (!priv->ssp_mem_vir_addr) {
++ dev_err(priv->dev, "can't create reserved memory.\n");
++ ret = -ENOMEM;
++ goto finish;
++ } else {
++ dev_info(priv->dev, "\nSSP memory: virt(0x%08x), phys(0x%08x)\n",
++ (uint32_t)priv->ssp_mem_vir_addr, priv->ssp_mem_phy_addr);
++ }
++ }
++
++ if (of_property_read_u32(np, "shm-size", &priv->ssp_shared_mem_size)) {
++ dev_err(priv->dev, "can't find shm-size property\n");
++ ret = -ENOMEM;
++ goto finish;
++ }
++
++ priv->ssp_shared_mem_vir_addr = priv->ssp_mem_vir_addr + SSP_TOTAL_MEM_SZ
++ - priv->ssp_shared_mem_size;
++ priv->ssp_shared_mem_phy_addr = priv->ssp_mem_phy_addr + SSP_TOTAL_MEM_SZ
++ - priv->ssp_shared_mem_size;
++ dev_info(priv->dev, "\nSSP shared memory: virt(0x%08x), phys(0x%08x), size(0x%08x)\n",
++ (uint32_t)priv->ssp_shared_mem_vir_addr, priv->ssp_shared_mem_phy_addr,
++ priv->ssp_shared_mem_size);
++
++ if (request_firmware(&firmware, SSP_FILE_NAME, priv->dev) < 0) {
++ dev_err(priv->dev, "don't have %s\n", SSP_FILE_NAME);
++ release_firmware(firmware);
++ ret = -EINVAL;
++ goto finish;
++ }
++
++ memcpy(priv->ssp_mem_vir_addr, (void *)firmware->data, firmware->size);
++ release_firmware(firmware);
++
++ np = of_parse_phandle(mnode, "aspeed,cvic", 0);
++ if (!np) {
++ dev_err(priv->dev, "can't find CVIC\n");
++ ret = -EINVAL;
++ goto finish;
++ }
++
++ priv->cvic = devm_of_iomap(priv->dev, np, 0, NULL);
++ if (IS_ERR(priv->cvic)) {
++ dev_err(priv->dev, "can't map CVIC\n");
++ ret = -EINVAL;
++ goto finish;
++ }
++
++ i = 0;
++	while (i < ARRAY_SIZE(priv->irq) &&
++	       (priv->irq[i] = irq_of_parse_and_map(mnode, i)) != 0) {
++		ret = request_irq(priv->irq[i], ast2600_ssp_interrupt, 0,
++				  "ssp-sw-irq", priv);
++		i++;
++	}
++ priv->n_irq = i;
++ dev_info(priv->dev, "%d ISRs registered\n", priv->n_irq);
++
++ regmap_write(priv->scu, SSP_CTRL_REG, 0);
++ mdelay(1);
++ regmap_write(priv->scu, SSP_MEM_BASE_REG, priv->ssp_mem_phy_addr);
++ regmap_write(priv->scu, SSP_IMEM_LIMIT_REG, priv->ssp_mem_phy_addr + SSP_CACHED_MEM_SZ);
++ regmap_write(priv->scu, SSP_DMEM_LIMIT_REG, priv->ssp_mem_phy_addr + SSP_TOTAL_MEM_SZ);
++
++ regmap_write(priv->scu, SSP_CACHE_RANGE_REG, SSP_CACHE_1ST_16MB_ENABLE);
++
++ regmap_write(priv->scu, SSP_CTRL_REG, SSP_CTRL_RESET_ASSERT);
++ mdelay(1);
++ regmap_write(priv->scu, SSP_CTRL_REG, 0);
++ mdelay(1);
++ regmap_write(priv->scu, SSP_CTRL_REG, SSP_CTRL_EN);
++
++ dev_info(priv->dev, "Init successful\n");
++ ret = 0;
++finish:
++ return ret;
++}
++
++static int ast_ssp_remove(struct platform_device *pdev)
++{
++ struct ast2600_ssp *priv = platform_get_drvdata(pdev);
++ int i;
++
++ dev_info(priv->dev, "SSP module removed\n");
++ regmap_write(priv->scu, SSP_CTRL_REG, 0);
++ for (i = 0; i < priv->n_irq; i++)
++ free_irq(priv->irq[i], priv);
++
++ kfree(priv);
++
++	misc_deregister(&ast_ssp_misc);
++
++ return 0;
++}
++
++static const struct of_device_id of_ast_ssp_match_table[] = {
++ { .compatible = "aspeed,ast2600-ssp", },
++ {},
++};
++MODULE_DEVICE_TABLE(of, of_ast_ssp_match_table);
++
++static struct platform_driver ast_ssp_driver = {
++ .probe = ast_ssp_probe,
++ .remove = ast_ssp_remove,
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .of_match_table = of_ast_ssp_match_table,
++ },
++};
++
++module_platform_driver(ast_ssp_driver);
++
++MODULE_LICENSE("Dual BSD/GPL");
+diff --git a/drivers/soc/aspeed/aspeed-uart-routing.c b/drivers/soc/aspeed/aspeed-uart-routing.c
+index 3a4c1f28c..5453a7bf3 100644
+--- a/drivers/soc/aspeed/aspeed-uart-routing.c
++++ b/drivers/soc/aspeed/aspeed-uart-routing.c
+@@ -15,20 +15,30 @@
+ #define HICRA 0x9c
+
+ /* attributes options */
++#define UART_ROUTING_IO0 "io0"
+ #define UART_ROUTING_IO1 "io1"
+ #define UART_ROUTING_IO2 "io2"
+ #define UART_ROUTING_IO3 "io3"
+ #define UART_ROUTING_IO4 "io4"
+ #define UART_ROUTING_IO5 "io5"
+ #define UART_ROUTING_IO6 "io6"
++#define UART_ROUTING_IO7 "io7"
++#define UART_ROUTING_IO8 "io8"
++#define UART_ROUTING_IO9 "io9"
+ #define UART_ROUTING_IO10 "io10"
++#define UART_ROUTING_IO12 "io12"
++#define UART_ROUTING_UART0 "uart0"
+ #define UART_ROUTING_UART1 "uart1"
+ #define UART_ROUTING_UART2 "uart2"
+ #define UART_ROUTING_UART3 "uart3"
+ #define UART_ROUTING_UART4 "uart4"
+ #define UART_ROUTING_UART5 "uart5"
+ #define UART_ROUTING_UART6 "uart6"
++#define UART_ROUTING_UART7 "uart7"
++#define UART_ROUTING_UART8 "uart8"
++#define UART_ROUTING_UART9 "uart9"
+ #define UART_ROUTING_UART10 "uart10"
++#define UART_ROUTING_UART12 "uart12"
+ #define UART_ROUTING_RES "reserved"
+
+ struct aspeed_uart_routing {
+@@ -488,6 +498,416 @@ static const struct attribute_group ast2600_uart_routing_attr_group = {
+ .attrs = ast2600_uart_routing_attrs,
+ };
+
++/* routing selector for AST27xx node 0 */
++static struct aspeed_uart_routing_selector ast2700n0_uart9_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART9),
++ .reg = HICR9,
++ .shift = 12,
++ .mask = 0xf,
++ .options = {
++ UART_ROUTING_IO9,
++ UART_ROUTING_IO0,
++ UART_ROUTING_IO1,
++ UART_ROUTING_IO2,
++ UART_ROUTING_IO3,
++ UART_ROUTING_RES,
++ UART_ROUTING_UART0,
++ UART_ROUTING_UART1,
++ UART_ROUTING_UART2,
++ UART_ROUTING_UART3,
++ UART_ROUTING_UART12,
++ NULL,
++ },
++};
++
++static struct aspeed_uart_routing_selector ast2700n0_io9_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO9),
++ .reg = HICR9,
++ .shift = 8,
++ .mask = 0xf,
++ .options = {
++ UART_ROUTING_UART0,
++ UART_ROUTING_UART1,
++ UART_ROUTING_UART2,
++ UART_ROUTING_UART3,
++ UART_ROUTING_UART12,
++ UART_ROUTING_IO0,
++ UART_ROUTING_IO1,
++ UART_ROUTING_IO2,
++ UART_ROUTING_IO3,
++ UART_ROUTING_RES,
++ UART_ROUTING_UART9,
++ NULL,
++ },
++};
++
++static struct aspeed_uart_routing_selector ast2700n0_uart3_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART3),
++ .reg = HICRA,
++ .shift = 25,
++ .mask = 0x7,
++ .options = {
++ UART_ROUTING_IO3,
++ UART_ROUTING_IO0,
++ UART_ROUTING_IO1,
++ UART_ROUTING_IO2,
++ UART_ROUTING_UART0,
++ UART_ROUTING_UART1,
++ UART_ROUTING_UART2,
++ UART_ROUTING_IO9,
++ NULL,
++ },
++};
++
++static struct aspeed_uart_routing_selector ast2700n0_uart2_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART2),
++ .reg = HICRA,
++ .shift = 22,
++ .mask = 0x7,
++ .options = {
++ UART_ROUTING_IO2,
++ UART_ROUTING_IO3,
++ UART_ROUTING_IO0,
++ UART_ROUTING_IO1,
++ UART_ROUTING_UART3,
++ UART_ROUTING_UART0,
++ UART_ROUTING_UART1,
++ UART_ROUTING_IO9,
++ NULL,
++ },
++};
++
++static struct aspeed_uart_routing_selector ast2700n0_uart1_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART1),
++ .reg = HICRA,
++ .shift = 19,
++ .mask = 0x7,
++ .options = {
++ UART_ROUTING_IO1,
++ UART_ROUTING_IO2,
++ UART_ROUTING_IO3,
++ UART_ROUTING_IO0,
++ UART_ROUTING_UART2,
++ UART_ROUTING_UART3,
++ UART_ROUTING_UART0,
++ UART_ROUTING_IO9,
++ NULL,
++ },
++};
++
++static struct aspeed_uart_routing_selector ast2700n0_uart0_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART0),
++ .reg = HICRA,
++ .shift = 16,
++ .mask = 0x7,
++ .options = {
++ UART_ROUTING_IO0,
++ UART_ROUTING_IO1,
++ UART_ROUTING_IO2,
++ UART_ROUTING_IO3,
++ UART_ROUTING_UART1,
++ UART_ROUTING_UART2,
++ UART_ROUTING_UART3,
++ UART_ROUTING_IO9,
++ NULL,
++ },
++};
++
++static struct aspeed_uart_routing_selector ast2700n0_io3_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO3),
++ .reg = HICRA,
++ .shift = 9,
++ .mask = 0x7,
++ .options = {
++ UART_ROUTING_UART3,
++ UART_ROUTING_UART9,
++ UART_ROUTING_UART0,
++ UART_ROUTING_UART1,
++ UART_ROUTING_UART2,
++ UART_ROUTING_IO0,
++ UART_ROUTING_IO1,
++ UART_ROUTING_IO9,
++ NULL,
++ },
++};
++
++static struct aspeed_uart_routing_selector ast2700n0_io2_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO2),
++ .reg = HICRA,
++ .shift = 6,
++ .mask = 0x7,
++ .options = {
++ UART_ROUTING_UART2,
++ UART_ROUTING_UART3,
++ UART_ROUTING_UART9,
++ UART_ROUTING_UART0,
++ UART_ROUTING_UART1,
++ UART_ROUTING_IO0,
++ UART_ROUTING_IO1,
++ UART_ROUTING_IO9,
++ NULL,
++ },
++};
++
++static struct aspeed_uart_routing_selector ast2700n0_io1_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO1),
++ .reg = HICRA,
++ .shift = 3,
++ .mask = 0x7,
++ .options = {
++ UART_ROUTING_UART1,
++ UART_ROUTING_UART2,
++ UART_ROUTING_UART3,
++ UART_ROUTING_UART9,
++ UART_ROUTING_UART0,
++ UART_ROUTING_IO2,
++ UART_ROUTING_IO3,
++ UART_ROUTING_IO9,
++ NULL,
++ },
++};
++
++static struct aspeed_uart_routing_selector ast2700n0_io0_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO0),
++ .reg = HICRA,
++ .shift = 0,
++ .mask = 0x7,
++ .options = {
++ UART_ROUTING_UART0,
++ UART_ROUTING_UART1,
++ UART_ROUTING_UART2,
++ UART_ROUTING_UART3,
++ UART_ROUTING_UART9,
++ UART_ROUTING_IO2,
++ UART_ROUTING_IO3,
++ UART_ROUTING_IO9,
++ NULL,
++ },
++};
++
++static struct attribute *ast2700n0_uart_routing_attrs[] = {
++ &ast2700n0_uart9_sel.dev_attr.attr,
++ &ast2700n0_io9_sel.dev_attr.attr,
++ &ast2700n0_uart3_sel.dev_attr.attr,
++ &ast2700n0_uart2_sel.dev_attr.attr,
++ &ast2700n0_uart1_sel.dev_attr.attr,
++ &ast2700n0_uart0_sel.dev_attr.attr,
++ &ast2700n0_io3_sel.dev_attr.attr,
++ &ast2700n0_io2_sel.dev_attr.attr,
++ &ast2700n0_io1_sel.dev_attr.attr,
++ &ast2700n0_io0_sel.dev_attr.attr,
++ NULL,
++};
++
++static const struct attribute_group ast2700n0_uart_routing_attr_group = {
++ .attrs = ast2700n0_uart_routing_attrs,
++};
++
++/* routing selector for AST27xx node 1 */
++static struct aspeed_uart_routing_selector ast2700n1_uart10_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART10),
++ .reg = HICR9,
++ .shift = 12,
++ .mask = 0xf,
++ .options = {
++ UART_ROUTING_IO10,
++ UART_ROUTING_IO5,
++ UART_ROUTING_IO6,
++ UART_ROUTING_IO7,
++ UART_ROUTING_IO8,
++ UART_ROUTING_RES,
++ UART_ROUTING_UART5,
++ UART_ROUTING_UART6,
++ UART_ROUTING_UART7,
++ UART_ROUTING_UART8,
++ UART_ROUTING_UART12,
++ NULL,
++ },
++};
++
++static struct aspeed_uart_routing_selector ast2700n1_io10_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO10),
++ .reg = HICR9,
++ .shift = 8,
++ .mask = 0xf,
++ .options = {
++ UART_ROUTING_UART5,
++ UART_ROUTING_UART6,
++ UART_ROUTING_UART7,
++ UART_ROUTING_UART8,
++ UART_ROUTING_UART12,
++ UART_ROUTING_IO5,
++ UART_ROUTING_IO6,
++ UART_ROUTING_IO7,
++ UART_ROUTING_IO8,
++ UART_ROUTING_RES,
++ UART_ROUTING_UART10,
++ NULL,
++ },
++};
++
++static struct aspeed_uart_routing_selector ast2700n1_uart8_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART8),
++ .reg = HICRA,
++ .shift = 25,
++ .mask = 0x7,
++ .options = {
++ UART_ROUTING_IO8,
++ UART_ROUTING_IO5,
++ UART_ROUTING_IO6,
++ UART_ROUTING_IO7,
++ UART_ROUTING_UART5,
++ UART_ROUTING_UART6,
++ UART_ROUTING_UART7,
++ UART_ROUTING_IO10,
++ NULL,
++ },
++};
++
++static struct aspeed_uart_routing_selector ast2700n1_uart7_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART7),
++ .reg = HICRA,
++ .shift = 22,
++ .mask = 0x7,
++ .options = {
++ UART_ROUTING_IO7,
++ UART_ROUTING_IO8,
++ UART_ROUTING_IO5,
++ UART_ROUTING_IO6,
++ UART_ROUTING_UART8,
++ UART_ROUTING_UART5,
++ UART_ROUTING_UART6,
++ UART_ROUTING_IO10,
++ NULL,
++ },
++};
++
++static struct aspeed_uart_routing_selector ast2700n1_uart6_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART6),
++ .reg = HICRA,
++ .shift = 19,
++ .mask = 0x7,
++ .options = {
++ UART_ROUTING_IO6,
++ UART_ROUTING_IO7,
++ UART_ROUTING_IO8,
++ UART_ROUTING_IO5,
++ UART_ROUTING_UART7,
++ UART_ROUTING_UART8,
++ UART_ROUTING_UART5,
++ UART_ROUTING_IO10,
++ NULL,
++ },
++};
++
++static struct aspeed_uart_routing_selector ast2700n1_uart5_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART5),
++ .reg = HICRA,
++ .shift = 16,
++ .mask = 0x7,
++ .options = {
++ UART_ROUTING_IO5,
++ UART_ROUTING_IO6,
++ UART_ROUTING_IO7,
++ UART_ROUTING_IO8,
++ UART_ROUTING_UART6,
++ UART_ROUTING_UART7,
++ UART_ROUTING_UART8,
++ UART_ROUTING_IO10,
++ NULL,
++ },
++};
++
++static struct aspeed_uart_routing_selector ast2700n1_io8_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO8),
++ .reg = HICRA,
++ .shift = 9,
++ .mask = 0x7,
++ .options = {
++ UART_ROUTING_UART8,
++ UART_ROUTING_UART10,
++ UART_ROUTING_UART5,
++ UART_ROUTING_UART6,
++ UART_ROUTING_UART7,
++ UART_ROUTING_IO5,
++ UART_ROUTING_IO6,
++ UART_ROUTING_IO10,
++ NULL,
++ },
++};
++
++static struct aspeed_uart_routing_selector ast2700n1_io7_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO7),
++ .reg = HICRA,
++ .shift = 6,
++ .mask = 0x7,
++ .options = {
++ UART_ROUTING_UART7,
++ UART_ROUTING_UART8,
++ UART_ROUTING_UART10,
++ UART_ROUTING_UART5,
++ UART_ROUTING_UART6,
++ UART_ROUTING_IO5,
++ UART_ROUTING_IO6,
++ UART_ROUTING_IO10,
++ NULL,
++ },
++};
++
++static struct aspeed_uart_routing_selector ast2700n1_io6_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO6),
++ .reg = HICRA,
++ .shift = 3,
++ .mask = 0x7,
++ .options = {
++ UART_ROUTING_UART6,
++ UART_ROUTING_UART7,
++ UART_ROUTING_UART8,
++ UART_ROUTING_UART10,
++ UART_ROUTING_UART5,
++ UART_ROUTING_IO7,
++ UART_ROUTING_IO8,
++ UART_ROUTING_IO10,
++ NULL,
++ },
++};
++
++static struct aspeed_uart_routing_selector ast2700n1_io5_sel = {
++ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO5),
++ .reg = HICRA,
++ .shift = 0,
++ .mask = 0x7,
++ .options = {
++ UART_ROUTING_UART5,
++ UART_ROUTING_UART6,
++ UART_ROUTING_UART7,
++ UART_ROUTING_UART8,
++ UART_ROUTING_UART10,
++ UART_ROUTING_IO7,
++ UART_ROUTING_IO8,
++ UART_ROUTING_IO10,
++ NULL,
++ },
++};
++
++static struct attribute *ast2700n1_uart_routing_attrs[] = {
++ &ast2700n1_uart10_sel.dev_attr.attr,
++ &ast2700n1_io10_sel.dev_attr.attr,
++ &ast2700n1_uart8_sel.dev_attr.attr,
++ &ast2700n1_uart7_sel.dev_attr.attr,
++ &ast2700n1_uart6_sel.dev_attr.attr,
++ &ast2700n1_uart5_sel.dev_attr.attr,
++ &ast2700n1_io8_sel.dev_attr.attr,
++ &ast2700n1_io7_sel.dev_attr.attr,
++ &ast2700n1_io6_sel.dev_attr.attr,
++ &ast2700n1_io5_sel.dev_attr.attr,
++ NULL,
++};
++
++static const struct attribute_group ast2700n1_uart_routing_attr_group = {
++ .attrs = ast2700n1_uart_routing_attrs,
++};
++
+ static ssize_t aspeed_uart_routing_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+@@ -565,14 +985,12 @@ static int aspeed_uart_routing_probe(struct platform_device *pdev)
+ return 0;
+ }
+
+-static int aspeed_uart_routing_remove(struct platform_device *pdev)
++static void aspeed_uart_routing_remove(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+ struct aspeed_uart_routing *uart_routing = platform_get_drvdata(pdev);
+
+ sysfs_remove_group(&dev->kobj, uart_routing->attr_grp);
+-
+- return 0;
+ }
+
+ static const struct of_device_id aspeed_uart_routing_table[] = {
+@@ -582,6 +1000,10 @@ static const struct of_device_id aspeed_uart_routing_table[] = {
+ .data = &ast2500_uart_routing_attr_group },
+ { .compatible = "aspeed,ast2600-uart-routing",
+ .data = &ast2600_uart_routing_attr_group },
++ { .compatible = "aspeed,ast2700n0-uart-routing",
++ .data = &ast2700n0_uart_routing_attr_group },
++ { .compatible = "aspeed,ast2700n1-uart-routing",
++ .data = &ast2700n1_uart_routing_attr_group },
+ { },
+ };
+
+@@ -591,7 +1013,7 @@ static struct platform_driver aspeed_uart_routing_driver = {
+ .of_match_table = aspeed_uart_routing_table,
+ },
+ .probe = aspeed_uart_routing_probe,
+- .remove = aspeed_uart_routing_remove,
++ .remove_new = aspeed_uart_routing_remove,
+ };
+
+ module_platform_driver(aspeed_uart_routing_driver);
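The two new AST2700 attribute groups surface as sysfs files named after each source (uart0..uart9/uart10, io0..io10); writing one of the option strings listed in its selector re-routes that source. A rough sketch follows; the sysfs directory is an assumption derived from the platform device name, so treat the path as illustrative only.
```
/*
 * Illustrative only: on an AST2700 node-1 routing block, feed UART10 from
 * physical IO10 and IO10 from UART10. The sysfs directory is assumed; it
 * follows the uart-routing node name on the platform bus.
 */
#include <stdio.h>

#define ROUTING_DIR "/sys/bus/platform/devices/uart-routing.1/"	/* assumed */

static int set_route(const char *attr, const char *option)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), ROUTING_DIR "%s", attr);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(option, f);
	return fclose(f);
}

int main(void)
{
	if (set_route("uart10", "io10") || set_route("io10", "uart10")) {
		perror("uart routing");
		return 1;
	}
	return 0;
}
```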
+diff --git a/drivers/soc/aspeed/aspeed-udma.c b/drivers/soc/aspeed/aspeed-udma.c
+new file mode 100644
+index 000000000..9f7b58fb7
+--- /dev/null
++++ b/drivers/soc/aspeed/aspeed-udma.c
+@@ -0,0 +1,433 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * Copyright 2020 Aspeed Technology Inc.
++ */
++#include <linux/bitfield.h>
++#include <linux/delay.h>
++#include <linux/dma-mapping.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <linux/platform_device.h>
++#include <linux/sizes.h>
++#include <linux/soc/aspeed/aspeed-udma.h>
++#include <linux/spinlock.h>
++
++#define DEVICE_NAME "aspeed-udma"
++
++/* UART DMA registers offset */
++#define UDMA_TX_DMA_EN 0x000
++#define UDMA_RX_DMA_EN 0x004
++#define UDMA_MISC 0x008
++#define UDMA_MISC_RX_BUFSZ GENMASK(3, 2)
++#define UDMA_MISC_TX_BUFSZ GENMASK(1, 0)
++#define UDMA_TMOUT_TIMER 0x00c
++#define UDMA_TX_DMA_RST 0x020
++#define UDMA_RX_DMA_RST 0x024
++#define UDMA_TX_DMA_INT_EN 0x030
++#define UDMA_TX_DMA_INT_STS 0x034
++#define UDMA_RX_DMA_INT_EN 0x038
++#define UDMA_RX_DMA_INT_STS 0x03c
++
++#define UDMA_CHX_OFF(x) ((x) * 0x20)
++#define UDMA_CHX_TX_RD_PTR(x) (0x040 + UDMA_CHX_OFF(x))
++#define UDMA_CHX_TX_WR_PTR(x) (0x044 + UDMA_CHX_OFF(x))
++#define UDMA_CHX_TX_BUF_ADDR(x) (0x048 + UDMA_CHX_OFF(x))
++#define UDMA_CHX_TX_CTRL(x) (0x04c + UDMA_CHX_OFF(x))
++#define UDMA_TX_CTRL_BUF_ADDRH GENMASK(10, 8)
++#define UDMA_TX_CTRL_TMOUT_DIS BIT(4)
++#define UDMA_TX_CTRL_BUFSZ GENMASK(3, 0)
++#define UDMA_CHX_RX_RD_PTR(x) (0x050 + UDMA_CHX_OFF(x))
++#define UDMA_CHX_RX_WR_PTR(x) (0x054 + UDMA_CHX_OFF(x))
++#define UDMA_CHX_RX_BUF_ADDR(x) (0x058 + UDMA_CHX_OFF(x))
++#define UDMA_CHX_RX_CTRL(x) (0x05c + UDMA_CHX_OFF(x))
++#define UDMA_RX_CTRL_BUF_ADDRH GENMASK(10, 8)
++#define UDMA_RX_CTRL_TMOUT_DIS BIT(4)
++#define UDMA_RX_CTRL_BUFSZ GENMASK(1, 0)
++
++#define UDMA_MAX_CHANNEL 16
++#define UDMA_TMOUT 0x200
++
++enum aspeed_udma_bufsz_code {
++ UDMA_BUFSZ_CODE_1KB,
++ UDMA_BUFSZ_CODE_4KB,
++ UDMA_BUFSZ_CODE_16KB,
++ UDMA_BUFSZ_CODE_64KB,
++};
++
++struct aspeed_udma_chan {
++ dma_addr_t dma_addr;
++
++ struct circ_buf *rb;
++ u32 rb_sz;
++
++ aspeed_udma_cb_t cb;
++ void *cb_arg;
++
++ bool dis_tmout;
++};
++
++struct aspeed_udma {
++ struct device *dev;
++ u8 __iomem *regs;
++ int irq;
++ struct aspeed_udma_chan tx_chs[UDMA_MAX_CHANNEL];
++ struct aspeed_udma_chan rx_chs[UDMA_MAX_CHANNEL];
++ spinlock_t lock;
++};
++
++static struct aspeed_udma udma[1];
++
++static int aspeed_udma_get_bufsz_code(u32 buf_sz)
++{
++ switch (buf_sz) {
++ case SZ_1K:
++ return UDMA_BUFSZ_CODE_1KB;
++ case SZ_4K:
++ return UDMA_BUFSZ_CODE_4KB;
++ case SZ_16K:
++ return UDMA_BUFSZ_CODE_16KB;
++ case SZ_64K:
++ return UDMA_BUFSZ_CODE_64KB;
++ default:
++ break;
++ }
++
++ return -1;
++}
++
++static u32 aspeed_udma_get_tx_rptr(u32 ch_no)
++{
++ return readl(udma->regs + UDMA_CHX_TX_RD_PTR(ch_no));
++}
++
++static u32 aspeed_udma_get_rx_wptr(u32 ch_no)
++{
++ return readl(udma->regs + UDMA_CHX_RX_WR_PTR(ch_no));
++}
++
++static void aspeed_udma_set_ptr(u32 ch_no, u32 ptr, bool is_tx)
++{
++ writel(ptr, udma->regs +
++ ((is_tx) ? UDMA_CHX_TX_WR_PTR(ch_no) : UDMA_CHX_RX_RD_PTR(ch_no)));
++}
++
++void aspeed_udma_set_tx_wptr(u32 ch_no, u32 wptr)
++{
++ aspeed_udma_set_ptr(ch_no, wptr, true);
++}
++EXPORT_SYMBOL(aspeed_udma_set_tx_wptr);
++
++void aspeed_udma_set_rx_rptr(u32 ch_no, u32 rptr)
++{
++ aspeed_udma_set_ptr(ch_no, rptr, false);
++}
++EXPORT_SYMBOL(aspeed_udma_set_rx_rptr);
++
++static int aspeed_udma_free_chan(u32 ch_no, bool is_tx)
++{
++ u32 reg;
++ unsigned long flags;
++
++	if (ch_no >= UDMA_MAX_CHANNEL)
++ return -EINVAL;
++
++ spin_lock_irqsave(&udma->lock, flags);
++
++ reg = readl(udma->regs +
++ ((is_tx) ? UDMA_TX_DMA_INT_EN : UDMA_RX_DMA_INT_EN));
++ reg &= ~(0x1 << ch_no);
++
++ writel(reg, udma->regs +
++ ((is_tx) ? UDMA_TX_DMA_INT_EN : UDMA_RX_DMA_INT_EN));
++
++ spin_unlock_irqrestore(&udma->lock, flags);
++
++ return 0;
++}
++
++int aspeed_udma_free_tx_chan(u32 ch_no)
++{
++ return aspeed_udma_free_chan(ch_no, true);
++}
++EXPORT_SYMBOL(aspeed_udma_free_tx_chan);
++
++int aspeed_udma_free_rx_chan(u32 ch_no)
++{
++ return aspeed_udma_free_chan(ch_no, false);
++}
++EXPORT_SYMBOL(aspeed_udma_free_rx_chan);
++
++static int aspeed_udma_request_chan(u32 ch_no, dma_addr_t addr,
++ struct circ_buf *rb, u32 rb_sz,
++ aspeed_udma_cb_t cb, void *id, bool dis_tmout, bool is_tx)
++{
++ int retval = 0;
++ int rbsz_code;
++
++ u32 reg;
++ unsigned long flags;
++ struct aspeed_udma_chan *ch;
++
++	if (ch_no >= UDMA_MAX_CHANNEL) {
++ retval = -EINVAL;
++ goto out;
++ }
++
++ if (IS_ERR_OR_NULL(rb) || IS_ERR_OR_NULL(rb->buf)) {
++ retval = -EINVAL;
++ goto out;
++ }
++
++ rbsz_code = aspeed_udma_get_bufsz_code(rb_sz);
++ if (rbsz_code < 0) {
++ retval = -EINVAL;
++ goto out;
++ }
++
++ spin_lock_irqsave(&udma->lock, flags);
++
++ if (is_tx) {
++ reg = readl(udma->regs + UDMA_TX_DMA_INT_EN);
++ if (reg & (0x1 << ch_no)) {
++ retval = -EBUSY;
++ goto unlock_n_out;
++ }
++
++ reg |= (0x1 << ch_no);
++ writel(reg, udma->regs + UDMA_TX_DMA_INT_EN);
++
++ reg = FIELD_PREP(UDMA_TX_CTRL_BUF_ADDRH, (u64)addr >> 32) |
++ ((dis_tmout) ? UDMA_TX_CTRL_TMOUT_DIS : 0) |
++ FIELD_PREP(UDMA_TX_CTRL_BUFSZ, rbsz_code);
++ writel(reg, udma->regs + UDMA_CHX_TX_CTRL(ch_no));
++
++ writel(addr, udma->regs + UDMA_CHX_TX_BUF_ADDR(ch_no));
++ } else {
++ reg = readl(udma->regs + UDMA_RX_DMA_INT_EN);
++ if (reg & (0x1 << ch_no)) {
++ retval = -EBUSY;
++ goto unlock_n_out;
++ }
++
++ reg |= (0x1 << ch_no);
++ writel(reg, udma->regs + UDMA_RX_DMA_INT_EN);
++
++ reg = FIELD_PREP(UDMA_RX_CTRL_BUF_ADDRH, (u64)addr >> 32) |
++ ((dis_tmout) ? UDMA_RX_CTRL_TMOUT_DIS : 0) |
++ FIELD_PREP(UDMA_RX_CTRL_BUFSZ, rbsz_code);
++ writel(reg, udma->regs + UDMA_CHX_RX_CTRL(ch_no));
++
++ writel(addr, udma->regs + UDMA_CHX_RX_BUF_ADDR(ch_no));
++ }
++
++ ch = (is_tx) ? &udma->tx_chs[ch_no] : &udma->rx_chs[ch_no];
++ ch->rb = rb;
++ ch->rb_sz = rb_sz;
++ ch->cb = cb;
++ ch->cb_arg = id;
++ ch->dma_addr = addr;
++ ch->dis_tmout = dis_tmout;
++
++unlock_n_out:
++ spin_unlock_irqrestore(&udma->lock, flags);
++out:
++	return retval;
++}
++
++int aspeed_udma_request_tx_chan(u32 ch_no, dma_addr_t addr,
++ struct circ_buf *rb, u32 rb_sz,
++ aspeed_udma_cb_t cb, void *id, bool dis_tmout)
++{
++ return aspeed_udma_request_chan(ch_no, addr, rb, rb_sz, cb, id,
++ dis_tmout, true);
++}
++EXPORT_SYMBOL(aspeed_udma_request_tx_chan);
++
++int aspeed_udma_request_rx_chan(u32 ch_no, dma_addr_t addr,
++ struct circ_buf *rb, u32 rb_sz,
++ aspeed_udma_cb_t cb, void *id, bool dis_tmout)
++{
++ return aspeed_udma_request_chan(ch_no, addr, rb, rb_sz, cb, id,
++ dis_tmout, false);
++}
++EXPORT_SYMBOL(aspeed_udma_request_rx_chan);
++
++static void aspeed_udma_chan_ctrl(u32 ch_no, u32 op, bool is_tx)
++{
++ unsigned long flags;
++ u32 reg_en, reg_rst;
++ u32 reg_en_off = (is_tx) ? UDMA_TX_DMA_EN : UDMA_RX_DMA_EN;
++	u32 reg_rst_off = (is_tx) ? UDMA_TX_DMA_RST : UDMA_RX_DMA_RST;
++
++	if (ch_no >= UDMA_MAX_CHANNEL)
++ return;
++
++ spin_lock_irqsave(&udma->lock, flags);
++
++ reg_en = readl(udma->regs + reg_en_off);
++ reg_rst = readl(udma->regs + reg_rst_off);
++
++ switch (op) {
++ case ASPEED_UDMA_OP_ENABLE:
++ reg_en |= (0x1 << ch_no);
++ writel(reg_en, udma->regs + reg_en_off);
++ break;
++ case ASPEED_UDMA_OP_DISABLE:
++ reg_en &= ~(0x1 << ch_no);
++ writel(reg_en, udma->regs + reg_en_off);
++ break;
++ case ASPEED_UDMA_OP_RESET:
++ reg_en &= ~(0x1 << ch_no);
++ writel(reg_en, udma->regs + reg_en_off);
++
++ reg_rst |= (0x1 << ch_no);
++ writel(reg_rst, udma->regs + reg_rst_off);
++
++ udelay(100);
++
++ reg_rst &= ~(0x1 << ch_no);
++ writel(reg_rst, udma->regs + reg_rst_off);
++ break;
++ default:
++ break;
++ }
++
++ spin_unlock_irqrestore(&udma->lock, flags);
++}
++
++void aspeed_udma_tx_chan_ctrl(u32 ch_no, enum aspeed_udma_ops op)
++{
++ aspeed_udma_chan_ctrl(ch_no, op, true);
++}
++EXPORT_SYMBOL(aspeed_udma_tx_chan_ctrl);
++
++void aspeed_udma_rx_chan_ctrl(u32 ch_no, enum aspeed_udma_ops op)
++{
++ aspeed_udma_chan_ctrl(ch_no, op, false);
++}
++EXPORT_SYMBOL(aspeed_udma_rx_chan_ctrl);
++
++static irqreturn_t aspeed_udma_isr(int irq, void *arg)
++{
++ u32 bit;
++ unsigned long tx_sts = readl(udma->regs + UDMA_TX_DMA_INT_STS);
++ unsigned long rx_sts = readl(udma->regs + UDMA_RX_DMA_INT_STS);
++
++ if (udma != (struct aspeed_udma *)arg)
++ return IRQ_NONE;
++
++ if (tx_sts == 0 && rx_sts == 0)
++ return IRQ_NONE;
++
++ for_each_set_bit(bit, &tx_sts, UDMA_MAX_CHANNEL) {
++ writel((0x1 << bit), udma->regs + UDMA_TX_DMA_INT_STS);
++ if (udma->tx_chs[bit].cb)
++ udma->tx_chs[bit].cb(aspeed_udma_get_tx_rptr(bit),
++ udma->tx_chs[bit].cb_arg);
++ }
++
++ for_each_set_bit(bit, &rx_sts, UDMA_MAX_CHANNEL) {
++ writel((0x1 << bit), udma->regs + UDMA_RX_DMA_INT_STS);
++ if (udma->rx_chs[bit].cb)
++ udma->rx_chs[bit].cb(aspeed_udma_get_rx_wptr(bit),
++ udma->rx_chs[bit].cb_arg);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static int aspeed_udma_probe(struct platform_device *pdev)
++{
++ int i, rc;
++ uint32_t reg;
++ struct resource *res;
++ struct device *dev = &pdev->dev;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (IS_ERR_OR_NULL(res)) {
++ dev_err(dev, "failed to get register base\n");
++ return -ENODEV;
++ }
++
++ udma->regs = devm_ioremap_resource(dev, res);
++ if (IS_ERR_OR_NULL(udma->regs)) {
++ dev_err(dev, "failed to map registers\n");
++ return PTR_ERR(udma->regs);
++ }
++
++ /* disable for safety */
++ writel(0x0, udma->regs + UDMA_TX_DMA_EN);
++ writel(0x0, udma->regs + UDMA_RX_DMA_EN);
++
++ udma->irq = platform_get_irq(pdev, 0);
++ if (udma->irq < 0) {
++ dev_err(dev, "failed to get IRQ number\n");
++ return -ENODEV;
++ }
++
++ rc = devm_request_irq(dev, udma->irq, aspeed_udma_isr,
++ IRQF_SHARED, DEVICE_NAME, udma);
++ if (rc) {
++ dev_err(dev, "failed to request IRQ handler\n");
++ return rc;
++ }
++
++ /*
++ * For legacy design.
++ * - TX ringbuffer size: 4KB
++ * - RX ringbuffer size: 64KB
++ * - Timeout timer disabled
++ */
++ reg = FIELD_PREP(UDMA_MISC_TX_BUFSZ, UDMA_BUFSZ_CODE_4KB) |
++ FIELD_PREP(UDMA_MISC_RX_BUFSZ, UDMA_BUFSZ_CODE_64KB);
++ writel(reg, udma->regs + UDMA_MISC);
++
++ for (i = 0; i < UDMA_MAX_CHANNEL; ++i) {
++ writel(0, udma->regs + UDMA_CHX_TX_WR_PTR(i));
++ writel(0, udma->regs + UDMA_CHX_RX_RD_PTR(i));
++ }
++
++ writel(0xffffffff, udma->regs + UDMA_TX_DMA_RST);
++ writel(0x0, udma->regs + UDMA_TX_DMA_RST);
++
++ writel(0xffffffff, udma->regs + UDMA_RX_DMA_RST);
++ writel(0x0, udma->regs + UDMA_RX_DMA_RST);
++
++ writel(0x0, udma->regs + UDMA_TX_DMA_INT_EN);
++ writel(0xffffffff, udma->regs + UDMA_TX_DMA_INT_STS);
++ writel(0x0, udma->regs + UDMA_RX_DMA_INT_EN);
++ writel(0xffffffff, udma->regs + UDMA_RX_DMA_INT_STS);
++
++ writel(UDMA_TMOUT, udma->regs + UDMA_TMOUT_TIMER);
++
++ spin_lock_init(&udma->lock);
++
++ dev_set_drvdata(dev, udma);
++
++ return 0;
++}
++
++static const struct of_device_id aspeed_udma_match[] = {
++ { .compatible = "aspeed,ast2500-udma" },
++ { .compatible = "aspeed,ast2600-udma" },
++ { .compatible = "aspeed,ast2700-udma" },
++ { },
++};
++
++static struct platform_driver aspeed_udma_driver = {
++ .driver = {
++ .name = DEVICE_NAME,
++ .of_match_table = aspeed_udma_match,
++ },
++ .probe = aspeed_udma_probe,
++};
++
++module_platform_driver(aspeed_udma_driver);
++
++MODULE_AUTHOR("Chia-Wei Wang <chiawei_wang@aspeedtech.com>");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Aspeed UDMA Engine Driver");
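aspeed-udma.c is a library-style driver: other kernel drivers claim a channel through the exported aspeed_udma_request_*_chan()/aspeed_udma_*_chan_ctrl() helpers and receive completions via a callback. The sketch below shows the intended call pattern for a TX channel; the aspeed_udma_cb_t prototype is assumed from how the ISR above invokes it (updated ring pointer plus the opaque id), so check <linux/soc/aspeed/aspeed-udma.h> for the exact typedef.
```
/*
 * Sketch of a kernel-side consumer (e.g. a UART driver) handing its TX ring
 * to UDMA channel 0. Callback signature is assumed from the ISR in this file.
 */
#include <linux/circ_buf.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/sizes.h>
#include <linux/soc/aspeed/aspeed-udma.h>

#define EXAMPLE_CH	0
#define EXAMPLE_RB_SZ	SZ_4K	/* must be one of 1K/4K/16K/64K (bufsz codes) */

static void example_tx_done(int rptr, void *id)
{
	/* The engine consumed data up to rptr; advance the software tail here. */
}

static int example_attach_udma(struct device *dev, struct circ_buf *rb)
{
	dma_addr_t dma;
	int ret;

	rb->head = 0;
	rb->tail = 0;
	rb->buf = dma_alloc_coherent(dev, EXAMPLE_RB_SZ, &dma, GFP_KERNEL);
	if (!rb->buf)
		return -ENOMEM;

	ret = aspeed_udma_request_tx_chan(EXAMPLE_CH, dma, rb, EXAMPLE_RB_SZ,
					  example_tx_done, rb, false);
	if (ret)
		return ret;

	/* Producer fills rb, bumps the write pointer, then enables the channel. */
	aspeed_udma_tx_chan_ctrl(EXAMPLE_CH, ASPEED_UDMA_OP_ENABLE);
	return 0;
}
```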
+diff --git a/drivers/soc/aspeed/aspeed-usb-hp.c b/drivers/soc/aspeed/aspeed-usb-hp.c
+new file mode 100644
+index 000000000..f92f0b6ca
+--- /dev/null
++++ b/drivers/soc/aspeed/aspeed-usb-hp.c
+@@ -0,0 +1,138 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright 2021 Aspeed Technology Inc.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/regmap.h>
++#include <asm/io.h>
++#include <linux/clk.h>
++#include <linux/reset.h>
++#include <linux/mfd/syscon.h>
++#include <linux/phy/phy.h>
++
++static const struct of_device_id aspeed_usb_hp_dt_ids[] = {
++ {
++ .compatible = "aspeed,ast2600-usb2ahp",
++ },
++ {
++ .compatible = "aspeed,ast2700-usb3ahp",
++ },
++ {
++ .compatible = "aspeed,ast2700-usb3bhp",
++ },
++ {
++ .compatible = "aspeed,ast2700-usb2ahp",
++ },
++ {
++ .compatible = "aspeed,ast2700-usb2bhp",
++ },
++	{ }
++};
++MODULE_DEVICE_TABLE(of, aspeed_usb_hp_dt_ids);
++
++static int aspeed_usb_hp_probe(struct platform_device *pdev)
++{
++ struct clk *clk;
++ struct reset_control *rst;
++ struct regmap *device;
++ struct phy *usb3_phy;
++	bool is_pcie_xhci = false;
++ int rc = 0;
++
++ if (of_device_is_compatible(pdev->dev.of_node,
++				    "aspeed,ast2600-usb2ahp")) {
++ dev_info(&pdev->dev, "Initialized AST2600 USB2AHP\n");
++ return 0;
++ }
++
++ if (of_device_is_compatible(pdev->dev.of_node,
++ "aspeed,ast2700-usb3ahp") ||
++ of_device_is_compatible(pdev->dev.of_node,
++ "aspeed,ast2700-usb3bhp")) {
++ is_pcie_xhci = true;
++ } else if (of_device_is_compatible(pdev->dev.of_node,
++ "aspeed,ast2700-usb2ahp") ||
++ of_device_is_compatible(pdev->dev.of_node,
++ "aspeed,ast2700-usb2bhp")) {
++ is_pcie_xhci = false;
++ }
++ clk = devm_clk_get(&pdev->dev, NULL);
++ if (IS_ERR(clk))
++ return PTR_ERR(clk);
++
++ rc = clk_prepare_enable(clk);
++ if (rc) {
++ dev_err(&pdev->dev, "Unable to enable clock (%d)\n", rc);
++ return rc;
++ }
++
++ rst = devm_reset_control_get_shared(&pdev->dev, NULL);
++ if (IS_ERR(rst)) {
++ rc = PTR_ERR(rst);
++ goto err;
++ }
++ rc = reset_control_deassert(rst);
++ if (rc)
++ goto err;
++
++ device = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "aspeed,device");
++	if (IS_ERR(device)) {
++		dev_err(&pdev->dev, "failed to find device regmap\n");
++		rc = PTR_ERR(device);
++		goto err;
++	}
++
++ if (is_pcie_xhci) {
++ usb3_phy = devm_phy_get(&pdev->dev, "usb3-phy");
++ if (IS_ERR(usb3_phy)) {
++ rc = dev_err_probe(&pdev->dev, PTR_ERR(usb3_phy),
++ "failed to get usb3 phy\n");
++ goto err;
++ }
++ rc = phy_init(usb3_phy);
++ if (rc < 0) {
++ dev_err(&pdev->dev, "failed to init usb3 phy\n");
++ goto err;
++ }
++ //EnPCIaMSI_EnPCIaIntA_EnPCIaMst_EnPCIaDev
++ /* Turn on PCIe xHCI without MSI */
++ regmap_update_bits(device, 0x70,
++ BIT(19) | BIT(11) | BIT(3),
++ BIT(19) | BIT(11) | BIT(3));
++ } else {
++ //EnPCIaMSI_EnPCIaIntA_EnPCIaMst_EnPCIaDev
++ /* Turn on PCIe EHCI without MSI */
++ regmap_update_bits(device, 0x70,
++ BIT(18) | BIT(10) | BIT(2),
++ BIT(18) | BIT(10) | BIT(2));
++ }
++ dev_info(&pdev->dev, "Initialized AST2700 USB Host PCIe\n");
++ return 0;
++err:
++ if (clk)
++ clk_disable_unprepare(clk);
++ return rc;
++}
++
++static int aspeed_usb_hp_remove(struct platform_device *pdev)
++{
++ dev_info(&pdev->dev, "Remove USB Host PCIe\n");
++
++ return 0;
++}
++
++static struct platform_driver aspeed_usb_hp_driver = {
++ .probe = aspeed_usb_hp_probe,
++ .remove = aspeed_usb_hp_remove,
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .of_match_table = aspeed_usb_hp_dt_ids,
++ },
++};
++module_platform_driver(aspeed_usb_hp_driver);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
+diff --git a/drivers/soc/aspeed/aspeed-usb-phy.c b/drivers/soc/aspeed/aspeed-usb-phy.c
+new file mode 100644
+index 000000000..7aa7b484b
+--- /dev/null
++++ b/drivers/soc/aspeed/aspeed-usb-phy.c
+@@ -0,0 +1,113 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright 2021 Aspeed Technology Inc.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/regmap.h>
++#include <linux/mfd/syscon.h>
++#include <asm/io.h>
++
++struct usb_phy_ctrl {
++ u32 offset;
++ u32 value;
++};
++
++static const struct of_device_id aspeed_usb_phy_dt_ids[] = {
++ {
++ .compatible = "aspeed,ast2600-uphyb",
++ },
++ {
++ .compatible = "aspeed,ast2700-uphy2a",
++ },
++ {
++ .compatible = "aspeed,ast2700-uphy2b",
++ },
++ { }
++};
++MODULE_DEVICE_TABLE(of, aspeed_usb_phy_dt_ids);
++
++static int aspeed_usb_phy_probe(struct platform_device *pdev)
++{
++ struct device_node *node = pdev->dev.of_node;
++ struct usb_phy_ctrl *ctrl_data;
++ void __iomem *base;
++ struct regmap *scu;
++ int ctrl_num = 1;
++ int ret, i;
++ u32 val;
++
++ scu = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "aspeed,scu");
++ if (IS_ERR(scu)) {
++		dev_err(&pdev->dev, "cannot find SCU regmap\n");
++ return -ENODEV;
++ }
++
++ if (of_device_is_compatible(pdev->dev.of_node,
++ "aspeed,ast2600-uphyb")) {
++ /* Check SCU040[3] USB port B controller reset is deassert */
++ regmap_read(scu, 0x40, &val);
++ if ((val & BIT(3)))
++ return -EPROBE_DEFER;
++ }
++
++ if (of_device_is_compatible(pdev->dev.of_node,
++ "aspeed,ast2700-uphy2a")) {
++ /* Check SCU220[0] USB vHubA1 controller reset is deassert */
++ regmap_read(scu, 0x220, &val);
++ if ((val & BIT(0)))
++ return -EPROBE_DEFER;
++ }
++
++ if (of_device_is_compatible(pdev->dev.of_node,
++ "aspeed,ast2700-uphy2b")) {
++ /* Check SCU220[3] USB vHubB1 controller reset is deassert */
++ regmap_read(scu, 0x220, &val);
++ if ((val & BIT(3)))
++ return -EPROBE_DEFER;
++ }
++
++ ctrl_data = devm_kzalloc(&pdev->dev,
++ sizeof(struct usb_phy_ctrl) * ctrl_num,
++ GFP_KERNEL);
++ if (!ctrl_data)
++ return -ENOMEM;
++
++	base = of_iomap(node, 0);
++	if (!base)
++		return -ENOMEM;
++
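++	/* "ctrl" is a flat list of <register offset, value> pairs to program. */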
++ ret = of_property_read_u32_array(node, "ctrl", (u32 *)ctrl_data,
++ ctrl_num * 2);
++ if (ret < 0) {
++ dev_err(&pdev->dev, "Could not read ctrl property\n");
++ return -EINVAL;
++ }
++
++ for (i = 0; i < ctrl_num; i++)
++ writel(ctrl_data[i].value, base + ctrl_data[i].offset);
++
++ dev_info(&pdev->dev, "Initialized USB PHY\n");
++
++ return 0;
++}
++
++static int aspeed_usb_phy_remove(struct platform_device *pdev)
++{
++ return 0;
++}
++
++static struct platform_driver aspeed_usb_phy_driver = {
++ .probe = aspeed_usb_phy_probe,
++ .remove = aspeed_usb_phy_remove,
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .of_match_table = aspeed_usb_phy_dt_ids,
++ },
++};
++module_platform_driver(aspeed_usb_phy_driver);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
+diff --git a/drivers/soc/aspeed/aspeed-xdma.c b/drivers/soc/aspeed/aspeed-xdma.c
+new file mode 100644
+index 000000000..cf9d8df27
+--- /dev/null
++++ b/drivers/soc/aspeed/aspeed-xdma.c
+@@ -0,0 +1,1433 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++// Copyright IBM Corp 2019
++
++#include <linux/aspeed-xdma.h>
++#include <linux/bitfield.h>
++#include <linux/clk.h>
++#include <linux/delay.h>
++#include <linux/device.h>
++#include <linux/dma-mapping.h>
++#include <linux/fs.h>
++#include <linux/genalloc.h>
++#include <linux/interrupt.h>
++#include <linux/io.h>
++#include <linux/jiffies.h>
++#include <linux/mfd/syscon.h>
++#include <linux/miscdevice.h>
++#include <linux/module.h>
++#include <linux/mutex.h>
++#include <linux/of_device.h>
++#include <linux/of_reserved_mem.h>
++#include <linux/platform_device.h>
++#include <linux/poll.h>
++#include <linux/regmap.h>
++#include <linux/reset.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/string.h>
++#include <linux/uaccess.h>
++#include <linux/wait.h>
++#include <linux/workqueue.h>
++
++#define DEVICE_NAME "aspeed-xdma"
++
++#define SCU_AST2600_MISC_CTRL 0x0c0
++#define SCU_AST2600_MISC_CTRL_XDMA_BMC BIT(8)
++#define SCU_AST2700_MISC_CTRL_XDMA_CLIENT BIT(4)
++
++#define SCU_AST2600_DEBUG_CTRL 0x0c8
++#define DEBUG_CTRL_AST2600_XDMA_DISABLE BIT(2)
++#define DEBUG_CTRL_AST2700_XDMA_DISABLE BIT(8)
++
++#define SCU_AST2500_PCIE_CONF 0x180
++#define SCU_AST2600_PCIE_CONF 0xc20
++#define SCU_AST2700_PCIE0_CONF 0x970
++#define SCU_AST2700_PCIE1_CONF 0x9B0
++#define SCU_PCIE_CONF_VGA_EN BIT(0)
++#define SCU_PCIE_CONF_VGA_EN_MMIO BIT(1)
++#define SCU_PCIE_CONF_VGA_EN_LPC BIT(2)
++#define SCU_PCIE_CONF_VGA_EN_MSI BIT(3)
++#define SCU_PCIE_CONF_VGA_EN_MCTP BIT(4)
++#define SCU_PCIE_CONF_VGA_EN_IRQ BIT(5)
++#define SCU_PCIE_CONF_VGA_EN_DMA BIT(6)
++#define SCU_PCIE_CONF_BMC_EN BIT(8)
++#define SCU_PCIE_CONF_BMC_EN_MMIO BIT(9)
++#define SCU_PCIE_CONF_BMC_EN_MSI BIT(11)
++#define SCU_PCIE_CONF_BMC_EN_MCTP BIT(12)
++#define SCU_PCIE_CONF_BMC_EN_IRQ BIT(13)
++#define SCU_PCIE_CONF_BMC_EN_DMA BIT(14)
++
++#define SCU_AST2700_PCIE0_CTRL 0xa60
++#define SCU_AST2700_PCIE1_CTRL 0xae0
++#define SCU_AST2700_PCIE_CTRL_DMA_EN BIT(2)
++
++#define SCU_AST2500_BMC_CLASS_REV 0x19c
++#define SCU_AST2600_BMC_CLASS_REV 0xc68
++#define SCU_AST2700_PCIE0_BMC_CLASS_REV 0xa18
++#define SCU_AST2700_PCIE1_BMC_CLASS_REV 0xa98
++#define SCU_BMC_CLASS_REV_XDMA 0xff000001
++#define SCU_BMC_CLASS_REV_MASK 0xffffff00
++
++#define XDMA_CMDQ_SIZE PAGE_SIZE
++#define XDMA_NUM_CMDS \
++ (XDMA_CMDQ_SIZE / sizeof(struct aspeed_xdma_cmd))
++
++/* Aspeed specification requires 100us after disabling the reset */
++#define XDMA_ENGINE_SETUP_TIME_MAX_US 1000
++#define XDMA_ENGINE_SETUP_TIME_MIN_US 100
++
++#define XDMA_CMD_AST2500_PITCH_SHIFT 3
++#define XDMA_CMD_AST2500_PITCH_BMC GENMASK_ULL(62, 51)
++#define XDMA_CMD_AST2500_PITCH_HOST GENMASK_ULL(46, 35)
++#define XDMA_CMD_AST2500_PITCH_UPSTREAM BIT_ULL(31)
++#define XDMA_CMD_AST2500_PITCH_ADDR GENMASK_ULL(29, 4)
++#define XDMA_CMD_AST2500_PITCH_ID BIT_ULL(0)
++#define XDMA_CMD_AST2500_CMD_IRQ_EN BIT_ULL(31)
++#define XDMA_CMD_AST2500_CMD_LINE_NO GENMASK_ULL(27, 16)
++#define XDMA_CMD_AST2500_CMD_IRQ_BMC BIT_ULL(15)
++#define XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT 4
++#define XDMA_CMD_AST2500_CMD_LINE_SIZE \
++ GENMASK_ULL(14, XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT)
++#define XDMA_CMD_AST2500_CMD_ID BIT_ULL(1)
++
++#define XDMA_CMD_AST2600_PITCH_BMC GENMASK_ULL(62, 48)
++#define XDMA_CMD_AST2600_PITCH_HOST GENMASK_ULL(46, 32)
++#define XDMA_CMD_AST2600_PITCH_ADDR GENMASK_ULL(30, 0)
++#define XDMA_CMD_AST2600_CMD_64_EN BIT_ULL(40)
++#define XDMA_CMD_AST2600_CMD_IRQ_BMC BIT_ULL(37)
++#define XDMA_CMD_AST2600_CMD_IRQ_HOST BIT_ULL(36)
++#define XDMA_CMD_AST2600_CMD_UPSTREAM BIT_ULL(32)
++#define XDMA_CMD_AST2600_CMD_LINE_NO GENMASK_ULL(27, 16)
++#define XDMA_CMD_AST2600_CMD_LINE_SIZE GENMASK_ULL(14, 0)
++#define XDMA_CMD_AST2600_CMD_MULTILINE_SIZE GENMASK_ULL(14, 12)
++
++#define XDMA_CMD_AST2700_PITCH_BMC GENMASK_ULL(62, 48)
++#define XDMA_CMD_AST2700_PITCH_HOST GENMASK_ULL(46, 32)
++#define XDMA_CMD_AST2700_CMD_64_EN BIT_ULL(40)
++#define XDMA_CMD_AST2700_CMD_IRQ_BMC BIT_ULL(37)
++#define XDMA_CMD_AST2700_CMD_UPSTREAM BIT_ULL(32)
++#define XDMA_CMD_AST2700_CMD_LINE_NO GENMASK_ULL(27, 16)
++#define XDMA_CMD_AST2700_CMD_LINE_SIZE GENMASK_ULL(14, 0)
++#define XDMA_CMD_AST2700_CMD_MULTILINE_SIZE GENMASK_ULL(14, 12)
++#define XDMA_CMD_AST2700_BMC_ADDR GENMASK_ULL(34, 0)
++
++#define XDMA_AST2500_QUEUE_ENTRY_SIZE 4
++#define XDMA_AST2500_HOST_CMDQ_ADDR0 0x00
++#define XDMA_AST2500_HOST_CMDQ_ENDP 0x04
++#define XDMA_AST2500_HOST_CMDQ_WRITEP 0x08
++#define XDMA_AST2500_HOST_CMDQ_READP 0x0c
++#define XDMA_AST2500_BMC_CMDQ_ADDR 0x10
++#define XDMA_AST2500_BMC_CMDQ_ENDP 0x14
++#define XDMA_AST2500_BMC_CMDQ_WRITEP 0x18
++#define XDMA_AST2500_BMC_CMDQ_READP 0x1c
++#define XDMA_BMC_CMDQ_READP_RESET 0xee882266
++#define XDMA_AST2500_CTRL 0x20
++#define XDMA_AST2500_CTRL_US_COMP BIT(4)
++#define XDMA_AST2500_CTRL_DS_COMP BIT(5)
++#define XDMA_AST2500_CTRL_DS_DIRTY BIT(6)
++#define XDMA_AST2500_CTRL_DS_SIZE_256 BIT(17)
++#define XDMA_AST2500_CTRL_DS_TIMEOUT BIT(28)
++#define XDMA_AST2500_CTRL_DS_CHECK_ID BIT(29)
++#define XDMA_AST2500_STATUS 0x24
++#define XDMA_AST2500_STATUS_US_COMP BIT(4)
++#define XDMA_AST2500_STATUS_DS_COMP BIT(5)
++#define XDMA_AST2500_STATUS_DS_DIRTY BIT(6)
++#define XDMA_AST2500_INPRG_DS_CMD1 0x38
++#define XDMA_AST2500_INPRG_DS_CMD2 0x3c
++#define XDMA_AST2500_INPRG_US_CMD00 0x40
++#define XDMA_AST2500_INPRG_US_CMD01 0x44
++#define XDMA_AST2500_INPRG_US_CMD10 0x48
++#define XDMA_AST2500_INPRG_US_CMD11 0x4c
++#define XDMA_AST2500_INPRG_US_CMD20 0x50
++#define XDMA_AST2500_INPRG_US_CMD21 0x54
++#define XDMA_AST2500_HOST_CMDQ_ADDR1 0x60
++#define XDMA_AST2500_VGA_CMDQ_ADDR0 0x64
++#define XDMA_AST2500_VGA_CMDQ_ENDP 0x68
++#define XDMA_AST2500_VGA_CMDQ_WRITEP 0x6c
++#define XDMA_AST2500_VGA_CMDQ_READP 0x70
++#define XDMA_AST2500_VGA_CMD_STATUS 0x74
++#define XDMA_AST2500_VGA_CMDQ_ADDR1 0x78
++
++#define XDMA_AST2600_QUEUE_ENTRY_SIZE 2
++#define XDMA_AST2600_HOST_CMDQ_ADDR0 0x00
++#define XDMA_AST2600_HOST_CMDQ_ADDR1 0x04
++#define XDMA_AST2600_HOST_CMDQ_ENDP 0x08
++#define XDMA_AST2600_HOST_CMDQ_WRITEP 0x0c
++#define XDMA_AST2600_HOST_CMDQ_READP 0x10
++#define XDMA_AST2600_BMC_CMDQ_ADDR 0x14
++#define XDMA_AST2600_BMC_CMDQ_ENDP 0x18
++#define XDMA_AST2600_BMC_CMDQ_WRITEP 0x1c
++#define XDMA_AST2600_BMC_CMDQ_READP 0x20
++#define XDMA_AST2600_VGA_CMDQ_ADDR0 0x24
++#define XDMA_AST2600_VGA_CMDQ_ADDR1 0x28
++#define XDMA_AST2600_VGA_CMDQ_ENDP 0x2c
++#define XDMA_AST2600_VGA_CMDQ_WRITEP 0x30
++#define XDMA_AST2600_VGA_CMDQ_READP 0x34
++#define XDMA_AST2600_CTRL 0x38
++#define XDMA_AST2600_CTRL_US_COMP BIT(16)
++#define XDMA_AST2600_CTRL_DS_COMP BIT(17)
++#define XDMA_AST2600_CTRL_DS_DIRTY BIT(18)
++#define XDMA_AST2600_CTRL_DS_SIZE_256 BIT(20)
++#define XDMA_AST2600_STATUS 0x3c
++#define XDMA_AST2600_STATUS_US_COMP BIT(16)
++#define XDMA_AST2600_STATUS_DS_COMP BIT(17)
++#define XDMA_AST2600_STATUS_DS_DIRTY BIT(18)
++#define XDMA_AST2600_INPRG_DS_CMD00 0x40
++#define XDMA_AST2600_INPRG_DS_CMD01 0x44
++#define XDMA_AST2600_INPRG_DS_CMD10 0x48
++#define XDMA_AST2600_INPRG_DS_CMD11 0x4c
++#define XDMA_AST2600_INPRG_DS_CMD20 0x50
++#define XDMA_AST2600_INPRG_DS_CMD21 0x54
++#define XDMA_AST2600_INPRG_US_CMD00 0x60
++#define XDMA_AST2600_INPRG_US_CMD01 0x64
++#define XDMA_AST2600_INPRG_US_CMD10 0x68
++#define XDMA_AST2600_INPRG_US_CMD11 0x6c
++#define XDMA_AST2600_INPRG_US_CMD20 0x70
++#define XDMA_AST2600_INPRG_US_CMD21 0x74
++
++#define XDMA_AST2700_QUEUE_ENTRY_SIZE 2
++#define XDMA_AST2700_BMC_CMDQ_ADDR0 0x10
++#define XDMA_AST2700_BMC_CMDQ_ADDR1 0x14
++#define XDMA_AST2700_BMC_CMDQ_ENDP 0x18
++#define XDMA_AST2700_BMC_CMDQ_WRITEP 0x1c
++#define XDMA_AST2700_BMC_CMDQ_READP 0x20
++#define XDMA_AST2700_CTRL 0x38
++#define XDMA_AST2700_CTRL_US_COMP BIT(16)
++#define XDMA_AST2700_CTRL_DS_COMP BIT(17)
++#define XDMA_AST2700_CTRL_DS_DIRTY BIT(18)
++#define XDMA_AST2700_CTRL_IDLE BIT(19)
++#define XDMA_AST2700_CTRL_DS_SIZE_256 BIT(20)
++#define XDMA_AST2700_STATUS 0x3c
++#define XDMA_AST2700_STATUS_US_COMP BIT(16)
++#define XDMA_AST2700_STATUS_DS_COMP BIT(17)
++#define XDMA_AST2700_STATUS_DS_DIRTY BIT(18)
++#define XDMA_AST2700_STATUS_IDLE BIT(19)
++#define XDMA_AST2700_INPRG_DS_CMD00 0x40
++#define XDMA_AST2700_INPRG_DS_CMD01 0x44
++#define XDMA_AST2700_INPRG_DS_CMD10 0x48
++#define XDMA_AST2700_INPRG_DS_CMD11 0x4c
++#define XDMA_AST2700_INPRG_DS_CMD20 0x50
++#define XDMA_AST2700_INPRG_DS_CMD21 0x54
++#define XDMA_AST2700_INPRG_US_CMD00 0x60
++#define XDMA_AST2700_INPRG_US_CMD01 0x64
++#define XDMA_AST2700_INPRG_US_CMD10 0x68
++#define XDMA_AST2700_INPRG_US_CMD11 0x6c
++#define XDMA_AST2700_INPRG_US_CMD20 0x70
++#define XDMA_AST2700_INPRG_US_CMD21 0x74
++
++struct aspeed_xdma_cmd {
++ u64 host_addr;
++ u64 pitch;
++ u64 cmd;
++ u64 reserved;
++};
++
++struct aspeed_xdma_regs {
++ u8 bmc_cmdq_addr;
++ u8 bmc_cmdq_addr_ext;
++ u8 bmc_cmdq_endp;
++ u8 bmc_cmdq_writep;
++ u8 bmc_cmdq_readp;
++ u8 control;
++ u8 status;
++};
++
++struct aspeed_xdma_status_bits {
++ u32 us_comp;
++ u32 ds_comp;
++ u32 ds_dirty;
++};
++
++struct aspeed_xdma;
++
++struct aspeed_xdma_chip {
++ u32 control;
++ u32 scu_bmc_class;
++ u32 scu_misc_ctrl;
++ u32 scu_pcie_conf;
++ u32 scu_pcie_ctrl;
++ unsigned int queue_entry_size;
++ struct aspeed_xdma_regs regs;
++ struct aspeed_xdma_status_bits status_bits;
++ unsigned int (*set_cmd)(struct aspeed_xdma *ctx,
++ struct aspeed_xdma_cmd cmds[2],
++ struct aspeed_xdma_op *op, u64 bmc_addr);
++};
++
++struct aspeed_xdma_client;
++
++struct aspeed_xdma {
++ struct kobject kobj;
++ const struct aspeed_xdma_chip *chip;
++
++ int irq;
++ int pcie_irq;
++ struct clk *clock;
++ struct device *dev;
++ void __iomem *base;
++ resource_size_t res_size;
++ resource_size_t res_start;
++ struct reset_control *reset;
++ struct reset_control *reset_rc;
++
++ /* Protects current_client */
++ spinlock_t client_lock;
++ struct aspeed_xdma_client *current_client;
++
++ /* Protects engine configuration */
++ spinlock_t engine_lock;
++ struct aspeed_xdma_cmd *cmdq;
++ unsigned int cmd_idx;
++ bool in_reset;
++ bool upstream;
++
++ /* Queue waiters for idle engine */
++ wait_queue_head_t wait;
++
++ struct work_struct reset_work;
++
++ phys_addr_t mem_phys;
++ phys_addr_t mem_size;
++ void *mem_virt;
++ dma_addr_t mem_coherent;
++ dma_addr_t cmdq_phys;
++ struct gen_pool *pool;
++
++ struct miscdevice misc;
++};
++
++struct aspeed_xdma_client {
++ struct aspeed_xdma *ctx;
++
++ bool error;
++ bool in_progress;
++ void *virt;
++ dma_addr_t phys;
++ u32 size;
++};
++
++#define CREATE_TRACE_POINTS
++#include <trace/events/xdma.h>
++
++static u32 aspeed_xdma_readl(struct aspeed_xdma *ctx, u8 reg)
++{
++ u32 v = readl(ctx->base + reg);
++
++ dev_dbg(ctx->dev, "read %02x[%08x]\n", reg, v);
++ return v;
++}
++
++static void aspeed_xdma_writel(struct aspeed_xdma *ctx, u8 reg, u32 val)
++{
++ writel(val, ctx->base + reg);
++ dev_dbg(ctx->dev, "write %02x[%08x]\n", reg, val);
++}
++
++static void aspeed_xdma_init_eng(struct aspeed_xdma *ctx)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ctx->engine_lock, flags);
++ aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_endp,
++ ctx->chip->queue_entry_size * XDMA_NUM_CMDS);
++ aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_readp,
++ XDMA_BMC_CMDQ_READP_RESET);
++ aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_writep, 0);
++ aspeed_xdma_writel(ctx, ctx->chip->regs.control, ctx->chip->control);
++ aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_addr, ctx->cmdq_phys);
++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
++ if (ctx->chip->regs.bmc_cmdq_addr_ext)
++ aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_addr_ext, ctx->cmdq_phys >> 32);
++#endif
++
++ ctx->cmd_idx = 0;
++ spin_unlock_irqrestore(&ctx->engine_lock, flags);
++}
++
++static unsigned int aspeed_xdma_ast2500_set_cmd(struct aspeed_xdma *ctx,
++ struct aspeed_xdma_cmd cmds[2],
++ struct aspeed_xdma_op *op,
++ u64 bmc_addr)
++{
++ unsigned int rc = 1;
++ unsigned int pitch = 1;
++ unsigned int line_no = 1;
++ unsigned int line_size = op->len >>
++ XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT;
++ u64 cmd = XDMA_CMD_AST2500_CMD_IRQ_EN | XDMA_CMD_AST2500_CMD_IRQ_BMC |
++ XDMA_CMD_AST2500_CMD_ID;
++ u64 cmd_pitch = (op->direction ? XDMA_CMD_AST2500_PITCH_UPSTREAM : 0) |
++ XDMA_CMD_AST2500_PITCH_ID;
++
++ dev_dbg(ctx->dev, "xdma %s ast2500: bmc[%08llx] len[%08x] host[%08x]\n",
++ op->direction ? "upstream" : "downstream", bmc_addr, op->len,
++ (u32)op->host_addr);
++
++ if (op->len > XDMA_CMD_AST2500_CMD_LINE_SIZE) {
++ unsigned int rem;
++ unsigned int total;
++
++ line_no = op->len / XDMA_CMD_AST2500_CMD_LINE_SIZE;
++ total = XDMA_CMD_AST2500_CMD_LINE_SIZE * line_no;
++ rem = (op->len - total) >>
++ XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT;
++ line_size = XDMA_CMD_AST2500_CMD_LINE_SIZE;
++ pitch = line_size >> XDMA_CMD_AST2500_PITCH_SHIFT;
++ line_size >>= XDMA_CMD_AST2500_CMD_LINE_SIZE_SHIFT;
++
++ if (rem) {
++ u32 rbmc = bmc_addr + total;
++
++ cmds[1].host_addr = op->host_addr + (u64)total;
++ cmds[1].pitch = cmd_pitch |
++ ((u64)rbmc & XDMA_CMD_AST2500_PITCH_ADDR) |
++ FIELD_PREP(XDMA_CMD_AST2500_PITCH_HOST, 1) |
++ FIELD_PREP(XDMA_CMD_AST2500_PITCH_BMC, 1);
++ cmds[1].cmd = cmd |
++ FIELD_PREP(XDMA_CMD_AST2500_CMD_LINE_NO, 1) |
++ FIELD_PREP(XDMA_CMD_AST2500_CMD_LINE_SIZE,
++ rem);
++ cmds[1].reserved = 0ULL;
++
++ print_hex_dump_debug("xdma rem ", DUMP_PREFIX_OFFSET,
++ 16, 1, &cmds[1], sizeof(*cmds),
++ true);
++
++ cmd &= ~(XDMA_CMD_AST2500_CMD_IRQ_EN |
++ XDMA_CMD_AST2500_CMD_IRQ_BMC);
++
++ rc++;
++ }
++ }
++
++ cmds[0].host_addr = op->host_addr;
++ cmds[0].pitch = cmd_pitch |
++ (bmc_addr & XDMA_CMD_AST2500_PITCH_ADDR) |
++ FIELD_PREP(XDMA_CMD_AST2500_PITCH_HOST, pitch) |
++ FIELD_PREP(XDMA_CMD_AST2500_PITCH_BMC, pitch);
++ cmds[0].cmd = cmd | FIELD_PREP(XDMA_CMD_AST2500_CMD_LINE_NO, line_no) |
++ FIELD_PREP(XDMA_CMD_AST2500_CMD_LINE_SIZE, line_size);
++ cmds[0].reserved = 0ULL;
++
++ print_hex_dump_debug("xdma cmd ", DUMP_PREFIX_OFFSET, 16, 1, cmds,
++ sizeof(*cmds), true);
++
++ return rc;
++}
++
++static unsigned int aspeed_xdma_ast2600_set_cmd(struct aspeed_xdma *ctx,
++ struct aspeed_xdma_cmd cmds[2],
++ struct aspeed_xdma_op *op,
++ u64 bmc_addr)
++{
++ unsigned int rc = 1;
++ unsigned int pitch = 1;
++ unsigned int line_no = 1;
++ unsigned int line_size = op->len;
++ u64 cmd = XDMA_CMD_AST2600_CMD_IRQ_BMC |
++ (op->direction ? XDMA_CMD_AST2600_CMD_UPSTREAM : 0);
++
++ if (op->host_addr & 0xffffffff00000000ULL ||
++ (op->host_addr + (u64)op->len) & 0xffffffff00000000ULL)
++ cmd |= XDMA_CMD_AST2600_CMD_64_EN;
++
++	dev_dbg(ctx->dev, "xdma %s ast2600: bmc[%08llx] len[%08x] host[%016llx]\n",
++		op->direction ? "upstream" : "downstream",
++		bmc_addr, op->len, op->host_addr);
++
++ if ((op->host_addr & 0xff) + op->len > XDMA_CMD_AST2600_CMD_LINE_SIZE) {
++ unsigned int rem;
++ unsigned int total;
++
++ line_no = op->len / XDMA_CMD_AST2600_CMD_MULTILINE_SIZE;
++ total = XDMA_CMD_AST2600_CMD_MULTILINE_SIZE * line_no;
++ rem = op->len - total;
++ line_size = XDMA_CMD_AST2600_CMD_MULTILINE_SIZE;
++ pitch = line_size;
++
++ if (rem) {
++ u32 rbmc = bmc_addr + total;
++
++ cmds[1].host_addr = op->host_addr + (u64)total;
++ cmds[1].pitch =
++ ((u64)rbmc & XDMA_CMD_AST2600_PITCH_ADDR) |
++ FIELD_PREP(XDMA_CMD_AST2600_PITCH_HOST, 1) |
++ FIELD_PREP(XDMA_CMD_AST2600_PITCH_BMC, 1);
++ cmds[1].cmd = cmd |
++ FIELD_PREP(XDMA_CMD_AST2600_CMD_LINE_NO, 1) |
++ FIELD_PREP(XDMA_CMD_AST2600_CMD_LINE_SIZE,
++ rem);
++ cmds[1].reserved = 0ULL;
++
++ print_hex_dump_debug("xdma rem ", DUMP_PREFIX_OFFSET,
++ 16, 1, &cmds[1], sizeof(*cmds),
++ true);
++
++ cmd &= ~XDMA_CMD_AST2600_CMD_IRQ_BMC;
++
++ rc++;
++ }
++ }
++
++ cmds[0].host_addr = op->host_addr;
++ cmds[0].pitch = (bmc_addr & XDMA_CMD_AST2600_PITCH_ADDR) |
++ FIELD_PREP(XDMA_CMD_AST2600_PITCH_HOST, pitch) |
++ FIELD_PREP(XDMA_CMD_AST2600_PITCH_BMC, pitch);
++ cmds[0].cmd = cmd | FIELD_PREP(XDMA_CMD_AST2600_CMD_LINE_NO, line_no) |
++ FIELD_PREP(XDMA_CMD_AST2600_CMD_LINE_SIZE, line_size);
++ cmds[0].reserved = 0ULL;
++
++ print_hex_dump_debug("xdma cmd ", DUMP_PREFIX_OFFSET, 16, 1, cmds,
++ sizeof(*cmds), true);
++
++ return rc;
++}
++
++static unsigned int aspeed_xdma_ast2700_set_cmd(struct aspeed_xdma *ctx,
++ struct aspeed_xdma_cmd cmds[2],
++ struct aspeed_xdma_op *op,
++ u64 bmc_addr)
++{
++ unsigned int rc = 1;
++ unsigned int pitch = 1;
++ unsigned int line_no = 1;
++ unsigned int line_size = op->len;
++ u64 cmd = XDMA_CMD_AST2700_CMD_IRQ_BMC |
++ (op->direction ? XDMA_CMD_AST2700_CMD_UPSTREAM : 0);
++
++ if (op->host_addr & 0xffffffff00000000ULL ||
++ (op->host_addr + (u64)op->len) & 0xffffffff00000000ULL)
++ cmd |= XDMA_CMD_AST2700_CMD_64_EN;
++
++ dev_dbg(ctx->dev, "xdma %s ast2700: bmc[%08llx] len[%08x] host[%016llx]\n",
++ op->direction ? "upstream" : "downstream",
++ bmc_addr, op->len, op->host_addr);
++
++ if (op->len > XDMA_CMD_AST2700_CMD_LINE_SIZE) {
++ unsigned int rem;
++ unsigned int total;
++
++ line_no = op->len / XDMA_CMD_AST2700_CMD_MULTILINE_SIZE;
++ total = XDMA_CMD_AST2700_CMD_MULTILINE_SIZE * line_no;
++ rem = op->len - total;
++ line_size = XDMA_CMD_AST2700_CMD_MULTILINE_SIZE;
++ pitch = line_size;
++
++ if (rem) {
++			/*
++			 * The trailing command picks up where the multi-line
++			 * command ends, so both the host and BMC addresses
++			 * advance by the bytes already covered.
++			 */
++ u64 rbmc = bmc_addr + total;
++
++ cmds[1].host_addr = op->host_addr + (u64)total;
++ cmds[1].pitch =
++ FIELD_PREP(XDMA_CMD_AST2700_PITCH_HOST, 1) |
++ FIELD_PREP(XDMA_CMD_AST2700_PITCH_BMC, 1);
++ cmds[1].cmd = cmd |
++ FIELD_PREP(XDMA_CMD_AST2700_CMD_LINE_NO, 1) |
++ FIELD_PREP(XDMA_CMD_AST2700_CMD_LINE_SIZE,
++ rem);
++ cmds[1].reserved = rbmc & XDMA_CMD_AST2700_BMC_ADDR;
++
++ print_hex_dump_debug("xdma rem ", DUMP_PREFIX_OFFSET,
++ 16, 1, &cmds[1], sizeof(*cmds),
++ true);
++
++ cmd &= ~XDMA_CMD_AST2700_CMD_IRQ_BMC;
++
++ rc++;
++ }
++ }
++ cmds[0].host_addr = op->host_addr;
++ cmds[0].pitch =
++ FIELD_PREP(XDMA_CMD_AST2700_PITCH_HOST, pitch) |
++ FIELD_PREP(XDMA_CMD_AST2700_PITCH_BMC, pitch);
++ cmds[0].cmd = cmd | FIELD_PREP(XDMA_CMD_AST2700_CMD_LINE_NO, line_no) |
++ FIELD_PREP(XDMA_CMD_AST2700_CMD_LINE_SIZE, line_size);
++ cmds[0].reserved = bmc_addr & XDMA_CMD_AST2700_BMC_ADDR;
++
++ print_hex_dump_debug("xdma cmd ", DUMP_PREFIX_OFFSET, 16, 1, cmds,
++ sizeof(*cmds), true);
++
++ return rc;
++}
++
++static int aspeed_xdma_start(struct aspeed_xdma *ctx, unsigned int num_cmds,
++ struct aspeed_xdma_cmd cmds[2], bool upstream,
++ struct aspeed_xdma_client *client)
++{
++ unsigned int i;
++ int rc = -EBUSY;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ctx->engine_lock, flags);
++ if (ctx->in_reset)
++ goto unlock;
++
++ spin_lock(&ctx->client_lock);
++ if (ctx->current_client) {
++ spin_unlock(&ctx->client_lock);
++ goto unlock;
++ }
++
++ client->error = false;
++ client->in_progress = true;
++ ctx->current_client = client;
++ spin_unlock(&ctx->client_lock);
++
++ ctx->upstream = upstream;
++ for (i = 0; i < num_cmds; ++i) {
++ trace_xdma_start(ctx, &cmds[i]);
++ /*
++ * Use memcpy_toio here to get some barriers before starting
++ * the operation. The command(s) need to be in physical memory
++ * before the XDMA engine starts.
++ */
++ memcpy_toio(&ctx->cmdq[ctx->cmd_idx], &cmds[i],
++ sizeof(struct aspeed_xdma_cmd));
++ ctx->cmd_idx = (ctx->cmd_idx + 1) % XDMA_NUM_CMDS;
++ }
++
++ aspeed_xdma_writel(ctx, ctx->chip->regs.bmc_cmdq_writep,
++ ctx->cmd_idx * ctx->chip->queue_entry_size);
++ rc = 0;
++
++unlock:
++ spin_unlock_irqrestore(&ctx->engine_lock, flags);
++ return rc;
++}
++
++static void aspeed_xdma_done(struct aspeed_xdma *ctx, bool error)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ctx->client_lock, flags);
++ if (ctx->current_client) {
++ ctx->current_client->error = error;
++ ctx->current_client->in_progress = false;
++ ctx->current_client = NULL;
++ }
++ spin_unlock_irqrestore(&ctx->client_lock, flags);
++
++ wake_up_interruptible_all(&ctx->wait);
++}
++
++static irqreturn_t aspeed_xdma_irq(int irq, void *arg)
++{
++ struct aspeed_xdma *ctx = arg;
++ u32 status;
++
++ spin_lock(&ctx->engine_lock);
++ status = aspeed_xdma_readl(ctx, ctx->chip->regs.status);
++
++ trace_xdma_irq(status);
++
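++	/*
++	 * A dirty downstream status indicates a failed transfer; otherwise
++	 * complete the client on the completion bit matching the direction
++	 * of the current operation.
++	 */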
++ if (status & ctx->chip->status_bits.ds_dirty) {
++ aspeed_xdma_done(ctx, true);
++ } else {
++ if (status & ctx->chip->status_bits.us_comp) {
++ if (ctx->upstream)
++ aspeed_xdma_done(ctx, false);
++ }
++
++ if (status & ctx->chip->status_bits.ds_comp) {
++ if (!ctx->upstream)
++ aspeed_xdma_done(ctx, false);
++ }
++ }
++
++ aspeed_xdma_writel(ctx, ctx->chip->regs.status, status);
++ spin_unlock(&ctx->engine_lock);
++
++ return IRQ_HANDLED;
++}
++
++static void aspeed_xdma_reset(struct aspeed_xdma *ctx)
++{
++ unsigned long flags;
++
++ trace_xdma_reset(ctx);
++
++ reset_control_assert(ctx->reset);
++ usleep_range(XDMA_ENGINE_SETUP_TIME_MIN_US,
++ XDMA_ENGINE_SETUP_TIME_MAX_US);
++ reset_control_deassert(ctx->reset);
++ usleep_range(XDMA_ENGINE_SETUP_TIME_MIN_US,
++ XDMA_ENGINE_SETUP_TIME_MAX_US);
++
++ aspeed_xdma_init_eng(ctx);
++
++ aspeed_xdma_done(ctx, true);
++
++ spin_lock_irqsave(&ctx->engine_lock, flags);
++ ctx->in_reset = false;
++ spin_unlock_irqrestore(&ctx->engine_lock, flags);
++
++ wake_up_interruptible(&ctx->wait);
++}
++
++static void aspeed_xdma_reset_work(struct work_struct *work)
++{
++ struct aspeed_xdma *ctx = container_of(work, struct aspeed_xdma,
++ reset_work);
++
++ aspeed_xdma_reset(ctx);
++}
++
++static irqreturn_t aspeed_xdma_pcie_irq(int irq, void *arg)
++{
++ struct aspeed_xdma *ctx = arg;
++
++ trace_xdma_perst(ctx);
++
++ spin_lock(&ctx->engine_lock);
++ if (ctx->in_reset) {
++ spin_unlock(&ctx->engine_lock);
++ return IRQ_HANDLED;
++ }
++
++ ctx->in_reset = true;
++ spin_unlock(&ctx->engine_lock);
++
++ schedule_work(&ctx->reset_work);
++ return IRQ_HANDLED;
++}
++
++static ssize_t aspeed_xdma_write(struct file *file, const char __user *buf,
++ size_t len, loff_t *offset)
++{
++ int rc;
++ unsigned int num_cmds;
++ struct aspeed_xdma_op op;
++ struct aspeed_xdma_cmd cmds[2];
++ struct aspeed_xdma_client *client = file->private_data;
++ struct aspeed_xdma *ctx = client->ctx;
++
++ if (len != sizeof(op))
++ return -EINVAL;
++
++ if (copy_from_user(&op, buf, len))
++ return -EFAULT;
++
++ if (!op.len || op.len > client->size ||
++ op.direction > ASPEED_XDMA_DIRECTION_UPSTREAM)
++ return -EINVAL;
++
++ num_cmds = ctx->chip->set_cmd(ctx, cmds, &op, client->phys);
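++	/*
++	 * Queue the transfer. If the engine is busy, wait for it to free up
++	 * unless the file was opened non-blocking.
++	 */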
++ do {
++ rc = aspeed_xdma_start(ctx, num_cmds, cmds, !!op.direction,
++ client);
++ if (!rc)
++ break;
++
++ if ((file->f_flags & O_NONBLOCK) || rc != -EBUSY)
++ return rc;
++
++ rc = wait_event_interruptible(ctx->wait,
++ !(ctx->current_client ||
++ ctx->in_reset));
++ } while (!rc);
++
++ if (rc)
++ return -EINTR;
++
++ if (!(file->f_flags & O_NONBLOCK)) {
++ rc = wait_event_interruptible(ctx->wait, !client->in_progress);
++ if (rc)
++ return -EINTR;
++
++ if (client->error)
++ return -EIO;
++ }
++
++ return len;
++}
++
++static __poll_t aspeed_xdma_poll(struct file *file,
++ struct poll_table_struct *wait)
++{
++ __poll_t mask = 0;
++ __poll_t req = poll_requested_events(wait);
++ struct aspeed_xdma_client *client = file->private_data;
++ struct aspeed_xdma *ctx = client->ctx;
++
++ if (req & (EPOLLIN | EPOLLRDNORM)) {
++ if (READ_ONCE(client->in_progress))
++ poll_wait(file, &ctx->wait, wait);
++
++ if (!READ_ONCE(client->in_progress)) {
++ if (READ_ONCE(client->error))
++ mask |= EPOLLERR;
++ else
++ mask |= EPOLLIN | EPOLLRDNORM;
++ }
++ }
++
++ if (req & (EPOLLOUT | EPOLLWRNORM)) {
++ if (READ_ONCE(ctx->current_client))
++ poll_wait(file, &ctx->wait, wait);
++
++ if (!READ_ONCE(ctx->current_client))
++ mask |= EPOLLOUT | EPOLLWRNORM;
++ }
++
++ return mask;
++}
++
++static long aspeed_xdma_ioctl(struct file *file, unsigned int cmd,
++ unsigned long param)
++{
++ unsigned long flags;
++ struct aspeed_xdma_client *client = file->private_data;
++ struct aspeed_xdma *ctx = client->ctx;
++
++ switch (cmd) {
++ case ASPEED_XDMA_IOCTL_RESET:
++ spin_lock_irqsave(&ctx->engine_lock, flags);
++ if (ctx->in_reset) {
++ spin_unlock_irqrestore(&ctx->engine_lock, flags);
++ return 0;
++ }
++
++ ctx->in_reset = true;
++ spin_unlock_irqrestore(&ctx->engine_lock, flags);
++
++ if (READ_ONCE(ctx->current_client))
++ dev_warn(ctx->dev,
++ "User reset with transfer in progress.\n");
++
++ aspeed_xdma_reset(ctx);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static void aspeed_xdma_vma_close(struct vm_area_struct *vma)
++{
++ int rc;
++ struct aspeed_xdma_client *client = vma->vm_private_data;
++
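++	/* Wait for any in-flight transfer to finish before freeing the buffer. */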
++ rc = wait_event_interruptible(client->ctx->wait, !client->in_progress);
++ if (rc)
++ return;
++
++ gen_pool_free(client->ctx->pool, (unsigned long)client->virt,
++ client->size);
++ trace_xdma_unmap(client);
++
++ client->virt = NULL;
++ client->phys = 0;
++ client->size = 0;
++}
++
++static const struct vm_operations_struct aspeed_xdma_vm_ops = {
++ .close = aspeed_xdma_vma_close,
++};
++
++static int aspeed_xdma_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ int rc;
++ struct aspeed_xdma_client *client = file->private_data;
++ struct aspeed_xdma *ctx = client->ctx;
++
++ /* restrict file to one mapping */
++ if (client->size)
++ return -EBUSY;
++
++ client->size = vma->vm_end - vma->vm_start;
++ client->virt = gen_pool_dma_alloc(ctx->pool, client->size,
++ &client->phys);
++ if (!client->virt) {
++ trace_xdma_mmap_error(client, 0UL);
++ client->phys = 0;
++ client->size = 0;
++ return -ENOMEM;
++ }
++
++ vma->vm_pgoff = (client->phys - ctx->mem_phys) >> PAGE_SHIFT;
++ vma->vm_ops = &aspeed_xdma_vm_ops;
++ vma->vm_private_data = client;
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++
++ rc = io_remap_pfn_range(vma, vma->vm_start, client->phys >> PAGE_SHIFT,
++ client->size, vma->vm_page_prot);
++ if (rc) {
++ dev_warn(ctx->dev, "mmap err: v[%08lx] to p[%pa], s[%08x]\n",
++ vma->vm_start, &client->phys, client->size);
++
++ gen_pool_free(ctx->pool, (unsigned long)client->virt,
++ client->size);
++
++ trace_xdma_mmap_error(client, vma->vm_start);
++ client->virt = NULL;
++ client->phys = 0;
++ client->size = 0;
++ return rc;
++ }
++
++ trace_xdma_mmap(client);
++ dev_dbg(ctx->dev, "mmap: v[%08lx] to p[%pa], s[%08x]\n",
++ vma->vm_start, &client->phys, client->size);
++
++ return 0;
++}
++
++static int aspeed_xdma_open(struct inode *inode, struct file *file)
++{
++ struct miscdevice *misc = file->private_data;
++ struct aspeed_xdma *ctx = container_of(misc, struct aspeed_xdma, misc);
++ struct aspeed_xdma_client *client = kzalloc(sizeof(*client),
++ GFP_KERNEL);
++
++ if (!client)
++ return -ENOMEM;
++
++ kobject_get(&ctx->kobj);
++ client->ctx = ctx;
++ file->private_data = client;
++ return 0;
++}
++
++static int aspeed_xdma_release(struct inode *inode, struct file *file)
++{
++ bool reset = false;
++ unsigned long flags;
++ struct aspeed_xdma_client *client = file->private_data;
++ struct aspeed_xdma *ctx = client->ctx;
++
++ spin_lock_irqsave(&ctx->client_lock, flags);
++ if (client == ctx->current_client) {
++ spin_lock(&ctx->engine_lock);
++ if (ctx->in_reset) {
++ ctx->current_client = NULL;
++ } else {
++ ctx->in_reset = true;
++ reset = true;
++ }
++ spin_unlock(&ctx->engine_lock);
++ }
++ spin_unlock_irqrestore(&ctx->client_lock, flags);
++
++ if (reset)
++ aspeed_xdma_reset(ctx);
++
++ if (client->virt) {
++ gen_pool_free(ctx->pool, (unsigned long)client->virt,
++ client->size);
++ trace_xdma_unmap(client);
++ }
++
++ kfree(client);
++ kobject_put(&ctx->kobj);
++ return 0;
++}
++
++static const struct file_operations aspeed_xdma_fops = {
++ .owner = THIS_MODULE,
++ .write = aspeed_xdma_write,
++ .poll = aspeed_xdma_poll,
++ .unlocked_ioctl = aspeed_xdma_ioctl,
++ .mmap = aspeed_xdma_mmap,
++ .open = aspeed_xdma_open,
++ .release = aspeed_xdma_release,
++};
++
++static int aspeed_xdma_init_scu(struct aspeed_xdma *ctx, struct device *dev)
++{
++ struct regmap *scu = syscon_regmap_lookup_by_phandle(dev->of_node,
++ "aspeed,scu");
++
++ if (!IS_ERR(scu)) {
++ u32 selection;
++ bool pcie_device_bmc = true;
++ const u32 bmc = SCU_PCIE_CONF_BMC_EN |
++ SCU_PCIE_CONF_BMC_EN_MSI | SCU_PCIE_CONF_BMC_EN_IRQ |
++ SCU_PCIE_CONF_BMC_EN_DMA;
++ const u32 vga = SCU_PCIE_CONF_VGA_EN |
++ SCU_PCIE_CONF_VGA_EN_MSI | SCU_PCIE_CONF_VGA_EN_IRQ |
++ SCU_PCIE_CONF_VGA_EN_DMA;
++ const char *pcie = NULL;
++
++ if (!of_property_read_string(dev->of_node,
++ "aspeed,pcie-device", &pcie)) {
++ if (!strcmp(pcie, "vga")) {
++ pcie_device_bmc = false;
++ } else if (strcmp(pcie, "bmc")) {
++ dev_err(dev,
++ "Invalid pcie-device property %s.\n",
++ pcie);
++ return -EINVAL;
++ }
++ }
++
++ if (pcie_device_bmc) {
++ selection = bmc;
++ regmap_update_bits(scu, ctx->chip->scu_bmc_class,
++ SCU_BMC_CLASS_REV_MASK,
++ SCU_BMC_CLASS_REV_XDMA);
++ } else {
++ selection = vga;
++ }
++
++ regmap_update_bits(scu, ctx->chip->scu_pcie_conf, bmc | vga,
++ selection);
++
++ if (ctx->chip->scu_misc_ctrl) {
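++			/*
++			 * Chips with an extended command queue address register
++			 * (ast2700) use different SCU client and disable bits
++			 * than the ast2600.
++			 */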
++ u32 mask = (ctx->chip->regs.bmc_cmdq_addr_ext)
++ ? SCU_AST2700_MISC_CTRL_XDMA_CLIENT
++ : SCU_AST2600_MISC_CTRL_XDMA_BMC;
++ u32 disable = (ctx->chip->regs.bmc_cmdq_addr_ext)
++ ? DEBUG_CTRL_AST2700_XDMA_DISABLE
++ : DEBUG_CTRL_AST2600_XDMA_DISABLE;
++
++ regmap_update_bits(scu, ctx->chip->scu_misc_ctrl,
++ mask, mask);
++
++ regmap_update_bits(scu, SCU_AST2600_DEBUG_CTRL,
++ disable, 0);
++ }
++
++ if (ctx->chip->scu_pcie_ctrl) {
++ regmap_update_bits(scu, ctx->chip->scu_pcie_ctrl,
++ SCU_AST2700_PCIE_CTRL_DMA_EN,
++ SCU_AST2700_PCIE_CTRL_DMA_EN);
++ }
++ } else {
++ dev_warn(dev, "Unable to configure PCIe: %ld; continuing.\n",
++ PTR_ERR(scu));
++ }
++
++ return 0;
++}
++
++static void aspeed_xdma_kobject_release(struct kobject *kobj)
++{
++ struct aspeed_xdma *ctx = container_of(kobj, struct aspeed_xdma, kobj);
++
++ if (ctx->pcie_irq >= 0)
++ free_irq(ctx->pcie_irq, ctx);
++
++ gen_pool_free(ctx->pool, (unsigned long)ctx->cmdq, XDMA_CMDQ_SIZE);
++
++ gen_pool_destroy(ctx->pool);
++
++ dma_free_coherent(ctx->dev, ctx->mem_size, ctx->mem_virt,
++ ctx->mem_coherent);
++
++ if (ctx->reset_rc)
++ reset_control_put(ctx->reset_rc);
++ reset_control_put(ctx->reset);
++
++ clk_put(ctx->clock);
++
++ free_irq(ctx->irq, ctx);
++
++ iounmap(ctx->base);
++ release_mem_region(ctx->res_start, ctx->res_size);
++
++ kfree(ctx);
++}
++
++static struct kobj_type aspeed_xdma_kobject_type = {
++ .release = aspeed_xdma_kobject_release,
++};
++
++static int aspeed_xdma_iomap(struct aspeed_xdma *ctx,
++ struct platform_device *pdev)
++{
++ resource_size_t size;
++ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++
++ if (!res)
++ return -ENOMEM;
++
++ size = resource_size(res);
++ if (!request_mem_region(res->start, size, dev_name(ctx->dev)))
++ return -ENOMEM;
++
++ ctx->base = ioremap(res->start, size);
++ if (!ctx->base) {
++ release_mem_region(res->start, size);
++ return -ENOMEM;
++ }
++
++ ctx->res_start = res->start;
++ ctx->res_size = size;
++
++ return 0;
++}
++
++static int aspeed_xdma_probe(struct platform_device *pdev)
++{
++ int rc, id;
++ struct aspeed_xdma *ctx;
++ struct reserved_mem *mem;
++ struct device *dev = &pdev->dev;
++ struct device_node *memory_region;
++ const void *md = of_device_get_match_data(dev);
++ bool rc_f;
++
++ if (!md)
++ return -ENODEV;
++
++ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
++ if (!ctx)
++ return -ENOMEM;
++
++ ctx->chip = md;
++ ctx->dev = dev;
++ platform_set_drvdata(pdev, ctx);
++ spin_lock_init(&ctx->client_lock);
++ spin_lock_init(&ctx->engine_lock);
++ INIT_WORK(&ctx->reset_work, aspeed_xdma_reset_work);
++ init_waitqueue_head(&ctx->wait);
++
++ rc_f = of_find_property(dev->of_node, "pcie_rc", NULL) ? 1 : 0;
++
++ rc = aspeed_xdma_iomap(ctx, pdev);
++ if (rc) {
++ dev_err(dev, "Failed to map registers.\n");
++ goto err_nomap;
++ }
++
++ ctx->irq = platform_get_irq(pdev, 0);
++ if (ctx->irq < 0) {
++ dev_err(dev, "Failed to find IRQ.\n");
++ rc = ctx->irq;
++ goto err_noirq;
++ }
++
++ rc = request_irq(ctx->irq, aspeed_xdma_irq, 0, dev_name(dev), ctx);
++ if (rc < 0) {
++ dev_err(dev, "Failed to request IRQ %d.\n", ctx->irq);
++ goto err_noirq;
++ }
++
++ ctx->clock = clk_get(dev, NULL);
++ if (IS_ERR(ctx->clock)) {
++ dev_err(dev, "Failed to request clock.\n");
++ rc = PTR_ERR(ctx->clock);
++ goto err_noclk;
++ }
++
++ ctx->reset = reset_control_get_exclusive(dev, NULL);
++ if (IS_ERR(ctx->reset)) {
++ dev_err(dev, "Failed to request reset control.\n");
++ rc = PTR_ERR(ctx->reset);
++ goto err_noreset;
++ }
++
++ if (rc_f) {
++ ctx->reset_rc = reset_control_get_exclusive(dev, "root-complex");
++ if (IS_ERR(ctx->reset_rc)) {
++ dev_dbg(dev, "Failed to request reset RC control.\n");
++ ctx->reset_rc = NULL;
++ }
++ }
++
++ memory_region = of_parse_phandle(dev->of_node, "memory-region", 0);
++ if (!memory_region) {
++ dev_err(dev, "Failed to find memory-region.\n");
++ rc = -ENOMEM;
++ goto err_nomem;
++ }
++
++ mem = of_reserved_mem_lookup(memory_region);
++ of_node_put(memory_region);
++ if (!mem) {
++ dev_err(dev, "Failed to find reserved memory.\n");
++ rc = -ENOMEM;
++ goto err_nomem;
++ }
++
++ ctx->mem_phys = mem->base;
++ ctx->mem_size = mem->size;
++
++ rc = of_reserved_mem_device_init(dev);
++ if (rc) {
++ dev_err(dev, "Failed to init reserved memory.\n");
++ goto err_nomem;
++ }
++
++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
++ if (rc) {
++ dev_err(dev, "Failed to mask DMA.\n");
++ goto err_nomem;
++ }
++
++ ctx->mem_virt = dma_alloc_coherent(dev, ctx->mem_size,
++ &ctx->mem_coherent, 0);
++ if (!ctx->mem_virt) {
++ dev_err(dev, "Failed to allocate reserved memory.\n");
++ rc = -ENOMEM;
++ goto err_nomem;
++ }
++
++ ctx->pool = gen_pool_create(ilog2(PAGE_SIZE), -1);
++ if (!ctx->pool) {
++ dev_err(dev, "Failed to setup genalloc pool.\n");
++ rc = -ENOMEM;
++ goto err_nopool;
++ }
++
++ rc = gen_pool_add_virt(ctx->pool, (unsigned long)ctx->mem_virt,
++ ctx->mem_phys, ctx->mem_size, -1);
++ if (rc) {
++ dev_err(ctx->dev, "Failed to add memory to genalloc pool.\n");
++ goto err_pool_scu_clk;
++ }
++
++ rc = aspeed_xdma_init_scu(ctx, dev);
++ if (rc)
++ goto err_pool_scu_clk;
++
++ rc = clk_prepare_enable(ctx->clock);
++ if (rc) {
++ dev_err(dev, "Failed to enable the clock.\n");
++ goto err_pool_scu_clk;
++ }
++
++ if (ctx->reset_rc) {
++ rc = reset_control_deassert(ctx->reset_rc);
++ if (rc) {
++ dev_err(dev, "Failed to clear the RC reset.\n");
++ goto err_reset_rc;
++ }
++ usleep_range(XDMA_ENGINE_SETUP_TIME_MIN_US,
++ XDMA_ENGINE_SETUP_TIME_MAX_US);
++ }
++
++ rc = reset_control_deassert(ctx->reset);
++ if (rc) {
++ dev_err(dev, "Failed to clear the reset.\n");
++ goto err_reset;
++ }
++ usleep_range(XDMA_ENGINE_SETUP_TIME_MIN_US,
++ XDMA_ENGINE_SETUP_TIME_MAX_US);
++
++ ctx->cmdq = gen_pool_dma_alloc(ctx->pool, XDMA_CMDQ_SIZE,
++ &ctx->cmdq_phys);
++ if (!ctx->cmdq) {
++ dev_err(ctx->dev, "Failed to genalloc cmdq.\n");
++ rc = -ENOMEM;
++ goto err_pool;
++ }
++
++ aspeed_xdma_init_eng(ctx);
++
++ id = of_alias_get_id(dev->of_node, "xdma");
++ if (id < 0)
++ id = 0;
++
++ ctx->misc.minor = MISC_DYNAMIC_MINOR;
++ ctx->misc.fops = &aspeed_xdma_fops;
++ ctx->misc.name = kasprintf(GFP_KERNEL, "%s%d", DEVICE_NAME, id);
++ ctx->misc.parent = dev;
++ rc = misc_register(&ctx->misc);
++ if (rc) {
++ dev_err(dev, "Failed to register xdma miscdevice.\n");
++ goto err_misc;
++ }
++
++ /*
++ * This interrupt could fire immediately so only request it once the
++ * engine and driver are initialized.
++ */
++ ctx->pcie_irq = platform_get_irq(pdev, 1);
++ if (ctx->pcie_irq < 0) {
++ dev_warn(dev, "Failed to find PCI-E IRQ.\n");
++ } else {
++ rc = request_irq(ctx->pcie_irq, aspeed_xdma_pcie_irq,
++ IRQF_SHARED, dev_name(dev), ctx);
++ if (rc < 0) {
++ dev_warn(dev, "Failed to request PCI-E IRQ %d.\n", rc);
++ ctx->pcie_irq = -1;
++ }
++ }
++
++ kobject_init(&ctx->kobj, &aspeed_xdma_kobject_type);
++ return 0;
++
++err_misc:
++ gen_pool_free(ctx->pool, (unsigned long)ctx->cmdq, XDMA_CMDQ_SIZE);
++err_pool:
++ reset_control_assert(ctx->reset);
++err_reset:
++ if (ctx->reset_rc)
++ reset_control_assert(ctx->reset_rc);
++err_reset_rc:
++ clk_disable_unprepare(ctx->clock);
++err_pool_scu_clk:
++ gen_pool_destroy(ctx->pool);
++err_nopool:
++ dma_free_coherent(ctx->dev, ctx->mem_size, ctx->mem_virt,
++ ctx->mem_coherent);
++err_nomem:
++ if (ctx->reset_rc)
++ reset_control_put(ctx->reset_rc);
++ reset_control_put(ctx->reset);
++err_noreset:
++ clk_put(ctx->clock);
++err_noclk:
++ free_irq(ctx->irq, ctx);
++err_noirq:
++ iounmap(ctx->base);
++ release_mem_region(ctx->res_start, ctx->res_size);
++err_nomap:
++ kfree(ctx);
++ return rc;
++}
++
++static int aspeed_xdma_remove(struct platform_device *pdev)
++{
++ struct aspeed_xdma *ctx = platform_get_drvdata(pdev);
++
++ reset_control_assert(ctx->reset);
++ if (ctx->reset_rc)
++ reset_control_assert(ctx->reset_rc);
++ clk_disable_unprepare(ctx->clock);
++
++ aspeed_xdma_done(ctx, true);
++
++ misc_deregister(&ctx->misc);
++ kobject_put(&ctx->kobj);
++
++ return 0;
++}
++
++static const struct aspeed_xdma_chip aspeed_ast2500_xdma_chip = {
++ .control = XDMA_AST2500_CTRL_US_COMP | XDMA_AST2500_CTRL_DS_COMP |
++ XDMA_AST2500_CTRL_DS_DIRTY | XDMA_AST2500_CTRL_DS_SIZE_256 |
++ XDMA_AST2500_CTRL_DS_TIMEOUT | XDMA_AST2500_CTRL_DS_CHECK_ID,
++ .scu_bmc_class = SCU_AST2500_BMC_CLASS_REV,
++ .scu_misc_ctrl = 0,
++ .scu_pcie_conf = SCU_AST2500_PCIE_CONF,
++ .scu_pcie_ctrl = 0,
++ .queue_entry_size = XDMA_AST2500_QUEUE_ENTRY_SIZE,
++ .regs = {
++ .bmc_cmdq_addr = XDMA_AST2500_BMC_CMDQ_ADDR,
++ .bmc_cmdq_addr_ext = 0,
++ .bmc_cmdq_endp = XDMA_AST2500_BMC_CMDQ_ENDP,
++ .bmc_cmdq_writep = XDMA_AST2500_BMC_CMDQ_WRITEP,
++ .bmc_cmdq_readp = XDMA_AST2500_BMC_CMDQ_READP,
++ .control = XDMA_AST2500_CTRL,
++ .status = XDMA_AST2500_STATUS,
++ },
++ .status_bits = {
++ .us_comp = XDMA_AST2500_STATUS_US_COMP,
++ .ds_comp = XDMA_AST2500_STATUS_DS_COMP,
++ .ds_dirty = XDMA_AST2500_STATUS_DS_DIRTY,
++ },
++ .set_cmd = aspeed_xdma_ast2500_set_cmd,
++};
++
++static const struct aspeed_xdma_chip aspeed_ast2600_xdma_chip = {
++ .control = XDMA_AST2600_CTRL_US_COMP | XDMA_AST2600_CTRL_DS_COMP |
++ XDMA_AST2600_CTRL_DS_DIRTY | XDMA_AST2600_CTRL_DS_SIZE_256,
++ .scu_bmc_class = SCU_AST2600_BMC_CLASS_REV,
++ .scu_misc_ctrl = SCU_AST2600_MISC_CTRL,
++ .scu_pcie_conf = SCU_AST2600_PCIE_CONF,
++ .scu_pcie_ctrl = 0,
++ .queue_entry_size = XDMA_AST2600_QUEUE_ENTRY_SIZE,
++ .regs = {
++ .bmc_cmdq_addr = XDMA_AST2600_BMC_CMDQ_ADDR,
++ .bmc_cmdq_addr_ext = 0,
++ .bmc_cmdq_endp = XDMA_AST2600_BMC_CMDQ_ENDP,
++ .bmc_cmdq_writep = XDMA_AST2600_BMC_CMDQ_WRITEP,
++ .bmc_cmdq_readp = XDMA_AST2600_BMC_CMDQ_READP,
++ .control = XDMA_AST2600_CTRL,
++ .status = XDMA_AST2600_STATUS,
++ },
++ .status_bits = {
++ .us_comp = XDMA_AST2600_STATUS_US_COMP,
++ .ds_comp = XDMA_AST2600_STATUS_DS_COMP,
++ .ds_dirty = XDMA_AST2600_STATUS_DS_DIRTY,
++ },
++ .set_cmd = aspeed_xdma_ast2600_set_cmd,
++};
++
++static const struct aspeed_xdma_chip aspeed_ast2700_xdma0_chip = {
++ .control = XDMA_AST2700_CTRL_US_COMP | XDMA_AST2700_CTRL_DS_COMP |
++ XDMA_AST2700_CTRL_DS_DIRTY | XDMA_AST2700_CTRL_DS_SIZE_256,
++ .scu_bmc_class = SCU_AST2700_PCIE0_BMC_CLASS_REV,
++ .scu_misc_ctrl = SCU_AST2600_MISC_CTRL,
++ .scu_pcie_conf = SCU_AST2700_PCIE0_CONF,
++ .scu_pcie_ctrl = SCU_AST2700_PCIE0_CTRL,
++ .queue_entry_size = XDMA_AST2700_QUEUE_ENTRY_SIZE,
++ .regs = {
++ .bmc_cmdq_addr = XDMA_AST2700_BMC_CMDQ_ADDR0,
++ .bmc_cmdq_addr_ext = XDMA_AST2700_BMC_CMDQ_ADDR1,
++ .bmc_cmdq_endp = XDMA_AST2700_BMC_CMDQ_ENDP,
++ .bmc_cmdq_writep = XDMA_AST2700_BMC_CMDQ_WRITEP,
++ .bmc_cmdq_readp = XDMA_AST2700_BMC_CMDQ_READP,
++ .control = XDMA_AST2700_CTRL,
++ .status = XDMA_AST2700_STATUS,
++ },
++ .status_bits = {
++ .us_comp = XDMA_AST2700_STATUS_US_COMP,
++ .ds_comp = XDMA_AST2700_STATUS_DS_COMP,
++ .ds_dirty = XDMA_AST2700_STATUS_DS_DIRTY,
++ },
++ .set_cmd = aspeed_xdma_ast2700_set_cmd,
++};
++
++static const struct aspeed_xdma_chip aspeed_ast2700_xdma1_chip = {
++ .control = XDMA_AST2700_CTRL_US_COMP | XDMA_AST2700_CTRL_DS_COMP |
++ XDMA_AST2700_CTRL_DS_DIRTY,
++ .scu_bmc_class = SCU_AST2700_PCIE1_BMC_CLASS_REV,
++ .scu_misc_ctrl = SCU_AST2600_MISC_CTRL,
++ .scu_pcie_conf = SCU_AST2700_PCIE1_CONF,
++ .scu_pcie_ctrl = SCU_AST2700_PCIE1_CTRL,
++ .queue_entry_size = XDMA_AST2700_QUEUE_ENTRY_SIZE,
++ .regs = {
++ .bmc_cmdq_addr = XDMA_AST2700_BMC_CMDQ_ADDR0,
++ .bmc_cmdq_addr_ext = XDMA_AST2700_BMC_CMDQ_ADDR1,
++ .bmc_cmdq_endp = XDMA_AST2700_BMC_CMDQ_ENDP,
++ .bmc_cmdq_writep = XDMA_AST2700_BMC_CMDQ_WRITEP,
++ .bmc_cmdq_readp = XDMA_AST2700_BMC_CMDQ_READP,
++ .control = XDMA_AST2700_CTRL,
++ .status = XDMA_AST2700_STATUS,
++ },
++ .status_bits = {
++ .us_comp = XDMA_AST2700_STATUS_US_COMP,
++ .ds_comp = XDMA_AST2700_STATUS_DS_COMP,
++ .ds_dirty = XDMA_AST2700_STATUS_DS_DIRTY,
++ },
++ .set_cmd = aspeed_xdma_ast2700_set_cmd,
++};
++
++static const struct of_device_id aspeed_xdma_match[] = {
++ {
++ .compatible = "aspeed,ast2500-xdma",
++ .data = &aspeed_ast2500_xdma_chip,
++ },
++ {
++ .compatible = "aspeed,ast2600-xdma",
++ .data = &aspeed_ast2600_xdma_chip,
++ },
++ {
++ .compatible = "aspeed,ast2700-xdma0",
++ .data = &aspeed_ast2700_xdma0_chip,
++ },
++ {
++ .compatible = "aspeed,ast2700-xdma1",
++ .data = &aspeed_ast2700_xdma1_chip,
++ },
++ { },
++};
++
++static struct platform_driver aspeed_xdma_driver = {
++ .probe = aspeed_xdma_probe,
++ .remove = aspeed_xdma_remove,
++ .driver = {
++ .name = DEVICE_NAME,
++ .of_match_table = aspeed_xdma_match,
++ },
++};
++
++module_platform_driver(aspeed_xdma_driver);
++
++MODULE_AUTHOR("Eddie James");
++MODULE_DESCRIPTION("ASPEED XDMA Engine Driver");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/soc/aspeed/ast2500-espi.c b/drivers/soc/aspeed/ast2500-espi.c
+new file mode 100644
+index 000000000..a9de566f7
+--- /dev/null
++++ b/drivers/soc/aspeed/ast2500-espi.c
+@@ -0,0 +1,1739 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright 2023 Aspeed Technology Inc.
++ */
++#include <linux/io.h>
++#include <linux/irq.h>
++#include <linux/clk.h>
++#include <linux/sizes.h>
++#include <linux/module.h>
++#include <linux/bitfield.h>
++#include <linux/of_device.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/miscdevice.h>
++#include <linux/dma-mapping.h>
++#include <linux/uaccess.h>
++#include <linux/vmalloc.h>
++#include <linux/poll.h>
++#include <linux/delay.h>
++
++#include "ast2500-espi.h"
++
++#define DEVICE_NAME "aspeed-espi"
++
++#define PERIF_MCYC_UNLOCK 0xfedc756e
++#define PERIF_MCYC_ALIGN SZ_64K
++
++#define FLASH_SAFS_ALIGN SZ_16M
++
++struct ast2500_espi_perif {
++ struct {
++ bool enable;
++ void *virt;
++ dma_addr_t taddr;
++ uint32_t saddr;
++ uint32_t size;
++ } mcyc;
++
++ struct {
++ bool enable;
++ void *np_tx_virt;
++ dma_addr_t np_tx_addr;
++ void *pc_tx_virt;
++ dma_addr_t pc_tx_addr;
++ void *pc_rx_virt;
++ dma_addr_t pc_rx_addr;
++ } dma;
++
++ bool rx_ready;
++ wait_queue_head_t wq;
++
++ spinlock_t lock;
++ struct mutex np_tx_mtx;
++ struct mutex pc_tx_mtx;
++ struct mutex pc_rx_mtx;
++
++ struct miscdevice mdev;
++};
++
++struct ast2500_espi_vw {
++ struct {
++ bool hw_mode;
++ uint32_t val;
++ } gpio;
++
++ struct miscdevice mdev;
++};
++
++struct ast2500_espi_oob {
++ struct {
++ bool enable;
++ void *tx_virt;
++ dma_addr_t tx_addr;
++ void *rx_virt;
++ dma_addr_t rx_addr;
++ } dma;
++
++ bool rx_ready;
++ wait_queue_head_t wq;
++
++ spinlock_t lock;
++ struct mutex tx_mtx;
++ struct mutex rx_mtx;
++
++ struct miscdevice mdev;
++};
++
++struct ast2500_espi_flash {
++ struct {
++ uint32_t mode;
++ phys_addr_t taddr;
++ uint32_t size;
++ } safs;
++
++ struct {
++ bool enable;
++ void *tx_virt;
++ dma_addr_t tx_addr;
++ void *rx_virt;
++ dma_addr_t rx_addr;
++ } dma;
++
++ bool rx_ready;
++ wait_queue_head_t wq;
++
++ spinlock_t lock;
++ struct mutex rx_mtx;
++ struct mutex tx_mtx;
++
++ struct miscdevice mdev;
++};
++
++struct ast2500_espi {
++ struct device *dev;
++ void __iomem *regs;
++ struct clk *clk;
++ int irq;
++
++ struct ast2500_espi_perif perif;
++ struct ast2500_espi_vw vw;
++ struct ast2500_espi_oob oob;
++ struct ast2500_espi_flash flash;
++};
++
++/* peripheral channel (CH0) */
++static long ast2500_espi_perif_pc_get_rx(struct file *fp,
++ struct ast2500_espi_perif *perif,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2500_espi *espi;
++ struct espi_comm_hdr *hdr;
++ unsigned long flags;
++ uint32_t pkt_len;
++ uint8_t *pkt;
++ int i, rc;
++
++ espi = container_of(perif, struct ast2500_espi, perif);
++
++ if (fp->f_flags & O_NONBLOCK) {
++ if (!mutex_trylock(&perif->pc_rx_mtx))
++ return -EAGAIN;
++
++ if (!perif->rx_ready) {
++ rc = -ENODATA;
++ goto unlock_mtx_n_out;
++ }
++ } else {
++ mutex_lock(&perif->pc_rx_mtx);
++
++ if (!perif->rx_ready) {
++ rc = wait_event_interruptible(perif->wq, perif->rx_ready);
++ if (rc == -ERESTARTSYS) {
++ rc = -EINTR;
++ goto unlock_mtx_n_out;
++ }
++ }
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++ * part is written to HW registers
++ */
++ reg = readl(espi->regs + ESPI_PERIF_PC_RX_CTRL);
++ cyc = FIELD_GET(ESPI_PERIF_PC_RX_CTRL_CYC, reg);
++ tag = FIELD_GET(ESPI_PERIF_PC_RX_CTRL_TAG, reg);
++ len = FIELD_GET(ESPI_PERIF_PC_RX_CTRL_LEN, reg);
++
++ /*
++ * calculate the length of the rest part of the
++ * eSPI packet to be read from HW and copied to
++ * user space.
++ */
++ switch (cyc) {
++ case ESPI_PERIF_MSG:
++ pkt_len = sizeof(struct espi_perif_msg);
++ break;
++ case ESPI_PERIF_MSG_D:
++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) +
++ sizeof(struct espi_perif_msg);
++ break;
++ case ESPI_PERIF_SUC_CMPLT_D_MIDDLE:
++ case ESPI_PERIF_SUC_CMPLT_D_FIRST:
++ case ESPI_PERIF_SUC_CMPLT_D_LAST:
++ case ESPI_PERIF_SUC_CMPLT_D_ONLY:
++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) +
++ sizeof(struct espi_perif_cmplt);
++ break;
++ case ESPI_PERIF_SUC_CMPLT:
++ case ESPI_PERIF_UNSUC_CMPLT:
++ pkt_len = sizeof(struct espi_perif_cmplt);
++ break;
++ default:
++ rc = -EFAULT;
++ goto unlock_mtx_n_out;
++ }
++
++ if (ioc->pkt_len < pkt_len) {
++ rc = -EINVAL;
++ goto unlock_mtx_n_out;
++ }
++
++ pkt = vmalloc(pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_mtx_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++ hdr->cyc = cyc;
++ hdr->tag = tag;
++ hdr->len_h = len >> 8;
++ hdr->len_l = len & 0xff;
++
++ if (perif->dma.enable) {
++ memcpy(hdr + 1, perif->dma.pc_rx_virt, pkt_len - sizeof(*hdr));
++ } else {
++ for (i = sizeof(*hdr); i < pkt_len; ++i)
++			pkt[i] = readl(espi->regs + ESPI_PERIF_PC_RX_DATA) & 0xff;
++ }
++
++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ spin_lock_irqsave(&perif->lock, flags);
++
++ writel(ESPI_PERIF_PC_RX_CTRL_SERV_PEND, espi->regs + ESPI_PERIF_PC_RX_CTRL);
++ perif->rx_ready = 0;
++
++ spin_unlock_irqrestore(&perif->lock, flags);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_mtx_n_out:
++ mutex_unlock(&perif->pc_rx_mtx);
++
++ return rc;
++}
++
++static long ast2500_espi_perif_pc_put_tx(struct file *fp,
++ struct ast2500_espi_perif *perif,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2500_espi *espi;
++ struct espi_comm_hdr *hdr;
++ uint8_t *pkt;
++ int i, rc;
++
++ espi = container_of(perif, struct ast2500_espi, perif);
++
++ if (!mutex_trylock(&perif->pc_tx_mtx))
++ return -EAGAIN;
++
++ reg = readl(espi->regs + ESPI_PERIF_PC_TX_CTRL);
++ if (reg & ESPI_PERIF_PC_TX_CTRL_TRIG_PEND) {
++ rc = -EBUSY;
++ goto unlock_n_out;
++ }
++
++ pkt = vmalloc(ioc->pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++
++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++ * part is written to HW registers
++ */
++ if (perif->dma.enable) {
++ memcpy(perif->dma.pc_tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr));
++ dma_wmb();
++ } else {
++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i)
++ writel(pkt[i], espi->regs + ESPI_PERIF_PC_TX_DATA);
++ }
++
++ cyc = hdr->cyc;
++ tag = hdr->tag;
++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff);
++
++ reg = FIELD_PREP(ESPI_PERIF_PC_TX_CTRL_CYC, cyc)
++ | FIELD_PREP(ESPI_PERIF_PC_TX_CTRL_TAG, tag)
++ | FIELD_PREP(ESPI_PERIF_PC_TX_CTRL_LEN, len)
++ | ESPI_PERIF_PC_TX_CTRL_TRIG_PEND;
++ writel(reg, espi->regs + ESPI_PERIF_PC_TX_CTRL);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_n_out:
++ mutex_unlock(&perif->pc_tx_mtx);
++
++ return rc;
++}
++
++static long ast2500_espi_perif_np_put_tx(struct file *fp,
++ struct ast2500_espi_perif *perif,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2500_espi *espi;
++ struct espi_comm_hdr *hdr;
++ uint8_t *pkt;
++ int i, rc;
++
++ espi = container_of(perif, struct ast2500_espi, perif);
++
++ if (!mutex_trylock(&perif->np_tx_mtx))
++ return -EAGAIN;
++
++ reg = readl(espi->regs + ESPI_PERIF_NP_TX_CTRL);
++ if (reg & ESPI_PERIF_NP_TX_CTRL_TRIG_PEND) {
++ rc = -EBUSY;
++ goto unlock_n_out;
++ }
++
++ pkt = vmalloc(ioc->pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++
++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++ * part is written to HW registers
++ */
++ if (perif->dma.enable) {
++ memcpy(perif->dma.np_tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr));
++ dma_wmb();
++ } else {
++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i)
++ writel(pkt[i], espi->regs + ESPI_PERIF_NP_TX_DATA);
++ }
++
++ cyc = hdr->cyc;
++ tag = hdr->tag;
++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff);
++
++ reg = FIELD_PREP(ESPI_PERIF_NP_TX_CTRL_CYC, cyc)
++ | FIELD_PREP(ESPI_PERIF_NP_TX_CTRL_TAG, tag)
++ | FIELD_PREP(ESPI_PERIF_NP_TX_CTRL_LEN, len)
++ | ESPI_PERIF_NP_TX_CTRL_TRIG_PEND;
++ writel(reg, espi->regs + ESPI_PERIF_NP_TX_CTRL);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_n_out:
++ mutex_unlock(&perif->np_tx_mtx);
++
++ return rc;
++}
++
++static long ast2500_espi_perif_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
++{
++ struct ast2500_espi_perif *perif;
++ struct aspeed_espi_ioc ioc;
++
++ perif = container_of(fp->private_data, struct ast2500_espi_perif, mdev);
++
++ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc)))
++ return -EFAULT;
++
++ if (ioc.pkt_len > ESPI_MAX_PKT_LEN)
++ return -EINVAL;
++
++ switch (cmd) {
++ case ASPEED_ESPI_PERIF_PC_GET_RX:
++ return ast2500_espi_perif_pc_get_rx(fp, perif, &ioc);
++ case ASPEED_ESPI_PERIF_PC_PUT_TX:
++ return ast2500_espi_perif_pc_put_tx(fp, perif, &ioc);
++ case ASPEED_ESPI_PERIF_NP_PUT_TX:
++ return ast2500_espi_perif_np_put_tx(fp, perif, &ioc);
++ default:
++ break;
++	}
++
++ return -EINVAL;
++}
++
++static int ast2500_espi_perif_mmap(struct file *fp, struct vm_area_struct *vma)
++{
++ struct ast2500_espi_perif *perif;
++ unsigned long vm_size;
++ pgprot_t vm_prot;
++
++ perif = container_of(fp->private_data, struct ast2500_espi_perif, mdev);
++ if (!perif->mcyc.enable)
++ return -EPERM;
++
++ vm_size = vma->vm_end - vma->vm_start;
++ vm_prot = vma->vm_page_prot;
++
++ if (((vma->vm_pgoff << PAGE_SHIFT) + vm_size) > perif->mcyc.size)
++ return -EINVAL;
++
++ vm_prot = pgprot_noncached(vm_prot);
++
++ if (remap_pfn_range(vma, vma->vm_start,
++ (perif->mcyc.taddr >> PAGE_SHIFT) + vma->vm_pgoff,
++ vm_size, vm_prot))
++ return -EAGAIN;
++
++ return 0;
++}
++
++static const struct file_operations ast2500_espi_perif_fops = {
++ .owner = THIS_MODULE,
++ .mmap = ast2500_espi_perif_mmap,
++ .unlocked_ioctl = ast2500_espi_perif_ioctl,
++};
++
++static void ast2500_espi_perif_isr(struct ast2500_espi *espi)
++{
++ struct ast2500_espi_perif *perif;
++ unsigned long flags;
++ uint32_t sts;
++
++ perif = &espi->perif;
++
++ sts = readl(espi->regs + ESPI_INT_STS);
++
++ if (sts & ESPI_INT_STS_PERIF_PC_RX_CMPLT) {
++ writel(ESPI_INT_STS_PERIF_PC_RX_CMPLT, espi->regs + ESPI_INT_STS);
++
++ spin_lock_irqsave(&perif->lock, flags);
++ perif->rx_ready = true;
++ spin_unlock_irqrestore(&perif->lock, flags);
++
++ wake_up_interruptible(&perif->wq);
++ }
++}
++
++static void ast2500_espi_perif_reset(struct ast2500_espi *espi)
++{
++ struct ast2500_espi_perif *perif;
++ struct device *dev;
++ uint32_t reg, mask;
++
++ dev = espi->dev;
++
++ perif = &espi->perif;
++
++ reg = readl(espi->regs + ESPI_INT_EN);
++ reg &= ~(ESPI_INT_EN_PERIF);
++ writel(reg, espi->regs + ESPI_INT_EN);
++ writel(ESPI_INT_STS_PERIF, espi->regs + ESPI_INT_STS);
++
++ reg = readl(espi->regs + ESPI_CTRL);
++ reg &= ~(ESPI_CTRL_PERIF_NP_TX_SW_RST
++ | ESPI_CTRL_PERIF_NP_RX_SW_RST
++ | ESPI_CTRL_PERIF_PC_TX_SW_RST
++ | ESPI_CTRL_PERIF_PC_RX_SW_RST
++ | ESPI_CTRL_PERIF_NP_TX_DMA_EN
++ | ESPI_CTRL_PERIF_PC_TX_DMA_EN
++ | ESPI_CTRL_PERIF_PC_RX_DMA_EN
++ | ESPI_CTRL_PERIF_SW_RDY);
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ udelay(1);
++
++ reg |= (ESPI_CTRL_PERIF_NP_TX_SW_RST
++ | ESPI_CTRL_PERIF_NP_RX_SW_RST
++ | ESPI_CTRL_PERIF_PC_TX_SW_RST
++ | ESPI_CTRL_PERIF_PC_RX_SW_RST);
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ if (perif->mcyc.enable) {
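++		/*
++		 * The memory cycle mask register appears to be key-protected;
++		 * write the unlock magic before programming the decode mask.
++		 */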
++ mask = ~(perif->mcyc.size - 1);
++ writel(PERIF_MCYC_UNLOCK, espi->regs + ESPI_PERIF_MCYC_MASK);
++ writel(mask, espi->regs + ESPI_PERIF_MCYC_MASK);
++
++ writel(perif->mcyc.saddr, espi->regs + ESPI_PERIF_MCYC_SADDR);
++ writel(perif->mcyc.taddr, espi->regs + ESPI_PERIF_MCYC_TADDR);
++ }
++
++ if (perif->dma.enable) {
++ writel(perif->dma.np_tx_addr, espi->regs + ESPI_PERIF_NP_TX_DMA);
++ writel(perif->dma.pc_tx_addr, espi->regs + ESPI_PERIF_PC_TX_DMA);
++ writel(perif->dma.pc_rx_addr, espi->regs + ESPI_PERIF_PC_RX_DMA);
++
++ reg = readl(espi->regs + ESPI_CTRL)
++ | ESPI_CTRL_PERIF_NP_TX_DMA_EN
++ | ESPI_CTRL_PERIF_PC_TX_DMA_EN
++ | ESPI_CTRL_PERIF_PC_RX_DMA_EN;
++ writel(reg, espi->regs + ESPI_CTRL);
++ }
++
++ reg = readl(espi->regs + ESPI_INT_EN) | ESPI_INT_EN_PERIF_PC_RX_CMPLT;
++ writel(reg, espi->regs + ESPI_INT_EN);
++
++ reg = readl(espi->regs + ESPI_CTRL) | ESPI_CTRL_PERIF_SW_RDY;
++ writel(reg, espi->regs + ESPI_CTRL);
++}
++
++static int ast2500_espi_perif_probe(struct ast2500_espi *espi)
++{
++ struct ast2500_espi_perif *perif;
++ struct device *dev;
++ int rc;
++
++ dev = espi->dev;
++
++ perif = &espi->perif;
++
++ init_waitqueue_head(&perif->wq);
++
++ spin_lock_init(&perif->lock);
++
++ mutex_init(&perif->np_tx_mtx);
++ mutex_init(&perif->pc_tx_mtx);
++ mutex_init(&perif->pc_rx_mtx);
++
++ perif->mcyc.enable = of_property_read_bool(dev->of_node, "perif-mcyc-enable");
++ if (perif->mcyc.enable) {
++ rc = of_property_read_u32(dev->of_node, "perif-mcyc-src-addr", &perif->mcyc.saddr);
++ if (rc || !IS_ALIGNED(perif->mcyc.saddr, PERIF_MCYC_ALIGN)) {
++ dev_err(dev, "cannot get 64KB-aligned memory cycle host address\n");
++ return -ENODEV;
++ }
++
++ rc = of_property_read_u32(dev->of_node, "perif-mcyc-size", &perif->mcyc.size);
++ if (rc || !IS_ALIGNED(perif->mcyc.size, PERIF_MCYC_ALIGN)) {
++ dev_err(dev, "cannot get 64KB-aligned memory cycle size\n");
++ return -EINVAL;
++ }
++
++ perif->mcyc.virt = dmam_alloc_coherent(dev, perif->mcyc.size,
++ &perif->mcyc.taddr, GFP_KERNEL);
++ if (!perif->mcyc.virt) {
++ dev_err(dev, "cannot allocate memory cycle\n");
++ return -ENOMEM;
++ }
++ }
++
++ perif->dma.enable = of_property_read_bool(dev->of_node, "perif-dma-mode");
++ if (perif->dma.enable) {
++ perif->dma.pc_tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE,
++ &perif->dma.pc_tx_addr, GFP_KERNEL);
++ if (!perif->dma.pc_tx_virt) {
++ dev_err(dev, "cannot allocate posted TX DMA buffer\n");
++ return -ENOMEM;
++ }
++
++ perif->dma.pc_rx_virt = dmam_alloc_coherent(dev, PAGE_SIZE,
++ &perif->dma.pc_rx_addr, GFP_KERNEL);
++ if (!perif->dma.pc_rx_virt) {
++ dev_err(dev, "cannot allocate posted RX DMA buffer\n");
++ return -ENOMEM;
++ }
++
++ perif->dma.np_tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE,
++ &perif->dma.np_tx_addr, GFP_KERNEL);
++ if (!perif->dma.np_tx_virt) {
++ dev_err(dev, "cannot allocate non-posted TX DMA buffer\n");
++ return -ENOMEM;
++ }
++ }
++
++ perif->mdev.parent = dev;
++ perif->mdev.minor = MISC_DYNAMIC_MINOR;
++ perif->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-peripheral", DEVICE_NAME);
++ perif->mdev.fops = &ast2500_espi_perif_fops;
++ rc = misc_register(&perif->mdev);
++ if (rc) {
++ dev_err(dev, "cannot register device %s\n", perif->mdev.name);
++ return rc;
++ }
++
++ ast2500_espi_perif_reset(espi);
++
++ return 0;
++}
++
++static int ast2500_espi_perif_remove(struct ast2500_espi *espi)
++{
++ struct ast2500_espi_perif *perif;
++ struct device *dev;
++ uint32_t reg;
++
++ dev = espi->dev;
++
++ perif = &espi->perif;
++
++ reg = readl(espi->regs + ESPI_INT_EN);
++ reg &= ~(ESPI_INT_EN_PERIF);
++ writel(reg, espi->regs + ESPI_INT_EN);
++
++ reg = readl(espi->regs + ESPI_CTRL);
++ reg &= ~(ESPI_CTRL_PERIF_NP_TX_DMA_EN
++ | ESPI_CTRL_PERIF_PC_TX_DMA_EN
++ | ESPI_CTRL_PERIF_PC_RX_DMA_EN
++ | ESPI_CTRL_PERIF_SW_RDY);
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ if (perif->mcyc.enable)
++ dmam_free_coherent(dev, perif->mcyc.size, perif->mcyc.virt,
++ perif->mcyc.taddr);
++
++ if (perif->dma.enable) {
++ dmam_free_coherent(dev, PAGE_SIZE, perif->dma.np_tx_virt,
++ perif->dma.np_tx_addr);
++ dmam_free_coherent(dev, PAGE_SIZE, perif->dma.pc_tx_virt,
++ perif->dma.pc_tx_addr);
++ dmam_free_coherent(dev, PAGE_SIZE, perif->dma.pc_rx_virt,
++ perif->dma.pc_rx_addr);
++ }
++
++ mutex_destroy(&perif->np_tx_mtx);
++ mutex_destroy(&perif->pc_tx_mtx);
++ mutex_destroy(&perif->pc_rx_mtx);
++
++ misc_deregister(&perif->mdev);
++
++ return 0;
++}
++
++/* virtual wire channel (CH1) */
++static long ast2500_espi_vw_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
++{
++ struct ast2500_espi_vw *vw;
++ struct ast2500_espi *espi;
++ uint32_t gpio;
++
++ vw = container_of(fp->private_data, struct ast2500_espi_vw, mdev);
++ espi = container_of(vw, struct ast2500_espi, vw);
++ gpio = vw->gpio.val;
++
++ switch (cmd) {
++ case ASPEED_ESPI_VW_GET_GPIO_VAL:
++ if (put_user(gpio, (uint32_t __user *)arg))
++ return -EFAULT;
++ break;
++ case ASPEED_ESPI_VW_PUT_GPIO_VAL:
++ if (get_user(gpio, (uint32_t __user *)arg))
++ return -EFAULT;
++
++ writel(gpio, espi->regs + ESPI_VW_GPIO_VAL);
++ break;
++ default:
++ return -EINVAL;
++	}
++
++ return 0;
++}
++
++static const struct file_operations ast2500_espi_vw_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = ast2500_espi_vw_ioctl,
++};
++
++static void ast2500_espi_vw_isr(struct ast2500_espi *espi)
++{
++ struct ast2500_espi_vw *vw;
++ uint32_t reg, sts, sts_sysevt;
++
++ vw = &espi->vw;
++
++ sts = readl(espi->regs + ESPI_INT_STS);
++
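++	/* Acknowledge reset and suspend warning system events with their ACK bits. */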
++ if (sts & ESPI_INT_STS_VW_SYSEVT) {
++ sts_sysevt = readl(espi->regs + ESPI_VW_SYSEVT_INT_STS);
++
++ if (sts_sysevt & ESPI_VW_SYSEVT_INT_STS_HOST_RST_WARN) {
++ reg = readl(espi->regs + ESPI_VW_SYSEVT) | ESPI_VW_SYSEVT_HOST_RST_ACK;
++ writel(reg, espi->regs + ESPI_VW_SYSEVT);
++ writel(ESPI_VW_SYSEVT_INT_STS_HOST_RST_WARN, espi->regs + ESPI_VW_SYSEVT_INT_STS);
++ }
++
++ if (sts_sysevt & ESPI_VW_SYSEVT_INT_STS_OOB_RST_WARN) {
++ reg = readl(espi->regs + ESPI_VW_SYSEVT) | ESPI_VW_SYSEVT_OOB_RST_ACK;
++ writel(reg, espi->regs + ESPI_VW_SYSEVT);
++ writel(ESPI_VW_SYSEVT_INT_STS_OOB_RST_WARN, espi->regs + ESPI_VW_SYSEVT_INT_STS);
++ }
++
++ writel(ESPI_INT_STS_VW_SYSEVT, espi->regs + ESPI_INT_STS);
++ }
++
++ if (sts & ESPI_INT_STS_VW_SYSEVT1) {
++ sts_sysevt = readl(espi->regs + ESPI_VW_SYSEVT1_INT_STS);
++
++ if (sts_sysevt & ESPI_VW_SYSEVT1_INT_STS_SUSPEND_WARN) {
++ reg = readl(espi->regs + ESPI_VW_SYSEVT1) | ESPI_VW_SYSEVT1_SUSPEND_ACK;
++ writel(reg, espi->regs + ESPI_VW_SYSEVT1);
++ writel(ESPI_VW_SYSEVT1_INT_STS_SUSPEND_WARN, espi->regs + ESPI_VW_SYSEVT1_INT_STS);
++ }
++
++ writel(ESPI_INT_STS_VW_SYSEVT1, espi->regs + ESPI_INT_STS);
++ }
++
++ if (sts & ESPI_INT_STS_VW_GPIO) {
++ vw->gpio.val = readl(espi->regs + ESPI_VW_GPIO_VAL);
++ writel(ESPI_INT_STS_VW_GPIO, espi->regs + ESPI_INT_STS);
++ }
++}
++
++static void ast2500_espi_vw_reset(struct ast2500_espi *espi)
++{
++ uint32_t reg;
++ struct ast2500_espi_vw *vw = &espi->vw;
++
++ reg = readl(espi->regs + ESPI_INT_EN);
++ reg &= ~(ESPI_INT_EN_VW);
++ writel(reg, espi->regs + ESPI_INT_EN);
++ writel(ESPI_INT_STS_VW, espi->regs + ESPI_INT_STS);
++
++ vw->gpio.val = readl(espi->regs + ESPI_VW_GPIO_VAL);
++
++ /* Host Reset Warn and OOB Reset Warn system events */
++ reg = readl(espi->regs + ESPI_VW_SYSEVT_INT_T2)
++ | ESPI_VW_SYSEVT_INT_T2_HOST_RST_WARN
++ | ESPI_VW_SYSEVT_INT_T2_OOB_RST_WARN;
++ writel(reg, espi->regs + ESPI_VW_SYSEVT_INT_T2);
++
++ reg = readl(espi->regs + ESPI_VW_SYSEVT_INT_EN)
++ | ESPI_VW_SYSEVT_INT_EN_HOST_RST_WARN
++ | ESPI_VW_SYSEVT_INT_EN_OOB_RST_WARN;
++ writel(reg, espi->regs + ESPI_VW_SYSEVT_INT_EN);
++
++ /* Suspend Warn system event */
++ reg = readl(espi->regs + ESPI_VW_SYSEVT1_INT_T0) | ESPI_VW_SYSEVT1_INT_T0_SUSPEND_WARN;
++ writel(reg, espi->regs + ESPI_VW_SYSEVT1_INT_T0);
++
++ reg = readl(espi->regs + ESPI_VW_SYSEVT1_INT_EN) | ESPI_VW_SYSEVT1_INT_EN_SUSPEND_WARN;
++ writel(reg, espi->regs + ESPI_VW_SYSEVT1_INT_EN);
++
++ reg = readl(espi->regs + ESPI_INT_EN)
++ | ESPI_INT_EN_VW_GPIO
++ | ESPI_INT_EN_VW_SYSEVT
++ | ESPI_INT_EN_VW_SYSEVT1;
++ writel(reg, espi->regs + ESPI_INT_EN);
++
++ reg = readl(espi->regs + ESPI_VW_SYSEVT)
++ | ESPI_VW_SYSEVT_SLV_BOOT_STS
++ | ESPI_VW_SYSEVT_SLV_BOOT_DONE;
++ writel(reg, espi->regs + ESPI_VW_SYSEVT);
++
++ reg = readl(espi->regs + ESPI_CTRL)
++ | ((vw->gpio.hw_mode) ? 0 : ESPI_CTRL_VW_GPIO_SW)
++ | ESPI_CTRL_VW_SW_RDY;
++ writel(reg, espi->regs + ESPI_CTRL);
++}
++
++static int ast2500_espi_vw_probe(struct ast2500_espi *espi)
++{
++ int rc;
++ struct device *dev = espi->dev;
++ struct ast2500_espi_vw *vw = &espi->vw;
++
++ writel(0x0, espi->regs + ESPI_VW_SYSEVT_INT_EN);
++ writel(0xffffffff, espi->regs + ESPI_VW_SYSEVT_INT_STS);
++
++ writel(0x0, espi->regs + ESPI_VW_SYSEVT1_INT_EN);
++ writel(0xffffffff, espi->regs + ESPI_VW_SYSEVT1_INT_STS);
++
++ vw->gpio.hw_mode = of_property_read_bool(dev->of_node, "vw-gpio-hw-mode");
++
++ vw->mdev.parent = dev;
++ vw->mdev.minor = MISC_DYNAMIC_MINOR;
++ vw->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-vw", DEVICE_NAME);
++ vw->mdev.fops = &ast2500_espi_vw_fops;
++ rc = misc_register(&vw->mdev);
++ if (rc) {
++ dev_err(dev, "cannot register device %s\n", vw->mdev.name);
++ return rc;
++ }
++
++ ast2500_espi_vw_reset(espi);
++
++ return 0;
++}
++
++static int ast2500_espi_vw_remove(struct ast2500_espi *espi)
++{
++ struct ast2500_espi_vw *vw;
++ uint32_t reg;
++
++ vw = &espi->vw;
++
++ reg = readl(espi->regs + ESPI_INT_EN);
++ reg &= ~(ESPI_INT_EN_VW);
++ writel(reg, espi->regs + ESPI_INT_EN);
++
++ misc_deregister(&vw->mdev);
++
++ return 0;
++}
++
++/* out-of-band channel (CH2) */
++static long ast2500_espi_oob_get_rx(struct file *fp,
++ struct ast2500_espi_oob *oob,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2500_espi *espi;
++ struct espi_comm_hdr *hdr;
++ unsigned long flags;
++ uint32_t pkt_len;
++ uint8_t *pkt;
++ int i, rc;
++
++ espi = container_of(oob, struct ast2500_espi, oob);
++
++ if (fp->f_flags & O_NONBLOCK) {
++ if (!mutex_trylock(&oob->rx_mtx))
++ return -EAGAIN;
++
++ if (!oob->rx_ready) {
++ rc = -ENODATA;
++ goto unlock_mtx_n_out;
++ }
++ } else {
++ mutex_lock(&oob->rx_mtx);
++
++ if (!oob->rx_ready) {
++ rc = wait_event_interruptible(oob->wq, oob->rx_ready);
++ if (rc == -ERESTARTSYS) {
++ rc = -EINTR;
++ goto unlock_mtx_n_out;
++ }
++ }
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++ * part is written to HW registers
++ */
++ reg = readl(espi->regs + ESPI_OOB_RX_CTRL);
++ cyc = FIELD_GET(ESPI_OOB_RX_CTRL_CYC, reg);
++ tag = FIELD_GET(ESPI_OOB_RX_CTRL_TAG, reg);
++ len = FIELD_GET(ESPI_OOB_RX_CTRL_LEN, reg);
++
++ /*
++ * calculate the length of the rest part of the
++ * eSPI packet to be read from HW and copied to
++ * user space.
++ */
++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) + sizeof(struct espi_comm_hdr);
++
++ if (ioc->pkt_len < pkt_len) {
++ rc = -EINVAL;
++ goto unlock_mtx_n_out;
++ }
++
++ pkt = vmalloc(pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_mtx_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++ hdr->cyc = cyc;
++ hdr->tag = tag;
++ hdr->len_h = len >> 8;
++ hdr->len_l = len & 0xff;
++
++ if (oob->dma.enable) {
++ memcpy(hdr + 1, oob->dma.rx_virt, pkt_len - sizeof(*hdr));
++ } else {
++ for (i = sizeof(*hdr); i < pkt_len; ++i)
++ pkt[i] = readl(espi->regs + ESPI_OOB_RX_DATA) & 0xff;
++ }
++
++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ spin_lock_irqsave(&oob->lock, flags);
++
++ writel(ESPI_OOB_RX_CTRL_SERV_PEND, espi->regs + ESPI_OOB_RX_CTRL);
++ oob->rx_ready = 0;
++
++ spin_unlock_irqrestore(&oob->lock, flags);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_mtx_n_out:
++ mutex_unlock(&oob->rx_mtx);
++
++ return rc;
++}
++
++static long ast2500_espi_oob_put_tx(struct file *fp,
++ struct ast2500_espi_oob *oob,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2500_espi *espi;
++ struct espi_comm_hdr *hdr;
++ uint8_t *pkt;
++ int i, rc;
++
++ espi = container_of(oob, struct ast2500_espi, oob);
++
++ if (!mutex_trylock(&oob->tx_mtx))
++ return -EAGAIN;
++
++ reg = readl(espi->regs + ESPI_OOB_TX_CTRL);
++ if (reg & ESPI_OOB_TX_CTRL_TRIG_PEND) {
++ rc = -EBUSY;
++ goto unlock_mtx_n_out;
++ }
++
++ if (ioc->pkt_len > ESPI_MAX_PKT_LEN) {
++ rc = -EINVAL;
++ goto unlock_mtx_n_out;
++ }
++
++ pkt = vmalloc(ioc->pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_mtx_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++
++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++ * part is written to HW registers
++ */
++ if (oob->dma.enable) {
++ memcpy(oob->dma.tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr));
++ dma_wmb();
++ } else {
++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i)
++ writel(pkt[i], espi->regs + ESPI_OOB_TX_DATA);
++ }
++
++ cyc = hdr->cyc;
++ tag = hdr->tag;
++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff);
++
++ reg = FIELD_PREP(ESPI_OOB_TX_CTRL_CYC, cyc)
++ | FIELD_PREP(ESPI_OOB_TX_CTRL_TAG, tag)
++ | FIELD_PREP(ESPI_OOB_TX_CTRL_LEN, len)
++ | ESPI_OOB_TX_CTRL_TRIG_PEND;
++ writel(reg, espi->regs + ESPI_OOB_TX_CTRL);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_mtx_n_out:
++ mutex_unlock(&oob->tx_mtx);
++
++ return rc;
++}
++
++static long ast2500_espi_oob_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
++{
++ struct ast2500_espi_oob *oob;
++ struct aspeed_espi_ioc ioc;
++
++ oob = container_of(fp->private_data, struct ast2500_espi_oob, mdev);
++
++ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc)))
++ return -EFAULT;
++
++ if (ioc.pkt_len > ESPI_MAX_PKT_LEN)
++ return -EINVAL;
++
++ switch (cmd) {
++ case ASPEED_ESPI_OOB_GET_RX:
++ return ast2500_espi_oob_get_rx(fp, oob, &ioc);
++ case ASPEED_ESPI_OOB_PUT_TX:
++ return ast2500_espi_oob_put_tx(fp, oob, &ioc);
++ default:
++ break;
++	}
++
++ return -EINVAL;
++}
++
++static const struct file_operations ast2500_espi_oob_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = ast2500_espi_oob_ioctl,
++};
++
++static void ast2500_espi_oob_isr(struct ast2500_espi *espi)
++{
++ struct ast2500_espi_oob *oob;
++ unsigned long flags;
++ uint32_t sts;
++
++ oob = &espi->oob;
++
++ sts = readl(espi->regs + ESPI_INT_STS);
++
++ if (sts & ESPI_INT_STS_OOB_RX_CMPLT) {
++ writel(ESPI_INT_STS_OOB_RX_CMPLT, espi->regs + ESPI_INT_STS);
++
++ spin_lock_irqsave(&oob->lock, flags);
++ oob->rx_ready = true;
++ spin_unlock_irqrestore(&oob->lock, flags);
++
++ wake_up_interruptible(&oob->wq);
++ }
++}
++
++static void ast2500_espi_oob_reset(struct ast2500_espi *espi)
++{
++ struct ast2500_espi_oob *oob;
++ uint32_t reg;
++
++ oob = &espi->oob;
++
++ reg = readl(espi->regs + ESPI_INT_EN);
++ reg &= ~(ESPI_INT_EN_OOB);
++ writel(reg, espi->regs + ESPI_INT_EN);
++ writel(ESPI_INT_STS_OOB, espi->regs + ESPI_INT_STS);
++
++ reg = readl(espi->regs + ESPI_CTRL);
++ reg &= ~(ESPI_CTRL_OOB_TX_SW_RST
++ | ESPI_CTRL_OOB_RX_SW_RST
++ | ESPI_CTRL_OOB_TX_DMA_EN
++ | ESPI_CTRL_OOB_RX_DMA_EN
++ | ESPI_CTRL_OOB_SW_RDY);
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ udelay(1);
++
++ reg |= (ESPI_CTRL_OOB_TX_SW_RST | ESPI_CTRL_OOB_RX_SW_RST);
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ if (oob->dma.enable) {
++ writel(oob->dma.tx_addr, espi->regs + ESPI_OOB_TX_DMA);
++ writel(oob->dma.rx_addr, espi->regs + ESPI_OOB_RX_DMA);
++
++ reg = readl(espi->regs + ESPI_CTRL)
++ | ESPI_CTRL_OOB_TX_DMA_EN
++ | ESPI_CTRL_OOB_RX_DMA_EN;
++ writel(reg, espi->regs + ESPI_CTRL);
++ }
++
++ reg = readl(espi->regs + ESPI_INT_EN) | ESPI_INT_EN_OOB_RX_CMPLT;
++ writel(reg, espi->regs + ESPI_INT_EN);
++
++ reg = readl(espi->regs + ESPI_CTRL) | ESPI_CTRL_OOB_SW_RDY;
++ writel(reg, espi->regs + ESPI_CTRL);
++}
++
++static int ast2500_espi_oob_probe(struct ast2500_espi *espi)
++{
++ struct ast2500_espi_oob *oob;
++ struct device *dev;
++ int rc;
++
++ dev = espi->dev;
++
++ oob = &espi->oob;
++
++ init_waitqueue_head(&oob->wq);
++
++ spin_lock_init(&oob->lock);
++
++ mutex_init(&oob->tx_mtx);
++ mutex_init(&oob->rx_mtx);
++
++ oob->dma.enable = of_property_read_bool(dev->of_node, "oob-dma-mode");
++ if (oob->dma.enable) {
++ oob->dma.tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, &oob->dma.tx_addr, GFP_KERNEL);
++ if (!oob->dma.tx_virt) {
++ dev_err(dev, "cannot allocate DMA TX buffer\n");
++ return -ENOMEM;
++ }
++
++ oob->dma.rx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, &oob->dma.rx_addr, GFP_KERNEL);
++ if (!oob->dma.rx_virt) {
++			dev_err(dev, "cannot allocate DMA RX buffer\n");
++ return -ENOMEM;
++ }
++ }
++
++ oob->mdev.parent = dev;
++ oob->mdev.minor = MISC_DYNAMIC_MINOR;
++ oob->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-oob", DEVICE_NAME);
++ oob->mdev.fops = &ast2500_espi_oob_fops;
++ rc = misc_register(&oob->mdev);
++ if (rc) {
++ dev_err(dev, "cannot register device %s\n", oob->mdev.name);
++ return rc;
++ }
++
++ ast2500_espi_oob_reset(espi);
++
++ return 0;
++}
++
++static int ast2500_espi_oob_remove(struct ast2500_espi *espi)
++{
++ struct ast2500_espi_oob *oob;
++ struct device *dev;
++ uint32_t reg;
++
++ dev = espi->dev;
++
++ oob = &espi->oob;
++
++ reg = readl(espi->regs + ESPI_INT_EN);
++ reg &= ~(ESPI_INT_EN_OOB);
++ writel(reg, espi->regs + ESPI_INT_EN);
++
++ reg = readl(espi->regs + ESPI_CTRL);
++ reg &= ~(ESPI_CTRL_OOB_TX_DMA_EN
++ | ESPI_CTRL_OOB_RX_DMA_EN
++ | ESPI_CTRL_OOB_SW_RDY);
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ if (oob->dma.enable) {
++ dmam_free_coherent(dev, PAGE_SIZE, oob->dma.tx_virt, oob->dma.tx_addr);
++ dmam_free_coherent(dev, PAGE_SIZE, oob->dma.rx_virt, oob->dma.rx_addr);
++ }
++
++ mutex_destroy(&oob->tx_mtx);
++ mutex_destroy(&oob->rx_mtx);
++
++ misc_deregister(&oob->mdev);
++
++ return 0;
++}
++
++/* flash channel (CH3) */
++static long ast2500_espi_flash_get_rx(struct file *fp,
++ struct ast2500_espi_flash *flash,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2500_espi *espi;
++ struct espi_comm_hdr *hdr;
++ unsigned long flags;
++ uint32_t pkt_len;
++ uint8_t *pkt;
++ int i, rc;
++
++ rc = 0;
++
++ espi = container_of(flash, struct ast2500_espi, flash);
++
++ if (fp->f_flags & O_NONBLOCK) {
++ if (!mutex_trylock(&flash->rx_mtx))
++ return -EAGAIN;
++
++ if (!flash->rx_ready) {
++ rc = -ENODATA;
++ goto unlock_mtx_n_out;
++ }
++ } else {
++ mutex_lock(&flash->rx_mtx);
++
++ if (!flash->rx_ready) {
++ rc = wait_event_interruptible(flash->wq, flash->rx_ready);
++ if (rc == -ERESTARTSYS) {
++ rc = -EINTR;
++ goto unlock_mtx_n_out;
++ }
++ }
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++ * part is written to HW registers
++ */
++ reg = readl(espi->regs + ESPI_FLASH_RX_CTRL);
++ cyc = FIELD_GET(ESPI_FLASH_RX_CTRL_CYC, reg);
++ tag = FIELD_GET(ESPI_FLASH_RX_CTRL_TAG, reg);
++ len = FIELD_GET(ESPI_FLASH_RX_CTRL_LEN, reg);
++
++ /*
++ * calculate the length of the rest part of the
++ * eSPI packet to be read from HW and copied to
++ * user space.
++ */
++ switch (cyc) {
++ case ESPI_FLASH_WRITE:
++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) +
++ sizeof(struct espi_flash_rwe);
++ break;
++ case ESPI_FLASH_READ:
++ case ESPI_FLASH_ERASE:
++ pkt_len = sizeof(struct espi_flash_rwe);
++ break;
++ case ESPI_FLASH_SUC_CMPLT_D_MIDDLE:
++ case ESPI_FLASH_SUC_CMPLT_D_FIRST:
++ case ESPI_FLASH_SUC_CMPLT_D_LAST:
++ case ESPI_FLASH_SUC_CMPLT_D_ONLY:
++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) +
++ sizeof(struct espi_flash_cmplt);
++ break;
++ case ESPI_FLASH_SUC_CMPLT:
++ case ESPI_FLASH_UNSUC_CMPLT:
++ pkt_len = sizeof(struct espi_flash_cmplt);
++ break;
++ default:
++ rc = -EFAULT;
++ goto unlock_mtx_n_out;
++ }
++
++ if (ioc->pkt_len < pkt_len) {
++ rc = -EINVAL;
++ goto unlock_mtx_n_out;
++ }
++
++ pkt = vmalloc(pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_mtx_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++ hdr->cyc = cyc;
++ hdr->tag = tag;
++ hdr->len_h = len >> 8;
++ hdr->len_l = len & 0xff;
++
++ if (flash->dma.enable) {
++ memcpy(hdr + 1, flash->dma.rx_virt, pkt_len - sizeof(*hdr));
++ } else {
++ for (i = sizeof(*hdr); i < pkt_len; ++i)
++ pkt[i] = readl(espi->regs + ESPI_FLASH_RX_DATA) & 0xff;
++ }
++
++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ spin_lock_irqsave(&flash->lock, flags);
++
++ writel(ESPI_FLASH_RX_CTRL_SERV_PEND, espi->regs + ESPI_FLASH_RX_CTRL);
++ flash->rx_ready = 0;
++
++ spin_unlock_irqrestore(&flash->lock, flags);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_mtx_n_out:
++ mutex_unlock(&flash->rx_mtx);
++
++ return rc;
++}
++
++static long ast2500_espi_flash_put_tx(struct file *fp,
++ struct ast2500_espi_flash *flash,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2500_espi *espi;
++ struct espi_comm_hdr *hdr;
++ uint8_t *pkt;
++ int i, rc;
++
++ espi = container_of(flash, struct ast2500_espi, flash);
++
++ if (!mutex_trylock(&flash->tx_mtx))
++ return -EAGAIN;
++
++ reg = readl(espi->regs + ESPI_FLASH_TX_CTRL);
++ if (reg & ESPI_FLASH_TX_CTRL_TRIG_PEND) {
++ rc = -EBUSY;
++ goto unlock_mtx_n_out;
++ }
++
++ pkt = vmalloc(ioc->pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_mtx_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++
++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++ * part is written to HW registers
++ */
++ if (flash->dma.enable) {
++ memcpy(flash->dma.tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr));
++ dma_wmb();
++ } else {
++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i)
++ writel(pkt[i], espi->regs + ESPI_FLASH_TX_DATA);
++ }
++
++ cyc = hdr->cyc;
++ tag = hdr->tag;
++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff);
++
++ reg = FIELD_PREP(ESPI_FLASH_TX_CTRL_CYC, cyc)
++ | FIELD_PREP(ESPI_FLASH_TX_CTRL_TAG, tag)
++ | FIELD_PREP(ESPI_FLASH_TX_CTRL_LEN, len)
++ | ESPI_FLASH_TX_CTRL_TRIG_PEND;
++ writel(reg, espi->regs + ESPI_FLASH_TX_CTRL);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_mtx_n_out:
++ mutex_unlock(&flash->tx_mtx);
++
++ return rc;
++}
++
++static long ast2500_espi_flash_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
++{
++ struct ast2500_espi_flash *flash;
++ struct aspeed_espi_ioc ioc;
++
++ flash = container_of(fp->private_data, struct ast2500_espi_flash, mdev);
++
++ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc)))
++ return -EFAULT;
++
++ if (ioc.pkt_len > ESPI_MAX_PKT_LEN)
++ return -EINVAL;
++
++ switch (cmd) {
++ case ASPEED_ESPI_FLASH_GET_RX:
++ return ast2500_espi_flash_get_rx(fp, flash, &ioc);
++ case ASPEED_ESPI_FLASH_PUT_TX:
++ return ast2500_espi_flash_put_tx(fp, flash, &ioc);
++ default:
++ break;
++ };
++
++ return -EINVAL;
++}
++
++static const struct file_operations ast2500_espi_flash_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = ast2500_espi_flash_ioctl,
++};
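++
++/*
++ * Illustrative userspace sketch (not part of this driver): driving the flash
++ * channel through the ioctl interface above. It assumes the misc device shows
++ * up as /dev/<DEVICE_NAME>-flash and that struct aspeed_espi_ioc, the
++ * ASPEED_ESPI_FLASH_* commands and ESPI_MAX_PKT_LEN are exported by the SDK
++ * uapi header; adjust the node path and header to the actual build.
++ *
++ *   struct aspeed_espi_ioc ioc;
++ *   uint8_t buf[ESPI_MAX_PKT_LEN];
++ *   int fd = open("/dev/aspeed-espi-flash", O_RDWR);   // hypothetical path
++ *
++ *   ioc.pkt_len = sizeof(buf);
++ *   ioc.pkt = buf;
++ *   if (ioctl(fd, ASPEED_ESPI_FLASH_GET_RX, &ioc) == 0)
++ *       ;   // buf now holds the common header followed by the payload
++ */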
++
++static void ast2500_espi_flash_isr(struct ast2500_espi *espi)
++{
++ struct ast2500_espi_flash *flash;
++ unsigned long flags;
++ uint32_t sts;
++
++ flash = &espi->flash;
++
++ sts = readl(espi->regs + ESPI_INT_STS);
++
++ if (sts & ESPI_INT_STS_FLASH_RX_CMPLT) {
++ spin_lock_irqsave(&flash->lock, flags);
++ flash->rx_ready = true;
++ spin_unlock_irqrestore(&flash->lock, flags);
++
++ wake_up_interruptible(&flash->wq);
++
++ writel(ESPI_INT_STS_FLASH_RX_CMPLT, espi->regs + ESPI_INT_STS);
++ }
++}
++
++static void ast2500_espi_flash_reset(struct ast2500_espi *espi)
++{
++ struct ast2500_espi_flash *flash = &espi->flash;
++ uint32_t reg;
++
++ reg = readl(espi->regs + ESPI_INT_EN);
++ reg &= ~(ESPI_INT_EN_FLASH);
++ writel(reg, espi->regs + ESPI_INT_EN);
++ writel(ESPI_INT_STS_FLASH, espi->regs + ESPI_INT_STS);
++
++ reg = readl(espi->regs + ESPI_CTRL);
++ reg &= ~(ESPI_CTRL_FLASH_TX_SW_RST
++ | ESPI_CTRL_FLASH_RX_SW_RST
++ | ESPI_CTRL_FLASH_TX_DMA_EN
++ | ESPI_CTRL_FLASH_RX_DMA_EN
++ | ESPI_CTRL_FLASH_SW_RDY);
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ udelay(1);
++
++ reg |= (ESPI_CTRL_FLASH_TX_SW_RST | ESPI_CTRL_FLASH_RX_SW_RST);
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ if (flash->safs.mode == SAFS_MODE_MIX) {
++ reg = FIELD_PREP(ESPI_FLASH_SAFS_TADDR_BASE, flash->safs.taddr >> 24)
++ | FIELD_PREP(ESPI_FLASH_SAFS_TADDR_MASK, (~(flash->safs.size - 1)) >> 24);
++ writel(reg, espi->regs + ESPI_FLASH_SAFS_TADDR);
++ } else {
++ reg = readl(espi->regs + ESPI_CTRL) | ESPI_CTRL_FLASH_SAFS_SW_MODE;
++ writel(reg, espi->regs + ESPI_CTRL);
++ }
++
++ if (flash->dma.enable) {
++ writel(flash->dma.tx_addr, espi->regs + ESPI_FLASH_TX_DMA);
++ writel(flash->dma.rx_addr, espi->regs + ESPI_FLASH_RX_DMA);
++
++ reg = readl(espi->regs + ESPI_CTRL)
++ | ESPI_CTRL_FLASH_TX_DMA_EN
++ | ESPI_CTRL_FLASH_RX_DMA_EN;
++ writel(reg, espi->regs + ESPI_CTRL);
++ }
++
++ reg = readl(espi->regs + ESPI_INT_EN) | ESPI_INT_EN_FLASH_RX_CMPLT;
++ writel(reg, espi->regs + ESPI_INT_EN);
++
++ reg = readl(espi->regs + ESPI_CTRL) | ESPI_CTRL_FLASH_SW_RDY;
++ writel(reg, espi->regs + ESPI_CTRL);
++}
++
++static int ast2500_espi_flash_probe(struct ast2500_espi *espi)
++{
++ struct ast2500_espi_flash *flash;
++ struct device *dev;
++ int rc;
++
++ dev = espi->dev;
++
++ flash = &espi->flash;
++
++ init_waitqueue_head(&flash->wq);
++
++ spin_lock_init(&flash->lock);
++
++ mutex_init(&flash->tx_mtx);
++ mutex_init(&flash->rx_mtx);
++
++ flash->safs.mode = SAFS_MODE_MIX;
++
++ of_property_read_u32(dev->of_node, "flash-safs-mode", &flash->safs.mode);
++ if (flash->safs.mode == SAFS_MODE_MIX) {
++ rc = of_property_read_u32(dev->of_node, "flash-safs-tgt-addr", &flash->safs.taddr);
++ if (rc || !IS_ALIGNED(flash->safs.taddr, FLASH_SAFS_ALIGN)) {
++ dev_err(dev, "cannot get 16MB-aligned SAFS target address\n");
++ return -ENODEV;
++ }
++
++ rc = of_property_read_u32(dev->of_node, "flash-safs-size", &flash->safs.size);
++ if (rc || !IS_ALIGNED(flash->safs.size, FLASH_SAFS_ALIGN)) {
++ dev_err(dev, "cannot get 16MB-aligned SAFS size\n");
++ return -ENODEV;
++ }
++ }
++
++ flash->dma.enable = of_property_read_bool(dev->of_node, "flash-dma-mode");
++ if (flash->dma.enable) {
++ flash->dma.tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, &flash->dma.tx_addr, GFP_KERNEL);
++ if (!flash->dma.tx_virt) {
++ dev_err(dev, "cannot allocate DMA TX buffer\n");
++ return -ENOMEM;
++ }
++
++ flash->dma.rx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, &flash->dma.rx_addr, GFP_KERNEL);
++ if (!flash->dma.rx_virt) {
++ dev_err(dev, "cannot allocate DMA RX buffer\n");
++ return -ENOMEM;
++ }
++ }
++
++ flash->mdev.parent = dev;
++ flash->mdev.minor = MISC_DYNAMIC_MINOR;
++ flash->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-flash", DEVICE_NAME);
++ flash->mdev.fops = &ast2500_espi_flash_fops;
++ rc = misc_register(&flash->mdev);
++ if (rc) {
++ dev_err(dev, "cannot register device %s\n", flash->mdev.name);
++ return rc;
++ }
++
++ ast2500_espi_flash_reset(espi);
++
++ return 0;
++}
++
++static int ast2500_espi_flash_remove(struct ast2500_espi *espi)
++{
++ struct ast2500_espi_flash *flash;
++ struct device *dev;
++ uint32_t reg;
++
++ dev = espi->dev;
++
++ flash = &espi->flash;
++
++ reg = readl(espi->regs + ESPI_INT_EN);
++ reg &= ~(ESPI_INT_EN_FLASH);
++ writel(reg, espi->regs + ESPI_INT_EN);
++
++ reg = readl(espi->regs + ESPI_CTRL);
++ reg &= ~(ESPI_CTRL_FLASH_TX_DMA_EN
++ | ESPI_CTRL_FLASH_RX_DMA_EN
++ | ESPI_CTRL_FLASH_SW_RDY);
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ if (flash->dma.enable) {
++ dmam_free_coherent(dev, PAGE_SIZE, flash->dma.tx_virt, flash->dma.tx_addr);
++ dmam_free_coherent(dev, PAGE_SIZE, flash->dma.rx_virt, flash->dma.rx_addr);
++ }
++
++ mutex_destroy(&flash->tx_mtx);
++ mutex_destroy(&flash->rx_mtx);
++
++ misc_deregister(&flash->mdev);
++
++ return 0;
++}
++
++/* global control */
++static irqreturn_t ast2500_espi_isr(int irq, void *arg)
++{
++ struct ast2500_espi *espi;
++ uint32_t sts;
++
++ espi = (struct ast2500_espi *)arg;
++
++ sts = readl(espi->regs + ESPI_INT_STS);
++ if (!sts)
++ return IRQ_NONE;
++
++ if (sts & ESPI_INT_STS_PERIF)
++ ast2500_espi_perif_isr(espi);
++
++ if (sts & ESPI_INT_STS_VW)
++ ast2500_espi_vw_isr(espi);
++
++ if (sts & ESPI_INT_STS_OOB)
++ ast2500_espi_oob_isr(espi);
++
++ if (sts & ESPI_INT_STS_FLASH)
++ ast2500_espi_flash_isr(espi);
++
++ if (sts & ESPI_INT_STS_RST_DEASSERT) {
++ ast2500_espi_perif_reset(espi);
++ ast2500_espi_vw_reset(espi);
++ ast2500_espi_oob_reset(espi);
++ ast2500_espi_flash_reset(espi);
++ writel(ESPI_INT_STS_RST_DEASSERT, espi->regs + ESPI_INT_STS);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static int ast2500_espi_probe(struct platform_device *pdev)
++{
++ struct ast2500_espi *espi;
++ struct resource *res;
++ struct device *dev;
++ uint32_t reg;
++ int rc;
++
++ dev = &pdev->dev;
++
++ espi = devm_kzalloc(dev, sizeof(*espi), GFP_KERNEL);
++ if (!espi)
++ return -ENOMEM;
++
++ espi->dev = dev;
++
++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
++ if (rc) {
++ dev_err(dev, "cannot set 64-bits DMA mask\n");
++ return rc;
++ }
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(dev, "cannot get resource\n");
++ return -ENODEV;
++ }
++
++ espi->regs = devm_ioremap_resource(dev, res);
++ if (IS_ERR(espi->regs)) {
++ dev_err(dev, "cannot map registers\n");
++ return PTR_ERR(espi->regs);
++ }
++
++ espi->irq = platform_get_irq(pdev, 0);
++ if (espi->irq < 0) {
++ dev_err(dev, "cannot get IRQ number\n");
++ return -ENODEV;
++ }
++
++ espi->clk = devm_clk_get(dev, NULL);
++ if (IS_ERR(espi->clk)) {
++ dev_err(dev, "cannot get clock control\n");
++ return PTR_ERR(espi->clk);
++ }
++
++ rc = clk_prepare_enable(espi->clk);
++ if (rc) {
++ dev_err(dev, "cannot enable clocks\n");
++ return rc;
++ }
++
++ reg = readl(espi->regs + ESPI_INT_EN);
++ reg &= ~ESPI_INT_EN_RST_DEASSERT;
++ writel(reg, espi->regs + ESPI_INT_EN);
++
++ rc = ast2500_espi_perif_probe(espi);
++ if (rc) {
++ dev_err(dev, "cannot init peripheral channel, rc=%d\n", rc);
++ return rc;
++ }
++
++ rc = ast2500_espi_vw_probe(espi);
++ if (rc) {
++ dev_err(dev, "cannot init vw channel, rc=%d\n", rc);
++ goto err_remove_perif;
++ }
++
++ rc = ast2500_espi_oob_probe(espi);
++ if (rc) {
++ dev_err(dev, "cannot init oob channel, rc=%d\n", rc);
++ goto err_remove_vw;
++ }
++
++ rc = ast2500_espi_flash_probe(espi);
++ if (rc) {
++ dev_err(dev, "cannot init flash channel, rc=%d\n", rc);
++ goto err_remove_oob;
++ }
++
++ rc = devm_request_irq(dev, espi->irq, ast2500_espi_isr, 0, dev_name(dev), espi);
++ if (rc) {
++ dev_err(dev, "cannot request IRQ\n");
++ goto err_remove_flash;
++ }
++
++ reg = readl(espi->regs + ESPI_INT_EN);
++ reg |= ESPI_INT_EN_RST_DEASSERT;
++ writel(reg, espi->regs + ESPI_INT_EN);
++
++ dev_set_drvdata(dev, espi);
++
++ dev_info(dev, "module loaded\n");
++
++ return 0;
++
++err_remove_flash:
++ ast2500_espi_flash_remove(espi);
++err_remove_oob:
++ ast2500_espi_oob_remove(espi);
++err_remove_vw:
++ ast2500_espi_vw_remove(espi);
++err_remove_perif:
++ ast2500_espi_perif_remove(espi);
++
++ return rc;
++}
++
++static int ast2500_espi_remove(struct platform_device *pdev)
++{
++ struct ast2500_espi *espi;
++ struct device *dev;
++ uint32_t reg;
++ int rc;
++
++ dev = &pdev->dev;
++
++ espi = (struct ast2500_espi *)dev_get_drvdata(dev);
++
++ reg = readl(espi->regs + ESPI_INT_EN);
++ reg &= ~(ESPI_INT_EN_RST_DEASSERT);
++ writel(reg, espi->regs + ESPI_INT_EN);
++
++ rc = ast2500_espi_perif_remove(espi);
++ if (rc)
++ dev_warn(dev, "cannot remove peripheral channel, rc=%d\n", rc);
++
++ rc = ast2500_espi_vw_remove(espi);
++ if (rc)
++ dev_warn(dev, "cannot remove peripheral channel, rc=%d\n", rc);
++
++ rc = ast2500_espi_oob_remove(espi);
++ if (rc)
++ dev_warn(dev, "cannot remove peripheral channel, rc=%d\n", rc);
++
++ rc = ast2500_espi_flash_remove(espi);
++ if (rc)
++ dev_warn(dev, "cannot remove peripheral channel, rc=%d\n", rc);
++
++ return 0;
++}
++
++static const struct of_device_id ast2500_espi_of_matches[] = {
++ { .compatible = "aspeed,ast2500-espi" },
++ { },
++};
++
++static struct platform_driver ast2500_espi_driver = {
++ .driver = {
++ .name = "ast2500-espi",
++ .of_match_table = ast2500_espi_of_matches,
++ },
++ .probe = ast2500_espi_probe,
++ .remove = ast2500_espi_remove,
++};
++
++module_platform_driver(ast2500_espi_driver);
++
++MODULE_AUTHOR("Chia-Wei Wang <chiawei_wang@aspeedtech.com>");
++MODULE_DESCRIPTION("Control of AST2500 eSPI Device");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/soc/aspeed/ast2500-espi.h b/drivers/soc/aspeed/ast2500-espi.h
+new file mode 100644
+index 000000000..b4ee0bc25
+--- /dev/null
++++ b/drivers/soc/aspeed/ast2500-espi.h
+@@ -0,0 +1,250 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Copyright 2023 Aspeed Technology Inc.
++ */
++#ifndef _AST2500_ESPI_H_
++#define _AST2500_ESPI_H_
++
++#include <linux/bits.h>
++#include "aspeed-espi-comm.h"
++
++/* registers */
++#define ESPI_CTRL 0x000
++#define ESPI_CTRL_FLASH_TX_SW_RST BIT(31)
++#define ESPI_CTRL_FLASH_RX_SW_RST BIT(30)
++#define ESPI_CTRL_OOB_TX_SW_RST BIT(29)
++#define ESPI_CTRL_OOB_RX_SW_RST BIT(28)
++#define ESPI_CTRL_PERIF_NP_TX_SW_RST BIT(27)
++#define ESPI_CTRL_PERIF_NP_RX_SW_RST BIT(26)
++#define ESPI_CTRL_PERIF_PC_TX_SW_RST BIT(25)
++#define ESPI_CTRL_PERIF_PC_RX_SW_RST BIT(24)
++#define ESPI_CTRL_FLASH_TX_DMA_EN BIT(23)
++#define ESPI_CTRL_FLASH_RX_DMA_EN BIT(22)
++#define ESPI_CTRL_OOB_TX_DMA_EN BIT(21)
++#define ESPI_CTRL_OOB_RX_DMA_EN BIT(20)
++#define ESPI_CTRL_PERIF_NP_TX_DMA_EN BIT(19)
++#define ESPI_CTRL_PERIF_PC_TX_DMA_EN BIT(17)
++#define ESPI_CTRL_PERIF_PC_RX_DMA_EN BIT(16)
++#define ESPI_CTRL_FLASH_SAFS_SW_MODE BIT(10)
++#define ESPI_CTRL_VW_GPIO_SW BIT(9)
++#define ESPI_CTRL_FLASH_SW_RDY BIT(7)
++#define ESPI_CTRL_OOB_SW_RDY BIT(4)
++#define ESPI_CTRL_VW_SW_RDY BIT(3)
++#define ESPI_CTRL_PERIF_SW_RDY BIT(1)
++#define ESPI_STS 0x004
++#define ESPI_INT_STS 0x008
++#define ESPI_INT_STS_RST_DEASSERT BIT(31)
++#define ESPI_INT_STS_OOB_RX_TMOUT BIT(23)
++#define ESPI_INT_STS_VW_SYSEVT1 BIT(22)
++#define ESPI_INT_STS_FLASH_TX_ERR BIT(21)
++#define ESPI_INT_STS_OOB_TX_ERR BIT(20)
++#define ESPI_INT_STS_FLASH_TX_ABT BIT(19)
++#define ESPI_INT_STS_OOB_TX_ABT BIT(18)
++#define ESPI_INT_STS_PERIF_NP_TX_ABT BIT(17)
++#define ESPI_INT_STS_PERIF_PC_TX_ABT BIT(16)
++#define ESPI_INT_STS_FLASH_RX_ABT BIT(15)
++#define ESPI_INT_STS_OOB_RX_ABT BIT(14)
++#define ESPI_INT_STS_PERIF_NP_RX_ABT BIT(13)
++#define ESPI_INT_STS_PERIF_PC_RX_ABT BIT(12)
++#define ESPI_INT_STS_PERIF_NP_TX_ERR BIT(11)
++#define ESPI_INT_STS_PERIF_PC_TX_ERR BIT(10)
++#define ESPI_INT_STS_VW_GPIO BIT(9)
++#define ESPI_INT_STS_VW_SYSEVT BIT(8)
++#define ESPI_INT_STS_FLASH_TX_CMPLT BIT(7)
++#define ESPI_INT_STS_FLASH_RX_CMPLT BIT(6)
++#define ESPI_INT_STS_OOB_TX_CMPLT BIT(5)
++#define ESPI_INT_STS_OOB_RX_CMPLT BIT(4)
++#define ESPI_INT_STS_PERIF_NP_TX_CMPLT BIT(3)
++#define ESPI_INT_STS_PERIF_PC_TX_CMPLT BIT(1)
++#define ESPI_INT_STS_PERIF_PC_RX_CMPLT BIT(0)
++#define ESPI_INT_EN 0x00c
++#define ESPI_INT_EN_RST_DEASSERT BIT(31)
++#define ESPI_INT_EN_OOB_RX_TMOUT BIT(23)
++#define ESPI_INT_EN_VW_SYSEVT1 BIT(22)
++#define ESPI_INT_EN_FLASH_TX_ERR BIT(21)
++#define ESPI_INT_EN_OOB_TX_ERR BIT(20)
++#define ESPI_INT_EN_FLASH_TX_ABT BIT(19)
++#define ESPI_INT_EN_OOB_TX_ABT BIT(18)
++#define ESPI_INT_EN_PERIF_NP_TX_ABT BIT(17)
++#define ESPI_INT_EN_PERIF_PC_TX_ABT BIT(16)
++#define ESPI_INT_EN_FLASH_RX_ABT BIT(15)
++#define ESPI_INT_EN_OOB_RX_ABT BIT(14)
++#define ESPI_INT_EN_PERIF_NP_RX_ABT BIT(13)
++#define ESPI_INT_EN_PERIF_PC_RX_ABT BIT(12)
++#define ESPI_INT_EN_PERIF_NP_TX_ERR BIT(11)
++#define ESPI_INT_EN_PERIF_PC_TX_ERR BIT(10)
++#define ESPI_INT_EN_VW_GPIO BIT(9)
++#define ESPI_INT_EN_VW_SYSEVT BIT(8)
++#define ESPI_INT_EN_FLASH_TX_CMPLT BIT(7)
++#define ESPI_INT_EN_FLASH_RX_CMPLT BIT(6)
++#define ESPI_INT_EN_OOB_TX_CMPLT BIT(5)
++#define ESPI_INT_EN_OOB_RX_CMPLT BIT(4)
++#define ESPI_INT_EN_PERIF_NP_TX_CMPLT BIT(3)
++#define ESPI_INT_EN_PERIF_PC_TX_CMPLT BIT(1)
++#define ESPI_INT_EN_PERIF_PC_RX_CMPLT BIT(0)
++#define ESPI_PERIF_PC_RX_DMA 0x010
++#define ESPI_PERIF_PC_RX_CTRL 0x014
++#define ESPI_PERIF_PC_RX_CTRL_SERV_PEND BIT(31)
++#define ESPI_PERIF_PC_RX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_PERIF_PC_RX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_PERIF_PC_RX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_PERIF_PC_RX_DATA 0x018
++#define ESPI_PERIF_PC_TX_DMA 0x020
++#define ESPI_PERIF_PC_TX_CTRL 0x024
++#define ESPI_PERIF_PC_TX_CTRL_TRIG_PEND BIT(31)
++#define ESPI_PERIF_PC_TX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_PERIF_PC_TX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_PERIF_PC_TX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_PERIF_PC_TX_DATA 0x028
++#define ESPI_PERIF_NP_TX_DMA 0x030
++#define ESPI_PERIF_NP_TX_CTRL 0x034
++#define ESPI_PERIF_NP_TX_CTRL_TRIG_PEND BIT(31)
++#define ESPI_PERIF_NP_TX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_PERIF_NP_TX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_PERIF_NP_TX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_PERIF_NP_TX_DATA 0x038
++#define ESPI_OOB_RX_DMA 0x040
++#define ESPI_OOB_RX_CTRL 0x044
++#define ESPI_OOB_RX_CTRL_SERV_PEND BIT(31)
++#define ESPI_OOB_RX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_OOB_RX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_OOB_RX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_OOB_RX_DATA 0x048
++#define ESPI_OOB_TX_DMA 0x050
++#define ESPI_OOB_TX_CTRL 0x054
++#define ESPI_OOB_TX_CTRL_TRIG_PEND BIT(31)
++#define ESPI_OOB_TX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_OOB_TX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_OOB_TX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_OOB_TX_DATA 0x058
++#define ESPI_FLASH_RX_DMA 0x060
++#define ESPI_FLASH_RX_CTRL 0x064
++#define ESPI_FLASH_RX_CTRL_SERV_PEND BIT(31)
++#define ESPI_FLASH_RX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_FLASH_RX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_FLASH_RX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_FLASH_RX_DATA 0x068
++#define ESPI_FLASH_TX_DMA 0x070
++#define ESPI_FLASH_TX_CTRL 0x074
++#define ESPI_FLASH_TX_CTRL_TRIG_PEND BIT(31)
++#define ESPI_FLASH_TX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_FLASH_TX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_FLASH_TX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_FLASH_TX_DATA 0x078
++#define ESPI_PERIF_MCYC_SADDR 0x084
++#define ESPI_PERIF_MCYC_TADDR 0x088
++#define ESPI_PERIF_MCYC_MASK 0x08c
++#define ESPI_FLASH_SAFS_TADDR 0x090
++#define ESPI_FLASH_SAFS_TADDR_BASE GENMASK(31, 24)
++#define ESPI_FLASH_SAFS_TADDR_MASK GENMASK(15, 8)
++#define ESPI_VW_SYSEVT_INT_EN 0x094
++#define ESPI_VW_SYSEVT_INT_EN_HOST_RST_WARN BIT(8)
++#define ESPI_VW_SYSEVT_INT_EN_OOB_RST_WARN BIT(6)
++#define ESPI_VW_SYSEVT 0x098
++#define ESPI_VW_SYSEVT_HOST_RST_ACK BIT(27)
++#define ESPI_VW_SYSEVT_SLV_BOOT_STS BIT(23)
++#define ESPI_VW_SYSEVT_SLV_BOOT_DONE BIT(20)
++#define ESPI_VW_SYSEVT_OOB_RST_ACK BIT(16)
++#define ESPI_VW_SYSEVT_HOST_RST_WARN BIT(8)
++#define ESPI_VW_SYSEVT_OOB_RST_WARN BIT(6)
++#define ESPI_VW_GPIO_VAL 0x09c
++#define ESPI_GEN_CAP_N_CONF 0x0a0
++#define ESPI_CH0_CAP_N_CONF 0x0a4
++#define ESPI_CH1_CAP_N_CONF 0x0a8
++#define ESPI_CH2_CAP_N_CONF 0x0ac
++#define ESPI_CH3_CAP_N_CONF 0x0b0
++#define ESPI_CH3_CAP_N_CONF2 0x0b4
++#define ESPI_VW_GPIO_DIR 0x0c0
++#define ESPI_VW_GPIO_GRP 0x0c4
++#define ESPI_VW_SYSEVT1_INT_EN 0x100
++#define ESPI_VW_SYSEVT1_INT_EN_SUSPEND_WARN BIT(0)
++#define ESPI_VW_SYSEVT1 0x104
++#define ESPI_VW_SYSEVT1_SUSPEND_ACK BIT(20)
++#define ESPI_VW_SYSEVT1_SUSPEND_WARN BIT(0)
++#define ESPI_VW_SYSEVT_INT_T0 0x110
++#define ESPI_VW_SYSEVT_INT_T1 0x114
++#define ESPI_VW_SYSEVT_INT_T2 0x118
++#define ESPI_VW_SYSEVT_INT_T2_HOST_RST_WARN BIT(8)
++#define ESPI_VW_SYSEVT_INT_T2_OOB_RST_WARN BIT(6)
++#define ESPI_VW_SYSEVT_INT_STS 0x11c
++#define ESPI_VW_SYSEVT_INT_STS_HOST_RST_WARN BIT(8)
++#define ESPI_VW_SYSEVT_INT_STS_OOB_RST_WARN BIT(6)
++#define ESPI_VW_SYSEVT1_INT_T0 0x120
++#define ESPI_VW_SYSEVT1_INT_T0_SUSPEND_WARN BIT(0)
++#define ESPI_VW_SYSEVT1_INT_T1 0x124
++#define ESPI_VW_SYSEVT1_INT_T2 0x128
++#define ESPI_VW_SYSEVT1_INT_STS 0x12c
++#define ESPI_VW_SYSEVT1_INT_STS_SUSPEND_WARN BIT(0)
++
++/* collect ESPI_INT_EN bits for convenience */
++#define ESPI_INT_EN_PERIF \
++ (ESPI_INT_EN_PERIF_NP_TX_ABT | \
++ ESPI_INT_EN_PERIF_PC_TX_ABT | \
++ ESPI_INT_EN_PERIF_NP_RX_ABT | \
++ ESPI_INT_EN_PERIF_PC_RX_ABT | \
++ ESPI_INT_EN_PERIF_NP_TX_ERR | \
++ ESPI_INT_EN_PERIF_PC_TX_ERR | \
++ ESPI_INT_EN_PERIF_NP_TX_CMPLT | \
++ ESPI_INT_EN_PERIF_PC_TX_CMPLT | \
++ ESPI_INT_EN_PERIF_PC_RX_CMPLT)
++
++#define ESPI_INT_EN_VW \
++ (ESPI_INT_EN_VW_SYSEVT1 | \
++ ESPI_INT_EN_VW_GPIO | \
++ ESPI_INT_EN_VW_SYSEVT)
++
++#define ESPI_INT_EN_OOB \
++ (ESPI_INT_EN_OOB_RX_TMOUT | \
++ ESPI_INT_EN_OOB_TX_ERR | \
++ ESPI_INT_EN_OOB_TX_ABT | \
++ ESPI_INT_EN_OOB_RX_ABT | \
++ ESPI_INT_EN_OOB_TX_CMPLT | \
++ ESPI_INT_EN_OOB_RX_CMPLT)
++
++#define ESPI_INT_EN_FLASH \
++ (ESPI_INT_EN_FLASH_TX_ERR | \
++ ESPI_INT_EN_FLASH_TX_ABT | \
++ ESPI_INT_EN_FLASH_RX_ABT | \
++ ESPI_INT_EN_FLASH_TX_CMPLT | \
++ ESPI_INT_EN_FLASH_RX_CMPLT)
++
++/* collect ESPI_INT_STS bits for convenience */
++#define ESPI_INT_STS_PERIF \
++ (ESPI_INT_STS_PERIF_NP_TX_ABT | \
++ ESPI_INT_STS_PERIF_PC_TX_ABT | \
++ ESPI_INT_STS_PERIF_NP_RX_ABT | \
++ ESPI_INT_STS_PERIF_PC_RX_ABT | \
++ ESPI_INT_STS_PERIF_NP_TX_ERR | \
++ ESPI_INT_STS_PERIF_PC_TX_ERR | \
++ ESPI_INT_STS_PERIF_NP_TX_CMPLT | \
++ ESPI_INT_STS_PERIF_PC_TX_CMPLT | \
++ ESPI_INT_STS_PERIF_PC_RX_CMPLT)
++
++#define ESPI_INT_STS_VW \
++ (ESPI_INT_STS_VW_SYSEVT1 | \
++ ESPI_INT_STS_VW_GPIO | \
++ ESPI_INT_STS_VW_SYSEVT)
++
++#define ESPI_INT_STS_OOB \
++ (ESPI_INT_STS_OOB_RX_TMOUT | \
++ ESPI_INT_STS_OOB_TX_ERR | \
++ ESPI_INT_STS_OOB_TX_ABT | \
++ ESPI_INT_STS_OOB_RX_ABT | \
++ ESPI_INT_STS_OOB_TX_CMPLT | \
++ ESPI_INT_STS_OOB_RX_CMPLT)
++
++#define ESPI_INT_STS_FLASH \
++ (ESPI_INT_STS_FLASH_TX_ERR | \
++ ESPI_INT_STS_FLASH_TX_ABT | \
++ ESPI_INT_STS_FLASH_RX_ABT | \
++ ESPI_INT_STS_FLASH_TX_CMPLT | \
++ ESPI_INT_STS_FLASH_RX_CMPLT)
++
++/* consistent with DTS property "flash-safs-mode" */
++enum ast2500_safs_mode {
++ SAFS_MODE_MIX = 0x0,
++ SAFS_MODE_SW,
++ SAFS_MODES,
++};
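++
++/*
++ * Hedged device tree sketch (illustration only, not a binding document): the
++ * ast2500-espi.c flash probe reads the properties below from the eSPI node.
++ * Values are placeholders; the target address and size must be 16MB-aligned
++ * when SAFS_MODE_MIX (0) is used.
++ *
++ *   flash-dma-mode;
++ *   flash-safs-mode = <0x0>;              // SAFS_MODE_MIX
++ *   flash-safs-tgt-addr = <0x0e000000>;   // placeholder, 16MB-aligned
++ *   flash-safs-size = <0x01000000>;       // 16MB, placeholder
++ */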
++
++#endif
+diff --git a/drivers/soc/aspeed/ast2600-espi.c b/drivers/soc/aspeed/ast2600-espi.c
+new file mode 100644
+index 000000000..c3201df31
+--- /dev/null
++++ b/drivers/soc/aspeed/ast2600-espi.c
+@@ -0,0 +1,2141 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright 2023 Aspeed Technology Inc.
++ */
++#include <linux/io.h>
++#include <linux/irq.h>
++#include <linux/clk.h>
++#include <linux/sizes.h>
++#include <linux/module.h>
++#include <linux/bitfield.h>
++#include <linux/of_device.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/miscdevice.h>
++#include <linux/dma-mapping.h>
++#include <linux/uaccess.h>
++#include <linux/vmalloc.h>
++#include <linux/poll.h>
++#include <linux/delay.h>
++#include <linux/reset.h>
++
++#include "ast2600-espi.h"
++
++#define DEVICE_NAME "aspeed-espi"
++
++#define PERIF_MCYC_ALIGN SZ_64K
++#define PERIF_MMBI_ALIGN SZ_64K
++#define PERIF_MMBI_INST_NUM 8
++
++#define OOB_DMA_RPTR_KEY 0x45538073
++#define OOB_DMA_DESC_NUM 8
++#define OOB_DMA_DESC_CUSTOM 0x4
++
++#define FLASH_SAFS_ALIGN SZ_16M
++
++struct ast2600_espi_perif_mmbi {
++ void *b2h_virt;
++ void *h2b_virt;
++ dma_addr_t b2h_addr;
++ dma_addr_t h2b_addr;
++ struct miscdevice b2h_mdev;
++ struct miscdevice h2b_mdev;
++ bool host_rwp_update;
++ wait_queue_head_t wq;
++ struct ast2600_espi_perif *perif;
++};
++
++struct ast2600_espi_perif {
++ struct {
++ bool enable;
++ int irq;
++ void *virt;
++ dma_addr_t taddr;
++ uint32_t saddr;
++ uint32_t size;
++ uint32_t inst_size;
++ struct ast2600_espi_perif_mmbi inst[PERIF_MMBI_INST_NUM];
++ } mmbi;
++
++ struct {
++ bool enable;
++ void *virt;
++ dma_addr_t taddr;
++ uint32_t saddr;
++ uint32_t size;
++ } mcyc;
++
++ struct {
++ bool enable;
++ void *np_tx_virt;
++ dma_addr_t np_tx_addr;
++ void *pc_tx_virt;
++ dma_addr_t pc_tx_addr;
++ void *pc_rx_virt;
++ dma_addr_t pc_rx_addr;
++ } dma;
++
++ bool rx_ready;
++ wait_queue_head_t wq;
++
++ spinlock_t lock;
++ struct mutex np_tx_mtx;
++ struct mutex pc_tx_mtx;
++ struct mutex pc_rx_mtx;
++
++ struct miscdevice mdev;
++};
++
++struct ast2600_espi_vw {
++ struct {
++ bool hw_mode;
++ uint32_t dir;
++ uint32_t val;
++ } gpio;
++
++ struct miscdevice mdev;
++};
++
++struct ast2600_espi_oob_dma_tx_desc {
++ uint32_t data_addr;
++ uint8_t cyc;
++ uint16_t tag : 4;
++ uint16_t len : 12;
++ uint8_t msg_type : 3;
++ uint8_t raz0 : 1;
++ uint8_t pec : 1;
++ uint8_t int_en : 1;
++ uint8_t pause : 1;
++ uint8_t raz1 : 1;
++ uint32_t raz2;
++ uint32_t raz3;
++} __packed;
++
++struct ast2600_espi_oob_dma_rx_desc {
++ uint32_t data_addr;
++ uint8_t cyc;
++ uint16_t tag : 4;
++ uint16_t len : 12;
++ uint8_t raz : 7;
++ uint8_t dirty : 1;
++} __packed;
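++
++/*
++ * Layout note (descriptive comment, based on the OOB reset/TX/RX code in this
++ * file): DMA mode uses two rings of OOB_DMA_DESC_NUM descriptors. Descriptor i
++ * points at its own PAGE_SIZE data buffer (tx_virt/rx_virt + PAGE_SIZE * i),
++ * ring positions are exchanged through the ESPI_OOB_*_DESC_RPTR/WPTR
++ * registers, and the RX "dirty" bit marks a descriptor whose buffer holds a
++ * received packet that has not been consumed yet.
++ */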
++
++struct ast2600_espi_oob {
++ struct {
++ bool enable;
++ struct ast2600_espi_oob_dma_tx_desc *txd_virt;
++ dma_addr_t txd_addr;
++ struct ast2600_espi_oob_dma_rx_desc *rxd_virt;
++ dma_addr_t rxd_addr;
++ void *tx_virt;
++ dma_addr_t tx_addr;
++ void *rx_virt;
++ dma_addr_t rx_addr;
++ } dma;
++
++ bool rx_ready;
++ wait_queue_head_t wq;
++
++ spinlock_t lock;
++ struct mutex tx_mtx;
++ struct mutex rx_mtx;
++
++ struct miscdevice mdev;
++};
++
++struct ast2600_espi_flash {
++ struct {
++ uint32_t mode;
++ phys_addr_t taddr;
++ uint32_t size;
++ } safs;
++
++ struct {
++ bool enable;
++ void *tx_virt;
++ dma_addr_t tx_addr;
++ void *rx_virt;
++ dma_addr_t rx_addr;
++ } dma;
++
++ bool rx_ready;
++ wait_queue_head_t wq;
++
++ spinlock_t lock;
++ struct mutex rx_mtx;
++ struct mutex tx_mtx;
++
++ struct miscdevice mdev;
++};
++
++struct ast2600_espi {
++ struct device *dev;
++ void __iomem *regs;
++ struct reset_control *rst;
++ struct clk *clk;
++ int irq;
++
++ struct ast2600_espi_perif perif;
++ struct ast2600_espi_vw vw;
++ struct ast2600_espi_oob oob;
++ struct ast2600_espi_flash flash;
++};
++
++/* peripheral channel (CH0) */
++static int ast2600_espi_mmbi_b2h_mmap(struct file *fp, struct vm_area_struct *vma)
++{
++ struct ast2600_espi_perif_mmbi *mmbi;
++ struct ast2600_espi_perif *perif;
++ struct ast2600_espi *espi;
++ unsigned long vm_size;
++ pgprot_t prot;
++
++ mmbi = container_of(fp->private_data, struct ast2600_espi_perif_mmbi, b2h_mdev);
++
++ perif = mmbi->perif;
++
++ espi = container_of(perif, struct ast2600_espi, perif);
++
++ vm_size = vma->vm_end - vma->vm_start;
++ prot = vma->vm_page_prot;
++
++ if (((vma->vm_pgoff << PAGE_SHIFT) + vm_size) > (SZ_4K << perif->mmbi.inst_size))
++ return -EINVAL;
++
++ prot = pgprot_noncached(prot);
++
++ if (remap_pfn_range(vma, vma->vm_start,
++ (mmbi->b2h_addr >> PAGE_SHIFT) + vma->vm_pgoff,
++ vm_size, prot))
++ return -EAGAIN;
++
++ return 0;
++}
++
++static int ast2600_espi_mmbi_h2b_mmap(struct file *fp, struct vm_area_struct *vma)
++{
++ struct ast2600_espi_perif_mmbi *mmbi;
++ struct ast2600_espi_perif *perif;
++ struct ast2600_espi *espi;
++ unsigned long vm_size;
++ pgprot_t prot;
++
++ mmbi = container_of(fp->private_data, struct ast2600_espi_perif_mmbi, h2b_mdev);
++
++ perif = mmbi->perif;
++
++ espi = container_of(perif, struct ast2600_espi, perif);
++
++ vm_size = vma->vm_end - vma->vm_start;
++ prot = vma->vm_page_prot;
++
++ if (((vma->vm_pgoff << PAGE_SHIFT) + vm_size) > (SZ_4K << perif->mmbi.inst_size))
++ return -EINVAL;
++
++ prot = pgprot_noncached(prot);
++
++ if (remap_pfn_range(vma, vma->vm_start,
++ (mmbi->h2b_addr >> PAGE_SHIFT) + vma->vm_pgoff,
++ vm_size, prot))
++ return -EAGAIN;
++
++ return 0;
++}
++
++static __poll_t ast2600_espi_mmbi_h2b_poll(struct file *fp, struct poll_table_struct *pt)
++{
++ struct ast2600_espi_perif_mmbi *mmbi;
++
++ mmbi = container_of(fp->private_data, struct ast2600_espi_perif_mmbi, h2b_mdev);
++
++ poll_wait(fp, &mmbi->wq, pt);
++
++ if (!mmbi->host_rwp_update)
++ return 0;
++
++ mmbi->host_rwp_update = false;
++
++ return EPOLLIN;
++}
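++
++/*
++ * Usage note (descriptive comment): each MMBI instance i exposes two misc
++ * devices. The b2h node mmaps the BMC-to-host window, the h2b node mmaps the
++ * host-to-BMC window and supports poll(); the MMBI interrupt handler below
++ * copies the host read/write pointer pair into the first two words of the
++ * h2b buffer and wakes any poller, so userspace can poll() the h2b node and
++ * re-read those words after EPOLLIN.
++ */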
++
++static long ast2600_espi_perif_pc_get_rx(struct file *fp,
++ struct ast2600_espi_perif *perif,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2600_espi *espi;
++ struct espi_comm_hdr *hdr;
++ unsigned long flags;
++ uint32_t pkt_len;
++ uint8_t *pkt;
++ int i, rc;
++
++ espi = container_of(perif, struct ast2600_espi, perif);
++
++ if (fp->f_flags & O_NONBLOCK) {
++ if (!mutex_trylock(&perif->pc_rx_mtx))
++ return -EAGAIN;
++
++ if (!perif->rx_ready) {
++ rc = -ENODATA;
++ goto unlock_mtx_n_out;
++ }
++ } else {
++ mutex_lock(&perif->pc_rx_mtx);
++
++ if (!perif->rx_ready) {
++ rc = wait_event_interruptible(perif->wq, perif->rx_ready);
++ if (rc == -ERESTARTSYS) {
++ rc = -EINTR;
++ goto unlock_mtx_n_out;
++ }
++ }
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++ * part is presented in HW registers rather than the data buffer
++ */
++ reg = readl(espi->regs + ESPI_PERIF_PC_RX_CTRL);
++ cyc = FIELD_GET(ESPI_PERIF_PC_RX_CTRL_CYC, reg);
++ tag = FIELD_GET(ESPI_PERIF_PC_RX_CTRL_TAG, reg);
++ len = FIELD_GET(ESPI_PERIF_PC_RX_CTRL_LEN, reg);
++
++ /*
++ * calculate the length of the rest part of the
++ * eSPI packet to be read from HW and copied to
++ * user space.
++ */
++ switch (cyc) {
++ case ESPI_PERIF_MSG:
++ pkt_len = sizeof(struct espi_perif_msg);
++ break;
++ case ESPI_PERIF_MSG_D:
++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) +
++ sizeof(struct espi_perif_msg);
++ break;
++ case ESPI_PERIF_SUC_CMPLT_D_MIDDLE:
++ case ESPI_PERIF_SUC_CMPLT_D_FIRST:
++ case ESPI_PERIF_SUC_CMPLT_D_LAST:
++ case ESPI_PERIF_SUC_CMPLT_D_ONLY:
++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) +
++ sizeof(struct espi_perif_cmplt);
++ break;
++ case ESPI_PERIF_SUC_CMPLT:
++ case ESPI_PERIF_UNSUC_CMPLT:
++ pkt_len = sizeof(struct espi_perif_cmplt);
++ break;
++ default:
++ rc = -EFAULT;
++ goto unlock_mtx_n_out;
++ }
++
++ if (ioc->pkt_len < pkt_len) {
++ rc = -EINVAL;
++ goto unlock_mtx_n_out;
++ }
++
++ pkt = vmalloc(pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_mtx_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++ hdr->cyc = cyc;
++ hdr->tag = tag;
++ hdr->len_h = len >> 8;
++ hdr->len_l = len & 0xff;
++
++ if (perif->dma.enable) {
++ memcpy(hdr + 1, perif->dma.pc_rx_virt, pkt_len - sizeof(*hdr));
++ } else {
++ for (i = sizeof(*hdr); i < pkt_len; ++i)
++ pkt[i] = readl(espi->regs + ESPI_PERIF_PC_RX_DATA) & 0xff;
++ }
++
++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ spin_lock_irqsave(&perif->lock, flags);
++
++ writel(ESPI_PERIF_PC_RX_CTRL_SERV_PEND, espi->regs + ESPI_PERIF_PC_RX_CTRL);
++ perif->rx_ready = 0;
++
++ spin_unlock_irqrestore(&perif->lock, flags);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_mtx_n_out:
++ mutex_unlock(&perif->pc_rx_mtx);
++
++ return rc;
++}
++
++static long ast2600_espi_perif_pc_put_tx(struct file *fp,
++ struct ast2600_espi_perif *perif,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2600_espi *espi;
++ struct espi_comm_hdr *hdr;
++ uint8_t *pkt;
++ int i, rc;
++
++ espi = container_of(perif, struct ast2600_espi, perif);
++
++ if (!mutex_trylock(&perif->pc_tx_mtx))
++ return -EAGAIN;
++
++ reg = readl(espi->regs + ESPI_PERIF_PC_TX_CTRL);
++ if (reg & ESPI_PERIF_PC_TX_CTRL_TRIG_PEND) {
++ rc = -EBUSY;
++ goto unlock_n_out;
++ }
++
++ pkt = vmalloc(ioc->pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++
++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++ * part is written to HW registers
++ */
++ if (perif->dma.enable) {
++ memcpy(perif->dma.pc_tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr));
++ dma_wmb();
++ } else {
++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i)
++ writel(pkt[i], espi->regs + ESPI_PERIF_PC_TX_DATA);
++ }
++
++ cyc = hdr->cyc;
++ tag = hdr->tag;
++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff);
++
++ reg = FIELD_PREP(ESPI_PERIF_PC_TX_CTRL_CYC, cyc)
++ | FIELD_PREP(ESPI_PERIF_PC_TX_CTRL_TAG, tag)
++ | FIELD_PREP(ESPI_PERIF_PC_TX_CTRL_LEN, len)
++ | ESPI_PERIF_PC_TX_CTRL_TRIG_PEND;
++ writel(reg, espi->regs + ESPI_PERIF_PC_TX_CTRL);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_n_out:
++ mutex_unlock(&perif->pc_tx_mtx);
++
++ return rc;
++}
++
++static long ast2600_espi_perif_np_put_tx(struct file *fp,
++ struct ast2600_espi_perif *perif,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2600_espi *espi;
++ struct espi_comm_hdr *hdr;
++ uint8_t *pkt;
++ int i, rc;
++
++ espi = container_of(perif, struct ast2600_espi, perif);
++
++ if (!mutex_trylock(&perif->np_tx_mtx))
++ return -EAGAIN;
++
++ reg = readl(espi->regs + ESPI_PERIF_NP_TX_CTRL);
++ if (reg & ESPI_PERIF_NP_TX_CTRL_TRIG_PEND) {
++ rc = -EBUSY;
++ goto unlock_n_out;
++ }
++
++ pkt = vmalloc(ioc->pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++
++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++ * part is written to HW registers
++ */
++ if (perif->dma.enable) {
++ memcpy(perif->dma.np_tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr));
++ dma_wmb();
++ } else {
++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i)
++ writel(pkt[i], espi->regs + ESPI_PERIF_NP_TX_DATA);
++ }
++
++ cyc = hdr->cyc;
++ tag = hdr->tag;
++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff);
++
++ reg = FIELD_PREP(ESPI_PERIF_NP_TX_CTRL_CYC, cyc)
++ | FIELD_PREP(ESPI_PERIF_NP_TX_CTRL_TAG, tag)
++ | FIELD_PREP(ESPI_PERIF_NP_TX_CTRL_LEN, len)
++ | ESPI_PERIF_NP_TX_CTRL_TRIG_PEND;
++ writel(reg, espi->regs + ESPI_PERIF_NP_TX_CTRL);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_n_out:
++ mutex_unlock(&perif->np_tx_mtx);
++
++ return rc;
++}
++
++static long ast2600_espi_perif_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
++{
++ struct ast2600_espi_perif *perif;
++ struct aspeed_espi_ioc ioc;
++
++ perif = container_of(fp->private_data, struct ast2600_espi_perif, mdev);
++
++ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc)))
++ return -EFAULT;
++
++ if (ioc.pkt_len > ESPI_MAX_PKT_LEN)
++ return -EINVAL;
++
++ switch (cmd) {
++ case ASPEED_ESPI_PERIF_PC_GET_RX:
++ return ast2600_espi_perif_pc_get_rx(fp, perif, &ioc);
++ case ASPEED_ESPI_PERIF_PC_PUT_TX:
++ return ast2600_espi_perif_pc_put_tx(fp, perif, &ioc);
++ case ASPEED_ESPI_PERIF_NP_PUT_TX:
++ return ast2600_espi_perif_np_put_tx(fp, perif, &ioc);
++ default:
++ break;
++ };
++
++ return -EINVAL;
++}
++
++static int ast2600_espi_perif_mmap(struct file *fp, struct vm_area_struct *vma)
++{
++ struct ast2600_espi_perif *perif;
++ unsigned long vm_size;
++ pgprot_t vm_prot;
++
++ perif = container_of(fp->private_data, struct ast2600_espi_perif, mdev);
++ if (!perif->mcyc.enable)
++ return -EPERM;
++
++ vm_size = vma->vm_end - vma->vm_start;
++ vm_prot = vma->vm_page_prot;
++
++ if (((vma->vm_pgoff << PAGE_SHIFT) + vm_size) > perif->mcyc.size)
++ return -EINVAL;
++
++ vm_prot = pgprot_noncached(vm_prot);
++
++ if (remap_pfn_range(vma, vma->vm_start,
++ (perif->mcyc.taddr >> PAGE_SHIFT) + vma->vm_pgoff,
++ vm_size, vm_prot))
++ return -EAGAIN;
++
++ return 0;
++}
++
++static const struct file_operations ast2600_espi_mmbi_b2h_fops = {
++ .owner = THIS_MODULE,
++ .mmap = ast2600_espi_mmbi_b2h_mmap,
++};
++
++static const struct file_operations ast2600_espi_mmbi_h2b_fops = {
++ .owner = THIS_MODULE,
++ .mmap = ast2600_espi_mmbi_h2b_mmap,
++ .poll = ast2600_espi_mmbi_h2b_poll,
++};
++
++static const struct file_operations ast2600_espi_perif_fops = {
++ .owner = THIS_MODULE,
++ .mmap = ast2600_espi_perif_mmap,
++ .unlocked_ioctl = ast2600_espi_perif_ioctl,
++};
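++
++/*
++ * Illustrative userspace sketch (not part of this driver): mapping the memory
++ * cycle window exported by the peripheral channel device above. The node path
++ * is hypothetical (the misc device registers as "<DEVICE_NAME>-peripheral"),
++ * and mmap() is refused with EPERM unless "perif-mcyc-enable" is present in
++ * the device tree.
++ *
++ *   size_t len = 0x10000;   // placeholder; must not exceed "perif-mcyc-size"
++ *   int fd = open("/dev/aspeed-espi-peripheral", O_RDWR);   // hypothetical path
++ *   void *win = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
++ */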
++
++static irqreturn_t ast2600_espi_perif_mmbi_isr(int irq, void *arg)
++{
++ struct ast2600_espi_perif_mmbi *mmbi;
++ struct ast2600_espi_perif *perif;
++ struct ast2600_espi *espi;
++ uint32_t sts, tmp;
++ uint32_t *p;
++ int i;
++
++ espi = (struct ast2600_espi *)arg;
++
++ perif = &espi->perif;
++
++ sts = readl(espi->regs + ESPI_MMBI_INT_STS);
++ if (!sts)
++ return IRQ_NONE;
++
++ for (i = 0, tmp = sts; i < PERIF_MMBI_INST_NUM; ++i, tmp >>= 2) {
++ if (!(tmp & 0x3))
++ continue;
++
++ mmbi = &perif->mmbi.inst[i];
++
++ p = (uint32_t *)mmbi->h2b_virt;
++ p[0] = readl(espi->regs + ESPI_MMBI_HOST_RWP(i));
++ p[1] = readl(espi->regs + ESPI_MMBI_HOST_RWP(i) + 4);
++
++ mmbi->host_rwp_update = true;
++
++ wake_up_interruptible(&mmbi->wq);
++ }
++
++ writel(sts, espi->regs + ESPI_MMBI_INT_STS);
++
++ return IRQ_HANDLED;
++}
++
++static void ast2600_espi_perif_isr(struct ast2600_espi *espi)
++{
++ struct ast2600_espi_perif *perif;
++ unsigned long flags;
++ uint32_t sts;
++
++ perif = &espi->perif;
++
++ sts = readl(espi->regs + ESPI_INT_STS);
++
++ if (sts & ESPI_INT_STS_PERIF_PC_RX_CMPLT) {
++ writel(ESPI_INT_STS_PERIF_PC_RX_CMPLT, espi->regs + ESPI_INT_STS);
++
++ spin_lock_irqsave(&perif->lock, flags);
++ perif->rx_ready = true;
++ spin_unlock_irqrestore(&perif->lock, flags);
++
++ wake_up_interruptible(&perif->wq);
++ }
++}
++
++static void ast2600_espi_perif_reset(struct ast2600_espi *espi)
++{
++ struct ast2600_espi_perif *perif;
++ struct device *dev;
++ uint32_t reg, mask;
++
++ dev = espi->dev;
++
++ perif = &espi->perif;
++
++ writel(ESPI_INT_EN_PERIF, espi->regs + ESPI_INT_EN_CLR);
++ writel(ESPI_INT_STS_PERIF, espi->regs + ESPI_INT_STS);
++
++ writel(0x0, espi->regs + ESPI_MMBI_INT_EN);
++ writel(0xffffffff, espi->regs + ESPI_MMBI_INT_STS);
++
++ reg = readl(espi->regs + ESPI_CTRL2);
++ reg &= ~(ESPI_CTRL2_MCYC_RD_DIS_WDT | ESPI_CTRL2_MCYC_WR_DIS_WDT);
++ writel(reg, espi->regs + ESPI_CTRL2);
++
++ reg = readl(espi->regs + ESPI_CTRL);
++ reg &= ~(ESPI_CTRL_PERIF_NP_TX_SW_RST
++ | ESPI_CTRL_PERIF_NP_RX_SW_RST
++ | ESPI_CTRL_PERIF_PC_TX_SW_RST
++ | ESPI_CTRL_PERIF_PC_RX_SW_RST
++ | ESPI_CTRL_PERIF_NP_TX_DMA_EN
++ | ESPI_CTRL_PERIF_PC_TX_DMA_EN
++ | ESPI_CTRL_PERIF_PC_RX_DMA_EN
++ | ESPI_CTRL_PERIF_SW_RDY);
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ udelay(1);
++
++ reg |= (ESPI_CTRL_PERIF_NP_TX_SW_RST
++ | ESPI_CTRL_PERIF_NP_RX_SW_RST
++ | ESPI_CTRL_PERIF_PC_TX_SW_RST
++ | ESPI_CTRL_PERIF_PC_RX_SW_RST);
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ if (perif->mmbi.enable) {
++ reg = readl(espi->regs + ESPI_MMBI_CTRL);
++ reg &= ~(ESPI_MMBI_CTRL_EN);
++ writel(reg, espi->regs + ESPI_MMBI_CTRL);
++
++ mask = ~(perif->mmbi.size - 1);
++ writel(mask, espi->regs + ESPI_PERIF_MMBI_MASK);
++ writel(perif->mmbi.saddr, espi->regs + ESPI_PERIF_MMBI_SADDR);
++ writel(perif->mmbi.taddr, espi->regs + ESPI_PERIF_MMBI_TADDR);
++
++ writel(0xffffffff, espi->regs + ESPI_MMBI_INT_EN);
++
++ reg = FIELD_PREP(ESPI_MMBI_CTRL_INST_SZ, perif->mmbi.inst_size)
++ | FIELD_PREP(ESPI_MMBI_CTRL_TOTAL_SZ, perif->mmbi.inst_size)
++ | ESPI_MMBI_CTRL_EN;
++ writel(reg, espi->regs + ESPI_MMBI_CTRL);
++
++ reg = readl(espi->regs + ESPI_CTRL2) & ~(ESPI_CTRL2_MMBI_RD_DIS | ESPI_CTRL2_MMBI_WR_DIS);
++ writel(reg, espi->regs + ESPI_CTRL2);
++ }
++
++ if (perif->mcyc.enable) {
++ mask = ~(perif->mcyc.size - 1);
++ writel(mask, espi->regs + ESPI_PERIF_MCYC_MASK);
++ writel(perif->mcyc.saddr, espi->regs + ESPI_PERIF_MCYC_SADDR);
++ writel(perif->mcyc.taddr, espi->regs + ESPI_PERIF_MCYC_TADDR);
++
++ reg = readl(espi->regs + ESPI_CTRL2) & ~(ESPI_CTRL2_MCYC_RD_DIS | ESPI_CTRL2_MCYC_WR_DIS);
++ writel(reg, espi->regs + ESPI_CTRL2);
++ }
++
++ if (perif->dma.enable) {
++ writel(perif->dma.np_tx_addr, espi->regs + ESPI_PERIF_NP_TX_DMA);
++ writel(perif->dma.pc_tx_addr, espi->regs + ESPI_PERIF_PC_TX_DMA);
++ writel(perif->dma.pc_rx_addr, espi->regs + ESPI_PERIF_PC_RX_DMA);
++
++ reg = readl(espi->regs + ESPI_CTRL)
++ | ESPI_CTRL_PERIF_NP_TX_DMA_EN
++ | ESPI_CTRL_PERIF_PC_TX_DMA_EN
++ | ESPI_CTRL_PERIF_PC_RX_DMA_EN;
++ writel(reg, espi->regs + ESPI_CTRL);
++ }
++
++ writel(ESPI_INT_EN_PERIF_PC_RX_CMPLT, espi->regs + ESPI_INT_EN);
++
++ reg = readl(espi->regs + ESPI_CTRL) | ESPI_CTRL_PERIF_SW_RDY;
++ writel(reg, espi->regs + ESPI_CTRL);
++}
++
++static int ast2600_espi_perif_probe(struct ast2600_espi *espi)
++{
++ struct ast2600_espi_perif_mmbi *mmbi;
++ struct ast2600_espi_perif *perif;
++ struct platform_device *pdev;
++ struct device *dev;
++ int i, rc;
++
++ dev = espi->dev;
++
++ perif = &espi->perif;
++
++ init_waitqueue_head(&perif->wq);
++
++ spin_lock_init(&perif->lock);
++
++ mutex_init(&perif->np_tx_mtx);
++ mutex_init(&perif->pc_tx_mtx);
++ mutex_init(&perif->pc_rx_mtx);
++
++ perif->mmbi.enable = of_property_read_bool(dev->of_node, "perif-mmbi-enable");
++ if (perif->mmbi.enable) {
++ pdev = container_of(dev, struct platform_device, dev);
++
++ perif->mmbi.irq = platform_get_irq(pdev, 1);
++ if (perif->mmbi.irq < 0) {
++ dev_err(dev, "cannot get MMBI IRQ number\n");
++ return -ENODEV;
++ }
++
++ rc = of_property_read_u32(dev->of_node, "perif-mmbi-src-addr", &perif->mmbi.saddr);
++ if (rc || !IS_ALIGNED(perif->mmbi.saddr, PERIF_MMBI_ALIGN)) {
++ dev_err(dev, "cannot get 64KB-aligned MMBI host address\n");
++ return -ENODEV;
++ }
++
++ rc = of_property_read_u32(dev->of_node, "perif-mmbi-instance-size", &perif->mmbi.inst_size);
++ if (rc || perif->mmbi.inst_size >= MMBI_INST_SIZE_TYPES) {
++ dev_err(dev, "cannot get valid MMBI instance size\n");
++ return -EINVAL;
++ }
++
++ perif->mmbi.size = (SZ_8K << perif->mmbi.inst_size) * PERIF_MMBI_INST_NUM;
++ perif->mmbi.virt = dmam_alloc_coherent(dev, perif->mmbi.size,
++ &perif->mmbi.taddr, GFP_KERNEL);
++ if (!perif->mmbi.virt) {
++ dev_err(dev, "cannot allocate MMBI\n");
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < PERIF_MMBI_INST_NUM; ++i) {
++ mmbi = &perif->mmbi.inst[i];
++
++ init_waitqueue_head(&mmbi->wq);
++
++ mmbi->perif = perif;
++ mmbi->host_rwp_update = false;
++
++ mmbi->b2h_virt = perif->mmbi.virt + ((SZ_4K << perif->mmbi.inst_size) * i);
++ mmbi->b2h_addr = perif->mmbi.taddr + ((SZ_4K << perif->mmbi.inst_size) * i);
++ mmbi->b2h_mdev.parent = dev;
++ mmbi->b2h_mdev.minor = MISC_DYNAMIC_MINOR;
++ mmbi->b2h_mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-mmbi-b2h%d", DEVICE_NAME, i);
++ mmbi->b2h_mdev.fops = &ast2600_espi_mmbi_b2h_fops;
++ rc = misc_register(&mmbi->b2h_mdev);
++ if (rc) {
++ dev_err(dev, "cannot register device %s\n", mmbi->b2h_mdev.name);
++ return rc;
++ }
++
++ mmbi->h2b_virt = perif->mmbi.virt + ((SZ_4K << perif->mmbi.inst_size) * (i + PERIF_MMBI_INST_NUM));
++ mmbi->h2b_addr = perif->mmbi.taddr + ((SZ_4K << perif->mmbi.inst_size) * (i + PERIF_MMBI_INST_NUM));
++ mmbi->h2b_mdev.parent = dev;
++ mmbi->h2b_mdev.minor = MISC_DYNAMIC_MINOR;
++ mmbi->h2b_mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-mmbi-h2b%d", DEVICE_NAME, i);
++ mmbi->h2b_mdev.fops = &ast2600_espi_mmbi_h2b_fops;
++ rc = misc_register(&mmbi->h2b_mdev);
++ if (rc) {
++ dev_err(dev, "cannot register device %s\n", mmbi->h2b_mdev.name);
++ return rc;
++ }
++ }
++ }
++
++ perif->mcyc.enable = of_property_read_bool(dev->of_node, "perif-mcyc-enable");
++ if (perif->mcyc.enable) {
++ if (perif->mmbi.enable) {
++ dev_err(dev, "cannot enable memory cycle, occupied by MMBI\n");
++ return -EPERM;
++ }
++
++ rc = of_property_read_u32(dev->of_node, "perif-mcyc-src-addr", &perif->mcyc.saddr);
++ if (rc || !IS_ALIGNED(perif->mcyc.saddr, PERIF_MCYC_ALIGN)) {
++ dev_err(dev, "cannot get 64KB-aligned memory cycle host address\n");
++ return -ENODEV;
++ }
++
++ rc = of_property_read_u32(dev->of_node, "perif-mcyc-size", &perif->mcyc.size);
++ if (rc || !IS_ALIGNED(perif->mcyc.size, PERIF_MCYC_ALIGN)) {
++ dev_err(dev, "cannot get 64KB-aligned memory cycle size\n");
++ return -EINVAL;
++ }
++
++ perif->mcyc.virt = dmam_alloc_coherent(dev, perif->mcyc.size,
++ &perif->mcyc.taddr, GFP_KERNEL);
++ if (!perif->mcyc.virt) {
++ dev_err(dev, "cannot allocate memory cycle\n");
++ return -ENOMEM;
++ }
++ }
++
++ perif->dma.enable = of_property_read_bool(dev->of_node, "perif-dma-mode");
++ if (perif->dma.enable) {
++ perif->dma.pc_tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE,
++ &perif->dma.pc_tx_addr, GFP_KERNEL);
++ if (!perif->dma.pc_tx_virt) {
++ dev_err(dev, "cannot allocate posted TX DMA buffer\n");
++ return -ENOMEM;
++ }
++
++ perif->dma.pc_rx_virt = dmam_alloc_coherent(dev, PAGE_SIZE,
++ &perif->dma.pc_rx_addr, GFP_KERNEL);
++ if (!perif->dma.pc_rx_virt) {
++ dev_err(dev, "cannot allocate posted RX DMA buffer\n");
++ return -ENOMEM;
++ }
++
++ perif->dma.np_tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE,
++ &perif->dma.np_tx_addr, GFP_KERNEL);
++ if (!perif->dma.np_tx_virt) {
++ dev_err(dev, "cannot allocate non-posted TX DMA buffer\n");
++ return -ENOMEM;
++ }
++ }
++
++ perif->mdev.parent = dev;
++ perif->mdev.minor = MISC_DYNAMIC_MINOR;
++ perif->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-peripheral", DEVICE_NAME);
++ perif->mdev.fops = &ast2600_espi_perif_fops;
++ rc = misc_register(&perif->mdev);
++ if (rc) {
++ dev_err(dev, "cannot register device %s\n", perif->mdev.name);
++ return rc;
++ }
++
++ ast2600_espi_perif_reset(espi);
++
++ if (perif->mmbi.enable) {
++ rc = devm_request_irq(dev, espi->perif.mmbi.irq,
++ ast2600_espi_perif_mmbi_isr, 0, dev_name(dev), espi);
++ if (rc) {
++ dev_err(dev, "cannot request MMBI IRQ\n");
++ return rc;
++ }
++ }
++
++ return 0;
++}
++
++static int ast2600_espi_perif_remove(struct ast2600_espi *espi)
++{
++ struct ast2600_espi_perif_mmbi *mmbi;
++ struct ast2600_espi_perif *perif;
++ struct device *dev;
++ uint32_t reg;
++ int i;
++
++ dev = espi->dev;
++
++ perif = &espi->perif;
++
++ writel(ESPI_INT_EN_PERIF, espi->regs + ESPI_INT_EN_CLR);
++
++ reg = readl(espi->regs + ESPI_CTRL2);
++ reg |= (ESPI_CTRL2_MCYC_RD_DIS | ESPI_CTRL2_MCYC_WR_DIS);
++ writel(reg, espi->regs + ESPI_CTRL2);
++
++ reg = readl(espi->regs + ESPI_CTRL);
++ reg &= ~(ESPI_CTRL_PERIF_NP_TX_DMA_EN
++ | ESPI_CTRL_PERIF_PC_TX_DMA_EN
++ | ESPI_CTRL_PERIF_PC_RX_DMA_EN
++ | ESPI_CTRL_PERIF_SW_RDY);
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ if (perif->mmbi.enable) {
++ reg = readl(espi->regs + ESPI_MMBI_CTRL);
++ reg &= ~ESPI_MMBI_CTRL_EN;
++ writel(reg, espi->regs + ESPI_MMBI_CTRL);
++
++ for (i = 0; i < PERIF_MMBI_INST_NUM; ++i) {
++ mmbi = &perif->mmbi.inst[i];
++ misc_deregister(&mmbi->b2h_mdev);
++ misc_deregister(&mmbi->h2b_mdev);
++ }
++
++ dmam_free_coherent(dev, perif->mmbi.size, perif->mmbi.virt,
++ perif->mmbi.taddr);
++ }
++
++ if (perif->mcyc.enable)
++ dmam_free_coherent(dev, perif->mcyc.size, perif->mcyc.virt,
++ perif->mcyc.taddr);
++
++ if (perif->dma.enable) {
++ dmam_free_coherent(dev, PAGE_SIZE, perif->dma.np_tx_virt,
++ perif->dma.np_tx_addr);
++ dmam_free_coherent(dev, PAGE_SIZE, perif->dma.pc_tx_virt,
++ perif->dma.pc_tx_addr);
++ dmam_free_coherent(dev, PAGE_SIZE, perif->dma.pc_rx_virt,
++ perif->dma.pc_rx_addr);
++ }
++
++ mutex_destroy(&perif->np_tx_mtx);
++ mutex_destroy(&perif->pc_tx_mtx);
++ mutex_destroy(&perif->pc_rx_mtx);
++
++ misc_deregister(&perif->mdev);
++
++ return 0;
++}
++
++/* virtual wire channel (CH1) */
++static long ast2600_espi_vw_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
++{
++ struct ast2600_espi_vw *vw;
++ struct ast2600_espi *espi;
++ uint32_t gpio;
++
++ vw = container_of(fp->private_data, struct ast2600_espi_vw, mdev);
++ espi = container_of(vw, struct ast2600_espi, vw);
++ gpio = vw->gpio.val;
++
++ switch (cmd) {
++ case ASPEED_ESPI_VW_GET_GPIO_VAL:
++ if (put_user(gpio, (uint32_t __user *)arg))
++ return -EFAULT;
++ break;
++ case ASPEED_ESPI_VW_PUT_GPIO_VAL:
++ if (get_user(gpio, (uint32_t __user *)arg))
++ return -EFAULT;
++
++ writel(gpio, espi->regs + ESPI_VW_GPIO_VAL);
++ break;
++ default:
++ return -EINVAL;
++ };
++
++ return 0;
++}
++
++static const struct file_operations ast2600_espi_vw_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = ast2600_espi_vw_ioctl,
++};
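++
++/*
++ * Illustrative userspace sketch (not part of this driver): reading and
++ * writing the virtual-wire GPIO value through the ioctl pair above. The node
++ * path is hypothetical; ASPEED_ESPI_VW_GET_GPIO_VAL returns the value cached
++ * by the VW interrupt handler, while the PUT variant writes the 32-bit value
++ * straight to the ESPI_VW_GPIO_VAL register.
++ *
++ *   uint32_t val;
++ *   int fd = open("/dev/aspeed-espi-vw", O_RDWR);   // hypothetical path
++ *
++ *   ioctl(fd, ASPEED_ESPI_VW_GET_GPIO_VAL, &val);
++ *   val |= 0x1;                                     // example bit flip
++ *   ioctl(fd, ASPEED_ESPI_VW_PUT_GPIO_VAL, &val);
++ */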
++
++static void ast2600_espi_vw_isr(struct ast2600_espi *espi)
++{
++ struct ast2600_espi_vw *vw;
++ uint32_t sts;
++
++ vw = &espi->vw;
++
++ sts = readl(espi->regs + ESPI_INT_STS);
++
++ if (sts & ESPI_INT_STS_VW_GPIO) {
++ vw->gpio.val = readl(espi->regs + ESPI_VW_GPIO_VAL);
++ writel(ESPI_INT_STS_VW_GPIO, espi->regs + ESPI_INT_STS);
++ }
++}
++
++static void ast2600_espi_vw_reset(struct ast2600_espi *espi)
++{
++ uint32_t reg;
++ struct ast2600_espi_vw *vw = &espi->vw;
++
++ writel(ESPI_INT_EN_VW, espi->regs + ESPI_INT_EN_CLR);
++ writel(ESPI_INT_STS_VW, espi->regs + ESPI_INT_STS);
++
++ writel(vw->gpio.dir, espi->regs + ESPI_VW_GPIO_DIR);
++
++ vw->gpio.val = readl(espi->regs + ESPI_VW_GPIO_VAL);
++
++ reg = readl(espi->regs + ESPI_CTRL2) & ~(ESPI_CTRL2_VW_TX_SORT);
++ writel(reg, espi->regs + ESPI_CTRL2);
++
++ writel(ESPI_INT_EN_VW_GPIO, espi->regs + ESPI_INT_EN);
++
++ reg = readl(espi->regs + ESPI_CTRL)
++ | ((vw->gpio.hw_mode) ? 0 : ESPI_CTRL_VW_GPIO_SW)
++ | ESPI_CTRL_VW_SW_RDY;
++ writel(reg, espi->regs + ESPI_CTRL);
++}
++
++static int ast2600_espi_vw_probe(struct ast2600_espi *espi)
++{
++ int rc;
++ struct device *dev = espi->dev;
++ struct ast2600_espi_vw *vw = &espi->vw;
++
++ vw->gpio.hw_mode = of_property_read_bool(dev->of_node, "vw-gpio-hw-mode");
++ of_property_read_u32(dev->of_node, "vw-gpio-direction", &vw->gpio.dir);
++
++ vw->mdev.parent = dev;
++ vw->mdev.minor = MISC_DYNAMIC_MINOR;
++ vw->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-vw", DEVICE_NAME);
++ vw->mdev.fops = &ast2600_espi_vw_fops;
++ rc = misc_register(&vw->mdev);
++ if (rc) {
++ dev_err(dev, "cannot register device %s\n", vw->mdev.name);
++ return rc;
++ }
++
++ ast2600_espi_vw_reset(espi);
++
++ return 0;
++}
++
++static int ast2600_espi_vw_remove(struct ast2600_espi *espi)
++{
++ struct ast2600_espi_vw *vw;
++
++ vw = &espi->vw;
++
++ writel(ESPI_INT_EN_VW, espi->regs + ESPI_INT_EN_CLR);
++
++ misc_deregister(&vw->mdev);
++
++ return 0;
++}
++
++/* out-of-band channel (CH2) */
++static long ast2600_espi_oob_dma_get_rx(struct file *fp,
++ struct ast2600_espi_oob *oob,
++ struct aspeed_espi_ioc *ioc)
++{
++ struct ast2600_espi_oob_dma_rx_desc *d;
++ struct ast2600_espi *espi;
++ struct espi_comm_hdr *hdr;
++ uint32_t wptr, pkt_len;
++ unsigned long flags;
++ uint8_t *pkt;
++ int rc;
++
++ espi = container_of(oob, struct ast2600_espi, oob);
++
++ wptr = FIELD_GET(ESPI_OOB_RX_DESC_WPTR_WP, readl(espi->regs + ESPI_OOB_RX_DESC_WPTR));
++
++ d = &oob->dma.rxd_virt[wptr];
++
++ if (!d->dirty)
++ return -EFAULT;
++
++ pkt_len = ((d->len) ? d->len : ESPI_MAX_PLD_LEN) + sizeof(struct espi_comm_hdr);
++
++ if (ioc->pkt_len < pkt_len)
++ return -EINVAL;
++
++ pkt = vmalloc(pkt_len);
++ if (!pkt)
++ return -ENOMEM;
++
++ hdr = (struct espi_comm_hdr *)pkt;
++ hdr->cyc = d->cyc;
++ hdr->tag = d->tag;
++ hdr->len_h = d->len >> 8;
++ hdr->len_l = d->len & 0xff;
++ memcpy(hdr + 1, oob->dma.rx_virt + (PAGE_SIZE * wptr), pkt_len - sizeof(*hdr));
++
++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ spin_lock_irqsave(&oob->lock, flags);
++
++ /* make current descriptor available again */
++ d->dirty = 0;
++
++ wptr = (wptr + 1) % OOB_DMA_DESC_NUM;
++ writel(wptr | ESPI_OOB_RX_DESC_WPTR_RECV_EN, espi->regs + ESPI_OOB_RX_DESC_WPTR);
++
++ /* set ready flag based on the next RX descriptor */
++ oob->rx_ready = oob->dma.rxd_virt[wptr].dirty;
++
++ spin_unlock_irqrestore(&oob->lock, flags);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++ return rc;
++}
++
++static long ast2600_espi_oob_get_rx(struct file *fp,
++ struct ast2600_espi_oob *oob,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2600_espi *espi;
++ struct espi_comm_hdr *hdr;
++ unsigned long flags;
++ uint32_t pkt_len;
++ uint8_t *pkt;
++ int i, rc;
++
++ espi = container_of(oob, struct ast2600_espi, oob);
++
++ if (fp->f_flags & O_NONBLOCK) {
++ if (!mutex_trylock(&oob->rx_mtx))
++ return -EAGAIN;
++
++ if (!oob->rx_ready) {
++ rc = -ENODATA;
++ goto unlock_mtx_n_out;
++ }
++ } else {
++ mutex_lock(&oob->rx_mtx);
++
++ if (!oob->rx_ready) {
++ rc = wait_event_interruptible(oob->wq, oob->rx_ready);
++ if (rc == -ERESTARTSYS) {
++ rc = -EINTR;
++ goto unlock_mtx_n_out;
++ }
++ }
++ }
++
++ if (oob->dma.enable) {
++ rc = ast2600_espi_oob_dma_get_rx(fp, oob, ioc);
++ goto unlock_mtx_n_out;
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++ * part is presented in HW registers rather than the data buffer
++ */
++ reg = readl(espi->regs + ESPI_OOB_RX_CTRL);
++ cyc = FIELD_GET(ESPI_OOB_RX_CTRL_CYC, reg);
++ tag = FIELD_GET(ESPI_OOB_RX_CTRL_TAG, reg);
++ len = FIELD_GET(ESPI_OOB_RX_CTRL_LEN, reg);
++
++ /*
++ * calculate the length of the rest part of the
++ * eSPI packet to be read from HW and copied to
++ * user space.
++ */
++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) + sizeof(struct espi_comm_hdr);
++
++ if (ioc->pkt_len < pkt_len) {
++ rc = -EINVAL;
++ goto unlock_mtx_n_out;
++ }
++
++ pkt = vmalloc(pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_mtx_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++ hdr->cyc = cyc;
++ hdr->tag = tag;
++ hdr->len_h = len >> 8;
++ hdr->len_l = len & 0xff;
++
++ for (i = sizeof(*hdr); i < pkt_len; ++i)
++ pkt[i] = readl(espi->regs + ESPI_OOB_RX_DATA) & 0xff;
++
++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ spin_lock_irqsave(&oob->lock, flags);
++
++ writel(ESPI_OOB_RX_CTRL_SERV_PEND, espi->regs + ESPI_OOB_RX_CTRL);
++ oob->rx_ready = 0;
++
++ spin_unlock_irqrestore(&oob->lock, flags);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_mtx_n_out:
++ mutex_unlock(&oob->rx_mtx);
++
++ return rc;
++}
++
++static long ast2600_espi_oob_dma_put_tx(struct file *fp,
++ struct ast2600_espi_oob *oob,
++ struct aspeed_espi_ioc *ioc)
++{
++ struct ast2600_espi_oob_dma_tx_desc *d;
++ struct ast2600_espi *espi;
++ struct espi_comm_hdr *hdr;
++ uint32_t rptr, wptr;
++ uint8_t *pkt;
++ int rc;
++
++ espi = container_of(oob, struct ast2600_espi, oob);
++
++ pkt = vzalloc(ioc->pkt_len);
++ if (!pkt)
++ return -ENOMEM;
++
++ hdr = (struct espi_comm_hdr *)pkt;
++
++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ /* kick HW to update descriptor read/write pointer */
++ writel(ESPI_OOB_TX_DESC_RPTR_UPDATE, espi->regs + ESPI_OOB_TX_DESC_RPTR);
++
++ rptr = readl(espi->regs + ESPI_OOB_TX_DESC_RPTR);
++ wptr = readl(espi->regs + ESPI_OOB_TX_DESC_WPTR);
++
++ if (((wptr + 1) % OOB_DMA_DESC_NUM) == rptr) {
++ rc = -EBUSY;
++ goto free_n_out;
++ }
++
++ d = &oob->dma.txd_virt[wptr];
++ d->cyc = hdr->cyc;
++ d->tag = hdr->tag;
++ d->len = (hdr->len_h << 8) | (hdr->len_l & 0xff);
++ d->msg_type = OOB_DMA_DESC_CUSTOM;
++
++ memcpy(oob->dma.tx_virt + (PAGE_SIZE * wptr), hdr + 1, ioc->pkt_len - sizeof(*hdr));
++
++ dma_wmb();
++
++ wptr = (wptr + 1) % OOB_DMA_DESC_NUM;
++ writel(wptr | ESPI_OOB_TX_DESC_WPTR_SEND_EN, espi->regs + ESPI_OOB_TX_DESC_WPTR);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++ return rc;
++}
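++
++/*
++ * Note on the TX ring handshake above (descriptive comment): writing
++ * ESPI_OOB_TX_DESC_RPTR_UPDATE asks the hardware to publish its current read
++ * pointer, the ring is treated as full when advancing the write pointer would
++ * make it equal to that read pointer, and writing the new write pointer with
++ * ESPI_OOB_TX_DESC_WPTR_SEND_EN set hands the filled descriptor to the engine.
++ */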
++
++static long ast2600_espi_oob_put_tx(struct file *fp,
++ struct ast2600_espi_oob *oob,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2600_espi *espi;
++ struct espi_comm_hdr *hdr;
++ uint8_t *pkt;
++ int i, rc;
++
++ espi = container_of(oob, struct ast2600_espi, oob);
++
++ if (!mutex_trylock(&oob->tx_mtx))
++ return -EAGAIN;
++
++ if (oob->dma.enable) {
++ rc = ast2600_espi_oob_dma_put_tx(fp, oob, ioc);
++ goto unlock_mtx_n_out;
++ }
++
++ reg = readl(espi->regs + ESPI_OOB_TX_CTRL);
++ if (reg & ESPI_OOB_TX_CTRL_TRIG_PEND) {
++ rc = -EBUSY;
++ goto unlock_mtx_n_out;
++ }
++
++ if (ioc->pkt_len > ESPI_MAX_PKT_LEN) {
++ rc = -EINVAL;
++ goto unlock_mtx_n_out;
++ }
++
++ pkt = vmalloc(ioc->pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_mtx_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++
++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++ * part is written to HW registers
++ */
++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i)
++ writel(pkt[i], espi->regs + ESPI_OOB_TX_DATA);
++
++ cyc = hdr->cyc;
++ tag = hdr->tag;
++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff);
++
++ reg = FIELD_PREP(ESPI_OOB_TX_CTRL_CYC, cyc)
++ | FIELD_PREP(ESPI_OOB_TX_CTRL_TAG, tag)
++ | FIELD_PREP(ESPI_OOB_TX_CTRL_LEN, len)
++ | ESPI_OOB_TX_CTRL_TRIG_PEND;
++ writel(reg, espi->regs + ESPI_OOB_TX_CTRL);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_mtx_n_out:
++ mutex_unlock(&oob->tx_mtx);
++
++ return rc;
++}
++
++static long ast2600_espi_oob_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
++{
++ struct ast2600_espi_oob *oob;
++ struct aspeed_espi_ioc ioc;
++
++ oob = container_of(fp->private_data, struct ast2600_espi_oob, mdev);
++
++ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc)))
++ return -EFAULT;
++
++ if (ioc.pkt_len > ESPI_MAX_PKT_LEN)
++ return -EINVAL;
++
++ switch (cmd) {
++ case ASPEED_ESPI_OOB_GET_RX:
++ return ast2600_espi_oob_get_rx(fp, oob, &ioc);
++ case ASPEED_ESPI_OOB_PUT_TX:
++ return ast2600_espi_oob_put_tx(fp, oob, &ioc);
++ }
++
++ return -EINVAL;
++}
++
++static const struct file_operations ast2600_espi_oob_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = ast2600_espi_oob_ioctl,
++};
++
++static void ast2600_espi_oob_isr(struct ast2600_espi *espi)
++{
++ struct ast2600_espi_oob *oob;
++ unsigned long flags;
++ uint32_t sts;
++
++ oob = &espi->oob;
++
++ sts = readl(espi->regs + ESPI_INT_STS);
++
++ if (sts & ESPI_INT_STS_OOB_RX_CMPLT) {
++ writel(ESPI_INT_STS_OOB_RX_CMPLT, espi->regs + ESPI_INT_STS);
++
++ spin_lock_irqsave(&oob->lock, flags);
++ oob->rx_ready = true;
++ spin_unlock_irqrestore(&oob->lock, flags);
++
++ wake_up_interruptible(&oob->wq);
++ }
++}
++
++static void ast2600_espi_oob_reset(struct ast2600_espi *espi)
++{
++ struct ast2600_espi_oob *oob;
++ dma_addr_t tx_addr, rx_addr;
++ uint32_t reg;
++ int i;
++
++ writel(ESPI_INT_EN_OOB, espi->regs + ESPI_INT_EN_CLR);
++ writel(ESPI_INT_STS_OOB, espi->regs + ESPI_INT_STS);
++
++ reg = readl(espi->regs + ESPI_CTRL);
++ reg &= ~(ESPI_CTRL_OOB_TX_SW_RST
++ | ESPI_CTRL_OOB_RX_SW_RST
++ | ESPI_CTRL_OOB_TX_DMA_EN
++ | ESPI_CTRL_OOB_RX_DMA_EN
++ | ESPI_CTRL_OOB_SW_RDY);
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ udelay(1);
++
++ reg |= (ESPI_CTRL_OOB_TX_SW_RST | ESPI_CTRL_OOB_RX_SW_RST);
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ oob = &espi->oob;
++
++ if (oob->dma.enable) {
++ tx_addr = oob->dma.tx_addr;
++ rx_addr = oob->dma.rx_addr;
++
++ for (i = 0; i < OOB_DMA_DESC_NUM; ++i) {
++ oob->dma.txd_virt[i].data_addr = tx_addr;
++ tx_addr += PAGE_SIZE;
++
++ oob->dma.rxd_virt[i].data_addr = rx_addr;
++ oob->dma.rxd_virt[i].dirty = 0;
++ rx_addr += PAGE_SIZE;
++ }
++
++ writel(oob->dma.txd_addr, espi->regs + ESPI_OOB_TX_DMA);
++ writel(OOB_DMA_RPTR_KEY, espi->regs + ESPI_OOB_TX_DESC_RPTR);
++ writel(0x0, espi->regs + ESPI_OOB_TX_DESC_WPTR);
++ writel(OOB_DMA_DESC_NUM, espi->regs + ESPI_OOB_TX_DESC_NUM);
++
++ writel(oob->dma.rxd_addr, espi->regs + ESPI_OOB_RX_DMA);
++ writel(OOB_DMA_RPTR_KEY, espi->regs + ESPI_OOB_RX_DESC_RPTR);
++ writel(0x0, espi->regs + ESPI_OOB_RX_DESC_WPTR);
++ writel(OOB_DMA_DESC_NUM, espi->regs + ESPI_OOB_RX_DESC_NUM);
++
++ reg = readl(espi->regs + ESPI_CTRL)
++ | ESPI_CTRL_OOB_TX_DMA_EN
++ | ESPI_CTRL_OOB_RX_DMA_EN;
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ /* activate RX DMA to make OOB_FREE */
++ writel(ESPI_OOB_RX_DESC_WPTR_RECV_EN, espi->regs + ESPI_OOB_RX_DESC_WPTR);
++ }
++
++ writel(ESPI_INT_EN_OOB_RX_CMPLT, espi->regs + ESPI_INT_EN);
++
++ reg = readl(espi->regs + ESPI_CTRL) | ESPI_CTRL_OOB_SW_RDY;
++ writel(reg, espi->regs + ESPI_CTRL);
++}
++
++static int ast2600_espi_oob_probe(struct ast2600_espi *espi)
++{
++ struct ast2600_espi_oob *oob;
++ struct device *dev;
++ int rc;
++
++ dev = espi->dev;
++
++ oob = &espi->oob;
++
++ init_waitqueue_head(&oob->wq);
++
++ spin_lock_init(&oob->lock);
++
++ mutex_init(&oob->tx_mtx);
++ mutex_init(&oob->rx_mtx);
++
++ oob->dma.enable = of_property_read_bool(dev->of_node, "oob-dma-mode");
++ if (oob->dma.enable) {
++ oob->dma.txd_virt = dmam_alloc_coherent(dev, sizeof(*oob->dma.txd_virt) * OOB_DMA_DESC_NUM, &oob->dma.txd_addr, GFP_KERNEL);
++ if (!oob->dma.txd_virt) {
++ dev_err(dev, "cannot allocate DMA TX descriptor\n");
++ return -ENOMEM;
++ }
++ oob->dma.tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE * OOB_DMA_DESC_NUM, &oob->dma.tx_addr, GFP_KERNEL);
++ if (!oob->dma.tx_virt) {
++ dev_err(dev, "cannot allocate DMA TX buffer\n");
++ return -ENOMEM;
++ }
++
++ oob->dma.rxd_virt = dmam_alloc_coherent(dev, sizeof(*oob->dma.rxd_virt) * OOB_DMA_DESC_NUM, &oob->dma.rxd_addr, GFP_KERNEL);
++ if (!oob->dma.rxd_virt) {
++ dev_err(dev, "cannot allocate DMA RX descriptor\n");
++ return -ENOMEM;
++ }
++
++ oob->dma.rx_virt = dmam_alloc_coherent(dev, PAGE_SIZE * OOB_DMA_DESC_NUM, &oob->dma.rx_addr, GFP_KERNEL);
++ if (!oob->dma.rx_virt) {
++ dev_err(dev, "cannot allocate DMA RX buffer\n");
++ return -ENOMEM;
++ }
++ }
++
++ oob->mdev.parent = dev;
++ oob->mdev.minor = MISC_DYNAMIC_MINOR;
++ oob->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-oob", DEVICE_NAME);
++ oob->mdev.fops = &ast2600_espi_oob_fops;
++ rc = misc_register(&oob->mdev);
++ if (rc) {
++ dev_err(dev, "cannot register device %s\n", oob->mdev.name);
++ return rc;
++ }
++
++ ast2600_espi_oob_reset(espi);
++
++ return 0;
++}
++
++static int ast2600_espi_oob_remove(struct ast2600_espi *espi)
++{
++ struct ast2600_espi_oob *oob;
++ struct device *dev;
++ uint32_t reg;
++
++ dev = espi->dev;
++
++ oob = &espi->oob;
++
++ writel(ESPI_INT_EN_OOB, espi->regs + ESPI_INT_EN_CLR);
++
++ reg = readl(espi->regs + ESPI_CTRL);
++ reg &= ~(ESPI_CTRL_OOB_TX_DMA_EN
++ | ESPI_CTRL_OOB_RX_DMA_EN
++ | ESPI_CTRL_OOB_SW_RDY);
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ if (oob->dma.enable) {
++ dmam_free_coherent(dev, sizeof(*oob->dma.txd_virt) * OOB_DMA_DESC_NUM,
++ oob->dma.txd_virt, oob->dma.txd_addr);
++ dmam_free_coherent(dev, PAGE_SIZE * OOB_DMA_DESC_NUM,
++ oob->dma.tx_virt, oob->dma.tx_addr);
++ dmam_free_coherent(dev, sizeof(*oob->dma.rxd_virt) * OOB_DMA_DESC_NUM,
++ oob->dma.rxd_virt, oob->dma.rxd_addr);
++ dmam_free_coherent(dev, PAGE_SIZE * OOB_DMA_DESC_NUM,
++ oob->dma.rx_virt, oob->dma.rx_addr);
++ }
++
++ mutex_destroy(&oob->tx_mtx);
++ mutex_destroy(&oob->rx_mtx);
++
++ misc_deregister(&oob->mdev);
++
++ return 0;
++}
++
++/* flash channel (CH3) */
++static long ast2600_espi_flash_get_rx(struct file *fp,
++ struct ast2600_espi_flash *flash,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2600_espi *espi;
++ struct espi_comm_hdr *hdr;
++ unsigned long flags;
++ uint32_t pkt_len;
++ uint8_t *pkt;
++ int i, rc;
++
++ rc = 0;
++
++ espi = container_of(flash, struct ast2600_espi, flash);
++
++ if (fp->f_flags & O_NONBLOCK) {
++ if (!mutex_trylock(&flash->rx_mtx))
++ return -EAGAIN;
++
++ if (!flash->rx_ready) {
++ rc = -ENODATA;
++ goto unlock_mtx_n_out;
++ }
++ } else {
++ mutex_lock(&flash->rx_mtx);
++
++ if (!flash->rx_ready) {
++ rc = wait_event_interruptible(flash->wq, flash->rx_ready);
++ if (rc == -ERESTARTSYS) {
++ rc = -EINTR;
++ goto unlock_mtx_n_out;
++ }
++ }
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++ * part is read from HW registers rather than the data FIFO
++ */
++ reg = readl(espi->regs + ESPI_FLASH_RX_CTRL);
++ cyc = FIELD_GET(ESPI_FLASH_RX_CTRL_CYC, reg);
++ tag = FIELD_GET(ESPI_FLASH_RX_CTRL_TAG, reg);
++ len = FIELD_GET(ESPI_FLASH_RX_CTRL_LEN, reg);
++
++ /*
++ * calculate the length of the rest part of the
++ * eSPI packet to be read from HW and copied to
++ * user space.
++ */
++ switch (cyc) {
++ case ESPI_FLASH_WRITE:
++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) +
++ sizeof(struct espi_flash_rwe);
++ break;
++ case ESPI_FLASH_READ:
++ case ESPI_FLASH_ERASE:
++ pkt_len = sizeof(struct espi_flash_rwe);
++ break;
++ case ESPI_FLASH_SUC_CMPLT_D_MIDDLE:
++ case ESPI_FLASH_SUC_CMPLT_D_FIRST:
++ case ESPI_FLASH_SUC_CMPLT_D_LAST:
++ case ESPI_FLASH_SUC_CMPLT_D_ONLY:
++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) +
++ sizeof(struct espi_flash_cmplt);
++ break;
++ case ESPI_FLASH_SUC_CMPLT:
++ case ESPI_FLASH_UNSUC_CMPLT:
++ pkt_len = sizeof(struct espi_flash_cmplt);
++ break;
++ default:
++ rc = -EFAULT;
++ goto unlock_mtx_n_out;
++ }
++
++ if (ioc->pkt_len < pkt_len) {
++ rc = -EINVAL;
++ goto unlock_mtx_n_out;
++ }
++
++ pkt = vmalloc(pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_mtx_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++ hdr->cyc = cyc;
++ hdr->tag = tag;
++ hdr->len_h = len >> 8;
++ hdr->len_l = len & 0xff;
++
++ if (flash->dma.enable) {
++ memcpy(hdr + 1, flash->dma.rx_virt, pkt_len - sizeof(*hdr));
++ } else {
++ for (i = sizeof(*hdr); i < pkt_len; ++i)
++ pkt[i] = readl(espi->regs + ESPI_FLASH_RX_DATA) & 0xff;
++ }
++
++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ spin_lock_irqsave(&flash->lock, flags);
++
++ writel(ESPI_FLASH_RX_CTRL_SERV_PEND, espi->regs + ESPI_FLASH_RX_CTRL);
++ flash->rx_ready = 0;
++
++ spin_unlock_irqrestore(&flash->lock, flags);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_mtx_n_out:
++ mutex_unlock(&flash->rx_mtx);
++
++ return rc;
++}
++
++static long ast2600_espi_flash_put_tx(struct file *fp,
++ struct ast2600_espi_flash *flash,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2600_espi *espi;
++ struct espi_comm_hdr *hdr;
++ uint8_t *pkt;
++ int i, rc;
++
++ espi = container_of(flash, struct ast2600_espi, flash);
++
++ if (!mutex_trylock(&flash->tx_mtx))
++ return -EAGAIN;
++
++ reg = readl(espi->regs + ESPI_FLASH_TX_CTRL);
++ if (reg & ESPI_FLASH_TX_CTRL_TRIG_PEND) {
++ rc = -EBUSY;
++ goto unlock_mtx_n_out;
++ }
++
++ pkt = vmalloc(ioc->pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_mtx_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++
++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++ * part is written to HW registers
++ */
++ if (flash->dma.enable) {
++ memcpy(flash->dma.tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr));
++ dma_wmb();
++ } else {
++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i)
++ writel(pkt[i], espi->regs + ESPI_FLASH_TX_DATA);
++ }
++
++ cyc = hdr->cyc;
++ tag = hdr->tag;
++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff);
++
++ reg = FIELD_PREP(ESPI_FLASH_TX_CTRL_CYC, cyc)
++ | FIELD_PREP(ESPI_FLASH_TX_CTRL_TAG, tag)
++ | FIELD_PREP(ESPI_FLASH_TX_CTRL_LEN, len)
++ | ESPI_FLASH_TX_CTRL_TRIG_PEND;
++ writel(reg, espi->regs + ESPI_FLASH_TX_CTRL);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_mtx_n_out:
++ mutex_unlock(&flash->tx_mtx);
++
++ return rc;
++}
++
++static long ast2600_espi_flash_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
++{
++ struct ast2600_espi_flash *flash;
++ struct aspeed_espi_ioc ioc;
++
++ flash = container_of(fp->private_data, struct ast2600_espi_flash, mdev);
++
++ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc)))
++ return -EFAULT;
++
++ if (ioc.pkt_len > ESPI_MAX_PKT_LEN)
++ return -EINVAL;
++
++ switch (cmd) {
++ case ASPEED_ESPI_FLASH_GET_RX:
++ return ast2600_espi_flash_get_rx(fp, flash, &ioc);
++ case ASPEED_ESPI_FLASH_PUT_TX:
++ return ast2600_espi_flash_put_tx(fp, flash, &ioc);
++ default:
++ break;
++ }
++
++ return -EINVAL;
++}
++
++static const struct file_operations ast2600_espi_flash_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = ast2600_espi_flash_ioctl,
++};
++
++static void ast2600_espi_flash_isr(struct ast2600_espi *espi)
++{
++ struct ast2600_espi_flash *flash;
++ unsigned long flags;
++ uint32_t sts;
++
++ flash = &espi->flash;
++
++ sts = readl(espi->regs + ESPI_INT_STS);
++
++ if (sts & ESPI_INT_STS_FLASH_RX_CMPLT) {
++ writel(ESPI_INT_STS_FLASH_RX_CMPLT, espi->regs + ESPI_INT_STS);
++
++ spin_lock_irqsave(&flash->lock, flags);
++ flash->rx_ready = true;
++ spin_unlock_irqrestore(&flash->lock, flags);
++
++ wake_up_interruptible(&flash->wq);
++ }
++}
++
++static void ast2600_espi_flash_reset(struct ast2600_espi *espi)
++{
++ struct ast2600_espi_flash *flash;
++ uint32_t reg;
++
++ flash = &espi->flash;
++
++ writel(ESPI_INT_EN_FLASH, espi->regs + ESPI_INT_EN_CLR);
++ writel(ESPI_INT_STS_FLASH, espi->regs + ESPI_INT_STS);
++
++ reg = readl(espi->regs + ESPI_CTRL);
++ reg &= ~(ESPI_CTRL_FLASH_TX_SW_RST
++ | ESPI_CTRL_FLASH_RX_SW_RST
++ | ESPI_CTRL_FLASH_TX_DMA_EN
++ | ESPI_CTRL_FLASH_RX_DMA_EN
++ | ESPI_CTRL_FLASH_SW_RDY);
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ udelay(1);
++
++ reg |= (ESPI_CTRL_FLASH_TX_SW_RST | ESPI_CTRL_FLASH_RX_SW_RST);
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ reg = readl(espi->regs + ESPI_CTRL) & ~ESPI_CTRL_FLASH_SAFS_MODE;
++ reg |= FIELD_PREP(ESPI_CTRL_FLASH_SAFS_MODE, flash->safs.mode);
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ if (flash->safs.mode == SAFS_MODE_MIX) {
++ reg = FIELD_PREP(ESPI_FLASH_SAFS_TADDR_BASE, flash->safs.taddr >> 24)
++ | FIELD_PREP(ESPI_FLASH_SAFS_TADDR_MASK, (~(flash->safs.size - 1)) >> 24);
++ writel(reg, espi->regs + ESPI_FLASH_SAFS_TADDR);
++ }
++
++ if (flash->dma.enable) {
++ writel(flash->dma.tx_addr, espi->regs + ESPI_FLASH_TX_DMA);
++ writel(flash->dma.rx_addr, espi->regs + ESPI_FLASH_RX_DMA);
++
++ reg = readl(espi->regs + ESPI_CTRL)
++ | ESPI_CTRL_FLASH_TX_DMA_EN
++ | ESPI_CTRL_FLASH_RX_DMA_EN;
++ writel(reg, espi->regs + ESPI_CTRL);
++ }
++
++ writel(ESPI_INT_EN_FLASH_RX_CMPLT, espi->regs + ESPI_INT_EN);
++
++ reg = readl(espi->regs + ESPI_CTRL) | ESPI_CTRL_FLASH_SW_RDY;
++ writel(reg, espi->regs + ESPI_CTRL);
++}
++
++static int ast2600_espi_flash_probe(struct ast2600_espi *espi)
++{
++ struct ast2600_espi_flash *flash;
++ struct device *dev;
++ int rc;
++
++ dev = espi->dev;
++
++ flash = &espi->flash;
++
++ init_waitqueue_head(&flash->wq);
++
++ spin_lock_init(&flash->lock);
++
++ mutex_init(&flash->tx_mtx);
++ mutex_init(&flash->rx_mtx);
++
++ flash->safs.mode = SAFS_MODE_HW;
++
++ of_property_read_u32(dev->of_node, "flash-safs-mode", &flash->safs.mode);
++ if (flash->safs.mode == SAFS_MODE_MIX) {
++ rc = of_property_read_u32(dev->of_node, "flash-safs-tgt-addr", &flash->safs.taddr);
++ if (rc || !IS_ALIGNED(flash->safs.taddr, FLASH_SAFS_ALIGN)) {
++ dev_err(dev, "cannot get 16MB-aligned SAFS target address\n");
++ return -ENODEV;
++ }
++
++ rc = of_property_read_u32(dev->of_node, "flash-safs-size", &flash->safs.size);
++ if (rc || !IS_ALIGNED(flash->safs.size, FLASH_SAFS_ALIGN)) {
++ dev_err(dev, "cannot get 16MB-aligned SAFS size\n");
++ return -ENODEV;
++ }
++ }
++
++ flash->dma.enable = of_property_read_bool(dev->of_node, "flash-dma-mode");
++ if (flash->dma.enable) {
++ flash->dma.tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, &flash->dma.tx_addr, GFP_KERNEL);
++ if (!flash->dma.tx_virt) {
++ dev_err(dev, "cannot allocate DMA TX buffer\n");
++ return -ENOMEM;
++ }
++
++ flash->dma.rx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, &flash->dma.rx_addr, GFP_KERNEL);
++ if (!flash->dma.rx_virt) {
++ dev_err(dev, "cannot allocate DMA RX buffer\n");
++ return -ENOMEM;
++ }
++ }
++
++ flash->mdev.parent = dev;
++ flash->mdev.minor = MISC_DYNAMIC_MINOR;
++ flash->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-flash", DEVICE_NAME);
++ flash->mdev.fops = &ast2600_espi_flash_fops;
++ rc = misc_register(&flash->mdev);
++ if (rc) {
++ dev_err(dev, "cannot register device %s\n", flash->mdev.name);
++ return rc;
++ }
++
++ ast2600_espi_flash_reset(espi);
++
++ return 0;
++}
++
++static int ast2600_espi_flash_remove(struct ast2600_espi *espi)
++{
++ struct ast2600_espi_flash *flash;
++ struct device *dev;
++ uint32_t reg;
++
++ dev = espi->dev;
++
++ flash = &espi->flash;
++
++ writel(ESPI_INT_EN_FLASH, espi->regs + ESPI_INT_EN_CLR);
++
++ reg = readl(espi->regs + ESPI_CTRL);
++ reg &= ~(ESPI_CTRL_FLASH_TX_DMA_EN
++ | ESPI_CTRL_FLASH_RX_DMA_EN
++ | ESPI_CTRL_FLASH_SW_RDY);
++ writel(reg, espi->regs + ESPI_CTRL);
++
++ if (flash->dma.enable) {
++ dmam_free_coherent(dev, PAGE_SIZE, flash->dma.tx_virt, flash->dma.tx_addr);
++ dmam_free_coherent(dev, PAGE_SIZE, flash->dma.rx_virt, flash->dma.rx_addr);
++ }
++
++ mutex_destroy(&flash->tx_mtx);
++ mutex_destroy(&flash->rx_mtx);
++
++ misc_deregister(&flash->mdev);
++
++ return 0;
++}
++
++/* global control */
++static irqreturn_t ast2600_espi_isr(int irq, void *arg)
++{
++ struct ast2600_espi *espi;
++ uint32_t sts;
++
++ espi = (struct ast2600_espi *)arg;
++
++ sts = readl(espi->regs + ESPI_INT_STS);
++ if (!sts)
++ return IRQ_NONE;
++
++ if (sts & ESPI_INT_STS_PERIF)
++ ast2600_espi_perif_isr(espi);
++
++ if (sts & ESPI_INT_STS_VW)
++ ast2600_espi_vw_isr(espi);
++
++ if (sts & ESPI_INT_STS_OOB)
++ ast2600_espi_oob_isr(espi);
++
++ if (sts & ESPI_INT_STS_FLASH)
++ ast2600_espi_flash_isr(espi);
++
++ if (sts & ESPI_INT_STS_RST_DEASSERT) {
++ /* this will clear all interrupt enable and status */
++ reset_control_assert(espi->rst);
++ reset_control_deassert(espi->rst);
++
++ ast2600_espi_perif_reset(espi);
++ ast2600_espi_vw_reset(espi);
++ ast2600_espi_oob_reset(espi);
++ ast2600_espi_flash_reset(espi);
++
++ /* re-enable eSPI_RESET# interrupt */
++ writel(ESPI_INT_EN_RST_DEASSERT, espi->regs + ESPI_INT_EN);
++ }
++
++ return IRQ_HANDLED;
++}
++
++static int ast2600_espi_probe(struct platform_device *pdev)
++{
++ struct ast2600_espi *espi;
++ struct resource *res;
++ struct device *dev;
++ int rc;
++
++ dev = &pdev->dev;
++
++ espi = devm_kzalloc(dev, sizeof(*espi), GFP_KERNEL);
++ if (!espi)
++ return -ENOMEM;
++
++ espi->dev = dev;
++
++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
++ if (rc) {
++ dev_err(dev, "cannot set 64-bit DMA mask\n");
++ return rc;
++ }
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(dev, "cannot get resource\n");
++ return -ENODEV;
++ }
++
++ espi->regs = devm_ioremap_resource(dev, res);
++ if (IS_ERR(espi->regs)) {
++ dev_err(dev, "cannot map registers\n");
++ return PTR_ERR(espi->regs);
++ }
++
++ espi->irq = platform_get_irq(pdev, 0);
++ if (espi->irq < 0) {
++ dev_err(dev, "cannot get IRQ number\n");
++ return -ENODEV;
++ }
++
++ espi->rst = devm_reset_control_get_exclusive_by_index(dev, 0);
++ if (IS_ERR(espi->rst)) {
++ dev_err(dev, "cannot get reset control\n");
++ return PTR_ERR(espi->rst);
++ }
++
++ espi->clk = devm_clk_get(dev, NULL);
++ if (IS_ERR(espi->clk)) {
++ dev_err(dev, "cannot get clock control\n");
++ return PTR_ERR(espi->clk);
++ }
++
++ rc = clk_prepare_enable(espi->clk);
++ if (rc) {
++ dev_err(dev, "cannot enable clocks\n");
++ return rc;
++ }
++
++ writel(ESPI_INT_EN_RST_DEASSERT, espi->regs + ESPI_INT_EN_CLR);
++
++ rc = ast2600_espi_perif_probe(espi);
++ if (rc) {
++ dev_err(dev, "cannot init peripheral channel, rc=%d\n", rc);
++ return rc;
++ }
++
++ rc = ast2600_espi_vw_probe(espi);
++ if (rc) {
++ dev_err(dev, "cannot init vw channel, rc=%d\n", rc);
++ goto err_remove_perif;
++ }
++
++ rc = ast2600_espi_oob_probe(espi);
++ if (rc) {
++ dev_err(dev, "cannot init oob channel, rc=%d\n", rc);
++ goto err_remove_vw;
++ }
++
++ rc = ast2600_espi_flash_probe(espi);
++ if (rc) {
++ dev_err(dev, "cannot init flash channel, rc=%d\n", rc);
++ goto err_remove_oob;
++ }
++
++ rc = devm_request_irq(dev, espi->irq, ast2600_espi_isr, 0, dev_name(dev), espi);
++ if (rc) {
++ dev_err(dev, "cannot request IRQ\n");
++ goto err_remove_flash;
++ }
++
++ writel(ESPI_INT_EN_RST_DEASSERT, espi->regs + ESPI_INT_EN);
++
++ dev_set_drvdata(dev, espi);
++
++ dev_info(dev, "module loaded\n");
++
++ return 0;
++
++err_remove_flash:
++ ast2600_espi_flash_remove(espi);
++err_remove_oob:
++ ast2600_espi_oob_remove(espi);
++err_remove_vw:
++ ast2600_espi_vw_remove(espi);
++err_remove_perif:
++ ast2600_espi_perif_remove(espi);
++
++ return rc;
++}
++
++static int ast2600_espi_remove(struct platform_device *pdev)
++{
++ struct ast2600_espi *espi;
++ struct device *dev;
++ int rc;
++
++ dev = &pdev->dev;
++
++ espi = (struct ast2600_espi *)dev_get_drvdata(dev);
++
++ writel(ESPI_INT_EN_RST_DEASSERT, espi->regs + ESPI_INT_EN_CLR);
++
++ rc = ast2600_espi_perif_remove(espi);
++ if (rc)
++ dev_warn(dev, "cannot remove peripheral channel, rc=%d\n", rc);
++
++ rc = ast2600_espi_vw_remove(espi);
++ if (rc)
++ dev_warn(dev, "cannot remove vw channel, rc=%d\n", rc);
++
++ rc = ast2600_espi_oob_remove(espi);
++ if (rc)
++ dev_warn(dev, "cannot remove oob channel, rc=%d\n", rc);
++
++ rc = ast2600_espi_flash_remove(espi);
++ if (rc)
++ dev_warn(dev, "cannot remove flash channel, rc=%d\n", rc);
++
++ return 0;
++}
++
++static const struct of_device_id ast2600_espi_of_matches[] = {
++ { .compatible = "aspeed,ast2600-espi" },
++ { },
++};
++
++static struct platform_driver ast2600_espi_driver = {
++ .driver = {
++ .name = "ast2600-espi",
++ .of_match_table = ast2600_espi_of_matches,
++ },
++ .probe = ast2600_espi_probe,
++ .remove = ast2600_espi_remove,
++};
++
++module_platform_driver(ast2600_espi_driver);
++
++MODULE_AUTHOR("Chia-Wei Wang <chiawei_wang@aspeedtech.com>");
++MODULE_DESCRIPTION("Control of AST2600 eSPI Device");
++MODULE_LICENSE("GPL");
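For reference, the OOB channel device registered above is driven from user space through the ASPEED_ESPI_OOB_GET_RX/PUT_TX ioctls and the aspeed_espi_ioc descriptor used by the handlers. A minimal sketch follows; the uapi header path <linux/aspeed-espi-ioc.h> and the pointer type of the pkt field are assumptions (adjust to the headers shipped with the SDK), while the request macro, the pkt_len bound and the 4-byte common header (cyc, tag, len_h, len_l) come from the driver code itself.

```
/* Minimal user-space sketch for /dev/aspeed-espi-oob (assumptions noted above). */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/aspeed-espi-ioc.h>	/* assumed uapi header name */

int main(void)
{
	uint8_t buf[4096];		/* must not exceed ESPI_MAX_PKT_LEN */
	struct aspeed_espi_ioc ioc;
	int fd, ret;

	fd = open("/dev/aspeed-espi-oob", O_RDWR);
	if (fd < 0)
		return 1;

	memset(&ioc, 0, sizeof(ioc));
	ioc.pkt_len = sizeof(buf);
	ioc.pkt = buf;			/* cast if pkt is declared as a 64-bit handle */

	/* blocks until an OOB packet arrives unless the fd is opened O_NONBLOCK */
	ret = ioctl(fd, ASPEED_ESPI_OOB_GET_RX, &ioc);
	if (!ret)
		printf("cyc=0x%02x tag=0x%02x len=%u\n",
		       buf[0], buf[1], (buf[2] << 8) | buf[3]);

	close(fd);
	return ret;
}
```

The flash channel device follows the same pattern with ASPEED_ESPI_FLASH_GET_RX/PUT_TX.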
+diff --git a/drivers/soc/aspeed/ast2600-espi.h b/drivers/soc/aspeed/ast2600-espi.h
+new file mode 100644
+index 000000000..eb826f225
+--- /dev/null
++++ b/drivers/soc/aspeed/ast2600-espi.h
+@@ -0,0 +1,297 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Copyright 2023 Aspeed Technology Inc.
++ */
++#ifndef _AST2600_ESPI_H_
++#define _AST2600_ESPI_H_
++
++#include <linux/bits.h>
++#include "aspeed-espi-comm.h"
++
++/* registers */
++#define ESPI_CTRL 0x000
++#define ESPI_CTRL_FLASH_TX_SW_RST BIT(31)
++#define ESPI_CTRL_FLASH_RX_SW_RST BIT(30)
++#define ESPI_CTRL_OOB_TX_SW_RST BIT(29)
++#define ESPI_CTRL_OOB_RX_SW_RST BIT(28)
++#define ESPI_CTRL_PERIF_NP_TX_SW_RST BIT(27)
++#define ESPI_CTRL_PERIF_NP_RX_SW_RST BIT(26)
++#define ESPI_CTRL_PERIF_PC_TX_SW_RST BIT(25)
++#define ESPI_CTRL_PERIF_PC_RX_SW_RST BIT(24)
++#define ESPI_CTRL_FLASH_TX_DMA_EN BIT(23)
++#define ESPI_CTRL_FLASH_RX_DMA_EN BIT(22)
++#define ESPI_CTRL_OOB_TX_DMA_EN BIT(21)
++#define ESPI_CTRL_OOB_RX_DMA_EN BIT(20)
++#define ESPI_CTRL_PERIF_NP_TX_DMA_EN BIT(19)
++#define ESPI_CTRL_PERIF_PC_TX_DMA_EN BIT(17)
++#define ESPI_CTRL_PERIF_PC_RX_DMA_EN BIT(16)
++#define ESPI_CTRL_FLASH_SAFS_MODE GENMASK(11, 10)
++#define ESPI_CTRL_VW_GPIO_SW BIT(9)
++#define ESPI_CTRL_FLASH_SW_RDY BIT(7)
++#define ESPI_CTRL_OOB_SW_RDY BIT(4)
++#define ESPI_CTRL_VW_SW_RDY BIT(3)
++#define ESPI_CTRL_PERIF_SW_RDY BIT(1)
++#define ESPI_STS 0x004
++#define ESPI_INT_STS 0x008
++#define ESPI_INT_STS_RST_DEASSERT BIT(31)
++#define ESPI_INT_STS_OOB_RX_TMOUT BIT(23)
++#define ESPI_INT_STS_VW_SYSEVT1 BIT(22)
++#define ESPI_INT_STS_FLASH_TX_ERR BIT(21)
++#define ESPI_INT_STS_OOB_TX_ERR BIT(20)
++#define ESPI_INT_STS_FLASH_TX_ABT BIT(19)
++#define ESPI_INT_STS_OOB_TX_ABT BIT(18)
++#define ESPI_INT_STS_PERIF_NP_TX_ABT BIT(17)
++#define ESPI_INT_STS_PERIF_PC_TX_ABT BIT(16)
++#define ESPI_INT_STS_FLASH_RX_ABT BIT(15)
++#define ESPI_INT_STS_OOB_RX_ABT BIT(14)
++#define ESPI_INT_STS_PERIF_NP_RX_ABT BIT(13)
++#define ESPI_INT_STS_PERIF_PC_RX_ABT BIT(12)
++#define ESPI_INT_STS_PERIF_NP_TX_ERR BIT(11)
++#define ESPI_INT_STS_PERIF_PC_TX_ERR BIT(10)
++#define ESPI_INT_STS_VW_GPIO BIT(9)
++#define ESPI_INT_STS_VW_SYSEVT BIT(8)
++#define ESPI_INT_STS_FLASH_TX_CMPLT BIT(7)
++#define ESPI_INT_STS_FLASH_RX_CMPLT BIT(6)
++#define ESPI_INT_STS_OOB_TX_CMPLT BIT(5)
++#define ESPI_INT_STS_OOB_RX_CMPLT BIT(4)
++#define ESPI_INT_STS_PERIF_NP_TX_CMPLT BIT(3)
++#define ESPI_INT_STS_PERIF_PC_TX_CMPLT BIT(1)
++#define ESPI_INT_STS_PERIF_PC_RX_CMPLT BIT(0)
++#define ESPI_INT_EN 0x00c
++#define ESPI_INT_EN_RST_DEASSERT BIT(31)
++#define ESPI_INT_EN_OOB_RX_TMOUT BIT(23)
++#define ESPI_INT_EN_VW_SYSEVT1 BIT(22)
++#define ESPI_INT_EN_FLASH_TX_ERR BIT(21)
++#define ESPI_INT_EN_OOB_TX_ERR BIT(20)
++#define ESPI_INT_EN_FLASH_TX_ABT BIT(19)
++#define ESPI_INT_EN_OOB_TX_ABT BIT(18)
++#define ESPI_INT_EN_PERIF_NP_TX_ABT BIT(17)
++#define ESPI_INT_EN_PERIF_PC_TX_ABT BIT(16)
++#define ESPI_INT_EN_FLASH_RX_ABT BIT(15)
++#define ESPI_INT_EN_OOB_RX_ABT BIT(14)
++#define ESPI_INT_EN_PERIF_NP_RX_ABT BIT(13)
++#define ESPI_INT_EN_PERIF_PC_RX_ABT BIT(12)
++#define ESPI_INT_EN_PERIF_NP_TX_ERR BIT(11)
++#define ESPI_INT_EN_PERIF_PC_TX_ERR BIT(10)
++#define ESPI_INT_EN_VW_GPIO BIT(9)
++#define ESPI_INT_EN_VW_SYSEVT BIT(8)
++#define ESPI_INT_EN_FLASH_TX_CMPLT BIT(7)
++#define ESPI_INT_EN_FLASH_RX_CMPLT BIT(6)
++#define ESPI_INT_EN_OOB_TX_CMPLT BIT(5)
++#define ESPI_INT_EN_OOB_RX_CMPLT BIT(4)
++#define ESPI_INT_EN_PERIF_NP_TX_CMPLT BIT(3)
++#define ESPI_INT_EN_PERIF_PC_TX_CMPLT BIT(1)
++#define ESPI_INT_EN_PERIF_PC_RX_CMPLT BIT(0)
++#define ESPI_PERIF_PC_RX_DMA 0x010
++#define ESPI_PERIF_PC_RX_CTRL 0x014
++#define ESPI_PERIF_PC_RX_CTRL_SERV_PEND BIT(31)
++#define ESPI_PERIF_PC_RX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_PERIF_PC_RX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_PERIF_PC_RX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_PERIF_PC_RX_DATA 0x018
++#define ESPI_PERIF_PC_TX_DMA 0x020
++#define ESPI_PERIF_PC_TX_CTRL 0x024
++#define ESPI_PERIF_PC_TX_CTRL_TRIG_PEND BIT(31)
++#define ESPI_PERIF_PC_TX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_PERIF_PC_TX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_PERIF_PC_TX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_PERIF_PC_TX_DATA 0x028
++#define ESPI_PERIF_NP_TX_DMA 0x030
++#define ESPI_PERIF_NP_TX_CTRL 0x034
++#define ESPI_PERIF_NP_TX_CTRL_TRIG_PEND BIT(31)
++#define ESPI_PERIF_NP_TX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_PERIF_NP_TX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_PERIF_NP_TX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_PERIF_NP_TX_DATA 0x038
++#define ESPI_OOB_RX_DMA 0x040
++#define ESPI_OOB_RX_CTRL 0x044
++#define ESPI_OOB_RX_CTRL_SERV_PEND BIT(31)
++#define ESPI_OOB_RX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_OOB_RX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_OOB_RX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_OOB_RX_DATA 0x048
++#define ESPI_OOB_TX_DMA 0x050
++#define ESPI_OOB_TX_CTRL 0x054
++#define ESPI_OOB_TX_CTRL_TRIG_PEND BIT(31)
++#define ESPI_OOB_TX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_OOB_TX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_OOB_TX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_OOB_TX_DATA 0x058
++#define ESPI_FLASH_RX_DMA 0x060
++#define ESPI_FLASH_RX_CTRL 0x064
++#define ESPI_FLASH_RX_CTRL_SERV_PEND BIT(31)
++#define ESPI_FLASH_RX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_FLASH_RX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_FLASH_RX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_FLASH_RX_DATA 0x068
++#define ESPI_FLASH_TX_DMA 0x070
++#define ESPI_FLASH_TX_CTRL 0x074
++#define ESPI_FLASH_TX_CTRL_TRIG_PEND BIT(31)
++#define ESPI_FLASH_TX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_FLASH_TX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_FLASH_TX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_FLASH_TX_DATA 0x078
++#define ESPI_CTRL2 0x080
++#define ESPI_CTRL2_VW_TX_SORT BIT(30)
++#define ESPI_CTRL2_MCYC_RD_DIS_WDT BIT(11)
++#define ESPI_CTRL2_MCYC_WR_DIS_WDT BIT(10)
++#define ESPI_CTRL2_MCYC_RD_DIS BIT(6)
++#define ESPI_CTRL2_MMBI_RD_DIS ESPI_CTRL2_MCYC_RD_DIS
++#define ESPI_CTRL2_MCYC_WR_DIS BIT(4)
++#define ESPI_CTRL2_MMBI_WR_DIS ESPI_CTRL2_MCYC_WR_DIS
++#define ESPI_PERIF_MCYC_SADDR 0x084
++#define ESPI_PERIF_MMBI_SADDR ESPI_PERIF_MCYC_SADDR
++#define ESPI_PERIF_MCYC_TADDR 0x088
++#define ESPI_PERIF_MMBI_TADDR ESPI_PERIF_MCYC_TADDR
++#define ESPI_PERIF_MCYC_MASK 0x08c
++#define ESPI_PERIF_MMBI_MASK ESPI_PERIF_MCYC_MASK
++#define ESPI_FLASH_SAFS_TADDR 0x090
++#define ESPI_FLASH_SAFS_TADDR_BASE GENMASK(31, 24)
++#define ESPI_FLASH_SAFS_TADDR_MASK GENMASK(15, 8)
++#define ESPI_VW_SYSEVT_INT_EN 0x094
++#define ESPI_VW_SYSEVT 0x098
++#define ESPI_VW_SYSEVT_HOST_RST_ACK BIT(27)
++#define ESPI_VW_SYSEVT_RST_CPU_INIT BIT(26)
++#define ESPI_VW_SYSEVT_SLV_BOOT_STS BIT(23)
++#define ESPI_VW_SYSEVT_NON_FATAL_ERR BIT(22)
++#define ESPI_VW_SYSEVT_FATAL_ERR BIT(21)
++#define ESPI_VW_SYSEVT_SLV_BOOT_DONE BIT(20)
++#define ESPI_VW_SYSEVT_OOB_RST_ACK BIT(16)
++#define ESPI_VW_SYSEVT_NMI_OUT BIT(10)
++#define ESPI_VW_SYSEVT_SMI_OUT BIT(9)
++#define ESPI_VW_SYSEVT_HOST_RST_WARN BIT(8)
++#define ESPI_VW_SYSEVT_OOB_RST_WARN BIT(6)
++#define ESPI_VW_SYSEVT_PLTRSTN BIT(5)
++#define ESPI_VW_SYSEVT_SUSPEND BIT(4)
++#define ESPI_VW_SYSEVT_S5_SLEEP BIT(2)
++#define ESPI_VW_SYSEVT_S4_SLEEP BIT(1)
++#define ESPI_VW_SYSEVT_S3_SLEEP BIT(0)
++#define ESPI_VW_GPIO_VAL 0x09c
++#define ESPI_GEN_CAP_N_CONF 0x0a0
++#define ESPI_CH0_CAP_N_CONF 0x0a4
++#define ESPI_CH1_CAP_N_CONF 0x0a8
++#define ESPI_CH2_CAP_N_CONF 0x0ac
++#define ESPI_CH3_CAP_N_CONF 0x0b0
++#define ESPI_CH3_CAP_N_CONF2 0x0b4
++#define ESPI_VW_GPIO_DIR 0x0c0
++#define ESPI_VW_GPIO_GRP 0x0c4
++#define ESPI_INT_EN_CLR 0x0fc
++#define ESPI_VW_SYSEVT1_INT_EN 0x100
++#define ESPI_VW_SYSEVT1 0x104
++#define ESPI_VW_SYSEVT1_SUSPEND_ACK BIT(20)
++#define ESPI_VW_SYSEVT1_SUSPEND_WARN BIT(0)
++#define ESPI_VW_SYSEVT_INT_T0 0x110
++#define ESPI_VW_SYSEVT_INT_T1 0x114
++#define ESPI_VW_SYSEVT_INT_T2 0x118
++#define ESPI_VW_SYSEVT_INT_STS 0x11c
++#define ESPI_VW_SYSEVT1_INT_T0 0x120
++#define ESPI_VW_SYSEVT1_INT_T1 0x124
++#define ESPI_VW_SYSEVT1_INT_T2 0x128
++#define ESPI_VW_SYSEVT1_INT_STS 0x12c
++#define ESPI_OOB_RX_DESC_NUM 0x130
++#define ESPI_OOB_RX_DESC_RPTR 0x134
++#define ESPI_OOB_RX_DESC_RPTR_UPDATE BIT(31)
++#define ESPI_OOB_RX_DESC_RPTR_RP GENMASK(11, 0)
++#define ESPI_OOB_RX_DESC_WPTR 0x138
++#define ESPI_OOB_RX_DESC_WPTR_RECV_EN BIT(31)
++#define ESPI_OOB_RX_DESC_WPTR_SP GENMASK(27, 16)
++#define ESPI_OOB_RX_DESC_WPTR_WP GENMASK(11, 0)
++#define ESPI_OOB_TX_DESC_NUM 0x140
++#define ESPI_OOB_TX_DESC_RPTR 0x144
++#define ESPI_OOB_TX_DESC_RPTR_UPDATE BIT(31)
++#define ESPI_OOB_TX_DESC_WPTR 0x148
++#define ESPI_OOB_TX_DESC_WPTR_SEND_EN BIT(31)
++#define ESPI_MMBI_CTRL 0x800
++#define ESPI_MMBI_CTRL_INST_SZ GENMASK(10, 8)
++#define ESPI_MMBI_CTRL_TOTAL_SZ GENMASK(6, 4)
++#define ESPI_MMBI_CTRL_EN BIT(0)
++#define ESPI_MMBI_INT_STS 0x808
++#define ESPI_MMBI_INT_EN 0x80c
++#define ESPI_MMBI_HOST_RWP(x) (0x810 + ((x) << 3))
++
++/* collect ESPI_INT_EN bits for convenience */
++#define ESPI_INT_EN_PERIF \
++ (ESPI_INT_EN_PERIF_NP_TX_ABT | \
++ ESPI_INT_EN_PERIF_PC_TX_ABT | \
++ ESPI_INT_EN_PERIF_NP_RX_ABT | \
++ ESPI_INT_EN_PERIF_PC_RX_ABT | \
++ ESPI_INT_EN_PERIF_NP_TX_ERR | \
++ ESPI_INT_EN_PERIF_PC_TX_ERR | \
++ ESPI_INT_EN_PERIF_NP_TX_CMPLT | \
++ ESPI_INT_EN_PERIF_PC_TX_CMPLT | \
++ ESPI_INT_EN_PERIF_PC_RX_CMPLT)
++
++#define ESPI_INT_EN_VW \
++ (ESPI_INT_EN_VW_SYSEVT1 | \
++ ESPI_INT_EN_VW_GPIO | \
++ ESPI_INT_EN_VW_SYSEVT)
++
++#define ESPI_INT_EN_OOB \
++ (ESPI_INT_EN_OOB_RX_TMOUT | \
++ ESPI_INT_EN_OOB_TX_ERR | \
++ ESPI_INT_EN_OOB_TX_ABT | \
++ ESPI_INT_EN_OOB_RX_ABT | \
++ ESPI_INT_EN_OOB_TX_CMPLT | \
++ ESPI_INT_EN_OOB_RX_CMPLT)
++
++#define ESPI_INT_EN_FLASH \
++ (ESPI_INT_EN_FLASH_TX_ERR | \
++ ESPI_INT_EN_FLASH_TX_ABT | \
++ ESPI_INT_EN_FLASH_RX_ABT | \
++ ESPI_INT_EN_FLASH_TX_CMPLT | \
++ ESPI_INT_EN_FLASH_RX_CMPLT)
++
++/* collect ESPI_INT_STS bits for convenience */
++#define ESPI_INT_STS_PERIF \
++ (ESPI_INT_STS_PERIF_NP_TX_ABT | \
++ ESPI_INT_STS_PERIF_PC_TX_ABT | \
++ ESPI_INT_STS_PERIF_NP_RX_ABT | \
++ ESPI_INT_STS_PERIF_PC_RX_ABT | \
++ ESPI_INT_STS_PERIF_NP_TX_ERR | \
++ ESPI_INT_STS_PERIF_PC_TX_ERR | \
++ ESPI_INT_STS_PERIF_NP_TX_CMPLT | \
++ ESPI_INT_STS_PERIF_PC_TX_CMPLT | \
++ ESPI_INT_STS_PERIF_PC_RX_CMPLT)
++
++#define ESPI_INT_STS_VW \
++ (ESPI_INT_STS_VW_SYSEVT1 | \
++ ESPI_INT_STS_VW_GPIO | \
++ ESPI_INT_STS_VW_SYSEVT)
++
++#define ESPI_INT_STS_OOB \
++ (ESPI_INT_STS_OOB_RX_TMOUT | \
++ ESPI_INT_STS_OOB_TX_ERR | \
++ ESPI_INT_STS_OOB_TX_ABT | \
++ ESPI_INT_STS_OOB_RX_ABT | \
++ ESPI_INT_STS_OOB_TX_CMPLT | \
++ ESPI_INT_STS_OOB_RX_CMPLT)
++
++#define ESPI_INT_STS_FLASH \
++ (ESPI_INT_STS_FLASH_TX_ERR | \
++ ESPI_INT_STS_FLASH_TX_ABT | \
++ ESPI_INT_STS_FLASH_RX_ABT | \
++ ESPI_INT_STS_FLASH_TX_CMPLT | \
++ ESPI_INT_STS_FLASH_RX_CMPLT)
++
++/* consistent with DTS property "flash-safs-mode" */
++enum ast2600_safs_mode {
++ SAFS_MODE_MIX = 0x0,
++ SAFS_MODE_SW,
++ SAFS_MODE_HW,
++ SAFS_MODES,
++};
++
++/* consistent with DTS property "perif-mmbi-instance-size" */
++enum ast2600_mmbi_instance_size {
++ MMBI_INST_SIZE_8KB = 0x0,
++ MMBI_INST_SIZE_16KB,
++ MMBI_INST_SIZE_32KB,
++ MMBI_INST_SIZE_64KB,
++ MMBI_INST_SIZE_128KB,
++ MMBI_INST_SIZE_256KB,
++ MMBI_INST_SIZE_512KB,
++ MMBI_INST_SIZE_1024KB,
++ MMBI_INST_SIZE_TYPES,
++};
++
++#endif
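The SAFS window programming in ast2600_espi_flash_reset() packs a 16MB-aligned target address into ESPI_FLASH_SAFS_TADDR_BASE (bits 31:24) and the complement of (size - 1) into ESPI_FLASH_SAFS_TADDR_MASK (bits 15:8). The standalone sketch below reproduces that arithmetic for a hypothetical 64MB window at 0x40000000; the address and size are placeholders, not values taken from this patch.

```
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical 16MB-aligned SAFS window: 64MB at 0x40000000 */
	uint32_t taddr = 0x40000000;
	uint32_t size  = 0x04000000;
	uint32_t reg;

	/* base goes to bits [31:24], mask to bits [15:8], as in the driver */
	reg  = (taddr >> 24) << 24;
	reg |= (((~(size - 1)) >> 24) & 0xff) << 8;

	/* prints 0x4000fc00: base 0x40, mask 0xfc (four 16MB units decoded) */
	printf("ESPI_FLASH_SAFS_TADDR = 0x%08x\n", reg);
	return 0;
}
```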
+diff --git a/drivers/soc/aspeed/ast2600-otp.c b/drivers/soc/aspeed/ast2600-otp.c
+new file mode 100644
+index 000000000..05b27476d
+--- /dev/null
++++ b/drivers/soc/aspeed/ast2600-otp.c
+@@ -0,0 +1,640 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (C) ASPEED Technology Inc.
++ */
++
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/miscdevice.h>
++#include <linux/slab.h>
++#include <linux/platform_device.h>
++#include <linux/regmap.h>
++#include <linux/spinlock.h>
++#include <linux/uaccess.h>
++#include <linux/mfd/syscon.h>
++#include <linux/of.h>
++#include <linux/module.h>
++#include <asm/io.h>
++#include <uapi/linux/aspeed-otp.h>
++#include <linux/soc/aspeed/aspeed-otp.h>
++
++#define ASPEED_REVISION_ID0 0x04
++#define ASPEED_REVISION_ID1 0x14
++#define ID0_AST2600A0 0x05000303
++#define ID1_AST2600A0 0x05000303
++#define ID0_AST2600A1 0x05010303
++#define ID1_AST2600A1 0x05010303
++#define ID0_AST2600A2 0x05010303
++#define ID1_AST2600A2 0x05020303
++#define ID0_AST2600A3 0x05030303
++#define ID1_AST2600A3 0x05030303
++#define ID0_AST2620A1 0x05010203
++#define ID1_AST2620A1 0x05010203
++#define ID0_AST2620A2 0x05010203
++#define ID1_AST2620A2 0x05020203
++#define ID0_AST2620A3 0x05030203
++#define ID1_AST2620A3 0x05030203
++#define ID0_AST2605A2 0x05010103
++#define ID1_AST2605A2 0x05020103
++#define ID0_AST2605A3 0x05030103
++#define ID1_AST2605A3 0x05030103
++#define ID0_AST2625A3 0x05030403
++#define ID1_AST2625A3 0x05030403
++
++#define OTP_PROTECT_KEY 0x0
++#define OTP_PASSWD 0x349fe38a
++#define OTP_COMMAND 0x4
++#define OTP_TIMING 0x8
++#define OTP_ADDR 0x10
++#define OTP_STATUS 0x14
++#define OTP_COMPARE_1 0x20
++#define OTP_COMPARE_2 0x24
++#define OTP_COMPARE_3 0x28
++#define OTP_COMPARE_4 0x2c
++#define SW_REV_ID0 0x68
++#define SW_REV_ID1 0x6c
++#define SEC_KEY_NUM 0x78
++#define RETRY 20
++
++struct aspeed_otp {
++ struct miscdevice miscdev;
++ void __iomem *reg_base;
++ bool is_open;
++ u32 otp_ver;
++ u32 *data;
++};
++
++static DEFINE_SPINLOCK(otp_state_lock);
++
++static inline u32 aspeed_otp_read(struct aspeed_otp *ctx, u32 reg)
++{
++ int val;
++
++ val = readl(ctx->reg_base + reg);
++ // printk("read:reg = 0x%08x, val = 0x%08x\n", reg, val);
++ return val;
++}
++
++static inline void aspeed_otp_write(struct aspeed_otp *ctx, u32 val, u32 reg)
++{
++ // printk("write:reg = 0x%08x, val = 0x%08x\n", reg, val);
++ writel(val, ctx->reg_base + reg);
++}
++
++static uint32_t chip_version(u32 revid0, u32 revid1)
++{
++ if (revid0 == ID0_AST2600A0 && revid1 == ID1_AST2600A0) {
++ /* AST2600-A0 */
++ return OTP_A0;
++ } else if (revid0 == ID0_AST2600A1 && revid1 == ID1_AST2600A1) {
++ /* AST2600-A1 */
++ return OTP_A1;
++ } else if (revid0 == ID0_AST2600A2 && revid1 == ID1_AST2600A2) {
++ /* AST2600-A2 */
++ return OTP_A2;
++ } else if (revid0 == ID0_AST2600A3 && revid1 == ID1_AST2600A3) {
++ /* AST2600-A3 */
++ return OTP_A3;
++ } else if (revid0 == ID0_AST2620A1 && revid1 == ID1_AST2620A1) {
++ /* AST2620-A1 */
++ return OTP_A1;
++ } else if (revid0 == ID0_AST2620A2 && revid1 == ID1_AST2620A2) {
++ /* AST2620-A2 */
++ return OTP_A2;
++ } else if (revid0 == ID0_AST2620A3 && revid1 == ID1_AST2620A3) {
++ /* AST2620-A3 */
++ return OTP_A3;
++ } else if (revid0 == ID0_AST2605A2 && revid1 == ID1_AST2605A2) {
++ /* AST2605-A2 */
++ return OTP_A2;
++ } else if (revid0 == ID0_AST2605A3 && revid1 == ID1_AST2605A3) {
++ /* AST2605-A3 */
++ return OTP_A3;
++ } else if (revid0 == ID0_AST2625A3 && revid1 == ID1_AST2625A3) {
++ /* AST2625-A3 */
++ return OTP_A3;
++ }
++ return -1;
++}
++
++static void wait_complete(struct aspeed_otp *ctx)
++{
++ int reg;
++ int i = 0;
++
++ do {
++ reg = aspeed_otp_read(ctx, OTP_STATUS);
++ if ((reg & 0x6) == 0x6)
++ i++;
++ } while (i != 2);
++}
++
++static void otp_write(struct aspeed_otp *ctx, u32 otp_addr, u32 val)
++{
++ aspeed_otp_write(ctx, otp_addr, OTP_ADDR); //write address
++ aspeed_otp_write(ctx, val, OTP_COMPARE_1); //write val
++ aspeed_otp_write(ctx, 0x23b1e362, OTP_COMMAND); //write command
++ wait_complete(ctx);
++}
++
++static void otp_soak(struct aspeed_otp *ctx, int soak)
++{
++ if (ctx->otp_ver == OTP_A2 || ctx->otp_ver == OTP_A3) {
++ switch (soak) {
++ case 0: //default
++ otp_write(ctx, 0x3000, 0x0); // Write MRA
++ otp_write(ctx, 0x5000, 0x0); // Write MRB
++ otp_write(ctx, 0x1000, 0x0); // Write MR
++ break;
++ case 1: //normal program
++ otp_write(ctx, 0x3000, 0x1320); // Write MRA
++ otp_write(ctx, 0x5000, 0x1008); // Write MRB
++ otp_write(ctx, 0x1000, 0x0024); // Write MR
++ aspeed_otp_write(ctx, 0x04191388, OTP_TIMING); // 200us
++ break;
++ case 2: //soak program
++ otp_write(ctx, 0x3000, 0x1320); // Write MRA
++ otp_write(ctx, 0x5000, 0x0007); // Write MRB
++ otp_write(ctx, 0x1000, 0x0100); // Write MR
++ aspeed_otp_write(ctx, 0x04193a98, OTP_TIMING); // 600us
++ break;
++ }
++ } else {
++ switch (soak) {
++ case 0: //default
++ otp_write(ctx, 0x3000, 0x0); // Write MRA
++ otp_write(ctx, 0x5000, 0x0); // Write MRB
++ otp_write(ctx, 0x1000, 0x0); // Write MR
++ break;
++ case 1: //normal program
++ otp_write(ctx, 0x3000, 0x4021); // Write MRA
++ otp_write(ctx, 0x5000, 0x302f); // Write MRB
++ otp_write(ctx, 0x1000, 0x4020); // Write MR
++ aspeed_otp_write(ctx, 0x04190760, OTP_TIMING); // 75us
++ break;
++ case 2: //soak program
++ otp_write(ctx, 0x3000, 0x4021); // Write MRA
++ otp_write(ctx, 0x5000, 0x1027); // Write MRB
++ otp_write(ctx, 0x1000, 0x4820); // Write MR
++ aspeed_otp_write(ctx, 0x041930d4, OTP_TIMING); // 500us
++ break;
++ }
++ }
++
++ wait_complete(ctx);
++}
++
++static int verify_bit(struct aspeed_otp *ctx, u32 otp_addr, int bit_offset, int value)
++{
++ u32 ret[2];
++
++ if (otp_addr % 2 == 0)
++ aspeed_otp_write(ctx, otp_addr, OTP_ADDR); //Read address
++ else
++ aspeed_otp_write(ctx, otp_addr - 1, OTP_ADDR); //Read address
++
++ aspeed_otp_write(ctx, 0x23b1e361, OTP_COMMAND); //trigger read
++ wait_complete(ctx);
++ ret[0] = aspeed_otp_read(ctx, OTP_COMPARE_1);
++ ret[1] = aspeed_otp_read(ctx, OTP_COMPARE_2);
++
++ if (otp_addr % 2 == 0) {
++ if (((ret[0] >> bit_offset) & 1) == value)
++ return 0;
++ else
++ return -1;
++ } else {
++ if (((ret[1] >> bit_offset) & 1) == value)
++ return 0;
++ else
++ return -1;
++ }
++}
++
++static void otp_prog(struct aspeed_otp *ctx, u32 otp_addr, u32 prog_bit)
++{
++ otp_write(ctx, 0x0, prog_bit);
++ aspeed_otp_write(ctx, otp_addr, OTP_ADDR); //write address
++ aspeed_otp_write(ctx, prog_bit, OTP_COMPARE_1); //write data
++ aspeed_otp_write(ctx, 0x23b1e364, OTP_COMMAND); //write command
++ wait_complete(ctx);
++}
++
++static void _otp_prog_bit(struct aspeed_otp *ctx, u32 value, u32 prog_address, u32 bit_offset)
++{
++ int prog_bit;
++
++ if (prog_address % 2 == 0) {
++ if (value)
++ prog_bit = ~(0x1 << bit_offset);
++ else
++ return;
++ } else {
++ if (ctx->otp_ver != OTP_A3)
++ prog_address |= 1 << 15;
++ if (!value)
++ prog_bit = 0x1 << bit_offset;
++ else
++ return;
++ }
++ otp_prog(ctx, prog_address, prog_bit);
++}
++
++static int otp_prog_bit(struct aspeed_otp *ctx, u32 value, u32 prog_address, u32 bit_offset)
++{
++ int pass;
++ int i;
++
++ otp_soak(ctx, 1);
++ _otp_prog_bit(ctx, value, prog_address, bit_offset);
++ pass = 0;
++
++ for (i = 0; i < RETRY; i++) {
++ if (verify_bit(ctx, prog_address, bit_offset, value) != 0) {
++ otp_soak(ctx, 2);
++ _otp_prog_bit(ctx, value, prog_address, bit_offset);
++ if (verify_bit(ctx, prog_address, bit_offset, value) != 0) {
++ otp_soak(ctx, 1);
++ } else {
++ pass = 1;
++ break;
++ }
++ } else {
++ pass = 1;
++ break;
++ }
++ }
++ otp_soak(ctx, 0);
++ return pass;
++}
++
++static void otp_read_conf_dw(struct aspeed_otp *ctx, u32 offset, u32 *buf)
++{
++ u32 config_offset;
++
++ config_offset = 0x800;
++ config_offset |= (offset / 8) * 0x200;
++ config_offset |= (offset % 8) * 0x2;
++
++ aspeed_otp_write(ctx, config_offset, OTP_ADDR); //Read address
++ aspeed_otp_write(ctx, 0x23b1e361, OTP_COMMAND); //trigger read
++ wait_complete(ctx);
++ buf[0] = aspeed_otp_read(ctx, OTP_COMPARE_1);
++}
++
++static void otp_read_conf(struct aspeed_otp *ctx, u32 offset, u32 len)
++{
++ int i, j;
++
++ otp_soak(ctx, 0);
++ for (i = offset, j = 0; j < len; i++, j++)
++ otp_read_conf_dw(ctx, i, &ctx->data[j]);
++}
++
++static void otp_read_data_2dw(struct aspeed_otp *ctx, u32 offset, u32 *buf)
++{
++ aspeed_otp_write(ctx, offset, OTP_ADDR); //Read address
++ aspeed_otp_write(ctx, 0x23b1e361, OTP_COMMAND); //trigger read
++ wait_complete(ctx);
++ buf[0] = aspeed_otp_read(ctx, OTP_COMPARE_1);
++ buf[1] = aspeed_otp_read(ctx, OTP_COMPARE_2);
++}
++
++static void otp_read_data(struct aspeed_otp *ctx, u32 offset, u32 len)
++{
++ int i, j;
++ u32 ret[2];
++
++ otp_soak(ctx, 0);
++
++ i = offset;
++ j = 0;
++ if (offset % 2) {
++ otp_read_data_2dw(ctx, i - 1, ret);
++ ctx->data[0] = ret[1];
++ i++;
++ j++;
++ }
++ for (; j < len; i += 2, j += 2)
++ otp_read_data_2dw(ctx, i, &ctx->data[j]);
++}
++
++static int otp_prog_data(struct aspeed_otp *ctx, u32 value, u32 dw_offset, u32 bit_offset)
++{
++ u32 read[2];
++ int otp_bit;
++
++ if (dw_offset % 2 == 0) {
++ otp_read_data_2dw(ctx, dw_offset, read);
++ otp_bit = (read[0] >> bit_offset) & 0x1;
++
++ if (otp_bit == 1 && value == 0) {
++ pr_err("OTPDATA%X[%X] = 1\n", dw_offset, bit_offset);
++ pr_err("OTP is programmed, which can't be cleared\n");
++ return -EINVAL;
++ }
++ } else {
++ otp_read_data_2dw(ctx, dw_offset - 1, read);
++ otp_bit = (read[1] >> bit_offset) & 0x1;
++
++ if (otp_bit == 0 && value == 1) {
++ pr_err("OTPDATA%X[%X] = 0\n", dw_offset, bit_offset);
++ pr_err("OTP is programmed, which can't be written\n");
++ return -EINVAL;
++ }
++ }
++ if (otp_bit == value) {
++ pr_err("OTPDATA%X[%X] = %d\n", dw_offset, bit_offset, value);
++ pr_err("No need to program\n");
++ return 0;
++ }
++
++ return otp_prog_bit(ctx, value, dw_offset, bit_offset);
++}
++
++static int otp_prog_conf(struct aspeed_otp *ctx, u32 value, u32 dw_offset, u32 bit_offset)
++{
++ u32 read;
++ u32 prog_address = 0;
++ int otp_bit;
++
++ otp_read_conf_dw(ctx, dw_offset, &read);
++
++ prog_address = 0x800;
++ prog_address |= (dw_offset / 8) * 0x200;
++ prog_address |= (dw_offset % 8) * 0x2;
++ otp_bit = (read >> bit_offset) & 0x1;
++ if (otp_bit == value) {
++ pr_err("OTPCFG%X[%X] = %d\n", dw_offset, bit_offset, value);
++ pr_err("No need to program\n");
++ return 0;
++ }
++ if (otp_bit == 1 && value == 0) {
++ pr_err("OTPCFG%X[%X] = 1\n", dw_offset, bit_offset);
++ pr_err("OTP is programmed, which can't be cleared\n");
++ return -EINVAL;
++ }
++
++ return otp_prog_bit(ctx, value, prog_address, bit_offset);
++}
++
++struct aspeed_otp *glob_ctx;
++
++void otp_read_data_buf(u32 offset, u32 *buf, u32 len)
++{
++ int i, j;
++ u32 ret[2];
++
++ aspeed_otp_write(glob_ctx, OTP_PASSWD, OTP_PROTECT_KEY);
++
++ otp_soak(glob_ctx, 0);
++
++ i = offset;
++ j = 0;
++ if (offset % 2) {
++ otp_read_data_2dw(glob_ctx, i - 1, ret);
++ buf[0] = ret[1];
++ i++;
++ j++;
++ }
++ for (; j < len; i += 2, j += 2)
++ otp_read_data_2dw(glob_ctx, i, &buf[j]);
++ aspeed_otp_write(glob_ctx, 0, OTP_PROTECT_KEY);
++}
++EXPORT_SYMBOL(otp_read_data_buf);
++
++void otp_read_conf_buf(u32 offset, u32 *buf, u32 len)
++{
++ int i, j;
++
++ aspeed_otp_write(glob_ctx, OTP_PASSWD, OTP_PROTECT_KEY);
++ otp_soak(glob_ctx, 0);
++ for (i = offset, j = 0; j < len; i++, j++)
++ otp_read_conf_dw(glob_ctx, i, &buf[j]);
++ aspeed_otp_write(glob_ctx, 0, OTP_PROTECT_KEY);
++}
++EXPORT_SYMBOL(otp_read_conf_buf);
++
++static long otp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ struct miscdevice *c = file->private_data;
++ struct aspeed_otp *ctx = container_of(c, struct aspeed_otp, miscdev);
++ void __user *argp = (void __user *)arg;
++ struct otp_read xfer;
++ struct otp_prog prog;
++ u32 reg_read[2];
++ int ret = 0;
++
++ switch (cmd) {
++ case ASPEED_OTP_READ_DATA:
++ if (copy_from_user(&xfer, argp, sizeof(struct otp_read)))
++ return -EFAULT;
++ if ((xfer.offset + xfer.len) > 0x800) {
++ pr_err("out of range");
++ return -EINVAL;
++ }
++
++ aspeed_otp_write(ctx, OTP_PASSWD, OTP_PROTECT_KEY);
++ otp_read_data(ctx, xfer.offset, xfer.len);
++ aspeed_otp_write(ctx, 0, OTP_PROTECT_KEY);
++
++ if (copy_to_user(xfer.data, ctx->data, xfer.len * 4))
++ return -EFAULT;
++ if (copy_to_user(argp, &xfer, sizeof(struct otp_read)))
++ return -EFAULT;
++ break;
++ case ASPEED_OTP_READ_CONF:
++ if (copy_from_user(&xfer, argp, sizeof(struct otp_read)))
++ return -EFAULT;
++ if ((xfer.offset + xfer.len) > 0x800) {
++ pr_err("out of range");
++ return -EINVAL;
++ }
++
++ aspeed_otp_write(ctx, OTP_PASSWD, OTP_PROTECT_KEY);
++ otp_read_conf(ctx, xfer.offset, xfer.len);
++ aspeed_otp_write(ctx, 0, OTP_PROTECT_KEY);
++
++ if (copy_to_user(xfer.data, ctx->data, xfer.len * 4))
++ return -EFAULT;
++ if (copy_to_user(argp, &xfer, sizeof(struct otp_read)))
++ return -EFAULT;
++ break;
++ case ASPEED_OTP_PROG_DATA:
++ if (copy_from_user(&prog, argp, sizeof(struct otp_prog)))
++ return -EFAULT;
++ if (prog.bit_offset >= 32 || (prog.value != 0 && prog.value != 1)) {
++ pr_err("out of range");
++ return -EINVAL;
++ }
++ if (prog.dw_offset >= 0x800) {
++ pr_err("out of range");
++ return -EINVAL;
++ }
++ aspeed_otp_write(ctx, OTP_PASSWD, OTP_PROTECT_KEY);
++ ret = otp_prog_data(ctx, prog.value, prog.dw_offset, prog.bit_offset);
++ break;
++ case ASPEED_OTP_PROG_CONF:
++ if (copy_from_user(&prog, argp, sizeof(struct otp_prog)))
++ return -EFAULT;
++ if (prog.bit_offset >= 32 || (prog.value != 0 && prog.value != 1)) {
++ pr_err("out of range");
++ return -EINVAL;
++ }
++ if (prog.dw_offset >= 0x20) {
++ pr_err("out of range");
++ return -EINVAL;
++ }
++ aspeed_otp_write(ctx, OTP_PASSWD, OTP_PROTECT_KEY);
++ ret = otp_prog_conf(ctx, prog.value, prog.dw_offset, prog.bit_offset);
++ break;
++ case ASPEED_OTP_VER:
++ if (copy_to_user(argp, &ctx->otp_ver, sizeof(u32)))
++ return -EFAULT;
++ break;
++ case ASPEED_OTP_SW_RID:
++ reg_read[0] = aspeed_otp_read(ctx, SW_REV_ID0);
++ reg_read[1] = aspeed_otp_read(ctx, SW_REV_ID1);
++ if (copy_to_user(argp, reg_read, sizeof(u32) * 2))
++ return -EFAULT;
++ break;
++ case ASPEED_SEC_KEY_NUM:
++ reg_read[0] = aspeed_otp_read(ctx, SEC_KEY_NUM) & 7;
++ if (copy_to_user(argp, reg_read, sizeof(u32)))
++ return -EFAULT;
++ break;
++ }
++ return ret;
++}
++
++static int otp_open(struct inode *inode, struct file *file)
++{
++ struct miscdevice *c = file->private_data;
++ struct aspeed_otp *ctx = container_of(c, struct aspeed_otp, miscdev);
++
++ spin_lock(&otp_state_lock);
++
++ if (ctx->is_open) {
++ spin_unlock(&otp_state_lock);
++ return -EBUSY;
++ }
++
++ ctx->is_open = true;
++
++ spin_unlock(&otp_state_lock);
++
++ return 0;
++}
++
++static int otp_release(struct inode *inode, struct file *file)
++{
++ struct miscdevice *c = file->private_data;
++ struct aspeed_otp *ctx = container_of(c, struct aspeed_otp, miscdev);
++
++ spin_lock(&otp_state_lock);
++
++ ctx->is_open = false;
++
++ spin_unlock(&otp_state_lock);
++
++ return 0;
++}
++
++static const struct file_operations otp_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = otp_ioctl,
++ .open = otp_open,
++ .release = otp_release,
++};
++
++static const struct of_device_id aspeed_otp_of_matches[] = {
++ { .compatible = "aspeed,ast2600-sbc" },
++ { }
++};
++MODULE_DEVICE_TABLE(of, aspeed_otp_of_matches);
++
++static int aspeed_otp_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct regmap *scu;
++ struct aspeed_otp *priv;
++ struct resource *res;
++ u32 revid0, revid1;
++ int rc;
++
++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++ glob_ctx = priv;
++ if (!priv)
++ return -ENOMEM;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(&pdev->dev, "cannot get IORESOURCE_MEM\n");
++ return -ENOENT;
++ }
++
++ priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(priv->reg_base))
++ return PTR_ERR(priv->reg_base);
++
++ scu = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,scu");
++ if (IS_ERR(scu)) {
++ dev_err(dev, "failed to find 2600 SCU regmap\n");
++ return PTR_ERR(scu);
++ }
++
++ regmap_read(scu, ASPEED_REVISION_ID0, &revid0);
++ regmap_read(scu, ASPEED_REVISION_ID1, &revid1);
++
++ priv->otp_ver = chip_version(revid0, revid1);
++
++ if (priv->otp_ver == -1) {
++ dev_err(dev, "unknown chip revision\n");
++ return -EINVAL;
++ }
++
++ priv->data = kmalloc(8192, GFP_KERNEL);
++ if (!priv->data)
++ return -ENOMEM;
++
++ dev_set_drvdata(dev, priv);
++
++ /* Set up the miscdevice */
++ priv->miscdev.minor = MISC_DYNAMIC_MINOR;
++ priv->miscdev.name = "aspeed-otp";
++ priv->miscdev.fops = &otp_fops;
++
++ /* Register the device */
++ rc = misc_register(&priv->miscdev);
++ if (rc) {
++ dev_err(dev, "Unable to register device\n");
++ return rc;
++ }
++
++ return 0;
++}
++
++static int aspeed_otp_remove(struct platform_device *pdev)
++{
++ struct aspeed_otp *ctx = dev_get_drvdata(&pdev->dev);
++
++ kfree(ctx->data);
++ misc_deregister(&ctx->miscdev);
++
++ return 0;
++}
++
++static struct platform_driver aspeed_otp_driver = {
++ .probe = aspeed_otp_probe,
++ .remove = aspeed_otp_remove,
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .of_match_table = aspeed_otp_of_matches,
++ },
++};
++
++module_platform_driver(aspeed_otp_driver);
++
++MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("ASPEED OTP Driver");
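The OTP ioctls above transfer data in 32-bit words through struct otp_read (offset, len, data) and report the chip generation via ASPEED_OTP_VER. A user-space sketch is shown below; the request macros and field names are taken from the handler, while the uapi header path mirrors the <uapi/linux/aspeed-otp.h> include in the driver and the exact field types are assumptions.

```
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/aspeed-otp.h>	/* uapi header referenced by the driver */

int main(void)
{
	uint32_t conf[32];	/* offset + len must not exceed 0x800 words */
	struct otp_read xfer;
	uint32_t ver;
	int fd, ret;

	fd = open("/dev/aspeed-otp", O_RDWR);
	if (fd < 0)
		return 1;

	if (!ioctl(fd, ASPEED_OTP_VER, &ver))
		printf("OTP version code: %u\n", ver);

	memset(&xfer, 0, sizeof(xfer));
	xfer.offset = 0;	/* first OTP configuration word */
	xfer.len = 32;		/* length in 32-bit words */
	xfer.data = conf;	/* user buffer the driver copies into */

	ret = ioctl(fd, ASPEED_OTP_READ_CONF, &xfer);
	if (!ret)
		printf("OTPCFG0 = 0x%08x\n", conf[0]);

	close(fd);
	return ret;
}
```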
+diff --git a/drivers/soc/aspeed/ast2700-espi.c b/drivers/soc/aspeed/ast2700-espi.c
+new file mode 100644
+index 000000000..f9715b4a5
+--- /dev/null
++++ b/drivers/soc/aspeed/ast2700-espi.c
+@@ -0,0 +1,2216 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright 2023 Aspeed Technology Inc.
++ */
++#include <linux/io.h>
++#include <linux/irq.h>
++#include <linux/clk.h>
++#include <linux/sizes.h>
++#include <linux/module.h>
++#include <linux/bitfield.h>
++#include <linux/count_zeros.h>
++#include <linux/of_device.h>
++#include <linux/of_address.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include <linux/miscdevice.h>
++#include <linux/dma-mapping.h>
++#include <linux/uaccess.h>
++#include <linux/vmalloc.h>
++#include <linux/poll.h>
++#include <linux/delay.h>
++
++#include "ast2700-espi.h"
++
++#define DEVICE_NAME "aspeed-espi"
++
++static DEFINE_IDA(ast2700_espi_ida);
++
++#define PERIF_MCYC_ALIGN SZ_64K
++#define PERIF_MMBI_ALIGN SZ_64M
++#define PERIF_MMBI_MAX_INST 8
++
++#define OOB_DMA_RPTR_KEY 0x4f4f4253
++#define OOB_DMA_DESC_NUM 8
++#define OOB_DMA_DESC_CUSTOM 0x4
++
++#define FLASH_EDAF_ALIGN SZ_16M
++
++struct ast2700_espi_perif_mmbi {
++ void *b2h_virt;
++ void *h2b_virt;
++ dma_addr_t b2h_addr;
++ dma_addr_t h2b_addr;
++ struct miscdevice b2h_mdev;
++ struct miscdevice h2b_mdev;
++ bool host_rwp_update;
++ wait_queue_head_t wq;
++ struct ast2700_espi_perif *perif;
++};
++
++struct ast2700_espi_perif {
++ struct {
++ bool enable;
++ int irq;
++ void *virt;
++ dma_addr_t taddr;
++ uint64_t saddr;
++ uint64_t size;
++ uint32_t inst_num;
++ uint32_t inst_size;
++ struct ast2700_espi_perif_mmbi inst[PERIF_MMBI_MAX_INST];
++ } mmbi;
++
++ struct {
++ bool enable;
++ void *virt;
++ dma_addr_t taddr;
++ uint64_t saddr;
++ uint64_t size;
++ } mcyc;
++
++ struct {
++ bool enable;
++ void *np_tx_virt;
++ dma_addr_t np_tx_addr;
++ void *pc_tx_virt;
++ dma_addr_t pc_tx_addr;
++ void *pc_rx_virt;
++ dma_addr_t pc_rx_addr;
++ } dma;
++
++ bool rx_ready;
++ wait_queue_head_t wq;
++
++ spinlock_t lock;
++ struct mutex np_tx_mtx;
++ struct mutex pc_tx_mtx;
++ struct mutex pc_rx_mtx;
++
++ struct miscdevice mdev;
++};
++
++struct ast2700_espi_vw {
++ struct {
++ bool hw_mode;
++ uint32_t grp;
++ uint32_t dir0;
++ uint32_t dir1;
++ uint32_t val0;
++ uint32_t val1;
++ } gpio;
++
++ struct miscdevice mdev;
++};
++
++struct ast2700_espi_oob_dma_tx_desc {
++ uint32_t data_addrl;
++ uint32_t data_addrh;
++ uint8_t cyc;
++ uint16_t tag : 4;
++ uint16_t len : 12;
++ uint8_t msg_type : 3;
++ uint8_t raz0 : 1;
++ uint8_t pec : 1;
++ uint8_t int_en : 1;
++ uint8_t pause : 1;
++ uint8_t raz1 : 1;
++ uint32_t raz2;
++ uint32_t raz3;
++ uint32_t pad[3];
++} __packed;
++
++struct ast2700_espi_oob_dma_rx_desc {
++ uint32_t data_addrl;
++ uint32_t data_addrh;
++ uint8_t cyc;
++ uint16_t tag : 4;
++ uint16_t len : 12;
++ uint8_t raz : 7;
++ uint8_t dirty : 1;
++ uint32_t pad[1];
++} __packed;
++
++struct ast2700_espi_oob {
++ struct {
++ bool enable;
++ struct ast2700_espi_oob_dma_tx_desc *txd_virt;
++ dma_addr_t txd_addr;
++ struct ast2700_espi_oob_dma_rx_desc *rxd_virt;
++ dma_addr_t rxd_addr;
++ void *tx_virt;
++ dma_addr_t tx_addr;
++ void *rx_virt;
++ dma_addr_t rx_addr;
++ } dma;
++
++ bool rx_ready;
++ wait_queue_head_t wq;
++
++ spinlock_t lock;
++ struct mutex tx_mtx;
++ struct mutex rx_mtx;
++
++ struct miscdevice mdev;
++};
++
++struct ast2700_espi_flash {
++ struct {
++ uint32_t mode;
++ phys_addr_t taddr;
++ uint64_t size;
++ } edaf;
++
++ struct {
++ bool enable;
++ void *tx_virt;
++ dma_addr_t tx_addr;
++ void *rx_virt;
++ dma_addr_t rx_addr;
++ } dma;
++
++ bool rx_ready;
++ wait_queue_head_t wq;
++
++ spinlock_t lock;
++ struct mutex rx_mtx;
++ struct mutex tx_mtx;
++
++ struct miscdevice mdev;
++};
++
++struct ast2700_espi {
++ struct device *dev;
++ void __iomem *regs;
++ struct clk *clk;
++ int dev_id;
++ int irq;
++
++ struct ast2700_espi_perif perif;
++ struct ast2700_espi_vw vw;
++ struct ast2700_espi_oob oob;
++ struct ast2700_espi_flash flash;
++};
++
++/* peripheral channel (CH0) */
++static int ast2700_espi_mmbi_b2h_mmap(struct file *fp, struct vm_area_struct *vma)
++{
++ struct ast2700_espi_perif_mmbi *mmbi;
++ struct ast2700_espi_perif *perif;
++ struct ast2700_espi *espi;
++ unsigned long vm_size;
++ pgprot_t prot;
++
++ mmbi = container_of(fp->private_data, struct ast2700_espi_perif_mmbi, b2h_mdev);
++
++ perif = mmbi->perif;
++
++ espi = container_of(perif, struct ast2700_espi, perif);
++
++ vm_size = vma->vm_end - vma->vm_start;
++ prot = vma->vm_page_prot;
++
++ if (((vma->vm_pgoff << PAGE_SHIFT) + vm_size) > (perif->mmbi.inst_size >> 1))
++ return -EINVAL;
++
++ prot = pgprot_noncached(prot);
++
++ if (remap_pfn_range(vma, vma->vm_start,
++ (mmbi->b2h_addr >> PAGE_SHIFT) + vma->vm_pgoff,
++ vm_size, prot))
++ return -EAGAIN;
++
++ return 0;
++}
++
++static int ast2700_espi_mmbi_h2b_mmap(struct file *fp, struct vm_area_struct *vma)
++{
++ struct ast2700_espi_perif_mmbi *mmbi;
++ struct ast2700_espi_perif *perif;
++ struct ast2700_espi *espi;
++ unsigned long vm_size;
++ pgprot_t prot;
++
++ mmbi = container_of(fp->private_data, struct ast2700_espi_perif_mmbi, h2b_mdev);
++
++ perif = mmbi->perif;
++
++ espi = container_of(perif, struct ast2700_espi, perif);
++
++ vm_size = vma->vm_end - vma->vm_start;
++ prot = vma->vm_page_prot;
++
++ if (((vma->vm_pgoff << PAGE_SHIFT) + vm_size) > (perif->mmbi.inst_size >> 1))
++ return -EINVAL;
++
++ prot = pgprot_noncached(prot);
++
++ if (remap_pfn_range(vma, vma->vm_start,
++ (mmbi->h2b_addr >> PAGE_SHIFT) + vma->vm_pgoff,
++ vm_size, prot))
++ return -EAGAIN;
++
++ return 0;
++}
++
++static __poll_t ast2700_espi_mmbi_h2b_poll(struct file *fp, struct poll_table_struct *pt)
++{
++ struct ast2700_espi_perif_mmbi *mmbi;
++
++ mmbi = container_of(fp->private_data, struct ast2700_espi_perif_mmbi, h2b_mdev);
++
++ poll_wait(fp, &mmbi->wq, pt);
++
++ if (!mmbi->host_rwp_update)
++ return 0;
++
++ mmbi->host_rwp_update = false;
++
++ return EPOLLIN;
++}
++
++static long ast2700_espi_perif_pc_get_rx(struct file *fp,
++ struct ast2700_espi_perif *perif,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2700_espi *espi;
++ struct espi_comm_hdr *hdr;
++ unsigned long flags;
++ uint32_t pkt_len;
++ uint8_t *pkt;
++ int i, rc;
++
++ espi = container_of(perif, struct ast2700_espi, perif);
++
++ if (fp->f_flags & O_NONBLOCK) {
++ if (!mutex_trylock(&perif->pc_rx_mtx))
++ return -EAGAIN;
++
++ if (!perif->rx_ready) {
++ rc = -ENODATA;
++ goto unlock_mtx_n_out;
++ }
++ } else {
++ mutex_lock(&perif->pc_rx_mtx);
++
++ if (!perif->rx_ready) {
++ rc = wait_event_interruptible(perif->wq, perif->rx_ready);
++ if (rc == -ERESTARTSYS) {
++ rc = -EINTR;
++ goto unlock_mtx_n_out;
++ }
++ }
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++ * part is read from HW registers rather than the data FIFO
++ */
++ reg = readl(espi->regs + ESPI_CH0_PC_RX_CTRL);
++ cyc = FIELD_GET(ESPI_CH0_PC_RX_CTRL_CYC, reg);
++ tag = FIELD_GET(ESPI_CH0_PC_RX_CTRL_TAG, reg);
++ len = FIELD_GET(ESPI_CH0_PC_RX_CTRL_LEN, reg);
++
++ /*
++ * calculate the length of the rest part of the
++ * eSPI packet to be read from HW and copied to
++ * user space.
++ */
++ switch (cyc) {
++ case ESPI_PERIF_MSG:
++ pkt_len = sizeof(struct espi_perif_msg);
++ break;
++ case ESPI_PERIF_MSG_D:
++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) +
++ sizeof(struct espi_perif_msg);
++ break;
++ case ESPI_PERIF_SUC_CMPLT_D_MIDDLE:
++ case ESPI_PERIF_SUC_CMPLT_D_FIRST:
++ case ESPI_PERIF_SUC_CMPLT_D_LAST:
++ case ESPI_PERIF_SUC_CMPLT_D_ONLY:
++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) +
++ sizeof(struct espi_perif_cmplt);
++ break;
++ case ESPI_PERIF_SUC_CMPLT:
++ case ESPI_PERIF_UNSUC_CMPLT:
++ pkt_len = sizeof(struct espi_perif_cmplt);
++ break;
++ default:
++ rc = -EFAULT;
++ goto unlock_mtx_n_out;
++ }
++
++ if (ioc->pkt_len < pkt_len) {
++ rc = -EINVAL;
++ goto unlock_mtx_n_out;
++ }
++
++ pkt = vmalloc(pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_mtx_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++ hdr->cyc = cyc;
++ hdr->tag = tag;
++ hdr->len_h = len >> 8;
++ hdr->len_l = len & 0xff;
++
++ if (perif->dma.enable) {
++ memcpy(hdr + 1, perif->dma.pc_rx_virt, pkt_len - sizeof(*hdr));
++ } else {
++ for (i = sizeof(*hdr); i < pkt_len; ++i)
++			pkt[i] = readl(espi->regs + ESPI_CH0_PC_RX_DATA) & 0xff;
++ }
++
++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ spin_lock_irqsave(&perif->lock, flags);
++
++ writel(ESPI_CH0_PC_RX_CTRL_SERV_PEND, espi->regs + ESPI_CH0_PC_RX_CTRL);
++ perif->rx_ready = 0;
++
++ spin_unlock_irqrestore(&perif->lock, flags);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_mtx_n_out:
++ mutex_unlock(&perif->pc_rx_mtx);
++
++ return rc;
++}
++
++static long ast2700_espi_perif_pc_put_tx(struct file *fp,
++ struct ast2700_espi_perif *perif,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2700_espi *espi;
++ struct espi_comm_hdr *hdr;
++ uint8_t *pkt;
++ int i, rc;
++
++ espi = container_of(perif, struct ast2700_espi, perif);
++
++ if (!mutex_trylock(&perif->pc_tx_mtx))
++ return -EAGAIN;
++
++ reg = readl(espi->regs + ESPI_CH0_PC_TX_CTRL);
++ if (reg & ESPI_CH0_PC_TX_CTRL_TRIG_PEND) {
++ rc = -EBUSY;
++ goto unlock_n_out;
++ }
++
++ pkt = vmalloc(ioc->pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++
++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++ * part is written to HW registers
++ */
++ if (perif->dma.enable) {
++ memcpy(perif->dma.pc_tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr));
++ dma_wmb();
++ } else {
++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i)
++ writel(pkt[i], espi->regs + ESPI_CH0_PC_TX_DATA);
++ }
++
++ cyc = hdr->cyc;
++ tag = hdr->tag;
++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff);
++
++ reg = FIELD_PREP(ESPI_CH0_PC_TX_CTRL_CYC, cyc)
++ | FIELD_PREP(ESPI_CH0_PC_TX_CTRL_TAG, tag)
++ | FIELD_PREP(ESPI_CH0_PC_TX_CTRL_LEN, len)
++ | ESPI_CH0_PC_TX_CTRL_TRIG_PEND;
++ writel(reg, espi->regs + ESPI_CH0_PC_TX_CTRL);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_n_out:
++ mutex_unlock(&perif->pc_tx_mtx);
++
++ return rc;
++}
++
++static long ast2700_espi_perif_np_put_tx(struct file *fp,
++ struct ast2700_espi_perif *perif,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2700_espi *espi;
++ struct espi_comm_hdr *hdr;
++ uint8_t *pkt;
++ int i, rc;
++
++ espi = container_of(perif, struct ast2700_espi, perif);
++
++ if (!mutex_trylock(&perif->np_tx_mtx))
++ return -EAGAIN;
++
++ reg = readl(espi->regs + ESPI_CH0_NP_TX_CTRL);
++ if (reg & ESPI_CH0_NP_TX_CTRL_TRIG_PEND) {
++ rc = -EBUSY;
++ goto unlock_n_out;
++ }
++
++ pkt = vmalloc(ioc->pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++
++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++ * part is written to HW registers
++ */
++ if (perif->dma.enable) {
++ memcpy(perif->dma.np_tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr));
++ dma_wmb();
++ } else {
++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i)
++ writel(pkt[i], espi->regs + ESPI_CH0_NP_TX_DATA);
++ }
++
++ cyc = hdr->cyc;
++ tag = hdr->tag;
++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff);
++
++ reg = FIELD_PREP(ESPI_CH0_NP_TX_CTRL_CYC, cyc)
++ | FIELD_PREP(ESPI_CH0_NP_TX_CTRL_TAG, tag)
++ | FIELD_PREP(ESPI_CH0_NP_TX_CTRL_LEN, len)
++ | ESPI_CH0_NP_TX_CTRL_TRIG_PEND;
++ writel(reg, espi->regs + ESPI_CH0_NP_TX_CTRL);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_n_out:
++ mutex_unlock(&perif->np_tx_mtx);
++
++ return rc;
++}
++
++static long ast2700_espi_perif_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
++{
++ struct ast2700_espi_perif *perif;
++ struct aspeed_espi_ioc ioc;
++
++ perif = container_of(fp->private_data, struct ast2700_espi_perif, mdev);
++
++ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc)))
++ return -EFAULT;
++
++ if (ioc.pkt_len > ESPI_MAX_PKT_LEN)
++ return -EINVAL;
++
++ switch (cmd) {
++ case ASPEED_ESPI_PERIF_PC_GET_RX:
++ return ast2700_espi_perif_pc_get_rx(fp, perif, &ioc);
++ case ASPEED_ESPI_PERIF_PC_PUT_TX:
++ return ast2700_espi_perif_pc_put_tx(fp, perif, &ioc);
++ case ASPEED_ESPI_PERIF_NP_PUT_TX:
++ return ast2700_espi_perif_np_put_tx(fp, perif, &ioc);
++ default:
++ break;
++ };
++
++ return -EINVAL;
++}
++
++static int ast2700_espi_perif_mmap(struct file *fp, struct vm_area_struct *vma)
++{
++ struct ast2700_espi_perif *perif;
++ unsigned long vm_size;
++ pgprot_t vm_prot;
++
++ perif = container_of(fp->private_data, struct ast2700_espi_perif, mdev);
++ if (!perif->mcyc.enable)
++ return -EPERM;
++
++ vm_size = vma->vm_end - vma->vm_start;
++ vm_prot = vma->vm_page_prot;
++
++ if (((vma->vm_pgoff << PAGE_SHIFT) + vm_size) > perif->mcyc.size)
++ return -EINVAL;
++
++ vm_prot = pgprot_noncached(vm_prot);
++
++ if (remap_pfn_range(vma, vma->vm_start,
++ (perif->mcyc.taddr >> PAGE_SHIFT) + vma->vm_pgoff,
++ vm_size, vm_prot))
++ return -EAGAIN;
++
++ return 0;
++}
++
++static const struct file_operations ast2700_espi_mmbi_b2h_fops = {
++ .owner = THIS_MODULE,
++ .mmap = ast2700_espi_mmbi_b2h_mmap,
++};
++
++static const struct file_operations ast2700_espi_mmbi_h2b_fops = {
++ .owner = THIS_MODULE,
++ .mmap = ast2700_espi_mmbi_h2b_mmap,
++ .poll = ast2700_espi_mmbi_h2b_poll,
++};
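++
++/*
++ * Illustrative MMBI usage sketch (comment only, not compiled): each MMBI
++ * instance exposes a pair of misc devices.  The b2h node is mmap()ed so the
++ * BMC can fill its half of the MMBI window, while the h2b node is mmap()ed to
++ * read host data and supports poll() to wait for host read/write pointer
++ * updates latched by the MMBI interrupt handler.  The device node name below
++ * is an assumption derived from DEVICE_NAME, the device ID and the instance
++ * index.
++ *
++ *   int fd = open("/dev/aspeed-espi-mmbi0-h2b0", O_RDONLY);
++ *   struct pollfd pfd = { .fd = fd, .events = POLLIN };
++ *
++ *   poll(&pfd, 1, -1);	// wakes up on a host read/write pointer update
++ */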
++
++static const struct file_operations ast2700_espi_perif_fops = {
++ .owner = THIS_MODULE,
++ .mmap = ast2700_espi_perif_mmap,
++ .unlocked_ioctl = ast2700_espi_perif_ioctl,
++};
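++
++/*
++ * Illustrative peripheral channel usage sketch (comment only, not compiled):
++ * posted/non-posted cycles are exchanged through ioctl() with a
++ * struct aspeed_espi_ioc, and the generic memory cycle window can be mmap()ed
++ * when "perif-mcyc-enable" is set.  The device node name is an assumption
++ * derived from DEVICE_NAME and the device ID.
++ *
++ *   uint8_t buf[ESPI_MAX_PKT_LEN];
++ *   struct aspeed_espi_ioc ioc = { .pkt_len = sizeof(buf), .pkt = buf };
++ *   int fd = open("/dev/aspeed-espi-peripheral0", O_RDWR);
++ *
++ *   ioctl(fd, ASPEED_ESPI_PERIF_PC_GET_RX, &ioc);	// blocks unless O_NONBLOCK
++ */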
++
++static irqreturn_t ast2700_espi_perif_mmbi_isr(int irq, void *arg)
++{
++ struct ast2700_espi_perif_mmbi *mmbi;
++ struct ast2700_espi_perif *perif;
++ struct ast2700_espi *espi;
++ uint32_t sts, tmp;
++ uint32_t *p;
++ int i;
++
++ espi = (struct ast2700_espi *)arg;
++
++ perif = &espi->perif;
++
++ sts = readl(espi->regs + ESPI_MMBI_INT_STS);
++ if (!sts)
++ return IRQ_NONE;
++
++ for (i = 0, tmp = sts; i < perif->mmbi.inst_num; ++i, tmp >>= 2) {
++ if (!(tmp & 0x3))
++ continue;
++
++ mmbi = &perif->mmbi.inst[i];
++
++ p = (uint32_t *)mmbi->h2b_virt;
++ p[0] = readl(espi->regs + ESPI_MMBI_HOST_RWP(i));
++ p[1] = readl(espi->regs + ESPI_MMBI_HOST_RWP(i) + 4);
++
++ mmbi->host_rwp_update = true;
++
++ wake_up_interruptible(&mmbi->wq);
++ }
++
++ writel(sts, espi->regs + ESPI_MMBI_INT_STS);
++
++ return IRQ_HANDLED;
++}
++
++static void ast2700_espi_perif_isr(struct ast2700_espi *espi)
++{
++ struct ast2700_espi_perif *perif;
++ unsigned long flags;
++ uint32_t sts;
++
++ perif = &espi->perif;
++
++ sts = readl(espi->regs + ESPI_CH0_INT_STS);
++
++ if (sts & ESPI_CH0_INT_STS_PC_RX_CMPLT) {
++ writel(ESPI_CH0_INT_STS_PC_RX_CMPLT, espi->regs + ESPI_CH0_INT_STS);
++
++ spin_lock_irqsave(&perif->lock, flags);
++ perif->rx_ready = true;
++ spin_unlock_irqrestore(&perif->lock, flags);
++
++ wake_up_interruptible(&perif->wq);
++ }
++}
++
++static void ast2700_espi_perif_reset(struct ast2700_espi *espi)
++{
++ struct ast2700_espi_perif *perif;
++ struct device *dev;
++ uint64_t mask;
++ uint32_t reg;
++
++ dev = espi->dev;
++
++ perif = &espi->perif;
++
++ writel(0x0, espi->regs + ESPI_CH0_INT_EN);
++ writel(0xffffffff, espi->regs + ESPI_CH0_INT_STS);
++
++ writel(0x0, espi->regs + ESPI_MMBI_INT_EN);
++ writel(0xffffffff, espi->regs + ESPI_MMBI_INT_STS);
++
++ reg = readl(espi->regs + ESPI_CH0_CTRL);
++ reg &= ~(ESPI_CH0_CTRL_MCYC_RD_DIS_WDT | ESPI_CH0_CTRL_MCYC_WR_DIS_WDT);
++ writel(reg, espi->regs + ESPI_CH0_CTRL);
++
++ reg = readl(espi->regs + ESPI_CH0_MCYC0_MASKL);
++ reg &= ~ESPI_CH0_MCYC0_MASKL_EN;
++ writel(reg, espi->regs + ESPI_CH0_MCYC0_MASKL);
++
++ reg = readl(espi->regs + ESPI_CH0_MCYC1_MASKL);
++ reg &= ~ESPI_CH0_MCYC1_MASKL_EN;
++ writel(reg, espi->regs + ESPI_CH0_MCYC1_MASKL);
++
++ reg = readl(espi->regs + ESPI_CH0_CTRL);
++ reg |= (ESPI_CH0_CTRL_MCYC_RD_DIS | ESPI_CH0_CTRL_MCYC_WR_DIS);
++ reg &= ~(ESPI_CH0_CTRL_NP_TX_RST
++ | ESPI_CH0_CTRL_NP_RX_RST
++ | ESPI_CH0_CTRL_PC_TX_RST
++ | ESPI_CH0_CTRL_PC_RX_RST
++ | ESPI_CH0_CTRL_NP_TX_DMA_EN
++ | ESPI_CH0_CTRL_PC_TX_DMA_EN
++ | ESPI_CH0_CTRL_PC_RX_DMA_EN
++ | ESPI_CH0_CTRL_SW_RDY);
++ writel(reg, espi->regs + ESPI_CH0_CTRL);
++
++ udelay(1);
++
++ reg |= (ESPI_CH0_CTRL_NP_TX_RST
++ | ESPI_CH0_CTRL_NP_RX_RST
++ | ESPI_CH0_CTRL_PC_TX_RST
++ | ESPI_CH0_CTRL_PC_RX_RST);
++ writel(reg, espi->regs + ESPI_CH0_CTRL);
++
++ if (perif->mmbi.enable) {
++ reg = readl(espi->regs + ESPI_MMBI_CTRL);
++ reg &= ~ESPI_MMBI_CTRL_EN;
++ writel(reg, espi->regs + ESPI_MMBI_CTRL);
++
++ mask = ~(perif->mmbi.size - 1);
++ writel(mask >> 32, espi->regs + ESPI_CH0_MCYC0_MASKH);
++ writel(mask & 0xffffffff, espi->regs + ESPI_CH0_MCYC0_MASKL);
++ writel((perif->mmbi.saddr >> 32), espi->regs + ESPI_CH0_MCYC0_SADDRH);
++ writel((perif->mmbi.saddr & 0xffffffff), espi->regs + ESPI_CH0_MCYC0_SADDRL);
++ writel((perif->mmbi.taddr >> 32), espi->regs + ESPI_CH0_MCYC0_TADDRH);
++ writel((perif->mmbi.taddr & 0xffffffff), espi->regs + ESPI_CH0_MCYC0_TADDRL);
++
++ writel((0x1 << (perif->mmbi.inst_num * 2)) - 1, espi->regs + ESPI_MMBI_INT_EN);
++
++ reg = FIELD_PREP(ESPI_MMBI_CTRL_INST_NUM, count_trailing_zeros(perif->mmbi.inst_num))
++ | ESPI_MMBI_CTRL_EN;
++ writel(reg, espi->regs + ESPI_MMBI_CTRL);
++
++ reg = readl(espi->regs + ESPI_CH0_MCYC0_MASKL) | ESPI_CH0_MCYC0_MASKL_EN;
++ writel(reg, espi->regs + ESPI_CH0_MCYC0_MASKL);
++
++ reg = readl(espi->regs + ESPI_CH0_CTRL);
++ reg &= ~(ESPI_CH0_CTRL_MCYC_RD_DIS | ESPI_CH0_CTRL_MCYC_WR_DIS);
++ writel(reg, espi->regs + ESPI_CH0_CTRL);
++ }
++
++ if (perif->mcyc.enable) {
++ mask = ~(perif->mcyc.size - 1);
++ writel(mask >> 32, espi->regs + ESPI_CH0_MCYC1_MASKH);
++ writel(mask & 0xffffffff, espi->regs + ESPI_CH0_MCYC1_MASKL);
++ writel((perif->mcyc.saddr >> 32), espi->regs + ESPI_CH0_MCYC1_SADDRH);
++ writel((perif->mcyc.saddr & 0xffffffff), espi->regs + ESPI_CH0_MCYC1_SADDRL);
++ writel((perif->mcyc.taddr >> 32), espi->regs + ESPI_CH0_MCYC1_TADDRH);
++ writel((perif->mcyc.taddr & 0xffffffff), espi->regs + ESPI_CH0_MCYC1_TADDRL);
++
++ reg = readl(espi->regs + ESPI_CH0_MCYC1_MASKL) | ESPI_CH0_MCYC1_MASKL_EN;
++ writel(reg, espi->regs + ESPI_CH0_MCYC1_MASKL);
++
++ reg = readl(espi->regs + ESPI_CH0_CTRL);
++ reg &= ~(ESPI_CH0_CTRL_MCYC_RD_DIS | ESPI_CH0_CTRL_MCYC_WR_DIS);
++ writel(reg, espi->regs + ESPI_CH0_CTRL);
++ }
++
++ if (perif->dma.enable) {
++ writel((perif->dma.np_tx_addr >> 32), espi->regs + ESPI_CH0_NP_TX_DMAH);
++ writel((perif->dma.np_tx_addr & 0xffffffff), espi->regs + ESPI_CH0_NP_TX_DMAL);
++ writel((perif->dma.pc_tx_addr >> 32), espi->regs + ESPI_CH0_PC_TX_DMAH);
++ writel((perif->dma.pc_tx_addr & 0xffffffff), espi->regs + ESPI_CH0_PC_TX_DMAL);
++ writel((perif->dma.pc_rx_addr >> 32), espi->regs + ESPI_CH0_PC_RX_DMAH);
++ writel((perif->dma.pc_rx_addr & 0xffffffff), espi->regs + ESPI_CH0_PC_RX_DMAL);
++
++ reg = readl(espi->regs + ESPI_CH0_CTRL)
++ | ESPI_CH0_CTRL_NP_TX_DMA_EN
++ | ESPI_CH0_CTRL_PC_TX_DMA_EN
++ | ESPI_CH0_CTRL_PC_RX_DMA_EN;
++ writel(reg, espi->regs + ESPI_CH0_CTRL);
++ }
++
++ writel(ESPI_CH0_INT_EN_PC_RX_CMPLT, espi->regs + ESPI_CH0_INT_EN);
++
++ reg = readl(espi->regs + ESPI_CH0_CTRL) | ESPI_CH0_CTRL_SW_RDY;
++ writel(reg, espi->regs + ESPI_CH0_CTRL);
++}
++
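++/*
++ * Parse the optional "perif-mmbi-*", "perif-mcyc-*" and "perif-dma-mode"
++ * device tree properties, allocate the backing buffers, register the misc
++ * devices and bring the peripheral channel to its ready state.
++ */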
++static int ast2700_espi_perif_probe(struct ast2700_espi *espi)
++{
++ struct ast2700_espi_perif_mmbi *mmbi;
++ struct ast2700_espi_perif *perif;
++ struct platform_device *pdev;
++ struct device_node *np;
++ struct resource res;
++ struct device *dev;
++ int i, rc;
++
++ dev = espi->dev;
++
++ perif = &espi->perif;
++
++ init_waitqueue_head(&perif->wq);
++
++ spin_lock_init(&perif->lock);
++
++ mutex_init(&perif->np_tx_mtx);
++ mutex_init(&perif->pc_tx_mtx);
++ mutex_init(&perif->pc_rx_mtx);
++
++ perif->mmbi.enable = of_property_read_bool(dev->of_node, "perif-mmbi-enable");
++ if (perif->mmbi.enable) {
++ pdev = container_of(dev, struct platform_device, dev);
++
++ perif->mmbi.irq = platform_get_irq(pdev, 1);
++ if (perif->mmbi.irq < 0) {
++ dev_err(dev, "cannot get MMBI IRQ number\n");
++ return -ENODEV;
++ }
++
++ rc = of_property_read_u64(dev->of_node, "perif-mmbi-src-addr", &perif->mmbi.saddr);
++ if (rc || !IS_ALIGNED(perif->mmbi.saddr, PERIF_MMBI_ALIGN)) {
++ dev_err(dev, "cannot get 64MB-aligned MMBI host address\n");
++ return -ENODEV;
++ }
++
++ rc = of_property_read_u32(dev->of_node, "perif-mmbi-instance-num", &perif->mmbi.inst_num);
++ if (rc ||
++ perif->mmbi.inst_num == 0 ||
++ perif->mmbi.inst_num > PERIF_MMBI_MAX_INST ||
++ (perif->mmbi.inst_num & (perif->mmbi.inst_num - 1))) {
++ dev_err(dev, "cannot get valid MMBI instance number, expect 1/2/4/8\n");
++ return -EINVAL;
++ }
++
++ np = of_parse_phandle(dev->of_node, "perif-mmbi-tgt-memory", 0);
++ if (!np || of_address_to_resource(np, 0, &res)) {
++ dev_err(dev, "cannot get MMBI memory region\n");
++ return -ENODEV;
++ }
++
++ of_node_put(np);
++
++ perif->mmbi.taddr = res.start;
++ perif->mmbi.size = resource_size(&res);
++ perif->mmbi.inst_size = perif->mmbi.size / perif->mmbi.inst_num;
++ if (!IS_ALIGNED(perif->mmbi.taddr, PERIF_MMBI_ALIGN) ||
++ !IS_ALIGNED(perif->mmbi.size, PERIF_MMBI_ALIGN)) {
++ dev_err(dev, "cannot get 64MB-aligned MMBI address/size\n");
++ return -EINVAL;
++ }
++
++ perif->mmbi.virt = devm_ioremap_resource(dev, &res);
++		if (IS_ERR(perif->mmbi.virt)) {
++			dev_err(dev, "cannot map MMBI memory region\n");
++			return PTR_ERR(perif->mmbi.virt);
++ }
++
++ memset_io(perif->mmbi.virt, 0, perif->mmbi.size);
++
++ for (i = 0; i < perif->mmbi.inst_num; ++i) {
++ mmbi = &perif->mmbi.inst[i];
++
++ init_waitqueue_head(&mmbi->wq);
++
++ mmbi->perif = perif;
++ mmbi->host_rwp_update = false;
++
++ mmbi->b2h_virt = perif->mmbi.virt + ((perif->mmbi.inst_size >> 1) * i);
++ mmbi->b2h_addr = perif->mmbi.taddr + ((perif->mmbi.inst_size >> 1) * i);
++ mmbi->b2h_mdev.parent = dev;
++ mmbi->b2h_mdev.minor = MISC_DYNAMIC_MINOR;
++ mmbi->b2h_mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-mmbi%d-b2h%d",
++ DEVICE_NAME, espi->dev_id, i);
++ mmbi->b2h_mdev.fops = &ast2700_espi_mmbi_b2h_fops;
++ rc = misc_register(&mmbi->b2h_mdev);
++ if (rc) {
++ dev_err(dev, "cannot register device %s\n", mmbi->b2h_mdev.name);
++ return rc;
++ }
++
++ mmbi->h2b_virt = perif->mmbi.virt + ((perif->mmbi.inst_size >> 1) * (i + perif->mmbi.inst_num));
++ mmbi->h2b_addr = perif->mmbi.taddr + ((perif->mmbi.inst_size >> 1) * (i + perif->mmbi.inst_num));
++ mmbi->h2b_mdev.parent = dev;
++ mmbi->h2b_mdev.minor = MISC_DYNAMIC_MINOR;
++ mmbi->h2b_mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-mmbi%d-h2b%d",
++ DEVICE_NAME, espi->dev_id, i);
++ mmbi->h2b_mdev.fops = &ast2700_espi_mmbi_h2b_fops;
++ rc = misc_register(&mmbi->h2b_mdev);
++ if (rc) {
++ dev_err(dev, "cannot register device %s\n", mmbi->h2b_mdev.name);
++ return rc;
++ }
++ }
++ }
++
++ perif->mcyc.enable = of_property_read_bool(dev->of_node, "perif-mcyc-enable");
++ if (perif->mcyc.enable) {
++ rc = of_property_read_u64(dev->of_node, "perif-mcyc-src-addr", &perif->mcyc.saddr);
++ if (rc || !IS_ALIGNED(perif->mcyc.saddr, PERIF_MCYC_ALIGN)) {
++ dev_err(dev, "cannot get 64KB-aligned memory cycle host address\n");
++ return -ENODEV;
++ }
++
++ rc = of_property_read_u64(dev->of_node, "perif-mcyc-size", &perif->mcyc.size);
++ if (rc || !IS_ALIGNED(perif->mcyc.size, PERIF_MCYC_ALIGN)) {
++ dev_err(dev, "cannot get 64KB-aligned memory cycle size\n");
++ return -EINVAL;
++ }
++
++ perif->mcyc.virt = dmam_alloc_coherent(dev, perif->mcyc.size,
++ &perif->mcyc.taddr, GFP_KERNEL);
++ if (!perif->mcyc.virt) {
++ dev_err(dev, "cannot allocate memory cycle\n");
++ return -ENOMEM;
++ }
++ }
++
++ perif->dma.enable = of_property_read_bool(dev->of_node, "perif-dma-mode");
++ if (perif->dma.enable) {
++ perif->dma.pc_tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE,
++ &perif->dma.pc_tx_addr, GFP_KERNEL);
++ if (!perif->dma.pc_tx_virt) {
++ dev_err(dev, "cannot allocate posted TX DMA buffer\n");
++ return -ENOMEM;
++ }
++
++ perif->dma.pc_rx_virt = dmam_alloc_coherent(dev, PAGE_SIZE,
++ &perif->dma.pc_rx_addr, GFP_KERNEL);
++ if (!perif->dma.pc_rx_virt) {
++ dev_err(dev, "cannot allocate posted RX DMA buffer\n");
++ return -ENOMEM;
++ }
++
++ perif->dma.np_tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE,
++ &perif->dma.np_tx_addr, GFP_KERNEL);
++ if (!perif->dma.np_tx_virt) {
++ dev_err(dev, "cannot allocate non-posted TX DMA buffer\n");
++ return -ENOMEM;
++ }
++ }
++
++ perif->mdev.parent = dev;
++ perif->mdev.minor = MISC_DYNAMIC_MINOR;
++ perif->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-peripheral%d", DEVICE_NAME, espi->dev_id);
++ perif->mdev.fops = &ast2700_espi_perif_fops;
++ rc = misc_register(&perif->mdev);
++ if (rc) {
++ dev_err(dev, "cannot register device %s\n", perif->mdev.name);
++ return rc;
++ }
++
++ ast2700_espi_perif_reset(espi);
++
++ if (perif->mmbi.enable) {
++ rc = devm_request_irq(dev, espi->perif.mmbi.irq,
++ ast2700_espi_perif_mmbi_isr, 0, dev_name(dev), espi);
++ if (rc) {
++ dev_err(dev, "cannot request MMBI IRQ\n");
++ return rc;
++ }
++ }
++
++ return 0;
++}
++
++static int ast2700_espi_perif_remove(struct ast2700_espi *espi)
++{
++ struct ast2700_espi_perif_mmbi *mmbi;
++ struct ast2700_espi_perif *perif;
++ struct device *dev;
++ uint32_t reg;
++ int i;
++
++ dev = espi->dev;
++
++ perif = &espi->perif;
++
++ writel(0x0, espi->regs + ESPI_CH0_INT_EN);
++ writel(0x0, espi->regs + ESPI_MMBI_INT_EN);
++
++ reg = readl(espi->regs + ESPI_CH0_MCYC0_MASKL);
++ reg &= ~ESPI_CH0_MCYC0_MASKL_EN;
++ writel(reg, espi->regs + ESPI_CH0_MCYC0_MASKL);
++
++ reg = readl(espi->regs + ESPI_CH0_MCYC1_MASKL);
++ reg &= ~ESPI_CH0_MCYC1_MASKL_EN;
++ writel(reg, espi->regs + ESPI_CH0_MCYC1_MASKL);
++
++ reg = readl(espi->regs + ESPI_CH0_CTRL);
++ reg |= (ESPI_CH0_CTRL_MCYC_RD_DIS | ESPI_CH0_CTRL_MCYC_WR_DIS);
++ reg &= ~(ESPI_CH0_CTRL_NP_TX_DMA_EN
++ | ESPI_CH0_CTRL_PC_TX_DMA_EN
++ | ESPI_CH0_CTRL_PC_RX_DMA_EN
++ | ESPI_CH0_CTRL_SW_RDY);
++ writel(reg, espi->regs + ESPI_CH0_CTRL);
++
++ if (perif->mmbi.enable) {
++ reg = readl(espi->regs + ESPI_MMBI_CTRL);
++ reg &= ~ESPI_MMBI_CTRL_EN;
++ writel(reg, espi->regs + ESPI_MMBI_CTRL);
++
++ for (i = 0; i < perif->mmbi.inst_num; ++i) {
++ mmbi = &perif->mmbi.inst[i];
++ misc_deregister(&mmbi->b2h_mdev);
++ misc_deregister(&mmbi->h2b_mdev);
++ }
++
++ devm_iounmap(dev, perif->mmbi.virt);
++ }
++
++ if (perif->mcyc.enable)
++ dmam_free_coherent(dev, perif->mcyc.size, perif->mcyc.virt,
++ perif->mcyc.taddr);
++
++ if (perif->dma.enable) {
++ dmam_free_coherent(dev, PAGE_SIZE, perif->dma.np_tx_virt,
++ perif->dma.np_tx_addr);
++ dmam_free_coherent(dev, PAGE_SIZE, perif->dma.pc_tx_virt,
++ perif->dma.pc_tx_addr);
++ dmam_free_coherent(dev, PAGE_SIZE, perif->dma.pc_rx_virt,
++ perif->dma.pc_rx_addr);
++ }
++
++ mutex_destroy(&perif->np_tx_mtx);
++ mutex_destroy(&perif->pc_tx_mtx);
++ mutex_destroy(&perif->pc_rx_mtx);
++
++ misc_deregister(&perif->mdev);
++
++ return 0;
++}
++
++/* virtual wire channel (CH1) */
++static long ast2700_espi_vw_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
++{
++ struct ast2700_espi_vw *vw;
++ struct ast2700_espi *espi;
++ uint64_t gpio;
++
++ vw = container_of(fp->private_data, struct ast2700_espi_vw, mdev);
++ espi = container_of(vw, struct ast2700_espi, vw);
++ gpio = ((uint64_t)vw->gpio.val1 << 32) | vw->gpio.val0;
++
++ switch (cmd) {
++ case ASPEED_ESPI_VW_GET_GPIO_VAL:
++ if (put_user(gpio, (uint64_t __user *)arg))
++ return -EFAULT;
++ break;
++
++ case ASPEED_ESPI_VW_PUT_GPIO_VAL:
++ if (get_user(gpio, (uint64_t __user *)arg))
++ return -EFAULT;
++
++ writel(gpio >> 32, espi->regs + ESPI_CH1_GPIO_VAL1);
++ writel(gpio & 0xffffffff, espi->regs + ESPI_CH1_GPIO_VAL0);
++ break;
++
++ default:
++ return -EINVAL;
++ };
++
++ return 0;
++}
++
++static const struct file_operations ast2700_espi_vw_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = ast2700_espi_vw_ioctl,
++};
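++
++/*
++ * Illustrative virtual wire GPIO usage sketch (comment only, not compiled):
++ * up to 64 VW GPIO states are exchanged as a single 64-bit word, the lower
++ * 32 bits mapping to ESPI_CH1_GPIO_VAL0 and the upper 32 bits to
++ * ESPI_CH1_GPIO_VAL1.  The GET ioctl returns the values cached by the
++ * interrupt handler.  The device node name is an assumption.
++ *
++ *   uint64_t gpio;
++ *   int fd = open("/dev/aspeed-espi-vw0", O_RDWR);
++ *
++ *   ioctl(fd, ASPEED_ESPI_VW_GET_GPIO_VAL, &gpio);
++ *   gpio |= 1ULL;				// drive VW GPIO 0 high
++ *   ioctl(fd, ASPEED_ESPI_VW_PUT_GPIO_VAL, &gpio);
++ */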
++
++static void ast2700_espi_vw_isr(struct ast2700_espi *espi)
++{
++ struct ast2700_espi_vw *vw;
++ uint32_t sts;
++
++ vw = &espi->vw;
++
++ sts = readl(espi->regs + ESPI_CH1_INT_STS);
++
++ if (sts & ESPI_CH1_INT_STS_GPIO) {
++ vw->gpio.val0 = readl(espi->regs + ESPI_CH1_GPIO_VAL0);
++ vw->gpio.val1 = readl(espi->regs + ESPI_CH1_GPIO_VAL1);
++ writel(ESPI_CH1_INT_STS_GPIO, espi->regs + ESPI_CH1_INT_STS);
++ }
++}
++
++static void ast2700_espi_vw_reset(struct ast2700_espi *espi)
++{
++ uint32_t reg;
++ struct ast2700_espi_vw *vw = &espi->vw;
++
++ writel(0x0, espi->regs + ESPI_CH1_INT_EN);
++ writel(0xffffffff, espi->regs + ESPI_CH1_INT_STS);
++
++ writel(vw->gpio.grp, espi->regs + ESPI_CH1_GPIO_GRP);
++ writel(vw->gpio.dir0, espi->regs + ESPI_CH1_GPIO_DIR0);
++ writel(vw->gpio.dir1, espi->regs + ESPI_CH1_GPIO_DIR1);
++
++ vw->gpio.val0 = readl(espi->regs + ESPI_CH1_GPIO_VAL0);
++ vw->gpio.val1 = readl(espi->regs + ESPI_CH1_GPIO_VAL1);
++
++ writel(ESPI_CH1_INT_EN_GPIO, espi->regs + ESPI_CH1_INT_EN);
++
++ reg = readl(espi->regs + ESPI_CH1_CTRL)
++ | ((vw->gpio.hw_mode) ? ESPI_CH1_CTRL_GPIO_HW : 0)
++ | ESPI_CH1_CTRL_SW_RDY;
++ writel(reg, espi->regs + ESPI_CH1_CTRL);
++}
++
++static int ast2700_espi_vw_probe(struct ast2700_espi *espi)
++{
++ int rc;
++ struct device *dev = espi->dev;
++ struct ast2700_espi_vw *vw = &espi->vw;
++
++ vw->gpio.hw_mode = of_property_read_bool(dev->of_node, "vw-gpio-hw-mode");
++ of_property_read_u32(dev->of_node, "vw-gpio-group", &vw->gpio.grp);
++ of_property_read_u32_index(dev->of_node, "vw-gpio-direction", 0, &vw->gpio.dir0);
++ of_property_read_u32_index(dev->of_node, "vw-gpio-direction", 1, &vw->gpio.dir1);
++
++ vw->mdev.parent = dev;
++ vw->mdev.minor = MISC_DYNAMIC_MINOR;
++ vw->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-vw%d", DEVICE_NAME, espi->dev_id);
++ vw->mdev.fops = &ast2700_espi_vw_fops;
++ rc = misc_register(&vw->mdev);
++ if (rc) {
++ dev_err(dev, "cannot register device %s\n", vw->mdev.name);
++ return rc;
++ }
++
++ ast2700_espi_vw_reset(espi);
++
++ return 0;
++}
++
++static int ast2700_espi_vw_remove(struct ast2700_espi *espi)
++{
++ struct ast2700_espi_vw *vw;
++
++ vw = &espi->vw;
++
++ writel(0x0, espi->regs + ESPI_CH1_INT_EN);
++
++ misc_deregister(&vw->mdev);
++
++ return 0;
++}
++
++/* out-of-band channel (CH2) */
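++/*
++ * In DMA mode the RX side is a ring of OOB_DMA_DESC_NUM descriptors, each
++ * backed by a PAGE_SIZE data buffer.  Hardware is expected to set the dirty
++ * flag of a descriptor once a packet has landed in its buffer; the helper
++ * below consumes the descriptor at the current write pointer, clears the
++ * dirty flag and returns the slot to hardware by advancing the write pointer
++ * with the VALID bit set.
++ */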
++static long ast2700_espi_oob_dma_get_rx(struct file *fp,
++ struct ast2700_espi_oob *oob,
++ struct aspeed_espi_ioc *ioc)
++{
++ struct ast2700_espi_oob_dma_rx_desc *d;
++ struct ast2700_espi *espi;
++ struct espi_comm_hdr *hdr;
++ uint32_t wptr, pkt_len;
++ unsigned long flags;
++ uint8_t *pkt;
++ int rc;
++
++ espi = container_of(oob, struct ast2700_espi, oob);
++
++ wptr = FIELD_PREP(ESPI_CH2_RX_DESC_WPTR_WP, readl(espi->regs + ESPI_CH2_RX_DESC_WPTR));
++
++ d = &oob->dma.rxd_virt[wptr];
++
++ if (!d->dirty)
++ return -EFAULT;
++
++ pkt_len = ((d->len) ? d->len : ESPI_MAX_PLD_LEN) + sizeof(struct espi_comm_hdr);
++
++ if (ioc->pkt_len < pkt_len)
++ return -EINVAL;
++
++ pkt = vmalloc(pkt_len);
++ if (!pkt)
++ return -ENOMEM;
++
++ hdr = (struct espi_comm_hdr *)pkt;
++ hdr->cyc = d->cyc;
++ hdr->tag = d->tag;
++ hdr->len_h = d->len >> 8;
++ hdr->len_l = d->len & 0xff;
++ memcpy(hdr + 1, oob->dma.rx_virt + (PAGE_SIZE * wptr), pkt_len - sizeof(*hdr));
++
++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ spin_lock_irqsave(&oob->lock, flags);
++
++ /* make current descriptor available again */
++ d->dirty = 0;
++
++ wptr = ((wptr + 1) % OOB_DMA_DESC_NUM);
++ writel(wptr | ESPI_CH2_RX_DESC_WPTR_VALID, espi->regs + ESPI_CH2_RX_DESC_WPTR);
++
++	/* set ready flag based on the next RX descriptor */
++ oob->rx_ready = oob->dma.rxd_virt[wptr].dirty;
++
++ spin_unlock_irqrestore(&oob->lock, flags);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++ return rc;
++}
++
++static long ast2700_espi_oob_get_rx(struct file *fp,
++ struct ast2700_espi_oob *oob,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2700_espi *espi;
++ struct espi_comm_hdr *hdr;
++ unsigned long flags;
++ uint32_t pkt_len;
++ uint8_t *pkt;
++ int i, rc;
++
++ espi = container_of(oob, struct ast2700_espi, oob);
++
++ if (fp->f_flags & O_NONBLOCK) {
++ if (!mutex_trylock(&oob->rx_mtx))
++ return -EAGAIN;
++
++ if (!oob->rx_ready) {
++ rc = -ENODATA;
++ goto unlock_mtx_n_out;
++ }
++ } else {
++ mutex_lock(&oob->rx_mtx);
++
++ if (!oob->rx_ready) {
++ rc = wait_event_interruptible(oob->wq, oob->rx_ready);
++ if (rc == -ERESTARTSYS) {
++ rc = -EINTR;
++ goto unlock_mtx_n_out;
++ }
++ }
++ }
++
++ if (oob->dma.enable) {
++ rc = ast2700_espi_oob_dma_get_rx(fp, oob, ioc);
++ goto unlock_mtx_n_out;
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++	 * part is read from HW registers
++ */
++ reg = readl(espi->regs + ESPI_CH2_RX_CTRL);
++ cyc = FIELD_GET(ESPI_CH2_RX_CTRL_CYC, reg);
++ tag = FIELD_GET(ESPI_CH2_RX_CTRL_TAG, reg);
++ len = FIELD_GET(ESPI_CH2_RX_CTRL_LEN, reg);
++
++ /*
++ * calculate the length of the rest part of the
++ * eSPI packet to be read from HW and copied to
++ * user space.
++ */
++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) + sizeof(struct espi_comm_hdr);
++
++ if (ioc->pkt_len < pkt_len) {
++ rc = -EINVAL;
++ goto unlock_mtx_n_out;
++ }
++
++ pkt = vmalloc(pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_mtx_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++ hdr->cyc = cyc;
++ hdr->tag = tag;
++ hdr->len_h = len >> 8;
++ hdr->len_l = len & 0xff;
++
++ for (i = sizeof(*hdr); i < pkt_len; ++i) {
++ reg = readl(espi->regs + ESPI_CH2_RX_DATA);
++ pkt[i] = reg & 0xff;
++ }
++
++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ spin_lock_irqsave(&oob->lock, flags);
++
++ writel(ESPI_CH2_RX_CTRL_SERV_PEND, espi->regs + ESPI_CH2_RX_CTRL);
++ oob->rx_ready = 0;
++
++ spin_unlock_irqrestore(&oob->lock, flags);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_mtx_n_out:
++ mutex_unlock(&oob->rx_mtx);
++
++ return rc;
++}
++
++static long ast2700_espi_oob_dma_put_tx(struct file *fp,
++ struct ast2700_espi_oob *oob,
++ struct aspeed_espi_ioc *ioc)
++{
++ struct ast2700_espi_oob_dma_tx_desc *d;
++ struct ast2700_espi *espi;
++ struct espi_comm_hdr *hdr;
++ uint32_t rptr, wptr;
++ uint8_t *pkt;
++ int rc;
++
++ espi = container_of(oob, struct ast2700_espi, oob);
++
++ pkt = vzalloc(ioc->pkt_len);
++ if (!pkt)
++ return -ENOMEM;
++
++ hdr = (struct espi_comm_hdr *)pkt;
++
++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ /* kick HW to update descriptor read/write pointer */
++ writel(ESPI_CH2_TX_DESC_RPTR_UPT, espi->regs + ESPI_CH2_TX_DESC_RPTR);
++
++ rptr = readl(espi->regs + ESPI_CH2_TX_DESC_RPTR);
++ wptr = readl(espi->regs + ESPI_CH2_TX_DESC_WPTR);
++
++ if (((wptr + 1) % OOB_DMA_DESC_NUM) == rptr) {
++ rc = -EBUSY;
++ goto free_n_out;
++ }
++
++ d = &oob->dma.txd_virt[wptr];
++ d->cyc = hdr->cyc;
++ d->tag = hdr->tag;
++ d->len = (hdr->len_h << 8) | (hdr->len_l & 0xff);
++ d->msg_type = OOB_DMA_DESC_CUSTOM;
++
++ memcpy(oob->dma.tx_virt + (PAGE_SIZE * wptr), hdr + 1, ioc->pkt_len - sizeof(*hdr));
++
++ dma_wmb();
++
++ wptr = (wptr + 1) % OOB_DMA_DESC_NUM;
++ writel(wptr | ESPI_CH2_TX_DESC_WPTR_VALID, espi->regs + ESPI_CH2_TX_DESC_WPTR);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++ return rc;
++}
++
++static long ast2700_espi_oob_put_tx(struct file *fp,
++ struct ast2700_espi_oob *oob,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2700_espi *espi;
++ struct espi_comm_hdr *hdr;
++ uint8_t *pkt;
++ int i, rc;
++
++ espi = container_of(oob, struct ast2700_espi, oob);
++
++ if (!mutex_trylock(&oob->tx_mtx))
++ return -EAGAIN;
++
++ if (oob->dma.enable) {
++ rc = ast2700_espi_oob_dma_put_tx(fp, oob, ioc);
++ goto unlock_mtx_n_out;
++ }
++
++ reg = readl(espi->regs + ESPI_CH2_TX_CTRL);
++ if (reg & ESPI_CH2_TX_CTRL_TRIG_PEND) {
++ rc = -EBUSY;
++ goto unlock_mtx_n_out;
++ }
++
++ if (ioc->pkt_len > ESPI_MAX_PKT_LEN) {
++ rc = -EINVAL;
++ goto unlock_mtx_n_out;
++ }
++
++ pkt = vmalloc(ioc->pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_mtx_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++
++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++ * part is written to HW registers
++ */
++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i)
++ writel(pkt[i], espi->regs + ESPI_CH2_TX_DATA);
++
++ cyc = hdr->cyc;
++ tag = hdr->tag;
++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff);
++
++ reg = FIELD_PREP(ESPI_CH2_TX_CTRL_CYC, cyc)
++ | FIELD_PREP(ESPI_CH2_TX_CTRL_TAG, tag)
++ | FIELD_PREP(ESPI_CH2_TX_CTRL_LEN, len)
++ | ESPI_CH2_TX_CTRL_TRIG_PEND;
++ writel(reg, espi->regs + ESPI_CH2_TX_CTRL);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_mtx_n_out:
++ mutex_unlock(&oob->tx_mtx);
++
++ return rc;
++}
++
++static long ast2700_espi_oob_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
++{
++ struct ast2700_espi_oob *oob;
++ struct aspeed_espi_ioc ioc;
++
++ oob = container_of(fp->private_data, struct ast2700_espi_oob, mdev);
++
++ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc)))
++ return -EFAULT;
++
++ if (ioc.pkt_len > ESPI_MAX_PKT_LEN)
++ return -EINVAL;
++
++ switch (cmd) {
++ case ASPEED_ESPI_OOB_GET_RX:
++ return ast2700_espi_oob_get_rx(fp, oob, &ioc);
++ case ASPEED_ESPI_OOB_PUT_TX:
++ return ast2700_espi_oob_put_tx(fp, oob, &ioc);
++ };
++
++ return -EINVAL;
++}
++
++static const struct file_operations ast2700_espi_oob_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = ast2700_espi_oob_ioctl,
++};
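++
++/*
++ * Illustrative OOB channel usage sketch (comment only, not compiled):
++ * out-of-band packets use the same struct aspeed_espi_ioc as the other
++ * channels.  GET_RX blocks until an RX completion arrives unless the file is
++ * opened with O_NONBLOCK, in which case -ENODATA is returned when nothing is
++ * pending.
++ *
++ *   ioctl(fd, ASPEED_ESPI_OOB_PUT_TX, &ioc);
++ *   ioctl(fd, ASPEED_ESPI_OOB_GET_RX, &ioc);
++ */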
++
++static void ast2700_espi_oob_isr(struct ast2700_espi *espi)
++{
++ struct ast2700_espi_oob *oob;
++ unsigned long flags;
++ uint32_t sts;
++
++ oob = &espi->oob;
++
++ sts = readl(espi->regs + ESPI_CH2_INT_STS);
++
++ if (sts & ESPI_CH2_INT_STS_RX_CMPLT) {
++ writel(ESPI_CH2_INT_STS_RX_CMPLT, espi->regs + ESPI_CH2_INT_STS);
++
++ spin_lock_irqsave(&oob->lock, flags);
++ oob->rx_ready = true;
++ spin_unlock_irqrestore(&oob->lock, flags);
++
++ wake_up_interruptible(&oob->wq);
++ }
++}
++
++static void ast2700_espi_oob_reset(struct ast2700_espi *espi)
++{
++ struct ast2700_espi_oob *oob;
++ dma_addr_t tx_addr, rx_addr;
++ uint32_t reg;
++ int i;
++
++ oob = &espi->oob;
++
++ writel(0x0, espi->regs + ESPI_CH2_INT_EN);
++ writel(0xffffffff, espi->regs + ESPI_CH2_INT_STS);
++
++ reg = readl(espi->regs + ESPI_CH2_CTRL);
++ reg &= ~(ESPI_CH2_CTRL_TX_RST
++ | ESPI_CH2_CTRL_RX_RST
++ | ESPI_CH2_CTRL_TX_DMA_EN
++ | ESPI_CH2_CTRL_RX_DMA_EN
++ | ESPI_CH2_CTRL_SW_RDY);
++ writel(reg, espi->regs + ESPI_CH2_CTRL);
++
++ udelay(1);
++
++ reg |= (ESPI_CH2_CTRL_TX_RST | ESPI_CH2_CTRL_RX_RST);
++ writel(reg, espi->regs + ESPI_CH2_CTRL);
++
++ if (oob->dma.enable) {
++ tx_addr = oob->dma.tx_addr;
++ rx_addr = oob->dma.rx_addr;
++
++ for (i = 0; i < OOB_DMA_DESC_NUM; ++i) {
++ oob->dma.txd_virt[i].data_addrh = tx_addr >> 32;
++ oob->dma.txd_virt[i].data_addrl = tx_addr & 0xffffffff;
++ tx_addr += PAGE_SIZE;
++
++ oob->dma.rxd_virt[i].data_addrh = rx_addr >> 32;
++ oob->dma.rxd_virt[i].data_addrl = rx_addr & 0xffffffff;
++ oob->dma.rxd_virt[i].dirty = 0;
++ rx_addr += PAGE_SIZE;
++ }
++
++ writel(oob->dma.txd_addr >> 32, espi->regs + ESPI_CH2_TX_DMAH);
++ writel(oob->dma.txd_addr & 0xffffffff, espi->regs + ESPI_CH2_TX_DMAL);
++ writel(OOB_DMA_RPTR_KEY, espi->regs + ESPI_CH2_TX_DESC_RPTR);
++ writel(0x0, espi->regs + ESPI_CH2_TX_DESC_WPTR);
++ writel(OOB_DMA_DESC_NUM, espi->regs + ESPI_CH2_TX_DESC_EPTR);
++
++ writel(oob->dma.rxd_addr >> 32, espi->regs + ESPI_CH2_RX_DMAH);
++ writel(oob->dma.rxd_addr & 0xffffffff, espi->regs + ESPI_CH2_RX_DMAL);
++ writel(OOB_DMA_RPTR_KEY, espi->regs + ESPI_CH2_RX_DESC_RPTR);
++ writel(0x0, espi->regs + ESPI_CH2_RX_DESC_WPTR);
++ writel(OOB_DMA_DESC_NUM, espi->regs + ESPI_CH2_RX_DESC_EPTR);
++
++ reg = readl(espi->regs + ESPI_CH2_CTRL)
++ | ESPI_CH2_CTRL_TX_DMA_EN
++ | ESPI_CH2_CTRL_RX_DMA_EN;
++ writel(reg, espi->regs + ESPI_CH2_CTRL);
++
++ /* activate RX DMA to make OOB_FREE */
++ reg = readl(espi->regs + ESPI_CH2_RX_DESC_WPTR) | ESPI_CH2_RX_DESC_WPTR_VALID;
++ writel(reg, espi->regs + ESPI_CH2_RX_DESC_WPTR);
++ }
++
++ writel(ESPI_CH2_INT_EN_RX_CMPLT, espi->regs + ESPI_CH2_INT_EN);
++
++ reg = readl(espi->regs + ESPI_CH2_CTRL) | ESPI_CH2_CTRL_SW_RDY;
++ writel(reg, espi->regs + ESPI_CH2_CTRL);
++}
++
++static int ast2700_espi_oob_probe(struct ast2700_espi *espi)
++{
++ struct ast2700_espi_oob *oob;
++ struct device *dev;
++ int rc;
++
++ dev = espi->dev;
++
++ oob = &espi->oob;
++
++ init_waitqueue_head(&oob->wq);
++
++ spin_lock_init(&oob->lock);
++
++ mutex_init(&oob->tx_mtx);
++ mutex_init(&oob->rx_mtx);
++
++ oob->dma.enable = of_property_read_bool(dev->of_node, "oob-dma-mode");
++ if (oob->dma.enable) {
++		oob->dma.txd_virt = dmam_alloc_coherent(dev,
++							sizeof(*oob->dma.txd_virt) * OOB_DMA_DESC_NUM,
++							&oob->dma.txd_addr, GFP_KERNEL);
++ if (!oob->dma.txd_virt) {
++ dev_err(dev, "cannot allocate DMA TX descriptor\n");
++ return -ENOMEM;
++ }
++		oob->dma.tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE * OOB_DMA_DESC_NUM,
++						       &oob->dma.tx_addr, GFP_KERNEL);
++ if (!oob->dma.tx_virt) {
++ dev_err(dev, "cannot allocate DMA TX buffer\n");
++ return -ENOMEM;
++ }
++
++		oob->dma.rxd_virt = dmam_alloc_coherent(dev,
++							sizeof(*oob->dma.rxd_virt) * OOB_DMA_DESC_NUM,
++							&oob->dma.rxd_addr, GFP_KERNEL);
++ if (!oob->dma.rxd_virt) {
++ dev_err(dev, "cannot allocate DMA RX descriptor\n");
++ return -ENOMEM;
++ }
++
++		oob->dma.rx_virt = dmam_alloc_coherent(dev, PAGE_SIZE * OOB_DMA_DESC_NUM,
++						       &oob->dma.rx_addr, GFP_KERNEL);
++		if (!oob->dma.rx_virt) {
++			dev_err(dev, "cannot allocate DMA RX buffer\n");
++ return -ENOMEM;
++ }
++ }
++
++ oob->mdev.parent = dev;
++ oob->mdev.minor = MISC_DYNAMIC_MINOR;
++ oob->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-oob%d", DEVICE_NAME, espi->dev_id);
++ oob->mdev.fops = &ast2700_espi_oob_fops;
++ rc = misc_register(&oob->mdev);
++ if (rc) {
++ dev_err(dev, "cannot register device %s\n", oob->mdev.name);
++ return rc;
++ }
++
++ ast2700_espi_oob_reset(espi);
++
++ return 0;
++}
++
++static int ast2700_espi_oob_remove(struct ast2700_espi *espi)
++{
++ struct ast2700_espi_oob *oob;
++ struct device *dev;
++ uint32_t reg;
++
++ dev = espi->dev;
++
++ oob = &espi->oob;
++
++ writel(0x0, espi->regs + ESPI_CH2_INT_EN);
++
++ reg = readl(espi->regs + ESPI_CH2_CTRL);
++ reg &= ~(ESPI_CH2_CTRL_TX_DMA_EN
++ | ESPI_CH2_CTRL_RX_DMA_EN
++ | ESPI_CH2_CTRL_SW_RDY);
++ writel(reg, espi->regs + ESPI_CH2_CTRL);
++
++ if (oob->dma.enable) {
++ dmam_free_coherent(dev, sizeof(*oob->dma.txd_virt) * OOB_DMA_DESC_NUM,
++ oob->dma.txd_virt, oob->dma.txd_addr);
++ dmam_free_coherent(dev, PAGE_SIZE * OOB_DMA_DESC_NUM,
++ oob->dma.tx_virt, oob->dma.tx_addr);
++ dmam_free_coherent(dev, sizeof(*oob->dma.rxd_virt) * OOB_DMA_DESC_NUM,
++ oob->dma.rxd_virt, oob->dma.rxd_addr);
++ dmam_free_coherent(dev, PAGE_SIZE * OOB_DMA_DESC_NUM,
++ oob->dma.rx_virt, oob->dma.rx_addr);
++ }
++
++ mutex_destroy(&oob->tx_mtx);
++ mutex_destroy(&oob->rx_mtx);
++
++ misc_deregister(&oob->mdev);
++
++ return 0;
++}
++
++/* flash channel (CH3) */
++static long ast2700_espi_flash_get_rx(struct file *fp,
++ struct ast2700_espi_flash *flash,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2700_espi *espi;
++ struct espi_comm_hdr *hdr;
++ unsigned long flags;
++ uint32_t pkt_len;
++ uint8_t *pkt;
++ int i, rc;
++
++ rc = 0;
++
++ espi = container_of(flash, struct ast2700_espi, flash);
++
++ if (fp->f_flags & O_NONBLOCK) {
++ if (!mutex_trylock(&flash->rx_mtx))
++ return -EAGAIN;
++
++ if (!flash->rx_ready) {
++ rc = -ENODATA;
++ goto unlock_mtx_n_out;
++ }
++ } else {
++ mutex_lock(&flash->rx_mtx);
++
++ if (!flash->rx_ready) {
++ rc = wait_event_interruptible(flash->wq, flash->rx_ready);
++ if (rc == -ERESTARTSYS) {
++ rc = -EINTR;
++ goto unlock_mtx_n_out;
++ }
++ }
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++	 * part is read from HW registers
++ */
++ reg = readl(espi->regs + ESPI_CH3_RX_CTRL);
++ cyc = FIELD_GET(ESPI_CH3_RX_CTRL_CYC, reg);
++ tag = FIELD_GET(ESPI_CH3_RX_CTRL_TAG, reg);
++ len = FIELD_GET(ESPI_CH3_RX_CTRL_LEN, reg);
++
++ /*
++ * calculate the length of the rest part of the
++ * eSPI packet to be read from HW and copied to
++ * user space.
++ */
++ switch (cyc) {
++ case ESPI_FLASH_WRITE:
++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) +
++ sizeof(struct espi_flash_rwe);
++ break;
++ case ESPI_FLASH_READ:
++ case ESPI_FLASH_ERASE:
++ pkt_len = sizeof(struct espi_flash_rwe);
++ break;
++ case ESPI_FLASH_SUC_CMPLT_D_MIDDLE:
++ case ESPI_FLASH_SUC_CMPLT_D_FIRST:
++ case ESPI_FLASH_SUC_CMPLT_D_LAST:
++ case ESPI_FLASH_SUC_CMPLT_D_ONLY:
++ pkt_len = ((len) ? len : ESPI_MAX_PLD_LEN) +
++ sizeof(struct espi_flash_cmplt);
++ break;
++ case ESPI_FLASH_SUC_CMPLT:
++ case ESPI_FLASH_UNSUC_CMPLT:
++ pkt_len = sizeof(struct espi_flash_cmplt);
++ break;
++ default:
++ rc = -EFAULT;
++ goto unlock_mtx_n_out;
++ }
++
++ if (ioc->pkt_len < pkt_len) {
++ rc = -EINVAL;
++ goto unlock_mtx_n_out;
++ }
++
++ pkt = vmalloc(pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_mtx_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++ hdr->cyc = cyc;
++ hdr->tag = tag;
++ hdr->len_h = len >> 8;
++ hdr->len_l = len & 0xff;
++
++ if (flash->dma.enable) {
++ memcpy(hdr + 1, flash->dma.rx_virt, pkt_len - sizeof(*hdr));
++ } else {
++ for (i = sizeof(*hdr); i < pkt_len; ++i)
++ pkt[i] = readl(espi->regs + ESPI_CH3_RX_DATA) & 0xff;
++ }
++
++ if (copy_to_user((void __user *)ioc->pkt, pkt, pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ spin_lock_irqsave(&flash->lock, flags);
++
++ writel(ESPI_CH3_RX_CTRL_SERV_PEND, espi->regs + ESPI_CH3_RX_CTRL);
++ flash->rx_ready = 0;
++
++ spin_unlock_irqrestore(&flash->lock, flags);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_mtx_n_out:
++ mutex_unlock(&flash->rx_mtx);
++
++ return rc;
++}
++
++static long ast2700_espi_flash_put_tx(struct file *fp,
++ struct ast2700_espi_flash *flash,
++ struct aspeed_espi_ioc *ioc)
++{
++ uint32_t reg, cyc, tag, len;
++ struct ast2700_espi *espi;
++ struct espi_comm_hdr *hdr;
++ uint8_t *pkt;
++ int i, rc;
++
++ espi = container_of(flash, struct ast2700_espi, flash);
++
++ if (!mutex_trylock(&flash->tx_mtx))
++ return -EAGAIN;
++
++ reg = readl(espi->regs + ESPI_CH3_TX_CTRL);
++ if (reg & ESPI_CH3_TX_CTRL_TRIG_PEND) {
++ rc = -EBUSY;
++ goto unlock_mtx_n_out;
++ }
++
++ pkt = vmalloc(ioc->pkt_len);
++ if (!pkt) {
++ rc = -ENOMEM;
++ goto unlock_mtx_n_out;
++ }
++
++ hdr = (struct espi_comm_hdr *)pkt;
++
++ if (copy_from_user(pkt, (void __user *)ioc->pkt, ioc->pkt_len)) {
++ rc = -EFAULT;
++ goto free_n_out;
++ }
++
++ /*
++ * common header (i.e. cycle type, tag, and length)
++ * part is written to HW registers
++ */
++ if (flash->dma.enable) {
++ memcpy(flash->dma.tx_virt, hdr + 1, ioc->pkt_len - sizeof(*hdr));
++ dma_wmb();
++ } else {
++ for (i = sizeof(*hdr); i < ioc->pkt_len; ++i)
++ writel(pkt[i], espi->regs + ESPI_CH3_TX_DATA);
++ }
++
++ cyc = hdr->cyc;
++ tag = hdr->tag;
++ len = (hdr->len_h << 8) | (hdr->len_l & 0xff);
++
++ reg = FIELD_PREP(ESPI_CH3_TX_CTRL_CYC, cyc)
++ | FIELD_PREP(ESPI_CH3_TX_CTRL_TAG, tag)
++ | FIELD_PREP(ESPI_CH3_TX_CTRL_LEN, len)
++ | ESPI_CH3_TX_CTRL_TRIG_PEND;
++ writel(reg, espi->regs + ESPI_CH3_TX_CTRL);
++
++ rc = 0;
++
++free_n_out:
++ vfree(pkt);
++
++unlock_mtx_n_out:
++ mutex_unlock(&flash->tx_mtx);
++
++ return rc;
++}
++
++static long ast2700_espi_flash_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
++{
++ struct ast2700_espi_flash *flash;
++ struct aspeed_espi_ioc ioc;
++
++ flash = container_of(fp->private_data, struct ast2700_espi_flash, mdev);
++
++ if (copy_from_user(&ioc, (void __user *)arg, sizeof(ioc)))
++ return -EFAULT;
++
++ if (ioc.pkt_len > ESPI_MAX_PKT_LEN)
++ return -EINVAL;
++
++ switch (cmd) {
++ case ASPEED_ESPI_FLASH_GET_RX:
++ return ast2700_espi_flash_get_rx(fp, flash, &ioc);
++ case ASPEED_ESPI_FLASH_PUT_TX:
++ return ast2700_espi_flash_put_tx(fp, flash, &ioc);
++ };
++
++ return -EINVAL;
++}
++
++static const struct file_operations ast2700_espi_flash_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = ast2700_espi_flash_ioctl,
++};
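++
++/*
++ * Note on flash channel RX framing: the packet length expected by
++ * ASPEED_ESPI_FLASH_GET_RX depends on the cycle type reported by hardware,
++ * i.e. struct espi_flash_rwe for read/write/erase requests and
++ * struct espi_flash_cmplt for completions, plus the payload for the
++ * data-carrying cycle types (see ast2700_espi_flash_get_rx() above).
++ */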
++
++static void ast2700_espi_flash_isr(struct ast2700_espi *espi)
++{
++ struct ast2700_espi_flash *flash;
++ unsigned long flags;
++ uint32_t sts;
++
++ flash = &espi->flash;
++
++ sts = readl(espi->regs + ESPI_CH3_INT_STS);
++
++ if (sts & ESPI_CH3_INT_STS_RX_CMPLT) {
++ writel(ESPI_CH3_INT_STS_RX_CMPLT, espi->regs + ESPI_CH3_INT_STS);
++
++ spin_lock_irqsave(&flash->lock, flags);
++ flash->rx_ready = true;
++ spin_unlock_irqrestore(&flash->lock, flags);
++
++ wake_up_interruptible(&flash->wq);
++ }
++}
++
++static void ast2700_espi_flash_reset(struct ast2700_espi *espi)
++{
++ uint32_t reg;
++ uint64_t mask;
++ struct ast2700_espi_flash *flash = &espi->flash;
++
++ writel(0x0, espi->regs + ESPI_CH3_INT_EN);
++ writel(0xffffffff, espi->regs + ESPI_CH3_INT_STS);
++
++ reg = readl(espi->regs + ESPI_CH3_CTRL);
++ reg &= ~(ESPI_CH3_CTRL_TX_RST
++ | ESPI_CH3_CTRL_RX_RST
++ | ESPI_CH3_CTRL_TX_DMA_EN
++ | ESPI_CH3_CTRL_RX_DMA_EN
++ | ESPI_CH3_CTRL_SW_RDY);
++ writel(reg, espi->regs + ESPI_CH3_CTRL);
++
++ udelay(1);
++
++ reg |= (ESPI_CH3_CTRL_TX_RST | ESPI_CH3_CTRL_RX_RST);
++ writel(reg, espi->regs + ESPI_CH3_CTRL);
++
++ if (flash->edaf.mode == EDAF_MODE_MIX) {
++ mask = ~(flash->edaf.size - 1);
++ writel(mask >> 32, espi->regs + ESPI_CH3_EDAF_MASKH);
++ writel(mask & 0xffffffff, espi->regs + ESPI_CH3_EDAF_MASKL);
++ writel(flash->edaf.taddr >> 32, espi->regs + ESPI_CH3_EDAF_TADDRH);
++ writel(flash->edaf.taddr & 0xffffffff, espi->regs + ESPI_CH3_EDAF_TADDRL);
++ }
++
++ reg = readl(espi->regs + ESPI_CH3_CTRL) & ~ESPI_CH3_CTRL_EDAF_MODE;
++ reg |= FIELD_PREP(ESPI_CH3_CTRL_EDAF_MODE, flash->edaf.mode);
++ writel(reg, espi->regs + ESPI_CH3_CTRL);
++
++ if (flash->dma.enable) {
++ writel(flash->dma.tx_addr >> 32, espi->regs + ESPI_CH3_TX_DMAH);
++ writel(flash->dma.tx_addr & 0xffffffff, espi->regs + ESPI_CH3_TX_DMAL);
++ writel(flash->dma.rx_addr >> 32, espi->regs + ESPI_CH3_RX_DMAH);
++ writel(flash->dma.rx_addr & 0xffffffff, espi->regs + ESPI_CH3_RX_DMAL);
++
++ reg = readl(espi->regs + ESPI_CH3_CTRL)
++ | ESPI_CH3_CTRL_TX_DMA_EN
++ | ESPI_CH3_CTRL_RX_DMA_EN;
++ writel(reg, espi->regs + ESPI_CH3_CTRL);
++ }
++
++ writel(ESPI_CH3_INT_EN_RX_CMPLT, espi->regs + ESPI_CH3_INT_EN);
++
++ reg = readl(espi->regs + ESPI_CH3_CTRL) | ESPI_CH3_CTRL_SW_RDY;
++ writel(reg, espi->regs + ESPI_CH3_CTRL);
++}
++
++static int ast2700_espi_flash_probe(struct ast2700_espi *espi)
++{
++ struct ast2700_espi_flash *flash;
++ struct device *dev;
++ int rc;
++
++ dev = espi->dev;
++
++ flash = &espi->flash;
++
++ init_waitqueue_head(&flash->wq);
++
++ spin_lock_init(&flash->lock);
++
++ mutex_init(&flash->tx_mtx);
++ mutex_init(&flash->rx_mtx);
++
++ flash->edaf.mode = EDAF_MODE_HW;
++
++ of_property_read_u32(dev->of_node, "flash-edaf-mode", &flash->edaf.mode);
++ if (flash->edaf.mode == EDAF_MODE_MIX) {
++ rc = of_property_read_u64(dev->of_node, "flash-edaf-tgt-addr", &flash->edaf.taddr);
++ if (rc || !IS_ALIGNED(flash->edaf.taddr, FLASH_EDAF_ALIGN)) {
++ dev_err(dev, "cannot get 16MB-aligned eDAF address\n");
++ return -ENODEV;
++ }
++
++ rc = of_property_read_u64(dev->of_node, "flash-edaf-size", &flash->edaf.size);
++ if (rc || !IS_ALIGNED(flash->edaf.size, FLASH_EDAF_ALIGN)) {
++ dev_err(dev, "cannot get 16MB-aligned eDAF size\n");
++ return -ENODEV;
++ }
++ }
++
++ flash->dma.enable = of_property_read_bool(dev->of_node, "flash-dma-mode");
++ if (flash->dma.enable) {
++ flash->dma.tx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, &flash->dma.tx_addr, GFP_KERNEL);
++ if (!flash->dma.tx_virt) {
++ dev_err(dev, "cannot allocate DMA TX buffer\n");
++ return -ENOMEM;
++ }
++
++ flash->dma.rx_virt = dmam_alloc_coherent(dev, PAGE_SIZE, &flash->dma.rx_addr, GFP_KERNEL);
++ if (!flash->dma.rx_virt) {
++ dev_err(dev, "cannot allocate DMA RX buffer\n");
++ return -ENOMEM;
++ }
++ }
++
++ flash->mdev.parent = dev;
++ flash->mdev.minor = MISC_DYNAMIC_MINOR;
++ flash->mdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s-flash%d", DEVICE_NAME, espi->dev_id);
++ flash->mdev.fops = &ast2700_espi_flash_fops;
++ rc = misc_register(&flash->mdev);
++ if (rc) {
++ dev_err(dev, "cannot register device %s\n", flash->mdev.name);
++ return rc;
++ }
++
++ ast2700_espi_flash_reset(espi);
++
++ return 0;
++}
++
++static int ast2700_espi_flash_remove(struct ast2700_espi *espi)
++{
++ struct ast2700_espi_flash *flash;
++ struct device *dev;
++ uint32_t reg;
++
++ dev = espi->dev;
++
++ flash = &espi->flash;
++
++ writel(0x0, espi->regs + ESPI_CH3_INT_EN);
++
++ reg = readl(espi->regs + ESPI_CH3_CTRL);
++ reg &= ~(ESPI_CH3_CTRL_TX_DMA_EN
++ | ESPI_CH3_CTRL_RX_DMA_EN
++ | ESPI_CH3_CTRL_SW_RDY);
++ writel(reg, espi->regs + ESPI_CH3_CTRL);
++
++ if (flash->dma.enable) {
++ dmam_free_coherent(dev, PAGE_SIZE, flash->dma.tx_virt, flash->dma.tx_addr);
++ dmam_free_coherent(dev, PAGE_SIZE, flash->dma.rx_virt, flash->dma.rx_addr);
++ }
++
++ mutex_destroy(&flash->tx_mtx);
++ mutex_destroy(&flash->rx_mtx);
++
++ misc_deregister(&flash->mdev);
++
++ return 0;
++}
++
++/* global control */
++static irqreturn_t ast2700_espi_isr(int irq, void *arg)
++{
++ uint32_t sts;
++ struct ast2700_espi *espi = (struct ast2700_espi *)arg;
++
++ sts = readl(espi->regs + ESPI_INT_STS);
++ if (!sts)
++ return IRQ_NONE;
++
++ if (sts & ESPI_INT_STS_CH0)
++ ast2700_espi_perif_isr(espi);
++
++ if (sts & ESPI_INT_STS_CH1)
++ ast2700_espi_vw_isr(espi);
++
++ if (sts & ESPI_INT_STS_CH2)
++ ast2700_espi_oob_isr(espi);
++
++ if (sts & ESPI_INT_STS_CH3)
++ ast2700_espi_flash_isr(espi);
++
++ if (sts & ESPI_INT_STS_RST_DEASSERT) {
++ ast2700_espi_perif_reset(espi);
++ ast2700_espi_vw_reset(espi);
++ ast2700_espi_oob_reset(espi);
++ ast2700_espi_flash_reset(espi);
++ writel(ESPI_INT_STS_RST_DEASSERT, espi->regs + ESPI_INT_STS);
++ }
++
++ return IRQ_HANDLED;
++}
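++
++/*
++ * Minimal device tree sketch (comment only; node name, unit address and
++ * values are placeholders): besides the usual reg/clocks/interrupts entries,
++ * the channel probes above look at the optional properties shown here.  MMBI
++ * additionally requires the "perif-mmbi-*" properties and a 64MB-aligned
++ * reserved memory region referenced through "perif-mmbi-tgt-memory".
++ *
++ *   espi: espi@0 {
++ *           compatible = "aspeed,ast2700-espi";
++ *           perif-dma-mode;
++ *           perif-mcyc-enable;
++ *           perif-mcyc-src-addr = /bits/ 64 <0x98000000>;
++ *           perif-mcyc-size = /bits/ 64 <0x10000>;
++ *           oob-dma-mode;
++ *           flash-dma-mode;
++ *   };
++ */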
++
++static int ast2700_espi_probe(struct platform_device *pdev)
++{
++ struct ast2700_espi *espi;
++ struct resource *res;
++ struct device *dev;
++ uint32_t reg;
++ int rc;
++
++ dev = &pdev->dev;
++
++ espi = devm_kzalloc(dev, sizeof(*espi), GFP_KERNEL);
++ if (!espi)
++ return -ENOMEM;
++
++ espi->dev = dev;
++
++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
++ if (rc) {
++		dev_err(dev, "cannot set 64-bit DMA mask\n");
++ return rc;
++ }
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(dev, "cannot get resource\n");
++ return -ENODEV;
++ }
++
++ espi->regs = devm_ioremap_resource(dev, res);
++ if (IS_ERR(espi->regs)) {
++ dev_err(dev, "cannot map registers\n");
++ return PTR_ERR(espi->regs);
++ }
++
++ espi->irq = platform_get_irq(pdev, 0);
++ if (espi->irq < 0) {
++ dev_err(dev, "cannot get IRQ number\n");
++ return -ENODEV;
++ }
++
++ espi->clk = devm_clk_get(dev, NULL);
++ if (IS_ERR(espi->clk)) {
++ dev_err(dev, "cannot get clock control\n");
++ return PTR_ERR(espi->clk);
++ }
++
++ rc = clk_prepare_enable(espi->clk);
++ if (rc) {
++ dev_err(dev, "cannot enable clocks\n");
++ return rc;
++ }
++
++ espi->dev_id = ida_alloc(&ast2700_espi_ida, GFP_KERNEL);
++ if (espi->dev_id < 0) {
++		dev_err(dev, "cannot allocate device ID\n");
++ return espi->dev_id;
++ }
++
++ reg = readl(espi->regs + ESPI_INT_EN);
++ reg &= ~ESPI_INT_EN_RST_DEASSERT;
++ writel(reg, espi->regs + ESPI_INT_EN);
++
++ rc = ast2700_espi_perif_probe(espi);
++ if (rc) {
++ dev_err(dev, "cannot init CH0, rc=%d\n", rc);
++ return rc;
++ }
++
++ rc = ast2700_espi_vw_probe(espi);
++ if (rc) {
++ dev_err(dev, "cannot init CH1, rc=%d\n", rc);
++ goto err_remove_perif;
++ }
++
++ rc = ast2700_espi_oob_probe(espi);
++ if (rc) {
++ dev_err(dev, "cannot init CH2, rc=%d\n", rc);
++ goto err_remove_vw;
++ }
++
++ rc = ast2700_espi_flash_probe(espi);
++ if (rc) {
++ dev_err(dev, "cannot init CH3, rc=%d\n", rc);
++ goto err_remove_oob;
++ }
++
++ rc = devm_request_irq(dev, espi->irq, ast2700_espi_isr, 0, dev_name(dev), espi);
++ if (rc) {
++ dev_err(dev, "cannot request IRQ\n");
++ goto err_remove_flash;
++ }
++
++ reg = readl(espi->regs + ESPI_INT_EN);
++ reg |= ESPI_INT_EN_RST_DEASSERT;
++ writel(reg, espi->regs + ESPI_INT_EN);
++
++ dev_set_drvdata(dev, espi);
++
++ dev_info(dev, "module loaded\n");
++
++ return 0;
++
++err_remove_flash:
++ ast2700_espi_flash_remove(espi);
++err_remove_oob:
++ ast2700_espi_oob_remove(espi);
++err_remove_vw:
++ ast2700_espi_vw_remove(espi);
++err_remove_perif:
++ ast2700_espi_perif_remove(espi);
++
++ return rc;
++}
++
++static int ast2700_espi_remove(struct platform_device *pdev)
++{
++ struct ast2700_espi *espi;
++ struct device *dev;
++ uint32_t reg;
++ int rc;
++
++ dev = &pdev->dev;
++
++ espi = (struct ast2700_espi *)dev_get_drvdata(dev);
++
++ reg = readl(espi->regs + ESPI_INT_EN);
++ reg &= ~ESPI_INT_EN_RST_DEASSERT;
++ writel(reg, espi->regs + ESPI_INT_EN);
++
++ rc = ast2700_espi_perif_remove(espi);
++ if (rc)
++ dev_warn(dev, "cannot remove peripheral channel, rc=%d\n", rc);
++
++ rc = ast2700_espi_vw_remove(espi);
++ if (rc)
++		dev_warn(dev, "cannot remove virtual wire channel, rc=%d\n", rc);
++
++ rc = ast2700_espi_oob_remove(espi);
++ if (rc)
++		dev_warn(dev, "cannot remove out-of-band channel, rc=%d\n", rc);
++
++ rc = ast2700_espi_flash_remove(espi);
++ if (rc)
++		dev_warn(dev, "cannot remove flash channel, rc=%d\n", rc);
++
++ return 0;
++}
++
++static const struct of_device_id ast2700_espi_of_matches[] = {
++ { .compatible = "aspeed,ast2700-espi" },
++ { },
++};
++
++static struct platform_driver ast2700_espi_driver = {
++ .driver = {
++ .name = "ast2700-espi",
++ .of_match_table = ast2700_espi_of_matches,
++ },
++ .probe = ast2700_espi_probe,
++ .remove = ast2700_espi_remove,
++};
++
++module_platform_driver(ast2700_espi_driver);
++
++MODULE_AUTHOR("Chia-Wei Wang <chiawei_wang@aspeedtech.com>");
++MODULE_DESCRIPTION("Control of AST2700 eSPI Device");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/soc/aspeed/ast2700-espi.h b/drivers/soc/aspeed/ast2700-espi.h
+new file mode 100644
+index 000000000..cd1206b36
+--- /dev/null
++++ b/drivers/soc/aspeed/ast2700-espi.h
+@@ -0,0 +1,275 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Copyright 2023 Aspeed Technology Inc.
++ */
++#ifndef _AST2700_ESPI_H_
++#define _AST2700_ESPI_H_
++
++#include <linux/bits.h>
++#include "aspeed-espi-comm.h"
++
++/* global registers */
++#define ESPI_CTRL 0x000
++#define ESPI_STS 0x004
++#define ESPI_INT_STS 0x008
++#define ESPI_INT_STS_RST_DEASSERT BIT(31)
++#define ESPI_INT_STS_RST_ASSERT BIT(30)
++#define ESPI_INT_STS_CH3 BIT(3)
++#define ESPI_INT_STS_CH2 BIT(2)
++#define ESPI_INT_STS_CH1 BIT(1)
++#define ESPI_INT_STS_CH0 BIT(0)
++#define ESPI_INT_EN 0x00c
++#define ESPI_INT_EN_RST_DEASSERT BIT(31)
++#define ESPI_INT_EN_RST_ASSERT BIT(30)
++#define ESPI_DEV_ID 0x010
++#define ESPI_CAP_GEN 0x014
++#define ESPI_CAP_CH0 0x018
++#define ESPI_CAP_CH1 0x01c
++#define ESPI_CAP_CH2 0x020
++#define ESPI_CAP_CH3_0 0x024
++#define ESPI_CAP_CH3_1 0x028
++#define ESPI_DEV_STS 0x030
++#define ESPI_DBG_CTRL 0x034
++#define ESPI_DBG_ADDRL 0x038
++#define ESPI_DBG_ADDRH 0x03c
++#define ESPI_DBG_CMD 0x040
++#define ESPI_DBG_RES 0x044
++#define ESPI_CH_ACC_CTRL 0x04c
++#define ESPI_CH_ACC_OFST1 0x050
++#define ESPI_CH_ACC_OFST2 0x054
++#define ESPI_WPROT0 0x0f8
++#define ESPI_WPROT1 0x0fc
++
++/* peripheral channel (ch0) registers */
++#define ESPI_CH0_CTRL 0x100
++#define ESPI_CH0_CTRL_NP_TX_RST BIT(31)
++#define ESPI_CH0_CTRL_NP_RX_RST BIT(30)
++#define ESPI_CH0_CTRL_PC_TX_RST BIT(29)
++#define ESPI_CH0_CTRL_PC_RX_RST BIT(28)
++#define ESPI_CH0_CTRL_NP_TX_DMA_EN BIT(19)
++#define ESPI_CH0_CTRL_PC_TX_DMA_EN BIT(17)
++#define ESPI_CH0_CTRL_PC_RX_DMA_EN BIT(16)
++#define ESPI_CH0_CTRL_MCYC_RD_DIS_WDT BIT(9)
++#define ESPI_CH0_CTRL_MCYC_WR_DIS_WDT BIT(8)
++#define ESPI_CH0_CTRL_MCYC_RD_DIS BIT(6)
++#define ESPI_CH0_CTRL_MCYC_WR_DIS BIT(4)
++#define ESPI_CH0_CTRL_SW_RDY BIT(1)
++#define ESPI_CH0_STS 0x104
++#define ESPI_CH0_INT_STS 0x108
++#define ESPI_CH0_INT_STS_PC_RX_CMPLT BIT(0)
++#define ESPI_CH0_INT_EN 0x10c
++#define ESPI_CH0_INT_EN_PC_RX_CMPLT BIT(0)
++#define ESPI_CH0_PC_RX_DMAL 0x110
++#define ESPI_CH0_PC_RX_DMAH 0x114
++#define ESPI_CH0_PC_RX_CTRL 0x118
++#define ESPI_CH0_PC_RX_CTRL_SERV_PEND BIT(31)
++#define ESPI_CH0_PC_RX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_CH0_PC_RX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_CH0_PC_RX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_CH0_PC_RX_DATA 0x11c
++#define ESPI_CH0_PC_TX_DMAL 0x120
++#define ESPI_CH0_PC_TX_DMAH 0x124
++#define ESPI_CH0_PC_TX_CTRL 0x128
++#define ESPI_CH0_PC_TX_CTRL_TRIG_PEND BIT(31)
++#define ESPI_CH0_PC_TX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_CH0_PC_TX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_CH0_PC_TX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_CH0_PC_TX_DATA 0x12c
++#define ESPI_CH0_NP_TX_DMAL 0x130
++#define ESPI_CH0_NP_TX_DMAH 0x134
++#define ESPI_CH0_NP_TX_CTRL 0x138
++#define ESPI_CH0_NP_TX_CTRL_TRIG_PEND BIT(31)
++#define ESPI_CH0_NP_TX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_CH0_NP_TX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_CH0_NP_TX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_CH0_NP_TX_DATA 0x13c
++#define ESPI_CH0_MCYC0_SADDRL 0x140
++#define ESPI_CH0_MCYC0_SADDRH 0x144
++#define ESPI_CH0_MCYC0_TADDRL 0x148
++#define ESPI_CH0_MCYC0_TADDRH 0x14c
++#define ESPI_CH0_MCYC0_MASKL 0x150
++#define ESPI_CH0_MCYC0_MASKL_EN BIT(0)
++#define ESPI_CH0_MCYC0_MASKH 0x154
++#define ESPI_CH0_MCYC1_SADDRL 0x158
++#define ESPI_CH0_MCYC1_SADDRH 0x15c
++#define ESPI_CH0_MCYC1_TADDRL 0x160
++#define ESPI_CH0_MCYC1_TADDRH 0x164
++#define ESPI_CH0_MCYC1_MASKL 0x168
++#define ESPI_CH0_MCYC1_MASKL_EN BIT(0)
++#define ESPI_CH0_MCYC1_MASKH 0x16c
++#define ESPI_CH0_WPROT0 0x1f8
++#define ESPI_CH0_WPROT1 0x1fc
++
++/* virtual wire channel (ch1) registers */
++#define ESPI_CH1_CTRL 0x200
++#define ESPI_CH1_CTRL_GPIO_HW BIT(9)
++#define ESPI_CH1_CTRL_SW_RDY BIT(1)
++#define ESPI_CH1_STS 0x204
++#define ESPI_CH1_INT_STS 0x208
++#define ESPI_CH1_INT_STS_GPIO BIT(2)
++#define ESPI_CH1_INT_EN 0x20c
++#define ESPI_CH1_INT_EN_GPIO BIT(2)
++#define ESPI_CH1_EVT0 0x210
++#define ESPI_CH1_EVT0_INT_EN 0x214
++#define ESPI_CH1_EVT0_INT_T0 0x218
++#define ESPI_CH1_EVT0_INT_T1 0x21c
++#define ESPI_CH1_EVT0_INT_T2 0x220
++#define ESPI_CH1_EVT0_INT_STS 0x224
++#define ESPI_CH1_EVT1 0x230
++#define ESPI_CH1_EVT1_INT_EN 0x234
++#define ESPI_CH1_EVT1_INT_T0 0x238
++#define ESPI_CH1_EVT1_INT_T1 0x23c
++#define ESPI_CH1_EVT1_INT_T2 0x240
++#define ESPI_CH1_EVT1_INT_STS 0x244
++#define ESPI_CH1_GPIO_VAL0 0x250
++#define ESPI_CH1_GPIO_VAL1 0x254
++#define ESPI_CH1_GPIO_DIR0 0x258
++#define ESPI_CH1_GPIO_DIR1		0x25c
++#define ESPI_CH1_GPIO_RSTSEL0 0x260
++#define ESPI_CH1_GPIO_RSTSEL1 0x264
++#define ESPI_CH1_GPIO_GRP 0x268
++#define ESPI_CH1_GP50_DIR0 0x270
++#define ESPI_CH1_GP50_DIR1 0x274
++#define ESPI_CH1_GP50_VAL0 0x278
++#define ESPI_CH1_GP50_VAL1 0x27c
++#define ESPI_CH1_SW_INT 0x280
++#define ESPI_CH1_INT_RSTSEL0 0x284
++#define ESPI_CH1_INT_RSTSEL1 0x288
++#define ESPI_CH1_WPROT0 0x2f8
++#define ESPI_CH1_WPROT1 0x2fc
++
++/* out-of-band channel (ch2) registers */
++#define ESPI_CH2_CTRL 0x300
++#define ESPI_CH2_CTRL_TX_RST BIT(31)
++#define ESPI_CH2_CTRL_RX_RST BIT(30)
++#define ESPI_CH2_CTRL_TX_DMA_EN BIT(17)
++#define ESPI_CH2_CTRL_RX_DMA_EN BIT(16)
++#define ESPI_CH2_CTRL_SW_RDY BIT(4)
++#define ESPI_CH2_STS 0x304
++#define ESPI_CH2_INT_STS 0x308
++#define ESPI_CH2_INT_STS_RX_CMPLT BIT(0)
++#define ESPI_CH2_INT_EN 0x30c
++#define ESPI_CH2_INT_EN_RX_CMPLT BIT(0)
++#define ESPI_CH2_RX_DMAL 0x310
++#define ESPI_CH2_RX_DMAH 0x314
++#define ESPI_CH2_RX_CTRL 0x318
++#define ESPI_CH2_RX_CTRL_SERV_PEND BIT(31)
++#define ESPI_CH2_RX_CTRL_PEC BIT(24)
++#define ESPI_CH2_RX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_CH2_RX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_CH2_RX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_CH2_RX_DATA 0x31c
++#define ESPI_CH2_TX_DMAL 0x320
++#define ESPI_CH2_TX_DMAH 0x324
++#define ESPI_CH2_TX_CTRL 0x328
++#define ESPI_CH2_TX_CTRL_TRIG_PEND BIT(31)
++#define ESPI_CH2_TX_CTRL_PEC BIT(24)
++#define ESPI_CH2_TX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_CH2_TX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_CH2_TX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_CH2_TX_DATA 0x32c
++#define ESPI_CH2_RX_DESC_EPTR 0x330
++#define ESPI_CH2_RX_DESC_RPTR 0x334
++#define ESPI_CH2_RX_DESC_RPTR_UPDATE BIT(31)
++#define ESPI_CH2_RX_DESC_RPTR_RP GENMASK(11, 0)
++#define ESPI_CH2_RX_DESC_WPTR 0x338
++#define ESPI_CH2_RX_DESC_WPTR_VALID BIT(31)
++#define ESPI_CH2_RX_DESC_WPTR_SP GENMASK(27, 16)
++#define ESPI_CH2_RX_DESC_WPTR_WP GENMASK(11, 0)
++#define ESPI_CH2_RX_DESC_TMOUT 0x33c
++#define ESPI_CH2_TX_DESC_EPTR 0x340
++#define ESPI_CH2_TX_DESC_RPTR 0x344
++#define ESPI_CH2_TX_DESC_RPTR_UPT BIT(31)
++#define ESPI_CH2_TX_DESC_WPTR 0x348
++#define ESPI_CH2_TX_DESC_WPTR_VALID BIT(31)
++#define ESPI_CH2_WPROT0 0x3f8
++#define ESPI_CH2_WPROT1 0x3fc
++
++/* flash channel (ch3) registers */
++#define ESPI_CH3_CTRL 0x400
++#define ESPI_CH3_CTRL_TX_RST BIT(31)
++#define ESPI_CH3_CTRL_RX_RST BIT(30)
++#define ESPI_CH3_CTRL_TX_DMA_EN BIT(17)
++#define ESPI_CH3_CTRL_RX_DMA_EN BIT(16)
++#define ESPI_CH3_CTRL_EDAF_MODE GENMASK(9, 8)
++#define ESPI_CH3_CTRL_SW_RDY BIT(5)
++#define ESPI_CH3_STS 0x404
++#define ESPI_CH3_INT_STS 0x408
++#define ESPI_CH3_INT_STS_RX_CMPLT BIT(0)
++#define ESPI_CH3_INT_EN 0x40c
++#define ESPI_CH3_INT_EN_RX_CMPLT BIT(0)
++#define ESPI_CH3_RX_DMAL 0x410
++#define ESPI_CH3_RX_DMAH 0x414
++#define ESPI_CH3_RX_CTRL 0x418
++#define ESPI_CH3_RX_CTRL_SERV_PEND BIT(31)
++#define ESPI_CH3_RX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_CH3_RX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_CH3_RX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_CH3_RX_DATA 0x41c
++#define ESPI_CH3_TX_DMAL 0x420
++#define ESPI_CH3_TX_DMAH 0x424
++#define ESPI_CH3_TX_CTRL 0x428
++#define ESPI_CH3_TX_CTRL_TRIG_PEND BIT(31)
++#define ESPI_CH3_TX_CTRL_LEN GENMASK(23, 12)
++#define ESPI_CH3_TX_CTRL_TAG GENMASK(11, 8)
++#define ESPI_CH3_TX_CTRL_CYC GENMASK(7, 0)
++#define ESPI_CH3_TX_DATA 0x42c
++#define ESPI_CH3_EDAF_TADDRL 0x430
++#define ESPI_CH3_EDAF_TADDRH 0x434
++#define ESPI_CH3_EDAF_MASKL 0x438
++#define ESPI_CH3_EDAF_MASKH 0x43c
++#define ESPI_CH3_WPROT0 0x4f8
++#define ESPI_CH3_WPROT1 0x4fc
++
++/* eDAF filter registers */
++#define ESPI_EDAF_FLTR_SADDR0 0x510
++#define ESPI_EDAF_FLTR_EADDR0 0x514
++#define ESPI_EDAF_FLTR_SADDR1 0x518
++#define ESPI_EDAF_FLTR_EADDR1 0x51c
++#define ESPI_EDAF_FLTR_SADDR2 0x520
++#define ESPI_EDAF_FLTR_EADDR2 0x524
++#define ESPI_EDAF_FLTR_SADDR3 0x528
++#define ESPI_EDAF_FLTR_EADDR3 0x52c
++#define ESPI_EDAF_FLTR_SADDR4 0x530
++#define ESPI_EDAF_FLTR_EADDR4 0x534
++#define ESPI_EDAF_FLTR_SADDR5 0x538
++#define ESPI_EDAF_FLTR_EADDR5 0x53c
++#define ESPI_EDAF_FLTR_SADDR6 0x540
++#define ESPI_EDAF_FLTR_EADDR6 0x544
++#define ESPI_EDAF_FLTR_SADDR7 0x548
++#define ESPI_EDAF_FLTR_EADDR7 0x54c
++#define ESPI_EDAF_FLTR_SADDR8 0x550
++#define ESPI_EDAF_FLTR_EADDR8 0x554
++#define ESPI_EDAF_FLTR_SADDR9 0x558
++#define ESPI_EDAF_FLTR_EADDR9 0x55c
++#define ESPI_EDAF_FLTR_SADDR10 0x560
++#define ESPI_EDAF_FLTR_EADDR10 0x564
++#define ESPI_EDAF_FLTR_SADDR11 0x568
++#define ESPI_EDAF_FLTR_EADDR11 0x56c
++#define ESPI_EDAF_FLTR_SADDR12 0x570
++#define ESPI_EDAF_FLTR_EADDR12 0x574
++#define ESPI_EDAF_FLTR_SADDR13 0x578
++#define ESPI_EDAF_FLTR_EADDR13 0x57c
++#define ESPI_EDAF_FLTR_SADDR14 0x580
++#define ESPI_EDAF_FLTR_EADDR14 0x584
++#define ESPI_EDAF_FLTR_SADDR15 0x588
++#define ESPI_EDAF_FLTR_EADDR15 0x58c
++#define ESPI_EDAF_WPROT0 0x5f8
++#define ESPI_EDAF_WPROT1 0x5fc
++
++/* MMBI registers */
++#define ESPI_MMBI_CTRL 0x800
++#define ESPI_MMBI_CTRL_INST_NUM GENMASK(6, 4)
++#define ESPI_MMBI_CTRL_EN BIT(0)
++#define ESPI_MMBI_INT_STS 0x808
++#define ESPI_MMBI_INT_EN 0x80c
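++/* Per-instance host read/write pointer pair; instances are spaced 8 bytes apart starting at 0x810 */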
++#define ESPI_MMBI_HOST_RWP(x) (0x810 + ((x) << 3))
++
++enum ast2700_edaf_mode {
++ EDAF_MODE_MIX,
++ EDAF_MODE_SW,
++ EDAF_MODE_HW,
++ EDAF_MODES,
++};
++
++#endif
+diff --git a/drivers/soc/aspeed/ast2700-otp.c b/drivers/soc/aspeed/ast2700-otp.c
+new file mode 100644
+index 000000000..7b50d89e8
+--- /dev/null
++++ b/drivers/soc/aspeed/ast2700-otp.c
+@@ -0,0 +1,567 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright 2024 Aspeed Technology Inc.
++ */
++
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/iopoll.h>
++#include <linux/miscdevice.h>
++#include <linux/slab.h>
++#include <linux/platform_device.h>
++#include <linux/regmap.h>
++#include <linux/spinlock.h>
++#include <linux/uaccess.h>
++#include <linux/mfd/syscon.h>
++#include <linux/of.h>
++#include <linux/module.h>
++#include <asm/io.h>
++#include <uapi/linux/aspeed-otp.h>
++
++static DEFINE_SPINLOCK(otp_state_lock);
++
++/***********************
++ * *
++ * OTP regs definition *
++ * *
++ ***********************/
++#define OTP_REG_SIZE 0x200
++
++#define OTP_PASSWD 0x349fe38a
++#define OTP_CMD_READ 0x23b1e361
++#define OTP_CMD_PROG 0x23b1e364
++#define OTP_CMD_PROG_MULTI 0x23b1e365
++#define OTP_CMD_CMP 0x23b1e363
++#define OTP_CMD_BIST 0x23b1e368
++
++#define OTP_CMD_OFFSET 0x20
++#define OTP_MASTER OTP_M0
++
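++/* Each OTP master owns a 0x20-byte command window; the offsets below are relative to OTP_MASTER's window */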
++#define OTP_KEY 0x0
++#define OTP_CMD (OTP_MASTER * OTP_CMD_OFFSET + 0x4)
++#define OTP_WDATA_0 (OTP_MASTER * OTP_CMD_OFFSET + 0x8)
++#define OTP_WDATA_1 (OTP_MASTER * OTP_CMD_OFFSET + 0xc)
++#define OTP_WDATA_2 (OTP_MASTER * OTP_CMD_OFFSET + 0x10)
++#define OTP_WDATA_3 (OTP_MASTER * OTP_CMD_OFFSET + 0x14)
++#define OTP_STATUS (OTP_MASTER * OTP_CMD_OFFSET + 0x18)
++#define OTP_ADDR (OTP_MASTER * OTP_CMD_OFFSET + 0x1c)
++#define OTP_RDATA (OTP_MASTER * OTP_CMD_OFFSET + 0x20)
++
++#define OTP_DBG00 0x0C4
++#define OTP_DBG01 0x0C8
++#define OTP_MASTER_PID 0x0D0
++#define OTP_ECC_EN 0x0D4
++#define OTP_CMD_LOCK 0x0D8
++#define OTP_SW_RST 0x0DC
++#define OTP_SLV_ID 0x0E0
++#define OTP_PMC_CQ 0x0E4
++#define OTP_FPGA 0x0EC
++#define OTP_CLR_FPGA 0x0F0
++#define OTP_REGION_ROM_PATCH 0x100
++#define OTP_REGION_OTPCFG 0x104
++#define OTP_REGION_OTPSTRAP 0x108
++#define OTP_REGION_OTPSTRAP_EXT 0x10C
++#define OTP_REGION_SECURE0 0x120
++#define OTP_REGION_SECURE0_RANGE 0x124
++#define OTP_REGION_SECURE1 0x128
++#define OTP_REGION_SECURE1_RANGE 0x12C
++#define OTP_REGION_SECURE2 0x130
++#define OTP_REGION_SECURE2_RANGE 0x134
++#define OTP_REGION_SECURE3 0x138
++#define OTP_REGION_SECURE3_RANGE 0x13C
++#define OTP_REGION_USR0 0x140
++#define OTP_REGION_USR0_RANGE 0x144
++#define OTP_REGION_USR1 0x148
++#define OTP_REGION_USR1_RANGE 0x14C
++#define OTP_REGION_USR2 0x150
++#define OTP_REGION_USR2_RANGE 0x154
++#define OTP_REGION_USR3 0x158
++#define OTP_REGION_USR3_RANGE 0x15C
++#define OTP_REGION_CALIPTRA_0 0x160
++#define OTP_REGION_CALIPTRA_0_RANGE 0x164
++#define OTP_REGION_CALIPTRA_1 0x168
++#define OTP_REGION_CALIPTRA_1_RANGE 0x16C
++#define OTP_REGION_CALIPTRA_2 0x170
++#define OTP_REGION_CALIPTRA_2_RANGE 0x174
++#define OTP_REGION_CALIPTRA_3 0x178
++#define OTP_REGION_CALIPTRA_3_RANGE 0x17C
++#define OTP_RBP_SOC_SVN 0x180
++#define OTP_RBP_SOC_KEYRETIRE 0x184
++#define OTP_RBP_CALIP_SVN 0x188
++#define OTP_RBP_CALIP_KEYRETIRE 0x18C
++#define OTP_PUF 0x1A0
++#define OTP_MASTER_ID 0x1B0
++#define OTP_MASTER_ID_EXT 0x1B4
++#define OTP_R_MASTER_ID 0x1B8
++#define OTP_R_MASTER_ID_EXT 0x1BC
++#define OTP_SOC_ECCKEY 0x1C0
++#define OTP_SEC_BOOT_EN 0x1C4
++#define OTP_SOC_KEY 0x1C8
++#define OTP_CALIPTRA_MANU_KEY 0x1CC
++#define OTP_CALIPTRA_OWNER_KEY 0x1D0
++#define OTP_FW_ID_LSB 0x1D4
++#define OTP_FW_ID_MSB 0x1D8
++#define OTP_CALIP_FMC_SVN 0x1DC
++#define OTP_CALIP_RUNTIME_SVN0 0x1E0
++#define OTP_CALIP_RUNTIME_SVN1 0x1E4
++#define OTP_CALIP_RUNTIME_SVN2 0x1E8
++#define OTP_CALIP_RUNTIME_SVN3 0x1EC
++#define OTP_SVN_WLOCK 0x1F0
++#define OTP_INTR_EN 0x200
++#define OTP_INTR_STS 0x204
++#define OTP_INTR_MID 0x208
++#define OTP_INTR_FUNC_INFO 0x20C
++#define OTP_INTR_M_INFO 0x210
++#define OTP_INTR_R_INFO 0x214
++
++#define OTP_PMC 0x400
++#define OTP_DAP 0x500
++
++/* OTP status: [0] */
++#define OTP_STS_IDLE 0x0
++#define OTP_STS_BUSY 0x1
++
++/* OTP cmd status: [7:4] */
++#define OTP_GET_CMD_STS(x) (((x) & 0xF0) >> 4)
++#define OTP_STS_PASS 0x0
++#define OTP_STS_FAIL 0x1
++#define OTP_STS_CMP_FAIL 0x2
++#define OTP_STS_REGION_FAIL 0x3
++#define OTP_STS_MASTER_FAIL 0x4
++
++/* OTP ECC EN */
++#define ECC_ENABLE 0x1
++#define ECC_DISABLE 0x0
++#define ECCBRP_EN BIT(0)
++
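++/* OTP region boundaries, in 16-bit word addresses; total size in bytes is twice the last end address */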
++#define ROM_REGION_START_ADDR 0x0
++#define ROM_REGION_END_ADDR 0x3e0
++#define RBP_REGION_START_ADDR ROM_REGION_END_ADDR
++#define RBP_REGION_END_ADDR 0x400
++#define CONF_REGION_START_ADDR RBP_REGION_END_ADDR
++#define CONF_REGION_END_ADDR 0x420
++#define STRAP_REGION_START_ADDR CONF_REGION_END_ADDR
++#define STRAP_REGION_END_ADDR 0x430
++#define STRAPEXT_REGION_START_ADDR STRAP_REGION_END_ADDR
++#define STRAPEXT_REGION_END_ADDR 0x440
++#define USER_REGION_START_ADDR STRAPEXT_REGION_END_ADDR
++#define USER_REGION_END_ADDR 0x1000
++#define SEC_REGION_START_ADDR USER_REGION_END_ADDR
++#define SEC_REGION_END_ADDR 0x1c00
++#define CAL_REGION_START_ADDR SEC_REGION_END_ADDR
++#define CAL_REGION_END_ADDR 0x1f80
++#define SW_PUF_REGION_START_ADDR CAL_REGION_END_ADDR
++#define SW_PUF_REGION_END_ADDR 0x1fc0
++#define HW_PUF_REGION_START_ADDR SW_PUF_REGION_END_ADDR
++#define HW_PUF_REGION_END_ADDR 0x2000
++
++#define OTP_MEMORY_SIZE (HW_PUF_REGION_END_ADDR * 2)
++
++#define OTP_TIMEOUT_US 10000
++
++/* OTPSTRAP */
++#define OTPSTRAP0_ADDR STRAP_REGION_START_ADDR
++#define OTPSTRAP14_ADDR (OTPSTRAP0_ADDR + 0xe)
++
++#define OTPTOOL_VERSION(a, b, c) (((a) << 24) + ((b) << 12) + (c))
++#define OTPTOOL_VERSION_MAJOR(x) (((x) >> 24) & 0xff)
++#define OTPTOOL_VERSION_PATCHLEVEL(x) (((x) >> 12) & 0xfff)
++#define OTPTOOL_VERSION_SUBLEVEL(x) ((x) & 0xfff)
++#define OTPTOOL_COMPT_VERSION 2
++
++enum otp_error_code {
++ OTP_SUCCESS,
++ OTP_READ_FAIL,
++ OTP_PROG_FAIL,
++ OTP_CMP_FAIL,
++};
++
++enum aspeed_otp_master_id {
++ OTP_M0 = 0,
++ OTP_M1,
++ OTP_M2,
++ OTP_M3,
++ OTP_M4,
++ OTP_M5,
++ OTP_MID_MAX,
++};
++
++struct aspeed_otp {
++ struct miscdevice miscdev;
++ struct device *dev;
++ void __iomem *base;
++ u32 chip_revid0;
++ u32 chip_revid1;
++ bool is_open;
++ int gbl_ecc_en;
++ u8 *data;
++};
++
++enum otp_ioctl_cmds {
++ GET_ECC_STATUS = 1,
++ SET_ECC_ENABLE,
++};
++
++enum otp_ecc_codes {
++ OTP_ECC_MISMATCH = -1,
++ OTP_ECC_DISABLE = 0,
++ OTP_ECC_ENABLE = 1,
++};
++
++static void otp_unlock(struct device *dev)
++{
++ struct aspeed_otp *ctx = dev_get_drvdata(dev);
++
++ writel(OTP_PASSWD, ctx->base + OTP_KEY);
++}
++
++static void otp_lock(struct device *dev)
++{
++ struct aspeed_otp *ctx = dev_get_drvdata(dev);
++
++ writel(0x1, ctx->base + OTP_KEY);
++}
++
++static int wait_complete(struct device *dev)
++{
++ struct aspeed_otp *ctx = dev_get_drvdata(dev);
++ int ret;
++ u32 val;
++
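++ /* Poll OTP_STATUS until the engine returns to idle (0) or OTP_TIMEOUT_US elapses */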
++ ret = readl_poll_timeout(ctx->base + OTP_STATUS, val, (val == 0x0),
++ 1, OTP_TIMEOUT_US);
++ if (ret)
++ dev_warn(dev, "timeout. sts:0x%x\n", val);
++
++ return ret;
++}
++
++static int otp_read_data(struct aspeed_otp *ctx, u32 offset, u16 *data)
++{
++ struct device *dev = ctx->dev;
++ int ret;
++
++ writel(ctx->gbl_ecc_en, ctx->base + OTP_ECC_EN);
++ writel(offset, ctx->base + OTP_ADDR);
++ writel(OTP_CMD_READ, ctx->base + OTP_CMD);
++ ret = wait_complete(dev);
++ if (ret)
++ return OTP_READ_FAIL;
++
++ data[0] = readl(ctx->base + OTP_RDATA);
++
++ return 0;
++}
++
++int otp_prog_data(struct aspeed_otp *ctx, u32 offset, u16 data)
++{
++ struct device *dev = ctx->dev;
++ int ret;
++
++ writel(ctx->gbl_ecc_en, ctx->base + OTP_ECC_EN);
++ writel(offset, ctx->base + OTP_ADDR);
++ writel(data, ctx->base + OTP_WDATA_0);
++ writel(OTP_CMD_PROG, ctx->base + OTP_CMD);
++ ret = wait_complete(dev);
++ if (ret)
++ return OTP_PROG_FAIL;
++
++ return 0;
++}
++
++int otp_prog_multi_data(struct aspeed_otp *ctx, u32 offset, u32 *data, int count)
++{
++ struct device *dev = ctx->dev;
++ int ret;
++
++ writel(ctx->gbl_ecc_en, ctx->base + OTP_ECC_EN);
++ writel(offset, ctx->base + OTP_ADDR);
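++ /* Load up to four words into OTP_WDATA_0..OTP_WDATA_3 before issuing the multi-word program command */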
++ for (int i = 0; i < count; i++)
++ writel(data[i], ctx->base + OTP_WDATA_0 + 4 * i);
++
++ writel(OTP_CMD_PROG_MULTI, ctx->base + OTP_CMD);
++ ret = wait_complete(dev);
++ if (ret)
++ return OTP_PROG_FAIL;
++
++ return 0;
++}
++
++static int aspeed_otp_read(struct aspeed_otp *ctx, int offset,
++ void *buf, int size)
++{
++ struct device *dev = ctx->dev;
++ u16 *data = buf;
++ int ret = 0;
++
++ otp_unlock(dev);
++ for (int i = 0; i < size; i++) {
++ ret = otp_read_data(ctx, offset + i, data + i);
++ if (ret) {
++ dev_warn(ctx->dev, "read failed\n");
++ break;
++ }
++ }
++
++ otp_lock(dev);
++ return ret;
++}
++
++static int aspeed_otp_write(struct aspeed_otp *ctx, int offset,
++ const void *buf, int size)
++{
++ struct device *dev = ctx->dev;
++ u32 *data32 = (u32 *)buf;
++ u16 *data = (u16 *)buf;
++ int ret;
++
++ otp_unlock(dev);
++
++ if (size == 1)
++ ret = otp_prog_data(ctx, offset, data[0]);
++ else
++ ret = otp_prog_multi_data(ctx, offset, data32, size / 2);
++
++ if (ret)
++ dev_warn(ctx->dev, "prog failed\n");
++
++ otp_lock(dev);
++ return ret;
++}
++
++static int aspeed_otp_ecc_en(struct aspeed_otp *ctx)
++{
++ struct device *dev = ctx->dev;
++ int ret = 0;
++
++ /* Check whether ECC is already enabled */
++ if (ctx->gbl_ecc_en == 1)
++ return 0;
++
++ otp_unlock(dev);
++
++ /* enable cfg ecc */
++ ret = otp_prog_data(ctx, OTPSTRAP14_ADDR, 0x1);
++ if (ret) {
++ dev_warn(dev, "%s: prog failed\n", __func__);
++ goto end;
++ }
++
++ ctx->gbl_ecc_en = 1;
++end:
++ otp_lock(dev);
++
++ return ret;
++}
++
++static long aspeed_otp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ struct miscdevice *c = file->private_data;
++ struct aspeed_otp *ctx = container_of(c, struct aspeed_otp, miscdev);
++ void __user *argp = (void __user *)arg;
++ struct otp_revid revid;
++ struct otp_read rdata;
++ struct otp_prog pdata;
++ int ret = 0;
++
++ switch (cmd) {
++ case ASPEED_OTP_READ_DATA:
++ if (copy_from_user(&rdata, argp, sizeof(struct otp_read)))
++ return -EFAULT;
++
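++ /* rdata.len counts 16-bit OTP words; rdata.len * 2 bytes are copied back to the caller */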
++ if (rdata.len > OTP_MEMORY_SIZE / 2)
++ return -EINVAL;
++
++ ret = aspeed_otp_read(ctx, rdata.offset, ctx->data, rdata.len);
++ if (ret)
++ return -EFAULT;
++
++ if (copy_to_user(rdata.data, ctx->data, rdata.len * 2))
++ return -EFAULT;
++
++ break;
++
++ case ASPEED_OTP_PROG_DATA:
++ if (copy_from_user(&pdata, argp, sizeof(struct otp_prog)))
++ return -EFAULT;
++
++ ret = aspeed_otp_write(ctx, pdata.w_offset, pdata.data, pdata.len);
++ break;
++
++ case ASPEED_OTP_GET_ECC:
++ if (copy_to_user(argp, &ctx->gbl_ecc_en, sizeof(ctx->gbl_ecc_en)))
++ return -EFAULT;
++ break;
++
++ case ASPEED_OTP_SET_ECC:
++ ret = aspeed_otp_ecc_en(ctx);
++ break;
++
++ case ASPEED_OTP_GET_REVID:
++ revid.revid0 = ctx->chip_revid0;
++ revid.revid1 = ctx->chip_revid1;
++ if (copy_to_user(argp, &revid, sizeof(struct otp_revid)))
++ return -EFAULT;
++ break;
++ default:
++ dev_warn(ctx->dev, "cmd 0x%x is not supported\n", cmd);
++ break;
++ }
++
++ return ret;
++}
++
++static int aspeed_otp_ecc_init(struct device *dev)
++{
++ struct aspeed_otp *ctx = dev_get_drvdata(dev);
++ int ret;
++ u32 val;
++
++ otp_unlock(dev);
++
++ /* Check cfg_ecc_en */
++ writel(0, ctx->base + OTP_ECC_EN);
++ writel(OTPSTRAP14_ADDR, ctx->base + OTP_ADDR);
++ writel(OTP_CMD_READ, ctx->base + OTP_CMD);
++ ret = wait_complete(dev);
++ if (ret)
++ return OTP_READ_FAIL;
++
++ val = readl(ctx->base + OTP_RDATA);
++ if (val & 0x1)
++ ctx->gbl_ecc_en = 0x1;
++ else
++ ctx->gbl_ecc_en = 0x0;
++
++ otp_lock(dev);
++
++ return 0;
++}
++
++static int aspeed_otp_open(struct inode *inode, struct file *file)
++{
++ struct miscdevice *c = file->private_data;
++ struct aspeed_otp *ctx = container_of(c, struct aspeed_otp, miscdev);
++
++ spin_lock(&otp_state_lock);
++
++ if (ctx->is_open) {
++ spin_unlock(&otp_state_lock);
++ return -EBUSY;
++ }
++
++ ctx->is_open = true;
++
++ spin_unlock(&otp_state_lock);
++
++ return 0;
++}
++
++static int aspeed_otp_release(struct inode *inode, struct file *file)
++{
++ struct miscdevice *c = file->private_data;
++ struct aspeed_otp *ctx = container_of(c, struct aspeed_otp, miscdev);
++
++ spin_lock(&otp_state_lock);
++
++ ctx->is_open = false;
++
++ spin_unlock(&otp_state_lock);
++
++ return 0;
++}
++
++static const struct file_operations otp_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = aspeed_otp_ioctl,
++ .open = aspeed_otp_open,
++ .release = aspeed_otp_release,
++};
++
++static const struct of_device_id aspeed_otp_of_matches[] = {
++ { .compatible = "aspeed,ast2700-otp" },
++ { }
++};
++MODULE_DEVICE_TABLE(of, aspeed_otp_of_matches);
++
++static int aspeed_otp_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct regmap *scu0, *scu1;
++ struct aspeed_otp *priv;
++ struct resource *res;
++ int rc;
++
++ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
++ if (!priv)
++ return -ENOMEM;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(&pdev->dev, "cannot get IORESOURCE_MEM\n");
++ return -ENOENT;
++ }
++
++ priv->base = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(priv->base))
++ return PTR_ERR(priv->base);
++
++ scu0 = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,scu0");
++ scu1 = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,scu1");
++ if (IS_ERR(scu0) || IS_ERR(scu1)) {
++ dev_err(dev, "failed to find SCU regmap\n");
++ return IS_ERR(scu0) ? PTR_ERR(scu0) : PTR_ERR(scu1);
++ }
++
++ regmap_read(scu0, 0x0, &priv->chip_revid0);
++ regmap_read(scu1, 0x0, &priv->chip_revid1);
++
++ priv->dev = dev;
++ dev_set_drvdata(dev, priv);
++
++ /* OTP ECC init */
++ rc = aspeed_otp_ecc_init(dev);
++ if (rc)
++ return -EIO;
++
++ priv->data = kmalloc(OTP_MEMORY_SIZE, GFP_KERNEL);
++ if (!priv->data)
++ return -ENOMEM;
++
++ /* Set up the miscdevice */
++ priv->miscdev.minor = MISC_DYNAMIC_MINOR;
++ priv->miscdev.name = "aspeed-otp";
++ priv->miscdev.fops = &otp_fops;
++
++ /* Register the device */
++ rc = misc_register(&priv->miscdev);
++ if (rc) {
++ dev_err(dev, "Unable to register device\n");
++ kfree(priv->data);
++ return rc;
++ }
++
++ dev_info(dev, "Aspeed OTP driver successfully registered\n");
++
++ return 0;
++}
++
++static int aspeed_otp_remove(struct platform_device *pdev)
++{
++ struct aspeed_otp *ctx = dev_get_drvdata(&pdev->dev);
++
++ misc_deregister(&ctx->miscdev);
++ kfree(ctx->data);
++
++ return 0;
++}
++
++static struct platform_driver aspeed_otp_driver = {
++ .probe = aspeed_otp_probe,
++ .remove = aspeed_otp_remove,
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .of_match_table = aspeed_otp_of_matches,
++ },
++};
++
++module_platform_driver(aspeed_otp_driver);
++
++MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("ASPEED OTP Driver");
+diff --git a/drivers/soc/aspeed/rvas/Kconfig b/drivers/soc/aspeed/rvas/Kconfig
+new file mode 100644
+index 000000000..ef02e1b76
+--- /dev/null
++++ b/drivers/soc/aspeed/rvas/Kconfig
+@@ -0,0 +1,9 @@
++menu "ASPEED RVAS drivers"
++
++config ASPEED_RVAS
++ tristate "ASPEED RVAS driver"
++ default n
++ help
++ Driver for the ASPEED RVAS engine, which provides hardware-assisted
++ video frame capture (tile snoop, tile fetch, and RLE encoding).
++
++endmenu
+diff --git a/drivers/soc/aspeed/rvas/Makefile b/drivers/soc/aspeed/rvas/Makefile
+new file mode 100644
+index 000000000..1cccd7e37
+--- /dev/null
++++ b/drivers/soc/aspeed/rvas/Makefile
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_ASPEED_RVAS) += rvas.o
++rvas-y := video_main.o hardware_engines.o video_engine.o
++
+diff --git a/drivers/soc/aspeed/rvas/hardware_engines.c b/drivers/soc/aspeed/rvas/hardware_engines.c
+new file mode 100644
+index 000000000..25b644044
+--- /dev/null
++++ b/drivers/soc/aspeed/rvas/hardware_engines.c
+@@ -0,0 +1,2203 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * File Name : hardware_engines.c
++ * Description : AST2600 frame grabber hardware engines
++ *
++ * Copyright (C) 2019-2021 ASPEED Technology Inc. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/dma-mapping.h>
++#include <linux/mm.h>
++#include <asm/cacheflush.h>
++#include <linux/time.h>
++
++#include "hardware_engines.h"
++#include "video.h"
++#include "video_debug.h"
++
++static u32 dwBucketSizeRegOffset[BSE_MAX_BUCKET_SIZE_REGS] = {
++ 0x20, 0x24, 0x28, 0x2c, 0x30, 0x34, 0x38, 0x3c,
++ 0x40, 0x44, 0x48, 0x4c, 0x50, 0x54, 0x58, 0x5c
++};
++static u32 arrBuckSizeRegIndex[16] = {
++ 3, 5, 8, 6, 1, 7, 11, 10, 14, 13, 2, 4, 9, 12, 0, 15
++};
++
++static struct Resolution resTable1[0x3B - 0x30 + 1] = {
++ { 800, 600 }, { 1024, 768 }, { 1280, 1024 }, { 1600, 1200 },
++ { 1920, 1200 }, { 1280, 800 }, { 1440, 900 }, { 1680, 1050 },
++ { 1920, 1080 }, { 1366, 768 }, { 1600, 900 }, { 1152, 864 },
++};
++
++static struct Resolution resTable2[0x52 - 0x50 + 1] = {
++ { 320, 240 }, { 400, 300 }, { 512, 384 },
++};
++
++static void prepare_bse_descriptor_2(struct Descriptor *pDAddress,
++ phys_addr_t source_addr,
++ phys_addr_t dest_addr,
++ bool bNotLastEntry,
++ u16 wStride,
++ u8 bytesPerPixel,
++ u32 dwFetchWidthPixels,
++ u32 dwFetchHeight,
++ bool bInterrupt,
++ u8 byBuckSizeRegIndex);
++
++static struct BSEAggregateRegister set_up_bse_bucket_2(struct AstRVAS *pAstRVAS,
++ u8 *abyBitIndexes,
++ u8 byTotalBucketCount,
++ u8 byBSBytesPerPixel,
++ u32 dwFetchWidthPixels,
++ u32 dwFetchHeight,
++ u32 dwBucketSizeIndex);
++
++static inline u32 ast_video_read(void __iomem *video_reg_base, u32 reg)
++{
++ u32 val = readl(video_reg_base + reg);
++
++ return val;
++}
++
++// Get color depth
++static void ast_video_get_color_mode(u8 byNewColorMode, struct VideoGeometry *pvg)
++{
++ switch (byNewColorMode) {
++ case MODE_EGA:
++ pvg->gmt = VGAGraphicsMode; //4bpp, mode 12/mode 6A
++ pvg->byBitsPerPixel = 4;
++ break;
++
++ case MODE_VGA:
++ pvg->gmt = VGAGraphicsMode; //mode 13
++ pvg->byBitsPerPixel = 8;
++ break;
++
++ case MODE_BPP16:
++ pvg->gmt = AGAGraphicsMode;
++ pvg->byBitsPerPixel = 16;
++ break;
++
++ case MODE_BPP32:
++ pvg->gmt = AGAGraphicsMode;
++ pvg->byBitsPerPixel = 32;
++ break;
++
++ case MODE_TEXT:
++ pvg->gmt = TextMode;
++ pvg->byBitsPerPixel = 0;
++ break;
++
++ case MODE_CGA:
++ break;
++
++ default:
++ pvg->byBitsPerPixel = 8;
++ break;
++ }
++}
++
++//Mode ID mapping - use ID as index to the resolution table
++static void ast_video_get_indexed_mode(struct ModeInfo *pModeInfo, struct VideoGeometry *pvg)
++{
++ u8 byModeIndex = (pModeInfo->byModeID & 0xf0);
++
++ HW_ENG_DBG("Mode ID %#x\n", pModeInfo->byModeID);
++ pvg->byModeID = pModeInfo->byModeID;
++
++ if (pModeInfo->byModeID == 0x12) {
++ pvg->wScreenWidth = 640;
++ pvg->wScreenHeight = 480;
++ } else if (byModeIndex == 0x20) {
++ pvg->wScreenWidth = 640;
++ pvg->wScreenHeight = 480;
++ } else if (byModeIndex == 0x30) {
++ pvg->wScreenWidth =
++ resTable1[pModeInfo->byModeID & 0x0f].wWidth;
++ pvg->wScreenHeight =
++ resTable1[pModeInfo->byModeID & 0x0f].wHeight;
++ } else if (byModeIndex == 0x50) {
++ pvg->wScreenWidth =
++ resTable2[pModeInfo->byModeID & 0x03].wWidth;
++ pvg->wScreenHeight =
++ resTable2[pModeInfo->byModeID & 0x03].wHeight;
++ } else if (byModeIndex == 0x60) {
++ pvg->wScreenWidth = 800;
++ pvg->wScreenHeight = 600;
++ } else {
++ HW_ENG_DBG("Mode ID %#x\n", pModeInfo->byModeID);
++ pvg->wScreenWidth = 0;
++ pvg->wScreenHeight = 0;
++ }
++}
++
++//check special modes
++static void ast_video_set_special_modes(struct ModeInfo *pModeInfo, struct AstRVAS *pAstRVAS)
++{
++ u8 byVGACR1 = readb(pAstRVAS->grce_reg_base + GRCE_CRTC_OFFSET + 0x1); //number of chars per line
++ u8 byVGACR7 = readb(pAstRVAS->grce_reg_base + GRCE_CRTC_OFFSET + 0x7);
++ u8 byVGACR12 = readb(pAstRVAS->grce_reg_base + GRCE_CRTC_OFFSET + 0x12);
++ u8 byVGASR1 = readb(pAstRVAS->grce_reg_base + GRCE_SEQ_OFFSET + 0x1);
++ struct VideoGeometry *pvg = &pAstRVAS->current_vg;
++ u32 dwHorizontalDisplayEnd = 0;
++ u32 dwVerticalDisplayEnd = 0;
++
++ dwHorizontalDisplayEnd = (byVGACR1 + 1) << 3;
++ dwVerticalDisplayEnd = (((byVGACR7 & 0x40) << 3)
++ | ((byVGACR7 & 0x2) << 7) | byVGACR12) + 1;
++
++ HW_ENG_DBG("byVGACR1=0x%x,byVGACR7=0x%x,byVGACR12=0x%x\n", byVGACR1,
++ byVGACR7, byVGACR12);
++ HW_ENG_DBG("Mode ID %#x, dwHorizontalDisplayEnd 0x%x, dwVerticalDisplayEnd 0x%x\n",
++ pModeInfo->byModeID, dwHorizontalDisplayEnd,
++ dwVerticalDisplayEnd);
++
++ // set up special mode
++ if (VGAGraphicsMode == pvg->gmt && pvg->byBitsPerPixel == 8) { // mode 13
++ pvg->wScreenHeight = 200;
++ pvg->wScreenWidth = 320;
++ pvg->wStride = 320;
++ } else if (TextMode == pvg->gmt) { // text mode
++ pvg->wScreenHeight = dwVerticalDisplayEnd;
++ pvg->wScreenWidth = dwHorizontalDisplayEnd;
++
++ if (!(byVGASR1 & 0x1))
++ pvg->wScreenWidth += (byVGACR1 + 1);
++
++ pvg->wStride = pvg->wScreenWidth;
++ } else if (pvg->byBitsPerPixel == 4) {
++ pvg->wStride = pvg->wScreenWidth;
++ }
++}
++
++static u32 ast_video_get_pitch(struct AstRVAS *pAstRVAS)
++{
++ u32 dwPitch = 0;
++ u8 byVGACR13 = 0;
++ u8 byVGACR14 = 0;
++ u8 byVGACR17 = 0;
++ u16 wOffsetUpper = 0;
++ u16 wOffset = 0;
++ struct VideoGeometry *pvg = &pAstRVAS->current_vg;
++
++ //read actual register
++ byVGACR13 = readb(pAstRVAS->grce_reg_base + GRCE_CRTC_OFFSET + 0x13);
++ byVGACR14 = readb(pAstRVAS->grce_reg_base + GRCE_CRTC_OFFSET + 0x14);
++ byVGACR17 = readb(pAstRVAS->grce_reg_base + GRCE_CRTC_OFFSET + 0x17);
++ wOffsetUpper = readb(pAstRVAS->grce_reg_base + 0xb0);
++
++ wOffset = (wOffsetUpper << 8) | byVGACR13;
++ HW_ENG_DBG("wOffsetUpper= %#x, byVGACR13= %#x, byVGACR14= %#x, byVGACR17= %#x, wOffset= %#x\n",
++ wOffsetUpper, byVGACR13, byVGACR14, byVGACR17, wOffset);
++
++ if (byVGACR14 & 0x40)
++ dwPitch = wOffset << 3; //DW mode
++ else if (byVGACR17 & 0x40)
++ dwPitch = wOffset << 1; //byte mode
++ else
++ dwPitch = wOffset << 2; //word mode
++
++ if (pvg->gmt != TextMode) {
++ u8 byBppPowerOfTwo = 0;
++
++ if (pvg->byBitsPerPixel == 32)
++ byBppPowerOfTwo = 2;
++ else if (pvg->byBitsPerPixel == 16)
++ byBppPowerOfTwo = 1;
++ else if (pvg->byBitsPerPixel == 8)
++ byBppPowerOfTwo = 0;
++ else
++ byBppPowerOfTwo = 3; // 4bpp
++
++ //convert it to logic width in pixel
++ if (pvg->byBitsPerPixel > 4)
++ dwPitch >>= byBppPowerOfTwo;
++ else
++ dwPitch <<= byBppPowerOfTwo;
++ }
++
++ return dwPitch;
++}
++
++void update_video_geometry(struct AstRVAS *ast_rvas)
++{
++ struct ModeInfo *pModeInfo;
++ struct NewModeInfoHeader *pNMIH;
++ struct DisplayEnd *pDE;
++ u8 byNewColorMode = 0;
++ u32 VGA_Scratch_Register_350 = 0; //VIDEO_NEW_MODE_INFO_HEADER
++ u32 VGA_Scratch_Register_354 = 0; //VIDEO_HDE
++ u32 VGA_Scratch_Register_34C = 0; //VIDEO_HDE
++ struct VideoGeometry *cur_vg = &ast_rvas->current_vg;
++
++ VGA_Scratch_Register_350 = ast_video_read(ast_rvas->grce_reg_base,
++ AST_VIDEO_SCRATCH_350);
++ VGA_Scratch_Register_34C = ast_video_read(ast_rvas->grce_reg_base,
++ AST_VIDEO_SCRATCH_34C);
++ VGA_Scratch_Register_354 = ast_video_read(ast_rvas->grce_reg_base,
++ AST_VIDEO_SCRATCH_354);
++
++ pModeInfo = (struct ModeInfo *)(&VGA_Scratch_Register_34C);
++ pNMIH = (struct NewModeInfoHeader *)(&VGA_Scratch_Register_350);
++ pDE = (struct DisplayEnd *)(&VGA_Scratch_Register_354);
++ HW_ENG_DBG("pModeInfo: byColorMode: %#x byModeID: %#x byRefreshRateIndex: %#x byScanLines: %#x\n",
++ pModeInfo->byColorMode, pModeInfo->byModeID,
++ pModeInfo->byRefreshRateIndex, pModeInfo->byScanLines);
++ HW_ENG_DBG("pNMIH: byColorDepth: %#x byDisplayInfo: %#x byMhzPixelClock: %#x byReserved: %#x\n",
++ pNMIH->byColorDepth, pNMIH->byDisplayInfo,
++ pNMIH->byMhzPixelClock, pNMIH->byReserved);
++ HW_ENG_DBG("pDE: HDE: %#x VDE: %#x\n", pDE->HDE, pDE->VDE);
++
++ byNewColorMode = ((pModeInfo->byColorMode) & 0xf0) >> 4;
++ HW_ENG_DBG("byNewColorMode= %#x,byModeID=0x%x\n", byNewColorMode,
++ pModeInfo->byModeID);
++ ast_video_get_color_mode(byNewColorMode, cur_vg);
++
++ if (pNMIH->byDisplayInfo == MODE_GET_INFO_DE) {
++ cur_vg->wScreenWidth = pDE->HDE;
++ cur_vg->wScreenHeight = pDE->VDE;
++ cur_vg->byBitsPerPixel = pNMIH->byColorDepth;
++ cur_vg->byModeID = pModeInfo->byModeID;
++ } else {
++ ast_video_get_indexed_mode(pModeInfo, cur_vg);
++ }
++
++ cur_vg->wStride = (u16)ast_video_get_pitch(ast_rvas);
++ HW_ENG_DBG("Calculated pitch in pixels= %u\n", cur_vg->wStride);
++
++ if (cur_vg->wStride < cur_vg->wScreenWidth)
++ cur_vg->wStride = cur_vg->wScreenWidth;
++
++ HW_ENG_DBG("Before current display width %u, height %u, pitch %u, color depth %u, mode %d\n",
++ cur_vg->wScreenWidth, cur_vg->wScreenHeight,
++ cur_vg->wStride, cur_vg->byBitsPerPixel, cur_vg->gmt);
++
++ if (cur_vg->gmt == TextMode ||
++ (cur_vg->gmt == VGAGraphicsMode && pModeInfo->byModeID == 0x13)) {
++ ast_video_set_special_modes(pModeInfo, ast_rvas);
++ }
++
++ //mode transition
++ if (cur_vg->wScreenHeight < 200 || cur_vg->wScreenWidth < 320)
++ cur_vg->gmt = InvalidMode;
++
++ if (cur_vg->gmt == TextMode) {
++ u8 byVGACR9 = readb(ast_rvas->grce_reg_base + GRCE_CRTC_OFFSET + 0x9);
++ u32 dwCharacterHeight = ((byVGACR9) & 0x1f) + 1;
++
++ HW_ENG_DBG("byModeID=0x%x,dwCharacterHeight=%d\n",
++ cur_vg->byModeID, dwCharacterHeight);
++
++ if (dwCharacterHeight != 8 && dwCharacterHeight != 14 &&
++ dwCharacterHeight != 16)
++ cur_vg->gmt = InvalidMode;
++
++ if (cur_vg->wScreenWidth > 720 || cur_vg->wScreenHeight > 400)
++ cur_vg->gmt = InvalidMode;
++ }
++
++ HW_ENG_DBG("current display width %u, height %u, pitch %u, color depth %u, mode %d\n",
++ cur_vg->wScreenWidth, cur_vg->wScreenHeight,
++ cur_vg->wStride, cur_vg->byBitsPerPixel, cur_vg->gmt);
++}
++
++//check and update current video geometry
++bool video_geometry_change(struct AstRVAS *ast_rvas, u32 dwGRCEStatus)
++{
++ bool b_geometry_changed = false;
++ struct VideoGeometry *cur_vg = &ast_rvas->current_vg;
++ struct VideoGeometry pre_vg;
++
++ memcpy(&pre_vg, cur_vg, sizeof(pre_vg));
++ update_video_geometry(ast_rvas);
++ b_geometry_changed = memcmp(&pre_vg, cur_vg, sizeof(struct VideoGeometry))
++ != 0;
++ HW_ENG_DBG("b_geometry_changed: %d\n", b_geometry_changed);
++ return b_geometry_changed;
++}
++
++void ioctl_get_video_geometry(struct RvasIoctl *ri, struct AstRVAS *ast_rvas)
++{
++ memcpy(&ri->vg, &ast_rvas->current_vg, sizeof(struct VideoGeometry));
++// HW_ENG_DBG("b_geometry_changed: %d\n", b_geometry_changed);
++}
++
++void print_frame_buffer(u32 dwSizeByBytes, struct VGAMemInfo FBInfo)
++{
++ u32 iter = 0;
++ u32 *frame_buffer_base = NULL;
++ u32 dwNumMappedPages = 0;
++
++ dwNumMappedPages = ((dwSizeByBytes + 4095) >> 12);
++ frame_buffer_base = (u32 *)ioremap(FBInfo.qwFBPhysStart, dwNumMappedPages << 12);
++
++ if (frame_buffer_base) {
++ HW_ENG_DBG("==============%s===========\n", __func__);
++
++ for (iter = 0; iter < (dwSizeByBytes >> 2); iter++) {
++ HW_ENG_DBG("0x%x, ", frame_buffer_base[iter]);
++
++ if ((iter % 16) == 0)
++ HW_ENG_DBG("\n");
++ }
++
++ HW_ENG_DBG("===========END=============\n");
++ iounmap((void *)frame_buffer_base);
++ }
++}
++
++void ioctl_get_grc_register(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS)
++{
++ void *virt_add = 0;
++ u32 size = 0;
++
++ HW_ENG_DBG("Start\n");
++ virt_add = get_virt_add_rsvd_mem((u32)ri->rmh, pAstRVAS);
++ size = ri->rmh1_mem_size;
++
++ if (virt_is_valid_rsvd_mem((u32)ri->rmh, size, pAstRVAS)) {
++ memcpy((void *)virt_add,
++ (const void *)(pAstRVAS->grce_reg_base), 0x40);
++ memset((void *)(((u8 *)virt_add) + 0x40), 0x0, 0x20);
++ memcpy((void *)(((u8 *)virt_add) + 0x60),
++ (const void *)(pAstRVAS->grce_reg_base + 0x60),
++ GRCE_SIZE - 0x60);
++ ri->rs = SuccessStatus;
++ } else {
++ ri->rs = InvalidMemoryHandle;
++ }
++}
++
++void ioctl_read_snoop_map(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS)
++{
++ struct ContextTable *pct = get_context_entry(ri->rc, pAstRVAS);
++ void *virt_add = 0;
++ u32 size = 0;
++
++ virt_add = get_virt_add_rsvd_mem((u32)ri->rmh, pAstRVAS);
++ size = ri->rmh_mem_size;
++
++ disable_grce_tse_interrupt(pAstRVAS);
++ HW_ENG_DBG("Start\n");
++
++ if (pct) {
++ if (virt_is_valid_rsvd_mem((u32)ri->rmh, size, pAstRVAS)) {
++ update_all_snoop_context(pAstRVAS);
++ memcpy((void *)virt_add, pct->aqwSnoopMap,
++ sizeof(pct->aqwSnoopMap));
++
++ if (ri->flag) {
++ ///get the context snoop address
++ memset(pct->aqwSnoopMap, 0x00,
++ sizeof(pct->aqwSnoopMap));
++ memset(&pct->sa, 0x00, sizeof(pct->sa));
++ }
++ ri->rs = SuccessStatus;
++ } else {
++ ri->rs = InvalidMemoryHandle;
++ }
++ } else {
++ ri->rs = InvalidContextHandle;
++ }
++
++ enable_grce_tse_interrupt(pAstRVAS);
++}
++
++void ioctl_read_snoop_aggregate(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS)
++{
++ struct ContextTable *pct = get_context_entry(ri->rc, pAstRVAS);
++
++ disable_grce_tse_interrupt(pAstRVAS);
++
++ if (pct) {
++ update_all_snoop_context(pAstRVAS);
++ memcpy(&ri->sa, &pct->sa, sizeof(pct->sa));
++ HW_ENG_DBG("ri->sa.qwCol: %#llx qwRow: %#llx flag: %u\n",
++ ri->sa.qwCol, ri->sa.qwRow, ri->flag);
++
++ if (ri->flag)
++ memset(&pct->sa, 0x00, sizeof(pct->sa));
++
++ ri->rs = SuccessStatus;
++ } else {
++ ri->rs = InvalidContextHandle;
++ HW_ENG_DBG("Invalid Context\n");
++ }
++
++ enable_grce_tse_interrupt(pAstRVAS);
++}
++
++void ioctl_set_tse_tsicr(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS)
++{
++ void __iomem *addrTSICR;
++
++ pAstRVAS->tse_tsicr = ri->tse_counter;
++ addrTSICR = pAstRVAS->fg_reg_base + TSE_TileSnoop_Interrupt_Count;
++ writel(pAstRVAS->tse_tsicr, addrTSICR);// max wait time before interrupt
++ ri->rs = SuccessStatus;
++}
++
++void ioctl_get_tse_tsicr(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS)
++{
++ ri->tse_counter = pAstRVAS->tse_tsicr;
++ ri->rs = SuccessStatus;
++}
++
++// Get the screen offset from the GRC registers
++u32 get_screen_offset(struct AstRVAS *pAstRVAS)
++{
++ u32 dwScreenOffset = 0;
++ void __iomem *addrVGACRC = pAstRVAS->grce_reg_base + GRCE_CRTC + 0xC; // Ch
++ void __iomem *addrVGACRD = pAstRVAS->grce_reg_base + GRCE_CRTC + 0xD; // Dh
++ void __iomem *addrVGACRAF = pAstRVAS->grce_reg_base + GRCE_CRTCEXT + 0x2F;
++
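++ /* The display start address is assembled from CRTCEXT 0x2F (bits 23:16) and CRTC 0x0C/0x0D (bits 15:8/7:0), then scaled to bytes */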
++ if (pAstRVAS->current_vg.gmt == AGAGraphicsMode) {
++ dwScreenOffset = ((readb(addrVGACRAF) << 16) | ((readb(addrVGACRC)) << 8) |
++ (readb(addrVGACRD)));
++ dwScreenOffset *= pAstRVAS->current_vg.byBitsPerPixel >> 3;
++ }
++
++ HW_ENG_DBG("ScreenOffset: %#8.8x\n", dwScreenOffset);
++
++ return dwScreenOffset;
++}
++
++void reset_snoop_engine(struct AstRVAS *pAstRVAS)
++{
++ void __iomem *addr_snoop = pAstRVAS->fg_reg_base + TSE_SnoopMap_Offset;
++ u32 reg_value = 0;
++ u32 iter;
++
++ writel(0x0, pAstRVAS->fg_reg_base + TSE_SnoopCommand_Register_Offset);
++ writel(0x3, pAstRVAS->fg_reg_base + TSE_Status_Register_Offset);
++ reg_value = readl(pAstRVAS->fg_reg_base + TSE_Status_Register_Offset);
++ reg_value = readl(pAstRVAS->fg_reg_base + TSE_CS0Reg);
++ reg_value = readl(pAstRVAS->fg_reg_base + TSE_CS1Reg);
++ reg_value = readl(pAstRVAS->fg_reg_base + TSE_RS0Reg);
++ reg_value = readl(pAstRVAS->fg_reg_base + TSE_RS1Reg);
++
++ //Clear TSRR00 to TSRR126 (TSRR01 to TSRR127), Snoop Map
++ for (iter = 0; iter < 0x80; ++iter) {
++ reg_value = readl(addr_snoop) + 1;
++ writel(reg_value, addr_snoop);
++ }
++
++ reg_value = readl(pAstRVAS->fg_reg_base + TSE_TileCount_Register_Offset);
++}
++
++void set_snoop_engine(bool b_geom_chg, struct AstRVAS *pAstRVAS)
++{
++ void __iomem *tscmd_reg = pAstRVAS->fg_reg_base + TSE_SnoopCommand_Register_Offset;
++ void __iomem *tsfbsa_reg = pAstRVAS->fg_reg_base + TSE_FrameBuffer_Offset;
++ void __iomem *tsulr_reg = pAstRVAS->fg_reg_base + TSE_UpperLimit_Offset;
++ u32 new_tsfbsa = 0;
++ u32 tscmd = 0;
++ u8 byBytesPerPixel = 0x0;
++ u8 byTSCMDBytesPerPixel = 0x0;
++ int cContext;
++ u32 dwStride;
++ struct ContextTable **ppctContextTable = pAstRVAS->ppctContextTable;
++
++ // Calculate Start Address into the Frame Buffer
++ new_tsfbsa = get_screen_offset(pAstRVAS);
++ tscmd = readl(tscmd_reg);
++
++ tscmd &= (1 << TSCMD_INT_ENBL_BIT);
++
++ HW_ENG_DBG("Latest TSFBSA: %#8.8x\n", new_tsfbsa);
++ HW_ENG_DBG("pAstRVAS->current_vg: bpp %u Mode:%#x gmt:%d Width:%u Height:%u Stride:%u\n",
++ pAstRVAS->current_vg.byBitsPerPixel,
++ pAstRVAS->current_vg.byModeID, pAstRVAS->current_vg.gmt,
++ pAstRVAS->current_vg.wScreenWidth,
++ pAstRVAS->current_vg.wScreenHeight,
++ pAstRVAS->current_vg.wStride);
++
++ if (b_geom_chg || (readl(tsfbsa_reg) != new_tsfbsa)) {
++ byBytesPerPixel = pAstRVAS->current_vg.byBitsPerPixel >> 3;
++
++ if (pAstRVAS->current_vg.gmt == VGAGraphicsMode ||
++ pAstRVAS->current_vg.byBitsPerPixel == 4) {
++ byTSCMDBytesPerPixel = 0;
++ } else {
++ switch (byBytesPerPixel) {
++ case 1:
++ byTSCMDBytesPerPixel = 0;
++ break;
++
++ case 2:
++ byTSCMDBytesPerPixel = 1;
++ break;
++
++ case 3:
++ case 4:
++ byTSCMDBytesPerPixel = 2;
++ break;
++ }
++ }
++ dwStride = pAstRVAS->current_vg.wStride;
++
++ if (byBytesPerPixel == 3)
++ dwStride = (dwStride + dwStride + dwStride) >> 2;
++ else if (pAstRVAS->current_vg.byBitsPerPixel == 4)
++ dwStride >>= 1;
++
++ // set TSE SCR
++ // start the tile snoop engine
++ // flip the 15 bit
++ if (!(readl(tscmd_reg) & TSCMD_SCREEN_OWNER))
++ tscmd |= TSCMD_SCREEN_OWNER;
++
++ tscmd |= (dwStride << TSCMD_PITCH_BIT) | (1 << TSCMD_CPT_BIT)
++ | (1 << TSCMD_RPT_BIT)
++ | (byTSCMDBytesPerPixel << TSCMD_BPP_BIT)
++ | (1 << TSCMD_VGA_MODE_BIT) | (1 << TSCMD_TSE_ENBL_BIT);
++ HW_ENG_DBG("tscmd: %#8.8x\n", tscmd);
++ // set the TSFBSA & TSULR
++ writel(new_tsfbsa, tsfbsa_reg);
++ writel(BSE_UPPER_LIMIT, tsulr_reg);
++ writel(tscmd, tscmd_reg);
++ //reset snoop information
++ get_snoop_map_data(pAstRVAS);
++ memset((void *)pAstRVAS->accrued_sm, 0,
++ sizeof(pAstRVAS->accrued_sm));
++ memset((void *)&pAstRVAS->accrued_sa, 0,
++ sizeof(pAstRVAS->accrued_sa));
++
++ for (cContext = 0; cContext < MAX_NUM_CONTEXT; cContext++) {
++ if (ppctContextTable[cContext]) {
++ memset(ppctContextTable[cContext]->aqwSnoopMap,
++ 0,
++ sizeof(ppctContextTable[cContext]->aqwSnoopMap));
++ memset(&ppctContextTable[cContext]->sa, 0,
++ sizeof(ppctContextTable[cContext]->sa));
++ }
++ } // for each context
++ } // if
++}
++
++//
++// ReadSnoopMap to Clear
++//
++void get_snoop_map_data(struct AstRVAS *pAstRVAS)
++{
++ u32 dwSMDword;
++ u64 aqwSnoopMap[SNOOP_MAP_QWORD_COUNT];
++ //u32 dw_iter;
++
++ get_snoop_aggregate(pAstRVAS);
++ memcpy((void *)aqwSnoopMap,
++ (const void *)(pAstRVAS->fg_reg_base + TSE_SnoopMap_Offset),
++ sizeof(aqwSnoopMap));
++
++ //HW_ENG_DBG("Snoop Map:\n");
++ //HW_ENG_DBG("==========\n");
++
++ //for (dw_iter = 0; dw_iter < SNOOP_MAP_QWORD_COUNT; ++dw_iter)
++ //HW_ENG_DBG("[%2u]: 0x%16.16llx\n", dw_iter, aqwSnoopMap[dw_iter]);
++
++ //HW_ENG_DBG("==========\n\n");
++
++ // copy 512 snoop map
++ for (dwSMDword = 0; dwSMDword < SNOOP_MAP_QWORD_COUNT; ++dwSMDword)
++ pAstRVAS->accrued_sm[dwSMDword] |= aqwSnoopMap[dwSMDword];
++}
++
++void get_snoop_aggregate(struct AstRVAS *pAstRVAS)
++{
++ u64 qwRow = 0;
++ u64 qwCol = 0;
++
++ // copy the snoop aggregate,row 64 bits
++ qwRow = readl(pAstRVAS->fg_reg_base + TSE_RS1Reg);
++ qwRow = qwRow << 32;
++ qwRow |= readl(pAstRVAS->fg_reg_base + TSE_RS0Reg);
++
++ // column
++ qwCol = readl(pAstRVAS->fg_reg_base + TSE_CS1Reg);
++ qwCol = qwCol << 32;
++ qwCol |= readl(pAstRVAS->fg_reg_base + TSE_CS0Reg);
++
++ HW_ENG_DBG("Snoop Aggregate Row: 0x%16.16llx\n", qwRow);
++ HW_ENG_DBG("Snoop Aggregate Col: 0x%16.16llx\n", qwCol);
++ HW_ENG_DBG("DRIVER:: %s\n", __func__);
++ HW_ENG_DBG("DRIVER:: row [%#llx]\n", qwRow);
++ HW_ENG_DBG("DRIVER:: col [%#llx]\n", qwCol);
++
++ pAstRVAS->accrued_sa.qwCol |= qwCol;
++ pAstRVAS->accrued_sa.qwRow |= qwRow;
++}
++
++u64 reinterpret_32bpp_snoop_row_as_24bpp(u64 theSnoopRow)
++{
++ u64 qwResult = 0;
++ u64 qwSourceBit = 1;
++ u32 cSourceBit;
++ u64 qwBitResult = 0;
++
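++ /* Remap each dirty 32bpp tile column onto the 24bpp tile grid: scale the bit position by 128/96 and mark the two overlapping tiles */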
++ for (cSourceBit = 0; cSourceBit < 64; ++cSourceBit) {
++ if (theSnoopRow & qwSourceBit) {
++ qwBitResult = ((cSourceBit * 128) / 96);
++ qwResult |= (((u64)3) << qwBitResult);
++ }
++
++ qwSourceBit <<= 1;
++ }
++
++ return qwResult;
++}
++
++//
++//one tile: 32x32,
++//
++void convert_snoop_map(struct AstRVAS *pAstRVAS)
++{
++ u32 dwAllRows = (pAstRVAS->current_vg.wScreenHeight + 31) >> 5;
++ u32 cRow;
++
++ for (cRow = 0; cRow < dwAllRows; ++cRow)
++ pAstRVAS->accrued_sm[cRow] =
++ reinterpret_32bpp_snoop_row_as_24bpp(pAstRVAS->accrued_sm[cRow]);
++
++ pAstRVAS->accrued_sa.qwCol =
++ reinterpret_32bpp_snoop_row_as_24bpp(pAstRVAS->accrued_sa.qwCol);
++}
++
++void update_all_snoop_context(struct AstRVAS *pAstRVAS)
++{
++ u32 cContext;
++ u32 iSMDword;
++ struct ContextTable **ppctContextTable = pAstRVAS->ppctContextTable;
++
++ if (pAstRVAS->current_vg.byBitsPerPixel == 24)
++ convert_snoop_map(pAstRVAS);
++
++ for (cContext = 0; cContext < MAX_NUM_CONTEXT; cContext++)
++ if (ppctContextTable[cContext]) {
++ for (iSMDword = 0; iSMDword < SNOOP_MAP_QWORD_COUNT;
++ iSMDword++)
++ ppctContextTable[cContext]->aqwSnoopMap[iSMDword] |=
++ pAstRVAS->accrued_sm[iSMDword];
++
++ ppctContextTable[cContext]->sa.qwRow |=
++ pAstRVAS->accrued_sa.qwRow;
++ ppctContextTable[cContext]->sa.qwCol |=
++ pAstRVAS->accrued_sa.qwCol;
++ }
++
++ //reset snoop map and aggregate
++ memset((void *)pAstRVAS->accrued_sm, 0, sizeof(pAstRVAS->accrued_sm));
++ memset((void *)&pAstRVAS->accrued_sa, 0x00,
++ sizeof(pAstRVAS->accrued_sa));
++}
++
++static u32 setup_tfe_cr(struct FetchOperation *pfo)
++{
++ u32 dwTFECR = 0;
++
++ if (pfo->bEnableRLE)
++ dwTFECR = (pfo->byRLETripletCode << 24)
++ | (pfo->byRLERepeatCode << 16);
++
++ dwTFECR &= TFCTL_DESCRIPTOR_IN_DDR_MASK;
++ dwTFECR |= 1;
++ dwTFECR |= 1 << 1; // enabled IRQ
++ HW_ENG_DBG("dwTFECR: %#x\n", dwTFECR);
++ return dwTFECR;
++}
++
++static void start_skip_mode_skip(struct Descriptor *desc_virt,
++ phys_addr_t desc_phys,
++ phys_addr_t source_phys, phys_addr_t dest_addr, u16 wStride,
++ u8 bytesPerPixel, u32 dwFetchWidthPixels,
++ u32 dwFetchHeight, bool bRLEOverFLow)
++{
++ struct Descriptor *pVirtDesc = desc_virt;
++
++ // Fetch Skipping data to a temp buffer
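++ // Each byte plane (low, then middle for 24/32bpp, then top) lands in its own dwFetchWidthPixels * dwFetchHeight region of the destination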
++ prepare_tfe_descriptor(pVirtDesc, source_phys, dest_addr, true, 1,
++ false, wStride, bytesPerPixel,
++ dwFetchWidthPixels, dwFetchHeight,
++ LowByteMode, bRLEOverFLow, 0);
++
++ dest_addr += dwFetchWidthPixels * dwFetchHeight;
++ pVirtDesc++;
++
++ if (bytesPerPixel == 3 || bytesPerPixel == 4) {
++ prepare_tfe_descriptor(pVirtDesc, source_phys, dest_addr,
++ true, 1, false, wStride, bytesPerPixel,
++ dwFetchWidthPixels, dwFetchHeight,
++ MiddleByteMode, bRLEOverFLow, 0);
++
++ dest_addr += dwFetchWidthPixels * dwFetchHeight;
++ pVirtDesc++;
++ }
++
++ prepare_tfe_descriptor(pVirtDesc, source_phys, dest_addr, false, 1,
++ false, wStride, bytesPerPixel,
++ dwFetchWidthPixels, dwFetchHeight,
++ TopByteMode, bRLEOverFLow, 1);
++}
++
++// calculate pure fetch size
++static u32 calculate_fetch_size(enum SelectedByteMode sbm, u8 bytesPerPixel,
++ u32 dwFetchWidthPixels, u32 dwFetchHeight)
++{
++ u32 dwFetchSize = 0;
++
++ switch (sbm) {
++ case AllBytesMode:
++ dwFetchSize = dwFetchWidthPixels * dwFetchHeight
++ * bytesPerPixel;
++ break;
++
++ case SkipMode:
++ if (bytesPerPixel == 3 || bytesPerPixel == 4)
++ dwFetchSize = dwFetchWidthPixels * dwFetchHeight * 3;
++ else
++ dwFetchSize = dwFetchWidthPixels * dwFetchHeight
++ * bytesPerPixel;
++ break;
++
++ case PlanarToPackedMode:
++ dwFetchSize = (dwFetchWidthPixels * dwFetchHeight);
++ break;
++
++ case PackedToPackedMode:
++ break;
++
++ default:
++ HW_ENG_DBG("Mode= %d is not supported\n", sbm);
++ break;
++ } //switch
++ return dwFetchSize;
++}
++
++static void display_fetch_info(struct FetchVideoTilesArg *pFVTDescriptor, u32 dwCD)
++{
++ struct FetchRegion *pfr = NULL;
++
++ pfr = &pFVTDescriptor->pfo[dwCD].fr;
++ HW_ENG_DBG("FETCH - 1 dwCD: %u\n", dwCD);
++ HW_ENG_DBG("pfr->wLeftX :%d\n", pfr->wLeftX);
++ HW_ENG_DBG("pfr->wTopY :%d\n", pfr->wTopY);
++ HW_ENG_DBG("pfr->wRightX :%d\n", pfr->wRightX);
++ HW_ENG_DBG("pfr->wBottomY :%d\n", pfr->wBottomY);
++ HW_ENG_DBG(" bEanbleRLE %d\n", pFVTDescriptor->pfo[dwCD].bEnableRLE);
++ HW_ENG_DBG("Stride : %d\n", pFVTDescriptor->vg.wStride);
++}
++
++void ioctl_fetch_video_tiles(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS)
++{
++ struct FetchVideoTilesArg *pFVTDescriptor;
++ u32 dwCD = 0;
++ struct Descriptor *pdesc_virt;
++ phys_addr_t qw_desc_phys;
++ phys_addr_t qw_source_phys;
++ phys_addr_t qw_destination_phys;
++ u8 bytesPerPixel;
++ struct FetchRegion *pfr;
++ bool bNotLastEntry = false;
++ u32 dwTFECR = 0;
++ u32 dwTotalFetchSize = 0;
++ u32 dwRLESize = 0;
++ bool bRLEOverFLow = false;
++ u32 dwFetchWidthPixels = 0;
++ u32 dwFetchHeight = 0;
++ phys_addr_t arg_phys = 0;
++ phys_addr_t data_phys_out = 0;
++ phys_addr_t data_phys_temp = 0;
++ u16 stride = 0;
++ bool bSkippingMode = false;
++ void *desc_virt = NULL;
++ phys_addr_t desc_phy = 0;
++ struct ContextTable *ctx_entry = NULL;
++
++ HW_ENG_DBG("DRIVER:::: TILE FETCH CHAINING\n");
++ ctx_entry = get_context_entry(ri->rc, pAstRVAS);
++
++ if (ctx_entry) {
++ desc_virt = ctx_entry->desc_virt;
++ desc_phy = ctx_entry->desc_phy;
++ } else {
++ HW_ENG_DBG("Returning with invalid Context handle: 0x%p\n", ri->rc);
++ ri->rs = InvalidContextHandle;
++ return;
++ }
++
++ ri->rs = SuccessStatus;
++ //struct FetchVideoTilesArg buffer
++ arg_phys = get_phys_add_rsvd_mem((u32)ri->rmh, pAstRVAS);
++ //Fetch final dest buffer
++ data_phys_out = get_phys_add_rsvd_mem((u32)ri->rmh1, pAstRVAS);
++ //Intermediate Buffer
++ data_phys_temp = get_phys_add_rsvd_mem((u32)ri->rmh2, pAstRVAS);
++
++ qw_destination_phys = data_phys_out;
++ pFVTDescriptor = (struct FetchVideoTilesArg *)get_virt_add_rsvd_mem((u32)ri->rmh, pAstRVAS);
++ HW_ENG_DBG("Destination virtual Add: 0x%llx\n", get_virt_add_rsvd_mem((u32)ri->rmh, pAstRVAS));
++ HW_ENG_DBG("Destination Physical Add: %llx\n", qw_destination_phys);
++ memset(desc_virt, 0x00, PAGE_SIZE);
++
++ if (arg_phys && data_phys_out && data_phys_temp) {
++ pdesc_virt = (struct Descriptor *)desc_virt;
++ qw_desc_phys = desc_phy;
++ HW_ENG_DBG("Descriptor Virtual Addr: %llx\n",
++ (phys_addr_t)desc_virt);
++ HW_ENG_DBG("Descriptor Physical Addr: %llx\n", qw_desc_phys);
++ stride = pFVTDescriptor->vg.wStride;
++
++ if (pFVTDescriptor->vg.byBitsPerPixel == 4) {
++ bytesPerPixel = 1;
++ stride >>= 1;
++ } else {
++ bytesPerPixel = pFVTDescriptor->vg.byBitsPerPixel >> 3;
++ }
++
++ HW_ENG_DBG("u8 per pixel:%u\n", bytesPerPixel);
++ // fetch all data to Destination 1 without RLE
++ HW_ENG_DBG("FETCH - 0\n");
++ HW_ENG_DBG("COUNT OF Operation: %u\n", pFVTDescriptor->cfo);
++
++ for (dwCD = 0; dwCD < pFVTDescriptor->cfo; dwCD++) {
++ display_fetch_info(pFVTDescriptor, dwCD);
++ // Set up Control Register.
++ dwTFECR = setup_tfe_cr(&pFVTDescriptor->pfo[dwCD]);
++ pfr = &pFVTDescriptor->pfo[dwCD].fr;
++ // find Source Address
++ if (pFVTDescriptor->vg.byBitsPerPixel == 4) {
++ qw_source_phys = get_phy_fb_start_address(pAstRVAS)
++ + ((pfr->wLeftX * bytesPerPixel) >> 1)
++ + pfr->wTopY * stride
++ * bytesPerPixel;
++
++ dwFetchWidthPixels = (pfr->wRightX - pfr->wLeftX + 1) >> 1;
++ } else {
++ qw_source_phys = get_phy_fb_start_address(pAstRVAS)
++ + pfr->wLeftX * bytesPerPixel
++ + pfr->wTopY * stride
++ * bytesPerPixel;
++
++ dwFetchWidthPixels = (pfr->wRightX - pfr->wLeftX + 1);
++ }
++ HW_ENG_DBG("dwCD: %u qw_source_phys: %#x\n", dwCD,
++ qw_source_phys);
++ dwFetchHeight = pfr->wBottomY - pfr->wTopY + 1;
++
++ HW_ENG_DBG("DESCRIPTOR virtual ADDRESS: 0x%p\n",
++ pdesc_virt);
++ if (pFVTDescriptor->vg.byBitsPerPixel == 4)
++ pFVTDescriptor->pfo[dwCD].sbm =
++ PlanarToPackedMode;
++
++ pFVTDescriptor->pfo[dwCD].dwFetchSize =
++ calculate_fetch_size(pFVTDescriptor->pfo[dwCD].sbm,
++ bytesPerPixel, dwFetchWidthPixels,
++ dwFetchHeight);
++ bSkippingMode =
++ (pFVTDescriptor->pfo[dwCD].sbm == SkipMode) ?
++ true : false;
++
++ if (bSkippingMode && bytesPerPixel > 1) {
++ u32 skipSrcAddr = qw_source_phys;
++ u32 skipDestAddr = qw_destination_phys;
++ u8 byPostBytesPerPixel =
++ (bytesPerPixel == 2) ? 2 : 3;
++ HW_ENG_DBG("In SkippingMode...\n");
++
++ if (pFVTDescriptor->pfo[dwCD].bEnableRLE) {
++ //skip data to intermediate buffer
++ skipDestAddr = data_phys_temp;
++ }
++
++ start_skip_mode_skip(pdesc_virt,
++ qw_desc_phys, skipSrcAddr,
++ skipDestAddr,
++ pFVTDescriptor->vg.wStride,
++ bytesPerPixel, dwFetchWidthPixels,
++ dwFetchHeight, bRLEOverFLow);
++
++ if (pFVTDescriptor->pfo[dwCD].bEnableRLE) {
++ u32 rleSrcAddr = skipDestAddr;
++ u32 rleDesAddr = qw_destination_phys;
++
++ ///// take second look at skip mode for using map single
++ if (sleep_on_tfe_busy(pAstRVAS,
++ qw_desc_phys, // Descriptor physical Address
++ dwTFECR, // control register value
++ pFVTDescriptor->pfo[dwCD].dwFetchSize, // bandwidth limitor value
++ &dwRLESize, // out:: rle size
++ &pFVTDescriptor->pfo[dwCD].dwCheckSum
++ ) == false) { // out:: cs size
++ ri->rs = GenericError;
++ return;
++ }
++
++ // perform RLE from Temp buffer to qw_destination_phys
++ //HW_ENG_DBG("skip rle\n");
++ prepare_tfe_descriptor(pdesc_virt,
++ rleSrcAddr, rleDesAddr,
++ bNotLastEntry, 1,
++ pFVTDescriptor->pfo[dwCD].bEnableRLE,
++ dwFetchWidthPixels,
++ byPostBytesPerPixel,
++ dwFetchWidthPixels,
++ dwFetchHeight, AllBytesMode,
++ bRLEOverFLow, 1);
++ }
++ } else {
++ HW_ENG_DBG("Preparing TFE Descriptor with no skipping...\n");
++ prepare_tfe_descriptor(pdesc_virt,
++ qw_source_phys, qw_destination_phys,
++ bNotLastEntry, 1,
++ pFVTDescriptor->pfo[dwCD].bEnableRLE,
++ stride, bytesPerPixel,
++ dwFetchWidthPixels, dwFetchHeight,
++ pFVTDescriptor->pfo[dwCD].sbm,
++ bRLEOverFLow, 1);
++ HW_ENG_DBG("Successfully prepared TFE Descriptor with no skipping\n");
++ }
++ HW_ENG_DBG("Sleeping while TFE is busy...\n");
++
++ if (sleep_on_tfe_busy(pAstRVAS, qw_desc_phys, // Descriptor physical Address
++ dwTFECR, // control register value
++ pFVTDescriptor->pfo[dwCD].dwFetchSize, // bandwidth limitor value
++ &dwRLESize, // out:: rle size
++ &pFVTDescriptor->pfo[dwCD].dwCheckSum
++ ) == false) { // out:: cs size
++ ri->rs = GenericError;
++ return;
++ }
++
++ HW_ENG_DBG("After sleep where TFE was busy\n");
++
++ //HW_ENG_DBG("skip rle end\n");
++ if (!pFVTDescriptor->pfo[dwCD].bEnableRLE) { // RLE not enabled
++ HW_ENG_DBG("RLE is off\n");
++ pFVTDescriptor->pfo[dwCD].bRLEFailed = false;
++ dwRLESize =
++ pFVTDescriptor->pfo[dwCD].dwFetchSize;
++ dwTotalFetchSize +=
++ pFVTDescriptor->pfo[dwCD].dwFetchSize;
++ } else { // RLE enabled
++ HW_ENG_DBG("RLE Enabled\n");
++ if (dwRLESize
++ >= pFVTDescriptor->pfo[dwCD].dwFetchSize) { // FAILED
++ HW_ENG_DBG("DRVIER:: RLE failed RLE: %u > %u\n",
++ dwRLESize,
++ pFVTDescriptor->pfo[dwCD].dwFetchSize);
++ pFVTDescriptor->pfo[dwCD].bRLEFailed =
++ true;
++
++ if (bSkippingMode) {
++ phys_addr_t skip_source_addr =
++ qw_source_phys;
++ phys_addr_t skip_dest_addr =
++ qw_destination_phys;
++
++ start_skip_mode_skip(pdesc_virt,
++ qw_desc_phys,
++ skip_source_addr,
++ skip_dest_addr,
++ pFVTDescriptor->vg.wStride,
++ bytesPerPixel,
++ dwFetchWidthPixels,
++ dwFetchHeight,
++ bRLEOverFLow);
++ } else {
++ HW_ENG_DBG(" FETCH - 4\n");
++ prepare_tfe_descriptor(pdesc_virt,
++ qw_source_phys,
++ qw_destination_phys,
++ bNotLastEntry, 1, false,
++ pFVTDescriptor->vg.wStride,
++ bytesPerPixel,
++ dwFetchWidthPixels,
++ dwFetchHeight,
++ pFVTDescriptor->pfo[dwCD].sbm,
++ bRLEOverFLow, 1);
++ }
++
++ if (sleep_on_tfe_busy(pAstRVAS,
++ qw_desc_phys, // Descriptor physical Address
++ dwTFECR, // control register value
++ pFVTDescriptor->pfo[dwCD].dwFetchSize, // bandwidth limitor value
++ &dwRLESize, // out:: rle size
++ &pFVTDescriptor->pfo[dwCD].dwCheckSum
++ ) == false) { // out:: cs size
++ ri->rs = GenericError;
++ return;
++ }
++
++ dwTotalFetchSize +=
++ pFVTDescriptor->pfo[dwCD].dwFetchSize;
++ dwRLESize =
++ pFVTDescriptor->pfo[dwCD].dwFetchSize;
++ } else { //RLE successful
++ pFVTDescriptor->pfo[dwCD].bRLEFailed =
++ false;
++ dwTotalFetchSize += dwRLESize;
++ dwTotalFetchSize = (dwTotalFetchSize
++ + 0x3) & 0xfffffffc;
++ }
++ } //RLE Enabled
++
++ pFVTDescriptor->pfo[dwCD].dwFetchRLESize = dwRLESize;
++ HW_ENG_DBG("DRIVER:: RLE: %u, nonRLE: %u\n", dwRLESize,
++ pFVTDescriptor->pfo[dwCD].dwFetchSize);
++ HW_ENG_DBG("FETCH:: loop FETCH size: %u\n", dwTotalFetchSize);
++ qw_destination_phys = data_phys_out + dwTotalFetchSize;
++ } //for TFE
++
++ pFVTDescriptor->dwTotalOutputSize = dwTotalFetchSize;
++ HW_ENG_DBG("Fetch Size: %#x\n", dwTotalFetchSize);
++ } else {
++ dev_err(pAstRVAS->pdev, "Memory allocation failure\n");
++ ri->rs = InvalidMemoryHandle;
++ }
++} // End - ioctl_fetch_video_tiles
++
++void prepare_ldma_descriptor(struct Descriptor *pDAddress, phys_addr_t source_addr,
++ phys_addr_t dest_addr, u32 dwLDMASize, u8 byNotLastEntry)
++{
++ u8 byInterrupt = 0;
++
++ HW_ENG_DBG("pDAddress: 0x%p\n", pDAddress);
++
++ // initialize to 0
++ pDAddress->dw0General = 0x00;
++ pDAddress->dw1FetchWidthLine = 0x00;
++ pDAddress->dw2SourceAddr = 0x00;
++ pDAddress->dw3DestinationAddr = 0x00;
++
++ // initialize to 0
++ if (!byNotLastEntry)
++ byInterrupt = 0x1;
++
++ pDAddress->dw0General = ((dwLDMASize - 1) << 8) | (byNotLastEntry << 1)
++ | byInterrupt;
++ pDAddress->dw2SourceAddr = (u32)source_addr;
++ pDAddress->dw3DestinationAddr = (u32)dest_addr;
++
++ HW_ENG_DBG("u32 0: 0x%x\n", pDAddress->dw0General);
++ HW_ENG_DBG("u32 1: 0x%x\n", pDAddress->dw1FetchWidthLine);
++ HW_ENG_DBG("u32 2: 0x%x\n", pDAddress->dw2SourceAddr);
++ HW_ENG_DBG("u32 3: 0x%x\n", pDAddress->dw3DestinationAddr);
++}
++
++//
++// ioctl_run_length_encode_data - encode buffer data
++//
++void ioctl_run_length_encode_data(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS)
++{
++ struct Descriptor *pDescriptorAdd = NULL;
++ struct Descriptor *pDescriptorAddPhys = NULL;
++ u8 bytesPerPixel;
++ bool bNotLastEntry = true;
++ u32 dwTFECR = 0;
++ bool bRLEOverFLow = false;
++ u32 dwFetchWidthPixels = 0;
++ u32 dwFetchHeight = 0;
++ u32 dwPhysAddIn;
++ u32 dwPhysAddOut;
++ u32 data_size = 0;
++ void *desc_virt = NULL;
++ u32 desc_phy = 0;
++ struct ContextTable *ctx_entry = NULL;
++
++ ctx_entry = get_context_entry(ri->rc, pAstRVAS);
++ if (ctx_entry) {
++ desc_virt = ctx_entry->desc_virt;
++ desc_phy = ctx_entry->desc_phy;
++ } else {
++ ri->rs = InvalidContextHandle;
++ return;
++ }
++
++ ri->rs = SuccessStatus;
++
++ dwPhysAddIn = get_phys_add_rsvd_mem((u32)ri->rmh, pAstRVAS);
++ dwPhysAddOut = get_phys_add_rsvd_mem((u32)ri->rmh1, pAstRVAS);
++
++ data_size = ri->rmh_mem_size;
++ pDescriptorAdd = (struct Descriptor *)ctx_entry->desc_virt;
++ pDescriptorAddPhys = (struct Descriptor *)ctx_entry->desc_phy;
++
++ HW_ENG_DBG("pDescriptorAdd=%#x, phy=%#x\n", (u32)pDescriptorAdd,
++ (u32)pDescriptorAddPhys);
++
++ if (dwPhysAddIn && dwPhysAddOut) {
++ // Enable TFE
++ dwTFECR = (ri->encode & 0xffff0000) << 16;
++ dwTFECR |= 1;
++ dwTFECR &= TFCTL_DESCRIPTOR_IN_DDR_MASK;
++
++ // triplet code and repeat code
++ bNotLastEntry = false;
++ bRLEOverFLow = true;
++ dwFetchWidthPixels = TILE_SIZE;
++ dwFetchHeight = data_size / TILE_SIZE;
++ bytesPerPixel = 1;
++
++ prepare_tfe_descriptor(pDescriptorAdd, dwPhysAddIn,
++ dwPhysAddOut, bNotLastEntry, 1,
++ 1, dwFetchWidthPixels,
++ bytesPerPixel, dwFetchWidthPixels,
++ dwFetchHeight, AllBytesMode, bRLEOverFLow, 1);
++
++ if (sleep_on_tfe_busy(pAstRVAS, (phys_addr_t)pDescriptorAddPhys,
++ dwTFECR, data_size, &ri->rle_len,
++ &ri->rle_checksum) == false) {
++ ri->rs = GenericError;
++ dev_err(pAstRVAS->pdev, "%s sleep_on_tfe_busy ERROR\n", __func__);
++ return;
++ }
++ } else {
++ ri->rs = InvalidMemoryHandle;
++ }
++}
++
++static u32 get_video_slice_fetch_width(u8 cBuckets)
++{
++ u32 dwFetchWidthPixels = 0;
++
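++ /* Fetch width in pixels scales with the bucket count: (TILE_SIZE << 5) * cBuckets / 8 for the supported counts (3, 8, 16, 24) */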
++ switch (cBuckets) {
++ case 3:
++ dwFetchWidthPixels = ((TILE_SIZE << 5) * 3) >> 3;
++ break;
++
++ case 8:
++ dwFetchWidthPixels = TILE_SIZE << 5;
++ break;
++
++ case 16:
++ dwFetchWidthPixels = (TILE_SIZE << 5) * 2;
++ break;
++
++ case 24:
++ dwFetchWidthPixels = (TILE_SIZE << 5) * 3;
++ break;
++
++ default:
++ dwFetchWidthPixels = TILE_SIZE << 2;
++ break;
++ }
++
++ return dwFetchWidthPixels;
++}
++
++void ioctl_fetch_video_slices(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS)
++{
++ struct FetchVideoSlicesArg *pFVSA;
++ u32 dwCD;
++ struct Descriptor *pdesc_virt;
++ phys_addr_t qw_desc_phys;
++ phys_addr_t source_addr;
++ phys_addr_t slice_dest_addr;
++ u8 bytesPerPixel;
++ bool bNotLastEntry = true;
++ bool bInterrupt = false;
++ u32 dwTFECR = 0;
++ u32 dwFetchSize = 0;
++ bool bRLEOverFLow = false;
++ u32 dwFetchWidthPixels = 0;
++ u32 dwFetchHeight = 0;
++ phys_addr_t arg_phys = 0;
++ phys_addr_t data_phys_out = 0;
++ phys_addr_t data_phys_rle = 0;
++ struct BSEAggregateRegister aBSEAR;
++ struct Descriptor *pNextDescriptor = 0;
++ phys_addr_t dest_next_addr = 0;
++ u32 dwBucketSizeIter = 0;
++ bool bBucketSizeEnable = 0;
++ void __iomem *addrBSCR = pAstRVAS->fg_reg_base + BSE_Command_Register;
++ void *desc_virt = NULL;
++ phys_addr_t desc_phy = 0;
++ struct ContextTable *ctx_entry = get_context_entry(ri->rc, pAstRVAS);
++
++ HW_ENG_DBG("Start\n");
++
++ if (ctx_entry) {
++ desc_virt = ctx_entry->desc_virt;
++ desc_phy = ctx_entry->desc_phy;
++ } else {
++ pr_err("BSE: Cannot get valid context\n");
++ ri->rs = InvalidContextHandle;
++ return;
++ }
++
++ arg_phys = get_phys_add_rsvd_mem((u32)ri->rmh, pAstRVAS);
++ data_phys_out = get_phys_add_rsvd_mem((u32)ri->rmh1, pAstRVAS);
++ data_phys_rle = get_phys_add_rsvd_mem((u32)ri->rmh2, pAstRVAS);
++
++ if (!arg_phys || !data_phys_out || !data_phys_rle) {
++ pr_err("BSE: Invalid memory handle\n");
++ ri->rs = InvalidMemoryHandle;
++ return;
++ }
++ ri->rs = SuccessStatus;
++ slice_dest_addr = data_phys_out;
++ pFVSA = (struct FetchVideoSlicesArg *)get_virt_add_rsvd_mem((u32)ri->rmh, pAstRVAS);
++
++ HW_ENG_DBG("bEnableRLE: %d cBuckets: %u cfr: %u\n", pFVSA->bEnableRLE,
++ pFVSA->cBuckets, pFVSA->cfr);
++
++ if (pFVSA->cfr > 1) {
++ writel(readl(addrBSCR) | BSE_ENABLE_MULT_BUCKET_SZS, addrBSCR);
++ bBucketSizeEnable = 1;
++ } else {
++ writel(readl(addrBSCR) & (~BSE_ENABLE_MULT_BUCKET_SZS), addrBSCR);
++ bBucketSizeEnable = 0;
++ }
++
++ HW_ENG_DBG("*pdwBSCR: %#x bBucketSizeEnable: %d\n", readl(addrBSCR),
++ bBucketSizeEnable);
++
++ pdesc_virt = ctx_entry->desc_virt;
++ qw_desc_phys = ctx_entry->desc_phy;
++ bytesPerPixel = pFVSA->vg.byBitsPerPixel >> 3;
++
++ HW_ENG_DBG("BSE:: u8 per pixel: %d\n", bytesPerPixel);
++ HW_ENG_DBG("BSE:: cfr: %u bucket size: %d\n", pFVSA->cfr, pFVSA->cBuckets);
++
++ pNextDescriptor = pdesc_virt;
++ dest_next_addr = slice_dest_addr;
++ // Prepare BSE Descriptors for all Regions
++ HW_ENG_DBG("pNextDescriptor 0x%p dest_next_addr: %#x\n", pNextDescriptor,
++ dest_next_addr);
++
++ for (dwCD = 0; dwCD < pFVSA->cfr; dwCD++) {
++ HW_ENG_DBG("dwCD: %u\n", dwCD);
++ HW_ENG_DBG("pfr->wLeftX :%d\n", pFVSA->pfr[dwCD].wLeftX);
++ HW_ENG_DBG("pfr->wTopY :%d\n", pFVSA->pfr[dwCD].wTopY);
++ HW_ENG_DBG("pfr->wRightX :%d\n", pFVSA->pfr[dwCD].wRightX);
++ HW_ENG_DBG("pfr->wBottomY :%d\n", pFVSA->pfr[dwCD].wBottomY);
++
++ source_addr = get_phy_fb_start_address(pAstRVAS)
++ + pFVSA->pfr[dwCD].wLeftX * bytesPerPixel
++ + pFVSA->pfr[dwCD].wTopY * pFVSA->vg.wStride
++ * bytesPerPixel;
++ dwFetchWidthPixels = (pFVSA->pfr[dwCD].wRightX
++ - pFVSA->pfr[dwCD].wLeftX + 1);
++ dwFetchHeight = pFVSA->pfr[dwCD].wBottomY
++ - pFVSA->pfr[dwCD].wTopY + 1;
++
++ HW_ENG_DBG("BSE Width in Pixel: %d\n", dwFetchWidthPixels);
++ HW_ENG_DBG("BSE Height: %d bBucketSizeEnable: %d\n", dwFetchHeight,
++ bBucketSizeEnable);
++
++ if (!bBucketSizeEnable) {
++ bNotLastEntry = false;
++ bInterrupt = true;
++ prepare_bse_descriptor(pdesc_virt,
++ source_addr, slice_dest_addr,
++ bNotLastEntry, pFVSA->vg.wStride, bytesPerPixel,
++ dwFetchWidthPixels, dwFetchHeight, bInterrupt);
++ dwFetchSize += (pFVSA->cBuckets
++ * (dwFetchWidthPixels * dwFetchHeight) >> 3);
++ aBSEAR = setUp_bse_bucket(pFVSA->abyBitIndexes,
++ pFVSA->cBuckets, bytesPerPixel,
++ dwFetchWidthPixels, dwFetchHeight);
++
++ } else {
++ if (dwCD == pFVSA->cfr - 1) {
++ bNotLastEntry = false;
++ bInterrupt = true;
++ } else {
++ bNotLastEntry = true;
++ bInterrupt = false;
++ }
++
++ prepare_bse_descriptor_2(pNextDescriptor,
++ source_addr,
++ dest_next_addr, bNotLastEntry,
++ pFVSA->vg.wStride, bytesPerPixel,
++ dwFetchWidthPixels, dwFetchHeight,
++ bInterrupt,
++ arrBuckSizeRegIndex[dwBucketSizeIter]);
++
++ aBSEAR = set_up_bse_bucket_2(pAstRVAS,
++ pFVSA->abyBitIndexes, pFVSA->cBuckets,
++ bytesPerPixel, dwFetchWidthPixels,
++ dwFetchHeight,
++ arrBuckSizeRegIndex[dwBucketSizeIter]);
++
++ dwBucketSizeIter++;
++ pNextDescriptor++;
++ dwFetchSize += pFVSA->cBuckets
++ * ((dwFetchWidthPixels * dwFetchHeight) >> 3); //each bucket size
++ dest_next_addr = slice_dest_addr
++ + dwFetchSize;
++ }
++ }
++
++ //bse now
++ if (pFVSA->cBuckets <= FULL_BUCKETS_COUNT) {
++ if (bBucketSizeEnable)
++ aBSEAR.dwBSDBS = 0x80000000;
++
++ HW_ENG_DBG("Sleeping on BSE to complete\n");
++
++ if (sleep_on_bse_busy(pAstRVAS, qw_desc_phys, aBSEAR,
++ dwFetchSize) == false) {
++ dev_err(pAstRVAS->pdev, ".....BSE Timeout\n");
++ ri->rs = GenericError;
++ return;
++ }
++ }
++ HW_ENG_DBG("Fetched the bit slices\n");
++ //RLE
++ pFVSA->dwSlicedSize = dwFetchSize;
++ pFVSA->dwSlicedRLESize = pFVSA->dwSlicedSize;
++
++ // do RLE if RLE is on. Fetch from Destination 1 to Destination 2 with RLE on
++ bNotLastEntry = false;
++
++ if (pFVSA->bEnableRLE) {
++ HW_ENG_DBG("BSE - 3 (RLE Enabled)\n");
++ // Enable TFE
++ dwTFECR = ((pFVSA->byRLETripletCode << 24)
++ | (pFVSA->byRLERepeatCode << 16));
++ dwTFECR |= ((0x1 << 1) | 1);
++ dwTFECR &= TFCTL_DESCRIPTOR_IN_DDR_MASK;
++
++ bRLEOverFLow = true;
++ bytesPerPixel = 1;
++
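++		// treat the sliced buffer as one rectangle (width derived from the bucket count)
++		// so a single TFE descriptor can run RLE over the whole sliced output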
++ dwFetchWidthPixels = get_video_slice_fetch_width(pFVSA->cBuckets);
++ dwFetchHeight = dwFetchSize / dwFetchWidthPixels;
++
++ prepare_tfe_descriptor(pdesc_virt, data_phys_out,
++ data_phys_rle, bNotLastEntry, 1, pFVSA->bEnableRLE,
++ dwFetchWidthPixels, bytesPerPixel, dwFetchWidthPixels,
++ dwFetchHeight, 0, bRLEOverFLow, 1);
++
++ HW_ENG_DBG("TFE-RLE Control Register value: 0x%x\n", dwTFECR);
++
++ if (sleep_on_tfe_busy(pAstRVAS, qw_desc_phys, // Descriptor physical Address
++ dwTFECR, // control register value
++ dwFetchSize, // bandwidth limiter value
++ &pFVSA->dwSlicedRLESize, // out:: rle size
++ &pFVSA->dwCheckSum
++ ) == false) {
++ ri->rs = GenericError;
++ return;
++ }
++
++ HW_ENG_DBG("Finishing RLE Fetching\n");
++
++ if (pFVSA->dwSlicedRLESize >= pFVSA->dwSlicedSize)
++ pFVSA->bRLEFailed = true;
++ else
++ pFVSA->bRLEFailed = false;
++ } // RLE enabled
++
++ memcpy((void *)&dwFetchSize, (void *)&pFVSA->dwSlicedRLESize, 4);
++}
++
++void ioctl_fetch_text_data(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS)
++{
++ bool bRLEOn = ri->tfm.bEnableRLE;
++
++ ri->rs = SuccessStatus;
++
++ // first time fetch
++ on_fetch_text_data(ri, bRLEOn, pAstRVAS);
++}
++
++void on_fetch_text_data(struct RvasIoctl *ri, bool bRLEOn, struct AstRVAS *pAstRVAS)
++{
++ struct Descriptor *pDescriptorAdd;
++ struct Descriptor *pDescriptorAddPhys;
++ u32 dwScreenOffset = 0x00;
++ phys_addr_t source_addr = get_phy_fb_start_address(pAstRVAS);
++ phys_addr_t dest_addr;
++ bool bRLEOverFlow = false;
++ bool bInterrupt = true;
++ u32 wFetchLines = 0;
++ u8 byCharacterPerLine = 0;
++ u16 wFetchWidthInBytes = 0;
++ phys_addr_t data_phys = 0;
++ phys_addr_t data_phys_rle = 0;
++ phys_addr_t data_phys_temp = 0;
++ u32 dwCtrlRegValue = 0;
++ u32 dwMinBufSize = 0;
++ void *desc_virt = NULL;
++ phys_addr_t desc_phy = 0;
++ struct ContextTable *ctx_entry = NULL;
++
++ HW_ENG_DBG("Start\n");
++ ctx_entry = get_context_entry(ri->rc, pAstRVAS);
++ if (ctx_entry) {
++ desc_virt = ctx_entry->desc_virt;
++ desc_phy = ctx_entry->desc_phy;
++ } else {
++ ri->rs = InvalidContextHandle;
++ return;
++ }
++
++ wFetchLines = get_text_mode_fetch_lines(pAstRVAS, ri->vg.wScreenHeight);
++ byCharacterPerLine = get_text_mode_character_per_line(pAstRVAS,
++ ri->vg.wScreenWidth);
++
++ data_phys = get_phys_add_rsvd_mem((u32)ri->rmh, pAstRVAS);
++ data_phys_rle = get_phys_add_rsvd_mem((u32)ri->rmh1, pAstRVAS);
++
++ if (!data_phys || !data_phys_rle) {
++ ri->rs = InvalidMemoryHandle;
++		dev_err(pAstRVAS->pdev, "Fetch Text: Invalid memory handle\n");
++ return;
++ }
++
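++	// text mode uses two bytes per character cell: ASCII code plus attribute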
++ dwMinBufSize = (byCharacterPerLine * wFetchLines) << 1;
++
++ if (ri->rmh_mem_size < dwMinBufSize) {
++ //either buffer is too small or invalid data in registers
++ ri->rs = GenericError;
++ dev_err(pAstRVAS->pdev, "Fetch Text: required buffer len:0x%x\n", dwMinBufSize);
++ return;
++ }
++ memset(desc_virt, 0x00, MAX_DESC_SIZE);
++ pDescriptorAdd = desc_virt;
++ pDescriptorAddPhys = (struct Descriptor *)desc_phy;
++ dest_addr = data_phys;
++
++ // Enable TFE
++ dwCtrlRegValue |= 1;
++ dwCtrlRegValue &= TFCTL_DESCRIPTOR_IN_DDR_MASK;
++ // set up the text alignment
++ dwScreenOffset = get_screen_offset(pAstRVAS);
++ source_addr += dwScreenOffset;
++	HW_ENG_DBG("screen offset:%#x, Source start Addr: %llx\n", dwScreenOffset,
++		   (u64)source_addr);
++ if (ri->tfm.dpm == AttrMode) { // ATTR and ASCII
++ data_phys_temp = data_phys_rle;
++ wFetchWidthInBytes = byCharacterPerLine << 3;
++ // must fetch both ascii & attr
++ HW_ENG_DBG("Attribute and ASCII\n");
++ prepare_tfe_text_descriptor(desc_virt, source_addr,
++ data_phys_temp,
++ false, wFetchWidthInBytes, wFetchLines,
++ ri->tfm.dpm, bRLEOverFlow, bInterrupt);
++ ri->tfm.dwFetchSize = (byCharacterPerLine * wFetchLines) << 1;
++ } else if (ri->tfm.dpm == AsciiOnlyMode) {
++ wFetchWidthInBytes = byCharacterPerLine << 3;
++ HW_ENG_DBG("ASCII Only\n");
++ prepare_tfe_text_descriptor(desc_virt, source_addr,
++ dest_addr,
++ false, wFetchWidthInBytes, wFetchLines,
++ ri->tfm.dpm, bRLEOverFlow, bInterrupt);
++ ri->tfm.dwFetchSize = byCharacterPerLine * wFetchLines;
++ } else if (ri->tfm.dpm == FontFetchMode) {
++ wFetchWidthInBytes = byCharacterPerLine << 2;
++ HW_ENG_DBG("Font Only\n");
++ prepare_tfe_text_descriptor(desc_virt, source_addr,
++ dest_addr,
++ false, wFetchWidthInBytes,
++ wFetchLines + 256,
++ ri->tfm.dpm, bRLEOverFlow, bInterrupt);
++
++ ri->tfm.dwFetchSize = MAX_TEXT_DATA_SIZE;
++ }
++ dwCtrlRegValue |= 1 << 1; // enabled IRQ
++ if (ri->tfm.dpm == AttrMode) {
++ if (sleep_on_tfe_text_busy(pAstRVAS, desc_phy, dwCtrlRegValue, // control register value
++					   ri->tfm.dwFetchSize, // bandwidth limiter value
++ &ri->tfm.dwFetchRLESize, // out:: rle size
++ &ri->tfm.dwCheckSum) == false) {
++ dev_err(pAstRVAS->pdev, "Could not sleep_on_tfe_busy for attributes\n");
++ ri->rs = GenericError;
++ return;
++ }
++ } else {
++ if (sleep_on_tfe_text_busy(pAstRVAS, desc_phy, dwCtrlRegValue,
++ ri->tfm.dwFetchSize, &ri->tfm.dwFetchRLESize,
++ &ri->tfm.dwCheckSum) == false) {
++ ri->rs = GenericError;
++ dev_err(pAstRVAS->pdev, "Could not sleep_on_tfe_busy for others\n");
++ return;
++ }
++ }
++
++ if (ri->tfm.dpm == AttrMode) {
++ //separate ATTR from ATTR+ASCII
++ source_addr = data_phys_temp;
++ dest_addr = data_phys;
++ prepare_tfe_descriptor(desc_virt, data_phys_temp, data_phys,
++ false, //not last entry?
++ 1, //checksum
++ false, //RLE?
++ byCharacterPerLine,
++ 2, //byBpp,
++ byCharacterPerLine, wFetchLines, TopByteMode,
++ bRLEOverFlow, bInterrupt);
++
++ ri->tfm.dwFetchSize = byCharacterPerLine * wFetchLines;
++
++ dwCtrlRegValue |= 1 << 1; // enabled IRQ
++ if (sleep_on_tfe_text_busy(pAstRVAS, (phys_addr_t)pDescriptorAddPhys,
++ dwCtrlRegValue, ri->tfm.dwFetchSize,
++ &ri->tfm.dwFetchRLESize, &ri->tfm.dwCheckSum) == false) {
++ dev_err(pAstRVAS->pdev, "Could not sleep_on_tfe_busy for attributes # 2\n");
++ ri->rs = GenericError;
++ return;
++ }
++ }
++ // RLE enabled
++ if (bRLEOn) {
++ bRLEOverFlow = true;
++ dwCtrlRegValue = 1;
++ dwCtrlRegValue |= (ri->tfm.byRLETripletCode << 24)
++ | (ri->tfm.byRLERepeatCode << 16);
++ source_addr = dest_addr;
++ dest_addr = data_phys_rle;
++
++ // RLE only
++ prepare_tfe_descriptor(pDescriptorAdd, source_addr,
++ dest_addr,
++ false, //not last entry?
++ 1, //checksum
++ bRLEOn, //RLE?
++ ri->tfm.dwFetchSize / wFetchLines, 1,
++ ri->tfm.dwFetchSize / wFetchLines, wFetchLines,
++ AllBytesMode, bRLEOverFlow, bInterrupt);
++
++ dwCtrlRegValue |= 1 << 1; // enabled IRQ
++
++ if (sleep_on_tfe_busy(pAstRVAS, (phys_addr_t)pDescriptorAddPhys, // Descriptor physical Address
++ dwCtrlRegValue, // control register value
++				      ri->tfm.dwFetchSize, // bandwidth limiter value
++ &ri->tfm.dwFetchRLESize, // out:: rle size
++ &ri->tfm.dwCheckSum) == false) { // out:: cs size
++ dev_err(pAstRVAS->pdev, "Could not sleep_on_tfe_busy for RLE for Text Mode\n");
++ ri->rs = GenericError;
++ return;
++ } //sleeponTFEBusy
++ }
++ if (bRLEOn) {
++ ri->tfm.bRLEFailed =
++ (ri->tfm.dwFetchRLESize < ri->tfm.dwFetchSize) ?
++ false : true;
++ }
++}
++
++u8 get_text_mode_character_per_line(struct AstRVAS *pAstRVAS, u16 wScreenWidth)
++{
++ u8 byCharPerLine = 0x00;
++ u8 byCharWidth = 0;
++ u8 byVGASR1 = readb(pAstRVAS->grce_reg_base + GRCE_SEQ + 0x1);
++
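++	// SR01 bit[0] set selects an 8-dot character clock, otherwise 9 dots per character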
++ byCharWidth = (byVGASR1 & 0x1) ? 8 : 9;
++ byCharPerLine = wScreenWidth / byCharWidth;
++
++ return byCharPerLine;
++}
++
++u16 get_text_mode_fetch_lines(struct AstRVAS *pAstRVAS, u16 wScreenHeight)
++{
++ u8 byVGACR9 = readb(pAstRVAS->grce_reg_base + GRCE_CRTC + 0x9);
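++	// CR09 bits[4:0] hold the character (font) height minus one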
++ u8 byFontHeight = (byVGACR9 & 0x1F) + 1;
++ u16 wFetchLines;
++
++ wFetchLines = wScreenHeight / byFontHeight;
++
++ return wFetchLines;
++}
++
++//
++// HELPER Functions
++//
++
++void prepare_bse_descriptor(struct Descriptor *pDAddress, phys_addr_t source_addr,
++ phys_addr_t dest_addr, bool bNotLastEntry,
++ u16 wStride, u8 bytesPerPixel,
++ u32 dwFetchWidthPixels, u32 dwFetchHeight,
++ bool bInterrupt)
++{
++ u16 wDestinationStride;
++
++ // initialize to 0
++ pDAddress->dw0General = 0x00;
++ pDAddress->dw1FetchWidthLine = 0x00;
++ pDAddress->dw2SourceAddr = 0x00;
++ pDAddress->dw3DestinationAddr = 0x00;
++
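++	// the BSE emits one bit per pixel per bit slice, so the destination stride is width / 8 bytes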
++ wDestinationStride = dwFetchWidthPixels >> 3;
++
++ // initialize to 0
++ pDAddress->dw0General = ((wStride * bytesPerPixel) << 16)
++ | (wDestinationStride << 8) | (bNotLastEntry << 1) | bInterrupt;
++ pDAddress->dw1FetchWidthLine = ((dwFetchHeight - 1) << 16)
++ | (dwFetchWidthPixels * bytesPerPixel - 1);
++ pDAddress->dw2SourceAddr = (u32)source_addr & 0xfffffffc;
++ pDAddress->dw3DestinationAddr = (u32)dest_addr & 0xfffffffc;
++
++ HW_ENG_DBG("After SETTING BSE Descriptor\n");
++ HW_ENG_DBG("u32 0: 0x%x\n", pDAddress->dw0General);
++ HW_ENG_DBG("u32 1: 0x%x\n", pDAddress->dw1FetchWidthLine);
++ HW_ENG_DBG("u32 2: 0x%x\n", pDAddress->dw2SourceAddr);
++ HW_ENG_DBG("u32 3: 0x%x\n", pDAddress->dw3DestinationAddr);
++}
++
++//for descriptor chaining
++void prepare_bse_descriptor_2(struct Descriptor *pDAddress, phys_addr_t source_addr,
++ phys_addr_t dest_addr, bool bNotLastEntry,
++ u16 wStride, u8 bytesPerPixel,
++ u32 dwFetchWidthPixels, u32 dwFetchHeight,
++ bool bInterrupt, u8 byBuckSizeRegIndex)
++{
++ u16 wDestinationStride;
++
++ // initialize to 0
++ pDAddress->dw0General = 0x00;
++ pDAddress->dw1FetchWidthLine = 0x00;
++ pDAddress->dw2SourceAddr = 0x00;
++ pDAddress->dw3DestinationAddr = 0x00;
++
++ wDestinationStride = dwFetchWidthPixels >> 3;
++
++ // initialize to 0
++ pDAddress->dw0General = ((wStride * bytesPerPixel) << 16)
++ | (wDestinationStride << 8)
++ | (byBuckSizeRegIndex << BSE_BUCK_SZ_INDEX_POS)
++ | (bNotLastEntry << 1) | bInterrupt;
++ pDAddress->dw1FetchWidthLine = ((dwFetchHeight - 1) << 16)
++ | (dwFetchWidthPixels * bytesPerPixel - 1);
++ pDAddress->dw2SourceAddr = (u32)source_addr & 0xfffffffc;
++ pDAddress->dw3DestinationAddr = (u32)dest_addr & 0xfffffffc;
++
++	HW_ENG_DBG("After SETTING BSE Descriptor\n");
++ HW_ENG_DBG("u32 0: 0x%x\n", pDAddress->dw0General);
++ HW_ENG_DBG("u32 1: 0x%x\n", pDAddress->dw1FetchWidthLine);
++ HW_ENG_DBG("u32 2: 0x%x\n", pDAddress->dw2SourceAddr);
++ HW_ENG_DBG("u32 3: 0x%x\n", pDAddress->dw3DestinationAddr);
++}
++
++struct BSEAggregateRegister set_up_bse_bucket_2(struct AstRVAS *pAstRVAS, u8 *abyBitIndexes,
++ u8 byTotalBucketCount, u8 byBSBytesPerPixel,
++ u32 dwFetchWidthPixels, u32 dwFetchHeight,
++ u32 dwBucketSizeIndex)
++{
++ struct BSEAggregateRegister aBSEAR = { 0 };
++ void __iomem *addrBSDBS = 0;
++ void __iomem *addrBSCR = pAstRVAS->fg_reg_base + BSE_Command_Register;
++
++ if (dwBucketSizeIndex >= BSE_MAX_BUCKET_SIZE_REGS) {
++ dev_err(pAstRVAS->pdev, "Video::BSE bucket size index %d too big!",
++ dwBucketSizeIndex);
++ return aBSEAR;
++ }
++
++ addrBSDBS = pAstRVAS->fg_reg_base + BSE_REG_BASE + dwBucketSizeRegOffset[dwBucketSizeIndex];
++
++ // initialize
++ memset((void *)&aBSEAR, 0x00, sizeof(struct BSEAggregateRegister));
++ aBSEAR = setUp_bse_bucket(abyBitIndexes, byTotalBucketCount,
++ byBSBytesPerPixel, dwFetchWidthPixels, dwFetchHeight);
++
++ writel(aBSEAR.dwBSDBS, addrBSDBS);
++ aBSEAR.dwBSCR |= readl(addrBSCR) & (BSE_ENABLE_MULT_BUCKET_SZS);
++ HW_ENG_DBG("BSE Bucket size register index %d, [%#x], readback 0x%x\n",
++ dwBucketSizeIndex, aBSEAR.dwBSDBS, readl(addrBSCR));
++
++ return aBSEAR;
++}
++
++struct BSEAggregateRegister setUp_bse_bucket(u8 *abyBitIndexes, u8 byTotalBucketCount,
++ u8 byBSBytesPerPixel, u32 dwFetchWidthPixels,
++ u32 dwFetchHeight)
++{
++ struct BSEAggregateRegister aBSEAR;
++ u32 dwSrcBucketSize = MAX_LMEM_BUCKET_SIZE;
++ u32 dwDestBucketSize = dwFetchWidthPixels * dwFetchHeight >> 3; //each bucket size
++ u8 byRegisterPosition = 0;
++ u8 cBucket;
++
++ // initialize
++ memset((void *)&aBSEAR, 0x00, sizeof(struct BSEAggregateRegister));
++
++ for (cBucket = 0; cBucket < byTotalBucketCount; cBucket++) {
++ if (cBucket < 6) {
++ HW_ENG_DBG("BUCKET: 0x%x, Bit Position: 0x%x\n", cBucket,
++ abyBitIndexes[cBucket]);
++ HW_ENG_DBG("BSBPS0 Position: 0x%x\n", byRegisterPosition);
++ aBSEAR.adwBSBPS[0] |= abyBitIndexes[cBucket]
++ << byRegisterPosition;
++
++ byRegisterPosition += 5;
++ } else if (cBucket >= 6 && cBucket < 12) {
++ if (cBucket == 6)
++ byRegisterPosition = 0;
++
++ HW_ENG_DBG("BUCKET: 0x%x, Bit Position: 0x%x\n", cBucket,
++ abyBitIndexes[cBucket]);
++ HW_ENG_DBG("BSBPS1 Position: 0x%x\n", byRegisterPosition);
++ aBSEAR.adwBSBPS[1] |= abyBitIndexes[cBucket]
++ << byRegisterPosition;
++ byRegisterPosition += 5;
++ } else {
++ if (cBucket == 12)
++ byRegisterPosition = 0;
++
++ HW_ENG_DBG("BUCKET: 0x%x, Bit Position: 0x%x\n", cBucket,
++ abyBitIndexes[cBucket]);
++ HW_ENG_DBG("BSBPS2 Position: 0x%x\n", byRegisterPosition);
++ aBSEAR.adwBSBPS[2] |= abyBitIndexes[cBucket]
++ << byRegisterPosition;
++ byRegisterPosition += 5;
++ }
++ }
++
++ aBSEAR.dwBSCR = (((byTotalBucketCount - 1) << 8)
++ | ((byBSBytesPerPixel - 1) << 4) | (0x0 << 3)
++ | (0x1 << 1) | 0x1) & BSCMD_MASK;
++ aBSEAR.dwBSDBS = ((dwSrcBucketSize << 24) | dwDestBucketSize)
++ & 0xfcfffffc;
++
++ HW_ENG_DBG("dwFetchWidthPixels [%#x], dwFetchHeight [%#x]\n",
++ dwFetchWidthPixels, dwFetchHeight);
++ HW_ENG_DBG("BSE Destination Bucket Size [%#x]\n", dwDestBucketSize);
++ HW_ENG_DBG("BSE Control [%#x]\n", aBSEAR.dwBSCR);
++ HW_ENG_DBG("BSE BSDBS [%#x]\n", aBSEAR.dwBSDBS);
++ HW_ENG_DBG("BSE BSBPS0 [%#x]\n", aBSEAR.adwBSBPS[0]);
++ HW_ENG_DBG("BSE BSBPS1 [%#x]\n", aBSEAR.adwBSBPS[1]);
++ HW_ENG_DBG("BSE BSBPS2 [%#x]\n", aBSEAR.adwBSBPS[2]);
++
++ return aBSEAR;
++}
++
++void prepare_tfe_descriptor(struct Descriptor *pDAddress, phys_addr_t source_addr,
++ phys_addr_t dest_addr, bool bNotLastEntry, u8 bCheckSum,
++ bool bEnabledRLE, u16 wStride, u8 bytesPerPixel,
++ u32 dwFetchWidthPixels, u32 dwFetchHeight,
++ enum SelectedByteMode sbm, bool bRLEOverFLow,
++ bool bInterrupt)
++{
++ enum SkipByteMode skipBM = NoByteSkip;
++ enum DataProccessMode dpm = NormalTileMode;
++ enum StartBytePosition sbp = StartFromByte0;
++
++ HW_ENG_DBG("BEFORE SETTING TFE Descriptor\n");
++ // initialize to 0
++ pDAddress->dw0General = 0x00;
++ pDAddress->dw1FetchWidthLine = 0x00;
++ pDAddress->dw2SourceAddr = 0x00;
++ pDAddress->dw3DestinationAddr = 0x00;
++
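++	// round the fetch height up to a multiple of four lines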
++ if (dwFetchHeight & 0x3)
++ dwFetchHeight = ((dwFetchHeight + 3) >> 2) << 2;
++
++ switch (sbm) {
++ case AllBytesMode:
++ break;
++
++ case LowByteMode:
++ dpm = SplitByteMode;
++ if (bytesPerPixel == 2)
++ skipBM = SkipOneByte;
++ else if (bytesPerPixel == 3)
++ skipBM = SkipTwoByte;
++ else if (bytesPerPixel == 4)
++ skipBM = SkipThreeByte;
++ break;
++
++ case MiddleByteMode:
++ dpm = SplitByteMode;
++ if (bytesPerPixel == 2) {
++ skipBM = SkipOneByte;
++ sbp = StartFromByte1;
++ } else if (bytesPerPixel == 3) {
++ skipBM = SkipTwoByte;
++ sbp = StartFromByte1;
++ } else if (bytesPerPixel == 4) {
++ skipBM = SkipThreeByte;
++ sbp = StartFromByte1;
++ }
++ break;
++
++ case TopByteMode:
++ dpm = SplitByteMode;
++ if (bytesPerPixel == 2) {
++ skipBM = SkipOneByte;
++ sbp = StartFromByte1;
++ } else if (bytesPerPixel == 3) {
++ skipBM = SkipTwoByte;
++ sbp = StartFromByte2;
++ } else if (bytesPerPixel == 4) {
++ skipBM = SkipThreeByte;
++ sbp = StartFromByte2;
++ }
++ break;
++
++ case PlanarToPackedMode:
++ dpm = FourBitPlanarMode;
++ break;
++
++ case PackedToPackedMode:
++ dpm = FourBitPackedMode;
++ break;
++
++ default:
++ break;
++ }
++
++ if (dwFetchWidthPixels > wStride)
++ wStride = dwFetchWidthPixels;
++
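++	// dw0 packing: stride (bytes) << 16, data process mode << 13, start byte << 10,
++	// skip mode << 8, RLE overflow bit 7, checksum bit 5, RLE enable bit 4,
++	// not-last-entry bit 1, interrupt enable bit 0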
++ pDAddress->dw0General = ((wStride * bytesPerPixel) << 16) | (dpm << 13)
++ | (sbp << 10) | (skipBM << 8) | (bRLEOverFLow << 7)
++ | (bCheckSum << 5) | (bEnabledRLE << 4) | (bNotLastEntry << 1)
++ | bInterrupt;
++ pDAddress->dw1FetchWidthLine = ((dwFetchHeight - 1) << 16)
++ | (dwFetchWidthPixels * bytesPerPixel - 1);
++ pDAddress->dw2SourceAddr = (u32)source_addr & 0xfffffffc;
++ pDAddress->dw3DestinationAddr = (u32)dest_addr & 0xfffffffc;
++
++ HW_ENG_DBG("After SETTING TFE Descriptor\n");
++ HW_ENG_DBG("u32 0: 0x%x\n", pDAddress->dw0General);
++ HW_ENG_DBG("u32 1: 0x%x\n", pDAddress->dw1FetchWidthLine);
++ HW_ENG_DBG("u32 2: 0x%x\n", pDAddress->dw2SourceAddr);
++ HW_ENG_DBG("u32 3: 0x%x\n", pDAddress->dw3DestinationAddr);
++}
++
++void prepare_tfe_text_descriptor(struct Descriptor *pDAddress, phys_addr_t source_addr,
++ phys_addr_t dest_addr, bool bEnabledRLE, u32 dwFetchWidth,
++ u32 dwFetchHeight, enum DataProccessMode dpm,
++ bool bRLEOverFLow, bool bInterrupt)
++{
++ // initialize to 0
++ pDAddress->dw0General = 0x00;
++ pDAddress->dw1FetchWidthLine = 0x00;
++ pDAddress->dw2SourceAddr = 0x00;
++ pDAddress->dw3DestinationAddr = 0x00;
++
++ if (dwFetchHeight & 0x3)
++ dwFetchHeight = ((dwFetchHeight + 3) >> 2) << 2;
++
++ pDAddress->dw0General = (dwFetchWidth << 16) | (dpm << 13)
++ | (bRLEOverFLow << 7) | (1 << 5) | (bEnabledRLE << 4)
++ | bInterrupt;
++ pDAddress->dw1FetchWidthLine = ((dwFetchHeight - 1) << 16)
++ | (dwFetchWidth - 1);
++ pDAddress->dw2SourceAddr = (u32)source_addr & 0xfffffffc;
++ pDAddress->dw3DestinationAddr = (u32)dest_addr & 0xfffffffc;
++
++ HW_ENG_DBG("u32 0: 0x%x\n", pDAddress->dw0General);
++ HW_ENG_DBG("u32 1: 0x%x\n", pDAddress->dw1FetchWidthLine);
++ HW_ENG_DBG("u32 2: 0x%x\n", pDAddress->dw2SourceAddr);
++ HW_ENG_DBG("u32 3: 0x%x\n", pDAddress->dw3DestinationAddr);
++}
++
++void on_fetch_mode_13_data(struct AstRVAS *pAstRVAS, struct RvasIoctl *ri, bool bRLEOn)
++{
++ struct Descriptor *pDescriptorAdd;
++ struct Descriptor *pDescriptorAddPhys;
++ phys_addr_t source_addr = get_phy_fb_start_address(pAstRVAS);
++ phys_addr_t dest_addr;
++ bool bRLEOverFlow = false;
++ bool bNotLastEntry = false;
++ bool bInterrupt = 1;
++ u32 dwFetchHeight = MODE13_HEIGHT;
++ u32 dwFetchWidth = MODE13_WIDTH;
++ phys_addr_t data_phys = 0;
++ phys_addr_t data_phys_rle = 0;
++ u32 dwCtrlRegValue = 0x55AA0080;
++ void *desc_virt = NULL;
++ phys_addr_t desc_phy = 0;
++ struct ContextTable *ctx_entry = NULL;
++
++ HW_ENG_DBG("Start, bRLEOn: %d\n", bRLEOn);
++
++ ctx_entry = get_context_entry(ri->rc, pAstRVAS);
++
++ if (ctx_entry) {
++ desc_virt = ctx_entry->desc_virt;
++ desc_phy = ctx_entry->desc_phy;
++ } else {
++ pr_err("Mode 13: Failed to get context\n");
++ ri->rs = InvalidContextHandle;
++ return;
++ }
++
++ ri->tfm.dwFetchSize = MODE13_HEIGHT * MODE13_WIDTH;
++
++ data_phys = get_phys_add_rsvd_mem((u32)ri->rmh, pAstRVAS);
++ data_phys_rle = get_phys_add_rsvd_mem((u32)ri->rmh1, pAstRVAS);
++
++	if (!data_phys || !data_phys_rle) {
++		ri->rs = InvalidMemoryHandle;
++		dev_err(pAstRVAS->pdev, "Mode 13: Invalid memory handle\n");
++		return;
++	}
++
++ pDescriptorAdd = desc_virt;
++ pDescriptorAddPhys = (struct Descriptor *)desc_phy;
++
++ HW_ENG_DBG("\n===========MODE 13 FETCHED DATA===========\n");
++
++ // Enable TFE
++ dwCtrlRegValue |= 1;
++ dwCtrlRegValue &= TFCTL_DESCRIPTOR_IN_DDR_MASK;
++ dest_addr = data_phys;
++ prepare_tfe_descriptor(pDescriptorAdd, source_addr,
++ dest_addr,
++ false, //is last entry
++ 1, //checksum
++ false, //No RLE
++ dwFetchWidth,
++ 1, //bytes per pixel
++ dwFetchWidth, dwFetchHeight,
++ PackedToPackedMode, bRLEOverFlow,
++ 1);
++
++ dwCtrlRegValue |= 1 << 1; // enabled IRQ
++
++ if (sleep_on_tfe_busy(pAstRVAS, (phys_addr_t)pDescriptorAddPhys, // Descriptor physical Address
++ dwCtrlRegValue, // control register value
++			      ri->tfm.dwFetchSize, // bandwidth limiter value
++ &ri->tfm.dwFetchRLESize, // out:: rle size
++ &ri->tfm.dwCheckSum) == false) { // out:: cs size
++ ri->rs = GenericError;
++ return;
++ }
++
++ // RLE enabled
++ if (bRLEOn) {
++ bRLEOverFlow = true;
++ dwCtrlRegValue = 1;
++ dwCtrlRegValue |= (ri->tfm.byRLETripletCode << 24)
++ | (ri->tfm.byRLERepeatCode << 16);
++ source_addr = data_phys;
++ dest_addr = data_phys_rle;
++ HW_ENG_DBG("RLE is on\n");
++
++ prepare_tfe_descriptor(pDescriptorAdd, source_addr,
++ dest_addr,
++ bNotLastEntry, //not last entry?
++ 1, //checksum
++ bRLEOn, //RLE?
++ dwFetchWidth, 1, dwFetchWidth, dwFetchHeight,
++ AllBytesMode, bRLEOverFlow, bInterrupt);
++
++ dwCtrlRegValue |= 1 << 1; // enabled IRQ
++
++ if (sleep_on_tfe_busy(pAstRVAS, (phys_addr_t)pDescriptorAddPhys, // Descriptor physical Address
++ dwCtrlRegValue, // control register value
++				      ri->tfm.dwFetchSize, // bandwidth limiter value
++ &ri->tfm.dwFetchRLESize, // out:: rle size
++ &ri->tfm.dwCheckSum) == false) { // out:: cs size
++ ri->rs = GenericError;
++ return;
++ } //sleeponTFEBusy
++ }
++
++ if (bRLEOn)
++ ri->tfm.bRLEFailed =
++ (ri->tfm.dwFetchRLESize < ri->tfm.dwFetchSize) ?
++ false : true;
++}
++
++void ioctl_fetch_mode_13_data(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS)
++{
++ bool bRLEOn = ri->tfm.bEnableRLE;
++
++ ri->rs = SuccessStatus;
++
++ // first time fetch
++ on_fetch_mode_13_data(pAstRVAS, ri, bRLEOn);
++
++ if (ri->rs != SuccessStatus)
++ return;
++
++ //if RLE fail. need to TFE without RLE to first buffer
++	if (ri->tfm.bEnableRLE && ri->tfm.bRLEFailed) {
++ bRLEOn = false;
++ on_fetch_mode_13_data(pAstRVAS, ri, bRLEOn);
++ }
++}
++
++
++
++// Enable Snoop Interrupts and TSE, Disable FIQ
++static void enable_tse_interrupt(struct AstRVAS *pAstRVAS)
++{
++ u32 reg_val = 0;
++ void __iomem *reg_addr = pAstRVAS->fg_reg_base
++ + TSE_SnoopCommand_Register_Offset;
++
++ reg_val = readl(reg_addr);
++ reg_val |= SNOOP_IRQ_MASK;
++ reg_val &= ~SNOOP_FIQ_MASK;
++
++ HW_ENG_DBG("Enabled TSE Interrupts[%#X]\n", reg_val);
++ writel(reg_val, reg_addr);
++ pAstRVAS->tse_tsicr = TSE_INTR_COUNT;
++ reg_addr = pAstRVAS->fg_reg_base
++ + TSE_TileSnoop_Interrupt_Count;
++ //set max wait time before interrupt
++ writel(pAstRVAS->tse_tsicr, reg_addr);
++}
++
++//disable tse interrupt
++static void disable_tse_interrupt(struct AstRVAS *pAstRVAS)
++{
++ u32 reg_val = 0;
++ void __iomem *reg_addr = pAstRVAS->fg_reg_base + TSE_SnoopCommand_Register_Offset;
++
++ // Disable Snoop Interrupts and TSE, Disable FIQ
++ reg_val = readl(reg_addr);
++ HW_ENG_DBG("disable interrupt\n");
++ reg_val &= ~(SNOOP_IRQ_MASK | SNOOP_FIQ_MASK);
++ writel(reg_val, reg_addr);
++}
++
++static void enable_grce_interrupt(struct AstRVAS *pAstRVAS)
++{
++ u32 reg_val = 0;
++ void __iomem *reg_addr = pAstRVAS->grce_reg_base + GRCE_CTL0;
++
++ reg_val = readl(reg_addr);
++ reg_val |= GRC_IRQ_MASK;
++ writel(reg_val, reg_addr);
++ HW_ENG_DBG("Enabled GRC Interrupts[%#X]\n", reg_val);
++}
++
++//enable all interrupts
++void enable_grce_tse_interrupt(struct AstRVAS *pAstRVAS)
++{
++ enable_grce_interrupt(pAstRVAS);
++ enable_tse_interrupt(pAstRVAS);
++}
++
++void disable_grce_tse_interrupt(struct AstRVAS *pAstRVAS)
++{
++ u32 reg_val = 0;
++
++ HW_ENG_DBG("disable_interrupts- grce_reg_base: %p GRCE_CTL0: %#x\n",
++ pAstRVAS->grce_reg_base, GRCE_CTL0);
++ reg_val = readl(pAstRVAS->grce_reg_base + GRCE_CTL0);
++ writel(reg_val & (~GRC_IRQ_MASK), pAstRVAS->grce_reg_base + GRCE_CTL0);
++ disable_tse_interrupt(pAstRVAS);
++}
++
++u32 clear_tse_interrupt(struct AstRVAS *pAstRVAS)
++{
++ u32 tse_sts = 0;
++ u32 tse_tile_status = 0;
++ u32 tse_snoop_ctrl = 0;
++ void __iomem *tse_ctrl_addr = pAstRVAS->fg_reg_base + TSE_SnoopCommand_Register_Offset;
++
++	HW_ENG_DBG("clear tse interrupt\n");
++ tse_sts = readl(pAstRVAS->fg_reg_base + TSE_Status_Register_Offset);
++ tse_snoop_ctrl = readl(pAstRVAS->fg_reg_base + TSE_SnoopCommand_Register_Offset);
++
++ if (tse_sts & (TSSTS_TC_SCREEN0 | TSSTS_TC_SCREEN1)) {
++ if (tse_sts & TSSTS_TC_SCREEN0) {
++ HW_ENG_DBG("Snoop** Update Screen 0\n");
++ // clear interrupt and switch to screen 1
++ tse_snoop_ctrl |= TSCMD_SCREEN_OWNER;
++ writel(tse_sts, pAstRVAS->fg_reg_base + TSE_Status_Register_Offset);
++ writel(tse_snoop_ctrl, tse_ctrl_addr);
++
++ } else if (tse_sts & TSSTS_TC_SCREEN1) {
++ HW_ENG_DBG("Snoop** Update Screen 1\n");
++ tse_snoop_ctrl &= ~TSCMD_SCREEN_OWNER; // snap shutter
++ // clear status
++ writel(tse_sts, pAstRVAS->fg_reg_base + TSE_Status_Register_Offset);
++ // clear interrupt and switch to screen 1
++ writel(tse_snoop_ctrl, tse_ctrl_addr);
++ }
++ // read clear interrupt
++ tse_tile_status = readl(pAstRVAS->fg_reg_base
++ + TSE_TileCount_Register_Offset);
++
++ if (tse_sts & TSSTS_FIFO_OVFL) {
++ //need to send full frame
++ dev_err(pAstRVAS->pdev, "TSE snoop fifo overflow\n");
++ writel(TSSTS_FIFO_OVFL, pAstRVAS->fg_reg_base + TSE_Status_Register_Offset);
++ memset((void *)pAstRVAS->accrued_sm, 0xff, sizeof(pAstRVAS->accrued_sm));
++ memset((void *)&pAstRVAS->accrued_sa, 0xff,
++ sizeof(pAstRVAS->accrued_sa));
++ } else {
++ get_snoop_map_data(pAstRVAS);
++ }
++ }
++ return tse_sts;
++}
++
++// LDMA interrupt
++bool clear_ldma_interrupt(struct AstRVAS *pAstRVAS)
++{
++ u32 ldma_sts = 0;
++
++ ldma_sts = readl(pAstRVAS->fg_reg_base + LDMA_Status_Register);
++
++ if (ldma_sts & 0x02) {
++ //HW_ENG_DBG("Got a LDMA interrupt\n");
++ // write 1 to clear the interrupt
++ writel(0x2, pAstRVAS->fg_reg_base + LDMA_Status_Register);
++ return true;
++ }
++ return false;
++}
++
++bool clear_tfe_interrupt(struct AstRVAS *pAstRVAS)
++{
++ u32 tfe_sts = 0;
++
++ tfe_sts = readl(pAstRVAS->fg_reg_base + TFE_Status_Register);
++
++ if (tfe_sts & 0x02) {
++ // HW_ENG_DBG("Debug: TFSTS Interrupt is triggered\n");
++ writel(0x2, pAstRVAS->fg_reg_base + TFE_Status_Register);
++ return true;
++ }
++ return false;
++}
++
++bool clear_bse_interrupt(struct AstRVAS *pAstRVAS)
++{
++ u32 bse_sts = 0;
++
++ bse_sts = readl(pAstRVAS->fg_reg_base + BSE_Status_Register);
++
++ if (bse_sts & 0x02) {
++ writel(0x2, pAstRVAS->fg_reg_base + BSE_Status_Register);
++ return true;
++ }
++ return false;
++}
++
++void setup_lmem(struct AstRVAS *pAstRVAS)
++{
++ writel(0x0, pAstRVAS->fg_reg_base + LMEM_BASE_REG_3);
++ writel(0x2000, pAstRVAS->fg_reg_base + LMEM_LIMIT_REG_3);
++ writel(0x9c89c8, pAstRVAS->fg_reg_base + LMEM11_P0);
++ writel(0x9c89c8, pAstRVAS->fg_reg_base + LMEM12_P0);
++ writel(0xf3cf3c, pAstRVAS->fg_reg_base + LMEM11_P1);
++ writel(0x067201, pAstRVAS->fg_reg_base + LMEM11_P2);
++ writel(0x00F3CF3C, pAstRVAS->fg_reg_base + LMEM10_P1);
++ writel(0x00067201, pAstRVAS->fg_reg_base + LMEM10_P2);
++}
++
++bool host_suspended(struct AstRVAS *pAstRVAS)
++{
++ u32 GRCE18 = readl(pAstRVAS->grce_reg_base + GRCE_ATTR_VGAIR0_OFFSET);
++
++	// The 32-bit read at GRCE18 also covers VGAER (GRCE19).
++	// VGAER bit[0]: 0 - VGA disabled (host suspended), 1 - VGA enabled;
++	// it shows up here as bit 8 of the value read.
++ HW_ENG_DBG("GRCE18:%#x\n", GRCE18);
++ if (GRCE18 & 0x100)
++ return false;
++ else
++ return true;
++}
++
+diff --git a/drivers/soc/aspeed/rvas/hardware_engines.h b/drivers/soc/aspeed/rvas/hardware_engines.h
+new file mode 100644
+index 000000000..a0564052b
+--- /dev/null
++++ b/drivers/soc/aspeed/rvas/hardware_engines.h
+@@ -0,0 +1,551 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * This file is part of the ASPEED Linux Device Driver for ASPEED Baseboard Management Controller.
++ * Refer to the README file included with this package for driver version and adapter compatibility.
++ *
++ * Copyright (C) 2019-2021 ASPEED Technology Inc. All rights reserved.
++ */
++
++#ifndef __HARDWAREENGINES_H__
++#define __HARDWAREENGINES_H__
++
++#include <linux/semaphore.h>
++#include <linux/miscdevice.h>
++#include "video_ioctl.h"
++
++#define MAX_NUM_CONTEXT (8)
++#define MAX_NUM_MEM_TBL (24)//each context has 3
++
++#define MAX_DESC_SIZE (PAGE_SIZE) // (0x400)
++
++#define ENGINE_TIMEOUT_IN_SECONDS (3)
++#define TFE_TIMEOUT_IN_MS (750)
++#define DESCRIPTOR_SIZE (16)
++#define TILE_SIZE (32)
++#define MAX_LMEM_BUCKET_SIZE (0x80)
++
++#define EIGHT_BYTE_ALIGNMENT_MASK (0xfffffff7)
++#define SIXTEEN_BYTE_ALIGNMENT_MASK (0x8)
++#define TFCTL_DESCRIPTOR_IN_DDR_MASK (0xffffff7f)
++#define BSCMD_MASK (0xffff0f37)
++
++#define TEXT_MODE_BUFFER_ALIGNMENT (16)
++#define MODE_13_CHAR_WIDTH (32)
++#define BSE_MEMORY_ACCESS_MASK (0x00ffffff)
++#define MEM_TABLE_SIZE_INCR (8)
++#define MEMORY_TABLE_GROW_INCR (8)
++
++#define MAX_TEXT_DATA_SIZE (8192)
++
++
++// For 2700
++//#define SCU200_System_Reset_Control_Register (0x200)
++#define SCU204_System_Reset_Control_Clear_Register (0x204)
++#define SCU240_Clock_Stop_Control_Register (0x240)
++#define SCU244_Clock_Stop_Control_Clear_Register (0x244)
++//#define SCU500_Hardware_Strap1_Register (0x500)
++//TO DO local monitor on off
++//single node - vga and dp
++//dual node- node 0- vga only, node 1- dp only
++#define SCU000_Silicon_Revision_ID (0x0)
++#define SCU448_Pin_Ctrl (0x448)
++//#define SCU0C0_Misc1_Ctrl (0x0C0)
++//#define SCU0D0_Misc3_Ctrl (0x0D0)
++ //SCU448 IO
++#define VGAVS_ENBL_27 (0x70000000)
++#define VGAHS_ENBL_27 (0x7000000)
++//SCU0C0
++#define VGA0_CRT_DISBL BIT(1)
++#define VGA1_CRT_DISBL BIT(2)
++//SCU0D0 IO
++#define VGA0_PWR_OFF_VDAC BIT(2)
++#define VGA1_PWR_OFF_VDAC BIT(3)
++
++#define SCU_RVAS1_ENGINE_BIT BIT(10)
++#define SCU_RVAS0_ENGINE_BIT BIT(9)
++#define SCU_RVAS1_STOP_CLOCK_BIT BIT(28)
++#define SCU_RVAS0_STOP_CLOCK_BIT BIT(25)
++
++// For 2600
++//SCU
++#define SCU000_Protection_Key_Register (0x000)
++#define SCU040_Module_Reset_Control_Register_Set_1 (0x040)
++#define SCU044_Module_Reset_Control_Clear_Register_1 (0x044)
++#define SCU080_Clock_Stop_Control_Register_Set_1 (0x080)
++#define SCU084_Clock_Stop_Control_Clear_Register (0x084)
++#define SCU500_Hardware_Strap1_Register (0x500)
++#define SCU418_Pin_Ctrl (0x418)
++#define SCU0C0_Misc1_Ctrl (0x0C0)
++#define SCU0D0_Misc3_Ctrl (0x0D0)
++//SCU418
++#define VGAVS_ENBL BIT(31)
++#define VGAHS_ENBL BIT(30)
++//SCU0C0
++#define VGA_CRT_DISBL BIT(6)
++//SCU0D0
++#define PWR_OFF_VDAC BIT(3)
++
++#define SCU_UNLOCK_PWD (0x1688A8A8)
++#define SCU_RVAS_ENGINE_BIT BIT(9)
++#define SCU_RVAS_STOP_CLOCK_BIT BIT(25)
++//
++//MCR -edac
++#define MCR_CONF 0x04 /* configuration register */
++
++//DP
++#define DPTX_Configuration_Register (0x100)
++#define DPTX_PHY_Configuration_Register (0x104)
++//DPTX100
++#define AUX_RESETN (24)
++//DPTX104
++#define DP_TX_I_MAIN_ON (8)
++
++//TOP REG
++#define TOP_REG_OFFSET (0x0)
++#define TOP_REG_CTL (TOP_REG_OFFSET + 0x00)
++#define TOP_REG_STS (TOP_REG_OFFSET + 0x04)
++#define LMEM_BASE_REG_3 (TOP_REG_OFFSET + 0x2c)
++#define LMEM_LIMIT_REG_3 (TOP_REG_OFFSET + 0x3c)
++#define LMEM11_P0 (TOP_REG_OFFSET + 0x4c)
++#define LMEM12_P0 (TOP_REG_OFFSET + 0x50)
++#define LMEM10_P1 (TOP_REG_OFFSET + 0x80)
++#define LMEM11_P1 (TOP_REG_OFFSET + 0x84)
++#define LMEM10_P2 (TOP_REG_OFFSET + 0xA0)
++#define LMEM11_P2 (TOP_REG_OFFSET + 0xA4)
++
++#define TSE_SnoopCommand_Register_Offset (0x0400)
++#define TSE_TileCount_Register_Offset (0x0418)
++#define TSE_Status_Register_Offset (0x0404)
++#define TSE_CS0Reg (0x0408)
++#define TSE_CS1Reg (0x040c)
++#define TSE_RS0Reg (0x0410)
++#define TSE_RS1Reg (0x0414)
++#define TSE_TileSnoop_Interrupt_Count (0x0420)
++#define TSE_FrameBuffer_Offset (0x041c)
++#define TSE_UpperLimit_Offset (0x0424)
++#define TSE_SnoopMap_Offset (0x0600)
++
++#define TFE_Descriptor_Table_Offset (0x0108)
++#define TFE_Descriptor_Control_Resgister (0x0100)
++#define TFE_Status_Register (0x0104)
++#define TFE_RLE_CheckSum (0x010C)
++#define TFE_RLE_Byte_Count (0x0110)
++#define TFE_RLE_LIMITOR (0x0114)
++
++#define BSE_REG_BASE (0x0200)
++#define BSE_Command_Register (0x0200)
++#define BSE_Status_Register (0x0204)
++#define BSE_Descriptor_Table_Base_Register (0x0208)
++#define BSE_Destination_Buket_Size_Resgister (0x020c)
++#define BSE_Bit_Position_Register_0 (0x0210)
++#define BSE_Bit_Position_Register_1 (0x0214)
++#define BSE_Bit_Position_Register_2 (0x0218)
++#define BSE_LMEM_Temp_Buffer_Offset (0x0000)
++#define BSE_ENABLE_MULT_BUCKET_SZS BIT(12)
++#define BSE_BUCK_SZ_INDEX_POS (4)
++#define BSE_MAX_BUCKET_SIZE_REGS (16)
++#define BSE_BIT_MASK_Register_Offset (0x54)
++
++#define LDMA_Control_Register (0x0300)
++#define LDMA_Status_Register (0x0304)
++#define LDMA_Descriptor_Table_Base_Register (0x0308)
++#define LDMA_CheckSum_Register (0x030c)
++#define LDMA_LMEM_Descriptor_Offset (0x4000)
++
++//Shadow
++#define GRCE_SIZE (0x800)
++#define GRCE_ATTR_OFFSET (0x0)
++#define GRCE_ATTR_VGAIR0_OFFSET (0x18)
++#define GRCE_SEQ_OFFSET (0x20)
++#define GRCE_GCTL_OFFSET (0x30)
++#define GRCE_GRCCTL0_OFFSET (0x58)
++#define GRCE_GRCSTS_OFFSET (0x5c)
++#define GRCE_CRTC_OFFSET (0x60)
++#define GRCE_CRTCEXT_OFFSET (0x80)
++#define GRCE_XCURCTL_OFFSET (0xc8)
++#define GRCE_PAL_OFFSET (0x400)
++//size
++#define GRCELT_RAM_SIZE (0x400)
++#define GRCE_XCURCOL_SIZE (0x40)
++#define GRCE_XCURCTL_SIZE (0x40)
++#define GRCE_CRTC_SIZE (0x40)
++#define GRCE_CRTCEXT_SIZE (0x8)
++#define GRCE_SEQ_SIZE (0x8)
++#define GRCE_GCTL_SIZE (0x8)
++#define GRCE_ATTR_SIZE (0x20)
++
++#define GRCELT_RAM (GRCE_PAL_OFFSET)
++#define GRCE_XCURCTL (GRCE_XCURCTL_OFFSET)
++#define GRCE_CRTC (GRCE_CRTC_OFFSET)
++#define GRCE_CRTCEXT (GRCE_CRTCEXT_OFFSET)
++#define GRCE_SEQ (GRCE_SEQ_OFFSET)
++#define GRCE_GCTL (GRCE_GCTL_OFFSET)
++#define GRCE_CTL0 (GRCE_GRCCTL0_OFFSET)
++#define GRCE_STATUS_REGISTER (GRCE_GRCSTS_OFFSET)
++#define GRCE_ATTR (GRCE_ATTR_OFFSET)
++#define AST_VIDEO_SCRATCH_34C (0x8c)
++#define AST_VIDEO_SCRATCH_350 (0x90)
++#define AST_VIDEO_SCRATCH_354 (0x94)
++#define MODE_GET_INFO_DE (0xA8)
++
++//GRC interrupt
++#define GRC_FIQ_MASK (0x000003ff)
++#define GRC_IRQ_MASK (0x000003ff)
++#define GRC_INT_STS_MASK (0x000003ff)
++#define GRCSTS_XCUR_POS BIT(9)
++#define GRCSTS_XCUR_DDR BIT(8)
++#define GRCSTS_XCUR_CTL BIT(7)
++#define GRCSTS_PLT_RAM BIT(6)
++#define GRCSTS_XCRTC BIT(5)
++#define GRCSTS_CRTC BIT(4)
++#define GRCSTS_GCTL BIT(3)
++#define GRCSTS_SEQ BIT(2)
++#define GRCSTS_ATTR1 BIT(1)
++#define GRCSTS_ATTR0 BIT(0)
++#define SNOOP_RESTART (GRCSTS_XCUR_CTL | GRCSTS_XCRTC | GRCSTS_CRTC | GRCSTS_GCTL)
++
++//snoop TSE
++#define SNOOP_TSE_MASK (0x00000001)
++#define SNOOP_IRQ_MASK (0x00000100)
++#define SNOOP_FIQ_MASK (0x00000200)
++#define TSCMD_SCREEN_OWNER BIT(15)
++#define TSCMD_PITCH_BIT (16)
++#define TSCMD_INT_ENBL_BIT (8)
++#define TSCMD_CPT_BIT (6)
++#define TSCMD_RPT_BIT (4)
++#define TSCMD_BPP_BIT (2)
++#define TSCMD_VGA_MODE_BIT (1)
++#define TSCMD_TSE_ENBL_BIT (0)
++#define TSSTS_FIFO_OVFL BIT(5)
++#define TSSTS_FONT BIT(4)
++#define TSSTS_ATTR BIT(3)
++#define TSSTS_ASCII BIT(2)
++#define TSSTS_TC_SCREEN1 BIT(1)
++#define TSSTS_TC_SCREEN0 BIT(0)
++#define TSSTS_ALL (0x3f)
++
++#define TSE_INTR_COUNT (0xCB700) //50MHz clock ~1/60 sec
++//#define TSE_INTR_COUNT (0x196E00) //50MHz clock ~1/30 sec
++#define TIMER_INTR_COUNT (0x65000) // 25MHz clock ~1/60 sec
++
++#ifdef CONFIG_MACH_ASPEED_G6
++//Timer
++/* Register byte offsets */
++// AST2600 Timer registers
++#define TIMER_STATUS_BIT(x) (1 << ((x) - 1))
++
++#define OFFSET_TIMER1		0x00 /* timer 1 offset */
++#define OFFSET_TIMER2		0x10 /* timer 2 offset */
++#define OFFSET_TIMER3		0x20 /* timer 3 offset */
++#define OFFSET_TIMER4		0x40 /* timer 4 offset */
++#define OFFSET_TIMER5		0x50 /* timer 5 offset */
++#define OFFSET_TIMER6		0x60 /* timer 6 offset */
++#define OFFSET_TIMER7		0x70 /* timer 7 offset */
++#define OFFSET_TIMER8		0x80 /* timer 8 offset */
++
++#define OFF_TIMER_REG_CURR_CNT 0x00
++#define OFF_TIMER_REG_LOAD_CNT 0x04
++#define OFF_TIMER_REG_EO0 0x08 /* Read to clear interrupt */
++#define OFF_TIMER_REG_EOI 0x0c /* Read to clear interrupt */
++#define OFF_TIMER_REG_STAT 0x10 /* Timer Interrupt Status */
++#define OFF_TIMER_REG_CONTROL 0x30 /* Control Register */
++#define OFF_TIMER_REG_STATUS 0x34 /* Status Register */
++#define OFF_TIMER_REG_CLEAR_CONTROL 0x3C /* Control Register */
++#define RB_OFF_TIMERS_STAT	0xA0 /* timers status offset */
++
++#define CTRL_TIMER1 (0)
++#define CTRL_TIMER2 (4)
++#define CTRL_TIMER3 (8)
++#define CTRL_TIMER4 (12)
++#define CTRL_TIMER5 (16)
++#define CTRL_TIMER6 (20)
++#define CTRL_TIMER7 (24)
++#define CTRL_TIMER8 (28)
++#define BIT_TIMER_ENBL BIT(0)
++#define BIT_TIMER_CLK_SEL BIT(1)
++#define BIT_INTERRUPT_ENBL BIT(2)
++#define BIT_TIMER_STAT BIT(0)
++#endif
++
++#define SNOOP_MAP_QWORD_COUNT (64)
++#define BSE_UPPER_LIMIT (0x900000) //(0x540000)
++#define FULL_BUCKETS_COUNT (16)
++#define MODE13_HEIGHT (200)
++#define MODE13_WIDTH (320)
++
++#define NUM_SNOOP_ROWS (64)
++
++//vga memory information
++#define DDR_SIZE_CONFIG_BITS (0x3)
++#define VGA_MEM_SIZE_CONFIG_BITS (0x3)
++#define DDR_BASE_27 (0x400000000)
++#define DDR_BASE (0x80000000)
++
++//grce
++#define VGACR0_REG (0x60)
++#define VGACR9F_REG (0x9F)
++
++//display out
++#define VGA_OUT BIT(0)
++#define DP_OUT BIT(1)
++
++struct ContextTable {
++ struct inode *pin;
++ struct file *pf;
++ struct SnoopAggregate sa;
++ u64 aqwSnoopMap[NUM_SNOOP_ROWS];
++ void *rc;
++ struct EventMap emEventWaiting;
++ struct EventMap emEventReceived;
++ u32 dwEventWaitInMs;
++ void *desc_virt;
++ phys_addr_t desc_phy;
++};
++
++struct MemoryMapTable {
++ struct file *pf;
++ void *pvVirtualAddr;
++ dma_addr_t mem_phys;
++ u32 dwLength;
++ u8 byDmaAlloc;
++ u8 byReserved[3];
++};
++
++union EmDwordUnion {
++ struct EventMap em;
++ u32 dw;
++};
++
++struct Descriptor {
++ u32 dw0General;
++ u32 dw1FetchWidthLine;
++ u32 dw2SourceAddr;
++ u32 dw3DestinationAddr;
++};
++
++struct BSEAggregateRegister {
++ u32 dwBSCR;
++ u32 dwBSDBS;
++ u32 adwBSBPS[3];
++};
++
++enum SkipByteMode {
++ NoByteSkip = 0, SkipOneByte = 1, SkipTwoByte = 2, SkipThreeByte = 3
++};
++
++enum StartBytePosition {
++ StartFromByte0 = 0,
++ StartFromByte1 = 1,
++ StartFromByte2 = 2,
++ StartFromByte3 = 3
++};
++
++struct VGAMemInfo {
++ u32 dwVGASize;
++ u32 dwDRAMSize;
++ phys_addr_t qwFBPhysStart;
++};
++
++struct VideoDataBufferInfo {
++ u32 dwSize;
++ phys_addr_t dwPhys;
++ phys_addr_t dwVirt;
++};
++
++enum ColorMode {
++ MODE_EGA = 0x0, //4bpp eg. mode 12/6A
++ MODE_VGA = 0x1, //mode 13
++ MODE_BPP15 = 0x2,
++ MODE_BPP16 = 0x3,
++ MODE_BPP32 = 0x4,
++ MODE_TEXT = 0xE,
++ MODE_CGA = 0xF
++};
++
++struct ModeInfo {
++ u8 byColorMode;
++ u8 byRefreshRateIndex;
++ u8 byModeID;
++ u8 byScanLines;
++};
++
++struct NewModeInfoHeader {
++ u8 byReserved;
++ u8 byDisplayInfo;
++ u8 byColorDepth;
++ u8 byMhzPixelClock;
++};
++
++struct DisplayEnd {
++ u16 HDE;
++ u16 VDE;
++};
++
++struct Resolution {
++ u16 wWidth;
++ u16 wHeight;
++};
++
++struct Video_OsSleepStruct {
++ wait_queue_head_t queue;
++ struct timer_list tim;
++ u8 Timeout;
++};
++
++struct EngineInfo {
++ struct semaphore sem;
++ struct Video_OsSleepStruct wait;
++ u8 finished;
++};
++
++struct VideoMem {
++ dma_addr_t phy;
++ void *pVirt;
++ u32 size;
++};
++
++struct VideoEngineMem {
++ struct VideoMem captureBuf0;
++ struct VideoMem captureBuf1;
++ struct VideoMem jpegTable;
++};
++
++struct aspeed_rvas_config {
++ u8 version;
++ const u32 *dram_table;
++};
++
++struct AstRVAS {
++ struct miscdevice rvas_dev;
++ struct aspeed_rvas_config *config;
++ void *pdev;
++ int irq_fge; //FrameGrabber IRQ number
++ int irq_vga; // VGA IRQ number
++ int irq_video;
++ void __iomem *fg_reg_base;
++ void __iomem *grce_reg_base;
++ void __iomem *video_reg_base;
++ struct regmap *scu;
++ struct regmap *scu_io;
++ struct reset_control *rvas_reset;
++ struct reset_control *video_engine_reset;
++ struct VGAMemInfo FBInfo;
++ u64 accrued_sm[SNOOP_MAP_QWORD_COUNT];
++ struct SnoopAggregate accrued_sa;
++ struct VideoGeometry current_vg;
++ u32 snoop_stride;
++ u32 tse_tsicr;
++ struct EngineInfo tfe_engine;
++ struct EngineInfo bse_engine;
++ struct EngineInfo ldma_engine;
++ struct EngineInfo video_engine;
++ struct semaphore mem_sem;
++ struct semaphore context_sem;
++ struct Video_OsSleepStruct video_wait;
++ u8 video_intr_occurred;
++ u8 timer_irq_requested;
++ u8 display_out;
++ u8 rvas_index;
++ struct ContextTable *ppctContextTable[MAX_NUM_CONTEXT];
++ u32 dwMemoryTableSize;
++ u32 dwScreenOffset;
++ struct MemoryMapTable *ppmmtMemoryTable[MAX_NUM_MEM_TBL];
++ struct completion video_compression_complete;
++ struct completion video_capture_complete;
++ struct clk *vclk;
++ struct clk *eclk;
++ struct clk *rvasclk;
++ void __iomem *dp_base;
++ u32 sequence;
++ struct VideoEngineMem vem;
++ u8 veClkOn;
++};
++
++//
++// IOCTL functions
++//
++void ioctl_get_video_geometry(struct RvasIoctl *ri, struct AstRVAS *ast_rvas);
++void ioctl_wait_for_video_event(struct RvasIoctl *ri, struct AstRVAS *ast_rvas);
++void ioctl_get_grc_register(struct RvasIoctl *ri, struct AstRVAS *ast_rvas);
++void ioctl_read_snoop_map(struct RvasIoctl *ri, struct AstRVAS *ast_rvas);
++void ioctl_read_snoop_aggregate(struct RvasIoctl *ri, struct AstRVAS *ast_rvas);
++void ioctl_set_tse_tsicr(struct RvasIoctl *ri, struct AstRVAS *ast_rvas);
++void ioctl_get_tse_tsicr(struct RvasIoctl *ri, struct AstRVAS *ast_rvas);
++void ioctl_reset_video_engine(struct RvasIoctl *ri, struct AstRVAS *ast_rvas);
++
++//video fetch functions
++void ioctl_fetch_video_tiles(struct RvasIoctl *ri, struct AstRVAS *ast_rvas);
++void ioctl_fetch_video_slices(struct RvasIoctl *ri, struct AstRVAS *ast_rvas);
++void ioctl_run_length_encode_data(struct RvasIoctl *ri, struct AstRVAS *ast_rvas);
++void ioctl_fetch_text_data(struct RvasIoctl *ri, struct AstRVAS *ast_rvas);
++void ioctl_fetch_mode_13_data(struct RvasIoctl *ri, struct AstRVAS *ast_rvas);
++phys_addr_t get_phy_fb_start_address(struct AstRVAS *ast_rvas);
++bool video_geometry_change(struct AstRVAS *ast_rvas, u32 dwGRCEStatus);
++void update_video_geometry(struct AstRVAS *ast_rvas);
++
++//interrupts
++void enable_grce_tse_interrupt(struct AstRVAS *ast_rvas);
++void disable_grce_tse_interrupt(struct AstRVAS *ast_rvas);
++u32 clear_tse_interrupt(struct AstRVAS *ast_rvas);
++bool clear_ldma_interrupt(struct AstRVAS *ast_rvas);
++bool clear_tfe_interrupt(struct AstRVAS *ast_rvas);
++bool clear_bse_interrupt(struct AstRVAS *ast_rvas);
++u32 get_screen_offset(struct AstRVAS *ast_rvas);
++//
++void setup_lmem(struct AstRVAS *ast_rvas);
++//
++// helper functions
++//
++
++struct BSEAggregateRegister setUp_bse_bucket(u8 *abyBitIndexes, u8 byTotalBucketCount,
++ u8 byBSBytesPerPixel, u32 dwFetchWidthPixels,
++ u32 dwFetchHeight);
++void prepare_bse_descriptor(struct Descriptor *pDAddress, phys_addr_t source_addr,
++ phys_addr_t dest_addr, bool bNotLastEntry,
++ u16 wStride, u8 bytesPerPixel,
++ u32 dwFetchWidthPixels, u32 dwFetchHeight,
++ bool bInterrupt);
++
++void prepare_tfe_descriptor(struct Descriptor *pDAddress, phys_addr_t source_addr,
++ phys_addr_t dest_addr, bool bNotLastEntry, u8 bCheckSum,
++ bool bEnabledRLE, u16 wStride, u8 bytesPerPixel,
++ u32 dwFetchWidthPixels, u32 dwFetchHeight,
++ enum SelectedByteMode sbm, bool bRLEOverFLow,
++ bool bInterrupt);
++void prepare_tfe_text_descriptor(struct Descriptor *pDAddress, phys_addr_t source_addr,
++ phys_addr_t dest_addr, bool bEnabledRLE, u32 dwFetchWidth,
++ u32 dwFetchHeight, enum DataProccessMode dpm,
++ bool bRLEOverFLow, bool bInterrupt);
++void prepare_ldma_descriptor(struct Descriptor *pDAddress, phys_addr_t source_addr,
++ phys_addr_t dest_addr, u32 dwLDMASize, u8 byNotLastEntry);
++
++u8 get_text_mode_character_per_line(struct AstRVAS *ast_rvas, u16 wScreenWidth);
++u16 get_text_mode_fetch_lines(struct AstRVAS *ast_rvas, u16 wScreenHeight);
++void on_fetch_text_data(struct RvasIoctl *ri, bool bRLEOn, struct AstRVAS *ast_rvas);
++
++void reset_snoop_engine(struct AstRVAS *ast_rvas);
++void set_snoop_engine(bool b_geom_chg, struct AstRVAS *ast_rvas);
++u64 reinterpret_32bpp_snoop_row_as_24bpp(u64 theSnoopRow);
++
++void convert_snoop_map(struct AstRVAS *ast_rvas);
++void update_all_snoop_context(struct AstRVAS *ast_rvas);
++void get_snoop_map_data(struct AstRVAS *ast_rvas);
++void get_snoop_aggregate(struct AstRVAS *ast_rvas);
++
++void sleep_on_ldma_busy(struct AstRVAS *ast_rvas, phys_addr_t desc_addr_phys);
++bool sleep_on_tfe_busy(struct AstRVAS *ast_rvas, phys_addr_t desc_addr_phys,
++ u32 dwTFEControlR, u32 dwTFERleLimitor, u32 *pdwRLESize,
++ u32 *pdwCheckSum);
++
++bool sleep_on_tfe_text_busy(struct AstRVAS *ast_rvas, phys_addr_t desc_addr_phys,
++ u32 dwTFEControlR, u32 dwTFERleLimitor, u32 *pdwRLESize,
++ u32 *pdwCheckSum);
++
++bool sleep_on_bse_busy(struct AstRVAS *ast_rvas, phys_addr_t desc_addr_phys,
++ struct BSEAggregateRegister aBSEAR, u32 size);
++
++bool host_suspended(struct AstRVAS *pAstRVAS);
++#endif // __HARDWAREENGINES_H__
+diff --git a/drivers/soc/aspeed/rvas/video.h b/drivers/soc/aspeed/rvas/video.h
+new file mode 100644
+index 000000000..cafd941e8
+--- /dev/null
++++ b/drivers/soc/aspeed/rvas/video.h
+@@ -0,0 +1,41 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/******************************************************************************
++ * video.h
++ *
++ * This file is part of the ASPEED Linux Device Driver for ASPEED Baseboard Management Controller.
++ * Refer to the README file included with this package for driver version and adapter compatibility.
++ *
++ * Copyright (C) 2019-2021 ASPEED Technology Inc. All rights reserved.
++ */
++
++#ifndef __RVAS_VIDEO_H__
++#define __RVAS_VIDEO_H__
++
++#define RVAS_DRIVER_NAME "rvas"
++#define Stringify(x) #x
++
++//
++//functions
++//
++void ioctl_new_context(struct file *file, struct RvasIoctl *pri, struct AstRVAS *pAstRVAS);
++void ioctl_delete_context(struct RvasIoctl *pri, struct AstRVAS *pAstRVAS);
++void ioctl_alloc(struct file *file, struct RvasIoctl *pri, struct AstRVAS *pAstRVAS);
++void ioctl_free(struct RvasIoctl *pri, struct AstRVAS *pAstRVAS);
++void ioctl_update_lms(u8 lms_on, struct AstRVAS *ast_rvas);
++void ioctl_update_lms_2700(u8 lms_on, struct AstRVAS *ast_rvas);
++u32 ioctl_get_lm_status(struct AstRVAS *ast_rvas);
++u32 ioctl_get_lm_status_2700(struct AstRVAS *ast_rvas);
++
++//void* get_from_rsvd_mem(u32 size, u32 *phys_add, struct AstRVAS *pAstRVAS);
++void *get_virt_add_rsvd_mem(u32 index, struct AstRVAS *pAstRVAS);
++dma_addr_t get_phys_add_rsvd_mem(u32 index, struct AstRVAS *pAstRVAS);
++u32 get_len_rsvd_mem(u32 index, struct AstRVAS *pAstRVAS);
++
++//int release_rsvd_mem(u32 size, u32 phys_add);
++bool virt_is_valid_rsvd_mem(u32 index, u32 size, struct AstRVAS *pAstRVAS);
++
++struct ContextTable *get_new_context_table_entry(struct AstRVAS *pAstRVAS);
++struct ContextTable *get_context_entry(const void *crc, struct AstRVAS *pAstRVAS);
++bool remove_context_table_entry(const void *crmh, struct AstRVAS *pAstRVAS);
++
++#endif // __RVAS_VIDEO_H__
+diff --git a/drivers/soc/aspeed/rvas/video_debug.h b/drivers/soc/aspeed/rvas/video_debug.h
+new file mode 100644
+index 000000000..dbf620207
+--- /dev/null
++++ b/drivers/soc/aspeed/rvas/video_debug.h
+@@ -0,0 +1,35 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Copyright (C) 2019-2021 ASPEED Technology Inc.
++ */
++
++#ifndef AST_VIDEO_DEBUG_H_
++#define AST_VIDEO_DEBUG_H_
++
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/fcntl.h>
++
++//#define RVAS_VIDEO_DEBUG
++//#define VIDEO_ENGINE_DEBUG
++//#define HARDWARE_ENGINE_DEBUG
++
++#ifdef RVAS_VIDEO_DEBUG
++#define VIDEO_DBG(fmt, args...) ({ dev_printk(KERN_INFO, pAstRVAS->pdev, "%s() " fmt, __func__, ## args); })
++#else
++#define VIDEO_DBG(fmt, args...) do {} while (0)
++#endif // RVAS_VIDEO_DEBUG
++
++#ifdef VIDEO_ENGINE_DEBUG
++#define VIDEO_ENG_DBG(fmt, args...) ({ dev_printk(KERN_INFO, pAstRVAS->pdev, "%s() " fmt, __func__, ## args); })
++#else
++#define VIDEO_ENG_DBG(fmt, args...) do {} while (0)
++#endif // VIDEO_ENGINE_DEBUG
++
++#ifdef HARDWARE_ENGINE_DEBUG
++#define HW_ENG_DBG(fmt, args...) ({ dev_printk(KERN_INFO, pAstRVAS->pdev, "%s() " fmt, __func__, ## args); })
++#else
++#define HW_ENG_DBG(fmt, args...) do {} while (0)
++#endif // HARDWARE_ENGINE_DEBUG
++
++#endif // AST_VIDEO_DEBUG_H_
+diff --git a/drivers/soc/aspeed/rvas/video_engine.c b/drivers/soc/aspeed/rvas/video_engine.c
+new file mode 100644
+index 000000000..851755a3c
+--- /dev/null
++++ b/drivers/soc/aspeed/rvas/video_engine.c
+@@ -0,0 +1,1339 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * File Name   : video_engine.c
++ * Description : AST2600 video engines
++ *
++ * Copyright (C) 2019-2021 ASPEED Technology Inc. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++#include <linux/poll.h>
++#include <linux/slab.h>
++#include <linux/sched.h>
++#include <linux/clk.h>
++#include <linux/reset.h>
++
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/platform_device.h>
++#include <linux/types.h>
++#include <linux/interrupt.h>
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/miscdevice.h>
++#include <linux/hwmon-sysfs.h>
++#include <linux/regmap.h>
++#include <linux/mfd/syscon.h>
++#include <linux/dma-mapping.h>
++#include <asm/io.h>
++#include <linux/of.h>
++#include <linux/of_reserved_mem.h>
++#include <asm/uaccess.h>
++
++#include "video_ioctl.h"
++#include "video_engine.h"
++#include "video_debug.h"
++#include "hardware_engines.h"
++
++//
++//functions
++//
++static inline void video_write(struct AstRVAS *pAstRVAS, u32 val, u32 reg);
++static inline u32 video_read(struct AstRVAS *pAstRVAS, u32 reg);
++
++static u32 get_vga_mem_base(struct AstRVAS *pAstRVAS);
++static int reserve_video_engine_memory(struct AstRVAS *pAstRVAS);
++static void init_jpeg_table(struct AstRVAS *pAstRVAS);
++static void video_set_scaling(struct AstRVAS *pAstRVAS);
++static int video_capture_trigger(struct AstRVAS *pAstRVAS);
++static void dump_buffer(phys_addr_t qwPhyStreamAddress, u32 size);
++
++//
++// function definitions
++//
++/**
++ * _make_addr - make address fit for ast2700
++ * @addr: dma address for hardware to work
++ *
++ * Return: 32bit format of address
++ */
++static inline u32 _make_addr(dma_addr_t addr)
++{
++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
++	// On ast2700, the high address bits [35:32] are stored in the low bits [3:0]
++ return (addr >> 32) | (u32)(addr);
++#else
++ return addr;
++#endif
++}
++
++void ioctl_get_video_engine_config(struct VideoConfig *pVideoConfig, struct AstRVAS *pAstRVAS)
++{
++ u32 VR004_SeqCtrl = video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL);
++ u32 VR060_ComCtrl = video_read(pAstRVAS, AST_VIDEO_COMPRESS_CTRL);
++
++ // status
++ pVideoConfig->rs = SuccessStatus;
++
++ pVideoConfig->engine = 0; // engine = 1 is Video Management
++ pVideoConfig->capture_format = 0;
++ pVideoConfig->compression_mode = 0;
++
++ pVideoConfig->compression_format = (VR004_SeqCtrl >> 13) & 0x1;
++ pVideoConfig->YUV420_mode = (VR004_SeqCtrl >> 10) & 0x3;
++ pVideoConfig->AutoMode = (VR004_SeqCtrl >> 5) & 0x1;
++
++ pVideoConfig->rc4_enable = (VR060_ComCtrl >> 5) & 0x1;
++ pVideoConfig->Visual_Lossless = (VR060_ComCtrl >> 16) & 0x1;
++ pVideoConfig->Y_JPEGTableSelector = VIDEO_GET_DCT_LUM(VR060_ComCtrl);
++ pVideoConfig->AdvanceTableSelector = (VR060_ComCtrl >> 27) & 0xf;
++}
++
++void ioctl_set_video_engine_config(struct VideoConfig *pVideoConfig, struct AstRVAS *pAstRVAS)
++{
++ int i, base = 0;
++ u32 ctrl = 0; //for VR004, VR204
++ u32 compress_ctrl = 0x00080000;
++ u32 *tlb_table = pAstRVAS->vem.jpegTable.pVirt;
++
++ // status
++ pVideoConfig->rs = SuccessStatus;
++
++ VIDEO_ENG_DBG("\n");
++
++ ctrl = video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL);
++
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) &
++ ~(G6_VIDEO_FRAME_CT_MASK | G6_VIDEO_MULTI_JPEG_MODE | G6_VIDEO_MULTI_JPEG_FLAG_MODE), AST_VIDEO_PASS_CTRL);
++
++ ctrl &= ~VIDEO_AUTO_COMPRESS;
++ ctrl |= G5_VIDEO_COMPRESS_JPEG_MODE;
++ ctrl &= ~VIDEO_COMPRESS_FORMAT_MASK; //~(3<<10) bit 4 is set to 0
++
++ if (pVideoConfig->YUV420_mode)
++ ctrl |= VIDEO_COMPRESS_FORMAT(YUV420);
++
++ if (pVideoConfig->rc4_enable)
++ compress_ctrl |= VIDEO_ENCRYP_ENABLE;
++
++ switch (pVideoConfig->compression_mode) {
++ case 0: //DCT only
++ compress_ctrl |= VIDEO_DCT_ONLY_ENCODE;
++ break;
++ case 1: //DCT VQ mix 2-color
++ compress_ctrl &= ~(VIDEO_4COLOR_VQ_ENCODE | VIDEO_DCT_ONLY_ENCODE);
++ break;
++ case 2: //DCT VQ mix 4-color
++ compress_ctrl |= VIDEO_4COLOR_VQ_ENCODE;
++ break;
++ default:
++ dev_err(pAstRVAS->pdev, "unknown compression mode:%d\n", pVideoConfig->compression_mode);
++ break;
++ }
++
++ if (pVideoConfig->Visual_Lossless) {
++ compress_ctrl |= VIDEO_HQ_ENABLE;
++ compress_ctrl |= VIDEO_HQ_DCT_LUM(pVideoConfig->AdvanceTableSelector);
++ compress_ctrl |= VIDEO_HQ_DCT_CHROM((pVideoConfig->AdvanceTableSelector + 16));
++ } else {
++ compress_ctrl &= ~VIDEO_HQ_ENABLE;
++ }
++
++ video_write(pAstRVAS, ctrl, AST_VIDEO_SEQ_CTRL);
++ // the chrominance table selector is derived from the luminance selector (offset by 16)
++ video_write(pAstRVAS, compress_ctrl | VIDEO_DCT_LUM(pVideoConfig->Y_JPEGTableSelector) | VIDEO_DCT_CHROM(pVideoConfig->Y_JPEGTableSelector + 16), AST_VIDEO_COMPRESS_CTRL);
++ VIDEO_ENG_DBG("VR04: %#X\n", video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL));
++ VIDEO_ENG_DBG("VR60: %#X\n", video_read(pAstRVAS, AST_VIDEO_COMPRESS_CTRL));
++
++ // choose a table for JPEG or multi-JPEG
++ if (pVideoConfig->compression_format >= 1) {
++ VIDEO_ENG_DBG("Choose a JPEG Table\n");
++ for (i = 0; i < 12; i++) {
++ base = (1024 * i);
++ //base = (256 * i);
++ if (pVideoConfig->YUV420_mode) //yuv420
++ tlb_table[base + 46] = 0x00220103; //for YUV420 mode
++ else
++ tlb_table[base + 46] = 0x00110103; //for YUV444 mode
++ }
++ }
++
++ video_set_scaling(pAstRVAS);
++}
++
++//
++void ioctl_get_video_engine_data_2700(struct MultiJpegConfig *pArrayMJConfig, struct AstRVAS *pAstRVAS, dma_addr_t dwPhyStreamAddress)
++{
++ u32 yuv_shift;
++ u32 scan_lines;
++ int timeout = 0;
++ u32 x0;
++ u32 y0;
++ phys_addr_t start_addr;
++ u32 frame_count = 0;
++ u32 old_src_addr, new_src_addr;
++ u32 offset;
++
++ pArrayMJConfig->rs = SuccessStatus;
++
++ VIDEO_ENG_DBG("\n");
++ VIDEO_ENG_DBG("before Stream buffer: %#llx\n", dwPhyStreamAddress);
++ //dump_buffer(dwPhyStreamAddress,100);
++
++ video_write(pAstRVAS, _make_addr(dwPhyStreamAddress), AST_VIDEO_STREAM_BUFF);
++
++ if (host_suspended(pAstRVAS)) {
++ pArrayMJConfig->rs = HostSuspended;
++ VIDEO_ENG_DBG("HostSuspended Timeout\n");
++ return;
++ }
++
++ VIDEO_ENG_DBG("irq status: %#x\n", video_read(pAstRVAS, AST_VIDEO_INT_STS));
++
++#ifdef AUTO_COMPRESS
++ if (video_cc_auto_trigger(pAstRVAS) == 0) {
++ VIDEO_ENG_DBG("auto Ccc Timeout\n");
++ pArrayMJConfig->multi_jpeg_frames = 0;
++ pArrayMJConfig->rs = CompressionTimedOut;
++ return;
++ }
++#else
++ if (video_capture_trigger(pAstRVAS) == 0) {
++ dev_err(pAstRVAS->pdev, " capture timeout sts %x\n", video_read(pAstRVAS, AST_VIDEO_INT_STS));
++ pArrayMJConfig->multi_jpeg_frames = 0;
++ pArrayMJConfig->rs = CaptureTimedOut;
++ return;
++ }
++ // Clear all interrupts: the ast2700 HW engine has a bug where only the
++ // capture interrupt is enabled but a compression interrupt is still raised
++ video_write(pAstRVAS, VIDEO_CAPTURE_COMPLETE | VIDEO_COMPRESS_COMPLETE, AST_VIDEO_INT_STS);
++#endif
++
++ // start compression setup
++ video_write(pAstRVAS, VIDEO_COMPRESS_COMPLETE, AST_VIDEO_INT_EN);
++ init_completion(&pAstRVAS->video_compression_complete);
++
++ scan_lines = video_read(pAstRVAS, AST_VIDEO_SOURCE_SCAN_LINE);
++ frame_count = video_read(pAstRVAS, AST_VIDEO_COMPRESS_FRAME_COUNT_RB);
++
++ // VR07C odd:  get source from 0x4C
++ // VR07C even: get source from 0x44
++ old_src_addr = (frame_count & 0x01) ? AST_VIDEO_SOURCE_BUFF0 : AST_VIDEO_SOURCE_BUFF1;
++ new_src_addr = (frame_count & 0x01) ? AST_VIDEO_SOURCE_BUFF1 : AST_VIDEO_SOURCE_BUFF0;
++
++ start_addr = video_read(pAstRVAS, old_src_addr);
++ // make sure BCD is disabled
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_BCD_CTRL) & ~VIDEO_BCD_CHG_EN, AST_VIDEO_BCD_CTRL);
++
++ if (video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) & VIDEO_COMPRESS_FORMAT(YUV420)) {
++ // YUV 420
++ VIDEO_ENG_DBG("Debug: YUV420\n");
++ yuv_shift = 4;
++ } else {
++ VIDEO_ENG_DBG("Debug: YUV444\n");
++ yuv_shift = 3;
++ }
++
++ //update compress window
++ video_write(pAstRVAS,
++ pArrayMJConfig->frame[0].wWidthPixels << 16 |
++ pArrayMJConfig->frame[0].wHeightPixels, AST_VIDEO_COMPRESS_WIN);
++
++ x0 = pArrayMJConfig->frame[0].wXPixels;
++ y0 = pArrayMJConfig->frame[0].wYPixels;
++
++ offset = (scan_lines * y0) + ((256 * x0) >> yuv_shift);
++
++ video_write(pAstRVAS, start_addr + offset, new_src_addr);
++ VIDEO_ENG_DBG("write to %#x, with address: %#x", start_addr + offset, new_src_addr);
++ // trigger compression
++ VIDEO_ENG_DBG("trigger compression\n");
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) |
++ VIDEO_COMPRESS_TRIGGER, AST_VIDEO_SEQ_CTRL);
++
++ timeout = wait_for_completion_interruptible_timeout(&pAstRVAS->video_compression_complete, HZ);
++
++ if (timeout == 0) {
++ dev_err(pAstRVAS->pdev, " compression timeout sts %x\n", video_read(pAstRVAS, AST_VIDEO_INT_STS));
++ pArrayMJConfig->multi_jpeg_frames = 0;
++ pArrayMJConfig->rs = CompressionTimedOut;
++ }
++ pArrayMJConfig->multi_jpeg_frames = 1;
++ pArrayMJConfig->frame[0].dwSizeInBytes = video_read(pAstRVAS, AST_VIDEO_JPEG_SIZE);
++
++ VIDEO_ENG_DBG("compressed size: %d\n", pArrayMJConfig->frame[0].dwSizeInBytes);
++
++ //clear
++ video_write(pAstRVAS, (video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) &
++ ~(G5_VIDEO_COMPRESS_JPEG_MODE | VIDEO_CAPTURE_MULTI_FRAME | VIDEO_COMPRESS_TRIGGER))
++ , AST_VIDEO_SEQ_CTRL);
++
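++ // Ping-pong the capture buffers: swap which DMA buffer backs source
++ // slot 0 (44h) and slot 1 (4Ch) on each call.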
++ pAstRVAS->sequence++;
++ if (pAstRVAS->sequence & 0x01) {
++ video_write(pAstRVAS, _make_addr(pAstRVAS->vem.captureBuf1.phy), AST_VIDEO_SOURCE_BUFF0);//44h
++ video_write(pAstRVAS, _make_addr(pAstRVAS->vem.captureBuf0.phy), AST_VIDEO_SOURCE_BUFF1);//4Ch
++ } else {
++ video_write(pAstRVAS, _make_addr(pAstRVAS->vem.captureBuf0.phy), AST_VIDEO_SOURCE_BUFF0);//44h
++ video_write(pAstRVAS, _make_addr(pAstRVAS->vem.captureBuf1.phy), AST_VIDEO_SOURCE_BUFF1);//4Ch
++ }
++ VIDEO_ENG_DBG("[%#x]: %#x\n", AST_VIDEO_SOURCE_BUFF0, video_read(pAstRVAS, AST_VIDEO_SOURCE_BUFF0));
++ VIDEO_ENG_DBG("[%#x]: %#x\n", AST_VIDEO_SOURCE_BUFF1, video_read(pAstRVAS, AST_VIDEO_SOURCE_BUFF1));
++
++ //TODO: kernel dump here...
++ //dump_buffer(dwPhyStreamAddress,100);
++}
++
++void ioctl_get_video_engine_data(struct MultiJpegConfig *pArrayMJConfig, struct AstRVAS *pAstRVAS, phys_addr_t dwPhyStreamAddress)
++{
++ u32 yuv_shift;
++ u32 yuv_msk;
++ u32 scan_lines;
++ int timeout = 0;
++ u32 x0;
++ u32 y0;
++ int i = 0;
++ u32 dw_w_h;
++ phys_addr_t start_addr;
++ u32 multi_jpeg_data = 0;
++ u32 VR044;
++ u32 nextFrameOffset = 0;
++
++ pArrayMJConfig->rs = SuccessStatus;
++
++ VIDEO_ENG_DBG("\n");
++ VIDEO_ENG_DBG("before Stream buffer:\n");
++ //dump_buffer(dwPhyStreamAddress,100);
++
++ video_write(pAstRVAS, dwPhyStreamAddress, AST_VIDEO_STREAM_BUFF);
++
++ if (host_suspended(pAstRVAS)) {
++ pArrayMJConfig->rs = HostSuspended;
++ VIDEO_ENG_DBG("HostSuspended Timeout\n");
++ return;
++ }
++
++ if (video_capture_trigger(pAstRVAS) == 0) {
++ pArrayMJConfig->rs = CaptureTimedOut;
++ VIDEO_ENG_DBG("Capture Timeout\n");
++ return;
++ }
++ //dump_buffer(dwPhyStreamAddress,100);
++ // start compression setup
++ video_write(pAstRVAS, VIDEO_COMPRESS_COMPLETE, AST_VIDEO_INT_EN);
++ init_completion(&pAstRVAS->video_compression_complete);
++ VIDEO_ENG_DBG("capture complete buffer:\n");
++
++ //dump_buffer(vem.captureBuf0.phy,100);
++ VR044 = video_read(pAstRVAS, AST_VIDEO_SOURCE_BUFF0);
++
++ scan_lines = video_read(pAstRVAS, AST_VIDEO_SOURCE_SCAN_LINE);
++ VIDEO_ENG_DBG("scan_lines: %#x\n", scan_lines);
++
++ if (video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) & VIDEO_COMPRESS_FORMAT(YUV420)) {
++ // YUV 420
++ VIDEO_ENG_DBG("Debug: YUV420\n");
++ yuv_shift = 4;
++ yuv_msk = 0xf;
++ } else {
++ // YUV 444
++ VIDEO_ENG_DBG("Debug: YUV444\n");
++ yuv_shift = 3;
++ yuv_msk = 0x7;
++ }
++
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) | G6_VIDEO_MULTI_JPEG_FLAG_MODE |
++ (G6_VIDEO_JPEG__COUNT(pArrayMJConfig->multi_jpeg_frames - 1) | G6_VIDEO_MULTI_JPEG_MODE), AST_VIDEO_PASS_CTRL);
++
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_BCD_CTRL) & ~VIDEO_BCD_CHG_EN, AST_VIDEO_BCD_CTRL);
++
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_CTRL) | VIDEO_CTRL_ADDRESS_MAP_MULTI_JPEG, AST_VIDEO_CTRL);
++
++ for (i = 0; i < pArrayMJConfig->multi_jpeg_frames; i++) {
++ VIDEO_ENG_DBG("Debug: Before: [%d]: x: %#x y: %#x w: %#x h: %#x\n", i,
++ pArrayMJConfig->frame[i].wXPixels,
++ pArrayMJConfig->frame[i].wYPixels,
++ pArrayMJConfig->frame[i].wWidthPixels,
++ pArrayMJConfig->frame[i].wHeightPixels);
++ x0 = pArrayMJConfig->frame[i].wXPixels;
++ y0 = pArrayMJConfig->frame[i].wYPixels;
++ dw_w_h = SET_FRAME_W_H(pArrayMJConfig->frame[i].wWidthPixels, pArrayMJConfig->frame[i].wHeightPixels);
++
++ start_addr = VR044 + (scan_lines * y0) + ((256 * x0) / (1 << yuv_shift));
++
++ VIDEO_ENG_DBG("VR%x dw_w_h: %#x, VR%x : addr : %#x, x0 %d, y0 %d\n",
++ AST_VIDEO_MULTI_JPEG_SRAM + (8 * i), dw_w_h,
++ AST_VIDEO_MULTI_JPEG_SRAM + (8 * i) + 4, start_addr, x0, y0);
++ video_write(pAstRVAS, dw_w_h, AST_VIDEO_MULTI_JPEG_SRAM + (8 * i));
++ video_write(pAstRVAS, start_addr, AST_VIDEO_MULTI_JPEG_SRAM + (8 * i) + 4);
++ }
++
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) & ~(VIDEO_CAPTURE_TRIGGER | VIDEO_COMPRESS_FORCE_IDLE | VIDEO_COMPRESS_TRIGGER), AST_VIDEO_SEQ_CTRL);
++
++ // set multi-JPEG mode in VR004[5:3]
++ video_write(pAstRVAS, (video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) & ~VIDEO_AUTO_COMPRESS)
++ | VIDEO_CAPTURE_MULTI_FRAME | G5_VIDEO_COMPRESS_JPEG_MODE, AST_VIDEO_SEQ_CTRL);
++
++ // If the CPU is too fast, please read back and then trigger
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) | VIDEO_COMPRESS_TRIGGER, AST_VIDEO_SEQ_CTRL);
++ VIDEO_ENG_DBG("wait_for_completion_interruptible_timeout...\n");
++
++ timeout = wait_for_completion_interruptible_timeout(&pAstRVAS->video_compression_complete, HZ / 2);
++
++ if (timeout == 0) {
++ dev_err(pAstRVAS->pdev, "multi compression timeout sts %x\n", video_read(pAstRVAS, AST_VIDEO_INT_STS));
++ pArrayMJConfig->multi_jpeg_frames = 0;
++ pArrayMJConfig->rs = CompressionTimedOut;
++ } else {
++ VIDEO_ENG_DBG("400 %x , 404 %x\n", video_read(pAstRVAS, AST_VIDEO_MULTI_JPEG_SRAM), video_read(pAstRVAS, AST_VIDEO_MULTI_JPEG_SRAM + 4));
++ VIDEO_ENG_DBG("408 %x , 40c %x\n", video_read(pAstRVAS, AST_VIDEO_MULTI_JPEG_SRAM + 8), video_read(pAstRVAS, AST_VIDEO_MULTI_JPEG_SRAM + 0xC));
++ VIDEO_ENG_DBG("done reading 408\n");
++
++ for (i = 0; i < pArrayMJConfig->multi_jpeg_frames; i++) {
++ pArrayMJConfig->frame[i].dwOffsetInBytes = nextFrameOffset;
++
++ multi_jpeg_data = video_read(pAstRVAS, AST_VIDEO_MULTI_JPEG_SRAM + (8 * i) + 4);
++ if (multi_jpeg_data & BIT(7)) {
++ pArrayMJConfig->frame[i].dwSizeInBytes = video_read(pAstRVAS, AST_VIDEO_MULTI_JPEG_SRAM + (8 * i)) & 0xffffff;
++ nextFrameOffset = (multi_jpeg_data & ~BIT(7)) >> 1;
++ } else {
++ pArrayMJConfig->frame[i].dwSizeInBytes = 0;
++ nextFrameOffset = 0;
++ }
++ VIDEO_ENG_DBG("[%d] size %d, dwOffsetInBytes %x\n", i, pArrayMJConfig->frame[i].dwSizeInBytes, pArrayMJConfig->frame[i].dwOffsetInBytes);
++ } //for
++ }
++
++ video_write(pAstRVAS, (video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) & ~(G5_VIDEO_COMPRESS_JPEG_MODE | VIDEO_CAPTURE_MULTI_FRAME))
++ | VIDEO_AUTO_COMPRESS, AST_VIDEO_SEQ_CTRL);
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) &
++ ~(G6_VIDEO_FRAME_CT_MASK | G6_VIDEO_MULTI_JPEG_MODE), AST_VIDEO_PASS_CTRL);
++
++ //VIDEO_ENG_DBG("after Stream buffer:\n");
++ //dump_buffer(dwPhyStreamAddress,100);
++}
++
++irqreturn_t ast_video_isr(int this_irq, void *dev_id)
++{
++ u32 status;
++ u32 enabled_irq;
++ struct AstRVAS *pAstRVAS = dev_id;
++
++ status = video_read(pAstRVAS, AST_VIDEO_INT_STS);
++ enabled_irq = video_read(pAstRVAS, AST_VIDEO_INT_EN);
++
++ VIDEO_ENG_DBG("sts: %#x enabled: %#x\n", status, enabled_irq);
++
++ status &= enabled_irq;
++ if (status & VIDEO_COMPRESS_COMPLETE) {
++ video_write(pAstRVAS, VIDEO_COMPRESS_COMPLETE, AST_VIDEO_INT_STS);
++ complete(&pAstRVAS->video_compression_complete);
++ }
++ if (status & VIDEO_CAPTURE_COMPLETE) {
++ video_write(pAstRVAS, VIDEO_CAPTURE_COMPLETE, AST_VIDEO_INT_STS);
++ VIDEO_ENG_DBG("capture complete\n");
++ complete(&pAstRVAS->video_capture_complete);
++ }
++
++ return IRQ_HANDLED;
++}
++
++void enable_video_interrupt(struct AstRVAS *pAstRVAS)
++{
++ u32 intCtrReg = video_read(pAstRVAS, AST_VIDEO_INT_EN);
++
++ intCtrReg = (VIDEO_COMPRESS_COMPLETE | VIDEO_CAPTURE_COMPLETE);
++ video_write(pAstRVAS, intCtrReg, AST_VIDEO_INT_EN);
++}
++
++void disable_video_interrupt(struct AstRVAS *pAstRVAS)
++{
++ video_write(pAstRVAS, 0, AST_VIDEO_INT_EN);
++ video_write(pAstRVAS, 0xffffffff, AST_VIDEO_INT_STS);
++}
++
++void video_engine_rc4Reset(struct AstRVAS *pAstRVAS)
++{
++ // RC4 init reset
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_CTRL) | VIDEO_CTRL_RC4_RST, AST_VIDEO_CTRL);
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_CTRL) & ~VIDEO_CTRL_RC4_RST, AST_VIDEO_CTRL);
++}
++
++// setup functions
++int video_engine_reserveMem(struct AstRVAS *pAstRVAS)
++{
++ int result = 0;
++
++ // reserve mem
++ result = reserve_video_engine_memory(pAstRVAS);
++ if (result < 0) {
++ dev_err(pAstRVAS->pdev, "Error Reserving Video Engine Memory\n");
++ return result;
++ }
++ return 0;
++}
++
++int free_video_engine_memory(struct AstRVAS *pAstRVAS)
++{
++ int size = pAstRVAS->vem.captureBuf0.size + pAstRVAS->vem.captureBuf1.size + pAstRVAS->vem.jpegTable.size;
++
++ if (size && pAstRVAS->vem.captureBuf0.pVirt) {
++ dma_free_coherent(pAstRVAS->pdev, size,
++ pAstRVAS->vem.captureBuf0.pVirt,
++ pAstRVAS->vem.captureBuf0.phy);
++ } else {
++ return -1;
++ }
++ VIDEO_ENG_DBG("After dma_free_coherent\n");
++
++ return 0;
++}
++
++// this function needs to be called when the graphics mode changes
++void video_set_Window(struct AstRVAS *pAstRVAS)
++{
++ u32 scan_line;
++ u32 screenHeightAligned = ((pAstRVAS->current_vg.wScreenHeight + 0x1f) & (~0x1f));
++
++ VIDEO_ENG_DBG("\n");
++
++ //set direct mode
++ if (pAstRVAS->config->version == 7) {
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) & ~(VIDEO_AUTO_FETCH | VIDEO_DIRECT_FETCH), AST_VIDEO_PASS_CTRL);
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) | VIDEO_AUTO_FETCH | VIDEO_DIRECT_FETCH, AST_VIDEO_PASS_CTRL);
++ video_write(pAstRVAS, _make_addr(get_vga_mem_base(pAstRVAS)), AST_VIDEO_DIRECT_BASE);
++ video_write(pAstRVAS, VIDEO_FETCH_TIMING(0) | VIDEO_FETCH_LINE_OFFSET(pAstRVAS->current_vg.wStride * 4), AST_VIDEO_DIRECT_CTRL);
++ }
++ //compression x,y
++ video_write(pAstRVAS, VIDEO_COMPRESS_H(pAstRVAS->current_vg.wStride) | VIDEO_COMPRESS_V(screenHeightAligned), AST_VIDEO_COMPRESS_WIN);
++ VIDEO_ENG_DBG("reg offset[%#x]: %#x\n", AST_VIDEO_COMPRESS_WIN, video_read(pAstRVAS, AST_VIDEO_COMPRESS_WIN));
++
++ if (pAstRVAS->current_vg.wStride == 1680)
++ video_write(pAstRVAS, VIDEO_CAPTURE_H(1728) | VIDEO_CAPTURE_V(screenHeightAligned), AST_VIDEO_CAPTURE_WIN);
++ else
++ video_write(pAstRVAS, VIDEO_CAPTURE_H(pAstRVAS->current_vg.wStride) | VIDEO_CAPTURE_V(screenHeightAligned), AST_VIDEO_CAPTURE_WIN);
++
++ VIDEO_ENG_DBG("reg offset[%#x]: %#x\n", AST_VIDEO_CAPTURE_WIN, video_read(pAstRVAS, AST_VIDEO_CAPTURE_WIN));
++
++ // set scan_line VR048
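++ // A stride that is not a multiple of 8 is rounded up to the next
++ // multiple of 16 pixels; the register takes the line length in bytes (x4).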
++ if ((pAstRVAS->current_vg.wStride % 8) == 0) {
++ video_write(pAstRVAS, pAstRVAS->current_vg.wStride * 4, AST_VIDEO_SOURCE_SCAN_LINE);
++ } else {
++ scan_line = pAstRVAS->current_vg.wStride;
++ scan_line = scan_line + 16 - (scan_line % 16);
++ scan_line = scan_line * 4;
++ video_write(pAstRVAS, scan_line, AST_VIDEO_SOURCE_SCAN_LINE);
++ }
++}
++
++void set_direct_mode(struct AstRVAS *pAstRVAS)
++{
++ int Direct_Mode = 0;
++ u32 ColorDepthIndex;
++ u32 VGA_Scratch_Register_350, VGA_Scratch_Register_354, VGA_Scratch_Register_34C, Color_Depth;
++
++ VIDEO_ENG_DBG("\n");
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) & ~(VIDEO_AUTO_FETCH | VIDEO_DIRECT_FETCH), AST_VIDEO_PASS_CTRL);
++
++ VGA_Scratch_Register_350 = video_read(pAstRVAS, AST_VIDEO_E_SCRATCH_350);
++ VGA_Scratch_Register_34C = video_read(pAstRVAS, AST_VIDEO_E_SCRATCH_34C);
++ VGA_Scratch_Register_354 = video_read(pAstRVAS, AST_VIDEO_E_SCRATCH_354);
++
++ if (((VGA_Scratch_Register_350 & 0xff00) >> 8) == 0xA8) {
++ Color_Depth = ((VGA_Scratch_Register_350 & 0xff0000) >> 16);
++
++ if (Color_Depth < 15)
++ Direct_Mode = 0;
++ else
++ Direct_Mode = 1;
++
++ } else { //Original mode information
++ ColorDepthIndex = (VGA_Scratch_Register_34C >> 4) & 0x0F;
++
++ if (ColorDepthIndex == 0xe || ColorDepthIndex == 0xf) {
++ Direct_Mode = 0;
++ } else {
++ if (ColorDepthIndex > 2)
++ Direct_Mode = 1;
++ else
++ Direct_Mode = 0;
++ }
++ }
++
++ if (Direct_Mode) {
++ VIDEO_ENG_DBG("Direct Mode\n");
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) | VIDEO_AUTO_FETCH | VIDEO_DIRECT_FETCH, AST_VIDEO_PASS_CTRL);
++ video_write(pAstRVAS, _make_addr(get_vga_mem_base(pAstRVAS)), AST_VIDEO_DIRECT_BASE);
++ video_write(pAstRVAS, VIDEO_FETCH_TIMING(0) | VIDEO_FETCH_LINE_OFFSET(pAstRVAS->current_vg.wStride * 4), AST_VIDEO_DIRECT_CTRL);
++ } else {
++ VIDEO_ENG_DBG("Sync None Direct Mode\n");
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) & ~(VIDEO_AUTO_FETCH | VIDEO_DIRECT_FETCH), AST_VIDEO_PASS_CTRL);
++ }
++}
++
++// returns 0 on timeout, non-zero on success
++static int video_capture_trigger(struct AstRVAS *pAstRVAS)
++{
++ int timeout = 0;
++
++ VIDEO_ENG_DBG("\n");
++
++ // enable only the capture interrupt
++ video_write(pAstRVAS, VIDEO_CAPTURE_COMPLETE, AST_VIDEO_INT_EN);
++
++ init_completion(&pAstRVAS->video_capture_complete);
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) |
++ VIDEO_SET_CAPTURE_FORMAT(1) | VIDEO_DIRECT_FETCH, AST_VIDEO_PASS_CTRL);
++
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_BCD_CTRL) & ~VIDEO_BCD_CHG_EN, AST_VIDEO_BCD_CTRL);
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) & ~(VIDEO_CAPTURE_TRIGGER | VIDEO_COMPRESS_FORCE_IDLE | VIDEO_COMPRESS_TRIGGER | VIDEO_AUTO_COMPRESS), AST_VIDEO_SEQ_CTRL);
++ // If the CPU is too fast, please read back and then trigger
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) | G5_VIDEO_COMPRESS_JPEG_MODE | VIDEO_CAPTURE_TRIGGER, AST_VIDEO_SEQ_CTRL);
++
++ timeout = wait_for_completion_interruptible_timeout(&pAstRVAS->video_capture_complete, HZ / 2);
++
++ if (timeout == 0)
++ dev_err(pAstRVAS->pdev, "Capture timeout sts %x\n", video_read(pAstRVAS, AST_VIDEO_INT_STS));
++
++ //clear
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_SEQ_CTRL) & ~(VIDEO_CAPTURE_TRIGGER | VIDEO_COMPRESS_FORCE_IDLE | VIDEO_COMPRESS_TRIGGER | VIDEO_AUTO_COMPRESS), AST_VIDEO_SEQ_CTRL);
++
++ //dump_buffer(pAstRVAS->vem.captureBuf0.phy, 1024);
++ return timeout;
++}
++
++//
++// static functions
++//
++static u32 get_vga_mem_base(struct AstRVAS *pAstRVAS)
++{
++ u32 vga_mem_size, mem_size;
++
++ mem_size = pAstRVAS->FBInfo.dwDRAMSize;
++ vga_mem_size = pAstRVAS->FBInfo.dwVGASize;
++ VIDEO_ENG_DBG("VGA Info : MEM Size %dMB, VGA Mem Size %dMB\n", mem_size / 1024 / 1024, vga_mem_size / 1024 / 1024);
++ return (mem_size - vga_mem_size);
++}
++
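++// Debug-only helper: assumes the given address is CPU-accessible.
++// All call sites in this file are currently commented out.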
++static void dump_buffer(phys_addr_t dwPhyStreamAddress, u32 size)
++{
++ u32 iC;
++ u32 val = 0;
++
++ for (iC = 0; iC < size; iC += 4) {
++ val = readl((void *)(dwPhyStreamAddress + iC));
++ VIDEO_ENG_DBG("%#x, ", val);
++ }
++}
++
++static void video_set_scaling(struct AstRVAS *pAstRVAS)
++{
++ u32 ctrl = video_read(pAstRVAS, AST_VIDEO_CTRL);
++ //no scaling
++ ctrl &= ~VIDEO_CTRL_DWN_SCALING_MASK;
++
++ VIDEO_ENG_DBG("Scaling Disable\n");
++ video_write(pAstRVAS, 0x00200000, AST_VIDEO_SCALING0);
++ video_write(pAstRVAS, 0x00200000, AST_VIDEO_SCALING1);
++ video_write(pAstRVAS, 0x00200000, AST_VIDEO_SCALING2);
++ video_write(pAstRVAS, 0x00200000, AST_VIDEO_SCALING3);
++
++ video_write(pAstRVAS, 0x10001000, AST_VIDEO_SCAL_FACTOR);
++ video_write(pAstRVAS, ctrl, AST_VIDEO_CTRL);
++
++ video_set_Window(pAstRVAS);
++}
++
++void video_ctrl_init(struct AstRVAS *pAstRVAS)
++{
++ u8 inputdelay = 0x4;
++
++ VIDEO_ENG_DBG("\n");
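++ // version 7 is the seventh-generation (ast2700-class) engine; unlock
++ // its registers and use a smaller DVO input delay (0x1 instead of 0x4).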
++ if (pAstRVAS->config->version == 7) {
++ VIDEO_ENG_DBG("reg address: 0x%llx\n", pAstRVAS->video_reg_base);
++ /* Unlock VE registers */
++ video_write(pAstRVAS, VIDEO_PROTECT_UNLOCK, AST_VIDEO_PROTECT);
++ inputdelay = 0x1;
++ }
++
++ /* disable interrupts */
++ video_write(pAstRVAS, 0, AST_VIDEO_INT_EN);
++ video_write(pAstRVAS, 0xffffffff, AST_VIDEO_INT_STS);
++ video_write(pAstRVAS, 0, AST_VIDEO_BCD_CTRL);
++
++ /* Clear the offset */
++ video_write(pAstRVAS, 0, AST_VIDEO_COMPRESS_PRO);
++ video_write(pAstRVAS, 0, AST_VIDEO_COMPRESS_READ);
++
++ /* write the source and JPEG header buffer addresses to the registers */
++ pAstRVAS->sequence = 1;
++ video_write(pAstRVAS, _make_addr(pAstRVAS->vem.captureBuf0.phy), AST_VIDEO_SOURCE_BUFF0);//44h
++ video_write(pAstRVAS, _make_addr(pAstRVAS->vem.captureBuf1.phy), AST_VIDEO_SOURCE_BUFF1);//4Ch
++ video_write(pAstRVAS, pAstRVAS->vem.jpegTable.phy, AST_VIDEO_JPEG_HEADER_BUFF); //40h
++ video_write(pAstRVAS, 0, AST_VIDEO_COMPRESS_READ); //3Ch
++
++ // ============================= JPEG init ===========================================
++ init_jpeg_table(pAstRVAS);
++ VIDEO_ENG_DBG("JpegTable in Memory:0x%llx\n", pAstRVAS->vem.jpegTable.pVirt);
++ //dump_buffer(pAstRVAS->vem.jpegTable.phy, 80);
++
++ // ===================================================================================
++ // The specification requires bits [13:12] to always be 0
++ video_write(pAstRVAS, (video_read(pAstRVAS, AST_VIDEO_PASS_CTRL) &
++ ~(VIDEO_DUAL_EDGE_MODE | VIDEO_18BIT_SINGLE_EDGE)) |
++ VIDEO_DVO_INPUT_DELAY(inputdelay),
++ AST_VIDEO_PASS_CTRL);
++
++ video_write(pAstRVAS, VIDEO_STREAM_PKT_N(STREAM_32_PKTS) |
++ VIDEO_STREAM_PKT_SIZE(STREAM_128KB), AST_VIDEO_STREAM_SIZE);
++ // RC4 init reset
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_CTRL) | VIDEO_CTRL_RC4_RST, AST_VIDEO_CTRL);
++ video_write(pAstRVAS, video_read(pAstRVAS, AST_VIDEO_CTRL) & ~VIDEO_CTRL_RC4_RST, AST_VIDEO_CTRL);
++
++ //CRC/REDUCE_BIT register clear
++ video_write(pAstRVAS, 0, AST_VIDEO_CRC1);
++ video_write(pAstRVAS, 0, AST_VIDEO_CRC2);
++ video_write(pAstRVAS, 0, AST_VIDEO_DATA_TRUNCA);
++ video_write(pAstRVAS, 0, AST_VIDEO_COMPRESS_READ);
++}
++
++static int reserve_video_engine_memory(struct AstRVAS *pAstRVAS)
++{
++ u32 size;
++ dma_addr_t phys_add = 0;
++ void *virt_add = 0;
++
++ memset(&pAstRVAS->vem, 0, sizeof(struct VideoEngineMem));
++ pAstRVAS->vem.captureBuf0.size = VIDEO_CAPTURE_BUFFER_SIZE; //size 10M
++ pAstRVAS->vem.captureBuf1.size = VIDEO_CAPTURE_BUFFER_SIZE; //size 10M
++ pAstRVAS->vem.jpegTable.size = VIDEO_JPEG_TABLE_SIZE; //size 1M
++
++ size = pAstRVAS->vem.captureBuf0.size + pAstRVAS->vem.captureBuf1.size + pAstRVAS->vem.jpegTable.size;
++ VIDEO_ENG_DBG("Allocating memory size: 0x%x\n", size);
++ virt_add = dma_alloc_coherent(pAstRVAS->pdev, size, &phys_add,
++ GFP_KERNEL);
++
++ if (!virt_add) {
++ pr_err("Cannot alloc buffer for video engine\n");
++ return -ENOMEM;
++ }
++
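++ // Carve the single coherent allocation into three consecutive regions:
++ // capture buffer 0, capture buffer 1, then the JPEG header table.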
++ pAstRVAS->vem.captureBuf0.phy = phys_add;
++ pAstRVAS->vem.captureBuf1.phy = phys_add + pAstRVAS->vem.captureBuf0.size;
++ pAstRVAS->vem.jpegTable.phy = phys_add + pAstRVAS->vem.captureBuf0.size + pAstRVAS->vem.captureBuf1.size;
++
++ pAstRVAS->vem.captureBuf0.pVirt = (void *)virt_add;
++ pAstRVAS->vem.captureBuf1.pVirt = (void *)(virt_add + pAstRVAS->vem.captureBuf0.size);
++ pAstRVAS->vem.jpegTable.pVirt = (void *)(virt_add + pAstRVAS->vem.captureBuf0.size + pAstRVAS->vem.captureBuf1.size);
++
++ VIDEO_ENG_DBG("Allocated: phys: 0x%llx\n", phys_add);
++ VIDEO_ENG_DBG("Phy: Buf0:0x%llx; Buf1:0x%llx; jpegT:0x%llx\n", pAstRVAS->vem.captureBuf0.phy, pAstRVAS->vem.captureBuf1.phy, pAstRVAS->vem.jpegTable.phy);
++ VIDEO_ENG_DBG("Virt: Buf0:0x%llx; Buf1:0x%llx; JpegT:0x%llx\n", pAstRVAS->vem.captureBuf0.pVirt, pAstRVAS->vem.captureBuf1.pVirt, pAstRVAS->vem.jpegTable.pVirt);
++
++ return 0;
++}
++
++/************************************************ JPEG ***************************************************************************************/
++static void init_jpeg_table(struct AstRVAS *pAstRVAS)
++{
++ int i = 0;
++ int base = 0;
++ u32 *tlb_table = pAstRVAS->vem.jpegTable.pVirt;
++
++ //JPEG header default value:
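++ // Twelve copies of the header are built, one per quantization table
++ // selector; only words 10..43 (the quantization table data) differ.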
++ for (i = 0; i < 12; i++) {
++ base = (256 * i);
++ tlb_table[base + 0] = 0xE0FFD8FF;
++ tlb_table[base + 1] = 0x464A1000;
++ tlb_table[base + 2] = 0x01004649;
++ tlb_table[base + 3] = 0x60000101;
++ tlb_table[base + 4] = 0x00006000;
++ tlb_table[base + 5] = 0x0F00FEFF;
++ tlb_table[base + 6] = 0x00002D05;
++ tlb_table[base + 7] = 0x00000000;
++ tlb_table[base + 8] = 0x00000000;
++ tlb_table[base + 9] = 0x00DBFF00;
++ tlb_table[base + 44] = 0x081100C0;
++ tlb_table[base + 45] = 0x00000000;
++ tlb_table[base + 47] = 0x03011102;
++ tlb_table[base + 48] = 0xC4FF0111;
++ tlb_table[base + 49] = 0x00001F00;
++ tlb_table[base + 50] = 0x01010501;
++ tlb_table[base + 51] = 0x01010101;
++ tlb_table[base + 52] = 0x00000000;
++ tlb_table[base + 53] = 0x00000000;
++ tlb_table[base + 54] = 0x04030201;
++ tlb_table[base + 55] = 0x08070605;
++ tlb_table[base + 56] = 0xFF0B0A09;
++ tlb_table[base + 57] = 0x10B500C4;
++ tlb_table[base + 58] = 0x03010200;
++ tlb_table[base + 59] = 0x03040203;
++ tlb_table[base + 60] = 0x04040505;
++ tlb_table[base + 61] = 0x7D010000;
++ tlb_table[base + 62] = 0x00030201;
++ tlb_table[base + 63] = 0x12051104;
++ tlb_table[base + 64] = 0x06413121;
++ tlb_table[base + 65] = 0x07615113;
++ tlb_table[base + 66] = 0x32147122;
++ tlb_table[base + 67] = 0x08A19181;
++ tlb_table[base + 68] = 0xC1B14223;
++ tlb_table[base + 69] = 0xF0D15215;
++ tlb_table[base + 70] = 0x72623324;
++ tlb_table[base + 71] = 0x160A0982;
++ tlb_table[base + 72] = 0x1A191817;
++ tlb_table[base + 73] = 0x28272625;
++ tlb_table[base + 74] = 0x35342A29;
++ tlb_table[base + 75] = 0x39383736;
++ tlb_table[base + 76] = 0x4544433A;
++ tlb_table[base + 77] = 0x49484746;
++ tlb_table[base + 78] = 0x5554534A;
++ tlb_table[base + 79] = 0x59585756;
++ tlb_table[base + 80] = 0x6564635A;
++ tlb_table[base + 81] = 0x69686766;
++ tlb_table[base + 82] = 0x7574736A;
++ tlb_table[base + 83] = 0x79787776;
++ tlb_table[base + 84] = 0x8584837A;
++ tlb_table[base + 85] = 0x89888786;
++ tlb_table[base + 86] = 0x9493928A;
++ tlb_table[base + 87] = 0x98979695;
++ tlb_table[base + 88] = 0xA3A29A99;
++ tlb_table[base + 89] = 0xA7A6A5A4;
++ tlb_table[base + 90] = 0xB2AAA9A8;
++ tlb_table[base + 91] = 0xB6B5B4B3;
++ tlb_table[base + 92] = 0xBAB9B8B7;
++ tlb_table[base + 93] = 0xC5C4C3C2;
++ tlb_table[base + 94] = 0xC9C8C7C6;
++ tlb_table[base + 95] = 0xD4D3D2CA;
++ tlb_table[base + 96] = 0xD8D7D6D5;
++ tlb_table[base + 97] = 0xE2E1DAD9;
++ tlb_table[base + 98] = 0xE6E5E4E3;
++ tlb_table[base + 99] = 0xEAE9E8E7;
++ tlb_table[base + 100] = 0xF4F3F2F1;
++ tlb_table[base + 101] = 0xF8F7F6F5;
++ tlb_table[base + 102] = 0xC4FFFAF9;
++ tlb_table[base + 103] = 0x00011F00;
++ tlb_table[base + 104] = 0x01010103;
++ tlb_table[base + 105] = 0x01010101;
++ tlb_table[base + 106] = 0x00000101;
++ tlb_table[base + 107] = 0x00000000;
++ tlb_table[base + 108] = 0x04030201;
++ tlb_table[base + 109] = 0x08070605;
++ tlb_table[base + 110] = 0xFF0B0A09;
++ tlb_table[base + 111] = 0x11B500C4;
++ tlb_table[base + 112] = 0x02010200;
++ tlb_table[base + 113] = 0x04030404;
++ tlb_table[base + 114] = 0x04040507;
++ tlb_table[base + 115] = 0x77020100;
++ tlb_table[base + 116] = 0x03020100;
++ tlb_table[base + 117] = 0x21050411;
++ tlb_table[base + 118] = 0x41120631;
++ tlb_table[base + 119] = 0x71610751;
++ tlb_table[base + 120] = 0x81322213;
++ tlb_table[base + 121] = 0x91421408;
++ tlb_table[base + 122] = 0x09C1B1A1;
++ tlb_table[base + 123] = 0xF0523323;
++ tlb_table[base + 124] = 0xD1726215;
++ tlb_table[base + 125] = 0x3424160A;
++ tlb_table[base + 126] = 0x17F125E1;
++ tlb_table[base + 127] = 0x261A1918;
++ tlb_table[base + 128] = 0x2A292827;
++ tlb_table[base + 129] = 0x38373635;
++ tlb_table[base + 130] = 0x44433A39;
++ tlb_table[base + 131] = 0x48474645;
++ tlb_table[base + 132] = 0x54534A49;
++ tlb_table[base + 133] = 0x58575655;
++ tlb_table[base + 134] = 0x64635A59;
++ tlb_table[base + 135] = 0x68676665;
++ tlb_table[base + 136] = 0x74736A69;
++ tlb_table[base + 137] = 0x78777675;
++ tlb_table[base + 138] = 0x83827A79;
++ tlb_table[base + 139] = 0x87868584;
++ tlb_table[base + 140] = 0x928A8988;
++ tlb_table[base + 141] = 0x96959493;
++ tlb_table[base + 142] = 0x9A999897;
++ tlb_table[base + 143] = 0xA5A4A3A2;
++ tlb_table[base + 144] = 0xA9A8A7A6;
++ tlb_table[base + 145] = 0xB4B3B2AA;
++ tlb_table[base + 146] = 0xB8B7B6B5;
++ tlb_table[base + 147] = 0xC3C2BAB9;
++ tlb_table[base + 148] = 0xC7C6C5C4;
++ tlb_table[base + 149] = 0xD2CAC9C8;
++ tlb_table[base + 150] = 0xD6D5D4D3;
++ tlb_table[base + 151] = 0xDAD9D8D7;
++ tlb_table[base + 152] = 0xE5E4E3E2;
++ tlb_table[base + 153] = 0xE9E8E7E6;
++ tlb_table[base + 154] = 0xF4F3F2EA;
++ tlb_table[base + 155] = 0xF8F7F6F5;
++ tlb_table[base + 156] = 0xDAFFFAF9;
++ tlb_table[base + 157] = 0x01030C00;
++ tlb_table[base + 158] = 0x03110200;
++ tlb_table[base + 159] = 0x003F0011;
++
++ //Table 0
++ if (i == 0) {
++ tlb_table[base + 10] = 0x0D140043;
++ tlb_table[base + 11] = 0x0C0F110F;
++ tlb_table[base + 12] = 0x11101114;
++ tlb_table[base + 13] = 0x17141516;
++ tlb_table[base + 14] = 0x1E20321E;
++ tlb_table[base + 15] = 0x3D1E1B1B;
++ tlb_table[base + 16] = 0x32242E2B;
++ tlb_table[base + 17] = 0x4B4C3F48;
++ tlb_table[base + 18] = 0x44463F47;
++ tlb_table[base + 19] = 0x61735A50;
++ tlb_table[base + 20] = 0x566C5550;
++ tlb_table[base + 21] = 0x88644644;
++ tlb_table[base + 22] = 0x7A766C65;
++ tlb_table[base + 23] = 0x4D808280;
++ tlb_table[base + 24] = 0x8C978D60;
++ tlb_table[base + 25] = 0x7E73967D;
++ tlb_table[base + 26] = 0xDBFF7B80;
++ tlb_table[base + 27] = 0x1F014300;
++ tlb_table[base + 28] = 0x272D2121;
++ tlb_table[base + 29] = 0x3030582D;
++ tlb_table[base + 30] = 0x697BB958;
++ tlb_table[base + 31] = 0xB8B9B97B;
++ tlb_table[base + 32] = 0xB9B8A6A6;
++ tlb_table[base + 33] = 0xB9B9B9B9;
++ tlb_table[base + 34] = 0xB9B9B9B9;
++ tlb_table[base + 35] = 0xB9B9B9B9;
++ tlb_table[base + 36] = 0xB9B9B9B9;
++ tlb_table[base + 37] = 0xB9B9B9B9;
++ tlb_table[base + 38] = 0xB9B9B9B9;
++ tlb_table[base + 39] = 0xB9B9B9B9;
++ tlb_table[base + 40] = 0xB9B9B9B9;
++ tlb_table[base + 41] = 0xB9B9B9B9;
++ tlb_table[base + 42] = 0xB9B9B9B9;
++ tlb_table[base + 43] = 0xFFB9B9B9;
++ }
++ //Table 1
++ if (i == 1) {
++ tlb_table[base + 10] = 0x0C110043;
++ tlb_table[base + 11] = 0x0A0D0F0D;
++ tlb_table[base + 12] = 0x0F0E0F11;
++ tlb_table[base + 13] = 0x14111213;
++ tlb_table[base + 14] = 0x1A1C2B1A;
++ tlb_table[base + 15] = 0x351A1818;
++ tlb_table[base + 16] = 0x2B1F2826;
++ tlb_table[base + 17] = 0x4142373F;
++ tlb_table[base + 18] = 0x3C3D373E;
++ tlb_table[base + 19] = 0x55644E46;
++ tlb_table[base + 20] = 0x4B5F4A46;
++ tlb_table[base + 21] = 0x77573D3C;
++ tlb_table[base + 22] = 0x6B675F58;
++ tlb_table[base + 23] = 0x43707170;
++ tlb_table[base + 24] = 0x7A847B54;
++ tlb_table[base + 25] = 0x6E64836D;
++ tlb_table[base + 26] = 0xDBFF6C70;
++ tlb_table[base + 27] = 0x1B014300;
++ tlb_table[base + 28] = 0x22271D1D;
++ tlb_table[base + 29] = 0x2A2A4C27;
++ tlb_table[base + 30] = 0x5B6BA04C;
++ tlb_table[base + 31] = 0xA0A0A06B;
++ tlb_table[base + 32] = 0xA0A0A0A0;
++ tlb_table[base + 33] = 0xA0A0A0A0;
++ tlb_table[base + 34] = 0xA0A0A0A0;
++ tlb_table[base + 35] = 0xA0A0A0A0;
++ tlb_table[base + 36] = 0xA0A0A0A0;
++ tlb_table[base + 37] = 0xA0A0A0A0;
++ tlb_table[base + 38] = 0xA0A0A0A0;
++ tlb_table[base + 39] = 0xA0A0A0A0;
++ tlb_table[base + 40] = 0xA0A0A0A0;
++ tlb_table[base + 41] = 0xA0A0A0A0;
++ tlb_table[base + 42] = 0xA0A0A0A0;
++ tlb_table[base + 43] = 0xFFA0A0A0;
++ }
++ //Table 2
++ if (i == 2) {
++ tlb_table[base + 10] = 0x090E0043;
++ tlb_table[base + 11] = 0x090A0C0A;
++ tlb_table[base + 12] = 0x0C0B0C0E;
++ tlb_table[base + 13] = 0x110E0F10;
++ tlb_table[base + 14] = 0x15172415;
++ tlb_table[base + 15] = 0x2C151313;
++ tlb_table[base + 16] = 0x241A211F;
++ tlb_table[base + 17] = 0x36372E34;
++ tlb_table[base + 18] = 0x31322E33;
++ tlb_table[base + 19] = 0x4653413A;
++ tlb_table[base + 20] = 0x3E4E3D3A;
++ tlb_table[base + 21] = 0x62483231;
++ tlb_table[base + 22] = 0x58564E49;
++ tlb_table[base + 23] = 0x385D5E5D;
++ tlb_table[base + 24] = 0x656D6645;
++ tlb_table[base + 25] = 0x5B536C5A;
++ tlb_table[base + 26] = 0xDBFF595D;
++ tlb_table[base + 27] = 0x16014300;
++ tlb_table[base + 28] = 0x1C201818;
++ tlb_table[base + 29] = 0x22223F20;
++ tlb_table[base + 30] = 0x4B58853F;
++ tlb_table[base + 31] = 0x85858558;
++ tlb_table[base + 32] = 0x85858585;
++ tlb_table[base + 33] = 0x85858585;
++ tlb_table[base + 34] = 0x85858585;
++ tlb_table[base + 35] = 0x85858585;
++ tlb_table[base + 36] = 0x85858585;
++ tlb_table[base + 37] = 0x85858585;
++ tlb_table[base + 38] = 0x85858585;
++ tlb_table[base + 39] = 0x85858585;
++ tlb_table[base + 40] = 0x85858585;
++ tlb_table[base + 41] = 0x85858585;
++ tlb_table[base + 42] = 0x85858585;
++ tlb_table[base + 43] = 0xFF858585;
++ }
++ //Table 3
++ if (i == 3) {
++ tlb_table[base + 10] = 0x070B0043;
++ tlb_table[base + 11] = 0x07080A08;
++ tlb_table[base + 12] = 0x0A090A0B;
++ tlb_table[base + 13] = 0x0D0B0C0C;
++ tlb_table[base + 14] = 0x11121C11;
++ tlb_table[base + 15] = 0x23110F0F;
++ tlb_table[base + 16] = 0x1C141A19;
++ tlb_table[base + 17] = 0x2B2B2429;
++ tlb_table[base + 18] = 0x27282428;
++ tlb_table[base + 19] = 0x3842332E;
++ tlb_table[base + 20] = 0x313E302E;
++ tlb_table[base + 21] = 0x4E392827;
++ tlb_table[base + 22] = 0x46443E3A;
++ tlb_table[base + 23] = 0x2C4A4A4A;
++ tlb_table[base + 24] = 0x50565137;
++ tlb_table[base + 25] = 0x48425647;
++ tlb_table[base + 26] = 0xDBFF474A;
++ tlb_table[base + 27] = 0x12014300;
++ tlb_table[base + 28] = 0x161A1313;
++ tlb_table[base + 29] = 0x1C1C331A;
++ tlb_table[base + 30] = 0x3D486C33;
++ tlb_table[base + 31] = 0x6C6C6C48;
++ tlb_table[base + 32] = 0x6C6C6C6C;
++ tlb_table[base + 33] = 0x6C6C6C6C;
++ tlb_table[base + 34] = 0x6C6C6C6C;
++ tlb_table[base + 35] = 0x6C6C6C6C;
++ tlb_table[base + 36] = 0x6C6C6C6C;
++ tlb_table[base + 37] = 0x6C6C6C6C;
++ tlb_table[base + 38] = 0x6C6C6C6C;
++ tlb_table[base + 39] = 0x6C6C6C6C;
++ tlb_table[base + 40] = 0x6C6C6C6C;
++ tlb_table[base + 41] = 0x6C6C6C6C;
++ tlb_table[base + 42] = 0x6C6C6C6C;
++ tlb_table[base + 43] = 0xFF6C6C6C;
++ }
++ //Table 4
++ if (i == 4) {
++ tlb_table[base + 10] = 0x06090043;
++ tlb_table[base + 11] = 0x05060706;
++ tlb_table[base + 12] = 0x07070709;
++ tlb_table[base + 13] = 0x0A09090A;
++ tlb_table[base + 14] = 0x0D0E160D;
++ tlb_table[base + 15] = 0x1B0D0C0C;
++ tlb_table[base + 16] = 0x16101413;
++ tlb_table[base + 17] = 0x21221C20;
++ tlb_table[base + 18] = 0x1E1F1C20;
++ tlb_table[base + 19] = 0x2B332824;
++ tlb_table[base + 20] = 0x26302624;
++ tlb_table[base + 21] = 0x3D2D1F1E;
++ tlb_table[base + 22] = 0x3735302D;
++ tlb_table[base + 23] = 0x22393A39;
++ tlb_table[base + 24] = 0x3F443F2B;
++ tlb_table[base + 25] = 0x38334338;
++ tlb_table[base + 26] = 0xDBFF3739;
++ tlb_table[base + 27] = 0x0D014300;
++ tlb_table[base + 28] = 0x11130E0E;
++ tlb_table[base + 29] = 0x15152613;
++ tlb_table[base + 30] = 0x2D355026;
++ tlb_table[base + 31] = 0x50505035;
++ tlb_table[base + 32] = 0x50505050;
++ tlb_table[base + 33] = 0x50505050;
++ tlb_table[base + 34] = 0x50505050;
++ tlb_table[base + 35] = 0x50505050;
++ tlb_table[base + 36] = 0x50505050;
++ tlb_table[base + 37] = 0x50505050;
++ tlb_table[base + 38] = 0x50505050;
++ tlb_table[base + 39] = 0x50505050;
++ tlb_table[base + 40] = 0x50505050;
++ tlb_table[base + 41] = 0x50505050;
++ tlb_table[base + 42] = 0x50505050;
++ tlb_table[base + 43] = 0xFF505050;
++ }
++ //Table 5
++ if (i == 5) {
++ tlb_table[base + 10] = 0x04060043;
++ tlb_table[base + 11] = 0x03040504;
++ tlb_table[base + 12] = 0x05040506;
++ tlb_table[base + 13] = 0x07060606;
++ tlb_table[base + 14] = 0x09090F09;
++ tlb_table[base + 15] = 0x12090808;
++ tlb_table[base + 16] = 0x0F0A0D0D;
++ tlb_table[base + 17] = 0x16161315;
++ tlb_table[base + 18] = 0x14151315;
++ tlb_table[base + 19] = 0x1D221B18;
++ tlb_table[base + 20] = 0x19201918;
++ tlb_table[base + 21] = 0x281E1514;
++ tlb_table[base + 22] = 0x2423201E;
++ tlb_table[base + 23] = 0x17262726;
++ tlb_table[base + 24] = 0x2A2D2A1C;
++ tlb_table[base + 25] = 0x25222D25;
++ tlb_table[base + 26] = 0xDBFF2526;
++ tlb_table[base + 27] = 0x09014300;
++ tlb_table[base + 28] = 0x0B0D0A0A;
++ tlb_table[base + 29] = 0x0E0E1A0D;
++ tlb_table[base + 30] = 0x1F25371A;
++ tlb_table[base + 31] = 0x37373725;
++ tlb_table[base + 32] = 0x37373737;
++ tlb_table[base + 33] = 0x37373737;
++ tlb_table[base + 34] = 0x37373737;
++ tlb_table[base + 35] = 0x37373737;
++ tlb_table[base + 36] = 0x37373737;
++ tlb_table[base + 37] = 0x37373737;
++ tlb_table[base + 38] = 0x37373737;
++ tlb_table[base + 39] = 0x37373737;
++ tlb_table[base + 40] = 0x37373737;
++ tlb_table[base + 41] = 0x37373737;
++ tlb_table[base + 42] = 0x37373737;
++ tlb_table[base + 43] = 0xFF373737;
++ }
++ //Table 6
++ if (i == 6) {
++ tlb_table[base + 10] = 0x02030043;
++ tlb_table[base + 11] = 0x01020202;
++ tlb_table[base + 12] = 0x02020203;
++ tlb_table[base + 13] = 0x03030303;
++ tlb_table[base + 14] = 0x04040704;
++ tlb_table[base + 15] = 0x09040404;
++ tlb_table[base + 16] = 0x07050606;
++ tlb_table[base + 17] = 0x0B0B090A;
++ tlb_table[base + 18] = 0x0A0A090A;
++ tlb_table[base + 19] = 0x0E110D0C;
++ tlb_table[base + 20] = 0x0C100C0C;
++ tlb_table[base + 21] = 0x140F0A0A;
++ tlb_table[base + 22] = 0x1211100F;
++ tlb_table[base + 23] = 0x0B131313;
++ tlb_table[base + 24] = 0x1516150E;
++ tlb_table[base + 25] = 0x12111612;
++ tlb_table[base + 26] = 0xDBFF1213;
++ tlb_table[base + 27] = 0x04014300;
++ tlb_table[base + 28] = 0x05060505;
++ tlb_table[base + 29] = 0x07070D06;
++ tlb_table[base + 30] = 0x0F121B0D;
++ tlb_table[base + 31] = 0x1B1B1B12;
++ tlb_table[base + 32] = 0x1B1B1B1B;
++ tlb_table[base + 33] = 0x1B1B1B1B;
++ tlb_table[base + 34] = 0x1B1B1B1B;
++ tlb_table[base + 35] = 0x1B1B1B1B;
++ tlb_table[base + 36] = 0x1B1B1B1B;
++ tlb_table[base + 37] = 0x1B1B1B1B;
++ tlb_table[base + 38] = 0x1B1B1B1B;
++ tlb_table[base + 39] = 0x1B1B1B1B;
++ tlb_table[base + 40] = 0x1B1B1B1B;
++ tlb_table[base + 41] = 0x1B1B1B1B;
++ tlb_table[base + 42] = 0x1B1B1B1B;
++ tlb_table[base + 43] = 0xFF1B1B1B;
++ }
++ //Table 7
++ if (i == 7) {
++ tlb_table[base + 10] = 0x01020043;
++ tlb_table[base + 11] = 0x01010101;
++ tlb_table[base + 12] = 0x01010102;
++ tlb_table[base + 13] = 0x02020202;
++ tlb_table[base + 14] = 0x03030503;
++ tlb_table[base + 15] = 0x06030202;
++ tlb_table[base + 16] = 0x05030404;
++ tlb_table[base + 17] = 0x07070607;
++ tlb_table[base + 18] = 0x06070607;
++ tlb_table[base + 19] = 0x090B0908;
++ tlb_table[base + 20] = 0x080A0808;
++ tlb_table[base + 21] = 0x0D0A0706;
++ tlb_table[base + 22] = 0x0C0B0A0A;
++ tlb_table[base + 23] = 0x070C0D0C;
++ tlb_table[base + 24] = 0x0E0F0E09;
++ tlb_table[base + 25] = 0x0C0B0F0C;
++ tlb_table[base + 26] = 0xDBFF0C0C;
++ tlb_table[base + 27] = 0x03014300;
++ tlb_table[base + 28] = 0x03040303;
++ tlb_table[base + 29] = 0x04040804;
++ tlb_table[base + 30] = 0x0A0C1208;
++ tlb_table[base + 31] = 0x1212120C;
++ tlb_table[base + 32] = 0x12121212;
++ tlb_table[base + 33] = 0x12121212;
++ tlb_table[base + 34] = 0x12121212;
++ tlb_table[base + 35] = 0x12121212;
++ tlb_table[base + 36] = 0x12121212;
++ tlb_table[base + 37] = 0x12121212;
++ tlb_table[base + 38] = 0x12121212;
++ tlb_table[base + 39] = 0x12121212;
++ tlb_table[base + 40] = 0x12121212;
++ tlb_table[base + 41] = 0x12121212;
++ tlb_table[base + 42] = 0x12121212;
++ tlb_table[base + 43] = 0xFF121212;
++ }
++ //Table 8
++ if (i == 8) {
++ tlb_table[base + 10] = 0x01020043;
++ tlb_table[base + 11] = 0x01010101;
++ tlb_table[base + 12] = 0x01010102;
++ tlb_table[base + 13] = 0x02020202;
++ tlb_table[base + 14] = 0x03030503;
++ tlb_table[base + 15] = 0x06030202;
++ tlb_table[base + 16] = 0x05030404;
++ tlb_table[base + 17] = 0x07070607;
++ tlb_table[base + 18] = 0x06070607;
++ tlb_table[base + 19] = 0x090B0908;
++ tlb_table[base + 20] = 0x080A0808;
++ tlb_table[base + 21] = 0x0D0A0706;
++ tlb_table[base + 22] = 0x0C0B0A0A;
++ tlb_table[base + 23] = 0x070C0D0C;
++ tlb_table[base + 24] = 0x0E0F0E09;
++ tlb_table[base + 25] = 0x0C0B0F0C;
++ tlb_table[base + 26] = 0xDBFF0C0C;
++ tlb_table[base + 27] = 0x02014300;
++ tlb_table[base + 28] = 0x03030202;
++ tlb_table[base + 29] = 0x04040703;
++ tlb_table[base + 30] = 0x080A0F07;
++ tlb_table[base + 31] = 0x0F0F0F0A;
++ tlb_table[base + 32] = 0x0F0F0F0F;
++ tlb_table[base + 33] = 0x0F0F0F0F;
++ tlb_table[base + 34] = 0x0F0F0F0F;
++ tlb_table[base + 35] = 0x0F0F0F0F;
++ tlb_table[base + 36] = 0x0F0F0F0F;
++ tlb_table[base + 37] = 0x0F0F0F0F;
++ tlb_table[base + 38] = 0x0F0F0F0F;
++ tlb_table[base + 39] = 0x0F0F0F0F;
++ tlb_table[base + 40] = 0x0F0F0F0F;
++ tlb_table[base + 41] = 0x0F0F0F0F;
++ tlb_table[base + 42] = 0x0F0F0F0F;
++ tlb_table[base + 43] = 0xFF0F0F0F;
++ }
++ //Table 9
++ if (i == 9) {
++ tlb_table[base + 10] = 0x01010043;
++ tlb_table[base + 11] = 0x01010101;
++ tlb_table[base + 12] = 0x01010101;
++ tlb_table[base + 13] = 0x01010101;
++ tlb_table[base + 14] = 0x02020302;
++ tlb_table[base + 15] = 0x04020202;
++ tlb_table[base + 16] = 0x03020303;
++ tlb_table[base + 17] = 0x05050405;
++ tlb_table[base + 18] = 0x05050405;
++ tlb_table[base + 19] = 0x07080606;
++ tlb_table[base + 20] = 0x06080606;
++ tlb_table[base + 21] = 0x0A070505;
++ tlb_table[base + 22] = 0x09080807;
++ tlb_table[base + 23] = 0x05090909;
++ tlb_table[base + 24] = 0x0A0B0A07;
++ tlb_table[base + 25] = 0x09080B09;
++ tlb_table[base + 26] = 0xDBFF0909;
++ tlb_table[base + 27] = 0x02014300;
++ tlb_table[base + 28] = 0x02030202;
++ tlb_table[base + 29] = 0x03030503;
++ tlb_table[base + 30] = 0x07080C05;
++ tlb_table[base + 31] = 0x0C0C0C08;
++ tlb_table[base + 32] = 0x0C0C0C0C;
++ tlb_table[base + 33] = 0x0C0C0C0C;
++ tlb_table[base + 34] = 0x0C0C0C0C;
++ tlb_table[base + 35] = 0x0C0C0C0C;
++ tlb_table[base + 36] = 0x0C0C0C0C;
++ tlb_table[base + 37] = 0x0C0C0C0C;
++ tlb_table[base + 38] = 0x0C0C0C0C;
++ tlb_table[base + 39] = 0x0C0C0C0C;
++ tlb_table[base + 40] = 0x0C0C0C0C;
++ tlb_table[base + 41] = 0x0C0C0C0C;
++ tlb_table[base + 42] = 0x0C0C0C0C;
++ tlb_table[base + 43] = 0xFF0C0C0C;
++ }
++ //Table 10
++ if (i == 10) {
++ tlb_table[base + 10] = 0x01010043;
++ tlb_table[base + 11] = 0x01010101;
++ tlb_table[base + 12] = 0x01010101;
++ tlb_table[base + 13] = 0x01010101;
++ tlb_table[base + 14] = 0x01010201;
++ tlb_table[base + 15] = 0x03010101;
++ tlb_table[base + 16] = 0x02010202;
++ tlb_table[base + 17] = 0x03030303;
++ tlb_table[base + 18] = 0x03030303;
++ tlb_table[base + 19] = 0x04050404;
++ tlb_table[base + 20] = 0x04050404;
++ tlb_table[base + 21] = 0x06050303;
++ tlb_table[base + 22] = 0x06050505;
++ tlb_table[base + 23] = 0x03060606;
++ tlb_table[base + 24] = 0x07070704;
++ tlb_table[base + 25] = 0x06050706;
++ tlb_table[base + 26] = 0xDBFF0606;
++ tlb_table[base + 27] = 0x01014300;
++ tlb_table[base + 28] = 0x01020101;
++ tlb_table[base + 29] = 0x02020402;
++ tlb_table[base + 30] = 0x05060904;
++ tlb_table[base + 31] = 0x09090906;
++ tlb_table[base + 32] = 0x09090909;
++ tlb_table[base + 33] = 0x09090909;
++ tlb_table[base + 34] = 0x09090909;
++ tlb_table[base + 35] = 0x09090909;
++ tlb_table[base + 36] = 0x09090909;
++ tlb_table[base + 37] = 0x09090909;
++ tlb_table[base + 38] = 0x09090909;
++ tlb_table[base + 39] = 0x09090909;
++ tlb_table[base + 40] = 0x09090909;
++ tlb_table[base + 41] = 0x09090909;
++ tlb_table[base + 42] = 0x09090909;
++ tlb_table[base + 43] = 0xFF090909;
++ }
++ //Table 11
++ if (i == 11) {
++ tlb_table[base + 10] = 0x01010043;
++ tlb_table[base + 11] = 0x01010101;
++ tlb_table[base + 12] = 0x01010101;
++ tlb_table[base + 13] = 0x01010101;
++ tlb_table[base + 14] = 0x01010101;
++ tlb_table[base + 15] = 0x01010101;
++ tlb_table[base + 16] = 0x01010101;
++ tlb_table[base + 17] = 0x01010101;
++ tlb_table[base + 18] = 0x01010101;
++ tlb_table[base + 19] = 0x02020202;
++ tlb_table[base + 20] = 0x02020202;
++ tlb_table[base + 21] = 0x03020101;
++ tlb_table[base + 22] = 0x03020202;
++ tlb_table[base + 23] = 0x01030303;
++ tlb_table[base + 24] = 0x03030302;
++ tlb_table[base + 25] = 0x03020303;
++ tlb_table[base + 26] = 0xDBFF0403;
++ tlb_table[base + 27] = 0x01014300;
++ tlb_table[base + 28] = 0x01010101;
++ tlb_table[base + 29] = 0x01010201;
++ tlb_table[base + 30] = 0x03040602;
++ tlb_table[base + 31] = 0x06060604;
++ tlb_table[base + 32] = 0x06060606;
++ tlb_table[base + 33] = 0x06060606;
++ tlb_table[base + 34] = 0x06060606;
++ tlb_table[base + 35] = 0x06060606;
++ tlb_table[base + 36] = 0x06060606;
++ tlb_table[base + 37] = 0x06060606;
++ tlb_table[base + 38] = 0x06060606;
++ tlb_table[base + 39] = 0x06060606;
++ tlb_table[base + 40] = 0x06060606;
++ tlb_table[base + 41] = 0x06060606;
++ tlb_table[base + 42] = 0x06060606;
++ tlb_table[base + 43] = 0xFF060606;
++ }
++ }
++}
++
++static inline void
++video_write(struct AstRVAS *pAstRVAS, u32 val, u32 reg)
++{
++ VIDEO_ENG_DBG("write offset: %x, val: %x\n", reg, val);
++ // The video engine registers are locked again after reset, so always
++ // unlock before each write.
++ writel(VIDEO_PROTECT_UNLOCK, pAstRVAS->video_reg_base);
++ writel(val, pAstRVAS->video_reg_base + reg);
++}
++
++static inline u32
++video_read(struct AstRVAS *pAstRVAS, u32 reg)
++{
++ u32 val = readl(pAstRVAS->video_reg_base + reg);
++
++ VIDEO_ENG_DBG("read offset: %x, val: %x\n", reg, val);
++ return val;
++}
+diff --git a/drivers/soc/aspeed/rvas/video_engine.h b/drivers/soc/aspeed/rvas/video_engine.h
+new file mode 100644
+index 000000000..a1042b8ba
+--- /dev/null
++++ b/drivers/soc/aspeed/rvas/video_engine.h
+@@ -0,0 +1,270 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * File Name : video_engine.h
++ * Description : AST2600 video engines
++ *
++ * Copyright (C) 2019-2021 ASPEED Technology Inc. All rights reserved.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++#ifndef __VIDEO_ENGINE_H__
++#define __VIDEO_ENGINE_H__
++
++#include "video_ioctl.h"
++#include "hardware_engines.h"
++
++#define VIDEO_STREAM_BUFFER_SIZE (0x400000) //4M
++#define VIDEO_CAPTURE_BUFFER_SIZE (0xA00000) //10M
++#define VIDEO_JPEG_TABLE_SIZE (0x100000) //1M
++
++#define SCU_VIDEO_ENGINE_BIT BIT(6)
++#define SCU_VIDEO_CAPTURE_STOP_CLOCK_BIT BIT(3)
++#define SCU_VIDEO_ENGINE_STOP_CLOCK_BIT BIT(1)
++/***********************************************************************/
++/* Register for VIDEO */
++#define AST_VIDEO_PROTECT 0x000 /* protection key register */
++#define AST_VIDEO_SEQ_CTRL 0x004 /* Video Sequence Control register */
++#define AST_VIDEO_PASS_CTRL 0x008 /* Video Pass 1 Control register */
++
++//VR008[5]=1
++#define AST_VIDEO_DIRECT_BASE 0x00C /* Video Direct Frame buffer mode control Register VR008[5]=1 */
++#define AST_VIDEO_DIRECT_CTRL 0x010 /* Video Direct Frame buffer mode control Register VR008[5]=1 */
++
++//VR008[5]=0
++#define AST_VIDEO_TIMING_H 0x00C /* Video Timing Generation Setting Register */
++#define AST_VIDEO_TIMING_V 0x010 /* Video Timing Generation Setting Register */
++#define AST_VIDEO_SCAL_FACTOR 0x014 /* Video Scaling Factor Register */
++
++#define AST_VIDEO_SCALING0 0x018 /* Video Scaling Filter Parameter Register #0 */
++#define AST_VIDEO_SCALING1 0x01C /* Video Scaling Filter Parameter Register #1 */
++#define AST_VIDEO_SCALING2 0x020 /* Video Scaling Filter Parameter Register #2 */
++#define AST_VIDEO_SCALING3 0x024 /* Video Scaling Filter Parameter Register #3 */
++
++#define AST_VIDEO_BCD_CTRL 0x02C /* Video BCD Control Register */
++#define AST_VIDEO_CAPTURE_WIN 0x030 /* Video Capturing Window Setting Register */
++#define AST_VIDEO_COMPRESS_WIN 0x034 /* Video Compression Window Setting Register */
++
++#define AST_VIDEO_COMPRESS_PRO 0x038 /* Video Compression Stream Buffer Processing Offset Register */
++#define AST_VIDEO_COMPRESS_READ 0x03C /* Video Compression Stream Buffer Read Offset Register */
++
++#define AST_VIDEO_JPEG_HEADER_BUFF 0x040 /* Video Based Address of JPEG Header Buffer Register */
++#define AST_VIDEO_SOURCE_BUFF0 0x044 /* Video Based Address of Video Source Buffer #1 Register */
++#define AST_VIDEO_SOURCE_SCAN_LINE 0x048 /* Video Scan Line Offset of Video Source Buffer Register */
++#define AST_VIDEO_SOURCE_BUFF1 0x04C /* Video Based Address of Video Source Buffer #2 Register */
++#define AST_VIDEO_BCD_BUFF 0x050 /* Video Base Address of BCD Flag Buffer Register */
++#define AST_VIDEO_STREAM_BUFF 0x054 /* Video Base Address of Compressed Video Stream Buffer Register */
++#define AST_VIDEO_STREAM_SIZE 0x058 /* Video Stream Buffer Size Register */
++
++#define AST_VIDEO_COMPRESS_CTRL 0x060 /* Video Compression Control Register */
++
++#define AST_VIDEO_COMPRESS_DATA_COUNT 0x070 /* Video Total Size of Compressed Video Stream Read Back Register */
++#define AST_VIDEO_COMPRESS_BLOCK_COUNT 0x074 /* Video Total Number of Compressed Video Block Read Back Register */
++#define AST_VIDEO_COMPRESS_FRAME_END 0x078 /* Video Frame-end offset of compressed video stream buffer read back Register */
++#define AST_VIDEO_COMPRESS_FRAME_COUNT_RB 0x7C
++#define AST_VIDEO_JPEG_SIZE 0x084
++
++#define AST_VIDEO_CTRL 0x300 /* Video Control Register */
++#define AST_VIDEO_INT_EN 0x304 /* Video interrupt Enable */
++#define AST_VIDEO_INT_STS 0x308 /* Video interrupt status */
++#define AST_VIDEO_MODE_DETECT 0x30C /* Video Mode Detection Parameter Register */
++
++#define AST_VIDEO_CRC1 0x320 /* Primary CRC Parameter Register */
++#define AST_VIDEO_CRC2 0x324 /* Second CRC Parameter Register */
++#define AST_VIDEO_DATA_TRUNCA 0x328 /* Video Data Truncation Register */
++
++#define AST_VIDEO_E_SCRATCH_34C 0x34C /* Video Scratch Remap Read Back */
++#define AST_VIDEO_E_SCRATCH_350 0x350 /* Video Scratch Remap Read Back */
++#define AST_VIDEO_E_SCRATCH_354 0x354 /* Video Scratch Remap Read Back */
++
++//multi jpeg
++#define AST_VIDEO_ENCRYPT_SRAM 0x400 /* Video RC4/AES128 Encryption Key Register #0 ~ #63 */
++#define AST_VIDEO_MULTI_JPEG_SRAM (AST_VIDEO_ENCRYPT_SRAM) /* Multi JPEG registers */
++
++#define REG_32_BIT_SZ_IN_BYTES (sizeof(u32))
++
++#define SET_FRAME_W_H(w, h) ((((u32)(h)) & 0x1fff) | ((((u32)(w)) & 0x1fff) << 13))
++#define SET_FRAME_START_ADDR(addr) ((addr) & 0x7fffff80)
++
++/////////////////////////////////////////////////////////////////////////////
++
++/* AST_VIDEO_PROTECT: 0x000 - protection key register */
++#define VIDEO_PROTECT_UNLOCK 0x1A038AA8
++
++/* AST_VIDEO_SEQ_CTRL 0x004 Video Sequence Control register */
++#define VIDEO_HALT_ENG_STS BIT(21)
++#define VIDEO_COMPRESS_BUSY BIT(18)
++#define VIDEO_CAPTURE_BUSY BIT(16)
++#define VIDEO_HALT_ENG_TRIGGER BIT(12)
++#define VIDEO_COMPRESS_FORMAT_MASK BIT(10)
++#define VIDEO_GET_COMPRESS_FORMAT(x) (((x) >> 10) & 0x3) // 0 YUV444
++#define VIDEO_COMPRESS_FORMAT(x) ((x) << 10) // 0 YUV444
++#define YUV420 1
++
++#define G5_VIDEO_COMPRESS_JPEG_MODE BIT(13)
++#define VIDEO_YUV2RGB_DITHER_EN BIT(8)
++
++#define VIDEO_COMPRESS_JPEG_MODE BIT(8)
++
++//if bit 0 : 1
++#define VIDEO_INPUT_MODE_CHG_WDT BIT(7)
++#define VIDEO_INSERT_FULL_COMPRESS BIT(6)
++#define VIDEO_AUTO_COMPRESS BIT(5)
++#define VIDEO_COMPRESS_TRIGGER BIT(4)
++#define VIDEO_CAPTURE_MULTI_FRAME BIT(3)
++#define VIDEO_COMPRESS_FORCE_IDLE BIT(2)
++#define VIDEO_CAPTURE_TRIGGER BIT(1)
++#define VIDEO_DETECT_TRIGGER BIT(0)
++
++#define VIDEO_HALT_ENG_RB BIT(21)
++
++#define VIDEO_ABCD_CHG_EN BIT(1)
++#define VIDEO_BCD_CHG_EN (1)
++
++/* AST_VIDEO_PASS_CTRL 0x008 Video Pass1 Control register */
++#define G6_VIDEO_MULTI_JPEG_FLAG_MODE BIT(31)
++#define G6_VIDEO_MULTI_JPEG_MODE BIT(30)
++#define G6_VIDEO_JPEG__COUNT(x) ((x) << 24)
++#define G6_VIDEO_FRAME_CT_MASK (0x3f << 24)
++//x * source frame rate / 60
++#define VIDEO_FRAME_RATE_CTRL(x) ((x) << 16)
++#define VIDEO_HSYNC_POLARITY_CTRL BIT(15)
++#define VIDEO_INTERLANCE_MODE BIT(14)
++#define VIDEO_DUAL_EDGE_MODE BIT(13) //0 : Single edge
++#define VIDEO_18BIT_SINGLE_EDGE BIT(12) //0: 24bits
++#define VIDEO_DVO_INPUT_DELAY_MASK (7 << 9)
++#define VIDEO_DVO_INPUT_DELAY(x) ((x) << 9) //0: no delay, 1: 1ns, 2: 2ns, 3: 3ns, 4: inverted clock but no delay
++// if bit 5 : 0
++#define VIDEO_HW_CURSOR_DIS BIT(8)
++// if bit 5 : 1
++#define VIDEO_AUTO_FETCH BIT(8) //
++#define VIDEO_CAPTURE_FORMATE_MASK (3 << 6)
++
++#define VIDEO_SET_CAPTURE_FORMAT(x) ((x) << 6)
++#define JPEG_MODE 1
++#define RGB_MODE 2
++#define GRAY_MODE 3
++#define VIDEO_DIRECT_FETCH BIT(5)
++// if bit 5 : 0
++#define VIDEO_INTERNAL_DE BIT(4)
++#define VIDEO_EXT_ADC_ATTRIBUTE BIT(3)
++
++/* AST_VIDEO_DIRECT_CTRL 0x010 Video Direct Frame buffer mode control Register VR008[5]=1 */
++#define VIDEO_FETCH_TIMING(x) ((x) << 16)
++#define VIDEO_FETCH_LINE_OFFSET(x) ((x) & 0xffff)
++
++/* AST_VIDEO_CAPTURE_WIN 0x030 Video Capturing Window Setting Register */
++#define VIDEO_CAPTURE_V(x) ((x) & 0x7ff)
++#define VIDEO_CAPTURE_H(x) (((x) & 0x7ff) << 16)
++
++/* AST_VIDEO_COMPRESS_WIN 0x034 Video Compression Window Setting Register */
++#define VIDEO_COMPRESS_V(x) ((x) & 0x7ff)
++#define VIDEO_GET_COMPRESS_V(x) ((x) & 0x7ff)
++#define VIDEO_COMPRESS_H(x) (((x) & 0x7ff) << 16)
++#define VIDEO_GET_COMPRESS_H(x) (((x) >> 16) & 0x7ff)
++
++/* AST_VIDEO_STREAM_SIZE 0x058 Video Stream Buffer Size Register */
++#define VIDEO_STREAM_PKT_N(x) ((x) << 3)
++#define STREAM_4_PKTS 0
++#define STREAM_8_PKTS 1
++#define STREAM_16_PKTS 2
++#define STREAM_32_PKTS 3
++#define STREAM_64_PKTS 4
++#define STREAM_128_PKTS 5
++
++#define VIDEO_STREAM_PKT_SIZE(x) (x)
++#define STREAM_1KB 0
++#define STREAM_2KB 1
++#define STREAM_4KB 2
++#define STREAM_8KB 3
++#define STREAM_16KB 4
++#define STREAM_32KB 5
++#define STREAM_64KB 6
++#define STREAM_128KB 7
++
++/* AST_VIDEO_COMPRESS_CTRL 0x060 Video Compression Control Register */
++#define VIDEO_DCT_CQT_SELECTION (0xf << 6) // bits 6-9; bit 10 selects which quantization table is referenced
++#define VIDEO_DCT_HQ_CQT_SELECTION (0xf << 27) // bits 27-30; bit 31 selects which quantization table is referenced
++
++#define VIDEO_HQ_DCT_LUM(x) ((x) << 27)
++#define VIDEO_GET_HQ_DCT_LUM(x) (((x) >> 27) & 0x1f)
++#define VIDEO_HQ_DCT_CHROM(x) ((x) << 22)
++#define VIDEO_GET_HQ_DCT_CHROM(x) (((x) >> 22) & 0x1f)
++#define VIDEO_HQ_DCT_MASK (0x3ff << 22)
++#define VIDEO_DCT_HUFFMAN_ENCODE(x) ((x) << 20)
++#define VIDEO_DCT_RESET BIT(17)
++#define VIDEO_HQ_ENABLE BIT(16)
++#define VIDEO_GET_HQ_ENABLE(x) (((x) >> 16) & 0x1)
++#define VIDEO_DCT_LUM(x) ((x) << 11)
++#define VIDEO_GET_DCT_LUM(x) (((x) >> 11) & 0x1f)
++#define VIDEO_DCT_CHROM(x) ((x) << 6)
++#define VIDEO_GET_DCT_CHROM(x) (((x) >> 6) & 0x1f)
++#define VIDEO_DCT_MASK (0x3ff << 6)
++#define VIDEO_ENCRYP_ENABLE BIT(5)
++#define VIDEO_COMPRESS_QUANTIZ_MODE BIT(2)
++#define VIDEO_4COLOR_VQ_ENCODE BIT(1)
++#define VIDEO_DCT_ONLY_ENCODE (1)
++#define VIDEO_DCT_VQ_MASK (0x3)
++
++#define VIDEO_CTRL_RC4_TEST_MODE BIT(9)
++#define VIDEO_CTRL_RC4_RST BIT(8)
++
++#define VIDEO_CTRL_ADDRESS_MAP_MULTI_JPEG (0x3 << 30)
++
++#define VIDEO_CTRL_DWN_SCALING_MASK (0x3 << 4)
++#define VIDEO_CTRL_DWN_SCALING_ENABLE_LINE_BUFFER BIT(4)
++
++/* AST_VIDEO_INT_EN 0x304 Video interrupt Enable */
++/* AST_VIDEO_INT_STS 0x308 Video interrupt status */
++#define VM_COMPRESS_COMPLETE BIT(17)
++#define VM_CAPTURE_COMPLETE BIT(16)
++
++#define VIDEO_FRAME_COMPLETE BIT(5)
++#define VIDEO_MODE_DETECT_RDY BIT(4)
++#define VIDEO_COMPRESS_COMPLETE BIT(3)
++#define VIDEO_COMPRESS_PKT_COMPLETE BIT(2)
++#define VIDEO_CAPTURE_COMPLETE BIT(1)
++#define VIDEO_MODE_DETECT_WDT BIT(0)
++
++/***********************************************************************/
++struct ast_capture_mode {
++ u8 engine_idx; //set: 0 = engine 0, 1 = engine 1
++ u8 differential; //set: 0 = full frame, 1 = differential frame
++ u8 mode_change; //get: 0 = no change, 1 = mode changed
++};
++
++struct ast_compression_mode {
++ u8 engine_idx; //set: 0 = engine 0, 1 = engine 1
++ u8 mode_change; //get: 0 = no change, 1 = mode changed
++ u32 total_size; //get
++ u32 block_count; //get
++};
++
++/***********************************************************************/
++struct INTERNAL_MODE {
++ u16 HorizontalActive;
++ u16 VerticalActive;
++ u16 RefreshRateIndex;
++ u32 PixelClock;
++};
++
++// ioctl functions
++void ioctl_get_video_engine_config(struct VideoConfig *pVideoConfig, struct AstRVAS *pAstRVAS);
++void ioctl_set_video_engine_config(struct VideoConfig *pVideoConfig, struct AstRVAS *pAstRVAS);
++void ioctl_get_video_engine_data(struct MultiJpegConfig *pArrayMJConfig, struct AstRVAS *pAstRVAS, phys_addr_t dwPhyStreamAddress);
++void ioctl_get_video_engine_data_2700(struct MultiJpegConfig *pArrayMJConfig, struct AstRVAS *pAstRVAS, dma_addr_t dwPhyStreamAddress);
++
++//local functions
++irqreturn_t ast_video_isr(int this_irq, void *dev_id);
++int video_engine_reserveMem(struct AstRVAS *pAstRVAS);
++void enable_video_interrupt(struct AstRVAS *pAstRVAS);
++void disable_video_interrupt(struct AstRVAS *pAstRVAS);
++void video_set_Window(struct AstRVAS *pAstRVAS);
++int free_video_engine_memory(struct AstRVAS *pAstRVAS);
++void video_ctrl_init(struct AstRVAS *pAstRVAS);
++void video_engine_rc4Reset(struct AstRVAS *pAstRVAS);
++void set_direct_mode(struct AstRVAS *pAstRVAS);
++
++#endif // __VIDEO_ENGINE_H__
+diff --git a/drivers/soc/aspeed/rvas/video_ioctl.h b/drivers/soc/aspeed/rvas/video_ioctl.h
+new file mode 100644
+index 000000000..36443294f
+--- /dev/null
++++ b/drivers/soc/aspeed/rvas/video_ioctl.h
+@@ -0,0 +1,275 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * This file is part of the ASPEED Linux Device Driver for ASPEED Baseboard Management Controller.
++ * Refer to the README file included with this package for driver version and adapter compatibility.
++ *
++ * Copyright (C) 2019-2021 ASPEED Technology Inc. All rights reserved.
++ *
++ */
++
++#ifndef _VIDEO_IOCTL_H
++#define _VIDEO_IOCTL_H
++
++#include <linux/types.h>
++
++#define RVAS_MAGIC ('b')
++#define CMD_IOCTL_TURN_LOCAL_MONITOR_ON _IOR(RVAS_MAGIC, IOCTL_TURN_LOCAL_MONITOR_ON, struct RvasIoctl)
++#define CMD_IOCTL_TURN_LOCAL_MONITOR_OFF _IOR(RVAS_MAGIC, IOCTL_TURN_LOCAL_MONITOR_OFF, struct RvasIoctl)
++#define CMD_IOCTL_IS_LOCAL_MONITOR_ENABLED _IOR(RVAS_MAGIC, IOCTL_IS_LOCAL_MONITOR_ENABLED, struct RvasIoctl)
++#define CMD_IOCTL_GET_VIDEO_GEOMETRY _IOWR(RVAS_MAGIC, IOCTL_GET_VIDEO_GEOMETRY, struct RvasIoctl)
++#define CMD_IOCTL_WAIT_FOR_VIDEO_EVENT _IOWR(RVAS_MAGIC, IOCTL_WAIT_FOR_VIDEO_EVENT, struct RvasIoctl)
++#define CMD_IOCTL_GET_GRC_REGIESTERS _IOWR(RVAS_MAGIC, IOCTL_GET_GRC_REGIESTERS, struct RvasIoctl)
++#define CMD_IOCTL_READ_SNOOP_MAP _IOWR(RVAS_MAGIC, IOCTL_READ_SNOOP_MAP, struct RvasIoctl)
++#define CMD_IOCTL_READ_SNOOP_AGGREGATE _IOWR(RVAS_MAGIC, IOCTL_READ_SNOOP_AGGREGATE, struct RvasIoctl)
++#define CMD_IOCTL_FETCH_VIDEO_TILES _IOWR(RVAS_MAGIC, IOCTL_FETCH_VIDEO_TILES, struct RvasIoctl)
++#define CMD_IOCTL_FETCH_VIDEO_SLICES _IOWR(RVAS_MAGIC, IOCTL_FETCH_VIDEO_SLICES, struct RvasIoctl)
++#define CMD_IOCTL_RUN_LENGTH_ENCODE_DATA _IOWR(RVAS_MAGIC, IOCTL_RUN_LENGTH_ENCODE_DATA, struct RvasIoctl)
++#define CMD_IOCTL_FETCH_TEXT_DATA _IOWR(RVAS_MAGIC, IOCTL_FETCH_TEXT_DATA, struct RvasIoctl)
++#define CMD_IOCTL_FETCH_MODE13_DATA _IOWR(RVAS_MAGIC, IOCTL_FETCH_MODE13_DATA, struct RvasIoctl)
++#define CMD_IOCTL_NEW_CONTEXT _IOWR(RVAS_MAGIC, IOCTL_NEW_CONTEXT, struct RvasIoctl)
++#define CMD_IOCTL_DEL_CONTEXT _IOWR(RVAS_MAGIC, IOCTL_DEL_CONTEXT, struct RvasIoctl)
++#define CMD_IOCTL_ALLOC _IOWR(RVAS_MAGIC, IOCTL_ALLOC, struct RvasIoctl)
++#define CMD_IOCTL_FREE _IOWR(RVAS_MAGIC, IOCTL_FREE, struct RvasIoctl)
++#define CMD_IOCTL_SET_TSE_COUNTER _IOWR(RVAS_MAGIC, IOCTL_SET_TSE_COUNTER, struct RvasIoctl)
++#define CMD_IOCTL_GET_TSE_COUNTER _IOWR(RVAS_MAGIC, IOCTL_GET_TSE_COUNTER, struct RvasIoctl)
++#define CMD_IOCTL_VIDEO_ENGINE_RESET _IOWR(RVAS_MAGIC, IOCTL_VIDEO_ENGINE_RESET, struct RvasIoctl)
++//jpeg
++#define CMD_IOCTL_SET_VIDEO_ENGINE_CONFIG _IOW(RVAS_MAGIC, IOCTL_SET_VIDEO_ENGINE_CONFIG, struct VideoConfig*)
++#define CMD_IOCTL_GET_VIDEO_ENGINE_CONFIG _IOW(RVAS_MAGIC, IOCTL_GET_VIDEO_ENGINE_CONFIG, struct VideoConfig*)
++#define CMD_IOCTL_GET_VIDEO_ENGINE_DATA _IOWR(RVAS_MAGIC, IOCTL_GET_VIDEO_ENGINE_DATA, struct MultiJpegConfig*)
++
++enum HARD_WARE_ENGINE_IOCTL {
++ IOCTL_TURN_LOCAL_MONITOR_ON = 20, //REMOTE VIDEO GENERAL IOCTL
++ IOCTL_TURN_LOCAL_MONITOR_OFF,
++ IOCTL_IS_LOCAL_MONITOR_ENABLED,
++
++ IOCTL_GET_VIDEO_GEOMETRY = 40, // REMOTE VIDEO
++ IOCTL_WAIT_FOR_VIDEO_EVENT,
++ IOCTL_GET_GRC_REGIESTERS,
++ IOCTL_READ_SNOOP_MAP,
++ IOCTL_READ_SNOOP_AGGREGATE,
++ IOCTL_FETCH_VIDEO_TILES,
++ IOCTL_FETCH_VIDEO_SLICES,
++ IOCTL_RUN_LENGTH_ENCODE_DATA,
++ IOCTL_FETCH_TEXT_DATA,
++ IOCTL_FETCH_MODE13_DATA,
++ IOCTL_NEW_CONTEXT,
++ IOCTL_DEL_CONTEXT,
++ IOCTL_ALLOC,
++ IOCTL_FREE,
++ IOCTL_SET_TSE_COUNTER,
++ IOCTL_GET_TSE_COUNTER,
++ IOCTL_VIDEO_ENGINE_RESET,
++ IOCTL_SET_VIDEO_ENGINE_CONFIG,
++ IOCTL_GET_VIDEO_ENGINE_CONFIG,
++ IOCTL_GET_VIDEO_ENGINE_DATA,
++};
++
++enum GraphicsModeType {
++ InvalidMode = 0, TextMode = 1, VGAGraphicsMode = 2, AGAGraphicsMode = 3
++};
++
++enum RVASStatus {
++ SuccessStatus = 0,
++ GenericError = 1,
++ MemoryAllocError = 2,
++ InvalidMemoryHandle = 3,
++ CannotMapMemory = 4,
++ CannotUnMapMemory = 5,
++ TimedOut = 6,
++ InvalidContextHandle = 7,
++ CaptureTimedOut = 8,
++ CompressionTimedOut = 9,
++ HostSuspended
++};
++
++enum SelectedByteMode {
++ AllBytesMode = 0,
++ SkipMode = 1,
++ PlanarToPackedMode,
++ PackedToPackedMode,
++ LowByteMode,
++ MiddleByteMode,
++ TopByteMode
++};
++
++enum DataProccessMode {
++ NormalTileMode = 0,
++ FourBitPlanarMode = 1,
++ FourBitPackedMode = 2,
++ AttrMode = 3,
++ AsciiOnlyMode = 4,
++ FontFetchMode = 5,
++ SplitByteMode = 6
++};
++
++enum ResetEngineMode {
++ ResetAll = 0,
++ ResetRvasEngine = 1,
++ ResetVeEngine = 2
++};
++
++struct VideoGeometry {
++ u16 wScreenWidth;
++ u16 wScreenHeight;
++ u16 wStride;
++ u8 byBitsPerPixel;
++ u8 byModeID;
++ enum GraphicsModeType gmt;
++};
++
++struct EventMap {
++ u32 bPaletteChanged :1;
++ u32 bATTRChanged :1;
++ u32 bSEQChanged :1;
++ u32 bGCTLChanged :1;
++ u32 bCRTCChanged :1;
++ u32 bCRTCEXTChanged :1;
++ u32 bPLTRAMChanged :1;
++ u32 bXCURCOLChanged :1;
++ u32 bXCURCTLChanged :1;
++ u32 bXCURPOSChanged :1;
++ u32 bDoorbellA :1;
++ u32 bDoorbellB :1;
++ u32 bGeometryChanged :1;
++ u32 bSnoopChanged :1;
++ u32 bTextFontChanged :1;
++ u32 bTextATTRChanged :1;
++ u32 bTextASCIIChanged :1;
++};
++
++struct FetchMap {
++ //in parameters
++ bool bEnableRLE;
++ u8 bTextAlignDouble; // 0 - 8 byte, 1 - 16 byte
++ u8 byRLETripletCode;
++ u8 byRLERepeatCode;
++ enum DataProccessMode dpm;
++ //out parameters
++ u32 dwFetchSize;
++ u32 dwFetchRLESize;
++ u32 dwCheckSum;
++ bool bRLEFailed;
++ u8 rsvd[3];
++};
++
++struct SnoopAggregate {
++ u64 qwRow;
++ u64 qwCol;
++};
++
++struct FetchRegion {
++ u16 wTopY;
++ u16 wLeftX;
++ u16 wBottomY;
++ u16 wRightX;
++};
++
++struct FetchOperation {
++ struct FetchRegion fr;
++ enum SelectedByteMode sbm;
++ u32 dwFetchSize;
++ u32 dwFetchRLESize;
++ u32 dwCheckSum;
++ bool bRLEFailed;
++ bool bEnableRLE;
++ u8 byRLETripletCode;
++ u8 byRLERepeatCode;
++ u8 byVGATextAlignment; //0-8bytes, 1-16bytes.
++ u8 rsvd[3];
++};
++
++struct FetchVideoTilesArg {
++ struct VideoGeometry vg;
++ u32 dwTotalOutputSize;
++ u32 cfo;
++ struct FetchOperation pfo[4];
++};
++
++struct FetchVideoSlicesArg {
++ struct VideoGeometry vg;
++ u32 dwSlicedSize;
++ u32 dwSlicedRLESize;
++ u32 dwCheckSum;
++ bool bEnableRLE;
++ bool bRLEFailed;
++ u8 byRLETripletCode;
++ u8 byRLERepeatCode;
++ u8 cBuckets;
++ u8 rsvd[3];
++ u8 abyBitIndexes[24];
++ u32 cfr;
++ struct FetchRegion pfr[4];
++};
++
++struct RVASBuffer {
++ void *pv;
++ size_t cb;
++};
++
++struct RvasIoctl {
++ enum RVASStatus rs;
++ void *rc;
++ struct RVASBuffer rvb;
++ void *rmh;
++ void *rmh1;
++ void *rmh2;
++ u32 rmh_mem_size;
++ u32 rmh1_mem_size;
++ u32 rmh2_mem_size;
++ struct VideoGeometry vg;
++ struct EventMap em;
++ struct SnoopAggregate sa;
++ union {
++ u32 tse_counter;
++ u32 req_mem_size;
++ u32 encode;
++ u32 time_out;
++ };
++ u32 rle_len; // RLE Length
++ u32 rle_checksum;
++ struct FetchMap tfm;
++ u8 flag;
++ u8 lms;
++ u8 resetMode;
++ u8 rsvd;
++};
++
++//
++// Video Engine
++//
++
++#define MAX_MULTI_FRAME_CT (32)
++
++struct VideoConfig {
++	u8 engine;		//0: engine 0 (normal engine), 1: engine 1 (VM legacy engine)
++ u8 compression_mode; //0:DCT, 1:DCT_VQ mix VQ-2 color, 2:DCT_VQ mix VQ-4 color 9:
++ u8 compression_format; //0:ASPEED 1:JPEG
++ u8 capture_format; //0:CCIR601-2 YUV, 1:JPEG YUV, 2:RGB for ASPEED mode only, 3:Gray
++ u8 rc4_enable; //0:disable 1:enable
++ u8 YUV420_mode; //0:YUV444, 1:YUV420
++ u8 Visual_Lossless;
++ u8 Y_JPEGTableSelector;
++ u8 AdvanceTableSelector;
++ u8 AutoMode;
++ u8 rsvd[2];
++ enum RVASStatus rs;
++};
++
++struct MultiJpegFrame {
++ u32 dwSizeInBytes; // Image size in bytes
++ u32 dwOffsetInBytes; // Offset in bytes
++ u16 wXPixels; // In: X coordinate
++ u16 wYPixels; // In: Y coordinate
++ u16 wWidthPixels; // In: Width for Fetch
++ u16 wHeightPixels; // In: Height for Fetch
++};
++
++struct MultiJpegConfig {
++ unsigned char multi_jpeg_frames; // frame count
++ struct MultiJpegFrame frame[MAX_MULTI_FRAME_CT]; // The Multi Frames
++ void *aStreamHandle;
++ enum RVASStatus rs;
++};
++
++#endif // _VIDEO_IOCTL_H
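++
++/*
++ * Illustrative userspace sketch (compiled out, not built with the driver):
++ * one plausible way to query the current video geometry through this
++ * interface. The "/dev/rvas" node name is an assumption derived from
++ * RVAS_DRIVER_NAME, and the kernel fixed-width types (u8/u16/u32) would
++ * need matching userspace typedefs.
++ */
++#if 0
++#include <fcntl.h>
++#include <stdio.h>
++#include <string.h>
++#include <unistd.h>
++#include <sys/ioctl.h>
++
++static int example_get_geometry(void)
++{
++	struct RvasIoctl ri;
++	int fd = open("/dev/rvas", O_RDWR);	/* assumed device node */
++
++	if (fd < 0)
++		return -1;
++	memset(&ri, 0, sizeof(ri));
++	if (!ioctl(fd, CMD_IOCTL_GET_VIDEO_GEOMETRY, &ri) && ri.rs == SuccessStatus)
++		printf("%ux%u @ %u bpp\n", ri.vg.wScreenWidth,
++		       ri.vg.wScreenHeight, ri.vg.byBitsPerPixel);
++	close(fd);
++	return 0;
++}
++#endif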
+diff --git a/drivers/soc/aspeed/rvas/video_main.c b/drivers/soc/aspeed/rvas/video_main.c
+new file mode 100644
+index 000000000..6d486d219
+--- /dev/null
++++ b/drivers/soc/aspeed/rvas/video_main.c
+@@ -0,0 +1,1851 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * File Name : video_main.c
++ * Description : AST2600 RVAS hardware engines
++ *
++ * Copyright (C) ASPEED Technology Inc.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <linux/interrupt.h>
++#include <linux/device.h>
++#include <linux/reset.h>
++#include <asm/uaccess.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/platform_device.h>
++#include <linux/cdev.h>
++#include <linux/dma-mapping.h>
++#include <linux/miscdevice.h>
++#include <linux/slab.h>
++#include <linux/wait.h>
++#include <linux/sched.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/mm.h>
++#include <linux/of_reserved_mem.h>
++#include <linux/regmap.h>
++#include <linux/mfd/syscon.h>
++#include <linux/clk.h>
++
++#include "video_ioctl.h"
++#include "hardware_engines.h"
++#include "video.h"
++#include "video_debug.h"
++#include "video_engine.h"
++
++#define TEST_GRCE_DETECT_RESOLUTION_CHG
++
++static long video_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
++static int video_open(struct inode *inode, struct file *file);
++static int video_release(struct inode *inode, struct file *file);
++static irqreturn_t fge_handler(int irq, void *dev_id);
++static void video_off(struct AstRVAS *pAstRVAS);
++static void video_on(struct AstRVAS *pAstRVAS);
++
++static void video_os_init_sleep_struct(struct Video_OsSleepStruct *Sleep);
++static void video_ss_wakeup_on_timeout(struct Video_OsSleepStruct *Sleep);
++static void enable_rvas_engines(struct AstRVAS *pAstRVAS);
++static void video_engine_init(struct AstRVAS *pAstRVAS);
++static void rvas_init(struct AstRVAS *pAstRVAS);
++static void reset_rvas_engine(struct AstRVAS *pAstRVAS);
++static void reset_video_engine(struct AstRVAS *pAstRVAS);
++static void set_FBInfo_size(struct AstRVAS *pAstRVAS, void __iomem *mcr_base);
++
++static long video_os_sleep_on_timeout(struct Video_OsSleepStruct *Sleep, u8 *Var, long msecs);
++
++static struct AstRVAS *file_ast_rvas(struct file *file)
++{
++ return container_of(file->private_data, struct AstRVAS, rvas_dev);
++}
++
++static long video_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ int iResult = 0;
++ struct RvasIoctl ri;
++ struct VideoConfig video_config;
++ struct MultiJpegConfig multi_jpeg;
++ u8 bVideoCmd = 0;
++ dma_addr_t dw_phys = 0;
++ struct AstRVAS *pAstRVAS = file_ast_rvas(file);
++
++ VIDEO_DBG("Start\n");
++ VIDEO_DBG("pAstRVAS: 0x%p\n", pAstRVAS);
++ memset(&ri, 0, sizeof(ri));
++
++ if (cmd != CMD_IOCTL_SET_VIDEO_ENGINE_CONFIG &&
++ cmd != CMD_IOCTL_GET_VIDEO_ENGINE_CONFIG &&
++ cmd != CMD_IOCTL_GET_VIDEO_ENGINE_DATA) {
++ if (raw_copy_from_user(&ri, (void *)arg, sizeof(struct RvasIoctl))) {
++ dev_err(pAstRVAS->pdev, "Copy from user buffer Failed\n");
++ return -EINVAL;
++ }
++
++ ri.rs = SuccessStatus;
++ bVideoCmd = 0;
++ } else {
++ bVideoCmd = 1;
++ }
++
++ VIDEO_DBG(" Command = 0x%x\n", cmd);
++
++ switch (cmd) {
++ case CMD_IOCTL_TURN_LOCAL_MONITOR_ON:
++ if (pAstRVAS->config->version == 7)
++ ioctl_update_lms_2700(0x1, pAstRVAS);
++ else
++ ioctl_update_lms(0x1, pAstRVAS);
++ break;
++
++ case CMD_IOCTL_TURN_LOCAL_MONITOR_OFF:
++ if (pAstRVAS->config->version == 7)
++ ioctl_update_lms_2700(0x0, pAstRVAS);
++ else
++ ioctl_update_lms(0x0, pAstRVAS);
++ break;
++
++ case CMD_IOCTL_IS_LOCAL_MONITOR_ENABLED:
++ u32 status;
++
++ if (pAstRVAS->config->version == 7)
++ status = ioctl_get_lm_status_2700(pAstRVAS);
++ else
++ status = ioctl_get_lm_status(pAstRVAS);
++
++ if (status)
++ ri.lms = 0x1;
++ else
++ ri.lms = 0x0;
++ break;
++
++ case CMD_IOCTL_GET_VIDEO_GEOMETRY:
++ VIDEO_DBG(" Command CMD_IOCTL_GET_VIDEO_GEOMETRY\n");
++ ioctl_get_video_geometry(&ri, pAstRVAS);
++ break;
++
++ case CMD_IOCTL_WAIT_FOR_VIDEO_EVENT:
++ VIDEO_DBG(" Command CMD_IOCTL_WAIT_FOR_VIDEO_EVENT\n");
++ ioctl_wait_for_video_event(&ri, pAstRVAS);
++ break;
++
++ case CMD_IOCTL_GET_GRC_REGIESTERS:
++ VIDEO_DBG(" Command CMD_IOCTL_GET_GRC_REGIESTERS\n");
++ ioctl_get_grc_register(&ri, pAstRVAS);
++ break;
++
++ case CMD_IOCTL_READ_SNOOP_MAP:
++ VIDEO_DBG(" Command CMD_IOCTL_READ_SNOOP_MAP\n");
++ ioctl_read_snoop_map(&ri, pAstRVAS);
++ break;
++
++ case CMD_IOCTL_READ_SNOOP_AGGREGATE:
++ VIDEO_DBG(" Command CMD_IOCTL_READ_SNOOP_AGGREGATE\n");
++ ioctl_read_snoop_aggregate(&ri, pAstRVAS);
++ break;
++
++ case CMD_IOCTL_FETCH_VIDEO_TILES: ///
++ VIDEO_DBG("CMD_IOCTL_FETCH_VIDEO_TILES\n");
++ ioctl_fetch_video_tiles(&ri, pAstRVAS);
++ break;
++
++ case CMD_IOCTL_FETCH_VIDEO_SLICES:
++ VIDEO_DBG(" Command CMD_IOCTL_FETCH_VIDEO_SLICES\n");
++ ioctl_fetch_video_slices(&ri, pAstRVAS);
++ break;
++
++ case CMD_IOCTL_RUN_LENGTH_ENCODE_DATA:
++ VIDEO_DBG(" Command CMD_IOCTL_RUN_LENGTH_ENCODE_DATA\n");
++ ioctl_run_length_encode_data(&ri, pAstRVAS);
++ break;
++
++ case CMD_IOCTL_FETCH_TEXT_DATA:
++ VIDEO_DBG(" Command CMD_IOCTL_FETCH_TEXT_DATA\n");
++ ioctl_fetch_text_data(&ri, pAstRVAS);
++ break;
++
++ case CMD_IOCTL_FETCH_MODE13_DATA:
++ VIDEO_DBG(" Command CMD_IOCTL_FETCH_MODE13_DATA\n");
++ ioctl_fetch_mode_13_data(&ri, pAstRVAS);
++ break;
++
++ case CMD_IOCTL_ALLOC:
++ VIDEO_DBG(" Command CMD_IOCTL_ALLOC\n");
++ ioctl_alloc(file, &ri, pAstRVAS);
++ break;
++
++ case CMD_IOCTL_FREE:
++ VIDEO_DBG(" Command CMD_IOCTL_FREE\n");
++ ioctl_free(&ri, pAstRVAS);
++ break;
++
++ case CMD_IOCTL_NEW_CONTEXT:
++ VIDEO_DBG(" Command CMD_IOCTL_NEW_CONTEXT\n");
++ ioctl_new_context(file, &ri, pAstRVAS);
++ break;
++
++ case CMD_IOCTL_DEL_CONTEXT:
++ VIDEO_DBG(" Command CMD_IOCTL_DEL_CONTEXT\n");
++ ioctl_delete_context(&ri, pAstRVAS);
++ break;
++
++ case CMD_IOCTL_SET_TSE_COUNTER:
++ VIDEO_DBG(" Command CMD_IOCTL_SET_TSE_COUNTER\n");
++ ioctl_set_tse_tsicr(&ri, pAstRVAS);
++ break;
++
++ case CMD_IOCTL_GET_TSE_COUNTER:
++ VIDEO_DBG(" Command CMD_IOCTL_GET_TSE_COUNTER\n");
++ ioctl_get_tse_tsicr(&ri, pAstRVAS);
++ break;
++
++ case CMD_IOCTL_VIDEO_ENGINE_RESET:
++ VIDEO_ENG_DBG(" Command CMD_IOCTL_VIDEO_ENGINE_RESET\n");
++ ioctl_reset_video_engine(&ri, pAstRVAS);
++ break;
++ case CMD_IOCTL_GET_VIDEO_ENGINE_CONFIG:
++ VIDEO_DBG(" Command CMD_IOCTL_GET_VIDEO_ENGINE_CONFIG\n");
++ ioctl_get_video_engine_config(&video_config, pAstRVAS);
++
++ iResult = raw_copy_to_user((void *)arg, &video_config, sizeof(video_config));
++ break;
++ case CMD_IOCTL_SET_VIDEO_ENGINE_CONFIG:
++ VIDEO_DBG(" Command CMD_IOCTL_SET_VIDEO_ENGINE_CONFIG\n");
++ iResult = raw_copy_from_user(&video_config, (void *)arg, sizeof(video_config));
++
++ ioctl_set_video_engine_config(&video_config, pAstRVAS);
++ break;
++ case CMD_IOCTL_GET_VIDEO_ENGINE_DATA:
++ VIDEO_DBG(" Command CMD_IOCTL_GET_VIDEO_ENGINE_DATA\n");
++ iResult = raw_copy_from_user(&multi_jpeg, (void *)arg, sizeof(multi_jpeg));
++ dw_phys = get_phys_add_rsvd_mem((u32)multi_jpeg.aStreamHandle, pAstRVAS);
++ VIDEO_DBG("physical stream address: %#llx\n", dw_phys);
++
++ if (dw_phys == 0) {
++ dev_err(pAstRVAS->pdev, "Error of getting stream buffer address\n");
++ } else {
++ if (pAstRVAS->config->version == 7)
++ ioctl_get_video_engine_data_2700(&multi_jpeg, pAstRVAS, dw_phys);
++ else
++ ioctl_get_video_engine_data(&multi_jpeg, pAstRVAS, dw_phys);
++ }
++
++ iResult = raw_copy_to_user((void *)arg, &multi_jpeg, sizeof(multi_jpeg));
++ break;
++ default:
++ dev_err(pAstRVAS->pdev, "Unknown Ioctl: %#x\n", cmd);
++ iResult = -EINVAL;
++ break;
++ }
++
++ if (!iResult && !bVideoCmd)
++ if (raw_copy_to_user((void *)arg, &ri, sizeof(struct RvasIoctl))) {
++ dev_err(pAstRVAS->pdev, "Copy to user buffer Failed\n");
++ iResult = -EINVAL;
++ }
++
++ return iResult;
++}
++
++phys_addr_t get_phy_fb_start_address(struct AstRVAS *pAstRVAS)
++{
++ u32 dw_offset = get_screen_offset(pAstRVAS);
++
++ pAstRVAS->FBInfo.qwFBPhysStart = (pAstRVAS->config->version == 7)
++ ? DDR_BASE_27
++ : DDR_BASE;
++ pAstRVAS->FBInfo.qwFBPhysStart += pAstRVAS->FBInfo.dwDRAMSize - pAstRVAS->FBInfo.dwVGASize + dw_offset;
++ if (pAstRVAS->rvas_index == 1)
++ pAstRVAS->FBInfo.qwFBPhysStart -= pAstRVAS->FBInfo.dwVGASize;
++
++ HW_ENG_DBG("Frame buffer start address: %#x, dram size: %#x, vga size: %#x\n",
++ pAstRVAS->FBInfo.qwFBPhysStart,
++ pAstRVAS->FBInfo.dwDRAMSize,
++ pAstRVAS->FBInfo.dwVGASize);
++
++ return pAstRVAS->FBInfo.qwFBPhysStart;
++}
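++
++/*
++ * Worked example with the version-7 values set in video_drv_probe() and a
++ * zero screen offset: dwDRAMSize = 0x40000000 and dwVGASize = 0x2000000, so
++ * rvas_index 0 gives DDR_BASE_27 + 0x3e000000, and rvas_index 1, which
++ * subtracts one more dwVGASize, gives DDR_BASE_27 + 0x3c000000.
++ */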
++
++static int video_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ size_t size;
++ u32 dw_index;
++ u8 found = 0;
++ struct AstRVAS *pAstRVAS = file_ast_rvas(file);
++
++ struct MemoryMapTable **pmmt = pAstRVAS->ppmmtMemoryTable;
++
++ size = vma->vm_end - vma->vm_start;
++ vma->vm_private_data = pAstRVAS;
++ VIDEO_DBG("vma->vm_start 0x%lx, vma->vm_end 0x%lx, vma->vm_pgoff=0x%llx\n",
++ vma->vm_start,
++ vma->vm_end,
++ vma->vm_pgoff);
++ VIDEO_DBG("(vma->vm_pgoff << PAGE_SHIFT) = 0x%llx\n", (vma->vm_pgoff << PAGE_SHIFT));
++ for (dw_index = 0; dw_index < MAX_NUM_MEM_TBL; ++dw_index) {
++ if (pmmt[dw_index]) {
++ VIDEO_DBG("index %d, phys_addr=0x%llx, virt_addr=%p, length=0x%x\n",
++ dw_index,
++ pmmt[dw_index]->mem_phys,
++ pmmt[dw_index]->pvVirtualAddr,
++ pmmt[dw_index]->dwLength);
++ if ((vma->vm_pgoff << PAGE_SHIFT) == pmmt[dw_index]->mem_phys) {
++ found = 1;
++ if (size > pmmt[dw_index]->dwLength) {
++					pr_err("requested size exceeds allocated size\n");
++ return -EAGAIN;
++ }
++ break;
++ }
++ }
++ }
++ if (!found) {
++		pr_err("no matching memory entry\n");
++ return -EAGAIN;
++ }
++
++ vm_flags_set(vma, VM_IO);
++ if (pAstRVAS->config->version == 7)
++ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
++ else
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++
++ if (io_remap_pfn_range(vma, vma->vm_start,
++ ((u32)vma->vm_pgoff), size,
++ vma->vm_page_prot)) {
++ pr_err("remap_pfn_range fail at %s()\n", __func__);
++ return -EAGAIN;
++ }
++
++ return 0;
++}
++
++static int video_open(struct inode *pin, struct file *file)
++{
++ struct AstRVAS *pAstRVAS = file_ast_rvas(file);
++
++ VIDEO_DBG("\n");
++
++ // make sure the rvas clk is running.
++ // if it's already enabled, clk_enable will just return.
++ clk_enable(pAstRVAS->rvasclk);
++
++ return 0;
++}
++
++void free_all_mem_entries(struct AstRVAS *pAstRVAS)
++{
++ u32 dw_index;
++ struct MemoryMapTable **pmmt = pAstRVAS->ppmmtMemoryTable;
++ void *virt_add;
++ dma_addr_t dw_phys;
++ u32 len;
++
++ VIDEO_DBG("Removing mem map entries...\n");
++ for (dw_index = 0; dw_index < MAX_NUM_MEM_TBL; ++dw_index) {
++ if (pmmt[dw_index]) {
++ if (pmmt[dw_index]->mem_phys) {
++ virt_add = get_virt_add_rsvd_mem(dw_index, pAstRVAS);
++ dw_phys = get_phys_add_rsvd_mem(dw_index, pAstRVAS);
++ len = get_len_rsvd_mem(dw_index, pAstRVAS);
++ dma_free_coherent(pAstRVAS->pdev, len, virt_add, dw_phys);
++ }
++ pmmt[dw_index]->pf = NULL;
++ kfree(pmmt[dw_index]);
++ pmmt[dw_index] = NULL;
++ }
++ }
++}
++
++static int video_release(struct inode *inode, struct file *file)
++{
++ u32 dw_index;
++ struct AstRVAS *pAstRVAS = file_ast_rvas(file);
++ struct ContextTable **ppctContextTable = pAstRVAS->ppctContextTable;
++
++ VIDEO_DBG("Start\n");
++
++ free_all_mem_entries(pAstRVAS);
++
++ VIDEO_DBG("ppctContextTable: 0x%p\n", ppctContextTable);
++
++ disable_grce_tse_interrupt(pAstRVAS);
++
++ for (dw_index = 0; dw_index < MAX_NUM_CONTEXT; ++dw_index) {
++ if (ppctContextTable[dw_index]) {
++ VIDEO_DBG("Releasing Context dw_index: %u\n", dw_index);
++ kfree(ppctContextTable[dw_index]);
++ ppctContextTable[dw_index] = NULL;
++ }
++ }
++ enable_grce_tse_interrupt(pAstRVAS);
++ VIDEO_DBG("End\n");
++
++ return 0;
++}
++
++static struct file_operations video_module_ops = { .compat_ioctl = video_ioctl,
++ .unlocked_ioctl = video_ioctl, .open = video_open, .release =
++ video_release, .mmap = video_mmap, .owner = THIS_MODULE, };
++
++static struct miscdevice video_misc = { .minor = MISC_DYNAMIC_MINOR, .name =
++ RVAS_DRIVER_NAME, .fops = &video_module_ops, };
++
++void ioctl_new_context(struct file *file, struct RvasIoctl *pri, struct AstRVAS *pAstRVAS)
++{
++ struct ContextTable *pct;
++
++ VIDEO_DBG("Start\n");
++ pct = get_new_context_table_entry(pAstRVAS);
++
++ if (pct) {
++ pct->desc_virt = dma_alloc_coherent(pAstRVAS->pdev, PAGE_SIZE, (dma_addr_t *)&pct->desc_phy, GFP_KERNEL);
++ if (!pct->desc_virt) {
++ pri->rs = MemoryAllocError;
++ return;
++ }
++ pri->rc = pct->rc;
++ } else {
++ pri->rs = MemoryAllocError;
++ }
++
++ VIDEO_DBG("end: return status: %d\n", pri->rs);
++}
++
++void ioctl_delete_context(struct RvasIoctl *pri, struct AstRVAS *pAstRVAS)
++{
++ VIDEO_DBG("Start\n");
++
++ VIDEO_DBG("pri->rc: %d\n", pri->rc);
++ if (remove_context_table_entry(pri->rc, pAstRVAS)) {
++ VIDEO_DBG("Success in removing\n");
++ pri->rs = SuccessStatus;
++ } else {
++ VIDEO_DBG("Failed in removing\n");
++ pri->rs = InvalidMemoryHandle;
++ }
++}
++
++int get_mem_entry(struct AstRVAS *pAstRVAS)
++{
++ int index = 0;
++ u32 dw_size = 0;
++ bool found = false;
++
++ down(&pAstRVAS->mem_sem);
++ do {
++ if (pAstRVAS->ppmmtMemoryTable[index]) {
++ index++;
++ } else {
++ found = true;
++ break;
++ }
++
++ } while (!found && (index < MAX_NUM_MEM_TBL));
++
++ if (found) {
++ dw_size = sizeof(struct MemoryMapTable);
++ pAstRVAS->ppmmtMemoryTable[index] = kmalloc(dw_size, GFP_KERNEL);
++ if (!pAstRVAS->ppmmtMemoryTable[index])
++ index = -1;
++ } else {
++ index = -1;
++ }
++
++ up(&pAstRVAS->mem_sem);
++ return index;
++}
++
++bool delete_mem_entry(const void *crmh, struct AstRVAS *pAstRVAS)
++{
++ bool b_ret = false;
++ u32 dw_index = (u32)crmh;
++
++ VIDEO_DBG("Start, dw_index: %#x\n", dw_index);
++
++ down(&pAstRVAS->mem_sem);
++ if (dw_index < MAX_NUM_MEM_TBL && pAstRVAS->ppmmtMemoryTable[dw_index]) {
++ VIDEO_DBG("mem: 0x%p\n", pAstRVAS->ppmmtMemoryTable[dw_index]);
++ kfree(pAstRVAS->ppmmtMemoryTable[dw_index]);
++ pAstRVAS->ppmmtMemoryTable[dw_index] = NULL;
++ b_ret = true;
++ }
++ up(&pAstRVAS->mem_sem);
++ VIDEO_DBG("End\n");
++ return b_ret;
++}
++
++void *get_virt_add_rsvd_mem(u32 index, struct AstRVAS *pAstRVAS)
++{
++ if (index < MAX_NUM_MEM_TBL && pAstRVAS->ppmmtMemoryTable[index])
++ return pAstRVAS->ppmmtMemoryTable[index]->pvVirtualAddr;
++
++ return 0;
++}
++
++dma_addr_t get_phys_add_rsvd_mem(u32 index, struct AstRVAS *pAstRVAS)
++{
++ if (index < MAX_NUM_MEM_TBL && pAstRVAS->ppmmtMemoryTable[index])
++ return pAstRVAS->ppmmtMemoryTable[index]->mem_phys;
++
++ return 0;
++}
++
++u32 get_len_rsvd_mem(u32 index, struct AstRVAS *pAstRVAS)
++{
++ u32 len = 0;
++
++ if (index < MAX_NUM_MEM_TBL && pAstRVAS->ppmmtMemoryTable[index])
++ len = pAstRVAS->ppmmtMemoryTable[index]->dwLength;
++
++ return len;
++}
++
++bool virt_is_valid_rsvd_mem(u32 index, u32 size, struct AstRVAS *pAstRVAS)
++{
++ if (index < MAX_NUM_MEM_TBL &&
++ pAstRVAS->ppmmtMemoryTable[index] &&
++ pAstRVAS->ppmmtMemoryTable[index]->dwLength)
++ return true;
++
++ return false;
++}
++
++void ioctl_alloc(struct file *file, struct RvasIoctl *pri, struct AstRVAS *pAstRVAS)
++{
++ u32 size;
++ dma_addr_t phys_add = 0;
++ void *virt_add = 0;
++ u32 index = get_mem_entry(pAstRVAS);
++
++ if (index < 0 || index >= MAX_NUM_MEM_TBL) {
++ pri->rs = MemoryAllocError;
++ return;
++ }
++ if (pri->req_mem_size < PAGE_SIZE)
++ pri->req_mem_size = PAGE_SIZE;
++
++ size = pri->req_mem_size;
++
++ VIDEO_DBG("Allocating memory size: 0x%x\n", size);
++ virt_add = dma_alloc_coherent(pAstRVAS->pdev, size, &phys_add,
++ GFP_KERNEL);
++ if (virt_add) {
++ pri->rmh = (void *)index;
++ pri->rvb.pv = (void *)phys_add;
++ pri->rvb.cb = size;
++ pri->rs = SuccessStatus;
++ pAstRVAS->ppmmtMemoryTable[index]->pf = file;
++ pAstRVAS->ppmmtMemoryTable[index]->mem_phys = phys_add;
++ pAstRVAS->ppmmtMemoryTable[index]->pvVirtualAddr = (void *)virt_add;
++ pAstRVAS->ppmmtMemoryTable[index]->dwLength = size;
++ pAstRVAS->ppmmtMemoryTable[index]->byDmaAlloc = 1;
++ } else {
++ if (pAstRVAS->ppmmtMemoryTable[index])
++ delete_mem_entry((void *)index, pAstRVAS);
++
++ pr_err("Cannot alloc video destination data buffer\n");
++ pri->rs = MemoryAllocError;
++ }
++ VIDEO_DBG("Allocated: index: 0x%x phys: %llx cb: 0x%x\n", index,
++ phys_add, pri->rvb.cb);
++}
++
++void ioctl_free(struct RvasIoctl *pri, struct AstRVAS *pAstRVAS)
++{
++ void *virt_add = get_virt_add_rsvd_mem((u32)pri->rmh, pAstRVAS);
++ dma_addr_t dw_phys = get_phys_add_rsvd_mem((u32)pri->rmh, pAstRVAS);
++ u32 len = get_len_rsvd_mem((u32)pri->rmh, pAstRVAS);
++
++ VIDEO_DBG("Start\n");
++ VIDEO_DBG("Freeing: rmh: 0x%p, phys: 0x%x, size 0x%x virt_add: 0x%p len: %u\n",
++ pri->rmh, dw_phys, pri->rvb.cb, virt_add, len);
++
++ delete_mem_entry(pri->rmh, pAstRVAS);
++ VIDEO_DBG("After delete_mem_entry\n");
++
++ dma_free_coherent(pAstRVAS->pdev, len,
++ virt_add,
++ dw_phys);
++ VIDEO_DBG("After dma_free_coherent\n");
++}
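++
++/*
++ * Assumed userspace flow for these buffers (see ioctl_alloc() and
++ * video_mmap() above): the client issues CMD_IOCTL_ALLOC with req_mem_size
++ * set; on SuccessStatus the table index is returned in rmh and the buffer's
++ * physical address and size in rvb.pv/rvb.cb. It then mmap()s the device
++ * using (off_t)rvb.pv as the offset, which video_mmap() matches against
++ * mem_phys, and finally issues CMD_IOCTL_FREE with the same rmh.
++ */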
++
++//AST2700 has both VGA output and DP out.
++//AST2750 has VGA output for host node 0/VGA0 and DP output for host node 1/VGA1.
++void ioctl_update_lms_2700(u8 lms_on, struct AstRVAS *pAstRVAS)
++{
++ u32 reg_scu000 = 0;
++ u32 reg_scu448 = 0;
++ u32 reg_scu0C0 = 0;
++ u32 reg_scu0D0 = 0;
++ u32 reg_dptx100 = 0;
++ u32 reg_dptx104 = 0;
++ u32 chip_efuse_option = 0;
++ u32 vga_crt_disbl = 0;
++ u32 vga_pwr_off_vdac = 0;
++
++ if (pAstRVAS->rvas_index == 0x0) {
++ vga_crt_disbl = VGA0_CRT_DISBL;
++ vga_pwr_off_vdac = VGA0_PWR_OFF_VDAC;
++ } else {
++ vga_crt_disbl = VGA1_CRT_DISBL;
++ vga_pwr_off_vdac = VGA1_PWR_OFF_VDAC;
++ }
++
++	regmap_read(pAstRVAS->scu, SCU000_Silicon_Revision_ID, &reg_scu000);
++	chip_efuse_option = (reg_scu000 & 0xff00) >> 8;
++	regmap_read(pAstRVAS->scu_io, SCU448_Pin_Ctrl, &reg_scu448);
++	regmap_read(pAstRVAS->scu, SCU0C0_Misc1_Ctrl, &reg_scu0C0);
++	regmap_read(pAstRVAS->scu_io, SCU0D0_Misc3_Ctrl, &reg_scu0D0);
++ if ((chip_efuse_option == 0 && pAstRVAS->rvas_index == 0x1) || chip_efuse_option == 1) {
++ if (pAstRVAS->dp_base) {
++ reg_dptx100 = readl(pAstRVAS->dp_base + DPTX_Configuration_Register);
++ reg_dptx104 = readl(pAstRVAS->dp_base + DPTX_PHY_Configuration_Register);
++ }
++ }
++
++ if (lms_on) {
++ if ((reg_scu448 & VGAVS_ENBL_27) == 0 && (reg_scu448 & VGAHS_ENBL_27) == 0) {
++ reg_scu448 |= (VGAVS_ENBL_27 | VGAHS_ENBL_27);
++ regmap_write(pAstRVAS->scu_io, SCU448_Pin_Ctrl, reg_scu448);
++ }
++ if (reg_scu0C0 & vga_crt_disbl) {
++ reg_scu0C0 &= ~vga_crt_disbl;
++ regmap_write(pAstRVAS->scu, SCU0C0_Misc1_Ctrl, reg_scu0C0);
++ }
++ if (reg_scu0D0 & vga_pwr_off_vdac) {
++ reg_scu0D0 &= ~vga_pwr_off_vdac;
++ regmap_write(pAstRVAS->scu_io, SCU0D0_Misc3_Ctrl, reg_scu0D0);
++ }
++ //dp output
++ if (pAstRVAS->dp_base) {
++ reg_dptx100 |= 1 << AUX_RESETN;
++ writel(reg_dptx100, pAstRVAS->dp_base + DPTX_Configuration_Register);
++ }
++ } else { //turn off
++		if ((reg_scu448 & VGAVS_ENBL_27) || (reg_scu448 & VGAHS_ENBL_27)) {
++ reg_scu448 &= ~(VGAVS_ENBL_27 | VGAHS_ENBL_27);
++ regmap_write(pAstRVAS->scu_io, SCU448_Pin_Ctrl, reg_scu448);
++ }
++ if (!(reg_scu0C0 & vga_crt_disbl)) {
++ reg_scu0C0 |= vga_crt_disbl;
++ regmap_write(pAstRVAS->scu, SCU0C0_Misc1_Ctrl, reg_scu0C0);
++ }
++ if (!(reg_scu0D0 & vga_pwr_off_vdac)) {
++ reg_scu0D0 |= vga_pwr_off_vdac;
++ regmap_write(pAstRVAS->scu_io, SCU0D0_Misc3_Ctrl, reg_scu0D0);
++ }
++ //dp output
++ if ((chip_efuse_option == 0 && pAstRVAS->rvas_index == 0x1) || chip_efuse_option == 1) {
++ if (pAstRVAS->dp_base) {
++ reg_dptx100 &= ~(1 << AUX_RESETN);
++ writel(reg_dptx100, pAstRVAS->dp_base + DPTX_Configuration_Register);
++ reg_dptx104 &= ~(1 << DP_TX_I_MAIN_ON);
++ writel(reg_dptx104, pAstRVAS->dp_base + DPTX_PHY_Configuration_Register);
++ }
++ }
++ }
++}
++
++u32 ioctl_get_lm_status_2700(struct AstRVAS *pAstRVAS)
++{
++ u32 reg_val = 0;
++
++	regmap_read(pAstRVAS->scu_io, SCU448_Pin_Ctrl, &reg_val);
++	if ((reg_val & VGAVS_ENBL_27) || (reg_val & VGAHS_ENBL_27)) {
++		regmap_read(pAstRVAS->scu, SCU0C0_Misc1_Ctrl, &reg_val);
++ if (pAstRVAS->rvas_index == 0x0) {
++ if (!(reg_val & VGA0_CRT_DISBL)) {
++				regmap_read(pAstRVAS->scu_io, SCU0D0_Misc3_Ctrl, &reg_val);
++ if (!(reg_val & VGA0_PWR_OFF_VDAC))
++ return 1;
++ }
++ } else {
++ if (!(reg_val & VGA1_CRT_DISBL)) {
++				regmap_read(pAstRVAS->scu_io, SCU0D0_Misc3_Ctrl, &reg_val);
++ if (!(reg_val & VGA1_PWR_OFF_VDAC))
++ return 1;
++ }
++ }
++ }
++ return 0;
++}
++
++void ioctl_update_lms(u8 lms_on, struct AstRVAS *pAstRVAS)
++{
++ u32 reg_scu418 = 0;
++ u32 reg_scu0C0 = 0;
++ u32 reg_scu0D0 = 0;
++ u32 reg_dptx100 = 0;
++ u32 reg_dptx104 = 0;
++
++	regmap_read(pAstRVAS->scu, SCU418_Pin_Ctrl, &reg_scu418);
++	regmap_read(pAstRVAS->scu, SCU0C0_Misc1_Ctrl, &reg_scu0C0);
++	regmap_read(pAstRVAS->scu, SCU0D0_Misc3_Ctrl, &reg_scu0D0);
++ if (pAstRVAS->dp_base) {
++ reg_dptx100 = readl(pAstRVAS->dp_base + DPTX_Configuration_Register);
++ reg_dptx104 = readl(pAstRVAS->dp_base + DPTX_PHY_Configuration_Register);
++ }
++
++ if (lms_on) {
++ if (!(reg_scu418 & (VGAVS_ENBL | VGAHS_ENBL))) {
++ reg_scu418 |= (VGAVS_ENBL | VGAHS_ENBL);
++ regmap_write(pAstRVAS->scu, SCU418_Pin_Ctrl, reg_scu418);
++ }
++ if (reg_scu0C0 & VGA_CRT_DISBL) {
++ reg_scu0C0 &= ~VGA_CRT_DISBL;
++ regmap_write(pAstRVAS->scu, SCU0C0_Misc1_Ctrl, reg_scu0C0);
++ }
++ if (reg_scu0D0 & PWR_OFF_VDAC) {
++ reg_scu0D0 &= ~PWR_OFF_VDAC;
++ regmap_write(pAstRVAS->scu, SCU0D0_Misc3_Ctrl, reg_scu0D0);
++ }
++ //dp output
++ if (pAstRVAS->dp_base) {
++ reg_dptx100 |= 1 << AUX_RESETN;
++ writel(reg_dptx100, pAstRVAS->dp_base + DPTX_Configuration_Register);
++ }
++ } else { //turn off
++ if (reg_scu418 & (VGAVS_ENBL | VGAHS_ENBL)) {
++ reg_scu418 &= ~(VGAVS_ENBL | VGAHS_ENBL);
++ regmap_write(pAstRVAS->scu, SCU418_Pin_Ctrl, reg_scu418);
++ }
++ if (!(reg_scu0C0 & VGA_CRT_DISBL)) {
++ reg_scu0C0 |= VGA_CRT_DISBL;
++ regmap_write(pAstRVAS->scu, SCU0C0_Misc1_Ctrl, reg_scu0C0);
++ }
++ if (!(reg_scu0D0 & PWR_OFF_VDAC)) {
++ reg_scu0D0 |= PWR_OFF_VDAC;
++ regmap_write(pAstRVAS->scu, SCU0D0_Misc3_Ctrl, reg_scu0D0);
++ }
++ //dp output
++ if (pAstRVAS->dp_base) {
++ reg_dptx100 &= ~(1 << AUX_RESETN);
++ writel(reg_dptx100, pAstRVAS->dp_base + DPTX_Configuration_Register);
++ reg_dptx104 &= ~(1 << DP_TX_I_MAIN_ON);
++ writel(reg_dptx104, pAstRVAS->dp_base + DPTX_PHY_Configuration_Register);
++ }
++ }
++}
++
++u32 ioctl_get_lm_status(struct AstRVAS *pAstRVAS)
++{
++ u32 reg_val = 0;
++
++	regmap_read(pAstRVAS->scu, SCU418_Pin_Ctrl, &reg_val);
++	if (reg_val & (VGAVS_ENBL | VGAHS_ENBL)) {
++		regmap_read(pAstRVAS->scu, SCU0C0_Misc1_Ctrl, &reg_val);
++		if (!(reg_val & VGA_CRT_DISBL)) {
++			regmap_read(pAstRVAS->scu, SCU0D0_Misc3_Ctrl, &reg_val);
++ if (!(reg_val & PWR_OFF_VDAC))
++ return 1;
++
++ }
++ }
++ return 0;
++}
++
++void init_osr_es(struct AstRVAS *pAstRVAS)
++{
++ VIDEO_DBG("Start\n");
++ sema_init(&pAstRVAS->mem_sem, 1);
++ sema_init(&pAstRVAS->context_sem, 1);
++
++ video_os_init_sleep_struct(&pAstRVAS->video_wait);
++
++ memset(&pAstRVAS->tfe_engine, 0x00, sizeof(struct EngineInfo));
++ memset(&pAstRVAS->bse_engine, 0x00, sizeof(struct EngineInfo));
++ memset(&pAstRVAS->ldma_engine, 0x00, sizeof(struct EngineInfo));
++ sema_init(&pAstRVAS->tfe_engine.sem, 1);
++ sema_init(&pAstRVAS->bse_engine.sem, 1);
++ sema_init(&pAstRVAS->ldma_engine.sem, 1);
++ video_os_init_sleep_struct(&pAstRVAS->tfe_engine.wait);
++ video_os_init_sleep_struct(&pAstRVAS->bse_engine.wait);
++ video_os_init_sleep_struct(&pAstRVAS->ldma_engine.wait);
++
++ memset(pAstRVAS->ppctContextTable, 0x00, MAX_NUM_CONTEXT * sizeof(u32));
++ pAstRVAS->dwMemoryTableSize = MAX_NUM_MEM_TBL;
++ memset(pAstRVAS->ppmmtMemoryTable, 0x00, MAX_NUM_MEM_TBL * sizeof(u32));
++ VIDEO_DBG("End\n");
++}
++
++void release_osr_es(struct AstRVAS *pAstRVAS)
++{
++ u32 dw_index;
++ struct ContextTable **ppctContextTable = pAstRVAS->ppctContextTable;
++
++ VIDEO_DBG("Removing contexts...\n");
++ for (dw_index = 0; dw_index < MAX_NUM_CONTEXT; ++dw_index) {
++		/* kfree(NULL) is safe, so no per-entry NULL check is required */
++		kfree(ppctContextTable[dw_index]);
++		ppctContextTable[dw_index] = NULL;
++ }
++
++ free_all_mem_entries(pAstRVAS);
++}
++
++//Retrieve a context entry
++struct ContextTable *get_context_entry(const void *crc, struct AstRVAS *pAstRVAS)
++{
++ struct ContextTable *pct = NULL;
++ u32 dw_index = (u32)crc;
++ struct ContextTable **ppctContextTable = pAstRVAS->ppctContextTable;
++
++ if (dw_index < MAX_NUM_CONTEXT && ppctContextTable[dw_index] &&
++ ppctContextTable[dw_index]->rc == crc)
++ pct = ppctContextTable[dw_index];
++
++ return pct;
++}
++
++struct ContextTable *get_new_context_table_entry(struct AstRVAS *pAstRVAS)
++{
++ struct ContextTable *pct = NULL;
++ u32 dw_index = 0;
++ bool b_found = false;
++ u32 dw_size = 0;
++ struct ContextTable **ppctContextTable = pAstRVAS->ppctContextTable;
++
++ disable_grce_tse_interrupt(pAstRVAS);
++ down(&pAstRVAS->context_sem);
++ while (!b_found && (dw_index < MAX_NUM_CONTEXT)) {
++ if (!(ppctContextTable[dw_index]))
++ b_found = true;
++ else
++ ++dw_index;
++ }
++ if (b_found) {
++ dw_size = sizeof(struct ContextTable);
++ pct = kmalloc(dw_size, GFP_KERNEL);
++
++ if (pct) {
++ memset(pct, 0x00, sizeof(struct ContextTable));
++ pct->rc = (void *)dw_index;
++ memset(&pct->aqwSnoopMap, 0xff,
++ sizeof(pct->aqwSnoopMap));
++ memset(&pct->sa, 0xff, sizeof(pct->sa));
++ ppctContextTable[dw_index] = pct;
++ }
++ }
++ up(&pAstRVAS->context_sem);
++ enable_grce_tse_interrupt(pAstRVAS);
++
++ return pct;
++}
++
++bool remove_context_table_entry(const void *crc, struct AstRVAS *pAstRVAS)
++{
++ bool b_ret = false;
++ u32 dw_index = (u32)crc;
++ struct ContextTable *ctx_entry;
++
++ VIDEO_DBG("Start\n");
++
++ VIDEO_DBG("dw_index: %u\n", dw_index);
++
++ if (dw_index < MAX_NUM_CONTEXT) {
++ ctx_entry = pAstRVAS->ppctContextTable[dw_index];
++ VIDEO_DBG("ctx_entry: 0x%p\n", ctx_entry);
++
++ if (ctx_entry) {
++ disable_grce_tse_interrupt(pAstRVAS);
++			if (ctx_entry->desc_virt) {
++ VIDEO_DBG("Removing memory, virt: 0x%p phys: %#x\n",
++ ctx_entry->desc_virt,
++ ctx_entry->desc_phy);
++
++ dma_free_coherent(pAstRVAS->pdev, PAGE_SIZE, ctx_entry->desc_virt, ctx_entry->desc_phy);
++ }
++ VIDEO_DBG("Removing memory: 0x%p\n", ctx_entry);
++ pAstRVAS->ppctContextTable[dw_index] = NULL;
++ kfree(ctx_entry);
++ b_ret = true;
++ enable_grce_tse_interrupt(pAstRVAS);
++ }
++ }
++ return b_ret;
++}
++
++void display_event_map(const struct EventMap *pem)
++{
++ VIDEO_DBG("EM:\n");
++ VIDEO_DBG("*************************\n");
++ VIDEO_DBG(" bATTRChanged= %u\n", pem->bATTRChanged);
++ VIDEO_DBG(" bCRTCChanged= %u\n", pem->bCRTCChanged);
++ VIDEO_DBG(" bCRTCEXTChanged= %u\n", pem->bCRTCEXTChanged);
++ VIDEO_DBG(" bDoorbellA= %u\n", pem->bDoorbellA);
++ VIDEO_DBG(" bDoorbellB= %u\n", pem->bDoorbellB);
++ VIDEO_DBG(" bGCTLChanged= %u\n", pem->bGCTLChanged);
++ VIDEO_DBG(" bGeometryChanged= %u\n", pem->bGeometryChanged);
++ VIDEO_DBG(" bPLTRAMChanged= %u\n", pem->bPLTRAMChanged);
++ VIDEO_DBG(" bPaletteChanged= %u\n", pem->bPaletteChanged);
++ VIDEO_DBG(" bSEQChanged= %u\n", pem->bSEQChanged);
++ VIDEO_DBG(" bSnoopChanged= %u\n", pem->bSnoopChanged);
++ VIDEO_DBG(" bTextASCIIChanged= %u\n", pem->bTextASCIIChanged);
++ VIDEO_DBG(" bTextATTRChanged= %u\n", pem->bTextATTRChanged);
++ VIDEO_DBG(" bTextFontChanged= %u\n", pem->bTextFontChanged);
++ VIDEO_DBG(" bXCURCOLChanged= %u\n", pem->bXCURCOLChanged);
++ VIDEO_DBG(" bXCURCTLChanged= %u\n", pem->bXCURCTLChanged);
++ VIDEO_DBG(" bXCURPOSChanged= %u\n", pem->bXCURPOSChanged);
++ VIDEO_DBG("*************************\n");
++}
++
++void ioctl_wait_for_video_event(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS)
++{
++ union EmDwordUnion eduRequested;
++ union EmDwordUnion eduReturned;
++ union EmDwordUnion eduChanged;
++ struct EventMap anEm;
++ u32 result = 1;
++ int iTimerRemaining = ri->time_out;
++ unsigned long ulTimeStart, ulTimeEnd, ulElapsedTime;
++ struct ContextTable **ppctContextTable = pAstRVAS->ppctContextTable;
++
++ memset(&anEm, 0x0, sizeof(struct EventMap));
++
++ VIDEO_DBG("Calling VideoSleepOnTimeout\n");
++
++ eduRequested.em = ri->em;
++ VIDEO_DBG("eduRequested.em:\n");
++ //display_event_map(&eduRequested.em);
++ eduChanged.em = ppctContextTable[(int)ri->rc]->emEventReceived;
++ VIDEO_DBG("eduChanged.em:\n");
++ //display_event_map(&eduChanged.em);
++
++ // While event has not occurred and there is still time remaining for wait
++ while (!(eduChanged.dw & eduRequested.dw) && (iTimerRemaining > 0) &&
++ result) {
++ pAstRVAS->video_intr_occurred = 0;
++ ulTimeStart = jiffies_to_msecs(jiffies);
++ result = video_os_sleep_on_timeout(&pAstRVAS->video_wait,
++ &pAstRVAS->video_intr_occurred,
++ iTimerRemaining);
++ ulTimeEnd = jiffies_to_msecs(jiffies);
++ ulElapsedTime = (ulTimeEnd - ulTimeStart);
++ iTimerRemaining -= (int)ulElapsedTime;
++ eduChanged.em = ppctContextTable[(int)ri->rc]->emEventReceived;
++// VIDEO_DBG("Elapsedtime [%u], timestart[%u], timeend[%u]\n", dwElapsedTime, dwTimeStart, dwTimeEnd);
++
++ VIDEO_DBG("ulElapsedTime [%lu], ulTimeStart[%lu], ulTimeEnd[%lu]\n",
++ ulElapsedTime, ulTimeStart, ulTimeEnd);
++		VIDEO_DBG("HZ [%u]\n", HZ);
++ VIDEO_DBG("result [%u], iTimerRemaining [%d]\n", result,
++ iTimerRemaining);
++ }
++
++ if (result == 0 && ri->time_out != 0) {
++ VIDEO_DBG("IOCTL Timedout\n");
++ ri->rs = TimedOut;
++ memset(&ri->em, 0x0, sizeof(struct EventMap));
++ } else {
++ eduChanged.em = ppctContextTable[(int)ri->rc]->emEventReceived;
++ VIDEO_DBG("Event Received[%X]\n", eduChanged.dw);
++ // Mask out the changes we are waiting on
++ eduReturned.dw = eduChanged.dw & eduRequested.dw;
++
++ // Reset flags of changes that have been returned
++ eduChanged.dw &= ~(eduReturned.dw);
++ VIDEO_DBG("Event Reset[%X]\n", eduChanged.dw);
++ ppctContextTable[(int)ri->rc]->emEventReceived = eduChanged.em;
++
++ // Copy changes back to ri
++ ri->em = eduReturned.em;
++ VIDEO_DBG("ri->em:\n");
++ //display_event_map(&ri->em);
++ ri->rs = SuccessStatus;
++ VIDEO_DBG("Success [%x]\n",
++ eduReturned.dw);
++ }
++}
++
++static void update_context_events(struct AstRVAS *pAstRVAS,
++ union EmDwordUnion eduFge_status)
++{
++ union EmDwordUnion eduEmReceived;
++ u32 dwIter = 0;
++ struct ContextTable **ppctContextTable = pAstRVAS->ppctContextTable;
++ // VIDEO_DBG("Setting up context\n");
++ for (dwIter = 0; dwIter < MAX_NUM_CONTEXT; ++dwIter) {
++ if (ppctContextTable[dwIter]) {
++ // VIDEO_DBG ("Copying EventMap to RVAS Context\n");
++ memcpy((void *)&eduEmReceived,
++ (void *)&ppctContextTable[dwIter]->emEventReceived,
++ sizeof(union EmDwordUnion));
++ eduEmReceived.dw |= eduFge_status.dw;
++ memcpy((void *)&ppctContextTable[dwIter]->emEventReceived,
++ (void *)&eduEmReceived,
++ sizeof(union EmDwordUnion));
++ }
++ }
++ pAstRVAS->video_intr_occurred = 1;
++ video_ss_wakeup_on_timeout(&pAstRVAS->video_wait);
++}
++
++static irqreturn_t fge_handler(int irq, void *dev_id)
++{
++ union EmDwordUnion eduFge_status;
++ u32 tse_sts = 0;
++ u32 dwGRCEStatus = 0;
++ bool bFgeItr = false;
++ bool bTfeItr = false;
++ bool bBSEItr = false;
++ bool bLdmaItr = false;
++ bool vg_changed = false;
++ u32 dw_screen_offset = 0;
++ struct AstRVAS *pAstRVAS = (struct AstRVAS *)dev_id;
++ struct VideoGeometry *cur_vg = NULL;
++
++ memset(&eduFge_status, 0x0, sizeof(union EmDwordUnion));
++ bFgeItr = false;
++
++ // Checking for GRC status changes
++ dwGRCEStatus = readl(pAstRVAS->grce_reg_base + GRCE_STATUS_REGISTER);
++ if (dwGRCEStatus & GRC_INT_STS_MASK) {
++ VIDEO_DBG("GRC Status Changed: %#x\n", dwGRCEStatus);
++ eduFge_status.dw |= dwGRCEStatus & GRC_INT_STS_MASK;
++ bFgeItr = true;
++
++ if (dwGRCEStatus & 0x30) {
++ dw_screen_offset = get_screen_offset(pAstRVAS);
++
++ if (pAstRVAS->dwScreenOffset != dw_screen_offset) {
++ pAstRVAS->dwScreenOffset = dw_screen_offset;
++ vg_changed = true;
++ }
++ }
++ }
++ vg_changed |= video_geometry_change(pAstRVAS, dwGRCEStatus);
++ if (vg_changed) {
++ eduFge_status.em.bGeometryChanged = true;
++ bFgeItr = true;
++ set_snoop_engine(vg_changed, pAstRVAS);
++ video_set_Window(pAstRVAS);
++ VIDEO_DBG("Geometry has changed\n");
++ VIDEO_DBG("Reconfigure TSE\n");
++ }
++ // Checking and clear TSE Intr Status
++ tse_sts = clear_tse_interrupt(pAstRVAS);
++
++ if (tse_sts & TSSTS_ALL) {
++ bFgeItr = true;
++ if (tse_sts & (TSSTS_TC_SCREEN0 | TSSTS_TC_SCREEN1)) {
++ eduFge_status.em.bSnoopChanged = 1;
++ cur_vg = &pAstRVAS->current_vg;
++
++ if (cur_vg->gmt == TextMode) {
++ eduFge_status.em.bTextASCIIChanged = 1;
++ eduFge_status.em.bTextATTRChanged = 1;
++ eduFge_status.em.bTextFontChanged = 1;
++ }
++ }
++ if (tse_sts & TSSTS_ASCII) {
++ //VIDEO_DBG("Text Ascii Changed\n");
++ eduFge_status.em.bTextASCIIChanged = 1;
++ }
++
++ if (tse_sts & TSSTS_ATTR) {
++ //VIDEO_DBG("Text Attr Changed\n");
++ eduFge_status.em.bTextATTRChanged = 1;
++ }
++
++ if (tse_sts & TSSTS_FONT) {
++ //VIDEO_DBG("Text Font Changed\n");
++ eduFge_status.em.bTextFontChanged = 1;
++ }
++ }
++
++ if (clear_ldma_interrupt(pAstRVAS)) {
++ bLdmaItr = true;
++ pAstRVAS->ldma_engine.finished = 1;
++ video_ss_wakeup_on_timeout(&pAstRVAS->ldma_engine.wait);
++ }
++
++ if (clear_tfe_interrupt(pAstRVAS)) {
++ bTfeItr = true;
++ pAstRVAS->tfe_engine.finished = 1;
++ video_ss_wakeup_on_timeout(&pAstRVAS->tfe_engine.wait);
++ }
++
++ if (clear_bse_interrupt(pAstRVAS)) {
++ bBSEItr = true;
++ pAstRVAS->bse_engine.finished = 1;
++ video_ss_wakeup_on_timeout(&pAstRVAS->bse_engine.wait);
++ }
++
++ if (!bFgeItr && !bTfeItr && !bBSEItr && !bLdmaItr) {
++ //VIDEO_DBG(" Unknown Interrupt\n");
++// VIDEO_DBG("TFE CRT [%#x].", *fge_intr);
++ return IRQ_NONE;
++ }
++
++ if (bFgeItr) {
++ update_context_events(pAstRVAS, eduFge_status);
++ pAstRVAS->video_intr_occurred = 1;
++ video_ss_wakeup_on_timeout(&pAstRVAS->video_wait);
++ }
++
++ return IRQ_HANDLED;
++}
++
++/*Sleep and Wakeup Functions*/
++
++void video_os_init_sleep_struct(struct Video_OsSleepStruct *Sleep)
++{
++ init_waitqueue_head(&Sleep->queue);
++ Sleep->Timeout = 0;
++}
++
++void video_ss_wakeup_on_timeout(struct Video_OsSleepStruct *Sleep)
++{
++ /* Wakeup Process and Kill timeout handler */
++ wake_up(&Sleep->queue);
++}
++
++long video_os_sleep_on_timeout(struct Video_OsSleepStruct *Sleep, u8 *Var, long msecs)
++{
++ long timeout; /* In jiffies */
++ u8 *Condition = Var;
++ /* Sleep on the Condition for a wakeup */
++ timeout = wait_event_interruptible_timeout(Sleep->queue,
++ (*Condition == 1),
++ msecs_to_jiffies(msecs));
++
++ return timeout;
++}
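++
++/*
++ * These helpers implement a simple flag-plus-waitqueue handshake: a caller
++ * clears an engine's "finished" flag, kicks the hardware, and sleeps here
++ * with a timeout; fge_handler() sets the flag and calls
++ * video_ss_wakeup_on_timeout() when the matching interrupt status is seen
++ * (see sleep_on_tfe_busy() below).
++ */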
++
++void disable_video_engines(struct AstRVAS *pAstRVAS)
++{
++ clk_disable(pAstRVAS->eclk);
++ clk_disable(pAstRVAS->vclk);
++}
++
++void enable_video_engines(struct AstRVAS *pAstRVAS)
++{
++ clk_enable(pAstRVAS->eclk);
++ clk_enable(pAstRVAS->vclk);
++}
++
++void disable_rvas_engines(struct AstRVAS *pAstRVAS)
++{
++ clk_disable(pAstRVAS->rvasclk);
++}
++
++void enable_rvas_engines(struct AstRVAS *pAstRVAS)
++{
++ // ast2600 clk enable does
++ // reset engine reset at SCU040
++ // delay 100 us
++ // enable clock at SCU080
++ // delay 10ms
++ // disable engine reset at SCU040
++
++ // ast2700 clk enable only enable clock at SCU240
++ clk_enable(pAstRVAS->rvasclk);
++}
++
++static void reset_rvas_engine(struct AstRVAS *pAstRVAS)
++{
++ disable_rvas_engines(pAstRVAS);
++ if (pAstRVAS->config->version == 7)
++ reset_control_deassert(pAstRVAS->rvas_reset);
++ enable_rvas_engines(pAstRVAS);
++ rvas_init(pAstRVAS);
++}
++
++static void video_on(struct AstRVAS *pAstRVAS)
++{
++ if (pAstRVAS->config->version == 7) {
++ // enable clk
++ regmap_write(pAstRVAS->scu, 0x200, 0x40);
++ mdelay(200);
++ regmap_write(pAstRVAS->scu, 0x244, 0x2);
++ regmap_write(pAstRVAS->scu, 0x244, 0x8);
++ mdelay(100);
++ regmap_write(pAstRVAS->scu, 0x204, 0x40);
++ } else {
++ video_engine_init(pAstRVAS);
++ }
++}
++
++static void video_off(struct AstRVAS *pAstRVAS)
++{
++ if (pAstRVAS->config->version == 7) {
++ disable_video_interrupt(pAstRVAS);
++ // stop clock
++ regmap_write(pAstRVAS->scu, 0x240, 0x2);
++ regmap_write(pAstRVAS->scu, 0x240, 0x8);
++ mdelay(100);
++ } else {
++ disable_video_engines(pAstRVAS);
++ enable_video_engines(pAstRVAS);
++ }
++}
++
++static void reset_video_engine(struct AstRVAS *pAstRVAS)
++{
++ video_off(pAstRVAS);
++ video_on(pAstRVAS);
++}
++
++void ioctl_reset_video_engine(struct RvasIoctl *ri, struct AstRVAS *pAstRVAS)
++{
++	/* the rvas_reset sysfs store passes a NULL ri; assume a full reset then */
++	enum ResetEngineMode resetMode = ri ? ri->resetMode : ResetAll;
++
++ switch (resetMode) {
++ case ResetAll:
++ VIDEO_DBG("reset all engine\n");
++ reset_rvas_engine(pAstRVAS);
++ reset_video_engine(pAstRVAS);
++ break;
++ case ResetRvasEngine:
++ VIDEO_DBG("reset rvas engine\n");
++ reset_rvas_engine(pAstRVAS);
++ break;
++ case ResetVeEngine:
++ VIDEO_DBG("reset video engine\n");
++ reset_video_engine(pAstRVAS);
++ break;
++ default:
++ dev_err(pAstRVAS->pdev, "Error resetting: no such mode: %d\n", resetMode);
++ break;
++ }
++
++ if (ri)
++ ri->rs = SuccessStatus;
++}
++
++static ssize_t rvas_reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
++{
++ struct AstRVAS *pAstRVAS = dev_get_drvdata(dev);
++	unsigned long val;
++
++	if (kstrtoul(buf, 10, &val))
++		return -EINVAL;
++
++	if (val)
++		ioctl_reset_video_engine(NULL, pAstRVAS);
++
++ return count;
++}
++
++static DEVICE_ATTR_WO(rvas_reset);
++
++static struct attribute *ast_rvas_attributes[] = {
++ &dev_attr_rvas_reset.attr,
++ NULL
++};
++
++static const struct attribute_group rvas_attribute_group = {
++ .attrs = ast_rvas_attributes
++};
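++
++/*
++ * Usage note (sketch; the exact sysfs path depends on the platform device
++ * name): writing a non-zero decimal value, e.g.
++ *   echo 1 > /sys/devices/platform/.../rvas_reset
++ * calls ioctl_reset_video_engine(NULL, pAstRVAS) to reset the engines.
++ */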
++
++bool sleep_on_tfe_busy(struct AstRVAS *pAstRVAS, phys_addr_t desc_addr_phys,
++ u32 dwTFEControlR, u32 dwTFERleLimitor,
++ u32 *pdwRLESize, u32 *pdwCheckSum)
++{
++ void __iomem *addrTFEDTBR = pAstRVAS->fg_reg_base + TFE_Descriptor_Table_Offset;
++ void __iomem *addrTFECR = pAstRVAS->fg_reg_base + TFE_Descriptor_Control_Resgister;
++ void __iomem *addrTFERleL = pAstRVAS->fg_reg_base + TFE_RLE_LIMITOR;
++ void __iomem *addrTFERSTS = pAstRVAS->fg_reg_base + TFE_Status_Register;
++ bool bResult = true;
++
++ down(&pAstRVAS->tfe_engine.sem);
++ VIDEO_DBG("In Busy Semaphore......\n");
++
++ VIDEO_DBG("Before change, TFECR: %#x\n", readl(addrTFECR));
++ writel(dwTFEControlR, addrTFECR);
++ VIDEO_DBG("After change, TFECR: %#x\n", readl(addrTFECR));
++ writel(dwTFERleLimitor, addrTFERleL);
++ VIDEO_DBG("dwTFEControlR: %#x\n", dwTFEControlR);
++ VIDEO_DBG("dwTFERleLimitor: %#x\n", dwTFERleLimitor);
++ VIDEO_DBG("desc_addr_phys: %#x\n", desc_addr_phys);
++ // put descriptor add to TBR and Fetch start
++ writel((u32)desc_addr_phys, addrTFEDTBR);
++ //wTFETiles = 1;
++ pAstRVAS->tfe_engine.finished = 0;
++ video_os_sleep_on_timeout(&pAstRVAS->tfe_engine.wait,
++ &pAstRVAS->tfe_engine.finished,
++ TFE_TIMEOUT_IN_MS);
++
++ if (!pAstRVAS->tfe_engine.finished) {
++ dev_err(pAstRVAS->pdev, "Video TFE failed\n");
++ writel(0x00, addrTFERSTS);
++ pAstRVAS->tfe_engine.finished = 1;
++ bResult = false;
++ }
++
++ writel((readl(addrTFECR) & (~0x3)), addrTFECR); // Disable IRQ and Turn off TFE when done
++ *pdwRLESize = readl(pAstRVAS->fg_reg_base + TFE_RLE_Byte_Count);
++ *pdwCheckSum = readl(pAstRVAS->fg_reg_base + TFE_RLE_CheckSum);
++
++ up(&pAstRVAS->tfe_engine.sem);
++ VIDEO_DBG("Done Busy: bResult: %d\n", bResult);
++
++ return bResult;
++}
++
++bool sleep_on_tfe_text_busy(struct AstRVAS *pAstRVAS, phys_addr_t desc_addr_phys,
++ u32 dwTFEControlR, u32 dwTFERleLimitor, u32 *pdwRLESize,
++ u32 *pdwCheckSum)
++{
++ void __iomem *addrTFEDTBR = pAstRVAS->fg_reg_base + TFE_Descriptor_Table_Offset;
++ void __iomem *addrTFECR = pAstRVAS->fg_reg_base + TFE_Descriptor_Control_Resgister;
++ void __iomem *addrTFERleL = pAstRVAS->fg_reg_base + TFE_RLE_LIMITOR;
++ void __iomem *addrTFERSTS = pAstRVAS->fg_reg_base + TFE_Status_Register;
++ bool bResult = true;
++
++ down(&pAstRVAS->tfe_engine.sem);
++ VIDEO_DBG("In Busy Semaphore......\n");
++
++ VIDEO_DBG("Before change, TFECR: %#x\n", readl(addrTFECR));
++ writel(dwTFEControlR, addrTFECR);
++ VIDEO_DBG("After change, TFECR: %#x\n", readl(addrTFECR));
++ writel(dwTFERleLimitor, addrTFERleL);
++ VIDEO_DBG("dwTFEControlR: %#x\n", dwTFEControlR);
++ VIDEO_DBG("dwTFERleLimitor: %#x\n", dwTFERleLimitor);
++ VIDEO_DBG("desc_addr_phys: %#x\n", desc_addr_phys);
++ // put descriptor add to TBR and Fetch start
++ writel((u32)desc_addr_phys, addrTFEDTBR);
++ //wTFETiles = 1;
++ pAstRVAS->tfe_engine.finished = 0;
++ video_os_sleep_on_timeout(&pAstRVAS->tfe_engine.wait,
++ &pAstRVAS->tfe_engine.finished, TFE_TIMEOUT_IN_MS);
++
++ if (!pAstRVAS->tfe_engine.finished) {
++ dev_err(pAstRVAS->pdev, "Video TFE failed\n");
++ writel(0x00, addrTFERSTS);
++ pAstRVAS->tfe_engine.finished = 1;
++ bResult = false;
++ }
++
++ writel((readl(addrTFECR) & (~0x3)), addrTFECR);// Disable IRQ and Turn off TFE when done
++ writel((readl(addrTFERSTS) | 0x2), addrTFERSTS); // clear status bit
++ *pdwRLESize = readl(pAstRVAS->fg_reg_base + TFE_RLE_Byte_Count);
++ *pdwCheckSum = readl(pAstRVAS->fg_reg_base + TFE_RLE_CheckSum);
++
++ up(&pAstRVAS->tfe_engine.sem);
++ VIDEO_DBG("Done Busy: bResult: %d\n", bResult);
++
++ return bResult;
++}
++
++bool sleep_on_bse_busy(struct AstRVAS *pAstRVAS, phys_addr_t desc_addr_phys,
++ struct BSEAggregateRegister aBSEAR, u32 size)
++{
++ void __iomem *addrBSEDTBR = pAstRVAS->fg_reg_base + BSE_Descriptor_Table_Base_Register;
++ void __iomem *addrBSCR = pAstRVAS->fg_reg_base + BSE_Command_Register;
++ void __iomem *addrBSDBS = pAstRVAS->fg_reg_base + BSE_Destination_Buket_Size_Resgister;
++ void __iomem *addrBSBPS0 = pAstRVAS->fg_reg_base + BSE_Bit_Position_Register_0;
++ void __iomem *addrBSBPS1 = pAstRVAS->fg_reg_base + BSE_Bit_Position_Register_1;
++ void __iomem *addrBSBPS2 = pAstRVAS->fg_reg_base + BSE_Bit_Position_Register_2;
++ void __iomem *addrBSESSTS = pAstRVAS->fg_reg_base + BSE_Status_Register;
++ u8 byCounter = 0;
++ bool bResult = true;
++
++ down(&pAstRVAS->bse_engine.sem);
++ pAstRVAS->bse_engine.finished = 0;
++
++ // Set BSE Temp buffer address, and clear lower u16
++ writel(BSE_LMEM_Temp_Buffer_Offset << 16, addrBSCR);
++ writel(readl(addrBSCR) | (aBSEAR.dwBSCR & 0X00000FFF), addrBSCR);
++ writel(aBSEAR.dwBSDBS, addrBSDBS);
++ writel(aBSEAR.adwBSBPS[0], addrBSBPS0);
++ writel(aBSEAR.adwBSBPS[1], addrBSBPS1);
++ writel(aBSEAR.adwBSBPS[2], addrBSBPS2);
++
++ writel((u32)desc_addr_phys, addrBSEDTBR);
++
++ while (!pAstRVAS->bse_engine.finished) {
++ VIDEO_DBG("BSE Sleeping...\n");
++ video_os_sleep_on_timeout(&pAstRVAS->bse_engine.wait,
++ &pAstRVAS->bse_engine.finished,
++ 1000); // loop if bse timedout
++ byCounter++;
++ VIDEO_DBG("Back from BSE Sleeping, finished: %u\n",
++ pAstRVAS->bse_engine.finished);
++
++ if (byCounter == ENGINE_TIMEOUT_IN_SECONDS) {
++ writel(0x00, addrBSESSTS);
++ pAstRVAS->bse_engine.finished = 1;
++ dev_err(pAstRVAS->pdev, "TIMEOUT::Waiting BSE\n");
++ bResult = false;
++ }
++ }
++
++ VIDEO_DBG("*pdwBSESSTS = %#x\n", readl(addrBSESSTS));
++ writel(readl(addrBSCR) & (~0x3), addrBSCR);
++
++ up(&pAstRVAS->bse_engine.sem);
++
++ return bResult;
++}
++
++void sleep_on_ldma_busy(struct AstRVAS *pAstRVAS, phys_addr_t desc_addr_phys)
++{
++ void __iomem *addrLDMADTBR = pAstRVAS->fg_reg_base + LDMA_Descriptor_Table_Base_Register;
++ void __iomem *addrLDMAControlR = pAstRVAS->fg_reg_base + LDMA_Control_Register;
++
++ VIDEO_DBG("In sleepONldma busy\n");
++
++ down(&pAstRVAS->ldma_engine.sem);
++
++ pAstRVAS->ldma_engine.finished = 0;
++
++	writel(0x83, addrLDMAControlR); // LDMA descriptors can only reside in LMEM
++ writel((u32)desc_addr_phys, addrLDMADTBR);
++ VIDEO_DBG("LDMA: control [%#x]\n", readl(addrLDMAControlR));
++ VIDEO_DBG("LDMA: DTBR [%#x]\n", readl(addrLDMADTBR));
++
++ while (!pAstRVAS->ldma_engine.finished)
++ video_os_sleep_on_timeout(&pAstRVAS->ldma_engine.wait, (u8 *)&pAstRVAS->ldma_engine.finished, 1000); // loop if bse timedout
++
++ VIDEO_DBG("LDMA wake up\n");
++ writel(readl(addrLDMAControlR) & (~0x3), addrLDMAControlR);
++ up(&pAstRVAS->ldma_engine.sem);
++}
++
++static int video_drv_get_resources(struct platform_device *pdev)
++{
++ int result = 0;
++ struct resource *io_fg;
++ struct resource *io_grc;
++ struct resource *io_video;
++ struct AstRVAS *pAstRVAS = platform_get_drvdata(pdev);
++
++ //get resources from platform
++ io_fg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ VIDEO_DBG("io_fg: 0x%p\n", io_fg);
++
++ if (!io_fg) {
++ dev_err(&pdev->dev, "No Frame Grabber IORESOURCE_MEM entry\n");
++ return -ENOENT;
++ }
++ io_grc = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++ VIDEO_DBG("io_grc: 0x%p\n", io_grc);
++ if (!io_grc) {
++ dev_err(&pdev->dev, "No GRCE IORESOURCE_MEM entry\n");
++ return -ENOENT;
++ }
++ io_video = platform_get_resource(pdev, IORESOURCE_MEM, 2);
++ VIDEO_DBG("io_video: 0x%p\n", io_video);
++ if (!io_video) {
++ dev_err(&pdev->dev, "No video compression IORESOURCE_MEM entry\n");
++ return -ENOENT;
++ }
++
++ //map resource by device
++ pAstRVAS->fg_reg_base = devm_ioremap_resource(&pdev->dev, io_fg);
++ VIDEO_DBG("fg_reg_base: %p\n", pAstRVAS->fg_reg_base);
++ if (IS_ERR((void *)pAstRVAS->fg_reg_base)) {
++ result = PTR_ERR((void *)pAstRVAS->fg_reg_base);
++ dev_err(&pdev->dev, "Cannot map FG registers\n");
++ pAstRVAS->fg_reg_base = 0;
++ return result;
++ }
++ pAstRVAS->grce_reg_base = devm_ioremap_resource(&pdev->dev, io_grc);
++ VIDEO_DBG("grce_reg_base: %p\n", pAstRVAS->grce_reg_base);
++ if (IS_ERR((void *)pAstRVAS->grce_reg_base)) {
++ result = PTR_ERR((void *)pAstRVAS->grce_reg_base);
++ dev_err(&pdev->dev, "Cannot map GRC registers\n");
++ pAstRVAS->grce_reg_base = 0;
++ return result;
++ }
++ pAstRVAS->video_reg_base = devm_ioremap_resource(&pdev->dev, io_video);
++ VIDEO_DBG("video_reg_base: %p\n", pAstRVAS->video_reg_base);
++ if (IS_ERR((void *)pAstRVAS->video_reg_base)) {
++ result = PTR_ERR((void *)pAstRVAS->video_reg_base);
++ dev_err(&pdev->dev, "Cannot map video registers\n");
++ pAstRVAS->video_reg_base = 0;
++ return result;
++ }
++
++ pAstRVAS->config = of_device_get_match_data(&pdev->dev);
++ if (!pAstRVAS->config)
++ return -ENODEV;
++ return 0;
++}
++
++static int video_drv_get_irqs(struct platform_device *pdev)
++{
++ struct AstRVAS *pAstRVAS = platform_get_drvdata(pdev);
++
++ pAstRVAS->irq_fge = platform_get_irq(pdev, 0);
++ VIDEO_DBG("irq_fge: %#x\n", pAstRVAS->irq_fge);
++ if (pAstRVAS->irq_fge < 0) {
++ dev_err(&pdev->dev, "NO FGE irq entry\n");
++ return -ENOENT;
++ }
++ pAstRVAS->irq_vga = platform_get_irq(pdev, 1);
++ VIDEO_DBG("irq_vga: %#x\n", pAstRVAS->irq_vga);
++ if (pAstRVAS->irq_vga < 0) {
++ dev_err(&pdev->dev, "NO VGA irq entry\n");
++ return -ENOENT;
++ }
++ pAstRVAS->irq_video = platform_get_irq(pdev, 2);
++ VIDEO_DBG("irq_video: %#x\n", pAstRVAS->irq_video);
++ if (pAstRVAS->irq_video < 0) {
++ dev_err(&pdev->dev, "NO video compression entry\n");
++ return -ENOENT;
++ }
++ return 0;
++}
++
++static int video_drv_get_clock(struct platform_device *pdev)
++{
++ struct AstRVAS *pAstRVAS = platform_get_drvdata(pdev);
++
++ pAstRVAS->eclk = devm_clk_get(&pdev->dev, "eclk");
++ if (IS_ERR(pAstRVAS->eclk)) {
++ dev_err(&pdev->dev, "no eclk clock defined\n");
++ return PTR_ERR(pAstRVAS->eclk);
++ }
++
++ clk_prepare_enable(pAstRVAS->eclk);
++
++ pAstRVAS->vclk = devm_clk_get(&pdev->dev, "vclk");
++ if (IS_ERR(pAstRVAS->vclk)) {
++ dev_err(&pdev->dev, "no vclk clock defined\n");
++ return PTR_ERR(pAstRVAS->vclk);
++ }
++
++ clk_prepare_enable(pAstRVAS->vclk);
++
++ if (pAstRVAS->config->version == 7) {
++ pAstRVAS->rvasclk = devm_clk_get(&pdev->dev, "rvasclk");
++ if (IS_ERR(pAstRVAS->rvasclk)) {
++ pAstRVAS->rvasclk = devm_clk_get(&pdev->dev, "rvas2clk");
++ if (IS_ERR(pAstRVAS->rvasclk)) {
++ dev_err(&pdev->dev, "no rvasclk or rvas2clk clock defined\n");
++ return PTR_ERR(pAstRVAS->rvasclk);
++ }
++ }
++ } else {
++ pAstRVAS->rvasclk = devm_clk_get(&pdev->dev, "rvasclk-gate");
++ if (IS_ERR(pAstRVAS->rvasclk)) {
++ dev_err(&pdev->dev, "no rvasclk clock defined\n");
++ return PTR_ERR(pAstRVAS->rvasclk);
++ }
++ }
++ clk_prepare_enable(pAstRVAS->rvasclk);
++ return 0;
++}
++
++static int video_drv_map_irqs(struct platform_device *pdev)
++{
++ int result = 0;
++ struct AstRVAS *pAstRVAS = platform_get_drvdata(pdev);
++ //Map IRQS to handler
++ VIDEO_DBG("Requesting IRQs, irq_fge: %d, irq_vga: %d, irq_video: %d\n",
++ pAstRVAS->irq_fge, pAstRVAS->irq_vga, pAstRVAS->irq_video);
++
++ result = devm_request_irq(&pdev->dev, pAstRVAS->irq_fge, fge_handler, 0,
++ dev_name(&pdev->dev), pAstRVAS);
++ if (result) {
++ pr_err("Error in requesting IRQ\n");
++		pr_err("RVAS: Failed to request FGE irq %d\n", pAstRVAS->irq_fge);
++ misc_deregister(&pAstRVAS->rvas_dev);
++ return result;
++ }
++
++ result = devm_request_irq(&pdev->dev, pAstRVAS->irq_vga, fge_handler, 0,
++ dev_name(&pdev->dev), pAstRVAS);
++ if (result) {
++ pr_err("Error in requesting IRQ\n");
++		pr_err("RVAS: Failed to request vga irq %d\n", pAstRVAS->irq_vga);
++ misc_deregister(&pAstRVAS->rvas_dev);
++ return result;
++ }
++
++ result = devm_request_irq(&pdev->dev, pAstRVAS->irq_video, ast_video_isr, 0,
++ dev_name(&pdev->dev), pAstRVAS);
++ if (result) {
++ pr_err("Error in requesting IRQ\n");
++		pr_err("RVAS: Failed to request video irq %d\n", pAstRVAS->irq_video);
++ misc_deregister(&pAstRVAS->rvas_dev);
++ return result;
++ }
++
++ return result;
++}
++
++/*
++ * Platform probe: map the FG/GRC/video register banks, set the DMA mask,
++ * acquire IRQs, resets and clocks, register the RVAS misc device and
++ * bring up the video engine.
++ */
++static int video_drv_probe(struct platform_device *pdev)
++{
++ int result = 0;
++ struct AstRVAS *pAstRVAS;
++ struct regmap *sdram_scu;
++ struct device_node *dp_node;
++ struct device_node *edac_node;
++ void __iomem *mcr_base;
++
++ pr_info("RVAS driver probe\n");
++ pAstRVAS = devm_kzalloc(&pdev->dev, sizeof(struct AstRVAS), GFP_KERNEL);
++ VIDEO_DBG("pAstRVAS: 0x%llx\n", pAstRVAS);
++
++ if (!pAstRVAS) {
++		dev_err(&pdev->dev, "Cannot allocate device structure\n");
++ return -ENOMEM;
++ }
++ dev_set_drvdata(&pdev->dev, pAstRVAS);
++ pAstRVAS->pdev = (void *)&pdev->dev;
++
++ // Get resources
++ result = video_drv_get_resources(pdev);
++ if (result < 0) {
++ dev_err(pAstRVAS->pdev, "video_probe: Error getting resources\n");
++ return result;
++ }
++
++ if (pAstRVAS->config->version == 7)
++ result = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
++ else
++ result = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
++
++ if (result) {
++ dev_err(&pdev->dev, "Failed to set DMA mask\n");
++ of_reserved_mem_device_release(&pdev->dev);
++ }
++
++ //get irqs
++ result = video_drv_get_irqs(pdev);
++ if (result < 0) {
++ dev_err(pAstRVAS->pdev, "video_probe: Error getting irqs\n");
++ return result;
++ }
++
++ pAstRVAS->rvas_reset = devm_reset_control_get_by_index(&pdev->dev, 0);
++ if (IS_ERR(pAstRVAS->rvas_reset)) {
++ dev_err(&pdev->dev, "can't get rvas reset\n");
++ return -ENOENT;
++ }
++ if (pAstRVAS->config->version == 7)
++ reset_control_deassert(pAstRVAS->rvas_reset);
++ pAstRVAS->video_engine_reset = devm_reset_control_get_shared_by_index(&pdev->dev, 1);
++ if (IS_ERR(pAstRVAS->video_engine_reset)) {
++ dev_err(&pdev->dev, "can't get video engine reset\n");
++ return -ENOENT;
++ }
++
++ //prepare video engine clock
++ result = video_drv_get_clock(pdev);
++ if (result < 0) {
++ dev_err(pAstRVAS->pdev, "video_probe: Error getting clocks\n");
++ return result;
++ }
++
++ dp_node = of_find_compatible_node(NULL, NULL, "aspeed,ast2600-displayport");
++ if (!dp_node) {
++ dev_err(&pdev->dev, "cannot find dp node\n");
++ } else {
++ pAstRVAS->dp_base = of_iomap(dp_node, 0);
++ if (!pAstRVAS->dp_base)
++ dev_err(&pdev->dev, "failed to iomem of display port\n");
++ }
++ if (pAstRVAS->config->version == 7) {
++ pAstRVAS->FBInfo.dwDRAMSize = 0x40000000; // 1GB
++ // VGA size is fixed with 32MB
++ pAstRVAS->FBInfo.dwVGASize = 0x2000000;
++ } else {
++ edac_node = of_find_compatible_node(NULL, NULL, "aspeed,ast2600-sdram-edac");
++ if (!edac_node) {
++ dev_err(&pdev->dev, "cannot find edac node\n");
++ } else {
++ mcr_base = of_iomap(edac_node, 0);
++ if (!mcr_base)
++ dev_err(&pdev->dev, "failed to iomem of MCR\n");
++ }
++
++ set_FBInfo_size(pAstRVAS, mcr_base);
++ }
++	/* SCU regmaps: AST2700 provides scu0 plus scu1 (kept in scu_io); AST2600 has a single SCU */
++ if (pAstRVAS->config->version == 7) {
++ sdram_scu = syscon_regmap_lookup_by_compatible("aspeed,ast2700-scu0");
++ VIDEO_DBG("sdram_scu: 0x%llx\n", sdram_scu);
++ if (IS_ERR(sdram_scu)) {
++ dev_err(&pdev->dev, "failed to find ast2700-scu0 regmap\n");
++ return PTR_ERR(sdram_scu);
++ }
++ pAstRVAS->scu = sdram_scu;
++
++ sdram_scu = syscon_regmap_lookup_by_compatible("aspeed,ast2700-scu1");
++ VIDEO_DBG("sdram_scu: 0x%llx\n", sdram_scu);
++ if (IS_ERR(sdram_scu)) {
++			dev_err(&pdev->dev, "failed to find ast2700-scu1 regmap\n");
++ return PTR_ERR(sdram_scu);
++ }
++ pAstRVAS->scu_io = sdram_scu;
++ } else {
++ sdram_scu = syscon_regmap_lookup_by_compatible("aspeed,ast2600-scu");
++ VIDEO_DBG("sdram_scu: 0x%llx\n", sdram_scu);
++ if (IS_ERR(sdram_scu)) {
++ dev_err(&pdev->dev, "failed to find ast2600-scu regmap\n");
++ return PTR_ERR(sdram_scu);
++ }
++ pAstRVAS->scu = sdram_scu;
++ }
++ pAstRVAS->rvas_dev = video_misc;
++ if (pAstRVAS->config->version == 7) {
++ if (of_alias_get_id(pdev->dev.of_node, "rvas") == 1) {
++ pAstRVAS->rvas_index = 1;
++ pAstRVAS->rvas_dev.name = "rvas1";
++ } else {
++ pAstRVAS->rvas_index = 0;
++ }
++ }
++ pAstRVAS->rvas_dev.parent = &pdev->dev;
++ result = misc_register(&pAstRVAS->rvas_dev);
++ if (result) {
++ pr_err("Failed in rvas misc device register (err: %d)\n", result);
++ return result;
++ }
++ pr_info("Video misc minor %d\n", pAstRVAS->rvas_dev.minor);
++ VIDEO_DBG("pdev: 0x%llx dev: 0x%llx pAstRVAS: 0x%llx rvas_dev: 0x%llx\n", pdev,
++ &pdev->dev, pAstRVAS, pAstRVAS->rvas_dev);
++
++ if (sysfs_create_group(&pdev->dev.kobj, &rvas_attribute_group)) {
++ pr_err("Failed in creating group\n");
++ return -1;
++ }
++
++ VIDEO_DBG("Disabling interrupts...\n");
++ disable_grce_tse_interrupt(pAstRVAS);
++
++ //reserve memory
++ of_reserved_mem_device_init(&pdev->dev);
++
++ // map irqs to irq_handlers
++ result = video_drv_map_irqs(pdev);
++ if (result < 0) {
++ dev_err(pAstRVAS->pdev, "video_probe: Error mapping irqs\n");
++ return result;
++ }
++ VIDEO_DBG("After IRQ registration\n");
++
++ init_osr_es(pAstRVAS);
++ rvas_init(pAstRVAS);
++ video_engine_reserveMem(pAstRVAS);
++ video_on(pAstRVAS);
++
++ pr_info("RVAS: driver successfully loaded.\n");
++ return result;
++}
++
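++/* Reset the snoop engine, refresh the video geometry and re-enable the GRCE/TSE interrupts. */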
++static void rvas_init(struct AstRVAS *pAstRVAS)
++{
++ VIDEO_ENG_DBG("\n");
++
++ reset_snoop_engine(pAstRVAS);
++ update_video_geometry(pAstRVAS);
++
++ set_snoop_engine(true, pAstRVAS);
++ enable_grce_tse_interrupt(pAstRVAS);
++}
++
++static void video_engine_init(struct AstRVAS *pAstRVAS)
++{
++ VIDEO_ENG_DBG("\n");
++ // video engine
++ video_ctrl_init(pAstRVAS);
++ video_engine_rc4Reset(pAstRVAS);
++ if (pAstRVAS->config->version == 7)
++ set_direct_mode(pAstRVAS);
++ video_set_Window(pAstRVAS);
++ enable_video_interrupt(pAstRVAS);
++}
++
++static int video_drv_remove(struct platform_device *pdev)
++{
++ struct AstRVAS *pAstRVAS = NULL;
++
++ VIDEO_DBG("\n");
++ pAstRVAS = platform_get_drvdata(pdev);
++ video_off(pAstRVAS);
++ VIDEO_DBG("disable_grce_tse_interrupt...\n");
++ disable_grce_tse_interrupt(pAstRVAS);
++ disable_video_interrupt(pAstRVAS);
++
++ sysfs_remove_group(&pdev->dev.kobj, &rvas_attribute_group);
++
++ VIDEO_DBG("misc_deregister...\n");
++ misc_deregister(&pAstRVAS->rvas_dev);
++
++ VIDEO_DBG("Releasing OSRes...\n");
++ release_osr_es(pAstRVAS);
++
++ free_video_engine_memory(pAstRVAS);
++ pr_info("RVAS: driver successfully unloaded.\n");
++ return 0;
++}
++
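++/* DRAM and VGA aperture size tables, indexed by the MCR_CONF configuration fields. */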
++static const u32 ast2400_dram_table[] = {
++ 0x04000000, //64MB
++ 0x08000000, //128MB
++ 0x10000000, //256MB
++ 0x20000000, //512MB
++};
++
++static const u32 ast2500_dram_table[] = {
++ 0x08000000, //128MB
++ 0x10000000, //256MB
++ 0x20000000, //512MB
++ 0x40000000, //1024MB
++};
++
++static const u32 ast2600_dram_table[] = {
++ 0x10000000, //256MB
++ 0x20000000, //512MB
++ 0x40000000, //1024MB
++ 0x80000000, //2048MB
++};
++
++static const u32 aspeed_vga_table[] = {
++ 0x800000, //8MB
++ 0x1000000, //16MB
++ 0x2000000, //32MB
++ 0x4000000, //64MB
++};
++
++static const struct aspeed_rvas_config ast2600_config = {
++ .version = 6,
++ .dram_table = ast2600_dram_table,
++};
++
++static const struct aspeed_rvas_config ast2700_config = {
++ .version = 7,
++ .dram_table = ast2600_dram_table,
++};
++
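++/* Decode the installed DRAM size and the reserved VGA size from the SDRAM controller MCR_CONF register. */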
++static void set_FBInfo_size(struct AstRVAS *pAstRVAS, void __iomem *mcr_base)
++{
++ u32 reg_mcr004 = readl(mcr_base + MCR_CONF);
++
++ pAstRVAS->FBInfo.dwDRAMSize = pAstRVAS->config->dram_table[reg_mcr004 & 0x3];
++
++ pAstRVAS->FBInfo.dwVGASize = aspeed_vga_table[((reg_mcr004 & 0xC) >> 2)];
++}
++
++static const struct of_device_id ast_rvas_match[] = {
++ { .compatible = "aspeed,ast2700-rvas", .data = &ast2700_config },
++ { .compatible = "aspeed,ast2600-rvas", .data = &ast2600_config },
++ { },
++};
++
++MODULE_DEVICE_TABLE(of, ast_rvas_match);
++
++static struct platform_driver video_driver = {
++	.probe = video_drv_probe,
++	.remove = video_drv_remove,
++	.driver = {
++		.name = RVAS_DRIVER_NAME,
++		.of_match_table = of_match_ptr(ast_rvas_match),
++		.owner = THIS_MODULE,
++	},
++};
++
++module_platform_driver(video_driver);
++
++MODULE_AUTHOR("ASPEED Technology");
++MODULE_DESCRIPTION("RVAS video driver module for ASPEED AST2600/AST2700");
++MODULE_LICENSE("GPL");
+--
+2.34.1
+
diff --git a/recipes-kernel/linux/files/0007-uart_udma_2700.patch b/recipes-kernel/linux/files/0007-uart_udma_2700.patch
deleted file mode 100644
index 006600c..0000000
--- a/recipes-kernel/linux/files/0007-uart_udma_2700.patch
+++ /dev/null
@@ -1,1085 +0,0 @@
-From 68d8f1726d559aad2ad816d02c1e941a8e4e0fbb Mon Sep 17 00:00:00 2001
-From: Shao-Chieh Chao <jieh.sc.chao@mail.foxconn.com>
-Date: Wed, 11 Dec 2024 10:44:33 +0800
-Subject: [PATCH] revise patch0007
-
----
- drivers/soc/aspeed/Kconfig | 9 +
- drivers/soc/aspeed/Makefile | 3 +
- drivers/soc/aspeed/aspeed-udma.c | 433 +++++++++++++++++++++
- drivers/tty/serial/8250/8250_aspeed.c | 506 +++++++++++++++++++++++++
- drivers/tty/serial/8250/8250_early.c | 1 +
- drivers/tty/serial/8250/Kconfig | 9 +
- drivers/tty/serial/8250/Makefile | 1 +
- include/linux/soc/aspeed/aspeed-udma.h | 30 ++
- 8 files changed, 992 insertions(+)
- create mode 100644 drivers/soc/aspeed/aspeed-udma.c
- create mode 100644 drivers/tty/serial/8250/8250_aspeed.c
- create mode 100644 include/linux/soc/aspeed/aspeed-udma.h
-
-diff --git a/drivers/soc/aspeed/Kconfig b/drivers/soc/aspeed/Kconfig
-index f579ee0b5..d03b5462e 100644
---- a/drivers/soc/aspeed/Kconfig
-+++ b/drivers/soc/aspeed/Kconfig
-@@ -52,6 +52,15 @@ config ASPEED_SOCINFO
- help
- Say yes to support decoding of ASPEED BMC information.
-
-+
-+config ASPEED_UDMA
-+ tristate "Aspeed UDMA Engine Driver"
-+ depends on ARCH_ASPEED && REGMAP && MFD_SYSCON && HAS_DMA
-+ help
-+ Enable support for the Aspeed UDMA Engine found on the Aspeed AST2XXX
-+ SOCs. The UDMA engine can perform UART DMA operations between the memory
-+ buffer and the UART/VUART devices.
-+
- endmenu
-
- endif
-diff --git a/drivers/soc/aspeed/Makefile b/drivers/soc/aspeed/Makefile
-index b35d74592..79d826fef 100644
---- a/drivers/soc/aspeed/Makefile
-+++ b/drivers/soc/aspeed/Makefile
-@@ -4,3 +4,6 @@ obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
- obj-$(CONFIG_ASPEED_UART_ROUTING) += aspeed-uart-routing.o
- obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o
- obj-$(CONFIG_ASPEED_SOCINFO) += aspeed-socinfo.o
-+obj-$(CONFIG_ASPEED_SBC) += aspeed-sbc.o
-+obj-$(CONFIG_ASPEED_XDMA) += aspeed-xdma.o
-+obj-$(CONFIG_ASPEED_UDMA) += aspeed-udma.o
-\ No newline at end of file
-diff --git a/drivers/soc/aspeed/aspeed-udma.c b/drivers/soc/aspeed/aspeed-udma.c
-new file mode 100644
-index 000000000..9f7b58fb7
---- /dev/null
-+++ b/drivers/soc/aspeed/aspeed-udma.c
-@@ -0,0 +1,433 @@
-+// SPDX-License-Identifier: GPL-2.0-or-later
-+/*
-+ * Copyright 2020 Aspeed Technology Inc.
-+ */
-+#include <linux/bitfield.h>
-+#include <linux/delay.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/interrupt.h>
-+#include <linux/io.h>
-+#include <linux/module.h>
-+#include <linux/of.h>
-+#include <linux/of_device.h>
-+#include <linux/platform_device.h>
-+#include <linux/sizes.h>
-+#include <linux/soc/aspeed/aspeed-udma.h>
-+#include <linux/spinlock.h>
-+
-+#define DEVICE_NAME "aspeed-udma"
-+
-+/* UART DMA registers offset */
-+#define UDMA_TX_DMA_EN 0x000
-+#define UDMA_RX_DMA_EN 0x004
-+#define UDMA_MISC 0x008
-+#define UDMA_MISC_RX_BUFSZ GENMASK(3, 2)
-+#define UDMA_MISC_TX_BUFSZ GENMASK(1, 0)
-+#define UDMA_TMOUT_TIMER 0x00c
-+#define UDMA_TX_DMA_RST 0x020
-+#define UDMA_RX_DMA_RST 0x024
-+#define UDMA_TX_DMA_INT_EN 0x030
-+#define UDMA_TX_DMA_INT_STS 0x034
-+#define UDMA_RX_DMA_INT_EN 0x038
-+#define UDMA_RX_DMA_INT_STS 0x03c
-+
-+#define UDMA_CHX_OFF(x) ((x) * 0x20)
-+#define UDMA_CHX_TX_RD_PTR(x) (0x040 + UDMA_CHX_OFF(x))
-+#define UDMA_CHX_TX_WR_PTR(x) (0x044 + UDMA_CHX_OFF(x))
-+#define UDMA_CHX_TX_BUF_ADDR(x) (0x048 + UDMA_CHX_OFF(x))
-+#define UDMA_CHX_TX_CTRL(x) (0x04c + UDMA_CHX_OFF(x))
-+#define UDMA_TX_CTRL_BUF_ADDRH GENMASK(10, 8)
-+#define UDMA_TX_CTRL_TMOUT_DIS BIT(4)
-+#define UDMA_TX_CTRL_BUFSZ GENMASK(3, 0)
-+#define UDMA_CHX_RX_RD_PTR(x) (0x050 + UDMA_CHX_OFF(x))
-+#define UDMA_CHX_RX_WR_PTR(x) (0x054 + UDMA_CHX_OFF(x))
-+#define UDMA_CHX_RX_BUF_ADDR(x) (0x058 + UDMA_CHX_OFF(x))
-+#define UDMA_CHX_RX_CTRL(x) (0x05c + UDMA_CHX_OFF(x))
-+#define UDMA_RX_CTRL_BUF_ADDRH GENMASK(10, 8)
-+#define UDMA_RX_CTRL_TMOUT_DIS BIT(4)
-+#define UDMA_RX_CTRL_BUFSZ GENMASK(1, 0)
-+
-+#define UDMA_MAX_CHANNEL 16
-+#define UDMA_TMOUT 0x200
-+
-+enum aspeed_udma_bufsz_code {
-+ UDMA_BUFSZ_CODE_1KB,
-+ UDMA_BUFSZ_CODE_4KB,
-+ UDMA_BUFSZ_CODE_16KB,
-+ UDMA_BUFSZ_CODE_64KB,
-+};
-+
-+struct aspeed_udma_chan {
-+ dma_addr_t dma_addr;
-+
-+ struct circ_buf *rb;
-+ u32 rb_sz;
-+
-+ aspeed_udma_cb_t cb;
-+ void *cb_arg;
-+
-+ bool dis_tmout;
-+};
-+
-+struct aspeed_udma {
-+ struct device *dev;
-+ u8 __iomem *regs;
-+ int irq;
-+ struct aspeed_udma_chan tx_chs[UDMA_MAX_CHANNEL];
-+ struct aspeed_udma_chan rx_chs[UDMA_MAX_CHANNEL];
-+ spinlock_t lock;
-+};
-+
-+struct aspeed_udma udma[1];
-+
-+static int aspeed_udma_get_bufsz_code(u32 buf_sz)
-+{
-+ switch (buf_sz) {
-+ case SZ_1K:
-+ return UDMA_BUFSZ_CODE_1KB;
-+ case SZ_4K:
-+ return UDMA_BUFSZ_CODE_4KB;
-+ case SZ_16K:
-+ return UDMA_BUFSZ_CODE_16KB;
-+ case SZ_64K:
-+ return UDMA_BUFSZ_CODE_64KB;
-+ default:
-+ break;
-+ }
-+
-+ return -1;
-+}
-+
-+static u32 aspeed_udma_get_tx_rptr(u32 ch_no)
-+{
-+ return readl(udma->regs + UDMA_CHX_TX_RD_PTR(ch_no));
-+}
-+
-+static u32 aspeed_udma_get_rx_wptr(u32 ch_no)
-+{
-+ return readl(udma->regs + UDMA_CHX_RX_WR_PTR(ch_no));
-+}
-+
-+static void aspeed_udma_set_ptr(u32 ch_no, u32 ptr, bool is_tx)
-+{
-+ writel(ptr, udma->regs +
-+ ((is_tx) ? UDMA_CHX_TX_WR_PTR(ch_no) : UDMA_CHX_RX_RD_PTR(ch_no)));
-+}
-+
-+void aspeed_udma_set_tx_wptr(u32 ch_no, u32 wptr)
-+{
-+ aspeed_udma_set_ptr(ch_no, wptr, true);
-+}
-+EXPORT_SYMBOL(aspeed_udma_set_tx_wptr);
-+
-+void aspeed_udma_set_rx_rptr(u32 ch_no, u32 rptr)
-+{
-+ aspeed_udma_set_ptr(ch_no, rptr, false);
-+}
-+EXPORT_SYMBOL(aspeed_udma_set_rx_rptr);
-+
-+static int aspeed_udma_free_chan(u32 ch_no, bool is_tx)
-+{
-+ u32 reg;
-+ unsigned long flags;
-+
-+ if (ch_no > UDMA_MAX_CHANNEL)
-+ return -EINVAL;
-+
-+ spin_lock_irqsave(&udma->lock, flags);
-+
-+ reg = readl(udma->regs +
-+ ((is_tx) ? UDMA_TX_DMA_INT_EN : UDMA_RX_DMA_INT_EN));
-+ reg &= ~(0x1 << ch_no);
-+
-+ writel(reg, udma->regs +
-+ ((is_tx) ? UDMA_TX_DMA_INT_EN : UDMA_RX_DMA_INT_EN));
-+
-+ spin_unlock_irqrestore(&udma->lock, flags);
-+
-+ return 0;
-+}
-+
-+int aspeed_udma_free_tx_chan(u32 ch_no)
-+{
-+ return aspeed_udma_free_chan(ch_no, true);
-+}
-+EXPORT_SYMBOL(aspeed_udma_free_tx_chan);
-+
-+int aspeed_udma_free_rx_chan(u32 ch_no)
-+{
-+ return aspeed_udma_free_chan(ch_no, false);
-+}
-+EXPORT_SYMBOL(aspeed_udma_free_rx_chan);
-+
-+static int aspeed_udma_request_chan(u32 ch_no, dma_addr_t addr,
-+ struct circ_buf *rb, u32 rb_sz,
-+ aspeed_udma_cb_t cb, void *id, bool dis_tmout, bool is_tx)
-+{
-+ int retval = 0;
-+ int rbsz_code;
-+
-+ u32 reg;
-+ unsigned long flags;
-+ struct aspeed_udma_chan *ch;
-+
-+ if (ch_no > UDMA_MAX_CHANNEL) {
-+ retval = -EINVAL;
-+ goto out;
-+ }
-+
-+ if (IS_ERR_OR_NULL(rb) || IS_ERR_OR_NULL(rb->buf)) {
-+ retval = -EINVAL;
-+ goto out;
-+ }
-+
-+ rbsz_code = aspeed_udma_get_bufsz_code(rb_sz);
-+ if (rbsz_code < 0) {
-+ retval = -EINVAL;
-+ goto out;
-+ }
-+
-+ spin_lock_irqsave(&udma->lock, flags);
-+
-+ if (is_tx) {
-+ reg = readl(udma->regs + UDMA_TX_DMA_INT_EN);
-+ if (reg & (0x1 << ch_no)) {
-+ retval = -EBUSY;
-+ goto unlock_n_out;
-+ }
-+
-+ reg |= (0x1 << ch_no);
-+ writel(reg, udma->regs + UDMA_TX_DMA_INT_EN);
-+
-+ reg = FIELD_PREP(UDMA_TX_CTRL_BUF_ADDRH, (u64)addr >> 32) |
-+ ((dis_tmout) ? UDMA_TX_CTRL_TMOUT_DIS : 0) |
-+ FIELD_PREP(UDMA_TX_CTRL_BUFSZ, rbsz_code);
-+ writel(reg, udma->regs + UDMA_CHX_TX_CTRL(ch_no));
-+
-+ writel(addr, udma->regs + UDMA_CHX_TX_BUF_ADDR(ch_no));
-+ } else {
-+ reg = readl(udma->regs + UDMA_RX_DMA_INT_EN);
-+ if (reg & (0x1 << ch_no)) {
-+ retval = -EBUSY;
-+ goto unlock_n_out;
-+ }
-+
-+ reg |= (0x1 << ch_no);
-+ writel(reg, udma->regs + UDMA_RX_DMA_INT_EN);
-+
-+ reg = FIELD_PREP(UDMA_RX_CTRL_BUF_ADDRH, (u64)addr >> 32) |
-+ ((dis_tmout) ? UDMA_RX_CTRL_TMOUT_DIS : 0) |
-+ FIELD_PREP(UDMA_RX_CTRL_BUFSZ, rbsz_code);
-+ writel(reg, udma->regs + UDMA_CHX_RX_CTRL(ch_no));
-+
-+ writel(addr, udma->regs + UDMA_CHX_RX_BUF_ADDR(ch_no));
-+ }
-+
-+ ch = (is_tx) ? &udma->tx_chs[ch_no] : &udma->rx_chs[ch_no];
-+ ch->rb = rb;
-+ ch->rb_sz = rb_sz;
-+ ch->cb = cb;
-+ ch->cb_arg = id;
-+ ch->dma_addr = addr;
-+ ch->dis_tmout = dis_tmout;
-+
-+unlock_n_out:
-+ spin_unlock_irqrestore(&udma->lock, flags);
-+out:
-+ return 0;
-+}
-+
-+int aspeed_udma_request_tx_chan(u32 ch_no, dma_addr_t addr,
-+ struct circ_buf *rb, u32 rb_sz,
-+ aspeed_udma_cb_t cb, void *id, bool dis_tmout)
-+{
-+ return aspeed_udma_request_chan(ch_no, addr, rb, rb_sz, cb, id,
-+ dis_tmout, true);
-+}
-+EXPORT_SYMBOL(aspeed_udma_request_tx_chan);
-+
-+int aspeed_udma_request_rx_chan(u32 ch_no, dma_addr_t addr,
-+ struct circ_buf *rb, u32 rb_sz,
-+ aspeed_udma_cb_t cb, void *id, bool dis_tmout)
-+{
-+ return aspeed_udma_request_chan(ch_no, addr, rb, rb_sz, cb, id,
-+ dis_tmout, false);
-+}
-+EXPORT_SYMBOL(aspeed_udma_request_rx_chan);
-+
-+static void aspeed_udma_chan_ctrl(u32 ch_no, u32 op, bool is_tx)
-+{
-+ unsigned long flags;
-+ u32 reg_en, reg_rst;
-+ u32 reg_en_off = (is_tx) ? UDMA_TX_DMA_EN : UDMA_RX_DMA_EN;
-+ u32 reg_rst_off = (is_tx) ? UDMA_TX_DMA_RST : UDMA_TX_DMA_RST;
-+
-+ if (ch_no > UDMA_MAX_CHANNEL)
-+ return;
-+
-+ spin_lock_irqsave(&udma->lock, flags);
-+
-+ reg_en = readl(udma->regs + reg_en_off);
-+ reg_rst = readl(udma->regs + reg_rst_off);
-+
-+ switch (op) {
-+ case ASPEED_UDMA_OP_ENABLE:
-+ reg_en |= (0x1 << ch_no);
-+ writel(reg_en, udma->regs + reg_en_off);
-+ break;
-+ case ASPEED_UDMA_OP_DISABLE:
-+ reg_en &= ~(0x1 << ch_no);
-+ writel(reg_en, udma->regs + reg_en_off);
-+ break;
-+ case ASPEED_UDMA_OP_RESET:
-+ reg_en &= ~(0x1 << ch_no);
-+ writel(reg_en, udma->regs + reg_en_off);
-+
-+ reg_rst |= (0x1 << ch_no);
-+ writel(reg_rst, udma->regs + reg_rst_off);
-+
-+ udelay(100);
-+
-+ reg_rst &= ~(0x1 << ch_no);
-+ writel(reg_rst, udma->regs + reg_rst_off);
-+ break;
-+ default:
-+ break;
-+ }
-+
-+ spin_unlock_irqrestore(&udma->lock, flags);
-+}
-+
-+void aspeed_udma_tx_chan_ctrl(u32 ch_no, enum aspeed_udma_ops op)
-+{
-+ aspeed_udma_chan_ctrl(ch_no, op, true);
-+}
-+EXPORT_SYMBOL(aspeed_udma_tx_chan_ctrl);
-+
-+void aspeed_udma_rx_chan_ctrl(u32 ch_no, enum aspeed_udma_ops op)
-+{
-+ aspeed_udma_chan_ctrl(ch_no, op, false);
-+}
-+EXPORT_SYMBOL(aspeed_udma_rx_chan_ctrl);
-+
-+static irqreturn_t aspeed_udma_isr(int irq, void *arg)
-+{
-+ u32 bit;
-+ unsigned long tx_sts = readl(udma->regs + UDMA_TX_DMA_INT_STS);
-+ unsigned long rx_sts = readl(udma->regs + UDMA_RX_DMA_INT_STS);
-+
-+ if (udma != (struct aspeed_udma *)arg)
-+ return IRQ_NONE;
-+
-+ if (tx_sts == 0 && rx_sts == 0)
-+ return IRQ_NONE;
-+
-+ for_each_set_bit(bit, &tx_sts, UDMA_MAX_CHANNEL) {
-+ writel((0x1 << bit), udma->regs + UDMA_TX_DMA_INT_STS);
-+ if (udma->tx_chs[bit].cb)
-+ udma->tx_chs[bit].cb(aspeed_udma_get_tx_rptr(bit),
-+ udma->tx_chs[bit].cb_arg);
-+ }
-+
-+ for_each_set_bit(bit, &rx_sts, UDMA_MAX_CHANNEL) {
-+ writel((0x1 << bit), udma->regs + UDMA_RX_DMA_INT_STS);
-+ if (udma->rx_chs[bit].cb)
-+ udma->rx_chs[bit].cb(aspeed_udma_get_rx_wptr(bit),
-+ udma->rx_chs[bit].cb_arg);
-+ }
-+
-+ return IRQ_HANDLED;
-+}
-+
-+static int aspeed_udma_probe(struct platform_device *pdev)
-+{
-+ int i, rc;
-+ uint32_t reg;
-+ struct resource *res;
-+ struct device *dev = &pdev->dev;
-+
-+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-+ if (IS_ERR_OR_NULL(res)) {
-+ dev_err(dev, "failed to get register base\n");
-+ return -ENODEV;
-+ }
-+
-+ udma->regs = devm_ioremap_resource(dev, res);
-+ if (IS_ERR_OR_NULL(udma->regs)) {
-+ dev_err(dev, "failed to map registers\n");
-+ return PTR_ERR(udma->regs);
-+ }
-+
-+ /* disable for safety */
-+ writel(0x0, udma->regs + UDMA_TX_DMA_EN);
-+ writel(0x0, udma->regs + UDMA_RX_DMA_EN);
-+
-+ udma->irq = platform_get_irq(pdev, 0);
-+ if (udma->irq < 0) {
-+ dev_err(dev, "failed to get IRQ number\n");
-+ return -ENODEV;
-+ }
-+
-+ rc = devm_request_irq(dev, udma->irq, aspeed_udma_isr,
-+ IRQF_SHARED, DEVICE_NAME, udma);
-+ if (rc) {
-+ dev_err(dev, "failed to request IRQ handler\n");
-+ return rc;
-+ }
-+
-+ /*
-+ * For legacy design.
-+ * - TX ringbuffer size: 4KB
-+ * - RX ringbuffer size: 64KB
-+ * - Timeout timer disabled
-+ */
-+ reg = FIELD_PREP(UDMA_MISC_TX_BUFSZ, UDMA_BUFSZ_CODE_4KB) |
-+ FIELD_PREP(UDMA_MISC_RX_BUFSZ, UDMA_BUFSZ_CODE_64KB);
-+ writel(reg, udma->regs + UDMA_MISC);
-+
-+ for (i = 0; i < UDMA_MAX_CHANNEL; ++i) {
-+ writel(0, udma->regs + UDMA_CHX_TX_WR_PTR(i));
-+ writel(0, udma->regs + UDMA_CHX_RX_RD_PTR(i));
-+ }
-+
-+ writel(0xffffffff, udma->regs + UDMA_TX_DMA_RST);
-+ writel(0x0, udma->regs + UDMA_TX_DMA_RST);
-+
-+ writel(0xffffffff, udma->regs + UDMA_RX_DMA_RST);
-+ writel(0x0, udma->regs + UDMA_RX_DMA_RST);
-+
-+ writel(0x0, udma->regs + UDMA_TX_DMA_INT_EN);
-+ writel(0xffffffff, udma->regs + UDMA_TX_DMA_INT_STS);
-+ writel(0x0, udma->regs + UDMA_RX_DMA_INT_EN);
-+ writel(0xffffffff, udma->regs + UDMA_RX_DMA_INT_STS);
-+
-+ writel(UDMA_TMOUT, udma->regs + UDMA_TMOUT_TIMER);
-+
-+ spin_lock_init(&udma->lock);
-+
-+ dev_set_drvdata(dev, udma);
-+
-+ return 0;
-+}
-+
-+static const struct of_device_id aspeed_udma_match[] = {
-+ { .compatible = "aspeed,ast2500-udma" },
-+ { .compatible = "aspeed,ast2600-udma" },
-+ { .compatible = "aspeed,ast2700-udma" },
-+ { },
-+};
-+
-+static struct platform_driver aspeed_udma_driver = {
-+ .driver = {
-+ .name = DEVICE_NAME,
-+ .of_match_table = aspeed_udma_match,
-+
-+ },
-+ .probe = aspeed_udma_probe,
-+};
-+
-+module_platform_driver(aspeed_udma_driver);
-+
-+MODULE_AUTHOR("Chia-Wei Wang <chiawei_wang@aspeedtech.com>");
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("Aspeed UDMA Engine Driver");
-diff --git a/drivers/tty/serial/8250/8250_aspeed.c b/drivers/tty/serial/8250/8250_aspeed.c
-new file mode 100644
-index 000000000..b2c43f3b8
---- /dev/null
-+++ b/drivers/tty/serial/8250/8250_aspeed.c
-@@ -0,0 +1,506 @@
-+// SPDX-License-Identifier: GPL-2.0
-+/*
-+ * Copyright (C) ASPEED Technology Inc.
-+ */
-+#include <linux/device.h>
-+#include <linux/io.h>
-+#include <linux/module.h>
-+#include <linux/serial_8250.h>
-+#include <linux/serial_reg.h>
-+#include <linux/of.h>
-+#include <linux/of_irq.h>
-+#include <linux/of_platform.h>
-+#include <linux/platform_device.h>
-+#include <linux/clk.h>
-+#include <linux/reset.h>
-+#include <linux/dma-mapping.h>
-+#include <linux/circ_buf.h>
-+#include <linux/tty_flip.h>
-+#include <linux/pm_runtime.h>
-+#include <linux/soc/aspeed/aspeed-udma.h>
-+
-+#include "8250.h"
-+
-+#define DEVICE_NAME "aspeed-uart"
-+
-+/* offsets for the aspeed virtual uart registers */
-+#define VUART_GCRA 0x20
-+#define VUART_GCRA_VUART_EN BIT(0)
-+#define VUART_GCRA_SIRQ_POLARITY BIT(1)
-+#define VUART_GCRA_DISABLE_HOST_TX_DISCARD BIT(5)
-+#define VUART_GCRB 0x24
-+#define VUART_GCRB_HOST_SIRQ_MASK GENMASK(7, 4)
-+#define VUART_GCRB_HOST_SIRQ_SHIFT 4
-+#define VUART_ADDRL 0x28
-+#define VUART_ADDRH 0x2c
-+
-+#define DMA_TX_BUFSZ PAGE_SIZE
-+#define DMA_RX_BUFSZ (64 * 1024)
-+
-+struct uart_ops ast8250_pops;
-+
-+struct ast8250_vuart {
-+ u32 port;
-+ u32 sirq;
-+ u32 sirq_pol;
-+};
-+
-+struct ast8250_udma {
-+ u32 ch;
-+
-+ u32 tx_rbsz;
-+ u32 rx_rbsz;
-+
-+ dma_addr_t tx_addr;
-+ dma_addr_t rx_addr;
-+
-+ struct circ_buf *tx_rb;
-+ struct circ_buf *rx_rb;
-+
-+ bool tx_tmout_dis;
-+ bool rx_tmout_dis;
-+};
-+
-+struct ast8250_data {
-+ int line;
-+
-+ u8 __iomem *regs;
-+
-+ bool is_vuart;
-+ bool use_dma;
-+
-+ struct reset_control *rst;
-+ struct clk *clk;
-+
-+ struct ast8250_vuart vuart;
-+ struct ast8250_udma dma;
-+};
-+
-+static void ast8250_dma_tx_complete(int tx_rb_rptr, void *id)
-+{
-+ u32 count;
-+ unsigned long flags;
-+ struct uart_port *port = (struct uart_port*)id;
-+ struct ast8250_data *data = port->private_data;
-+
-+ spin_lock_irqsave(&port->lock, flags);
-+
-+ count = CIRC_CNT(tx_rb_rptr, port->state->xmit.tail, data->dma.tx_rbsz);
-+ port->state->xmit.tail = tx_rb_rptr;
-+ port->icount.tx += count;
-+
-+ if (uart_circ_chars_pending(&port->state->xmit) < WAKEUP_CHARS)
-+ uart_write_wakeup(port);
-+
-+ spin_unlock_irqrestore(&port->lock, flags);
-+}
-+
-+static void ast8250_dma_rx_complete(int rx_rb_wptr, void *id)
-+{
-+ unsigned long flags;
-+ struct uart_port *up = (struct uart_port*)id;
-+ struct tty_port *tp = &up->state->port;
-+ struct ast8250_data *data = up->private_data;
-+ struct ast8250_udma *dma = &data->dma;
-+ struct circ_buf *rx_rb = dma->rx_rb;
-+ u32 rx_rbsz = dma->rx_rbsz;
-+ u32 count = 0;
-+
-+ spin_lock_irqsave(&up->lock, flags);
-+
-+ rx_rb->head = rx_rb_wptr;
-+
-+ dma_sync_single_for_cpu(up->dev,
-+ dma->rx_addr, dma->rx_rbsz, DMA_FROM_DEVICE);
-+
-+ while (CIRC_CNT(rx_rb->head, rx_rb->tail, rx_rbsz)) {
-+ count = CIRC_CNT_TO_END(rx_rb->head, rx_rb->tail, rx_rbsz);
-+
-+ tty_insert_flip_string(tp, rx_rb->buf + rx_rb->tail, count);
-+
-+ rx_rb->tail += count;
-+ rx_rb->tail %= rx_rbsz;
-+
-+ up->icount.rx += count;
-+ }
-+
-+ if (count) {
-+ aspeed_udma_set_rx_rptr(data->dma.ch, rx_rb->tail);
-+ tty_flip_buffer_push(tp);
-+ }
-+
-+ spin_unlock_irqrestore(&up->lock, flags);
-+}
-+
-+static void ast8250_dma_start_tx(struct uart_port *port)
-+{
-+ struct ast8250_data *data = port->private_data;
-+ struct ast8250_udma *dma = &data->dma;
-+ struct circ_buf *tx_rb = dma->tx_rb;
-+
-+ dma_sync_single_for_device(port->dev,
-+ dma->tx_addr, dma->tx_rbsz, DMA_TO_DEVICE);
-+
-+ aspeed_udma_set_tx_wptr(dma->ch, tx_rb->head);
-+}
-+
-+static void ast8250_dma_pops_hook(struct uart_port *port)
-+{
-+ static int first = 1;
-+
-+ if (first) {
-+ ast8250_pops = *port->ops;
-+ ast8250_pops.start_tx = ast8250_dma_start_tx;
-+ }
-+
-+ first = 0;
-+ port->ops = &ast8250_pops;
-+}
-+
-+static void ast8250_vuart_init(struct ast8250_data *data)
-+{
-+ u8 reg;
-+ struct ast8250_vuart *vuart = &data->vuart;
-+
-+ /* IO port address */
-+ writeb((u8)(vuart->port >> 0), data->regs + VUART_ADDRL);
-+ writeb((u8)(vuart->port >> 8), data->regs + VUART_ADDRH);
-+
-+ /* SIRQ number */
-+ reg = readb(data->regs + VUART_GCRB);
-+ reg &= ~VUART_GCRB_HOST_SIRQ_MASK;
-+ reg |= ((vuart->sirq << VUART_GCRB_HOST_SIRQ_SHIFT) & VUART_GCRB_HOST_SIRQ_MASK);
-+ writeb(reg, data->regs + VUART_GCRB);
-+
-+ /* SIRQ polarity */
-+ reg = readb(data->regs + VUART_GCRA);
-+ if (vuart->sirq_pol)
-+ reg |= VUART_GCRA_SIRQ_POLARITY;
-+ else
-+ reg &= ~VUART_GCRA_SIRQ_POLARITY;
-+ writeb(reg, data->regs + VUART_GCRA);
-+}
-+
-+static void ast8250_vuart_set_host_tx_discard(struct ast8250_data *data, bool discard)
-+{
-+ u8 reg;
-+
-+ reg = readb(data->regs + VUART_GCRA);
-+ if (discard)
-+ reg &= ~VUART_GCRA_DISABLE_HOST_TX_DISCARD;
-+ else
-+ reg |= VUART_GCRA_DISABLE_HOST_TX_DISCARD;
-+ writeb(reg, data->regs + VUART_GCRA);
-+}
-+
-+static void ast8250_vuart_set_enable(struct ast8250_data *data, bool enable)
-+{
-+ u8 reg;
-+
-+ reg = readb(data->regs + VUART_GCRA);
-+ if (enable)
-+ reg |= VUART_GCRA_VUART_EN;
-+ else
-+ reg &= ~VUART_GCRA_VUART_EN;
-+ writeb(reg, data->regs + VUART_GCRA);
-+}
-+
-+static int ast8250_handle_irq(struct uart_port *port)
-+{
-+ u32 iir = port->serial_in(port, UART_IIR);
-+ return serial8250_handle_irq(port, iir);
-+}
-+
-+static int ast8250_startup(struct uart_port *port)
-+{
-+ int rc = 0;
-+ struct ast8250_data *data = port->private_data;
-+ struct ast8250_udma *dma;
-+
-+ if (data->is_vuart)
-+ ast8250_vuart_set_host_tx_discard(data, false);
-+
-+ if (data->use_dma) {
-+ dma = &data->dma;
-+
-+ dma->tx_rbsz = DMA_TX_BUFSZ;
-+ dma->rx_rbsz = DMA_RX_BUFSZ;
-+
-+ /*
-+ * We take the xmit buffer passed from upper layers as
-+ * the DMA TX buffer and allocate a new buffer for the
-+ * RX use.
-+ *
-+ * To keep the TX/RX operation consistency, we use the
-+ * streaming DMA interface instead of the coherent one
-+ */
-+ dma->tx_rb = &port->state->xmit;
-+ dma->rx_rb->buf = kzalloc(data->dma.rx_rbsz, GFP_KERNEL);
-+ if (IS_ERR_OR_NULL(dma->rx_rb->buf)) {
-+ dev_err(port->dev, "failed to allcoate RX DMA buffer\n");
-+ rc = -ENOMEM;
-+ goto out;
-+ }
-+
-+ dma->tx_addr = dma_map_single(port->dev, dma->tx_rb->buf,
-+ dma->tx_rbsz, DMA_TO_DEVICE);
-+ if (dma_mapping_error(port->dev, dma->tx_addr)) {
-+ dev_err(port->dev, "failed to map streaming TX DMA region\n");
-+ rc = -ENOMEM;
-+ goto free_dma_n_out;
-+ }
-+
-+ dma->rx_addr = dma_map_single(port->dev, dma->rx_rb->buf,
-+ dma->rx_rbsz, DMA_FROM_DEVICE);
-+ if (dma_mapping_error(port->dev, dma->rx_addr)) {
-+ dev_err(port->dev, "failed to map streaming RX DMA region\n");
-+ rc = -ENOMEM;
-+ goto free_dma_n_out;
-+ }
-+
-+ rc = aspeed_udma_request_tx_chan(dma->ch, dma->tx_addr,
-+ dma->tx_rb, dma->tx_rbsz, ast8250_dma_tx_complete, port, dma->tx_tmout_dis);
-+ if (rc) {
-+ dev_err(port->dev, "failed to request DMA TX channel\n");
-+ goto free_dma_n_out;
-+ }
-+
-+ rc = aspeed_udma_request_rx_chan(dma->ch, dma->rx_addr,
-+ dma->rx_rb, dma->rx_rbsz, ast8250_dma_rx_complete, port, dma->rx_tmout_dis);
-+ if (rc) {
-+ dev_err(port->dev, "failed to request DMA RX channel\n");
-+ goto free_dma_n_out;
-+ }
-+
-+ ast8250_dma_pops_hook(port);
-+
-+ aspeed_udma_tx_chan_ctrl(dma->ch, ASPEED_UDMA_OP_ENABLE);
-+ aspeed_udma_rx_chan_ctrl(dma->ch, ASPEED_UDMA_OP_ENABLE);
-+ }
-+
-+ memset(&port->icount, 0, sizeof(port->icount));
-+ return serial8250_do_startup(port);
-+
-+free_dma_n_out:
-+ kfree(dma->rx_rb->buf);
-+out:
-+ return rc;
-+}
-+
-+static void ast8250_shutdown(struct uart_port *port)
-+{
-+ int rc;
-+ struct ast8250_data *data = port->private_data;
-+ struct ast8250_udma *dma;
-+
-+ if (data->use_dma) {
-+ dma = &data->dma;
-+
-+ aspeed_udma_tx_chan_ctrl(dma->ch, ASPEED_UDMA_OP_RESET);
-+ aspeed_udma_rx_chan_ctrl(dma->ch, ASPEED_UDMA_OP_RESET);
-+
-+ aspeed_udma_tx_chan_ctrl(dma->ch, ASPEED_UDMA_OP_DISABLE);
-+ aspeed_udma_rx_chan_ctrl(dma->ch, ASPEED_UDMA_OP_DISABLE);
-+
-+ rc = aspeed_udma_free_tx_chan(dma->ch);
-+ if (rc)
-+ dev_err(port->dev, "failed to free DMA TX channel, rc=%d\n", rc);
-+
-+ rc = aspeed_udma_free_rx_chan(dma->ch);
-+ if (rc)
-+ dev_err(port->dev, "failed to free DMA TX channel, rc=%d\n", rc);
-+
-+ dma_unmap_single(port->dev, dma->tx_addr,
-+ dma->tx_rbsz, DMA_TO_DEVICE);
-+ dma_unmap_single(port->dev, dma->rx_addr,
-+ dma->rx_rbsz, DMA_FROM_DEVICE);
-+
-+ if (dma->rx_rb->buf)
-+ kfree(dma->rx_rb->buf);
-+ }
-+
-+ if (data->is_vuart)
-+ ast8250_vuart_set_host_tx_discard(data, true);
-+
-+ serial8250_do_shutdown(port);
-+}
-+
-+static int __maybe_unused ast8250_suspend(struct device *dev)
-+{
-+ struct ast8250_data *data = dev_get_drvdata(dev);
-+ serial8250_suspend_port(data->line);
-+ return 0;
-+}
-+
-+static int __maybe_unused ast8250_resume(struct device *dev)
-+{
-+ struct ast8250_data *data = dev_get_drvdata(dev);
-+ serial8250_resume_port(data->line);
-+ return 0;
-+}
-+
-+static int ast8250_probe(struct platform_device *pdev)
-+{
-+ int rc;
-+ struct uart_8250_port uart = {};
-+ struct uart_port *port = &uart.port;
-+ struct device *dev = &pdev->dev;
-+ struct ast8250_data *data;
-+
-+ struct resource *res;
-+ u32 irq;
-+
-+ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
-+ if (rc) {
-+ dev_err(dev, "cannot set 64-bits DMA mask\n");
-+ return rc;
-+ }
-+
-+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
-+ if (data == NULL)
-+ return -ENOMEM;
-+
-+ data->dma.rx_rb = devm_kzalloc(dev, sizeof(data->dma.rx_rb), GFP_KERNEL);
-+ if (data->dma.rx_rb == NULL)
-+ return -ENOMEM;
-+
-+ irq = platform_get_irq(pdev, 0);
-+ if (irq < 0) {
-+ if (irq != -EPROBE_DEFER)
-+ dev_err(dev, "failed to get IRQ number\n");
-+ return irq;
-+ }
-+
-+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-+ if (res == NULL) {
-+ dev_err(dev, "failed to get register base\n");
-+ return -ENODEV;
-+ }
-+
-+ data->regs = devm_ioremap(dev, res->start, resource_size(res));
-+ if (IS_ERR(data->regs)) {
-+ dev_err(dev, "failed to map registers\n");
-+ return PTR_ERR(data->regs);
-+ }
-+
-+ data->clk = devm_clk_get(dev, NULL);
-+ if (IS_ERR(data->clk)) {
-+ dev_err(dev, "failed to get clocks\n");
-+ return -ENODEV;
-+ }
-+
-+ rc = clk_prepare_enable(data->clk);
-+ if (rc) {
-+ dev_err(dev, "failed to enable clock\n");
-+ return rc;
-+ }
-+
-+ data->rst = devm_reset_control_get_optional_exclusive(dev, NULL);
-+ if (!IS_ERR(data->rst))
-+ reset_control_deassert(data->rst);
-+
-+ data->is_vuart = of_property_read_bool(dev->of_node, "virtual");
-+ if (data->is_vuart) {
-+ rc = of_property_read_u32(dev->of_node, "port", &data->vuart.port);
-+ if (rc) {
-+ dev_err(dev, "failed to get VUART port address\n");
-+ return -ENODEV;
-+ }
-+
-+ rc = of_property_read_u32(dev->of_node, "sirq", &data->vuart.sirq);
-+ if (rc) {
-+ dev_err(dev, "failed to get VUART SIRQ number\n");
-+ return -ENODEV;
-+ }
-+
-+ rc = of_property_read_u32(dev->of_node, "sirq-polarity", &data->vuart.sirq_pol);
-+ if (rc) {
-+ dev_err(dev, "failed to get VUART SIRQ polarity\n");
-+ return -ENODEV;
-+ }
-+
-+ ast8250_vuart_init(data);
-+ ast8250_vuart_set_host_tx_discard(data, true);
-+ ast8250_vuart_set_enable(data, true);
-+ }
-+
-+ data->use_dma = of_property_read_bool(dev->of_node, "dma-mode");
-+ if (data->use_dma) {
-+ rc = of_property_read_u32(dev->of_node, "dma-channel", &data->dma.ch);
-+ if (rc) {
-+ dev_err(dev, "failed to get DMA channel\n");
-+ return -ENODEV;
-+ }
-+
-+ data->dma.tx_tmout_dis = of_property_read_bool(dev->of_node, "dma-tx-timeout-disable");
-+ data->dma.rx_tmout_dis = of_property_read_bool(dev->of_node, "dma-rx-timeout-disable");
-+ }
-+
-+ spin_lock_init(&port->lock);
-+ port->dev = dev;
-+ port->type = PORT_16550A;
-+ port->irq = irq;
-+ port->line = of_alias_get_id(dev->of_node, "serial");
-+ port->handle_irq = ast8250_handle_irq;
-+ port->mapbase = res->start;
-+ port->mapsize = resource_size(res);
-+ port->membase = data->regs;
-+ port->uartclk = clk_get_rate(data->clk);
-+ port->regshift = 2;
-+ port->iotype = UPIO_MEM32;
-+ port->flags = UPF_FIXED_TYPE | UPF_FIXED_PORT | UPF_SHARE_IRQ;
-+ port->startup = ast8250_startup;
-+ port->shutdown = ast8250_shutdown;
-+ port->private_data = data;
-+ uart.bugs |= UART_BUG_TXRACE;
-+
-+ data->line = serial8250_register_8250_port(&uart);
-+ if (data->line < 0) {
-+ dev_err(dev, "failed to register 8250 port\n");
-+ return data->line;
-+ }
-+
-+ pm_runtime_set_active(&pdev->dev);
-+ pm_runtime_enable(&pdev->dev);
-+
-+ platform_set_drvdata(pdev, data);
-+ return 0;
-+}
-+
-+static int ast8250_remove(struct platform_device *pdev)
-+{
-+ struct ast8250_data *data = platform_get_drvdata(pdev);
-+
-+ if (data->is_vuart)
-+ ast8250_vuart_set_enable(data, false);
-+
-+ serial8250_unregister_port(data->line);
-+ return 0;
-+}
-+
-+static const struct dev_pm_ops ast8250_pm_ops = {
-+ SET_SYSTEM_SLEEP_PM_OPS(ast8250_suspend, ast8250_resume)
-+};
-+
-+static const struct of_device_id ast8250_of_match[] = {
-+ { .compatible = "aspeed,ast2500-uart" },
-+ { .compatible = "aspeed,ast2600-uart" },
-+ { .compatible = "aspeed,ast2700-uart" },
-+ { },
-+};
-+
-+static struct platform_driver ast8250_platform_driver = {
-+ .driver = {
-+ .name = DEVICE_NAME,
-+ .pm = &ast8250_pm_ops,
-+ .of_match_table = ast8250_of_match,
-+ },
-+ .probe = ast8250_probe,
-+ .remove = ast8250_remove,
-+};
-+
-+module_platform_driver(ast8250_platform_driver);
-+
-+MODULE_AUTHOR("Chia-Wei Wang <chiawei_wang@aspeedtech.com>");
-+MODULE_LICENSE("GPL");
-+MODULE_DESCRIPTION("Aspeed UART Driver");
-diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
-index e3f482fd3..9260b174c 100644
---- a/drivers/tty/serial/8250/8250_early.c
-+++ b/drivers/tty/serial/8250/8250_early.c
-@@ -170,6 +170,7 @@ OF_EARLYCON_DECLARE(ns16550, "ns16550", early_serial8250_setup);
- OF_EARLYCON_DECLARE(ns16550a, "ns16550a", early_serial8250_setup);
- OF_EARLYCON_DECLARE(uart, "nvidia,tegra20-uart", early_serial8250_setup);
- OF_EARLYCON_DECLARE(uart, "snps,dw-apb-uart", early_serial8250_setup);
-+OF_EARLYCON_DECLARE(uart, "aspeed,ast2700-uart", early_serial8250_setup);
-
- #ifdef CONFIG_SERIAL_8250_OMAP
-
-diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
-index ee17cf5c4..263e7d1c8 100644
---- a/drivers/tty/serial/8250/Kconfig
-+++ b/drivers/tty/serial/8250/Kconfig
-@@ -255,6 +255,15 @@ config SERIAL_8250_ACCENT
- To compile this driver as a module, choose M here: the module
- will be called 8250_accent.
-
-+config SERIAL_8250_ASPEED
-+ tristate "Aspeed UART"
-+ depends on SERIAL_8250 && ARCH_ASPEED
-+ select ASPEED_UDMA
-+ help
-+ If you have a system using an Aspeed AST26xx SoCs and wish to
-+ make use of its 16550A-compatible UART devices with DMA support,
-+ say Y to this option. If unsure, say N.
-+
- config SERIAL_8250_ASPEED_VUART
- tristate "Aspeed Virtual UART"
- depends on SERIAL_8250
-diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
-index 628b75be3..74148767f 100644
---- a/drivers/tty/serial/8250/Makefile
-+++ b/drivers/tty/serial/8250/Makefile
-@@ -20,6 +20,7 @@ obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o
- obj-$(CONFIG_SERIAL_8250_CS) += serial_cs.o
- obj-$(CONFIG_SERIAL_8250_ACORN) += 8250_acorn.o
- obj-$(CONFIG_SERIAL_8250_ASPEED_VUART) += 8250_aspeed_vuart.o
-+obj-$(CONFIG_SERIAL_8250_ASPEED) += 8250_aspeed.o
- obj-$(CONFIG_SERIAL_8250_BCM2835AUX) += 8250_bcm2835aux.o
- obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o
- obj-$(CONFIG_SERIAL_8250_FOURPORT) += 8250_fourport.o
-diff --git a/include/linux/soc/aspeed/aspeed-udma.h b/include/linux/soc/aspeed/aspeed-udma.h
-new file mode 100644
-index 000000000..33acea745
---- /dev/null
-+++ b/include/linux/soc/aspeed/aspeed-udma.h
-@@ -0,0 +1,30 @@
-+#ifndef __ASPEED_UDMA_H__
-+#define __ASPEED_UDMA_H__
-+
-+#include <linux/circ_buf.h>
-+
-+typedef void (*aspeed_udma_cb_t)(int rb_rwptr, void *id);
-+
-+enum aspeed_udma_ops {
-+ ASPEED_UDMA_OP_ENABLE,
-+ ASPEED_UDMA_OP_DISABLE,
-+ ASPEED_UDMA_OP_RESET,
-+};
-+
-+void aspeed_udma_set_tx_wptr(u32 ch_no, u32 wptr);
-+void aspeed_udma_set_rx_rptr(u32 ch_no, u32 rptr);
-+
-+void aspeed_udma_tx_chan_ctrl(u32 ch_no, enum aspeed_udma_ops op);
-+void aspeed_udma_rx_chan_ctrl(u32 ch_no, enum aspeed_udma_ops op);
-+
-+int aspeed_udma_request_tx_chan(u32 ch_no, dma_addr_t addr,
-+ struct circ_buf *rb, u32 rb_sz,
-+ aspeed_udma_cb_t cb, void *id, bool en_tmout);
-+int aspeed_udma_request_rx_chan(u32 ch_no, dma_addr_t addr,
-+ struct circ_buf *rb, u32 rb_sz,
-+ aspeed_udma_cb_t cb, void *id, bool en_tmout);
-+
-+int aspeed_udma_free_tx_chan(u32 ch_no);
-+int aspeed_udma_free_rx_chan(u32 ch_no);
-+
-+#endif
---
-2.34.1
-
diff --git a/recipes-kernel/linux/files/0008-Add-uart-drvier-for-ast2700.patch b/recipes-kernel/linux/files/0008-Add-uart-drvier-for-ast2700.patch
new file mode 100644
index 0000000..2699c1d
--- /dev/null
+++ b/recipes-kernel/linux/files/0008-Add-uart-drvier-for-ast2700.patch
@@ -0,0 +1,591 @@
+From 3b4708f914cb7d7c5b5b22a58cb541474351ffb4 Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Fri, 7 Mar 2025 10:24:17 +0800
+Subject: [PATCH] Add uart driver for ast2700
+
+This is based on Aspeed SDK 9.05.
+From linux-aspeed:
+769f62b7baa84d6998723b0ea60280e380183553
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
+---
+ drivers/tty/serial/8250/8250_aspeed.c | 506 ++++++++++++++++++++
+ drivers/tty/serial/8250/8250_aspeed_vuart.c | 2 +
+ drivers/tty/serial/8250/8250_early.c | 1 +
+ drivers/tty/serial/8250/Kconfig | 9 +
+ drivers/tty/serial/8250/Makefile | 1 +
+ 5 files changed, 519 insertions(+)
+ create mode 100644 drivers/tty/serial/8250/8250_aspeed.c
+
+diff --git a/drivers/tty/serial/8250/8250_aspeed.c b/drivers/tty/serial/8250/8250_aspeed.c
+new file mode 100644
+index 000000000..b2c43f3b8
+--- /dev/null
++++ b/drivers/tty/serial/8250/8250_aspeed.c
+@@ -0,0 +1,506 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (C) ASPEED Technology Inc.
++ */
++#include <linux/device.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/serial_8250.h>
++#include <linux/serial_reg.h>
++#include <linux/of.h>
++#include <linux/of_irq.h>
++#include <linux/of_platform.h>
++#include <linux/platform_device.h>
++#include <linux/clk.h>
++#include <linux/reset.h>
++#include <linux/dma-mapping.h>
++#include <linux/circ_buf.h>
++#include <linux/tty_flip.h>
++#include <linux/pm_runtime.h>
++#include <linux/soc/aspeed/aspeed-udma.h>
++
++#include "8250.h"
++
++#define DEVICE_NAME "aspeed-uart"
++
++/* offsets for the aspeed virtual uart registers */
++#define VUART_GCRA 0x20
++#define VUART_GCRA_VUART_EN BIT(0)
++#define VUART_GCRA_SIRQ_POLARITY BIT(1)
++#define VUART_GCRA_DISABLE_HOST_TX_DISCARD BIT(5)
++#define VUART_GCRB 0x24
++#define VUART_GCRB_HOST_SIRQ_MASK GENMASK(7, 4)
++#define VUART_GCRB_HOST_SIRQ_SHIFT 4
++#define VUART_ADDRL 0x28
++#define VUART_ADDRH 0x2c
++
++#define DMA_TX_BUFSZ PAGE_SIZE
++#define DMA_RX_BUFSZ (64 * 1024)
++
++struct uart_ops ast8250_pops;
++
++struct ast8250_vuart {
++ u32 port;
++ u32 sirq;
++ u32 sirq_pol;
++};
++
++struct ast8250_udma {
++ u32 ch;
++
++ u32 tx_rbsz;
++ u32 rx_rbsz;
++
++ dma_addr_t tx_addr;
++ dma_addr_t rx_addr;
++
++ struct circ_buf *tx_rb;
++ struct circ_buf *rx_rb;
++
++ bool tx_tmout_dis;
++ bool rx_tmout_dis;
++};
++
++struct ast8250_data {
++ int line;
++
++ u8 __iomem *regs;
++
++ bool is_vuart;
++ bool use_dma;
++
++ struct reset_control *rst;
++ struct clk *clk;
++
++ struct ast8250_vuart vuart;
++ struct ast8250_udma dma;
++};
++
++static void ast8250_dma_tx_complete(int tx_rb_rptr, void *id)
++{
++ u32 count;
++ unsigned long flags;
++ struct uart_port *port = (struct uart_port*)id;
++ struct ast8250_data *data = port->private_data;
++
++ spin_lock_irqsave(&port->lock, flags);
++
++ count = CIRC_CNT(tx_rb_rptr, port->state->xmit.tail, data->dma.tx_rbsz);
++ port->state->xmit.tail = tx_rb_rptr;
++ port->icount.tx += count;
++
++ if (uart_circ_chars_pending(&port->state->xmit) < WAKEUP_CHARS)
++ uart_write_wakeup(port);
++
++ spin_unlock_irqrestore(&port->lock, flags);
++}
++
++static void ast8250_dma_rx_complete(int rx_rb_wptr, void *id)
++{
++ unsigned long flags;
++ struct uart_port *up = (struct uart_port*)id;
++ struct tty_port *tp = &up->state->port;
++ struct ast8250_data *data = up->private_data;
++ struct ast8250_udma *dma = &data->dma;
++ struct circ_buf *rx_rb = dma->rx_rb;
++ u32 rx_rbsz = dma->rx_rbsz;
++ u32 count = 0;
++
++ spin_lock_irqsave(&up->lock, flags);
++
++ rx_rb->head = rx_rb_wptr;
++
++ dma_sync_single_for_cpu(up->dev,
++ dma->rx_addr, dma->rx_rbsz, DMA_FROM_DEVICE);
++
++ while (CIRC_CNT(rx_rb->head, rx_rb->tail, rx_rbsz)) {
++ count = CIRC_CNT_TO_END(rx_rb->head, rx_rb->tail, rx_rbsz);
++
++ tty_insert_flip_string(tp, rx_rb->buf + rx_rb->tail, count);
++
++ rx_rb->tail += count;
++ rx_rb->tail %= rx_rbsz;
++
++ up->icount.rx += count;
++ }
++
++ if (count) {
++ aspeed_udma_set_rx_rptr(data->dma.ch, rx_rb->tail);
++ tty_flip_buffer_push(tp);
++ }
++
++ spin_unlock_irqrestore(&up->lock, flags);
++}
++
++static void ast8250_dma_start_tx(struct uart_port *port)
++{
++ struct ast8250_data *data = port->private_data;
++ struct ast8250_udma *dma = &data->dma;
++ struct circ_buf *tx_rb = dma->tx_rb;
++
++ dma_sync_single_for_device(port->dev,
++ dma->tx_addr, dma->tx_rbsz, DMA_TO_DEVICE);
++
++ aspeed_udma_set_tx_wptr(dma->ch, tx_rb->head);
++}
++
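++/*
++ * Clone the generic 8250 uart_ops once and override start_tx so that
++ * transmission is kicked by advancing the UDMA TX write pointer instead
++ * of filling the UART FIFO from the interrupt path.
++ */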
++static void ast8250_dma_pops_hook(struct uart_port *port)
++{
++ static int first = 1;
++
++ if (first) {
++ ast8250_pops = *port->ops;
++ ast8250_pops.start_tx = ast8250_dma_start_tx;
++ }
++
++ first = 0;
++ port->ops = &ast8250_pops;
++}
++
++static void ast8250_vuart_init(struct ast8250_data *data)
++{
++ u8 reg;
++ struct ast8250_vuart *vuart = &data->vuart;
++
++ /* IO port address */
++ writeb((u8)(vuart->port >> 0), data->regs + VUART_ADDRL);
++ writeb((u8)(vuart->port >> 8), data->regs + VUART_ADDRH);
++
++ /* SIRQ number */
++ reg = readb(data->regs + VUART_GCRB);
++ reg &= ~VUART_GCRB_HOST_SIRQ_MASK;
++ reg |= ((vuart->sirq << VUART_GCRB_HOST_SIRQ_SHIFT) & VUART_GCRB_HOST_SIRQ_MASK);
++ writeb(reg, data->regs + VUART_GCRB);
++
++ /* SIRQ polarity */
++ reg = readb(data->regs + VUART_GCRA);
++ if (vuart->sirq_pol)
++ reg |= VUART_GCRA_SIRQ_POLARITY;
++ else
++ reg &= ~VUART_GCRA_SIRQ_POLARITY;
++ writeb(reg, data->regs + VUART_GCRA);
++}
++
++static void ast8250_vuart_set_host_tx_discard(struct ast8250_data *data, bool discard)
++{
++ u8 reg;
++
++ reg = readb(data->regs + VUART_GCRA);
++ if (discard)
++ reg &= ~VUART_GCRA_DISABLE_HOST_TX_DISCARD;
++ else
++ reg |= VUART_GCRA_DISABLE_HOST_TX_DISCARD;
++ writeb(reg, data->regs + VUART_GCRA);
++}
++
++static void ast8250_vuart_set_enable(struct ast8250_data *data, bool enable)
++{
++ u8 reg;
++
++ reg = readb(data->regs + VUART_GCRA);
++ if (enable)
++ reg |= VUART_GCRA_VUART_EN;
++ else
++ reg &= ~VUART_GCRA_VUART_EN;
++ writeb(reg, data->regs + VUART_GCRA);
++}
++
++static int ast8250_handle_irq(struct uart_port *port)
++{
++ u32 iir = port->serial_in(port, UART_IIR);
++ return serial8250_handle_irq(port, iir);
++}
++
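++/*
++ * Startup: stop discarding host TX for VUART ports, then, when "dma-mode"
++ * is set, map the TX/RX ring buffers for streaming DMA and claim the UDMA
++ * channels before handing off to the generic 8250 startup.
++ */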
++static int ast8250_startup(struct uart_port *port)
++{
++ int rc = 0;
++ struct ast8250_data *data = port->private_data;
++ struct ast8250_udma *dma;
++
++ if (data->is_vuart)
++ ast8250_vuart_set_host_tx_discard(data, false);
++
++ if (data->use_dma) {
++ dma = &data->dma;
++
++ dma->tx_rbsz = DMA_TX_BUFSZ;
++ dma->rx_rbsz = DMA_RX_BUFSZ;
++
++ /*
++ * We take the xmit buffer passed from upper layers as
++ * the DMA TX buffer and allocate a new buffer for the
++ * RX use.
++ *
++ * To keep the TX/RX operation consistency, we use the
++ * streaming DMA interface instead of the coherent one
++ */
++ dma->tx_rb = &port->state->xmit;
++ dma->rx_rb->buf = kzalloc(data->dma.rx_rbsz, GFP_KERNEL);
++ if (IS_ERR_OR_NULL(dma->rx_rb->buf)) {
++			dev_err(port->dev, "failed to allocate RX DMA buffer\n");
++ rc = -ENOMEM;
++ goto out;
++ }
++
++ dma->tx_addr = dma_map_single(port->dev, dma->tx_rb->buf,
++ dma->tx_rbsz, DMA_TO_DEVICE);
++ if (dma_mapping_error(port->dev, dma->tx_addr)) {
++ dev_err(port->dev, "failed to map streaming TX DMA region\n");
++ rc = -ENOMEM;
++ goto free_dma_n_out;
++ }
++
++ dma->rx_addr = dma_map_single(port->dev, dma->rx_rb->buf,
++ dma->rx_rbsz, DMA_FROM_DEVICE);
++ if (dma_mapping_error(port->dev, dma->rx_addr)) {
++ dev_err(port->dev, "failed to map streaming RX DMA region\n");
++ rc = -ENOMEM;
++ goto free_dma_n_out;
++ }
++
++ rc = aspeed_udma_request_tx_chan(dma->ch, dma->tx_addr,
++ dma->tx_rb, dma->tx_rbsz, ast8250_dma_tx_complete, port, dma->tx_tmout_dis);
++ if (rc) {
++ dev_err(port->dev, "failed to request DMA TX channel\n");
++ goto free_dma_n_out;
++ }
++
++ rc = aspeed_udma_request_rx_chan(dma->ch, dma->rx_addr,
++ dma->rx_rb, dma->rx_rbsz, ast8250_dma_rx_complete, port, dma->rx_tmout_dis);
++ if (rc) {
++ dev_err(port->dev, "failed to request DMA RX channel\n");
++ goto free_dma_n_out;
++ }
++
++ ast8250_dma_pops_hook(port);
++
++ aspeed_udma_tx_chan_ctrl(dma->ch, ASPEED_UDMA_OP_ENABLE);
++ aspeed_udma_rx_chan_ctrl(dma->ch, ASPEED_UDMA_OP_ENABLE);
++ }
++
++ memset(&port->icount, 0, sizeof(port->icount));
++ return serial8250_do_startup(port);
++
++free_dma_n_out:
++ kfree(dma->rx_rb->buf);
++out:
++ return rc;
++}
++
++static void ast8250_shutdown(struct uart_port *port)
++{
++ int rc;
++ struct ast8250_data *data = port->private_data;
++ struct ast8250_udma *dma;
++
++ if (data->use_dma) {
++ dma = &data->dma;
++
++ aspeed_udma_tx_chan_ctrl(dma->ch, ASPEED_UDMA_OP_RESET);
++ aspeed_udma_rx_chan_ctrl(dma->ch, ASPEED_UDMA_OP_RESET);
++
++ aspeed_udma_tx_chan_ctrl(dma->ch, ASPEED_UDMA_OP_DISABLE);
++ aspeed_udma_rx_chan_ctrl(dma->ch, ASPEED_UDMA_OP_DISABLE);
++
++ rc = aspeed_udma_free_tx_chan(dma->ch);
++ if (rc)
++ dev_err(port->dev, "failed to free DMA TX channel, rc=%d\n", rc);
++
++ rc = aspeed_udma_free_rx_chan(dma->ch);
++ if (rc)
++			dev_err(port->dev, "failed to free DMA RX channel, rc=%d\n", rc);
++
++ dma_unmap_single(port->dev, dma->tx_addr,
++ dma->tx_rbsz, DMA_TO_DEVICE);
++ dma_unmap_single(port->dev, dma->rx_addr,
++ dma->rx_rbsz, DMA_FROM_DEVICE);
++
++ if (dma->rx_rb->buf)
++ kfree(dma->rx_rb->buf);
++ }
++
++ if (data->is_vuart)
++ ast8250_vuart_set_host_tx_discard(data, true);
++
++ serial8250_do_shutdown(port);
++}
++
++static int __maybe_unused ast8250_suspend(struct device *dev)
++{
++ struct ast8250_data *data = dev_get_drvdata(dev);
++ serial8250_suspend_port(data->line);
++ return 0;
++}
++
++static int __maybe_unused ast8250_resume(struct device *dev)
++{
++ struct ast8250_data *data = dev_get_drvdata(dev);
++ serial8250_resume_port(data->line);
++ return 0;
++}
++
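++/*
++ * Probe: set a 64-bit DMA mask, map the UART registers, enable the clock
++ * and deassert the optional reset, parse the VUART/DMA device-tree
++ * properties, then register the port with the 8250 core.
++ */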
++static int ast8250_probe(struct platform_device *pdev)
++{
++ int rc;
++ struct uart_8250_port uart = {};
++ struct uart_port *port = &uart.port;
++ struct device *dev = &pdev->dev;
++ struct ast8250_data *data;
++
++ struct resource *res;
++	int irq;
++
++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
++ if (rc) {
++		dev_err(dev, "cannot set 64-bit DMA mask\n");
++ return rc;
++ }
++
++ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
++ if (data == NULL)
++ return -ENOMEM;
++
++	data->dma.rx_rb = devm_kzalloc(dev, sizeof(*data->dma.rx_rb), GFP_KERNEL);
++ if (data->dma.rx_rb == NULL)
++ return -ENOMEM;
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0) {
++ if (irq != -EPROBE_DEFER)
++ dev_err(dev, "failed to get IRQ number\n");
++ return irq;
++ }
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (res == NULL) {
++ dev_err(dev, "failed to get register base\n");
++ return -ENODEV;
++ }
++
++ data->regs = devm_ioremap(dev, res->start, resource_size(res));
++ if (IS_ERR(data->regs)) {
++ dev_err(dev, "failed to map registers\n");
++ return PTR_ERR(data->regs);
++ }
++
++ data->clk = devm_clk_get(dev, NULL);
++ if (IS_ERR(data->clk)) {
++ dev_err(dev, "failed to get clocks\n");
++ return -ENODEV;
++ }
++
++ rc = clk_prepare_enable(data->clk);
++ if (rc) {
++ dev_err(dev, "failed to enable clock\n");
++ return rc;
++ }
++
++ data->rst = devm_reset_control_get_optional_exclusive(dev, NULL);
++ if (!IS_ERR(data->rst))
++ reset_control_deassert(data->rst);
++
++ data->is_vuart = of_property_read_bool(dev->of_node, "virtual");
++ if (data->is_vuart) {
++ rc = of_property_read_u32(dev->of_node, "port", &data->vuart.port);
++ if (rc) {
++ dev_err(dev, "failed to get VUART port address\n");
++ return -ENODEV;
++ }
++
++ rc = of_property_read_u32(dev->of_node, "sirq", &data->vuart.sirq);
++ if (rc) {
++ dev_err(dev, "failed to get VUART SIRQ number\n");
++ return -ENODEV;
++ }
++
++ rc = of_property_read_u32(dev->of_node, "sirq-polarity", &data->vuart.sirq_pol);
++ if (rc) {
++ dev_err(dev, "failed to get VUART SIRQ polarity\n");
++ return -ENODEV;
++ }
++
++ ast8250_vuart_init(data);
++ ast8250_vuart_set_host_tx_discard(data, true);
++ ast8250_vuart_set_enable(data, true);
++ }
++
++ data->use_dma = of_property_read_bool(dev->of_node, "dma-mode");
++ if (data->use_dma) {
++ rc = of_property_read_u32(dev->of_node, "dma-channel", &data->dma.ch);
++ if (rc) {
++ dev_err(dev, "failed to get DMA channel\n");
++ return -ENODEV;
++ }
++
++ data->dma.tx_tmout_dis = of_property_read_bool(dev->of_node, "dma-tx-timeout-disable");
++ data->dma.rx_tmout_dis = of_property_read_bool(dev->of_node, "dma-rx-timeout-disable");
++ }
++
++ spin_lock_init(&port->lock);
++ port->dev = dev;
++ port->type = PORT_16550A;
++ port->irq = irq;
++ port->line = of_alias_get_id(dev->of_node, "serial");
++ port->handle_irq = ast8250_handle_irq;
++ port->mapbase = res->start;
++ port->mapsize = resource_size(res);
++ port->membase = data->regs;
++ port->uartclk = clk_get_rate(data->clk);
++ port->regshift = 2;
++ port->iotype = UPIO_MEM32;
++ port->flags = UPF_FIXED_TYPE | UPF_FIXED_PORT | UPF_SHARE_IRQ;
++ port->startup = ast8250_startup;
++ port->shutdown = ast8250_shutdown;
++ port->private_data = data;
++ uart.bugs |= UART_BUG_TXRACE;
++
++ data->line = serial8250_register_8250_port(&uart);
++ if (data->line < 0) {
++ dev_err(dev, "failed to register 8250 port\n");
++ return data->line;
++ }
++
++ pm_runtime_set_active(&pdev->dev);
++ pm_runtime_enable(&pdev->dev);
++
++ platform_set_drvdata(pdev, data);
++ return 0;
++}
++
++static int ast8250_remove(struct platform_device *pdev)
++{
++ struct ast8250_data *data = platform_get_drvdata(pdev);
++
++ if (data->is_vuart)
++ ast8250_vuart_set_enable(data, false);
++
++ serial8250_unregister_port(data->line);
++ return 0;
++}
++
++static const struct dev_pm_ops ast8250_pm_ops = {
++ SET_SYSTEM_SLEEP_PM_OPS(ast8250_suspend, ast8250_resume)
++};
++
++static const struct of_device_id ast8250_of_match[] = {
++ { .compatible = "aspeed,ast2500-uart" },
++ { .compatible = "aspeed,ast2600-uart" },
++ { .compatible = "aspeed,ast2700-uart" },
++ { },
++};
++
++static struct platform_driver ast8250_platform_driver = {
++ .driver = {
++ .name = DEVICE_NAME,
++ .pm = &ast8250_pm_ops,
++ .of_match_table = ast8250_of_match,
++ },
++ .probe = ast8250_probe,
++ .remove = ast8250_remove,
++};
++
++module_platform_driver(ast8250_platform_driver);
++
++MODULE_AUTHOR("Chia-Wei Wang <chiawei_wang@aspeedtech.com>");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("Aspeed UART Driver");
+diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
+index 4a9e71b2d..787153094 100644
+--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
++++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
+@@ -588,6 +588,8 @@ static int aspeed_vuart_remove(struct platform_device *pdev)
+ static const struct of_device_id aspeed_vuart_table[] = {
+ { .compatible = "aspeed,ast2400-vuart" },
+ { .compatible = "aspeed,ast2500-vuart" },
++ { .compatible = "aspeed,ast2600-vuart" },
++ { .compatible = "aspeed,ast2700-vuart" },
+ { },
+ };
+
+diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
+index e3f482fd3..9260b174c 100644
+--- a/drivers/tty/serial/8250/8250_early.c
++++ b/drivers/tty/serial/8250/8250_early.c
+@@ -170,6 +170,7 @@ OF_EARLYCON_DECLARE(ns16550, "ns16550", early_serial8250_setup);
+ OF_EARLYCON_DECLARE(ns16550a, "ns16550a", early_serial8250_setup);
+ OF_EARLYCON_DECLARE(uart, "nvidia,tegra20-uart", early_serial8250_setup);
+ OF_EARLYCON_DECLARE(uart, "snps,dw-apb-uart", early_serial8250_setup);
++OF_EARLYCON_DECLARE(uart, "aspeed,ast2700-uart", early_serial8250_setup);
+
+ #ifdef CONFIG_SERIAL_8250_OMAP
+
+diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
+index ee17cf5c4..263e7d1c8 100644
+--- a/drivers/tty/serial/8250/Kconfig
++++ b/drivers/tty/serial/8250/Kconfig
+@@ -255,6 +255,15 @@ config SERIAL_8250_ACCENT
+ To compile this driver as a module, choose M here: the module
+ will be called 8250_accent.
+
++config SERIAL_8250_ASPEED
++ tristate "Aspeed UART"
++ depends on SERIAL_8250 && ARCH_ASPEED
++ select ASPEED_UDMA
++ help
++	  If you have a system using an Aspeed AST26xx or AST27xx SoC and wish to
++ make use of its 16550A-compatible UART devices with DMA support,
++ say Y to this option. If unsure, say N.
++
+ config SERIAL_8250_ASPEED_VUART
+ tristate "Aspeed Virtual UART"
+ depends on SERIAL_8250
+diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
+index 628b75be3..74148767f 100644
+--- a/drivers/tty/serial/8250/Makefile
++++ b/drivers/tty/serial/8250/Makefile
+@@ -20,6 +20,7 @@ obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o
+ obj-$(CONFIG_SERIAL_8250_CS) += serial_cs.o
+ obj-$(CONFIG_SERIAL_8250_ACORN) += 8250_acorn.o
+ obj-$(CONFIG_SERIAL_8250_ASPEED_VUART) += 8250_aspeed_vuart.o
++obj-$(CONFIG_SERIAL_8250_ASPEED) += 8250_aspeed.o
+ obj-$(CONFIG_SERIAL_8250_BCM2835AUX) += 8250_bcm2835aux.o
+ obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o
+ obj-$(CONFIG_SERIAL_8250_FOURPORT) += 8250_fourport.o
+--
+2.34.1
+
diff --git a/recipes-kernel/linux/files/0008-fmc_spi_mtd.patch b/recipes-kernel/linux/files/0009-Add-SPI-and-MTD-driver-for-ast2700.patch
similarity index 94%
rename from recipes-kernel/linux/files/0008-fmc_spi_mtd.patch
rename to recipes-kernel/linux/files/0009-Add-SPI-and-MTD-driver-for-ast2700.patch
index 1f41758..6ac95e4 100644
--- a/recipes-kernel/linux/files/0008-fmc_spi_mtd.patch
+++ b/recipes-kernel/linux/files/0009-Add-SPI-and-MTD-driver-for-ast2700.patch
@@ -1,11 +1,17 @@
-From 4868062139fdea5671dcaa2ee6dfceca8dff365d Mon Sep 17 00:00:00 2001
-From: Shao-Chieh Chao <jieh.sc.chao@mail.foxconn.com>
-Date: Thu, 12 Dec 2024 09:42:06 +0800
-Subject: [PATCH] revise patch0008
+From e444e96841ef75b3c76dc46da4103583725656a4 Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Fri, 7 Mar 2025 10:29:46 +0800
+Subject: [PATCH] Add SPI and MTD driver for ast2700
+This is based on Aspeed SDK 9.05.
+From linux-aspeed:
+769f62b7baa84d6998723b0ea60280e380183553
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
---
drivers/mtd/spi-nor/core.c | 178 ++-
drivers/mtd/spi-nor/core.h | 12 +
+ drivers/mtd/spi-nor/everspin.c | 57 +-
drivers/mtd/spi-nor/gigadevice.c | 20 +-
drivers/mtd/spi-nor/issi.c | 6 +
drivers/mtd/spi-nor/macronix.c | 3 +
@@ -15,10 +21,9 @@
drivers/mtd/spi-nor/winbond.c | 85 +-
drivers/spi/Kconfig | 10 +
drivers/spi/Makefile | 1 +
- drivers/spi/spi-aspeed-smc.c | 1881 +++++++++++++++++++++++++-----
+ drivers/spi/spi-aspeed-smc.c | 1891 +++++++++++++++++++++++++-----
drivers/spi/spi-aspeed-txrx.c | 643 ++++++++++
- include/linux/mtd/spi-nor.h | 6 +
- 14 files changed, 2513 insertions(+), 341 deletions(-)
+ 14 files changed, 2529 insertions(+), 386 deletions(-)
create mode 100644 drivers/spi/spi-aspeed-txrx.c
diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
@@ -336,6 +341,75 @@
+int spi_nor_prep_and_lock_rd(struct spi_nor *nor, loff_t start, size_t len);
+
#endif /* __LINUX_MTD_SPI_NOR_INTERNAL_H */
+diff --git a/drivers/mtd/spi-nor/everspin.c b/drivers/mtd/spi-nor/everspin.c
+index ae815af53..84a07c2e0 100644
+--- a/drivers/mtd/spi-nor/everspin.c
++++ b/drivers/mtd/spi-nor/everspin.c
+@@ -1,48 +1,23 @@
+-// We replace the everspin spi nor file as it has a part that we will
+-// never use with gBMC systems.
+-#include <linux/mtd/spi-nor.h>
+-#include <linux/version.h>
+-#include "core.h"
+-
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(5,17,0)
+-#define SNOR_ID(a, b, c) (((a) << 16) | ((b) << 8) | (c))
+-#define GINFO(name, id, sec) { (name), INFO((id), 0, 64 * 1024, (sec), SECT_4K) }
+-#elif LINUX_VERSION_CODE < KERNEL_VERSION(6,7,0)
+-#define SNOR_ID(a, b, c) (((a) << 16) | ((b) << 8) | (c))
+-#define GINFO(name, id, sec) { (name), INFO((id), 0, 64 * 1024, (sec)) NO_SFDP_FLAGS(SECT_4K) }
+-#else
+-#define GINFO(gname, gid, sec) { \
+- .id = (gid), \
+- .name = (gname), \
+- .size = 64 * 1024 * (sec), \
+- .no_sfdp_flags = SECT_4K, \
+-}
+-#endif
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (C) 2005, Intec Automation Inc.
++ * Copyright (C) 2014, Freescale Semiconductor, Inc.
++ */
+
+-static const struct flash_info google_nor_parts[] = {
+- GINFO("hothb2-512", SNOR_ID(0x26, 0x02, 0x17), 512),
+- GINFO("hothb2-1024", SNOR_ID(0x26, 0x02, 0x1a), 1024),
+- GINFO("dauntlessd2-1024", SNOR_ID(0x26, 0x22, 0x1a), 1024),
+- GINFO("dauntlessd2-2048", SNOR_ID(0x26, 0x22, 0x1b), 2048),
+-};
++#include <linux/mtd/spi-nor.h>
+
+-static void google_nor_default_init(struct spi_nor *nor)
+-{
+- nor->params->quad_enable = spi_nor_sr1_bit6_quad_enable;
+-#if LINUX_VERSION_CODE < KERNEL_VERSION(6,4,0)
+- nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode;
+-#else
+- nor->params->set_4byte_addr_mode = spi_nor_set_4byte_addr_mode_en4b_ex4b;
+-#endif
+-}
++#include "core.h"
+
+-static const struct spi_nor_fixups google_nor_fixups = {
+- .default_init = google_nor_default_init,
++static const struct flash_info everspin_nor_parts[] = {
++ /* Everspin */
++ { "mr25h128", CAT25_INFO(16 * 1024, 1, 256, 2) },
++ { "mr25h256", CAT25_INFO(32 * 1024, 1, 256, 2) },
++ { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3) },
++ { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3) },
+ };
+
+ const struct spi_nor_manufacturer spi_nor_everspin = {
+- .name = "google",
+- .parts = google_nor_parts,
+- .nparts = ARRAY_SIZE(google_nor_parts),
+- .fixups = &google_nor_fixups,
++ .name = "everspin",
++ .parts = everspin_nor_parts,
++ .nparts = ARRAY_SIZE(everspin_nor_parts),
+ };
diff --git a/drivers/mtd/spi-nor/gigadevice.c b/drivers/mtd/spi-nor/gigadevice.c
index d57ddaf15..e96468ad4 100644
--- a/drivers/mtd/spi-nor/gigadevice.c
@@ -603,7 +677,7 @@
obj-$(CONFIG_SPI_ATMEL_QUADSPI) += atmel-quadspi.o
obj-$(CONFIG_SPI_AT91_USART) += spi-at91-usart.o
diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c
-index 21b0fa646..0b9caae23 100644
+index 21b0fa646..5acb646fb 100644
--- a/drivers/spi/spi-aspeed-smc.c
+++ b/drivers/spi/spi-aspeed-smc.c
@@ -7,10 +7,15 @@
@@ -786,7 +860,7 @@
+ if (field == SPI_OP_ALL || field == SPI_OP_CMD) {
+ if (op->cmd.buswidth == 4)
+ return CTRL_IO_QUAD_IO;
-+ }
+ }
+
+ if (field == SPI_OP_ALL || field == SPI_OP_ADDR) {
+ if (op->addr.buswidth == 4)
@@ -800,7 +874,7 @@
+ return CTRL_IO_QUAD_DATA;
+ else if (op->data.buswidth == 2)
+ return CTRL_IO_DUAL_DATA;
- }
++ }
+
+ return CTRL_IO_SINGLE_DATA;
}
@@ -846,7 +920,7 @@
return 0;
}
-@@ -230,26 +316,35 @@ static ssize_t aspeed_spi_read_user(struct aspeed_spi_chip *chip,
+@@ -230,58 +316,88 @@ static ssize_t aspeed_spi_read_user(struct aspeed_spi_chip *chip,
const struct spi_mem_op *op,
u64 offset, size_t len, void *buf)
{
@@ -867,7 +941,8 @@
+ aspeed_spi_set_io_mode(chip, io_mode);
+ ret = aspeed_spi_send_addr(chip, op->addr.nbytes, op->addr.val);
if (ret < 0)
- return ret;
+- return ret;
++ goto stop_user;
if (op->dummy.buswidth && op->dummy.nbytes) {
- for (i = 0; i < op->dummy.nbytes / op->dummy.buswidth; i++)
@@ -882,12 +957,14 @@
-
aspeed_spi_read_from_ahb(buf, chip->ahb_base, len);
+
++stop_user:
aspeed_spi_stop_user(chip);
+- return 0;
+
- return 0;
++ return ret;
}
-@@ -257,31 +352,50 @@ static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip,
+ static ssize_t aspeed_spi_write_user(struct aspeed_spi_chip *chip,
const struct spi_mem_op *op)
{
int ret;
@@ -904,15 +981,18 @@
+ aspeed_spi_set_io_mode(chip, io_mode);
+ ret = aspeed_spi_send_addr(chip, op->addr.nbytes, op->addr.val);
if (ret < 0)
- return ret;
+- return ret;
++ goto stop_user;
+
+ io_mode = aspeed_spi_get_io_mode(op, SPI_OP_DATA);
+ aspeed_spi_set_io_mode(chip, io_mode);
aspeed_spi_write_to_ahb(chip->ahb_base, op->data.buf.out, op->data.nbytes);
+
++stop_user:
aspeed_spi_stop_user(chip);
+- return 0;
+
- return 0;
++ return ret;
}
/* support for 1-1-1, 1-1-2 or 1-1-4 */
@@ -941,7 +1021,7 @@
return false;
}
-@@ -297,8 +411,6 @@ static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
+@@ -297,8 +413,6 @@ static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
{
struct aspeed_spi *aspi = spi_controller_get_devdata(mem->spi->controller);
struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(mem->spi, 0)];
@@ -950,7 +1030,7 @@
int ret = 0;
dev_dbg(aspi->dev,
-@@ -308,40 +420,6 @@ static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
+@@ -308,40 +422,6 @@ static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
op->dummy.buswidth, op->data.buswidth,
op->addr.nbytes, op->dummy.nbytes, op->data.nbytes);
@@ -991,7 +1071,7 @@
if (op->data.dir == SPI_MEM_DATA_IN) {
if (!op->addr.nbytes)
ret = aspeed_spi_read_reg(chip, op);
-@@ -356,12 +434,520 @@ static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
+@@ -356,12 +436,520 @@ static int do_aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *o
}
/* Restore defaults */
@@ -1514,7 +1594,7 @@
static int aspeed_spi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
int ret;
-@@ -381,171 +967,310 @@ static const char *aspeed_spi_get_name(struct spi_mem *mem)
+@@ -381,171 +969,310 @@ static const char *aspeed_spi_get_name(struct spi_mem *mem)
spi_get_chipselect(mem->spi, 0));
}
@@ -1555,7 +1635,7 @@
- windows[cs].offset, windows[cs].size);
+ if (aspi->chips[cs].ahb_base)
+ devm_iounmap(dev, aspi->chips[cs].ahb_base);
-+ }
+ }
+
+ for (cs = 0; cs < aspi->data->max_cs; cs++) {
+ seg_reg = seg_reg_base + cs * 4;
@@ -1625,7 +1705,7 @@
+ (u64)start, (u64)end - 1);
+ return -ENOMEM;
+ }
- }
++ }
+
+ return 0;
}
@@ -1659,19 +1739,19 @@
- chip->ahb_window_size = win->size;
+ for (cs = 0; cs < aspi->num_cs; cs++)
+ aspi->chips[cs].ahb_window_sz = aspi->data->min_window_sz;
-+
-+ /* Close unused CS */
-+ for (cs = aspi->num_cs; cs < aspi->data->max_cs; cs++)
-+ aspi->chips[cs].ahb_window_sz = 0;
- dev_dbg(aspi->dev, "CE%d default window [ 0x%.8x - 0x%.8x ] %dMB",
- chip->cs, aspi->ahb_base_phy + win->offset,
- aspi->ahb_base_phy + win->offset + win->size - 1,
- win->size >> 20);
-+ if (aspi->data->adjust_window)
-+ aspi->data->adjust_window(aspi);
++ /* Close unused CS */
++ for (cs = aspi->num_cs; cs < aspi->data->max_cs; cs++)
++ aspi->chips[cs].ahb_window_sz = 0;
- return chip->ahb_window_size ? 0 : -1;
++ if (aspi->data->adjust_window)
++ aspi->data->adjust_window(aspi);
++
+ return aspeed_spi_set_window(aspi);
}
@@ -1731,8 +1811,7 @@
+
+ return 0;
+}
-
-- writel(seg_val, seg_reg);
++
+static int aspeed_adjust_window_ast2400(struct aspeed_spi *aspi)
+{
+ int ret;
@@ -1771,6 +1850,49 @@
+ chips[cs].ahb_window_sz = aspi->data->min_window_sz;
+ }
+- writel(seg_val, seg_reg);
++ /*
++ * If command mode or normal mode is used, the start address of a
++ * decoding range should be a multiple of its related flash size.
++ * Namely, the total decoding size from flash 0 to flash N should
++ * be a multiple of the size of flash (N + 1).
++ */
++ for (cs = aspi->num_cs - 1; cs >= 0; cs--) {
++ pre_sz = 0;
++ for (i = 0; i < cs; i++)
++ pre_sz += chips[i].ahb_window_sz;
++
++ if (chips[cs].ahb_window_sz != 0 &&
++ (pre_sz % chips[cs].ahb_window_sz) != 0) {
++ extra_sz = chips[cs].ahb_window_sz -
++ (pre_sz % chips[cs].ahb_window_sz);
++ chips[0].ahb_window_sz += extra_sz;
++ }
++ }
++
++ ret = aspeed_spi_trim_window_size(aspi);
++ if (ret != 0)
++ return ret;
++
++ if (aspi->data == &ast2500_spi_data)
++ chips[1].ahb_window_sz = 0x08000000 - chips[0].ahb_window_sz;
++
++ return 0;
++}
++
++static int aspeed_adjust_window_ast2600(struct aspeed_spi *aspi)
++{
++ int ret;
++ int i;
++ int cs;
++ size_t pre_sz;
++ size_t extra_sz;
++ struct aspeed_spi_chip *chips = aspi->chips;
++
++ /* Close unused CS. */
++ for (cs = aspi->num_cs; cs < aspi->data->max_cs; cs++)
++ chips[cs].ahb_window_sz = 0;
+
/*
- * Restore initial value if something goes wrong else we could
- * loose access to the chip.
@@ -1805,48 +1927,6 @@
+ ret = aspeed_spi_trim_window_size(aspi);
+ if (ret != 0)
+ return ret;
-+
-+ if (aspi->data == &ast2500_spi_data)
-+ chips[1].ahb_window_sz = 0x08000000 - chips[0].ahb_window_sz;
-+
-+ return 0;
-+}
-+
-+static int aspeed_adjust_window_ast2600(struct aspeed_spi *aspi)
-+{
-+ int ret;
-+ int i;
-+ int cs;
-+ size_t pre_sz;
-+ size_t extra_sz;
-+ struct aspeed_spi_chip *chips = aspi->chips;
-+
-+ /* Close unused CS. */
-+ for (cs = aspi->num_cs; cs < aspi->data->max_cs; cs++)
-+ chips[cs].ahb_window_sz = 0;
-+
-+ /*
-+ * If commnad mode or normal mode is used, the start address of a
-+ * decoding range should be multiple of its related flash size.
-+ * Namely, the total decoding size from flash 0 to flash N should
-+ * be multiple of the size of flash (N + 1).
-+ */
-+ for (cs = aspi->num_cs - 1; cs >= 0; cs--) {
-+ pre_sz = 0;
-+ for (i = 0; i < cs; i++)
-+ pre_sz += chips[i].ahb_window_sz;
-+
-+ if (chips[cs].ahb_window_sz != 0 &&
-+ (pre_sz % chips[cs].ahb_window_sz) != 0) {
-+ extra_sz = chips[cs].ahb_window_sz -
-+ (pre_sz % chips[cs].ahb_window_sz);
-+ chips[0].ahb_window_sz += extra_sz;
-+ }
-+ }
-+
-+ ret = aspeed_spi_trim_window_size(aspi);
-+ if (ret != 0)
-+ return ret;
return 0;
}
@@ -1937,7 +2017,7 @@
return 0;
}
-@@ -557,6 +1282,9 @@ static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
+@@ -557,6 +1284,9 @@ static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(desc->mem->spi, 0)];
struct spi_mem_op *op = &desc->info.op_tmpl;
u32 ctl_val;
@@ -1947,7 +2027,7 @@
int ret = 0;
dev_dbg(aspi->dev,
-@@ -569,50 +1297,108 @@ static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
+@@ -569,50 +1299,108 @@ static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)
chip->clk_freq = desc->mem->spi->max_speed_hz;
@@ -2088,7 +2168,7 @@
return ret;
}
-@@ -624,7 +1410,7 @@ static ssize_t aspeed_spi_dirmap_read(struct spi_mem_dirmap_desc *desc,
+@@ -624,7 +1412,7 @@ static ssize_t aspeed_spi_dirmap_read(struct spi_mem_dirmap_desc *desc,
struct aspeed_spi_chip *chip = &aspi->chips[spi_get_chipselect(desc->mem->spi, 0)];
/* Switch to USER command mode if mapping window is too small */
@@ -2097,7 +2177,7 @@
int ret;
ret = aspeed_spi_read_user(chip, &desc->info.op_tmpl, offset, len, buf);
-@@ -645,6 +1431,48 @@ static const struct spi_controller_mem_ops aspeed_spi_mem_ops = {
+@@ -645,6 +1433,48 @@ static const struct spi_controller_mem_ops aspeed_spi_mem_ops = {
.dirmap_read = aspeed_spi_dirmap_read,
};
@@ -2146,7 +2226,7 @@
static void aspeed_spi_chip_set_type(struct aspeed_spi *aspi, unsigned int cs, int type)
{
u32 reg;
-@@ -673,6 +1501,8 @@ static int aspeed_spi_setup(struct spi_device *spi)
+@@ -673,6 +1503,8 @@ static int aspeed_spi_setup(struct spi_device *spi)
const struct aspeed_spi_data *data = aspi->data;
unsigned int cs = spi_get_chipselect(spi, 0);
struct aspeed_spi_chip *chip = &aspi->chips[cs];
@@ -2155,7 +2235,7 @@
chip->aspi = aspi;
chip->cs = cs;
-@@ -682,15 +1512,20 @@ static int aspeed_spi_setup(struct spi_device *spi)
+@@ -682,15 +1514,20 @@ static int aspeed_spi_setup(struct spi_device *spi)
if (data->hastype)
aspeed_spi_chip_set_type(aspi, cs, CONFIG_TYPE_SPI);
@@ -2181,7 +2261,7 @@
dev_dbg(aspi->dev, "CE%d setup done\n", cs);
return 0;
}
-@@ -720,7 +1555,8 @@ static int aspeed_spi_probe(struct platform_device *pdev)
+@@ -720,7 +1557,8 @@ static int aspeed_spi_probe(struct platform_device *pdev)
struct spi_controller *ctlr;
struct aspeed_spi *aspi;
struct resource *res;
@@ -2191,7 +2271,7 @@
data = of_device_get_match_data(&pdev->dev);
if (!data)
-@@ -735,20 +1571,16 @@ static int aspeed_spi_probe(struct platform_device *pdev)
+@@ -735,20 +1573,16 @@ static int aspeed_spi_probe(struct platform_device *pdev)
aspi->data = data;
aspi->dev = dev;
@@ -2217,7 +2297,7 @@
if (IS_ERR(aspi->clk)) {
dev_err(dev, "missing clock\n");
return PTR_ERR(aspi->clk);
-@@ -760,31 +1592,140 @@ static int aspeed_spi_probe(struct platform_device *pdev)
+@@ -760,31 +1594,140 @@ static int aspeed_spi_probe(struct platform_device *pdev)
return -EINVAL;
}
@@ -2368,7 +2448,7 @@
return ret;
}
-@@ -792,8 +1733,14 @@ static void aspeed_spi_remove(struct platform_device *pdev)
+@@ -792,8 +1735,14 @@ static void aspeed_spi_remove(struct platform_device *pdev)
{
struct aspeed_spi *aspi = platform_get_drvdata(pdev);
@@ -2384,7 +2464,7 @@
}
/*
-@@ -805,19 +1752,20 @@ static void aspeed_spi_remove(struct platform_device *pdev)
+@@ -805,19 +1754,20 @@ static void aspeed_spi_remove(struct platform_device *pdev)
* The address range is encoded with absolute addresses in the overall
* mapping window.
*/
@@ -2409,7 +2489,7 @@
}
/*
-@@ -827,18 +1775,18 @@ static u32 aspeed_spi_segment_reg(struct aspeed_spi *aspi, u32 start, u32 end)
+@@ -827,18 +1777,18 @@ static u32 aspeed_spi_segment_reg(struct aspeed_spi *aspi, u32 start, u32 end)
#define AST2600_SEG_ADDR_MASK 0x0ff00000
@@ -2432,7 +2512,7 @@
/* segment is disabled */
if (!end_offset)
-@@ -848,28 +1796,193 @@ static u32 aspeed_spi_segment_ast2600_end(struct aspeed_spi *aspi,
+@@ -848,28 +1798,193 @@ static u32 aspeed_spi_segment_ast2600_end(struct aspeed_spi *aspi,
}
static u32 aspeed_spi_segment_ast2600_reg(struct aspeed_spi *aspi,
@@ -2631,7 +2711,7 @@
memcpy_fromio(test_buf, chip->ahb_base, CALIBRATE_BUF_SIZE);
if (memcmp(test_buf, golden_buf, CALIBRATE_BUF_SIZE) != 0) {
#if defined(VERBOSE_DEBUG)
-@@ -882,7 +1995,10 @@ static bool aspeed_spi_check_reads(struct aspeed_spi_chip *chip,
+@@ -882,7 +1997,10 @@ static bool aspeed_spi_check_reads(struct aspeed_spi_chip *chip,
return true;
}
@@ -2643,7 +2723,7 @@
/*
* The timing register is shared by all devices. Only update for CE0.
-@@ -912,7 +2028,7 @@ static int aspeed_spi_calibrate(struct aspeed_spi_chip *chip, u32 hdiv,
+@@ -912,7 +2030,7 @@ static int aspeed_spi_calibrate(struct aspeed_spi_chip *chip, u32 hdiv,
pass = aspeed_spi_check_reads(chip, golden_buf, test_buf);
dev_dbg(aspi->dev,
" * [%08x] %d HCLK delay, %dns DI delay : %s",
@@ -2652,7 +2732,7 @@
pass ? "PASS" : "FAIL");
if (pass) {
pass_count++;
-@@ -958,17 +2074,6 @@ static bool aspeed_spi_check_calib_data(const u8 *test_buf, u32 size)
+@@ -958,17 +2076,6 @@ static bool aspeed_spi_check_calib_data(const u8 *test_buf, u32 size)
return cnt >= 64;
}
@@ -2670,7 +2750,7 @@
static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip)
{
struct aspeed_spi *aspi = chip->aspi;
-@@ -979,10 +2084,17 @@ static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip)
+@@ -979,10 +2086,17 @@ static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip)
u8 *golden_buf = NULL;
u8 *test_buf = NULL;
int i, rc, best_div = -1;
@@ -2688,7 +2768,7 @@
/*
* use the related low frequency to get check calibration data
* and get golden data.
-@@ -998,7 +2110,7 @@ static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip)
+@@ -998,7 +2112,7 @@ static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip)
memcpy_fromio(golden_buf, chip->ahb_base, CALIBRATE_BUF_SIZE);
if (!aspeed_spi_check_calib_data(golden_buf, CALIBRATE_BUF_SIZE)) {
@@ -2697,7 +2777,7 @@
goto no_calib;
}
-@@ -1008,42 +2120,81 @@ static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip)
+@@ -1008,42 +2122,81 @@ static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip)
#endif
/* Now we iterate the HCLK dividers until we find our breaking point */
@@ -2791,7 +2871,7 @@
#define TIMING_REG_AST2600(chip) \
((chip)->aspi->regs + (chip)->aspi->data->timing + \
(chip)->cs * 4)
-@@ -1051,60 +2202,104 @@ static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip)
+@@ -1051,60 +2204,104 @@ static int aspeed_spi_do_calibration(struct aspeed_spi_chip *chip)
static int aspeed_spi_ast2600_calibrate(struct aspeed_spi_chip *chip, u32 hdiv,
const u8 *golden_buf, u8 *test_buf)
{
@@ -2927,7 +3007,7 @@
.max_cs = 5,
.hastype = true,
.we0 = 16,
-@@ -1112,13 +2307,17 @@ static const struct aspeed_spi_data ast2400_fmc_data = {
+@@ -1112,13 +2309,17 @@ static const struct aspeed_spi_data ast2400_fmc_data = {
.timing = CE0_TIMING_COMPENSATION_REG,
.hclk_mask = 0xfffff0ff,
.hdiv_max = 1,
@@ -2945,7 +3025,7 @@
.max_cs = 1,
.hastype = false,
.we0 = 0,
-@@ -1126,11 +2325,13 @@ static const struct aspeed_spi_data ast2400_spi_data = {
+@@ -1126,11 +2327,13 @@ static const struct aspeed_spi_data ast2400_spi_data = {
.timing = 0x14,
.hclk_mask = 0xfffff0ff,
.hdiv_max = 1,
@@ -2959,7 +3039,7 @@
.max_cs = 3,
.hastype = true,
.we0 = 16,
-@@ -1138,13 +2339,17 @@ static const struct aspeed_spi_data ast2500_fmc_data = {
+@@ -1138,13 +2341,17 @@ static const struct aspeed_spi_data ast2500_fmc_data = {
.timing = CE0_TIMING_COMPENSATION_REG,
.hclk_mask = 0xffffd0ff,
.hdiv_max = 1,
@@ -2977,7 +3057,7 @@
.max_cs = 2,
.hastype = false,
.we0 = 16,
-@@ -1152,13 +2357,17 @@ static const struct aspeed_spi_data ast2500_spi_data = {
+@@ -1152,13 +2359,17 @@ static const struct aspeed_spi_data ast2500_spi_data = {
.timing = CE0_TIMING_COMPENSATION_REG,
.hclk_mask = 0xffffd0ff,
.hdiv_max = 1,
@@ -2995,7 +3075,7 @@
.max_cs = 3,
.hastype = false,
.mode_bits = SPI_RX_QUAD | SPI_TX_QUAD,
-@@ -1167,13 +2376,17 @@ static const struct aspeed_spi_data ast2600_fmc_data = {
+@@ -1167,13 +2378,17 @@ static const struct aspeed_spi_data ast2600_fmc_data = {
.timing = CE0_TIMING_COMPENSATION_REG,
.hclk_mask = 0xf0fff0ff,
.hdiv_max = 2,
@@ -3013,7 +3093,7 @@
.max_cs = 2,
.hastype = false,
.mode_bits = SPI_RX_QUAD | SPI_TX_QUAD,
-@@ -1182,10 +2395,50 @@ static const struct aspeed_spi_data ast2600_spi_data = {
+@@ -1182,10 +2397,50 @@ static const struct aspeed_spi_data ast2600_spi_data = {
.timing = CE0_TIMING_COMPENSATION_REG,
.hclk_mask = 0xf0fff0ff,
.hdiv_max = 2,
@@ -3064,7 +3144,7 @@
};
static const struct of_device_id aspeed_spi_matches[] = {
-@@ -1195,6 +2448,8 @@ static const struct of_device_id aspeed_spi_matches[] = {
+@@ -1195,6 +2450,8 @@ static const struct of_device_id aspeed_spi_matches[] = {
{ .compatible = "aspeed,ast2500-spi", .data = &ast2500_spi_data },
{ .compatible = "aspeed,ast2600-fmc", .data = &ast2600_fmc_data },
{ .compatible = "aspeed,ast2600-spi", .data = &ast2600_spi_data },
@@ -3722,35 +3802,6 @@
+MODULE_AUTHOR("Chin-Ting Kuo");
+MODULE_LICENSE("GPL");
+
-diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
-index cdcfe0fd2..7dc2eea25 100644
---- a/include/linux/mtd/spi-nor.h
-+++ b/include/linux/mtd/spi-nor.h
-@@ -21,6 +21,7 @@
- /* Flash opcodes. */
- #define SPINOR_OP_WRDI 0x04 /* Write disable */
- #define SPINOR_OP_WREN 0x06 /* Write enable */
-+#define SPINOR_OP_VSR_WREN 0x50 /* Write enable for volatile register */
- #define SPINOR_OP_RDSR 0x05 /* Read status register */
- #define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */
- #define SPINOR_OP_RDSR2 0x3f /* Read status register 2 */
-@@ -80,6 +81,9 @@
- /* Used for SST flashes only. */
- #define SPINOR_OP_BP 0x02 /* Byte program */
- #define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */
-+#define SPINOR_OP_SST_RDNVCR 0xB5 /* Read nonvolatile configuration register */
-+#define SPINOR_OP_SST_WRNVCR 0xB1 /* Write nonvolatile configuration register */
-+#define SPINOR_SST_RST_HOLD_CTRL BIT(4) /* Nonvolatile configuration register bit 4*/
-
- /* Used for Macronix and Winbond flashes. */
- #define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */
-@@ -450,4 +454,6 @@ static inline struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
- int spi_nor_scan(struct spi_nor *nor, const char *name,
- const struct spi_nor_hwcaps *hwcaps);
-
-+u32 spi_nor_convert_addr(struct spi_nor *nor, loff_t addr);
-+
- #endif
--
2.34.1
diff --git a/recipes-kernel/linux/files/0009-Enable-wdt-driver.patch b/recipes-kernel/linux/files/0009-Enable-wdt-driver.patch
deleted file mode 100644
index f82ad60..0000000
--- a/recipes-kernel/linux/files/0009-Enable-wdt-driver.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From 0905536fb62e463c21191d4578edf51b6bafd670 Mon Sep 17 00:00:00 2001
-From: wukaihua <eason.kh.wu@fii-na.corp-partner.google.com>
-Date: Mon, 25 Nov 2024 13:58:44 +0800
-Subject: [PATCH] Enable wdt driver
-
-Enable watchdog driver for ast2700.
-
-Signed-off-by: wukaihua <eason.kh.wu@fii-na.corp-partner.google.com>
----
- drivers/watchdog/aspeed_wdt.c | 9 +++++++--
- 1 file changed, 7 insertions(+), 2 deletions(-)
-
-diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
-index b4773a6aaf8c..8556b93c4386 100644
---- a/drivers/watchdog/aspeed_wdt.c
-+++ b/drivers/watchdog/aspeed_wdt.c
-@@ -57,6 +57,7 @@ static const struct of_device_id aspeed_wdt_of_table[] = {
- { .compatible = "aspeed,ast2400-wdt", .data = &ast2400_config },
- { .compatible = "aspeed,ast2500-wdt", .data = &ast2500_config },
- { .compatible = "aspeed,ast2600-wdt", .data = &ast2600_config },
-+ { .compatible = "aspeed,ast2700-wdt", .data = &ast2600_config },
- { },
- };
- MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
-@@ -69,7 +70,8 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
- #define WDT_CTRL_RESET_MODE_SOC (0x00 << 5)
- #define WDT_CTRL_RESET_MODE_FULL_CHIP (0x01 << 5)
- #define WDT_CTRL_RESET_MODE_ARM_CPU (0x10 << 5)
--#define WDT_CTRL_1MHZ_CLK BIT(4)
-+#define WDT_CTRL_RST_SOC BIT(4)
-+#define WDT_CTRL_1MHZ_CLK BIT(4) /* AST2400 only */
- #define WDT_CTRL_WDT_EXT BIT(3)
- #define WDT_CTRL_WDT_INTR BIT(2)
- #define WDT_CTRL_RESET_SYSTEM BIT(1)
-@@ -373,13 +375,16 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
- ret = of_property_read_string(np, "aspeed,reset-type", &reset_type);
- if (ret) {
- wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC | WDT_CTRL_RESET_SYSTEM;
-+ if (!of_device_is_compatible(np, "aspeed,ast2400-wdt"))
-+ wdt->ctrl |= WDT_CTRL_RST_SOC;
- } else {
- if (!strcmp(reset_type, "cpu"))
- wdt->ctrl |= WDT_CTRL_RESET_MODE_ARM_CPU |
- WDT_CTRL_RESET_SYSTEM;
- else if (!strcmp(reset_type, "soc"))
- wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC |
-- WDT_CTRL_RESET_SYSTEM;
-+ WDT_CTRL_RESET_SYSTEM |
-+ WDT_CTRL_RST_SOC;
- else if (!strcmp(reset_type, "system"))
- wdt->ctrl |= WDT_CTRL_RESET_MODE_FULL_CHIP |
- WDT_CTRL_RESET_SYSTEM;
---
-2.34.1
-
diff --git a/recipes-kernel/linux/files/0010-Add-crypto-driver-for-ast2700.patch b/recipes-kernel/linux/files/0010-Add-crypto-driver-for-ast2700.patch
new file mode 100644
index 0000000..52b222c
--- /dev/null
+++ b/recipes-kernel/linux/files/0010-Add-crypto-driver-for-ast2700.patch
@@ -0,0 +1,793 @@
+From 7eb009685638eb63381644c28b740c217c76295a Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Fri, 7 Mar 2025 10:59:37 +0800
+Subject: [PATCH] Add crypto driver for ast2700
+
+This is based on Aspeed SDK 9.05.
+From linux-aspeed:
+769f62b7baa84d6998723b0ea60280e380183553
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
+---
+ crypto/Kconfig | 9 +
+ crypto/Makefile | 1 +
+ crypto/af_alg.c | 30 ++-
+ crypto/algif_aead.c | 46 ++--
+ crypto/algif_akcipher.c | 457 ++++++++++++++++++++++++++++++++++++++++
+ crypto/algif_skcipher.c | 35 +--
+ crypto/tcrypt.c | 12 +-
+ 7 files changed, 550 insertions(+), 40 deletions(-)
+ create mode 100644 crypto/algif_akcipher.c
+
+diff --git a/crypto/Kconfig b/crypto/Kconfig
+index fc0f75d8b..149b72322 100644
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -1387,6 +1387,15 @@ config CRYPTO_USER_API_AEAD
+ See Documentation/crypto/userspace-if.rst and
+ https://www.chronox.de/libkcapi/html/index.html
+
++config CRYPTO_USER_API_AKCIPHER
++ tristate "User-space interface for asymmetric key cipher algorithms"
++ depends on NET
++ select CRYPTO_AKCIPHER2
++ select CRYPTO_USER_API
++ help
++ This option enables the user-space interface for asymmetric
++ key cipher algorithms.
++
+ config CRYPTO_USER_API_ENABLE_OBSOLETE
+ bool "Obsolete cryptographic algorithms"
+ depends on CRYPTO_USER_API
+diff --git a/crypto/Makefile b/crypto/Makefile
+index 953a7e105..35fb9f1bf 100644
+--- a/crypto/Makefile
++++ b/crypto/Makefile
+@@ -181,6 +181,7 @@ obj-$(CONFIG_CRYPTO_USER_API_HASH) += algif_hash.o
+ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
+ obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
+ obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
++obj-$(CONFIG_CRYPTO_USER_API_AKCIPHER) += algif_akcipher.o
+ obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o
+ obj-$(CONFIG_CRYPTO_OFB) += ofb.o
+ obj-$(CONFIG_CRYPTO_ECC) += ecc.o
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index 68cc9290c..b92ad73b3 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -205,13 +205,17 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+ return err;
+ }
+
+-static int alg_setkey(struct sock *sk, sockptr_t ukey, unsigned int keylen)
++static int alg_setkey(struct sock *sk, sockptr_t ukey, unsigned int keylen,
++ int (*setkey)(void *private, const u8 *key,
++ unsigned int keylen))
+ {
+ struct alg_sock *ask = alg_sk(sk);
+- const struct af_alg_type *type = ask->type;
+ u8 *key;
+ int err;
+
++ if (!setkey)
++ return -ENOPROTOOPT;
++
+ key = sock_kmalloc(sk, keylen, GFP_KERNEL);
+ if (!key)
+ return -ENOMEM;
+@@ -220,7 +224,7 @@ static int alg_setkey(struct sock *sk, sockptr_t ukey, unsigned int keylen)
+ if (copy_from_sockptr(key, ukey, keylen))
+ goto out;
+
+- err = type->setkey(ask->private, key, keylen);
++ err = setkey(ask->private, key, keylen);
+
+ out:
+ sock_kzfree_s(sk, key, keylen);
+@@ -377,13 +381,17 @@ static int alg_setsockopt(struct socket *sock, int level, int optname,
+ case ALG_SET_KEY_BY_KEY_SERIAL:
+ if (sock->state == SS_CONNECTED)
+ goto unlock;
+- if (!type->setkey)
+- goto unlock;
+
+ if (optname == ALG_SET_KEY_BY_KEY_SERIAL)
+ err = alg_setkey_by_key_serial(ask, optval, optlen);
+ else
+- err = alg_setkey(sk, optval, optlen);
++ err = alg_setkey(sk, optval, optlen, type->setkey);
++ break;
++ case ALG_SET_PUBKEY:
++ if (sock->state == SS_CONNECTED)
++ goto unlock;
++
++ err = alg_setkey(sk, optval, optlen, type->setpubkey);
+ break;
+ case ALG_SET_AEAD_AUTHSIZE:
+ if (sock->state == SS_CONNECTED)
+@@ -943,9 +951,9 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ struct af_alg_tsgl *sgl;
+ struct af_alg_control con = {};
+ long copied = 0;
+- bool enc = false;
+ bool init = false;
+ int err = 0;
++ int op = 0;
+
+ if (msg->msg_controllen) {
+ err = af_alg_cmsg_send(msg, &con);
+@@ -954,11 +962,11 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+
+ init = true;
+ switch (con.op) {
++ case ALG_OP_VERIFY:
++ case ALG_OP_SIGN:
+ case ALG_OP_ENCRYPT:
+- enc = true;
+- break;
+ case ALG_OP_DECRYPT:
+- enc = false;
++ op = con.op;
+ break;
+ default:
+ return -EINVAL;
+@@ -982,7 +990,7 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ ctx->init = true;
+
+ if (init) {
+- ctx->enc = enc;
++ ctx->op = op;
+ if (con.iv)
+ memcpy(ctx->iv, con.iv->iv, ivsize);
+
+diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
+index 7d58cbbce..839f8c8f2 100644
+--- a/crypto/algif_aead.c
++++ b/crypto/algif_aead.c
+@@ -55,7 +55,7 @@ static inline bool aead_sufficient_data(struct sock *sk)
+ * The minimum amount of memory needed for an AEAD cipher is
+ * the AAD and in case of decryption the tag.
+ */
+- return ctx->used >= ctx->aead_assoclen + (ctx->enc ? 0 : as);
++ return ctx->used >= ctx->aead_assoclen + (ctx->op ? 0 : as);
+ }
+
+ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+@@ -138,7 +138,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
+ * buffer provides the tag which is consumed resulting in only the
+ * plaintext without a buffer for the tag returned to the caller.
+ */
+- if (ctx->enc)
++ if (ctx->op)
+ outlen = used + as;
+ else
+ outlen = used - as;
+@@ -212,7 +212,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
+ /* Use the RX SGL as source (and destination) for crypto op. */
+ rsgl_src = areq->first_rsgl.sgl.sgt.sgl;
+
+- if (ctx->enc) {
++ if (ctx->op == ALG_OP_ENCRYPT) {
+ /*
+ * Encryption operation - The in-place cipher operation is
+ * achieved by the following operation:
+@@ -229,7 +229,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
+ if (err)
+ goto free;
+ af_alg_pull_tsgl(sk, processed, NULL, 0);
+- } else {
++ } else if (ctx->op == ALG_OP_DECRYPT) {
+ /*
+ * Decryption operation - To achieve an in-place cipher
+ * operation, the following SGL structure is used:
+@@ -276,6 +276,9 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
+ } else
+ /* no RX SGL present (e.g. authentication only) */
+ rsgl_src = areq->tsgl;
++ } else {
++ err = -EOPNOTSUPP;
++ goto free;
+ }
+
+ /* Initialize the crypto operation */
+@@ -295,26 +298,37 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
+ aead_request_set_callback(&areq->cra_u.aead_req,
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ af_alg_async_cb, areq);
+- err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
+- crypto_aead_decrypt(&areq->cra_u.aead_req);
+-
+- /* AIO operation in progress */
+- if (err == -EINPROGRESS)
+- return -EIOCBQUEUED;
+-
+- sock_put(sk);
+ } else {
+ /* Synchronous operation */
+ aead_request_set_callback(&areq->cra_u.aead_req,
+ CRYPTO_TFM_REQ_MAY_SLEEP |
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &ctx->wait);
+- err = crypto_wait_req(ctx->enc ?
+- crypto_aead_encrypt(&areq->cra_u.aead_req) :
+- crypto_aead_decrypt(&areq->cra_u.aead_req),
+- &ctx->wait);
+ }
+
++ switch (ctx->op) {
++ case ALG_OP_ENCRYPT:
++ err = crypto_aead_encrypt(&areq->cra_u.aead_req);
++ break;
++ case ALG_OP_DECRYPT:
++ err = crypto_aead_decrypt(&areq->cra_u.aead_req);
++ break;
++ default:
++ err = -EOPNOTSUPP;
++ goto free;
++ }
++
++ if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
++ /* AIO operation in progress */
++ if (err == -EINPROGRESS)
++ return -EIOCBQUEUED;
++
++ sock_put(sk);
++
++ } else {
++ /* Wait for synchronous operation completion */
++ err = crypto_wait_req(err, &ctx->wait);
++ }
+
+ free:
+ af_alg_free_resources(areq);
+diff --git a/crypto/algif_akcipher.c b/crypto/algif_akcipher.c
+new file mode 100644
+index 000000000..a0fbee0aa
+--- /dev/null
++++ b/crypto/algif_akcipher.c
+@@ -0,0 +1,457 @@
++// SPDX-License-Identifier: GPL-2.0-or-later
++/*
++ * algif_akcipher: User-space interface for asymmetric cipher algorithms
++ *
++ * Copyright (C) 2017, Stephan Mueller <smueller@chronox.de>
++ *
++ * This file provides the user-space API for asymmetric ciphers.
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the Free
++ * Software Foundation; either version 2 of the License, or (at your option)
++ * any later version.
++ *
++ * The following concept of the memory management is used:
++ *
++ * The kernel maintains two SGLs, the TX SGL and the RX SGL. The TX SGL is
++ * filled by user space with the data submitted via sendpage/sendmsg. Filling
++ * up the TX SGL does not cause a crypto operation -- the data will only be
++ * tracked by the kernel. Upon receipt of one recvmsg call, the caller must
++ * provide a buffer which is tracked with the RX SGL.
++ *
++ * During the processing of the recvmsg operation, the cipher request is
++ * allocated and prepared. As part of the recvmsg operation, the processed
++ * TX buffers are extracted from the TX SGL into a separate SGL.
++ *
++ * After the completion of the crypto operation, the RX SGL and the cipher
++ * request is released. The extracted TX SGL parts are released together with
++ * the RX SGL release.
++ */
++
++#include <crypto/akcipher.h>
++#include <crypto/scatterwalk.h>
++#include <crypto/if_alg.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/net.h>
++#include <net/sock.h>
++
++struct akcipher_tfm {
++ struct crypto_akcipher *akcipher;
++ bool has_key;
++};
++
++static int akcipher_sendmsg(struct socket *sock, struct msghdr *msg,
++ size_t size)
++{
++ return af_alg_sendmsg(sock, msg, size, 0);
++}
++
++static int _akcipher_recvmsg(struct socket *sock, struct msghdr *msg,
++ size_t ignored, int flags)
++{
++ struct sock *sk = sock->sk;
++ struct alg_sock *ask = alg_sk(sk);
++ struct sock *psk = ask->parent;
++ struct alg_sock *pask = alg_sk(psk);
++ struct af_alg_ctx *ctx = ask->private;
++ struct akcipher_tfm *akc = pask->private;
++ struct crypto_akcipher *tfm = akc->akcipher;
++ struct af_alg_async_req *areq;
++ int err = 0;
++ int maxsize;
++ size_t len = 0;
++ size_t used = 0;
++
++ maxsize = crypto_akcipher_maxsize(tfm);
++ if (maxsize < 0)
++ return maxsize;
++
++ /* Allocate cipher request for current operation. */
++ areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
++ crypto_akcipher_reqsize(tfm));
++ if (IS_ERR(areq))
++ return PTR_ERR(areq);
++
++ /* convert iovecs of output buffers into RX SGL */
++ err = af_alg_get_rsgl(sk, msg, flags, areq, maxsize, &len);
++ if (err)
++ goto free;
++
++ /* ensure output buffer is sufficiently large */
++ if (len < maxsize) {
++ pr_err("%s: output buffer is not large enough. len:%zu, maxsize:0x%x\n",
++ __func__, len, maxsize);
++ err = -EMSGSIZE;
++ goto free;
++ }
++
++ /*
++ * Create a per request TX SGL for this request which tracks the
++ * SG entries from the global TX SGL.
++ */
++ used = ctx->used;
++ areq->tsgl_entries = af_alg_count_tsgl(sk, used, 0);
++ if (!areq->tsgl_entries)
++ areq->tsgl_entries = 1;
++ areq->tsgl = sock_kmalloc(sk, sizeof(*areq->tsgl) * areq->tsgl_entries,
++ GFP_KERNEL);
++ if (!areq->tsgl) {
++ err = -ENOMEM;
++ goto free;
++ }
++ sg_init_table(areq->tsgl, areq->tsgl_entries);
++ af_alg_pull_tsgl(sk, used, areq->tsgl, 0);
++
++ /* Handle specific operation: verify op */
++ if (ctx->op == ALG_OP_VERIFY)
++ used -= len;
++
++ /* Initialize the crypto operation */
++ akcipher_request_set_tfm(&areq->cra_u.akcipher_req, tfm);
++ akcipher_request_set_crypt(&areq->cra_u.akcipher_req, areq->tsgl,
++ areq->first_rsgl.sgl.sgt.sgl, used, len);
++
++ if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
++ /* AIO operation */
++ areq->iocb = msg->msg_iocb;
++ akcipher_request_set_callback(&areq->cra_u.akcipher_req,
++ CRYPTO_TFM_REQ_MAY_SLEEP,
++ af_alg_async_cb, areq);
++ } else {
++ /* Synchronous operation */
++ akcipher_request_set_callback(&areq->cra_u.akcipher_req,
++ CRYPTO_TFM_REQ_MAY_SLEEP |
++ CRYPTO_TFM_REQ_MAY_BACKLOG,
++ crypto_req_done,
++ &ctx->wait);
++ }
++
++ switch (ctx->op) {
++ case ALG_OP_ENCRYPT:
++ err = crypto_akcipher_encrypt(&areq->cra_u.akcipher_req);
++ break;
++ case ALG_OP_DECRYPT:
++ err = crypto_akcipher_decrypt(&areq->cra_u.akcipher_req);
++ break;
++ case ALG_OP_SIGN:
++ err = crypto_akcipher_sign(&areq->cra_u.akcipher_req);
++ break;
++ case ALG_OP_VERIFY:
++ err = crypto_akcipher_verify(&areq->cra_u.akcipher_req);
++ break;
++ default:
++ err = -EOPNOTSUPP;
++ goto free;
++ }
++
++ if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
++ /* AIO operation in progress */
++ if (err == -EINPROGRESS) {
++ pr_info("%s: AIO operation in progress\n", __func__);
++ sock_hold(sk);
++
++ /* Remember output size that will be generated. */
++ areq->outlen = areq->cra_u.akcipher_req.dst_len;
++
++ return -EIOCBQUEUED;
++ }
++ } else {
++ /* Wait for synchronous operation completion */
++ err = crypto_wait_req(err, &ctx->wait);
++ }
++
++free:
++ af_alg_free_resources(areq);
++
++ return err ? err : areq->cra_u.akcipher_req.dst_len;
++}
++
++static int akcipher_recvmsg(struct socket *sock, struct msghdr *msg,
++ size_t ignored, int flags)
++{
++ struct sock *sk = sock->sk;
++ struct alg_sock *ask = alg_sk(sk);
++ struct sock *psk = ask->parent;
++ struct alg_sock *pask = alg_sk(psk);
++ struct akcipher_tfm *akc = pask->private;
++ struct crypto_akcipher *tfm = akc->akcipher;
++
++ int ret = 0;
++
++ lock_sock(sk);
++
++ while (msg_data_left(msg)) {
++ int err = _akcipher_recvmsg(sock, msg, ignored, flags);
++
++ /*
++ * This error covers -EIOCBQUEUED which implies that we can
++ * only handle one AIO request. If the caller wants to have
++ * multiple AIO requests in parallel, he must make multiple
++ * separate AIO calls.
++ */
++ if (err <= 0) {
++ if (err == -EIOCBQUEUED || err == -EBADMSG || !ret)
++ ret = err;
++ goto out;
++ }
++
++ ret += err;
++
++ /*
++ * The caller must provide crypto_akcipher_maxsize per request.
++ * If he provides more, we conclude that multiple akcipher
++ * operations are requested.
++ */
++ iov_iter_advance(&msg->msg_iter,
++ crypto_akcipher_maxsize(tfm) - err);
++ }
++
++out:
++ af_alg_wmem_wakeup(sk);
++ release_sock(sk);
++ return ret;
++}
++
++static struct proto_ops algif_akcipher_ops = {
++ .family = PF_ALG,
++
++ .connect = sock_no_connect,
++ .socketpair = sock_no_socketpair,
++ .getname = sock_no_getname,
++ .ioctl = sock_no_ioctl,
++ .listen = sock_no_listen,
++ .shutdown = sock_no_shutdown,
++ .mmap = sock_no_mmap,
++ .bind = sock_no_bind,
++ .accept = sock_no_accept,
++
++ .release = af_alg_release,
++ .sendmsg = akcipher_sendmsg,
++ .recvmsg = akcipher_recvmsg,
++ .poll = af_alg_poll,
++};
++
++static int akcipher_check_key(struct socket *sock)
++{
++ int err = 0;
++ struct sock *psk;
++ struct alg_sock *pask;
++ struct akcipher_tfm *tfm;
++ struct sock *sk = sock->sk;
++ struct alg_sock *ask = alg_sk(sk);
++
++ lock_sock(sk);
++ if (atomic_read(&ask->refcnt))
++ goto unlock_child;
++
++ psk = ask->parent;
++ pask = alg_sk(ask->parent);
++ tfm = pask->private;
++
++ err = -ENOKEY;
++ lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
++ if (!tfm->has_key)
++ goto unlock;
++
++ atomic_inc(&pask->refcnt);
++ if (!atomic_read(&pask->refcnt))
++ sock_hold(psk);
++
++ atomic_set(&ask->refcnt, 1);
++ sock_put(psk);
++
++ err = 0;
++
++unlock:
++ release_sock(psk);
++unlock_child:
++ release_sock(sk);
++
++ return err;
++}
++
++static int akcipher_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
++ size_t size)
++{
++ int err;
++
++ err = akcipher_check_key(sock);
++ if (err)
++ return err;
++
++ return akcipher_sendmsg(sock, msg, size);
++}
++
++static int akcipher_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
++ size_t ignored, int flags)
++{
++ int err;
++
++ err = akcipher_check_key(sock);
++ if (err)
++ return err;
++
++ return akcipher_recvmsg(sock, msg, ignored, flags);
++}
++
++static struct proto_ops algif_akcipher_ops_nokey = {
++ .family = PF_ALG,
++
++ .connect = sock_no_connect,
++ .socketpair = sock_no_socketpair,
++ .getname = sock_no_getname,
++ .ioctl = sock_no_ioctl,
++ .listen = sock_no_listen,
++ .shutdown = sock_no_shutdown,
++ .mmap = sock_no_mmap,
++ .bind = sock_no_bind,
++ .accept = sock_no_accept,
++
++ .release = af_alg_release,
++ .sendmsg = akcipher_sendmsg_nokey,
++ .recvmsg = akcipher_recvmsg_nokey,
++ .poll = af_alg_poll,
++};
++
++static void *akcipher_bind(const char *name, u32 type, u32 mask)
++{
++ struct akcipher_tfm *tfm;
++ struct crypto_akcipher *akcipher;
++
++ tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
++ if (!tfm)
++ return ERR_PTR(-ENOMEM);
++
++ akcipher = crypto_alloc_akcipher(name, type, mask);
++ if (IS_ERR(akcipher)) {
++ kfree(tfm);
++ return ERR_CAST(akcipher);
++ }
++
++ tfm->akcipher = akcipher;
++
++ return tfm;
++}
++
++static void akcipher_release(void *private)
++{
++ struct akcipher_tfm *tfm = private;
++ struct crypto_akcipher *akcipher = tfm->akcipher;
++
++ crypto_free_akcipher(akcipher);
++ kfree(tfm);
++}
++
++static int akcipher_setprivkey(void *private, const u8 *key,
++ unsigned int keylen)
++{
++ struct akcipher_tfm *tfm = private;
++ struct crypto_akcipher *akcipher = tfm->akcipher;
++ int err;
++
++ err = crypto_akcipher_set_priv_key(akcipher, key, keylen);
++ tfm->has_key = !err;
++
++ /* Return the maximum size of the akcipher operation. */
++ if (!err)
++ err = crypto_akcipher_maxsize(akcipher);
++
++ return err;
++}
++
++static int akcipher_setpubkey(void *private, const u8 *key, unsigned int keylen)
++{
++ struct akcipher_tfm *tfm = private;
++ struct crypto_akcipher *akcipher = tfm->akcipher;
++ int err;
++
++ err = crypto_akcipher_set_pub_key(akcipher, key, keylen);
++ tfm->has_key = !err;
++
++ /* Return the maximum size of the akcipher operation. */
++ if (!err)
++ err = crypto_akcipher_maxsize(akcipher);
++
++ return err;
++}
++
++static void akcipher_sock_destruct(struct sock *sk)
++{
++ struct alg_sock *ask = alg_sk(sk);
++ struct af_alg_ctx *ctx = ask->private;
++
++ af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
++ sock_kfree_s(sk, ctx, ctx->len);
++ af_alg_release_parent(sk);
++}
++
++static int akcipher_accept_parent_nokey(void *private, struct sock *sk)
++{
++ struct af_alg_ctx *ctx;
++ struct alg_sock *ask = alg_sk(sk);
++ unsigned int len = sizeof(*ctx);
++
++ ctx = sock_kmalloc(sk, len, GFP_KERNEL);
++ if (!ctx)
++ return -ENOMEM;
++ memset(ctx, 0, len);
++
++ INIT_LIST_HEAD(&ctx->tsgl_list);
++ ctx->len = len;
++ ctx->used = 0;
++ atomic_set(&ctx->rcvused, 0);
++ ctx->more = 0;
++ ctx->merge = 0;
++ ctx->op = 0;
++ crypto_init_wait(&ctx->wait);
++
++ ask->private = ctx;
++
++ sk->sk_destruct = akcipher_sock_destruct;
++
++ return 0;
++}
++
++static int akcipher_accept_parent(void *private, struct sock *sk)
++{
++ struct akcipher_tfm *tfm = private;
++
++ if (!tfm->has_key)
++ return -ENOKEY;
++
++ return akcipher_accept_parent_nokey(private, sk);
++}
++
++static const struct af_alg_type algif_type_akcipher = {
++ .bind = akcipher_bind,
++ .release = akcipher_release,
++ .setkey = akcipher_setprivkey,
++ .setpubkey = akcipher_setpubkey,
++ .setauthsize = NULL,
++ .accept = akcipher_accept_parent,
++ .accept_nokey = akcipher_accept_parent_nokey,
++ .ops = &algif_akcipher_ops,
++ .ops_nokey = &algif_akcipher_ops_nokey,
++ .name = "akcipher",
++ .owner = THIS_MODULE
++};
++
++static int __init algif_akcipher_init(void)
++{
++ return af_alg_register_type(&algif_type_akcipher);
++}
++
++static void __exit algif_akcipher_exit(void)
++{
++ af_alg_unregister_type(&algif_type_akcipher);
++}
++
++module_init(algif_akcipher_init);
++module_exit(algif_akcipher_exit);
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
++MODULE_DESCRIPTION("Asymmetric kernel crypto API user space interface");
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index 9ada9b741..5064d7fc6 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -118,25 +118,36 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
+ skcipher_request_set_callback(&areq->cra_u.skcipher_req,
+ CRYPTO_TFM_REQ_MAY_SLEEP,
+ af_alg_async_cb, areq);
+- err = ctx->enc ?
+- crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
+- crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
++ } else {
++ /* Synchronous operation */
++ skcipher_request_set_callback(&areq->cra_u.skcipher_req,
++ CRYPTO_TFM_REQ_MAY_SLEEP |
++ CRYPTO_TFM_REQ_MAY_BACKLOG,
++ crypto_req_done, &ctx->wait);
++ }
+
++ switch (ctx->op) {
++ case ALG_OP_ENCRYPT:
++ err = crypto_skcipher_encrypt(&areq->cra_u.skcipher_req);
++ break;
++ case ALG_OP_DECRYPT:
++ err = crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
++ break;
++ default:
++ err = -EOPNOTSUPP;
++ goto free;
++ }
++
++ if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
+ /* AIO operation in progress */
+ if (err == -EINPROGRESS)
+ return -EIOCBQUEUED;
+
+ sock_put(sk);
++
+ } else {
+- /* Synchronous operation */
+- skcipher_request_set_callback(&areq->cra_u.skcipher_req,
+- CRYPTO_TFM_REQ_MAY_SLEEP |
+- CRYPTO_TFM_REQ_MAY_BACKLOG,
+- crypto_req_done, &ctx->wait);
+- err = crypto_wait_req(ctx->enc ?
+- crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
+- crypto_skcipher_decrypt(&areq->cra_u.skcipher_req),
+- &ctx->wait);
++ /* Wait for synchronous operation completion */
++ err = crypto_wait_req(err, &ctx->wait);
+ }
+
+
+diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
+index 202ca1a31..c29ac558f 100644
+--- a/crypto/tcrypt.c
++++ b/crypto/tcrypt.c
+@@ -1855,6 +1855,15 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
+ ret = min(ret, tcrypt_test("cfb(aria)"));
+ ret = min(ret, tcrypt_test("ctr(aria)"));
+ break;
++ case 193:
++ ret = min(ret, tcrypt_test("rsa"));
++ break;
++ case 194:
++ ret = min(ret, tcrypt_test("ecdsa-nist-p256"));
++ break;
++ case 195:
++ ret = min(ret, tcrypt_test("ecdsa-nist-p384"));
++ break;
+ case 200:
+ test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32);
+@@ -2899,9 +2908,10 @@ static int __init tcrypt_mod_init(void)
+ err = do_test(alg, type, mask, mode, num_mb);
+
+ if (err) {
+- pr_err("one or more tests failed!\n");
++ pr_err("one or more tests failed!, err:%d\n", err);
+ goto err_free_tv;
+ } else {
++ pr_info("alg:%s, mode:%d tests passed\n", alg, mode);
+ pr_debug("all tests passed\n");
+ }
+
+--
+2.34.1
+
diff --git a/recipes-kernel/linux/files/0011-Add-watchdog-driver-for-ast2700.patch b/recipes-kernel/linux/files/0011-Add-watchdog-driver-for-ast2700.patch
new file mode 100644
index 0000000..5eb13a2
--- /dev/null
+++ b/recipes-kernel/linux/files/0011-Add-watchdog-driver-for-ast2700.patch
@@ -0,0 +1,398 @@
+From 15431b6d35a22b6d1745e743c7c17d610e317e28 Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Fri, 7 Mar 2025 13:33:57 +0800
+Subject: [PATCH] Add watchdog driver for ast2700
+
+Enable watchdog driver for ast2700
+
+This is based on Aspeed SDK 9.05.
+From linux-aspeed:
+769f62b7baa84d6998723b0ea60280e380183553
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
+---
+ drivers/watchdog/aspeed_wdt.c | 241 +++++++++++++++++++++++++++++++---
+ 1 file changed, 226 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
+index b72a858bb..20a3924df 100644
+--- a/drivers/watchdog/aspeed_wdt.c
++++ b/drivers/watchdog/aspeed_wdt.c
+@@ -11,52 +11,139 @@
+ #include <linux/io.h>
+ #include <linux/kernel.h>
+ #include <linux/kstrtox.h>
++#include <linux/mfd/syscon.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/of_irq.h>
+ #include <linux/platform_device.h>
++#include <linux/regmap.h>
+ #include <linux/watchdog.h>
+
+ static bool nowayout = WATCHDOG_NOWAYOUT;
+ module_param(nowayout, bool, 0);
+ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
++struct aspeed_wdt {
++ struct watchdog_device wdd;
++ void __iomem *base;
++ u32 ctrl;
++ const struct aspeed_wdt_config *cfg;
++};
++
++struct aspeed_wdt_scu {
++ const char *compatible;
++ u32 reset_status_reg;
++ u32 wdt_reset_mask;
++ u32 wdt_sw_reset_mask;
++ u32 wdt_reset_mask_shift;
++};
+
+ struct aspeed_wdt_config {
+ u32 ext_pulse_width_mask;
+ u32 irq_shift;
+ u32 irq_mask;
++ struct aspeed_wdt_scu scu;
++ u32 reset_mask_num;
++ u32 sw_reset_ctrl;
++ u32 sw_reset_mask_offset;
++ int (*restart)(struct aspeed_wdt *wdt);
+ };
+
+-struct aspeed_wdt {
+- struct watchdog_device wdd;
+- void __iomem *base;
+- u32 ctrl;
+- const struct aspeed_wdt_config *cfg;
+-};
++static int aspeed_ast2400_wdt_restart(struct aspeed_wdt *wdt);
++static int aspeed_ast2600_wdt_restart(struct aspeed_wdt *wdt);
++static int aspeed_ast2700_wdt_restart(struct aspeed_wdt *wdt);
+
+ static const struct aspeed_wdt_config ast2400_config = {
+ .ext_pulse_width_mask = 0xff,
+ .irq_shift = 0,
+ .irq_mask = 0,
++ .scu = {
++ .compatible = "aspeed,ast2400-scu",
++ .reset_status_reg = 0x3c,
++ .wdt_reset_mask = 0x1,
++ .wdt_sw_reset_mask = 0,
++ .wdt_reset_mask_shift = 1,
++ },
++ .reset_mask_num = 1,
++ .sw_reset_ctrl = 0x0,
++ .sw_reset_mask_offset = 0x0,
++ .restart = aspeed_ast2400_wdt_restart,
+ };
+
+ static const struct aspeed_wdt_config ast2500_config = {
+ .ext_pulse_width_mask = 0xfffff,
+ .irq_shift = 12,
+ .irq_mask = GENMASK(31, 12),
++ .scu = {
++ .compatible = "aspeed,ast2500-scu",
++ .reset_status_reg = 0x3c,
++ .wdt_reset_mask = 0x1,
++ .wdt_sw_reset_mask = 0,
++ .wdt_reset_mask_shift = 2,
++ },
++ .reset_mask_num = 1,
++ .sw_reset_ctrl = 0x0,
++ .sw_reset_mask_offset = 0x0,
++ .restart = aspeed_ast2400_wdt_restart,
+ };
+
+ static const struct aspeed_wdt_config ast2600_config = {
+ .ext_pulse_width_mask = 0xfffff,
+ .irq_shift = 0,
+ .irq_mask = GENMASK(31, 10),
++ .scu = {
++ .compatible = "aspeed,ast2600-scu",
++ .reset_status_reg = 0x74,
++ .wdt_reset_mask = 0xf,
++ .wdt_sw_reset_mask = 0x8,
++ .wdt_reset_mask_shift = 16,
++ },
++ .reset_mask_num = 2,
++ .sw_reset_ctrl = 0x24,
++ .sw_reset_mask_offset = 0x28,
++ .restart = aspeed_ast2600_wdt_restart,
++};
++
++static const struct aspeed_wdt_config ast2700a0_config = {
++ .ext_pulse_width_mask = 0xfffff,
++ .irq_shift = 0,
++ .irq_mask = GENMASK(31, 10),
++ .scu = {
++ .compatible = "aspeed,ast2700a0-scu0",
++ .reset_status_reg = 0x70,
++ .wdt_reset_mask = 0xf,
++ .wdt_sw_reset_mask = 0x8,
++ .wdt_reset_mask_shift = 0,
++ },
++ .reset_mask_num = 5,
++ .sw_reset_ctrl = 0x30,
++ .sw_reset_mask_offset = 0x34,
++ .restart = aspeed_ast2700_wdt_restart,
++};
++
++static const struct aspeed_wdt_config ast2700_config = {
++ .ext_pulse_width_mask = 0xfffff,
++ .irq_shift = 0,
++ .irq_mask = GENMASK(31, 10),
++ .scu = {
++ .compatible = "aspeed,ast2700-scu0",
++ .reset_status_reg = 0x70,
++ .wdt_reset_mask = 0xf,
++ .wdt_sw_reset_mask = 0x8,
++ .wdt_reset_mask_shift = 0,
++ },
++ .reset_mask_num = 5,
++ .sw_reset_ctrl = 0x30,
++ .sw_reset_mask_offset = 0x34,
++ .restart = aspeed_ast2700_wdt_restart,
+ };
+
+ static const struct of_device_id aspeed_wdt_of_table[] = {
+ { .compatible = "aspeed,ast2400-wdt", .data = &ast2400_config },
+ { .compatible = "aspeed,ast2500-wdt", .data = &ast2500_config },
+ { .compatible = "aspeed,ast2600-wdt", .data = &ast2600_config },
++ { .compatible = "aspeed,ast2700a0-wdt", .data = &ast2700a0_config },
++ { .compatible = "aspeed,ast2700-wdt", .data = &ast2700_config },
+ { },
+ };
+ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
+@@ -69,7 +156,8 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
+ #define WDT_CTRL_RESET_MODE_SOC (0x00 << 5)
+ #define WDT_CTRL_RESET_MODE_FULL_CHIP (0x01 << 5)
+ #define WDT_CTRL_RESET_MODE_ARM_CPU (0x10 << 5)
+-#define WDT_CTRL_1MHZ_CLK BIT(4)
++#define WDT_CTRL_RST_SOC BIT(4)
++#define WDT_CTRL_1MHZ_CLK BIT(4) /* AST2400 only */
+ #define WDT_CTRL_WDT_EXT BIT(3)
+ #define WDT_CTRL_WDT_INTR BIT(2)
+ #define WDT_CTRL_RESET_SYSTEM BIT(1)
+@@ -79,6 +167,10 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
+ #define WDT_TIMEOUT_STATUS_BOOT_SECONDARY BIT(1)
+ #define WDT_CLEAR_TIMEOUT_STATUS 0x14
+ #define WDT_CLEAR_TIMEOUT_AND_BOOT_CODE_SELECTION BIT(0)
++#define WDT_RESET_MASK1 0x1c
++#define WDT_RESET_MASK2 0x20
++#define WDT_SW_FLAGS_CTRL 0x4C
++#define WDT_SW_RESET_INDICATOR 0x80
+
+ /*
+ * WDT_RESET_WIDTH controls the characteristics of the external pulse (if
+@@ -113,6 +205,10 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
+ #define WDT_OPEN_DRAIN_MAGIC (0x8A << 24)
+
+ #define WDT_RESTART_MAGIC 0x4755
++#define WDT_SW_RESET_COUNT_CLEAR 0xDEADDEAD
++#define WDT_SW_RESET_ENABLE 0xAEEDF123
++
++#define WDT_SW_FLAGS_CLR 0xEA000000
+
+ /* 32 bits at 1MHz, in milliseconds */
+ #define WDT_MAX_TIMEOUT_MS 4294967
+@@ -198,11 +294,8 @@ static int aspeed_wdt_set_pretimeout(struct watchdog_device *wdd,
+ return 0;
+ }
+
+-static int aspeed_wdt_restart(struct watchdog_device *wdd,
+- unsigned long action, void *data)
++static int aspeed_ast2400_wdt_restart(struct aspeed_wdt *wdt)
+ {
+- struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
+-
+ wdt->ctrl &= ~WDT_CTRL_BOOT_SECONDARY;
+ aspeed_wdt_enable(wdt, 128 * WDT_RATE_1MHZ / 1000);
+
+@@ -211,6 +304,110 @@ static int aspeed_wdt_restart(struct watchdog_device *wdd,
+ return 0;
+ }
+
++static int aspeed_ast2600_wdt_restart(struct aspeed_wdt *wdt)
++{
++ u32 reg;
++ u32 ctrl = WDT_CTRL_RESET_MODE_SOC |
++ WDT_CTRL_RESET_SYSTEM;
++ int i;
++
++ for (i = 0; i < wdt->cfg->reset_mask_num; i++) {
++ reg = readl(wdt->base + WDT_RESET_MASK1 + i * 4);
++ writel(reg,
++ wdt->base + wdt->cfg->sw_reset_mask_offset + i * 4);
++ }
++
++ writel(ctrl, wdt->base + WDT_CTRL);
++ writel(WDT_SW_RESET_COUNT_CLEAR, wdt->base + wdt->cfg->sw_reset_ctrl);
++ writel(WDT_SW_RESET_ENABLE, wdt->base + wdt->cfg->sw_reset_ctrl);
++
++ /* system must be reset immediately */
++ mdelay(1000);
++
++ return 0;
++}
++
++static int aspeed_ast2700_wdt_restart(struct aspeed_wdt *wdt)
++{
++ writel(WDT_SW_RESET_INDICATOR, wdt->base + WDT_SW_FLAGS_CTRL);
++
++ wdt->ctrl = WDT_CTRL_RST_SOC | WDT_CTRL_RESET_SYSTEM;
++ aspeed_wdt_enable(wdt, 128 * WDT_RATE_1MHZ / 1000);
++
++ mdelay(1000);
++
++ return 0;
++}
++
++static int aspeed_wdt_restart(struct watchdog_device *wdd,
++ unsigned long action, void *data)
++{
++ struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
++
++ return wdt->cfg->restart(wdt);
++}
++
++static void aspeed_wdt_update_bootstatus(struct platform_device *pdev,
++ struct aspeed_wdt *wdt)
++{
++ struct resource *res;
++ struct aspeed_wdt_scu scu = wdt->cfg->scu;
++ struct regmap *scu_base;
++ struct device dev = pdev->dev;
++ u32 reset_mask_width;
++ u32 reset_mask_shift;
++ u32 reg_size;
++ u32 idx = 0;
++ u32 status;
++ int ret;
++
++ if (!of_device_is_compatible(dev.of_node, "aspeed,ast2400-wdt")) {
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (res) {
++ reg_size = res->end - res->start;
++ if (reg_size) {
++ idx = ((intptr_t)wdt->base & 0x00000fff) /
++ reg_size;
++ }
++ }
++ }
++
++ scu_base = syscon_regmap_lookup_by_compatible(scu.compatible);
++ if (IS_ERR(scu_base))
++ return;
++
++ ret = regmap_read(scu_base, scu.reset_status_reg, &status);
++ if (ret)
++ return;
++
++ reset_mask_width = hweight32(scu.wdt_reset_mask);
++ reset_mask_shift = scu.wdt_reset_mask_shift +
++ reset_mask_width * idx;
++
++ if (status & (scu.wdt_sw_reset_mask << reset_mask_shift))
++ wdt->wdd.bootstatus = WDIOF_EXTERN1;
++ else if (status & (scu.wdt_reset_mask << reset_mask_shift))
++ wdt->wdd.bootstatus = WDIOF_CARDRESET;
++
++ if (of_device_is_compatible(dev.of_node, "aspeed,ast2700a0-wdt") ||
++ of_device_is_compatible(dev.of_node, "aspeed,ast2700-wdt")) {
++ status = readl(wdt->base + WDT_SW_FLAGS_CTRL);
++ if (status & WDT_SW_RESET_INDICATOR) {
++ wdt->wdd.bootstatus = WDIOF_EXTERN1;
++ writel(WDT_SW_FLAGS_CLR, wdt->base + WDT_SW_FLAGS_CTRL);
++ }
++ }
++
++ if (of_device_is_compatible(dev.of_node, "aspeed,ast2400-wdt") ||
++ of_device_is_compatible(dev.of_node, "aspeed,ast2500-wdt")) {
++ status &= ~(scu.wdt_reset_mask << reset_mask_shift);
++ regmap_write(scu_base, scu.reset_status_reg, status);
++ } else {
++ regmap_write(scu_base, scu.reset_status_reg,
++ scu.wdt_reset_mask << reset_mask_shift);
++ }
++}
++
+ /* access_cs0 shows if cs0 is accessible, hence the reverted bit */
+ static ssize_t access_cs0_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+@@ -296,8 +493,10 @@ static irqreturn_t aspeed_wdt_irq(int irq, void *arg)
+ struct aspeed_wdt *wdt = to_aspeed_wdt(wdd);
+ u32 status = readl(wdt->base + WDT_TIMEOUT_STATUS);
+
+- if (status & WDT_TIMEOUT_STATUS_IRQ)
++ if (status & WDT_TIMEOUT_STATUS_IRQ) {
+ watchdog_notify_pretimeout(wdd);
++ writel(0x1, wdt->base + WDT_CLEAR_TIMEOUT_STATUS);
++ }
+
+ return IRQ_HANDLED;
+ }
+@@ -312,6 +511,7 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
+ u32 duration;
+ u32 status;
+ int ret;
++ int i;
+
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+@@ -371,13 +571,16 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
+ ret = of_property_read_string(np, "aspeed,reset-type", &reset_type);
+ if (ret) {
+ wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC | WDT_CTRL_RESET_SYSTEM;
++ if (!of_device_is_compatible(np, "aspeed,ast2400-wdt"))
++ wdt->ctrl |= WDT_CTRL_RST_SOC;
+ } else {
+ if (!strcmp(reset_type, "cpu"))
+ wdt->ctrl |= WDT_CTRL_RESET_MODE_ARM_CPU |
+ WDT_CTRL_RESET_SYSTEM;
+ else if (!strcmp(reset_type, "soc"))
+ wdt->ctrl |= WDT_CTRL_RESET_MODE_SOC |
+- WDT_CTRL_RESET_SYSTEM;
++ WDT_CTRL_RESET_SYSTEM |
++ WDT_CTRL_RST_SOC;
+ else if (!strcmp(reset_type, "system"))
+ wdt->ctrl |= WDT_CTRL_RESET_MODE_FULL_CHIP |
+ WDT_CTRL_RESET_SYSTEM;
+@@ -402,6 +605,8 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
+
+ if ((of_device_is_compatible(np, "aspeed,ast2500-wdt")) ||
+ (of_device_is_compatible(np, "aspeed,ast2600-wdt"))) {
++ u32 reset_mask[6];
++ size_t nrstmask = wdt->cfg->reset_mask_num;
+ u32 reg = readl(wdt->base + WDT_RESET_WIDTH);
+
+ reg &= wdt->cfg->ext_pulse_width_mask;
+@@ -419,6 +624,12 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
+ reg |= WDT_OPEN_DRAIN_MAGIC;
+
+ writel(reg, wdt->base + WDT_RESET_WIDTH);
++
++ ret = of_property_read_u32_array(np, "aspeed,reset-mask", reset_mask, nrstmask);
++ if (!ret) {
++ for (i = 0; i < nrstmask; i++)
++ writel(reset_mask[i], wdt->base + WDT_RESET_MASK1 + i * 4);
++ }
+ }
+
+ if (!of_property_read_u32(np, "aspeed,ext-pulse-duration", &duration)) {
+@@ -447,10 +658,10 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
+ writel(duration - 1, wdt->base + WDT_RESET_WIDTH);
+ }
+
++ aspeed_wdt_update_bootstatus(pdev, wdt);
++
+ status = readl(wdt->base + WDT_TIMEOUT_STATUS);
+ if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY) {
+- wdt->wdd.bootstatus = WDIOF_CARDRESET;
+-
+ if (of_device_is_compatible(np, "aspeed,ast2400-wdt") ||
+ of_device_is_compatible(np, "aspeed,ast2500-wdt"))
+ wdt->wdd.groups = bswitch_groups;
+--
+2.34.1
+
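Note: the watchdog hunks above replace the single aspeed_wdt_restart() body with per-generation restart callbacks (ast2400/ast2600/ast2700) selected through wdt->cfg->restart. The standalone sketch below only illustrates that dispatch pattern; the demo_* names are made up for the example and do not appear in the patch.

```
#include <stdio.h>

struct demo_wdt;

/* Per-generation config, mirroring the cfg->restart dispatch in the patch. */
struct demo_wdt_cfg {
	const char *compatible;
	int (*restart)(struct demo_wdt *wdt);
};

struct demo_wdt {
	const struct demo_wdt_cfg *cfg;
};

static int demo_restart_ast2600(struct demo_wdt *wdt)
{
	/* Real driver: copy the reset masks, then write the SW reset magic values. */
	printf("ast2600-style restart\n");
	return 0;
}

static int demo_restart_ast2700(struct demo_wdt *wdt)
{
	/* Real driver: latch the SW reset indicator, then arm the timer. */
	printf("ast2700-style restart\n");
	return 0;
}

static const struct demo_wdt_cfg demo_cfgs[] = {
	{ "aspeed,ast2600-wdt", demo_restart_ast2600 },
	{ "aspeed,ast2700-wdt", demo_restart_ast2700 },
};

/* Generic restart handler: nothing generation-specific lives here any more. */
static int demo_wdt_restart(struct demo_wdt *wdt)
{
	return wdt->cfg->restart(wdt);
}

int main(void)
{
	struct demo_wdt wdt = { .cfg = &demo_cfgs[1] };	/* pretend we matched ast2700 */

	return demo_wdt_restart(&wdt);
}
```

Built with a plain C compiler this only prints which restart path would run; in the driver the of_device_id match data supplies the cfg pointer.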
diff --git a/recipes-kernel/linux/files/0011-Enable-FTGMAC100-driver-for-mac0.patch b/recipes-kernel/linux/files/0011-Enable-FTGMAC100-driver-for-mac0.patch
deleted file mode 100644
index 72d4d33..0000000
--- a/recipes-kernel/linux/files/0011-Enable-FTGMAC100-driver-for-mac0.patch
+++ /dev/null
@@ -1,487 +0,0 @@
-From 226a25cc141d5514ba7e85bc44fe4f127fd0bc41 Mon Sep 17 00:00:00 2001
-From: Ethan <ethan.im.hsieh@fii-na.corp-partner.google.com>
-Date: Fri, 3 Jan 2025 17:27:45 +0800
-Subject: [PATCH] Enable-FTGMAC100-driver-for-mac0
-
-Signed-off-by: Ethan <ethan.im.hsieh@fii-na.corp-partner.google.com>
----
- drivers/net/ethernet/faraday/Kconfig | 3 +-
- drivers/net/ethernet/faraday/ftgmac100.c | 138 ++++++++++++++++++-----
- drivers/net/ethernet/faraday/ftgmac100.h | 10 ++
- drivers/net/mdio/mdio-aspeed.c | 3 +
- 4 files changed, 126 insertions(+), 28 deletions(-)
-
-diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
-index c699bd6bc..c9c977211 100644
---- a/drivers/net/ethernet/faraday/Kconfig
-+++ b/drivers/net/ethernet/faraday/Kconfig
-@@ -29,9 +29,8 @@ config FTMAC100
- config FTGMAC100
- tristate "Faraday FTGMAC100 Gigabit Ethernet support"
- depends on ARM || COMPILE_TEST
-- depends on !64BIT || BROKEN
- select PHYLIB
-- select MDIO_ASPEED if MACH_ASPEED_G6
-+ select MDIO_ASPEED if MACH_ASPEED_G6 || MACH_ASPEED_G7
- select CRC32
- help
- This driver supports the FTGMAC100 Gigabit Ethernet controller
-diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
-index 9135b918d..7b9fe77da 100644
---- a/drivers/net/ethernet/faraday/ftgmac100.c
-+++ b/drivers/net/ethernet/faraday/ftgmac100.c
-@@ -9,6 +9,7 @@
- #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
- #include <linux/clk.h>
-+#include <linux/reset.h>
- #include <linux/dma-mapping.h>
- #include <linux/etherdevice.h>
- #include <linux/ethtool.h>
-@@ -19,11 +20,13 @@
- #include <linux/of.h>
- #include <linux/of_mdio.h>
- #include <linux/phy.h>
-+#include <linux/phy/phy.h>
- #include <linux/platform_device.h>
- #include <linux/property.h>
- #include <linux/crc32.h>
- #include <linux/if_vlan.h>
- #include <linux/of_net.h>
-+#include <linux/phy_fixed.h>
- #include <net/ip.h>
- #include <net/ncsi.h>
-
-@@ -50,6 +53,15 @@
- #define FTGMAC_100MHZ 100000000
- #define FTGMAC_25MHZ 25000000
-
-+/* For NC-SI to register a fixed-link phy device */
-+static struct fixed_phy_status ncsi_phy_status = {
-+ .link = 1,
-+ .speed = SPEED_100,
-+ .duplex = DUPLEX_FULL,
-+ .pause = 0,
-+ .asym_pause = 0
-+};
-+
- struct ftgmac100 {
- /* Registers */
- struct resource *res;
-@@ -88,6 +100,7 @@ struct ftgmac100 {
- struct work_struct reset_task;
- struct mii_bus *mii_bus;
- struct clk *clk;
-+ struct reset_control *rst;
-
- /* AST2500/AST2600 RMII ref clock gate */
- struct clk *rclk;
-@@ -109,6 +122,10 @@ struct ftgmac100 {
- /* Misc */
- bool need_mac_restart;
- bool is_aspeed;
-+ bool is_ast2700_rmii;
-+
-+ /* AST2700 SGMII */
-+ struct phy *sgmii;
- };
-
- static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr)
-@@ -255,10 +272,12 @@ static void ftgmac100_init_hw(struct ftgmac100 *priv)
- iowrite32(reg, priv->base + FTGMAC100_OFFSET_ISR);
-
- /* Setup RX ring buffer base */
-- iowrite32(priv->rxdes_dma, priv->base + FTGMAC100_OFFSET_RXR_BADR);
-+ iowrite32(lower_32_bits(priv->rxdes_dma), priv->base + FTGMAC100_OFFSET_RXR_BADR);
-+ iowrite32(upper_32_bits(priv->rxdes_dma), priv->base + FTGMAC100_OFFSET_RXR_BADDR_HIGH);
-
- /* Setup TX ring buffer base */
-- iowrite32(priv->txdes_dma, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);
-+ iowrite32(lower_32_bits(priv->txdes_dma), priv->base + FTGMAC100_OFFSET_NPTXR_BADR);
-+ iowrite32(upper_32_bits(priv->txdes_dma), priv->base + FTGMAC100_OFFSET_TXR_BADDR_HIGH);
-
- /* Configure RX buffer size */
- iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE),
-@@ -339,6 +358,9 @@ static void ftgmac100_start_hw(struct ftgmac100 *priv)
- if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
- maccr |= FTGMAC100_MACCR_RM_VLAN;
-
-+ if (priv->is_ast2700_rmii)
-+ maccr |= FTGMAC100_MACCR_RMII_ENABLE;
-+
- /* Hit the HW */
- iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
- }
-@@ -415,7 +437,8 @@ static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
- priv->rx_skbs[entry] = skb;
-
- /* Store DMA address into RX desc */
-- rxdes->rxdes3 = cpu_to_le32(map);
-+ rxdes->rxdes2 = FIELD_PREP(FTGMAC100_RXDES2_RXBUF_BADR_HI, upper_32_bits(map));
-+ rxdes->rxdes3 = lower_32_bits(map);
-
- /* Ensure the above is ordered vs clearing the OWN bit */
- dma_wmb();
-@@ -541,7 +564,7 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
- csum_vlan & 0xffff);
-
- /* Tear down DMA mapping, do necessary cache management */
-- map = le32_to_cpu(rxdes->rxdes3);
-+ map = le32_to_cpu(rxdes->rxdes3) | ((rxdes->rxdes2 & FTGMAC100_RXDES2_RXBUF_BADR_HI) << 16);
-
- #if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU)
- /* When we don't have an iommu, we can save cycles by not
-@@ -553,7 +576,6 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
- dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
- #endif
-
--
- /* Resplenish rx ring */
- ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
- priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
-@@ -572,7 +594,7 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
- (*processed)++;
- return true;
-
-- drop:
-+drop:
- /* Clean rxdes0 (which resets own bit) */
- rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
- priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
-@@ -618,7 +640,9 @@ static void ftgmac100_free_tx_packet(struct ftgmac100 *priv,
- struct ftgmac100_txdes *txdes,
- u32 ctl_stat)
- {
-- dma_addr_t map = le32_to_cpu(txdes->txdes3);
-+ dma_addr_t map = le32_to_cpu(txdes->txdes3) |
-+ ((txdes->txdes2 & FTGMAC100_TXDES2_TXBUF_BADR_HI) << 16);
-+
- size_t len;
-
- if (ctl_stat & FTGMAC100_TXDES0_FTS) {
-@@ -656,6 +680,11 @@ static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
- ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
- txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
-
-+ /* Ensure the descriptor config is visible before setting the tx
-+ * pointer.
-+ */
-+ smp_wmb();
-+
- priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);
-
- return true;
-@@ -769,7 +798,8 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
- f_ctl_stat |= FTGMAC100_TXDES0_FTS;
- if (nfrags == 0)
- f_ctl_stat |= FTGMAC100_TXDES0_LTS;
-- txdes->txdes3 = cpu_to_le32(map);
-+ txdes->txdes2 = FIELD_PREP(FTGMAC100_TXDES2_TXBUF_BADR_HI, upper_32_bits((ulong)map));
-+ txdes->txdes3 = lower_32_bits(map);
- txdes->txdes1 = cpu_to_le32(csum_vlan);
-
- /* Next descriptor */
-@@ -797,7 +827,9 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
- ctl_stat |= FTGMAC100_TXDES0_LTS;
- txdes->txdes0 = cpu_to_le32(ctl_stat);
- txdes->txdes1 = 0;
-- txdes->txdes3 = cpu_to_le32(map);
-+ txdes->txdes2 =
-+ FIELD_PREP(FTGMAC100_TXDES2_TXBUF_BADR_HI, upper_32_bits((ulong)map));
-+ txdes->txdes3 = lower_32_bits(map);
-
- /* Next one */
- pointer = ftgmac100_next_tx_pointer(priv, pointer);
-@@ -809,6 +841,11 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
- dma_wmb();
- first->txdes0 = cpu_to_le32(f_ctl_stat);
-
-+ /* Ensure the descriptor config is visible before setting the tx
-+ * pointer.
-+ */
-+ smp_wmb();
-+
- /* Update next TX pointer */
- priv->tx_pointer = pointer;
-
-@@ -829,7 +866,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
-
- return NETDEV_TX_OK;
-
-- dma_err:
-+dma_err:
- if (net_ratelimit())
- netdev_err(netdev, "map tx fragment failed\n");
-
-@@ -851,7 +888,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
- * last fragment, so we know ftgmac100_free_tx_packet()
- * hasn't freed the skb yet.
- */
-- drop:
-+drop:
- /* Drop the packet */
- dev_kfree_skb_any(skb);
- netdev->stats.tx_dropped++;
-@@ -867,7 +904,8 @@ static void ftgmac100_free_buffers(struct ftgmac100 *priv)
- for (i = 0; i < priv->rx_q_entries; i++) {
- struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
- struct sk_buff *skb = priv->rx_skbs[i];
-- dma_addr_t map = le32_to_cpu(rxdes->rxdes3);
-+ dma_addr_t map = le32_to_cpu(rxdes->rxdes3) |
-+ ((rxdes->rxdes2 & FTGMAC100_RXDES2_RXBUF_BADR_HI) << 16);
-
- if (!skb)
- continue;
-@@ -966,7 +1004,9 @@ static void ftgmac100_init_rings(struct ftgmac100 *priv)
- for (i = 0; i < priv->rx_q_entries; i++) {
- rxdes = &priv->rxdes[i];
- rxdes->rxdes0 = 0;
-- rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma);
-+ rxdes->rxdes2 = FIELD_PREP(FTGMAC100_RXDES2_RXBUF_BADR_HI,
-+ upper_32_bits(priv->rx_scratch_dma));
-+ rxdes->rxdes3 = lower_32_bits(priv->rx_scratch_dma);
- }
- /* Mark the end of the ring */
- rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
-@@ -1229,7 +1269,6 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
- more = ftgmac100_rx_packet(priv, &work_done);
- } while (more && work_done < budget);
-
--
- /* The interrupt is telling us to kick the MAC back to life
- * after an RX overflow
- */
-@@ -1319,7 +1358,6 @@ static void ftgmac100_reset(struct ftgmac100 *priv)
- if (priv->mii_bus)
- mutex_lock(&priv->mii_bus->mdio_lock);
-
--
- /* Check if the interface is still up */
- if (!netif_running(netdev))
- goto bail;
-@@ -1344,7 +1382,7 @@ static void ftgmac100_reset(struct ftgmac100 *priv)
- ftgmac100_init_all(priv, true);
-
- netdev_dbg(netdev, "Reset done !\n");
-- bail:
-+bail:
- if (priv->mii_bus)
- mutex_unlock(&priv->mii_bus->mdio_lock);
- if (netdev->phydev)
-@@ -1418,7 +1456,6 @@ static void ftgmac100_adjust_link(struct net_device *netdev)
-
- if (netdev->phydev)
- mutex_lock(&netdev->phydev->lock);
--
- }
-
- static int ftgmac100_mii_probe(struct net_device *netdev)
-@@ -1531,7 +1568,8 @@ static int ftgmac100_open(struct net_device *netdev)
- if (netdev->phydev) {
- /* If we have a PHY, start polling */
- phy_start(netdev->phydev);
-- } else if (priv->use_ncsi) {
-+ }
-+ if (priv->use_ncsi) {
- /* If using NC-SI, set our carrier on and start the stack */
- netif_carrier_on(netdev);
-
-@@ -1543,15 +1581,16 @@ static int ftgmac100_open(struct net_device *netdev)
-
- return 0;
-
-- err_ncsi:
-+err_ncsi:
-+ phy_stop(netdev->phydev);
- napi_disable(&priv->napi);
- netif_stop_queue(netdev);
-- err_alloc:
-+err_alloc:
- ftgmac100_free_buffers(priv);
- free_irq(netdev->irq, netdev);
-- err_irq:
-+err_irq:
- netif_napi_del(&priv->napi);
-- err_hw:
-+err_hw:
- iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
- ftgmac100_free_rings(priv);
- return err;
-@@ -1577,7 +1616,7 @@ static int ftgmac100_stop(struct net_device *netdev)
- netif_napi_del(&priv->napi);
- if (netdev->phydev)
- phy_stop(netdev->phydev);
-- else if (priv->use_ncsi)
-+ if (priv->use_ncsi)
- ncsi_stop_dev(priv->ndev);
-
- ftgmac100_stop_hw(priv);
-@@ -1715,6 +1754,9 @@ static void ftgmac100_phy_disconnect(struct net_device *netdev)
- phy_disconnect(netdev->phydev);
- if (of_phy_is_fixed_link(priv->dev->of_node))
- of_phy_deregister_fixed_link(priv->dev->of_node);
-+
-+ if (priv->use_ncsi)
-+ fixed_phy_unregister(netdev->phydev);
- }
-
- static void ftgmac100_destroy_mdio(struct net_device *netdev)
-@@ -1792,6 +1834,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
- struct resource *res;
- int irq;
- struct net_device *netdev;
-+ struct phy_device *phydev;
- struct ftgmac100 *priv;
- struct device_node *np;
- int err = 0;
-@@ -1856,7 +1899,8 @@ static int ftgmac100_probe(struct platform_device *pdev)
- np = pdev->dev.of_node;
- if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
- of_device_is_compatible(np, "aspeed,ast2500-mac") ||
-- of_device_is_compatible(np, "aspeed,ast2600-mac"))) {
-+ of_device_is_compatible(np, "aspeed,ast2600-mac") ||
-+ of_device_is_compatible(np, "aspeed,ast2700-mac"))) {
- priv->rxdes0_edorr_mask = BIT(30);
- priv->txdes0_edotr_mask = BIT(30);
- priv->is_aspeed = true;
-@@ -1879,6 +1923,14 @@ static int ftgmac100_probe(struct platform_device *pdev)
- err = -EINVAL;
- goto err_phy_connect;
- }
-+
-+ phydev = fixed_phy_register(PHY_POLL, &ncsi_phy_status, NULL);
-+ err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link,
-+ PHY_INTERFACE_MODE_MII);
-+ if (err) {
-+ dev_err(&pdev->dev, "Connecting PHY failed\n");
-+ goto err_phy_connect;
-+ }
- } else if (np && of_phy_is_fixed_link(np)) {
- struct phy_device *phy;
-
-@@ -1944,18 +1996,50 @@ static int ftgmac100_probe(struct platform_device *pdev)
- dev_err(priv->dev, "MII probe failed!\n");
- goto err_ncsi_dev;
- }
--
- }
-
- if (priv->is_aspeed) {
-+ struct reset_control *rst;
-+
- err = ftgmac100_setup_clk(priv);
- if (err)
- goto err_phy_connect;
-
-+ rst = devm_reset_control_get_optional(priv->dev, NULL);
-+ if (IS_ERR(rst))
-+ goto err_register_netdev;
-+
-+ priv->rst = rst;
-+ err = reset_control_assert(priv->rst);
-+ mdelay(10);
-+ err = reset_control_deassert(priv->rst);
-+
- /* Disable ast2600 problematic HW arbitration */
-- if (of_device_is_compatible(np, "aspeed,ast2600-mac"))
-+ if (of_device_is_compatible(np, "aspeed,ast2600-mac") ||
-+ of_device_is_compatible(np, "aspeed,ast2700-mac"))
- iowrite32(FTGMAC100_TM_DEFAULT,
- priv->base + FTGMAC100_OFFSET_TM);
-+
-+ if (of_device_is_compatible(np, "aspeed,ast2700-mac")) {
-+ phy_interface_t phy_intf;
-+
-+ err = of_get_phy_mode(np, &phy_intf);
-+ if (err)
-+ phy_intf = PHY_INTERFACE_MODE_RGMII;
-+ priv->is_ast2700_rmii = (phy_intf == PHY_INTERFACE_MODE_RMII) ||
-+ priv->use_ncsi;
-+ if (phy_intf == PHY_INTERFACE_MODE_SGMII) {
-+ priv->sgmii = devm_phy_optional_get(&pdev->dev, "sgmii");
-+ if (IS_ERR(priv->sgmii)) {
-+ dev_err(priv->dev, "Failed to get sgmii phy (%ld)\n",
-+ PTR_ERR(priv->sgmii));
-+ return PTR_ERR(priv->sgmii);
-+ }
-+ phy_init(priv->sgmii);
-+ if (np && of_phy_is_fixed_link(np))
-+ phy_set_speed(priv->sgmii, netdev->phydev->speed);
-+ }
-+ }
- }
-
- /* Default ring sizes */
-@@ -1989,6 +2073,8 @@ static int ftgmac100_probe(struct platform_device *pdev)
- goto err_register_netdev;
- }
-
-+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
-+
- netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base);
-
- return 0;
-diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
-index 4968f6f0b..f8c30a09d 100644
---- a/drivers/net/ethernet/faraday/ftgmac100.h
-+++ b/drivers/net/ethernet/faraday/ftgmac100.h
-@@ -57,6 +57,13 @@
- #define FTGMAC100_OFFSET_RX_RUNT 0xc0
- #define FTGMAC100_OFFSET_RX_CRCER_FTL 0xc4
- #define FTGMAC100_OFFSET_RX_COL_LOST 0xc8
-+/* reserved 0xcc - 0x174 */
-+#define FTGMAC100_OFFSET_TXR_BADDR_LOW 0x178 /* ast2700 */
-+#define FTGMAC100_OFFSET_TXR_BADDR_HIGH 0x17c /* ast2700 */
-+#define FTGMAC100_OFFSET_HPTXR_BADDR_LOW 0x180 /* ast2700 */
-+#define FTGMAC100_OFFSET_HPTXR_BADDR_HIGH 0x184 /* ast2700 */
-+#define FTGMAC100_OFFSET_RXR_BADDR_LOW 0x188 /* ast2700 */
-+#define FTGMAC100_OFFSET_RXR_BADDR_HIGH 0x18C /* ast2700 */
-
- /*
- * Interrupt status register & interrupt enable register
-@@ -166,6 +173,7 @@
- #define FTGMAC100_MACCR_RX_MULTIPKT (1 << 16)
- #define FTGMAC100_MACCR_RX_BROADPKT (1 << 17)
- #define FTGMAC100_MACCR_DISCARD_CRCERR (1 << 18)
-+#define FTGMAC100_MACCR_RMII_ENABLE BIT(20) /* defined in ast2700 */
- #define FTGMAC100_MACCR_FAST_MODE (1 << 19)
- #define FTGMAC100_MACCR_SW_RST (1 << 31)
-
-@@ -225,6 +233,7 @@ struct ftgmac100_txdes {
- #define FTGMAC100_TXDES1_TX2FIC (1 << 30)
- #define FTGMAC100_TXDES1_TXIC (1 << 31)
-
-+#define FTGMAC100_TXDES2_TXBUF_BADR_HI GENMASK(18, 16)
- /*
- * Receive descriptor, aligned to 16 bytes
- */
-@@ -271,4 +280,5 @@ struct ftgmac100_rxdes {
- #define FTGMAC100_RXDES1_UDP_CHKSUM_ERR (1 << 26)
- #define FTGMAC100_RXDES1_IP_CHKSUM_ERR (1 << 27)
-
-+#define FTGMAC100_RXDES2_RXBUF_BADR_HI GENMASK(18, 16)
- #endif /* __FTGMAC100_H */
-diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
-index c727103c8..e8e18b9df 100644
---- a/drivers/net/mdio/mdio-aspeed.c
-+++ b/drivers/net/mdio/mdio-aspeed.c
-@@ -62,6 +62,8 @@ static int aspeed_mdio_op(struct mii_bus *bus, u8 st, u8 op, u8 phyad, u8 regad,
- | FIELD_PREP(ASPEED_MDIO_DATA_MIIRDATA, data);
-
- iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
-+ /* Add dummy read to ensure triggering mdio controller */
-+ (void)ioread32(ctx->base + ASPEED_MDIO_CTRL);
-
- return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
- !(ctrl & ASPEED_MDIO_CTRL_FIRE),
-@@ -190,6 +192,7 @@ static int aspeed_mdio_remove(struct platform_device *pdev)
-
- static const struct of_device_id aspeed_mdio_of_match[] = {
- { .compatible = "aspeed,ast2600-mdio", },
-+ { .compatible = "aspeed,ast2700-mdio", },
- { },
- };
- MODULE_DEVICE_TABLE(of, aspeed_mdio_of_match);
---
-2.34.1
-
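Note: both the removed 0011 patch above and its replacement 0013 below extend the FTGMAC100 descriptors so a DMA address wider than 32 bits is split across two words: the low 32 bits go in des3 and the upper bits land in bits [18:16] of des2 (the *_BADR_HI fields). A minimal self-contained sketch of that packing and recovery arithmetic is shown here, using local macros instead of the kernel's GENMASK()/FIELD_PREP(); the demo_* names are illustrative only.

```
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's GENMASK()/FIELD_PREP() helpers. */
#define DES2_BADR_HI_MASK  0x00070000u		/* bits [18:16] of des2 */
#define DES2_BADR_HI_SHIFT 16

struct demo_desc {
	uint32_t des2;		/* carries DMA address bits [34:32] in [18:16] */
	uint32_t des3;		/* carries DMA address bits [31:0] */
};

static void demo_pack(struct demo_desc *d, uint64_t dma)
{
	d->des3 = (uint32_t)dma;
	d->des2 = ((uint32_t)(dma >> 32) << DES2_BADR_HI_SHIFT) & DES2_BADR_HI_MASK;
}

static uint64_t demo_unpack(const struct demo_desc *d)
{
	/* Same recovery as the patch: low word OR the high field shifted up by 16. */
	return (uint64_t)d->des3 |
	       ((uint64_t)(d->des2 & DES2_BADR_HI_MASK) << 16);
}

int main(void)
{
	struct demo_desc d;
	uint64_t dma = 0x3ABCD1234ull;	/* a 35-bit example address */

	demo_pack(&d, dma);
	assert(demo_unpack(&d) == dma);
	printf("des2=0x%08x des3=0x%08x -> 0x%llx\n",
	       d.des2, d.des3, (unsigned long long)demo_unpack(&d));
	return 0;
}
```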
diff --git a/recipes-kernel/linux/files/0010-Enable-reset-controller-driver.patch b/recipes-kernel/linux/files/0012-Add-reset-controller-driver-for-ast2700.patch
similarity index 81%
rename from recipes-kernel/linux/files/0010-Enable-reset-controller-driver.patch
rename to recipes-kernel/linux/files/0012-Add-reset-controller-driver-for-ast2700.patch
index d5764ac..e1af47c 100644
--- a/recipes-kernel/linux/files/0010-Enable-reset-controller-driver.patch
+++ b/recipes-kernel/linux/files/0012-Add-reset-controller-driver-for-ast2700.patch
@@ -1,19 +1,22 @@
-From 5704a10df0d25ecc92203a0211b264aceae84d92 Mon Sep 17 00:00:00 2001
-From: Hsieh I-Ming <ethan.im.hsieh@fii-foxconn.com>
-Date: Mon, 9 Dec 2024 19:55:26 +0800
-Subject: [PATCH] Enable reset controller driver
+From a6b34d2f743c8892f149fa018065445e06ae74fc Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Fri, 7 Mar 2025 13:39:37 +0800
+Subject: [PATCH] Add reset controller driver for ast2700
-Signed-off-by: Hsieh I-Ming <ethan.im.hsieh@fii-foxconn.com>
+This is based on aspeed SDK 9.05.
+From linux-aspeed:
+769f62b7baa84d6998723b0ea60280e380183553
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
---
- drivers/reset/Kconfig | 6 +
+ drivers/reset/Kconfig | 7 +
drivers/reset/Makefile | 1 +
- drivers/reset/reset-aspeed.c | 276 +++++++++++++++++++++++++++++++++++
- drivers/reset/reset-simple.c | 1 +
- 4 files changed, 284 insertions(+)
+ drivers/reset/reset-aspeed.c | 327 +++++++++++++++++++++++++++++++++++
+ 3 files changed, 335 insertions(+)
create mode 100644 drivers/reset/reset-aspeed.c
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
-index 531c9c6a7b54..b7521b7a8d3f 100644
+index ccd59ddd7..b7521b7a8 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -22,6 +22,12 @@ config RESET_A10SR
@@ -29,8 +32,16 @@
config RESET_ATH79
bool "AR71xx Reset Driver" if COMPILE_TEST
default ATH79
+@@ -139,6 +145,7 @@ config RESET_MESON_AUDIO_ARB
+ config RESET_NPCM
+ bool "NPCM BMC Reset Driver" if COMPILE_TEST
+ default ARCH_NPCM
++ select AUXILIARY_BUS
+ help
+ This enables the reset controller driver for Nuvoton NPCM
+ BMC SoCs.
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
-index 8270da8a4baa..010ee1eadaaa 100644
+index 8270da8a4..010ee1ead 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -5,6 +5,7 @@ obj-y += starfive/
@@ -43,16 +54,17 @@
obj-$(CONFIG_RESET_BCM6345) += reset-bcm6345.o
diff --git a/drivers/reset/reset-aspeed.c b/drivers/reset/reset-aspeed.c
new file mode 100644
-index 000000000000..27d0efab3748
+index 000000000..965984a58
--- /dev/null
+++ b/drivers/reset/reset-aspeed.c
-@@ -0,0 +1,276 @@
+@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2024 ASPEED Technology Inc.
+ */
+
+#include <linux/auxiliary_bus.h>
++#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
@@ -61,6 +73,7 @@
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/reset/aspeed,ast2700-scu.h>
++#include <soc/aspeed/reset-aspeed.h>
+
+#define SCU0_RESET_CTRL1 0x200
+#define SCU0_RESET_CTRL2 0x220
@@ -72,7 +85,8 @@
+
+struct ast2700_reset_signal {
+ bool dedicated_clr; /* dedicated reset clr offset */
-+ u32 offset, bit;
++ u32 offset;
++ u32 bit;
+};
+
+struct aspeed_reset_info {
@@ -203,20 +217,21 @@
+ [SCU1_RESET_PCIE2RST] = { false, SCU1_PCIE3_CTRL, BIT(0) },
+};
+
-+#define to_aspeed_reset(p) container_of(p, struct aspeed_reset, rcdev)
++static inline struct aspeed_reset *to_aspeed_reset(struct reset_controller_dev *rcdev)
++{
++ return container_of(rcdev, struct aspeed_reset, rcdev);
++}
+
+static int aspeed_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct aspeed_reset *rc = to_aspeed_reset(rcdev);
+ void __iomem *reg_offset = rc->base + rc->info->signal[id].offset;
-+ unsigned long flags;
+
+ if (rc->info->signal[id].dedicated_clr) {
+ writel(rc->info->signal[id].bit, reg_offset);
+ } else {
-+ spin_lock_irqsave(&rc->lock, flags);
++ guard(spinlock_irqsave)(&rc->lock);
+ writel(readl(reg_offset) & ~rc->info->signal[id].bit, reg_offset);
-+ spin_unlock_irqrestore(&rc->lock, flags);
+ }
+
+ return 0;
@@ -226,14 +241,12 @@
+{
+ struct aspeed_reset *rc = to_aspeed_reset(rcdev);
+ void __iomem *reg_offset = rc->base + rc->info->signal[id].offset;
-+ unsigned long flags;
+
+ if (rc->info->signal[id].dedicated_clr) {
+ writel(rc->info->signal[id].bit, reg_offset + 0x04);
+ } else {
-+ spin_lock_irqsave(&rc->lock, flags);
++ guard(spinlock_irqsave)(&rc->lock);
+ writel(readl(reg_offset) | rc->info->signal[id].bit, reg_offset);
-+ spin_unlock_irqrestore(&rc->lock, flags);
+ }
+
+ return 0;
@@ -276,7 +289,7 @@
+
+ spin_lock_init(&reset->lock);
+
-+ reset->info = (struct aspeed_reset_info *)(id->driver_data);
++ reset->info = (struct aspeed_reset_info *)id->driver_data;
+ reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.nr_resets = reset->info->nr_resets;
+ reset->rcdev.ops = &aspeed_reset_ops;
@@ -285,9 +298,6 @@
+ reset->rcdev.of_reset_n_cells = 1;
+ reset->base = (void __iomem *)adev->dev.platform_data;
+
-+ if (!reset->base)
-+ return -ENOMEM;
-+
+ dev_set_drvdata(dev, reset);
+ if (reset->info->reset_init)
+ reset->info->reset_init(reset);
@@ -295,6 +305,54 @@
+ return devm_reset_controller_register(dev, &reset->rcdev);
+}
+
++static void aspeed_reset_unregister_adev(void *_adev)
++{
++ struct auxiliary_device *adev = _adev;
++
++ auxiliary_device_delete(adev);
++ auxiliary_device_uninit(adev);
++}
++
++static void aspeed_reset_adev_release(struct device *dev)
++{
++ struct auxiliary_device *adev = to_auxiliary_dev(dev);
++
++ kfree(adev);
++}
++
++int aspeed_reset_controller_register(struct device *clk_dev, void __iomem *base,
++ const char *adev_name)
++{
++ struct auxiliary_device *adev;
++ int ret;
++
++ adev = kzalloc(sizeof(*adev), GFP_KERNEL);
++ if (!adev)
++ return -ENOMEM;
++
++ adev->name = adev_name;
++ adev->dev.parent = clk_dev;
++ adev->dev.release = aspeed_reset_adev_release;
++ adev->id = 666u;
++
++ ret = auxiliary_device_init(adev);
++ if (ret) {
++ kfree(adev);
++ return ret;
++ }
++
++ ret = auxiliary_device_add(adev);
++ if (ret) {
++ auxiliary_device_uninit(adev);
++ return ret;
++ }
++
++ adev->dev.platform_data = (__force void *)base;
++
++ return devm_add_action_or_reset(clk_dev, aspeed_reset_unregister_adev, adev);
++}
++EXPORT_SYMBOL_GPL(aspeed_reset_controller_register);
++
+static const struct aspeed_reset_info ast2700_reset0_info = {
+ .nr_resets = ARRAY_SIZE(ast2700_reset0_signals),
+ .signal = ast2700_reset0_signals,
@@ -307,8 +365,8 @@
+};
+
+static const struct auxiliary_device_id aspeed_reset_ids[] = {
-+ { .name = "clk_ast2700.reset0", .driver_data = (kernel_ulong_t)&ast2700_reset0_info },
-+ { .name = "clk_ast2700.reset1", .driver_data = (kernel_ulong_t)&ast2700_reset1_info },
++ { .name = "reset_aspeed.reset0", .driver_data = (kernel_ulong_t)&ast2700_reset0_info },
++ { .name = "reset_aspeed.reset1", .driver_data = (kernel_ulong_t)&ast2700_reset1_info },
+ { }
+};
+MODULE_DEVICE_TABLE(auxiliary, aspeed_reset_ids);
@@ -318,23 +376,15 @@
+ .id_table = aspeed_reset_ids,
+};
+
-+module_auxiliary_driver(aspeed_reset_driver);
++static int __init rest_aspeed_init(void)
++{
++ return auxiliary_driver_register(&aspeed_reset_driver);
++}
++subsys_initcall(rest_aspeed_init);
+
+MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>");
+MODULE_DESCRIPTION("ASPEED SoC Reset Controller Driver");
+MODULE_LICENSE("GPL");
-diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
-index 7ea5adbf2097..ab7daa7469f1 100644
---- a/drivers/reset/reset-simple.c
-+++ b/drivers/reset/reset-simple.c
-@@ -144,6 +144,7 @@ static const struct of_device_id reset_simple_dt_ids[] = {
- { .compatible = "aspeed,ast2400-lpc-reset" },
- { .compatible = "aspeed,ast2500-lpc-reset" },
- { .compatible = "aspeed,ast2600-lpc-reset" },
-+ { .compatible = "aspeed,ast2700-lpc-reset" },
- { .compatible = "bitmain,bm1880-reset",
- .data = &reset_simple_active_low },
- { .compatible = "brcm,bcm4908-misc-pcie-reset",
--
2.34.1
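Note: reset-aspeed.c in the patch above handles two register styles: signals flagged dedicated_clr use a write-1-to-set register with a paired write-1-to-clear register at offset + 0x04, while the remaining signals are plain read-modify-write done under a spinlock. The sketch below mimics that split against a fake register array; the names and offsets are illustrative, not taken from the driver.

```
#include <stdint.h>
#include <stdio.h>

/* Fake "MMIO" space; the driver works on ioremapped SCU registers instead. */
static uint32_t demo_regs[8];

struct demo_signal {
	int dedicated_clr;
	unsigned int offset;	/* word index into demo_regs */
	uint32_t bit;
};

static void demo_assert(const struct demo_signal *s)
{
	if (s->dedicated_clr)
		demo_regs[s->offset] = s->bit;		/* write-1-to-set register */
	else
		demo_regs[s->offset] &= ~s->bit;	/* RMW; the driver holds its lock here */
}

static void demo_deassert(const struct demo_signal *s)
{
	if (s->dedicated_clr)
		demo_regs[s->offset + 1] = s->bit;	/* paired write-1-to-clear register */
	else
		demo_regs[s->offset] |= s->bit;
}

int main(void)
{
	struct demo_signal w1s = { 1, 0, 1u << 3 };
	struct demo_signal rmw = { 0, 4, 1u << 0 };

	demo_assert(&w1s);
	demo_deassert(&w1s);
	demo_assert(&rmw);
	demo_deassert(&rmw);
	printf("regs[0]=0x%x regs[1]=0x%x regs[4]=0x%x\n",
	       demo_regs[0], demo_regs[1], demo_regs[4]);
	return 0;
}
```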
diff --git a/recipes-kernel/linux/files/0013-Add-net-ftgmac-driver-for-ast2700.patch b/recipes-kernel/linux/files/0013-Add-net-ftgmac-driver-for-ast2700.patch
new file mode 100644
index 0000000..4846c4d
--- /dev/null
+++ b/recipes-kernel/linux/files/0013-Add-net-ftgmac-driver-for-ast2700.patch
@@ -0,0 +1,645 @@
+From 53afa8bcc137d8ec2a8a0c7aa5cb66c160a0b17a Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Fri, 7 Mar 2025 13:48:51 +0800
+Subject: [PATCH] Add net ftgmac driver for ast2700
+
+This is based on aspeed SDK 9.05.
+From linux-aspeed:
+769f62b7baa84d6998723b0ea60280e380183553
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
+---
+ drivers/net/ethernet/faraday/Kconfig | 7 +-
+ drivers/net/ethernet/faraday/ftgmac100.c | 197 +++++++++++++++++------
+ drivers/net/ethernet/faraday/ftgmac100.h | 10 ++
+ drivers/net/mdio/mdio-aspeed.c | 2 +
+ drivers/net/phy/broadcom.c | 45 ++++++
+ 5 files changed, 208 insertions(+), 53 deletions(-)
+
+diff --git a/drivers/net/ethernet/faraday/Kconfig b/drivers/net/ethernet/faraday/Kconfig
+index c699bd6bc..c765663c0 100644
+--- a/drivers/net/ethernet/faraday/Kconfig
++++ b/drivers/net/ethernet/faraday/Kconfig
+@@ -6,7 +6,7 @@
+ config NET_VENDOR_FARADAY
+ bool "Faraday devices"
+ default y
+- depends on ARM || COMPILE_TEST
++ depends on ARM || ARM64 || COMPILE_TEST
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+@@ -28,10 +28,9 @@ config FTMAC100
+
+ config FTGMAC100
+ tristate "Faraday FTGMAC100 Gigabit Ethernet support"
+- depends on ARM || COMPILE_TEST
+- depends on !64BIT || BROKEN
++ depends on ARM || ARM64 || COMPILE_TEST
+ select PHYLIB
+- select MDIO_ASPEED if MACH_ASPEED_G6
++ select MDIO_ASPEED if ARCH_ASPEED
+ select CRC32
+ help
+ This driver supports the FTGMAC100 Gigabit Ethernet controller
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
+index 9135b918d..75658dffa 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.c
++++ b/drivers/net/ethernet/faraday/ftgmac100.c
+@@ -9,6 +9,7 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+ #include <linux/clk.h>
++#include <linux/reset.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/etherdevice.h>
+ #include <linux/ethtool.h>
+@@ -19,11 +20,13 @@
+ #include <linux/of.h>
+ #include <linux/of_mdio.h>
+ #include <linux/phy.h>
++#include <linux/phy/phy.h>
+ #include <linux/platform_device.h>
+ #include <linux/property.h>
+ #include <linux/crc32.h>
+ #include <linux/if_vlan.h>
+ #include <linux/of_net.h>
++#include <linux/phy_fixed.h>
+ #include <net/ip.h>
+ #include <net/ncsi.h>
+
+@@ -50,6 +53,15 @@
+ #define FTGMAC_100MHZ 100000000
+ #define FTGMAC_25MHZ 25000000
+
++/* For NC-SI to register a fixed-link phy device */
++static struct fixed_phy_status ncsi_phy_status = {
++ .link = 1,
++ .speed = SPEED_100,
++ .duplex = DUPLEX_FULL,
++ .pause = 0,
++ .asym_pause = 0
++};
++
+ struct ftgmac100 {
+ /* Registers */
+ struct resource *res;
+@@ -88,6 +100,7 @@ struct ftgmac100 {
+ struct work_struct reset_task;
+ struct mii_bus *mii_bus;
+ struct clk *clk;
++ struct reset_control *rst;
+
+ /* AST2500/AST2600 RMII ref clock gate */
+ struct clk *rclk;
+@@ -109,6 +122,9 @@ struct ftgmac100 {
+ /* Misc */
+ bool need_mac_restart;
+ bool is_aspeed;
++
++ /* AST2700 SGMII */
++ struct phy *sgmii;
+ };
+
+ static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr)
+@@ -255,10 +271,12 @@ static void ftgmac100_init_hw(struct ftgmac100 *priv)
+ iowrite32(reg, priv->base + FTGMAC100_OFFSET_ISR);
+
+ /* Setup RX ring buffer base */
+- iowrite32(priv->rxdes_dma, priv->base + FTGMAC100_OFFSET_RXR_BADR);
++ iowrite32(lower_32_bits(priv->rxdes_dma), priv->base + FTGMAC100_OFFSET_RXR_BADR);
++ iowrite32(upper_32_bits(priv->rxdes_dma), priv->base + FTGMAC100_OFFSET_RXR_BADDR_HIGH);
+
+ /* Setup TX ring buffer base */
+- iowrite32(priv->txdes_dma, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);
++ iowrite32(lower_32_bits(priv->txdes_dma), priv->base + FTGMAC100_OFFSET_NPTXR_BADR);
++ iowrite32(upper_32_bits(priv->txdes_dma), priv->base + FTGMAC100_OFFSET_TXR_BADDR_HIGH);
+
+ /* Configure RX buffer size */
+ iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE),
+@@ -311,6 +329,7 @@ static void ftgmac100_init_hw(struct ftgmac100 *priv)
+ static void ftgmac100_start_hw(struct ftgmac100 *priv)
+ {
+ u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
++ struct phy_device *phydev = priv->netdev->phydev;
+
+ /* Keep the original GMAC and FAST bits */
+ maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE);
+@@ -339,6 +358,10 @@ static void ftgmac100_start_hw(struct ftgmac100 *priv)
+ if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ maccr |= FTGMAC100_MACCR_RM_VLAN;
+
++ if (of_device_is_compatible(priv->dev->of_node, "aspeed,ast2700-mac") &&
++ phydev && phydev->interface == PHY_INTERFACE_MODE_RMII)
++ maccr |= FTGMAC100_MACCR_RMII_ENABLE;
++
+ /* Hit the HW */
+ iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
+ }
+@@ -415,7 +438,9 @@ static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
+ priv->rx_skbs[entry] = skb;
+
+ /* Store DMA address into RX desc */
+- rxdes->rxdes3 = cpu_to_le32(map);
++ rxdes->rxdes2 = cpu_to_le32(FIELD_PREP(FTGMAC100_RXDES2_RXBUF_BADR_HI,
++ upper_32_bits(map)));
++ rxdes->rxdes3 = cpu_to_le32(lower_32_bits(map));
+
+ /* Ensure the above is ordered vs clearing the OWN bit */
+ dma_wmb();
+@@ -541,7 +566,8 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
+ csum_vlan & 0xffff);
+
+ /* Tear down DMA mapping, do necessary cache management */
+- map = le32_to_cpu(rxdes->rxdes3);
++ map = le32_to_cpu(rxdes->rxdes3) |
++ ((le32_to_cpu(rxdes->rxdes2) & FTGMAC100_RXDES2_RXBUF_BADR_HI) << 16);
+
+ #if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU)
+ /* When we don't have an iommu, we can save cycles by not
+@@ -553,7 +579,6 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
+ dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
+ #endif
+
+-
+ /* Resplenish rx ring */
+ ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
+ priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
+@@ -572,7 +597,7 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
+ (*processed)++;
+ return true;
+
+- drop:
++drop:
+ /* Clean rxdes0 (which resets own bit) */
+ rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
+ priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
+@@ -618,9 +643,12 @@ static void ftgmac100_free_tx_packet(struct ftgmac100 *priv,
+ struct ftgmac100_txdes *txdes,
+ u32 ctl_stat)
+ {
+- dma_addr_t map = le32_to_cpu(txdes->txdes3);
++ dma_addr_t map;
+ size_t len;
+
++ map = le32_to_cpu(txdes->txdes3) |
++ ((le32_to_cpu(txdes->txdes2) & FTGMAC100_TXDES2_TXBUF_BADR_HI) << 16);
++
+ if (ctl_stat & FTGMAC100_TXDES0_FTS) {
+ len = skb_headlen(skb);
+ dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE);
+@@ -656,6 +684,11 @@ static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
+ ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
+ txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
+
++ /* Ensure the descriptor config is visible before setting the tx
++ * pointer.
++ */
++ smp_wmb();
++
+ priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);
+
+ return true;
+@@ -769,7 +802,9 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
+ f_ctl_stat |= FTGMAC100_TXDES0_FTS;
+ if (nfrags == 0)
+ f_ctl_stat |= FTGMAC100_TXDES0_LTS;
+- txdes->txdes3 = cpu_to_le32(map);
++ txdes->txdes2 = cpu_to_le32(FIELD_PREP(FTGMAC100_TXDES2_TXBUF_BADR_HI,
++ upper_32_bits((ulong)map)));
++ txdes->txdes3 = cpu_to_le32(lower_32_bits(map));
+ txdes->txdes1 = cpu_to_le32(csum_vlan);
+
+ /* Next descriptor */
+@@ -797,7 +832,9 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
+ ctl_stat |= FTGMAC100_TXDES0_LTS;
+ txdes->txdes0 = cpu_to_le32(ctl_stat);
+ txdes->txdes1 = 0;
+- txdes->txdes3 = cpu_to_le32(map);
++ txdes->txdes2 = cpu_to_le32(FIELD_PREP(FTGMAC100_TXDES2_TXBUF_BADR_HI,
++ upper_32_bits((ulong)map)));
++ txdes->txdes3 = cpu_to_le32(lower_32_bits(map));
+
+ /* Next one */
+ pointer = ftgmac100_next_tx_pointer(priv, pointer);
+@@ -809,6 +846,11 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
+ dma_wmb();
+ first->txdes0 = cpu_to_le32(f_ctl_stat);
+
++ /* Ensure the descriptor config is visible before setting the tx
++ * pointer.
++ */
++ smp_wmb();
++
+ /* Update next TX pointer */
+ priv->tx_pointer = pointer;
+
+@@ -829,7 +871,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
+
+ return NETDEV_TX_OK;
+
+- dma_err:
++dma_err:
+ if (net_ratelimit())
+ netdev_err(netdev, "map tx fragment failed\n");
+
+@@ -851,7 +893,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
+ * last fragment, so we know ftgmac100_free_tx_packet()
+ * hasn't freed the skb yet.
+ */
+- drop:
++drop:
+ /* Drop the packet */
+ dev_kfree_skb_any(skb);
+ netdev->stats.tx_dropped++;
+@@ -867,7 +909,10 @@ static void ftgmac100_free_buffers(struct ftgmac100 *priv)
+ for (i = 0; i < priv->rx_q_entries; i++) {
+ struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
+ struct sk_buff *skb = priv->rx_skbs[i];
+- dma_addr_t map = le32_to_cpu(rxdes->rxdes3);
++ dma_addr_t map;
++
++ map = le32_to_cpu(rxdes->rxdes3) |
++ ((le32_to_cpu(rxdes->rxdes2) & FTGMAC100_RXDES2_RXBUF_BADR_HI) << 16);
+
+ if (!skb)
+ continue;
+@@ -966,7 +1011,9 @@ static void ftgmac100_init_rings(struct ftgmac100 *priv)
+ for (i = 0; i < priv->rx_q_entries; i++) {
+ rxdes = &priv->rxdes[i];
+ rxdes->rxdes0 = 0;
+- rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma);
++ rxdes->rxdes2 = cpu_to_le32(FIELD_PREP(FTGMAC100_RXDES2_RXBUF_BADR_HI,
++ upper_32_bits(priv->rx_scratch_dma)));
++ rxdes->rxdes3 = cpu_to_le32(lower_32_bits(priv->rx_scratch_dma));
+ }
+ /* Mark the end of the ring */
+ rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
+@@ -1229,7 +1276,6 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
+ more = ftgmac100_rx_packet(priv, &work_done);
+ } while (more && work_done < budget);
+
+-
+ /* The interrupt is telling us to kick the MAC back to life
+ * after an RX overflow
+ */
+@@ -1319,7 +1365,6 @@ static void ftgmac100_reset(struct ftgmac100 *priv)
+ if (priv->mii_bus)
+ mutex_lock(&priv->mii_bus->mdio_lock);
+
+-
+ /* Check if the interface is still up */
+ if (!netif_running(netdev))
+ goto bail;
+@@ -1344,7 +1389,7 @@ static void ftgmac100_reset(struct ftgmac100 *priv)
+ ftgmac100_init_all(priv, true);
+
+ netdev_dbg(netdev, "Reset done !\n");
+- bail:
++bail:
+ if (priv->mii_bus)
+ mutex_unlock(&priv->mii_bus->mdio_lock);
+ if (netdev->phydev)
+@@ -1418,7 +1463,6 @@ static void ftgmac100_adjust_link(struct net_device *netdev)
+
+ if (netdev->phydev)
+ mutex_lock(&netdev->phydev->lock);
+-
+ }
+
+ static int ftgmac100_mii_probe(struct net_device *netdev)
+@@ -1531,7 +1575,8 @@ static int ftgmac100_open(struct net_device *netdev)
+ if (netdev->phydev) {
+ /* If we have a PHY, start polling */
+ phy_start(netdev->phydev);
+- } else if (priv->use_ncsi) {
++ }
++ if (priv->use_ncsi) {
+ /* If using NC-SI, set our carrier on and start the stack */
+ netif_carrier_on(netdev);
+
+@@ -1543,15 +1588,16 @@ static int ftgmac100_open(struct net_device *netdev)
+
+ return 0;
+
+- err_ncsi:
++err_ncsi:
++ phy_stop(netdev->phydev);
+ napi_disable(&priv->napi);
+ netif_stop_queue(netdev);
+- err_alloc:
++err_alloc:
+ ftgmac100_free_buffers(priv);
+ free_irq(netdev->irq, netdev);
+- err_irq:
++err_irq:
+ netif_napi_del(&priv->napi);
+- err_hw:
++err_hw:
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
+ ftgmac100_free_rings(priv);
+ return err;
+@@ -1577,7 +1623,7 @@ static int ftgmac100_stop(struct net_device *netdev)
+ netif_napi_del(&priv->napi);
+ if (netdev->phydev)
+ phy_stop(netdev->phydev);
+- else if (priv->use_ncsi)
++ if (priv->use_ncsi)
+ ncsi_stop_dev(priv->ndev);
+
+ ftgmac100_stop_hw(priv);
+@@ -1708,13 +1754,21 @@ static int ftgmac100_setup_mdio(struct net_device *netdev)
+ static void ftgmac100_phy_disconnect(struct net_device *netdev)
+ {
+ struct ftgmac100 *priv = netdev_priv(netdev);
++ struct phy_device *phydev = netdev->phydev;
+
+- if (!netdev->phydev)
+- return;
++ if (priv->sgmii) {
++ phy_exit(priv->sgmii);
++ devm_phy_put(priv->dev, priv->sgmii);
++ }
+
+- phy_disconnect(netdev->phydev);
+- if (of_phy_is_fixed_link(priv->dev->of_node))
+- of_phy_deregister_fixed_link(priv->dev->of_node);
++ if (phydev) {
++ phy_disconnect(phydev);
++ if (of_phy_is_fixed_link(priv->dev->of_node))
++ of_phy_deregister_fixed_link(priv->dev->of_node);
++
++ if (priv->use_ncsi)
++ fixed_phy_unregister(phydev);
++ }
+ }
+
+ static void ftgmac100_destroy_mdio(struct net_device *netdev)
+@@ -1792,6 +1846,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
+ struct resource *res;
+ int irq;
+ struct net_device *netdev;
++ struct phy_device *phydev;
+ struct ftgmac100 *priv;
+ struct device_node *np;
+ int err = 0;
+@@ -1856,7 +1911,8 @@ static int ftgmac100_probe(struct platform_device *pdev)
+ np = pdev->dev.of_node;
+ if (np && (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
+ of_device_is_compatible(np, "aspeed,ast2500-mac") ||
+- of_device_is_compatible(np, "aspeed,ast2600-mac"))) {
++ of_device_is_compatible(np, "aspeed,ast2600-mac") ||
++ of_device_is_compatible(np, "aspeed,ast2700-mac"))) {
+ priv->rxdes0_edorr_mask = BIT(30);
+ priv->txdes0_edotr_mask = BIT(30);
+ priv->is_aspeed = true;
+@@ -1879,35 +1935,30 @@ static int ftgmac100_probe(struct platform_device *pdev)
+ err = -EINVAL;
+ goto err_phy_connect;
+ }
+- } else if (np && of_phy_is_fixed_link(np)) {
+- struct phy_device *phy;
+
+- err = of_phy_register_fixed_link(np);
+- if (err) {
+- dev_err(&pdev->dev, "Failed to register fixed PHY\n");
++ phydev = fixed_phy_register(PHY_POLL, &ncsi_phy_status, np);
++ if (IS_ERR(phydev)) {
++ dev_err(&pdev->dev, "failed to register fixed PHY device\n");
++ err = PTR_ERR(phydev);
+ goto err_phy_connect;
+ }
+-
+- phy = of_phy_get_and_connect(priv->netdev, np,
+- &ftgmac100_adjust_link);
+- if (!phy) {
+- dev_err(&pdev->dev, "Failed to connect to fixed PHY\n");
+- of_phy_deregister_fixed_link(np);
+- err = -EINVAL;
++ err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link,
++ PHY_INTERFACE_MODE_RMII);
++ if (err) {
++ dev_err(&pdev->dev, "Connecting PHY failed\n");
+ goto err_phy_connect;
+ }
+-
+- /* Display what we found */
+- phy_attached_info(phy);
+- } else if (np && of_get_property(np, "phy-handle", NULL)) {
++ } else if (np && (of_phy_is_fixed_link(np) ||
++ of_get_property(np, "phy-handle", NULL))) {
+ struct phy_device *phy;
+
+ /* Support "mdio"/"phy" child nodes for ast2400/2500 with
+ * an embedded MDIO controller. Automatically scan the DTS for
+ * available PHYs and register them.
+ */
+- if (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
+- of_device_is_compatible(np, "aspeed,ast2500-mac")) {
++ if (of_get_property(np, "phy-handle", NULL) &&
++ (of_device_is_compatible(np, "aspeed,ast2400-mac") ||
++ of_device_is_compatible(np, "aspeed,ast2500-mac"))) {
+ err = ftgmac100_setup_mdio(netdev);
+ if (err)
+ goto err_setup_mdio;
+@@ -1944,7 +1995,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
+ dev_err(priv->dev, "MII probe failed!\n");
+ goto err_ncsi_dev;
+ }
+-
+ }
+
+ if (priv->is_aspeed) {
+@@ -1953,9 +2003,52 @@ static int ftgmac100_probe(struct platform_device *pdev)
+ goto err_phy_connect;
+
+ /* Disable ast2600 problematic HW arbitration */
+- if (of_device_is_compatible(np, "aspeed,ast2600-mac"))
++ if (of_device_is_compatible(np, "aspeed,ast2600-mac") ||
++ of_device_is_compatible(np, "aspeed,ast2700-mac"))
+ iowrite32(FTGMAC100_TM_DEFAULT,
+ priv->base + FTGMAC100_OFFSET_TM);
++
++ if (of_device_is_compatible(np, "aspeed,ast2700-mac")) {
++ if (netdev->phydev->interface == PHY_INTERFACE_MODE_SGMII) {
++ priv->sgmii = devm_phy_optional_get(&pdev->dev, "sgmii");
++ if (IS_ERR(priv->sgmii)) {
++ dev_err(priv->dev, "Failed to get sgmii phy (%ld)\n",
++ PTR_ERR(priv->sgmii));
++ err = PTR_ERR(priv->sgmii);
++ goto err_register_netdev;
++ }
++ }
++ }
++ }
++
++ priv->rst = devm_reset_control_get_optional_exclusive(priv->dev, NULL);
++ if (IS_ERR(priv->rst)) {
++ err = PTR_ERR(priv->rst);
++ goto err_register_netdev;
++ }
++
++ err = reset_control_assert(priv->rst);
++ if (err) {
++ dev_err(priv->dev, "Failed to reset mac (%d)\n", err);
++ goto err_register_netdev;
++ }
++ usleep_range(10000, 20000);
++ err = reset_control_deassert(priv->rst);
++ if (err) {
++ dev_err(priv->dev, "Failed to deassert mac reset (%d)\n", err);
++ goto err_register_netdev;
++ }
++
++ if (priv->sgmii) {
++ /* The default is Nway on SGMII. */
++ err = phy_init(priv->sgmii);
++ if (err) {
++ dev_err(priv->dev, "Failed to init sgmii phy\n");
++ goto err_register_netdev;
++ }
++ /* If using fixed link in dts, sgmii need to be forced */
++ if (of_phy_is_fixed_link(np))
++ phy_set_speed(priv->sgmii, netdev->phydev->speed);
+ }
+
+ /* Default ring sizes */
+@@ -1982,6 +2075,12 @@ static int ftgmac100_probe(struct platform_device *pdev)
+ netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
+ netdev->features |= netdev->hw_features;
+
++ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
++ if (err) {
++ dev_err(&pdev->dev, "64-bit DMA enable failed\n");
++ goto err_register_netdev;
++ }
++
+ /* register network device */
+ err = register_netdev(netdev);
+ if (err) {
+diff --git a/drivers/net/ethernet/faraday/ftgmac100.h b/drivers/net/ethernet/faraday/ftgmac100.h
+index 4968f6f0b..f8c30a09d 100644
+--- a/drivers/net/ethernet/faraday/ftgmac100.h
++++ b/drivers/net/ethernet/faraday/ftgmac100.h
+@@ -57,6 +57,13 @@
+ #define FTGMAC100_OFFSET_RX_RUNT 0xc0
+ #define FTGMAC100_OFFSET_RX_CRCER_FTL 0xc4
+ #define FTGMAC100_OFFSET_RX_COL_LOST 0xc8
++/* reserved 0xcc - 0x174 */
++#define FTGMAC100_OFFSET_TXR_BADDR_LOW 0x178 /* ast2700 */
++#define FTGMAC100_OFFSET_TXR_BADDR_HIGH 0x17c /* ast2700 */
++#define FTGMAC100_OFFSET_HPTXR_BADDR_LOW 0x180 /* ast2700 */
++#define FTGMAC100_OFFSET_HPTXR_BADDR_HIGH 0x184 /* ast2700 */
++#define FTGMAC100_OFFSET_RXR_BADDR_LOW 0x188 /* ast2700 */
++#define FTGMAC100_OFFSET_RXR_BADDR_HIGH 0x18C /* ast2700 */
+
+ /*
+ * Interrupt status register & interrupt enable register
+@@ -166,6 +173,7 @@
+ #define FTGMAC100_MACCR_RX_MULTIPKT (1 << 16)
+ #define FTGMAC100_MACCR_RX_BROADPKT (1 << 17)
+ #define FTGMAC100_MACCR_DISCARD_CRCERR (1 << 18)
++#define FTGMAC100_MACCR_RMII_ENABLE BIT(20) /* defined in ast2700 */
+ #define FTGMAC100_MACCR_FAST_MODE (1 << 19)
+ #define FTGMAC100_MACCR_SW_RST (1 << 31)
+
+@@ -225,6 +233,7 @@ struct ftgmac100_txdes {
+ #define FTGMAC100_TXDES1_TX2FIC (1 << 30)
+ #define FTGMAC100_TXDES1_TXIC (1 << 31)
+
++#define FTGMAC100_TXDES2_TXBUF_BADR_HI GENMASK(18, 16)
+ /*
+ * Receive descriptor, aligned to 16 bytes
+ */
+@@ -271,4 +280,5 @@ struct ftgmac100_rxdes {
+ #define FTGMAC100_RXDES1_UDP_CHKSUM_ERR (1 << 26)
+ #define FTGMAC100_RXDES1_IP_CHKSUM_ERR (1 << 27)
+
++#define FTGMAC100_RXDES2_RXBUF_BADR_HI GENMASK(18, 16)
+ #endif /* __FTGMAC100_H */
+diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
+index c727103c8..836571ae7 100644
+--- a/drivers/net/mdio/mdio-aspeed.c
++++ b/drivers/net/mdio/mdio-aspeed.c
+@@ -62,6 +62,8 @@ static int aspeed_mdio_op(struct mii_bus *bus, u8 st, u8 op, u8 phyad, u8 regad,
+ | FIELD_PREP(ASPEED_MDIO_DATA_MIIRDATA, data);
+
+ iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);
++ /* Add dummy read to ensure triggering mdio controller */
++ (void)ioread32(ctx->base + ASPEED_MDIO_CTRL);
+
+ return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
+ !(ctrl & ASPEED_MDIO_CTRL_FIRE),
+diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
+index 04b2e6eeb..81edddc3b 100644
+--- a/drivers/net/phy/broadcom.c
++++ b/drivers/net/phy/broadcom.c
+@@ -439,6 +439,37 @@ static int bcm54xx_config_init(struct phy_device *phydev)
+ return 0;
+ }
+
++static int bcm5221_config_init(struct phy_device *phydev)
++{
++ int reg, err;
++
++ reg = phy_read(phydev, MII_BCM5221_TEST);
++ reg |= MII_BCM5221_TEST_ENABLE_SHADOWS;
++ err = phy_write(phydev, MII_BCM5221_TEST, reg);
++ if (err < 0)
++ return err;
++
++ reg = phy_read(phydev, MII_BCM5221_SHDOW_AUX_STAT2);
++ reg |= MII_BCM5221_SHDOW_AUX_STAT2_APD;
++ err = phy_write(phydev, MII_BCM5221_SHDOW_AUX_STAT2, reg);
++ if (err < 0)
++ return err;
++
++ reg = phy_read(phydev, MII_BCM5221_SHDOW_AUX_MODE4);
++ reg |= MII_BCM5221_SHDOW_AUX_MODE4_CLKLOPWR;
++ err = phy_write(phydev, MII_BCM5221_SHDOW_AUX_MODE4, reg);
++ if (err < 0)
++ return err;
++
++ reg = phy_read(phydev, MII_BCM5221_TEST);
++ reg &= ~MII_BCM5221_TEST_ENABLE_SHADOWS;
++ err = phy_write(phydev, MII_BCM5221_TEST, reg);
++ if (err < 0)
++ return err;
++
++ return 0;
++}
++
+ static int bcm54xx_iddq_set(struct phy_device *phydev, bool enable)
+ {
+ int ret = 0;
+@@ -1276,6 +1307,19 @@ static struct phy_driver broadcom_drivers[] = {
+ .config_intr = bcm_phy_config_intr,
+ .handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
++}, {
++ .phy_id = PHY_ID_BCM5221,
++ .phy_id_mask = 0xfffffff0,
++ .name = "Broadcom BCM5221",
++ /* PHY_GBIT_FEATURES */
++ .get_sset_count = bcm_phy_get_sset_count,
++ .get_strings = bcm_phy_get_strings,
++ .get_stats = bcm54xx_get_stats,
++ .probe = bcm54xx_phy_probe,
++ .config_init = bcm5221_config_init,
++ .config_intr = bcm_phy_config_intr,
++ .handle_interrupt = bcm_phy_handle_interrupt,
++ .link_change_notify = bcm54xx_link_change_notify,
+ } };
+
+ module_phy_driver(broadcom_drivers);
+@@ -1296,6 +1340,7 @@ static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
+ { PHY_ID_BCM50610M, 0xfffffff0 },
+ { PHY_ID_BCM57780, 0xfffffff0 },
+ { PHY_ID_BCMAC131, 0xfffffff0 },
++ { PHY_ID_BCM5221, 0xfffffff0},
+ { PHY_ID_BCM5241, 0xfffffff0 },
+ { PHY_ID_BCM5395, 0xfffffff0 },
+ { PHY_ID_BCM53125, 0xfffffff0 },
+--
+2.34.1
+
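Note: the broadcom.c hunk above brackets the BCM5221 setup with a shadow-register window: set the enable bit in the TEST register, adjust the shadowed auxiliary registers, then clear the enable bit again. The sketch below shows that open/modify/close sequence against a fake PHY register array; the DEMO_* register numbers and bit positions are placeholders, not the real BCM5221 map.

```
#include <stdint.h>
#include <stdio.h>

#define DEMO_REG_TEST		0x1f
#define DEMO_TEST_SHADOW_EN	(1u << 7)
#define DEMO_REG_AUX_STAT2	0x1b
#define DEMO_AUX_STAT2_APD	(1u << 5)

static uint16_t demo_phy_regs[32];

static uint16_t demo_phy_read(unsigned int reg)        { return demo_phy_regs[reg]; }
static void demo_phy_write(unsigned int reg, uint16_t v) { demo_phy_regs[reg] = v; }

static void demo_rmw_set(unsigned int reg, uint16_t bits)
{
	demo_phy_write(reg, demo_phy_read(reg) | bits);
}

static int demo_bcm5221_config(void)
{
	/* Open the shadow window. */
	demo_rmw_set(DEMO_REG_TEST, DEMO_TEST_SHADOW_EN);

	/* Touch the shadowed register(s) while the window is open. */
	demo_rmw_set(DEMO_REG_AUX_STAT2, DEMO_AUX_STAT2_APD);

	/* Close the window so normal register accesses behave again. */
	demo_phy_write(DEMO_REG_TEST,
		       demo_phy_read(DEMO_REG_TEST) & ~DEMO_TEST_SHADOW_EN);
	return 0;
}

int main(void)
{
	demo_bcm5221_config();
	printf("TEST=0x%04x AUX_STAT2=0x%04x\n",
	       demo_phy_read(DEMO_REG_TEST), demo_phy_read(DEMO_REG_AUX_STAT2));
	return 0;
}
```

The real config_init also checks every phy_read()/phy_write() return value; the sketch omits that to keep the bracketing pattern visible.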
diff --git a/recipes-kernel/linux/files/0013-Enable-sgpio-driver.patch b/recipes-kernel/linux/files/0013-Enable-sgpio-driver.patch
deleted file mode 100644
index e01455d..0000000
--- a/recipes-kernel/linux/files/0013-Enable-sgpio-driver.patch
+++ /dev/null
@@ -1,639 +0,0 @@
-From 7a5cf97e1bad7022e5f42c083d28b0dced29bd02 Mon Sep 17 00:00:00 2001
-From: Shao-Chieh Chao <jieh.sc.chao@mail.foxconn.com>
-Date: Fri, 20 Dec 2024 09:15:23 +0800
-Subject: [PATCH] enable sgpio driver
-
----
- drivers/gpio/gpio-aspeed-sgpio.c | 428 ++++++++++++++++++++++++++-----
- 1 file changed, 363 insertions(+), 65 deletions(-)
-
-diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
-index 72755fee6..bf47a2489 100644
---- a/drivers/gpio/gpio-aspeed-sgpio.c
-+++ b/drivers/gpio/gpio-aspeed-sgpio.c
-@@ -18,7 +18,42 @@
- #include <linux/spinlock.h>
- #include <linux/string.h>
-
--#define ASPEED_SGPIO_CTRL 0x54
-+#define SGPIO_G7_IRQ_STS_BASE 0x40
-+#define SGPIO_G7_IRQ_STS_OFFSET(x) (SGPIO_G7_IRQ_STS_BASE + (x) * 0x4)
-+#define SGPIO_G7_CTRL_REG_BASE 0x80
-+#define SGPIO_G7_CTRL_REG_OFFSET(x) (SGPIO_G7_CTRL_REG_BASE + (x) * 0x4)
-+#define SGPIO_G7_OUT_DATA BIT(0)
-+#define SGPIO_G7_IRQ_EN BIT(2)
-+#define SGPIO_G7_IRQ_TYPE0 BIT(3)
-+#define SGPIO_G7_IRQ_TYPE1 BIT(4)
-+#define SGPIO_G7_IRQ_TYPE2 BIT(5)
-+#define SGPIO_G7_RST_TOLERANCE BIT(6)
-+#define SGPIO_G7_INPUT_MASK BIT(9)
-+#define SGPIO_G7_HW_BYPASS_EN BIT(10)
-+#define SGPIO_G7_HW_IN_SEL BIT(11)
-+#define SGPIO_G7_IRQ_STS BIT(12)
-+#define SGPIO_G7_IN_DATA BIT(13)
-+#define SGPIO_G7_PARALLEL_IN_DATA BIT(14)
-+
-+static inline u32 field_get(u32 _mask, u32 _val)
-+{
-+ return (((_val) & (_mask)) >> (ffs(_mask) - 1));
-+}
-+
-+static inline u32 field_prep(u32 _mask, u32 _val)
-+{
-+ return (((_val) << (ffs(_mask) - 1)) & (_mask));
-+}
-+
-+static inline void ast_write_bits(void __iomem *addr, u32 mask, u32 val)
-+{
-+ iowrite32((ioread32(addr) & ~(mask)) | field_prep(mask, val), addr);
-+}
-+
-+static inline void ast_clr_bits(void __iomem *addr, u32 mask)
-+{
-+ iowrite32((ioread32(addr) & ~(mask)), addr);
-+}
-
- #define ASPEED_SGPIO_CLK_DIV_MASK GENMASK(31, 16)
- #define ASPEED_SGPIO_ENABLE BIT(0)
-@@ -26,6 +61,9 @@
-
- struct aspeed_sgpio_pdata {
- const u32 pin_mask;
-+ const u16 ctrl_reg;
-+ const int version;
-+ const bool slave;
- };
-
- struct aspeed_sgpio {
-@@ -35,6 +73,7 @@ struct aspeed_sgpio {
- raw_spinlock_t lock;
- void __iomem *base;
- int irq;
-+ int version;
- };
-
- struct aspeed_sgpio_bank {
-@@ -169,16 +208,23 @@ static bool aspeed_sgpio_is_input(unsigned int offset)
- static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset)
- {
- struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
-- const struct aspeed_sgpio_bank *bank = to_bank(offset);
-+ const struct aspeed_sgpio_bank *bank;
-+ void __iomem *addr = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(offset >> 1);
- unsigned long flags;
- enum aspeed_sgpio_reg reg;
- int rc = 0;
-
- raw_spin_lock_irqsave(&gpio->lock, flags);
-
-- reg = aspeed_sgpio_is_input(offset) ? reg_val : reg_rdata;
-- rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset));
--
-+ if (gpio->version == 7) {
-+ reg = aspeed_sgpio_is_input(offset) ? SGPIO_G7_IN_DATA :
-+ SGPIO_G7_OUT_DATA;
-+ rc = !!(field_get(reg, ioread32(addr)));
-+ } else {
-+ bank = to_bank(offset);
-+ reg = aspeed_sgpio_is_input(offset) ? reg_val : reg_rdata;
-+ rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset));
-+ }
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
-
- return rc;
-@@ -211,6 +257,30 @@ static int sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val)
- return 0;
- }
-
-+static int sgpio_g7_set_value(struct gpio_chip *gc, unsigned int offset,
-+ int val)
-+{
-+ struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
-+ void __iomem *addr = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(offset >> 1);
-+ u32 reg = 0;
-+
-+ if (aspeed_sgpio_is_input(offset))
-+ return -EINVAL;
-+
-+ // Ensure the serial out value control by the software.
-+ ast_clr_bits(addr, SGPIO_G7_HW_BYPASS_EN | SGPIO_G7_HW_IN_SEL);
-+ reg = ioread32(addr);
-+
-+ if (val)
-+ reg |= SGPIO_G7_OUT_DATA;
-+ else
-+ reg &= ~SGPIO_G7_OUT_DATA;
-+
-+ iowrite32(reg, addr);
-+
-+ return 0;
-+}
-+
- static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
- {
- struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
-@@ -218,7 +288,10 @@ static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
-
- raw_spin_lock_irqsave(&gpio->lock, flags);
-
-- sgpio_set_value(gc, offset, val);
-+ if (gpio->version == 7)
-+ sgpio_g7_set_value(gc, offset, val);
-+ else
-+ sgpio_set_value(gc, offset, val);
-
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
- }
-@@ -238,7 +311,10 @@ static int aspeed_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int v
- * error-out in sgpio_set_value if this isn't an output GPIO */
-
- raw_spin_lock_irqsave(&gpio->lock, flags);
-- rc = sgpio_set_value(gc, offset, val);
-+ if (gpio->version == 7)
-+ rc = sgpio_g7_set_value(gc, offset, val);
-+ else
-+ rc = sgpio_set_value(gc, offset, val);
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
-
- return rc;
-@@ -265,6 +341,19 @@ static void irqd_to_aspeed_sgpio_data(struct irq_data *d,
- *bit = GPIO_BIT(*offset);
- }
-
-+static void irqd_to_aspeed_g7_sgpio_data(struct irq_data *d,
-+ struct aspeed_sgpio **gpio,
-+ int *offset)
-+{
-+ struct aspeed_sgpio *internal;
-+
-+ *offset = irqd_to_hwirq(d);
-+ internal = irq_data_get_irq_chip_data(d);
-+ WARN_ON(!internal);
-+
-+ *gpio = internal;
-+}
-+
- static void aspeed_sgpio_irq_ack(struct irq_data *d)
- {
- const struct aspeed_sgpio_bank *bank;
-@@ -285,6 +374,24 @@ static void aspeed_sgpio_irq_ack(struct irq_data *d)
- raw_spin_unlock_irqrestore(&gpio->lock, flags);
- }
-
-+static void aspeed_g7_sgpio_irq_ack(struct irq_data *d)
-+{
-+ struct aspeed_sgpio *gpio;
-+ unsigned long flags;
-+ void __iomem *status_addr;
-+ int offset;
-+
-+ irqd_to_aspeed_g7_sgpio_data(d, &gpio, &offset);
-+
-+ status_addr = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(offset >> 1);
-+
-+ raw_spin_lock_irqsave(&gpio->lock, flags);
-+
-+ ast_write_bits(status_addr, SGPIO_G7_IRQ_STS, 1);
-+
-+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
-+}
-+
- static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set)
- {
- const struct aspeed_sgpio_bank *bank;
-@@ -320,6 +427,32 @@ static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set)
-
- }
-
-+static void aspeed_g7_sgpio_irq_set_mask(struct irq_data *d, bool set)
-+{
-+ struct aspeed_sgpio *gpio;
-+ unsigned long flags;
-+ void __iomem *addr;
-+ int offset;
-+
-+ irqd_to_aspeed_g7_sgpio_data(d, &gpio, &offset);
-+ addr = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(offset >> 1);
-+
-+ /* Unmasking the IRQ */
-+ if (set)
-+ gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(d));
-+
-+ raw_spin_lock_irqsave(&gpio->lock, flags);
-+ if (set)
-+ ast_write_bits(addr, SGPIO_G7_IRQ_EN, 1);
-+ else
-+ ast_clr_bits(addr, SGPIO_G7_IRQ_EN);
-+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
-+
-+ /* Masking the IRQ */
-+ if (!set)
-+ gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(d));
-+}
-+
- static void aspeed_sgpio_irq_mask(struct irq_data *d)
- {
- aspeed_sgpio_irq_set_mask(d, false);
-@@ -330,6 +463,16 @@ static void aspeed_sgpio_irq_unmask(struct irq_data *d)
- aspeed_sgpio_irq_set_mask(d, true);
- }
-
-+static void aspeed_g7_sgpio_irq_mask(struct irq_data *d)
-+{
-+ aspeed_g7_sgpio_irq_set_mask(d, false);
-+}
-+
-+static void aspeed_g7_sgpio_irq_unmask(struct irq_data *d)
-+{
-+ aspeed_g7_sgpio_irq_set_mask(d, true);
-+}
-+
- static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type)
- {
- u32 type0 = 0;
-@@ -390,6 +533,53 @@ static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type)
- return 0;
- }
-
-+static int aspeed_g7_sgpio_set_type(struct irq_data *d, unsigned int type)
-+{
-+ u32 type0 = 0;
-+ u32 type1 = 0;
-+ u32 type2 = 0;
-+ irq_flow_handler_t handler;
-+ struct aspeed_sgpio *gpio;
-+ unsigned long flags;
-+ void __iomem *addr;
-+ int offset;
-+
-+ irqd_to_aspeed_g7_sgpio_data(d, &gpio, &offset);
-+ addr = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(offset >> 1);
-+
-+ switch (type & IRQ_TYPE_SENSE_MASK) {
-+ case IRQ_TYPE_EDGE_BOTH:
-+ type2 = 1;
-+ fallthrough;
-+ case IRQ_TYPE_EDGE_RISING:
-+ type0 = 1;
-+ fallthrough;
-+ case IRQ_TYPE_EDGE_FALLING:
-+ handler = handle_edge_irq;
-+ break;
-+ case IRQ_TYPE_LEVEL_HIGH:
-+ type0 = 1;
-+ fallthrough;
-+ case IRQ_TYPE_LEVEL_LOW:
-+ type1 = 1;
-+ handler = handle_level_irq;
-+ break;
-+ default:
-+ return -EINVAL;
-+ }
-+
-+ raw_spin_lock_irqsave(&gpio->lock, flags);
-+
-+ ast_write_bits(addr, SGPIO_G7_IRQ_TYPE2, type2);
-+ ast_write_bits(addr, SGPIO_G7_IRQ_TYPE1, type1);
-+ ast_write_bits(addr, SGPIO_G7_IRQ_TYPE0, type0);
-+
-+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
-+
-+ irq_set_handler_locked(d, handler);
-+ return 0;
-+}
-+
- static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
- {
- struct gpio_chip *gc = irq_desc_get_handler_data(desc);
-@@ -412,6 +602,29 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
- chained_irq_exit(ic, desc);
- }
-
-+static void aspeed_g7_sgpio_irq_handler(struct irq_desc *desc)
-+{
-+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
-+ struct irq_chip *ic = irq_desc_get_chip(desc);
-+ struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
-+ unsigned int i, p, banks;
-+ unsigned long reg;
-+ void __iomem *addr;
-+
-+ chained_irq_enter(ic, desc);
-+
-+ banks = DIV_ROUND_UP(gpio->chip.ngpio >> 2, 32);
-+ for (i = 0; i < banks; i++) {
-+ addr = gpio->base + SGPIO_G7_IRQ_STS_OFFSET(i);
-+
-+ reg = ioread32(addr);
-+
-+ for_each_set_bit(p, &reg, 32)
-+ generic_handle_domain_irq(gc->irq.domain, (i * 32 + p) * 2);
-+ }
-+ chained_irq_exit(ic, desc);
-+}
-+
- static void aspeed_sgpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
- {
- const struct aspeed_sgpio_bank *bank;
-@@ -423,6 +636,15 @@ static void aspeed_sgpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
- seq_printf(p, dev_name(gpio->dev));
- }
-
-+static void aspeed_g7_sgpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
-+{
-+ struct aspeed_sgpio *gpio;
-+ int offset;
-+
-+ irqd_to_aspeed_g7_sgpio_data(d, &gpio, &offset);
-+ seq_printf(p, dev_name(gpio->dev));
-+}
-+
- static const struct irq_chip aspeed_sgpio_irq_chip = {
- .irq_ack = aspeed_sgpio_irq_ack,
- .irq_mask = aspeed_sgpio_irq_mask,
-@@ -433,6 +655,16 @@ static const struct irq_chip aspeed_sgpio_irq_chip = {
- GPIOCHIP_IRQ_RESOURCE_HELPERS,
- };
-
-+static const struct irq_chip aspeed_g7_sgpio_irq_chip = {
-+ .irq_ack = aspeed_g7_sgpio_irq_ack,
-+ .irq_mask = aspeed_g7_sgpio_irq_mask,
-+ .irq_unmask = aspeed_g7_sgpio_irq_unmask,
-+ .irq_set_type = aspeed_g7_sgpio_set_type,
-+ .irq_print_chip = aspeed_g7_sgpio_irq_print_chip,
-+ .flags = IRQCHIP_IMMUTABLE,
-+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
-+};
-+
- static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
- struct platform_device *pdev)
- {
-@@ -446,41 +678,49 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
-
- gpio->irq = rc;
-
-- /* Disable IRQ and clear Interrupt status registers for all SGPIO Pins. */
-- for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
-- bank = &aspeed_sgpio_banks[i];
-- /* disable irq enable bits */
-- iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_enable));
-- /* clear status bits */
-- iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_status));
-- }
-+ if (gpio->version != 7)
-+ /* Disable IRQ and clear Interrupt status registers for all SGPIO Pins. */
-+ for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
-+ bank = &aspeed_sgpio_banks[i];
-+ /* disable irq enable bits */
-+ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_enable));
-+ /* clear status bits */
-+ iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_status));
-+ }
-
- irq = &gpio->chip.irq;
-- gpio_irq_chip_set_chip(irq, &aspeed_sgpio_irq_chip);
-+ if (gpio->version == 7)
-+ gpio_irq_chip_set_chip(irq, &aspeed_g7_sgpio_irq_chip);
-+ else
-+ gpio_irq_chip_set_chip(irq, &aspeed_sgpio_irq_chip);
- irq->init_valid_mask = aspeed_sgpio_irq_init_valid_mask;
- irq->handler = handle_bad_irq;
- irq->default_type = IRQ_TYPE_NONE;
-- irq->parent_handler = aspeed_sgpio_irq_handler;
-+ irq->parent_handler = (gpio->version == 7) ?
-+ aspeed_g7_sgpio_irq_handler :
-+ aspeed_sgpio_irq_handler;
- irq->parent_handler_data = gpio;
- irq->parents = &gpio->irq;
- irq->num_parents = 1;
-
-- /* Apply default IRQ settings */
-- for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
-- bank = &aspeed_sgpio_banks[i];
-- /* set falling or level-low irq */
-- iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type0));
-- /* trigger type is edge */
-- iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type1));
-- /* single edge trigger */
-- iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type2));
-- }
-+ if (gpio->version != 7)
-+ /* Apply default IRQ settings */
-+ for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
-+ bank = &aspeed_sgpio_banks[i];
-+ /* set falling or level-low irq */
-+ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type0));
-+ /* trigger type is edge */
-+ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type1));
-+ /* single edge trigger */
-+ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type2));
-+ }
-
- return 0;
- }
-
- static const struct aspeed_sgpio_pdata ast2400_sgpio_pdata = {
- .pin_mask = GENMASK(9, 6),
-+ .ctrl_reg = 0x54,
- };
-
- static int aspeed_sgpio_reset_tolerance(struct gpio_chip *chip,
-@@ -509,38 +749,82 @@ static int aspeed_sgpio_reset_tolerance(struct gpio_chip *chip,
- return 0;
- }
-
-+static int aspeed_g7_sgpio_reset_tolerance(struct gpio_chip *chip,
-+ unsigned int offset, bool enable)
-+{
-+ struct aspeed_sgpio *gpio = gpiochip_get_data(chip);
-+ unsigned long flags;
-+ void __iomem *reg;
-+
-+ reg = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(offset >> 1);
-+
-+ raw_spin_lock_irqsave(&gpio->lock, flags);
-+
-+ if (enable)
-+ ast_write_bits(reg, SGPIO_G7_RST_TOLERANCE, 1);
-+ else
-+ ast_clr_bits(reg, SGPIO_G7_RST_TOLERANCE);
-+
-+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
-+
-+ return 0;
-+}
-+
- static int aspeed_sgpio_set_config(struct gpio_chip *chip, unsigned int offset,
- unsigned long config)
- {
-+ struct aspeed_sgpio *gpio = gpiochip_get_data(chip);
- unsigned long param = pinconf_to_config_param(config);
- u32 arg = pinconf_to_config_argument(config);
-
-- if (param == PIN_CONFIG_PERSIST_STATE)
-- return aspeed_sgpio_reset_tolerance(chip, offset, arg);
-+ if (param == PIN_CONFIG_PERSIST_STATE) {
-+ if (gpio->version == 7)
-+ return aspeed_g7_sgpio_reset_tolerance(chip, offset,
-+ arg);
-+ else
-+ return aspeed_sgpio_reset_tolerance(chip, offset, arg);
-+ }
-
- return -ENOTSUPP;
- }
-
- static const struct aspeed_sgpio_pdata ast2600_sgpiom_pdata = {
- .pin_mask = GENMASK(10, 6),
-+ .ctrl_reg = 0x54,
-+};
-+
-+static const struct aspeed_sgpio_pdata ast2700_sgpiom_pdata = {
-+ .pin_mask = GENMASK(11, 6),
-+ .ctrl_reg = 0x0,
-+ .version = 7,
-+};
-+
-+static const struct aspeed_sgpio_pdata ast2700_sgpios_pdata = {
-+ .pin_mask = GENMASK(11, 6),
-+ .ctrl_reg = 0x0,
-+ .version = 7,
-+ .slave = 1,
- };
-
- static const struct of_device_id aspeed_sgpio_of_table[] = {
- { .compatible = "aspeed,ast2400-sgpio", .data = &ast2400_sgpio_pdata, },
- { .compatible = "aspeed,ast2500-sgpio", .data = &ast2400_sgpio_pdata, },
- { .compatible = "aspeed,ast2600-sgpiom", .data = &ast2600_sgpiom_pdata, },
-+ { .compatible = "aspeed,ast2700-sgpiom", .data = &ast2700_sgpiom_pdata, },
-+ { .compatible = "aspeed,ast2700-sgpios", .data = &ast2700_sgpios_pdata, },
- {}
- };
-
- MODULE_DEVICE_TABLE(of, aspeed_sgpio_of_table);
-
--static int __init aspeed_sgpio_probe(struct platform_device *pdev)
-+static int aspeed_sgpio_probe(struct platform_device *pdev)
- {
- u32 nr_gpios, sgpio_freq, sgpio_clk_div, gpio_cnt_regval, pin_mask;
- const struct aspeed_sgpio_pdata *pdata;
- struct aspeed_sgpio *gpio;
- unsigned long apb_freq;
-- int rc;
-+ void __iomem *addr;
-+ int rc, i;
-
- gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
- if (!gpio)
-@@ -557,6 +841,7 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
- return -EINVAL;
-
- pin_mask = pdata->pin_mask;
-+ gpio->version = pdata->version;
-
- rc = device_property_read_u32(&pdev->dev, "ngpios", &nr_gpios);
- if (rc < 0) {
-@@ -568,41 +853,53 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
- return -EINVAL;
- }
-
-- rc = device_property_read_u32(&pdev->dev, "bus-frequency", &sgpio_freq);
-- if (rc < 0) {
-- dev_err(&pdev->dev, "Could not read bus-frequency property\n");
-- return -EINVAL;
-- }
--
-- gpio->pclk = devm_clk_get(&pdev->dev, NULL);
-- if (IS_ERR(gpio->pclk)) {
-- dev_err(&pdev->dev, "devm_clk_get failed\n");
-- return PTR_ERR(gpio->pclk);
-+ if (gpio->version == 7 && !pdata->slave)
-+ for (i = 0; i < nr_gpios; i++) {
-+ addr = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(i);
-+ ast_clr_bits(addr, SGPIO_G7_HW_BYPASS_EN |
-+ SGPIO_G7_HW_IN_SEL);
-+ }
-+
-+ if (!pdata->slave) {
-+ rc = device_property_read_u32(&pdev->dev, "bus-frequency", &sgpio_freq);
-+ if (rc < 0) {
-+ dev_err(&pdev->dev, "Could not read bus-frequency property\n");
-+ return -EINVAL;
-+ }
-+
-+ gpio->pclk = devm_clk_get(&pdev->dev, NULL);
-+ if (IS_ERR(gpio->pclk)) {
-+ dev_err(&pdev->dev, "devm_clk_get failed\n");
-+ return PTR_ERR(gpio->pclk);
-+ }
-+
-+ apb_freq = clk_get_rate(gpio->pclk);
-+
-+ /*
-+ * From the datasheet,
-+ * SGPIO period = 1/PCLK * 2 * (GPIO254[31:16] + 1)
-+ * period = 2 * (GPIO254[31:16] + 1) / PCLK
-+ * frequency = 1 / (2 * (GPIO254[31:16] + 1) / PCLK)
-+ * frequency = PCLK / (2 * (GPIO254[31:16] + 1))
-+ * frequency * 2 * (GPIO254[31:16] + 1) = PCLK
-+ * GPIO254[31:16] = PCLK / (frequency * 2) - 1
-+ */
-+ if (sgpio_freq == 0)
-+ return -EINVAL;
-+
-+ sgpio_clk_div = (apb_freq / (sgpio_freq * 2)) - 1;
-+
-+ if (sgpio_clk_div > (1 << 16) - 1)
-+ return -EINVAL;
-+
-+ gpio_cnt_regval = ((nr_gpios / 8) << ASPEED_SGPIO_PINS_SHIFT) & pin_mask;
-+ iowrite32(FIELD_PREP(ASPEED_SGPIO_CLK_DIV_MASK, sgpio_clk_div) |
-+ gpio_cnt_regval | ASPEED_SGPIO_ENABLE,
-+ gpio->base + pdata->ctrl_reg);
-+ } else {
-+ iowrite32(ASPEED_SGPIO_ENABLE, gpio->base + pdata->ctrl_reg);
- }
-
-- apb_freq = clk_get_rate(gpio->pclk);
--
-- /*
-- * From the datasheet,
-- * SGPIO period = 1/PCLK * 2 * (GPIO254[31:16] + 1)
-- * period = 2 * (GPIO254[31:16] + 1) / PCLK
-- * frequency = 1 / (2 * (GPIO254[31:16] + 1) / PCLK)
-- * frequency = PCLK / (2 * (GPIO254[31:16] + 1))
-- * frequency * 2 * (GPIO254[31:16] + 1) = PCLK
-- * GPIO254[31:16] = PCLK / (frequency * 2) - 1
-- */
-- if (sgpio_freq == 0)
-- return -EINVAL;
--
-- sgpio_clk_div = (apb_freq / (sgpio_freq * 2)) - 1;
--
-- if (sgpio_clk_div > (1 << 16) - 1)
-- return -EINVAL;
--
-- gpio_cnt_regval = ((nr_gpios / 8) << ASPEED_SGPIO_PINS_SHIFT) & pin_mask;
-- iowrite32(FIELD_PREP(ASPEED_SGPIO_CLK_DIV_MASK, sgpio_clk_div) | gpio_cnt_regval |
-- ASPEED_SGPIO_ENABLE, gpio->base + ASPEED_SGPIO_CTRL);
--
- raw_spin_lock_init(&gpio->lock);
-
- gpio->chip.parent = &pdev->dev;
-@@ -629,11 +926,12 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
- }
-
- static struct platform_driver aspeed_sgpio_driver = {
-+ .probe = aspeed_sgpio_probe,
- .driver = {
- .name = KBUILD_MODNAME,
- .of_match_table = aspeed_sgpio_of_table,
- },
- };
-
--module_platform_driver_probe(aspeed_sgpio_driver, aspeed_sgpio_probe);
-+module_platform_driver(aspeed_sgpio_driver);
- MODULE_DESCRIPTION("Aspeed Serial GPIO Driver");
---
-2.34.1
-
diff --git a/recipes-kernel/linux/files/0012-Enable-gpio-controller-driver.patch b/recipes-kernel/linux/files/0014-Add-gpio-sgpio-driver-for-ast2700.patch
similarity index 64%
rename from recipes-kernel/linux/files/0012-Enable-gpio-controller-driver.patch
rename to recipes-kernel/linux/files/0014-Add-gpio-sgpio-driver-for-ast2700.patch
index 9a10ddd..81a1785 100644
--- a/recipes-kernel/linux/files/0012-Enable-gpio-controller-driver.patch
+++ b/recipes-kernel/linux/files/0014-Add-gpio-sgpio-driver-for-ast2700.patch
@@ -1,14 +1,647 @@
-From 53786a9d6d830cc51a1f7d96f97c19482b0ee898 Mon Sep 17 00:00:00 2001
-From: Shao-Chieh Chao <jieh.sc.chao@mail.foxconn.com>
-Date: Fri, 29 Nov 2024 17:41:35 +0800
-Subject: [PATCH] update gpio controller driver
+From 2dff37cda6c1f009fb464a488814d7a2ed3a4dac Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Fri, 7 Mar 2025 13:53:12 +0800
+Subject: [PATCH] Add gpio sgpio driver for ast2700
+This is based on Aspeed SDK 9.05.
+From linux-aspeed:
+769f62b7baa84d6998723b0ea60280e380183553
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
---
- drivers/gpio/gpio-aspeed.c | 621 +++++++++++++++++++++++--------------
- 1 file changed, 392 insertions(+), 229 deletions(-)
+ drivers/gpio/gpio-aspeed-sgpio.c | 428 +++++++++++++++++----
+ drivers/gpio/gpio-aspeed.c | 621 +++++++++++++++++++------------
+ 2 files changed, 755 insertions(+), 294 deletions(-)
+diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
+index 72755fee6..bf47a2489 100644
+--- a/drivers/gpio/gpio-aspeed-sgpio.c
++++ b/drivers/gpio/gpio-aspeed-sgpio.c
+@@ -18,7 +18,42 @@
+ #include <linux/spinlock.h>
+ #include <linux/string.h>
+
+-#define ASPEED_SGPIO_CTRL 0x54
++#define SGPIO_G7_IRQ_STS_BASE 0x40
++#define SGPIO_G7_IRQ_STS_OFFSET(x) (SGPIO_G7_IRQ_STS_BASE + (x) * 0x4)
++#define SGPIO_G7_CTRL_REG_BASE 0x80
++#define SGPIO_G7_CTRL_REG_OFFSET(x) (SGPIO_G7_CTRL_REG_BASE + (x) * 0x4)
++#define SGPIO_G7_OUT_DATA BIT(0)
++#define SGPIO_G7_IRQ_EN BIT(2)
++#define SGPIO_G7_IRQ_TYPE0 BIT(3)
++#define SGPIO_G7_IRQ_TYPE1 BIT(4)
++#define SGPIO_G7_IRQ_TYPE2 BIT(5)
++#define SGPIO_G7_RST_TOLERANCE BIT(6)
++#define SGPIO_G7_INPUT_MASK BIT(9)
++#define SGPIO_G7_HW_BYPASS_EN BIT(10)
++#define SGPIO_G7_HW_IN_SEL BIT(11)
++#define SGPIO_G7_IRQ_STS BIT(12)
++#define SGPIO_G7_IN_DATA BIT(13)
++#define SGPIO_G7_PARALLEL_IN_DATA BIT(14)
++
++static inline u32 field_get(u32 _mask, u32 _val)
++{
++ return (((_val) & (_mask)) >> (ffs(_mask) - 1));
++}
++
++static inline u32 field_prep(u32 _mask, u32 _val)
++{
++ return (((_val) << (ffs(_mask) - 1)) & (_mask));
++}
++
++static inline void ast_write_bits(void __iomem *addr, u32 mask, u32 val)
++{
++ iowrite32((ioread32(addr) & ~(mask)) | field_prep(mask, val), addr);
++}
++
++static inline void ast_clr_bits(void __iomem *addr, u32 mask)
++{
++ iowrite32((ioread32(addr) & ~(mask)), addr);
++}
+
+ #define ASPEED_SGPIO_CLK_DIV_MASK GENMASK(31, 16)
+ #define ASPEED_SGPIO_ENABLE BIT(0)
+@@ -26,6 +61,9 @@
+
+ struct aspeed_sgpio_pdata {
+ const u32 pin_mask;
++ const u16 ctrl_reg;
++ const int version;
++ const bool slave;
+ };
+
+ struct aspeed_sgpio {
+@@ -35,6 +73,7 @@ struct aspeed_sgpio {
+ raw_spinlock_t lock;
+ void __iomem *base;
+ int irq;
++ int version;
+ };
+
+ struct aspeed_sgpio_bank {
+@@ -169,16 +208,23 @@ static bool aspeed_sgpio_is_input(unsigned int offset)
+ static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset)
+ {
+ struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
+- const struct aspeed_sgpio_bank *bank = to_bank(offset);
++ const struct aspeed_sgpio_bank *bank;
++ void __iomem *addr = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(offset >> 1);
+ unsigned long flags;
+ enum aspeed_sgpio_reg reg;
+ int rc = 0;
+
+ raw_spin_lock_irqsave(&gpio->lock, flags);
+
+- reg = aspeed_sgpio_is_input(offset) ? reg_val : reg_rdata;
+- rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset));
+-
++ if (gpio->version == 7) {
++ reg = aspeed_sgpio_is_input(offset) ? SGPIO_G7_IN_DATA :
++ SGPIO_G7_OUT_DATA;
++ rc = !!(field_get(reg, ioread32(addr)));
++ } else {
++ bank = to_bank(offset);
++ reg = aspeed_sgpio_is_input(offset) ? reg_val : reg_rdata;
++ rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset));
++ }
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return rc;
+@@ -211,6 +257,30 @@ static int sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val)
+ return 0;
+ }
+
++static int sgpio_g7_set_value(struct gpio_chip *gc, unsigned int offset,
++ int val)
++{
++ struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
++ void __iomem *addr = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(offset >> 1);
++ u32 reg = 0;
++
++ if (aspeed_sgpio_is_input(offset))
++ return -EINVAL;
++
++ // Ensure the serial out value control by the software.
++ ast_clr_bits(addr, SGPIO_G7_HW_BYPASS_EN | SGPIO_G7_HW_IN_SEL);
++ reg = ioread32(addr);
++
++ if (val)
++ reg |= SGPIO_G7_OUT_DATA;
++ else
++ reg &= ~SGPIO_G7_OUT_DATA;
++
++ iowrite32(reg, addr);
++
++ return 0;
++}
++
+ static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+ {
+ struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
+@@ -218,7 +288,10 @@ static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val)
+
+ raw_spin_lock_irqsave(&gpio->lock, flags);
+
+- sgpio_set_value(gc, offset, val);
++ if (gpio->version == 7)
++ sgpio_g7_set_value(gc, offset, val);
++ else
++ sgpio_set_value(gc, offset, val);
+
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
+ }
+@@ -238,7 +311,10 @@ static int aspeed_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int v
+ * error-out in sgpio_set_value if this isn't an output GPIO */
+
+ raw_spin_lock_irqsave(&gpio->lock, flags);
+- rc = sgpio_set_value(gc, offset, val);
++ if (gpio->version == 7)
++ rc = sgpio_g7_set_value(gc, offset, val);
++ else
++ rc = sgpio_set_value(gc, offset, val);
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
+
+ return rc;
+@@ -265,6 +341,19 @@ static void irqd_to_aspeed_sgpio_data(struct irq_data *d,
+ *bit = GPIO_BIT(*offset);
+ }
+
++static void irqd_to_aspeed_g7_sgpio_data(struct irq_data *d,
++ struct aspeed_sgpio **gpio,
++ int *offset)
++{
++ struct aspeed_sgpio *internal;
++
++ *offset = irqd_to_hwirq(d);
++ internal = irq_data_get_irq_chip_data(d);
++ WARN_ON(!internal);
++
++ *gpio = internal;
++}
++
+ static void aspeed_sgpio_irq_ack(struct irq_data *d)
+ {
+ const struct aspeed_sgpio_bank *bank;
+@@ -285,6 +374,24 @@ static void aspeed_sgpio_irq_ack(struct irq_data *d)
+ raw_spin_unlock_irqrestore(&gpio->lock, flags);
+ }
+
++static void aspeed_g7_sgpio_irq_ack(struct irq_data *d)
++{
++ struct aspeed_sgpio *gpio;
++ unsigned long flags;
++ void __iomem *status_addr;
++ int offset;
++
++ irqd_to_aspeed_g7_sgpio_data(d, &gpio, &offset);
++
++ status_addr = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(offset >> 1);
++
++ raw_spin_lock_irqsave(&gpio->lock, flags);
++
++ ast_write_bits(status_addr, SGPIO_G7_IRQ_STS, 1);
++
++ raw_spin_unlock_irqrestore(&gpio->lock, flags);
++}
++
+ static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set)
+ {
+ const struct aspeed_sgpio_bank *bank;
+@@ -320,6 +427,32 @@ static void aspeed_sgpio_irq_set_mask(struct irq_data *d, bool set)
+
+ }
+
++static void aspeed_g7_sgpio_irq_set_mask(struct irq_data *d, bool set)
++{
++ struct aspeed_sgpio *gpio;
++ unsigned long flags;
++ void __iomem *addr;
++ int offset;
++
++ irqd_to_aspeed_g7_sgpio_data(d, &gpio, &offset);
++ addr = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(offset >> 1);
++
++ /* Unmasking the IRQ */
++ if (set)
++ gpiochip_enable_irq(&gpio->chip, irqd_to_hwirq(d));
++
++ raw_spin_lock_irqsave(&gpio->lock, flags);
++ if (set)
++ ast_write_bits(addr, SGPIO_G7_IRQ_EN, 1);
++ else
++ ast_clr_bits(addr, SGPIO_G7_IRQ_EN);
++ raw_spin_unlock_irqrestore(&gpio->lock, flags);
++
++ /* Masking the IRQ */
++ if (!set)
++ gpiochip_disable_irq(&gpio->chip, irqd_to_hwirq(d));
++}
++
+ static void aspeed_sgpio_irq_mask(struct irq_data *d)
+ {
+ aspeed_sgpio_irq_set_mask(d, false);
+@@ -330,6 +463,16 @@ static void aspeed_sgpio_irq_unmask(struct irq_data *d)
+ aspeed_sgpio_irq_set_mask(d, true);
+ }
+
++static void aspeed_g7_sgpio_irq_mask(struct irq_data *d)
++{
++ aspeed_g7_sgpio_irq_set_mask(d, false);
++}
++
++static void aspeed_g7_sgpio_irq_unmask(struct irq_data *d)
++{
++ aspeed_g7_sgpio_irq_set_mask(d, true);
++}
++
+ static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type)
+ {
+ u32 type0 = 0;
+@@ -390,6 +533,53 @@ static int aspeed_sgpio_set_type(struct irq_data *d, unsigned int type)
+ return 0;
+ }
+
++static int aspeed_g7_sgpio_set_type(struct irq_data *d, unsigned int type)
++{
++ u32 type0 = 0;
++ u32 type1 = 0;
++ u32 type2 = 0;
++ irq_flow_handler_t handler;
++ struct aspeed_sgpio *gpio;
++ unsigned long flags;
++ void __iomem *addr;
++ int offset;
++
++ irqd_to_aspeed_g7_sgpio_data(d, &gpio, &offset);
++ addr = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(offset >> 1);
++
++ switch (type & IRQ_TYPE_SENSE_MASK) {
++ case IRQ_TYPE_EDGE_BOTH:
++ type2 = 1;
++ fallthrough;
++ case IRQ_TYPE_EDGE_RISING:
++ type0 = 1;
++ fallthrough;
++ case IRQ_TYPE_EDGE_FALLING:
++ handler = handle_edge_irq;
++ break;
++ case IRQ_TYPE_LEVEL_HIGH:
++ type0 = 1;
++ fallthrough;
++ case IRQ_TYPE_LEVEL_LOW:
++ type1 = 1;
++ handler = handle_level_irq;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ raw_spin_lock_irqsave(&gpio->lock, flags);
++
++ ast_write_bits(addr, SGPIO_G7_IRQ_TYPE2, type2);
++ ast_write_bits(addr, SGPIO_G7_IRQ_TYPE1, type1);
++ ast_write_bits(addr, SGPIO_G7_IRQ_TYPE0, type0);
++
++ raw_spin_unlock_irqrestore(&gpio->lock, flags);
++
++ irq_set_handler_locked(d, handler);
++ return 0;
++}
++
+ static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
+ {
+ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
+@@ -412,6 +602,29 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
+ chained_irq_exit(ic, desc);
+ }
+
++static void aspeed_g7_sgpio_irq_handler(struct irq_desc *desc)
++{
++ struct gpio_chip *gc = irq_desc_get_handler_data(desc);
++ struct irq_chip *ic = irq_desc_get_chip(desc);
++ struct aspeed_sgpio *gpio = gpiochip_get_data(gc);
++ unsigned int i, p, banks;
++ unsigned long reg;
++ void __iomem *addr;
++
++ chained_irq_enter(ic, desc);
++
++ banks = DIV_ROUND_UP(gpio->chip.ngpio >> 2, 32);
++ for (i = 0; i < banks; i++) {
++ addr = gpio->base + SGPIO_G7_IRQ_STS_OFFSET(i);
++
++ reg = ioread32(addr);
++
++ for_each_set_bit(p, &reg, 32)
++ generic_handle_domain_irq(gc->irq.domain, (i * 32 + p) * 2);
++ }
++ chained_irq_exit(ic, desc);
++}
++
+ static void aspeed_sgpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
+ {
+ const struct aspeed_sgpio_bank *bank;
+@@ -423,6 +636,15 @@ static void aspeed_sgpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
+ seq_printf(p, dev_name(gpio->dev));
+ }
+
++static void aspeed_g7_sgpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
++{
++ struct aspeed_sgpio *gpio;
++ int offset;
++
++ irqd_to_aspeed_g7_sgpio_data(d, &gpio, &offset);
++ seq_printf(p, dev_name(gpio->dev));
++}
++
+ static const struct irq_chip aspeed_sgpio_irq_chip = {
+ .irq_ack = aspeed_sgpio_irq_ack,
+ .irq_mask = aspeed_sgpio_irq_mask,
+@@ -433,6 +655,16 @@ static const struct irq_chip aspeed_sgpio_irq_chip = {
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+ };
+
++static const struct irq_chip aspeed_g7_sgpio_irq_chip = {
++ .irq_ack = aspeed_g7_sgpio_irq_ack,
++ .irq_mask = aspeed_g7_sgpio_irq_mask,
++ .irq_unmask = aspeed_g7_sgpio_irq_unmask,
++ .irq_set_type = aspeed_g7_sgpio_set_type,
++ .irq_print_chip = aspeed_g7_sgpio_irq_print_chip,
++ .flags = IRQCHIP_IMMUTABLE,
++ GPIOCHIP_IRQ_RESOURCE_HELPERS,
++};
++
+ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
+ struct platform_device *pdev)
+ {
+@@ -446,41 +678,49 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio,
+
+ gpio->irq = rc;
+
+- /* Disable IRQ and clear Interrupt status registers for all SGPIO Pins. */
+- for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
+- bank = &aspeed_sgpio_banks[i];
+- /* disable irq enable bits */
+- iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_enable));
+- /* clear status bits */
+- iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_status));
+- }
++ if (gpio->version != 7)
++ /* Disable IRQ and clear Interrupt status registers for all SGPIO Pins. */
++ for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
++ bank = &aspeed_sgpio_banks[i];
++ /* disable irq enable bits */
++ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_enable));
++ /* clear status bits */
++ iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_status));
++ }
+
+ irq = &gpio->chip.irq;
+- gpio_irq_chip_set_chip(irq, &aspeed_sgpio_irq_chip);
++ if (gpio->version == 7)
++ gpio_irq_chip_set_chip(irq, &aspeed_g7_sgpio_irq_chip);
++ else
++ gpio_irq_chip_set_chip(irq, &aspeed_sgpio_irq_chip);
+ irq->init_valid_mask = aspeed_sgpio_irq_init_valid_mask;
+ irq->handler = handle_bad_irq;
+ irq->default_type = IRQ_TYPE_NONE;
+- irq->parent_handler = aspeed_sgpio_irq_handler;
++ irq->parent_handler = (gpio->version == 7) ?
++ aspeed_g7_sgpio_irq_handler :
++ aspeed_sgpio_irq_handler;
+ irq->parent_handler_data = gpio;
+ irq->parents = &gpio->irq;
+ irq->num_parents = 1;
+
+- /* Apply default IRQ settings */
+- for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
+- bank = &aspeed_sgpio_banks[i];
+- /* set falling or level-low irq */
+- iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type0));
+- /* trigger type is edge */
+- iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type1));
+- /* single edge trigger */
+- iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type2));
+- }
++ if (gpio->version != 7)
++ /* Apply default IRQ settings */
++ for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) {
++ bank = &aspeed_sgpio_banks[i];
++ /* set falling or level-low irq */
++ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type0));
++ /* trigger type is edge */
++ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type1));
++ /* single edge trigger */
++ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type2));
++ }
+
+ return 0;
+ }
+
+ static const struct aspeed_sgpio_pdata ast2400_sgpio_pdata = {
+ .pin_mask = GENMASK(9, 6),
++ .ctrl_reg = 0x54,
+ };
+
+ static int aspeed_sgpio_reset_tolerance(struct gpio_chip *chip,
+@@ -509,38 +749,82 @@ static int aspeed_sgpio_reset_tolerance(struct gpio_chip *chip,
+ return 0;
+ }
+
++static int aspeed_g7_sgpio_reset_tolerance(struct gpio_chip *chip,
++ unsigned int offset, bool enable)
++{
++ struct aspeed_sgpio *gpio = gpiochip_get_data(chip);
++ unsigned long flags;
++ void __iomem *reg;
++
++ reg = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(offset >> 1);
++
++ raw_spin_lock_irqsave(&gpio->lock, flags);
++
++ if (enable)
++ ast_write_bits(reg, SGPIO_G7_RST_TOLERANCE, 1);
++ else
++ ast_clr_bits(reg, SGPIO_G7_RST_TOLERANCE);
++
++ raw_spin_unlock_irqrestore(&gpio->lock, flags);
++
++ return 0;
++}
++
+ static int aspeed_sgpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+ {
++ struct aspeed_sgpio *gpio = gpiochip_get_data(chip);
+ unsigned long param = pinconf_to_config_param(config);
+ u32 arg = pinconf_to_config_argument(config);
+
+- if (param == PIN_CONFIG_PERSIST_STATE)
+- return aspeed_sgpio_reset_tolerance(chip, offset, arg);
++ if (param == PIN_CONFIG_PERSIST_STATE) {
++ if (gpio->version == 7)
++ return aspeed_g7_sgpio_reset_tolerance(chip, offset,
++ arg);
++ else
++ return aspeed_sgpio_reset_tolerance(chip, offset, arg);
++ }
+
+ return -ENOTSUPP;
+ }
+
+ static const struct aspeed_sgpio_pdata ast2600_sgpiom_pdata = {
+ .pin_mask = GENMASK(10, 6),
++ .ctrl_reg = 0x54,
++};
++
++static const struct aspeed_sgpio_pdata ast2700_sgpiom_pdata = {
++ .pin_mask = GENMASK(11, 6),
++ .ctrl_reg = 0x0,
++ .version = 7,
++};
++
++static const struct aspeed_sgpio_pdata ast2700_sgpios_pdata = {
++ .pin_mask = GENMASK(11, 6),
++ .ctrl_reg = 0x0,
++ .version = 7,
++ .slave = 1,
+ };
+
+ static const struct of_device_id aspeed_sgpio_of_table[] = {
+ { .compatible = "aspeed,ast2400-sgpio", .data = &ast2400_sgpio_pdata, },
+ { .compatible = "aspeed,ast2500-sgpio", .data = &ast2400_sgpio_pdata, },
+ { .compatible = "aspeed,ast2600-sgpiom", .data = &ast2600_sgpiom_pdata, },
++ { .compatible = "aspeed,ast2700-sgpiom", .data = &ast2700_sgpiom_pdata, },
++ { .compatible = "aspeed,ast2700-sgpios", .data = &ast2700_sgpios_pdata, },
+ {}
+ };
+
+ MODULE_DEVICE_TABLE(of, aspeed_sgpio_of_table);
+
+-static int __init aspeed_sgpio_probe(struct platform_device *pdev)
++static int aspeed_sgpio_probe(struct platform_device *pdev)
+ {
+ u32 nr_gpios, sgpio_freq, sgpio_clk_div, gpio_cnt_regval, pin_mask;
+ const struct aspeed_sgpio_pdata *pdata;
+ struct aspeed_sgpio *gpio;
+ unsigned long apb_freq;
+- int rc;
++ void __iomem *addr;
++ int rc, i;
+
+ gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+@@ -557,6 +841,7 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
+ return -EINVAL;
+
+ pin_mask = pdata->pin_mask;
++ gpio->version = pdata->version;
+
+ rc = device_property_read_u32(&pdev->dev, "ngpios", &nr_gpios);
+ if (rc < 0) {
+@@ -568,41 +853,53 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+
+- rc = device_property_read_u32(&pdev->dev, "bus-frequency", &sgpio_freq);
+- if (rc < 0) {
+- dev_err(&pdev->dev, "Could not read bus-frequency property\n");
+- return -EINVAL;
+- }
+-
+- gpio->pclk = devm_clk_get(&pdev->dev, NULL);
+- if (IS_ERR(gpio->pclk)) {
+- dev_err(&pdev->dev, "devm_clk_get failed\n");
+- return PTR_ERR(gpio->pclk);
++ if (gpio->version == 7 && !pdata->slave)
++ for (i = 0; i < nr_gpios; i++) {
++ addr = gpio->base + SGPIO_G7_CTRL_REG_OFFSET(i);
++ ast_clr_bits(addr, SGPIO_G7_HW_BYPASS_EN |
++ SGPIO_G7_HW_IN_SEL);
++ }
++
++ if (!pdata->slave) {
++ rc = device_property_read_u32(&pdev->dev, "bus-frequency", &sgpio_freq);
++ if (rc < 0) {
++ dev_err(&pdev->dev, "Could not read bus-frequency property\n");
++ return -EINVAL;
++ }
++
++ gpio->pclk = devm_clk_get(&pdev->dev, NULL);
++ if (IS_ERR(gpio->pclk)) {
++ dev_err(&pdev->dev, "devm_clk_get failed\n");
++ return PTR_ERR(gpio->pclk);
++ }
++
++ apb_freq = clk_get_rate(gpio->pclk);
++
++ /*
++ * From the datasheet,
++ * SGPIO period = 1/PCLK * 2 * (GPIO254[31:16] + 1)
++ * period = 2 * (GPIO254[31:16] + 1) / PCLK
++ * frequency = 1 / (2 * (GPIO254[31:16] + 1) / PCLK)
++ * frequency = PCLK / (2 * (GPIO254[31:16] + 1))
++ * frequency * 2 * (GPIO254[31:16] + 1) = PCLK
++ * GPIO254[31:16] = PCLK / (frequency * 2) - 1
++ */
++ if (sgpio_freq == 0)
++ return -EINVAL;
++
++ sgpio_clk_div = (apb_freq / (sgpio_freq * 2)) - 1;
++
++ if (sgpio_clk_div > (1 << 16) - 1)
++ return -EINVAL;
++
++ gpio_cnt_regval = ((nr_gpios / 8) << ASPEED_SGPIO_PINS_SHIFT) & pin_mask;
++ iowrite32(FIELD_PREP(ASPEED_SGPIO_CLK_DIV_MASK, sgpio_clk_div) |
++ gpio_cnt_regval | ASPEED_SGPIO_ENABLE,
++ gpio->base + pdata->ctrl_reg);
++ } else {
++ iowrite32(ASPEED_SGPIO_ENABLE, gpio->base + pdata->ctrl_reg);
+ }
+
+- apb_freq = clk_get_rate(gpio->pclk);
+-
+- /*
+- * From the datasheet,
+- * SGPIO period = 1/PCLK * 2 * (GPIO254[31:16] + 1)
+- * period = 2 * (GPIO254[31:16] + 1) / PCLK
+- * frequency = 1 / (2 * (GPIO254[31:16] + 1) / PCLK)
+- * frequency = PCLK / (2 * (GPIO254[31:16] + 1))
+- * frequency * 2 * (GPIO254[31:16] + 1) = PCLK
+- * GPIO254[31:16] = PCLK / (frequency * 2) - 1
+- */
+- if (sgpio_freq == 0)
+- return -EINVAL;
+-
+- sgpio_clk_div = (apb_freq / (sgpio_freq * 2)) - 1;
+-
+- if (sgpio_clk_div > (1 << 16) - 1)
+- return -EINVAL;
+-
+- gpio_cnt_regval = ((nr_gpios / 8) << ASPEED_SGPIO_PINS_SHIFT) & pin_mask;
+- iowrite32(FIELD_PREP(ASPEED_SGPIO_CLK_DIV_MASK, sgpio_clk_div) | gpio_cnt_regval |
+- ASPEED_SGPIO_ENABLE, gpio->base + ASPEED_SGPIO_CTRL);
+-
+ raw_spin_lock_init(&gpio->lock);
+
+ gpio->chip.parent = &pdev->dev;
+@@ -629,11 +926,12 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev)
+ }
+
+ static struct platform_driver aspeed_sgpio_driver = {
++ .probe = aspeed_sgpio_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = aspeed_sgpio_of_table,
+ },
+ };
+
+-module_platform_driver_probe(aspeed_sgpio_driver, aspeed_sgpio_probe);
++module_platform_driver(aspeed_sgpio_driver);
+ MODULE_DESCRIPTION("Aspeed Serial GPIO Driver");
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
-index 58f107194fda..cc3d23e88658 100644
+index 58f107194..cc3d23e88 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -30,6 +30,27 @@
diff --git a/recipes-kernel/linux/files/0014-Enable-i2c-driver.patch b/recipes-kernel/linux/files/0015-Add-i2c-driver-for-ast2700.patch
similarity index 62%
rename from recipes-kernel/linux/files/0014-Enable-i2c-driver.patch
rename to recipes-kernel/linux/files/0015-Add-i2c-driver-for-ast2700.patch
index a0a7c41..e8581e0 100644
--- a/recipes-kernel/linux/files/0014-Enable-i2c-driver.patch
+++ b/recipes-kernel/linux/files/0015-Add-i2c-driver-for-ast2700.patch
@@ -1,20 +1,21 @@
-From 7b1dc26c328c42ba323a659498c280b86e552ccb Mon Sep 17 00:00:00 2001
-From: OpenEmbedded <oe.patch@oe>
-Date: Tue, 31 Dec 2024 15:42:50 +0800
-Subject: [PATCH] Enable-i2c-driver
+From d154070ef1f64df6c2c2415f4fc33d09f07eb182 Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Fri, 7 Mar 2025 13:58:50 +0800
+Subject: [PATCH] Add i2c driver for ast2700
-from linux-aspeed:
-a769cc67850759a3952f7a40f5f5798c3d0f7bfd
+This is based on Aspeed SDK 9.05.
+From linux-aspeed:
+769f62b7baa84d6998723b0ea60280e380183553
-Signed-off-by: OpenEmbedded <oe.patch@oe>
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
---
drivers/i2c/busses/Kconfig | 11 +
drivers/i2c/busses/Makefile | 1 +
- drivers/i2c/busses/i2c-ast2600.c | 1748 ++++++++++++++++++++++++++++++
+ drivers/i2c/busses/i2c-aspeed.c | 1 +
+ drivers/i2c/busses/i2c-ast2600.c | 2199 ++++++++++++++++++++++++++++++
drivers/i2c/i2c-core-base.c | 38 +
drivers/i2c/i2c-mux.c | 50 +
- include/linux/i2c.h | 42 +
- 6 files changed, 1890 insertions(+)
+ 6 files changed, 2300 insertions(+)
create mode 100644 drivers/i2c/busses/i2c-ast2600.c
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
@@ -24,7 +25,7 @@
@@ -390,6 +390,17 @@ config I2C_ALTERA
This driver can also be built as a module. If so, the module
will be called i2c-altera.
-
+
+config I2C_AST2600
+ tristate "Aspeed I2C v2 Controller"
+ depends on ARCH_ASPEED || COMPILE_TEST
@@ -51,12 +52,24 @@
obj-$(CONFIG_I2C_AT91) += i2c-at91.o
i2c-at91-objs := i2c-at91-core.o i2c-at91-master.o
ifeq ($(CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL),y)
+diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
+index 5511fd46a..ce8c4846b 100644
+--- a/drivers/i2c/busses/i2c-aspeed.c
++++ b/drivers/i2c/busses/i2c-aspeed.c
+@@ -445,6 +445,7 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
+ irq_status);
+ irq_handled |= (irq_status & ASPEED_I2CD_INTR_MASTER_ERRORS);
+ if (bus->master_state != ASPEED_I2C_MASTER_INACTIVE) {
++ irq_handled = irq_status;
+ bus->cmd_err = ret;
+ bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
+ goto out_complete;
diff --git a/drivers/i2c/busses/i2c-ast2600.c b/drivers/i2c/busses/i2c-ast2600.c
new file mode 100644
-index 000000000..038aaf87d
+index 000000000..bea54127f
--- /dev/null
+++ b/drivers/i2c/busses/i2c-ast2600.c
-@@ -0,0 +1,1748 @@
+@@ -0,0 +1,2199 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ASPEED AST2600 new register set I2C controller driver
@@ -73,8 +86,9 @@
+#include <linux/i2c-smbus.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
-+#include <linux/module.h>
++#include <linux/minmax.h>
+#include <linux/mfd/syscon.h>
++#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
@@ -82,6 +96,8 @@
+#include <linux/slab.h>
+#include <linux/string_helpers.h>
+
++#include <asm/unaligned.h>
++
+#define AST2600_I2CG_ISR 0x00
+#define AST2600_I2CG_SLAVE_ISR 0x04
+#define AST2600_I2CG_OWNER 0x08
@@ -124,7 +140,7 @@
+#define AST2600_I2CC_SDA_DRIVE_1T_EN BIT(8)
+#define AST2600_I2CC_M_SDA_DRIVE_1T_EN BIT(7)
+#define AST2600_I2CC_M_HIGH_SPEED_EN BIT(6)
-+#define AST2600_I2CC_4T_DEBOUNCE GENMASK(5, 4)
++#define AST2700_I2CC_MANUAL_DEBOUNCE GENMASK(5, 4)
+/* reserver 5 : 2 */
+#define AST2600_I2CC_SLAVE_EN BIT(1)
+#define AST2600_I2CC_MASTER_EN BIT(0)
@@ -211,14 +227,11 @@
+/* 0x1C : I2CM Master DMA Transfer Length Register */
+#define AST2600_I2CM_DMA_LEN 0x1C
+/* Master Tx Rx support length 1 ~ 65536 */
-+#ifdef CONFIG_MACH_ASPEED_G7
-+#define AST2600_I2CM_SET_RX_DMA_LEN(x) (((x) & GENMASK(15, 0)) << 16)
-+#define AST2600_I2CM_SET_TX_DMA_LEN(x) ((x) & GENMASK(15, 0))
-+#else
++#define AST2700_I2CM_SET_RX_DMA_LEN(x) (((x) & GENMASK(15, 0)) << 16)
++#define AST2700_I2CM_SET_TX_DMA_LEN(x) ((x) & GENMASK(15, 0))
+/* Master Tx Rx support length 1 ~ 4096 */
+#define AST2600_I2CM_SET_RX_DMA_LEN(x) ((((x) & GENMASK(11, 0)) << 16) | BIT(31))
+#define AST2600_I2CM_SET_TX_DMA_LEN(x) (((x) & GENMASK(11, 0)) | BIT(15))
-+#endif
+
+/* 0x20 : I2CS Slave Interrupt Control Register */
+#define AST2600_I2CS_IER 0x20
@@ -269,14 +282,11 @@
+#define AST2600_I2CS_DMA_LEN 0x2C
+
+/* Slave Tx Rx support length 1 ~ 65536 */
-+#ifdef CONFIG_MACH_ASPEED_G7
-+#define AST2600_I2CS_SET_RX_DMA_LEN(x) ((((x) - 1) & GENMASK(15, 0)) << 16)
-+#define AST2600_I2CS_SET_TX_DMA_LEN(x) (((x) - 1) & GENMASK(15, 0))
-+#else
++#define AST2700_I2CS_SET_RX_DMA_LEN(x) ((((x) - 1) & GENMASK(15, 0)) << 16)
++#define AST2700_I2CS_SET_TX_DMA_LEN(x) (((x) - 1) & GENMASK(15, 0))
+/* Slave Tx Rx support length 1 ~ 4096 */
+#define AST2600_I2CS_SET_RX_DMA_LEN(x) (((((x) - 1) & GENMASK(11, 0)) << 16) | BIT(31))
+#define AST2600_I2CS_SET_TX_DMA_LEN(x) ((((x) - 1) & GENMASK(11, 0)) | BIT(15))
-+#endif
+
+/* I2CM Master DMA Tx Buffer Register */
+#define AST2600_I2CM_TX_DMA 0x30
@@ -319,6 +329,12 @@
+#define MSIC_CONFIG_ACTIMING1 0x74
+#define MSIC_I2C_SET_TIMEOUT(s, m) (((s) << 16) | (m))
+
++/* 0x9c : Misc 2 Debounce Setting */
++#define MSIC2_CONFIG 0x9C
++#define AST2700_DEBOUNCE_MASK GENMASK(7, 0)
++#define AST2700_DEBOUNCE_LEVEL_MAX 0x20
++#define AST2700_DEBOUNCE_LEVEL_MIN 0x2
++
+#define I2C_SLAVE_MSG_BUF_SIZE 4096
+
+#define AST2600_I2C_DMA_SIZE 4096
@@ -326,7 +342,8 @@
+#define MASTER_TRIGGER_LAST_STOP (AST2600_I2CM_RX_CMD_LAST | AST2600_I2CM_STOP_CMD)
+#define SLAVE_TRIGGER_CMD (AST2600_I2CS_ACTIVE_ALL | AST2600_I2CS_PKT_MODE_EN)
+
-+#define AST_I2C_TIMEOUT_CLK 0x1
++#define AST2600_I2C_TIMEOUT_CLK 0x1
++#define AST2700_I2C_TIMEOUT_CLK 0x3
+
+enum xfer_mode {
+ BYTE_MODE,
@@ -334,45 +351,46 @@
+ DMA_MODE,
+};
+
++enum i2c_version {
++ AST2600,
++ AST2700,
++ AST2700A0,
++};
++
+struct ast2600_i2c_bus {
+ struct i2c_adapter adap;
+ struct device *dev;
+ void __iomem *reg_base;
+ struct regmap *global_regs;
+ struct reset_control *rst;
-+ int irq;
-+ int ast2700_workaround;
-+ enum xfer_mode mode;
+ struct clk *clk;
-+ u32 apb_clk;
+ struct i2c_timings timing_info;
-+ int slave_operate;
++ struct completion cmd_complete;
++ struct i2c_msg *msgs;
++ u8 *master_safe_buf;
++ dma_addr_t master_dma_addr;
++ u32 apb_clk;
+ u32 timeout;
++ int irq;
++ int cmd_err;
++ int msgs_index;
++ int msgs_count;
++ int master_xfer_cnt;
++ size_t buf_index;
++ size_t buf_size;
++ enum xfer_mode mode;
++ enum i2c_version version;
++ bool multi_master;
++ u32 debounce_level;
++ /* Buffer mode */
++ void __iomem *buf_base;
++ int ast2700_workaround;
+ /* smbus alert */
+ bool alert_enable;
+ struct i2c_smbus_alert_setup alert_data;
+ struct i2c_client *ara;
-+ /* Multi-master */
-+ bool multi_master;
-+ /* master structure */
-+ int cmd_err;
-+ struct completion cmd_complete;
-+ struct i2c_msg *msgs;
-+ size_t buf_index;
-+ /* cur xfer msgs index*/
-+ int msgs_index;
-+ int msgs_count;
-+ u8 *master_safe_buf;
-+ dma_addr_t master_dma_addr;
-+ /*total xfer count */
-+ int master_xfer_cnt;
-+ /* Buffer mode */
-+ void __iomem *buf_base;
-+ size_t buf_size;
-+ /* Slave structure */
-+ int slave_xfer_len;
-+ int slave_xfer_cnt;
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
++ int slave_operate;
+ unsigned char *slave_dma_buf;
+ dma_addr_t slave_dma_addr;
+ struct i2c_client *slave;
@@ -382,23 +400,23 @@
+static u32 ast2600_select_i2c_clock(struct ast2600_i2c_bus *i2c_bus)
+{
+ unsigned long base_clk[16];
-+ int baseclk_idx;
++ int baseclk_idx = 0;
++ int divisor = 0;
+ u32 clk_div_reg;
+ u32 scl_low;
+ u32 scl_high;
-+ int divisor;
+ u32 data;
+
+ regmap_read(i2c_bus->global_regs, AST2600_I2CG_CLK_DIV_CTRL, &clk_div_reg);
+
-+ for (int i = 0; i < 16; i++) {
++ for (int i = 0; i < ARRAY_SIZE(base_clk); i++) {
+ if (i == 0)
+ base_clk[i] = i2c_bus->apb_clk;
-+ else if ((i > 0) && (i < 5))
++ else if (i < 5)
+ base_clk[i] = (i2c_bus->apb_clk * 2) /
-+ (((clk_div_reg >> ((i - 1) * 8)) & GENMASK(7, 0)) + 2);
++ (((clk_div_reg >> ((i - 1) * 8)) & GENMASK(7, 0)) + 2);
+ else
-+ base_clk[i] = base_clk[4] / (1 << (i - 5));
++ base_clk[i] = base_clk[4] >> (i - 5);
+
+ if ((base_clk[i] / i2c_bus->timing_info.bus_freq_hz) <= 32) {
+ baseclk_idx = i;
@@ -412,16 +430,42 @@
+ scl_high = (divisor - scl_low - 2) & GENMASK(3, 0);
+ data = (scl_high - 1) << 20 | scl_high << 16 | scl_low << 12 | baseclk_idx;
+ if (i2c_bus->timeout) {
-+#ifdef CONFIG_MACH_ASPEED_G7
++ i2c_bus->timeout = min(divisor, 31);
++ data |= AST2600_I2CC_TTIMEOUT(i2c_bus->timeout);
++ data |= AST2600_I2CC_TOUTBASECLK(AST2600_I2C_TIMEOUT_CLK);
++ }
++
++ return data;
++}
++
++static u32 ast2700_select_i2c_clock(struct ast2600_i2c_bus *i2c_bus)
++{
++ unsigned long base_clk;
++ int baseclk_idx = 0;
++ int divisor = 0;
++ u32 scl_low;
++ u32 scl_high;
++ u32 data;
++
++ for (int i = 0; i < 0x100; i++) {
++ base_clk = (i2c_bus->apb_clk) / (i + 1);
++ if ((base_clk / i2c_bus->timing_info.bus_freq_hz) <= 32) {
++ baseclk_idx = i;
++ divisor = DIV_ROUND_UP(base_clk, i2c_bus->timing_info.bus_freq_hz);
++ break;
++ }
++ }
++
++ baseclk_idx = min(baseclk_idx, 0xff);
++ divisor = min(divisor, 32);
++ scl_low = min(divisor * 9 / 16 - 1, 15);
++ scl_high = (divisor - scl_low - 2) & GENMASK(3, 0);
++ data = (scl_high - 1) << 20 | scl_high << 16 | scl_low << 12 | baseclk_idx;
++ if (i2c_bus->timeout) {
++ i2c_bus->timeout = min(i2c_bus->timeout, 255);
+ writel(MSIC_I2C_SET_TIMEOUT(i2c_bus->timeout, i2c_bus->timeout),
+ i2c_bus->reg_base + MSIC_CONFIG_ACTIMING1);
-+#else
-+ /* ast2600 only have [4:0] range */
-+ if (i2c_bus->timeout > 31)
-+ i2c_bus->timeout = 31;
-+ data |= AST2600_I2CC_TTIMEOUT(i2c_bus->timeout);
-+#endif
-+ data |= AST2600_I2CC_TOUTBASECLK(AST_I2C_TIMEOUT_CLK);
++ data |= AST2600_I2CC_TOUTBASECLK(AST2700_I2C_TIMEOUT_CLK);
+ }
+
+ return data;
@@ -436,17 +480,6 @@
+
+ dev_dbg(i2c_bus->dev, "%d-bus recovery bus [%x]\n", i2c_bus->adap.nr, state);
+
-+ ctrl = readl(i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL);
-+
-+ /* Disable master/slave mode */
-+ writel(ctrl & ~(AST2600_I2CC_MASTER_EN | AST2600_I2CC_SLAVE_EN),
-+ i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL);
-+ readl(i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL);
-+
-+ /* Enable controller into original mode */
-+ writel(ctrl, i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL);
-+ readl(i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL);
-+
+ reinit_completion(&i2c_bus->cmd_complete);
+ i2c_bus->cmd_err = 0;
+
@@ -457,12 +490,11 @@
+ r = wait_for_completion_timeout(&i2c_bus->cmd_complete, i2c_bus->adap.timeout);
+ if (r == 0) {
+ dev_dbg(i2c_bus->dev, "recovery timed out\n");
-+ ret = -ETIMEDOUT;
-+ } else {
-+ if (i2c_bus->cmd_err) {
-+ dev_dbg(i2c_bus->dev, "recovery error\n");
-+ ret = -EPROTO;
-+ }
++ writel(ctrl, i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL);
++ return -ETIMEDOUT;
++ } else if (i2c_bus->cmd_err) {
++ dev_dbg(i2c_bus->dev, "recovery error\n");
++ ret = -EPROTO;
+ }
+ }
+
@@ -473,31 +505,36 @@
+ ret = -EPROTO;
+ }
+
-+ /* restore original master/slave setting */
-+ writel(ctrl, i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL);
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+static void ast2600_i2c_slave_packet_dma_irq(struct ast2600_i2c_bus *i2c_bus, u32 sts)
+{
-+ int slave_rx_len;
++ int slave_rx_len = 0;
+ u32 cmd = 0;
+ u8 value;
+ int i;
-+ u32 c_isr = sts;
+
+ sts &= ~(AST2600_I2CS_SLAVE_PENDING | AST2600_I2CS_SADDR_PENDING
+ | AST2600_I2CS_ADDR_NAK_MASK);
+ /* Handle i2c slave timeout condition */
+ if (AST2600_I2CS_INACTIVE_TO & sts) {
+ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN;
-+ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
-+ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
-+ writel(0, i2c_bus->reg_base + AST2600_I2CS_DMA_LEN_STS);
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ else
++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ if (i2c_bus->version == AST2700A0)
++ writel(0, i2c_bus->reg_base + AST2600_I2CS_DMA_LEN_STS);
+ writel(cmd, i2c_bus->reg_base + AST2600_I2CS_CMD_STS);
-+ writel(AST2600_I2CS_PKT_DONE, i2c_bus->reg_base + AST2600_I2CS_ISR);
-+ writel(c_isr, i2c_bus->reg_base + AST2600_I2CS_ISR); /* ast2700 a1 */
++ if (i2c_bus->version == AST2700)
++ writel(readl(i2c_bus->reg_base + AST2600_I2CS_ISR),
++ i2c_bus->reg_base + AST2600_I2CS_ISR);
++ else
++ writel(AST2600_I2CS_PKT_DONE, i2c_bus->reg_base + AST2600_I2CS_ISR);
+ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_STOP, &value);
+ return;
+ }
@@ -505,27 +542,7 @@
+ sts &= ~(AST2600_I2CS_PKT_DONE | AST2600_I2CS_PKT_ERROR);
+
+ switch (sts) {
-+#ifdef CONFIG_MACH_ASPEED_G7
-+ /* AST2700A0 workaround */
-+ case 0:
-+ case AST2600_I2CS_WAIT_RX_DMA:
-+ case AST2600_I2CS_SLAVE_MATCH:
-+ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
-+ if (readl(i2c_bus->reg_base + AST2600_I2CS_CMD_STS) & AST2600_I2CS_RX_DMA_EN) {
-+ cmd = 0;
-+ } else {
-+ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN;
-+ writel(AST2600_I2CC_SET_RX_BUF_LEN(i2c_bus->buf_size),
-+ i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL);
-+ }
-+ break;
-+#endif
+ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_WAIT_RX_DMA:
-+#ifdef CONFIG_MACH_ASPEED_G7
-+ /* AST2700A0 workaround */
-+ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
-+ break;
-+#endif
+ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_WAIT_RX_DMA:
+ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ slave_rx_len = AST2600_I2C_GET_RX_DMA_LEN(readl(i2c_bus->reg_base +
@@ -534,25 +551,25 @@
+ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_RECEIVED,
+ &i2c_bus->slave_dma_buf[i]);
+ }
-+ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
-+ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ else
++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
+ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN;
+ break;
+ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_STOP:
+ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_STOP, &value);
-+ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
-+ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ else
++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
+ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN;
+ break;
+ case AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP:
-+#ifdef CONFIG_MACH_ASPEED_G7
-+ /* AST2700A0 bug workaround miss start */
-+ if (i2c_bus->ast2700_workaround) {
-+ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
-+ i2c_bus->ast2700_workaround = 0;
-+ }
-+ fallthrough;
-+#endif
+ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE_NAK |
+ AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP:
+ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_WAIT_RX_DMA |
@@ -570,8 +587,12 @@
+ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_RECEIVED,
+ &i2c_bus->slave_dma_buf[i]);
+ }
-+ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
-+ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ else
++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
+ if (sts & AST2600_I2CS_STOP)
+ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_STOP, &value);
+ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN;
@@ -592,32 +613,48 @@
+ }
+ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_READ_REQUESTED,
+ &i2c_bus->slave_dma_buf[0]);
-+ writel(AST2600_I2CS_SET_TX_DMA_LEN(1),
-+ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CS_SET_TX_DMA_LEN(1),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ else
++ writel(AST2600_I2CS_SET_TX_DMA_LEN(1),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
+ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_TX_DMA_EN;
+ break;
+ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_WAIT_TX_DMA:
+ /* First Start read */
+ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_READ_REQUESTED,
+ &i2c_bus->slave_dma_buf[0]);
-+ writel(AST2600_I2CS_SET_TX_DMA_LEN(1),
-+ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CS_SET_TX_DMA_LEN(1),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ else
++ writel(AST2600_I2CS_SET_TX_DMA_LEN(1),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
+ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_TX_DMA_EN;
+ break;
+ case AST2600_I2CS_WAIT_TX_DMA:
+ /* it should be next start read */
+ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_READ_PROCESSED,
+ &i2c_bus->slave_dma_buf[0]);
-+ writel(AST2600_I2CS_SET_TX_DMA_LEN(1),
-+ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CS_SET_TX_DMA_LEN(1),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ else
++ writel(AST2600_I2CS_SET_TX_DMA_LEN(1),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
+ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_TX_DMA_EN;
+ break;
+ case AST2600_I2CS_TX_NAK | AST2600_I2CS_STOP | AST2600_I2CS_SLAVE_MATCH:
+ case AST2600_I2CS_TX_NAK | AST2600_I2CS_STOP:
+ /* it just tx complete */
+ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_STOP, &value);
-+ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
-+ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ else
++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
+ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN;
+ break;
+ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE:
@@ -633,15 +670,216 @@
+ }
+
+ if (cmd) {
-+ writel(0, i2c_bus->reg_base + AST2600_I2CS_DMA_LEN_STS);
++ if (i2c_bus->version == AST2700A0)
++ writel(0, i2c_bus->reg_base + AST2600_I2CS_DMA_LEN_STS);
+ writel(cmd, i2c_bus->reg_base + AST2600_I2CS_CMD_STS);
+ }
+ if (sts & AST2600_I2CS_STOP)
+ i2c_bus->ast2700_workaround = 1;
+
-+ writel(AST2600_I2CS_PKT_DONE, i2c_bus->reg_base + AST2600_I2CS_ISR);
-+ dev_dbg(i2c_bus->dev, "sts2 %x, c_sts2 %x\n", sts, c_isr);
-+ writel(c_isr, i2c_bus->reg_base + AST2600_I2CS_ISR);
++ if (i2c_bus->version == AST2700)
++ writel(readl(i2c_bus->reg_base + AST2600_I2CS_ISR),
++ i2c_bus->reg_base + AST2600_I2CS_ISR);
++ else
++ writel(AST2600_I2CS_PKT_DONE, i2c_bus->reg_base + AST2600_I2CS_ISR);
++ readl(i2c_bus->reg_base + AST2600_I2CS_ISR);
++}
++
++static void ast2700_a0_i2c_slave_packet_dma_irq(struct ast2600_i2c_bus *i2c_bus, u32 sts)
++{
++ int slave_rx_len = 0;
++ u32 cmd = 0;
++ u8 value;
++ int i;
++
++ sts &= ~(AST2600_I2CS_SLAVE_PENDING | AST2600_I2CS_SADDR_PENDING
++ | AST2600_I2CS_ADDR_NAK_MASK);
++ /* Handle i2c slave timeout condition */
++ if (AST2600_I2CS_INACTIVE_TO & sts) {
++ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN;
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ else
++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ if (i2c_bus->version == AST2700A0)
++ writel(0, i2c_bus->reg_base + AST2600_I2CS_DMA_LEN_STS);
++ writel(cmd, i2c_bus->reg_base + AST2600_I2CS_CMD_STS);
++ if (i2c_bus->version == AST2700)
++ writel(readl(i2c_bus->reg_base + AST2600_I2CS_ISR),
++ i2c_bus->reg_base + AST2600_I2CS_ISR);
++ else
++ writel(AST2600_I2CS_PKT_DONE, i2c_bus->reg_base + AST2600_I2CS_ISR);
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_STOP, &value);
++ return;
++ }
++
++ sts &= ~(AST2600_I2CS_PKT_DONE | AST2600_I2CS_PKT_ERROR);
++
++ switch (sts) {
++ /* AST2700A0 workaround */
++ case 0:
++ case AST2600_I2CS_WAIT_RX_DMA:
++ case AST2600_I2CS_SLAVE_MATCH:
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
++ if (readl(i2c_bus->reg_base + AST2600_I2CS_CMD_STS) & AST2600_I2CS_RX_DMA_EN) {
++ cmd = 0;
++ } else {
++ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN;
++ writel(AST2600_I2CC_SET_RX_BUF_LEN(i2c_bus->buf_size),
++ i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL);
++ }
++ break;
++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_WAIT_RX_DMA:
++ /* AST2700A0 workaround */
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
++ break;
++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_WAIT_RX_DMA:
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
++ slave_rx_len = AST2600_I2C_GET_RX_DMA_LEN(readl(i2c_bus->reg_base +
++ AST2600_I2CS_DMA_LEN_STS));
++ for (i = 0; i < slave_rx_len; i++) {
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_RECEIVED,
++ &i2c_bus->slave_dma_buf[i]);
++ }
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ else
++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN;
++ break;
++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_STOP:
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_STOP, &value);
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ else
++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN;
++ break;
++ case AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP:
++ /* AST2700A0 bug workaround miss start */
++ if (i2c_bus->ast2700_workaround) {
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
++ i2c_bus->ast2700_workaround = 0;
++ }
++ fallthrough;
++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE_NAK |
++ AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP:
++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_WAIT_RX_DMA |
++ AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP:
++ case AST2600_I2CS_RX_DONE_NAK | AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP:
++ case AST2600_I2CS_RX_DONE | AST2600_I2CS_WAIT_RX_DMA | AST2600_I2CS_STOP:
++ case AST2600_I2CS_RX_DONE | AST2600_I2CS_WAIT_RX_DMA:
++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP:
++ if (sts & AST2600_I2CS_SLAVE_MATCH)
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
++
++ slave_rx_len = AST2600_I2C_GET_RX_DMA_LEN(readl(i2c_bus->reg_base +
++ AST2600_I2CS_DMA_LEN_STS));
++ for (i = 0; i < slave_rx_len; i++) {
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_RECEIVED,
++ &i2c_bus->slave_dma_buf[i]);
++ }
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ else
++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ if (sts & AST2600_I2CS_STOP)
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_STOP, &value);
++ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN;
++ break;
++
++	/* master write data then master read is coming -> need to send tx */
++ case AST2600_I2CS_RX_DONE | AST2600_I2CS_WAIT_TX_DMA:
++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_WAIT_TX_DMA:
++		/* it should be a repeated start read */
++ if (sts & AST2600_I2CS_SLAVE_MATCH)
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
++
++ slave_rx_len = AST2600_I2C_GET_RX_DMA_LEN(readl(i2c_bus->reg_base +
++ AST2600_I2CS_DMA_LEN_STS));
++ for (i = 0; i < slave_rx_len; i++) {
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_RECEIVED,
++ &i2c_bus->slave_dma_buf[i]);
++ }
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_READ_REQUESTED,
++ &i2c_bus->slave_dma_buf[0]);
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CS_SET_TX_DMA_LEN(1),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ else
++ writel(AST2600_I2CS_SET_TX_DMA_LEN(1),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_TX_DMA_EN;
++ break;
++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_WAIT_TX_DMA:
++ /* First Start read */
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_READ_REQUESTED,
++ &i2c_bus->slave_dma_buf[0]);
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CS_SET_TX_DMA_LEN(1),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ else
++ writel(AST2600_I2CS_SET_TX_DMA_LEN(1),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_TX_DMA_EN;
++ break;
++ case AST2600_I2CS_WAIT_TX_DMA:
++		/* it should be the next start read */
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_READ_PROCESSED,
++ &i2c_bus->slave_dma_buf[0]);
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CS_SET_TX_DMA_LEN(1),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ else
++ writel(AST2600_I2CS_SET_TX_DMA_LEN(1),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_TX_DMA_EN;
++ break;
++ case AST2600_I2CS_TX_NAK | AST2600_I2CS_STOP | AST2600_I2CS_SLAVE_MATCH:
++ case AST2600_I2CS_TX_NAK | AST2600_I2CS_STOP:
++		/* the tx is complete */
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_STOP, &value);
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ else
++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_RX_DMA_EN;
++ break;
++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE:
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
++ break;
++ case AST2600_I2CS_STOP:
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_STOP, &value);
++ break;
++ default:
++ dev_dbg(i2c_bus->dev, "unhandled slave isr case %x, sts %x\n", sts,
++ readl(i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF));
++ break;
++ }
++
++ if (cmd) {
++ if (i2c_bus->version == AST2700A0)
++ writel(0, i2c_bus->reg_base + AST2600_I2CS_DMA_LEN_STS);
++ writel(cmd, i2c_bus->reg_base + AST2600_I2CS_CMD_STS);
++ }
++ if (sts & AST2600_I2CS_STOP)
++ i2c_bus->ast2700_workaround = 1;
++
++ if (i2c_bus->version == AST2700)
++ writel(readl(i2c_bus->reg_base + AST2600_I2CS_ISR),
++ i2c_bus->reg_base + AST2600_I2CS_ISR);
++ else
++ writel(AST2600_I2CS_PKT_DONE, i2c_bus->reg_base + AST2600_I2CS_ISR);
+ readl(i2c_bus->reg_base + AST2600_I2CS_ISR);
+}
+
@@ -666,7 +904,6 @@
+ u32 ac_timing = readl(i2c_bus->reg_base + AST2600_I2CC_AC_TIMING) & AST2600_I2CC_AC_TIMING_MASK;
+
+ writel(ac_timing, i2c_bus->reg_base + AST2600_I2CC_AC_TIMING);
-+ ac_timing = readl(i2c_bus->reg_base + AST2600_I2CC_AC_TIMING) & AST2600_I2CC_AC_TIMING_MASK;
+ ac_timing |= AST2600_I2CC_TTIMEOUT(i2c_bus->timeout);
+ writel(ac_timing, i2c_bus->reg_base + AST2600_I2CC_AC_TIMING);
+ /* Clear irq and re-send slave trigger command */
@@ -683,7 +920,180 @@
+ i2c_bus->slave_operate = 1;
+
+ switch (sts) {
-+#ifdef CONFIG_MACH_ASPEED_G7
++ case AST2600_I2CS_SLAVE_PENDING | AST2600_I2CS_WAIT_RX_DMA |
++ AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP:
++ case AST2600_I2CS_SLAVE_PENDING |
++ AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP:
++ case AST2600_I2CS_SLAVE_PENDING |
++ AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_STOP:
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_STOP, &value);
++ fallthrough;
++ case AST2600_I2CS_SLAVE_PENDING |
++ AST2600_I2CS_WAIT_RX_DMA | AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE:
++ case AST2600_I2CS_WAIT_RX_DMA | AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE:
++ case AST2600_I2CS_WAIT_RX_DMA | AST2600_I2CS_SLAVE_MATCH:
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
++ cmd = SLAVE_TRIGGER_CMD;
++ if (sts & AST2600_I2CS_RX_DONE) {
++ slave_rx_len = AST2600_I2CC_GET_RX_BUF_LEN(readl(i2c_bus->reg_base +
++ AST2600_I2CC_BUFF_CTRL));
++ for (i = 0; i < slave_rx_len; i++) {
++ value = readb(i2c_bus->buf_base + 0x10 + i);
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_RECEIVED, &value);
++ }
++ }
++ if (readl(i2c_bus->reg_base + AST2600_I2CS_CMD_STS) & AST2600_I2CS_RX_BUFF_EN)
++ cmd = 0;
++ else
++ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_RX_BUFF_EN;
++
++ writel(AST2600_I2CC_SET_RX_BUF_LEN(i2c_bus->buf_size),
++ i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL);
++ break;
++ case AST2600_I2CS_WAIT_RX_DMA | AST2600_I2CS_RX_DONE:
++ cmd = SLAVE_TRIGGER_CMD;
++ slave_rx_len = AST2600_I2CC_GET_RX_BUF_LEN(readl(i2c_bus->reg_base +
++ AST2600_I2CC_BUFF_CTRL));
++ for (i = 0; i < slave_rx_len; i++) {
++ value = readb(i2c_bus->buf_base + 0x10 + i);
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_RECEIVED, &value);
++ }
++ cmd |= AST2600_I2CS_RX_BUFF_EN;
++ writel(AST2600_I2CC_SET_RX_BUF_LEN(i2c_bus->buf_size),
++ i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL);
++ break;
++ case AST2600_I2CS_SLAVE_PENDING | AST2600_I2CS_WAIT_RX_DMA |
++ AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP:
++ cmd = SLAVE_TRIGGER_CMD;
++ slave_rx_len = AST2600_I2CC_GET_RX_BUF_LEN(readl(i2c_bus->reg_base +
++ AST2600_I2CC_BUFF_CTRL));
++ for (i = 0; i < slave_rx_len; i++) {
++ value = readb(i2c_bus->buf_base + 0x10 + i);
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_RECEIVED, &value);
++ }
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_STOP, &value);
++ cmd |= AST2600_I2CS_RX_BUFF_EN;
++ writel(AST2600_I2CC_SET_RX_BUF_LEN(i2c_bus->buf_size),
++ i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL);
++ break;
++ case AST2600_I2CS_SLAVE_PENDING | AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP:
++ cmd = SLAVE_TRIGGER_CMD;
++ slave_rx_len = AST2600_I2CC_GET_RX_BUF_LEN(readl(i2c_bus->reg_base +
++ AST2600_I2CC_BUFF_CTRL));
++ for (i = 0; i < slave_rx_len; i++) {
++ value = readb(i2c_bus->buf_base + 0x10 + i);
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_RECEIVED, &value);
++ }
++		/* workaround to avoid the next start with len != 0 */
++ writel(BIT(0), i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL);
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_STOP, &value);
++ break;
++ case AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP:
++ cmd = SLAVE_TRIGGER_CMD;
++ slave_rx_len = AST2600_I2CC_GET_RX_BUF_LEN(readl(i2c_bus->reg_base +
++ AST2600_I2CC_BUFF_CTRL));
++ for (i = 0; i < slave_rx_len; i++) {
++ value = readb(i2c_bus->buf_base + 0x10 + i);
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_RECEIVED, &value);
++ }
++		/* workaround to avoid the next start with len != 0 */
++ writel(BIT(0), i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL);
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_STOP, &value);
++ break;
++ case AST2600_I2CS_WAIT_TX_DMA | AST2600_I2CS_SLAVE_MATCH:
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_READ_REQUESTED, &value);
++ writeb(value, i2c_bus->buf_base);
++ writel(AST2600_I2CC_SET_TX_BUF_LEN(1),
++ i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL);
++ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_TX_BUFF_EN;
++ break;
++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_WAIT_TX_DMA | AST2600_I2CS_RX_DONE:
++ case AST2600_I2CS_WAIT_TX_DMA | AST2600_I2CS_RX_DONE:
++ case AST2600_I2CS_WAIT_TX_DMA:
++		/* it should be a repeated start read */
++ if (sts & AST2600_I2CS_SLAVE_MATCH)
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
++
++ if (sts & AST2600_I2CS_RX_DONE) {
++ slave_rx_len = AST2600_I2CC_GET_RX_BUF_LEN(readl(i2c_bus->reg_base +
++ AST2600_I2CC_BUFF_CTRL));
++ for (i = 0; i < slave_rx_len; i++) {
++ value = readb(i2c_bus->buf_base + 0x10 + i);
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_RECEIVED, &value);
++ }
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_READ_REQUESTED, &value);
++ } else {
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_READ_PROCESSED, &value);
++ }
++ writeb(value, i2c_bus->buf_base);
++ writel(AST2600_I2CC_SET_TX_BUF_LEN(1),
++ i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL);
++ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_TX_BUFF_EN;
++ break;
++	/* workaround: trigger the cmd twice to fix the next state staying at 1000000 */
++ case AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE:
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
++ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_RX_BUFF_EN;
++ writel(cmd, i2c_bus->reg_base + AST2600_I2CS_CMD_STS);
++ break;
++
++ case AST2600_I2CS_TX_NAK | AST2600_I2CS_STOP:
++ case AST2600_I2CS_STOP:
++ cmd = SLAVE_TRIGGER_CMD;
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_STOP, &value);
++ break;
++ default:
++ dev_dbg(i2c_bus->dev, "unhandled slave isr case %x, sts %x\n", sts,
++ readl(i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF));
++ break;
++ }
++
++ if (cmd)
++ writel(cmd, i2c_bus->reg_base + AST2600_I2CS_CMD_STS);
++ writel(AST2600_I2CS_PKT_DONE, i2c_bus->reg_base + AST2600_I2CS_ISR);
++ readl(i2c_bus->reg_base + AST2600_I2CS_ISR);
++
++ if ((sts & AST2600_I2CS_STOP) && !(sts & AST2600_I2CS_SLAVE_PENDING))
++ i2c_bus->slave_operate = 0;
++}
++
++static void ast2700_a0_i2c_slave_packet_buff_irq(struct ast2600_i2c_bus *i2c_bus, u32 sts)
++{
++ int slave_rx_len = 0;
++ u32 cmd = 0;
++ u8 value;
++ int i;
++
++	/* master and slave share a common buffer, so force the master stop not to be issued */
++ if (readl(i2c_bus->reg_base + AST2600_I2CM_CMD_STS) & GENMASK(15, 0)) {
++ writel(0, i2c_bus->reg_base + AST2600_I2CM_CMD_STS);
++ i2c_bus->cmd_err = -EBUSY;
++ writel(0, i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL);
++ complete(&i2c_bus->cmd_complete);
++ }
++
++ /* Handle i2c slave timeout condition */
++ if (AST2600_I2CS_INACTIVE_TO & sts) {
++ /* Reset time out counter */
++ u32 ac_timing = readl(i2c_bus->reg_base + AST2600_I2CC_AC_TIMING) & AST2600_I2CC_AC_TIMING_MASK;
++
++ writel(ac_timing, i2c_bus->reg_base + AST2600_I2CC_AC_TIMING);
++ ac_timing |= AST2600_I2CC_TTIMEOUT(i2c_bus->timeout);
++ writel(ac_timing, i2c_bus->reg_base + AST2600_I2CC_AC_TIMING);
++ /* Clear irq and re-send slave trigger command */
++ writel(SLAVE_TRIGGER_CMD, i2c_bus->reg_base + AST2600_I2CS_CMD_STS);
++ writel(AST2600_I2CS_PKT_DONE, i2c_bus->reg_base + AST2600_I2CS_ISR);
++ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_STOP, &value);
++ i2c_bus->slave_operate = 0;
++ return;
++ }
++
++ sts &= ~(AST2600_I2CS_PKT_DONE | AST2600_I2CS_PKT_ERROR);
++
++ if (sts & AST2600_I2CS_SLAVE_MATCH)
++ i2c_bus->slave_operate = 1;
++
++ switch (sts) {
+ case 0:
+ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_WRITE_REQUESTED, &value);
+ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_RX_BUFF_EN;
@@ -706,9 +1116,6 @@
+ i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL);
+ cmd = SLAVE_TRIGGER_CMD | AST2600_I2CS_RX_BUFF_EN;
+ break;
-+#endif
-+
-+#ifdef CONFIG_MACH_ASPEED_G7
+ case AST2600_I2CS_WAIT_RX_DMA |
+ AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP:
+ case AST2600_I2CS_SLAVE_PENDING | AST2600_I2CS_WAIT_RX_DMA |
@@ -721,16 +1128,6 @@
+ }
+ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_STOP, &value);
+ break;
-+#else
-+ case AST2600_I2CS_SLAVE_PENDING | AST2600_I2CS_WAIT_RX_DMA |
-+ AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP:
-+ case AST2600_I2CS_SLAVE_PENDING |
-+ AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE | AST2600_I2CS_STOP:
-+ case AST2600_I2CS_SLAVE_PENDING |
-+ AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_STOP:
-+ i2c_slave_event(i2c_bus->slave, I2C_SLAVE_STOP, &value);
-+ fallthrough;
-+#endif
+ case AST2600_I2CS_SLAVE_PENDING |
+ AST2600_I2CS_WAIT_RX_DMA | AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE:
+ case AST2600_I2CS_WAIT_RX_DMA | AST2600_I2CS_SLAVE_MATCH | AST2600_I2CS_RX_DONE:
@@ -919,10 +1316,17 @@
+ isr &= ~(AST2600_I2CS_ADDR_INDICATE_MASK);
+
+ if (AST2600_I2CS_PKT_DONE & isr) {
-+ if (i2c_bus->mode == DMA_MODE)
-+ ast2600_i2c_slave_packet_dma_irq(i2c_bus, isr);
-+ else
-+ ast2600_i2c_slave_packet_buff_irq(i2c_bus, isr);
++ if (i2c_bus->mode == DMA_MODE) {
++ if (i2c_bus->version == AST2700A0)
++ ast2700_a0_i2c_slave_packet_dma_irq(i2c_bus, isr);
++ else
++ ast2600_i2c_slave_packet_dma_irq(i2c_bus, isr);
++ } else {
++ if (i2c_bus->version == AST2700A0)
++ ast2700_a0_i2c_slave_packet_buff_irq(i2c_bus, isr);
++ else
++ ast2600_i2c_slave_packet_buff_irq(i2c_bus, isr);
++ }
+ } else {
+ ast2600_i2c_slave_byte_irq(i2c_bus, isr);
+ }
@@ -931,14 +1335,255 @@
+}
+#endif
+
++static int ast2600_i2c_setup_dma_tx(u32 cmd, struct ast2600_i2c_bus *i2c_bus)
++{
++ struct i2c_msg *msg = &i2c_bus->msgs[i2c_bus->msgs_index];
++ int xfer_len = msg->len - i2c_bus->master_xfer_cnt;
++ int ret;
++
++ cmd |= AST2600_I2CM_PKT_EN;
++
++ if (xfer_len > AST2600_I2C_DMA_SIZE)
++ xfer_len = AST2600_I2C_DMA_SIZE;
++ else if (i2c_bus->msgs_index + 1 == i2c_bus->msgs_count)
++ cmd |= AST2600_I2CM_STOP_CMD;
++
++ if (cmd & AST2600_I2CM_START_CMD) {
++ cmd |= AST2600_I2CM_PKT_ADDR(msg->addr);
++ if (xfer_len) {
++ i2c_bus->master_safe_buf = i2c_get_dma_safe_msg_buf(msg, 1);
++ if (!i2c_bus->master_safe_buf)
++ return -ENOMEM;
++ i2c_bus->master_dma_addr =
++ dma_map_single(i2c_bus->dev, i2c_bus->master_safe_buf,
++ msg->len, DMA_TO_DEVICE);
++ ret = dma_mapping_error(i2c_bus->dev, i2c_bus->master_dma_addr);
++ if (ret) {
++ i2c_put_dma_safe_msg_buf(i2c_bus->master_safe_buf, msg, false);
++ i2c_bus->master_safe_buf = NULL;
++ return ret;
++ }
++ }
++ }
++
++ if (xfer_len) {
++ cmd |= AST2600_I2CM_TX_DMA_EN | AST2600_I2CM_TX_CMD;
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CM_SET_TX_DMA_LEN(xfer_len - 1),
++ i2c_bus->reg_base + AST2600_I2CM_DMA_LEN);
++ else
++ writel(AST2600_I2CM_SET_TX_DMA_LEN(xfer_len - 1),
++ i2c_bus->reg_base + AST2600_I2CM_DMA_LEN);
++ writel(lower_32_bits(i2c_bus->master_dma_addr),
++ i2c_bus->reg_base + AST2600_I2CM_TX_DMA);
++ writel(upper_32_bits(i2c_bus->master_dma_addr),
++ i2c_bus->reg_base + AST2600_I2CM_TX_DMA_H);
++ }
++
++ if (i2c_bus->version == AST2700A0)
++ writel(0, i2c_bus->reg_base + AST2600_I2CM_DMA_LEN_STS);
++
++ writel(cmd, i2c_bus->reg_base + AST2600_I2CM_CMD_STS);
++
++ return 0;
++}
++
++static int ast2600_i2c_setup_buff_tx(u32 cmd, struct ast2600_i2c_bus *i2c_bus)
++{
++ struct i2c_msg *msg = &i2c_bus->msgs[i2c_bus->msgs_index];
++ int xfer_len = msg->len - i2c_bus->master_xfer_cnt;
++ u32 wbuf_dword;
++ int i;
++
++ cmd |= AST2600_I2CM_PKT_EN;
++
++ if (xfer_len > i2c_bus->buf_size)
++ xfer_len = i2c_bus->buf_size;
++ else if (i2c_bus->msgs_index + 1 == i2c_bus->msgs_count)
++ cmd |= AST2600_I2CM_STOP_CMD;
++
++ if (cmd & AST2600_I2CM_START_CMD)
++ cmd |= AST2600_I2CM_PKT_ADDR(msg->addr);
++
++ if (xfer_len) {
++ cmd |= AST2600_I2CM_TX_BUFF_EN | AST2600_I2CM_TX_CMD;
++ /*
++ * The controller's buffer register supports dword writes only.
++		 * Therefore, write dwords to the buffer register in 4-byte-aligned
++		 * chunks, and write the remaining unaligned bytes at the end.
++ */
++ if (readl(i2c_bus->reg_base + AST2600_I2CS_ISR))
++ return -ENOMEM;
++ for (i = 0; i < xfer_len; i += 4) {
++ int xfer_cnt = i2c_bus->master_xfer_cnt + i;
++
++ switch (min(xfer_len - i, 4) % 4) {
++ case 1:
++ wbuf_dword = msg->buf[xfer_cnt];
++ break;
++ case 2:
++ wbuf_dword = get_unaligned_le16(&msg->buf[xfer_cnt]);
++ break;
++ case 3:
++ wbuf_dword = get_unaligned_le24(&msg->buf[xfer_cnt]);
++ break;
++ default:
++ wbuf_dword = get_unaligned_le32(&msg->buf[xfer_cnt]);
++ break;
++ }
++ writel(wbuf_dword, i2c_bus->buf_base + i);
++ }
++ if (readl(i2c_bus->reg_base + AST2600_I2CS_ISR))
++ return -ENOMEM;
++ writel(AST2600_I2CC_SET_TX_BUF_LEN(xfer_len),
++ i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL);
++ }
++
++ if (readl(i2c_bus->reg_base + AST2600_I2CS_ISR))
++ return -ENOMEM;
++
++ writel(cmd, i2c_bus->reg_base + AST2600_I2CM_CMD_STS);
++
++ return 0;
++}
++
++static int ast2600_i2c_setup_byte_tx(u32 cmd, struct ast2600_i2c_bus *i2c_bus)
++{
++ struct i2c_msg *msg = &i2c_bus->msgs[i2c_bus->msgs_index];
++ int xfer_len;
++
++ xfer_len = msg->len - i2c_bus->master_xfer_cnt;
++
++ cmd |= AST2600_I2CM_PKT_EN;
++
++ if (cmd & AST2600_I2CM_START_CMD)
++ cmd |= AST2600_I2CM_PKT_ADDR(msg->addr);
++
++ if ((i2c_bus->msgs_index + 1 == i2c_bus->msgs_count) &&
++ xfer_len == 1)
++ cmd |= AST2600_I2CM_STOP_CMD;
++
++ if (xfer_len) {
++ cmd |= AST2600_I2CM_TX_CMD;
++ writel(msg->buf[i2c_bus->master_xfer_cnt],
++ i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF);
++ }
++
++ writel(cmd, i2c_bus->reg_base + AST2600_I2CM_CMD_STS);
++
++ return 0;
++}
++
++static int ast2600_i2c_setup_dma_rx(u32 cmd, struct ast2600_i2c_bus *i2c_bus)
++{
++ struct i2c_msg *msg = &i2c_bus->msgs[i2c_bus->msgs_index];
++ int xfer_len = msg->len - i2c_bus->master_xfer_cnt;
++ int ret;
++
++ cmd |= AST2600_I2CM_PKT_EN | AST2600_I2CM_RX_DMA_EN | AST2600_I2CM_RX_CMD;
++
++ if (msg->flags & I2C_M_RECV_LEN) {
++ dev_dbg(i2c_bus->dev, "smbus read\n");
++ xfer_len = 1;
++ } else if (xfer_len > AST2600_I2C_DMA_SIZE) {
++ xfer_len = AST2600_I2C_DMA_SIZE;
++ } else if (i2c_bus->msgs_index + 1 == i2c_bus->msgs_count) {
++ cmd |= MASTER_TRIGGER_LAST_STOP;
++ }
++
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CM_SET_RX_DMA_LEN(xfer_len - 1),
++ i2c_bus->reg_base + AST2600_I2CM_DMA_LEN);
++ else
++ writel(AST2600_I2CM_SET_RX_DMA_LEN(xfer_len - 1),
++ i2c_bus->reg_base + AST2600_I2CM_DMA_LEN);
++
++ if (cmd & AST2600_I2CM_START_CMD) {
++ cmd |= AST2600_I2CM_PKT_ADDR(msg->addr);
++ i2c_bus->master_safe_buf = i2c_get_dma_safe_msg_buf(msg, 1);
++ if (!i2c_bus->master_safe_buf)
++ return -ENOMEM;
++ if (msg->flags & I2C_M_RECV_LEN)
++ i2c_bus->master_dma_addr =
++ dma_map_single(i2c_bus->dev, i2c_bus->master_safe_buf,
++ I2C_SMBUS_BLOCK_MAX + 3, DMA_FROM_DEVICE);
++ else
++ i2c_bus->master_dma_addr =
++ dma_map_single(i2c_bus->dev, i2c_bus->master_safe_buf,
++ msg->len, DMA_FROM_DEVICE);
++ ret = dma_mapping_error(i2c_bus->dev, i2c_bus->master_dma_addr);
++ if (ret) {
++ i2c_put_dma_safe_msg_buf(i2c_bus->master_safe_buf, msg, false);
++ i2c_bus->master_safe_buf = NULL;
++ return -ENOMEM;
++ }
++ }
++
++ writel(lower_32_bits(i2c_bus->master_dma_addr +
++ i2c_bus->master_xfer_cnt),
++ i2c_bus->reg_base + AST2600_I2CM_RX_DMA);
++ writel(upper_32_bits(i2c_bus->master_dma_addr +
++ i2c_bus->master_xfer_cnt),
++ i2c_bus->reg_base + AST2600_I2CM_RX_DMA_H);
++
++ if (i2c_bus->version == AST2700A0)
++ writel(0, i2c_bus->reg_base + AST2600_I2CM_DMA_LEN_STS);
++
++ writel(cmd, i2c_bus->reg_base + AST2600_I2CM_CMD_STS);
++
++ return 0;
++}
++
++static int ast2600_i2c_setup_buff_rx(u32 cmd, struct ast2600_i2c_bus *i2c_bus)
++{
++ struct i2c_msg *msg = &i2c_bus->msgs[i2c_bus->msgs_index];
++ int xfer_len = msg->len - i2c_bus->master_xfer_cnt;
++
++ cmd |= AST2600_I2CM_PKT_EN | AST2600_I2CM_RX_BUFF_EN | AST2600_I2CM_RX_CMD;
++
++ if (cmd & AST2600_I2CM_START_CMD)
++ cmd |= AST2600_I2CM_PKT_ADDR(msg->addr);
++
++ if (msg->flags & I2C_M_RECV_LEN) {
++ dev_dbg(i2c_bus->dev, "smbus read\n");
++ xfer_len = 1;
++ } else if (xfer_len > i2c_bus->buf_size) {
++ xfer_len = i2c_bus->buf_size;
++ } else if (i2c_bus->msgs_index + 1 == i2c_bus->msgs_count) {
++ cmd |= MASTER_TRIGGER_LAST_STOP;
++ }
++
++ writel(AST2600_I2CC_SET_RX_BUF_LEN(xfer_len), i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL);
++
++ writel(cmd, i2c_bus->reg_base + AST2600_I2CM_CMD_STS);
++
++ return 0;
++}
++
++static int ast2600_i2c_setup_byte_rx(u32 cmd, struct ast2600_i2c_bus *i2c_bus)
++{
++ struct i2c_msg *msg = &i2c_bus->msgs[i2c_bus->msgs_index];
++
++ cmd |= AST2600_I2CM_PKT_EN | AST2600_I2CM_RX_CMD;
++
++ if (cmd & AST2600_I2CM_START_CMD)
++ cmd |= AST2600_I2CM_PKT_ADDR(msg->addr);
++
++ if (msg->flags & I2C_M_RECV_LEN) {
++ dev_dbg(i2c_bus->dev, "smbus read\n");
++ } else if ((i2c_bus->msgs_index + 1 == i2c_bus->msgs_count) &&
++ ((i2c_bus->master_xfer_cnt + 1) == msg->len)) {
++ cmd |= MASTER_TRIGGER_LAST_STOP;
++ }
++
++ writel(cmd, i2c_bus->reg_base + AST2600_I2CM_CMD_STS);
++
++ return 0;
++}
++
+static int ast2600_i2c_do_start(struct ast2600_i2c_bus *i2c_bus)
+{
+ struct i2c_msg *msg = &i2c_bus->msgs[i2c_bus->msgs_index];
-+ int xfer_len = 0;
-+ int i = 0;
-+ u32 cmd;
-+
-+ cmd = AST2600_I2CM_PKT_EN | AST2600_I2CM_PKT_ADDR(msg->addr) | AST2600_I2CM_START_CMD;
+
+ /* send start */
+ dev_dbg(i2c_bus->dev, "[%d] %sing %d byte%s %s 0x%02x\n",
@@ -950,155 +1595,21 @@
+ i2c_bus->buf_index = 0;
+
+ if (msg->flags & I2C_M_RD) {
-+ cmd |= AST2600_I2CM_RX_CMD;
-+ if (i2c_bus->mode == DMA_MODE) {
-+ /* dma mode */
-+ cmd |= AST2600_I2CM_RX_DMA_EN;
-+
-+ if (msg->flags & I2C_M_RECV_LEN) {
-+ xfer_len = 1;
-+ } else {
-+ if (msg->len > AST2600_I2C_DMA_SIZE) {
-+ xfer_len = AST2600_I2C_DMA_SIZE;
-+ } else {
-+ xfer_len = msg->len;
-+ if (i2c_bus->msgs_index + 1 == i2c_bus->msgs_count)
-+ cmd |= MASTER_TRIGGER_LAST_STOP;
-+ }
-+ }
-+ writel(AST2600_I2CM_SET_RX_DMA_LEN(xfer_len - 1),
-+ i2c_bus->reg_base + AST2600_I2CM_DMA_LEN);
-+ i2c_bus->master_safe_buf = i2c_get_dma_safe_msg_buf(msg, 1);
-+ if (!i2c_bus->master_safe_buf)
-+ return -ENOMEM;
-+ if (msg->flags & I2C_M_RECV_LEN)
-+ i2c_bus->master_dma_addr =
-+ dma_map_single(i2c_bus->dev, i2c_bus->master_safe_buf,
-+ I2C_SMBUS_BLOCK_MAX + 3, DMA_FROM_DEVICE);
-+ else
-+ i2c_bus->master_dma_addr =
-+ dma_map_single(i2c_bus->dev, i2c_bus->master_safe_buf,
-+ msg->len, DMA_FROM_DEVICE);
-+ if (dma_mapping_error(i2c_bus->dev, i2c_bus->master_dma_addr)) {
-+ i2c_put_dma_safe_msg_buf(i2c_bus->master_safe_buf, msg, false);
-+ i2c_bus->master_safe_buf = NULL;
-+ return -ENOMEM;
-+ }
-+ writel(lower_32_bits(i2c_bus->master_dma_addr),
-+ i2c_bus->reg_base + AST2600_I2CM_RX_DMA);
-+#ifdef CONFIG_64BIT
-+ writel(upper_32_bits(i2c_bus->master_dma_addr),
-+ i2c_bus->reg_base + AST2600_I2CM_RX_DMA_H);
-+#endif
-+ } else if (i2c_bus->mode == BUFF_MODE) {
-+ /* buff mode */
-+ cmd |= AST2600_I2CM_RX_BUFF_EN;
-+ if (msg->flags & I2C_M_RECV_LEN) {
-+ dev_dbg(i2c_bus->dev, "smbus read\n");
-+ xfer_len = 1;
-+ } else {
-+ if (msg->len > i2c_bus->buf_size) {
-+ xfer_len = i2c_bus->buf_size;
-+ } else {
-+ xfer_len = msg->len;
-+ if (i2c_bus->msgs_index + 1 == i2c_bus->msgs_count)
-+ cmd |= MASTER_TRIGGER_LAST_STOP;
-+ }
-+ }
-+ writel(AST2600_I2CC_SET_RX_BUF_LEN(xfer_len),
-+ i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL);
-+ } else {
-+ /* byte mode */
-+ xfer_len = 1;
-+ if (msg->flags & I2C_M_RECV_LEN) {
-+ dev_dbg(i2c_bus->dev, "smbus read\n");
-+ } else {
-+ if (i2c_bus->msgs_index + 1 == i2c_bus->msgs_count) {
-+ if (msg->len == 1)
-+ cmd |= MASTER_TRIGGER_LAST_STOP;
-+ }
-+ }
-+ }
++ if (i2c_bus->mode == DMA_MODE)
++ return ast2600_i2c_setup_dma_rx(AST2600_I2CM_START_CMD, i2c_bus);
++ else if (i2c_bus->mode == BUFF_MODE)
++ return ast2600_i2c_setup_buff_rx(AST2600_I2CM_START_CMD, i2c_bus);
++ else
++ return ast2600_i2c_setup_byte_rx(AST2600_I2CM_START_CMD, i2c_bus);
+ } else {
-+ if (i2c_bus->mode == DMA_MODE) {
-+ /* dma mode */
-+ if (msg->len > AST2600_I2C_DMA_SIZE) {
-+ xfer_len = AST2600_I2C_DMA_SIZE;
-+ } else {
-+ if (i2c_bus->msgs_index + 1 == i2c_bus->msgs_count)
-+ cmd |= AST2600_I2CM_STOP_CMD;
-+ xfer_len = msg->len;
-+ }
-+
-+ if (xfer_len) {
-+ cmd |= AST2600_I2CM_TX_DMA_EN | AST2600_I2CM_TX_CMD;
-+ writel(AST2600_I2CM_SET_TX_DMA_LEN(xfer_len - 1),
-+ i2c_bus->reg_base + AST2600_I2CM_DMA_LEN);
-+ i2c_bus->master_safe_buf = i2c_get_dma_safe_msg_buf(msg, 1);
-+ if (!i2c_bus->master_safe_buf)
-+ return -ENOMEM;
-+ i2c_bus->master_dma_addr =
-+ dma_map_single(i2c_bus->dev, i2c_bus->master_safe_buf,
-+ msg->len, DMA_TO_DEVICE);
-+ if (dma_mapping_error(i2c_bus->dev, i2c_bus->master_dma_addr)) {
-+ i2c_put_dma_safe_msg_buf(i2c_bus->master_safe_buf,
-+ msg, false);
-+ i2c_bus->master_safe_buf = NULL;
-+ return -ENOMEM;
-+ }
-+ writel(lower_32_bits(i2c_bus->master_dma_addr),
-+ i2c_bus->reg_base + AST2600_I2CM_TX_DMA);
-+#ifdef CONFIG_64BIT
-+ writel(upper_32_bits(i2c_bus->master_dma_addr),
-+ i2c_bus->reg_base + AST2600_I2CM_TX_DMA_H);
-+#endif
-+ }
-+ } else if (i2c_bus->mode == BUFF_MODE) {
-+ u8 wbuf[4];
-+ /* buff mode */
-+ if (msg->len > i2c_bus->buf_size) {
-+ xfer_len = i2c_bus->buf_size;
-+ } else {
-+ if (i2c_bus->msgs_index + 1 == i2c_bus->msgs_count)
-+ cmd |= AST2600_I2CM_STOP_CMD;
-+ xfer_len = msg->len;
-+ }
-+ if (xfer_len) {
-+ cmd |= AST2600_I2CM_TX_BUFF_EN | AST2600_I2CM_TX_CMD;
-+ if (readl(i2c_bus->reg_base + AST2600_I2CS_ISR))
-+ return -ENOMEM;
-+ writel(AST2600_I2CC_SET_TX_BUF_LEN(xfer_len),
-+ i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL);
-+ if (readl(i2c_bus->reg_base + AST2600_I2CS_ISR))
-+ return -ENOMEM;
-+ for (i = 0; i < xfer_len; i++) {
-+ wbuf[i % 4] = msg->buf[i];
-+ if (i % 4 == 3)
-+ writel(*(u32 *)wbuf, i2c_bus->buf_base + i - 3);
-+ }
-+ if (--i % 4 != 3)
-+ writel(*(u32 *)wbuf, i2c_bus->buf_base + i - (i % 4));
-+ }
-+ if (readl(i2c_bus->reg_base + AST2600_I2CS_ISR))
-+ return -ENOMEM;
-+ } else {
-+ /* byte mode */
-+ if ((i2c_bus->msgs_index + 1 == i2c_bus->msgs_count) && msg->len <= 1)
-+ cmd |= AST2600_I2CM_STOP_CMD;
-+
-+ if (msg->len) {
-+ cmd |= AST2600_I2CM_TX_CMD;
-+ xfer_len = 1;
-+ writel(msg->buf[0], i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF);
-+ } else {
-+ xfer_len = 0;
-+ }
-+ }
++ if (i2c_bus->mode == DMA_MODE)
++ return ast2600_i2c_setup_dma_tx(AST2600_I2CM_START_CMD, i2c_bus);
++ else if (i2c_bus->mode == BUFF_MODE)
++ return ast2600_i2c_setup_buff_tx(AST2600_I2CM_START_CMD, i2c_bus);
++ else
++ return ast2600_i2c_setup_byte_tx(AST2600_I2CM_START_CMD, i2c_bus);
+ }
-+#ifdef CONFIG_MACH_ASPEED_G7 /*ast2700*/
-+ writel(0, i2c_bus->reg_base + AST2600_I2CM_DMA_LEN_STS);
-+#endif
-+ writel(cmd, i2c_bus->reg_base + AST2600_I2CM_CMD_STS);
++
+ return 0;
+}
+
@@ -1117,12 +1628,14 @@
+static void ast2600_i2c_master_package_irq(struct ast2600_i2c_bus *i2c_bus, u32 sts)
+{
+ struct i2c_msg *msg = &i2c_bus->msgs[i2c_bus->msgs_index];
-+ u32 cmd = AST2600_I2CM_PKT_EN;
+ int xfer_len;
+ int i;
+
-+ writel(AST2600_I2CM_PKT_DONE, i2c_bus->reg_base + AST2600_I2CM_ISR);
-+ writel(sts, i2c_bus->reg_base + AST2600_I2CM_ISR);
++ if (i2c_bus->version == AST2700)
++ writel(sts, i2c_bus->reg_base + AST2600_I2CM_ISR);
++ else
++ writel(AST2600_I2CM_PKT_DONE, i2c_bus->reg_base + AST2600_I2CM_ISR);
++
+ sts &= ~(AST2600_I2CM_PKT_DONE | AST2600_I2CM_SW_ISR_MASK);
+
+ switch (sts) {
@@ -1151,18 +1664,14 @@
+ break;
+ case AST2600_I2CM_TX_ACK:
+ case AST2600_I2CM_TX_ACK | AST2600_I2CM_NORMAL_STOP:
-+ if (i2c_bus->mode == DMA_MODE) {
++ if (i2c_bus->mode == DMA_MODE)
+ xfer_len = AST2600_I2C_GET_TX_DMA_LEN(readl(i2c_bus->reg_base +
+ AST2600_I2CM_DMA_LEN_STS));
-+#ifdef CONFIG_MACH_ASPEED_G7 /*ast2700*/
-+ writel(0, i2c_bus->reg_base + AST2600_I2CM_DMA_LEN_STS);
-+#endif
-+ } else if (i2c_bus->mode == BUFF_MODE) {
++ else if (i2c_bus->mode == BUFF_MODE)
+ xfer_len = AST2600_I2CC_GET_TX_BUF_LEN(readl(i2c_bus->reg_base +
+ AST2600_I2CC_BUFF_CTRL));
-+ } else {
++ else
+ xfer_len = 1;
-+ }
+
+ i2c_bus->master_xfer_cnt += xfer_len;
+
@@ -1184,57 +1693,12 @@
+ }
+ }
+ } else {
-+ /* do next tx */
-+ cmd |= AST2600_I2CM_TX_CMD;
-+ if (i2c_bus->mode == DMA_MODE) {
-+ cmd |= AST2600_I2CM_TX_DMA_EN;
-+ xfer_len = msg->len - i2c_bus->master_xfer_cnt;
-+ if (xfer_len > AST2600_I2C_DMA_SIZE) {
-+ xfer_len = AST2600_I2C_DMA_SIZE;
-+ } else {
-+ if (i2c_bus->msgs_index + 1 == i2c_bus->msgs_count)
-+ cmd |= AST2600_I2CM_STOP_CMD;
-+ }
-+ writel(AST2600_I2CM_SET_TX_DMA_LEN(xfer_len - 1),
-+ i2c_bus->reg_base + AST2600_I2CM_DMA_LEN);
-+ writel(lower_32_bits(i2c_bus->master_dma_addr +
-+ i2c_bus->master_xfer_cnt),
-+ i2c_bus->reg_base + AST2600_I2CM_TX_DMA);
-+#ifdef CONFIG_64BIT
-+ writel(upper_32_bits(i2c_bus->master_dma_addr +
-+ i2c_bus->master_xfer_cnt),
-+ i2c_bus->reg_base + AST2600_I2CM_TX_DMA_H);
-+#endif
-+ } else if (i2c_bus->mode == BUFF_MODE) {
-+ u8 wbuf[4];
-+
-+ cmd |= AST2600_I2CM_TX_BUFF_EN;
-+ xfer_len = msg->len - i2c_bus->master_xfer_cnt;
-+ if (xfer_len > i2c_bus->buf_size) {
-+ xfer_len = i2c_bus->buf_size;
-+ } else {
-+ if (i2c_bus->msgs_index + 1 == i2c_bus->msgs_count)
-+ cmd |= AST2600_I2CM_STOP_CMD;
-+ }
-+ for (i = 0; i < xfer_len; i++) {
-+ wbuf[i % 4] = msg->buf[i2c_bus->master_xfer_cnt + i];
-+ if (i % 4 == 3)
-+ writel(*(u32 *)wbuf, i2c_bus->buf_base + i - 3);
-+ }
-+ if (--i % 4 != 3)
-+ writel(*(u32 *)wbuf, i2c_bus->buf_base + i - (i % 4));
-+ writel(AST2600_I2CC_SET_TX_BUF_LEN(xfer_len),
-+ i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL);
-+ } else {
-+ /* byte */
-+ if ((i2c_bus->msgs_index + 1 == i2c_bus->msgs_count) &&
-+ ((i2c_bus->master_xfer_cnt + 1) == msg->len)) {
-+ cmd |= AST2600_I2CM_STOP_CMD;
-+ }
-+ writel(msg->buf[i2c_bus->master_xfer_cnt],
-+ i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF);
-+ }
-+ writel(cmd, i2c_bus->reg_base + AST2600_I2CM_CMD_STS);
++ if (i2c_bus->mode == DMA_MODE)
++ ast2600_i2c_setup_dma_tx(0, i2c_bus);
++ else if (i2c_bus->mode == BUFF_MODE)
++ ast2600_i2c_setup_buff_tx(0, i2c_bus);
++ else
++ ast2600_i2c_setup_byte_tx(0, i2c_bus);
+ }
+ break;
+ case AST2600_I2CM_RX_DONE:
@@ -1257,9 +1721,6 @@
+ if (i2c_bus->mode == DMA_MODE) {
+ xfer_len = AST2600_I2C_GET_RX_DMA_LEN(readl(i2c_bus->reg_base +
+ AST2600_I2CM_DMA_LEN_STS));
-+#ifdef CONFIG_MACH_ASPEED_G7 /*ast2700*/
-+ writel(0, i2c_bus->reg_base + AST2600_I2CM_DMA_LEN_STS);
-+#endif
+ } else if (i2c_bus->mode == BUFF_MODE) {
+ xfer_len = AST2600_I2CC_GET_RX_BUF_LEN(readl(i2c_bus->reg_base +
+ AST2600_I2CC_BUFF_CTRL));
@@ -1306,45 +1767,12 @@
+ }
+ }
+ } else {
-+ /* next rx */
-+ cmd |= AST2600_I2CM_RX_CMD;
-+ if (i2c_bus->mode == DMA_MODE) {
-+ cmd |= AST2600_I2CM_RX_DMA_EN;
-+ xfer_len = msg->len - i2c_bus->master_xfer_cnt;
-+ if (xfer_len > AST2600_I2C_DMA_SIZE) {
-+ xfer_len = AST2600_I2C_DMA_SIZE;
-+ } else {
-+ if (i2c_bus->msgs_index + 1 == i2c_bus->msgs_count)
-+ cmd |= MASTER_TRIGGER_LAST_STOP;
-+ }
-+ writel(AST2600_I2CM_SET_RX_DMA_LEN(xfer_len - 1),
-+ i2c_bus->reg_base + AST2600_I2CM_DMA_LEN);
-+ writel(lower_32_bits(i2c_bus->master_dma_addr +
-+ i2c_bus->master_xfer_cnt),
-+ i2c_bus->reg_base + AST2600_I2CM_RX_DMA);
-+#ifdef CONFIG_64BIT
-+ writel(upper_32_bits(i2c_bus->master_dma_addr +
-+ i2c_bus->master_xfer_cnt),
-+ i2c_bus->reg_base + AST2600_I2CM_RX_DMA_H);
-+#endif
-+ } else if (i2c_bus->mode == BUFF_MODE) {
-+ cmd |= AST2600_I2CM_RX_BUFF_EN;
-+ xfer_len = msg->len - i2c_bus->master_xfer_cnt;
-+ if (xfer_len > i2c_bus->buf_size) {
-+ xfer_len = i2c_bus->buf_size;
-+ } else {
-+ if (i2c_bus->msgs_index + 1 == i2c_bus->msgs_count)
-+ cmd |= MASTER_TRIGGER_LAST_STOP;
-+ }
-+ writel(AST2600_I2CC_SET_RX_BUF_LEN(xfer_len),
-+ i2c_bus->reg_base + AST2600_I2CC_BUFF_CTRL);
-+ } else {
-+ if ((i2c_bus->msgs_index + 1 == i2c_bus->msgs_count) &&
-+ ((i2c_bus->master_xfer_cnt + 1) == msg->len)) {
-+ cmd |= MASTER_TRIGGER_LAST_STOP;
-+ }
-+ }
-+ writel(cmd, i2c_bus->reg_base + AST2600_I2CM_CMD_STS);
++ if (i2c_bus->mode == DMA_MODE)
++ ast2600_i2c_setup_dma_rx(0, i2c_bus);
++ else if (i2c_bus->mode == BUFF_MODE)
++ ast2600_i2c_setup_buff_rx(0, i2c_bus);
++ else
++ ast2600_i2c_setup_byte_rx(0, i2c_bus);
+ }
+ break;
+ default:
@@ -1397,7 +1825,10 @@
+
+ i2c_bus->cmd_err = ast2600_i2c_irq_err_to_errno(sts);
+ if (i2c_bus->cmd_err) {
-+ writel(AST2600_I2CM_PKT_DONE, i2c_bus->reg_base + AST2600_I2CM_ISR);
++ if (i2c_bus->version == AST2700)
++ writel(sts, i2c_bus->reg_base + AST2600_I2CM_ISR);
++ else
++ writel(AST2600_I2CM_PKT_DONE, i2c_bus->reg_base + AST2600_I2CM_ISR);
+ complete(&i2c_bus->cmd_complete);
+ return 1;
+ }
@@ -1429,7 +1860,6 @@
+ unsigned long timeout;
+ int ret;
+
-+ /* If bus is busy in a single master environment, attempt recovery. */
+ if (!i2c_bus->multi_master &&
+ (readl(i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF) & AST2600_I2CC_BUS_BUSY_STS)) {
+ ret = ast2600_i2c_recover_bus(i2c_bus);
@@ -1472,6 +1902,10 @@
+ readl(i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF));
+ writel(0, i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL);
+ writel(ctrl, i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL);
++ if (i2c_bus->multi_master &&
++ (readl(i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF) &
++ AST2600_I2CC_BUS_BUSY_STS))
++ ast2600_i2c_recover_bus(i2c_bus);
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ if (ctrl & AST2600_I2CC_SLAVE_EN) {
+ u32 cmd = SLAVE_TRIGGER_CMD;
@@ -1480,18 +1914,18 @@
+ cmd |= AST2600_I2CS_RX_DMA_EN;
+ writel(lower_32_bits(i2c_bus->slave_dma_addr),
+ i2c_bus->reg_base + AST2600_I2CS_RX_DMA);
-+#ifdef CONFIG_64BIT
+ writel(upper_32_bits(i2c_bus->slave_dma_addr),
+ i2c_bus->reg_base + AST2600_I2CS_RX_DMA_H);
-+#endif
+ writel(lower_32_bits(i2c_bus->slave_dma_addr),
+ i2c_bus->reg_base + AST2600_I2CS_TX_DMA);
-+#ifdef CONFIG_64BIT
+ writel(upper_32_bits(i2c_bus->slave_dma_addr),
+ i2c_bus->reg_base + AST2600_I2CS_TX_DMA_H);
-+#endif
-+ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
-+ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ else
++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
+ } else if (i2c_bus->mode == BUFF_MODE) {
+ cmd = SLAVE_TRIGGER_CMD;
+ } else {
@@ -1500,12 +1934,6 @@
+ writel(cmd, i2c_bus->reg_base + AST2600_I2CS_CMD_STS);
+ }
+#endif
-+
-+ if (i2c_bus->multi_master &&
-+ (readl(i2c_bus->reg_base + AST2600_I2CC_STS_AND_BUFF) &
-+ AST2600_I2CC_BUS_BUSY_STS))
-+ ast2600_i2c_recover_bus(i2c_bus);
-+
+ ret = -ETIMEDOUT;
+ } else {
+ ret = i2c_bus->cmd_err;
@@ -1519,8 +1947,12 @@
+ if (i2c_bus->master_safe_buf) {
+ struct i2c_msg *msg = &i2c_bus->msgs[i2c_bus->msgs_index];
+
-+ dma_unmap_single(i2c_bus->dev, i2c_bus->master_dma_addr, msg->len,
-+ DMA_TO_DEVICE);
++ if (msg->flags & I2C_M_RD)
++ dma_unmap_single(i2c_bus->dev, i2c_bus->master_dma_addr, msg->len,
++ DMA_FROM_DEVICE);
++ else
++ dma_unmap_single(i2c_bus->dev, i2c_bus->master_dma_addr, msg->len,
++ DMA_TO_DEVICE);
+ i2c_put_dma_safe_msg_buf(i2c_bus->master_safe_buf, msg, true);
+ i2c_bus->master_safe_buf = NULL;
+ }
@@ -1532,7 +1964,7 @@
+static void ast2600_i2c_init(struct ast2600_i2c_bus *i2c_bus)
+{
+ struct platform_device *pdev = to_platform_device(i2c_bus->dev);
-+ u32 fun_ctrl = AST2600_I2CC_BUS_AUTO_RELEASE | AST2600_I2CC_MASTER_EN | AST2600_I2CC_4T_DEBOUNCE;
++ u32 fun_ctrl = AST2600_I2CC_BUS_AUTO_RELEASE | AST2600_I2CC_MASTER_EN;
+
+ /* I2C Reset */
+ writel(0, i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL);
@@ -1541,13 +1973,39 @@
+ if (!i2c_bus->multi_master)
+ fun_ctrl |= AST2600_I2CC_MULTI_MASTER_DIS;
+
++ /* I2C Debounce level */
++ if (i2c_bus->version != AST2600) {
++ if (!device_property_read_u32(&pdev->dev, "debounce-level", &i2c_bus->debounce_level)) {
++ u32 debounce_level = 0;
++
++			/* AST2700 supports manual debounce setting */
++ if (i2c_bus->version == AST2700) {
++ if (i2c_bus->debounce_level > AST2700_DEBOUNCE_LEVEL_MAX)
++ i2c_bus->debounce_level = AST2700_DEBOUNCE_LEVEL_MAX;
++ if (i2c_bus->debounce_level < AST2700_DEBOUNCE_LEVEL_MIN)
++ i2c_bus->debounce_level = AST2700_DEBOUNCE_LEVEL_MIN;
++ }
++
++ debounce_level = readl(i2c_bus->reg_base + MSIC2_CONFIG)
++ & ~(AST2700_DEBOUNCE_MASK);
++
++ debounce_level |= i2c_bus->debounce_level;
++ writel(debounce_level, i2c_bus->reg_base + MSIC2_CONFIG);
++
++ fun_ctrl |= AST2700_I2CC_MANUAL_DEBOUNCE;
++ }
++ }
++
+ /* Enable Master Mode */
+ writel(fun_ctrl, i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL);
+ /* disable slave address */
+ writel(0, i2c_bus->reg_base + AST2600_I2CS_ADDR_CTRL);
+
+ /* Set AC Timing */
-+ writel(ast2600_select_i2c_clock(i2c_bus), i2c_bus->reg_base + AST2600_I2CC_AC_TIMING);
++ if (i2c_bus->version == AST2700)
++ writel(ast2700_select_i2c_clock(i2c_bus), i2c_bus->reg_base + AST2600_I2CC_AC_TIMING);
++ else
++ writel(ast2600_select_i2c_clock(i2c_bus), i2c_bus->reg_base + AST2600_I2CC_AC_TIMING);
+
+ /* Clear Interrupt */
+ writel(GENMASK(27, 0), i2c_bus->reg_base + AST2600_I2CM_ISR);
@@ -1564,12 +2022,10 @@
+
+ writel(GENMASK(27, 0), i2c_bus->reg_base + AST2600_I2CS_ISR);
+
-+ if (i2c_bus->mode == BYTE_MODE) {
++ if (i2c_bus->mode == BYTE_MODE)
+ writel(GENMASK(15, 0), i2c_bus->reg_base + AST2600_I2CS_IER);
-+ } else {
-+ /* Set interrupt generation of I2C slave controller */
++ else
+ writel(AST2600_I2CS_PKT_DONE, i2c_bus->reg_base + AST2600_I2CS_IER);
-+ }
+#endif
+}
+
@@ -1593,18 +2049,18 @@
+ cmd |= AST2600_I2CS_RX_DMA_EN;
+ writel(lower_32_bits(i2c_bus->slave_dma_addr),
+ i2c_bus->reg_base + AST2600_I2CS_RX_DMA);
-+#ifdef CONFIG_64BIT
+ writel(upper_32_bits(i2c_bus->slave_dma_addr),
+ i2c_bus->reg_base + AST2600_I2CS_RX_DMA_H);
-+#endif
+ writel(lower_32_bits(i2c_bus->slave_dma_addr),
+ i2c_bus->reg_base + AST2600_I2CS_TX_DMA);
-+#ifdef CONFIG_64BIT
+ writel(upper_32_bits(i2c_bus->slave_dma_addr),
+ i2c_bus->reg_base + AST2600_I2CS_TX_DMA_H);
-+#endif
-+ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
-+ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ if (i2c_bus->version == AST2700A0)
++ writel(AST2700_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
++ else
++ writel(AST2600_I2CS_SET_RX_DMA_LEN(I2C_SLAVE_MSG_BUF_SIZE),
++ i2c_bus->reg_base + AST2600_I2CS_DMA_LEN);
+ } else if (i2c_bus->mode == BUFF_MODE) {
+ cmd = SLAVE_TRIGGER_CMD;
+ } else {
@@ -1620,9 +2076,9 @@
+ return 0;
+}
+
-+static int ast2600_i2c_unreg_slave(struct i2c_client *slave)
++static int ast2600_i2c_unreg_slave(struct i2c_client *client)
+{
-+ struct ast2600_i2c_bus *i2c_bus = i2c_get_adapdata(slave->adapter);
++ struct ast2600_i2c_bus *i2c_bus = i2c_get_adapdata(client->adapter);
+
+ /* Turn off slave mode. */
+ writel(~AST2600_I2CC_SLAVE_EN & readl(i2c_bus->reg_base + AST2600_I2CC_FUN_CTRL),
@@ -1643,21 +2099,13 @@
+
+static const struct i2c_algorithm i2c_ast2600_algorithm = {
+ .master_xfer = ast2600_i2c_master_xfer,
++ .functionality = ast2600_i2c_functionality,
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ .reg_slave = ast2600_i2c_reg_slave,
+ .unreg_slave = ast2600_i2c_unreg_slave,
+#endif
-+ .functionality = ast2600_i2c_functionality,
+};
+
-+static const struct of_device_id ast2600_i2c_bus_of_table[] = {
-+ {
-+ .compatible = "aspeed,ast2600-i2cv2",
-+ },
-+ {}
-+};
-+MODULE_DEVICE_TABLE(of, ast2600_i2c_bus_of_table);
-+
+static int ast2600_i2c_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
@@ -1670,6 +2118,8 @@
+ if (!i2c_bus)
+ return -ENOMEM;
+
++ i2c_bus->version = (enum i2c_version)device_get_match_data(dev);
++
+ i2c_bus->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(i2c_bus->reg_base))
+ return PTR_ERR(i2c_bus->reg_base);
@@ -1680,7 +2130,8 @@
+
+ reset_control_deassert(i2c_bus->rst);
+
-+ i2c_bus->global_regs = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,global-regs");
++ i2c_bus->global_regs =
++ syscon_regmap_lookup_by_phandle(dev_of_node(dev), "aspeed,global-regs");
+ if (IS_ERR(i2c_bus->global_regs))
+ return PTR_ERR(i2c_bus->global_regs);
+
@@ -1690,26 +2141,30 @@
+ regmap_write(i2c_bus->global_regs, AST2600_I2CG_CLK_DIV_CTRL, I2CCG_DIV_CTRL);
+ }
+
++#if IS_ENABLED(CONFIG_I2C_SLAVE)
+ i2c_bus->slave_operate = 0;
-+ i2c_bus->dev = dev;
-+#ifdef CONFIG_MACH_ASPEED_G7
-+ i2c_bus->mode = DMA_MODE;
-+#else
-+ i2c_bus->mode = BUFF_MODE;
+#endif
++ i2c_bus->dev = dev;
++ if (i2c_bus->version == AST2600)
++ i2c_bus->mode = BUFF_MODE;
++ else
++ i2c_bus->mode = DMA_MODE;
+
-+ if (device_property_read_bool(&pdev->dev, "aspeed,enable-buff"))
++ if (device_property_read_bool(dev, "aspeed,enable-byte"))
++ i2c_bus->mode = BYTE_MODE;
++
++ if (device_property_read_bool(dev, "aspeed,enable-buff"))
+ i2c_bus->mode = BUFF_MODE;
+
-+ if (device_property_read_bool(&pdev->dev, "aspeed,enable-dma"))
++ if (device_property_read_bool(dev, "aspeed,enable-dma"))
+ i2c_bus->mode = DMA_MODE;
+
+ if (i2c_bus->mode == BUFF_MODE) {
+ i2c_bus->buf_base = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
-+ if (!IS_ERR_OR_NULL(i2c_bus->buf_base))
-+ i2c_bus->buf_size = resource_size(res) / 2;
-+ else
++ if (IS_ERR(i2c_bus->buf_base))
+ i2c_bus->mode = BYTE_MODE;
++ else
++ i2c_bus->buf_size = resource_size(res) / 2;
+ }
+
+ /*
@@ -1774,9 +2229,9 @@
+ if (ret)
+ return ret;
+
-+ dev_info(dev, "%s [%d]: adapter [%d khz] mode [%d]\n",
++ dev_info(dev, "%s [%d]: adapter [%d khz] mode [%d] version [%d]\n",
+ dev->of_node->name, i2c_bus->adap.nr, i2c_bus->timing_info.bus_freq_hz / 1000,
-+ i2c_bus->mode);
++ i2c_bus->mode, i2c_bus->version);
+
+ return 0;
+}
@@ -1792,14 +2247,23 @@
+ return 0;
+}
+
++static const struct of_device_id aspeed_i2c_bus_of_table[] = {
++ { .compatible = "aspeed,ast2600-i2cv2", .data = (const void *)AST2600, },
++ { .compatible = "aspeed,ast2700a0-i2c", .data = (const void *)AST2700A0, },
++ { .compatible = "aspeed,ast2700-i2c", .data = (const void *)AST2700, },
++ {}
++};
++MODULE_DEVICE_TABLE(of, aspeed_i2c_bus_of_table);
++
+static struct platform_driver ast2600_i2c_bus_driver = {
+ .probe = ast2600_i2c_probe,
+ .remove = ast2600_i2c_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
-+ .of_match_table = ast2600_i2c_bus_of_table,
++ .of_match_table = aspeed_i2c_bus_of_table,
+ },
+};
++
+module_platform_driver(ast2600_i2c_bus_driver);
+
+MODULE_AUTHOR("Ryan Chen <ryan_chen@aspeedtech.com>");
@@ -1936,73 +2400,6 @@
/* Sanity check on class */
if (i2c_mux_parent_classes(parent) & class & ~I2C_CLASS_DEPRECATED)
dev_err(&parent->dev,
-diff --git a/include/linux/i2c.h b/include/linux/i2c.h
-index 32cf5708d..e131ab9db 100644
---- a/include/linux/i2c.h
-+++ b/include/linux/i2c.h
-@@ -582,6 +582,26 @@ struct i2c_lock_operations {
- void (*unlock_bus)(struct i2c_adapter *adapter, unsigned int flags);
- };
-
-+/**
-+ * struct i2c_mux_root_operations - represent operations to lock and select
-+ * the adapter's mux channel (if a mux is present)
-+ * @lock_select: Get exclusive access to the root I2C bus adapter with the
-+ * correct mux channel selected for the adapter
-+ * @unlock_deslect: Release exclusive access to the root I2C bus adapter and
-+ * deselect the mux channel for the adapter
-+ *
-+ * Some I2C clients need the ability to control the root I2C bus even if the
-+ * endpoint device is behind a mux. For example, a driver for a chip that
-+ * can't handle any I2C traffic on the bus while coming out of reset (including
-+ * an I2C-driven mux switching channels) may need to lock the root bus with
-+ * the mux selection fixed for the entire time the device is in reset.
-+ * These operations are for such a purpose.
-+ */
-+struct i2c_mux_root_operations {
-+ struct i2c_adapter *(*lock_select)(struct i2c_adapter *adapter);
-+ void (*unlock_deselect)(struct i2c_adapter *adapter);
-+};
-+
- /**
- * struct i2c_timings - I2C timing information
- * @bus_freq_hz: the bus frequency in Hz
-@@ -724,6 +744,7 @@ struct i2c_adapter {
-
- /* data fields that are valid for all devices */
- const struct i2c_lock_operations *lock_ops;
-+ const struct i2c_mux_root_operations *mux_root_ops;
- struct rt_mutex bus_lock;
- struct rt_mutex mux_lock;
-
-@@ -816,6 +837,27 @@ i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags)
- adapter->lock_ops->unlock_bus(adapter, flags);
- }
-
-+/**
-+ * i2c_lock_select_bus - Get exclusive access to the root I2C bus with the
-+ * target's mux channel (if a mux is present) selected.
-+ * @adapter: Target I2C bus
-+ *
-+ * Return the root I2C bus if mux selection succeeds, an ERR_PTR otherwise
-+ */
-+static inline struct i2c_adapter *i2c_lock_select_bus(struct i2c_adapter *adapter)
-+{
-+ return adapter->mux_root_ops->lock_select(adapter);
-+}
-+
-+/**
-+ * i2c_unlock_deselect_bus - Release exclusive access to the root I2C bus
-+ * @adapter: Target I2C bus
-+ */
-+static inline void i2c_unlock_deselect_bus(struct i2c_adapter *adapter)
-+{
-+ adapter->mux_root_ops->unlock_deselect(adapter);
-+}
-+
- /**
- * i2c_mark_adapter_suspended - Report suspended state of the adapter to the core
- * @adap: Adapter to mark as suspended
---
+--
2.34.1
diff --git a/recipes-kernel/linux/files/0015-porting-emmc-driver.patch b/recipes-kernel/linux/files/0016-Add-emmc-driver-for-ast2700.patch
similarity index 85%
rename from recipes-kernel/linux/files/0015-porting-emmc-driver.patch
rename to recipes-kernel/linux/files/0016-Add-emmc-driver-for-ast2700.patch
index a728046..241e0a0 100644
--- a/recipes-kernel/linux/files/0015-porting-emmc-driver.patch
+++ b/recipes-kernel/linux/files/0016-Add-emmc-driver-for-ast2700.patch
@@ -1,21 +1,59 @@
-From 724b98bf19eab7d4715976639b3537874105e62e Mon Sep 17 00:00:00 2001
-From: wukaihua <eason.kh.wu@fii-na.corp-partner.google.com>
-Date: Mon, 6 Jan 2025 10:37:59 +0800
-Subject: [PATCH] porting emmc driver
+From 2fdbfa8f349f70b6d4492336338b406b2242e30c Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Fri, 7 Mar 2025 14:04:58 +0800
+Subject: [PATCH] Add emmc driver for ast2700
-porting emmc driver for ast2700
+This is based on aspeed SDK 9.05.
+From linux-aspeed:
+769f62b7baa84d6998723b0ea60280e380183553
-Source:
-AspeedTech-BMC github:
-https://github.com/AspeedTech-BMC/linux/blob/03952f2ca0d780e5d3b3aca4a8fd004743073f8e/drivers/mmc/host/sdhci-of-aspeed.c
-
-Signed-off-by: wukaihua <eason.kh.wu@fii-na.corp-partner.google.com>
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
---
- drivers/mmc/host/sdhci-of-aspeed.c | 452 +++++++++++++----------------
- 1 file changed, 206 insertions(+), 246 deletions(-)
+ drivers/mmc/core/core.c | 1 -
+ drivers/mmc/core/host.c | 10 +-
+ drivers/mmc/host/sdhci-of-aspeed.c | 458 +++++++++++++----------------
+ drivers/mmc/host/sdhci-pltfm.c | 2 +
+ 4 files changed, 217 insertions(+), 254 deletions(-)
+diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
+index a8c17b4cd..1df799bfc 100644
+--- a/drivers/mmc/core/core.c
++++ b/drivers/mmc/core/core.c
+@@ -991,7 +991,6 @@ void mmc_set_initial_state(struct mmc_host *host)
+ host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+ host->ios.bus_width = MMC_BUS_WIDTH_1;
+ host->ios.timing = MMC_TIMING_LEGACY;
+- host->ios.drv_type = 0;
+ host->ios.enhanced_strobe = false;
+
+ /*
+diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
+index cf396e8f3..c8538be5a 100644
+--- a/drivers/mmc/core/host.c
++++ b/drivers/mmc/core/host.c
+@@ -222,14 +222,16 @@ static void mmc_retune_timer(struct timer_list *t)
+ static void mmc_of_parse_timing_phase(struct device *dev, const char *prop,
+ struct mmc_clk_phase *phase)
+ {
+- int degrees[2] = {0};
++ int degree_info[4] = {0};
+ int rc;
+
+- rc = device_property_read_u32_array(dev, prop, degrees, 2);
++ rc = device_property_read_u32_array(dev, prop, degree_info, 4);
+ phase->valid = !rc;
+ if (phase->valid) {
+- phase->in_deg = degrees[0];
+- phase->out_deg = degrees[1];
++ phase->inv_in_deg = degree_info[0] ? true : false;
++ phase->in_deg = degree_info[1];
++ phase->inv_out_deg = degree_info[2] ? true : false;
++ phase->out_deg = degree_info[3];
+ }
+ }
+
diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
-index 8379a0620..2c1421b99 100644
+index 8379a0620..e049309a9 100644
--- a/drivers/mmc/host/sdhci-of-aspeed.c
+++ b/drivers/mmc/host/sdhci-of-aspeed.c
@@ -13,6 +13,7 @@
@@ -35,7 +73,7 @@
#define ASPEED_SDC_S1_PHASE_OUT GENMASK(15, 11)
#define ASPEED_SDC_S1_PHASE_IN_EN BIT(10)
#define ASPEED_SDC_S1_PHASE_OUT_EN GENMASK(9, 8)
-@@ -31,50 +34,43 @@
+@@ -31,61 +34,54 @@
#define ASPEED_SDC_S0_PHASE_OUT_EN GENMASK(1, 0)
#define ASPEED_SDC_PHASE_MAX 31
@@ -104,6 +142,20 @@
};
/*
+ * The function sets the mirror register for updating
+ * capbilities of the current slot.
+ *
+- * slot | capability | caps_reg | mirror_reg
++ * slot | capability | caps_reg | mirror_reg
+ * -----|-------------|----------|------------
+- * 0 | CAP1_1_8V | SDIO140 | SDIO10
++ * 0 | CAP1_1_8V | SDIO140 | SDIO10
+ * 0 | CAP2_SDR104 | SDIO144 | SDIO14
+- * 1 | CAP1_1_8V | SDIO240 | SDIO20
++ * 1 | CAP1_1_8V | SDIO240 | SDIO20
+ * 1 | CAP2_SDR104 | SDIO244 | SDIO24
+ */
+ static void aspeed_sdc_set_slot_capability(struct sdhci_host *host, struct aspeed_sdc *sdc,
@@ -121,226 +117,216 @@ static void aspeed_sdc_configure_8bit_mode(struct aspeed_sdc *sdc,
info |= sdhci->width_mask;
else
@@ -647,6 +699,19 @@
static struct platform_driver aspeed_sdc_driver = {
.driver = {
.name = "sd-controller-aspeed",
+diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
+index a72e123a5..0ccb6a8c8 100644
+--- a/drivers/mmc/host/sdhci-pltfm.c
++++ b/drivers/mmc/host/sdhci-pltfm.c
+@@ -106,6 +106,8 @@ void sdhci_get_property(struct platform_device *pdev)
+ if (device_property_read_bool(dev, "wakeup-source") ||
+ device_property_read_bool(dev, "enable-sdio-wakeup")) /* legacy */
+ host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ;
++ if (device_property_read_u8(dev, "sdhci-drive-type", &host->mmc->ios.drv_type) != 0)
++ host->mmc->ios.drv_type = 0;
+ }
+ EXPORT_SYMBOL_GPL(sdhci_get_property);
+
--
2.34.1
diff --git a/recipes-kernel/linux/files/0016-Enable-jtag-driver.patch b/recipes-kernel/linux/files/0017-Add-jtag-driver-for-ast2700.patch
similarity index 78%
rename from recipes-kernel/linux/files/0016-Enable-jtag-driver.patch
rename to recipes-kernel/linux/files/0017-Add-jtag-driver-for-ast2700.patch
index c1d45fb..d581071 100644
--- a/recipes-kernel/linux/files/0016-Enable-jtag-driver.patch
+++ b/recipes-kernel/linux/files/0017-Add-jtag-driver-for-ast2700.patch
@@ -1,12 +1,12 @@
-From 79b9a779c2169f1c90cbf8642e8c21fb79fe2358 Mon Sep 17 00:00:00 2001
+From 11812005f0abb8c88be98e6a0a6e1e7cb1d8d7ac Mon Sep 17 00:00:00 2001
From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
-Date: Mon, 13 Jan 2025 15:54:08 +0800
-Subject: [PATCH] Enable jtag driver
+Date: Tue, 11 Mar 2025 15:44:01 +0800
+Subject: [PATCH] Add jtag driver for ast2700
-Add aspeed jtag driver.
+This is based on aspeed SDK 9.05.
+From linux-aspeed:
+769f62b7baa84d6998723b0ea60280e380183553
-https://github.com/AspeedTech-BMC/linux/tree/v00.06.03
-(cherry picked from commit a769cc67850759a3952f7a40f5f5798c3d0f7bfd)
Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
---
drivers/Kconfig | 1 +
@@ -15,15 +15,11 @@
drivers/jtag/Makefile | 2 +
drivers/jtag/jtag-aspeed.c | 1655 ++++++++++++++++++++++++++++++++++++
drivers/jtag/jtag.c | 381 +++++++++
- include/linux/jtag.h | 49 ++
- include/uapi/linux/jtag.h | 370 ++++++++
- 8 files changed, 2490 insertions(+)
+ 6 files changed, 2071 insertions(+)
create mode 100644 drivers/jtag/Kconfig
create mode 100644 drivers/jtag/Makefile
create mode 100644 drivers/jtag/jtag-aspeed.c
create mode 100644 drivers/jtag/jtag.c
- create mode 100644 include/linux/jtag.h
- create mode 100644 include/uapi/linux/jtag.h
diff --git a/drivers/Kconfig b/drivers/Kconfig
index efb66e25f..ea53948b1 100644
@@ -2137,437 +2133,6 @@
+MODULE_AUTHOR("Oleksandr Shamray <oleksandrs@mellanox.com>");
+MODULE_DESCRIPTION("Generic jtag support");
+MODULE_LICENSE("GPL v2");
-diff --git a/include/linux/jtag.h b/include/linux/jtag.h
-new file mode 100644
-index 000000000..3b7157df3
---- /dev/null
-+++ b/include/linux/jtag.h
-@@ -0,0 +1,49 @@
-+/* SPDX-License-Identifier: GPL-2.0 */
-+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved. */
-+/* Copyright (c) 2018 Oleksandr Shamray <oleksandrs@mellanox.com> */
-+/* Copyright (c) 2019 Intel Corporation */
-+
-+#ifndef __LINUX_JTAG_H
-+#define __LINUX_JTAG_H
-+
-+#include <linux/types.h>
-+#include <uapi/linux/jtag.h>
-+
-+#define JTAG_MAX_XFER_DATA_LEN (0xFFFFFFFF) //65535
-+
-+struct jtag;
-+/**
-+ * struct jtag_ops - callbacks for JTAG control functions:
-+ *
-+ * @freq_get: get frequency function. Filled by dev driver
-+ * @freq_set: set frequency function. Filled by dev driver
-+ * @status_get: get JTAG TAPC state function. Mandatory, Filled by dev driver
-+ * @status_set: set JTAG TAPC state function. Mandatory, Filled by dev driver
-+ * @xfer: send JTAG xfer function. Mandatory func. Filled by dev driver
-+ * @mode_set: set specific work mode for JTAG. Filled by dev driver
-+ * @trst_set: set TRST pin active(pull low) for JTAG. Filled by dev driver
-+ * @bitbang: set low level bitbang operations. Filled by dev driver
-+ * @enable: enables JTAG interface in master mode. Filled by dev driver
-+ * @disable: disables JTAG interface master mode. Filled by dev driver
-+ */
-+struct jtag_ops {
-+ int (*freq_get)(struct jtag *jtag, u32 *freq);
-+ int (*freq_set)(struct jtag *jtag, u32 freq);
-+ int (*status_get)(struct jtag *jtag, u32 *state);
-+ int (*status_set)(struct jtag *jtag, struct jtag_tap_state *endst);
-+ int (*xfer)(struct jtag *jtag, struct jtag_xfer *xfer, u8 *xfer_data);
-+ int (*mode_set)(struct jtag *jtag, struct jtag_mode *jtag_mode);
-+ int (*trst_set)(struct jtag *jtag, u32 active);
-+ int (*bitbang)(struct jtag *jtag, struct bitbang_packet *bitbang,
-+ struct tck_bitbang *bitbang_data);
-+ int (*enable)(struct jtag *jtag);
-+ int (*disable)(struct jtag *jtag);
-+};
-+
-+void *jtag_priv(struct jtag *jtag);
-+int devm_jtag_register(struct device *dev, struct jtag *jtag);
-+struct jtag *jtag_alloc(struct device *host, size_t priv_size,
-+ const struct jtag_ops *ops);
-+void jtag_free(struct jtag *jtag);
-+
-+#endif /* __LINUX_JTAG_H */
-diff --git a/include/uapi/linux/jtag.h b/include/uapi/linux/jtag.h
-new file mode 100644
-index 000000000..77d0b471e
---- /dev/null
-+++ b/include/uapi/linux/jtag.h
-@@ -0,0 +1,370 @@
-+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved. */
-+/* Copyright (c) 2018 Oleksandr Shamray <oleksandrs@mellanox.com> */
-+/* Copyright (c) 2019 Intel Corporation */
-+
-+#ifndef __UAPI_LINUX_JTAG_H
-+#define __UAPI_LINUX_JTAG_H
-+
-+#include <linux/types.h>
-+#include <linux/ioctl.h>
-+
-+/*
-+ * JTAG_XFER_MODE: JTAG transfer mode. Used to set JTAG controller transfer mode
-+ * This is bitmask for feature param in jtag_mode for ioctl JTAG_SIOCMODE
-+ */
-+#define JTAG_XFER_MODE 0
-+/*
-+ * JTAG_CONTROL_MODE: JTAG controller mode. Used to set JTAG controller mode
-+ * This is bitmask for feature param in jtag_mode for ioctl JTAG_SIOCMODE
-+ */
-+#define JTAG_CONTROL_MODE 1
-+/*
-+ * JTAG_MASTER_OUTPUT_DISABLE: JTAG master mode output disable, it is used to
-+ * enable other devices to own the JTAG bus.
-+ * This is bitmask for mode param in jtag_mode for ioctl JTAG_SIOCMODE
-+ */
-+#define JTAG_MASTER_OUTPUT_DISABLE 0
-+/*
-+ * JTAG_MASTER_MODE: JTAG master mode. Used to set JTAG controller master mode
-+ * This is bitmask for mode param in jtag_mode for ioctl JTAG_SIOCMODE
-+ */
-+#define JTAG_MASTER_MODE 1
-+/*
-+ * JTAG_XFER_HW_MODE: JTAG hardware mode. Used to set HW drived or bitbang
-+ * mode. This is bitmask for mode param in jtag_mode for ioctl JTAG_SIOCMODE
-+ */
-+#define JTAG_XFER_HW_MODE 1
-+/*
-+ * JTAG_XFER_SW_MODE: JTAG software mode. Used to set SW drived or bitbang
-+ * mode. This is bitmask for mode param in jtag_mode for ioctl JTAG_SIOCMODE
-+ */
-+#define JTAG_XFER_SW_MODE 0
-+
-+/**
-+ * enum jtag_tapstate:
-+ *
-+ * @JTAG_STATE_TLRESET: JTAG state machine Test Logic Reset state
-+ * @JTAG_STATE_IDLE: JTAG state machine IDLE state
-+ * @JTAG_STATE_SELECTDR: JTAG state machine SELECT_DR state
-+ * @JTAG_STATE_CAPTUREDR: JTAG state machine CAPTURE_DR state
-+ * @JTAG_STATE_SHIFTDR: JTAG state machine SHIFT_DR state
-+ * @JTAG_STATE_EXIT1DR: JTAG state machine EXIT-1 DR state
-+ * @JTAG_STATE_PAUSEDR: JTAG state machine PAUSE_DR state
-+ * @JTAG_STATE_EXIT2DR: JTAG state machine EXIT-2 DR state
-+ * @JTAG_STATE_UPDATEDR: JTAG state machine UPDATE DR state
-+ * @JTAG_STATE_SELECTIR: JTAG state machine SELECT_IR state
-+ * @JTAG_STATE_CAPTUREIR: JTAG state machine CAPTURE_IR state
-+ * @JTAG_STATE_SHIFTIR: JTAG state machine SHIFT_IR state
-+ * @JTAG_STATE_EXIT1IR: JTAG state machine EXIT-1 IR state
-+ * @JTAG_STATE_PAUSEIR: JTAG state machine PAUSE_IR state
-+ * @JTAG_STATE_EXIT2IR: JTAG state machine EXIT-2 IR state
-+ * @JTAG_STATE_UPDATEIR: JTAG state machine UPDATE IR state
-+ * @JTAG_STATE_CURRENT: JTAG current state, saved by driver
-+ */
-+enum jtag_tapstate {
-+ JTAG_STATE_TLRESET,
-+ JTAG_STATE_IDLE,
-+ JTAG_STATE_SELECTDR,
-+ JTAG_STATE_CAPTUREDR,
-+ JTAG_STATE_SHIFTDR,
-+ JTAG_STATE_EXIT1DR,
-+ JTAG_STATE_PAUSEDR,
-+ JTAG_STATE_EXIT2DR,
-+ JTAG_STATE_UPDATEDR,
-+ JTAG_STATE_SELECTIR,
-+ JTAG_STATE_CAPTUREIR,
-+ JTAG_STATE_SHIFTIR,
-+ JTAG_STATE_EXIT1IR,
-+ JTAG_STATE_PAUSEIR,
-+ JTAG_STATE_EXIT2IR,
-+ JTAG_STATE_UPDATEIR,
-+ JTAG_STATE_CURRENT
-+};
-+
-+/**
-+ * enum jtag_reset:
-+ *
-+ * @JTAG_NO_RESET: JTAG run TAP from current state
-+ * @JTAG_FORCE_RESET: JTAG force TAP to reset state
-+ */
-+enum jtag_reset {
-+ JTAG_NO_RESET = 0,
-+ JTAG_FORCE_RESET = 1,
-+};
-+
-+/**
-+ * enum jtag_xfer_type:
-+ *
-+ * @JTAG_SIR_XFER: SIR transfer
-+ * @JTAG_SDR_XFER: SDR transfer
-+ */
-+enum jtag_xfer_type {
-+ JTAG_SIR_XFER = 0,
-+ JTAG_SDR_XFER = 1,
-+};
-+
-+/**
-+ * enum jtag_xfer_direction:
-+ *
-+ * @JTAG_READ_XFER: read transfer
-+ * @JTAG_WRITE_XFER: write transfer
-+ * @JTAG_READ_WRITE_XFER: read & write transfer
-+ */
-+enum jtag_xfer_direction {
-+ JTAG_READ_XFER = 1,
-+ JTAG_WRITE_XFER = 2,
-+ JTAG_READ_WRITE_XFER = 3,
-+};
-+
-+/**
-+ * struct jtag_tap_state - forces JTAG state machine to go into a TAPC
-+ * state
-+ *
-+ * @reset: 0 - run IDLE/PAUSE from current state
-+ * 1 - go through TEST_LOGIC/RESET state before IDLE/PAUSE
-+ * @end: completion flag
-+ * @tck: clock counter
-+ *
-+ * Structure provide interface to JTAG device for JTAG set state execution.
-+ */
-+struct jtag_tap_state {
-+ __u8 reset;
-+ __u8 from;
-+ __u8 endstate;
-+ __u8 tck;
-+};
-+
-+/**
-+ * union pad_config - Padding Configuration:
-+ *
-+ * @type: transfer type
-+ * @pre_pad_number: Number of prepadding bits bit[11:0]
-+ * @post_pad_number: Number of prepadding bits bit[23:12]
-+ * @pad_data : Bit value to be used by pre and post padding bit[24]
-+ * @int_value: unsigned int packed padding configuration value bit[32:0]
-+ *
-+ * Structure provide pre and post padding configuration in a single __u32
-+ */
-+union pad_config {
-+ struct {
-+ __u32 pre_pad_number : 12;
-+ __u32 post_pad_number : 12;
-+ __u32 pad_data : 1;
-+ __u32 rsvd : 7;
-+ };
-+ __u32 int_value;
-+};
-+
-+/**
-+ * struct jtag_xfer - jtag xfer:
-+ *
-+ * @type: transfer type
-+ * @direction: xfer direction
-+ * @from: xfer current state
-+ * @endstate: xfer end state
-+ * @padding: xfer padding
-+ * @length: xfer bits length
-+ * @tdio : xfer data array
-+ *
-+ * Structure provide interface to JTAG device for JTAG SDR/SIR xfer execution.
-+ */
-+struct jtag_xfer {
-+ __u8 type;
-+ __u8 direction;
-+ __u8 from;
-+ __u8 endstate;
-+ __u32 padding;
-+ __u32 length;
-+ __u64 tdio;
-+};
-+
-+/**
-+ * struct bitbang_packet - jtag bitbang array packet:
-+ *
-+ * @data: JTAG Bitbang struct array pointer(input/output)
-+ * @length: array size (input)
-+ *
-+ * Structure provide interface to JTAG device for JTAG bitbang bundle execution
-+ */
-+struct bitbang_packet {
-+ struct tck_bitbang *data;
-+ __u32 length;
-+} __attribute__((__packed__));
-+
-+/**
-+ * struct jtag_bitbang - jtag bitbang:
-+ *
-+ * @tms: JTAG TMS
-+ * @tdi: JTAG TDI (input)
-+ * @tdo: JTAG TDO (output)
-+ *
-+ * Structure provide interface to JTAG device for JTAG bitbang execution.
-+ */
-+struct tck_bitbang {
-+ __u8 tms;
-+ __u8 tdi;
-+ __u8 tdo;
-+} __attribute__((__packed__));
-+
-+/**
-+ * struct jtag_mode - jtag mode:
-+ *
-+ * @feature: 0 - JTAG feature setting selector for JTAG controller HW/SW
-+ * 1 - JTAG feature setting selector for controller bus master
-+ * mode output (enable / disable).
-+ * @mode: (0 - SW / 1 - HW) for JTAG_XFER_MODE feature(0)
-+ * (0 - output disable / 1 - output enable) for JTAG_CONTROL_MODE
-+ * feature(1)
-+ *
-+ * Structure provide configuration modes to JTAG device.
-+ */
-+struct jtag_mode {
-+ __u32 feature;
-+ __u32 mode;
-+};
-+
-+/* ioctl interface */
-+#define __JTAG_IOCTL_MAGIC 0xb2
-+
-+#define JTAG_SIOCSTATE _IOW(__JTAG_IOCTL_MAGIC, 0, struct jtag_tap_state)
-+#define JTAG_SIOCFREQ _IOW(__JTAG_IOCTL_MAGIC, 1, unsigned int)
-+#define JTAG_GIOCFREQ _IOR(__JTAG_IOCTL_MAGIC, 2, unsigned int)
-+#define JTAG_IOCXFER _IOWR(__JTAG_IOCTL_MAGIC, 3, struct jtag_xfer)
-+#define JTAG_GIOCSTATUS _IOWR(__JTAG_IOCTL_MAGIC, 4, enum jtag_tapstate)
-+#define JTAG_SIOCMODE _IOW(__JTAG_IOCTL_MAGIC, 5, unsigned int)
-+#define JTAG_IOCBITBANG _IOW(__JTAG_IOCTL_MAGIC, 6, unsigned int)
-+#define JTAG_SIOCTRST _IOW(__JTAG_IOCTL_MAGIC, 7, unsigned int)
-+
-+/**
-+ * struct tms_cycle - This structure represents a tms cycle state.
-+ *
-+ * @tmsbits: is the bitwise representation of the needed tms transitions to
-+ * move from one state to another.
-+ * @count: number of jumps needed to move to the needed state.
-+ *
-+ */
-+struct tms_cycle {
-+ unsigned char tmsbits;
-+ unsigned char count;
-+};
-+
-+/*
-+ * This is the complete set TMS cycles for going from any TAP state to any
-+ * other TAP state, following a "shortest path" rule.
-+ */
-+static const struct tms_cycle _tms_cycle_lookup[][16] = {
-+/* TLR RTI SelDR CapDR SDR Ex1DR*/
-+/* TLR */{{0x00, 0}, {0x00, 1}, {0x02, 2}, {0x02, 3}, {0x02, 4}, {0x0a, 4},
-+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
-+ {0x0a, 5}, {0x2a, 6}, {0x1a, 5}, {0x06, 3}, {0x06, 4}, {0x06, 5},
-+/* Ex1IR PIR Ex2IR UpdIR*/
-+ {0x16, 5}, {0x16, 6}, {0x56, 7}, {0x36, 6} },
-+
-+/* TLR RTI SelDR CapDR SDR Ex1DR*/
-+/* RTI */{{0x07, 3}, {0x00, 0}, {0x01, 1}, {0x01, 2}, {0x01, 3}, {0x05, 3},
-+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
-+ {0x05, 4}, {0x15, 5}, {0x0d, 4}, {0x03, 2}, {0x03, 3}, {0x03, 4},
-+/* Ex1IR PIR Ex2IR UpdIR*/
-+ {0x0b, 4}, {0x0b, 5}, {0x2b, 6}, {0x1b, 5} },
-+
-+/* TLR RTI SelDR CapDR SDR Ex1DR*/
-+/* SelDR*/{{0x03, 2}, {0x03, 3}, {0x00, 0}, {0x00, 1}, {0x00, 2}, {0x02, 2},
-+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
-+ {0x02, 3}, {0x0a, 4}, {0x06, 3}, {0x01, 1}, {0x01, 2}, {0x01, 3},
-+/* Ex1IR PIR Ex2IR UpdIR*/
-+ {0x05, 3}, {0x05, 4}, {0x15, 5}, {0x0d, 4} },
-+
-+/* TLR RTI SelDR CapDR SDR Ex1DR*/
-+/* CapDR*/{{0x1f, 5}, {0x03, 3}, {0x07, 3}, {0x00, 0}, {0x00, 1}, {0x01, 1},
-+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
-+ {0x01, 2}, {0x05, 3}, {0x03, 2}, {0x0f, 4}, {0x0f, 5}, {0x0f, 6},
-+/* Ex1IR PIR Ex2IR UpdIR*/
-+ {0x2f, 6}, {0x2f, 7}, {0xaf, 8}, {0x6f, 7} },
-+
-+/* TLR RTI SelDR CapDR SDR Ex1DR*/
-+/* SDR */{{0x1f, 5}, {0x03, 3}, {0x07, 3}, {0x07, 4}, {0x00, 0}, {0x01, 1},
-+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
-+ {0x01, 2}, {0x05, 3}, {0x03, 2}, {0x0f, 4}, {0x0f, 5}, {0x0f, 6},
-+/* Ex1IR PIR Ex2IR UpdIR*/
-+ {0x2f, 6}, {0x2f, 7}, {0xaf, 8}, {0x6f, 7} },
-+
-+/* TLR RTI SelDR CapDR SDR Ex1DR*/
-+/* Ex1DR*/{{0x0f, 4}, {0x01, 2}, {0x03, 2}, {0x03, 3}, {0x02, 3}, {0x00, 0},
-+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
-+ {0x00, 1}, {0x02, 2}, {0x01, 1}, {0x07, 3}, {0x07, 4}, {0x07, 5},
-+/* Ex1IR PIR Ex2IR UpdIR*/
-+ {0x17, 5}, {0x17, 6}, {0x57, 7}, {0x37, 6} },
-+
-+/* TLR RTI SelDR CapDR SDR Ex1DR*/
-+/* PDR */{{0x1f, 5}, {0x03, 3}, {0x07, 3}, {0x07, 4}, {0x01, 2}, {0x05, 3},
-+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
-+ {0x00, 0}, {0x01, 1}, {0x03, 2}, {0x0f, 4}, {0x0f, 5}, {0x0f, 6},
-+/* Ex1IR PIR Ex2IR UpdIR*/
-+ {0x2f, 6}, {0x2f, 7}, {0xaf, 8}, {0x6f, 7} },
-+
-+/* TLR RTI SelDR CapDR SDR Ex1DR*/
-+/* Ex2DR*/{{0x0f, 4}, {0x01, 2}, {0x03, 2}, {0x03, 3}, {0x00, 1}, {0x02, 2},
-+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
-+ {0x02, 3}, {0x00, 0}, {0x01, 1}, {0x07, 3}, {0x07, 4}, {0x07, 5},
-+/* Ex1IR PIR Ex2IR UpdIR*/
-+ {0x17, 5}, {0x17, 6}, {0x57, 7}, {0x37, 6} },
-+
-+/* TLR RTI SelDR CapDR SDR Ex1DR*/
-+/* UpdDR*/{{0x07, 3}, {0x00, 1}, {0x01, 1}, {0x01, 2}, {0x01, 3}, {0x05, 3},
-+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
-+ {0x05, 4}, {0x15, 5}, {0x00, 0}, {0x03, 2}, {0x03, 3}, {0x03, 4},
-+/* Ex1IR PIR Ex2IR UpdIR*/
-+ {0x0b, 4}, {0x0b, 5}, {0x2b, 6}, {0x1b, 5} },
-+
-+/* TLR RTI SelDR CapDR SDR Ex1DR*/
-+/* SelIR*/{{0x01, 1}, {0x01, 2}, {0x05, 3}, {0x05, 4}, {0x05, 5}, {0x15, 5},
-+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
-+ {0x15, 6}, {0x55, 7}, {0x35, 6}, {0x00, 0}, {0x00, 1}, {0x00, 2},
-+/* Ex1IR PIR Ex2IR UpdIR*/
-+ {0x02, 2}, {0x02, 3}, {0x0a, 4}, {0x06, 3} },
-+
-+/* TLR RTI SelDR CapDR SDR Ex1DR*/
-+/* CapIR*/{{0x1f, 5}, {0x03, 3}, {0x07, 3}, {0x07, 4}, {0x07, 5}, {0x17, 5},
-+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
-+ {0x17, 6}, {0x57, 7}, {0x37, 6}, {0x0f, 4}, {0x00, 0}, {0x00, 1},
-+/* Ex1IR PIR Ex2IR UpdIR*/
-+ {0x01, 1}, {0x01, 2}, {0x05, 3}, {0x03, 2} },
-+
-+/* TLR RTI SelDR CapDR SDR Ex1DR*/
-+/* SIR */{{0x1f, 5}, {0x03, 3}, {0x07, 3}, {0x07, 4}, {0x07, 5}, {0x17, 5},
-+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
-+ {0x17, 6}, {0x57, 7}, {0x37, 6}, {0x0f, 4}, {0x0f, 5}, {0x00, 0},
-+/* Ex1IR PIR Ex2IR UpdIR*/
-+ {0x01, 1}, {0x01, 2}, {0x05, 3}, {0x03, 2} },
-+
-+/* TLR RTI SelDR CapDR SDR Ex1DR*/
-+/* Ex1IR*/{{0x0f, 4}, {0x01, 2}, {0x03, 2}, {0x03, 3}, {0x03, 4}, {0x0b, 4},
-+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
-+ {0x0b, 5}, {0x2b, 6}, {0x1b, 5}, {0x07, 3}, {0x07, 4}, {0x02, 3},
-+/* Ex1IR PIR Ex2IR UpdIR*/
-+ {0x00, 0}, {0x00, 1}, {0x02, 2}, {0x01, 1} },
-+
-+/* TLR RTI SelDR CapDR SDR Ex1DR*/
-+/* PIR */{{0x1f, 5}, {0x03, 3}, {0x07, 3}, {0x07, 4}, {0x07, 5}, {0x17, 5},
-+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
-+ {0x17, 6}, {0x57, 7}, {0x37, 6}, {0x0f, 4}, {0x0f, 5}, {0x01, 2},
-+/* Ex1IR PIR Ex2IR UpdIR*/
-+ {0x05, 3}, {0x00, 0}, {0x01, 1}, {0x03, 2} },
-+
-+/* TLR RTI SelDR CapDR SDR Ex1DR*/
-+/* Ex2IR*/{{0x0f, 4}, {0x01, 2}, {0x03, 2}, {0x03, 3}, {0x03, 4}, {0x0b, 4},
-+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
-+ {0x0b, 5}, {0x2b, 6}, {0x1b, 5}, {0x07, 3}, {0x07, 4}, {0x00, 1},
-+/* Ex1IR PIR Ex2IR UpdIR*/
-+ {0x02, 2}, {0x02, 3}, {0x00, 0}, {0x01, 1} },
-+
-+/* TLR RTI SelDR CapDR SDR Ex1DR*/
-+/* UpdIR*/{{0x07, 3}, {0x00, 1}, {0x01, 1}, {0x01, 2}, {0x01, 3}, {0x05, 3},
-+/* PDR Ex2DR UpdDR SelIR CapIR SIR*/
-+ {0x05, 4}, {0x15, 5}, {0x0d, 4}, {0x03, 2}, {0x03, 3}, {0x03, 4},
-+/* Ex1IR PIR Ex2IR UpdIR*/
-+ {0x0b, 4}, {0x0b, 5}, {0x2b, 6}, {0x00, 0} },
-+};
-+
-+#endif /* __UAPI_LINUX_JTAG_H */
---
-2.34.1
diff --git a/recipes-kernel/linux/files/0017-Add-uart-routing.patch b/recipes-kernel/linux/files/0017-Add-uart-routing.patch
deleted file mode 100644
index e2716df..0000000
--- a/recipes-kernel/linux/files/0017-Add-uart-routing.patch
+++ /dev/null
@@ -1,508 +0,0 @@
-From fcfb127412e24417b704f343c67a8c3ad1cd6537 Mon Sep 17 00:00:00 2001
-From: Elliot She <elliot.tc.she@fii-na.corp-partner.google.com>
-Date: Tue, 21 Jan 2025 09:48:06 +0800
-Subject: [PATCH] add uart routing
-
-add uart routing
-
-Source:
-AspeedTech-BMC github:
-https://github.com/AspeedTech-BMC/linux/blob/aspeed-master-v6.6/drivers/soc/aspeed/aspeed-uart-routing.c
-(cherry picked from commit 4730e2c83d0fdd01a1df86135d5c7630ac074e49)
-
-Signed-off-by:Elliot She<elliot.tc.she@fii-na.corp-partner.google.com>
----
- drivers/soc/aspeed/aspeed-uart-routing.c | 430 ++++++++++++++++++++++-
- 1 file changed, 426 insertions(+), 4 deletions(-)
-
-diff --git a/drivers/soc/aspeed/aspeed-uart-routing.c b/drivers/soc/aspeed/aspeed-uart-routing.c
-index 3a4c1f28c..3b3a856cf 100644
---- a/drivers/soc/aspeed/aspeed-uart-routing.c
-+++ b/drivers/soc/aspeed/aspeed-uart-routing.c
-@@ -15,20 +15,30 @@
- #define HICRA 0x9c
-
- /* attributes options */
-+#define UART_ROUTING_IO0 "io0"
- #define UART_ROUTING_IO1 "io1"
- #define UART_ROUTING_IO2 "io2"
- #define UART_ROUTING_IO3 "io3"
- #define UART_ROUTING_IO4 "io4"
- #define UART_ROUTING_IO5 "io5"
- #define UART_ROUTING_IO6 "io6"
-+#define UART_ROUTING_IO7 "io7"
-+#define UART_ROUTING_IO8 "io8"
-+#define UART_ROUTING_IO9 "io9"
- #define UART_ROUTING_IO10 "io10"
-+#define UART_ROUTING_IO12 "io12"
-+#define UART_ROUTING_UART0 "uart0"
- #define UART_ROUTING_UART1 "uart1"
- #define UART_ROUTING_UART2 "uart2"
- #define UART_ROUTING_UART3 "uart3"
- #define UART_ROUTING_UART4 "uart4"
- #define UART_ROUTING_UART5 "uart5"
- #define UART_ROUTING_UART6 "uart6"
-+#define UART_ROUTING_UART7 "uart7"
-+#define UART_ROUTING_UART8 "uart8"
-+#define UART_ROUTING_UART9 "uart9"
- #define UART_ROUTING_UART10 "uart10"
-+#define UART_ROUTING_UART12 "uart12"
- #define UART_ROUTING_RES "reserved"
-
- struct aspeed_uart_routing {
-@@ -488,6 +498,416 @@ static const struct attribute_group ast2600_uart_routing_attr_group = {
- .attrs = ast2600_uart_routing_attrs,
- };
-
-+/* routing selector for AST27xx node 0 */
-+static struct aspeed_uart_routing_selector ast2700n0_uart9_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART9),
-+ .reg = HICR9,
-+ .shift = 12,
-+ .mask = 0xf,
-+ .options = {
-+ UART_ROUTING_IO9,
-+ UART_ROUTING_IO0,
-+ UART_ROUTING_IO1,
-+ UART_ROUTING_IO2,
-+ UART_ROUTING_IO3,
-+ UART_ROUTING_RES,
-+ UART_ROUTING_UART0,
-+ UART_ROUTING_UART1,
-+ UART_ROUTING_UART2,
-+ UART_ROUTING_UART3,
-+ UART_ROUTING_UART12,
-+ NULL,
-+ },
-+};
-+
-+static struct aspeed_uart_routing_selector ast2700n0_io9_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO9),
-+ .reg = HICR9,
-+ .shift = 8,
-+ .mask = 0xf,
-+ .options = {
-+ UART_ROUTING_UART0,
-+ UART_ROUTING_UART1,
-+ UART_ROUTING_UART2,
-+ UART_ROUTING_UART3,
-+ UART_ROUTING_UART12,
-+ UART_ROUTING_IO0,
-+ UART_ROUTING_IO1,
-+ UART_ROUTING_IO2,
-+ UART_ROUTING_IO3,
-+ UART_ROUTING_RES,
-+ UART_ROUTING_UART9,
-+ NULL,
-+ },
-+};
-+
-+static struct aspeed_uart_routing_selector ast2700n0_uart3_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART3),
-+ .reg = HICRA,
-+ .shift = 25,
-+ .mask = 0x7,
-+ .options = {
-+ UART_ROUTING_IO3,
-+ UART_ROUTING_IO0,
-+ UART_ROUTING_IO1,
-+ UART_ROUTING_IO2,
-+ UART_ROUTING_UART0,
-+ UART_ROUTING_UART1,
-+ UART_ROUTING_UART2,
-+ UART_ROUTING_IO9,
-+ NULL,
-+ },
-+};
-+
-+static struct aspeed_uart_routing_selector ast2700n0_uart2_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART2),
-+ .reg = HICRA,
-+ .shift = 22,
-+ .mask = 0x7,
-+ .options = {
-+ UART_ROUTING_IO2,
-+ UART_ROUTING_IO3,
-+ UART_ROUTING_IO0,
-+ UART_ROUTING_IO1,
-+ UART_ROUTING_UART3,
-+ UART_ROUTING_UART0,
-+ UART_ROUTING_UART1,
-+ UART_ROUTING_IO9,
-+ NULL,
-+ },
-+};
-+
-+static struct aspeed_uart_routing_selector ast2700n0_uart1_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART1),
-+ .reg = HICRA,
-+ .shift = 19,
-+ .mask = 0x7,
-+ .options = {
-+ UART_ROUTING_IO1,
-+ UART_ROUTING_IO2,
-+ UART_ROUTING_IO3,
-+ UART_ROUTING_IO0,
-+ UART_ROUTING_UART2,
-+ UART_ROUTING_UART3,
-+ UART_ROUTING_UART0,
-+ UART_ROUTING_IO9,
-+ NULL,
-+ },
-+};
-+
-+static struct aspeed_uart_routing_selector ast2700n0_uart0_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART0),
-+ .reg = HICRA,
-+ .shift = 16,
-+ .mask = 0x7,
-+ .options = {
-+ UART_ROUTING_IO0,
-+ UART_ROUTING_IO1,
-+ UART_ROUTING_IO2,
-+ UART_ROUTING_IO3,
-+ UART_ROUTING_UART1,
-+ UART_ROUTING_UART2,
-+ UART_ROUTING_UART3,
-+ UART_ROUTING_IO9,
-+ NULL,
-+ },
-+};
-+
-+static struct aspeed_uart_routing_selector ast2700n0_io3_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO3),
-+ .reg = HICRA,
-+ .shift = 9,
-+ .mask = 0x7,
-+ .options = {
-+ UART_ROUTING_UART3,
-+ UART_ROUTING_UART9,
-+ UART_ROUTING_UART0,
-+ UART_ROUTING_UART1,
-+ UART_ROUTING_UART2,
-+ UART_ROUTING_IO0,
-+ UART_ROUTING_IO1,
-+ UART_ROUTING_IO9,
-+ NULL,
-+ },
-+};
-+
-+static struct aspeed_uart_routing_selector ast2700n0_io2_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO2),
-+ .reg = HICRA,
-+ .shift = 6,
-+ .mask = 0x7,
-+ .options = {
-+ UART_ROUTING_UART2,
-+ UART_ROUTING_UART3,
-+ UART_ROUTING_UART9,
-+ UART_ROUTING_UART0,
-+ UART_ROUTING_UART1,
-+ UART_ROUTING_IO0,
-+ UART_ROUTING_IO1,
-+ UART_ROUTING_IO9,
-+ NULL,
-+ },
-+};
-+
-+static struct aspeed_uart_routing_selector ast2700n0_io1_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO1),
-+ .reg = HICRA,
-+ .shift = 3,
-+ .mask = 0x7,
-+ .options = {
-+ UART_ROUTING_UART1,
-+ UART_ROUTING_UART2,
-+ UART_ROUTING_UART3,
-+ UART_ROUTING_UART9,
-+ UART_ROUTING_UART0,
-+ UART_ROUTING_IO2,
-+ UART_ROUTING_IO3,
-+ UART_ROUTING_IO9,
-+ NULL,
-+ },
-+};
-+
-+static struct aspeed_uart_routing_selector ast2700n0_io0_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO0),
-+ .reg = HICRA,
-+ .shift = 0,
-+ .mask = 0x7,
-+ .options = {
-+ UART_ROUTING_UART0,
-+ UART_ROUTING_UART1,
-+ UART_ROUTING_UART2,
-+ UART_ROUTING_UART3,
-+ UART_ROUTING_UART9,
-+ UART_ROUTING_IO2,
-+ UART_ROUTING_IO3,
-+ UART_ROUTING_IO9,
-+ NULL,
-+ },
-+};
-+
-+static struct attribute *ast2700n0_uart_routing_attrs[] = {
-+ &ast2700n0_uart9_sel.dev_attr.attr,
-+ &ast2700n0_io9_sel.dev_attr.attr,
-+ &ast2700n0_uart3_sel.dev_attr.attr,
-+ &ast2700n0_uart2_sel.dev_attr.attr,
-+ &ast2700n0_uart1_sel.dev_attr.attr,
-+ &ast2700n0_uart0_sel.dev_attr.attr,
-+ &ast2700n0_io3_sel.dev_attr.attr,
-+ &ast2700n0_io2_sel.dev_attr.attr,
-+ &ast2700n0_io1_sel.dev_attr.attr,
-+ &ast2700n0_io0_sel.dev_attr.attr,
-+ NULL,
-+};
-+
-+static const struct attribute_group ast2700n0_uart_routing_attr_group = {
-+ .attrs = ast2700n0_uart_routing_attrs,
-+};
-+
-+/* routing selector for AST27xx node 1 */
-+static struct aspeed_uart_routing_selector ast2700n1_uart10_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART10),
-+ .reg = HICR9,
-+ .shift = 12,
-+ .mask = 0xf,
-+ .options = {
-+ UART_ROUTING_IO10,
-+ UART_ROUTING_IO5,
-+ UART_ROUTING_IO6,
-+ UART_ROUTING_IO7,
-+ UART_ROUTING_IO8,
-+ UART_ROUTING_RES,
-+ UART_ROUTING_UART5,
-+ UART_ROUTING_UART6,
-+ UART_ROUTING_UART7,
-+ UART_ROUTING_UART8,
-+ UART_ROUTING_UART12,
-+ NULL,
-+ },
-+};
-+
-+static struct aspeed_uart_routing_selector ast2700n1_io10_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO10),
-+ .reg = HICR9,
-+ .shift = 8,
-+ .mask = 0xf,
-+ .options = {
-+ UART_ROUTING_UART5,
-+ UART_ROUTING_UART6,
-+ UART_ROUTING_UART7,
-+ UART_ROUTING_UART8,
-+ UART_ROUTING_UART12,
-+ UART_ROUTING_IO5,
-+ UART_ROUTING_IO6,
-+ UART_ROUTING_IO7,
-+ UART_ROUTING_IO8,
-+ UART_ROUTING_RES,
-+ UART_ROUTING_UART10,
-+ NULL,
-+ },
-+};
-+
-+static struct aspeed_uart_routing_selector ast2700n1_uart8_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART8),
-+ .reg = HICRA,
-+ .shift = 25,
-+ .mask = 0x7,
-+ .options = {
-+ UART_ROUTING_IO8,
-+ UART_ROUTING_IO5,
-+ UART_ROUTING_IO6,
-+ UART_ROUTING_IO7,
-+ UART_ROUTING_UART5,
-+ UART_ROUTING_UART6,
-+ UART_ROUTING_UART7,
-+ UART_ROUTING_IO10,
-+ NULL,
-+ },
-+};
-+
-+static struct aspeed_uart_routing_selector ast2700n1_uart7_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART7),
-+ .reg = HICRA,
-+ .shift = 22,
-+ .mask = 0x7,
-+ .options = {
-+ UART_ROUTING_IO7,
-+ UART_ROUTING_IO8,
-+ UART_ROUTING_IO5,
-+ UART_ROUTING_IO6,
-+ UART_ROUTING_UART8,
-+ UART_ROUTING_UART5,
-+ UART_ROUTING_UART6,
-+ UART_ROUTING_IO10,
-+ NULL,
-+ },
-+};
-+
-+static struct aspeed_uart_routing_selector ast2700n1_uart6_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART6),
-+ .reg = HICRA,
-+ .shift = 19,
-+ .mask = 0x7,
-+ .options = {
-+ UART_ROUTING_IO6,
-+ UART_ROUTING_IO7,
-+ UART_ROUTING_IO8,
-+ UART_ROUTING_IO5,
-+ UART_ROUTING_UART7,
-+ UART_ROUTING_UART8,
-+ UART_ROUTING_UART5,
-+ UART_ROUTING_IO10,
-+ NULL,
-+ },
-+};
-+
-+static struct aspeed_uart_routing_selector ast2700n1_uart5_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART0),
-+ .reg = HICRA,
-+ .shift = 16,
-+ .mask = 0x7,
-+ .options = {
-+ UART_ROUTING_IO5,
-+ UART_ROUTING_IO6,
-+ UART_ROUTING_IO7,
-+ UART_ROUTING_IO8,
-+ UART_ROUTING_UART6,
-+ UART_ROUTING_UART7,
-+ UART_ROUTING_UART8,
-+ UART_ROUTING_IO10,
-+ NULL,
-+ },
-+};
-+
-+static struct aspeed_uart_routing_selector ast2700n1_io8_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO8),
-+ .reg = HICRA,
-+ .shift = 9,
-+ .mask = 0x7,
-+ .options = {
-+ UART_ROUTING_UART8,
-+ UART_ROUTING_UART10,
-+ UART_ROUTING_UART5,
-+ UART_ROUTING_UART6,
-+ UART_ROUTING_UART7,
-+ UART_ROUTING_IO5,
-+ UART_ROUTING_IO6,
-+ UART_ROUTING_IO10,
-+ NULL,
-+ },
-+};
-+
-+static struct aspeed_uart_routing_selector ast2700n1_io7_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO7),
-+ .reg = HICRA,
-+ .shift = 6,
-+ .mask = 0x7,
-+ .options = {
-+ UART_ROUTING_UART7,
-+ UART_ROUTING_UART8,
-+ UART_ROUTING_UART10,
-+ UART_ROUTING_UART5,
-+ UART_ROUTING_UART6,
-+ UART_ROUTING_IO5,
-+ UART_ROUTING_IO6,
-+ UART_ROUTING_IO10,
-+ NULL,
-+ },
-+};
-+
-+static struct aspeed_uart_routing_selector ast2700n1_io6_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO6),
-+ .reg = HICRA,
-+ .shift = 3,
-+ .mask = 0x7,
-+ .options = {
-+ UART_ROUTING_UART6,
-+ UART_ROUTING_UART7,
-+ UART_ROUTING_UART8,
-+ UART_ROUTING_UART10,
-+ UART_ROUTING_UART5,
-+ UART_ROUTING_IO7,
-+ UART_ROUTING_IO8,
-+ UART_ROUTING_IO10,
-+ NULL,
-+ },
-+};
-+
-+static struct aspeed_uart_routing_selector ast2700n1_io5_sel = {
-+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO0),
-+ .reg = HICRA,
-+ .shift = 0,
-+ .mask = 0x7,
-+ .options = {
-+ UART_ROUTING_UART5,
-+ UART_ROUTING_UART6,
-+ UART_ROUTING_UART7,
-+ UART_ROUTING_UART8,
-+ UART_ROUTING_UART10,
-+ UART_ROUTING_IO7,
-+ UART_ROUTING_IO8,
-+ UART_ROUTING_IO10,
-+ NULL,
-+ },
-+};
-+
-+static struct attribute *ast2700n1_uart_routing_attrs[] = {
-+ &ast2700n1_uart10_sel.dev_attr.attr,
-+ &ast2700n1_io10_sel.dev_attr.attr,
-+ &ast2700n1_uart8_sel.dev_attr.attr,
-+ &ast2700n1_uart7_sel.dev_attr.attr,
-+ &ast2700n1_uart6_sel.dev_attr.attr,
-+ &ast2700n1_uart5_sel.dev_attr.attr,
-+ &ast2700n1_io8_sel.dev_attr.attr,
-+ &ast2700n1_io7_sel.dev_attr.attr,
-+ &ast2700n1_io6_sel.dev_attr.attr,
-+ &ast2700n1_io5_sel.dev_attr.attr,
-+ NULL,
-+};
-+
-+static const struct attribute_group ast2700n1_uart_routing_attr_group = {
-+ .attrs = ast2700n1_uart_routing_attrs,
-+};
-+
- static ssize_t aspeed_uart_routing_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-@@ -565,14 +985,12 @@ static int aspeed_uart_routing_probe(struct platform_device *pdev)
- return 0;
- }
-
--static int aspeed_uart_routing_remove(struct platform_device *pdev)
-+static void aspeed_uart_routing_remove(struct platform_device *pdev)
- {
- struct device *dev = &pdev->dev;
- struct aspeed_uart_routing *uart_routing = platform_get_drvdata(pdev);
-
- sysfs_remove_group(&dev->kobj, uart_routing->attr_grp);
--
-- return 0;
- }
-
- static const struct of_device_id aspeed_uart_routing_table[] = {
-@@ -582,6 +1000,10 @@ static const struct of_device_id aspeed_uart_routing_table[] = {
- .data = &ast2500_uart_routing_attr_group },
- { .compatible = "aspeed,ast2600-uart-routing",
- .data = &ast2600_uart_routing_attr_group },
-+ { .compatible = "aspeed,ast2700n0-uart-routing",
-+ .data = &ast2700n0_uart_routing_attr_group },
-+ { .compatible = "aspeed,ast2700n1-uart-routing",
-+ .data = &ast2700n1_uart_routing_attr_group },
- { },
- };
-
-@@ -591,7 +1013,7 @@ static struct platform_driver aspeed_uart_routing_driver = {
- .of_match_table = aspeed_uart_routing_table,
- },
- .probe = aspeed_uart_routing_probe,
-- .remove = aspeed_uart_routing_remove,
-+ .remove_new = aspeed_uart_routing_remove,
- };
-
- module_platform_driver(aspeed_uart_routing_driver);
---
-2.34.1
-
diff --git a/recipes-kernel/linux/files/0018-Add-RNG-drivers-for-ast2700.patch b/recipes-kernel/linux/files/0018-Add-RNG-drivers-for-ast2700.patch
new file mode 100644
index 0000000..47c617d
--- /dev/null
+++ b/recipes-kernel/linux/files/0018-Add-RNG-drivers-for-ast2700.patch
@@ -0,0 +1,6074 @@
+From 7e76b30c2bc881df8a70a0d2b9db9b22ca0cea1c Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Tue, 11 Mar 2025 15:12:03 +0800
+Subject: [PATCH] Add RNG drivers for ast2700
+
+This is based on Aspeed SDK 9.05.
+
+Source:
+AspeedTech-BMC github:
+https://github.com/AspeedTech-BMC/linux/blob/aspeed-master-v6.6/
+(cherry picked from commit 769f62b7baa84d6998723b0ea60280e380183553)
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
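+
+For a quick sanity check from userspace (example only, not from the SDK
+document; it assumes the standard Linux hwrng character device appears
+once this driver is bound), random bytes can be read via /dev/hwrng:
+
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          unsigned char buf[32];
+          ssize_t n, i;
+          int fd = open("/dev/hwrng", O_RDONLY);
+
+          if (fd < 0) {
+                  perror("open /dev/hwrng");
+                  return 1;
+          }
+          n = read(fd, buf, sizeof(buf));  /* blocks until data is ready */
+          for (i = 0; i < n; i++)
+                  printf("%02x", (unsigned int)buf[i]);
+          printf("\n");
+          close(fd);
+          return 0;
+  }
+
+The active backend can be confirmed through
+/sys/class/misc/hw_random/rng_current.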
+---
+ drivers/char/hw_random/Kconfig | 15 +
+ drivers/char/hw_random/Makefile | 2 +
+ drivers/char/hw_random/aspeed-rng.c | 136 ++
+ drivers/char/hw_random/dwc/Kconfig | 18 +
+ drivers/char/hw_random/dwc/Makefile | 22 +
+ .../dwc/src/pdu/common/include/elppdu_error.h | 96 +
+ .../dwc/src/pdu/common/pdu/pdu_dev32.c | 165 ++
+ .../dwc/src/pdu/linux/include/elppdu.h | 125 +
+ .../hw_random/dwc/src/pdu/linux/kernel/pdu.c | 188 ++
+ .../dwc/src/pdu/linux/kernel/spacc_mem.c | 191 ++
+ .../hw_random/dwc/src/trng/include/nisttrng.h | 63 +
+ .../dwc/src/trng/include/nisttrng_common.h | 144 ++
+ .../dwc/src/trng/include/nisttrng_hw.h | 457 ++++
+ .../dwc/src/trng/include/nisttrng_private.h | 89 +
+ .../dwc/src/trng/include/synversion.h | 52 +
+ .../hw_random/dwc/src/trng/kernel/nist_trng.c | 2171 +++++++++++++++++
+ .../hw_random/dwc/src/trng/trng/nist_trng.c | 950 ++++++++
+ .../dwc/src/trng/trng/nist_trng_private.c | 1022 ++++++++
+ 18 files changed, 5906 insertions(+)
+ create mode 100644 drivers/char/hw_random/aspeed-rng.c
+ create mode 100644 drivers/char/hw_random/dwc/Kconfig
+ create mode 100644 drivers/char/hw_random/dwc/Makefile
+ create mode 100644 drivers/char/hw_random/dwc/src/pdu/common/include/elppdu_error.h
+ create mode 100644 drivers/char/hw_random/dwc/src/pdu/common/pdu/pdu_dev32.c
+ create mode 100644 drivers/char/hw_random/dwc/src/pdu/linux/include/elppdu.h
+ create mode 100644 drivers/char/hw_random/dwc/src/pdu/linux/kernel/pdu.c
+ create mode 100644 drivers/char/hw_random/dwc/src/pdu/linux/kernel/spacc_mem.c
+ create mode 100644 drivers/char/hw_random/dwc/src/trng/include/nisttrng.h
+ create mode 100644 drivers/char/hw_random/dwc/src/trng/include/nisttrng_common.h
+ create mode 100644 drivers/char/hw_random/dwc/src/trng/include/nisttrng_hw.h
+ create mode 100644 drivers/char/hw_random/dwc/src/trng/include/nisttrng_private.h
+ create mode 100644 drivers/char/hw_random/dwc/src/trng/include/synversion.h
+ create mode 100644 drivers/char/hw_random/dwc/src/trng/kernel/nist_trng.c
+ create mode 100644 drivers/char/hw_random/dwc/src/trng/trng/nist_trng.c
+ create mode 100644 drivers/char/hw_random/dwc/src/trng/trng/nist_trng_private.c
+
+diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
+index 8de74dcfa..72cb00938 100644
+--- a/drivers/char/hw_random/Kconfig
++++ b/drivers/char/hw_random/Kconfig
+@@ -573,6 +573,21 @@ config HW_RANDOM_JH7110
+ To compile this driver as a module, choose M here.
+ The module will be called jh7110-trng.
+
++config HW_RANDOM_ASPEED
++ tristate "Aspeed Random Number Generator support"
++ depends on ARCH_ASPEED
++ default HW_RANDOM
++ help
++ This driver provides kernel-side support for the Random Number
++ Generator hardware found on Aspeed ast2600/ast2700 devices.
++
++ To compile this driver as a module, choose M here: the
++ module will be called aspeed-rng.
++
++ If unsure, say Y.
++
++source "drivers/char/hw_random/dwc/Kconfig"
++
+ endif # HW_RANDOM
+
+ config UML_RANDOM
+diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
+index 32549a118..df034aa00 100644
+--- a/drivers/char/hw_random/Makefile
++++ b/drivers/char/hw_random/Makefile
+@@ -49,3 +49,5 @@ obj-$(CONFIG_HW_RANDOM_ARM_SMCCC_TRNG) += arm_smccc_trng.o
+ obj-$(CONFIG_HW_RANDOM_CN10K) += cn10k-rng.o
+ obj-$(CONFIG_HW_RANDOM_POLARFIRE_SOC) += mpfs-rng.o
+ obj-$(CONFIG_HW_RANDOM_JH7110) += jh7110-trng.o
++obj-$(CONFIG_HW_RANDOM_ASPEED) += aspeed-rng.o
++obj-$(CONFIG_HW_RANDOM_DWC) += dwc/
+diff --git a/drivers/char/hw_random/aspeed-rng.c b/drivers/char/hw_random/aspeed-rng.c
+new file mode 100644
+index 000000000..b122661ad
+--- /dev/null
++++ b/drivers/char/hw_random/aspeed-rng.c
+@@ -0,0 +1,136 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (C) ASPEED Technology Inc.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/mod_devicetable.h>
++#include <linux/regmap.h>
++#include <linux/slab.h>
++#include <linux/err.h>
++#include <linux/io.h>
++#include <linux/hw_random.h>
++#include <linux/platform_device.h>
++
++#define TRNG_CTL 0x00
++#define TRNG_EN 0x0
++#define TRNG_MODE 0x04
++#define TRNG_RDY 0x1f
++#define TRNG_ODATA 0x04
++
++struct aspeed_trng {
++ u32 ver;
++ void __iomem *base;
++ struct hwrng rng;
++ unsigned int present: 1;
++ ktime_t period;
++ struct hrtimer timer;
++ struct completion completion;
++};
++
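++/*
++ * Poll the TRNG_RDY bit in TRNG_CTL and copy out 32-bit words from
++ * TRNG_ODATA until 'max' bytes have been gathered.  When 'wait' is set,
++ * give up after roughly max/4 + 1 unsuccessful polls and return whatever
++ * has been collected so far.
++ */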
++static int aspeed_trng_read(struct hwrng *rng, void *buf, size_t max,
++ bool wait)
++{
++ struct aspeed_trng *priv = container_of(rng, struct aspeed_trng, rng);
++ u32 *data = buf;
++ size_t read = 0;
++ int timeout = max / 4 + 1;
++
++ while (read < max) {
++ if (!(readl(priv->base + TRNG_CTL) & (1 << TRNG_RDY))) {
++ if (wait) {
++ if (timeout-- == 0)
++ return read;
++ } else {
++ return 0;
++ }
++ } else {
++ *data = readl(priv->base + TRNG_ODATA);
++ data++;
++ read += 4;
++ }
++ }
++
++ return read;
++}
++
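++/*
++ * The enable bit is active low in this register layout: clearing TRNG_EN
++ * turns the generator on, and aspeed_trng_disable() writes 1 to switch it
++ * back off.  The mode field (bits [5:4]) is hard-coded to 3 by this driver.
++ */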
++static void aspeed_trng_enable(struct aspeed_trng *priv)
++{
++ u32 ctl;
++
++ ctl = readl(priv->base + TRNG_CTL);
++ ctl = ctl & ~(1 << TRNG_EN); /* enable rng */
++ ctl = ctl | (3 << TRNG_MODE); /* select mode */
++
++ writel(ctl, priv->base + TRNG_CTL);
++}
++
++static void aspeed_trng_disable(struct aspeed_trng *priv)
++{
++ writel(1, priv->base + TRNG_CTL);
++}
++
++static int aspeed_trng_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct aspeed_trng *priv;
++ struct resource *res;
++ int ret;
++
++ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
++ if (!priv)
++ return -ENOMEM;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ priv->base = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(priv->base))
++ return PTR_ERR(priv->base);
++
++ priv->rng.name = pdev->name;
++ priv->rng.quality = 900;
++ priv->rng.read = aspeed_trng_read;
++
++ aspeed_trng_enable(priv);
++
++ ret = devm_hwrng_register(&pdev->dev, &priv->rng);
++ if (ret)
++ return ret;
++
++ platform_set_drvdata(pdev, priv);
++
++ dev_info(dev, "Aspeed Hardware RNG successfully registered\n");
++
++ return 0;
++}
++
++static int aspeed_trng_remove(struct platform_device *pdev)
++{
++ struct aspeed_trng *priv = platform_get_drvdata(pdev);
++
++ aspeed_trng_disable(priv);
++
++ return 0;
++}
++
++static const struct of_device_id aspeed_trng_dt_ids[] = {
++ { .compatible = "aspeed,ast2600-trng" },
++ { .compatible = "aspeed,ast2700-trng" },
++ {}
++};
++MODULE_DEVICE_TABLE(of, aspeed_trng_dt_ids);
++
++static struct platform_driver aspeed_trng_driver = {
++ .probe = aspeed_trng_probe,
++ .remove = aspeed_trng_remove,
++ .driver = {
++ .name = "aspeed-trng",
++ .of_match_table = aspeed_trng_dt_ids,
++ },
++};
++
++module_platform_driver(aspeed_trng_driver);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
++MODULE_DESCRIPTION("Aspeed true random number generator driver");
+diff --git a/drivers/char/hw_random/dwc/Kconfig b/drivers/char/hw_random/dwc/Kconfig
+new file mode 100644
+index 000000000..9d586eb0d
+--- /dev/null
++++ b/drivers/char/hw_random/dwc/Kconfig
+@@ -0,0 +1,18 @@
++# SPDX-License-Identifier: GPL-2.0-only
++#
++# DWC Hardware Random Number Generator (RNG) configuration
++#
++
++config HW_RANDOM_DWC
++ tristate "DesignWare Cores HW Random Number Generator support"
++ depends on HW_RANDOM
++ depends on ARCH_ASPEED || COMPILE_TEST
++ help
++ This driver provides kernel-side support for the DWC
++ Random Number Generator hardware found on Aspeed SoCs.
++
++ To compile this driver as a module, choose M here: the
++ module will be called dwc-rng.
++
++ If unsure, say Y.
++
+diff --git a/drivers/char/hw_random/dwc/Makefile b/drivers/char/hw_random/dwc/Makefile
+new file mode 100644
+index 000000000..d8ce87703
+--- /dev/null
++++ b/drivers/char/hw_random/dwc/Makefile
+@@ -0,0 +1,22 @@
++# SPDX-License-Identifier: GPL-2.0
++
++ccflags-y := -I $(srctree)/$(src)/src/trng/include \
++ -I $(srctree)/$(src)/src/pdu/linux/include
++
++obj-$(CONFIG_HW_RANDOM_DWC) += elppdu.o
++elppdu-objs := src/pdu/linux/kernel/pdu.o \
++ src/pdu/common/pdu/pdu_dev32.o \
++
++obj-$(CONFIG_HW_RANDOM_DWC) += elpmem.o
++elpmem-objs := src/pdu/linux/kernel/spacc_mem.o \
++
++obj-$(CONFIG_HW_RANDOM_DWC) += nisttrng.o
++nisttrng-objs := src/trng/kernel/nist_trng.o \
++ src/trng/trng/nist_trng.o \
++ src/trng/trng/nist_trng_private.o \
++
++clean:
++ @find \( -name '*.o' \
++ -o -name '*.a' \
++ -o -name '*.order' \
++ \) -type f -print | xargs rm -rvf
+diff --git a/drivers/char/hw_random/dwc/src/pdu/common/include/elppdu_error.h b/drivers/char/hw_random/dwc/src/pdu/common/include/elppdu_error.h
+new file mode 100644
+index 000000000..358e79533
+--- /dev/null
++++ b/drivers/char/hw_random/dwc/src/pdu/common/include/elppdu_error.h
+@@ -0,0 +1,96 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * This Synopsys software and associated documentation (hereinafter the
++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you. The
++ * Software IS NOT an item of Licensed Software or a Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Products
++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark
++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the
++ * trademarks of their respective owners.
++ *
++ * The contents of this file are dual-licensed; you may select either version
++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license
++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the
++ * SOFTWARE. The BSD License is copied below.
++ *
++ * BSD-3-Clause License:
++ * Copyright (c) 2011-2017 Synopsys, Inc. and/or its affiliates.
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions, and the following disclaimer, without
++ * modification.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. The names of the above-listed copyright holders may not be used to
++ * endorse or promote products derived from this software without specific
++ * prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef SYNPDU_ERROR_H_
++#define SYNPDU_ERROR_H_
++
++/*
++ * Common error definitions. Be sure to update pdu_error_code when changing
++ * anything in this list.
++ */
++
++#define CRYPTO_OK (0)
++#define CRYPTO_FAILED (-1)
++#define CRYPTO_INPROGRESS (-2)
++#define CRYPTO_INVALID_HANDLE (-3)
++#define CRYPTO_INVALID_CONTEXT (-4)
++#define CRYPTO_INVALID_SIZE (-5)
++#define CRYPTO_NOT_INITIALIZED (-6)
++#define CRYPTO_NO_MEM (-7)
++#define CRYPTO_INVALID_ALG (-8)
++#define CRYPTO_INVALID_KEY_SIZE (-9)
++#define CRYPTO_INVALID_ARGUMENT (-10)
++#define CRYPTO_MODULE_DISABLED (-11)
++#define CRYPTO_NOT_IMPLEMENTED (-12)
++#define CRYPTO_INVALID_BLOCK_ALIGNMENT (-13)
++#define CRYPTO_INVALID_MODE (-14)
++#define CRYPTO_INVALID_KEY (-15)
++#define CRYPTO_AUTHENTICATION_FAILED (-16)
++#define CRYPTO_INVALID_IV_SIZE (-17)
++#define CRYPTO_MEMORY_ERROR (-18)
++#define CRYPTO_LAST_ERROR (-19)
++#define CRYPTO_HALTED (-20)
++#define CRYPTO_TIMEOUT (-21)
++#define CRYPTO_SRM_FAILED (-22)
++#define CRYPTO_COMMON_ERROR_MAX (-100)
++#define CRYPTO_INVALID_ICV_KEY_SIZE (-100)
++#define CRYPTO_INVALID_PARAMETER_SIZE (-101)
++#define CRYPTO_SEQUENCE_OVERFLOW (-102)
++#define CRYPTO_DISABLED (-103)
++#define CRYPTO_INVALID_VERSION (-104)
++#define CRYPTO_FATAL (-105)
++#define CRYPTO_INVALID_PAD (-106)
++#define CRYPTO_FIFO_FULL (-107)
++#define CRYPTO_INVALID_SEQUENCE (-108)
++#define CRYPTO_INVALID_FIRMWARE (-109)
++#define CRYPTO_NOT_FOUND (-110)
++#define CRYPTO_CMD_FIFO_INACTIVE (-111)
++#define CRYPTO_INVALID_PROTOCOL (-112)
++#define CRYPTO_REPLAY (-113)
++#define CRYPTO_NOT_INSTANTIATED (-114)
++#define CRYPTO_RESEED_REQUIRED (-115)
++
++#endif
+diff --git a/drivers/char/hw_random/dwc/src/pdu/common/pdu/pdu_dev32.c b/drivers/char/hw_random/dwc/src/pdu/common/pdu/pdu_dev32.c
+new file mode 100644
+index 000000000..95a5b1cc0
+--- /dev/null
++++ b/drivers/char/hw_random/dwc/src/pdu/common/pdu/pdu_dev32.c
+@@ -0,0 +1,165 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * This Synopsys software and associated documentation (hereinafter the
++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you. The
++ * Software IS NOT an item of Licensed Software or a Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Products
++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark
++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the
++ * trademarks of their respective owners.
++ *
++ * The contents of this file are dual-licensed; you may select either version
++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license
++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the
++ * SOFTWARE. The BSD License is copied below.
++ *
++ * BSD-3-Clause License:
++ * Copyright (c) 2011-2017 Synopsys, Inc. and/or its affiliates.
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions, and the following disclaimer, without
++ * modification.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. The names of the above-listed copyright holders may not be used to
++ * endorse or promote products derived from this software without specific
++ * prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "elppdu.h"
++
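++/*
++ * Helpers that stream 32-bit words between CPU buffers and MMIO registers.
++ * The _big/_little variants pack or unpack byte streams with the requested
++ * endianness; the _s variants select between them at run time.
++ */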
++void pdu_to_dev32(void *addr_, u32 *src, unsigned long nword)
++{
++ unsigned char *addr = addr_;
++
++ while (nword--) {
++ pdu_io_write32(addr, *src++);
++ addr += 4;
++ }
++}
++EXPORT_SYMBOL(pdu_to_dev32);
++
++void pdu_from_dev32(u32 *dst, void *addr_, unsigned long nword)
++{
++ unsigned char *addr = addr_;
++
++ while (nword--) {
++ *dst++ = pdu_io_read32(addr);
++ addr += 4;
++ }
++}
++EXPORT_SYMBOL(pdu_from_dev32);
++
++void pdu_to_dev32_big(void *addr_, const unsigned char *src,
++ unsigned long nword)
++{
++ unsigned char *addr = addr_;
++ unsigned long v;
++
++ while (nword--) {
++ v = 0;
++ v = (v << 8) | ((unsigned long)*src++);
++ v = (v << 8) | ((unsigned long)*src++);
++ v = (v << 8) | ((unsigned long)*src++);
++ v = (v << 8) | ((unsigned long)*src++);
++ pdu_io_write32(addr, v);
++ addr += 4;
++ }
++}
++EXPORT_SYMBOL(pdu_to_dev32_big);
++
++void pdu_from_dev32_big(unsigned char *dst, void *addr_, unsigned long nword)
++{
++ unsigned char *addr = addr_;
++ unsigned long v;
++
++ while (nword--) {
++ v = pdu_io_read32(addr);
++ addr += 4;
++ *dst++ = (v >> 24) & 0xFF;
++ v <<= 8;
++ *dst++ = (v >> 24) & 0xFF;
++ v <<= 8;
++ *dst++ = (v >> 24) & 0xFF;
++ v <<= 8;
++ *dst++ = (v >> 24) & 0xFF;
++ v <<= 8;
++ }
++}
++EXPORT_SYMBOL(pdu_from_dev32_big);
++
++void pdu_to_dev32_little(void *addr_, const unsigned char *src,
++ unsigned long nword)
++{
++ unsigned char *addr = addr_;
++ unsigned long v;
++
++ while (nword--) {
++ v = 0;
++ v = (v >> 8) | ((unsigned long)*src++ << 24UL);
++ v = (v >> 8) | ((unsigned long)*src++ << 24UL);
++ v = (v >> 8) | ((unsigned long)*src++ << 24UL);
++ v = (v >> 8) | ((unsigned long)*src++ << 24UL);
++ pdu_io_write32(addr, v);
++ addr += 4;
++ }
++}
++EXPORT_SYMBOL(pdu_to_dev32_little);
++
++void pdu_from_dev32_little(unsigned char *dst, void *addr_, unsigned long nword)
++{
++ unsigned char *addr = addr_;
++ unsigned long v;
++
++ while (nword--) {
++ v = pdu_io_read32(addr);
++ addr += 4;
++ *dst++ = v & 0xFF;
++ v >>= 8;
++ *dst++ = v & 0xFF;
++ v >>= 8;
++ *dst++ = v & 0xFF;
++ v >>= 8;
++ *dst++ = v & 0xFF;
++ v >>= 8;
++ }
++}
++EXPORT_SYMBOL(pdu_from_dev32_little);
++
++void pdu_to_dev32_s(void *addr, const unsigned char *src, unsigned long nword,
++ int endian)
++{
++ if (endian)
++ pdu_to_dev32_big(addr, src, nword);
++ else
++ pdu_to_dev32_little(addr, src, nword);
++}
++EXPORT_SYMBOL(pdu_to_dev32_s);
++
++void pdu_from_dev32_s(unsigned char *dst, void *addr, unsigned long nword,
++ int endian)
++{
++ if (endian)
++ pdu_from_dev32_big(dst, addr, nword);
++ else
++ pdu_from_dev32_little(dst, addr, nword);
++}
++EXPORT_SYMBOL(pdu_from_dev32_s);
+diff --git a/drivers/char/hw_random/dwc/src/pdu/linux/include/elppdu.h b/drivers/char/hw_random/dwc/src/pdu/linux/include/elppdu.h
+new file mode 100644
+index 000000000..7aff78411
+--- /dev/null
++++ b/drivers/char/hw_random/dwc/src/pdu/linux/include/elppdu.h
+@@ -0,0 +1,125 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * This Synopsys software and associated documentation (hereinafter the
++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you. The
++ * Software IS NOT an item of Licensed Software or a Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Products
++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark
++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the
++ * trademarks of their respective owners.
++ *
++ * The contents of this file are dual-licensed; you may select either version
++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license
++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the
++ * SOFTWARE. The BSD License is copied below.
++ *
++ * BSD-3-Clause License:
++ * Copyright (c) 2011-2017 Synopsys, Inc. and/or its affiliates.
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions, and the following disclaimer, without
++ * modification.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. The names of the above-listed copyright holders may not be used to
++ * endorse or promote products derived from this software without specific
++ * prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef SYNPDU_H_
++#define SYNPDU_H_
++
++/* Platform Specific */
++#include <linux/kernel.h> /* printk() */
++#include <linux/types.h> /* size_t */
++#include <linux/string.h> /* memcpy()/etc */
++#include <linux/seq_file.h>
++#include <linux/spinlock.h>
++#include <linux/sched.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/dma-mapping.h>
++#include <linux/vmalloc.h>
++#include <linux/io.h>
++#include <linux/ctype.h>
++#include <linux/version.h>
++
++#ifndef PDU_BASE_ADDR
++#define PDU_BASE_ADDR 0x14c3b000
++#endif
++
++#ifndef PDU_BASE_IRQ
++#define PDU_BASE_IRQ 91
++#endif
++
++#define PDU_SINGLE_CORE 1
++#define PDU_SINGLE_NIST_TRNG 1
++
++#if 1
++#define SYNHW_PRINT printk
++#else
++#define SYNHW_PRINT(...)
++#endif
++
++#define CPU_YIELD
++#define SYNHW_MEMCPY memcpy
++
++// Debug modifier for printing; on Linux, adding KERN_DEBUG makes the output show up only in debug logs (avoids /var/log/messages)
++#define SYNHW_PRINT_DEBUG KERN_DEBUG
++
++// Locking
++#define PDU_LOCK_TYPE spinlock_t
++#define PDU_INIT_LOCK(lock) spin_lock_init(lock)
++
++// these are for IRQ contexts
++#define PDU_LOCK(lock, flags) spin_lock_irqsave(lock, flags)
++#define PDU_UNLOCK(lock, flags) spin_unlock_irqrestore(lock, flags)
++
++// these are for bottom half BH contexts
++#define PDU_LOCK_TYPE_BH struct mutex
++#define PDU_INIT_LOCK_BH(lock) mutex_init(lock)
++#define PDU_LOCK_BH(lock) mutex_lock(lock)
++#define PDU_UNLOCK_BH(lock) mutex_unlock(lock)
++
++#include "../../common/include/elppdu_error.h"
++
++void *pdu_linux_map_regs(struct device *dev, struct resource *regs);
++
++void pdu_io_write32(void *addr, unsigned long val);
++void pdu_io_cached_write32(void *addr, unsigned long val, u32 *cache);
++unsigned long pdu_io_read32(void *addr);
++
++void pdu_to_dev32(void *addr, u32 *src, unsigned long nword);
++void pdu_from_dev32(u32 *dst, void *addr, unsigned long nword);
++void pdu_to_dev32_big(void *addr, const unsigned char *src, unsigned long nword);
++void pdu_from_dev32_big(unsigned char *dst, void *addr, unsigned long nword);
++void pdu_to_dev32_little(void *addr, const unsigned char *src, unsigned long nword);
++void pdu_from_dev32_little(unsigned char *dst, void *addr, unsigned long nword);
++void pdu_from_dev32_s(unsigned char *dst, void *addr, unsigned long nword, int endian);
++void pdu_to_dev32_s(void *addr, const unsigned char *src, unsigned long nword, int endian);
++
++void *pdu_malloc(unsigned long n);
++void pdu_free(void *p);
++
++int pdu_error_code(int code);
++
++#endif
++
+diff --git a/drivers/char/hw_random/dwc/src/pdu/linux/kernel/pdu.c b/drivers/char/hw_random/dwc/src/pdu/linux/kernel/pdu.c
+new file mode 100644
+index 000000000..65aa2beed
+--- /dev/null
++++ b/drivers/char/hw_random/dwc/src/pdu/linux/kernel/pdu.c
+@@ -0,0 +1,188 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * This Synopsys software and associated documentation (hereinafter the
++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you. The
++ * Software IS NOT an item of Licensed Software or a Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Products
++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark
++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the
++ * trademarks of their respective owners.
++ *
++ * The contents of this file are dual-licensed; you may select either version
++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license
++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the
++ * SOFTWARE. The BSD License is copied below.
++ *
++ * BSD-3-Clause License:
++ * Copyright (c) 2011-2017 Synopsys, Inc. and/or its affiliates.
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions, and the following disclaimer, without
++ * modification.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. The names of the above-listed copyright holders may not be used to
++ * endorse or promote products derived from this software without specific
++ * prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/module.h>
++
++#include "elppdu.h"
++
++static bool trace_io;
++module_param(trace_io, bool, 0600);
++MODULE_PARM_DESC(trace_io, "Trace MMIO reads/writes");
++
++void *pdu_linux_map_regs(struct device *dev, struct resource *regs)
++{
++ return devm_ioremap_resource(dev, regs);
++}
++EXPORT_SYMBOL(pdu_linux_map_regs);
++
++void pdu_io_write32(void *addr, unsigned long val)
++{
++ if (trace_io)
++ SYNHW_PRINT("PDU: write %.8lx -> %p\n", val, addr);
++
++ writel(val, addr);
++}
++EXPORT_SYMBOL(pdu_io_write32);
++
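++/*
++ * Write-through with a shadow copy: skip the MMIO write when the new value
++ * matches what was last written via this cache variable.
++ */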
++void pdu_io_cached_write32(void *addr, unsigned long val, uint32_t *cache)
++{
++ if (*cache == val) {
++ if (trace_io) {
++ SYNHW_PRINT("PDU: write %.8lx -> %p (cached)\n", val,
++ addr);
++ }
++ return;
++ }
++
++ *cache = val;
++ pdu_io_write32(addr, val);
++}
++EXPORT_SYMBOL(pdu_io_cached_write32);
++
++unsigned long pdu_io_read32(void *addr)
++{
++ unsigned long val;
++
++ val = readl(addr);
++
++ if (trace_io)
++ SYNHW_PRINT("PDU: read %.8lx <- %p\n", val, addr);
++
++ return val;
++}
++EXPORT_SYMBOL(pdu_io_read32);
++
++/* Platform specific memory allocation */
++void *pdu_malloc(unsigned long n)
++{
++ return vmalloc(n);
++}
++
++void pdu_free(void *p)
++{
++ vfree(p);
++}
++
++/* Convert SDK error codes to corresponding kernel error codes. */
++int pdu_error_code(int code)
++{
++ switch (code) {
++ case CRYPTO_INPROGRESS:
++ return -EINPROGRESS;
++ case CRYPTO_INVALID_HANDLE:
++ case CRYPTO_INVALID_CONTEXT:
++ return -ENXIO;
++ case CRYPTO_NOT_INITIALIZED:
++ return -ENODATA;
++ case CRYPTO_INVALID_SIZE:
++ case CRYPTO_INVALID_ALG:
++ case CRYPTO_INVALID_KEY_SIZE:
++ case CRYPTO_INVALID_ARGUMENT:
++ case CRYPTO_INVALID_BLOCK_ALIGNMENT:
++ case CRYPTO_INVALID_MODE:
++ case CRYPTO_INVALID_KEY:
++ case CRYPTO_INVALID_IV_SIZE:
++ case CRYPTO_INVALID_ICV_KEY_SIZE:
++ case CRYPTO_INVALID_PARAMETER_SIZE:
++ case CRYPTO_REPLAY:
++ case CRYPTO_INVALID_PROTOCOL:
++ case CRYPTO_RESEED_REQUIRED:
++ return -EINVAL;
++ case CRYPTO_NOT_IMPLEMENTED:
++ case CRYPTO_MODULE_DISABLED:
++ return -ENOTSUPP;
++ case CRYPTO_NO_MEM:
++ return -ENOMEM;
++ case CRYPTO_INVALID_PAD:
++ case CRYPTO_INVALID_SEQUENCE:
++ return -EILSEQ;
++ case CRYPTO_MEMORY_ERROR:
++ return -EIO;
++ case CRYPTO_TIMEOUT:
++ return -ETIMEDOUT;
++ case CRYPTO_HALTED:
++ return -ECANCELED;
++ case CRYPTO_AUTHENTICATION_FAILED:
++ case CRYPTO_SEQUENCE_OVERFLOW:
++ case CRYPTO_INVALID_VERSION:
++ return -EPROTO;
++ case CRYPTO_FIFO_FULL:
++ return -EBUSY;
++ case CRYPTO_SRM_FAILED:
++ case CRYPTO_DISABLED:
++ case CRYPTO_LAST_ERROR:
++ return -EAGAIN;
++ case CRYPTO_FAILED:
++ case CRYPTO_FATAL:
++ return -EIO;
++ case CRYPTO_INVALID_FIRMWARE:
++ return -ENOEXEC;
++ case CRYPTO_NOT_FOUND:
++ return -ENOENT;
++ }
++
++ /*
++ * Any unrecognized code is either success (i.e., zero) or a negative
++ * error code, which may be meaningless but at least will still be
++ * recognized as an error.
++ */
++ return code;
++}
++EXPORT_SYMBOL(pdu_error_code);
++
++static int __init pdu_mod_init(void)
++{
++ return 0;
++}
++
++static void __exit pdu_mod_exit(void)
++{
++}
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Synopsys, Inc.");
++module_init(pdu_mod_init);
++module_exit(pdu_mod_exit);
+diff --git a/drivers/char/hw_random/dwc/src/pdu/linux/kernel/spacc_mem.c b/drivers/char/hw_random/dwc/src/pdu/linux/kernel/spacc_mem.c
+new file mode 100644
+index 000000000..cc74ff17b
+--- /dev/null
++++ b/drivers/char/hw_random/dwc/src/pdu/linux/kernel/spacc_mem.c
+@@ -0,0 +1,191 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * This Synopsys software and associated documentation (hereinafter the
++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you. The
++ * Software IS NOT an item of Licensed Software or a Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Products
++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark
++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the
++ * trademarks of their respective owners.
++ *
++ * The contents of this file are dual-licensed; you may select either version
++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license
++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the
++ * SOFTWARE. The BSD License is copied below.
++ *
++ * BSD-3-Clause License:
++ * Copyright (c) 2011-2016 Synopsys, Inc. and/or its affiliates.
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions, and the following disclaimer, without
++ * modification.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. The names of the above-listed copyright holders may not be used to
++ * endorse or promote products derived from this software without specific
++ * prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/io-mapping.h>
++#include <linux/irqdomain.h>
++#include <linux/of_irq.h>
++#include <linux/err.h>
++
++#include "elppdu.h"
++
++static unsigned long vex_baseaddr = PDU_BASE_ADDR;
++module_param_named(baseaddr, vex_baseaddr, ulong, 0);
++MODULE_PARM_DESC(baseaddr, "Hardware base address (default " __stringify(PDU_BASE_ADDR) ")");
++
++// max of 16 devices
++#define MAX_DEV 16
++
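++/*
++ * devices[] records the platform devices registered by register_device() so
++ * that pdu_vex_mod_exit() can unregister them; slots that were never used
++ * (or whose registration failed) stay NULL, which
++ * platform_device_unregister() tolerates.
++ */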
++static struct platform_device *devices[MAX_DEV];
++static int dev_id;
++
++static void register_device(const char *name, int id,
++ const struct resource *res, unsigned int num)
++{
++ char suffix[16] = "";
++ struct platform_device_info pdevinfo = {
++ .name = name,
++ .id = id,
++ .res = res,
++ .num_res = num,
++ .dma_mask = 0xffffffff,
++ };
++
++ if (dev_id >= MAX_DEV) {
++ pr_err("Too many devices; increase MAX_DEV.\n");
++ return;
++ }
++
++ devices[dev_id] = platform_device_register_full(&pdevinfo);
++ if (IS_ERR(devices[dev_id])) {
++ if (id >= 0)
++ snprintf(suffix, sizeof(suffix), ".%d", id);
++ pr_err("Failed to register %s%s\n", name, suffix);
++
++ devices[dev_id] = NULL;
++ return;
++ }
++
++ dev_id++;
++}
++
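++/*
++ * Resolve the raw interrupt number into a Linux IRQ number. On Zynq the SPI
++ * number is mapped through the GIC's DT irqdomain; on other platforms it is
++ * returned unchanged. A negative value is returned on failure.
++ */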
++static int __init get_irq_num(unsigned int irq_num)
++{
++ if (IS_ENABLED(CONFIG_ARCH_ZYNQ)) {
++ struct of_phandle_args args = { 0 };
++
++ /*
++		 * Since this driver is for non-DT use but Zynq uses DT to set up
++		 * IRQs, find the GIC by searching for its DT node, then manually
++		 * create the IRQ mappings.
++ */
++
++ do {
++ args.np = of_find_node_with_property(args.np,
++ "interrupt-controller");
++ if (!args.np) {
++ pr_err("cannot find IRQ controller");
++ return -ENODEV;
++ }
++ } while (!of_device_is_compatible(args.np, "arm,cortex-a9-gic"));
++
++ if (irq_num < 32 || irq_num >= 96) {
++ pr_err("SPI interrupts must be in the range [32,96) on Zynq\n");
++ return -EINVAL;
++ }
++
++ args.args_count = 3;
++ args.args[0] = 0; /* SPI */
++ args.args[1] = irq_num - 32;
++ args.args[2] = 4; /* Active high, level-sensitive */
++
++ irq_num = irq_create_of_mapping(&args);
++ of_node_put(args.np);
++ if (irq_num == 0)
++ return -EINVAL;
++ }
++
++ if (irq_num > INT_MAX)
++ return -EINVAL;
++
++ return irq_num;
++}
++
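++/*
++ * Register a platform device for the TRNG at the "baseaddr" module parameter:
++ * a 0x80-byte window for a basic TRNG or a 0x800-byte window for the NIST
++ * TRNG, selected at build time by PDU_SINGLE_BASIC_TRNG/PDU_SINGLE_NIST_TRNG.
++ * The IRQ resolved by get_irq_num() is passed along, or an empty IRQ
++ * resource if resolution fails.
++ */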
++static int __init pdu_vex_mod_init(void)
++{
++ int irq_num = get_irq_num(PDU_BASE_IRQ);
++ struct resource res[2];
++#ifndef PDU_SINGLE_CORE
++ void *pdu_mem;
++ int i, rc;
++#endif
++
++ if (irq_num >= 0) {
++ res[1] = (struct resource){
++ .start = irq_num,
++ .end = irq_num,
++ .flags = IORESOURCE_IRQ,
++ };
++ } else {
++ res[1] = (struct resource){ 0 };
++ pr_err("IRQ setup failed (error %d), not using IRQs\n",
++ irq_num);
++ }
++
++#ifdef PDU_SINGLE_BASIC_TRNG
++ res[0] = (struct resource){
++ .start = vex_baseaddr,
++ .end = vex_baseaddr + 0x80 - 1,
++ .flags = IORESOURCE_MEM,
++ };
++ register_device("basic_trng", -1, res, 2);
++#endif
++
++#ifdef PDU_SINGLE_NIST_TRNG
++ res[0] = (struct resource){
++ .start = vex_baseaddr,
++ .end = vex_baseaddr + 0x800 - 1,
++ .flags = IORESOURCE_MEM,
++ };
++ register_device("nist_trng", -1, res, 2);
++#endif
++
++ return 0;
++}
++module_init(pdu_vex_mod_init);
++
++static void __exit pdu_vex_mod_exit(void)
++{
++ int i;
++
++ for (i = 0; i < MAX_DEV; i++)
++ platform_device_unregister(devices[i]);
++}
++module_exit(pdu_vex_mod_exit);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Synopsys, Inc.");
+diff --git a/drivers/char/hw_random/dwc/src/trng/include/nisttrng.h b/drivers/char/hw_random/dwc/src/trng/include/nisttrng.h
+new file mode 100644
+index 000000000..7dda788b3
+--- /dev/null
++++ b/drivers/char/hw_random/dwc/src/trng/include/nisttrng.h
+@@ -0,0 +1,63 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * This Synopsys software and associated documentation (hereinafter the
++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you. The
++ * Software IS NOT an item of Licensed Software or a Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Products
++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark
++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the
++ * trademarks of their respective owners.
++ *
++ * The contents of this file are dual-licensed; you may select either version
++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license
++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the
++ * SOFTWARE. The BSD License is copied below.
++ *
++ * BSD-3-Clause License:
++ * Copyright (c) 2012-2016 Synopsys, Inc. and/or its affiliates.
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions, and the following disclaimer, without
++ * modification.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. The names of the above-listed copyright holders may not be used to
++ * endorse or promote products derived from this software without specific
++ * prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef NISTTRNG_H
++#define NISTTRNG_H
++
++#include "synversion.h"
++#include "elppdu.h"
++#include "nisttrng_hw.h"
++#include "nisttrng_common.h"
++#include "nisttrng_private.h"
++
++int nisttrng_init(struct nist_trng_state *state, u32 *base);
++int nisttrng_instantiate(struct nist_trng_state *state, int req_sec_strength, int pred_resist, void *personal_str);
++int nisttrng_uninstantiate(struct nist_trng_state *state);
++int nisttrng_reseed(struct nist_trng_state *state, int pred_resist, void *addin_str);
++int nisttrng_generate(struct nist_trng_state *state, void *random_bits, unsigned long req_num_bytes, int req_sec_strength, int pred_resist, void *addin_str);
++int nisttrng_rbc(struct nist_trng_state *state, int enable, int rbc_num, int rate, int urun_blnk);
++int nisttrng_generate_public_vtrng(struct nist_trng_state *state, void *random_bits, unsigned long req_num_bytes, int vtrng);
++#endif
+diff --git a/drivers/char/hw_random/dwc/src/trng/include/nisttrng_common.h b/drivers/char/hw_random/dwc/src/trng/include/nisttrng_common.h
+new file mode 100644
+index 000000000..2164796bf
+--- /dev/null
++++ b/drivers/char/hw_random/dwc/src/trng/include/nisttrng_common.h
+@@ -0,0 +1,144 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++// ------------------------------------------------------------------------
++//
++// (C) COPYRIGHT 2012 - 2016 SYNOPSYS, INC.
++// ALL RIGHTS RESERVED
++//
++// (C) COPYRIGHT 2012-2016 Synopsys, Inc.
++// This Synopsys software and all associated documentation are
++// proprietary to Synopsys, Inc. and may only be used pursuant
++// to the terms and conditions of a written license agreement
++// with Synopsys, Inc. All other use, reproduction, modification,
++// or distribution of the Synopsys software or the associated
++// documentation is strictly prohibited.
++//
++// ------------------------------------------------------------------------
++
++#ifndef NISTTRNG_COMMON_H
++#define NISTTRNG_COMMON_H
++
++#define NIST_TRNG_RETRY_MAX 5000000UL
++
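++/*
++ * Default DRBG usage limits, which correspond to the NIST SP 800-90A
++ * CTR_DRBG maximums: 2^19 bits per Generate request and 2^48 requests
++ * between reseeds.
++ */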
++#define NIST_DFLT_MAX_BITS_PER_REQ BIT(19)
++#define NIST_DFLT_MAX_REQ_PER_SEED BIT(48)
++
++/* Do not change the following parameters */
++#define NIST_TRNG_DFLT_MAX_REJECTS 10
++
++#define DEBUG(...)
++//#define DEBUG(...) printk(__VA_ARGS__)
++
++enum nisttrng_sec_strength {
++ SEC_STRNT_AES128 = 0,
++ SEC_STRNT_AES256 = 1
++};
++
++enum nisttrng_drbg_arch {
++ AES128 = 0,
++ AES256 = 1
++};
++
++enum nisttrng_current_state {
++ NIST_TRNG_STATE_INITIALIZE = 0,
++ NIST_TRNG_STATE_UNINSTANTIATE,
++ NIST_TRNG_STATE_INSTANTIATE,
++ NIST_TRNG_STATE_RESEED,
++ NIST_TRNG_STATE_GENERATE
++};
++
++struct nist_trng_state {
++ u32 *base;
++
++ /* Hardware features and build ID */
++ struct {
++ struct {
++ enum nisttrng_drbg_arch drbg_arch;
++ unsigned int extra_ps_present,
++ secure_rst_state,
++ diag_level_basic_trng,
++ diag_level_stat_hlt,
++ diag_level_ns;
++ } features;
++
++ struct {
++ unsigned int ext_enum,
++ ext_ver,
++ rel_num;
++ } corekit_rel;
++
++ struct {
++ unsigned int core_type,
++ bg8,
++ cdc_synch_depth,
++ background_noise,
++ edu_present,
++ aes_datapath,
++ aes_max_key_size,
++ personilzation_str;
++ } build_cfg0;
++
++ struct {
++ unsigned int num_raw_noise_blks,
++ sticky_startup,
++ auto_correlation_test,
++ mono_bit_test,
++ run_test,
++ poker_test,
++ raw_ht_adap_test,
++ raw_ht_rep_test,
++ ent_src_rep_smpl_size,
++ ent_src_rep_test,
++ ent_src_rep_min_entropy;
++ } build_cfg1;
++
++ struct {
++ unsigned int rbc2_rate_width,
++ rbc1_rate_width,
++ rbc0_rate_width,
++ public_vtrng_channels,
++ esm_channel,
++ rbc_channels,
++ fifo_depth;
++ } edu_build_cfg0;
++ } config;
++
++ /* status */
++ struct {
++ //nist_trng_current_state current_state;
++ enum nisttrng_current_state current_state; // old for now
++ unsigned int nonce_mode,
++ secure_mode,
++ pred_resist;
++ //nist_trng_sec_strength sec_strength;
++ enum nisttrng_sec_strength sec_strength;
++ unsigned int pad_ps_addin;
++ unsigned int alarm_code;
++		// Private VTRNG STAT; all the public VTRNGs will have the same STAT
++		// as the public TRNG in terms of rnc_enabled and seed_enum.
++ struct {
++ unsigned int seed_enum,
++ rnc_enabled;
++ } edu_vstat;
++ } status;
++
++ /* reminders and alarms */
++ struct {
++ unsigned long max_bits_per_req;
++ unsigned long long max_req_per_seed;
++ unsigned long bits_per_req_left;
++ unsigned long long req_per_seed_left;
++ } counters;
++};
++
++#define nist_trng_zero_status(x) \
++ memset(&((x)->status), 0, sizeof((x)->status))
++
++#define DRBG_INSTANTIATED(cs) \
++ ((((cs) == NIST_TRNG_STATE_INSTANTIATE) || \
++ ((cs) == NIST_TRNG_STATE_RESEED) || \
++ ((cs) == NIST_TRNG_STATE_GENERATE)) ? 1 : 0)
++
++#define REQ_SEC_STRENGTH_IS_VALID(sec_st) \
++ ((((sec_st) > 0) && ((sec_st) <= 256)) ? 1 : 0)
++
++#endif
+diff --git a/drivers/char/hw_random/dwc/src/trng/include/nisttrng_hw.h b/drivers/char/hw_random/dwc/src/trng/include/nisttrng_hw.h
+new file mode 100644
+index 000000000..0522e20aa
+--- /dev/null
++++ b/drivers/char/hw_random/dwc/src/trng/include/nisttrng_hw.h
+@@ -0,0 +1,457 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * This Synopsys software and associated documentation (hereinafter the
++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you. The
++ * Software IS NOT an item of Licensed Software or a Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Products
++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark
++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the
++ * trademarks of their respective owners.
++ *
++ * The contents of this file are dual-licensed; you may select either version
++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license
++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the
++ * SOFTWARE. The BSD License is copied below.
++ *
++ * BSD-3-Clause License:
++ * Copyright (c) 2012-2016 Synopsys, Inc. and/or its affiliates.
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions, and the following disclaimer, without
++ * modification.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. The names of the above-listed copyright holders may not be used to
++ * endorse or promote products derived from this software without specific
++ * prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef NISTTRNG_HW_H
++#define NISTTRNG_HW_H
++
++/* HW related Parameters */
++#define NIST_TRNG_RAND_BLK_SIZE_BITS 128
++#define CHX_URUN_BLANK_AFTER_RESET 0x3
++
++/* registers */
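++/*
++ * Offsets below are u32 word indices (they are added directly to the u32 *
++ * register base by the driver), not byte offsets.
++ */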
++#define NIST_TRNG_REG_CTRL 0x00
++#define NIST_TRNG_REG_MODE 0x01
++#define NIST_TRNG_REG_SMODE 0x02
++#define NIST_TRNG_REG_STAT 0x03
++#define NIST_TRNG_REG_IE 0x04
++#define NIST_TRNG_REG_ISTAT 0x05
++#define NIST_TRNG_REG_ALARM 0x06
++#define NIST_TRNG_REG_COREKIT_REL 0x07
++#define NIST_TRNG_REG_FEATURES 0x08
++#define NIST_TRNG_REG_RAND0 0x09
++#define NIST_TRNG_REG_RAND1 0x0A
++#define NIST_TRNG_REG_RAND2 0x0B
++#define NIST_TRNG_REG_RAND3 0x0C
++#define NIST_TRNG_REG_NPA_DATA0 0x0D
++#define NIST_TRNG_REG_NPA_DATA1 0x0E
++#define NIST_TRNG_REG_NPA_DATA2 0x0F
++#define NIST_TRNG_REG_NPA_DATA3 0x10
++#define NIST_TRNG_REG_NPA_DATA4 0x11
++#define NIST_TRNG_REG_NPA_DATA5 0x12
++#define NIST_TRNG_REG_NPA_DATA6 0x13
++#define NIST_TRNG_REG_NPA_DATA7 0x14
++#define NIST_TRNG_REG_NPA_DATA8 0x15
++#define NIST_TRNG_REG_NPA_DATA9 0x16
++#define NIST_TRNG_REG_NPA_DATA10 0x17
++#define NIST_TRNG_REG_NPA_DATA11 0x18
++#define NIST_TRNG_REG_NPA_DATA12 0x19
++#define NIST_TRNG_REG_NPA_DATA13 0x1A
++#define NIST_TRNG_REG_NPA_DATA14 0x1B
++#define NIST_TRNG_REG_NPA_DATA15 0x1C
++#define NIST_TRNG_REG_SEED0 0x1D
++#define NIST_TRNG_REG_SEED1 0x1E
++#define NIST_TRNG_REG_SEED2 0x1F
++#define NIST_TRNG_REG_SEED3 0x20
++#define NIST_TRNG_REG_SEED4 0x21
++#define NIST_TRNG_REG_SEED5 0x22
++#define NIST_TRNG_REG_SEED6 0x23
++#define NIST_TRNG_REG_SEED7 0x24
++#define NIST_TRNG_REG_SEED8 0x25
++#define NIST_TRNG_REG_SEED9 0x26
++#define NIST_TRNG_REG_SEED10 0x27
++#define NIST_TRNG_REG_SEED11 0x28
++#define NIST_TRNG_REG_TIME_TO_SEED 0x34
++#define NIST_TRNG_REG_IA_RDATA 0x38
++#define NIST_TRNG_REG_IA_WDATA 0x39
++#define NIST_TRNG_REG_IA_ADDR 0x3A
++#define NIST_TRNG_REG_IA_CMD 0x3B
++#define NIST_TRNG_REG_BUILD_CFG0 0x3C
++#define NIST_TRNG_REG_BUILD_CFG1 0x3D
++
++/* nist edu registers */
++#define NIST_TRNG_EDU_RNC_CTRL 0x100
++#define NIST_TRNG_EDU_FLUSH_CTRL 0x101
++#define NIST_TRNG_EDU_RESEED_CNTR 0x102
++#define NIST_TRNG_EDU_RBC_CTRL 0x104
++#define NIST_TRNG_EDU_STAT 0x106
++#define NIST_TRNG_EDU_IE 0x108
++#define NIST_TRNG_EDU_ISTAT 0x109
++#define NIST_TRNG_EDU_BUILD_CFG0 0x12C
++#define NIST_TRNG_EDU_VCTRL 0x138
++#define NIST_TRNG_EDU_VSTAT 0x139
++#define NIST_TRNG_EDU_VIE 0x13A
++#define NIST_TRNG_EDU_VISTAT 0x13B
++#define NIST_TRNG_EDU_VRAND_0 0x13C
++#define NIST_TRNG_EDU_VRAND_1 0x13D
++#define NIST_TRNG_EDU_VRAND_2 0x13E
++#define NIST_TRNG_EDU_VRAND_3 0x13F
++
++/* edu vtrng registers */
++#define NIST_TRNG_EDU_VTRNG_VCTRL0 0x180
++#define NIST_TRNG_EDU_VTRNG_VSTAT0 0x181
++#define NIST_TRNG_EDU_VTRNG_VIE0 0x182
++#define NIST_TRNG_EDU_VTRNG_VISTAT0 0x183
++#define NIST_TRNG_EDU_VTRNG_VRAND0_0 0x184
++#define NIST_TRNG_EDU_VTRNG_VRAND0_1 0x185
++#define NIST_TRNG_EDU_VTRNG_VRAND0_2 0x186
++#define NIST_TRNG_EDU_VTRNG_VRAND0_3 0x187
++#define NIST_TRNG_EDU_VTRNG_VCTRL1 0x188
++#define NIST_TRNG_EDU_VTRNG_VSTAT1 0x189
++#define NIST_TRNG_EDU_VTRNG_VIE1 0x18A
++#define NIST_TRNG_EDU_VTRNG_VISTAT1 0x18B
++#define NIST_TRNG_EDU_VTRNG_VRAND1_0 0x18C
++#define NIST_TRNG_EDU_VTRNG_VRAND1_1 0x18D
++#define NIST_TRNG_EDU_VTRNG_VRAND1_2 0x18E
++#define NIST_TRNG_EDU_VTRNG_VRAND1_3 0x18F
++#define NIST_TRNG_EDU_VTRNG_VCTRL2 0x190
++#define NIST_TRNG_EDU_VTRNG_VSTAT2 0x191
++#define NIST_TRNG_EDU_VTRNG_VIE2 0x192
++#define NIST_TRNG_EDU_VTRNG_VISTAT2 0x193
++#define NIST_TRNG_EDU_VTRNG_VRAND2_0 0x194
++#define NIST_TRNG_EDU_VTRNG_VRAND2_1 0x195
++#define NIST_TRNG_EDU_VTRNG_VRAND2_2 0x196
++#define NIST_TRNG_EDU_VTRNG_VRAND2_3 0x197
++#define NIST_TRNG_EDU_VTRNG_VCTRL3 0x198
++#define NIST_TRNG_EDU_VTRNG_VSTAT3 0x199
++#define NIST_TRNG_EDU_VTRNG_VIE3 0x19A
++#define NIST_TRNG_EDU_VTRNG_VISTAT3 0x19B
++#define NIST_TRNG_EDU_VTRNG_VRAND3_0 0x19C
++#define NIST_TRNG_EDU_VTRNG_VRAND3_1 0x19D
++#define NIST_TRNG_EDU_VTRNG_VRAND3_2 0x19E
++#define NIST_TRNG_EDU_VTRNG_VRAND3_3 0x19F
++#define NIST_TRNG_EDU_VTRNG_VCTRL4 0x1A0
++#define NIST_TRNG_EDU_VTRNG_VSTAT4 0x1A1
++#define NIST_TRNG_EDU_VTRNG_VIE4 0x1A2
++#define NIST_TRNG_EDU_VTRNG_VISTAT4 0x1A3
++#define NIST_TRNG_EDU_VTRNG_VRAND4_0 0x1A4
++#define NIST_TRNG_EDU_VTRNG_VRAND4_1 0x1A5
++#define NIST_TRNG_EDU_VTRNG_VRAND4_2 0x1A6
++#define NIST_TRNG_EDU_VTRNG_VRAND4_3 0x1A7
++#define NIST_TRNG_EDU_VTRNG_VCTRL5 0x1A8
++#define NIST_TRNG_EDU_VTRNG_VSTAT5 0x1A9
++#define NIST_TRNG_EDU_VTRNG_VIE5 0x1AA
++#define NIST_TRNG_EDU_VTRNG_VISTAT5 0x1AB
++#define NIST_TRNG_EDU_VTRNG_VRAND5_0 0x1AC
++#define NIST_TRNG_EDU_VTRNG_VRAND5_1 0x1AD
++#define NIST_TRNG_EDU_VTRNG_VRAND5_2 0x1AE
++#define NIST_TRNG_EDU_VTRNG_VRAND5_3 0x1AF
++#define NIST_TRNG_EDU_VTRNG_VCTRL6 0x1B0
++#define NIST_TRNG_EDU_VTRNG_VSTAT6 0x1B1
++#define NIST_TRNG_EDU_VTRNG_VIE6 0x1B2
++#define NIST_TRNG_EDU_VTRNG_VISTAT6 0x1B3
++#define NIST_TRNG_EDU_VTRNG_VRAND6_0 0x1B4
++#define NIST_TRNG_EDU_VTRNG_VRAND6_1 0x1B5
++#define NIST_TRNG_EDU_VTRNG_VRAND6_2 0x1B6
++#define NIST_TRNG_EDU_VTRNG_VRAND6_3 0x1B7
++#define NIST_TRNG_EDU_VTRNG_VCTRL7 0x1B8
++#define NIST_TRNG_EDU_VTRNG_VSTAT7 0x1B9
++#define NIST_TRNG_EDU_VTRNG_VIE7 0x1BA
++#define NIST_TRNG_EDU_VTRNG_VISTAT7 0x1BB
++#define NIST_TRNG_EDU_VTRNG_VRAND7_0 0x1BC
++#define NIST_TRNG_EDU_VTRNG_VRAND7_1 0x1BD
++#define NIST_TRNG_EDU_VTRNG_VRAND7_2 0x1BE
++#define NIST_TRNG_EDU_VTRNG_VRAND7_3 0x1BF
++
++#define NIST_TRNG_EDU_VTRNG_VCTRL_CMD_NOP 0x0
++#define NIST_TRNG_EDU_VTRNG_VCTRL_CMD_GET_RANDOM 0x1
++#define NIST_TRNG_EDU_VTRNG_VCTRL_CMD_INIT 0x2
++
++#define NIST_TRNG_EDU_VTRNG_VCTRL_CMD_MASK 0x3Ul
++#define NIST_TRNG_EDU_VTRNG_VCTRL_CMD_SET(y, x) (((y) & ~(NIST_TRNG_EDU_VTRNG_VCTRL_CMD_MASK)) | ((x)))
++
++/* CTRL */
++#define NIST_TRNG_REG_CTRL_CMD_NOP 0
++#define NIST_TRNG_REG_CTRL_CMD_GEN_NOISE 1
++#define NIST_TRNG_REG_CTRL_CMD_GEN_NONCE 2
++#define NIST_TRNG_REG_CTRL_CMD_CREATE_STATE 3
++#define NIST_TRNG_REG_CTRL_CMD_RENEW_STATE 4
++#define NIST_TRNG_REG_CTRL_CMD_REFRESH_ADDIN 5
++#define NIST_TRNG_REG_CTRL_CMD_GEN_RANDOM 6
++#define NIST_TRNG_REG_CTRL_CMD_ADVANCE_STATE 7
++#define NIST_TRNG_REG_CTRL_CMD_KAT 8
++#define NIST_TRNG_REG_CTRL_CMD_ZEROIZE 15
++
++/* EDU CTRL */
++#define NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_DISABLE_TO_HOLD 0
++#define NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_ENABLE 1
++#define NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_DISABLE_TO_IDLE 2
++#define NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_FINISH_TO_IDLE 3
++
++#define NIST_TRNG_EDU_RNC_CTRL_CMD_MASK 0x3Ul
++#define NIST_TRNG_EDU_RNC_CTRL_CMD_SET(y, x) (((y) & ~(NIST_TRNG_EDU_RNC_CTRL_CMD_MASK)) | ((x)))
++
++/* EDU_FLUSH_CTRL */
++#define _NIST_TRNG_EDU_FLUSH_CTRL_CH2_RBC 3
++#define _NIST_TRNG_EDU_FLUSH_CTRL_CH1_RBC 2
++#define _NIST_TRNG_EDU_FLUSH_CTRL_CH0_RBC 1
++#define _NIST_TRNG_EDU_FLUSH_CTRL_FIFO 0
++
++#define NIST_TRNG_EDU_FLUSH_CTRL_CH2_RBC BIT(_NIST_TRNG_EDU_FLUSH_CTRL_CH2_RBC)
++#define NIST_TRNG_EDU_FLUSH_CTRL_CH1_RBC BIT(_NIST_TRNG_EDU_FLUSH_CTRL_CH1_RBC)
++#define NIST_TRNG_EDU_FLUSH_CTRL_CH0_RBC BIT(_NIST_TRNG_EDU_FLUSH_CTRL_CH0_RBC)
++#define NIST_TRNG_EDU_FLUSH_CTRL_FIFO BIT(_NIST_TRNG_EDU_FLUSH_CTRL_FIFO)
++
++/*EDU_RBC_CTRL*/
++#define _NIST_TRNG_EDU_RBC_CTRL_CH2_URUN_BLANK 28
++#define _NIST_TRNG_EDU_RBC_CTRL_CH1_URUN_BLANK 26
++#define _NIST_TRNG_EDU_RBC_CTRL_CH0_URUN_BLANK 24
++#define _NIST_TRNG_EDU_RBC_CTRL_CH2_RATE 16
++#define _NIST_TRNG_EDU_RBC_CTRL_CH1_RATE 8
++#define _NIST_TRNG_EDU_RBC_CTRL_CH0_RATE 0
++
++#define _NIST_TRNG_EDU_RBC_CTRL_CH_RATE_MASK 0xFUL
++#define _NIST_TRNG_EDU_RBC_CTRL_CH_URUN_BLANK_MASK 0x3UL
++
++#define NISTTRNG_EDU_RBC_CTRL_SET_CH_RATE(z, y, x) (((y) & ~(_NIST_TRNG_EDU_RBC_CTRL_CH_RATE_MASK << (x))) | ((z) << (x)))
++#define NISTTRNG_EDU_RBC_CTRL_SET_CH_URUN_BLANK(z, y, x) (((y) & ~(_NIST_TRNG_EDU_RBC_CTRL_CH_URUN_BLANK_MASK << (x))) | ((z) << (x)))
++
++#define NISTTRNG_EDU_RBC_CTRL_GET_CH_RATE(y, x) ((_NIST_TRNG_EDU_RBC_CTRL_CH_RATE_MASK) & ((y) >> (x)))
++#define NISTTRNG_EDU_RBC_CTRL_GET_CH_URUN_BLANK(y, x) ((_NIST_TRNG_EDU_RBC_CTRL_CH_URUN_BLANK_MASK) & ((y) >> (x)))
++
++#define NISTTRNG_EDU_RBC_CTRL_GET_CH_RATE_AFTER_RESET 0x0
++#define NISTTRNG_EDU_RBC_CTRL_SET_CH_URUN_BLANK_AFTER_RESET 0x3
++
++/* MODE */
++#define _NIST_TRNG_REG_MODE_KAT_SEL 7
++#define _NIST_TRNG_REG_MODE_KAT_VEC 5
++#define _NIST_TRNG_REG_MODE_ADDIN_PRESENT 4
++#define _NIST_TRNG_REG_MODE_PRED_RESIST 3
++#define _NIST_TRNG_REG_MODE_SEC_ALG 0
++
++#define NIST_TRNG_REG_MODE_ADDIN_PRESENT BIT(_NIST_TRNG_REG_MODE_ADDIN_PRESENT)
++#define NIST_TRNG_REG_MODE_PRED_RESIST BIT(_NIST_TRNG_REG_MODE_PRED_RESIST)
++#define NIST_TRNG_REG_MODE_SEC_ALG BIT(_NIST_TRNG_REG_MODE_SEC_ALG)
++
++/* SMODE */
++#define _NIST_TRNG_REG_SMODE_NOISE_COLLECT 31
++#define _NIST_TRNG_REG_SMODE_INDIV_HT_DISABLE 16
++#define _NIST_TRNG_REG_SMODE_MAX_REJECTS 2
++#define _NIST_TRNG_REG_SMODE_MISSION_MODE 1
++#define _NIST_TRNG_REG_SMODE_SECURE_EN _NIST_TRNG_REG_SMODE_MISSION_MODE
++#define _NIST_TRNG_REG_SMODE_NONCE 0
++
++#define NIST_TRNG_REG_SMODE_MAX_REJECTS(x) ((x) << _NIST_TRNG_REG_SMODE_MAX_REJECTS)
++#define NIST_TRNG_REG_SMODE_SECURE_EN(x) ((x) << _NIST_TRNG_REG_SMODE_SECURE_EN)
++#define NIST_TRNG_REG_SMODE_NONCE BIT(_NIST_TRNG_REG_SMODE_NONCE)
++
++/* STAT */
++#define _NIST_TRNG_REG_STAT_BUSY 31
++#define _NIST_TRNG_REG_STAT_STARTUP_TEST_IN_PROG 10
++#define _NIST_TRNG_REG_STAT_STARTUP_TEST_STUCK 9
++#define _NIST_TRNG_REG_STAT_DRBG_STATE 7
++#define _NIST_TRNG_REG_STAT_SECURE 6
++#define _NIST_TRNG_REG_STAT_NONCE_MODE 5
++#define _NIST_TRNG_REG_STAT_SEC_ALG 4
++#define _NIST_TRNG_REG_STAT_LAST_CMD 0
++
++#define NIST_TRNG_REG_STAT_BUSY BIT(_NIST_TRNG_REG_STAT_BUSY)
++//#define NIST_TRNG_REG_STAT_DRBG_STATE (1UL<<_NIST_TRNG_REG_STAT_DRBG_STATE)
++//#define NIST_TRNG_REG_STAT_SECURE (1UL << _NIST_TRNG_REG_STAT_SECURE)
++//#define NIST_TRNG_REG_STAT_NONCE_MODE (1UL << _NIST_TRNG_REG_STAT_NONCE_MODE)
++//#define NIST_TRNG_REG_STAT_SEC_ALG (1UL << _NIST_TRNG_REG_STAT_SEC_ALG)
++//#define NIST_TRNG_REG_STAT_LAST_CMD(x) (((x) >> _NIST_TRNG_REG_STAT_LAST_CMD)&0xF)
++
++/*EDU_STAT*/
++
++#define NIST_TRNG_EDU_STAT_FIFO_LEVEL(x) (((x) >> 24) & 255)
++#define NIST_TRNG_EDU_STAT_TTT_INDEX(x) (((x) >> 16) & 255)
++#define NIST_TRNG_EDU_STAT_RNC_BUSY(x) (((x) >> 3) & 7)
++#define NIST_TRNG_EDU_STAT_RNC_ENABLED(x) (((x) >> 2) & 1)
++#define NIST_TRNG_EDU_STAT_FIFO_EMPTY(x) (((x) >> 1) & 1)
++#define NIST_TRNG_EDU_STAT_FIFO_FULL(x) ((x) & 1)
++
++/* IE */
++#define _NIST_TRNG_REG_IE_GLBL 31
++#define _NIST_TRNG_REG_IE_DONE 4
++#define _NIST_TRNG_REG_IE_ALARMS 3
++#define _NIST_TRNG_REG_IE_NOISE_RDY 2
++#define _NIST_TRNG_REG_IE_KAT_COMPLETE 1
++#define _NIST_TRNG_REG_IE_ZEROIZE 0
++
++#define NIST_TRNG_REG_IE_GLBL BIT(_NIST_TRNG_REG_IE_GLBL)
++#define NIST_TRNG_REG_IE_DONE BIT(_NIST_TRNG_REG_IE_DONE)
++#define NIST_TRNG_REG_IE_ALARMS BIT(_NIST_TRNG_REG_IE_ALARMS)
++#define NIST_TRNG_REG_IE_NOISE_RDY BIT(_NIST_TRNG_REG_IE_NOISE_RDY)
++#define NIST_TRNG_REG_IE_KAT_COMPLETE BIT(_NIST_TRNG_REG_IE_KAT_COMPLETE)
++#define NIST_TRNG_REG_IE_ZEROIZE BIT(_NIST_TRNG_REG_IE_ZEROIZE)
++
++/* ISTAT */
++#define _NIST_TRNG_REG_ISTAT_DONE 4
++#define _NIST_TRNG_REG_ISTAT_ALARMS 3
++#define _NIST_TRNG_REG_ISTAT_NOISE_RDY 2
++#define _NIST_TRNG_REG_ISTAT_KAT_COMPLETE 1
++#define _NIST_TRNG_REG_ISTAT_ZEROIZE 0
++
++#define NIST_TRNG_REG_ISTAT_DONE BIT(_NIST_TRNG_REG_ISTAT_DONE)
++#define NIST_TRNG_REG_ISTAT_ALARMS BIT(_NIST_TRNG_REG_ISTAT_ALARMS)
++#define NIST_TRNG_REG_ISTAT_NOISE_RDY BIT(_NIST_TRNG_REG_ISTAT_NOISE_RDY)
++#define NIST_TRNG_REG_ISTAT_KAT_COMPLETE BIT(_NIST_TRNG_REG_ISTAT_KAT_COMPLETE)
++#define NIST_TRNG_REG_ISTAT_ZEROIZE BIT(_NIST_TRNG_REG_ISTAT_ZEROIZE)
++
++/*EDU_ISTAT*/
++
++#define _NIST_TRNG_EDU_ISTAT_CH2_RBC_URUN 8
++#define _NIST_TRNG_EDU_ISTAT_CH1_RBC_URUN 7
++#define _NIST_TRNG_EDU_ISTAT_CH0_RBC_URUN 6
++#define _NIST_TRNG_EDU_ISTAT_PRIVATE_VTRNG 5
++#define _NIST_TRNG_EDU_ISTAT_WAIT_EXP_TIMEOUT 4
++#define _NIST_TRNG_EDU_ISTAT_RNC_DRVN_OFFLINE 3
++#define _NIST_TRNG_EDU_ISTAT_FIFO_URUN 2
++#define _NIST_TRNG_EDU_ISTAT_ACCESS_VIOL 1
++#define _NIST_TRNG_EDU_ISTAT_RESEED_REMINDER 0
++
++#define NIST_TRNG_EDU_ISTAT_CH2_RBC_URUN BIT(_NIST_TRNG_EDU_ISTAT_CH2_RBC_URUN)
++#define NIST_TRNG_EDU_ISTAT_CH1_RBC_URUN BIT(_NIST_TRNG_EDU_ISTAT_CH1_RBC_URUN)
++#define NIST_TRNG_EDU_ISTAT_CH0_RBC_URUN BIT(_NIST_TRNG_EDU_ISTAT_CH0_RBC_URUN)
++#define NIST_TRNG_EDU_ISTAT_PRIVATE_VTRNG BIT(_NIST_TRNG_EDU_ISTAT_PRIVATE_VTRNG)
++#define NIST_TRNG_EDU_ISTAT_WAIT_EXP_TIMEOUT BIT(_NIST_TRNG_EDU_ISTAT_WAIT_EXP_TIMEOUT)
++#define NIST_TRNG_EDU_ISTAT_RNC_DRVN_OFFLINE BIT(_NIST_TRNG_EDU_ISTAT_RNC_DRVN_OFFLINE)
++#define NIST_TRNG_EDU_ISTAT_FIFO_URUN BIT(_NIST_TRNG_EDU_ISTAT_FIFO_URUN)
++#define NIST_TRNG_EDU_ISTAT_ACCESS_VIOL BIT(_NIST_TRNG_EDU_ISTAT_ACCESS_VIOL)
++#define NIST_TRNG_EDU_ISTAT_RESEED_REMINDER BIT(_NIST_TRNG_EDU_ISTAT_RESEED_REMINDER)
++
++/* ALARMS */
++#define NIST_TRNG_REG_ALARM_ILLEGAL_CMD_SEQ BIT(4)
++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_OK 0
++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_KAT_STAT 1
++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_KAT 2
++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_MONOBIT 3
++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_RUN 4
++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_LONGRUN 5
++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_AUTOCORRELATION 6
++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_POKER 7
++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_REPETITION_COUNT 8
++#define NIST_TRNG_REG_ALARM_FAILED_TEST_ID_ADAPATIVE_PROPORTION 9
++
++/* COREKIT_REL */
++#define NIST_TRNG_REG_EXT_ENUM(x) (((x) >> 28) & 0xF)
++#define NIST_TRNG_REG_EXT_VER(x) (((x) >> 23) & 0xFF)
++#define NIST_TRNG_REG_REL_NUM(x) ((x) & 0xFFFF)
++
++// This may be deleted per comments in the HW details; i.e., use CFG instead.
++/* FEATURES */
++#define NIST_TRNG_REG_FEATURES_AES_256(x) (((x) >> 9) & 1)
++#define NIST_TRNG_REG_FEATURES_EXTRA_PS_PRESENT(x) (((x) >> 8) & 1)
++#define NIST_TRNG_REG_FEATURES_DIAG_LEVEL_NS(x) (((x) >> 7) & 1)
++#define NIST_TRNG_REG_FEATURES_DIAG_LEVEL_BASIC_TRNG(x) (((x) >> 4) & 7)
++#define NIST_TRNG_REG_FEATURES_DIAG_LEVEL_ST_HLT(x) (((x) >> 1) & 7)
++#define NIST_TRNG_REG_FEATURES_SECURE_RST_STATE(x) ((x) & 1)
++
++/* build_CFG0 */
++#define NIST_TRNG_REG_CFG0_PERSONILIZATION_STR(x) (((x) >> 14) & 1)
++#define NIST_TRNG_REG_CFG0_AES_MAX_KEY_SIZE(x) (((x) >> 13) & 1)
++#define NIST_TRNG_REG_CFG0_AES_DATAPATH(x) (((x) >> 12) & 1)
++#define NIST_TRNG_REG_CFG0_EDU_PRESENT(x) (((x) >> 11) & 1)
++#define NIST_TRNG_REG_CFG0_BACGROUND_NOISE(x) (((x) >> 10) & 1)
++#define NIST_TRNG_REG_CFG0_CDC_SYNCH_DEPTH(x) (((x) >> 8) & 3)
++#define NIST_TRNG_REG_CFG0_BG8(x) (((x) >> 7) & 1)
++#define NIST_TRNG_REG_CFG0_CORE_TYPE(x) ((x) & 3)
++
++/* build_CFG1 */
++#define NIST_TRNG_REG_CFG1_ENT_SRC_REP_MIN_ENTROPY(x) (((x) >> 24) & 255)
++#define NIST_TRNG_REG_CFG1_ENT_SRC_REP_TEST(x) (((x) >> 23) & 1)
++#define NIST_TRNG_REG_CFG1_ENT_SRC_REP_SMPL_SIZE(x) (((x) >> 20) & 7)
++#define NIST_TRNG_REG_CFG1_RAW_HT_REP_TEST(x) (((x) >> 19) & 1)
++#define NIST_TRNG_REG_CFG1_RAW_HT_ADAP_TEST(x) (((x) >> 16) & 7)
++#define NIST_TRNG_REG_CFG1_POKER_TEST(x) (((x) >> 15) & 1)
++#define NIST_TRNG_REG_CFG1_RUN_TEST(x) (((x) >> 14) & 1)
++#define NIST_TRNG_REG_CFG1_MONO_BIT_TEST(x) (((x) >> 13) & 1)
++#define NIST_TRNG_REG_CFG1_AUTO_CORRELATION_TEST(x) (((x) >> 12) & 1)
++#define NIST_TRNG_REG_CFG1_STICKY_STARTUP(x) (((x) >> 8) & 1)
++#define NIST_TRNG_REG_CFG1_NUM_RAW_NOISE_BLKS(x) ((x) & 255)
++
++/* EDU_BUILD_CFG0 */
++#define NIST_TRNG_REG_EDU_CFG0_RBC2_RATE_WIDTH(x) (((x) >> 20) & 7)
++#define NIST_TRNG_REG_EDU_CFG0_RBC1_RATE_WIDTH(x) (((x) >> 16) & 7)
++#define NIST_TRNG_REG_EDU_CFG0_RBC0_RATE_WIDTH(x) (((x) >> 12) & 7)
++#define NIST_TRNG_REG_EDU_CFG0_PUBLIC_VTRNG_CHANNELS(x) (((x) >> 8) & 15)
++#define NIST_TRNG_REG_EDU_CFG0_ESM_CHANNEL(x) (((x) >> 6) & 1)
++#define NIST_TRNG_REG_EDU_CFG0_RBC_CHANNELS(x) (((x) >> 4) & 3)
++#define NIST_TRNG_REG_EDU_CFG0_FIFO_DEPTH(x) (((x) >> 2) & 7)
++
++/* EDU_VSTAT */
++#define NIST_TRNG_REG_EDU_VSTAT_BUSY(x) (((x) >> 31) & 1)
++#define NIST_TRNG_REG_EDU_VSTAT_RNC_ENABLED(x) (((x) >> 30) & 1)
++#define NIST_TRNG_REG_EDU_VSTAT_SEED_ENUM(x) (((x) >> 28) & 3)
++#define NIST_TRNG_REG_EDU_VSTAT_RWUE(x) (((x) >> 27) & 1)
++#define NIST_TRNG_REG_EDU_VSTAT_RWNE(x) (((x) >> 26) & 1)
++#define NIST_TRNG_REG_EDU_VSTAT_SRWE(x) (((x) >> 25) & 1)
++#define NIST_TRNG_REG_EDU_VSTAT_ANY_RW1(x) (((x) >> 24) & 1)
++#define NIST_TRNG_REG_EDU_VSTAT_BCKGRND_NOISE(x) (((x) >> 23) & 1)
++#define NIST_TRNG_REG_EDU_VSTAT_RNC_FIFO_EMPTY(x) (((x) >> 22) & 1)
++#define NIST_TRNG_REG_EDU_VSTAT_SLICE_RWI3(x) (((x) >> 15) & 1)
++#define NIST_TRNG_REG_EDU_VSTAT_SLICE_RWI2(x) (((x) >> 14) & 1)
++#define NIST_TRNG_REG_EDU_VSTAT_SLICE_RWI1(x) (((x) >> 13) & 1)
++#define NIST_TRNG_REG_EDU_VSTAT_SLICE_RWI0(x) (((x) >> 12) & 1)
++#define NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD3(x) (((x) >> 11) & 1)
++#define NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD2(x) (((x) >> 10) & 1)
++#define NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD1(x) (((x) >> 9) & 1)
++#define NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD0(x) (((x) >> 8) & 1)
++#define NIST_TRNG_REG_EDU_VSTAT_CURRENT_CMD(x) (((x) >> 4) & 15)
++#define NIST_TRNG_REG_EDU_VSTAT_LAST_CMD(x) ((x) & 15)
++
++#define _NIST_TRNG_REG_SMODE_MAX_REJECTS_MASK 255UL
++#define _NIST_TRNG_REG_SMODE_SECURE_EN_MASK 1UL
++#define _NIST_TRNG_REG_SMODE_NONCE_MASK 1UL
++#define _NIST_TRNG_REG_MODE_SEC_ALG_MASK 1UL
++#define _NIST_TRNG_REG_MODE_ADDIN_PRESENT_MASK 1UL
++#define _NIST_TRNG_REG_MODE_PRED_RESIST_MASK 1UL
++#define _NIST_TRNG_REG_MODE_KAT_SEL_MASK 3UL
++#define _NIST_TRNG_REG_MODE_KAT_VEC_MASK 3UL
++#define _NIST_TRNG_REG_STAT_DRBG_STATE_MASK 3UL
++#define _NIST_TRNG_REG_STAT_SECURE_MASK 1UL
++#define _NIST_TRNG_REG_STAT_NONCE_MASK 1UL
++
++#define NIST_TRNG_REG_SMODE_SET_MAX_REJECTS(y, x) (((y) & ~(_NIST_TRNG_REG_SMODE_MAX_REJECTS_MASK << _NIST_TRNG_REG_SMODE_MAX_REJECTS)) | ((x) << _NIST_TRNG_REG_SMODE_MAX_REJECTS))
++#define NIST_TRNG_REG_SMODE_SET_SECURE_EN(y, x) (((y) & ~(_NIST_TRNG_REG_SMODE_SECURE_EN_MASK << _NIST_TRNG_REG_SMODE_SECURE_EN)) | ((x) << _NIST_TRNG_REG_SMODE_SECURE_EN))
++#define NIST_TRNG_REG_SMODE_SET_NONCE(y, x) (((y) & ~(_NIST_TRNG_REG_SMODE_NONCE_MASK << _NIST_TRNG_REG_SMODE_NONCE)) | ((x) << _NIST_TRNG_REG_SMODE_NONCE))
++#define NIST_TRNG_REG_SMODE_GET_MAX_REJECTS(x) (((x) >> _NIST_TRNG_REG_SMODE_MAX_REJECTS) & _NIST_TRNG_REG_SMODE_MAX_REJECTS_MASK)
++#define NIST_TRNG_REG_SMODE_GET_SECURE_EN(x) (((x) >> _NIST_TRNG_REG_SMODE_SECURE_EN) & _NIST_TRNG_REG_SMODE_SECURE_EN_MASK)
++#define NIST_TRNG_REG_SMODE_GET_NONCE(x) (((x) >> _NIST_TRNG_REG_SMODE_NONCE) & _NIST_TRNG_REG_SMODE_NONCE_MASK)
++
++#define NIST_TRNG_REG_MODE_SET_SEC_ALG(y, x) (((y) & ~(_NIST_TRNG_REG_MODE_SEC_ALG_MASK << _NIST_TRNG_REG_MODE_SEC_ALG)) | ((x) << _NIST_TRNG_REG_MODE_SEC_ALG))
++#define NIST_TRNG_REG_MODE_SET_PRED_RESIST(y, x) (((y) & ~(_NIST_TRNG_REG_MODE_PRED_RESIST_MASK << _NIST_TRNG_REG_MODE_PRED_RESIST)) | ((x) << _NIST_TRNG_REG_MODE_PRED_RESIST))
++#define NIST_TRNG_REG_MODE_SET_ADDIN_PRESENT(y, x) (((y) & ~(_NIST_TRNG_REG_MODE_ADDIN_PRESENT_MASK << _NIST_TRNG_REG_MODE_ADDIN_PRESENT)) | ((x) << _NIST_TRNG_REG_MODE_ADDIN_PRESENT))
++#define NIST_TRNG_REG_MODE_SET_KAT_SEL(y, x) (((y) & ~(_NIST_TRNG_REG_MODE_KAT_SEL_MASK << _NIST_TRNG_REG_MODE_KAT_SEL)) | ((x) << _NIST_TRNG_REG_MODE_KAT_SEL))
++#define NIST_TRNG_REG_MODE_SET_KAT_VEC(y, x) (((y) & ~(_NIST_TRNG_REG_MODE_KAT_VEC_MASK << _NIST_TRNG_REG_MODE_KAT_VEC)) | ((x) << _NIST_TRNG_REG_MODE_KAT_VEC))
++#define NIST_TRNG_REG_MODE_GET_SEC_ALG(x) (((x) >> _NIST_TRNG_REG_MODE_SEC_ALG) & _NIST_TRNG_REG_MODE_SEC_ALG_MASK)
++#define NIST_TRNG_REG_MODE_GET_PRED_RESIST(x) (((x) >> _NIST_TRNG_REG_MODE_PRED_RESIST) & _NIST_TRNG_REG_MODE_PRED_RESIST_MASK)
++#define NIST_TRNG_REG_MODE_GET_ADDIN_PRESENT(x) (((x) >> _NIST_TRNG_REG_MODE_ADDIN_PRESENT) & _NIST_TRNG_REG_MODE_ADDIN_PRESENT_MASK)
++#define NIST_TRNG_REG_STAT_GET_DRBG_STATE(x) (((x) >> _NIST_TRNG_REG_STAT_DRBG_STATE) & _NIST_TRNG_REG_STAT_DRBG_STATE_MASK)
++#define NIST_TRNG_REG_STAT_GET_SECURE(x) (((x) >> _NIST_TRNG_REG_STAT_SECURE) & _NIST_TRNG_REG_STAT_SECURE_MASK)
++#define NIST_TRNG_REG_STAT_GET_NONCE(x) (((x) >> _NIST_TRNG_REG_STAT_NONCE_MODE) & _NIST_TRNG_REG_STAT_NONCE_MASK)
++
++#endif
+diff --git a/drivers/char/hw_random/dwc/src/trng/include/nisttrng_private.h b/drivers/char/hw_random/dwc/src/trng/include/nisttrng_private.h
+new file mode 100644
+index 000000000..bdec454b7
+--- /dev/null
++++ b/drivers/char/hw_random/dwc/src/trng/include/nisttrng_private.h
+@@ -0,0 +1,89 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * This Synopsys software and associated documentation (hereinafter the
++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you. The
++ * Software IS NOT an item of Licensed Software or a Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Products
++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark
++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the
++ * trademarks of their respective owners.
++ *
++ * The contents of this file are dual-licensed; you may select either version
++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license
++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the
++ * SOFTWARE. The BSD License is copied below.
++ *
++ * BSD-3-Clause License:
++ * Copyright (c) 2012-2016 Synopsys, Inc. and/or its affiliates.
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions, and the following disclaimer, without
++ * modification.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. The names of the above-listed copyright holders may not be used to
++ * endorse or promote products derived from this software without specific
++ * prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef NISTTRNG_PRIVATE_H
++#define NISTTRNG_PRIVATE_H
++
++#include "elppdu.h"
++#include "nisttrng_hw.h"
++#include "nisttrng_common.h"
++
++int nisttrng_wait_on_busy(struct nist_trng_state *state);
++int nisttrng_wait_on_done(struct nist_trng_state *state);
++int nisttrng_wait_on_noise_rdy(struct nist_trng_state *state);
++int nisttrng_get_alarms(struct nist_trng_state *state);
++int nisttrng_reset_state(struct nist_trng_state *state);
++
++/* ---------- Reminders ---------- */
++int nisttrng_reset_counters(struct nist_trng_state *state);
++int nisttrng_set_reminder_max_bits_per_req(struct nist_trng_state *state, unsigned long max_bits_per_req);
++int nisttrng_set_reminder_max_req_per_seed(struct nist_trng_state *state, unsigned long long max_req_per_seed);
++int nisttrng_check_seed_lifetime(struct nist_trng_state *state);
++
++/* ---------- Set field APIs ---------- */
++int nisttrng_set_sec_strength(struct nist_trng_state *state, int req_sec_strength);
++int nisttrng_set_addin_present(struct nist_trng_state *state, int addin_present);
++int nisttrng_set_pred_resist(struct nist_trng_state *state, int pred_resist);
++int nisttrng_set_secure_mode(struct nist_trng_state *state, int secure_mode);
++int nisttrng_set_nonce_mode(struct nist_trng_state *state, int nonce_mode);
++
++/* ---------- Load data APIs ---------- */
++int nisttrng_load_ps_addin(struct nist_trng_state *state, void *input_str);
++
++/* ---------- Command APIs ---------- */
++int nisttrng_get_entropy_input(struct nist_trng_state *state, void *input_nonce, int nonce_operation);
++int nisttrng_refresh_addin(struct nist_trng_state *state, void *addin_str);
++int nisttrng_gen_random(struct nist_trng_state *state, void *random_bits, unsigned long req_num_bytes);
++int nisttrng_advance_state(struct nist_trng_state *state);
++int nisttrng_kat(struct nist_trng_state *state, int kat_sel, int kat_vec);
++int nisttrng_full_kat(struct nist_trng_state *state);
++int nisttrng_zeroize(struct nist_trng_state *state);
++
++/* ---------- edu related ---------- */
++
++int nisttrng_rnc(struct nist_trng_state *state, int rnc_ctrl_cmd);
++int nisttrng_wait_fifo_full(struct nist_trng_state *state);
++#endif
+diff --git a/drivers/char/hw_random/dwc/src/trng/include/synversion.h b/drivers/char/hw_random/dwc/src/trng/include/synversion.h
+new file mode 100644
+index 000000000..0b3a4e76c
+--- /dev/null
++++ b/drivers/char/hw_random/dwc/src/trng/include/synversion.h
+@@ -0,0 +1,52 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * This Synopsys software and associated documentation (hereinafter the
++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you. The
++ * Software IS NOT an item of Licensed Software or a Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Products
++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark
++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the
++ * trademarks of their respective owners.
++ *
++ * The contents of this file are dual-licensed; you may select either version
++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license
++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the
++ * SOFTWARE. The BSD License is copied below.
++ *
++ * BSD-3-Clause License:
++ * Copyright (c) 2012-2016 Synopsys, Inc. and/or its affiliates.
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions, and the following disclaimer, without
++ * modification.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. The names of the above-listed copyright holders may not be used to
++ * endorse or promote products derived from this software without specific
++ * prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef VERSION_H
++#define VERSION_H
++
++#define TRNG_VERSION "1.00a"
++
++#endif
+diff --git a/drivers/char/hw_random/dwc/src/trng/kernel/nist_trng.c b/drivers/char/hw_random/dwc/src/trng/kernel/nist_trng.c
+new file mode 100644
+index 000000000..607f62554
+--- /dev/null
++++ b/drivers/char/hw_random/dwc/src/trng/kernel/nist_trng.c
+@@ -0,0 +1,2171 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * This Synopsys software and associated documentation (hereinafter the
++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you. The
++ * Software IS NOT an item of Licensed Software or a Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Products
++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark
++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the
++ * trademarks of their respective owners.
++ *
++ * The contents of this file are dual-licensed; you may select either version
++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license
++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the
++ * SOFTWARE. The BSD License is copied below.
++ *
++ * BSD-3-Clause License:
++ * Copyright (c) 2012-2017 Synopsys, Inc. and/or its affiliates.
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions, and the following disclaimer, without
++ * modification.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. The names of the above-listed copyright holders may not be used to
++ * endorse or promote products derived from this software without specific
++ * prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/dma-mapping.h>
++#include <asm/uaccess.h>
++#include <asm/param.h>
++#include <linux/err.h>
++#include <linux/hw_random.h>
++#include <linux/io.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++
++#include <linux/crypto.h>
++#include <crypto/internal/rng.h>
++
++#include "nisttrng.h"
++
++#define SYNOPSYS_HWRNG_DRIVER_NAME "hwrng-nist_trng"
++
++#define num_gen_bytes 64
++static unsigned long max_reads = 128;
++
++struct synopsys_nisttrng_driver {
++ struct nist_trng_state nisttrng;
++ void *hwrng_drv;
++ void *crypto_drv;
++ unsigned char rand_out[num_gen_bytes];
++};
++
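++/*
++ * xxd_vtrng round-robins successive read requests between the private DRBG
++ * output (slot 0) and the public VTRNG channels reported by the EDU build
++ * configuration; see nisttrng_platform_driver_read().
++ */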
++static unsigned int xxd_vtrng;
++
++static void nisttrng_reinit(struct nist_trng_state *nist_trng)
++{
++ int err;
++
++ err = nisttrng_uninstantiate(nist_trng);
++ if (err && err != CRYPTO_NOT_INSTANTIATED)
++ goto ERR;
++
++ err = nisttrng_instantiate(nist_trng, 128, 1, NULL);
++ if (err)
++ goto ERR;
++
++ERR:
++ DEBUG("NIST_TRNG: Trying to reinitialize after a fatal alarm: %d\n",
++ err);
++}
++
++static int nisttrng_platform_driver_read(struct platform_device *pdev,
++ void *buf, size_t max, bool wait)
++{
++	struct synopsys_nisttrng_driver *data = 0;
++	int nisttrng_error = -1;
++	unsigned int vtrng;
++	u32 *out;
++
++	if (!pdev || !buf || !max)
++		return nisttrng_error;
++
++	data = platform_get_drvdata(pdev);
++	if (data == 0)
++		return nisttrng_error;
++
++	/* Allocate only after the arguments are validated to avoid leaks. */
++	out = kmalloc(max, GFP_KERNEL);
++	if (!out) {
++		SYNHW_PRINT("memory not allocated\n");
++		return -1;
++	}
++
++ if (data->nisttrng.config.build_cfg0.edu_present) {
++		vtrng = xxd_vtrng %
++			(data->nisttrng.config.edu_build_cfg0.public_vtrng_channels + 1);
++ if (vtrng == 0) {
++ /* private vtrng */
++ nisttrng_error = nisttrng_generate(&data->nisttrng, out, max,
++ data->nisttrng.status.sec_strength ? 256 : 128,
++ data->nisttrng.status.pred_resist, NULL);
++ } else {
++ /* public vtrng */
++ nisttrng_error = nisttrng_generate_public_vtrng(&data->nisttrng, out, max, vtrng - 1);
++ }
++ xxd_vtrng++;
++ } else {
++ /* nist core vtrng */
++ nisttrng_error = nisttrng_generate(&data->nisttrng, out, max,
++ data->nisttrng.status.sec_strength ? 256 : 128,
++ data->nisttrng.status.pred_resist, NULL);
++ }
++	if (nisttrng_error < 0) {
++		if (data->nisttrng.status.alarm_code)
++			nisttrng_reinit(&data->nisttrng);
++
++		kfree(out);
++		return nisttrng_error;
++	}
++
++ memcpy(buf, out, max);
++ kfree(out);
++
++ return max;
++}
++
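++/*
++ * hwrng read callback: rng->priv carries the backing platform device, so the
++ * request is simply forwarded to nisttrng_platform_driver_read().
++ */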
++int nisttrng_hwrng_driver_read(struct hwrng *rng, void *buf, size_t max,
++ bool wait)
++{
++ struct platform_device *pdev = 0;
++
++ if (rng == 0)
++ return -1;
++
++ pdev = (struct platform_device *)rng->priv;
++ return nisttrng_platform_driver_read(pdev, buf, max, wait);
++}
++
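++/*
++ * sysfs show/store helpers below expose the corekit release info, hardware
++ * features and raw TRNG registers for inspection and debug.
++ */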
++static ssize_t ckr_show(struct device *dev, struct device_attribute *devattr,
++ char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ return sprintf(buf, "rel_num=%u, ext_ver=%u, ext_enum=%u\n",
++ priv->nisttrng.config.corekit_rel.rel_num,
++ priv->nisttrng.config.corekit_rel.ext_ver,
++ priv->nisttrng.config.corekit_rel.ext_enum);
++}
++
++static ssize_t features_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ return sprintf(buf,
++ "drbg_arch = %u, diag_basic_trng=%u, diag_st_hlt=%u, diag_ns=%u, secure_rst_state=%u, extra_ps_present=%u\n",
++ priv->nisttrng.config.features.drbg_arch,
++ priv->nisttrng.config.features.diag_level_basic_trng,
++ priv->nisttrng.config.features.diag_level_stat_hlt,
++ priv->nisttrng.config.features.diag_level_ns,
++ priv->nisttrng.config.features.secure_rst_state,
++ priv->nisttrng.config.features.extra_ps_present);
++}
++
++static ssize_t secure_show(struct device *dev, struct device_attribute *devattr,
++ char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%s\n", NIST_TRNG_REG_SMODE_GET_SECURE_EN(pdu_io_read32(priv->nisttrng.base +
++ NIST_TRNG_REG_SMODE)) ? "on" : "off");
++}
++
++static ssize_t secure_store(struct device *dev,
++ struct device_attribute *devattr, const char *buf,
++ size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ int ret;
++
++ ret = nisttrng_set_secure_mode(&priv->nisttrng,
++ sysfs_streq(buf, "on") ? 1 : 0);
++ if (ret)
++ return -1;
++
++ return count;
++}
++
++static ssize_t nonce_show(struct device *dev, struct device_attribute *devattr,
++ char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%s\n", NIST_TRNG_REG_SMODE_GET_NONCE(pdu_io_read32(priv->nisttrng.base +
++ NIST_TRNG_REG_SMODE)) ? "on" : "off");
++}
++
++static ssize_t nonce_store(struct device *dev, struct device_attribute *devattr,
++ const char *buf, size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ int ret;
++
++ ret = nisttrng_set_nonce_mode(&priv->nisttrng,
++ sysfs_streq(buf, "on") ? 1 : 0);
++ if (ret)
++ return -1;
++
++ return count;
++}
++
++static ssize_t sec_strength_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%s\n",
++ priv->nisttrng.status.sec_strength ? "256" : "128");
++}
++
++static ssize_t sec_strength_store(struct device *dev,
++ struct device_attribute *devattr,
++ const char *buf, size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char foo[9];
++ int tmp;
++ int ret;
++
++ if (count > 8)
++ return -1;
++
++ foo[8] = 0;
++ memcpy(foo, buf, 8);
++ ret = kstrtoint(foo, 10, &tmp);
++ if (ret)
++ return ret;
++
++ ret = nisttrng_set_sec_strength(&priv->nisttrng, tmp);
++ if (ret)
++ return -1;
++
++ return count;
++}
++
++static ssize_t rand_reg_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ unsigned int x;
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ for (x = 0; x < 4; x++) {
++ sprintf(buf + 8 * x, "%08lx",
++ pdu_io_read32(priv->nisttrng.base +
++ NIST_TRNG_REG_RAND0 + 3 - x));
++ }
++
++ strcat(buf, "\n");
++ return strlen(buf);
++}
++
++static ssize_t seed_reg_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ unsigned int x;
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ for (x = 0; x < 12; x++) {
++ sprintf(buf + 8 * x, "%08lx",
++ pdu_io_read32(priv->nisttrng.base +
++ NIST_TRNG_REG_SEED0 + 11 - x));
++ }
++ strcat(buf, "\n");
++ return strlen(buf);
++}
++
++static ssize_t seed_reg_store(struct device *dev,
++ struct device_attribute *devattr, const char *buf,
++ size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char foo[9];
++ unsigned int x, tmp;
++ int ret;
++
++ // string must be at least 12 32-bit words long in 0 padded hex
++ if (count < (2 * 12 * 4))
++ return -1;
++
++ foo[8] = 0;
++ for (x = 0; x < 12; x++) {
++ memcpy(foo, buf + x * 8, 8);
++ ret = kstrtouint(foo, 16, &tmp);
++ if (ret)
++ return ret;
++
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_SEED0 + x,
++ tmp);
++ }
++
++ return count;
++}
++
++static ssize_t npa_data_reg_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ unsigned int x;
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ for (x = 0; x < 16; x++) {
++ sprintf(buf + 8 * x, "%08lx",
++ pdu_io_read32(priv->nisttrng.base +
++ NIST_TRNG_REG_NPA_DATA0 + 15 - x));
++ }
++
++ strcat(buf, "\n");
++ return strlen(buf);
++}
++
++static ssize_t npa_data_reg_store(struct device *dev,
++ struct device_attribute *devattr,
++ const char *buf, size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char foo[9];
++ unsigned int x, tmp;
++ int ret;
++
++ // string must be at least 16 32-bit words long in 0 padded hex
++ if (count < (2 * 16 * 4))
++ return -1;
++
++ foo[8] = 0;
++ for (x = 0; x < 16; x++) {
++ memcpy(foo, buf + x * 8, 8);
++ ret = kstrtouint(foo, 16, &tmp);
++ if (ret)
++ return ret;
++
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_NPA_DATA0 + x, tmp);
++ }
++
++ return count;
++}
++
++static ssize_t ctrl_reg_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%08lx\n",
++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_CTRL));
++}
++
++static ssize_t ctrl_reg_store(struct device *dev,
++ struct device_attribute *devattr, const char *buf,
++ size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char foo[9];
++ unsigned int tmp;
++ int ret;
++
++ // string must be at least a 32-bit word in 0 padded hex
++ if (count < 8)
++ return -1;
++
++ foo[8] = 0;
++ memcpy(foo, buf, 8);
++ ret = kstrtouint(foo, 16, &tmp);
++ if (ret)
++ return ret;
++
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_CTRL, tmp);
++ return count;
++}
++
++static ssize_t istat_reg_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%08lx\n",
++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_ISTAT));
++}
++
++static ssize_t istat_reg_store(struct device *dev,
++ struct device_attribute *devattr,
++ const char *buf, size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char foo[9];
++ unsigned int tmp;
++ int ret;
++
++ // string must be at least a 32-bit word in 0 padded hex
++ if (count < 8)
++ return -1;
++
++ foo[8] = 0;
++ memcpy(foo, buf, 8);
++ ret = kstrtouint(foo, 16, &tmp);
++ if (ret)
++ return ret;
++
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_ISTAT, tmp);
++ return count;
++}
++
++static ssize_t mode_reg_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%08lx\n",
++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_MODE));
++}
++
++static ssize_t mode_reg_store(struct device *dev,
++ struct device_attribute *devattr, const char *buf,
++ size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char foo[9];
++ unsigned int tmp;
++ int ret;
++
++ // string must be at least a 32-bit word in 0 padded hex
++ if (count < 8)
++ return -1;
++
++ foo[8] = 0;
++ memcpy(foo, buf, 8);
++ ret = kstrtouint(foo, 16, &tmp);
++ if (ret)
++ return ret;
++
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_MODE, tmp);
++
++ return count;
++}
++
++static ssize_t smode_reg_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%08lx\n",
++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_SMODE));
++}
++
++static ssize_t smode_reg_store(struct device *dev,
++ struct device_attribute *devattr,
++ const char *buf, size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char foo[9];
++ unsigned int tmp;
++ int ret;
++
++ // string must be at least a 32-bit word in 0 padded hex
++ if (count < 8)
++ return -1;
++
++ foo[8] = 0;
++ memcpy(foo, buf, 8);
++ ret = kstrtouint(foo, 16, &tmp);
++ if (ret)
++ return ret;
++
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_SMODE, tmp);
++ return count;
++}
++
++static ssize_t alarm_reg_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%08lx\n",
++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_ALARM));
++}
++
++static ssize_t alarm_reg_store(struct device *dev,
++ struct device_attribute *devattr,
++ const char *buf, size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char foo[9];
++ unsigned int tmp;
++ int ret;
++
++	// input must contain at least one zero-padded 32-bit hex word (8 characters)
++ if (count < 8)
++ return -1;
++
++ foo[8] = 0;
++ memcpy(foo, buf, 8);
++ ret = kstrtouint(foo, 16, &tmp);
++ if (ret)
++ return ret;
++
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_ALARM, tmp);
++ return count;
++}
++
++static ssize_t stat_reg_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%08lx\n",
++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_STAT));
++}
++
++static ssize_t ia_wdata_reg_store(struct device *dev,
++ struct device_attribute *devattr,
++ const char *buf, size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char foo[9];
++ unsigned int tmp;
++ int ret;
++
++	// input must contain at least one zero-padded 32-bit hex word (8 characters)
++ if (count < 8)
++ return -1;
++
++ foo[8] = 0;
++ memcpy(foo, buf, 8);
++ ret = kstrtouint(foo, 16, &tmp);
++ if (ret)
++ return ret;
++
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_IA_WDATA, tmp);
++ return count;
++}
++
++static ssize_t ia_wdata_reg_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%08lx\n",
++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_IA_WDATA));
++}
++
++static ssize_t ia_rdata_reg_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%08lx\n",
++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_IA_RDATA));
++}
++
++static ssize_t ia_addr_reg_store(struct device *dev,
++ struct device_attribute *devattr,
++ const char *buf, size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char foo[9];
++ unsigned int tmp;
++ int ret;
++
++	// input must contain at least one zero-padded 32-bit hex word (8 characters)
++ if (count < 8)
++ return -1;
++
++ foo[8] = 0;
++ memcpy(foo, buf, 8);
++ ret = kstrtouint(foo, 16, &tmp);
++ if (ret)
++ return ret;
++
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_IA_ADDR, tmp);
++ return count;
++}
++
++static ssize_t ia_addr_reg_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%08lx\n",
++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_IA_ADDR));
++}
++
++static ssize_t ia_cmd_reg_store(struct device *dev,
++ struct device_attribute *devattr,
++ const char *buf, size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char foo[9];
++ unsigned int tmp;
++ int ret;
++
++	// input must contain at least one zero-padded 32-bit hex word (8 characters)
++ if (count < 8)
++ return -1;
++
++ foo[8] = 0;
++ memcpy(foo, buf, 8);
++ ret = kstrtouint(foo, 16, &tmp);
++ if (ret)
++ return ret;
++
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_IA_CMD, tmp);
++ return count;
++}
++
++static ssize_t ia_cmd_reg_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%08lx\n",
++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_IA_CMD));
++}
++
++static ssize_t rnc_reg_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%08lx\n",
++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_EDU_RNC_CTRL));
++}
++
++static ssize_t rnc_reg_store(struct device *dev,
++ struct device_attribute *devattr, const char *buf,
++ size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char foo[9];
++ unsigned int tmp;
++ int ret;
++
++	// input must contain at least one zero-padded 32-bit hex word (8 characters)
++ if (count < 8)
++ return -1;
++
++ foo[8] = 0;
++ memcpy(foo, buf, 8);
++ ret = kstrtouint(foo, 16, &tmp);
++ if (ret)
++ return ret;
++
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_EDU_RNC_CTRL, tmp);
++
++ return count;
++}
++
++static ssize_t rbc_reg_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%08lx\n",
++ pdu_io_read32(priv->nisttrng.base + NIST_TRNG_EDU_RBC_CTRL));
++}
++
++static ssize_t rbc_reg_store(struct device *dev,
++ struct device_attribute *devattr, const char *buf,
++ size_t count)
++{
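++	/*
++	 * The first four hex digits of the input encode, from the most to the
++	 * least significant nibble: enable (0/1), RBC channel number, rate
++	 * (0-8) and urun_blnk (0-3). Illustrative example: "1020" enables
++	 * channel 0 with a rate of 2 and urun_blnk of 0.
++	 */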
++ char opts_str[5];
++ unsigned int opts_int;
++ int enable, rbc_num, rate, urun_blnk, ret;
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ opts_str[4] = 0;
++ memcpy(opts_str, buf, 4);
++ ret = kstrtouint(opts_str, 16, &opts_int);
++ if (ret)
++ return ret;
++
++ SYNHW_PRINT("%s %x\n", __func__, opts_int);
++
++ enable = (opts_int >> 12 & 0xf);
++ if (enable > 1) {
++ SYNHW_PRINT("incorrect enable %x\n", enable);
++ return -1;
++ }
++
++ rbc_num = (opts_int >> 8 & 0xf);
++ if (rbc_num > priv->nisttrng.config.edu_build_cfg0.rbc_channels - 1) {
++ SYNHW_PRINT("incorrect rbc_num %x\n", rbc_num);
++ return -1;
++ }
++
++ rate = (opts_int >> 4 & 0xf);
++ if (rate > 8) {
++ SYNHW_PRINT("incorrect rate %x\n", rate);
++ return -1;
++ }
++
++ urun_blnk = (opts_int & 0xf);
++ if (urun_blnk > 3) {
++ SYNHW_PRINT("incorrect urun_blnk %x\n", urun_blnk);
++ return -1;
++ }
++
++ SYNHW_PRINT("enable %x rbc_num %x rate %x urun_blnk %x\n", enable,
++ rbc_num, rate, urun_blnk);
++
++ ret = nisttrng_rbc(&priv->nisttrng, enable, rbc_num, rate,
++ urun_blnk);
++ if (ret)
++ return -1;
++
++ return count;
++}
++
++static ssize_t hw_state_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
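++	/*
++	 * Dump the DRBG working state (Key and V) by reading it back word by
++	 * word through the indirect access (IA) registers.
++	 */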
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ u32 addr;
++ int i;
++ int tot_char;
++
++ addr = 0x20;
++ tot_char = sprintf(buf, "Key = ");
++ for (i = 0; i < 8; i++) {
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_IA_ADDR,
++ addr + 7 - i);
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_IA_CMD,
++ 0x80000000);
++ tot_char += sprintf(buf + tot_char, "%08lx",
++ pdu_io_read32(priv->nisttrng.base +
++ NIST_TRNG_REG_IA_RDATA));
++ }
++ tot_char += sprintf(buf + tot_char, "\n");
++
++ addr = 0x28;
++ tot_char += sprintf(buf + tot_char, "V = ");
++ for (i = 0; i < 4; i++) {
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_IA_ADDR,
++ addr + 3 - i);
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_IA_CMD,
++ 0x80000000);
++ tot_char += sprintf(buf + tot_char, "%08lx",
++ pdu_io_read32(priv->nisttrng.base +
++ NIST_TRNG_REG_IA_RDATA));
++ }
++
++ tot_char += sprintf(buf + tot_char, "\n");
++
++ return tot_char;
++}
++
++static ssize_t max_bits_per_req_store(struct device *dev,
++ struct device_attribute *devattr,
++ const char *buf, size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char foo[9];
++ unsigned long tmp;
++ int ret;
++
++	// input must contain at least one zero-padded 32-bit hex word (8 characters)
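++	// (illustrative example: writing "00010000" caps a single generate
++	//  request at 0x10000 bits)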
++ if (count < 8)
++ return -1;
++
++ foo[8] = 0;
++ memcpy(foo, buf, 8);
++ ret = kstrtoul(foo, 16, &tmp);
++ if (ret)
++ return ret;
++
++ ret = nisttrng_set_reminder_max_bits_per_req(&priv->nisttrng,
++ tmp);
++ if (ret)
++ return -1;
++
++ return count;
++}
++
++static ssize_t max_bits_per_req_show(struct device *dev,
++ struct device_attribute *devattr,
++ char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%08lx\n",
++ priv->nisttrng.counters.max_bits_per_req);
++}
++
++static ssize_t max_req_per_seed_store(struct device *dev,
++ struct device_attribute *devattr,
++ const char *buf, size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char foo[17];
++ unsigned long long tmp;
++ int ret;
++
++	// input must contain at least one zero-padded 64-bit hex word (16 characters)
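++	// (illustrative example: writing "0000000000000080" allows 0x80 = 128
++	//  requests per seed, matching the module parameter default)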
++ if (count < 16)
++ return -1;
++
++ foo[16] = 0;
++ memcpy(foo, buf, 16);
++ ret = kstrtoull(foo, 16, &tmp);
++ if (ret)
++ return ret;
++
++ ret = nisttrng_set_reminder_max_req_per_seed(&priv->nisttrng,
++ tmp);
++ if (ret)
++ return -1;
++
++ return count;
++}
++
++static ssize_t max_req_per_seed_show(struct device *dev,
++ struct device_attribute *devattr,
++ char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ return sprintf(buf, "%08llx\n",
++ priv->nisttrng.counters.max_req_per_seed);
++}
++
++static ssize_t collect_ent_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ int rep;
++ int i, j;
++ int ret;
++ u32 tmp;
++ int t;
++
++ t = NIST_TRNG_RETRY_MAX;
++
++ // Change to TEST mode
++ DEBUG("Change to TEST mode\n");
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_SMODE, 0x00000028);
++ // Turn on the noise collect mode
++ DEBUG("Turn on the noise collect mode\n");
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_SMODE, 0x80000028);
++
++ // issue generate entropy command
++ DEBUG("Issue a GEN_NOISE command\n");
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_CTRL,
++ NIST_TRNG_REG_CTRL_CMD_GEN_NOISE);
++
++ // read raw noise
++ // 2 reads if sec_strength is 128 and 3 reads if it is 256
++ if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES128)
++ rep = 2;
++	else if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES256)
++		rep = 3;
++	else
++		return -1;	/* unknown security strength; rep would be used uninitialized */
++
++ for (i = 0; i < rep; i++) {
++ t = NIST_TRNG_RETRY_MAX;
++ tmp = 0;
++ DEBUG("Wait for NOISE_RDY interrupt.\n");
++ do {
++ tmp = pdu_io_read32(priv->nisttrng.base +
++ NIST_TRNG_REG_ISTAT);
++ } while (!(tmp & (NIST_TRNG_REG_ISTAT_NOISE_RDY |
++ NIST_TRNG_REG_ISTAT_ALARMS)) &&
++ --t);
++
++ DEBUG("Read NPA_DATAx\n");
++ for (j = 0; j < 16; j++) {
++ sprintf(buf + 128 * i + 8 * j, "%08lx",
++ pdu_io_read32(priv->nisttrng.base +
++ NIST_TRNG_REG_NPA_DATA0 + j));
++ }
++
++ // clear NOISE_RDY IRQ
++ DEBUG("Clear NOISE_RDY interrupt.\n");
++ ret = nisttrng_wait_on_noise_rdy(&priv->nisttrng);
++ if (ret)
++ return -1;
++ }
++
++ DEBUG("Wait for DONE\n");
++ ret = nisttrng_wait_on_done(&priv->nisttrng);
++ if (ret)
++ return -1;
++
++ strcat(buf, "\n");
++ return strlen(buf);
++}
++
++static ssize_t collect_ent_nsout_show(struct device *dev,
++ struct device_attribute *devattr,
++ char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ int rep;
++ int i;
++ int ret;
++
++ // generate entropy
++ ret = nisttrng_get_entropy_input(&priv->nisttrng, NULL, 0);
++ if (ret)
++ return -1;
++
++ // read NS_OUTPUTx
++ // 32 reads if sec_strength is 128 and 48 reads if it is 256
++ if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES128)
++ rep = 32;
++	else if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES256)
++		rep = 48;
++	else
++		return -1;	/* unknown security strength; rep would be used uninitialized */
++
++ for (i = 0; i < rep; i++) {
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_IA_ADDR,
++ 0x70 + rep - 1 - i);
++ pdu_io_write32(priv->nisttrng.base + NIST_TRNG_REG_IA_CMD,
++ 0x80000000);
++ sprintf(buf + 8 * i, "%08lx",
++ pdu_io_read32(priv->nisttrng.base +
++ NIST_TRNG_REG_IA_RDATA));
++ }
++
++ strcat(buf, "\n");
++ return strlen(buf);
++}
++
++static ssize_t nonce_seed_with_df_store(struct device *dev,
++ struct device_attribute *devattr,
++ const char *buf, size_t count)
++{
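++	/*
++	 * Expects the raw nonce material as zero-padded hex: 256 characters at
++	 * 128-bit strength or 384 at 256-bit. The parsed words are fed to
++	 * nisttrng_get_entropy_input() with the derivation-function path
++	 * selected.
++	 */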
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char foo[9];
++ u32 seed[48] = { 0 };
++ int rep;
++ int i;
++ int ret;
++
++ if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES128)
++ rep = 2;
++	else if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES256)
++		rep = 3;
++	else
++		return -1;	/* unknown security strength; rep would be used uninitialized */
++
++ DEBUG("Number of char in input = %zu\n", count);
++ if (count != (rep * 128))
++ return -1;
++
++ foo[8] = 0;
++ for (i = 0; i < (rep * 16); i++) {
++ memcpy(foo, buf + i * 8, 8);
++ ret = kstrtouint(foo, 16, (seed + (rep * 16 - 1) - i));
++ if (ret)
++ return ret;
++ }
++
++ ret = nisttrng_get_entropy_input(&priv->nisttrng, seed, 1);
++ if (ret)
++ return -1;
++
++ return count;
++}
++
++static ssize_t nonce_seed_direct_store(struct device *dev,
++ struct device_attribute *devattr,
++ const char *buf, size_t count)
++{
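++	/*
++	 * Expects the seed directly as zero-padded hex: 64 characters at
++	 * 128-bit strength or 96 at 256-bit. The parsed words are fed to
++	 * nisttrng_get_entropy_input() without the derivation function.
++	 */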
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char foo[9];
++ u32 seed[12] = { 0 };
++ int rep;
++ int i;
++ int ret;
++
++ if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES128)
++ rep = 2;
++	else if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES256)
++		rep = 3;
++	else
++		return -1;	/* unknown security strength; rep would be used uninitialized */
++
++ DEBUG("Number of char in input = %zu\n", count);
++ if (count != (rep * 32))
++ return -1;
++
++ foo[8] = 0;
++ for (i = 0; i < (rep * 4); i++) {
++ memcpy(foo, buf + i * 8, 8);
++ ret = kstrtouint(foo, 16, (seed + (rep * 4 - 1) - i));
++ if (ret)
++ return ret;
++ }
++
++ ret = nisttrng_get_entropy_input(&priv->nisttrng, seed, 0);
++ if (ret)
++ return -1;
++
++ return count;
++}
++
++static ssize_t instantiate_store(struct device *dev,
++ struct device_attribute *devattr,
++ const char *buf, size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char opts_str[101];
++ unsigned int opts_int;
++ int req_sec_strength = 256;
++ int pred_resist = 1;
++ bool ps_exists = 0;
++ u32 ps[12];
++ unsigned int ps_length;
++ int i;
++ int ret;
++
++ /* First 3 digits:
++ * they have to be 0 or 1
++	 * 2-1-0 --> 2: prediction resistance, 1: security strength, 0: personalization string existence
++ */
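++	/*
++	 * Illustrative examples: "110" instantiates with prediction
++	 * resistance and 256-bit strength and no personalization string;
++	 * "001-<64 hex chars>" instantiates at 128-bit strength without
++	 * prediction resistance, using the supplied personalization string.
++	 */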
++ opts_str[3] = 0;
++ memcpy(opts_str, buf, 3);
++ ret = kstrtouint(opts_str, 2, &opts_int);
++ if (ret)
++ return ret;
++
++ if (((opts_str[0] != '0') && (opts_str[0] != '1')) ||
++ ((opts_str[1] != '0') && (opts_str[1] != '1')) ||
++ ((opts_str[2] != '0') && (opts_str[2] != '1'))) {
++ SYNHW_PRINT("Invalid input options: First 3 digits can only be 1 or 0\n");
++ return -1;
++ }
++
++ if (opts_int & 1)
++ ps_exists = 1;
++ else
++ ps_exists = 0;
++
++ if (opts_int & 2)
++ req_sec_strength = 256;
++ else
++ req_sec_strength = 128;
++
++ if (opts_int & 4)
++ pred_resist = 1;
++ else
++ pred_resist = 0;
++
++ /* check input option length */
++ if (!ps_exists) {
++ if (count != 3) {
++			SYNHW_PRINT("Invalid input options: If the personalization string does not exist, the input has to be 3 characters.\n");
++ return -1;
++ }
++ } else {
++ if (req_sec_strength == 128) {
++ if (count != 64 + 4) { // +4 for options and "-"
++				SYNHW_PRINT("Invalid input options: If the personalization string exists and the security strength is 128-bit, the input has to be 68 characters (not %zu).\n",
++ count);
++ return -1;
++ }
++ } else if (req_sec_strength == 256) {
++			if (count != 96 + 4) { // +4 for the option digits and "-"
++				SYNHW_PRINT("Invalid input options: If the personalization string exists and the security strength is 256-bit, the input has to be 100 characters (not %zu).\n",
++ count);
++ return -1;
++ }
++ } else {
++ SYNHW_PRINT("Invalid input options\n");
++ return -1;
++ }
++ }
++
++	/* Personalization string */
++ for (i = 0; i < 12; i++)
++ ps[i] = 0;
++
++ if (req_sec_strength == 128)
++ ps_length = 64;
++ else if (req_sec_strength == 256)
++ ps_length = 96;
++ else
++ SYNHW_PRINT("Invalid security strength\n");
++
++ if (ps_exists) {
++ opts_str[1] = 0;
++ memcpy(opts_str, buf + 3, 1);
++
++ if (opts_str[0] == '-') {
++ opts_str[8] = 0;
++ for (i = 0; i < ps_length / 8; i++) {
++ memcpy(opts_str, buf + 4 + i * 8, 8);
++ ret = kstrtouint(opts_str, 16,
++ ps + (ps_length / 8 - 1) - i);
++ if (ret)
++ return ret;
++ }
++ } else {
++			SYNHW_PRINT("4th character of input has to be \"-\" when the personalization string exists\n");
++ }
++
++ ret = nisttrng_instantiate(&priv->nisttrng,
++ req_sec_strength, pred_resist,
++ ps);
++ if (ret)
++ return -1;
++
++ } else {
++ ret = nisttrng_instantiate(&priv->nisttrng,
++ req_sec_strength, pred_resist,
++ NULL);
++ if (ret)
++ return -1;
++ }
++
++ return count;
++}
++
++static ssize_t uninstantiate_store(struct device *dev,
++ struct device_attribute *devattr,
++ const char *buf, size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++
++ nisttrng_uninstantiate(&priv->nisttrng);
++
++ return count;
++}
++
++static ssize_t reseed_store(struct device *dev, struct device_attribute *devattr,
++ const char *buf, size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char opts_str[100];
++ unsigned int opts_int;
++ int pred_resist = 1;
++ bool addin_exists = 0;
++ u32 addin[12];
++ unsigned int addin_length;
++ int i;
++ int ret;
++
++ /* First 2 digits:
++ * they have to be 0 or 1
++	 * 1-0 --> 1: prediction resistance, 0: additional input string existence
++ */
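++	/*
++	 * Illustrative examples: "10" reseeds with prediction resistance and
++	 * no additional input; "01-<64 or 96 hex chars>" reseeds without
++	 * prediction resistance, supplying additional input sized to the
++	 * current security strength.
++	 */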
++ opts_str[2] = 0;
++ memcpy(opts_str, buf, 2);
++ ret = kstrtouint(opts_str, 2, &opts_int);
++ if (ret)
++ return ret;
++
++ if (((opts_str[0] != '0') && (opts_str[0] != '1')) ||
++ ((opts_str[1] != '0') && (opts_str[1] != '1'))) {
++ SYNHW_PRINT("Invalid input options: First 2 digits can only be 1 or 0\n");
++ return -1;
++ }
++
++ if (opts_int & 1)
++ addin_exists = 1;
++ else
++ addin_exists = 0;
++
++ if (opts_int & 2)
++ pred_resist = 1;
++ else
++ pred_resist = 0;
++
++ /* check input option length */
++ if (!addin_exists) {
++ if (count != 2) {
++			SYNHW_PRINT("Invalid input options: If additional input does not exist, the input has to be 2 characters.\n");
++ return -1;
++ }
++ } else {
++ if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES128) {
++ if (count != 64 + 3) { // +3 for options and "-"
++				SYNHW_PRINT("Invalid input options: If additional input exists and security strength is 128-bit, the input has to be 67 characters.\n");
++ return -1;
++ }
++ } else if (priv->nisttrng.status.sec_strength ==
++ SEC_STRNT_AES256) {
++ if (count != 96 + 3) { // +3 for options and "-"
++				SYNHW_PRINT("Invalid input options: If additional input exists and security strength is 256-bit, the input has to be 99 characters.\n");
++ return -1;
++ }
++ } else {
++ SYNHW_PRINT("Invalid input options\n");
++ return -1;
++ }
++ }
++
++ /* Additional input */
++ for (i = 0; i < 12; i++)
++ addin[i] = 0;
++
++ if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES128)
++ addin_length = 64;
++ else if (priv->nisttrng.status.sec_strength == SEC_STRNT_AES256)
++ addin_length = 96;
++ else
++ SYNHW_PRINT("Invalid security strength\n");
++
++ if (addin_exists) {
++ opts_str[1] = 0;
++ memcpy(opts_str, buf + 2, 1);
++
++ if (opts_str[0] == '-') {
++ opts_str[8] = 0;
++ for (i = 0; i < addin_length / 8; i++) {
++ memcpy(opts_str, buf + 3 + i * 8, 8);
++ ret = kstrtouint(opts_str, 16, addin + (addin_length / 8 - 1) - i);
++ if (ret)
++ return ret;
++ }
++ } else {
++ SYNHW_PRINT("3rd character of input has to be \"-\" when additional input exists\n");
++ }
++
++ ret = nisttrng_reseed(&priv->nisttrng, pred_resist,
++ addin);
++ if (ret)
++ return -1;
++
++ } else {
++ ret = nisttrng_reseed(&priv->nisttrng, pred_resist,
++ NULL);
++ if (ret)
++ return -1;
++ }
++
++ return count;
++}
++
++static ssize_t generate_store(struct device *dev,
++ struct device_attribute *devattr, const char *buf,
++ size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char opts_str[101];
++ unsigned int opts_int;
++ int req_sec_strength = 128;
++ int pred_resist = 1;
++ bool addin_exists = 0;
++ unsigned char out[num_gen_bytes];
++ u32 addin[12];
++ unsigned int addin_length;
++ int i;
++ int ret;
++
++ /* First 3 digits:
++ * they have to be 0 or 1
++	 * 2-1-0 --> 2: prediction resistance, 1: security strength, 0: additional input string existence
++ */
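++	/*
++	 * Illustrative examples: "100" generates num_gen_bytes bytes at
++	 * 128-bit strength with prediction resistance and no additional
++	 * input; "011-<96 hex chars>" generates at 256-bit strength without
++	 * prediction resistance, using the supplied additional input. The
++	 * result can be read back through the rand_out attribute.
++	 */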
++ opts_str[3] = 0;
++ memcpy(opts_str, buf, 3);
++ ret = kstrtouint(opts_str, 2, &opts_int);
++ if (ret)
++ return ret;
++
++ if (((opts_str[0] != '0') && (opts_str[0] != '1')) ||
++ ((opts_str[1] != '0') && (opts_str[1] != '1')) ||
++ ((opts_str[2] != '0') && (opts_str[2] != '1'))) {
++ SYNHW_PRINT("Invalid input options: First 3 digits can only be 1 or 0\n");
++ return -1;
++ }
++
++ if (opts_int & 1)
++ addin_exists = 1;
++ else
++ addin_exists = 0;
++
++ if (opts_int & 2)
++ req_sec_strength = 256;
++ else
++ req_sec_strength = 128;
++
++ if (opts_int & 4)
++ pred_resist = 1;
++ else
++ pred_resist = 0;
++
++ /* check input option length */
++ if (!addin_exists) {
++ if (count != 3) {
++			SYNHW_PRINT("Invalid input options: If additional input does not exist, the input has to be 3 characters.\n");
++ return -1;
++ }
++ } else {
++ if (req_sec_strength == 128) {
++ if (count != 64 + 4) { // +4 for options and "-"
++				SYNHW_PRINT("Invalid input options: If additional input exists and security strength is 128-bit, the input has to be 68 characters.\n");
++ return -1;
++ }
++ } else if (req_sec_strength == 256) {
++ if (count != 96 + 4) { // +4 for options and "-"
++				SYNHW_PRINT("Invalid input options: If additional input exists and security strength is 256-bit, the input has to be 100 characters.\n");
++ return -1;
++ }
++ } else {
++ SYNHW_PRINT("Invalid input options\n");
++ return -1;
++ }
++ }
++
++ /* Additional input */
++ for (i = 0; i < 12; i++)
++ addin[i] = 0;
++
++ if (req_sec_strength == 128)
++ addin_length = 64;
++ else if (req_sec_strength == 256)
++ addin_length = 96;
++ else
++ SYNHW_PRINT("Invalid security strength\n");
++
++ if (addin_exists) {
++ opts_str[1] = 0;
++ memcpy(opts_str, buf + 3, 1);
++
++ if (opts_str[0] == '-') {
++ opts_str[8] = 0;
++ for (i = 0; i < addin_length / 8; i++) {
++ memcpy(opts_str, buf + 4 + i * 8, 8);
++ ret = kstrtouint(opts_str, 16, addin + (addin_length / 8 - 1) - i);
++ if (ret)
++ return ret;
++ }
++ } else {
++ SYNHW_PRINT("4th character of input has to be \"-\" when additional input exists\n");
++ }
++
++ ret = nisttrng_generate(&priv->nisttrng, (u32 *)out,
++ num_gen_bytes, req_sec_strength,
++ pred_resist, addin);
++ if (ret)
++ return -1;
++
++ } else {
++ ret = nisttrng_generate(&priv->nisttrng, (u32 *)out,
++ num_gen_bytes, req_sec_strength,
++ pred_resist, NULL);
++ if (ret)
++ return -1;
++ }
++
++ /* store the result */
++ memcpy(priv->rand_out, out, sizeof(out));
++
++ return count;
++}
++
++static ssize_t generate_pub_vtrng_store(struct device *dev,
++ struct device_attribute *devattr,
++ const char *buf, size_t count)
++{
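++	/*
++	 * The first character is a hex digit selecting the public vTRNG
++	 * channel; the generated bytes are stored in priv->rand_out and can
++	 * be read back through the rand_out_vtrng attribute.
++	 */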
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ char opts_str[2];
++ unsigned int opts_int;
++ unsigned char out[num_gen_bytes];
++ int ret;
++
++ opts_str[1] = 0;
++ memcpy(opts_str, buf, 1);
++ ret = kstrtouint(opts_str, 16, &opts_int);
++ if (ret)
++ return ret;
++
++ SYNHW_PRINT("%s %d %d %d %d\n", __func__, opts_str[0],
++ priv->nisttrng.config.edu_build_cfg0.public_vtrng_channels,
++ opts_str[1], opts_int);
++
++ ret = nisttrng_generate_public_vtrng(&priv->nisttrng,
++ (u32 *)out,
++ num_gen_bytes, opts_int);
++ if (ret)
++ return -1;
++
++ memcpy(priv->rand_out, out, sizeof(out));
++
++ return count;
++}
++
++/* rand_out_show displays the last generated random number (num_gen_bytes bytes), not just the last block. */
++static ssize_t rand_out_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ unsigned int i, j;
++ unsigned long rand;
++ bool all_zero = true;
++
++ /* If all bits of the rand_reg register are 0, display 0 */
++ for (i = 0; i < 4; i++) {
++ rand = pdu_io_read32(priv->nisttrng.base + NIST_TRNG_REG_RAND0 +
++ (3 - i));
++ if (rand != 0) {
++ all_zero = false;
++ break;
++ }
++ }
++
++ if (all_zero) {
++ sprintf(buf + 2 * i, "%02x", 0);
++ } else {
++ for (i = 0; i < (num_gen_bytes / 16); i++) {
++ for (j = 0; j < 16; j++) {
++ sprintf(buf + 2 * (i * 16 + j), "%02x",
++ priv->rand_out[(i + 1) * 16 - 1 - j]);
++ }
++ }
++ j = 0;
++ while (i * 16 + j < num_gen_bytes) {
++ sprintf(buf + 2 * (i * 16 + j), "%02x",
++ priv->rand_out[num_gen_bytes - 1 - j]);
++ j++;
++ }
++ }
++
++ strcat(buf, "\n");
++ return strlen(buf);
++}
++
++/* rand_out_vtrng_show displays the last generated random number (num_gen_bytes bytes), not just the last block. */
++static ssize_t rand_out_vtrng_show(struct device *dev,
++ struct device_attribute *devattr, char *buf)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ unsigned int i, j;
++
++	/* dump the last buffered vTRNG output from priv->rand_out */
++
++ for (i = 0; i < (num_gen_bytes / 16); i++) {
++ for (j = 0; j < 16; j++) {
++ sprintf(buf + 2 * (i * 16 + j), "%02x",
++ priv->rand_out[(i + 1) * 16 - 1 - j]);
++ }
++ }
++
++ j = 0;
++ while (i * 16 + j < num_gen_bytes) {
++ sprintf(buf + 2 * (i * 16 + j), "%02x",
++ priv->rand_out[num_gen_bytes - 1 - j]);
++ j++;
++ }
++
++ strcat(buf, "\n");
++ return strlen(buf);
++}
++
++static ssize_t kat_store(struct device *dev, struct device_attribute *devattr,
++ const char *buf, size_t count)
++{
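++	/*
++	 * Accepted values: "full" for the full known-answer test, or a
++	 * two-digit selector "00".."11" passed to nisttrng_kat(); anything
++	 * else falls back to the full KAT.
++	 */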
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ int ret;
++
++ if (sysfs_streq(buf, "full")) {
++ ret = nisttrng_full_kat(&priv->nisttrng);
++ if (ret)
++ return -1;
++
++ } else if (sysfs_streq(buf, "00")) {
++ ret = nisttrng_kat(&priv->nisttrng, 0, 0);
++ if (ret)
++ return -1;
++
++ } else if (sysfs_streq(buf, "01")) {
++ ret = nisttrng_kat(&priv->nisttrng, 0, 1);
++ if (ret)
++ return -1;
++
++ } else if (sysfs_streq(buf, "10")) {
++ ret = nisttrng_kat(&priv->nisttrng, 1, 0);
++ if (ret)
++ return -1;
++
++ } else if (sysfs_streq(buf, "11")) {
++ ret = nisttrng_kat(&priv->nisttrng, 1, 1);
++ if (ret)
++ return -1;
++
++ } else {
++ ret = nisttrng_full_kat(&priv->nisttrng);
++ if (ret)
++ return -1;
++ }
++
++ return count;
++}
++
++static void str_to_384_bit(char *buf, u32 *out)
++{
++ char foo[9];
++ int i;
++ int ret;
++
++ foo[8] = 0;
++ for (i = 0; i < 12; i++) {
++ memcpy(foo, buf + i * 8, 8);
++ ret = kstrtouint(foo, 16, out + 11 - i);
++ }
++}
++
++/* This attribute is only for test purposes */
++static ssize_t test_attr_store(struct device *dev,
++ struct device_attribute *devattr, const char *buf,
++ size_t count)
++{
++ struct synopsys_nisttrng_driver *priv = dev_get_drvdata(dev);
++ int i;
++ int err;
++ u32 addin[12];
++ u32 ps[12];
++ char *out;
++
++ char buf_seed1[96] =
++ "c54805274bde00aa5289e0513579019707666d2fa7a1c8908865891c87c0c652335a4d3cc415bc30742b164647f8820f";
++ char buf_ps1[96] =
++ "d63fb5afa2101fa4b8a6c3b89d9c250ac728fc1ddad0e7585b5d54728ed20c2f940e89155596e3b963635b6d6088164b";
++ char buf_addin1[96] =
++ "744bfae3c23a5cc9a3b373b6c50795068d35eb8a339746ac810d16f864e880061082edf9d2687c211960aa83400f85f9";
++ char buf_seed2[96] =
++ "b2ad31d1f20dcf30dd526ec9156c07f270216bdb59197325bab180675929888ab699c54fb21819b7d921d6346bff2f7f";
++ char buf_addin2[96] =
++ "ad55c682962aa4fe9ebc227c9402e79b0aa7874844d33eaee7e2d15baf81d9d33936e4d93f28ad109657b512aee115a5";
++ char buf_seed3[96] =
++ "eca449048d26fd38f8ca435237dce66eadec7069ee5dd0b70084b819a711c0820a7556bbd0ae20f06e5169278b593b71";
++ u32 tmp[12];
++
++ for (i = 0; i < 12; i++)
++ addin[i] = i;
++
++ for (i = 0; i < 12; i++)
++ ps[i] = i + 100;
++
++ /* SDK doc example - Prediction Resistance not available, no Reseed */
++ err = nisttrng_uninstantiate(&priv->nisttrng);
++ if (err && err != CRYPTO_NOT_INSTANTIATED)
++ return -1;
++
++ if (nisttrng_instantiate(&priv->nisttrng, 128, 0, ps) < 0)
++ return -1;
++
++ out = kmalloc(10, GFP_KERNEL);
++ if (nisttrng_generate(&priv->nisttrng, out, 10, 128, 0, addin) < 0)
++ return -1;
++
++ DEBUG("----- Generate 10 bytes\n");
++ for (i = 0; i < 10; i++)
++ DEBUG("%02x", out[i]);
++
++ DEBUG("\n");
++ kfree(out);
++
++ out = kmalloc(512, GFP_KERNEL);
++ if (nisttrng_generate(&priv->nisttrng, out, 512, 128, 0, addin) < 0)
++ return -1;
++
++ DEBUG("----- Generate 512 bytes\n");
++ for (i = 0; i < 512; i++)
++ DEBUG("%02x", out[i]);
++
++ DEBUG("\n");
++ kfree(out);
++
++ out = kmalloc(41, GFP_KERNEL);
++ if (nisttrng_generate(&priv->nisttrng, out, 41, 128, 0, addin) < 0)
++ return -1;
++
++ DEBUG("----- Generate 41 bytes\n");
++ for (i = 0; i < 41; i++)
++ DEBUG("%02x", out[i]);
++
++ DEBUG("\n");
++ kfree(out);
++
++ err = nisttrng_uninstantiate(&priv->nisttrng);
++ if (err < 0 && err != CRYPTO_NOT_INSTANTIATED)
++ return -1;
++
++ /* SDK doc example - DRBG Validation */
++ err = nisttrng_uninstantiate(&priv->nisttrng);
++ if (err && err != CRYPTO_NOT_INSTANTIATED)
++ return -1;
++
++ if (nisttrng_set_nonce_mode(&priv->nisttrng, 1) < 0)
++ return -1;
++
++ out = kmalloc(64, GFP_KERNEL);
++ str_to_384_bit(buf_seed1, tmp);
++ if (nisttrng_get_entropy_input(&priv->nisttrng, tmp, 0) < 0)
++ return -1;
++
++ str_to_384_bit(buf_ps1, tmp);
++ if (nisttrng_instantiate(&priv->nisttrng, 256, 1, tmp) < 0)
++ return -1;
++
++ str_to_384_bit(buf_seed2, tmp);
++ if (nisttrng_get_entropy_input(&priv->nisttrng, tmp, 0) < 0)
++ return -1;
++
++ str_to_384_bit(buf_addin1, tmp);
++ if (nisttrng_generate(&priv->nisttrng, out, 64, 256, 1, tmp) < 0)
++ return -1;
++
++ str_to_384_bit(buf_seed3, tmp);
++ if (nisttrng_get_entropy_input(&priv->nisttrng, tmp, 0) < 0)
++ return -1;
++
++ str_to_384_bit(buf_addin2, tmp);
++ if (nisttrng_generate(&priv->nisttrng, out, 64, 256, 1, tmp) < 0)
++ return -1;
++
++ memcpy(priv->rand_out, out, 64);
++
++ return count;
++}
++
++static DEVICE_ATTR_RO(ckr);
++static DEVICE_ATTR_RO(features);
++static DEVICE_ATTR_RW(secure);
++static DEVICE_ATTR_RW(nonce);
++static DEVICE_ATTR_RW(sec_strength);
++
++static DEVICE_ATTR_RW(mode_reg);
++static DEVICE_ATTR_RW(smode_reg);
++static DEVICE_ATTR_RW(alarm_reg);
++static DEVICE_ATTR_RO(rand_reg);
++static DEVICE_ATTR_RO(rand_out);
++static DEVICE_ATTR_RO(rand_out_vtrng);
++static DEVICE_ATTR_RW(seed_reg);
++static DEVICE_ATTR_RW(npa_data_reg);
++static DEVICE_ATTR_RW(ctrl_reg);
++static DEVICE_ATTR_RW(istat_reg);
++static DEVICE_ATTR_RO(stat_reg);
++static DEVICE_ATTR_RW(rnc_reg);
++static DEVICE_ATTR_RW(rbc_reg);
++
++static DEVICE_ATTR_RW(ia_wdata_reg);
++static DEVICE_ATTR_RO(ia_rdata_reg);
++static DEVICE_ATTR_RW(ia_addr_reg);
++static DEVICE_ATTR_RW(ia_cmd_reg);
++static DEVICE_ATTR_RO(hw_state);
++
++static DEVICE_ATTR_RO(collect_ent);
++static DEVICE_ATTR_RO(collect_ent_nsout);
++static DEVICE_ATTR_WO(nonce_seed_with_df);
++static DEVICE_ATTR_WO(nonce_seed_direct);
++static DEVICE_ATTR_WO(instantiate);
++static DEVICE_ATTR_WO(uninstantiate);
++static DEVICE_ATTR_WO(reseed);
++static DEVICE_ATTR_WO(generate);
++static DEVICE_ATTR_WO(generate_pub_vtrng);
++static DEVICE_ATTR_WO(kat);
++
++static DEVICE_ATTR_RW(max_bits_per_req);
++static DEVICE_ATTR_RW(max_req_per_seed);
++
++static DEVICE_ATTR_WO(test_attr);
++
++static const struct attribute_group nisttrng_attr_group = {
++ .attrs =
++ (struct attribute *[]){
++ &dev_attr_ckr.attr,
++ //&dev_attr_stepping.attr,
++ &dev_attr_features.attr, &dev_attr_secure.attr,
++ &dev_attr_nonce.attr, &dev_attr_sec_strength.attr,
++
++ &dev_attr_mode_reg.attr, &dev_attr_smode_reg.attr,
++ &dev_attr_alarm_reg.attr, &dev_attr_rand_reg.attr,
++ &dev_attr_rand_out.attr, &dev_attr_rand_out_vtrng.attr,
++ &dev_attr_seed_reg.attr, &dev_attr_npa_data_reg.attr,
++ &dev_attr_ctrl_reg.attr, &dev_attr_istat_reg.attr,
++ &dev_attr_stat_reg.attr, &dev_attr_rnc_reg.attr,
++ &dev_attr_rbc_reg.attr,
++
++ &dev_attr_ia_wdata_reg.attr,
++ &dev_attr_ia_rdata_reg.attr, &dev_attr_ia_addr_reg.attr,
++ &dev_attr_ia_cmd_reg.attr, &dev_attr_hw_state.attr,
++
++ &dev_attr_collect_ent.attr,
++ &dev_attr_collect_ent_nsout.attr,
++ &dev_attr_nonce_seed_with_df.attr,
++ &dev_attr_nonce_seed_direct.attr,
++ &dev_attr_instantiate.attr,
++ &dev_attr_uninstantiate.attr, &dev_attr_reseed.attr,
++ &dev_attr_generate.attr,
++ &dev_attr_generate_pub_vtrng.attr, &dev_attr_kat.attr,
++
++ &dev_attr_max_bits_per_req.attr,
++ &dev_attr_max_req_per_seed.attr,
++
++ &dev_attr_test_attr.attr, NULL },
++};
++
++static int nisttrng_self_test(struct nist_trng_state *nist_trng)
++{
++ u32 seed[16], out[4], x, y;
++
++ static const u32 exp128[10][4] = {
++ { 0x5db79bb2, 0xc3a0df1e, 0x099482b6,
++ 0xc319981e }, // The 1st generated output
++ { 0xb344d301, 0xdbd97ca0, 0x6e66e668,
++ 0x0bcd4625 }, // The 2nd generate output
++ { 0xec553f18, 0xa0e5c3cb, 0x752c03c2,
++ 0x5e7b04f7 }, // The 3rd generate output
++ { 0xcfe23e6e, 0x5302edc2, 0xdbf7b05b,
++ 0x2c817c0f }, // The 4th generate output
++ { 0xbd5a8726, 0x028c43d0, 0xb77ac4e3,
++ 0x0844ba2c }, // The 5th generate output
++ { 0xa63b4c0e, 0x8d11d0ba, 0x08b5a10f,
++ 0xab731aff }, // The 6th generate output
++ { 0xb7b56a2f, 0x1d84d1f0, 0xe48d1a0a,
++ 0x43a010a6 }, // The 7th generate output
++ { 0xcf66439d, 0xc937451d, 0x75c34d20,
++ 0x21a21398 }, // The 8th generate output
++ { 0xcb6f0a57, 0x5ff34705, 0x08838e49,
++ 0x21137614 }, // The 9th generate output
++ { 0x61c48b24, 0x25c18d29, 0xc6005e4e,
++ 0xae3b0389 }, // The 10th generate output
++ };
++
++ static const u32 exp256[10][4] = {
++ { 0x1f1a1441, 0xa0865ece, 0x9ff8d5b9,
++ 0x3f78ace6 }, // The 1st generated output
++ { 0xf8190a86, 0x6d6ded2a, 0xc4d0e9bf,
++ 0x24dab55c }, // The 2nd generate output
++ { 0xd3948b74, 0x3dfea516, 0x9c3b86a2,
++ 0xeb184b41 }, // The 3rd generate output
++ { 0x2eb82ab6, 0x2aceefda, 0xc0cf6a5f,
++ 0xa45cb333 }, // The 4th generate output
++ { 0xa49b1c7b, 0x5b51bac7, 0x7586770b,
++ 0x8cb2c392 }, // The 5th generate output
++ { 0x3f3ba09d, 0xa2c9ad29, 0x9687fb8f,
++ 0xa5ae3fd5 }, // The 6th generate output
++ { 0x11dd1076, 0xe37e86cb, 0xced0220a,
++ 0x00448c4f }, // The 7th generate output
++ { 0x955a5e52, 0x84ee38b1, 0xb3271e5f,
++ 0x097751e3 }, // The 8th generate output
++ { 0x5cd73ba8, 0xd8a36a1e, 0xa8a2d7c3,
++ 0xa96de048 }, // The 9th generate output
++ { 0xfb374c63, 0x827b85fa, 0x244e0c7a,
++ 0xa09afd39 }, // The 10th generate output
++ };
++
++ int ret, enable, rate, urun;
++ u32 tmp;
++
++ for (x = 0; x < 16; x++)
++ seed[x] = 0x12345679 * (x + 1);
++
++ DEBUG("Doing a self-test with security strength of 128\n");
++ ret = nisttrng_uninstantiate(nist_trng);
++ if (ret && ret != CRYPTO_NOT_INSTANTIATED)
++ goto ERR;
++
++ //if ((ret = nisttrng_set_secure_mode(nist_trng, 0))) { goto ERR; }
++ ret = nisttrng_set_nonce_mode(nist_trng, 1);
++ if (ret)
++ goto ERR;
++
++ ret = nisttrng_set_sec_strength(nist_trng, 128);
++ if (ret)
++ goto ERR;
++
++ ret = nisttrng_get_entropy_input(nist_trng, seed, 0);
++ if (ret)
++ goto ERR;
++
++ ret = nisttrng_instantiate(nist_trng, 128, 0, NULL);
++ if (ret)
++ goto ERR;
++
++ if (nist_trng->config.build_cfg0.edu_present) {
++ ret = nisttrng_wait_fifo_full(nist_trng);
++ if (ret)
++ goto ERR;
++ }
++
++ ret = nisttrng_generate(nist_trng, out, 16, 128, 0, NULL);
++ if (ret)
++ goto ERR;
++
++ if (nist_trng->config.features.extra_ps_present) {
++ DEBUG("skip KAT with extra_ps_present\n");
++ } else {
++ DEBUG("nist_trng: AES-128 Self-test output: ");
++ for (x = 0; x < 4; x++)
++ DEBUG("0x%08lx ", (unsigned long)out[x]);
++
++ if (nist_trng->config.build_cfg0.edu_present) {
++ if (nist_trng->config.edu_build_cfg0
++ .esm_channel) { //if esm_channel is available the first random number goes to esm
++ for (x = 0; x < 4; x++) {
++ if (out[x] != exp128[1][x])
++ ret = 1;
++ }
++ }
++ } else {
++ for (x = 0; x < 4; x++) {
++ if (out[x] != exp128[0][x])
++ ret = 1;
++ }
++ }
++
++ if (ret) {
++ SYNHW_PRINT("... FAILED comparison\n");
++ ret = -1;
++ goto ERR;
++ } else {
++ DEBUG("... PASSED\n");
++ }
++ }
++
++	// if the EDU is available, check all the public vTRNG channels
++ if (nist_trng->config.build_cfg0.edu_present) {
++ for (x = 0;
++ x < nist_trng->config.edu_build_cfg0.public_vtrng_channels;
++ x++) {
++ DEBUG("vtrng %d\n", x);
++ ret = nisttrng_generate_public_vtrng(nist_trng, out, 16, x);
++ if (ret)
++ goto ERR;
++
++ for (y = 0; y < 4; y++) {
++ DEBUG("0x%08lx ", (unsigned long)out[y]);
++ if (out[y] != exp128[x + 2][y])
++ ret = 1;
++ }
++ if (ret) {
++ SYNHW_PRINT("... FAILED comparison\n");
++ ret = -1;
++ goto ERR;
++ } else {
++ DEBUG("... PASSED\n");
++ }
++ }
++ }
++	// if the EDU is available, empty the FIFO before creating the new instance with strength of 256
++ if (nist_trng->config.build_cfg0.edu_present) {
++ nisttrng_rnc(nist_trng,
++ NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_FINISH_TO_IDLE);
++ tmp = NIST_TRNG_REG_ISTAT_DONE;
++ //always clear the busy bit after disabling RNC
++ pdu_io_write32(nist_trng->base + NIST_TRNG_REG_ISTAT, tmp);
++ tmp = pdu_io_read32(nist_trng->base + NIST_TRNG_REG_ISTAT);
++ do {
++ ret = nisttrng_generate_public_vtrng(nist_trng, out, 16, 0);
++ if (ret)
++ goto ERR;
++
++ tmp = pdu_io_read32(nist_trng->base +
++ NIST_TRNG_EDU_STAT);
++
++ } while (!NIST_TRNG_EDU_STAT_FIFO_EMPTY(tmp));
++ }
++
++ if (nist_trng->config.features.drbg_arch == AES256) {
++ // test AES-256 mode
++ DEBUG("Doing a self-test with security strength of 256\n");
++ ret = nisttrng_uninstantiate(nist_trng);
++ if (ret && ret != CRYPTO_NOT_INSTANTIATED)
++ goto ERR;
++
++ ret = nisttrng_set_nonce_mode(nist_trng, 1);
++ if (ret)
++ goto ERR;
++
++ ret = nisttrng_set_sec_strength(nist_trng, 256);
++ if (ret)
++ goto ERR;
++
++ ret = nisttrng_get_entropy_input(nist_trng, seed, 0);
++ if (ret)
++ goto ERR;
++
++ ret = nisttrng_instantiate(nist_trng, 256, 0, NULL);
++ if (ret)
++ goto ERR;
++
++ ret = nisttrng_generate(nist_trng, out, 16, 256, 0, NULL);
++ if (ret)
++ goto ERR;
++
++ if (nist_trng->config.features.extra_ps_present) {
++ DEBUG("skip KAT with extra_ps_present\n");
++ } else {
++ DEBUG("nist_trng: AES-256 Self-test output: ");
++ for (x = 0; x < 4; x++)
++ DEBUG("0x%08lx ", (unsigned long)out[x]);
++
++ for (x = 0; x < 4; x++) {
++ if (out[x] != exp256[0][x])
++ ret = 1;
++ }
++ if (ret) {
++ SYNHW_PRINT("... FAILED comparison\n");
++ ret = -1;
++ goto ERR;
++ } else {
++ DEBUG("... PASSED\n");
++ }
++ }
++ }
++
++	// if the EDU is available, check all the public vTRNG channels
++ if (nist_trng->config.build_cfg0.edu_present) {
++ for (x = 0;
++ x < nist_trng->config.edu_build_cfg0.public_vtrng_channels;
++ x++) {
++ DEBUG("vtrng 256 %d\n", x);
++ ret = nisttrng_generate_public_vtrng(nist_trng, out, 16, x);
++ if (ret)
++ goto ERR;
++
++ for (y = 0; y < 4; y++) {
++ DEBUG("0x%08lx ", (unsigned long)out[y]);
++ if (out[y] != exp256[x + 1][y])
++ ret = 1;
++ }
++ if (ret) {
++ SYNHW_PRINT("... FAILED comparison\n");
++ ret = -1;
++ goto ERR;
++ } else {
++ DEBUG("... PASSED\n");
++ }
++ }
++
++ //Test RBC channels
++ // enable RBC channels with rate of 2 and urun 1
++ enable = 1;
++ rate = 2;
++ urun = 1;
++ for (x = 0; x < nist_trng->config.edu_build_cfg0.rbc_channels;
++ x++) {
++ ret = nisttrng_rbc(nist_trng, enable, x, rate, urun);
++ if (ret)
++ goto ERR;
++
++ tmp = pdu_io_read32(nist_trng->base +
++ NIST_TRNG_EDU_RBC_CTRL);
++
++ switch (x) {
++ case 0:
++ if (rate != NISTTRNG_EDU_RBC_CTRL_GET_CH_RATE(tmp, _NIST_TRNG_EDU_RBC_CTRL_CH0_RATE) ||
++ urun != NISTTRNG_EDU_RBC_CTRL_GET_CH_URUN_BLANK(tmp, _NIST_TRNG_EDU_RBC_CTRL_CH0_URUN_BLANK)) {
++ goto ERR;
++ }
++ break;
++ case 1:
++ if (rate != NISTTRNG_EDU_RBC_CTRL_GET_CH_RATE(tmp, _NIST_TRNG_EDU_RBC_CTRL_CH1_RATE) ||
++ urun != NISTTRNG_EDU_RBC_CTRL_GET_CH_URUN_BLANK(tmp, _NIST_TRNG_EDU_RBC_CTRL_CH1_URUN_BLANK)) {
++ goto ERR;
++ }
++ break;
++ case 2:
++ if (rate != NISTTRNG_EDU_RBC_CTRL_GET_CH_RATE(tmp, _NIST_TRNG_EDU_RBC_CTRL_CH2_RATE) ||
++ urun != NISTTRNG_EDU_RBC_CTRL_GET_CH_URUN_BLANK(tmp, _NIST_TRNG_EDU_RBC_CTRL_CH2_URUN_BLANK)) {
++ goto ERR;
++ }
++ break;
++ default:
++ DEBUG("Incorrect rbc_num = %d\n", x);
++ goto ERR;
++ }
++ }
++ DEBUG("RBC test passed\n");
++ }
++
++	// If RNC is not disabled, disable it
++ if (pdu_io_read32(nist_trng->base + NIST_TRNG_EDU_RNC_CTRL) !=
++ NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_FINISH_TO_IDLE) {
++ nisttrng_rnc(nist_trng,
++ NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_FINISH_TO_IDLE);
++ tmp = NIST_TRNG_REG_ISTAT_DONE;
++ //always clear the busy bit after disabling RNC
++ pdu_io_write32(nist_trng->base + NIST_TRNG_REG_ISTAT, tmp);
++ }
++
++ /* back to the noise mode */
++ ret = nisttrng_set_nonce_mode(nist_trng, 0);
++ if (ret)
++ goto ERR;
++
++ ret = nisttrng_zeroize(nist_trng);
++ if (ret)
++ goto ERR;
++ERR:
++ return ret;
++}
++
++static int nisttrng_driver_probe(struct platform_device *pdev)
++{
++ struct synopsys_nisttrng_driver *data;
++ struct hwrng *hwrng_driver_info = 0;
++ struct resource *cfg, *irq;
++ u32 *base_addr;
++ int ret;
++
++ // version
++ SYNHW_PRINT("DWC_TRNG_DriverSDK_%s\n", TRNG_VERSION);
++
++ cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
++
++ if (!cfg || !irq) {
++ SYNHW_PRINT("no memory or IRQ resource\n");
++ return -ENOMEM;
++ }
++
++ DEBUG("=================================================================\n");
++ DEBUG("nisttrng_probe: Device at %08lx(%08lx) of size %lu bytes\n",
++ (unsigned long)cfg->start, (unsigned long)cfg->end,
++ (unsigned long)resource_size(cfg));
++
++ data = devm_kzalloc(&pdev->dev, sizeof(struct synopsys_nisttrng_driver),
++ GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ platform_set_drvdata(pdev, data);
++
++ base_addr = pdu_linux_map_regs(&pdev->dev, cfg);
++ if (IS_ERR(base_addr)) {
++ dev_err(&pdev->dev, "unable to remap io mem\n");
++ return PTR_ERR(base_addr);
++ }
++
++ ret = nisttrng_init(&data->nisttrng, (u32 *)base_addr);
++ if (ret) {
++ SYNHW_PRINT("NIST_TRNG init failed (%d)\n", ret);
++ devm_kfree(&pdev->dev, data);
++ return ret;
++ }
++
++ /* if max_reads is not 0, change the max_req_per_seed according to max_reads */
++ if (max_reads) {
++ ret = nisttrng_set_reminder_max_req_per_seed(&data->nisttrng, max_reads);
++ if (ret) {
++ SYNHW_PRINT("NIST_TRNG maximum request-per-seed setup failed (%d)\n",
++ ret);
++ devm_kfree(&pdev->dev, data);
++ return ret;
++ }
++ }
++
++ // issue quick self test
++ ret = nisttrng_self_test(&data->nisttrng);
++ if (ret) {
++ devm_kfree(&pdev->dev, data);
++ return -ENOMEM;
++ }
++
++ // ready the device for use
++ ret = nisttrng_instantiate(&data->nisttrng,
++ data->nisttrng.config.features.drbg_arch ? 256 : 128, 1, NULL);
++ if (ret) {
++ SYNHW_PRINT("NIST_TRNG instantiate failed (%d)\n", ret);
++ devm_kfree(&pdev->dev, data);
++ return -ENOMEM;
++ }
++
++ // at this point the device should be ready for a call to gen_random
++ hwrng_driver_info =
++ devm_kzalloc(&pdev->dev, sizeof(struct hwrng), GFP_KERNEL);
++ if (!hwrng_driver_info) {
++ devm_kfree(&pdev->dev, data);
++ return -ENOMEM;
++ }
++
++ hwrng_driver_info->name = devm_kzalloc(&pdev->dev,
++ sizeof(SYNOPSYS_HWRNG_DRIVER_NAME) + 1, GFP_KERNEL);
++ if (!hwrng_driver_info->name) {
++ devm_kfree(&pdev->dev, data);
++ devm_kfree(&pdev->dev, hwrng_driver_info);
++ return -ENOMEM;
++ }
++
++ memset((void *)hwrng_driver_info->name, 0,
++ sizeof(SYNOPSYS_HWRNG_DRIVER_NAME) + 1);
++ strscpy((char *)hwrng_driver_info->name, SYNOPSYS_HWRNG_DRIVER_NAME,
++ sizeof(SYNOPSYS_HWRNG_DRIVER_NAME));
++
++ hwrng_driver_info->read = &nisttrng_hwrng_driver_read;
++ hwrng_driver_info->data_present = 0;
++ hwrng_driver_info->priv = (unsigned long)pdev;
++ hwrng_driver_info->quality = 1024;
++
++ data->hwrng_drv = hwrng_driver_info;
++ ret = hwrng_register(hwrng_driver_info);
++
++ if (ret) {
++ SYNHW_PRINT("unable to load HWRNG driver (error %d)\n", ret);
++ devm_kfree(&pdev->dev, (void *)hwrng_driver_info->name);
++ devm_kfree(&pdev->dev, hwrng_driver_info);
++ devm_kfree(&pdev->dev, data);
++ return ret;
++ }
++
++ ret = sysfs_create_group(&pdev->dev.kobj, &nisttrng_attr_group);
++ if (ret < 0) {
++ SYNHW_PRINT("unable to initialize sysfs group (error %d)\n",
++ ret);
++ hwrng_unregister(hwrng_driver_info);
++ devm_kfree(&pdev->dev, (void *)hwrng_driver_info->name);
++ devm_kfree(&pdev->dev, hwrng_driver_info);
++ devm_kfree(&pdev->dev, data);
++ return ret;
++ }
++ SYNHW_PRINT("SYN NIST_TRNG registering HW_RANDOM\n");
++ return 0;
++}
++
++static int nisttrng_driver_remove(struct platform_device *pdev)
++{
++ struct synopsys_nisttrng_driver *data = platform_get_drvdata(pdev);
++ struct hwrng *hwrng_driver_info = (struct hwrng *)data->hwrng_drv;
++
++ SYNHW_PRINT("SYN NIST_TRNG unregistering from HW_RANDOM\n");
++ hwrng_unregister(hwrng_driver_info);
++ sysfs_remove_group(&pdev->dev.kobj, &nisttrng_attr_group);
++ devm_kfree(&pdev->dev, (void *)hwrng_driver_info->name);
++ devm_kfree(&pdev->dev, hwrng_driver_info);
++ devm_kfree(&pdev->dev, data);
++ return 0;
++}
++
++static struct platform_driver s_nisttrng_platform_driver_info = {
++ .probe = nisttrng_driver_probe,
++ .remove = nisttrng_driver_remove,
++ .driver = {
++ .name = "nist_trng",
++ .owner = THIS_MODULE,
++ },
++};
++
++static int __init nisttrng_platform_driver_start(void)
++{
++ return platform_driver_register(&s_nisttrng_platform_driver_info);
++}
++
++static void __exit nisttrng_platform_driver_end(void)
++{
++ platform_driver_unregister(&s_nisttrng_platform_driver_info);
++}
++
++module_init(nisttrng_platform_driver_start);
++module_exit(nisttrng_platform_driver_end);
++
++module_param(max_reads, ulong, 0);
++MODULE_PARM_DESC(max_reads, "Max # of reads between reseeds (default is 128)");
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Synopsys, Inc.");
+diff --git a/drivers/char/hw_random/dwc/src/trng/trng/nist_trng.c b/drivers/char/hw_random/dwc/src/trng/trng/nist_trng.c
+new file mode 100644
+index 000000000..8698a474d
+--- /dev/null
++++ b/drivers/char/hw_random/dwc/src/trng/trng/nist_trng.c
+@@ -0,0 +1,950 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * This Synopsys software and associated documentation (hereinafter the
++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you. The
++ * Software IS NOT an item of Licensed Software or a Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Products
++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark
++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the
++ * trademarks of their respective owners.
++ *
++ * The contents of this file are dual-licensed; you may select either version
++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license
++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the
++ * SOFTWARE. The BSD License is copied below.
++ *
++ * BSD-3-Clause License:
++ * Copyright (c) 2012-2016 Synopsys, Inc. and/or its affiliates.
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions, and the following disclaimer, without
++ * modification.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. The names of the above-listed copyright holders may not be used to
++ * endorse or promote products derived from this software without specific
++ * prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "nisttrng_hw.h"
++#include "nisttrng.h"
++
++/* Initialize the NIST_TRNG state structure */
++int nisttrng_init(struct nist_trng_state *state, u32 *base)
++{
++ int err;
++ u32 tmp;
++
++ DEBUG(">> %s: initialize the NIST_TRNG\n", __func__);
++
++ memset(state, 0, sizeof(*state));
++
++ state->base = base;
++
++ /* make sure there is no alarm and the core is not busy */
++ err = nisttrng_get_alarms(state);
++ if (err)
++ goto ERR;
++
++ err = nisttrng_wait_on_busy(state);
++ if (err)
++ goto ERR;
++
++ /* hardware features*/
++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_FEATURES);
++
++ state->config.features.drbg_arch = NIST_TRNG_REG_FEATURES_AES_256(tmp);
++ state->config.features.extra_ps_present =
++ NIST_TRNG_REG_FEATURES_EXTRA_PS_PRESENT(tmp);
++ state->config.features.secure_rst_state =
++ NIST_TRNG_REG_FEATURES_SECURE_RST_STATE(tmp);
++ state->config.features.diag_level_basic_trng =
++ NIST_TRNG_REG_FEATURES_DIAG_LEVEL_BASIC_TRNG(tmp);
++ state->config.features.diag_level_stat_hlt =
++ NIST_TRNG_REG_FEATURES_DIAG_LEVEL_ST_HLT(tmp);
++ state->config.features.diag_level_ns =
++ NIST_TRNG_REG_FEATURES_DIAG_LEVEL_NS(tmp);
++
++ /* corekit */
++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_COREKIT_REL);
++ state->config.corekit_rel.ext_enum = NIST_TRNG_REG_EXT_ENUM(tmp);
++ state->config.corekit_rel.ext_ver = NIST_TRNG_REG_EXT_VER(tmp);
++ state->config.corekit_rel.rel_num = NIST_TRNG_REG_REL_NUM(tmp);
++
++ /* clear registers */
++ pdu_io_write32(state->base + NIST_TRNG_REG_ALARM, 0xFFFFFFFF);
++ pdu_io_write32(state->base + NIST_TRNG_REG_ISTAT, 0xFFFFFFFF);
++
++	/* set up the NIST_TRNG in secure mode, self-seeding mode, with prediction resistance and the maximum possible security strength */
++ /* SMODE */
++ tmp = 0;
++ tmp = NIST_TRNG_REG_SMODE_SET_SECURE_EN(tmp, 1);
++ tmp = NIST_TRNG_REG_SMODE_SET_NONCE(tmp, 0);
++ tmp = NIST_TRNG_REG_SMODE_SET_MAX_REJECTS(tmp,
++ NIST_TRNG_DFLT_MAX_REJECTS);
++ pdu_io_write32(state->base + NIST_TRNG_REG_SMODE, tmp);
++ state->status.secure_mode = 1;
++ state->status.nonce_mode = 0;
++ /* MODE */
++ tmp = 0;
++ if (state->config.features.drbg_arch == AES256) {
++ tmp = NIST_TRNG_REG_MODE_SET_SEC_ALG(tmp, 1);
++ state->status.sec_strength = SEC_STRNT_AES256;
++
++ } else if (state->config.features.drbg_arch == AES128) {
++ tmp = NIST_TRNG_REG_MODE_SET_SEC_ALG(tmp, 0);
++ state->status.sec_strength = SEC_STRNT_AES128;
++
++ } else {
++ SYNHW_PRINT("Invalid DRBG architecture");
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ tmp = NIST_TRNG_REG_MODE_SET_PRED_RESIST(tmp, 1);
++ pdu_io_write32(state->base + NIST_TRNG_REG_MODE, 0);
++ state->status.pred_resist = 1;
++ /* rest of the status */
++ state->status.alarm_code = 0;
++ state->status.pad_ps_addin = 0;
++
++	/* reminders - set the counters to the standard's maximum values. An API is provided to change these on demand. */
++ nisttrng_set_reminder_max_bits_per_req(state,
++ NIST_DFLT_MAX_BITS_PER_REQ);
++ nisttrng_set_reminder_max_req_per_seed(state,
++ NIST_DFLT_MAX_REQ_PER_SEED);
++
++ /* display features */
++ SYNHW_PRINT("NIST_TRNG: Hardware rel_num=0x%x, ext_ver=0x%x, ext_enum=0x%x\n",
++ state->config.corekit_rel.rel_num,
++ state->config.corekit_rel.ext_ver,
++ state->config.corekit_rel.ext_enum);
++ switch (state->config.features.drbg_arch) {
++ case AES128:
++ DEBUG("NIST_TRNG: DRBG Architecture=128-bit AES, Extra Personalization Existence=%u\n",
++ state->config.features.extra_ps_present);
++ break;
++ case AES256:
++ DEBUG("NIST_TRNG: DRBG Architecture=256-bit AES, Extra Personalization Existence=%u\n",
++ state->config.features.extra_ps_present);
++ break;
++ default:
++ SYNHW_PRINT("Invalid DRBG architecture");
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ DEBUG("initialization is done, going for a zeroize\n");
++
++ // BUILD_CFG0
++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_BUILD_CFG0);
++ state->config.build_cfg0.core_type = NIST_TRNG_REG_CFG0_CORE_TYPE(tmp);
++ state->config.build_cfg0.bg8 = NIST_TRNG_REG_CFG0_BG8(tmp);
++ state->config.build_cfg0.cdc_synch_depth =
++ NIST_TRNG_REG_CFG0_CDC_SYNCH_DEPTH(tmp);
++ state->config.build_cfg0.background_noise =
++ NIST_TRNG_REG_CFG0_BACGROUND_NOISE(tmp);
++ state->config.build_cfg0.edu_present =
++ NIST_TRNG_REG_CFG0_EDU_PRESENT(tmp);
++ state->config.build_cfg0.aes_datapath =
++ NIST_TRNG_REG_CFG0_AES_DATAPATH(tmp);
++ state->config.build_cfg0.aes_max_key_size =
++ NIST_TRNG_REG_CFG0_AES_MAX_KEY_SIZE(tmp);
++ state->config.build_cfg0.personilzation_str =
++ NIST_TRNG_REG_CFG0_PERSONILIZATION_STR(tmp);
++ DEBUG("NIST_TRNG: BUILD_CFG0 core_type=%u, bg8=%u, cdc_synch_depth=%u, background_noise=%u\n",
++ state->config.build_cfg0.core_type, state->config.build_cfg0.bg8,
++ state->config.build_cfg0.cdc_synch_depth,
++ state->config.build_cfg0.background_noise);
++ DEBUG("edu_present=%u, aes_datapath=%u, aes_max_key_size=%u, personilzation_str=%u\n",
++ state->config.build_cfg0.edu_present,
++ state->config.build_cfg0.aes_datapath,
++ state->config.build_cfg0.aes_max_key_size,
++ state->config.build_cfg0.personilzation_str);
++
++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_BUILD_CFG1);
++ DEBUG("NIST_TRNG: NIST_TRNG_REG_BUILD_CFG1=0x%x\n", tmp);
++ state->config.build_cfg1.num_raw_noise_blks =
++ NIST_TRNG_REG_CFG1_NUM_RAW_NOISE_BLKS(tmp);
++ state->config.build_cfg1.sticky_startup =
++ NIST_TRNG_REG_CFG1_STICKY_STARTUP(tmp);
++ state->config.build_cfg1.auto_correlation_test =
++ NIST_TRNG_REG_CFG1_AUTO_CORRELATION_TEST(tmp);
++ state->config.build_cfg1.mono_bit_test =
++ NIST_TRNG_REG_CFG1_MONO_BIT_TEST(tmp);
++ state->config.build_cfg1.run_test = NIST_TRNG_REG_CFG1_RUN_TEST(tmp);
++ state->config.build_cfg1.poker_test =
++ NIST_TRNG_REG_CFG1_POKER_TEST(tmp);
++ state->config.build_cfg1.raw_ht_adap_test =
++ NIST_TRNG_REG_CFG1_RAW_HT_ADAP_TEST(tmp);
++ state->config.build_cfg1.raw_ht_rep_test =
++ NIST_TRNG_REG_CFG1_RAW_HT_REP_TEST(tmp);
++ state->config.build_cfg1.ent_src_rep_smpl_size =
++ NIST_TRNG_REG_CFG1_ENT_SRC_REP_SMPL_SIZE(tmp);
++ state->config.build_cfg1.ent_src_rep_test =
++ NIST_TRNG_REG_CFG1_ENT_SRC_REP_TEST(tmp);
++ state->config.build_cfg1.ent_src_rep_min_entropy =
++ NIST_TRNG_REG_CFG1_ENT_SRC_REP_MIN_ENTROPY(tmp);
++ DEBUG("NIST_TRNG: BUILD_CFG1 num_raw_noise_blks=%u, sticky_startup=%u, auto_correlation_test=%u\n",
++ state->config.build_cfg1.num_raw_noise_blks,
++ state->config.build_cfg1.sticky_startup,
++ state->config.build_cfg1.auto_correlation_test);
++ DEBUG("mono_bit_test=%u, run_test=%u, poker_test=%u, raw_ht_adap_test=%u\n",
++ state->config.build_cfg1.mono_bit_test,
++ state->config.build_cfg1.run_test,
++ state->config.build_cfg1.poker_test,
++ state->config.build_cfg1.raw_ht_adap_test);
++ DEBUG("raw_ht_rep_test=%u, ent_src_rep_smpl_size=%u, ent_src_rep_test=%u, ent_src_rep_min_entropy=%u\n",
++ state->config.build_cfg1.raw_ht_rep_test,
++ state->config.build_cfg1.ent_src_rep_smpl_size,
++ state->config.build_cfg1.ent_src_rep_test,
++ state->config.build_cfg1.ent_src_rep_min_entropy);
++
++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_BUILD_CFG0);
++ state->config.edu_build_cfg0.rbc2_rate_width =
++ NIST_TRNG_REG_EDU_CFG0_RBC2_RATE_WIDTH(tmp);
++ state->config.edu_build_cfg0.rbc1_rate_width =
++ NIST_TRNG_REG_EDU_CFG0_RBC1_RATE_WIDTH(tmp);
++ state->config.edu_build_cfg0.rbc0_rate_width =
++ NIST_TRNG_REG_EDU_CFG0_RBC0_RATE_WIDTH(tmp);
++ state->config.edu_build_cfg0.public_vtrng_channels =
++ NIST_TRNG_REG_EDU_CFG0_PUBLIC_VTRNG_CHANNELS(tmp);
++ state->config.edu_build_cfg0.esm_channel =
++ NIST_TRNG_REG_EDU_CFG0_ESM_CHANNEL(tmp);
++ state->config.edu_build_cfg0.rbc_channels =
++ NIST_TRNG_REG_EDU_CFG0_RBC_CHANNELS(tmp);
++ state->config.edu_build_cfg0.fifo_depth =
++ NIST_TRNG_REG_EDU_CFG0_FIFO_DEPTH(tmp);
++ DEBUG("NIST_TRNG: EDU_BUILD_CFG0 rbc2_rate_width=%u, rbc1_rate_width=%u, rbc0_rate_width=%u\n",
++ state->config.edu_build_cfg0.rbc2_rate_width,
++ state->config.edu_build_cfg0.rbc1_rate_width,
++ state->config.edu_build_cfg0.rbc0_rate_width);
++ DEBUG("public_vtrng_channels=%u, esm_channel=%u, rbc_channels=%u, fifo_depth=%u\n",
++ state->config.edu_build_cfg0.public_vtrng_channels,
++ state->config.edu_build_cfg0.esm_channel,
++ state->config.edu_build_cfg0.rbc_channels,
++ state->config.edu_build_cfg0.fifo_depth);
++
++ state->status.edu_vstat.seed_enum =
++ NIST_TRNG_REG_EDU_VSTAT_SEED_ENUM(tmp);
++ state->status.edu_vstat.rnc_enabled =
++ NIST_TRNG_REG_EDU_VSTAT_RNC_ENABLED(tmp);
++
++ err = nisttrng_zeroize(state);
++ if (err)
++ goto ERR;
++
++ err = CRYPTO_OK;
++ state->status.current_state = NIST_TRNG_STATE_INITIALIZE;
++ERR:
++ DEBUG("--- %s Return, err = %i\n", __func__, err);
++ return err;
++} /* nisttrng_init */
++EXPORT_SYMBOL(nisttrng_init);
++
++/* Instantiate the DRBG state */
++int nisttrng_instantiate(struct nist_trng_state *state, int req_sec_strength,
++ int pred_resist, void *personal_str)
++{
++ int err;
++ u32 tmp;
++ u32 zero_ps[12] = { 0 };
++ int i = 0;
++
++	DEBUG(">> %s: security strength = %u, pred_resist = %u, personalization string existence = %u\n",
++ __func__, req_sec_strength, pred_resist, (personal_str) ? 1 : 0);
++
++ /* make sure there is no alarm and the core is not busy */
++ err = nisttrng_get_alarms(state);
++ if (err)
++ goto ERR;
++
++ err = nisttrng_wait_on_busy(state);
++ if (err)
++ goto ERR;
++
++ /* If DRBG is already instantiated or if current state does not allow an instantiate, return error */
++ if (DRBG_INSTANTIATED(state->status.current_state)) {
++ DEBUG("Initial check: DRBG state is already instantiated\n");
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++ if (state->status.current_state != NIST_TRNG_STATE_INITIALIZE &&
++ state->status.current_state != NIST_TRNG_STATE_UNINSTANTIATE) {
++ DEBUG("Cannot instantiate in the current state (%u)\n",
++ state->status.current_state);
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ /* if hardware is not configured to accept extra personalization string, but personal_str is not NULL, return error */
++ if (!state->config.features.extra_ps_present && personal_str) {
++ DEBUG("HW config does not allow extra PS\n");
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ /* Validate and set the security strength */
++ err = nisttrng_set_sec_strength(state, req_sec_strength);
++ if (err)
++ goto ERR;
++
++ /* get entropy - noise seeding. If the mode is nonce, get_entropy must be called by the user prior to the instantiate function */
++ DEBUG("Seeding mode is: %s\n",
++ state->status.nonce_mode ? "Nonce" : "Noise");
++ if (!state->status.nonce_mode) { /* noise seeding */
++ err = nisttrng_get_entropy_input(state, NULL, 0);
++ if (err)
++ goto ERR;
++ }
++
++	/* load the personalization string if hardware is configured to accept it */
++	if (state->config.features.extra_ps_present) {
++		/* if HW is configured to accept a personalization string, it will use whatever is in the NPA_DATAx. So, if the string is NULL, just load 0. */
++ if (!personal_str)
++ personal_str = &zero_ps[0];
++
++ err = nisttrng_load_ps_addin(state, personal_str);
++ if (err)
++ goto ERR;
++ }
++
++ /* initiate the Create_State command and wait on done */
++ DEBUG("Create the DRBG state\n");
++
++ pdu_io_write32(state->base + NIST_TRNG_REG_CTRL,
++ NIST_TRNG_REG_CTRL_CMD_CREATE_STATE);
++ err = nisttrng_wait_on_done(state);
++ if (err)
++ goto ERR;
++
++ /* check STAT register to make sure DRBG is instantiated */
++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_STAT);
++ if (!NIST_TRNG_REG_STAT_GET_DRBG_STATE(tmp)) {
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ /* reset reminder and alarms counters */
++ nisttrng_reset_counters(state);
++
++	//if EDU is available, enable RNC, disable prediction resistance, and disable all RBCs
++ //state->config.build_cfg0.edu_present = 0;
++ if (state->config.build_cfg0.edu_present) {
++ //disable prediction resistance
++ err = nisttrng_set_pred_resist(state, 0);
++ if (err)
++ goto ERR;
++
++ //enable RNC
++ nisttrng_rnc(state, NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_ENABLE);
++		// disable all RBCs
++
++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_RBC_CTRL);
++ for (i = 0; i < state->config.edu_build_cfg0.rbc_channels;
++ i++) {
++ err = nisttrng_rbc(state, 0, i, 0,
++ CHX_URUN_BLANK_AFTER_RESET);
++ if (err)
++ goto ERR;
++ }
++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_RBC_CTRL);
++
++ } else {
++ /* set the prediction resistance */
++ err = nisttrng_set_pred_resist(state, pred_resist);
++ if (err)
++ goto ERR;
++ }
++
++ err = CRYPTO_OK;
++ state->status.current_state = NIST_TRNG_STATE_INSTANTIATE;
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++} /* nisttrng_instantiate */
++EXPORT_SYMBOL(nisttrng_instantiate);
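++
++/*
++ * Illustrative usage sketch, not part of the original SDK sources: how a
++ * caller might instantiate with an extra personalization string when the
++ * hardware build supports one. The wrapper name and the 12-word buffer
++ * contents are assumptions for the example; 12 words cover the largest
++ * (384-bit) string consumed by nisttrng_load_ps_addin().
++ */
++static int __maybe_unused nisttrng_example_instantiate_ps(struct nist_trng_state *state)
++{
++	u32 ps[12] = { 0x01234567, 0x89abcdef }; /* example personalization data */
++
++	/* fall back to no personalization string if the HW cannot take one */
++	if (!state->config.features.extra_ps_present)
++		return nisttrng_instantiate(state, 128, 0, NULL);
++
++	/* 128-bit strength, prediction resistance off */
++	return nisttrng_instantiate(state, 128, 0, ps);
++}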
++
++/* Uninstantiate the DRBG state and zeroize */
++int nisttrng_uninstantiate(struct nist_trng_state *state)
++{
++ int err;
++ int err_tmp;
++ u32 tmp;
++
++ DEBUG(">> %s: uninstantiate the DRBG and zeroize\n", __func__);
++ //printf(" nisttrng_uninstantiate: uninstantiate the DRBG and zeroize\n");
++ err = CRYPTO_OK;
++ err_tmp = CRYPTO_OK;
++
++ //disable RNC
++ if (state->config.build_cfg0.edu_present) {
++ if (state->status.edu_vstat.rnc_enabled) {
++ DEBUG("%s: disable RNC\n", __func__);
++ nisttrng_rnc(state, NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_FINISH_TO_IDLE);
++			/* clear any pending ISTAT bits after disabling RNC
++			 * (ISTAT is write-1-to-clear, so read the current
++			 * value back and write it)
++			 */
++			tmp = pdu_io_read32(state->base + NIST_TRNG_REG_ISTAT);
++			pdu_io_write32(state->base + NIST_TRNG_REG_ISTAT, tmp);
++ }
++ }
++
++	/* if the DRBG is not instantiated, return CRYPTO_NOT_INSTANTIATED, but still do the zeroize */
++ if (!DRBG_INSTANTIATED(state->status.current_state))
++ err_tmp = CRYPTO_NOT_INSTANTIATED;
++
++ /* zeroize */
++ err = nisttrng_zeroize(state);
++ if (err)
++ goto ERR;
++
++ if (err == CRYPTO_OK && err_tmp == CRYPTO_NOT_INSTANTIATED)
++ err = CRYPTO_NOT_INSTANTIATED;
++
++ state->status.current_state = NIST_TRNG_STATE_UNINSTANTIATE;
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++} /* nisttrng_uninstantiate */
++EXPORT_SYMBOL(nisttrng_uninstantiate);
++
++/* enable/disable specific rbc
++ * rbc_num = rbc channel num
++ * urun_blnk = underrun blanking duration for rbc channel
++ * rate = sets rate of serial entropy output for rbc channel
++ */
++int nisttrng_rbc(struct nist_trng_state *state, int enable, int rbc_num, int rate,
++ int urun_blnk)
++{
++ int err = 0;
++ u32 tmp_rbc = 0;
++
++ tmp_rbc = pdu_io_read32(state->base + NIST_TRNG_EDU_RBC_CTRL);
++
++ if (enable) {
++ if (rate > 15) {
++ DEBUG("Incorrect rate = %d\n", rate);
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++ if (urun_blnk > 3) {
++ DEBUG("Incorrect urun_blnk = %d\n", urun_blnk);
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++ } else { //disable
++ rate = NISTTRNG_EDU_RBC_CTRL_GET_CH_RATE_AFTER_RESET;
++ urun_blnk = NISTTRNG_EDU_RBC_CTRL_SET_CH_URUN_BLANK_AFTER_RESET;
++ }
++
++ switch (rbc_num) {
++ case 0:
++ tmp_rbc = NISTTRNG_EDU_RBC_CTRL_SET_CH_RATE(rate, tmp_rbc, _NIST_TRNG_EDU_RBC_CTRL_CH0_RATE);
++ tmp_rbc = NISTTRNG_EDU_RBC_CTRL_SET_CH_URUN_BLANK(urun_blnk, tmp_rbc,
++ _NIST_TRNG_EDU_RBC_CTRL_CH0_URUN_BLANK);
++
++ break;
++ case 1:
++ tmp_rbc = NISTTRNG_EDU_RBC_CTRL_SET_CH_RATE(rate, tmp_rbc, _NIST_TRNG_EDU_RBC_CTRL_CH1_RATE);
++ tmp_rbc = NISTTRNG_EDU_RBC_CTRL_SET_CH_URUN_BLANK(urun_blnk, tmp_rbc,
++ _NIST_TRNG_EDU_RBC_CTRL_CH1_URUN_BLANK);
++
++ break;
++ case 2:
++ tmp_rbc = NISTTRNG_EDU_RBC_CTRL_SET_CH_RATE(rate, tmp_rbc, _NIST_TRNG_EDU_RBC_CTRL_CH2_RATE);
++ tmp_rbc = NISTTRNG_EDU_RBC_CTRL_SET_CH_URUN_BLANK(urun_blnk, tmp_rbc,
++ _NIST_TRNG_EDU_RBC_CTRL_CH2_URUN_BLANK);
++ break;
++ default:
++ DEBUG("Incorrect rbc_num = %d\n", rbc_num);
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ pdu_io_write32(state->base + NIST_TRNG_EDU_RBC_CTRL, tmp_rbc);
++
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++}
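++
++/*
++ * Illustrative sketch, not part of the original SDK sources: enabling RBC
++ * channel 0 with in-range parameters and then disabling it again. The rate
++ * and underrun-blanking values are arbitrary examples; as checked above,
++ * valid ranges are rate 0..15 and urun_blnk 0..3.
++ */
++static int __maybe_unused nisttrng_example_rbc_ch0(struct nist_trng_state *state)
++{
++	int err;
++
++	/* enable channel 0: rate = 4, underrun blanking = 1 */
++	err = nisttrng_rbc(state, 1, 0, 4, 1);
++	if (err)
++		return err;
++
++	/* disable channel 0 again; rate/blanking revert to their reset values */
++	return nisttrng_rbc(state, 0, 0, 0, 0);
++}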
++
++/* Reseed */
++int nisttrng_reseed(struct nist_trng_state *state, int pred_resist, void *addin_str)
++{
++ int rnc_flag = 0;
++ int err;
++
++	DEBUG(">> %s: pred_resist = %u, additional string existence = %u\n",
++ __func__, pred_resist, (addin_str) ? 1 : 0);
++
++ if (state->config.build_cfg0.edu_present) {
++ if (state->status.edu_vstat.rnc_enabled) {
++ // disable_rnc
++ err = nisttrng_rnc(state, NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_DISABLE_TO_HOLD);
++ if (err)
++ goto ERR;
++
++ rnc_flag = 1;
++ }
++ }
++
++ /* make sure there is no alarm and the core is not busy */
++ err = nisttrng_get_alarms(state);
++ if (err)
++ goto ERR;
++
++ err = nisttrng_wait_on_busy(state);
++ if (err)
++ goto ERR;
++
++ /* if the DRBG is not instantiated return error */
++ if (!DRBG_INSTANTIATED(state->status.current_state)) {
++ DEBUG("DRBG is not instantiated\n");
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++	/* if pred_resist is requested but the DRBG was not instantiated with prediction resistance, return error */
++ err = nisttrng_set_pred_resist(state, pred_resist);
++ if (err)
++ goto ERR;
++
++	/* get entropy - noise seeding. If the mode is nonce, get_entropy must be called by the user prior to this reseed call */
++ if (!state->status.nonce_mode) { /* noise seeding */
++ err = nisttrng_get_entropy_input(state, NULL, 0);
++ if (err)
++ goto ERR;
++ }
++
++	/* if addin_str is not NULL, it means that the additional input is available and has to be loaded */
++ if (addin_str) {
++ /* set the ADDIN_PRESENT field of the MODE register to 1 */
++ err = nisttrng_set_addin_present(state, 1);
++ if (err)
++ goto ERR;
++
++ /* load the additional input */
++ err = nisttrng_load_ps_addin(state, addin_str);
++ if (err)
++ goto ERR;
++
++ } else {
++ /* set the ADDIN_PRESENT field of the MODE register to 0 */
++ err = nisttrng_set_addin_present(state, 0);
++ if (err)
++ goto ERR;
++ }
++
++ /* initiate the reseed and wait on done */
++ pdu_io_write32(state->base + NIST_TRNG_REG_CTRL,
++ NIST_TRNG_REG_CTRL_CMD_RENEW_STATE);
++ err = nisttrng_wait_on_done(state);
++ if (err)
++ goto ERR;
++
++ /* reset reminder and alarms counters */
++ nisttrng_reset_counters(state);
++
++ if (rnc_flag) {
++ // rnc_enable
++ err = nisttrng_rnc(state, NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_ENABLE);
++ if (err)
++ goto ERR;
++ }
++
++ err = CRYPTO_OK;
++ state->status.current_state = NIST_TRNG_STATE_RESEED;
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++} /* nisttrng_reseed */
++EXPORT_SYMBOL(nisttrng_reseed);
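++
++/*
++ * Illustrative sketch, not part of the original SDK sources: forcing an
++ * explicit reseed and mixing in an additional-input buffer. The 12-word
++ * buffer matches the largest (384-bit) string consumed by
++ * nisttrng_load_ps_addin(); its contents here are placeholders.
++ */
++static int __maybe_unused nisttrng_example_reseed(struct nist_trng_state *state)
++{
++	u32 addin[12] = { 0 }; /* placeholder additional input */
++
++	/* no prediction resistance, mix in the additional input */
++	return nisttrng_reseed(state, 0, addin);
++}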
++
++int nisttrng_vtrng_wait_on_busy(struct nist_trng_state *state, int priv, int vtrng)
++{
++ u32 tmp, t;
++
++ t = NIST_TRNG_RETRY_MAX;
++
++ if (priv) { //private vtrng
++ do {
++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_VSTAT);
++ } while (NIST_TRNG_REG_EDU_VSTAT_BUSY(tmp) && --t);
++
++ } else { //public vtrng
++ do {
++ tmp = pdu_io_read32(state->base +
++ NIST_TRNG_EDU_VTRNG_VSTAT0 +
++ 8 * vtrng);
++ } while (NIST_TRNG_REG_EDU_VSTAT_BUSY(tmp) && --t);
++ }
++
++ if (t)
++ return CRYPTO_OK;
++
++	SYNHW_PRINT("vtrng_wait_on_busy: failed timeout: %08lx\n",
++ (unsigned long)tmp);
++
++ return CRYPTO_TIMEOUT;
++} /* nisttrng_vtrng_wait_on_busy */
++
++int nisttrng_generate_public_vtrng(struct nist_trng_state *state, void *random_bits,
++ unsigned long req_num_bytes, int vtrng)
++{
++ int err = 0;
++ u32 tmp;
++ unsigned int remained_bytes;
++ unsigned long req_num_blks;
++ int i, j;
++
++ DEBUG(">> %s : requested number of bytes = %lu, vtrng num = %u\n",
++ __func__, req_num_bytes, vtrng);
++
++ /* make sure random_bits is not NULL */
++ if (!random_bits) {
++ DEBUG("random_bits pointer cannot be NULL\n");
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++	/* valid public vtrng channels are 0 .. public_vtrng_channels - 1 */
++	if (vtrng >= state->config.edu_build_cfg0.public_vtrng_channels) {
++ DEBUG("vtrng channel invalid (%u)\n", vtrng);
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ if (state->status.edu_vstat.rnc_enabled == 0)
++ DEBUG("rnc_disabled\n");
++
++ if (state->status.edu_vstat.seed_enum == 0)
++ DEBUG("not seed_enum\n");
++
++ /* loop on generate to get the requested number of bits. Each generate gives NIST_TRNG_RAND_BLK_SIZE_BITS bits. */
++ req_num_blks = ((req_num_bytes * 8) % NIST_TRNG_RAND_BLK_SIZE_BITS) ?
++ (((req_num_bytes * 8) / NIST_TRNG_RAND_BLK_SIZE_BITS) + 1) :
++ ((req_num_bytes * 8) / NIST_TRNG_RAND_BLK_SIZE_BITS);
++
++ for (i = 0; i < req_num_blks; i++) {
++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_VTRNG_VCTRL0 +
++ (vtrng * 8));
++ tmp = NIST_TRNG_EDU_VTRNG_VCTRL_CMD_SET(tmp, NIST_TRNG_EDU_VTRNG_VCTRL_CMD_GET_RANDOM);
++ pdu_io_write32(state->base + NIST_TRNG_EDU_VTRNG_VCTRL0 + (vtrng * 8),
++ tmp);
++
++ // check busy
++ err = nisttrng_vtrng_wait_on_busy(state, 0, vtrng);
++ if (err)
++ goto ERR;
++
++ // check for error
++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_VTRNG_VISTAT0 +
++ (vtrng * 8));
++ if (NIST_TRNG_REG_EDU_VSTAT_ANY_RW1(tmp))
++ DEBUG("EDU_VSTAT_ANY_RW1 set 0x%x\n", tmp);
++
++ // check that all valid
++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_VTRNG_VSTAT0 +
++ 8 * vtrng);
++ if ((NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD0(tmp) == 0) ||
++ (NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD1(tmp) == 0) ||
++ (NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD2(tmp) == 0) ||
++ (NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD3(tmp) == 0)) {
++ DEBUG("EDU_VSTAT_SLICE_VLD fail 0x%x\n", tmp);
++ }
++
++ /* read the generated random number block and store */
++ for (j = 0; j < (NIST_TRNG_RAND_BLK_SIZE_BITS / 32); j++) {
++ tmp = pdu_io_read32(state->base +
++ NIST_TRNG_EDU_VTRNG_VRAND0_0 +
++ (vtrng * 8) + j);
++ /* copy to random_bits byte-by-byte, until req_num_bytes are copied */
++ remained_bytes = req_num_bytes -
++ (i * (NIST_TRNG_RAND_BLK_SIZE_BITS / 8) +
++ j * 4);
++ if (remained_bytes > 4) {
++ memcpy(random_bits + i * (NIST_TRNG_RAND_BLK_SIZE_BITS / 8) +
++ j * 4, &tmp, 4);
++
++ /* decrement the bits counter and return error if generated more than the maximum*/
++ state->counters.bits_per_req_left =
++ state->counters.bits_per_req_left -
++ 4 * 8;
++ if (state->counters.bits_per_req_left < 0) {
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++ } else {
++ memcpy(random_bits + i * (NIST_TRNG_RAND_BLK_SIZE_BITS / 8) +
++ j * 4, &tmp, remained_bytes);
++
++ /* decrement the bits counter and return error if generated more than the maximum*/
++ state->counters.bits_per_req_left =
++ state->counters.bits_per_req_left -
++ remained_bytes * 8;
++ if (state->counters.bits_per_req_left < 0) {
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++ break;
++ }
++ }
++ }
++
++ err = CRYPTO_OK;
++ state->status.current_state = NIST_TRNG_STATE_GENERATE;
++ERR:
++ if (err)
++ random_bits = NULL;
++
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++}
++
++int nisttrng_generate_private_vtrng(struct nist_trng_state *state, void *random_bits,
++ unsigned long req_num_bytes)
++{
++ int err;
++ u32 tmp;
++ unsigned int remained_bytes;
++ unsigned long req_num_blks;
++ int i, j;
++
++	DEBUG(">> %s : requested number of bytes = %lu\n",
++ __func__, req_num_bytes);
++
++	/* requested number of bits has to be less than the programmed maximum */
++ if ((req_num_bytes * 8) > state->counters.max_bits_per_req) {
++ SYNHW_PRINT("requested number of bits (%lu) is larger than the set maximum (%lu)\n",
++ (req_num_bytes * 8), state->counters.max_bits_per_req);
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ /* make sure random_bits is not NULL */
++ if (!random_bits) {
++ SYNHW_PRINT("random_bits pointer cannot be NULL\n");
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ if (state->status.edu_vstat.rnc_enabled == 0)
++ DEBUG("rnc_disabled\n");
++
++ if (state->status.edu_vstat.seed_enum == 0)
++ DEBUG("not seed_enum\n");
++
++ /* loop on generate to get the requested number of bits. Each generate gives NIST_TRNG_RAND_BLK_SIZE_BITS bits. */
++ req_num_blks = ((req_num_bytes * 8) % NIST_TRNG_RAND_BLK_SIZE_BITS) ?
++ (((req_num_bytes * 8) / NIST_TRNG_RAND_BLK_SIZE_BITS) + 1) :
++ ((req_num_bytes * 8) / NIST_TRNG_RAND_BLK_SIZE_BITS);
++
++ for (i = 0; i < req_num_blks; i++) {
++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_VCTRL);
++ tmp = NIST_TRNG_EDU_VTRNG_VCTRL_CMD_SET(tmp, NIST_TRNG_EDU_VTRNG_VCTRL_CMD_GET_RANDOM);
++ pdu_io_write32(state->base + NIST_TRNG_EDU_VCTRL, tmp);
++
++ // check busy
++ err = nisttrng_vtrng_wait_on_busy(state, 1, 0);
++ if (err)
++ goto ERR;
++
++ // check for error
++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_VISTAT);
++ if (NIST_TRNG_REG_EDU_VSTAT_ANY_RW1(tmp))
++ DEBUG("EDU_VSTAT_ANY_RW1 set 0x%x\n", tmp);
++
++ //check that all valid
++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_VSTAT);
++ if ((NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD0(tmp) == 0) ||
++ (NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD1(tmp) == 0) ||
++ (NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD2(tmp) == 0) ||
++ (NIST_TRNG_REG_EDU_VSTAT_SLICE_VLD3(tmp) == 0)) {
++ DEBUG("EDU_VSTAT_SLICE_VLD fail 0x%x\n", tmp);
++ }
++
++ /* read the generated random number block and store */
++ for (j = 0; j < (NIST_TRNG_RAND_BLK_SIZE_BITS / 32); j++) {
++ tmp = pdu_io_read32(state->base +
++ NIST_TRNG_EDU_VRAND_0 + j);
++ /* copy to random_bits byte-by-byte, until req_num_bytes are copied */
++ remained_bytes = req_num_bytes -
++ (i * (NIST_TRNG_RAND_BLK_SIZE_BITS / 8) +
++ j * 4);
++ if (remained_bytes > 4) {
++ memcpy(random_bits + i * (NIST_TRNG_RAND_BLK_SIZE_BITS / 8) +
++ j * 4, &tmp, 4);
++
++ /* decrement the bits counter and return error if generated more than the maximum*/
++ state->counters.bits_per_req_left =
++ state->counters.bits_per_req_left -
++ 4 * 8;
++ if (state->counters.bits_per_req_left < 0) {
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++ } else {
++ memcpy(random_bits + i * (NIST_TRNG_RAND_BLK_SIZE_BITS / 8) +
++ j * 4, &tmp, remained_bytes);
++
++ /* decrement the bits counter and return error if generated more than the maximum*/
++ state->counters.bits_per_req_left =
++ state->counters.bits_per_req_left -
++ remained_bytes * 8;
++ if (state->counters.bits_per_req_left < 0) {
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++ break;
++ }
++ }
++	}
++
++	err = CRYPTO_OK;
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++}
++
++/* Generate */
++int nisttrng_generate(struct nist_trng_state *state, void *random_bits,
++ unsigned long req_num_bytes, int req_sec_strength,
++ int pred_resist, void *addin_str)
++{
++ int err;
++ int reseed_required;
++
++ DEBUG(">> %s: requested number of bytes = %lu, security strength = %u, pred_resist = %u, additional string existence = %u\n",
++ __func__, req_num_bytes, req_sec_strength, pred_resist,
++ (addin_str) ? 1 : 0);
++
++ /* make sure there is no alarm and the core is not busy */
++ err = nisttrng_get_alarms(state);
++ if (err)
++ goto ERR;
++
++ err = nisttrng_wait_on_busy(state);
++ if (err)
++ goto ERR;
++
++ /* if the DRBG is not instantiated return error */
++ if (!DRBG_INSTANTIATED(state->status.current_state)) {
++ SYNHW_PRINT("DRBG is not instantiated\n");
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++	/* requested number of bits has to be less than the programmed maximum */
++ if ((req_num_bytes * 8) > state->counters.max_bits_per_req) {
++ SYNHW_PRINT("requested number of bits (%lu) is larger than the set maximum (%lu)\n",
++ (req_num_bytes * 8), state->counters.max_bits_per_req);
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++	/* security strength has to be no higher than what the DRBG is instantiated with. The set_sec_strength function checks this. */
++ err = nisttrng_set_sec_strength(state, req_sec_strength);
++ if (err)
++ goto ERR;
++
++ /* set the prediction resistance - if pred_resist is set but, pred_resist that the DRBG is instantiated with is not 1, return error */
++ err = nisttrng_set_pred_resist(state, pred_resist);
++ if (err)
++ goto ERR;
++
++ /* make sure random_bits is not NULL */
++ if (!random_bits) {
++ DEBUG("random_bits pointer cannot be NULL\n");
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++	/* set the reseed_required flag to 0. The loop below checks at the end whether a reseed is required and, if so, jumps back to reseed and generate. This is the NIST-mandated procedure */
++ reseed_required = 0;
++
++ if (!addin_str) {
++		/* set the ADDIN_PRESENT field of the MODE register to 0 */
++ err = nisttrng_set_addin_present(state, 0);
++ if (err)
++ goto ERR;
++ }
++
++ do {
++ void *generate_addin_str = addin_str;
++
++		if (pred_resist || reseed_required) {
++ err = nisttrng_reseed(state, pred_resist, addin_str);
++ if (err)
++ goto ERR;
++
++ /* SP800-90a says that if reseed is executed, any additional input string is only used in the reseed phase and replaced by NULL in the generate phase */
++ generate_addin_str = NULL;
++ err = nisttrng_set_addin_present(state, 0);
++ if (err)
++ goto ERR;
++
++ /* ADDIN_PRESENT field in MODE has to be set back to 0 to avoid illegal cmd sequence */
++ reseed_required = 0;
++ }
++
++ /* generate process */
++ if (nisttrng_check_seed_lifetime(state) == CRYPTO_RESEED_REQUIRED) {
++ reseed_required = 1;
++
++ } else {
++ reseed_required = 0;
++
++ /* Refresh_Addin command if additional input is not NULL*/
++ if (generate_addin_str) {
++ err = nisttrng_refresh_addin(state, generate_addin_str);
++ if (err)
++ goto ERR;
++ }
++
++ /* Generate all random bits */
++ /* if EDU present then get random number from private vtrng */
++
++ //state->config.build_cfg0.edu_present = 0;
++ if (state->config.build_cfg0.edu_present) {
++ err = nisttrng_generate_private_vtrng(state, random_bits,
++ req_num_bytes);
++ if (err)
++ goto ERR;
++
++ } else {
++ err = nisttrng_gen_random(state, random_bits,
++ req_num_bytes);
++ if (err)
++ goto ERR;
++
++ /* Advance the state - if it returns CRYPTO_RESEED_REQUIRED, have to jump back and do a reseed and generate */
++ err = nisttrng_advance_state(state);
++ if (err)
++ goto ERR;
++ }
++ }
++ } while (reseed_required);
++
++ err = CRYPTO_OK;
++ state->status.current_state = NIST_TRNG_STATE_GENERATE;
++ERR:
++ if (err)
++ random_bits = NULL;
++
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++} /* nisttrng_generate */
++EXPORT_SYMBOL(nisttrng_generate);
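++
++/*
++ * Illustrative end-to-end sketch, not part of the original SDK sources:
++ * instantiate, pull 64 random bytes, then tear the DRBG down. The buffer
++ * size, strength and wrapper name are example assumptions; the nisttrng_*
++ * calls are the ones defined above.
++ */
++static int __maybe_unused nisttrng_example_get_random(struct nist_trng_state *state)
++{
++	u8 rand_buf[64];
++	int err;
++
++	/* 128-bit strength, no prediction resistance, no personalization */
++	err = nisttrng_instantiate(state, 128, 0, NULL);
++	if (err != CRYPTO_OK)
++		return err;
++
++	/* 64 bytes at 128-bit strength, no prediction resistance, no addin */
++	err = nisttrng_generate(state, rand_buf, sizeof(rand_buf), 128, 0, NULL);
++
++	/* always uninstantiate/zeroize, even if the generate failed */
++	nisttrng_uninstantiate(state);
++
++	return err;
++}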
+diff --git a/drivers/char/hw_random/dwc/src/trng/trng/nist_trng_private.c b/drivers/char/hw_random/dwc/src/trng/trng/nist_trng_private.c
+new file mode 100644
+index 000000000..4bfdf0122
+--- /dev/null
++++ b/drivers/char/hw_random/dwc/src/trng/trng/nist_trng_private.c
+@@ -0,0 +1,1022 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * This Synopsys software and associated documentation (hereinafter the
++ * "Software") is an unsupported proprietary work of Synopsys, Inc. unless
++ * otherwise expressly agreed to in writing between Synopsys and you. The
++ * Software IS NOT an item of Licensed Software or a Licensed Product under
++ * any End User Software License Agreement or Agreement for Licensed Products
++ * with Synopsys or any supplement thereto. Synopsys is a registered trademark
++ * of Synopsys, Inc. Other names included in the SOFTWARE may be the
++ * trademarks of their respective owners.
++ *
++ * The contents of this file are dual-licensed; you may select either version
++ * 2 of the GNU General Public License ("GPL") or the BSD-3-Clause license
++ * ("BSD-3-Clause"). The GPL is included in the COPYING file accompanying the
++ * SOFTWARE. The BSD License is copied below.
++ *
++ * BSD-3-Clause License:
++ * Copyright (c) 2012-2016 Synopsys, Inc. and/or its affiliates.
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ *
++ * 1. Redistributions of source code must retain the above copyright notice,
++ * this list of conditions, and the following disclaimer, without
++ * modification.
++ *
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * 3. The names of the above-listed copyright holders may not be used to
++ * endorse or promote products derived from this software without specific
++ * prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
++ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
++ * POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "nisttrng_hw.h"
++#include "nisttrng.h"
++
++/* Wait functions */
++static int nisttrng_wait_on_(struct nist_trng_state *state, u32 mask)
++{
++ u32 tmp;
++ int t;
++
++ t = NIST_TRNG_RETRY_MAX;
++
++ do {
++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_ISTAT);
++ } while (!(tmp & (mask | NIST_TRNG_REG_ISTAT_ALARMS)) && --t);
++
++ if (tmp & NIST_TRNG_REG_ISTAT_ALARMS)
++ return nisttrng_get_alarms(state);
++
++ if (t) {
++ pdu_io_write32(state->base + NIST_TRNG_REG_ISTAT, mask);
++ return CRYPTO_OK;
++
++ } else {
++ SYNHW_PRINT("wait_on_: failed timeout: %08lx\n",
++ (unsigned long)tmp);
++ return CRYPTO_TIMEOUT;
++ }
++} /* nisttrng_wait_on_ */
++
++int nisttrng_wait_on_done(struct nist_trng_state *state)
++{
++ return nisttrng_wait_on_(state, NIST_TRNG_REG_ISTAT_DONE);
++} /* nisttrng_wait_on_done */
++EXPORT_SYMBOL(nisttrng_wait_on_done);
++
++int nisttrng_wait_on_noise_rdy(struct nist_trng_state *state)
++{
++ return nisttrng_wait_on_(state, NIST_TRNG_REG_ISTAT_NOISE_RDY);
++} /* nisttrng_wait_on_noise_rdy */
++
++static int nisttrng_wait_on_zeroize(struct nist_trng_state *state)
++{
++ return nisttrng_wait_on_(state, NIST_TRNG_REG_ISTAT_ZEROIZE);
++} /* nisttrng_wait_on_zeroize */
++
++static int nisttrng_wait_on_kat_completed(struct nist_trng_state *state)
++{
++ return nisttrng_wait_on_(state, NIST_TRNG_REG_ISTAT_KAT_COMPLETE);
++} /* nisttrng_wait_on_kat_completed */
++
++int nisttrng_wait_on_busy(struct nist_trng_state *state)
++{
++ u32 tmp, t;
++
++ t = NIST_TRNG_RETRY_MAX;
++
++ do {
++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_STAT);
++ } while ((tmp & NIST_TRNG_REG_STAT_BUSY) && --t);
++
++ if (t)
++ return CRYPTO_OK;
++
++ SYNHW_PRINT("wait_on_busy: failed timeout: %08lx\n",
++ (unsigned long)tmp);
++ return CRYPTO_TIMEOUT;
++} /* nisttrng_wait_on_busy */
++EXPORT_SYMBOL(nisttrng_wait_on_busy);
++
++/* Read and return alarm. Zeroize if there is an alarm*/
++int nisttrng_get_alarms(struct nist_trng_state *state)
++{
++ u32 tmp;
++
++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_ISTAT);
++ if (tmp & NIST_TRNG_REG_ISTAT_ALARMS) {
++ // alarm happened
++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_ALARM);
++ DEBUG("Received alarm: %lx\n", (unsigned long)tmp);
++ // clear istat
++ pdu_io_write32(state->base + NIST_TRNG_REG_ISTAT,
++ NIST_TRNG_REG_ISTAT_ALARMS);
++ pdu_io_write32(state->base + NIST_TRNG_REG_ALARM, 0x1F);
++ state->status.alarm_code = tmp & 0x1F;
++
++ /* zeroize if there was an alarm */
++ if (state->status.alarm_code !=
++ NIST_TRNG_REG_ALARM_FAILED_TEST_ID_OK) {
++ nisttrng_zeroize(state);
++ }
++ } else {
++ state->status.alarm_code = 0;
++ }
++
++ if (state->status.alarm_code)
++ return CRYPTO_FATAL;
++ else
++ return CRYPTO_OK;
++} /* nisttrng_get_alarms */
++EXPORT_SYMBOL(nisttrng_get_alarms);
++
++/* Reset reminder and alarm counters */
++int nisttrng_reset_counters(struct nist_trng_state *state)
++{
++ state->counters.bits_per_req_left = state->counters.max_bits_per_req;
++ state->counters.req_per_seed_left = state->counters.max_req_per_seed;
++
++ return 0;
++} /* nisttrng_reset_counters */
++EXPORT_SYMBOL(nisttrng_reset_counters);
++
++/* When a zeroize happens some of the struct objects should reset */
++int nisttrng_reset_state(struct nist_trng_state *state)
++{
++ nisttrng_reset_counters(state);
++ state->status.pad_ps_addin = 0;
++ state->status.current_state = NIST_TRNG_STATE_UNINSTANTIATE;
++
++ return 0;
++} /* nisttrng_reset_state */
++
++/* ---------- Set field APIs ---------- */
++
++/*
++ * Sets the security strength of the DRBG instance.
++ * > req_sec_strength has to be an integer. The API chooses one of SEC_STRNT_AES128 or SEC_STRNT_AES256 as follows:
++ * 0 < req_sec_strength <= 128 --> security strength = SEC_STRNT_AES128
++ * 128 < req_sec_strength <= 256 --> security strength = SEC_STRNT_AES256
++ * else --> Invalid security strength
++ * > If the DRBG is instantiated, a new security strength change request with greater security strength will return error.
++ */
++int nisttrng_set_sec_strength(struct nist_trng_state *state, int req_sec_strength)
++{
++ int err;
++ u32 tmp;
++ enum nisttrng_sec_strength chosen_sec_strength;
++
++ DEBUG(">> %s: security strength = %i\n", __func__,
++ req_sec_strength);
++
++ /* choose the security strength */
++	/* set the security strength to the lowest value greater than or equal to req_sec_strength from the set {128, 256} */
++ if (REQ_SEC_STRENGTH_IS_VALID(req_sec_strength)) {
++ if (req_sec_strength > 0 && req_sec_strength <= 128) {
++ chosen_sec_strength = SEC_STRNT_AES128;
++
++ } else if (((req_sec_strength > 128) &&
++ (req_sec_strength <= 256)) &&
++ (state->config.features.drbg_arch == AES256)) {
++ chosen_sec_strength = SEC_STRNT_AES256;
++
++ } else { /* should not get here, because we have already checked the validity */
++ DEBUG("Invalid security strength\n");
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++ } else {
++ DEBUG("Invalid security strength\n");
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++ DEBUG("chosen security strength = %u\n", chosen_sec_strength);
++
++	/* set the security strength - at this point the security strength is validated and converted */
++ if (DRBG_INSTANTIATED(state->status.current_state) &&
++ chosen_sec_strength != state->status.sec_strength) {
++ /* security strength can only change when the DRBG is not instantiated. */
++		/* if the new security strength is less than what the DRBG is instantiated with, accept it, but don't change it in HW. If it's more, return error */
++ if (chosen_sec_strength < state->status.sec_strength) {
++ DEBUG("Lowering the security strength. DRBG is already instantiated.\n");
++ state->status.pad_ps_addin = 4;
++ state->status.sec_strength = chosen_sec_strength;
++
++ } else {
++ state->status.pad_ps_addin = 0;
++			DEBUG("Cannot select a higher security strength once the DRBG is instantiated\n");
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++ } else {
++ DEBUG("Updating the security strength.\n");
++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_MODE);
++ tmp = NIST_TRNG_REG_MODE_SET_SEC_ALG(tmp, chosen_sec_strength);
++ pdu_io_write32(state->base + NIST_TRNG_REG_MODE, tmp);
++
++ state->status.pad_ps_addin = 0;
++ state->status.sec_strength = chosen_sec_strength;
++ }
++
++ err = CRYPTO_OK;
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++} /* nisttrng_set_sec_strength */
++EXPORT_SYMBOL(nisttrng_set_sec_strength);
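++
++/*
++ * Illustrative sketch, not part of the original SDK sources: the strength
++ * mapping described above means a request for 112 bits selects the AES-128
++ * setting, while a request for 192 bits selects AES-256 (only when the
++ * drbg_arch build option is AES256). The wrapper is an example only.
++ */
++static int __maybe_unused nisttrng_example_pick_strength(struct nist_trng_state *state)
++{
++	/* 0 < 112 <= 128, so SEC_STRNT_AES128 is chosen internally */
++	return nisttrng_set_sec_strength(state, 112);
++}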
++
++/*
++ * Sets the ADDIN_PRESENT field of the MODE register according to the addin_present input.
++ */
++int nisttrng_set_addin_present(struct nist_trng_state *state, int addin_present)
++{
++ u32 tmp;
++
++	DEBUG(">> %s, addin_present = %u\n", __func__,
++ addin_present);
++
++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_MODE);
++ tmp = NIST_TRNG_REG_MODE_SET_ADDIN_PRESENT(tmp, addin_present);
++ pdu_io_write32(state->base + NIST_TRNG_REG_MODE, tmp);
++
++ DEBUG("--- Return %s, err = %i\n", __func__, 0);
++ return 0;
++} /* nisttrng_set_addin_present */
++EXPORT_SYMBOL(nisttrng_set_addin_present);
++
++/*
++ * Sets the PRED_RESIST field of the MODE register according to the pred_resist input.
++ * > If the DRBG is instantiated with prediction resistance of 0, and a change to the prediction resistance of 1 is requested,
++ * the API will return an error.
++ */
++int nisttrng_set_pred_resist(struct nist_trng_state *state, int pred_resist)
++{
++ int err;
++ u32 tmp;
++
++ DEBUG(">> %s: pred_resist = %u\n", __func__, pred_resist);
++
++ /* if DRBG is instantiated, prediction resistance can only change from 1 to 0 and not vice versa. This is a NIST requirement. */
++ if (DRBG_INSTANTIATED(state->status.current_state) && pred_resist &&
++ !state->status.pred_resist) {
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_MODE);
++ tmp = NIST_TRNG_REG_MODE_SET_PRED_RESIST(tmp, pred_resist);
++ pdu_io_write32(state->base + NIST_TRNG_REG_MODE, tmp);
++
++ state->status.pred_resist = pred_resist;
++
++ err = CRYPTO_OK;
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++} /* nisttrng_set_pred_resist */
++EXPORT_SYMBOL(nisttrng_set_pred_resist);
++
++/*
++ * Puts the NIST_TRNG in either the SECURE or PROMISCUOUS mode.
++ * > A value of 1 for secure_mode puts the core in the SECURE mode and a value of 0 puts it in the PROMISCUOUS mode.
++ * > Any change to the secure mode of the NIST_TRNG will result in a complete zeroize, and will set the seeding mode to self-seeding.
++ * A zeroize will not destroy the programmed mode and ALARM register value.
++ * It keeps the programmed mode to avoid re-programming.
++ * It also maintains the ALARM register value, so that the user can read the value to understand the reason for the alarm that occurred.
++ */
++int nisttrng_set_secure_mode(struct nist_trng_state *state, int secure_mode)
++{
++ int err;
++ u32 tmp;
++ int t;
++
++ DEBUG(">> %s: secure_mode = %u\n", __func__, secure_mode);
++
++ t = NIST_TRNG_RETRY_MAX;
++
++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_SMODE);
++ tmp = NIST_TRNG_REG_SMODE_SET_SECURE_EN(tmp, secure_mode);
++ pdu_io_write32(state->base + NIST_TRNG_REG_SMODE, tmp);
++
++ /* wait until STAT register indicates that the mode is applied */
++ do {
++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_STAT);
++ } while ((NIST_TRNG_REG_STAT_GET_SECURE(tmp) != secure_mode) && --t);
++
++ if (!t) {
++ err = CRYPTO_TIMEOUT;
++ goto ERR;
++ }
++
++ /* if secure mode changes, a zeroize will happen in HW. */
++ if (state->status.secure_mode != secure_mode) {
++ DEBUG("secure mode changed. zeroize happened. reset sw state\n");
++ /* nonce mode goes back to default. */
++ state->status.nonce_mode = 0;
++ /* reset the SW state */
++ nisttrng_reset_state(state);
++ }
++
++ state->status.secure_mode = secure_mode;
++
++ err = CRYPTO_OK;
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++} /* nisttrng_set_secure_mode */
++EXPORT_SYMBOL(nisttrng_set_secure_mode);
++
++/*
++ * To change the seeding mode of the NIST_TRNG.
++ * > A value of 1 for nonce_mode will put the NIST_TRNG in the nonce seeding mode, which means that the seed will be provided by the user,
++ * unlike the noise or self-seeding mode (normal mode of operation) in which the seed is generated by the internal entropy source.
++ * > Any transition to or from the nonce mode will zeroize the NIST_TRNG.
++ */
++int nisttrng_set_nonce_mode(struct nist_trng_state *state, int nonce_mode)
++{
++ int err;
++ u32 tmp;
++ int t;
++
++ DEBUG(">> %s: nonce_mode = %u\n", __func__, nonce_mode);
++
++ t = NIST_TRNG_RETRY_MAX;
++
++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_SMODE);
++ tmp = NIST_TRNG_REG_SMODE_SET_NONCE(tmp, nonce_mode);
++ pdu_io_write32(state->base + NIST_TRNG_REG_SMODE, tmp);
++
++ /* wait until STAT register indicates that the mode is applied */
++ do {
++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_STAT);
++ } while ((NIST_TRNG_REG_STAT_GET_NONCE(tmp) != nonce_mode) && --t);
++
++ if (!t) {
++ err = CRYPTO_TIMEOUT;
++ goto ERR;
++ }
++
++ /* if nonce mode changes, a zeroize will happen in HW. */
++ if (state->status.nonce_mode != nonce_mode) {
++ DEBUG("nonce mode changed. zeroize happened. reset sw state\n");
++ /* reset the SW state */
++ nisttrng_reset_state(state);
++ }
++
++ state->status.nonce_mode = nonce_mode;
++
++ err = CRYPTO_OK;
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++} /* nisttrng_set_nonce_mode */
++EXPORT_SYMBOL(nisttrng_set_nonce_mode);
++
++/* ---------- Load data APIs ---------- */
++/*
++ * Loads the additional input or personalization string into the NPA_DATAx registers.
++ * > Loads the proper number of bits (256 or 384) according to the security strength stored in the state handle.
++ */
++int nisttrng_load_ps_addin(struct nist_trng_state *state, void *input_str)
++{
++ int err;
++ int i, j;
++ int str_size;
++
++ DEBUG(">> %s starts...\n", __func__);
++
++ /* return error if the pointer is NULL */
++ if (!input_str) {
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++	/* calculate the length based on the security strength */
++	if (state->status.sec_strength == SEC_STRNT_AES128) {
++		str_size = 8; /* 256/32 */
++	} else if (state->status.sec_strength == SEC_STRNT_AES256) {
++		str_size = 12; /* 384/32 */
++	} else {
++		/* unknown security strength: str_size would otherwise be used uninitialized */
++		err = CRYPTO_FAILED;
++		goto ERR;
++	}
++
++ for (i = 0; i < str_size; i++) {
++ pdu_io_write32(state->base + NIST_TRNG_REG_NPA_DATA0 + i,
++ ((u32 *)input_str)[i]);
++ }
++
++ j = str_size + state->status.pad_ps_addin;
++ /* if security strength is lowered after the DRBG is instantiated, pad PS and ADDIN with 0 at the MSB side */
++ DEBUG("pad NPA_DATA with %u zeros at the MSB side\n",
++ state->status.pad_ps_addin);
++ for (i = str_size; i < j; i++)
++ pdu_io_write32(state->base + NIST_TRNG_REG_NPA_DATA0 + i, 0);
++
++ err = CRYPTO_OK;
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++} /* nisttrng_load_ps_addin */
++EXPORT_SYMBOL(nisttrng_load_ps_addin);
++
++/* ---------- Command APIs ---------- */
++/*
++ * Provides entropy and is used in both nonce and noise (self) seeding modes of operation:
++ * > If the NIST_TRNG is in the nonce mode, entropy must be provided by the user; otherwise (in the self-seeding mode) entropy will be generated by the internal entropy source of the NIST_TRNG.
++ * > In the noise mode, calling the API will initiate a seeding command. Depending on the programmed security strength, a 256 or 384-bit seed will be generated.
++ * > The input_nonce and nonce_operation arguments are only used when the core is in the nonce mode.
++ * > In the nonce mode, the NIST_TRNG can be seeded either through 2 or 3 blocks of 512-bit nonce values which are passed to the internal derivation function to increase the entropy,
++ * or it can be seeded by a 256 or 384-bit nonce written directly into the SEEDx registers.
++ * Passing a value of 1 to nonce_operation selects the former scenario and a value of 0 selects the latter.
++ * > The input_nonce pointer must point to a memory location with a sufficient number of initialized bits.
++ * > Table below shows the required number of bits depending on the nonce_operation and the security strength values.
++ * nonce_operation | Security Strength | Bit length requirement
++ * ------------------------------------------------------------------------------------------
++ * 1 (using the Derivation Function) | SEC_STRNT_AES128 | 2x512 = 1024
++ * 1 (using the Derivation Function) | SEC_STRNT_AES256 | 3x512 = 1536
++ * 0 (loading the seed into SEEDx) | SEC_STRNT_AES128 | 256
++ * 0 (loading the seed into SEEDx) | SEC_STRNT_AES256 | 384
++ * > Generated entropy is secret information held securely within the HW and remains inaccessible to the user, unless the HW core is in the PROMISCUOUS mode.
++ */
++int nisttrng_get_entropy_input(struct nist_trng_state *state, void *input_nonce,
++ int nonce_operation)
++{
++ int err;
++ int nonce_ld_cntr = 0;
++ int i, j;
++
++ DEBUG(">> %s: seeding mode = %s, nonce_operation = %u\n", __func__,
++ (state->status.nonce_mode ? "Nonce" : "Noise"), nonce_operation);
++
++ /* make sure there is no alarm and the core is not busy */
++ err = nisttrng_get_alarms(state);
++ if (err)
++ goto ERR;
++
++ err = nisttrng_wait_on_busy(state);
++ if (err)
++ goto ERR;
++
++ /* --- Seeding --- */
++ if (state->status.nonce_mode) { /* --- nonce mode --- */
++ if (!input_nonce) {
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ nonce_ld_cntr = 0;
++
++ if (state->status.sec_strength == SEC_STRNT_AES128)
++ nonce_ld_cntr = 2;
++ else if (state->status.sec_strength == SEC_STRNT_AES256)
++ nonce_ld_cntr = 3;
++
++ if (nonce_operation) { /* load the noise inside NPA_DATAx register and issue gen_nonce command */
++ for (i = 0; i < nonce_ld_cntr; i++) {
++				/* load the nonce */
++ for (j = 0; j < 16; j++) {
++ pdu_io_write32(state->base +
++ NIST_TRNG_REG_NPA_DATA0 + j,
++ ((u32 *)input_nonce)[16 * i + j]);
++ }
++
++ /* issue the command and wait on done */
++ pdu_io_write32(state->base + NIST_TRNG_REG_CTRL,
++ NIST_TRNG_REG_CTRL_CMD_GEN_NONCE);
++
++ if (nisttrng_wait_on_done(state)) {
++ err = CRYPTO_FATAL;
++ goto ERR;
++ };
++ }
++
++ } else {
++			/* load the nonce */
++ for (i = 0; i < 4 * nonce_ld_cntr; i++) {
++ pdu_io_write32(state->base + NIST_TRNG_REG_SEED0 + i,
++ ((u32 *)input_nonce)[i]);
++ }
++ }
++ } else { /* --- noise mode --- */
++ /* issue the command and wait on done */
++ DEBUG("issue the Gen_Noise command\n");
++ pdu_io_write32(state->base + NIST_TRNG_REG_CTRL,
++ NIST_TRNG_REG_CTRL_CMD_GEN_NOISE);
++
++ if (nisttrng_wait_on_done(state)) {
++ err = CRYPTO_FATAL;
++ goto ERR;
++ };
++ }
++
++ err = CRYPTO_OK;
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++} /* nisttrng_get_entropy_input */
++EXPORT_SYMBOL(nisttrng_get_entropy_input);
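++
++/*
++ * Illustrative sketch, not part of the original SDK sources: nonce seeding
++ * through the derivation function at 128-bit strength, which per the table
++ * above needs 2 x 512 = 1024 nonce bits (32 u32 words). The nonce contents
++ * are placeholders; a real caller must supply proper nonce material.
++ */
++static int __maybe_unused nisttrng_example_nonce_seed(struct nist_trng_state *state)
++{
++	u32 nonce[32] = { 0 }; /* placeholder nonce material, 1024 bits */
++	int err;
++
++	/* switch to nonce mode (this zeroizes the core) */
++	err = nisttrng_set_nonce_mode(state, 1);
++	if (err)
++		return err;
++
++	err = nisttrng_set_sec_strength(state, 128);
++	if (err)
++		return err;
++
++	/* nonce_operation = 1: push the nonce blocks through the derivation function */
++	return nisttrng_get_entropy_input(state, nonce, 1);
++}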
++
++/*
++ * Generate Function:
++ * > The Generate function in NIST_TRNG HW is broken down into 3 steps: Refresh_Addin, Gen_Random and Advance_State.
++ * nisttrng_generate incorporates all these steps and some extra checks into one simple API.
++ * > There is one API for each step, described below.
++ */
++/*
++ * Generate Part 1 - Refresh_Addin: Additional input string is used to add to the HW state entropy.
++ * > This API calls nisttrng_set_addin_present to set the ADDIN_PRESENT field of the MODE register to 1.
++ * > Then it loads the additional input provided by addin_str pointer into the NPA_DATAx by calling the nisttrng_load_ps_addin.
++ * > Then, it issues a Refresh_Addin command to the HW.
++ * > If the addin_str pointer is NULL, the API will return error.
++ */
++int nisttrng_refresh_addin(struct nist_trng_state *state, void *addin_str)
++{
++ int err;
++
++ DEBUG(">> %s starts...\n", __func__);
++
++	/* if the DRBG is not instantiated return error */
++ if (!DRBG_INSTANTIATED(state->status.current_state)) {
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ /* make sure there is no alarm and the core is not busy */
++ err = nisttrng_get_alarms(state);
++ if (err)
++ goto ERR;
++
++ err = nisttrng_wait_on_busy(state);
++ if (err)
++ goto ERR;
++
++ /* This API should not be called with a NULL additional input string */
++ if (!addin_str) {
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ /* set the ADDIN_PRESENT field of the MODE register to 1 */
++ err = nisttrng_set_addin_present(state, 1);
++ if (err)
++ goto ERR;
++
++ err = nisttrng_load_ps_addin(state, addin_str);
++ if (err)
++ goto ERR;
++
++ /* execute the command and wait on done*/
++ pdu_io_write32(state->base + NIST_TRNG_REG_CTRL,
++ NIST_TRNG_REG_CTRL_CMD_REFRESH_ADDIN);
++
++ err = nisttrng_wait_on_done(state);
++ if (err)
++ goto ERR;
++
++ err = 0;
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++} /* nisttrng_refresh_addin */
++EXPORT_SYMBOL(nisttrng_refresh_addin);
++
++/*
++ * Generate Part 2 - Gen_Random: generates the requested number of bits.
++ * > This API issues the Gen_Random command to the HW as many times as needed to produce req_num_bytes bytes of random data.
++ * > If the requested number of bits (i.e. 128×req_num_blks) is more than the maximum value specified by max_bits_per_req, the API will return error.
++ * > Random bits will be returned in random_bits.
++ */
++int nisttrng_gen_random(struct nist_trng_state *state, void *random_bits,
++ unsigned long req_num_bytes)
++{
++ int err;
++ int i, j;
++ u32 tmp;
++ unsigned int remained_bytes;
++ unsigned long req_num_blks;
++
++ DEBUG(">> %s: req_num_bytes = %lu\n", __func__, req_num_bytes);
++
++	/* if the DRBG is not instantiated return error */
++ if (!DRBG_INSTANTIATED(state->status.current_state)) {
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ /* make sure there is no alarm and the core is not busy */
++ err = nisttrng_get_alarms(state);
++ if (err)
++ goto ERR;
++
++ err = nisttrng_wait_on_busy(state);
++ if (err)
++ goto ERR;
++
++	/* requested number of bits has to be less than the programmed maximum */
++ if ((req_num_bytes * 8) > state->counters.max_bits_per_req) {
++ DEBUG("requested number of bits (%lu) is larger than the set maximum (%lu)\n",
++ (req_num_bytes * 8), state->counters.max_bits_per_req);
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ /* make sure random_bits is not NULL */
++ if (!random_bits) {
++ DEBUG("random_bits pointer cannot be NULL\n");
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ /* loop on generate to get the requested number of bits. Each generate gives NIST_TRNG_RAND_BLK_SIZE_BITS bits. */
++ req_num_blks =
++ ((req_num_bytes * 8) % NIST_TRNG_RAND_BLK_SIZE_BITS) ?
++ (((req_num_bytes * 8) / NIST_TRNG_RAND_BLK_SIZE_BITS) +
++ 1) :
++ ((req_num_bytes * 8) / NIST_TRNG_RAND_BLK_SIZE_BITS);
++
++ for (i = 0; i < req_num_blks; i++) {
++ /* issue gen_random and wait on done */
++ pdu_io_write32(state->base + NIST_TRNG_REG_CTRL,
++ NIST_TRNG_REG_CTRL_CMD_GEN_RANDOM);
++
++ err = nisttrng_wait_on_done(state);
++ if (err)
++ goto ERR;
++
++ /* read the generated random number block and store */
++ for (j = 0; j < (NIST_TRNG_RAND_BLK_SIZE_BITS / 32); j++) {
++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_RAND0 + j);
++ /* copy to random_bits byte-by-byte, until req_num_bytes are copied */
++ remained_bytes = req_num_bytes -
++ (i * (NIST_TRNG_RAND_BLK_SIZE_BITS / 8) +
++ j * 4);
++ if (remained_bytes > 4) {
++ memcpy(random_bits +
++ i * (NIST_TRNG_RAND_BLK_SIZE_BITS / 8) +
++ j * 4, &tmp, 4);
++
++ /* decrement the bits counter and return error if generated more than the maximum*/
++ state->counters.bits_per_req_left =
++ state->counters.bits_per_req_left -
++ 4 * 8;
++
++ if (state->counters.bits_per_req_left < 0) {
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ } else {
++ memcpy(random_bits + i * (NIST_TRNG_RAND_BLK_SIZE_BITS / 8) +
++ j * 4, &tmp, remained_bytes);
++
++ /* decrement the bits counter and return error if generated more than the maximum*/
++ state->counters.bits_per_req_left =
++ state->counters.bits_per_req_left -
++ remained_bytes * 8;
++
++ if (state->counters.bits_per_req_left < 0) {
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++ break;
++ }
++ }
++ }
++
++ err = CRYPTO_OK;
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++} /* nisttrng_gen_random */
++EXPORT_SYMBOL(nisttrng_gen_random);
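++
++/*
++ * Illustrative sketch, not part of the original SDK sources: the block count
++ * computed above is a plain ceiling division, e.g. with a 128-bit block size
++ * a request for 20 bytes (160 bits) needs 2 Gen_Random blocks. The kernel's
++ * DIV_ROUND_UP() helper expresses the same calculation.
++ */
++static unsigned long __maybe_unused nisttrng_example_blocks_needed(unsigned long req_num_bytes)
++{
++	return DIV_ROUND_UP(req_num_bytes * 8, NIST_TRNG_RAND_BLK_SIZE_BITS);
++}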
++
++/*
++ * Generate Part 3 - Advance the state: advances the state of the DRBG.
++ * > This API issues the Advance_State command to the HW.
++ * > Then it updates the counter for the number of generate requests per seed.
++ * > The counter must be checked every time before starting the Generate process and a reseed must be issued if the limit is reached. This check is incorporated inside nisttrng_generate API.
++ * > Note that we don't have to provide the additional input again for this API, because if it was provided in the refresh_addin stage, the HW locks the NPA_DATAx registers, so it will still be available
++ */
++int nisttrng_advance_state(struct nist_trng_state *state)
++{
++ int err;
++
++ DEBUG(">> %s starts...\n", __func__);
++
++	/* if the DRBG is not instantiated return error */
++ if (!DRBG_INSTANTIATED(state->status.current_state)) {
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ /* make sure there is no alarm and the core is not busy */
++ err = nisttrng_get_alarms(state);
++ if (err)
++ goto ERR;
++
++ err = nisttrng_wait_on_busy(state);
++ if (err)
++ goto ERR;
++
++ /* issue advance_state and wait on done */
++ pdu_io_write32(state->base + NIST_TRNG_REG_CTRL,
++ NIST_TRNG_REG_CTRL_CMD_ADVANCE_STATE);
++ err = nisttrng_wait_on_done(state);
++ if (err)
++ goto ERR;
++
++ /* generate is finished, reset the bits_per_req_left counter */
++ state->counters.bits_per_req_left = state->counters.max_bits_per_req;
++
++ --state->counters.req_per_seed_left;
++ if (state->counters.req_per_seed_left < 0) {
++ err = CRYPTO_FAILED;
++ goto ERR;
++ } /* just a check */
++
++ err = CRYPTO_OK;
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++} /* nisttrng_advance_state */
++
++int nisttrng_check_seed_lifetime(struct nist_trng_state *state)
++{
++ int err;
++
++ if (state->counters.req_per_seed_left <= 0) {
++ DEBUG("maximum number of requests per seed is reached\n");
++ err = CRYPTO_RESEED_REQUIRED;
++ goto ERR;
++ }
++
++ err = CRYPTO_OK;
++ERR:
++ return err;
++}
++EXPORT_SYMBOL(nisttrng_advance_state);
++
++/*
++ * Perform Known Answer Test
++ * > The NIST_TRNG can perform a KAT on the DRBG and the derivation function inside the entropy source. There are also two different vectors available to do the KAT.
++ * > The kat_sel input selects whether the KAT should be performed on the DRBG or the derivation function.
++ * > The kat_vec input chooses the KAT vector.
++ * > Selections are done by writing the values to the MODE register.
++ * > If the KAT fails, the API returns error.
++ */
++int nisttrng_kat(struct nist_trng_state *state, int kat_sel, int kat_vec)
++{
++ int err;
++ u32 tmp;
++
++ DEBUG(">> %s: kat_sel = %u, kat_vec = %u\n", __func__,
++ kat_sel, kat_vec);
++
++ /* set KAT_SEL and KAT_VEC */
++ tmp = pdu_io_read32(state->base + NIST_TRNG_REG_MODE);
++ tmp = NIST_TRNG_REG_MODE_SET_KAT_SEL(tmp, kat_sel);
++ tmp = NIST_TRNG_REG_MODE_SET_KAT_VEC(tmp, kat_vec);
++ pdu_io_write32(state->base + NIST_TRNG_REG_MODE, tmp);
++
++ /* issue the command and wait on kat_completed */
++ pdu_io_write32(state->base + NIST_TRNG_REG_CTRL,
++ NIST_TRNG_REG_CTRL_CMD_KAT);
++
++ err = nisttrng_wait_on_kat_completed(state);
++ if (err)
++ goto ERR;
++
++ /* check for alarms */
++ err = nisttrng_get_alarms(state);
++ if (err)
++ goto ERR;
++
++ err = CRYPTO_OK;
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++} /* nisttrng_kat */
++EXPORT_SYMBOL(nisttrng_kat);
++
++/*
++ * Performs a full KAT with all four combinations of the kat_sel and kat_vec
++ * > If any of the KAT fails, the API returns error.
++ */
++int nisttrng_full_kat(struct nist_trng_state *state)
++{
++ int err;
++
++ DEBUG(">> %s starts...\n", __func__);
++
++ /* SEL = 0, Vec = 0 */
++ err = nisttrng_kat(state, 0, 0);
++ if (err)
++ goto ERR;
++
++ /* SEL = 0, Vec = 1 */
++ err = nisttrng_kat(state, 0, 1);
++ if (err)
++ goto ERR;
++
++ /* SEL = 1, Vec = 0 */
++ err = nisttrng_kat(state, 1, 0);
++ if (err)
++ goto ERR;
++
++ /* SEL = 1, Vec = 1 */
++ err = nisttrng_kat(state, 1, 1);
++ if (err)
++ goto ERR;
++
++ err = CRYPTO_OK;
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++} /* nisttrng_full_kat */
++EXPORT_SYMBOL(nisttrng_full_kat);
++
++/*
++ * The max_bits_per_req reminder initialized by nisttrng_init can be changed using this API.
++ * > If this API is called when the DRBG is instantiated, an error will be returned.
++ * > If the requested maximum is more than the standard's limit (determined by NIST_DFLT_MAX_BITS_PER_REQ), the API will return an error.
++ */
++int nisttrng_set_reminder_max_bits_per_req(struct nist_trng_state *state,
++ unsigned long max_bits_per_req)
++{
++ int err;
++
++ DEBUG(">> %s: %lu\n", __func__, max_bits_per_req);
++
++ /* if the DRBG is instantiated, cannot change the value */
++ if (DRBG_INSTANTIATED(state->status.current_state)) {
++ DEBUG("cannot change the reminder value when DRBG is already instantiated\n");
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ /* requested value cannot be more than NIST's limit */
++ if (max_bits_per_req > NIST_DFLT_MAX_BITS_PER_REQ) {
++ DEBUG("requested max_bits_per_req is more than standard's limit\n");
++ err = CRYPTO_INVALID_ARGUMENT;
++ goto ERR;
++ }
++
++ state->counters.max_bits_per_req = max_bits_per_req;
++ state->counters.bits_per_req_left = max_bits_per_req;
++
++ err = CRYPTO_OK;
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++}
++EXPORT_SYMBOL(nisttrng_set_reminder_max_bits_per_req);
++
++/*
++ * The max_req_per_seed reminder initialized by nisttrng_init can be changed using this API.
++ * > If this API is called when the DRBG is instantiated, an error will be returned.
++ * > If the requested maximum is more than the standard's limit (determined by NIST_DFLT_MAX_REQ_PER_SEED), the API will return an error.
++ */
++int nisttrng_set_reminder_max_req_per_seed(struct nist_trng_state *state,
++ unsigned long long max_req_per_seed)
++{
++ int err;
++
++ DEBUG(">> %s: %llu\n", __func__, max_req_per_seed);
++
++ /* if the DRBG is instantiated, cannot change the value */
++ if (DRBG_INSTANTIATED(state->status.current_state)) {
++ DEBUG("cannot change the reminder value when DRBG is already instantiated\n");
++ err = CRYPTO_FAILED;
++ goto ERR;
++ }
++
++ /* requested value cannot be more than NIST's limit */
++ if (max_req_per_seed > NIST_DFLT_MAX_REQ_PER_SEED) {
++ DEBUG("requested max_req_per_seed is more than standard's limit\n");
++ err = CRYPTO_INVALID_ARGUMENT;
++ goto ERR;
++ }
++
++ state->counters.max_req_per_seed = max_req_per_seed;
++ state->counters.req_per_seed_left = max_req_per_seed;
++
++ err = CRYPTO_OK;
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++}
++EXPORT_SYMBOL(nisttrng_set_reminder_max_req_per_seed);
++
++/*
++ * Zeroize command
++ * > A zeroize will not destroy the programmed mode and ALARM register value.
++ * It keeps the programmed mode to avoid re-programming.
++ * It also maintains the ALARM register value, so that the user can read the value to understand the reason for the alarm that occurred.
++ */
++int nisttrng_zeroize(struct nist_trng_state *state)
++{
++ int err;
++
++ DEBUG(">> %s: zeroize the core\n", __func__);
++
++ /* issue zeroize command */
++ pdu_io_write32(state->base + NIST_TRNG_REG_CTRL,
++ NIST_TRNG_REG_CTRL_CMD_ZEROIZE);
++
++ /* wait on zeroize done */
++ err = nisttrng_wait_on_zeroize(state);
++ if (err)
++ goto ERR;
++
++ /* reset the SW state */
++ nisttrng_reset_state(state);
++
++ err = CRYPTO_OK;
++ state->status.current_state = NIST_TRNG_STATE_UNINSTANTIATE;
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++} /* nisttrng_zeroize */
++EXPORT_SYMBOL(nisttrng_zeroize);
++
++int nisttrng_rnc(struct nist_trng_state *state, int rnc_ctrl_cmd)
++{
++ int err = 0;
++ u32 tmp;
++
++ DEBUG(">> %s cmd %d\n", __func__, rnc_ctrl_cmd);
++
++ if (rnc_ctrl_cmd > NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_FINISH_TO_IDLE) {
++ DEBUG(">> Invalid cmd %d\n", rnc_ctrl_cmd);
++ err = -1;
++ goto ERR;
++ }
++
++ if (!state->config.build_cfg0.edu_present) {
++ DEBUG(">> edu not present\n");
++ err = -1;
++ goto ERR;
++ }
++
++ pdu_io_write32(state->base + NIST_TRNG_EDU_RNC_CTRL, rnc_ctrl_cmd);
++ if (rnc_ctrl_cmd == NIST_TRNG_EDU_RNC_CTRL_CMD_RNC_ENABLE) {
++ // wait till rnc is enabled
++ do {
++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_STAT);
++ } while (!NIST_TRNG_EDU_STAT_RNC_ENABLED(tmp));
++
++ state->status.edu_vstat.rnc_enabled = 1;
++
++ } else {
++ // wait till rnc is idle (disabled)
++ do {
++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_STAT);
++ } while (NIST_TRNG_EDU_STAT_RNC_ENABLED(tmp));
++
++ state->status.edu_vstat.rnc_enabled = 0;
++ }
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++}
++EXPORT_SYMBOL(nisttrng_rnc);
++
++int nisttrng_wait_fifo_full(struct nist_trng_state *state)
++{
++ int err = 0;
++ u32 tmp, t;
++
++ t = NIST_TRNG_RETRY_MAX;
++
++ DEBUG(">> %s starts...\n", __func__);
++
++ do {
++ tmp = pdu_io_read32(state->base + NIST_TRNG_EDU_STAT);
++ } while ((!NIST_TRNG_EDU_STAT_FIFO_FULL(tmp)) && --t);
++
++ if (t) {
++ err = CRYPTO_OK;
++ } else {
++ DEBUG("wait_on_fifo_full: failed timeout: %08lx\n",
++ (unsigned long)tmp);
++ err = CRYPTO_TIMEOUT;
++ goto ERR;
++ }
++
++ERR:
++ DEBUG("--- Return %s, err = %i\n", __func__, err);
++ return err;
++}
+--
+2.34.1
+
diff --git a/recipes-kernel/linux/files/0019-Add-ADC-driver-for-ast2700.patch b/recipes-kernel/linux/files/0019-Add-ADC-driver-for-ast2700.patch
new file mode 100644
index 0000000..e98e6d9
--- /dev/null
+++ b/recipes-kernel/linux/files/0019-Add-ADC-driver-for-ast2700.patch
@@ -0,0 +1,502 @@
+From 5938c2f5c65733f9db58e4efd0a877a41680264e Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Tue, 11 Mar 2025 15:18:51 +0800
+Subject: [PATCH] Add ADC driver for ast2700
+
+This is base on aspeed SDK 9.05.
+
+Source:
+AspeedTech-BMC github:
+https://github.com/AspeedTech-BMC/linux/blob/aspeed-master-v6.6/
+(cherry picked from commit 769f62b7baa84d6998723b0ea60280e380183553)
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
+---
+ drivers/iio/adc/aspeed_adc.c | 302 +++++++++++++++++++++++++++++++----
+ 1 file changed, 271 insertions(+), 31 deletions(-)
+
+diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
+index 998e8bcc0..123bea86e 100644
+--- a/drivers/iio/adc/aspeed_adc.c
++++ b/drivers/iio/adc/aspeed_adc.c
+@@ -72,6 +72,8 @@
+ #define ASPEED_ADC_BAT_SENSING_ENABLE BIT(13)
+ #define ASPEED_ADC_CTRL_CHANNEL GENMASK(31, 16)
+ #define ASPEED_ADC_CTRL_CHANNEL_ENABLE(ch) FIELD_PREP(ASPEED_ADC_CTRL_CHANNEL, BIT(ch))
++#define ADC_MASK(n) ((n) < 16 ? ((1U << (n)) - 1) : 0xFFFF)
++#define ASPEED_ADC_CTRL_CHANNELS_ENABLE(chs) FIELD_PREP(ASPEED_ADC_CTRL_CHANNEL, ADC_MASK(chs))
+
+ #define ASPEED_ADC_INIT_POLLING_TIME 500
+ #define ASPEED_ADC_INIT_TIMEOUT 500000
+@@ -82,6 +84,8 @@
+ */
+ #define ASPEED_ADC_DEF_SAMPLING_RATE 65000
+
++static DEFINE_IDA(aspeed_adc_ida);
++
+ struct aspeed_adc_trim_locate {
+ const unsigned int offset;
+ const unsigned int field;
+@@ -95,6 +99,7 @@ struct aspeed_adc_model_data {
+ bool wait_init_sequence;
+ bool need_prescaler;
+ bool bat_sense_sup;
++ bool require_extra_eoc;
+ u8 scaler_bit_width;
+ unsigned int num_channels;
+ const struct aspeed_adc_trim_locate *trim_locate;
+@@ -107,6 +112,7 @@ struct adc_gain {
+
+ struct aspeed_adc_data {
+ struct device *dev;
++ int id;
+ const struct aspeed_adc_model_data *model_data;
+ struct regulator *regulator;
+ void __iomem *base;
+@@ -120,6 +126,26 @@ struct aspeed_adc_data {
+ int cv;
+ bool battery_sensing;
+ struct adc_gain battery_mode_gain;
++ unsigned int required_eoc_num;
++ u16 *upper_bound;
++ u16 *lower_bound;
++ bool *upper_en;
++ bool *lower_en;
++};
++
++static const struct iio_event_spec aspeed_adc_events[] = {
++ {
++ .type = IIO_EV_TYPE_THRESH,
++ .dir = IIO_EV_DIR_RISING,
++ .mask_separate =
++ BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
++ },
++ {
++ .type = IIO_EV_TYPE_THRESH,
++ .dir = IIO_EV_DIR_FALLING,
++ .mask_separate =
++ BIT(IIO_EV_INFO_VALUE) | BIT(IIO_EV_INFO_ENABLE),
++ },
+ };
+
+ #define ASPEED_CHAN(_idx, _data_reg_addr) { \
+@@ -131,6 +157,8 @@ struct aspeed_adc_data {
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
++ .event_spec = aspeed_adc_events, \
++ .num_event_specs = ARRAY_SIZE(aspeed_adc_events), \
+ }
+
+ static const struct iio_chan_spec aspeed_adc_iio_channels[] = {
+@@ -175,18 +203,11 @@ static const struct iio_chan_spec aspeed_adc_iio_bat_channels[] = {
+
+ static int aspeed_adc_set_trim_data(struct iio_dev *indio_dev)
+ {
+- struct device_node *syscon;
+ struct regmap *scu;
+ u32 scu_otp, trimming_val;
+ struct aspeed_adc_data *data = iio_priv(indio_dev);
+
+- syscon = of_find_node_by_name(NULL, "syscon");
+- if (syscon == NULL) {
+- dev_warn(data->dev, "Couldn't find syscon node\n");
+- return -EOPNOTSUPP;
+- }
+- scu = syscon_node_to_regmap(syscon);
+- of_node_put(syscon);
++ scu = syscon_regmap_lookup_by_phandle(data->dev->of_node, "aspeed,scu");
+ if (IS_ERR(scu)) {
+ dev_warn(data->dev, "Failed to get syscon regmap\n");
+ return -EOPNOTSUPP;
+@@ -277,36 +298,68 @@ static int aspeed_adc_set_sampling_rate(struct iio_dev *indio_dev, u32 rate)
+ return 0;
+ }
+
++static int aspeed_adc_get_voltage_raw(struct aspeed_adc_data *data, struct iio_chan_spec const *chan)
++{
++ int val;
++
++ val = readw(data->base + chan->address);
++ dev_dbg(data->dev,
++ "%d upper_bound: %d %x, lower_bound: %d %x, delay: %d * %d ns",
++ chan->channel, data->upper_en[chan->channel],
++ data->upper_bound[chan->channel], data->lower_en[chan->channel],
++ data->lower_bound[chan->channel], data->sample_period_ns,
++ data->required_eoc_num);
++ if (data->upper_en[chan->channel]) {
++ if (val >= data->upper_bound[chan->channel]) {
++ ndelay(data->sample_period_ns *
++ data->required_eoc_num);
++ val = readw(data->base + chan->address);
++ }
++ }
++ if (data->lower_en[chan->channel]) {
++ if (val <= data->lower_bound[chan->channel]) {
++ ndelay(data->sample_period_ns *
++ data->required_eoc_num);
++ val = readw(data->base + chan->address);
++ }
++ }
++ return val;
++}
++
+ static int aspeed_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+ {
+ struct aspeed_adc_data *data = iio_priv(indio_dev);
+- u32 adc_engine_control_reg_val;
++ u32 engine_ctrl_tmp_val, reg_val;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+- if (data->battery_sensing && chan->channel == 7) {
+- adc_engine_control_reg_val =
+- readl(data->base + ASPEED_REG_ENGINE_CONTROL);
+- writel(adc_engine_control_reg_val |
+- FIELD_PREP(ASPEED_ADC_CH7_MODE,
+- ASPEED_ADC_CH7_BAT) |
+- ASPEED_ADC_BAT_SENSING_ENABLE,
+- data->base + ASPEED_REG_ENGINE_CONTROL);
++ if (data->model_data->bat_sense_sup &&
++ chan->channel == data->model_data->num_channels - 1) {
++ engine_ctrl_tmp_val = readl(data->base + ASPEED_REG_ENGINE_CONTROL);
++ reg_val = engine_ctrl_tmp_val &
++ ~ASPEED_ADC_CTRL_CHANNELS_ENABLE(data->model_data->num_channels);
++ reg_val |= ASPEED_ADC_CTRL_CHANNEL_ENABLE(chan->channel);
++ if (data->battery_sensing)
++ reg_val |= FIELD_PREP(ASPEED_ADC_CH7_MODE, ASPEED_ADC_CH7_BAT) |
++ ASPEED_ADC_BAT_SENSING_ENABLE;
++ writel(reg_val, data->base + ASPEED_REG_ENGINE_CONTROL);
+ /*
+ * After enable battery sensing mode need to wait some time for adc stable
+ * Experiment result is 1ms.
+ */
+ mdelay(1);
+- *val = readw(data->base + chan->address);
+- *val = (*val * data->battery_mode_gain.mult) /
+- data->battery_mode_gain.div;
++ *val = aspeed_adc_get_voltage_raw(data, chan);
++ if (data->battery_sensing)
++ *val = (*val * data->battery_mode_gain.mult) /
++ data->battery_mode_gain.div;
+ /* Restore control register value */
+- writel(adc_engine_control_reg_val,
++ writel(engine_ctrl_tmp_val,
+ data->base + ASPEED_REG_ENGINE_CONTROL);
+- } else
+- *val = readw(data->base + chan->address);
++ } else {
++ *val = aspeed_adc_get_voltage_raw(data, chan);
++ }
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_OFFSET:
+@@ -369,9 +422,106 @@ static int aspeed_adc_reg_access(struct iio_dev *indio_dev,
+ return 0;
+ }
+
++static int aspeed_adc_read_event_config(struct iio_dev *indio_dev,
++ const struct iio_chan_spec *chan,
++ enum iio_event_type type,
++ enum iio_event_direction dir)
++{
++ struct aspeed_adc_data *data = iio_priv(indio_dev);
++
++ switch (dir) {
++ case IIO_EV_DIR_RISING:
++ return data->upper_en[chan->channel];
++ case IIO_EV_DIR_FALLING:
++ return data->lower_en[chan->channel];
++ default:
++ return -EINVAL;
++ }
++}
++
++static int aspeed_adc_write_event_config(struct iio_dev *indio_dev,
++ const struct iio_chan_spec *chan,
++ enum iio_event_type type,
++ enum iio_event_direction dir,
++ int state)
++{
++ struct aspeed_adc_data *data = iio_priv(indio_dev);
++
++ switch (dir) {
++ case IIO_EV_DIR_RISING:
++ data->upper_en[chan->channel] = state ? 1 : 0;
++ break;
++ case IIO_EV_DIR_FALLING:
++ data->lower_en[chan->channel] = state ? 1 : 0;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int aspeed_adc_write_event_value(struct iio_dev *indio_dev,
++ const struct iio_chan_spec *chan,
++ enum iio_event_type type,
++ enum iio_event_direction dir,
++ enum iio_event_info info, int val,
++ int val2)
++{
++ struct aspeed_adc_data *data = iio_priv(indio_dev);
++
++ if (info != IIO_EV_INFO_VALUE)
++ return -EINVAL;
++
++ switch (dir) {
++ case IIO_EV_DIR_RISING:
++ if (val >= BIT(ASPEED_RESOLUTION_BITS))
++ return -EINVAL;
++ data->upper_bound[chan->channel] = val;
++ break;
++ case IIO_EV_DIR_FALLING:
++ data->lower_bound[chan->channel] = val;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int aspeed_adc_read_event_value(struct iio_dev *indio_dev,
++ const struct iio_chan_spec *chan,
++ enum iio_event_type type,
++ enum iio_event_direction dir,
++ enum iio_event_info info, int *val,
++ int *val2)
++{
++ struct aspeed_adc_data *data = iio_priv(indio_dev);
++
++ if (info != IIO_EV_INFO_VALUE)
++ return -EINVAL;
++
++ switch (dir) {
++ case IIO_EV_DIR_RISING:
++ *val = data->upper_bound[chan->channel];
++ break;
++ case IIO_EV_DIR_FALLING:
++ *val = data->lower_bound[chan->channel];
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return IIO_VAL_INT;
++}
++
+ static const struct iio_info aspeed_adc_iio_info = {
+ .read_raw = aspeed_adc_read_raw,
+ .write_raw = aspeed_adc_write_raw,
++ .read_event_config = &aspeed_adc_read_event_config,
++ .write_event_config = &aspeed_adc_write_event_config,
++ .read_event_value = &aspeed_adc_read_event_value,
++ .write_event_value = &aspeed_adc_write_event_value,
+ .debugfs_reg_access = aspeed_adc_reg_access,
+ };
+
+@@ -382,6 +532,13 @@ static void aspeed_adc_unregister_fixed_divider(void *data)
+ clk_hw_unregister_fixed_factor(clk);
+ }
+
++static void aspeed_adc_ida_remove(void *data)
++{
++ struct aspeed_adc_data *priv_data = data;
++
++ ida_simple_remove(&aspeed_adc_ida, priv_data->id);
++}
++
+ static void aspeed_adc_reset_assert(void *data)
+ {
+ struct reset_control *rst = data;
+@@ -488,6 +645,7 @@ static int aspeed_adc_probe(struct platform_device *pdev)
+ u32 adc_engine_control_reg_val;
+ unsigned long scaler_flags = 0;
+ char clk_name[32], clk_parent_name[32];
++ const char *model_name;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*data));
+ if (!indio_dev)
+@@ -502,12 +660,44 @@ static int aspeed_adc_probe(struct platform_device *pdev)
+ if (IS_ERR(data->base))
+ return PTR_ERR(data->base);
+
++ data->upper_bound = devm_kzalloc(&pdev->dev,
++ sizeof(data->upper_bound) *
++ data->model_data->num_channels,
++ GFP_KERNEL);
++ if (!data->upper_bound)
++ return -ENOMEM;
++ data->upper_en = devm_kzalloc(&pdev->dev,
++ sizeof(data->upper_en) *
++ data->model_data->num_channels,
++ GFP_KERNEL);
++ if (!data->upper_en)
++ return -ENOMEM;
++ data->lower_bound = devm_kzalloc(&pdev->dev,
++ sizeof(data->lower_bound) *
++ data->model_data->num_channels,
++ GFP_KERNEL);
++ if (!data->lower_bound)
++ return -ENOMEM;
++ data->lower_en = devm_kzalloc(&pdev->dev,
++ sizeof(data->lower_en) *
++ data->model_data->num_channels,
++ GFP_KERNEL);
++ if (!data->lower_en)
++ return -ENOMEM;
++ data->id = ida_simple_get(&aspeed_adc_ida, 0, 0, GFP_KERNEL);
++ if (data->id < 0)
++ return data->id;
++ ret = devm_add_action_or_reset(data->dev, aspeed_adc_ida_remove, data);
++ if (ret)
++ return ret;
++ model_name = kasprintf(GFP_KERNEL, "%s-%d",
++ data->model_data->model_name, data->id);
+ /* Register ADC clock prescaler with source specified by device tree. */
+ spin_lock_init(&data->clk_lock);
+ snprintf(clk_parent_name, ARRAY_SIZE(clk_parent_name), "%s",
+ of_clk_get_parent_name(pdev->dev.of_node, 0));
+ snprintf(clk_name, ARRAY_SIZE(clk_name), "%s-fixed-div",
+- data->model_data->model_name);
++ model_name);
+ data->fixed_div_clk = clk_hw_register_fixed_factor(
+ &pdev->dev, clk_name, clk_parent_name, 0, 1, 2);
+ if (IS_ERR(data->fixed_div_clk))
+@@ -522,7 +712,7 @@ static int aspeed_adc_probe(struct platform_device *pdev)
+
+ if (data->model_data->need_prescaler) {
+ snprintf(clk_name, ARRAY_SIZE(clk_name), "%s-prescaler",
+- data->model_data->model_name);
++ model_name);
+ data->clk_prescaler = devm_clk_hw_register_divider(
+ &pdev->dev, clk_name, clk_parent_name, 0,
+ data->base + ASPEED_REG_CLOCK_CONTROL, 17, 15, 0,
+@@ -538,7 +728,7 @@ static int aspeed_adc_probe(struct platform_device *pdev)
+ * setting to adjust the prescaler as well.
+ */
+ snprintf(clk_name, ARRAY_SIZE(clk_name), "%s-scaler",
+- data->model_data->model_name);
++ model_name);
+ data->clk_scaler = devm_clk_hw_register_divider(
+ &pdev->dev, clk_name, clk_parent_name, scaler_flags,
+ data->base + ASPEED_REG_CLOCK_CONTROL, 0,
+@@ -627,13 +817,25 @@ static int aspeed_adc_probe(struct platform_device *pdev)
+
+ aspeed_adc_compensation(indio_dev);
+ /* Start all channels in normal mode. */
+- adc_engine_control_reg_val =
+- readl(data->base + ASPEED_REG_ENGINE_CONTROL);
+- adc_engine_control_reg_val |= ASPEED_ADC_CTRL_CHANNEL;
++ adc_engine_control_reg_val = readl(data->base + ASPEED_REG_ENGINE_CONTROL);
++ /* Disable the last channel when the controller supports battery sensing */
++ if (data->model_data->bat_sense_sup)
++ adc_engine_control_reg_val |=
++ ASPEED_ADC_CTRL_CHANNELS_ENABLE(data->model_data->num_channels - 1);
++ else
++ adc_engine_control_reg_val |=
++ ASPEED_ADC_CTRL_CHANNELS_ENABLE(data->model_data->num_channels);
+ writel(adc_engine_control_reg_val,
+ data->base + ASPEED_REG_ENGINE_CONTROL);
+-
+- indio_dev->name = data->model_data->model_name;
++ adc_engine_control_reg_val =
++ FIELD_GET(ASPEED_ADC_CTRL_CHANNEL,
++ readl(data->base + ASPEED_REG_ENGINE_CONTROL));
++ data->required_eoc_num = hweight_long(adc_engine_control_reg_val);
++ if (data->model_data->require_extra_eoc &&
++ (adc_engine_control_reg_val &
++ BIT(data->model_data->num_channels - 1)))
++ data->required_eoc_num += 12;
++ indio_dev->name = model_name;
+ indio_dev->info = &aspeed_adc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = data->battery_sensing ?
+@@ -660,6 +862,16 @@ static const struct aspeed_adc_trim_locate ast2600_adc1_trim = {
+ .field = GENMASK(7, 4),
+ };
+
++static const struct aspeed_adc_trim_locate ast2700_adc0_trim = {
++ .offset = 0x828,
++ .field = GENMASK(3, 0),
++};
++
++static const struct aspeed_adc_trim_locate ast2700_adc1_trim = {
++ .offset = 0x828,
++ .field = GENMASK(7, 4),
++};
++
+ static const struct aspeed_adc_model_data ast2400_model_data = {
+ .model_name = "ast2400-adc",
+ .vref_fixed_mv = 2500,
+@@ -668,6 +880,7 @@ static const struct aspeed_adc_model_data ast2400_model_data = {
+ .need_prescaler = true,
+ .scaler_bit_width = 10,
+ .num_channels = 16,
++ .require_extra_eoc = 0,
+ };
+
+ static const struct aspeed_adc_model_data ast2500_model_data = {
+@@ -680,6 +893,7 @@ static const struct aspeed_adc_model_data ast2500_model_data = {
+ .scaler_bit_width = 10,
+ .num_channels = 16,
+ .trim_locate = &ast2500_adc_trim,
++ .require_extra_eoc = 0,
+ };
+
+ static const struct aspeed_adc_model_data ast2600_adc0_model_data = {
+@@ -691,6 +905,7 @@ static const struct aspeed_adc_model_data ast2600_adc0_model_data = {
+ .scaler_bit_width = 16,
+ .num_channels = 8,
+ .trim_locate = &ast2600_adc0_trim,
++ .require_extra_eoc = 1,
+ };
+
+ static const struct aspeed_adc_model_data ast2600_adc1_model_data = {
+@@ -702,6 +917,29 @@ static const struct aspeed_adc_model_data ast2600_adc1_model_data = {
+ .scaler_bit_width = 16,
+ .num_channels = 8,
+ .trim_locate = &ast2600_adc1_trim,
++ .require_extra_eoc = 1,
++};
++
++static const struct aspeed_adc_model_data ast2700_adc0_model_data = {
++ .model_name = "ast2700-adc0",
++ .min_sampling_rate = 10000,
++ .max_sampling_rate = 500000,
++ .wait_init_sequence = true,
++ .bat_sense_sup = true,
++ .scaler_bit_width = 16,
++ .num_channels = 8,
++ .trim_locate = &ast2700_adc0_trim,
++};
++
++static const struct aspeed_adc_model_data ast2700_adc1_model_data = {
++ .model_name = "ast2700-adc1",
++ .min_sampling_rate = 10000,
++ .max_sampling_rate = 500000,
++ .wait_init_sequence = true,
++ .bat_sense_sup = true,
++ .scaler_bit_width = 16,
++ .num_channels = 8,
++ .trim_locate = &ast2700_adc1_trim,
+ };
+
+ static const struct of_device_id aspeed_adc_matches[] = {
+@@ -709,6 +947,8 @@ static const struct of_device_id aspeed_adc_matches[] = {
+ { .compatible = "aspeed,ast2500-adc", .data = &ast2500_model_data },
+ { .compatible = "aspeed,ast2600-adc0", .data = &ast2600_adc0_model_data },
+ { .compatible = "aspeed,ast2600-adc1", .data = &ast2600_adc1_model_data },
++ { .compatible = "aspeed,ast2700-adc0", .data = &ast2700_adc0_model_data },
++ { .compatible = "aspeed,ast2700-adc1", .data = &ast2700_adc1_model_data },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, aspeed_adc_matches);
+--
+2.34.1
+
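The ADC patch above wires each channel to IIO threshold events (rising and falling); when a bound is enabled and the raw reading crosses it, aspeed_adc_get_voltage_raw() waits one full conversion round (sample_period_ns * required_eoc_num) and re-reads the channel rather than pushing an IIO event. A minimal userspace sketch for driving those bounds through the standard IIO event sysfs attributes follows; the iio:device index and the channel number are board-specific placeholders, and the bound values are arbitrary raw ADC counts:

```
/* Sketch only: program the per-channel threshold bounds added by the
 * patch via the standard IIO event sysfs attributes.
 */
#include <stdio.h>
#include <stdlib.h>

#define ADC_DIR "/sys/bus/iio/devices/iio:device0"	/* board-specific */

static void write_attr(const char *name, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), ADC_DIR "/%s", name);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		exit(1);
	}
	fprintf(f, "%s\n", val);
	fclose(f);
}

int main(void)
{
	/* Re-sample channel 0 when its raw reading rises above 768 or
	 * falls below 128 (raw ADC counts, below the full-scale code).
	 */
	write_attr("events/in_voltage0_thresh_rising_value", "768");
	write_attr("events/in_voltage0_thresh_rising_en", "1");
	write_attr("events/in_voltage0_thresh_falling_value", "128");
	write_attr("events/in_voltage0_thresh_falling_en", "1");
	return 0;
}
```

The raw reading itself is still taken through in_voltage0_raw; the bounds only decide whether the driver delays and samples the data register a second time.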
diff --git a/recipes-kernel/linux/files/0020-Add-RTC-driver-for-ast2700.patch b/recipes-kernel/linux/files/0020-Add-RTC-driver-for-ast2700.patch
new file mode 100644
index 0000000..2070f9f
--- /dev/null
+++ b/recipes-kernel/linux/files/0020-Add-RTC-driver-for-ast2700.patch
@@ -0,0 +1,325 @@
+From f44045ddde312ce34ad8df6c28c12609e8e95aa7 Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Tue, 11 Mar 2025 15:23:47 +0800
+Subject: [PATCH] Add RTC driver for ast2700
+
+This is based on Aspeed SDK 9.05.
+
+Source:
+AspeedTech-BMC github:
+https://github.com/AspeedTech-BMC/linux/blob/aspeed-master-v6.6/
+(cherry picked from commit 769f62b7baa84d6998723b0ea60280e380183553)
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
+---
+ drivers/rtc/rtc-aspeed.c | 237 +++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 230 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/rtc/rtc-aspeed.c b/drivers/rtc/rtc-aspeed.c
+index 880b015ee..2196aaf47 100644
+--- a/drivers/rtc/rtc-aspeed.c
++++ b/drivers/rtc/rtc-aspeed.c
+@@ -10,14 +10,64 @@
+ struct aspeed_rtc {
+ struct rtc_device *rtc_dev;
+ void __iomem *base;
++ spinlock_t irq_lock; /* interrupt enable register lock */
++ struct mutex write_mutex; /* serialize registers write */
+ };
+
+ #define RTC_TIME 0x00
+ #define RTC_YEAR 0x04
++#define RTC_ALARM 0x08
+ #define RTC_CTRL 0x10
++#define RTC_ALARM_STATUS 0x14
+
+-#define RTC_UNLOCK BIT(1)
+-#define RTC_ENABLE BIT(0)
++#define RTC_ENABLE BIT(0)
++#define RTC_UNLOCK BIT(1)
++#define RTC_ALARM_MODE BIT(2)
++#define RTC_ALARM_SEC_ENABLE BIT(3)
++#define RTC_ALARM_MIN_ENABLE BIT(4)
++#define RTC_ALARM_HOUR_ENABLE BIT(5)
++#define RTC_ALARM_MDAY_ENABLE BIT(6)
++
++#define RTC_ALARM_SEC_CB_STATUS BIT(0)
++#define RTC_ALARM_MIN_STATUS BIT(1)
++#define RTC_ALARM_HOUR_STATUS BIT(2)
++#define RTC_ALARM_MDAY_STATUS BIT(3)
++
++/*
++ * enable an RTC interrupt
++ */
++static void aspeed_rtc_int_enable(struct aspeed_rtc *rtc, u32 intr)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&rtc->irq_lock, flags);
++ writel(readl(rtc->base + RTC_CTRL) | intr, rtc->base + RTC_CTRL);
++ spin_unlock_irqrestore(&rtc->irq_lock, flags);
++}
++
++/*
++ * disable an RTC interrupt
++ */
++static void aspeed_rtc_int_disable(struct aspeed_rtc *rtc, u32 intr)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&rtc->irq_lock, flags);
++ writel(readl(rtc->base + RTC_CTRL) & ~intr, rtc->base + RTC_CTRL);
++ spin_unlock_irqrestore(&rtc->irq_lock, flags);
++}
++
++/*
++ * clear the RTC alarm interrupt status
++ */
++static void aspeed_rtc_clean_alarm(struct aspeed_rtc *rtc)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&rtc->irq_lock, flags);
++ writel(readl(rtc->base + RTC_ALARM_STATUS), rtc->base + RTC_ALARM_STATUS);
++ spin_unlock_irqrestore(&rtc->irq_lock, flags);
++}
+
+ static int aspeed_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ {
+@@ -45,7 +95,7 @@ static int aspeed_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ tm->tm_mon = ((reg2 >> 0) & 0x0f) - 1;
+ tm->tm_year = year + (cent * 100) - 1900;
+
+- dev_dbg(dev, "%s %ptR", __func__, tm);
++ dev_dbg(dev, "%s %ptR\n", __func__, tm);
+
+ return 0;
+ }
+@@ -56,6 +106,8 @@ static int aspeed_rtc_set_time(struct device *dev, struct rtc_time *tm)
+ u32 reg1, reg2, ctrl;
+ int year, cent;
+
++ dev_dbg(dev, "%s %ptR\n", __func__, tm);
++
+ cent = (tm->tm_year + 1900) / 100;
+ year = tm->tm_year % 100;
+
+@@ -77,40 +129,211 @@ static int aspeed_rtc_set_time(struct device *dev, struct rtc_time *tm)
+ return 0;
+ }
+
++static int aspeed_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
++{
++ struct aspeed_rtc *rtc = dev_get_drvdata(dev);
++ unsigned int alarm_enable;
++
++ dev_dbg(dev, "%s, enabled:%x\n", __func__, enabled);
++
++ alarm_enable = RTC_ALARM_MODE | RTC_ALARM_SEC_ENABLE | RTC_ALARM_MIN_ENABLE |
++ RTC_ALARM_HOUR_ENABLE | RTC_ALARM_MDAY_ENABLE;
++ if (enabled)
++ aspeed_rtc_int_enable(rtc, alarm_enable);
++ else
++ aspeed_rtc_int_disable(rtc, alarm_enable);
++
++ return 0;
++}
++
++static int aspeed_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
++{
++ struct aspeed_rtc *rtc = dev_get_drvdata(dev);
++ u32 reg1, reg2;
++ unsigned int alarm_enable;
++ unsigned int alarm_status;
++
++ if (!(readl(rtc->base + RTC_CTRL) & RTC_ENABLE)) {
++ dev_dbg(dev, "%s failing as rtc disabled\n", __func__);
++ return -EINVAL;
++ }
++
++ do {
++ reg2 = readl(rtc->base + RTC_YEAR);
++ reg1 = readl(rtc->base + RTC_TIME);
++ } while (reg1 != readl(rtc->base + RTC_TIME));
++
++ /* read alarm value */
++ alarm->time.tm_mday = (reg1 >> 24) & 0x1f;
++ alarm->time.tm_hour = (reg1 >> 16) & 0x1f;
++ alarm->time.tm_min = (reg1 >> 8) & 0x3f;
++ alarm->time.tm_sec = (reg1 >> 0) & 0x3f;
++
++ dev_dbg(dev, "%s %ptR\n", __func__, &alarm->time);
++
++ alarm_enable = RTC_ALARM_SEC_ENABLE | RTC_ALARM_MIN_ENABLE |
++ RTC_ALARM_HOUR_ENABLE | RTC_ALARM_MDAY_ENABLE;
++ alarm_status = RTC_ALARM_SEC_CB_STATUS | RTC_ALARM_MIN_STATUS |
++ RTC_ALARM_HOUR_STATUS | RTC_ALARM_MDAY_STATUS;
++
++ /* don't allow the ALARM read to mess up ALARM_STATUS */
++ mutex_lock(&rtc->write_mutex);
++
++ /* alarm is enabled if the interrupt is enabled */
++ if (readl(rtc->base + RTC_CTRL) & alarm_enable)
++ alarm->enabled = true;
++ else
++ alarm->enabled = false;
++
++ /* alarm interrupt asserted or not */
++ if (readl(rtc->base + RTC_ALARM_STATUS) & alarm_status)
++ alarm->pending = true;
++ else
++ alarm->pending = false;
++
++ mutex_unlock(&rtc->write_mutex);
++
++ return 0;
++}
++
++static int aspeed_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
++{
++ struct aspeed_rtc *rtc = dev_get_drvdata(dev);
++ unsigned int alarm_enable;
++ u32 reg;
++
++ if (!(readl(rtc->base + RTC_CTRL) & RTC_ENABLE)) {
++ dev_dbg(dev, "%s failing as rtc disabled\n", __func__);
++ return -EINVAL;
++ }
++
++ dev_dbg(dev, "%s %ptR\n", __func__, &alarm->time);
++
++ alarm_enable = RTC_ALARM_MODE | RTC_ALARM_SEC_ENABLE | RTC_ALARM_MIN_ENABLE |
++ RTC_ALARM_HOUR_ENABLE | RTC_ALARM_MDAY_ENABLE;
++
++	/* don't allow the ALARM write to mess up ALARM_STATUS */
++ mutex_lock(&rtc->write_mutex);
++
++ /* write the new alarm time */
++ reg = (alarm->time.tm_mday << 24) | (alarm->time.tm_hour << 16) |
++ (alarm->time.tm_min << 8) | alarm->time.tm_sec;
++ writel(reg, rtc->base + RTC_ALARM);
++
++ /* alarm is enabled if the interrupt is enabled */
++ if (alarm->enabled)
++ aspeed_rtc_int_enable(rtc, alarm_enable);
++ else
++ aspeed_rtc_int_disable(rtc, alarm_enable);
++
++ mutex_unlock(&rtc->write_mutex);
++
++ return 0;
++}
++
+ static const struct rtc_class_ops aspeed_rtc_ops = {
+- .read_time = aspeed_rtc_read_time,
+- .set_time = aspeed_rtc_set_time,
++ .read_time = aspeed_rtc_read_time,
++ .set_time = aspeed_rtc_set_time,
++ .alarm_irq_enable = aspeed_rtc_alarm_irq_enable,
++ .read_alarm = aspeed_rtc_read_alarm,
++ .set_alarm = aspeed_rtc_set_alarm,
+ };
+
++static irqreturn_t aspeed_rtc_irq(int irq, void *dev_id)
++{
++ struct aspeed_rtc *rtc = dev_id;
++ unsigned int alarm_enable;
++
++ alarm_enable = RTC_ALARM_MODE | RTC_ALARM_SEC_ENABLE | RTC_ALARM_MIN_ENABLE |
++ RTC_ALARM_HOUR_ENABLE | RTC_ALARM_MDAY_ENABLE;
++ aspeed_rtc_int_disable(rtc, alarm_enable);
++ aspeed_rtc_clean_alarm(rtc);
++
++ return IRQ_HANDLED;
++}
++
+ static int aspeed_rtc_probe(struct platform_device *pdev)
+ {
+ struct aspeed_rtc *rtc;
++	int irq;
++ int rc;
++ u32 ctrl;
+
+ rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->base = devm_platform_ioremap_resource(pdev, 0);
+- if (IS_ERR(rtc->base))
++ if (IS_ERR(rtc->base)) {
++ dev_err(&pdev->dev, "cannot ioremap resource for rtc\n");
+ return PTR_ERR(rtc->base);
++ }
+
+ rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc->rtc_dev))
+ return PTR_ERR(rtc->rtc_dev);
+
++ spin_lock_init(&rtc->irq_lock);
++ mutex_init(&rtc->write_mutex);
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0)
++ return irq;
++
++ rc = devm_request_irq(&pdev->dev, irq, aspeed_rtc_irq,
++ 0, pdev->name, rtc);
++ if (rc) {
++ dev_err(&pdev->dev, "interrupt number %d is not available.\n", irq);
++ goto err;
++ }
++
+ platform_set_drvdata(pdev, rtc);
+
++ device_init_wakeup(&pdev->dev, true);
++
+ rtc->rtc_dev->ops = &aspeed_rtc_ops;
+ rtc->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_1900;
+ rtc->rtc_dev->range_max = 38814989399LL; /* 3199-12-31 23:59:59 */
+
+- return devm_rtc_register_device(rtc->rtc_dev);
++ /*
++	 * In devm_rtc_register_device(), rtc_hctosys() reads the time from
++	 * the RTC to check the hardware status; rtc_read_time() then calls
++	 * aspeed_rtc_read_time() and validates the returned rtc_time.
++	 * As a result, the RTC must be enabled and hold a valid time first.
++	 *
++	 * Unlock the RTC, initialize the time to 1970-01-01T01:01:01, then
++	 * re-lock and ensure the enable bit is set once a time is programmed.
++ */
++ ctrl = readl(rtc->base + RTC_CTRL);
++ writel(ctrl | RTC_UNLOCK, rtc->base + RTC_CTRL);
++
++ /*
++	 * Initial value: year 70, mon 0, mday 1, hour 1, min 1, sec 1.
++	 * rtc_valid_tm() checks whether the time is within the valid range.
++ */
++ writel(0x01010101, rtc->base + RTC_TIME);
++ writel(0x00134601, rtc->base + RTC_YEAR);
++
++ /* Re-lock and ensure enable is set now that a time is programmed */
++ writel(ctrl | RTC_ENABLE, rtc->base + RTC_CTRL);
++
++ rc = devm_rtc_register_device(rtc->rtc_dev);
++ if (rc) {
++ dev_err(&pdev->dev, "can't register rtc device\n");
++ goto err;
++ }
++
++ return 0;
++
++err:
++ return rc;
+ }
+
+ static const struct of_device_id aspeed_rtc_match[] = {
+ { .compatible = "aspeed,ast2400-rtc", },
+ { .compatible = "aspeed,ast2500-rtc", },
+ { .compatible = "aspeed,ast2600-rtc", },
++ { .compatible = "aspeed,ast2700-rtc", },
+ {}
+ };
+ MODULE_DEVICE_TABLE(of, aspeed_rtc_match);
+--
+2.34.1
+
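The RTC patch above adds alarm support (read_alarm, set_alarm, alarm_irq_enable) on top of the existing time accessors, latching day-of-month, hour, minute and second into RTC_ALARM and using the control register bits as the alarm enables. A minimal userspace sketch using the standard RTC character-device ioctls is shown below; /dev/rtc0 is a placeholder for however the Aspeed RTC enumerates on a given system:

```
/* Sketch only: arm the alarm through the generic RTC chardev ABI. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_wkalrm alarm;
	struct rtc_time now;
	int fd;

	fd = open("/dev/rtc0", O_RDONLY);	/* placeholder device node */
	if (fd < 0) {
		perror("open /dev/rtc0");
		return 1;
	}

	if (ioctl(fd, RTC_RD_TIME, &now) < 0) {
		perror("RTC_RD_TIME");
		close(fd);
		return 1;
	}

	/* Fire roughly one minute from now; no carry handling in this sketch. */
	memset(&alarm, 0, sizeof(alarm));
	alarm.time = now;
	alarm.time.tm_min = (now.tm_min + 1) % 60;
	alarm.enabled = 1;

	if (ioctl(fd, RTC_WKALM_SET, &alarm) < 0)
		perror("RTC_WKALM_SET");

	close(fd);
	return 0;
}
```

Because the driver only writes mday/hour/min/sec into RTC_ALARM, the month and year fields of the requested alarm are effectively ignored by the hardware.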
diff --git a/recipes-kernel/linux/files/0021-Add-USB-uhci-ehci-driver-for-ast2700.patch b/recipes-kernel/linux/files/0021-Add-USB-uhci-ehci-driver-for-ast2700.patch
new file mode 100644
index 0000000..ae43919
--- /dev/null
+++ b/recipes-kernel/linux/files/0021-Add-USB-uhci-ehci-driver-for-ast2700.patch
@@ -0,0 +1,124 @@
+From c38bfd27ed60d2aae4b62039d50eb8973deba725 Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Tue, 11 Mar 2025 16:03:31 +0800
+Subject: [PATCH] Add USB uhci ehci driver for ast2700
+
+This is based on Aspeed SDK 9.05.
+
+Source:
+AspeedTech-BMC github:
+https://github.com/AspeedTech-BMC/linux/blob/aspeed-master-v6.6/
+(cherry picked from commit 769f62b7baa84d6998723b0ea60280e380183553)
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
+---
+ drivers/usb/host/ehci-platform.c | 9 ++++++++-
+ drivers/usb/host/uhci-hcd.h | 3 ++-
+ drivers/usb/host/uhci-platform.c | 15 +++++++++++++--
+ 3 files changed, 23 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
+index 98b073185..a45f711fb 100644
+--- a/drivers/usb/host/ehci-platform.c
++++ b/drivers/usb/host/ehci-platform.c
+@@ -126,6 +126,7 @@ static struct usb_ehci_pdata ehci_platform_defaults = {
+ .power_on = ehci_platform_power_on,
+ .power_suspend = ehci_platform_power_off,
+ .power_off = ehci_platform_power_off,
++ .dma_mask_64 = 1,
+ };
+
+ /**
+@@ -297,7 +298,9 @@ static int ehci_platform_probe(struct platform_device *dev)
+ if (of_device_is_compatible(dev->dev.of_node,
+ "aspeed,ast2500-ehci") ||
+ of_device_is_compatible(dev->dev.of_node,
+- "aspeed,ast2600-ehci"))
++ "aspeed,ast2600-ehci") ||
++ of_device_is_compatible(dev->dev.of_node,
++ "aspeed,ast2700-ehci"))
+ ehci->is_aspeed = 1;
+
+ if (soc_device_match(quirk_poll_match))
+@@ -373,6 +376,10 @@ static int ehci_platform_probe(struct platform_device *dev)
+ if (err)
+ goto err_power;
+
++ /* Set 512 bytes for transmit FIFO threshold */
++ if (ehci->is_aspeed)
++ writel(((readl(hcd->regs + 0x84) & ~0xC0) | 0x80), hcd->regs + 0x84);
++
+ device_wakeup_enable(hcd->self.controller);
+ device_enable_async_suspend(hcd->self.controller);
+ platform_set_drvdata(dev, hcd);
+diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h
+index 13ee2a614..8b5057dc0 100644
+--- a/drivers/usb/host/uhci-hcd.h
++++ b/drivers/usb/host/uhci-hcd.h
+@@ -354,7 +354,7 @@ struct uhci_td {
+ * To prevent "bouncing" in the presence of electrical noise,
+ * when there are no devices attached we delay for 1 second in the
+ * RUNNING_NODEVS state before switching to the AUTO_STOPPED state.
+- *
++ *
+ * (Note that the AUTO_STOPPED state won't be necessary once the hub
+ * driver learns to autosuspend.)
+ */
+@@ -445,6 +445,7 @@ struct uhci_hcd {
+ short load[MAX_PHASE]; /* Periodic allocations */
+
+ struct clk *clk; /* (optional) clock source */
++ struct reset_control *rsts; /* (optional) clock reset */
+
+ /* Reset host controller */
+ void (*reset_hc) (struct uhci_hcd *uhci);
+diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
+index 3dec5dd3a..94d404276 100644
+--- a/drivers/usb/host/uhci-platform.c
++++ b/drivers/usb/host/uhci-platform.c
+@@ -11,6 +11,7 @@
+ #include <linux/of.h>
+ #include <linux/device.h>
+ #include <linux/platform_device.h>
++#include <linux/reset.h>
+
+ static int uhci_platform_init(struct usb_hcd *hcd)
+ {
+@@ -80,7 +81,7 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
+ */
+- ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
++ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret)
+ return ret;
+
+@@ -113,7 +114,8 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
+ }
+ if (of_device_is_compatible(np, "aspeed,ast2400-uhci") ||
+ of_device_is_compatible(np, "aspeed,ast2500-uhci") ||
+- of_device_is_compatible(np, "aspeed,ast2600-uhci")) {
++ of_device_is_compatible(np, "aspeed,ast2600-uhci") ||
++ of_device_is_compatible(np, "aspeed,ast2700-uhci")) {
+ uhci->is_aspeed = 1;
+ dev_info(&pdev->dev,
+ "Enabled Aspeed implementation workarounds\n");
+@@ -132,6 +134,15 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
+ goto err_rmr;
+ }
+
++ uhci->rsts = devm_reset_control_array_get_optional_shared(&pdev->dev);
++ if (IS_ERR(uhci->rsts)) {
++ ret = PTR_ERR(uhci->rsts);
++ goto err_clk;
++ }
++ ret = reset_control_deassert(uhci->rsts);
++ if (ret)
++ goto err_clk;
++
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_clk;
+--
+2.34.1
+
diff --git a/recipes-kernel/linux/files/0022-Add-crypto-driver-for-ast2700.patch b/recipes-kernel/linux/files/0022-Add-crypto-driver-for-ast2700.patch
new file mode 100644
index 0000000..7ed7828
--- /dev/null
+++ b/recipes-kernel/linux/files/0022-Add-crypto-driver-for-ast2700.patch
@@ -0,0 +1,4180 @@
+From 6237e8593bbed6ba5712103f44c856696204018a Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Tue, 11 Mar 2025 16:10:14 +0800
+Subject: [PATCH] Add crypto driver for ast2700
+
+This is based on Aspeed SDK 9.05.
+
+Source:
+AspeedTech-BMC github:
+https://github.com/AspeedTech-BMC/linux/blob/aspeed-master-v6.6/
+(cherry picked from commit 769f62b7baa84d6998723b0ea60280e380183553)
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
+---
+ drivers/crypto/aspeed/Kconfig | 57 +-
+ drivers/crypto/aspeed/Makefile | 9 +-
+ drivers/crypto/aspeed/aspeed-ecdsa.c | 756 ++++++++++++++++++
+ drivers/crypto/aspeed/aspeed-ecdsa.h | 116 +++
+ drivers/crypto/aspeed/aspeed-hace-crypto.c | 182 ++++-
+ drivers/crypto/aspeed/aspeed-hace-hash.c | 260 +++---
+ drivers/crypto/aspeed/aspeed-hace.c | 176 ++---
+ drivers/crypto/aspeed/aspeed-hace.h | 39 +-
+ drivers/crypto/aspeed/aspeed-rsss-hash.c | 877 +++++++++++++++++++++
+ drivers/crypto/aspeed/aspeed-rsss-rsa.c | 608 ++++++++++++++
+ drivers/crypto/aspeed/aspeed-rsss.c | 190 +++++
+ drivers/crypto/aspeed/aspeed-rsss.h | 270 +++++++
+ 12 files changed, 3270 insertions(+), 270 deletions(-)
+ create mode 100644 drivers/crypto/aspeed/aspeed-ecdsa.c
+ create mode 100644 drivers/crypto/aspeed/aspeed-ecdsa.h
+ create mode 100644 drivers/crypto/aspeed/aspeed-rsss-hash.c
+ create mode 100644 drivers/crypto/aspeed/aspeed-rsss-rsa.c
+ create mode 100644 drivers/crypto/aspeed/aspeed-rsss.c
+ create mode 100644 drivers/crypto/aspeed/aspeed-rsss.h
+
+diff --git a/drivers/crypto/aspeed/Kconfig b/drivers/crypto/aspeed/Kconfig
+index db6c5b4cd..8c0a17742 100644
+--- a/drivers/crypto/aspeed/Kconfig
++++ b/drivers/crypto/aspeed/Kconfig
+@@ -1,26 +1,32 @@
+ config CRYPTO_DEV_ASPEED
+- tristate "Support for Aspeed cryptographic engine driver"
++ bool "Support for Aspeed cryptographic engine driver"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ select CRYPTO_ENGINE
+ help
+- Hash and Crypto Engine (HACE) is designed to accelerate the
+- throughput of hash data digest, encryption and decryption.
++ Say Y here to get to see options for Aspeed hardware crypto devices
+
+- Select y here to have support for the cryptographic driver
+- available on Aspeed SoC.
++if CRYPTO_DEV_ASPEED
+
+ config CRYPTO_DEV_ASPEED_DEBUG
+ bool "Enable Aspeed crypto debug messages"
+- depends on CRYPTO_DEV_ASPEED
+ help
+ Print Aspeed crypto debugging messages if you use this
+ option to ask for those messages.
+ Avoid enabling this option for production build to
+ minimize driver timing.
+
++config CRYPTO_DEV_ASPEED_HACE
++ tristate "Enable Aspeed Hash & Crypto Engine (HACE) Engine"
++ help
++ Hash and Crypto Engine (HACE) is designed to accelerate the
++ throughput of hash data digest, encryption and decryption.
++
++ Select y here to have support for the cryptographic driver
++ available on Aspeed SoC.
++
+ config CRYPTO_DEV_ASPEED_HACE_HASH
+ bool "Enable Aspeed Hash & Crypto Engine (HACE) hash"
+- depends on CRYPTO_DEV_ASPEED
++ depends on CRYPTO_DEV_ASPEED_HACE
+ select CRYPTO_SHA1
+ select CRYPTO_SHA256
+ select CRYPTO_SHA512
+@@ -33,7 +39,7 @@ config CRYPTO_DEV_ASPEED_HACE_HASH
+
+ config CRYPTO_DEV_ASPEED_HACE_CRYPTO
+ bool "Enable Aspeed Hash & Crypto Engine (HACE) crypto"
+- depends on CRYPTO_DEV_ASPEED
++ depends on CRYPTO_DEV_ASPEED_HACE
+ select CRYPTO_AES
+ select CRYPTO_DES
+ select CRYPTO_ECB
+@@ -48,12 +54,41 @@ config CRYPTO_DEV_ASPEED_HACE_CRYPTO
+ with ECB/CBC/CFB/OFB/CTR options.
+
+ config CRYPTO_DEV_ASPEED_ACRY
+- bool "Enable Aspeed ACRY RSA Engine"
+- depends on CRYPTO_DEV_ASPEED
+- select CRYPTO_ENGINE
++ tristate "Enable Aspeed ACRY RSA Engine"
++ depends on MACH_ASPEED_G6
+ select CRYPTO_RSA
+ help
+ Select here to enable Aspeed ECC/RSA Engine (ACRY)
+ RSA driver.
+ Supports 256 bits to 4096 bits RSA encryption/decryption
+ and signature/verification.
++
++config CRYPTO_DEV_ASPEED_RSSS
++ tristate "Enable Aspeed RSSS Engine"
++ depends on ARCH_ASPEED
++ select CRYPTO_RSA
++ select CRYPTO_SHA3
++ select CRYPTO_SM3
++ select CRYPTO_SM4
++ help
++ Select here to enable Aspeed RSSS Engine driver.
++ Supports RSA 512 to 4096 bits encryption/decryption and
++ signature/verification, SHA3-224/256/384/512 and XOF of
++ SHAKE 128/256, SM3 Hash crypto, SM4 ECB/CBC/CFB/OFB/CTR
++ cryptographic algorithms.
++	  It is a new hardware design introduced with the ast2700,
++	  using a simpler SRAM layout.
++
++config CRYPTO_DEV_ASPEED_ECDSA
++ tristate "Enable Aspeed ECDSA Engine"
++ depends on ARCH_ASPEED
++ select CRYPTO_ECC
++ select CRYPTO_ECDSA
++ select CRYPTO_AKCIPHER
++ help
++ Select here to enable Aspeed ECC Engine for ECDSA driver.
++ Supports ECDSA (Elliptic Curve Digital Signature Algorithm)
++ using curves P-256, P-384.
++ Only signature verification is implemented.
++
++endif #CRYPTO_DEV_ASPEED
+diff --git a/drivers/crypto/aspeed/Makefile b/drivers/crypto/aspeed/Makefile
+index 15862752c..8fd14d64c 100644
+--- a/drivers/crypto/aspeed/Makefile
++++ b/drivers/crypto/aspeed/Makefile
+@@ -1,11 +1,14 @@
+ hace-hash-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH) := aspeed-hace-hash.o
+ hace-crypto-$(CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO) := aspeed-hace-crypto.o
+
+-obj-$(CONFIG_CRYPTO_DEV_ASPEED) += aspeed_crypto.o
++obj-$(CONFIG_CRYPTO_DEV_ASPEED_HACE) += aspeed_crypto.o
+ aspeed_crypto-objs := aspeed-hace.o \
+ $(hace-hash-y) \
+ $(hace-crypto-y)
+
+-aspeed_acry-$(CONFIG_CRYPTO_DEV_ASPEED_ACRY) += aspeed-acry.o
++obj-$(CONFIG_CRYPTO_DEV_ASPEED_ACRY) += aspeed-acry.o
+
+-obj-$(CONFIG_CRYPTO_DEV_ASPEED) += $(aspeed_acry-y)
++obj-$(CONFIG_CRYPTO_DEV_ASPEED_RSSS) += aspeed_rsss.o
++aspeed_rsss-objs := aspeed-rsss.o aspeed-rsss-rsa.o aspeed-rsss-hash.o
++
++obj-$(CONFIG_CRYPTO_DEV_ASPEED_ECDSA) += aspeed-ecdsa.o
+diff --git a/drivers/crypto/aspeed/aspeed-ecdsa.c b/drivers/crypto/aspeed/aspeed-ecdsa.c
+new file mode 100644
+index 000000000..333652e4b
+--- /dev/null
++++ b/drivers/crypto/aspeed/aspeed-ecdsa.c
+@@ -0,0 +1,756 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright 2023 Aspeed Technology Inc.
++ */
++
++#include <linux/clk.h>
++#include <linux/reset.h>
++#include <linux/device.h>
++#include <linux/dma-mapping.h>
++#include <linux/dmapool.h>
++#include <linux/module.h>
++#include <linux/asn1_decoder.h>
++#include <linux/scatterlist.h>
++#include <linux/iopoll.h>
++#include <linux/interrupt.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
++#include <crypto/akcipher.h>
++#include <crypto/ecdh.h>
++#include <crypto/engine.h>
++#include <crypto/internal/akcipher.h>
++#include <crypto/internal/ecc.h>
++#include <crypto/sha2.h>
++
++#include "aspeed-ecdsa.h"
++
++//#define ASPEED_ECDSA_IRQ_MODE
++
++static int aspeed_ecdsa_self_test(struct aspeed_ecdsa_dev *ecdsa_dev)
++{
++ u32 val;
++
++ ast_write(ecdsa_dev, ECC_EN, ASPEED_ECC_CTRL_REG);
++ val = ast_read(ecdsa_dev, ASPEED_ECC_CTRL_REG);
++ if (val != ECC_EN)
++ return -EIO;
++
++ ast_write(ecdsa_dev, 0x0, ASPEED_ECC_CTRL_REG);
++ val = ast_read(ecdsa_dev, ASPEED_ECC_CTRL_REG);
++ if (val)
++ return -EIO;
++
++ return 0;
++}
++
++static inline struct akcipher_request *
++ akcipher_request_cast(struct crypto_async_request *req)
++{
++ return container_of(req, struct akcipher_request, base);
++}
++
++#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
++static void hexdump(const char *name, unsigned char *buf, unsigned int len)
++{
++ pr_info("%s\n", name);
++ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
++ 16, 1,
++ buf, len, false);
++}
++#else
++static void hexdump(const char *name, unsigned char *buf, unsigned int len)
++{
++ /* empty */
++}
++#endif
++
++static void buff_reverse(u8 *dst, u8 *src, int len)
++{
++ for (int i = 0; i < len; i++)
++ dst[len - i - 1] = src[i];
++}
++
++static bool aspeed_ecdsa_need_fallback(struct aspeed_ecc_ctx *ctx, int d_len)
++{
++ int curve_id = ctx->curve_id;
++
++ switch (curve_id) {
++ case ECC_CURVE_NIST_P256:
++ if (d_len != SHA256_DIGEST_SIZE)
++ return true;
++ break;
++ case ECC_CURVE_NIST_P384:
++ if (d_len != SHA384_DIGEST_SIZE)
++ return true;
++ break;
++ }
++
++ return false;
++}
++
++#ifndef ASPEED_ECDSA_IRQ_MODE
++static int aspeed_ecdsa_wait_complete(struct aspeed_ecdsa_dev *ecdsa_dev)
++{
++ struct aspeed_engine_ecdsa *ecdsa_engine = &ecdsa_dev->ecdsa_engine;
++ u32 sts;
++ int ret;
++
++ ret = readl_poll_timeout(ecdsa_dev->regs + ASPEED_ECC_STS_REG, sts,
++ ((sts & ECC_IDLE) == ECC_IDLE),
++ ASPEED_ECC_POLLING_TIME,
++ ASPEED_ECC_TIMEOUT * 10);
++ if (ret) {
++ dev_err(ecdsa_dev->dev, "ECC engine wrong status\n");
++ return -EIO;
++ }
++
++ sts = ast_read(ecdsa_dev, ASPEED_ECC_STS_REG) & ECC_VERIFY_PASS;
++ if (sts == ECC_VERIFY_PASS) {
++ ecdsa_engine->results = 0;
++ AST_DBG(ecdsa_dev, "Verify PASS !\n");
++
++ } else {
++ ecdsa_engine->results = -EKEYREJECTED;
++ AST_DBG(ecdsa_dev, "Verify FAILED !\n");
++ }
++
++ /* Stop ECDSA engine */
++ if (ecdsa_engine->flags & CRYPTO_FLAGS_BUSY)
++ tasklet_schedule(&ecdsa_engine->done_task);
++ else
++ dev_err(ecdsa_dev->dev, "ECDSA no active requests.\n");
++
++ return ecdsa_engine->results;
++}
++#endif
++
++static int aspeed_hw_trigger(struct aspeed_ecdsa_dev *ecdsa_dev)
++{
++ ast_write(ecdsa_dev, 0x1, ASPEED_ECC_ECDSA_VERIFY);
++
++ ast_write(ecdsa_dev, ECC_EN, ASPEED_ECC_CMD_REG);
++ ast_write(ecdsa_dev, 0x0, ASPEED_ECC_CMD_REG);
++
++#ifdef ASPEED_ECDSA_IRQ_MODE
++ return 0;
++#else
++ return aspeed_ecdsa_wait_complete(ecdsa_dev);
++#endif
++}
++
++static int _aspeed_ecdsa_verify(struct aspeed_ecc_ctx *ctx, const u64 *hash,
++ const u64 *r, const u64 *s)
++{
++ int nbytes = ctx->curve->g.ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
++ const struct ecc_curve *curve = ctx->curve;
++ void __iomem *base = ctx->ecdsa_dev->regs;
++ unsigned int ndigits = curve->g.ndigits;
++ u8 *data, *buf;
++
++ /* 0 < r < n and 0 < s < n */
++ if (vli_is_zero(r, ndigits) || vli_cmp(r, curve->n, ndigits) >= 0 ||
++ vli_is_zero(s, ndigits) || vli_cmp(s, curve->n, ndigits) >= 0)
++ return -EBADMSG;
++
++ /* hash is given */
++ AST_DBG(ctx->ecdsa_dev, "hash : %016llx %016llx ... %016llx\n",
++ hash[ndigits - 1], hash[ndigits - 2], hash[0]);
++
++ data = vmalloc(nbytes);
++ if (!data)
++ return -ENOMEM;
++
++ buf = (u8 *)r;
++ hexdump("Dump r:", buf, nbytes);
++
++ buff_reverse(data, (u8 *)r, nbytes);
++ memcpy_toio(base + ASPEED_ECC_SIGN_R_REG, data, nbytes);
++
++ buf = (u8 *)s;
++ hexdump("Dump s:", buf, nbytes);
++
++ buff_reverse(data, (u8 *)s, nbytes);
++ memcpy_toio(base + ASPEED_ECC_SIGN_S_REG, data, nbytes);
++
++ buf = (u8 *)hash;
++ hexdump("Dump m:", buf, nbytes);
++
++ buff_reverse(data, (u8 *)hash, nbytes);
++ memcpy_toio(base + ASPEED_ECC_MESSAGE_REG, data, nbytes);
++
++ vfree(data);
++
++ return aspeed_hw_trigger(ctx->ecdsa_dev);
++}
++
++static int aspeed_ecdsa_handle_queue(struct aspeed_ecdsa_dev *ecdsa_dev,
++ struct akcipher_request *req)
++{
++ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
++ struct aspeed_ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
++ int ret;
++
++ if (aspeed_ecdsa_need_fallback(ctx, req->dst_len)) {
++ AST_DBG(ctx->ecdsa_dev, "SW fallback\n");
++
++ akcipher_request_set_tfm(req, ctx->fallback_tfm);
++ ret = crypto_akcipher_verify(req);
++ akcipher_request_set_tfm(req, tfm);
++
++ AST_DBG(ctx->ecdsa_dev, "SW verify...ret:0x%x\n", ret);
++
++ return ret;
++ }
++
++ return crypto_transfer_akcipher_request_to_engine(ecdsa_dev->crypt_engine_ecdsa, req);
++}
++
++static int aspeed_ecdsa_trigger(struct aspeed_ecdsa_dev *ecdsa_dev)
++{
++ struct aspeed_engine_ecdsa *ecdsa_engine = &ecdsa_dev->ecdsa_engine;
++ struct akcipher_request *req = ecdsa_engine->req;
++ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
++ struct aspeed_ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
++ size_t keylen = ctx->curve->g.ndigits * sizeof(u64);
++ struct ecdsa_signature_ctx sig_ctx = {
++ .curve = ctx->curve,
++ };
++ u8 rawhash[ECC_MAX_BYTES];
++ u64 hash[ECC_MAX_DIGITS];
++ unsigned char *buffer;
++ ssize_t diff;
++ int ret;
++
++ if (unlikely(!ctx->pub_key_set))
++ return -EINVAL;
++
++ buffer = kmalloc(req->src_len + req->dst_len, GFP_KERNEL);
++ if (!buffer)
++ return -ENOMEM;
++
++ /* Input src: signature + digest */
++ sg_pcopy_to_buffer(req->src, sg_nents_for_len(req->src, req->src_len + req->dst_len),
++ buffer, req->src_len + req->dst_len, 0);
++
++ ret = asn1_ber_decoder(&ecdsasignature_decoder, &sig_ctx,
++ buffer, req->src_len);
++ if (ret < 0)
++ goto error;
++
++	/* if the hash is shorter, we add leading zeros to fit to ndigits */
++ diff = keylen - req->dst_len;
++ if (diff >= 0) {
++ if (diff)
++ memset(rawhash, 0, diff);
++ memcpy(&rawhash[diff], buffer + req->src_len, req->dst_len);
++ } else if (diff < 0) {
++ /* given hash is longer, we take the left-most bytes */
++ memcpy(&rawhash, buffer + req->src_len, keylen);
++ }
++
++ ecc_swap_digits((u64 *)rawhash, hash, ctx->curve->g.ndigits);
++
++ ret = _aspeed_ecdsa_verify(ctx, hash, sig_ctx.r, sig_ctx.s);
++
++error:
++ kfree(buffer);
++
++ return ret;
++}
++
++/*
++ * Verify an ECDSA signature.
++ */
++static int aspeed_ecdsa_verify(struct akcipher_request *req)
++{
++ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
++ struct aspeed_ecc_ctx *ctx = akcipher_tfm_ctx(cipher);
++ struct aspeed_ecdsa_dev *ecdsa_dev = ctx->ecdsa_dev;
++
++ ctx->trigger = aspeed_ecdsa_trigger;
++
++ return aspeed_ecdsa_handle_queue(ecdsa_dev, req);
++}
++
++static int aspeed_ecdsa_ecc_ctx_init(struct aspeed_ecc_ctx *ctx, unsigned int curve_id)
++{
++ void __iomem *base = ctx->ecdsa_dev->regs;
++ u8 *data, *buf;
++ u32 ctrl;
++ int nbytes;
++
++ ctx->curve_id = curve_id;
++ ctx->curve = ecc_get_curve(curve_id);
++ if (!ctx->curve)
++ return -EINVAL;
++
++ nbytes = ctx->curve->g.ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
++
++ switch (curve_id) {
++ case ECC_CURVE_NIST_P256:
++ AST_DBG(ctx->ecdsa_dev, "curve ECC_CURVE_NIST_P256\n");
++ ctrl = ECDSA_256_EN;
++ break;
++ case ECC_CURVE_NIST_P384:
++ AST_DBG(ctx->ecdsa_dev, "curve ECC_CURVE_NIST_P384\n");
++ ctrl = ECDSA_384_EN;
++ break;
++ }
++
++ ast_write(ctx->ecdsa_dev, ECC_EN | ctrl, ASPEED_ECC_CTRL_REG);
++
++ /* Initial Curve: ecc point/p/a/n */
++ data = vmalloc(nbytes);
++ if (!data)
++ return -ENOMEM;
++
++ buf = (u8 *)ctx->curve->g.x;
++ hexdump("Dump Gx:", buf, nbytes);
++
++ buff_reverse(data, (u8 *)ctx->curve->g.x, nbytes);
++ memcpy_toio(base + ASPEED_ECC_PAR_GX_REG, data, nbytes);
++
++ buf = (u8 *)ctx->curve->g.y;
++ hexdump("Dump Gy:", buf, nbytes);
++
++ buff_reverse(data, (u8 *)ctx->curve->g.y, nbytes);
++ memcpy_toio(base + ASPEED_ECC_PAR_GY_REG, data, nbytes);
++
++ buf = (u8 *)ctx->curve->p;
++ hexdump("Dump P:", buf, nbytes);
++
++ buff_reverse(data, (u8 *)ctx->curve->p, nbytes);
++ memcpy_toio(base + ASPEED_ECC_PAR_P_REG, data, nbytes);
++
++ buf = (u8 *)ctx->curve->a;
++ hexdump("Dump A:", buf, nbytes);
++
++ buff_reverse(data, (u8 *)ctx->curve->a, nbytes);
++ memcpy_toio(base + ASPEED_ECC_PAR_A_REG, data, nbytes);
++
++ buf = (u8 *)ctx->curve->n;
++ hexdump("Dump N:", buf, nbytes);
++
++ buff_reverse(data, (u8 *)ctx->curve->n, nbytes);
++ memcpy_toio(base + ASPEED_ECC_PAR_N_REG, data, nbytes);
++
++ vfree(data);
++ return 0;
++}
++
++static void aspeed_ecdsa_ecc_ctx_deinit(struct aspeed_ecc_ctx *ctx)
++{
++ ctx->pub_key_set = false;
++}
++
++static void aspeed_ecdsa_ecc_ctx_reset(struct aspeed_ecc_ctx *ctx)
++{
++ ctx->pub_key = ECC_POINT_INIT(ctx->x, ctx->y,
++ ctx->curve->g.ndigits);
++}
++
++/*
++ * Set the public key given the raw uncompressed key data from an X509
++ * certificate. The key data contain the concatenated X and Y coordinates of
++ * the public key.
++ */
++static int aspeed_ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key,
++ unsigned int keylen)
++{
++ struct aspeed_ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
++ int nbytes = ctx->curve->g.ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
++ void __iomem *base = ctx->ecdsa_dev->regs;
++ const unsigned char *d = key;
++ const u64 *digits = (const u64 *)&d[1];
++ unsigned int ndigits;
++ u8 *data, *buf;
++ int ret;
++
++ ret = crypto_akcipher_set_pub_key(ctx->fallback_tfm, key, keylen);
++ if (ret)
++ return ret;
++
++ aspeed_ecdsa_ecc_ctx_reset(ctx);
++
++ if (keylen < 1 || (((keylen - 1) >> 1) % sizeof(u64)) != 0)
++ return -EINVAL;
++ /* we only accept uncompressed format indicated by '4' */
++ if (d[0] != 4)
++ return -EINVAL;
++
++ keylen--;
++ ndigits = (keylen >> 1) / sizeof(u64);
++ if (ndigits != ctx->curve->g.ndigits)
++ return -EINVAL;
++
++ ecc_swap_digits(digits, ctx->pub_key.x, ndigits);
++ ecc_swap_digits(&digits[ndigits], ctx->pub_key.y, ndigits);
++ ret = ecc_is_pubkey_valid_full(ctx->curve, &ctx->pub_key);
++
++ /* Set public key: Qx/Qy */
++ data = vmalloc(nbytes);
++ if (!data)
++ return -ENOMEM;
++
++ buf = (u8 *)ctx->pub_key.x;
++ hexdump("Dump Qx:", buf, nbytes);
++
++ buff_reverse(data, (u8 *)ctx->pub_key.x, nbytes);
++ memcpy_toio(base + ASPEED_ECC_PAR_QX_REG, data, nbytes);
++
++ buf = (u8 *)ctx->pub_key.y;
++ hexdump("Dump Qy:", buf, nbytes);
++
++ buff_reverse(data, (u8 *)ctx->pub_key.y, nbytes);
++ memcpy_toio(base + ASPEED_ECC_PAR_QY_REG, data, nbytes);
++
++ ctx->pub_key_set = ret == 0;
++
++ vfree(data);
++
++ return ret;
++}
++
++static void aspeed_ecdsa_exit_tfm(struct crypto_akcipher *tfm)
++{
++ struct aspeed_ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
++
++ aspeed_ecdsa_ecc_ctx_deinit(ctx);
++
++ crypto_free_akcipher(ctx->fallback_tfm);
++}
++
++static unsigned int aspeed_ecdsa_max_size(struct crypto_akcipher *tfm)
++{
++ struct aspeed_ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
++
++ return ctx->pub_key.ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
++}
++
++static int aspeed_ecdsa_nist_p384_init_tfm(struct crypto_akcipher *tfm)
++{
++ struct aspeed_ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
++ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
++ const char *name = crypto_tfm_alg_name(&tfm->base);
++ struct aspeed_ecdsa_alg *ecdsa_alg;
++
++ ecdsa_alg = container_of(alg, struct aspeed_ecdsa_alg, akcipher.base);
++
++ ctx->ecdsa_dev = ecdsa_alg->ecdsa_dev;
++
++ AST_DBG(ctx->ecdsa_dev, "\n");
++ ctx->fallback_tfm = crypto_alloc_akcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
++ if (IS_ERR(ctx->fallback_tfm)) {
++ dev_err(ctx->ecdsa_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
++ name, PTR_ERR(ctx->fallback_tfm));
++ return PTR_ERR(ctx->fallback_tfm);
++ }
++
++ return aspeed_ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P384);
++}
++
++static int aspeed_ecdsa_nist_p256_init_tfm(struct crypto_akcipher *tfm)
++{
++ struct aspeed_ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
++ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
++ const char *name = crypto_tfm_alg_name(&tfm->base);
++ struct aspeed_ecdsa_alg *ecdsa_alg;
++
++ ecdsa_alg = container_of(alg, struct aspeed_ecdsa_alg, akcipher.base);
++
++ ctx->ecdsa_dev = ecdsa_alg->ecdsa_dev;
++
++ AST_DBG(ctx->ecdsa_dev, "\n");
++ ctx->fallback_tfm = crypto_alloc_akcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
++ if (IS_ERR(ctx->fallback_tfm)) {
++ dev_err(ctx->ecdsa_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
++ name, PTR_ERR(ctx->fallback_tfm));
++ return PTR_ERR(ctx->fallback_tfm);
++ }
++
++ return aspeed_ecdsa_ecc_ctx_init(ctx, ECC_CURVE_NIST_P256);
++}
++
++static int aspeed_ecdsa_complete(struct aspeed_ecdsa_dev *ecdsa_dev)
++{
++ struct aspeed_engine_ecdsa *ecdsa_engine = &ecdsa_dev->ecdsa_engine;
++ struct akcipher_request *req = ecdsa_engine->req;
++ int results = ecdsa_engine->results;
++
++ ecdsa_engine->flags &= ~CRYPTO_FLAGS_BUSY;
++
++ crypto_finalize_akcipher_request(ecdsa_dev->crypt_engine_ecdsa, req, results);
++
++ return results;
++}
++
++static int aspeed_ecdsa_do_request(struct crypto_engine *engine, void *areq)
++{
++ struct akcipher_request *req = akcipher_request_cast(areq);
++ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
++ struct aspeed_ecc_ctx *ctx = akcipher_tfm_ctx(cipher);
++ struct aspeed_ecdsa_dev *ecdsa_dev = ctx->ecdsa_dev;
++ struct aspeed_engine_ecdsa *ecdsa_engine;
++
++ ecdsa_engine = &ecdsa_dev->ecdsa_engine;
++ ecdsa_engine->req = req;
++ ecdsa_engine->flags |= CRYPTO_FLAGS_BUSY;
++ ecdsa_engine->resume = aspeed_ecdsa_complete;
++
++ return ctx->trigger(ecdsa_dev);
++}
++
++static void aspeed_ecdsa_done_task(unsigned long data)
++{
++ struct aspeed_ecdsa_dev *ecdsa_dev = (struct aspeed_ecdsa_dev *)data;
++ struct aspeed_engine_ecdsa *ecdsa_engine = &ecdsa_dev->ecdsa_engine;
++ u32 ctrl;
++
++ /* Reset engine */
++ ctrl = ast_read(ecdsa_dev, ASPEED_ECC_CTRL_REG);
++ ast_write(ecdsa_dev, 0, ASPEED_ECC_CTRL_REG);
++
++ /* Memory barrier to ensure ecc ctrl is reset. */
++ mb();
++ ast_write(ecdsa_dev, ctrl, ASPEED_ECC_CTRL_REG);
++
++ (void)ecdsa_engine->resume(ecdsa_dev);
++}
++
++static struct aspeed_ecdsa_alg aspeed_ecdsa_nist_p256 = {
++ .akcipher.base = {
++ .verify = aspeed_ecdsa_verify,
++ .set_pub_key = aspeed_ecdsa_set_pub_key,
++ .max_size = aspeed_ecdsa_max_size,
++ .init = aspeed_ecdsa_nist_p256_init_tfm,
++ .exit = aspeed_ecdsa_exit_tfm,
++ .base = {
++ .cra_name = "ecdsa-nist-p256",
++ .cra_driver_name = "aspeed-ecdsa-nist-p256",
++ .cra_priority = 300,
++ .cra_module = THIS_MODULE,
++ .cra_ctxsize = sizeof(struct aspeed_ecc_ctx),
++ .cra_flags = CRYPTO_ALG_TYPE_AKCIPHER |
++ CRYPTO_ALG_KERN_DRIVER_ONLY |
++ CRYPTO_ALG_NEED_FALLBACK,
++ },
++ },
++ .akcipher.op = {
++ .do_one_request = aspeed_ecdsa_do_request,
++ },
++};
++
++static struct aspeed_ecdsa_alg aspeed_ecdsa_nist_p384 = {
++ .akcipher.base = {
++ .verify = aspeed_ecdsa_verify,
++ .set_pub_key = aspeed_ecdsa_set_pub_key,
++ .max_size = aspeed_ecdsa_max_size,
++ .init = aspeed_ecdsa_nist_p384_init_tfm,
++ .exit = aspeed_ecdsa_exit_tfm,
++ .base = {
++ .cra_name = "ecdsa-nist-p384",
++ .cra_driver_name = "aspeed-ecdsa-nist-p384",
++ .cra_priority = 300,
++ .cra_module = THIS_MODULE,
++ .cra_ctxsize = sizeof(struct aspeed_ecc_ctx),
++ .cra_flags = CRYPTO_ALG_TYPE_AKCIPHER |
++ CRYPTO_ALG_KERN_DRIVER_ONLY |
++ CRYPTO_ALG_NEED_FALLBACK,
++ },
++ },
++ .akcipher.op = {
++ .do_one_request = aspeed_ecdsa_do_request,
++ },
++};
++
++static int aspeed_ecdsa_register(struct aspeed_ecdsa_dev *ecdsa_dev)
++{
++ int rc;
++
++ aspeed_ecdsa_nist_p256.ecdsa_dev = ecdsa_dev;
++ rc = crypto_engine_register_akcipher(&aspeed_ecdsa_nist_p256.akcipher);
++ if (rc)
++ goto nist_p256_error;
++
++ aspeed_ecdsa_nist_p384.ecdsa_dev = ecdsa_dev;
++ rc = crypto_engine_register_akcipher(&aspeed_ecdsa_nist_p384.akcipher);
++ if (rc)
++ goto nist_p384_error;
++
++ return 0;
++
++nist_p384_error:
++ crypto_engine_unregister_akcipher(&aspeed_ecdsa_nist_p256.akcipher);
++
++nist_p256_error:
++ return rc;
++}
++
++static void aspeed_ecdsa_unregister(struct aspeed_ecdsa_dev *ecdsa_dev)
++{
++ crypto_engine_unregister_akcipher(&aspeed_ecdsa_nist_p256.akcipher);
++ crypto_engine_unregister_akcipher(&aspeed_ecdsa_nist_p384.akcipher);
++}
++
++#ifdef ASPEED_ECDSA_IRQ_MODE
++/* ecdsa interrupt service routine. */
++static irqreturn_t aspeed_ecdsa_irq(int irq, void *dev)
++{
++ struct aspeed_ecdsa_dev *ecdsa_dev = (struct aspeed_ecdsa_dev *)dev;
++ struct aspeed_engine_ecdsa *ecdsa_engine = &ecdsa_dev->ecdsa_engine;
++ u32 sts;
++
++ sts = ast_read(ecdsa_dev, ASPEED_ECC_INT_STS);
++ ast_write(ecdsa_dev, sts, ASPEED_ECC_INT_STS);
++
++ AST_DBG(ecdsa_dev, "irq sts:0x%x\n", sts);
++
++ sts = ast_read(ecdsa_dev, ASPEED_ECC_STS_REG) & ECC_VERIFY_PASS;
++ if (sts == ECC_VERIFY_PASS) {
++ AST_DBG(ecdsa_dev, "Verify PASS !\n");
++
++ ecdsa_engine->results = 0;
++ /* Stop ECDSA engine */
++ if (ecdsa_engine->flags & CRYPTO_FLAGS_BUSY)
++ tasklet_schedule(&ecdsa_engine->done_task);
++ else
++ dev_err(ecdsa_dev->dev, "ECDSA no active requests.\n");
++
++ } else {
++ ecdsa_engine->results = -EKEYREJECTED;
++ AST_DBG(ecdsa_dev, "Verify FAILED !\n");
++ }
++
++ return IRQ_HANDLED;
++}
++#endif
++
++static const struct of_device_id aspeed_ecdsa_of_matches[] = {
++ { .compatible = "aspeed,ast2700-ecdsa", },
++ {},
++};
++
++static int aspeed_ecdsa_probe(struct platform_device *pdev)
++{
++ struct aspeed_engine_ecdsa *ecdsa_engine;
++ struct aspeed_ecdsa_dev *ecdsa_dev;
++ struct device *dev = &pdev->dev;
++ int rc;
++
++ ecdsa_dev = devm_kzalloc(dev, sizeof(struct aspeed_ecdsa_dev),
++ GFP_KERNEL);
++ if (!ecdsa_dev)
++ return -ENOMEM;
++
++ ecdsa_dev->dev = dev;
++
++ platform_set_drvdata(pdev, ecdsa_dev);
++
++ ecdsa_dev->regs = devm_platform_ioremap_resource(pdev, 0);
++ if (IS_ERR(ecdsa_dev->regs))
++ return PTR_ERR(ecdsa_dev->regs);
++
++#ifdef ASPEED_ECDSA_IRQ_MODE
++ /* Get irq number and register it */
++ ecdsa_dev->irq = platform_get_irq(pdev, 0);
++ if (ecdsa_dev->irq < 0)
++ return -ENXIO;
++
++ rc = devm_request_irq(dev, ecdsa_dev->irq, aspeed_ecdsa_irq, 0,
++ dev_name(dev), ecdsa_dev);
++ if (rc) {
++ dev_err(dev, "Failed to request irq.\n");
++ return rc;
++ }
++
++ /* Enable interrupt */
++ ast_write(ecdsa_dev, 0x1, ASPEED_ECC_INT_EN);
++#endif
++
++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
++ if (rc) {
++ dev_warn(&pdev->dev, "No suitable DMA available\n");
++ return rc;
++ }
++
++ ecdsa_dev->clk = devm_clk_get_enabled(dev, NULL);
++ if (IS_ERR(ecdsa_dev->clk)) {
++ dev_err(dev, "Failed to get ecdsa clk\n");
++ return PTR_ERR(ecdsa_dev->clk);
++ }
++
++ ecdsa_dev->rst = devm_reset_control_get_shared(dev, NULL);
++ if (IS_ERR(ecdsa_dev->rst)) {
++ dev_err(dev, "Failed to get ecdsa reset\n");
++ return PTR_ERR(ecdsa_dev->rst);
++ }
++
++ rc = reset_control_deassert(ecdsa_dev->rst);
++ if (rc) {
++ dev_err(dev, "Deassert ecdsa reset failed\n");
++ return rc;
++ }
++
++ ecdsa_engine = &ecdsa_dev->ecdsa_engine;
++
++ /* Initialize crypto hardware engine structure for ECDSA */
++ ecdsa_dev->crypt_engine_ecdsa = crypto_engine_alloc_init(ecdsa_dev->dev, true);
++ if (!ecdsa_dev->crypt_engine_ecdsa) {
++ rc = -ENOMEM;
++ goto end;
++ }
++
++ rc = crypto_engine_start(ecdsa_dev->crypt_engine_ecdsa);
++ if (rc)
++ goto err_engine_ecdsa_start;
++
++ tasklet_init(&ecdsa_engine->done_task, aspeed_ecdsa_done_task,
++ (unsigned long)ecdsa_dev);
++
++ /* Self-test */
++ rc = aspeed_ecdsa_self_test(ecdsa_dev);
++ if (rc)
++ goto err_engine_ecdsa_start;
++
++ rc = aspeed_ecdsa_register(ecdsa_dev);
++ if (rc) {
++ dev_err(dev, "ECDSA algo register failed\n");
++ return rc;
++ }
++
++ dev_info(dev, "Aspeed ECDSA Hardware Accelerator successfully registered\n");
++
++ return 0;
++
++err_engine_ecdsa_start:
++ crypto_engine_exit(ecdsa_dev->crypt_engine_ecdsa);
++end:
++ return rc;
++}
++
++static int aspeed_ecdsa_remove(struct platform_device *pdev)
++{
++ struct aspeed_ecdsa_dev *ecdsa_dev = platform_get_drvdata(pdev);
++
++ aspeed_ecdsa_unregister(ecdsa_dev);
++
++ return 0;
++}
++
++MODULE_DEVICE_TABLE(of, aspeed_ecdsa_of_matches);
++
++static struct platform_driver aspeed_ecdsa_driver = {
++ .probe = aspeed_ecdsa_probe,
++ .remove = aspeed_ecdsa_remove,
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .of_match_table = aspeed_ecdsa_of_matches,
++ },
++};
++
++module_platform_driver(aspeed_ecdsa_driver);
++MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
++MODULE_DESCRIPTION("ASPEED ECDSA algorithm driver acceleration");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/crypto/aspeed/aspeed-ecdsa.h b/drivers/crypto/aspeed/aspeed-ecdsa.h
+new file mode 100644
+index 000000000..18532c076
+--- /dev/null
++++ b/drivers/crypto/aspeed/aspeed-ecdsa.h
+@@ -0,0 +1,116 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++
++#ifndef __ASPEED_ECDSA_H__
++#define __ASPEED_ECDSA_H__
++
++#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
++#define AST_DBG(d, fmt, ...) \
++ dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
++#else
++#define AST_DBG(d, fmt, ...) \
++ dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
++#endif
++
++/*************************
++ * *
++ * ECDSA regs definition *
++ * *
++ *************************/
++#define ASPEED_ECC_STS_REG 0xb0
++#define ASPEED_ECC_CTRL_REG 0xb4
++#define ASPEED_ECC_CMD_REG 0xbc
++#define ASPEED_ECC_INT_EN 0xc0
++#define ASPEED_ECC_INT_STS 0xc4
++
++#define ASPEED_ECC_DATA_BASE 0x800
++#define ASPEED_ECC_PAR_GX_REG 0x800
++#define ASPEED_ECC_PAR_GY_REG 0x840
++#define ASPEED_ECC_PAR_QX_REG 0x880
++#define ASPEED_ECC_PAR_QY_REG 0x8c0
++#define ASPEED_ECC_PAR_P_REG 0x900
++#define ASPEED_ECC_PAR_A_REG 0x940
++#define ASPEED_ECC_PAR_N_REG 0x980
++#define ASPEED_ECC_SIGN_R_REG 0x9c0
++#define ASPEED_ECC_SIGN_S_REG 0xa00
++#define ASPEED_ECC_MESSAGE_REG 0xa40
++#define ASPEED_ECC_ECDSA_VERIFY 0xbc0
++
++/* sts */
++#define ECC_IDLE BIT(0)
++#define ECC_VERIFY_PASS BIT(1)
++
++/* ctrl/cmd */
++#define ECC_EN BIT(0)
++#define ECDSA_384_EN 0x0
++#define ECDSA_256_EN BIT(1)
++#define ADDR_BE BIT(2)
++#define DATA_BE BIT(3)
++
++#define PAR_LEN_256 32
++#define PAR_LEN_384 48
++
++#define ASPEED_ECC_POLLING_TIME 100
++#define ASPEED_ECC_TIMEOUT 100000 /* 100 ms */
++
++#define CRYPTO_FLAGS_BUSY BIT(1)
++
++#define ast_write(ast, val, offset) \
++ writel((val), (ast)->regs + (offset))
++
++#define ast_read(ast, offset) \
++ readl((ast)->regs + (offset))
++
++struct aspeed_ecdsa_dev;
++
++typedef int (*aspeed_ecdsa_fn_t)(struct aspeed_ecdsa_dev *);
++
++struct aspeed_ecc_ctx {
++ struct aspeed_ecdsa_dev *ecdsa_dev;
++ unsigned int curve_id;
++ const struct ecc_curve *curve;
++
++ bool pub_key_set;
++ u64 x[ECC_MAX_DIGITS]; /* pub key x and y coordinates */
++ u64 y[ECC_MAX_DIGITS];
++ struct ecc_point pub_key;
++
++ struct crypto_akcipher *fallback_tfm;
++
++ aspeed_ecdsa_fn_t trigger;
++};
++
++struct ecdsa_signature_ctx {
++ const struct ecc_curve *curve;
++ u64 r[ECC_MAX_DIGITS];
++ u64 s[ECC_MAX_DIGITS];
++};
++
++struct aspeed_engine_ecdsa {
++ struct tasklet_struct done_task;
++ unsigned long flags;
++ struct akcipher_request *req;
++ int results;
++
++ /* callback func */
++ aspeed_ecdsa_fn_t resume;
++};
++
++struct aspeed_ecdsa_alg {
++ struct aspeed_ecdsa_dev *ecdsa_dev;
++ struct akcipher_engine_alg akcipher;
++};
++
++struct aspeed_ecdsa_dev {
++ void __iomem *regs;
++ struct device *dev;
++ struct clk *clk;
++ struct reset_control *rst;
++ int irq;
++
++ struct crypto_engine *crypt_engine_ecdsa;
++ struct aspeed_engine_ecdsa ecdsa_engine;
++};
++
++extern const struct asn1_decoder ecdsasignature_decoder;
++
++#endif
+diff --git a/drivers/crypto/aspeed/aspeed-hace-crypto.c b/drivers/crypto/aspeed/aspeed-hace-crypto.c
+index f0eddb785..6ac656b9a 100644
+--- a/drivers/crypto/aspeed/aspeed-hace-crypto.c
++++ b/drivers/crypto/aspeed/aspeed-hace-crypto.c
+@@ -24,6 +24,11 @@
+ dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+ #endif
+
++#define ASPEED_SEC_PROTECTION 0x0
++#define SEC_UNLOCK_PASSWORD 0x349fe38a
++#define ASPEED_VAULT_KEY_CTRL 0x80C
++#define SEC_VK_CTRL_VK_SELECTION BIT(0)
++
+ static int aspeed_crypto_do_fallback(struct skcipher_request *areq)
+ {
+ struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq);
+@@ -210,6 +215,13 @@ static int aspeed_sk_start(struct aspeed_hace_dev *hace_dev)
+ ASPEED_HACE_SRC);
+ ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr,
+ ASPEED_HACE_DEST);
++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
++ ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr >> 32,
++ ASPEED_HACE_SRC_H);
++ ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr >> 32,
++ ASPEED_HACE_DEST_H);
++#endif
++
+ ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN);
+ ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD);
+
+@@ -222,21 +234,65 @@ static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev)
+ struct aspeed_sg_list *src_list, *dst_list;
+ dma_addr_t src_dma_addr, dst_dma_addr;
+ struct aspeed_cipher_reqctx *rctx;
++ struct crypto_skcipher *cipher;
++ struct aspeed_cipher_ctx *ctx;
+ struct skcipher_request *req;
+ struct scatterlist *s;
++ int use_vault_key = 0;
+ int src_sg_len;
+ int dst_sg_len;
+ int total, i;
+ int rc;
++ u32 val;
+
+ CIPHER_DBG(hace_dev, "\n");
+
+ req = crypto_engine->req;
++ cipher = crypto_skcipher_reqtfm(req);
++ ctx = crypto_skcipher_ctx(cipher);
+ rctx = skcipher_request_ctx(req);
+
+ rctx->enc_cmd |= HACE_CMD_DES_SG_CTRL | HACE_CMD_SRC_SG_CTRL |
+ HACE_CMD_AES_KEY_HW_EXP | HACE_CMD_MBUS_REQ_SYNC_EN;
+
++ if (crypto_engine->load_vault_key) {
++ writel(SEC_UNLOCK_PASSWORD, hace_dev->sec_regs + ASPEED_SEC_PROTECTION);
++ CIPHER_DBG(hace_dev, "unlock SB, SEC000=0x%x\n", readl(hace_dev->sec_regs + ASPEED_SEC_PROTECTION));
++ val = readl(hace_dev->sec_regs + ASPEED_VAULT_KEY_CTRL);
++ if (val & BIT(2)) {
++ if (ctx->dummy_key == 1 && !(val & BIT(0))) {
++ use_vault_key = 1;
++ CIPHER_DBG(hace_dev, "Use Vault key 1\n");
++ } else if (ctx->dummy_key == 2 && (val & BIT(0))) {
++ use_vault_key = 1;
++ CIPHER_DBG(hace_dev, "Use Vault key 2\n");
++ } else {
++ use_vault_key = 0;
++ }
++ } else {
++ if (ctx->dummy_key == 1) {
++ use_vault_key = 1;
++ val &= ~SEC_VK_CTRL_VK_SELECTION;
++ writel(val, hace_dev->sec_regs + ASPEED_VAULT_KEY_CTRL);
++ CIPHER_DBG(hace_dev, "Set Vault key 1\n");
++ } else if (ctx->dummy_key == 2) {
++ use_vault_key = 1;
++ val |= SEC_VK_CTRL_VK_SELECTION;
++ writel(val, hace_dev->sec_regs + ASPEED_VAULT_KEY_CTRL);
++ CIPHER_DBG(hace_dev, "Set Vault key 2\n");
++ } else {
++ use_vault_key = 0;
++ }
++ }
++ writel(0x0, hace_dev->sec_regs + ASPEED_SEC_PROTECTION);
++ CIPHER_DBG(hace_dev, "lock SB, SEC000=0x%x\n", readl(hace_dev->sec_regs + ASPEED_SEC_PROTECTION));
++
++ if (use_vault_key)
++ rctx->enc_cmd |= HACE_CMD_AES_KEY_FROM_OTP;
++ else
++ rctx->enc_cmd &= ~HACE_CMD_AES_KEY_FROM_OTP;
++ }
++
+ /* BIDIRECTIONAL */
+ if (req->dst == req->src) {
+ src_sg_len = dma_map_sg(hace_dev->dev, req->src,
+@@ -334,6 +390,12 @@ static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev)
+ /* Trigger engines */
+ ast_hace_write(hace_dev, src_dma_addr, ASPEED_HACE_SRC);
+ ast_hace_write(hace_dev, dst_dma_addr, ASPEED_HACE_DEST);
++
++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
++ ast_hace_write(hace_dev, src_dma_addr >> 32, ASPEED_HACE_SRC_H);
++ ast_hace_write(hace_dev, dst_dma_addr >> 32, ASPEED_HACE_DEST_H);
++#endif
++
+ ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN);
+ ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD);
+
+@@ -382,6 +444,10 @@ static int aspeed_hace_skcipher_trigger(struct aspeed_hace_dev *hace_dev)
+
+ ast_hace_write(hace_dev, crypto_engine->cipher_ctx_dma,
+ ASPEED_HACE_CONTEXT);
++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
++ ast_hace_write(hace_dev, crypto_engine->cipher_ctx_dma >> 32,
++ ASPEED_HACE_CONTEXT_H);
++#endif
+
+ if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) {
+ if (rctx->enc_cmd & HACE_CMD_DES_SELECT)
+@@ -392,7 +458,8 @@ static int aspeed_hace_skcipher_trigger(struct aspeed_hace_dev *hace_dev)
+ AES_BLOCK_SIZE);
+ }
+
+- if (hace_dev->version == AST2600_VERSION) {
++ if (hace_dev->version == AST2600_VERSION ||
++ hace_dev->version == AST2700_VERSION) {
+ memcpy(crypto_engine->cipher_ctx + 16, ctx->key, ctx->key_len);
+
+ return aspeed_sk_start_sg(hace_dev);
+@@ -628,6 +695,8 @@ static int aspeed_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
+
+ CIPHER_DBG(hace_dev, "keylen: %d bits\n", (keylen * 8));
+
++ ctx->dummy_key = find_dummy_key(key, keylen);
++
+ if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
+ keylen != AES_KEYSIZE_256)
+ return -EINVAL;
+@@ -1149,19 +1218,49 @@ void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++)
+ crypto_engine_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
+
+- if (hace_dev->version != AST2600_VERSION)
++ if (hace_dev->version == AST2500_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++)
+ crypto_engine_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher);
+ }
+
++#ifdef CONFIG_AST2600_OTP
++void find_vault_key(struct aspeed_hace_dev *hace_dev)
++{
++ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
++ u32 otp_data[16];
++ int i;
++
++ crypto_engine->load_vault_key = 0;
++
++ otp_read_data_buf(0, otp_data, 16);
++ for (i = 0; i < 16; i++) {
++ CIPHER_DBG(hace_dev, "OTPDATA%d=%x\n", i, otp_data[i]);
++ if (((otp_data[i] >> 14) & 0xf) == 1) {
++ CIPHER_DBG(hace_dev, "Found vault key in OTP\n");
++ crypto_engine->load_vault_key = 1;
++ return;
++ }
++ if (otp_data[i] & BIT(13))
++ break;
++ }
++ CIPHER_DBG(hace_dev, "Vault key not found in OTP\n");
++}
++#endif
++
+ void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
+ {
+ int rc, i;
+
+ CIPHER_DBG(hace_dev, "\n");
+
++#ifdef CONFIG_AST2600_OTP
++ find_vault_key(hace_dev);
++#else
++ hace_dev->crypto_engine.load_vault_key = 0;
++#endif
++
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) {
+ aspeed_crypto_algs[i].hace_dev = hace_dev;
+ rc = crypto_engine_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher);
+@@ -1171,7 +1270,7 @@ void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
+ }
+ }
+
+- if (hace_dev->version != AST2600_VERSION)
++ if (hace_dev->version == AST2500_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) {
+@@ -1183,3 +1282,80 @@ void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev)
+ }
+ }
+ }
++
++static void aspeed_hace_crypto_done_task(unsigned long data)
++{
++ struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data;
++ struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
++
++ crypto_engine->resume(hace_dev);
++}
++
++int aspeed_hace_crypto_init(struct aspeed_hace_dev *hace_dev)
++{
++ struct aspeed_engine_crypto *crypto_engine;
++ int rc;
++
++ crypto_engine = &hace_dev->crypto_engine;
++
++ /* Initialize crypto hardware engine structure for crypto */
++ hace_dev->crypt_engine_crypto = crypto_engine_alloc_init(hace_dev->dev,
++ true);
++ if (!hace_dev->crypt_engine_crypto) {
++ rc = -ENOMEM;
++ goto end;
++ }
++
++ rc = crypto_engine_start(hace_dev->crypt_engine_crypto);
++ if (rc)
++ goto err_engine_crypto_start;
++
++ tasklet_init(&crypto_engine->done_task, aspeed_hace_crypto_done_task,
++ (unsigned long)hace_dev);
++
++ /* Allocate DMA buffer for crypto engine context used */
++ crypto_engine->cipher_ctx =
++ dmam_alloc_coherent(hace_dev->dev,
++ PAGE_SIZE,
++ &crypto_engine->cipher_ctx_dma,
++ GFP_KERNEL);
++ if (!crypto_engine->cipher_ctx) {
++ dev_err(hace_dev->dev, "Failed to allocate cipher ctx dma\n");
++ rc = -ENOMEM;
++ goto err_engine_crypto_start;
++ }
++
++ /* Allocate DMA buffer for crypto engine input used */
++ crypto_engine->cipher_addr =
++ dmam_alloc_coherent(hace_dev->dev,
++ ASPEED_CRYPTO_SRC_DMA_BUF_LEN,
++ &crypto_engine->cipher_dma_addr,
++ GFP_KERNEL);
++ if (!crypto_engine->cipher_addr) {
++ dev_err(hace_dev->dev, "Failed to allocate cipher addr dma\n");
++ rc = -ENOMEM;
++ goto err_engine_crypto_start;
++ }
++
++ /* Allocate DMA buffer for crypto engine output used */
++ if (hace_dev->version == AST2600_VERSION ||
++ hace_dev->version == AST2700_VERSION) {
++ crypto_engine->dst_sg_addr =
++ dmam_alloc_coherent(hace_dev->dev,
++ ASPEED_CRYPTO_DST_DMA_BUF_LEN,
++ &crypto_engine->dst_sg_dma_addr,
++ GFP_KERNEL);
++ if (!crypto_engine->dst_sg_addr) {
++ dev_err(hace_dev->dev, "Failed to allocate dst_sg dma\n");
++ rc = -ENOMEM;
++ goto err_engine_crypto_start;
++ }
++ }
++
++ return 0;
++
++err_engine_crypto_start:
++ crypto_engine_exit(hace_dev->crypt_engine_crypto);
++end:
++ return rc;
++}
+diff --git a/drivers/crypto/aspeed/aspeed-hace-hash.c b/drivers/crypto/aspeed/aspeed-hace-hash.c
+index 0b6e49c06..ab2ad15a8 100644
+--- a/drivers/crypto/aspeed/aspeed-hace-hash.c
++++ b/drivers/crypto/aspeed/aspeed-hace-hash.c
+@@ -77,6 +77,7 @@ static const __be64 sha512_iv[8] = {
+ static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
+ struct aspeed_sham_reqctx *rctx)
+ {
++ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ unsigned int index, padlen;
+ __be64 bits[2];
+
+@@ -89,9 +90,9 @@ static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
+ bits[0] = cpu_to_be64(rctx->digcnt[0] << 3);
+ index = rctx->bufcnt & 0x3f;
+ padlen = (index < 56) ? (56 - index) : ((64 + 56) - index);
+- *(rctx->buffer + rctx->bufcnt) = 0x80;
+- memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
+- memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 8);
++ *(hash_engine->buffer_addr + rctx->bufcnt) = 0x80;
++ memset(hash_engine->buffer_addr + rctx->bufcnt + 1, 0, padlen - 1);
++ memcpy(hash_engine->buffer_addr + rctx->bufcnt + padlen, bits, 8);
+ rctx->bufcnt += padlen + 8;
+ break;
+ default:
+@@ -100,9 +101,9 @@ static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev,
+ rctx->digcnt[0] >> 61);
+ index = rctx->bufcnt & 0x7f;
+ padlen = (index < 112) ? (112 - index) : ((128 + 112) - index);
+- *(rctx->buffer + rctx->bufcnt) = 0x80;
+- memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1);
+- memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 16);
++ *(hash_engine->buffer_addr + rctx->bufcnt) = 0x80;
++ memset(hash_engine->buffer_addr + rctx->bufcnt + 1, 0, padlen - 1);
++ memcpy(hash_engine->buffer_addr + rctx->bufcnt + padlen, bits, 16);
+ rctx->bufcnt += padlen + 16;
+ break;
+ }
+@@ -125,7 +126,7 @@ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
+ AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain);
+
+ if (rctx->bufcnt)
+- memcpy(hash_engine->ahash_src_addr, rctx->buffer, rctx->bufcnt);
++ memcpy(hash_engine->ahash_src_addr, hash_engine->buffer_addr, rctx->bufcnt);
+
+ if (rctx->total + rctx->bufcnt < ASPEED_CRYPTO_SRC_DMA_BUF_LEN) {
+ scatterwalk_map_and_copy(hash_engine->ahash_src_addr +
+@@ -138,21 +139,13 @@ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev)
+ return -EINVAL;
+ }
+
+- scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg,
++ scatterwalk_map_and_copy(hash_engine->buffer_addr, rctx->src_sg,
+ rctx->offset, remain, 0);
+
+ rctx->bufcnt = remain;
+- rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
+- SHA512_DIGEST_SIZE,
+- DMA_BIDIRECTIONAL);
+- if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+- dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+- return -ENOMEM;
+- }
+
+ hash_engine->src_length = length - remain;
+ hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
+- hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ return 0;
+ }
+@@ -187,30 +180,12 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
+ }
+
+ src_list = (struct aspeed_sg_list *)hash_engine->ahash_src_addr;
+- rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
+- SHA512_DIGEST_SIZE,
+- DMA_BIDIRECTIONAL);
+- if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+- dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+- rc = -ENOMEM;
+- goto free_src_sg;
+- }
+
+ if (rctx->bufcnt != 0) {
+ u32 phy_addr;
+ u32 len;
+
+- rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
+- rctx->buffer,
+- rctx->block_size * 2,
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+- dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+- rc = -ENOMEM;
+- goto free_rctx_digest;
+- }
+-
+- phy_addr = rctx->buffer_dma_addr;
++ phy_addr = hash_engine->buffer_dma_addr;
+ len = rctx->bufcnt;
+ length -= len;
+
+@@ -244,23 +219,15 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
+
+ if (length != 0) {
+ rc = -EINVAL;
+- goto free_rctx_buffer;
++ goto free_src_sg;
+ }
+
+ rctx->offset = rctx->total - remain;
+ hash_engine->src_length = rctx->total + rctx->bufcnt - remain;
+ hash_engine->src_dma = hash_engine->ahash_src_dma_addr;
+- hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ return 0;
+
+-free_rctx_buffer:
+- if (rctx->bufcnt != 0)
+- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+- rctx->block_size * 2, DMA_TO_DEVICE);
+-free_rctx_digest:
+- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+ free_src_sg:
+ dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
+ DMA_TO_DEVICE);
+@@ -294,13 +261,7 @@ static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev)
+
+ AHASH_DBG(hace_dev, "\n");
+
+- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+-
+- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+- rctx->block_size * 2, DMA_TO_DEVICE);
+-
+- memcpy(req->result, rctx->digest, rctx->digsize);
++ memcpy(req->result, hash_engine->digest_addr, rctx->digsize);
+
+ return aspeed_ahash_complete(hace_dev);
+ }
+@@ -319,7 +280,7 @@ static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev,
+ &hash_engine->src_dma, &hash_engine->digest_dma,
+ hash_engine->src_length);
+
+- rctx->cmd |= HASH_CMD_INT_ENABLE;
++ rctx->cmd |= HASH_CMD_INT_ENABLE | HASH_CMD_MBUS_REQ_SYNC_EN;
+ hash_engine->resume = resume;
+
+ ast_hace_write(hace_dev, hash_engine->src_dma, ASPEED_HACE_HASH_SRC);
+@@ -329,6 +290,14 @@ static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev,
+ ASPEED_HACE_HASH_KEY_BUFF);
+ ast_hace_write(hace_dev, hash_engine->src_length,
+ ASPEED_HACE_HASH_DATA_LEN);
++#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
++ ast_hace_write(hace_dev, hash_engine->src_dma >> 32,
++ ASPEED_HACE_HASH_SRC_H);
++ ast_hace_write(hace_dev, hash_engine->digest_dma >> 32,
++ ASPEED_HACE_HASH_DIGEST_BUFF_H);
++ ast_hace_write(hace_dev, hash_engine->digest_dma >> 32,
++ ASPEED_HACE_HASH_KEY_BUFF_H);
++#endif
+
+ /* Memory barrier to ensure all data setup before engine starts */
+ mb();
+@@ -351,55 +320,24 @@ static int aspeed_ahash_hmac_resume(struct aspeed_hace_dev *hace_dev)
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
+- int rc = 0;
+
+ AHASH_DBG(hace_dev, "\n");
+
+- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+-
+- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+- rctx->block_size * 2, DMA_TO_DEVICE);
+-
+ /* o key pad + hash sum 1 */
+- memcpy(rctx->buffer, bctx->opad, rctx->block_size);
+- memcpy(rctx->buffer + rctx->block_size, rctx->digest, rctx->digsize);
++ memcpy(hash_engine->buffer_addr, bctx->opad, rctx->block_size);
++ memcpy(hash_engine->buffer_addr + rctx->block_size,
++ hash_engine->digest_addr, rctx->digsize);
+
+ rctx->bufcnt = rctx->block_size + rctx->digsize;
+ rctx->digcnt[0] = rctx->block_size + rctx->digsize;
+
+ aspeed_ahash_fill_padding(hace_dev, rctx);
+- memcpy(rctx->digest, rctx->sha_iv, rctx->ivsize);
+-
+- rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest,
+- SHA512_DIGEST_SIZE,
+- DMA_BIDIRECTIONAL);
+- if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+- dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+- rc = -ENOMEM;
+- goto end;
+- }
+-
+- rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer,
+- rctx->block_size * 2,
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+- dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+- rc = -ENOMEM;
+- goto free_rctx_digest;
+- }
++ memcpy(hash_engine->digest_addr, rctx->sha_iv, rctx->ivsize);
+
+- hash_engine->src_dma = rctx->buffer_dma_addr;
++ hash_engine->src_dma = hash_engine->buffer_dma_addr;
+ hash_engine->src_length = rctx->bufcnt;
+- hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
+-
+-free_rctx_digest:
+- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+-end:
+- return rc;
+ }
+
+ static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev)
+@@ -407,47 +345,19 @@ static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev)
+ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+ struct ahash_request *req = hash_engine->req;
+ struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req);
+- int rc = 0;
+
+ AHASH_DBG(hace_dev, "\n");
+
+ aspeed_ahash_fill_padding(hace_dev, rctx);
+
+- rctx->digest_dma_addr = dma_map_single(hace_dev->dev,
+- rctx->digest,
+- SHA512_DIGEST_SIZE,
+- DMA_BIDIRECTIONAL);
+- if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) {
+- dev_warn(hace_dev->dev, "dma_map() rctx digest error\n");
+- rc = -ENOMEM;
+- goto end;
+- }
+-
+- rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
+- rctx->buffer,
+- rctx->block_size * 2,
+- DMA_TO_DEVICE);
+- if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) {
+- dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n");
+- rc = -ENOMEM;
+- goto free_rctx_digest;
+- }
+-
+- hash_engine->src_dma = rctx->buffer_dma_addr;
++ hash_engine->src_dma = hash_engine->buffer_dma_addr;
+ hash_engine->src_length = rctx->bufcnt;
+- hash_engine->digest_dma = rctx->digest_dma_addr;
+
+ if (rctx->flags & SHA_FLAGS_HMAC)
+ return aspeed_hace_ahash_trigger(hace_dev,
+ aspeed_ahash_hmac_resume);
+
+ return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer);
+-
+-free_rctx_digest:
+- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+-end:
+- return rc;
+ }
+
+ static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
+@@ -461,15 +371,7 @@ static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev)
+ dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents,
+ DMA_TO_DEVICE);
+
+- if (rctx->bufcnt != 0)
+- dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr,
+- rctx->block_size * 2,
+- DMA_TO_DEVICE);
+-
+- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+-
+- scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset,
++ scatterwalk_map_and_copy(hash_engine->buffer_addr, rctx->src_sg, rctx->offset,
+ rctx->total - rctx->offset, 0);
+
+ rctx->bufcnt = rctx->total - rctx->offset;
+@@ -489,9 +391,6 @@ static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev)
+
+ AHASH_DBG(hace_dev, "\n");
+
+- dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr,
+- SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL);
+-
+ if (rctx->flags & SHA_FLAGS_FINUP)
+ return aspeed_ahash_req_final(hace_dev);
+
+@@ -508,12 +407,12 @@ static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev)
+
+ AHASH_DBG(hace_dev, "\n");
+
+- if (hace_dev->version == AST2600_VERSION) {
+- rctx->cmd |= HASH_CMD_HASH_SRC_SG_CTRL;
+- resume = aspeed_ahash_update_resume_sg;
++ if (hace_dev->version == AST2500_VERSION) {
++ resume = aspeed_ahash_update_resume;
+
+ } else {
+- resume = aspeed_ahash_update_resume;
++ rctx->cmd |= HASH_CMD_HASH_SRC_SG_CTRL;
++ resume = aspeed_ahash_update_resume_sg;
+ }
+
+ ret = hash_engine->dma_prepare(hace_dev);
+@@ -566,10 +465,10 @@ static void aspeed_ahash_prepare_request(struct crypto_engine *engine,
+ hash_engine = &hace_dev->hash_engine;
+ hash_engine->req = req;
+
+- if (hace_dev->version == AST2600_VERSION)
+- hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg;
+- else
++ if (hace_dev->version == AST2500_VERSION)
+ hash_engine->dma_prepare = aspeed_ahash_dma_prepare;
++ else
++ hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg;
+ }
+
+ static int aspeed_ahash_do_one(struct crypto_engine *engine, void *areq)
+@@ -584,6 +483,7 @@ static int aspeed_sham_update(struct ahash_request *req)
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
++ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+
+ AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes);
+
+@@ -598,7 +498,7 @@ static int aspeed_sham_update(struct ahash_request *req)
+ rctx->digcnt[1]++;
+
+ if (rctx->bufcnt + rctx->total < rctx->block_size) {
+- scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt,
++ scatterwalk_map_and_copy(hash_engine->buffer_addr + rctx->bufcnt,
+ rctx->src_sg, rctx->offset,
+ rctx->total, 0);
+ rctx->bufcnt += rctx->total;
+@@ -665,6 +565,7 @@ static int aspeed_sham_init(struct ahash_request *req)
+ struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm);
+ struct aspeed_hace_dev *hace_dev = tctx->hace_dev;
+ struct aspeed_sha_hmac_ctx *bctx = tctx->base;
++ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
+
+ AHASH_DBG(hace_dev, "%s: digest size:%d\n",
+ crypto_tfm_alg_name(&tfm->base),
+@@ -681,7 +582,7 @@ static int aspeed_sham_init(struct ahash_request *req)
+ rctx->block_size = SHA1_BLOCK_SIZE;
+ rctx->sha_iv = sha1_iv;
+ rctx->ivsize = 32;
+- memcpy(rctx->digest, sha1_iv, rctx->ivsize);
++ memcpy(hash_engine->digest_addr, sha1_iv, rctx->ivsize);
+ break;
+ case SHA224_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA224 | HASH_CMD_SHA_SWAP;
+@@ -690,7 +591,7 @@ static int aspeed_sham_init(struct ahash_request *req)
+ rctx->block_size = SHA224_BLOCK_SIZE;
+ rctx->sha_iv = sha224_iv;
+ rctx->ivsize = 32;
+- memcpy(rctx->digest, sha224_iv, rctx->ivsize);
++ memcpy(hash_engine->digest_addr, sha224_iv, rctx->ivsize);
+ break;
+ case SHA256_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA256 | HASH_CMD_SHA_SWAP;
+@@ -699,7 +600,7 @@ static int aspeed_sham_init(struct ahash_request *req)
+ rctx->block_size = SHA256_BLOCK_SIZE;
+ rctx->sha_iv = sha256_iv;
+ rctx->ivsize = 32;
+- memcpy(rctx->digest, sha256_iv, rctx->ivsize);
++ memcpy(hash_engine->digest_addr, sha256_iv, rctx->ivsize);
+ break;
+ case SHA384_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA384 |
+@@ -709,7 +610,7 @@ static int aspeed_sham_init(struct ahash_request *req)
+ rctx->block_size = SHA384_BLOCK_SIZE;
+ rctx->sha_iv = (const __be32 *)sha384_iv;
+ rctx->ivsize = 64;
+- memcpy(rctx->digest, sha384_iv, rctx->ivsize);
++ memcpy(hash_engine->digest_addr, sha384_iv, rctx->ivsize);
+ break;
+ case SHA512_DIGEST_SIZE:
+ rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512 |
+@@ -719,7 +620,7 @@ static int aspeed_sham_init(struct ahash_request *req)
+ rctx->block_size = SHA512_BLOCK_SIZE;
+ rctx->sha_iv = (const __be32 *)sha512_iv;
+ rctx->ivsize = 64;
+- memcpy(rctx->digest, sha512_iv, rctx->ivsize);
++ memcpy(hash_engine->digest_addr, sha512_iv, rctx->ivsize);
+ break;
+ default:
+ dev_warn(tctx->hace_dev->dev, "digest size %d not support\n",
+@@ -731,12 +632,13 @@ static int aspeed_sham_init(struct ahash_request *req)
+ rctx->total = 0;
+ rctx->digcnt[0] = 0;
+ rctx->digcnt[1] = 0;
++ hash_engine->digest_dma = hash_engine->digest_dma_addr;
+
+ /* HMAC init */
+ if (tctx->flags & SHA_FLAGS_HMAC) {
+ rctx->digcnt[0] = rctx->block_size;
+ rctx->bufcnt = rctx->block_size;
+- memcpy(rctx->buffer, bctx->ipad, rctx->block_size);
++ memcpy(hash_engine->buffer_addr, bctx->ipad, rctx->block_size);
+ rctx->flags |= SHA_FLAGS_HMAC;
+ }
+
+@@ -1196,7 +1098,7 @@ void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
+ for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++)
+ crypto_engine_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash);
+
+- if (hace_dev->version != AST2600_VERSION)
++ if (hace_dev->version == AST2500_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++)
+@@ -1218,7 +1120,7 @@ void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
+ }
+ }
+
+- if (hace_dev->version != AST2600_VERSION)
++ if (hace_dev->version == AST2500_VERSION)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) {
+@@ -1230,3 +1132,71 @@ void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev)
+ }
+ }
+ }
++
++static void aspeed_hace_hash_done_task(unsigned long data)
++{
++ struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data;
++ struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
++
++ hash_engine->resume(hace_dev);
++}
++
++int aspeed_hace_hash_init(struct aspeed_hace_dev *hace_dev)
++{
++ struct aspeed_engine_hash *hash_engine;
++ int rc;
++
++ hash_engine = &hace_dev->hash_engine;
++
++ /* Initialize crypto hardware engine structure for hash */
++ hace_dev->crypt_engine_hash = crypto_engine_alloc_init(hace_dev->dev,
++ true);
++ if (!hace_dev->crypt_engine_hash) {
++ rc = -ENOMEM;
++ goto end;
++ }
++
++ rc = crypto_engine_start(hace_dev->crypt_engine_hash);
++ if (rc)
++ goto err_engine_hash_start;
++
++ tasklet_init(&hash_engine->done_task, aspeed_hace_hash_done_task,
++ (unsigned long)hace_dev);
++
++ /* Allocate DMA buffer for hash engine input used */
++ hash_engine->ahash_src_addr =
++ dmam_alloc_coherent(hace_dev->dev,
++ ASPEED_HASH_SRC_DMA_BUF_LEN,
++ &hash_engine->ahash_src_dma_addr,
++ GFP_KERNEL);
++ if (!hash_engine->ahash_src_addr) {
++ dev_err(hace_dev->dev, "Failed to allocate dma buffer\n");
++ rc = -ENOMEM;
++ goto err_engine_hash_start;
++ }
++
++ hash_engine->buffer_addr = dmam_alloc_coherent(hace_dev->dev, SHA512_BLOCK_SIZE * 2,
++ &hash_engine->buffer_dma_addr,
++ GFP_KERNEL);
++ if (!hash_engine->buffer_addr) {
++ dev_err(hace_dev->dev, "Failed to allocate DMA buffer\n");
++ rc = -ENOMEM;
++ goto err_engine_hash_start;
++ }
++
++ hash_engine->digest_addr = dmam_alloc_coherent(hace_dev->dev, SHA512_DIGEST_SIZE,
++ &hash_engine->digest_dma_addr,
++ GFP_KERNEL);
++ if (!hash_engine->digest_addr) {
++ dev_err(hace_dev->dev, "Failed to allocate DMA digest buffer\n");
++ rc = -ENOMEM;
++ goto err_engine_hash_start;
++ }
++
++ return 0;
++
++err_engine_hash_start:
++ crypto_engine_exit(hace_dev->crypt_engine_hash);
++end:
++ return rc;
++}
+diff --git a/drivers/crypto/aspeed/aspeed-hace.c b/drivers/crypto/aspeed/aspeed-hace.c
+index 8f7aab82e..8ce38027c 100644
+--- a/drivers/crypto/aspeed/aspeed-hace.c
++++ b/drivers/crypto/aspeed/aspeed-hace.c
+@@ -6,6 +6,7 @@
+ #include "aspeed-hace.h"
+ #include <crypto/engine.h>
+ #include <linux/clk.h>
++#include <linux/reset.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/err.h>
+ #include <linux/interrupt.h>
+@@ -26,6 +27,21 @@
+ dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
+ #endif
+
++static unsigned char *dummy_key1;
++static unsigned char *dummy_key2;
++
++int find_dummy_key(const char *key, int keylen)
++{
++ int ret = 0;
++
++ if (dummy_key1 && memcmp(key, dummy_key1, keylen) == 0)
++ ret = 1;
++ else if (dummy_key2 && memcmp(key, dummy_key2, keylen) == 0)
++ ret = 2;
++
++ return ret;
++}
++
+ /* HACE interrupt service routine */
+ static irqreturn_t aspeed_hace_irq(int irq, void *dev)
+ {
+@@ -53,23 +69,9 @@ static irqreturn_t aspeed_hace_irq(int irq, void *dev)
+ dev_warn(hace_dev->dev, "CRYPTO no active requests.\n");
+ }
+
+- return IRQ_HANDLED;
+-}
+-
+-static void aspeed_hace_crypto_done_task(unsigned long data)
+-{
+- struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data;
+- struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
+-
+- crypto_engine->resume(hace_dev);
+-}
+-
+-static void aspeed_hace_hash_done_task(unsigned long data)
+-{
+- struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data;
+- struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
++ HACE_DBG(hace_dev, "handled\n");
+
+- hash_engine->resume(hace_dev);
++ return IRQ_HANDLED;
+ }
+
+ static void aspeed_hace_register(struct aspeed_hace_dev *hace_dev)
+@@ -95,16 +97,18 @@ static void aspeed_hace_unregister(struct aspeed_hace_dev *hace_dev)
+ static const struct of_device_id aspeed_hace_of_matches[] = {
+ { .compatible = "aspeed,ast2500-hace", .data = (void *)5, },
+ { .compatible = "aspeed,ast2600-hace", .data = (void *)6, },
++ { .compatible = "aspeed,ast2700-hace", .data = (void *)7, },
+ {},
+ };
+
+ static int aspeed_hace_probe(struct platform_device *pdev)
+ {
+- struct aspeed_engine_crypto *crypto_engine;
+ const struct of_device_id *hace_dev_id;
+- struct aspeed_engine_hash *hash_engine;
+ struct aspeed_hace_dev *hace_dev;
++ struct device_node *sec_node;
++ struct device *dev = &pdev->dev;
+ int rc;
++ int err;
+
+ hace_dev = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_hace_dev),
+ GFP_KERNEL);
+@@ -119,8 +123,6 @@ static int aspeed_hace_probe(struct platform_device *pdev)
+
+ hace_dev->dev = &pdev->dev;
+ hace_dev->version = (unsigned long)hace_dev_id->data;
+- hash_engine = &hace_dev->hash_engine;
+- crypto_engine = &hace_dev->crypto_engine;
+
+ platform_set_drvdata(pdev, hace_dev);
+
+@@ -153,100 +155,74 @@ static int aspeed_hace_probe(struct platform_device *pdev)
+ return rc;
+ }
+
+- /* Initialize crypto hardware engine structure for hash */
+- hace_dev->crypt_engine_hash = crypto_engine_alloc_init(hace_dev->dev,
+- true);
+- if (!hace_dev->crypt_engine_hash) {
+- rc = -ENOMEM;
+- goto clk_exit;
++ hace_dev->rst = devm_reset_control_get_shared(dev, NULL);
++ if (IS_ERR(hace_dev->rst)) {
++ dev_err(&pdev->dev, "Failed to get hace reset\n");
++ return PTR_ERR(hace_dev->rst);
+ }
+
+- rc = crypto_engine_start(hace_dev->crypt_engine_hash);
+- if (rc)
+- goto err_engine_hash_start;
+-
+- tasklet_init(&hash_engine->done_task, aspeed_hace_hash_done_task,
+- (unsigned long)hace_dev);
+-
+- /* Initialize crypto hardware engine structure for crypto */
+- hace_dev->crypt_engine_crypto = crypto_engine_alloc_init(hace_dev->dev,
+- true);
+- if (!hace_dev->crypt_engine_crypto) {
+- rc = -ENOMEM;
+- goto err_engine_hash_start;
++ rc = reset_control_deassert(hace_dev->rst);
++ if (rc) {
++ dev_err(&pdev->dev, "Deassert hace reset failed\n");
++ return rc;
+ }
+
+- rc = crypto_engine_start(hace_dev->crypt_engine_crypto);
+- if (rc)
+- goto err_engine_crypto_start;
+-
+- tasklet_init(&crypto_engine->done_task, aspeed_hace_crypto_done_task,
+- (unsigned long)hace_dev);
+-
+- /* Allocate DMA buffer for hash engine input used */
+- hash_engine->ahash_src_addr =
+- dmam_alloc_coherent(&pdev->dev,
+- ASPEED_HASH_SRC_DMA_BUF_LEN,
+- &hash_engine->ahash_src_dma_addr,
+- GFP_KERNEL);
+- if (!hash_engine->ahash_src_addr) {
+- dev_err(&pdev->dev, "Failed to allocate dma buffer\n");
+- rc = -ENOMEM;
+- goto err_engine_crypto_start;
++ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
++ if (rc) {
++ dev_warn(&pdev->dev, "No suitable DMA available\n");
++ return rc;
+ }
+
+- /* Allocate DMA buffer for crypto engine context used */
+- crypto_engine->cipher_ctx =
+- dmam_alloc_coherent(&pdev->dev,
+- PAGE_SIZE,
+- &crypto_engine->cipher_ctx_dma,
+- GFP_KERNEL);
+- if (!crypto_engine->cipher_ctx) {
+- dev_err(&pdev->dev, "Failed to allocate cipher ctx dma\n");
+- rc = -ENOMEM;
+- goto err_engine_crypto_start;
++#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH
++ rc = aspeed_hace_hash_init(hace_dev);
++ if (rc) {
++ dev_err(&pdev->dev, "Hash init failed\n");
++ return rc;
++ }
++#endif
++#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO
++ rc = aspeed_hace_crypto_init(hace_dev);
++ if (rc) {
++ dev_err(&pdev->dev, "Crypto init failed\n");
++ return rc;
+ }
+
+- /* Allocate DMA buffer for crypto engine input used */
+- crypto_engine->cipher_addr =
+- dmam_alloc_coherent(&pdev->dev,
+- ASPEED_CRYPTO_SRC_DMA_BUF_LEN,
+- &crypto_engine->cipher_dma_addr,
+- GFP_KERNEL);
+- if (!crypto_engine->cipher_addr) {
+- dev_err(&pdev->dev, "Failed to allocate cipher addr dma\n");
+- rc = -ENOMEM;
+- goto err_engine_crypto_start;
++ if (of_find_property(dev->of_node, "dummy-key1", NULL)) {
++ dummy_key1 = kzalloc(DUMMY_KEY_SIZE, GFP_KERNEL);
++ if (dummy_key1) {
++ err = of_property_read_u8_array(dev->of_node, "dummy-key1", dummy_key1, DUMMY_KEY_SIZE);
++ if (err)
++ dev_err(dev, "error reading dummy-key1\n");
++ } else {
++ dev_err(dev, "failed to allocate dummy_key1\n");
++ }
+ }
+
+- /* Allocate DMA buffer for crypto engine output used */
+- if (hace_dev->version == AST2600_VERSION) {
+- crypto_engine->dst_sg_addr =
+- dmam_alloc_coherent(&pdev->dev,
+- ASPEED_CRYPTO_DST_DMA_BUF_LEN,
+- &crypto_engine->dst_sg_dma_addr,
+- GFP_KERNEL);
+- if (!crypto_engine->dst_sg_addr) {
+- dev_err(&pdev->dev, "Failed to allocate dst_sg dma\n");
+- rc = -ENOMEM;
+- goto err_engine_crypto_start;
++ if (of_find_property(dev->of_node, "dummy-key2", NULL)) {
++ dummy_key2 = kzalloc(DUMMY_KEY_SIZE, GFP_KERNEL);
++ if (dummy_key2) {
++ err = of_property_read_u8_array(dev->of_node, "dummy-key2", dummy_key2, DUMMY_KEY_SIZE);
++ if (err)
++ dev_err(dev, "error reading dummy-key2\n");
++ } else {
++ dev_err(dev, "failed to allocate dummy_key2\n");
+ }
+ }
+
++ sec_node = of_find_compatible_node(NULL, NULL, "aspeed,ast2600-sbc");
++ if (!sec_node) {
++ dev_err(dev, "cannot find sbc node\n");
++ } else {
++ hace_dev->sec_regs = of_iomap(sec_node, 0);
++ if (!hace_dev->sec_regs)
++ dev_err(dev, "failed to map SBC registers\n");
++ }
++#endif
+ aspeed_hace_register(hace_dev);
+
+ dev_info(&pdev->dev, "Aspeed Crypto Accelerator successfully registered\n");
+
+ return 0;
+-
+-err_engine_crypto_start:
+- crypto_engine_exit(hace_dev->crypt_engine_crypto);
+-err_engine_hash_start:
+- crypto_engine_exit(hace_dev->crypt_engine_hash);
+-clk_exit:
+- clk_disable_unprepare(hace_dev->clk);
+-
+- return rc;
+ }
+
+ static int aspeed_hace_remove(struct platform_device *pdev)
+@@ -257,12 +233,14 @@ static int aspeed_hace_remove(struct platform_device *pdev)
+
+ aspeed_hace_unregister(hace_dev);
+
++#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH
+ crypto_engine_exit(hace_dev->crypt_engine_hash);
+- crypto_engine_exit(hace_dev->crypt_engine_crypto);
+-
+ tasklet_kill(&hash_engine->done_task);
++#endif
++#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO
++ crypto_engine_exit(hace_dev->crypt_engine_crypto);
+ tasklet_kill(&crypto_engine->done_task);
+-
++#endif
+ clk_disable_unprepare(hace_dev->clk);
+
+ return 0;
+diff --git a/drivers/crypto/aspeed/aspeed-hace.h b/drivers/crypto/aspeed/aspeed-hace.h
+index 68f70e01f..6bde454b0 100644
+--- a/drivers/crypto/aspeed/aspeed-hace.h
++++ b/drivers/crypto/aspeed/aspeed-hace.h
+@@ -10,6 +10,7 @@
+ #include <linux/compiler_attributes.h>
+ #include <linux/interrupt.h>
+ #include <linux/types.h>
++#include <linux/soc/aspeed/aspeed-otp.h>
+
+ /*****************************
+ * *
+@@ -23,7 +24,7 @@
+ #define ASPEED_HACE_CMD 0x10 /* Crypto Engine Command Register */
+
+ /* G5 */
+-#define ASPEED_HACE_TAG 0x18 /* HACE Tag Register */
++#define ASPEED_HACE_TAG 0x18 /* HACE Tag Write Buffer Base Address Register */
+ /* G6 */
+ #define ASPEED_HACE_GCM_ADD_LEN 0x14 /* Crypto AES-GCM Additional Data Length Register */
+ #define ASPEED_HACE_GCM_TAG_BASE_ADDR 0x18 /* Crypto AES-GCM Tag Write Buff Base Address Reg */
+@@ -36,6 +37,15 @@
+ #define ASPEED_HACE_HASH_DATA_LEN 0x2C /* Hash Data Length Register */
+ #define ASPEED_HACE_HASH_CMD 0x30 /* Hash Engine Command Register */
+
++/* G7 */
++#define ASPEED_HACE_SRC_H 0x80 /* Crypto Data Source Base High Address Register */
++#define ASPEED_HACE_DEST_H 0x84 /* Crypto Data Destination Base High Address Register */
++#define ASPEED_HACE_CONTEXT_H 0x88 /* Crypto Context Buffer Base High Address Register */
++#define ASPEED_HACE_TAG_H 0x8C /* HACE Tag Write Buffer Base High Address Register */
++#define ASPEED_HACE_HASH_SRC_H 0x90 /* Hash Data Source Base High Address Register */
++#define ASPEED_HACE_HASH_DIGEST_BUFF_H 0x94 /* Hash Digest Write Buffer Base High Address Register */
++#define ASPEED_HACE_HASH_KEY_BUFF_H 0x98 /* Hash HMAC Key Buffer Base High Address Register */
++
+ /* crypto cmd */
+ #define HACE_CMD_SINGLE_DES 0
+ #define HACE_CMD_TRIPLE_DES BIT(17)
+@@ -132,6 +142,8 @@
+ #define HACE_CMD_IV_REQUIRE (HACE_CMD_CBC | HACE_CMD_CFB | \
+ HACE_CMD_OFB | HACE_CMD_CTR)
+
++#define DUMMY_KEY_SIZE 32
++
+ struct aspeed_hace_dev;
+ struct scatterlist;
+
+@@ -151,6 +163,14 @@ struct aspeed_engine_hash {
+ void *ahash_src_addr;
+ dma_addr_t ahash_src_dma_addr;
+
++ /* remain data buffer */
++ u8 *buffer_addr;
++ dma_addr_t buffer_dma_addr;
++
++ /* output buffer */
++ void *digest_addr;
++ dma_addr_t digest_dma_addr;
++
+ dma_addr_t src_dma;
+ dma_addr_t digest_dma;
+
+@@ -190,14 +210,7 @@ struct aspeed_sham_reqctx {
+ size_t ivsize;
+ const __be32 *sha_iv;
+
+- /* remain data buffer */
+- u8 buffer[SHA512_BLOCK_SIZE * 2];
+- dma_addr_t buffer_dma_addr;
+ size_t bufcnt; /* buffer counter */
+-
+- /* output buffer */
+- u8 digest[SHA512_DIGEST_SIZE] __aligned(64);
+- dma_addr_t digest_dma_addr;
+ u64 digcnt[2];
+ };
+
+@@ -220,12 +233,14 @@ struct aspeed_engine_crypto {
+
+ /* callback func */
+ aspeed_hace_fn_t resume;
++ int load_vault_key;
+ };
+
+ struct aspeed_cipher_ctx {
+ struct aspeed_hace_dev *hace_dev;
+ int key_len;
+ u8 key[AES_MAX_KEYLENGTH];
++ int dummy_key;
+
+ /* callback func */
+ aspeed_hace_fn_t start;
+@@ -243,9 +258,11 @@ struct aspeed_cipher_reqctx {
+
+ struct aspeed_hace_dev {
+ void __iomem *regs;
++ void __iomem *sec_regs;
+ struct device *dev;
+ int irq;
+ struct clk *clk;
++ struct reset_control *rst;
+ unsigned long version;
+
+ struct crypto_engine *crypt_engine_hash;
+@@ -268,7 +285,8 @@ struct aspeed_hace_alg {
+
+ enum aspeed_version {
+ AST2500_VERSION = 5,
+- AST2600_VERSION
++ AST2600_VERSION,
++ AST2700_VERSION,
+ };
+
+ #define ast_hace_write(hace, val, offset) \
+@@ -280,5 +298,8 @@ void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev);
+ void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev);
+ void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev);
+ void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev);
++int aspeed_hace_hash_init(struct aspeed_hace_dev *hace_dev);
++int aspeed_hace_crypto_init(struct aspeed_hace_dev *hace_dev);
++int find_dummy_key(const char *key, int keylen);
+
+ #endif
+diff --git a/drivers/crypto/aspeed/aspeed-rsss-hash.c b/drivers/crypto/aspeed/aspeed-rsss-hash.c
+new file mode 100644
+index 000000000..a0800108f
+--- /dev/null
++++ b/drivers/crypto/aspeed/aspeed-rsss-hash.c
+@@ -0,0 +1,877 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright 2023 Aspeed Technology Inc.
++ */
++
++#include <linux/device.h>
++#include <linux/dma-mapping.h>
++#include <linux/dmapool.h>
++#include "aspeed-rsss.h"
++
++//#define RSSS_SHA3_POLLING_MODE
++
++static int aspeed_sha3_self_test(struct aspeed_rsss_dev *rsss_dev)
++{
++ u32 pattern = 0xbeef;
++ u32 val;
++
++ ast_rsss_write(rsss_dev, pattern, ASPEED_SHA3_SRC_LO);
++ val = ast_rsss_read(rsss_dev, ASPEED_SHA3_SRC_LO);
++ if (val != pattern)
++ return -EIO;
++
++ ast_rsss_write(rsss_dev, 0x0, ASPEED_SHA3_SRC_LO);
++ val = ast_rsss_read(rsss_dev, ASPEED_SHA3_SRC_LO);
++ if (val)
++ return -EIO;
++
++ return 0;
++}
++
++static int aspeed_sha3_dma_prepare(struct aspeed_rsss_dev *rsss_dev)
++{
++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine;
++ struct ahash_request *req = sha3_engine->req;
++ struct aspeed_sha3_reqctx *rctx;
++ int length, remain;
++
++ rctx = ahash_request_ctx(req);
++ remain = (rctx->total + rctx->bufcnt) % rctx->blksize;
++ length = rctx->total + rctx->bufcnt - remain;
++
++ RSSS_DBG(rsss_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x, %s:0x%x\n",
++ "rctx total", rctx->total, "bufcnt", rctx->bufcnt,
++ "offset", rctx->offset, "length", length,
++ "remain", remain);
++
++ if (rctx->bufcnt)
++ memcpy(sha3_engine->ahash_src_addr, sha3_engine->buffer_addr,
++ rctx->bufcnt);
++
++ if (length < ASPEED_HASH_SRC_DMA_BUF_LEN) {
++ scatterwalk_map_and_copy(sha3_engine->ahash_src_addr + rctx->bufcnt,
++ rctx->src_sg, rctx->offset,
++ rctx->total - remain, 0);
++ rctx->offset += rctx->total - remain;
++
++ } else {
++ dev_warn(rsss_dev->dev, "SHA3 input data length is too large\n");
++ return -EINVAL;
++ }
++
++ /* Copy remaining data into buffer */
++ scatterwalk_map_and_copy(sha3_engine->buffer_addr, rctx->src_sg,
++ rctx->offset, remain, 0);
++ rctx->bufcnt = remain;
++
++ sha3_engine->src_length = length;
++ sha3_engine->src_dma = sha3_engine->ahash_src_dma_addr;
++
++ return 0;
++}
++
++/*
++ * Prepare DMA buffer as SG list buffer before
++ * hardware engine processing.
++ */
++static int aspeed_sha3_dma_prepare_sg(struct aspeed_rsss_dev *rsss_dev)
++{
++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine;
++ struct ahash_request *req = sha3_engine->req;
++ struct aspeed_sha3_reqctx *rctx;
++ struct aspeed_sg_list *src_list;
++ struct scatterlist *s;
++ int length, remain, sg_len;
++ int i, rc = 0;
++
++ rctx = ahash_request_ctx(req);
++ remain = (rctx->total + rctx->bufcnt) % rctx->blksize;
++ length = rctx->total + rctx->bufcnt - remain;
++
++ RSSS_DBG(rsss_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x\n",
++ "rctx total", rctx->total, "bufcnt", rctx->bufcnt,
++ "length", length, "remain", remain);
++
++ sg_len = dma_map_sg(rsss_dev->dev, rctx->src_sg, rctx->src_nents,
++ DMA_TO_DEVICE);
++ /*
++ * Need dma_sync_sg_for_device()?
++ */
++ if (!sg_len) {
++ dev_warn(rsss_dev->dev, "dma_map_sg() src error\n");
++ rc = -ENOMEM;
++ goto end;
++ }
++
++ src_list = (struct aspeed_sg_list *)sha3_engine->ahash_src_addr;
++
++ if (rctx->bufcnt != 0) {
++ u64 phy_addr;
++ u32 len;
++
++ phy_addr = sha3_engine->buffer_dma_addr;
++ len = rctx->bufcnt;
++ length -= len;
++
++ /* Last sg list */
++ if (length == 0)
++ len |= SG_LAST_LIST;
++
++ src_list[0].phy_addr = cpu_to_le64(phy_addr);
++ src_list[0].len = cpu_to_le32(len);
++
++ RSSS_DBG(rsss_dev, "Remain buffer first, addr:%llx, len:0x%x\n",
++ src_list[0].phy_addr, src_list[0].len);
++
++ src_list++;
++ }
++
++ if (length != 0) {
++ for_each_sg(rctx->src_sg, s, sg_len, i) {
++ u64 phy_addr = sg_dma_address(s);
++ u32 len = sg_dma_len(s);
++ u8 *va = sg_virt(s);
++
++ RSSS_DBG(rsss_dev, "SG[%d] PA:%llx, VA:%llx, len:0x%x\n",
++ i, sg_dma_address(s), (u64)va, len);
++
++ if (length > len) {
++ length -= len;
++ } else {
++ /* Last sg list */
++ len = length;
++ len |= SG_LAST_LIST;
++ length = 0;
++ }
++
++ src_list[i].phy_addr = cpu_to_le64(phy_addr);
++ src_list[i].len = cpu_to_le32(len);
++
++ len = len & 0xffff;
++ }
++ }
++
++ if (length != 0) {
++ rc = -EINVAL;
++ goto free_src_sg;
++ }
++
++ rctx->offset = rctx->total - remain;
++ sha3_engine->src_length = rctx->total + rctx->bufcnt - remain;
++ sha3_engine->src_dma = sha3_engine->ahash_src_dma_addr;
++
++ return 0;
++
++free_src_sg:
++ RSSS_DBG(rsss_dev, "dma_unmap_sg()\n");
++ dma_unmap_sg(rsss_dev->dev, rctx->src_sg, rctx->src_nents,
++ DMA_TO_DEVICE);
++end:
++ return rc;
++}
++
++static int aspeed_sha3_complete(struct aspeed_rsss_dev *rsss_dev)
++{
++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine;
++ struct ahash_request *req = sha3_engine->req;
++
++ RSSS_DBG(rsss_dev, "\n");
++
++ sha3_engine->flags &= ~CRYPTO_FLAGS_BUSY;
++
++ crypto_finalize_hash_request(rsss_dev->crypt_engine_sha3, req, 0);
++
++ return 0;
++}
++
++/*
++ * Copy digest to the corresponding request result.
++ * This function will be called at final() stage.
++ */
++static int aspeed_sha3_transfer(struct aspeed_rsss_dev *rsss_dev)
++{
++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine;
++ struct ahash_request *req = sha3_engine->req;
++ struct aspeed_sha3_reqctx *rctx;
++
++ RSSS_DBG(rsss_dev, "\n");
++
++ rctx = ahash_request_ctx(req);
++
++ /* brief delay to let the digest DMA write complete */
++ udelay(100);
++ memcpy(req->result, sha3_engine->digest_addr, rctx->digsize);
++
++ return aspeed_sha3_complete(rsss_dev);
++}
++
++#ifdef RSSS_SHA3_POLLING_MODE
++static int aspeed_sha3_wait_complete(struct aspeed_rsss_dev *rsss_dev)
++{
++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine;
++ u32 sts;
++ int ret;
++
++ ret = readl_poll_timeout(rsss_dev->regs + ASPEED_SHA3_BUSY_STS, sts,
++ ((sts & SHA3_STS) == 0x0),
++ ASPEED_RSSS_POLLING_TIME,
++ ASPEED_RSSS_TIMEOUT * 10);
++ if (ret) {
++ dev_err(rsss_dev->dev, "SHA3 wrong engine status\n");
++ return -EIO;
++ }
++
++ ret = readl_poll_timeout(rsss_dev->regs + ASPEED_RSSS_INT_STS, sts,
++ ((sts & SHA3_INT_DONE) == SHA3_INT_DONE),
++ ASPEED_RSSS_POLLING_TIME,
++ ASPEED_RSSS_TIMEOUT);
++ if (ret) {
++ dev_err(rsss_dev->dev, "SHA3 wrong interrupt status\n");
++ return -EIO;
++ }
++
++ ast_rsss_write(rsss_dev, sts, ASPEED_RSSS_INT_STS);
++
++ RSSS_DBG(rsss_dev, "irq sts:0x%x\n", sts);
++
++ if (sts & SHA3_INT_DONE) {
++ if (sha3_engine->flags & CRYPTO_FLAGS_BUSY)
++ tasklet_schedule(&sha3_engine->done_task);
++ else
++ dev_err(rsss_dev->dev, "SHA3 no active requests.\n");
++ }
++
++ return 0;
++}
++#endif
++
++/*
++ * Trigger hardware engines to do the math.
++ */
++static int aspeed_sha3_trigger(struct aspeed_rsss_dev *rsss_dev,
++ aspeed_rsss_fn_t resume)
++{
++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine;
++ struct ahash_request *req = sha3_engine->req;
++ struct aspeed_sha3_reqctx *rctx;
++
++ RSSS_DBG(rsss_dev, "src_dma:%pad, digest_dma:%pad, length:%zu\n",
++ &sha3_engine->src_dma, &sha3_engine->digest_dma_addr,
++ sha3_engine->src_length);
++
++ rctx = ahash_request_ctx(req);
++ sha3_engine->resume = resume;
++
++ ast_rsss_write(rsss_dev, sha3_engine->src_dma,
++ ASPEED_SHA3_SRC_LO);
++ /* TODO - SRC_HI */
++ ast_rsss_write(rsss_dev, sha3_engine->src_dma >> 32,
++ ASPEED_SHA3_SRC_HI);
++
++ ast_rsss_write(rsss_dev, sha3_engine->digest_dma_addr,
++ ASPEED_SHA3_DST_LO);
++ /* TODO - DST_HI */
++ ast_rsss_write(rsss_dev, sha3_engine->digest_dma_addr >> 32,
++ ASPEED_SHA3_DST_HI);
++
++ if (!sha3_engine->sg_mode)
++ ast_rsss_write(rsss_dev, sha3_engine->src_length,
++ ASPEED_SHA3_SRC_LEN);
++
++ ast_rsss_write(rsss_dev, rctx->cmd, ASPEED_SHA3_CMD);
++
++ /* Memory barrier to ensure all data setup before engine starts */
++ mb();
++
++ rctx->cmd |= SHA3_CMD_TRIG;
++
++ RSSS_DBG(rsss_dev, "cmd:0x%x\n", rctx->cmd);
++
++ ast_rsss_write(rsss_dev, rctx->cmd, ASPEED_SHA3_CMD);
++
++#ifdef RSSS_SHA3_POLLING_MODE
++ return aspeed_sha3_wait_complete(rsss_dev);
++#else
++ return -EINPROGRESS;
++#endif
++}
++
++static int aspeed_sha3_req_final(struct aspeed_rsss_dev *rsss_dev)
++{
++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine;
++ struct ahash_request *req = sha3_engine->req;
++ struct aspeed_sha3_reqctx *rctx = ahash_request_ctx(req);
++ int remain_pad;
++ u8 *src;
++
++ RSSS_DBG(rsss_dev, "\n");
++
++ /* A0 padding issue */
++ remain_pad = rctx->blksize - rctx->bufcnt;
++ if (remain_pad < 16) {
++ /* SW padding */
++ RSSS_DBG(rsss_dev, "Use SW padding, pad size:0x%x\n",
++ remain_pad);
++ src = (u8 *)sha3_engine->buffer_addr;
++ src[rctx->bufcnt] = 0x06;
++ memset(src + rctx->bufcnt + 1, 0, remain_pad - 1);
++ src[rctx->bufcnt + remain_pad - 1] |= 0x80;
++
++ rctx->bufcnt += remain_pad;
++
++ } else {
++ rctx->cmd |= SHA3_CMD_HW_PAD;
++ }
++
++ if (sha3_engine->sg_mode) {
++ struct aspeed_sg_list *src_list =
++ (struct aspeed_sg_list *)sha3_engine->ahash_src_addr;
++ u64 phy_addr;
++ u32 len;
++
++ phy_addr = sha3_engine->buffer_dma_addr;
++ len = rctx->bufcnt;
++ len |= SG_LAST_LIST;
++
++ src_list[0].phy_addr = cpu_to_le64(phy_addr);
++ src_list[0].len = cpu_to_le32(len);
++
++ RSSS_DBG(rsss_dev, "Final SG, addr:%llx, len:0x%x\n",
++ src_list[0].phy_addr, src_list[0].len);
++
++ rctx->cmd |= SHA3_CMD_SG_MODE;
++ sha3_engine->src_dma = sha3_engine->ahash_src_dma_addr;
++
++ } else {
++ sha3_engine->src_dma = sha3_engine->buffer_dma_addr;
++ sha3_engine->src_length = rctx->bufcnt;
++ }
++
++ rctx->cmd |= SHA3_CMD_ACC_FINAL;
++
++
++ return aspeed_sha3_trigger(rsss_dev, aspeed_sha3_transfer);
++}
++
++static int aspeed_sha3_update_resume(struct aspeed_rsss_dev *rsss_dev)
++{
++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine;
++ struct ahash_request *req = sha3_engine->req;
++ struct aspeed_sha3_reqctx *rctx;
++
++ RSSS_DBG(rsss_dev, "\n");
++
++ rctx = ahash_request_ctx(req);
++
++ rctx->cmd &= ~SHA3_CMD_TRIG;
++
++ if (rctx->flags & SHA3_FLAGS_FINUP)
++ return aspeed_sha3_req_final(rsss_dev);
++
++ return aspeed_sha3_complete(rsss_dev);
++}
++
++static int aspeed_sha3_update_resume_sg(struct aspeed_rsss_dev *rsss_dev)
++{
++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine;
++ struct ahash_request *req = sha3_engine->req;
++ struct aspeed_sha3_reqctx *rctx;
++ int remain;
++
++ RSSS_DBG(rsss_dev, "\n");
++
++ rctx = ahash_request_ctx(req);
++ remain = rctx->total - rctx->offset;
++
++ RSSS_DBG(rsss_dev, "Copy remaining data from 0x%x, size:0x%x\n",
++ rctx->offset, remain);
++
++ dma_unmap_sg(rsss_dev->dev, rctx->src_sg, rctx->src_nents,
++ DMA_TO_DEVICE);
++
++ scatterwalk_map_and_copy(sha3_engine->buffer_addr, rctx->src_sg, rctx->offset,
++ remain, 0);
++
++ rctx->bufcnt = remain;
++ rctx->cmd &= ~(SHA3_CMD_TRIG | SHA3_CMD_SG_MODE);
++
++ if (rctx->flags & SHA3_FLAGS_FINUP)
++ return aspeed_sha3_req_final(rsss_dev);
++
++ return aspeed_sha3_complete(rsss_dev);
++}
++
++static int aspeed_sha3_req_update(struct aspeed_rsss_dev *rsss_dev)
++{
++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine;
++ struct ahash_request *req = sha3_engine->req;
++ struct aspeed_sha3_reqctx *rctx;
++ aspeed_rsss_fn_t resume;
++ int ret;
++
++ RSSS_DBG(rsss_dev, "\n");
++
++ rctx = ahash_request_ctx(req);
++
++ if (sha3_engine->sg_mode) {
++ rctx->cmd |= SHA3_CMD_SG_MODE;
++ resume = aspeed_sha3_update_resume_sg;
++
++ } else {
++ resume = aspeed_sha3_update_resume;
++ }
++
++ ret = sha3_engine->dma_prepare(rsss_dev);
++ if (ret)
++ return ret;
++
++ return aspeed_sha3_trigger(rsss_dev, resume);
++}
++
++static int aspeed_sha3_do_request(struct crypto_engine *engine, void *areq)
++{
++ struct ahash_request *req = ahash_request_cast(areq);
++ struct aspeed_sha3_reqctx *rctx = ahash_request_ctx(req);
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
++ struct aspeed_sha3_ctx *tctx = crypto_ahash_ctx(tfm);
++ struct aspeed_rsss_dev *rsss_dev = tctx->rsss_dev;
++ struct aspeed_engine_sha3 *sha3_engine;
++ int ret = 0;
++
++ RSSS_DBG(rsss_dev, "\n");
++
++ sha3_engine = &rsss_dev->sha3_engine;
++ sha3_engine->flags |= CRYPTO_FLAGS_BUSY;
++ sha3_engine->req = req;
++
++ if (sha3_engine->sg_mode)
++ sha3_engine->dma_prepare = aspeed_sha3_dma_prepare_sg;
++ else
++ sha3_engine->dma_prepare = aspeed_sha3_dma_prepare;
++
++ if (rctx->op == SHA_OP_UPDATE)
++ ret = aspeed_sha3_req_update(rsss_dev);
++ else if (rctx->op == SHA_OP_FINAL)
++ ret = aspeed_sha3_req_final(rsss_dev);
++
++ if (ret != -EINPROGRESS)
++ return ret;
++
++ return 0;
++}
++
++static int aspeed_sha3_handle_queue(struct aspeed_rsss_dev *rsss_dev,
++ struct ahash_request *req)
++{
++ return crypto_transfer_hash_request_to_engine(rsss_dev->crypt_engine_sha3, req);
++}
++
++static int aspeed_sha3_update(struct ahash_request *req)
++{
++ struct aspeed_sha3_reqctx *rctx = ahash_request_ctx(req);
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
++ struct aspeed_sha3_ctx *tctx = crypto_ahash_ctx(tfm);
++ struct aspeed_rsss_dev *rsss_dev = tctx->rsss_dev;
++ struct aspeed_engine_sha3 *sha3_engine;
++
++ RSSS_DBG(rsss_dev, "req->nbytes: %d\n", req->nbytes);
++
++ sha3_engine = &rsss_dev->sha3_engine;
++
++ rctx->total = req->nbytes;
++ rctx->src_sg = req->src;
++ rctx->offset = 0;
++ rctx->src_nents = sg_nents(req->src);
++ rctx->op = SHA_OP_UPDATE;
++
++ RSSS_DBG(rsss_dev, "total:0x%x, src_nents:0x%x\n", rctx->total, rctx->src_nents);
++
++ rctx->digcnt[0] += rctx->total;
++ if (rctx->digcnt[0] < rctx->total)
++ rctx->digcnt[1]++;
++
++ if (rctx->bufcnt + rctx->total < rctx->blksize) {
++ scatterwalk_map_and_copy(sha3_engine->buffer_addr + rctx->bufcnt,
++ rctx->src_sg, rctx->offset,
++ rctx->total, 0);
++ rctx->bufcnt += rctx->total;
++
++ return 0;
++ }
++
++ return aspeed_sha3_handle_queue(rsss_dev, req);
++}
++
++static int aspeed_sha3_final(struct ahash_request *req)
++{
++ struct aspeed_sha3_reqctx *rctx = ahash_request_ctx(req);
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
++ struct aspeed_sha3_ctx *tctx = crypto_ahash_ctx(tfm);
++ struct aspeed_rsss_dev *rsss_dev = tctx->rsss_dev;
++
++ RSSS_DBG(rsss_dev, "req->nbytes:%d, rctx->total:%d\n",
++ req->nbytes, rctx->total);
++ rctx->op = SHA_OP_FINAL;
++
++ return aspeed_sha3_handle_queue(rsss_dev, req);
++}
++
++static int aspeed_sha3_finup(struct ahash_request *req)
++{
++ struct aspeed_sha3_reqctx *rctx = ahash_request_ctx(req);
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
++ struct aspeed_sha3_ctx *tctx = crypto_ahash_ctx(tfm);
++ struct aspeed_rsss_dev *rsss_dev = tctx->rsss_dev;
++ int rc1, rc2;
++
++ RSSS_DBG(rsss_dev, "req->nbytes: %d\n", req->nbytes);
++
++ rctx->flags |= SHA3_FLAGS_FINUP;
++
++ rc1 = aspeed_sha3_update(req);
++ if (rc1 == -EINPROGRESS || rc1 == -EBUSY)
++ return rc1;
++
++ /*
++ * final() has to be always called to cleanup resources
++ * even if update() failed, except EINPROGRESS
++ */
++ rc2 = aspeed_sha3_final(req);
++
++ return rc1 ? : rc2;
++}
++
++static int aspeed_sha3_init(struct ahash_request *req)
++{
++ struct aspeed_sha3_reqctx *rctx = ahash_request_ctx(req);
++ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
++ struct aspeed_sha3_ctx *tctx = crypto_ahash_ctx(tfm);
++ struct aspeed_rsss_dev *rsss_dev = tctx->rsss_dev;
++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine;
++
++ RSSS_DBG(rsss_dev, "%s: digest size:%d\n",
++ crypto_tfm_alg_name(&tfm->base),
++ crypto_ahash_digestsize(tfm));
++
++ rctx->cmd = SHA3_CMD_ACC;
++ rctx->flags = 0;
++
++ switch (crypto_ahash_digestsize(tfm)) {
++ case SHA3_224_DIGEST_SIZE:
++ rctx->cmd |= SHA3_CMD_MODE_224;
++ rctx->flags |= SHA3_FLAGS_SHA224;
++ rctx->digsize = SHA3_224_DIGEST_SIZE;
++ rctx->blksize = SHA3_224_BLOCK_SIZE;
++ break;
++ case SHA3_256_DIGEST_SIZE:
++ rctx->cmd |= SHA3_CMD_MODE_256;
++ rctx->flags |= SHA3_FLAGS_SHA256;
++ rctx->digsize = SHA3_256_DIGEST_SIZE;
++ rctx->blksize = SHA3_256_BLOCK_SIZE;
++ break;
++ case SHA3_384_DIGEST_SIZE:
++ rctx->cmd |= SHA3_CMD_MODE_384;
++ rctx->flags |= SHA3_FLAGS_SHA384;
++ rctx->digsize = SHA3_384_DIGEST_SIZE;
++ rctx->blksize = SHA3_384_BLOCK_SIZE;
++ break;
++ case SHA3_512_DIGEST_SIZE:
++ rctx->cmd |= SHA3_CMD_MODE_512;
++ rctx->flags |= SHA3_FLAGS_SHA512;
++ rctx->digsize = SHA3_512_DIGEST_SIZE;
++ rctx->blksize = SHA3_512_BLOCK_SIZE;
++ break;
++ default:
++		dev_warn(tctx->rsss_dev->dev, "digest size %d not supported\n",
++ crypto_ahash_digestsize(tfm));
++ return -EINVAL;
++ }
++
++ rctx->bufcnt = 0;
++ rctx->total = 0;
++ rctx->digcnt[0] = 0;
++ rctx->digcnt[1] = 0;
++
++ memset(sha3_engine->digest_addr, 0x0, SHA3_512_DIGEST_SIZE);
++
++ return 0;
++}
++
++static int aspeed_sha3_digest(struct ahash_request *req)
++{
++ return aspeed_sha3_init(req) ? : aspeed_sha3_finup(req);
++}
++
++static int aspeed_sha3_export(struct ahash_request *req, void *out)
++{
++ struct aspeed_sha3_reqctx *rctx = ahash_request_ctx(req);
++
++ memcpy(out, rctx, sizeof(*rctx));
++
++ return 0;
++}
++
++static int aspeed_sha3_import(struct ahash_request *req, const void *in)
++{
++ struct aspeed_sha3_reqctx *rctx = ahash_request_ctx(req);
++
++ memcpy(rctx, in, sizeof(*rctx));
++
++ return 0;
++}
++
++static int aspeed_sha3_cra_init(struct crypto_tfm *tfm)
++{
++ struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg);
++ struct aspeed_sha3_ctx *tctx = crypto_tfm_ctx(tfm);
++ struct aspeed_rsss_alg *ast_alg;
++
++ ast_alg = container_of(alg, struct aspeed_rsss_alg, alg.ahash.base);
++ tctx->rsss_dev = ast_alg->rsss_dev;
++
++ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
++ sizeof(struct aspeed_sha3_reqctx));
++
++ return 0;
++}
++
++static void aspeed_sha3_cra_exit(struct crypto_tfm *tfm)
++{
++ struct aspeed_sha3_ctx *tctx = crypto_tfm_ctx(tfm);
++ struct aspeed_rsss_dev *rsss_dev = tctx->rsss_dev;
++
++ RSSS_DBG(rsss_dev, "%s\n", crypto_tfm_alg_name(tfm));
++}
++
++struct aspeed_rsss_alg aspeed_rsss_algs_sha3_224 = {
++ .type = ASPEED_ALGO_TYPE_AHASH,
++ .alg.ahash.base = {
++ .init = aspeed_sha3_init,
++ .update = aspeed_sha3_update,
++ .final = aspeed_sha3_final,
++ .finup = aspeed_sha3_finup,
++ .digest = aspeed_sha3_digest,
++ .export = aspeed_sha3_export,
++ .import = aspeed_sha3_import,
++ .halg = {
++ .digestsize = SHA3_224_DIGEST_SIZE,
++ .statesize = sizeof(struct aspeed_sha3_reqctx),
++ .base = {
++ .cra_name = "sha3-224",
++ .cra_driver_name = "aspeed-sha3-224",
++ .cra_priority = 300,
++ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
++ CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = SHA3_224_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct aspeed_sha3_ctx),
++ .cra_module = THIS_MODULE,
++ .cra_init = aspeed_sha3_cra_init,
++ .cra_exit = aspeed_sha3_cra_exit,
++ }
++ }
++ },
++ .alg.ahash.op = {
++ .do_one_request = aspeed_sha3_do_request,
++ },
++};
++
++struct aspeed_rsss_alg aspeed_rsss_algs_sha3_256 = {
++ .type = ASPEED_ALGO_TYPE_AHASH,
++ .alg.ahash.base = {
++ .init = aspeed_sha3_init,
++ .update = aspeed_sha3_update,
++ .final = aspeed_sha3_final,
++ .finup = aspeed_sha3_finup,
++ .digest = aspeed_sha3_digest,
++ .export = aspeed_sha3_export,
++ .import = aspeed_sha3_import,
++ .halg = {
++ .digestsize = SHA3_256_DIGEST_SIZE,
++ .statesize = sizeof(struct aspeed_sha3_reqctx),
++ .base = {
++ .cra_name = "sha3-256",
++ .cra_driver_name = "aspeed-sha3-256",
++ .cra_priority = 300,
++ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
++ CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = SHA3_256_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct aspeed_sha3_ctx),
++ .cra_module = THIS_MODULE,
++ .cra_init = aspeed_sha3_cra_init,
++ .cra_exit = aspeed_sha3_cra_exit,
++ }
++ }
++ },
++ .alg.ahash.op = {
++ .do_one_request = aspeed_sha3_do_request,
++ },
++};
++
++struct aspeed_rsss_alg aspeed_rsss_algs_sha3_384 = {
++ .type = ASPEED_ALGO_TYPE_AHASH,
++ .alg.ahash.base = {
++ .init = aspeed_sha3_init,
++ .update = aspeed_sha3_update,
++ .final = aspeed_sha3_final,
++ .finup = aspeed_sha3_finup,
++ .digest = aspeed_sha3_digest,
++ .export = aspeed_sha3_export,
++ .import = aspeed_sha3_import,
++ .halg = {
++ .digestsize = SHA3_384_DIGEST_SIZE,
++ .statesize = sizeof(struct aspeed_sha3_reqctx),
++ .base = {
++ .cra_name = "sha3-384",
++ .cra_driver_name = "aspeed-sha3-384",
++ .cra_priority = 300,
++ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
++ CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = SHA3_384_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct aspeed_sha3_ctx),
++ .cra_module = THIS_MODULE,
++ .cra_init = aspeed_sha3_cra_init,
++ .cra_exit = aspeed_sha3_cra_exit,
++ }
++ }
++ },
++ .alg.ahash.op = {
++ .do_one_request = aspeed_sha3_do_request,
++ },
++};
++
++struct aspeed_rsss_alg aspeed_rsss_algs_sha3_512 = {
++ .type = ASPEED_ALGO_TYPE_AHASH,
++ .alg.ahash.base = {
++ .init = aspeed_sha3_init,
++ .update = aspeed_sha3_update,
++ .final = aspeed_sha3_final,
++ .finup = aspeed_sha3_finup,
++ .digest = aspeed_sha3_digest,
++ .export = aspeed_sha3_export,
++ .import = aspeed_sha3_import,
++ .halg = {
++ .digestsize = SHA3_512_DIGEST_SIZE,
++ .statesize = sizeof(struct aspeed_sha3_reqctx),
++ .base = {
++ .cra_name = "sha3-512",
++ .cra_driver_name = "aspeed-sha3-512",
++ .cra_priority = 300,
++ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
++ CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY,
++ .cra_blocksize = SHA3_512_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct aspeed_sha3_ctx),
++ .cra_module = THIS_MODULE,
++ .cra_init = aspeed_sha3_cra_init,
++ .cra_exit = aspeed_sha3_cra_exit,
++ }
++ }
++ },
++ .alg.ahash.op = {
++ .do_one_request = aspeed_sha3_do_request,
++ },
++};
++
++static void aspeed_rsss_sha3_done_task(unsigned long data)
++{
++ struct aspeed_rsss_dev *rsss_dev = (struct aspeed_rsss_dev *)data;
++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine;
++
++ (void)sha3_engine->resume(rsss_dev);
++}
++
++void aspeed_rsss_sha3_exit(struct aspeed_rsss_dev *rsss_dev)
++{
++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine;
++
++ crypto_engine_exit(rsss_dev->crypt_engine_sha3);
++ tasklet_kill(&sha3_engine->done_task);
++}
++
++int aspeed_rsss_sha3_init(struct aspeed_rsss_dev *rsss_dev)
++{
++ struct aspeed_engine_sha3 *sha3_engine;
++ u32 val;
++ int rc;
++
++ rc = reset_control_deassert(rsss_dev->reset_sha3);
++ if (rc) {
++ dev_err(rsss_dev->dev, "Deassert SHA3 reset failed\n");
++ goto end;
++ }
++
++ sha3_engine = &rsss_dev->sha3_engine;
++
++ /* Initialize crypto hardware engine structure for SHA3 */
++ rsss_dev->crypt_engine_sha3 = crypto_engine_alloc_init(rsss_dev->dev, true);
++ if (!rsss_dev->crypt_engine_sha3) {
++ rc = -ENOMEM;
++ goto end;
++ }
++
++ rc = crypto_engine_start(rsss_dev->crypt_engine_sha3);
++ if (rc)
++ goto err_engine_sha3_start;
++
++ tasklet_init(&sha3_engine->done_task, aspeed_rsss_sha3_done_task,
++ (unsigned long)rsss_dev);
++
++	/* Allocate DMA buffer used for hash engine input */
++ sha3_engine->ahash_src_addr =
++ dmam_alloc_coherent(rsss_dev->dev,
++ ASPEED_HASH_SRC_DMA_BUF_LEN,
++ &sha3_engine->ahash_src_dma_addr,
++ GFP_KERNEL);
++ if (!sha3_engine->ahash_src_addr) {
++ dev_err(rsss_dev->dev, "Failed to allocate DMA src buffer\n");
++ rc = -ENOMEM;
++ goto err_engine_sha3_start;
++ }
++
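++	/*
++	 * Buffer for partial-block data carried between updates;
++	 * sized for SHA3-224, which has the largest block (144 bytes).
++	 */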
++ sha3_engine->buffer_addr = dmam_alloc_coherent(rsss_dev->dev, SHA3_224_BLOCK_SIZE,
++ &sha3_engine->buffer_dma_addr,
++ GFP_KERNEL);
++ if (!sha3_engine->buffer_addr) {
++ dev_err(rsss_dev->dev, "Failed to allocate DMA buffer\n");
++ rc = -ENOMEM;
++ goto err_engine_sha3_start;
++ }
++
++ sha3_engine->digest_addr = dmam_alloc_coherent(rsss_dev->dev, SHA3_512_DIGEST_SIZE,
++ &sha3_engine->digest_dma_addr,
++ GFP_KERNEL);
++ if (!sha3_engine->digest_addr) {
++ dev_err(rsss_dev->dev, "Failed to allocate DMA digest buffer\n");
++ rc = -ENOMEM;
++ goto err_engine_sha3_start;
++ }
++
++ /*
++ * Set 1 to use scatter-gather mode.
++ * Set 0 to use direct mode.
++ */
++ sha3_engine->sg_mode = 0;
++
++ /* Self-test */
++ rc = aspeed_sha3_self_test(rsss_dev);
++ if (rc)
++ goto err_engine_sha3_start;
++
++ /* Enable SHA3 interrupt */
++ val = ast_rsss_read(rsss_dev, ASPEED_RSSS_INT_EN);
++ ast_rsss_write(rsss_dev, val | SHA3_INT_EN, ASPEED_RSSS_INT_EN);
++ dev_info(rsss_dev->dev, "Aspeed RSSS SHA3 interrupt mode.\n");
++
++ dev_info(rsss_dev->dev, "Aspeed RSSS SHA3 initialized (%s mode)\n",
++ sha3_engine->sg_mode ? "SG" : "Direct");
++
++ return 0;
++
++err_engine_sha3_start:
++ crypto_engine_exit(rsss_dev->crypt_engine_sha3);
++end:
++ return rc;
++}
+diff --git a/drivers/crypto/aspeed/aspeed-rsss-rsa.c b/drivers/crypto/aspeed/aspeed-rsss-rsa.c
+new file mode 100644
+index 000000000..1a777e97d
+--- /dev/null
++++ b/drivers/crypto/aspeed/aspeed-rsss-rsa.c
+@@ -0,0 +1,608 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright 2023 Aspeed Technology Inc.
++ */
++
++#include <linux/platform_device.h>
++#include "aspeed-rsss.h"
++
++static u8 data_rev[SRAM_BLOCK_SIZE];
++static u8 data[SRAM_BLOCK_SIZE];
++static int dbg;
++
++static void hexdump(char *name, unsigned char *buf, unsigned int len)
++{
++ if (!dbg)
++ return;
++
++#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
++ pr_info("%s:\n", name);
++ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
++ 16, 1, buf, len, false);
++#endif
++}
++
++static int aspeed_rsa_self_test(struct aspeed_rsss_dev *rsss_dev)
++{
++ struct aspeed_engine_rsa *rsa_engine;
++ const u32 pattern = 0xffffffff;
++ u32 val;
++
++ rsa_engine = &rsss_dev->rsa_engine;
++
++ /* Set SRAM access control - CPU */
++ val = ast_rsss_read(rsss_dev, ASPEED_RSSS_CTRL);
++ ast_rsss_write(rsss_dev, val | SRAM_AHB_MODE_CPU, ASPEED_RSSS_CTRL);
++
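++	/* Sanity-check CPU access to the engine SRAM with a write/read-back pattern */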
++ writel(pattern, rsa_engine->sram_exp);
++ val = readl(rsa_engine->sram_exp);
++ if (val != pattern)
++ return -EIO;
++
++ writel(0x0, rsa_engine->sram_exp);
++
++ return 0;
++}
++
++static inline struct akcipher_request *
++ akcipher_request_cast(struct crypto_async_request *req)
++{
++ return container_of(req, struct akcipher_request, base);
++}
++
++static int aspeed_rsa_do_fallback(struct akcipher_request *req)
++{
++ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(cipher);
++ int err;
++
++ akcipher_request_set_tfm(req, ctx->fallback_tfm);
++
++ if (ctx->enc)
++ err = crypto_akcipher_encrypt(req);
++ else
++ err = crypto_akcipher_decrypt(req);
++
++ akcipher_request_set_tfm(req, cipher);
++
++ return err;
++}
++
++static bool aspeed_rsa_need_fallback(struct akcipher_request *req)
++{
++ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(cipher);
++
++ return ctx->key.n_sz > ASPEED_RSA_MAX_KEY_LEN;
++}
++
++static int aspeed_rsa_handle_queue(struct aspeed_rsss_dev *rsss_dev,
++ struct akcipher_request *req)
++{
++ if (aspeed_rsa_need_fallback(req)) {
++ RSSS_DBG(rsss_dev, "SW fallback\n");
++ return aspeed_rsa_do_fallback(req);
++ }
++
++ return crypto_transfer_akcipher_request_to_engine(rsss_dev->crypt_engine_rsa, req);
++}
++
++static int aspeed_rsa_do_request(struct crypto_engine *engine, void *areq)
++{
++ struct akcipher_request *req = akcipher_request_cast(areq);
++ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(cipher);
++ struct aspeed_rsss_dev *rsss_dev = ctx->rsss_dev;
++ struct aspeed_engine_rsa *rsa_engine;
++
++ rsa_engine = &rsss_dev->rsa_engine;
++ rsa_engine->req = req;
++ rsa_engine->flags |= CRYPTO_FLAGS_BUSY;
++
++ return ctx->trigger(rsss_dev);
++}
++
++static int aspeed_rsa_complete(struct aspeed_rsss_dev *rsss_dev, int err)
++{
++ struct aspeed_engine_rsa *rsa_engine = &rsss_dev->rsa_engine;
++ struct akcipher_request *req = rsa_engine->req;
++
++ rsa_engine->flags &= ~CRYPTO_FLAGS_BUSY;
++
++ crypto_finalize_akcipher_request(rsss_dev->crypt_engine_rsa, req, err);
++
++ return err;
++}
++
++/*
++ * Copy data to the SRAM buffer for engine use.
++ */
++static void aspeed_rsa_sg_copy_to_buffer(struct aspeed_rsss_dev *rsss_dev,
++ void __iomem *buf, struct scatterlist *src,
++ size_t nbytes)
++{
++ RSSS_DBG(rsss_dev, "src len:%zu\n", nbytes);
++
++ memset(data_rev, 0, SRAM_BLOCK_SIZE);
++ memset(data, 0, SRAM_BLOCK_SIZE);
++
++ scatterwalk_map_and_copy(data, src, 0, nbytes, 0);
++
++ hexdump("data", data, nbytes);
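++	/* Reverse the byte order: the engine expects the operand LSB first */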
++ for (int i = 0; i < nbytes; i++)
++ data_rev[nbytes - i - 1] = data[i];
++
++ /* align 8 bytes */
++ memcpy_toio(buf, data_rev, (nbytes + 7) & ~(8 - 1));
++}
++
++/*
++ * Copy Exp/Mod key to the SRAM buffer for engine use.
++ *
++ * Params:
++ * - mode 0 : Exponent
++ * - mode 1 : Modulus
++ */
++static int aspeed_rsa_ctx_copy(struct aspeed_rsss_dev *rsss_dev, void __iomem *dst,
++ const u8 *src, size_t nbytes,
++ enum aspeed_rsa_key_mode mode)
++{
++ RSSS_DBG(rsss_dev, "nbytes:%zu, mode:%d\n", nbytes, mode);
++
++ if (nbytes > ASPEED_RSA_MAX_KEY_LEN)
++ return -ENOMEM;
++
++ memset(data, 0, SRAM_BLOCK_SIZE);
++
++ /* Remove leading zeros */
++ while (nbytes > 0 && src[0] == 0) {
++ src++;
++ nbytes--;
++ }
++
++ for (int i = 0; i < nbytes; i++)
++ data[nbytes - i - 1] = src[i];
++
++ /* align 8 bytes */
++ memcpy_toio(dst, data, (nbytes + 7) & ~(8 - 1));
++
++ return nbytes * 8;
++}
++
++static int aspeed_rsa_transfer(struct aspeed_rsss_dev *rsss_dev)
++{
++ struct aspeed_engine_rsa *rsa_engine = &rsss_dev->rsa_engine;
++ struct akcipher_request *req = rsa_engine->req;
++ struct scatterlist *out_sg = req->dst;
++ size_t nbytes = req->dst_len;
++ u8 data[SRAM_BLOCK_SIZE];
++ u32 val;
++
++ RSSS_DBG(rsss_dev, "nbytes:%zu\n", nbytes);
++
++ /* Set SRAM access control - CPU */
++ val = ast_rsss_read(rsss_dev, ASPEED_RSSS_CTRL);
++ ast_rsss_write(rsss_dev, val | SRAM_AHB_MODE_CPU, ASPEED_RSSS_CTRL);
++
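++	/* Read the result back from SRAM, restoring big-endian byte order */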
++ for (int i = 0; i < nbytes; i++)
++ data[nbytes - i - 1] = readb(rsa_engine->sram_data + i);
++
++ scatterwalk_map_and_copy(data, out_sg, 0, nbytes, 1);
++
++ return aspeed_rsa_complete(rsss_dev, 0);
++}
++
++#ifdef RSSS_RSA_POLLING_MODE
++static int aspeed_rsa_wait_complete(struct aspeed_rsss_dev *rsss_dev)
++{
++ struct aspeed_engine_rsa *rsa_engine = &rsss_dev->rsa_engine;
++ u32 sts;
++ int ret;
++
++ ret = readl_poll_timeout(rsss_dev->regs + ASPEED_RSA_ENG_STS, sts,
++ ((sts & RSA_STS) == 0x0),
++ ASPEED_RSSS_POLLING_TIME,
++ ASPEED_RSSS_TIMEOUT * 10);
++ if (ret) {
++ dev_err(rsss_dev->dev, "RSA wrong engine status\n");
++ return -EIO;
++ }
++
++ ret = readl_poll_timeout(rsss_dev->regs + ASPEED_RSSS_INT_STS, sts,
++ ((sts & RSA_INT_DONE) == RSA_INT_DONE),
++ ASPEED_RSSS_POLLING_TIME,
++ ASPEED_RSSS_TIMEOUT);
++ if (ret) {
++ dev_err(rsss_dev->dev, "RSA wrong interrupt status\n");
++ return -EIO;
++ }
++
++ ast_rsss_write(rsss_dev, sts, ASPEED_RSSS_INT_STS);
++
++ RSSS_DBG(rsss_dev, "irq sts:0x%x\n", sts);
++
++ if (sts & RSA_INT_DONE) {
++ /* Stop RSA engine */
++ ast_rsss_write(rsss_dev, 0, ASPEED_RSA_TRIGGER);
++
++ if (rsa_engine->flags & CRYPTO_FLAGS_BUSY)
++ tasklet_schedule(&rsa_engine->done_task);
++ else
++ dev_err(rsss_dev->dev, "RSA no active requests.\n");
++ }
++
++ return 0;
++}
++#endif
++
++static int aspeed_rsa_trigger(struct aspeed_rsss_dev *rsss_dev)
++{
++ struct aspeed_engine_rsa *rsa_engine = &rsss_dev->rsa_engine;
++ struct akcipher_request *req = rsa_engine->req;
++ struct crypto_akcipher *cipher;
++ struct aspeed_rsa_ctx *ctx;
++ int ne, nm;
++ u32 val;
++
++ RSSS_DBG(rsss_dev, "\n");
++
++ cipher = crypto_akcipher_reqtfm(req);
++ ctx = akcipher_tfm_ctx(cipher);
++
++ if (!ctx->n || !ctx->n_sz) {
++ dev_err(rsss_dev->dev, "%s: key n is not set\n", __func__);
++ return -EINVAL;
++ }
++
++ /* Set SRAM access control - CPU */
++ val = ast_rsss_read(rsss_dev, ASPEED_RSSS_CTRL);
++ ast_rsss_write(rsss_dev, val | SRAM_AHB_MODE_CPU, ASPEED_RSSS_CTRL);
++
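++	/* Clear the exponent, modulus and data SRAM regions before loading new operands */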
++ memset_io(rsa_engine->sram_exp, 0, SRAM_BLOCK_SIZE);
++ memset_io(rsa_engine->sram_mod, 0, SRAM_BLOCK_SIZE);
++ memset_io(rsa_engine->sram_data, 0, SRAM_BLOCK_SIZE);
++
++ /* Copy source data to SRAM buffer */
++ aspeed_rsa_sg_copy_to_buffer(rsss_dev, rsa_engine->sram_data,
++ req->src, req->src_len);
++
++ nm = aspeed_rsa_ctx_copy(rsss_dev, rsa_engine->sram_mod, ctx->n,
++ ctx->n_sz, ASPEED_RSA_MOD_MODE);
++
++ /* Set dst len as modulus size */
++ req->dst_len = nm / 8;
++
++ if (ctx->enc) {
++ if (!ctx->e || !ctx->e_sz) {
++ dev_err(rsss_dev->dev, "%s: key e is not set\n",
++ __func__);
++ return -EINVAL;
++ }
++ /* Copy key e to SRAM buffer */
++ ne = aspeed_rsa_ctx_copy(rsss_dev, rsa_engine->sram_exp,
++ ctx->e, ctx->e_sz,
++ ASPEED_RSA_EXP_MODE);
++ } else {
++ if (!ctx->d || !ctx->d_sz) {
++ dev_err(rsss_dev->dev, "%s: key d is not set\n",
++ __func__);
++ return -EINVAL;
++ }
++ /* Copy key d to SRAM buffer */
++ ne = aspeed_rsa_ctx_copy(rsss_dev, rsa_engine->sram_exp,
++ ctx->key.d, ctx->key.d_sz,
++ ASPEED_RSA_EXP_MODE);
++ }
++
++ hexdump("exp", rsa_engine->sram_exp, ctx->e_sz);
++ hexdump("mod", rsa_engine->sram_mod, ctx->n_sz);
++ hexdump("data", rsa_engine->sram_data, req->src_len);
++
++ rsa_engine->resume = aspeed_rsa_transfer;
++
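++	/* Key info: exponent bit length in [31:16], modulus bit length in [15:0] */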
++ ast_rsss_write(rsss_dev, (ne << 16) + nm,
++ ASPEED_RSA_KEY_INFO);
++
++ /* Set SRAM access control - Engine */
++ val = ast_rsss_read(rsss_dev, ASPEED_RSSS_CTRL);
++ ast_rsss_write(rsss_dev, val & ~SRAM_AHB_MODE_CPU, ASPEED_RSSS_CTRL);
++
++ /* Trigger RSA engines */
++ ast_rsss_write(rsss_dev, RSA_TRIGGER, ASPEED_RSA_TRIGGER);
++
++#ifdef RSSS_RSA_POLLING_MODE
++ return aspeed_rsa_wait_complete(rsss_dev);
++#else
++ return 0;
++#endif
++}
++
++static int aspeed_rsa_enc(struct akcipher_request *req)
++{
++ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(cipher);
++ struct aspeed_rsss_dev *rsss_dev = ctx->rsss_dev;
++
++ ctx->trigger = aspeed_rsa_trigger;
++ ctx->enc = 1;
++
++ return aspeed_rsa_handle_queue(rsss_dev, req);
++}
++
++static int aspeed_rsa_dec(struct akcipher_request *req)
++{
++ struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req);
++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(cipher);
++ struct aspeed_rsss_dev *rsss_dev = ctx->rsss_dev;
++
++ ctx->trigger = aspeed_rsa_trigger;
++ ctx->enc = 0;
++
++ return aspeed_rsa_handle_queue(rsss_dev, req);
++}
++
++static u8 *aspeed_rsa_key_copy(u8 *src, size_t len)
++{
++ return kmemdup(src, len, GFP_KERNEL);
++}
++
++static int aspeed_rsa_set_n(struct aspeed_rsa_ctx *ctx, u8 *value,
++ size_t len)
++{
++ ctx->n_sz = len;
++ ctx->n = aspeed_rsa_key_copy(value, len);
++ if (!ctx->n)
++ return -ENOMEM;
++
++ return 0;
++}
++
++static int aspeed_rsa_set_e(struct aspeed_rsa_ctx *ctx, u8 *value,
++ size_t len)
++{
++ ctx->e_sz = len;
++ ctx->e = aspeed_rsa_key_copy(value, len);
++ if (!ctx->e)
++ return -ENOMEM;
++
++ return 0;
++}
++
++static int aspeed_rsa_set_d(struct aspeed_rsa_ctx *ctx, u8 *value,
++ size_t len)
++{
++ ctx->d_sz = len;
++ ctx->d = aspeed_rsa_key_copy(value, len);
++ if (!ctx->d)
++ return -ENOMEM;
++
++ return 0;
++}
++
++static void aspeed_rsa_key_free(struct aspeed_rsa_ctx *ctx)
++{
++ kfree_sensitive(ctx->n);
++ kfree_sensitive(ctx->e);
++ kfree_sensitive(ctx->d);
++ ctx->n_sz = 0;
++ ctx->e_sz = 0;
++ ctx->d_sz = 0;
++}
++
++static int aspeed_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
++ unsigned int keylen, int priv)
++{
++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
++ struct aspeed_rsss_dev *rsss_dev = ctx->rsss_dev;
++ int ret;
++
++ RSSS_DBG(rsss_dev, "\n");
++
++ if (priv)
++ ret = rsa_parse_priv_key(&ctx->key, key, keylen);
++ else
++ ret = rsa_parse_pub_key(&ctx->key, key, keylen);
++
++ if (ret) {
++ dev_err(rsss_dev->dev, "rsss parse key failed, ret:0x%x\n",
++ ret);
++ return ret;
++ }
++
++	/* The Aspeed engine supports keys up to 4096 bits;
++	 * larger keys use the software fallback instead.
++	 */
++ if (ctx->key.n_sz > ASPEED_RSA_MAX_KEY_LEN)
++ return 0;
++
++ hexdump("n", (u8 *)ctx->key.n, ctx->key.n_sz);
++ ret = aspeed_rsa_set_n(ctx, (u8 *)ctx->key.n, ctx->key.n_sz);
++ if (ret)
++ goto err;
++
++ hexdump("e", (u8 *)ctx->key.e, ctx->key.e_sz);
++ ret = aspeed_rsa_set_e(ctx, (u8 *)ctx->key.e, ctx->key.e_sz);
++ if (ret)
++ goto err;
++
++ if (priv) {
++ hexdump("d", (u8 *)ctx->key.d, ctx->key.d_sz);
++ ret = aspeed_rsa_set_d(ctx, (u8 *)ctx->key.d, ctx->key.d_sz);
++ if (ret)
++ goto err;
++ }
++
++ return 0;
++
++err:
++ dev_err(rsss_dev->dev, "rsss set key failed\n");
++ aspeed_rsa_key_free(ctx);
++
++ return ret;
++}
++
++static int aspeed_rsa_set_pub_key(struct crypto_akcipher *tfm,
++ const void *key,
++ unsigned int keylen)
++{
++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
++ int ret;
++
++ ret = crypto_akcipher_set_pub_key(ctx->fallback_tfm, key, keylen);
++ if (ret)
++ return ret;
++
++ return aspeed_rsa_setkey(tfm, key, keylen, 0);
++}
++
++static int aspeed_rsa_set_priv_key(struct crypto_akcipher *tfm,
++ const void *key,
++ unsigned int keylen)
++{
++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
++ int ret;
++
++ ret = crypto_akcipher_set_priv_key(ctx->fallback_tfm, key, keylen);
++ if (ret)
++ return ret;
++
++ return aspeed_rsa_setkey(tfm, key, keylen, 1);
++}
++
++static unsigned int aspeed_rsa_max_size(struct crypto_akcipher *tfm)
++{
++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
++
++ if (ctx->key.n_sz > ASPEED_RSA_MAX_KEY_LEN)
++ return crypto_akcipher_maxsize(ctx->fallback_tfm);
++
++ return ctx->n_sz;
++}
++
++static int aspeed_rsa_init_tfm(struct crypto_akcipher *tfm)
++{
++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
++ struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
++ const char *name = crypto_tfm_alg_name(&tfm->base);
++ struct aspeed_rsss_alg *rsa_alg;
++
++ rsa_alg = container_of(alg, struct aspeed_rsss_alg, alg.akcipher.base);
++
++ ctx->rsss_dev = rsa_alg->rsss_dev;
++
++ ctx->fallback_tfm = crypto_alloc_akcipher(name, 0, CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_NEED_FALLBACK);
++ if (IS_ERR(ctx->fallback_tfm)) {
++ dev_err(ctx->rsss_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n",
++ name, PTR_ERR(ctx->fallback_tfm));
++ return PTR_ERR(ctx->fallback_tfm);
++ }
++
++ return 0;
++}
++
++static void aspeed_rsa_exit_tfm(struct crypto_akcipher *tfm)
++{
++ struct aspeed_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
++
++ crypto_free_akcipher(ctx->fallback_tfm);
++}
++
++struct aspeed_rsss_alg aspeed_rsss_algs_rsa = {
++ .type = ASPEED_ALGO_TYPE_AKCIPHER,
++ .alg.akcipher.base = {
++ .encrypt = aspeed_rsa_enc,
++ .decrypt = aspeed_rsa_dec,
++ .sign = aspeed_rsa_dec,
++ .verify = aspeed_rsa_enc,
++ .set_pub_key = aspeed_rsa_set_pub_key,
++ .set_priv_key = aspeed_rsa_set_priv_key,
++ .max_size = aspeed_rsa_max_size,
++ .init = aspeed_rsa_init_tfm,
++ .exit = aspeed_rsa_exit_tfm,
++ .base = {
++ .cra_name = "rsa",
++ .cra_driver_name = "aspeed-rsa",
++ .cra_priority = 300,
++ .cra_flags = CRYPTO_ALG_TYPE_AKCIPHER |
++ CRYPTO_ALG_ASYNC |
++ CRYPTO_ALG_KERN_DRIVER_ONLY |
++ CRYPTO_ALG_NEED_FALLBACK,
++ .cra_module = THIS_MODULE,
++ .cra_ctxsize = sizeof(struct aspeed_rsa_ctx),
++ },
++ },
++ .alg.akcipher.op = {
++ .do_one_request = aspeed_rsa_do_request,
++ },
++};
++
++static void aspeed_rsa_done_task(unsigned long data)
++{
++ struct aspeed_rsss_dev *rsss_dev = (struct aspeed_rsss_dev *)data;
++ struct aspeed_engine_rsa *rsa_engine = &rsss_dev->rsa_engine;
++
++ (void)rsa_engine->resume(rsss_dev);
++}
++
++void aspeed_rsss_rsa_exit(struct aspeed_rsss_dev *rsss_dev)
++{
++ struct aspeed_engine_rsa *rsa_engine = &rsss_dev->rsa_engine;
++
++ crypto_engine_exit(rsss_dev->crypt_engine_rsa);
++ tasklet_kill(&rsa_engine->done_task);
++}
++
++int aspeed_rsss_rsa_init(struct aspeed_rsss_dev *rsss_dev)
++{
++ struct aspeed_engine_rsa *rsa_engine;
++ u32 val;
++ int rc;
++
++ rc = reset_control_deassert(rsss_dev->reset_rsa);
++ if (rc) {
++ dev_err(rsss_dev->dev, "Deassert RSA reset failed\n");
++ goto end;
++ }
++
++ rsa_engine = &rsss_dev->rsa_engine;
++
++ /* Initialize crypto hardware engine structure for RSA */
++ rsss_dev->crypt_engine_rsa = crypto_engine_alloc_init(rsss_dev->dev, true);
++ if (!rsss_dev->crypt_engine_rsa) {
++ rc = -ENOMEM;
++ goto end;
++ }
++
++ rc = crypto_engine_start(rsss_dev->crypt_engine_rsa);
++ if (rc)
++ goto err_engine_rsa_start;
++
++ tasklet_init(&rsa_engine->done_task, aspeed_rsa_done_task,
++ (unsigned long)rsss_dev);
++
++ rsa_engine->sram_exp = rsss_dev->regs + SRAM_OFFSET_EXP;
++ rsa_engine->sram_mod = rsss_dev->regs + SRAM_OFFSET_MOD;
++ rsa_engine->sram_data = rsss_dev->regs + SRAM_OFFSET_DATA;
++
++ /* Set SRAM for RSA operation */
++ ast_rsss_write(rsss_dev, RSA_OPERATION, ASPEED_RSSS_CTRL);
++
++ /* Self-test */
++ rc = aspeed_rsa_self_test(rsss_dev);
++ if (rc)
++ goto err_engine_rsa_start;
++
++ /* Enable RSA interrupt */
++ val = ast_rsss_read(rsss_dev, ASPEED_RSSS_INT_EN);
++ ast_rsss_write(rsss_dev, val | RSA_INT_EN, ASPEED_RSSS_INT_EN);
++
++ dev_info(rsss_dev->dev, "Aspeed RSSS RSA initialized\n");
++
++ return 0;
++
++err_engine_rsa_start:
++ crypto_engine_exit(rsss_dev->crypt_engine_rsa);
++end:
++ return rc;
++}
+diff --git a/drivers/crypto/aspeed/aspeed-rsss.c b/drivers/crypto/aspeed/aspeed-rsss.c
+new file mode 100644
+index 000000000..f8ef9c4e0
+--- /dev/null
++++ b/drivers/crypto/aspeed/aspeed-rsss.c
+@@ -0,0 +1,190 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright 2023 Aspeed Technology Inc.
++ */
++
++#include <linux/clk.h>
++#include <linux/dma-mapping.h>
++#include <linux/platform_device.h>
++#include <linux/of.h>
++#include "aspeed-rsss.h"
++
++static struct aspeed_rsss_alg *aspeed_rsss_algs[] = {
++ &aspeed_rsss_algs_rsa,
++ &aspeed_rsss_algs_sha3_224,
++ &aspeed_rsss_algs_sha3_256,
++ &aspeed_rsss_algs_sha3_384,
++ &aspeed_rsss_algs_sha3_512,
++};
++
++static void aspeed_rsss_register(struct aspeed_rsss_dev *rsss_dev)
++{
++ char *cra_name;
++ int rc;
++
++ for (int i = 0; i < ARRAY_SIZE(aspeed_rsss_algs); i++) {
++ aspeed_rsss_algs[i]->rsss_dev = rsss_dev;
++ if (aspeed_rsss_algs[i]->type == ASPEED_ALGO_TYPE_AKCIPHER) {
++ rc = crypto_engine_register_akcipher(&aspeed_rsss_algs[i]->alg.akcipher);
++ cra_name = aspeed_rsss_algs[i]->alg.akcipher.base.base.cra_name;
++
++ } else if (aspeed_rsss_algs[i]->type == ASPEED_ALGO_TYPE_AHASH) {
++ rc = crypto_engine_register_ahash(&aspeed_rsss_algs[i]->alg.ahash);
++ cra_name = aspeed_rsss_algs[i]->alg.ahash.base.halg.base.cra_name;
++ }
++
++ if (rc)
++ dev_warn(rsss_dev->dev, "Failed to register [%d] %s(0x%x)\n", i, cra_name, rc);
++ }
++}
++
++static void aspeed_rsss_unregister(struct aspeed_rsss_dev *rsss_dev)
++{
++ for (int i = 0; i < ARRAY_SIZE(aspeed_rsss_algs); i++) {
++ if (aspeed_rsss_algs[i]->type == ASPEED_ALGO_TYPE_AKCIPHER)
++ crypto_engine_unregister_akcipher(&aspeed_rsss_algs[i]->alg.akcipher);
++
++ else if (aspeed_rsss_algs[i]->type == ASPEED_ALGO_TYPE_AHASH)
++ crypto_engine_unregister_ahash(&aspeed_rsss_algs[i]->alg.ahash);
++ }
++}
++
++/* RSSS interrupt service routine. */
++static irqreturn_t aspeed_rsss_irq(int irq, void *dev)
++{
++ struct aspeed_rsss_dev *rsss_dev = (struct aspeed_rsss_dev *)dev;
++ struct aspeed_engine_sha3 *sha3_engine = &rsss_dev->sha3_engine;
++ struct aspeed_engine_rsa *rsa_engine = &rsss_dev->rsa_engine;
++ u32 sts;
++
++ sts = ast_rsss_read(rsss_dev, ASPEED_RSSS_INT_STS);
++ ast_rsss_write(rsss_dev, sts, ASPEED_RSSS_INT_STS);
++
++ RSSS_DBG(rsss_dev, "irq sts:0x%x\n", sts);
++
++ if (sts & RSA_INT_DONE) {
++ /* Stop RSA engine */
++ ast_rsss_write(rsss_dev, 0, ASPEED_RSA_TRIGGER);
++
++ if (rsa_engine->flags & CRYPTO_FLAGS_BUSY)
++ tasklet_schedule(&rsa_engine->done_task);
++ else
++ dev_err(rsss_dev->dev, "RSA no active requests.\n");
++ }
++
++ if (sts & SHA3_INT_DONE) {
++ if (sha3_engine->flags & CRYPTO_FLAGS_BUSY)
++ tasklet_schedule(&sha3_engine->done_task);
++ else
++ dev_err(rsss_dev->dev, "SHA3 no active requests.\n");
++ }
++
++ return IRQ_HANDLED;
++}
++
++static const struct of_device_id aspeed_rsss_of_matches[] = {
++ { .compatible = "aspeed,ast2700-rsss", },
++ {},
++};
++
++static int aspeed_rsss_probe(struct platform_device *pdev)
++{
++ struct aspeed_rsss_dev *rsss_dev;
++ struct device *dev = &pdev->dev;
++ int rc;
++
++ rsss_dev = devm_kzalloc(dev, sizeof(struct aspeed_rsss_dev),
++ GFP_KERNEL);
++ if (!rsss_dev)
++ return -ENOMEM;
++
++ rsss_dev->dev = dev;
++
++ platform_set_drvdata(pdev, rsss_dev);
++
++ rsss_dev->regs = devm_platform_ioremap_resource(pdev, 0);
++ if (IS_ERR(rsss_dev->regs))
++ return PTR_ERR(rsss_dev->regs);
++
++ /* Get irq number and register it */
++ rsss_dev->irq = platform_get_irq(pdev, 0);
++ if (rsss_dev->irq < 0)
++ return -ENXIO;
++
++ rc = devm_request_irq(dev, rsss_dev->irq, aspeed_rsss_irq, 0,
++ dev_name(dev), rsss_dev);
++ if (rc) {
++ dev_err(dev, "Failed to request irq.\n");
++ return rc;
++ }
++
++ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
++ if (rc) {
++ dev_warn(&pdev->dev, "No suitable DMA available\n");
++ return rc;
++ }
++
++ rsss_dev->clk = devm_clk_get_enabled(dev, NULL);
++ if (IS_ERR(rsss_dev->clk)) {
++ dev_err(dev, "Failed to get rsss clk\n");
++ return PTR_ERR(rsss_dev->clk);
++ }
++
++ rsss_dev->reset_rsa = devm_reset_control_get(dev, "rsa");
++ if (IS_ERR(rsss_dev->reset_rsa)) {
++ dev_err(dev, "Failed to get rsa reset\n");
++ return PTR_ERR(rsss_dev->reset_rsa);
++ }
++
++ rsss_dev->reset_sha3 = devm_reset_control_get(dev, "sha3");
++ if (IS_ERR(rsss_dev->reset_sha3)) {
++ dev_err(dev, "Failed to get sha3 reset\n");
++ return PTR_ERR(rsss_dev->reset_sha3);
++ }
++
++ rc = aspeed_rsss_rsa_init(rsss_dev);
++ if (rc) {
++ dev_err(dev, "RSA init failed\n");
++ return rc;
++ }
++
++ rc = aspeed_rsss_sha3_init(rsss_dev);
++ if (rc) {
++ dev_err(dev, "SHA3 init failed\n");
++ return rc;
++ }
++
++ aspeed_rsss_register(rsss_dev);
++
++ dev_info(dev, "Aspeed RSSS Hardware Accelerator successfully registered\n");
++
++ return 0;
++}
++
++static int aspeed_rsss_remove(struct platform_device *pdev)
++{
++ struct aspeed_rsss_dev *rsss_dev = platform_get_drvdata(pdev);
++
++ aspeed_rsss_unregister(rsss_dev);
++ aspeed_rsss_rsa_exit(rsss_dev);
++ aspeed_rsss_sha3_exit(rsss_dev);
++
++ return 0;
++}
++
++MODULE_DEVICE_TABLE(of, aspeed_rsss_of_matches);
++
++static struct platform_driver aspeed_rsss_driver = {
++ .probe = aspeed_rsss_probe,
++ .remove = aspeed_rsss_remove,
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .of_match_table = aspeed_rsss_of_matches,
++ },
++};
++
++module_platform_driver(aspeed_rsss_driver);
++
++MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
++MODULE_DESCRIPTION("ASPEED RSSS driver for multiple cryptographic engines");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/crypto/aspeed/aspeed-rsss.h b/drivers/crypto/aspeed/aspeed-rsss.h
+new file mode 100644
+index 000000000..3242f7aa7
+--- /dev/null
++++ b/drivers/crypto/aspeed/aspeed-rsss.h
+@@ -0,0 +1,270 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++
++#ifndef __ASPEED_RSSS_H__
++#define __ASPEED_RSSS_H__
++
++#include <linux/iopoll.h>
++#include <linux/reset.h>
++#include <crypto/scatterwalk.h>
++#include <crypto/internal/akcipher.h>
++#include <crypto/internal/hash.h>
++#include <crypto/internal/rsa.h>
++#include <crypto/engine.h>
++#include <crypto/akcipher.h>
++#include <crypto/hash.h>
++#include <crypto/sha3.h>
++
++#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
++#define RSSS_DBG(d, fmt, ...) \
++ dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
++#else
++#define RSSS_DBG(d, fmt, ...) \
++ dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
++#endif
++
++/*****************************
++ * *
++ * RSSS register definitions *
++ * *
++ * ***************************/
++#define ASPEED_RSSS_INT_STS 0xc00 /* RSSS interrupt status */
++#define ASPEED_RSSS_INT_EN 0xc04 /* RSSS interrupt enable */
++#define ASPEED_RSSS_CTRL 0xc08 /* RSSS generic control */
++#define ASPEED_RSA_TRIGGER 0xe00 /* RSA Engine Control: trigger */
++#define ASPEED_RSA_KEY_INFO 0xe08 /* RSA Exp/Mod Key Length (Bits) */
++#define ASPEED_RSA_ENG_STS 0xe0c /* RSA Engine Status */
++
++#define ASPEED_SHA3_CMD 0xe80
++#define ASPEED_SHA3_SRC_LO 0xe84
++#define ASPEED_SHA3_SRC_HI 0xe88
++#define ASPEED_SHA3_SRC_LEN 0xe8c
++#define ASPEED_SHA3_DST_LO 0xe90
++#define ASPEED_SHA3_DST_HI 0xe94
++#define ASPEED_SHA3_BUSY_STS 0xe98
++#define ASPEED_SHA3_ENG_STS 0xe9c
++
++/* RSSS interrupt status */
++#define SM4_INT_DONE BIT(3)
++#define SM3_INT_DONE BIT(2)
++#define SHA3_INT_DONE BIT(1)
++#define RSA_INT_DONE BIT(0)
++
++/* RSSS interrupt enable */
++#define SM4_INT_EN BIT(3)
++#define SM3_INT_EN BIT(2)
++#define SHA3_INT_EN BIT(1)
++#define RSA_INT_EN BIT(0)
++
++/* RSSS generic control */
++#define RSA_OPERATION (BIT(18) | BIT(19))
++#define SRAM_AHB_MODE_CPU BIT(16)
++#define SRAM_AHB_MODE_ENGINE 0x0
++#define SRAM_BUFF_PD (BIT(5) | BIT(4))
++#define SM4_DISABLE BIT(3)
++#define SM3_DISABLE BIT(2)
++#define SHA3_DISABLE BIT(1)
++
++/* RSA trigger */
++#define RSA_TRIGGER BIT(0)
++
++/* RSA key len */
++#define RSA_E_BITS_LEN(x) ((x) << 16)
++#define RSA_M_BITS_LEN(x) (x)
++
++#define RSA_STS (BIT(0) | BIT(1))
++
++/* RSA SRAM */
++#define SRAM_OFFSET_EXP 0x0
++#define SRAM_OFFSET_MOD 0x400
++#define SRAM_OFFSET_DATA 0x800
++#define SRAM_BLOCK_SIZE 0x400
++
++#define ASPEED_RSA_MAX_KEY_LEN 512 /* RSA maximum key length (Bytes) */
++
++#define CRYPTO_FLAGS_BUSY BIT(1)
++
++/* SHA3 command */
++#define SHA3_CMD_TRIG BIT(31)
++#define SHA3_CMD_MODE_224 (0x0 << 28)
++#define SHA3_CMD_MODE_256 (0x1 << 28)
++#define SHA3_CMD_MODE_384 (0x2 << 28)
++#define SHA3_CMD_MODE_512 (0x3 << 28)
++#define SHA3_CMD_MODE_S128 (0x4 << 28)
++#define SHA3_CMD_MODE_S256 (0x5 << 28)
++#define SHA3_CMD_HW_PAD BIT(27)
++#define SHA3_CMD_ACC_FINAL BIT(26)
++#define SHA3_CMD_ACC BIT(25)
++#define SHA3_CMD_SG_MODE BIT(24)
++#define SHA3_CMD_IN_RST BIT(21)
++#define SHA3_CMD_OUT_RST BIT(20)
++#define SHA3_CMD_OUT_LEN(x) ((x) & 0x1ffff)
++
++#define SHA3_FLAGS_SHA224 BIT(0)
++#define SHA3_FLAGS_SHA256 BIT(1)
++#define SHA3_FLAGS_SHA384 BIT(2)
++#define SHA3_FLAGS_SHA512 BIT(3)
++#define SHA3_FLAGS_FINUP BIT(0xa)
++#define SHA3_FLAGS_MASK (0xff)
++
++#define SHA3_STS BIT(0)
++
++#define SG_LAST_LIST BIT(31)
++
++#define SHA_OP_UPDATE 1
++#define SHA_OP_FINAL 2
++
++#define ASPEED_HASH_SRC_DMA_BUF_LEN 0xa000
++
++#define ASPEED_RSSS_POLLING_TIME 100
++#define ASPEED_RSSS_TIMEOUT 100000 /* 100 ms */
++
++struct aspeed_rsss_dev;
++
++typedef int (*aspeed_rsss_fn_t)(struct aspeed_rsss_dev *);
++
++struct aspeed_sg_list {
++ __le64 phy_addr;
++ __le32 len;
++};
++
++struct aspeed_engine_rsa {
++ struct tasklet_struct done_task;
++ unsigned long flags;
++ struct akcipher_request *req;
++
++ /* RSA input/output SRAM buffer */
++ void __iomem *sram_exp;
++ void __iomem *sram_mod;
++ void __iomem *sram_data;
++
++ /* callback func */
++ aspeed_rsss_fn_t resume;
++};
++
++struct aspeed_engine_sha3 {
++ struct tasklet_struct done_task;
++ unsigned long flags;
++ struct ahash_request *req;
++
++ /* input buffer for SG */
++ void *ahash_src_addr;
++ dma_addr_t ahash_src_dma_addr;
++
++ /* input buffer for remain */
++ void *buffer_addr;
++ dma_addr_t buffer_dma_addr;
++
++ /* output buffer */
++ void *digest_addr;
++ dma_addr_t digest_dma_addr;
++
++ dma_addr_t src_dma;
++ size_t src_length;
++
++ /* callback func */
++ aspeed_rsss_fn_t resume;
++ aspeed_rsss_fn_t dma_prepare;
++
++ unsigned sg_mode:1;
++};
++
++struct aspeed_rsss_dev {
++ void __iomem *regs;
++ struct device *dev;
++ int irq;
++ struct clk *clk;
++ struct reset_control *reset_rsa;
++ struct reset_control *reset_sha3;
++
++ struct crypto_engine *crypt_engine_rsa;
++ struct crypto_engine *crypt_engine_sha3;
++
++ struct aspeed_engine_rsa rsa_engine;
++ struct aspeed_engine_sha3 sha3_engine;
++};
++
++enum aspeed_algo_type {
++ ASPEED_ALGO_TYPE_AKCIPHER,
++ ASPEED_ALGO_TYPE_AHASH,
++};
++
++struct aspeed_rsss_alg {
++ struct aspeed_rsss_dev *rsss_dev;
++ enum aspeed_algo_type type;
++ union {
++ struct akcipher_engine_alg akcipher;
++ struct ahash_engine_alg ahash;
++ } alg;
++};
++
++/* RSA related */
++struct aspeed_rsa_ctx {
++ struct aspeed_rsss_dev *rsss_dev;
++
++ struct rsa_key key;
++ int enc;
++ u8 *n;
++ u8 *e;
++ u8 *d;
++ size_t n_sz;
++ size_t e_sz;
++ size_t d_sz;
++
++ aspeed_rsss_fn_t trigger;
++
++ struct crypto_akcipher *fallback_tfm;
++};
++
++enum aspeed_rsa_key_mode {
++ ASPEED_RSA_EXP_MODE = 0,
++ ASPEED_RSA_MOD_MODE,
++ ASPEED_RSA_DATA_MODE,
++};
++
++/* Hash related */
++struct aspeed_sha3_ctx {
++ struct aspeed_rsss_dev *rsss_dev;
++};
++
++struct aspeed_sha3_reqctx {
++	unsigned long flags; /* SHA3 mode and finup flags */
++ unsigned long op; /* final or update */
++ u32 cmd; /* trigger cmd */
++
++ /* walk state */
++ struct scatterlist *src_sg;
++ int src_nents;
++ unsigned int offset; /* offset in current sg */
++ unsigned int total; /* per update length */
++
++ size_t digsize;
++ size_t blksize;
++ size_t ivsize;
++
++ /* remain data buffer */
++ size_t bufcnt; /* buffer counter */
++
++ /* output buffer */
++ u64 digcnt[2];
++};
++
++/******************************************************************************/
++
++#define ast_rsss_write(rsss, val, offset) \
++ writel((val), (rsss)->regs + (offset))
++
++#define ast_rsss_read(rsss, offset) \
++ readl((rsss)->regs + (offset))
++
++int aspeed_rsss_rsa_init(struct aspeed_rsss_dev *rsss_dev);
++void aspeed_rsss_rsa_exit(struct aspeed_rsss_dev *rsss_dev);
++int aspeed_rsss_sha3_init(struct aspeed_rsss_dev *rsss_dev);
++void aspeed_rsss_sha3_exit(struct aspeed_rsss_dev *rsss_dev);
++
++extern struct aspeed_rsss_alg aspeed_rsss_algs_rsa;
++extern struct aspeed_rsss_alg aspeed_rsss_algs_sha3_224;
++extern struct aspeed_rsss_alg aspeed_rsss_algs_sha3_256;
++extern struct aspeed_rsss_alg aspeed_rsss_algs_sha3_384;
++extern struct aspeed_rsss_alg aspeed_rsss_algs_sha3_512;
++
++#endif /* __ASPEED_RSSS_H__ */
+--
+2.34.1
+
diff --git a/recipes-kernel/linux/files/0023-Add-i3c-driver-for-ast2700.patch b/recipes-kernel/linux/files/0023-Add-i3c-driver-for-ast2700.patch
new file mode 100644
index 0000000..d5902d4
--- /dev/null
+++ b/recipes-kernel/linux/files/0023-Add-i3c-driver-for-ast2700.patch
@@ -0,0 +1,10206 @@
+From b91ad012076362971d1019cc95c1fdbaa4d8854e Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Tue, 11 Mar 2025 16:13:51 +0800
+Subject: [PATCH] Add i3c driver for ast2700
+
+This is base on aspeed SDK 9.05.
+
+Source:
+AspeedTech-BMC github:
+https://github.com/AspeedTech-BMC/linux/blob/aspeed-master-v6.6/
+(cherry picked from commit 769f62b7baa84d6998723b0ea60280e380183553)
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
+---
+ drivers/i3c/Kconfig | 23 +
+ drivers/i3c/Makefile | 3 +
+ drivers/i3c/device.c | 378 ++++
+ drivers/i3c/i3c-mux-imx3102.c | 227 +++
+ drivers/i3c/i3cdev.c | 428 +++++
+ drivers/i3c/internals.h | 36 +
+ drivers/i3c/master.c | 1194 ++++++++++++-
+ drivers/i3c/master/ast2600-i3c-master.c | 609 +++++++
+ drivers/i3c/master/dw-i3c-master.c | 1553 +++++++++++++++--
+ drivers/i3c/master/dw-i3c-master.h | 60 +-
+ drivers/i3c/master/mipi-i3c-hci/cmd.h | 85 +-
+ drivers/i3c/master/mipi-i3c-hci/cmd_v1.c | 191 +-
+ drivers/i3c/master/mipi-i3c-hci/cmd_v2.c | 3 +-
+ drivers/i3c/master/mipi-i3c-hci/core.c | 746 +++++++-
+ drivers/i3c/master/mipi-i3c-hci/dat.h | 4 +
+ drivers/i3c/master/mipi-i3c-hci/dat_v1.c | 40 +-
+ drivers/i3c/master/mipi-i3c-hci/dma.c | 249 ++-
+ drivers/i3c/master/mipi-i3c-hci/ext_caps.c | 42 +
+ drivers/i3c/master/mipi-i3c-hci/ext_caps.h | 1 +
+ drivers/i3c/master/mipi-i3c-hci/hci.h | 18 +
+ drivers/i3c/master/mipi-i3c-hci/ibi.h | 4 +
+ drivers/i3c/master/mipi-i3c-hci/pio.c | 271 ++-
+ .../i3c/master/mipi-i3c-hci/vendor_aspeed.h | 408 +++++
+ drivers/i3c/master/svc-i3c-master.c | 2 +-
+ drivers/i3c/mctp/Kconfig | 23 +
+ drivers/i3c/mctp/Makefile | 3 +
+ drivers/i3c/mctp/i3c-mctp.c | 697 ++++++++
+ drivers/i3c/mctp/i3c-target-mctp.c | 485 +++++
+ 28 files changed, 7359 insertions(+), 424 deletions(-)
+ create mode 100644 drivers/i3c/i3c-mux-imx3102.c
+ create mode 100644 drivers/i3c/i3cdev.c
+ create mode 100644 drivers/i3c/master/mipi-i3c-hci/vendor_aspeed.h
+ create mode 100644 drivers/i3c/mctp/Kconfig
+ create mode 100644 drivers/i3c/mctp/Makefile
+ create mode 100644 drivers/i3c/mctp/i3c-mctp.c
+ create mode 100644 drivers/i3c/mctp/i3c-target-mctp.c
+
+diff --git a/drivers/i3c/Kconfig b/drivers/i3c/Kconfig
+index 30a441506..2fd8deaf7 100644
+--- a/drivers/i3c/Kconfig
++++ b/drivers/i3c/Kconfig
+@@ -20,5 +20,28 @@ menuconfig I3C
+ will be called i3c.
+
+ if I3C
++
++config I3CDEV
++ tristate "I3C device interface"
++ depends on I3C
++ help
++ Say Y here to use i3c-* device files, usually found in the /dev
++ directory on your system. They make it possible to have user-space
++ programs use the I3C devices.
++
++ This support is also available as a module. If so, the module
++ will be called i3cdev.
++
++ Note that this application programming interface is EXPERIMENTAL
++ and hence SUBJECT TO CHANGE WITHOUT NOTICE while it stabilizes.
++
++config I3C_MUX_IMX3102
++ bool "IMX/IML3102 I3C multiplexer driver"
++ default y
++ select REGMAP_I3C
++ help
++	  Say Y here to enable support for the Renesas IMX3102 2:1 I3C multiplexer.
++
++source "drivers/i3c/mctp/Kconfig"
+ source "drivers/i3c/master/Kconfig"
+ endif # I3C
+diff --git a/drivers/i3c/Makefile b/drivers/i3c/Makefile
+index 11982efbc..4a8b82d94 100644
+--- a/drivers/i3c/Makefile
++++ b/drivers/i3c/Makefile
+@@ -1,4 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ i3c-y := device.o master.o
+ obj-$(CONFIG_I3C) += i3c.o
++obj-$(CONFIG_I3CDEV) += i3cdev.o
+ obj-$(CONFIG_I3C) += master/
++obj-$(CONFIG_I3C) += mctp/
++obj-$(CONFIG_I3C_MUX_IMX3102) += i3c-mux-imx3102.o
+diff --git a/drivers/i3c/device.c b/drivers/i3c/device.c
+index 1a6a8703d..fd523ed8a 100644
+--- a/drivers/i3c/device.c
++++ b/drivers/i3c/device.c
+@@ -50,6 +50,117 @@ int i3c_device_do_priv_xfers(struct i3c_device *dev,
+ }
+ EXPORT_SYMBOL_GPL(i3c_device_do_priv_xfers);
+
++/**
++ * i3c_device_send_hdr_cmds() - send HDR commands to a specific device
++ *
++ * @dev: device to which these commands should be sent
++ * @cmds: array of commands
++ * @ncmds: number of commands
++ *
++ * Send one or several HDR commands to @dev.
++ *
++ * This function can sleep and thus cannot be called in atomic context.
++ *
++ * Return: 0 in case of success, a negative error code otherwise.
++ */
++int i3c_device_send_hdr_cmds(struct i3c_device *dev, struct i3c_hdr_cmd *cmds,
++ int ncmds)
++{
++ enum i3c_hdr_mode mode;
++ int ret, i;
++
++ if (ncmds < 1)
++ return 0;
++
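++	/* All commands in a single call must use the same HDR mode */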
++ mode = cmds[0].mode;
++ for (i = 1; i < ncmds; i++) {
++ if (mode != cmds[i].mode)
++ return -EINVAL;
++ }
++
++ i3c_bus_normaluse_lock(dev->bus);
++ ret = i3c_dev_send_hdr_cmds_locked(dev->desc, cmds, ncmds);
++ i3c_bus_normaluse_unlock(dev->bus);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(i3c_device_send_hdr_cmds);
++
++/**
++ * i3c_device_generate_ibi() - request In-Band Interrupt
++ *
++ * @dev: target device
++ * @data: IBI payload
++ * @len: payload length in bytes
++ *
++ * Request In-Band Interrupt with or without data payload.
++ *
++ * Return: 0 in case of success, a negative error code otherwise.
++ */
++int i3c_device_generate_ibi(struct i3c_device *dev, const u8 *data, int len)
++{
++ int ret;
++
++ i3c_bus_normaluse_lock(dev->bus);
++ ret = i3c_dev_generate_ibi_locked(dev->desc, data, len);
++ i3c_bus_normaluse_unlock(dev->bus);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(i3c_device_generate_ibi);
++
++/**
++ * i3c_device_pending_read_notify() - Notify the bus master about the
++ * pending read data through IBI
++ *
++ * @dev: device with which the transfers should be done
++ * @pending_read: the transfer that conveys the pending read data
++ * @ibi_notify: the transfer that conveys the IBI with data (MDB)
++ *
++ * Initiate a private SDR transfer with @dev, then issue an IBI with
++ * data to notify the bus master that there is a pending read transfer.
++ *
++ * This function can sleep and thus cannot be called in atomic context.
++ *
++ * Return: 0 in case of success, a negative error code otherwise.
++ */
++int i3c_device_pending_read_notify(struct i3c_device *dev,
++ struct i3c_priv_xfer *pending_read,
++ struct i3c_priv_xfer *ibi_notify)
++{
++ int ret;
++
++ i3c_bus_normaluse_lock(dev->bus);
++ ret = i3c_dev_pending_read_notify_locked(dev->desc, pending_read,
++ ibi_notify);
++ i3c_bus_normaluse_unlock(dev->bus);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(i3c_device_pending_read_notify);
++
++/**
++ * i3c_device_is_ibi_enabled() - Query the In-Band Interrupt status
++ *
++ * @dev: target device
++ *
++ * Queries the device to check if In-Band Interrupt (IBI) is enabled by the bus
++ * controller.
++ *
++ * Return: true if IBI is enabled, false otherwise.
++ */
++bool i3c_device_is_ibi_enabled(struct i3c_device *dev)
++{
++ bool ret;
++
++ i3c_bus_normaluse_lock(dev->bus);
++ ret = i3c_dev_is_ibi_enabled_locked(dev->desc);
++ i3c_bus_normaluse_unlock(dev->bus);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(i3c_device_is_ibi_enabled);
++
+ /**
+ * i3c_device_do_setdasa() - do I3C dynamic address assignement with
+ * static address
+@@ -70,6 +181,31 @@ int i3c_device_do_setdasa(struct i3c_device *dev)
+ }
+ EXPORT_SYMBOL_GPL(i3c_device_do_setdasa);
+
++/**
++ * i3c_device_getstatus_ccc() - receive device status
++ *
++ * @dev: I3C device to get the status for
++ * @info: I3C device info to fill the status in
++ *
++ * Receive I3C device status from I3C master device via corresponding CCC
++ * command
++ *
++ * Return: 0 in case of success, a negative error code otherwise.
++ */
++int i3c_device_getstatus_ccc(struct i3c_device *dev, struct i3c_device_info *info)
++{
++ int ret = -EINVAL;
++
++ i3c_bus_normaluse_lock(dev->bus);
++ if (dev->desc)
++ ret = i3c_dev_getstatus_locked(dev->desc, &dev->desc->info);
++ i3c_bus_normaluse_unlock(dev->bus);
++ i3c_device_get_info(dev, info);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(i3c_device_getstatus_ccc);
++
+ /**
+ * i3c_device_get_info() - get I3C device information
+ *
+@@ -291,3 +427,245 @@ void i3c_driver_unregister(struct i3c_driver *drv)
+ driver_unregister(&drv->driver);
+ }
+ EXPORT_SYMBOL_GPL(i3c_driver_unregister);
++
++/**
++ * i3c_device_control_pec() - enable or disable PEC support in HW
++ *
++ * @dev: I3C device to enable or disable PEC for
++ * @pec: flag telling whether PEC support shall be enabled or disabled
++ *
++ * Try to enable or disable HW support for PEC (Packet Error Check).
++ * If the HW does not support PEC, a software implementation can be used instead.
++ *
++ * Return: 0 in case of success, -EOPNOTSUPP in case PEC is not supported by HW,
++ * other negative error codes when PEC enabling failed.
++ */
++int i3c_device_control_pec(struct i3c_device *dev, bool pec)
++{
++ return i3c_dev_control_pec(dev->desc, pec);
++}
++EXPORT_SYMBOL_GPL(i3c_device_control_pec);
++
++/**
++ * i3c_device_register_event_cb() - register callback for I3C framework event.
++ * @dev: the I3C device driver handle.
++ * @ev: I3C framework event callback
++ *
++ * This function allows I3C device driver to register for I3C framework events.
++ * Provided callback will be used by controller driver to publish events.
++ */
++void i3c_device_register_event_cb(struct i3c_device *dev, i3c_event_cb event_cb)
++{
++ dev->desc->event_cb = event_cb;
++}
++EXPORT_SYMBOL_GPL(i3c_device_register_event_cb);
++
++/**
++ * i3c_device_setmrl_ccc() - set maximum read length
++ *
++ * @dev: I3C device to set the length for
++ * @info: I3C device info to fill the length in
++ * @read_len: maximum read length value to be set
++ * @ibi_len: maximum ibi payload length to be set
++ *
++ * Set I3C device maximum read length from I3C master device via corresponding CCC command
++ *
++ * Return: 0 in case of success, a negative error code otherwise.
++ */
++int i3c_device_setmrl_ccc(struct i3c_device *dev, struct i3c_device_info *info, u16 read_len,
++ u8 ibi_len)
++{
++ struct i3c_master_controller *master = i3c_dev_get_master(dev->desc);
++ int ret = -EINVAL;
++
++ i3c_bus_normaluse_lock(dev->bus);
++ if (master)
++ ret = i3c_master_setmrl_locked(master, &dev->desc->info, read_len, ibi_len);
++ i3c_bus_normaluse_unlock(dev->bus);
++ i3c_device_get_info(dev, info);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(i3c_device_setmrl_ccc);
++
++/**
++ * i3c_device_setmwl_ccc() - set maximum write length
++ *
++ * @dev: I3C device to set the length for
++ * @info: I3C device info to fill the length in
++ * @write_len: maximum write length value to be set
++ *
++ * Set I3C device maximum write length from I3C master device via corresponding CCC command
++ *
++ * Return: 0 in case of success, a negative error code otherwise.
++ */
++int i3c_device_setmwl_ccc(struct i3c_device *dev, struct i3c_device_info *info, u16 write_len)
++{
++ struct i3c_master_controller *master = i3c_dev_get_master(dev->desc);
++ int ret = -EINVAL;
++
++ i3c_bus_normaluse_lock(dev->bus);
++ if (master)
++ ret = i3c_master_setmwl_locked(master, &dev->desc->info, write_len);
++ i3c_bus_normaluse_unlock(dev->bus);
++ i3c_device_get_info(dev, info);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(i3c_device_setmwl_ccc);
++
++/**
++ * i3c_device_getmrl_ccc() - get maximum read length
++ *
++ * @dev: I3C device to get the length for
++ * @info: I3C device info to fill the length in
++ *
++ * Receive I3C device maximum read length from I3C master device via corresponding CCC command
++ *
++ * Return: 0 in case of success, a negative error code otherwise.
++ */
++int i3c_device_getmrl_ccc(struct i3c_device *dev, struct i3c_device_info *info)
++{
++ struct i3c_master_controller *master = i3c_dev_get_master(dev->desc);
++ int ret = -EINVAL;
++
++ i3c_bus_normaluse_lock(dev->bus);
++ if (master)
++ ret = i3c_master_getmrl_locked(master, &dev->desc->info);
++ i3c_bus_normaluse_unlock(dev->bus);
++ i3c_device_get_info(dev, info);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(i3c_device_getmrl_ccc);
++
++/**
++ * i3c_device_getmwl_ccc() - get maximum write length
++ *
++ * @dev: I3C device to get the length for
++ * @info: I3C device info to fill the length in
++ *
++ * Receive I3C device maximum write length from I3C master device via corresponding CCC command
++ *
++ * Return: 0 in case of success, a negative error code otherwise.
++ */
++int i3c_device_getmwl_ccc(struct i3c_device *dev, struct i3c_device_info *info)
++{
++ struct i3c_master_controller *master = i3c_dev_get_master(dev->desc);
++ int ret = -EINVAL;
++
++ i3c_bus_normaluse_lock(dev->bus);
++ if (master)
++ ret = i3c_master_getmwl_locked(master, &dev->desc->info);
++ i3c_bus_normaluse_unlock(dev->bus);
++ i3c_device_get_info(dev, info);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(i3c_device_getmwl_ccc);
++
++int i3c_device_setaasa_ccc(struct i3c_device *dev)
++{
++ struct i3c_master_controller *master = i3c_dev_get_master(dev->desc);
++ int ret = -EINVAL;
++
++ i3c_bus_normaluse_lock(dev->bus);
++ if (master)
++ ret = i3c_master_setaasa_locked(master);
++ i3c_bus_normaluse_unlock(dev->bus);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(i3c_device_setaasa_ccc);
++
++/**
++ * i3c_device_dbgaction_wr_ccc() - I3C for Debug action write CCC
++ *
++ * @dev: I3C device to initiate the Debug Action write
++ * @info: I3C device info to capture target system details
++ * @data: data bytes for the debug action
++ * @len: length of the data bytes
++ *
++ * Initiate a particular debug action within the target system
++ *
++ * Return: 0 in case of success, a negative error code otherwise.
++ */
++int i3c_device_dbgaction_wr_ccc(struct i3c_device *dev, struct i3c_device_info *info,
++ u8 *data, u8 len)
++{
++ int ret = -EINVAL;
++
++ i3c_bus_normaluse_lock(dev->bus);
++ if (dev->desc)
++ ret = i3c_dev_dbgaction_wr_locked(dev->desc, info, data, len);
++ i3c_bus_normaluse_unlock(dev->bus);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(i3c_device_dbgaction_wr_ccc);
++
++int i3c_device_sethid_ccc(struct i3c_device *dev)
++{
++ struct i3c_master_controller *master = i3c_dev_get_master(dev->desc);
++ int ret = -EINVAL;
++
++ i3c_bus_normaluse_lock(dev->bus);
++ if (master)
++ ret = i3c_master_sethid_locked(master);
++ i3c_bus_normaluse_unlock(dev->bus);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(i3c_device_sethid_ccc);
++
++/**
++ * i3c_device_dbgopcode_wr_ccc() - I3C for Debug opcode CCC
++ *
++ * @dev: I3C device to initiate the Debug Opcode write
++ * @info: I3C device info to capture target system details
++ * @data: data bytes for the debug opcode
++ * @len: length of the data bytes
++ *
++ * Request a particular operation of the network adaptor of the target system
++ *
++ * Return: 0 in case of success, a negative error code otherwise.
++ */
++int i3c_device_dbgopcode_wr_ccc(struct i3c_device *dev, struct i3c_device_info *info,
++ u8 *data, u8 len)
++{
++ int ret = -EINVAL;
++
++ i3c_bus_normaluse_lock(dev->bus);
++ if (dev->desc)
++ ret = i3c_dev_dbgopcode_wr_locked(dev->desc, info, data, len);
++ i3c_bus_normaluse_unlock(dev->bus);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(i3c_device_dbgopcode_wr_ccc);
++
++/**
++ * i3c_device_dbgopcode_rd_ccc() - I3C for Debug opcode CCC
++ *
++ * @dev: I3C device to initiate the Debug Opcode read
++ * @info: I3C device info to capture target system details
++ * @data: data bytes for the debug opcode
++ * @len: length of the data bytes
++ *
++ * Request a particular operation of the network adaptor of the target system
++ *
++ * Return: 0 in case of success, a negative error code otherwise.
++ */
++int i3c_device_dbgopcode_rd_ccc(struct i3c_device *dev, struct i3c_device_info *info,
++ u8 *data, u8 len)
++{
++ int ret = -EINVAL;
++
++ i3c_bus_normaluse_lock(dev->bus);
++ if (dev->desc)
++ ret = i3c_dev_dbgopcode_rd_locked(dev->desc, info, data, len);
++ i3c_bus_normaluse_unlock(dev->bus);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(i3c_device_dbgopcode_rd_ccc);
+diff --git a/drivers/i3c/i3c-mux-imx3102.c b/drivers/i3c/i3c-mux-imx3102.c
+new file mode 100644
+index 000000000..e134aea93
+--- /dev/null
++++ b/drivers/i3c/i3c-mux-imx3102.c
+@@ -0,0 +1,227 @@
++// SPDX-License-Identifier: GPL-2.0-only
++/*
++ * Copyright (c) 2021 Aspeed Technology Inc.
++ *
++ * IMX3102: 2-to-1 multiplexer
++ *
++ * +---------------------+
++ * | SoC |
++ * | |
++ * | I3C controller #0 - | --+
++ * | | \ dev dev
++ * | | +---------+ | |
++ * | | | IMX3102 | ---+--+--+--+--- i3c bus
++ * | | +---------+ | |
++ * | | / dev dev
++ * | I3C controller #1 - | --+
++ * | |
++ * +---------------------+
++ */
++
++#include <linux/i3c/device.h>
++#include <linux/i3c/master.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/of.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/sysfs.h>
++#include <linux/delay.h>
++#include <linux/regmap.h>
++#include "internals.h"
++
++#define IMX3102_DEVICE_TYPE_HI 0x0
++#define IMX3102_DEVICE_TYPE_LO 0x1
++
++#define IMX3102_PORT_CONF 0x40
++#define IMX3102_PORT_CONF_M1_EN BIT(7)
++#define IMX3102_PORT_CONF_S_EN BIT(6)
++#define IMX3102_PORT_SEL 0x41
++#define IMX3102_PORT_SEL_M1 BIT(7)
++#define IMX3102_PORT_SEL_S_EN BIT(6)
++
++struct imx3102 {
++ struct regmap *regmap;
++
++ struct bin_attribute ownership;
++ struct bin_attribute reinit;
++ struct kernfs_node *kn;
++
++ struct i3c_device *i3cdev;
++};
++
++static ssize_t i3c_mux_imx3102_query(struct file *filp, struct kobject *kobj,
++ struct bin_attribute *attr, char *buf,
++ loff_t pos, size_t count)
++{
++ struct imx3102 *imx3102;
++ struct device *dev;
++ int ret;
++ u8 data[2];
++
++ imx3102 = dev_get_drvdata(container_of(kobj, struct device, kobj));
++ if (!imx3102)
++ return -1;
++
++ dev = &imx3102->i3cdev->dev;
++
++ ret = regmap_raw_read(imx3102->regmap, IMX3102_DEVICE_TYPE_HI, data, 2);
++ if (ret)
++ sprintf(buf, "N\n");
++ else
++ sprintf(buf, "Y\n");
++
++ return 2;
++}
++
++/* Writing any value to the ownership attribute hands the mux over to the other controller */
++static ssize_t i3c_mux_imx3102_release_chan(struct file *filp,
++ struct kobject *kobj,
++ struct bin_attribute *attr,
++ char *buf, loff_t pos, size_t count)
++{
++ struct imx3102 *imx3102;
++ struct device *dev;
++ struct regmap *regmap;
++ int ret;
++ u8 select;
++
++ imx3102 = dev_get_drvdata(container_of(kobj, struct device, kobj));
++	if (!imx3102)
++		return -ENODEV;
++
++ dev = &imx3102->i3cdev->dev;
++ regmap = imx3102->regmap;
++ ret = regmap_raw_read(regmap, IMX3102_PORT_SEL, &select, 1);
++ if (ret)
++ goto out;
++
++ /* invert the bit to change the ownership */
++ select ^= IMX3102_PORT_SEL_M1;
++ regmap_raw_write(regmap, IMX3102_PORT_SEL, &select, 1);
++
++out:
++ return count;
++}
++
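++/*
++ * Re-send SETAASA and SETHID so the devices behind the mux return to I3C
++ * mode, e.g. after the mux ownership changed hands.
++ */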
++static ssize_t i3c_mux_imx3102_bus_reinit(struct file *filp,
++ struct kobject *kobj,
++ struct bin_attribute *attr, char *buf,
++ loff_t pos, size_t count)
++{
++ struct imx3102 *imx3102;
++ int ret;
++
++ imx3102 = dev_get_drvdata(container_of(kobj, struct device, kobj));
++	if (!imx3102)
++		return -ENODEV;
++
++	ret = i3c_device_setaasa_ccc(imx3102->i3cdev);
++	if (!ret)
++		ret = i3c_device_sethid_ccc(imx3102->i3cdev);
++
++	return ret ? ret : count;
++}
++
++static int i3c_mux_imx3102_probe(struct i3c_device *i3cdev)
++{
++ struct device *dev = &i3cdev->dev;
++ struct imx3102 *imx3102;
++ struct regmap *regmap;
++ struct regmap_config imx3102_i3c_regmap_config = {
++ .reg_bits = 8,
++ .pad_bits = 8,
++ .val_bits = 8,
++ };
++ int ret;
++ u8 data[2];
++
++ if (dev->type == &i3c_masterdev_type)
++ return -ENOTSUPP;
++
++ imx3102 = devm_kzalloc(dev, sizeof(*imx3102), GFP_KERNEL);
++ if (!imx3102)
++ return -ENOMEM;
++
++ imx3102->i3cdev = i3cdev;
++
++ /* register regmap */
++ regmap = devm_regmap_init_i3c(i3cdev, &imx3102_i3c_regmap_config);
++ if (IS_ERR(regmap)) {
++ dev_err(dev, "Failed to register i3c regmap %d\n",
++ (int)PTR_ERR(regmap));
++ return PTR_ERR(regmap);
++ }
++ imx3102->regmap = regmap;
++
++ sysfs_bin_attr_init(&imx3102->ownership);
++ imx3102->ownership.attr.name = "imx3102.ownership";
++ imx3102->ownership.attr.mode = 0600;
++ imx3102->ownership.read = i3c_mux_imx3102_query;
++ imx3102->ownership.write = i3c_mux_imx3102_release_chan;
++ imx3102->ownership.size = 2;
++ ret = sysfs_create_bin_file(&dev->kobj, &imx3102->ownership);
++
++ sysfs_bin_attr_init(&imx3102->reinit);
++ imx3102->reinit.attr.name = "imx3102.reinit";
++ imx3102->reinit.attr.mode = 0200;
++ imx3102->reinit.write = i3c_mux_imx3102_bus_reinit;
++ imx3102->reinit.size = 2;
++ ret = sysfs_create_bin_file(&dev->kobj, &imx3102->reinit);
++
++ imx3102->kn = kernfs_find_and_get(dev->kobj.sd, imx3102->ownership.attr.name);
++ dev_set_drvdata(dev, imx3102);
++
++ ret = regmap_raw_read(regmap, IMX3102_DEVICE_TYPE_HI, data, 2);
++ if (ret) {
++ dev_info(dev, "No ownership\n");
++ return 0;
++ }
++ dev_dbg(dev, "device ID %02x %02x\n", data[0], data[1]);
++
++	/* Enable the slave port; PORT_CONF and the adjacent PORT_SEL are written in one 2-byte access */
++ regmap_raw_read(regmap, IMX3102_PORT_CONF, &data[0], 2);
++ data[0] |= IMX3102_PORT_CONF_S_EN | IMX3102_PORT_CONF_M1_EN;
++ data[1] |= IMX3102_PORT_SEL_S_EN;
++ regmap_raw_write(regmap, IMX3102_PORT_CONF, data, 2);
++
++ /* send SETAASA to bring the devices behind the mux to I3C mode */
++ i3c_device_setaasa_ccc(i3cdev);
++
++ return 0;
++}
++
++static void i3c_mux_imx3102_remove(struct i3c_device *i3cdev)
++{
++ struct device *dev = &i3cdev->dev;
++ struct imx3102 *imx3102;
++
++ imx3102 = dev_get_drvdata(dev);
++
++ kernfs_put(imx3102->kn);
++	sysfs_remove_bin_file(&dev->kobj, &imx3102->ownership);
++	sysfs_remove_bin_file(&dev->kobj, &imx3102->reinit);
++ devm_kfree(dev, imx3102);
++}
++
++static const struct i3c_device_id i3c_mux_imx3102_ids[] = {
++ I3C_DEVICE(0x266, 0x3102, (void *)0),
++ { /* sentinel */ },
++};
++MODULE_DEVICE_TABLE(i3c, i3c_mux_imx3102_ids);
++
++static struct i3c_driver imx3102_driver = {
++ .driver = {
++ .name = "i3c-mux-imx3102",
++ },
++ .probe = i3c_mux_imx3102_probe,
++ .remove = i3c_mux_imx3102_remove,
++ .id_table = i3c_mux_imx3102_ids,
++};
++module_i3c_driver(imx3102_driver);
++
++MODULE_AUTHOR("Dylan Hung <dylan_hung@aspeedtech.com>");
++MODULE_DESCRIPTION("I3C IMX3102 multiplexer driver");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/i3c/i3cdev.c b/drivers/i3c/i3cdev.c
+new file mode 100644
+index 000000000..fa96327d5
+--- /dev/null
++++ b/drivers/i3c/i3cdev.c
+@@ -0,0 +1,428 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates.
++ *
++ * Author: Vitor Soares <soares@synopsys.com>
++ */
++
++#include <linux/cdev.h>
++#include <linux/compat.h>
++#include <linux/device.h>
++#include <linux/fs.h>
++#include <linux/init.h>
++#include <linux/jiffies.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/notifier.h>
++#include <linux/slab.h>
++#include <linux/uaccess.h>
++
++#include <linux/i3c/i3cdev.h>
++
++#include "internals.h"
++
++struct i3cdev_data {
++ struct i3c_device *i3c;
++ struct device *dev;
++ struct mutex xfer_lock; /* prevent detach while transferring */
++ struct cdev cdev;
++ int id;
++};
++
++static DEFINE_IDA(i3cdev_ida);
++static dev_t i3cdev_number;
++#define I3C_MINORS (MINORMASK + 1)
++
++static struct i3cdev_data *get_free_i3cdev(struct i3c_device *i3c)
++{
++ struct i3cdev_data *i3cdev;
++ int id;
++
++ id = ida_simple_get(&i3cdev_ida, 0, I3C_MINORS, GFP_KERNEL);
++ if (id < 0) {
++ pr_err("i3cdev: no minor number available!\n");
++ return ERR_PTR(id);
++ }
++
++ i3cdev = kzalloc(sizeof(*i3cdev), GFP_KERNEL);
++ if (!i3cdev) {
++ ida_simple_remove(&i3cdev_ida, id);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ i3cdev->i3c = i3c;
++ i3cdev->id = id;
++ i3cdev_set_drvdata(i3c, i3cdev);
++
++ return i3cdev;
++}
++
++static void put_i3cdev(struct i3cdev_data *i3cdev)
++{
++ i3cdev_set_drvdata(i3cdev->i3c, NULL);
++ kfree(i3cdev);
++}
++
++static ssize_t
++i3cdev_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)
++{
++ struct i3cdev_data *i3cdev = file->private_data;
++ struct i3c_device *i3c = i3cdev->i3c;
++ struct i3c_priv_xfer xfers = {
++ .rnw = true,
++ .len = count,
++ };
++ int ret = -EACCES;
++ char *tmp;
++
++ mutex_lock(&i3cdev->xfer_lock);
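++	/* Refuse raw access from userspace while a kernel driver is bound to the device */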
++ if (i3c->dev.driver)
++ goto err_out;
++
++ tmp = kzalloc(count, GFP_KERNEL);
++	if (!tmp) {
++		ret = -ENOMEM;
++		goto err_out;
++	}
++
++ xfers.data.in = tmp;
++
++ dev_dbg(&i3c->dev, "Reading %zu bytes.\n", count);
++
++ ret = i3c_device_do_priv_xfers(i3c, &xfers, 1);
++ if (!ret)
++ ret = copy_to_user(buf, tmp, xfers.len) ? -EFAULT : xfers.len;
++
++ kfree(tmp);
++
++err_out:
++ mutex_unlock(&i3cdev->xfer_lock);
++ return ret;
++}
++
++static ssize_t
++i3cdev_write(struct file *file, const char __user *buf, size_t count,
++ loff_t *f_pos)
++{
++ struct i3cdev_data *i3cdev = file->private_data;
++ struct i3c_device *i3c = i3cdev->i3c;
++ struct i3c_priv_xfer xfers = {
++ .rnw = false,
++ .len = count,
++ };
++ int ret = -EACCES;
++ char *tmp;
++
++ mutex_lock(&i3cdev->xfer_lock);
++ if (i3c->dev.driver)
++ goto err_out;
++
++ tmp = memdup_user(buf, count);
++	if (IS_ERR(tmp)) {
++		ret = PTR_ERR(tmp);
++		goto err_out;
++	}
++
++ xfers.data.out = tmp;
++
++ dev_dbg(&i3c->dev, "Writing %zu bytes.\n", count);
++
++ ret = i3c_device_do_priv_xfers(i3c, &xfers, 1);
++ kfree(tmp);
++
++err_out:
++ mutex_unlock(&i3cdev->xfer_lock);
++ return (!ret) ? count : ret;
++}
++
++static int
++i3cdev_do_priv_xfer(struct i3c_device *dev, struct i3c_ioc_priv_xfer *xfers,
++ unsigned int nxfers)
++{
++ struct i3c_priv_xfer *k_xfers;
++ u8 **data_ptrs;
++ int i, j, ret = 0;
++
++	/* Allocate the transfer descriptors and the data pointer array in a single allocation */
++ k_xfers = kcalloc(nxfers, sizeof(*k_xfers) + sizeof(*data_ptrs),
++ GFP_KERNEL);
++ if (!k_xfers)
++ return -ENOMEM;
++
++ /* set data_ptrs to be after nxfers * i3c_priv_xfer */
++ data_ptrs = (void *)k_xfers + (nxfers * sizeof(*k_xfers));
++
++ for (i = 0; i < nxfers; i++) {
++ data_ptrs[i] = memdup_user((const u8 __user *)
++ (uintptr_t)xfers[i].data,
++ xfers[i].len);
++ if (IS_ERR(data_ptrs[i])) {
++ ret = PTR_ERR(data_ptrs[i]);
++ break;
++ }
++
++ k_xfers[i].len = xfers[i].len;
++ if (xfers[i].rnw) {
++ k_xfers[i].rnw = true;
++ k_xfers[i].data.in = data_ptrs[i];
++ } else {
++ k_xfers[i].rnw = false;
++ k_xfers[i].data.out = data_ptrs[i];
++ }
++ }
++
++ if (ret < 0)
++ goto err_free_mem;
++
++ ret = i3c_device_do_priv_xfers(dev, k_xfers, nxfers);
++ if (ret)
++ goto err_free_mem;
++
++ for (i = 0; i < nxfers; i++) {
++ if (xfers[i].rnw) {
++ if (copy_to_user(u64_to_user_ptr(xfers[i].data),
++ data_ptrs[i], xfers[i].len))
++ ret = -EFAULT;
++ }
++ }
++
++err_free_mem:
++ for (j = 0; j < i; j++)
++ kfree(data_ptrs[j]);
++ kfree(k_xfers);
++ return ret;
++}
++
++static struct i3c_ioc_priv_xfer *
++i3cdev_get_ioc_priv_xfer(unsigned int cmd, struct i3c_ioc_priv_xfer *u_xfers,
++ unsigned int *nxfers)
++{
++ u32 tmp = _IOC_SIZE(cmd);
++
++ if ((tmp % sizeof(struct i3c_ioc_priv_xfer)) != 0)
++ return ERR_PTR(-EINVAL);
++
++ *nxfers = tmp / sizeof(struct i3c_ioc_priv_xfer);
++ if (*nxfers == 0)
++ return ERR_PTR(-EINVAL);
++
++ return memdup_user(u_xfers, tmp);
++}
++
++static int
++i3cdev_ioc_priv_xfer(struct i3c_device *i3c, unsigned int cmd,
++ struct i3c_ioc_priv_xfer *u_xfers)
++{
++ struct i3c_ioc_priv_xfer *k_xfers;
++ unsigned int nxfers;
++ int ret;
++
++ k_xfers = i3cdev_get_ioc_priv_xfer(cmd, u_xfers, &nxfers);
++ if (IS_ERR(k_xfers))
++ return PTR_ERR(k_xfers);
++
++ ret = i3cdev_do_priv_xfer(i3c, k_xfers, nxfers);
++
++ kfree(k_xfers);
++
++ return ret;
++}
++
++static long
++i3cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ struct i3cdev_data *i3cdev = file->private_data;
++ struct i3c_device *i3c = i3cdev->i3c;
++ int ret = -EACCES;
++
++ dev_dbg(&i3c->dev, "ioctl, cmd=0x%02x, arg=0x%02lx\n", cmd, arg);
++
++ if (_IOC_TYPE(cmd) != I3C_DEV_IOC_MAGIC)
++ return -ENOTTY;
++
++ /* Use the xfer_lock to prevent device detach during ioctl call */
++ mutex_lock(&i3cdev->xfer_lock);
++ if (i3c->dev.driver)
++ goto err_no_dev;
++
++ /* Check command number and direction */
++ if (_IOC_NR(cmd) == _IOC_NR(I3C_IOC_PRIV_XFER(0)) &&
++ _IOC_DIR(cmd) == (_IOC_READ | _IOC_WRITE))
++ ret = i3cdev_ioc_priv_xfer(i3c, cmd,
++ (struct i3c_ioc_priv_xfer __user *)arg);
++
++err_no_dev:
++ mutex_unlock(&i3cdev->xfer_lock);
++ return ret;
++}
++
++static int i3cdev_open(struct inode *inode, struct file *file)
++{
++ struct i3cdev_data *i3cdev = container_of(inode->i_cdev,
++ struct i3cdev_data,
++ cdev);
++ file->private_data = i3cdev;
++
++ return 0;
++}
++
++static int i3cdev_release(struct inode *inode, struct file *file)
++{
++ file->private_data = NULL;
++
++ return 0;
++}
++
++static const struct file_operations i3cdev_fops = {
++ .owner = THIS_MODULE,
++ .read = i3cdev_read,
++ .write = i3cdev_write,
++ .unlocked_ioctl = i3cdev_ioctl,
++ .compat_ioctl = compat_ptr_ioctl,
++ .open = i3cdev_open,
++ .release = i3cdev_release,
++};
++
++/* ------------------------------------------------------------------------- */
++
++static struct class *i3cdev_class;
++
++static int i3cdev_attach(struct device *dev, void *dummy)
++{
++ struct i3cdev_data *i3cdev;
++ struct i3c_device *i3c;
++ int res;
++
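++	/* Skip the controller device itself and devices already claimed by a kernel driver */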
++ if (dev->type == &i3c_masterdev_type || dev->driver)
++ return 0;
++
++ i3c = dev_to_i3cdev(dev);
++
++	/* Allocate an i3cdev entry and reserve a free minor number for it */
++ i3cdev = get_free_i3cdev(i3c);
++ if (IS_ERR(i3cdev))
++ return PTR_ERR(i3cdev);
++
++ mutex_init(&i3cdev->xfer_lock);
++ cdev_init(&i3cdev->cdev, &i3cdev_fops);
++ i3cdev->cdev.owner = THIS_MODULE;
++ res = cdev_add(&i3cdev->cdev,
++ MKDEV(MAJOR(i3cdev_number), i3cdev->id), 1);
++ if (res)
++ goto error_cdev;
++
++	/* Register the chardev; '!' in the name is turned into '/' by devtmpfs, giving /dev/bus/i3c/<name> */
++ i3cdev->dev = device_create(i3cdev_class, &i3c->dev,
++ MKDEV(MAJOR(i3cdev_number), i3cdev->id),
++ NULL, "bus!i3c!%s", dev_name(&i3c->dev));
++ if (IS_ERR(i3cdev->dev)) {
++ res = PTR_ERR(i3cdev->dev);
++ goto error;
++ }
++ pr_debug("i3cdev: I3C device [%s] registered as minor %d\n",
++ dev_name(&i3c->dev), i3cdev->id);
++ return 0;
++
++error:
++ cdev_del(&i3cdev->cdev);
++error_cdev:
++ put_i3cdev(i3cdev);
++ return res;
++}
++
++static int i3cdev_detach(struct device *dev, void *dummy)
++{
++ struct i3cdev_data *i3cdev;
++ struct i3c_device *i3c;
++
++ if (dev->type == &i3c_masterdev_type)
++ return 0;
++
++ i3c = dev_to_i3cdev(dev);
++
++ i3cdev = i3cdev_get_drvdata(i3c);
++ if (!i3cdev)
++ return 0;
++
++	/* Prevent transfers during cdev removal */
++ mutex_lock(&i3cdev->xfer_lock);
++ cdev_del(&i3cdev->cdev);
++ device_destroy(i3cdev_class, MKDEV(MAJOR(i3cdev_number), i3cdev->id));
++ mutex_unlock(&i3cdev->xfer_lock);
++
++ ida_simple_remove(&i3cdev_ida, i3cdev->id);
++ put_i3cdev(i3cdev);
++
++ pr_debug("i3cdev: device [%s] unregistered\n", dev_name(&i3c->dev));
++
++ return 0;
++}
++
++static int i3cdev_notifier_call(struct notifier_block *nb,
++ unsigned long action,
++ void *data)
++{
++ struct device *dev = data;
++
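++	/*
++	 * Create a chardev when a device appears or loses its kernel driver,
++	 * and remove it when the device goes away or a driver binds to it.
++	 */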
++ switch (action) {
++ case BUS_NOTIFY_ADD_DEVICE:
++ case BUS_NOTIFY_UNBOUND_DRIVER:
++ return i3cdev_attach(dev, NULL);
++ case BUS_NOTIFY_DEL_DEVICE:
++ case BUS_NOTIFY_REMOVED_DEVICE:
++ case BUS_NOTIFY_BIND_DRIVER:
++ return i3cdev_detach(dev, NULL);
++ }
++
++ return 0;
++}
++
++static struct notifier_block i3cdev_notifier = {
++ .notifier_call = i3cdev_notifier_call,
++};
++
++static int __init i3cdev_init(void)
++{
++ int res;
++
++ /* Dynamically request unused major number */
++ res = alloc_chrdev_region(&i3cdev_number, 0, I3C_MINORS, "i3c");
++ if (res)
++ goto out;
++
++	/* Create a class to populate sysfs entries */
++ i3cdev_class = class_create("i3cdev");
++ if (IS_ERR(i3cdev_class)) {
++ res = PTR_ERR(i3cdev_class);
++ goto out_unreg_chrdev;
++ }
++
++	/* Get notified when I3C devices are added or removed so the chardevs can follow */
++ res = bus_register_notifier(&i3c_bus_type, &i3cdev_notifier);
++ if (res)
++ goto out_unreg_class;
++
++	/* Create chardevs right away for already existing devices without a driver */
++ i3c_for_each_dev(NULL, i3cdev_attach);
++
++ return 0;
++
++out_unreg_class:
++ class_destroy(i3cdev_class);
++out_unreg_chrdev:
++ unregister_chrdev_region(i3cdev_number, I3C_MINORS);
++out:
++ pr_err("%s: Driver Initialisation failed\n", __FILE__);
++ return res;
++}
++
++static void __exit i3cdev_exit(void)
++{
++ bus_unregister_notifier(&i3c_bus_type, &i3cdev_notifier);
++ i3c_for_each_dev(NULL, i3cdev_detach);
++ class_destroy(i3cdev_class);
++ unregister_chrdev_region(i3cdev_number, I3C_MINORS);
++}
++
++MODULE_AUTHOR("Vitor Soares <soares@synopsys.com>");
++MODULE_DESCRIPTION("I3C /dev entries driver");
++MODULE_LICENSE("GPL");
++
++module_init(i3cdev_init);
++module_exit(i3cdev_exit);
+diff --git a/drivers/i3c/internals.h b/drivers/i3c/internals.h
+index 908a807ba..1d82f2fe4 100644
+--- a/drivers/i3c/internals.h
++++ b/drivers/i3c/internals.h
+@@ -9,19 +9,55 @@
+ #define I3C_INTERNALS_H
+
+ #include <linux/i3c/master.h>
++#include <linux/i3c/target.h>
+
+ extern struct bus_type i3c_bus_type;
++extern const struct device_type i3c_masterdev_type;
+
+ void i3c_bus_normaluse_lock(struct i3c_bus *bus);
+ void i3c_bus_normaluse_unlock(struct i3c_bus *bus);
+
+ int i3c_dev_setdasa_locked(struct i3c_dev_desc *dev);
++int i3c_dev_getstatus_locked(struct i3c_dev_desc *dev, struct i3c_device_info *info);
+ int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers);
++int i3c_dev_send_hdr_cmds_locked(struct i3c_dev_desc *dev,
++ struct i3c_hdr_cmd *cmds, int ncmds);
+ int i3c_dev_disable_ibi_locked(struct i3c_dev_desc *dev);
+ int i3c_dev_enable_ibi_locked(struct i3c_dev_desc *dev);
+ int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+ void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev);
++int i3c_dev_getstatus_locked(struct i3c_dev_desc *dev, struct i3c_device_info *info);
++int i3c_master_getmrl_locked(struct i3c_master_controller *master, struct i3c_device_info *info);
++int i3c_master_getmwl_locked(struct i3c_master_controller *master, struct i3c_device_info *info);
++int i3c_master_setmrl_locked(struct i3c_master_controller *master,
++			     struct i3c_device_info *info, u16 read_len, u8 ibi_len);
++int i3c_master_setmwl_locked(struct i3c_master_controller *master,
++			     struct i3c_device_info *info, u16 write_len);
++
++int i3c_dev_dbgaction_wr_locked(struct i3c_dev_desc *dev, struct i3c_device_info *info,
++ u8 *data, u8 len);
++int i3c_dev_dbgopcode_wr_locked(struct i3c_dev_desc *dev, struct i3c_device_info *info,
++ u8 *data, u8 len);
++int i3c_dev_dbgopcode_rd_locked(struct i3c_dev_desc *dev, struct i3c_device_info *info,
++ u8 *data, u8 len);
++
++int i3c_for_each_dev(void *data, int (*fn)(struct device *, void *));
++int i3c_dev_generate_ibi_locked(struct i3c_dev_desc *dev, const u8 *data, int len);
++int i3c_dev_pending_read_notify_locked(struct i3c_dev_desc *dev,
++ struct i3c_priv_xfer *pending_read,
++ struct i3c_priv_xfer *ibi_notify);
++int i3c_dev_is_ibi_enabled_locked(struct i3c_dev_desc *dev);
++int i3c_for_each_dev(void *data, int (*fn)(struct device *, void *));
++int i3c_dev_control_pec(struct i3c_dev_desc *dev, bool pec);
++int i3c_master_getmrl_locked(struct i3c_master_controller *master,
++ struct i3c_device_info *info);
++int i3c_master_getmwl_locked(struct i3c_master_controller *master,
++ struct i3c_device_info *info);
++int i3c_master_setmrl_locked(struct i3c_master_controller *master,
++ struct i3c_device_info *info, u16 read_len, u8 ibi_len);
++int i3c_master_setmwl_locked(struct i3c_master_controller *master,
++ struct i3c_device_info *info, u16 write_len);
+ #endif /* I3C_INTERNAL_H */
+diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
+index 0e9ff5500..14b7c7b9a 100644
+--- a/drivers/i3c/master.c
++++ b/drivers/i3c/master.c
+@@ -16,12 +16,14 @@
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
+ #include <linux/workqueue.h>
++#include <dt-bindings/i3c/i3c.h>
+
+ #include "internals.h"
+
+ static DEFINE_IDR(i3c_bus_idr);
+ static DEFINE_MUTEX(i3c_core_lock);
+ static int __i3c_first_dynamic_bus_num;
++static BLOCKING_NOTIFIER_HEAD(i3c_bus_notifier);
+
+ /**
+ * i3c_bus_maintenance_lock - Lock the bus for a maintenance operation
+@@ -104,12 +106,14 @@ static struct i3c_master_controller *dev_to_i3cmaster(struct device *dev)
+ }
+
+ static const struct device_type i3c_device_type;
++static const struct device_type i3c_target_device_type;
+
+ static struct i3c_bus *dev_to_i3cbus(struct device *dev)
+ {
+ struct i3c_master_controller *master;
+
+- if (dev->type == &i3c_device_type)
++ if (dev->type == &i3c_device_type ||
++ dev->type == &i3c_target_device_type)
+ return dev_to_i3cdev(dev)->bus;
+
+ master = dev_to_i3cmaster(dev);
+@@ -121,7 +125,8 @@ static struct i3c_dev_desc *dev_to_i3cdesc(struct device *dev)
+ {
+ struct i3c_master_controller *master;
+
+- if (dev->type == &i3c_device_type)
++ if (dev->type == &i3c_device_type ||
++ dev->type == &i3c_target_device_type)
+ return dev_to_i3cdev(dev)->desc;
+
+ master = dev_to_i3cmaster(dev);
+@@ -139,7 +144,7 @@ static ssize_t bcr_show(struct device *dev,
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+- ret = sprintf(buf, "%x\n", desc->info.bcr);
++ ret = sysfs_emit(buf, "%x\n", desc->info.bcr);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+@@ -156,7 +161,7 @@ static ssize_t dcr_show(struct device *dev,
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+- ret = sprintf(buf, "%x\n", desc->info.dcr);
++ ret = sysfs_emit(buf, "%x\n", desc->info.dcr);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+@@ -173,7 +178,7 @@ static ssize_t pid_show(struct device *dev,
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+- ret = sprintf(buf, "%llx\n", desc->info.pid);
++ ret = sysfs_emit(buf, "%llx\n", desc->info.pid);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+@@ -184,13 +189,19 @@ static ssize_t dynamic_address_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+ {
++ struct i3c_dev_desc *desc = dev_to_i3cdesc(dev);
++ struct i3c_master_controller *master = i3c_dev_get_master(desc);
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+- struct i3c_dev_desc *desc;
+ ssize_t ret;
++ u8 dyn_addr;
+
+ i3c_bus_normaluse_lock(bus);
+- desc = dev_to_i3cdesc(dev);
+- ret = sprintf(buf, "%02x\n", desc->info.dyn_addr);
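++	/* In target mode the controller, not desc->info, tracks the dynamic address assigned to us */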
++ if (master->target && master->target_ops->get_dyn_addr)
++ dyn_addr = master->target_ops->get_dyn_addr(master);
++ else
++ dyn_addr = desc->info.dyn_addr;
++
++ ret = sysfs_emit(buf, "%02x\n", dyn_addr);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+@@ -221,15 +232,14 @@ static ssize_t hdrcap_show(struct device *dev,
+ if (!hdrcap_strings[mode])
+ continue;
+
+- ret = sprintf(buf + offset, offset ? " %s" : "%s",
+- hdrcap_strings[mode]);
++ ret = sysfs_emit(buf + offset, offset ? " %s" : "%s", hdrcap_strings[mode]);
+ if (ret < 0)
+ goto out;
+
+ offset += ret;
+ }
+
+- ret = sprintf(buf + offset, "\n");
++ ret = sysfs_emit(buf + offset, "\n");
+ if (ret < 0)
+ goto out;
+
+@@ -242,27 +252,77 @@ static ssize_t hdrcap_show(struct device *dev,
+ }
+ static DEVICE_ATTR_RO(hdrcap);
+
++static ssize_t bus_context_show(struct device *dev, struct device_attribute *da,
++ char *buf)
++{
++ struct i3c_bus *bus = dev_to_i3cbus(dev);
++ ssize_t ret;
++
++ i3c_bus_normaluse_lock(bus);
++	ret = sysfs_emit(buf, "%x\n", bus->context);
++ i3c_bus_normaluse_unlock(bus);
++
++ return ret;
++}
++static DEVICE_ATTR_RO(bus_context);
++
+ static ssize_t modalias_show(struct device *dev,
+ struct device_attribute *da, char *buf)
+ {
+- struct i3c_device *i3c = dev_to_i3cdev(dev);
+- struct i3c_device_info devinfo;
++ struct i3c_dev_desc *desc = dev_to_i3cdesc(dev);
++ struct i3c_device_info *devinfo = &desc->info;
+ u16 manuf, part, ext;
+
+- i3c_device_get_info(i3c, &devinfo);
+- manuf = I3C_PID_MANUF_ID(devinfo.pid);
+- part = I3C_PID_PART_ID(devinfo.pid);
+- ext = I3C_PID_EXTRA_INFO(devinfo.pid);
++ manuf = I3C_PID_MANUF_ID(devinfo->pid);
++ part = I3C_PID_PART_ID(devinfo->pid);
++ ext = I3C_PID_EXTRA_INFO(devinfo->pid);
+
+- if (I3C_PID_RND_LOWER_32BITS(devinfo.pid))
+- return sprintf(buf, "i3c:dcr%02Xmanuf%04X", devinfo.dcr,
++ if (I3C_PID_RND_LOWER_32BITS(devinfo->pid))
++		return sysfs_emit(buf, "i3c:dcr%02Xmanuf%04X\n", devinfo->dcr,
+ manuf);
+
+- return sprintf(buf, "i3c:dcr%02Xmanuf%04Xpart%04Xext%04X",
+- devinfo.dcr, manuf, part, ext);
++	return sysfs_emit(buf, "i3c:dcr%02Xmanuf%04Xpart%04Xext%04X\n",
++ devinfo->dcr, manuf, part, ext);
+ }
+ static DEVICE_ATTR_RO(modalias);
+
++static ssize_t bus_reset_store(struct device *dev, struct device_attribute *da,
++ const char *buf, size_t count)
++{
++ struct i3c_master_controller *master;
++ ssize_t ret = count;
++
++ master = dev_to_i3cmaster(dev);
++ dev_dbg(&master->dev, "Reset bus to return to i2c_mode...\n");
++ i3c_bus_maintenance_lock(&master->bus);
++ if (master->ops->bus_reset)
++ master->ops->bus_reset(master);
++
++ i3c_bus_maintenance_unlock(&master->bus);
++
++ return ret;
++}
++static DEVICE_ATTR_WO(bus_reset);
++
++static ssize_t status_show(struct device *dev,
++ struct device_attribute *da,
++ char *buf)
++{
++ struct i3c_dev_desc *desc = dev_to_i3cdesc(dev);
++ struct i3c_bus *bus = dev_to_i3cbus(dev);
++ ssize_t ret;
++
++ i3c_bus_normaluse_lock(bus);
++ ret = i3c_dev_getstatus_locked(desc, &desc->info);
++ if (!ret)
++ ret = sysfs_emit(buf, "%x\n", desc->info.status);
++
++ i3c_bus_normaluse_unlock(bus);
++
++ return ret;
++}
++static DEVICE_ATTR_RO(status);
++
+ static struct attribute *i3c_device_attrs[] = {
+ &dev_attr_bcr.attr,
+ &dev_attr_dcr.attr,
+@@ -270,6 +330,7 @@ static struct attribute *i3c_device_attrs[] = {
+ &dev_attr_dynamic_address.attr,
+ &dev_attr_hdrcap.attr,
+ &dev_attr_modalias.attr,
++ &dev_attr_status.attr,
+ NULL,
+ };
+ ATTRIBUTE_GROUPS(i3c_device);
+@@ -299,19 +360,31 @@ static const struct device_type i3c_device_type = {
+ .uevent = i3c_device_uevent,
+ };
+
++static struct attribute *i3c_target_device_attrs[] = {
++ &dev_attr_dynamic_address.attr,
++ NULL,
++};
++ATTRIBUTE_GROUPS(i3c_target_device);
++
++static const struct device_type i3c_target_device_type = {
++ .groups = i3c_target_device_groups,
++};
++
+ static int i3c_device_match(struct device *dev, struct device_driver *drv)
+ {
+ struct i3c_device *i3cdev;
+ struct i3c_driver *i3cdrv;
+
+- if (dev->type != &i3c_device_type)
++ if (dev->type != &i3c_device_type && dev->type != &i3c_target_device_type)
+ return 0;
+
+ i3cdev = dev_to_i3cdev(dev);
+ i3cdrv = drv_to_i3cdrv(drv);
+- if (i3c_device_match_id(i3cdev, i3cdrv->id_table))
+- return 1;
+
++ if ((dev->type == &i3c_device_type && !i3cdrv->target) ||
++ (dev->type == &i3c_target_device_type && i3cdrv->target))
++ if (i3c_device_match_id(i3cdev, i3cdrv->id_table))
++ return 1;
+ return 0;
+ }
+
+@@ -331,7 +404,8 @@ static void i3c_device_remove(struct device *dev)
+ if (driver->remove)
+ driver->remove(i3cdev);
+
+- i3c_device_free_ibi(i3cdev);
++ if (!driver->target)
++ i3c_device_free_ibi(i3cdev);
+ }
+
+ struct bus_type i3c_bus_type = {
+@@ -340,6 +414,7 @@ struct bus_type i3c_bus_type = {
+ .probe = i3c_device_probe,
+ .remove = i3c_device_remove,
+ };
++EXPORT_SYMBOL_GPL(i3c_bus_type);
+
+ static enum i3c_addr_slot_status
+ i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
+@@ -453,6 +528,36 @@ static int i3c_bus_init(struct i3c_bus *i3cbus, struct device_node *np)
+ return 0;
+ }
+
++void i3c_for_each_bus_locked(int (*fn)(struct i3c_bus *bus, void *data),
++ void *data)
++{
++ struct i3c_bus *bus;
++ int id;
++
++ mutex_lock(&i3c_core_lock);
++ idr_for_each_entry(&i3c_bus_idr, bus, id)
++ fn(bus, data);
++ mutex_unlock(&i3c_core_lock);
++}
++EXPORT_SYMBOL_GPL(i3c_for_each_bus_locked);
++
++int i3c_register_notifier(struct notifier_block *nb)
++{
++ return blocking_notifier_chain_register(&i3c_bus_notifier, nb);
++}
++EXPORT_SYMBOL_GPL(i3c_register_notifier);
++
++int i3c_unregister_notifier(struct notifier_block *nb)
++{
++ return blocking_notifier_chain_unregister(&i3c_bus_notifier, nb);
++}
++EXPORT_SYMBOL_GPL(i3c_unregister_notifier);
++
++static void i3c_bus_notify(struct i3c_bus *bus, unsigned int action)
++{
++ blocking_notifier_call_chain(&i3c_bus_notifier, action, bus);
++}
++
+ static const char * const i3c_bus_mode_strings[] = {
+ [I3C_BUS_MODE_PURE] = "pure",
+ [I3C_BUS_MODE_MIXED_FAST] = "mixed-fast",
+@@ -460,6 +565,26 @@ static const char * const i3c_bus_mode_strings[] = {
+ [I3C_BUS_MODE_MIXED_SLOW] = "mixed-slow",
+ };
+
++/**
++ * i3c_device_publish_event() - publish I3C framework event to all interested
++ * devices
++ * @master: master used to handle devices
++ * @ev: I3C framework event to publish
++ */
++static void i3c_device_publish_event(struct i3c_master_controller *master,
++ enum i3c_event ev)
++{
++ struct i3c_dev_desc *i3cdev;
++
++ i3c_bus_for_each_i3cdev(&master->bus, i3cdev) {
++ if (i3cdev->event_cb)
++ i3cdev->event_cb(i3cdev->dev, ev);
++ }
++}
++
++static int i3c_master_rstdaa_locked(struct i3c_master_controller *master,
++ u8 addr);
++
+ static ssize_t mode_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+@@ -471,9 +596,9 @@ static ssize_t mode_show(struct device *dev,
+ if (i3cbus->mode < 0 ||
+ i3cbus->mode >= ARRAY_SIZE(i3c_bus_mode_strings) ||
+ !i3c_bus_mode_strings[i3cbus->mode])
+- ret = sprintf(buf, "unknown\n");
++ ret = sysfs_emit(buf, "unknown\n");
+ else
+- ret = sprintf(buf, "%s\n", i3c_bus_mode_strings[i3cbus->mode]);
++ ret = sysfs_emit(buf, "%s\n", i3c_bus_mode_strings[i3cbus->mode]);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+@@ -488,8 +613,8 @@ static ssize_t current_master_show(struct device *dev,
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+- ret = sprintf(buf, "%d-%llx\n", i3cbus->id,
+- i3cbus->cur_master->info.pid);
++ ret = sysfs_emit(buf, "%d-%llx\n", i3cbus->id,
++ i3cbus->cur_master->info.pid);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+@@ -504,7 +629,7 @@ static ssize_t i3c_scl_frequency_show(struct device *dev,
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+- ret = sprintf(buf, "%ld\n", i3cbus->scl_rate.i3c);
++ ret = sysfs_emit(buf, "%ld\n", i3cbus->scl_rate.i3c);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+@@ -519,13 +644,151 @@ static ssize_t i2c_scl_frequency_show(struct device *dev,
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+- ret = sprintf(buf, "%ld\n", i3cbus->scl_rate.i2c);
++ ret = sysfs_emit(buf, "%ld\n", i3cbus->scl_rate.i2c);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+ }
+ static DEVICE_ATTR_RO(i2c_scl_frequency);
+
++static int i3c_set_hotjoin(struct i3c_master_controller *master, bool enable)
++{
++ int ret;
++
++ if (!master || !master->ops)
++ return -EINVAL;
++
++ if (!master->ops->enable_hotjoin || !master->ops->disable_hotjoin)
++ return -EINVAL;
++
++ i3c_bus_normaluse_lock(&master->bus);
++
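++	/*
++	 * Toggle controller-side Hot-Join support, then broadcast ENEC/DISEC
++	 * HJ so targets know whether Hot-Join requests will be accepted.
++	 */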
++ if (enable) {
++ ret = master->ops->enable_hotjoin(master);
++ i3c_master_enec_locked(master, I3C_BROADCAST_ADDR,
++ I3C_CCC_EVENT_HJ);
++ } else {
++ ret = master->ops->disable_hotjoin(master);
++ i3c_master_disec_locked(master, I3C_BROADCAST_ADDR,
++ I3C_CCC_EVENT_HJ);
++ }
++
++ master->hotjoin = enable;
++
++ i3c_bus_normaluse_unlock(&master->bus);
++
++ return ret;
++}
++
++static ssize_t hotjoin_store(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
++ int ret;
++ bool res;
++
++ if (!i3cbus->cur_master)
++ return -EINVAL;
++
++ if (kstrtobool(buf, &res))
++ return -EINVAL;
++
++ ret = i3c_set_hotjoin(i3cbus->cur_master->common.master, res);
++ if (ret)
++ return ret;
++
++ return count;
++}
++
++/*
++ * i3c_master_enable_hotjoin - Enable hotjoin
++ * @master: I3C master object
++ *
++ * Return: 0 in case of success, a negative error code otherwise.
++ */
++int i3c_master_enable_hotjoin(struct i3c_master_controller *master)
++{
++ return i3c_set_hotjoin(master, true);
++}
++EXPORT_SYMBOL_GPL(i3c_master_enable_hotjoin);
++
++/*
++ * i3c_master_disable_hotjoin - Disable hotjoin
++ * @master: I3C master object
++ *
++ * Return: 0 in case of success, a negative error code otherwise.
++ */
++int i3c_master_disable_hotjoin(struct i3c_master_controller *master)
++{
++ return i3c_set_hotjoin(master, false);
++}
++EXPORT_SYMBOL_GPL(i3c_master_disable_hotjoin);
++
++static ssize_t hotjoin_show(struct device *dev, struct device_attribute *da, char *buf)
++{
++ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
++ ssize_t ret;
++
++ i3c_bus_normaluse_lock(i3cbus);
++ ret = sysfs_emit(buf, "%d\n", i3cbus->cur_master->common.master->hotjoin);
++ i3c_bus_normaluse_unlock(i3cbus);
++
++ return ret;
++}
++
++static DEVICE_ATTR_RW(hotjoin);
++static ssize_t rescan_store(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct i3c_master_controller *master = dev_to_i3cmaster(dev);
++ struct i3c_bus *bus = i3c_master_get_bus(master);
++ bool res;
++ int ret;
++
++ ret = kstrtobool(buf, &res);
++ if (ret)
++ return ret;
++
++ if (!res)
++ return count;
++
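++	/*
++	 * Rescan sequence: warn drivers, disable target events (DISEC),
++	 * reset dynamic addresses (RSTDAA), run DAA again and finally let
++	 * drivers know the rescan is done.
++	 */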
++ i3c_device_publish_event(master, i3c_event_prepare_for_rescan);
++
++ i3c_bus_maintenance_lock(bus);
++
++ ret = i3c_master_disec_locked(master, I3C_BROADCAST_ADDR,
++ I3C_CCC_EVENT_SIR | I3C_CCC_EVENT_MR |
++ I3C_CCC_EVENT_HJ);
++ if (ret && ret != I3C_ERROR_M2) {
++ dev_dbg(&master->dev,
++ "Failed to run broadcast DISEC for rescan, ret=%d\n", ret);
++ i3c_bus_maintenance_unlock(bus);
++ return ret;
++ }
++
++ ret = i3c_master_rstdaa_locked(master, I3C_BROADCAST_ADDR);
++ if (ret && ret != I3C_ERROR_M2) {
++ dev_dbg(&master->dev,
++ "Failed to run RSTDAA for rescan, ret=%d\n", ret);
++ i3c_bus_maintenance_unlock(bus);
++ return ret;
++ }
++
++ i3c_bus_maintenance_unlock(bus);
++
++ ret = i3c_master_do_daa(master);
++ if (ret) {
++ dev_dbg(&master->dev, "Failed to run DAA for rescan, ret=%d\n",
++ ret);
++ return ret;
++ }
++
++ i3c_device_publish_event(master, i3c_event_rescan_done);
++
++ return count;
++}
++static DEVICE_ATTR_WO(rescan);
++
+ static struct attribute *i3c_masterdev_attrs[] = {
+ &dev_attr_mode.attr,
+ &dev_attr_current_master.attr,
+@@ -536,6 +799,10 @@ static struct attribute *i3c_masterdev_attrs[] = {
+ &dev_attr_pid.attr,
+ &dev_attr_dynamic_address.attr,
+ &dev_attr_hdrcap.attr,
++ &dev_attr_hotjoin.attr,
++ &dev_attr_bus_context.attr,
++ &dev_attr_bus_reset.attr,
++ &dev_attr_rescan.attr,
+ NULL,
+ };
+ ATTRIBUTE_GROUPS(i3c_masterdev);
+@@ -554,9 +821,10 @@ static void i3c_masterdev_release(struct device *dev)
+ of_node_put(dev->of_node);
+ }
+
+-static const struct device_type i3c_masterdev_type = {
++const struct device_type i3c_masterdev_type = {
+ .groups = i3c_masterdev_groups,
+ };
++EXPORT_SYMBOL_GPL(i3c_masterdev_type);
+
+ static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode,
+ unsigned long max_i2c_scl_rate)
+@@ -656,13 +924,15 @@ static void i3c_ccc_cmd_dest_cleanup(struct i3c_ccc_cmd_dest *dest)
+
+ static void i3c_ccc_cmd_init(struct i3c_ccc_cmd *cmd, bool rnw, u8 id,
+ struct i3c_ccc_cmd_dest *dests,
+- unsigned int ndests)
++ unsigned int ndests, bool dbp, u8 db)
+ {
+ cmd->rnw = rnw ? 1 : 0;
+ cmd->id = id;
+ cmd->dests = dests;
+ cmd->ndests = ndests;
+ cmd->err = I3C_ERROR_UNKNOWN;
++ cmd->dbp = dbp;
++ cmd->db = db;
+ }
+
+ static int i3c_master_send_ccc_cmd_locked(struct i3c_master_controller *master,
+@@ -778,8 +1048,8 @@ static int i3c_master_rstdaa_locked(struct i3c_master_controller *master,
+
+ i3c_ccc_cmd_dest_init(&dest, addr, 0);
+ i3c_ccc_cmd_init(&cmd, false,
+- I3C_CCC_RSTDAA(addr == I3C_BROADCAST_ADDR),
+- &dest, 1);
++ I3C_CCC_RSTDAA(addr == I3C_BROADCAST_ADDR), &dest, 1,
++ false, 0);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+@@ -809,7 +1079,7 @@ int i3c_master_entdaa_locked(struct i3c_master_controller *master)
+ int ret;
+
+ i3c_ccc_cmd_dest_init(&dest, I3C_BROADCAST_ADDR, 0);
+- i3c_ccc_cmd_init(&cmd, false, I3C_CCC_ENTDAA, &dest, 1);
++ i3c_ccc_cmd_init(&cmd, false, I3C_CCC_ENTDAA, &dest, 1, false, 0);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+@@ -831,10 +1101,9 @@ static int i3c_master_enec_disec_locked(struct i3c_master_controller *master,
+
+ events->events = evts;
+ i3c_ccc_cmd_init(&cmd, false,
+- enable ?
+- I3C_CCC_ENEC(addr == I3C_BROADCAST_ADDR) :
+- I3C_CCC_DISEC(addr == I3C_BROADCAST_ADDR),
+- &dest, 1);
++ enable ? I3C_CCC_ENEC(addr == I3C_BROADCAST_ADDR) :
++ I3C_CCC_DISEC(addr == I3C_BROADCAST_ADDR),
++ &dest, 1, false, 0);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+@@ -967,7 +1236,7 @@ int i3c_master_defslvs_locked(struct i3c_master_controller *master)
+ desc++;
+ }
+
+- i3c_ccc_cmd_init(&cmd, false, I3C_CCC_DEFSLVS, &dest, 1);
++ i3c_ccc_cmd_init(&cmd, false, I3C_CCC_DEFSLVS, &dest, 1, false, 0);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+@@ -975,6 +1244,43 @@ int i3c_master_defslvs_locked(struct i3c_master_controller *master)
+ }
+ EXPORT_SYMBOL_GPL(i3c_master_defslvs_locked);
+
++int i3c_master_setaasa_locked(struct i3c_master_controller *master)
++{
++ struct i3c_ccc_cmd_dest dest;
++ struct i3c_ccc_cmd cmd;
++ int ret;
++
++ i3c_ccc_cmd_dest_init(&dest, I3C_BROADCAST_ADDR, 0);
++ i3c_ccc_cmd_init(&cmd, false, I3C_CCC_SETAASA, &dest, 1, false, 0);
++
++ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
++ i3c_ccc_cmd_dest_cleanup(&dest);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(i3c_master_setaasa_locked);
++
++int i3c_master_sethid_locked(struct i3c_master_controller *master)
++{
++ struct i3c_ccc_cmd_dest dest;
++ struct i3c_ccc_cmd cmd;
++ struct i3c_ccc_sethid *sethid;
++ int ret;
++
++ sethid = i3c_ccc_cmd_dest_init(&dest, I3C_BROADCAST_ADDR, 1);
++ if (!sethid)
++ return -ENOMEM;
++
++ sethid->hid = 0;
++ i3c_ccc_cmd_init(&cmd, false, I3C_CCC_SETHID, &dest, 1, false, 0);
++
++ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
++ i3c_ccc_cmd_dest_cleanup(&dest);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(i3c_master_sethid_locked);
++
+ static int i3c_master_setda_locked(struct i3c_master_controller *master,
+ u8 oldaddr, u8 newaddr, bool setdasa)
+ {
+@@ -992,8 +1298,8 @@ static int i3c_master_setda_locked(struct i3c_master_controller *master,
+
+ setda->addr = newaddr << 1;
+ i3c_ccc_cmd_init(&cmd, false,
+- setdasa ? I3C_CCC_SETDASA : I3C_CCC_SETNEWDA,
+- &dest, 1);
++ setdasa ? I3C_CCC_SETDASA : I3C_CCC_SETNEWDA, &dest, 1,
++ false, 0);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+@@ -1012,8 +1318,8 @@ static int i3c_master_setnewda_locked(struct i3c_master_controller *master,
+ return i3c_master_setda_locked(master, oldaddr, newaddr, false);
+ }
+
+-static int i3c_master_getmrl_locked(struct i3c_master_controller *master,
+- struct i3c_device_info *info)
++int i3c_master_getmrl_locked(struct i3c_master_controller *master,
++ struct i3c_device_info *info)
+ {
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_mrl *mrl;
+@@ -1031,7 +1337,7 @@ static int i3c_master_getmrl_locked(struct i3c_master_controller *master,
+ if (!(info->bcr & I3C_BCR_IBI_PAYLOAD))
+ dest.payload.len -= 1;
+
+- i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMRL, &dest, 1);
++ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMRL, &dest, 1, false, 0);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+@@ -1054,8 +1360,8 @@ static int i3c_master_getmrl_locked(struct i3c_master_controller *master,
+ return ret;
+ }
+
+-static int i3c_master_getmwl_locked(struct i3c_master_controller *master,
+- struct i3c_device_info *info)
++int i3c_master_getmwl_locked(struct i3c_master_controller *master,
++ struct i3c_device_info *info)
+ {
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_mwl *mwl;
+@@ -1066,7 +1372,7 @@ static int i3c_master_getmwl_locked(struct i3c_master_controller *master,
+ if (!mwl)
+ return -ENOMEM;
+
+- i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMWL, &dest, 1);
++ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMWL, &dest, 1, false, 0);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+@@ -1084,6 +1390,61 @@ static int i3c_master_getmwl_locked(struct i3c_master_controller *master,
+ return ret;
+ }
+
++int i3c_master_setmrl_locked(struct i3c_master_controller *master,
++ struct i3c_device_info *info, u16 read_len, u8 ibi_len)
++{
++ struct i3c_ccc_cmd_dest dest;
++ struct i3c_ccc_cmd cmd;
++ struct i3c_ccc_mrl *mrl;
++ int ret;
++
++ mrl = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*mrl));
++ if (!mrl)
++ return -ENOMEM;
++
++ /*
++	 * When the device does not have an IBI payload, SETMRL only sends 2
++ * bytes of data.
++ */
++ if (!(info->bcr & I3C_BCR_IBI_PAYLOAD))
++ dest.payload.len -= 1;
++
++ mrl->read_len = cpu_to_be16(read_len);
++ mrl->ibi_len = ibi_len;
++ info->max_read_len = read_len;
++ info->max_ibi_len = mrl->ibi_len;
++ i3c_ccc_cmd_init(&cmd, false, I3C_CCC_SETMRL(false), &dest, 1, false,
++ 0);
++
++ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
++ i3c_ccc_cmd_dest_cleanup(&dest);
++
++ return ret;
++}
++
++int i3c_master_setmwl_locked(struct i3c_master_controller *master,
++ struct i3c_device_info *info, u16 write_len)
++{
++ struct i3c_ccc_cmd_dest dest;
++ struct i3c_ccc_cmd cmd;
++ struct i3c_ccc_mwl *mwl;
++ int ret;
++
++ mwl = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*mwl));
++ if (!mwl)
++ return -ENOMEM;
++
++ mwl->len = cpu_to_be16(write_len);
++ info->max_write_len = write_len;
++ i3c_ccc_cmd_init(&cmd, false, I3C_CCC_SETMWL(false), &dest, 1, false,
++ 0);
++
++ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
++ i3c_ccc_cmd_dest_cleanup(&dest);
++
++ return ret;
++}
++
+ static int i3c_master_getmxds_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+ {
+@@ -1097,7 +1458,7 @@ static int i3c_master_getmxds_locked(struct i3c_master_controller *master,
+ if (!getmaxds)
+ return -ENOMEM;
+
+- i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMXDS, &dest, 1);
++ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMXDS, &dest, 1, false, 0);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+@@ -1133,12 +1494,12 @@ static int i3c_master_gethdrcap_locked(struct i3c_master_controller *master,
+ if (!gethdrcap)
+ return -ENOMEM;
+
+- i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETHDRCAP, &dest, 1);
++ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETHDRCAP, &dest, 1, false, 0);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+- if (dest.payload.len != 1) {
++ if (dest.payload.len < 1 || dest.payload.len > 4) {
+ ret = -EIO;
+ goto out;
+ }
+@@ -1163,7 +1524,7 @@ static int i3c_master_getpid_locked(struct i3c_master_controller *master,
+ if (!getpid)
+ return -ENOMEM;
+
+- i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETPID, &dest, 1);
++ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETPID, &dest, 1, false, 0);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+@@ -1193,7 +1554,7 @@ static int i3c_master_getbcr_locked(struct i3c_master_controller *master,
+ if (!getbcr)
+ return -ENOMEM;
+
+- i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETBCR, &dest, 1);
++ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETBCR, &dest, 1, false, 0);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+@@ -1218,7 +1579,7 @@ static int i3c_master_getdcr_locked(struct i3c_master_controller *master,
+ if (!getdcr)
+ return -ENOMEM;
+
+- i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETDCR, &dest, 1);
++ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETDCR, &dest, 1, false, 0);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+@@ -1231,31 +1592,101 @@ static int i3c_master_getdcr_locked(struct i3c_master_controller *master,
+ return ret;
+ }
+
+-static int i3c_master_retrieve_dev_info(struct i3c_dev_desc *dev)
++int i3c_dev_dbgaction_wr_locked(struct i3c_dev_desc *dev, struct i3c_device_info *info,
++ u8 *data, u8 len)
+ {
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+- enum i3c_addr_slot_status slot_status;
++ struct i3c_ccc_cmd_dest dest;
++ struct i3c_ccc_cmd cmd;
++ u8 *data_int;
+ int ret;
+
+- if (!dev->info.dyn_addr)
+- return -EINVAL;
++ data_int = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, len);
++ if (!data_int)
++ return -ENOMEM;
+
+- slot_status = i3c_bus_get_addr_slot_status(&master->bus,
+- dev->info.dyn_addr);
+- if (slot_status == I3C_ADDR_SLOT_RSVD ||
+- slot_status == I3C_ADDR_SLOT_I2C_DEV)
+- return -EINVAL;
++ memcpy(data_int, data, len);
+
+- ret = i3c_master_getpid_locked(master, &dev->info);
+- if (ret)
+- return ret;
++ i3c_ccc_cmd_init(&cmd, false, I3C_CCC_DBGACTION(false), &dest, 1, false, 0);
++ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
++ i3c_ccc_cmd_dest_cleanup(&dest);
+
+- ret = i3c_master_getbcr_locked(master, &dev->info);
+- if (ret)
+- return ret;
++ return ret;
++}
+
+- ret = i3c_master_getdcr_locked(master, &dev->info);
+- if (ret)
++int i3c_dev_dbgopcode_wr_locked(struct i3c_dev_desc *dev, struct i3c_device_info *info,
++ u8 *data, u8 len)
++{
++ struct i3c_master_controller *master = i3c_dev_get_master(dev);
++ struct i3c_ccc_cmd_dest dest;
++ struct i3c_ccc_cmd cmd;
++ u8 *data_int;
++ int ret;
++
++ data_int = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, len);
++ if (!data_int)
++ return -ENOMEM;
++
++ memcpy(data_int, data, len);
++
++ i3c_ccc_cmd_init(&cmd, false, I3C_CCC_DBGOPCODE, &dest, 1, false, 0);
++ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
++ i3c_ccc_cmd_dest_cleanup(&dest);
++
++ return ret;
++}
++
++int i3c_dev_dbgopcode_rd_locked(struct i3c_dev_desc *dev, struct i3c_device_info *info,
++ u8 *data, u8 len)
++{
++ struct i3c_master_controller *master = i3c_dev_get_master(dev);
++ struct i3c_ccc_cmd_dest dest;
++ struct i3c_ccc_cmd cmd;
++ u8 *data_int;
++ int ret;
++
++ data_int = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, len);
++ if (!data_int)
++ return -ENOMEM;
++
++ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_DBGOPCODE, &dest, 1, false, 0);
++ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
++ if (ret)
++ goto out;
++
++ memcpy(data, data_int, len);
++
++out:
++ i3c_ccc_cmd_dest_cleanup(&dest);
++
++ return ret;
++}
++
++static int i3c_master_retrieve_dev_info(struct i3c_dev_desc *dev)
++{
++ struct i3c_master_controller *master = i3c_dev_get_master(dev);
++ enum i3c_addr_slot_status slot_status;
++ int ret;
++
++ if (!dev->info.dyn_addr)
++ return -EINVAL;
++
++ slot_status = i3c_bus_get_addr_slot_status(&master->bus,
++ dev->info.dyn_addr);
++ if (slot_status == I3C_ADDR_SLOT_RSVD ||
++ slot_status == I3C_ADDR_SLOT_I2C_DEV)
++ return -EINVAL;
++
++ ret = i3c_master_getpid_locked(master, &dev->info);
++ if (ret)
++ return ret;
++
++ ret = i3c_master_getbcr_locked(master, &dev->info);
++ if (ret)
++ return ret;
++
++ ret = i3c_master_getdcr_locked(master, &dev->info);
++ if (ret)
+ return ret;
+
+ if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM) {
+@@ -1267,6 +1698,19 @@ static int i3c_master_retrieve_dev_info(struct i3c_dev_desc *dev)
+ if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
+ dev->info.max_ibi_len = 1;
+
++ /*
++ * FIXME: The default mwl/mrl of the AST2700 A0 I3C target is 0, which can lead to
++ * application issues. Therefore, add a workaround to set mwl/mrl to meet the hardware
++	 * capability when the PID indicates that the device is an AST2700 A0 I3C target.
++ * MANUF_ID 0x3f6 => ASPEED
++ * PART_ID 0x600 => Ast2700 A0
++ */
++ if (I3C_PID_MANUF_ID(dev->info.pid) == 0x3f6 &&
++ I3C_PID_PART_ID(dev->info.pid) == 0x0600) {
++ i3c_master_setmrl_locked(master, &dev->info, 128, 128);
++ i3c_master_setmwl_locked(master, &dev->info, 128);
++ }
++
+ i3c_master_getmrl_locked(master, &dev->info);
+ i3c_master_getmwl_locked(master, &dev->info);
+
+@@ -1550,18 +1994,28 @@ i3c_master_register_new_i3c_devs(struct i3c_master_controller *master)
+ */
+ int i3c_master_do_daa(struct i3c_master_controller *master)
+ {
+- int ret;
++ int ret = 0;
+
+ i3c_bus_maintenance_lock(&master->bus);
+- ret = master->ops->do_daa(master);
++ if (master->bus.context == I3C_BUS_CONTEXT_JESD403) {
++		/* Save the init_done flag and clear it temporarily so the CCCs needed to switch
++		 * SPD/JESD403 devices into I3C mode can be sent even after the bus is initialized.
++		 */
++ bool init_done = master->init_done;
++
++ master->init_done = false;
++ ret = i3c_master_sethid_locked(master);
++ ret = i3c_master_setaasa_locked(master);
++ master->init_done = init_done;
++ } else {
++ ret = master->ops->do_daa(master);
++ }
+ i3c_bus_maintenance_unlock(&master->bus);
+
+- if (ret)
++ if (ret && ret != I3C_ERROR_M2)
+ return ret;
+
+- i3c_bus_normaluse_lock(&master->bus);
+ i3c_master_register_new_i3c_devs(master);
+- i3c_bus_normaluse_unlock(&master->bus);
+
+ return 0;
+ }
+@@ -1654,6 +2108,75 @@ static void i3c_master_detach_free_devs(struct i3c_master_controller *master)
+ }
+ }
+
++static int i3c_master_jesd403_bus_init(struct i3c_master_controller *master)
++{
++ struct i3c_dev_boardinfo *i3cboardinfo;
++ struct i3c_dev_desc *i3cdev;
++ struct i3c_device_info info;
++ int ret;
++
++ list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
++ /*
++ * Assuming all target devices attached to the bus are JESD403-1
++ * compliant devices, which means:
++ * - using the static address as the dynamic address
++ * - using SETAASA to enter the I3C mode
++ *
++ * Therefore, we skip the target devices that do not have static
++ * addresses or assigned-address properties.
++ */
++ if (!i3cboardinfo->init_dyn_addr || !i3cboardinfo->static_addr)
++ continue;
++
++ if (i3cboardinfo->init_dyn_addr != i3cboardinfo->static_addr)
++ continue;
++
++ ret = i3c_bus_get_addr_slot_status(&master->bus,
++ i3cboardinfo->init_dyn_addr);
++ if (ret != I3C_ADDR_SLOT_FREE)
++ return -EBUSY;
++
++ /*
++		 * JESD403-compliant devices do not support the GETPID/BCR/DCR/MXDS CCCs.
++		 * Import these mandatory pieces of information from the boardinfo.
++ */
++ info.static_addr = i3cboardinfo->static_addr;
++ info.dyn_addr = i3cboardinfo->init_dyn_addr;
++ info.pid = i3cboardinfo->pid;
++ info.dcr = i3cboardinfo->dcr;
++ info.bcr = i3cboardinfo->bcr;
++ info.max_write_ds = 0;
++ info.max_read_ds = 0;
++ if (info.bcr & I3C_BCR_IBI_PAYLOAD)
++ info.max_ibi_len = 1;
++
++ i3cdev = i3c_master_alloc_i3c_dev(master, &info);
++ if (IS_ERR(i3cdev))
++ return -ENOMEM;
++
++ i3cdev->boardinfo = i3cboardinfo;
++
++ ret = i3c_master_attach_i3c_dev(master, i3cdev);
++ if (ret) {
++ i3c_master_free_i3c_dev(i3cdev);
++ return ret;
++ }
++ }
++
++	/*
++	 * Mixing I3C-mode and I2C-mode devices on a JESD403 bus is not
++	 * possible: an I2C-mode device would switch into I3C mode as soon
++	 * as it receives the broadcast SETAASA CCC.
++	 *
++	 * So only run the SETAASA-based address assignment below when the
++	 * bus is declared pure I3C; otherwise leave the devices alone.
++	 */
++ if (master->bus.mode != I3C_BUS_MODE_PURE)
++ return 0;
++
++ return i3c_master_do_daa(master);
++}
++
+ /**
+ * i3c_master_bus_init() - initialize an I3C bus
+ * @master: main master initializing the bus
+@@ -1759,6 +2282,14 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
+ if (ret && ret != I3C_ERROR_M2)
+ goto err_bus_cleanup;
+
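++	/*
++	 * JESD403 buses assign addresses via SETAASA rather than ENTDAA, so
++	 * hand over to the dedicated init path and skip the DAA flow below.
++	 */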
++ if (master->bus.context == I3C_BUS_CONTEXT_JESD403) {
++ ret = i3c_master_jesd403_bus_init(master);
++ if (ret)
++ goto err_rstdaa;
++
++ return 0;
++ }
++
+ /*
+ * Reserve init_dyn_addr first, and then try to pre-assign dynamic
+ * address and retrieve device information if needed.
+@@ -1819,6 +2350,25 @@ static int i3c_master_bus_init(struct i3c_master_controller *master)
+
+ static void i3c_master_bus_cleanup(struct i3c_master_controller *master)
+ {
++ int ret;
++
++ i3c_bus_maintenance_lock(&master->bus);
++	/* Disable all target events before cleaning up the bus. */
++ ret = i3c_master_disec_locked(master, I3C_BROADCAST_ADDR,
++ I3C_CCC_EVENT_SIR | I3C_CCC_EVENT_MR |
++ I3C_CCC_EVENT_HJ);
++ if (ret && ret != I3C_ERROR_M2)
++ dev_dbg(&master->dev, "failed to send DISEC, ret=%i\n", ret);
++
++	/*
++	 * Reset all dynamic addresses that may have been assigned, so the
++	 * targets are left in a clean state for the next bus initialization.
++	 */
++ ret = i3c_master_rstdaa_locked(master, I3C_BROADCAST_ADDR);
++ if (ret && ret != I3C_ERROR_M2)
++ dev_dbg(&master->dev, "failed to send RSTDAA, ret=%i\n", ret);
++ i3c_bus_maintenance_unlock(&master->bus);
++
+ if (master->ops->bus_cleanup)
+ master->ops->bus_cleanup(master);
+
+@@ -1847,7 +2397,9 @@ i3c_master_search_i3c_dev_duplicate(struct i3c_dev_desc *refdev)
+ struct i3c_dev_desc *i3cdev;
+
+ i3c_bus_for_each_i3cdev(&master->bus, i3cdev) {
+- if (i3cdev != refdev && i3cdev->info.pid == refdev->info.pid)
++ /* Skip the I3C dev representing this master. */
++ if (i3cdev != master->this && i3cdev != refdev &&
++ i3cdev->info.pid == refdev->info.pid)
+ return i3cdev;
+ }
+
+@@ -1900,6 +2452,7 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
+ olddev = i3c_master_search_i3c_dev_duplicate(newdev);
+ if (olddev) {
+ newdev->dev = olddev->dev;
++ newdev->event_cb = olddev->event_cb;
+ if (newdev->dev)
+ newdev->dev->desc = newdev;
+
+@@ -1923,6 +2476,16 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
+ i3c_dev_free_ibi_locked(olddev);
+ }
+ mutex_unlock(&olddev->ibi_lock);
++ if (olddev->info.max_ibi_len != newdev->info.max_ibi_len ||
++ olddev->info.max_read_len != newdev->info.max_read_len)
++ i3c_master_setmrl_locked(master, &newdev->info,
++ olddev->info.max_read_len,
++ olddev->info.max_ibi_len);
++ if (olddev->info.max_write_len != newdev->info.max_write_len)
++ i3c_master_setmwl_locked(master, &newdev->info,
++ olddev->info.max_write_len);
++ if (olddev->info.pec != newdev->info.pec)
++ i3c_device_control_pec(newdev->dev, olddev->info.pec);
+
+ old_dyn_addr = olddev->info.dyn_addr;
+
+@@ -2050,6 +2613,8 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
+ struct device *dev = &master->dev;
+ enum i3c_addr_slot_status addrstatus;
+ u32 init_dyn_addr = 0;
++ u8 bcr = 0;
++ u8 dcr = 0;
+
+ boardinfo = devm_kzalloc(dev, sizeof(*boardinfo), GFP_KERNEL);
+ if (!boardinfo)
+@@ -2083,6 +2648,16 @@ of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
+ I3C_PID_RND_LOWER_32BITS(boardinfo->pid))
+ return -EINVAL;
+
++ if (!of_property_read_u8(node, "dcr", &dcr)) {
++ if (dcr > I3C_DCR_MAX)
++ return -EINVAL;
++
++ boardinfo->dcr = dcr;
++ }
++
++ if (!of_property_read_u8(node, "bcr", &bcr))
++ boardinfo->bcr = bcr;
++
+ boardinfo->init_dyn_addr = init_dyn_addr;
+ boardinfo->of_node = of_node_get(node);
+ list_add_tail(&boardinfo->node, &master->boardinfo.i3c);
+@@ -2120,12 +2695,29 @@ static int of_populate_i3c_bus(struct i3c_master_controller *master)
+ struct device *dev = &master->dev;
+ struct device_node *i3cbus_np = dev->of_node;
+ struct device_node *node;
+- int ret;
++ int ret, i;
+ u32 val;
++ u8 context;
+
+ if (!i3cbus_np)
+ return 0;
+
++ /*
++ * If the bus context is not specified, set the default value to MIPI
++ * I3C Basic Version 1.0.
++ */
++ master->bus.context = I3C_BUS_CONTEXT_MIPI_BASIC_V1_0_0;
++ if (!of_property_read_u8(i3cbus_np, "bus-context", &context))
++ master->bus.context = context;
++
++	/*
++	 * Undo the unnecessary address reservations for the JESD403 bus
++	 * context: slots that differ from the broadcast address (0x7E) by a
++	 * single bit are normally reserved, but JESD403 devices use fixed
++	 * static addresses that may land on them.
++	 */
++ if (master->bus.context == I3C_BUS_CONTEXT_JESD403) {
++ for (i = 0; i < 7; i++)
++ i3c_bus_set_addr_slot_status(&master->bus,
++ I3C_BROADCAST_ADDR ^ BIT(i),
++ I3C_ADDR_SLOT_FREE);
++ }
++
+ for_each_available_child_of_node(i3cbus_np, node) {
+ ret = of_i3c_master_add_dev(master, node);
+ if (ret) {
+@@ -2311,7 +2903,7 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
+ adap->dev.parent = master->dev.parent;
+ adap->owner = master->dev.parent->driver->owner;
+ adap->algo = &i3c_master_i2c_algo;
+- strncpy(adap->name, dev_name(master->dev.parent), sizeof(adap->name));
++ strscpy(adap->name, dev_name(master->dev.parent), sizeof(adap->name));
+
+ /* FIXME: Should we allow i3c masters to override these values? */
+ adap->timeout = 1000;
+@@ -2349,11 +2941,19 @@ static void i3c_master_i2c_adapter_cleanup(struct i3c_master_controller *master)
+ static void i3c_master_unregister_i3c_devs(struct i3c_master_controller *master)
+ {
+ struct i3c_dev_desc *i3cdev;
++ int ret;
+
+ i3c_bus_for_each_i3cdev(&master->bus, i3cdev) {
+ if (!i3cdev->dev)
+ continue;
+
++ ret = i3c_device_disable_ibi(i3cdev->dev);
++ if (!ret)
++ i3c_device_free_ibi(i3cdev->dev);
++ else
++ dev_warn(i3cdev_to_dev(i3cdev->dev), "Failed to disable IBI, ret = %d",
++ ret);
++
+ i3cdev->dev->desc = NULL;
+ if (device_is_registered(&i3cdev->dev->dev))
+ device_unregister(&i3cdev->dev->dev);
+@@ -2374,7 +2974,7 @@ static void i3c_master_unregister_i3c_devs(struct i3c_master_controller *master)
+ void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot)
+ {
+ atomic_inc(&dev->ibi->pending_ibis);
+- queue_work(dev->common.master->wq, &slot->work);
++ queue_work(dev->ibi->wq, &slot->work);
+ }
+ EXPORT_SYMBOL_GPL(i3c_master_queue_ibi);
+
+@@ -2631,6 +3231,10 @@ int i3c_master_register(struct i3c_master_controller *master,
+ device_initialize(&master->dev);
+ dev_set_name(&master->dev, "i3c-%d", i3cbus->id);
+
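++	/* Inherit the parent's DMA settings so controller drivers can map DMA buffers against the I3C device */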
++ master->dev.dma_mask = parent->dma_mask;
++ master->dev.coherent_dma_mask = parent->coherent_dma_mask;
++ master->dev.dma_parms = parent->dma_parms;
++
+ ret = of_populate_i3c_bus(master);
+ if (ret)
+ goto err_put_dev;
+@@ -2684,14 +3288,14 @@ int i3c_master_register(struct i3c_master_controller *master,
+ if (ret)
+ goto err_del_dev;
+
++ i3c_bus_notify(i3cbus, I3C_NOTIFY_BUS_ADD);
++
+ /*
+ * We're done initializing the bus and the controller, we can now
+ * register I3C devices discovered during the initial DAA.
+ */
+ master->init_done = true;
+- i3c_bus_normaluse_lock(&master->bus);
+ i3c_master_register_new_i3c_devs(master);
+- i3c_bus_normaluse_unlock(&master->bus);
+
+ return 0;
+
+@@ -2716,6 +3320,8 @@ EXPORT_SYMBOL_GPL(i3c_master_register);
+ */
+ void i3c_master_unregister(struct i3c_master_controller *master)
+ {
++ i3c_bus_notify(&master->bus, I3C_NOTIFY_BUS_REMOVE);
++
+ i3c_master_i2c_adapter_cleanup(master);
+ i3c_master_unregister_i3c_devs(master);
+ i3c_master_bus_cleanup(master);
+@@ -2723,6 +3329,258 @@ void i3c_master_unregister(struct i3c_master_controller *master)
+ }
+ EXPORT_SYMBOL_GPL(i3c_master_unregister);
+
++static int i3c_target_bus_init(struct i3c_master_controller *master)
++{
++ return master->target_ops->bus_init(master);
++}
++
++static void i3c_target_bus_cleanup(struct i3c_master_controller *master)
++{
++ if (master->target_ops->bus_cleanup)
++ master->target_ops->bus_cleanup(master);
++}
++
++static void i3c_targetdev_release(struct device *dev)
++{
++ struct i3c_master_controller *master = container_of(dev, struct i3c_master_controller, dev);
++ struct i3c_bus *bus = &master->bus;
++
++ mutex_lock(&i3c_core_lock);
++ idr_remove(&i3c_bus_idr, bus->id);
++ mutex_unlock(&i3c_core_lock);
++
++ of_node_put(dev->of_node);
++}
++
++static void i3c_target_device_release(struct device *dev)
++{
++ struct i3c_device *i3cdev = dev_to_i3cdev(dev);
++ struct i3c_dev_desc *desc = i3cdev->desc;
++
++ kfree(i3cdev);
++ kfree(desc);
++}
++
++static void
++i3c_target_register_new_i3c_dev(struct i3c_master_controller *master, struct i3c_device_info info)
++{
++ struct i3c_dev_desc *desc;
++ int ret;
++
++ desc = kzalloc(sizeof(*desc), GFP_KERNEL);
++ if (!desc)
++ return;
++
++ desc->dev = kzalloc(sizeof(*desc->dev), GFP_KERNEL);
++ if (!desc->dev) {
++ kfree(desc);
++ return;
++ }
++
++ desc->dev->bus = &master->bus;
++ desc->dev->desc = desc;
++ desc->dev->dev.parent = &master->dev;
++ desc->dev->dev.type = &i3c_target_device_type;
++ desc->dev->dev.bus = &i3c_bus_type;
++ desc->dev->dev.release = i3c_target_device_release;
++ desc->info = info;
++ desc->common.master = master;
++ dev_set_name(&desc->dev->dev, "%d-target", master->bus.id);
++
++ ret = device_register(&desc->dev->dev);
++ if (ret)
++ dev_err(&master->dev, "Failed to add I3C target device (err = %d)\n", ret);
++
++ master->this = desc;
++}
++
++static void i3c_target_unregister_i3c_dev(struct i3c_master_controller *master)
++{
++ struct i3c_dev_desc *i3cdev = master->this;
++
++ if (device_is_registered(&i3cdev->dev->dev))
++ device_unregister(&i3cdev->dev->dev);
++ else
++ put_device(&i3cdev->dev->dev);
++}
++
++static void i3c_target_read_device_info(struct device_node *np, struct i3c_device_info *info)
++{
++ u64 pid;
++ u8 dcr, static_addr;
++ int ret;
++
++ ret = of_property_read_u64(np, "pid", &pid);
++ if (ret)
++ info->pid = 0;
++ else
++ info->pid = pid;
++
++ ret = of_property_read_u8(np, "dcr", &dcr);
++ if (ret)
++ info->dcr = 0;
++ else
++ info->dcr = dcr;
++
++ ret = of_property_read_u8(np, "static-address", &static_addr);
++ if (ret)
++ info->static_addr = 0;
++ else
++ info->static_addr = static_addr;
++}
++
++static int i3c_target_check_ops(const struct i3c_target_ops *ops)
++{
++ if (!ops || !ops->bus_init)
++ return -EINVAL;
++
++ return 0;
++}
++
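++/*
++ * Sysfs hook: writing to "hotjoin_req" asks the target controller to issue
++ * a Hot-Join request, provided the controller reports Hot-Join as enabled.
++ */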
++static ssize_t hotjoin_req_store(struct device *dev,
++ struct device_attribute *da, const char *buf,
++ size_t count)
++{
++ struct i3c_master_controller *master;
++ ssize_t ret = count;
++
++ master = dev_to_i3cmaster(dev);
++ if (!master->target_ops->is_hj_enabled)
++ return -EOPNOTSUPP;
++ if (!master->target_ops->is_hj_enabled(master->this))
++ return -EACCES;
++
++ if (!master->target_ops->hj_req)
++ return -EOPNOTSUPP;
++ ret = master->target_ops->hj_req(master->this);
++ if (ret)
++ return ret;
++
++ return count;
++}
++static DEVICE_ATTR_WO(hotjoin_req);
++
++static struct attribute *i3c_targetdev_attrs[] = {
++ &dev_attr_hotjoin_req.attr,
++ &dev_attr_bcr.attr,
++ &dev_attr_dcr.attr,
++ &dev_attr_pid.attr,
++ &dev_attr_dynamic_address.attr,
++ &dev_attr_hdrcap.attr,
++ &dev_attr_modalias.attr,
++ NULL,
++};
++ATTRIBUTE_GROUPS(i3c_targetdev);
++
++const struct device_type i3c_targetdev_type = {
++ .groups = i3c_targetdev_groups,
++};
++EXPORT_SYMBOL_GPL(i3c_targetdev_type);
++
++int i3c_target_register(struct i3c_master_controller *master, struct device *parent,
++ const struct i3c_target_ops *ops)
++{
++ struct i3c_bus *i3cbus = i3c_master_get_bus(master);
++ struct i3c_device_info info;
++ int ret;
++
++ ret = i3c_target_check_ops(ops);
++ if (ret)
++ return ret;
++
++ master->dev.parent = parent;
++ master->dev.of_node = of_node_get(parent->of_node);
++ master->dev.bus = &i3c_bus_type;
++ master->dev.type = &i3c_targetdev_type;
++ master->dev.release = i3c_targetdev_release;
++ master->target_ops = ops;
++ i3cbus->mode = I3C_BUS_MODE_PURE;
++
++ ret = i3c_bus_init(i3cbus, master->dev.of_node);
++ if (ret)
++ return ret;
++
++ device_initialize(&master->dev);
++ dev_set_name(&master->dev, "i3c-%d", i3cbus->id);
++
++ master->dev.dma_mask = parent->dma_mask;
++ master->dev.coherent_dma_mask = parent->coherent_dma_mask;
++ master->dev.dma_parms = parent->dma_parms;
++
++ ret = device_add(&master->dev);
++ if (ret)
++ goto err_put_device;
++
++ i3c_target_read_device_info(master->dev.of_node, &info);
++
++ i3c_target_register_new_i3c_dev(master, info);
++
++ ret = i3c_target_bus_init(master);
++ if (ret)
++ goto err_cleanup_bus;
++
++ return 0;
++
++err_cleanup_bus:
++ i3c_target_bus_cleanup(master);
++
++err_put_device:
++ put_device(&master->dev);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(i3c_target_register);
++
++int i3c_target_unregister(struct i3c_master_controller *master)
++{
++ i3c_target_unregister_i3c_dev(master);
++ i3c_target_bus_cleanup(master);
++ device_unregister(&master->dev);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(i3c_target_unregister);
++
++int i3c_target_read_register(struct i3c_device *dev, const struct i3c_target_read_setup *setup)
++{
++ dev->desc->target_info.read_handler = setup->handler;
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(i3c_target_read_register);
++
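++/*
++ * Register the controller in the role selected by the optional
++ * "initial-role" device-tree property: if the property is absent or set to
++ * "primary" the device is registered as a bus controller, "target"
++ * registers it as a target, and any other value is rejected.
++ */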
++int i3c_register(struct i3c_master_controller *master,
++ struct device *parent,
++ const struct i3c_master_controller_ops *master_ops,
++ const struct i3c_target_ops *target_ops,
++ bool secondary)
++{
++ const char *role;
++ int ret;
++
++ ret = of_property_read_string(parent->of_node, "initial-role", &role);
++ if (ret || !strcmp("primary", role)) {
++ return i3c_master_register(master, parent, master_ops, secondary);
++ } else if (!strcmp("target", role)) {
++ master->target = true;
++ return i3c_target_register(master, parent, target_ops);
++ } else {
++ return -EOPNOTSUPP;
++ }
++}
++EXPORT_SYMBOL_GPL(i3c_register);
++
++int i3c_unregister(struct i3c_master_controller *master)
++{
++ if (master->target)
++ i3c_target_unregister(master);
++ else
++ i3c_master_unregister(master);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(i3c_unregister);
++
+ int i3c_dev_setdasa_locked(struct i3c_dev_desc *dev)
+ {
+ struct i3c_master_controller *master;
+@@ -2742,6 +3600,56 @@ int i3c_dev_setdasa_locked(struct i3c_dev_desc *dev)
+ dev->boardinfo->init_dyn_addr);
+ }
+
++int i3c_dev_getstatus_locked(struct i3c_dev_desc *dev,
++ struct i3c_device_info *info)
++{
++ struct i3c_master_controller *master = i3c_dev_get_master(dev);
++ struct i3c_ccc_getstatus *getsts;
++ struct i3c_ccc_cmd_dest dest;
++ struct i3c_ccc_cmd cmd;
++ int ret;
++
++ getsts = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getsts));
++ if (!getsts)
++ return -ENOMEM;
++
++ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETSTATUS, &dest, 1, false, 0);
++ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
++ if (ret)
++ goto out;
++
++ info->status = getsts->status;
++
++out:
++ i3c_ccc_cmd_dest_cleanup(&dest);
++
++ return ret;
++}
++
++int i3c_dev_send_hdr_cmds_locked(struct i3c_dev_desc *dev,
++ struct i3c_hdr_cmd *cmds, int ncmds)
++{
++ int i;
++ struct i3c_master_controller *master;
++
++ if (!dev)
++ return -ENOENT;
++
++ master = i3c_dev_get_master(dev);
++ if (!cmds || !master || ncmds <= 0)
++ return -EINVAL;
++
++ if (!master->ops->send_hdr_cmds)
++ return -EOPNOTSUPP;
++
++ for (i = 0; i < ncmds; i++) {
++ if (!(master->this->info.hdr_cap & BIT(cmds[i].mode)))
++ return -EOPNOTSUPP;
++ }
++
++ return master->ops->send_hdr_cmds(dev, cmds, ncmds);
++}
++
+ int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers)
+@@ -2755,10 +3663,81 @@ int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
+ if (!master || !xfers)
+ return -EINVAL;
+
+- if (!master->ops->priv_xfers)
+- return -ENOTSUPP;
++ if (!master->target) {
++ if (!master->ops->priv_xfers)
++ return -EOPNOTSUPP;
++
++ return master->ops->priv_xfers(dev, xfers, nxfers);
++ }
++
++ if (!master->target_ops->priv_xfers)
++ return -EOPNOTSUPP;
+
+- return master->ops->priv_xfers(dev, xfers, nxfers);
++ return master->target_ops->priv_xfers(dev, xfers, nxfers);
++}
++
++int i3c_dev_generate_ibi_locked(struct i3c_dev_desc *dev, const u8 *data, int len)
++{
++ struct i3c_master_controller *master;
++
++ if (!dev)
++ return -ENOENT;
++
++ master = i3c_dev_get_master(dev);
++ if (!master)
++ return -EINVAL;
++
++ if (!master->target)
++ return -EINVAL;
++
++ if (!master->target_ops->generate_ibi)
++ return -EOPNOTSUPP;
++
++ return master->target_ops->generate_ibi(dev, data, len);
++}
++
++int i3c_dev_pending_read_notify_locked(struct i3c_dev_desc *dev,
++ struct i3c_priv_xfer *pending_read,
++ struct i3c_priv_xfer *ibi_notify)
++{
++ struct i3c_master_controller *master;
++
++ if (!dev)
++ return -ENOENT;
++
++ master = i3c_dev_get_master(dev);
++ if (!master)
++ return -EINVAL;
++
++ if (!master->target)
++ return -EINVAL;
++
++ if (!master->target_ops->pending_read_notify)
++ return -EOPNOTSUPP;
++
++ return master->target_ops->pending_read_notify(dev, pending_read,
++ ibi_notify);
++}
++
++int i3c_dev_is_ibi_enabled_locked(struct i3c_dev_desc *dev)
++{
++ struct i3c_master_controller *master;
++
++ if (!dev)
++ return -ENOENT;
++
++ master = i3c_dev_get_master(dev);
++ if (!master)
++ return -EINVAL;
++
++ if (!master->target)
++ return -EINVAL;
++
++ if (!master->target_ops->is_ibi_enabled)
++ return -EOPNOTSUPP;
++
++ return master->target_ops->is_ibi_enabled(dev);
+ }
+
+ int i3c_dev_disable_ibi_locked(struct i3c_dev_desc *dev)
+@@ -2815,6 +3794,12 @@ int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
+ if (!ibi)
+ return -ENOMEM;
+
++ ibi->wq = alloc_ordered_workqueue(dev_name(i3cdev_to_dev(dev->dev)), WQ_MEM_RECLAIM);
++ if (!ibi->wq) {
++ kfree(ibi);
++ return -ENOMEM;
++ }
++
+ atomic_set(&ibi->pending_ibis, 0);
+ init_completion(&ibi->all_ibis_handled);
+ ibi->handler = req->handler;
+@@ -2842,10 +3827,47 @@ void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev)
+ WARN_ON(i3c_dev_disable_ibi_locked(dev));
+
+ master->ops->free_ibi(dev);
++
++ if (dev->ibi->wq) {
++ destroy_workqueue(dev->ibi->wq);
++ dev->ibi->wq = NULL;
++ }
++
+ kfree(dev->ibi);
+ dev->ibi = NULL;
+ }
+
++int i3c_for_each_dev(void *data, int (*fn)(struct device *, void *))
++{
++ int res;
++
++ mutex_lock(&i3c_core_lock);
++ res = bus_for_each_dev(&i3c_bus_type, NULL, data, fn);
++ mutex_unlock(&i3c_core_lock);
++
++ return res;
++}
++EXPORT_SYMBOL_GPL(i3c_for_each_dev);
++
++int i3c_dev_control_pec(struct i3c_dev_desc *dev, bool pec)
++{
++ struct i3c_master_controller *master = i3c_dev_get_master(dev);
++
++ if (!master->pec_supported)
++ return -EOPNOTSUPP;
++
++ dev->info.pec = pec;
++
++ /*
++ * TODO: There are two cases which should be covered:
++ * 1. The controller doesn't support PEC.
++ *    In this case we could fall back to a software implementation.
++ * 2. The device doesn't support PEC.
++ *    Then we really can't use PEC and should return an error.
++ */
++ return 0;
++}
++
+ static int __init i3c_init(void)
+ {
+ int res;
+diff --git a/drivers/i3c/master/ast2600-i3c-master.c b/drivers/i3c/master/ast2600-i3c-master.c
+index 01a47d3dd..b832aaba3 100644
+--- a/drivers/i3c/master/ast2600-i3c-master.c
++++ b/drivers/i3c/master/ast2600-i3c-master.c
+@@ -10,6 +10,8 @@
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
+ #include <linux/regmap.h>
++#include <linux/bitfield.h>
++#include <dt-bindings/i3c/i3c.h>
+
+ #include "dw-i3c-master.h"
+
+@@ -33,18 +35,93 @@
+ #define AST2600_I3CG_REG1_SA_EN BIT(15)
+ #define AST2600_I3CG_REG1_INST_ID_MASK GENMASK(19, 16)
+ #define AST2600_I3CG_REG1_INST_ID(x) (((x) << 16) & AST2600_I3CG_REG1_INST_ID_MASK)
++#define SCL_SW_MODE_OE BIT(20)
++#define SCL_OUT_SW_MODE_VAL BIT(21)
++#define SCL_IN_SW_MODE_VAL BIT(23)
++#define SDA_SW_MODE_OE BIT(24)
++#define SDA_OUT_SW_MODE_VAL BIT(25)
++#define SDA_IN_SW_MODE_VAL BIT(27)
++#define SCL_IN_SW_MODE_EN BIT(28)
++#define SDA_IN_SW_MODE_EN BIT(29)
++#define SCL_OUT_SW_MODE_EN BIT(30)
++#define SDA_OUT_SW_MODE_EN BIT(31)
+
+ #define AST2600_DEFAULT_SDA_PULLUP_OHMS 2000
+
++#define DEV_ADDR_TABLE_LEGACY_I2C_DEV BIT(31)
++#define DEV_ADDR_TABLE_DYNAMIC_ADDR GENMASK(23, 16)
++#define DEV_ADDR_TABLE_IBI_ADDR_MASK GENMASK(25, 24)
++#define IBI_ADDR_MASK_OFF 0b00
++#define IBI_ADDR_MASK_LAST_3BITS 0b01
++#define IBI_ADDR_MASK_LAST_4BITS 0b10
++#define DEV_ADDR_TABLE_DA_PARITY BIT(23)
++#define DEV_ADDR_TABLE_MR_REJECT BIT(14)
++#define DEV_ADDR_TABLE_SIR_REJECT BIT(13)
++#define DEV_ADDR_TABLE_IBI_MDB BIT(12)
+ #define DEV_ADDR_TABLE_IBI_PEC BIT(11)
++#define DEV_ADDR_TABLE_STATIC_ADDR GENMASK(6, 0)
++
++#define DEV_ADDR_TABLE_LOC(start, idx) ((start) + ((idx) << 2))
++
++#define DEVICE_CTRL 0x0
++#define DEV_CTRL_SLAVE_MDB GENMASK(23, 16)
++#define DEV_CTRL_HOT_JOIN_NACK BIT(8)
++
++#define NUM_OF_SWDATS_IN_GROUP 8
++#define ALL_DATS_IN_GROUP_ARE_FREE ((1 << NUM_OF_SWDATS_IN_GROUP) - 1)
++#define NUM_OF_SWDAT_GROUP 16
++
++#define ADDR_GRP_MASK GENMASK(6, 3)
++#define ADDR_GRP(x) (((x) & ADDR_GRP_MASK) >> 3)
++#define ADDR_HID_MASK GENMASK(2, 0)
++#define ADDR_HID(x) ((x) & ADDR_HID_MASK)
++
++#define IBI_QUEUE_STATUS 0x18
++
++#define IBI_SIR_REQ_REJECT 0x30
++#define INTR_STATUS_EN 0x40
++#define INTR_SIGNAL_EN 0x44
++#define INTR_IBI_THLD_STAT BIT(2)
++
++#define PRESENT_STATE 0x54
++#define CM_TFR_STS GENMASK(13, 8)
++#define CM_TFR_STS_MASTER_SERV_IBI 0xe
++#define SDA_LINE_SIGNAL_LEVEL BIT(1)
++#define SCL_LINE_SIGNAL_LEVEL BIT(0)
++
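++/*
++ * Software DAT bookkeeping: dynamic addresses are split into 16 groups by
++ * address bits [6:3], and the up to 8 devices of a group share a single
++ * hardware DAT entry. When SIR is enabled, the shared entry matches IBIs
++ * from any group member by masking the last three address bits.
++ */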
++struct ast2600_i3c_swdat_group {
++ u32 dat[NUM_OF_SWDATS_IN_GROUP];
++ u32 free_pos;
++ int hw_index;
++ struct {
++ u32 set;
++ u32 clr;
++ } mask;
++};
+
+ struct ast2600_i3c {
+ struct dw_i3c_master dw;
+ struct regmap *global_regs;
+ unsigned int global_idx;
+ unsigned int sda_pullup;
++
++ struct ast2600_i3c_swdat_group dat_group[NUM_OF_SWDAT_GROUP];
+ };
+
++static u8 even_parity(u8 p)
++{
++ p ^= p >> 4;
++ p &= 0xf;
++
++ return (0x9669 >> p) & 1;
++}
++
++static inline struct dw_i3c_master *
++to_dw_i3c_master(struct i3c_master_controller *master)
++{
++ return container_of(master, struct dw_i3c_master, base);
++}
++
+ static struct ast2600_i3c *to_ast2600_i3c(struct dw_i3c_master *dw)
+ {
+ return container_of(dw, struct ast2600_i3c, dw);
+@@ -117,9 +194,537 @@ static void ast2600_i3c_set_dat_ibi(struct dw_i3c_master *i3c,
+ }
+ }
+
++static void ast2600_i3c_enter_sw_mode(struct dw_i3c_master *dw)
++{
++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw);
++
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_IN_SW_MODE_VAL | SDA_IN_SW_MODE_VAL,
++ SCL_IN_SW_MODE_VAL | SDA_IN_SW_MODE_VAL);
++
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_IN_SW_MODE_EN | SDA_IN_SW_MODE_EN,
++ SCL_IN_SW_MODE_EN | SDA_IN_SW_MODE_EN);
++}
++
++static void ast2600_i3c_exit_sw_mode(struct dw_i3c_master *dw)
++{
++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw);
++
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_IN_SW_MODE_EN | SDA_IN_SW_MODE_EN, 0);
++}
++
++static void ast2600_i3c_toggle_scl_in(struct dw_i3c_master *dw, int count)
++{
++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw);
++
++ for (; count; count--) {
++ regmap_write_bits(i3c->global_regs,
++ AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_IN_SW_MODE_VAL, 0);
++ regmap_write_bits(i3c->global_regs,
++ AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_IN_SW_MODE_VAL, SCL_IN_SW_MODE_VAL);
++ }
++}
++
++static void ast2600_i3c_gen_internal_stop(struct dw_i3c_master *dw)
++{
++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw);
++
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_IN_SW_MODE_VAL, 0);
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SDA_IN_SW_MODE_VAL, 0);
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_IN_SW_MODE_VAL, SCL_IN_SW_MODE_VAL);
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SDA_IN_SW_MODE_VAL, SDA_IN_SW_MODE_VAL);
++}
++
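++/*
++ * Bus recovery: drive SCL from software and clock it until the target
++ * releases SDA, checking the SDA level in PRESENT_STATE after each pulse.
++ */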
++static int aspeed_i3c_bus_recovery(struct dw_i3c_master *dw)
++{
++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw);
++ int i, ret = -1;
++
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_OUT_SW_MODE_VAL, SCL_OUT_SW_MODE_VAL);
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_SW_MODE_OE, SCL_SW_MODE_OE);
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_OUT_SW_MODE_EN, SCL_OUT_SW_MODE_EN);
++
++ for (i = 0; i < 19; i++) {
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_OUT_SW_MODE_VAL, 0);
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_OUT_SW_MODE_VAL, SCL_OUT_SW_MODE_VAL);
++ if (readl(dw->regs + PRESENT_STATE) & SDA_LINE_SIGNAL_LEVEL) {
++ ret = 0;
++ break;
++ }
++ }
++
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_OUT_SW_MODE_EN, 0);
++ if (ret)
++ dev_err(&dw->base.dev, "Failed to recover the bus\n");
++
++ return ret;
++}
++
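++/*
++ * Generate a target reset pattern by bit-banging SCL/SDA through the
++ * global registers: a timed SCL-low reset on JESD403 buses, otherwise an
++ * SDA toggling sequence followed by a STOP.
++ */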
++static void ast2600_i3c_gen_target_reset_pattern(struct dw_i3c_master *dw)
++{
++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw);
++ int i;
++
++ if (dw->base.bus.context == I3C_BUS_CONTEXT_JESD403) {
++ regmap_write_bits(i3c->global_regs,
++ AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_OUT_SW_MODE_VAL, SCL_OUT_SW_MODE_VAL);
++ regmap_write_bits(i3c->global_regs,
++ AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_SW_MODE_OE, SCL_SW_MODE_OE);
++ regmap_write_bits(i3c->global_regs,
++ AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_OUT_SW_MODE_EN, SCL_OUT_SW_MODE_EN);
++ regmap_write_bits(i3c->global_regs,
++ AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_OUT_SW_MODE_VAL, 0);
++ mdelay(DIV_ROUND_UP(dw->timing.timed_reset_scl_low_ns,
++ 1000000));
++ regmap_write_bits(i3c->global_regs,
++ AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_OUT_SW_MODE_VAL, SCL_OUT_SW_MODE_VAL);
++ regmap_write_bits(i3c->global_regs,
++ AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_OUT_SW_MODE_EN, 0);
++ return;
++ }
++
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SDA_OUT_SW_MODE_VAL | SCL_OUT_SW_MODE_VAL,
++ SDA_OUT_SW_MODE_VAL | SCL_OUT_SW_MODE_VAL);
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SDA_SW_MODE_OE | SCL_SW_MODE_OE,
++ SDA_SW_MODE_OE | SCL_SW_MODE_OE);
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SDA_OUT_SW_MODE_EN | SCL_OUT_SW_MODE_EN,
++ SDA_OUT_SW_MODE_EN | SCL_OUT_SW_MODE_EN);
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SDA_IN_SW_MODE_VAL | SCL_IN_SW_MODE_VAL,
++ SDA_IN_SW_MODE_VAL | SCL_IN_SW_MODE_VAL);
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SDA_IN_SW_MODE_EN | SCL_IN_SW_MODE_EN,
++ SDA_IN_SW_MODE_EN | SCL_IN_SW_MODE_EN);
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_OUT_SW_MODE_VAL, 0);
++ for (i = 0; i < 7; i++) {
++ regmap_write_bits(i3c->global_regs,
++ AST2600_I3CG_REG1(i3c->global_idx),
++ SDA_OUT_SW_MODE_VAL, 0);
++ regmap_write_bits(i3c->global_regs,
++ AST2600_I3CG_REG1(i3c->global_idx),
++ SDA_OUT_SW_MODE_VAL, SDA_OUT_SW_MODE_VAL);
++ }
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SCL_OUT_SW_MODE_VAL, SCL_OUT_SW_MODE_VAL);
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SDA_OUT_SW_MODE_VAL, 0);
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SDA_OUT_SW_MODE_VAL, SDA_OUT_SW_MODE_VAL);
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SDA_OUT_SW_MODE_EN | SCL_OUT_SW_MODE_EN, 0);
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SDA_IN_SW_MODE_EN | SCL_IN_SW_MODE_EN, 0);
++}
++
++static bool ast2600_i3c_fsm_exit_serv_ibi(struct dw_i3c_master *dw)
++{
++ u32 state;
++
++ /*
++ * Clear the IBI queue to enable the hardware to generate SCL and
++ * begin detecting the T-bit low to stop reading IBI data.
++ */
++ readl(dw->regs + IBI_QUEUE_STATUS);
++ state = FIELD_GET(CM_TFR_STS, readl(dw->regs + PRESENT_STATE));
++ if (state == CM_TFR_STS_MASTER_SERV_IBI)
++ return false;
++
++ return true;
++}
++
++static void ast2600_i3c_gen_tbits_in(struct dw_i3c_master *dw)
++{
++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw);
++ bool is_idle;
++ int ret;
++
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SDA_IN_SW_MODE_VAL, SDA_IN_SW_MODE_VAL);
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SDA_IN_SW_MODE_EN, SDA_IN_SW_MODE_EN);
++
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SDA_IN_SW_MODE_VAL, 0);
++ ret = readx_poll_timeout_atomic(ast2600_i3c_fsm_exit_serv_ibi, dw,
++ is_idle, is_idle, 0, 2000000);
++ regmap_write_bits(i3c->global_regs, AST2600_I3CG_REG1(i3c->global_idx),
++ SDA_IN_SW_MODE_EN, 0);
++ if (ret)
++ dev_err(&dw->base.dev,
++ "Failed to exit the I3C fsm from %lx(MASTER_SERV_IBI): %d",
++ FIELD_GET(CM_TFR_STS, readl(dw->regs + PRESENT_STATE)),
++ ret);
++}
++
++static void ast2600_i3c_set_ibi_mdb(struct dw_i3c_master *dw, u8 mdb)
++{
++ u32 reg;
++
++ reg = readl(dw->regs + DEVICE_CTRL);
++ reg &= ~DEV_CTRL_SLAVE_MDB;
++ reg |= FIELD_PREP(DEV_CTRL_SLAVE_MDB, mdb);
++ writel(reg, dw->regs + DEVICE_CTRL);
++}
++
++static int ast2600_i3c_get_free_hw_pos(struct dw_i3c_master *dw)
++{
++ if (!(dw->free_pos & GENMASK(dw->maxdevs - 1, 0)))
++ return -ENOSPC;
++
++ return ffs(dw->free_pos) - 1;
++}
++
++static void ast2600_i3c_init_swdat(struct dw_i3c_master *dw)
++{
++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw);
++ struct ast2600_i3c_swdat_group *gp;
++ int i, j;
++ u32 def_set, def_clr;
++
++ def_clr = DEV_ADDR_TABLE_IBI_ADDR_MASK;
++ def_set = DEV_ADDR_TABLE_MR_REJECT | DEV_ADDR_TABLE_SIR_REJECT;
++
++ for (i = 0; i < NUM_OF_SWDAT_GROUP; i++) {
++ gp = &i3c->dat_group[i];
++ gp->hw_index = -1;
++ gp->free_pos = ALL_DATS_IN_GROUP_ARE_FREE;
++ gp->mask.clr = def_clr;
++ gp->mask.set = def_set;
++
++ for (j = 0; j < NUM_OF_SWDATS_IN_GROUP; j++)
++ gp->dat[j] = 0;
++ }
++
++ for (i = 0; i < dw->maxdevs; i++)
++ writel(def_set,
++ dw->regs + DEV_ADDR_TABLE_LOC(dw->datstartaddr, i));
++}
++
++static int ast2600_i3c_set_swdat(struct dw_i3c_master *dw, u8 addr, u32 val)
++{
++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw);
++ struct ast2600_i3c_swdat_group *gp = &i3c->dat_group[ADDR_GRP(addr)];
++ int pos = ADDR_HID(addr);
++
++ if (!(val & DEV_ADDR_TABLE_LEGACY_I2C_DEV)) {
++ /* Calculate DA parity for I3C devices */
++ val &= ~DEV_ADDR_TABLE_DA_PARITY;
++ val |= FIELD_PREP(DEV_ADDR_TABLE_DA_PARITY, even_parity(addr));
++ }
++ gp->dat[pos] = val;
++
++ if (val) {
++ gp->free_pos &= ~BIT(pos);
++
++ /*
++ * Reserve the HW DAT entry for the first member of the group;
++ * all members of the group share the same HW DAT entry.
++ */
++ if (gp->hw_index == -1) {
++ gp->hw_index = ast2600_i3c_get_free_hw_pos(dw);
++ if (gp->hw_index < 0)
++ goto out;
++
++ dw->free_pos &= ~BIT(gp->hw_index);
++ val &= ~gp->mask.clr;
++ val |= gp->mask.set;
++ writel(val,
++ dw->regs + DEV_ADDR_TABLE_LOC(dw->datstartaddr,
++ gp->hw_index));
++ }
++ } else {
++ gp->free_pos |= BIT(pos);
++
++ /*
++ * Release the HW DAT entry when all the members in the group
++ * are free.
++ */
++ if (gp->free_pos == ALL_DATS_IN_GROUP_ARE_FREE) {
++ writel(gp->mask.set,
++ dw->regs + DEV_ADDR_TABLE_LOC(dw->datstartaddr,
++ gp->hw_index));
++ dw->free_pos |= BIT(gp->hw_index);
++ gp->hw_index = -1;
++ }
++ }
++out:
++ return gp->hw_index;
++}
++
++static u32 ast2600_i3c_get_swdat(struct dw_i3c_master *dw, u8 addr)
++{
++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw);
++ struct ast2600_i3c_swdat_group *gp = &i3c->dat_group[ADDR_GRP(addr)];
++
++ return gp->dat[ADDR_HID(addr)];
++}
++
++static int ast2600_i3c_flush_swdat(struct dw_i3c_master *dw, u8 addr)
++{
++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw);
++ struct ast2600_i3c_swdat_group *gp = &i3c->dat_group[ADDR_GRP(addr)];
++ u32 dat = gp->dat[ADDR_HID(addr)];
++ int hw_index = gp->hw_index;
++
++ if (!dat || hw_index < 0)
++ return -1;
++
++ dat &= ~gp->mask.clr;
++ dat |= gp->mask.set;
++ writel(dat, dw->regs + DEV_ADDR_TABLE_LOC(dw->datstartaddr, hw_index));
++
++ return 0;
++}
++
++static int ast2600_i3c_get_swdat_hw_pos(struct dw_i3c_master *dw, u8 addr)
++{
++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw);
++ struct ast2600_i3c_swdat_group *gp = &i3c->dat_group[ADDR_GRP(addr)];
++
++ return gp->hw_index;
++}
++
++static int ast2600_i3c_reattach_i3c_dev(struct i3c_dev_desc *dev,
++ u8 old_dyn_addr)
++{
++ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++ u32 dat = FIELD_PREP(DEV_ADDR_TABLE_DYNAMIC_ADDR, dev->info.dyn_addr);
++
++ if (old_dyn_addr != dev->info.dyn_addr)
++ ast2600_i3c_set_swdat(master, old_dyn_addr, 0);
++
++ ast2600_i3c_set_swdat(master, dev->info.dyn_addr, dat);
++ data->index = ast2600_i3c_get_swdat_hw_pos(master, dev->info.dyn_addr);
++ master->devs[dev->info.dyn_addr].addr = dev->info.dyn_addr;
++
++ return 0;
++}
++
++static int ast2600_i3c_attach_i3c_dev(struct i3c_dev_desc *dev)
++{
++ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++ int pos;
++ u8 addr = dev->info.dyn_addr ?: dev->info.static_addr;
++
++ pos = ast2600_i3c_set_swdat(master, addr,
++ FIELD_PREP(DEV_ADDR_TABLE_DYNAMIC_ADDR, addr));
++ if (pos < 0)
++ return pos;
++
++ data = kzalloc(sizeof(*data), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ data->index = ast2600_i3c_get_swdat_hw_pos(master, addr);
++ master->devs[addr].addr = addr;
++ i3c_dev_set_master_data(dev, data);
++
++ if (master->base.bus.context == I3C_BUS_CONTEXT_JESD403) {
++ dev->info.max_write_ds = 0;
++ dev->info.max_read_ds = 0;
++ }
++
++ return 0;
++}
++
++static void ast2600_i3c_detach_i3c_dev(struct i3c_dev_desc *dev)
++{
++ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++ u8 addr = dev->info.dyn_addr ?: dev->info.static_addr;
++
++ ast2600_i3c_set_swdat(master, addr, 0);
++
++ i3c_dev_set_master_data(dev, NULL);
++ master->devs[addr].addr = 0;
++ kfree(data);
++}
++
++static int ast2600_i3c_attach_i2c_dev(struct i2c_dev_desc *dev)
++{
++ struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
++ struct i3c_master_controller *m = i2c_dev_get_master(dev);
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++ int pos;
++
++ pos = ast2600_i3c_set_swdat(master, dev->addr,
++ DEV_ADDR_TABLE_LEGACY_I2C_DEV |
++ FIELD_PREP(DEV_ADDR_TABLE_STATIC_ADDR, dev->addr));
++ if (pos < 0)
++ return pos;
++
++ data = kzalloc(sizeof(*data), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ data->index = ast2600_i3c_get_swdat_hw_pos(master, dev->addr);
++ master->devs[dev->addr].addr = dev->addr;
++ i2c_dev_set_master_data(dev, data);
++
++ return 0;
++}
++
++static void ast2600_i3c_detach_i2c_dev(struct i2c_dev_desc *dev)
++{
++ struct dw_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
++ struct i3c_master_controller *m = i2c_dev_get_master(dev);
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++
++ ast2600_i3c_set_swdat(master, dev->addr, 0);
++
++ i2c_dev_set_master_data(dev, NULL);
++ master->devs[dev->addr].addr = 0;
++ kfree(data);
++}
++
++static void ast2600_i3c_set_sir_enabled(struct dw_i3c_master *dw,
++ struct i3c_dev_desc *dev, u8 idx,
++ bool enable)
++{
++ struct ast2600_i3c *i3c = to_ast2600_i3c(dw);
++ struct ast2600_i3c_swdat_group *gp =
++ &i3c->dat_group[ADDR_GRP(dev->info.dyn_addr)];
++ unsigned long flags;
++ u32 reg;
++ bool global = false;
++
++ spin_lock_irqsave(&dw->devs_lock, flags);
++ if (enable) {
++ gp->mask.clr |= DEV_ADDR_TABLE_SIR_REJECT |
++ DEV_ADDR_TABLE_IBI_ADDR_MASK;
++
++ gp->mask.set &= ~DEV_ADDR_TABLE_SIR_REJECT;
++ gp->mask.set |= FIELD_PREP(DEV_ADDR_TABLE_IBI_ADDR_MASK,
++ IBI_ADDR_MASK_LAST_3BITS);
++ /*
++ * The AST2600 I3C controller will lock up on receiving 4n+1-byte IBIs
++ * if PEC is disabled. We have no way to restrict the length of the
++ * IBIs sent to the controller, so we need to unconditionally enable
++ * PEC checking, which means we drop a byte of payload data.
++ */
++ gp->mask.set |= DEV_ADDR_TABLE_IBI_PEC;
++ if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
++ gp->mask.set |= DEV_ADDR_TABLE_IBI_MDB;
++ } else {
++ reg = ast2600_i3c_get_swdat(dw, dev->info.dyn_addr);
++ reg |= DEV_ADDR_TABLE_SIR_REJECT;
++ ast2600_i3c_set_swdat(dw, dev->info.dyn_addr, reg);
++ }
++
++ reg = readl(dw->regs + IBI_SIR_REQ_REJECT);
++ if (enable) {
++ global = reg == 0xffffffff;
++ reg &= ~BIT(gp->hw_index);
++ } else {
++ int i;
++ bool hj_rejected = !!(readl(dw->regs + DEVICE_CTRL) &
++ DEV_CTRL_HOT_JOIN_NACK);
++ bool ibi_enable = false;
++
++ for (i = 0; i < NUM_OF_SWDATS_IN_GROUP; i++) {
++ if (!(gp->dat[i] & DEV_ADDR_TABLE_SIR_REJECT)) {
++ ibi_enable = true;
++ break;
++ }
++ }
++
++ if (!ibi_enable) {
++ reg |= BIT(gp->hw_index);
++ global = (reg == 0xffffffff) && hj_rejected;
++
++ gp->mask.set = DEV_ADDR_TABLE_SIR_REJECT;
++ }
++ }
++ writel(reg, dw->regs + IBI_SIR_REQ_REJECT);
++
++ if (global) {
++ reg = readl(dw->regs + INTR_STATUS_EN);
++ reg &= ~INTR_IBI_THLD_STAT;
++ if (enable)
++ reg |= INTR_IBI_THLD_STAT;
++ writel(reg, dw->regs + INTR_STATUS_EN);
++
++ reg = readl(dw->regs + INTR_SIGNAL_EN);
++ reg &= ~INTR_IBI_THLD_STAT;
++ if (enable)
++ reg |= INTR_IBI_THLD_STAT;
++ writel(reg, dw->regs + INTR_SIGNAL_EN);
++ }
++
++ ast2600_i3c_flush_swdat(dw, dev->info.dyn_addr);
++
++ spin_unlock_irqrestore(&dw->devs_lock, flags);
++}
++
++static void ast2600_i3c_set_ibi_dev(struct dw_i3c_master *dw,
++ struct i3c_dev_desc *dev)
++{
++ dw->devs[dev->info.dyn_addr].ibi_dev = dev;
++}
++
++static void ast2600_i3c_unset_ibi_dev(struct dw_i3c_master *dw,
++ struct i3c_dev_desc *dev)
++{
++ dw->devs[dev->info.dyn_addr].ibi_dev = NULL;
++}
++
++static struct i3c_dev_desc *ast2600_i3c_get_ibi_dev(struct dw_i3c_master *dw,
++ u8 addr)
++{
++ return dw->devs[addr].ibi_dev;
++}
++
+ static const struct dw_i3c_platform_ops ast2600_i3c_ops = {
+ .init = ast2600_i3c_init,
+ .set_dat_ibi = ast2600_i3c_set_dat_ibi,
++ .enter_sw_mode = ast2600_i3c_enter_sw_mode,
++ .exit_sw_mode = ast2600_i3c_exit_sw_mode,
++ .toggle_scl_in = ast2600_i3c_toggle_scl_in,
++ .gen_internal_stop = ast2600_i3c_gen_internal_stop,
++ .gen_target_reset_pattern = ast2600_i3c_gen_target_reset_pattern,
++ .gen_tbits_in = ast2600_i3c_gen_tbits_in,
++ .bus_recovery = aspeed_i3c_bus_recovery,
++ .set_ibi_mdb = ast2600_i3c_set_ibi_mdb,
++ .reattach_i3c_dev = ast2600_i3c_reattach_i3c_dev,
++ .attach_i3c_dev = ast2600_i3c_attach_i3c_dev,
++ .detach_i3c_dev = ast2600_i3c_detach_i3c_dev,
++ .attach_i2c_dev = ast2600_i3c_attach_i2c_dev,
++ .detach_i2c_dev = ast2600_i3c_detach_i2c_dev,
++ .get_addr_pos = ast2600_i3c_get_swdat_hw_pos,
++ .flush_dat = ast2600_i3c_flush_swdat,
++ .set_sir_enabled = ast2600_i3c_set_sir_enabled,
++ .set_ibi_dev = ast2600_i3c_set_ibi_dev,
++ .unset_ibi_dev = ast2600_i3c_unset_ibi_dev,
++ .get_ibi_dev = ast2600_i3c_get_ibi_dev,
+ };
+
+ static int ast2600_i3c_probe(struct platform_device *pdev)
+@@ -157,6 +762,10 @@ static int ast2600_i3c_probe(struct platform_device *pdev)
+
+ i3c->dw.platform_ops = &ast2600_i3c_ops;
+ i3c->dw.ibi_capable = true;
++ i3c->dw.base.pec_supported = true;
++
++ ast2600_i3c_init_swdat(&i3c->dw);
++
+ return dw_i3c_common_probe(&i3c->dw, pdev);
+ }
+
+diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
+index 235235613..85e20b517 100644
+--- a/drivers/i3c/master/dw-i3c-master.c
++++ b/drivers/i3c/master/dw-i3c-master.c
+@@ -6,11 +6,13 @@
+ */
+
+ #include <linux/bitops.h>
++#include <linux/bitfield.h>
+ #include <linux/clk.h>
+ #include <linux/completion.h>
+ #include <linux/err.h>
+ #include <linux/errno.h>
+ #include <linux/i3c/master.h>
++#include <linux/i3c/target.h>
+ #include <linux/interrupt.h>
+ #include <linux/ioport.h>
+ #include <linux/iopoll.h>
+@@ -20,31 +22,50 @@
+ #include <linux/platform_device.h>
+ #include <linux/reset.h>
+ #include <linux/slab.h>
++#include <dt-bindings/i3c/i3c.h>
+
+ #include "dw-i3c-master.h"
+
+ #define DEVICE_CTRL 0x0
+ #define DEV_CTRL_ENABLE BIT(31)
+ #define DEV_CTRL_RESUME BIT(30)
++#define DEV_CTRL_ABORT BIT(29)
++#define DEV_CTRL_IBI_PAYLOAD_EN BIT(9)
+ #define DEV_CTRL_HOT_JOIN_NACK BIT(8)
+ #define DEV_CTRL_I2C_SLAVE_PRESENT BIT(7)
++#define DEV_CTRL_IBA_INCLUDE BIT(0)
+
+ #define DEVICE_ADDR 0x4
+ #define DEV_ADDR_DYNAMIC_ADDR_VALID BIT(31)
+-#define DEV_ADDR_DYNAMIC(x) (((x) << 16) & GENMASK(22, 16))
++#define DEV_ADDR_DYNAMIC GENMASK(22, 16)
+
+ #define HW_CAPABILITY 0x8
++#define HW_CAP_SLV_HJ BIT(18)
++#define HW_CAP_HDR_TS BIT(4)
++#define HW_CAP_HDR_DDR BIT(3)
+ #define COMMAND_QUEUE_PORT 0xc
+ #define COMMAND_PORT_TOC BIT(30)
+ #define COMMAND_PORT_READ_TRANSFER BIT(28)
+ #define COMMAND_PORT_SDAP BIT(27)
+ #define COMMAND_PORT_ROC BIT(26)
++#define COMMAND_PORT_DBP(x) ((x) << 25)
+ #define COMMAND_PORT_SPEED(x) (((x) << 21) & GENMASK(23, 21))
++#define SPEED_I3C_SDR0 0x0
++#define SPEED_I3C_SDR1 0x1
++#define SPEED_I3C_SDR2 0x2
++#define SPEED_I3C_SDR3 0x3
++#define SPEED_I3C_SDR4 0x4
++#define SPEED_I3C_HDR_TS 0x5
++#define SPEED_I3C_HDR_DDR 0x6
++#define SPEED_I3C_I2C_FM 0x7
++#define SPEED_I2C_FM 0x0
++#define SPEED_I2C_FMP 0x1
+ #define COMMAND_PORT_DEV_INDEX(x) (((x) << 16) & GENMASK(20, 16))
+ #define COMMAND_PORT_CP BIT(15)
+ #define COMMAND_PORT_CMD(x) (((x) << 7) & GENMASK(14, 7))
+ #define COMMAND_PORT_TID(x) (((x) << 3) & GENMASK(6, 3))
+
++#define COMMAND_PORT_ARG_DBP(x) (((x) << 8) & GENMASK(15, 8))
+ #define COMMAND_PORT_ARG_DATA_LEN(x) (((x) << 16) & GENMASK(31, 16))
+ #define COMMAND_PORT_ARG_DATA_LEN_MAX 65536
+ #define COMMAND_PORT_TRANSFER_ARG 0x01
+@@ -72,10 +93,15 @@
+ #define RESPONSE_ERROR_TRANSF_ABORT 8
+ #define RESPONSE_ERROR_I2C_W_NACK_ERR 9
+ #define RESPONSE_PORT_TID(x) (((x) & GENMASK(27, 24)) >> 24)
++#define TID_TARGET_IBI 0b0001
++#define TID_TARGET_RD_DATA 0b0010
++#define TID_TARGET_MASTER_WR_DATA 0b1000
++#define TID_TARGET_MASTER_DEFSLVS 0b1111
+ #define RESPONSE_PORT_DATA_LEN(x) ((x) & GENMASK(15, 0))
+
+ #define RX_TX_DATA_PORT 0x14
+ #define IBI_QUEUE_STATUS 0x18
++#define IBI_QUEUE_STATUS_RSP_NACK BIT(31)
+ #define IBI_QUEUE_STATUS_IBI_ID(x) (((x) & GENMASK(15, 8)) >> 8)
+ #define IBI_QUEUE_STATUS_DATA_LEN(x) ((x) & GENMASK(7, 0))
+ #define IBI_QUEUE_IBI_ADDR(x) (IBI_QUEUE_STATUS_IBI_ID(x) >> 1)
+@@ -96,7 +122,8 @@
+ #define QUEUE_THLD_CTRL_RESP_BUF(x) (((x) - 1) << 8)
+
+ #define DATA_BUFFER_THLD_CTRL 0x20
+-#define DATA_BUFFER_THLD_CTRL_RX_BUF GENMASK(11, 8)
++#define DATA_BUFFER_THLD_TX_START GENMASK(18, 16)
++#define DATA_BUFFER_THLD_CTRL_RX_BUF GENMASK(10, 8)
+
+ #define IBI_QUEUE_CTRL 0x24
+ #define IBI_MR_REQ_REJECT 0x2C
+@@ -104,14 +131,29 @@
+ #define IBI_REQ_REJECT_ALL GENMASK(31, 0)
+
+ #define RESET_CTRL 0x34
++#define RESET_CTRL_BUS BIT(31)
++#define RESET_CTRL_BUS_RESET_TYPE GENMASK(30, 29)
++#define BUS_RESET_TYPE_EXIT 0b00
++#define BUS_RESET_TYPE_SCL_LOW 0b11
+ #define RESET_CTRL_IBI_QUEUE BIT(5)
+ #define RESET_CTRL_RX_FIFO BIT(4)
+ #define RESET_CTRL_TX_FIFO BIT(3)
+ #define RESET_CTRL_RESP_QUEUE BIT(2)
+ #define RESET_CTRL_CMD_QUEUE BIT(1)
+ #define RESET_CTRL_SOFT BIT(0)
++#define RESET_CTRL_XFER_QUEUES (RESET_CTRL_RX_FIFO | \
++ RESET_CTRL_TX_FIFO | \
++ RESET_CTRL_RESP_QUEUE | \
++ RESET_CTRL_CMD_QUEUE)
++#define RESET_CTRL_QUEUES (RESET_CTRL_IBI_QUEUE | \
++ RESET_CTRL_XFER_QUEUES)
+
+ #define SLV_EVENT_CTRL 0x38
++#define SLV_EVENT_CTRL_MWL_UPD BIT(7)
++#define SLV_EVENT_CTRL_MRL_UPD BIT(6)
++#define SLV_EVENT_CTRL_HJ_REQ BIT(3)
++#define SLV_EVENT_CTRL_SIR_EN BIT(0)
++
+ #define INTR_STATUS 0x3c
+ #define INTR_STATUS_EN 0x40
+ #define INTR_SIGNAL_EN 0x44
+@@ -146,6 +188,13 @@
+ #define INTR_MASTER_MASK (INTR_TRANSFER_ERR_STAT | \
+ INTR_RESP_READY_STAT)
+
++#define INTR_TARGET_MASK (INTR_READ_REQ_RECV_STAT | \
++ INTR_RESP_READY_STAT | \
++ INTR_IBI_UPDATED_STAT | \
++ INTR_TRANSFER_ERR_STAT | \
++ INTR_CCC_UPDATED_STAT | \
++ INTR_DYN_ADDR_ASSGN_STAT)
++
+ #define QUEUE_STATUS_LEVEL 0x4c
+ #define QUEUE_STATUS_IBI_STATUS_CNT(x) (((x) & GENMASK(28, 24)) >> 24)
+ #define QUEUE_STATUS_IBI_BUF_BLR(x) (((x) & GENMASK(23, 16)) >> 16)
+@@ -156,6 +205,15 @@
+ #define DATA_BUFFER_STATUS_LEVEL_TX(x) ((x) & GENMASK(7, 0))
+
+ #define PRESENT_STATE 0x54
++#define CM_TFR_ST_STS GENMASK(21, 16)
++#define CM_TFR_ST_STS_HALT 0x13
++#define CM_TFR_STS GENMASK(13, 8)
++#define CM_TFR_STS_MASTER_SERV_IBI 0xe
++#define CM_TFR_STS_MASTER_HALT 0xf
++#define CM_TFR_STS_SLAVE_HALT 0x6
++#define SDA_LINE_SIGNAL_LEVEL BIT(1)
++#define SCL_LINE_SIGNAL_LEVEL BIT(0)
++
+ #define CCC_DEVICE_STATUS 0x58
+ #define DEVICE_ADDR_TABLE_POINTER 0x5c
+ #define DEVICE_ADDR_TABLE_DEPTH(x) (((x) & GENMASK(31, 16)) >> 16)
+@@ -163,27 +221,46 @@
+
+ #define DEV_CHAR_TABLE_POINTER 0x60
+ #define VENDOR_SPECIFIC_REG_POINTER 0x6c
++#define SLV_MIPI_ID_VALUE 0x70
+ #define SLV_PID_VALUE 0x74
++#define SLV_PID_HI(x) (((x) >> 32) & GENMASK(15, 0))
++#define SLV_PID_LO(x) ((x) & GENMASK(31, 0))
+ #define SLV_CHAR_CTRL 0x78
++#define SLV_DCR GENMASK(15, 8)
++#define SLV_BCR GENMASK(7, 0)
++#define SLV_BCR_DEVICE_ROLE GENMASK(7, 6)
++
+ #define SLV_MAX_LEN 0x7c
++#define SLV_MAX_RD_LEN(x) (((x) & GENMASK(31, 16)) >> 16)
++#define SLV_MAX_WR_LEN(x) ((x) & GENMASK(15, 0))
++
+ #define MAX_READ_TURNAROUND 0x80
+ #define MAX_DATA_SPEED 0x84
+ #define SLV_DEBUG_STATUS 0x88
+ #define SLV_INTR_REQ 0x8c
++#define SLV_INTR_REQ_IBI_STS(x) (((x) & GENMASK(9, 8)) >> 8)
++#define IBI_STS_ACCEPTED 0x01
++#define IBI_STS_NOT_ATTEMPTED 0x11
++
+ #define DEVICE_CTRL_EXTENDED 0xb0
++#define DEVICE_CTRL_EXTENDED_MODE_MASK GENMASK(1, 0)
++#define DEVICE_CTRL_EXTENDED_MODE(x) ((x) & DEVICE_CTRL_EXTENDED_MODE_MASK)
++#define DEV_OPERATION_MODE_CONTROLLER 0x00
++#define DEV_OPERATION_MODE_TARGET 0x01
++
+ #define SCL_I3C_OD_TIMING 0xb4
+ #define SCL_I3C_PP_TIMING 0xb8
+-#define SCL_I3C_TIMING_HCNT(x) (((x) << 16) & GENMASK(23, 16))
+-#define SCL_I3C_TIMING_LCNT(x) ((x) & GENMASK(7, 0))
+-#define SCL_I3C_TIMING_CNT_MIN 5
++#define SCL_I3C_TIMING_HCNT GENMASK(23, 16)
++#define SCL_I3C_TIMING_LCNT GENMASK(7, 0)
++#define SCL_I3C_TIMING_CNT_MIN 5
+
+ #define SCL_I2C_FM_TIMING 0xbc
+-#define SCL_I2C_FM_TIMING_HCNT(x) (((x) << 16) & GENMASK(31, 16))
+-#define SCL_I2C_FM_TIMING_LCNT(x) ((x) & GENMASK(15, 0))
++#define SCL_I2C_FM_TIMING_HCNT GENMASK(31, 16)
++#define SCL_I2C_FM_TIMING_LCNT GENMASK(15, 0)
+
+ #define SCL_I2C_FMP_TIMING 0xc0
+-#define SCL_I2C_FMP_TIMING_HCNT(x) (((x) << 16) & GENMASK(23, 16))
+-#define SCL_I2C_FMP_TIMING_LCNT(x) ((x) & GENMASK(15, 0))
++#define SCL_I2C_FMP_TIMING_HCNT GENMASK(23, 16)
++#define SCL_I2C_FMP_TIMING_LCNT GENMASK(15, 0)
+
+ #define SCL_EXT_LCNT_TIMING 0xc8
+ #define SCL_EXT_LCNT_4(x) (((x) << 24) & GENMASK(31, 24))
+@@ -192,32 +269,53 @@
+ #define SCL_EXT_LCNT_1(x) ((x) & GENMASK(7, 0))
+
+ #define SCL_EXT_TERMN_LCNT_TIMING 0xcc
++#define SDA_HOLD_SWITCH_DLY_TIMING 0xd0
++#define SDA_TX_HOLD GENMASK(18, 16)
++#define SDA_TX_HOLD_MIN 0b001
++#define SDA_TX_HOLD_MAX 0b111
+ #define BUS_FREE_TIMING 0xd4
+-#define BUS_I3C_MST_FREE(x) ((x) & GENMASK(15, 0))
++#define BUS_AVAIL_TIME GENMASK(31, 16)
++#define MAX_BUS_AVAIL_CNT 0xffffU
++#define BUS_I3C_MST_FREE GENMASK(15, 0)
+
+ #define BUS_IDLE_TIMING 0xd8
++#define SCL_LOW_MST_EXT_TIMEOUT 0xdc
+ #define I3C_VER_ID 0xe0
+ #define I3C_VER_TYPE 0xe4
+ #define EXTENDED_CAPABILITY 0xe8
+ #define SLAVE_CONFIG 0xec
+
+-#define DEV_ADDR_TABLE_IBI_MDB BIT(12)
+-#define DEV_ADDR_TABLE_SIR_REJECT BIT(13)
+ #define DEV_ADDR_TABLE_LEGACY_I2C_DEV BIT(31)
+-#define DEV_ADDR_TABLE_DYNAMIC_ADDR(x) (((x) << 16) & GENMASK(23, 16))
+-#define DEV_ADDR_TABLE_STATIC_ADDR(x) ((x) & GENMASK(6, 0))
++#define DEV_ADDR_TABLE_DYNAMIC_ADDR GENMASK(23, 16)
++#define DEV_ADDR_TABLE_MR_REJECT BIT(14)
++#define DEV_ADDR_TABLE_SIR_REJECT BIT(13)
++#define DEV_ADDR_TABLE_IBI_MDB BIT(12)
++#define DEV_ADDR_TABLE_STATIC_ADDR GENMASK(6, 0)
+ #define DEV_ADDR_TABLE_LOC(start, idx) ((start) + ((idx) << 2))
+
+ #define I3C_BUS_SDR1_SCL_RATE 8000000
+ #define I3C_BUS_SDR2_SCL_RATE 6000000
+ #define I3C_BUS_SDR3_SCL_RATE 4000000
+ #define I3C_BUS_SDR4_SCL_RATE 2000000
++#define I3C_BUS_I2C_STD_SCL_RATE 100000
++#define I3C_BUS_I2C_STD_TLOW_MIN_NS 4700
++#define I3C_BUS_I2C_STD_THIGH_MIN_NS 4000
++#define I3C_BUS_I2C_STD_TR_MAX_NS 1000
++#define I3C_BUS_I2C_STD_TF_MAX_NS 300
+ #define I3C_BUS_I2C_FM_TLOW_MIN_NS 1300
++#define I3C_BUS_I2C_FM_THIGH_MIN_NS 600
++#define I3C_BUS_I2C_FM_TR_MAX_NS 300
++#define I3C_BUS_I2C_FM_TF_MAX_NS 300
+ #define I3C_BUS_I2C_FMP_TLOW_MIN_NS 500
++#define I3C_BUS_I2C_FMP_THIGH_MIN_NS 260
++#define I3C_BUS_I2C_FMP_TR_MAX_NS 120
++#define I3C_BUS_I2C_FMP_TF_MAX_NS 120
+ #define I3C_BUS_THIGH_MAX_NS 41
+
+ #define XFER_TIMEOUT (msecs_to_jiffies(1000))
+
++#define JESD403_TIMED_RESET_NS_DEF 52428800
++
+ struct dw_i3c_cmd {
+ u32 cmd_lo;
+ u32 cmd_hi;
+@@ -233,12 +331,7 @@ struct dw_i3c_xfer {
+ struct completion comp;
+ int ret;
+ unsigned int ncmds;
+- struct dw_i3c_cmd cmds[];
+-};
+-
+-struct dw_i3c_i2c_dev_data {
+- u8 index;
+- struct i3c_generic_ibi_pool *ibi_pool;
++ struct dw_i3c_cmd cmds[] __counted_by(ncmds);
+ };
+
+ static u8 even_parity(u8 p)
+@@ -280,6 +373,11 @@ static bool dw_i3c_master_supports_ccc_cmd(struct i3c_master_controller *m,
+ case I3C_CCC_GETSTATUS:
+ case I3C_CCC_GETMXDS:
+ case I3C_CCC_GETHDRCAP:
++ case I3C_CCC_SETAASA:
++ case I3C_CCC_SETHID:
++ case I3C_CCC_DBGACTION(true):
++ case I3C_CCC_DBGACTION(false):
++ case I3C_CCC_DBGOPCODE:
+ return true;
+ default:
+ return false;
+@@ -292,16 +390,123 @@ to_dw_i3c_master(struct i3c_master_controller *master)
+ return container_of(master, struct dw_i3c_master, base);
+ }
+
++static void dw_i3c_master_set_iba(struct dw_i3c_master *master, bool enable)
++{
++ u32 reg;
++
++ reg = readl(master->regs + DEVICE_CTRL);
++ reg &= ~DEV_CTRL_IBA_INCLUDE;
++ if (enable)
++ reg |= DEV_CTRL_IBA_INCLUDE;
++
++ writel(reg, master->regs + DEVICE_CTRL);
++}
++
+ static void dw_i3c_master_disable(struct dw_i3c_master *master)
+ {
++ if (!(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_ENABLE))
++ return;
++
++ if (master->base.target) {
++ master->platform_ops->enter_sw_mode(master);
++ master->platform_ops->gen_internal_stop(master);
++ }
++
+ writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_ENABLE,
+ master->regs + DEVICE_CTRL);
++
++ if (master->base.target) {
++ master->platform_ops->toggle_scl_in(master, 8);
++ if (readl(master->regs + DEVICE_CTRL) & DEV_CTRL_ENABLE) {
++ dev_warn(&master->base.dev,
++ "Failed to disable controller");
++ master->platform_ops->exit_sw_mode(master);
++ return;
++ }
++ master->platform_ops->exit_sw_mode(master);
++ }
+ }
+
+ static void dw_i3c_master_enable(struct dw_i3c_master *master)
+ {
++ u32 wait_enable_ns, clk_count;
++
++ if (master->base.target) {
++ master->platform_ops->enter_sw_mode(master);
++ master->platform_ops->gen_internal_stop(master);
++ }
++
+ writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_ENABLE,
+ master->regs + DEVICE_CTRL);
++
++ if (master->base.target) {
++ clk_count = FIELD_GET(BUS_AVAIL_TIME,
++ readl(master->regs + BUS_FREE_TIMING));
++ clk_count = max(clk_count, readl(master->regs + BUS_IDLE_TIMING));
++ wait_enable_ns = master->timing.core_period * clk_count;
++ udelay(DIV_ROUND_UP(wait_enable_ns, NSEC_PER_USEC));
++
++ master->platform_ops->toggle_scl_in(master, 8);
++ if (!(readl(master->regs + DEVICE_CTRL) & DEV_CTRL_ENABLE)) {
++ dev_warn(&master->base.dev,
++ "Failed to enable controller");
++ master->platform_ops->exit_sw_mode(master);
++ return;
++ }
++
++ master->platform_ops->gen_internal_stop(master);
++ master->platform_ops->exit_sw_mode(master);
++ }
++}
++
++static int dw_i3c_master_exit_halt(struct dw_i3c_master *master)
++{
++ u32 status;
++ u32 halt_state = CM_TFR_STS_MASTER_HALT;
++ int ret;
++
++ if (master->base.target)
++ halt_state = CM_TFR_STS_SLAVE_HALT;
++
++ writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME,
++ master->regs + DEVICE_CTRL);
++
++ ret = readl_poll_timeout_atomic(master->regs + PRESENT_STATE, status,
++ FIELD_GET(CM_TFR_STS, status) != halt_state,
++ 10, 1000000);
++
++ if (ret)
++ dev_err(&master->base.dev,
++ "Exit halt state failed: %d %#x %#x\n", ret,
++ readl(master->regs + PRESENT_STATE),
++ readl(master->regs + QUEUE_STATUS_LEVEL));
++ return ret;
++}
++
++static int dw_i3c_master_enter_halt(struct dw_i3c_master *master, bool by_sw)
++{
++ u32 status;
++ u32 halt_state = CM_TFR_STS_MASTER_HALT;
++ int ret;
++
++ if (master->base.target)
++ halt_state = CM_TFR_STS_SLAVE_HALT;
++
++ if (by_sw)
++ writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_ABORT,
++ master->regs + DEVICE_CTRL);
++
++ ret = readl_poll_timeout_atomic(master->regs + PRESENT_STATE, status,
++ FIELD_GET(CM_TFR_STS, status) == halt_state,
++ 10, 1000000);
++
++ if (ret)
++ dev_err(&master->base.dev,
++ "Enter halt state failed: %d %#x %#x\n", ret,
++ readl(master->regs + PRESENT_STATE),
++ readl(master->regs + QUEUE_STATUS_LEVEL));
++
++ return ret;
+ }
+
+ static int dw_i3c_master_get_addr_pos(struct dw_i3c_master *master, u8 addr)
+@@ -481,6 +686,9 @@ static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
+ }
+
+ for (i = 0; i < nresp; i++) {
++ if (xfer->cmds[i].error)
++ dev_err(&master->base.dev, "xfer error: %x\n",
++ xfer->cmds[i].error);
+ switch (xfer->cmds[i].error) {
+ case RESPONSE_NO_ERROR:
+ break;
+@@ -506,9 +714,14 @@ static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
+ complete(&xfer->comp);
+
+ if (ret < 0) {
++ /*
++ * The controller will enter the HALT state if an error occurs.
++ * Therefore, there is no need to manually halt the controller
++ * through software.
++ */
++ dw_i3c_master_enter_halt(master, false);
+ dw_i3c_master_dequeue_xfer_locked(master, xfer);
+- writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME,
+- master->regs + DEVICE_CTRL);
++ dw_i3c_master_exit_halt(master);
+ }
+
+ xfer = list_first_entry_or_null(&master->xferqueue.list,
+@@ -521,41 +734,76 @@ static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr)
+ dw_i3c_master_start_xfer_locked(master);
+ }
+
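++/*
++ * Derive the I2C SCL high/low counts for the requested rate from the
++ * Standard/Fast/Fast-mode-Plus tLOW/tHIGH minimums (including rise/fall
++ * allowances), splitting the remaining clock budget evenly between the
++ * two phases.
++ */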
++static int calc_i2c_clk(struct dw_i3c_master *master, unsigned long fscl,
++ u16 *hcnt, u16 *lcnt)
++{
++ unsigned long core_rate, core_period;
++ u32 period_cnt, margin;
++ u32 hcnt_min, lcnt_min;
++
++ core_rate = master->timing.core_rate;
++ core_period = master->timing.core_period;
++
++ if (fscl <= I3C_BUS_I2C_STD_SCL_RATE) {
++ lcnt_min = DIV_ROUND_UP(I3C_BUS_I2C_STD_TLOW_MIN_NS +
++ I3C_BUS_I2C_STD_TF_MAX_NS,
++ core_period);
++ hcnt_min = DIV_ROUND_UP(I3C_BUS_I2C_STD_THIGH_MIN_NS +
++ I3C_BUS_I2C_STD_TR_MAX_NS,
++ core_period);
++ } else if (fscl <= I3C_BUS_I2C_FM_SCL_RATE) {
++ lcnt_min = DIV_ROUND_UP(I3C_BUS_I2C_FM_TLOW_MIN_NS +
++ I3C_BUS_I2C_FM_TF_MAX_NS,
++ core_period);
++ hcnt_min = DIV_ROUND_UP(I3C_BUS_I2C_FM_THIGH_MIN_NS +
++ I3C_BUS_I2C_FM_TR_MAX_NS,
++ core_period);
++ } else {
++ lcnt_min = DIV_ROUND_UP(I3C_BUS_I2C_FMP_TLOW_MIN_NS +
++ I3C_BUS_I2C_FMP_TF_MAX_NS,
++ core_period);
++ hcnt_min = DIV_ROUND_UP(I3C_BUS_I2C_FMP_THIGH_MIN_NS +
++ I3C_BUS_I2C_FMP_TR_MAX_NS,
++ core_period);
++ }
++
++ period_cnt = DIV_ROUND_UP(core_rate, fscl);
++ margin = (period_cnt - hcnt_min - lcnt_min) >> 1;
++ *lcnt = lcnt_min + margin;
++ *hcnt = max(period_cnt - *lcnt, hcnt_min);
++
++ return 0;
++}
++
+ static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
+ {
+ unsigned long core_rate, core_period;
+ u32 scl_timing;
+ u8 hcnt, lcnt;
+
+- core_rate = clk_get_rate(master->core_clk);
+- if (!core_rate)
+- return -EINVAL;
+-
+- core_period = DIV_ROUND_UP(1000000000, core_rate);
+-
+- hcnt = DIV_ROUND_UP(I3C_BUS_THIGH_MAX_NS, core_period) - 1;
+- if (hcnt < SCL_I3C_TIMING_CNT_MIN)
+- hcnt = SCL_I3C_TIMING_CNT_MIN;
++ core_rate = master->timing.core_rate;
++ core_period = master->timing.core_period;
+
+- lcnt = DIV_ROUND_UP(core_rate, master->base.bus.scl_rate.i3c) - hcnt;
+- if (lcnt < SCL_I3C_TIMING_CNT_MIN)
+- lcnt = SCL_I3C_TIMING_CNT_MIN;
++ if (master->timing.i3c_pp_scl_high && master->timing.i3c_pp_scl_low) {
++ hcnt = DIV_ROUND_CLOSEST(master->timing.i3c_pp_scl_high,
++ core_period);
++ lcnt = DIV_ROUND_CLOSEST(master->timing.i3c_pp_scl_low,
++ core_period);
++ } else {
++ hcnt = DIV_ROUND_UP(I3C_BUS_THIGH_MAX_NS, core_period) - 1;
++ if (hcnt < SCL_I3C_TIMING_CNT_MIN)
++ hcnt = SCL_I3C_TIMING_CNT_MIN;
++
++ lcnt = DIV_ROUND_UP(core_rate, master->base.bus.scl_rate.i3c) -
++ hcnt;
++ if (lcnt < SCL_I3C_TIMING_CNT_MIN)
++ lcnt = SCL_I3C_TIMING_CNT_MIN;
++ }
+
+- scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
++ scl_timing = FIELD_PREP(SCL_I3C_TIMING_HCNT, hcnt) |
++ FIELD_PREP(SCL_I3C_TIMING_LCNT, lcnt);
+ writel(scl_timing, master->regs + SCL_I3C_PP_TIMING);
+
+- /*
+- * In pure i3c mode, MST_FREE represents tCAS. In shared mode, this
+- * will be set up by dw_i2c_clk_cfg as tLOW.
+- */
+- if (master->base.bus.mode == I3C_BUS_MODE_PURE)
+- writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
+-
+- lcnt = max_t(u8,
+- DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, core_period), lcnt);
+- scl_timing = SCL_I3C_TIMING_HCNT(hcnt) | SCL_I3C_TIMING_LCNT(lcnt);
+- writel(scl_timing, master->regs + SCL_I3C_OD_TIMING);
+-
+ lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR1_SCL_RATE) - hcnt;
+ scl_timing = SCL_EXT_LCNT_1(lcnt);
+ lcnt = DIV_ROUND_UP(core_rate, I3C_BUS_SDR2_SCL_RATE) - hcnt;
+@@ -566,6 +814,27 @@ static int dw_i3c_clk_cfg(struct dw_i3c_master *master)
+ scl_timing |= SCL_EXT_LCNT_4(lcnt);
+ writel(scl_timing, master->regs + SCL_EXT_LCNT_TIMING);
+
++ if (master->timing.i3c_od_scl_high && master->timing.i3c_od_scl_low) {
++ hcnt = DIV_ROUND_CLOSEST(master->timing.i3c_od_scl_high,
++ core_period);
++ lcnt = DIV_ROUND_CLOSEST(master->timing.i3c_od_scl_low,
++ core_period);
++ } else if (master->base.bus.context == I3C_BUS_CONTEXT_JESD403) {
++ u16 hcnt_fmp, lcnt_fmp;
++
++ calc_i2c_clk(master, I3C_BUS_I2C_FM_PLUS_SCL_RATE, &hcnt_fmp,
++ &lcnt_fmp);
++ hcnt = min_t(u8, hcnt_fmp, FIELD_MAX(SCL_I3C_TIMING_HCNT));
++ lcnt = min_t(u8, lcnt_fmp, FIELD_MAX(SCL_I3C_TIMING_LCNT));
++ } else {
++ lcnt = max_t(u8,
++ DIV_ROUND_UP(I3C_BUS_TLOW_OD_MIN_NS, core_period),
++ lcnt);
++ }
++ scl_timing = FIELD_PREP(SCL_I3C_TIMING_HCNT, hcnt) |
++ FIELD_PREP(SCL_I3C_TIMING_LCNT, lcnt);
++ writel(scl_timing, master->regs + SCL_I3C_OD_TIMING);
++
+ return 0;
+ }
+
+@@ -575,58 +844,162 @@ static int dw_i2c_clk_cfg(struct dw_i3c_master *master)
+ u16 hcnt, lcnt;
+ u32 scl_timing;
+
+- core_rate = clk_get_rate(master->core_clk);
+- if (!core_rate)
+- return -EINVAL;
+-
+- core_period = DIV_ROUND_UP(1000000000, core_rate);
++ core_rate = master->timing.core_rate;
++ core_period = master->timing.core_period;
+
+- lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FMP_TLOW_MIN_NS, core_period);
+- hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_PLUS_SCL_RATE) - lcnt;
+- scl_timing = SCL_I2C_FMP_TIMING_HCNT(hcnt) |
+- SCL_I2C_FMP_TIMING_LCNT(lcnt);
++ calc_i2c_clk(master, I3C_BUS_I2C_FM_PLUS_SCL_RATE, &hcnt, &lcnt);
++ scl_timing = FIELD_PREP(SCL_I2C_FMP_TIMING_HCNT, hcnt) |
++ FIELD_PREP(SCL_I2C_FMP_TIMING_LCNT, lcnt);
+ writel(scl_timing, master->regs + SCL_I2C_FMP_TIMING);
+
+- lcnt = DIV_ROUND_UP(I3C_BUS_I2C_FM_TLOW_MIN_NS, core_period);
+- hcnt = DIV_ROUND_UP(core_rate, I3C_BUS_I2C_FM_SCL_RATE) - lcnt;
+- scl_timing = SCL_I2C_FM_TIMING_HCNT(hcnt) |
+- SCL_I2C_FM_TIMING_LCNT(lcnt);
++ calc_i2c_clk(master, master->base.bus.scl_rate.i2c, &hcnt, &lcnt);
++ scl_timing = FIELD_PREP(SCL_I2C_FM_TIMING_HCNT, hcnt) |
++ FIELD_PREP(SCL_I2C_FM_TIMING_LCNT, lcnt);
+ writel(scl_timing, master->regs + SCL_I2C_FM_TIMING);
+
+- writel(BUS_I3C_MST_FREE(lcnt), master->regs + BUS_FREE_TIMING);
+- writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_I2C_SLAVE_PRESENT,
+- master->regs + DEVICE_CTRL);
++ return 0;
++}
++
++static int dw_i3c_bus_clk_cfg(struct i3c_master_controller *m)
++{
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++ struct i3c_bus *bus = i3c_master_get_bus(m);
++ int ret;
++ u16 lcnt;
++
++ ret = dw_i2c_clk_cfg(master);
++ if (ret)
++ return ret;
++
++ ret = dw_i3c_clk_cfg(master);
++ if (ret)
++ return ret;
++
++ /*
++ * I3C register 0xd4[15:0] BUS_FREE_TIMING used to control several parameters:
++ * - tCAS & tCASr (tHD_STA in JESD403)
++ * - tCBP & tCBPr (tSU_STO in JESD403)
++ * - bus free time between a STOP condition and a START condition
++ *
++ * The constraints of these parameters differ in various bus contexts:
++ * JESD403 : BUS_FREE_TIMING = I3C OD SCL low period
++ * MIPI I3C, pure bus : BUS_FREE_TIMING = I3C PP SCL low period
++ * MIPI I3C, mixed bus: BUS_FREE_TIMING = I2C FM SCL low period
++ */
++ if (bus->context == I3C_BUS_CONTEXT_JESD403) {
++ lcnt = FIELD_GET(SCL_I3C_TIMING_LCNT,
++ readl(master->regs + SCL_I3C_OD_TIMING));
++ } else {
++ if (bus->mode == I3C_BUS_MODE_PURE) {
++ lcnt = FIELD_GET(SCL_I3C_TIMING_LCNT,
++ readl(master->regs + SCL_I3C_PP_TIMING));
++ } else {
++ writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_I2C_SLAVE_PRESENT,
++ master->regs + DEVICE_CTRL);
++
++ lcnt = FIELD_GET(SCL_I2C_FM_TIMING_LCNT,
++ readl(master->regs + SCL_I2C_FM_TIMING));
++ }
++ }
++
++ writel(FIELD_PREP(BUS_I3C_MST_FREE, lcnt), master->regs + BUS_FREE_TIMING);
++
++ return 0;
++}
++
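++/*
++ * Bring the controller up in target mode: size the RX buffer from the
++ * read-only SLV_MAX_LEN register, program the PID/DCR characteristics,
++ * unmask the target interrupts and switch DEVICE_CTRL_EXTENDED into
++ * target operation before re-enabling the block.
++ */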
++static int dw_i3c_target_bus_init(struct i3c_master_controller *m)
++{
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++ struct i3c_dev_desc *desc = master->base.this;
++ void *rx_buf;
++ u32 reg;
++ int ret;
++
++ ret = dw_i3c_bus_clk_cfg(m);
++ if (ret)
++ return ret;
++
++ reg = readl(master->regs + SLV_MAX_LEN);
++ /*
++ * Set max private write length value based on read-only register.
++ * TODO: Handle updates after receiving SETMWL CCC.
++ */
++ master->target.rx.max_len = SLV_MAX_WR_LEN(reg);
++
++ rx_buf = kzalloc(master->target.rx.max_len, GFP_KERNEL);
++ if (!rx_buf)
++ return -ENOMEM;
++
++ master->target.rx.buf = rx_buf;
++
++ dw_i3c_master_disable(master);
++
++ reg = readl(master->regs + QUEUE_THLD_CTRL) & ~QUEUE_THLD_CTRL_RESP_BUF_MASK;
++ writel(reg, master->regs + QUEUE_THLD_CTRL);
++
++ reg = readl(master->regs + DATA_BUFFER_THLD_CTRL) & ~DATA_BUFFER_THLD_CTRL_RX_BUF;
++ writel(reg, master->regs + DATA_BUFFER_THLD_CTRL);
++
++ writel(INTR_ALL, master->regs + INTR_STATUS);
++ writel(INTR_TARGET_MASK, master->regs + INTR_STATUS_EN);
++ writel(INTR_TARGET_MASK, master->regs + INTR_SIGNAL_EN);
++
++ reg = readl(master->regs + DEVICE_CTRL_EXTENDED) & ~DEVICE_CTRL_EXTENDED_MODE_MASK;
++ reg |= DEVICE_CTRL_EXTENDED_MODE(DEV_OPERATION_MODE_TARGET);
++ writel(reg, master->regs + DEVICE_CTRL_EXTENDED);
++
++ writel(SLV_PID_LO(desc->info.pid), master->regs + SLV_PID_VALUE);
++ writel(SLV_PID_HI(desc->info.pid), master->regs + SLV_MIPI_ID_VALUE);
++
++ reg = readl(master->regs + SLV_CHAR_CTRL);
++ reg &= ~(SLV_DCR | SLV_BCR_DEVICE_ROLE);
++ reg |= FIELD_PREP(SLV_DCR, desc->info.dcr) |
++ FIELD_PREP(SLV_BCR_DEVICE_ROLE, 0);
++ writel(reg, master->regs + SLV_CHAR_CTRL);
++
++ reg = FIELD_GET(SLV_BCR, reg);
++ if (reg & I3C_BCR_IBI_PAYLOAD) {
++ reg = readl(master->regs + DEVICE_CTRL);
++ reg |= DEV_CTRL_IBI_PAYLOAD_EN;
++ writel(reg, master->regs + DEVICE_CTRL);
++ }
++
++ reg = readl(master->regs + BUS_FREE_TIMING) |
++ FIELD_PREP(BUS_AVAIL_TIME, MAX_BUS_AVAIL_CNT);
++ writel(reg, master->regs + BUS_FREE_TIMING);
++ /* Clear the Hot-Join request before enabling the controller */
++ writel(0, master->regs + SLV_EVENT_CTRL);
++
++ dw_i3c_master_enable(master);
+
+ return 0;
+ }
+
++static void dw_i3c_target_bus_cleanup(struct i3c_master_controller *m)
++{
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++
++ dw_i3c_master_disable(master);
++ kfree(master->target.rx.buf);
++}
++
+ static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
+ {
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+- struct i3c_bus *bus = i3c_master_get_bus(m);
+ struct i3c_device_info info = { };
+- u32 thld_ctrl;
++ u32 thld_ctrl, caps;
+ int ret;
+
+ ret = master->platform_ops->init(master);
+ if (ret)
+ return ret;
+
+- switch (bus->mode) {
+- case I3C_BUS_MODE_MIXED_FAST:
+- case I3C_BUS_MODE_MIXED_LIMITED:
+- ret = dw_i2c_clk_cfg(master);
+- if (ret)
+- return ret;
+- fallthrough;
+- case I3C_BUS_MODE_PURE:
+- ret = dw_i3c_clk_cfg(master);
+- if (ret)
+- return ret;
+- break;
+- default:
+- return -EINVAL;
+- }
++ spin_lock_init(&master->devs_lock);
++
++ ret = dw_i3c_bus_clk_cfg(m);
++ if (ret)
++ return ret;
+
+ thld_ctrl = readl(master->regs + QUEUE_THLD_CTRL);
+ thld_ctrl &= ~(QUEUE_THLD_CTRL_RESP_BUF_MASK |
+@@ -648,12 +1021,22 @@ static int dw_i3c_master_bus_init(struct i3c_master_controller *m)
+ if (ret < 0)
+ return ret;
+
+- writel(DEV_ADDR_DYNAMIC_ADDR_VALID | DEV_ADDR_DYNAMIC(ret),
++ writel(DEV_ADDR_DYNAMIC_ADDR_VALID | FIELD_PREP(DEV_ADDR_DYNAMIC, ret),
+ master->regs + DEVICE_ADDR);
+
+ memset(&info, 0, sizeof(info));
+ info.dyn_addr = ret;
+
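++ /* Advertise HDR modes based on the controller's HW_CAPABILITY register */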
++ caps = readl(master->regs + HW_CAPABILITY);
++ if (caps & HW_CAP_HDR_DDR)
++ info.hdr_cap |= BIT(I3C_HDR_DDR);
++ if (caps & HW_CAP_HDR_TS) {
++ if (readl(master->regs + DEVICE_CTRL) & DEV_CTRL_I2C_SLAVE_PRESENT)
++ info.hdr_cap |= BIT(I3C_HDR_TSL);
++ else
++ info.hdr_cap |= BIT(I3C_HDR_TSP);
++ }
++
+ ret = i3c_master_set_info(&master->base, &info);
+ if (ret)
+ return ret;
+@@ -677,17 +1060,40 @@ static void dw_i3c_master_bus_cleanup(struct i3c_master_controller *m)
+ dw_i3c_master_disable(master);
+ }
+
++static int dw_i3c_master_bus_reset(struct i3c_master_controller *m)
++{
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++ int ret;
++
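++ /* Halt the controller, have the platform layer drive the target reset pattern, then resume */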
++ ret = dw_i3c_master_enter_halt(master, true);
++ if (ret) {
++ dev_err(&master->base.dev,
++ "Failed to perform timed reset! Controller state %08x\n",
++ readl(master->regs + PRESENT_STATE));
++ return -EBUSY;
++ }
++
++ master->platform_ops->gen_target_reset_pattern(master);
++ dw_i3c_master_exit_halt(master);
++
++ return 0;
++}
++
+ static int dw_i3c_ccc_set(struct dw_i3c_master *master,
+ struct i3c_ccc_cmd *ccc)
+ {
+ struct dw_i3c_xfer *xfer;
+ struct dw_i3c_cmd *cmd;
++ u32 sda_lvl_pre, sda_lvl_post;
+ int ret, pos = 0;
+
+ if (ccc->id & I3C_CCC_DIRECT) {
+- pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);
++ pos = master->platform_ops->get_addr_pos(master,
++ ccc->dests[0].addr);
+ if (pos < 0)
+ return pos;
++
++ master->platform_ops->flush_dat(master, ccc->dests[0].addr);
+ }
+
+ xfer = dw_i3c_master_alloc_xfer(master, 1);
+@@ -699,17 +1105,33 @@ static int dw_i3c_ccc_set(struct dw_i3c_master *master,
+ cmd->tx_len = ccc->dests[0].payload.len;
+
+ cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |
+- COMMAND_PORT_TRANSFER_ARG;
++ COMMAND_PORT_TRANSFER_ARG | COMMAND_PORT_ARG_DBP(ccc->db);
+
+ cmd->cmd_lo = COMMAND_PORT_CP |
+ COMMAND_PORT_DEV_INDEX(pos) |
+ COMMAND_PORT_CMD(ccc->id) |
+ COMMAND_PORT_TOC |
+- COMMAND_PORT_ROC;
++ COMMAND_PORT_ROC |
++ COMMAND_PORT_DBP(ccc->dbp);
++
++ if (ccc->id == I3C_CCC_SETHID || ccc->id == I3C_CCC_DEVCTRL)
++ cmd->cmd_lo |= COMMAND_PORT_SPEED(SPEED_I3C_I2C_FM);
+
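++ /* Sample SDA before the transfer so a timeout can tell a stuck-low bus from a plain NACK */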
++ sda_lvl_pre = FIELD_GET(SDA_LINE_SIGNAL_LEVEL,
++ readl(master->regs + PRESENT_STATE));
+ dw_i3c_master_enqueue_xfer(master, xfer);
+- if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
++ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT)) {
++ dw_i3c_master_enter_halt(master, true);
+ dw_i3c_master_dequeue_xfer(master, xfer);
++ sda_lvl_post = FIELD_GET(SDA_LINE_SIGNAL_LEVEL,
++ readl(master->regs + PRESENT_STATE));
++ if (sda_lvl_pre == 0 && sda_lvl_post == 0) {
++ dev_warn(&master->base.dev,
++ "SDA stuck low! Try to recover the bus...\n");
++ master->platform_ops->bus_recovery(master);
++ }
++ dw_i3c_master_exit_halt(master);
++ }
+
+ ret = xfer->ret;
+ if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK)
+@@ -724,12 +1146,15 @@ static int dw_i3c_ccc_get(struct dw_i3c_master *master, struct i3c_ccc_cmd *ccc)
+ {
+ struct dw_i3c_xfer *xfer;
+ struct dw_i3c_cmd *cmd;
++ u32 sda_lvl_pre, sda_lvl_post;
+ int ret, pos;
+
+- pos = dw_i3c_master_get_addr_pos(master, ccc->dests[0].addr);
++ pos = master->platform_ops->get_addr_pos(master, ccc->dests[0].addr);
+ if (pos < 0)
+ return pos;
+
++ master->platform_ops->flush_dat(master, ccc->dests[0].addr);
++
+ xfer = dw_i3c_master_alloc_xfer(master, 1);
+ if (!xfer)
+ return -ENOMEM;
+@@ -739,18 +1164,31 @@ static int dw_i3c_ccc_get(struct dw_i3c_master *master, struct i3c_ccc_cmd *ccc)
+ cmd->rx_len = ccc->dests[0].payload.len;
+
+ cmd->cmd_hi = COMMAND_PORT_ARG_DATA_LEN(ccc->dests[0].payload.len) |
+- COMMAND_PORT_TRANSFER_ARG;
++ COMMAND_PORT_TRANSFER_ARG | COMMAND_PORT_ARG_DBP(ccc->db);
+
+ cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER |
+ COMMAND_PORT_CP |
+ COMMAND_PORT_DEV_INDEX(pos) |
+ COMMAND_PORT_CMD(ccc->id) |
+ COMMAND_PORT_TOC |
+- COMMAND_PORT_ROC;
++ COMMAND_PORT_ROC |
++ COMMAND_PORT_DBP(ccc->dbp);
+
++ sda_lvl_pre = FIELD_GET(SDA_LINE_SIGNAL_LEVEL,
++ readl(master->regs + PRESENT_STATE));
+ dw_i3c_master_enqueue_xfer(master, xfer);
+- if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
++ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT)) {
++ dw_i3c_master_enter_halt(master, true);
+ dw_i3c_master_dequeue_xfer(master, xfer);
++ sda_lvl_post = FIELD_GET(SDA_LINE_SIGNAL_LEVEL,
++ readl(master->regs + PRESENT_STATE));
++ if (sda_lvl_pre == 0 && sda_lvl_post == 0) {
++ dev_warn(&master->base.dev,
++ "SDA stuck low! Try to recover the bus...\n");
++ master->platform_ops->bus_recovery(master);
++ }
++ dw_i3c_master_exit_halt(master);
++ }
+
+ ret = xfer->ret;
+ if (xfer->cmds[0].error == RESPONSE_ERROR_IBA_NACK)
+@@ -782,7 +1220,7 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ struct dw_i3c_xfer *xfer;
+ struct dw_i3c_cmd *cmd;
+- u32 olddevs, newdevs;
++ u32 olddevs, newdevs, sda_lvl_pre, sda_lvl_post;
+ u8 p, last_addr = 0;
+ int ret, pos;
+
+@@ -802,7 +1240,7 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
+ last_addr = ret;
+ ret |= (p << 7);
+
+- writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(ret),
++ writel(FIELD_PREP(DEV_ADDR_TABLE_DYNAMIC_ADDR, ret),
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, pos));
+ }
+@@ -825,9 +1263,21 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
+ COMMAND_PORT_TOC |
+ COMMAND_PORT_ROC;
+
++ sda_lvl_pre = FIELD_GET(SDA_LINE_SIGNAL_LEVEL,
++ readl(master->regs + PRESENT_STATE));
+ dw_i3c_master_enqueue_xfer(master, xfer);
+- if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
++ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT)) {
++ dw_i3c_master_enter_halt(master, true);
+ dw_i3c_master_dequeue_xfer(master, xfer);
++ sda_lvl_post = FIELD_GET(SDA_LINE_SIGNAL_LEVEL,
++ readl(master->regs + PRESENT_STATE));
++ if (sda_lvl_pre == 0 && sda_lvl_post == 0) {
++ dev_warn(&master->base.dev,
++ "SDA stuck low! Try to recover the bus...\n");
++ master->platform_ops->bus_recovery(master);
++ }
++ dw_i3c_master_exit_halt(master);
++ }
+
+ newdevs = GENMASK(master->maxdevs - cmd->rx_len - 1, 0);
+ newdevs &= ~olddevs;
+@@ -835,6 +1285,16 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
+ for (pos = 0; pos < master->maxdevs; pos++) {
+ if (newdevs & BIT(pos))
+ i3c_master_add_i3c_dev_locked(m, master->devs[pos].addr);
++
++ /* cleanup the free HW DATs */
++ if (master->free_pos & BIT(pos)) {
++ u32 dat = DEV_ADDR_TABLE_SIR_REJECT |
++ DEV_ADDR_TABLE_MR_REJECT;
++
++ writel(dat,
++ master->regs +
++ DEV_ADDR_TABLE_LOC(master->datstartaddr, pos));
++ }
+ }
+
+ dw_i3c_master_free_xfer(xfer);
+@@ -851,6 +1311,7 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ unsigned int nrxwords = 0, ntxwords = 0;
+ struct dw_i3c_xfer *xfer;
++ u32 sda_lvl_pre, sda_lvl_post;
+ int i, ret = 0;
+
+ if (!i3c_nxfers)
+@@ -874,6 +1335,8 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ if (!xfer)
+ return -ENOMEM;
+
++ master->platform_ops->flush_dat(master, dev->info.dyn_addr);
++
+ for (i = 0; i < i3c_nxfers; i++) {
+ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
+
+@@ -901,9 +1364,21 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ cmd->cmd_lo |= COMMAND_PORT_TOC;
+ }
+
++ sda_lvl_pre = FIELD_GET(SDA_LINE_SIGNAL_LEVEL,
++ readl(master->regs + PRESENT_STATE));
+ dw_i3c_master_enqueue_xfer(master, xfer);
+- if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
++ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT)) {
++ dw_i3c_master_enter_halt(master, true);
+ dw_i3c_master_dequeue_xfer(master, xfer);
++ sda_lvl_post = FIELD_GET(SDA_LINE_SIGNAL_LEVEL,
++ readl(master->regs + PRESENT_STATE));
++ if (sda_lvl_pre == 0 && sda_lvl_post == 0) {
++ dev_warn(&master->base.dev,
++ "SDA stuck low! Try to recover the bus...\n");
++ master->platform_ops->bus_recovery(master);
++ }
++ dw_i3c_master_exit_halt(master);
++ }
+
+ for (i = 0; i < i3c_nxfers; i++) {
+ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
+@@ -918,81 +1393,405 @@ static int dw_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
+ return ret;
+ }
+
+-static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
+- u8 old_dyn_addr)
++static int dw_i3c_master_send_hdr_cmds(struct i3c_dev_desc *dev,
++ struct i3c_hdr_cmd *cmds, int ncmds)
+ {
+- struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+- int pos;
++ u8 dat_index;
++ int ret, i, ntxwords = 0, nrxwords = 0;
++ u32 sda_lvl_pre, sda_lvl_post;
++ struct dw_i3c_xfer *xfer;
+
+- pos = dw_i3c_master_get_free_pos(master);
++ if (ncmds < 1)
++ return 0;
+
+- if (data->index > pos && pos > 0) {
+- writel(0,
+- master->regs +
+- DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
++ dev_dbg(&master->base.dev, "ncmds = %x", ncmds);
+
+- master->devs[data->index].addr = 0;
+- master->free_pos |= BIT(data->index);
++ if (ncmds > master->caps.cmdfifodepth)
++ return -EOPNOTSUPP;
+
+- data->index = pos;
+- master->devs[pos].addr = dev->info.dyn_addr;
+- master->free_pos &= ~BIT(pos);
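++ /* Bit 7 of the HDR command code selects a read; data FIFO entries hold two 16-bit HDR-DDR words */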
++ for (i = 0; i < ncmds; i++) {
++ dev_dbg(&master->base.dev, "cmds[%d] mode = %x", i,
++ cmds[i].mode);
++ if (cmds[i].mode != I3C_HDR_DDR)
++ return -EOPNOTSUPP;
++ if (cmds[i].code & 0x80)
++ nrxwords += DIV_ROUND_UP(cmds[i].ndatawords, 2);
++ else
++ ntxwords += DIV_ROUND_UP(cmds[i].ndatawords, 2);
+ }
++ dev_dbg(&master->base.dev, "ntxwords = %x, nrxwords = %x", ntxwords,
++ nrxwords);
++ if (ntxwords > master->caps.datafifodepth ||
++ nrxwords > master->caps.datafifodepth)
++ return -EOPNOTSUPP;
+
+- writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
+- master->regs +
+- DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
++ xfer = dw_i3c_master_alloc_xfer(master, ncmds);
++ if (!xfer)
++ return -ENOMEM;
+
+- master->devs[data->index].addr = dev->info.dyn_addr;
++ for (i = 0; i < ncmds; i++) {
++ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
+
+- return 0;
+-}
++ dat_index = master->platform_ops->get_addr_pos(master, dev->info.dyn_addr);
+
+-static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
+-{
+- struct i3c_master_controller *m = i3c_dev_get_master(dev);
+- struct dw_i3c_master *master = to_dw_i3c_master(m);
+- struct dw_i3c_i2c_dev_data *data;
+- int pos;
++ if (dat_index < 0)
++ return dat_index;
++ master->platform_ops->flush_dat(master, dev->info.dyn_addr);
+
+- pos = dw_i3c_master_get_free_pos(master);
+- if (pos < 0)
+- return pos;
++ cmd->cmd_hi =
++ COMMAND_PORT_ARG_DATA_LEN(cmds[i].ndatawords << 1) |
++ COMMAND_PORT_TRANSFER_ARG;
+
+- data = kzalloc(sizeof(*data), GFP_KERNEL);
+- if (!data)
+- return -ENOMEM;
++ if (cmds[i].code & 0x80) {
++ cmd->rx_buf = cmds[i].data.in;
++ cmd->rx_len = cmds[i].ndatawords << 1;
++ cmd->cmd_lo = COMMAND_PORT_READ_TRANSFER |
++ COMMAND_PORT_CP |
++ COMMAND_PORT_CMD(cmds[i].code) |
++ COMMAND_PORT_SPEED(SPEED_I3C_HDR_DDR);
+
+- data->index = pos;
+- master->devs[pos].addr = dev->info.dyn_addr ? : dev->info.static_addr;
+- master->free_pos &= ~BIT(pos);
+- i3c_dev_set_master_data(dev, data);
++ } else {
++ cmd->tx_buf = cmds[i].data.out;
++ cmd->tx_len = cmds[i].ndatawords << 1;
++ cmd->cmd_lo = COMMAND_PORT_CP |
++ COMMAND_PORT_CMD(cmds[i].code) |
++ COMMAND_PORT_SPEED(SPEED_I3C_HDR_DDR);
++ }
+
+- writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr),
+- master->regs +
+- DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
++ cmd->cmd_lo |= COMMAND_PORT_TID(i) |
++ COMMAND_PORT_DEV_INDEX(dat_index) |
++ COMMAND_PORT_ROC;
+
+- return 0;
+-}
++ if (i == (ncmds - 1))
++ cmd->cmd_lo |= COMMAND_PORT_TOC;
+
+-static void dw_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+-{
+- struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+- struct i3c_master_controller *m = i3c_dev_get_master(dev);
+- struct dw_i3c_master *master = to_dw_i3c_master(m);
++ dev_dbg(&master->base.dev,
++ "%s:cmd_hi=0x%08x cmd_lo=0x%08x tx_len=%d rx_len=%d\n",
++ __func__, cmd->cmd_hi, cmd->cmd_lo, cmd->tx_len,
++ cmd->rx_len);
++ }
+
+- writel(0,
+- master->regs +
+- DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
++ sda_lvl_pre = FIELD_GET(SDA_LINE_SIGNAL_LEVEL,
++ readl(master->regs + PRESENT_STATE));
++ dw_i3c_master_enqueue_xfer(master, xfer);
++ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT)) {
++ dw_i3c_master_enter_halt(master, true);
++ dw_i3c_master_dequeue_xfer(master, xfer);
++ sda_lvl_post = FIELD_GET(SDA_LINE_SIGNAL_LEVEL,
++ readl(master->regs + PRESENT_STATE));
++ if (sda_lvl_pre == 0 && sda_lvl_post == 0) {
++ dev_warn(&master->base.dev,
++ "SDA stuck low! Try to recover the bus...\n");
++ master->platform_ops->bus_recovery(master);
++ }
++ dw_i3c_master_exit_halt(master);
++ }
+
+- i3c_dev_set_master_data(dev, NULL);
+- master->devs[data->index].addr = 0;
+- master->free_pos |= BIT(data->index);
++ for (i = 0; i < ncmds; i++) {
++ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
++
++ if (cmds[i].code & 0x80)
++ cmds[i].ndatawords = DIV_ROUND_UP(cmd->rx_len, 2);
++ }
++
++ ret = xfer->ret;
++ dw_i3c_master_free_xfer(xfer);
++
++ return ret;
++}
++
++static int dw_i3c_target_priv_xfers(struct i3c_dev_desc *dev,
++ struct i3c_priv_xfer *i3c_xfers,
++ int i3c_nxfers)
++{
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++ struct dw_i3c_xfer *xfer;
++ int i;
++
++ if (!i3c_nxfers)
++ return 0;
++
++ xfer = dw_i3c_master_alloc_xfer(master, i3c_nxfers);
++ if (!xfer)
++ return -ENOMEM;
++
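++ /* Only writes are queued here: data is preloaded into the TX FIFO for the active controller to read later */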
++ for (i = 0; i < i3c_nxfers; i++) {
++ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
++
++ if (!i3c_xfers[i].rnw) {
++ cmd->tx_buf = i3c_xfers[i].data.out;
++ cmd->tx_len = i3c_xfers[i].len;
++ cmd->cmd_lo = 0 | (i << 3) | (cmd->tx_len << 16);
++
++ dw_i3c_master_wr_tx_fifo(master, cmd->tx_buf, cmd->tx_len);
++ writel(cmd->cmd_lo, master->regs + COMMAND_QUEUE_PORT);
++ }
++ }
++
++ dw_i3c_master_free_xfer(xfer);
++
++ return 0;
++}
++
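++/* Toggle the core reset and re-run target bus init; used when a transfer stalls beyond recovery */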
++static int dw_i3c_target_reset_controller(struct dw_i3c_master *master)
++{
++ int ret;
++
++ ret = reset_control_assert(master->core_rst);
++ if (ret)
++ return ret;
++
++ ret = reset_control_deassert(master->core_rst);
++ if (ret)
++ return ret;
++
++ return dw_i3c_target_bus_init(&master->base);
++}
++
++static int dw_i3c_target_generate_ibi(struct i3c_dev_desc *dev, const u8 *data,
++ int len)
++{
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++ u32 reg;
++ int ret;
++
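++ /* Only a bare IBI can be raised here; any payload is staged separately via pending_read_notify */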
++ if (data || len != 0)
++ return -EOPNOTSUPP;
++
++ reg = readl(master->regs + SLV_EVENT_CTRL);
++ if ((reg & SLV_EVENT_CTRL_SIR_EN) == 0)
++ return -EPERM;
++
++ init_completion(&master->target.comp);
++ writel(1, master->regs + SLV_INTR_REQ);
++
++ if (!wait_for_completion_timeout(&master->target.comp, XFER_TIMEOUT)) {
++ dev_warn(&master->base.dev, "Timeout waiting for completion: Reset controller\n");
++ kfree(master->target.rx.buf);
++
++ ret = dw_i3c_target_reset_controller(master);
++ if (ret)
++ dev_warn(&master->base.dev, "Reset controller failure: %d\n", ret);
++
++ return -EINVAL;
++ }
++
++ reg = readl(master->regs + SLV_INTR_REQ);
++ if (SLV_INTR_REQ_IBI_STS(reg) != IBI_STS_ACCEPTED) {
++ reg = readl(master->regs + SLV_EVENT_CTRL);
++ if ((reg & SLV_EVENT_CTRL_SIR_EN) == 0)
++ dev_warn(&master->base.dev, "SIR is disabled by master\n");
++ return -EACCES;
++ }
++
++ return 0;
++}
++
++static int dw_i3c_target_reset_queue(struct dw_i3c_master *master)
++{
++ int ret;
++ u32 status;
++
++ dw_i3c_master_disable(master);
++ writel(RESET_CTRL_XFER_QUEUES, master->regs + RESET_CTRL);
++ ret = readl_poll_timeout_atomic(master->regs + RESET_CTRL, status,
++ !status, 10, 1000000);
++ if (ret)
++ dev_err(&master->base.dev, "Reset %#x failed: %d\n", status,
++ ret);
++
++ dw_i3c_master_enable(master);
++
++ return ret;
++}
++
++static int dw_i3c_target_pending_read_notify(struct i3c_dev_desc *dev,
++ struct i3c_priv_xfer *pending_read,
++ struct i3c_priv_xfer *ibi_notify)
++{
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++ struct dw_i3c_xfer *xfer;
++ struct dw_i3c_cmd *cmd;
++ int ret;
++ u32 reg;
++ u8 mdb;
++
++ if (!pending_read || !ibi_notify)
++ return -EINVAL;
++
++ reg = readl(master->regs + SLV_EVENT_CTRL);
++ if ((reg & SLV_EVENT_CTRL_SIR_EN) == 0)
++ return -EPERM;
++
++ xfer = dw_i3c_master_alloc_xfer(master, 2);
++ if (!xfer)
++ return -ENOMEM;
++
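++ /* Preload the IBI MDB and the pending-read data, raise the IBI, then wait for the controller to read */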
++ mdb = *(u8 *)ibi_notify->data.out;
++ master->platform_ops->set_ibi_mdb(master, mdb);
++
++ /* Put IBI command & data into the command & data queues */
++ cmd = &xfer->cmds[0];
++ cmd->tx_buf = ibi_notify->data.out;
++ cmd->tx_len = ibi_notify->len;
++ cmd->cmd_lo = COMMAND_PORT_TID(TID_TARGET_IBI) | (cmd->tx_len << 16);
++ dw_i3c_master_wr_tx_fifo(master, cmd->tx_buf, cmd->tx_len);
++ writel(cmd->cmd_lo, master->regs + COMMAND_QUEUE_PORT);
++
++ /* Put pending-read command & data into the command & data queues */
++ cmd = &xfer->cmds[1];
++ cmd->tx_buf = pending_read->data.out;
++ cmd->tx_len = pending_read->len;
++ cmd->cmd_lo = COMMAND_PORT_TID(TID_TARGET_RD_DATA) |
++ (cmd->tx_len << 16);
++ dw_i3c_master_wr_tx_fifo(master, cmd->tx_buf, cmd->tx_len);
++ writel(cmd->cmd_lo, master->regs + COMMAND_QUEUE_PORT);
++
++ dw_i3c_master_free_xfer(xfer);
++ init_completion(&master->target.rdata_comp);
++
++ ret = dw_i3c_target_generate_ibi(dev, NULL, 0);
++ if (ret) {
++ dev_warn(&master->base.dev, "Timeout waiting for completion: IBI MDB\n");
++ return -EINVAL;
++ }
++
++ if (!wait_for_completion_timeout(&master->target.rdata_comp,
++ XFER_TIMEOUT)) {
++ dev_warn(&master->base.dev, "Timeout waiting for completion: pending read data\n");
++ dw_i3c_target_reset_queue(master);
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static bool dw_i3c_target_is_ibi_enabled(struct i3c_dev_desc *dev)
++{
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++ u32 reg;
++
++ reg = readl(master->regs + SLV_EVENT_CTRL);
++ return !!(reg & SLV_EVENT_CTRL_SIR_EN);
++}
++
++static u8 dw_i3c_target_get_dyn_addr(struct i3c_master_controller *m)
++{
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++ u32 reg;
++
++ reg = readl(master->regs + DEVICE_ADDR);
++ if (reg & DEV_ADDR_DYNAMIC_ADDR_VALID)
++ return FIELD_GET(DEV_ADDR_DYNAMIC, reg);
++ return 0;
++}
++
++static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
++ u8 old_dyn_addr)
++{
++ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++ int pos;
++
++ pos = dw_i3c_master_get_free_pos(master);
++
++ if (data->index > pos && pos > 0) {
++ writel(0,
++ master->regs +
++ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
++
++ master->devs[data->index].addr = 0;
++ master->free_pos |= BIT(data->index);
++
++ data->index = pos;
++ master->devs[pos].addr = dev->info.dyn_addr;
++ master->free_pos &= ~BIT(pos);
++ }
++
++ writel(FIELD_PREP(DEV_ADDR_TABLE_DYNAMIC_ADDR, dev->info.dyn_addr),
++ master->regs +
++ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
++
++ master->devs[data->index].addr = dev->info.dyn_addr;
++
++ return 0;
++}
++
++static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
++{
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++ struct dw_i3c_i2c_dev_data *data;
++ int pos;
++
++ pos = dw_i3c_master_get_free_pos(master);
++ if (pos < 0)
++ return pos;
++
++ data = kzalloc(sizeof(*data), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ data->index = pos;
++ master->devs[pos].addr = dev->info.dyn_addr ? : dev->info.static_addr;
++ master->free_pos &= ~BIT(pos);
++ i3c_dev_set_master_data(dev, data);
++
++ writel(FIELD_PREP(DEV_ADDR_TABLE_DYNAMIC_ADDR, master->devs[pos].addr),
++ master->regs +
++ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
++
++ return 0;
++}
++
++static void dw_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
++{
++ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++
++ writel(0,
++ master->regs +
++ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
++
++ i3c_dev_set_master_data(dev, NULL);
++ master->devs[data->index].addr = 0;
++ master->free_pos |= BIT(data->index);
+ kfree(data);
+ }
+
++static int dw_i3c_common_reattach_i3c_dev(struct i3c_dev_desc *dev,
++ u8 old_dyn_addr)
++{
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++
++ return master->platform_ops->reattach_i3c_dev(dev, old_dyn_addr);
++}
++
++static int dw_i3c_common_attach_i3c_dev(struct i3c_dev_desc *dev)
++{
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++
++ return master->platform_ops->attach_i3c_dev(dev);
++}
++
++static void dw_i3c_common_detach_i3c_dev(struct i3c_dev_desc *dev)
++{
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++
++ master->platform_ops->detach_i3c_dev(dev);
++}
++
+ static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ const struct i2c_msg *i2c_xfers,
+ int i2c_nxfers)
+@@ -1002,6 +1801,7 @@ static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ unsigned int nrxwords = 0, ntxwords = 0;
+ struct dw_i3c_xfer *xfer;
++ u32 sda_lvl_pre, sda_lvl_post;
+ int i, ret = 0;
+
+ if (!i2c_nxfers)
+@@ -1021,10 +1821,18 @@ static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ nrxwords > master->caps.datafifodepth)
+ return -ENOTSUPP;
+
++ if (ntxwords == 0 && nrxwords == 0) {
++ dev_warn(&master->base.dev,
++ "Transfers w/o data bytes are not supported");
++ return -ENOTSUPP;
++ }
++
+ xfer = dw_i3c_master_alloc_xfer(master, i2c_nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
++ master->platform_ops->flush_dat(master, dev->addr);
++
+ for (i = 0; i < i2c_nxfers; i++) {
+ struct dw_i3c_cmd *cmd = &xfer->cmds[i];
+
+@@ -1048,9 +1856,21 @@ static int dw_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
+ cmd->cmd_lo |= COMMAND_PORT_TOC;
+ }
+
++ sda_lvl_pre = FIELD_GET(SDA_LINE_SIGNAL_LEVEL,
++ readl(master->regs + PRESENT_STATE));
+ dw_i3c_master_enqueue_xfer(master, xfer);
+- if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT))
++ if (!wait_for_completion_timeout(&xfer->comp, XFER_TIMEOUT)) {
++ dw_i3c_master_enter_halt(master, true);
+ dw_i3c_master_dequeue_xfer(master, xfer);
++ sda_lvl_post = FIELD_GET(SDA_LINE_SIGNAL_LEVEL,
++ readl(master->regs + PRESENT_STATE));
++ if (sda_lvl_pre == 0 && sda_lvl_post == 0) {
++ dev_warn(&master->base.dev,
++ "SDA stuck low! Try to recover the bus...\n");
++ master->platform_ops->bus_recovery(master);
++ }
++ dw_i3c_master_exit_halt(master);
++ }
+
+ ret = xfer->ret;
+ dw_i3c_master_free_xfer(xfer);
+@@ -1079,7 +1899,7 @@ static int dw_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
+ i2c_dev_set_master_data(dev, data);
+
+ writel(DEV_ADDR_TABLE_LEGACY_I2C_DEV |
+- DEV_ADDR_TABLE_STATIC_ADDR(dev->addr),
++ FIELD_PREP(DEV_ADDR_TABLE_STATIC_ADDR, dev->addr),
+ master->regs +
+ DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
+
+@@ -1102,6 +1922,22 @@ static void dw_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+ kfree(data);
+ }
+
++static int dw_i3c_common_attach_i2c_dev(struct i2c_dev_desc *dev)
++{
++ struct i3c_master_controller *m = i2c_dev_get_master(dev);
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++
++ return master->platform_ops->attach_i2c_dev(dev);
++}
++
++static void dw_i3c_common_detach_i2c_dev(struct i2c_dev_desc *dev)
++{
++ struct i3c_master_controller *m = i2c_dev_get_master(dev);
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++
++ master->platform_ops->detach_i2c_dev(dev);
++}
++
+ static int dw_i3c_master_request_ibi(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+ {
+@@ -1115,7 +1951,7 @@ static int dw_i3c_master_request_ibi(struct i3c_dev_desc *dev,
+ return PTR_ERR(data->ibi_pool);
+
+ spin_lock_irqsave(&master->devs_lock, flags);
+- master->devs[data->index].ibi_dev = dev;
++ master->platform_ops->set_ibi_dev(master, dev);
+ spin_unlock_irqrestore(&master->devs_lock, flags);
+
+ return 0;
+@@ -1129,13 +1965,31 @@ static void dw_i3c_master_free_ibi(struct i3c_dev_desc *dev)
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->devs_lock, flags);
+- master->devs[data->index].ibi_dev = NULL;
++ master->platform_ops->unset_ibi_dev(master, dev);
+ spin_unlock_irqrestore(&master->devs_lock, flags);
+
+ i3c_generic_ibi_free_pool(data->ibi_pool);
+ data->ibi_pool = NULL;
+ }
+
++/* Enable/Disable the IBI interrupt signal and status */
++static void dw_i3c_master_set_ibi_signal(struct dw_i3c_master *master, bool enable)
++{
++ u32 reg;
++
++ reg = readl(master->regs + INTR_STATUS_EN);
++ reg &= ~INTR_IBI_THLD_STAT;
++ if (enable)
++ reg |= INTR_IBI_THLD_STAT;
++ writel(reg, master->regs + INTR_STATUS_EN);
++
++ reg = readl(master->regs + INTR_SIGNAL_EN);
++ reg &= ~INTR_IBI_THLD_STAT;
++ if (enable)
++ reg |= INTR_IBI_THLD_STAT;
++ writel(reg, master->regs + INTR_SIGNAL_EN);
++}
++
+ static void dw_i3c_master_set_sir_enabled(struct dw_i3c_master *master,
+ struct i3c_dev_desc *dev,
+ u8 idx, bool enable)
+@@ -1170,23 +2024,34 @@ static void dw_i3c_master_set_sir_enabled(struct dw_i3c_master *master,
+ }
+ writel(reg, master->regs + IBI_SIR_REQ_REJECT);
+
+- if (global) {
+- reg = readl(master->regs + INTR_STATUS_EN);
+- reg &= ~INTR_IBI_THLD_STAT;
+- if (enable)
+- reg |= INTR_IBI_THLD_STAT;
+- writel(reg, master->regs + INTR_STATUS_EN);
++ if (global)
++ dw_i3c_master_set_ibi_signal(master, enable);
+
+- reg = readl(master->regs + INTR_SIGNAL_EN);
+- reg &= ~INTR_IBI_THLD_STAT;
+- if (enable)
+- reg |= INTR_IBI_THLD_STAT;
+- writel(reg, master->regs + INTR_SIGNAL_EN);
+- }
+
+ spin_unlock_irqrestore(&master->devs_lock, flags);
+ }
+
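++/* Hot-Join acceptance is gated by DEV_CTRL_HOT_JOIN_NACK: clear it to ACK Hot-Join requests */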
++static int dw_i3c_master_enable_hotjoin(struct i3c_master_controller *m)
++{
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++
++ dw_i3c_master_set_ibi_signal(master, true);
++ writel(readl(master->regs + DEVICE_CTRL) & ~DEV_CTRL_HOT_JOIN_NACK,
++ master->regs + DEVICE_CTRL);
++
++ return 0;
++}
++
++static int dw_i3c_master_disable_hotjoin(struct i3c_master_controller *m)
++{
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++
++ writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_HOT_JOIN_NACK,
++ master->regs + DEVICE_CTRL);
++
++ return 0;
++}
++
+ static int dw_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
+ {
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+@@ -1194,12 +2059,12 @@ static int dw_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+ int rc;
+
+- dw_i3c_master_set_sir_enabled(master, dev, data->index, true);
++ master->platform_ops->set_sir_enabled(master, dev, data->index, true);
+
+ rc = i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+
+ if (rc)
+- dw_i3c_master_set_sir_enabled(master, dev, data->index, false);
++ master->platform_ops->set_sir_enabled(master, dev, data->index, false);
+
+ return rc;
+ }
+@@ -1209,13 +2074,9 @@ static int dw_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
+ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct dw_i3c_master *master = to_dw_i3c_master(m);
+- int rc;
+-
+- rc = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+- if (rc)
+- return rc;
+
+- dw_i3c_master_set_sir_enabled(master, dev, data->index, false);
++ i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
++ master->platform_ops->set_sir_enabled(master, dev, data->index, false);
+
+ return 0;
+ }
+@@ -1244,8 +2105,9 @@ static void dw_i3c_master_handle_ibi_sir(struct dw_i3c_master *master,
+ struct i3c_ibi_slot *slot;
+ struct i3c_dev_desc *dev;
+ unsigned long flags;
++ u32 state;
+ u8 addr, len;
+- int idx;
++ bool terminate_ibi = false;
+
+ addr = IBI_QUEUE_IBI_ADDR(status);
+ len = IBI_QUEUE_STATUS_DATA_LEN(status);
+@@ -1262,17 +2124,12 @@ static void dw_i3c_master_handle_ibi_sir(struct dw_i3c_master *master,
+ */
+
+ spin_lock_irqsave(&master->devs_lock, flags);
+- idx = dw_i3c_master_get_addr_pos(master, addr);
+- if (idx < 0) {
+- dev_dbg_ratelimited(&master->base.dev,
+- "IBI from unknown addr 0x%x\n", addr);
+- goto err_drain;
+- }
+
+- dev = master->devs[idx].ibi_dev;
++ dev = master->platform_ops->get_ibi_dev(master, addr);
+ if (!dev || !dev->ibi) {
+ dev_dbg_ratelimited(&master->base.dev,
+- "IBI from non-requested dev idx %d\n", idx);
++ "IBI from non-requested dev addr %02x\n",
++ addr);
+ goto err_drain;
+ }
+
+@@ -1288,6 +2145,7 @@ static void dw_i3c_master_handle_ibi_sir(struct dw_i3c_master *master,
+ dev_dbg_ratelimited(&master->base.dev,
+ "IBI payload len %d greater than max %d\n",
+ len, dev->ibi->max_payload_len);
++ terminate_ibi = true;
+ goto err_drain;
+ }
+
+@@ -1303,6 +2161,9 @@ static void dw_i3c_master_handle_ibi_sir(struct dw_i3c_master *master,
+
+ err_drain:
+ dw_i3c_master_drain_ibi_queue(master, len);
++ state = FIELD_GET(CM_TFR_STS, readl(master->regs + PRESENT_STATE));
++ if (terminate_ibi && state == CM_TFR_STS_MASTER_SERV_IBI)
++ master->platform_ops->gen_tbits_in(master);
+
+ spin_unlock_irqrestore(&master->devs_lock, flags);
+ }
+@@ -1315,17 +2176,34 @@ static void dw_i3c_master_irq_handle_ibis(struct dw_i3c_master *master)
+ {
+ unsigned int i, len, n_ibis;
+ u32 reg;
++ int ret;
+
+ reg = readl(master->regs + QUEUE_STATUS_LEVEL);
+ n_ibis = QUEUE_STATUS_IBI_STATUS_CNT(reg);
+ if (!n_ibis)
+ return;
+
++ if (n_ibis > 16) {
++ dev_err(&master->base.dev,
++ "The n_ibis %d surpasses the tolerance level for the IBI buffer\n",
++ n_ibis);
++ goto ibi_fifo_clear;
++ }
++
+ for (i = 0; i < n_ibis; i++) {
+ reg = readl(master->regs + IBI_QUEUE_STATUS);
+
++ if (reg & IBI_QUEUE_STATUS_RSP_NACK) {
++ dev_dbg_ratelimited(&master->base.dev,
++ "Nacked IBI from non-requested dev addr %02lx\n",
++ IBI_QUEUE_IBI_ADDR(reg));
++ goto ibi_fifo_clear;
++ }
++
+ if (IBI_TYPE_SIRQ(reg)) {
+ dw_i3c_master_handle_ibi_sir(master, reg);
++ } else if (IBI_TYPE_HJ(reg)) {
++ queue_work(master->base.wq, &master->hj_work);
+ } else {
+ len = IBI_QUEUE_STATUS_DATA_LEN(reg);
+ dev_info(&master->base.dev,
+@@ -1334,6 +2212,68 @@ static void dw_i3c_master_irq_handle_ibis(struct dw_i3c_master *master)
+ dw_i3c_master_drain_ibi_queue(master, len);
+ }
+ }
++
++ return;
++
++ibi_fifo_clear:
++ dw_i3c_master_enter_halt(master, true);
++ writel(RESET_CTRL_IBI_QUEUE, master->regs + RESET_CTRL);
++ ret = readl_poll_timeout_atomic(master->regs + RESET_CTRL, reg, !reg,
++ 10, 1000000);
++ if (ret)
++ dev_err(&master->base.dev,
++ "Timeout waiting for IBI FIFO reset\n");
++
++ dw_i3c_master_exit_halt(master);
++}
++
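++/* Mirror MRL/MWL limits received via SETMRL/SETMWL CCCs and release the engine from the halt state */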
++static void dw_i3c_target_handle_ccc_update(struct dw_i3c_master *master)
++{
++ u32 event = readl(master->regs + SLV_EVENT_CTRL);
++ u32 reg = readl(master->regs + SLV_MAX_LEN);
++ u32 present_state = readl(master->regs + PRESENT_STATE);
++
++ if (event & SLV_EVENT_CTRL_MRL_UPD)
++ master->base.this->info.max_read_len = SLV_MAX_RD_LEN(reg);
++
++ if (event & SLV_EVENT_CTRL_MWL_UPD) {
++ master->base.this->info.max_write_len = SLV_MAX_WR_LEN(reg);
++ master->target.rx.max_len =
++ master->base.this->info.max_write_len;
++ }
++ writel(event, master->regs + SLV_EVENT_CTRL);
++
++ /* The I3C engine enters the halt state when it receives SETMRL/SETMWL CCCs */
++ if (FIELD_GET(CM_TFR_STS, present_state) == CM_TFR_STS_SLAVE_HALT)
++ dw_i3c_master_exit_halt(master);
++
++ writel(INTR_CCC_UPDATED_STAT, master->regs + INTR_STATUS);
++}
++
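++/* Drain the response queue: controller writes go to the read handler, pending-read completions wake the waiter */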
++static void dw_i3c_target_handle_response_ready(struct dw_i3c_master *master)
++{
++ struct i3c_dev_desc *desc = master->base.this;
++ u32 reg = readl(master->regs + QUEUE_STATUS_LEVEL);
++ u32 nresp = QUEUE_STATUS_LEVEL_RESP(reg);
++ int i;
++
++ for (i = 0; i < nresp; i++) {
++ u32 resp = readl(master->regs + RESPONSE_QUEUE_PORT);
++ u32 nbytes = RESPONSE_PORT_DATA_LEN(resp);
++ u8 tid = RESPONSE_PORT_TID(resp);
++
++ if (nbytes > master->target.rx.max_len) {
++ dev_warn(&master->base.dev, "private write data length is larger than max\n");
++ return;
++ }
++
++ dw_i3c_master_read_rx_fifo(master, master->target.rx.buf, nbytes);
++
++ if (tid == TID_TARGET_MASTER_WR_DATA && desc->target_info.read_handler)
++ desc->target_info.read_handler(desc->dev, master->target.rx.buf, nbytes);
++ else if (tid == TID_TARGET_RD_DATA)
++ complete(&master->target.rdata_comp);
++ }
+ }
+
+ static irqreturn_t dw_i3c_master_irq_handler(int irq, void *dev_id)
+@@ -1348,27 +2288,102 @@ static irqreturn_t dw_i3c_master_irq_handler(int irq, void *dev_id)
+ return IRQ_NONE;
+ }
+
+- spin_lock(&master->xferqueue.lock);
+- dw_i3c_master_end_xfer_locked(master, status);
+- if (status & INTR_TRANSFER_ERR_STAT)
+- writel(INTR_TRANSFER_ERR_STAT, master->regs + INTR_STATUS);
+- spin_unlock(&master->xferqueue.lock);
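++ /* Target mode handles DA assignment, CCC updates, IBI completion and responses; controller mode keeps the transfer/IBI path below */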
++ if (master->base.target) {
++ if (status & INTR_DYN_ADDR_ASSGN_STAT) {
++ u32 reg = readl(master->regs + DEVICE_ADDR);
+
+- if (status & INTR_IBI_THLD_STAT)
+- dw_i3c_master_irq_handle_ibis(master);
++ master->base.this->info.dyn_addr =
++ FIELD_GET(DEV_ADDR_DYNAMIC, reg);
++ writel(INTR_DYN_ADDR_ASSGN_STAT,
++ master->regs + INTR_STATUS);
++ }
++
++ if (status & INTR_CCC_UPDATED_STAT)
++ dw_i3c_target_handle_ccc_update(master);
++
++ if (status & INTR_IBI_UPDATED_STAT) {
++ writel(INTR_IBI_UPDATED_STAT, master->regs + INTR_STATUS);
++ complete(&master->target.comp);
++ }
++
++ if (status & INTR_READ_REQ_RECV_STAT) {
++ /*
++ * TODO: Pass this information to the driver to take
++ * appropriate action.
++ */
++ dev_dbg(&master->base.dev,
++ "private read received from controller when cmd queue is empty\n");
++ writel(INTR_READ_REQ_RECV_STAT, master->regs + INTR_STATUS);
++ }
++
++ if (status & INTR_RESP_READY_STAT)
++ dw_i3c_target_handle_response_ready(master);
++ } else {
++ if (status & INTR_RESP_READY_STAT ||
++ status & INTR_TRANSFER_ERR_STAT) {
++ spin_lock(&master->xferqueue.lock);
++ dw_i3c_master_end_xfer_locked(master, status);
++ if (status & INTR_TRANSFER_ERR_STAT)
++ writel(INTR_TRANSFER_ERR_STAT, master->regs + INTR_STATUS);
++ spin_unlock(&master->xferqueue.lock);
++ }
++
++ if (status & INTR_IBI_THLD_STAT)
++ dw_i3c_master_irq_handle_ibis(master);
++ }
+
+ return IRQ_HANDLED;
+ }
+
++static int dw_i3c_target_hj_req(struct i3c_dev_desc *dev)
++{
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct dw_i3c_master *master = to_dw_i3c_master(m);
++
++ if (!(readl(master->regs + HW_CAPABILITY) & HW_CAP_SLV_HJ)) {
++ dev_err(&master->base.dev, "HJ not supported");
++ return -EOPNOTSUPP;
++ }
++
++ if (readl(master->regs + DEVICE_ADDR) & DEV_ADDR_DYNAMIC_ADDR_VALID) {
++ dev_err(&master->base.dev, "DA already assigned");
++ return -EACCES;
++ }
++
++ writel(SLV_EVENT_CTRL_HJ_REQ, master->regs + SLV_EVENT_CTRL);
++
++ return 0;
++}
++
++static bool dw_i3c_target_is_hj_enabled(struct i3c_dev_desc *dev)
++{
++ /* The DW I3C controller has no hardware flag that reports whether Hot-Join is enabled */
++ return true;
++}
++
++static const struct i3c_target_ops dw_mipi_i3c_target_ops = {
++ .bus_init = dw_i3c_target_bus_init,
++ .hj_req = dw_i3c_target_hj_req,
++ .bus_cleanup = dw_i3c_target_bus_cleanup,
++ .priv_xfers = dw_i3c_target_priv_xfers,
++ .generate_ibi = dw_i3c_target_generate_ibi,
++ .pending_read_notify = dw_i3c_target_pending_read_notify,
++ .is_hj_enabled = dw_i3c_target_is_hj_enabled,
++ .is_ibi_enabled = dw_i3c_target_is_ibi_enabled,
++ .get_dyn_addr = dw_i3c_target_get_dyn_addr,
++};
++
+ static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
+ .bus_init = dw_i3c_master_bus_init,
+ .bus_cleanup = dw_i3c_master_bus_cleanup,
++ .bus_reset = dw_i3c_master_bus_reset,
+ .attach_i3c_dev = dw_i3c_master_attach_i3c_dev,
+ .reattach_i3c_dev = dw_i3c_master_reattach_i3c_dev,
+ .detach_i3c_dev = dw_i3c_master_detach_i3c_dev,
+ .do_daa = dw_i3c_master_daa,
+ .supports_ccc_cmd = dw_i3c_master_supports_ccc_cmd,
+ .send_ccc_cmd = dw_i3c_master_send_ccc_cmd,
++ .send_hdr_cmds = dw_i3c_master_send_hdr_cmds,
+ .priv_xfers = dw_i3c_master_priv_xfers,
+ .attach_i2c_dev = dw_i3c_master_attach_i2c_dev,
+ .detach_i2c_dev = dw_i3c_master_detach_i2c_dev,
+@@ -1378,21 +2393,25 @@ static const struct i3c_master_controller_ops dw_mipi_i3c_ops = {
+ static const struct i3c_master_controller_ops dw_mipi_i3c_ibi_ops = {
+ .bus_init = dw_i3c_master_bus_init,
+ .bus_cleanup = dw_i3c_master_bus_cleanup,
+- .attach_i3c_dev = dw_i3c_master_attach_i3c_dev,
+- .reattach_i3c_dev = dw_i3c_master_reattach_i3c_dev,
+- .detach_i3c_dev = dw_i3c_master_detach_i3c_dev,
++ .bus_reset = dw_i3c_master_bus_reset,
++ .attach_i3c_dev = dw_i3c_common_attach_i3c_dev,
++ .reattach_i3c_dev = dw_i3c_common_reattach_i3c_dev,
++ .detach_i3c_dev = dw_i3c_common_detach_i3c_dev,
+ .do_daa = dw_i3c_master_daa,
+ .supports_ccc_cmd = dw_i3c_master_supports_ccc_cmd,
+ .send_ccc_cmd = dw_i3c_master_send_ccc_cmd,
++ .send_hdr_cmds = dw_i3c_master_send_hdr_cmds,
+ .priv_xfers = dw_i3c_master_priv_xfers,
+- .attach_i2c_dev = dw_i3c_master_attach_i2c_dev,
+- .detach_i2c_dev = dw_i3c_master_detach_i2c_dev,
++ .attach_i2c_dev = dw_i3c_common_attach_i2c_dev,
++ .detach_i2c_dev = dw_i3c_common_detach_i2c_dev,
+ .i2c_xfers = dw_i3c_master_i2c_xfers,
+ .request_ibi = dw_i3c_master_request_ibi,
+ .free_ibi = dw_i3c_master_free_ibi,
+ .enable_ibi = dw_i3c_master_enable_ibi,
+ .disable_ibi = dw_i3c_master_disable_ibi,
+ .recycle_ibi_slot = dw_i3c_master_recycle_ibi_slot,
++ .enable_hotjoin = dw_i3c_master_enable_hotjoin,
++ .disable_hotjoin = dw_i3c_master_disable_hotjoin,
+ };
+
+ /* default platform ops implementations */
+@@ -1407,15 +2426,151 @@ static void dw_i3c_platform_set_dat_ibi_nop(struct dw_i3c_master *i3c,
+ {
+ }
+
++static void dw_i3c_platform_enter_sw_mode_nop(struct dw_i3c_master *i3c)
++{
++}
++
++static void dw_i3c_platform_exit_sw_mode_nop(struct dw_i3c_master *i3c)
++{
++}
++
++static void dw_i3c_toggle_scl_in_nop(struct dw_i3c_master *i3c, int count)
++{
++}
++
++static void dw_i3c_gen_internal_stop_nop(struct dw_i3c_master *i3c)
++{
++}
++
++static void dw_i3c_gen_target_reset_pattern_nop(struct dw_i3c_master *i3c)
++{
++}
++
++static void dw_i3c_set_ibi_mdb_nop(struct dw_i3c_master *i3c, u8 mdb)
++{
++}
++
++static int dw_i3c_master_flush_dat_nop(struct dw_i3c_master *i3c, u8 addr)
++{
++ return 0;
++}
++
++static void dw_i3c_master_set_ibi_dev(struct dw_i3c_master *i3c,
++ struct i3c_dev_desc *dev)
++{
++ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
++
++ i3c->devs[data->index].ibi_dev = dev;
++}
++
++static void dw_i3c_master_unset_ibi_dev(struct dw_i3c_master *i3c,
++ struct i3c_dev_desc *dev)
++{
++ struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
++
++ i3c->devs[data->index].ibi_dev = NULL;
++}
++
++static struct i3c_dev_desc *dw_i3c_master_get_ibi_dev(struct dw_i3c_master *i3c,
++ u8 addr)
++{
++ int idx = i3c->platform_ops->get_addr_pos(i3c, addr);
++
++ if (idx < 0) {
++ dev_dbg_ratelimited(&i3c->base.dev,
++ "IBI from unknown addr 0x%x\n", addr);
++ return NULL;
++ }
++
++ return i3c->devs[idx].ibi_dev;
++}
++
+ static const struct dw_i3c_platform_ops dw_i3c_platform_ops_default = {
+ .init = dw_i3c_platform_init_nop,
+ .set_dat_ibi = dw_i3c_platform_set_dat_ibi_nop,
++ .enter_sw_mode = dw_i3c_platform_enter_sw_mode_nop,
++ .exit_sw_mode = dw_i3c_platform_exit_sw_mode_nop,
++ .toggle_scl_in = dw_i3c_toggle_scl_in_nop,
++ .gen_internal_stop = dw_i3c_gen_internal_stop_nop,
++ .gen_target_reset_pattern = dw_i3c_gen_target_reset_pattern_nop,
++ .set_ibi_mdb = dw_i3c_set_ibi_mdb_nop,
++ .reattach_i3c_dev = dw_i3c_master_reattach_i3c_dev,
++ .attach_i3c_dev = dw_i3c_master_attach_i3c_dev,
++ .detach_i3c_dev = dw_i3c_master_detach_i3c_dev,
++ .attach_i2c_dev = dw_i3c_master_attach_i2c_dev,
++ .detach_i2c_dev = dw_i3c_master_detach_i2c_dev,
++ .get_addr_pos = dw_i3c_master_get_addr_pos,
++ .flush_dat = dw_i3c_master_flush_dat_nop,
++ .set_sir_enabled = dw_i3c_master_set_sir_enabled,
++ .set_ibi_dev = dw_i3c_master_set_ibi_dev,
++ .unset_ibi_dev = dw_i3c_master_unset_ibi_dev,
++ .get_ibi_dev = dw_i3c_master_get_ibi_dev,
+ };
+
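++/* Bus timing comes from optional device-tree properties; nanosecond values are converted to core clock cycles */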
++static int dw_i3c_of_populate_bus_timing(struct dw_i3c_master *master,
++ struct device_node *np)
++{
++ u32 val, reg, sda_tx_hold_ns;
++
++ master->timing.core_rate = clk_get_rate(master->core_clk);
++ if (!master->timing.core_rate) {
++ dev_err(&master->base.dev, "core clock rate not found\n");
++ return -EINVAL;
++ }
++
++ /* core_period is in nanoseconds */
++ master->timing.core_period =
++ DIV_ROUND_UP(1000000000, master->timing.core_rate);
++
++ /* Parse configurations from the device tree */
++ if (!of_property_read_u32(np, "i3c-pp-scl-hi-period-ns", &val))
++ master->timing.i3c_pp_scl_high = val;
++
++ if (!of_property_read_u32(np, "i3c-pp-scl-lo-period-ns", &val))
++ master->timing.i3c_pp_scl_low = val;
++
++ if (!of_property_read_u32(np, "i3c-od-scl-hi-period-ns", &val))
++ master->timing.i3c_od_scl_high = val;
++
++ if (!of_property_read_u32(np, "i3c-od-scl-lo-period-ns", &val))
++ master->timing.i3c_od_scl_low = val;
++
++ sda_tx_hold_ns = SDA_TX_HOLD_MIN * master->timing.core_period;
++ if (!of_property_read_u32(np, "sda-tx-hold-ns", &val))
++ sda_tx_hold_ns = val;
++
++ master->timing.timed_reset_scl_low_ns = JESD403_TIMED_RESET_NS_DEF;
++ if (!of_property_read_u32(np, "timed-reset-scl-low-ns", &val))
++ master->timing.timed_reset_scl_low_ns = val;
++
++ val = clamp((u32)DIV_ROUND_CLOSEST(sda_tx_hold_ns,
++ master->timing.core_period),
++ (u32)SDA_TX_HOLD_MIN, (u32)SDA_TX_HOLD_MAX);
++ reg = readl(master->regs + SDA_HOLD_SWITCH_DLY_TIMING);
++ reg &= ~SDA_TX_HOLD;
++ reg |= FIELD_PREP(SDA_TX_HOLD, val);
++ writel(reg, master->regs + SDA_HOLD_SWITCH_DLY_TIMING);
++
++ val = DIV_ROUND_CLOSEST(master->timing.timed_reset_scl_low_ns,
++ master->timing.core_period);
++ writel(val, master->regs + SCL_LOW_MST_EXT_TIMEOUT);
++
++ return 0;
++}
++
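++/* A Hot-Join IBI schedules DAA from the workqueue since it cannot run in interrupt context */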
++static void dw_i3c_hj_work(struct work_struct *work)
++{
++ struct dw_i3c_master *master =
++ container_of(work, typeof(*master), hj_work);
++
++ i3c_master_do_daa(&master->base);
++}
++
+ int dw_i3c_common_probe(struct dw_i3c_master *master,
+ struct platform_device *pdev)
+ {
+ const struct i3c_master_controller_ops *ops;
++ struct device_node *np;
+ int ret, irq;
+
+ if (!master->platform_ops)
+@@ -1430,7 +2585,7 @@ int dw_i3c_common_probe(struct dw_i3c_master *master,
+ return PTR_ERR(master->core_clk);
+
+ master->core_rst = devm_reset_control_get_optional_exclusive(&pdev->dev,
+- "core_rst");
++ NULL);
+ if (IS_ERR(master->core_rst))
+ return PTR_ERR(master->core_rst);
+
+@@ -1453,6 +2608,11 @@ int dw_i3c_common_probe(struct dw_i3c_master *master,
+
+ platform_set_drvdata(pdev, master);
+
++ np = pdev->dev.of_node;
++ ret = dw_i3c_of_populate_bus_timing(master, np);
++ if (ret)
++ goto err_assert_rst;
++
+ /* Information regarding the FIFOs/QUEUEs depth */
+ ret = readl(master->regs + QUEUE_STATUS_LEVEL);
+ master->caps.cmdfifodepth = QUEUE_STATUS_LEVEL_CMD(ret);
+@@ -1469,10 +2629,17 @@ int dw_i3c_common_probe(struct dw_i3c_master *master,
+ if (master->ibi_capable)
+ ops = &dw_mipi_i3c_ibi_ops;
+
+- ret = i3c_master_register(&master->base, &pdev->dev, ops, false);
++ INIT_WORK(&master->hj_work, dw_i3c_hj_work);
++ ret = i3c_register(&master->base, &pdev->dev, ops,
++ &dw_mipi_i3c_target_ops, false);
+ if (ret)
+ goto err_assert_rst;
+
++ if (!master->base.target && master->base.bus.context != I3C_BUS_CONTEXT_JESD403) {
++ dw_i3c_master_set_iba(master, true);
++ dw_i3c_master_enable_hotjoin(&master->base);
++ }
++
+ return 0;
+
+ err_assert_rst:
+@@ -1487,7 +2654,7 @@ EXPORT_SYMBOL_GPL(dw_i3c_common_probe);
+
+ void dw_i3c_common_remove(struct dw_i3c_master *master)
+ {
+- i3c_master_unregister(&master->base);
++ i3c_unregister(&master->base);
+
+ reset_control_assert(master->core_rst);
+
+diff --git a/drivers/i3c/master/dw-i3c-master.h b/drivers/i3c/master/dw-i3c-master.h
+index ab862c5d1..739cd8115 100644
+--- a/drivers/i3c/master/dw-i3c-master.h
++++ b/drivers/i3c/master/dw-i3c-master.h
+@@ -10,7 +10,7 @@
+ #include <linux/reset.h>
+ #include <linux/types.h>
+
+-#define DW_I3C_MAX_DEVS 32
++#define DW_I3C_MAX_DEVS 128
+
+ struct dw_i3c_master_caps {
+ u8 cmdfifodepth;
+@@ -22,6 +22,11 @@ struct dw_i3c_dat_entry {
+ struct i3c_dev_desc *ibi_dev;
+ };
+
++struct dw_i3c_i2c_dev_data {
++ u8 index;
++ struct i3c_generic_ibi_pool *ibi_pool;
++};
++
+ struct dw_i3c_master {
+ struct i3c_master_controller base;
+ u16 maxdevs;
+@@ -57,6 +62,29 @@ struct dw_i3c_master {
+
+ /* platform-specific data */
+ const struct dw_i3c_platform_ops *platform_ops;
++
++ /* target mode data */
++ struct {
++ struct completion comp;
++ struct completion rdata_comp;
++
++ /* Used for handling private write */
++ struct {
++ void *buf;
++ u16 max_len;
++ } rx;
++ } target;
++
++ struct {
++ unsigned long core_rate;
++ unsigned long core_period;
++ u32 i3c_od_scl_low;
++ u32 i3c_od_scl_high;
++ u32 i3c_pp_scl_low;
++ u32 i3c_pp_scl_high;
++ u32 timed_reset_scl_low_ns;
++ } timing;
++ struct work_struct hj_work;
+ };
+
+ struct dw_i3c_platform_ops {
+@@ -76,6 +104,36 @@ struct dw_i3c_platform_ops {
+ */
+ void (*set_dat_ibi)(struct dw_i3c_master *i3c,
+ struct i3c_dev_desc *dev, bool enable, u32 *reg);
++ void (*set_sir_enabled)(struct dw_i3c_master *i3c,
++ struct i3c_dev_desc *dev, u8 idx, bool enable);
++
++ /* Enter the software force mode by isolating the SCL and SDA pins */
++ void (*enter_sw_mode)(struct dw_i3c_master *i3c);
++
++ /* Exit the software force mode */
++ void (*exit_sw_mode)(struct dw_i3c_master *i3c);
++ void (*toggle_scl_in)(struct dw_i3c_master *i3c, int count);
++ void (*gen_internal_stop)(struct dw_i3c_master *i3c);
++ void (*gen_target_reset_pattern)(struct dw_i3c_master *i3c);
++ void (*gen_tbits_in)(struct dw_i3c_master *i3c);
++ int (*bus_recovery)(struct dw_i3c_master *i3c);
++
++ /* For target mode, pending read notification */
++ void (*set_ibi_mdb)(struct dw_i3c_master *i3c, u8 mdb);
++
++ /* DAT handling */
++ int (*reattach_i3c_dev)(struct i3c_dev_desc *dev, u8 old_dyn_addr);
++ int (*attach_i3c_dev)(struct i3c_dev_desc *dev);
++ void (*detach_i3c_dev)(struct i3c_dev_desc *dev);
++ int (*attach_i2c_dev)(struct i2c_dev_desc *dev);
++ void (*detach_i2c_dev)(struct i2c_dev_desc *dev);
++ int (*get_addr_pos)(struct dw_i3c_master *i3c, u8 addr);
++ int (*flush_dat)(struct dw_i3c_master *i3c, u8 addr);
++ void (*set_ibi_dev)(struct dw_i3c_master *i3c,
++ struct i3c_dev_desc *dev);
++ void (*unset_ibi_dev)(struct dw_i3c_master *i3c,
++ struct i3c_dev_desc *dev);
++ struct i3c_dev_desc *(*get_ibi_dev)(struct dw_i3c_master *i3c, u8 addr);
+ };
+
+ extern int dw_i3c_common_probe(struct dw_i3c_master *master,
+diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd.h b/drivers/i3c/master/mipi-i3c-hci/cmd.h
+index 1d6dd2c5d..e3f207e32 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/cmd.h
++++ b/drivers/i3c/master/mipi-i3c-hci/cmd.h
+@@ -17,6 +17,14 @@
+ #define CMD_0_TOC W0_BIT_(31)
+ #define CMD_0_ROC W0_BIT_(30)
+ #define CMD_0_ATTR W0_MASK(2, 0)
++enum hci_cmd_attr {
++ CMD_0_ATTR_A = 0x2,
++ CMD_0_ATTR_I = 0x1,
++ CMD_0_ATTR_R = 0x0,
++ CMD_0_ATTR_C = 0x3,
++ CMD_0_ATTR_M = 0x7,
++ CMD_0_ATTR_T = 0x0,
++};
+
+ /*
+ * Response Descriptor Structure
+@@ -27,6 +35,24 @@
+
+ #define RESP_ERR_FIELD GENMASK(31, 28)
+
++#define a0_debug_s \
++ "resp status:%lx, xfer type:%lx, tid:%lx, CCC_HDR: %lx, data len: %lx"
++#define a1_debug_s \
++ "resp status:%lx, xfer type:%lx, CCC_INDI:%lx tid:%lx, CCC_HDR: %lx, data legth: %lx"
++/*
++ * Target mode Response Descriptor Structure
++ */
++#define TARGET_RESP_STATUS(resp) FIELD_GET(GENMASK(31, 28), resp)
++#define TARGET_RESP_XFER_TYPE(resp) FIELD_GET(BIT(27), resp)
++#define TARGET_RESP_XFER_TYPE_W 0
++#define TARGET_RESP_XFER_TYPE_R 1
++#define TARGET_RESP_CCC_INDICATE(resp) FIELD_GET(BIT(26), resp)
++#define TARGET_RESP_TID(resp) FIELD_GET(GENMASK(25, 24), resp)
++#define TARGET_RESP_TID_A0(resp) FIELD_GET(GENMASK(26, 24), resp)
++#define TARGET_RESP_CCC_HDR(resp) FIELD_GET(GENMASK(23, 16), resp)
++#define TARGET_RESP_SDR_PRIV_XFER 0
++#define TARGET_RESP_DATA_LENGTH(resp) FIELD_GET(GENMASK(15, 0), resp)
++
+ enum hci_resp_err {
+ RESP_SUCCESS = 0x0,
+ RESP_ERR_CRC = 0x1,
+@@ -45,18 +71,73 @@ enum hci_resp_err {
+ /* 0xc to 0xf are reserved for transfer specific errors */
+ };
+
++enum hci_target_resp_err {
++ TARGET_RESP_SUCCESS = 0x0,
++ TARGET_RESP_ERR_CRC = 0x1,
++ TARGET_RESP_ERR_PARITY = 0x2,
++ TARGET_RESP_ERR_FRAME = 0x3,
++ TARGET_RESP_ERR_R_NO_CMD_DESC = 0x5,
++ TARGET_RESP_ERR_OVERFLOW = 0x6,
++ TARGET_RESP_ERR_W_RX_QUEUE_FULL = 0x7,
++ TARGET_RESP_ERR_EARLY_TERMINATED = 0xa,
++ TARGET_RESP_ERR_I2C_READ_TOO_MUCH = 0xb,
++ TARGET_RESP_ERR_IBI_NACK = 0xc,
++ TARGET_RESP_ERR_IBI_LOST_ARBITRATION = 0xd,
++};
++
++/* Sub command in the internal control command */
++enum hci_m_sub_cmd {
++ M_SUB_CMD_RING_LOCK = 0x1,
++ M_SUB_CMD_BROCAST_ADDR_EN = 0x2,
++ M_SUB_CMD_DAT_CONT_UPDATE = 0x3,
++ M_SUB_CMD_TARGET_RST_PATTERN = 0x4,
++ M_SUB_CMD_REC_RST_PROC = 0x5,
++ M_SUB_CMD_END_XFER = 0x6,
++ M_SUB_CMD_CR_W_GETACCCR = 0x7,
++};
++
++/* Parameter for Sub command: RING_LOCK */
++#define CMD_M0_RING_LOCK_ON 0x1
++
++/* Parameter for Sub command: BROCAST_ADDR_EN */
++#define CMD_M0_BROCAST_ADDR_ON 0x1
++
++/* Parameter for Sub command: TARGET_RST_PATTERN */
++enum hci_rst_op_type {
++ RST_OP_TARGET_RST = 0x0,
++ RST_OP_ENTER_CRITICAL_SEC = 0x2,
++ RST_OP_EXIT_CRITICAL_SEC = 0x3,
++};
++
++/* Parameter for Sub command: REC_RST_PROC */
++enum hci_rec_rst_proc {
++ REC_PROC_I2C_SDA_STUCK = 0x0,
++ REC_PROC_I3C_SDR_SDA_STUCK = 0x1,
++ REC_PROC_I3C_HDR_SDA_STUCK = 0x2,
++ REC_PROC_FORCE_STOP = 0x4,
++ REC_PROC_CE2_ERR = 0x5,
++ REC_PROC_TIMED_RST = 0x6,
++};
++
+ /* TID generation (4 bits wide in all cases) */
+ #define hci_get_tid(bits) \
+ (atomic_inc_return_relaxed(&hci->next_cmd_tid) % (1U << 4))
++/* Specific tid to identify the response for IBI or master read */
++#define TID_TARGET_IBI 0b0001
++#define TID_TARGET_RD_DATA 0b0010
+
+ /* This abstracts operations with our command descriptor formats */
+ struct hci_cmd_ops {
+- int (*prep_ccc)(struct i3c_hci *hci, struct hci_xfer *xfer,
+- u8 ccc_addr, u8 ccc_cmd, bool raw);
++ int (*prep_ccc)(struct i3c_hci *hci, struct hci_xfer *xfer, u8 ccc_addr,
++ u8 ccc_cmd, bool ccc_dbp, u8 ccc_db, bool raw);
++ int (*prep_hdr)(struct i3c_hci *hci, struct hci_xfer *xfer, u8 addr,
++ u8 code, enum i3c_hdr_mode mode);
+ void (*prep_i3c_xfer)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ struct hci_xfer *xfer);
+ void (*prep_i2c_xfer)(struct i3c_hci *hci, struct i2c_dev_desc *dev,
+ struct hci_xfer *xfer);
++ void (*prep_internal)(struct i3c_hci *hci, struct hci_xfer *xfer,
++ u8 sub_cmd, u32 param);
+ int (*perform_daa)(struct i3c_hci *hci);
+ };
+
+diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
+index 6a781f89b..18f90cdb9 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
++++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
+@@ -9,6 +9,7 @@
+
+ #include <linux/bitfield.h>
+ #include <linux/i3c/master.h>
++#include <linux/i3c/device.h>
+
+ #include "hci.h"
+ #include "cmd.h"
+@@ -25,7 +26,11 @@
+ #define CMD_A0_TOC W0_BIT_(31)
+ #define CMD_A0_ROC W0_BIT_(30)
+ #define CMD_A0_DEV_COUNT(v) FIELD_PREP(W0_MASK(29, 26), v)
++#ifdef CONFIG_ARCH_ASPEED
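++/* Aspeed uses a 7-bit device index (bits 22:16); the stock field is 5 bits wide */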
++#define CMD_A0_DEV_INDEX(v) FIELD_PREP(W0_MASK(22, 16), v)
++#else
+ #define CMD_A0_DEV_INDEX(v) FIELD_PREP(W0_MASK(20, 16), v)
++#endif
+ #define CMD_A0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
+ #define CMD_A0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+@@ -45,7 +50,11 @@
+ #define CMD_I0_RNW W0_BIT_(29)
+ #define CMD_I0_MODE(v) FIELD_PREP(W0_MASK(28, 26), v)
+ #define CMD_I0_DTT(v) FIELD_PREP(W0_MASK(25, 23), v)
++#ifdef CONFIG_ARCH_ASPEED
++#define CMD_I0_DEV_INDEX(v) FIELD_PREP(W0_MASK(22, 16), v)
++#else
+ #define CMD_I0_DEV_INDEX(v) FIELD_PREP(W0_MASK(20, 16), v)
++#endif
+ #define CMD_I0_CP W0_BIT_(15)
+ #define CMD_I0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
+ #define CMD_I0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+@@ -63,11 +72,16 @@
+ #define CMD_R0_RNW W0_BIT_(29)
+ #define CMD_R0_MODE(v) FIELD_PREP(W0_MASK(28, 26), v)
+ #define CMD_R0_DBP W0_BIT_(25)
++#ifdef CONFIG_ARCH_ASPEED
++#define CMD_R0_DEV_INDEX(v) FIELD_PREP(W0_MASK(22, 16), v)
++#else
+ #define CMD_R0_DEV_INDEX(v) FIELD_PREP(W0_MASK(20, 16), v)
++#endif
+ #define CMD_R0_CP W0_BIT_(15)
+ #define CMD_R0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
+ #define CMD_R0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
++#ifndef CONFIG_ARCH_ASPEED
+ /*
+ * Combo Transfer (Write + Write/Read) Command
+ */
+@@ -87,7 +101,7 @@
+ #define CMD_C0_CP W0_BIT_(15)
+ #define CMD_C0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
+ #define CMD_C0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+-
++#endif
+ /*
+ * Internal Control Command
+ */
+@@ -95,11 +109,25 @@
+ #define CMD_0_ATTR_M FIELD_PREP(CMD_0_ATTR, 0x7)
+
+ #define CMD_M1_VENDOR_SPECIFIC W1_MASK(63, 32)
+-#define CMD_M0_MIPI_RESERVED W0_MASK(31, 12)
+-#define CMD_M0_MIPI_CMD W0_MASK(11, 8)
++#define CMD_M0_MIPI_RESERVED(v) FIELD_PREP(W0_MASK(31, 12), v)
++#define CMD_M0_MIPI_CMD(v) FIELD_PREP(W0_MASK(11, 8), v)
+ #define CMD_M0_VENDOR_INFO_PRESENT W0_BIT_( 7)
+ #define CMD_M0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
++/*
++ * Target Transfer Command
++ */
++
++#define CMD_0_ATTR_T FIELD_PREP(CMD_0_ATTR, 0x0)
++
++#define CMD_T0_DATA_LENGTH(v) FIELD_PREP(W0_MASK(31, 16), v)
++#define CMD_T0_MDB(v) FIELD_PREP(W0_MASK(15, 8), v)
++#define CMD_T0_MDB_EN W0_BIT_(6)
++#define CMD_T0_TID(v) FIELD_PREP(W0_MASK(4, 3), v)
++#define CMD_T0_TID_A0(v) FIELD_PREP(W0_MASK(5, 3), v)
++
++#include "vendor_aspeed.h"
++
+
+ /* Data Transfer Speed and Mode */
+ enum hci_cmd_mode {
+@@ -123,17 +151,15 @@ static enum hci_cmd_mode get_i3c_mode(struct i3c_hci *hci)
+ {
+ struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
+
+- if (bus->scl_rate.i3c >= 12500000)
+- return MODE_I3C_SDR0;
+ if (bus->scl_rate.i3c > 8000000)
+- return MODE_I3C_SDR1;
++ return MODE_I3C_SDR0;
+ if (bus->scl_rate.i3c > 6000000)
+- return MODE_I3C_SDR2;
++ return MODE_I3C_SDR1;
+ if (bus->scl_rate.i3c > 4000000)
+- return MODE_I3C_SDR3;
++ return MODE_I3C_SDR2;
+ if (bus->scl_rate.i3c > 2000000)
+- return MODE_I3C_SDR4;
+- return MODE_I3C_Fm_FmP;
++ return MODE_I3C_SDR3;
++ return MODE_I3C_SDR4;
+ }
+
+ static enum hci_cmd_mode get_i2c_mode(struct i3c_hci *hci)
+@@ -169,9 +195,9 @@ static void fill_data_bytes(struct hci_xfer *xfer, u8 *data,
+ xfer->data = NULL;
+ }
+
+-static int hci_cmd_v1_prep_ccc(struct i3c_hci *hci,
+- struct hci_xfer *xfer,
+- u8 ccc_addr, u8 ccc_cmd, bool raw)
++static int hci_cmd_v1_prep_ccc(struct i3c_hci *hci, struct hci_xfer *xfer,
++ u8 ccc_addr, u8 ccc_cmd, bool ccc_dbp, u8 ccc_db,
++ bool raw)
+ {
+ unsigned int dat_idx = 0;
+ enum hci_cmd_mode mode = get_i3c_mode(hci);
+@@ -210,47 +236,99 @@ static int hci_cmd_v1_prep_ccc(struct i3c_hci *hci,
+ CMD_R0_TID(xfer->cmd_tid) |
+ CMD_R0_CMD(ccc_cmd) | CMD_R0_CP |
+ CMD_R0_DEV_INDEX(dat_idx) |
++ (ccc_dbp ? CMD_R0_DBP : 0) |
+ CMD_R0_MODE(mode) |
+ (rnw ? CMD_R0_RNW : 0);
+ xfer->cmd_desc[1] =
+ CMD_R1_DATA_LENGTH(data_len);
++ if (ccc_dbp)
++ xfer->cmd_desc[1] |= CMD_R1_DEF_BYTE(ccc_db);
+ }
+
+ return 0;
+ }
+
+-static void hci_cmd_v1_prep_i3c_xfer(struct i3c_hci *hci,
+- struct i3c_dev_desc *dev,
+- struct hci_xfer *xfer)
++static int hci_cmd_v1_prep_hdr(struct i3c_hci *hci, struct hci_xfer *xfer,
++ u8 addr, u8 code, enum i3c_hdr_mode hdr_mode)
+ {
+- struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+- unsigned int dat_idx = dev_data->dat_idx;
+- enum hci_cmd_mode mode = get_i3c_mode(hci);
++ unsigned int dat_idx = 0;
+ u8 *data = xfer->data;
+ unsigned int data_len = xfer->data_len;
+ bool rnw = xfer->rnw;
++ int ret;
++ enum hci_cmd_mode hdr_to_cmd[] = { MODE_I3C_HDR_DDR, MODE_I3C_HDR_TSx,
++ MODE_I3C_HDR_TSx, MODE_I3C_HDR_BT };
++
++ if (addr != I3C_BROADCAST_ADDR) {
++ ret = mipi_i3c_hci_dat_v1.get_index(hci, addr);
++ if (ret < 0)
++ return ret;
++ dat_idx = ret;
++ }
+
+ xfer->cmd_tid = hci_get_tid();
+
+ if (!rnw && data_len <= 4) {
+ /* we use an Immediate Data Transfer Command */
+- xfer->cmd_desc[0] =
+- CMD_0_ATTR_I |
+- CMD_I0_TID(xfer->cmd_tid) |
+- CMD_I0_DEV_INDEX(dat_idx) |
+- CMD_I0_DTT(data_len) |
+- CMD_I0_MODE(mode);
++ xfer->cmd_desc[0] = CMD_0_ATTR_I | CMD_I0_TID(xfer->cmd_tid) |
++ CMD_I0_CMD(code & 0x7f) | CMD_I0_CP |
++ CMD_I0_DEV_INDEX(dat_idx) |
++ CMD_I0_DTT(data_len) |
++ CMD_I0_MODE(hdr_to_cmd[hdr_mode]);
+ fill_data_bytes(xfer, data, data_len);
+ } else {
+ /* we use a Regular Data Transfer Command */
+- xfer->cmd_desc[0] =
+- CMD_0_ATTR_R |
+- CMD_R0_TID(xfer->cmd_tid) |
+- CMD_R0_DEV_INDEX(dat_idx) |
+- CMD_R0_MODE(mode) |
+- (rnw ? CMD_R0_RNW : 0);
+- xfer->cmd_desc[1] =
+- CMD_R1_DATA_LENGTH(data_len);
++ xfer->cmd_desc[0] = CMD_0_ATTR_R | CMD_R0_TID(xfer->cmd_tid) |
++ CMD_R0_CMD(code & 0x7f) | CMD_R0_CP |
++ CMD_R0_DEV_INDEX(dat_idx) |
++ CMD_R0_MODE(hdr_to_cmd[hdr_mode]) |
++ (rnw ? CMD_R0_RNW : 0);
++ xfer->cmd_desc[1] = CMD_R1_DATA_LENGTH(data_len);
++ }
++ return 0;
++}
++
++static void hci_cmd_v1_prep_i3c_xfer(struct i3c_hci *hci,
++ struct i3c_dev_desc *dev,
++ struct hci_xfer *xfer)
++{
++ u8 *data = xfer->data;
++ unsigned int data_len = xfer->data_len;
++
++ if (hci->master.target) {
++ if (!aspeed_get_i3c_revision_id(hci))
++ xfer->cmd_desc[0] = CMD_0_ATTR_T | CMD_T0_TID_A0(xfer->cmd_tid) |
++ CMD_T0_DATA_LENGTH(data_len);
++ else
++ xfer->cmd_desc[0] = CMD_0_ATTR_T | CMD_T0_TID(xfer->cmd_tid) |
++ CMD_T0_DATA_LENGTH(data_len);
++ } else {
++ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
++ unsigned int dat_idx = dev_data->dat_idx;
++ enum hci_cmd_mode mode = get_i3c_mode(hci);
++ bool rnw = xfer->rnw;
++
++ xfer->cmd_tid = hci_get_tid();
++ if (!rnw && data_len <= 4) {
++ /* we use an Immediate Data Transfer Command */
++ xfer->cmd_desc[0] =
++ CMD_0_ATTR_I |
++ CMD_I0_TID(xfer->cmd_tid) |
++ CMD_I0_DEV_INDEX(dat_idx) |
++ CMD_I0_DTT(data_len) |
++ CMD_I0_MODE(mode);
++ fill_data_bytes(xfer, data, data_len);
++ } else {
++ /* we use a Regular Data Transfer Command */
++ xfer->cmd_desc[0] =
++ CMD_0_ATTR_R |
++ CMD_R0_TID(xfer->cmd_tid) |
++ CMD_R0_DEV_INDEX(dat_idx) |
++ CMD_R0_MODE(mode) |
++ (rnw ? CMD_R0_RNW : 0);
++ xfer->cmd_desc[1] =
++ CMD_R1_DATA_LENGTH(data_len);
++ }
+ }
+ }
+
+@@ -289,11 +367,33 @@ static void hci_cmd_v1_prep_i2c_xfer(struct i3c_hci *hci,
+ }
+ }
+
++static void hci_cmd_v1_prep_internal(struct i3c_hci *hci, struct hci_xfer *xfer,
++ u8 sub_cmd, u32 param)
++{
++ xfer->cmd_tid = hci_get_tid(hci);
++ xfer->cmd_desc[0] = CMD_0_ATTR_M | CMD_M0_TID(xfer->cmd_tid) |
++ CMD_M0_MIPI_CMD(sub_cmd) |
++ CMD_M0_MIPI_RESERVED(param);
++ xfer->cmd_desc[1] = 0;
++}
++
++static void i3c_aspeed_set_daa_index(struct i3c_hci *hci, u8 addr)
++{
++ if (addr < 32)
++ ast_inhouse_write(ASPEED_I3C_DAA_INDEX0, BIT(addr));
++ else if ((addr >= 32) && (addr < 64))
++ ast_inhouse_write(ASPEED_I3C_DAA_INDEX1, BIT(addr - 32));
++ else if ((addr >= 64) && (addr < 96))
++ ast_inhouse_write(ASPEED_I3C_DAA_INDEX2, BIT(addr - 64));
++ else
++ ast_inhouse_write(ASPEED_I3C_DAA_INDEX3, BIT(addr - 96));
++}
++
+ static int hci_cmd_v1_daa(struct i3c_hci *hci)
+ {
+ struct hci_xfer *xfer;
+ int ret, dat_idx = -1;
+- u8 next_addr = 0;
++ u8 next_addr = 0x9;
+ u64 pid;
+ unsigned int dcr, bcr;
+ DECLARE_COMPLETION_ONSTACK(done);
+@@ -310,14 +410,28 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
+ * Yes, there is room for improvements.
+ */
+ for (;;) {
++#ifndef CONFIG_ARCH_ASPEED
+ ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
+ if (ret < 0)
+ break;
+ dat_idx = ret;
++#endif
+ ret = i3c_master_get_free_addr(&hci->master, next_addr);
+ if (ret < 0)
+ break;
+ next_addr = ret;
++#ifdef CONFIG_ARCH_ASPEED
++ ret = mipi_i3c_hci_dat_v1.alloc_entry(hci, next_addr);
++ if (ret < 0)
++ break;
++ dat_idx = ret;
++ i3c_aspeed_set_daa_index(hci, dat_idx);
++ DBG("Dat index = %x %x %x %x\n",
++ ast_inhouse_read(ASPEED_I3C_DAA_INDEX0),
++ ast_inhouse_read(ASPEED_I3C_DAA_INDEX1),
++ ast_inhouse_read(ASPEED_I3C_DAA_INDEX2),
++ ast_inhouse_read(ASPEED_I3C_DAA_INDEX3));
++#endif
+
+ DBG("next_addr = 0x%02x, DAA using DAT %d", next_addr, dat_idx);
+ mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dat_idx, next_addr);
+@@ -332,6 +446,7 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
+ CMD_A0_DEV_COUNT(1) |
+ CMD_A0_ROC | CMD_A0_TOC;
+ xfer->cmd_desc[1] = 0;
++ xfer->completion = &done;
+ hci->io->queue_xfer(hci, xfer, 1);
+ if (!wait_for_completion_timeout(&done, HZ) &&
+ hci->io->dequeue_xfer(hci, xfer, 1)) {
+@@ -344,7 +459,11 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
+ break;
+ }
+ if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) {
+- ret = -EIO;
++ if (RESP_STATUS(xfer[0].response) ==
++ RESP_ERR_ADDR_HEADER)
++ ret = I3C_ERROR_M2;
++ else
++ ret = -EIO;
+ break;
+ }
+
+@@ -372,7 +491,9 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
+
+ const struct hci_cmd_ops mipi_i3c_hci_cmd_v1 = {
+ .prep_ccc = hci_cmd_v1_prep_ccc,
++ .prep_hdr = hci_cmd_v1_prep_hdr,
+ .prep_i3c_xfer = hci_cmd_v1_prep_i3c_xfer,
+ .prep_i2c_xfer = hci_cmd_v1_prep_i2c_xfer,
++ .prep_internal = hci_cmd_v1_prep_internal,
+ .perform_daa = hci_cmd_v1_daa,
+ };
+diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
+index 4493b2b06..5f33c11c2 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
++++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
+@@ -149,7 +149,8 @@ static void hci_cmd_v2_prep_private_xfer(struct i3c_hci *hci,
+ }
+
+ static int hci_cmd_v2_prep_ccc(struct i3c_hci *hci, struct hci_xfer *xfer,
+- u8 ccc_addr, u8 ccc_cmd, bool raw)
++ u8 ccc_addr, u8 ccc_cmd, bool dbp, u8 db,
++ bool raw)
+ {
+ unsigned int mode = XFERMODE_IDX_I3C_SDR;
+ unsigned int rate = get_i3c_rate_idx(hci);
+diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
+index 837af83c8..66ce496db 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/core.c
++++ b/drivers/i3c/master/mipi-i3c-hci/core.c
+@@ -10,17 +10,23 @@
+ #include <linux/bitfield.h>
+ #include <linux/device.h>
+ #include <linux/errno.h>
++#include <linux/clk.h>
++#include <linux/reset.h>
+ #include <linux/i3c/master.h>
++#include <linux/i3c/target.h>
++#include <linux/i3c/device.h>
+ #include <linux/interrupt.h>
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
+ #include <linux/module.h>
+ #include <linux/platform_device.h>
++#include <dt-bindings/i3c/i3c.h>
+
+ #include "hci.h"
+ #include "ext_caps.h"
+ #include "cmd.h"
+ #include "dat.h"
++#include "vendor_aspeed.h"
+
+
+ /*
+@@ -117,6 +123,168 @@
+ #define DEV_CTX_BASE_LO 0x60
+ #define DEV_CTX_BASE_HI 0x64
+
++#ifdef CONFIG_ARCH_ASPEED
++
++static u32 aspeed_i3c_get_sdr_phy_reg(struct i3c_hci *hci)
++{
++ struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
++
++ if (bus->scl_rate.i3c > 8000000)
++ return PHY_I3C_SDR0_CTRL0;
++ if (bus->scl_rate.i3c > 6000000)
++ return PHY_I3C_SDR1_CTRL0;
++ if (bus->scl_rate.i3c > 4000000)
++ return PHY_I3C_SDR2_CTRL0;
++ if (bus->scl_rate.i3c > 2000000)
++ return PHY_I3C_SDR3_CTRL0;
++ return PHY_I3C_SDR4_CTRL0;
++}
++
++static void aspeed_i3c_phy_init(struct i3c_hci *hci)
++{
++ u16 hcnt, lcnt;
++ unsigned long core_rate, core_period;
++
++ core_rate = clk_get_rate(hci->clk);
++ /* core_period is in nanoseconds */
++ core_period = DIV_ROUND_UP(1000000000, core_rate);
++
++ hcnt = DIV_ROUND_CLOSEST(PHY_I2C_FM_DEFAULT_CAS_NS, core_period) - 1;
++ lcnt = DIV_ROUND_CLOSEST(PHY_I2C_FM_DEFAULT_SU_STO_NS, core_period) - 1;
++ ast_phy_write(PHY_I2C_FM_CTRL0, FIELD_PREP(PHY_I2C_FM_CTRL0_CAS, hcnt) |
++ FIELD_PREP(PHY_I2C_FM_CTRL0_SU_STO, lcnt));
++
++ hcnt = DIV_ROUND_CLOSEST(PHY_I2C_FM_DEFAULT_SCL_H_NS, core_period) - 1;
++ lcnt = DIV_ROUND_CLOSEST(PHY_I2C_FM_DEFAULT_SCL_L_NS, core_period) - 1;
++ ast_phy_write(PHY_I2C_FM_CTRL1, FIELD_PREP(PHY_I2C_FM_CTRL1_SCL_H, hcnt) |
++ FIELD_PREP(PHY_I2C_FM_CTRL1_SCL_L, lcnt));
++ ast_phy_write(PHY_I2C_FM_CTRL2, FIELD_PREP(PHY_I2C_FM_CTRL2_ACK_H, hcnt) |
++ FIELD_PREP(PHY_I2C_FM_CTRL2_ACK_L, hcnt));
++ hcnt = DIV_ROUND_CLOSEST(PHY_I2C_FM_DEFAULT_HD_DAT, core_period) - 1;
++ lcnt = DIV_ROUND_CLOSEST(PHY_I2C_FM_DEFAULT_AHD_DAT, core_period) - 1;
++ ast_phy_write(PHY_I2C_FM_CTRL3, FIELD_PREP(PHY_I2C_FM_CTRL3_HD_DAT, hcnt) |
++ FIELD_PREP(PHY_I2C_FM_CTRL3_AHD_DAT, lcnt));
++
++ hcnt = DIV_ROUND_CLOSEST(PHY_I2C_FMP_DEFAULT_CAS_NS, core_period) - 1;
++ lcnt = DIV_ROUND_CLOSEST(PHY_I2C_FMP_DEFAULT_SU_STO_NS, core_period) - 1;
++ ast_phy_write(PHY_I2C_FMP_CTRL0, FIELD_PREP(PHY_I2C_FMP_CTRL0_CAS, hcnt) |
++ FIELD_PREP(PHY_I2C_FMP_CTRL0_SU_STO, lcnt));
++
++ hcnt = DIV_ROUND_CLOSEST(PHY_I2C_FMP_DEFAULT_SCL_H_NS, core_period) - 1;
++ lcnt = DIV_ROUND_CLOSEST(PHY_I2C_FMP_DEFAULT_SCL_L_NS, core_period) - 1;
++ ast_phy_write(PHY_I2C_FMP_CTRL1, FIELD_PREP(PHY_I2C_FMP_CTRL1_SCL_H, hcnt) |
++ FIELD_PREP(PHY_I2C_FMP_CTRL1_SCL_L, lcnt));
++ ast_phy_write(PHY_I2C_FMP_CTRL2, FIELD_PREP(PHY_I2C_FMP_CTRL2_ACK_H, hcnt) |
++ FIELD_PREP(PHY_I2C_FMP_CTRL2_ACK_L, hcnt));
++ hcnt = DIV_ROUND_CLOSEST(PHY_I2C_FMP_DEFAULT_HD_DAT, core_period) - 1;
++ lcnt = DIV_ROUND_CLOSEST(PHY_I2C_FMP_DEFAULT_AHD_DAT, core_period) - 1;
++ ast_phy_write(PHY_I2C_FMP_CTRL3, FIELD_PREP(PHY_I2C_FMP_CTRL3_HD_DAT, hcnt) |
++ FIELD_PREP(PHY_I2C_FMP_CTRL3_AHD_DAT, lcnt));
++
++ ast_phy_write(PHY_PULLUP_EN, 0x0);
++}
++
++static void aspeed_i3c_of_populate_bus_timing(struct i3c_hci *hci, struct device_node *np)
++{
++ u16 hcnt, lcnt, total_cnt, min_tbit_cnt;
++ unsigned long core_rate, core_period;
++ u32 val, pp_high = 0, pp_low = 0, od_high = 0, od_low = 0, thd_dat = 0, internal_pu = 0;
++ u32 ctrl0, ctrl1, ctrl2;
++ u32 sdr_ctrl0_reg = aspeed_i3c_get_sdr_phy_reg(hci);
++
++ core_rate = clk_get_rate(hci->clk);
++ /* core_period is in nanoseconds */
++ core_period = DIV_ROUND_UP(1000000000, core_rate);
++ /*
++ * The T-bits margin in our I3C controller is too tight to be set at 12.5MHz.
++ * Set it to a minimum of 60ns to ensure proper functionality.
++ */
++ min_tbit_cnt = DIV_ROUND_UP(60, core_period) - 1;
++
++ dev_info(&hci->master.dev, "core rate = %ld core period = %ld ns", core_rate, core_period);
++
++ /* Parse configurations from the device tree */
++ if (!of_property_read_u32(np, "i3c-pp-scl-hi-period-ns", &val))
++ pp_high = val;
++
++ if (!of_property_read_u32(np, "i3c-pp-scl-lo-period-ns", &val))
++ pp_low = val;
++
++ if (!of_property_read_u32(np, "i3c-od-scl-hi-period-ns", &val))
++ od_high = val;
++
++ if (!of_property_read_u32(np, "i3c-od-scl-lo-period-ns", &val))
++ od_low = val;
++
++ if (!of_property_read_u32(np, "sda-tx-hold-ns", &val))
++ thd_dat = val;
++
++ if (!of_property_read_u32(np, "internal-pullup", &val))
++ internal_pu = val;
++
++ if (pp_high && pp_low) {
++ hcnt = DIV_ROUND_CLOSEST(pp_high, core_period) - 1;
++ lcnt = DIV_ROUND_CLOSEST(pp_low, core_period) - 1;
++ } else if (hci->master.bus.mode == I3C_BUS_MODE_PURE) {
++ total_cnt = DIV_ROUND_UP(core_rate, hci->master.bus.scl_rate.i3c) - 2;
++ hcnt = DIV_ROUND_DOWN_ULL(total_cnt * 2, 5);
++ lcnt = (total_cnt - hcnt);
++ } else {
++ total_cnt = DIV_ROUND_UP(core_rate, hci->master.bus.scl_rate.i3c) - 2;
++ hcnt = DIV_ROUND_UP(I3C_BUS_THIGH_MAX_NS, core_period) - 1;
++ lcnt = (total_cnt - hcnt);
++ }
++ ctrl0 = FIELD_PREP(PHY_I3C_SDR0_CTRL0_SCL_H, hcnt) |
++ FIELD_PREP(PHY_I3C_SDR0_CTRL0_SCL_L, lcnt);
++ ast_phy_write(sdr_ctrl0_reg + PHY_I3C_CTRL0_OFFSET, ctrl0);
++ /* The address assignment command (ENTDAA) always uses the SDR0 setting */
++ ast_phy_write(PHY_I3C_SDR0_CTRL0, ctrl0);
++ ast_phy_write(PHY_I3C_DDR_CTRL0, ctrl0);
++ ctrl1 = FIELD_PREP(PHY_I3C_SDR0_CTRL1_TBIT_H, max(hcnt, min_tbit_cnt)) |
++ FIELD_PREP(PHY_I3C_SDR0_CTRL1_TBIT_L, max(lcnt, min_tbit_cnt));
++ ast_phy_write(sdr_ctrl0_reg + PHY_I3C_CTRL1_OFFSET, ctrl1);
++ ast_phy_write(PHY_I3C_SDR0_CTRL1, ctrl1);
++ ast_phy_write(PHY_I3C_DDR_CTRL1, ctrl1);
++
++ if (od_high && od_low) {
++ hcnt = DIV_ROUND_CLOSEST(od_high, core_period) - 1;
++ lcnt = DIV_ROUND_CLOSEST(od_low, core_period) - 1;
++ } else {
++ hcnt = DIV_ROUND_CLOSEST(PHY_I2C_FMP_DEFAULT_SCL_H_NS, core_period) - 1;
++ lcnt = DIV_ROUND_CLOSEST(PHY_I2C_FMP_DEFAULT_SCL_L_NS, core_period) - 1;
++ }
++ ast_phy_write(PHY_I3C_OD_CTRL1, FIELD_PREP(PHY_I3C_OD_CTRL1_SCL_H, hcnt) |
++ FIELD_PREP(PHY_I3C_OD_CTRL1_SCL_L, lcnt));
++ ast_phy_write(PHY_I3C_OD_CTRL2, FIELD_PREP(PHY_I3C_OD_CTRL2_ACK_H, hcnt) |
++ FIELD_PREP(PHY_I3C_OD_CTRL2_ACK_L, hcnt));
++
++ if (thd_dat) {
++ hcnt = DIV_ROUND_CLOSEST(thd_dat, core_period) - 1;
++ lcnt = hcnt;
++ } else {
++ hcnt = DIV_ROUND_CLOSEST(PHY_I3C_OD_DEFAULT_HD_DAT, core_period) - 1;
++ lcnt = DIV_ROUND_CLOSEST(PHY_I3C_OD_DEFAULT_AHD_DAT, core_period) - 1;
++ }
++ ctrl2 = FIELD_PREP(PHY_I3C_SDR0_CTRL2_HD_PP, hcnt) |
++ FIELD_PREP(PHY_I3C_SDR0_CTRL2_TBIT_HD_PP, lcnt);
++ ast_phy_write(sdr_ctrl0_reg + PHY_I3C_CTRL2_OFFSET, ctrl2);
++ ast_phy_write(PHY_I3C_SDR0_CTRL2, ctrl2);
++ ast_phy_write(PHY_I3C_DDR_CTRL2, ctrl2);
++
++ ast_phy_write(PHY_I3C_OD_CTRL3, FIELD_PREP(PHY_I3C_OD_CTRL3_HD_DAT, hcnt) |
++ FIELD_PREP(PHY_I3C_OD_CTRL3_AHD_DAT, lcnt));
++
++ hcnt = DIV_ROUND_CLOSEST(PHY_I3C_OD_DEFAULT_CAS_NS, core_period) - 1;
++ lcnt = DIV_ROUND_CLOSEST(PHY_I3C_OD_DEFAULT_CBP_NS, core_period) - 1;
++ ast_phy_write(PHY_I3C_OD_CTRL0, FIELD_PREP(PHY_I3C_OD_CTRL0_CAS, hcnt) |
++ FIELD_PREP(PHY_I3C_OD_CTRL0_CBP, lcnt));
++ if (internal_pu)
++ ast_phy_write(PHY_SW_FORCE_CTRL,
++ PHY_SW_FORCE_CTRL_SCL_PU_EN | PHY_SW_FORCE_CTRL_SDA_PU_EN |
++ FIELD_PREP(PHY_SW_FORCE_CTRL_SCL_PU_VAL, internal_pu) |
++ FIELD_PREP(PHY_SW_FORCE_CTRL_SDA_PU_VAL, internal_pu));
++}
++#endif
+
+ static inline struct i3c_hci *to_i3c_hci(struct i3c_master_controller *m)
+ {
+@@ -130,6 +298,16 @@ static int i3c_hci_bus_init(struct i3c_master_controller *m)
+ int ret;
+
+ DBG("");
++ dev_info(&hci->master.dev, "Master Mode");
++
++#ifdef CONFIG_ARCH_ASPEED
++ ast_inhouse_write(ASPEED_I3C_CTRL,
++ ASPEED_I3C_CTRL_INIT |
++ FIELD_PREP(ASPEED_I3C_CTRL_INIT_MODE,
++ INIT_MST_MODE));
++ aspeed_i3c_phy_init(hci);
++ aspeed_i3c_of_populate_bus_timing(hci, m->dev.of_node);
++#endif
+
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
+ ret = mipi_i3c_hci_dat_v1.init(hci);
+@@ -144,6 +322,16 @@ static int i3c_hci_bus_init(struct i3c_master_controller *m)
+ MASTER_DYNAMIC_ADDR(ret) | MASTER_DYNAMIC_ADDR_VALID);
+ memset(&info, 0, sizeof(info));
+ info.dyn_addr = ret;
++ if (hci->caps & HC_CAP_HDR_DDR_EN)
++ info.hdr_cap |= BIT(I3C_HDR_DDR);
++ if (hci->caps & HC_CAP_HDR_TS_EN) {
++ if (reg_read(HC_CONTROL) & HC_CONTROL_I2C_TARGET_PRESENT)
++ info.hdr_cap |= BIT(I3C_HDR_TSL);
++ else
++ info.hdr_cap |= BIT(I3C_HDR_TSP);
++ }
++ if (hci->caps & HC_CAP_HDR_BT_EN)
++ info.hdr_cap |= BIT(I3C_HDR_BT);
+ ret = i3c_master_set_info(m, &info);
+ if (ret)
+ return ret;
+@@ -161,25 +349,74 @@ static int i3c_hci_bus_init(struct i3c_master_controller *m)
+ static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
+ {
+ struct i3c_hci *hci = to_i3c_hci(m);
++ struct platform_device *pdev = to_platform_device(m->dev.parent);
+
+ DBG("");
+
+ reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
++ synchronize_irq(platform_get_irq(pdev, 0));
+ hci->io->cleanup(hci);
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1)
+ mipi_i3c_hci_dat_v1.cleanup(hci);
+ }
+
++static int i3c_hci_bus_reset(struct i3c_master_controller *m)
++{
++ struct i3c_hci *hci = to_i3c_hci(m);
++ struct hci_xfer *xfer;
++ DECLARE_COMPLETION_ONSTACK(done);
++ int ret;
++
++ xfer = hci_alloc_xfer(1);
++ if (!xfer)
++ return -ENOMEM;
++ if (hci->master.bus.context == I3C_BUS_CONTEXT_JESD403)
++ hci->cmd->prep_internal(hci, xfer, M_SUB_CMD_REC_RST_PROC,
++ REC_PROC_TIMED_RST);
++ else
++ hci->cmd->prep_internal(hci, xfer, M_SUB_CMD_TARGET_RST_PATTERN,
++ RST_OP_TARGET_RST);
++ xfer[0].completion = &done;
++
++ ret = hci->io->queue_xfer(hci, xfer, 1);
++ if (ret)
++ goto out;
++ if (!wait_for_completion_timeout(&done, HZ) &&
++ hci->io->dequeue_xfer(hci, xfer, 1)) {
++ ret = -ETIME;
++ goto out;
++ }
++out:
++ hci_free_xfer(xfer, 1);
++ return ret;
++}
++
++void mipi_i3c_hci_iba_ctrl(struct i3c_hci *hci, bool enable)
++{
++ DBG("%s IBA\n", enable ? "ENABLE" : "DISABLE");
++ reg_write(HC_CONTROL,
++ enable ? reg_read(HC_CONTROL) | HC_CONTROL_IBA_INCLUDE :
++ reg_read(HC_CONTROL) & ~HC_CONTROL_IBA_INCLUDE);
++}
++
++void mipi_i3c_hci_hj_ctrl(struct i3c_hci *hci, bool ack_nack)
++{
++ DBG("%s Hot-join requeset\n", ack_nack ? "ACK" : "NACK");
++ reg_write(HC_CONTROL,
++ ack_nack ? reg_read(HC_CONTROL) & ~HC_CONTROL_HOT_JOIN_CTRL :
++ reg_read(HC_CONTROL) | HC_CONTROL_HOT_JOIN_CTRL);
++}
++
+ void mipi_i3c_hci_resume(struct i3c_hci *hci)
+ {
+- /* the HC_CONTROL_RESUME bit is R/W1C so just read and write back */
+- reg_write(HC_CONTROL, reg_read(HC_CONTROL));
++ reg_set(HC_CONTROL, HC_CONTROL_RESUME);
+ }
+
+ /* located here rather than pio.c because needed bits are in core reg space */
+ void mipi_i3c_hci_pio_reset(struct i3c_hci *hci)
+ {
+- reg_write(RESET_CONTROL, RX_FIFO_RST | TX_FIFO_RST | RESP_QUEUE_RST);
++ reg_write(RESET_CONTROL,
++ RX_FIFO_RST | TX_FIFO_RST | RESP_QUEUE_RST | CMD_QUEUE_RST);
+ }
+
+ /* located here rather than dct.c because needed bits are in core reg space */
+@@ -188,6 +425,28 @@ void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci)
+ reg_write(DCT_SECTION, FIELD_PREP(DCT_TABLE_INDEX, 0));
+ }
+
++static int i3c_hci_enable_hotjoin(struct i3c_master_controller *m)
++{
++ struct i3c_hci *hci = to_i3c_hci(m);
++ int ret = 0;
++
++ if (hci->io->request_hj)
++ ret = hci->io->request_hj(hci);
++ mipi_i3c_hci_hj_ctrl(hci, true);
++
++ return ret;
++}
++
++static int i3c_hci_disable_hotjoin(struct i3c_master_controller *m)
++{
++ struct i3c_hci *hci = to_i3c_hci(m);
++
++ if (hci->io->free_hj)
++ hci->io->free_hj(hci);
++ mipi_i3c_hci_hj_ctrl(hci, false);
++ return 0;
++}
++
+ static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *ccc)
+ {
+@@ -199,8 +458,9 @@ static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
+ DECLARE_COMPLETION_ONSTACK(done);
+ int i, last, ret = 0;
+
+- DBG("cmd=%#x rnw=%d ndests=%d data[0].len=%d",
+- ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);
++ DBG("cmd=%#x rnw=%d dbp=%d db=%#x ndests=%d data[0].len=%d", ccc->id,
++ ccc->rnw, ccc->dbp, ccc->db, ccc->ndests,
++ ccc->dests[0].payload.len);
+
+ xfer = hci_alloc_xfer(nxfers);
+ if (!xfer)
+@@ -210,8 +470,8 @@ static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
+ xfer->data = NULL;
+ xfer->data_len = 0;
+ xfer->rnw = false;
+- hci->cmd->prep_ccc(hci, xfer, I3C_BROADCAST_ADDR,
+- ccc->id, true);
++ hci->cmd->prep_ccc(hci, xfer, I3C_BROADCAST_ADDR, ccc->id,
++ ccc->dbp, ccc->db, true);
+ xfer++;
+ }
+
+@@ -220,7 +480,7 @@ static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
+ xfer[i].data_len = ccc->dests[i].payload.len;
+ xfer[i].rnw = ccc->rnw;
+ ret = hci->cmd->prep_ccc(hci, &xfer[i], ccc->dests[i].addr,
+- ccc->id, raw);
++ ccc->id, ccc->dbp, ccc->db, raw);
+ if (ret)
+ goto out;
+ xfer[i].cmd_desc[0] |= CMD_0_ROC;
+@@ -245,7 +505,12 @@ static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
+ ccc->dests[i - prefixed].payload.len =
+ RESP_DATA_LENGTH(xfer[i].response);
+ if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
+- ret = -EIO;
++ DBG("resp status = %lx", RESP_STATUS(xfer[i].response));
++ if (RESP_STATUS(xfer[i].response) ==
++ RESP_ERR_ADDR_HEADER)
++ ret = I3C_ERROR_M2;
++ else
++ ret = -EIO;
+ goto out;
+ }
+ }
+@@ -318,6 +583,8 @@ static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
+ if (i3c_xfers[i].rnw)
+ i3c_xfers[i].len = RESP_DATA_LENGTH(xfer[i].response);
+ if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
++ dev_err(&hci->master.dev, "resp status = %lx",
++ RESP_STATUS(xfer[i].response));
+ ret = -EIO;
+ goto out;
+ }
+@@ -328,6 +595,71 @@ static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
+ return ret;
+ }
+
++static int i3c_hci_send_hdr_cmds(struct i3c_dev_desc *dev,
++ struct i3c_hdr_cmd *cmds, int ncmds)
++{
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct i3c_hci *hci = to_i3c_hci(m);
++ struct hci_xfer *xfer;
++ DECLARE_COMPLETION_ONSTACK(done);
++ int i, last, ret = 0, ntxwords = 0, nrxwords = 0;
++
++ DBG("ncmds = %d", ncmds);
++
++ for (i = 0; i < ncmds; i++) {
++ DBG("cmds[%d] mode = %x", i, cmds[i].mode);
++ if (!(BIT(cmds[i].mode) & m->this->info.hdr_cap))
++ return -EOPNOTSUPP;
++ if (cmds[i].code & 0x80)
++ nrxwords += DIV_ROUND_UP(cmds[i].ndatawords, 2);
++ else
++ ntxwords += DIV_ROUND_UP(cmds[i].ndatawords, 2);
++ }
++
++ xfer = hci_alloc_xfer(ncmds);
++ if (!xfer)
++ return -ENOMEM;
++
++ for (i = 0; i < ncmds; i++) {
++ xfer[i].data_len = cmds[i].ndatawords << 1;
++
++ xfer[i].rnw = cmds[i].code & 0x80 ? 1 : 0;
++ if (xfer[i].rnw)
++ xfer[i].data = cmds[i].data.in;
++ else
++ xfer[i].data = (void *)cmds[i].data.out;
++ hci->cmd->prep_hdr(hci, &xfer[i], dev->info.dyn_addr, cmds[i].code, cmds[i].mode);
++
++ xfer[i].cmd_desc[0] |= CMD_0_ROC;
++ }
++ last = i - 1;
++ xfer[last].cmd_desc[0] |= CMD_0_TOC;
++ xfer[last].completion = &done;
++
++ ret = hci->io->queue_xfer(hci, xfer, ncmds);
++ if (ret)
++ goto hdr_out;
++ if (!wait_for_completion_timeout(&done, HZ) &&
++ hci->io->dequeue_xfer(hci, xfer, ncmds)) {
++ ret = -ETIME;
++ goto hdr_out;
++ }
++ for (i = 0; i < ncmds; i++) {
++ if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
++ dev_err(&hci->master.dev, "resp status = %lx",
++ RESP_STATUS(xfer[i].response));
++ ret = -EIO;
++ goto hdr_out;
++ }
++ if (cmds[i].code & 0x80)
++ cmds[i].ndatawords = DIV_ROUND_UP(RESP_DATA_LENGTH(xfer[i].response), 2);
++ }
++
++hdr_out:
++ hci_free_xfer(xfer, ncmds);
++ return ret;
++}
++
+ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
+ const struct i2c_msg *i2c_xfers, int nxfers)
+ {
+@@ -364,6 +696,8 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
+ }
+ for (i = 0; i < nxfers; i++) {
+ if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
++ dev_err(&hci->master.dev, "resp status = %lx",
++ RESP_STATUS(xfer[i].response));
+ ret = -EIO;
+ goto out;
+ }
+@@ -387,12 +721,18 @@ static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev)
+ if (!dev_data)
+ return -ENOMEM;
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
++#ifdef CONFIG_ARCH_ASPEED
++ ret = mipi_i3c_hci_dat_v1.alloc_entry(hci,
++ dev->info.dyn_addr ?: dev->info.static_addr);
++#else
+ ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
++#endif
+ if (ret < 0) {
+ kfree(dev_data);
+ return ret;
+ }
+- mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, ret, dev->info.dyn_addr);
++ mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, ret,
++ dev->info.dyn_addr ?: dev->info.static_addr);
+ dev_data->dat_idx = ret;
+ }
+ i3c_dev_set_master_data(dev, dev_data);
+@@ -410,6 +750,9 @@ static int i3c_hci_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr)
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1)
+ mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dev_data->dat_idx,
+ dev->info.dyn_addr);
++#ifdef CONFIG_ARCH_ASPEED
++ dev_data->dat_idx = dev->info.dyn_addr;
++#endif
+ return 0;
+ }
+
+@@ -441,7 +784,11 @@ static int i3c_hci_attach_i2c_dev(struct i2c_dev_desc *dev)
+ dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+- ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
++ #ifdef CONFIG_ARCH_ASPEED
++ ret = mipi_i3c_hci_dat_v1.alloc_entry(hci, dev->addr);
++ #else
++ ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
++ #endif
+ if (ret < 0) {
+ kfree(dev_data);
+ return ret;
+@@ -524,8 +871,10 @@ static void i3c_hci_recycle_ibi_slot(struct i3c_dev_desc *dev,
+ static const struct i3c_master_controller_ops i3c_hci_ops = {
+ .bus_init = i3c_hci_bus_init,
+ .bus_cleanup = i3c_hci_bus_cleanup,
++ .bus_reset = i3c_hci_bus_reset,
+ .do_daa = i3c_hci_daa,
+ .send_ccc_cmd = i3c_hci_send_ccc_cmd,
++ .send_hdr_cmds = i3c_hci_send_hdr_cmds,
+ .priv_xfers = i3c_hci_priv_xfers,
+ .i2c_xfers = i3c_hci_i2c_xfers,
+ .attach_i3c_dev = i3c_hci_attach_i3c_dev,
+@@ -538,6 +887,269 @@ static const struct i3c_master_controller_ops i3c_hci_ops = {
+ .enable_ibi = i3c_hci_enable_ibi,
+ .disable_ibi = i3c_hci_disable_ibi,
+ .recycle_ibi_slot = i3c_hci_recycle_ibi_slot,
++ .enable_hotjoin = i3c_hci_enable_hotjoin,
++ .disable_hotjoin = i3c_hci_disable_hotjoin,
++};
++
++static int ast2700_i3c_target_bus_init(struct i3c_master_controller *m)
++{
++ struct i3c_hci *hci = to_i3c_hci(m);
++ struct i3c_dev_desc *desc = hci->master.this;
++ u32 reg;
++ int ret;
++
++ dev_info(&hci->master.dev, "Secondary master Mode");
++
++ ast_inhouse_write(ASPEED_I3C_SLV_PID_LO, SLV_PID_LO(desc->info.pid));
++ ast_inhouse_write(ASPEED_I3C_SLV_PID_HI, SLV_PID_HI(desc->info.pid));
++
++ desc->info.bcr = I3C_BCR_DEVICE_ROLE(I3C_BCR_I3C_MASTER) |
++ I3C_BCR_HDR_CAP | I3C_BCR_IBI_PAYLOAD |
++ I3C_BCR_IBI_REQ_CAP;
++ reg = FIELD_PREP(ASPEED_I3C_SLV_CHAR_CTRL_DCR, desc->info.dcr) |
++ FIELD_PREP(ASPEED_I3C_SLV_CHAR_CTRL_BCR, desc->info.bcr);
++ if (desc->info.static_addr) {
++ reg |= ASPEED_I3C_SLV_CHAR_CTRL_STATIC_ADDR_EN |
++ FIELD_PREP(ASPEED_I3C_SLV_CHAR_CTRL_STATIC_ADDR,
++ desc->info.static_addr);
++ }
++ ast_inhouse_write(ASPEED_I3C_SLV_CHAR_CTRL, reg);
++ reg = ast_inhouse_read(ASPEED_I3C_SLV_CAP_CTRL);
++ /* Make slave will sned the ibi when bus idle */
++ ast_inhouse_write(ASPEED_I3C_SLV_CAP_CTRL,
++ reg | ASPEED_I3C_SLV_CAP_CTRL_IBI_WAIT |
++ ASPEED_I3C_SLV_CAP_CTRL_HJ_WAIT);
++ if (hci->caps & HC_CAP_HDR_DDR_EN)
++ desc->info.hdr_cap |= BIT(I3C_HDR_DDR);
++ if (hci->caps & HC_CAP_HDR_TS_EN) {
++ if (reg_read(HC_CONTROL) & HC_CONTROL_I2C_TARGET_PRESENT)
++ desc->info.hdr_cap |= BIT(I3C_HDR_TSL);
++ else
++ desc->info.hdr_cap |= BIT(I3C_HDR_TSP);
++ }
++ if (hci->caps & HC_CAP_HDR_BT_EN)
++ desc->info.hdr_cap |= BIT(I3C_HDR_BT);
++ ast_inhouse_write(ASPEED_I3C_SLV_STS8_GETCAPS_TGT, desc->info.hdr_cap);
++ ast_inhouse_write(ASPEED_I3C_CTRL,
++ ASPEED_I3C_CTRL_INIT |
++ FIELD_PREP(ASPEED_I3C_CTRL_INIT_MODE,
++ INIT_SEC_MST_MODE));
++
++ init_completion(&hci->ibi_comp);
++ init_completion(&hci->pending_r_comp);
++ ret = hci->io->init(hci);
++ if (ret)
++ return ret;
++
++ reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
++ DBG("HC_CONTROL = %#x", reg_read(HC_CONTROL));
++
++ return 0;
++}
++
++static void ast2700_i3c_target_bus_cleanup(struct i3c_master_controller *m)
++{
++ struct i3c_hci *hci = to_i3c_hci(m);
++
++ DBG("");
++
++ reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
++ hci->io->cleanup(hci);
++ kfree(hci->target_rx.buf);
++}
++
++static struct hci_xfer *
++ast2700_i3c_target_priv_xfers(struct i3c_dev_desc *dev,
++ struct i3c_priv_xfer *i3c_xfers, int nxfers,
++ unsigned int tid)
++{
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct i3c_hci *hci = to_i3c_hci(m);
++ struct hci_xfer *xfer;
++ unsigned int size_limit;
++ int i, ret = 0;
++
++ DBG("nxfers = %d", nxfers);
++
++ xfer = hci_alloc_xfer(nxfers);
++ if (!xfer)
++ return xfer;
++
++ size_limit = 1U << (16 + FIELD_GET(HC_CAP_MAX_DATA_LENGTH, hci->caps));
++
++ for (i = 0; i < nxfers; i++) {
++ if (!i3c_xfers[i].rnw) {
++ xfer[i].data_len = i3c_xfers[i].len;
++ xfer[i].rnw = i3c_xfers[i].rnw;
++ xfer[i].data = (void *)i3c_xfers[i].data.out;
++ xfer[i].cmd_tid = tid;
++ hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]);
++ } else {
++ dev_err(&hci->master.dev,
++ "target mode can't do priv_read command\n");
++ }
++ }
++ ret = hci->io->queue_xfer(hci, xfer, nxfers);
++ if (ret) {
++ dev_err(&hci->master.dev, "queue xfer error %d", ret);
++ hci_free_xfer(xfer, nxfers);
++ return NULL;
++ }
++
++ return xfer;
++}
++
++int ast2700_i3c_target_put_rdata(struct i3c_dev_desc *dev,
++ struct i3c_priv_xfer *i3c_xfers, int nxfers)
++{
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct i3c_hci *hci = to_i3c_hci(m);
++ struct hci_xfer *read_xfer;
++
++ reinit_completion(&hci->pending_r_comp);
++ read_xfer = ast2700_i3c_target_priv_xfers(dev, i3c_xfers, nxfers,
++ TID_TARGET_RD_DATA);
++ if (!read_xfer)
++ return -EINVAL;
++
++ if (!wait_for_completion_interruptible_timeout(&hci->pending_r_comp,
++ msecs_to_jiffies(1000))) {
++ dev_warn(&hci->master.dev, "timeout waiting for master read\n");
++ mipi_i3c_hci_pio_reset(hci);
++ return -EINVAL;
++ }
++ hci_free_xfer(read_xfer, 1);
++
++ return 0;
++}
++
++static int ast2700_i3c_target_generate_ibi(struct i3c_dev_desc *dev, const u8 *data, int len)
++{
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct i3c_hci *hci = to_i3c_hci(m);
++ u32 reg;
++
++ if (data || len != 0)
++ return -EOPNOTSUPP;
++
++ DBG("");
++
++ reg = ast_inhouse_read(ASPEED_I3C_SLV_STS1);
++ if ((reg & ASPEED_I3C_SLV_STS1_IBI_EN) == 0)
++ return -EPERM;
++
++ reinit_completion(&hci->ibi_comp);
++ reg = ast_inhouse_read(ASPEED_I3C_SLV_CAP_CTRL);
++ ast_inhouse_write(ASPEED_I3C_SLV_CAP_CTRL,
++ reg | ASPEED_I3C_SLV_CAP_CTRL_IBI_REQ);
++
++ if (!wait_for_completion_timeout(&hci->ibi_comp,
++ msecs_to_jiffies(1000))) {
++ dev_warn(&hci->master.dev, "timeout waiting for completion\n");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
++static int ast2700_i3c_target_hj_req(struct i3c_dev_desc *dev)
++{
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct i3c_hci *hci = to_i3c_hci(m);
++ u32 reg;
++ int ret;
++
++ DBG("");
++
++ reg = ast_inhouse_read(ASPEED_I3C_STS);
++ if ((reg & ASPEED_I3C_STS_SLV_DYNAMIC_ADDRESS_VALID))
++ return -EINVAL;
++
++ reg = ast_inhouse_read(ASPEED_I3C_SLV_STS1);
++ if (!(reg & ASPEED_I3C_SLV_STS1_HJ_EN))
++ return -EINVAL;
++
++ reg = ast_inhouse_read(ASPEED_I3C_SLV_CAP_CTRL);
++ ast_inhouse_write(ASPEED_I3C_SLV_CAP_CTRL,
++ reg | ASPEED_I3C_SLV_CAP_CTRL_HJ_REQ);
++ ret = readx_poll_timeout(ast_inhouse_read, ASPEED_I3C_SLV_CAP_CTRL, reg,
++ !(reg & ASPEED_I3C_SLV_CAP_CTRL_HJ_REQ), 0,
++ 1000000);
++ if (ret) {
++ dev_warn(&hci->master.dev, "timeout waiting for completion\n");
++ return ret;
++ }
++
++ return 0;
++}
++
++static int
++ast2700_i3c_target_pending_read_notify(struct i3c_dev_desc *dev,
++ struct i3c_priv_xfer *pending_read,
++ struct i3c_priv_xfer *ibi_notify)
++{
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct i3c_hci *hci = to_i3c_hci(m);
++ struct hci_xfer *ibi_xfer, *pending_read_xfer;
++ u32 reg;
++
++ if (!pending_read || !ibi_notify)
++ return -EINVAL;
++
++ reg = ast_inhouse_read(ASPEED_I3C_SLV_STS1);
++ if ((reg & ASPEED_I3C_SLV_STS1_IBI_EN) == 0)
++ return -EPERM;
++ reinit_completion(&hci->pending_r_comp);
++ ibi_xfer = ast2700_i3c_target_priv_xfers(dev, ibi_notify, 1,
++ TID_TARGET_IBI);
++ if (!ibi_xfer)
++ return -EINVAL;
++ pending_read_xfer = ast2700_i3c_target_priv_xfers(dev, pending_read, 1,
++ TID_TARGET_RD_DATA);
++ if (!pending_read_xfer)
++ return -EINVAL;
++ ast2700_i3c_target_generate_ibi(dev, NULL, 0);
++ hci_free_xfer(ibi_xfer, 1);
++ if (!wait_for_completion_timeout(&hci->pending_r_comp,
++ msecs_to_jiffies(1000))) {
++ dev_warn(&hci->master.dev, "timeout waiting for master read\n");
++ mipi_i3c_hci_pio_reset(hci);
++ return -EINVAL;
++ }
++ hci_free_xfer(pending_read_xfer, 1);
++
++ return 0;
++}
++
++static bool ast2700_i3c_target_is_ibi_enabled(struct i3c_dev_desc *dev)
++{
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct i3c_hci *hci = to_i3c_hci(m);
++ u32 reg;
++
++ reg = ast_inhouse_read(ASPEED_I3C_SLV_STS1);
++ return !!(reg & ASPEED_I3C_SLV_STS1_IBI_EN);
++}
++
++static bool ast2700_i3c_target_is_hj_enabled(struct i3c_dev_desc *dev)
++{
++ struct i3c_master_controller *m = i3c_dev_get_master(dev);
++ struct i3c_hci *hci = to_i3c_hci(m);
++ u32 reg;
++
++ reg = ast_inhouse_read(ASPEED_I3C_SLV_STS1);
++ return !!(reg & ASPEED_I3C_SLV_STS1_HJ_EN);
++}
++
++static const struct i3c_target_ops ast2700_i3c_target_ops = {
++ .bus_init = ast2700_i3c_target_bus_init,
++ .bus_cleanup = ast2700_i3c_target_bus_cleanup,
++ .hj_req = ast2700_i3c_target_hj_req,
++ .priv_xfers = ast2700_i3c_target_put_rdata,
++ .generate_ibi = ast2700_i3c_target_generate_ibi,
++ .pending_read_notify = ast2700_i3c_target_pending_read_notify,
++ .is_ibi_enabled = ast2700_i3c_target_is_ibi_enabled,
++ .is_hj_enabled = ast2700_i3c_target_is_hj_enabled,
+ };
+
+ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
+@@ -564,16 +1176,50 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
+ dev_err(&hci->master.dev, "Host Controller Internal Error\n");
+ val &= ~INTR_HC_INTERNAL_ERR;
+ }
+- if (val & INTR_HC_PIO) {
++ if (val)
++ dev_err(&hci->master.dev, "unexpected INTR_STATUS %#x\n", val);
++ else
++ result = IRQ_HANDLED;
++
++ return result;
++}
++
++static irqreturn_t i3c_aspeed_irq_handler(int irqn, void *dev_id)
++{
++ struct i3c_hci *hci = dev_id;
++ u32 val, inhouse_val;
++ irqreturn_t result = IRQ_NONE;
++
++ val = ast_inhouse_read(ASPEED_I3C_INTR_SUM_STATUS);
++ DBG("Global INTR_STATUS = %#x\n", val);
++
++ if (val & ASPEED_INTR_SUM_CAP) {
++ i3c_hci_irq_handler(irqn, dev_id);
++ val &= ~ASPEED_INTR_SUM_CAP;
++ }
++ if (val & ASPEED_INTR_SUM_PIO) {
+ hci->io->irq_handler(hci, 0);
+- val &= ~INTR_HC_PIO;
++ val &= ~ASPEED_INTR_SUM_PIO;
+ }
+- if (val & INTR_HC_RINGS) {
+- hci->io->irq_handler(hci, val & INTR_HC_RINGS);
+- val &= ~INTR_HC_RINGS;
++ if (val & ASPEED_INTR_SUM_RHS) {
++ /*
++ * ASPEED only has one ring, and HCI v1.2 doesn't have a register to indicate which
++ * ring has the interrupt.
++ */
++ hci->io->irq_handler(hci, 1);
++ val &= ~ASPEED_INTR_SUM_RHS;
+ }
++ if (val & ASPEED_INTR_SUM_INHOUSE) {
++ inhouse_val = ast_inhouse_read(ASPEED_I3C_INTR_STATUS);
++ DBG("Inhouse INTR_STATUS = %#x/%#x\n", inhouse_val,
++ ast_inhouse_read(ASPEED_I3C_INTR_SIGNAL_ENABLE));
++ ast_inhouse_write(ASPEED_I3C_INTR_STATUS, inhouse_val);
++ val &= ~ASPEED_INTR_SUM_INHOUSE;
++ }
++
+ if (val)
+- dev_err(&hci->master.dev, "unexpected INTR_STATUS %#x\n", val);
++ dev_err(&hci->master.dev, "unexpected INTR_SUN_STATUS %#x\n",
++ val);
+ else
+ result = IRQ_HANDLED;
+
+@@ -604,23 +1250,23 @@ static int i3c_hci_init(struct i3c_hci *hci)
+ }
+
+ hci->caps = reg_read(HC_CAPABILITIES);
+- DBG("caps = %#x", hci->caps);
++ dev_info(&hci->master.dev, "caps = %#x", hci->caps);
+
+ regval = reg_read(DAT_SECTION);
+ offset = FIELD_GET(DAT_TABLE_OFFSET, regval);
+ hci->DAT_regs = offset ? hci->base_regs + offset : NULL;
+ hci->DAT_entries = FIELD_GET(DAT_TABLE_SIZE, regval);
+- hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval);
++ hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval) ? 0 : 8;
+ dev_info(&hci->master.dev, "DAT: %u %u-bytes entries at offset %#x\n",
+- hci->DAT_entries, hci->DAT_entry_size * 4, offset);
++ hci->DAT_entries, hci->DAT_entry_size, offset);
+
+ regval = reg_read(DCT_SECTION);
+ offset = FIELD_GET(DCT_TABLE_OFFSET, regval);
+ hci->DCT_regs = offset ? hci->base_regs + offset : NULL;
+ hci->DCT_entries = FIELD_GET(DCT_TABLE_SIZE, regval);
+- hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval);
++ hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval) ? 0 : 16;
+ dev_info(&hci->master.dev, "DCT: %u %u-bytes entries at offset %#x\n",
+- hci->DCT_entries, hci->DCT_entry_size * 4, offset);
++ hci->DCT_entries, hci->DCT_entry_size, offset);
+
+ regval = reg_read(RING_HEADERS_SECTION);
+ offset = FIELD_GET(RING_HEADERS_OFFSET, regval);
+@@ -659,6 +1305,10 @@ static int i3c_hci_init(struct i3c_hci *hci)
+ /* Disable all interrupts and allow all signal updates */
+ reg_write(INTR_SIGNAL_ENABLE, 0x0);
+ reg_write(INTR_STATUS_ENABLE, 0xffffffff);
++#ifdef CONFIG_ARCH_ASPEED
++ ast_inhouse_write(ASPEED_I3C_INTR_SIGNAL_ENABLE, 0);
++ ast_inhouse_write(ASPEED_I3C_INTR_STATUS_ENABLE, 0xffffffff);
++#endif
+
+ /* Make sure our data ordering fits the host's */
+ regval = reg_read(HC_CONTROL);
+@@ -703,8 +1353,13 @@ static int i3c_hci_init(struct i3c_hci *hci)
+ if (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE) {
+ dev_err(&hci->master.dev, "PIO mode is stuck\n");
+ ret = -EIO;
++ } else if (!hci->dma_rst) {
++ dev_err(&hci->master.dev,
++ "missing or invalid i3c dma reset controller device tree entry\n");
++ ret = -EIO;
+ } else {
+ hci->io = &mipi_i3c_hci_dma;
++ reset_control_deassert(hci->dma_rst);
+ dev_info(&hci->master.dev, "Using DMA\n");
+ }
+ }
+@@ -731,6 +1386,14 @@ static int i3c_hci_init(struct i3c_hci *hci)
+ return 0;
+ }
+
++static void i3c_hci_hj_work(struct work_struct *work)
++{
++ struct i3c_hci *hci;
++
++ hci = container_of(work, struct i3c_hci, hj_work);
++ i3c_master_do_daa(&hci->master);
++}
++
+ static int i3c_hci_probe(struct platform_device *pdev)
+ {
+ struct i3c_hci *hci;
+@@ -747,20 +1410,49 @@ static int i3c_hci_probe(struct platform_device *pdev)
+ /* temporary for dev_printk's, to be replaced in i3c_master_register */
+ hci->master.dev.init_name = dev_name(&pdev->dev);
+
++ hci->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
++ if (IS_ERR(hci->rst)) {
++ dev_err(&pdev->dev,
++ "missing or invalid reset controller device tree entry");
++ return PTR_ERR(hci->rst);
++ }
++ reset_control_assert(hci->rst);
++ reset_control_deassert(hci->rst);
++
++ hci->dma_rst = devm_reset_control_get_shared_by_index(&pdev->dev, 1);
++ if (IS_ERR(hci->dma_rst))
++ hci->dma_rst = NULL;
++
++ hci->clk = devm_clk_get(&pdev->dev, NULL);
++ if (IS_ERR(hci->clk)) {
++ dev_err(&pdev->dev,
++ "missing or invalid clock controller device tree entry");
++ return PTR_ERR(hci->clk);
++ }
++
++ ret = clk_prepare_enable(hci->clk);
++ if (ret) {
++ dev_err(&pdev->dev, "Unable to enable i3c clock.\n");
++ return ret;
++ }
++
+ ret = i3c_hci_init(hci);
+ if (ret)
+ return ret;
+
+ irq = platform_get_irq(pdev, 0);
+- ret = devm_request_irq(&pdev->dev, irq, i3c_hci_irq_handler,
++ ret = devm_request_irq(&pdev->dev, irq, i3c_aspeed_irq_handler,
+ 0, NULL, hci);
+ if (ret)
+ return ret;
+
+- ret = i3c_master_register(&hci->master, &pdev->dev,
+- &i3c_hci_ops, false);
++ INIT_WORK(&hci->hj_work, i3c_hci_hj_work);
++ ret = i3c_register(&hci->master, &pdev->dev, &i3c_hci_ops,
++ &ast2700_i3c_target_ops, false);
+ if (ret)
+ return ret;
++ if (!hci->master.target && hci->master.bus.context != I3C_BUS_CONTEXT_JESD403)
++ mipi_i3c_hci_iba_ctrl(hci, true);
+
+ return 0;
+ }
+@@ -769,11 +1461,12 @@ static void i3c_hci_remove(struct platform_device *pdev)
+ {
+ struct i3c_hci *hci = platform_get_drvdata(pdev);
+
+- i3c_master_unregister(&hci->master);
++ i3c_unregister(&hci->master);
+ }
+
+ static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
+ { .compatible = "mipi-i3c-hci", },
++ { .compatible = "aspeed-i3c-hci", },
+ {},
+ };
+ MODULE_DEVICE_TABLE(of, i3c_hci_of_match);
+@@ -787,6 +1480,7 @@ static struct platform_driver i3c_hci_driver = {
+ },
+ };
+ module_platform_driver(i3c_hci_driver);
++MODULE_ALIAS("platform:mipi-i3c-hci");
+
+ MODULE_AUTHOR("Nicolas Pitre <npitre@baylibre.com>");
+ MODULE_DESCRIPTION("MIPI I3C HCI driver");
+diff --git a/drivers/i3c/master/mipi-i3c-hci/dat.h b/drivers/i3c/master/mipi-i3c-hci/dat.h
+index 1f0f345c3..73a61f5fe 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/dat.h
++++ b/drivers/i3c/master/mipi-i3c-hci/dat.h
+@@ -18,7 +18,11 @@
+ struct hci_dat_ops {
+ int (*init)(struct i3c_hci *hci);
+ void (*cleanup)(struct i3c_hci *hci);
++#ifdef CONFIG_ARCH_ASPEED
++ int (*alloc_entry)(struct i3c_hci *hci, unsigned int address);
++#else
+ int (*alloc_entry)(struct i3c_hci *hci);
++#endif
+ void (*free_entry)(struct i3c_hci *hci, unsigned int dat_idx);
+ void (*set_dynamic_addr)(struct i3c_hci *hci, unsigned int dat_idx, u8 addr);
+ void (*set_static_addr)(struct i3c_hci *hci, unsigned int dat_idx, u8 addr);
+diff --git a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
+index 47b9b4d4e..62608cdba 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
++++ b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
+@@ -86,6 +86,28 @@ static void hci_dat_v1_cleanup(struct i3c_hci *hci)
+ hci->DAT_data = NULL;
+ }
+
++#ifdef CONFIG_ARCH_ASPEED
++static int hci_dat_v1_alloc_entry(struct i3c_hci *hci, unsigned int address)
++{
++ unsigned int dat_idx;
++ int ret;
++
++ if (!hci->DAT_data) {
++ ret = hci_dat_v1_init(hci);
++ if (ret)
++ return ret;
++ }
++ if (test_bit_acquire(address, hci->DAT_data))
++ return -ENOENT;
++ dat_idx = address;
++ __set_bit(dat_idx, hci->DAT_data);
++
++ /* default flags */
++ dat_w0_write(dat_idx, DAT_0_SIR_REJECT | DAT_0_MR_REJECT);
++
++ return dat_idx;
++}
++#else
+ static int hci_dat_v1_alloc_entry(struct i3c_hci *hci)
+ {
+ unsigned int dat_idx;
+@@ -106,7 +128,7 @@ static int hci_dat_v1_alloc_entry(struct i3c_hci *hci)
+
+ return dat_idx;
+ }
+-
++#endif
+ static void hci_dat_v1_free_entry(struct i3c_hci *hci, unsigned int dat_idx)
+ {
+ dat_w0_write(dat_idx, 0);
+@@ -118,6 +140,16 @@ static void hci_dat_v1_free_entry(struct i3c_hci *hci, unsigned int dat_idx)
+ static void hci_dat_v1_set_dynamic_addr(struct i3c_hci *hci,
+ unsigned int dat_idx, u8 address)
+ {
++#ifdef CONFIG_ARCH_ASPEED
++ if (dat_idx != address) {
++ int ret;
++
++ ret = hci_dat_v1_alloc_entry(hci, address);
++ if (ret < 0)
++ dev_err(&hci->master.dev, "Allocate entry: %d", ret);
++ hci_dat_v1_free_entry(hci, dat_idx);
++ }
++#else
+ u32 dat_w0;
+
+ dat_w0 = dat_w0_read(dat_idx);
+@@ -125,6 +157,7 @@ static void hci_dat_v1_set_dynamic_addr(struct i3c_hci *hci,
+ dat_w0 |= FIELD_PREP(DAT_0_DYNAMIC_ADDRESS, address) |
+ (dynaddr_parity(address) ? DAT_0_DYNADDR_PARITY : 0);
+ dat_w0_write(dat_idx, dat_w0);
++#endif
+ }
+
+ static void hci_dat_v1_set_static_addr(struct i3c_hci *hci,
+@@ -166,6 +199,10 @@ static void hci_dat_v1_clear_flags(struct i3c_hci *hci, unsigned int dat_idx,
+
+ static int hci_dat_v1_get_index(struct i3c_hci *hci, u8 dev_addr)
+ {
++#ifdef CONFIG_ARCH_ASPEED
++ if (test_bit_acquire(dev_addr, hci->DAT_data))
++ return dev_addr;
++#else
+ unsigned int dat_idx;
+ u32 dat_w0;
+
+@@ -174,6 +211,7 @@ static int hci_dat_v1_get_index(struct i3c_hci *hci, u8 dev_addr)
+ if (FIELD_GET(DAT_0_DYNAMIC_ADDRESS, dat_w0) == dev_addr)
+ return dat_idx;
+ }
++#endif
+
+ return -ENODEV;
+ }
+diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
+index edc3a69bf..dc8fe60ad 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
++++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
+@@ -18,7 +18,7 @@
+ #include "hci.h"
+ #include "cmd.h"
+ #include "ibi.h"
+-
++#include "vendor_aspeed.h"
+
+ /*
+ * Software Parameter Values (somewhat arb itrary for now).
+@@ -139,7 +139,7 @@ struct hci_rh_data {
+
+ struct hci_rings_data {
+ unsigned int total;
+- struct hci_rh_data headers[];
++ struct hci_rh_data headers[] __counted_by(total);
+ };
+
+ struct hci_dma_dev_ibi_data {
+@@ -229,6 +229,9 @@ static int hci_dma_init(struct i3c_hci *hci)
+ hci->io_data = rings;
+ rings->total = nr_rings;
+
++ regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
++ rhs_reg_write(CONTROL, regval);
++
+ for (i = 0; i < rings->total; i++) {
+ u32 offset = rhs_reg_read(RHn_OFFSET(i));
+
+@@ -251,6 +254,14 @@ static int hci_dma_init(struct i3c_hci *hci)
+ xfers_sz = rh->xfer_struct_sz * rh->xfer_entries;
+ resps_sz = rh->resp_struct_sz * rh->xfer_entries;
+
++ ret = dma_set_mask_and_coherent(&hci->master.dev,
++ DMA_BIT_MASK(64));
++ if (ret) {
++ dev_err(&hci->master.dev,
++ "cannot set 64-bits DMA mask\n");
++ return ret;
++ }
++
+ rh->xfer = dma_alloc_coherent(&hci->master.dev, xfers_sz,
+ &rh->xfer_dma, GFP_KERNEL);
+ rh->resp = dma_alloc_coherent(&hci->master.dev, resps_sz,
+@@ -306,14 +317,30 @@ static int hci_dma_init(struct i3c_hci *hci)
+ ret = -ENOMEM;
+ if (!rh->ibi_status || !rh->ibi_data)
+ goto err_out;
++ rh_reg_write(IBI_STATUS_RING_BASE_LO, lo32(rh->ibi_status_dma));
++ rh_reg_write(IBI_STATUS_RING_BASE_HI, hi32(rh->ibi_status_dma));
+ rh->ibi_data_dma =
+ dma_map_single(&hci->master.dev, rh->ibi_data,
+ ibi_data_ring_sz, DMA_FROM_DEVICE);
++ if (hci->master.target) {
++ /*
++ * Set max private write length value based on read-only register.
++ * TODO: Handle updates after receiving SETMWL CCC.
++ */
++ hci->target_rx.max_len = ibi_data_ring_sz;
++
++ hci->target_rx.buf = kzalloc(hci->target_rx.max_len, GFP_KERNEL);
++ if (!hci->target_rx.buf)
++ return -ENOMEM;
++ }
++
+ if (dma_mapping_error(&hci->master.dev, rh->ibi_data_dma)) {
+ rh->ibi_data_dma = 0;
+ ret = -ENOMEM;
+ goto err_out;
+ }
++ rh_reg_write(IBI_DATA_RING_BASE_LO, lo32(rh->ibi_data_dma));
++ rh_reg_write(IBI_DATA_RING_BASE_HI, hi32(rh->ibi_data_dma));
+
+ regval = FIELD_PREP(IBI_STATUS_RING_SIZE,
+ rh->ibi_status_entries) |
+@@ -328,11 +355,10 @@ static int hci_dma_init(struct i3c_hci *hci)
+ rh_reg_write(INTR_SIGNAL_ENABLE, regval);
+
+ ring_ready:
+- rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
++ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE |
++ RING_CTRL_RUN_STOP);
+ }
+
+- regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
+- rhs_reg_write(CONTROL, regval);
+ return 0;
+
+ err_out:
+@@ -370,13 +396,18 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
+
+ op1_val = rh_reg_read(RING_OPERATION1);
+ enqueue_ptr = FIELD_GET(RING_OP1_CR_ENQ_PTR, op1_val);
++ DBG("RING_OPERATION1 = %x", op1_val);
+ for (i = 0; i < n; i++) {
+ struct hci_xfer *xfer = xfer_list + i;
+ u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;
+
+ /* store cmd descriptor */
+ *ring_data++ = xfer->cmd_desc[0];
+- *ring_data++ = xfer->cmd_desc[1];
++ DBG("CMD Descriptor[0]=%x", *(ring_data - 1));
++ if (!hci->master.target) {
++ *ring_data++ = xfer->cmd_desc[1];
++ DBG("CMD Descriptor[1]=%x", *(ring_data - 1));
++ }
+ if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
+ *ring_data++ = xfer->cmd_desc[2];
+ *ring_data++ = xfer->cmd_desc[3];
+@@ -389,6 +420,7 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
+ FIELD_PREP(DATA_BUF_BLOCK_SIZE, xfer->data_len) |
+ ((i == n - 1) ? DATA_BUF_IOC : 0);
+
++ DBG("Data Buffer Descriptor[0]=%x", *(ring_data - 1));
+ /* 2nd and 3rd words of Data Buffer Descriptor Structure */
+ if (xfer->data) {
+ xfer->data_dma =
+@@ -409,6 +441,8 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
+ *ring_data++ = 0;
+ *ring_data++ = 0;
+ }
++ DBG("Data Buffer Descriptor[1]=%x [2]=%x", *(ring_data - 2),
++ *(ring_data - 1));
+
+ /* remember corresponding xfer struct */
+ rh->src_xfers[enqueue_ptr] = xfer;
+@@ -423,6 +457,7 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
+ * only if we didn't reach its dequeue pointer.
+ */
+ op2_val = rh_reg_read(RING_OPERATION2);
++ DBG("RING_OPERATION2 = %x", op2_val);
+ if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) {
+ /* the ring is full */
+ hci_dma_unmap_xfer(hci, xfer_list, i + 1);
+@@ -435,8 +470,12 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
+ op1_val = rh_reg_read(RING_OPERATION1);
+ op1_val &= ~RING_OP1_CR_ENQ_PTR;
+ op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
++ DBG("Write RING_OPERATION1 = %x", op1_val);
+ rh_reg_write(RING_OPERATION1, op1_val);
+ spin_unlock_irq(&rh->lock);
++ DBG("INT status = %x enable = %x sig_enable = %x",
++ rh_reg_read(INTR_STATUS), rh_reg_read(INTR_STATUS_ENABLE),
++ rh_reg_read(INTR_SIGNAL_ENABLE));
+
+ return 0;
+ }
+@@ -448,9 +487,11 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
+ struct hci_rh_data *rh = &rings->headers[xfer_list[0].ring_number];
+ unsigned int i;
+ bool did_unqueue = false;
++ u32 ring_ctrl_val;
+
++ ring_ctrl_val = rh_reg_read(RING_CONTROL);
+ /* stop the ring */
+- rh_reg_write(RING_CONTROL, RING_CTRL_ABORT);
++ rh_reg_write(RING_CONTROL, ring_ctrl_val | RING_CTRL_ABORT);
+ if (wait_for_completion_timeout(&rh->op_done, HZ) == 0) {
+ /*
+ * We're deep in it if ever this condition is ever met.
+@@ -491,7 +532,7 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
+ }
+
+ /* restart the ring */
+- rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
++ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | RING_CTRL_RUN_STOP);
+
+ return did_unqueue;
+ }
+@@ -504,6 +545,7 @@ static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
+
+ for (;;) {
+ op2_val = rh_reg_read(RING_OPERATION2);
++ DBG("RING_OPERATION2 = %x, done_ptr = %x", op2_val, done_ptr);
+ if (done_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val))
+ break;
+
+@@ -511,7 +553,27 @@ static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
+ resp = *ring_resp;
+ tid = RESP_TID(resp);
+ DBG("resp = 0x%08x", resp);
++ if (hci->master.target) {
++ DBG(a1_debug_s, TARGET_RESP_STATUS(resp),
++ TARGET_RESP_XFER_TYPE(resp),
++ TARGET_RESP_CCC_INDICATE(resp),
++ TARGET_RESP_TID(resp), TARGET_RESP_CCC_HDR(resp),
++ TARGET_RESP_DATA_LENGTH(resp));
++ /* ibi or master read or HDR read */
++ if (!TARGET_RESP_STATUS(resp) && !TARGET_RESP_CCC_INDICATE(resp)) {
++ if (TARGET_RESP_TID(resp) == TID_TARGET_IBI)
++ complete(&hci->ibi_comp);
++ else if (TARGET_RESP_TID(resp) == TID_TARGET_RD_DATA)
++ complete(&hci->pending_r_comp);
++ }
+
++ if (TARGET_RESP_STATUS(resp) >= TARGET_RESP_ERR_CRC &&
++ TARGET_RESP_STATUS(resp) <= TARGET_RESP_ERR_I2C_READ_TOO_MUCH) {
++ dev_err(&hci->master.dev, "Target Xfer Error: 0x%lx",
++ TARGET_RESP_STATUS(resp));
++ mipi_i3c_hci_resume(hci);
++ }
++ }
+ xfer = rh->src_xfers[done_ptr];
+ if (!xfer) {
+ DBG("orphaned ring entry");
+@@ -528,7 +590,6 @@ static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
+ if (xfer->completion)
+ complete(xfer->completion);
+ }
+-
+ done_ptr = (done_ptr + 1) % rh->xfer_entries;
+ rh->done_ptr = done_ptr;
+ }
+@@ -538,6 +599,7 @@ static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
+ op1_val = rh_reg_read(RING_OPERATION1);
+ op1_val &= ~RING_OP1_CR_SW_DEQ_PTR;
+ op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr);
++ DBG("Write RING_OPERATION1 = %x", op1_val);
+ rh_reg_write(RING_OPERATION1, op1_val);
+ spin_unlock(&rh->lock);
+ }
+@@ -593,8 +655,11 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
+ unsigned int ptr, enq_ptr, deq_ptr;
+ unsigned int ibi_size, ibi_chunks, ibi_data_offset, first_part;
+ int ibi_addr, last_ptr;
++ bool ibi_rnw;
+ void *ring_ibi_data;
+ dma_addr_t ring_ibi_data_dma;
++ u32 ibi_status, *ring_ibi_status;
++ unsigned int chunks;
+
+ op1_val = rh_reg_read(RING_OPERATION1);
+ deq_ptr = FIELD_GET(RING_OP1_IBI_DEQ_PTR, op1_val);
+@@ -602,6 +667,9 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
+ op2_val = rh_reg_read(RING_OPERATION2);
+ enq_ptr = FIELD_GET(RING_OP2_IBI_ENQ_PTR, op2_val);
+
++ DBG("RING_OP1_IBI_DEQ_PTR = %x, RING_OP2_IBI_ENQ_PTR = %x", deq_ptr,
++ enq_ptr);
++
+ ibi_status_error = 0;
+ ibi_addr = -1;
+ ibi_chunks = 0;
+@@ -611,13 +679,27 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
+ /* let's find all we can about this IBI */
+ for (ptr = deq_ptr; ptr != enq_ptr;
+ ptr = (ptr + 1) % rh->ibi_status_entries) {
+- u32 ibi_status, *ring_ibi_status;
+- unsigned int chunks;
+-
+ ring_ibi_status = rh->ibi_status + rh->ibi_status_sz * ptr;
+ ibi_status = *ring_ibi_status;
+- DBG("status = %#x", ibi_status);
+-
++ DBG("ptr = %#x status = %#x", ptr, ibi_status);
++
++ if (hci->master.target) {
++ dev = hci->master.this;
++ size_t nbytes = TARGET_RESP_DATA_LENGTH(ibi_status);
++ DBG(a1_debug_s, TARGET_RESP_STATUS(ibi_status),
++ TARGET_RESP_XFER_TYPE(ibi_status),
++ TARGET_RESP_CCC_INDICATE(ibi_status),
++ TARGET_RESP_TID(ibi_status),
++ TARGET_RESP_CCC_HDR(ibi_status),
++ TARGET_RESP_DATA_LENGTH(ibi_status));
++ if (TARGET_RESP_XFER_TYPE(ibi_status)) {
++ chunks = DIV_ROUND_UP(nbytes, rh->ibi_chunk_sz);
++ ibi_chunks += chunks;
++ ibi_size += nbytes;
++ }
++ last_ptr = ptr;
++ break;
++ }
+ if (ibi_status_error) {
+ /* we no longer care */
+ } else if (ibi_status & IBI_ERROR) {
+@@ -628,6 +710,7 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
+ /* the address changed unexpectedly */
+ ibi_status_error = ibi_status;
+ }
++ ibi_rnw = FIELD_GET(IBI_TARGET_RNW, ibi_status);
+
+ chunks = FIELD_GET(IBI_CHUNKS, ibi_status);
+ ibi_chunks += chunks;
+@@ -649,44 +732,55 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
+ }
+ deq_ptr = last_ptr + 1;
+ deq_ptr %= rh->ibi_status_entries;
++ if (!hci->master.target) {
++ if (ibi_status_error) {
++ dev_err(&hci->master.dev, "IBI error from %#x\n",
++ ibi_addr);
++ goto done;
++ }
++ if (IBI_TYPE_HJ(ibi_addr, ibi_rnw)) {
++ queue_work(hci->master.wq, &hci->hj_work);
++ goto done;
++ } else if (IBI_TYPE_CR(ibi_addr, ibi_rnw)) {
++ dev_info(&hci->master.dev,
++ "get control role requeset from %02x\n",
++ ibi_addr);
++ goto done;
++ }
+
+- if (ibi_status_error) {
+- dev_err(&hci->master.dev, "IBI error from %#x\n", ibi_addr);
+- goto done;
+- }
+-
+- /* determine who this is for */
+- dev = i3c_hci_addr_to_dev(hci, ibi_addr);
+- if (!dev) {
+- dev_err(&hci->master.dev,
+- "IBI for unknown device %#x\n", ibi_addr);
+- goto done;
+- }
++ /* determine who this is for */
++ dev = i3c_hci_addr_to_dev(hci, ibi_addr);
++ if (!dev) {
++ dev_err(&hci->master.dev,
++ "IBI for unknown device %#x\n", ibi_addr);
++ goto done;
++ }
+
+- dev_data = i3c_dev_get_master_data(dev);
+- dev_ibi = dev_data->ibi_data;
+- if (ibi_size > dev_ibi->max_len) {
+- dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
+- ibi_size, dev_ibi->max_len);
+- goto done;
+- }
++ dev_data = i3c_dev_get_master_data(dev);
++ dev_ibi = dev_data->ibi_data;
++ if (ibi_size > dev_ibi->max_len) {
++ dev_err(&hci->master.dev,
++ "IBI payload too big (%d > %d)\n", ibi_size,
++ dev_ibi->max_len);
++ goto done;
++ }
+
+- /*
+- * This ring model is not suitable for zero-copy processing of IBIs.
+- * We have the data chunk ring wrap-around to deal with, meaning
+- * that the payload might span multiple chunks beginning at the
+- * end of the ring and wrap to the start of the ring. Furthermore
+- * there is no guarantee that those chunks will be released in order
+- * and in a timely manner by the upper driver. So let's just copy
+- * them to a discrete buffer. In practice they're supposed to be
+- * small anyway.
+- */
+- slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
+- if (!slot) {
+- dev_err(&hci->master.dev, "no free slot for IBI\n");
+- goto done;
++ /*
++ * This ring model is not suitable for zero-copy processing of IBIs.
++ * We have the data chunk ring wrap-around to deal with, meaning
++ * that the payload might span multiple chunks beginning at the
++ * end of the ring and wrap to the start of the ring. Furthermore
++ * there is no guarantee that those chunks will be released in order
++ * and in a timely manner by the upper driver. So let's just copy
++ * them to a discrete buffer. In practice they're supposed to be
++ * small anyway.
++ */
++ slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
++ if (!slot) {
++ dev_err(&hci->master.dev, "no free slot for IBI\n");
++ goto done;
++ }
+ }
+-
+ /* copy first part of the payload */
+ ibi_data_offset = rh->ibi_chunk_sz * rh->ibi_chunk_ptr;
+ ring_ibi_data = rh->ibi_data + ibi_data_offset;
+@@ -695,10 +789,17 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
+ * rh->ibi_chunk_sz;
+ if (first_part > ibi_size)
+ first_part = ibi_size;
++ DBG("ibi_data_offset = %x, first_part = %x", ibi_data_offset,
++ first_part);
+ dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
+ first_part, DMA_FROM_DEVICE);
+- memcpy(slot->data, ring_ibi_data, first_part);
+-
++ if (hci->master.target) {
++ memcpy(hci->target_rx.buf, ring_ibi_data, first_part);
++ DBG("first_part got: %*ph", (u32)first_part, hci->target_rx.buf);
++ } else {
++ memcpy(slot->data, ring_ibi_data, first_part);
++ DBG("first_part got: %*ph", (u32)first_part, slot->data);
++ }
+ /* copy second part if any */
+ if (ibi_size > first_part) {
+ /* we wrap back to the start and copy remaining data */
+@@ -706,14 +807,33 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
+ ring_ibi_data_dma = rh->ibi_data_dma;
+ dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
+ ibi_size - first_part, DMA_FROM_DEVICE);
+- memcpy(slot->data + first_part, ring_ibi_data,
+- ibi_size - first_part);
++ if (hci->master.target) {
++ memcpy(hci->target_rx.buf + first_part, ring_ibi_data,
++ ibi_size - first_part);
++ DBG("remain got: %*ph", (u32)ibi_size - first_part,
++ hci->target_rx.buf + first_part);
++ } else {
++ memcpy(slot->data + first_part, ring_ibi_data,
++ ibi_size - first_part);
++ DBG("remain got: %*ph", (u32)ibi_size - first_part,
++ slot->data + first_part);
++ }
++ }
++ if (hci->master.target) {
++ /* Bypass the priv_xfer data to target layer */
++ if (dev->target_info.read_handler &&
++ !TARGET_RESP_CCC_INDICATE(ibi_status))
++ dev->target_info.read_handler(dev->dev,
++ hci->target_rx.buf,
++ ibi_size);
++ if (TARGET_RESP_CCC_INDICATE(ibi_status))
++ aspeed_i3c_ccc_handler(hci, TARGET_RESP_CCC_HDR(ibi_status));
++ } else {
++ /* submit it */
++ slot->dev = dev;
++ slot->len = ibi_size;
++ i3c_master_queue_ibi(dev, slot);
+ }
+-
+- /* submit it */
+- slot->dev = dev;
+- slot->len = ibi_size;
+- i3c_master_queue_ibi(dev, slot);
+
+ done:
+ /* take care to update the ibi dequeue pointer atomically */
+@@ -721,12 +841,15 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
+ op1_val = rh_reg_read(RING_OPERATION1);
+ op1_val &= ~RING_OP1_IBI_DEQ_PTR;
+ op1_val |= FIELD_PREP(RING_OP1_IBI_DEQ_PTR, deq_ptr);
++ DBG("Write RING_OP1_IBI_DEQ_PTR = %x", op1_val);
+ rh_reg_write(RING_OPERATION1, op1_val);
+ spin_unlock(&rh->lock);
+
+ /* update the chunk pointer */
+ rh->ibi_chunk_ptr += ibi_chunks;
+ rh->ibi_chunk_ptr %= rh->ibi_chunks_total;
++ DBG("rh->ibi_chunk_ptr = %x ibi_chunk_ptr = %x", ibi_chunks,
++ rh->ibi_chunk_ptr);
+
+ /* and tell the hardware about freed chunks */
+ rh_reg_write(CHUNK_CONTROL, rh_reg_read(CHUNK_CONTROL) + ibi_chunks);
+@@ -755,14 +878,22 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
+
+ if (status & INTR_IBI_READY)
+ hci_dma_process_ibi(hci, rh);
+- if (status & (INTR_TRANSFER_COMPLETION | INTR_TRANSFER_ERR))
++ if (status & (INTR_TRANSFER_COMPLETION | INTR_TRANSFER_ERR)) {
+ hci_dma_xfer_done(hci, rh);
++ if (unlikely(status & INTR_TRANSFER_ERR)) {
++ dev_warn(&hci->master.dev,
++ "ring %d: Transfer Error\n", i);
++ mipi_i3c_hci_resume(hci);
++ }
++ }
+ if (status & INTR_RING_OP)
+ complete(&rh->op_done);
+
+- if (status & INTR_TRANSFER_ABORT)
++ if (status & INTR_TRANSFER_ABORT) {
+ dev_notice_ratelimited(&hci->master.dev,
+ "ring %d: Transfer Aborted\n", i);
++ mipi_i3c_hci_resume(hci);
++ }
+ if (status & INTR_WARN_INS_STOP_MODE)
+ dev_warn_ratelimited(&hci->master.dev,
+ "ring %d: Inserted Stop on Mode Change\n", i);
+diff --git a/drivers/i3c/master/mipi-i3c-hci/ext_caps.c b/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
+index 2e9b23efd..ef9e45fd6 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
++++ b/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
+@@ -16,6 +16,9 @@
+ #include "ext_caps.h"
+ #include "xfer_mode_rate.h"
+
++#ifdef CONFIG_ARCH_ASPEED
++#define ASPEED_PHY_REGS_OFFSET 0xE00
++#endif
+
+ /* Extended Capability Header */
+ #define CAP_HEADER_LENGTH GENMASK(23, 8)
+@@ -207,6 +210,19 @@ static int hci_extcap_vendor_NXP(struct i3c_hci *hci, void __iomem *base)
+ return 0;
+ }
+
++static int hci_extcap_vendor_ASPEED(struct i3c_hci *hci, void __iomem *base)
++{
++ u32 regs_offset;
++
++ regs_offset = readl(base + 1 * 4);
++ dev_info(&hci->master.dev, "INHOUSE control at offset %#x\n", regs_offset);
++ hci->INHOUSE_regs = hci->base_regs + regs_offset;
++ regs_offset = readl(base + 2 * 4);
++ dev_info(&hci->master.dev, "PHY control at offset %#x\n", regs_offset);
++ hci->PHY_regs = hci->base_regs + regs_offset;
++ return 0;
++}
++
+ struct hci_ext_cap_vendor_specific {
+ u32 vendor;
+ u8 cap;
+@@ -221,6 +237,7 @@ struct hci_ext_cap_vendor_specific {
+
+ static const struct hci_ext_cap_vendor_specific vendor_ext_caps[] = {
+ EXT_CAP_VENDOR(NXP, 0xc0, 0x20),
++ EXT_CAP_VENDOR(ASPEED, 0xc0, 0x3),
+ };
+
+ static int hci_extcap_vendor_specific(struct i3c_hci *hci, void __iomem *base,
+@@ -255,6 +272,8 @@ static int hci_extcap_vendor_specific(struct i3c_hci *hci, void __iomem *base,
+
+ int i3c_hci_parse_ext_caps(struct i3c_hci *hci)
+ {
++ u32 offset;
++
+ void __iomem *curr_cap = hci->EXTCAPS_regs;
+ void __iomem *end = curr_cap + 0x1000; /* some arbitrary limit */
+ u32 cap_header, cap_id, cap_length;
+@@ -271,6 +290,29 @@ int i3c_hci_parse_ext_caps(struct i3c_hci *hci)
+ DBG("id=0x%02x length=%d", cap_id, cap_length);
+ if (!cap_length)
+ break;
++ /*
++		 * On AST2700A0 the EXTCAPS offset points at the in-house registers, and
++		 * the reset value of the first double word there is 0x2400. Parsed as an
++		 * ext_cap header this gives cap_id 0 with cap_length 36, which is invalid
++		 * (when cap_id is 0, cap_length should be 1). Use this to tell A0 from A1.
++ */
++ if (cap_id == 0 && cap_length != 1) {
++ /* AST2700A0 workaround */
++			/* A0 doesn't support DMA mode */
++ hci->RHS_regs = NULL;
++ dev_info(&hci->master.dev,
++ "Clear Ring Headers offset\n");
++ offset = hci->EXTCAPS_regs - hci->base_regs;
++ hci->INHOUSE_regs = hci->EXTCAPS_regs;
++ dev_info(&hci->master.dev,
++ "INHOUSE control at offset %#x\n", offset);
++ hci->PHY_regs = hci->base_regs + ASPEED_PHY_REGS_OFFSET;
++ dev_info(&hci->master.dev,
++ "PHY control at offset %#x\n",
++ ASPEED_PHY_REGS_OFFSET);
++ return 0;
++ }
+ if (curr_cap + cap_length * 4 >= end) {
+ dev_err(&hci->master.dev,
+ "ext_cap 0x%02x has size %d (too big)\n",
+diff --git a/drivers/i3c/master/mipi-i3c-hci/ext_caps.h b/drivers/i3c/master/mipi-i3c-hci/ext_caps.h
+index 9df17822f..016060b97 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/ext_caps.h
++++ b/drivers/i3c/master/mipi-i3c-hci/ext_caps.h
+@@ -12,6 +12,7 @@
+
+ /* MIPI vendor IDs */
+ #define MIPI_VENDOR_NXP 0x11b
++#define MIPI_VENDOR_ASPEED 0x3f6
+
+
+ int i3c_hci_parse_ext_caps(struct i3c_hci *hci);
+diff --git a/drivers/i3c/master/mipi-i3c-hci/hci.h b/drivers/i3c/master/mipi-i3c-hci/hci.h
+index f109923f6..524381e81 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/hci.h
++++ b/drivers/i3c/master/mipi-i3c-hci/hci.h
+@@ -32,12 +32,18 @@ struct hci_cmd_ops;
+ /* Our main structure */
+ struct i3c_hci {
+ struct i3c_master_controller master;
++ struct reset_control *rst, *dma_rst;
++ struct clk *clk;
+ void __iomem *base_regs;
+ void __iomem *DAT_regs;
+ void __iomem *DCT_regs;
+ void __iomem *RHS_regs;
+ void __iomem *PIO_regs;
+ void __iomem *EXTCAPS_regs;
++#ifdef CONFIG_ARCH_ASPEED
++ void __iomem *INHOUSE_regs;
++ void __iomem *PHY_regs;
++#endif
+ void __iomem *AUTOCMD_regs;
+ void __iomem *DEBUG_regs;
+ const struct hci_io_ops *io;
+@@ -58,6 +64,15 @@ struct i3c_hci {
+ u32 vendor_version_id;
+ u32 vendor_product_id;
+ void *vendor_data;
++ struct completion ibi_comp;
++ struct completion pending_r_comp;
++ struct work_struct hj_work;
++
++ /* Used for handling private write */
++ struct {
++ void *buf;
++ u16 max_len;
++ } target_rx;
+ };
+
+
+@@ -115,6 +130,8 @@ struct hci_io_ops {
+ int (*request_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+ void (*free_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev);
++ int (*request_hj)(struct i3c_hci *hci);
++ void (*free_hj)(struct i3c_hci *hci);
+ void (*recycle_ibi_slot)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot);
+ int (*init)(struct i3c_hci *hci);
+@@ -140,5 +157,6 @@ struct i3c_hci_dev_data {
+ void mipi_i3c_hci_resume(struct i3c_hci *hci);
+ void mipi_i3c_hci_pio_reset(struct i3c_hci *hci);
+ void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci);
++void mipi_i3c_hci_hj_ctrl(struct i3c_hci *hci, bool ack_nack);
+
+ #endif
+diff --git a/drivers/i3c/master/mipi-i3c-hci/ibi.h b/drivers/i3c/master/mipi-i3c-hci/ibi.h
+index e1f98e264..287878eed 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/ibi.h
++++ b/drivers/i3c/master/mipi-i3c-hci/ibi.h
+@@ -25,6 +25,10 @@
+ #define IBI_TARGET_RNW BIT(8)
+ #define IBI_DATA_LENGTH GENMASK(7, 0)
+
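++/*
++ * Classify an IBI by its address/RnW pair: a write (RnW = 0) from the
++ * hot-join address is a hot-join request, while a write from any other
++ * address is treated as a controller-role request.
++ */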
++#define IBI_TYPE_HJ(a, rnw) (((a) == I3C_HOT_JOIN_ADDR) && !(rnw))
++
++#define IBI_TYPE_CR(a, rnw) (((a) != I3C_HOT_JOIN_ADDR) && !(rnw))
++
+ /* handy helpers */
+ static inline struct i3c_dev_desc *
+ i3c_hci_addr_to_dev(struct i3c_hci *hci, unsigned int addr)
+diff --git a/drivers/i3c/master/mipi-i3c-hci/pio.c b/drivers/i3c/master/mipi-i3c-hci/pio.c
+index d0272aa93..707412d81 100644
+--- a/drivers/i3c/master/mipi-i3c-hci/pio.c
++++ b/drivers/i3c/master/mipi-i3c-hci/pio.c
+@@ -14,7 +14,7 @@
+ #include "hci.h"
+ #include "cmd.h"
+ #include "ibi.h"
+-
++#include "vendor_aspeed.h"
+
+ /*
+ * PIO Access Area
+@@ -157,6 +157,21 @@ static int hci_pio_init(struct i3c_hci *hci)
+ 4 * (2 << FIELD_GET(RX_DATA_BUFFER_SIZE, size_val)));
+ dev_info(&hci->master.dev, "TX data FIFO = %d bytes\n",
+ 4 * (2 << FIELD_GET(TX_DATA_BUFFER_SIZE, size_val)));
++ if (hci->master.target) {
++ void *rx_buf;
++ /*
++ * Set max private write length value based on read-only register.
++ * TODO: Handle updates after receiving SETMWL CCC.
++ */
++ hci->target_rx.max_len =
++ 4 * (2 << FIELD_GET(TX_DATA_BUFFER_SIZE, size_val));
++
++ rx_buf = kzalloc(hci->target_rx.max_len, GFP_KERNEL);
++ if (!rx_buf)
++ return -ENOMEM;
++
++ hci->target_rx.buf = rx_buf;
++ }
+
+ /*
+ * Let's initialize data thresholds to half of the actual FIFO size.
+@@ -189,10 +204,19 @@ static int hci_pio_init(struct i3c_hci *hci)
+ */
+ ibi_val = FIELD_GET(IBI_STATUS_SIZE, size_val);
+ pio->max_ibi_thresh = clamp_val(ibi_val/2, 1, 63);
++ /*
++	 * FIXME: The EMPTY_BUF_THLD logic seems to be wrong.
++	 * In target mode it should be set to 0 so that STAT_CMD_QUEUE_READY
++	 * stays at 1.
++ */
+ val = FIELD_PREP(QUEUE_IBI_STATUS_THLD, 1) |
+ FIELD_PREP(QUEUE_IBI_DATA_THLD, pio->max_ibi_thresh) |
+- FIELD_PREP(QUEUE_RESP_BUF_THLD, 1) |
+- FIELD_PREP(QUEUE_CMD_EMPTY_BUF_THLD, 1);
++ FIELD_PREP(QUEUE_RESP_BUF_THLD, 1);
++ if (!aspeed_get_i3c_revision_id(hci))
++ val |= FIELD_PREP(QUEUE_CMD_EMPTY_BUF_THLD,
++ hci->master.target ? 0 : 1);
++ else
++ val |= FIELD_PREP(QUEUE_CMD_EMPTY_BUF_THLD, 1);
+ pio_reg_write(QUEUE_THLD_CTRL, val);
+ pio->reg_queue_thresh = val;
+
+@@ -202,6 +226,16 @@ static int hci_pio_init(struct i3c_hci *hci)
+
+ /* Always accept error interrupts (will be activated on first xfer) */
+ pio->enabled_irqs = STAT_ALL_ERRORS;
++ if (hci->master.target) {
++ /*
++		 * Enable the response-queue-ready interrupt so CCC update
++		 * responses are handled and the response queue does not fill up.
++ */
++ pio_reg_write(INTR_SIGNAL_ENABLE, STAT_RESP_READY);
++ pio->enabled_irqs |= STAT_RESP_READY;
++ } else {
++ mipi_i3c_hci_hj_ctrl(hci, false);
++ }
+
+ return 0;
+ }
+@@ -227,9 +261,11 @@ static void hci_pio_cleanup(struct i3c_hci *hci)
+ static void hci_pio_write_cmd(struct i3c_hci *hci, struct hci_xfer *xfer)
+ {
+ DBG("cmd_desc[%d] = 0x%08x", 0, xfer->cmd_desc[0]);
+- DBG("cmd_desc[%d] = 0x%08x", 1, xfer->cmd_desc[1]);
+ pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[0]);
+- pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[1]);
++ if (!hci->master.target) {
++ DBG("cmd_desc[%d] = 0x%08x", 1, xfer->cmd_desc[1]);
++ pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[1]);
++ }
+ if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
+ DBG("cmd_desc[%d] = 0x%08x", 2, xfer->cmd_desc[2]);
+ DBG("cmd_desc[%d] = 0x%08x", 3, xfer->cmd_desc[3]);
+@@ -263,6 +299,61 @@ static bool hci_pio_do_rx(struct i3c_hci *hci, struct hci_pio_data *pio)
+ return !xfer->data_left;
+ }
+
++static void ast2700_target_read_rx_fifo(struct i3c_hci *hci, unsigned int count)
++{
++ u32 *p = hci->target_rx.buf;
++
++ if (count >= 4) {
++ unsigned int nr_words = count / 4;
++
++ while (nr_words--)
++ *p++ = pio_reg_read(XFER_DATA_PORT);
++ }
++ count &= 3;
++ if (count) {
++ u8 *p_byte = (u8 *)p;
++ u32 data = pio_reg_read(XFER_DATA_PORT);
++
++ data = (__force u32)cpu_to_le32(data);
++ while (count--) {
++ *p_byte++ = data;
++ data >>= 8;
++ }
++ }
++}
++
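++/*
++ * AST2700 A0 dummy-data workaround: when exactly one extra word is still
++ * pending in the RX FIFO after a response, the first word of the received
++ * buffer is a dummy. Shift the payload down by one word and pull the real
++ * trailing word out of the FIFO.
++ */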
++static void aspeed_dummy_data_work_around(struct i3c_hci *hci,
++ struct hci_pio_data *pio)
++{
++ u32 remain, data;
++ struct hci_xfer *xfer = pio->curr_rx;
++ u32 *p;
++ u8 *p_byte;
++
++ remain = aspeed_get_received_rx_entries(hci);
++ if (remain > 1) {
++ dev_err(&hci->master.dev,
++ "unexpected behavior remain %d response = %x",
++ remain, xfer->response);
++ return;
++ }
++ remain *= 4;
++ if (remain) {
++ dev_warn_once(&hci->master.dev,
++ "encounter the dummy data issue");
++ p = xfer->data;
++ memcpy(p, p + 1, RESP_DATA_LENGTH(xfer->response) - 4);
++ p += ((RESP_DATA_LENGTH(xfer->response) >> 2) - 1);
++ data = pio_reg_read(XFER_DATA_PORT);
++ data = (__force u32)cpu_to_le32(data);
++ p_byte = (u8 *)p;
++ while (remain--) {
++ *p_byte++ = data;
++ data >>= 8;
++ }
++ }
++}
++
+ static void hci_pio_do_trailing_rx(struct i3c_hci *hci,
+ struct hci_pio_data *pio, unsigned int count)
+ {
+@@ -314,11 +405,21 @@ static bool hci_pio_do_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
+ p += (xfer->data_len - xfer->data_left) / 4;
+
+ while (xfer->data_left >= 4) {
++#ifdef CONFIG_ARCH_ASPEED
++ unsigned int avail_tx = aspeed_get_avail_tx_entries(hci);
++
++ /* bail out if FIFO free space is below set threshold */
++ if (unlikely(!avail_tx))
++ return false;
++ /* we can fill up to that TX threshold */
++ nr_words = min(xfer->data_left / 4, avail_tx);
++#else
+ /* bail out if FIFO free space is below set threshold */
+ if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
+ return false;
+ /* we can fill up to that TX threshold */
+ nr_words = min(xfer->data_left / 4, pio->tx_thresh_size);
++#endif
+ /* push data into the FIFO */
+ xfer->data_left -= nr_words * 4;
+ DBG("now %d left %d", nr_words * 4, xfer->data_left);
+@@ -334,8 +435,13 @@ static bool hci_pio_do_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
+ * also get some bytes past the actual buffer but no one
+ * should care as they won't be sent out.
+ */
++#ifdef CONFIG_ARCH_ASPEED
++ if (unlikely(!aspeed_get_avail_tx_entries(hci)))
++ return false;
++#else
+ if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
+ return false;
++#endif
+ DBG("trailing %d", xfer->data_left);
+ pio_reg_write(XFER_DATA_PORT, *p);
+ xfer->data_left = 0;
+@@ -475,8 +581,85 @@ static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
+
+ static bool hci_pio_process_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
+ {
++ if (hci->master.target) {
++ struct i3c_dev_desc *desc = hci->master.this;
++ u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);
++ size_t nbytes = TARGET_RESP_DATA_LENGTH(resp);
++
++ if (!aspeed_get_i3c_revision_id(hci)) {
++ DBG(a0_debug_s, TARGET_RESP_STATUS(resp),
++ TARGET_RESP_XFER_TYPE(resp),
++ TARGET_RESP_TID_A0(resp), TARGET_RESP_CCC_HDR(resp),
++ TARGET_RESP_DATA_LENGTH(resp));
++ if (TARGET_RESP_XFER_TYPE(resp)) {
++ ast2700_target_read_rx_fifo(hci, nbytes);
++ DBG("got: %*ph", (u32)nbytes,
++ hci->target_rx.buf);
++ if (!TARGET_RESP_CCC_HDR(resp)) {
++ /* Bypass the priv_xfer data to target layer */
++ if (desc->target_info.read_handler)
++ desc->target_info.read_handler(desc->dev,
++ hci->target_rx.buf,
++ nbytes);
++ } else {
++ aspeed_i3c_ccc_handler(hci, TARGET_RESP_CCC_HDR(resp));
++ }
++ } else {
++ /* ibi or master read or HDR read */
++ if (!TARGET_RESP_STATUS(resp) &&
++ (!TARGET_RESP_CCC_HDR(resp) ||
++ TARGET_RESP_CCC_HDR(resp) & 0x80)) {
++ if (TARGET_RESP_TID_A0(resp) == TID_TARGET_IBI)
++ complete(&hci->ibi_comp);
++ else if (TARGET_RESP_TID_A0(resp) == TID_TARGET_RD_DATA)
++ complete(&hci->pending_r_comp);
++ }
++ }
++ } else {
++ DBG(a1_debug_s,
++ TARGET_RESP_STATUS(resp),
++ TARGET_RESP_XFER_TYPE(resp),
++ TARGET_RESP_CCC_INDICATE(resp),
++ TARGET_RESP_TID(resp), TARGET_RESP_CCC_HDR(resp),
++ TARGET_RESP_DATA_LENGTH(resp));
++
++ if (TARGET_RESP_CCC_INDICATE(resp)) {
++ if (TARGET_RESP_XFER_TYPE(resp)) {
++ ast2700_target_read_rx_fifo(hci,
++ nbytes);
++ DBG("got: %*ph", (u32)nbytes,
++ hci->target_rx.buf);
++ aspeed_i3c_ccc_handler(hci, TARGET_RESP_CCC_HDR(resp));
++ }
++ } else if (TARGET_RESP_XFER_TYPE(resp)) {
++ ast2700_target_read_rx_fifo(hci, nbytes);
++ DBG("got: %*ph", (u32)nbytes,
++ hci->target_rx.buf);
++ /* Bypass the priv_xfer data to target layer */
++ if (desc->target_info.read_handler)
++ desc->target_info.read_handler(desc->dev,
++ hci->target_rx.buf,
++ nbytes);
++ } else {
++ /* TODO: Pass the HDR command to user space */
++ if (TARGET_RESP_TID(resp) == TID_TARGET_IBI)
++ complete(&hci->ibi_comp);
++ else if (TARGET_RESP_TID(resp) ==
++ TID_TARGET_RD_DATA)
++ complete(&hci->pending_r_comp);
++ }
++ }
++ if (TARGET_RESP_STATUS(resp) >= TARGET_RESP_ERR_CRC &&
++ TARGET_RESP_STATUS(resp) <= TARGET_RESP_ERR_I2C_READ_TOO_MUCH) {
++ dev_err(&hci->master.dev, "Target Xfer Error: 0x%lx",
++ TARGET_RESP_STATUS(resp));
++ hci_pio_err(hci, pio, 0);
++ }
++		/* Keep the response interrupt enabled */
++ return false;
++ }
+ while (pio->curr_resp &&
+- (pio_reg_read(INTR_STATUS) & STAT_RESP_READY)) {
++ (pio_reg_read(INTR_STATUS) & STAT_RESP_READY)) {
+ struct hci_xfer *xfer = pio->curr_resp;
+ u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);
+ unsigned int tid = RESP_TID(resp);
+@@ -503,14 +686,14 @@ static bool hci_pio_process_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
+ received = xfer->data_len - xfer->data_left;
+ expected = RESP_DATA_LENGTH(xfer->response);
+ if (expected > received) {
+- hci_pio_do_trailing_rx(hci, pio,
+- expected - received);
++ hci_pio_do_trailing_rx(hci, pio, expected - received);
+ } else if (received > expected) {
+ /* we consumed data meant for next xfer */
+ to_keep = DIV_ROUND_UP(expected, 4);
+ hci_pio_push_to_next_rx(hci, xfer, to_keep);
+ }
+-
++ /* Workaround for A0 dummy data issue */
++ aspeed_dummy_data_work_around(hci, pio);
+ /* then process the RX list pointer */
+ if (hci_pio_process_rx(hci, pio))
+ pio->enabled_irqs &= ~STAT_RX_THLD;
+@@ -544,7 +727,8 @@ static void hci_pio_queue_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
+ struct hci_xfer *xfer = pio->curr_xfer;
+ struct hci_xfer *prev_queue_tail;
+
+- if (!(xfer->cmd_desc[0] & CMD_0_ROC))
++ if (!(xfer->cmd_desc[0] & CMD_0_ROC) &&
++ ((xfer->cmd_desc[0] & CMD_0_ATTR) != CMD_0_ATTR_M))
+ return;
+
+ prev_queue_tail = pio->resp_queue;
+@@ -711,6 +895,20 @@ static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
+
+ /* dump states on programming errors */
+ if (status & STAT_PROG_ERRORS) {
++#ifdef CONFIG_ARCH_ASPEED
++ u32 queue = ast_inhouse_read(ASPEED_I3C_QUEUE_PTR0);
++ u32 data = ast_inhouse_read(ASPEED_I3C_QUEUE_PTR1);
++
++ dev_err(&hci->master.dev,
++ "prog error %#lx (C/R/I = %ld:%ld/%ld:%ld/%ld:%ld, TX/RX/IBI = %ld:%ld/%ld:%ld/%ld:%ld)\n",
++ status & STAT_PROG_ERRORS, QUEUE_PTR0_CMD_W(queue),
++ QUEUE_PTR0_CMD_R(queue), QUEUE_PTR0_RESP_W(queue),
++ QUEUE_PTR0_RESP_R(queue), QUEUE_PTR0_IBI_W(queue),
++ QUEUE_PTR0_IBI_R(queue), QUEUE_PTR0_TX_W(queue),
++ QUEUE_PTR0_TX_R(queue), QUEUE_PTR1_RX_W(data),
++ QUEUE_PTR1_RX_R(data), QUEUE_PTR1_IBI_DATA_W(data),
++ QUEUE_PTR1_IBI_DATA_R(data));
++#else
+ u32 queue = pio_reg_read(QUEUE_CUR_STATUS);
+ u32 data = pio_reg_read(DATA_BUFFER_CUR_STATUS);
+
+@@ -722,6 +920,7 @@ static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
+ FIELD_GET(CUR_IBI_Q_LEVEL, queue),
+ FIELD_GET(CUR_TX_BUF_LVL, data),
+ FIELD_GET(CUR_RX_BUF_LVL, data));
++#endif
+ }
+
+ /* just bust out everything with pending responses for now */
+@@ -789,15 +988,13 @@ static bool hci_pio_get_ibi_segment(struct i3c_hci *hci,
+ u8 *p_byte = (u8 *)p;
+
+ hci_pio_set_ibi_thresh(hci, pio, 1);
+- if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
+- return false;
+ DBG("trailing %d", ibi->seg_cnt);
+ data = pio_reg_read(IBI_PORT);
+ data = (__force u32) cpu_to_le32(data);
+- while (ibi->seg_cnt--) {
++ do {
+ *p_byte++ = data;
+ data >>= 8;
+- }
++ } while (--ibi->seg_cnt);
+ }
+
+ return true;
+@@ -810,6 +1007,8 @@ static bool hci_pio_prep_new_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
+ struct i3c_hci_dev_data *dev_data;
+ struct hci_pio_dev_ibi_data *dev_ibi;
+ u32 ibi_status;
++ unsigned int ibi_addr;
++ bool ibi_rnw;
+
+ /*
+ * We have a new IBI. Try to set up its payload retrieval.
+@@ -821,10 +1020,22 @@ static bool hci_pio_prep_new_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
+
+ ibi_status = pio_reg_read(IBI_PORT);
+ DBG("status = %#x", ibi_status);
+- ibi->addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
+- if (ibi_status & IBI_ERROR) {
+- dev_err(&hci->master.dev, "IBI error from %#x\n", ibi->addr);
++ ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
++ ibi_rnw = FIELD_GET(IBI_TARGET_RNW, ibi_status);
++ if (IBI_TYPE_HJ(ibi_addr, ibi_rnw)) {
++ queue_work(hci->master.wq, &hci->hj_work);
++ return false;
++ } else if (IBI_TYPE_CR(ibi_addr, ibi_rnw)) {
++ dev_info(&hci->master.dev,
++ "get control role requeset from %02lx\n",
++			 "got controller role request from %02lx\n",
+ return false;
++ } else {
++ ibi->addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
++ if (ibi_status & IBI_ERROR) {
++ dev_err(&hci->master.dev, "IBI error from %#x\n", ibi->addr);
++ return false;
++ }
+ }
+
+ ibi->last_seg = ibi_status & IBI_LAST_STATUS;
+@@ -874,7 +1085,7 @@ static bool hci_pio_process_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
+ {
+ struct hci_pio_ibi_data *ibi = &pio->ibi;
+
+- if (!ibi->slot && !ibi->seg_cnt && ibi->last_seg)
++ if (!ibi->slot && !ibi->seg_cnt)
+ if (!hci_pio_prep_new_ibi(hci, pio))
+ return false;
+
+@@ -944,6 +1155,7 @@ static int hci_pio_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ struct i3c_generic_ibi_pool *pool;
+ struct hci_pio_dev_ibi_data *dev_ibi;
++ struct hci_pio_data *pio = hci->io_data;
+
+ dev_ibi = kmalloc(sizeof(*dev_ibi), GFP_KERNEL);
+ if (!dev_ibi)
+@@ -956,9 +1168,24 @@ static int hci_pio_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ dev_ibi->pool = pool;
+ dev_ibi->max_len = req->max_payload_len;
+ dev_data->ibi_data = dev_ibi;
++ pio->enabled_irqs |= STAT_IBI_STATUS_THLD;
++ pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
+ return 0;
+ }
+
++static int hci_pio_request_hj(struct i3c_hci *hci)
++{
++ struct hci_pio_data *pio = hci->io_data;
++
++ pio->enabled_irqs |= STAT_IBI_STATUS_THLD;
++ pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
++ return 0;
++}
++
++static void hci_pio_free_hj(struct i3c_hci *hci)
++{
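++	/*
++	 * Nothing to release: requesting hot-join only enabled the
++	 * IBI status interrupt.
++	 */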
++}
++
+ static void hci_pio_free_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev)
+ {
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+@@ -1003,7 +1230,7 @@ static bool hci_pio_irq_handler(struct i3c_hci *hci, unsigned int unused)
+ if (hci_pio_process_tx(hci, pio))
+ pio->enabled_irqs &= ~STAT_TX_THLD;
+ if (status & STAT_RESP_READY)
+- if (hci_pio_process_resp(hci, pio))
++ if (hci_pio_process_resp(hci, pio) && !hci->master.target)
+ pio->enabled_irqs &= ~STAT_RESP_READY;
+
+ if (unlikely(status & STAT_LATENCY_WARNINGS)) {
+@@ -1025,6 +1252,10 @@ static bool hci_pio_irq_handler(struct i3c_hci *hci, unsigned int unused)
+ pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
+ DBG("(out) status: %#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
++#ifdef CONFIG_ARCH_ASPEED
++	/* FIXME: Write 1 to trigger the INTC to check for interrupts again. */
++ ast_inhouse_write(ASPEED_I3C_INTR_RENEW, 1);
++#endif
+ spin_unlock(&pio->lock);
+ return true;
+ }
+@@ -1038,4 +1269,6 @@ const struct hci_io_ops mipi_i3c_hci_pio = {
+ .request_ibi = hci_pio_request_ibi,
+ .free_ibi = hci_pio_free_ibi,
+ .recycle_ibi_slot = hci_pio_recycle_ibi_slot,
++ .request_hj = hci_pio_request_hj,
++ .free_hj = hci_pio_free_hj,
+ };
+diff --git a/drivers/i3c/master/mipi-i3c-hci/vendor_aspeed.h b/drivers/i3c/master/mipi-i3c-hci/vendor_aspeed.h
+new file mode 100644
+index 000000000..7577bfc24
+--- /dev/null
++++ b/drivers/i3c/master/mipi-i3c-hci/vendor_aspeed.h
+@@ -0,0 +1,408 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++#ifndef VENDOR_ASPEED_H
++#define VENDOR_ASPEED_H
++
++/* Aspeed in-house register */
++#include <linux/bitfield.h>
++#define ast_inhouse_read(r) readl(hci->INHOUSE_regs + (r))
++#define ast_inhouse_write(r, v) writel(v, hci->INHOUSE_regs + (r))
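++/* These accessors assume a local struct i3c_hci *hci is in scope at the call site. */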
++
++#define ASPEED_I3C_CTRL 0x0
++#define ASPEED_I3C_CTRL_STOP_QUEUE_PT BIT(31) //Stop the queue read pointer.
++#define ASPEED_I3C_CTRL_INIT BIT(4)
++#define ASPEED_I3C_CTRL_INIT_MODE GENMASK(1, 0)
++#define INIT_MST_MODE 0
++#define INIT_SEC_MST_MODE 1
++#define INIT_SLV_MODE 2
++
++#define ASPEED_I3C_STS 0x4
++#define ASPEED_I3C_STS_SLV_DYNAMIC_ADDRESS_VALID BIT(23)
++#define ASPEED_I3C_STS_SLV_DYNAMIC_ADDRESS GENMASK(22, 16)
++#define ASPEED_I3C_STS_MODE_PURE_SLV BIT(8)
++#define ASPEED_I3C_STS_MODE_SECONDARY_SLV_TO_MST BIT(7)
++#define ASPEED_I3C_STS_MODE_SECONDARY_MST_TO_SLV BIT(6)
++#define ASPEED_I3C_STS_MODE_SECONDARY_SLV BIT(5)
++#define ASPEED_I3C_STS_MODE_SECONDARY_MST BIT(4)
++#define ASPEED_I3C_STS_MODE_PRIMARY_SLV_TO_MST BIT(3)
++#define ASPEED_I3C_STS_MODE_PRIMARY_MST_TO_SLV BIT(2)
++#define ASPEED_I3C_STS_MODE_PRIMARY_SLV BIT(1)
++#define ASPEED_I3C_STS_MODE_PRIMARY_MST BIT(0)
++
++#define ASPEED_I3C_DAA_INDEX0 0x10
++#define ASPEED_I3C_DAA_INDEX1 0x14
++#define ASPEED_I3C_DAA_INDEX2 0x18
++#define ASPEED_I3C_DAA_INDEX3 0x1C
++
++#define ASPEED_I3C_AUTOCMD_0 0x20
++#define ASPEED_I3C_AUTOCMD_1 0x24
++#define ASPEED_I3C_AUTOCMD_2 0x28
++#define ASPEED_I3C_AUTOCMD_3 0x2C
++#define ASPEED_I3C_AUTOCMD_4 0x30
++#define ASPEED_I3C_AUTOCMD_5 0x34
++#define ASPEED_I3C_AUTOCMD_6 0x38
++#define ASPEED_I3C_AUTOCMD_7 0x3C
++
++#define ASPEED_I3C_AUTOCMD_SEL_0_7 0x40
++#define ASPEED_I3C_AUTOCMD_SEL_8_15 0x44
++#define ASPEED_I3C_AUTOCMD_SEL_16_23 0x48
++#define ASPEED_I3C_AUTOCMD_SEL_24_31 0x4C
++#define ASPEED_I3C_AUTOCMD_SEL_32_39 0x50
++#define ASPEED_I3C_AUTOCMD_SEL_40_47 0x54
++#define ASPEED_I3C_AUTOCMD_SEL_48_55 0x58
++#define ASPEED_I3C_AUTOCMD_SEL_56_63 0x5C
++#define ASPEED_I3C_AUTOCMD_SEL_64_71 0x60
++#define ASPEED_I3C_AUTOCMD_SEL_72_79 0x64
++#define ASPEED_I3C_AUTOCMD_SEL_80_87 0x68
++#define ASPEED_I3C_AUTOCMD_SEL_88_95 0x6C
++#define ASPEED_I3C_AUTOCMD_SEL_96_103 0x70
++#define ASPEED_I3C_AUTOCMD_SEL_104_111 0x74
++#define ASPEED_I3C_AUTOCMD_SEL_112_119 0x78
++#define ASPEED_I3C_AUTOCMD_SEL_120_127 0x7C
++
++#define ASPEED_I3C_SLV_CHAR_CTRL 0xA0
++#define ASPEED_I3C_SLV_CHAR_CTRL_DCR GENMASK(23, 16)
++#define ASPEED_I3C_SLV_CHAR_CTRL_BCR GENMASK(15, 8)
++#define SLV_BCR_DEVICE_ROLE GENMASK(7, 6)
++#define ASPEED_I3C_SLV_CHAR_CTRL_STATIC_ADDR_EN BIT(7)
++#define ASPEED_I3C_SLV_CHAR_CTRL_STATIC_ADDR GENMASK(6, 0)
++#define SLV_PID_HI(x) (((x) >> 32) & GENMASK(15, 0))
++#define SLV_PID_LO(x) ((x) & GENMASK(31, 0))
++#define ASPEED_I3C_SLV_PID_LO 0xA4
++#define ASPEED_I3C_SLV_PID_HI 0xA8
++#define ASPEED_I3C_SLV_FSM 0xAC
++#define ASPEED_I3C_SLV_CAP_CTRL 0xB0
++#define ASPEED_I3C_SLV_CAP_CTRL_PEC_EN BIT(31)
++#define ASPEED_I3C_SLV_CAP_CTRL_HAIT_IF_IBI_ERR BIT(30)
++#define ASPEED_I3C_SLV_CAP_CTRL_ACCEPT_CR BIT(16)
++#define ASPEED_I3C_SLV_CAP_CTRL_HJ_REQ BIT(10)
++#define ASPEED_I3C_SLV_CAP_CTRL_MR_REQ BIT(9)
++#define ASPEED_I3C_SLV_CAP_CTRL_IBI_REQ BIT(8)
++#define ASPEED_I3C_SLV_CAP_CTRL_HJ_WAIT BIT(6)
++#define ASPEED_I3C_SLV_CAP_CTRL_MR_WAIT BIT(5)
++#define ASPEED_I3C_SLV_CAP_CTRL_IBI_WAIT BIT(4)
++#define ASPEED_I3C_SLV_CAP_CTRL_NOTSUP_DEF_BYTE BIT(1)
++#define ASPEED_I3C_SLV_CAP_CTRL_I2C_DEV BIT(0)
++/* CCC related registers */
++#define ASPEED_I3C_SLV_STS1 0xB4
++#define ASPEED_I3C_SLV_STS1_IBI_PAYLOAD_SIZE GENMASK(31, 24)
++#define ASPEED_I3C_SLV_STS1_RSTACT GENMASK(22, 16)
++/* Parameters for the HDR-DDR Data Transfer Early Termination procedure */
++#define ASPEED_I3C_SLV_STS1_ETP_ACK_CAP BIT(15)
++#define ASPEED_I3C_SLV_STS1_ETP_W_REQ BIT(14)
++#define ASPEED_I3C_SLV_STS1_ETP_CRC GENMASK(13, 12)
++#define ASPEED_I3C_SLV_STS1_ENDXFER_CONFIRM BIT(11)
++#define ASPEED_I3C_SLV_STS1_ENTER_TEST_MDOE BIT(8)
++#define ASPEED_I3C_SLV_STS1_HJ_EN BIT(6)
++#define ASPEED_I3C_SLV_STS1_CR_EN BIT(5)
++#define ASPEED_I3C_SLV_STS1_IBI_EN BIT(4)
++#define ASPEED_I3C_SLV_STS1_HJ_DONE BIT(2)
++#define ASPEED_I3C_SLV_STS1_CR_DONE BIT(1)
++#define ASPEED_I3C_SLV_STS1_IBI_DONE BIT(0)
++#define ASPEED_I3C_SLV_STS2 0xB8
++#define ASPEED_I3C_SLV_STS2_MWL GENMASK(31, 16)
++#define ASPEED_I3C_SLV_STS2_MRL GENMASK(15, 0)
++#define ASPEED_I3C_SLV_STS3_GROUP_ADDR 0xBC
++#define ASPEED_I3C_SLV_STS3_GROUP3_VALID BIT(31)
++#define ASPEED_I3C_SLV_STS3_GROUP3_ADDR GENMASK(30, 24)
++#define ASPEED_I3C_SLV_STS3_GROUP2_VALID BIT(23)
++#define ASPEED_I3C_SLV_STS3_GROUP2_ADDR GENMASK(22, 16)
++#define ASPEED_I3C_SLV_STS3_GROUP1_VALID BIT(15)
++#define ASPEED_I3C_SLV_STS3_GROUP1_ADDR GENMASK(14, 8)
++#define ASPEED_I3C_SLV_STS3_GROUP0_VALID BIT(7)
++#define ASPEED_I3C_SLV_STS3_GROUP0_ADDR GENMASK(6, 0)
++#define ASPEED_I3C_SLV_STS4_RSTACT_TIME 0xC0
++#define ASPEED_I3C_SLV_STS4_DBG_NET GENMASK(23, 16)
++#define ASPEED_I3C_SLV_STS4_WHOLE_CHIP GENMASK(15, 8)
++#define ASPEED_I3C_SLV_STS4_I3C GENMASK(7, 0)
++#define ASPEED_I3C_SLV_STS5_GETMXDS_RW 0xC4
++#define ASPEED_I3C_SLV_STS5_MAXWR GENMASK(15, 8)
++#define ASPEED_I3C_SLV_STS5_MAXRD GENMASK(7, 0)
++#define ASPEED_I3C_SLV_STS6_GETMXDS 0xC8
++#define ASPEED_I3C_SLV_STS6_FORMAT BIT(24)
++#define ASPEED_I3C_SLV_STS6_MAXRD_TURN_H GENMASK(23, 16)
++#define ASPEED_I3C_SLV_STS6_MAXRD_TURN_M GENMASK(15, 8)
++#define ASPEED_I3C_SLV_STS6_MAXRD_TURN_L GENMASK(7, 0)
++#define ASPEED_I3C_SLV_STS7_GETSTATUS 0xCC
++#define ASPEED_I3C_SLV_STS7_PRECR GENMASK(31, 16)
++#define ASPEED_I3C_SLV_STS7_TGT GENMASK(15, 0)
++#define ASPEED_I3C_SLV_STS8_GETCAPS_TGT 0xD0
++#define ASPEED_I3C_SLV_STS9_GETCAPS_VT_CR 0xD4
++#define ASPEED_I3C_SLV_STS7_VT GENMASK(31, 16)
++#define ASPEED_I3C_SLV_STS7_CR GENMASK(15, 0)
++
++#define ASPEED_I3C_QUEUE_PTR0 0xD8
++#define QUEUE_PTR0_TX_R(q) FIELD_GET(GENMASK(24, 20), q)
++#define QUEUE_PTR0_TX_W(q) FIELD_GET(GENMASK(16, 12), q)
++#define QUEUE_PTR0_IBI_R(q) FIELD_GET(GENMASK(11, 10), q)
++#define QUEUE_PTR0_IBI_W(q) FIELD_GET(GENMASK(9, 8), q)
++#define QUEUE_PTR0_RESP_R(q) FIELD_GET(GENMASK(7, 6), q)
++#define QUEUE_PTR0_RESP_W(q) FIELD_GET(GENMASK(5, 4), q)
++#define QUEUE_PTR0_CMD_R(q) FIELD_GET(GENMASK(3, 2), q)
++#define QUEUE_PTR0_CMD_W(q) FIELD_GET(GENMASK(1, 0), q)
++
++#define ASPEED_I3C_QUEUE_PTR1 0xDC
++#define QUEUE_PTR1_IBI_DATA_R(q) FIELD_GET(GENMASK(28, 24), q)
++#define QUEUE_PTR1_IBI_DATA_W(q) FIELD_GET(GENMASK(20, 16), q)
++#define QUEUE_PTR1_RX_R(q) FIELD_GET(GENMASK(12, 8), q)
++#define QUEUE_PTR1_RX_W(q) FIELD_GET(GENMASK(4, 0), q)
++
++#define ASPEED_I3C_INTR_STATUS 0xE0
++#define ASPEED_I3C_INTR_STATUS_ENABLE 0xE4
++#define ASPEED_I3C_INTR_SIGNAL_ENABLE 0xE8
++#define ASPEED_I3C_INTR_FORCE 0xEC
++#define ASPEED_I3C_INTR_I2C_SDA_STUCK_LOW BIT(14)
++#define ASPEED_I3C_INTR_I3C_SDA_STUCK_HIGH BIT(13)
++#define ASPEED_I3C_INTR_I3C_SDA_STUCK_LOW BIT(12)
++#define ASPEED_I3C_INTR_MST_INTERNAL_DONE BIT(10)
++#define ASPEED_I3C_INTR_MST_DDR_READ_DONE BIT(9)
++#define ASPEED_I3C_INTR_MST_DDR_WRITE_DONE BIT(8)
++#define ASPEED_I3C_INTR_MST_IBI_DONE BIT(7)
++#define ASPEED_I3C_INTR_MST_READ_DONE BIT(6)
++#define ASPEED_I3C_INTR_MST_WRITE_DONE BIT(5)
++#define ASPEED_I3C_INTR_MST_DAA_DONE BIT(4)
++#define ASPEED_I3C_INTR_SLV_SCL_STUCK BIT(1)
++#define ASPEED_I3C_INTR_TGRST BIT(0)
++
++#define ASPEED_I3C_INTR_SUM_STATUS 0xF0
++#define ASPEED_INTR_SUM_INHOUSE BIT(3)
++#define ASPEED_INTR_SUM_RHS BIT(2)
++#define ASPEED_INTR_SUM_PIO BIT(1)
++#define ASPEED_INTR_SUM_CAP BIT(0)
++
++#define ASPEED_I3C_INTR_RENEW 0xF4
++
++/* Aspeed Phy register */
++#define ast_phy_read(r) readl(hci->PHY_regs + (r))
++#define ast_phy_write(r, v) writel(v, hci->PHY_regs + (r))
++
++#define PHY_SW_FORCE_CTRL 0x4
++#define PHY_SW_FORCE_CTRL_SCL_IN_EN BIT(31)
++#define PHY_SW_FORCE_CTRL_SCL_OUT_EN BIT(30)
++#define PHY_SW_FORCE_CTRL_SCL_OE_EN BIT(29)
++#define PHY_SW_FORCE_CTRL_SCL_PU_EN BIT(28)
++#define PHY_SW_FORCE_CTRL_SDA_IN_EN BIT(27)
++#define PHY_SW_FORCE_CTRL_SDA_OUT_EN BIT(26)
++#define PHY_SW_FORCE_CTRL_SDA_OE_EN BIT(25)
++#define PHY_SW_FORCE_CTRL_SDA_PU_EN BIT(24)
++#define PHY_SW_FORCE_CTRL_SCL_IN_VAL BIT(13)
++#define PHY_SW_FORCE_CTRL_SCL_OUT_VAL BIT(12)
++#define PHY_SW_FORCE_CTRL_SCL_OE_VAL BIT(11)
++#define PHY_SW_FORCE_CTRL_SCL_PU_VAL GENMASK(10, 8)
++#define PHY_SW_FORCE_CTRL_SDA_IN_VAL BIT(5)
++#define PHY_SW_FORCE_CTRL_SDA_OUT_VAL BIT(4)
++#define PHY_SW_FORCE_CTRL_SDA_OE_VAL BIT(3)
++#define PHY_SW_FORCE_CTRL_SDA_PU_VAL GENMASK(2, 0)
++
++/* I2C FM: 400K */
++#define PHY_I2C_FM_CTRL0 0x8
++#define PHY_I2C_FM_CTRL0_CAS GENMASK(25, 16)
++#define PHY_I2C_FM_CTRL0_SU_STO GENMASK(9, 0)
++#define PHY_I2C_FM_CTRL1 0xC
++#define PHY_I2C_FM_CTRL1_SCL_H GENMASK(25, 16)
++#define PHY_I2C_FM_CTRL1_SCL_L GENMASK(9, 0)
++#define PHY_I2C_FM_CTRL2 0x10
++#define PHY_I2C_FM_CTRL2_ACK_H GENMASK(25, 16)
++#define PHY_I2C_FM_CTRL2_ACK_L GENMASK(9, 0)
++#define PHY_I2C_FM_CTRL3 0x14
++#define PHY_I2C_FM_CTRL3_HD_DAT GENMASK(25, 16)
++#define PHY_I2C_FM_CTRL3_AHD_DAT GENMASK(9, 0)
++
++#define PHY_I2C_FM_DEFAULT_CAS_NS 1130
++#define PHY_I2C_FM_DEFAULT_SU_STO_NS 1370
++#define PHY_I2C_FM_DEFAULT_SCL_H_NS 1130
++#define PHY_I2C_FM_DEFAULT_SCL_L_NS 1370
++#define PHY_I2C_FM_DEFAULT_HD_DAT 10
++#define PHY_I2C_FM_DEFAULT_AHD_DAT 10
++
++/* I2C FMP: 1M */
++#define PHY_I2C_FMP_CTRL0 0x18
++#define PHY_I2C_FMP_CTRL0_CAS GENMASK(25, 16)
++#define PHY_I2C_FMP_CTRL0_SU_STO GENMASK(9, 0)
++#define PHY_I2C_FMP_CTRL1 0x1C
++#define PHY_I2C_FMP_CTRL1_SCL_H GENMASK(25, 16)
++#define PHY_I2C_FMP_CTRL1_SCL_L GENMASK(9, 0)
++#define PHY_I2C_FMP_CTRL2 0x20
++#define PHY_I2C_FMP_CTRL2_ACK_H GENMASK(25, 16)
++#define PHY_I2C_FMP_CTRL2_ACK_L GENMASK(9, 0)
++#define PHY_I2C_FMP_CTRL3 0x24
++#define PHY_I2C_FMP_CTRL3_HD_DAT GENMASK(25, 16)
++#define PHY_I2C_FMP_CTRL3_AHD_DAT GENMASK(9, 0)
++
++#define PHY_I2C_FMP_DEFAULT_CAS_NS 380
++#define PHY_I2C_FMP_DEFAULT_SU_STO_NS 620
++#define PHY_I2C_FMP_DEFAULT_SCL_H_NS 380
++#define PHY_I2C_FMP_DEFAULT_SCL_L_NS 620
++#define PHY_I2C_FMP_DEFAULT_HD_DAT 10
++#define PHY_I2C_FMP_DEFAULT_AHD_DAT 10
++
++/* I3C OD */
++#define PHY_I3C_OD_CTRL0 0x28
++#define PHY_I3C_OD_CTRL0_CAS GENMASK(25, 16)
++#define PHY_I3C_OD_CTRL0_CBP GENMASK(9, 0)
++#define PHY_I3C_OD_CTRL1 0x2C
++#define PHY_I3C_OD_CTRL1_SCL_H GENMASK(25, 16)
++#define PHY_I3C_OD_CTRL1_SCL_L GENMASK(9, 0)
++#define PHY_I3C_OD_CTRL2 0x30
++#define PHY_I3C_OD_CTRL2_ACK_H GENMASK(25, 16)
++#define PHY_I3C_OD_CTRL2_ACK_L GENMASK(9, 0)
++#define PHY_I3C_OD_CTRL3 0x34
++#define PHY_I3C_OD_CTRL3_HD_DAT GENMASK(25, 16)
++#define PHY_I3C_OD_CTRL3_AHD_DAT GENMASK(9, 0)
++
++#define PHY_I3C_OD_DEFAULT_CAS_NS 40
++#define PHY_I3C_OD_DEFAULT_CBP_NS 40
++#define PHY_I3C_OD_DEFAULT_SCL_H_NS 380
++#define PHY_I3C_OD_DEFAULT_SCL_L_NS 620
++#define PHY_I3C_OD_DEFAULT_HD_DAT 10
++#define PHY_I3C_OD_DEFAULT_AHD_DAT 10
++
++/* I3C PP SDR0 */
++#define PHY_I3C_SDR0_CTRL0 0x38
++#define PHY_I3C_SDR0_CTRL0_SCL_H GENMASK(25, 16)
++#define PHY_I3C_SDR0_CTRL0_SCL_L GENMASK(9, 0)
++#define PHY_I3C_SDR0_CTRL1 0x3C
++#define PHY_I3C_SDR0_CTRL1_TBIT_H GENMASK(25, 16)
++#define PHY_I3C_SDR0_CTRL1_TBIT_L GENMASK(9, 0)
++#define PHY_I3C_SDR0_CTRL2 0x40
++#define PHY_I3C_SDR0_CTRL2_HD_PP GENMASK(25, 16)
++#define PHY_I3C_SDR0_CTRL2_TBIT_HD_PP GENMASK(9, 0)
++
++/* 1MHz */
++#define PHY_I3C_SDR0_DEFAULT_SCL_H_NS 380
++#define PHY_I3C_SDR0_DEFAULT_SCL_L_NS 620
++#define PHY_I3C_SDR0_DEFAULT_TBIT_H_NS 380
++#define PHY_I3C_SDR0_DEFAULT_TBIT_L_NS 620
++#define PHY_I3C_SDR0_DEFAULT_HD_PP_NS 10
++#define PHY_I3C_SDR0_DEFAULT_TBIT_HD_PP_NS 10
++
++#define PHY_I3C_CTRL0_OFFSET 0x0
++#define PHY_I3C_CTRL1_OFFSET 0x4
++#define PHY_I3C_CTRL2_OFFSET 0x8
++/* I3C PP SDR1 */
++#define PHY_I3C_SDR1_CTRL0 0x44
++#define PHY_I3C_SDR1_CTRL0_SCL_H GENMASK(25, 16)
++#define PHY_I3C_SDR1_CTRL0_SCL_L GENMASK(9, 0)
++#define PHY_I3C_SDR1_CTRL1 0x48
++#define PHY_I3C_SDR1_CTRL1_TBIT_H GENMASK(25, 16)
++#define PHY_I3C_SDR1_CTRL1_TBIT_L GENMASK(9, 0)
++#define PHY_I3C_SDR1_CTRL2 0x4C
++#define PHY_I3C_SDR1_CTRL2_HD_PP GENMASK(25, 16)
++#define PHY_I3C_SDR1_CTRL2_TBIT_HD_PP GENMASK(9, 0)
++/* I3C PP SDR2 */
++#define PHY_I3C_SDR2_CTRL0 0x50
++#define PHY_I3C_SDR2_CTRL0_SCL_H GENMASK(25, 16)
++#define PHY_I3C_SDR2_CTRL0_SCL_L GENMASK(9, 0)
++#define PHY_I3C_SDR2_CTRL1 0x54
++#define PHY_I3C_SDR2_CTRL1_TBIT_H GENMASK(25, 16)
++#define PHY_I3C_SDR2_CTRL1_TBIT_L GENMASK(9, 0)
++#define PHY_I3C_SDR2_CTRL2 0x58
++#define PHY_I3C_SDR2_CTRL2_HD_PP GENMASK(25, 16)
++#define PHY_I3C_SDR2_CTRL2_TBIT_HD_PP GENMASK(9, 0)
++/* I3C PP SDR3 */
++#define PHY_I3C_SDR3_CTRL0 0x5C
++#define PHY_I3C_SDR3_CTRL0_SCL_H GENMASK(25, 16)
++#define PHY_I3C_SDR3_CTRL0_SCL_L GENMASK(9, 0)
++#define PHY_I3C_SDR3_CTRL1 0x60
++#define PHY_I3C_SDR3_CTRL1_TBIT_H GENMASK(25, 16)
++#define PHY_I3C_SDR3_CTRL1_TBIT_L GENMASK(9, 0)
++#define PHY_I3C_SDR3_CTRL2 0x64
++#define PHY_I3C_SDR3_CTRL2_HD_PP GENMASK(25, 16)
++#define PHY_I3C_SDR3_CTRL2_TBIT_HD_PP GENMASK(9, 0)
++/* I3C PP SDR4 */
++#define PHY_I3C_SDR4_CTRL0 0x68
++#define PHY_I3C_SDR4_CTRL0_SCL_H GENMASK(25, 16)
++#define PHY_I3C_SDR4_CTRL0_SCL_L GENMASK(9, 0)
++#define PHY_I3C_SDR4_CTRL1 0x6C
++#define PHY_I3C_SDR4_CTRL1_TBIT_H GENMASK(25, 16)
++#define PHY_I3C_SDR4_CTRL1_TBIT_L GENMASK(9, 0)
++#define PHY_I3C_SDR4_CTRL2 0x70
++#define PHY_I3C_SDR4_CTRL2_HD_PP GENMASK(25, 16)
++#define PHY_I3C_SDR4_CTRL2_TBIT_HD_PP GENMASK(9, 0)
++/* I3C PP DDR */
++#define PHY_I3C_DDR_CTRL0 0x74
++#define PHY_I3C_DDR_CTRL0_SCL_H GENMASK(25, 16)
++#define PHY_I3C_DDR_CTRL0_SCL_L GENMASK(9, 0)
++#define PHY_I3C_DDR_CTRL1 0x78
++#define PHY_I3C_DDR_CTRL1_TBIT_H GENMASK(25, 16)
++#define PHY_I3C_DDR_CTRL1_TBIT_L GENMASK(9, 0)
++#define PHY_I3C_DDR_CTRL2 0x7C
++#define PHY_I3C_DDR_CTRL2_HD_PP GENMASK(25, 16)
++#define PHY_I3C_DDR_CTRL2_TBIT_HD_PP GENMASK(9, 0)
++
++/* 1MHz */
++#define PHY_I3C_DDR_DEFAULT_SCL_H_NS 380
++#define PHY_I3C_DDR_DEFAULT_SCL_L_NS 620
++#define PHY_I3C_DDR_DEFAULT_TBIT_H_NS 380
++#define PHY_I3C_DDR_DEFAULT_TBIT_L_NS 620
++#define PHY_I3C_DDR_DEFAULT_HD_PP_NS 10
++#define PHY_I3C_DDR_DEFAULT_TBIT_HD_PP_NS 10
++
++#define PHY_I3C_SR_P_PREPARE_CTRL 0x80
++#define PHY_I3C_SR_P_PREPARE_CTRL_HD GENMASK(25, 16)
++#define PHY_I3C_SR_P_PREPARE_CTRL_SCL_L GENMASK(9, 0)
++#define PHY_I3C_SR_P_DEFAULT_HD_NS 10
++#define PHY_I3C_SR_P_DEFAULT_SCL_L_NS 40
++
++#define PHY_PULLUP_EN 0x98
++#define PHY_PULLUP_EN_SCL GENMASK(14, 12)
++#define PHY_PULLUP_EN_SDA GENMASK(10, 8)
++#define PHY_PULLUP_EN_DDR_SCL GENMASK(6, 4)
++#define PHY_PULLUP_EN_DDR_SDA GENMASK(2, 0)
++
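++/*
++ * Free TX queue entries, derived from the in-house queue write/read
++ * pointers of a 0x20-entry ring, accounting for wrap-around.
++ */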
++static inline unsigned int aspeed_get_avail_tx_entries(struct i3c_hci *hci)
++{
++ unsigned int queue_ptr, entries;
++
++ queue_ptr = ast_inhouse_read(ASPEED_I3C_QUEUE_PTR0);
++ if (QUEUE_PTR0_TX_W(queue_ptr) >= QUEUE_PTR0_TX_R(queue_ptr))
++ entries = 0x20 - (QUEUE_PTR0_TX_W(queue_ptr) -
++ QUEUE_PTR0_TX_R(queue_ptr));
++ else
++ entries = QUEUE_PTR0_TX_R(queue_ptr) - QUEUE_PTR0_TX_W(queue_ptr);
++
++ return entries;
++}
++
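++/*
++ * Number of RX entries already received but not yet consumed, again
++ * derived from the in-house queue pointers with wrap-around handling.
++ */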
++static inline unsigned int aspeed_get_received_rx_entries(struct i3c_hci *hci)
++{
++ unsigned int queue_ptr, entries;
++
++ queue_ptr = ast_inhouse_read(ASPEED_I3C_QUEUE_PTR1);
++ if (QUEUE_PTR1_RX_W(queue_ptr) >= QUEUE_PTR1_RX_R(queue_ptr))
++ entries = QUEUE_PTR1_RX_W(queue_ptr) - QUEUE_PTR1_RX_R(queue_ptr);
++ else
++ entries = 0x20 - (QUEUE_PTR1_RX_R(queue_ptr) -
++ QUEUE_PTR1_RX_W(queue_ptr));
++
++ return entries;
++}
++
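++/*
++ * Revision field (bits 23:16) of the vendor product ID; the driver treats
++ * a value of 0 as AST2700 A0 and takes the A0-specific paths.
++ */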
++static inline unsigned int aspeed_get_i3c_revision_id(struct i3c_hci *hci)
++{
++ return FIELD_GET(GENMASK(23, 16), hci->vendor_product_id);
++}
++
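++/*
++ * Keep the cached dynamic address in sync with the hardware: RSTDAA clears
++ * it, while the ENTDAA/SETDASA/SETNEWDA/SETAASA CCCs refresh it from the
++ * in-house status register when the address is marked valid.
++ */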
++static inline void aspeed_i3c_ccc_handler(struct i3c_hci *hci, u8 ccc)
++{
++ u32 reg;
++ u8 dynamic_addr;
++
++ switch (ccc) {
++ case I3C_CCC_RSTDAA(true):
++ case I3C_CCC_RSTDAA(false):
++ hci->master.this->info.dyn_addr = 0;
++ break;
++ case I3C_CCC_ENTDAA:
++ case I3C_CCC_SETDASA:
++ case I3C_CCC_SETNEWDA:
++ case I3C_CCC_SETAASA:
++ reg = ast_inhouse_read(ASPEED_I3C_STS);
++ if (reg & ASPEED_I3C_STS_SLV_DYNAMIC_ADDRESS_VALID) {
++ dynamic_addr = FIELD_GET(ASPEED_I3C_STS_SLV_DYNAMIC_ADDRESS, reg);
++ hci->master.this->info.dyn_addr = dynamic_addr;
++ }
++ break;
++ }
++}
++
++#endif
+diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
+index acc937275..96070e86d 100644
+--- a/drivers/i3c/master/svc-i3c-master.c
++++ b/drivers/i3c/master/svc-i3c-master.c
+@@ -798,7 +798,7 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
+ u8 data[6];
+
+ /*
+- * We only care about the 48-bit provisional ID yet to
++ * We only care about the 48-bit provisioned ID yet to
+ * be sure a device does not nack an address twice.
+ * Otherwise, we would just need to flush the RX FIFO.
+ */
+diff --git a/drivers/i3c/mctp/Kconfig b/drivers/i3c/mctp/Kconfig
+new file mode 100644
+index 000000000..c272bca7e
+--- /dev/null
++++ b/drivers/i3c/mctp/Kconfig
+@@ -0,0 +1,23 @@
++# SPDX-License-Identifier: GPL-2.0-only
++config I3C_MCTP
++ tristate "I3C Controller MCTP driver"
++ depends on I3C
++	help
++ Say yes here to enable the I3C MCTP driver for I3C HW that is
++ configured as an I3C Controller Device on the I3C Bus.
++
++config I3C_MCTP_HDR_DDR
++ bool "transfer with HDR-DDR mode"
++ depends on I3C_MCTP
++ default n
++ help
++	  Say yes here to use HDR-DDR mode by default for data transfers if
++	  the device supports it.
++
++config I3C_TARGET_MCTP
++ tristate "I3C Target MCTP driver"
++ depends on I3C
++ select CRC8
++	help
++ Say yes here to enable the I3C MCTP driver for I3C HW that is
++ configured as an I3C Target Device on the I3C Bus.
+diff --git a/drivers/i3c/mctp/Makefile b/drivers/i3c/mctp/Makefile
+new file mode 100644
+index 000000000..05eb78684
+--- /dev/null
++++ b/drivers/i3c/mctp/Makefile
+@@ -0,0 +1,3 @@
++# SPDX-License-Identifier: GPL-2.0-only
++obj-$(CONFIG_I3C_MCTP) += i3c-mctp.o
++obj-$(CONFIG_I3C_TARGET_MCTP) += i3c-target-mctp.o
+diff --git a/drivers/i3c/mctp/i3c-mctp.c b/drivers/i3c/mctp/i3c-mctp.c
+new file mode 100644
+index 000000000..c4758de7c
+--- /dev/null
++++ b/drivers/i3c/mctp/i3c-mctp.c
+@@ -0,0 +1,697 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (C) 2022 Intel Corporation.*/
++
++#include <linux/cdev.h>
++#include <linux/fs.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/poll.h>
++#include <linux/preempt.h>
++#include <linux/ptr_ring.h>
++#include <linux/slab.h>
++#include <linux/timer.h>
++#include <linux/types.h>
++#include <linux/workqueue.h>
++
++#include <linux/i3c/device.h>
++#include <linux/i3c/master.h>
++
++#include <linux/i3c/mctp/i3c-mctp.h>
++
++#define I3C_MCTP_MINORS 32
++#define CCC_DEVICE_STATUS_PENDING_INTR(x) (((x) & GENMASK(3, 0)) >> 0)
++#define POLLING_TIMEOUT_MS 50
++#define MCTP_INTERRUPT_NUMBER 1
++#define RX_RING_COUNT 16
++#define I3C_MCTP_MIN_TRANSFER_SIZE 69
++#define I3C_MCTP_IBI_PAYLOAD_SIZE 2
++
++struct i3c_mctp {
++ struct i3c_device *i3c;
++ struct cdev cdev;
++ struct device *dev;
++ struct delayed_work polling_work;
++ struct platform_device *i3c_peci;
++ int id;
++ /*
++	 * Restrict access to the /dev descriptor to one
++ * user at a time.
++ */
++ spinlock_t device_file_lock;
++ int device_open;
++ /* Currently only one userspace client is supported */
++ struct i3c_mctp_client *default_client;
++ struct i3c_mctp_client *peci_client;
++ u16 max_read_len;
++ u16 max_write_len;
++};
++
++struct i3c_mctp_client {
++ struct i3c_mctp *priv;
++ struct ptr_ring rx_queue;
++ wait_queue_head_t wait_queue;
++};
++
++static struct class *i3c_mctp_class;
++static dev_t i3c_mctp_devt;
++static DEFINE_IDA(i3c_mctp_ida);
++
++static struct kmem_cache *packet_cache;
++
++/**
++ * i3c_mctp_packet_alloc() - allocates i3c_mctp_packet
++ *
++ * @flags: the type of memory to allocate
++ *
++ * Allocates i3c_mctp_packet via slab allocation
++ * Return: pointer to the packet, or NULL if an error occurred
++ */
++void *i3c_mctp_packet_alloc(gfp_t flags)
++{
++ return kmem_cache_alloc(packet_cache, flags);
++}
++EXPORT_SYMBOL_GPL(i3c_mctp_packet_alloc);
++
++/**
++ * i3c_mctp_packet_free() - frees i3c_mctp_packet
++ *
++ * @packet: pointer to the packet which should be freed
++ *
++ * Frees i3c_mctp_packet previously allocated via slab allocation
++ */
++void i3c_mctp_packet_free(void *packet)
++{
++ kmem_cache_free(packet_cache, packet);
++}
++EXPORT_SYMBOL_GPL(i3c_mctp_packet_free);
++
++static void i3c_mctp_client_free(struct i3c_mctp_client *client)
++{
++ ptr_ring_cleanup(&client->rx_queue, &i3c_mctp_packet_free);
++
++ kfree(client);
++}
++
++static struct i3c_mctp_client *i3c_mctp_client_alloc(struct i3c_mctp *priv)
++{
++ struct i3c_mctp_client *client;
++ int ret;
++
++ client = kzalloc(sizeof(*client), GFP_KERNEL);
++ if (!client)
++ goto out;
++
++ client->priv = priv;
++ ret = ptr_ring_init(&client->rx_queue, RX_RING_COUNT, GFP_KERNEL);
++ if (ret)
++ return ERR_PTR(ret);
++ init_waitqueue_head(&client->wait_queue);
++out:
++ return client;
++}
++
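++/*
++ * Route Intel PECI-over-MCTP vendor-defined messages to the PECI client
++ * (when registered); everything else goes to the default userspace client.
++ */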
++static struct i3c_mctp_client *i3c_mctp_find_client(struct i3c_mctp *priv,
++ struct i3c_mctp_packet *packet)
++{
++ u8 *msg_hdr = (u8 *)packet->data.payload;
++ u8 mctp_type = msg_hdr[MCTP_MSG_HDR_MSG_TYPE_OFFSET];
++ u16 vendor = (msg_hdr[MCTP_MSG_HDR_VENDOR_OFFSET] << 8
++ | msg_hdr[MCTP_MSG_HDR_VENDOR_OFFSET + 1]);
++ u8 intel_msg_op_code = msg_hdr[MCTP_MSG_HDR_OPCODE_OFFSET];
++
++ if (priv->peci_client && mctp_type == MCTP_MSG_TYPE_VDM_PCI &&
++ vendor == MCTP_VDM_PCI_INTEL_VENDOR_ID && intel_msg_op_code == MCTP_VDM_PCI_INTEL_PECI)
++ return priv->peci_client;
++
++ return priv->default_client;
++}
++
++static struct i3c_mctp_packet *i3c_mctp_read_packet(struct i3c_device *i3c)
++{
++ struct i3c_mctp *priv = dev_get_drvdata(i3cdev_to_dev(i3c));
++ struct i3c_mctp_packet *rx_packet;
++ struct i3c_priv_xfer xfers = {
++ .rnw = true,
++ };
++ int ret;
++
++ rx_packet = i3c_mctp_packet_alloc(GFP_KERNEL);
++ if (!rx_packet)
++ return ERR_PTR(-ENOMEM);
++
++ rx_packet->size = I3C_MCTP_PACKET_SIZE;
++ xfers.len = rx_packet->size;
++ xfers.data.in = &rx_packet->data;
++
++ /* Check against packet size + PEC byte to make sure that we always try to read max */
++ if (priv->max_read_len != xfers.len + 1) {
++ dev_dbg(i3cdev_to_dev(i3c), "Length mismatch. MRL = %d, xfers.len = %d",
++ priv->max_read_len, xfers.len);
++ i3c_mctp_packet_free(rx_packet);
++ return ERR_PTR(-EINVAL);
++ }
++ if (i3c->desc->info.hdr_cap & BIT(I3C_HDR_DDR) &&
++ IS_ENABLED(CONFIG_I3C_MCTP_HDR_DDR)) {
++ struct i3c_hdr_cmd cmds;
++
++ cmds.mode = I3C_HDR_DDR;
++ cmds.code = 0x80;
++ cmds.ndatawords = DIV_ROUND_UP(rx_packet->size, 2);
++ cmds.data.in = &rx_packet->data;
++ ret = i3c_device_send_hdr_cmds(i3c, &cmds, 1);
++ if (!ret)
++ rx_packet->size = cmds.ndatawords;
++ } else {
++ ret = i3c_device_do_priv_xfers(i3c, &xfers, 1);
++ if (!ret)
++ rx_packet->size = xfers.len;
++ }
++ if (ret) {
++ i3c_mctp_packet_free(rx_packet);
++ return ERR_PTR(ret);
++ }
++
++ return rx_packet;
++}
++
++static void i3c_mctp_dispatch_packet(struct i3c_mctp *priv, struct i3c_mctp_packet *packet)
++{
++ struct i3c_mctp_client *client = i3c_mctp_find_client(priv, packet);
++ int ret;
++
++ ret = ptr_ring_produce(&client->rx_queue, packet);
++ if (ret)
++ i3c_mctp_packet_free(packet);
++ else
++ wake_up_all(&client->wait_queue);
++}
++
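++/*
++ * Polling fallback used when IBIs could not be enabled: query GETSTATUS and
++ * read a packet whenever the device reports the MCTP pending interrupt.
++ */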
++static void i3c_mctp_polling_work(struct work_struct *work)
++{
++ struct i3c_mctp *priv = container_of(to_delayed_work(work), struct i3c_mctp, polling_work);
++ struct i3c_device *i3cdev = priv->i3c;
++ struct i3c_mctp_packet *rx_packet;
++ struct i3c_device_info info;
++ int ret;
++
++ i3c_device_get_info(i3cdev, &info);
++ ret = i3c_device_getstatus_ccc(i3cdev, &info);
++ if (ret)
++ return;
++
++ if (CCC_DEVICE_STATUS_PENDING_INTR(info.status) != MCTP_INTERRUPT_NUMBER)
++ return;
++
++ rx_packet = i3c_mctp_read_packet(i3cdev);
++ if (IS_ERR(rx_packet))
++ goto out;
++
++ i3c_mctp_dispatch_packet(priv, rx_packet);
++out:
++ schedule_delayed_work(&priv->polling_work, msecs_to_jiffies(POLLING_TIMEOUT_MS));
++}
++
++static ssize_t i3c_mctp_write(struct file *file, const char __user *buf, size_t count,
++ loff_t *f_pos)
++{
++ struct i3c_mctp *priv = file->private_data;
++ struct i3c_device *i3c = priv->i3c;
++ struct i3c_priv_xfer xfers = {
++ .rnw = false,
++ .len = count,
++ };
++ u8 *data;
++ int ret;
++
++ /*
++	 * Check against packet size + PEC byte so we do not send more data
++	 * than was configured in probe
++ */
++ if (priv->max_write_len < xfers.len + 1) {
++ dev_dbg(i3cdev_to_dev(i3c), "Length mismatch. MWL = %d, xfers.len = %d",
++ priv->max_write_len, xfers.len);
++ return -EINVAL;
++ }
++
++ data = memdup_user(buf, count);
++ if (IS_ERR(data))
++ return PTR_ERR(data);
++
++ if (i3c->desc->info.hdr_cap & BIT(I3C_HDR_DDR) &&
++ IS_ENABLED(CONFIG_I3C_MCTP_HDR_DDR)) {
++ struct i3c_hdr_cmd cmds;
++
++ cmds.mode = I3C_HDR_DDR;
++ cmds.code = 0;
++ cmds.ndatawords = DIV_ROUND_UP(count, 2);
++ cmds.data.out = data;
++ ret = i3c_device_send_hdr_cmds(i3c, &cmds, 1);
++ } else {
++ xfers.data.out = data;
++
++ ret = i3c_device_do_priv_xfers(i3c, &xfers, 1);
++ }
++ kfree(data);
++ return ret ?: count;
++}
++
++static ssize_t i3c_mctp_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)
++{
++ struct i3c_mctp *priv = file->private_data;
++ struct i3c_mctp_client *client = priv->default_client;
++ struct i3c_mctp_packet *rx_packet;
++
++ if (count > sizeof(rx_packet->data))
++ count = sizeof(rx_packet->data);
++
++ rx_packet = ptr_ring_consume(&client->rx_queue);
++ if (!rx_packet)
++ return -EAGAIN;
++
++ if (count > rx_packet->size)
++ count = rx_packet->size;
++
++ if (copy_to_user(buf, &rx_packet->data, count))
++ return -EFAULT;
++
++ i3c_mctp_packet_free(rx_packet);
++
++ return count;
++}
++
++static int i3c_mctp_open(struct inode *inode, struct file *file)
++{
++ struct i3c_mctp *priv = container_of(inode->i_cdev, struct i3c_mctp, cdev);
++
++ spin_lock(&priv->device_file_lock);
++ if (priv->device_open) {
++ spin_unlock(&priv->device_file_lock);
++ return -EBUSY;
++ }
++ priv->device_open++;
++	/* Discard all of the packets in the rx_queue */
++ while (ptr_ring_consume(&priv->default_client->rx_queue))
++ ;
++ spin_unlock(&priv->device_file_lock);
++
++ file->private_data = priv;
++
++ return 0;
++}
++
++static int i3c_mctp_release(struct inode *inode, struct file *file)
++{
++ struct i3c_mctp *priv = file->private_data;
++
++ spin_lock(&priv->device_file_lock);
++ priv->device_open--;
++ spin_unlock(&priv->device_file_lock);
++
++ file->private_data = NULL;
++
++ return 0;
++}
++
++static __poll_t i3c_mctp_poll(struct file *file, struct poll_table_struct *pt)
++{
++ struct i3c_mctp *priv = file->private_data;
++ __poll_t ret = 0;
++
++ poll_wait(file, &priv->default_client->wait_queue, pt);
++
++ if (__ptr_ring_peek(&priv->default_client->rx_queue))
++ ret |= EPOLLIN;
++
++ return ret;
++}
++
++static const struct file_operations i3c_mctp_fops = {
++ .owner = THIS_MODULE,
++ .read = i3c_mctp_read,
++ .write = i3c_mctp_write,
++ .poll = i3c_mctp_poll,
++ .open = i3c_mctp_open,
++ .release = i3c_mctp_release,
++};
++
++/**
++ * i3c_mctp_add_peci_client() - registers PECI client
++ * @i3c: I3C device to get the PECI client for
++ *
++ * Return: pointer to the PECI client, or ERR_PTR(-ENOMEM) if client allocation fails
++ */
++struct i3c_mctp_client *i3c_mctp_add_peci_client(struct i3c_device *i3c)
++{
++ struct i3c_mctp *priv = dev_get_drvdata(i3cdev_to_dev(i3c));
++ struct i3c_mctp_client *client;
++
++ client = i3c_mctp_client_alloc(priv);
++ if (IS_ERR(client))
++ return ERR_PTR(-ENOMEM);
++
++ priv->peci_client = client;
++
++ return priv->peci_client;
++}
++EXPORT_SYMBOL_GPL(i3c_mctp_add_peci_client);
++
++/**
++ * i3c_mctp_remove_peci_client() - un-registers PECI client
++ * @client: i3c_mctp_client to be freed
++ */
++void i3c_mctp_remove_peci_client(struct i3c_mctp_client *client)
++{
++ struct i3c_mctp *priv = client->priv;
++
++ i3c_mctp_client_free(priv->peci_client);
++
++ priv->peci_client = NULL;
++}
++EXPORT_SYMBOL_GPL(i3c_mctp_remove_peci_client);
++
++static struct i3c_mctp *i3c_mctp_alloc(struct i3c_device *i3c)
++{
++ struct i3c_mctp *priv;
++ int id;
++
++ priv = devm_kzalloc(i3cdev_to_dev(i3c), sizeof(*priv), GFP_KERNEL);
++ if (!priv)
++ return ERR_PTR(-ENOMEM);
++
++ id = ida_alloc(&i3c_mctp_ida, GFP_KERNEL);
++ if (id < 0) {
++ pr_err("i3c_mctp: no minor number available!\n");
++ return ERR_PTR(id);
++ }
++
++ priv->id = id;
++ priv->i3c = i3c;
++
++ spin_lock_init(&priv->device_file_lock);
++
++ return priv;
++}
++
++static void i3c_mctp_ibi_handler(struct i3c_device *dev, const struct i3c_ibi_payload *payload)
++{
++ struct i3c_mctp *priv = dev_get_drvdata(i3cdev_to_dev(dev));
++ struct i3c_mctp_packet *rx_packet;
++
++ rx_packet = i3c_mctp_read_packet(dev);
++ if (IS_ERR(rx_packet))
++ return;
++
++ i3c_mctp_dispatch_packet(priv, rx_packet);
++}
++
++static int i3c_mctp_init(struct i3c_driver *drv)
++{
++ int ret;
++
++ packet_cache = kmem_cache_create_usercopy("mctp-i3c-packet",
++ sizeof(struct i3c_mctp_packet), 0, 0, 0,
++ sizeof(struct i3c_mctp_packet), NULL);
++ if (IS_ERR(packet_cache)) {
++ ret = PTR_ERR(packet_cache);
++ goto out;
++ }
++
++ /* Dynamically request unused major number */
++ ret = alloc_chrdev_region(&i3c_mctp_devt, 0, I3C_MCTP_MINORS, "i3c-mctp");
++ if (ret)
++ goto out;
++
++	/* Create a class to populate sysfs entries */
++ i3c_mctp_class = class_create("i3c-mctp");
++ if (IS_ERR(i3c_mctp_class)) {
++ ret = PTR_ERR(i3c_mctp_class);
++ goto out_unreg_chrdev;
++ }
++
++ i3c_driver_register(drv);
++
++ return 0;
++
++out_unreg_chrdev:
++ unregister_chrdev_region(i3c_mctp_devt, I3C_MCTP_MINORS);
++out:
++ pr_err("i3c_mctp: driver initialisation failed\n");
++ return ret;
++}
++
++static void i3c_mctp_free(struct i3c_driver *drv)
++{
++ i3c_driver_unregister(drv);
++ class_destroy(i3c_mctp_class);
++ unregister_chrdev_region(i3c_mctp_devt, I3C_MCTP_MINORS);
++ kmem_cache_destroy(packet_cache);
++}
++
++static int i3c_mctp_enable_ibi(struct i3c_device *i3cdev)
++{
++ struct i3c_ibi_setup ibireq = {
++ .handler = i3c_mctp_ibi_handler,
++ .max_payload_len = 2,
++ .num_slots = 10,
++ };
++ int ret;
++
++ ret = i3c_device_request_ibi(i3cdev, &ibireq);
++ if (ret)
++ return ret;
++ ret = i3c_device_enable_ibi(i3cdev);
++ if (ret)
++ i3c_device_free_ibi(i3cdev);
++
++ return ret;
++}
++
++static void i3c_mctp_disable_ibi(struct i3c_device *i3cdev)
++{
++ i3c_device_disable_ibi(i3cdev);
++ i3c_device_free_ibi(i3cdev);
++}
++
++/**
++ * i3c_mctp_get_eid() - receive MCTP EID assigned to the device
++ *
++ * @client: client for the device to get the EID for
++ * @domain_id: requested domain ID
++ * @eid: pointer to store EID value
++ *
++ * Receive MCTP endpoint ID dynamically assigned by the MCTP Bus Owner
++ * Return: 0 in case of success, a negative error code otherwise.
++ */
++int i3c_mctp_get_eid(struct i3c_mctp_client *client, u8 domain_id, u8 *eid)
++{
++ /* TODO: Implement EID assignment basing on domain ID */
++ *eid = 1;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(i3c_mctp_get_eid);
++
++/**
++ * i3c_mctp_send_packet() - send mctp packet
++ *
++ * @tx_packet: the allocated packet that needs to be sent via I3C
++ * @i3c: i3c device to send the packet to
++ *
++ * Return: 0 in case of success, a negative error code otherwise.
++ */
++int i3c_mctp_send_packet(struct i3c_device *i3c, struct i3c_mctp_packet *tx_packet)
++{
++ if (i3c->desc->info.hdr_cap & BIT(I3C_HDR_DDR) &&
++ IS_ENABLED(CONFIG_I3C_MCTP_HDR_DDR)) {
++ struct i3c_hdr_cmd cmds;
++
++ cmds.mode = I3C_HDR_DDR;
++ cmds.code = 0;
++ cmds.ndatawords = DIV_ROUND_UP(tx_packet->size, 2);
++ cmds.data.out = &tx_packet->data;
++ return i3c_device_send_hdr_cmds(i3c, &cmds, 1);
++ }
++ struct i3c_priv_xfer xfers;
++
++ xfers.rnw = false;
++ xfers.len = tx_packet->size;
++ xfers.data.out = &tx_packet->data;
++ return i3c_device_do_priv_xfers(i3c, &xfers, 1);
++}
++EXPORT_SYMBOL_GPL(i3c_mctp_send_packet);
++
++/**
++ * i3c_mctp_receive_packet() - receive mctp packet
++ *
++ * @client: i3c_mctp_client to receive the packet from
++ * @timeout: timeout, in jiffies
++ *
++ * The function will sleep for up to @timeout if no packet is ready to read.
++ *
++ * Return: struct i3c_mctp_packet on success, or an ERR_PTR() on error or if
++ * the timeout elapsed.
++ */
++struct i3c_mctp_packet *i3c_mctp_receive_packet(struct i3c_mctp_client *client,
++ unsigned long timeout)
++{
++ struct i3c_mctp_packet *rx_packet;
++ int ret;
++
++ ret = wait_event_interruptible_timeout(client->wait_queue,
++ __ptr_ring_peek(&client->rx_queue), timeout);
++ if (ret < 0)
++ return ERR_PTR(ret);
++ else if (ret == 0)
++ return ERR_PTR(-ETIME);
++
++ rx_packet = ptr_ring_consume(&client->rx_queue);
++ if (!rx_packet)
++ return ERR_PTR(-EAGAIN);
++
++ return rx_packet;
++}
++EXPORT_SYMBOL_GPL(i3c_mctp_receive_packet);
++
++static void i3c_mctp_i3c_event_cb(struct i3c_device *dev, enum i3c_event event)
++{
++ struct i3c_mctp *priv = dev_get_drvdata(i3cdev_to_dev(dev));
++
++ switch (event) {
++ case i3c_event_prepare_for_rescan:
++ /*
++ * Disable IBI and polling mode blindly.
++ */
++ i3c_mctp_disable_ibi(dev);
++ cancel_delayed_work(&priv->polling_work);
++ break;
++ case i3c_event_rescan_done:
++ if (i3c_mctp_enable_ibi(dev)) {
++ INIT_DELAYED_WORK(&priv->polling_work,
++ i3c_mctp_polling_work);
++ schedule_delayed_work(&priv->polling_work,
++ msecs_to_jiffies(POLLING_TIMEOUT_MS));
++ }
++ break;
++ default:
++ break;
++ }
++}
++
++static int i3c_mctp_probe(struct i3c_device *i3cdev)
++{
++ int ibi_payload_size = I3C_MCTP_IBI_PAYLOAD_SIZE;
++ struct device *dev = i3cdev_to_dev(i3cdev);
++ struct i3c_device_info info;
++ struct i3c_mctp *priv;
++ int ret;
++
++ priv = i3c_mctp_alloc(i3cdev);
++ if (IS_ERR(priv))
++ return PTR_ERR(priv);
++
++ cdev_init(&priv->cdev, &i3c_mctp_fops);
++
++ priv->cdev.owner = THIS_MODULE;
++ ret = cdev_add(&priv->cdev, MKDEV(MAJOR(i3c_mctp_devt), priv->id), 1);
++ if (ret)
++ goto error_cdev;
++
++ /* register this i3c device with the driver core */
++ priv->dev = device_create(i3c_mctp_class, dev,
++ MKDEV(MAJOR(i3c_mctp_devt), priv->id),
++ NULL, "i3c-mctp-%d", priv->id);
++ if (IS_ERR(priv->dev)) {
++ ret = PTR_ERR(priv->dev);
++ goto error;
++ }
++
++ ret = i3c_device_control_pec(i3cdev, true);
++ if (ret)
++		dev_warn(priv->dev, "Hardware does not support PEC\n");
++
++ priv->default_client = i3c_mctp_client_alloc(priv);
++ ret = PTR_ERR_OR_ZERO(priv->default_client);
++ if (ret)
++ goto error;
++ dev_set_drvdata(i3cdev_to_dev(i3cdev), priv);
++
++ priv->i3c_peci = platform_device_register_data(i3cdev_to_dev(i3cdev), "peci-i3c", priv->id,
++ NULL, 0);
++ if (IS_ERR(priv->i3c_peci))
++ dev_warn(priv->dev, "failed to register peci-i3c device\n");
++
++ i3c_device_register_event_cb(i3cdev, i3c_mctp_i3c_event_cb);
++ if (i3c_mctp_enable_ibi(i3cdev)) {
++ INIT_DELAYED_WORK(&priv->polling_work, i3c_mctp_polling_work);
++ schedule_delayed_work(&priv->polling_work, msecs_to_jiffies(POLLING_TIMEOUT_MS));
++ ibi_payload_size = 0;
++ }
++
++ i3c_device_get_info(i3cdev, &info);
++
++ ret = i3c_device_getmrl_ccc(i3cdev, &info);
++ if (ret || info.max_read_len != I3C_MCTP_MIN_TRANSFER_SIZE)
++ ret = i3c_device_setmrl_ccc(i3cdev, &info, I3C_MCTP_MIN_TRANSFER_SIZE,
++ ibi_payload_size);
++ if (ret && info.max_read_len != I3C_MCTP_MIN_TRANSFER_SIZE) {
++ dev_err(dev, "Failed to set MRL!, ret = %d\n", ret);
++ goto error_peci;
++ }
++ priv->max_read_len = info.max_read_len;
++
++ ret = i3c_device_getmwl_ccc(i3cdev, &info);
++ if (ret || info.max_write_len != I3C_MCTP_MIN_TRANSFER_SIZE)
++ ret = i3c_device_setmwl_ccc(i3cdev, &info, I3C_MCTP_MIN_TRANSFER_SIZE);
++ if (ret && info.max_write_len != I3C_MCTP_MIN_TRANSFER_SIZE) {
++ dev_err(dev, "Failed to set MWL!, ret = %d\n", ret);
++ goto error_peci;
++ }
++ priv->max_write_len = info.max_write_len;
++
++ return 0;
++
++error_peci:
++ platform_device_unregister(priv->i3c_peci);
++ i3c_device_disable_ibi(i3cdev);
++ i3c_device_free_ibi(i3cdev);
++error:
++ cdev_del(&priv->cdev);
++error_cdev:
++ put_device(dev);
++ return ret;
++}
++
++static void i3c_mctp_remove(struct i3c_device *i3cdev)
++{
++ struct i3c_mctp *priv = dev_get_drvdata(i3cdev_to_dev(i3cdev));
++
++ i3c_mctp_disable_ibi(i3cdev);
++ i3c_mctp_client_free(priv->default_client);
++ priv->default_client = NULL;
++ platform_device_unregister(priv->i3c_peci);
++
++ device_destroy(i3c_mctp_class, MKDEV(MAJOR(i3c_mctp_devt), priv->id));
++ cdev_del(&priv->cdev);
++ ida_free(&i3c_mctp_ida, priv->id);
++}
++
++static const struct i3c_device_id i3c_mctp_ids[] = {
++ I3C_CLASS(0xCC, 0x0),
++ I3C_DEVICE(0x3f6, 0x8000, (void *)0),
++ I3C_DEVICE(0x3f6, 0x8001, (void *)0),
++ I3C_DEVICE(0x3f6, 0xA001, (void *)0),
++ I3C_DEVICE(0x3f6, 0xA003, (void *)0),
++ I3C_DEVICE(0x3f6, 0x0503, (void *)0),
++ { },
++};
++
++static struct i3c_driver i3c_mctp_drv = {
++ .driver.name = "i3c-mctp",
++ .id_table = i3c_mctp_ids,
++ .probe = i3c_mctp_probe,
++ .remove = i3c_mctp_remove,
++};
++
++module_driver(i3c_mctp_drv, i3c_mctp_init, i3c_mctp_free);
++MODULE_AUTHOR("Oleksandr Shulzhenko <oleksandr.shulzhenko.viktorovych@intel.com>");
++MODULE_DESCRIPTION("I3C MCTP driver");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/i3c/mctp/i3c-target-mctp.c b/drivers/i3c/mctp/i3c-target-mctp.c
+new file mode 100644
+index 000000000..28645bb15
+--- /dev/null
++++ b/drivers/i3c/mctp/i3c-target-mctp.c
+@@ -0,0 +1,485 @@
++// SPDX-License-Identifier: GPL-2.0
++/* Copyright (C) 2022 Intel Corporation.*/
++
++#include <linux/cdev.h>
++#include <linux/idr.h>
++#include <linux/module.h>
++#include <linux/poll.h>
++#include <linux/ptr_ring.h>
++#include <linux/workqueue.h>
++#include <linux/crc8.h>
++
++#include <linux/i3c/device.h>
++
++#define I3C_CRC8_POLYNOMIAL 0x07
++DECLARE_CRC8_TABLE(i3c_crc8_table);
++
++#define I3C_TARGET_MCTP_MINORS 32
++#define RX_RING_COUNT 16
++
++/*
++ * IBI Mandatory Data Byte
++ * https://www.mipi.org/mipi_i3c_mandatory_data_byte_values_public
++ *
++ * MCTP:
++ * bit[7:5] = 3'b101
++ * bit[4:0] = 5'h0E
++ */
++#define I3C_MCTP_MDB 0xae
++
++static struct class *i3c_target_mctp_class;
++static dev_t i3c_target_mctp_devt;
++static DEFINE_IDA(i3c_target_mctp_ida);
++
++struct mctp_client;
++
++struct i3c_target_mctp {
++ struct i3c_device *i3cdev;
++ struct cdev cdev;
++ int id;
++ struct mctp_client *client;
++ spinlock_t client_lock; /* to protect client access */
++ bool mdb_append_pec;
++};
++
++struct mctp_client {
++ struct kref ref;
++ struct i3c_target_mctp *priv;
++ struct ptr_ring rx_queue;
++ wait_queue_head_t wait_queue;
++};
++
++struct mctp_packet {
++ u8 *data;
++ u16 count;
++};
++
++static void *i3c_target_mctp_packet_alloc(u16 count)
++{
++ struct mctp_packet *packet;
++ u8 *data;
++
++ packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
++ if (!packet)
++ return NULL;
++
++ data = kzalloc(count, GFP_ATOMIC);
++ if (!data) {
++ kfree(packet);
++ return NULL;
++ }
++
++ packet->data = data;
++ packet->count = count;
++
++ return packet;
++}
++
++static void i3c_target_mctp_packet_free(void *data)
++{
++ struct mctp_packet *packet = data;
++
++ kfree(packet->data);
++ kfree(packet);
++}
++
++static struct mctp_client *i3c_target_mctp_client_alloc(struct i3c_target_mctp *priv)
++{
++ struct mctp_client *client;
++
++ client = kzalloc(sizeof(*client), GFP_KERNEL);
++ if (!client)
++ goto out;
++
++ kref_init(&client->ref);
++ client->priv = priv;
++ ptr_ring_init(&client->rx_queue, RX_RING_COUNT, GFP_KERNEL);
++out:
++ return client;
++}
++
++static void i3c_target_mctp_client_free(struct kref *ref)
++{
++ struct mctp_client *client = container_of(ref, typeof(*client), ref);
++
++ ptr_ring_cleanup(&client->rx_queue, &i3c_target_mctp_packet_free);
++
++ kfree(client);
++}
++
++static void i3c_target_mctp_client_get(struct mctp_client *client)
++{
++ kref_get(&client->ref);
++}
++
++static void i3c_target_mctp_client_put(struct mctp_client *client)
++{
++ kref_put(&client->ref, &i3c_target_mctp_client_free);
++}
++
++static void
++i3c_target_mctp_rx_packet_enqueue(struct i3c_device *i3cdev, const u8 *data, size_t count)
++{
++ struct i3c_target_mctp *priv = dev_get_drvdata(i3cdev_to_dev(i3cdev));
++ struct mctp_client *client;
++ struct mctp_packet *packet;
++ int ret;
++
++ spin_lock(&priv->client_lock);
++ client = priv->client;
++ if (client)
++ i3c_target_mctp_client_get(client);
++ spin_unlock(&priv->client_lock);
++
++ if (!client)
++ return;
++
++ packet = i3c_target_mctp_packet_alloc(count);
++ if (!packet)
++ goto err;
++
++ memcpy(packet->data, data, count);
++
++ ret = ptr_ring_produce(&client->rx_queue, packet);
++ if (ret)
++ i3c_target_mctp_packet_free(packet);
++ else
++ wake_up_all(&client->wait_queue);
++err:
++ i3c_target_mctp_client_put(client);
++}
++
++static struct mctp_client *i3c_target_mctp_create_client(struct i3c_target_mctp *priv)
++{
++ struct mctp_client *client;
++ int ret;
++
++ /* Currently, we support just one client. */
++ spin_lock_irq(&priv->client_lock);
++ ret = priv->client ? -EBUSY : 0;
++ spin_unlock_irq(&priv->client_lock);
++
++ if (ret)
++ return ERR_PTR(ret);
++
++ client = i3c_target_mctp_client_alloc(priv);
++ if (!client)
++ return ERR_PTR(-ENOMEM);
++
++ init_waitqueue_head(&client->wait_queue);
++
++ spin_lock_irq(&priv->client_lock);
++ priv->client = client;
++ spin_unlock_irq(&priv->client_lock);
++
++ return client;
++}
++
++static void i3c_target_mctp_delete_client(struct mctp_client *client)
++{
++ struct i3c_target_mctp *priv = client->priv;
++
++ spin_lock_irq(&priv->client_lock);
++ priv->client = NULL;
++ spin_unlock_irq(&priv->client_lock);
++
++ i3c_target_mctp_client_put(client);
++}
++
++static int i3c_target_mctp_open(struct inode *inode, struct file *file)
++{
++ struct i3c_target_mctp *priv = container_of(inode->i_cdev, struct i3c_target_mctp, cdev);
++ struct mctp_client *client;
++
++ client = i3c_target_mctp_create_client(priv);
++ if (IS_ERR(client))
++ return PTR_ERR(client);
++
++ file->private_data = client;
++
++ return 0;
++}
++
++static int i3c_target_mctp_release(struct inode *inode, struct file *file)
++{
++ struct mctp_client *client = file->private_data;
++
++ i3c_target_mctp_delete_client(client);
++
++ return 0;
++}
++
++static ssize_t i3c_target_mctp_read(struct file *file, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct mctp_client *client = file->private_data;
++ struct mctp_packet *rx_packet;
++
++ rx_packet = ptr_ring_consume_irq(&client->rx_queue);
++ if (!rx_packet)
++ return -EAGAIN;
++
++ if (count < rx_packet->count) {
++ count = -EINVAL;
++ goto err_free;
++ }
++ if (count > rx_packet->count)
++ count = rx_packet->count;
++
++ if (copy_to_user(buf, rx_packet->data, count))
++ count = -EFAULT;
++err_free:
++ i3c_target_mctp_packet_free(rx_packet);
++
++ return count;
++}
++
++static u8 *pec_append(u8 addr_rnw, u8 *buf, u8 len)
++{
++ u8 pec_v;
++
++ pec_v = crc8(i3c_crc8_table, &addr_rnw, 1, 0);
++ pec_v = crc8(i3c_crc8_table, buf, len, pec_v);
++ buf[len] = pec_v;
++
++ return buf;
++}
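pec_append() computes the SMBus-style PEC: a CRC-8 (polynomial 0x07, MSB first) over the address/RnW byte followed by the payload, stored one byte past the payload. A small illustration for the IBI case used in write() below, where the payload is just the MCTP MDB; the dynamic address 0x20 and the wrapper function name are arbitrary examples, not part of the driver.

```
/* Illustration only: PEC for an IBI whose payload is the MCTP MDB. */
static void i3c_target_mctp_pec_example(void)
{
	u8 buf[2] = { I3C_MCTP_MDB, 0 };	/* room for the extra PEC byte */
	u8 addr_rnw = (0x20 << 1) | 0x1;	/* dynamic address 0x20, read */

	crc8_populate_msb(i3c_crc8_table, I3C_CRC8_POLYNOMIAL);
	pec_append(addr_rnw, buf, 1);		/* buf[1] now holds the PEC */
}
```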
++
++static ssize_t i3c_target_mctp_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct mctp_client *client = file->private_data;
++ struct i3c_target_mctp *priv = client->priv;
++ struct i3c_priv_xfer xfers[2] = {};
++ struct i3c_device_info info;
++ u8 *tx_data;
++ u8 *ibi_data;
++ int ret;
++ bool ibi_enabled = i3c_device_is_ibi_enabled(priv->i3cdev);
++
++ if (!ibi_enabled) {
++ dev_warn(i3cdev_to_dev(priv->i3cdev), "IBI not enabled\n");
++ return count;
++ }
++ if (priv->mdb_append_pec)
++ ibi_data = kzalloc(2, GFP_KERNEL);
++ else
++ ibi_data = kzalloc(1, GFP_KERNEL);
++ if (!ibi_data)
++ return -ENOMEM;
++ ibi_data[0] = I3C_MCTP_MDB;
++
++ tx_data = kzalloc(count, GFP_KERNEL);
++ if (!tx_data) {
++ ret = -ENOMEM;
++ goto free_ibi;
++ }
++
++ if (copy_from_user(tx_data, buf, count)) {
++ ret = -EFAULT;
++ goto out_packet;
++ }
++
++ i3c_device_get_info(priv->i3cdev, &info);
++ if (priv->mdb_append_pec) {
++ pec_append(info.dyn_addr << 1 | 0x1, ibi_data, 1);
++ xfers[0].len = 2;
++ } else {
++ xfers[0].len = 1;
++ }
++ xfers[0].data.out = ibi_data;
++
++ xfers[1].data.out = tx_data;
++ xfers[1].len = count;
++
++ ret = i3c_device_pending_read_notify(priv->i3cdev, &xfers[1],
++ &xfers[0]);
++ if (ret)
++ goto out_packet;
++ ret = count;
++
++out_packet:
++ kfree(tx_data);
++free_ibi:
++ kfree(ibi_data);
++ return ret;
++}
++
++static __poll_t i3c_target_mctp_poll(struct file *file, struct poll_table_struct *pt)
++{
++ struct mctp_client *client = file->private_data;
++ __poll_t ret = 0;
++
++ poll_wait(file, &client->wait_queue, pt);
++
++ if (__ptr_ring_peek(&client->rx_queue))
++ ret |= EPOLLIN;
++
++ /*
++ * TODO: Add support for "write" readiness.
++ * DW-I3C has a hardware queue that has finite number of entries.
++ * If we try to issue more writes than the space in this queue allows for,
++ * we're in trouble. This should be handled by error from write() and
++ * poll() blocking for write events.
++ */
++ return ret;
++}
++
++static const struct file_operations i3c_target_mctp_fops = {
++ .owner = THIS_MODULE,
++ .open = i3c_target_mctp_open,
++ .release = i3c_target_mctp_release,
++ .read = i3c_target_mctp_read,
++ .write = i3c_target_mctp_write,
++ .poll = i3c_target_mctp_poll,
++};
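These file operations expose the target-side MCTP path as a character device (created as i3c-mctp-target-<N> in probe below). A userspace consumer typically poll()s for readability and then read()s one packet per call, while write() queues a pending-read announced with an IBI carrying the MCTP MDB. A minimal userspace sketch follows; the /dev/i3c-mctp-target-0 path is an assumption about how the node is surfaced.

```
/* Userspace sketch; the /dev/i3c-mctp-target-0 path is an assumption. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[256];
	struct pollfd pfd;
	ssize_t n;
	int fd;

	fd = open("/dev/i3c-mctp-target-0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	pfd.fd = fd;
	pfd.events = POLLIN;

	/* Wait up to 5 s for a packet pushed by the active controller. */
	if (poll(&pfd, 1, 5000) > 0 && (pfd.revents & POLLIN)) {
		n = read(fd, buf, sizeof(buf));	/* one packet per read() */
		if (n > 0)
			printf("received %zd-byte MCTP packet\n", n);
	}

	close(fd);
	return 0;
}
```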
++
++static struct i3c_target_read_setup i3c_target_mctp_rx_packet_setup = {
++ .handler = i3c_target_mctp_rx_packet_enqueue,
++};
++
++static ssize_t mdb_append_pec_show(struct device *dev, struct device_attribute *attr, char *buf)
++{
++ struct i3c_device *i3cdev = dev_get_drvdata(dev);
++ struct i3c_target_mctp *priv = i3cdev_get_drvdata(i3cdev);
++ ssize_t ret;
++
++ ret = sysfs_emit(buf, "%d\n", priv->mdb_append_pec);
++
++ return ret;
++}
++
++static ssize_t mdb_append_pec_store(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct i3c_device *i3cdev = dev_get_drvdata(dev);
++ struct i3c_target_mctp *priv = i3cdev_get_drvdata(i3cdev);
++ bool res;
++ int ret;
++
++ ret = kstrtobool(buf, &res);
++ if (ret)
++ return ret;
++
++ priv->mdb_append_pec = res;
++
++ return count;
++}
++
++static DEVICE_ATTR_RW(mdb_append_pec);
++
++static int i3c_target_mctp_probe(struct i3c_device *i3cdev)
++{
++ struct device *parent = i3cdev_to_dev(i3cdev);
++ struct i3c_target_mctp *priv;
++ struct device *dev;
++ int ret;
++
++ priv = devm_kzalloc(parent, sizeof(*priv), GFP_KERNEL);
++ if (!priv)
++ return -ENOMEM;
++
++ ret = ida_alloc(&i3c_target_mctp_ida, GFP_KERNEL);
++ if (ret < 0)
++ return ret;
++ priv->id = ret;
++
++ priv->i3cdev = i3cdev;
++ spin_lock_init(&priv->client_lock);
++
++ cdev_init(&priv->cdev, &i3c_target_mctp_fops);
++ priv->cdev.owner = THIS_MODULE;
++
++ ret = cdev_add(&priv->cdev,
++ MKDEV(MAJOR(i3c_target_mctp_devt), priv->id), 1);
++ if (ret) {
++ ida_free(&i3c_target_mctp_ida, priv->id);
++ return ret;
++ }
++
++ dev = device_create(i3c_target_mctp_class, parent,
++ MKDEV(MAJOR(i3c_target_mctp_devt), priv->id), i3cdev,
++ "i3c-mctp-target-%d", priv->id);
++ if (IS_ERR(dev)) {
++ ret = PTR_ERR(dev);
++ goto err;
++ }
++
++ /*
++ * By default, the PEC is appended to the MDB as a hardware workaround for the case where
++ * an AST2600 I3C controller is the active (primary) controller.
++ */
++ priv->mdb_append_pec = 1;
++
++ ret = device_create_file(dev, &dev_attr_mdb_append_pec);
++ if (unlikely(ret)) {
++ dev_err(dev, "Failed creating device attrs\n");
++ ret = -EINVAL;
++ goto err;
++ }
++
++ i3cdev_set_drvdata(i3cdev, priv);
++
++ i3c_target_read_register(i3cdev, &i3c_target_mctp_rx_packet_setup);
++
++ crc8_populate_msb(i3c_crc8_table, I3C_CRC8_POLYNOMIAL);
++
++ return 0;
++err:
++ cdev_del(&priv->cdev);
++ ida_free(&i3c_target_mctp_ida, priv->id);
++
++ return ret;
++}
++
++static void i3c_target_mctp_remove(struct i3c_device *i3cdev)
++{
++ struct i3c_target_mctp *priv = dev_get_drvdata(i3cdev_to_dev(i3cdev));
++
++ device_destroy(i3c_target_mctp_class, MKDEV(MAJOR(i3c_target_mctp_devt), priv->id));
++ cdev_del(&priv->cdev);
++ ida_free(&i3c_target_mctp_ida, priv->id);
++}
++
++static const struct i3c_device_id i3c_target_mctp_ids[] = {
++ I3C_CLASS(0xcc, 0x0),
++ { },
++};
++
++static struct i3c_driver i3c_target_mctp_drv = {
++ .driver.name = "i3c-target-mctp",
++ .id_table = i3c_target_mctp_ids,
++ .probe = i3c_target_mctp_probe,
++ .remove = i3c_target_mctp_remove,
++ .target = true,
++};
++
++static int i3c_target_mctp_init(struct i3c_driver *drv)
++{
++ int ret;
++
++ ret = alloc_chrdev_region(&i3c_target_mctp_devt, 0,
++ I3C_TARGET_MCTP_MINORS, "i3c-target-mctp");
++ if (ret)
++ return ret;
++
++ i3c_target_mctp_class = class_create("i3c-target-mctp");
++ if (IS_ERR(i3c_target_mctp_class)) {
++ unregister_chrdev_region(i3c_target_mctp_devt, I3C_TARGET_MCTP_MINORS);
++ return PTR_ERR(i3c_target_mctp_class);
++ }
++
++ return i3c_driver_register(drv);
++}
++
++static void i3c_target_mctp_fini(struct i3c_driver *drv)
++{
++ i3c_driver_unregister(drv);
++ class_destroy(i3c_target_mctp_class);
++ unregister_chrdev_region(i3c_target_mctp_devt, I3C_TARGET_MCTP_MINORS);
++}
++
++module_driver(i3c_target_mctp_drv, i3c_target_mctp_init, i3c_target_mctp_fini);
++MODULE_AUTHOR("Iwona Winiarska <iwona.winiarska@intel.com>");
++MODULE_DESCRIPTION("I3C Target MCTP driver");
++MODULE_LICENSE("GPL");
+--
+2.34.1
+
diff --git a/recipes-kernel/linux/files/0024-Add-PCIe-RC-driver-for-ast2700.patch b/recipes-kernel/linux/files/0024-Add-PCIe-RC-driver-for-ast2700.patch
new file mode 100644
index 0000000..ab8d72d
--- /dev/null
+++ b/recipes-kernel/linux/files/0024-Add-PCIe-RC-driver-for-ast2700.patch
@@ -0,0 +1,1469 @@
+From 1503660256537944a4854f60db5ac60f4c438a8d Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Tue, 11 Mar 2025 16:22:55 +0800
+Subject: [PATCH] Add PCIe RC driver for ast2700
+
+This is base on aspeed SDK 9.05.
+
+Source:
+AspeedTech-BMC github:
+https://github.com/AspeedTech-BMC/linux/blob/aspeed-master-v6.6/
+(cherry picked from commit 769f62b7baa84d6998723b0ea60280e380183553)
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
+---
+ drivers/pci/controller/Kconfig | 9 +
+ drivers/pci/controller/Makefile | 1 +
+ drivers/pci/controller/aspeed_pciecfg.c | 135 +++
+ drivers/pci/controller/pcie-aspeed.c | 1265 +++++++++++++++++++++++
+ 4 files changed, 1410 insertions(+)
+ create mode 100644 drivers/pci/controller/aspeed_pciecfg.c
+ create mode 100644 drivers/pci/controller/pcie-aspeed.c
+
+diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
+index c0c3f2824..a9de92ad2 100644
+--- a/drivers/pci/controller/Kconfig
++++ b/drivers/pci/controller/Kconfig
+@@ -47,6 +47,15 @@ config PCIE_APPLE
+
+ If unsure, say Y if you have an Apple Silicon system.
+
++config PCIE_ASPEED
++ bool "ASPEED PCIe controller"
++ depends on PCI
++ depends on OF || COMPILE_TEST
++ select PCI_MSI_ARCH_FALLBACKS
++ help
++ Say Y here if you want PCIe controller support on
++ ASPEED SoCs.
++
+ config PCI_VERSATILE
+ bool "ARM Versatile PB PCI controller"
+ depends on ARCH_VERSATILE || COMPILE_TEST
+diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
+index 37c8663de..a9e3fc756 100644
+--- a/drivers/pci/controller/Makefile
++++ b/drivers/pci/controller/Makefile
+@@ -27,6 +27,7 @@ obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o
+ obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
+ obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
+ obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o
++obj-$(CONFIG_PCIE_ASPEED) += pcie-aspeed.o
+ obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o
+ obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o
+ obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
+diff --git a/drivers/pci/controller/aspeed_pciecfg.c b/drivers/pci/controller/aspeed_pciecfg.c
+new file mode 100644
+index 000000000..718f814ce
+--- /dev/null
++++ b/drivers/pci/controller/aspeed_pciecfg.c
+@@ -0,0 +1,135 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * PCIe host controller driver for ASPEED PCIe Bridge
++ *
++ */
++#include <linux/of_platform.h>
++#include <linux/mfd/syscon.h>
++#include <linux/regmap.h>
++#include <linux/irqdomain.h>
++#include <linux/kernel.h>
++#include <linux/reset.h>
++#include <linux/of.h>
++#include <linux/of_irq.h>
++#include <linux/of_address.h>
++#include <linux/gpio/consumer.h>
++
++struct aspeed_pciecfg {
++ void __iomem *reg;
++ struct regmap *ahbc;
++ struct reset_control *rst;
++ struct reset_control *rc_low_rst;
++ struct reset_control *rc_high_rst;
++ struct gpio_desc *rc_low_rst_gpio;
++ struct gpio_desc *rc_high_rst_gpio;
++};
++
++static const struct of_device_id aspeed_pciecfg_of_match[] = {
++ { .compatible = "aspeed,ast2600-pciecfg", },
++ {}
++};
++
++#define AHBC_UNLOCK 0xAEED1A03
++static void aspeed_pciecfg_init(struct aspeed_pciecfg *pciecfg)
++{
++ reset_control_assert(pciecfg->rst);
++
++ if (pciecfg->rc_low_rst_gpio) {
++ gpiod_set_value(pciecfg->rc_low_rst_gpio, 1);
++ gpiod_set_value(pciecfg->rc_low_rst_gpio, 0);
++ } else if (pciecfg->rc_low_rst) {
++ reset_control_deassert(pciecfg->rc_low_rst);
++ reset_control_assert(pciecfg->rc_low_rst);
++ }
++
++ if (pciecfg->rc_high_rst_gpio) {
++ gpiod_set_value(pciecfg->rc_high_rst_gpio, 1);
++ gpiod_set_value(pciecfg->rc_high_rst_gpio, 0);
++ } else if (pciecfg->rc_high_rst) {
++ reset_control_deassert(pciecfg->rc_high_rst);
++ reset_control_assert(pciecfg->rc_high_rst);
++ }
++
++ mdelay(5);
++ reset_control_deassert(pciecfg->rst);
++
++ // Workaround: send a vendor-defined message to avoid an unknown message going out during PCIe reset
++ writel(0x34000000, pciecfg->reg + 0x10);
++ writel(0x0000007f, pciecfg->reg + 0x14);
++ writel(0x00001a03, pciecfg->reg + 0x18);
++ writel(0x00000000, pciecfg->reg + 0x1C);
++
++ regmap_write(pciecfg->ahbc, 0x00, AHBC_UNLOCK);
++ regmap_update_bits(pciecfg->ahbc, 0x8C, BIT(5), BIT(5));
++ regmap_write(pciecfg->ahbc, 0x00, 0x1);
++
++ //ahb to pcie rc
++ writel(0xe0006000, pciecfg->reg + 0x60);
++ writel(0x00000000, pciecfg->reg + 0x64);
++ writel(0xFFFFFFFF, pciecfg->reg + 0x68);
++
++ //PCIe Host Enable
++ writel(BIT(0), pciecfg->reg + 0x00);
++}
++
++static int aspeed_pciecfg_probe(struct platform_device *pdev)
++{
++ struct aspeed_pciecfg *pciecfg;
++ struct device *dev = &pdev->dev;
++
++ pciecfg = devm_kzalloc(&pdev->dev, sizeof(*pciecfg), GFP_KERNEL);
++ if (!pciecfg)
++ return -ENOMEM;
++
++ pciecfg->reg = devm_platform_ioremap_resource(pdev, 0);
++ if (IS_ERR(pciecfg->reg))
++ return PTR_ERR(pciecfg->reg);
++
++ pciecfg->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
++ if (IS_ERR(pciecfg->rst)) {
++ dev_err(&pdev->dev, "can't get pcie reset\n");
++ return PTR_ERR(pciecfg->rst);
++ }
++
++ pciecfg->rc_low_rst = NULL;
++ if (of_device_is_available(of_parse_phandle(dev->of_node, "aspeed,pcie0", 0))) {
++ pciecfg->rc_low_rst_gpio =
++ devm_gpiod_get_optional(dev, "pcie0-perst",
++ GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
++ if (!pciecfg->rc_low_rst_gpio) {
++ pciecfg->rc_low_rst = devm_reset_control_get_shared(&pdev->dev, "rc_low");
++ if (IS_ERR(pciecfg->rc_low_rst))
++ dev_info(&pdev->dev, "No RC low reset\n");
++ }
++ }
++
++ pciecfg->rc_high_rst = NULL;
++ if (of_device_is_available(of_parse_phandle(dev->of_node, "aspeed,pcie1", 0))) {
++ pciecfg->rc_high_rst_gpio =
++ devm_gpiod_get_optional(dev, "pcie1-perst",
++ GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE);
++ if (!pciecfg->rc_high_rst_gpio) {
++ pciecfg->rc_high_rst = devm_reset_control_get_shared(&pdev->dev, "rc_high");
++ if (IS_ERR(pciecfg->rc_high_rst))
++ dev_info(&pdev->dev, "No RC high reset\n");
++ }
++ }
++
++ pciecfg->ahbc = syscon_regmap_lookup_by_compatible("aspeed,aspeed-ahbc");
++ if (IS_ERR(pciecfg->ahbc))
++ return PTR_ERR(pciecfg->ahbc);
++
++ aspeed_pciecfg_init(pciecfg);
++
++ return 0;
++}
++
++static struct platform_driver aspeed_pciecfg_driver = {
++ .driver = {
++ .name = "aspeed-pciecfg",
++ .suppress_bind_attrs = true,
++ .of_match_table = aspeed_pciecfg_of_match,
++ },
++ .probe = aspeed_pciecfg_probe,
++};
++builtin_platform_driver(aspeed_pciecfg_driver);
+diff --git a/drivers/pci/controller/pcie-aspeed.c b/drivers/pci/controller/pcie-aspeed.c
+new file mode 100644
+index 000000000..e018f6835
+--- /dev/null
++++ b/drivers/pci/controller/pcie-aspeed.c
+@@ -0,0 +1,1265 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * PCIe host controller driver for ASPEED PCIe Bridge
++ *
++ */
++#include <linux/irqchip/chained_irq.h>
++#include <linux/irqdomain.h>
++#include <linux/mfd/syscon.h>
++#include <linux/kernel.h>
++#include <linux/msi.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/of_platform.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
++#include <linux/of_pci.h>
++#include <linux/pci.h>
++#include <linux/regmap.h>
++#include <linux/reset.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/workqueue.h>
++#include <linux/gpio/consumer.h>
++#include <linux/bitfield.h>
++
++/* PCI Host Controller registers */
++#define ASPEED_PCIE_CLASS_CODE 0x04
++#define ASPEED_PCIE_GLOBAL 0x30
++#define ASPEED_PCIE_CFG_DIN 0x50
++#define ASPEED_PCIE_CFG3 0x58
++#define ASPEED_PCIE_LOCK 0x7C
++#define ASPEED_PCIE_LINK 0xC0
++#define ASPEED_PCIE_INT 0xC4
++#define ASPEED_PCIE_LINK_STS 0xD0
++/* AST_PCIE_CFG2 0x04 */
++#define PCIE_CFG_CLASS_CODE(x) ((x) << 8)
++#define PCIE_CFG_REV_ID(x) (x)
++/* PEHR10: Miscellaneous Control 10H Register */
++#define DATALINK_REPORT_CAPABLE BIT(4)
++/* PEHR14: Miscellaneous Control 14H Register */
++#define HOTPLUG_CAPABLE_ENABLE BIT(6)
++#define HOTPLUG_SURPRISE_ENABLE BIT(5)
++#define ATTENTION_BUTTON_ENABLE BIT(0)
++/* PEHR30: Miscellaneous Control 30H Register */
++/* Disable RC synchronous reset on a link-up to link-down transition */
++#define RC_SYNC_RESET_DISABLE BIT(20)
++#define ROOT_COMPLEX_ID(x) ((x) << 4)
++#define PCIE_RC_SLOT_ENABLE BIT(1)
++/* AST_PCIE_LOCK 0x7C */
++#define PCIE_UNLOCK 0xa8
++/* AST_PCIE_LINK 0xC0 */
++#define PCIE_LINK_STS BIT(5)
++/* ASPEED_PCIE_LINK_STS 0xD0 */
++#define PCIE_LINK_5G BIT(17)
++#define PCIE_LINK_2_5G BIT(16)
++
++/* H2X Controller registers */
++/* reg 0x08 */
++#define PCIE_TX_IDLE_CLEAR BIT(0)
++
++/* reg 0x24 */
++#define PCIE_TX_IDLE BIT(31)
++
++#define PCIE_STATUS_OF_TX GENMASK(25, 24)
++#define PCIE_RC_TX_COMPLETE 0
++#define PCIE_RC_L_TX_COMPLETE BIT(24)
++#define PCIE_RC_H_TX_COMPLETE BIT(25)
++
++#define PCIE_TRIGGER_TX BIT(0)
++
++/* reg 0x80, 0xC0 */
++#define PCIE_RX_TAG_MASK GENMASK(23, 16)
++#define PCIE_RX_DMA_EN BIT(9)
++#define PCIE_RX_LINEAR BIT(8)
++#define PCIE_RX_MSI_SEL BIT(7)
++#define PCIE_RX_MSI_EN BIT(6)
++#define PCIE_1M_ADDRESS_EN BIT(5)
++#define PCIE_UNLOCK_RX_BUFF BIT(4)
++#define PCIE_RX_TLP_TAG_MATCH BIT(3)
++#define PCIE_Wait_RX_TLP_CLR BIT(2)
++#define PCIE_RC_RX_ENABLE BIT(1)
++#define PCIE_RC_ENABLE BIT(0)
++
++/* reg 0x88, 0xC8 : RC ISR */
++#define PCIE_RC_CPLCA_ISR BIT(6)
++#define PCIE_RC_CPLUR_ISR BIT(5)
++#define PCIE_RC_RX_DONE_ISR BIT(4)
++
++#define PCIE_RC_INTD_ISR BIT(3)
++#define PCIE_RC_INTC_ISR BIT(2)
++#define PCIE_RC_INTB_ISR BIT(1)
++#define PCIE_RC_INTA_ISR BIT(0)
++
++#define MAX_MSI_HOST_IRQS 64
++
++/* AST2700 H2X */
++#define H2X_CTRL 0x00
++#define H2X_BRIDGE_EN BIT(0)
++#define H2X_BRIDGE_DIRECT_EN BIT(1)
++#define H2X_CFGE_INT_STS 0x08
++#define CFGE_TX_IDLE BIT(0)
++#define CFGE_RX_IDLE BIT(1)
++#define H2X_CFGI_TLP 0x20
++#define H2X_CFGI_WR_DATA 0x24
++#define H2X_CFGI_CTRL 0x28
++#define CFGI_TLP_FIRE BIT(0)
++#define H2X_CFGI_RET_DATA 0x2C
++#define H2X_CFGE_TLP_1ST 0x30
++#define H2X_CFGE_TLP_NEXT 0x34
++#define H2X_CFGE_CTRL 0x38
++#define CFGE_TLP_FIRE BIT(0)
++#define H2X_CFGE_RET_DATA 0x3C
++#define H2X_REMAP_DIRECT_ADDR 0x78
++
++/* AST2700 PEHR */
++#define PEHR_GEN_CAPABILITY 0x60
++#define PORT_TYPE GENMASK(7, 4)
++#define PORT_TYPE_ROOT BIT(2)
++
++/* TLP configuration type 0 and type 1 */
++#define CRG_READ_FMTTYPE(type) (0x04000000 | (type << 24))
++#define CRG_WRITE_FMTTYPE(type) (0x44000000 | (type << 24))
++#define CRG_PAYLOAD_SIZE 0x01 /* 1 DWORD */
++#define TLP_COMP_STATUS(s) (((s) >> 13) & 7)
++
++struct aspeed_pcie_rc_platform {
++ int (*setup)(struct platform_device *pdev);
++ /* Interrupt Register Offset */
++ int reg_intx_en;
++ int reg_intx_sts;
++ int reg_msi_en;
++ int reg_msi_sts;
++};
++
++struct aspeed_pcie {
++ struct pci_host_bridge *host;
++ struct device *dev;
++ void __iomem *reg; //rc slot base
++ struct regmap *ahbc;
++ struct regmap *device;
++ int domain;
++ char name[10];
++ u32 msi_address;
++ int irq;
++ u8 tx_tag;
++ struct regmap *cfg; //pciecfg
++ struct regmap *pciephy; //pcie_phy
++ struct reset_control *h2xrst;
++ struct reset_control *perst;
++ /* INTx */
++ struct irq_domain *irq_domain; //irq_domain
++ // msi
++ struct irq_domain *dev_domain; //inner_domain
++ struct irq_domain *msi_domain;
++ struct mutex lock; /* protect bitmap variable */
++ int hotplug_event;
++ struct gpio_desc *perst_ep_in;
++ struct gpio_desc *perst_rc_out;
++ struct gpio_desc *perst_owner;
++ struct delayed_work rst_dwork;
++ DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_HOST_IRQS);
++
++ const struct aspeed_pcie_rc_platform *platform;
++ bool support_msi;
++};
++
++static void aspeed_pcie_intx_ack_irq(struct irq_data *d)
++{
++ struct aspeed_pcie *pcie = irq_data_get_irq_chip_data(d);
++ int intx_en = pcie->platform->reg_intx_en;
++
++ writel(readl(pcie->reg + intx_en) | BIT(d->hwirq), pcie->reg + intx_en);
++}
++
++static void aspeed_pcie_intx_mask_irq(struct irq_data *d)
++{
++ struct aspeed_pcie *pcie = irq_data_get_irq_chip_data(d);
++ int intx_en = pcie->platform->reg_intx_en;
++
++ writel(readl(pcie->reg + intx_en) & ~BIT(d->hwirq), pcie->reg + intx_en);
++}
++
++static void aspeed_pcie_intx_unmask_irq(struct irq_data *d)
++{
++ struct aspeed_pcie *pcie = irq_data_get_irq_chip_data(d);
++ int intx_en = pcie->platform->reg_intx_en;
++
++ writel(readl(pcie->reg + intx_en) | BIT(d->hwirq), pcie->reg + intx_en);
++}
++
++static struct irq_chip aspeed_intx_irq_chip = {
++ .name = "ASPEED:IntX",
++ .irq_ack = aspeed_pcie_intx_ack_irq,
++ .irq_mask = aspeed_pcie_intx_mask_irq,
++ .irq_unmask = aspeed_pcie_intx_unmask_irq,
++};
++
++static int aspeed_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
++ irq_hw_number_t hwirq)
++{
++ irq_set_chip_and_handler(irq, &aspeed_intx_irq_chip, handle_level_irq);
++ irq_set_chip_data(irq, domain->host_data);
++ irq_set_status_flags(irq, IRQ_LEVEL);
++
++ return 0;
++}
++
++/* INTx IRQ Domain operations */
++static const struct irq_domain_ops aspeed_intx_domain_ops = {
++ .map = aspeed_pcie_intx_map,
++};
++
++static void aspeed_pcie_intr_handler(struct irq_desc *desc)
++{
++ struct aspeed_pcie *pcie = irq_desc_get_handler_data(desc);
++ struct irq_chip *irqchip = irq_desc_get_chip(desc);
++ const struct aspeed_pcie_rc_platform *platform = pcie->platform;
++ unsigned long status;
++ unsigned long intx;
++ u32 bit;
++ int i;
++
++ chained_irq_enter(irqchip, desc);
++
++ intx = readl(pcie->reg + platform->reg_intx_sts) & 0xf;
++ if (intx) {
++ for_each_set_bit(bit, &intx, PCI_NUM_INTX)
++ generic_handle_domain_irq(pcie->irq_domain, bit);
++ }
++
++ if (IS_ENABLED(CONFIG_PCI_MSI)) {
++ for (i = 0; i < 2; i++) {
++ status = readl(pcie->reg + platform->reg_msi_sts + (i * 4));
++ writel(status, pcie->reg + platform->reg_msi_sts + (i * 4));
++ if (!status)
++ continue;
++
++ for_each_set_bit(bit, &status, 32) {
++ if (i)
++ bit += 32;
++ generic_handle_domain_irq(pcie->dev_domain, bit);
++ }
++ }
++ }
++ chained_irq_exit(irqchip, desc);
++}
++
++//optional : set_slot_power_limit
++void aspeed_pcie_set_slot_power_limit(struct aspeed_pcie *pcie)
++{
++ u32 cfg_val, isr;
++ int ret;
++
++ writel(BIT(4) | readl(pcie->reg), pcie->reg);
++
++ pcie->tx_tag %= 0x7;
++ regmap_write(pcie->cfg, 0x10, 0x74000001);
++ switch (pcie->domain) {
++ case 0: //write for 0.8.0
++ regmap_write(pcie->cfg, 0x14, 0x00400050 | (pcie->tx_tag << 8));
++ break;
++ case 1: //write for 0.4.0
++ regmap_write(pcie->cfg, 0x14, 0x00200050 | (pcie->tx_tag << 8));
++ break;
++ }
++
++ regmap_write(pcie->cfg, 0x18, 0);
++ regmap_write(pcie->cfg, 0x1C, 0);
++ regmap_write(pcie->cfg, 0x20, 0x1a);
++
++ //trigger tx
++ regmap_write_bits(pcie->cfg, 0x24, PCIE_TRIGGER_TX, PCIE_TRIGGER_TX);
++
++ //wait tx idle
++ ret = regmap_read_poll_timeout(pcie->cfg, 0x24, cfg_val,
++ (cfg_val & PCIE_TX_IDLE), 0, 10);
++ if (ret)
++ goto out;
++
++ //write clr tx idle
++ regmap_write_bits(pcie->cfg, 0x08, PCIE_TX_IDLE_CLEAR,
++ PCIE_TX_IDLE_CLEAR);
++
++ //check tx status
++ regmap_read(pcie->cfg, 0x24, &cfg_val);
++ switch (cfg_val & PCIE_STATUS_OF_TX) {
++ case PCIE_RC_L_TX_COMPLETE:
++ case PCIE_RC_H_TX_COMPLETE:
++ ret = readl_poll_timeout(pcie->reg + 0x08, isr,
++ (isr & PCIE_RC_RX_DONE_ISR), 0, 10);
++ if (ret)
++ dev_err(pcie->dev, "[%d] : tx timeout [%x]\n",
++ pcie->domain, isr);
++
++ writel(readl(pcie->reg + 0x08), pcie->reg + 0x08);
++ break;
++ }
++out:
++ pcie->tx_tag++;
++}
++
++static int aspeed_ast2600_rd_conf(struct pci_bus *bus, unsigned int devfn,
++ int where, int size, u32 *val)
++{
++ struct aspeed_pcie *pcie = bus->sysdata;
++ u32 bdf_offset;
++ int rx_done_fail = 0;
++ u32 cfg_val, isr, type = 0;
++ u32 link_sts = 0;
++ int ret;
++
++ //H2X80[4] (unlock) is write-only.
++ //Driver may set H2X80[4]=1 before triggering next TX config.
++ writel(BIT(4) | readl(pcie->reg), pcie->reg);
++
++ switch (pcie->domain) {
++ case 0:
++ if (!bus->number) {
++ switch (PCI_SLOT(devfn)) {
++ case 0:
++ case 4:
++ break;
++ default:
++ *val = 0xffffffff;
++ return PCIBIOS_SUCCESSFUL;
++ }
++ }
++
++ if (bus->number)
++ type = 1;
++ else
++ type = 0;
++ break;
++ case 1:
++ if (bus->number == 128) {
++ switch (PCI_SLOT(devfn)) {
++ case 0:
++ case 8:
++ break;
++ default:
++ *val = 0xffffffff;
++ return PCIBIOS_SUCCESSFUL;
++ }
++ }
++
++ if (bus->number > 128)
++ type = 1;
++ else
++ type = 0;
++ break;
++ }
++
++ dev_dbg(pcie->dev, "[%d]R:b d f [%d:%d:%d] devfn %x\n",
++ pcie->domain, bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), devfn);
++
++ if (type) {
++ regmap_read(pcie->pciephy, ASPEED_PCIE_LINK, &link_sts);
++ if (!(link_sts & PCIE_LINK_STS)) {
++ *val = 0xffffffff;
++ return PCIBIOS_SUCCESSFUL;
++ }
++ }
++
++ bdf_offset = ((bus->number) << 24) | (PCI_SLOT(devfn) << 19) |
++ (PCI_FUNC(devfn) << 16) | (where & ~3);
++
++ pcie->tx_tag %= 0x7;
++
++ regmap_write(pcie->cfg, 0x10, 0x04000001 | (type << 24));
++ regmap_write(pcie->cfg, 0x14, 0x0000200f | (pcie->tx_tag << 8));
++ regmap_write(pcie->cfg, 0x18, bdf_offset);
++ regmap_write(pcie->cfg, 0x1C, 0x00000000);
++
++ //trigger tx
++ regmap_write_bits(pcie->cfg, 0x24, PCIE_TRIGGER_TX, PCIE_TRIGGER_TX);
++
++ //wait tx idle
++ //todo find timeout and time period
++ ret = regmap_read_poll_timeout(pcie->cfg, 0x24, cfg_val,
++ (cfg_val & PCIE_TX_IDLE), 0, 10);
++ if (ret) {
++ dev_err(pcie->dev, "[%d] : tx idle timeout [%x]\n",
++ pcie->domain, cfg_val);
++ *val = 0xffffffff;
++ goto out;
++ }
++
++ //write clr tx idle
++ regmap_write_bits(pcie->cfg, 0x08, PCIE_TX_IDLE_CLEAR,
++ PCIE_TX_IDLE_CLEAR);
++
++ //check tx status
++ regmap_read(pcie->cfg, 0x24, &cfg_val);
++
++ switch (cfg_val & PCIE_STATUS_OF_TX) {
++ case PCIE_RC_L_TX_COMPLETE: //domain 0
++ if (pcie->domain != 0)
++ dev_err(pcie->dev, "[%d] : tx complete no correct\n",
++ pcie->domain);
++ fallthrough;
++ case PCIE_RC_H_TX_COMPLETE: //domain 1
++ ret = readl_poll_timeout(pcie->reg + 0x08, isr,
++ (isr & PCIE_RC_RX_DONE_ISR), 0, 10);
++ if (ret) {
++ dev_err(pcie->dev, "[%d] : rx done timeout\n",
++ pcie->domain);
++ rx_done_fail = 1;
++ *val = 0xffffffff;
++ }
++ if (!rx_done_fail) {
++ if (readl(pcie->reg + 0x14) & BIT(13))
++ *val = 0xffffffff;
++ else
++ *val = readl(pcie->reg + 0x0C);
++ }
++
++ writel(BIT(4) | readl(pcie->reg), pcie->reg);
++ writel(readl(pcie->reg + 0x08), pcie->reg + 0x08);
++ break;
++ case PCIE_STATUS_OF_TX:
++ *val = 0xffffffff;
++ break;
++ default: //read rc data
++ regmap_read(pcie->cfg, 0x0C, &cfg_val);
++ *val = cfg_val;
++ break;
++ }
++
++ switch (size) {
++ case 1:
++ *val = (*val >> ((where & 3) * 8)) & 0xff;
++ break;
++ case 2:
++ *val = (*val >> ((where & 2) * 8)) & 0xffff;
++ break;
++ }
++
++ dev_dbg(pcie->dev, "R:b d f [%d:%d:%d] where:%x : %x\n",
++ bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where, *val);
++
++#ifdef CONFIG_HOTPLUG_PCI
++ switch (pcie->domain) {
++ case 0:
++ if (where == 0x9a && bus->number == 0x0 &&
++ (PCI_SLOT(devfn) == 0x4) && (PCI_FUNC(devfn) == 0x0) &&
++ pcie->hotplug_event)
++ *val |= PCI_EXP_SLTSTA_ABP;
++ break;
++ case 1:
++ if (where == 0x9a && bus->number == 128 &&
++ (PCI_SLOT(devfn) == 0x8) && (PCI_FUNC(devfn) == 0x0) &&
++ pcie->hotplug_event)
++ *val |= PCI_EXP_SLTSTA_ABP;
++ break;
++ }
++#endif
++out:
++ pcie->tx_tag++;
++ return PCIBIOS_SUCCESSFUL;
++}
++
++static int aspeed_ast2600_wr_conf(struct pci_bus *bus, unsigned int devfn,
++ int where, int size, u32 val)
++{
++ u32 type = 0;
++ u32 shift = 8 * (where & 3);
++ u32 bdf_offset;
++ u8 byte_en = 0;
++ struct aspeed_pcie *pcie = bus->sysdata;
++ u32 isr, cfg_val;
++ int ret;
++
++#ifdef CONFIG_HOTPLUG_PCI
++ switch (pcie->domain) {
++ case 0:
++ if (where == 0x9a && bus->number == 0x0 &&
++ (PCI_SLOT(devfn) == 0x4) && (PCI_FUNC(devfn) == 0x0) &&
++ pcie->hotplug_event && (val & PCI_EXP_SLTSTA_ABP)) {
++ pcie->hotplug_event = 0;
++ return PCIBIOS_SUCCESSFUL;
++ }
++ break;
++ case 1:
++ if (where == 0x9a && bus->number == 128 &&
++ (PCI_SLOT(devfn) == 0x8) && (PCI_FUNC(devfn) == 0x0) &&
++ pcie->hotplug_event && (val & PCI_EXP_SLTSTA_ABP)) {
++ pcie->hotplug_event = 0;
++ return PCIBIOS_SUCCESSFUL;
++ }
++ break;
++ }
++#endif
++
++ dev_dbg(pcie->dev, "W b d f [%d:%d:%d] : where %x : val %x\n",
++ bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where, val);
++
++ //H2X80[4] (unlock) is write-only.
++ //Driver may set H2X80[4]=1 before triggering next TX config.
++ writel(BIT(4) | readl(pcie->reg), pcie->reg);
++
++ switch (size) {
++ case 1:
++ switch (where % 4) {
++ case 0:
++ byte_en = 0x1;
++ break;
++ case 1:
++ byte_en = 0x2;
++ break;
++ case 2:
++ byte_en = 0x4;
++ break;
++ case 3:
++ byte_en = 0x8;
++ break;
++ }
++ val = (val & 0xff) << shift;
++ break;
++ case 2:
++ switch ((where >> 1) % 2) {
++ case 0:
++ byte_en = 0x3;
++ break;
++ case 1:
++ byte_en = 0xc;
++ break;
++ }
++ val = (val & 0xffff) << shift;
++ break;
++ default:
++ byte_en = 0xf;
++ break;
++ }
++
++ switch (pcie->domain) {
++ case 0:
++ if (bus->number)
++ type = 1;
++ else
++ type = 0;
++ break;
++ case 1:
++ if (bus->number > 128)
++ type = 1;
++ else
++ type = 0;
++ break;
++ }
++
++ bdf_offset = (bus->number << 24) | (PCI_SLOT(devfn) << 19) |
++ (PCI_FUNC(devfn) << 16) | (where & ~3);
++ pcie->tx_tag %= 0x7;
++
++ regmap_write(pcie->cfg, 0x10, 0x44000001 | (type << 24));
++ regmap_write(pcie->cfg, 0x14,
++ 0x00002000 | (pcie->tx_tag << 8) | byte_en);
++ regmap_write(pcie->cfg, 0x18, bdf_offset);
++ regmap_write(pcie->cfg, 0x1C, 0x00000000);
++ regmap_write(pcie->cfg, 0x20, val);
++
++ //trigger tx
++ regmap_write_bits(pcie->cfg, 0x24, PCIE_TRIGGER_TX, PCIE_TRIGGER_TX);
++
++ //wait tx idle
++ //todo find timeout and time period
++ ret = regmap_read_poll_timeout(pcie->cfg, 0x24, cfg_val,
++ (cfg_val & PCIE_TX_IDLE), 0, 10);
++ if (ret) {
++ dev_err(pcie->dev, "[%d] : tx idle timeout [%x]\n",
++ pcie->domain, cfg_val);
++ goto out;
++ }
++
++ //write clr tx idle
++ regmap_write_bits(pcie->cfg, 0x08, PCIE_TX_IDLE_CLEAR,
++ PCIE_TX_IDLE_CLEAR);
++
++ //check tx status
++ regmap_read(pcie->cfg, 0x24, &cfg_val);
++
++ switch (cfg_val & PCIE_STATUS_OF_TX) {
++ case PCIE_RC_L_TX_COMPLETE:
++ case PCIE_RC_H_TX_COMPLETE:
++ ret = readl_poll_timeout(pcie->reg + 0x08, isr,
++ (isr & PCIE_RC_RX_DONE_ISR), 0, 10);
++ if (ret)
++ dev_err(pcie->dev, "[%d] : tx timeout\n", pcie->domain);
++
++ writel(readl(pcie->reg + 0x08), pcie->reg + 0x08);
++ break;
++ }
++
++out:
++ pcie->tx_tag++;
++ return PCIBIOS_SUCCESSFUL;
++}
++
++static int aspeed_ast2700_rd_conf(struct pci_bus *bus, unsigned int devfn,
++ int where, int size, u32 *val)
++{
++ struct aspeed_pcie *pcie = bus->sysdata;
++ u32 bdf_offset, status;
++ u8 type;
++ int ret;
++
++ if (bus->number == 0 && devfn != 0) {
++ *val = 0xffffffff;
++ return PCIBIOS_SUCCESSFUL;
++ }
++
++ if (bus->number == 0) {
++ /* Internal access to bridge */
++ writel(0xF << 16 | (where & ~3), pcie->reg + H2X_CFGI_TLP);
++ writel(CFGI_TLP_FIRE, pcie->reg + H2X_CFGI_CTRL);
++ *val = readl(pcie->reg + H2X_CFGI_RET_DATA);
++ } else {
++ bdf_offset = ((bus->number) << 24) | (PCI_SLOT(devfn) << 19) |
++ (PCI_FUNC(devfn) << 16) | (where & ~3);
++
++ pcie->tx_tag %= 0xF;
++
++ type = (bus->number == 1) ? PCI_HEADER_TYPE_NORMAL : PCI_HEADER_TYPE_BRIDGE;
++
++ /* Prepare TLP */
++ writel(CRG_READ_FMTTYPE(type) | CRG_PAYLOAD_SIZE, pcie->reg + H2X_CFGE_TLP_1ST);
++ writel(0x40100F | (pcie->tx_tag << 8), pcie->reg + H2X_CFGE_TLP_NEXT);
++ writel(bdf_offset, pcie->reg + H2X_CFGE_TLP_NEXT);
++ /* Clear TX/RX status */
++ writel(CFGE_TX_IDLE | CFGE_RX_IDLE, pcie->reg + H2X_CFGE_INT_STS);
++ /* Issue command */
++ writel(CFGE_TLP_FIRE, pcie->reg + H2X_CFGE_CTRL);
++
++ pcie->tx_tag++;
++
++ ret = readl_poll_timeout(pcie->reg + H2X_CFGE_INT_STS, status, (status & CFGE_RX_IDLE), 0, 20);
++ if (ret) {
++ dev_err(pcie->dev,
++ "RC [%04X:%02X:%02X.%02X] : RX Conf. timeout, sts: %x\n",
++ pcie->domain, bus->number, PCI_SLOT(devfn),
++ PCI_FUNC(devfn), status);
++ *val = 0xffffffff;
++ return PCIBIOS_SUCCESSFUL;
++ }
++ *val = readl(pcie->reg + H2X_CFGE_RET_DATA);
++ }
++
++ switch (size) {
++ case 1:
++ *val = (*val >> ((where & 3) * 8)) & 0xff;
++ break;
++ case 2:
++ *val = (*val >> ((where & 2) * 8)) & 0xffff;
++ break;
++ }
++
++ return PCIBIOS_SUCCESSFUL;
++}
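Both external configuration paths encode the target in the TLP address DWORD: bus number in bits [31:24], device in [23:19], function in [18:16], and the DWORD-aligned register offset in the low bits. The inline bdf_offset packing above is equivalent to the small helper below; the helper name is illustrative and not part of the driver.

```
/* Equivalent of the inline bdf_offset packing; helper name is illustrative. */
static u32 aspeed_cfg_tlp_addr(u8 bus, unsigned int devfn, int where)
{
	return (bus << 24) |			/* bits [31:24]: bus      */
	       (PCI_SLOT(devfn) << 19) |	/* bits [23:19]: device   */
	       (PCI_FUNC(devfn) << 16) |	/* bits [18:16]: function */
	       (where & ~3);			/* DWORD-aligned offset   */
}
```

For example, bus 1, device 0, function 0, offset 0x10 packs to 0x01000010.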
++
++static int aspeed_ast2700_wr_conf(struct pci_bus *bus, unsigned int devfn,
++ int where, int size, u32 val)
++{
++ struct aspeed_pcie *pcie = bus->sysdata;
++ u32 shift = 8 * (where & 3);
++ u8 byte_en;
++ u32 bdf_offset, status, type;
++ int ret;
++
++ if (bus->number == 0 && devfn != 0)
++ return PCIBIOS_SUCCESSFUL;
++
++ switch (size) {
++ case 1:
++ byte_en = 1 << (where % 4);
++ val = (val & 0xff) << shift;
++ break;
++ case 2:
++ byte_en = (((where >> 1) % 2) == 0) ? 0x3 : 0xc;
++ val = (val & 0xffff) << shift;
++ break;
++ default:
++ byte_en = 0xf;
++ break;
++ }
++
++ if (bus->number == 0) {
++ /* Internal access to bridge */
++ writel(0x100000 | byte_en << 16 | (where & ~3), pcie->reg + H2X_CFGI_TLP);
++ writel(val, pcie->reg + H2X_CFGI_WR_DATA);
++ writel(CFGI_TLP_FIRE, pcie->reg + H2X_CFGI_CTRL);
++ } else {
++ bdf_offset = (bus->number << 24) | (PCI_SLOT(devfn) << 19) |
++ (PCI_FUNC(devfn) << 16) | (where & ~3);
++ pcie->tx_tag %= 0xF;
++
++ type = (bus->number == 1) ? PCI_HEADER_TYPE_NORMAL : PCI_HEADER_TYPE_BRIDGE;
++
++ /* Prepare TLP */
++ writel(CRG_WRITE_FMTTYPE(type) | CRG_PAYLOAD_SIZE, pcie->reg + H2X_CFGE_TLP_1ST);
++ writel(0x401000 | (pcie->tx_tag << 8) | byte_en, pcie->reg + H2X_CFGE_TLP_NEXT);
++ writel(bdf_offset, pcie->reg + H2X_CFGE_TLP_NEXT);
++ writel(val, pcie->reg + H2X_CFGE_TLP_NEXT);
++ /* Clear TX/RX idle status */
++ writel(CFGE_TX_IDLE | CFGE_RX_IDLE, pcie->reg + H2X_CFGE_INT_STS);
++ /* Issue command */
++ writel(CFGE_TLP_FIRE, pcie->reg + H2X_CFGE_CTRL);
++
++ pcie->tx_tag++;
++
++ ret = readl_poll_timeout(pcie->reg + H2X_CFGE_INT_STS, status,
++ (status & CFGE_RX_IDLE), 0, 20);
++ if (ret)
++ dev_err(pcie->dev,
++ "RC [%04X:%02X:%02X.%02X] : TX Conf. timeout, sts: %x\n",
++ pcie->domain, bus->number, PCI_SLOT(devfn),
++ PCI_FUNC(devfn), status);
++
++ (void)readl(pcie->reg + H2X_CFGE_RET_DATA);
++ }
++
++ return PCIBIOS_SUCCESSFUL;
++}
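The byte-enable switches in both write paths reduce to a closed form for naturally aligned 1-, 2- and 4-byte accesses; the helper below reproduces the same values (the helper name is illustrative and not part of the driver).

```
/* Closed form of the byte-enable switches above, for aligned accesses. */
static u8 aspeed_cfg_byte_en(int size, int where)
{
	/* size 1 at offset 2 -> 0x4, size 2 at offset 2 -> 0xc, size 4 -> 0xf */
	return ((1u << size) - 1) << (where & 3);
}
```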
++
++/* PCIe operations */
++static struct pci_ops aspeed_ast2600_pcie_ops = {
++ .read = aspeed_ast2600_rd_conf,
++ .write = aspeed_ast2600_wr_conf,
++};
++
++static struct pci_ops aspeed_ast2700_pcie_ops = {
++ .read = aspeed_ast2700_rd_conf,
++ .write = aspeed_ast2700_wr_conf,
++};
++
++#ifdef CONFIG_PCI_MSI
++static void aspeed_msi_compose_msi_msg(struct irq_data *data,
++ struct msi_msg *msg)
++{
++ struct aspeed_pcie *pcie = irq_data_get_irq_chip_data(data);
++
++ msg->address_hi = 0;
++ msg->address_lo = pcie->msi_address;
++ msg->data = data->hwirq;
++}
++
++static int aspeed_msi_set_affinity(struct irq_data *irq_data,
++ const struct cpumask *mask, bool force)
++{
++ return -EINVAL;
++}
++
++static struct irq_chip aspeed_msi_bottom_irq_chip = {
++ .name = "ASPEED MSI",
++ .irq_compose_msi_msg = aspeed_msi_compose_msi_msg,
++ .irq_set_affinity = aspeed_msi_set_affinity,
++};
++
++static int aspeed_irq_msi_domain_alloc(struct irq_domain *domain,
++ unsigned int virq, unsigned int nr_irqs,
++ void *args)
++{
++ struct aspeed_pcie *pcie = domain->host_data;
++ int bit;
++ int i;
++
++ mutex_lock(&pcie->lock);
++
++ bit = bitmap_find_free_region(pcie->msi_irq_in_use, MAX_MSI_HOST_IRQS,
++ get_count_order(nr_irqs));
++
++ mutex_unlock(&pcie->lock);
++
++ if (bit < 0)
++ return -ENOSPC;
++
++ for (i = 0; i < nr_irqs; i++) {
++ irq_domain_set_info(domain, virq + i, bit + i,
++ &aspeed_msi_bottom_irq_chip,
++ domain->host_data, handle_simple_irq, NULL,
++ NULL);
++ }
++
++ return 0;
++}
++
++static void aspeed_irq_msi_domain_free(struct irq_domain *domain,
++ unsigned int virq, unsigned int nr_irqs)
++{
++ struct irq_data *data = irq_domain_get_irq_data(domain, virq);
++ struct aspeed_pcie *pcie = irq_data_get_irq_chip_data(data);
++
++ mutex_lock(&pcie->lock);
++
++ bitmap_release_region(pcie->msi_irq_in_use, data->hwirq,
++ get_count_order(nr_irqs));
++
++ mutex_unlock(&pcie->lock);
++}
++
++static void aspeed_pcie_msi_enable(struct aspeed_pcie *pcie)
++{
++ writel(0xffffffff, pcie->reg + pcie->platform->reg_msi_en);
++ writel(0xffffffff, pcie->reg + pcie->platform->reg_msi_en + 0x04);
++}
++
++static const struct irq_domain_ops aspeed_msi_domain_ops = {
++ .alloc = aspeed_irq_msi_domain_alloc,
++ .free = aspeed_irq_msi_domain_free,
++};
++
++static struct irq_chip aspeed_msi_irq_chip = {
++ .name = "PCIe MSI",
++ .irq_enable = pci_msi_unmask_irq,
++ .irq_disable = pci_msi_mask_irq,
++ .irq_mask = pci_msi_mask_irq,
++ .irq_unmask = pci_msi_unmask_irq,
++};
++
++static struct msi_domain_info aspeed_msi_domain_info = {
++ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
++ MSI_FLAG_MULTI_PCI_MSI),
++ .chip = &aspeed_msi_irq_chip,
++};
++#endif
++
++static int aspeed_pcie_init_irq_domain(struct aspeed_pcie *pcie)
++{
++ struct device *dev = pcie->dev;
++ struct device_node *node = dev->of_node;
++ struct device_node *pcie_intc_node;
++#ifdef CONFIG_PCI_MSI
++ struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
++ struct irq_domain *parent;
++#endif
++
++ /* Setup INTx */
++ pcie_intc_node = of_get_next_child(node, NULL);
++ if (!pcie_intc_node) {
++ dev_err(dev, "No PCIe Intc node found\n");
++ return -ENODEV;
++ }
++
++ pcie->irq_domain =
++ irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, &aspeed_intx_domain_ops, pcie);
++
++ if (!pcie->irq_domain) {
++ dev_err(dev, "failed to get an INTx IRQ domain\n");
++ return -ENOMEM;
++ }
++
++ of_node_put(pcie_intc_node);
++
++ if (!pcie->support_msi)
++ return 0;
++
++#ifdef CONFIG_PCI_MSI
++ pcie->dev_domain =
++ irq_domain_add_linear(NULL, MAX_MSI_HOST_IRQS, &aspeed_msi_domain_ops, pcie);
++ if (!pcie->dev_domain) {
++ dev_err(pcie->dev, "failed to create IRQ domain\n");
++ return -ENOMEM;
++ }
++
++ pcie->msi_domain =
++ pci_msi_create_irq_domain(fwnode, &aspeed_msi_domain_info, pcie->dev_domain);
++ if (!pcie->msi_domain) {
++ dev_err(pcie->dev, "failed to create MSI domain\n");
++ irq_domain_remove(pcie->dev_domain);
++ return -ENOMEM;
++ }
++ aspeed_pcie_msi_enable(pcie);
++#endif
++
++ return 0;
++}
++
++static void aspeed_pcie_port_init(struct aspeed_pcie *pcie)
++{
++ u32 link_sts = 0;
++
++ //plda init
++ regmap_write(pcie->pciephy, ASPEED_PCIE_LOCK, PCIE_UNLOCK);
++#ifdef CONFIG_HOTPLUG_PCI
++ regmap_write(pcie->pciephy, ASPEED_PCIE_GLOBAL,
++ RC_SYNC_RESET_DISABLE | ROOT_COMPLEX_ID(0x3) |
++ PCIE_RC_SLOT_ENABLE);
++ regmap_write(pcie->pciephy, 0x10, 0xd7040022 | DATALINK_REPORT_CAPABLE);
++ regmap_write(pcie->pciephy, 0x14,
++ HOTPLUG_CAPABLE_ENABLE | HOTPLUG_SURPRISE_ENABLE |
++ ATTENTION_BUTTON_ENABLE);
++#else
++ regmap_write(pcie->pciephy, ASPEED_PCIE_GLOBAL, ROOT_COMPLEX_ID(0x3));
++#endif
++ /* Toggle the gpio to reset the devices on RC bus */
++ if (pcie->perst_rc_out) {
++ mdelay(100);
++ gpiod_set_value(pcie->perst_rc_out, 1);
++ }
++
++ reset_control_deassert(pcie->perst);
++ mdelay(500);
++
++ //clr intx isr
++ writel(0x0, pcie->reg + 0x04);
++
++ //clr msi isr
++ writel(0xFFFFFFFF, pcie->reg + 0x28);
++ writel(0xFFFFFFFF, pcie->reg + 0x2c);
++
++ //rc_l
++ // 0x80: 040 set bit7 0
++ // 0xC0: 080 set bit7 1
++ if (pcie->domain)
++ writel(PCIE_RX_DMA_EN | PCIE_RX_LINEAR | PCIE_RX_MSI_SEL |
++ PCIE_RX_MSI_EN | PCIE_Wait_RX_TLP_CLR |
++ PCIE_RC_RX_ENABLE | PCIE_RC_ENABLE,
++ pcie->reg);
++ else
++ writel(PCIE_RX_DMA_EN | PCIE_RX_LINEAR | PCIE_RX_MSI_EN |
++ PCIE_Wait_RX_TLP_CLR | PCIE_RC_RX_ENABLE |
++ PCIE_RC_ENABLE,
++ pcie->reg);
++
++ //assign debug tx tag
++ writel(0x28, pcie->reg + 0x3C);
++
++ regmap_read(pcie->pciephy, ASPEED_PCIE_LINK, &link_sts);
++ if (link_sts & PCIE_LINK_STS) {
++ // aspeed_pcie_set_slot_power_limit(pcie);
++ dev_info(pcie->dev, "PCIE- Link up\n");
++ // if (readl(pcie->pciereg_base
++ // + ASPEED_PCIE_LINK_STS) & PCIE_LINK_2_5G)
++ // dev_info(pcie->dev, "PCIE- Link up : 2.5G\n");
++ } else {
++ dev_info(pcie->dev, "PCIE- Link down\n");
++ }
++}
++
++
++static ssize_t hotplug_store(struct device *dev, struct device_attribute *attr,
++ const char *buf, size_t len)
++{
++ struct aspeed_pcie *pcie = dev_get_drvdata(dev);
++
++ pcie->hotplug_event = 1;
++
++ return len;
++}
++
++static DEVICE_ATTR_WO(hotplug);
++
++static void aspeed_pcie_reset_work(struct work_struct *work)
++{
++ struct aspeed_pcie *pcie =
++ container_of(work, typeof(*pcie), rst_dwork.work);
++ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
++ struct pci_bus *parent = host->bus;
++ struct pci_dev *dev, *temp;
++ u32 link_sts = 0;
++ u16 command;
++
++ pci_lock_rescan_remove();
++
++ list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
++ bus_list) {
++ pci_dev_get(dev);
++ pci_stop_and_remove_bus_device(dev);
++ /*
++ * Ensure that no new Requests will be generated from
++ * the device.
++ */
++ pci_read_config_word(dev, PCI_COMMAND, &command);
++ command &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
++ command |= PCI_COMMAND_INTX_DISABLE;
++ pci_write_config_word(dev, PCI_COMMAND, command);
++ pci_dev_put(dev);
++ }
++
++ /*
++ * With perst_rc_out GPIO, the perst will only affect our PCIe controller, so it only
++ * needs to stay low for 1ms.
++ * Without perst_rc_out GPIO, the perst will affect external devices, so it needs to
++ * follow the spec and stay low for at least 100ms.
++ */
++ reset_control_assert(pcie->perst);
++ if (pcie->perst_rc_out) {
++ gpiod_set_value(pcie->perst_rc_out, 0);
++ mdelay(1);
++ } else {
++ mdelay(100);
++ }
++ reset_control_deassert(pcie->perst);
++ if (pcie->perst_rc_out) {
++ mdelay(100);
++ gpiod_set_value(pcie->perst_rc_out, 1);
++ }
++ mdelay(10);
++
++ regmap_read(pcie->pciephy, ASPEED_PCIE_LINK, &link_sts);
++ if (link_sts & PCIE_LINK_STS)
++ dev_info(pcie->dev, "PCIE- Link up\n");
++ else
++ dev_info(pcie->dev, "PCIE- Link down\n");
++
++ pci_rescan_bus(host->bus);
++ pci_unlock_rescan_remove();
++}
++
++static irqreturn_t pcie_rst_irq_handler(int irq, void *dev_id)
++{
++ struct aspeed_pcie *pcie = dev_id;
++
++ schedule_delayed_work(&pcie->rst_dwork, 0);
++
++ return IRQ_HANDLED;
++}
++
++#define AHBC_UNLOCK 0xAEED1A03
++static int aspeed_ast2600_setup(struct platform_device *pdev)
++{
++ struct aspeed_pcie *pcie = platform_get_drvdata(pdev);
++ struct device_node *cfg_node;
++ int err;
++
++ pcie->perst_rc_out =
++ devm_gpiod_get_optional(pcie->dev, "perst-rc-out",
++ GPIOD_OUT_LOW |
++ GPIOD_FLAGS_BIT_NONEXCLUSIVE);
++
++ pcie->perst = devm_reset_control_get_exclusive(pcie->dev, NULL);
++ if (IS_ERR(pcie->perst)) {
++ dev_err(&pdev->dev, "can't get pcie phy reset\n");
++ return PTR_ERR(pcie->perst);
++ }
++ reset_control_assert(pcie->perst);
++
++ pcie->ahbc = syscon_regmap_lookup_by_compatible("aspeed,aspeed-ahbc");
++ if (IS_ERR(pcie->ahbc))
++ return PTR_ERR(pcie->ahbc);
++
++ cfg_node =
++ of_find_compatible_node(NULL, NULL, "aspeed,ast2600-pciecfg");
++ if (cfg_node) {
++ pcie->cfg = syscon_node_to_regmap(cfg_node);
++ if (IS_ERR(pcie->cfg))
++ return PTR_ERR(pcie->cfg);
++ }
++
++ // Workaround: send a vendor-defined message to avoid an unknown message going out during PCIe reset
++ regmap_write(pcie->cfg, 0x10, 0x34000000);
++ regmap_write(pcie->cfg, 0x14, 0x0000007f);
++ regmap_write(pcie->cfg, 0x18, 0x00001a03);
++ regmap_write(pcie->cfg, 0x1c, 0x00000000);
++
++ regmap_write(pcie->ahbc, 0x00, AHBC_UNLOCK);
++ regmap_update_bits(pcie->ahbc, 0x8C, BIT(5), BIT(5));
++ regmap_write(pcie->ahbc, 0x00, 0x1);
++
++ //ahb to pcie rc
++ regmap_write(pcie->cfg, 0x60, 0xe0006000);
++ regmap_write(pcie->cfg, 0x64, 0x00000000);
++ regmap_write(pcie->cfg, 0x68, 0xFFFFFFFF);
++
++ //PCIe Host Enable
++ regmap_write(pcie->cfg, 0x00, BIT(0));
++
++ // The RC at 0x080 (domain 1) cannot be configured for MSI
++ pcie->support_msi = (pcie->domain) ? false : true;
++
++ aspeed_pcie_port_init(pcie);
++
++ pcie->host->ops = &aspeed_ast2600_pcie_ops;
++
++ err = sysfs_create_file(&pdev->dev.kobj, &dev_attr_hotplug.attr);
++ if (err) {
++ dev_err(&pdev->dev, "unable to create sysfs interface\n");
++ return err;
++ }
++
++ if (pcie->domain) {
++ pcie->perst_ep_in =
++ devm_gpiod_get_optional(pcie->dev, "perst-ep-in", GPIOD_IN);
++ if (pcie->perst_ep_in) {
++ gpiod_set_debounce(pcie->perst_ep_in, 100);
++ irq_set_irq_type(gpiod_to_irq(pcie->perst_ep_in),
++ IRQ_TYPE_EDGE_BOTH);
++ err = devm_request_irq(pcie->dev,
++ gpiod_to_irq(pcie->perst_ep_in),
++ pcie_rst_irq_handler,
++ IRQF_SHARED, "PERST monitor",
++ pcie);
++ if (err) {
++ dev_err(pcie->dev,
++ "Failed to request gpio irq %d\n", err);
++ return err;
++ }
++ INIT_DELAYED_WORK(&pcie->rst_dwork,
++ aspeed_pcie_reset_work);
++ }
++ pcie->perst_owner =
++ devm_gpiod_get_optional(pcie->dev, "perst-owner", GPIOD_OUT_HIGH);
++ }
++
++ return 0;
++}
++
++static int aspeed_ast2700_setup(struct platform_device *pdev)
++{
++ struct aspeed_pcie *pcie = platform_get_drvdata(pdev);
++ struct device *dev = pcie->dev;
++ u32 cfg_val;
++
++ pcie->h2xrst = devm_reset_control_get(dev, "h2x");
++ if (IS_ERR(pcie->h2xrst))
++ return dev_err_probe(dev, PTR_ERR(pcie->h2xrst), "failed to get h2x reset\n");
++
++ pcie->perst = devm_reset_control_get(dev, "perst");
++ if (IS_ERR(pcie->perst))
++ return dev_err_probe(dev, PTR_ERR(pcie->perst), "failed to get perst reset\n");
++
++ pcie->device = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,device");
++ if (IS_ERR(pcie->device))
++ return dev_err_probe(dev, PTR_ERR(pcie->device), "failed to map device base\n");
++
++ reset_control_assert(pcie->perst);
++
++ regmap_write(pcie->pciephy, 0x00, 0x11501a02);
++ regmap_write(pcie->pciephy, 0x70, 0xa00c0);
++ regmap_write(pcie->pciephy, 0x78, 0x80030);
++ regmap_write(pcie->pciephy, 0x58, 0x1);
++
++ regmap_write(pcie->device, 0x60, 0xf0001);
++ regmap_write(pcie->device, 0x64, 0xff00ff00);
++ regmap_write(pcie->device, 0x70, 0);
++ regmap_write(pcie->device, 0x78, (pcie->domain == 1) ? BIT(31) : 0);
++
++ reset_control_assert(pcie->h2xrst);
++ mdelay(10);
++ reset_control_deassert(pcie->h2xrst);
++
++ regmap_write(pcie->pciephy, 0x5C, 0x40000000);
++ /* Configure to Root port */
++ regmap_read(pcie->pciephy, PEHR_GEN_CAPABILITY, &cfg_val);
++ regmap_write(pcie->pciephy, PEHR_GEN_CAPABILITY,
++ (cfg_val & ~PORT_TYPE) | FIELD_PREP(PORT_TYPE, PORT_TYPE_ROOT));
++
++ /* PCIe Host Enable */
++ writel(0, pcie->reg + H2X_CTRL);
++ writel(H2X_BRIDGE_EN | H2X_BRIDGE_DIRECT_EN, pcie->reg + H2X_CTRL);
++
++ /* The BAR mapping:
++ * CPU Node0: 0x60000000
++ * CPU Node1: 0x80000000
++ * IO : 0xa0000000
++ */
++ writel(0x60000000 + (0x20000000 * pcie->domain), pcie->reg + H2X_REMAP_DIRECT_ADDR);
++
++ reset_control_deassert(pcie->perst);
++ mdelay(500);
++
++ /* Clear INTx isr */
++ writel(0, pcie->reg + pcie->platform->reg_intx_sts);
++
++ /* Clear MSI/MSI-X isr */
++ writel(~0, pcie->reg + pcie->platform->reg_msi_sts);
++ writel(~0, pcie->reg + pcie->platform->reg_msi_sts + 0x04);
++
++ pcie->host->ops = &aspeed_ast2700_pcie_ops;
++
++ aspeed_msi_domain_info.flags |= MSI_FLAG_PCI_MSIX;
++ pcie->support_msi = true;
++
++ return 0;
++}
++
++static int aspeed_pcie_probe(struct platform_device *pdev)
++{
++ struct device *dev = &pdev->dev;
++ struct pci_host_bridge *host;
++ struct aspeed_pcie *pcie;
++ struct device_node *node = dev->of_node;
++ const void *md = of_device_get_match_data(dev);
++ int err;
++
++ if (!md)
++ return -ENODEV;
++
++ host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
++ if (!host)
++ return -ENOMEM;
++
++ pcie = pci_host_bridge_priv(host);
++ pcie->dev = dev;
++ pcie->tx_tag = 0;
++ platform_set_drvdata(pdev, pcie);
++
++ pcie->platform = md;
++ pcie->host = host;
++
++ pcie->reg = devm_platform_ioremap_resource(pdev, 0);
++
++ of_property_read_u32(node, "msi_address", &pcie->msi_address);
++ of_property_read_u32(node, "linux,pci-domain", &pcie->domain);
++
++ pcie->pciephy = syscon_regmap_lookup_by_phandle(node, "pciephy");
++ if (IS_ERR(pcie->pciephy))
++ return dev_err_probe(dev, PTR_ERR(pcie->pciephy), "failed to map pciephy base\n");
++
++ err = pcie->platform->setup(pdev);
++ if (err) {
++ dev_err(dev, "Setup PCIe RC failed\n");
++ return err;
++ }
++
++ host->sysdata = pcie;
++
++ pcie->irq = irq_of_parse_and_map(node, 0);
++ if (!pcie->irq) {
++ dev_err(dev, "Mapping IRQ failed\n");
++ return -ENODEV;
++ }
++
++ err = aspeed_pcie_init_irq_domain(pcie);
++ if (err) {
++ dev_err(dev, "failed to init PCIe IRQ domain\n");
++ return err;
++ }
++
++ irq_set_chained_handler_and_data(pcie->irq, aspeed_pcie_intr_handler,
++ pcie);
++
++ return pci_host_probe(host);
++}
++
++static struct aspeed_pcie_rc_platform pcie_rc_ast2600 = {
++ .setup = aspeed_ast2600_setup,
++ .reg_intx_en = 0x04,
++ .reg_intx_sts = 0x08,
++ .reg_msi_en = 0x20,
++ .reg_msi_sts = 0x28,
++};
++
++static struct aspeed_pcie_rc_platform pcie_rc_ast2700 = {
++ .setup = aspeed_ast2700_setup,
++ .reg_intx_en = 0x40,
++ .reg_intx_sts = 0x48,
++ .reg_msi_en = 0x50,
++ .reg_msi_sts = 0x58,
++};
++
++static const struct of_device_id aspeed_pcie_of_match[] = {
++ { .compatible = "aspeed,ast2600-pcie", .data = &pcie_rc_ast2600 },
++ { .compatible = "aspeed,ast2700-pcie", .data = &pcie_rc_ast2700 },
++ {}
++};
++
++static struct platform_driver aspeed_pcie_driver = {
++ .driver = {
++ .name = "aspeed-pcie",
++ .suppress_bind_attrs = true,
++ .of_match_table = aspeed_pcie_of_match,
++ },
++ .probe = aspeed_pcie_probe,
++};
++
++module_platform_driver(aspeed_pcie_driver);
+--
+2.34.1
+
diff --git a/recipes-kernel/linux/files/0025-Add-SGMII-USB3-phy-driver-for-ast2700.patch b/recipes-kernel/linux/files/0025-Add-SGMII-USB3-phy-driver-for-ast2700.patch
new file mode 100644
index 0000000..b2c1b54
--- /dev/null
+++ b/recipes-kernel/linux/files/0025-Add-SGMII-USB3-phy-driver-for-ast2700.patch
@@ -0,0 +1,561 @@
+From 4670793341176838e34dcb12ee548e57e560bd76 Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Tue, 11 Mar 2025 16:26:10 +0800
+Subject: [PATCH] Add SGMII USB3 phy driver for ast2700
+
+This is based on Aspeed SDK 9.05.
+
+Source:
+AspeedTech-BMC github:
+https://github.com/AspeedTech-BMC/linux/blob/aspeed-master-v6.6/
+(cherry picked from commit 769f62b7baa84d6998723b0ea60280e380183553)
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
+---
+ drivers/phy/Kconfig | 1 +
+ drivers/phy/Makefile | 1 +
+ drivers/phy/aspeed/Kconfig | 23 +++
+ drivers/phy/aspeed/Makefile | 4 +
+ drivers/phy/aspeed/aspeed-sgmii.c | 199 +++++++++++++++++++++
+ drivers/phy/aspeed/aspeed-usb-phy3.c | 257 +++++++++++++++++++++++++++
+ 6 files changed, 485 insertions(+)
+ create mode 100644 drivers/phy/aspeed/Kconfig
+ create mode 100644 drivers/phy/aspeed/Makefile
+ create mode 100644 drivers/phy/aspeed/aspeed-sgmii.c
+ create mode 100644 drivers/phy/aspeed/aspeed-usb-phy3.c
+
+diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
+index e4502958f..1fc97cd71 100644
+--- a/drivers/phy/Kconfig
++++ b/drivers/phy/Kconfig
+@@ -74,6 +74,7 @@ config PHY_CAN_TRANSCEIVER
+
+ source "drivers/phy/allwinner/Kconfig"
+ source "drivers/phy/amlogic/Kconfig"
++source "drivers/phy/aspeed/Kconfig"
+ source "drivers/phy/broadcom/Kconfig"
+ source "drivers/phy/cadence/Kconfig"
+ source "drivers/phy/freescale/Kconfig"
+diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
+index fb3dc9de6..114db3303 100644
+--- a/drivers/phy/Makefile
++++ b/drivers/phy/Makefile
+@@ -12,6 +12,7 @@ obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o
+ obj-$(CONFIG_USB_LGM_PHY) += phy-lgm-usb.o
+ obj-y += allwinner/ \
+ amlogic/ \
++ aspeed/ \
+ broadcom/ \
+ cadence/ \
+ freescale/ \
+diff --git a/drivers/phy/aspeed/Kconfig b/drivers/phy/aspeed/Kconfig
+new file mode 100644
+index 000000000..4719f0b72
+--- /dev/null
++++ b/drivers/phy/aspeed/Kconfig
+@@ -0,0 +1,23 @@
++# SPDX-License-Identifier: GPL-2.0-only
++
++#
++# PHY drivers for ASPEED
++#
++
++config PHY_ASPEED_SGMII
++ tristate "ASPEED SGMII PHY driver"
++ select REGMAP
++ select MFD_SYSCON
++ select GENERIC_PHY
++ depends on ARCH_ASPEED
++ default n
++ help
++ Enable driver support for the Aspeed AST2700 SGMII PHY.
++
++config PHY_ASPEED_USB3
++ tristate "ASPEED USB3 PHY driver"
++ select GENERIC_PHY
++ depends on ARCH_ASPEED
++ default n
++ help
++ Enable driver support for the Aspeed AST2700 USB3 PHY.
+diff --git a/drivers/phy/aspeed/Makefile b/drivers/phy/aspeed/Makefile
+new file mode 100644
+index 000000000..946e8666c
+--- /dev/null
++++ b/drivers/phy/aspeed/Makefile
+@@ -0,0 +1,4 @@
++# SPDX-License-Identifier: GPL-2.0
++
++obj-$(CONFIG_PHY_ASPEED_SGMII) += aspeed-sgmii.o
++obj-$(CONFIG_PHY_ASPEED_USB3) += aspeed-usb-phy3.o
+\ No newline at end of file
+diff --git a/drivers/phy/aspeed/aspeed-sgmii.c b/drivers/phy/aspeed/aspeed-sgmii.c
+new file mode 100644
+index 000000000..65931caf5
+--- /dev/null
++++ b/drivers/phy/aspeed/aspeed-sgmii.c
+@@ -0,0 +1,199 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright 2023 Aspeed Technology Inc.
++ */
++#include <linux/sizes.h>
++#include <linux/module.h>
++#include <linux/phy/phy.h>
++#include <linux/platform_device.h>
++#include <linux/mfd/syscon.h>
++#include <linux/regmap.h>
++#include <linux/ethtool.h>
++
++#define SGMII_CFG 0x00
++#define SGMII_PHY_PIPE_CTL 0x20
++#define SGMII_FIFO_DELAY_THRESHOLD 0x28
++#define SGMII_MODE 0x30
++
++#define SGMII_CFG_FIFO_MODE BIT(0)
++#define SGMII_CFG_SPEED_10M 0
++#define SGMII_CFG_SPEED_100M BIT(4)
++#define SGMII_CFG_SPEED_1G BIT(5)
++#define SGMII_CFG_PWR_DOWN BIT(11)
++#define SGMII_CFG_AN_ENABLE BIT(12)
++#define SGMII_CFG_SW_RESET BIT(15)
++#define SGMII_PCTL_TX_DEEMPH_3_5DB BIT(6)
++#define SGMII_MODE_ENABLE BIT(0)
++#define SGMII_MODE_USE_LOCAL_CONFIG BIT(2)
++
++#define PLDA_CLK 0x268
++
++#define PLDA_CLK_SEL_INTERNAL_25M BIT(8)
++#define PLDA_CLK_FREQ_MULTI GENMASK(7, 0)
++
++struct aspeed_sgmii {
++ struct device *dev;
++ void __iomem *regs;
++ struct regmap *plda_regmap;
++};
++
++static void aspeed_sgmii_set_nway(struct phy *phy)
++{
++ struct aspeed_sgmii *sgmii = phy_get_drvdata(phy);
++ u32 reg;
++
++ /*
++ * The PLDA frequency multiplier is (X xor 0x19):
++ * (X xor 0x19) * clock source = data rate.
++ * The SGMII rate is 1.25G, so (0x2b xor 0x19) * 25 MHz = 50 * 25 MHz = 1.25 GHz.
++ */
++ reg = PLDA_CLK_SEL_INTERNAL_25M | FIELD_PREP(PLDA_CLK_FREQ_MULTI, 0x2b);
++ regmap_write(sgmii->plda_regmap, PLDA_CLK, reg);
++
++ writel(0, sgmii->regs + SGMII_MODE);
++
++ writel(0, sgmii->regs + SGMII_CFG);
++ reg = SGMII_CFG_SW_RESET | SGMII_CFG_PWR_DOWN;
++ writel(reg, sgmii->regs + SGMII_CFG);
++
++ reg = SGMII_CFG_AN_ENABLE;
++ writel(reg, sgmii->regs + SGMII_CFG);
++
++ writel(0x0a, sgmii->regs + SGMII_FIFO_DELAY_THRESHOLD);
++
++ writel(SGMII_PCTL_TX_DEEMPH_3_5DB, sgmii->regs + SGMII_PHY_PIPE_CTL);
++ reg = SGMII_MODE_ENABLE;
++ writel(reg, sgmii->regs + SGMII_MODE);
++}
++
++static void aspeed_sgmii_set_2_5g(struct phy *phy)
++{
++ struct aspeed_sgmii *sgmii = phy_get_drvdata(phy);
++ u32 reg;
++
++ /* For HiSGMII 2.5G speed */
++ reg = PLDA_CLK_SEL_INTERNAL_25M | FIELD_PREP(PLDA_CLK_FREQ_MULTI, 0x64);
++ regmap_write(sgmii->plda_regmap, PLDA_CLK, reg);
++
++ writel(0, sgmii->regs + SGMII_MODE);
++
++ writel(0, sgmii->regs + SGMII_CFG);
++ reg = SGMII_CFG_SW_RESET | SGMII_CFG_PWR_DOWN;
++ writel(reg, sgmii->regs + SGMII_CFG);
++
++ reg = SGMII_CFG_SPEED_1G;
++ writel(reg, sgmii->regs + SGMII_CFG);
++
++ writel(0x0a, sgmii->regs + SGMII_FIFO_DELAY_THRESHOLD);
++
++ writel(SGMII_PCTL_TX_DEEMPH_3_5DB, sgmii->regs + SGMII_PHY_PIPE_CTL);
++ reg = SGMII_MODE_ENABLE;
++ writel(reg, sgmii->regs + SGMII_MODE);
++}
++
++static int aspeed_sgmii_phy_init(struct phy *phy)
++{
++ aspeed_sgmii_set_nway(phy);
++
++ return 0;
++}
++
++static int aspeed_sgmii_phy_exit(struct phy *phy)
++{
++ struct aspeed_sgmii *sgmii = phy_get_drvdata(phy);
++
++ /* Disable SGMII controller */
++ writel(0, sgmii->regs + SGMII_MODE);
++
++ return 0;
++}
++
++static int aspeed_sgmii_phy_set_speed(struct phy *phy, int speed)
++{
++ if (speed == SPEED_2500)
++ aspeed_sgmii_set_2_5g(phy);
++ else
++ aspeed_sgmii_set_nway(phy);
++
++ return 0;
++}
++
++static const struct phy_ops aspeed_sgmii_phyops = {
++ .init = aspeed_sgmii_phy_init,
++ .exit = aspeed_sgmii_phy_exit,
++ .set_speed = aspeed_sgmii_phy_set_speed,
++ .owner = THIS_MODULE,
++};
++
++static int aspeed_sgmii_probe(struct platform_device *pdev)
++{
++ struct aspeed_sgmii *sgmii;
++ struct resource *res;
++ struct device *dev;
++ struct device_node *np;
++ struct phy_provider *provider;
++ struct phy *phy;
++
++ dev = &pdev->dev;
++
++ sgmii = devm_kzalloc(dev, sizeof(*sgmii), GFP_KERNEL);
++ if (!sgmii)
++ return -ENOMEM;
++
++ sgmii->dev = dev;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ if (!res) {
++ dev_err(dev, "cannot get resource\n");
++ return -ENODEV;
++ }
++
++ sgmii->regs = devm_ioremap_resource(dev, res);
++ if (IS_ERR(sgmii->regs)) {
++ dev_err(dev, "cannot map registers\n");
++ return PTR_ERR(sgmii->regs);
++ }
++
++ np = pdev->dev.of_node;
++ sgmii->plda_regmap = syscon_regmap_lookup_by_phandle(np, "aspeed,plda");
++ if (IS_ERR(sgmii->plda_regmap)) {
++ dev_err(sgmii->dev, "Unable to find plda regmap (%ld)\n",
++ PTR_ERR(sgmii->plda_regmap));
++ return PTR_ERR(sgmii->plda_regmap);
++ }
++
++ phy = devm_phy_create(dev, NULL, &aspeed_sgmii_phyops);
++ if (IS_ERR(phy)) {
++ dev_err(&pdev->dev, "failed to create PHY\n");
++ return PTR_ERR(phy);
++ }
++
++ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
++ if (IS_ERR(provider))
++ return PTR_ERR(provider);
++
++ phy_set_drvdata(phy, sgmii);
++
++ dev_info(dev, "module loaded\n");
++
++ return 0;
++}
++
++static const struct of_device_id aspeed_sgmii_of_matches[] = {
++ { .compatible = "aspeed,ast2700-sgmii" },
++ { },
++};
++
++static struct platform_driver aspeed_sgmii_driver = {
++ .probe = aspeed_sgmii_probe,
++ .driver = {
++ .name = "aspeed-sgmii",
++ .of_match_table = aspeed_sgmii_of_matches,
++ },
++};
++
++module_platform_driver(aspeed_sgmii_driver);
++
++MODULE_AUTHOR("Jacky Chou <jacky_chou@aspeedtech.com>");
++MODULE_DESCRIPTION("Control of ASPEED SGMII Device");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/phy/aspeed/aspeed-usb-phy3.c b/drivers/phy/aspeed/aspeed-usb-phy3.c
+new file mode 100644
+index 000000000..74198415d
+--- /dev/null
++++ b/drivers/phy/aspeed/aspeed-usb-phy3.c
+@@ -0,0 +1,257 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright 2023 Aspeed Technology Inc.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/of.h>
++#include <linux/of_address.h>
++#include <linux/regmap.h>
++#include <linux/mfd/syscon.h>
++#include <asm/io.h>
++#include <linux/clk.h>
++#include <linux/reset.h>
++#include <linux/phy/phy.h>
++
++#define PHY3P00_DEFAULT 0xCE70000F /* PHY PCS Protocol Setting #1 default value */
++#define PHY3P04_DEFAULT 0x49C00014 /* PHY PCS Protocol Setting #2 default value */
++#define PHY3P08_DEFAULT 0x5E406825 /* PHY PCS Protocol Setting #3 default value */
++#define PHY3P0C_DEFAULT 0x00000001 /* PHY PCS Protocol Setting #4 default value */
++
++#define DWC_CTRL_NUM 2
++
++#define USB_PHY3_INIT_DONE BIT(15) /* BIT15: USB3.1 Phy internal SRAM initialization done */
++#define USB_PHY3_SRAM_BYPASS BIT(7) /* USB3.1 Phy SRAM bypass */
++#define USB_PHY3_SRAM_EXT_LOAD BIT(6) /* USB3.1 Phy SRAM external load done */
++
++struct aspeed_usb_phy3 {
++ struct device *dev;
++ void __iomem *regs;
++ const struct aspeed_usb_phy3_model *model;
++ bool phy_ext_load_quirk;
++};
++
++struct usb_dwc3_ctrl {
++ u32 offset;
++ u32 value;
++};
++
++struct aspeed_usb_phy3_model {
++ /* offsets to the PHY3 registers */
++ unsigned int phy3s00; /* PHY SRAM Control/Status #1 */
++ unsigned int phy3s04; /* PHY SRAM Control/Status #2 */
++ unsigned int phy3c00; /* PHY PCS Control/Status #1 */
++ unsigned int phy3c04; /* PHY PCS Control/Status #2 */
++ unsigned int phy3p00; /* PHY PCS Protocol Setting #1 */
++ unsigned int phy3p04; /* PHY PCS Protocol Setting #2 */
++ unsigned int phy3p08; /* PHY PCS Protocol Setting #3 */
++ unsigned int phy3p0c; /* PHY PCS Protocol Setting #4 */
++ unsigned int dwc_cmd; /* DWC3 Commands base address offset */
++};
++
++static struct usb_dwc3_ctrl ctrl_data[DWC_CTRL_NUM] = {
++ {0xc12c, 0x0c854802}, /* Set DWC3 GUCTL for ref_clk */
++ {0xc630, 0x0c800020}, /* Set DWC3 GLADJ for ref_clk */
++};
++
++static const struct aspeed_usb_phy3_model ast2700a0_model = {
++ .phy3s00 = 0x800,
++ .phy3s04 = 0x804,
++ .phy3c00 = 0x808,
++ .phy3c04 = 0x80C,
++ .phy3p00 = 0x810,
++ .phy3p04 = 0x814,
++ .phy3p08 = 0x818,
++ .phy3p0c = 0x81C,
++ .dwc_cmd = 0xB80,
++};
++
++static const struct aspeed_usb_phy3_model ast2700_model = {
++ .phy3s00 = 0x00,
++ .phy3s04 = 0x04,
++ .phy3c00 = 0x08,
++ .phy3c04 = 0x0C,
++ .phy3p00 = 0x10,
++ .phy3p04 = 0x14,
++ .phy3p08 = 0x18,
++ .phy3p0c = 0x1C,
++ .dwc_cmd = 0x40,
++};
++
++static const struct of_device_id aspeed_usb_phy3_dt_ids[] = {
++ {
++ .compatible = "aspeed,ast2700-a0-uhy3a",
++ .data = &ast2700a0_model
++ },
++ {
++ .compatible = "aspeed,ast2700-a0-uhy3b",
++ .data = &ast2700a0_model
++ },
++ {
++ .compatible = "aspeed,ast2700-uphy3a",
++ .data = &ast2700_model
++ },
++ {
++ .compatible = "aspeed,ast2700-uphy3b",
++ .data = &ast2700_model
++ },
++ { }
++};
++MODULE_DEVICE_TABLE(of, aspeed_usb_phy3_dt_ids);
++
++static int aspeed_usb_phy3_init(struct phy *phy)
++{
++ struct aspeed_usb_phy3 *phy3 = phy_get_drvdata(phy);
++ const struct aspeed_usb_phy3_model *model = phy3->model;
++ u32 val;
++ int timeout = 100;
++ int i, j;
++
++ while ((readl(phy3->regs + model->phy3s00) & USB_PHY3_INIT_DONE)
++ != USB_PHY3_INIT_DONE) {
++ usleep_range(100, 110);
++ if (--timeout == 0) {
++ dev_err(phy3->dev, "Timed out waiting for phy3 init\n");
++ return -ETIMEDOUT;
++ }
++ }
++
++ val = readl(phy3->regs + model->phy3s00);
++
++ if (phy3->phy_ext_load_quirk)
++ val |= USB_PHY3_SRAM_EXT_LOAD;
++ else
++ val |= USB_PHY3_SRAM_BYPASS;
++ writel(val, phy3->regs + model->phy3s00);
++
++ /* Set the protocol1_ext signals to the default PHY3 settings from the SNPS documents,
++ * including PCFGI[54] (protocol1_ext_rx_los_lfps_en) for better compatibility.
++ */
++ writel(PHY3P00_DEFAULT, phy3->regs + model->phy3p00);
++ writel(PHY3P04_DEFAULT, phy3->regs + model->phy3p04);
++ writel(PHY3P08_DEFAULT, phy3->regs + model->phy3p08);
++ writel(PHY3P0C_DEFAULT, phy3->regs + model->phy3p0c);
++
++ /* xHCI DWC-specific commands, initially set when the PCIe xHCI is enabled */
++ for (i = 0, j = model->dwc_cmd; i < DWC_CTRL_NUM; i++) {
++ /* 48-bit commands:
++ * CMD1: Data -> DWC CMD [31:0], Address -> DWC CMD [47:32]
++ * CMD2: Data -> DWC CMD [79:48], Address -> DWC CMD [95:80]
++ * ... and so on.
++ */
++ if (i % 2 == 0) {
++ writel(ctrl_data[i].value, phy3->regs + j);
++ j += 4;
++
++ writel(ctrl_data[i].offset & 0xFFFF, phy3->regs + j);
++ } else {
++ val = readl(phy3->regs + j) & 0xFFFF;
++ val |= ((ctrl_data[i].value & 0xFFFF) << 16);
++ writel(val, phy3->regs + j);
++ j += 4;
++
++ val = (ctrl_data[i].offset << 16) | (ctrl_data[i].value >> 16);
++ writel(val, phy3->regs + j);
++ j += 4;
++ }
++ }
++
++ dev_info(phy3->dev, "Initialized USB PHY3\n");
++ return 0;
++}
++
++static const struct phy_ops aspeed_usb_phy3_phyops = {
++ .init = aspeed_usb_phy3_init,
++ .owner = THIS_MODULE,
++};
++
++static int aspeed_usb_phy3_probe(struct platform_device *pdev)
++{
++ struct aspeed_usb_phy3 *phy3;
++ struct device *dev;
++ struct phy_provider *provider;
++ struct phy *phy;
++ struct device_node *node = pdev->dev.of_node;
++ struct clk *clk;
++ struct reset_control *rst;
++ int rc = 0;
++
++ dev = &pdev->dev;
++
++ phy3 = devm_kzalloc(dev, sizeof(*phy3), GFP_KERNEL);
++ if (!phy3)
++ return -ENOMEM;
++
++ phy3->dev = dev;
++
++ phy3->model = of_device_get_match_data(dev);
++ if (!phy3->model) {
++ dev_err(dev, "Couldn't get model data\n");
++ return -ENODEV;
++ }
++
++ clk = devm_clk_get(dev, NULL);
++ if (IS_ERR(clk))
++ return PTR_ERR(clk);
++
++ rc = clk_prepare_enable(clk);
++ if (rc) {
++ dev_err(dev, "Unable to enable clock (%d)\n", rc);
++ return rc;
++ }
++
++ rst = devm_reset_control_get_shared(dev, NULL);
++ if (IS_ERR(rst)) {
++ rc = PTR_ERR(rst);
++ goto err;
++ }
++ rc = reset_control_deassert(rst);
++ if (rc)
++ goto err;
++
++ phy3->regs = of_iomap(node, 0);
++
++ phy3->phy_ext_load_quirk =
++ device_property_read_bool(dev, "aspeed,phy_ext_load_quirk");
++
++ phy = devm_phy_create(dev, NULL, &aspeed_usb_phy3_phyops);
++ if (IS_ERR(phy)) {
++ dev_err(dev, "failed to create PHY\n");
++ return PTR_ERR(phy);
++ }
++
++ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
++ if (IS_ERR(provider))
++ return PTR_ERR(provider);
++
++ phy_set_drvdata(phy, phy3);
++
++ dev_info(phy3->dev, "Probed USB PHY3\n");
++
++ return 0;
++
++err:
++ if (clk)
++ clk_disable_unprepare(clk);
++ return rc;
++}
++
++static int aspeed_usb_phy3_remove(struct platform_device *pdev)
++{
++ return 0;
++}
++
++static struct platform_driver aspeed_usb_phy3_driver = {
++ .probe = aspeed_usb_phy3_probe,
++ .remove = aspeed_usb_phy3_remove,
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .of_match_table = aspeed_usb_phy3_dt_ids,
++ },
++};
++module_platform_driver(aspeed_usb_phy3_driver);
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Joe Wang <joe_wang@aspeedtech.com>");
+--
+2.34.1
+
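As a cross-check of the 48-bit command packing performed in aspeed_usb_phy3_init() above, here is a standalone sketch (not driver code; the struct and function names are illustrative) that packs the two ctrl_data entries into three 32-bit words the same way the driver's even/odd loop does:

```
/* Standalone sketch of the DWC3 48-bit command packing; names are illustrative. */
#include <stdint.h>
#include <stdio.h>

struct dwc3_cmd {
        uint32_t offset;        /* DWC3 register offset (low 16 bits used) */
        uint32_t value;         /* value to write */
};

/* Pack two commands into words[0..2]:
 * word0         = CMD1 value[31:0]
 * word1[15:0]   = CMD1 offset, word1[31:16] = CMD2 value[15:0]
 * word2[15:0]   = CMD2 value[31:16], word2[31:16] = CMD2 offset
 */
static void pack_dwc3_cmds(const struct dwc3_cmd cmd[2], uint32_t words[3])
{
        words[0] = cmd[0].value;
        words[1] = (cmd[0].offset & 0xFFFF) | ((cmd[1].value & 0xFFFF) << 16);
        words[2] = (cmd[1].value >> 16) | (cmd[1].offset << 16);
}

int main(void)
{
        /* The two commands from the patch: GUCTL and GLADJ for ref_clk. */
        const struct dwc3_cmd cmds[2] = {
                { 0xc12c, 0x0c854802 },
                { 0xc630, 0x0c800020 },
        };
        uint32_t words[3];

        pack_dwc3_cmds(cmds, words);
        for (int i = 0; i < 3; i++)
                printf("word[%d] = 0x%08x\n", i, (unsigned int)words[i]);
        return 0;
}
```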
diff --git a/recipes-kernel/linux/files/0026-Add-USB-Virtual-Hub-Controller-driver-for-ast2700.patch b/recipes-kernel/linux/files/0026-Add-USB-Virtual-Hub-Controller-driver-for-ast2700.patch
new file mode 100644
index 0000000..2d715e6
--- /dev/null
+++ b/recipes-kernel/linux/files/0026-Add-USB-Virtual-Hub-Controller-driver-for-ast2700.patch
@@ -0,0 +1,617 @@
+From a6a18c0ece6c85aaafc7626d2bceadf5e34021ba Mon Sep 17 00:00:00 2001
+From: "yung-sheng.huang" <yung-sheng.huang@fii-na.corp-partner.google.com>
+Date: Thu, 13 Mar 2025 16:43:08 +0800
+Subject: [PATCH] Add USB Virtual Hub Controller driver for ast2700
+
+This is based on Aspeed SDK 9.05.
+
+Source:
+AspeedTech-BMC github:
+https://github.com/AspeedTech-BMC/linux/blob/aspeed-master-v6.6/
+(cherry picked from commit 769f62b7baa84d6998723b0ea60280e380183553)
+
+Signed-off-by: yung-sheng.huang <yung-sheng.huang@fii-na.corp-partner.google.com>
+---
+ drivers/usb/gadget/udc/aspeed-vhub/core.c | 183 ++++++++++++++++++++++
+ drivers/usb/gadget/udc/aspeed-vhub/dev.c | 10 +-
+ drivers/usb/gadget/udc/aspeed-vhub/epn.c | 38 ++++-
+ drivers/usb/gadget/udc/aspeed-vhub/hub.c | 14 +-
+ drivers/usb/gadget/udc/aspeed-vhub/vhub.h | 3 +
+ drivers/usb/gadget/udc/aspeed_udc.c | 47 ++++--
+ 6 files changed, 266 insertions(+), 29 deletions(-)
+
+diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c
+index f60a019bb..55c8af126 100644
+--- a/drivers/usb/gadget/udc/aspeed-vhub/core.c
++++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c
+@@ -23,9 +23,27 @@
+ #include <linux/of.h>
+ #include <linux/regmap.h>
+ #include <linux/dma-mapping.h>
++#include <linux/reset.h>
++#include <linux/mfd/syscon.h>
+
+ #include "vhub.h"
+
++#define ASPEED_G7_SCU_VHUB_USB_FUNC_OFFSET 0x410
++
++enum ast_g7_pcie {
++ NOT_SUPPORTED,
++ PCIE_EHCI,
++ PCIE_XHCI,
++};
++
++struct ast_vhub_match_data {
++ enum ast_g7_pcie g7_pcie;
++ u32 usb_mode_mask;
++ u32 xhci_mode_mask;
++ u32 txfifo_fix_reg;
++ u32 txfifo_fix_val;
++};
++
+ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
+ int status)
+ {
+@@ -239,6 +257,7 @@ void ast_vhub_init_hw(struct ast_vhub *vhub)
+ if (vhub->force_usb1)
+ ctrl |= VHUB_CTRL_FULL_SPEED_ONLY;
+
++ ctrl |= VHUB_CTRL_AUTO_REMOTE_WAKEUP;
+ ctrl |= VHUB_CTRL_UPSTREAM_CONNECT;
+ writel(ctrl, vhub->regs + AST_VHUB_CTRL);
+
+@@ -253,6 +272,52 @@ void ast_vhub_init_hw(struct ast_vhub *vhub)
+ vhub->regs + AST_VHUB_IER);
+ }
+
++int ast_vhub_init_pcie(struct ast_vhub *vhub, const struct ast_vhub_match_data *pdata)
++{
++ struct device *dev = &vhub->pdev->dev;
++ struct regmap *pcie_device;
++ struct regmap *scu;
++ u32 scu_usb;
++ int rc = 0;
++
++ scu = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,scu");
++ if (IS_ERR(scu)) {
++ dev_err(dev, "failed to find SCU regmap\n");
++ return PTR_ERR(scu);
++ }
++
++ regmap_read(scu, ASPEED_G7_SCU_VHUB_USB_FUNC_OFFSET, &scu_usb);
++
++ /* Check EHCI or xHCI to virtual hub */
++ if ((scu_usb & pdata->usb_mode_mask) == 0) {
++ pcie_device = syscon_regmap_lookup_by_phandle(dev->of_node,
++ "aspeed,device");
++ if (IS_ERR(pcie_device)) {
++ dev_err(dev, "failed to find PCIe device regmap\n");
++ return PTR_ERR(pcie_device);
++ }
++ if (pdata->g7_pcie == PCIE_XHCI) {
++ /* Check PCIe xHCI or BMC xHCI to virtual hub */
++ if ((scu_usb & pdata->xhci_mode_mask) == 0) {
++ dev_info(dev, "PCIe xHCI to vhub\n");
++ //EnPCIaMSI_EnPCIaIntA_EnPCIaMst_EnPCIaDev
++ /* Turn on PCIe xHCI without MSI */
++ regmap_update_bits(pcie_device, 0x70,
++ BIT(19) | BIT(11) | BIT(3),
++ BIT(19) | BIT(11) | BIT(3));
++ }
++ } else if (pdata->g7_pcie == PCIE_EHCI) {
++ dev_info(dev, "PCIe EHCI to vhub\n");
++ //EnPCIaMSI_EnPCIaIntA_EnPCIaMst_EnPCIaDev
++ /* Turn on PCIe EHCI without MSI */
++ regmap_update_bits(pcie_device, 0x70,
++ BIT(18) | BIT(10) | BIT(2),
++ BIT(18) | BIT(10) | BIT(2));
++ }
++ }
++ return rc;
++}
++
+ static void ast_vhub_remove(struct platform_device *pdev)
+ {
+ struct ast_vhub *vhub = platform_get_drvdata(pdev);
+@@ -280,6 +345,9 @@ static void ast_vhub_remove(struct platform_device *pdev)
+ if (vhub->clk)
+ clk_disable_unprepare(vhub->clk);
+
++ if (vhub->rst)
++ reset_control_assert(vhub->rst);
++
+ spin_unlock_irqrestore(&vhub->lock, flags);
+
+ if (vhub->ep0_bufs)
+@@ -298,11 +366,19 @@ static int ast_vhub_probe(struct platform_device *pdev)
+ struct resource *res;
+ int i, rc = 0;
+ const struct device_node *np = pdev->dev.of_node;
++ const struct ast_vhub_match_data *pdata;
++ u32 val;
+
+ vhub = devm_kzalloc(&pdev->dev, sizeof(*vhub), GFP_KERNEL);
+ if (!vhub)
+ return -ENOMEM;
+
++ pdata = of_device_get_match_data(&pdev->dev);
++ if (!pdata) {
++ dev_err(&pdev->dev, "Couldn't get match data\n");
++ return -ENODEV;
++ }
++
+ rc = of_property_read_u32(np, "aspeed,vhub-downstream-ports",
+ &vhub->max_ports);
+ if (rc < 0)
+@@ -337,6 +413,16 @@ static int ast_vhub_probe(struct platform_device *pdev)
+
+ platform_set_drvdata(pdev, vhub);
+
++ vhub->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
++
++ if (IS_ERR(vhub->rst)) {
++ rc = PTR_ERR(vhub->rst);
++ goto err;
++ }
++ rc = reset_control_assert(vhub->rst);
++ if (rc)
++ goto err;
++
+ vhub->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(vhub->clk)) {
+ rc = PTR_ERR(vhub->clk);
+@@ -348,6 +434,26 @@ static int ast_vhub_probe(struct platform_device *pdev)
+ goto err;
+ }
+
++ if (vhub->rst) {
++ mdelay(10);
++ rc = reset_control_deassert(vhub->rst);
++ if (rc)
++ goto err;
++ }
++
++ if (pdata->g7_pcie != NOT_SUPPORTED) {
++ rc = ast_vhub_init_pcie(vhub, pdata);
++ if (rc)
++ goto err;
++
++ /* For G7 PortA/B, enable the option of TXFIFO fix.
++ * It forces the CRC error for a re-try when vHub cannot fetch DRAM in time.
++ */
++ val = readl(vhub->regs + pdata->txfifo_fix_reg);
++ writel(pdata->txfifo_fix_val | val,
++ vhub->regs + pdata->txfifo_fix_reg);
++ }
++
+ /* Check if we need to limit the HW to USB1 */
+ max_speed = usb_get_maximum_speed(&pdev->dev);
+ if (max_speed != USB_SPEED_UNKNOWN && max_speed < USB_SPEED_HIGH)
+@@ -370,6 +476,12 @@ static int ast_vhub_probe(struct platform_device *pdev)
+ goto err;
+ }
+
++ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
++ if (rc) {
++ dev_warn(&pdev->dev, "No suitable DMA available\n");
++ goto err;
++ }
++
+ /*
+ * Allocate DMA buffers for all EP0s in one chunk,
+ * one per port and one for the vHub itself
+@@ -412,15 +524,86 @@ static int ast_vhub_probe(struct platform_device *pdev)
+ return rc;
+ }
+
++static const struct ast_vhub_match_data aspeed_vhub_match_data = {
++ .g7_pcie = NOT_SUPPORTED,
++};
++
++static const struct ast_vhub_match_data aspeed_g7_vhuba0_match_data = {
++ .g7_pcie = PCIE_EHCI,
++ .usb_mode_mask = GENMASK(25, 24),
++ .xhci_mode_mask = 0,
++ .txfifo_fix_reg = 0x800,
++ .txfifo_fix_val = BIT(13),
++};
++
++static const struct ast_vhub_match_data aspeed_g7_vhubb0_match_data = {
++ .g7_pcie = PCIE_EHCI,
++ .usb_mode_mask = GENMASK(29, 28),
++ .xhci_mode_mask = 0,
++ .txfifo_fix_reg = 0x800,
++ .txfifo_fix_val = BIT(13),
++};
++
++static const struct ast_vhub_match_data aspeed_g7_vhuba1_match_data = {
++ .g7_pcie = PCIE_XHCI,
++ .usb_mode_mask = GENMASK(3, 2),
++ .xhci_mode_mask = BIT_MASK(9),
++ .txfifo_fix_reg = 0x80C,
++ .txfifo_fix_val = BIT(31),
++};
++
++static const struct ast_vhub_match_data aspeed_g7_vhubb1_match_data = {
++ .g7_pcie = PCIE_XHCI,
++ .usb_mode_mask = GENMASK(7, 6),
++ .xhci_mode_mask = BIT_MASK(10),
++ .txfifo_fix_reg = 0x80C,
++ .txfifo_fix_val = BIT(31),
++};
++
++static const struct ast_vhub_match_data aspeed_g7_vhubc_match_data = {
++ .g7_pcie = NOT_SUPPORTED,
++};
++
++static const struct ast_vhub_match_data aspeed_g7_vhubd_match_data = {
++ .g7_pcie = NOT_SUPPORTED,
++};
++
+ static const struct of_device_id ast_vhub_dt_ids[] = {
+ {
+ .compatible = "aspeed,ast2400-usb-vhub",
++ .data = &aspeed_vhub_match_data,
+ },
+ {
+ .compatible = "aspeed,ast2500-usb-vhub",
++ .data = &aspeed_vhub_match_data,
+ },
+ {
+ .compatible = "aspeed,ast2600-usb-vhub",
++ .data = &aspeed_vhub_match_data,
++ },
++ {
++ .compatible = "aspeed,ast2700-usb-vhuba0",
++ .data = &aspeed_g7_vhuba0_match_data,
++ },
++ {
++ .compatible = "aspeed,ast2700-usb-vhubb0",
++ .data = &aspeed_g7_vhubb0_match_data,
++ },
++ {
++ .compatible = "aspeed,ast2700-usb-vhuba1",
++ .data = &aspeed_g7_vhuba1_match_data,
++ },
++ {
++ .compatible = "aspeed,ast2700-usb-vhubb1",
++ .data = &aspeed_g7_vhubb1_match_data,
++ },
++ {
++ .compatible = "aspeed,ast2700-usb-vhubc",
++ .data = &aspeed_g7_vhubc_match_data,
++ },
++ {
++ .compatible = "aspeed,ast2700-usb-vhubd",
++ .data = &aspeed_g7_vhubd_match_data,
+ },
+ { }
+ };
+diff --git a/drivers/usb/gadget/udc/aspeed-vhub/dev.c b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
+index 573109ca5..32fe010ec 100644
+--- a/drivers/usb/gadget/udc/aspeed-vhub/dev.c
++++ b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
+@@ -116,10 +116,14 @@ static int ast_vhub_dev_feature(struct ast_vhub_dev *d,
+
+ if (wValue == USB_DEVICE_REMOTE_WAKEUP) {
+ d->wakeup_en = is_set;
++ val = readl(d->vhub->regs + AST_VHUB_CTRL);
++ if (is_set)
++ writel(val | VHUB_CTRL_AUTO_REMOTE_WAKEUP,
++ d->vhub->regs + AST_VHUB_CTRL);
++
+ return std_req_complete;
+- }
+
+- if (wValue == USB_DEVICE_TEST_MODE) {
++ } else if (wValue == USB_DEVICE_TEST_MODE) {
+ val = readl(d->vhub->regs + AST_VHUB_CTRL);
+ val &= ~GENMASK(10, 8);
+ val |= VHUB_CTRL_SET_TEST_MODE((wIndex >> 8) & 0x7);
+@@ -239,7 +243,7 @@ int ast_vhub_std_dev_request(struct ast_vhub_ep *ep,
+ d->gadget.speed = ep->vhub->speed;
+ if (d->gadget.speed > d->driver->max_speed)
+ d->gadget.speed = d->driver->max_speed;
+- DDBG(d, "fist packet, captured speed %d\n",
++ DDBG(d, "first packet, captured speed %d\n",
+ d->gadget.speed);
+ }
+
+diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+index 148d7ec3e..a19cecbc7 100644
+--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
++++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+@@ -340,7 +340,9 @@ static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
+ struct ast_vhub *vhub = ep->vhub;
+ unsigned long flags;
+ bool empty;
+- int rc;
++ int rc = 0;
++
++ spin_lock_irqsave(&vhub->lock, flags);
+
+ /* Paranoid checks */
+ if (!u_req || !u_req->complete || !u_req->buf) {
+@@ -349,14 +351,16 @@ static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
+ dev_warn(&vhub->pdev->dev, "complete=%p internal=%d\n",
+ u_req->complete, req->internal);
+ }
+- return -EINVAL;
++ rc = -EINVAL;
++ goto out;
+ }
+
+ /* Endpoint enabled ? */
+ if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx ||
+ !ep->dev->enabled) {
+ EPDBG(ep, "Enqueuing request on wrong or disabled EP\n");
+- return -ESHUTDOWN;
++ rc = -ESHUTDOWN;
++ goto out;
+ }
+
+ /* Map request for DMA if possible. For now, the rule for DMA is
+@@ -383,11 +387,16 @@ static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
+ if (rc) {
+ dev_warn(&vhub->pdev->dev,
+ "Request mapping failure %d\n", rc);
+- return rc;
++ goto out;
+ }
+ } else
+ u_req->dma = 0;
+
++ if (ep->dev->wakeup_en) {
++ EPVDBG(ep, "Wakeup host first\n");
++ ast_vhub_hub_wake_all(vhub);
++ }
++
+ EPVDBG(ep, "enqueue req @%p\n", req);
+ EPVDBG(ep, " l=%d dma=0x%x zero=%d noshort=%d noirq=%d is_in=%d\n",
+ u_req->length, (u32)u_req->dma, u_req->zero,
+@@ -400,9 +409,8 @@ static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
+ req->act_count = 0;
+ req->active = false;
+ req->last_desc = -1;
+- spin_lock_irqsave(&vhub->lock, flags);
+- empty = list_empty(&ep->queue);
+
++ empty = list_empty(&ep->queue);
+ /* Add request to list and kick processing if empty */
+ list_add_tail(&req->queue, &ep->queue);
+ if (empty) {
+@@ -411,9 +419,10 @@ static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
+ else
+ ast_vhub_epn_kick(ep, req);
+ }
++out:
+ spin_unlock_irqrestore(&vhub->lock, flags);
+
+- return 0;
++ return rc;
+ }
+
+ static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
+@@ -786,6 +795,20 @@ static void ast_vhub_epn_dispose(struct usb_ep *u_ep)
+ ep->dev = NULL;
+ }
+
++static void ast_vhub_epn_flush(struct usb_ep *u_ep)
++{
++ struct ast_vhub_ep *ep = to_ast_ep(u_ep);
++ struct ast_vhub *vhub = ep->vhub;
++ unsigned long flags;
++
++ EPDBG(ep, "flushing !\n");
++
++ spin_lock_irqsave(&vhub->lock, flags);
++ /* This clears out all requests of the endpoint and sends request-done messages. */
++ ast_vhub_nuke(ep, -EINVAL);
++ spin_unlock_irqrestore(&vhub->lock, flags);
++}
++
+ static const struct usb_ep_ops ast_vhub_epn_ops = {
+ .enable = ast_vhub_epn_enable,
+ .disable = ast_vhub_epn_disable,
+@@ -796,6 +819,7 @@ static const struct usb_ep_ops ast_vhub_epn_ops = {
+ .set_wedge = ast_vhub_epn_set_wedge,
+ .alloc_request = ast_vhub_alloc_request,
+ .free_request = ast_vhub_free_request,
++ .fifo_flush = ast_vhub_epn_flush,
+ };
+
+ struct ast_vhub_ep *ast_vhub_alloc_epn(struct ast_vhub_dev *d, u8 addr)
+diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
+index a63e4af60..28278eab5 100644
+--- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c
++++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
+@@ -221,9 +221,8 @@ static int ast_vhub_hub_dev_feature(struct ast_vhub_ep *ep,
+ EPDBG(ep, "Hub remote wakeup %s\n",
+ is_set ? "enabled" : "disabled");
+ return std_req_complete;
+- }
+
+- if (wValue == USB_DEVICE_TEST_MODE) {
++ } else if (wValue == USB_DEVICE_TEST_MODE) {
+ val = readl(ep->vhub->regs + AST_VHUB_CTRL);
+ val &= ~GENMASK(10, 8);
+ val |= VHUB_CTRL_SET_TEST_MODE((wIndex >> 8) & 0x7);
+@@ -445,10 +444,9 @@ enum std_req_rc ast_vhub_std_hub_request(struct ast_vhub_ep *ep,
+
+ /* GET/SET_CONFIGURATION */
+ case DeviceRequest | USB_REQ_GET_CONFIGURATION:
+- return ast_vhub_simple_reply(ep, 1);
++ return ast_vhub_simple_reply(ep, vhub->current_config);
+ case DeviceOutRequest | USB_REQ_SET_CONFIGURATION:
+- if (wValue != 1)
+- return std_req_stall;
++ vhub->current_config = wValue;
+ return std_req_complete;
+
+ /* GET_DESCRIPTOR */
+@@ -673,6 +671,9 @@ static enum std_req_rc ast_vhub_set_port_feature(struct ast_vhub_ep *ep,
+ ast_vhub_port_reset(vhub, port);
+ return std_req_complete;
+ case USB_PORT_FEAT_POWER:
++ ast_vhub_change_port_stat(vhub, port,
++ 0, USB_PORT_STAT_POWER,
++ false);
+ /*
+ * On Power-on, we mark the connected flag changed,
+ * if there's a connected device, some hosts will
+@@ -750,9 +751,6 @@ static enum std_req_rc ast_vhub_get_port_stat(struct ast_vhub_ep *ep,
+ stat = vhub->ports[port].status;
+ chg = vhub->ports[port].change;
+
+- /* We always have power */
+- stat |= USB_PORT_STAT_POWER;
+-
+ EPDBG(ep, " port status=%04x change=%04x\n", stat, chg);
+
+ return ast_vhub_simple_reply(ep,
+diff --git a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
+index 6b9dfa6e1..fce1f004d 100644
+--- a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
++++ b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
+@@ -388,6 +388,8 @@ struct ast_vhub {
+ spinlock_t lock;
+ struct work_struct wake_work;
+ struct clk *clk;
++ struct reset_control *rst;
++
+
+ /* EP0 DMA buffers allocated in one chunk */
+ void *ep0_bufs;
+@@ -419,6 +421,7 @@ struct ast_vhub {
+
+ /* Upstream bus speed captured at bus reset */
+ unsigned int speed;
++ u8 current_config;
+
+ /* Standard USB Descriptors of the vhub. */
+ struct usb_device_descriptor vhub_dev_desc;
+diff --git a/drivers/usb/gadget/udc/aspeed_udc.c b/drivers/usb/gadget/udc/aspeed_udc.c
+index 486828657..4d8f925b2 100644
+--- a/drivers/usb/gadget/udc/aspeed_udc.c
++++ b/drivers/usb/gadget/udc/aspeed_udc.c
+@@ -156,7 +156,7 @@
+ #define AST_EP_DMA_DESC_PID_DATA1 (2 << 14)
+ #define AST_EP_DMA_DESC_PID_MDATA (3 << 14)
+ #define EP_DESC1_IN_LEN(x) ((x) & 0x1fff)
+-#define AST_EP_DMA_DESC_MAX_LEN (7680) /* Max packet length for trasmit in 1 desc */
++#define AST_EP_DMA_DESC_MAX_LEN (4096) /* Max packet length for transmit in 1 desc */
+
+ struct ast_udc_request {
+ struct usb_request req;
+@@ -278,6 +278,17 @@ static const char * const ast_ep_name[] = {
+
+ /*-------------------------------------------------------------------------*/
+
++static inline void ast_udc_dma_workaround(void *addr)
++{
++ /*
++ * The workaround consists of using a dummy read of the memory before
++ * doing the MMIO writes. This will ensure that the previous writes
++ * have been "pushed out".
++ */
++ mb();
++ (void)__raw_readl((void __iomem *)addr);
++}
++
+ static void ast_udc_done(struct ast_udc_ep *ep, struct ast_udc_request *req,
+ int status)
+ {
+@@ -478,7 +489,7 @@ static int ast_dma_descriptor_setup(struct ast_udc_ep *ep, u32 dma_buf,
+ struct device *dev = &udc->pdev->dev;
+ bool last = false;
+ int chunk, count;
+- u32 offset;
++ u32 offset, size;
+
+ if (!ep->descs) {
+ dev_warn(dev, "%s: Empty DMA descs list failure\n",
+@@ -489,9 +500,9 @@ static int ast_dma_descriptor_setup(struct ast_udc_ep *ep, u32 dma_buf,
+ chunk = tx_len;
+ offset = count = 0;
+
+- EP_DBG(ep, "req @%p, %s:%d, %s:0x%x, %s:0x%x\n", req,
++ EP_DBG(ep, "req @%p, %s:%d, %s:0x%x, %s:0x%x zero=%d\n", req,
+ "wptr", ep->descs_wptr, "dma_buf", dma_buf,
+- "tx_len", tx_len);
++ "tx_len", tx_len, req->req.zero);
+
+ /* Create Descriptor Lists */
+ while (chunk >= 0 && !last && count < AST_UDC_DESCS_COUNT) {
+@@ -499,13 +510,23 @@ static int ast_dma_descriptor_setup(struct ast_udc_ep *ep, u32 dma_buf,
+ ep->descs[ep->descs_wptr].des_0 = dma_buf + offset;
+
+ if (chunk > ep->chunk_max) {
+- ep->descs[ep->descs_wptr].des_1 = ep->chunk_max;
++ size = ep->chunk_max;
+ } else {
+- ep->descs[ep->descs_wptr].des_1 = chunk;
+- last = true;
++ size = chunk;
++ /*
++ * Check if this is the last packet?
++ * May go the loop again for the zero length packet
++ */
++ if (!chunk || !req->req.zero || (chunk % ep->ep.maxpacket) != 0)
++ last = true;
+ }
+
+- chunk -= ep->chunk_max;
++ ep->descs[ep->descs_wptr].des_1 = size;
++ chunk -= size;
++ offset += size;
++
++ if (last)
++ ast_udc_dma_workaround(&ep->descs[ep->descs_wptr]);
+
+ EP_DBG(ep, "descs[%d]: 0x%x 0x%x\n",
+ ep->descs_wptr,
+@@ -520,8 +541,6 @@ static int ast_dma_descriptor_setup(struct ast_udc_ep *ep, u32 dma_buf,
+
+ if (ep->descs_wptr >= AST_UDC_DESCS_COUNT)
+ ep->descs_wptr = 0;
+-
+- offset = ep->chunk_max * count;
+ }
+
+ return 0;
+@@ -538,6 +557,9 @@ static void ast_udc_epn_kick(struct ast_udc_ep *ep, struct ast_udc_request *req)
+ EP_DBG(ep, "kick req @%p, len:%d, dir:%d\n",
+ req, tx_len, ep->dir_in);
+
++ if (ep->dir_in)
++ ast_udc_dma_workaround(req->req.buf + req->req.actual);
++
+ ast_ep_write(ep, req->req.dma + req->req.actual, AST_UDC_EP_DMA_BUFF);
+
+ /* Start DMA */
+@@ -549,11 +571,13 @@ static void ast_udc_epn_kick(struct ast_udc_ep *ep, struct ast_udc_request *req)
+ static void ast_udc_epn_kick_desc(struct ast_udc_ep *ep,
+ struct ast_udc_request *req)
+ {
++ u32 count;
+ u32 descs_max_size;
+ u32 tx_len;
+ u32 last;
+
+- descs_max_size = AST_EP_DMA_DESC_MAX_LEN * AST_UDC_DESCS_COUNT;
++ count = req->req.zero ? AST_UDC_DESCS_COUNT - 1 : AST_UDC_DESCS_COUNT;
++ descs_max_size = AST_EP_DMA_DESC_MAX_LEN * count;
+
+ last = req->req.length - req->req.actual;
+ tx_len = last > descs_max_size ? descs_max_size : last;
+@@ -1303,6 +1327,7 @@ static int ast_udc_start(struct usb_gadget *gadget,
+ UDC_DBG(udc, "\n");
+ udc->driver = driver;
+ udc->gadget.dev.of_node = udc->pdev->dev.of_node;
++ udc->gadget.dev.of_node_reused = true;
+
+ for (i = 0; i < AST_UDC_NUM_ENDPOINTS; i++) {
+ ep = &udc->ep[i];
+--
+2.34.1
+
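To illustrate the descriptor splitting that ast_dma_descriptor_setup() performs after this change (chunks of at most the per-descriptor limit, plus a trailing zero-length descriptor when req->zero is set and the length is a multiple of maxpacket), here is a standalone sketch. It assumes chunk_max equals AST_EP_DMA_DESC_MAX_LEN (4096) from this patch; the ring size and function name are illustrative, not taken from the driver.

```
/* Standalone sketch of the DMA descriptor splitting; names and ring size are illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define DESC_MAX_LEN    4096    /* AST_EP_DMA_DESC_MAX_LEN after this patch */
#define DESCS_COUNT     32      /* illustrative ring size */

static int build_desc_sizes(unsigned int tx_len, unsigned int maxpacket,
                            bool zero, unsigned int *sizes)
{
        unsigned int chunk = tx_len;
        int count = 0;
        bool last = false;

        while (!last && count < DESCS_COUNT) {
                unsigned int size = chunk > DESC_MAX_LEN ? DESC_MAX_LEN : chunk;

                /* Stop unless a trailing zero-length packet is still required. */
                if (size == chunk &&
                    (!chunk || !zero || (chunk % maxpacket) != 0))
                        last = true;

                sizes[count++] = size;
                chunk -= size;
        }
        return count;
}

int main(void)
{
        unsigned int sizes[DESCS_COUNT];
        int n = build_desc_sizes(8704, 512, true, sizes);

        /* Expected: 4096, 4096, 512, then a 0-byte descriptor for the ZLP. */
        for (int i = 0; i < n; i++)
                printf("desc[%d] = %u bytes\n", i, sizes[i]);
        return 0;
}
```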
diff --git a/recipes-kernel/linux/linux-gbmc_aspeedg7.bb b/recipes-kernel/linux/linux-gbmc_aspeedg7.bb
index 8433ef9..ee8fb72 100644
--- a/recipes-kernel/linux/linux-gbmc_aspeedg7.bb
+++ b/recipes-kernel/linux/linux-gbmc_aspeedg7.bb
@@ -22,27 +22,36 @@
"
# TODO(b/386134258): ask Aspeed to upstream below
-# All these dirvers are porting from SDK v09.03 kernel
+# All these drivers are ported from the SDK v09.05 kernel
# https://github.com/AspeedTech-BMC/linux/commits/aspeed-master-v6.6/
-# SHA: a769cc67850759a3952f7a40f5f5798c3d0f7bfd
+# SHA: 769f62b7baa84d6998723b0ea60280e380183553
SRC_URI:append:aspeed-g7 = " \
- file://0001-Add-dts-dtsi-config-for-2700-A0-A1.patch \
- file://0002-dt-bings-head-file-for-2700-A0-A1.patch \
- file://0003-Add-socX_intc-driver.patch \
- file://0004-Add-e2m_icX-for-A1.patch \
- file://0005-pinctrl-for-2700.patch \
- file://0006-syscon0_syscon1_A0_A1.patch \
- file://0007-uart_udma_2700.patch \
- file://0008-fmc_spi_mtd.patch \
- file://0009-Enable-wdt-driver.patch \
- file://0010-Enable-reset-controller-driver.patch \
- file://0011-Enable-FTGMAC100-driver-for-mac0.patch \
- file://0012-Enable-gpio-controller-driver.patch \
- file://0013-Enable-sgpio-driver.patch \
- file://0014-Enable-i2c-driver.patch \
- file://0015-porting-emmc-driver.patch \
- file://0016-Enable-jtag-driver.patch \
- file://0017-Add-uart-routing.patch\
+ file://0001-Add-arch-files-for-ast2700.patch \
+ file://0002-Add-dt-bindings-head-files-for-ast2700.patch \
+ file://0003-Add-include-head-files-for-ast2700.patch \
+ file://0004-Add-irqchip-driver-for-ast2700.patch \
+ file://0005-Add-pinctrl-driver-for-ast2700.patch \
+ file://0006-Add-clk-driver-for-ast2700.patch \
+ file://0007-Add-SOC-driver-for-ast2700.patch \
+ file://0008-Add-uart-drvier-for-ast2700.patch \
+ file://0009-Add-SPI-and-MTD-driver-for-ast2700.patch \
+ file://0010-Add-crypto-driver-for-ast2700.patch \
+ file://0011-Add-watchdog-driver-for-ast2700.patch \
+ file://0012-Add-reset-controller-driver-for-ast2700.patch \
+ file://0013-Add-net-ftgmac-driver-for-ast2700.patch \
+ file://0014-Add-gpio-sgpio-driver-for-ast2700.patch \
+ file://0015-Add-i2c-driver-for-ast2700.patch \
+ file://0016-Add-emmc-driver-for-ast2700.patch \
+ file://0017-Add-jtag-driver-for-ast2700.patch \
+ file://0018-Add-RNG-drivers-for-ast2700.patch \
+ file://0019-Add-ADC-driver-for-ast2700.patch \
+ file://0020-Add-RTC-driver-for-ast2700.patch \
+ file://0021-Add-USB-uhci-ehci-driver-for-ast2700.patch \
+ file://0022-Add-crypto-driver-for-ast2700.patch \
+ file://0023-Add-i3c-driver-for-ast2700.patch \
+ file://0024-Add-PCIe-RC-driver-for-ast2700.patch \
+ file://0025-Add-SGMII-USB3-phy-driver-for-ast2700.patch \
+ file://0026-Add-USB-Virtual-Hub-Controller-driver-for-ast2700.patch \
"
KCONFIG_MODE="--allnoconfig"