author    Dmitry Baryshkov <dmitry.baryshkov@linaro.org>  2022-01-28 02:23:24 +0300
committer Dmitry Baryshkov <dmitry.baryshkov@linaro.org>  2022-01-28 02:23:24 +0300
commit    a98bbc34054f08a26c488ec5b3eab9766911981f
tree      2814c8be053abfaf982a17c19f9ab2a1eb3f6c0e
parent    e961a3113c795ae85bbe38e8fd4358b6a553718a
New rr-cache entries from ci-merge
Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
-rw-r--r--  rr-cache/10a8545b4c4be6af2fc9f01da40c1782f43a6ea8/thisimage   |   13
-rw-r--r--  rr-cache/17322fadc19567308f58287055e890d18cee9adb/thisimage   | 1318
-rw-r--r--  rr-cache/325bb31acf18dbd753731c11ff330dfee205f678/thisimage   |   24
-rw-r--r--  rr-cache/49b87c48bf2ac9331db89b2d5c14b64e2a07542a/postimage   |  117
-rw-r--r--  rr-cache/49b87c48bf2ac9331db89b2d5c14b64e2a07542a/preimage    |  121
-rw-r--r--  rr-cache/5827ea96d82158fcd557fa14f13f88d4f5342a87/thisimage   |    3
-rw-r--r--  rr-cache/5c4842fdcd0859dbca6e52d6868f065b520bac7f/thisimage   |   13
-rw-r--r--  rr-cache/5c4842fdcd0859dbca6e52d6868f065b520bac7f/thisimage.1 |   13
-rw-r--r--  rr-cache/7a9e95bebe19cf20be503f6e4db2d595dc9ed9cf/thisimage   |  686
-rw-r--r--  rr-cache/8c2b8bf507854aaf7c7a64fde16df892d508760f/postimage   | 7537
-rw-r--r--  rr-cache/8c2b8bf507854aaf7c7a64fde16df892d508760f/preimage    | 7541
-rw-r--r--  rr-cache/8fa1921511a4999681ff27bc4fc8fbe723977a4f/thisimage   |  633
-rw-r--r--  rr-cache/9304cf403e433f623fc9b674a050258c4b4b216c/preimage    | 1510
-rw-r--r--  rr-cache/974706bbaaf4e5896a4bcc2f3e72e8335ce0d1dd/postimage   |   97
-rw-r--r--  rr-cache/974706bbaaf4e5896a4bcc2f3e72e8335ce0d1dd/preimage    |  124
-rw-r--r--  rr-cache/9a11bb9d0aee8e2b21835affb17471cbe026fdba/thisimage   |    3
-rw-r--r--  rr-cache/b1a5dd916641055afa69c27a74431a0b5f649585/thisimage   |  162
-rw-r--r--  rr-cache/eb2a574f27709226377dc4000c4882d227af3594/postimage   | 1027
-rw-r--r--  rr-cache/eb2a574f27709226377dc4000c4882d227af3594/preimage    | 1029
-rw-r--r--  rr-cache/f285fdfe5adcadf77d60a81471e87da51069de06/preimage    | 1074
-rw-r--r--  rr-cache/f8a1a7626be9dc69ed208c66229f310158ca0a52/thisimage   |  313
21 files changed, 23089 insertions, 269 deletions
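
The entries above are git rerere's resolution cache: each SHA-1-named directory is a conflict fingerprint, preimage holds the hunks in their conflicted state (note the <<<<<<< / >>>>>>> markers preserved in the files below), postimage holds the recorded resolution, and thisimage/thisimage.N files are snapshots of the file as it stood during a merge. A minimal sketch of how such entries get created, assuming a conflicting topic branch (the branch name is illustrative):

$ git config rerere.enabled true   # start recording conflict resolutions
$ git merge ci-topic               # a conflict writes .git/rr-cache/<sha1>/preimage
  ... resolve the conflicts in the working tree ...
$ git commit                       # the resolution is saved as .git/rr-cache/<sha1>/postimage
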
diff --git a/rr-cache/10a8545b4c4be6af2fc9f01da40c1782f43a6ea8/thisimage b/rr-cache/10a8545b4c4be6af2fc9f01da40c1782f43a6ea8/thisimage
index f503868..6494e4b 100644
--- a/rr-cache/10a8545b4c4be6af2fc9f01da40c1782f43a6ea8/thisimage
+++ b/rr-cache/10a8545b4c4be6af2fc9f01da40c1782f43a6ea8/thisimage
@@ -379,6 +379,7 @@ CONFIG_MICROSEMI_PHY=y
CONFIG_AT803X_PHY=y
CONFIG_REALTEK_PHY=y
CONFIG_ROCKCHIP_PHY=y
+CONFIG_DP83867_PHY=y
CONFIG_VITESSE_PHY=y
CONFIG_USB_PEGASUS=m
CONFIG_USB_RTL8150=m
@@ -416,6 +417,7 @@ CONFIG_TOUCHSCREEN_EDT_FT5X06=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PM8941_PWRKEY=y
CONFIG_INPUT_PM8XXX_VIBRATOR=m
+CONFIG_INPUT_PWM_BEEPER=m
CONFIG_INPUT_PWM_VIBRA=m
CONFIG_INPUT_HISI_POWERKEY=y
# CONFIG_SERIO_SERPORT is not set
@@ -480,6 +482,7 @@ CONFIG_I2C_QCOM_GENI=m
CONFIG_I2C_QUP=y
CONFIG_I2C_RIIC=y
CONFIG_I2C_RK3X=y
+CONFIG_I2C_S3C2410=y
CONFIG_I2C_SH_MOBILE=y
CONFIG_I2C_TEGRA=y
CONFIG_I2C_UNIPHIER_F=y
@@ -523,6 +526,7 @@ CONFIG_PINCTRL_IMX8MQ=y
CONFIG_PINCTRL_IMX8QM=y
CONFIG_PINCTRL_IMX8QXP=y
CONFIG_PINCTRL_IMX8DXL=y
+CONFIG_PINCTRL_IMX8ULP=y
CONFIG_PINCTRL_MSM=y
CONFIG_PINCTRL_IPQ8074=y
CONFIG_PINCTRL_IPQ6018=y
@@ -574,6 +578,7 @@ CONFIG_BATTERY_MAX17042=m
CONFIG_CHARGER_BQ25890=m
CONFIG_CHARGER_BQ25980=m
CONFIG_SENSORS_ARM_SCPI=y
+CONFIG_SENSORS_JC42=m
CONFIG_SENSORS_LM90=m
CONFIG_SENSORS_PWM_FAN=m
CONFIG_SENSORS_RASPBERRYPI_HWMON=m
@@ -692,6 +697,7 @@ CONFIG_VIDEO_QCOM_VENUS=m
CONFIG_SDR_PLATFORM_DRIVERS=y
CONFIG_VIDEO_RCAR_DRIF=m
CONFIG_VIDEO_IMX219=m
+CONFIG_VIDEO_OV5640=m
CONFIG_VIDEO_OV5645=m
CONFIG_VIDEO_QCOM_CAMSS=m
CONFIG_VIDEO_QCOM_VENUS=m
@@ -824,6 +830,7 @@ CONFIG_SND_SOC_RT5659=m
CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m
CONFIG_SND_SOC_SIMPLE_MUX=m
CONFIG_SND_SOC_TAS571X=m
+CONFIG_SND_SOC_TLV320AIC32X4_I2C=m
CONFIG_SND_SOC_WCD934X=m
CONFIG_SND_SOC_WM8904=m
CONFIG_SND_SOC_WM8960=m
@@ -948,6 +955,7 @@ CONFIG_RTC_DRV_DS1307=m
CONFIG_RTC_DRV_HYM8563=m
CONFIG_RTC_DRV_MAX77686=y
CONFIG_RTC_DRV_RK808=m
+CONFIG_RTC_DRV_PCF85063=m
CONFIG_RTC_DRV_PCF85363=m
CONFIG_RTC_DRV_M41T80=m
CONFIG_RTC_DRV_RX8581=m
@@ -999,6 +1007,7 @@ CONFIG_MFD_CROS_EC_DEV=y
CONFIG_STAGING=y
CONFIG_STAGING_MEDIA=y
CONFIG_VIDEO_HANTRO=m
+CONFIG_VIDEO_IMX_MEDIA=m
CONFIG_CHROME_PLATFORMS=y
CONFIG_CROS_EC=y
CONFIG_CROS_EC_I2C=y
@@ -1020,6 +1029,7 @@ CONFIG_CLK_IMX8MN=y
CONFIG_CLK_IMX8MP=y
CONFIG_CLK_IMX8MQ=y
CONFIG_CLK_IMX8QXP=y
+CONFIG_CLK_IMX8ULP=y
CONFIG_TI_SCI_CLK=y
CONFIG_COMMON_CLK_QCOM=y
CONFIG_QCOM_A53PLL=y
@@ -1116,6 +1126,7 @@ CONFIG_ARCH_R8A77980=y
CONFIG_ARCH_R8A77990=y
CONFIG_ARCH_R8A77995=y
CONFIG_ARCH_R8A779A0=y
+CONFIG_ARCH_R8A779F0=y
CONFIG_ARCH_R9A07G044=y
CONFIG_ROCKCHIP_PM_DOMAINS=y
CONFIG_ARCH_TEGRA_132_SOC=y
@@ -1225,6 +1236,8 @@ CONFIG_SLIM_QCOM_NGD_CTRL=m
CONFIG_MUX_MMIO=y
CONFIG_INTERCONNECT=y
CONFIG_INTERCONNECT_IMX=m
+CONFIG_INTERCONNECT_IMX8MM=m
+CONFIG_INTERCONNECT_IMX8MN=m
CONFIG_INTERCONNECT_IMX8MQ=m
CONFIG_INTERCONNECT_QCOM=y
CONFIG_INTERCONNECT_QCOM_MSM8916=m
diff --git a/rr-cache/17322fadc19567308f58287055e890d18cee9adb/thisimage b/rr-cache/17322fadc19567308f58287055e890d18cee9adb/thisimage
new file mode 100644
index 0000000..398016f
--- /dev/null
+++ b/rr-cache/17322fadc19567308f58287055e890d18cee9adb/thisimage
@@ -0,0 +1,1318 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NUMA_BALANCING=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_PERF=y
+CONFIG_USER_NS=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_ARCH_ACTIONS=y
+CONFIG_ARCH_SUNXI=y
+CONFIG_ARCH_ALPINE=y
+CONFIG_ARCH_APPLE=y
+CONFIG_ARCH_BCM2835=y
+CONFIG_ARCH_BCM4908=y
+CONFIG_ARCH_BCM_IPROC=y
+CONFIG_ARCH_BERLIN=y
+CONFIG_ARCH_BRCMSTB=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_ARCH_K3=y
+CONFIG_ARCH_LAYERSCAPE=y
+CONFIG_ARCH_LG1K=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_KEEMBAY=y
+CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MESON=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_ARCH_MXC=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_RENESAS=y
+CONFIG_ARCH_ROCKCHIP=y
+CONFIG_ARCH_S32=y
+CONFIG_ARCH_SEATTLE=y
+CONFIG_ARCH_INTEL_SOCFPGA=y
+CONFIG_ARCH_SYNQUACER=y
+CONFIG_ARCH_TEGRA=y
+CONFIG_ARCH_SPRD=y
+CONFIG_ARCH_THUNDER=y
+CONFIG_ARCH_THUNDER2=y
+CONFIG_ARCH_UNIPHIER=y
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_VISCONTI=y
+CONFIG_ARCH_XGENE=y
+CONFIG_ARCH_ZYNQMP=y
+CONFIG_ARM64_VA_BITS_48=y
+CONFIG_SCHED_MC=y
+CONFIG_SCHED_SMT=y
+CONFIG_NUMA=y
+CONFIG_SECCOMP=y
+CONFIG_KEXEC=y
+CONFIG_KEXEC_FILE=y
+CONFIG_CRASH_DUMP=y
+CONFIG_XEN=y
+CONFIG_COMPAT=y
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_HIBERNATION=y
+CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+CONFIG_ENERGY_MODEL=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_ARM_PSCI_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPUFREQ_DT=y
+CONFIG_ACPI_CPPC_CPUFREQ=m
+CONFIG_ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM=m
+CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
+CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_ARM_IMX_CPUFREQ_DT=m
+CONFIG_ARM_QCOM_CPUFREQ_NVMEM=y
+CONFIG_ARM_QCOM_CPUFREQ_HW=y
+CONFIG_ARM_RASPBERRYPI_CPUFREQ=m
+CONFIG_ARM_SCMI_CPUFREQ=y
+CONFIG_ARM_TEGRA186_CPUFREQ=y
+CONFIG_QORIQ_CPUFREQ=y
+CONFIG_ARM_SCMI_PROTOCOL=y
+CONFIG_ARM_SCPI_PROTOCOL=y
+CONFIG_RASPBERRYPI_FIRMWARE=y
+CONFIG_INTEL_STRATIX10_SERVICE=y
+CONFIG_INTEL_STRATIX10_RSU=m
+CONFIG_QCOM_SCM=y
+CONFIG_EFI_CAPSULE_LOADER=y
+CONFIG_IMX_SCU=y
+CONFIG_IMX_SCU_PD=y
+CONFIG_ACPI=y
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_ACPI_APEI_MEMORY_FAILURE=y
+CONFIG_ACPI_APEI_EINJ=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_SHA512_ARM64_CE=m
+CONFIG_CRYPTO_SHA3_ARM64=m
+CONFIG_CRYPTO_SM3_ARM64_CE=m
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_CHACHA20_NEON=m
+CONFIG_CRYPTO_AES_ARM64_BS=m
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_KSM=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IPV6=m
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_VLAN_FILTERING=y
+CONFIG_NET_DSA=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBS=m
+CONFIG_NET_SCH_ETF=m
+CONFIG_NET_SCH_TAPRIO=m
+CONFIG_NET_SCH_MQPRIO=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_GATE=m
+CONFIG_QRTR=m
+CONFIG_QRTR_SMD=m
+CONFIG_QRTR_TUN=m
+CONFIG_BPF_JIT=y
+CONFIG_CAN=m
+CONFIG_CAN_RCAR=m
+CONFIG_CAN_RCAR_CANFD=m
+CONFIG_CAN_FLEXCAN=m
+CONFIG_CAN_MCP251XFD=m
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+# CONFIG_BT_HS is not set
+# CONFIG_BT_LE is not set
+CONFIG_BT_LEDS=y
+# CONFIG_BT_DEBUGFS is not set
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTSDIO=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_3WIRE=y
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_BT_HCIUART_QCA=y
+CONFIG_BT_QCOMSMD=m
+CONFIG_CFG80211=m
+CONFIG_CFG80211_WEXT=y
+CONFIG_MAC80211=m
+CONFIG_MAC80211_LEDS=y
+CONFIG_RFKILL=y
+CONFIG_WCN36XX=m
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_NFC=m
+CONFIG_NFC_NCI=m
+CONFIG_NFC_S3FWRN5_I2C=m
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_IOV=y
+CONFIG_PCI_PASID=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_PCI_AARDVARK=y
+CONFIG_PCI_TEGRA=y
+CONFIG_PCIE_RCAR_HOST=y
+CONFIG_PCIE_RCAR_EP=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_XGENE=y
+CONFIG_PCIE_ALTERA=y
+CONFIG_PCIE_ALTERA_MSI=y
+CONFIG_PCI_HOST_THUNDER_PEM=y
+CONFIG_PCI_HOST_THUNDER_ECAM=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PCIE_BRCMSTB=m
+CONFIG_PCI_IMX6=y
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCIE_LAYERSCAPE_GEN4=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCIE_HISI_STB=y
+CONFIG_PCIE_TEGRA194_HOST=m
+CONFIG_PCIE_VISCONTI_HOST=y
+CONFIG_PCI_ENDPOINT=y
+CONFIG_PCI_ENDPOINT_CONFIGFS=y
+CONFIG_PCI_EPF_TEST=m
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_HISILICON_LPC=y
+CONFIG_FSL_MC_BUS=y
+CONFIG_TEGRA_ACONNECT=m
+CONFIG_GNSS=m
+CONFIG_GNSS_MTK_SERIAL=m
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_SST25L=y
+CONFIG_MTD_RAW_NAND=y
+CONFIG_MTD_NAND_DENALI_DT=y
+CONFIG_MTD_NAND_MARVELL=y
+CONFIG_MTD_NAND_FSL_IFC=y
+CONFIG_MTD_NAND_QCOM=y
+CONFIG_MTD_SPI_NOR=y
+CONFIG_MTK_DEVAPC=m
+CONFIG_SPI_CADENCE_QUADSPI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_NVME=m
+CONFIG_QCOM_COINCELL=m
+CONFIG_QCOM_FASTRPC=m
+CONFIG_SRAM=y
+CONFIG_PCI_ENDPOINT_TEST=m
+CONFIG_EEPROM_AT24=m
+CONFIG_EEPROM_AT25=m
+CONFIG_UACCE=m
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_HISI_SAS=y
+CONFIG_SCSI_HISI_SAS_PCI=y
+CONFIG_MEGARAID_SAS=y
+CONFIG_SCSI_MPT3SAS=m
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_HISI=y
+CONFIG_SCSI_UFS_EXYNOS=y
+CONFIG_SCSI_UFS_CRYPTO=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
+CONFIG_AHCI_MVEBU=y
+CONFIG_AHCI_XGENE=y
+CONFIG_AHCI_QORIQ=y
+CONFIG_SATA_SIL24=y
+CONFIG_SATA_RCAR=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_NETDEVICES=y
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_TUN=y
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=y
+CONFIG_NET_DSA_MSCC_FELIX=m
+CONFIG_AMD_XGBE=y
+CONFIG_NET_XGENE=y
+CONFIG_ATL1C=m
+CONFIG_BCMGENET=m
+CONFIG_BNX2X=m
+CONFIG_MACB=y
+CONFIG_THUNDER_NIC_PF=y
+CONFIG_FEC=y
+CONFIG_FSL_FMAN=y
+CONFIG_FSL_DPAA_ETH=y
+CONFIG_FSL_DPAA2_ETH=y
+CONFIG_FSL_ENETC=y
+CONFIG_FSL_ENETC_VF=y
+CONFIG_FSL_ENETC_QOS=y
+CONFIG_HIX5HD2_GMAC=y
+CONFIG_HNS_DSAF=y
+CONFIG_HNS_ENET=y
+CONFIG_HNS3=y
+CONFIG_HNS3_HCLGE=y
+CONFIG_HNS3_ENET=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=y
+CONFIG_MVNETA=y
+CONFIG_MVPP2=y
+CONFIG_SKY2=y
+CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
+CONFIG_QCOM_EMAC=m
+CONFIG_RMNET=m
+CONFIG_SH_ETH=y
+CONFIG_RAVB=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_SNI_AVE=y
+CONFIG_SNI_NETSEC=y
+CONFIG_STMMAC_ETH=m
+CONFIG_TI_K3_AM65_CPSW_NUSS=y
+CONFIG_QCOM_IPA=m
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
+CONFIG_MDIO_BUS_MUX_MULTIPLEXER=y
+CONFIG_AQUANTIA_PHY=y
+CONFIG_BCM54140_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_MARVELL_10G_PHY=m
+CONFIG_MESON_GXL_PHY=m
+CONFIG_MICREL_PHY=y
+CONFIG_MICROSEMI_PHY=y
+CONFIG_AT803X_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_ROCKCHIP_PHY=y
+CONFIG_DP83867_PHY=y
+CONFIG_VITESSE_PHY=y
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_LAN78XX=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SR9800=m
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_ATH10K=m
+CONFIG_ATH10K_PCI=m
+CONFIG_ATH10K_SNOC=m
+CONFIG_WCN36XX=m
+CONFIG_ATH11K=m
+CONFIG_ATH11K_AHB=m
+CONFIG_ATH11K_PCI=m
+CONFIG_BRCMFMAC=m
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_PCIE=m
+CONFIG_WL18XX=m
+CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_SNVS_PWRKEY=m
+CONFIG_KEYBOARD_IMX_SC_KEY=m
+CONFIG_KEYBOARD_CROS_EC=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_TOUCHSCREEN_ATMEL_MXT=m
+CONFIG_TOUCHSCREEN_GOODIX=m
+CONFIG_TOUCHSCREEN_EDT_FT5X06=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_PM8941_PWRKEY=y
+CONFIG_INPUT_PM8XXX_VIBRATOR=m
+CONFIG_INPUT_PWM_BEEPER=m
+CONFIG_INPUT_PWM_VIBRA=m
+CONFIG_INPUT_HISI_POWERKEY=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_BCM2835AUX=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_OMAP=y
+CONFIG_SERIAL_8250_MT6577=y
+CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_MESON=y
+CONFIG_SERIAL_MESON_CONSOLE=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
+CONFIG_SERIAL_TEGRA_TCU=y
+CONFIG_SERIAL_IMX=y
+CONFIG_SERIAL_IMX_CONSOLE=y
+CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_SERIAL_QCOM_GENI=y
+CONFIG_SERIAL_QCOM_GENI_CONSOLE=y
+CONFIG_SERIAL_XILINX_PS_UART=y
+CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
+CONFIG_SERIAL_FSL_LPUART=y
+CONFIG_SERIAL_FSL_LPUART_CONSOLE=y
+CONFIG_SERIAL_FSL_LINFLEXUART=y
+CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE=y
+CONFIG_SERIAL_MVEBU_UART=y
+CONFIG_SERIAL_OWL=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_IPMI_HANDLER=m
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_TCG_TPM=y
+CONFIG_TCG_TIS_I2C_INFINEON=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
+CONFIG_I2C_BCM2835=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_GPIO=m
+CONFIG_I2C_IMX=y
+CONFIG_I2C_IMX_LPI2C=y
+CONFIG_I2C_MESON=y
+CONFIG_I2C_MT65XX=y
+CONFIG_I2C_MV64XXX=y
+CONFIG_I2C_OMAP=y
+CONFIG_I2C_OWL=y
+CONFIG_I2C_PXA=y
+CONFIG_I2C_QCOM_CCI=m
+CONFIG_I2C_QCOM_GENI=m
+CONFIG_I2C_QUP=y
+CONFIG_I2C_RIIC=y
+CONFIG_I2C_RK3X=y
+CONFIG_I2C_S3C2410=y
+CONFIG_I2C_SH_MOBILE=y
+CONFIG_I2C_TEGRA=y
+CONFIG_I2C_UNIPHIER_F=y
+CONFIG_I2C_RCAR=y
+CONFIG_I2C_CROS_EC_TUNNEL=y
+CONFIG_SPI=y
+CONFIG_SPI_ARMADA_3700=y
+CONFIG_SPI_BCM2835=m
+CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_DESIGNWARE=m
+CONFIG_SPI_DW_DMA=y
+CONFIG_SPI_DW_MMIO=m
+CONFIG_SPI_FSL_LPSPI=y
+CONFIG_SPI_FSL_QUADSPI=y
+CONFIG_SPI_NXP_FLEXSPI=y
+CONFIG_SPI_IMX=m
+CONFIG_SPI_FSL_DSPI=y
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
+CONFIG_SPI_ORION=y
+CONFIG_SPI_PL022=y
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_RPCIF=m
+CONFIG_SPI_QCOM_QSPI=m
+CONFIG_SPI_QUP=y
+CONFIG_SPI_QCOM_GENI=m
+CONFIG_SPI_S3C64XX=y
+CONFIG_SPI_SH_MSIOF=m
+CONFIG_SPI_SUN6I=y
+CONFIG_SPI_SPIDEV=m
+CONFIG_SPMI=y
+CONFIG_PINCTRL_SINGLE=y
+CONFIG_PINCTRL_MAX77620=y
+CONFIG_PINCTRL_OWL=y
+CONFIG_PINCTRL_S700=y
+CONFIG_PINCTRL_S900=y
+CONFIG_PINCTRL_IMX8MM=y
+CONFIG_PINCTRL_IMX8MN=y
+CONFIG_PINCTRL_IMX8MP=y
+CONFIG_PINCTRL_IMX8MQ=y
+CONFIG_PINCTRL_IMX8QM=y
+CONFIG_PINCTRL_IMX8QXP=y
+CONFIG_PINCTRL_IMX8DXL=y
+CONFIG_PINCTRL_IMX8ULP=y
+CONFIG_PINCTRL_MSM=y
+CONFIG_PINCTRL_IPQ8074=y
+CONFIG_PINCTRL_IPQ6018=y
+CONFIG_PINCTRL_MSM8916=y
+CONFIG_PINCTRL_MSM8994=y
+CONFIG_PINCTRL_MSM8996=y
+CONFIG_PINCTRL_MSM8998=y
+CONFIG_PINCTRL_QCS404=y
+CONFIG_PINCTRL_QDF2XXX=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_SC7180=y
+CONFIG_PINCTRL_SC7280=y
+CONFIG_PINCTRL_SDM845=y
+CONFIG_PINCTRL_SM8150=y
+CONFIG_PINCTRL_SM8250=y
+CONFIG_PINCTRL_SM8350=y
+<<<<<<<
+CONFIG_PINCTRL_LPASS_LPI=y
+=======
+CONFIG_PINCTRL_SM8450=y
+CONFIG_PINCTRL_LPASS_LPI=m
+>>>>>>>
+CONFIG_GPIO_ALTERA=m
+CONFIG_GPIO_DAVINCI=y
+CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_MB86S7X=y
+CONFIG_GPIO_MPC8XXX=y
+CONFIG_GPIO_MXC=y
+CONFIG_GPIO_PL061=y
+CONFIG_GPIO_RCAR=y
+CONFIG_GPIO_UNIPHIER=y
+CONFIG_GPIO_VISCONTI=y
+CONFIG_GPIO_WCD934X=m
+CONFIG_GPIO_XGENE=y
+CONFIG_GPIO_XGENE_SB=y
+CONFIG_GPIO_MAX732X=y
+CONFIG_GPIO_PCA953X=y
+CONFIG_GPIO_PCA953X_IRQ=y
+CONFIG_GPIO_BD9571MWV=m
+CONFIG_GPIO_MAX77620=y
+CONFIG_GPIO_SL28CPLD=m
+CONFIG_POWER_AVS=y
+CONFIG_QCOM_CPR=y
+CONFIG_ROCKCHIP_IODOMAIN=y
+CONFIG_POWER_RESET_MSM=y
+CONFIG_POWER_RESET_QCOM_PON=m
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_SYSCON_REBOOT_MODE=y
+CONFIG_BATTERY_SBS=m
+CONFIG_BATTERY_BQ27XXX=y
+CONFIG_SENSORS_ARM_SCMI=y
+CONFIG_BATTERY_MAX17042=m
+CONFIG_CHARGER_BQ25890=m
+CONFIG_CHARGER_BQ25980=m
+CONFIG_SENSORS_ARM_SCPI=y
+CONFIG_SENSORS_JC42=m
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_PWM_FAN=m
+CONFIG_SENSORS_RASPBERRYPI_HWMON=m
+CONFIG_SENSORS_SL28CPLD=m
+CONFIG_SENSORS_INA2XX=m
+CONFIG_SENSORS_INA3221=m
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_CPU_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_QORIQ_THERMAL=m
+CONFIG_SUN8I_THERMAL=y
+CONFIG_IMX_SC_THERMAL=m
+CONFIG_IMX8MM_THERMAL=m
+CONFIG_ROCKCHIP_THERMAL=m
+CONFIG_RCAR_THERMAL=y
+CONFIG_RCAR_GEN3_THERMAL=y
+CONFIG_ARMADA_THERMAL=y
+CONFIG_BCM2711_THERMAL=m
+CONFIG_BCM2835_THERMAL=m
+CONFIG_BRCMSTB_THERMAL=m
+CONFIG_EXYNOS_THERMAL=y
+CONFIG_TEGRA_BPMP_THERMAL=m
+CONFIG_TEGRA_SOCTHERM=m
+CONFIG_QCOM_TSENS=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=m
+CONFIG_QCOM_LMH=m
+CONFIG_QCOM_SPMI_ADC_TM5=m
+CONFIG_UNIPHIER_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_SL28CPLD_WATCHDOG=m
+CONFIG_PM8916_WATCHDOG=y
+CONFIG_ARM_SP805_WATCHDOG=y
+CONFIG_ARM_SBSA_WATCHDOG=y
+CONFIG_ARM_SMC_WATCHDOG=y
+CONFIG_S3C2410_WATCHDOG=y
+CONFIG_DW_WATCHDOG=y
+CONFIG_SUNXI_WATCHDOG=m
+CONFIG_IMX2_WDT=y
+CONFIG_IMX_SC_WDT=m
+CONFIG_QCOM_WDT=m
+CONFIG_MESON_GXBB_WATCHDOG=m
+CONFIG_MESON_WATCHDOG=m
+CONFIG_RENESAS_WDT=y
+CONFIG_UNIPHIER_WATCHDOG=y
+CONFIG_BCM2835_WDT=y
+CONFIG_MFD_ALTERA_SYSMGR=y
+CONFIG_MFD_BD9571MWV=y
+CONFIG_MFD_AXP20X_I2C=y
+CONFIG_MFD_AXP20X_RSB=y
+CONFIG_MFD_EXYNOS_LPASS=m
+CONFIG_MFD_HI6421_PMIC=y
+CONFIG_MFD_HI655X_PMIC=y
+CONFIG_MFD_MAX77620=y
+CONFIG_MFD_MT6397=y
+CONFIG_MFD_QCOM_QCA639X=y
+CONFIG_MFD_QCOM_RPM=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_RK808=y
+CONFIG_MFD_SEC_CORE=y
+CONFIG_MFD_SL28CPLD=y
+CONFIG_MFD_ROHM_BD718XX=y
+CONFIG_MFD_WCD934X=m
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_AXP20X=y
+CONFIG_REGULATOR_BD718XX=y
+CONFIG_REGULATOR_BD9571MWV=y
+CONFIG_REGULATOR_FAN53555=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_HI6421V530=y
+CONFIG_REGULATOR_HI655X=y
+CONFIG_REGULATOR_MAX77620=y
+CONFIG_REGULATOR_MAX8973=y
+CONFIG_REGULATOR_MP8859=y
+CONFIG_REGULATOR_MT6358=y
+CONFIG_REGULATOR_MT6397=y
+CONFIG_REGULATOR_PCA9450=y
+CONFIG_REGULATOR_PF8X00=y
+CONFIG_REGULATOR_PFUZE100=y
+CONFIG_REGULATOR_PWM=y
+CONFIG_REGULATOR_QCOM_RPMH=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
+CONFIG_REGULATOR_QCOM_USB_VBUS=m
+CONFIG_REGULATOR_RK808=y
+CONFIG_REGULATOR_S2MPS11=y
+CONFIG_REGULATOR_TPS65132=m
+CONFIG_REGULATOR_VCTRL=m
+CONFIG_RC_CORE=m
+CONFIG_RC_DECODERS=y
+CONFIG_RC_DEVICES=y
+CONFIG_IR_MESON=m
+CONFIG_IR_SUNXI=m
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_SDR_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_MEDIA_PLATFORM_SUPPORT=y
+# CONFIG_DVB_NET is not set
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_RCAR_CSI2=m
+CONFIG_VIDEO_RCAR_VIN=m
+CONFIG_VIDEO_SUN6I_CSI=m
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
+CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
+CONFIG_VIDEO_SAMSUNG_EXYNOS_GSC=m
+CONFIG_VIDEO_RENESAS_FDP1=m
+CONFIG_VIDEO_RENESAS_FCP=m
+CONFIG_VIDEO_RENESAS_VSP1=m
+CONFIG_VIDEO_QCOM_VENUS=m
+CONFIG_SDR_PLATFORM_DRIVERS=y
+CONFIG_VIDEO_RCAR_DRIF=m
+CONFIG_VIDEO_IMX219=m
+CONFIG_VIDEO_OV5640=m
+CONFIG_VIDEO_OV5645=m
+CONFIG_VIDEO_QCOM_CAMSS=m
+CONFIG_VIDEO_QCOM_VENUS=m
+CONFIG_DRM=m
+CONFIG_DRM_I2C_NXP_TDA998X=m
+CONFIG_DRM_MALI_DISPLAY=m
+CONFIG_DRM_NOUVEAU=m
+CONFIG_DRM_EXYNOS=m
+CONFIG_DRM_EXYNOS5433_DECON=y
+CONFIG_DRM_EXYNOS7_DECON=y
+CONFIG_DRM_EXYNOS_DSI=y
+# CONFIG_DRM_EXYNOS_DP is not set
+CONFIG_DRM_EXYNOS_HDMI=y
+CONFIG_DRM_EXYNOS_MIC=y
+CONFIG_DRM_ROCKCHIP=m
+CONFIG_ROCKCHIP_ANALOGIX_DP=y
+CONFIG_ROCKCHIP_CDN_DP=y
+CONFIG_ROCKCHIP_DW_HDMI=y
+CONFIG_ROCKCHIP_DW_MIPI_DSI=y
+CONFIG_ROCKCHIP_INNO_HDMI=y
+CONFIG_ROCKCHIP_LVDS=y
+CONFIG_DRM_RCAR_DU=m
+CONFIG_DRM_RCAR_DW_HDMI=m
+CONFIG_DRM_SUN4I=m
+CONFIG_DRM_SUN6I_DSI=m
+CONFIG_DRM_SUN8I_DW_HDMI=m
+CONFIG_DRM_SUN8I_MIXER=m
+CONFIG_DRM_MSM=m
+CONFIG_DRM_TEGRA=m
+CONFIG_DRM_PANEL_LVDS=m
+CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_PANEL_EDP=m
+CONFIG_DRM_PANEL_BOE_TV101WUM_NL6=m
+CONFIG_DRM_PANEL_MANTIX_MLAF057WE51=m
+CONFIG_DRM_PANEL_RAYDIUM_RM67191=m
+CONFIG_DRM_PANEL_SITRONIX_ST7703=m
+CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA=m
+CONFIG_DRM_DISPLAY_CONNECTOR=m
+CONFIG_DRM_LONTIUM_LT8912B=m
+CONFIG_DRM_NWL_MIPI_DSI=m
+CONFIG_DRM_LONTIUM_LT9611=m
+CONFIG_DRM_PARADE_PS8640=m
+CONFIG_DRM_SII902X=m
+CONFIG_DRM_SIMPLE_BRIDGE=m
+CONFIG_DRM_THINE_THC63LVD1024=m
+CONFIG_DRM_TI_SN65DSI86=m
+CONFIG_DRM_LONTIUM_LT9611UXC=m
+CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA=m
+CONFIG_DRM_LONTIUM_LT9611=m
+CONFIG_DRM_LONTIUM_LT9611UXC=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_I2C_ADV7511_AUDIO=y
+CONFIG_DRM_DW_HDMI_AHB_AUDIO=m
+CONFIG_DRM_DW_HDMI_CEC=m
+CONFIG_DRM_IMX_DCSS=m
+CONFIG_DRM_VC4=m
+CONFIG_DRM_ETNAVIV=m
+CONFIG_DRM_HISI_HIBMC=m
+CONFIG_DRM_HISI_KIRIN=m
+CONFIG_DRM_MEDIATEK=m
+CONFIG_DRM_MEDIATEK_HDMI=m
+CONFIG_DRM_MXSFB=m
+CONFIG_DRM_MESON=m
+CONFIG_DRM_PL111=m
+CONFIG_DRM_LIMA=m
+CONFIG_DRM_PANFROST=m
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_EFI=y
+CONFIG_BACKLIGHT_PWM=m
+CONFIG_BACKLIGHT_LP855X=m
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_HDA_TEGRA=m
+CONFIG_SND_HDA_CODEC_HDMI=m
+CONFIG_SND_SOC=y
+CONFIG_SND_BCM2835_SOC_I2S=m
+CONFIG_SND_SOC_FSL_SAI=m
+CONFIG_SND_SOC_FSL_ASRC=m
+CONFIG_SND_SOC_FSL_MICFIL=m
+CONFIG_SND_SOC_FSL_EASRC=m
+CONFIG_SND_IMX_SOC=m
+CONFIG_SND_SOC_IMX_SGTL5000=m
+CONFIG_SND_SOC_IMX_SPDIF=m
+CONFIG_SND_SOC_IMX_AUDMIX=m
+CONFIG_SND_SOC_FSL_ASOC_CARD=m
+CONFIG_SND_MESON_AXG_SOUND_CARD=m
+CONFIG_SND_MESON_GX_SOUND_CARD=m
+CONFIG_SND_SOC_QCOM=m
+CONFIG_SND_SOC_APQ8016_SBC=m
+CONFIG_SND_SOC_MSM8916_WCD_ANALOG=m
+CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=m
+CONFIG_SND_SOC_MSM8996=m
+CONFIG_SND_SOC_QDSP6=m
+CONFIG_SND_SOC_SDM845=m
+CONFIG_SND_SOC_SM8250=m
+CONFIG_SND_SOC_ROCKCHIP=m
+CONFIG_SND_SOC_ROCKCHIP_SPDIF=m
+CONFIG_SND_SOC_ROCKCHIP_RT5645=m
+CONFIG_SND_SOC_RK3399_GRU_SOUND=m
+CONFIG_SND_SOC_SAMSUNG=y
+CONFIG_SND_SOC_RCAR=m
+CONFIG_SND_SOC_RZ=m
+CONFIG_SND_SUN4I_I2S=m
+CONFIG_SND_SUN4I_SPDIF=m
+CONFIG_SND_SOC_TEGRA=m
+CONFIG_SND_SOC_TEGRA210_AHUB=m
+CONFIG_SND_SOC_TEGRA210_DMIC=m
+CONFIG_SND_SOC_TEGRA210_I2S=m
+CONFIG_SND_SOC_TEGRA186_DSPK=m
+CONFIG_SND_SOC_TEGRA210_ADMAIF=m
+CONFIG_SND_SOC_TEGRA210_MVC=m
+CONFIG_SND_SOC_TEGRA210_SFC=m
+CONFIG_SND_SOC_TEGRA210_AMX=m
+CONFIG_SND_SOC_TEGRA210_ADX=m
+CONFIG_SND_SOC_TEGRA210_MIXER=m
+CONFIG_SND_SOC_TEGRA_AUDIO_GRAPH_CARD=m
+CONFIG_SND_SOC_AK4613=m
+CONFIG_SND_SOC_ES7134=m
+CONFIG_SND_SOC_ES7241=m
+CONFIG_SND_SOC_GTM601=m
+CONFIG_SND_SOC_MSM8916_WCD_ANALOG=m
+CONFIG_SND_SOC_MSM8916_WCD_DIGITAL=m
+CONFIG_SND_SOC_PCM3168A_I2C=m
+CONFIG_SND_SOC_RT5659=m
+CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m
+CONFIG_SND_SOC_SIMPLE_MUX=m
+CONFIG_SND_SOC_TAS571X=m
+CONFIG_SND_SOC_TLV320AIC32X4_I2C=m
+CONFIG_SND_SOC_WCD934X=m
+CONFIG_SND_SOC_WM8904=m
+CONFIG_SND_SOC_WM8960=m
+CONFIG_SND_SOC_WM8962=m
+CONFIG_SND_SOC_WM8978=m
+CONFIG_SND_SOC_WSA881X=m
+CONFIG_SND_SOC_LPASS_WSA_MACRO=m
+CONFIG_SND_SOC_LPASS_VA_MACRO=m
+CONFIG_SND_SIMPLE_CARD=m
+CONFIG_SND_AUDIO_GRAPH_CARD=m
+CONFIG_HID_MULTITOUCH=m
+CONFIG_I2C_HID_ACPI=m
+CONFIG_I2C_HID_OF=m
+CONFIG_I2C_HID=m
+CONFIG_USB=y
+CONFIG_USB_CONN_GPIO=m
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PCI=m
+CONFIG_USB_XHCI_PCI_RENESAS=m
+CONFIG_USB_XHCI_HCD=m
+CONFIG_USB_XHCI_TEGRA=y
+CONFIG_USB_XHCI_PCI=m
+CONFIG_USB_XHCI_PCI_RENESAS=m
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EXYNOS=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_EXYNOS=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_RENESAS_USBHS_HCD=m
+CONFIG_USB_RENESAS_USBHS=m
+CONFIG_USB_ACM=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_MTU3=y
+CONFIG_USB_MUSB_HDRC=y
+CONFIG_USB_MUSB_SUNXI=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_ULPI=y
+CONFIG_USB_DWC2=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_HSIC_USB3503=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_RENESAS_USBHS_UDC=m
+CONFIG_USB_RENESAS_USB3=m
+CONFIG_USB_TEGRA_XUDC=m
+CONFIG_USB_CONFIGFS=m
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ACM=y
+CONFIG_USB_CONFIGFS_OBEX=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_ECM=y
+CONFIG_USB_CONFIGFS_ECM_SUBSET=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_EEM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_TYPEC=y
+CONFIG_TYPEC_TCPM=m
+CONFIG_TYPEC_TCPCI=m
+CONFIG_TYPEC_FUSB302=m
+CONFIG_TYPEC_HD3SS3220=m
+CONFIG_TYPEC_TPS6598X=m
+CONFIG_TYPEC_QCOM_PMIC=m
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_ACPI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_MMC_SDHCI_OF_ESDHC=y
+CONFIG_MMC_SDHCI_CADENCE=y
+CONFIG_MMC_SDHCI_ESDHC_IMX=y
+CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_SDHCI_F_SDH30=y
+CONFIG_MMC_MESON_GX=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_MMC_SPI=y
+CONFIG_MMC_SDHI=y
+CONFIG_MMC_UNIPHIER=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_EXYNOS=y
+CONFIG_MMC_DW_HI3798CV200=y
+CONFIG_MMC_DW_K3=y
+CONFIG_MMC_DW_ROCKCHIP=y
+CONFIG_MMC_SUNXI=y
+CONFIG_MMC_BCM2835=y
+CONFIG_MMC_MTK=y
+CONFIG_MMC_SDHCI_XENON=y
+CONFIG_MMC_SDHCI_AM654=y
+CONFIG_MMC_OWL=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_LM3692X=m
+CONFIG_LEDS_PCA9532=m
+CONFIG_LEDS_CLASS_MULTICOLOR=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_PWM=y
+CONFIG_LEDS_QCOM_LPG=y
+CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_CPU=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_LEDS_TRIGGER_PANIC=y
+CONFIG_EDAC=y
+CONFIG_EDAC_GHES=y
+CONFIG_EDAC_LAYERSCAPE=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_DS1307=m
+CONFIG_RTC_DRV_HYM8563=m
+CONFIG_RTC_DRV_MAX77686=y
+CONFIG_RTC_DRV_RK808=m
+CONFIG_RTC_DRV_PCF85063=m
+CONFIG_RTC_DRV_PCF85363=m
+CONFIG_RTC_DRV_M41T80=m
+CONFIG_RTC_DRV_RX8581=m
+CONFIG_RTC_DRV_RV3028=m
+CONFIG_RTC_DRV_RV8803=m
+CONFIG_RTC_DRV_S5M=y
+CONFIG_RTC_DRV_DS3232=y
+CONFIG_RTC_DRV_PCF2127=m
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_CROS_EC=y
+CONFIG_RTC_DRV_FSL_FTM_ALARM=m
+CONFIG_RTC_DRV_S3C=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_RTC_DRV_SUN6I=y
+CONFIG_RTC_DRV_ARMADA38X=y
+CONFIG_RTC_DRV_PM8XXX=m
+CONFIG_RTC_DRV_TEGRA=y
+CONFIG_RTC_DRV_SNVS=m
+CONFIG_RTC_DRV_IMX_SC=m
+CONFIG_RTC_DRV_XGENE=y
+CONFIG_DMADEVICES=y
+CONFIG_DMA_BCM2835=y
+CONFIG_DMA_SUN6I=m
+CONFIG_FSL_EDMA=y
+CONFIG_IMX_SDMA=y
+CONFIG_K3_DMA=y
+CONFIG_MV_XOR=y
+CONFIG_MV_XOR_V2=y
+CONFIG_OWL_DMA=y
+CONFIG_PL330_DMA=y
+CONFIG_TEGRA20_APB_DMA=y
+CONFIG_TEGRA210_ADMA=m
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_QCOM_HIDMA_MGMT=y
+CONFIG_QCOM_HIDMA=y
+CONFIG_RCAR_DMAC=y
+CONFIG_RENESAS_USB_DMAC=m
+CONFIG_RZ_DMAC=y
+CONFIG_TI_K3_UDMA=y
+CONFIG_TI_K3_UDMA_GLUE_LAYER=y
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_XEN_GNTDEV=y
+CONFIG_XEN_GRANT_DEV_ALLOC=y
+CONFIG_MFD_CROS_EC_DEV=y
+CONFIG_STAGING=y
+CONFIG_STAGING_MEDIA=y
+CONFIG_VIDEO_HANTRO=m
+CONFIG_VIDEO_IMX_MEDIA=m
+CONFIG_CHROME_PLATFORMS=y
+CONFIG_CROS_EC=y
+CONFIG_CROS_EC_I2C=y
+CONFIG_CROS_EC_SPI=y
+CONFIG_CROS_EC_CHARDEV=m
+CONFIG_COMMON_CLK_SCMI=y
+CONFIG_COMMON_CLK_RK808=y
+CONFIG_COMMON_CLK_SCPI=y
+CONFIG_COMMON_CLK_CS2000_CP=y
+CONFIG_COMMON_CLK_FSL_SAI=y
+CONFIG_COMMON_CLK_S2MPS11=y
+CONFIG_COMMON_CLK_PWM=y
+CONFIG_COMMON_CLK_VC5=y
+CONFIG_COMMON_CLK_ZYNQMP=y
+CONFIG_COMMON_CLK_BD718XX=m
+CONFIG_CLK_RASPBERRYPI=m
+CONFIG_CLK_IMX8MM=y
+CONFIG_CLK_IMX8MN=y
+CONFIG_CLK_IMX8MP=y
+CONFIG_CLK_IMX8MQ=y
+CONFIG_CLK_IMX8QXP=y
+CONFIG_CLK_IMX8ULP=y
+CONFIG_TI_SCI_CLK=y
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_A53PLL=y
+CONFIG_QCOM_CLK_APCS_MSM8916=y
+CONFIG_QCOM_CLK_APCC_MSM8996=y
+CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_QCOM_CLK_RPMH=y
+CONFIG_IPQ_GCC_8074=y
+CONFIG_IPQ_GCC_6018=y
+CONFIG_MSM_GCC_8916=y
+CONFIG_MSM_GCC_8994=y
+CONFIG_MSM_MMCC_8996=y
+CONFIG_MSM_GCC_8998=y
+CONFIG_QCS_GCC_404=y
+CONFIG_SC_GCC_7180=y
+CONFIG_SC_GCC_7280=y
+CONFIG_SDM_CAMCC_845=m
+CONFIG_SDM_GCC_845=y
+CONFIG_SDM_GPUCC_845=y
+CONFIG_SDM_VIDEOCC_845=y
+CONFIG_SDM_DISPCC_845=y
+CONFIG_SM_GCC_8150=y
+CONFIG_SM_GCC_8250=y
+CONFIG_SM_GCC_8350=y
+CONFIG_SM_GCC_8450=y
+CONFIG_SM_GPUCC_8150=y
+CONFIG_SM_GPUCC_8250=y
+CONFIG_SM_DISPCC_8250=y
+CONFIG_SM_VIDEOCC_8250=y
+CONFIG_QCOM_HFPLL=y
+CONFIG_CLK_GFM_LPASS_SM8250=m
+CONFIG_CLK_RCAR_USB2_CLOCK_SEL=y
+CONFIG_CLK_GFM_LPASS_SM8250=y
+CONFIG_HWSPINLOCK=y
+CONFIG_SDM_GPUCC_845=y
+CONFIG_SDM_DISPCC_845=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_ARM_MHU=y
+CONFIG_IMX_MBOX=y
+CONFIG_PLATFORM_MHU=y
+CONFIG_BCM2835_MBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_QCOM_IPCC=y
+CONFIG_ROCKCHIP_IOMMU=y
+CONFIG_TEGRA_IOMMU_SMMU=y
+CONFIG_ARM_SMMU=y
+CONFIG_ARM_SMMU_V3=y
+CONFIG_MTK_IOMMU=y
+CONFIG_QCOM_IOMMU=y
+CONFIG_REMOTEPROC=y
+CONFIG_QCOM_Q6V5_MSS=m
+CONFIG_QCOM_Q6V5_PAS=m
+CONFIG_QCOM_SYSMON=m
+CONFIG_QCOM_WCNSS_PIL=m
+CONFIG_RPMSG_CHAR=m
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=m
+CONFIG_RPMSG_QCOM_SMD=y
+CONFIG_SOUNDWIRE=m
+CONFIG_SOUNDWIRE_QCOM=m
+CONFIG_OWL_PM_DOMAINS=y
+CONFIG_RASPBERRYPI_POWER=y
+CONFIG_FSL_DPAA=y
+CONFIG_FSL_MC_DPIO=y
+CONFIG_FSL_RCPM=y
+CONFIG_MTK_PMIC_WRAP=y
+CONFIG_MAILBOX=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_AOSS_QMP=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_QCOM_RMTFS_MEM=m
+CONFIG_QCOM_RPMH=y
+CONFIG_QCOM_RPMHPD=y
+CONFIG_QCOM_RPMPD=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_QCOM_SMSM=y
+CONFIG_QCOM_SOCINFO=m
+CONFIG_QCOM_WCNSS_CTRL=m
+CONFIG_QCOM_STATS=m
+CONFIG_QCOM_APR=m
+CONFIG_ARCH_R8A774A1=y
+CONFIG_ARCH_R8A774B1=y
+CONFIG_ARCH_R8A774C0=y
+CONFIG_ARCH_R8A774E1=y
+CONFIG_ARCH_R8A77950=y
+CONFIG_ARCH_R8A77951=y
+CONFIG_ARCH_R8A77960=y
+CONFIG_ARCH_R8A77961=y
+CONFIG_ARCH_R8A77965=y
+CONFIG_ARCH_R8A77970=y
+CONFIG_ARCH_R8A77980=y
+CONFIG_ARCH_R8A77990=y
+CONFIG_ARCH_R8A77995=y
+CONFIG_ARCH_R8A779A0=y
+CONFIG_ARCH_R8A779F0=y
+CONFIG_ARCH_R9A07G044=y
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_ARCH_TEGRA_132_SOC=y
+CONFIG_ARCH_TEGRA_210_SOC=y
+CONFIG_ARCH_TEGRA_186_SOC=y
+CONFIG_ARCH_TEGRA_194_SOC=y
+CONFIG_ARCH_TEGRA_234_SOC=y
+CONFIG_TI_SCI_PM_DOMAINS=y
+CONFIG_ARM_IMX_BUS_DEVFREQ=m
+CONFIG_ARM_IMX8M_DDRC_DEVFREQ=m
+CONFIG_EXTCON_PTN5150=m
+CONFIG_EXTCON_USB_GPIO=y
+CONFIG_EXTCON_USBC_CROS_EC=y
+CONFIG_RENESAS_RPCIF=m
+CONFIG_IIO=y
+CONFIG_EXYNOS_ADC=y
+CONFIG_MAX9611=m
+CONFIG_QCOM_SPMI_VADC=m
+CONFIG_QCOM_SPMI_ADC5=m
+CONFIG_ROCKCHIP_SARADC=m
+CONFIG_RZG2L_ADC=m
+CONFIG_IIO_CROS_EC_SENSORS_CORE=m
+CONFIG_IIO_CROS_EC_SENSORS=m
+CONFIG_IIO_ST_LSM6DSX=m
+CONFIG_IIO_CROS_EC_LIGHT_PROX=m
+CONFIG_SENSORS_ISL29018=m
+CONFIG_VCNL4000=m
+CONFIG_IIO_ST_MAGN_3AXIS=m
+CONFIG_IIO_CROS_EC_BARO=m
+CONFIG_MPL3115=m
+CONFIG_PWM=y
+CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
+CONFIG_PWM_IMX27=m
+CONFIG_PWM_MESON=m
+CONFIG_PWM_MTK_DISP=m
+CONFIG_PWM_MEDIATEK=m
+CONFIG_PWM_RCAR=m
+CONFIG_PWM_RENESAS_TPU=m
+CONFIG_PWM_ROCKCHIP=y
+CONFIG_PWM_SAMSUNG=y
+CONFIG_PWM_SL28CPLD=m
+CONFIG_PWM_SUN4I=m
+CONFIG_PWM_TEGRA=m
+CONFIG_PWM_VISCONTI=m
+CONFIG_SL28CPLD_INTC=y
+CONFIG_QCOM_PDC=y
+CONFIG_RESET_IMX7=y
+CONFIG_RESET_QCOM_AOSS=y
+CONFIG_RESET_QCOM_PDC=m
+CONFIG_RESET_RZG2L_USBPHY_CTRL=y
+CONFIG_RESET_TI_SCI=y
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_MIXEL_MIPI_DPHY=m
+CONFIG_PHY_HI6220_USB=y
+CONFIG_PHY_HISTB_COMBPHY=y
+CONFIG_PHY_HISI_INNO_USB2=y
+CONFIG_PHY_MVEBU_CP110_COMPHY=y
+CONFIG_PHY_MTK_TPHY=y
+CONFIG_PHY_QCOM_QMP=y
+CONFIG_PHY_QCOM_QMP_TYPEC=y
+CONFIG_PHY_QCOM_QUSB2=m
+CONFIG_PHY_QCOM_UFS=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_PHY_QCOM_USB_HS_SNPS_28NM=y
+CONFIG_PHY_QCOM_USB_SS=y
+CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2=y
+CONFIG_PHY_RCAR_GEN3_PCIE=y
+CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB3=m
+CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_INNO_HDMI=m
+CONFIG_PHY_ROCKCHIP_INNO_USB2=y
+CONFIG_PHY_ROCKCHIP_INNO_DSIDPHY=m
+CONFIG_PHY_ROCKCHIP_PCIE=m
+CONFIG_PHY_ROCKCHIP_TYPEC=y
+CONFIG_PHY_SAMSUNG_UFS=y
+CONFIG_PHY_UNIPHIER_USB2=y
+CONFIG_PHY_UNIPHIER_USB3=y
+CONFIG_PHY_TEGRA_XUSB=y
+CONFIG_ARM_SMMU_V3_PMU=m
+CONFIG_FSL_IMX8_DDR_PMU=m
+CONFIG_HISI_PMU=y
+CONFIG_QCOM_L2_PMU=y
+CONFIG_QCOM_L3_PMU=y
+CONFIG_NVMEM_IMX_OCOTP=y
+CONFIG_NVMEM_IMX_OCOTP_SCU=y
+CONFIG_QCOM_QFPROM=y
+CONFIG_MTK_EFUSE=y
+CONFIG_ROCKCHIP_EFUSE=y
+CONFIG_NVMEM_SUNXI_SID=y
+CONFIG_UNIPHIER_EFUSE=y
+CONFIG_MESON_EFUSE=m
+CONFIG_NVMEM_RMEM=m
+CONFIG_FPGA=y
+CONFIG_FPGA_MGR_STRATIX10_SOC=m
+CONFIG_FPGA_BRIDGE=m
+CONFIG_ALTERA_FREEZE_BRIDGE=m
+CONFIG_FPGA_REGION=m
+CONFIG_OF_FPGA_REGION=m
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+CONFIG_SLIMBUS=m
+CONFIG_SLIM_QCOM_CTRL=m
+CONFIG_SLIM_QCOM_NGD_CTRL=m
+CONFIG_MUX_MMIO=y
+CONFIG_INTERCONNECT=y
+CONFIG_INTERCONNECT_IMX=m
+CONFIG_INTERCONNECT_IMX8MM=m
+CONFIG_INTERCONNECT_IMX8MN=m
+CONFIG_INTERCONNECT_IMX8MQ=m
+CONFIG_INTERCONNECT_QCOM=y
+CONFIG_INTERCONNECT_QCOM_MSM8916=m
+CONFIG_INTERCONNECT_QCOM_MSM8974=m
+CONFIG_INTERCONNECT_QCOM_OSM_L3=m
+CONFIG_INTERCONNECT_QCOM_SC7280=y
+CONFIG_INTERCONNECT_QCOM_SDM845=y
+CONFIG_INTERCONNECT_QCOM_QCS404=m
+CONFIG_INTERCONNECT_QCOM_SC7180=m
+CONFIG_INTERCONNECT_QCOM_SDM845=m
+CONFIG_INTERCONNECT_QCOM_SM8150=m
+CONFIG_INTERCONNECT_QCOM_SM8250=m
+CONFIG_INTERCONNECT_QCOM_SM8350=m
+CONFIG_INTERCONNECT_QCOM_SM8450=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_BTRFS_FS=m
+CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_FUSE_FS=m
+CONFIG_CUSE=m
+CONFIG_OVERLAY_FS=m
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+CONFIG_SQUASHFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=y
+CONFIG_CRYPTO_DEV_SUN8I_CE=m
+CONFIG_CRYPTO_DEV_FSL_CAAM=m
+CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM=m
+CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_CRYPTO_DEV_QCOM_RNG=m
+CONFIG_CRYPTO_DEV_CCREE=m
+CONFIG_CRYPTO_DEV_HISI_SEC2=m
+CONFIG_CRYPTO_DEV_HISI_ZIP=m
+CONFIG_CRYPTO_DEV_HISI_HPRE=m
+CONFIG_CRYPTO_DEV_HISI_TRNG=m
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=32
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_REDUCED=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
+# CONFIG_FTRACE is not set
+CONFIG_MEMTEST=y
diff --git a/rr-cache/325bb31acf18dbd753731c11ff330dfee205f678/thisimage b/rr-cache/325bb31acf18dbd753731c11ff330dfee205f678/thisimage
index 7271d99..d03cb65 100644
--- a/rr-cache/325bb31acf18dbd753731c11ff330dfee205f678/thisimage
+++ b/rr-cache/325bb31acf18dbd753731c11ff330dfee205f678/thisimage
@@ -275,6 +275,14 @@ config MSM_MMCC_8974
Say Y if you want to support multimedia devices such as display,
graphics, video encode/decode, camera, etc.
+config MSM_GCC_8976
+ tristate "MSM8956/76 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on msm8956/76 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ i2c, USB, SD/eMMC, SATA, PCIe, etc.
+
config MSM_MMCC_8994
tristate "MSM8994 Multimedia Clock Controller"
select MSM_GCC_8994
@@ -574,6 +582,14 @@ config SM_CAMCC_8250
Support for the camera clock controller on SM8250 devices.
Say Y if you want to support camera devices and camera functionality.
+config SDX_GCC_65
+ tristate "SDX65 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SDX65 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SM_DISPCC_8250
tristate "SM8150 and SM8250 Display Clock Controller"
depends on SM_GCC_8150 || SM_GCC_8250
@@ -628,6 +644,14 @@ config SM_GCC_8350
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GCC_8450
+ tristate "SM8450 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SM8450 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SM_GPUCC_8150
tristate "SM8150 Graphics Clock Controller"
select SM_GCC_8150
diff --git a/rr-cache/49b87c48bf2ac9331db89b2d5c14b64e2a07542a/postimage b/rr-cache/49b87c48bf2ac9331db89b2d5c14b64e2a07542a/postimage
new file mode 100644
index 0000000..35e2ee2
--- /dev/null
+++ b/rr-cache/49b87c48bf2ac9331db89b2d5c14b64e2a07542a/postimage
@@ -0,0 +1,117 @@
+# The config is based on running daily CI for enterprise Linux distros to
+# seek regressions on linux-next builds on different bare-metal and virtual
+# platforms. It can be used for example,
+#
+# $ make ARCH=arm64 defconfig debug.config
+#
+# Keep alphabetically sorted inside each section.
+#
+# printk and dmesg options
+#
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_PRINTK_CALLER=y
+CONFIG_PRINTK_TIME=y
+CONFIG_SYMBOLIC_ERRNAME=y
+#
+# Compile-time checks and compiler options
+#
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_FRAME_WARN=2048
+CONFIG_SECTION_MISMATCH_WARN_ONLY=y
+#
+# Generic Kernel Debugging Instruments
+#
+# CONFIG_UBSAN_ALIGNMENT is not set
+# CONFIG_UBSAN_DIV_ZERO is not set
+# CONFIG_UBSAN_TRAP is not set
+# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_FS_ALLOW_ALL=y
+CONFIG_DEBUG_IRQFLAGS=y
+CONFIG_UBSAN=y
+CONFIG_UBSAN_BOOL=y
+CONFIG_UBSAN_BOUNDS=y
+CONFIG_UBSAN_ENUM=y
+CONFIG_UBSAN_SHIFT=y
+CONFIG_UBSAN_UNREACHABLE=y
+#
+# Memory Debugging
+#
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF is not set
+# CONFIG_DEBUG_RODATA_TEST is not set
+# CONFIG_DEBUG_WX is not set
+# CONFIG_KFENCE is not set
+# CONFIG_PAGE_POISONING is not set
+# CONFIG_SLUB_STATS is not set
+CONFIG_PAGE_EXTENSION=y
+CONFIG_PAGE_OWNER=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_PER_CPU_MAPS=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_VIRTUAL=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_VM_PGFLAGS=y
+CONFIG_DEBUG_VM_RB=y
+CONFIG_DEBUG_VM_VMACACHE=y
+CONFIG_GENERIC_PTDUMP=y
+CONFIG_KASAN=y
+CONFIG_KASAN_GENERIC=y
+CONFIG_KASAN_INLINE=y
+CONFIG_KASAN_VMALLOC=y
+CONFIG_PTDUMP_DEBUGFS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+CONFIG_SLUB_DEBUG_ON=y
+#
+# Debug Oops, Lockups and Hangs
+#
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=0
+CONFIG_SOFTLOCKUP_DETECTOR=y
+#
+# Lock Debugging (spinlocks, mutexes, etc...)
+#
+# CONFIG_PROVE_RAW_LOCK_NESTING is not set
+CONFIG_PROVE_LOCKING=y
+#
+# Debug kernel data structures
+#
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+#
+# RCU Debugging
+#
+CONFIG_PROVE_RCU=y
+CONFIG_PROVE_RCU_LIST=y
+#
+# Tracers
+#
+CONFIG_BRANCH_PROFILE_NONE=y
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_FTRACE=y
+CONFIG_FUNCTION_TRACER=y
+
+# Landing team defines
+
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_KASAN=y
+CONFIG_USB_GADGET_DEBUG=y
+CONFIG_USB_GADGET_DEBUG_FILES=y
diff --git a/rr-cache/49b87c48bf2ac9331db89b2d5c14b64e2a07542a/preimage b/rr-cache/49b87c48bf2ac9331db89b2d5c14b64e2a07542a/preimage
new file mode 100644
index 0000000..482c273
--- /dev/null
+++ b/rr-cache/49b87c48bf2ac9331db89b2d5c14b64e2a07542a/preimage
@@ -0,0 +1,121 @@
+<<<<<<<
+# The config is based on running daily CI for enterprise Linux distros to
+# seek regressions on linux-next builds on different bare-metal and virtual
+# platforms. It can be used for example,
+#
+# $ make ARCH=arm64 defconfig debug.config
+#
+# Keep alphabetically sorted inside each section.
+#
+# printk and dmesg options
+#
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_PRINTK_CALLER=y
+CONFIG_PRINTK_TIME=y
+CONFIG_SYMBOLIC_ERRNAME=y
+#
+# Compile-time checks and compiler options
+#
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_FRAME_WARN=2048
+CONFIG_SECTION_MISMATCH_WARN_ONLY=y
+#
+# Generic Kernel Debugging Instruments
+#
+# CONFIG_UBSAN_ALIGNMENT is not set
+# CONFIG_UBSAN_DIV_ZERO is not set
+# CONFIG_UBSAN_TRAP is not set
+# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_FS_ALLOW_ALL=y
+CONFIG_DEBUG_IRQFLAGS=y
+CONFIG_UBSAN=y
+CONFIG_UBSAN_BOOL=y
+CONFIG_UBSAN_BOUNDS=y
+CONFIG_UBSAN_ENUM=y
+CONFIG_UBSAN_SHIFT=y
+CONFIG_UBSAN_UNREACHABLE=y
+#
+# Memory Debugging
+#
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF is not set
+# CONFIG_DEBUG_RODATA_TEST is not set
+# CONFIG_DEBUG_WX is not set
+# CONFIG_KFENCE is not set
+# CONFIG_PAGE_POISONING is not set
+# CONFIG_SLUB_STATS is not set
+CONFIG_PAGE_EXTENSION=y
+CONFIG_PAGE_OWNER=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_PER_CPU_MAPS=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_VIRTUAL=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_VM_PGFLAGS=y
+CONFIG_DEBUG_VM_RB=y
+CONFIG_DEBUG_VM_VMACACHE=y
+CONFIG_GENERIC_PTDUMP=y
+CONFIG_KASAN=y
+CONFIG_KASAN_GENERIC=y
+CONFIG_KASAN_INLINE=y
+CONFIG_KASAN_VMALLOC=y
+CONFIG_PTDUMP_DEBUGFS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+CONFIG_SLUB_DEBUG_ON=y
+#
+# Debug Oops, Lockups and Hangs
+#
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=0
+CONFIG_SOFTLOCKUP_DETECTOR=y
+#
+# Lock Debugging (spinlocks, mutexes, etc...)
+#
+# CONFIG_PROVE_RAW_LOCK_NESTING is not set
+CONFIG_PROVE_LOCKING=y
+#
+# Debug kernel data structures
+#
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+#
+# RCU Debugging
+#
+CONFIG_PROVE_RCU=y
+CONFIG_PROVE_RCU_LIST=y
+#
+# Tracers
+#
+CONFIG_BRANCH_PROFILE_NONE=y
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_FTRACE=y
+CONFIG_FUNCTION_TRACER=y
+=======
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_LOCK_ALLOC=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_PROVE_RCU=y
+CONFIG_SLUB_DEBUG=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_KASAN=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_USB_GADGET_DEBUG=y
+CONFIG_USB_GADGET_DEBUG_FILES=y
+>>>>>>>
diff --git a/rr-cache/5827ea96d82158fcd557fa14f13f88d4f5342a87/thisimage b/rr-cache/5827ea96d82158fcd557fa14f13f88d4f5342a87/thisimage
index 98a8a49..0fe79f6 100644
--- a/rr-cache/5827ea96d82158fcd557fa14f13f88d4f5342a87/thisimage
+++ b/rr-cache/5827ea96d82158fcd557fa14f13f88d4f5342a87/thisimage
@@ -1078,7 +1078,7 @@
};
qfprom@784000 {
- compatible = "qcom,qfprom";
+ compatible = "qcom,sdm845-qfprom", "qcom,qfprom";
reg = <0 0x00784000 0 0x8ff>;
#address-cells = <1>;
#size-cells = <1>;
@@ -4508,7 +4508,6 @@
gpu: gpu@5000000 {
compatible = "qcom,adreno-630.2", "qcom,adreno";
- #stream-id-cells = <16>;
reg = <0 0x5000000 0 0x40000>, <0 0x509e000 0 0x10>;
reg-names = "kgsl_3d0_reg_memory", "cx_mem";
diff --git a/rr-cache/5c4842fdcd0859dbca6e52d6868f065b520bac7f/thisimage b/rr-cache/5c4842fdcd0859dbca6e52d6868f065b520bac7f/thisimage
index c640680..0d0ee87 100644
--- a/rr-cache/5c4842fdcd0859dbca6e52d6868f065b520bac7f/thisimage
+++ b/rr-cache/5c4842fdcd0859dbca6e52d6868f065b520bac7f/thisimage
@@ -379,6 +379,7 @@ CONFIG_MICROSEMI_PHY=y
CONFIG_AT803X_PHY=y
CONFIG_REALTEK_PHY=y
CONFIG_ROCKCHIP_PHY=y
+CONFIG_DP83867_PHY=y
CONFIG_VITESSE_PHY=y
CONFIG_USB_PEGASUS=m
CONFIG_USB_RTL8150=m
@@ -416,6 +417,7 @@ CONFIG_TOUCHSCREEN_EDT_FT5X06=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PM8941_PWRKEY=y
CONFIG_INPUT_PM8XXX_VIBRATOR=m
+CONFIG_INPUT_PWM_BEEPER=m
CONFIG_INPUT_PWM_VIBRA=m
CONFIG_INPUT_HISI_POWERKEY=y
# CONFIG_SERIO_SERPORT is not set
@@ -480,6 +482,7 @@ CONFIG_I2C_QCOM_GENI=m
CONFIG_I2C_QUP=y
CONFIG_I2C_RIIC=y
CONFIG_I2C_RK3X=y
+CONFIG_I2C_S3C2410=y
CONFIG_I2C_SH_MOBILE=y
CONFIG_I2C_TEGRA=y
CONFIG_I2C_UNIPHIER_F=y
@@ -523,6 +526,7 @@ CONFIG_PINCTRL_IMX8MQ=y
CONFIG_PINCTRL_IMX8QM=y
CONFIG_PINCTRL_IMX8QXP=y
CONFIG_PINCTRL_IMX8DXL=y
+CONFIG_PINCTRL_IMX8ULP=y
CONFIG_PINCTRL_MSM=y
CONFIG_PINCTRL_IPQ8074=y
CONFIG_PINCTRL_IPQ6018=y
@@ -574,6 +578,7 @@ CONFIG_BATTERY_MAX17042=m
CONFIG_CHARGER_BQ25890=m
CONFIG_CHARGER_BQ25980=m
CONFIG_SENSORS_ARM_SCPI=y
+CONFIG_SENSORS_JC42=m
CONFIG_SENSORS_LM90=m
CONFIG_SENSORS_PWM_FAN=m
CONFIG_SENSORS_RASPBERRYPI_HWMON=m
@@ -692,6 +697,7 @@ CONFIG_VIDEO_QCOM_VENUS=m
CONFIG_SDR_PLATFORM_DRIVERS=y
CONFIG_VIDEO_RCAR_DRIF=m
CONFIG_VIDEO_IMX219=m
+CONFIG_VIDEO_OV5640=m
CONFIG_VIDEO_OV5645=m
CONFIG_VIDEO_QCOM_CAMSS=m
CONFIG_VIDEO_QCOM_VENUS=m
@@ -824,6 +830,7 @@ CONFIG_SND_SOC_RT5659=m
CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m
CONFIG_SND_SOC_SIMPLE_MUX=m
CONFIG_SND_SOC_TAS571X=m
+CONFIG_SND_SOC_TLV320AIC32X4_I2C=m
CONFIG_SND_SOC_WCD934X=m
CONFIG_SND_SOC_WM8904=m
CONFIG_SND_SOC_WM8960=m
@@ -948,6 +955,7 @@ CONFIG_RTC_DRV_DS1307=m
CONFIG_RTC_DRV_HYM8563=m
CONFIG_RTC_DRV_MAX77686=y
CONFIG_RTC_DRV_RK808=m
+CONFIG_RTC_DRV_PCF85063=m
CONFIG_RTC_DRV_PCF85363=m
CONFIG_RTC_DRV_M41T80=m
CONFIG_RTC_DRV_RX8581=m
@@ -999,6 +1007,7 @@ CONFIG_MFD_CROS_EC_DEV=y
CONFIG_STAGING=y
CONFIG_STAGING_MEDIA=y
CONFIG_VIDEO_HANTRO=m
+CONFIG_VIDEO_IMX_MEDIA=m
CONFIG_CHROME_PLATFORMS=y
CONFIG_CROS_EC=y
CONFIG_CROS_EC_I2C=y
@@ -1020,6 +1029,7 @@ CONFIG_CLK_IMX8MN=y
CONFIG_CLK_IMX8MP=y
CONFIG_CLK_IMX8MQ=y
CONFIG_CLK_IMX8QXP=y
+CONFIG_CLK_IMX8ULP=y
CONFIG_TI_SCI_CLK=y
CONFIG_COMMON_CLK_QCOM=y
CONFIG_QCOM_A53PLL=y
@@ -1116,6 +1126,7 @@ CONFIG_ARCH_R8A77980=y
CONFIG_ARCH_R8A77990=y
CONFIG_ARCH_R8A77995=y
CONFIG_ARCH_R8A779A0=y
+CONFIG_ARCH_R8A779F0=y
CONFIG_ARCH_R9A07G044=y
CONFIG_ROCKCHIP_PM_DOMAINS=y
CONFIG_ARCH_TEGRA_132_SOC=y
@@ -1229,6 +1240,8 @@ CONFIG_SLIM_QCOM_NGD_CTRL=m
CONFIG_MUX_MMIO=y
CONFIG_INTERCONNECT=y
CONFIG_INTERCONNECT_IMX=m
+CONFIG_INTERCONNECT_IMX8MM=m
+CONFIG_INTERCONNECT_IMX8MN=m
CONFIG_INTERCONNECT_IMX8MQ=m
CONFIG_INTERCONNECT_QCOM=y
CONFIG_INTERCONNECT_QCOM_MSM8916=m
diff --git a/rr-cache/5c4842fdcd0859dbca6e52d6868f065b520bac7f/thisimage.1 b/rr-cache/5c4842fdcd0859dbca6e52d6868f065b520bac7f/thisimage.1
index c640680..0d0ee87 100644
--- a/rr-cache/5c4842fdcd0859dbca6e52d6868f065b520bac7f/thisimage.1
+++ b/rr-cache/5c4842fdcd0859dbca6e52d6868f065b520bac7f/thisimage.1
@@ -379,6 +379,7 @@ CONFIG_MICROSEMI_PHY=y
CONFIG_AT803X_PHY=y
CONFIG_REALTEK_PHY=y
CONFIG_ROCKCHIP_PHY=y
+CONFIG_DP83867_PHY=y
CONFIG_VITESSE_PHY=y
CONFIG_USB_PEGASUS=m
CONFIG_USB_RTL8150=m
@@ -416,6 +417,7 @@ CONFIG_TOUCHSCREEN_EDT_FT5X06=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_PM8941_PWRKEY=y
CONFIG_INPUT_PM8XXX_VIBRATOR=m
+CONFIG_INPUT_PWM_BEEPER=m
CONFIG_INPUT_PWM_VIBRA=m
CONFIG_INPUT_HISI_POWERKEY=y
# CONFIG_SERIO_SERPORT is not set
@@ -480,6 +482,7 @@ CONFIG_I2C_QCOM_GENI=m
CONFIG_I2C_QUP=y
CONFIG_I2C_RIIC=y
CONFIG_I2C_RK3X=y
+CONFIG_I2C_S3C2410=y
CONFIG_I2C_SH_MOBILE=y
CONFIG_I2C_TEGRA=y
CONFIG_I2C_UNIPHIER_F=y
@@ -523,6 +526,7 @@ CONFIG_PINCTRL_IMX8MQ=y
CONFIG_PINCTRL_IMX8QM=y
CONFIG_PINCTRL_IMX8QXP=y
CONFIG_PINCTRL_IMX8DXL=y
+CONFIG_PINCTRL_IMX8ULP=y
CONFIG_PINCTRL_MSM=y
CONFIG_PINCTRL_IPQ8074=y
CONFIG_PINCTRL_IPQ6018=y
@@ -574,6 +578,7 @@ CONFIG_BATTERY_MAX17042=m
CONFIG_CHARGER_BQ25890=m
CONFIG_CHARGER_BQ25980=m
CONFIG_SENSORS_ARM_SCPI=y
+CONFIG_SENSORS_JC42=m
CONFIG_SENSORS_LM90=m
CONFIG_SENSORS_PWM_FAN=m
CONFIG_SENSORS_RASPBERRYPI_HWMON=m
@@ -692,6 +697,7 @@ CONFIG_VIDEO_QCOM_VENUS=m
CONFIG_SDR_PLATFORM_DRIVERS=y
CONFIG_VIDEO_RCAR_DRIF=m
CONFIG_VIDEO_IMX219=m
+CONFIG_VIDEO_OV5640=m
CONFIG_VIDEO_OV5645=m
CONFIG_VIDEO_QCOM_CAMSS=m
CONFIG_VIDEO_QCOM_VENUS=m
@@ -824,6 +830,7 @@ CONFIG_SND_SOC_RT5659=m
CONFIG_SND_SOC_SIMPLE_AMPLIFIER=m
CONFIG_SND_SOC_SIMPLE_MUX=m
CONFIG_SND_SOC_TAS571X=m
+CONFIG_SND_SOC_TLV320AIC32X4_I2C=m
CONFIG_SND_SOC_WCD934X=m
CONFIG_SND_SOC_WM8904=m
CONFIG_SND_SOC_WM8960=m
@@ -948,6 +955,7 @@ CONFIG_RTC_DRV_DS1307=m
CONFIG_RTC_DRV_HYM8563=m
CONFIG_RTC_DRV_MAX77686=y
CONFIG_RTC_DRV_RK808=m
+CONFIG_RTC_DRV_PCF85063=m
CONFIG_RTC_DRV_PCF85363=m
CONFIG_RTC_DRV_M41T80=m
CONFIG_RTC_DRV_RX8581=m
@@ -999,6 +1007,7 @@ CONFIG_MFD_CROS_EC_DEV=y
CONFIG_STAGING=y
CONFIG_STAGING_MEDIA=y
CONFIG_VIDEO_HANTRO=m
+CONFIG_VIDEO_IMX_MEDIA=m
CONFIG_CHROME_PLATFORMS=y
CONFIG_CROS_EC=y
CONFIG_CROS_EC_I2C=y
@@ -1020,6 +1029,7 @@ CONFIG_CLK_IMX8MN=y
CONFIG_CLK_IMX8MP=y
CONFIG_CLK_IMX8MQ=y
CONFIG_CLK_IMX8QXP=y
+CONFIG_CLK_IMX8ULP=y
CONFIG_TI_SCI_CLK=y
CONFIG_COMMON_CLK_QCOM=y
CONFIG_QCOM_A53PLL=y
@@ -1116,6 +1126,7 @@ CONFIG_ARCH_R8A77980=y
CONFIG_ARCH_R8A77990=y
CONFIG_ARCH_R8A77995=y
CONFIG_ARCH_R8A779A0=y
+CONFIG_ARCH_R8A779F0=y
CONFIG_ARCH_R9A07G044=y
CONFIG_ROCKCHIP_PM_DOMAINS=y
CONFIG_ARCH_TEGRA_132_SOC=y
@@ -1229,6 +1240,8 @@ CONFIG_SLIM_QCOM_NGD_CTRL=m
CONFIG_MUX_MMIO=y
CONFIG_INTERCONNECT=y
CONFIG_INTERCONNECT_IMX=m
+CONFIG_INTERCONNECT_IMX8MM=m
+CONFIG_INTERCONNECT_IMX8MN=m
CONFIG_INTERCONNECT_IMX8MQ=m
CONFIG_INTERCONNECT_QCOM=y
CONFIG_INTERCONNECT_QCOM_MSM8916=m
diff --git a/rr-cache/7a9e95bebe19cf20be503f6e4db2d595dc9ed9cf/thisimage b/rr-cache/7a9e95bebe19cf20be503f6e4db2d595dc9ed9cf/thisimage
index 47e36ba..e0a8eda 100644
--- a/rr-cache/7a9e95bebe19cf20be503f6e4db2d595dc9ed9cf/thisimage
+++ b/rr-cache/7a9e95bebe19cf20be503f6e4db2d595dc9ed9cf/thisimage
@@ -20,13 +20,15 @@
#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
-static u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
+static inline
+u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
}
-static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline
+enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
return HAL_ENCRYPT_TYPE_OPEN;
@@ -34,32 +36,34 @@ static enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_bas
return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline
+u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
}
-static bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline
+bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
}
-static bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
}
-static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
- struct sk_buff *skb)
+static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
@@ -67,8 +71,8 @@ static bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
return ieee80211_has_morefrags(hdr->frame_control);
}
-static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
- struct sk_buff *skb)
+static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
@@ -76,37 +80,37 @@ static u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
}
-static u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
}
-static void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
}
-static bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
+static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
{
return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
__le32_to_cpu(attn->info2));
}
-static bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
+static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
{
return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
__le32_to_cpu(attn->info1));
}
-static bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
+static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
{
return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
__le32_to_cpu(attn->info1));
}
-static bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
+static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
{
return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
__le32_to_cpu(attn->info2)) ==
@@ -154,68 +158,68 @@ static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
}
-static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
}
-static u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
}
-static u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
}
-static u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
}
-static u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
}
-static u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
}
-static bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
- struct hal_rx_desc *desc)
+static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
{
return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
}
@@ -233,14 +237,14 @@ static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
}
-static u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
+static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
{
return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
__le32_to_cpu(attn->info1));
}
-static u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
- struct hal_rx_desc *rx_desc)
+static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
+ struct hal_rx_desc *rx_desc)
{
u8 *rx_pkt_hdr;
@@ -249,8 +253,8 @@ static u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
return rx_pkt_hdr;
}
-static bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
- struct hal_rx_desc *rx_desc)
+static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *rx_desc)
{
u32 tlv_tag;
@@ -259,15 +263,15 @@ static bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
return tlv_tag == HAL_RX_MPDU_START;
}
-static u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
- struct hal_rx_desc *rx_desc)
+static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
+ struct hal_rx_desc *rx_desc)
{
return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
}
-static void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
- struct hal_rx_desc *desc,
- u16 len)
+static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
+ struct hal_rx_desc *desc,
+ u16 len)
{
ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
}
@@ -1356,25 +1360,6 @@ int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
return 0;
}
-static inline u32 ath11k_he_gi_to_nl80211_he_gi(u8 sgi)
-{
- u32 ret = 0;
-
- switch (sgi) {
- case RX_MSDU_START_SGI_0_8_US:
- ret = NL80211_RATE_INFO_HE_GI_0_8;
- break;
- case RX_MSDU_START_SGI_1_6_US:
- ret = NL80211_RATE_INFO_HE_GI_1_6;
- break;
- case RX_MSDU_START_SGI_3_2_US:
- ret = NL80211_RATE_INFO_HE_GI_3_2;
- break;
- }
-
- return ret;
-}
-
static void
ath11k_update_per_peer_tx_stats(struct ath11k *ar,
struct htt_ppdu_stats *ppdu_stats, u8 user)
@@ -1493,14 +1478,15 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar,
arsta->txrate.mcs = mcs;
arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
arsta->txrate.he_dcm = dcm;
- arsta->txrate.he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
- arsta->txrate.he_ru_alloc = ath11k_he_ru_tones_to_nl80211_he_ru_alloc(
- (user_rate->ru_end -
+ arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
+ arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc
+ ((user_rate->ru_end -
user_rate->ru_start) + 1);
break;
}
arsta->txrate.nss = nss;
+
arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
arsta->tx_duration += tx_duration;
memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
@@ -2380,7 +2366,7 @@ static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
}
rx_status->encoding = RX_ENC_HE;
rx_status->nss = nss;
- rx_status->he_gi = ath11k_he_gi_to_nl80211_he_gi(sgi);
+ rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
break;
}
@@ -2599,36 +2585,30 @@ free_out:
static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
struct napi_struct *napi,
struct sk_buff_head *msdu_list,
- int *quota, int ring_id)
+ int mac_id)
{
- struct ath11k_skb_rxcb *rxcb;
struct sk_buff *msdu;
struct ath11k *ar;
struct ieee80211_rx_status rx_status = {0};
- u8 mac_id;
int ret;
if (skb_queue_empty(msdu_list))
return;
- rcu_read_lock();
-
- while (*quota && (msdu = __skb_dequeue(msdu_list))) {
- rxcb = ATH11K_SKB_RXCB(msdu);
- mac_id = rxcb->mac_id;
- ar = ab->pdevs[mac_id].ar;
- if (!rcu_dereference(ab->pdevs_active[mac_id])) {
- dev_kfree_skb_any(msdu);
- continue;
- }
+ if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) {
+ __skb_queue_purge(msdu_list);
+ return;
+ }
- if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
- dev_kfree_skb_any(msdu);
- continue;
- }
+ ar = ab->pdevs[mac_id].ar;
+ if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) {
+ __skb_queue_purge(msdu_list);
+ return;
+ }
+ while ((msdu = __skb_dequeue(msdu_list))) {
ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
- if (ret) {
+ if (unlikely(ret)) {
ath11k_dbg(ab, ATH11K_DBG_DATA,
"Unable to process msdu %d", ret);
dev_kfree_skb_any(msdu);
@@ -2636,10 +2616,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
}
ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
- (*quota)--;
}
-
- rcu_read_unlock();
}
int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
@@ -2648,19 +2625,21 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
struct ath11k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring;
int num_buffs_reaped[MAX_RADIOS] = {0};
- struct sk_buff_head msdu_list;
+ struct sk_buff_head msdu_list[MAX_RADIOS];
struct ath11k_skb_rxcb *rxcb;
int total_msdu_reaped = 0;
struct hal_srng *srng;
struct sk_buff *msdu;
- int quota = budget;
bool done = false;
int buf_id, mac_id;
struct ath11k *ar;
- u32 *rx_desc;
+ struct hal_reo_dest_ring *desc;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ u32 cookie;
int i;
- __skb_queue_head_init(&msdu_list);
+ for (i = 0; i < MAX_RADIOS; i++)
+ __skb_queue_head_init(&msdu_list[i]);
srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
@@ -2669,13 +2648,11 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
ath11k_hal_srng_access_begin(ab, srng);
try_again:
- while ((rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
- struct hal_reo_dest_ring desc = *(struct hal_reo_dest_ring *)rx_desc;
- enum hal_reo_dest_ring_push_reason push_reason;
- u32 cookie;
-
+ while (likely(desc =
+ (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
+ srng))) {
cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
- desc.buf_addr_info.info1);
+ desc->buf_addr_info.info1);
buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
cookie);
mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
@@ -2684,7 +2661,7 @@ try_again:
rx_ring = &ar->dp.rx_refill_buf_ring;
spin_lock_bh(&rx_ring->idr_lock);
msdu = idr_find(&rx_ring->bufs_idr, buf_id);
- if (!msdu) {
+ if (unlikely(!msdu)) {
ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
@@ -2700,37 +2677,41 @@ try_again:
DMA_FROM_DEVICE);
num_buffs_reaped[mac_id]++;
- total_msdu_reaped++;
push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
- desc.info0);
- if (push_reason !=
- HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+ desc->info0);
+ if (unlikely(push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
dev_kfree_skb_any(msdu);
ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
continue;
}
- rxcb->is_first_msdu = !!(desc.rx_msdu_info.info0 &
+ rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
- rxcb->is_last_msdu = !!(desc.rx_msdu_info.info0 &
+ rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
- rxcb->is_continuation = !!(desc.rx_msdu_info.info0 &
+ rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
- desc.rx_mpdu_info.meta_data);
+ desc->rx_mpdu_info.meta_data);
rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
- desc.rx_mpdu_info.info0);
+ desc->rx_mpdu_info.info0);
rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
- desc.info0);
+ desc->info0);
rxcb->mac_id = mac_id;
- __skb_queue_tail(&msdu_list, msdu);
+ __skb_queue_tail(&msdu_list[mac_id], msdu);
- if (total_msdu_reaped >= quota && !rxcb->is_continuation) {
+ if (rxcb->is_continuation) {
+ done = false;
+ } else {
+ total_msdu_reaped++;
done = true;
- break;
}
+
+ if (total_msdu_reaped >= budget)
+ break;
}
/* Hw might have updated the head pointer after we cached it.
@@ -2739,7 +2720,7 @@ try_again:
* head pointer so that we can reap complete MPDU in the current
* rx processing.
*/
- if (!done && ath11k_hal_srng_dst_num_free(ab, srng, true)) {
+ if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) {
ath11k_hal_srng_access_end(ab, srng);
goto try_again;
}
@@ -2748,25 +2729,23 @@ try_again:
spin_unlock_bh(&srng->lock);
- if (!total_msdu_reaped)
+ if (unlikely(!total_msdu_reaped))
goto exit;
for (i = 0; i < ab->num_radios; i++) {
if (!num_buffs_reaped[i])
continue;
+ ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i);
+
ar = ab->pdevs[i].ar;
rx_ring = &ar->dp.rx_refill_buf_ring;
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
ab->hw_params.hal_params->rx_buf_rbm);
}
-
- ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list,
- &quota, ring_id);
-
exit:
- return budget - quota;
+ return total_msdu_reaped;
}
static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
@@ -2774,6 +2753,7 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
{
struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
u32 num_msdu;
+ int i;
if (!rx_stats)
return;
@@ -2835,6 +2815,13 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
arsta->rssi_comb = ppdu_info->rssi_comb;
+
+ BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
+ ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
+
+ for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++)
+ arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i];
+
rx_stats->rx_duration += ppdu_info->rx_duration;
arsta->rx_duration = rx_stats->rx_duration;
}
@@ -2948,6 +2935,43 @@ fail_desc_get:
return req_entries - num_remain;
}
+#define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535
+
+static void
+ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
+ struct hal_tlv_hdr *tlv)
+{
+ struct hal_rx_ppdu_start *ppdu_start;
+ u16 ppdu_id_diff, ppdu_id, tlv_len;
+ u8 *ptr;
+
+	/* The PPDU ID is carried in the second TLV; advance ptr to it */
+ tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
+ ptr = (u8 *)tlv;
+ ptr += sizeof(*tlv) + tlv_len;
+ tlv = (struct hal_tlv_hdr *)ptr;
+
+ if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START)
+ return;
+
+ ptr += sizeof(*tlv);
+ ppdu_start = (struct hal_rx_ppdu_start *)ptr;
+ ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
+ __le32_to_cpu(ppdu_start->info0));
+
+ if (pmon->sw_mon_entries.ppdu_id < ppdu_id) {
+ pmon->buf_state = DP_MON_STATUS_LEAD;
+ ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id;
+ if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
+ pmon->buf_state = DP_MON_STATUS_LAG;
+ } else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) {
+ pmon->buf_state = DP_MON_STATUS_LAG;
+ ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id;
+ if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
+ pmon->buf_state = DP_MON_STATUS_LEAD;
+ }
+}
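+
+/* A worked example of the wrap-around check above (illustrative values,
+ * assuming the hardware PPDU ID counter is 16 bits wide): with
+ * sw_mon_entries.ppdu_id = 65500 and a status-buffer ppdu_id = 10,
+ * 65500 > 10 first selects DP_MON_STATUS_LAG, but ppdu_id_diff = 65490
+ * exceeds ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP, so the counter is taken to
+ * have wrapped and buf_state ends up as DP_MON_STATUS_LEAD.
+ */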
+
static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
int *budget, struct sk_buff_head *skb_list)
{
@@ -2955,6 +2979,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
const struct ath11k_hw_hal_params *hal_params;
struct ath11k_pdev_dp *dp;
struct dp_rxdma_ring *rx_ring;
+ struct ath11k_mon_data *pmon;
struct hal_srng *srng;
void *rx_mon_status_desc;
struct sk_buff *skb;
@@ -2968,6 +2993,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
dp = &ar->dp;
+ pmon = &dp->mon_data;
srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
@@ -2980,8 +3006,10 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
*budget -= 1;
rx_mon_status_desc =
ath11k_hal_srng_src_peek(ab, srng);
- if (!rx_mon_status_desc)
+ if (!rx_mon_status_desc) {
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
break;
+ }
ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
&cookie, &rbm);
@@ -2994,6 +3022,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
buf_id);
spin_unlock_bh(&rx_ring->idr_lock);
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
goto move_next;
}
@@ -3013,10 +3042,18 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
FIELD_GET(HAL_TLV_HDR_TAG,
tlv->tl));
dev_kfree_skb_any(skb);
+ pmon->buf_state = DP_MON_STATUS_NO_DMA;
goto move_next;
}
+ if (ab->hw_params.full_monitor_mode) {
+ ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
+ if (paddr == pmon->mon_status_paddr)
+ pmon->buf_state = DP_MON_STATUS_MATCH;
+ }
__skb_queue_tail(skb_list, skb);
+ } else {
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
}
move_next:
skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
@@ -3067,10 +3104,10 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
if (!num_buffs_reaped)
goto exit;
- while ((skb = __skb_dequeue(&skb_list))) {
- memset(&ppdu_info, 0, sizeof(ppdu_info));
- ppdu_info.peer_id = HAL_INVALID_PEERID;
+ memset(&ppdu_info, 0, sizeof(ppdu_info));
+ ppdu_info.peer_id = HAL_INVALID_PEERID;
+ while ((skb = __skb_dequeue(&skb_list))) {
if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
@@ -3098,10 +3135,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
ath11k_dbg(ab, ATH11K_DBG_DATA,
"failed to find the peer with peer_id %d\n",
ppdu_info.peer_id);
- spin_unlock_bh(&ab->base_lock);
- rcu_read_unlock();
- dev_kfree_skb_any(skb);
- continue;
+ goto next_skb;
}
arsta = (struct ath11k_sta *)peer->sta->drv_priv;
@@ -3110,10 +3144,13 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
+next_skb:
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
dev_kfree_skb_any(skb);
+ memset(&ppdu_info, 0, sizeof(ppdu_info));
+ ppdu_info.peer_id = HAL_INVALID_PEERID;
}
exit:
return num_buffs_reaped;
@@ -3803,7 +3840,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
&rbm);
if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
- rbm != ab->hw_params.hal_params->rx_buf_rbm) {
+ rbm != HAL_RX_BUF_RBM_SW3_BM) {
ab->soc_stats.invalid_rbm++;
ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
ath11k_dp_rx_link_desc_return(ab, desc,
@@ -4832,7 +4869,7 @@ static struct sk_buff *
ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
u32 mac_id, struct sk_buff *head_msdu,
struct sk_buff *last_msdu,
- struct ieee80211_rx_status *rxs)
+ struct ieee80211_rx_status *rxs, bool *fcs_err)
{
struct ath11k_base *ab = ar->ab;
struct sk_buff *msdu, *prev_buf;
@@ -4842,12 +4879,17 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
u8 *dest, decap_format;
struct ieee80211_hdr_3addr *wh;
struct rx_attention *rx_attention;
+ u32 err_bitmap;
if (!head_msdu)
goto err_merge_fail;
rx_desc = (struct hal_rx_desc *)head_msdu->data;
rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);
+ err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
+
+ if (err_bitmap & DP_RX_MPDU_ERR_FCS)
+ *fcs_err = true;
if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
return NULL;
@@ -4936,9 +4978,10 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
struct ath11k_pdev_dp *dp = &ar->dp;
struct sk_buff *mon_skb, *skb_next, *header;
struct ieee80211_rx_status *rxs = &dp->rx_status;
+ bool fcs_err = false;
mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
- tail_msdu, rxs);
+ tail_msdu, rxs, &fcs_err);
if (!mon_skb)
goto mon_deliver_fail;
@@ -4946,6 +4989,10 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
header = mon_skb;
rxs->flag = 0;
+
+ if (fcs_err)
+ rxs->flag = RX_FLAG_FAILED_FCS_CRC;
+
do {
skb_next = mon_skb->next;
if (!skb_next)
@@ -5094,6 +5141,357 @@ static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
}
}
+static u32
+ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
+ void *ring_entry, struct sk_buff **head_msdu,
+ struct sk_buff **tail_msdu,
+ struct hal_sw_mon_ring_entries *sw_mon_entries)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+ struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
+ struct sk_buff *msdu = NULL, *last = NULL;
+ struct hal_sw_monitor_ring *sw_desc = ring_entry;
+ struct hal_rx_msdu_list msdu_list;
+ struct hal_rx_desc *rx_desc;
+ struct ath11k_skb_rxcb *rxcb;
+ void *rx_msdu_link_desc;
+ void *p_buf_addr_info, *p_last_buf_addr_info;
+ int buf_id, i = 0;
+ u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset;
+ u32 rx_bufs_used = 0, msdu_cnt = 0;
+ u32 total_len = 0, frag_len = 0, sw_cookie;
+ u16 num_msdus = 0;
+ u8 rxdma_err, rbm;
+ bool is_frag, is_first_msdu;
+ bool drop_mpdu = false;
+
+ ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries);
+
+ sw_cookie = sw_mon_entries->mon_dst_sw_cookie;
+ sw_mon_entries->end_of_ppdu = false;
+ sw_mon_entries->drop_ppdu = false;
+ p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info;
+ msdu_cnt = sw_mon_entries->msdu_cnt;
+
+ sw_mon_entries->end_of_ppdu =
+ FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0);
+ if (sw_mon_entries->end_of_ppdu)
+ return rx_bufs_used;
+
+ if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON,
+ sw_desc->info0) ==
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ rxdma_err =
+ FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE,
+ sw_desc->info0);
+ if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
+ pmon->rx_mon_stats.dest_mpdu_drop++;
+ drop_mpdu = true;
+ }
+ }
+
+ is_frag = false;
+ is_first_msdu = true;
+
+ do {
+ rx_msdu_link_desc =
+ (u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
+ (sw_mon_entries->mon_dst_paddr -
+ pmon->link_desc_banks[sw_cookie].paddr);
+
+ ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
+ &num_msdus);
+
+ for (i = 0; i < num_msdus; i++) {
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ msdu_list.sw_cookie[i]);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!msdu) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "full mon msdu_pop: invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ break;
+ }
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ if (!rxcb->unmapped) {
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len +
+ skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ rxcb->unmapped = 1;
+ }
+ if (drop_mpdu) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+				   "full mon: i %d drop msdu %p ppdu_id %x\n",
+ i, msdu, sw_mon_entries->ppdu_id);
+ dev_kfree_skb_any(msdu);
+ msdu_cnt--;
+ goto next_msdu;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+
+ rx_pkt_offset = sizeof(struct hal_rx_desc);
+ l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
+
+ if (is_first_msdu) {
+ if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
+ drop_mpdu = true;
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ goto next_msdu;
+ }
+ is_first_msdu = false;
+ }
+
+ ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
+ &is_frag, &total_len,
+ &frag_len, &msdu_cnt);
+
+ rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
+
+ ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
+
+ if (!(*head_msdu))
+ *head_msdu = msdu;
+ else if (last)
+ last->next = msdu;
+
+ last = msdu;
+next_msdu:
+ rx_bufs_used++;
+ }
+
+ ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc,
+ &sw_mon_entries->mon_dst_paddr,
+ &sw_mon_entries->mon_dst_sw_cookie,
+ &rbm,
+ &p_buf_addr_info);
+
+ if (ath11k_dp_rx_monitor_link_desc_return(ar,
+ p_last_buf_addr_info,
+ dp->mac_id))
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "full mon: dp_rx_monitor_link_desc_return failed\n");
+
+ p_last_buf_addr_info = p_buf_addr_info;
+
+ } while (sw_mon_entries->mon_dst_paddr && msdu_cnt);
+
+ if (last)
+ last->next = NULL;
+
+ *tail_msdu = msdu;
+
+ return rx_bufs_used;
+}
+
+static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp,
+ struct dp_full_mon_mpdu *mon_mpdu,
+ struct sk_buff *head,
+ struct sk_buff *tail)
+{
+ mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
+ if (!mon_mpdu)
+ return -ENOMEM;
+
+ list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list);
+ mon_mpdu->head = head;
+ mon_mpdu->tail = tail;
+
+ return 0;
+}
+
+static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp,
+ struct dp_full_mon_mpdu *mon_mpdu)
+{
+ struct dp_full_mon_mpdu *tmp;
+ struct sk_buff *tmp_msdu, *skb_next;
+
+ if (list_empty(&dp->dp_full_mon_mpdu_list))
+ return;
+
+ list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
+ list_del(&mon_mpdu->list);
+
+ tmp_msdu = mon_mpdu->head;
+ while (tmp_msdu) {
+ skb_next = tmp_msdu->next;
+ dev_kfree_skb_any(tmp_msdu);
+ tmp_msdu = skb_next;
+ }
+
+ kfree(mon_mpdu);
+ }
+}
+
+static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
+ int mac_id,
+ struct ath11k_mon_data *pmon,
+ struct napi_struct *napi)
+{
+ struct ath11k_pdev_mon_stats *rx_mon_stats;
+ struct dp_full_mon_mpdu *tmp;
+ struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
+ struct sk_buff *head_msdu, *tail_msdu;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_dp *dp = &ab->dp;
+	int ret = 0;	/* the MPDU list may be empty; avoid returning garbage */
+
+ rx_mon_stats = &pmon->rx_mon_stats;
+
+ list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
+ list_del(&mon_mpdu->list);
+ head_msdu = mon_mpdu->head;
+ tail_msdu = mon_mpdu->tail;
+ if (head_msdu && tail_msdu) {
+ ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
+ tail_msdu, napi);
+ rx_mon_stats->dest_mpdu_done++;
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
+ }
+ kfree(mon_mpdu);
+ }
+
+ return ret;
+}
+
+static int
+ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+ struct hal_sw_mon_ring_entries *sw_mon_entries;
+ int quota = 0, work = 0, count;
+
+ sw_mon_entries = &pmon->sw_mon_entries;
+
+ while (pmon->hold_mon_dst_ring) {
+ quota = ath11k_dp_rx_process_mon_status(ab, mac_id,
+ napi, 1);
+ if (pmon->buf_state == DP_MON_STATUS_MATCH) {
+ count = sw_mon_entries->status_buf_count;
+ if (count > 1) {
+ quota += ath11k_dp_rx_process_mon_status(ab, mac_id,
+ napi, count);
+ }
+
+ ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id,
+ pmon, napi);
+ pmon->hold_mon_dst_ring = false;
+ } else if (!pmon->mon_status_paddr ||
+ pmon->buf_state == DP_MON_STATUS_LEAD) {
+ sw_mon_entries->drop_ppdu = true;
+ pmon->hold_mon_dst_ring = false;
+ }
+
+ if (!quota)
+ break;
+
+ work += quota;
+ }
+
+ if (sw_mon_entries->drop_ppdu)
+ ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu);
+
+ return work;
+}
+
+static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+ struct hal_sw_mon_ring_entries *sw_mon_entries;
+ struct ath11k_pdev_mon_stats *rx_mon_stats;
+ struct sk_buff *head_msdu, *tail_msdu;
+ void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+ void *ring_entry;
+ u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
+ int quota = 0, ret;
+ bool break_dst_ring = false;
+
+ spin_lock_bh(&pmon->mon_lock);
+
+ sw_mon_entries = &pmon->sw_mon_entries;
+ rx_mon_stats = &pmon->rx_mon_stats;
+
+ if (pmon->hold_mon_dst_ring) {
+ spin_unlock_bh(&pmon->mon_lock);
+ goto reap_status_ring;
+ }
+
+ ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
+ while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
+ head_msdu = NULL;
+ tail_msdu = NULL;
+
+ mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry,
+ &head_msdu,
+ &tail_msdu,
+ sw_mon_entries);
+ rx_bufs_used += mpdu_rx_bufs_used;
+
+ if (!sw_mon_entries->end_of_ppdu) {
+ if (head_msdu) {
+ ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp,
+ pmon->mon_mpdu,
+ head_msdu,
+ tail_msdu);
+ if (ret)
+ break_dst_ring = true;
+ }
+
+ goto next_entry;
+ } else {
+ if (!sw_mon_entries->ppdu_id &&
+ !sw_mon_entries->mon_status_paddr) {
+ break_dst_ring = true;
+ goto next_entry;
+ }
+ }
+
+ rx_mon_stats->dest_ppdu_done++;
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ pmon->buf_state = DP_MON_STATUS_LAG;
+ pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr;
+ pmon->hold_mon_dst_ring = true;
+next_entry:
+ ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
+ mon_dst_srng);
+ if (break_dst_ring)
+ break;
+ }
+
+ ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+ spin_unlock_bh(&pmon->mon_lock);
+
+ if (rx_bufs_used) {
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
+ &dp->rxdma_mon_buf_ring,
+ rx_bufs_used,
+ HAL_RX_BUF_RBM_SW3_BM);
+ }
+
+reap_status_ring:
+ quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id,
+ napi, budget);
+
+ return quota;
+}
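+
+/* A rough sketch of the full-monitor handshake above, as read from this
+ * code (not an authoritative description): the destination ring is reaped
+ * until an end-of-ppdu entry carrying a valid mon_status_paddr is seen;
+ * buf_state is then primed to DP_MON_STATUS_LAG and hold_mon_dst_ring is
+ * set. The status ring is next reaped one buffer at a time until either
+ * the buffer address matches mon_status_paddr (DP_MON_STATUS_MATCH, the
+ * queued MPDUs are delivered) or the status ring is found to be leading
+ * (DP_MON_STATUS_LEAD, the PPDU is dropped); in both cases the hold on
+ * the destination ring is released.
+ */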
+
static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget)
{
@@ -5116,10 +5514,14 @@ int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
int ret = 0;
- if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
+ ab->hw_params.full_monitor_mode)
+ ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
+ else if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
else
ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
+
return ret;
}
diff --git a/rr-cache/8c2b8bf507854aaf7c7a64fde16df892d508760f/postimage b/rr-cache/8c2b8bf507854aaf7c7a64fde16df892d508760f/postimage
new file mode 100644
index 0000000..f0be065
--- /dev/null
+++ b/rr-cache/8c2b8bf507854aaf7c7a64fde16df892d508760f/postimage
@@ -0,0 +1,7537 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*******************************************************************************
+ This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
+ ST Ethernet IPs are built around a Synopsys IP Core.
+
+ Copyright(C) 2007-2011 STMicroelectronics Ltd
+
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+
+ Documentation available at:
+ http://www.stlinux.com
+ Support available at:
+ https://bugzilla.stlinux.com/
+*******************************************************************************/
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/prefetch.h>
+#include <linux/pinctrl/consumer.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#endif /* CONFIG_DEBUG_FS */
+#include <linux/net_tstamp.h>
+#include <linux/phylink.h>
+#include <linux/udp.h>
+#include <linux/bpf_trace.h>
+#include <net/pkt_cls.h>
+#include <net/xdp_sock_drv.h>
+#include "stmmac_ptp.h"
+#include "stmmac.h"
+#include "stmmac_xdp.h"
+#include <linux/reset.h>
+#include <linux/of_mdio.h>
+#include "dwmac1000.h"
+#include "dwxgmac2.h"
+#include "hwif.h"
+
+/* As long as the interface is active, we keep the timestamping counter enabled
+ * with fine resolution and binary rollover. This avoids non-monotonic behavior
+ * (clock jumps) when changing timestamping settings at runtime.
+ */
+#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
+ PTP_TCR_TSCTRLSSR)
+
+#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
+#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
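+
+/* Worked example (assuming SMP_CACHE_BYTES = 64): STMMAC_ALIGN(1500)
+ * first rounds 1500 up to the 64-byte cache line, giving 1536, which is
+ * already a multiple of 16, so the result is 1536.
+ */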
+
+/* Module parameters */
+#define TX_TIMEO 5000
+static int watchdog = TX_TIMEO;
+module_param(watchdog, int, 0644);
+MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
+
+static int debug = -1;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
+
+static int phyaddr = -1;
+module_param(phyaddr, int, 0444);
+MODULE_PARM_DESC(phyaddr, "Physical device address");
+
+#define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4)
+#define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4)
+
+/* Limit to make sure XDP TX and slow path can coexist */
+#define STMMAC_XSK_TX_BUDGET_MAX 256
+#define STMMAC_TX_XSK_AVAIL 16
+#define STMMAC_RX_FILL_BATCH 16
+
+#define STMMAC_XDP_PASS 0
+#define STMMAC_XDP_CONSUMED BIT(0)
+#define STMMAC_XDP_TX BIT(1)
+#define STMMAC_XDP_REDIRECT BIT(2)
+
+static int flow_ctrl = FLOW_AUTO;
+module_param(flow_ctrl, int, 0644);
+MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
+
+static int pause = PAUSE_TIME;
+module_param(pause, int, 0644);
+MODULE_PARM_DESC(pause, "Flow Control Pause Time");
+
+#define TC_DEFAULT 64
+static int tc = TC_DEFAULT;
+module_param(tc, int, 0644);
+MODULE_PARM_DESC(tc, "DMA threshold control value");
+
+#define DEFAULT_BUFSIZE 1536
+static int buf_sz = DEFAULT_BUFSIZE;
+module_param(buf_sz, int, 0644);
+MODULE_PARM_DESC(buf_sz, "DMA buffer size");
+
+#define STMMAC_RX_COPYBREAK 256
+
+static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
+
+#define STMMAC_DEFAULT_LPI_TIMER 1000
+static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+module_param(eee_timer, int, 0644);
+MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
+#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
+
+/* By default the driver will use the ring mode to manage tx and rx descriptors,
+ * but allows the user to force use of the chain mode instead of the ring.
+ */
+static unsigned int chain_mode;
+module_param(chain_mode, int, 0444);
+MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
+
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
+/* For MSI interrupts handling */
+static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
+static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
+static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
+static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
+static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
+static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
+static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
+ u32 rxmode, u32 chan);
+
+#ifdef CONFIG_DEBUG_FS
+static const struct net_device_ops stmmac_netdev_ops;
+static void stmmac_init_fs(struct net_device *dev);
+static void stmmac_exit_fs(struct net_device *dev);
+#endif
+
+#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
+
+int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
+{
+ int ret = 0;
+
+ if (enabled) {
+ ret = clk_prepare_enable(priv->plat->stmmac_clk);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(priv->plat->pclk);
+ if (ret) {
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+ return ret;
+ }
+ if (priv->plat->clks_config) {
+ ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
+ if (ret) {
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+ clk_disable_unprepare(priv->plat->pclk);
+ return ret;
+ }
+ }
+ } else {
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+ clk_disable_unprepare(priv->plat->pclk);
+ if (priv->plat->clks_config)
+ priv->plat->clks_config(priv->plat->bsp_priv, enabled);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
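+
+/* A minimal usage sketch (an assumption about typical callers, not code
+ * from this file): a suspend/resume path would bracket register access
+ * with this helper and propagate failures, e.g.:
+ *
+ *	ret = stmmac_bus_clks_config(priv, true);
+ *	if (ret)
+ *		return ret;
+ *	...access MAC registers...
+ *	stmmac_bus_clks_config(priv, false);
+ */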
+
+/**
+ * stmmac_verify_args - verify the driver parameters.
+ * Description: it checks the driver parameters and sets a default in case of
+ * errors.
+ */
+static void stmmac_verify_args(void)
+{
+ if (unlikely(watchdog < 0))
+ watchdog = TX_TIMEO;
+ if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
+ buf_sz = DEFAULT_BUFSIZE;
+ if (unlikely(flow_ctrl > 1))
+ flow_ctrl = FLOW_AUTO;
+ else if (likely(flow_ctrl < 0))
+ flow_ctrl = FLOW_OFF;
+ if (unlikely((pause < 0) || (pause > 0xffff)))
+ pause = PAUSE_TIME;
+ if (eee_timer < 0)
+ eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+}
+
+static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
+{
+ u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+ u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
+ u32 queue;
+
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ if (stmmac_xdp_is_enabled(priv) &&
+ test_bit(queue, priv->af_xdp_zc_qps)) {
+ napi_disable(&ch->rxtx_napi);
+ continue;
+ }
+
+ if (queue < rx_queues_cnt)
+ napi_disable(&ch->rx_napi);
+ if (queue < tx_queues_cnt)
+ napi_disable(&ch->tx_napi);
+ }
+}
+
+/**
+ * stmmac_disable_all_queues - Disable all queues
+ * @priv: driver private structure
+ */
+static void stmmac_disable_all_queues(struct stmmac_priv *priv)
+{
+ u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ struct stmmac_rx_queue *rx_q;
+ u32 queue;
+
+ /* synchronize_rcu() needed for pending XDP buffers to drain */
+ for (queue = 0; queue < rx_queues_cnt; queue++) {
+ rx_q = &priv->rx_queue[queue];
+ if (rx_q->xsk_pool) {
+ synchronize_rcu();
+ break;
+ }
+ }
+
+ __stmmac_disable_all_queues(priv);
+}
+
+/**
+ * stmmac_enable_all_queues - Enable all queues
+ * @priv: driver private structure
+ */
+static void stmmac_enable_all_queues(struct stmmac_priv *priv)
+{
+ u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+ u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
+ u32 queue;
+
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ if (stmmac_xdp_is_enabled(priv) &&
+ test_bit(queue, priv->af_xdp_zc_qps)) {
+ napi_enable(&ch->rxtx_napi);
+ continue;
+ }
+
+ if (queue < rx_queues_cnt)
+ napi_enable(&ch->rx_napi);
+ if (queue < tx_queues_cnt)
+ napi_enable(&ch->tx_napi);
+ }
+}
+
+static void stmmac_service_event_schedule(struct stmmac_priv *priv)
+{
+ if (!test_bit(STMMAC_DOWN, &priv->state) &&
+ !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
+ queue_work(priv->wq, &priv->service_task);
+}
+
+static void stmmac_global_err(struct stmmac_priv *priv)
+{
+ netif_carrier_off(priv->dev);
+ set_bit(STMMAC_RESET_REQUESTED, &priv->state);
+ stmmac_service_event_schedule(priv);
+}
+
+/**
+ * stmmac_clk_csr_set - dynamically set the MDC clock
+ * @priv: driver private structure
+ * Description: this is to dynamically set the MDC clock according to the csr
+ * clock input.
+ * Note:
+ * If a specific clk_csr value is passed from the platform
+ * this means that the CSR Clock Range selection cannot be
+ * changed at run-time and it is fixed (as reported in the driver
+ *	documentation). Otherwise the driver will try to set the MDC
+ * clock dynamically according to the actual clock input.
+ */
+static void stmmac_clk_csr_set(struct stmmac_priv *priv)
+{
+ u32 clk_rate;
+
+ clk_rate = clk_get_rate(priv->plat->stmmac_clk);
+
+	/* The platform-provided default clk_csr is assumed valid for all
+	 * cases except the ones handled below. For frequencies higher than
+	 * those specified by IEEE 802.3 we cannot estimate the proper
+	 * divider, since the frequency of clk_csr_i is not known, so the
+	 * default divider is left unchanged.
+	 */
+ if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
+ if (clk_rate < CSR_F_35M)
+ priv->clk_csr = STMMAC_CSR_20_35M;
+ else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
+ priv->clk_csr = STMMAC_CSR_35_60M;
+ else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
+ priv->clk_csr = STMMAC_CSR_60_100M;
+ else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
+ priv->clk_csr = STMMAC_CSR_100_150M;
+ else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
+ priv->clk_csr = STMMAC_CSR_150_250M;
+ else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
+ priv->clk_csr = STMMAC_CSR_250_300M;
+ }
+
+ if (priv->plat->has_sun8i) {
+ if (clk_rate > 160000000)
+ priv->clk_csr = 0x03;
+ else if (clk_rate > 80000000)
+ priv->clk_csr = 0x02;
+ else if (clk_rate > 40000000)
+ priv->clk_csr = 0x01;
+ else
+ priv->clk_csr = 0;
+ }
+
+ if (priv->plat->has_xgmac) {
+ if (clk_rate > 400000000)
+ priv->clk_csr = 0x5;
+ else if (clk_rate > 350000000)
+ priv->clk_csr = 0x4;
+ else if (clk_rate > 300000000)
+ priv->clk_csr = 0x3;
+ else if (clk_rate > 250000000)
+ priv->clk_csr = 0x2;
+ else if (clk_rate > 150000000)
+ priv->clk_csr = 0x1;
+ else
+ priv->clk_csr = 0x0;
+ }
+}
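+
+/* For example, assuming CSR_F_60M and CSR_F_100M are 60 MHz and 100 MHz as
+ * their names suggest, a 75 MHz csr clock reported by clk_get_rate() falls
+ * in the [60M, 100M) range and selects STMMAC_CSR_60_100M.
+ */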
+
+static void print_pkt(unsigned char *buf, int len)
+{
+ pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
+}
+
+static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ u32 avail;
+
+ if (tx_q->dirty_tx > tx_q->cur_tx)
+ avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
+ else
+ avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
+
+ return avail;
+}
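+
+/* Worked example for the ring arithmetic above (illustrative numbers):
+ * with dma_tx_size = 512, dirty_tx = 10 and cur_tx = 500, the second
+ * branch applies and avail = 512 - 500 + 10 - 1 = 21 free descriptors.
+ * The "- 1" presumably reserves one slot so that a completely full ring
+ * stays distinguishable from an empty one.
+ */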
+
+/**
+ * stmmac_rx_dirty - Get RX queue dirty
+ * @priv: driver private structure
+ * @queue: RX queue index
+ */
+static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ u32 dirty;
+
+ if (rx_q->dirty_rx <= rx_q->cur_rx)
+ dirty = rx_q->cur_rx - rx_q->dirty_rx;
+ else
+ dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
+
+ return dirty;
+}
+
+static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
+{
+ int tx_lpi_timer;
+
+ /* Clear/set the SW EEE timer flag based on LPI ET enablement */
+ priv->eee_sw_timer_en = en ? 0 : 1;
+ tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
+ stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
+}
+
+/**
+ * stmmac_enable_eee_mode - check and enter LPI mode
+ * @priv: driver private structure
+ * Description: this function verifies that all TX queues are idle and,
+ * when EEE is enabled, enters LPI mode.
+ */
+static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
+{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ /* check if all TX queues have the work finished */
+ for (queue = 0; queue < tx_cnt; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ if (tx_q->dirty_tx != tx_q->cur_tx)
+ return; /* still unfinished work */
+ }
+
+ /* Check and enter in LPI mode */
+ if (!priv->tx_path_in_lpi_mode)
+ stmmac_set_eee_mode(priv, priv->hw,
+ priv->plat->en_tx_lpi_clockgating);
+}
+
+/**
+ * stmmac_disable_eee_mode - disable and exit from LPI mode
+ * @priv: driver private structure
+ * Description: this function exits LPI and disables EEE when the LPI
+ * state is active. It is called from the xmit path.
+ */
+void stmmac_disable_eee_mode(struct stmmac_priv *priv)
+{
+ if (!priv->eee_sw_timer_en) {
+ stmmac_lpi_entry_timer_config(priv, 0);
+ return;
+ }
+
+ stmmac_reset_eee_mode(priv, priv->hw);
+ del_timer_sync(&priv->eee_ctrl_timer);
+ priv->tx_path_in_lpi_mode = false;
+}
+
+/**
+ * stmmac_eee_ctrl_timer - EEE TX SW timer.
+ * @t: timer_list struct containing private info
+ * Description:
+ * if there is no data transfer and if we are not in LPI state,
+ * then the MAC transmitter can be moved to the LPI state.
+ */
+static void stmmac_eee_ctrl_timer(struct timer_list *t)
+{
+ struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
+
+ stmmac_enable_eee_mode(priv);
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+}
+
+/**
+ * stmmac_eee_init - init EEE
+ * @priv: driver private structure
+ * Description:
+ * if the GMAC supports the EEE (from the HW cap reg) and the phy device
+ * can also manage EEE, this function enables the LPI state and starts the
+ * related timer.
+ */
+bool stmmac_eee_init(struct stmmac_priv *priv)
+{
+ int eee_tw_timer = priv->eee_tw_timer;
+
+	/* When using PCS we cannot access the PHY registers at this stage,
+	 * so we do not support extra features like EEE.
+ */
+ if (priv->hw->pcs == STMMAC_PCS_TBI ||
+ priv->hw->pcs == STMMAC_PCS_RTBI)
+ return false;
+
+ /* Check if MAC core supports the EEE feature. */
+ if (!priv->dma_cap.eee)
+ return false;
+
+ mutex_lock(&priv->lock);
+
+ /* Check if it needs to be deactivated */
+ if (!priv->eee_active) {
+ if (priv->eee_enabled) {
+ netdev_dbg(priv->dev, "disable EEE\n");
+ stmmac_lpi_entry_timer_config(priv, 0);
+ del_timer_sync(&priv->eee_ctrl_timer);
+ stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
+ if (priv->hw->xpcs)
+ xpcs_config_eee(priv->hw->xpcs,
+ priv->plat->mult_fact_100ns,
+ false);
+ }
+ mutex_unlock(&priv->lock);
+ return false;
+ }
+
+ if (priv->eee_active && !priv->eee_enabled) {
+ timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
+ stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
+ eee_tw_timer);
+ if (priv->hw->xpcs)
+ xpcs_config_eee(priv->hw->xpcs,
+ priv->plat->mult_fact_100ns,
+ true);
+ }
+
+ if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
+ del_timer_sync(&priv->eee_ctrl_timer);
+ priv->tx_path_in_lpi_mode = false;
+ stmmac_lpi_entry_timer_config(priv, 1);
+ } else {
+ stmmac_lpi_entry_timer_config(priv, 0);
+ mod_timer(&priv->eee_ctrl_timer,
+ STMMAC_LPI_T(priv->tx_lpi_timer));
+ }
+
+ mutex_unlock(&priv->lock);
+ netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
+ return true;
+}
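+
+/* Summary of the timer selection above, as implemented in this function:
+ * on dwmac4 parts with tx_lpi_timer <= STMMAC_ET_MAX, LPI entry is
+ * offloaded to the hardware entry timer via
+ * stmmac_lpi_entry_timer_config(priv, 1); otherwise the software
+ * eee_ctrl_timer is (re)armed.
+ */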
+
+/* stmmac_get_tx_hwtstamp - get HW TX timestamps
+ * @priv: driver private structure
+ * @p : descriptor pointer
+ * @skb : the socket buffer
+ * Description :
+ * This function reads the timestamp from the descriptor, performs some
+ * sanity checks, and passes it to the stack.
+ */
+static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
+ struct dma_desc *p, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps shhwtstamp;
+ bool found = false;
+ u64 ns = 0;
+
+ if (!priv->hwts_tx_en)
+ return;
+
+ /* exit if skb doesn't support hw tstamp */
+ if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
+ return;
+
+ /* check tx tstamp status */
+ if (stmmac_get_tx_timestamp_status(priv, p)) {
+ stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
+ found = true;
+ } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
+ found = true;
+ }
+
+ if (found) {
+ ns -= priv->plat->cdc_error_adj;
+
+ memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp.hwtstamp = ns_to_ktime(ns);
+
+ netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
+ /* pass tstamp to stack */
+ skb_tstamp_tx(skb, &shhwtstamp);
+ }
+}
+
+/* stmmac_get_rx_hwtstamp - get HW RX timestamps
+ * @priv: driver private structure
+ * @p : descriptor pointer
+ * @np : next descriptor pointer
+ * @skb : the socket buffer
+ * Description :
+ * This function reads the received packet's timestamp from the descriptor
+ * and passes it to the stack. It also performs some sanity checks.
+ */
+static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
+ struct dma_desc *np, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shhwtstamp = NULL;
+ struct dma_desc *desc = p;
+ u64 ns = 0;
+
+ if (!priv->hwts_rx_en)
+ return;
+ /* For GMAC4, the valid timestamp is from CTX next desc. */
+ if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
+ desc = np;
+
+ /* Check if timestamp is available */
+ if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
+ stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
+
+ ns -= priv->plat->cdc_error_adj;
+
+ netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
+ shhwtstamp = skb_hwtstamps(skb);
+ memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp->hwtstamp = ns_to_ktime(ns);
+ } else {
+ netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
+ }
+}
+
+/**
+ * stmmac_hwtstamp_set - control hardware timestamping.
+ * @dev: device pointer.
+ * @ifr: An IOCTL-specific structure that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * Description:
+ * This function configures the MAC to enable/disable timestamping of
+ * both outgoing (TX) and incoming (RX) packets, based on user input.
+ * Return Value:
+ * 0 on success and an appropriate negative integer on failure.
+ */
+static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config config;
+ u32 ptp_v2 = 0;
+ u32 tstamp_all = 0;
+ u32 ptp_over_ipv4_udp = 0;
+ u32 ptp_over_ipv6_udp = 0;
+ u32 ptp_over_ethernet = 0;
+ u32 snap_type_sel = 0;
+ u32 ts_master_en = 0;
+ u32 ts_event_en = 0;
+
+ if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
+ netdev_alert(priv->dev, "No support for HW time stamping\n");
+ priv->hwts_tx_en = 0;
+ priv->hwts_rx_en = 0;
+
+ return -EOPNOTSUPP;
+ }
+
+ if (copy_from_user(&config, ifr->ifr_data,
+ sizeof(config)))
+ return -EFAULT;
+
+ netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
+ __func__, config.flags, config.tx_type, config.rx_filter);
+
+ if (config.tx_type != HWTSTAMP_TX_OFF &&
+ config.tx_type != HWTSTAMP_TX_ON)
+ return -ERANGE;
+
+ if (priv->adv_ts) {
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+			/* do not time stamp any incoming packet */
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ /* PTP v1, UDP, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ /* 'xmac' hardware can support Sync, Pdelay_Req and
+			 * Pdelay_resp by setting bit 14 and bits 17/16 to 01.
+			 * This leaves Delay_Req timestamps out.
+			 * Enable all events *and* general purpose message
+			 * timestamping.
+ */
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ /* PTP v1, UDP, Sync packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ /* take time stamp for SYNC messages only */
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ /* PTP v1, UDP, Delay_req packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ /* take time stamp for Delay_Req messages only */
+ ts_master_en = PTP_TCR_TSMSTRENA;
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ /* PTP v2, UDP, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for all event messages */
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ /* PTP v2, UDP, Sync packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for SYNC messages only */
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ /* PTP v2, UDP, Delay_req packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for Delay_Req messages only */
+ ts_master_en = PTP_TCR_TSMSTRENA;
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+			/* PTP v2/802.1AS, any layer, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+ if (priv->synopsys_id < DWMAC_CORE_4_10)
+ ts_event_en = PTP_TCR_TSEVNTENA;
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ ptp_over_ethernet = PTP_TCR_TSIPENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+			/* PTP v2/802.1AS, any layer, Sync packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for SYNC messages only */
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ ptp_over_ethernet = PTP_TCR_TSIPENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+			/* PTP v2/802.1AS, any layer, Delay_req packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for Delay_Req messages only */
+ ts_master_en = PTP_TCR_TSMSTRENA;
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ ptp_over_ethernet = PTP_TCR_TSIPENA;
+ break;
+
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_ALL:
+ /* time stamp any incoming packet */
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_all = PTP_TCR_TSENALL;
+ break;
+
+ default:
+ return -ERANGE;
+ }
+ } else {
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ default:
+ /* PTP v1, UDP, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ }
+ }
+ priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
+ priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
+
+ priv->systime_flags = STMMAC_HWTS_ACTIVE;
+
+ if (priv->hwts_tx_en || priv->hwts_rx_en) {
+ priv->systime_flags |= tstamp_all | ptp_v2 |
+ ptp_over_ethernet | ptp_over_ipv6_udp |
+ ptp_over_ipv4_udp | ts_event_en |
+ ts_master_en | snap_type_sel;
+ }
+
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
+
+ memcpy(&priv->tstamp_config, &config, sizeof(config));
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
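+
+/* A minimal userspace sketch showing how the handler above is reached.
+ * This is the generic SIOCSHWTSTAMP ioctl pattern, assumed rather than
+ * taken from this driver; it applies to any netdev whose driver wires up
+ * the hwtstamp ioctl path.
+ */
+#if 0	/* illustrative only; builds against userspace headers */
+#include <string.h>
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <linux/net_tstamp.h>
+#include <linux/sockios.h>
+
+static int enable_ptp_timestamping(int fd, const char *ifname)
+{
+	struct hwtstamp_config cfg = {
+		.tx_type   = HWTSTAMP_TX_ON,
+		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
+	};
+	struct ifreq ifr;
+
+	memset(&ifr, 0, sizeof(ifr));
+	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
+	ifr.ifr_data = (void *)&cfg;
+
+	/* fd is any socket, e.g. socket(AF_INET, SOCK_DGRAM, 0) */
+	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
+}
+#endif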
+
+/**
+ * stmmac_hwtstamp_get - read hardware timestamping.
+ * @dev: device pointer.
+ * @ifr: An IOCTL-specific structure that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * Description:
+ * This function obtains the current hardware timestamping settings
+ * as requested.
+ */
+static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config *config = &priv->tstamp_config;
+
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, config,
+ sizeof(*config)) ? -EFAULT : 0;
+}
+
+/**
+ * stmmac_init_tstamp_counter - init hardware timestamping counter
+ * @priv: driver private structure
+ * @systime_flags: timestamping flags
+ * Description:
+ * Initialize hardware counter for packet timestamping.
+ * This is valid as long as the interface is open and not suspended.
+ * It is rerun after resuming from suspend, in which case the timestamping
+ * flags updated by stmmac_hwtstamp_set() also need to be restored.
+ */
+int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
+{
+ bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ struct timespec64 now;
+ u32 sec_inc = 0;
+ u64 temp = 0;
+ int ret;
+
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
+ return -EOPNOTSUPP;
+
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0) {
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
+ priv->systime_flags = systime_flags;
+
+ /* program Sub Second Increment reg */
+ stmmac_config_sub_second_increment(priv, priv->ptpaddr,
+ priv->plat->clk_ptp_rate,
+ xmac, &sec_inc);
+ temp = div_u64(1000000000ULL, sec_inc);
+
+ /* Store sub second increment for later use */
+ priv->sub_second_inc = sec_inc;
+
+	/* calculate the default addend value:
+	 * addend = (2^32) / freq_div_ratio,
+	 * where freq_div_ratio = 1e9 ns / sec_inc
+	 */
+ temp = (u64)(temp << 32);
+ priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
+ stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
+
+ /* initialize system time */
+ ktime_get_real_ts64(&now);
+
+ /* lower 32 bits of tv_sec are safe until y2106 */
+ stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
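+
+/* Worked example for the addend computation above (illustrative numbers):
+ * with sec_inc = 20 ns, freq_div_ratio = 10^9 / 20 = 5 * 10^7, and for
+ * clk_ptp_rate = 62,500,000 Hz:
+ *
+ *   addend = (5 * 10^7 << 32) / 62,500,000 = 0.8 * 2^32 ~= 3435973836
+ *
+ * i.e. the 32-bit accumulator overflows 0.8 times per clk_ptp_ref cycle,
+ * which yields one 20 ns sub-second increment every 1 / (0.8 * 62.5 MHz).
+ */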
+
+/**
+ * stmmac_init_ptp - init PTP
+ * @priv: driver private structure
+ * Description: this verifies whether the HW supports PTPv1 or PTPv2 by
+ * looking at the HW capability register.
+ * This function also registers the ptp driver.
+ */
+static int stmmac_init_ptp(struct stmmac_priv *priv)
+{
+ bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ int ret;
+
+ ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
+ if (ret)
+ return ret;
+
+ priv->adv_ts = 0;
+ /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
+ if (xmac && priv->dma_cap.atime_stamp)
+ priv->adv_ts = 1;
+ /* Dwmac 3.x core with extend_desc can support adv_ts */
+ else if (priv->extend_desc && priv->dma_cap.atime_stamp)
+ priv->adv_ts = 1;
+
+ if (priv->dma_cap.time_stamp)
+ netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
+
+ if (priv->adv_ts)
+ netdev_info(priv->dev,
+ "IEEE 1588-2008 Advanced Timestamp supported\n");
+
+ priv->hwts_tx_en = 0;
+ priv->hwts_rx_en = 0;
+
+ stmmac_ptp_register(priv);
+
+ return 0;
+}
+
+static void stmmac_release_ptp(struct stmmac_priv *priv)
+{
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
+ stmmac_ptp_unregister(priv);
+}
+
+/**
+ * stmmac_mac_flow_ctrl - Configure flow control in all queues
+ * @priv: driver private structure
+ * @duplex: duplex passed to the next function
+ * Description: It is used for configuring the flow control in all queues
+ */
+static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
+{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+
+ stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
+ priv->pause, tx_cnt);
+}
+
+static void stmmac_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ int tx_cnt = priv->plat->tx_queues_to_use;
+ int max_speed = priv->plat->max_speed;
+
+ phylink_set(mac_supported, 10baseT_Half);
+ phylink_set(mac_supported, 10baseT_Full);
+ phylink_set(mac_supported, 100baseT_Half);
+ phylink_set(mac_supported, 100baseT_Full);
+ phylink_set(mac_supported, 1000baseT_Half);
+ phylink_set(mac_supported, 1000baseT_Full);
+ phylink_set(mac_supported, 1000baseKX_Full);
+
+ phylink_set(mac_supported, Autoneg);
+ phylink_set(mac_supported, Pause);
+ phylink_set(mac_supported, Asym_Pause);
+ phylink_set_port_modes(mac_supported);
+
+ /* Cut down 1G if asked to */
+ if ((max_speed > 0) && (max_speed < 1000)) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ } else if (priv->plat->has_gmac4) {
+ if (!max_speed || max_speed >= 2500) {
+ phylink_set(mac_supported, 2500baseT_Full);
+ phylink_set(mac_supported, 2500baseX_Full);
+ }
+ } else if (priv->plat->has_xgmac) {
+ if (!max_speed || (max_speed >= 2500)) {
+ phylink_set(mac_supported, 2500baseT_Full);
+ phylink_set(mac_supported, 2500baseX_Full);
+ }
+ if (!max_speed || (max_speed >= 5000)) {
+ phylink_set(mac_supported, 5000baseT_Full);
+ }
+ if (!max_speed || (max_speed >= 10000)) {
+ phylink_set(mac_supported, 10000baseSR_Full);
+ phylink_set(mac_supported, 10000baseLR_Full);
+ phylink_set(mac_supported, 10000baseER_Full);
+ phylink_set(mac_supported, 10000baseLRM_Full);
+ phylink_set(mac_supported, 10000baseT_Full);
+ phylink_set(mac_supported, 10000baseKX4_Full);
+ phylink_set(mac_supported, 10000baseKR_Full);
+ }
+ if (!max_speed || (max_speed >= 25000)) {
+ phylink_set(mac_supported, 25000baseCR_Full);
+ phylink_set(mac_supported, 25000baseKR_Full);
+ phylink_set(mac_supported, 25000baseSR_Full);
+ }
+ if (!max_speed || (max_speed >= 40000)) {
+ phylink_set(mac_supported, 40000baseKR4_Full);
+ phylink_set(mac_supported, 40000baseCR4_Full);
+ phylink_set(mac_supported, 40000baseSR4_Full);
+ phylink_set(mac_supported, 40000baseLR4_Full);
+ }
+ if (!max_speed || (max_speed >= 50000)) {
+ phylink_set(mac_supported, 50000baseCR2_Full);
+ phylink_set(mac_supported, 50000baseKR2_Full);
+ phylink_set(mac_supported, 50000baseSR2_Full);
+ phylink_set(mac_supported, 50000baseKR_Full);
+ phylink_set(mac_supported, 50000baseSR_Full);
+ phylink_set(mac_supported, 50000baseCR_Full);
+ phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
+ phylink_set(mac_supported, 50000baseDR_Full);
+ }
+ if (!max_speed || (max_speed >= 100000)) {
+ phylink_set(mac_supported, 100000baseKR4_Full);
+ phylink_set(mac_supported, 100000baseSR4_Full);
+ phylink_set(mac_supported, 100000baseCR4_Full);
+ phylink_set(mac_supported, 100000baseLR4_ER4_Full);
+ phylink_set(mac_supported, 100000baseKR2_Full);
+ phylink_set(mac_supported, 100000baseSR2_Full);
+ phylink_set(mac_supported, 100000baseCR2_Full);
+ phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
+ phylink_set(mac_supported, 100000baseDR2_Full);
+ }
+ }
+
+	/* Half-duplex can only work with a single TX queue */
+ if (tx_cnt > 1) {
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 1000baseT_Half);
+ }
+
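+	/* mac_supported collects everything the MAC can do; mask collects
+	 * the modes that must be removed (speeds above the platform cap,
+	 * half duplex with multiple TX queues). Apply both to the
+	 * supported and advertising masks.
+	 */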
+ linkmode_and(supported, supported, mac_supported);
+ linkmode_andnot(supported, supported, mask);
+
+ linkmode_and(state->advertising, state->advertising, mac_supported);
+ linkmode_andnot(state->advertising, state->advertising, mask);
+
+ /* If PCS is supported, check which modes it supports. */
+ if (priv->hw->xpcs)
+ xpcs_validate(priv->hw->xpcs, supported, state);
+}
+
+static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ /* Nothing to do, xpcs_config() handles everything */
+}
+
+static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
+{
+ struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
+ enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
+ enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
+ bool *hs_enable = &fpe_cfg->hs_enable;
+
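+	/* On link-up with the FPE handshake enabled, kick off verification
+	 * by sending a verify mPacket; on link-down, reset both the local
+	 * and the link-partner FPE states.
+	 */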
+ if (is_up && *hs_enable) {
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
+ } else {
+ *lo_state = FPE_STATE_OFF;
+ *lp_state = FPE_STATE_OFF;
+ }
+}
+
+static void stmmac_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ stmmac_mac_set(priv, priv->ioaddr, false);
+ priv->eee_active = false;
+ priv->tx_lpi_enabled = false;
+ priv->eee_enabled = stmmac_eee_init(priv);
+ stmmac_set_eee_pls(priv, priv->hw, false);
+
+ if (priv->dma_cap.fpesel)
+ stmmac_fpe_link_state_handle(priv, false);
+}
+
+static void stmmac_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ u32 ctrl;
+
+ ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+ ctrl &= ~priv->hw->link.speed_mask;
+
+ if (interface == PHY_INTERFACE_MODE_USXGMII) {
+ switch (speed) {
+ case SPEED_10000:
+ ctrl |= priv->hw->link.xgmii.speed10000;
+ break;
+ case SPEED_5000:
+ ctrl |= priv->hw->link.xgmii.speed5000;
+ break;
+ case SPEED_2500:
+ ctrl |= priv->hw->link.xgmii.speed2500;
+ break;
+ default:
+ return;
+ }
+ } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
+ switch (speed) {
+ case SPEED_100000:
+ ctrl |= priv->hw->link.xlgmii.speed100000;
+ break;
+ case SPEED_50000:
+ ctrl |= priv->hw->link.xlgmii.speed50000;
+ break;
+ case SPEED_40000:
+ ctrl |= priv->hw->link.xlgmii.speed40000;
+ break;
+ case SPEED_25000:
+ ctrl |= priv->hw->link.xlgmii.speed25000;
+ break;
+ case SPEED_10000:
+ ctrl |= priv->hw->link.xgmii.speed10000;
+ break;
+ case SPEED_2500:
+ ctrl |= priv->hw->link.speed2500;
+ break;
+ case SPEED_1000:
+ ctrl |= priv->hw->link.speed1000;
+ break;
+ default:
+ return;
+ }
+ } else {
+ switch (speed) {
+ case SPEED_2500:
+ ctrl |= priv->hw->link.speed2500;
+ break;
+ case SPEED_1000:
+ ctrl |= priv->hw->link.speed1000;
+ break;
+ case SPEED_100:
+ ctrl |= priv->hw->link.speed100;
+ break;
+ case SPEED_10:
+ ctrl |= priv->hw->link.speed10;
+ break;
+ default:
+ return;
+ }
+ }
+
+ priv->speed = speed;
+
+ if (priv->plat->fix_mac_speed)
+ priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
+
+ if (!duplex)
+ ctrl &= ~priv->hw->link.duplex;
+ else
+ ctrl |= priv->hw->link.duplex;
+
+ /* Flow Control operation */
+ if (tx_pause && rx_pause)
+ stmmac_mac_flow_ctrl(priv, duplex);
+
+ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+
+ stmmac_mac_set(priv, priv->ioaddr, true);
+ if (phy && priv->dma_cap.eee) {
+ priv->eee_active = phy_init_eee(phy, 1) >= 0;
+ priv->eee_enabled = stmmac_eee_init(priv);
+ priv->tx_lpi_enabled = priv->eee_enabled;
+ stmmac_set_eee_pls(priv, priv->hw, true);
+ }
+
+ if (priv->dma_cap.fpesel)
+ stmmac_fpe_link_state_handle(priv, true);
+}
+
+static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
+ .validate = stmmac_validate,
+ .mac_config = stmmac_mac_config,
+ .mac_link_down = stmmac_mac_link_down,
+ .mac_link_up = stmmac_mac_link_up,
+};
+
+/**
+ * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
+ * @priv: driver private structure
+ * Description: verify whether the HW supports the Physical Coding Sublayer
+ * (PCS), an interface that can be used when the MAC is configured for the
+ * TBI, RTBI, or SGMII PHY interface.
+ */
+static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
+{
+ int interface = priv->plat->interface;
+
+ if (priv->dma_cap.pcs) {
+ if ((interface == PHY_INTERFACE_MODE_RGMII) ||
+ (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
+ (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
+ (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+ netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
+ priv->hw->pcs = STMMAC_PCS_RGMII;
+ } else if (interface == PHY_INTERFACE_MODE_SGMII) {
+ netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
+ priv->hw->pcs = STMMAC_PCS_SGMII;
+ }
+ }
+}
+
+/**
+ * stmmac_init_phy - PHY initialization
+ * @dev: net device structure
+ * Description: it initializes the driver's PHY state, and attaches the PHY
+ * to the MAC driver.
+ * Return value:
+ * 0 on success
+ */
+static int stmmac_init_phy(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct device_node *node;
+ int ret;
+
+ node = priv->plat->phylink_node;
+
+ if (node)
+ ret = phylink_of_phy_connect(priv->phylink, node, 0);
+
+	/* Some DT bindings do not set up the PHY handle. Let's try to
+	 * parse it manually.
+ */
+ if (!node || ret) {
+ int addr = priv->plat->phy_addr;
+ struct phy_device *phydev;
+
+ phydev = mdiobus_get_phy(priv->mii, addr);
+ if (!phydev) {
+ netdev_err(priv->dev, "no phy at addr %d\n", addr);
+ return -ENODEV;
+ }
+
+ ret = phylink_connect_phy(priv->phylink, phydev);
+ }
+
+ if (!priv->plat->pmt) {
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+ phylink_ethtool_get_wol(priv->phylink, &wol);
+ device_set_wakeup_capable(priv->device, !!wol.supported);
+ }
+
+ return ret;
+}
+
+static int stmmac_phy_setup(struct stmmac_priv *priv)
+{
+ struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
+ struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
+ int mode = priv->plat->phy_interface;
+ struct phylink *phylink;
+
+ priv->phylink_config.dev = &priv->dev->dev;
+ priv->phylink_config.type = PHYLINK_NETDEV;
+ priv->phylink_config.pcs_poll = true;
+ if (priv->plat->mdio_bus_data)
+ priv->phylink_config.ovr_an_inband =
+ mdio_bus_data->xpcs_an_inband;
+
+ if (!fwnode)
+ fwnode = dev_fwnode(priv->device);
+
+ phylink = phylink_create(&priv->phylink_config, fwnode,
+ mode, &stmmac_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ if (priv->hw->xpcs)
+ phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);
+
+ priv->phylink = phylink;
+ return 0;
+}
+
+static void stmmac_display_rx_rings(struct stmmac_priv *priv)
+{
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ unsigned int desc_size;
+ void *head_rx;
+ u32 queue;
+
+ /* Display RX rings */
+ for (queue = 0; queue < rx_cnt; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ pr_info("\tRX Queue %u rings\n", queue);
+
+ if (priv->extend_desc) {
+ head_rx = (void *)rx_q->dma_erx;
+ desc_size = sizeof(struct dma_extended_desc);
+ } else {
+ head_rx = (void *)rx_q->dma_rx;
+ desc_size = sizeof(struct dma_desc);
+ }
+
+ /* Display RX ring */
+ stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
+ rx_q->dma_rx_phy, desc_size);
+ }
+}
+
+static void stmmac_display_tx_rings(struct stmmac_priv *priv)
+{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ unsigned int desc_size;
+ void *head_tx;
+ u32 queue;
+
+ /* Display TX rings */
+ for (queue = 0; queue < tx_cnt; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+		pr_info("\tTX Queue %u rings\n", queue);
+
+ if (priv->extend_desc) {
+ head_tx = (void *)tx_q->dma_etx;
+ desc_size = sizeof(struct dma_extended_desc);
+ } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
+ head_tx = (void *)tx_q->dma_entx;
+ desc_size = sizeof(struct dma_edesc);
+ } else {
+ head_tx = (void *)tx_q->dma_tx;
+ desc_size = sizeof(struct dma_desc);
+ }
+
+ stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
+ tx_q->dma_tx_phy, desc_size);
+ }
+}
+
+static void stmmac_display_rings(struct stmmac_priv *priv)
+{
+ /* Display RX ring */
+ stmmac_display_rx_rings(priv);
+
+ /* Display TX ring */
+ stmmac_display_tx_rings(priv);
+}
+
+static int stmmac_set_bfsize(int mtu, int bufsize)
+{
+ int ret = bufsize;
+
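+	/* Pick the smallest buffer bucket that fits a whole frame; e.g. a
+	 * 3000-byte MTU falls in the [2 KiB, 4 KiB) range and selects
+	 * BUF_SIZE_4KiB.
+	 */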
+ if (mtu >= BUF_SIZE_8KiB)
+ ret = BUF_SIZE_16KiB;
+ else if (mtu >= BUF_SIZE_4KiB)
+ ret = BUF_SIZE_8KiB;
+ else if (mtu >= BUF_SIZE_2KiB)
+ ret = BUF_SIZE_4KiB;
+ else if (mtu > DEFAULT_BUFSIZE)
+ ret = BUF_SIZE_2KiB;
+ else
+ ret = DEFAULT_BUFSIZE;
+
+ return ret;
+}
+
+/**
+ * stmmac_clear_rx_descriptors - clear RX descriptors
+ * @priv: driver private structure
+ * @queue: RX queue index
+ * Description: this function is called to clear the RX descriptors
+ * whether basic or extended descriptors are used.
+ */
+static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int i;
+
+ /* Clear the RX descriptors */
+ for (i = 0; i < priv->dma_rx_size; i++)
+ if (priv->extend_desc)
+ stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
+ priv->use_riwt, priv->mode,
+ (i == priv->dma_rx_size - 1),
+ priv->dma_buf_sz);
+ else
+ stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
+ priv->use_riwt, priv->mode,
+ (i == priv->dma_rx_size - 1),
+ priv->dma_buf_sz);
+}
+
+/**
+ * stmmac_clear_tx_descriptors - clear tx descriptors
+ * @priv: driver private structure
+ * @queue: TX queue index.
+ * Description: this function is called to clear the TX descriptors
+ * whether basic or extended descriptors are used.
+ */
+static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ int i;
+
+ /* Clear the TX descriptors */
+ for (i = 0; i < priv->dma_tx_size; i++) {
+ int last = (i == (priv->dma_tx_size - 1));
+ struct dma_desc *p;
+
+ if (priv->extend_desc)
+ p = &tx_q->dma_etx[i].basic;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &tx_q->dma_entx[i].basic;
+ else
+ p = &tx_q->dma_tx[i];
+
+ stmmac_init_tx_desc(priv, p, priv->mode, last);
+ }
+}
+
+/**
+ * stmmac_clear_descriptors - clear descriptors
+ * @priv: driver private structure
+ * Description: this function is called to clear the TX and RX descriptors
+ * whether basic or extended descriptors are used.
+ */
+static void stmmac_clear_descriptors(struct stmmac_priv *priv)
+{
+ u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ /* Clear the RX descriptors */
+ for (queue = 0; queue < rx_queue_cnt; queue++)
+ stmmac_clear_rx_descriptors(priv, queue);
+
+ /* Clear the TX descriptors */
+ for (queue = 0; queue < tx_queue_cnt; queue++)
+ stmmac_clear_tx_descriptors(priv, queue);
+}
+
+/**
+ * stmmac_init_rx_buffers - init the RX descriptor buffer.
+ * @priv: driver private structure
+ * @p: descriptor pointer
+ * @i: descriptor index
+ * @flags: gfp flag
+ * @queue: RX queue index
+ * Description: this function is called to allocate a receive buffer, perform
+ * the DMA mapping and init the descriptor.
+ */
+static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
+ int i, gfp_t flags, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
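+	/* MACs with at most a 32-bit DMA address space can only reach
+	 * pages below 4 GiB, so restrict the page pool allocation.
+	 */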
+ if (priv->dma_cap.addr64 <= 32)
+ gfp |= GFP_DMA32;
+
+ if (!buf->page) {
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
+ if (!buf->page)
+ return -ENOMEM;
+ buf->page_offset = stmmac_rx_offset(priv);
+ }
+
+ if (priv->sph && !buf->sec_page) {
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
+ if (!buf->sec_page)
+ return -ENOMEM;
+
+ buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
+ } else {
+ buf->sec_page = NULL;
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
+ }
+
+ buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
+
+ stmmac_set_desc_addr(priv, p, buf->addr);
+ if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+ stmmac_init_desc3(priv, p);
+
+ return 0;
+}
+
+/**
+ * stmmac_free_rx_buffer - free RX dma buffers
+ * @priv: private structure
+ * @queue: RX queue index
+ * @i: buffer index.
+ */
+static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+
+ if (buf->page)
+ page_pool_put_full_page(rx_q->page_pool, buf->page, false);
+ buf->page = NULL;
+
+ if (buf->sec_page)
+ page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
+ buf->sec_page = NULL;
+}
+
+/**
+ * stmmac_free_tx_buffer - free TX dma buffers
+ * @priv: private structure
+ * @queue: TX queue index
+ * @i: buffer index.
+ */
+static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ if (tx_q->tx_skbuff_dma[i].buf &&
+ tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
+ if (tx_q->tx_skbuff_dma[i].map_as_page)
+ dma_unmap_page(priv->device,
+ tx_q->tx_skbuff_dma[i].buf,
+ tx_q->tx_skbuff_dma[i].len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(priv->device,
+ tx_q->tx_skbuff_dma[i].buf,
+ tx_q->tx_skbuff_dma[i].len,
+ DMA_TO_DEVICE);
+ }
+
+ if (tx_q->xdpf[i] &&
+ (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
+ tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
+ xdp_return_frame(tx_q->xdpf[i]);
+ tx_q->xdpf[i] = NULL;
+ }
+
+ if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
+ tx_q->xsk_frames_done++;
+
+ if (tx_q->tx_skbuff[i] &&
+ tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
+ dev_kfree_skb_any(tx_q->tx_skbuff[i]);
+ tx_q->tx_skbuff[i] = NULL;
+ }
+
+ tx_q->tx_skbuff_dma[i].buf = 0;
+ tx_q->tx_skbuff_dma[i].map_as_page = false;
+}
+
+/**
+ * dma_free_rx_skbufs - free RX dma buffers
+ * @priv: private structure
+ * @queue: RX queue index
+ */
+static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
+{
+ int i;
+
+ for (i = 0; i < priv->dma_rx_size; i++)
+ stmmac_free_rx_buffer(priv, queue, i);
+}
+
+static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
+ gfp_t flags)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int i;
+
+ for (i = 0; i < priv->dma_rx_size; i++) {
+ struct dma_desc *p;
+ int ret;
+
+ if (priv->extend_desc)
+ p = &((rx_q->dma_erx + i)->basic);
+ else
+ p = rx_q->dma_rx + i;
+
+ ret = stmmac_init_rx_buffers(priv, p, i, flags,
+ queue);
+ if (ret)
+ return ret;
+
+ rx_q->buf_alloc_num++;
+ }
+
+ return 0;
+}
+
+/**
+ * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
+ * @priv: private structure
+ * @queue: RX queue index
+ */
+static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int i;
+
+ for (i = 0; i < priv->dma_rx_size; i++) {
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+
+ if (!buf->xdp)
+ continue;
+
+ xsk_buff_free(buf->xdp);
+ buf->xdp = NULL;
+ }
+}
+
+static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int i;
+
+ for (i = 0; i < priv->dma_rx_size; i++) {
+ struct stmmac_rx_buffer *buf;
+ dma_addr_t dma_addr;
+ struct dma_desc *p;
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(rx_q->dma_erx + i);
+ else
+ p = rx_q->dma_rx + i;
+
+ buf = &rx_q->buf_pool[i];
+
+ buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
+ if (!buf->xdp)
+ return -ENOMEM;
+
+ dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
+ stmmac_set_desc_addr(priv, p, dma_addr);
+ rx_q->buf_alloc_num++;
+ }
+
+ return 0;
+}
+
+static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
+{
+ if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
+ return NULL;
+
+ return xsk_get_pool_from_qid(priv->dev, queue);
+}
+
+/**
+ * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
+ * @priv: driver private structure
+ * @queue: RX queue index
+ * @flags: gfp flag.
+ * Description: this function initializes the DMA RX descriptors
+ * and allocates the socket buffers. It supports the chained and ring
+ * modes.
+ */
+static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int ret;
+
+ netif_dbg(priv, probe, priv->dev,
+ "(%s) dma_rx_phy=0x%08x\n", __func__,
+ (u32)rx_q->dma_rx_phy);
+
+ stmmac_clear_rx_descriptors(priv, queue);
+
+ xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
+
+ rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
+
+ if (rx_q->xsk_pool) {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL));
+ netdev_info(priv->dev,
+ "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
+ rx_q->queue_index);
+ xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
+ } else {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ rx_q->page_pool));
+ netdev_info(priv->dev,
+ "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
+ rx_q->queue_index);
+ }
+
+ if (rx_q->xsk_pool) {
+ /* RX XDP ZC buffer pool may not be populated, e.g.
+ * xdpsock TX-only.
+ */
+ stmmac_alloc_rx_buffers_zc(priv, queue);
+ } else {
+ ret = stmmac_alloc_rx_buffers(priv, queue, flags);
+ if (ret < 0)
+ return -ENOMEM;
+ }
+
+ rx_q->cur_rx = 0;
+ rx_q->dirty_rx = 0;
+
+ /* Setup the chained descriptor addresses */
+ if (priv->mode == STMMAC_CHAIN_MODE) {
+ if (priv->extend_desc)
+ stmmac_mode_init(priv, rx_q->dma_erx,
+ rx_q->dma_rx_phy,
+ priv->dma_rx_size, 1);
+ else
+ stmmac_mode_init(priv, rx_q->dma_rx,
+ rx_q->dma_rx_phy,
+ priv->dma_rx_size, 0);
+ }
+
+ return 0;
+}
+
+static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ int ret;
+
+ /* RX INITIALIZATION */
+ netif_dbg(priv, probe, priv->dev,
+ "SKB addresses:\nskb\t\tskb data\tdma data\n");
+
+ for (queue = 0; queue < rx_count; queue++) {
+ ret = __init_dma_rx_desc_rings(priv, queue, flags);
+ if (ret)
+ goto err_init_rx_buffers;
+ }
+
+ return 0;
+
+err_init_rx_buffers:
+	/* queue is u32, so "queue >= 0" would always be true; loop until
+	 * the explicit break at queue == 0 instead.
+	 */
+	while (1) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ if (rx_q->xsk_pool)
+ dma_free_rx_xskbufs(priv, queue);
+ else
+ dma_free_rx_skbufs(priv, queue);
+
+ rx_q->buf_alloc_num = 0;
+ rx_q->xsk_pool = NULL;
+
+ if (queue == 0)
+ break;
+
+ queue--;
+ }
+
+ return ret;
+}
+
+/**
+ * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
+ * @priv: driver private structure
+ * @queue : TX queue index
+ * Description: this function initializes the DMA TX descriptors
+ * and allocates the socket buffers. It supports the chained and ring
+ * modes.
+ */
+static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ int i;
+
+ netif_dbg(priv, probe, priv->dev,
+ "(%s) dma_tx_phy=0x%08x\n", __func__,
+ (u32)tx_q->dma_tx_phy);
+
+ /* Setup the chained descriptor addresses */
+ if (priv->mode == STMMAC_CHAIN_MODE) {
+ if (priv->extend_desc)
+ stmmac_mode_init(priv, tx_q->dma_etx,
+ tx_q->dma_tx_phy,
+ priv->dma_tx_size, 1);
+ else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
+ stmmac_mode_init(priv, tx_q->dma_tx,
+ tx_q->dma_tx_phy,
+ priv->dma_tx_size, 0);
+ }
+
+ tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
+
+ for (i = 0; i < priv->dma_tx_size; i++) {
+ struct dma_desc *p;
+
+ if (priv->extend_desc)
+ p = &((tx_q->dma_etx + i)->basic);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &((tx_q->dma_entx + i)->basic);
+ else
+ p = tx_q->dma_tx + i;
+
+ stmmac_clear_desc(priv, p);
+
+ tx_q->tx_skbuff_dma[i].buf = 0;
+ tx_q->tx_skbuff_dma[i].map_as_page = false;
+ tx_q->tx_skbuff_dma[i].len = 0;
+ tx_q->tx_skbuff_dma[i].last_segment = false;
+ tx_q->tx_skbuff[i] = NULL;
+ }
+
+ tx_q->dirty_tx = 0;
+ tx_q->cur_tx = 0;
+ tx_q->mss = 0;
+
+ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
+
+ return 0;
+}
+
+static int init_dma_tx_desc_rings(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 tx_queue_cnt;
+ u32 queue;
+
+ tx_queue_cnt = priv->plat->tx_queues_to_use;
+
+ for (queue = 0; queue < tx_queue_cnt; queue++)
+ __init_dma_tx_desc_rings(priv, queue);
+
+ return 0;
+}
+
+/**
+ * init_dma_desc_rings - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * @flags: gfp flag.
+ * Description: this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers. It supports the chained and ring
+ * modes.
+ */
+static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = init_dma_rx_desc_rings(dev, flags);
+ if (ret)
+ return ret;
+
+ ret = init_dma_tx_desc_rings(dev);
+
+ stmmac_clear_descriptors(priv);
+
+ if (netif_msg_hw(priv))
+ stmmac_display_rings(priv);
+
+ return ret;
+}
+
+/**
+ * dma_free_tx_skbufs - free TX dma buffers
+ * @priv: private structure
+ * @queue: TX queue index
+ */
+static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ int i;
+
+ tx_q->xsk_frames_done = 0;
+
+ for (i = 0; i < priv->dma_tx_size; i++)
+ stmmac_free_tx_buffer(priv, queue, i);
+
+ if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
+ xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
+ tx_q->xsk_frames_done = 0;
+ tx_q->xsk_pool = NULL;
+ }
+}
+
+/**
+ * stmmac_free_tx_skbufs - free TX skb buffers
+ * @priv: private structure
+ */
+static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
+{
+ u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queue_cnt; queue++)
+ dma_free_tx_skbufs(priv, queue);
+}
+
+/**
+ * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
+ * @priv: private structure
+ * @queue: RX queue index
+ */
+static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ /* Release the DMA RX socket buffers */
+ if (rx_q->xsk_pool)
+ dma_free_rx_xskbufs(priv, queue);
+ else
+ dma_free_rx_skbufs(priv, queue);
+
+ rx_q->buf_alloc_num = 0;
+ rx_q->xsk_pool = NULL;
+
+ /* Free DMA regions of consistent memory previously allocated */
+ if (!priv->extend_desc)
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_desc),
+ rx_q->dma_rx, rx_q->dma_rx_phy);
+ else
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ rx_q->dma_erx, rx_q->dma_rx_phy);
+
+ if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_q->xdp_rxq);
+
+ kfree(rx_q->buf_pool);
+ if (rx_q->page_pool)
+ page_pool_destroy(rx_q->page_pool);
+}
+
+static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+ u32 rx_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+
+ /* Free RX queue resources */
+ for (queue = 0; queue < rx_count; queue++)
+ __free_dma_rx_desc_resources(priv, queue);
+}
+
+/**
+ * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
+ * @priv: private structure
+ * @queue: TX queue index
+ */
+static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ size_t size;
+ void *addr;
+
+ /* Release the DMA TX socket buffers */
+ dma_free_tx_skbufs(priv, queue);
+
+ if (priv->extend_desc) {
+ size = sizeof(struct dma_extended_desc);
+ addr = tx_q->dma_etx;
+ } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
+ size = sizeof(struct dma_edesc);
+ addr = tx_q->dma_entx;
+ } else {
+ size = sizeof(struct dma_desc);
+ addr = tx_q->dma_tx;
+ }
+
+ size *= priv->dma_tx_size;
+
+ dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
+
+ kfree(tx_q->tx_skbuff_dma);
+ kfree(tx_q->tx_skbuff);
+}
+
+static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ /* Free TX queue resources */
+ for (queue = 0; queue < tx_count; queue++)
+ __free_dma_tx_desc_resources(priv, queue);
+}
+
+/**
+ * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
+ * @priv: private structure
+ * @queue: RX queue index
+ * Description: according to which descriptor type is used (extended or
+ * basic), this function allocates the RX path resources for one queue;
+ * for example, it pre-allocates the RX buffers in order to allow the
+ * zero-copy mechanism.
+ */
+static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_channel *ch = &priv->channel[queue];
+ bool xdp_prog = stmmac_xdp_is_enabled(priv);
+ struct page_pool_params pp_params = { 0 };
+ unsigned int num_pages;
+ unsigned int napi_id;
+ int ret;
+
+ rx_q->queue_index = queue;
+ rx_q->priv_data = priv;
+
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = priv->dma_rx_size;
+ num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
+ pp_params.order = ilog2(num_pages);
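+	/* e.g. a hypothetical dma_buf_sz of 8 KiB on a 4 KiB-page system
+	 * gives num_pages = 2, i.e. order-1 (two-page) pool allocations.
+	 */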
+ pp_params.nid = dev_to_node(priv->device);
+ pp_params.dev = priv->device;
+ pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+ pp_params.offset = stmmac_rx_offset(priv);
+ pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
+
+ rx_q->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx_q->page_pool)) {
+ ret = PTR_ERR(rx_q->page_pool);
+ rx_q->page_pool = NULL;
+ return ret;
+ }
+
+ rx_q->buf_pool = kcalloc(priv->dma_rx_size,
+ sizeof(*rx_q->buf_pool),
+ GFP_KERNEL);
+ if (!rx_q->buf_pool)
+ return -ENOMEM;
+
+ if (priv->extend_desc) {
+ rx_q->dma_erx = dma_alloc_coherent(priv->device,
+ priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ &rx_q->dma_rx_phy,
+ GFP_KERNEL);
+ if (!rx_q->dma_erx)
+ return -ENOMEM;
+
+ } else {
+ rx_q->dma_rx = dma_alloc_coherent(priv->device,
+ priv->dma_rx_size *
+ sizeof(struct dma_desc),
+ &rx_q->dma_rx_phy,
+ GFP_KERNEL);
+ if (!rx_q->dma_rx)
+ return -ENOMEM;
+ }
+
+ if (stmmac_xdp_is_enabled(priv) &&
+ test_bit(queue, priv->af_xdp_zc_qps))
+ napi_id = ch->rxtx_napi.napi_id;
+ else
+ napi_id = ch->rx_napi.napi_id;
+
+ ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
+ rx_q->queue_index,
+ napi_id);
+ if (ret) {
+ netdev_err(priv->dev, "Failed to register xdp rxq info\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+ u32 rx_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ int ret;
+
+ /* RX queues buffers and DMA */
+ for (queue = 0; queue < rx_count; queue++) {
+ ret = __alloc_dma_rx_desc_resources(priv, queue);
+ if (ret)
+ goto err_dma;
+ }
+
+ return 0;
+
+err_dma:
+ free_dma_rx_desc_resources(priv);
+
+ return ret;
+}
+
+/**
+ * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
+ * @priv: private structure
+ * @queue: TX queue index
+ * Description: according to which descriptor type is used (extended or
+ * basic), this function allocates the TX path resources for one queue:
+ * the descriptor ring and the per-descriptor bookkeeping arrays.
+ */
+static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ size_t size;
+ void *addr;
+
+ tx_q->queue_index = queue;
+ tx_q->priv_data = priv;
+
+ tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
+ sizeof(*tx_q->tx_skbuff_dma),
+ GFP_KERNEL);
+ if (!tx_q->tx_skbuff_dma)
+ return -ENOMEM;
+
+ tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
+ sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!tx_q->tx_skbuff)
+ return -ENOMEM;
+
+ if (priv->extend_desc)
+ size = sizeof(struct dma_extended_desc);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ size = sizeof(struct dma_edesc);
+ else
+ size = sizeof(struct dma_desc);
+
+ size *= priv->dma_tx_size;
+
+ addr = dma_alloc_coherent(priv->device, size,
+ &tx_q->dma_tx_phy, GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+
+ if (priv->extend_desc)
+ tx_q->dma_etx = addr;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ tx_q->dma_entx = addr;
+ else
+ tx_q->dma_tx = addr;
+
+ return 0;
+}
+
+static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ u32 queue;
+ int ret;
+
+ /* TX queues buffers and DMA */
+ for (queue = 0; queue < tx_count; queue++) {
+ ret = __alloc_dma_tx_desc_resources(priv, queue);
+ if (ret)
+ goto err_dma;
+ }
+
+ return 0;
+
+err_dma:
+ free_dma_tx_desc_resources(priv);
+ return ret;
+}
+
+/**
+ * alloc_dma_desc_resources - alloc TX/RX resources.
+ * @priv: private structure
+ * Description: according to which descriptor type is used (extended or
+ * basic), this function allocates the resources for the TX and RX paths.
+ * For reception, for example, it pre-allocates the RX buffers in order to
+ * allow the zero-copy mechanism.
+ */
+static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+{
+ /* RX Allocation */
+ int ret = alloc_dma_rx_desc_resources(priv);
+
+ if (ret)
+ return ret;
+
+ ret = alloc_dma_tx_desc_resources(priv);
+
+ return ret;
+}
+
+/**
+ * free_dma_desc_resources - free dma desc resources
+ * @priv: private structure
+ */
+static void free_dma_desc_resources(struct stmmac_priv *priv)
+{
+ /* Release the DMA TX socket buffers */
+ free_dma_tx_desc_resources(priv);
+
+ /* Release the DMA RX socket buffers later
+ * to ensure all pending XDP_TX buffers are returned.
+ */
+ free_dma_rx_desc_resources(priv);
+}
+
+/**
+ * stmmac_mac_enable_rx_queues - Enable MAC rx queues
+ * @priv: driver private structure
+ * Description: It is used for enabling the rx queues in the MAC
+ */
+static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ int queue;
+ u8 mode;
+
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
+ stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
+ }
+}
+
+/**
+ * stmmac_start_rx_dma - start RX DMA channel
+ * @priv: driver private structure
+ * @chan: RX channel index
+ * Description:
+ * This starts a RX DMA channel
+ */
+static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
+ stmmac_start_rx(priv, priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_start_tx_dma - start TX DMA channel
+ * @priv: driver private structure
+ * @chan: TX channel index
+ * Description:
+ * This starts a TX DMA channel
+ */
+static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
+ stmmac_start_tx(priv, priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_stop_rx_dma - stop RX DMA channel
+ * @priv: driver private structure
+ * @chan: RX channel index
+ * Description:
+ * This stops a RX DMA channel
+ */
+static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
+ stmmac_stop_rx(priv, priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_stop_tx_dma - stop TX DMA channel
+ * @priv: driver private structure
+ * @chan: TX channel index
+ * Description:
+ * This stops a TX DMA channel
+ */
+static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
+ stmmac_stop_tx(priv, priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_start_all_dma - start all RX and TX DMA channels
+ * @priv: driver private structure
+ * Description:
+ * This starts all the RX and TX DMA channels
+ */
+static void stmmac_start_all_dma(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 chan = 0;
+
+ for (chan = 0; chan < rx_channels_count; chan++)
+ stmmac_start_rx_dma(priv, chan);
+
+ for (chan = 0; chan < tx_channels_count; chan++)
+ stmmac_start_tx_dma(priv, chan);
+}
+
+/**
+ * stmmac_stop_all_dma - stop all RX and TX DMA channels
+ * @priv: driver private structure
+ * Description:
+ * This stops the RX and TX DMA channels
+ */
+static void stmmac_stop_all_dma(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 chan = 0;
+
+ for (chan = 0; chan < rx_channels_count; chan++)
+ stmmac_stop_rx_dma(priv, chan);
+
+ for (chan = 0; chan < tx_channels_count; chan++)
+ stmmac_stop_tx_dma(priv, chan);
+}
+
+/**
+ * stmmac_dma_operation_mode - HW DMA operation mode
+ * @priv: driver private structure
+ * Description: it is used for configuring the DMA operation mode register in
+ * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
+ */
+static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ int rxfifosz = priv->plat->rx_fifo_size;
+ int txfifosz = priv->plat->tx_fifo_size;
+ u32 txmode = 0;
+ u32 rxmode = 0;
+ u32 chan = 0;
+ u8 qmode = 0;
+
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+
+ /* Adjust for real per queue fifo size */
+ rxfifosz /= rx_channels_count;
+ txfifosz /= tx_channels_count;
+
+ if (priv->plat->force_thresh_dma_mode) {
+ txmode = tc;
+ rxmode = tc;
+ } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
+ /*
+ * In case of GMAC, SF mode can be enabled
+ * to perform the TX COE in HW. This depends on:
+		 * 1) TX COE being actually supported, and
+		 * 2) there being no bugged Jumbo frame support
+		 *    that requires not inserting the csum in the TDES.
+ */
+ txmode = SF_DMA_MODE;
+ rxmode = SF_DMA_MODE;
+ priv->xstats.threshold = SF_DMA_MODE;
+ } else {
+ txmode = tc;
+ rxmode = SF_DMA_MODE;
+ }
+
+ /* configure all channels */
+ for (chan = 0; chan < rx_channels_count; chan++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+ u32 buf_size;
+
+ qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
+
+ stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
+ rxfifosz, qmode);
+
+ if (rx_q->xsk_pool) {
+ buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ buf_size,
+ chan);
+ } else {
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ priv->dma_buf_sz,
+ chan);
+ }
+ }
+
+ for (chan = 0; chan < tx_channels_count; chan++) {
+ qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
+
+ stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
+ txfifosz, qmode);
+ }
+}
+
+static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
+{
+ struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct xsk_buff_pool *pool = tx_q->xsk_pool;
+ unsigned int entry = tx_q->cur_tx;
+ struct dma_desc *tx_desc = NULL;
+ struct xdp_desc xdp_desc;
+ bool work_done = true;
+
+ /* Avoids TX time-out as we are sharing with slow path */
+ txq_trans_cond_update(nq);
+
+ budget = min(budget, stmmac_tx_avail(priv, queue));
+
+ while (budget-- > 0) {
+ dma_addr_t dma_addr;
+ bool set_ic;
+
+		/* We are sharing with the slow path and stop XSK TX desc
+		 * submission when the available TX ring space drops below
+		 * the threshold.
+ */
+ if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
+ !netif_carrier_ok(priv->dev)) {
+ work_done = false;
+ break;
+ }
+
+ if (!xsk_tx_peek_desc(pool, &xdp_desc))
+ break;
+
+ if (likely(priv->extend_desc))
+ tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ tx_desc = &tx_q->dma_entx[entry].basic;
+ else
+ tx_desc = tx_q->dma_tx + entry;
+
+ dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+ xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
+
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
+
+		/* To return the XDP buffer to the XSK pool, we simply call
+ * xsk_tx_completed(), so we don't need to fill up
+ * 'buf' and 'xdpf'.
+ */
+ tx_q->tx_skbuff_dma[entry].buf = 0;
+ tx_q->xdpf[entry] = NULL;
+
+ tx_q->tx_skbuff_dma[entry].map_as_page = false;
+ tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
+ tx_q->tx_skbuff_dma[entry].last_segment = true;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = false;
+
+ stmmac_set_desc_addr(priv, tx_desc, dma_addr);
+
+ tx_q->tx_count_frames++;
+
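+		/* Request an Interrupt-on-Completion only once every
+		 * tx_coal_frames descriptors to limit TX completion IRQs.
+		 */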
+ if (!priv->tx_coal_frames[queue])
+ set_ic = false;
+ else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
+ tx_q->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, tx_desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
+ stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
+ true, priv->mode, true, true,
+ xdp_desc.len);
+
+ stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ entry = tx_q->cur_tx;
+ }
+
+ if (tx_desc) {
+ stmmac_flush_tx_descriptors(priv, queue);
+ xsk_tx_release(pool);
+ }
+
+	/* Return true if both of the following conditions are met:
+	 *  a) TX budget is still available, and
+	 *  b) work_done == true, i.e. the XSK TX desc peek came up empty
+	 *     (no more pending XSK TX descriptors for transmission).
+	 */
+ return !!budget && work_done;
+}
+
+static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
+{
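+	/* On threshold-related TX errors, raise the threshold 'tc' in
+	 * steps of 64 while it is still at or below 256, so the DMA
+	 * buffers more data before starting transmission, unless SF mode
+	 * is already in use.
+	 */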
+ if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
+ tc += 64;
+
+ if (priv->plat->force_thresh_dma_mode)
+ stmmac_set_dma_operation_mode(priv, tc, tc, chan);
+ else
+ stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
+ chan);
+
+ priv->xstats.threshold = tc;
+ }
+}
+
+/**
+ * stmmac_tx_clean - to manage the transmission completion
+ * @priv: driver private structure
+ * @budget: napi budget limiting this function's packet handling
+ * @queue: TX queue index
+ * Description: it reclaims the transmit resources after transmission completes.
+ */
+static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+ unsigned int entry, xmits = 0, count = 0;
+
+ __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
+
+ priv->xstats.tx_clean++;
+
+ tx_q->xsk_frames_done = 0;
+
+ entry = tx_q->dirty_tx;
+
+	/* Try to clean all completed TX frames in one shot */
+ while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+ struct dma_desc *p;
+ int status;
+
+ if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
+ tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
+ xdpf = tx_q->xdpf[entry];
+ skb = NULL;
+ } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
+ xdpf = NULL;
+ skb = tx_q->tx_skbuff[entry];
+ } else {
+ xdpf = NULL;
+ skb = NULL;
+ }
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &tx_q->dma_entx[entry].basic;
+ else
+ p = tx_q->dma_tx + entry;
+
+ status = stmmac_tx_status(priv, &priv->dev->stats,
+ &priv->xstats, p, priv->ioaddr);
+ /* Check if the descriptor is owned by the DMA */
+ if (unlikely(status & tx_dma_own))
+ break;
+
+ count++;
+
+ /* Make sure descriptor fields are read after reading
+ * the own bit.
+ */
+ dma_rmb();
+
+ /* Just consider the last segment and ...*/
+ if (likely(!(status & tx_not_ls))) {
+ /* ... verify the status error condition */
+ if (unlikely(status & tx_err)) {
+ priv->dev->stats.tx_errors++;
+ if (unlikely(status & tx_err_bump_tc))
+ stmmac_bump_dma_threshold(priv, queue);
+ } else {
+ priv->dev->stats.tx_packets++;
+ priv->xstats.tx_pkt_n++;
+ priv->xstats.txq_stats[queue].tx_pkt_n++;
+ }
+ if (skb)
+ stmmac_get_tx_hwtstamp(priv, p, skb);
+ }
+
+ if (likely(tx_q->tx_skbuff_dma[entry].buf &&
+ tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
+ if (tx_q->tx_skbuff_dma[entry].map_as_page)
+ dma_unmap_page(priv->device,
+ tx_q->tx_skbuff_dma[entry].buf,
+ tx_q->tx_skbuff_dma[entry].len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(priv->device,
+ tx_q->tx_skbuff_dma[entry].buf,
+ tx_q->tx_skbuff_dma[entry].len,
+ DMA_TO_DEVICE);
+ tx_q->tx_skbuff_dma[entry].buf = 0;
+ tx_q->tx_skbuff_dma[entry].len = 0;
+ tx_q->tx_skbuff_dma[entry].map_as_page = false;
+ }
+
+ stmmac_clean_desc3(priv, tx_q, p);
+
+ tx_q->tx_skbuff_dma[entry].last_segment = false;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = false;
+
+ if (xdpf &&
+ tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
+ xdp_return_frame_rx_napi(xdpf);
+ tx_q->xdpf[entry] = NULL;
+ }
+
+ if (xdpf &&
+ tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
+ xdp_return_frame(xdpf);
+ tx_q->xdpf[entry] = NULL;
+ }
+
+ if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
+ tx_q->xsk_frames_done++;
+
+ if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
+ if (likely(skb)) {
+ pkts_compl++;
+ bytes_compl += skb->len;
+ dev_consume_skb_any(skb);
+ tx_q->tx_skbuff[entry] = NULL;
+ }
+ }
+
+ stmmac_release_tx_desc(priv, p, priv->mode);
+
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ }
+ tx_q->dirty_tx = entry;
+
+ netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
+ pkts_compl, bytes_compl);
+
+ if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
+ queue))) &&
+ stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
+
+ netif_dbg(priv, tx_done, priv->dev,
+ "%s: restart transmit\n", __func__);
+ netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
+ }
+
+ if (tx_q->xsk_pool) {
+ bool work_done;
+
+ if (tx_q->xsk_frames_done)
+ xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
+
+ if (xsk_uses_need_wakeup(tx_q->xsk_pool))
+ xsk_set_tx_need_wakeup(tx_q->xsk_pool);
+
+ /* For XSK TX, we try to send as many as possible.
+ * If XSK work done (XSK TX desc empty and budget still
+ * available), return "budget - 1" to reenable TX IRQ.
+ * Else, return "budget" to make NAPI continue polling.
+ */
+ work_done = stmmac_xdp_xmit_zc(priv, queue,
+ STMMAC_XSK_TX_BUDGET_MAX);
+ if (work_done)
+ xmits = budget - 1;
+ else
+ xmits = budget;
+ }
+
+ if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
+ priv->eee_sw_timer_en) {
+ stmmac_enable_eee_mode(priv);
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ }
+
+	/* We still have pending packets, so re-arm the coalescing timer */
+ if (tx_q->dirty_tx != tx_q->cur_tx)
+ hrtimer_start(&tx_q->txtimer,
+ STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
+ HRTIMER_MODE_REL);
+
+ __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
+
+ /* Combine decisions from TX clean and XSK TX */
+ return max(count, xmits);
+}
+
+/**
+ * stmmac_tx_err - to manage the tx error
+ * @priv: driver private structure
+ * @chan: channel index
+ * Description: it cleans the descriptors and restarts the transmission
+ * in case of transmission errors.
+ */
+static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
+
+ stmmac_stop_tx_dma(priv, chan);
+ dma_free_tx_skbufs(priv, chan);
+ stmmac_clear_tx_descriptors(priv, chan);
+ tx_q->dirty_tx = 0;
+ tx_q->cur_tx = 0;
+ tx_q->mss = 0;
+ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
+ stmmac_start_tx_dma(priv, chan);
+
+ priv->dev->stats.tx_errors++;
+ netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
+}
+
+/**
+ * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
+ * @priv: driver private structure
+ * @txmode: TX operating mode
+ * @rxmode: RX operating mode
+ * @chan: channel index
+ * Description: it is used for configuring the DMA operation mode at
+ * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
+ * mode.
+ */
+static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
+ u32 rxmode, u32 chan)
+{
+ u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
+ u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ int rxfifosz = priv->plat->rx_fifo_size;
+ int txfifosz = priv->plat->tx_fifo_size;
+
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+
+ /* Adjust for real per queue fifo size */
+ rxfifosz /= rx_channels_count;
+ txfifosz /= tx_channels_count;
+
+ stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
+ stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
+}
+
+static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
+{
+ int ret;
+
+ ret = stmmac_safety_feat_irq_status(priv, priv->dev,
+ priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
+ if (ret && (ret != -EINVAL)) {
+ stmmac_global_err(priv);
+ return true;
+ }
+
+ return false;
+}
+
+static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
+{
+ int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
+ &priv->xstats, chan, dir);
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_channel *ch = &priv->channel[chan];
+ struct napi_struct *rx_napi;
+ struct napi_struct *tx_napi;
+ unsigned long flags;
+
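+	/* Queues backed by an XSK pool are serviced by the combined
+	 * rxtx NAPI instance; regular queues use the dedicated RX and
+	 * TX NAPI instances.
+	 */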
+ rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
+ tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
+
+ if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
+ if (napi_schedule_prep(rx_napi)) {
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ __napi_schedule(rx_napi);
+ }
+ }
+
+ if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
+ if (napi_schedule_prep(tx_napi)) {
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ __napi_schedule(tx_napi);
+ }
+ }
+
+ return status;
+}
+
+/**
+ * stmmac_dma_interrupt - DMA ISR
+ * @priv: driver private structure
+ * Description: this is the DMA ISR. It is called by the main ISR.
+ * It calls the dwmac DMA routine and schedules the poll method when there
+ * is work to be done.
+ */
+static void stmmac_dma_interrupt(struct stmmac_priv *priv)
+{
+ u32 tx_channel_count = priv->plat->tx_queues_to_use;
+ u32 rx_channel_count = priv->plat->rx_queues_to_use;
+ u32 channels_to_check = tx_channel_count > rx_channel_count ?
+ tx_channel_count : rx_channel_count;
+ u32 chan;
+ int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
+
+ /* Make sure we never check beyond our status buffer. */
+ if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
+ channels_to_check = ARRAY_SIZE(status);
+
+ for (chan = 0; chan < channels_to_check; chan++)
+ status[chan] = stmmac_napi_check(priv, chan,
+ DMA_DIR_RXTX);
+
+ for (chan = 0; chan < tx_channel_count; chan++) {
+ if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
+ /* Try to bump up the dma threshold on this failure */
+ stmmac_bump_dma_threshold(priv, chan);
+ } else if (unlikely(status[chan] == tx_hard_error)) {
+ stmmac_tx_err(priv, chan);
+ }
+ }
+}
+
+/**
+ * stmmac_mmc_setup - setup the MAC Management Counters (MMC)
+ * @priv: driver private structure
+ * Description: this masks the MMC irq, since the counters are managed in SW.
+ */
+static void stmmac_mmc_setup(struct stmmac_priv *priv)
+{
+ unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
+ MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
+
+ stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
+
+ if (priv->dma_cap.rmon) {
+ stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
+ memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+ } else
+ netdev_info(priv->dev, "No MAC Management Counters available\n");
+}
+
+/**
+ * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
+ * @priv: driver private structure
+ * Description:
+ * newer GMAC chip generations have a register that indicates the
+ * presence of optional features/functions.
+ * It can also be used to override values passed through the
+ * platform, as needed for old MAC10/100 and GMAC chips.
+ */
+static int stmmac_get_hw_features(struct stmmac_priv *priv)
+{
+ return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
+}
+
+/**
+ * stmmac_check_ether_addr - check if the MAC addr is valid
+ * @priv: driver private structure
+ * Description:
+ * it verifies that the MAC address is valid; if not, it reads the address
+ * from the HW and, failing that, generates a random MAC address.
+ */
+static void stmmac_check_ether_addr(struct stmmac_priv *priv)
+{
+ u8 addr[ETH_ALEN];
+
+ if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+ stmmac_get_umac_addr(priv, priv->hw, addr, 0);
+ if (is_valid_ether_addr(addr))
+ eth_hw_addr_set(priv->dev, addr);
+ else
+ eth_hw_addr_random(priv->dev);
+ dev_info(priv->device, "device MAC address %pM\n",
+ priv->dev->dev_addr);
+ }
+}
+
+/**
+ * stmmac_init_dma_engine - DMA init.
+ * @priv: driver private structure
+ * Description:
+ * It inits the DMA by invoking the specific MAC/GMAC callback.
+ * Some DMA parameters can be passed from the platform;
+ * if they are not passed, a default is kept for the MAC or GMAC.
+ */
+static int stmmac_init_dma_engine(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+ struct stmmac_rx_queue *rx_q;
+ struct stmmac_tx_queue *tx_q;
+ u32 chan = 0;
+ int atds = 0;
+ int ret = 0;
+
+ if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
+ dev_err(priv->device, "Invalid DMA configuration\n");
+ return -EINVAL;
+ }
+
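+	/* Alternate descriptor size (ATDS) tells the DMA to fetch the
+	 * larger extended descriptors; it only applies in ring mode.
+	 */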
+ if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
+ atds = 1;
+
+ ret = stmmac_reset(priv, priv->ioaddr);
+ if (ret) {
+ dev_err(priv->device, "Failed to reset the dma\n");
+ return ret;
+ }
+
+ /* DMA Configuration */
+ stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+
+ if (priv->plat->axi)
+ stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
+
+ /* DMA CSR Channel configuration */
+ for (chan = 0; chan < dma_csr_ch; chan++)
+ stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+
+ /* DMA RX Channel Configuration */
+ for (chan = 0; chan < rx_channels_count; chan++) {
+ rx_q = &priv->rx_queue[chan];
+
+ stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, chan);
+
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (rx_q->buf_alloc_num *
+ sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr, chan);
+ }
+
+ /* DMA TX Channel Configuration */
+ for (chan = 0; chan < tx_channels_count; chan++) {
+ tx_q = &priv->tx_queue[chan];
+
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+ tx_q->tx_tail_addr, chan);
+ }
+
+ return ret;
+}
+
+static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ hrtimer_start(&tx_q->txtimer,
+ STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
+ HRTIMER_MODE_REL);
+}
+
+/**
+ * stmmac_tx_timer - mitigation sw timer for tx.
+ * @t: data pointer
+ * Description:
+ * This is the timer handler to directly invoke the stmmac_tx_clean.
+ */
+static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
+{
+ struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
+ struct stmmac_priv *priv = tx_q->priv_data;
+ struct stmmac_channel *ch;
+ struct napi_struct *napi;
+
+ ch = &priv->channel[tx_q->queue_index];
+ napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
+
+ if (likely(napi_schedule_prep(napi))) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ __napi_schedule(napi);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * stmmac_init_coalesce - init mitigation options.
+ * @priv: driver private structure
+ * Description:
+ * This inits the coalesce parameters: i.e. timer rate,
+ * timer handler and default threshold used for enabling the
+ * interrupt on completion bit.
+ */
+static void stmmac_init_coalesce(struct stmmac_priv *priv)
+{
+ u32 tx_channel_count = priv->plat->tx_queues_to_use;
+ u32 rx_channel_count = priv->plat->rx_queues_to_use;
+ u32 chan;
+
+ for (chan = 0; chan < tx_channel_count; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+
+ priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
+ priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
+
+ hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ tx_q->txtimer.function = stmmac_tx_timer;
+ }
+
+ for (chan = 0; chan < rx_channel_count; chan++)
+ priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
+}
+
+static void stmmac_set_rings_length(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 chan;
+
+ /* set TX ring length */
+ for (chan = 0; chan < tx_channels_count; chan++)
+ stmmac_set_tx_ring_len(priv, priv->ioaddr,
+ (priv->dma_tx_size - 1), chan);
+
+ /* set RX ring length */
+ for (chan = 0; chan < rx_channels_count; chan++)
+ stmmac_set_rx_ring_len(priv, priv->ioaddr,
+ (priv->dma_rx_size - 1), chan);
+}
+
+/**
+ * stmmac_set_tx_queue_weight - Set TX queue weight
+ * @priv: driver private structure
+ * Description: It is used for setting the TX queue weights
+ */
+static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 weight;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queues_count; queue++) {
+ weight = priv->plat->tx_queues_cfg[queue].weight;
+ stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
+ }
+}
+
+/**
+ * stmmac_configure_cbs - Configure CBS in TX queue
+ * @priv: driver private structure
+ * Description: It is used for configuring CBS in AVB TX queues
+ */
+static void stmmac_configure_cbs(struct stmmac_priv *priv)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 mode_to_use;
+ u32 queue;
+
+ /* queue 0 is reserved for legacy traffic */
+ for (queue = 1; queue < tx_queues_count; queue++) {
+ mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+ if (mode_to_use == MTL_QUEUE_DCB)
+ continue;
+
+ stmmac_config_cbs(priv, priv->hw,
+ priv->plat->tx_queues_cfg[queue].send_slope,
+ priv->plat->tx_queues_cfg[queue].idle_slope,
+ priv->plat->tx_queues_cfg[queue].high_credit,
+ priv->plat->tx_queues_cfg[queue].low_credit,
+ queue);
+ }
+}
+
+/**
+ * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
+ * @priv: driver private structure
+ * Description: It is used for mapping RX queues to RX dma channels
+ */
+static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ u32 chan;
+
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ chan = priv->plat->rx_queues_cfg[queue].chan;
+ stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
+ }
+}
+
+/**
+ * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
+ * @priv: driver private structure
+ * Description: It is used for configuring the RX Queue Priority
+ */
+static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ u32 prio;
+
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ if (!priv->plat->rx_queues_cfg[queue].use_prio)
+ continue;
+
+ prio = priv->plat->rx_queues_cfg[queue].prio;
+ stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
+ }
+}
+
+/**
+ * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
+ * @priv: driver private structure
+ * Description: It is used for configuring the TX Queue Priority
+ */
+static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 queue;
+ u32 prio;
+
+ for (queue = 0; queue < tx_queues_count; queue++) {
+ if (!priv->plat->tx_queues_cfg[queue].use_prio)
+ continue;
+
+ prio = priv->plat->tx_queues_cfg[queue].prio;
+ stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
+ }
+}
+
+/**
+ * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
+ * @priv: driver private structure
+ * Description: It is used for configuring the RX queue routing
+ */
+static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ u8 packet;
+
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ /* no specific packet type routing specified for the queue */
+ if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
+ continue;
+
+ packet = priv->plat->rx_queues_cfg[queue].pkt_route;
+ stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
+ }
+}
+
+static void stmmac_mac_config_rss(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
+ priv->rss.enable = false;
+ return;
+ }
+
+ if (priv->dev->features & NETIF_F_RXHASH)
+ priv->rss.enable = true;
+ else
+ priv->rss.enable = false;
+
+ stmmac_rss_configure(priv, priv->hw, &priv->rss,
+ priv->plat->rx_queues_to_use);
+}
+
+/**
+ * stmmac_mtl_configuration - Configure MTL
+ * @priv: driver private structure
+ * Description: It is used for configuring MTL
+ */
+static void stmmac_mtl_configuration(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+
+ if (tx_queues_count > 1)
+ stmmac_set_tx_queue_weight(priv);
+
+ /* Configure MTL RX algorithms */
+ if (rx_queues_count > 1)
+ stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
+ priv->plat->rx_sched_algorithm);
+
+ /* Configure MTL TX algorithms */
+ if (tx_queues_count > 1)
+ stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
+ priv->plat->tx_sched_algorithm);
+
+ /* Configure CBS in AVB TX queues */
+ if (tx_queues_count > 1)
+ stmmac_configure_cbs(priv);
+
+ /* Map RX MTL to DMA channels */
+ stmmac_rx_queue_dma_chan_map(priv);
+
+ /* Enable MAC RX Queues */
+ stmmac_mac_enable_rx_queues(priv);
+
+ /* Set RX priorities */
+ if (rx_queues_count > 1)
+ stmmac_mac_config_rx_queues_prio(priv);
+
+ /* Set TX priorities */
+ if (tx_queues_count > 1)
+ stmmac_mac_config_tx_queues_prio(priv);
+
+ /* Set RX routing */
+ if (rx_queues_count > 1)
+ stmmac_mac_config_rx_queues_routing(priv);
+
+ /* Receive Side Scaling */
+ if (rx_queues_count > 1)
+ stmmac_mac_config_rss(priv);
+}
+
+static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
+{
+ if (priv->dma_cap.asp) {
+ netdev_info(priv->dev, "Enabling Safety Features\n");
+ stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
+ priv->plat->safety_feat_cfg);
+ } else {
+ netdev_info(priv->dev, "No Safety Features support found\n");
+ }
+}
+
+static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
+{
+ char *name;
+
+ clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
+ clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
+
+ name = priv->wq_name;
+ sprintf(name, "%s-fpe", priv->dev->name);
+
+ priv->fpe_wq = create_singlethread_workqueue(name);
+ if (!priv->fpe_wq) {
+ netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
+
+ return -ENOMEM;
+ }
+ netdev_info(priv->dev, "FPE workqueue start");
+
+ return 0;
+}
+
+/**
+ * stmmac_hw_setup - setup mac in a usable state.
+ * @dev : pointer to the device structure.
+ * @init_ptp: initialize PTP if set
+ * Description:
+ * this is the main function to setup the HW in a usable state: the DMA
+ * engine is reset, and the core registers are configured (e.g. AXI,
+ * checksum features, timers). On return, the DMA is ready to start
+ * receiving and transmitting.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ bool sph_en;
+ u32 chan;
+ int ret;
+
+ /* DMA initialization and SW reset */
+ ret = stmmac_init_dma_engine(priv);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
+ __func__);
+ return ret;
+ }
+
+ /* Copy the MAC addr into the HW */
+ stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
+
+ /* PS and related bits will be programmed according to the speed */
+ if (priv->hw->pcs) {
+ int speed = priv->plat->mac_port_sel_speed;
+
+ if ((speed == SPEED_10) || (speed == SPEED_100) ||
+ (speed == SPEED_1000)) {
+ priv->hw->ps = speed;
+ } else {
+ dev_warn(priv->device, "invalid port speed\n");
+ priv->hw->ps = 0;
+ }
+ }
+
+ /* Initialize the MAC Core */
+ stmmac_core_init(priv, priv->hw, dev);
+
+ /* Initialize MTL */
+ stmmac_mtl_configuration(priv);
+
+ /* Initialize Safety Features */
+ stmmac_safety_feat_configuration(priv);
+
+ ret = stmmac_rx_ipc(priv, priv->hw);
+ if (!ret) {
+ netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
+ priv->plat->rx_coe = STMMAC_RX_COE_NONE;
+ priv->hw->rx_csum = 0;
+ }
+
+ /* Enable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, true);
+
+ /* Set the HW DMA mode and the COE */
+ stmmac_dma_operation_mode(priv);
+
+ stmmac_mmc_setup(priv);
+
+ if (init_ptp) {
+ ret = stmmac_init_ptp(priv);
+ if (ret == -EOPNOTSUPP)
+ netdev_warn(priv->dev, "PTP not supported by HW\n");
+ else if (ret)
+ netdev_warn(priv->dev, "PTP init failed\n");
+ }
+
+ priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
+
+ /* Convert the timer from msec to usec */
+ if (!priv->tx_lpi_timer)
+ priv->tx_lpi_timer = eee_timer * 1000;
+
+ if (priv->use_riwt) {
+ u32 queue;
+
+ for (queue = 0; queue < rx_cnt; queue++) {
+ if (!priv->rx_riwt[queue])
+ priv->rx_riwt[queue] = DEF_DMA_RIWT;
+
+ stmmac_rx_watchdog(priv, priv->ioaddr,
+ priv->rx_riwt[queue], queue);
+ }
+ }
+
+ if (priv->hw->pcs)
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
+
+ /* set TX and RX rings length */
+ stmmac_set_rings_length(priv);
+
+ /* Enable TSO */
+ if (priv->tso) {
+ for (chan = 0; chan < tx_cnt; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+
+ /* TSO and TBS cannot co-exist */
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ continue;
+
+ stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
+ }
+ }
+
+ /* Enable Split Header */
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ for (chan = 0; chan < rx_cnt; chan++)
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+
+ /* VLAN Tag Insertion */
+ if (priv->dma_cap.vlins)
+ stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
+
+ /* TBS */
+ for (chan = 0; chan < tx_cnt; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
+
+ stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
+ }
+
+ /* Configure real RX and TX queues */
+ netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
+ netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
+
+ /* Start the ball rolling... */
+ stmmac_start_all_dma(priv);
+
+ if (priv->dma_cap.fpesel) {
+ stmmac_fpe_start_wq(priv);
+
+ if (priv->plat->fpe_cfg->enable)
+ stmmac_fpe_handshake(priv, true);
+ }
+
+ return 0;
+}
+
+static void stmmac_hw_teardown(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
+}
+
+static void stmmac_free_irq(struct net_device *dev,
+ enum request_irq_err irq_err, int irq_idx)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int j;
+
+ switch (irq_err) {
+ case REQ_IRQ_ERR_ALL:
+ irq_idx = priv->plat->tx_queues_to_use;
+ fallthrough;
+ case REQ_IRQ_ERR_TX:
+ for (j = irq_idx - 1; j >= 0; j--) {
+ if (priv->tx_irq[j] > 0) {
+ irq_set_affinity_hint(priv->tx_irq[j], NULL);
+ free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
+ }
+ }
+ irq_idx = priv->plat->rx_queues_to_use;
+ fallthrough;
+ case REQ_IRQ_ERR_RX:
+ for (j = irq_idx - 1; j >= 0; j--) {
+ if (priv->rx_irq[j] > 0) {
+ irq_set_affinity_hint(priv->rx_irq[j], NULL);
+ free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
+ }
+ }
+
+ if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
+ free_irq(priv->sfty_ue_irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_SFTY_UE:
+ if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
+ free_irq(priv->sfty_ce_irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_SFTY_CE:
+ if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
+ free_irq(priv->lpi_irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_LPI:
+ if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
+ free_irq(priv->wol_irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_WOL:
+ free_irq(dev->irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_MAC:
+ case REQ_IRQ_ERR_NO:
+ /* If the MAC IRQ request failed, there are no more IRQs to free */
+ break;
+ }
+}
+
+static int stmmac_request_irq_multi_msi(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ enum request_irq_err irq_err;
+ cpumask_t cpu_mask;
+ int irq_idx = 0;
+ char *int_name;
+ int ret;
+ int i;
+
+ /* For common interrupt */
+ int_name = priv->int_name_mac;
+ sprintf(int_name, "%s:%s", dev->name, "mac");
+ ret = request_irq(dev->irq, stmmac_mac_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc mac MSI %d (error: %d)\n",
+ __func__, dev->irq, ret);
+ irq_err = REQ_IRQ_ERR_MAC;
+ goto irq_error;
+ }
+
+ /* Request the Wake IRQ in case another line
+ * is used for WoL
+ */
+ if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
+ int_name = priv->int_name_wol;
+ sprintf(int_name, "%s:%s", dev->name, "wol");
+ ret = request_irq(priv->wol_irq,
+ stmmac_mac_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc wol MSI %d (error: %d)\n",
+ __func__, priv->wol_irq, ret);
+ irq_err = REQ_IRQ_ERR_WOL;
+ goto irq_error;
+ }
+ }
+
+ /* Request the LPI IRQ in case another line
+ * is used for LPI
+ */
+ if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
+ int_name = priv->int_name_lpi;
+ sprintf(int_name, "%s:%s", dev->name, "lpi");
+ ret = request_irq(priv->lpi_irq,
+ stmmac_mac_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc lpi MSI %d (error: %d)\n",
+ __func__, priv->lpi_irq, ret);
+ irq_err = REQ_IRQ_ERR_LPI;
+ goto irq_error;
+ }
+ }
+
+ /* Request the Safety Feature Correctable Error line in
+ * case another line is used
+ */
+ if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
+ int_name = priv->int_name_sfty_ce;
+ sprintf(int_name, "%s:%s", dev->name, "safety-ce");
+ ret = request_irq(priv->sfty_ce_irq,
+ stmmac_safety_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc sfty ce MSI %d (error: %d)\n",
+ __func__, priv->sfty_ce_irq, ret);
+ irq_err = REQ_IRQ_ERR_SFTY_CE;
+ goto irq_error;
+ }
+ }
+
+ /* Request the Safety Feature Uncorrectable Error line in
+ * case another line is used
+ */
+ if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
+ int_name = priv->int_name_sfty_ue;
+ sprintf(int_name, "%s:%s", dev->name, "safety-ue");
+ ret = request_irq(priv->sfty_ue_irq,
+ stmmac_safety_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc sfty ue MSI %d (error: %d)\n",
+ __func__, priv->sfty_ue_irq, ret);
+ irq_err = REQ_IRQ_ERR_SFTY_UE;
+ goto irq_error;
+ }
+ }
+
+ /* Request Rx MSI irq */
+ for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
+ if (i >= MTL_MAX_RX_QUEUES)
+ break;
+ if (priv->rx_irq[i] == 0)
+ continue;
+
+ int_name = priv->int_name_rx_irq[i];
+ sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
+ ret = request_irq(priv->rx_irq[i],
+ stmmac_msi_intr_rx,
+ 0, int_name, &priv->rx_queue[i]);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc rx-%d MSI %d (error: %d)\n",
+ __func__, i, priv->rx_irq[i], ret);
+ irq_err = REQ_IRQ_ERR_RX;
+ irq_idx = i;
+ goto irq_error;
+ }
+ cpumask_clear(&cpu_mask);
+ cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
+ irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
+ }
+
+ /* Request Tx MSI irq */
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
+ if (i >= MTL_MAX_TX_QUEUES)
+ break;
+ if (priv->tx_irq[i] == 0)
+ continue;
+
+ int_name = priv->int_name_tx_irq[i];
+ sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
+ ret = request_irq(priv->tx_irq[i],
+ stmmac_msi_intr_tx,
+ 0, int_name, &priv->tx_queue[i]);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc tx-%d MSI %d (error: %d)\n",
+ __func__, i, priv->tx_irq[i], ret);
+ irq_err = REQ_IRQ_ERR_TX;
+ irq_idx = i;
+ goto irq_error;
+ }
+ cpumask_clear(&cpu_mask);
+ cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
+ irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
+ }
+
+ return 0;
+
+irq_error:
+ stmmac_free_irq(dev, irq_err, irq_idx);
+ return ret;
+}
+
+static int stmmac_request_irq_single(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ enum request_irq_err irq_err;
+ int ret;
+
+ ret = request_irq(dev->irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the IRQ %d (error: %d)\n",
+ __func__, dev->irq, ret);
+ irq_err = REQ_IRQ_ERR_MAC;
+ goto irq_error;
+ }
+
+ /* Request the Wake IRQ in case another line
+ * is used for WoL
+ */
+ if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
+ ret = request_irq(priv->wol_irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
+ __func__, priv->wol_irq, ret);
+ irq_err = REQ_IRQ_ERR_WOL;
+ goto irq_error;
+ }
+ }
+
+ /* Request the IRQ lines */
+ if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
+ ret = request_irq(priv->lpi_irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
+ __func__, priv->lpi_irq, ret);
+ irq_err = REQ_IRQ_ERR_LPI;
+ goto irq_error;
+ }
+ }
+
+ return 0;
+
+irq_error:
+ stmmac_free_irq(dev, irq_err, 0);
+ return ret;
+}
+
+static int stmmac_request_irq(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
+
+ /* Request the IRQ lines */
+ if (priv->plat->multi_msi_en)
+ ret = stmmac_request_irq_multi_msi(dev);
+ else
+ ret = stmmac_request_irq_single(dev);
+
+ return ret;
+}
+
+/**
+ * stmmac_open - open entry point of the driver
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function is the open entry point of the driver.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int stmmac_open(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int mode = priv->plat->phy_interface;
+ int bfsize = 0;
+ u32 chan;
+ int ret;
+
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI &&
+ (!priv->hw->xpcs ||
+ xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
+ ret = stmmac_init_phy(dev);
+ if (ret) {
+ netdev_err(priv->dev,
+ "%s: Cannot attach to PHY (error: %d)\n",
+ __func__, ret);
+ goto init_phy_error;
+ }
+ }
+
+ /* Extra statistics */
+ memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
+ priv->xstats.threshold = tc;
+
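+ /* Pick the RX buffer size: try the 16KiB mode first and fall back to
+ * an MTU-derived size when 16KiB buffers are not usable.
+ */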
+ bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
+ if (bfsize < 0)
+ bfsize = 0;
+
+ if (bfsize < BUF_SIZE_16KiB)
+ bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
+
+ priv->dma_buf_sz = bfsize;
+ buf_sz = bfsize;
+
+ priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+
+ if (!priv->dma_tx_size)
+ priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
+ if (!priv->dma_rx_size)
+ priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
+
+ /* Earlier check for TBS */
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
+
+ /* Setup per-TXQ tbs flag before TX descriptor alloc */
+ tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
+ }
+
+ ret = alloc_dma_desc_resources(priv);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
+ __func__);
+ goto dma_desc_error;
+ }
+
+ ret = init_dma_desc_rings(dev, GFP_KERNEL);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
+ __func__);
+ goto init_error;
+ }
+
+ ret = stmmac_hw_setup(dev, true);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
+ goto init_error;
+ }
+
+ stmmac_init_coalesce(priv);
+
+ phylink_start(priv->phylink);
+ /* We may have called phylink_speed_down before */
+ phylink_speed_up(priv->phylink);
+
+ ret = stmmac_request_irq(dev);
+ if (ret)
+ goto irq_error;
+
+ stmmac_enable_all_queues(priv);
+ netif_tx_start_all_queues(priv->dev);
+
+ return 0;
+
+irq_error:
+ phylink_stop(priv->phylink);
+
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+
+ stmmac_hw_teardown(dev);
+init_error:
+ free_dma_desc_resources(priv);
+dma_desc_error:
+ phylink_disconnect_phy(priv->phylink);
+init_phy_error:
+ pm_runtime_put(priv->device);
+ return ret;
+}
+
+static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
+{
+ set_bit(__FPE_REMOVING, &priv->fpe_task_state);
+
+ if (priv->fpe_wq)
+ destroy_workqueue(priv->fpe_wq);
+
+ netdev_info(priv->dev, "FPE workqueue stop");
+}
+
+/**
+ * stmmac_release - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int stmmac_release(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 chan;
+
+ netif_tx_disable(dev);
+
+ if (device_may_wakeup(priv->device))
+ phylink_speed_down(priv->phylink, false);
+ /* Stop and disconnect the PHY */
+ phylink_stop(priv->phylink);
+ phylink_disconnect_phy(priv->phylink);
+
+ stmmac_disable_all_queues(priv);
+
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+
+ /* Free the IRQ lines */
+ stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
+
+ if (priv->eee_enabled) {
+ priv->tx_path_in_lpi_mode = false;
+ del_timer_sync(&priv->eee_ctrl_timer);
+ }
+
+ /* Stop TX/RX DMA and clear the descriptors */
+ stmmac_stop_all_dma(priv);
+
+ /* Release and free the Rx/Tx resources */
+ free_dma_desc_resources(priv);
+
+ /* Disable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, false);
+
+ netif_carrier_off(dev);
+
+ stmmac_release_ptp(priv);
+
+ pm_runtime_put(priv->device);
+
+ if (priv->dma_cap.fpesel)
+ stmmac_fpe_stop_wq(priv);
+
+ return 0;
+}
+
+static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
+ struct stmmac_tx_queue *tx_q)
+{
+ u16 tag = 0x0, inner_tag = 0x0;
+ u32 inner_type = 0x0;
+ struct dma_desc *p;
+
+ if (!priv->dma_cap.vlins)
+ return false;
+ if (!skb_vlan_tag_present(skb))
+ return false;
+ if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ inner_tag = skb_vlan_tag_get(skb);
+ inner_type = STMMAC_VLAN_INSERT;
+ }
+
+ tag = skb_vlan_tag_get(skb);
+
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ p = &tx_q->dma_tx[tx_q->cur_tx];
+
+ if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
+ return false;
+
+ stmmac_set_tx_owner(priv, p);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ return true;
+}
+
+/**
+ * stmmac_tso_allocator - allocate and fill descriptors for the TSO payload
+ * @priv: driver private structure
+ * @des: buffer start address
+ * @total_len: total length to fill in descriptors
+ * @last_segment: condition for the last descriptor
+ * @queue: TX queue index
+ * Description:
+ * This function fills the descriptors and requests new descriptors according
+ * to the buffer length to fill.
+ */
+static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
+ int total_len, bool last_segment, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct dma_desc *desc;
+ u32 buff_size;
+ int tmp_len;
+
+ tmp_len = total_len;
+
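+ /* Consume the payload in chunks of at most TSO_MAX_BUFF_SIZE bytes,
+ * taking a fresh descriptor for each chunk.
+ */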
+ while (tmp_len > 0) {
+ dma_addr_t curr_addr;
+
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+ priv->dma_tx_size);
+ WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
+
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
+
+ curr_addr = des + (total_len - tmp_len);
+ if (priv->dma_cap.addr64 <= 32)
+ desc->des0 = cpu_to_le32(curr_addr);
+ else
+ stmmac_set_desc_addr(priv, desc, curr_addr);
+
+ buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
+ TSO_MAX_BUFF_SIZE : tmp_len;
+
+ stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
+ 0, 1,
+ (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
+ 0, 0);
+
+ tmp_len -= TSO_MAX_BUFF_SIZE;
+ }
+}
+
+static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ int desc_size;
+
+ if (likely(priv->extend_desc))
+ desc_size = sizeof(struct dma_extended_desc);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc_size = sizeof(struct dma_edesc);
+ else
+ desc_size = sizeof(struct dma_desc);
+
+ /* The own bit must be the latest setting done when preparing the
+ * descriptor, and then a barrier is needed to make sure that
+ * all is coherent before granting the DMA engine.
+ */
+ wmb();
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
+}
+
+/**
+ * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description: this is the transmit function that is called on TSO frames
+ * (support available on GMAC4 and newer chips).
+ * The diagram below shows the ring programming in case of TSO frames:
+ *
+ * First Descriptor
+ * --------
+ * | DES0 |---> buffer1 = L2/L3/L4 header
+ * | DES1 |---> TCP Payload (can continue on next descr...)
+ * | DES2 |---> buffer 1 and 2 len
+ * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
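+ /* Mask the channel DMA interrupt before handing TX clean-up over to NAPI */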
+ * --------
+ * |
+ * ...
+ * |
+ * --------
+ * | DES0 | --| Split TCP Payload on Buffers 1 and 2
+ * | DES1 | --|
+ * | DES2 | --> buffer 1 and 2 len
+ * | DES3 |
+ * --------
+ *
+ * MSS is fixed while TSO is enabled, so the TDES3 context field need not
+ * be reprogrammed for each frame.
+ */
+static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dma_desc *desc, *first, *mss_desc = NULL;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ u32 queue = skb_get_queue_mapping(skb);
+ unsigned int first_entry, tx_packets;
+ int tmp_pay_len = 0, first_tx;
+ struct stmmac_tx_queue *tx_q;
+ bool has_vlan, set_ic;
+ u8 proto_hdr_len, hdr;
+ u32 pay_len, mss;
+ dma_addr_t des;
+ int i;
+
+ tx_q = &priv->tx_queue[queue];
+ first_tx = tx_q->cur_tx;
+
+ /* Compute header lengths */
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
+ hdr = sizeof(struct udphdr);
+ } else {
+ proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr = tcp_hdrlen(skb);
+ }
+
+ /* Desc availability based on threshold should be safe enough */
+ if (unlikely(stmmac_tx_avail(priv, queue) <
+ (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
+ if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
+ queue));
+ /* This is a hard error, log it. */
+ netdev_err(priv->dev,
+ "%s: Tx Ring full when queue awake\n",
+ __func__);
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
+
+ mss = skb_shinfo(skb)->gso_size;
+
+ /* set new MSS value if needed */
+ if (mss != tx_q->mss) {
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
+
+ stmmac_set_mss(priv, mss_desc, mss);
+ tx_q->mss = mss;
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+ priv->dma_tx_size);
+ WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
+ }
+
+ if (netif_msg_tx_queued(priv)) {
+ pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
+ __func__, hdr, proto_hdr_len, pay_len, mss);
+ pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
+ skb->data_len);
+ }
+
+ /* Check if VLAN can be inserted by HW */
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
+
+ first_entry = tx_q->cur_tx;
+ WARN_ON(tx_q->tx_skbuff[first_entry]);
+
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[first_entry].basic;
+ else
+ desc = &tx_q->dma_tx[first_entry];
+ first = desc;
+
+ if (has_vlan)
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
+
+ /* first descriptor: fill Headers on Buf1 */
+ des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
+ goto dma_map_err;
+
+ tx_q->tx_skbuff_dma[first_entry].buf = des;
+ tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
+ tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
+ tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
+
+ if (priv->dma_cap.addr64 <= 32) {
+ first->des0 = cpu_to_le32(des);
+
+ /* Fill start of payload in buff2 of first descriptor */
+ if (pay_len)
+ first->des1 = cpu_to_le32(des + proto_hdr_len);
+
+ /* If needed take extra descriptors to fill the remaining payload */
+ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
+ } else {
+ stmmac_set_desc_addr(priv, first, des);
+ tmp_pay_len = pay_len;
+ des += proto_hdr_len;
+ pay_len = 0;
+ }
+
+ stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
+
+ /* Prepare fragments */
+ for (i = 0; i < nfrags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ des = skb_frag_dma_map(priv->device, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
+ goto dma_map_err;
+
+ stmmac_tso_allocator(priv, des, skb_frag_size(frag),
+ (i == nfrags - 1), queue);
+
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
+ }
+
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
+
+ /* Only the last descriptor gets to point to the skb. */
+ tx_q->tx_skbuff[tx_q->cur_tx] = skb;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
+
+ /* Manage tx mitigation */
+ tx_packets = (tx_q->cur_tx + 1) - first_tx;
+ tx_q->tx_count_frames += tx_packets;
+
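+ /* Set the Interrupt-on-Completion bit if a HW timestamp is requested
+ * or the coalesce frame budget has been consumed.
+ */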
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
+ set_ic = true;
+ else if (!priv->tx_coal_frames[queue])
+ set_ic = false;
+ else if (tx_packets > priv->tx_coal_frames[queue])
+ set_ic = true;
+ else if ((tx_q->tx_count_frames %
+ priv->tx_coal_frames[queue]) < tx_packets)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
+
+ tx_q->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
+ /* We've used all descriptors we need for this skb, however,
+ * advance cur_tx so that it references a fresh descriptor.
+ * ndo_start_xmit will fill this descriptor the next time it's
+ * called and stmmac_tx_clean may clean up to this descriptor.
+ */
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+
+ if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
+ netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
+ __func__);
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
+ }
+
+ dev->stats.tx_bytes += skb->len;
+ priv->xstats.tx_tso_frames++;
+ priv->xstats.tx_tso_nfrags += nfrags;
+
+ if (priv->sarc_type)
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
+
+ skb_tx_timestamp(skb);
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ /* declare that device is doing timestamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ stmmac_enable_tx_timestamp(priv, first);
+ }
+
+ /* Complete the first descriptor before granting the DMA */
+ stmmac_prepare_tso_tx_desc(priv, first, 1,
+ proto_hdr_len,
+ pay_len,
+ 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
+ hdr / 4, (skb->len - proto_hdr_len));
+
+ /* If context desc is used to change MSS */
+ if (mss_desc) {
+ /* Make sure that the first descriptor has been completely
+ * written, including its own bit. This is because the MSS
+ * descriptor actually comes before the first descriptor, so
+ * we need to make sure that the MSS descriptor's own bit is
+ * the last thing written.
+ */
+ dma_wmb();
+ stmmac_set_tx_owner(priv, mss_desc);
+ }
+
+ if (netif_msg_pktdata(priv)) {
+ pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
+ __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
+ tx_q->cur_tx, first, nfrags);
+ pr_info(">>> frame to be transmitted: ");
+ print_pkt(skb->data, skb_headlen(skb));
+ }
+
+ netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+
+ stmmac_flush_tx_descriptors(priv, queue);
+ stmmac_tx_timer_arm(priv, queue);
+
+ return NETDEV_TX_OK;
+
+dma_map_err:
+ dev_err(priv->device, "Tx dma map failed\n");
+ dev_kfree_skb(skb);
+ priv->dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+/**
+ * stmmac_xmit - Tx entry point of the driver
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description : this is the tx entry point of the driver.
+ * It programs the chain or the ring and supports oversized frames
+ * and SG feature.
+ */
+static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned int first_entry, tx_packets, enh_desc;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned int nopaged_len = skb_headlen(skb);
+ int i, csum_insertion = 0, is_jumbo = 0;
+ u32 queue = skb_get_queue_mapping(skb);
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ int gso = skb_shinfo(skb)->gso_type;
+ struct dma_edesc *tbs_desc = NULL;
+ struct dma_desc *desc, *first;
+ struct stmmac_tx_queue *tx_q;
+ bool has_vlan, set_ic;
+ int entry, first_tx;
+ dma_addr_t des;
+
+ tx_q = &priv->tx_queue[queue];
+ first_tx = tx_q->cur_tx;
+
+ if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
+ stmmac_disable_eee_mode(priv);
+
+ /* Manage oversized TCP frames for GMAC4 device */
+ if (skb_is_gso(skb) && priv->tso) {
+ if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+ return stmmac_tso_xmit(skb, dev);
+ if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
+ return stmmac_tso_xmit(skb, dev);
+ }
+
+ if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
+ if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
+ queue));
+ /* This is a hard error, log it. */
+ netdev_err(priv->dev,
+ "%s: Tx Ring full when queue awake\n",
+ __func__);
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Check if VLAN can be inserted by HW */
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
+
+ entry = tx_q->cur_tx;
+ first_entry = entry;
+ WARN_ON(tx_q->tx_skbuff[first_entry]);
+
+ csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
+
+ if (likely(priv->extend_desc))
+ desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[entry].basic;
+ else
+ desc = tx_q->dma_tx + entry;
+
+ first = desc;
+
+ if (has_vlan)
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
+
+ enh_desc = priv->plat->enh_desc;
+ /* To program the descriptors according to the size of the frame */
+ if (enh_desc)
+ is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
+
+ if (unlikely(is_jumbo)) {
+ entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
+ if (unlikely(entry < 0) && (entry != -EINVAL))
+ goto dma_map_err;
+ }
+
+ for (i = 0; i < nfrags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ int len = skb_frag_size(frag);
+ bool last_segment = (i == (nfrags - 1));
+
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ WARN_ON(tx_q->tx_skbuff[entry]);
+
+ if (likely(priv->extend_desc))
+ desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[entry].basic;
+ else
+ desc = tx_q->dma_tx + entry;
+
+ des = skb_frag_dma_map(priv->device, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
+ goto dma_map_err; /* should reuse desc w/o issues */
+
+ tx_q->tx_skbuff_dma[entry].buf = des;
+
+ stmmac_set_desc_addr(priv, desc, des);
+
+ tx_q->tx_skbuff_dma[entry].map_as_page = true;
+ tx_q->tx_skbuff_dma[entry].len = len;
+ tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
+
+ /* Prepare the descriptor and set the own bit too */
+ stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
+ priv->mode, 1, last_segment, skb->len);
+ }
+
+ /* Only the last descriptor gets to point to the skb. */
+ tx_q->tx_skbuff[entry] = skb;
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
+
+ /* According to the coalesce parameter the IC bit for the latest
+ * segment is reset and the timer re-started to clean the tx status.
+ * This approach takes care of the fragments: desc is the first
+ * element in case of no SG.
+ */
+ tx_packets = (entry + 1) - first_tx;
+ tx_q->tx_count_frames += tx_packets;
+
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
+ set_ic = true;
+ else if (!priv->tx_coal_frames[queue])
+ set_ic = false;
+ else if (tx_packets > priv->tx_coal_frames[queue])
+ set_ic = true;
+ else if ((tx_q->tx_count_frames %
+ priv->tx_coal_frames[queue]) < tx_packets)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
+ if (likely(priv->extend_desc))
+ desc = &tx_q->dma_etx[entry].basic;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[entry].basic;
+ else
+ desc = &tx_q->dma_tx[entry];
+
+ tx_q->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
+ /* We've used all descriptors we need for this skb, however,
+ * advance cur_tx so that it references a fresh descriptor.
+ * ndo_start_xmit will fill this descriptor the next time it's
+ * called and stmmac_tx_clean may clean up to this descriptor.
+ */
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ tx_q->cur_tx = entry;
+
+ if (netif_msg_pktdata(priv)) {
+ netdev_dbg(priv->dev,
+ "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
+ __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
+ entry, first, nfrags);
+
+ netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
+ print_pkt(skb->data, skb->len);
+ }
+
+ if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
+ netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
+ __func__);
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
+ }
+
+ dev->stats.tx_bytes += skb->len;
+
+ if (priv->sarc_type)
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
+
+ skb_tx_timestamp(skb);
+
+ /* Ready to fill the first descriptor and set the OWN bit w/o any
+ * problems because all the descriptors are actually ready to be
+ * passed to the DMA engine.
+ */
+ if (likely(!is_jumbo)) {
+ bool last_segment = (nfrags == 0);
+
+ des = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
+ goto dma_map_err;
+
+ tx_q->tx_skbuff_dma[first_entry].buf = des;
+ tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
+ tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
+
+ stmmac_set_desc_addr(priv, first, des);
+
+ tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
+ tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ /* declare that device is doing timestamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ stmmac_enable_tx_timestamp(priv, first);
+ }
+
+ /* Prepare the first descriptor setting the OWN bit too */
+ stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
+ csum_insertion, priv->mode, 0, last_segment,
+ skb->len);
+ }
+
+ if (tx_q->tbs & STMMAC_TBS_EN) {
+ struct timespec64 ts = ns_to_timespec64(skb->tstamp);
+
+ tbs_desc = &tx_q->dma_entx[first_entry];
+ stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
+ }
+
+ stmmac_set_tx_owner(priv, first);
+
+ netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+
+ stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+ stmmac_flush_tx_descriptors(priv, queue);
+ stmmac_tx_timer_arm(priv, queue);
+
+ return NETDEV_TX_OK;
+
+dma_map_err:
+ netdev_err(priv->dev, "Tx DMA map failed\n");
+ dev_kfree_skb(skb);
+ priv->dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
+{
+ struct vlan_ethhdr *veth;
+ __be16 vlan_proto;
+ u16 vlanid;
+
+ veth = (struct vlan_ethhdr *)skb->data;
+ vlan_proto = veth->h_vlan_proto;
+
+ if ((vlan_proto == htons(ETH_P_8021Q) &&
+ dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
+ (vlan_proto == htons(ETH_P_8021AD) &&
+ dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
+ /* pop the vlan tag */
+ vlanid = ntohs(veth->h_vlan_TCI);
+ memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
+ }
+}
+
+/**
+ * stmmac_rx_refill - refill used skb preallocated buffers
+ * @priv: driver private structure
+ * @queue: RX queue index
+ * Description : this is to reallocate the buffers for the reception process
+ * that is based on zero-copy.
+ */
+static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int dirty = stmmac_rx_dirty(priv, queue);
+ unsigned int entry = rx_q->dirty_rx;
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ if (priv->dma_cap.addr64 <= 32)
+ gfp |= GFP_DMA32;
+
+ while (dirty-- > 0) {
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
+ struct dma_desc *p;
+ bool use_rx_wd;
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(rx_q->dma_erx + entry);
+ else
+ p = rx_q->dma_rx + entry;
+
+ if (!buf->page) {
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
+ if (!buf->page)
+ break;
+ }
+
+ if (priv->sph && !buf->sec_page) {
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
+ if (!buf->sec_page)
+ break;
+
+ buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+ }
+
+ buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
+
+ stmmac_set_desc_addr(priv, p, buf->addr);
+ if (priv->sph)
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
+ else
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
+ stmmac_refill_desc3(priv, rx_q, p);
+
+ rx_q->rx_count_frames++;
+ rx_q->rx_count_frames += priv->rx_coal_frames[queue];
+ if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
+ rx_q->rx_count_frames = 0;
+
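+ /* Decide whether the RX watchdog should be requested for this
+ * descriptor; it only takes effect when RIWT is in use.
+ */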
+ use_rx_wd = !priv->rx_coal_frames[queue];
+ use_rx_wd |= rx_q->rx_count_frames > 0;
+ if (!priv->use_riwt)
+ use_rx_wd = false;
+
+ dma_wmb();
+ stmmac_set_rx_owner(priv, p, use_rx_wd);
+
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
+ }
+ rx_q->dirty_rx = entry;
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (rx_q->dirty_rx * sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
+}
+
+static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
+ struct dma_desc *p,
+ int status, unsigned int len)
+{
+ unsigned int plen = 0, hlen = 0;
+ int coe = priv->hw->rx_csum;
+
+ /* Not first descriptor, buffer is always zero */
+ if (priv->sph && len)
+ return 0;
+
+ /* First descriptor, get split header length */
+ stmmac_get_rx_header_len(priv, p, &hlen);
+ if (priv->sph && hlen) {
+ priv->xstats.rx_split_hdr_pkt_n++;
+ return hlen;
+ }
+
+ /* First descriptor, not last descriptor and not split header */
+ if (status & rx_not_ls)
+ return priv->dma_buf_sz;
+
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
+
+ /* First descriptor and last descriptor and not split header */
+ return min_t(unsigned int, priv->dma_buf_sz, plen);
+}
+
+static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
+ struct dma_desc *p,
+ int status, unsigned int len)
+{
+ int coe = priv->hw->rx_csum;
+ unsigned int plen = 0;
+
+ /* Not split header, buffer is not available */
+ if (!priv->sph)
+ return 0;
+
+ /* Not last descriptor */
+ if (status & rx_not_ls)
+ return priv->dma_buf_sz;
+
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
+
+ /* Last descriptor */
+ return plen - len;
+}
+
+static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
+ struct xdp_frame *xdpf, bool dma_map)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ unsigned int entry = tx_q->cur_tx;
+ struct dma_desc *tx_desc;
+ dma_addr_t dma_addr;
+ bool set_ic;
+
+ if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
+ return STMMAC_XDP_CONSUMED;
+
+ if (likely(priv->extend_desc))
+ tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ tx_desc = &tx_q->dma_entx[entry].basic;
+ else
+ tx_desc = tx_q->dma_tx + entry;
+
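+ /* ndo_xdp_xmit frames need a fresh DMA mapping, while XDP_TX frames
+ * reuse the page pool mapping and only need a sync.
+ */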
+ if (dma_map) {
+ dma_addr = dma_map_single(priv->device, xdpf->data,
+ xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, dma_addr))
+ return STMMAC_XDP_CONSUMED;
+
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
+ } else {
+ struct page *page = virt_to_page(xdpf->data);
+
+ dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
+ xdpf->headroom;
+ dma_sync_single_for_device(priv->device, dma_addr,
+ xdpf->len, DMA_BIDIRECTIONAL);
+
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
+ }
+
+ tx_q->tx_skbuff_dma[entry].buf = dma_addr;
+ tx_q->tx_skbuff_dma[entry].map_as_page = false;
+ tx_q->tx_skbuff_dma[entry].len = xdpf->len;
+ tx_q->tx_skbuff_dma[entry].last_segment = true;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = false;
+
+ tx_q->xdpf[entry] = xdpf;
+
+ stmmac_set_desc_addr(priv, tx_desc, dma_addr);
+
+ stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
+ true, priv->mode, true, true,
+ xdpf->len);
+
+ tx_q->tx_count_frames++;
+
+ if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
+ tx_q->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, tx_desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
+ stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ tx_q->cur_tx = entry;
+
+ return STMMAC_XDP_TX;
+}
+
+static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
+ int cpu)
+{
+ int index = cpu;
+
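+ /* Fold the CPU id onto the available TX queues (a cheap modulo) */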
+ if (unlikely(index < 0))
+ index = 0;
+
+ while (index >= priv->plat->tx_queues_to_use)
+ index -= priv->plat->tx_queues_to_use;
+
+ return index;
+}
+
+static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
+ struct xdp_buff *xdp)
+{
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ int queue;
+ int res;
+
+ if (unlikely(!xdpf))
+ return STMMAC_XDP_CONSUMED;
+
+ queue = stmmac_xdp_get_tx_queue(priv, cpu);
+ nq = netdev_get_tx_queue(priv->dev, queue);
+
+ __netif_tx_lock(nq, cpu);
+ /* Avoids TX time-out as we are sharing with slow path */
+ txq_trans_cond_update(nq);
+
+ res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
+ if (res == STMMAC_XDP_TX)
+ stmmac_flush_tx_descriptors(priv, queue);
+
+ __netif_tx_unlock(nq);
+
+ return res;
+}
+
+static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
+ struct bpf_prog *prog,
+ struct xdp_buff *xdp)
+{
+ u32 act;
+ int res;
+
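+ /* Run the attached eBPF program and translate its verdict into the
+ * driver's own XDP status codes.
+ */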
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ res = STMMAC_XDP_PASS;
+ break;
+ case XDP_TX:
+ res = stmmac_xdp_xmit_back(priv, xdp);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
+ res = STMMAC_XDP_CONSUMED;
+ else
+ res = STMMAC_XDP_REDIRECT;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(priv->dev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(priv->dev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ res = STMMAC_XDP_CONSUMED;
+ break;
+ }
+
+ return res;
+}
+
+static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
+ struct xdp_buff *xdp)
+{
+ struct bpf_prog *prog;
+ int res;
+
+ prog = READ_ONCE(priv->xdp_prog);
+ if (!prog) {
+ res = STMMAC_XDP_PASS;
+ goto out;
+ }
+
+ res = __stmmac_xdp_run_prog(priv, prog, xdp);
+out:
+ return ERR_PTR(-res);
+}
+
+static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
+ int xdp_status)
+{
+ int cpu = smp_processor_id();
+ int queue;
+
+ queue = stmmac_xdp_get_tx_queue(priv, cpu);
+
+ if (xdp_status & STMMAC_XDP_TX)
+ stmmac_tx_timer_arm(priv, queue);
+
+ if (xdp_status & STMMAC_XDP_REDIRECT)
+ xdp_do_flush();
+}
+
+static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ unsigned int datasize = xdp->data_end - xdp->data;
+ struct sk_buff *skb;
+
+ skb = __napi_alloc_skb(&ch->rxtx_napi,
+ xdp->data_end - xdp->data_hard_start,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), xdp->data, datasize);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ return skb;
+}
+
+static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
+ struct dma_desc *p, struct dma_desc *np,
+ struct xdp_buff *xdp)
+{
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned int len = xdp->data_end - xdp->data;
+ enum pkt_hash_types hash_type;
+ int coe = priv->hw->rx_csum;
+ struct sk_buff *skb;
+ u32 hash;
+
+ skb = stmmac_construct_skb_zc(ch, xdp);
+ if (!skb) {
+ priv->dev->stats.rx_dropped++;
+ return;
+ }
+
+ stmmac_get_rx_hwtstamp(priv, p, np, skb);
+ stmmac_rx_vlan(priv->dev, skb);
+ skb->protocol = eth_type_trans(skb, priv->dev);
+
+ if (unlikely(!coe))
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
+ skb_set_hash(skb, hash, hash_type);
+
+ skb_record_rx_queue(skb, queue);
+ napi_gro_receive(&ch->rxtx_napi, skb);
+
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += len;
+}
+
+static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ unsigned int entry = rx_q->dirty_rx;
+ struct dma_desc *rx_desc = NULL;
+ bool ret = true;
+
+ budget = min(budget, stmmac_rx_dirty(priv, queue));
+
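+ /* Refill at most 'budget' descriptors, never past the ring's current
+ * read position.
+ */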
+ while (budget-- > 0 && entry != rx_q->cur_rx) {
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
+ dma_addr_t dma_addr;
+ bool use_rx_wd;
+
+ if (!buf->xdp) {
+ buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
+ if (!buf->xdp) {
+ ret = false;
+ break;
+ }
+ }
+
+ if (priv->extend_desc)
+ rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
+ else
+ rx_desc = rx_q->dma_rx + entry;
+
+ dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
+ stmmac_set_desc_addr(priv, rx_desc, dma_addr);
+ stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
+ stmmac_refill_desc3(priv, rx_q, rx_desc);
+
+ rx_q->rx_count_frames++;
+ rx_q->rx_count_frames += priv->rx_coal_frames[queue];
+ if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
+ rx_q->rx_count_frames = 0;
+
+ use_rx_wd = !priv->rx_coal_frames[queue];
+ use_rx_wd |= rx_q->rx_count_frames > 0;
+ if (!priv->use_riwt)
+ use_rx_wd = false;
+
+ dma_wmb();
+ stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
+
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
+ }
+
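+ /* Only move the tail pointer if at least one descriptor was refilled */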
+ if (rx_desc) {
+ rx_q->dirty_rx = entry;
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (rx_q->dirty_rx * sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
+ }
+
+ return ret;
+}
+
+static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ unsigned int count = 0, error = 0, len = 0;
+ int dirty = stmmac_rx_dirty(priv, queue);
+ unsigned int next_entry = rx_q->cur_rx;
+ unsigned int desc_size;
+ struct bpf_prog *prog;
+ bool failure = false;
+ int xdp_status = 0;
+ int status = 0;
+
+ if (netif_msg_rx_status(priv)) {
+ void *rx_head;
+
+ netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
+ if (priv->extend_desc) {
+ rx_head = (void *)rx_q->dma_erx;
+ desc_size = sizeof(struct dma_extended_desc);
+ } else {
+ rx_head = (void *)rx_q->dma_rx;
+ desc_size = sizeof(struct dma_desc);
+ }
+
+ stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
+ rx_q->dma_rx_phy, desc_size);
+ }
+ while (count < limit) {
+ struct stmmac_rx_buffer *buf;
+ unsigned int buf1_len = 0;
+ struct dma_desc *np, *p;
+ int entry;
+ int res;
+
+ if (!count && rx_q->state_saved) {
+ error = rx_q->state.error;
+ len = rx_q->state.len;
+ } else {
+ rx_q->state_saved = false;
+ error = 0;
+ len = 0;
+ }
+
+ if (count >= limit)
+ break;
+
+read_again:
+ buf1_len = 0;
+ entry = next_entry;
+ buf = &rx_q->buf_pool[entry];
+
+ if (dirty >= STMMAC_RX_FILL_BATCH) {
+ failure = failure ||
+ !stmmac_rx_refill_zc(priv, queue, dirty);
+ dirty = 0;
+ }
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(rx_q->dma_erx + entry);
+ else
+ p = rx_q->dma_rx + entry;
+
+ /* read the status of the incoming frame */
+ status = stmmac_rx_status(priv, &priv->dev->stats,
+ &priv->xstats, p);
+ /* check if managed by the DMA otherwise go ahead */
+ if (unlikely(status & dma_own))
+ break;
+
+ /* Prefetch the next RX descriptor */
+ rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
+ priv->dma_rx_size);
+ next_entry = rx_q->cur_rx;
+
+ if (priv->extend_desc)
+ np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
+ else
+ np = rx_q->dma_rx + next_entry;
+
+ prefetch(np);
+
+ /* Ensure a valid XSK buffer before proceeding */
+ if (!buf->xdp)
+ break;
+
+ if (priv->extend_desc)
+ stmmac_rx_extended_status(priv, &priv->dev->stats,
+ &priv->xstats,
+ rx_q->dma_erx + entry);
+ if (unlikely(status == discard_frame)) {
+ xsk_buff_free(buf->xdp);
+ buf->xdp = NULL;
+ dirty++;
+ error = 1;
+ if (!priv->hwts_rx_en)
+ priv->dev->stats.rx_errors++;
+ }
+
+ if (unlikely(error && (status & rx_not_ls)))
+ goto read_again;
+ if (unlikely(error)) {
+ count++;
+ continue;
+ }
+
+ /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
+ if (likely(status & rx_not_ls)) {
+ xsk_buff_free(buf->xdp);
+ buf->xdp = NULL;
+ dirty++;
+ count++;
+ goto read_again;
+ }
+
+ /* XDP ZC frames only support primary buffers for now */
+ buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
+ len += buf1_len;
+
+ /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
+ * Type frames (LLC/LLC-SNAP)
+ *
+ * llc_snap is never checked in GMAC >= 4, so this ACS
+ * feature is always disabled and packets need to be
+ * stripped manually.
+ */
+ if (likely(!(status & rx_not_ls)) &&
+ (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
+ unlikely(status != llc_snap))) {
+ buf1_len -= ETH_FCS_LEN;
+ len -= ETH_FCS_LEN;
+ }
+
+ /* RX buffer is good and fits into an XSK pool buffer */
+ buf->xdp->data_end = buf->xdp->data + buf1_len;
+ xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
+
+ prog = READ_ONCE(priv->xdp_prog);
+ res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
+
+ switch (res) {
+ case STMMAC_XDP_PASS:
+ stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
+ xsk_buff_free(buf->xdp);
+ break;
+ case STMMAC_XDP_CONSUMED:
+ xsk_buff_free(buf->xdp);
+ priv->dev->stats.rx_dropped++;
+ break;
+ case STMMAC_XDP_TX:
+ case STMMAC_XDP_REDIRECT:
+ xdp_status |= res;
+ break;
+ }
+
+ buf->xdp = NULL;
+ dirty++;
+ count++;
+ }
+
+ if (status & rx_not_ls) {
+ rx_q->state_saved = true;
+ rx_q->state.error = error;
+ rx_q->state.len = len;
+ }
+
+ stmmac_finalize_xdp_rx(priv, xdp_status);
+
+ priv->xstats.rx_pkt_n += count;
+ priv->xstats.rxq_stats[queue].rx_pkt_n += count;
+
+ if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
+ if (failure || stmmac_rx_dirty(priv, queue) > 0)
+ xsk_set_rx_need_wakeup(rx_q->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
+
+ return (int)count;
+ }
+
+ return failure ? limit : (int)count;
+}
+
+/**
+ * stmmac_rx - manage the receive process
+ * @priv: driver private structure
+ * @limit: napi budget
+ * @queue: RX queue index.
+ * Description : this is the function called by the napi poll method.
+ * It gets all the frames inside the ring.
+ */
+static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned int count = 0, error = 0, len = 0;
+ int status = 0, coe = priv->hw->rx_csum;
+ unsigned int next_entry = rx_q->cur_rx;
+ enum dma_data_direction dma_dir;
+ unsigned int desc_size;
+ struct sk_buff *skb = NULL;
+ struct xdp_buff xdp;
+ int xdp_status = 0;
+ int buf_sz;
+
+ dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
+ buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+
+ if (netif_msg_rx_status(priv)) {
+ void *rx_head;
+
+ netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
+ if (priv->extend_desc) {
+ rx_head = (void *)rx_q->dma_erx;
+ desc_size = sizeof(struct dma_extended_desc);
+ } else {
+ rx_head = (void *)rx_q->dma_rx;
+ desc_size = sizeof(struct dma_desc);
+ }
+
+ stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
+ rx_q->dma_rx_phy, desc_size);
+ }
+ while (count < limit) {
+ unsigned int buf1_len = 0, buf2_len = 0;
+ enum pkt_hash_types hash_type;
+ struct stmmac_rx_buffer *buf;
+ struct dma_desc *np, *p;
+ int entry;
+ u32 hash;
+
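+		/* Resume a frame that spilled over from the previous NAPI
+		 * run: its partial skb, error flag and accumulated length
+		 * were saved when polling stopped mid-frame.
+		 */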
+ if (!count && rx_q->state_saved) {
+ skb = rx_q->state.skb;
+ error = rx_q->state.error;
+ len = rx_q->state.len;
+ } else {
+ rx_q->state_saved = false;
+ skb = NULL;
+ error = 0;
+ len = 0;
+ }
+
+ if (count >= limit)
+ break;
+
+read_again:
+ buf1_len = 0;
+ buf2_len = 0;
+ entry = next_entry;
+ buf = &rx_q->buf_pool[entry];
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(rx_q->dma_erx + entry);
+ else
+ p = rx_q->dma_rx + entry;
+
+ /* read the status of the incoming frame */
+ status = stmmac_rx_status(priv, &priv->dev->stats,
+ &priv->xstats, p);
+ /* check if managed by the DMA otherwise go ahead */
+ if (unlikely(status & dma_own))
+ break;
+
+ rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
+ priv->dma_rx_size);
+ next_entry = rx_q->cur_rx;
+
+ if (priv->extend_desc)
+ np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
+ else
+ np = rx_q->dma_rx + next_entry;
+
+ prefetch(np);
+
+ if (priv->extend_desc)
+ stmmac_rx_extended_status(priv, &priv->dev->stats,
+ &priv->xstats, rx_q->dma_erx + entry);
+ if (unlikely(status == discard_frame)) {
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+ error = 1;
+ if (!priv->hwts_rx_en)
+ priv->dev->stats.rx_errors++;
+ }
+
+ if (unlikely(error && (status & rx_not_ls)))
+ goto read_again;
+ if (unlikely(error)) {
+ dev_kfree_skb(skb);
+ skb = NULL;
+ count++;
+ continue;
+ }
+
+ /* Buffer is good. Go on. */
+
+ prefetch(page_address(buf->page) + buf->page_offset);
+ if (buf->sec_page)
+ prefetch(page_address(buf->sec_page));
+
+ buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
+ len += buf1_len;
+ buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
+ len += buf2_len;
+
+		/* When ACS is set, the GMAC core strips PAD/FCS for IEEE 802.3
+		 * Type frames (LLC/LLC-SNAP).
+		 *
+		 * llc_snap is never checked in GMAC >= 4, so the ACS feature
+		 * is effectively always disabled there and packets need to be
+		 * stripped manually.
+		 */
+ if (likely(!(status & rx_not_ls)) &&
+ (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
+ unlikely(status != llc_snap))) {
+ if (buf2_len) {
+ buf2_len -= ETH_FCS_LEN;
+ len -= ETH_FCS_LEN;
+ } else if (buf1_len) {
+ buf1_len -= ETH_FCS_LEN;
+ len -= ETH_FCS_LEN;
+ }
+ }
+
+ if (!skb) {
+ unsigned int pre_len, sync_len;
+
+ dma_sync_single_for_cpu(priv->device, buf->addr,
+ buf1_len, dma_dir);
+
+ xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
+ xdp_prepare_buff(&xdp, page_address(buf->page),
+ buf->page_offset, buf1_len, false);
+
+ pre_len = xdp.data_end - xdp.data_hard_start -
+ buf->page_offset;
+ skb = stmmac_xdp_run_prog(priv, &xdp);
+			/* Due to xdp_adjust_tail: the DMA sync for_device
+			 * must cover the max length the CPU touched
+			 */
+ sync_len = xdp.data_end - xdp.data_hard_start -
+ buf->page_offset;
+ sync_len = max(sync_len, pre_len);
+
+			/* For verdicts other than XDP_PASS */
+ if (IS_ERR(skb)) {
+ unsigned int xdp_res = -PTR_ERR(skb);
+
+ if (xdp_res & STMMAC_XDP_CONSUMED) {
+ page_pool_put_page(rx_q->page_pool,
+ virt_to_head_page(xdp.data),
+ sync_len, true);
+ buf->page = NULL;
+ priv->dev->stats.rx_dropped++;
+
+					/* Clear skb: it currently holds the
+					 * XDP verdict encoded as an ERR_PTR.
+					 */
+ skb = NULL;
+
+ if (unlikely((status & rx_not_ls)))
+ goto read_again;
+
+ count++;
+ continue;
+ } else if (xdp_res & (STMMAC_XDP_TX |
+ STMMAC_XDP_REDIRECT)) {
+ xdp_status |= xdp_res;
+ buf->page = NULL;
+ skb = NULL;
+ count++;
+ continue;
+ }
+ }
+ }
+
+ if (!skb) {
+ /* XDP program may expand or reduce tail */
+ buf1_len = xdp.data_end - xdp.data;
+
+ skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
+ if (!skb) {
+ priv->dev->stats.rx_dropped++;
+ count++;
+ goto drain_data;
+ }
+
+ /* XDP program may adjust header */
+ skb_copy_to_linear_data(skb, xdp.data, buf1_len);
+ skb_put(skb, buf1_len);
+
+ /* Data payload copied into SKB, page ready for recycle */
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+ } else if (buf1_len) {
+ dma_sync_single_for_cpu(priv->device, buf->addr,
+ buf1_len, dma_dir);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ buf->page, buf->page_offset, buf1_len,
+ priv->dma_buf_sz);
+
+ /* Data payload appended into SKB */
+ page_pool_release_page(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+ }
+
+ if (buf2_len) {
+ dma_sync_single_for_cpu(priv->device, buf->sec_addr,
+ buf2_len, dma_dir);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ buf->sec_page, 0, buf2_len,
+ priv->dma_buf_sz);
+
+ /* Data payload appended into SKB */
+ page_pool_release_page(rx_q->page_pool, buf->sec_page);
+ buf->sec_page = NULL;
+ }
+
+drain_data:
+ if (likely(status & rx_not_ls))
+ goto read_again;
+ if (!skb)
+ continue;
+
+ /* Got entire packet into SKB. Finish it. */
+
+ stmmac_get_rx_hwtstamp(priv, p, np, skb);
+ stmmac_rx_vlan(priv->dev, skb);
+ skb->protocol = eth_type_trans(skb, priv->dev);
+
+ if (unlikely(!coe))
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
+ skb_set_hash(skb, hash, hash_type);
+
+ skb_record_rx_queue(skb, queue);
+ napi_gro_receive(&ch->rx_napi, skb);
+ skb = NULL;
+
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += len;
+ count++;
+ }
+
+ if (status & rx_not_ls || skb) {
+ rx_q->state_saved = true;
+ rx_q->state.skb = skb;
+ rx_q->state.error = error;
+ rx_q->state.len = len;
+ }
+
+ stmmac_finalize_xdp_rx(priv, xdp_status);
+
+ stmmac_rx_refill(priv, queue);
+
+ priv->xstats.rx_pkt_n += count;
+ priv->xstats.rxq_stats[queue].rx_pkt_n += count;
+
+ return count;
+}
+
+static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct stmmac_channel *ch =
+ container_of(napi, struct stmmac_channel, rx_napi);
+ struct stmmac_priv *priv = ch->priv_data;
+ u32 chan = ch->index;
+ int work_done;
+
+ priv->xstats.napi_poll++;
+
+ work_done = stmmac_rx(priv, budget, chan);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
+ return work_done;
+}
+
+static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct stmmac_channel *ch =
+ container_of(napi, struct stmmac_channel, tx_napi);
+ struct stmmac_priv *priv = ch->priv_data;
+ u32 chan = ch->index;
+ int work_done;
+
+ priv->xstats.napi_poll++;
+
+ work_done = stmmac_tx_clean(priv, budget, chan);
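+	/* stmmac_tx_clean() may clean more entries than the NAPI budget;
+	 * clamp the value reported to the NAPI core.
+	 */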
+ work_done = min(work_done, budget);
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
+ return work_done;
+}
+
+static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
+{
+ struct stmmac_channel *ch =
+ container_of(napi, struct stmmac_channel, rxtx_napi);
+ struct stmmac_priv *priv = ch->priv_data;
+ int rx_done, tx_done, rxtx_done;
+ u32 chan = ch->index;
+
+ priv->xstats.napi_poll++;
+
+ tx_done = stmmac_tx_clean(priv, budget, chan);
+ tx_done = min(tx_done, budget);
+
+ rx_done = stmmac_rx_zc(priv, budget, chan);
+
+ rxtx_done = max(tx_done, rx_done);
+
+	/* If either TX or RX work is not complete, return budget
+	 * and keep polling
+	 */
+ if (rxtx_done >= budget)
+ return budget;
+
+ /* all work done, exit the polling mode */
+ if (napi_complete_done(napi, rxtx_done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+		/* Both RX and TX work are complete,
+		 * so enable both RX & TX IRQs.
+		 */
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
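+	/* Return at most budget - 1: the NAPI core must see less than the
+	 * full budget once napi_complete_done() has been attempted.
+	 */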
+ return min(rxtx_done, budget - 1);
+}
+
+/**
+ * stmmac_tx_timeout
+ * @dev : Pointer to net device structure
+ * @txqueue: the index of the hanging transmit queue
+ * Description: this function is called when a packet transmission fails to
+ * complete within a reasonable time. The driver will mark the error in the
+ * netdev structure and arrange for the device to be reset to a sane state
+ * in order to transmit a new packet.
+ */
+static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ stmmac_global_err(priv);
+}
+
+/**
+ * stmmac_set_rx_mode - entry point for multicast addressing
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled.
+ * Return value:
+ * void.
+ */
+static void stmmac_set_rx_mode(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ stmmac_set_filter(priv, priv->hw, dev);
+}
+
+/**
+ * stmmac_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: the Maximum Transmission Unit (MTU) is used by the network layer
+ * to drive packet transmission. Ethernet has an MTU of 1500 octets
+ * (ETH_DATA_LEN). This value can be changed with ifconfig.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int txfifosz = priv->plat->tx_fifo_size;
+ const int mtu = new_mtu;
+
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+
+ txfifosz /= priv->plat->tx_queues_to_use;
+
+ if (netif_running(dev)) {
+ netdev_err(priv->dev, "must be stopped to change its MTU\n");
+ return -EBUSY;
+ }
+
+ if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
+ netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
+ return -EINVAL;
+ }
+
+ new_mtu = STMMAC_ALIGN(new_mtu);
+
+	/* Reject if the per-queue TX FIFO is too small or the MTU too large */
+ if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
+ return -EINVAL;
+
+ dev->mtu = mtu;
+
+ netdev_update_features(dev);
+
+ return 0;
+}
+
+static netdev_features_t stmmac_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
+ features &= ~NETIF_F_RXCSUM;
+
+ if (!priv->plat->tx_coe)
+ features &= ~NETIF_F_CSUM_MASK;
+
+	/* Some GMAC devices have buggy Jumbo frame support that
+	 * needs the Tx COE disabled for oversized frames (due to
+	 * limited buffer sizes). In this case we disable the TX csum
+	 * insertion in the TDES and don't use SF.
+	 */
+ if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
+ features &= ~NETIF_F_CSUM_MASK;
+
+ /* Disable tso if asked by ethtool */
+ if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ if (features & NETIF_F_TSO)
+ priv->tso = true;
+ else
+ priv->tso = false;
+ }
+
+ return features;
+}
+
+static int stmmac_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+
+	/* Keep the COE type if checksum offload is supported */
+ if (features & NETIF_F_RXCSUM)
+ priv->hw->rx_csum = priv->plat->rx_coe;
+ else
+ priv->hw->rx_csum = 0;
+ /* No check needed because rx_coe has been set before and it will be
+ * fixed in case of issue.
+ */
+ stmmac_rx_ipc(priv, priv->hw);
+
+ if (priv->sph_cap) {
+ bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ u32 chan;
+
+ for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+ }
+
+ return 0;
+}
+
+static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
+{
+ struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
+ enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
+ enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
+ bool *hs_enable = &fpe_cfg->hs_enable;
+
+ if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
+ return;
+
+ /* If LP has sent verify mPacket, LP is FPE capable */
+ if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
+ if (*lp_state < FPE_STATE_CAPABLE)
+ *lp_state = FPE_STATE_CAPABLE;
+
+		/* If the user has requested FPE enable, respond immediately */
+ if (*hs_enable)
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+ MPACKET_RESPONSE);
+ }
+
+ /* If Local has sent verify mPacket, Local is FPE capable */
+ if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
+ if (*lo_state < FPE_STATE_CAPABLE)
+ *lo_state = FPE_STATE_CAPABLE;
+ }
+
+ /* If LP has sent response mPacket, LP is entering FPE ON */
+ if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
+ *lp_state = FPE_STATE_ENTERING_ON;
+
+ /* If Local has sent response mPacket, Local is entering FPE ON */
+ if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
+ *lo_state = FPE_STATE_ENTERING_ON;
+
+ if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
+ !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
+ priv->fpe_wq) {
+ queue_work(priv->fpe_wq, &priv->fpe_task);
+ }
+}
+
+static void stmmac_common_interrupt(struct stmmac_priv *priv)
+{
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 queues_count;
+ u32 queue;
+ bool xmac;
+
+ xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
+
+ if (priv->irq_wake)
+ pm_wakeup_event(priv->device, 0);
+
+ if (priv->dma_cap.estsel)
+ stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
+ &priv->xstats, tx_cnt);
+
+ if (priv->dma_cap.fpesel) {
+ int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
+ priv->dev);
+
+ stmmac_fpe_event_status(priv, status);
+ }
+
+ /* To handle GMAC own interrupts */
+ if ((priv->plat->has_gmac) || xmac) {
+ int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
+
+ if (unlikely(status)) {
+ /* For LPI we need to save the tx status */
+ if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
+ priv->tx_path_in_lpi_mode = true;
+ if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
+ priv->tx_path_in_lpi_mode = false;
+ }
+
+ for (queue = 0; queue < queues_count; queue++) {
+ status = stmmac_host_mtl_irq_status(priv, priv->hw,
+ queue);
+ }
+
+ /* PCS link status */
+ if (priv->hw->pcs) {
+ if (priv->xstats.pcs_link)
+ netif_carrier_on(priv->dev);
+ else
+ netif_carrier_off(priv->dev);
+ }
+
+ stmmac_timestamp_interrupt(priv, priv);
+ }
+}
+
+/**
+ * stmmac_interrupt - main ISR
+ * @irq: interrupt number.
+ * @dev_id: to pass the net device pointer.
+ * Description: this is the main driver interrupt service routine.
+ * It can call:
+ * o DMA service routine (to manage incoming frame reception and transmission
+ * status)
+ * o Core interrupts to manage: remote wake-up, management counter, LPI
+ * interrupts.
+ */
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ /* Check if a fatal error happened */
+ if (stmmac_safety_feat_interrupt(priv))
+ return IRQ_HANDLED;
+
+ /* To handle Common interrupts */
+ stmmac_common_interrupt(priv);
+
+ /* To handle DMA interrupts */
+ stmmac_dma_interrupt(priv);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct stmmac_priv *priv;
+
+	if (unlikely(!dev)) {
+		pr_err("%s: invalid dev pointer\n", __func__);
+		return IRQ_NONE;
+	}
+
+	priv = netdev_priv(dev);
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ /* To handle Common interrupts */
+ stmmac_common_interrupt(priv);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = (struct net_device *)dev_id;
+	struct stmmac_priv *priv;
+
+	if (unlikely(!dev)) {
+		pr_err("%s: invalid dev pointer\n", __func__);
+		return IRQ_NONE;
+	}
+
+	priv = netdev_priv(dev);
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ /* Check if a fatal error happened */
+ stmmac_safety_feat_interrupt(priv);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
+{
+	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
+	struct stmmac_priv *priv;
+	int chan;
+	int status;
+
+	if (unlikely(!data)) {
+		pr_err("%s: invalid dev pointer\n", __func__);
+		return IRQ_NONE;
+	}
+
+	chan = tx_q->queue_index;
+	priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
+
+ if (unlikely(status & tx_hard_error_bump_tc)) {
+ /* Try to bump up the dma threshold on this failure */
+ stmmac_bump_dma_threshold(priv, chan);
+ } else if (unlikely(status == tx_hard_error)) {
+ stmmac_tx_err(priv, chan);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
+{
+	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
+	struct stmmac_priv *priv;
+	int chan;
+
+	if (unlikely(!data)) {
+		pr_err("%s: invalid dev pointer\n", __func__);
+		return IRQ_NONE;
+	}
+
+	chan = rx_q->queue_index;
+	priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ stmmac_napi_check(priv, chan, DMA_DIR_RX);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling receive - used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled.
+ */
+static void stmmac_poll_controller(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int i;
+
+ /* If adapter is down, do nothing */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return;
+
+ if (priv->plat->multi_msi_en) {
+ for (i = 0; i < priv->plat->rx_queues_to_use; i++)
+ stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
+
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++)
+ stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
+ } else {
+ disable_irq(dev->irq);
+ stmmac_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+ }
+}
+#endif
+
+/**
+ * stmmac_ioctl - Entry point for the Ioctl
+ * @dev: Device pointer.
+ * @rq: An IOCTL specific structure, that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * @cmd: IOCTL command
+ * Description:
+ * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
+ */
+static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
+ break;
+ case SIOCSHWTSTAMP:
+ ret = stmmac_hwtstamp_set(dev, rq);
+ break;
+ case SIOCGHWTSTAMP:
+ ret = stmmac_hwtstamp_get(dev, rq);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct stmmac_priv *priv = cb_priv;
+ int ret = -EOPNOTSUPP;
+
+ if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
+ return ret;
+
+ __stmmac_disable_all_queues(priv);
+
+ switch (type) {
+ case TC_SETUP_CLSU32:
+ ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
+ break;
+ case TC_SETUP_CLSFLOWER:
+ ret = stmmac_tc_setup_cls(priv, priv, type_data);
+ break;
+ default:
+ break;
+ }
+
+ stmmac_enable_all_queues(priv);
+ return ret;
+}
+
+static LIST_HEAD(stmmac_block_cb_list);
+
+static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return flow_block_cb_setup_simple(type_data,
+ &stmmac_block_cb_list,
+ stmmac_setup_tc_block_cb,
+ priv, priv, true);
+ case TC_SETUP_QDISC_CBS:
+ return stmmac_tc_setup_cbs(priv, priv, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return stmmac_tc_setup_taprio(priv, priv, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return stmmac_tc_setup_etf(priv, priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ int gso = skb_shinfo(skb)->gso_type;
+
+ if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
+ /*
+ * There is no way to determine the number of TSO/USO
+		 * capable Queues. Let's always use Queue 0 because
+		 * if TSO/USO is supported then at least this one
+		 * will be capable.
+ */
+ return 0;
+ }
+
+ return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
+}
+
+static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret = 0;
+
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
+ ret = eth_mac_addr(ndev, addr);
+ if (ret)
+ goto set_mac_error;
+
+ stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
+
+set_mac_error:
+ pm_runtime_put(priv->device);
+
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *stmmac_fs_dir;
+
+static void sysfs_display_ring(void *head, int size, int extend_desc,
+ struct seq_file *seq, dma_addr_t dma_phy_addr)
+{
+ int i;
+ struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
+ struct dma_desc *p = (struct dma_desc *)head;
+ dma_addr_t dma_addr;
+
+ for (i = 0; i < size; i++) {
+ if (extend_desc) {
+ dma_addr = dma_phy_addr + i * sizeof(*ep);
+ seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, &dma_addr,
+ le32_to_cpu(ep->basic.des0),
+ le32_to_cpu(ep->basic.des1),
+ le32_to_cpu(ep->basic.des2),
+ le32_to_cpu(ep->basic.des3));
+ ep++;
+ } else {
+ dma_addr = dma_phy_addr + i * sizeof(*p);
+ seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, &dma_addr,
+ le32_to_cpu(p->des0), le32_to_cpu(p->des1),
+ le32_to_cpu(p->des2), le32_to_cpu(p->des3));
+ p++;
+ }
+ seq_printf(seq, "\n");
+ }
+}
+
+static int stmmac_rings_status_show(struct seq_file *seq, void *v)
+{
+ struct net_device *dev = seq->private;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_count = priv->plat->rx_queues_to_use;
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ if ((dev->flags & IFF_UP) == 0)
+ return 0;
+
+ for (queue = 0; queue < rx_count; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ seq_printf(seq, "RX Queue %d:\n", queue);
+
+ if (priv->extend_desc) {
+ seq_printf(seq, "Extended descriptor ring:\n");
+ sysfs_display_ring((void *)rx_q->dma_erx,
+ priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
+ } else {
+ seq_printf(seq, "Descriptor ring:\n");
+ sysfs_display_ring((void *)rx_q->dma_rx,
+ priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
+ }
+ }
+
+ for (queue = 0; queue < tx_count; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ seq_printf(seq, "TX Queue %d:\n", queue);
+
+ if (priv->extend_desc) {
+ seq_printf(seq, "Extended descriptor ring:\n");
+ sysfs_display_ring((void *)tx_q->dma_etx,
+ priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
+ } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
+ seq_printf(seq, "Descriptor ring:\n");
+ sysfs_display_ring((void *)tx_q->dma_tx,
+ priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
+ }
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
+
+static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
+{
+ struct net_device *dev = seq->private;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (!priv->hw_cap_support) {
+ seq_printf(seq, "DMA HW features not supported\n");
+ return 0;
+ }
+
+ seq_printf(seq, "==============================\n");
+ seq_printf(seq, "\tDMA HW features\n");
+ seq_printf(seq, "==============================\n");
+
+ seq_printf(seq, "\t10/100 Mbps: %s\n",
+ (priv->dma_cap.mbps_10_100) ? "Y" : "N");
+ seq_printf(seq, "\t1000 Mbps: %s\n",
+ (priv->dma_cap.mbps_1000) ? "Y" : "N");
+ seq_printf(seq, "\tHalf duplex: %s\n",
+ (priv->dma_cap.half_duplex) ? "Y" : "N");
+ seq_printf(seq, "\tHash Filter: %s\n",
+ (priv->dma_cap.hash_filter) ? "Y" : "N");
+ seq_printf(seq, "\tMultiple MAC address registers: %s\n",
+ (priv->dma_cap.multi_addr) ? "Y" : "N");
+ seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
+ (priv->dma_cap.pcs) ? "Y" : "N");
+ seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
+ (priv->dma_cap.sma_mdio) ? "Y" : "N");
+ seq_printf(seq, "\tPMT Remote wake up: %s\n",
+ (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
+ seq_printf(seq, "\tPMT Magic Frame: %s\n",
+ (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
+ seq_printf(seq, "\tRMON module: %s\n",
+ (priv->dma_cap.rmon) ? "Y" : "N");
+ seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
+ (priv->dma_cap.time_stamp) ? "Y" : "N");
+ seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
+ (priv->dma_cap.atime_stamp) ? "Y" : "N");
+ seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
+ (priv->dma_cap.eee) ? "Y" : "N");
+ seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
+ seq_printf(seq, "\tChecksum Offload in TX: %s\n",
+ (priv->dma_cap.tx_coe) ? "Y" : "N");
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
+ (priv->dma_cap.rx_coe) ? "Y" : "N");
+ } else {
+ seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
+ seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+ }
+ seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
+ (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
+ seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
+ priv->dma_cap.number_rx_channel);
+ seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
+ priv->dma_cap.number_tx_channel);
+ seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
+ priv->dma_cap.number_rx_queues);
+ seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
+ priv->dma_cap.number_tx_queues);
+ seq_printf(seq, "\tEnhanced descriptors: %s\n",
+ (priv->dma_cap.enh_desc) ? "Y" : "N");
+ seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
+ seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
+ seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
+ seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
+ seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
+ priv->dma_cap.pps_out_num);
+ seq_printf(seq, "\tSafety Features: %s\n",
+ priv->dma_cap.asp ? "Y" : "N");
+ seq_printf(seq, "\tFlexible RX Parser: %s\n",
+ priv->dma_cap.frpsel ? "Y" : "N");
+ seq_printf(seq, "\tEnhanced Addressing: %d\n",
+ priv->dma_cap.addr64);
+ seq_printf(seq, "\tReceive Side Scaling: %s\n",
+ priv->dma_cap.rssen ? "Y" : "N");
+ seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
+ priv->dma_cap.vlhash ? "Y" : "N");
+ seq_printf(seq, "\tSplit Header: %s\n",
+ priv->dma_cap.sphen ? "Y" : "N");
+ seq_printf(seq, "\tVLAN TX Insertion: %s\n",
+ priv->dma_cap.vlins ? "Y" : "N");
+ seq_printf(seq, "\tDouble VLAN: %s\n",
+ priv->dma_cap.dvlan ? "Y" : "N");
+ seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
+ priv->dma_cap.l3l4fnum);
+ seq_printf(seq, "\tARP Offloading: %s\n",
+ priv->dma_cap.arpoffsel ? "Y" : "N");
+ seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
+ priv->dma_cap.estsel ? "Y" : "N");
+ seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
+ priv->dma_cap.fpesel ? "Y" : "N");
+ seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
+ priv->dma_cap.tbssel ? "Y" : "N");
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
+
+/* Use network device events to rename debugfs file entries.
+ */
+static int stmmac_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (dev->netdev_ops != &stmmac_netdev_ops)
+ goto done;
+
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ if (priv->dbgfs_dir)
+ priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
+ priv->dbgfs_dir,
+ stmmac_fs_dir,
+ dev->name);
+ break;
+ }
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block stmmac_notifier = {
+ .notifier_call = stmmac_device_event,
+};
+
+static void stmmac_init_fs(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ rtnl_lock();
+
+ /* Create per netdev entries */
+ priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
+
+ /* Entry to report DMA RX/TX rings */
+ debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
+ &stmmac_rings_status_fops);
+
+ /* Entry to report the DMA HW features */
+ debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
+ &stmmac_dma_cap_fops);
+
+ rtnl_unlock();
+}
+
+static void stmmac_exit_fs(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ debugfs_remove_recursive(priv->dbgfs_dir);
+}
+#endif /* CONFIG_DEBUG_FS */
+
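+/* Compute the LSB-first (reflected) CRC-32, polynomial 0xEDB88320, over the
+ * 12 VID bits of a little-endian VLAN tag. The caller uses the top bits of
+ * the bit-reversed result to index the VLAN hash filter.
+ */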
+static u32 stmmac_vid_crc32_le(__le16 vid_le)
+{
+ unsigned char *data = (unsigned char *)&vid_le;
+ unsigned char data_byte = 0;
+ u32 crc = ~0x0;
+ u32 temp = 0;
+ int i, bits;
+
+ bits = get_bitmask_order(VLAN_VID_MASK);
+ for (i = 0; i < bits; i++) {
+ if ((i % 8) == 0)
+ data_byte = data[i / 8];
+
+ temp = ((crc & 1) ^ data_byte) & 1;
+ crc >>= 1;
+ data_byte >>= 1;
+
+ if (temp)
+ crc ^= 0xedb88320;
+ }
+
+ return crc;
+}
+
+static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
+{
+ u32 crc, hash = 0;
+ __le16 pmatch = 0;
+ int count = 0;
+ u16 vid = 0;
+
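+	/* Fold each active VID into the hash: the top 4 bits of the
+	 * bit-reversed CRC select which of the 16 hash bits to set.
+	 */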
+ for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
+ __le16 vid_le = cpu_to_le16(vid);
+ crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
+ hash |= (1 << crc);
+ count++;
+ }
+
+ if (!priv->dma_cap.vlhash) {
+ if (count > 2) /* VID = 0 always passes filter */
+ return -EOPNOTSUPP;
+
+ pmatch = cpu_to_le16(vid);
+ hash = 0;
+ }
+
+ return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
+}
+
+static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ bool is_double = false;
+ int ret;
+
+ if (be16_to_cpu(proto) == ETH_P_8021AD)
+ is_double = true;
+
+ set_bit(vid, priv->active_vlans);
+ ret = stmmac_vlan_update(priv, is_double);
+ if (ret) {
+ clear_bit(vid, priv->active_vlans);
+ return ret;
+ }
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ bool is_double = false;
+ int ret;
+
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
+ if (be16_to_cpu(proto) == ETH_P_8021AD)
+ is_double = true;
+
+ clear_bit(vid, priv->active_vlans);
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+ if (ret)
+ goto del_vlan_error;
+ }
+
+ ret = stmmac_vlan_update(priv, is_double);
+
+del_vlan_error:
+ pm_runtime_put(priv->device);
+
+ return ret;
+}
+
+static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
+ case XDP_SETUP_XSK_POOL:
+ return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
+ bpf->xsk.queue_id);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ int i, nxmit = 0;
+ int queue;
+
+ if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ queue = stmmac_xdp_get_tx_queue(priv, cpu);
+ nq = netdev_get_tx_queue(priv->dev, queue);
+
+ __netif_tx_lock(nq, cpu);
+ /* Avoids TX time-out as we are sharing with slow path */
+ txq_trans_cond_update(nq);
+
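+	/* Transmit until the ring is full; frames that cannot be queued are
+	 * left to the caller, which sees nxmit < num_frames.
+	 */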
+ for (i = 0; i < num_frames; i++) {
+ int res;
+
+ res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
+ if (res == STMMAC_XDP_CONSUMED)
+ break;
+
+ nxmit++;
+ }
+
+ if (flags & XDP_XMIT_FLUSH) {
+ stmmac_flush_tx_descriptors(priv, queue);
+ stmmac_tx_timer_arm(priv, queue);
+ }
+
+ __netif_tx_unlock(nq);
+
+ return nxmit;
+}
+
+void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ stmmac_stop_rx_dma(priv, queue);
+ __free_dma_rx_desc_resources(priv, queue);
+}
+
+void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned long flags;
+ u32 buf_size;
+ int ret;
+
+ ret = __alloc_dma_rx_desc_resources(priv, queue);
+ if (ret) {
+ netdev_err(priv->dev, "Failed to alloc RX desc.\n");
+ return;
+ }
+
+ ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
+ if (ret) {
+ __free_dma_rx_desc_resources(priv, queue);
+ netdev_err(priv->dev, "Failed to init RX desc.\n");
+ return;
+ }
+
+ stmmac_clear_rx_descriptors(priv, queue);
+
+ stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, rx_q->queue_index);
+
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
+ sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr, rx_q->queue_index);
+
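+	/* Program the DMA buffer size from the XSK pool frame size when
+	 * zero-copy is active on this queue, otherwise use the default
+	 * DMA buffer size.
+	 */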
+ if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
+ buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ buf_size,
+ rx_q->queue_index);
+ } else {
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ priv->dma_buf_sz,
+ rx_q->queue_index);
+ }
+
+ stmmac_start_rx_dma(priv, queue);
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
+}
+
+void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ stmmac_stop_tx_dma(priv, queue);
+ __free_dma_tx_desc_resources(priv, queue);
+}
+
+void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned long flags;
+ int ret;
+
+ ret = __alloc_dma_tx_desc_resources(priv, queue);
+ if (ret) {
+ netdev_err(priv->dev, "Failed to alloc TX desc.\n");
+ return;
+ }
+
+ ret = __init_dma_tx_desc_rings(priv, queue);
+ if (ret) {
+ __free_dma_tx_desc_resources(priv, queue);
+ netdev_err(priv->dev, "Failed to init TX desc.\n");
+ return;
+ }
+
+ stmmac_clear_tx_descriptors(priv, queue);
+
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, tx_q->queue_index);
+
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+ tx_q->tx_tail_addr, tx_q->queue_index);
+
+ stmmac_start_tx_dma(priv, queue);
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+}
+
+void stmmac_xdp_release(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 chan;
+
+ /* Disable NAPI process */
+ stmmac_disable_all_queues(priv);
+
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+
+ /* Free the IRQ lines */
+ stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
+
+ /* Stop TX/RX DMA channels */
+ stmmac_stop_all_dma(priv);
+
+ /* Release and free the Rx/Tx resources */
+ free_dma_desc_resources(priv);
+
+ /* Disable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, false);
+
+ /* set trans_start so we don't get spurious
+ * watchdogs during reset
+ */
+ netif_trans_update(dev);
+ netif_carrier_off(dev);
+}
+
+int stmmac_xdp_open(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 dma_csr_ch = max(rx_cnt, tx_cnt);
+ struct stmmac_rx_queue *rx_q;
+ struct stmmac_tx_queue *tx_q;
+ u32 buf_size;
+ bool sph_en;
+ u32 chan;
+ int ret;
+
+ ret = alloc_dma_desc_resources(priv);
+ if (ret < 0) {
+ netdev_err(dev, "%s: DMA descriptors allocation failed\n",
+ __func__);
+ goto dma_desc_error;
+ }
+
+ ret = init_dma_desc_rings(dev, GFP_KERNEL);
+ if (ret < 0) {
+ netdev_err(dev, "%s: DMA descriptors initialization failed\n",
+ __func__);
+ goto init_error;
+ }
+
+ /* DMA CSR Channel configuration */
+ for (chan = 0; chan < dma_csr_ch; chan++)
+ stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+
+ /* Adjust Split header */
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+
+ /* DMA RX Channel Configuration */
+ for (chan = 0; chan < rx_cnt; chan++) {
+ rx_q = &priv->rx_queue[chan];
+
+ stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, chan);
+
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (rx_q->buf_alloc_num *
+ sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr, chan);
+
+ if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
+ buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ buf_size,
+ rx_q->queue_index);
+ } else {
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ priv->dma_buf_sz,
+ rx_q->queue_index);
+ }
+
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+ }
+
+ /* DMA TX Channel Configuration */
+ for (chan = 0; chan < tx_cnt; chan++) {
+ tx_q = &priv->tx_queue[chan];
+
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+ tx_q->tx_tail_addr, chan);
+
+ hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ tx_q->txtimer.function = stmmac_tx_timer;
+ }
+
+ /* Enable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, true);
+
+ /* Start Rx & Tx DMA Channels */
+ stmmac_start_all_dma(priv);
+
+ ret = stmmac_request_irq(dev);
+ if (ret)
+ goto irq_error;
+
+	/* Enable NAPI process */
+ stmmac_enable_all_queues(priv);
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+
+ return 0;
+
+irq_error:
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+
+ stmmac_hw_teardown(dev);
+init_error:
+ free_dma_desc_resources(priv);
+dma_desc_error:
+ return ret;
+}
+
+int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct stmmac_rx_queue *rx_q;
+ struct stmmac_tx_queue *tx_q;
+ struct stmmac_channel *ch;
+
+ if (test_bit(STMMAC_DOWN, &priv->state) ||
+ !netif_carrier_ok(priv->dev))
+ return -ENETDOWN;
+
+ if (!stmmac_xdp_is_enabled(priv))
+ return -ENXIO;
+
+ if (queue >= priv->plat->rx_queues_to_use ||
+ queue >= priv->plat->tx_queues_to_use)
+ return -EINVAL;
+
+ rx_q = &priv->rx_queue[queue];
+ tx_q = &priv->tx_queue[queue];
+ ch = &priv->channel[queue];
+
+ if (!rx_q->xsk_pool && !tx_q->xsk_pool)
+ return -ENXIO;
+
+ if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
+		/* EQoS does not have a per-DMA channel SW interrupt,
+		 * so we schedule the RX NAPI straight away.
+		 */
+ if (likely(napi_schedule_prep(&ch->rxtx_napi)))
+ __napi_schedule(&ch->rxtx_napi);
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops stmmac_netdev_ops = {
+ .ndo_open = stmmac_open,
+ .ndo_start_xmit = stmmac_xmit,
+ .ndo_stop = stmmac_release,
+ .ndo_change_mtu = stmmac_change_mtu,
+ .ndo_fix_features = stmmac_fix_features,
+ .ndo_set_features = stmmac_set_features,
+ .ndo_set_rx_mode = stmmac_set_rx_mode,
+ .ndo_tx_timeout = stmmac_tx_timeout,
+ .ndo_eth_ioctl = stmmac_ioctl,
+ .ndo_setup_tc = stmmac_setup_tc,
+ .ndo_select_queue = stmmac_select_queue,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = stmmac_poll_controller,
+#endif
+ .ndo_set_mac_address = stmmac_set_mac_address,
+ .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
+ .ndo_bpf = stmmac_bpf,
+ .ndo_xdp_xmit = stmmac_xdp_xmit,
+ .ndo_xsk_wakeup = stmmac_xsk_wakeup,
+};
+
+static void stmmac_reset_subtask(struct stmmac_priv *priv)
+{
+ if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
+ return;
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return;
+
+ netdev_err(priv->dev, "Reset adapter.\n");
+
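+	/* Under the RTNL lock, bounce the interface: dev_close()/dev_open()
+	 * re-run the full teardown and bring-up paths to recover the HW.
+	 */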
+ rtnl_lock();
+ netif_trans_update(priv->dev);
+ while (test_and_set_bit(STMMAC_RESETING, &priv->state))
+ usleep_range(1000, 2000);
+
+ set_bit(STMMAC_DOWN, &priv->state);
+ dev_close(priv->dev);
+ dev_open(priv->dev, NULL);
+ clear_bit(STMMAC_DOWN, &priv->state);
+ clear_bit(STMMAC_RESETING, &priv->state);
+ rtnl_unlock();
+}
+
+static void stmmac_service_task(struct work_struct *work)
+{
+ struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
+ service_task);
+
+ stmmac_reset_subtask(priv);
+ clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
+}
+
+/**
+ * stmmac_hw_init - Init the MAC device
+ * @priv: driver private structure
+ * Description: this function is to configure the MAC device according to
+ * some platform parameters or the HW capability register. It prepares the
+ * driver to use either ring or chain modes and to setup either enhanced or
+ * normal descriptors.
+ */
+static int stmmac_hw_init(struct stmmac_priv *priv)
+{
+ int ret;
+
+	/* dwmac-sun8i only works in chain mode */
+ if (priv->plat->has_sun8i)
+ chain_mode = 1;
+ priv->chain_mode = chain_mode;
+
+ /* Initialize HW Interface */
+ ret = stmmac_hwif_init(priv);
+ if (ret)
+ return ret;
+
+	/* Get the HW capability (GMAC cores newer than 3.50a) */
+ priv->hw_cap_support = stmmac_get_hw_features(priv);
+ if (priv->hw_cap_support) {
+ dev_info(priv->device, "DMA HW capability register supported\n");
+
+ /* We can override some gmac/dma configuration fields: e.g.
+ * enh_desc, tx_coe (e.g. that are passed through the
+ * platform) with the values from the HW capability
+ * register (if supported).
+ */
+ priv->plat->enh_desc = priv->dma_cap.enh_desc;
+ priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
+ !priv->plat->use_phy_wol;
+ priv->hw->pmt = priv->plat->pmt;
+ if (priv->dma_cap.hash_tb_sz) {
+ priv->hw->multicast_filter_bins =
+ (BIT(priv->dma_cap.hash_tb_sz) << 5);
+ priv->hw->mcast_bits_log2 =
+ ilog2(priv->hw->multicast_filter_bins);
+ }
+
+ /* TXCOE doesn't work in thresh DMA mode */
+ if (priv->plat->force_thresh_dma_mode)
+ priv->plat->tx_coe = 0;
+ else
+ priv->plat->tx_coe = priv->dma_cap.tx_coe;
+
+ /* In case of GMAC4 rx_coe is from HW cap register. */
+ priv->plat->rx_coe = priv->dma_cap.rx_coe;
+
+ if (priv->dma_cap.rx_coe_type2)
+ priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
+ else if (priv->dma_cap.rx_coe_type1)
+ priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
+
+ } else {
+ dev_info(priv->device, "No HW DMA feature register supported\n");
+ }
+
+ if (priv->plat->rx_coe) {
+ priv->hw->rx_csum = priv->plat->rx_coe;
+ dev_info(priv->device, "RX Checksum Offload Engine supported\n");
+ if (priv->synopsys_id < DWMAC_CORE_4_00)
+ dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
+ }
+ if (priv->plat->tx_coe)
+ dev_info(priv->device, "TX Checksum insertion supported\n");
+
+ if (priv->plat->pmt) {
+		dev_info(priv->device, "Wake-Up On LAN supported\n");
+ device_set_wakeup_capable(priv->device, 1);
+ }
+
+ if (priv->dma_cap.tsoen)
+ dev_info(priv->device, "TSO supported\n");
+
+ priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
+ priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
+
+ /* Run HW quirks, if any */
+ if (priv->hwif_quirks) {
+ ret = priv->hwif_quirks(priv);
+ if (ret)
+ return ret;
+ }
+
+	/* Rx Watchdog is available in cores newer than 3.40.
+	 * In some cases, for example on buggy HW, this feature
+	 * has to be disabled; this can be done by passing the
+	 * riwt_off field from the platform.
+	 */
+ if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
+ (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
+ priv->use_riwt = 1;
+ dev_info(priv->device,
+ "Enable RX Mitigation via HW Watchdog Timer\n");
+ }
+
+ return 0;
+}
+
+static void stmmac_napi_add(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 queue, maxq;
+
+ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
+
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ ch->priv_data = priv;
+ ch->index = queue;
+ spin_lock_init(&ch->lock);
+
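+		/* Each channel can expose up to three NAPI contexts: rx_napi
+		 * and tx_napi for the regular paths, plus rxtx_napi which is
+		 * used by the XDP zero-copy path to service both directions.
+		 */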
+ if (queue < priv->plat->rx_queues_to_use) {
+ netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
+ NAPI_POLL_WEIGHT);
+ }
+ if (queue < priv->plat->tx_queues_to_use) {
+ netif_tx_napi_add(dev, &ch->tx_napi,
+ stmmac_napi_poll_tx,
+ NAPI_POLL_WEIGHT);
+ }
+ if (queue < priv->plat->rx_queues_to_use &&
+ queue < priv->plat->tx_queues_to_use) {
+ netif_napi_add(dev, &ch->rxtx_napi,
+ stmmac_napi_poll_rxtx,
+ NAPI_POLL_WEIGHT);
+ }
+ }
+}
+
+static void stmmac_napi_del(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 queue, maxq;
+
+ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
+
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ if (queue < priv->plat->rx_queues_to_use)
+ netif_napi_del(&ch->rx_napi);
+ if (queue < priv->plat->tx_queues_to_use)
+ netif_napi_del(&ch->tx_napi);
+ if (queue < priv->plat->rx_queues_to_use &&
+ queue < priv->plat->tx_queues_to_use) {
+ netif_napi_del(&ch->rxtx_napi);
+ }
+ }
+}
+
+int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (netif_running(dev))
+ stmmac_release(dev);
+
+ stmmac_napi_del(dev);
+
+ priv->plat->rx_queues_to_use = rx_cnt;
+ priv->plat->tx_queues_to_use = tx_cnt;
+
+ stmmac_napi_add(dev);
+
+ if (netif_running(dev))
+ ret = stmmac_open(dev);
+
+ return ret;
+}
+
+int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (netif_running(dev))
+ stmmac_release(dev);
+
+ priv->dma_rx_size = rx_size;
+ priv->dma_tx_size = tx_size;
+
+ if (netif_running(dev))
+ ret = stmmac_open(dev);
+
+ return ret;
+}
+
+#define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
+static void stmmac_fpe_lp_task(struct work_struct *work)
+{
+ struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
+ fpe_task);
+ struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
+ enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
+ enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
+ bool *hs_enable = &fpe_cfg->hs_enable;
+ bool *enable = &fpe_cfg->enable;
+ int retries = 20;
+
+ while (retries-- > 0) {
+ /* Bail out immediately if FPE handshake is OFF */
+ if (*lo_state == FPE_STATE_OFF || !*hs_enable)
+ break;
+
+ if (*lo_state == FPE_STATE_ENTERING_ON &&
+ *lp_state == FPE_STATE_ENTERING_ON) {
+ stmmac_fpe_configure(priv, priv->ioaddr,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ *enable);
+
+ netdev_info(priv->dev, "configured FPE\n");
+
+ *lo_state = FPE_STATE_ON;
+ *lp_state = FPE_STATE_ON;
+ netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
+ break;
+ }
+
+ if ((*lo_state == FPE_STATE_CAPABLE ||
+ *lo_state == FPE_STATE_ENTERING_ON) &&
+ *lp_state != FPE_STATE_ON) {
+			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
+ *lo_state, *lp_state);
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+ MPACKET_VERIFY);
+ }
+ /* Sleep then retry */
+ msleep(500);
+ }
+
+ clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
+}
+
+void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
+{
+ if (priv->plat->fpe_cfg->hs_enable != enable) {
+ if (enable) {
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
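+	/* Writing the MAC address registers requires the device clocks to
+	 * be running, so hold a runtime PM reference for the duration.
+	 */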
+ MPACKET_VERIFY);
+ } else {
+ priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
+ priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
+ }
+
+ priv->plat->fpe_cfg->hs_enable = enable;
+ }
+}
+
+/**
+ * stmmac_dvr_probe
+ * @device: device pointer
+ * @plat_dat: platform data pointer
+ * @res: stmmac resource pointer
+ * Description: this is the main probe function, used to call
+ * alloc_etherdev and allocate the priv structure.
+ * Return:
+ * returns 0 on success, otherwise errno.
+ */
+int stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res)
+{
+ struct net_device *ndev = NULL;
+ struct stmmac_priv *priv;
+ u32 rxq;
+ int i, ret = 0;
+
+ ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
+ MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, device);
+
+ priv = netdev_priv(ndev);
+ priv->device = device;
+ priv->dev = ndev;
+
+ stmmac_set_ethtool_ops(ndev);
+ priv->pause = pause;
+ priv->plat = plat_dat;
+ priv->ioaddr = res->addr;
+ priv->dev->base_addr = (unsigned long)res->addr;
+ priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
+
+ priv->dev->irq = res->irq;
+ priv->wol_irq = res->wol_irq;
+ priv->lpi_irq = res->lpi_irq;
+ priv->sfty_ce_irq = res->sfty_ce_irq;
+ priv->sfty_ue_irq = res->sfty_ue_irq;
+ for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
+ priv->rx_irq[i] = res->rx_irq[i];
+ for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
+ priv->tx_irq[i] = res->tx_irq[i];
+
+ if (!is_zero_ether_addr(res->mac))
+ eth_hw_addr_set(priv->dev, res->mac);
+
+ dev_set_drvdata(device, priv->dev);
+
+ /* Verify driver arguments */
+ stmmac_verify_args();
+
+ priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
+ if (!priv->af_xdp_zc_qps)
+ return -ENOMEM;
+
+ /* Allocate workqueue */
+ priv->wq = create_singlethread_workqueue("stmmac_wq");
+ if (!priv->wq) {
+ dev_err(priv->device, "failed to create workqueue\n");
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&priv->service_task, stmmac_service_task);
+
+ /* Initialize Link Partner FPE workqueue */
+ INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
+
+ /* Override with kernel parameters if supplied XXX CRS XXX
+ * this needs to have multiple instances
+ */
+ if ((phyaddr >= 0) && (phyaddr <= 31))
+ priv->plat->phy_addr = phyaddr;
+
+ if (priv->plat->stmmac_rst) {
+ ret = reset_control_assert(priv->plat->stmmac_rst);
+ reset_control_deassert(priv->plat->stmmac_rst);
+		/* Some reset controllers only have a reset callback instead
+		 * of the assert + deassert callback pair.
+		 */
+ if (ret == -ENOTSUPP)
+ reset_control_reset(priv->plat->stmmac_rst);
+ }
+
+ ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
+ if (ret == -ENOTSUPP)
+ dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
+ ERR_PTR(ret));
+
+ /* Init MAC and get the capabilities */
+ ret = stmmac_hw_init(priv);
+ if (ret)
+ goto error_hw_init;
+
+ /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
+ */
+ if (priv->synopsys_id < DWMAC_CORE_5_20)
+ priv->plat->dma_cfg->dche = false;
+
+ stmmac_check_ether_addr(priv);
+
+ ndev->netdev_ops = &stmmac_netdev_ops;
+
+ ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM;
+
+ ret = stmmac_tc_init(priv, priv);
+ if (!ret) {
+ ndev->hw_features |= NETIF_F_HW_TC;
+ }
+
+ if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ if (priv->plat->has_gmac4)
+ ndev->hw_features |= NETIF_F_GSO_UDP_L4;
+ priv->tso = true;
+ dev_info(priv->device, "TSO feature enabled\n");
+ }
+
+ if (priv->dma_cap.sphen) {
+ ndev->hw_features |= NETIF_F_GRO;
+ priv->sph_cap = true;
+ priv->sph = priv->sph_cap;
+ dev_info(priv->device, "SPH feature enabled\n");
+ }
+
+	/* The current IP register MAC_HW_Feature1[ADDR64] only defines
+	 * 32/40/64 bit widths, but some SoCs support other widths: e.g.
+	 * i.MX8MP supports 34 bits, which maps to a 40 bit width in
+	 * MAC_HW_Feature1[ADDR64]. So overwrite dma_cap.addr64 according
+	 * to the real HW design.
+	 */
+ if (priv->plat->addr64)
+ priv->dma_cap.addr64 = priv->plat->addr64;
+
+ if (priv->dma_cap.addr64) {
+ ret = dma_set_mask_and_coherent(device,
+ DMA_BIT_MASK(priv->dma_cap.addr64));
+ if (!ret) {
+ dev_info(priv->device, "Using %d bits DMA width\n",
+ priv->dma_cap.addr64);
+
+ /*
+ * If more than 32 bits can be addressed, make sure to
+ * enable enhanced addressing mode.
+ */
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ priv->plat->dma_cfg->eame = true;
+ } else {
+ ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(priv->device, "Failed to set DMA Mask\n");
+ goto error_hw_init;
+ }
+
+ priv->dma_cap.addr64 = 32;
+ }
+ }
+
+ ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+ ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
+#ifdef STMMAC_VLAN_TAG_USED
+ /* Both mac100 and gmac support receive VLAN tag detection */
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
+ if (priv->dma_cap.vlhash) {
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
+ }
+ if (priv->dma_cap.vlins) {
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+ if (priv->dma_cap.dvlan)
+ ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
+ }
+#endif
+ priv->msg_enable = netif_msg_init(debug, default_msg_level);
+
+ /* Initialize RSS */
+ rxq = priv->plat->rx_queues_to_use;
+ netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
+
+ if (priv->dma_cap.rssen && priv->plat->rss_en)
+ ndev->features |= NETIF_F_RXHASH;
+
+ /* MTU range: 46 - hw-specific max */
+ ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
+ if (priv->plat->has_xgmac)
+ ndev->max_mtu = XGMAC_JUMBO_LEN;
+ else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
+ ndev->max_mtu = JUMBO_LEN;
+ else
+ ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
+	/* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
+	 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
+	 */
+ if ((priv->plat->maxmtu < ndev->max_mtu) &&
+ (priv->plat->maxmtu >= ndev->min_mtu))
+ ndev->max_mtu = priv->plat->maxmtu;
+ else if (priv->plat->maxmtu < ndev->min_mtu)
+ dev_warn(priv->device,
+ "%s: warning: maxmtu having invalid value (%d)\n",
+ __func__, priv->plat->maxmtu);
+
+ if (flow_ctrl)
+ priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
+
+ /* Setup channels NAPI */
+ stmmac_napi_add(ndev);
+
+ mutex_init(&priv->lock);
+
+	/* If a specific clk_csr value is passed from the platform, the
+	 * CSR Clock Range selection cannot be changed at run-time and is
+	 * fixed. Otherwise, the driver will try to set the MDC clock
+	 * dynamically according to the actual csr clock input.
+	 */
+ if (priv->plat->clk_csr >= 0)
+ priv->clk_csr = priv->plat->clk_csr;
+ else
+ stmmac_clk_csr_set(priv);
+
+ stmmac_check_pcs_mode(priv);
+
+ pm_runtime_get_noresume(device);
+ pm_runtime_set_active(device);
+ if (!pm_runtime_enabled(device))
+ pm_runtime_enable(device);
+ /*
+ * Prevent runtime pm from being ON by default. Users can enable
+ * it using power/control in sysfs.
+ */
+ pm_runtime_forbid(device);
+
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI) {
+ /* MDIO bus Registration */
+ ret = stmmac_mdio_register(ndev);
+ if (ret < 0) {
+ dev_err(priv->device,
+ "%s: MDIO bus (id: %d) registration failed",
+ __func__, priv->plat->bus_id);
+ goto error_mdio_register;
+ }
+ }
+
+ if (priv->plat->speed_mode_2500)
+ priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
+
+ if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
+ ret = stmmac_xpcs_setup(priv->mii);
+ if (ret)
+ goto error_xpcs_setup;
+ }
+
+ ret = stmmac_phy_setup(priv);
+ if (ret) {
+ netdev_err(ndev, "failed to setup phy (%d)\n", ret);
+ goto error_phy_setup;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(priv->device, "%s: ERROR %i registering the device\n",
+ __func__, ret);
+ goto error_netdev_register;
+ }
+
+ if (priv->plat->serdes_powerup) {
+ ret = priv->plat->serdes_powerup(ndev,
+ priv->plat->bsp_priv);
+
+ if (ret < 0)
+ goto error_serdes_powerup;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ stmmac_init_fs(ndev);
+#endif
+
+ if (priv->plat->dump_debug_regs)
+ priv->plat->dump_debug_regs(priv->plat->bsp_priv);
+
+ /* Let pm_runtime_put() disable the clocks.
+ * If CONFIG_PM is not enabled, the clocks will stay powered.
+ */
+ pm_runtime_put(device);
+
+ return ret;
+
+error_serdes_powerup:
+ unregister_netdev(ndev);
+error_netdev_register:
+ phylink_destroy(priv->phylink);
+error_xpcs_setup:
+error_phy_setup:
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI)
+ stmmac_mdio_unregister(ndev);
+error_mdio_register:
+ stmmac_napi_del(ndev);
+error_hw_init:
+ destroy_workqueue(priv->wq);
+ bitmap_free(priv->af_xdp_zc_qps);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
+
+/**
+ * stmmac_dvr_remove
+ * @dev: device pointer
+ * Description: this function resets the TX/RX processes, disables the MAC
+ * RX/TX, changes the link status and releases the DMA descriptor rings.
+ */
+int stmmac_dvr_remove(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+	netdev_info(priv->dev, "%s: removing driver\n", __func__);
+
+ stmmac_stop_all_dma(priv);
+ stmmac_mac_set(priv, priv->ioaddr, false);
+ netif_carrier_off(ndev);
+ unregister_netdev(ndev);
+
+	/* Serdes power down needs to happen after the VLAN filter is
+	 * deleted, which is triggered by unregister_netdev().
+ */
+ if (priv->plat->serdes_powerdown)
+ priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
+
+#ifdef CONFIG_DEBUG_FS
+ stmmac_exit_fs(ndev);
+#endif
+ phylink_destroy(priv->phylink);
+ if (priv->plat->stmmac_rst)
+ reset_control_assert(priv->plat->stmmac_rst);
+ reset_control_assert(priv->plat->stmmac_ahb_rst);
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI)
+ stmmac_mdio_unregister(ndev);
+ destroy_workqueue(priv->wq);
+ mutex_destroy(&priv->lock);
+ bitmap_free(priv->af_xdp_zc_qps);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
+
+/**
+ * stmmac_suspend - suspend callback
+ * @dev: device pointer
+ * Description: this function suspends the device; it is called by the
+ * platform driver to stop the network queue, program the PMT register
+ * (for WoL) and clean up and release the driver resources.
+ */
+int stmmac_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ u32 chan;
+
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ mutex_lock(&priv->lock);
+
+ netif_device_detach(ndev);
+
+ stmmac_disable_all_queues(priv);
+
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+
+ if (priv->eee_enabled) {
+ priv->tx_path_in_lpi_mode = false;
+ del_timer_sync(&priv->eee_ctrl_timer);
+ }
+
+ /* Stop TX/RX DMA */
+ stmmac_stop_all_dma(priv);
+
+ if (priv->plat->serdes_powerdown)
+ priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
+
+ /* Enable Power down mode by programming the PMT regs */
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ stmmac_pmt(priv, priv->hw, priv->wolopts);
+ priv->irq_wake = 1;
+ } else {
+ stmmac_mac_set(priv, priv->ioaddr, false);
+ pinctrl_pm_select_sleep_state(priv->device);
+ }
+
+ mutex_unlock(&priv->lock);
+
+ rtnl_lock();
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ phylink_suspend(priv->phylink, true);
+ } else {
+ if (device_may_wakeup(priv->device))
+ phylink_speed_down(priv->phylink, false);
+ phylink_suspend(priv->phylink, false);
+ }
+ rtnl_unlock();
+
+ if (priv->dma_cap.fpesel) {
+ /* Disable FPE */
+ stmmac_fpe_configure(priv, priv->ioaddr,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use, false);
+
+ stmmac_fpe_handshake(priv, false);
+ stmmac_fpe_stop_wq(priv);
+ }
+
+ priv->speed = SPEED_UNKNOWN;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_suspend);
+
+/**
+ * stmmac_reset_queues_param - reset queue parameters
+ * @priv: device pointer
+ */
+static void stmmac_reset_queues_param(struct stmmac_priv *priv)
+{
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < rx_cnt; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ rx_q->cur_rx = 0;
+ rx_q->dirty_rx = 0;
+ }
+
+ for (queue = 0; queue < tx_cnt; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ tx_q->cur_tx = 0;
+ tx_q->dirty_tx = 0;
+ tx_q->mss = 0;
+
+ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
+ }
+}
+
+/**
+ * stmmac_resume - resume callback
+ * @dev: device pointer
+ * Description: on resume this function is invoked to set up the DMA and CORE
+ * in a usable state.
+ */
+int stmmac_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ if (!netif_running(ndev))
+ return 0;
+
+	/* The Power Down bit in the PM register is cleared
+	 * automatically as soon as a magic packet or a Wake-up frame
+	 * is received. Anyway, it's better to clear this bit manually
+	 * because it can cause problems while resuming from other
+	 * devices (e.g. serial console).
+ */
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ mutex_lock(&priv->lock);
+ stmmac_pmt(priv, priv->hw, 0);
+ mutex_unlock(&priv->lock);
+ priv->irq_wake = 0;
+ } else {
+ pinctrl_pm_select_default_state(priv->device);
+ /* reset the phy so that it's ready */
+ if (priv->mii)
+ stmmac_mdio_reset(priv->mii);
+ }
+
+ if (priv->plat->serdes_powerup) {
+ ret = priv->plat->serdes_powerup(ndev,
+ priv->plat->bsp_priv);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ rtnl_lock();
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ phylink_resume(priv->phylink);
+ } else {
+ phylink_resume(priv->phylink);
+ if (device_may_wakeup(priv->device))
+ phylink_speed_up(priv->phylink);
+ }
+ rtnl_unlock();
+
+ rtnl_lock();
+ mutex_lock(&priv->lock);
+
+ stmmac_reset_queues_param(priv);
+
+ stmmac_free_tx_skbufs(priv);
+ stmmac_clear_descriptors(priv);
+
+ stmmac_hw_setup(ndev, false);
+ stmmac_init_coalesce(priv);
+ stmmac_set_rx_mode(ndev);
+
+ stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
+
+ stmmac_enable_all_queues(priv);
+
+ mutex_unlock(&priv->lock);
+ rtnl_unlock();
+
+ netif_device_attach(ndev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_resume);
+
+#ifndef MODULE
+static int __init stmmac_cmdline_opt(char *str)
+{
+ char *opt;
+
+ if (!str || !*str)
+ return -EINVAL;
+ while ((opt = strsep(&str, ",")) != NULL) {
+ if (!strncmp(opt, "debug:", 6)) {
+ if (kstrtoint(opt + 6, 0, &debug))
+ goto err;
+ } else if (!strncmp(opt, "phyaddr:", 8)) {
+ if (kstrtoint(opt + 8, 0, &phyaddr))
+ goto err;
+ } else if (!strncmp(opt, "buf_sz:", 7)) {
+ if (kstrtoint(opt + 7, 0, &buf_sz))
+ goto err;
+ } else if (!strncmp(opt, "tc:", 3)) {
+ if (kstrtoint(opt + 3, 0, &tc))
+ goto err;
+ } else if (!strncmp(opt, "watchdog:", 9)) {
+ if (kstrtoint(opt + 9, 0, &watchdog))
+ goto err;
+ } else if (!strncmp(opt, "flow_ctrl:", 10)) {
+ if (kstrtoint(opt + 10, 0, &flow_ctrl))
+ goto err;
+ } else if (!strncmp(opt, "pause:", 6)) {
+ if (kstrtoint(opt + 6, 0, &pause))
+ goto err;
+ } else if (!strncmp(opt, "eee_timer:", 10)) {
+ if (kstrtoint(opt + 10, 0, &eee_timer))
+ goto err;
+ } else if (!strncmp(opt, "chain_mode:", 11)) {
+ if (kstrtoint(opt + 11, 0, &chain_mode))
+ goto err;
+ }
+ }
+ return 0;
+
+err:
+	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
+ return -EINVAL;
+}
+
+__setup("stmmaceth=", stmmac_cmdline_opt);
+#endif /* MODULE */
+
+static int __init stmmac_init(void)
+{
+#ifdef CONFIG_DEBUG_FS
+ /* Create debugfs main directory if it doesn't exist yet */
+ if (!stmmac_fs_dir)
+ stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
+ register_netdevice_notifier(&stmmac_notifier);
+#endif
+
+ return 0;
+}
+
+static void __exit stmmac_exit(void)
+{
+#ifdef CONFIG_DEBUG_FS
+ unregister_netdevice_notifier(&stmmac_notifier);
+ debugfs_remove_recursive(stmmac_fs_dir);
+#endif
+}
+
+module_init(stmmac_init)
+module_exit(stmmac_exit)
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/rr-cache/8c2b8bf507854aaf7c7a64fde16df892d508760f/preimage b/rr-cache/8c2b8bf507854aaf7c7a64fde16df892d508760f/preimage
new file mode 100644
index 0000000..eea2954
--- /dev/null
+++ b/rr-cache/8c2b8bf507854aaf7c7a64fde16df892d508760f/preimage
@@ -0,0 +1,7541 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*******************************************************************************
+ This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
+ ST Ethernet IPs are built around a Synopsys IP Core.
+
+ Copyright(C) 2007-2011 STMicroelectronics Ltd
+
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+
+ Documentation available at:
+ http://www.stlinux.com
+ Support available at:
+ https://bugzilla.stlinux.com/
+*******************************************************************************/
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/prefetch.h>
+#include <linux/pinctrl/consumer.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#endif /* CONFIG_DEBUG_FS */
+#include <linux/net_tstamp.h>
+#include <linux/phylink.h>
+#include <linux/udp.h>
+#include <linux/bpf_trace.h>
+#include <net/pkt_cls.h>
+#include <net/xdp_sock_drv.h>
+#include "stmmac_ptp.h"
+#include "stmmac.h"
+#include "stmmac_xdp.h"
+#include <linux/reset.h>
+#include <linux/of_mdio.h>
+#include "dwmac1000.h"
+#include "dwxgmac2.h"
+#include "hwif.h"
+
+/* As long as the interface is active, we keep the timestamping counter enabled
+ * with fine resolution and binary rollover. This avoids non-monotonic behavior
+ * (clock jumps) when changing timestamping settings at runtime.
+ */
+#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
+ PTP_TCR_TSCTRLSSR)
+
+#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
+#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
+
+/* Module parameters */
+#define TX_TIMEO 5000
+static int watchdog = TX_TIMEO;
+module_param(watchdog, int, 0644);
+MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
+
+static int debug = -1;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
+
+static int phyaddr = -1;
+module_param(phyaddr, int, 0444);
+MODULE_PARM_DESC(phyaddr, "Physical device address");
+
+#define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4)
+#define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4)
+
+/* Limit to make sure XDP TX and slow path can coexist */
+#define STMMAC_XSK_TX_BUDGET_MAX 256
+#define STMMAC_TX_XSK_AVAIL 16
+#define STMMAC_RX_FILL_BATCH 16
+
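+/* XDP verdict flags used by the RX path */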
+#define STMMAC_XDP_PASS 0
+#define STMMAC_XDP_CONSUMED BIT(0)
+#define STMMAC_XDP_TX BIT(1)
+#define STMMAC_XDP_REDIRECT BIT(2)
+
+static int flow_ctrl = FLOW_AUTO;
+module_param(flow_ctrl, int, 0644);
+MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
+
+static int pause = PAUSE_TIME;
+module_param(pause, int, 0644);
+MODULE_PARM_DESC(pause, "Flow Control Pause Time");
+
+#define TC_DEFAULT 64
+static int tc = TC_DEFAULT;
+module_param(tc, int, 0644);
+MODULE_PARM_DESC(tc, "DMA threshold control value");
+
+#define DEFAULT_BUFSIZE 1536
+static int buf_sz = DEFAULT_BUFSIZE;
+module_param(buf_sz, int, 0644);
+MODULE_PARM_DESC(buf_sz, "DMA buffer size");
+
+#define STMMAC_RX_COPYBREAK 256
+
+static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
+
+#define STMMAC_DEFAULT_LPI_TIMER 1000
+static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+module_param(eee_timer, int, 0644);
+MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
+#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
+
+/* By default the driver will use the ring mode to manage tx and rx descriptors,
+ * but allow user to force to use the chain instead of the ring
+ */
+static unsigned int chain_mode;
+module_param(chain_mode, int, 0444);
+MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
+
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
+/* For MSI interrupts handling */
+static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
+static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
+static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
+static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
+static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
+static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
+static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
+ u32 rxmode, u32 chan);
+
+#ifdef CONFIG_DEBUG_FS
+static const struct net_device_ops stmmac_netdev_ops;
+static void stmmac_init_fs(struct net_device *dev);
+static void stmmac_exit_fs(struct net_device *dev);
+#endif
+
+#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
+
+int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
+{
+ int ret = 0;
+
+ if (enabled) {
+ ret = clk_prepare_enable(priv->plat->stmmac_clk);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(priv->plat->pclk);
+ if (ret) {
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+ return ret;
+ }
+ if (priv->plat->clks_config) {
+ ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
+ if (ret) {
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+ clk_disable_unprepare(priv->plat->pclk);
+ return ret;
+ }
+ }
+ } else {
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+ clk_disable_unprepare(priv->plat->pclk);
+ if (priv->plat->clks_config)
+ priv->plat->clks_config(priv->plat->bsp_priv, enabled);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
+
+/**
+ * stmmac_verify_args - verify the driver parameters.
+ * Description: it checks the driver parameters and sets a default in case
+ * of errors.
+ */
+static void stmmac_verify_args(void)
+{
+ if (unlikely(watchdog < 0))
+ watchdog = TX_TIMEO;
+ if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
+ buf_sz = DEFAULT_BUFSIZE;
+ if (unlikely(flow_ctrl > 1))
+ flow_ctrl = FLOW_AUTO;
+ else if (likely(flow_ctrl < 0))
+ flow_ctrl = FLOW_OFF;
+ if (unlikely((pause < 0) || (pause > 0xffff)))
+ pause = PAUSE_TIME;
+ if (eee_timer < 0)
+ eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+}
+
+static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
+{
+ u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+ u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
+ u32 queue;
+
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ if (stmmac_xdp_is_enabled(priv) &&
+ test_bit(queue, priv->af_xdp_zc_qps)) {
+ napi_disable(&ch->rxtx_napi);
+ continue;
+ }
+
+ if (queue < rx_queues_cnt)
+ napi_disable(&ch->rx_napi);
+ if (queue < tx_queues_cnt)
+ napi_disable(&ch->tx_napi);
+ }
+}
+
+/**
+ * stmmac_disable_all_queues - Disable all queues
+ * @priv: driver private structure
+ */
+static void stmmac_disable_all_queues(struct stmmac_priv *priv)
+{
+ u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ struct stmmac_rx_queue *rx_q;
+ u32 queue;
+
+ /* synchronize_rcu() needed for pending XDP buffers to drain */
+ for (queue = 0; queue < rx_queues_cnt; queue++) {
+ rx_q = &priv->rx_queue[queue];
+ if (rx_q->xsk_pool) {
+ synchronize_rcu();
+ break;
+ }
+ }
+
+ __stmmac_disable_all_queues(priv);
+}
+
+/**
+ * stmmac_enable_all_queues - Enable all queues
+ * @priv: driver private structure
+ */
+static void stmmac_enable_all_queues(struct stmmac_priv *priv)
+{
+ u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+ u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
+ u32 queue;
+
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ if (stmmac_xdp_is_enabled(priv) &&
+ test_bit(queue, priv->af_xdp_zc_qps)) {
+ napi_enable(&ch->rxtx_napi);
+ continue;
+ }
+
+ if (queue < rx_queues_cnt)
+ napi_enable(&ch->rx_napi);
+ if (queue < tx_queues_cnt)
+ napi_enable(&ch->tx_napi);
+ }
+}
+
+static void stmmac_service_event_schedule(struct stmmac_priv *priv)
+{
+ if (!test_bit(STMMAC_DOWN, &priv->state) &&
+ !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
+ queue_work(priv->wq, &priv->service_task);
+}
+
+static void stmmac_global_err(struct stmmac_priv *priv)
+{
+ netif_carrier_off(priv->dev);
+ set_bit(STMMAC_RESET_REQUESTED, &priv->state);
+ stmmac_service_event_schedule(priv);
+}
+
+/**
+ * stmmac_clk_csr_set - dynamically set the MDC clock
+ * @priv: driver private structure
+ * Description: this is to dynamically set the MDC clock according to the csr
+ * clock input.
+ * Note:
+ * If a specific clk_csr value is passed from the platform, the
+ * CSR Clock Range selection cannot be changed at run-time and is
+ * fixed (as reported in the driver documentation). Otherwise the
+ * driver will try to set the MDC clock dynamically according to
+ * the actual clock input.
+ */
+static void stmmac_clk_csr_set(struct stmmac_priv *priv)
+{
+ u32 clk_rate;
+
+ clk_rate = clk_get_rate(priv->plat->stmmac_clk);
+
+	/* The platform-provided default clk_csr is assumed valid in all
+	 * cases except the ones mentioned below. For frequencies higher
+	 * than the IEEE 802.3 specified range we cannot estimate the
+	 * proper divider, as the frequency of clk_csr_i is not known,
+	 * so we do not change the default divider.
+ */
+ if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
+ if (clk_rate < CSR_F_35M)
+ priv->clk_csr = STMMAC_CSR_20_35M;
+ else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
+ priv->clk_csr = STMMAC_CSR_35_60M;
+ else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
+ priv->clk_csr = STMMAC_CSR_60_100M;
+ else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
+ priv->clk_csr = STMMAC_CSR_100_150M;
+ else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
+ priv->clk_csr = STMMAC_CSR_150_250M;
+ else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
+ priv->clk_csr = STMMAC_CSR_250_300M;
+ }
+
+ if (priv->plat->has_sun8i) {
+ if (clk_rate > 160000000)
+ priv->clk_csr = 0x03;
+ else if (clk_rate > 80000000)
+ priv->clk_csr = 0x02;
+ else if (clk_rate > 40000000)
+ priv->clk_csr = 0x01;
+ else
+ priv->clk_csr = 0;
+ }
+
+ if (priv->plat->has_xgmac) {
+ if (clk_rate > 400000000)
+ priv->clk_csr = 0x5;
+ else if (clk_rate > 350000000)
+ priv->clk_csr = 0x4;
+ else if (clk_rate > 300000000)
+ priv->clk_csr = 0x3;
+ else if (clk_rate > 250000000)
+ priv->clk_csr = 0x2;
+ else if (clk_rate > 150000000)
+ priv->clk_csr = 0x1;
+ else
+ priv->clk_csr = 0x0;
+ }
+}
+
+static void print_pkt(unsigned char *buf, int len)
+{
+ pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
+}
+
+static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ u32 avail;
+
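+	/* One descriptor is deliberately kept unused (the -1) so that a
+	 * completely full ring can be distinguished from an empty one.
+	 */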
+ if (tx_q->dirty_tx > tx_q->cur_tx)
+ avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
+ else
+ avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
+
+ return avail;
+}
+
+/**
+ * stmmac_rx_dirty - Get RX queue dirty
+ * @priv: driver private structure
+ * @queue: RX queue index
+ */
+static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ u32 dirty;
+
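+	/* "dirty" is the number of descriptors the DMA has consumed and
+	 * the driver still has to refill.
+	 */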
+ if (rx_q->dirty_rx <= rx_q->cur_rx)
+ dirty = rx_q->cur_rx - rx_q->dirty_rx;
+ else
+ dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
+
+ return dirty;
+}
+
+static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
+{
+ int tx_lpi_timer;
+
+ /* Clear/set the SW EEE timer flag based on LPI ET enablement */
+ priv->eee_sw_timer_en = en ? 0 : 1;
+ tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
+ stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
+}
+
+/**
+ * stmmac_enable_eee_mode - check and enter LPI mode
+ * @priv: driver private structure
+ * Description: this function verifies the conditions and enters LPI mode
+ * in case of EEE.
+ */
+static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
+{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ /* check if all TX queues have the work finished */
+ for (queue = 0; queue < tx_cnt; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ if (tx_q->dirty_tx != tx_q->cur_tx)
+ return; /* still unfinished work */
+ }
+
+ /* Check and enter in LPI mode */
+ if (!priv->tx_path_in_lpi_mode)
+ stmmac_set_eee_mode(priv, priv->hw,
+ priv->plat->en_tx_lpi_clockgating);
+}
+
+/**
+ * stmmac_disable_eee_mode - disable and exit from LPI mode
+ * @priv: driver private structure
+ * Description: this function exits and disables EEE if the LPI state
+ * is true. This is called by the xmit.
+ */
+void stmmac_disable_eee_mode(struct stmmac_priv *priv)
+{
+ if (!priv->eee_sw_timer_en) {
+ stmmac_lpi_entry_timer_config(priv, 0);
+ return;
+ }
+
+ stmmac_reset_eee_mode(priv, priv->hw);
+ del_timer_sync(&priv->eee_ctrl_timer);
+ priv->tx_path_in_lpi_mode = false;
+}
+
+/**
+ * stmmac_eee_ctrl_timer - EEE TX SW timer.
+ * @t: timer_list struct containing private info
+ * Description:
+ * if there is no data transfer and if we are not in LPI state,
+ * then the MAC Transmitter can be moved to LPI state.
+ */
+static void stmmac_eee_ctrl_timer(struct timer_list *t)
+{
+ struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
+
+ stmmac_enable_eee_mode(priv);
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+}
+
+/**
+ * stmmac_eee_init - init EEE
+ * @priv: driver private structure
+ * Description:
+ * if the GMAC supports the EEE (from the HW cap reg) and the phy device
+ * can also manage EEE, this function enables the LPI state and starts the
+ * related timer.
+ */
+bool stmmac_eee_init(struct stmmac_priv *priv)
+{
+ int eee_tw_timer = priv->eee_tw_timer;
+
+	/* When using PCS we cannot deal with the PHY registers at this
+	 * stage, so we do not support extra features like EEE.
+ */
+ if (priv->hw->pcs == STMMAC_PCS_TBI ||
+ priv->hw->pcs == STMMAC_PCS_RTBI)
+ return false;
+
+ /* Check if MAC core supports the EEE feature. */
+ if (!priv->dma_cap.eee)
+ return false;
+
+ mutex_lock(&priv->lock);
+
+ /* Check if it needs to be deactivated */
+ if (!priv->eee_active) {
+ if (priv->eee_enabled) {
+ netdev_dbg(priv->dev, "disable EEE\n");
+ stmmac_lpi_entry_timer_config(priv, 0);
+ del_timer_sync(&priv->eee_ctrl_timer);
+ stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
+ if (priv->hw->xpcs)
+ xpcs_config_eee(priv->hw->xpcs,
+ priv->plat->mult_fact_100ns,
+ false);
+ }
+ mutex_unlock(&priv->lock);
+ return false;
+ }
+
+ if (priv->eee_active && !priv->eee_enabled) {
+ timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
+ stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
+ eee_tw_timer);
+ if (priv->hw->xpcs)
+ xpcs_config_eee(priv->hw->xpcs,
+ priv->plat->mult_fact_100ns,
+ true);
+ }
+
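+	/* Prefer the HW LPI entry timer on GMAC4 when the requested
+	 * timeout fits the HW range; otherwise fall back to the SW
+	 * EEE timer.
+	 */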
+ if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
+ del_timer_sync(&priv->eee_ctrl_timer);
+ priv->tx_path_in_lpi_mode = false;
+ stmmac_lpi_entry_timer_config(priv, 1);
+ } else {
+ stmmac_lpi_entry_timer_config(priv, 0);
+ mod_timer(&priv->eee_ctrl_timer,
+ STMMAC_LPI_T(priv->tx_lpi_timer));
+ }
+
+ mutex_unlock(&priv->lock);
+ netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
+ return true;
+}
+
+/* stmmac_get_tx_hwtstamp - get HW TX timestamps
+ * @priv: driver private structure
+ * @p : descriptor pointer
+ * @skb : the socket buffer
+ * Description :
+ * This function reads the timestamp from the descriptor, performs some
+ * sanity checks and passes it to the stack.
+ */
+static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
+ struct dma_desc *p, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps shhwtstamp;
+ bool found = false;
+ u64 ns = 0;
+
+ if (!priv->hwts_tx_en)
+ return;
+
+ /* exit if skb doesn't support hw tstamp */
+ if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
+ return;
+
+ /* check tx tstamp status */
+ if (stmmac_get_tx_timestamp_status(priv, p)) {
+ stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
+ found = true;
+ } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
+ found = true;
+ }
+
+ if (found) {
+ ns -= priv->plat->cdc_error_adj;
+
+ memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp.hwtstamp = ns_to_ktime(ns);
+
+ netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
+ /* pass tstamp to stack */
+ skb_tstamp_tx(skb, &shhwtstamp);
+ }
+}
+
+/* stmmac_get_rx_hwtstamp - get HW RX timestamps
+ * @priv: driver private structure
+ * @p : descriptor pointer
+ * @np : next descriptor pointer
+ * @skb : the socket buffer
+ * Description :
+ * This function reads the received packet's timestamp from the descriptor
+ * and passes it to the stack. It also performs some sanity checks.
+ */
+static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
+ struct dma_desc *np, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shhwtstamp = NULL;
+ struct dma_desc *desc = p;
+ u64 ns = 0;
+
+ if (!priv->hwts_rx_en)
+ return;
+ /* For GMAC4, the valid timestamp is from CTX next desc. */
+ if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
+ desc = np;
+
+ /* Check if timestamp is available */
+ if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
+ stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
+
+ ns -= priv->plat->cdc_error_adj;
+
+ netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
+ shhwtstamp = skb_hwtstamps(skb);
+ memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp->hwtstamp = ns_to_ktime(ns);
+ } else {
+ netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
+ }
+}
+
+/**
+ * stmmac_hwtstamp_set - control hardware timestamping.
+ * @dev: device pointer.
+ * @ifr: An IOCTL-specific structure that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * Description:
+ * This function configures the MAC to enable/disable both outgoing (TX)
+ * and incoming (RX) packet time stamping based on user input.
+ * Return Value:
+ * 0 on success and an appropriate -ve integer on failure.
+ */
+static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config config;
+ u32 ptp_v2 = 0;
+ u32 tstamp_all = 0;
+ u32 ptp_over_ipv4_udp = 0;
+ u32 ptp_over_ipv6_udp = 0;
+ u32 ptp_over_ethernet = 0;
+ u32 snap_type_sel = 0;
+ u32 ts_master_en = 0;
+ u32 ts_event_en = 0;
+
+ if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
+ netdev_alert(priv->dev, "No support for HW time stamping\n");
+ priv->hwts_tx_en = 0;
+ priv->hwts_rx_en = 0;
+
+ return -EOPNOTSUPP;
+ }
+
+ if (copy_from_user(&config, ifr->ifr_data,
+ sizeof(config)))
+ return -EFAULT;
+
+ netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
+ __func__, config.flags, config.tx_type, config.rx_filter);
+
+ if (config.tx_type != HWTSTAMP_TX_OFF &&
+ config.tx_type != HWTSTAMP_TX_ON)
+ return -ERANGE;
+
+ if (priv->adv_ts) {
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ /* time stamp no incoming packet at all */
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ /* PTP v1, UDP, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ /* 'xmac' hardware can support Sync, Pdelay_Req and
+			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
+			 * This leaves Delay_Req timestamps out.
+			 * Enable all events *and* general purpose message
+			 * timestamping.
+ */
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ /* PTP v1, UDP, Sync packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ /* take time stamp for SYNC messages only */
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ /* PTP v1, UDP, Delay_req packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ /* take time stamp for Delay_Req messages only */
+ ts_master_en = PTP_TCR_TSMSTRENA;
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ /* PTP v2, UDP, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for all event messages */
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ /* PTP v2, UDP, Sync packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for SYNC messages only */
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ /* PTP v2, UDP, Delay_req packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for Delay_Req messages only */
+ ts_master_en = PTP_TCR_TSMSTRENA;
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ /* PTP v2/802.AS1 any layer, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+ if (priv->synopsys_id < DWMAC_CORE_4_10)
+ ts_event_en = PTP_TCR_TSEVNTENA;
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ ptp_over_ethernet = PTP_TCR_TSIPENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ /* PTP v2/802.AS1, any layer, Sync packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for SYNC messages only */
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ ptp_over_ethernet = PTP_TCR_TSIPENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ /* PTP v2/802.AS1, any layer, Delay_req packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for Delay_Req messages only */
+ ts_master_en = PTP_TCR_TSMSTRENA;
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ ptp_over_ethernet = PTP_TCR_TSIPENA;
+ break;
+
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_ALL:
+ /* time stamp any incoming packet */
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_all = PTP_TCR_TSENALL;
+ break;
+
+ default:
+ return -ERANGE;
+ }
+ } else {
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ default:
+ /* PTP v1, UDP, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ }
+ }
+ priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
+ priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
+
+ priv->systime_flags = STMMAC_HWTS_ACTIVE;
+
+ if (priv->hwts_tx_en || priv->hwts_rx_en) {
+ priv->systime_flags |= tstamp_all | ptp_v2 |
+ ptp_over_ethernet | ptp_over_ipv6_udp |
+ ptp_over_ipv4_udp | ts_event_en |
+ ts_master_en | snap_type_sel;
+ }
+
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
+
+ memcpy(&priv->tstamp_config, &config, sizeof(config));
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+
+/**
+ * stmmac_hwtstamp_get - read hardware timestamping.
+ * @dev: device pointer.
+ * @ifr: An IOCTL-specific structure that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * Description:
+ * This function obtains the current hardware timestamping settings
+ * as requested.
+ */
+static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config *config = &priv->tstamp_config;
+
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, config,
+ sizeof(*config)) ? -EFAULT : 0;
+}
+
+/**
+ * stmmac_init_tstamp_counter - init hardware timestamping counter
+ * @priv: driver private structure
+ * @systime_flags: timestamping flags
+ * Description:
+ * Initialize hardware counter for packet timestamping.
+ * This is valid as long as the interface is open and not suspended.
+ * It is rerun after resuming from suspend, in which case the timestamping
+ * flags updated by stmmac_hwtstamp_set() also need to be restored.
+ */
+int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
+{
+ bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ struct timespec64 now;
+ u32 sec_inc = 0;
+ u64 temp = 0;
+ int ret;
+
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
+ return -EOPNOTSUPP;
+
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0) {
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
+ priv->systime_flags = systime_flags;
+
+ /* program Sub Second Increment reg */
+ stmmac_config_sub_second_increment(priv, priv->ptpaddr,
+ priv->plat->clk_ptp_rate,
+ xmac, &sec_inc);
+ temp = div_u64(1000000000ULL, sec_inc);
+
+ /* Store sub second increment for later use */
+ priv->sub_second_inc = sec_inc;
+
+	/* Calculate the default addend value:
+	 * addend = (2^32)/freq_div_ratio,
+	 * where freq_div_ratio = 1e9ns/sec_inc
+ */
+ temp = (u64)(temp << 32);
+ priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
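+	/* Illustrative example (hypothetical rates): with a 100 MHz PTP
+	 * clock and sec_inc = 20 ns, temp = 1e9 / 20 = 5e7 and addend =
+	 * (5e7 << 32) / 1e8 = 2^31, so the HW accumulator overflows every
+	 * two 10 ns clock cycles and the sub-second counter advances by
+	 * sec_inc = 20 ns, i.e. at the correct real-time rate.
+	 */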
+ stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
+
+ /* initialize system time */
+ ktime_get_real_ts64(&now);
+
+ /* lower 32 bits of tv_sec are safe until y2106 */
+ stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
+
+/**
+ * stmmac_init_ptp - init PTP
+ * @priv: driver private structure
+ * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
+ * This is done by looking at the HW cap. register.
+ * This function also registers the ptp driver.
+ */
+static int stmmac_init_ptp(struct stmmac_priv *priv)
+{
+ bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ int ret;
+
+ ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
+ if (ret)
+ return ret;
+
+ priv->adv_ts = 0;
+ /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
+ if (xmac && priv->dma_cap.atime_stamp)
+ priv->adv_ts = 1;
+ /* Dwmac 3.x core with extend_desc can support adv_ts */
+ else if (priv->extend_desc && priv->dma_cap.atime_stamp)
+ priv->adv_ts = 1;
+
+ if (priv->dma_cap.time_stamp)
+ netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
+
+ if (priv->adv_ts)
+ netdev_info(priv->dev,
+ "IEEE 1588-2008 Advanced Timestamp supported\n");
+
+ priv->hwts_tx_en = 0;
+ priv->hwts_rx_en = 0;
+
+ stmmac_ptp_register(priv);
+
+ return 0;
+}
+
+static void stmmac_release_ptp(struct stmmac_priv *priv)
+{
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
+ stmmac_ptp_unregister(priv);
+}
+
+/**
+ * stmmac_mac_flow_ctrl - Configure flow control in all queues
+ * @priv: driver private structure
+ * @duplex: duplex passed to the next function
+ * Description: It is used for configuring the flow control in all queues
+ */
+static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
+{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+
+ stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
+ priv->pause, tx_cnt);
+}
+
+static void stmmac_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mac_supported) = { 0, };
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ int tx_cnt = priv->plat->tx_queues_to_use;
+ int max_speed = priv->plat->max_speed;
+
+ phylink_set(mac_supported, 10baseT_Half);
+ phylink_set(mac_supported, 10baseT_Full);
+ phylink_set(mac_supported, 100baseT_Half);
+ phylink_set(mac_supported, 100baseT_Full);
+ phylink_set(mac_supported, 1000baseT_Half);
+ phylink_set(mac_supported, 1000baseT_Full);
+ phylink_set(mac_supported, 1000baseKX_Full);
+
+ phylink_set(mac_supported, Autoneg);
+ phylink_set(mac_supported, Pause);
+ phylink_set(mac_supported, Asym_Pause);
+ phylink_set_port_modes(mac_supported);
+
+ /* Cut down 1G if asked to */
+ if ((max_speed > 0) && (max_speed < 1000)) {
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ } else if (priv->plat->has_gmac4) {
+ if (!max_speed || max_speed >= 2500) {
+ phylink_set(mac_supported, 2500baseT_Full);
+ phylink_set(mac_supported, 2500baseX_Full);
+ }
+ } else if (priv->plat->has_xgmac) {
+ if (!max_speed || (max_speed >= 2500)) {
+ phylink_set(mac_supported, 2500baseT_Full);
+ phylink_set(mac_supported, 2500baseX_Full);
+ }
+ if (!max_speed || (max_speed >= 5000)) {
+ phylink_set(mac_supported, 5000baseT_Full);
+ }
+ if (!max_speed || (max_speed >= 10000)) {
+ phylink_set(mac_supported, 10000baseSR_Full);
+ phylink_set(mac_supported, 10000baseLR_Full);
+ phylink_set(mac_supported, 10000baseER_Full);
+ phylink_set(mac_supported, 10000baseLRM_Full);
+ phylink_set(mac_supported, 10000baseT_Full);
+ phylink_set(mac_supported, 10000baseKX4_Full);
+ phylink_set(mac_supported, 10000baseKR_Full);
+ }
+ if (!max_speed || (max_speed >= 25000)) {
+ phylink_set(mac_supported, 25000baseCR_Full);
+ phylink_set(mac_supported, 25000baseKR_Full);
+ phylink_set(mac_supported, 25000baseSR_Full);
+ }
+ if (!max_speed || (max_speed >= 40000)) {
+ phylink_set(mac_supported, 40000baseKR4_Full);
+ phylink_set(mac_supported, 40000baseCR4_Full);
+ phylink_set(mac_supported, 40000baseSR4_Full);
+ phylink_set(mac_supported, 40000baseLR4_Full);
+ }
+ if (!max_speed || (max_speed >= 50000)) {
+ phylink_set(mac_supported, 50000baseCR2_Full);
+ phylink_set(mac_supported, 50000baseKR2_Full);
+ phylink_set(mac_supported, 50000baseSR2_Full);
+ phylink_set(mac_supported, 50000baseKR_Full);
+ phylink_set(mac_supported, 50000baseSR_Full);
+ phylink_set(mac_supported, 50000baseCR_Full);
+ phylink_set(mac_supported, 50000baseLR_ER_FR_Full);
+ phylink_set(mac_supported, 50000baseDR_Full);
+ }
+ if (!max_speed || (max_speed >= 100000)) {
+ phylink_set(mac_supported, 100000baseKR4_Full);
+ phylink_set(mac_supported, 100000baseSR4_Full);
+ phylink_set(mac_supported, 100000baseCR4_Full);
+ phylink_set(mac_supported, 100000baseLR4_ER4_Full);
+ phylink_set(mac_supported, 100000baseKR2_Full);
+ phylink_set(mac_supported, 100000baseSR2_Full);
+ phylink_set(mac_supported, 100000baseCR2_Full);
+ phylink_set(mac_supported, 100000baseLR2_ER2_FR2_Full);
+ phylink_set(mac_supported, 100000baseDR2_Full);
+ }
+ }
+
+	/* Half-Duplex can only work with a single queue */
+ if (tx_cnt > 1) {
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 1000baseT_Half);
+ }
+
+ linkmode_and(supported, supported, mac_supported);
+ linkmode_andnot(supported, supported, mask);
+
+ linkmode_and(state->advertising, state->advertising, mac_supported);
+ linkmode_andnot(state->advertising, state->advertising, mask);
+
+ /* If PCS is supported, check which modes it supports. */
+ if (priv->hw->xpcs)
+ xpcs_validate(priv->hw->xpcs, supported, state);
+}
+
+static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ /* Nothing to do, xpcs_config() handles everything */
+}
+
+static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
+{
+ struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
+ enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
+ enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
+ bool *hs_enable = &fpe_cfg->hs_enable;
+
+ if (is_up && *hs_enable) {
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr, MPACKET_VERIFY);
+ } else {
+ *lo_state = FPE_STATE_OFF;
+ *lp_state = FPE_STATE_OFF;
+ }
+}
+
+static void stmmac_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ stmmac_mac_set(priv, priv->ioaddr, false);
+ priv->eee_active = false;
+ priv->tx_lpi_enabled = false;
+ priv->eee_enabled = stmmac_eee_init(priv);
+ stmmac_set_eee_pls(priv, priv->hw, false);
+
+ if (priv->dma_cap.fpesel)
+ stmmac_fpe_link_state_handle(priv, false);
+}
+
+static void stmmac_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ u32 ctrl;
+
+ ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+ ctrl &= ~priv->hw->link.speed_mask;
+
+ if (interface == PHY_INTERFACE_MODE_USXGMII) {
+ switch (speed) {
+ case SPEED_10000:
+ ctrl |= priv->hw->link.xgmii.speed10000;
+ break;
+ case SPEED_5000:
+ ctrl |= priv->hw->link.xgmii.speed5000;
+ break;
+ case SPEED_2500:
+ ctrl |= priv->hw->link.xgmii.speed2500;
+ break;
+ default:
+ return;
+ }
+ } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
+ switch (speed) {
+ case SPEED_100000:
+ ctrl |= priv->hw->link.xlgmii.speed100000;
+ break;
+ case SPEED_50000:
+ ctrl |= priv->hw->link.xlgmii.speed50000;
+ break;
+ case SPEED_40000:
+ ctrl |= priv->hw->link.xlgmii.speed40000;
+ break;
+ case SPEED_25000:
+ ctrl |= priv->hw->link.xlgmii.speed25000;
+ break;
+ case SPEED_10000:
+ ctrl |= priv->hw->link.xgmii.speed10000;
+ break;
+ case SPEED_2500:
+ ctrl |= priv->hw->link.speed2500;
+ break;
+ case SPEED_1000:
+ ctrl |= priv->hw->link.speed1000;
+ break;
+ default:
+ return;
+ }
+ } else {
+ switch (speed) {
+ case SPEED_2500:
+ ctrl |= priv->hw->link.speed2500;
+ break;
+ case SPEED_1000:
+ ctrl |= priv->hw->link.speed1000;
+ break;
+ case SPEED_100:
+ ctrl |= priv->hw->link.speed100;
+ break;
+ case SPEED_10:
+ ctrl |= priv->hw->link.speed10;
+ break;
+ default:
+ return;
+ }
+ }
+
+ priv->speed = speed;
+
+ if (priv->plat->fix_mac_speed)
+ priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
+
+ if (!duplex)
+ ctrl &= ~priv->hw->link.duplex;
+ else
+ ctrl |= priv->hw->link.duplex;
+
+ /* Flow Control operation */
+ if (tx_pause && rx_pause)
+ stmmac_mac_flow_ctrl(priv, duplex);
+
+ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+
+ stmmac_mac_set(priv, priv->ioaddr, true);
+ if (phy && priv->dma_cap.eee) {
+ priv->eee_active = phy_init_eee(phy, 1) >= 0;
+ priv->eee_enabled = stmmac_eee_init(priv);
+ priv->tx_lpi_enabled = priv->eee_enabled;
+ stmmac_set_eee_pls(priv, priv->hw, true);
+ }
+
+ if (priv->dma_cap.fpesel)
+ stmmac_fpe_link_state_handle(priv, true);
+}
+
+static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
+ .validate = stmmac_validate,
+ .mac_config = stmmac_mac_config,
+ .mac_link_down = stmmac_mac_link_down,
+ .mac_link_up = stmmac_mac_link_up,
+};
+
+/**
+ * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
+ * @priv: driver private structure
+ * Description: this verifies whether the HW supports the Physical Coding
+ * Sublayer (PCS) interface, which can be used when the MAC is configured
+ * for the TBI, RTBI, or SGMII PHY interface.
+ */
+static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
+{
+ int interface = priv->plat->interface;
+
+ if (priv->dma_cap.pcs) {
+ if ((interface == PHY_INTERFACE_MODE_RGMII) ||
+ (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
+ (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
+ (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+ netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
+ priv->hw->pcs = STMMAC_PCS_RGMII;
+ } else if (interface == PHY_INTERFACE_MODE_SGMII) {
+ netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
+ priv->hw->pcs = STMMAC_PCS_SGMII;
+ }
+ }
+}
+
+/**
+ * stmmac_init_phy - PHY initialization
+ * @dev: net device structure
+ * Description: it initializes the driver's PHY state, and attaches the PHY
+ * to the mac driver.
+ * Return value:
+ * 0 on success
+ */
+static int stmmac_init_phy(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct device_node *node;
+ int ret;
+
+ node = priv->plat->phylink_node;
+
+ if (node)
+ ret = phylink_of_phy_connect(priv->phylink, node, 0);
+
+	/* Some DT bindings do not set up the PHY handle. Let's try to
+	 * parse it manually.
+ */
+ if (!node || ret) {
+ int addr = priv->plat->phy_addr;
+ struct phy_device *phydev;
+
+ phydev = mdiobus_get_phy(priv->mii, addr);
+ if (!phydev) {
+ netdev_err(priv->dev, "no phy at addr %d\n", addr);
+ return -ENODEV;
+ }
+
+ ret = phylink_connect_phy(priv->phylink, phydev);
+ }
+
+ if (!priv->plat->pmt) {
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+ phylink_ethtool_get_wol(priv->phylink, &wol);
+ device_set_wakeup_capable(priv->device, !!wol.supported);
+ }
+
+ return ret;
+}
+
+static int stmmac_phy_setup(struct stmmac_priv *priv)
+{
+ struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
+ struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
+ int mode = priv->plat->phy_interface;
+ struct phylink *phylink;
+
+ priv->phylink_config.dev = &priv->dev->dev;
+ priv->phylink_config.type = PHYLINK_NETDEV;
+ priv->phylink_config.pcs_poll = true;
+ if (priv->plat->mdio_bus_data)
+ priv->phylink_config.ovr_an_inband =
+ mdio_bus_data->xpcs_an_inband;
+
+ if (!fwnode)
+ fwnode = dev_fwnode(priv->device);
+
+ phylink = phylink_create(&priv->phylink_config, fwnode,
+ mode, &stmmac_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ if (priv->hw->xpcs)
+ phylink_set_pcs(phylink, &priv->hw->xpcs->pcs);
+
+ priv->phylink = phylink;
+ return 0;
+}
+
+static void stmmac_display_rx_rings(struct stmmac_priv *priv)
+{
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ unsigned int desc_size;
+ void *head_rx;
+ u32 queue;
+
+ /* Display RX rings */
+ for (queue = 0; queue < rx_cnt; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ pr_info("\tRX Queue %u rings\n", queue);
+
+ if (priv->extend_desc) {
+ head_rx = (void *)rx_q->dma_erx;
+ desc_size = sizeof(struct dma_extended_desc);
+ } else {
+ head_rx = (void *)rx_q->dma_rx;
+ desc_size = sizeof(struct dma_desc);
+ }
+
+ /* Display RX ring */
+ stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true,
+ rx_q->dma_rx_phy, desc_size);
+ }
+}
+
+static void stmmac_display_tx_rings(struct stmmac_priv *priv)
+{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ unsigned int desc_size;
+ void *head_tx;
+ u32 queue;
+
+ /* Display TX rings */
+ for (queue = 0; queue < tx_cnt; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ pr_info("\tTX Queue %d rings\n", queue);
+
+ if (priv->extend_desc) {
+ head_tx = (void *)tx_q->dma_etx;
+ desc_size = sizeof(struct dma_extended_desc);
+ } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
+ head_tx = (void *)tx_q->dma_entx;
+ desc_size = sizeof(struct dma_edesc);
+ } else {
+ head_tx = (void *)tx_q->dma_tx;
+ desc_size = sizeof(struct dma_desc);
+ }
+
+ stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false,
+ tx_q->dma_tx_phy, desc_size);
+ }
+}
+
+static void stmmac_display_rings(struct stmmac_priv *priv)
+{
+ /* Display RX ring */
+ stmmac_display_rx_rings(priv);
+
+ /* Display TX ring */
+ stmmac_display_tx_rings(priv);
+}
+
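+/**
+ * stmmac_set_bfsize - pick the DMA buffer size for a given MTU
+ * @mtu: maximum transmission unit
+ * @bufsize: current buffer size
+ * Description: maps the MTU onto the next supported DMA buffer size,
+ * stepping up through 2/4/8/16 KiB and defaulting to DEFAULT_BUFSIZE
+ * for standard MTUs.
+ */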
+static int stmmac_set_bfsize(int mtu, int bufsize)
+{
+ int ret = bufsize;
+
+ if (mtu >= BUF_SIZE_8KiB)
+ ret = BUF_SIZE_16KiB;
+ else if (mtu >= BUF_SIZE_4KiB)
+ ret = BUF_SIZE_8KiB;
+ else if (mtu >= BUF_SIZE_2KiB)
+ ret = BUF_SIZE_4KiB;
+ else if (mtu > DEFAULT_BUFSIZE)
+ ret = BUF_SIZE_2KiB;
+ else
+ ret = DEFAULT_BUFSIZE;
+
+ return ret;
+}
+
+/**
+ * stmmac_clear_rx_descriptors - clear RX descriptors
+ * @priv: driver private structure
+ * @queue: RX queue index
+ * Description: this function is called to clear the RX descriptors,
+ * whether basic or extended descriptors are used.
+ */
+static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int i;
+
+ /* Clear the RX descriptors */
+ for (i = 0; i < priv->dma_rx_size; i++)
+ if (priv->extend_desc)
+ stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
+ priv->use_riwt, priv->mode,
+ (i == priv->dma_rx_size - 1),
+ priv->dma_buf_sz);
+ else
+ stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
+ priv->use_riwt, priv->mode,
+ (i == priv->dma_rx_size - 1),
+ priv->dma_buf_sz);
+}
+
+/**
+ * stmmac_clear_tx_descriptors - clear tx descriptors
+ * @priv: driver private structure
+ * @queue: TX queue index.
+ * Description: this function is called to clear the TX descriptors,
+ * whether basic or extended descriptors are used.
+ */
+static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ int i;
+
+ /* Clear the TX descriptors */
+ for (i = 0; i < priv->dma_tx_size; i++) {
+ int last = (i == (priv->dma_tx_size - 1));
+ struct dma_desc *p;
+
+ if (priv->extend_desc)
+ p = &tx_q->dma_etx[i].basic;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &tx_q->dma_entx[i].basic;
+ else
+ p = &tx_q->dma_tx[i];
+
+ stmmac_init_tx_desc(priv, p, priv->mode, last);
+ }
+}
+
+/**
+ * stmmac_clear_descriptors - clear descriptors
+ * @priv: driver private structure
+ * Description: this function is called to clear the TX and RX descriptors,
+ * whether basic or extended descriptors are used.
+ */
+static void stmmac_clear_descriptors(struct stmmac_priv *priv)
+{
+ u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ /* Clear the RX descriptors */
+ for (queue = 0; queue < rx_queue_cnt; queue++)
+ stmmac_clear_rx_descriptors(priv, queue);
+
+ /* Clear the TX descriptors */
+ for (queue = 0; queue < tx_queue_cnt; queue++)
+ stmmac_clear_tx_descriptors(priv, queue);
+}
+
+/**
+ * stmmac_init_rx_buffers - init the RX descriptor buffer.
+ * @priv: driver private structure
+ * @p: descriptor pointer
+ * @i: descriptor index
+ * @flags: gfp flag
+ * @queue: RX queue index
+ * Description: this function is called to allocate a receive buffer, perform
+ * the DMA mapping and init the descriptor.
+ */
+static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
+ int i, gfp_t flags, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
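+	/* If the DMA engine can only address 32 bits, allocate the RX
+	 * pages from the 32-bit addressable zone.
+	 */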
+ if (priv->dma_cap.addr64 <= 32)
+ gfp |= GFP_DMA32;
+
+ if (!buf->page) {
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
+ if (!buf->page)
+ return -ENOMEM;
+ buf->page_offset = stmmac_rx_offset(priv);
+ }
+
+ if (priv->sph && !buf->sec_page) {
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
+ if (!buf->sec_page)
+ return -ENOMEM;
+
+ buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
+ } else {
+ buf->sec_page = NULL;
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
+ }
+
+ buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
+
+ stmmac_set_desc_addr(priv, p, buf->addr);
+ if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+ stmmac_init_desc3(priv, p);
+
+ return 0;
+}
+
+/**
+ * stmmac_free_rx_buffer - free an RX dma buffer
+ * @priv: private structure
+ * @queue: RX queue index
+ * @i: buffer index.
+ */
+static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+
+ if (buf->page)
+ page_pool_put_full_page(rx_q->page_pool, buf->page, false);
+ buf->page = NULL;
+
+ if (buf->sec_page)
+ page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
+ buf->sec_page = NULL;
+}
+
+/**
+ * stmmac_free_tx_buffer - free a TX dma buffer
+ * @priv: private structure
+ * @queue: TX queue index
+ * @i: buffer index.
+ */
+static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ if (tx_q->tx_skbuff_dma[i].buf &&
+ tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
+ if (tx_q->tx_skbuff_dma[i].map_as_page)
+ dma_unmap_page(priv->device,
+ tx_q->tx_skbuff_dma[i].buf,
+ tx_q->tx_skbuff_dma[i].len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(priv->device,
+ tx_q->tx_skbuff_dma[i].buf,
+ tx_q->tx_skbuff_dma[i].len,
+ DMA_TO_DEVICE);
+ }
+
+ if (tx_q->xdpf[i] &&
+ (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
+ tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
+ xdp_return_frame(tx_q->xdpf[i]);
+ tx_q->xdpf[i] = NULL;
+ }
+
+ if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
+ tx_q->xsk_frames_done++;
+
+ if (tx_q->tx_skbuff[i] &&
+ tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
+ dev_kfree_skb_any(tx_q->tx_skbuff[i]);
+ tx_q->tx_skbuff[i] = NULL;
+ }
+
+ tx_q->tx_skbuff_dma[i].buf = 0;
+ tx_q->tx_skbuff_dma[i].map_as_page = false;
+}
+
+/**
+ * dma_free_rx_skbufs - free RX dma buffers
+ * @priv: private structure
+ * @queue: RX queue index
+ */
+static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
+{
+ int i;
+
+ for (i = 0; i < priv->dma_rx_size; i++)
+ stmmac_free_rx_buffer(priv, queue, i);
+}
+
+static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv, u32 queue,
+ gfp_t flags)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int i;
+
+ for (i = 0; i < priv->dma_rx_size; i++) {
+ struct dma_desc *p;
+ int ret;
+
+ if (priv->extend_desc)
+ p = &((rx_q->dma_erx + i)->basic);
+ else
+ p = rx_q->dma_rx + i;
+
+ ret = stmmac_init_rx_buffers(priv, p, i, flags,
+ queue);
+ if (ret)
+ return ret;
+
+ rx_q->buf_alloc_num++;
+ }
+
+ return 0;
+}
+
+/**
+ * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
+ * @priv: private structure
+ * @queue: RX queue index
+ */
+static void dma_free_rx_xskbufs(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int i;
+
+ for (i = 0; i < priv->dma_rx_size; i++) {
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+
+ if (!buf->xdp)
+ continue;
+
+ xsk_buff_free(buf->xdp);
+ buf->xdp = NULL;
+ }
+}
+
+static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int i;
+
+ for (i = 0; i < priv->dma_rx_size; i++) {
+ struct stmmac_rx_buffer *buf;
+ dma_addr_t dma_addr;
+ struct dma_desc *p;
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(rx_q->dma_erx + i);
+ else
+ p = rx_q->dma_rx + i;
+
+ buf = &rx_q->buf_pool[i];
+
+ buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
+ if (!buf->xdp)
+ return -ENOMEM;
+
+ dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
+ stmmac_set_desc_addr(priv, p, dma_addr);
+ rx_q->buf_alloc_num++;
+ }
+
+ return 0;
+}
+
+static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
+{
+ if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
+ return NULL;
+
+ return xsk_get_pool_from_qid(priv->dev, queue);
+}
+
+/**
+ * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
+ * @priv: driver private structure
+ * @queue: RX queue index
+ * @flags: gfp flag.
+ * Description: this function initializes the DMA RX descriptors
+ * and allocates the socket buffers. It supports the chained and ring
+ * modes.
+ */
+static int __init_dma_rx_desc_rings(struct stmmac_priv *priv, u32 queue, gfp_t flags)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int ret;
+
+ netif_dbg(priv, probe, priv->dev,
+ "(%s) dma_rx_phy=0x%08x\n", __func__,
+ (u32)rx_q->dma_rx_phy);
+
+ stmmac_clear_rx_descriptors(priv, queue);
+
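+	/* Drop any previously registered memory model; it is re-registered
+	 * below as either an XSK buffer pool (zero-copy) or a page pool.
+	 */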
+ xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
+
+ rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
+
+ if (rx_q->xsk_pool) {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL));
+ netdev_info(priv->dev,
+ "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
+ rx_q->queue_index);
+ xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
+ } else {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ rx_q->page_pool));
+ netdev_info(priv->dev,
+ "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
+ rx_q->queue_index);
+ }
+
+ if (rx_q->xsk_pool) {
+ /* RX XDP ZC buffer pool may not be populated, e.g.
+ * xdpsock TX-only.
+ */
+ stmmac_alloc_rx_buffers_zc(priv, queue);
+ } else {
+ ret = stmmac_alloc_rx_buffers(priv, queue, flags);
+ if (ret < 0)
+ return -ENOMEM;
+ }
+
+ rx_q->cur_rx = 0;
+ rx_q->dirty_rx = 0;
+
+ /* Setup the chained descriptor addresses */
+ if (priv->mode == STMMAC_CHAIN_MODE) {
+ if (priv->extend_desc)
+ stmmac_mode_init(priv, rx_q->dma_erx,
+ rx_q->dma_rx_phy,
+ priv->dma_rx_size, 1);
+ else
+ stmmac_mode_init(priv, rx_q->dma_rx,
+ rx_q->dma_rx_phy,
+ priv->dma_rx_size, 0);
+ }
+
+ return 0;
+}
+
+static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ int ret;
+
+ /* RX INITIALIZATION */
+ netif_dbg(priv, probe, priv->dev,
+ "SKB addresses:\nskb\t\tskb data\tdma data\n");
+
+ for (queue = 0; queue < rx_count; queue++) {
+ ret = __init_dma_rx_desc_rings(priv, queue, flags);
+ if (ret)
+ goto err_init_rx_buffers;
+ }
+
+ return 0;
+
+err_init_rx_buffers:
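+	/* Unwind: release the buffers of the failing queue and of every
+	 * queue initialized before it.
+	 */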
+ while (queue >= 0) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ if (rx_q->xsk_pool)
+ dma_free_rx_xskbufs(priv, queue);
+ else
+ dma_free_rx_skbufs(priv, queue);
+
+ rx_q->buf_alloc_num = 0;
+ rx_q->xsk_pool = NULL;
+
+ if (queue == 0)
+ break;
+
+ queue--;
+ }
+
+ return ret;
+}
+
+/**
+ * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
+ * @priv: driver private structure
+ * @queue : TX queue index
+ * Description: this function initializes the DMA TX descriptors.
+ * It supports the chained and ring modes.
+ */
+static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ int i;
+
+ netif_dbg(priv, probe, priv->dev,
+ "(%s) dma_tx_phy=0x%08x\n", __func__,
+ (u32)tx_q->dma_tx_phy);
+
+ /* Setup the chained descriptor addresses */
+ if (priv->mode == STMMAC_CHAIN_MODE) {
+ if (priv->extend_desc)
+ stmmac_mode_init(priv, tx_q->dma_etx,
+ tx_q->dma_tx_phy,
+ priv->dma_tx_size, 1);
+ else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
+ stmmac_mode_init(priv, tx_q->dma_tx,
+ tx_q->dma_tx_phy,
+ priv->dma_tx_size, 0);
+ }
+
+ tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
+
+ for (i = 0; i < priv->dma_tx_size; i++) {
+ struct dma_desc *p;
+
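+		/* Pick the descriptor layout in use: extended, enhanced (TBS) or basic */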
+ if (priv->extend_desc)
+ p = &((tx_q->dma_etx + i)->basic);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &((tx_q->dma_entx + i)->basic);
+ else
+ p = tx_q->dma_tx + i;
+
+ stmmac_clear_desc(priv, p);
+
+ tx_q->tx_skbuff_dma[i].buf = 0;
+ tx_q->tx_skbuff_dma[i].map_as_page = false;
+ tx_q->tx_skbuff_dma[i].len = 0;
+ tx_q->tx_skbuff_dma[i].last_segment = false;
+ tx_q->tx_skbuff[i] = NULL;
+ }
+
+ tx_q->dirty_tx = 0;
+ tx_q->cur_tx = 0;
+ tx_q->mss = 0;
+
+ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
+
+ return 0;
+}
+
+static int init_dma_tx_desc_rings(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 tx_queue_cnt;
+ u32 queue;
+
+ tx_queue_cnt = priv->plat->tx_queues_to_use;
+
+ for (queue = 0; queue < tx_queue_cnt; queue++)
+ __init_dma_tx_desc_rings(priv, queue);
+
+ return 0;
+}
+
+/**
+ * init_dma_desc_rings - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * @flags: gfp flag.
+ * Description: this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers. It supports the chained and ring
+ * modes.
+ */
+static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = init_dma_rx_desc_rings(dev, flags);
+ if (ret)
+ return ret;
+
+ ret = init_dma_tx_desc_rings(dev);
+
+ stmmac_clear_descriptors(priv);
+
+ if (netif_msg_hw(priv))
+ stmmac_display_rings(priv);
+
+ return ret;
+}
+
+/**
+ * dma_free_tx_skbufs - free TX dma buffers
+ * @priv: private structure
+ * @queue: TX queue index
+ */
+static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ int i;
+
+ tx_q->xsk_frames_done = 0;
+
+ for (i = 0; i < priv->dma_tx_size; i++)
+ stmmac_free_tx_buffer(priv, queue, i);
+
+ if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
+ xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
+ tx_q->xsk_frames_done = 0;
+ tx_q->xsk_pool = NULL;
+ }
+}
+
+/**
+ * stmmac_free_tx_skbufs - free TX skb buffers
+ * @priv: private structure
+ */
+static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
+{
+ u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queue_cnt; queue++)
+ dma_free_tx_skbufs(priv, queue);
+}
+
+/**
+ * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
+ * @priv: private structure
+ * @queue: RX queue index
+ */
+static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ /* Release the DMA RX socket buffers */
+ if (rx_q->xsk_pool)
+ dma_free_rx_xskbufs(priv, queue);
+ else
+ dma_free_rx_skbufs(priv, queue);
+
+ rx_q->buf_alloc_num = 0;
+ rx_q->xsk_pool = NULL;
+
+ /* Free DMA regions of consistent memory previously allocated */
+ if (!priv->extend_desc)
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_desc),
+ rx_q->dma_rx, rx_q->dma_rx_phy);
+ else
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ rx_q->dma_erx, rx_q->dma_rx_phy);
+
+ if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_q->xdp_rxq);
+
+ kfree(rx_q->buf_pool);
+ if (rx_q->page_pool)
+ page_pool_destroy(rx_q->page_pool);
+}
+
+static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+ u32 rx_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+
+ /* Free RX queue resources */
+ for (queue = 0; queue < rx_count; queue++)
+ __free_dma_rx_desc_resources(priv, queue);
+}
+
+/**
+ * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
+ * @priv: private structure
+ * @queue: TX queue index
+ */
+static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ size_t size;
+ void *addr;
+
+ /* Release the DMA TX socket buffers */
+ dma_free_tx_skbufs(priv, queue);
+
+ if (priv->extend_desc) {
+ size = sizeof(struct dma_extended_desc);
+ addr = tx_q->dma_etx;
+ } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
+ size = sizeof(struct dma_edesc);
+ addr = tx_q->dma_entx;
+ } else {
+ size = sizeof(struct dma_desc);
+ addr = tx_q->dma_tx;
+ }
+
+ size *= priv->dma_tx_size;
+
+ dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
+
+ kfree(tx_q->tx_skbuff_dma);
+ kfree(tx_q->tx_skbuff);
+}
+
+static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ /* Free TX queue resources */
+ for (queue = 0; queue < tx_count; queue++)
+ __free_dma_tx_desc_resources(priv, queue);
+}
+
+/**
+ * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
+ * @priv: private structure
+ * @queue: RX queue index
+ * Description: according to which descriptor can be used (extended or basic)
+ * this function allocates the resources for the RX path. It pre-allocates
+ * the RX socket buffers in order to allow a zero-copy mechanism.
+ */
+static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_channel *ch = &priv->channel[queue];
+ bool xdp_prog = stmmac_xdp_is_enabled(priv);
+ struct page_pool_params pp_params = { 0 };
+ unsigned int num_pages;
+ unsigned int napi_id;
+ int ret;
+
+ rx_q->queue_index = queue;
+ rx_q->priv_data = priv;
+
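+	/* Size each page-pool entry (an order-N page) so that a single entry
+	 * can hold one full DMA buffer.
+	 */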
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = priv->dma_rx_size;
+ num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
+ pp_params.order = ilog2(num_pages);
+ pp_params.nid = dev_to_node(priv->device);
+ pp_params.dev = priv->device;
+ pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+ pp_params.offset = stmmac_rx_offset(priv);
+ pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
+
+ rx_q->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx_q->page_pool)) {
+ ret = PTR_ERR(rx_q->page_pool);
+ rx_q->page_pool = NULL;
+ return ret;
+ }
+
+ rx_q->buf_pool = kcalloc(priv->dma_rx_size,
+ sizeof(*rx_q->buf_pool),
+ GFP_KERNEL);
+ if (!rx_q->buf_pool)
+ return -ENOMEM;
+
+ if (priv->extend_desc) {
+ rx_q->dma_erx = dma_alloc_coherent(priv->device,
+ priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ &rx_q->dma_rx_phy,
+ GFP_KERNEL);
+ if (!rx_q->dma_erx)
+ return -ENOMEM;
+
+ } else {
+ rx_q->dma_rx = dma_alloc_coherent(priv->device,
+ priv->dma_rx_size *
+ sizeof(struct dma_desc),
+ &rx_q->dma_rx_phy,
+ GFP_KERNEL);
+ if (!rx_q->dma_rx)
+ return -ENOMEM;
+ }
+
+ if (stmmac_xdp_is_enabled(priv) &&
+ test_bit(queue, priv->af_xdp_zc_qps))
+ napi_id = ch->rxtx_napi.napi_id;
+ else
+ napi_id = ch->rx_napi.napi_id;
+
+ ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
+ rx_q->queue_index,
+ napi_id);
+ if (ret) {
+ netdev_err(priv->dev, "Failed to register xdp rxq info\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+ u32 rx_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ int ret;
+
+ /* RX queues buffers and DMA */
+ for (queue = 0; queue < rx_count; queue++) {
+ ret = __alloc_dma_rx_desc_resources(priv, queue);
+ if (ret)
+ goto err_dma;
+ }
+
+ return 0;
+
+err_dma:
+ free_dma_rx_desc_resources(priv);
+
+ return ret;
+}
+
+/**
+ * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
+ * @priv: private structure
+ * @queue: TX queue index
+ * Description: according to which descriptor can be used (extended, enhanced
+ * for TBS, or basic) this function allocates the resources for the TX path.
+ */
+static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ size_t size;
+ void *addr;
+
+ tx_q->queue_index = queue;
+ tx_q->priv_data = priv;
+
+ tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
+ sizeof(*tx_q->tx_skbuff_dma),
+ GFP_KERNEL);
+ if (!tx_q->tx_skbuff_dma)
+ return -ENOMEM;
+
+ tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
+ sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!tx_q->tx_skbuff)
+ return -ENOMEM;
+
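+	/* The descriptor size depends on whether extended, enhanced (TBS) or
+	 * basic descriptors are in use.
+	 */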
+ if (priv->extend_desc)
+ size = sizeof(struct dma_extended_desc);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ size = sizeof(struct dma_edesc);
+ else
+ size = sizeof(struct dma_desc);
+
+ size *= priv->dma_tx_size;
+
+ addr = dma_alloc_coherent(priv->device, size,
+ &tx_q->dma_tx_phy, GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+
+ if (priv->extend_desc)
+ tx_q->dma_etx = addr;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ tx_q->dma_entx = addr;
+ else
+ tx_q->dma_tx = addr;
+
+ return 0;
+}
+
+static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
+{
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ u32 queue;
+ int ret;
+
+ /* TX queues buffers and DMA */
+ for (queue = 0; queue < tx_count; queue++) {
+ ret = __alloc_dma_tx_desc_resources(priv, queue);
+ if (ret)
+ goto err_dma;
+ }
+
+ return 0;
+
+err_dma:
+ free_dma_tx_desc_resources(priv);
+ return ret;
+}
+
+/**
+ * alloc_dma_desc_resources - alloc TX/RX resources.
+ * @priv: private structure
+ * Description: according to which descriptor can be used (extended or basic)
+ * this function allocates the resources for the TX and RX paths. In case of
+ * reception, for example, it pre-allocates the RX socket buffers in order to
+ * allow a zero-copy mechanism.
+ */
+static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+{
+ /* RX Allocation */
+ int ret = alloc_dma_rx_desc_resources(priv);
+
+ if (ret)
+ return ret;
+
+ ret = alloc_dma_tx_desc_resources(priv);
+
+ return ret;
+}
+
+/**
+ * free_dma_desc_resources - free dma desc resources
+ * @priv: private structure
+ */
+static void free_dma_desc_resources(struct stmmac_priv *priv)
+{
+ /* Release the DMA TX socket buffers */
+ free_dma_tx_desc_resources(priv);
+
+ /* Release the DMA RX socket buffers later
+ * to ensure all pending XDP_TX buffers are returned.
+ */
+ free_dma_rx_desc_resources(priv);
+}
+
+/**
+ * stmmac_mac_enable_rx_queues - Enable MAC rx queues
+ * @priv: driver private structure
+ * Description: It is used for enabling the rx queues in the MAC
+ */
+static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ int queue;
+ u8 mode;
+
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
+ stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
+ }
+}
+
+/**
+ * stmmac_start_rx_dma - start RX DMA channel
+ * @priv: driver private structure
+ * @chan: RX channel index
+ * Description:
+ * This starts a RX DMA channel
+ */
+static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
+ stmmac_start_rx(priv, priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_start_tx_dma - start TX DMA channel
+ * @priv: driver private structure
+ * @chan: TX channel index
+ * Description:
+ * This starts a TX DMA channel
+ */
+static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
+ stmmac_start_tx(priv, priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_stop_rx_dma - stop RX DMA channel
+ * @priv: driver private structure
+ * @chan: RX channel index
+ * Description:
+ * This stops a RX DMA channel
+ */
+static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
+ stmmac_stop_rx(priv, priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_stop_tx_dma - stop TX DMA channel
+ * @priv: driver private structure
+ * @chan: TX channel index
+ * Description:
+ * This stops a TX DMA channel
+ */
+static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
+ stmmac_stop_tx(priv, priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_start_all_dma - start all RX and TX DMA channels
+ * @priv: driver private structure
+ * Description:
+ * This starts all the RX and TX DMA channels
+ */
+static void stmmac_start_all_dma(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 chan = 0;
+
+ for (chan = 0; chan < rx_channels_count; chan++)
+ stmmac_start_rx_dma(priv, chan);
+
+ for (chan = 0; chan < tx_channels_count; chan++)
+ stmmac_start_tx_dma(priv, chan);
+}
+
+/**
+ * stmmac_stop_all_dma - stop all RX and TX DMA channels
+ * @priv: driver private structure
+ * Description:
+ * This stops the RX and TX DMA channels
+ */
+static void stmmac_stop_all_dma(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 chan = 0;
+
+ for (chan = 0; chan < rx_channels_count; chan++)
+ stmmac_stop_rx_dma(priv, chan);
+
+ for (chan = 0; chan < tx_channels_count; chan++)
+ stmmac_stop_tx_dma(priv, chan);
+}
+
+/**
+ * stmmac_dma_operation_mode - HW DMA operation mode
+ * @priv: driver private structure
+ * Description: it is used for configuring the DMA operation mode register in
+ * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
+ */
+static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ int rxfifosz = priv->plat->rx_fifo_size;
+ int txfifosz = priv->plat->tx_fifo_size;
+ u32 txmode = 0;
+ u32 rxmode = 0;
+ u32 chan = 0;
+ u8 qmode = 0;
+
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+
+ /* Adjust for real per queue fifo size */
+ rxfifosz /= rx_channels_count;
+ txfifosz /= tx_channels_count;
+
+ if (priv->plat->force_thresh_dma_mode) {
+ txmode = tc;
+ rxmode = tc;
+ } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
+		/*
+		 * In the case of GMAC, SF mode can be enabled
+		 * to perform the TX COE in HW. This depends on:
+		 * 1) TX COE being actually supported;
+		 * 2) there being no buggy Jumbo frame support
+		 * that requires the csum not to be inserted in the TDES.
+		 */
+ txmode = SF_DMA_MODE;
+ rxmode = SF_DMA_MODE;
+ priv->xstats.threshold = SF_DMA_MODE;
+ } else {
+ txmode = tc;
+ rxmode = SF_DMA_MODE;
+ }
+
+ /* configure all channels */
+ for (chan = 0; chan < rx_channels_count; chan++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+ u32 buf_size;
+
+ qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
+
+ stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
+ rxfifosz, qmode);
+
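+		/* An XSK pool dictates its own RX frame size; otherwise the
+		 * driver buffer size is programmed.
+		 */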
+ if (rx_q->xsk_pool) {
+ buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ buf_size,
+ chan);
+ } else {
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ priv->dma_buf_sz,
+ chan);
+ }
+ }
+
+ for (chan = 0; chan < tx_channels_count; chan++) {
+ qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
+
+ stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
+ txfifosz, qmode);
+ }
+}
+
+static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
+{
+ struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct xsk_buff_pool *pool = tx_q->xsk_pool;
+ unsigned int entry = tx_q->cur_tx;
+ struct dma_desc *tx_desc = NULL;
+ struct xdp_desc xdp_desc;
+ bool work_done = true;
+
+ /* Avoids TX time-out as we are sharing with slow path */
+ txq_trans_cond_update(nq);
+
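+	/* Never submit more frames than there are free slots in the shared TX ring */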
+ budget = min(budget, stmmac_tx_avail(priv, queue));
+
+ while (budget-- > 0) {
+ dma_addr_t dma_addr;
+ bool set_ic;
+
+		/* We share the ring with the slow path, so stop XSK TX desc
+		 * submission when the available TX ring space drops below the
+		 * threshold.
+		 */
+ if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
+ !netif_carrier_ok(priv->dev)) {
+ work_done = false;
+ break;
+ }
+
+ if (!xsk_tx_peek_desc(pool, &xdp_desc))
+ break;
+
+ if (likely(priv->extend_desc))
+ tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ tx_desc = &tx_q->dma_entx[entry].basic;
+ else
+ tx_desc = tx_q->dma_tx + entry;
+
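+		/* Resolve the frame's DMA address from the pool and make it
+		 * visible to the device.
+		 */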
+ dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+ xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
+
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
+
+		/* To return an XDP buffer to the XSK pool, we simply call
+		 * xsk_tx_completed(), so we don't need to fill up
+		 * 'buf' and 'xdpf'.
+		 */
+ tx_q->tx_skbuff_dma[entry].buf = 0;
+ tx_q->xdpf[entry] = NULL;
+
+ tx_q->tx_skbuff_dma[entry].map_as_page = false;
+ tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
+ tx_q->tx_skbuff_dma[entry].last_segment = true;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = false;
+
+ stmmac_set_desc_addr(priv, tx_desc, dma_addr);
+
+ tx_q->tx_count_frames++;
+
+ if (!priv->tx_coal_frames[queue])
+ set_ic = false;
+ else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
+ tx_q->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, tx_desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
+ stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
+ true, priv->mode, true, true,
+ xdp_desc.len);
+
+ stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ entry = tx_q->cur_tx;
+ }
+
+ if (tx_desc) {
+ stmmac_flush_tx_descriptors(priv, queue);
+ xsk_tx_release(pool);
+ }
+
+	/* Return true if both of the following conditions are met:
+	 * a) TX budget is still available
+	 * b) work_done == true, i.e. the XSK TX desc peek came back empty
+	 *    (no more pending XSK TX for transmission)
+	 */
+ return !!budget && work_done;
+}
+
+static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
+{
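+	/* Unless the channel already runs in Store-And-Forward mode, raise
+	 * the threshold in steps of 64 (while it is at most 256) and
+	 * reprogram the channel.
+	 */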
+ if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
+ tc += 64;
+
+ if (priv->plat->force_thresh_dma_mode)
+ stmmac_set_dma_operation_mode(priv, tc, tc, chan);
+ else
+ stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
+ chan);
+
+ priv->xstats.threshold = tc;
+ }
+}
+
+/**
+ * stmmac_tx_clean - to manage the transmission completion
+ * @priv: driver private structure
+ * @budget: napi budget limiting this function's packet handling
+ * @queue: TX queue index
+ * Description: it reclaims the transmit resources after transmission completes.
+ */
+static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+ unsigned int entry, xmits = 0, count = 0;
+
+ __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
+
+ priv->xstats.tx_clean++;
+
+ tx_q->xsk_frames_done = 0;
+
+ entry = tx_q->dirty_tx;
+
+ /* Try to clean all TX complete frame in 1 shot */
+ while ((entry != tx_q->cur_tx) && count < priv->dma_tx_size) {
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+ struct dma_desc *p;
+ int status;
+
+ if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
+ tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
+ xdpf = tx_q->xdpf[entry];
+ skb = NULL;
+ } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
+ xdpf = NULL;
+ skb = tx_q->tx_skbuff[entry];
+ } else {
+ xdpf = NULL;
+ skb = NULL;
+ }
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &tx_q->dma_entx[entry].basic;
+ else
+ p = tx_q->dma_tx + entry;
+
+ status = stmmac_tx_status(priv, &priv->dev->stats,
+ &priv->xstats, p, priv->ioaddr);
+ /* Check if the descriptor is owned by the DMA */
+ if (unlikely(status & tx_dma_own))
+ break;
+
+ count++;
+
+ /* Make sure descriptor fields are read after reading
+ * the own bit.
+ */
+ dma_rmb();
+
+ /* Just consider the last segment and ...*/
+ if (likely(!(status & tx_not_ls))) {
+ /* ... verify the status error condition */
+ if (unlikely(status & tx_err)) {
+ priv->dev->stats.tx_errors++;
+ if (unlikely(status & tx_err_bump_tc))
+ stmmac_bump_dma_threshold(priv, queue);
+ } else {
+ priv->dev->stats.tx_packets++;
+ priv->xstats.tx_pkt_n++;
+ priv->xstats.txq_stats[queue].tx_pkt_n++;
+ }
+ if (skb)
+ stmmac_get_tx_hwtstamp(priv, p, skb);
+ }
+
+ if (likely(tx_q->tx_skbuff_dma[entry].buf &&
+ tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
+ if (tx_q->tx_skbuff_dma[entry].map_as_page)
+ dma_unmap_page(priv->device,
+ tx_q->tx_skbuff_dma[entry].buf,
+ tx_q->tx_skbuff_dma[entry].len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(priv->device,
+ tx_q->tx_skbuff_dma[entry].buf,
+ tx_q->tx_skbuff_dma[entry].len,
+ DMA_TO_DEVICE);
+ tx_q->tx_skbuff_dma[entry].buf = 0;
+ tx_q->tx_skbuff_dma[entry].len = 0;
+ tx_q->tx_skbuff_dma[entry].map_as_page = false;
+ }
+
+ stmmac_clean_desc3(priv, tx_q, p);
+
+ tx_q->tx_skbuff_dma[entry].last_segment = false;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = false;
+
+ if (xdpf &&
+ tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
+ xdp_return_frame_rx_napi(xdpf);
+ tx_q->xdpf[entry] = NULL;
+ }
+
+ if (xdpf &&
+ tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
+ xdp_return_frame(xdpf);
+ tx_q->xdpf[entry] = NULL;
+ }
+
+ if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
+ tx_q->xsk_frames_done++;
+
+ if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
+ if (likely(skb)) {
+ pkts_compl++;
+ bytes_compl += skb->len;
+ dev_consume_skb_any(skb);
+ tx_q->tx_skbuff[entry] = NULL;
+ }
+ }
+
+ stmmac_release_tx_desc(priv, p, priv->mode);
+
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ }
+ tx_q->dirty_tx = entry;
+
+ netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
+ pkts_compl, bytes_compl);
+
+ if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
+ queue))) &&
+ stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
+
+ netif_dbg(priv, tx_done, priv->dev,
+ "%s: restart transmit\n", __func__);
+ netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
+ }
+
+ if (tx_q->xsk_pool) {
+ bool work_done;
+
+ if (tx_q->xsk_frames_done)
+ xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
+
+ if (xsk_uses_need_wakeup(tx_q->xsk_pool))
+ xsk_set_tx_need_wakeup(tx_q->xsk_pool);
+
+ /* For XSK TX, we try to send as many as possible.
+ * If XSK work done (XSK TX desc empty and budget still
+ * available), return "budget - 1" to reenable TX IRQ.
+ * Else, return "budget" to make NAPI continue polling.
+ */
+ work_done = stmmac_xdp_xmit_zc(priv, queue,
+ STMMAC_XSK_TX_BUDGET_MAX);
+ if (work_done)
+ xmits = budget - 1;
+ else
+ xmits = budget;
+ }
+
+ if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
+ priv->eee_sw_timer_en) {
+ stmmac_enable_eee_mode(priv);
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ }
+
+ /* We still have pending packets, let's call for a new scheduling */
+ if (tx_q->dirty_tx != tx_q->cur_tx)
+ hrtimer_start(&tx_q->txtimer,
+ STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
+ HRTIMER_MODE_REL);
+
+ __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
+
+ /* Combine decisions from TX clean and XSK TX */
+ return max(count, xmits);
+}
+
+/**
+ * stmmac_tx_err - to manage the tx error
+ * @priv: driver private structure
+ * @chan: channel index
+ * Description: it cleans the descriptors and restarts the transmission
+ * in case of transmission errors.
+ */
+static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
+
+ stmmac_stop_tx_dma(priv, chan);
+ dma_free_tx_skbufs(priv, chan);
+ stmmac_clear_tx_descriptors(priv, chan);
+ tx_q->dirty_tx = 0;
+ tx_q->cur_tx = 0;
+ tx_q->mss = 0;
+ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
+ stmmac_start_tx_dma(priv, chan);
+
+ priv->dev->stats.tx_errors++;
+ netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
+}
+
+/**
+ * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
+ * @priv: driver private structure
+ * @txmode: TX operating mode
+ * @rxmode: RX operating mode
+ * @chan: channel index
+ * Description: it is used for configuring the DMA operation mode at
+ * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
+ * mode.
+ */
+static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
+ u32 rxmode, u32 chan)
+{
+ u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
+ u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ int rxfifosz = priv->plat->rx_fifo_size;
+ int txfifosz = priv->plat->tx_fifo_size;
+
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+
+ /* Adjust for real per queue fifo size */
+ rxfifosz /= rx_channels_count;
+ txfifosz /= tx_channels_count;
+
+ stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
+ stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
+}
+
+static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
+{
+ int ret;
+
+ ret = stmmac_safety_feat_irq_status(priv, priv->dev,
+ priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
+ if (ret && (ret != -EINVAL)) {
+ stmmac_global_err(priv);
+ return true;
+ }
+
+ return false;
+}
+
+static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
+{
+ int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
+ &priv->xstats, chan, dir);
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ struct stmmac_channel *ch = &priv->channel[chan];
+ struct napi_struct *rx_napi;
+ struct napi_struct *tx_napi;
+ unsigned long flags;
+
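+	/* Queues running AF_XDP zero-copy are serviced by the combined rxtx NAPI */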
+ rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
+ tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
+
+ if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
+ if (napi_schedule_prep(rx_napi)) {
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ __napi_schedule(rx_napi);
+ }
+ }
+
+ if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
+ if (napi_schedule_prep(tx_napi)) {
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ __napi_schedule(tx_napi);
+ }
+ }
+
+ return status;
+}
+
+/**
+ * stmmac_dma_interrupt - DMA ISR
+ * @priv: driver private structure
+ * Description: this is the DMA ISR. It is called by the main ISR.
+ * It calls the dwmac dma routine and schedules the poll method when there
+ * is work that can be done.
+ */
+static void stmmac_dma_interrupt(struct stmmac_priv *priv)
+{
+ u32 tx_channel_count = priv->plat->tx_queues_to_use;
+ u32 rx_channel_count = priv->plat->rx_queues_to_use;
+ u32 channels_to_check = tx_channel_count > rx_channel_count ?
+ tx_channel_count : rx_channel_count;
+ u32 chan;
+ int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
+
+ /* Make sure we never check beyond our status buffer. */
+ if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
+ channels_to_check = ARRAY_SIZE(status);
+
+ for (chan = 0; chan < channels_to_check; chan++)
+ status[chan] = stmmac_napi_check(priv, chan,
+ DMA_DIR_RXTX);
+
+ for (chan = 0; chan < tx_channel_count; chan++) {
+ if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
+ /* Try to bump up the dma threshold on this failure */
+ stmmac_bump_dma_threshold(priv, chan);
+ } else if (unlikely(status[chan] == tx_hard_error)) {
+ stmmac_tx_err(priv, chan);
+ }
+ }
+}
+
+/**
+ * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
+ * @priv: driver private structure
+ * Description: this masks the MMC irq; the counters are managed in SW.
+ */
+static void stmmac_mmc_setup(struct stmmac_priv *priv)
+{
+ unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
+ MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
+
+ stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
+
+ if (priv->dma_cap.rmon) {
+ stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
+ memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+	} else {
+		netdev_info(priv->dev, "No MAC Management Counters available\n");
+	}
+}
+
+/**
+ * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
+ * @priv: driver private structure
+ * Description:
+ * newer GMAC chip generations have a register that indicates the
+ * presence of the optional features/functions.
+ * This can also be used to override the value passed through the
+ * platform, which is necessary for old MAC10/100 and GMAC chips.
+ */
+static int stmmac_get_hw_features(struct stmmac_priv *priv)
+{
+ return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
+}
+
+/**
+ * stmmac_check_ether_addr - check if the MAC addr is valid
+ * @priv: driver private structure
+ * Description:
+ * it verifies that the MAC address is valid; in case of failure it
+ * generates a random MAC address
+ */
+static void stmmac_check_ether_addr(struct stmmac_priv *priv)
+{
+ u8 addr[ETH_ALEN];
+
+ if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+ stmmac_get_umac_addr(priv, priv->hw, addr, 0);
+ if (is_valid_ether_addr(addr))
+ eth_hw_addr_set(priv->dev, addr);
+ else
+ eth_hw_addr_random(priv->dev);
+ dev_info(priv->device, "device MAC address %pM\n",
+ priv->dev->dev_addr);
+ }
+}
+
+/**
+ * stmmac_init_dma_engine - DMA init.
+ * @priv: driver private structure
+ * Description:
+ * It inits the DMA by invoking the specific MAC/GMAC callback.
+ * Some DMA parameters can be passed from the platform;
+ * if they are not passed, a default is kept for the MAC or GMAC.
+ */
+static int stmmac_init_dma_engine(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+ struct stmmac_rx_queue *rx_q;
+ struct stmmac_tx_queue *tx_q;
+ u32 chan = 0;
+ int atds = 0;
+ int ret = 0;
+
+ if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
+ dev_err(priv->device, "Invalid DMA configuration\n");
+ return -EINVAL;
+ }
+
+ if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
+ atds = 1;
+
+ ret = stmmac_reset(priv, priv->ioaddr);
+ if (ret) {
+ dev_err(priv->device, "Failed to reset the dma\n");
+ return ret;
+ }
+
+ /* DMA Configuration */
+ stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+
+ if (priv->plat->axi)
+ stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
+
+ /* DMA CSR Channel configuration */
+ for (chan = 0; chan < dma_csr_ch; chan++)
+ stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+
+ /* DMA RX Channel Configuration */
+ for (chan = 0; chan < rx_channels_count; chan++) {
+ rx_q = &priv->rx_queue[chan];
+
+ stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, chan);
+
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (rx_q->buf_alloc_num *
+ sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr, chan);
+ }
+
+ /* DMA TX Channel Configuration */
+ for (chan = 0; chan < tx_channels_count; chan++) {
+ tx_q = &priv->tx_queue[chan];
+
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+ tx_q->tx_tail_addr, chan);
+ }
+
+ return ret;
+}
+
+static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ hrtimer_start(&tx_q->txtimer,
+ STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
+ HRTIMER_MODE_REL);
+}
+
+/**
+ * stmmac_tx_timer - mitigation sw timer for tx.
+ * @t: pointer to the hrtimer embedded in the TX queue
+ * Description:
+ * This is the timer handler that schedules the TX NAPI poll, which in
+ * turn invokes stmmac_tx_clean.
+ */
+static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
+{
+ struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
+ struct stmmac_priv *priv = tx_q->priv_data;
+ struct stmmac_channel *ch;
+ struct napi_struct *napi;
+
+ ch = &priv->channel[tx_q->queue_index];
+ napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
+
+ if (likely(napi_schedule_prep(napi))) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ __napi_schedule(napi);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * stmmac_init_coalesce - init mitigation options.
+ * @priv: driver private structure
+ * Description:
+ * This inits the coalesce parameters: i.e. timer rate,
+ * timer handler and default threshold used for enabling the
+ * interrupt on completion bit.
+ */
+static void stmmac_init_coalesce(struct stmmac_priv *priv)
+{
+ u32 tx_channel_count = priv->plat->tx_queues_to_use;
+ u32 rx_channel_count = priv->plat->rx_queues_to_use;
+ u32 chan;
+
+ for (chan = 0; chan < tx_channel_count; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+
+ priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
+ priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
+
+ hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ tx_q->txtimer.function = stmmac_tx_timer;
+ }
+
+ for (chan = 0; chan < rx_channel_count; chan++)
+ priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
+}
+
+static void stmmac_set_rings_length(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 chan;
+
+ /* set TX ring length */
+ for (chan = 0; chan < tx_channels_count; chan++)
+ stmmac_set_tx_ring_len(priv, priv->ioaddr,
+ (priv->dma_tx_size - 1), chan);
+
+ /* set RX ring length */
+ for (chan = 0; chan < rx_channels_count; chan++)
+ stmmac_set_rx_ring_len(priv, priv->ioaddr,
+ (priv->dma_rx_size - 1), chan);
+}
+
+/**
+ * stmmac_set_tx_queue_weight - Set TX queue weight
+ * @priv: driver private structure
+ * Description: It is used for setting the TX queue weights
+ */
+static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 weight;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queues_count; queue++) {
+ weight = priv->plat->tx_queues_cfg[queue].weight;
+ stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
+ }
+}
+
+/**
+ * stmmac_configure_cbs - Configure CBS in TX queue
+ * @priv: driver private structure
+ * Description: It is used for configuring CBS in AVB TX queues
+ */
+static void stmmac_configure_cbs(struct stmmac_priv *priv)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 mode_to_use;
+ u32 queue;
+
+ /* queue 0 is reserved for legacy traffic */
+ for (queue = 1; queue < tx_queues_count; queue++) {
+ mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+ if (mode_to_use == MTL_QUEUE_DCB)
+ continue;
+
+ stmmac_config_cbs(priv, priv->hw,
+ priv->plat->tx_queues_cfg[queue].send_slope,
+ priv->plat->tx_queues_cfg[queue].idle_slope,
+ priv->plat->tx_queues_cfg[queue].high_credit,
+ priv->plat->tx_queues_cfg[queue].low_credit,
+ queue);
+ }
+}
+
+/**
+ * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
+ * @priv: driver private structure
+ * Description: It is used for mapping RX queues to RX dma channels
+ */
+static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ u32 chan;
+
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ chan = priv->plat->rx_queues_cfg[queue].chan;
+ stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
+ }
+}
+
+/**
+ * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
+ * @priv: driver private structure
+ * Description: It is used for configuring the RX Queue Priority
+ */
+static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ u32 prio;
+
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ if (!priv->plat->rx_queues_cfg[queue].use_prio)
+ continue;
+
+ prio = priv->plat->rx_queues_cfg[queue].prio;
+ stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
+ }
+}
+
+/**
+ * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
+ * @priv: driver private structure
+ * Description: It is used for configuring the TX Queue Priority
+ */
+static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 queue;
+ u32 prio;
+
+ for (queue = 0; queue < tx_queues_count; queue++) {
+ if (!priv->plat->tx_queues_cfg[queue].use_prio)
+ continue;
+
+ prio = priv->plat->tx_queues_cfg[queue].prio;
+ stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
+ }
+}
+
+/**
+ * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
+ * @priv: driver private structure
+ * Description: It is used for configuring the RX queue routing
+ */
+static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ u8 packet;
+
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ /* no specific packet type routing specified for the queue */
+ if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
+ continue;
+
+ packet = priv->plat->rx_queues_cfg[queue].pkt_route;
+ stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
+ }
+}
+
+static void stmmac_mac_config_rss(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
+ priv->rss.enable = false;
+ return;
+ }
+
+ if (priv->dev->features & NETIF_F_RXHASH)
+ priv->rss.enable = true;
+ else
+ priv->rss.enable = false;
+
+ stmmac_rss_configure(priv, priv->hw, &priv->rss,
+ priv->plat->rx_queues_to_use);
+}
+
+/**
+ * stmmac_mtl_configuration - Configure MTL
+ * @priv: driver private structure
+ * Description: It is used for configuring the MTL
+ */
+static void stmmac_mtl_configuration(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+
+ if (tx_queues_count > 1)
+ stmmac_set_tx_queue_weight(priv);
+
+ /* Configure MTL RX algorithms */
+ if (rx_queues_count > 1)
+ stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
+ priv->plat->rx_sched_algorithm);
+
+ /* Configure MTL TX algorithms */
+ if (tx_queues_count > 1)
+ stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
+ priv->plat->tx_sched_algorithm);
+
+ /* Configure CBS in AVB TX queues */
+ if (tx_queues_count > 1)
+ stmmac_configure_cbs(priv);
+
+ /* Map RX MTL to DMA channels */
+ stmmac_rx_queue_dma_chan_map(priv);
+
+ /* Enable MAC RX Queues */
+ stmmac_mac_enable_rx_queues(priv);
+
+ /* Set RX priorities */
+ if (rx_queues_count > 1)
+ stmmac_mac_config_rx_queues_prio(priv);
+
+ /* Set TX priorities */
+ if (tx_queues_count > 1)
+ stmmac_mac_config_tx_queues_prio(priv);
+
+ /* Set RX routing */
+ if (rx_queues_count > 1)
+ stmmac_mac_config_rx_queues_routing(priv);
+
+ /* Receive Side Scaling */
+ if (rx_queues_count > 1)
+ stmmac_mac_config_rss(priv);
+}
+
+static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
+{
+ if (priv->dma_cap.asp) {
+ netdev_info(priv->dev, "Enabling Safety Features\n");
+ stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
+ priv->plat->safety_feat_cfg);
+ } else {
+ netdev_info(priv->dev, "No Safety Features support found\n");
+ }
+}
+
+static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
+{
+ char *name;
+
+ clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
+ clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
+
+ name = priv->wq_name;
+ sprintf(name, "%s-fpe", priv->dev->name);
+
+ priv->fpe_wq = create_singlethread_workqueue(name);
+ if (!priv->fpe_wq) {
+ netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
+
+ return -ENOMEM;
+ }
+ netdev_info(priv->dev, "FPE workqueue start");
+
+ return 0;
+}
+
+/**
+ * stmmac_hw_setup - setup mac in a usable state.
+ * @dev : pointer to the device structure.
+ * @init_ptp: initialize PTP if set
+ * Description:
+ * this is the main function to set up the HW in a usable state: the
+ * dma engine is reset, the core registers are configured (e.g. AXI,
+ * checksum features, timers) and the DMA is made ready to start
+ * receiving and transmitting.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ bool sph_en;
+ u32 chan;
+ int ret;
+
+ /* DMA initialization and SW reset */
+ ret = stmmac_init_dma_engine(priv);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
+ __func__);
+ return ret;
+ }
+
+ /* Copy the MAC addr into the HW */
+ stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
+
+ /* PS and related bits will be programmed according to the speed */
+ if (priv->hw->pcs) {
+ int speed = priv->plat->mac_port_sel_speed;
+
+ if ((speed == SPEED_10) || (speed == SPEED_100) ||
+ (speed == SPEED_1000)) {
+ priv->hw->ps = speed;
+ } else {
+ dev_warn(priv->device, "invalid port speed\n");
+ priv->hw->ps = 0;
+ }
+ }
+
+ /* Initialize the MAC Core */
+ stmmac_core_init(priv, priv->hw, dev);
+
+ /* Initialize MTL*/
+ stmmac_mtl_configuration(priv);
+
+ /* Initialize Safety Features */
+ stmmac_safety_feat_configuration(priv);
+
+ ret = stmmac_rx_ipc(priv, priv->hw);
+ if (!ret) {
+ netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
+ priv->plat->rx_coe = STMMAC_RX_COE_NONE;
+ priv->hw->rx_csum = 0;
+ }
+
+ /* Enable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, true);
+
+ /* Set the HW DMA mode and the COE */
+ stmmac_dma_operation_mode(priv);
+
+ stmmac_mmc_setup(priv);
+
+ if (init_ptp) {
+ ret = stmmac_init_ptp(priv);
+ if (ret == -EOPNOTSUPP)
+ netdev_warn(priv->dev, "PTP not supported by HW\n");
+ else if (ret)
+ netdev_warn(priv->dev, "PTP init failed\n");
+ }
+
+ priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
+
+ /* Convert the timer from msec to usec */
+ if (!priv->tx_lpi_timer)
+ priv->tx_lpi_timer = eee_timer * 1000;
+
+ if (priv->use_riwt) {
+ u32 queue;
+
+ for (queue = 0; queue < rx_cnt; queue++) {
+ if (!priv->rx_riwt[queue])
+ priv->rx_riwt[queue] = DEF_DMA_RIWT;
+
+ stmmac_rx_watchdog(priv, priv->ioaddr,
+ priv->rx_riwt[queue], queue);
+ }
+ }
+
+ if (priv->hw->pcs)
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
+
+ /* set TX and RX rings length */
+ stmmac_set_rings_length(priv);
+
+ /* Enable TSO */
+ if (priv->tso) {
+ for (chan = 0; chan < tx_cnt; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+
+ /* TSO and TBS cannot co-exist */
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ continue;
+
+ stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
+ }
+ }
+
+ /* Enable Split Header */
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ for (chan = 0; chan < rx_cnt; chan++)
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+
+ /* VLAN Tag Insertion */
+ if (priv->dma_cap.vlins)
+ stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
+
+ /* TBS */
+ for (chan = 0; chan < tx_cnt; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
+
+ stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
+ }
+
+ /* Configure real RX and TX queues */
+ netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
+ netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
+
+ /* Start the ball rolling... */
+ stmmac_start_all_dma(priv);
+
+ if (priv->dma_cap.fpesel) {
+ stmmac_fpe_start_wq(priv);
+
+ if (priv->plat->fpe_cfg->enable)
+ stmmac_fpe_handshake(priv, true);
+ }
+
+ return 0;
+}
+
+static void stmmac_hw_teardown(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
+}
+
+static void stmmac_free_irq(struct net_device *dev,
+ enum request_irq_err irq_err, int irq_idx)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int j;
+
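+	/* Free in the reverse order of stmmac_request_irq_multi_msi(); each
+	 * case falls through to release everything requested before it.
+	 */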
+ switch (irq_err) {
+ case REQ_IRQ_ERR_ALL:
+ irq_idx = priv->plat->tx_queues_to_use;
+ fallthrough;
+ case REQ_IRQ_ERR_TX:
+ for (j = irq_idx - 1; j >= 0; j--) {
+ if (priv->tx_irq[j] > 0) {
+ irq_set_affinity_hint(priv->tx_irq[j], NULL);
+ free_irq(priv->tx_irq[j], &priv->tx_queue[j]);
+ }
+ }
+ irq_idx = priv->plat->rx_queues_to_use;
+ fallthrough;
+ case REQ_IRQ_ERR_RX:
+ for (j = irq_idx - 1; j >= 0; j--) {
+ if (priv->rx_irq[j] > 0) {
+ irq_set_affinity_hint(priv->rx_irq[j], NULL);
+ free_irq(priv->rx_irq[j], &priv->rx_queue[j]);
+ }
+ }
+
+ if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
+ free_irq(priv->sfty_ue_irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_SFTY_UE:
+ if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
+ free_irq(priv->sfty_ce_irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_SFTY_CE:
+ if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
+ free_irq(priv->lpi_irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_LPI:
+ if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
+ free_irq(priv->wol_irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_WOL:
+ free_irq(dev->irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_MAC:
+ case REQ_IRQ_ERR_NO:
+		/* If the MAC IRQ request failed, there is no more IRQ to free */
+ break;
+ }
+}
+
+static int stmmac_request_irq_multi_msi(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ enum request_irq_err irq_err;
+ cpumask_t cpu_mask;
+ int irq_idx = 0;
+ char *int_name;
+ int ret;
+ int i;
+
+ /* For common interrupt */
+ int_name = priv->int_name_mac;
+ sprintf(int_name, "%s:%s", dev->name, "mac");
+ ret = request_irq(dev->irq, stmmac_mac_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc mac MSI %d (error: %d)\n",
+ __func__, dev->irq, ret);
+ irq_err = REQ_IRQ_ERR_MAC;
+ goto irq_error;
+ }
+
+	/* Request the Wake IRQ in case another line
+	 * is used for WoL
+	 */
+ if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
+ int_name = priv->int_name_wol;
+ sprintf(int_name, "%s:%s", dev->name, "wol");
+ ret = request_irq(priv->wol_irq,
+ stmmac_mac_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc wol MSI %d (error: %d)\n",
+ __func__, priv->wol_irq, ret);
+ irq_err = REQ_IRQ_ERR_WOL;
+ goto irq_error;
+ }
+ }
+
+	/* Request the LPI IRQ in case another line
+	 * is used for LPI
+	 */
+ if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
+ int_name = priv->int_name_lpi;
+ sprintf(int_name, "%s:%s", dev->name, "lpi");
+ ret = request_irq(priv->lpi_irq,
+ stmmac_mac_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc lpi MSI %d (error: %d)\n",
+ __func__, priv->lpi_irq, ret);
+ irq_err = REQ_IRQ_ERR_LPI;
+ goto irq_error;
+ }
+ }
+
+	/* Request the Safety Feature Correctable Error line in
+	 * case another line is used
+	 */
+ if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
+ int_name = priv->int_name_sfty_ce;
+ sprintf(int_name, "%s:%s", dev->name, "safety-ce");
+ ret = request_irq(priv->sfty_ce_irq,
+ stmmac_safety_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc sfty ce MSI %d (error: %d)\n",
+ __func__, priv->sfty_ce_irq, ret);
+ irq_err = REQ_IRQ_ERR_SFTY_CE;
+ goto irq_error;
+ }
+ }
+
+	/* Request the Safety Feature Uncorrectable Error line in
+	 * case another line is used
+	 */
+ if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
+ int_name = priv->int_name_sfty_ue;
+ sprintf(int_name, "%s:%s", dev->name, "safety-ue");
+ ret = request_irq(priv->sfty_ue_irq,
+ stmmac_safety_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc sfty ue MSI %d (error: %d)\n",
+ __func__, priv->sfty_ue_irq, ret);
+ irq_err = REQ_IRQ_ERR_SFTY_UE;
+ goto irq_error;
+ }
+ }
+
+ /* Request Rx MSI irq */
+ for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
+ if (i >= MTL_MAX_RX_QUEUES)
+ break;
+ if (priv->rx_irq[i] == 0)
+ continue;
+
+ int_name = priv->int_name_rx_irq[i];
+ sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
+ ret = request_irq(priv->rx_irq[i],
+ stmmac_msi_intr_rx,
+ 0, int_name, &priv->rx_queue[i]);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc rx-%d MSI %d (error: %d)\n",
+ __func__, i, priv->rx_irq[i], ret);
+ irq_err = REQ_IRQ_ERR_RX;
+ irq_idx = i;
+ goto irq_error;
+ }
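+		/* Spread the RX queue IRQs across the online CPUs */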
+ cpumask_clear(&cpu_mask);
+ cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
+ irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
+ }
+
+ /* Request Tx MSI irq */
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
+ if (i >= MTL_MAX_TX_QUEUES)
+ break;
+ if (priv->tx_irq[i] == 0)
+ continue;
+
+ int_name = priv->int_name_tx_irq[i];
+ sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
+ ret = request_irq(priv->tx_irq[i],
+ stmmac_msi_intr_tx,
+ 0, int_name, &priv->tx_queue[i]);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc tx-%d MSI %d (error: %d)\n",
+ __func__, i, priv->tx_irq[i], ret);
+ irq_err = REQ_IRQ_ERR_TX;
+ irq_idx = i;
+ goto irq_error;
+ }
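+		/* Spread the TX queue IRQs across the online CPUs */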
+ cpumask_clear(&cpu_mask);
+ cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
+ irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
+ }
+
+ return 0;
+
+irq_error:
+ stmmac_free_irq(dev, irq_err, irq_idx);
+ return ret;
+}
+
+static int stmmac_request_irq_single(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ enum request_irq_err irq_err;
+ int ret;
+
+ ret = request_irq(dev->irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the IRQ %d (error: %d)\n",
+ __func__, dev->irq, ret);
+ irq_err = REQ_IRQ_ERR_MAC;
+ goto irq_error;
+ }
+
+	/* Request the Wake IRQ in case another line
+	 * is used for WoL
+	 */
+ if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
+ ret = request_irq(priv->wol_irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
+ __func__, priv->wol_irq, ret);
+ irq_err = REQ_IRQ_ERR_WOL;
+ goto irq_error;
+ }
+ }
+
+	/* Request the LPI IRQ in case another line is used for LPI */
+ if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
+ ret = request_irq(priv->lpi_irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
+ __func__, priv->lpi_irq, ret);
+ irq_err = REQ_IRQ_ERR_LPI;
+ goto irq_error;
+ }
+ }
+
+ return 0;
+
+irq_error:
+ stmmac_free_irq(dev, irq_err, 0);
+ return ret;
+}
+
+static int stmmac_request_irq(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
+
+ /* Request the IRQ lines */
+ if (priv->plat->multi_msi_en)
+ ret = stmmac_request_irq_multi_msi(dev);
+ else
+ ret = stmmac_request_irq_single(dev);
+
+ return ret;
+}
+
+/**
+ * stmmac_open - open entry point of the driver
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function is the open entry point of the driver.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int stmmac_open(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int mode = priv->plat->phy_interface;
+ int bfsize = 0;
+ u32 chan;
+ int ret;
+
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI &&
+ (!priv->hw->xpcs ||
+ xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
+ ret = stmmac_init_phy(dev);
+ if (ret) {
+ netdev_err(priv->dev,
+ "%s: Cannot attach to PHY (error: %d)\n",
+ __func__, ret);
+ goto init_phy_error;
+ }
+ }
+
+ /* Extra statistics */
+ memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
+ priv->xstats.threshold = tc;
+
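+	/* Pick the RX buffer size: try the 16KiB mode first; if it does not
+	 * apply, derive the size from the MTU.
+	 */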
+ bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu);
+ if (bfsize < 0)
+ bfsize = 0;
+
+ if (bfsize < BUF_SIZE_16KiB)
+ bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
+
+ priv->dma_buf_sz = bfsize;
+ buf_sz = bfsize;
+
+ priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+
+ if (!priv->dma_tx_size)
+ priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
+ if (!priv->dma_rx_size)
+ priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
+
+ /* Earlier check for TBS */
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+ int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
+
+ /* Setup per-TXQ tbs flag before TX descriptor alloc */
+ tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
+ }
+
+ ret = alloc_dma_desc_resources(priv);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
+ __func__);
+ goto dma_desc_error;
+ }
+
+ ret = init_dma_desc_rings(dev, GFP_KERNEL);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
+ __func__);
+ goto init_error;
+ }
+
+ ret = stmmac_hw_setup(dev, true);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
+ goto init_error;
+ }
+
+ stmmac_init_coalesce(priv);
+
+ phylink_start(priv->phylink);
+ /* We may have called phylink_speed_down before */
+ phylink_speed_up(priv->phylink);
+
+ ret = stmmac_request_irq(dev);
+ if (ret)
+ goto irq_error;
+
+ stmmac_enable_all_queues(priv);
+ netif_tx_start_all_queues(priv->dev);
+
+ return 0;
+
+irq_error:
+ phylink_stop(priv->phylink);
+
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+
+ stmmac_hw_teardown(dev);
+init_error:
+ free_dma_desc_resources(priv);
+dma_desc_error:
+ phylink_disconnect_phy(priv->phylink);
+init_phy_error:
+ pm_runtime_put(priv->device);
+ return ret;
+}
+
+static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
+{
+ set_bit(__FPE_REMOVING, &priv->fpe_task_state);
+
+ if (priv->fpe_wq)
+ destroy_workqueue(priv->fpe_wq);
+
+	netdev_info(priv->dev, "FPE workqueue stopped");
+}
+
+/**
+ * stmmac_release - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int stmmac_release(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 chan;
+
+ netif_tx_disable(dev);
+
+ if (device_may_wakeup(priv->device))
+ phylink_speed_down(priv->phylink, false);
+ /* Stop and disconnect the PHY */
+ phylink_stop(priv->phylink);
+ phylink_disconnect_phy(priv->phylink);
+
+ stmmac_disable_all_queues(priv);
+
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+
+ /* Free the IRQ lines */
+ stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
+
+ if (priv->eee_enabled) {
+ priv->tx_path_in_lpi_mode = false;
+ del_timer_sync(&priv->eee_ctrl_timer);
+ }
+
+ /* Stop TX/RX DMA and clear the descriptors */
+ stmmac_stop_all_dma(priv);
+
+ /* Release and free the Rx/Tx resources */
+ free_dma_desc_resources(priv);
+
+ /* Disable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, false);
+
+ netif_carrier_off(dev);
+
+ stmmac_release_ptp(priv);
+
+ pm_runtime_put(priv->device);
+
+ if (priv->dma_cap.fpesel)
+ stmmac_fpe_stop_wq(priv);
+
+ return 0;
+}
+
+static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
+ struct stmmac_tx_queue *tx_q)
+{
+ u16 tag = 0x0, inner_tag = 0x0;
+ u32 inner_type = 0x0;
+ struct dma_desc *p;
+
+ if (!priv->dma_cap.vlins)
+ return false;
+ if (!skb_vlan_tag_present(skb))
+ return false;
+ if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ inner_tag = skb_vlan_tag_get(skb);
+ inner_type = STMMAC_VLAN_INSERT;
+ }
+
+ tag = skb_vlan_tag_get(skb);
+
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ p = &tx_q->dma_tx[tx_q->cur_tx];
+
+ if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
+ return false;
+
+ stmmac_set_tx_owner(priv, p);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+ return true;
+}
+
+/**
+ * stmmac_tso_allocator - allocate and fill the TSO descriptors
+ * @priv: driver private structure
+ * @des: buffer start address
+ * @total_len: total length to fill in descriptors
+ * @last_segment: condition for the last descriptor
+ * @queue: TX queue index
+ * Description:
+ * This function fills the descriptors and requests new descriptors
+ * according to the buffer length to fill.
+ */
+static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
+ int total_len, bool last_segment, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct dma_desc *desc;
+ u32 buff_size;
+ int tmp_len;
+
+ tmp_len = total_len;
+
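+	/* Walk the payload in chunks of at most TSO_MAX_BUFF_SIZE bytes,
+	 * taking a fresh descriptor for each chunk; only the final chunk of
+	 * the last segment is marked as such.
+	 */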
+ while (tmp_len > 0) {
+ dma_addr_t curr_addr;
+
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+ priv->dma_tx_size);
+ WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
+
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
+
+ curr_addr = des + (total_len - tmp_len);
+ if (priv->dma_cap.addr64 <= 32)
+ desc->des0 = cpu_to_le32(curr_addr);
+ else
+ stmmac_set_desc_addr(priv, desc, curr_addr);
+
+ buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
+ TSO_MAX_BUFF_SIZE : tmp_len;
+
+ stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
+ 0, 1,
+ (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
+ 0, 0);
+
+ tmp_len -= TSO_MAX_BUFF_SIZE;
+ }
+}
+
+static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ int desc_size;
+
+ if (likely(priv->extend_desc))
+ desc_size = sizeof(struct dma_extended_desc);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc_size = sizeof(struct dma_edesc);
+ else
+ desc_size = sizeof(struct dma_desc);
+
+	/* The own bit must be the last thing written when preparing the
+	 * descriptor, and a barrier is needed to make sure that
+	 * everything is coherent before granting the DMA engine.
+	 */
+ wmb();
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
+}
+
+/**
+ * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description: this is the transmit function that is called on TSO frames
+ * (support available on GMAC4 and newer chips).
+ * The diagram below shows the ring programming in the case of TSO frames:
+ *
+ * First Descriptor
+ * --------
+ * | DES0 |---> buffer1 = L2/L3/L4 header
+ * | DES1 |---> TCP Payload (can continue on next descr...)
+ * | DES2 |---> buffer 1 and 2 len
+ * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
+ * --------
+ * |
+ * ...
+ * |
+ * --------
+ * | DES0 | --| Split TCP Payload on Buffers 1 and 2
+ * | DES1 | --|
+ * | DES2 | --> buffer 1 and 2 len
+ * | DES3 |
+ * --------
+ *
+ * The MSS is fixed while TSO is enabled, so the TDES3 context field does
+ * not need to be reprogrammed for every frame.
+ */
+static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dma_desc *desc, *first, *mss_desc = NULL;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ u32 queue = skb_get_queue_mapping(skb);
+ unsigned int first_entry, tx_packets;
+ int tmp_pay_len = 0, first_tx;
+ struct stmmac_tx_queue *tx_q;
+ bool has_vlan, set_ic;
+ u8 proto_hdr_len, hdr;
+ u32 pay_len, mss;
+ dma_addr_t des;
+ int i;
+
+ tx_q = &priv->tx_queue[queue];
+ first_tx = tx_q->cur_tx;
+
+ /* Compute header lengths */
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
+ hdr = sizeof(struct udphdr);
+ } else {
+ proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ hdr = tcp_hdrlen(skb);
+ }
+
+	/* Descriptor availability based on the threshold should be safe enough */
+ if (unlikely(stmmac_tx_avail(priv, queue) <
+ (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
+ if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
+ queue));
+ /* This is a hard error, log it. */
+ netdev_err(priv->dev,
+ "%s: Tx Ring full when queue awake\n",
+ __func__);
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
+
+ mss = skb_shinfo(skb)->gso_size;
+
+ /* set new MSS value if needed */
+ if (mss != tx_q->mss) {
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
+
+ stmmac_set_mss(priv, mss_desc, mss);
+ tx_q->mss = mss;
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+ priv->dma_tx_size);
+ WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
+ }
+
+ if (netif_msg_tx_queued(priv)) {
+ pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
+ __func__, hdr, proto_hdr_len, pay_len, mss);
+ pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
+ skb->data_len);
+ }
+
+ /* Check if VLAN can be inserted by HW */
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
+
+ first_entry = tx_q->cur_tx;
+ WARN_ON(tx_q->tx_skbuff[first_entry]);
+
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[first_entry].basic;
+ else
+ desc = &tx_q->dma_tx[first_entry];
+ first = desc;
+
+ if (has_vlan)
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
+
+ /* first descriptor: fill Headers on Buf1 */
+ des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
+ goto dma_map_err;
+
+ tx_q->tx_skbuff_dma[first_entry].buf = des;
+ tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
+ tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
+ tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
+
+ if (priv->dma_cap.addr64 <= 32) {
+ first->des0 = cpu_to_le32(des);
+
+ /* Fill start of payload in buff2 of first descriptor */
+ if (pay_len)
+ first->des1 = cpu_to_le32(des + proto_hdr_len);
+
+ /* If needed take extra descriptors to fill the remaining payload */
+ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
+ } else {
+ stmmac_set_desc_addr(priv, first, des);
+ tmp_pay_len = pay_len;
+ des += proto_hdr_len;
+ pay_len = 0;
+ }
+
+ stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
+
+ /* Prepare fragments */
+ for (i = 0; i < nfrags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ des = skb_frag_dma_map(priv->device, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
+ goto dma_map_err;
+
+ stmmac_tso_allocator(priv, des, skb_frag_size(frag),
+ (i == nfrags - 1), queue);
+
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
+ }
+
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
+
+ /* Only the last descriptor gets to point to the skb. */
+ tx_q->tx_skbuff[tx_q->cur_tx] = skb;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
+
+ /* Manage tx mitigation */
+ tx_packets = (tx_q->cur_tx + 1) - first_tx;
+ tx_q->tx_count_frames += tx_packets;
+
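+	/* Decide whether to set the IC (interrupt on completion) bit: always
+	 * for HW-timestamped frames, never by frame count when frame
+	 * coalescing is disabled (the TX timer handles cleanup), and
+	 * otherwise roughly once every tx_coal_frames packets.
+	 */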
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
+ set_ic = true;
+ else if (!priv->tx_coal_frames[queue])
+ set_ic = false;
+ else if (tx_packets > priv->tx_coal_frames[queue])
+ set_ic = true;
+ else if ((tx_q->tx_count_frames %
+ priv->tx_coal_frames[queue]) < tx_packets)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
+
+ tx_q->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
+ /* We've used all descriptors we need for this skb, however,
+ * advance cur_tx so that it references a fresh descriptor.
+ * ndo_start_xmit will fill this descriptor the next time it's
+ * called and stmmac_tx_clean may clean up to this descriptor.
+ */
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
+
+ if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
+ netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
+ __func__);
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
+ }
+
+ dev->stats.tx_bytes += skb->len;
+ priv->xstats.tx_tso_frames++;
+ priv->xstats.tx_tso_nfrags += nfrags;
+
+ if (priv->sarc_type)
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
+
+ skb_tx_timestamp(skb);
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ /* declare that device is doing timestamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ stmmac_enable_tx_timestamp(priv, first);
+ }
+
+ /* Complete the first descriptor before granting the DMA */
+ stmmac_prepare_tso_tx_desc(priv, first, 1,
+ proto_hdr_len,
+ pay_len,
+ 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
+ hdr / 4, (skb->len - proto_hdr_len));
+
+ /* If context desc is used to change MSS */
+ if (mss_desc) {
+		/* Make sure that the first descriptor has been completely
+		 * written, including its own bit. This is because the MSS
+		 * descriptor actually comes before the first descriptor, so
+		 * we need to make sure that the MSS descriptor's own bit is
+		 * the last thing written.
+		 */
+ dma_wmb();
+ stmmac_set_tx_owner(priv, mss_desc);
+ }
+
+ if (netif_msg_pktdata(priv)) {
+ pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
+ __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
+ tx_q->cur_tx, first, nfrags);
+ pr_info(">>> frame to be transmitted: ");
+ print_pkt(skb->data, skb_headlen(skb));
+ }
+
+ netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+
+ stmmac_flush_tx_descriptors(priv, queue);
+ stmmac_tx_timer_arm(priv, queue);
+
+ return NETDEV_TX_OK;
+
+dma_map_err:
+ dev_err(priv->device, "Tx dma map failed\n");
+ dev_kfree_skb(skb);
+ priv->dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+/**
+ * stmmac_xmit - Tx entry point of the driver
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description : this is the tx entry point of the driver.
+ * It programs the chain or the ring and supports oversized frames
+ * and SG feature.
+ */
+static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned int first_entry, tx_packets, enh_desc;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned int nopaged_len = skb_headlen(skb);
+ int i, csum_insertion = 0, is_jumbo = 0;
+ u32 queue = skb_get_queue_mapping(skb);
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ int gso = skb_shinfo(skb)->gso_type;
+ struct dma_edesc *tbs_desc = NULL;
+ struct dma_desc *desc, *first;
+ struct stmmac_tx_queue *tx_q;
+ bool has_vlan, set_ic;
+ int entry, first_tx;
+ dma_addr_t des;
+
+ tx_q = &priv->tx_queue[queue];
+ first_tx = tx_q->cur_tx;
+
+ if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
+ stmmac_disable_eee_mode(priv);
+
+ /* Manage oversized TCP frames for GMAC4 device */
+ if (skb_is_gso(skb) && priv->tso) {
+ if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+ return stmmac_tso_xmit(skb, dev);
+ if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
+ return stmmac_tso_xmit(skb, dev);
+ }
+
+ if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
+ if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
+ queue));
+ /* This is a hard error, log it. */
+ netdev_err(priv->dev,
+ "%s: Tx Ring full when queue awake\n",
+ __func__);
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Check if VLAN can be inserted by HW */
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
+
+ entry = tx_q->cur_tx;
+ first_entry = entry;
+ WARN_ON(tx_q->tx_skbuff[first_entry]);
+
+ csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
+
+ if (likely(priv->extend_desc))
+ desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[entry].basic;
+ else
+ desc = tx_q->dma_tx + entry;
+
+ first = desc;
+
+ if (has_vlan)
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
+
+ enh_desc = priv->plat->enh_desc;
+ /* To program the descriptors according to the size of the frame */
+ if (enh_desc)
+ is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
+
+ if (unlikely(is_jumbo)) {
+ entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
+ if (unlikely(entry < 0) && (entry != -EINVAL))
+ goto dma_map_err;
+ }
+
+ for (i = 0; i < nfrags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ int len = skb_frag_size(frag);
+ bool last_segment = (i == (nfrags - 1));
+
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ WARN_ON(tx_q->tx_skbuff[entry]);
+
+ if (likely(priv->extend_desc))
+ desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[entry].basic;
+ else
+ desc = tx_q->dma_tx + entry;
+
+ des = skb_frag_dma_map(priv->device, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
+ goto dma_map_err; /* should reuse desc w/o issues */
+
+ tx_q->tx_skbuff_dma[entry].buf = des;
+
+ stmmac_set_desc_addr(priv, desc, des);
+
+ tx_q->tx_skbuff_dma[entry].map_as_page = true;
+ tx_q->tx_skbuff_dma[entry].len = len;
+ tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
+
+ /* Prepare the descriptor and set the own bit too */
+ stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
+ priv->mode, 1, last_segment, skb->len);
+ }
+
+ /* Only the last descriptor gets to point to the skb. */
+ tx_q->tx_skbuff[entry] = skb;
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
+
+	/* According to the coalesce parameter, the IC bit for the latest
+	 * segment is reset and the timer is restarted to clean the tx status.
+	 * This approach takes care of the fragments: desc is the first
+	 * element in the case of no SG.
+	 */
+ tx_packets = (entry + 1) - first_tx;
+ tx_q->tx_count_frames += tx_packets;
+
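+	/* Same IC coalescing decision as in stmmac_tso_xmit() above. */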
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
+ set_ic = true;
+ else if (!priv->tx_coal_frames[queue])
+ set_ic = false;
+ else if (tx_packets > priv->tx_coal_frames[queue])
+ set_ic = true;
+ else if ((tx_q->tx_count_frames %
+ priv->tx_coal_frames[queue]) < tx_packets)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
+ if (likely(priv->extend_desc))
+ desc = &tx_q->dma_etx[entry].basic;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[entry].basic;
+ else
+ desc = &tx_q->dma_tx[entry];
+
+ tx_q->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
+ /* We've used all descriptors we need for this skb, however,
+ * advance cur_tx so that it references a fresh descriptor.
+ * ndo_start_xmit will fill this descriptor the next time it's
+ * called and stmmac_tx_clean may clean up to this descriptor.
+ */
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ tx_q->cur_tx = entry;
+
+ if (netif_msg_pktdata(priv)) {
+ netdev_dbg(priv->dev,
+ "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
+ __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
+ entry, first, nfrags);
+
+ netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
+ print_pkt(skb->data, skb->len);
+ }
+
+ if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
+ netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
+ __func__);
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
+ }
+
+ dev->stats.tx_bytes += skb->len;
+
+ if (priv->sarc_type)
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
+
+ skb_tx_timestamp(skb);
+
+ /* Ready to fill the first descriptor and set the OWN bit w/o any
+ * problems because all the descriptors are actually ready to be
+ * passed to the DMA engine.
+ */
+ if (likely(!is_jumbo)) {
+ bool last_segment = (nfrags == 0);
+
+ des = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
+ goto dma_map_err;
+
+ tx_q->tx_skbuff_dma[first_entry].buf = des;
+ tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
+ tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
+
+ stmmac_set_desc_addr(priv, first, des);
+
+ tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
+ tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ /* declare that device is doing timestamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ stmmac_enable_tx_timestamp(priv, first);
+ }
+
+ /* Prepare the first descriptor setting the OWN bit too */
+ stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
+ csum_insertion, priv->mode, 0, last_segment,
+ skb->len);
+ }
+
+ if (tx_q->tbs & STMMAC_TBS_EN) {
+ struct timespec64 ts = ns_to_timespec64(skb->tstamp);
+
+ tbs_desc = &tx_q->dma_entx[first_entry];
+ stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
+ }
+
+ stmmac_set_tx_owner(priv, first);
+
+ netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+
+ stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+ stmmac_flush_tx_descriptors(priv, queue);
+ stmmac_tx_timer_arm(priv, queue);
+
+ return NETDEV_TX_OK;
+
+dma_map_err:
+ netdev_err(priv->dev, "Tx DMA map failed\n");
+ dev_kfree_skb(skb);
+ priv->dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
+{
+ struct vlan_ethhdr *veth;
+ __be16 vlan_proto;
+ u16 vlanid;
+
+ veth = (struct vlan_ethhdr *)skb->data;
+ vlan_proto = veth->h_vlan_proto;
+
+ if ((vlan_proto == htons(ETH_P_8021Q) &&
+ dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
+ (vlan_proto == htons(ETH_P_8021AD) &&
+ dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
+ /* pop the vlan tag */
+ vlanid = ntohs(veth->h_vlan_TCI);
+ memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
+ }
+}
+
+/**
+ * stmmac_rx_refill - refill the used preallocated RX buffers
+ * @priv: driver private structure
+ * @queue: RX queue index
+ * Description: this reallocates the buffers used by the reception
+ * process that is based on zero-copy.
+ */
+static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ int dirty = stmmac_rx_dirty(priv, queue);
+ unsigned int entry = rx_q->dirty_rx;
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ if (priv->dma_cap.addr64 <= 32)
+ gfp |= GFP_DMA32;
+
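+	/* For every dirty entry: (re)allocate pages from the page pool,
+	 * rewrite the descriptor addresses and hand ownership back to the
+	 * DMA, optionally requesting the RX watchdog interrupt.
+	 */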
+ while (dirty-- > 0) {
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
+ struct dma_desc *p;
+ bool use_rx_wd;
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(rx_q->dma_erx + entry);
+ else
+ p = rx_q->dma_rx + entry;
+
+ if (!buf->page) {
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
+ if (!buf->page)
+ break;
+ }
+
+ if (priv->sph && !buf->sec_page) {
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
+ if (!buf->sec_page)
+ break;
+
+ buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+ }
+
+ buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
+
+ stmmac_set_desc_addr(priv, p, buf->addr);
+ if (priv->sph)
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
+ else
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
+ stmmac_refill_desc3(priv, rx_q, p);
+
+ rx_q->rx_count_frames++;
+ rx_q->rx_count_frames += priv->rx_coal_frames[queue];
+ if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
+ rx_q->rx_count_frames = 0;
+
+ use_rx_wd = !priv->rx_coal_frames[queue];
+ use_rx_wd |= rx_q->rx_count_frames > 0;
+ if (!priv->use_riwt)
+ use_rx_wd = false;
+
+ dma_wmb();
+ stmmac_set_rx_owner(priv, p, use_rx_wd);
+
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
+ }
+ rx_q->dirty_rx = entry;
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (rx_q->dirty_rx * sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
+}
+
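+/* Return how much of the received frame lives in buffer 1 of @p, taking
+ * split-header (SPH) mode and the first/last-descriptor status into account.
+ */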
+static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
+ struct dma_desc *p,
+ int status, unsigned int len)
+{
+ unsigned int plen = 0, hlen = 0;
+ int coe = priv->hw->rx_csum;
+
+	/* Not the first descriptor, buffer 1 length is always zero */
+ if (priv->sph && len)
+ return 0;
+
+ /* First descriptor, get split header length */
+ stmmac_get_rx_header_len(priv, p, &hlen);
+ if (priv->sph && hlen) {
+ priv->xstats.rx_split_hdr_pkt_n++;
+ return hlen;
+ }
+
+ /* First descriptor, not last descriptor and not split header */
+ if (status & rx_not_ls)
+ return priv->dma_buf_sz;
+
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
+
+ /* First descriptor and last descriptor and not split header */
+ return min_t(unsigned int, priv->dma_buf_sz, plen);
+}
+
+static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
+ struct dma_desc *p,
+ int status, unsigned int len)
+{
+ int coe = priv->hw->rx_csum;
+ unsigned int plen = 0;
+
+	/* Without split header, buffer 2 is not available */
+ if (!priv->sph)
+ return 0;
+
+ /* Not last descriptor */
+ if (status & rx_not_ls)
+ return priv->dma_buf_sz;
+
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
+
+ /* Last descriptor */
+ return plen - len;
+}
+
+static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
+ struct xdp_frame *xdpf, bool dma_map)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ unsigned int entry = tx_q->cur_tx;
+ struct dma_desc *tx_desc;
+ dma_addr_t dma_addr;
+ bool set_ic;
+
+ if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
+ return STMMAC_XDP_CONSUMED;
+
+ if (likely(priv->extend_desc))
+ tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ tx_desc = &tx_q->dma_entx[entry].basic;
+ else
+ tx_desc = tx_q->dma_tx + entry;
+
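+	/* Frames from ndo_xdp_xmit (dma_map == true) may live anywhere and
+	 * must be DMA-mapped here; XDP_TX frames already sit in a page-pool
+	 * page and only need a sync to the device.
+	 */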
+ if (dma_map) {
+ dma_addr = dma_map_single(priv->device, xdpf->data,
+ xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, dma_addr))
+ return STMMAC_XDP_CONSUMED;
+
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
+ } else {
+ struct page *page = virt_to_page(xdpf->data);
+
+ dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
+ xdpf->headroom;
+ dma_sync_single_for_device(priv->device, dma_addr,
+ xdpf->len, DMA_BIDIRECTIONAL);
+
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
+ }
+
+ tx_q->tx_skbuff_dma[entry].buf = dma_addr;
+ tx_q->tx_skbuff_dma[entry].map_as_page = false;
+ tx_q->tx_skbuff_dma[entry].len = xdpf->len;
+ tx_q->tx_skbuff_dma[entry].last_segment = true;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = false;
+
+ tx_q->xdpf[entry] = xdpf;
+
+ stmmac_set_desc_addr(priv, tx_desc, dma_addr);
+
+ stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
+ true, priv->mode, true, true,
+ xdpf->len);
+
+ tx_q->tx_count_frames++;
+
+ if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
+ tx_q->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, tx_desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
+ stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
+ tx_q->cur_tx = entry;
+
+ return STMMAC_XDP_TX;
+}
+
+static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
+ int cpu)
+{
+ int index = cpu;
+
+ if (unlikely(index < 0))
+ index = 0;
+
+ while (index >= priv->plat->tx_queues_to_use)
+ index -= priv->plat->tx_queues_to_use;
+
+ return index;
+}
+
+static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
+ struct xdp_buff *xdp)
+{
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ int queue;
+ int res;
+
+ if (unlikely(!xdpf))
+ return STMMAC_XDP_CONSUMED;
+
+ queue = stmmac_xdp_get_tx_queue(priv, cpu);
+ nq = netdev_get_tx_queue(priv->dev, queue);
+
+ __netif_tx_lock(nq, cpu);
+ /* Avoids TX time-out as we are sharing with slow path */
+ txq_trans_cond_update(nq);
+
+ res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
+ if (res == STMMAC_XDP_TX)
+ stmmac_flush_tx_descriptors(priv, queue);
+
+ __netif_tx_unlock(nq);
+
+ return res;
+}
+
+static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
+ struct bpf_prog *prog,
+ struct xdp_buff *xdp)
+{
+ u32 act;
+ int res;
+
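+	/* Run the attached XDP program and translate its verdict into the
+	 * driver's STMMAC_XDP_* result codes; aborted and unknown verdicts
+	 * fall through to a drop.
+	 */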
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ res = STMMAC_XDP_PASS;
+ break;
+ case XDP_TX:
+ res = stmmac_xdp_xmit_back(priv, xdp);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
+ res = STMMAC_XDP_CONSUMED;
+ else
+ res = STMMAC_XDP_REDIRECT;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(priv->dev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(priv->dev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ res = STMMAC_XDP_CONSUMED;
+ break;
+ }
+
+ return res;
+}
+
+static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
+ struct xdp_buff *xdp)
+{
+ struct bpf_prog *prog;
+ int res;
+
+ prog = READ_ONCE(priv->xdp_prog);
+ if (!prog) {
+ res = STMMAC_XDP_PASS;
+ goto out;
+ }
+
+ res = __stmmac_xdp_run_prog(priv, prog, xdp);
+out:
+ return ERR_PTR(-res);
+}
+
+static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
+ int xdp_status)
+{
+ int cpu = smp_processor_id();
+ int queue;
+
+ queue = stmmac_xdp_get_tx_queue(priv, cpu);
+
+ if (xdp_status & STMMAC_XDP_TX)
+ stmmac_tx_timer_arm(priv, queue);
+
+ if (xdp_status & STMMAC_XDP_REDIRECT)
+ xdp_do_flush();
+}
+
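+/* Copy an XSK zero-copy buffer into a freshly allocated SKB: the umem page
+ * stays with the XSK pool, so the payload (and metadata) must be copied out.
+ */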
+static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ unsigned int datasize = xdp->data_end - xdp->data;
+ struct sk_buff *skb;
+
+ skb = __napi_alloc_skb(&ch->rxtx_napi,
+ xdp->data_end - xdp->data_hard_start,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), xdp->data, datasize);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ return skb;
+}
+
+static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
+ struct dma_desc *p, struct dma_desc *np,
+ struct xdp_buff *xdp)
+{
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned int len = xdp->data_end - xdp->data;
+ enum pkt_hash_types hash_type;
+ int coe = priv->hw->rx_csum;
+ struct sk_buff *skb;
+ u32 hash;
+
+ skb = stmmac_construct_skb_zc(ch, xdp);
+ if (!skb) {
+ priv->dev->stats.rx_dropped++;
+ return;
+ }
+
+ stmmac_get_rx_hwtstamp(priv, p, np, skb);
+ stmmac_rx_vlan(priv->dev, skb);
+ skb->protocol = eth_type_trans(skb, priv->dev);
+
+ if (unlikely(!coe))
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
+ skb_set_hash(skb, hash, hash_type);
+
+ skb_record_rx_queue(skb, queue);
+ napi_gro_receive(&ch->rxtx_napi, skb);
+
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += len;
+}
+
+static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ unsigned int entry = rx_q->dirty_rx;
+ struct dma_desc *rx_desc = NULL;
+ bool ret = true;
+
+ budget = min(budget, stmmac_rx_dirty(priv, queue));
+
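+	/* Refill at most @budget dirty entries, stopping early (and reporting
+	 * it via the return value) if the XSK pool runs out of buffers.
+	 */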
+ while (budget-- > 0 && entry != rx_q->cur_rx) {
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
+ dma_addr_t dma_addr;
+ bool use_rx_wd;
+
+ if (!buf->xdp) {
+ buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
+ if (!buf->xdp) {
+ ret = false;
+ break;
+ }
+ }
+
+ if (priv->extend_desc)
+ rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
+ else
+ rx_desc = rx_q->dma_rx + entry;
+
+ dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
+ stmmac_set_desc_addr(priv, rx_desc, dma_addr);
+ stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
+ stmmac_refill_desc3(priv, rx_q, rx_desc);
+
+ rx_q->rx_count_frames++;
+ rx_q->rx_count_frames += priv->rx_coal_frames[queue];
+ if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
+ rx_q->rx_count_frames = 0;
+
+ use_rx_wd = !priv->rx_coal_frames[queue];
+ use_rx_wd |= rx_q->rx_count_frames > 0;
+ if (!priv->use_riwt)
+ use_rx_wd = false;
+
+ dma_wmb();
+ stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
+
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
+ }
+
+ if (rx_desc) {
+ rx_q->dirty_rx = entry;
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (rx_q->dirty_rx * sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
+ }
+
+ return ret;
+}
+
+static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ unsigned int count = 0, error = 0, len = 0;
+ int dirty = stmmac_rx_dirty(priv, queue);
+ unsigned int next_entry = rx_q->cur_rx;
+ unsigned int desc_size;
+ struct bpf_prog *prog;
+ bool failure = false;
+ int xdp_status = 0;
+ int status = 0;
+
+ if (netif_msg_rx_status(priv)) {
+ void *rx_head;
+
+ netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
+ if (priv->extend_desc) {
+ rx_head = (void *)rx_q->dma_erx;
+ desc_size = sizeof(struct dma_extended_desc);
+ } else {
+ rx_head = (void *)rx_q->dma_rx;
+ desc_size = sizeof(struct dma_desc);
+ }
+
+ stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
+ rx_q->dma_rx_phy, desc_size);
+ }
+ while (count < limit) {
+ struct stmmac_rx_buffer *buf;
+ unsigned int buf1_len = 0;
+ struct dma_desc *np, *p;
+ int entry;
+ int res;
+
+ if (!count && rx_q->state_saved) {
+ error = rx_q->state.error;
+ len = rx_q->state.len;
+ } else {
+ rx_q->state_saved = false;
+ error = 0;
+ len = 0;
+ }
+
+ if (count >= limit)
+ break;
+
+read_again:
+ buf1_len = 0;
+ entry = next_entry;
+ buf = &rx_q->buf_pool[entry];
+
+ if (dirty >= STMMAC_RX_FILL_BATCH) {
+ failure = failure ||
+ !stmmac_rx_refill_zc(priv, queue, dirty);
+ dirty = 0;
+ }
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(rx_q->dma_erx + entry);
+ else
+ p = rx_q->dma_rx + entry;
+
+ /* read the status of the incoming frame */
+ status = stmmac_rx_status(priv, &priv->dev->stats,
+ &priv->xstats, p);
+ /* check if managed by the DMA otherwise go ahead */
+ if (unlikely(status & dma_own))
+ break;
+
+ /* Prefetch the next RX descriptor */
+ rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
+ priv->dma_rx_size);
+ next_entry = rx_q->cur_rx;
+
+ if (priv->extend_desc)
+ np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
+ else
+ np = rx_q->dma_rx + next_entry;
+
+ prefetch(np);
+
+		/* Ensure a valid XSK buffer before proceeding */
+ if (!buf->xdp)
+ break;
+
+ if (priv->extend_desc)
+ stmmac_rx_extended_status(priv, &priv->dev->stats,
+ &priv->xstats,
+ rx_q->dma_erx + entry);
+ if (unlikely(status == discard_frame)) {
+ xsk_buff_free(buf->xdp);
+ buf->xdp = NULL;
+ dirty++;
+ error = 1;
+ if (!priv->hwts_rx_en)
+ priv->dev->stats.rx_errors++;
+ }
+
+ if (unlikely(error && (status & rx_not_ls)))
+ goto read_again;
+ if (unlikely(error)) {
+ count++;
+ continue;
+ }
+
+		/* The XSK pool expects the RX frame to be mapped 1:1 to an XSK buffer */
+ if (likely(status & rx_not_ls)) {
+ xsk_buff_free(buf->xdp);
+ buf->xdp = NULL;
+ dirty++;
+ count++;
+ goto read_again;
+ }
+
+		/* XDP ZC frames only support primary buffers for now */
+ buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
+ len += buf1_len;
+
+ /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
+ * Type frames (LLC/LLC-SNAP)
+ *
+ * llc_snap is never checked in GMAC >= 4, so this ACS
+ * feature is always disabled and packets need to be
+ * stripped manually.
+ */
+ if (likely(!(status & rx_not_ls)) &&
+ (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
+ unlikely(status != llc_snap))) {
+ buf1_len -= ETH_FCS_LEN;
+ len -= ETH_FCS_LEN;
+ }
+
+ /* RX buffer is good and fit into a XSK pool buffer */
+ buf->xdp->data_end = buf->xdp->data + buf1_len;
+ xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
+
+ prog = READ_ONCE(priv->xdp_prog);
+ res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
+
+ switch (res) {
+ case STMMAC_XDP_PASS:
+ stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
+ xsk_buff_free(buf->xdp);
+ break;
+ case STMMAC_XDP_CONSUMED:
+ xsk_buff_free(buf->xdp);
+ priv->dev->stats.rx_dropped++;
+ break;
+ case STMMAC_XDP_TX:
+ case STMMAC_XDP_REDIRECT:
+ xdp_status |= res;
+ break;
+ }
+
+ buf->xdp = NULL;
+ dirty++;
+ count++;
+ }
+
+ if (status & rx_not_ls) {
+ rx_q->state_saved = true;
+ rx_q->state.error = error;
+ rx_q->state.len = len;
+ }
+
+ stmmac_finalize_xdp_rx(priv, xdp_status);
+
+ priv->xstats.rx_pkt_n += count;
+ priv->xstats.rxq_stats[queue].rx_pkt_n += count;
+
+ if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
+ if (failure || stmmac_rx_dirty(priv, queue) > 0)
+ xsk_set_rx_need_wakeup(rx_q->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
+
+ return (int)count;
+ }
+
+ return failure ? limit : (int)count;
+}
+
+/**
+ * stmmac_rx - manage the receive process
+ * @priv: driver private structure
+ * @limit: napi budget
+ * @queue: RX queue index.
+ * Description: this is the function called by the napi poll method.
+ * It gets all the frames inside the ring.
+ */
+static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned int count = 0, error = 0, len = 0;
+ int status = 0, coe = priv->hw->rx_csum;
+ unsigned int next_entry = rx_q->cur_rx;
+ enum dma_data_direction dma_dir;
+ unsigned int desc_size;
+ struct sk_buff *skb = NULL;
+ struct xdp_buff xdp;
+ int xdp_status = 0;
+ int buf_sz;
+
+ dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
+ buf_sz = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+
+ if (netif_msg_rx_status(priv)) {
+ void *rx_head;
+
+ netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
+ if (priv->extend_desc) {
+ rx_head = (void *)rx_q->dma_erx;
+ desc_size = sizeof(struct dma_extended_desc);
+ } else {
+ rx_head = (void *)rx_q->dma_rx;
+ desc_size = sizeof(struct dma_desc);
+ }
+
+ stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true,
+ rx_q->dma_rx_phy, desc_size);
+ }
+ while (count < limit) {
+ unsigned int buf1_len = 0, buf2_len = 0;
+ enum pkt_hash_types hash_type;
+ struct stmmac_rx_buffer *buf;
+ struct dma_desc *np, *p;
+ int entry;
+ u32 hash;
+
+ if (!count && rx_q->state_saved) {
+ skb = rx_q->state.skb;
+ error = rx_q->state.error;
+ len = rx_q->state.len;
+ } else {
+ rx_q->state_saved = false;
+ skb = NULL;
+ error = 0;
+ len = 0;
+ }
+
+ if (count >= limit)
+ break;
+
+read_again:
+ buf1_len = 0;
+ buf2_len = 0;
+ entry = next_entry;
+ buf = &rx_q->buf_pool[entry];
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(rx_q->dma_erx + entry);
+ else
+ p = rx_q->dma_rx + entry;
+
+ /* read the status of the incoming frame */
+ status = stmmac_rx_status(priv, &priv->dev->stats,
+ &priv->xstats, p);
+ /* check if managed by the DMA otherwise go ahead */
+ if (unlikely(status & dma_own))
+ break;
+
+ rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
+ priv->dma_rx_size);
+ next_entry = rx_q->cur_rx;
+
+ if (priv->extend_desc)
+ np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
+ else
+ np = rx_q->dma_rx + next_entry;
+
+ prefetch(np);
+
+ if (priv->extend_desc)
+ stmmac_rx_extended_status(priv, &priv->dev->stats,
+ &priv->xstats, rx_q->dma_erx + entry);
+ if (unlikely(status == discard_frame)) {
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+ error = 1;
+ if (!priv->hwts_rx_en)
+ priv->dev->stats.rx_errors++;
+ }
+
+ if (unlikely(error && (status & rx_not_ls)))
+ goto read_again;
+ if (unlikely(error)) {
+ dev_kfree_skb(skb);
+ skb = NULL;
+ count++;
+ continue;
+ }
+
+ /* Buffer is good. Go on. */
+
+ prefetch(page_address(buf->page) + buf->page_offset);
+ if (buf->sec_page)
+ prefetch(page_address(buf->sec_page));
+
+ buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
+ len += buf1_len;
+ buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
+ len += buf2_len;
+
+ /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
+ * Type frames (LLC/LLC-SNAP)
+ *
+ * llc_snap is never checked in GMAC >= 4, so this ACS
+ * feature is always disabled and packets need to be
+ * stripped manually.
+ */
+ if (likely(!(status & rx_not_ls)) &&
+ (likely(priv->synopsys_id >= DWMAC_CORE_4_00) ||
+ unlikely(status != llc_snap))) {
+ if (buf2_len) {
+ buf2_len -= ETH_FCS_LEN;
+ len -= ETH_FCS_LEN;
+ } else if (buf1_len) {
+ buf1_len -= ETH_FCS_LEN;
+ len -= ETH_FCS_LEN;
+ }
+ }
+
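+		/* Only the first buffer of a frame goes through XDP: build an
+		 * xdp_buff around it and run the program before any SKB has
+		 * been allocated for this frame.
+		 */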
+ if (!skb) {
+ unsigned int pre_len, sync_len;
+
+ dma_sync_single_for_cpu(priv->device, buf->addr,
+ buf1_len, dma_dir);
+
+ xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
+ xdp_prepare_buff(&xdp, page_address(buf->page),
+ buf->page_offset, buf1_len, false);
+
+ pre_len = xdp.data_end - xdp.data_hard_start -
+ buf->page_offset;
+ skb = stmmac_xdp_run_prog(priv, &xdp);
+			/* Due to xdp_adjust_tail: the DMA sync for_device
+			 * must cover the maximum length the CPU touched
+			 */
+ sync_len = xdp.data_end - xdp.data_hard_start -
+ buf->page_offset;
+ sync_len = max(sync_len, pre_len);
+
+			/* For any verdict other than XDP_PASS */
+ if (IS_ERR(skb)) {
+ unsigned int xdp_res = -PTR_ERR(skb);
+
+ if (xdp_res & STMMAC_XDP_CONSUMED) {
+ page_pool_put_page(rx_q->page_pool,
+ virt_to_head_page(xdp.data),
+ sync_len, true);
+ buf->page = NULL;
+ priv->dev->stats.rx_dropped++;
+
+				/* Clear skb, as it was set to an
+				 * error status by the XDP program.
+				 */
+ skb = NULL;
+
+ if (unlikely((status & rx_not_ls)))
+ goto read_again;
+
+ count++;
+ continue;
+ } else if (xdp_res & (STMMAC_XDP_TX |
+ STMMAC_XDP_REDIRECT)) {
+ xdp_status |= xdp_res;
+ buf->page = NULL;
+ skb = NULL;
+ count++;
+ continue;
+ }
+ }
+ }
+
+ if (!skb) {
+ /* XDP program may expand or reduce tail */
+ buf1_len = xdp.data_end - xdp.data;
+
+ skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
+ if (!skb) {
+ priv->dev->stats.rx_dropped++;
+ count++;
+ goto drain_data;
+ }
+
+ /* XDP program may adjust header */
+ skb_copy_to_linear_data(skb, xdp.data, buf1_len);
+ skb_put(skb, buf1_len);
+
+ /* Data payload copied into SKB, page ready for recycle */
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+ } else if (buf1_len) {
+ dma_sync_single_for_cpu(priv->device, buf->addr,
+ buf1_len, dma_dir);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ buf->page, buf->page_offset, buf1_len,
+ priv->dma_buf_sz);
+
+ /* Data payload appended into SKB */
+ page_pool_release_page(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+ }
+
+ if (buf2_len) {
+ dma_sync_single_for_cpu(priv->device, buf->sec_addr,
+ buf2_len, dma_dir);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ buf->sec_page, 0, buf2_len,
+ priv->dma_buf_sz);
+
+ /* Data payload appended into SKB */
+ page_pool_release_page(rx_q->page_pool, buf->sec_page);
+ buf->sec_page = NULL;
+ }
+
+drain_data:
+ if (likely(status & rx_not_ls))
+ goto read_again;
+ if (!skb)
+ continue;
+
+ /* Got entire packet into SKB. Finish it. */
+
+ stmmac_get_rx_hwtstamp(priv, p, np, skb);
+ stmmac_rx_vlan(priv->dev, skb);
+ skb->protocol = eth_type_trans(skb, priv->dev);
+
+ if (unlikely(!coe))
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
+ skb_set_hash(skb, hash, hash_type);
+
+ skb_record_rx_queue(skb, queue);
+ napi_gro_receive(&ch->rx_napi, skb);
+ skb = NULL;
+
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += len;
+ count++;
+ }
+
+ if (status & rx_not_ls || skb) {
+ rx_q->state_saved = true;
+ rx_q->state.skb = skb;
+ rx_q->state.error = error;
+ rx_q->state.len = len;
+ }
+
+ stmmac_finalize_xdp_rx(priv, xdp_status);
+
+ stmmac_rx_refill(priv, queue);
+
+ priv->xstats.rx_pkt_n += count;
+ priv->xstats.rxq_stats[queue].rx_pkt_n += count;
+
+ return count;
+}
+
+static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct stmmac_channel *ch =
+ container_of(napi, struct stmmac_channel, rx_napi);
+ struct stmmac_priv *priv = ch->priv_data;
+ u32 chan = ch->index;
+ int work_done;
+
+ priv->xstats.napi_poll++;
+
+ work_done = stmmac_rx(priv, budget, chan);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
+ return work_done;
+}
+
+static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct stmmac_channel *ch =
+ container_of(napi, struct stmmac_channel, tx_napi);
+ struct stmmac_priv *priv = ch->priv_data;
+ u32 chan = ch->index;
+ int work_done;
+
+ priv->xstats.napi_poll++;
+
+ work_done = stmmac_tx_clean(priv, budget, chan);
+ work_done = min(work_done, budget);
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
+ return work_done;
+}
+
+static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
+{
+ struct stmmac_channel *ch =
+ container_of(napi, struct stmmac_channel, rxtx_napi);
+ struct stmmac_priv *priv = ch->priv_data;
+ int rx_done, tx_done, rxtx_done;
+ u32 chan = ch->index;
+
+ priv->xstats.napi_poll++;
+
+ tx_done = stmmac_tx_clean(priv, budget, chan);
+ tx_done = min(tx_done, budget);
+
+ rx_done = stmmac_rx_zc(priv, budget, chan);
+
+ rxtx_done = max(tx_done, rx_done);
+
+	/* If either TX or RX work is not complete, return budget
+	 * and keep polling
+	 */
+ if (rxtx_done >= budget)
+ return budget;
+
+ /* all work done, exit the polling mode */
+ if (napi_complete_done(napi, rxtx_done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+		/* Both RX and TX work are complete,
+		 * so enable both RX and TX IRQs.
+		 */
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
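+	/* rxtx_done is below budget here (the >= case returned above); the
+	 * min() is purely defensive so that a full budget is never reported
+	 * after napi_complete_done().
+	 */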
+ return min(rxtx_done, budget - 1);
+}
+
+/**
+ * stmmac_tx_timeout
+ * @dev : Pointer to net device structure
+ * @txqueue: the index of the hanging transmit queue
+ * Description: this function is called when a packet transmission fails to
+ * complete within a reasonable time. The driver will mark the error in the
+ * netdev structure and arrange for the device to be reset to a sane state
+ * in order to transmit a new packet.
+ */
+static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ stmmac_global_err(priv);
+}
+
+/**
+ * stmmac_set_rx_mode - entry point for multicast addressing
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled.
+ * Return value:
+ * void.
+ */
+static void stmmac_set_rx_mode(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ stmmac_set_filter(priv, priv->hw, dev);
+}
+
+/**
+ * stmmac_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: the Maximum Transmission Unit (MTU) is used by the network
+ * layer to drive packet transmission. Ethernet has an MTU of 1500 octets
+ * (ETH_DATA_LEN). This value can be changed with ifconfig.
+ * Return value:
+ *  0 on success and a negative errno code on failure.
+ */
+static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int txfifosz = priv->plat->tx_fifo_size;
+ const int mtu = new_mtu;
+
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+
+ txfifosz /= priv->plat->tx_queues_to_use;
+
+ if (netif_running(dev)) {
+ netdev_err(priv->dev, "must be stopped to change its MTU\n");
+ return -EBUSY;
+ }
+
+ if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
+ netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
+ return -EINVAL;
+ }
+
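+	/* The aligned MTU is only used for the FIFO and buffer size checks
+	 * below; the device keeps the MTU value the caller asked for.
+	 */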
+ new_mtu = STMMAC_ALIGN(new_mtu);
+
+ /* If condition true, FIFO is too small or MTU too large */
+ if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
+ return -EINVAL;
+
+ dev->mtu = mtu;
+
+ netdev_update_features(dev);
+
+ return 0;
+}
+
+static netdev_features_t stmmac_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
+ features &= ~NETIF_F_RXCSUM;
+
+ if (!priv->plat->tx_coe)
+ features &= ~NETIF_F_CSUM_MASK;
+
+	/* Some GMAC devices have buggy Jumbo frame support that
+	 * needs to have the Tx COE disabled for oversized frames
+	 * (due to limited buffer sizes). In this case we disable
+	 * the TX csum insertion in the TDES and do not use SF.
+	 */
+ if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
+ features &= ~NETIF_F_CSUM_MASK;
+
+ /* Disable tso if asked by ethtool */
+ if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ if (features & NETIF_F_TSO)
+ priv->tso = true;
+ else
+ priv->tso = false;
+ }
+
+ return features;
+}
+
+static int stmmac_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+
+	/* Keep the COE type if checksum offload is supported */
+ if (features & NETIF_F_RXCSUM)
+ priv->hw->rx_csum = priv->plat->rx_coe;
+ else
+ priv->hw->rx_csum = 0;
+	/* No check is needed because rx_coe has been set earlier and will be
+	 * fixed up if there is an issue.
+	 */
+ stmmac_rx_ipc(priv, priv->hw);
+
+ if (priv->sph_cap) {
+ bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ u32 chan;
+
+ for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+ }
+
+ return 0;
+}
+
+static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
+{
+ struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
+ enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
+ enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
+ bool *hs_enable = &fpe_cfg->hs_enable;
+
+ if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
+ return;
+
+ /* If LP has sent verify mPacket, LP is FPE capable */
+ if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
+ if (*lp_state < FPE_STATE_CAPABLE)
+ *lp_state = FPE_STATE_CAPABLE;
+
+		/* If the user has requested FPE enable, respond quickly */
+ if (*hs_enable)
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+ MPACKET_RESPONSE);
+ }
+
+ /* If Local has sent verify mPacket, Local is FPE capable */
+ if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
+ if (*lo_state < FPE_STATE_CAPABLE)
+ *lo_state = FPE_STATE_CAPABLE;
+ }
+
+ /* If LP has sent response mPacket, LP is entering FPE ON */
+ if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
+ *lp_state = FPE_STATE_ENTERING_ON;
+
+ /* If Local has sent response mPacket, Local is entering FPE ON */
+ if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
+ *lo_state = FPE_STATE_ENTERING_ON;
+
+ if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
+ !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
+ priv->fpe_wq) {
+ queue_work(priv->fpe_wq, &priv->fpe_task);
+ }
+}
+
+static void stmmac_common_interrupt(struct stmmac_priv *priv)
+{
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 queues_count;
+ u32 queue;
+ bool xmac;
+
+ xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
+
+ if (priv->irq_wake)
+ pm_wakeup_event(priv->device, 0);
+
+ if (priv->dma_cap.estsel)
+ stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
+ &priv->xstats, tx_cnt);
+
+ if (priv->dma_cap.fpesel) {
+ int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
+ priv->dev);
+
+ stmmac_fpe_event_status(priv, status);
+ }
+
+ /* To handle GMAC own interrupts */
+ if ((priv->plat->has_gmac) || xmac) {
+ int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
+
+ if (unlikely(status)) {
+ /* For LPI we need to save the tx status */
+ if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
+ priv->tx_path_in_lpi_mode = true;
+ if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
+ priv->tx_path_in_lpi_mode = false;
+ }
+
+ for (queue = 0; queue < queues_count; queue++) {
+ status = stmmac_host_mtl_irq_status(priv, priv->hw,
+ queue);
+ }
+
+ /* PCS link status */
+ if (priv->hw->pcs) {
+ if (priv->xstats.pcs_link)
+ netif_carrier_on(priv->dev);
+ else
+ netif_carrier_off(priv->dev);
+ }
+
+ stmmac_timestamp_interrupt(priv, priv);
+ }
+}
+
+/**
+ * stmmac_interrupt - main ISR
+ * @irq: interrupt number.
+ * @dev_id: to pass the net device pointer.
+ * Description: this is the main driver interrupt service routine.
+ * It can call:
+ * o DMA service routine (to manage incoming frame reception and transmission
+ * status)
+ * o Core interrupts to manage: remote wake-up, management counter, LPI
+ * interrupts.
+ */
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ /* Check if a fatal error happened */
+ if (stmmac_safety_feat_interrupt(priv))
+ return IRQ_HANDLED;
+
+ /* To handle Common interrupts */
+ stmmac_common_interrupt(priv);
+
+ /* To handle DMA interrupts */
+ stmmac_dma_interrupt(priv);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(!dev)) {
+ netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+ return IRQ_NONE;
+ }
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ /* To handle Common interrupts */
+ stmmac_common_interrupt(priv);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(!dev)) {
+ netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+ return IRQ_NONE;
+ }
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ /* Check if a fatal error happened */
+ stmmac_safety_feat_interrupt(priv);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
+{
+ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
+ int chan = tx_q->queue_index;
+ struct stmmac_priv *priv;
+ int status;
+
+ priv = container_of(tx_q, struct stmmac_priv, tx_queue[chan]);
+
+ if (unlikely(!data)) {
+ netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+ return IRQ_NONE;
+ }
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
+
+ if (unlikely(status & tx_hard_error_bump_tc)) {
+ /* Try to bump up the dma threshold on this failure */
+ stmmac_bump_dma_threshold(priv, chan);
+ } else if (unlikely(status == tx_hard_error)) {
+ stmmac_tx_err(priv, chan);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
+{
+ struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
+ int chan = rx_q->queue_index;
+ struct stmmac_priv *priv;
+
+ priv = container_of(rx_q, struct stmmac_priv, rx_queue[chan]);
+
+ if (unlikely(!data)) {
+ netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+ return IRQ_NONE;
+ }
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ stmmac_napi_check(priv, chan, DMA_DIR_RX);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling receive - used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled.
+ */
+static void stmmac_poll_controller(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int i;
+
+ /* If adapter is down, do nothing */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return;
+
+ if (priv->plat->multi_msi_en) {
+ for (i = 0; i < priv->plat->rx_queues_to_use; i++)
+ stmmac_msi_intr_rx(0, &priv->rx_queue[i]);
+
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++)
+ stmmac_msi_intr_tx(0, &priv->tx_queue[i]);
+ } else {
+ disable_irq(dev->irq);
+ stmmac_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+ }
+}
+#endif
+
+/**
+ * stmmac_ioctl - Entry point for the Ioctl
+ * @dev: Device pointer.
+ * @rq: An IOCTL-specific structure that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * @cmd: IOCTL command
+ * Description:
+ * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
+ */
+static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
+ break;
+ case SIOCSHWTSTAMP:
+ ret = stmmac_hwtstamp_set(dev, rq);
+ break;
+ case SIOCGHWTSTAMP:
+ ret = stmmac_hwtstamp_get(dev, rq);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct stmmac_priv *priv = cb_priv;
+ int ret = -EOPNOTSUPP;
+
+ if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
+ return ret;
+
+ __stmmac_disable_all_queues(priv);
+
+ switch (type) {
+ case TC_SETUP_CLSU32:
+ ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
+ break;
+ case TC_SETUP_CLSFLOWER:
+ ret = stmmac_tc_setup_cls(priv, priv, type_data);
+ break;
+ default:
+ break;
+ }
+
+ stmmac_enable_all_queues(priv);
+ return ret;
+}
+
+static LIST_HEAD(stmmac_block_cb_list);
+
+static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return flow_block_cb_setup_simple(type_data,
+ &stmmac_block_cb_list,
+ stmmac_setup_tc_block_cb,
+ priv, priv, true);
+ case TC_SETUP_QDISC_CBS:
+ return stmmac_tc_setup_cbs(priv, priv, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return stmmac_tc_setup_taprio(priv, priv, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return stmmac_tc_setup_etf(priv, priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ int gso = skb_shinfo(skb)->gso_type;
+
+ if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
+		/*
+		 * There is no way to determine the number of TSO/USO
+		 * capable queues.  Always use queue 0, because if
+		 * TSO/USO is supported then at least this one will be
+		 * capable.
+		 */
+ return 0;
+ }
+
+ return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
+}
+
+static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret = 0;
+
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
+ ret = eth_mac_addr(ndev, addr);
+ if (ret)
+ goto set_mac_error;
+
+ stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
+
+set_mac_error:
+ pm_runtime_put(priv->device);
+
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *stmmac_fs_dir;
+
+static void sysfs_display_ring(void *head, int size, int extend_desc,
+ struct seq_file *seq, dma_addr_t dma_phy_addr)
+{
+ int i;
+ struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
+ struct dma_desc *p = (struct dma_desc *)head;
+ dma_addr_t dma_addr;
+
+ for (i = 0; i < size; i++) {
+ if (extend_desc) {
+ dma_addr = dma_phy_addr + i * sizeof(*ep);
+ seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, &dma_addr,
+ le32_to_cpu(ep->basic.des0),
+ le32_to_cpu(ep->basic.des1),
+ le32_to_cpu(ep->basic.des2),
+ le32_to_cpu(ep->basic.des3));
+ ep++;
+ } else {
+ dma_addr = dma_phy_addr + i * sizeof(*p);
+ seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, &dma_addr,
+ le32_to_cpu(p->des0), le32_to_cpu(p->des1),
+ le32_to_cpu(p->des2), le32_to_cpu(p->des3));
+ p++;
+ }
+ seq_printf(seq, "\n");
+ }
+}
+
+static int stmmac_rings_status_show(struct seq_file *seq, void *v)
+{
+ struct net_device *dev = seq->private;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_count = priv->plat->rx_queues_to_use;
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ if ((dev->flags & IFF_UP) == 0)
+ return 0;
+
+ for (queue = 0; queue < rx_count; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ seq_printf(seq, "RX Queue %d:\n", queue);
+
+ if (priv->extend_desc) {
+ seq_printf(seq, "Extended descriptor ring:\n");
+ sysfs_display_ring((void *)rx_q->dma_erx,
+ priv->dma_rx_size, 1, seq, rx_q->dma_rx_phy);
+ } else {
+ seq_printf(seq, "Descriptor ring:\n");
+ sysfs_display_ring((void *)rx_q->dma_rx,
+ priv->dma_rx_size, 0, seq, rx_q->dma_rx_phy);
+ }
+ }
+
+ for (queue = 0; queue < tx_count; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ seq_printf(seq, "TX Queue %d:\n", queue);
+
+ if (priv->extend_desc) {
+ seq_printf(seq, "Extended descriptor ring:\n");
+ sysfs_display_ring((void *)tx_q->dma_etx,
+ priv->dma_tx_size, 1, seq, tx_q->dma_tx_phy);
+ } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
+ seq_printf(seq, "Descriptor ring:\n");
+ sysfs_display_ring((void *)tx_q->dma_tx,
+ priv->dma_tx_size, 0, seq, tx_q->dma_tx_phy);
+ }
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
+
+static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
+{
+ struct net_device *dev = seq->private;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (!priv->hw_cap_support) {
+ seq_printf(seq, "DMA HW features not supported\n");
+ return 0;
+ }
+
+ seq_printf(seq, "==============================\n");
+ seq_printf(seq, "\tDMA HW features\n");
+ seq_printf(seq, "==============================\n");
+
+ seq_printf(seq, "\t10/100 Mbps: %s\n",
+ (priv->dma_cap.mbps_10_100) ? "Y" : "N");
+ seq_printf(seq, "\t1000 Mbps: %s\n",
+ (priv->dma_cap.mbps_1000) ? "Y" : "N");
+ seq_printf(seq, "\tHalf duplex: %s\n",
+ (priv->dma_cap.half_duplex) ? "Y" : "N");
+ seq_printf(seq, "\tHash Filter: %s\n",
+ (priv->dma_cap.hash_filter) ? "Y" : "N");
+ seq_printf(seq, "\tMultiple MAC address registers: %s\n",
+ (priv->dma_cap.multi_addr) ? "Y" : "N");
+ seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
+ (priv->dma_cap.pcs) ? "Y" : "N");
+ seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
+ (priv->dma_cap.sma_mdio) ? "Y" : "N");
+ seq_printf(seq, "\tPMT Remote wake up: %s\n",
+ (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
+ seq_printf(seq, "\tPMT Magic Frame: %s\n",
+ (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
+ seq_printf(seq, "\tRMON module: %s\n",
+ (priv->dma_cap.rmon) ? "Y" : "N");
+ seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
+ (priv->dma_cap.time_stamp) ? "Y" : "N");
+ seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
+ (priv->dma_cap.atime_stamp) ? "Y" : "N");
+ seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
+ (priv->dma_cap.eee) ? "Y" : "N");
+ seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
+ seq_printf(seq, "\tChecksum Offload in TX: %s\n",
+ (priv->dma_cap.tx_coe) ? "Y" : "N");
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
+ (priv->dma_cap.rx_coe) ? "Y" : "N");
+ } else {
+ seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
+ seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+ }
+ seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
+ (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
+ seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
+ priv->dma_cap.number_rx_channel);
+ seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
+ priv->dma_cap.number_tx_channel);
+ seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
+ priv->dma_cap.number_rx_queues);
+ seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
+ priv->dma_cap.number_tx_queues);
+ seq_printf(seq, "\tEnhanced descriptors: %s\n",
+ (priv->dma_cap.enh_desc) ? "Y" : "N");
+ seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
+ seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
+ seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
+ seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
+ seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
+ priv->dma_cap.pps_out_num);
+ seq_printf(seq, "\tSafety Features: %s\n",
+ priv->dma_cap.asp ? "Y" : "N");
+ seq_printf(seq, "\tFlexible RX Parser: %s\n",
+ priv->dma_cap.frpsel ? "Y" : "N");
+ seq_printf(seq, "\tEnhanced Addressing: %d\n",
+ priv->dma_cap.addr64);
+ seq_printf(seq, "\tReceive Side Scaling: %s\n",
+ priv->dma_cap.rssen ? "Y" : "N");
+ seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
+ priv->dma_cap.vlhash ? "Y" : "N");
+ seq_printf(seq, "\tSplit Header: %s\n",
+ priv->dma_cap.sphen ? "Y" : "N");
+ seq_printf(seq, "\tVLAN TX Insertion: %s\n",
+ priv->dma_cap.vlins ? "Y" : "N");
+ seq_printf(seq, "\tDouble VLAN: %s\n",
+ priv->dma_cap.dvlan ? "Y" : "N");
+ seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
+ priv->dma_cap.l3l4fnum);
+ seq_printf(seq, "\tARP Offloading: %s\n",
+ priv->dma_cap.arpoffsel ? "Y" : "N");
+ seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
+ priv->dma_cap.estsel ? "Y" : "N");
+ seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
+ priv->dma_cap.fpesel ? "Y" : "N");
+ seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
+ priv->dma_cap.tbssel ? "Y" : "N");
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
+
+/* Use network device events to rename debugfs file entries.
+ */
+static int stmmac_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (dev->netdev_ops != &stmmac_netdev_ops)
+ goto done;
+
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ if (priv->dbgfs_dir)
+ priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
+ priv->dbgfs_dir,
+ stmmac_fs_dir,
+ dev->name);
+ break;
+ }
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block stmmac_notifier = {
+ .notifier_call = stmmac_device_event,
+};
+
+static void stmmac_init_fs(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ rtnl_lock();
+
+ /* Create per netdev entries */
+ priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
+
+ /* Entry to report DMA RX/TX rings */
+ debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
+ &stmmac_rings_status_fops);
+
+ /* Entry to report the DMA HW features */
+ debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
+ &stmmac_dma_cap_fops);
+
+ rtnl_unlock();
+}
+
+static void stmmac_exit_fs(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ debugfs_remove_recursive(priv->dbgfs_dir);
+}
+#endif /* CONFIG_DEBUG_FS */
+
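+/* Bitwise little-endian CRC-32 (polynomial 0xedb88320) over the twelve
+ * VID bits covered by VLAN_VID_MASK, starting from an all-ones seed.
+ * stmmac_vlan_update() below derives a 4-bit hash bin from this via
+ * bitrev32(~crc) >> 28.
+ */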
+static u32 stmmac_vid_crc32_le(__le16 vid_le)
+{
+ unsigned char *data = (unsigned char *)&vid_le;
+ unsigned char data_byte = 0;
+ u32 crc = ~0x0;
+ u32 temp = 0;
+ int i, bits;
+
+ bits = get_bitmask_order(VLAN_VID_MASK);
+ for (i = 0; i < bits; i++) {
+ if ((i % 8) == 0)
+ data_byte = data[i / 8];
+
+ temp = ((crc & 1) ^ data_byte) & 1;
+ crc >>= 1;
+ data_byte >>= 1;
+
+ if (temp)
+ crc ^= 0xedb88320;
+ }
+
+ return crc;
+}
+
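+/* Rebuild the VLAN hash filter from the set of active VIDs: each VID
+ * sets one of 16 hash bits.  If the MAC has no VLAN hash support, fall
+ * back to a perfect match on the single (last iterated) VID.
+ */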
+static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
+{
+ u32 crc, hash = 0;
+ __le16 pmatch = 0;
+ int count = 0;
+ u16 vid = 0;
+
+ for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
+ __le16 vid_le = cpu_to_le16(vid);
+ crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
+ hash |= (1 << crc);
+ count++;
+ }
+
+ if (!priv->dma_cap.vlhash) {
+ if (count > 2) /* VID = 0 always passes filter */
+ return -EOPNOTSUPP;
+
+ pmatch = cpu_to_le16(vid);
+ hash = 0;
+ }
+
+ return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
+}
+
+static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ bool is_double = false;
+ int ret;
+
+ if (be16_to_cpu(proto) == ETH_P_8021AD)
+ is_double = true;
+
+ set_bit(vid, priv->active_vlans);
+ ret = stmmac_vlan_update(priv, is_double);
+ if (ret) {
+ clear_bit(vid, priv->active_vlans);
+ return ret;
+ }
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ bool is_double = false;
+ int ret;
+
+ ret = pm_runtime_get_sync(priv->device);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->device);
+ return ret;
+ }
+
+ if (be16_to_cpu(proto) == ETH_P_8021AD)
+ is_double = true;
+
+ clear_bit(vid, priv->active_vlans);
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+ if (ret)
+ goto del_vlan_error;
+ }
+
+ ret = stmmac_vlan_update(priv, is_double);
+
+del_vlan_error:
+ pm_runtime_put(priv->device);
+
+ return ret;
+}
+
+static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
+ case XDP_SETUP_XSK_POOL:
+ return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
+ bpf->xsk.queue_id);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ int i, nxmit = 0;
+ int queue;
+
+ if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ queue = stmmac_xdp_get_tx_queue(priv, cpu);
+ nq = netdev_get_tx_queue(priv->dev, queue);
+
+ __netif_tx_lock(nq, cpu);
+ /* Avoids TX time-out as we are sharing with slow path */
+ txq_trans_cond_update(nq);
+
+ for (i = 0; i < num_frames; i++) {
+ int res;
+
+ res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
+ if (res == STMMAC_XDP_CONSUMED)
+ break;
+
+ nxmit++;
+ }
+
+ if (flags & XDP_XMIT_FLUSH) {
+ stmmac_flush_tx_descriptors(priv, queue);
+ stmmac_tx_timer_arm(priv, queue);
+ }
+
+ __netif_tx_unlock(nq);
+
+ return nxmit;
+}
+
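+/* The helpers below tear down and re-initialize a single RX or TX queue
+ * at runtime; they are used on the AF_XDP (XSK) buffer pool attach and
+ * detach path, so the rest of the interface keeps running.
+ */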
+void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ stmmac_stop_rx_dma(priv, queue);
+ __free_dma_rx_desc_resources(priv, queue);
+}
+
+void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned long flags;
+ u32 buf_size;
+ int ret;
+
+ ret = __alloc_dma_rx_desc_resources(priv, queue);
+ if (ret) {
+ netdev_err(priv->dev, "Failed to alloc RX desc.\n");
+ return;
+ }
+
+ ret = __init_dma_rx_desc_rings(priv, queue, GFP_KERNEL);
+ if (ret) {
+ __free_dma_rx_desc_resources(priv, queue);
+ netdev_err(priv->dev, "Failed to init RX desc.\n");
+ return;
+ }
+
+ stmmac_clear_rx_descriptors(priv, queue);
+
+ stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, rx_q->queue_index);
+
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
+ sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr, rx_q->queue_index);
+
+ if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
+ buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ buf_size,
+ rx_q->queue_index);
+ } else {
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ priv->dma_buf_sz,
+ rx_q->queue_index);
+ }
+
+ stmmac_start_rx_dma(priv, queue);
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
+}
+
+void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ stmmac_stop_tx_dma(priv, queue);
+ __free_dma_tx_desc_resources(priv, queue);
+}
+
+void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned long flags;
+ int ret;
+
+ ret = __alloc_dma_tx_desc_resources(priv, queue);
+ if (ret) {
+ netdev_err(priv->dev, "Failed to alloc TX desc.\n");
+ return;
+ }
+
+ ret = __init_dma_tx_desc_rings(priv, queue);
+ if (ret) {
+ __free_dma_tx_desc_resources(priv, queue);
+ netdev_err(priv->dev, "Failed to init TX desc.\n");
+ return;
+ }
+
+ stmmac_clear_tx_descriptors(priv, queue);
+
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, tx_q->queue_index);
+
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+ tx_q->tx_tail_addr, tx_q->queue_index);
+
+ stmmac_start_tx_dma(priv, queue);
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+}
+
+void stmmac_xdp_release(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 chan;
+
+ /* Disable NAPI process */
+ stmmac_disable_all_queues(priv);
+
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+
+ /* Free the IRQ lines */
+ stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
+
+ /* Stop TX/RX DMA channels */
+ stmmac_stop_all_dma(priv);
+
+ /* Release and free the Rx/Tx resources */
+ free_dma_desc_resources(priv);
+
+ /* Disable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, false);
+
+ /* set trans_start so we don't get spurious
+ * watchdogs during reset
+ */
+ netif_trans_update(dev);
+ netif_carrier_off(dev);
+}
+
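+/* Re-open the data path after an XDP program or XSK pool change: a
+ * trimmed-down variant of stmmac_open() that reallocates the descriptor
+ * rings, reprograms the DMA channels and re-requests the IRQs, while
+ * leaving the PHY/phylink state untouched.
+ */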
+int stmmac_xdp_open(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 dma_csr_ch = max(rx_cnt, tx_cnt);
+ struct stmmac_rx_queue *rx_q;
+ struct stmmac_tx_queue *tx_q;
+ u32 buf_size;
+ bool sph_en;
+ u32 chan;
+ int ret;
+
+ ret = alloc_dma_desc_resources(priv);
+ if (ret < 0) {
+ netdev_err(dev, "%s: DMA descriptors allocation failed\n",
+ __func__);
+ goto dma_desc_error;
+ }
+
+ ret = init_dma_desc_rings(dev, GFP_KERNEL);
+ if (ret < 0) {
+ netdev_err(dev, "%s: DMA descriptors initialization failed\n",
+ __func__);
+ goto init_error;
+ }
+
+ /* DMA CSR Channel configuration */
+ for (chan = 0; chan < dma_csr_ch; chan++)
+ stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+
+ /* Adjust Split header */
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+
+ /* DMA RX Channel Configuration */
+ for (chan = 0; chan < rx_cnt; chan++) {
+ rx_q = &priv->rx_queue[chan];
+
+ stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, chan);
+
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (rx_q->buf_alloc_num *
+ sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr, chan);
+
+ if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
+ buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ buf_size,
+ rx_q->queue_index);
+ } else {
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ priv->dma_buf_sz,
+ rx_q->queue_index);
+ }
+
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+ }
+
+ /* DMA TX Channel Configuration */
+ for (chan = 0; chan < tx_cnt; chan++) {
+ tx_q = &priv->tx_queue[chan];
+
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+ tx_q->tx_tail_addr, chan);
+
+ hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ tx_q->txtimer.function = stmmac_tx_timer;
+ }
+
+ /* Enable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, true);
+
+ /* Start Rx & Tx DMA Channels */
+ stmmac_start_all_dma(priv);
+
+ ret = stmmac_request_irq(dev);
+ if (ret)
+ goto irq_error;
+
+	/* Enable NAPI process */
+ stmmac_enable_all_queues(priv);
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+
+ return 0;
+
+irq_error:
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+
+ stmmac_hw_teardown(dev);
+init_error:
+ free_dma_desc_resources(priv);
+dma_desc_error:
+ return ret;
+}
+
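+/* ndo_xsk_wakeup() callback: kick the rxtx NAPI that services the given
+ * AF_XDP queue.  As noted below, the EQoS core has no per-DMA channel
+ * SW interrupt, so the NAPI is scheduled directly unless it is already
+ * running, in which case a "missed" flag is set instead.
+ */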
+int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct stmmac_rx_queue *rx_q;
+ struct stmmac_tx_queue *tx_q;
+ struct stmmac_channel *ch;
+
+ if (test_bit(STMMAC_DOWN, &priv->state) ||
+ !netif_carrier_ok(priv->dev))
+ return -ENETDOWN;
+
+ if (!stmmac_xdp_is_enabled(priv))
+ return -ENXIO;
+
+ if (queue >= priv->plat->rx_queues_to_use ||
+ queue >= priv->plat->tx_queues_to_use)
+ return -EINVAL;
+
+ rx_q = &priv->rx_queue[queue];
+ tx_q = &priv->tx_queue[queue];
+ ch = &priv->channel[queue];
+
+ if (!rx_q->xsk_pool && !tx_q->xsk_pool)
+ return -ENXIO;
+
+ if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
+ /* EQoS does not have per-DMA channel SW interrupt,
+ * so we schedule RX Napi straight-away.
+ */
+ if (likely(napi_schedule_prep(&ch->rxtx_napi)))
+ __napi_schedule(&ch->rxtx_napi);
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops stmmac_netdev_ops = {
+ .ndo_open = stmmac_open,
+ .ndo_start_xmit = stmmac_xmit,
+ .ndo_stop = stmmac_release,
+ .ndo_change_mtu = stmmac_change_mtu,
+ .ndo_fix_features = stmmac_fix_features,
+ .ndo_set_features = stmmac_set_features,
+ .ndo_set_rx_mode = stmmac_set_rx_mode,
+ .ndo_tx_timeout = stmmac_tx_timeout,
+ .ndo_eth_ioctl = stmmac_ioctl,
+ .ndo_setup_tc = stmmac_setup_tc,
+ .ndo_select_queue = stmmac_select_queue,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = stmmac_poll_controller,
+#endif
+ .ndo_set_mac_address = stmmac_set_mac_address,
+ .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
+ .ndo_bpf = stmmac_bpf,
+ .ndo_xdp_xmit = stmmac_xdp_xmit,
+ .ndo_xsk_wakeup = stmmac_xsk_wakeup,
+};
+
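+/* Recover from a fatal HW error (STMMAC_RESET_REQUESTED) by closing and
+ * re-opening the device under RTNL; the STMMAC_RESETING bit serializes
+ * against concurrent resets.
+ */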
+static void stmmac_reset_subtask(struct stmmac_priv *priv)
+{
+ if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
+ return;
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return;
+
+ netdev_err(priv->dev, "Reset adapter.\n");
+
+ rtnl_lock();
+ netif_trans_update(priv->dev);
+ while (test_and_set_bit(STMMAC_RESETING, &priv->state))
+ usleep_range(1000, 2000);
+
+ set_bit(STMMAC_DOWN, &priv->state);
+ dev_close(priv->dev);
+ dev_open(priv->dev, NULL);
+ clear_bit(STMMAC_DOWN, &priv->state);
+ clear_bit(STMMAC_RESETING, &priv->state);
+ rtnl_unlock();
+}
+
+static void stmmac_service_task(struct work_struct *work)
+{
+ struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
+ service_task);
+
+ stmmac_reset_subtask(priv);
+ clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
+}
+
+/**
+ * stmmac_hw_init - Init the MAC device
+ * @priv: driver private structure
+ * Description: this function is to configure the MAC device according to
+ * some platform parameters or the HW capability register. It prepares the
+ * driver to use either ring or chain modes and to setup either enhanced or
+ * normal descriptors.
+ */
+static int stmmac_hw_init(struct stmmac_priv *priv)
+{
+ int ret;
+
+ /* dwmac-sun8i only work in chain mode */
+ if (priv->plat->has_sun8i)
+ chain_mode = 1;
+ priv->chain_mode = chain_mode;
+
+ /* Initialize HW Interface */
+ ret = stmmac_hwif_init(priv);
+ if (ret)
+ return ret;
+
+ /* Get the HW capability (new GMAC newer than 3.50a) */
+ priv->hw_cap_support = stmmac_get_hw_features(priv);
+ if (priv->hw_cap_support) {
+ dev_info(priv->device, "DMA HW capability register supported\n");
+
+		/* Some GMAC/DMA configuration fields passed in through
+		 * the platform (e.g. enh_desc, tx_coe) can be overridden
+		 * with the values from the HW capability register, if
+		 * supported.
+		 */
+ priv->plat->enh_desc = priv->dma_cap.enh_desc;
+ priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
+ !priv->plat->use_phy_wol;
+ priv->hw->pmt = priv->plat->pmt;
+ if (priv->dma_cap.hash_tb_sz) {
+ priv->hw->multicast_filter_bins =
+ (BIT(priv->dma_cap.hash_tb_sz) << 5);
+ priv->hw->mcast_bits_log2 =
+ ilog2(priv->hw->multicast_filter_bins);
+ }
+
+ /* TXCOE doesn't work in thresh DMA mode */
+ if (priv->plat->force_thresh_dma_mode)
+ priv->plat->tx_coe = 0;
+ else
+ priv->plat->tx_coe = priv->dma_cap.tx_coe;
+
+ /* In case of GMAC4 rx_coe is from HW cap register. */
+ priv->plat->rx_coe = priv->dma_cap.rx_coe;
+
+ if (priv->dma_cap.rx_coe_type2)
+ priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
+ else if (priv->dma_cap.rx_coe_type1)
+ priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
+
+ } else {
+ dev_info(priv->device, "No HW DMA feature register supported\n");
+ }
+
+ if (priv->plat->rx_coe) {
+ priv->hw->rx_csum = priv->plat->rx_coe;
+ dev_info(priv->device, "RX Checksum Offload Engine supported\n");
+ if (priv->synopsys_id < DWMAC_CORE_4_00)
+ dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
+ }
+ if (priv->plat->tx_coe)
+ dev_info(priv->device, "TX Checksum insertion supported\n");
+
+ if (priv->plat->pmt) {
+ dev_info(priv->device, "Wake-Up On Lan supported\n");
+ device_set_wakeup_capable(priv->device, 1);
+ }
+
+ if (priv->dma_cap.tsoen)
+ dev_info(priv->device, "TSO supported\n");
+
+ priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
+ priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
+
+ /* Run HW quirks, if any */
+ if (priv->hwif_quirks) {
+ ret = priv->hwif_quirks(priv);
+ if (ret)
+ return ret;
+ }
+
+	/* Rx Watchdog is available in cores newer than 3.40.
+	 * In some cases, for example on buggy HW, this feature
+	 * has to be disabled; this can be done by passing the
+	 * riwt_off field from the platform.
+	 */
+ if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
+ (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
+ priv->use_riwt = 1;
+ dev_info(priv->device,
+ "Enable RX Mitigation via HW Watchdog Timer\n");
+ }
+
+ return 0;
+}
+
+static void stmmac_napi_add(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 queue, maxq;
+
+ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
+
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ ch->priv_data = priv;
+ ch->index = queue;
+ spin_lock_init(&ch->lock);
+
+ if (queue < priv->plat->rx_queues_to_use) {
+ netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
+ NAPI_POLL_WEIGHT);
+ }
+ if (queue < priv->plat->tx_queues_to_use) {
+ netif_tx_napi_add(dev, &ch->tx_napi,
+ stmmac_napi_poll_tx,
+ NAPI_POLL_WEIGHT);
+ }
+ if (queue < priv->plat->rx_queues_to_use &&
+ queue < priv->plat->tx_queues_to_use) {
+ netif_napi_add(dev, &ch->rxtx_napi,
+ stmmac_napi_poll_rxtx,
+ NAPI_POLL_WEIGHT);
+ }
+ }
+}
+
+static void stmmac_napi_del(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 queue, maxq;
+
+ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
+
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ if (queue < priv->plat->rx_queues_to_use)
+ netif_napi_del(&ch->rx_napi);
+ if (queue < priv->plat->tx_queues_to_use)
+ netif_napi_del(&ch->tx_napi);
+ if (queue < priv->plat->rx_queues_to_use &&
+ queue < priv->plat->tx_queues_to_use) {
+ netif_napi_del(&ch->rxtx_napi);
+ }
+ }
+}
+
+int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (netif_running(dev))
+ stmmac_release(dev);
+
+ stmmac_napi_del(dev);
+
+ priv->plat->rx_queues_to_use = rx_cnt;
+ priv->plat->tx_queues_to_use = tx_cnt;
+
+ stmmac_napi_add(dev);
+
+ if (netif_running(dev))
+ ret = stmmac_open(dev);
+
+ return ret;
+}
+
+int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (netif_running(dev))
+ stmmac_release(dev);
+
+ priv->dma_rx_size = rx_size;
+ priv->dma_tx_size = tx_size;
+
+ if (netif_running(dev))
+ ret = stmmac_open(dev);
+
+ return ret;
+}
+
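+/* Frame Preemption (FPE) link-partner verification.  Preemption is only
+ * enabled once both stations have answered a Verify mPacket: the task
+ * below polls the local and link-partner state machines (up to 20 times,
+ * 500 ms apart), re-sending MPACKET_VERIFY as needed, and programs the
+ * hardware once both sides report FPE_STATE_ENTERING_ON.
+ */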
+#define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
+static void stmmac_fpe_lp_task(struct work_struct *work)
+{
+ struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
+ fpe_task);
+ struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
+ enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
+ enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
+ bool *hs_enable = &fpe_cfg->hs_enable;
+ bool *enable = &fpe_cfg->enable;
+ int retries = 20;
+
+ while (retries-- > 0) {
+ /* Bail out immediately if FPE handshake is OFF */
+ if (*lo_state == FPE_STATE_OFF || !*hs_enable)
+ break;
+
+ if (*lo_state == FPE_STATE_ENTERING_ON &&
+ *lp_state == FPE_STATE_ENTERING_ON) {
+ stmmac_fpe_configure(priv, priv->ioaddr,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ *enable);
+
+ netdev_info(priv->dev, "configured FPE\n");
+
+ *lo_state = FPE_STATE_ON;
+ *lp_state = FPE_STATE_ON;
+ netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
+ break;
+ }
+
+ if ((*lo_state == FPE_STATE_CAPABLE ||
+ *lo_state == FPE_STATE_ENTERING_ON) &&
+ *lp_state != FPE_STATE_ON) {
+			netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
+ *lo_state, *lp_state);
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+ MPACKET_VERIFY);
+ }
+ /* Sleep then retry */
+ msleep(500);
+ }
+
+ clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
+}
+
+void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
+{
+ if (priv->plat->fpe_cfg->hs_enable != enable) {
+ if (enable) {
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+ MPACKET_VERIFY);
+ } else {
+ priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
+ priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
+ }
+
+ priv->plat->fpe_cfg->hs_enable = enable;
+ }
+}
+
+/**
+ * stmmac_dvr_probe
+ * @device: device pointer
+ * @plat_dat: platform data pointer
+ * @res: stmmac resource pointer
+ * Description: this is the main probe function, used to allocate the
+ * netdev via alloc_etherdev and to set up the private structure.
+ * Return:
+ * returns 0 on success, otherwise errno.
+ */
+int stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res)
+{
+ struct net_device *ndev = NULL;
+ struct stmmac_priv *priv;
+ u32 rxq;
+ int i, ret = 0;
+
+ ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
+ MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, device);
+
+ priv = netdev_priv(ndev);
+ priv->device = device;
+ priv->dev = ndev;
+
+ stmmac_set_ethtool_ops(ndev);
+ priv->pause = pause;
+ priv->plat = plat_dat;
+ priv->ioaddr = res->addr;
+ priv->dev->base_addr = (unsigned long)res->addr;
+ priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
+
+ priv->dev->irq = res->irq;
+ priv->wol_irq = res->wol_irq;
+ priv->lpi_irq = res->lpi_irq;
+ priv->sfty_ce_irq = res->sfty_ce_irq;
+ priv->sfty_ue_irq = res->sfty_ue_irq;
+ for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
+ priv->rx_irq[i] = res->rx_irq[i];
+ for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
+ priv->tx_irq[i] = res->tx_irq[i];
+
+ if (!is_zero_ether_addr(res->mac))
+ eth_hw_addr_set(priv->dev, res->mac);
+
+ dev_set_drvdata(device, priv->dev);
+
+ /* Verify driver arguments */
+ stmmac_verify_args();
+
+ priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
+ if (!priv->af_xdp_zc_qps)
+ return -ENOMEM;
+
+ /* Allocate workqueue */
+ priv->wq = create_singlethread_workqueue("stmmac_wq");
+ if (!priv->wq) {
+ dev_err(priv->device, "failed to create workqueue\n");
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&priv->service_task, stmmac_service_task);
+
+ /* Initialize Link Partner FPE workqueue */
+ INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
+
+ /* Override with kernel parameters if supplied XXX CRS XXX
+ * this needs to have multiple instances
+ */
+ if ((phyaddr >= 0) && (phyaddr <= 31))
+ priv->plat->phy_addr = phyaddr;
+
+ if (priv->plat->stmmac_rst) {
+ ret = reset_control_assert(priv->plat->stmmac_rst);
+ reset_control_deassert(priv->plat->stmmac_rst);
+		/* Some reset controllers have only a reset callback instead
+		 * of an assert + deassert callback pair.
+		 */
+ if (ret == -ENOTSUPP)
+ reset_control_reset(priv->plat->stmmac_rst);
+ }
+
+ ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
+ if (ret == -ENOTSUPP)
+ dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
+ ERR_PTR(ret));
+
+ /* Init MAC and get the capabilities */
+ ret = stmmac_hw_init(priv);
+ if (ret)
+ goto error_hw_init;
+
+ /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
+ */
+ if (priv->synopsys_id < DWMAC_CORE_5_20)
+ priv->plat->dma_cfg->dche = false;
+
+ stmmac_check_ether_addr(priv);
+
+ ndev->netdev_ops = &stmmac_netdev_ops;
+
+ ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM;
+
+	ret = stmmac_tc_init(priv, priv);
+	if (!ret)
+		ndev->hw_features |= NETIF_F_HW_TC;
+
+ if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ if (priv->plat->has_gmac4)
+ ndev->hw_features |= NETIF_F_GSO_UDP_L4;
+ priv->tso = true;
+ dev_info(priv->device, "TSO feature enabled\n");
+ }
+
+ if (priv->dma_cap.sphen) {
+ ndev->hw_features |= NETIF_F_GRO;
+ priv->sph_cap = true;
+ priv->sph = priv->sph_cap;
+ dev_info(priv->device, "SPH feature enabled\n");
+ }
+
+	/* The IP register MAC_HW_Feature1[ADDR64] only defines 32/40/64
+	 * bit widths, but some SoCs support other widths: the i.MX8MP,
+	 * for example, supports 34 bits, which maps to a 40-bit width in
+	 * MAC_HW_Feature1[ADDR64].  So overwrite dma_cap.addr64
+	 * according to the real HW design.
+	 */
+ if (priv->plat->addr64)
+ priv->dma_cap.addr64 = priv->plat->addr64;
+
+ if (priv->dma_cap.addr64) {
+ ret = dma_set_mask_and_coherent(device,
+ DMA_BIT_MASK(priv->dma_cap.addr64));
+ if (!ret) {
+ dev_info(priv->device, "Using %d bits DMA width\n",
+ priv->dma_cap.addr64);
+
+ /*
+ * If more than 32 bits can be addressed, make sure to
+ * enable enhanced addressing mode.
+ */
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ priv->plat->dma_cfg->eame = true;
+ } else {
+ ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(priv->device, "Failed to set DMA Mask\n");
+ goto error_hw_init;
+ }
+
+ priv->dma_cap.addr64 = 32;
+ }
+ }
+
+ ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+ ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
+#ifdef STMMAC_VLAN_TAG_USED
+ /* Both mac100 and gmac support receive VLAN tag detection */
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
+ if (priv->dma_cap.vlhash) {
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
+ }
+ if (priv->dma_cap.vlins) {
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+ if (priv->dma_cap.dvlan)
+ ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
+ }
+#endif
+ priv->msg_enable = netif_msg_init(debug, default_msg_level);
+
+ /* Initialize RSS */
+ rxq = priv->plat->rx_queues_to_use;
+ netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
+
+ if (priv->dma_cap.rssen && priv->plat->rss_en)
+ ndev->features |= NETIF_F_RXHASH;
+
+ /* MTU range: 46 - hw-specific max */
+ ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
+ if (priv->plat->has_xgmac)
+ ndev->max_mtu = XGMAC_JUMBO_LEN;
+ else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
+ ndev->max_mtu = JUMBO_LEN;
+ else
+ ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
+	/* Do not overwrite ndev->max_mtu if plat->maxmtu is above
+	 * ndev->max_mtu, or if plat->maxmtu is below ndev->min_mtu,
+	 * which is an invalid range.
+	 */
+ if ((priv->plat->maxmtu < ndev->max_mtu) &&
+ (priv->plat->maxmtu >= ndev->min_mtu))
+ ndev->max_mtu = priv->plat->maxmtu;
+ else if (priv->plat->maxmtu < ndev->min_mtu)
+ dev_warn(priv->device,
+ "%s: warning: maxmtu having invalid value (%d)\n",
+ __func__, priv->plat->maxmtu);
+
+ if (flow_ctrl)
+ priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
+
+ /* Setup channels NAPI */
+ stmmac_napi_add(ndev);
+
+ mutex_init(&priv->lock);
+
+	/* If a specific clk_csr value is passed from the platform,
+	 * the CSR Clock Range selection cannot be changed at run-time
+	 * and is fixed.  Otherwise the driver will try to set the MDC
+	 * clock dynamically according to the actual csr clock input.
+	 */
+ if (priv->plat->clk_csr >= 0)
+ priv->clk_csr = priv->plat->clk_csr;
+ else
+ stmmac_clk_csr_set(priv);
+
+ stmmac_check_pcs_mode(priv);
+
+ pm_runtime_get_noresume(device);
+ pm_runtime_set_active(device);
+<<<<<<<
+ if (!pm_runtime_enabled(device))
+ pm_runtime_enable(device);
+=======
+ pm_runtime_enable(device);
+ /*
+ * Prevent runtime pm from being ON by default. Users can enable
+ * it using power/control in sysfs.
+ */
+ pm_runtime_forbid(device);
+>>>>>>>
+
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI) {
+ /* MDIO bus Registration */
+ ret = stmmac_mdio_register(ndev);
+ if (ret < 0) {
+ dev_err(priv->device,
+ "%s: MDIO bus (id: %d) registration failed",
+ __func__, priv->plat->bus_id);
+ goto error_mdio_register;
+ }
+ }
+
+ if (priv->plat->speed_mode_2500)
+ priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
+
+ if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
+ ret = stmmac_xpcs_setup(priv->mii);
+ if (ret)
+ goto error_xpcs_setup;
+ }
+
+ ret = stmmac_phy_setup(priv);
+ if (ret) {
+ netdev_err(ndev, "failed to setup phy (%d)\n", ret);
+ goto error_phy_setup;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(priv->device, "%s: ERROR %i registering the device\n",
+ __func__, ret);
+ goto error_netdev_register;
+ }
+
+ if (priv->plat->serdes_powerup) {
+ ret = priv->plat->serdes_powerup(ndev,
+ priv->plat->bsp_priv);
+
+ if (ret < 0)
+ goto error_serdes_powerup;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ stmmac_init_fs(ndev);
+#endif
+
+ if (priv->plat->dump_debug_regs)
+ priv->plat->dump_debug_regs(priv->plat->bsp_priv);
+
+ /* Let pm_runtime_put() disable the clocks.
+ * If CONFIG_PM is not enabled, the clocks will stay powered.
+ */
+ pm_runtime_put(device);
+
+ return ret;
+
+error_serdes_powerup:
+ unregister_netdev(ndev);
+error_netdev_register:
+ phylink_destroy(priv->phylink);
+error_xpcs_setup:
+error_phy_setup:
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI)
+ stmmac_mdio_unregister(ndev);
+error_mdio_register:
+ stmmac_napi_del(ndev);
+error_hw_init:
+ destroy_workqueue(priv->wq);
+ bitmap_free(priv->af_xdp_zc_qps);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
+
+/**
+ * stmmac_dvr_remove
+ * @dev: device pointer
+ * Description: this function resets the TX/RX processes, disables the MAC
+ * RX/TX, changes the link status and releases the DMA descriptor rings.
+ */
+int stmmac_dvr_remove(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+	netdev_info(priv->dev, "%s: removing driver\n", __func__);
+
+ stmmac_stop_all_dma(priv);
+ stmmac_mac_set(priv, priv->ioaddr, false);
+ netif_carrier_off(ndev);
+ unregister_netdev(ndev);
+
+	/* Serdes power down needs to happen after the VLAN filter
+	 * is deleted, which is triggered by unregister_netdev().
+	 */
+ if (priv->plat->serdes_powerdown)
+ priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
+
+#ifdef CONFIG_DEBUG_FS
+ stmmac_exit_fs(ndev);
+#endif
+ phylink_destroy(priv->phylink);
+ if (priv->plat->stmmac_rst)
+ reset_control_assert(priv->plat->stmmac_rst);
+ reset_control_assert(priv->plat->stmmac_ahb_rst);
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI)
+ stmmac_mdio_unregister(ndev);
+ destroy_workqueue(priv->wq);
+ mutex_destroy(&priv->lock);
+ bitmap_free(priv->af_xdp_zc_qps);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
+
+/**
+ * stmmac_suspend - suspend callback
+ * @dev: device pointer
+ * Description: this function suspends the device; it is called by the
+ * platform driver to stop the network queue, release the resources,
+ * program the PMT register (for WoL) and clean up driver state.
+ */
+int stmmac_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ u32 chan;
+
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ mutex_lock(&priv->lock);
+
+ netif_device_detach(ndev);
+
+ stmmac_disable_all_queues(priv);
+
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->tx_queue[chan].txtimer);
+
+ if (priv->eee_enabled) {
+ priv->tx_path_in_lpi_mode = false;
+ del_timer_sync(&priv->eee_ctrl_timer);
+ }
+
+ /* Stop TX/RX DMA */
+ stmmac_stop_all_dma(priv);
+
+ if (priv->plat->serdes_powerdown)
+ priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
+
+ /* Enable Power down mode by programming the PMT regs */
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ stmmac_pmt(priv, priv->hw, priv->wolopts);
+ priv->irq_wake = 1;
+ } else {
+ stmmac_mac_set(priv, priv->ioaddr, false);
+ pinctrl_pm_select_sleep_state(priv->device);
+ }
+
+ mutex_unlock(&priv->lock);
+
+ rtnl_lock();
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ phylink_suspend(priv->phylink, true);
+ } else {
+ if (device_may_wakeup(priv->device))
+ phylink_speed_down(priv->phylink, false);
+ phylink_suspend(priv->phylink, false);
+ }
+ rtnl_unlock();
+
+ if (priv->dma_cap.fpesel) {
+ /* Disable FPE */
+ stmmac_fpe_configure(priv, priv->ioaddr,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use, false);
+
+ stmmac_fpe_handshake(priv, false);
+ stmmac_fpe_stop_wq(priv);
+ }
+
+ priv->speed = SPEED_UNKNOWN;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_suspend);
+
+/**
+ * stmmac_reset_queues_param - reset queue parameters
+ * @priv: device pointer
+ */
+static void stmmac_reset_queues_param(struct stmmac_priv *priv)
+{
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < rx_cnt; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+ rx_q->cur_rx = 0;
+ rx_q->dirty_rx = 0;
+ }
+
+ for (queue = 0; queue < tx_cnt; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+ tx_q->cur_tx = 0;
+ tx_q->dirty_tx = 0;
+ tx_q->mss = 0;
+
+ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
+ }
+}
+
+/**
+ * stmmac_resume - resume callback
+ * @dev: device pointer
+ * Description: on resume this function is invoked to set up the DMA and core
+ * in a usable state.
+ */
+int stmmac_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ /* Power Down bit, into the PM register, is cleared
+ * automatically as soon as a magic packet or a Wake-up frame
+ * is received. Anyway, it's better to manually clear
+ * this bit because it can generate problems while resuming
+ * from another devices (e.g. serial console).
+ */
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ mutex_lock(&priv->lock);
+ stmmac_pmt(priv, priv->hw, 0);
+ mutex_unlock(&priv->lock);
+ priv->irq_wake = 0;
+ } else {
+ pinctrl_pm_select_default_state(priv->device);
+ /* reset the phy so that it's ready */
+ if (priv->mii)
+ stmmac_mdio_reset(priv->mii);
+ }
+
+ if (priv->plat->serdes_powerup) {
+ ret = priv->plat->serdes_powerup(ndev,
+ priv->plat->bsp_priv);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ rtnl_lock();
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ phylink_resume(priv->phylink);
+ } else {
+ phylink_resume(priv->phylink);
+ if (device_may_wakeup(priv->device))
+ phylink_speed_up(priv->phylink);
+ }
+ rtnl_unlock();
+
+ rtnl_lock();
+ mutex_lock(&priv->lock);
+
+ stmmac_reset_queues_param(priv);
+
+ stmmac_free_tx_skbufs(priv);
+ stmmac_clear_descriptors(priv);
+
+ stmmac_hw_setup(ndev, false);
+ stmmac_init_coalesce(priv);
+ stmmac_set_rx_mode(ndev);
+
+ stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
+
+ stmmac_enable_all_queues(priv);
+
+ mutex_unlock(&priv->lock);
+ rtnl_unlock();
+
+ netif_device_attach(ndev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_resume);
+
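+/* When built in (not as a module), options can be passed on the kernel
+ * command line as "stmmaceth=" key:value pairs, e.g.
+ * stmmaceth=debug:16,eee_timer:2000, overriding the module parameters.
+ */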
+#ifndef MODULE
+static int __init stmmac_cmdline_opt(char *str)
+{
+ char *opt;
+
+ if (!str || !*str)
+ return -EINVAL;
+ while ((opt = strsep(&str, ",")) != NULL) {
+ if (!strncmp(opt, "debug:", 6)) {
+ if (kstrtoint(opt + 6, 0, &debug))
+ goto err;
+ } else if (!strncmp(opt, "phyaddr:", 8)) {
+ if (kstrtoint(opt + 8, 0, &phyaddr))
+ goto err;
+ } else if (!strncmp(opt, "buf_sz:", 7)) {
+ if (kstrtoint(opt + 7, 0, &buf_sz))
+ goto err;
+ } else if (!strncmp(opt, "tc:", 3)) {
+ if (kstrtoint(opt + 3, 0, &tc))
+ goto err;
+ } else if (!strncmp(opt, "watchdog:", 9)) {
+ if (kstrtoint(opt + 9, 0, &watchdog))
+ goto err;
+ } else if (!strncmp(opt, "flow_ctrl:", 10)) {
+ if (kstrtoint(opt + 10, 0, &flow_ctrl))
+ goto err;
+ } else if (!strncmp(opt, "pause:", 6)) {
+ if (kstrtoint(opt + 6, 0, &pause))
+ goto err;
+ } else if (!strncmp(opt, "eee_timer:", 10)) {
+ if (kstrtoint(opt + 10, 0, &eee_timer))
+ goto err;
+ } else if (!strncmp(opt, "chain_mode:", 11)) {
+ if (kstrtoint(opt + 11, 0, &chain_mode))
+ goto err;
+ }
+ }
+ return 0;
+
+err:
+	pr_err("%s: ERROR broken module parameter conversion\n", __func__);
+ return -EINVAL;
+}
+
+__setup("stmmaceth=", stmmac_cmdline_opt);
+#endif /* MODULE */
+
+static int __init stmmac_init(void)
+{
+#ifdef CONFIG_DEBUG_FS
+ /* Create debugfs main directory if it doesn't exist yet */
+ if (!stmmac_fs_dir)
+ stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
+ register_netdevice_notifier(&stmmac_notifier);
+#endif
+
+ return 0;
+}
+
+static void __exit stmmac_exit(void)
+{
+#ifdef CONFIG_DEBUG_FS
+ unregister_netdevice_notifier(&stmmac_notifier);
+ debugfs_remove_recursive(stmmac_fs_dir);
+#endif
+}
+
+module_init(stmmac_init)
+module_exit(stmmac_exit)
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/rr-cache/8fa1921511a4999681ff27bc4fc8fbe723977a4f/thisimage b/rr-cache/8fa1921511a4999681ff27bc4fc8fbe723977a4f/thisimage
index a094835..cddaec2 100644
--- a/rr-cache/8fa1921511a4999681ff27bc4fc8fbe723977a4f/thisimage
+++ b/rr-cache/8fa1921511a4999681ff27bc4fc8fbe723977a4f/thisimage
@@ -73,6 +73,14 @@ struct wmi_tlv_dma_buf_release_parse {
bool meta_data_done;
};
+struct wmi_tlv_fw_stats_parse {
+ const struct wmi_stats_event *ev;
+ const struct wmi_per_chain_rssi_stats *rssi;
+ struct ath11k_fw_stats *stats;
+ int rssi_num;
+ bool chain_rssi_done;
+};
+
static const struct wmi_tlv_policy wmi_tlv_policies[] = {
[WMI_TAG_ARRAY_BYTE]
= { .min_len = 0 },
@@ -120,6 +128,8 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
= { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
[WMI_TAG_STATS_EVENT]
= { .min_len = sizeof(struct wmi_stats_event) },
+ [WMI_TAG_RFKILL_EVENT] = {
+ .min_len = sizeof(struct wmi_rfkill_state_change_ev) },
[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
= { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
[WMI_TAG_HOST_SWFDA_EVENT] = {
@@ -128,6 +138,12 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
[WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
.min_len = sizeof(struct wmi_vdev_delete_resp_event) },
+ [WMI_TAG_OBSS_COLOR_COLLISION_EVT] = {
+ .min_len = sizeof(struct wmi_obss_color_collision_event) },
+ [WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
+ .min_len = sizeof(struct wmi_11d_new_cc_ev) },
+ [WMI_TAG_PER_CHAIN_RSSI_STATS] = {
+ .min_len = sizeof(struct wmi_per_chain_rssi_stats) },
};
#define PRIMAP(_hw_mode_) \
@@ -249,6 +265,8 @@ static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi *wmi, struct sk_buf
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
cmd_hdr->cmd_id = cmd;
+ trace_ath11k_wmi_cmd(ab, cmd_id, skb->data, skb->len);
+
memset(skb_cb, 0, sizeof(*skb_cb));
ret = ath11k_htc_send(&ab->htc, wmi->eid, skb);
@@ -267,21 +285,39 @@ int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
{
struct ath11k_wmi_base *wmi_sc = wmi->wmi_ab;
int ret = -EOPNOTSUPP;
+ struct ath11k_base *ab = wmi_sc->ab;
might_sleep();
- wait_event_timeout(wmi_sc->tx_credits_wq, ({
- ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
+ if (ab->hw_params.credit_flow) {
+ wait_event_timeout(wmi_sc->tx_credits_wq, ({
+ ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
- if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH, &wmi_sc->ab->dev_flags))
- ret = -ESHUTDOWN;
+ if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH,
+ &wmi_sc->ab->dev_flags))
+ ret = -ESHUTDOWN;
- (ret != -EAGAIN);
- }), WMI_SEND_TIMEOUT_HZ);
+ (ret != -EAGAIN);
+ }), WMI_SEND_TIMEOUT_HZ);
+ } else {
+ wait_event_timeout(wmi->tx_ce_desc_wq, ({
+ ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
+
+ if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH,
+ &wmi_sc->ab->dev_flags))
+ ret = -ESHUTDOWN;
+
+ (ret != -ENOBUFS);
+ }), WMI_SEND_TIMEOUT_HZ);
+ }
if (ret == -EAGAIN)
ath11k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id);
+ if (ret == -ENOBUFS)
+ ath11k_warn(wmi_sc->ab, "ce desc not available for wmi command %d\n",
+ cmd_id);
+
return ret;
}
@@ -315,6 +351,7 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
struct ath11k_pdev *pdev)
{
struct wmi_mac_phy_capabilities *mac_phy_caps;
+ struct ath11k_base *ab = wmi_handle->wmi_ab->ab;
struct ath11k_band_cap *cap_band;
struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
u32 phy_map;
@@ -346,6 +383,10 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
pdev->pdev_id = mac_phy_caps->pdev_id;
pdev_cap->supported_bands |= mac_phy_caps->supported_bands;
pdev_cap->ampdu_density = mac_phy_caps->ampdu_density;
+ ab->target_pdev_ids[ab->target_pdev_count].supported_bands =
+ mac_phy_caps->supported_bands;
+ ab->target_pdev_ids[ab->target_pdev_count].pdev_id = mac_phy_caps->pdev_id;
+ ab->target_pdev_count++;
/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
* band to band for a single radio, need to see how this should be
@@ -485,6 +526,8 @@ static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab,
cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
cap->num_msdu_desc = ev->num_msdu_desc;
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi sys cap info 0x%x\n", cap->sys_cap_info);
+
return 0;
}
@@ -1244,7 +1287,8 @@ int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id,
return ret;
}
-int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable)
+int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id,
+ enum wmi_sta_ps_mode psmode)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct wmi_pdev_set_ps_mode_cmd *cmd;
@@ -1259,7 +1303,7 @@ int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable)
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_POWERSAVE_MODE_CMD) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
- cmd->sta_ps_mode = enable;
+ cmd->sta_ps_mode = psmode;
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
if (ret) {
@@ -1269,7 +1313,7 @@ int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id, u32 enable)
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"WMI vdev set psmode %d vdev id %d\n",
- enable, vdev_id);
+ psmode, vdev_id);
return ret;
}
@@ -1612,6 +1656,15 @@ int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
void *ptr;
int ret, len;
size_t aligned_len = roundup(bcn->len, 4);
+ struct ieee80211_vif *vif;
+ struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev_id);
+
+ if (!arvif) {
+ ath11k_warn(ar->ab, "failed to find arvif with vdev id %d\n", vdev_id);
+ return -EINVAL;
+ }
+
+ vif = arvif->vif;
len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
@@ -1624,8 +1677,12 @@ int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->tim_ie_offset = offs->tim_offset;
- cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0];
- cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1];
+
+ if (vif->csa_active) {
+ cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0];
+ cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1];
+ }
+
cmd->buf_len = bcn->len;
ptr = skb->data + sizeof(*cmd);
@@ -1689,7 +1746,8 @@ int ath11k_wmi_vdev_install_key(struct ath11k *ar,
tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, key_len_aligned);
- memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned);
+ if (arg->key_data)
+ memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned);
ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
if (ret) {
@@ -1762,7 +1820,7 @@ ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
cmd->peer_flags |= WMI_PEER_AUTH;
if (param->need_ptk_4_way) {
cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
- if (!hw_crypto_disabled)
+ if (!hw_crypto_disabled && param->is_assoc)
cmd->peer_flags &= ~WMI_PEER_AUTH;
}
if (param->need_gtk_2_way)
@@ -2069,7 +2127,7 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
void *ptr;
int i, ret, len;
u32 *tmp_ptr;
- u8 extraie_len_with_pad = 0;
+ u16 extraie_len_with_pad = 0;
struct hint_short_ssid *s_ssid = NULL;
struct hint_bssid *hint_bssid = NULL;
@@ -2088,7 +2146,7 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
len += sizeof(*bssid) * params->num_bssid;
len += TLV_HDR_SIZE;
- if (params->extraie.len)
+ if (params->extraie.len && params->extraie.len <= 0xFFFF)
extraie_len_with_pad =
roundup(params->extraie.len, sizeof(u32));
len += extraie_len_with_pad;
@@ -2137,6 +2195,8 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
cmd->num_ssids = params->num_ssids;
cmd->ie_len = params->extraie.len;
cmd->n_probes = params->n_probes;
+ ether_addr_copy(cmd->mac_addr.addr, params->mac_addr.addr);
+ ether_addr_copy(cmd->mac_mask.addr, params->mac_mask.addr);
ptr += sizeof(*cmd);
@@ -2195,7 +2255,7 @@ int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
FIELD_PREP(WMI_TLV_LEN, len);
ptr += TLV_HDR_SIZE;
- if (params->extraie.len)
+ if (extraie_len_with_pad)
memcpy(ptr, params->extraie.ptr,
params->extraie.len);
@@ -2386,6 +2446,8 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
tchan_info->reg_class_id);
*reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
tchan_info->antennamax);
+ *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
+ tchan_info->maxregpower);
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
@@ -2754,6 +2816,42 @@ out:
return ret;
}
+int ath11k_wmi_send_set_current_country_cmd(struct ath11k *ar,
+ struct wmi_set_current_country_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_set_current_country_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_current_country_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SET_CURRENT_COUNTRY_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(&cmd->new_alpha2, &param->alpha2, 3);
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "set current country pdev id %d alpha2 %c%c\n",
+ ar->pdev->pdev_id,
+ param->alpha2[0],
+ param->alpha2[1]);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
int
ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
struct thermal_mitigation_params *param)
@@ -2818,6 +2916,75 @@ ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
return ret;
}
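+
+/* Start firmware-based 802.11d country detection on the given vdev.
+ * Results are reported back via the 11d new-country WMI event, whose
+ * TLV policy is registered above.
+ */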
+int ath11k_wmi_send_11d_scan_start_cmd(struct ath11k *ar,
+ struct wmi_11d_scan_start_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_11d_scan_start_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_11d_scan_start_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_START_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->vdev_id;
+ cmd->scan_period_msec = param->scan_period_msec;
+ cmd->start_interval_msec = param->start_interval_msec;
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+		   "send 11d scan start vdev id %d period %d ms interval %d ms\n",
+ cmd->vdev_id,
+ cmd->scan_period_msec,
+ cmd->start_interval_msec);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath11k_wmi_send_11d_scan_stop_cmd(struct ath11k *ar, u32 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_11d_scan_stop_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_11d_scan_stop_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_STOP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "send 11d scan stop vdev id %d\n",
+ cmd->vdev_id);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
@@ -3428,6 +3595,56 @@ int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval,
}
static void
+ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_obss_color_collision_event *ev;
+ struct ath11k_vif *arvif;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ rcu_read_lock();
+
+ ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch obss color collision ev");
+ goto exit;
+ }
+
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "failed to find arvif with vedv id %d in obss_color_collision_event\n",
+ ev->vdev_id);
+ goto exit;
+ }
+
+ switch (ev->evt_type) {
+ case WMI_BSS_COLOR_COLLISION_DETECTION:
+ ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap);
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
+ ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
+ break;
+ case WMI_BSS_COLOR_COLLISION_DISABLE:
+ case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY:
+ case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE:
+ break;
+ default:
+ ath11k_warn(ab, "received unknown obss color collision detection event\n");
+ }
+
+exit:
+ kfree(tb);
+ rcu_read_unlock();
+}
+
+static void
ath11k_fill_band_to_mac_param(struct ath11k_base *soc,
struct wmi_host_pdev_band_to_mac *band_to_mac)
{
@@ -4144,6 +4361,7 @@ static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy;
soc->num_radios = 0;
+ soc->target_pdev_count = 0;
phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map;
while (phy_id_map && soc->num_radios < MAX_RADIOS) {
@@ -4780,6 +4998,7 @@ static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id,
struct sk_buff *msdu;
struct ieee80211_tx_info *info;
struct ath11k_skb_cb *skb_cb;
+ int num_mgmt;
spin_lock_bh(&ar->txmgmt_idr_lock);
msdu = idr_find(&ar->txmgmt_idr, desc_id);
@@ -4803,10 +5022,19 @@ static int wmi_process_mgmt_tx_comp(struct ath11k *ar, u32 desc_id,
ieee80211_tx_status_irqsafe(ar->hw, msdu);
+ num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
+
/* WARN when we received this event without doing any mgmt tx */
- if (atomic_dec_if_positive(&ar->num_pending_mgmt_tx) < 0)
+ if (num_mgmt < 0)
WARN_ON_ONCE(1);
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmi mgmt tx comp pending %d desc id %d\n",
+ num_mgmt, desc_id);
+
+ if (!num_mgmt)
+ wake_up(&ar->txmgmt_empty_waitq);
+
return 0;
}
@@ -5342,47 +5570,107 @@ ath11k_wmi_pull_bcn_stats(const struct wmi_bcn_stats *src,
dst->tx_bcn_outage_cnt = src->tx_bcn_outage_cnt;
}
-int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
- struct ath11k_fw_stats *stats)
+static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
{
- const void **tb;
- const struct wmi_stats_event *ev;
- const void *data;
- int i, ret;
- u32 len = skb->len;
+ struct wmi_tlv_fw_stats_parse *parse = data;
+ const struct wmi_stats_event *ev = parse->ev;
+ struct ath11k_fw_stats *stats = parse->stats;
+ struct ath11k *ar;
+ struct ath11k_vif *arvif;
+ struct ieee80211_sta *sta;
+ struct ath11k_sta *arsta;
+ const struct wmi_rssi_stats *stats_rssi = (const struct wmi_rssi_stats *)ptr;
+ int j, ret = 0;
- tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, len, GFP_ATOMIC);
- if (IS_ERR(tb)) {
- ret = PTR_ERR(tb);
- ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
- return ret;
+ if (tag != WMI_TAG_RSSI_STATS)
+ return -EPROTO;
+
+ rcu_read_lock();
+
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+ stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi stats vdev id %d mac %pM\n",
+ stats_rssi->vdev_id, stats_rssi->peer_macaddr.addr);
+
+ arvif = ath11k_mac_get_arvif(ar, stats_rssi->vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "not found vif for vdev id %d\n",
+ stats_rssi->vdev_id);
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi stats bssid %pM vif %pK\n",
+ arvif->bssid, arvif->vif);
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ arvif->bssid,
+ NULL);
+ if (!sta) {
+ ath11k_warn(ab, "not found station for bssid %pM\n",
+ arvif->bssid);
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
+ ARRAY_SIZE(stats_rssi->rssi_avg_beacon));
+
+ for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++) {
+ arsta->chain_signal[j] = stats_rssi->rssi_avg_beacon[j];
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi stats beacon rssi[%d] %d data rssi[%d] %d\n",
+ j,
+ stats_rssi->rssi_avg_beacon[j],
+ j,
+ stats_rssi->rssi_avg_data[j]);
}
- ev = tb[WMI_TAG_STATS_EVENT];
- data = tb[WMI_TAG_ARRAY_BYTE];
- if (!ev || !data) {
+exit:
+ rcu_read_unlock();
+ return ret;
+}
+
+static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab,
+ struct wmi_tlv_fw_stats_parse *parse,
+ const void *ptr,
+ u16 len)
+{
+ struct ath11k_fw_stats *stats = parse->stats;
+ const struct wmi_stats_event *ev = parse->ev;
+ struct ath11k *ar;
+ struct ath11k_vif *arvif;
+ struct ieee80211_sta *sta;
+ struct ath11k_sta *arsta;
+ int i, ret = 0;
+ const void *data = ptr;
+
+ if (!ev) {
ath11k_warn(ab, "failed to fetch update stats ev");
- kfree(tb);
return -EPROTO;
}
- ath11k_dbg(ab, ATH11K_DBG_WMI,
- "wmi stats update ev pdev_id %d pdev %i vdev %i bcn %i\n",
- ev->pdev_id,
- ev->num_pdev_stats, ev->num_vdev_stats,
- ev->num_bcn_stats);
-
- stats->pdev_id = ev->pdev_id;
stats->stats_id = 0;
+ rcu_read_lock();
+
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+
for (i = 0; i < ev->num_pdev_stats; i++) {
const struct wmi_pdev_stats *src;
struct ath11k_fw_stats_pdev *dst;
src = data;
if (len < sizeof(*src)) {
- kfree(tb);
- return -EPROTO;
+ ret = -EPROTO;
+ goto exit;
}
stats->stats_id = WMI_REQUEST_PDEV_STAT;
@@ -5406,12 +5694,29 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
src = data;
if (len < sizeof(*src)) {
- kfree(tb);
- return -EPROTO;
+ ret = -EPROTO;
+ goto exit;
}
stats->stats_id = WMI_REQUEST_VDEV_STAT;
+ arvif = ath11k_mac_get_arvif(ar, src->vdev_id);
+ if (arvif) {
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ arvif->bssid,
+ NULL);
+ if (sta) {
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+ arsta->rssi_beacon = src->beacon_snr;
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi stats vdev id %d snr %d\n",
+ src->vdev_id, src->beacon_snr);
+ } else {
+ ath11k_warn(ab, "not found station for bssid %pM\n",
+ arvif->bssid);
+ }
+ }
+
data += sizeof(*src);
len -= sizeof(*src);
@@ -5429,8 +5734,8 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
src = data;
if (len < sizeof(*src)) {
- kfree(tb);
- return -EPROTO;
+ ret = -EPROTO;
+ goto exit;
}
stats->stats_id = WMI_REQUEST_BCN_STAT;
@@ -5446,8 +5751,67 @@ int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
list_add_tail(&dst->list, &stats->bcn);
}
- kfree(tb);
- return 0;
+exit:
+ rcu_read_unlock();
+ return ret;
+}
+
+static int ath11k_wmi_tlv_fw_stats_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_fw_stats_parse *parse = data;
+ int ret = 0;
+
+ switch (tag) {
+ case WMI_TAG_STATS_EVENT:
+ parse->ev = (struct wmi_stats_event *)ptr;
+ parse->stats->pdev_id = parse->ev->pdev_id;
+ break;
+ case WMI_TAG_ARRAY_BYTE:
+ ret = ath11k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
+ break;
+ case WMI_TAG_PER_CHAIN_RSSI_STATS:
+ parse->rssi = (struct wmi_per_chain_rssi_stats *)ptr;
+
+ if (parse->ev->stats_id & WMI_REQUEST_RSSI_PER_CHAIN_STAT)
+ parse->rssi_num = parse->rssi->num_per_chain_rssi_stats;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi stats id 0x%x num chain %d\n",
+ parse->ev->stats_id,
+ parse->rssi_num);
+ break;
+ case WMI_TAG_ARRAY_STRUCT:
+ if (parse->rssi_num && !parse->chain_rssi_done) {
+ ret = ath11k_wmi_tlv_iter(ab, ptr, len,
+ ath11k_wmi_tlv_rssi_chain_parse,
+ parse);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse rssi chain %d\n",
+ ret);
+ return ret;
+ }
+ parse->chain_rssi_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
+ struct ath11k_fw_stats *stats)
+{
+ struct wmi_tlv_fw_stats_parse parse = { };
+
+ stats->stats_id = 0;
+ parse.stats = stats;
+
+ return ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_fw_stats_parse,
+ &parse);
}
size_t ath11k_wmi_fw_stats_num_vdevs(struct list_head *head)
@@ -5809,15 +6173,79 @@ static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
wake_up(&ab->wmi_ab.tx_credits_wq);
}
+static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ const struct wmi_11d_new_cc_ev *ev;
+ const void **tb;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT];
+ if (!ev) {
+ kfree(tb);
+ ath11k_warn(ab, "failed to fetch 11d new cc ev");
+ return -EPROTO;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ memcpy(&ab->new_alpha2, &ev->new_alpha2, 2);
+ spin_unlock_bh(&ab->base_lock);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "wmi 11d new cc %c%c\n",
+ ab->new_alpha2[0],
+ ab->new_alpha2[1]);
+
+ kfree(tb);
+
+ queue_work(ab->workqueue, &ab->update_11d_work);
+
+ return 0;
+}
+
static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
struct sk_buff *skb)
{
+ struct ath11k_pdev_wmi *wmi = NULL;
+ u32 i;
+ u8 wmi_ep_count;
+ u8 eid;
+
+ eid = ATH11K_SKB_CB(skb)->eid;
dev_kfree_skb(skb);
+
+ if (eid >= ATH11K_HTC_EP_COUNT)
+ return;
+
+ wmi_ep_count = ab->htc.wmi_ep_count;
+ if (wmi_ep_count > ab->hw_params.max_radios)
+ return;
+
+ for (i = 0; i < ab->htc.wmi_ep_count; i++) {
+ if (ab->wmi_ab.wmi[i].eid == eid) {
+ wmi = &ab->wmi_ab.wmi[i];
+ break;
+ }
+ }
+
+ if (wmi)
+ wake_up(&wmi->tx_ce_desc_wq);
}
static bool ath11k_reg_is_world_alpha(char *alpha)
{
- return alpha[0] == '0' && alpha[1] == '0';
+ if (alpha[0] == '0' && alpha[1] == '0')
+ return true;
+
+ if (alpha[0] == 'n' && alpha[1] == 'a')
+ return true;
+
+ return false;
}
static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *skb)
@@ -5910,7 +6338,7 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk
ar = ab->pdevs[pdev_idx].ar;
kfree(ab->new_regd[pdev_idx]);
ab->new_regd[pdev_idx] = regd;
- ieee80211_queue_work(ar->hw, &ar->regd_update_work);
+ queue_work(ab->workqueue, &ar->regd_update_work);
} else {
/* This regd would be applied during mac registration and is
* held constant throughout for regd intersection purpose
@@ -6110,6 +6538,7 @@ static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff
static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb)
{
+ struct ath11k_vif *arvif;
u32 vdev_id, tx_status;
if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
@@ -6117,6 +6546,17 @@ static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *s
ath11k_warn(ab, "failed to extract bcn tx status");
return;
}
+
+ rcu_read_lock();
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "invalid vdev id %d in bcn_tx_status",
+ vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+ ath11k_mac_bcn_tx_event(arvif);
+ rcu_read_unlock();
}
static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb)
@@ -6401,6 +6841,7 @@ static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff
struct ieee80211_sta *sta;
struct ath11k_peer *peer;
struct ath11k *ar;
+ u32 vdev_id;
if (ath11k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
ath11k_warn(ab, "failed to extract peer sta kickout event");
@@ -6416,10 +6857,15 @@ static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff
if (!peer) {
ath11k_warn(ab, "peer not found %pM\n",
arg.mac_addr);
+ spin_unlock_bh(&ab->base_lock);
goto exit;
}
- ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
+ vdev_id = peer->vdev_id;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
if (!ar) {
ath11k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
peer->vdev_id);
@@ -6440,7 +6886,6 @@ static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff
ieee80211_report_low_ack(sta, 10);
exit:
- spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
}
@@ -6896,6 +7341,40 @@ exit:
kfree(tb);
}
+static void ath11k_rfkill_state_change_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_rfkill_state_change_ev *ev;
+ const void **tb;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_RFKILL_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "wmi tlv rfkill state change gpio %d type %d radio_state %d\n",
+ ev->gpio_pin_num,
+ ev->int_type,
+ ev->radio_state);
+
+ spin_lock_bh(&ab->base_lock);
+ ab->rfkill_radio_on = (ev->radio_state == WMI_RFKILL_RADIO_STATE_ON);
+ spin_unlock_bh(&ab->base_lock);
+
+ queue_work(ab->workqueue, &ab->rfkill_work);
+ kfree(tb);
+}
+
static void
ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
struct sk_buff *skb)
@@ -7049,6 +7528,13 @@ static void ath11k_wmi_event_wow_wakeup_host(struct ath11k_base *ab, struct sk_b
complete(&ab->wow.wakeup_completed);
}
+static void
+ath11k_wmi_diag_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ trace_ath11k_wmi_diag(ab, skb->data, skb->len);
+}
+
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -7057,6 +7543,8 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id));
+ trace_ath11k_wmi_event(ab, id, skb->data, skb->len);
+
if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
goto out;
@@ -7141,6 +7629,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
ath11k_probe_resp_tx_status_event(ab, skb);
break;
+ case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
+ ath11k_wmi_obss_color_collision_event(ab, skb);
+ break;
/* add Unsupported events here */
case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
@@ -7160,6 +7651,15 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_WOW_WAKEUP_HOST_EVENTID:
ath11k_wmi_event_wow_wakeup_host(ab, skb);
break;
+ case WMI_11D_NEW_COUNTRY_EVENTID:
+ ath11k_reg_11d_new_cc_event(ab, skb);
+ break;
+ case WMI_RFKILL_STATE_CHANGE_EVENTID:
+ ath11k_rfkill_state_change_event(ab, skb);
+ break;
+ case WMI_DIAG_EVENTID:
+ ath11k_wmi_diag_event(ab, skb);
+ break;
/* TODO: Add remaining events */
default:
ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
@@ -7202,6 +7702,7 @@ static int ath11k_connect_pdev_htc_service(struct ath11k_base *ab,
ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
+ init_waitqueue_head(&ab->wmi_ab.wmi[pdev_idx].tx_ce_desc_wq);
return 0;
}
@@ -7417,3 +7918,31 @@ int ath11k_wmi_wow_enable(struct ath11k *ar)
return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
}
+
+int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
+ const u8 mac_addr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+ struct wmi_scan_prob_req_oui_cmd *cmd;
+ u32 prob_req_oui;
+ int len;
+
+ prob_req_oui = (((u32)mac_addr[0]) << 16) |
+ (((u32)mac_addr[1]) << 8) | mac_addr[2];
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_scan_prob_req_oui_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_SCAN_PROB_REQ_OUI_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->prob_req_oui = prob_req_oui;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "wmi scan prob req oui %d\n",
+ prob_req_oui);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID);
+}
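+
+/*
+ * Worked example of the OUI packing above (illustrative): for a
+ * randomized scan MAC whose leading octets are 00:03:7f, the value
+ * sent to firmware is (0x00 << 16) | (0x03 << 8) | 0x7f = 0x00037f.
+ */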
diff --git a/rr-cache/9304cf403e433f623fc9b674a050258c4b4b216c/preimage b/rr-cache/9304cf403e433f623fc9b674a050258c4b4b216c/preimage
new file mode 100644
index 0000000..8589c95
--- /dev/null
+++ b/rr-cache/9304cf403e433f623fc9b674a050258c4b4b216c/preimage
@@ -0,0 +1,1510 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * drivers/of/property.c - Procedures for accessing and interpreting
+ * Devicetree properties and graphs.
+ *
+ * Initially created by copying procedures from drivers/of/base.c. This
+ * file contains the OF property as well as the OF graph interface
+ * functions.
+ *
+ * Paul Mackerras August 1996.
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
+ * {engebret|bergner}@us.ibm.com
+ *
+ * Adapted for sparc and sparc64 by David S. Miller davem@davemloft.net
+ *
+ * Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and
+ * Grant Likely.
+ */
+
+#define pr_fmt(fmt) "OF: " fmt
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/of_irq.h>
+#include <linux/string.h>
+#include <linux/moduleparam.h>
+
+#include "of_private.h"
+
+/**
+ * of_graph_is_present() - check graph's presence
+ * @node: pointer to device_node containing graph port
+ *
+ * Return: True if @node has a port or ports (with a port) sub-node,
+ * false otherwise.
+ */
+bool of_graph_is_present(const struct device_node *node)
+{
+ struct device_node *ports, *port;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (ports)
+ node = ports;
+
+ port = of_get_child_by_name(node, "port");
+ of_node_put(ports);
+ of_node_put(port);
+
+ return !!port;
+}
+EXPORT_SYMBOL(of_graph_is_present);
+
+/**
+ * of_property_count_elems_of_size - Count the number of elements in a property
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @elem_size: size of the individual element
+ *
+ * Search for a property in a device node and count the number of elements of
+ * size elem_size in it.
+ *
+ * Return: The number of elements on success, -EINVAL if the property does not
+ * exist or its length does not match a multiple of elem_size and -ENODATA if
+ * the property does not have a value.
+ */
+int of_property_count_elems_of_size(const struct device_node *np,
+ const char *propname, int elem_size)
+{
+ struct property *prop = of_find_property(np, propname, NULL);
+
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+
+ if (prop->length % elem_size != 0) {
+ pr_err("size of %s in node %pOF is not a multiple of %d\n",
+ propname, np, elem_size);
+ return -EINVAL;
+ }
+
+ return prop->length / elem_size;
+}
+EXPORT_SYMBOL_GPL(of_property_count_elems_of_size);
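+
+/*
+ * Example (illustrative property values): for a node containing
+ *
+ *	ranges = <0x0 0x40000000 0x1000 0x1 0x50000000 0x1000>;
+ *
+ * of_property_count_elems_of_size(np, "ranges", sizeof(u32)) returns 6,
+ * while an elem_size of 3 * sizeof(u32) returns 2.
+ */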
+
+/**
+ * of_find_property_value_of_size
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @min: minimum allowed length of property value
+ * @max: maximum allowed length of property value (0 means unlimited)
+ * @len: if !=NULL, actual length is written to here
+ *
+ * Search for a property in a device node and validate the requested size.
+ *
+ * Return: The property value on success, -EINVAL if the property does not
+ * exist, -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data is too small or too large.
+ *
+ */
+static void *of_find_property_value_of_size(const struct device_node *np,
+ const char *propname, u32 min, u32 max, size_t *len)
+{
+ struct property *prop = of_find_property(np, propname, NULL);
+
+ if (!prop)
+ return ERR_PTR(-EINVAL);
+ if (!prop->value)
+ return ERR_PTR(-ENODATA);
+ if (prop->length < min)
+ return ERR_PTR(-EOVERFLOW);
+ if (max && prop->length > max)
+ return ERR_PTR(-EOVERFLOW);
+
+ if (len)
+ *len = prop->length;
+
+ return prop->value;
+}
+
+/**
+ * of_property_read_u32_index - Find and read a u32 from a multi-value property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @index: index of the u32 in the list of values
+ * @out_value: pointer to return value, modified only if no error.
+ *
+ * Search for a property in a device node and read nth 32-bit value from
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_value is modified only if a valid u32 value can be decoded.
+ */
+int of_property_read_u32_index(const struct device_node *np,
+ const char *propname,
+ u32 index, u32 *out_value)
+{
+ const u32 *val = of_find_property_value_of_size(np, propname,
+ ((index + 1) * sizeof(*out_value)),
+ 0,
+ NULL);
+
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ *out_value = be32_to_cpup(((__be32 *)val) + index);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_property_read_u32_index);
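+
+/*
+ * Example (illustrative property values): given
+ *
+ *	brightness-levels = <0 4 8 16>;
+ *
+ * of_property_read_u32_index(np, "brightness-levels", 2, &val) stores
+ * 8 in val and returns 0, while index 4 returns -EOVERFLOW.
+ */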
+
+/**
+ * of_property_read_u64_index - Find and read a u64 from a multi-value property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @index: index of the u64 in the list of values
+ * @out_value: pointer to return value, modified only if no error.
+ *
+ * Search for a property in a device node and read nth 64-bit value from
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_value is modified only if a valid u64 value can be decoded.
+ */
+int of_property_read_u64_index(const struct device_node *np,
+ const char *propname,
+ u32 index, u64 *out_value)
+{
+ const u64 *val = of_find_property_value_of_size(np, propname,
+ ((index + 1) * sizeof(*out_value)),
+ 0, NULL);
+
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ *out_value = be64_to_cpup(((__be64 *)val) + index);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_property_read_u64_index);
+
+/**
+ * of_property_read_variable_u8_array - Find and read an array of u8 from a
+ * property, with bounds on the minimum and maximum array size.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to found values.
+ * @sz_min: minimum number of array elements to read
+ * @sz_max: maximum number of array elements to read, if zero there is no
+ * upper limit on the number of elements in the dts entry but only
+ * sz_min will be read.
+ *
+ * Search for a property in a device node and read 8-bit value(s) from
+ * it.
+ *
+ * dts entry of array should be like:
+ * ``property = /bits/ 8 <0x50 0x60 0x70>;``
+ *
+ * Return: The number of elements read on success, -EINVAL if the property
+ * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
+ * if the property data is smaller than sz_min or longer than sz_max.
+ *
+ * The out_values is modified only if a valid u8 value can be decoded.
+ */
+int of_property_read_variable_u8_array(const struct device_node *np,
+ const char *propname, u8 *out_values,
+ size_t sz_min, size_t sz_max)
+{
+ size_t sz, count;
+ const u8 *val = of_find_property_value_of_size(np, propname,
+ (sz_min * sizeof(*out_values)),
+ (sz_max * sizeof(*out_values)),
+ &sz);
+
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ if (!sz_max)
+ sz = sz_min;
+ else
+ sz /= sizeof(*out_values);
+
+ count = sz;
+ while (count--)
+ *out_values++ = *val++;
+
+ return sz;
+}
+EXPORT_SYMBOL_GPL(of_property_read_variable_u8_array);
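+
+/*
+ * Example (illustrative property values): for
+ *
+ *	local-mac-address = /bits/ 8 <0x00 0x11 0x22 0x33 0x44 0x55>;
+ *
+ * a call with sz_min = 6 and sz_max = 6 copies all six bytes into
+ * out_values and returns 6, while sz_min = 8 returns -EOVERFLOW.
+ */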
+
+/**
+ * of_property_read_variable_u16_array - Find and read an array of u16 from a
+ * property, with bounds on the minimum and maximum array size.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to found values.
+ * @sz_min: minimum number of array elements to read
+ * @sz_max: maximum number of array elements to read, if zero there is no
+ * upper limit on the number of elements in the dts entry but only
+ * sz_min will be read.
+ *
+ * Search for a property in a device node and read 16-bit value(s) from
+ * it.
+ *
+ * dts entry of array should be like:
+ * ``property = /bits/ 16 <0x5000 0x6000 0x7000>;``
+ *
+ * Return: The number of elements read on success, -EINVAL if the property
+ * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
+ * if the property data is smaller than sz_min or longer than sz_max.
+ *
+ * The out_values is modified only if a valid u16 value can be decoded.
+ */
+int of_property_read_variable_u16_array(const struct device_node *np,
+ const char *propname, u16 *out_values,
+ size_t sz_min, size_t sz_max)
+{
+ size_t sz, count;
+ const __be16 *val = of_find_property_value_of_size(np, propname,
+ (sz_min * sizeof(*out_values)),
+ (sz_max * sizeof(*out_values)),
+ &sz);
+
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ if (!sz_max)
+ sz = sz_min;
+ else
+ sz /= sizeof(*out_values);
+
+ count = sz;
+ while (count--)
+ *out_values++ = be16_to_cpup(val++);
+
+ return sz;
+}
+EXPORT_SYMBOL_GPL(of_property_read_variable_u16_array);
+
+/**
+ * of_property_read_variable_u32_array - Find and read an array of 32 bit
+ * integers from a property, with bounds on the minimum and maximum array size.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return found values.
+ * @sz_min: minimum number of array elements to read
+ * @sz_max: maximum number of array elements to read, if zero there is no
+ * upper limit on the number of elements in the dts entry but only
+ * sz_min will be read.
+ *
+ * Search for a property in a device node and read 32-bit value(s) from
+ * it.
+ *
+ * Return: The number of elements read on success, -EINVAL if the property
+ * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
+ * if the property data is smaller than sz_min or longer than sz_max.
+ *
+ * The out_values is modified only if a valid u32 value can be decoded.
+ */
+int of_property_read_variable_u32_array(const struct device_node *np,
+ const char *propname, u32 *out_values,
+ size_t sz_min, size_t sz_max)
+{
+ size_t sz, count;
+ const __be32 *val = of_find_property_value_of_size(np, propname,
+ (sz_min * sizeof(*out_values)),
+ (sz_max * sizeof(*out_values)),
+ &sz);
+
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ if (!sz_max)
+ sz = sz_min;
+ else
+ sz /= sizeof(*out_values);
+
+ count = sz;
+ while (count--)
+ *out_values++ = be32_to_cpup(val++);
+
+ return sz;
+}
+EXPORT_SYMBOL_GPL(of_property_read_variable_u32_array);
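+
+/*
+ * Example (illustrative property values): for
+ *
+ *	interrupt-map-mask = <0xf 0x0 0x7>;
+ *
+ * a call with sz_min = 1 and sz_max = 8 copies the three cells and
+ * returns 3, while sz_min = 4 returns -EOVERFLOW.
+ */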
+
+/**
+ * of_property_read_u64 - Find and read a 64 bit integer from a property
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_value: pointer to return value, modified only if return value is 0.
+ *
+ * Search for a property in a device node and read a 64-bit value from
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * The out_value is modified only if a valid u64 value can be decoded.
+ */
+int of_property_read_u64(const struct device_node *np, const char *propname,
+ u64 *out_value)
+{
+ const __be32 *val = of_find_property_value_of_size(np, propname,
+ sizeof(*out_value),
+ 0,
+ NULL);
+
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ *out_value = of_read_number(val, 2);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_property_read_u64);
+
+/**
+ * of_property_read_variable_u64_array - Find and read an array of 64 bit
+ * integers from a property, with bounds on the minimum and maximum array size.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to found values.
+ * @sz_min: minimum number of array elements to read
+ * @sz_max: maximum number of array elements to read, if zero there is no
+ * upper limit on the number of elements in the dts entry but only
+ * sz_min will be read.
+ *
+ * Search for a property in a device node and read 64-bit value(s) from
+ * it.
+ *
+ * Return: The number of elements read on success, -EINVAL if the property
+ * does not exist, -ENODATA if property does not have a value, and -EOVERFLOW
+ * if the property data is smaller than sz_min or longer than sz_max.
+ *
+ * The out_values is modified only if a valid u64 value can be decoded.
+ */
+int of_property_read_variable_u64_array(const struct device_node *np,
+ const char *propname, u64 *out_values,
+ size_t sz_min, size_t sz_max)
+{
+ size_t sz, count;
+ const __be32 *val = of_find_property_value_of_size(np, propname,
+ (sz_min * sizeof(*out_values)),
+ (sz_max * sizeof(*out_values)),
+ &sz);
+
+ if (IS_ERR(val))
+ return PTR_ERR(val);
+
+ if (!sz_max)
+ sz = sz_min;
+ else
+ sz /= sizeof(*out_values);
+
+ count = sz;
+ while (count--) {
+ *out_values++ = of_read_number(val, 2);
+ val += 2;
+ }
+
+ return sz;
+}
+EXPORT_SYMBOL_GPL(of_property_read_variable_u64_array);
+
+/**
+ * of_property_read_string - Find and read a string from a property
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_string: pointer to null terminated return string, modified only if
+ * return value is 0.
+ *
+ * Search for a property in a device tree node and retrieve a null
+ * terminated string value (pointer to data, not a copy).
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist, -ENODATA if
+ * property does not have a value, and -EILSEQ if the string is not
+ * null-terminated within the length of the property data.
+ *
+ * The out_string pointer is modified only if a valid string can be decoded.
+ */
+int of_property_read_string(const struct device_node *np, const char *propname,
+ const char **out_string)
+{
+ const struct property *prop = of_find_property(np, propname, NULL);
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+ if (strnlen(prop->value, prop->length) >= prop->length)
+ return -EILSEQ;
+ *out_string = prop->value;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_property_read_string);
+
+/**
+ * of_property_match_string() - Find string in a list and return index
+ * @np: pointer to node containing string list property
+ * @propname: string list property name
+ * @string: pointer to string to search for in string list
+ *
+ * This function searches a string list property and returns the index
+ * of a specific string value.
+ */
+int of_property_match_string(const struct device_node *np, const char *propname,
+ const char *string)
+{
+ const struct property *prop = of_find_property(np, propname, NULL);
+ size_t l;
+ int i;
+ const char *p, *end;
+
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+
+ p = prop->value;
+ end = p + prop->length;
+
+ for (i = 0; p < end; i++, p += l) {
+ l = strnlen(p, end - p) + 1;
+ if (p + l > end)
+ return -EILSEQ;
+ pr_debug("comparing %s with %s\n", string, p);
+ if (strcmp(string, p) == 0)
+ return i; /* Found it; return index */
+ }
+ return -ENODATA;
+}
+EXPORT_SYMBOL_GPL(of_property_match_string);
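+
+/*
+ * Example (illustrative property values): with
+ *
+ *	reg-names = "ctrl", "phy", "wrapper";
+ *
+ * of_property_match_string(np, "reg-names", "phy") returns 1, and a
+ * string that is not in the list returns -ENODATA.
+ */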
+
+/**
+ * of_property_read_string_helper() - Utility helper for parsing string properties
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_strs: output array of string pointers.
+ * @sz: number of array elements to read.
+ * @skip: Number of strings to skip over at beginning of list.
+ *
+ * Don't call this function directly. It is a utility helper for the
+ * of_property_read_string*() family of functions.
+ */
+int of_property_read_string_helper(const struct device_node *np,
+ const char *propname, const char **out_strs,
+ size_t sz, int skip)
+{
+ const struct property *prop = of_find_property(np, propname, NULL);
+ int l = 0, i = 0;
+ const char *p, *end;
+
+ if (!prop)
+ return -EINVAL;
+ if (!prop->value)
+ return -ENODATA;
+ p = prop->value;
+ end = p + prop->length;
+
+ for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) {
+ l = strnlen(p, end - p) + 1;
+ if (p + l > end)
+ return -EILSEQ;
+ if (out_strs && i >= skip)
+ *out_strs++ = p;
+ }
+ i -= skip;
+ return i <= 0 ? -ENODATA : i;
+}
+EXPORT_SYMBOL_GPL(of_property_read_string_helper);
+
+const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur,
+ u32 *pu)
+{
+ const void *curv = cur;
+
+ if (!prop)
+ return NULL;
+
+ if (!cur) {
+ curv = prop->value;
+ goto out_val;
+ }
+
+ curv += sizeof(*cur);
+ if (curv >= prop->value + prop->length)
+ return NULL;
+
+out_val:
+ *pu = be32_to_cpup(curv);
+ return curv;
+}
+EXPORT_SYMBOL_GPL(of_prop_next_u32);
+
+const char *of_prop_next_string(struct property *prop, const char *cur)
+{
+ const void *curv = cur;
+
+ if (!prop)
+ return NULL;
+
+ if (!cur)
+ return prop->value;
+
+ curv += strlen(cur) + 1;
+ if (curv >= prop->value + prop->length)
+ return NULL;
+
+ return curv;
+}
+EXPORT_SYMBOL_GPL(of_prop_next_string);
+
+/**
+ * of_graph_parse_endpoint() - parse common endpoint node properties
+ * @node: pointer to endpoint device_node
+ * @endpoint: pointer to the OF endpoint data structure
+ *
+ * The caller should hold a reference to @node.
+ */
+int of_graph_parse_endpoint(const struct device_node *node,
+ struct of_endpoint *endpoint)
+{
+ struct device_node *port_node = of_get_parent(node);
+
+ WARN_ONCE(!port_node, "%s(): endpoint %pOF has no parent node\n",
+ __func__, node);
+
+ memset(endpoint, 0, sizeof(*endpoint));
+
+ endpoint->local_node = node;
+ /*
+ * It doesn't matter whether the two calls below succeed.
+ * If they don't then the default value 0 is used.
+ */
+ of_property_read_u32(port_node, "reg", &endpoint->port);
+ of_property_read_u32(node, "reg", &endpoint->id);
+
+ of_node_put(port_node);
+
+ return 0;
+}
+EXPORT_SYMBOL(of_graph_parse_endpoint);
+
+/**
+ * of_graph_get_port_by_id() - get the port matching a given id
+ * @parent: pointer to the parent device node
+ * @id: id of the port
+ *
+ * Return: A 'port' node pointer with refcount incremented. The caller
+ * has to use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_port_by_id(struct device_node *parent, u32 id)
+{
+ struct device_node *node, *port;
+
+ node = of_get_child_by_name(parent, "ports");
+ if (node)
+ parent = node;
+
+ for_each_child_of_node(parent, port) {
+ u32 port_id = 0;
+
+ if (!of_node_name_eq(port, "port"))
+ continue;
+ of_property_read_u32(port, "reg", &port_id);
+ if (id == port_id)
+ break;
+ }
+
+ of_node_put(node);
+
+ return port;
+}
+EXPORT_SYMBOL(of_graph_get_port_by_id);
+
+/**
+ * of_graph_get_next_endpoint() - get next endpoint node
+ * @parent: pointer to the parent device node
+ * @prev: previous endpoint node, or NULL to get first
+ *
+ * Return: An 'endpoint' node pointer with refcount incremented. Refcount
+ * of the passed @prev node is decremented.
+ */
+struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
+ struct device_node *prev)
+{
+ struct device_node *endpoint;
+ struct device_node *port;
+
+ if (!parent)
+ return NULL;
+
+ /*
+ * Start by locating the port node. If no previous endpoint is specified
+ * search for the first port node, otherwise get the previous endpoint
+ * parent port node.
+ */
+ if (!prev) {
+ struct device_node *node;
+
+ node = of_get_child_by_name(parent, "ports");
+ if (node)
+ parent = node;
+
+ port = of_get_child_by_name(parent, "port");
+ of_node_put(node);
+
+ if (!port) {
+ pr_err("graph: no port node found in %pOF\n", parent);
+ return NULL;
+ }
+ } else {
+ port = of_get_parent(prev);
+ if (WARN_ONCE(!port, "%s(): endpoint %pOF has no parent node\n",
+ __func__, prev))
+ return NULL;
+ }
+
+ while (1) {
+ /*
+ * Now that we have a port node, get the next endpoint by
+ * getting the next child. If the previous endpoint is NULL this
+ * will return the first child.
+ */
+ endpoint = of_get_next_child(port, prev);
+ if (endpoint) {
+ of_node_put(port);
+ return endpoint;
+ }
+
+ /* No more endpoints under this port, try the next one. */
+ prev = NULL;
+
+ do {
+ port = of_get_next_child(parent, port);
+ if (!port)
+ return NULL;
+ } while (!of_node_name_eq(port, "port"));
+ }
+}
+EXPORT_SYMBOL(of_graph_get_next_endpoint);
+
+/**
+ * of_graph_get_endpoint_by_regs() - get endpoint node of specific identifiers
+ * @parent: pointer to the parent device node
+ * @port_reg: identifier (value of reg property) of the parent port node
+ * @reg: identifier (value of reg property) of the endpoint node
+ *
+ * Return: An 'endpoint' node pointer which is identified by reg and at the same
+ * is the child of a port node identified by port_reg. reg and port_reg are
+ * ignored when they are -1. Use of_node_put() on the pointer when done.
+ */
+struct device_node *of_graph_get_endpoint_by_regs(
+ const struct device_node *parent, int port_reg, int reg)
+{
+ struct of_endpoint endpoint;
+ struct device_node *node = NULL;
+
+ for_each_endpoint_of_node(parent, node) {
+ of_graph_parse_endpoint(node, &endpoint);
+ if (((port_reg == -1) || (endpoint.port == port_reg)) &&
+ ((reg == -1) || (endpoint.id == reg)))
+ return node;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(of_graph_get_endpoint_by_regs);
+
+/**
+ * of_graph_get_remote_endpoint() - get remote endpoint node
+ * @node: pointer to a local endpoint device_node
+ *
+ * Return: Remote endpoint node associated with remote endpoint node linked
+ * to @node. Use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_remote_endpoint(const struct device_node *node)
+{
+ /* Get remote endpoint node. */
+ return of_parse_phandle(node, "remote-endpoint", 0);
+}
+EXPORT_SYMBOL(of_graph_get_remote_endpoint);
+
+/**
+ * of_graph_get_port_parent() - get port's parent node
+ * @node: pointer to a local endpoint device_node
+ *
+ * Return: device node associated with endpoint node linked
+ * to @node. Use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_port_parent(struct device_node *node)
+{
+ unsigned int depth;
+
+ if (!node)
+ return NULL;
+
+ /*
+ * Preserve usecount for passed in node as of_get_next_parent()
+ * will do of_node_put() on it.
+ */
+ of_node_get(node);
+
+ /* Walk 3 levels up only if there is 'ports' node. */
+ for (depth = 3; depth && node; depth--) {
+ node = of_get_next_parent(node);
+ if (depth == 2 && !of_node_name_eq(node, "ports"))
+ break;
+ }
+ return node;
+}
+EXPORT_SYMBOL(of_graph_get_port_parent);
+
+/**
+ * of_graph_get_remote_port_parent() - get remote port's parent node
+ * @node: pointer to a local endpoint device_node
+ *
+ * Return: Remote device node associated with remote endpoint node linked
+ * to @node. Use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_remote_port_parent(
+ const struct device_node *node)
+{
+ struct device_node *np, *pp;
+
+ /* Get remote endpoint node. */
+ np = of_graph_get_remote_endpoint(node);
+
+ pp = of_graph_get_port_parent(np);
+
+ of_node_put(np);
+
+ return pp;
+}
+EXPORT_SYMBOL(of_graph_get_remote_port_parent);
+
+/**
+ * of_graph_get_remote_port() - get remote port node
+ * @node: pointer to a local endpoint device_node
+ *
+ * Return: Remote port node associated with remote endpoint node linked
+ * to @node. Use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_remote_port(const struct device_node *node)
+{
+ struct device_node *np;
+
+ /* Get remote endpoint node. */
+ np = of_graph_get_remote_endpoint(node);
+ if (!np)
+ return NULL;
+ return of_get_next_parent(np);
+}
+EXPORT_SYMBOL(of_graph_get_remote_port);
+
+int of_graph_get_endpoint_count(const struct device_node *np)
+{
+ struct device_node *endpoint;
+ int num = 0;
+
+ for_each_endpoint_of_node(np, endpoint)
+ num++;
+
+ return num;
+}
+EXPORT_SYMBOL(of_graph_get_endpoint_count);
+
+/**
+ * of_graph_get_remote_node() - get remote parent device_node for given port/endpoint
+ * @node: pointer to parent device_node containing graph port/endpoint
+ * @port: identifier (value of reg property) of the parent port node
+ * @endpoint: identifier (value of reg property) of the endpoint node
+ *
+ * Return: Remote device node associated with remote endpoint node linked
+ * to @node. Use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_remote_node(const struct device_node *node,
+ u32 port, u32 endpoint)
+{
+ struct device_node *endpoint_node, *remote;
+
+ endpoint_node = of_graph_get_endpoint_by_regs(node, port, endpoint);
+ if (!endpoint_node) {
+ pr_debug("no valid endpoint (%d, %d) for node %pOF\n",
+ port, endpoint, node);
+ return NULL;
+ }
+
+ remote = of_graph_get_remote_port_parent(endpoint_node);
+ of_node_put(endpoint_node);
+ if (!remote) {
+ pr_debug("no valid remote node\n");
+ return NULL;
+ }
+
+ if (!of_device_is_available(remote)) {
+ pr_debug("not available for remote node\n");
+ of_node_put(remote);
+ return NULL;
+ }
+
+ return remote;
+}
+EXPORT_SYMBOL(of_graph_get_remote_node);
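+
+/*
+ * Usage sketch (illustrative; the node names are made up): finding the
+ * panel connected to port 0, endpoint 0 of a display controller node:
+ *
+ *	struct device_node *panel;
+ *
+ *	panel = of_graph_get_remote_node(dsi_np, 0, 0);
+ *	if (panel) {
+ *		... use panel ...
+ *		of_node_put(panel);
+ *	}
+ */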
+
+static struct fwnode_handle *of_fwnode_get(struct fwnode_handle *fwnode)
+{
+ return of_fwnode_handle(of_node_get(to_of_node(fwnode)));
+}
+
+static void of_fwnode_put(struct fwnode_handle *fwnode)
+{
+ of_node_put(to_of_node(fwnode));
+}
+
+static bool of_fwnode_device_is_available(const struct fwnode_handle *fwnode)
+{
+ return of_device_is_available(to_of_node(fwnode));
+}
+
+static bool of_fwnode_property_present(const struct fwnode_handle *fwnode,
+ const char *propname)
+{
+ return of_property_read_bool(to_of_node(fwnode), propname);
+}
+
+static int of_fwnode_property_read_int_array(const struct fwnode_handle *fwnode,
+ const char *propname,
+ unsigned int elem_size, void *val,
+ size_t nval)
+{
+ const struct device_node *node = to_of_node(fwnode);
+
+ if (!val)
+ return of_property_count_elems_of_size(node, propname,
+ elem_size);
+
+ switch (elem_size) {
+ case sizeof(u8):
+ return of_property_read_u8_array(node, propname, val, nval);
+ case sizeof(u16):
+ return of_property_read_u16_array(node, propname, val, nval);
+ case sizeof(u32):
+ return of_property_read_u32_array(node, propname, val, nval);
+ case sizeof(u64):
+ return of_property_read_u64_array(node, propname, val, nval);
+ }
+
+ return -ENXIO;
+}
+
+static int
+of_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
+ const char *propname, const char **val,
+ size_t nval)
+{
+ const struct device_node *node = to_of_node(fwnode);
+
+ return val ?
+ of_property_read_string_array(node, propname, val, nval) :
+ of_property_count_strings(node, propname);
+}
+
+static const char *of_fwnode_get_name(const struct fwnode_handle *fwnode)
+{
+ return kbasename(to_of_node(fwnode)->full_name);
+}
+
+static const char *of_fwnode_get_name_prefix(const struct fwnode_handle *fwnode)
+{
+ /* Root needs no prefix here (its name is "/"). */
+ if (!to_of_node(fwnode)->parent)
+ return "";
+
+ return "/";
+}
+
+static struct fwnode_handle *
+of_fwnode_get_parent(const struct fwnode_handle *fwnode)
+{
+ return of_fwnode_handle(of_get_parent(to_of_node(fwnode)));
+}
+
+static struct fwnode_handle *
+of_fwnode_get_next_child_node(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
+{
+ return of_fwnode_handle(of_get_next_available_child(to_of_node(fwnode),
+ to_of_node(child)));
+}
+
+static struct fwnode_handle *
+of_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
+ const char *childname)
+{
+ const struct device_node *node = to_of_node(fwnode);
+ struct device_node *child;
+
+ for_each_available_child_of_node(node, child)
+ if (of_node_name_eq(child, childname))
+ return of_fwnode_handle(child);
+
+ return NULL;
+}
+
+static int
+of_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
+ const char *prop, const char *nargs_prop,
+ unsigned int nargs, unsigned int index,
+ struct fwnode_reference_args *args)
+{
+ struct of_phandle_args of_args;
+ unsigned int i;
+ int ret;
+
+ if (nargs_prop)
+ ret = of_parse_phandle_with_args(to_of_node(fwnode), prop,
+ nargs_prop, index, &of_args);
+ else
+ ret = of_parse_phandle_with_fixed_args(to_of_node(fwnode), prop,
+ nargs, index, &of_args);
+ if (ret < 0)
+ return ret;
+ if (!args)
+ return 0;
+
+ args->nargs = of_args.args_count;
+ args->fwnode = of_fwnode_handle(of_args.np);
+
+ for (i = 0; i < NR_FWNODE_REFERENCE_ARGS; i++)
+ args->args[i] = i < of_args.args_count ? of_args.args[i] : 0;
+
+ return 0;
+}
+
+static struct fwnode_handle *
+of_fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *prev)
+{
+ return of_fwnode_handle(of_graph_get_next_endpoint(to_of_node(fwnode),
+ to_of_node(prev)));
+}
+
+static struct fwnode_handle *
+of_fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
+{
+ return of_fwnode_handle(
+ of_graph_get_remote_endpoint(to_of_node(fwnode)));
+}
+
+static struct fwnode_handle *
+of_fwnode_graph_get_port_parent(struct fwnode_handle *fwnode)
+{
+ struct device_node *np;
+
+ /* Get the parent of the port */
+ np = of_get_parent(to_of_node(fwnode));
+ if (!np)
+ return NULL;
+
+ /* Is this the "ports" node? If not, it's the port parent. */
+ if (!of_node_name_eq(np, "ports"))
+ return of_fwnode_handle(np);
+
+ return of_fwnode_handle(of_get_next_parent(np));
+}
+
+static int of_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
+ struct fwnode_endpoint *endpoint)
+{
+ const struct device_node *node = to_of_node(fwnode);
+ struct device_node *port_node = of_get_parent(node);
+
+ endpoint->local_fwnode = fwnode;
+
+ of_property_read_u32(port_node, "reg", &endpoint->port);
+ of_property_read_u32(node, "reg", &endpoint->id);
+
+ of_node_put(port_node);
+
+ return 0;
+}
+
+static const void *
+of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
+ const struct device *dev)
+{
+ return of_device_get_match_data(dev);
+}
+
+static bool of_is_ancestor_of(struct device_node *test_ancestor,
+ struct device_node *child)
+{
+ of_node_get(child);
+ while (child) {
+ if (child == test_ancestor) {
+ of_node_put(child);
+ return true;
+ }
+ child = of_get_next_parent(child);
+ }
+ return false;
+}
+
+<<<<<<<
+=======
+static struct device_node *of_get_compat_node(struct device_node *np)
+{
+ of_node_get(np);
+
+ while (np) {
+ if (!of_device_is_available(np)) {
+ of_node_put(np);
+ np = NULL;
+ }
+
+ if (of_find_property(np, "compatible", NULL))
+ break;
+
+ np = of_get_next_parent(np);
+ }
+
+ return np;
+}
+
+static struct device_node *of_get_compat_node_parent(struct device_node *np)
+{
+ struct device_node *parent, *node;
+
+ parent = of_get_parent(np);
+ node = of_get_compat_node(parent);
+ of_node_put(parent);
+
+ return node;
+}
+
+>>>>>>>
+/**
+ * of_link_to_phandle - Add fwnode link to supplier from supplier phandle
+ * @con_np: consumer device tree node
+ * @sup_np: supplier device tree node
+ *
+ * Given a phandle to a supplier device tree node (@sup_np), this function
+ * finds the device that owns the supplier device tree node and creates a
+ * device link from the @con_np consumer device to the supplier device. This function
+ * doesn't create device links for invalid scenarios such as trying to create a
+ * link with a parent device as the consumer of its child device. In such
+ * cases, it returns an error.
+ *
+ * Returns:
+ * - 0 if fwnode link successfully created to supplier
+ * - -EINVAL if the supplier link is invalid and should not be created
+ * - -ENODEV if a struct device will never be created for the supplier
+ */
+static int of_link_to_phandle(struct device_node *con_np,
+ struct device_node *sup_np)
+{
+ struct device *sup_dev;
+ struct device_node *tmp_np = sup_np;
+
+ of_node_get(sup_np);
+ /*
+ * Find the device node that contains the supplier phandle. It may be
+ * @sup_np or it may be an ancestor of @sup_np.
+ */
+ while (sup_np) {
+
+ /* Don't allow linking to a disabled supplier */
+ if (!of_device_is_available(sup_np)) {
+ of_node_put(sup_np);
+ sup_np = NULL;
+ }
+
+ if (of_find_property(sup_np, "compatible", NULL))
+ break;
+
+ sup_np = of_get_next_parent(sup_np);
+ }
+
+ if (!sup_np) {
+ pr_debug("Not linking %pOFP to %pOFP - No device\n",
+ con_np, tmp_np);
+ return -ENODEV;
+ }
+
+ /*
+ * Don't allow linking a device node as a consumer of one of its
+ * descendant nodes. By definition, a child node can't be a functional
+ * dependency for the parent node.
+ */
+ if (of_is_ancestor_of(con_np, sup_np)) {
+ pr_debug("Not linking %pOFP to %pOFP - is descendant\n",
+ con_np, sup_np);
+ of_node_put(sup_np);
+ return -EINVAL;
+ }
+
+ /*
+ * Don't create links to "early devices" that won't have struct devices
+ * created for them.
+ */
+ sup_dev = get_dev_from_fwnode(&sup_np->fwnode);
+ if (!sup_dev &&
+ (of_node_check_flag(sup_np, OF_POPULATED) ||
+ sup_np->fwnode.flags & FWNODE_FLAG_NOT_DEVICE)) {
+ pr_debug("Not linking %pOFP to %pOFP - No struct device\n",
+ con_np, sup_np);
+ of_node_put(sup_np);
+ return -ENODEV;
+ }
+ put_device(sup_dev);
+
+ fwnode_link_add(of_fwnode_handle(con_np), of_fwnode_handle(sup_np));
+ of_node_put(sup_np);
+
+ return 0;
+}
+
+/**
+ * parse_prop_cells - Property parsing function for suppliers
+ *
+ * @np: Pointer to device tree node containing a list
+ * @prop_name: Name of property to be parsed. Expected to hold phandle values
+ * @index: For properties holding a list of phandles, this is the index
+ * into the list.
+ * @list_name: Property name that is known to contain list of phandle(s) to
+ * supplier(s)
+ * @cells_name: property name that specifies phandles' arguments count
+ *
+ * This is a helper function to parse properties that have a known fixed name
+ * and are a list of phandles and phandle arguments.
+ *
+ * Returns:
+ * - phandle node pointer with refcount incremented. Caller must of_node_put()
+ * on it when done.
+ * - NULL if no phandle found at index
+ */
+static struct device_node *parse_prop_cells(struct device_node *np,
+ const char *prop_name, int index,
+ const char *list_name,
+ const char *cells_name)
+{
+ struct of_phandle_args sup_args;
+
+ if (strcmp(prop_name, list_name))
+ return NULL;
+
+ if (of_parse_phandle_with_args(np, list_name, cells_name, index,
+ &sup_args))
+ return NULL;
+
+ return sup_args.np;
+}
+
+#define DEFINE_SIMPLE_PROP(fname, name, cells) \
+static struct device_node *parse_##fname(struct device_node *np, \
+ const char *prop_name, int index) \
+{ \
+ return parse_prop_cells(np, prop_name, index, name, cells); \
+}
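+
+/*
+ * For reference, DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells")
+ * below expands to:
+ *
+ *	static struct device_node *parse_clocks(struct device_node *np,
+ *						const char *prop_name,
+ *						int index)
+ *	{
+ *		return parse_prop_cells(np, prop_name, index, "clocks",
+ *					"#clock-cells");
+ *	}
+ */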
+
+static int strcmp_suffix(const char *str, const char *suffix)
+{
+ unsigned int len, suffix_len;
+
+ len = strlen(str);
+ suffix_len = strlen(suffix);
+ if (len <= suffix_len)
+ return -1;
+ return strcmp(str + len - suffix_len, suffix);
+}
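+
+/*
+ * Example: strcmp_suffix("vdd-supply", "-supply") returns 0 (a match),
+ * while strcmp_suffix("-supply", "-supply") returns -1 because the
+ * string must be strictly longer than the suffix.
+ */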
+
+/**
+ * parse_suffix_prop_cells - Suffix property parsing function for suppliers
+ *
+ * @np: Pointer to device tree node containing a list
+ * @prop_name: Name of property to be parsed. Expected to hold phandle values
+ * @index: For properties holding a list of phandles, this is the index
+ * into the list.
+ * @suffix: Property suffix that is known to contain list of phandle(s) to
+ * supplier(s)
+ * @cells_name: property name that specifies phandles' arguments count
+ *
+ * This is a helper function to parse properties that have a known fixed suffix
+ * and are a list of phandles and phandle arguments.
+ *
+ * Returns:
+ * - phandle node pointer with refcount incremented. Caller must of_node_put()
+ * on it when done.
+ * - NULL if no phandle found at index
+ */
+static struct device_node *parse_suffix_prop_cells(struct device_node *np,
+ const char *prop_name, int index,
+ const char *suffix,
+ const char *cells_name)
+{
+ struct of_phandle_args sup_args;
+
+ if (strcmp_suffix(prop_name, suffix))
+ return NULL;
+
+ if (of_parse_phandle_with_args(np, prop_name, cells_name, index,
+ &sup_args))
+ return NULL;
+
+ return sup_args.np;
+}
+
+#define DEFINE_SUFFIX_PROP(fname, suffix, cells) \
+static struct device_node *parse_##fname(struct device_node *np, \
+ const char *prop_name, int index) \
+{ \
+ return parse_suffix_prop_cells(np, prop_name, index, suffix, cells); \
+}
+
+/**
+ * struct supplier_bindings - Property parsing functions for suppliers
+ *
+ * @parse_prop: function name
+ * parse_prop() finds the node corresponding to a supplier phandle
+ * @parse_prop.np: Pointer to device node holding supplier phandle property
+ * @parse_prop.prop_name: Name of property holding a phandle value
+ * @parse_prop.index: For properties holding a list of phandles, this is the
+ * index into the list
+<<<<<<<
+=======
+ * @optional: Describes whether a supplier is mandatory or not
+ * @node_not_dev: The consumer node containing the property is never converted
+ * to a struct device. Instead, parse ancestor nodes for the
+ * compatible property to find a node corresponding to a device.
+>>>>>>>
+ *
+ * Returns:
+ * parse_prop() return values are
+ * - phandle node pointer with refcount incremented. Caller must of_node_put()
+ * on it when done.
+ * - NULL if no phandle found at index
+ */
+struct supplier_bindings {
+ struct device_node *(*parse_prop)(struct device_node *np,
+ const char *prop_name, int index);
+ bool optional;
+};
+
+DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells")
+DEFINE_SIMPLE_PROP(interconnects, "interconnects", "#interconnect-cells")
+DEFINE_SIMPLE_PROP(iommus, "iommus", "#iommu-cells")
+DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells")
+DEFINE_SIMPLE_PROP(io_channels, "io-channels", "#io-channel-cells")
+DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL)
+DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
+DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells")
+DEFINE_SIMPLE_PROP(hwlocks, "hwlocks", "#hwlock-cells")
+DEFINE_SIMPLE_PROP(extcon, "extcon", NULL)
+DEFINE_SIMPLE_PROP(nvmem_cells, "nvmem-cells", NULL)
+DEFINE_SIMPLE_PROP(phys, "phys", "#phy-cells")
+DEFINE_SIMPLE_PROP(wakeup_parent, "wakeup-parent", NULL)
+DEFINE_SIMPLE_PROP(pinctrl0, "pinctrl-0", NULL)
+DEFINE_SIMPLE_PROP(pinctrl1, "pinctrl-1", NULL)
+DEFINE_SIMPLE_PROP(pinctrl2, "pinctrl-2", NULL)
+DEFINE_SIMPLE_PROP(pinctrl3, "pinctrl-3", NULL)
+DEFINE_SIMPLE_PROP(pinctrl4, "pinctrl-4", NULL)
+DEFINE_SIMPLE_PROP(pinctrl5, "pinctrl-5", NULL)
+DEFINE_SIMPLE_PROP(pinctrl6, "pinctrl-6", NULL)
+DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL)
+DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL)
+DEFINE_SIMPLE_PROP(remote_endpoint, "remote-endpoint", NULL)
+DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells")
+DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
+DEFINE_SIMPLE_PROP(leds, "leds", NULL)
+DEFINE_SIMPLE_PROP(backlight, "backlight", NULL)
+DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
+DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
+
+static struct device_node *parse_gpios(struct device_node *np,
+ const char *prop_name, int index)
+{
+ if (!strcmp_suffix(prop_name, ",nr-gpios"))
+ return NULL;
+
+ return parse_suffix_prop_cells(np, prop_name, index, "-gpios",
+ "#gpio-cells");
+}
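+
+/*
+ * Example (illustrative): "cs-gpios" is parsed as a GPIO supplier
+ * reference, while a count property such as "nvidia,nr-gpios" matches
+ * the ",nr-gpios" suffix above and is deliberately skipped.
+ */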
+
+static struct device_node *parse_iommu_maps(struct device_node *np,
+ const char *prop_name, int index)
+{
+ if (strcmp(prop_name, "iommu-map"))
+ return NULL;
+
+ return of_parse_phandle(np, prop_name, (index * 4) + 1);
+}
+
+static struct device_node *parse_gpio_compat(struct device_node *np,
+ const char *prop_name, int index)
+{
+ struct of_phandle_args sup_args;
+
+ if (strcmp(prop_name, "gpio") && strcmp(prop_name, "gpios"))
+ return NULL;
+
+ /*
+ * Ignore node with gpio-hog property since its gpios are all provided
+ * by its parent.
+ */
+ if (of_find_property(np, "gpio-hog", NULL))
+ return NULL;
+
+ if (of_parse_phandle_with_args(np, prop_name, "#gpio-cells", index,
+ &sup_args))
+ return NULL;
+
+ return sup_args.np;
+}
+
+static struct device_node *parse_interrupts(struct device_node *np,
+ const char *prop_name, int index)
+{
+ struct of_phandle_args sup_args;
+
+ if (!IS_ENABLED(CONFIG_OF_IRQ) || IS_ENABLED(CONFIG_PPC))
+ return NULL;
+
+ if (strcmp(prop_name, "interrupts") &&
+ strcmp(prop_name, "interrupts-extended"))
+ return NULL;
+
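+	/* of_irq_parse_one() returns 0 on success; hand back the IRQ parent. */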
+ return of_irq_parse_one(np, index, &sup_args) ? NULL : sup_args.np;
+}
+
+static const struct supplier_bindings of_supplier_bindings[] = {
+ { .parse_prop = parse_clocks, },
+ { .parse_prop = parse_interconnects, },
+ { .parse_prop = parse_iommus, .optional = true, },
+ { .parse_prop = parse_iommu_maps, .optional = true, },
+ { .parse_prop = parse_mboxes, },
+ { .parse_prop = parse_io_channels, },
+ { .parse_prop = parse_interrupt_parent, },
+ { .parse_prop = parse_dmas, .optional = true, },
+ { .parse_prop = parse_power_domains, },
+ { .parse_prop = parse_hwlocks, },
+ { .parse_prop = parse_extcon, },
+ { .parse_prop = parse_nvmem_cells, },
+ { .parse_prop = parse_phys, },
+ { .parse_prop = parse_wakeup_parent, },
+ { .parse_prop = parse_pinctrl0, },
+ { .parse_prop = parse_pinctrl1, },
+ { .parse_prop = parse_pinctrl2, },
+ { .parse_prop = parse_pinctrl3, },
+ { .parse_prop = parse_pinctrl4, },
+ { .parse_prop = parse_pinctrl5, },
+ { .parse_prop = parse_pinctrl6, },
+ { .parse_prop = parse_pinctrl7, },
+ { .parse_prop = parse_pinctrl8, },
+ { .parse_prop = parse_pwms, },
+ { .parse_prop = parse_resets, },
+ { .parse_prop = parse_leds, },
+ { .parse_prop = parse_backlight, },
+ { .parse_prop = parse_gpio_compat, },
+ { .parse_prop = parse_interrupts, },
+ { .parse_prop = parse_regulators, },
+ { .parse_prop = parse_gpio, },
+ { .parse_prop = parse_gpios, },
+ {}
+};
+
+/**
+ * of_link_property - Create device links to suppliers listed in a property
+ * @con_np: The consumer device tree node which contains the property
+ * @prop_name: Name of property to be parsed
+ *
+ * This function checks if the property @prop_name that is present in the
+ * @con_np device tree node is one of the known common device tree bindings
+ * that list phandles to suppliers. If @prop_name isn't one, this function
+ * doesn't do anything.
+ *
+ * If @prop_name is one, this function attempts to create fwnode links from the
+ * consumer device tree node @con_np to all the suppliers device tree nodes
+ * listed in @prop_name.
+ *
+ * Any failed attempt to create a fwnode link will NOT result in an immediate
+ * return. of_link_property() must create links to all the available supplier
+ * device tree nodes even when attempts to create a link to one or more
+ * suppliers fail.
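+ *
+ * For example (editorial note, not in the original comment): a consumer
+ * carrying clocks = <&a 0>, <&b 1>; yields one of_link_to_phandle() call
+ * per supplier phandle, even if an earlier link attempt failed.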
+ */
+static int of_link_property(struct device_node *con_np, const char *prop_name)
+{
+ struct device_node *phandle;
+ const struct supplier_bindings *s = of_supplier_bindings;
+ unsigned int i = 0;
+ bool matched = false;
+
+ /* Do not stop at first failed link, link all available suppliers. */
+ while (!matched && s->parse_prop) {
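+		/* Bindings marked optional only create links when fw_devlink is strict. */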
+ if (s->optional && !fw_devlink_is_strict()) {
+ s++;
+ continue;
+ }
+
+ while ((phandle = s->parse_prop(con_np, prop_name, i))) {
+<<<<<<<
+=======
+ struct device_node *con_dev_np;
+
+ con_dev_np = s->node_not_dev
+ ? of_get_compat_node_parent(con_np)
+ : of_node_get(con_np);
+>>>>>>>
+ matched = true;
+ i++;
+ of_link_to_phandle(con_np, phandle);
+ of_node_put(phandle);
+ }
+ s++;
+ }
+ return 0;
+}
+
+static int of_fwnode_add_links(struct fwnode_handle *fwnode)
+{
+ struct property *p;
+ struct device_node *con_np = to_of_node(fwnode);
+
+ if (IS_ENABLED(CONFIG_X86))
+ return 0;
+
+ if (!con_np)
+ return -EINVAL;
+
+ for_each_property_of_node(con_np, p)
+ of_link_property(con_np, p->name);
+
+ return 0;
+}
+
+const struct fwnode_operations of_fwnode_ops = {
+ .get = of_fwnode_get,
+ .put = of_fwnode_put,
+ .device_is_available = of_fwnode_device_is_available,
+ .device_get_match_data = of_fwnode_device_get_match_data,
+ .property_present = of_fwnode_property_present,
+ .property_read_int_array = of_fwnode_property_read_int_array,
+ .property_read_string_array = of_fwnode_property_read_string_array,
+ .get_name = of_fwnode_get_name,
+ .get_name_prefix = of_fwnode_get_name_prefix,
+ .get_parent = of_fwnode_get_parent,
+ .get_next_child_node = of_fwnode_get_next_child_node,
+ .get_named_child_node = of_fwnode_get_named_child_node,
+ .get_reference_args = of_fwnode_get_reference_args,
+ .graph_get_next_endpoint = of_fwnode_graph_get_next_endpoint,
+ .graph_get_remote_endpoint = of_fwnode_graph_get_remote_endpoint,
+ .graph_get_port_parent = of_fwnode_graph_get_port_parent,
+ .graph_parse_endpoint = of_fwnode_graph_parse_endpoint,
+ .add_links = of_fwnode_add_links,
+};
+EXPORT_SYMBOL_GPL(of_fwnode_ops);
diff --git a/rr-cache/974706bbaaf4e5896a4bcc2f3e72e8335ce0d1dd/postimage b/rr-cache/974706bbaaf4e5896a4bcc2f3e72e8335ce0d1dd/postimage
new file mode 100644
index 0000000..a23dc31
--- /dev/null
+++ b/rr-cache/974706bbaaf4e5896a4bcc2f3e72e8335ce0d1dd/postimage
@@ -0,0 +1,97 @@
+ Qualcomm SPMI PMICs multi-function device bindings
+
+The Qualcomm SPMI series presently includes PM8941, PM8841 and PMA8084
+PMICs. These PMICs use the QPNP scheme over the SPMI interface.
+QPNP is effectively a partitioning scheme for dividing the SPMI extended
+register space up into logical pieces, together with a set of fixed register
+locations/definitions within these regions, some of which are used
+specifically for interrupt handling.
+
+The QPNP PMICs are used with the Qualcomm Snapdragon series SoCs, and are
+interfaced to the chip via the SPMI (System Power Management Interface) bus.
+Support for multiple independent functions is implemented by splitting the
+16-bit SPMI slave address space into 256 smaller fixed-size regions, 256 bytes
+each. A function can consume one or more of these fixed-size register regions.
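+(For example: the 16-bit space covers 0x10000 addresses, and 0x10000 / 0x100
+yields 256 regions; a function based at address 0x6000 occupies region 0x60.)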
+
+Required properties:
+- compatible: Should contain one of:
+ "qcom,pm660",
+ "qcom,pm660l",
+ "qcom,pm7325",
+ "qcom,pm8004",
+ "qcom,pm8005",
+ "qcom,pm8019",
+ "qcom,pm8028",
+ "qcom,pm8110",
+ "qcom,pm8150",
+ "qcom,pm8150b",
+ "qcom,pm8150c",
+ "qcom,pm8150l",
+ "qcom,pm8226",
+ "qcom,pm8350",
+ "qcom,pm8350b",
+ "qcom,pm8350c",
+ "qcom,pm8841",
+ "qcom,pm8901",
+ "qcom,pm8909",
+ "qcom,pm8916",
+ "qcom,pm8941",
+ "qcom,pm8950",
+ "qcom,pm8994",
+ "qcom,pm8998",
+ "qcom,pma8084",
+ "qcom,pmd9635",
+ "qcom,pmi8950",
+ "qcom,pmi8962",
+ "qcom,pmi8994",
+ "qcom,pmi8998",
+ "qcom,pmk8002",
+ "qcom,pmk8350",
+ "qcom,pmr735a",
+ "qcom,pmr735b",
+ "qcom,smb2351",
+
+ or generalized "qcom,spmi-pmic".
+- reg: Specifies the SPMI USID slave address for this device.
+ For more information see:
+ Documentation/devicetree/bindings/spmi/spmi.yaml
+
+Required properties for peripheral child nodes:
+- compatible: Should contain "qcom,xxx", where "xxx" is a peripheral name.
+
+Optional properties for peripheral child nodes:
+- interrupts: Interrupts are specified as a 4-tuple. For more information
+ see:
+ Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt
+- interrupt-names: Names for the interrupts listed in the interrupts property
+
+Each child node of an SPMI slave ID represents a function of the PMIC. In the
+example below, the rtc device node represents a peripheral of pm8941
+SID = 0, and the regulator device node represents a peripheral of pm8941 SID = 1.
+
+Example:
+
+ spmi {
+ compatible = "qcom,spmi-pmic-arb";
+
+ pm8941@0 {
+ compatible = "qcom,pm8941", "qcom,spmi-pmic";
+ reg = <0x0 SPMI_USID>;
+
+ rtc {
+ compatible = "qcom,rtc";
+ interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "alarm";
+ };
+ };
+
+ pm8941@1 {
+ compatible = "qcom,pm8941", "qcom,spmi-pmic";
+ reg = <0x1 SPMI_USID>;
+
+ regulator {
+ compatible = "qcom,regulator";
+ regulator-name = "8941_boost";
+ };
+ };
+ };
diff --git a/rr-cache/974706bbaaf4e5896a4bcc2f3e72e8335ce0d1dd/preimage b/rr-cache/974706bbaaf4e5896a4bcc2f3e72e8335ce0d1dd/preimage
new file mode 100644
index 0000000..f1289f3
--- /dev/null
+++ b/rr-cache/974706bbaaf4e5896a4bcc2f3e72e8335ce0d1dd/preimage
@@ -0,0 +1,124 @@
+ Qualcomm SPMI PMICs multi-function device bindings
+
+The Qualcomm SPMI series presently includes PM8941, PM8841 and PMA8084
+PMICs. These PMICs use the QPNP scheme over the SPMI interface.
+QPNP is effectively a partitioning scheme for dividing the SPMI extended
+register space up into logical pieces, together with a set of fixed register
+locations/definitions within these regions, some of which are used
+specifically for interrupt handling.
+
+The QPNP PMICs are used with the Qualcomm Snapdragon series SoCs, and are
+interfaced to the chip via the SPMI (System Power Management Interface) bus.
+Support for multiple independent functions is implemented by splitting the
+16-bit SPMI slave address space into 256 smaller fixed-size regions, 256 bytes
+each. A function can consume one or more of these fixed-size register regions.
+
+Required properties:
+- compatible: Should contain one of:
+<<<<<<<
+ "qcom,pm660",
+ "qcom,pm660l",
+=======
+ "qcom,pm8941",
+ "qcom,pm8841",
+ "qcom,pma8084",
+ "qcom,pm8019",
+ "qcom,pm8226",
+ "qcom,pm8110",
+ "qcom,pma8084",
+ "qcom,pmi8962",
+ "qcom,pmd9635",
+ "qcom,pm8994",
+ "qcom,pmi8994",
+ "qcom,pm8916",
+ "qcom,pm8004",
+ "qcom,pm8909",
+ "qcom,pm8950",
+ "qcom,pmi8950",
+ "qcom,pm8998",
+ "qcom,pmi8998",
+ "qcom,pm8005",
+ "qcom,pm8350",
+ "qcom,pm8350b",
+ "qcom,pm8350c",
+ "qcom,pmk8350",
+>>>>>>>
+ "qcom,pm7325",
+ "qcom,pm8004",
+ "qcom,pm8005",
+ "qcom,pm8019",
+ "qcom,pm8028",
+ "qcom,pm8110",
+ "qcom,pm8150",
+ "qcom,pm8150b",
+ "qcom,pm8150c",
+ "qcom,pm8150l",
+ "qcom,pm8226",
+ "qcom,pm8350c",
+ "qcom,pm8841",
+ "qcom,pm8901",
+ "qcom,pm8909",
+ "qcom,pm8916",
+ "qcom,pm8941",
+ "qcom,pm8950",
+ "qcom,pm8994",
+ "qcom,pm8998",
+ "qcom,pma8084",
+ "qcom,pmd9635",
+ "qcom,pmi8950",
+ "qcom,pmi8962",
+ "qcom,pmi8994",
+ "qcom,pmi8998",
+ "qcom,pmk8002",
+ "qcom,pmk8350",
+ "qcom,pmr735a",
+<<<<<<<
+ "qcom,pmr735b",
+
+=======
+ "qcom,smb2351",
+>>>>>>>
+ or generalized "qcom,spmi-pmic".
+- reg: Specifies the SPMI USID slave address for this device.
+ For more information see:
+ Documentation/devicetree/bindings/spmi/spmi.yaml
+
+Required properties for peripheral child nodes:
+- compatible: Should contain "qcom,xxx", where "xxx" is a peripheral name.
+
+Optional properties for peripheral child nodes:
+- interrupts: Interrupts are specified as a 4-tuple. For more information
+ see:
+ Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb.txt
+- interrupt-names: Names for the interrupts listed in the interrupts property
+
+Each child node of an SPMI slave ID represents a function of the PMIC. In the
+example below, the rtc device node represents a peripheral of pm8941
+SID = 0, and the regulator device node represents a peripheral of pm8941 SID = 1.
+
+Example:
+
+ spmi {
+ compatible = "qcom,spmi-pmic-arb";
+
+ pm8941@0 {
+ compatible = "qcom,pm8941", "qcom,spmi-pmic";
+ reg = <0x0 SPMI_USID>;
+
+ rtc {
+ compatible = "qcom,rtc";
+ interrupts = <0x0 0x61 0x1 IRQ_TYPE_EDGE_RISING>;
+ interrupt-names = "alarm";
+ };
+ };
+
+ pm8941@1 {
+ compatible = "qcom,pm8941", "qcom,spmi-pmic";
+ reg = <0x1 SPMI_USID>;
+
+ regulator {
+ compatible = "qcom,regulator";
+ regulator-name = "8941_boost";
+ };
+ };
+ };
diff --git a/rr-cache/9a11bb9d0aee8e2b21835affb17471cbe026fdba/thisimage b/rr-cache/9a11bb9d0aee8e2b21835affb17471cbe026fdba/thisimage
index c1150d9..336fda5 100644
--- a/rr-cache/9a11bb9d0aee8e2b21835affb17471cbe026fdba/thisimage
+++ b/rr-cache/9a11bb9d0aee8e2b21835affb17471cbe026fdba/thisimage
@@ -36,6 +36,7 @@ obj-$(CONFIG_MSM_GCC_8939) += gcc-msm8939.o
obj-$(CONFIG_MSM_GCC_8953) += gcc-msm8953.o
obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
+obj-$(CONFIG_MSM_GCC_8976) += gcc-msm8976.o
obj-$(CONFIG_MSM_GCC_8994) += gcc-msm8994.o
obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
@@ -86,6 +87,7 @@ obj-$(CONFIG_SDM_LPASSCC_845) += lpasscc-sdm845.o
obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SDX_GCC_55) += gcc-sdx55.o
obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o
+obj-$(CONFIG_SDX_GCC_65) += gcc-sdx65.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o
obj-$(CONFIG_SM_GCC_6125) += gcc-sm6125.o
@@ -93,6 +95,7 @@ obj-$(CONFIG_SM_GCC_6350) += gcc-sm6350.o
obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o
obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o
obj-$(CONFIG_SM_GCC_8350) += gcc-sm8350.o
+obj-$(CONFIG_SM_GCC_8450) += gcc-sm8450.o
obj-$(CONFIG_SM_GPUCC_8150) += gpucc-sm8150.o
obj-$(CONFIG_SM_GPUCC_8250) += gpucc-sm8250.o
obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
diff --git a/rr-cache/b1a5dd916641055afa69c27a74431a0b5f649585/thisimage b/rr-cache/b1a5dd916641055afa69c27a74431a0b5f649585/thisimage
index 994eaf5..db161a4 100644
--- a/rr-cache/b1a5dd916641055afa69c27a74431a0b5f649585/thisimage
+++ b/rr-cache/b1a5dd916641055afa69c27a74431a0b5f649585/thisimage
@@ -1062,6 +1062,9 @@ struct ov8856 {
const struct ov8856_lane_cfg *priv_lane;
u8 modes_size;
+
+ /* True if the device has been identified */
+ bool identified;
};
struct ov8856_lane_cfg {
@@ -1308,6 +1311,71 @@ static int ov8856_write_reg_list(struct ov8856 *ov8856,
return 0;
}
+static int ov8856_identify_module(struct ov8856 *ov8856)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
+ int ret;
+ u32 val;
+
+ if (ov8856->identified)
+ return 0;
+
+ ret = ov8856_read_reg(ov8856, OV8856_REG_CHIP_ID,
+ OV8856_REG_VALUE_24BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != OV8856_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x",
+ OV8856_CHIP_ID, val);
+ return -ENXIO;
+ }
+
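+	/*
+	 * The module revision is stored in OTP: enter streaming mode,
+	 * trigger an automatic OTP load, read the revision register and
+	 * drop back to standby afterwards.
+	 */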
+ ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
+ OV8856_REG_VALUE_08BIT, OV8856_MODE_STREAMING);
+ if (ret)
+ return ret;
+
+ ret = ov8856_write_reg(ov8856, OV8856_OTP_MODE_CTRL,
+ OV8856_REG_VALUE_08BIT, OV8856_OTP_MODE_AUTO);
+ if (ret) {
+ dev_err(&client->dev, "failed to set otp mode");
+ return ret;
+ }
+
+ ret = ov8856_write_reg(ov8856, OV8856_OTP_LOAD_CTRL,
+ OV8856_REG_VALUE_08BIT,
+ OV8856_OTP_LOAD_CTRL_ENABLE);
+ if (ret) {
+ dev_err(&client->dev, "failed to enable load control");
+ return ret;
+ }
+
+ ret = ov8856_read_reg(ov8856, OV8856_MODULE_REVISION,
+ OV8856_REG_VALUE_08BIT, &val);
+ if (ret) {
+ dev_err(&client->dev, "failed to read module revision");
+ return ret;
+ }
+
+ dev_info(&client->dev, "OV8856 revision %x (%s) at address 0x%02x\n",
+ val,
+ val == OV8856_2A_MODULE ? "2A" :
+ val == OV8856_1B_MODULE ? "1B" : "unknown revision",
+ client->addr);
+
+ ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
+ OV8856_REG_VALUE_08BIT, OV8856_MODE_STANDBY);
+ if (ret) {
+ dev_err(&client->dev, "failed to exit streaming mode");
+ return ret;
+ }
+
+ ov8856->identified = true;
+
+ return 0;
+}
+
static int ov8856_update_digital_gain(struct ov8856 *ov8856, u32 d_gain)
{
int ret;
@@ -1592,6 +1660,10 @@ static int ov8856_start_streaming(struct ov8856 *ov8856)
const struct ov8856_reg_list *reg_list;
int link_freq_index, ret;
+ ret = ov8856_identify_module(ov8856);
+ if (ret)
+ return ret;
+
link_freq_index = ov8856->cur_mode->link_freq_index;
reg_list = &ov8856->priv_lane->link_freq_configs[link_freq_index].reg_list;
@@ -1899,65 +1971,6 @@ static const struct v4l2_subdev_internal_ops ov8856_internal_ops = {
.open = ov8856_open,
};
-static int ov8856_identify_module(struct ov8856 *ov8856)
-{
- struct i2c_client *client = v4l2_get_subdevdata(&ov8856->sd);
- int ret;
- u32 val;
-
- ret = ov8856_read_reg(ov8856, OV8856_REG_CHIP_ID,
- OV8856_REG_VALUE_24BIT, &val);
- if (ret)
- return ret;
-
- if (val != OV8856_CHIP_ID) {
- dev_err(&client->dev, "chip id mismatch: %x!=%x",
- OV8856_CHIP_ID, val);
- return -ENXIO;
- }
-
- ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
- OV8856_REG_VALUE_08BIT, OV8856_MODE_STREAMING);
- if (ret)
- return ret;
-
- ret = ov8856_write_reg(ov8856, OV8856_OTP_MODE_CTRL,
- OV8856_REG_VALUE_08BIT, OV8856_OTP_MODE_AUTO);
- if (ret) {
- dev_err(&client->dev, "failed to set otp mode");
- return ret;
- }
-
- ret = ov8856_write_reg(ov8856, OV8856_OTP_LOAD_CTRL,
- OV8856_REG_VALUE_08BIT,
- OV8856_OTP_LOAD_CTRL_ENABLE);
- if (ret) {
- dev_err(&client->dev, "failed to enable load control");
- return ret;
- }
-
- ret = ov8856_read_reg(ov8856, OV8856_MODULE_REVISION,
- OV8856_REG_VALUE_08BIT, &val);
- if (ret) {
- dev_err(&client->dev, "failed to read module revision");
- return ret;
- }
-
- dev_info(&client->dev, "OV8856 revision %x (%s) at address 0x%02x\n",
- val,
- val == OV8856_2A_MODULE ? "2A" :
- val == OV8856_1B_MODULE ? "1B" : "unknown revision",
- client->addr);
-
- ret = ov8856_write_reg(ov8856, OV8856_REG_MODE_SELECT,
- OV8856_REG_VALUE_08BIT, OV8856_MODE_STANDBY);
- if (ret) {
- dev_err(&client->dev, "failed to exit streaming mode");
- return ret;
- }
-
- return 0;
-}
static int ov8856_get_hwcfg(struct ov8856 *ov8856, struct device *dev)
{
@@ -2081,6 +2094,7 @@ static int ov8856_probe(struct i2c_client *client)
{
struct ov8856 *ov8856;
int ret;
+ bool full_power;
ov8856 = devm_kzalloc(&client->dev, sizeof(*ov8856), GFP_KERNEL);
if (!ov8856)
@@ -2095,16 +2109,19 @@ static int ov8856_probe(struct i2c_client *client)
v4l2_i2c_subdev_init(&ov8856->sd, client, &ov8856_subdev_ops);
- ret = __ov8856_power_on(ov8856);
- if (ret) {
- dev_err(&client->dev, "failed to power on\n");
- return ret;
- }
+ full_power = acpi_dev_state_d0(&client->dev);
+ if (full_power) {
+ ret = __ov8856_power_on(ov8856);
+ if (ret) {
+ dev_err(&client->dev, "failed to power on\n");
+ return ret;
+ }
- ret = ov8856_identify_module(ov8856);
- if (ret) {
- dev_err(&client->dev, "failed to find sensor: %d", ret);
- goto probe_power_off;
+ ret = ov8856_identify_module(ov8856);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d", ret);
+ goto probe_power_off;
+ }
}
mutex_init(&ov8856->mutex);
@@ -2134,11 +2151,9 @@ static int ov8856_probe(struct i2c_client *client)
goto probe_error_media_entity_cleanup;
}
- /*
- * Device is already turned on by i2c-core with ACPI domain PM.
- * Enable runtime PM and turn off the device.
- */
- pm_runtime_set_active(&client->dev);
+ /* Set the device's state to active if it's in D0 state. */
+ if (full_power)
+ pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
pm_runtime_idle(&client->dev);
@@ -2185,6 +2200,7 @@ static struct i2c_driver ov8856_i2c_driver = {
},
.probe_new = ov8856_probe,
.remove = ov8856_remove,
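+	/*
+	 * Allow probe while the ACPI device is left in a low-power state;
+	 * sensor identification is deferred to the first stream-on, see
+	 * ov8856_start_streaming().
+	 */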
+ .flags = I2C_DRV_ACPI_WAIVE_D0_PROBE,
};
module_i2c_driver(ov8856_i2c_driver);
diff --git a/rr-cache/eb2a574f27709226377dc4000c4882d227af3594/postimage b/rr-cache/eb2a574f27709226377dc4000c4882d227af3594/postimage
new file mode 100644
index 0000000..e4df219
--- /dev/null
+++ b/rr-cache/eb2a574f27709226377dc4000c4882d227af3594/postimage
@@ -0,0 +1,1027 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/sound/qcom,q6afe.h>
+#include <dt-bindings/sound/qcom,q6asm.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "sm8250.dtsi"
+#include "pm8150.dtsi"
+#include "pm8150b.dtsi"
+#include "pm8150l.dtsi"
+#include "pm8009.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SM8250 MTP";
+ compatible = "qcom,sm8250-mtp", "qcom,sm8250";
+
+ aliases {
+ serial0 = &uart12;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ thermal-zones {
+ camera-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150l_adc_tm 0>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ conn-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150b_adc_tm 0>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mmw-pa1-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150_adc_tm 2>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mmw-pa2-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150l_adc_tm 2>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ skin-msm-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150l_adc_tm 1>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ skin-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150_adc_tm 1>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ xo-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150_adc_tm 0>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+ };
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+ };
+
+ vreg_s4a_1p8: pm8150-s4 {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_s4a_1p8";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+
+ vin-supply = <&vph_pwr>;
+ };
+
+ vreg_s6c_0p88: smpc6-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_s6c_0p88";
+
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-always-on;
+ vin-supply = <&vph_pwr>;
+ };
+
+ display_panel_avdd: display_gpio_regulator@1 {
+ compatible = "regulator-fixed";
+ regulator-name = "display_panel_avdd";
+ regulator-min-microvolt = <5500000>;
+ regulator-max-microvolt = <5500000>;
+ regulator-enable-ramp-delay = <233>;
+ gpio = <&tlmm 61 0>;
+ enable-active-high;
+ regulator-boot-on;
+ pinctrl-names = "default";
+ pinctrl-0 = <&display_panel_avdd_default>;
+ };
+
+};
+
+&adsp {
+ status = "okay";
+ firmware-name = "qcom/sm8250/adsp.mbn";
+};
+
+&apps_rsc {
+ pm8150-rpmh-regulators {
+ compatible = "qcom,pm8150-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+ vdd-s9-supply = <&vph_pwr>;
+ vdd-s10-supply = <&vph_pwr>;
+ vdd-l1-l8-l11-supply = <&vreg_s6c_0p88>;
+ vdd-l2-l10-supply = <&vreg_bob>;
+ vdd-l3-l4-l5-l18-supply = <&vreg_s6a_0p95>;
+ vdd-l6-l9-supply = <&vreg_s8c_1p3>;
+ vdd-l7-l12-l14-l15-supply = <&vreg_s5a_1p9>;
+ vdd-l13-l16-l17-supply = <&vreg_bob>;
+
+ vreg_s5a_1p9: smps5 {
+ regulator-name = "vreg_s5a_1p9";
+ regulator-min-microvolt = <1904000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s6a_0p95: smps6 {
+ regulator-name = "vreg_s6a_0p95";
+ regulator-min-microvolt = <920000>;
+ regulator-max-microvolt = <1128000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2a_3p1: ldo2 {
+ regulator-name = "vreg_l2a_3p1";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3a_0p9: ldo3 {
+ regulator-name = "vreg_l3a_0p9";
+ regulator-min-microvolt = <928000>;
+ regulator-max-microvolt = <932000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5a_0p875: ldo5 {
+ regulator-name = "vreg_l5a_0p875";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6a_1p2: ldo6 {
+ regulator-name = "vreg_l6a_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7a_1p7: ldo7 {
+ regulator-name = "vreg_l7a_1p7";
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9a_1p2: ldo9 {
+ regulator-name = "vreg_l9a_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10a_1p8: ldo10 {
+ regulator-name = "vreg_l10a_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12a_1p8: ldo12 {
+ regulator-name = "vreg_l12a_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13a_ts_3p0: ldo13 {
+ regulator-name = "vreg_l13a_ts_3p0";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l14a_1p8: ldo14 {
+ regulator-name = "vreg_l14a_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15a_11ad_io_1p8: ldo15 {
+ regulator-name = "vreg_l15a_11ad_io_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l16a_2p7: ldo16 {
+ regulator-name = "vreg_l16a_2p7";
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l17a_3p0: ldo17 {
+ regulator-name = "vreg_l17a_3p0";
+ regulator-min-microvolt = <2856000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l18a_0p9: ldo18 {
+ regulator-name = "vreg_l18a_0p9";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ pm8150l-rpmh-regulators {
+ compatible = "qcom,pm8150l-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+ vdd-l1-l8-supply = <&vreg_s4a_1p8>;
+ vdd-l2-l3-supply = <&vreg_s8c_1p3>;
+ vdd-l4-l5-l6-supply = <&vreg_bob>;
+ vdd-l7-l11-supply = <&vreg_bob>;
+ vdd-l9-l10-supply = <&vreg_bob>;
+ vdd-bob-supply = <&vph_pwr>;
+
+ vreg_bob: bob {
+ regulator-name = "vreg_bob";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <4000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
+ };
+
+ vreg_s8c_1p3: smps8 {
+ regulator-name = "vreg_s8c_1p3";
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1c_1p8: ldo1 {
+ regulator-name = "vreg_l1c_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2c_1p2: ldo2 {
+ regulator-name = "vreg_l2c_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3c_0p92: ldo3 {
+ regulator-name = "vreg_l3c_0p92";
+ regulator-min-microvolt = <920000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4c_1p7: ldo4 {
+ regulator-name = "vreg_l4c_1p7";
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <2928000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5c_1p8: ldo5 {
+ regulator-name = "vreg_l5c_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2928000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6c_2p9: ldo6 {
+ regulator-name = "vreg_l6c_2p9";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7c_cam_vcm0_2p85: ldo7 {
+ regulator-name = "vreg_l7c_cam_vcm0_2p85";
+ regulator-min-microvolt = <2856000>;
+ regulator-max-microvolt = <3104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8c_1p8: ldo8 {
+ regulator-name = "vreg_l8c_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9c_2p9: ldo9 {
+ regulator-name = "vreg_l9c_2p9";
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10c_3p0: ldo10 {
+ regulator-name = "vreg_l10c_3p0";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l11c_3p3: ldo11 {
+ regulator-name = "vreg_l11c_3p3";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3312000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ pm8009-rpmh-regulators {
+ compatible = "qcom,pm8009-rpmh-regulators";
+ qcom,pmic-id = "f";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vreg_bob>;
+ vdd-l2-supply = <&vreg_s8c_1p3>;
+ vdd-l5-l6-supply = <&vreg_bob>;
+ vdd-l7-supply = <&vreg_s4a_1p8>;
+
+ vreg_l1f_cam_dvdd1_1p1: ldo1 {
+ regulator-name = "vreg_l1f_cam_dvdd1_1p1";
+ regulator-min-microvolt = <1104000>;
+ regulator-max-microvolt = <1104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2f_cam_dvdd0_1p2: ldo2 {
+ regulator-name = "vreg_l2f_cam_dvdd0_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3f_cam_dvdd2_1p05: ldo3 {
+ regulator-name = "vreg_l3f_cam_dvdd2_1p05";
+ regulator-min-microvolt = <1056000>;
+ regulator-max-microvolt = <1056000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5f_cam_avdd0_2p85: ldo5 {
+ regulator-name = "vreg_l5f_cam_avdd0_2p85";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6f_cam_avdd1_2p85: ldo6 {
+ regulator-name = "vreg_l6f_cam_avdd1_2p85";
+ regulator-min-microvolt = <2856000>;
+ regulator-max-microvolt = <2856000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7f_1p8: ldo7 {
+ regulator-name = "vreg_l7f_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&cdsp {
+ status = "okay";
+ firmware-name = "qcom/sm8250/cdsp.mbn";
+};
+
+&dsi0 {
+ status = "okay";
+ vdda-supply = <&vreg_l9a_1p2>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ port@1 {
+ endpoint {
+ data-lanes = <0 1 2 3>;
+ };
+ };
+ };
+};
+
+&dsi0_phy {
+ status = "okay";
+ vdds-supply = <&vreg_l5a_0p875>;
+};
+
+#if 0
+&dsi1 {
+ status = "okay";
+ vdda-supply = <&vreg_l9a_1p2>;
+
+ ports {
+ port@1 {
+ endpoint {
+ data-lanes = <0 1 2 3>;
+ };
+ };
+ };
+};
+
+&dsi1_phy {
+ status = "okay";
+ vdds-supply = <&vreg_l5a_0p875>;
+};
+#endif
+
+&gmu {
+ status = "okay";
+};
+
+&gpu {
+ status = "okay";
+
+ zap-shader {
+ memory-region = <&gpu_mem>;
+ firmware-name = "qcom/sm8250/a650_zap.mbn";
+ };
+};
+
+&i2c1 {
+ status = "okay";
+ clock-frequency = <1000000>;
+
+ /* NQ NFC chip @28 */
+};
+
+&i2c13 {
+ status = "okay";
+
+ /* st,stmfts @ 49 */
+};
+
+&i2c15 {
+ status = "okay";
+
+ /* smb1390 @ 10 */
+ /* rtc6226 @ 64 */
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdss_mdp {
+ status = "okay";
+};
+
+&pm8150_adc {
+ xo-therm@4c {
+ reg = <ADC5_XO_THERM_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+
+ skin-therm@4d {
+ reg = <ADC5_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+
+ pa-therm1@4e {
+ reg = <ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+};
+
+&pm8150_adc_tm {
+ status = "okay";
+
+ xo-therm@0 {
+ reg = <0>;
+ io-channels = <&pm8150_adc ADC5_XO_THERM_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ skin-therm@1 {
+ reg = <1>;
+ io-channels = <&pm8150_adc ADC5_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ pa-therm1@2 {
+ reg = <2>;
+ io-channels = <&pm8150_adc ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+};
+
+&pm8150b_adc {
+ conn-therm@4f {
+ reg = <ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+};
+
+&pm8150b_adc_tm {
+ status = "okay";
+
+ conn-therm@0 {
+ reg = <0>;
+ io-channels = <&pm8150b_adc ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+};
+
+&pm8150l_adc_tm {
+ status = "okay";
+
+ camera-flash-therm@0 {
+ reg = <0>;
+ io-channels = <&pm8150l_adc ADC5_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ skin-msm-therm@1 {
+ reg = <1>;
+ io-channels = <&pm8150l_adc ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ pa-therm2@2 {
+ reg = <2>;
+ io-channels = <&pm8150l_adc ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+};
+
+&pm8150l_adc {
+ camera-flash-therm@4d {
+ reg = <ADC5_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+
+ skin-msm-therm@4e {
+ reg = <ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+
+ pa-therm2@4f {
+ reg = <ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+};
+
+&qupv3_id_0 {
+ status = "okay";
+};
+
+&qupv3_id_1 {
+ status = "okay";
+};
+
+&qupv3_id_2 {
+ status = "okay";
+};
+
+&slpi {
+ status = "okay";
+ firmware-name = "qcom/sm8250/slpi.mbn";
+};
+
+&soc {
+ wcd938x: codec {
+ compatible = "qcom,wcd9380-codec";
+ #sound-dai-cells = <1>;
+ reset-gpios = <&tlmm 32 0>;
+ vdd-buck-supply = <&vreg_s4a_1p8>;
+ vdd-rxtx-supply = <&vreg_s4a_1p8>;
+ vdd-io-supply = <&vreg_s4a_1p8>;
+ vdd-mic-bias-supply = <&vreg_bob>;
+ qcom,micbias1-microvolt = <1800000>;
+ qcom,micbias2-microvolt = <1800000>;
+ qcom,micbias3-microvolt = <1800000>;
+ qcom,micbias4-microvolt = <1800000>;
+ qcom,mbhc-buttons-vthreshold-microvolt = <75000 150000 237000 500000 500000 500000 500000 500000>;
+ qcom,mbhc-headset-vthreshold-microvolt = <1700000>;
+ qcom,mbhc-headphone-vthreshold-microvolt = <50000>;
+ qcom,rx-device = <&wcd_rx>;
+ qcom,tx-device = <&wcd_tx>;
+ };
+};
+
+&sound {
+ compatible = "qcom,sm8250-sndcard";
+ model = "SM8250-MTP-WCD9380-WSA8810-VA-DMIC";
+ audio-routing =
+ "SpkrLeft IN", "WSA_SPK1 OUT",
+ "SpkrRight IN", "WSA_SPK2 OUT",
+ "IN1_HPHL", "HPHL_OUT",
+ "IN2_HPHR", "HPHR_OUT",
+ "AMIC1", "MIC BIAS1",
+ "AMIC2", "MIC BIAS2",
+ "AMIC3", "MIC BIAS3",
+ "AMIC4", "MIC BIAS3",
+ "AMIC5", "MIC BIAS4",
+ "TX SWR_ADC0", "ADC1_OUTPUT",
+ "TX SWR_ADC1", "ADC2_OUTPUT",
+ "TX SWR_ADC2", "ADC3_OUTPUT",
+ "TX SWR_ADC3", "ADC4_OUTPUT",
+ "TX SWR_DMIC0", "DMIC1_OUTPUT",
+ "TX SWR_DMIC1", "DMIC2_OUTPUT",
+ "TX SWR_DMIC2", "DMIC3_OUTPUT",
+ "TX SWR_DMIC3", "DMIC4_OUTPUT",
+ "TX SWR_DMIC4", "DMIC5_OUTPUT",
+ "TX SWR_DMIC5", "DMIC6_OUTPUT",
+ "TX SWR_DMIC6", "DMIC7_OUTPUT",
+ "TX SWR_DMIC7", "DMIC8_OUTPUT";
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ mm2-dai-link {
+ link-name = "MultiMedia2";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA2>;
+ };
+ };
+
+ mm3-dai-link {
+ link-name = "MultiMedia3";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA3>;
+ };
+ };
+
+ wcd-playback-dai-link {
+ link-name = "WCD Playback";
+ cpu {
+ sound-dai = <&q6afedai RX_CODEC_DMA_RX_0>;
+ };
+ codec {
+ sound-dai = <&wcd938x 0>, <&swr1 0>, <&rxmacro 0>;
+ };
+ platform {
+ sound-dai = <&q6routing>;
+ };
+ };
+
+ wcd-capture-dai-link {
+ link-name = "WCD Capture";
+ cpu {
+ sound-dai = <&q6afedai TX_CODEC_DMA_TX_3>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 1>, <&swr2 0>, <&txmacro 0>;
+ };
+ platform {
+ sound-dai = <&q6routing>;
+ };
+ };
+
+ wsa-dai-link {
+ link-name = "WSA Playback";
+ cpu {
+ sound-dai = <&q6afedai WSA_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&left_spkr>, <&right_spkr>, <&swr0 0>, <&wsamacro 0>;
+ };
+ platform {
+ sound-dai = <&q6routing>;
+ };
+ };
+
+ va-dai-link {
+ link-name = "VA Capture";
+ cpu {
+ sound-dai = <&q6afedai VA_CODEC_DMA_TX_0>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+
+ codec {
+ sound-dai = <&vamacro 0>;
+ };
+ };
+};
+
+&swr0 {
+	left_spkr: wsa8810-right@0,3 {
+ compatible = "sdw10217211000";
+ reg = <0 3>;
+ powerdown-gpios = <&tlmm 26 GPIO_ACTIVE_HIGH>;
+ #thermal-sensor-cells = <0>;
+ sound-name-prefix = "SpkrLeft";
+ #sound-dai-cells = <0>;
+ };
+
+	right_spkr: wsa8810-left@0,4 {
+ compatible = "sdw10217211000";
+ reg = <0 4>;
+ powerdown-gpios = <&tlmm 127 GPIO_ACTIVE_HIGH>;
+ #thermal-sensor-cells = <0>;
+ sound-name-prefix = "SpkrRight";
+ #sound-dai-cells = <0>;
+ };
+};
+
+&swr1 {
+ wcd_rx: wcd9380-rx@0,4 {
+ compatible = "sdw20217010d00";
+ reg = <0 4>;
+ qcom,rx-port-mapping = <1 2 3 4 5>;
+ };
+};
+
+&swr2 {
+ wcd_tx: wcd9380-tx@0,3 {
+ compatible = "sdw20217010d00";
+ reg = <0 3>;
+ qcom,tx-port-mapping = <2 3 4 5>;
+ };
+};
+
+&tlmm {
+ gpio-reserved-ranges = <28 4>, <40 4>;
+
+ display_panel_avdd_default: display_panel_avdd_default {
+ mux {
+ pins = "gpio61";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio61";
+ drive-strength = <8>;
+ bias-disable = <0>;
+ output-high;
+ };
+ };
+
+ wcd938x_reset_default: wcd938x_reset_default {
+ mux {
+ pins = "gpio32";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio32";
+ drive-strength = <16>;
+ output-high;
+ };
+ };
+
+ wcd938x_reset_sleep: wcd938x_reset_sleep {
+ mux {
+ pins = "gpio32";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio32";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+ };
+
+};
+
+&uart12 {
+ status = "okay";
+};
+
+&ufs_mem_hc {
+ status = "okay";
+
+ vcc-supply = <&vreg_l17a_3p0>;
+ vcc-max-microamp = <750000>;
+ vccq-supply = <&vreg_l6a_1p2>;
+ vccq-max-microamp = <700000>;
+ vccq2-supply = <&vreg_s4a_1p8>;
+ vccq2-max-microamp = <750000>;
+};
+
+&ufs_mem_phy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l5a_0p875>;
+ vdda-pll-supply = <&vreg_l9a_1p2>;
+};
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_hsphy {
+ status = "okay";
+
+ vdda-pll-supply = <&vreg_l5a_0p875>;
+ vdda18-supply = <&vreg_l12a_1p8>;
+ vdda33-supply = <&vreg_l2a_3p1>;
+};
+
+&usb_1_qmpphy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l9a_1p2>;
+ vdda-pll-supply = <&vreg_l18a_0p9>;
+};
+
+&usb_2 {
+ status = "okay";
+};
+
+&usb_2_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_2_hsphy {
+ status = "okay";
+
+ vdda-pll-supply = <&vreg_l5a_0p875>;
+ vdda18-supply = <&vreg_l12a_1p8>;
+ vdda33-supply = <&vreg_l2a_3p1>;
+};
+
+&usb_2_qmpphy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l9a_1p2>;
+ vdda-pll-supply = <&vreg_l18a_0p9>;
+};
+
+&swr0 {
+	left_right: wsa8810-right {
+ compatible = "sdw10217211000";
+ reg = <0 2>;
+ powerdown-gpios = <&tlmm 127 GPIO_ACTIVE_HIGH>;
+ #thermal-sensor-cells = <0>;
+ sound-name-prefix = "SpkrRight";
+ #sound-dai-cells = <0>;
+ };
+
+	left_spkr: wsa8810-left {
+ compatible = "sdw10217211000";
+ reg = <0 1>;
+ powerdown-gpios = <&tlmm 26 GPIO_ACTIVE_HIGH>;
+ #thermal-sensor-cells = <0>;
+ sound-name-prefix = "SpkrLeft";
+ #sound-dai-cells = <0>;
+ };
+};
+
+&q6asmdai {
+ dai@0 {
+ reg = <0>;
+ direction = <2>;
+ };
+};
+
+&sound {
+ compatible = "qcom,sm8250-sndcard";
+ model = "SM8250";
+ audio-routing =
+ "SpkrLeft IN", "WSA_SPK1 OUT",
+ "MM_DL1", "MultiMedia1 Playback";
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ dma-dai-link {
+ link-name = "WSA Playback";
+ cpu {
+ sound-dai = <&q6afedai WSA_CODEC_DMA_RX_0>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+
+ codec {
+ sound-dai = <&left_spkr>, <&swr0 0>, <&wsamacro 0>;
+ };
+ };
+};
+
+&venus {
+ status = "okay";
+};
diff --git a/rr-cache/eb2a574f27709226377dc4000c4882d227af3594/preimage b/rr-cache/eb2a574f27709226377dc4000c4882d227af3594/preimage
new file mode 100644
index 0000000..87a0d87
--- /dev/null
+++ b/rr-cache/eb2a574f27709226377dc4000c4882d227af3594/preimage
@@ -0,0 +1,1029 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/sound/qcom,q6afe.h>
+#include <dt-bindings/sound/qcom,q6asm.h>
+#include <dt-bindings/gpio/gpio.h>
+#include "sm8250.dtsi"
+#include "pm8150.dtsi"
+#include "pm8150b.dtsi"
+#include "pm8150l.dtsi"
+#include "pm8009.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SM8250 MTP";
+ compatible = "qcom,sm8250-mtp", "qcom,sm8250";
+
+ aliases {
+ serial0 = &uart12;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ thermal-zones {
+ camera-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150l_adc_tm 0>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ conn-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150b_adc_tm 0>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mmw-pa1-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150_adc_tm 2>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ mmw-pa2-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150l_adc_tm 2>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ skin-msm-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150l_adc_tm 1>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ skin-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150_adc_tm 1>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+
+ xo-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150_adc_tm 0>;
+
+ trips {
+ active-config0 {
+ temperature = <125000>;
+ hysteresis = <1000>;
+ type = "passive";
+ };
+ };
+ };
+ };
+
+ vph_pwr: vph-pwr-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vph_pwr";
+ regulator-min-microvolt = <3700000>;
+ regulator-max-microvolt = <3700000>;
+ };
+
+ vreg_s4a_1p8: pm8150-s4 {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_s4a_1p8";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+
+ regulator-always-on;
+ regulator-boot-on;
+
+ vin-supply = <&vph_pwr>;
+ };
+
+ vreg_s6c_0p88: smpc6-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vreg_s6c_0p88";
+
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-always-on;
+ vin-supply = <&vph_pwr>;
+ };
+
+ display_panel_avdd: display_gpio_regulator@1 {
+ compatible = "regulator-fixed";
+ regulator-name = "display_panel_avdd";
+ regulator-min-microvolt = <5500000>;
+ regulator-max-microvolt = <5500000>;
+ regulator-enable-ramp-delay = <233>;
+ gpio = <&tlmm 61 0>;
+ enable-active-high;
+ regulator-boot-on;
+ pinctrl-names = "default";
+ pinctrl-0 = <&display_panel_avdd_default>;
+ };
+
+};
+
+&adsp {
+ status = "okay";
+ firmware-name = "qcom/sm8250/adsp.mbn";
+};
+
+&apps_rsc {
+ pm8150-rpmh-regulators {
+ compatible = "qcom,pm8150-rpmh-regulators";
+ qcom,pmic-id = "a";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+ vdd-s9-supply = <&vph_pwr>;
+ vdd-s10-supply = <&vph_pwr>;
+ vdd-l1-l8-l11-supply = <&vreg_s6c_0p88>;
+ vdd-l2-l10-supply = <&vreg_bob>;
+ vdd-l3-l4-l5-l18-supply = <&vreg_s6a_0p95>;
+ vdd-l6-l9-supply = <&vreg_s8c_1p3>;
+ vdd-l7-l12-l14-l15-supply = <&vreg_s5a_1p9>;
+ vdd-l13-l16-l17-supply = <&vreg_bob>;
+
+ vreg_s5a_1p9: smps5 {
+ regulator-name = "vreg_s5a_1p9";
+ regulator-min-microvolt = <1904000>;
+ regulator-max-microvolt = <2000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_s6a_0p95: smps6 {
+ regulator-name = "vreg_s6a_0p95";
+ regulator-min-microvolt = <920000>;
+ regulator-max-microvolt = <1128000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2a_3p1: ldo2 {
+ regulator-name = "vreg_l2a_3p1";
+ regulator-min-microvolt = <3072000>;
+ regulator-max-microvolt = <3072000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3a_0p9: ldo3 {
+ regulator-name = "vreg_l3a_0p9";
+ regulator-min-microvolt = <928000>;
+ regulator-max-microvolt = <932000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5a_0p875: ldo5 {
+ regulator-name = "vreg_l5a_0p875";
+ regulator-min-microvolt = <880000>;
+ regulator-max-microvolt = <880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6a_1p2: ldo6 {
+ regulator-name = "vreg_l6a_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7a_1p7: ldo7 {
+ regulator-name = "vreg_l7a_1p7";
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9a_1p2: ldo9 {
+ regulator-name = "vreg_l9a_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10a_1p8: ldo10 {
+ regulator-name = "vreg_l10a_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l12a_1p8: ldo12 {
+ regulator-name = "vreg_l12a_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l13a_ts_3p0: ldo13 {
+ regulator-name = "vreg_l13a_ts_3p0";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l14a_1p8: ldo14 {
+ regulator-name = "vreg_l14a_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1880000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l15a_11ad_io_1p8: ldo15 {
+ regulator-name = "vreg_l15a_11ad_io_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l16a_2p7: ldo16 {
+ regulator-name = "vreg_l16a_2p7";
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l17a_3p0: ldo17 {
+ regulator-name = "vreg_l17a_3p0";
+ regulator-min-microvolt = <2856000>;
+ regulator-max-microvolt = <3008000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l18a_0p9: ldo18 {
+ regulator-name = "vreg_l18a_0p9";
+ regulator-min-microvolt = <912000>;
+ regulator-max-microvolt = <912000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ pm8150l-rpmh-regulators {
+ compatible = "qcom,pm8150l-rpmh-regulators";
+ qcom,pmic-id = "c";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vph_pwr>;
+ vdd-s3-supply = <&vph_pwr>;
+ vdd-s4-supply = <&vph_pwr>;
+ vdd-s5-supply = <&vph_pwr>;
+ vdd-s6-supply = <&vph_pwr>;
+ vdd-s7-supply = <&vph_pwr>;
+ vdd-s8-supply = <&vph_pwr>;
+ vdd-l1-l8-supply = <&vreg_s4a_1p8>;
+ vdd-l2-l3-supply = <&vreg_s8c_1p3>;
+ vdd-l4-l5-l6-supply = <&vreg_bob>;
+ vdd-l7-l11-supply = <&vreg_bob>;
+ vdd-l9-l10-supply = <&vreg_bob>;
+ vdd-bob-supply = <&vph_pwr>;
+
+ vreg_bob: bob {
+ regulator-name = "vreg_bob";
+ regulator-min-microvolt = <3008000>;
+ regulator-max-microvolt = <4000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_AUTO>;
+ };
+
+ vreg_s8c_1p3: smps8 {
+ regulator-name = "vreg_s8c_1p3";
+ regulator-min-microvolt = <1352000>;
+ regulator-max-microvolt = <1352000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l1c_1p8: ldo1 {
+ regulator-name = "vreg_l1c_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2c_1p2: ldo2 {
+ regulator-name = "vreg_l2c_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3c_0p92: ldo3 {
+ regulator-name = "vreg_l3c_0p92";
+ regulator-min-microvolt = <920000>;
+ regulator-max-microvolt = <920000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l4c_1p7: ldo4 {
+ regulator-name = "vreg_l4c_1p7";
+ regulator-min-microvolt = <1704000>;
+ regulator-max-microvolt = <2928000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5c_1p8: ldo5 {
+ regulator-name = "vreg_l5c_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2928000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6c_2p9: ldo6 {
+ regulator-name = "vreg_l6c_2p9";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7c_cam_vcm0_2p85: ldo7 {
+ regulator-name = "vreg_l7c_cam_vcm0_2p85";
+ regulator-min-microvolt = <2856000>;
+ regulator-max-microvolt = <3104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l8c_1p8: ldo8 {
+ regulator-name = "vreg_l8c_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l9c_2p9: ldo9 {
+ regulator-name = "vreg_l9c_2p9";
+ regulator-min-microvolt = <2704000>;
+ regulator-max-microvolt = <2960000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l10c_3p0: ldo10 {
+ regulator-name = "vreg_l10c_3p0";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l11c_3p3: ldo11 {
+ regulator-name = "vreg_l11c_3p3";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3312000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+
+ pm8009-rpmh-regulators {
+ compatible = "qcom,pm8009-rpmh-regulators";
+ qcom,pmic-id = "f";
+
+ vdd-s1-supply = <&vph_pwr>;
+ vdd-s2-supply = <&vreg_bob>;
+ vdd-l2-supply = <&vreg_s8c_1p3>;
+ vdd-l5-l6-supply = <&vreg_bob>;
+ vdd-l7-supply = <&vreg_s4a_1p8>;
+
+ vreg_l1f_cam_dvdd1_1p1: ldo1 {
+ regulator-name = "vreg_l1f_cam_dvdd1_1p1";
+ regulator-min-microvolt = <1104000>;
+ regulator-max-microvolt = <1104000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l2f_cam_dvdd0_1p2: ldo2 {
+ regulator-name = "vreg_l2f_cam_dvdd0_1p2";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l3f_cam_dvdd2_1p05: ldo3 {
+ regulator-name = "vreg_l3f_cam_dvdd2_1p05";
+ regulator-min-microvolt = <1056000>;
+ regulator-max-microvolt = <1056000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l5f_cam_avdd0_2p85: ldo5 {
+ regulator-name = "vreg_l5f_cam_avdd0_2p85";
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l6f_cam_avdd1_2p85: ldo6 {
+ regulator-name = "vreg_l6f_cam_avdd1_2p85";
+ regulator-min-microvolt = <2856000>;
+ regulator-max-microvolt = <2856000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+
+ vreg_l7f_1p8: ldo7 {
+ regulator-name = "vreg_l7f_1p8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-initial-mode = <RPMH_REGULATOR_MODE_HPM>;
+ };
+ };
+};
+
+&cdsp {
+ status = "okay";
+ firmware-name = "qcom/sm8250/cdsp.mbn";
+};
+
+&dsi0 {
+ status = "okay";
+ vdda-supply = <&vreg_l9a_1p2>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ports {
+ port@1 {
+ endpoint {
+ data-lanes = <0 1 2 3>;
+ };
+ };
+ };
+};
+
+&dsi0_phy {
+ status = "okay";
+ vdds-supply = <&vreg_l5a_0p875>;
+};
+
+#if 0
+&dsi1 {
+ status = "okay";
+ vdda-supply = <&vreg_l9a_1p2>;
+
+ ports {
+ port@1 {
+ endpoint {
+ data-lanes = <0 1 2 3>;
+ };
+ };
+ };
+};
+
+&dsi1_phy {
+ status = "okay";
+ vdds-supply = <&vreg_l5a_0p875>;
+};
+#endif
+
+&gmu {
+ status = "okay";
+};
+
+&gpu {
+ status = "okay";
+
+ zap-shader {
+ memory-region = <&gpu_mem>;
+ firmware-name = "qcom/sm8250/a650_zap.mbn";
+ };
+};
+
+&i2c1 {
+ status = "okay";
+ clock-frequency = <1000000>;
+
+ /* NQ NFC chip @28 */
+};
+
+&i2c13 {
+ status = "okay";
+
+ /* st,stmfts @ 49 */
+};
+
+&i2c15 {
+ status = "okay";
+
+ /* smb1390 @ 10 */
+ /* rtc6226 @ 64 */
+};
+
+&mdss {
+ status = "okay";
+};
+
+&mdss_mdp {
+ status = "okay";
+};
+
+&pm8150_adc {
+ xo-therm@4c {
+ reg = <ADC5_XO_THERM_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+
+ skin-therm@4d {
+ reg = <ADC5_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+
+ pa-therm1@4e {
+ reg = <ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+};
+
+&pm8150_adc_tm {
+ status = "okay";
+
+ xo-therm@0 {
+ reg = <0>;
+ io-channels = <&pm8150_adc ADC5_XO_THERM_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ skin-therm@1 {
+ reg = <1>;
+ io-channels = <&pm8150_adc ADC5_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ pa-therm1@2 {
+ reg = <2>;
+ io-channels = <&pm8150_adc ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+};
+
+&pm8150b_adc {
+ conn-therm@4f {
+ reg = <ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+};
+
+&pm8150b_adc_tm {
+ status = "okay";
+
+ conn-therm@0 {
+ reg = <0>;
+ io-channels = <&pm8150b_adc ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+};
+
+&pm8150l_adc_tm {
+ status = "okay";
+
+ camera-flash-therm@0 {
+ reg = <0>;
+ io-channels = <&pm8150l_adc ADC5_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ skin-msm-therm@1 {
+ reg = <1>;
+ io-channels = <&pm8150l_adc ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+
+ pa-therm2@2 {
+ reg = <2>;
+ io-channels = <&pm8150l_adc ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time-us = <200>;
+ };
+};
+
+&pm8150l_adc {
+ camera-flash-therm@4d {
+ reg = <ADC5_AMUX_THM1_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+
+ skin-msm-therm@4e {
+ reg = <ADC5_AMUX_THM2_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+
+ pa-therm2@4f {
+ reg = <ADC5_AMUX_THM3_100K_PU>;
+ qcom,ratiometric;
+ qcom,hw-settle-time = <200>;
+ };
+};
+
+&qupv3_id_0 {
+ status = "okay";
+};
+
+&qupv3_id_1 {
+ status = "okay";
+};
+
+&qupv3_id_2 {
+ status = "okay";
+};
+
+&slpi {
+ status = "okay";
+ firmware-name = "qcom/sm8250/slpi.mbn";
+};
+
+&soc {
+ wcd938x: codec {
+ compatible = "qcom,wcd9380-codec";
+ #sound-dai-cells = <1>;
+ reset-gpios = <&tlmm 32 0>;
+ vdd-buck-supply = <&vreg_s4a_1p8>;
+ vdd-rxtx-supply = <&vreg_s4a_1p8>;
+ vdd-io-supply = <&vreg_s4a_1p8>;
+ vdd-mic-bias-supply = <&vreg_bob>;
+ qcom,micbias1-microvolt = <1800000>;
+ qcom,micbias2-microvolt = <1800000>;
+ qcom,micbias3-microvolt = <1800000>;
+ qcom,micbias4-microvolt = <1800000>;
+ qcom,mbhc-buttons-vthreshold-microvolt = <75000 150000 237000 500000 500000 500000 500000 500000>;
+ qcom,mbhc-headset-vthreshold-microvolt = <1700000>;
+ qcom,mbhc-headphone-vthreshold-microvolt = <50000>;
+ qcom,rx-device = <&wcd_rx>;
+ qcom,tx-device = <&wcd_tx>;
+ };
+};
+
+&sound {
+ compatible = "qcom,sm8250-sndcard";
+ model = "SM8250-MTP-WCD9380-WSA8810-VA-DMIC";
+ audio-routing =
+ "SpkrLeft IN", "WSA_SPK1 OUT",
+ "SpkrRight IN", "WSA_SPK2 OUT",
+ "IN1_HPHL", "HPHL_OUT",
+ "IN2_HPHR", "HPHR_OUT",
+ "AMIC1", "MIC BIAS1",
+ "AMIC2", "MIC BIAS2",
+ "AMIC3", "MIC BIAS3",
+ "AMIC4", "MIC BIAS3",
+ "AMIC5", "MIC BIAS4",
+ "TX SWR_ADC0", "ADC1_OUTPUT",
+ "TX SWR_ADC1", "ADC2_OUTPUT",
+ "TX SWR_ADC2", "ADC3_OUTPUT",
+ "TX SWR_ADC3", "ADC4_OUTPUT",
+ "TX SWR_DMIC0", "DMIC1_OUTPUT",
+ "TX SWR_DMIC1", "DMIC2_OUTPUT",
+ "TX SWR_DMIC2", "DMIC3_OUTPUT",
+ "TX SWR_DMIC3", "DMIC4_OUTPUT",
+ "TX SWR_DMIC4", "DMIC5_OUTPUT",
+ "TX SWR_DMIC5", "DMIC6_OUTPUT",
+ "TX SWR_DMIC6", "DMIC7_OUTPUT",
+ "TX SWR_DMIC7", "DMIC8_OUTPUT";
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ mm2-dai-link {
+ link-name = "MultiMedia2";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA2>;
+ };
+ };
+
+ mm3-dai-link {
+ link-name = "MultiMedia3";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA3>;
+ };
+ };
+
+ wcd-playback-dai-link {
+ link-name = "WCD Playback";
+ cpu {
+ sound-dai = <&q6afedai RX_CODEC_DMA_RX_0>;
+ };
+ codec {
+ sound-dai = <&wcd938x 0>, <&swr1 0>, <&rxmacro 0>;
+ };
+ platform {
+ sound-dai = <&q6routing>;
+ };
+ };
+
+ wcd-capture-dai-link {
+ link-name = "WCD Capture";
+ cpu {
+ sound-dai = <&q6afedai TX_CODEC_DMA_TX_3>;
+ };
+
+ codec {
+ sound-dai = <&wcd938x 1>, <&swr2 0>, <&txmacro 0>;
+ };
+ platform {
+ sound-dai = <&q6routing>;
+ };
+ };
+
+ wsa-dai-link {
+ link-name = "WSA Playback";
+ cpu {
+ sound-dai = <&q6afedai WSA_CODEC_DMA_RX_0>;
+ };
+
+ codec {
+ sound-dai = <&left_spkr>, <&right_spkr>, <&swr0 0>, <&wsamacro 0>;
+ };
+ platform {
+ sound-dai = <&q6routing>;
+ };
+ };
+
+ va-dai-link {
+ link-name = "VA Capture";
+ cpu {
+ sound-dai = <&q6afedai VA_CODEC_DMA_TX_0>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+
+ codec {
+ sound-dai = <&vamacro 0>;
+ };
+ };
+};
+
+&swr0 {
+	left_spkr: wsa8810-right@0,3 {
+ compatible = "sdw10217211000";
+ reg = <0 3>;
+ powerdown-gpios = <&tlmm 26 GPIO_ACTIVE_HIGH>;
+ #thermal-sensor-cells = <0>;
+ sound-name-prefix = "SpkrLeft";
+ #sound-dai-cells = <0>;
+ };
+
+	right_spkr: wsa8810-left@0,4 {
+ compatible = "sdw10217211000";
+ reg = <0 4>;
+ powerdown-gpios = <&tlmm 127 GPIO_ACTIVE_HIGH>;
+ #thermal-sensor-cells = <0>;
+ sound-name-prefix = "SpkrRight";
+ #sound-dai-cells = <0>;
+ };
+};
+
+&swr1 {
+ wcd_rx: wcd9380-rx@0,4 {
+ compatible = "sdw20217010d00";
+ reg = <0 4>;
+ qcom,rx-port-mapping = <1 2 3 4 5>;
+ };
+};
+
+&swr2 {
+ wcd_tx: wcd9380-tx@0,3 {
+ compatible = "sdw20217010d00";
+ reg = <0 3>;
+ qcom,tx-port-mapping = <2 3 4 5>;
+ };
+};
+
+&tlmm {
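+	/* Each entry is a <first-gpio count> pair: GPIOs 28-31 and 40-43 are reserved */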
+ gpio-reserved-ranges = <28 4>, <40 4>;
+
+<<<<<<<
+ display_panel_avdd_default: display_panel_avdd_default {
+ mux {
+ pins = "gpio61";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio61";
+ drive-strength = <8>;
+			bias-disable;
+ output-high;
+ };
+ };
+
+=======
+ wcd938x_reset_default: wcd938x_reset_default {
+ mux {
+ pins = "gpio32";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio32";
+ drive-strength = <16>;
+ output-high;
+ };
+ };
+
+ wcd938x_reset_sleep: wcd938x_reset_sleep {
+ mux {
+ pins = "gpio32";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio32";
+ drive-strength = <16>;
+ bias-disable;
+ output-low;
+ };
+ };
+>>>>>>>
+};
+
+&uart12 {
+ status = "okay";
+};
+
+&ufs_mem_hc {
+ status = "okay";
+
+ vcc-supply = <&vreg_l17a_3p0>;
+ vcc-max-microamp = <750000>;
+ vccq-supply = <&vreg_l6a_1p2>;
+ vccq-max-microamp = <700000>;
+ vccq2-supply = <&vreg_s4a_1p8>;
+ vccq2-max-microamp = <750000>;
+};
+
+&ufs_mem_phy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l5a_0p875>;
+ vdda-pll-supply = <&vreg_l9a_1p2>;
+};
+
+&usb_1 {
+ status = "okay";
+};
+
+&usb_1_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_1_hsphy {
+ status = "okay";
+
+ vdda-pll-supply = <&vreg_l5a_0p875>;
+ vdda18-supply = <&vreg_l12a_1p8>;
+ vdda33-supply = <&vreg_l2a_3p1>;
+};
+
+&usb_1_qmpphy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l9a_1p2>;
+ vdda-pll-supply = <&vreg_l18a_0p9>;
+};
+
+&usb_2 {
+ status = "okay";
+};
+
+&usb_2_dwc3 {
+ dr_mode = "host";
+};
+
+&usb_2_hsphy {
+ status = "okay";
+
+ vdda-pll-supply = <&vreg_l5a_0p875>;
+ vdda18-supply = <&vreg_l12a_1p8>;
+ vdda33-supply = <&vreg_l2a_3p1>;
+};
+
+&usb_2_qmpphy {
+ status = "okay";
+
+ vdda-phy-supply = <&vreg_l9a_1p2>;
+ vdda-pll-supply = <&vreg_l18a_0p9>;
+};
+
+&swr0 {
+	right_spkr: wsa8810-right {
+ compatible = "sdw10217211000";
+ reg = <0 2>;
+ powerdown-gpios = <&tlmm 127 GPIO_ACTIVE_HIGH>;
+ #thermal-sensor-cells = <0>;
+ sound-name-prefix = "SpkrRight";
+ #sound-dai-cells = <0>;
+ };
+
+	left_spkr: wsa8810-left {
+ compatible = "sdw10217211000";
+ reg = <0 1>;
+ powerdown-gpios = <&tlmm 26 GPIO_ACTIVE_HIGH>;
+ #thermal-sensor-cells = <0>;
+ sound-name-prefix = "SpkrLeft";
+ #sound-dai-cells = <0>;
+ };
+};
+
+&q6asmdai {
+ dai@0 {
+ reg = <0>;
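+		/* 2 = Q6ASM_DAI_RX, i.e. a playback-only stream */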
+ direction = <2>;
+ };
+};
+
+&sound {
+ compatible = "qcom,sm8250-sndcard";
+ model = "SM8250";
+ audio-routing =
+ "SpkrLeft IN", "WSA_SPK1 OUT",
+ "MM_DL1", "MultiMedia1 Playback";
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ dma-dai-link {
+ link-name = "WSA Playback";
+ cpu {
+ sound-dai = <&q6afedai WSA_CODEC_DMA_RX_0>;
+ };
+
+ platform {
+ sound-dai = <&q6routing>;
+ };
+
+ codec {
+ sound-dai = <&left_spkr>, <&swr0 0>, <&wsamacro 0>;
+ };
+ };
+};
+
+&venus {
+ status = "okay";
+};
diff --git a/rr-cache/f285fdfe5adcadf77d60a81471e87da51069de06/preimage b/rr-cache/f285fdfe5adcadf77d60a81471e87da51069de06/preimage
new file mode 100644
index 0000000..14868da
--- /dev/null
+++ b/rr-cache/f285fdfe5adcadf77d60a81471e87da51069de06/preimage
@@ -0,0 +1,1074 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2019-2020, Linaro Limited.
+ */
+
+#include <linux/firmware.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#include <sound/hdmi-codec.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#define EDID_BLOCK_SIZE 128
+#define EDID_NUM_BLOCKS 2
+
+struct lt9611uxc {
+ struct device *dev;
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+
+ struct regmap *regmap;
+ /* Protects all accesses to registers by stopping the on-chip MCU */
+ struct mutex ocm_lock;
+
+ struct wait_queue_head wq;
+ struct work_struct work;
+
+ struct device_node *dsi0_node;
+ struct device_node *dsi1_node;
+ struct mipi_dsi_device *dsi0;
+ struct mipi_dsi_device *dsi1;
+ struct platform_device *audio_pdev;
+
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *enable_gpio;
+
+ struct regulator_bulk_data supplies[2];
+
+ struct i2c_client *client;
+
+ bool hpd_supported;
+ bool edid_read;
+ /* can be accessed from different threads, so protect this with ocm_lock */
+ bool hdmi_connected;
+ uint8_t fw_version;
+};
+
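+/* Registers use 16-bit addresses: the high byte selects a page through register 0xff */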
+#define LT9611_PAGE_CONTROL 0xff
+
+static const struct regmap_range_cfg lt9611uxc_ranges[] = {
+ {
+ .name = "register_range",
+ .range_min = 0,
+ .range_max = 0xd0ff,
+ .selector_reg = LT9611_PAGE_CONTROL,
+ .selector_mask = 0xff,
+ .selector_shift = 0,
+ .window_start = 0,
+ .window_len = 0x100,
+ },
+};
+
+static const struct regmap_config lt9611uxc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0xffff,
+ .ranges = lt9611uxc_ranges,
+ .num_ranges = ARRAY_SIZE(lt9611uxc_ranges),
+};
+
+struct lt9611uxc_mode {
+ u16 hdisplay;
+ u16 vdisplay;
+ u8 vrefresh;
+ bool dual_dsi;
+};
+
+/*
+ * This chip supports only a fixed set of modes.
+ * Enumerate them here to check whether the mode is supported.
+ */
+static struct lt9611uxc_mode lt9611uxc_modes[] = {
+ { 3840, 2160, 60, true },
+ { 3840, 2160, 30, true },
+ { 1920, 1080, 60, false },
+ { 1920, 1080, 30, false },
+ { 1920, 1080, 25, false },
+ { 1366, 768, 60, false },
+ { 1360, 768, 60, false },
+ { 1280, 1024, 60, false },
+ { 1280, 800, 60, false },
+ { 1280, 720, 60, false },
+ { 1280, 720, 50, false },
+ { 1280, 720, 30, false },
+ { 1152, 864, 60, false },
+ { 1024, 768, 60, false },
+ { 800, 600, 60, false },
+ { 720, 576, 50, false },
+ { 720, 480, 60, false },
+ { 640, 480, 60, false },
+};
+
+static struct lt9611uxc *bridge_to_lt9611uxc(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct lt9611uxc, bridge);
+}
+
+static struct lt9611uxc *connector_to_lt9611uxc(struct drm_connector *connector)
+{
+ return container_of(connector, struct lt9611uxc, connector);
+}
+
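+/*
+ * Writing 1 to 0x80ee halts the on-chip MCU so the host can safely access
+ * the registers; writing 0 lets it run again, hence the settle delay in unlock.
+ */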
+static void lt9611uxc_lock(struct lt9611uxc *lt9611uxc)
+{
+ mutex_lock(&lt9611uxc->ocm_lock);
+ regmap_write(lt9611uxc->regmap, 0x80ee, 0x01);
+}
+
+static void lt9611uxc_unlock(struct lt9611uxc *lt9611uxc)
+{
+ regmap_write(lt9611uxc->regmap, 0x80ee, 0x00);
+ msleep(50);
+ mutex_unlock(&lt9611uxc->ocm_lock);
+}
+
+static irqreturn_t lt9611uxc_irq_thread_handler(int irq, void *dev_id)
+{
+ struct lt9611uxc *lt9611uxc = dev_id;
+ unsigned int irq_status = 0;
+ unsigned int hpd_status = 0;
+
+ lt9611uxc_lock(lt9611uxc);
+
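+	/* 0xb022 latches the event bits (cleared by writing 0), 0xb023 holds the live status */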
+ regmap_read(lt9611uxc->regmap, 0xb022, &irq_status);
+ regmap_read(lt9611uxc->regmap, 0xb023, &hpd_status);
+ if (irq_status)
+ regmap_write(lt9611uxc->regmap, 0xb022, 0);
+
+ if (irq_status & BIT(0)) {
+ lt9611uxc->edid_read = !!(hpd_status & BIT(0));
+ wake_up_all(&lt9611uxc->wq);
+ }
+
+ if (irq_status & BIT(1)) {
+ lt9611uxc->hdmi_connected = hpd_status & BIT(1);
+ schedule_work(&lt9611uxc->work);
+ }
+
+ lt9611uxc_unlock(lt9611uxc);
+
+ return IRQ_HANDLED;
+}
+
+static void lt9611uxc_hpd_work(struct work_struct *work)
+{
+ struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work);
+ bool connected;
+
+ if (lt9611uxc->connector.dev) {
+ if (lt9611uxc->connector.dev->mode_config.funcs)
+ drm_kms_helper_hotplug_event(lt9611uxc->connector.dev);
+ } else {
+ mutex_lock(&lt9611uxc->ocm_lock);
+ connected = lt9611uxc->hdmi_connected;
+ mutex_unlock(&lt9611uxc->ocm_lock);
+
+ drm_bridge_hpd_notify(&lt9611uxc->bridge,
+ connected ?
+ connector_status_connected :
+ connector_status_disconnected);
+ }
+}
+
+static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc)
+{
+ gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1);
+ msleep(20);
+
+ gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 0);
+ msleep(20);
+
+ gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1);
+ msleep(300);
+}
+
+static void lt9611uxc_assert_5v(struct lt9611uxc *lt9611uxc)
+{
+ if (!lt9611uxc->enable_gpio)
+ return;
+
+ gpiod_set_value_cansleep(lt9611uxc->enable_gpio, 1);
+ msleep(20);
+}
+
+static int lt9611uxc_regulator_init(struct lt9611uxc *lt9611uxc)
+{
+ int ret;
+
+ lt9611uxc->supplies[0].supply = "vdd";
+ lt9611uxc->supplies[1].supply = "vcc";
+
+ ret = devm_regulator_bulk_get(lt9611uxc->dev, 2, lt9611uxc->supplies);
+ if (ret < 0)
+ return ret;
+
+ return regulator_set_load(lt9611uxc->supplies[0].consumer, 200000);
+}
+
+static int lt9611uxc_regulator_enable(struct lt9611uxc *lt9611uxc)
+{
+ int ret;
+
+ ret = regulator_enable(lt9611uxc->supplies[0].consumer);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(1000, 10000); /* 50000 according to dtsi */
+
+ ret = regulator_enable(lt9611uxc->supplies[1].consumer);
+ if (ret < 0) {
+ regulator_disable(lt9611uxc->supplies[0].consumer);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct lt9611uxc_mode *lt9611uxc_find_mode(const struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(lt9611uxc_modes); i++) {
+ if (lt9611uxc_modes[i].hdisplay == mode->hdisplay &&
+ lt9611uxc_modes[i].vdisplay == mode->vdisplay &&
+ lt9611uxc_modes[i].vrefresh == drm_mode_vrefresh(mode)) {
+ return &lt9611uxc_modes[i];
+ }
+ }
+
+ return NULL;
+}
+
+static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
+ struct device_node *dsi_node)
+{
+ const struct mipi_dsi_device_info info = { "lt9611uxc", 0, NULL };
+ struct mipi_dsi_device *dsi;
+ struct mipi_dsi_host *host;
+ struct device *dev = lt9611uxc->dev;
+ int ret;
+
+ host = of_find_mipi_dsi_host_by_node(dsi_node);
+ if (!host) {
+ dev_err(dev, "failed to find dsi host\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
+ if (IS_ERR(dsi)) {
+ dev_err(dev, "failed to create dsi device\n");
+ return dsi;
+ }
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_VIDEO_HSE;
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0) {
+ dev_err(dev, "failed to attach dsi to host\n");
+ return ERR_PTR(ret);
+ }
+
+ return dsi;
+}
+
+static int lt9611uxc_connector_get_modes(struct drm_connector *connector)
+{
+ struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector);
+ unsigned int count;
+ struct edid *edid;
+
+ edid = lt9611uxc->bridge.funcs->get_edid(&lt9611uxc->bridge, connector);
+ drm_connector_update_edid_property(connector, edid);
+ count = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+
+ return count;
+}
+
+static enum drm_connector_status lt9611uxc_connector_detect(struct drm_connector *connector,
+ bool force)
+{
+ struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector);
+
+ return lt9611uxc->bridge.funcs->detect(&lt9611uxc->bridge);
+}
+
+static enum drm_mode_status lt9611uxc_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct lt9611uxc_mode *lt9611uxc_mode = lt9611uxc_find_mode(mode);
+ struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector);
+
+ if (!lt9611uxc_mode)
+ return MODE_BAD;
+
+ if (lt9611uxc_mode->dual_dsi && (!lt9611uxc->dsi0 || !lt9611uxc->dsi1))
+ return MODE_BAD;
+
+ return MODE_OK;
+}
+
+static const struct drm_connector_helper_funcs lt9611uxc_bridge_connector_helper_funcs = {
+ .get_modes = lt9611uxc_connector_get_modes,
+ .mode_valid = lt9611uxc_connector_mode_valid,
+};
+
+static const struct drm_connector_funcs lt9611uxc_bridge_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = lt9611uxc_connector_detect,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int lt9611uxc_connector_init(struct drm_bridge *bridge, struct lt9611uxc *lt9611uxc)
+{
+ int ret;
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Parent encoder object not found");
+ return -ENODEV;
+ }
+
+ lt9611uxc->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_helper_add(&lt9611uxc->connector,
+ &lt9611uxc_bridge_connector_helper_funcs);
+ ret = drm_connector_init(bridge->dev, &lt9611uxc->connector,
+ &lt9611uxc_bridge_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ return drm_connector_attach_encoder(&lt9611uxc->connector, bridge->encoder);
+}
+
+static int lt9611uxc_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
+ int ret;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ ret = lt9611uxc_connector_init(bridge, lt9611uxc);
+ if (ret < 0)
+ return ret;
+ }
+
+<<<<<<<
+ /* Attach primary DSI */
+ if (lt9611uxc->dsi0_node) {
+ lt9611uxc->dsi0 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi0_node);
+ if (IS_ERR(lt9611uxc->dsi0))
+ return PTR_ERR(lt9611uxc->dsi0);
+ }
+
+ /* Attach secondary DSI, if specified */
+ if (lt9611uxc->dsi1_node) {
+ lt9611uxc->dsi1 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi1_node);
+ if (IS_ERR(lt9611uxc->dsi1)) {
+ ret = PTR_ERR(lt9611uxc->dsi1);
+ goto err_unregister_dsi0;
+ }
+ }
+
+ return 0;
+
+err_unregister_dsi0:
+ if (lt9611uxc->dsi0) {
+ mipi_dsi_detach(lt9611uxc->dsi0);
+ mipi_dsi_device_unregister(lt9611uxc->dsi0);
+ }
+
+ return ret;
+=======
+ return 0;
+>>>>>>>
+}
+
+static enum drm_mode_status
+lt9611uxc_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
+ struct lt9611uxc_mode *lt9611uxc_mode = lt9611uxc_find_mode(mode);
+
+ if (!lt9611uxc_mode)
+ return MODE_BAD;
+
+ if (lt9611uxc_mode->dual_dsi && (!lt9611uxc->dsi0 || !lt9611uxc->dsi1))
+ return MODE_BAD;
+
+ return MODE_OK;
+}
+
+static void lt9611uxc_video_setup(struct lt9611uxc *lt9611uxc,
+ const struct drm_display_mode *mode)
+{
+ u32 h_total, hactive, hsync_len, hfront_porch;
+ u32 v_total, vactive, vsync_len, vfront_porch;
+
+ h_total = mode->htotal;
+ v_total = mode->vtotal;
+
+ hactive = mode->hdisplay;
+ hsync_len = mode->hsync_end - mode->hsync_start;
+ hfront_porch = mode->hsync_start - mode->hdisplay;
+
+ vactive = mode->vdisplay;
+ vsync_len = mode->vsync_end - mode->vsync_start;
+ vfront_porch = mode->vsync_start - mode->vdisplay;
+
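+	/* 0xb025 selects the DSI input: 0x01 = DSI0, 0x02 = DSI1, 0x03 = dual-link */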
+ if (lt9611uxc->dsi0 && lt9611uxc->dsi1)
+ regmap_write(lt9611uxc->regmap, 0xb025, 0x03);
+ else if (lt9611uxc->dsi0)
+ regmap_write(lt9611uxc->regmap, 0xb025, 0x01);
+ else
+ regmap_write(lt9611uxc->regmap, 0xb025, 0x02);
+
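+	/* 16-bit timing values are split into high/low bytes (high nibbles for sync/porch) */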
+ regmap_write(lt9611uxc->regmap, 0xd00d, (u8)(v_total / 256));
+ regmap_write(lt9611uxc->regmap, 0xd00e, (u8)(v_total % 256));
+
+ regmap_write(lt9611uxc->regmap, 0xd00f, (u8)(vactive / 256));
+ regmap_write(lt9611uxc->regmap, 0xd010, (u8)(vactive % 256));
+
+ regmap_write(lt9611uxc->regmap, 0xd011, (u8)(h_total / 256));
+ regmap_write(lt9611uxc->regmap, 0xd012, (u8)(h_total % 256));
+
+ regmap_write(lt9611uxc->regmap, 0xd013, (u8)(hactive / 256));
+ regmap_write(lt9611uxc->regmap, 0xd014, (u8)(hactive % 256));
+
+ regmap_write(lt9611uxc->regmap, 0xd015, (u8)(vsync_len % 256));
+
+ regmap_update_bits(lt9611uxc->regmap, 0xd016, 0xf, (u8)(hsync_len / 256));
+ regmap_write(lt9611uxc->regmap, 0xd017, (u8)(hsync_len % 256));
+
+ regmap_update_bits(lt9611uxc->regmap, 0xd018, 0xf, (u8)(vfront_porch / 256));
+ regmap_write(lt9611uxc->regmap, 0xd019, (u8)(vfront_porch % 256));
+
+ regmap_update_bits(lt9611uxc->regmap, 0xd01a, 0xf, (u8)(hfront_porch / 256));
+ regmap_write(lt9611uxc->regmap, 0xd01b, (u8)(hfront_porch % 256));
+}
+
+static void lt9611uxc_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adj_mode)
+{
+ struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
+
+ lt9611uxc_lock(lt9611uxc);
+ lt9611uxc_video_setup(lt9611uxc, mode);
+ lt9611uxc_unlock(lt9611uxc);
+}
+
+static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *bridge)
+{
+ struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
+ unsigned int reg_val = 0;
+ int ret;
+ bool connected = true;
+
+ lt9611uxc_lock(lt9611uxc);
+
+ if (lt9611uxc->hpd_supported) {
+ ret = regmap_read(lt9611uxc->regmap, 0xb023, &reg_val);
+
+ if (ret)
+ dev_err(lt9611uxc->dev, "failed to read hpd status: %d\n", ret);
+ else
+ connected = reg_val & BIT(1);
+ }
+ lt9611uxc->hdmi_connected = connected;
+
+ lt9611uxc_unlock(lt9611uxc);
+
+ return connected ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+static int lt9611uxc_wait_for_edid(struct lt9611uxc *lt9611uxc)
+{
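+	/* >0 means the EDID arrived, 0 means the 500 ms timeout expired, <0 interrupted */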
+ return wait_event_interruptible_timeout(lt9611uxc->wq, lt9611uxc->edid_read,
+ msecs_to_jiffies(500));
+}
+
+static int lt9611uxc_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
+{
+ struct lt9611uxc *lt9611uxc = data;
+ int ret;
+
+ if (len > EDID_BLOCK_SIZE)
+ return -EINVAL;
+
+ if (block >= EDID_NUM_BLOCKS)
+ return -EINVAL;
+
+ lt9611uxc_lock(lt9611uxc);
+
+ regmap_write(lt9611uxc->regmap, 0xb00b, 0x10);
+
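+	/* Point 0xb00a at the requested block; the bytes then stream out of 0xb0b0 */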
+ regmap_write(lt9611uxc->regmap, 0xb00a, block * EDID_BLOCK_SIZE);
+
+ ret = regmap_noinc_read(lt9611uxc->regmap, 0xb0b0, buf, len);
+ if (ret)
+ dev_err(lt9611uxc->dev, "edid read failed: %d\n", ret);
+
+ lt9611uxc_unlock(lt9611uxc);
+
+ return 0;
+}
+
+static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge);
+ int ret;
+
+ ret = lt9611uxc_wait_for_edid(lt9611uxc);
+ if (ret < 0) {
+ dev_err(lt9611uxc->dev, "wait for EDID failed: %d\n", ret);
+ return NULL;
+ } else if (ret == 0) {
+ dev_err(lt9611uxc->dev, "wait for EDID timeout\n");
+ return NULL;
+ }
+
+ return drm_do_get_edid(connector, lt9611uxc_get_edid_block, lt9611uxc);
+}
+
+static const struct drm_bridge_funcs lt9611uxc_bridge_funcs = {
+ .attach = lt9611uxc_bridge_attach,
+ .mode_valid = lt9611uxc_bridge_mode_valid,
+ .mode_set = lt9611uxc_bridge_mode_set,
+ .detect = lt9611uxc_bridge_detect,
+ .get_edid = lt9611uxc_bridge_get_edid,
+};
+
+static int lt9611uxc_parse_dt(struct device *dev,
+ struct lt9611uxc *lt9611uxc)
+{
+ lt9611uxc->dsi0_node = of_graph_get_remote_node(dev->of_node, 0, -1);
+ lt9611uxc->dsi1_node = of_graph_get_remote_node(dev->of_node, 1, -1);
+
+ if (!lt9611uxc->dsi0_node && !lt9611uxc->dsi1_node) {
+		dev_err(lt9611uxc->dev, "failed to get remote node for primary or secondary dsi\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int lt9611uxc_gpio_init(struct lt9611uxc *lt9611uxc)
+{
+ struct device *dev = lt9611uxc->dev;
+
+ lt9611uxc->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(lt9611uxc->reset_gpio)) {
+ dev_err(dev, "failed to acquire reset gpio\n");
+ return PTR_ERR(lt9611uxc->reset_gpio);
+ }
+
+ lt9611uxc->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(lt9611uxc->enable_gpio)) {
+ dev_err(dev, "failed to acquire enable gpio\n");
+ return PTR_ERR(lt9611uxc->enable_gpio);
+ }
+
+ return 0;
+}
+
+static int lt9611uxc_read_device_rev(struct lt9611uxc *lt9611uxc)
+{
+ unsigned int rev0, rev1, rev2;
+ int ret;
+
+ lt9611uxc_lock(lt9611uxc);
+
+ ret = regmap_read(lt9611uxc->regmap, 0x8100, &rev0);
+ ret |= regmap_read(lt9611uxc->regmap, 0x8101, &rev1);
+ ret |= regmap_read(lt9611uxc->regmap, 0x8102, &rev2);
+ if (ret)
+ dev_err(lt9611uxc->dev, "failed to read revision: %d\n", ret);
+ else
+ dev_info(lt9611uxc->dev, "LT9611 revision: 0x%02x.%02x.%02x\n", rev0, rev1, rev2);
+
+ lt9611uxc_unlock(lt9611uxc);
+
+ return ret;
+}
+
+static int lt9611uxc_read_version(struct lt9611uxc *lt9611uxc)
+{
+ unsigned int rev;
+ int ret;
+
+ lt9611uxc_lock(lt9611uxc);
+
+ ret = regmap_read(lt9611uxc->regmap, 0xb021, &rev);
+ if (ret)
+ dev_err(lt9611uxc->dev, "failed to read revision: %d\n", ret);
+ else
+ dev_info(lt9611uxc->dev, "LT9611 version: 0x%02x\n", rev);
+
+ lt9611uxc_unlock(lt9611uxc);
+
+ return ret < 0 ? ret : rev;
+}
+
+static int lt9611uxc_hdmi_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
+{
+	/*
+	 * The LT9611UXC detects the audio rate and sample size automatically,
+	 * so there is nothing to configure here.
+	 */
+ return 0;
+}
+
+static void lt9611uxc_audio_shutdown(struct device *dev, void *data)
+{
+}
+
+static int lt9611uxc_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
+ struct device_node *endpoint)
+{
+ struct of_endpoint of_ep;
+ int ret;
+
+ ret = of_graph_parse_endpoint(endpoint, &of_ep);
+ if (ret < 0)
+ return ret;
+
+	/*
+	 * The HDMI sound endpoint is expected at reg = <2>,
+	 * which maps to sound port 0.
+	 */
+ if (of_ep.port == 2)
+ return 0;
+
+ return -EINVAL;
+}
+
+static const struct hdmi_codec_ops lt9611uxc_codec_ops = {
+ .hw_params = lt9611uxc_hdmi_hw_params,
+ .audio_shutdown = lt9611uxc_audio_shutdown,
+ .get_dai_id = lt9611uxc_hdmi_i2s_get_dai_id,
+};
+
+static int lt9611uxc_audio_init(struct device *dev, struct lt9611uxc *lt9611uxc)
+{
+ struct hdmi_codec_pdata codec_data = {
+ .ops = &lt9611uxc_codec_ops,
+ .max_i2s_channels = 2,
+ .i2s = 1,
+ .data = lt9611uxc,
+ };
+
+ lt9611uxc->audio_pdev =
+ platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
+ PLATFORM_DEVID_AUTO,
+ &codec_data, sizeof(codec_data));
+
+ return PTR_ERR_OR_ZERO(lt9611uxc->audio_pdev);
+}
+
+static void lt9611uxc_audio_exit(struct lt9611uxc *lt9611uxc)
+{
+ if (lt9611uxc->audio_pdev) {
+ platform_device_unregister(lt9611uxc->audio_pdev);
+ lt9611uxc->audio_pdev = NULL;
+ }
+}
+
+#define LT9611UXC_FW_PAGE_SIZE 32
+static void lt9611uxc_firmware_write_page(struct lt9611uxc *lt9611uxc, u16 addr, const u8 *buf)
+{
+ struct reg_sequence seq_write_prepare[] = {
+ REG_SEQ0(0x805a, 0x04),
+ REG_SEQ0(0x805a, 0x00),
+
+ REG_SEQ0(0x805e, 0xdf),
+ REG_SEQ0(0x805a, 0x20),
+ REG_SEQ0(0x805a, 0x00),
+ REG_SEQ0(0x8058, 0x21),
+ };
+
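+	/* 0x805b..0x805d take the 24-bit flash address; toggling 0x805a commits the write */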
+ struct reg_sequence seq_write_addr[] = {
+ REG_SEQ0(0x805b, (addr >> 16) & 0xff),
+ REG_SEQ0(0x805c, (addr >> 8) & 0xff),
+ REG_SEQ0(0x805d, addr & 0xff),
+ REG_SEQ0(0x805a, 0x10),
+ REG_SEQ0(0x805a, 0x00),
+ };
+
+ regmap_write(lt9611uxc->regmap, 0x8108, 0xbf);
+ msleep(20);
+ regmap_write(lt9611uxc->regmap, 0x8108, 0xff);
+ msleep(20);
+ regmap_multi_reg_write(lt9611uxc->regmap, seq_write_prepare, ARRAY_SIZE(seq_write_prepare));
+ regmap_noinc_write(lt9611uxc->regmap, 0x8059, buf, LT9611UXC_FW_PAGE_SIZE);
+ regmap_multi_reg_write(lt9611uxc->regmap, seq_write_addr, ARRAY_SIZE(seq_write_addr));
+ msleep(20);
+}
+
+static void lt9611uxc_firmware_read_page(struct lt9611uxc *lt9611uxc, u16 addr, char *buf)
+{
+ struct reg_sequence seq_read_page[] = {
+ REG_SEQ0(0x805a, 0xa0),
+ REG_SEQ0(0x805a, 0x80),
+ REG_SEQ0(0x805b, (addr >> 16) & 0xff),
+ REG_SEQ0(0x805c, (addr >> 8) & 0xff),
+ REG_SEQ0(0x805d, addr & 0xff),
+ REG_SEQ0(0x805a, 0x90),
+ REG_SEQ0(0x805a, 0x80),
+ REG_SEQ0(0x8058, 0x21),
+ };
+
+ regmap_multi_reg_write(lt9611uxc->regmap, seq_read_page, ARRAY_SIZE(seq_read_page));
+ regmap_noinc_read(lt9611uxc->regmap, 0x805f, buf, LT9611UXC_FW_PAGE_SIZE);
+}
+
+static char *lt9611uxc_firmware_read(struct lt9611uxc *lt9611uxc, size_t size)
+{
+ struct reg_sequence seq_read_setup[] = {
+ REG_SEQ0(0x805a, 0x84),
+ REG_SEQ0(0x805a, 0x80),
+ };
+
+ char *readbuf;
+ u16 offset;
+
+ readbuf = kzalloc(ALIGN(size, 32), GFP_KERNEL);
+ if (!readbuf)
+ return NULL;
+
+ regmap_multi_reg_write(lt9611uxc->regmap, seq_read_setup, ARRAY_SIZE(seq_read_setup));
+
+ for (offset = 0;
+ offset < size;
+ offset += LT9611UXC_FW_PAGE_SIZE)
+ lt9611uxc_firmware_read_page(lt9611uxc, offset, &readbuf[offset]);
+
+ return readbuf;
+}
+
+static int lt9611uxc_firmware_update(struct lt9611uxc *lt9611uxc)
+{
+ int ret;
+ u16 offset;
+ size_t remain;
+ char *readbuf;
+ const struct firmware *fw;
+
+ struct reg_sequence seq_setup[] = {
+ REG_SEQ0(0x805e, 0xdf),
+ REG_SEQ0(0x8058, 0x00),
+ REG_SEQ0(0x8059, 0x50),
+ REG_SEQ0(0x805a, 0x10),
+ REG_SEQ0(0x805a, 0x00),
+ };
+
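+	/* Erases the flash block at address 0; issued twice below as a reliability workaround */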
+ struct reg_sequence seq_block_erase[] = {
+ REG_SEQ0(0x805a, 0x04),
+ REG_SEQ0(0x805a, 0x00),
+ REG_SEQ0(0x805b, 0x00),
+ REG_SEQ0(0x805c, 0x00),
+ REG_SEQ0(0x805d, 0x00),
+ REG_SEQ0(0x805a, 0x01),
+ REG_SEQ0(0x805a, 0x00),
+ };
+
+ ret = request_firmware(&fw, "lt9611uxc_fw.bin", lt9611uxc->dev);
+ if (ret < 0)
+ return ret;
+
+ dev_info(lt9611uxc->dev, "Updating firmware\n");
+ lt9611uxc_lock(lt9611uxc);
+
+ regmap_multi_reg_write(lt9611uxc->regmap, seq_setup, ARRAY_SIZE(seq_setup));
+
+	/*
+	 * Erase the block twice here: a single block erase can sometimes
+	 * fail. This is a workaround.
+	 */
+ regmap_multi_reg_write(lt9611uxc->regmap, seq_block_erase, ARRAY_SIZE(seq_block_erase));
+ msleep(3000);
+ regmap_multi_reg_write(lt9611uxc->regmap, seq_block_erase, ARRAY_SIZE(seq_block_erase));
+ msleep(3000);
+
+ for (offset = 0, remain = fw->size;
+ remain >= LT9611UXC_FW_PAGE_SIZE;
+ offset += LT9611UXC_FW_PAGE_SIZE, remain -= LT9611UXC_FW_PAGE_SIZE)
+ lt9611uxc_firmware_write_page(lt9611uxc, offset, fw->data + offset);
+
+ if (remain > 0) {
+ char buf[LT9611UXC_FW_PAGE_SIZE];
+
+ memset(buf, 0xff, LT9611UXC_FW_PAGE_SIZE);
+ memcpy(buf, fw->data + offset, remain);
+ lt9611uxc_firmware_write_page(lt9611uxc, offset, buf);
+ }
+ msleep(20);
+
+ readbuf = lt9611uxc_firmware_read(lt9611uxc, fw->size);
+ if (!readbuf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
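+	/* The readback must match the firmware image byte for byte */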
+	if (memcmp(readbuf, fw->data, fw->size)) {
+		dev_err(lt9611uxc->dev, "Firmware update failed\n");
+		print_hex_dump(KERN_ERR, "fw: ", DUMP_PREFIX_OFFSET, 16, 1, readbuf, fw->size, false);
+		ret = -EINVAL;
+	} else {
+		dev_info(lt9611uxc->dev, "Firmware updated successfully\n");
+		ret = 0;
+	}
+ kfree(readbuf);
+
+out:
+ lt9611uxc_unlock(lt9611uxc);
+ lt9611uxc_reset(lt9611uxc);
+ release_firmware(fw);
+
+ return ret;
+}
+
+static ssize_t lt9611uxc_firmware_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct lt9611uxc *lt9611uxc = dev_get_drvdata(dev);
+ int ret;
+
+ ret = lt9611uxc_firmware_update(lt9611uxc);
+ if (ret < 0)
+ return ret;
+ return len;
+}
+
+static ssize_t lt9611uxc_firmware_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct lt9611uxc *lt9611uxc = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%02x\n", lt9611uxc->fw_version);
+}
+
+static DEVICE_ATTR_RW(lt9611uxc_firmware);
+
+static struct attribute *lt9611uxc_attrs[] = {
+ &dev_attr_lt9611uxc_firmware.attr,
+ NULL,
+};
+
+static const struct attribute_group lt9611uxc_attr_group = {
+ .attrs = lt9611uxc_attrs,
+};
+
+static const struct attribute_group *lt9611uxc_attr_groups[] = {
+ &lt9611uxc_attr_group,
+ NULL,
+};
+
+static int lt9611uxc_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct lt9611uxc *lt9611uxc;
+ struct device *dev = &client->dev;
+ int ret;
+ bool fw_updated = false;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "device doesn't support I2C\n");
+ return -ENODEV;
+ }
+
+ lt9611uxc = devm_kzalloc(dev, sizeof(*lt9611uxc), GFP_KERNEL);
+ if (!lt9611uxc)
+ return -ENOMEM;
+
+ lt9611uxc->dev = &client->dev;
+ lt9611uxc->client = client;
+ mutex_init(&lt9611uxc->ocm_lock);
+
+ lt9611uxc->regmap = devm_regmap_init_i2c(client, &lt9611uxc_regmap_config);
+ if (IS_ERR(lt9611uxc->regmap)) {
+ dev_err(lt9611uxc->dev, "regmap i2c init failed\n");
+ return PTR_ERR(lt9611uxc->regmap);
+ }
+
+ ret = lt9611uxc_parse_dt(&client->dev, lt9611uxc);
+ if (ret) {
+ dev_err(dev, "failed to parse device tree\n");
+ return ret;
+ }
+
+ ret = lt9611uxc_gpio_init(lt9611uxc);
+ if (ret < 0)
+ goto err_of_put;
+
+ ret = lt9611uxc_regulator_init(lt9611uxc);
+ if (ret < 0)
+ goto err_of_put;
+
+ lt9611uxc_assert_5v(lt9611uxc);
+
+ ret = lt9611uxc_regulator_enable(lt9611uxc);
+ if (ret)
+ goto err_of_put;
+
+ lt9611uxc_reset(lt9611uxc);
+
+ ret = lt9611uxc_read_device_rev(lt9611uxc);
+ if (ret) {
+ dev_err(dev, "failed to read chip rev\n");
+ goto err_disable_regulators;
+ }
+
+retry:
+ ret = lt9611uxc_read_version(lt9611uxc);
+ if (ret < 0) {
+ dev_err(dev, "failed to read FW version\n");
+ goto err_disable_regulators;
+ } else if (ret == 0) {
+ if (!fw_updated) {
+ fw_updated = true;
+			dev_err(dev, "FW version 0, forcing firmware update\n");
+ ret = lt9611uxc_firmware_update(lt9611uxc);
+ if (ret < 0)
+ goto err_disable_regulators;
+ else
+ goto retry;
+ } else {
+ dev_err(dev, "FW version 0, update failed\n");
+ ret = -EOPNOTSUPP;
+ goto err_disable_regulators;
+ }
+ } else if (ret < 0x40) {
+ dev_info(dev, "FW version 0x%x, HPD not supported\n", ret);
+ } else {
+ lt9611uxc->hpd_supported = true;
+ }
+ lt9611uxc->fw_version = ret;
+
+ init_waitqueue_head(&lt9611uxc->wq);
+ INIT_WORK(&lt9611uxc->work, lt9611uxc_hpd_work);
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ lt9611uxc_irq_thread_handler,
+ IRQF_ONESHOT, "lt9611uxc", lt9611uxc);
+ if (ret) {
+ dev_err(dev, "failed to request irq\n");
+ goto err_disable_regulators;
+ }
+
+ i2c_set_clientdata(client, lt9611uxc);
+
+ lt9611uxc->bridge.funcs = &lt9611uxc_bridge_funcs;
+ lt9611uxc->bridge.of_node = client->dev.of_node;
+ lt9611uxc->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
+ if (lt9611uxc->hpd_supported)
+ lt9611uxc->bridge.ops |= DRM_BRIDGE_OP_HPD;
+ lt9611uxc->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+
+ drm_bridge_add(&lt9611uxc->bridge);
+
+ /* Attach primary DSI */
+ lt9611uxc->dsi0 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi0_node);
+ if (IS_ERR(lt9611uxc->dsi0)) {
+ ret = PTR_ERR(lt9611uxc->dsi0);
+ goto err_remove_bridge;
+ }
+
+ /* Attach secondary DSI, if specified */
+ if (lt9611uxc->dsi1_node) {
+ lt9611uxc->dsi1 = lt9611uxc_attach_dsi(lt9611uxc, lt9611uxc->dsi1_node);
+ if (IS_ERR(lt9611uxc->dsi1)) {
+ ret = PTR_ERR(lt9611uxc->dsi1);
+ goto err_remove_bridge;
+ }
+ }
+
+ return lt9611uxc_audio_init(dev, lt9611uxc);
+
+err_remove_bridge:
+ drm_bridge_remove(&lt9611uxc->bridge);
+
+err_disable_regulators:
+ regulator_bulk_disable(ARRAY_SIZE(lt9611uxc->supplies), lt9611uxc->supplies);
+
+err_of_put:
+ of_node_put(lt9611uxc->dsi1_node);
+ of_node_put(lt9611uxc->dsi0_node);
+
+ return ret;
+}
+
+static int lt9611uxc_remove(struct i2c_client *client)
+{
+ struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client);
+
+ disable_irq(client->irq);
+ flush_scheduled_work();
+ lt9611uxc_audio_exit(lt9611uxc);
+ drm_bridge_remove(&lt9611uxc->bridge);
+
+ mutex_destroy(&lt9611uxc->ocm_lock);
+
+ regulator_bulk_disable(ARRAY_SIZE(lt9611uxc->supplies), lt9611uxc->supplies);
+
+ of_node_put(lt9611uxc->dsi1_node);
+ of_node_put(lt9611uxc->dsi0_node);
+
+ return 0;
+}
+
+static struct i2c_device_id lt9611uxc_id[] = {
+ { "lontium,lt9611uxc", 0 },
+ { /* sentinel */ }
+};
+
+static const struct of_device_id lt9611uxc_match_table[] = {
+ { .compatible = "lontium,lt9611uxc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, lt9611uxc_match_table);
+
+static struct i2c_driver lt9611uxc_driver = {
+ .driver = {
+ .name = "lt9611uxc",
+ .of_match_table = lt9611uxc_match_table,
+ .dev_groups = lt9611uxc_attr_groups,
+ },
+ .probe = lt9611uxc_probe,
+ .remove = lt9611uxc_remove,
+ .id_table = lt9611uxc_id,
+};
+module_i2c_driver(lt9611uxc_driver);
+
+MODULE_AUTHOR("Dmitry Baryshkov <dmitry.baryshkov@linaro.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/rr-cache/f8a1a7626be9dc69ed208c66229f310158ca0a52/thisimage b/rr-cache/f8a1a7626be9dc69ed208c66229f310158ca0a52/thisimage
index bfa4bf8..d996e5a 100644
--- a/rr-cache/f8a1a7626be9dc69ed208c66229f310158ca0a52/thisimage
+++ b/rr-cache/f8a1a7626be9dc69ed208c66229f310158ca0a52/thisimage
@@ -2866,6 +2866,215 @@ static const struct qmp_phy_init_tbl qcm2290_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88),
};
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE2_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE1, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x68),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xa2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_BUF_ENABLE, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x04),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xd8),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH3, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH4, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_TX_ADAPT_POST_THRESH, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x77),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RATE_SLEW_CNTRL1, 0x0b),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x05),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG2, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x46),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_CFG, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0xd0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MISC1, 0x88),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORE_CLK_EN, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MODE, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_DC_LEVEL_CTRL, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_2, 0xf6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_TX, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_RX, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_PI_CONTROLS, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1, 0xcc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3, 0xcc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6, 0x29),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0xc5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B1, 0xad),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B3, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B4, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B5, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B6, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xc7),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B1, 0xef),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B4, 0x81),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_PHPRE_CTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3, 0x37),
+
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_3, 0x05),
+
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE3, 0x1f),
+
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE210, 0x1f),
+
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_IDAC_SAOFFSET, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_DAC_ENABLE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_GM_CAL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2, 0x1f),
+};
+
+/* Register names should be validated, they might be different for this PHY */
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG2, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG3, 0x22),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_G3S2_PRE_GAIN, 0x2e),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x99),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_EQ_CONFIG1, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3, 0x28),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN, 0x2e),
+};
+
struct qmp_phy;
/* struct qmp_phy_cfg - per-PHY initialization config */
@@ -3094,6 +3303,10 @@ static const char * const qmp_v4_sm8250_usbphy_clk_l[] = {
"aux", "ref_clk_src", "com_aux"
};
+static const char * const sm8450_ufs_phy_clk_l[] = {
+ "qref", "ref", "ref_aux",
+};
+
static const char * const sdm845_ufs_phy_clk_l[] = {
"ref", "ref_aux",
};
@@ -4095,6 +4308,94 @@ static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
};
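+/* The SM8450 UFS PHY reuses the SM8350 init tables; only the clock list differs */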
+static const struct qmp_phy_cfg sm8450_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 2,
+
+ .serdes_tbl = sm8350_ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8350_ufsphy_serdes_tbl),
+ .tx_tbl = sm8350_ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_tx_tbl),
+ .rx_tbl = sm8350_ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_rx_tbl),
+ .pcs_tbl = sm8350_ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8350_ufsphy_pcs_tbl),
+ .clk_list = sm8450_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sm8450_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8150_ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8450_qmp_gen3x1_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_serdes_tbl),
+ .tx_tbl = sm8450_qmp_gen3x1_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_tx_tbl),
+ .rx_tbl = sm8450_qmp_gen3x1_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_rx_tbl),
+ .pcs_tbl = sm8450_qmp_gen3x1_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_tbl),
+ .pcs_misc_tbl = sm8450_qmp_gen3x1_pcie_pcs_misc_tbl,
+ .pcs_misc_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_misc_tbl),
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8250_pcie_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
+static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 2,
+
+ .serdes_tbl = sm8450_qmp_gen4x2_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_serdes_tbl),
+ .tx_tbl = sm8450_qmp_gen4x2_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_tx_tbl),
+ .rx_tbl = sm8450_qmp_gen4x2_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_rx_tbl),
+ .pcs_tbl = sm8450_qmp_gen4x2_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_tbl),
+ .pcs_misc_tbl = sm8450_qmp_gen4x2_pcie_pcs_misc_tbl,
+ .pcs_misc_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_misc_tbl),
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8250_pcie_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS_4_20,
+
+ .is_dual_lane_phy = true,
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
.type = PHY_TYPE_USB3,
.nlanes = 1,
@@ -5757,6 +6058,18 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,sm8350-qmp-usb3-uni-phy",
.data = &sm8350_usb3_uniphy_cfg,
}, {
+ .compatible = "qcom,sm8450-qmp-gen3x1-pcie-phy",
+ .data = &sm8450_qmp_gen3x1_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sm8450-qmp-gen4x2-pcie-phy",
+ .data = &sm8450_qmp_gen4x2_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sm8450-qmp-ufs-phy",
+ .data = &sm8450_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sm8450-qmp-usb3-phy",
+ .data = &sm8350_usb3phy_cfg,
+ }, {
.compatible = "qcom,qcm2290-qmp-usb3-phy",
.data = &qcm2290_usb3phy_cfg,
},