commit 7cf8e36f33
Author: Linux Build Service Account
Date: 2024-09-11 03:24:30 -07:00

Merge 328d73e6b8 on remote branch

Change-Id: Ifb5ec2e7d8b7b8b3b11a6aba3565d1c0faa7e4b1

145 changed files with 27164 additions and 522 deletions


@@ -268,6 +268,15 @@ config ARCH_NIOBE
chipset. If you do not wish to build a kernel that runs on this
chipset or if you are unsure, say 'N' here.
config ARCH_SERAPH
bool "Enable support for Qualcomm Technologies, Inc. Seraph"
depends on ARCH_QCOM
help
This enables support for Qualcomm Technologies, Inc. Seraph
chipset. If you do not wish to build a kernel that runs on this
chipset or if you are unsure, say 'N' here.
config ARCH_MONACO_AUTO
bool "Enable Support for Qualcomm Technologies, Inc. MONACO_AUTO"
depends on ARCH_QCOM


@@ -4,82 +4,81 @@ CONFIG_ARCH_QCOM=y
CONFIG_ARCH_QTI_VM=y
CONFIG_ARM64=y
CONFIG_ARM64_PMEM=y
CONFIG_ARM_SMMU=m
CONFIG_ARM_SMMU=y
CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y
CONFIG_ARM_SMMU_QCOM=m
CONFIG_COMMON_CLK_QCOM=m
CONFIG_FAILOVER=m
CONFIG_ARM_SMMU_QCOM=y
CONFIG_COMMON_CLK_QCOM=y
CONFIG_FAILOVER=y
CONFIG_GH_ARM64_DRV=y
CONFIG_GH_CTRL=y
CONFIG_GH_DBL=y
CONFIG_GH_MSGQ=y
CONFIG_GH_RM_DRV=y
CONFIG_GH_VIRT_WATCHDOG=m
CONFIG_GH_VIRT_WATCHDOG=y
CONFIG_GUNYAH=y
CONFIG_GUNYAH_DRIVERS=y
CONFIG_HVC_GUNYAH=y
CONFIG_HVC_GUNYAH_CONSOLE=y
CONFIG_I2C_MSM_GENI=m
CONFIG_I2C_VIRTIO=m
CONFIG_I2C_MSM_GENI=y
CONFIG_I2C_VIRTIO=y
CONFIG_IOMMU_IO_PGTABLE_FAST=y
# CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB is not set
# CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST is not set
CONFIG_IPC_LOGGING=m
CONFIG_IPC_LOGGING=y
CONFIG_LOCALVERSION="-perf"
CONFIG_MAILBOX=y
CONFIG_MHI_BUS=m
CONFIG_MHI_BUS=y
CONFIG_MHI_BUS_MISC=y
CONFIG_MHI_UCI=m
CONFIG_MHI_UCI=y
# CONFIG_MODULE_SIG_ALL is not set
CONFIG_MSM_HAB=m
CONFIG_MSM_HAB=y
CONFIG_MSM_VIRTIO_HAB=y
CONFIG_NET_FAILOVER=m
CONFIG_PINCTRL_LEMANS=m
CONFIG_PINCTRL_MONACO_AUTO=m
CONFIG_PINCTRL_MSM=m
CONFIG_QCOM_DMABUF_HEAPS=m
CONFIG_NET_FAILOVER=y
CONFIG_PINCTRL_LEMANS=y
CONFIG_PINCTRL_MONACO_AUTO=y
CONFIG_PINCTRL_MSM=y
CONFIG_QCOM_DMABUF_HEAPS=y
CONFIG_QCOM_DMABUF_HEAPS_CARVEOUT=y
CONFIG_QCOM_DMABUF_HEAPS_CMA=y
CONFIG_QCOM_DMABUF_HEAPS_PAGE_POOL_REFILL=y
CONFIG_QCOM_DMABUF_HEAPS_SYSTEM=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QCOM_HGSL=m
CONFIG_QCOM_HGSL_TCSR_SIGNAL=m
CONFIG_QCOM_IOMMU_DEBUG=m
CONFIG_QCOM_IOMMU_UTIL=m
CONFIG_QCOM_IPCC=m
CONFIG_QCOM_LOGBUF_BOOTLOG=m
CONFIG_QCOM_MEM_BUF=m
CONFIG_QCOM_MEM_BUF_DEV=m
CONFIG_QCOM_QMI_HELPERS=m
CONFIG_QCOM_RPROC_COMMON=m
CONFIG_QCOM_RUN_QUEUE_STATS=m
CONFIG_QCOM_HGSL=y
CONFIG_QCOM_HGSL_TCSR_SIGNAL=y
CONFIG_QCOM_IOMMU_DEBUG=y
CONFIG_QCOM_IOMMU_UTIL=y
CONFIG_QCOM_IPCC=y
CONFIG_QCOM_LOGBUF_BOOTLOG=y
CONFIG_QCOM_MEM_BUF=y
CONFIG_QCOM_MEM_BUF_DEV=y
CONFIG_QCOM_QMI_HELPERS=y
CONFIG_QCOM_RPROC_COMMON=y
CONFIG_QCOM_RUN_QUEUE_STATS=y
CONFIG_QCOM_SCM=y
CONFIG_QCOM_SECURE_BUFFER=m
CONFIG_QCOM_SMEM=m
CONFIG_QCOM_SECURE_BUFFER=y
CONFIG_QCOM_SMEM=y
# CONFIG_QCOM_SOC_WATCHDOG is not set
CONFIG_QCOM_SYSMON=m
CONFIG_QCOM_SYSMON=y
# CONFIG_QCOM_WATCHDOG_USERSPACE_PET is not set
CONFIG_QCOM_WDT_CORE=m
CONFIG_QRTR=m
CONFIG_QRTR_MHI=m
CONFIG_QCOM_WDT_CORE=y
CONFIG_QRTR=y
CONFIG_QRTR_MHI=y
CONFIG_QRTR_NODE_ID=1
CONFIG_QRTR_WAKEUP_MS=0
CONFIG_QTI_IOMMU_SUPPORT=m
CONFIG_RENAME_DEVICES=m
CONFIG_RPMSG_QCOM_GLINK=m
CONFIG_RPMSG_QCOM_GLINK_CMA=m
CONFIG_RPMSG_QCOM_GLINK_SMEM=m
CONFIG_SERIAL_MSM_GENI=m
CONFIG_SPI_MSM_GENI=m
CONFIG_SPI_SPIDEV=m
CONFIG_SWIOTLB_NONLINEAR=y
CONFIG_QTI_IOMMU_SUPPORT=y
CONFIG_RENAME_DEVICES=y
CONFIG_RPMSG_QCOM_GLINK=y
CONFIG_RPMSG_QCOM_GLINK_CMA=y
CONFIG_RPMSG_QCOM_GLINK_SMEM=y
CONFIG_SERIAL_MSM_GENI=y
CONFIG_SPI_MSM_GENI=y
CONFIG_SPI_SPIDEV=y
CONFIG_VHOST_MENU=y
CONFIG_VIRTIO_BLK=m
CONFIG_VIRTIO_INPUT=m
CONFIG_VIRTIO_MMIO=m
CONFIG_VIRTIO_BLK=y
CONFIG_VIRTIO_INPUT=y
CONFIG_VIRTIO_MMIO=y
CONFIG_VIRTIO_MMIO_POLL_RESET=y
CONFIG_VIRTIO_MMIO_SWIOTLB=y
CONFIG_VIRTIO_NET=m
CONFIG_VIRTIO_NET=y
CONFIG_VIRT_DRIVERS=y
# CONFIG_ZONE_DMA is not set
# CONFIG_ZONE_DMA32 is not set


@@ -1,3 +1,4 @@
CONFIG_AQUANTIA_PHY=m
CONFIG_ARCH_QCOM=y
CONFIG_ARM_PARAVIRT_SMMU_V3=m
CONFIG_ARM_SMMU=m
@@ -66,6 +67,7 @@ CONFIG_PINCTRL_MONACO_AUTO=m
CONFIG_PINCTRL_MSM=m
CONFIG_PINCTRL_QCOM_SPMI_PMIC=m
CONFIG_PINCTRL_SDMSHRIKE=m
CONFIG_PINCTRL_SLPI=m
CONFIG_PINCTRL_SM6150=m
CONFIG_PINCTRL_SM8150=m
CONFIG_POWER_RESET_QCOM_VM=m


@@ -113,6 +113,7 @@ CONFIG_MSM_CORE_HANG_DETECT=m
CONFIG_MSM_GPI_DMA=m
# CONFIG_MSM_GPI_DMA_DEBUG is not set
CONFIG_MSM_HSUSB_PHY=m
CONFIG_MSM_NPU=m
CONFIG_MSM_PERFORMANCE=m
CONFIG_MSM_QMP=m
CONFIG_NL80211_TESTMODE=y


@@ -0,0 +1,118 @@
CONFIG_ARCH_NEO=y
CONFIG_ARM_QCOM_CPUFREQ_HW=m
CONFIG_ARM_QCOM_CPUFREQ_HW_DEBUG=m
CONFIG_ARM_SMMU=m
CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y
CONFIG_ARM_SMMU_SELFTEST=y
CONFIG_COMMON_CLK_QCOM=m
CONFIG_CPU_IDLE_GOV_QCOM_LPM=m
CONFIG_EDAC_KRYO_ARM64=m
# CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE is not set
CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE=y
CONFIG_EDAC_QCOM=m
# CONFIG_EDAC_QCOM_LLCC_PANIC_ON_CE is not set
CONFIG_EDAC_QCOM_LLCC_PANIC_ON_UE=y
# CONFIG_GH_CTRL is not set
# CONFIG_GH_DBL is not set
# CONFIG_GH_GUEST_POPS is not set
# CONFIG_GH_IRQ_LEND is not set
# CONFIG_GH_MEM_NOTIFIER is not set
CONFIG_GH_MSGQ=m
CONFIG_GH_RM_DRV=m
# CONFIG_GH_TLMM_VM_MEM_ACCESS is not set
CONFIG_GH_VIRT_WATCHDOG=m
CONFIG_GUNYAH_DRIVERS=y
# CONFIG_HVC_GUNYAH is not set
CONFIG_HWSPINLOCK_QCOM=m
CONFIG_I2C_EUSB2_REPEATER=m
CONFIG_I2C_MSM_GENI=m
CONFIG_INIT_ON_FREE_DEFAULT_ON=y
CONFIG_INTERCONNECT_QCOM_BCM_VOTER=m
CONFIG_INTERCONNECT_QCOM_DEBUG=m
CONFIG_INTERCONNECT_QCOM_NEO=m
CONFIG_INTERCONNECT_QCOM_QOS=m
CONFIG_INTERCONNECT_QCOM_RPMH=m
CONFIG_IOMMU_IO_PGTABLE_FAST=y
CONFIG_LOCALVERSION="-gki"
CONFIG_MFD_I2C_PMIC=m
# CONFIG_MODULE_SIG_ALL is not set
CONFIG_MSM_BOOT_STATS=m
CONFIG_MSM_CORE_HANG_DETECT=m
CONFIG_MSM_GPI_DMA=m
CONFIG_MSM_PERFORMANCE=m
CONFIG_MSM_SYSSTATS=m
CONFIG_PDR_INDICATION_NOTIF_TIMEOUT=9000
CONFIG_PINCTRL_MSM=m
CONFIG_PINCTRL_NEO=m
CONFIG_POWER_RESET_QCOM_DOWNLOAD_MODE=m
CONFIG_POWER_RESET_QCOM_DOWNLOAD_MODE_DEFAULT=y
CONFIG_POWER_RESET_QCOM_REBOOT_REASON=m
CONFIG_QCOM_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_QCOM_CLK_RPMH=m
CONFIG_QCOM_COMMAND_DB=m
CONFIG_QCOM_CPUSS_SLEEP_STATS=m
CONFIG_QCOM_DMABUF_HEAPS=m
CONFIG_QCOM_DMABUF_HEAPS_CARVEOUT=y
CONFIG_QCOM_DMABUF_HEAPS_CMA=y
CONFIG_QCOM_DMABUF_HEAPS_PAGE_POOL_REFILL=y
CONFIG_QCOM_DMABUF_HEAPS_SYSTEM=y
CONFIG_QCOM_DMABUF_HEAPS_SYSTEM_SECURE=y
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QCOM_GDSC_REGULATOR=m
CONFIG_QCOM_IOMMU_DEBUG=m
CONFIG_QCOM_IOMMU_UTIL=m
CONFIG_QCOM_LAZY_MAPPING=m
CONFIG_QCOM_LLCC=m
CONFIG_QCOM_LLCC_PERFMON=m
CONFIG_QCOM_LLCC_PMU=m
CONFIG_QCOM_MEMORY_DUMP_V2=m
CONFIG_QCOM_MEM_BUF=m
CONFIG_QCOM_MEM_BUF_DEV=m
CONFIG_QCOM_MEM_HOOKS=m
CONFIG_QCOM_PANIC_ON_NOTIF_TIMEOUT=y
CONFIG_QCOM_PANIC_ON_PDR_NOTIF_TIMEOUT=y
CONFIG_QCOM_PDC=m
CONFIG_QCOM_RAMDUMP=m
CONFIG_QCOM_RPMH=m
CONFIG_QCOM_RUN_QUEUE_STATS=m
CONFIG_QCOM_SCM=m
CONFIG_QCOM_SECURE_BUFFER=m
CONFIG_QCOM_SHOW_RESUME_IRQ=m
CONFIG_QCOM_SMEM=m
CONFIG_QCOM_SOCINFO=m
CONFIG_QCOM_SOC_WATCHDOG=m
CONFIG_QCOM_STATS=m
CONFIG_QCOM_WATCHDOG_BARK_TIME=11000
CONFIG_QCOM_WATCHDOG_IPI_PING=y
CONFIG_QCOM_WATCHDOG_PET_TIME=9360
# CONFIG_QCOM_WATCHDOG_USERSPACE_PET is not set
CONFIG_QCOM_WATCHDOG_WAKEUP_ENABLE=y
# CONFIG_QCOM_WCNSS_PIL is not set
# CONFIG_QCOM_WDOG_BITE_EARLY_PANIC is not set
CONFIG_QCOM_WDT_CORE=m
CONFIG_QTEE_SHM_BRIDGE=y
CONFIG_QTI_IOMMU_SUPPORT=m
CONFIG_REGULATOR_DEBUG_CONTROL=m
CONFIG_REGULATOR_PROXY_CONSUMER=m
CONFIG_REGULATOR_QCOM_PM8008=m
CONFIG_REGULATOR_QTI_FIXED_VOLTAGE=m
CONFIG_REGULATOR_RPMH=m
CONFIG_SCHED_WALT=m
CONFIG_SERIAL_MSM_GENI=m
CONFIG_SPI_MSM_GENI=m
CONFIG_SXR_CAMCC_NEO=m
CONFIG_SXR_DEBUGCC_NEO=m
CONFIG_SXR_DISPCC_NEO=m
CONFIG_SXR_GCC_NEO=m
CONFIG_SXR_GPUCC_NEO=m
CONFIG_SXR_TCSRCC_NEO=m
CONFIG_SXR_VIDEOCC_NEO=m
CONFIG_USB_CONFIGFS_F_DIAG=m
CONFIG_USB_CONFIGFS_F_QDSS=m
CONFIG_USB_DWC3_MSM=m
CONFIG_USB_F_DIAG=m
CONFIG_USB_F_QDSS=m
CONFIG_USB_MSM_EUSB2_PHY=m
CONFIG_USB_MSM_SSPHY_QMP=m
CONFIG_USB_REPEATER=m
CONFIG_VIRT_DRIVERS=y


@@ -0,0 +1,21 @@
CONFIG_CMDLINE="stack_depot_disable=off kasan.stacktrace=off cgroup_disable=pressure cgroup.memory=nokmem page_owner=on no_hash_pointers panic_on_taint=0x20"
CONFIG_DEBUG_SPINLOCK=y
CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE=y
CONFIG_EDAC_QCOM_LLCC_PANIC_ON_CE=y
CONFIG_INIT_ON_FREE_DEFAULT_ON=y
CONFIG_IOMMU_TLBSYNC_DEBUG=y
CONFIG_LKDTM=m
CONFIG_LOCALVERSION="-consolidate"
CONFIG_LOCKDEP=y
CONFIG_LOCKUP_DETECTOR=y
CONFIG_LOCK_STAT=y
CONFIG_MSM_GPI_DMA_DEBUG=y
CONFIG_PAGE_POISONING=y
CONFIG_PM_DEBUG=y
CONFIG_PM_SLEEP_DEBUG=y
CONFIG_PREEMPTIRQ_TRACEPOINTS=y
CONFIG_RUNTIME_TESTING_MENU=y
CONFIG_SCHED_WALT_DEBUG=m
CONFIG_TRACE_IRQFLAGS=y
CONFIG_TRACE_MMIO_ACCESS=y
CONFIG_TRACE_PREEMPT_TOGGLE=y


@@ -120,6 +120,7 @@ CONFIG_POWER_RESET_QCOM_DOWNLOAD_MODE=m
CONFIG_POWER_RESET_QCOM_DOWNLOAD_MODE_DEFAULT=y
CONFIG_POWER_RESET_QCOM_PON=m
CONFIG_POWER_RESET_QCOM_REBOOT_REASON=m
CONFIG_PWM_QCOM=m
CONFIG_PWM_QTI_LPG=m
CONFIG_QCOM_AOSS_QMP=m
CONFIG_QCOM_BALANCE_ANON_FILE_RECLAIM=y


@@ -98,6 +98,7 @@ CONFIG_MMC_SDHCI_MSM=m
# CONFIG_MODULE_SIG_ALL is not set
CONFIG_MSM_BOOT_STATS=m
CONFIG_MSM_CORE_HANG_DETECT=m
CONFIG_MSM_GPI_DMA=m
CONFIG_MSM_PERFORMANCE=m
CONFIG_MSM_QMP=m
CONFIG_MSM_QUSB_PHY=m


@@ -2,6 +2,7 @@ CONFIG_ATOMIC64_SELFTEST=m
CONFIG_LKDTM=m
CONFIG_LOCALVERSION="-gki-consolidate"
CONFIG_LOCK_TORTURE_TEST=m
CONFIG_MSM_GPI_DMA_DEBUG=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_TEST_USER_COPY=m
CONFIG_USB_LINK_LAYER_TEST=m


@@ -0,0 +1,10 @@
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_SERAPH=y
CONFIG_HWSPINLOCK_QCOM=m
CONFIG_LOCALVERSION="-gki"
# CONFIG_MODULE_SIG_ALL is not set
CONFIG_PINCTRL_MSM=m
CONFIG_PINCTRL_SERAPH=m
CONFIG_QCOM_SCM=m
CONFIG_QCOM_SMEM=m
CONFIG_QCOM_SOCINFO=m


@@ -0,0 +1,14 @@
CONFIG_ATOMIC64_SELFTEST=m
CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y
CONFIG_DEBUG_PAGEALLOC=y
CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_LKDTM=m
CONFIG_LOCALVERSION="-consolidate"
CONFIG_LOCK_TORTURE_TEST=m
CONFIG_PAGE_OWNER=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RUNTIME_TESTING_MENU=y
CONFIG_TEST_USER_COPY=m


@@ -7,50 +7,6 @@ target_name = "autoghgvm"
def define_autoghgvm_lxc():
_autoghgvm_lxc_in_tree_modules = [
# keep sorted
"drivers/block/virtio_blk.ko",
"drivers/bus/mhi/devices/mhi_dev_uci.ko",
"drivers/bus/mhi/host/mhi.ko",
"drivers/clk/qcom/clk-dummy.ko",
"drivers/clk/qcom/clk-qcom.ko",
"drivers/dma-buf/heaps/qcom_dma_heaps.ko",
"drivers/i2c/busses/i2c-msm-geni.ko",
"drivers/i2c/busses/i2c-virtio.ko",
"drivers/iommu/arm/arm-smmu/arm_smmu.ko",
"drivers/iommu/iommu-logger.ko",
"drivers/iommu/qcom_iommu_debug.ko",
"drivers/iommu/qcom_iommu_util.ko",
"drivers/mailbox/qcom-ipcc.ko",
"drivers/net/net_failover.ko",
"drivers/net/virtio_net.ko",
"drivers/pinctrl/qcom/pinctrl-lemans.ko",
"drivers/pinctrl/qcom/pinctrl-monaco_auto.ko",
"drivers/pinctrl/qcom/pinctrl-msm.ko",
"drivers/remoteproc/qcom_sysmon.ko",
"drivers/remoteproc/rproc_qcom_common.ko",
"drivers/rpmsg/qcom_glink.ko",
"drivers/rpmsg/qcom_glink_cma.ko",
"drivers/rpmsg/qcom_glink_smem.ko",
"drivers/soc/qcom/hab/msm_hab.ko",
"drivers/soc/qcom/hgsl/qcom_hgsl.ko",
"drivers/soc/qcom/mem_buf/mem_buf.ko",
"drivers/soc/qcom/mem_buf/mem_buf_dev.ko",
"drivers/soc/qcom/qcom_logbuf_boot_log.ko",
"drivers/soc/qcom/qcom_wdt_core.ko",
"drivers/soc/qcom/qmi_helpers.ko",
"drivers/soc/qcom/rename_devices.ko",
"drivers/soc/qcom/rq_stats.ko",
"drivers/soc/qcom/secure_buffer.ko",
"drivers/soc/qcom/smem.ko",
"drivers/spi/spi-msm-geni.ko",
"drivers/spi/spidev.ko",
"drivers/tty/serial/msm_geni_serial.ko",
"drivers/virt/gunyah/gh_virt_wdt.ko",
"drivers/virtio/virtio_input.ko",
"drivers/virtio/virtio_mmio.ko",
"kernel/trace/qcom_ipc_logging.ko",
"net/core/failover.ko",
"net/qrtr/qrtr.ko",
"net/qrtr/qrtr-mhi.ko",
]
for variant in lxc_variants:


@@ -54,6 +54,7 @@ def define_autogvm():
"drivers/net/mdio/mdio-mux.ko",
"drivers/net/net_failover.ko",
"drivers/net/pcs/pcs_xpcs.ko",
"drivers/net/phy/aquantia.ko",
"drivers/net/phy/marvell.ko",
"drivers/net/virtio_net.ko",
"drivers/pci/controller/pci-msm-drv.ko",
@@ -66,6 +67,7 @@ def define_autogvm():
"drivers/pinctrl/qcom/pinctrl-monaco_auto.ko",
"drivers/pinctrl/qcom/pinctrl-msm.ko",
"drivers/pinctrl/qcom/pinctrl-sdmshrike.ko",
"drivers/pinctrl/qcom/pinctrl-slpi.ko",
"drivers/pinctrl/qcom/pinctrl-sm6150.ko",
"drivers/pinctrl/qcom/pinctrl-sm8150.ko",
"drivers/pinctrl/qcom/pinctrl-spmi-gpio.ko",

build.config.msm.neo-la (new file, 37 lines)

@@ -0,0 +1,37 @@
################################################################################
## Inheriting configs from ACK
. ${ROOT_DIR}/msm-kernel/build.config.common
. ${ROOT_DIR}/msm-kernel/build.config.aarch64
################################################################################
## Variant setup
MSM_ARCH=neo_la
VARIANTS=(consolidate gki)
[ -z "${VARIANT}" ] && VARIANT=consolidate
ABL_SRC=bootable/bootloader/edk2
BOOT_IMAGE_HEADER_VERSION=4
BASE_ADDRESS=0x80000000
PAGE_SIZE=4096
BUILD_VENDOR_DLKM=1
PREPARE_SYSTEM_DLKM=1
SYSTEM_DLKM_MODULES_LIST=${ROOT_DIR}/${KERNEL_DIR}/android/gki_system_dlkm_modules
SUPER_IMAGE_SIZE=0x10000000
TRIM_UNUSED_MODULES=1
BUILD_INIT_BOOT_IMG=1
[ -z "${DT_OVERLAY_SUPPORT}" ] && DT_OVERLAY_SUPPORT=1
if [ "${KERNEL_CMDLINE_CONSOLE_AUTO}" != "0" ]; then
KERNEL_VENDOR_CMDLINE+=' console=ttyMSM0,115200n8 msm_geni_serial.con_enabled=1 '
fi
KERNEL_VENDOR_CMDLINE+=' bootconfig '
################################################################################
## Inheriting MSM configs
. ${KERNEL_DIR}/build.config.msm.common
. ${KERNEL_DIR}/build.config.msm.gki
## Inherit SXR configs
. ${KERNEL_DIR}/build.config.sxr.common

build.config.msm.seraph (new file, 33 lines)

@@ -0,0 +1,33 @@
################################################################################
## Inheriting configs from ACK
. ${ROOT_DIR}/msm-kernel/build.config.common
. ${ROOT_DIR}/msm-kernel/build.config.aarch64
################################################################################
## Variant setup
MSM_ARCH=seraph
VARIANTS=(consolidate gki)
[ -z "${VARIANT}" ] && VARIANT=consolidate
ABL_SRC=bootable/bootloader/edk2
BOOT_IMAGE_HEADER_VERSION=4
BASE_ADDRESS=0x80000000
PAGE_SIZE=4096
BUILD_VENDOR_DLKM=1
PREPARE_SYSTEM_DLKM=1
SYSTEM_DLKM_MODULES_LIST=${ROOT_DIR}/${KERNEL_DIR}/android/gki_system_dlkm_modules
SUPER_IMAGE_SIZE=0x10000000
TRIM_UNUSED_MODULES=1
BUILD_INIT_BOOT_IMG=1
[ -z "${DT_OVERLAY_SUPPORT}" ] && DT_OVERLAY_SUPPORT=1
if [ "${KERNEL_CMDLINE_CONSOLE_AUTO}" != "0" ]; then
KERNEL_VENDOR_CMDLINE+='console=ttyMSM0,115200n8 earlycon'
fi
KERNEL_VENDOR_CMDLINE+=' bootconfig '
################################################################################
## Inheriting MSM configs
. ${KERNEL_DIR}/build.config.msm.common
. ${KERNEL_DIR}/build.config.msm.gki


@@ -2,7 +2,9 @@ build.config.msm.autogvm
build.config.msm.autoghgvm
build.config.msm.pineapple
build.config.msm.anorak
build.config.msm.neo-la
build.config.msm.niobe
build.config.msm.seraph
build.config.msm.kalama
build.config.msm.pineapple.vm
build.config.msm.kalama.vm


@@ -1490,6 +1490,69 @@ config SM_DEBUGCC_VOLCANO
Volcano devices.
Say Y if you want to support the debug clocks such as clock
measurement functionality.
config SXR_GCC_NEO
tristate "NEO Global Clock Controller"
depends on COMMON_CLK_QCOM
help
Support for the global clock controller on Qualcomm Technologies, Inc.
NEO devices.
Say Y if you want to use peripheral devices such as UART, SPI, I2C,
USB, UFS, SD/eMMC, PCIE, etc.
config SXR_GPUCC_NEO
tristate "NEO Graphics Clock Controller"
select SXR_GCC_NEO
help
Support for the graphics clock controller on Qualcomm Technologies, Inc.
NEO devices.
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
config SXR_CAMCC_NEO
tristate "NEO Camera Clock Controller"
select SXR_GCC_NEO
help
Support for the camera clock controller on Qualcomm Technologies, Inc.
NEO devices.
Say Y if you want to support camera devices and functionality such as
capturing pictures.
config SXR_DISPCC_NEO
tristate "NEO Display Clock Controller"
select SXR_GCC_NEO
help
Support for the display clock controller on Qualcomm Technologies, Inc.
NEO devices.
Say Y if you want to support display devices and functionality such as
splash screen.
config SXR_VIDEOCC_NEO
tristate "NEO Video Clock Controller"
select SXR_GCC_NEO
help
Support for the video clock controller on Qualcomm Technologies, Inc.
NEO devices.
Say Y if you want to support video devices and functionality such as
video encode/decode.
config SXR_DEBUGCC_NEO
tristate "NEO Debug Clock Controller"
depends on SXR_GCC_NEO
help
Support for the debug clock controller on Qualcomm Technologies, Inc.
NEO devices.
Say Y if you want to support the debug clocks such as clock measurement
functionality.
config SXR_TCSRCC_NEO
tristate "Top-Level CSR Clock Controller"
depends on COMMON_CLK_QCOM
help
Support for the TCSR clock controller on Qualcomm Technologies, Inc.
NEO devices.
Say Y if you want to support miscellaneous top-level clocks
such as for the PHY references.
endif
config VIRTIO_CLK


@@ -127,6 +127,7 @@ obj-$(CONFIG_SM_DISPCC_HOLI) += dispcc-holi.o
obj-$(CONFIG_SM_DISPCC_PINEAPPLE) += dispcc-pineapple.o
obj-$(CONFIG_SM_DISPCC_PITTI) += dispcc-pitti.o
obj-$(CONFIG_SXR_DISPCC_ANORAK) += dispcc0-anorak.o dispcc1-anorak.o
obj-$(CONFIG_SXR_DISPCC_NEO) += dispcc-neo.o
obj-$(CONFIG_SXR_DISPCC_NIOBE) += dispcc0-niobe.o dispcc1-niobe.o
obj-$(CONFIG_SM_DISPCC_VOLCANO) += dispcc-volcano.o
obj-$(CONFIG_SM_DEBUGCC_BLAIR) += debugcc-blair.o
@@ -135,6 +136,7 @@ obj-$(CONFIG_SM_DEBUGCC_HOLI) += debugcc-holi.o
obj-$(CONFIG_SM_DEBUGCC_PINEAPPLE) += debugcc-pineapple.o
obj-$(CONFIG_SM_DEBUGCC_PITTI) += debugcc-pitti.o
obj-$(CONFIG_SXR_DEBUGCC_ANORAK) += debugcc-anorak.o
obj-$(CONFIG_SXR_DEBUGCC_NEO) += debugcc-neo.o
obj-$(CONFIG_SXR_DEBUGCC_NIOBE) += debugcc-niobe.o
obj-$(CONFIG_SM_DEBUGCC_VOLCANO) += debugcc-volcano.o
obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o
@@ -153,6 +155,7 @@ obj-$(CONFIG_SM_GCC_PINEAPPLE) += gcc-pineapple.o
obj-$(CONFIG_SM_GCC_PITTI) += gcc-pitti.o
obj-$(CONFIG_SM_GCC_VOLCANO) += gcc-volcano.o
obj-$(CONFIG_SXR_GCC_ANORAK) += gcc-anorak.o
obj-$(CONFIG_SXR_GCC_NEO) += gcc-neo.o
obj-$(CONFIG_SXR_GCC_NIOBE) += gcc-niobe.o
obj-$(CONFIG_SM_GPUCC_6150) += gpucc-sm6150.o
obj-$(CONFIG_SM_GPUCC_6350) += gpucc-sm6350.o
@@ -160,6 +163,7 @@ obj-$(CONFIG_SM_GPUCC_PITTI) += gpucc-pitti.o
obj-$(CONFIG_SM_CAMCC_CLIFFS) += camcc-cliffs.o
obj-$(CONFIG_SM_CAMCC_PINEAPPLE) += camcc-pineapple.o
obj-$(CONFIG_SXR_CAMCC_ANORAK) += camcc-anorak.o
obj-$(CONFIG_SXR_CAMCC_NEO) += camcc-neo.o
obj-$(CONFIG_SXR_CAMCC_NIOBE) += camcc-niobe.o
obj-$(CONFIG_SM_CAMCC_VOLCANO) += camcc-volcano.o
obj-$(CONFIG_SM_GCC_KALAMA) += gcc-kalama.o
@@ -181,6 +185,7 @@ obj-$(CONFIG_SM_GPUCC_HOLI) += gpucc-holi.o
obj-$(CONFIG_SM_GPUCC_PINEAPPLE) += gpucc-pineapple.o
obj-$(CONFIG_SM_GPUCC_VOLCANO) += gpucc-volcano.o
obj-$(CONFIG_SXR_GPUCC_ANORAK) += gpucc-anorak.o
obj-$(CONFIG_SXR_GPUCC_NEO) += gpucc-neo.o
obj-$(CONFIG_SXR_GPUCC_NIOBE) += gpucc-niobe.o
obj-$(CONFIG_SM_VIDEOCC_6150) += videocc-sm6150.o
obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
@@ -188,8 +193,10 @@ obj-$(CONFIG_SM_VIDEOCC_8250) += videocc-sm8250.o
obj-$(CONFIG_SM_VIDEOCC_PINEAPPLE) += videocc-pineapple.o
obj-$(CONFIG_SM_VIDEOCC_VOLCANO) += videocc-volcano.o
obj-$(CONFIG_SXR_VIDEOCC_ANORAK) += videocc-anorak.o
obj-$(CONFIG_SXR_VIDEOCC_NEO) += videocc-neo.o
obj-$(CONFIG_SXR_VIDEOCC_NIOBE) += videocc-niobe.o
obj-$(CONFIG_SM_TCSRCC_PINEAPPLE) += tcsrcc-pineapple.o
obj-$(CONFIG_SXR_TCSRCC_NEO) += tcsrcc-neo.o
obj-$(CONFIG_SXR_TCSRCC_NIOBE) += tcsrcc-niobe.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o

drivers/clk/qcom/camcc-neo.c (new file, 2788 lines; diff suppressed because it is too large)


@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013, 2016, 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
@@ -104,6 +104,7 @@ static int clk_branch_wait(const struct clk_branch *br, bool enabling,
udelay(10);
} else if (br->halt_check == BRANCH_HALT_ENABLE ||
br->halt_check == BRANCH_HALT ||
br->halt_check == BRANCH_HALT_POLL ||
(enabling && voted)) {
timeout = get_branch_timeout(br);
@@ -125,6 +126,10 @@ static int clk_branch_toggle(struct clk_hw *hw, bool en,
struct clk_branch *br = to_clk_branch(hw);
int ret;
if (br->halt_check == BRANCH_HALT_POLL) {
return clk_branch_wait(br, en, check_halt);
}
if (en) {
ret = clk_enable_regmap(hw);
if (ret)
@@ -211,6 +216,12 @@ static void clk_branch2_list_registers(struct seq_file *f, struct clk_hw *hw)
{"MEM_ENABLE_ACK_MASK", 0x0},
};
static struct clk_register_data data3[] = {
{"SREG_ENABLE_REG", 0x0},
{"SREG_CORE_ACK_MASK", 0x0},
{"SREG_PERIPH_ACK_MASK", 0x0},
};
size = ARRAY_SIZE(data);
for (i = 0; i < size; i++) {
@@ -243,6 +254,16 @@ static void clk_branch2_list_registers(struct seq_file *f, struct clk_hw *hw)
clock_debug_output(f, "%20s: 0x%.8x\n", data2[2].name,
br->mem_enable_ack_bit);
}
if (br->sreg_enable_reg) {
regmap_read(br->clkr.regmap, br->sreg_enable_reg +
data3[0].offset, &val);
clock_debug_output(f, "%20s: 0x%.8x\n", data3[0].name, val);
clock_debug_output(f, "%20s: 0x%.8x\n", data3[1].name,
br->sreg_core_ack_bit);
clock_debug_output(f, "%20s: 0x%.8x\n", data3[2].name,
br->sreg_periph_ack_bit);
}
}
static int clk_branch2_set_flags(struct clk_hw *hw, unsigned long flags)
@@ -323,6 +344,29 @@ static int clk_branch2_mem_enable(struct clk_hw *hw)
return -EBUSY;
}
static int clk_branch2_sreg_enable(struct clk_hw *hw)
{
struct clk_branch *br = to_clk_branch(hw);
u32 val;
int count = 200;
int ret;
ret = clk_enable_regmap(hw);
if (ret)
return -EINVAL;
regmap_read(br->clkr.regmap, br->sreg_enable_reg, &val);
while (count-- > 0) {
if (!(val & br->sreg_core_ack_bit))
return 0;
udelay(1);
regmap_read(br->clkr.regmap, br->sreg_enable_reg, &val);
}
return -EBUSY;
}
static void clk_branch2_mem_disable(struct clk_hw *hw)
{
struct clk_branch *br = to_clk_branch(hw);
@@ -332,6 +376,24 @@ static void clk_branch2_mem_disable(struct clk_hw *hw)
return clk_branch2_disable(hw);
}
static void clk_branch2_sreg_disable(struct clk_hw *hw)
{
struct clk_branch *br = to_clk_branch(hw);
u32 val;
int count = 200;
clk_disable_regmap(hw);
regmap_read(br->clkr.regmap, br->sreg_enable_reg, &val);
while (count-- > 0) {
if (val & br->sreg_periph_ack_bit)
return;
udelay(1);
regmap_read(br->clkr.regmap, br->sreg_enable_reg, &val);
}
}
static void clk_branch_restore_context_aon(struct clk_hw *hw)
{
if (clk_enable_regmap(hw))
@@ -395,6 +457,15 @@ const struct clk_ops clk_branch2_mem_ops = {
};
EXPORT_SYMBOL(clk_branch2_mem_ops);
const struct clk_ops clk_branch2_sreg_ops = {
.enable = clk_branch2_sreg_enable,
.disable = clk_branch2_sreg_disable,
.is_enabled = clk_is_enabled_regmap,
.init = clk_branch2_init,
.debug_init = clk_branch_debug_init,
};
EXPORT_SYMBOL_GPL(clk_branch2_sreg_ops);
static unsigned long clk_branch2_hw_ctl_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
{


@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2013, 2016, 2020 The Linux Foundation. All rights reserved. */
/* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. */
/* Copyright (c) 2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved. */
#ifndef __QCOM_CLK_BRANCH_H__
#define __QCOM_CLK_BRANCH_H__
@@ -26,9 +26,12 @@ struct clk_branch {
u32 halt_reg;
u32 mem_enable_reg;
u32 mem_ack_reg;
u32 sreg_enable_reg;
u8 hwcg_bit;
u8 halt_bit;
u8 mem_enable_ack_bit;
u32 sreg_core_ack_bit;
u32 sreg_periph_ack_bit;
u8 halt_check;
#define BRANCH_VOTED BIT(7) /* Delay on disable */
#define BRANCH_HALT 0 /* pol: 1 = halt */
@@ -38,6 +41,7 @@ struct clk_branch {
#define BRANCH_HALT_DELAY 2 /* No bit to check; just delay */
#define BRANCH_HALT_SKIP 3 /* Don't check halt bit */
#define BRANCH_HALT_INVERT 4 /* Invert logic for halt bit */
#define BRANCH_HALT_POLL 5 /* Don't enable the clock, poll for halt */
struct clk_regmap clkr;
};
@@ -50,6 +54,7 @@ extern const struct clk_ops clk_branch2_aon_ops;
extern const struct clk_ops clk_branch2_force_off_ops;
extern const struct clk_ops clk_branch2_mem_ops;
extern const struct clk_ops clk_branch2_crm_ops;
extern const struct clk_ops clk_branch2_sreg_ops;
#define to_clk_branch(_hw) \
container_of(to_clk_regmap(_hw), struct clk_branch, clkr)
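
For context, here is a minimal sketch of how a clock-controller driver might use the two additions above (the SREG fields with clk_branch2_sreg_ops, and the new BRANCH_HALT_POLL mode). Every offset, bit position, and clock name below is hypothetical, not taken from any real controller:

/* Hypothetical branch clock using the new SREG enable/ack handshake. */
static struct clk_branch example_sreg_clk = {
	.sreg_enable_reg = 0x4000,	/* register offset assumed */
	.sreg_core_ack_bit = BIT(8),	/* ack bit assumed */
	.sreg_periph_ack_bit = BIT(13),	/* ack bit assumed */
	.clkr = {
		.enable_reg = 0x4000,
		.enable_mask = BIT(0),
		.hw.init = &(struct clk_init_data){
			.name = "example_sreg_clk",
			.ops = &clk_branch2_sreg_ops,
		},
	},
};

/* Hypothetical branch using BRANCH_HALT_POLL: enable/disable only poll
 * the halt bit via clk_branch_wait(); the enable register is never
 * written, matching the early return added to clk_branch_toggle(). */
static struct clk_branch example_poll_clk = {
	.halt_reg = 0x4004,		/* register offset assumed */
	.halt_check = BRANCH_HALT_POLL,
	.clkr.hw.init = &(struct clk_init_data){
		.name = "example_poll_clk",
		.ops = &clk_branch2_ops,
	},
};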


@@ -707,6 +707,18 @@ static const struct clk_rpmh_desc clk_rpmh_lemans = {
.num_clks = ARRAY_SIZE(lemans_rpmh_clocks),
};
DEFINE_CLK_RPMH_ARC(neo, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 1);
static struct clk_hw *neo_rpmh_clocks[] = {
[RPMH_CXO_CLK] = &neo_bi_tcxo.hw,
[RPMH_CXO_CLK_A] = &neo_bi_tcxo_ao.hw,
};
static const struct clk_rpmh_desc clk_rpmh_neo = {
.clks = neo_rpmh_clocks,
.num_clks = ARRAY_SIZE(neo_rpmh_clocks),
};
DEFINE_CLK_RPMH_VRM(anorak, rf_clk1, rf_clk1_ao, "clka1", 1);
DEFINE_CLK_RPMH_VRM(anorak, ln_bb_clk7, ln_bb_clk7_ao, "clka7", 2);
DEFINE_CLK_RPMH_VRM(anorak, ln_bb_clk8, ln_bb_clk8_ao, "clka8", 4);
@@ -1009,6 +1021,7 @@ static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,niobe-rpmh-clk", .data = &clk_rpmh_niobe},
{ .compatible = "qcom,volcano-rpmh-clk", .data = &clk_rpmh_volcano},
{ .compatible = "qcom,anorak-rpmh-clk", .data = &clk_rpmh_anorak},
{ .compatible = "qcom,neo-rpmh-clk", .data = &clk_rpmh_neo},
{ }
};
MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
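
For context, a consumer on a NEO platform would pick up the new RPMh XO clock through the standard clk API; a minimal hypothetical sketch follows (the "xo" con_id and the consumer device are assumptions, not part of this change):

/* Hypothetical consumer of the new qcom,neo-rpmh-clk provider. */
static int example_probe(struct platform_device *pdev)
{
	struct clk *xo = devm_clk_get(&pdev->dev, "xo"); /* con_id assumed */

	if (IS_ERR(xo))
		return PTR_ERR(xo);

	/* Enabling the clock votes bi_tcxo on over RPMh. */
	return clk_prepare_enable(xo);
}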

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -632,7 +632,7 @@ static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
};
static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
.cmd_rcgr = 0x6044,
.cmd_rcgr = 0x7044,
.mnd_width = 16,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,

drivers/clk/qcom/gcc-neo.c (new file, 2688 lines; diff suppressed because it is too large)


@@ -1933,6 +1933,20 @@ static struct clk_regmap_div gcc_pcie_2_pipe_div_clk_src = {
},
};
static struct clk_regmap_div gcc_pwm0_xo512_div_clk_src = {
.reg = 0x33030,
.shift = 0,
.width = 9,
.clkr.hw.init = &(const struct clk_init_data) {
.name = "gcc_pwm0_xo512_div_clk_src",
.parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
.ops = &clk_regmap_div_ops,
},
};
static struct clk_regmap_div gcc_qupv3_wrap1_s1_div_clk_src = {
.reg = 0x18148,
.shift = 0,
@@ -3011,6 +3025,11 @@ static struct clk_branch gcc_pwm0_xo512_clk = {
.enable_mask = BIT(0),
.hw.init = &(const struct clk_init_data) {
.name = "gcc_pwm0_xo512_clk",
.parent_hws = (const struct clk_hw*[]){
&gcc_pwm0_xo512_div_clk_src.clkr.hw
},
.flags = CLK_SET_RATE_PARENT,
.num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -4280,6 +4299,7 @@ static struct clk_regmap *gcc_niobe_clocks[] = {
[GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
[GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
[GCC_PWM0_XO512_CLK] = &gcc_pwm0_xo512_clk.clkr,
[GCC_PWM0_XO512_DIV_CLK_SRC] = &gcc_pwm0_xo512_div_clk_src.clkr,
[GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
[GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
[GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
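
Because gcc_pwm0_xo512_clk now carries CLK_SET_RATE_PARENT, a rate request on the branch is forwarded to the new gcc_pwm0_xo512_div_clk_src divider. A minimal hypothetical consumer sketch (the con_id and target rate are assumptions):

/* Hypothetical consumer: a rate set on the branch propagates to the
 * divider through CLK_SET_RATE_PARENT. */
static int example_pwm_clk_setup(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, "pwm0_xo512"); /* con_id assumed */

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_set_rate(clk, 37500); /* 19.2 MHz XO divided by 512 */
	return clk_prepare_enable(clk);
}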


@@ -0,0 +1,535 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, 2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,gpucc-neo.h>
#include "clk-alpha-pll.h"
#include "clk-branch.h"
#include "clk-rcg.h"
#include "common.h"
#include "reset.h"
#include "vdd-level.h"
static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NOMINAL + 1, 1, vdd_corner);
static DEFINE_VDD_REGULATORS(vdd_mxa, VDD_NOMINAL + 1, 1, vdd_corner);
static struct clk_vdd_class *gpu_cc_neo_regulators[] = {
&vdd_cx,
&vdd_mxa,
};
enum {
P_BI_TCXO,
P_GPLL0_OUT_MAIN,
P_GPLL0_OUT_MAIN_DIV,
P_GPU_CC_PLL0_OUT_MAIN,
P_GPU_CC_PLL1_OUT_MAIN,
};
static const struct pll_vco lucid_ole_vco[] = {
{ 249600000, 2000000000, 0 },
};
/* 470MHz Configuration */
static const struct alpha_pll_config gpu_cc_pll0_config = {
.l = 0x18,
.cal_l = 0x44,
.alpha = 0x7AAA,
.config_ctl_val = 0x20485699,
.config_ctl_hi_val = 0x00182261,
.config_ctl_hi1_val = 0x82AA299C,
.test_ctl_val = 0x00000000,
.test_ctl_hi_val = 0x00000003,
.test_ctl_hi1_val = 0x00009000,
.test_ctl_hi2_val = 0x00000034,
.user_ctl_val = 0x00000000,
.user_ctl_hi_val = 0x00000005,
};
static struct clk_alpha_pll gpu_cc_pll0 = {
.offset = 0x0,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll0",
.parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
.ops = &clk_alpha_pll_lucid_ole_ops,
},
.vdd_data = {
.vdd_class = &vdd_mxa,
.num_rate_max = VDD_NUM,
.rate_max = (unsigned long[VDD_NUM]) {
[VDD_LOWER_D1] = 615000000,
[VDD_LOW] = 1100000000,
[VDD_LOW_L1] = 1600000000,
[VDD_NOMINAL] = 2000000000},
},
},
};
/* 440MHz Configuration */
static const struct alpha_pll_config gpu_cc_pll1_config = {
.l = 0x16,
.cal_l = 0x44,
.alpha = 0xEAAA,
.config_ctl_val = 0x20485699,
.config_ctl_hi_val = 0x00182261,
.config_ctl_hi1_val = 0x82AA299C,
.test_ctl_val = 0x00000000,
.test_ctl_hi_val = 0x00000003,
.test_ctl_hi1_val = 0x00009000,
.test_ctl_hi2_val = 0x00000034,
.user_ctl_val = 0x00000000,
.user_ctl_hi_val = 0x00000005,
};
static struct clk_alpha_pll gpu_cc_pll1 = {
.offset = 0x1000,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_pll1",
.parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
.ops = &clk_alpha_pll_lucid_ole_ops,
},
.vdd_data = {
.vdd_class = &vdd_mxa,
.num_rate_max = VDD_NUM,
.rate_max = (unsigned long[VDD_NUM]) {
[VDD_LOWER_D1] = 615000000,
[VDD_LOW] = 1100000000,
[VDD_LOW_L1] = 1600000000,
[VDD_NOMINAL] = 2000000000},
},
},
};
static const struct parent_map gpu_cc_parent_map_0[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 5 },
{ P_GPLL0_OUT_MAIN_DIV, 6 },
};
static const struct clk_parent_data gpu_cc_parent_data_0[] = {
{ .fw_name = "bi_tcxo" },
{ .fw_name = "gpll0_out_main" },
{ .fw_name = "gpll0_out_main_div" },
};
static const struct parent_map gpu_cc_parent_map_1[] = {
{ P_BI_TCXO, 0 },
{ P_GPU_CC_PLL0_OUT_MAIN, 1 },
{ P_GPU_CC_PLL1_OUT_MAIN, 3 },
{ P_GPLL0_OUT_MAIN, 5 },
{ P_GPLL0_OUT_MAIN_DIV, 6 },
};
static const struct clk_parent_data gpu_cc_parent_data_1[] = {
{ .fw_name = "bi_tcxo" },
{ .hw = &gpu_cc_pll0.clkr.hw },
{ .hw = &gpu_cc_pll1.clkr.hw },
{ .fw_name = "gpll0_out_main" },
{ .fw_name = "gpll0_out_main_div" },
};
static const struct parent_map gpu_cc_parent_map_2[] = {
{ P_BI_TCXO, 0 },
{ P_GPU_CC_PLL1_OUT_MAIN, 3 },
{ P_GPLL0_OUT_MAIN, 5 },
{ P_GPLL0_OUT_MAIN_DIV, 6 },
};
static const struct clk_parent_data gpu_cc_parent_data_2[] = {
{ .fw_name = "bi_tcxo" },
{ .hw = &gpu_cc_pll1.clkr.hw },
{ .fw_name = "gpll0_out_main" },
{ .fw_name = "gpll0_out_main_div" },
};
static const struct freq_tbl ftbl_gpu_cc_ff_clk_src[] = {
F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
{ }
};
static struct clk_rcg2 gpu_cc_ff_clk_src = {
.cmd_rcgr = 0x9474,
.mnd_width = 0,
.hid_width = 5,
.parent_map = gpu_cc_parent_map_0,
.freq_tbl = ftbl_gpu_cc_ff_clk_src,
.enable_safe_config = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gpu_cc_ff_clk_src",
.parent_data = gpu_cc_parent_data_0,
.num_parents = ARRAY_SIZE(gpu_cc_parent_data_0),
.ops = &clk_rcg2_ops,
},
.clkr.vdd_data = {
.vdd_class = &vdd_cx,
.num_rate_max = VDD_NUM,
.rate_max = (unsigned long[VDD_NUM]) {
[VDD_LOWER_D1] = 200000000},
},
};
static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(220000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
F(550000000, P_GPU_CC_PLL1_OUT_MAIN, 2, 0, 0),
{ }
};
static struct clk_rcg2 gpu_cc_gmu_clk_src = {
.cmd_rcgr = 0x9318,
.mnd_width = 0,
.hid_width = 5,
.parent_map = gpu_cc_parent_map_1,
.freq_tbl = ftbl_gpu_cc_gmu_clk_src,
.enable_safe_config = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gpu_cc_gmu_clk_src",
.parent_data = gpu_cc_parent_data_1,
.num_parents = ARRAY_SIZE(gpu_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
.clkr.vdd_data = {
.vdd_classes = gpu_cc_neo_regulators,
.num_vdd_classes = ARRAY_SIZE(gpu_cc_neo_regulators),
.num_rate_max = VDD_NUM,
.rate_max = (unsigned long[VDD_NUM]) {
[VDD_LOWER_D1] = 220000000,
[VDD_LOW] = 550000000},
},
};
static struct clk_rcg2 gpu_cc_hub_clk_src = {
.cmd_rcgr = 0x93ec,
.mnd_width = 0,
.hid_width = 5,
.parent_map = gpu_cc_parent_map_2,
.freq_tbl = ftbl_gpu_cc_ff_clk_src,
.enable_safe_config = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "gpu_cc_hub_clk_src",
.parent_data = gpu_cc_parent_data_2,
.num_parents = ARRAY_SIZE(gpu_cc_parent_data_2),
.ops = &clk_rcg2_ops,
},
.clkr.vdd_data = {
.vdd_class = &vdd_cx,
.num_rate_max = VDD_NUM,
.rate_max = (unsigned long[VDD_NUM]) {
[VDD_LOWER_D1] = 200000000},
},
};
static struct clk_branch gpu_cc_ahb_clk = {
.halt_reg = 0x911c,
.halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x911c,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_ahb_clk",
.parent_hws = (const struct clk_hw*[]){
&gpu_cc_hub_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch gpu_cc_crc_ahb_clk = {
.halt_reg = 0x9120,
.halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x9120,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_crc_ahb_clk",
.parent_hws = (const struct clk_hw*[]){
&gpu_cc_hub_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch gpu_cc_cx_ff_clk = {
.halt_reg = 0x914c,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x914c,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_cx_ff_clk",
.parent_hws = (const struct clk_hw*[]){
&gpu_cc_ff_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch gpu_cc_cx_gmu_clk = {
.halt_reg = 0x913c,
.halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x913c,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_cx_gmu_clk",
.parent_hws = (const struct clk_hw*[]){
&gpu_cc_gmu_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_DONT_HOLD_STATE,
.ops = &clk_branch2_aon_ops,
},
},
};
static struct clk_branch gpu_cc_cxo_aon_clk = {
.halt_reg = 0x9004,
.halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x9004,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_cxo_aon_clk",
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch gpu_cc_cxo_clk = {
.halt_reg = 0x9144,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x9144,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_cxo_clk",
.flags = CLK_DONT_HOLD_STATE,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch gpu_cc_gx_gmu_clk = {
.halt_reg = 0x90bc,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x90bc,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_gx_gmu_clk",
.parent_hws = (const struct clk_hw*[]){
&gpu_cc_gmu_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch gpu_cc_hub_aon_clk = {
.halt_reg = 0x93e8,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x93e8,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_hub_aon_clk",
.parent_hws = (const struct clk_hw*[]){
&gpu_cc_hub_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_aon_ops,
},
},
};
static struct clk_branch gpu_cc_hub_cx_int_clk = {
.halt_reg = 0x9148,
.halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x9148,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_hub_cx_int_clk",
.parent_hws = (const struct clk_hw*[]){
&gpu_cc_hub_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT | CLK_DONT_HOLD_STATE,
.ops = &clk_branch2_aon_ops,
},
},
};
static struct clk_branch gpu_cc_memnoc_gfx_clk = {
.halt_reg = 0x9150,
.halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x9150,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_memnoc_gfx_clk",
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
.halt_reg = 0x7000,
.halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x7000,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch gpu_cc_sleep_clk = {
.halt_reg = 0x9134,
.halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x9134,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_sleep_clk",
.ops = &clk_branch2_ops,
},
},
};
static struct clk_regmap *gpu_cc_neo_clocks[] = {
[GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
[GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
[GPU_CC_CX_FF_CLK] = &gpu_cc_cx_ff_clk.clkr,
[GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
[GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
[GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
[GPU_CC_FF_CLK_SRC] = &gpu_cc_ff_clk_src.clkr,
[GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
[GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
[GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
[GPU_CC_HUB_AON_CLK] = &gpu_cc_hub_aon_clk.clkr,
[GPU_CC_HUB_CLK_SRC] = &gpu_cc_hub_clk_src.clkr,
[GPU_CC_HUB_CX_INT_CLK] = &gpu_cc_hub_cx_int_clk.clkr,
[GPU_CC_MEMNOC_GFX_CLK] = &gpu_cc_memnoc_gfx_clk.clkr,
[GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
[GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
[GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
};
static const struct regmap_config gpu_cc_neo_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x9988,
.fast_io = true,
};
static const struct qcom_cc_desc gpu_cc_neo_desc = {
.config = &gpu_cc_neo_regmap_config,
.clks = gpu_cc_neo_clocks,
.num_clks = ARRAY_SIZE(gpu_cc_neo_clocks),
.clk_regulators = gpu_cc_neo_regulators,
.num_clk_regulators = ARRAY_SIZE(gpu_cc_neo_regulators),
};
static const struct of_device_id gpu_cc_neo_match_table[] = {
{ .compatible = "qcom,neo-gpucc" },
{ }
};
MODULE_DEVICE_TABLE(of, gpu_cc_neo_match_table);
static int gpu_cc_neo_probe(struct platform_device *pdev)
{
struct regmap *regmap;
int ret;
regmap = qcom_cc_map(pdev, &gpu_cc_neo_desc);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
clk_lucid_ole_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
clk_lucid_ole_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
/*
* Keep the clock always-ON:
* gpu_cc_demet_clk
*/
regmap_update_bits(regmap, 0x0900C, BIT(0), BIT(0));
ret = qcom_cc_really_probe(pdev, &gpu_cc_neo_desc, regmap);
if (ret) {
dev_err(&pdev->dev, "Failed to register GPU CC clocks\n");
return ret;
}
dev_info(&pdev->dev, "Registered GPU CC clocks\n");
return ret;
}
static void gpu_cc_neo_sync_state(struct device *dev)
{
qcom_cc_sync_state(dev, &gpu_cc_neo_desc);
}
static struct platform_driver gpu_cc_neo_driver = {
.probe = gpu_cc_neo_probe,
.driver = {
.name = "gpu_cc-neo",
.of_match_table = gpu_cc_neo_match_table,
.sync_state = gpu_cc_neo_sync_state,
},
};
static int __init gpu_cc_neo_init(void)
{
return platform_driver_register(&gpu_cc_neo_driver);
}
subsys_initcall(gpu_cc_neo_init);
static void __exit gpu_cc_neo_exit(void)
{
platform_driver_unregister(&gpu_cc_neo_driver);
}
module_exit(gpu_cc_neo_exit);
MODULE_DESCRIPTION("QTI GPU_CC NEO Driver");
MODULE_LICENSE("GPL");


@@ -0,0 +1,155 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,tcsrcc-neo.h>
#include "clk-alpha-pll.h"
#include "clk-branch.h"
#include "clk-pll.h"
#include "clk-rcg.h"
#include "clk-regmap.h"
#include "clk-regmap-divider.h"
#include "clk-regmap-mux.h"
#include "common.h"
#include "reset.h"
#include "vdd-level.h"
static struct clk_branch tcsr_pcie_0_clkref_en = {
.halt_reg = 0x15100,
.halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x15100,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "tcsr_pcie_0_clkref_en",
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch tcsr_pcie_1_clkref_en = {
.halt_reg = 0x15114,
.halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x15114,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "tcsr_pcie_1_clkref_en",
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch tcsr_usb2_clkref_en = {
.halt_reg = 0x15118,
.halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x15118,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "tcsr_usb2_clkref_en",
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch tcsr_usb3_clkref_en = {
.halt_reg = 0x15108,
.halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x15108,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "tcsr_usb3_clkref_en",
.ops = &clk_branch2_ops,
},
},
};
static struct clk_regmap *tcsr_cc_neo_clocks[] = {
[TCSR_PCIE_0_CLKREF_EN] = &tcsr_pcie_0_clkref_en.clkr,
[TCSR_PCIE_1_CLKREF_EN] = &tcsr_pcie_1_clkref_en.clkr,
[TCSR_USB2_CLKREF_EN] = &tcsr_usb2_clkref_en.clkr,
[TCSR_USB3_CLKREF_EN] = &tcsr_usb3_clkref_en.clkr,
};
static const struct regmap_config tcsr_cc_neo_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x2f000,
.fast_io = true,
};
static const struct qcom_cc_desc tcsr_cc_neo_desc = {
.config = &tcsr_cc_neo_regmap_config,
.clks = tcsr_cc_neo_clocks,
.num_clks = ARRAY_SIZE(tcsr_cc_neo_clocks),
};
static const struct of_device_id tcsr_cc_neo_match_table[] = {
{ .compatible = "qcom,neo-tcsrcc" },
{ }
};
MODULE_DEVICE_TABLE(of, tcsr_cc_neo_match_table);
static int tcsr_cc_neo_probe(struct platform_device *pdev)
{
struct regmap *regmap;
int ret;
regmap = qcom_cc_map(pdev, &tcsr_cc_neo_desc);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
ret = qcom_cc_really_probe(pdev, &tcsr_cc_neo_desc, regmap);
if (ret) {
dev_err(&pdev->dev, "Failed to register TCSR CC clocks\n");
return ret;
}
dev_info(&pdev->dev, "Registered TCSR CC clocks\n");
return ret;
}
static void tcsr_cc_neo_sync_state(struct device *dev)
{
qcom_cc_sync_state(dev, &tcsr_cc_neo_desc);
}
static struct platform_driver tcsr_cc_neo_driver = {
.probe = tcsr_cc_neo_probe,
.driver = {
.name = "tcsr_cc-neo",
.of_match_table = tcsr_cc_neo_match_table,
.sync_state = tcsr_cc_neo_sync_state,
},
};
static int __init tcsr_cc_neo_init(void)
{
return platform_driver_register(&tcsr_cc_neo_driver);
}
subsys_initcall(tcsr_cc_neo_init);
static void __exit tcsr_cc_neo_exit(void)
{
platform_driver_unregister(&tcsr_cc_neo_driver);
}
module_exit(tcsr_cc_neo_exit);
MODULE_DESCRIPTION("QTI TCSR_CC NEO Driver");
MODULE_LICENSE("GPL");


@@ -0,0 +1,522 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021-2022, 2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/pm_runtime.h>
#include <dt-bindings/clock/qcom,videocc-neo.h>
#include "clk-alpha-pll.h"
#include "clk-branch.h"
#include "clk-rcg.h"
#include "clk-regmap-divider.h"
#include "common.h"
#include "reset.h"
#include "vdd-level.h"
static DEFINE_VDD_REGULATORS(vdd_mm, VDD_NOMINAL + 1, 1, vdd_corner);
static DEFINE_VDD_REGULATORS(vdd_mxc, VDD_NOMINAL + 1, 1, vdd_corner);
static struct clk_vdd_class *video_cc_neo_regulators[] = {
&vdd_mm,
&vdd_mxc,
};
enum {
P_BI_TCXO,
P_SLEEP_CLK,
P_VIDEO_CC_PLL0_OUT_MAIN,
P_VIDEO_CC_PLL1_OUT_MAIN,
};
static const struct pll_vco lucid_ole_vco[] = {
{ 249600000, 2000000000, 0 },
};
static const struct alpha_pll_config video_cc_pll0_config = {
.l = 0x25,
.cal_l = 0x44,
.alpha = 0x8000,
.config_ctl_val = 0x20485699,
.config_ctl_hi_val = 0x00182261,
.config_ctl_hi1_val = 0x82AA299C,
.test_ctl_val = 0x00000000,
.test_ctl_hi_val = 0x00000003,
.test_ctl_hi1_val = 0x00009000,
.test_ctl_hi2_val = 0x00000034,
.user_ctl_val = 0x00000000,
.user_ctl_hi_val = 0x00000005,
};
static struct clk_alpha_pll video_cc_pll0 = {
.offset = 0x0,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "video_cc_pll0",
.parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
.ops = &clk_alpha_pll_lucid_ole_ops,
},
.vdd_data = {
.vdd_class = &vdd_mxc,
.num_rate_max = VDD_NUM,
.rate_max = (unsigned long[VDD_NUM]) {
[VDD_LOWER_D1] = 615000000,
[VDD_LOW] = 1100000000,
[VDD_LOW_L1] = 1600000000,
[VDD_NOMINAL] = 2000000000},
},
},
};
static const struct alpha_pll_config video_cc_pll1_config = {
.l = 0x36,
.cal_l = 0x44,
.alpha = 0xB000,
.config_ctl_val = 0x20485699,
.config_ctl_hi_val = 0x00182261,
.config_ctl_hi1_val = 0x82AA299C,
.test_ctl_val = 0x00000000,
.test_ctl_hi_val = 0x00000003,
.test_ctl_hi1_val = 0x00009000,
.test_ctl_hi2_val = 0x00000034,
.user_ctl_val = 0x00000000,
.user_ctl_hi_val = 0x00000005,
};
static struct clk_alpha_pll video_cc_pll1 = {
.offset = 0x1000,
.vco_table = lucid_ole_vco,
.num_vco = ARRAY_SIZE(lucid_ole_vco),
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE],
.clkr = {
.hw.init = &(struct clk_init_data){
.name = "video_cc_pll1",
.parent_data = &(const struct clk_parent_data){
.fw_name = "bi_tcxo",
},
.num_parents = 1,
.ops = &clk_alpha_pll_lucid_ole_ops,
},
.vdd_data = {
.vdd_class = &vdd_mxc,
.num_rate_max = VDD_NUM,
.rate_max = (unsigned long[VDD_NUM]) {
[VDD_LOWER_D1] = 615000000,
[VDD_LOW] = 1100000000,
[VDD_LOW_L1] = 1600000000,
[VDD_NOMINAL] = 2000000000},
},
},
};
static const struct parent_map video_cc_parent_map_1[] = {
{ P_BI_TCXO, 0 },
{ P_VIDEO_CC_PLL0_OUT_MAIN, 1 },
};
static const struct clk_parent_data video_cc_parent_data_1[] = {
{ .fw_name = "bi_tcxo" },
{ .hw = &video_cc_pll0.clkr.hw },
};
static const struct parent_map video_cc_parent_map_2[] = {
{ P_BI_TCXO, 0 },
{ P_VIDEO_CC_PLL1_OUT_MAIN, 1 },
};
static const struct clk_parent_data video_cc_parent_data_2[] = {
{ .fw_name = "bi_tcxo" },
{ .hw = &video_cc_pll1.clkr.hw },
};
static const struct parent_map video_cc_parent_map_3[] = {
{ P_SLEEP_CLK, 0 },
};
static const struct clk_parent_data video_cc_parent_data_3[] = {
{ .fw_name = "sleep_clk" },
};
static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = {
F(720000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
F(1014000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
F(1332000000, P_VIDEO_CC_PLL0_OUT_MAIN, 1, 0, 0),
{ }
};
static struct clk_rcg2 video_cc_mvs0_clk_src = {
.cmd_rcgr = 0x8000,
.mnd_width = 0,
.hid_width = 5,
.parent_map = video_cc_parent_map_1,
.freq_tbl = ftbl_video_cc_mvs0_clk_src,
.enable_safe_config = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "video_cc_mvs0_clk_src",
.parent_data = video_cc_parent_data_1,
.num_parents = ARRAY_SIZE(video_cc_parent_data_1),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
.clkr.vdd_data = {
.vdd_classes = video_cc_neo_regulators,
.num_vdd_classes = ARRAY_SIZE(video_cc_neo_regulators),
.num_rate_max = VDD_NUM,
.rate_max = (unsigned long[VDD_NUM]) {
[VDD_LOWER] = 720000000,
[VDD_LOW] = 1014000000,
[VDD_NOMINAL] = 1332000000},
},
};
static const struct freq_tbl ftbl_video_cc_mvs1_clk_src[] = {
F(1050000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
F(1350000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
F(1650000000, P_VIDEO_CC_PLL1_OUT_MAIN, 1, 0, 0),
{ }
};
static struct clk_rcg2 video_cc_mvs1_clk_src = {
.cmd_rcgr = 0x8018,
.mnd_width = 0,
.hid_width = 5,
.parent_map = video_cc_parent_map_2,
.freq_tbl = ftbl_video_cc_mvs1_clk_src,
.enable_safe_config = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "video_cc_mvs1_clk_src",
.parent_data = video_cc_parent_data_2,
.num_parents = ARRAY_SIZE(video_cc_parent_data_2),
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_rcg2_ops,
},
.clkr.vdd_data = {
.vdd_classes = video_cc_neo_regulators,
.num_vdd_classes = ARRAY_SIZE(video_cc_neo_regulators),
.num_rate_max = VDD_NUM,
.rate_max = (unsigned long[VDD_NUM]) {
[VDD_LOWER] = 1050000000,
[VDD_LOW] = 1350000000,
[VDD_NOMINAL] = 1650000000},
},
};
static const struct freq_tbl ftbl_video_cc_sleep_clk_src[] = {
F(32000, P_SLEEP_CLK, 1, 0, 0),
{ }
};
static struct clk_rcg2 video_cc_sleep_clk_src = {
.cmd_rcgr = 0x8128,
.mnd_width = 0,
.hid_width = 5,
.parent_map = video_cc_parent_map_3,
.freq_tbl = ftbl_video_cc_sleep_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "video_cc_sleep_clk_src",
.parent_data = video_cc_parent_data_3,
.num_parents = ARRAY_SIZE(video_cc_parent_data_3),
.ops = &clk_rcg2_ops,
},
.clkr.vdd_data = {
.vdd_class = &vdd_mm,
.num_rate_max = VDD_NUM,
.rate_max = (unsigned long[VDD_NUM]) {
[VDD_LOWER] = 32000},
},
};
static struct clk_regmap_div video_cc_mvs0_div_clk_src = {
.reg = 0x80c4,
.shift = 0,
.width = 4,
.clkr.hw.init = &(struct clk_init_data) {
.name = "video_cc_mvs0_div_clk_src",
.parent_hws = (const struct clk_hw*[]){
&video_cc_mvs0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_regmap_div_ro_ops,
},
};
static struct clk_regmap_div video_cc_mvs0c_div2_div_clk_src = {
.reg = 0x8070,
.shift = 0,
.width = 4,
.clkr.hw.init = &(struct clk_init_data) {
.name = "video_cc_mvs0c_div2_div_clk_src",
.parent_hws = (const struct clk_hw*[]){
&video_cc_mvs0_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_regmap_div_ro_ops,
},
};
static struct clk_regmap_div video_cc_mvs1_div_clk_src = {
.reg = 0x80ec,
.shift = 0,
.width = 4,
.clkr.hw.init = &(struct clk_init_data) {
.name = "video_cc_mvs1_div_clk_src",
.parent_hws = (const struct clk_hw*[]){
&video_cc_mvs1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_regmap_div_ro_ops,
},
};
static struct clk_regmap_div video_cc_mvs1c_div2_div_clk_src = {
.reg = 0x809c,
.shift = 0,
.width = 4,
.clkr.hw.init = &(struct clk_init_data) {
.name = "video_cc_mvs1c_div2_div_clk_src",
.parent_hws = (const struct clk_hw*[]){
&video_cc_mvs1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_regmap_div_ro_ops,
},
};
static struct clk_branch video_cc_mvs0_clk = {
.halt_reg = 0x80b8,
.halt_check = BRANCH_HALT_VOTED,
.hwcg_reg = 0x80b8,
.hwcg_bit = 1,
.clkr = {
.enable_reg = 0x80b8,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_cc_mvs0_clk",
.parent_hws = (const struct clk_hw*[]){
&video_cc_mvs0_div_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch video_cc_mvs0c_clk = {
.halt_reg = 0x8064,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x8064,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_cc_mvs0c_clk",
.parent_hws = (const struct clk_hw*[]){
&video_cc_mvs0c_div2_div_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch video_cc_mvs1_clk = {
.halt_reg = 0x80e0,
.halt_check = BRANCH_HALT_VOTED,
.hwcg_reg = 0x80e0,
.hwcg_bit = 1,
.clkr = {
.enable_reg = 0x80e0,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_cc_mvs1_clk",
.parent_hws = (const struct clk_hw*[]){
&video_cc_mvs1_div_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch video_cc_mvs1c_clk = {
.halt_reg = 0x8090,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x8090,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_cc_mvs1c_clk",
.parent_hws = (const struct clk_hw*[]){
&video_cc_mvs1c_div2_div_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_branch video_cc_sleep_clk = {
.halt_reg = 0x8140,
.halt_check = BRANCH_HALT,
.clkr = {
.enable_reg = 0x8140,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "video_cc_sleep_clk",
.parent_hws = (const struct clk_hw*[]){
&video_cc_sleep_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
};
static struct clk_regmap *video_cc_neo_clocks[] = {
[VIDEO_CC_MVS0_CLK] = &video_cc_mvs0_clk.clkr,
[VIDEO_CC_MVS0_CLK_SRC] = &video_cc_mvs0_clk_src.clkr,
[VIDEO_CC_MVS0_DIV_CLK_SRC] = &video_cc_mvs0_div_clk_src.clkr,
[VIDEO_CC_MVS0C_CLK] = &video_cc_mvs0c_clk.clkr,
[VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC] = &video_cc_mvs0c_div2_div_clk_src.clkr,
[VIDEO_CC_MVS1_CLK] = &video_cc_mvs1_clk.clkr,
[VIDEO_CC_MVS1_CLK_SRC] = &video_cc_mvs1_clk_src.clkr,
[VIDEO_CC_MVS1_DIV_CLK_SRC] = &video_cc_mvs1_div_clk_src.clkr,
[VIDEO_CC_MVS1C_CLK] = &video_cc_mvs1c_clk.clkr,
[VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC] = &video_cc_mvs1c_div2_div_clk_src.clkr,
[VIDEO_CC_PLL0] = &video_cc_pll0.clkr,
[VIDEO_CC_PLL1] = &video_cc_pll1.clkr,
[VIDEO_CC_SLEEP_CLK] = &video_cc_sleep_clk.clkr,
[VIDEO_CC_SLEEP_CLK_SRC] = &video_cc_sleep_clk_src.clkr,
};
static const struct qcom_reset_map video_cc_neo_resets[] = {
[CVP_VIDEO_CC_INTERFACE_BCR] = { 0x80f0 },
[CVP_VIDEO_CC_MVS0_BCR] = { 0x80a0 },
[VIDEO_CC_MVS0C_CLK_ARES] = { 0x8064, 2 },
[CVP_VIDEO_CC_MVS0C_BCR] = { 0x8048 },
[CVP_VIDEO_CC_MVS1_BCR] = { 0x80c8 },
[VIDEO_CC_MVS1C_CLK_ARES] = { 0x8090, 2 },
[CVP_VIDEO_CC_MVS1C_BCR] = { 0x8074 },
[VIDEO_CC_XO_CLK_ARES] = { 0x8124, 2 },
};
static const struct regmap_config video_cc_neo_regmap_config = {
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
.max_register = 0x9f4c,
.fast_io = true,
};
static struct qcom_cc_desc video_cc_neo_desc = {
.config = &video_cc_neo_regmap_config,
.clks = video_cc_neo_clocks,
.num_clks = ARRAY_SIZE(video_cc_neo_clocks),
.resets = video_cc_neo_resets,
.num_resets = ARRAY_SIZE(video_cc_neo_resets),
.clk_regulators = video_cc_neo_regulators,
.num_clk_regulators = ARRAY_SIZE(video_cc_neo_regulators),
};
static const struct of_device_id video_cc_neo_match_table[] = {
{ .compatible = "qcom,neo-videocc" },
{ }
};
MODULE_DEVICE_TABLE(of, video_cc_neo_match_table);
static int video_cc_neo_probe(struct platform_device *pdev)
{
struct regmap *regmap;
int ret;
regmap = qcom_cc_map(pdev, &video_cc_neo_desc);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
ret = qcom_cc_runtime_init(pdev, &video_cc_neo_desc);
if (ret)
return ret;
ret = pm_runtime_get_sync(&pdev->dev);
if (ret)
return ret;
clk_lucid_ole_pll_configure(&video_cc_pll0, regmap, &video_cc_pll0_config);
clk_lucid_ole_pll_configure(&video_cc_pll1, regmap, &video_cc_pll1_config);
/*
* Keep clocks always enabled:
* video_cc_ahb_clk
* video_cc_xo_clk
*/
regmap_update_bits(regmap, 0x80f4, BIT(0), BIT(0));
regmap_update_bits(regmap, 0x8124, BIT(0), BIT(0));
ret = qcom_cc_really_probe(pdev, &video_cc_neo_desc, regmap);
if (ret) {
dev_err(&pdev->dev, "Failed to register VIDEO CC clocks\n");
return ret;
}
pm_runtime_put_sync(&pdev->dev);
dev_info(&pdev->dev, "Registered VIDEO CC clocks\n");
return ret;
}
static void video_cc_neo_sync_state(struct device *dev)
{
qcom_cc_sync_state(dev, &video_cc_neo_desc);
}
static const struct dev_pm_ops video_cc_neo_pm_ops = {
SET_RUNTIME_PM_OPS(qcom_cc_runtime_suspend, qcom_cc_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};
static struct platform_driver video_cc_neo_driver = {
.probe = video_cc_neo_probe,
.driver = {
.name = "video_cc-neo",
.of_match_table = video_cc_neo_match_table,
.sync_state = video_cc_neo_sync_state,
.pm = &video_cc_neo_pm_ops,
},
};
static int __init video_cc_neo_init(void)
{
return platform_driver_register(&video_cc_neo_driver);
}
subsys_initcall(video_cc_neo_init);
static void __exit video_cc_neo_exit(void)
{
platform_driver_unregister(&video_cc_neo_driver);
}
module_exit(video_cc_neo_exit);
MODULE_DESCRIPTION("QTI VIDEO_CC NEO Driver");
MODULE_LICENSE("GPL");


@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
@ -91,6 +91,8 @@ static const struct virtio_clk_init_data sm6150_scc_virtio_clocks[] = {
[SCC_QUPV3_SE1_CLK] = {.name = "scc_qupv3_se1_clk",},
[SCC_QUPV3_SE2_CLK] = {.name = "scc_qupv3_se2_clk",},
[SCC_QUPV3_SE3_CLK] = {.name = "scc_qupv3_se3_clk",},
[SCC_QUPV3_SE4_CLK] = {.name = "scc_qupv3_se4_clk",},
[SCC_QUPV3_SE5_CLK] = {.name = "scc_qupv3_se5_clk",},
[SCC_QUPV3_M_HCLK_CLK] = {.name = "scc_qupv3_m_hclk_clk",},
[SCC_QUPV3_S_HCLK_CLK] = {.name = "scc_qupv3_s_hclk_clk",},
};

View File

@ -707,6 +707,28 @@ static void lpm_idle_exit(void *unused, int state, struct cpuidle_device *dev)
}
}
static int suspend_lpm_notify(struct notifier_block *nb,
unsigned long mode, void *_unused)
{
int cpu;
switch (mode) {
case PM_SUSPEND_PREPARE:
suspend_in_progress = true;
break;
case PM_POST_SUSPEND:
suspend_in_progress = false;
break;
default:
break;
}
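	/*
	 * Nudge every idling CPU so the governor re-evaluates its state
	 * selection with the updated suspend_in_progress flag.
	 */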
for_each_online_cpu(cpu)
wake_up_if_idle(cpu);
return 0;
}
/**
* lpm_enable_device() - Initialize the governor's data for the CPU
* @drv: cpuidle driver
@ -831,6 +853,10 @@ static struct cpuidle_governor lpm_governor = {
.reflect = lpm_reflect,
};
static struct notifier_block suspend_lpm_nb = {
.notifier_call = suspend_lpm_notify,
};
static int __init qcom_lpm_governor_init(void)
{
int ret;
@ -856,6 +882,8 @@ static int __init qcom_lpm_governor_init(void)
if (ret < 0)
goto cpuhp_setup_fail;
register_pm_notifier(&suspend_lpm_nb);
return 0;
cpuhp_setup_fail:

View File

@ -1772,11 +1772,40 @@ int gpi_terminate_channel(struct gpii_chan *gpii_chan)
return ret;
}
/**
 * geni_gsi_connect_doorbell() - connect the GSI doorbell
 * @chan: gsi channel handle
 *
 * This function uses asynchronous channel command 48 to connect the
 * io_6 input to the GSI interrupt input.
 *
 * Return: 0 on success or a negative error code on failure
 */
int geni_gsi_connect_doorbell(struct dma_chan *chan)
{
struct gpii_chan *gpii_chan = to_gpii_chan(chan);
struct gpii *gpii = gpii_chan->gpii;
int ret = 0;
GPII_VERB(gpii, gpii_chan->chid, "Enter\n");
ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_ENABLE_HID);
if (ret) {
GPII_ERR(gpii, gpii_chan->chid, "Error enable Chan:%d HID interrupt\n", ret);
gpi_dump_debug_reg(gpii);
}
return ret;
}
EXPORT_SYMBOL_GPL(geni_gsi_connect_doorbell);
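/*
 * Illustrative usage sketch (not part of this patch): a GENI client that
 * owns a GSI dma_chan could arm the doorbell once its channel is up, e.g.
 *
 *	ret = geni_gsi_connect_doorbell(rx_chan);
 *	if (ret)
 *		dev_err(dev, "doorbell connect failed: %d\n", ret);
 *
 * "rx_chan" and "dev" are hypothetical caller-side names; pair this with
 * geni_gsi_disconnect_doorbell_stop_ch() on teardown.
 */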
/**
 * geni_gsi_disconnect_doorbell_stop_ch() - disconnect the GSI doorbell and optionally stop the channel
 * @chan: gsi channel handle
 * @stop_ch: stop the channel if set to true
 *
 * This function uses asynchronous channel command 49 to disconnect the
 * io_6 input from the GSI interrupt input.
 *
 * Return: 0 on success or a negative error code on failure
 */
int geni_gsi_disconnect_doorbell_stop_ch(struct dma_chan *chan, bool stop_ch)
@ -1786,10 +1815,6 @@ int geni_gsi_disconnect_doorbell_stop_ch(struct dma_chan *chan, bool stop_ch)
int ret = 0;
bool error = false;
/*
* Use asynchronous channel command 49 (see section 3.10.7) to dis-connect
* io_6 input from GSI interrupt input.
*/
GPII_VERB(gpii, gpii_chan->chid, "Enter\n");
ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_DISABLE_HID);
if (ret) {

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
* Copyright (C) 2015 Linaro Ltd.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/slab.h>
@ -137,6 +137,9 @@ int scm_legacy_call(struct device *dev, const struct qcom_scm_desc *desc,
__le32 *arg_buf;
const __le32 *res_buf;
if (!dev)
return -EPROBE_DEFER;
cmd = kzalloc(PAGE_ALIGN(alloc_len), GFP_KERNEL);
if (!cmd)
return -ENOMEM;

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015,2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/io.h>
@ -212,6 +212,9 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
struct arm_smccc_res smc_res;
struct arm_smccc_args smc = {0};
if (!dev)
return -EPROBE_DEFER;
smc.args[0] = ARM_SMCCC_CALL_VAL(
smccc_call_type,
qcom_smccc_convention,
@ -222,9 +225,6 @@ int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
smc.args[i + SCM_SMC_FIRST_REG_IDX] = desc->args[i];
if (unlikely(arglen > SCM_SMC_N_REG_ARGS)) {
if (!dev)
return -EPROBE_DEFER;
alloc_len = SCM_SMC_N_EXT_ARGS * sizeof(u64);
use_qtee_shmbridge = qtee_shmbridge_is_enabled();
if (use_qtee_shmbridge) {

File diff suppressed because it is too large

View File

@ -12,6 +12,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/soc/qcom/qcom_hwspinlock.h>
#include <linux/regmap.h>
#include "hwspinlock_internal.h"
@ -25,6 +26,43 @@ struct qcom_hwspinlock_of_data {
const struct regmap_config *regmap_config;
};
/**
* qcom_hwspinlock_bust() - bust qcom specific hwspinlock
* @lock: a previously-acquired hwspinlock which we want to bust
* @id: identifier of the remote lock holder, if applicable
*
* This function will bust a hwspinlock that was previously acquired as
* long as the current owner of the lock matches the id given by the caller.
*
* Context: Process context.
*
* Returns: 0 on success, or error if bust operation fails
*/
int qcom_hwspinlock_bust(struct hwspinlock *lock, unsigned int id)
{
struct regmap_field *field = lock->priv;
u32 owner;
int ret;
ret = regmap_field_read(field, &owner);
if (ret) {
pr_err("%s: unable to query spinlock owner\n", __func__);
return ret;
}
if (owner != id)
return 0;
ret = regmap_field_write(field, 0);
if (ret) {
pr_err("%s: failed to bust spinlock\n", __func__);
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(qcom_hwspinlock_bust);
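/*
 * Illustrative caller sketch (hypothetical names, not part of this patch):
 * after a remote processor crashes, its manager could clear a lock the
 * remote side left held:
 *
 *	ret = qcom_hwspinlock_bust(hwlock, rproc_owner_id);
 *	if (ret)
 *		dev_err(dev, "failed to bust hwspinlock: %d\n", ret);
 *
 * A return of 0 when the owner does not match simply leaves the lock alone.
 */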
static int qcom_hwspinlock_trylock(struct hwspinlock *lock)
{
struct regmap_field *field = lock->priv;

View File

@ -247,7 +247,8 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
atomic_inc(csdev->refcnt);
} else {
/* Free up the buffer if we failed to enable */
used = false;
kfree(drvdata->buf);
drvdata->buf = NULL;
}
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);

View File

@ -153,6 +153,19 @@ config INTERCONNECT_QCOM_ANORAK
for setting bandwidth between two endpoints (path). It is also used to
configure NOC QoS settings (Quality of Service).
config INTERCONNECT_QCOM_NEO
tristate "NEO interconnect driver"
depends on INTERCONNECT_QCOM
depends on QCOM_RPMH && QCOM_COMMAND_DB && OF
select INTERCONNECT_QCOM_BCM_VOTER
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_QOS
help
This is a driver for the Qualcomm Technologies, Inc. Network-on-Chip
on Neo-based platforms. The interconnect driver provides interfaces
for setting bandwidth between two endpoints (path). It is also used to
configure NOC QoS settings (Quality of Service).
config INTERCONNECT_QCOM_SDX55
tristate "Qualcomm SDX55 interconnect driver"
depends on INTERCONNECT_QCOM_RPMH_POSSIBLE

View File

@ -26,6 +26,7 @@ qnoc-sm6150-objs := sm6150.o
qnoc-lemans-objs := lemans.o
qnoc-monaco-auto-objs := monaco_auto.o
qnoc-anorak-objs := anorak.o
qnoc-neo-objs := neo.o
qnoc-sm8150-objs := sm8150.o
qnoc-sm8250-objs := sm8250.o
qnoc-sm8350-objs := sm8350.o
@ -77,6 +78,7 @@ obj-$(CONFIG_INTERCONNECT_QCOM_HOLI) += qnoc-holi.o
obj-$(CONFIG_INTERCONNECT_QCOM_PITTI) += qnoc-pitti.o
obj-$(CONFIG_INTERCONNECT_QCOM_MONACO_AUTO) += qnoc-monaco-auto.o
obj-$(CONFIG_INTERCONNECT_QCOM_ANORAK) += qnoc-anorak.o
obj-$(CONFIG_INTERCONNECT_QCOM_NEO) += qnoc-neo.o
obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
obj-$(CONFIG_INTERCONNECT_QCOM_QOS) += qnoc-qos.o
obj-$(CONFIG_INTERCONNECT_QCOM_QOS_RPM) += qnoc-qos-rpm.o

File diff suppressed because it is too large

View File

@ -1421,8 +1421,8 @@ static int qti_flash_led_setup(struct qti_flash_led *led)
mask = FLASH_LED_STROBE_CFG_MASK | FLASH_LED_HW_SW_STROBE_SEL;
if (led->ext_led) {
val |= FLASH_LED_STROBE_TRIGGER | FLASH_LED_STROBE_POLARITY;
mask |= FLASH_LED_STROBE_TRIGGER | FLASH_LED_STROBE_POLARITY;
val |= FLASH_LED_STROBE_POLARITY;
mask |= FLASH_LED_STROBE_POLARITY;
}
rc = qti_flash_led_masked_write(led,

View File

@ -7,3 +7,14 @@ config VIRTIO_NPU
which provides acceleration for neural network processing.
This driver is based on virtio.
Say Y if you want to support virtual NPU.
config MSM_NPU
tristate "QTI MSM Neural Processing Unit support"
depends on ARCH_QCOM
help
Enable support for the Neural Processing Unit
on specific QTI chipsets.
This module serves as the common driver
for the NPU, which provides acceleration for
neural network processing.

View File

@ -1,3 +1,14 @@
# SPDX-License-Identifier: GPL-2.0-only
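# The virtio front-end and the native MSM NPU driver are mutually exclusive:
# build virtio_npu.o whenever CONFIG_VIRTIO_NPU is enabled, otherwise
# assemble the monolithic msm_npu.o from the objects listed below.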
ifneq ($(CONFIG_VIRTIO_NPU),)
obj-$(CONFIG_VIRTIO_NPU) := virtio_npu.o
else
msm_npu-objs := npu_dbg.o \
npu_dev.o \
npu_debugfs.o \
npu_host_ipc.o \
npu_hw_access.o \
npu_mgr.o
obj-$(CONFIG_MSM_NPU) := msm_npu.o
endif

View File

@ -0,0 +1,277 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _NPU_COMMON_H
#define _NPU_COMMON_H
/* -------------------------------------------------------------------------
* Includes
* -------------------------------------------------------------------------
*/
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/msm_npu.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/mailbox/qmp.h>
#include "npu_mgr.h"
/* -------------------------------------------------------------------------
* Defines
* -------------------------------------------------------------------------
*/
#define NPU_MAX_MBOX_NUM 2
#define NPU_MBOX_LOW_PRI 0
#define NPU_MBOX_HIGH_PRI 1
#define DEFAULT_REG_DUMP_NUM 64
#define ROW_BYTES 16
#define GROUP_BYTES 4
#define NUM_MAX_CLK_NUM 24
#define NPU_MAX_REGULATOR_NUM 2
#define NPU_MAX_DT_NAME_LEN 21
#define NPU_MAX_PWRLEVELS 8
#define NPU_MAX_STATS_BUF_SIZE 16384
#define NPU_MAX_PATCH_NUM 160
#define PERF_MODE_DEFAULT 0
enum npu_power_level {
NPU_PWRLEVEL_MINSVS = 0,
NPU_PWRLEVEL_LOWSVS,
NPU_PWRLEVEL_SVS,
NPU_PWRLEVEL_SVS_L1,
NPU_PWRLEVEL_NOM,
NPU_PWRLEVEL_NOM_L1,
NPU_PWRLEVEL_TURBO,
NPU_PWRLEVEL_TURBO_L1,
NPU_PWRLEVEL_OFF = 0xFFFFFFFF,
};
/* -------------------------------------------------------------------------
* Data Structures
* -------------------------------------------------------------------------
*/
struct npu_smmu_ctx {
int domain;
struct dma_iommu_mapping *mmu_mapping;
struct reg_bus_client *reg_bus_clt;
int32_t attach_cnt;
};
struct npu_ion_buf {
int fd;
struct dma_buf *dma_buf;
struct dma_buf_attachment *attachment;
struct sg_table *table;
dma_addr_t iova;
uint32_t size;
void *phys_addr;
void *buf;
struct list_head list;
};
struct npu_clk {
struct clk *clk;
char clk_name[NPU_MAX_DT_NAME_LEN];
};
struct npu_regulator {
struct regulator *regulator;
char regulator_name[NPU_MAX_DT_NAME_LEN];
};
struct npu_debugfs_ctx {
struct dentry *root;
uint32_t reg_off;
uint32_t reg_cnt;
};
struct npu_debugfs_reg_ctx {
char *buf;
size_t buf_len;
struct npu_device *npu_dev;
};
struct npu_mbox {
struct mbox_client client;
struct mbox_chan *chan;
struct npu_device *npu_dev;
uint32_t id;
};
/**
* struct npu_pwrlevel - Struct holding different pwrlevel info obtained
* from dtsi file
* @pwr_level: NPU power level
* @clk_freq[]: NPU frequency vote in Hz
*/
struct npu_pwrlevel {
uint32_t pwr_level;
long clk_freq[NUM_MAX_CLK_NUM];
};
/*
 * struct npu_reg - Struct holding npu register information
 * @off - register offset
 * @val - register value
 * @valid - if register value is valid
 */
struct npu_reg {
uint32_t off;
uint32_t val;
bool valid;
};
/**
* struct npu_pwrctrl - Power control settings for a NPU device
* @pwr_vote_num - voting information for power enable
* @pwrlevels - List of supported power levels
* @active_pwrlevel - The currently active power level
* @default_pwrlevel - device wake up power level
* @max_pwrlevel - maximum allowable powerlevel per the user
* @min_pwrlevel - minimum allowable powerlevel per the user
* @num_pwrlevels - number of available power levels
* @cdsprm_pwrlevel - maximum power level from cdsprm
* @fmax_pwrlevel - maximum power level from qfprom fmax setting
* @uc_pwrlevel - power level from user driver setting
* @perf_mode_override - perf mode from sysfs to override perf mode
* settings from user driver
* @dcvs_mode - dcvs mode from sysfs to turn on dcvs mode
* settings from user driver
* @devbw - bw device
*/
struct npu_pwrctrl {
int32_t pwr_vote_num;
struct npu_pwrlevel pwrlevels[NPU_MAX_PWRLEVELS];
uint32_t active_pwrlevel;
uint32_t default_pwrlevel;
uint32_t max_pwrlevel;
uint32_t min_pwrlevel;
uint32_t num_pwrlevels;
struct device *devbw;
uint32_t bwmon_enabled;
uint32_t uc_pwrlevel;
uint32_t cdsprm_pwrlevel;
uint32_t fmax_pwrlevel;
uint32_t perf_mode_override;
uint32_t dcvs_mode;
uint32_t cur_dcvs_activity;
};
/**
* struct npu_thermalctrl - Thermal control settings for a NPU device
* @max_state - maximum thermal mitigation state
* @current_state - current thermal mitigation state
* @pwr_level - power level that thermal control requested
*/
struct npu_thermalctrl {
unsigned long max_state;
unsigned long current_state;
uint32_t pwr_level;
};
#define NPU_MAX_IRQ 3
struct npu_irq {
char *name;
int irq;
int irq_type;
};
struct npu_io_data {
size_t size;
void __iomem *base;
};
struct npu_fw_io_data {
phys_addr_t mem_phys;
phys_addr_t mem_reloc;
void *mem_region;
size_t mem_size;
};
struct npu_device {
struct mutex dev_lock;
struct platform_device *pdev;
dev_t dev_num;
struct cdev cdev;
struct class *class;
struct device *device;
struct npu_io_data core_io;
struct npu_io_data tcm_io;
struct npu_io_data bwmon_io;
struct npu_io_data qfprom_io;
struct npu_fw_io_data fw_io;
uint32_t core_clk_num;
struct npu_clk core_clks[NUM_MAX_CLK_NUM];
uint32_t regulator_num;
	struct npu_regulator regulators[NPU_MAX_REGULATOR_NUM];
struct npu_irq irq[NPU_MAX_IRQ];
struct device *cb_device;
struct npu_host_ctx host_ctx;
struct npu_smmu_ctx smmu_ctx;
struct npu_debugfs_ctx debugfs_ctx;
struct npu_mbox mbox_aop;
struct thermal_cooling_device *tcdev;
struct npu_pwrctrl pwrctrl;
struct npu_thermalctrl thermalctrl;
struct llcc_slice_desc *sys_cache;
uint32_t execute_v2_flag;
bool cxlimit_registered;
struct icc_path *icc_npu_cdspmem;
struct icc_path *icc_cpu_imemcfg;
uint32_t hw_version;
};
struct npu_client {
struct npu_device *npu_dev;
struct mutex list_lock;
struct list_head mapped_buffer_list;
};
/* -------------------------------------------------------------------------
* Function Prototypes
* -------------------------------------------------------------------------
*/
int npu_debugfs_init(struct npu_device *npu_dev);
void npu_debugfs_deinit(struct npu_device *npu_dev);
int npu_enable_core_power(struct npu_device *npu_dev);
void npu_disable_core_power(struct npu_device *npu_dev);
int npu_enable_post_pil_clocks(struct npu_device *npu_dev);
void npu_disable_post_pil_clocks(struct npu_device *npu_dev);
irqreturn_t npu_intr_hdler(int irq, void *ptr);
int npu_set_uc_power_level(struct npu_device *npu_dev,
uint32_t pwr_level);
int fw_init(struct npu_device *npu_dev);
void fw_deinit(struct npu_device *npu_dev, bool ssr, bool fw_alive);
int npu_notify_cdsprm_cxlimit_activity(struct npu_device *npu_dev, bool enable);
#endif /* _NPU_COMMON_H */

View File

@ -0,0 +1,33 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* -------------------------------------------------------------------------
* Includes
* -------------------------------------------------------------------------
*/
#include "npu_common.h"
#include "npu_firmware.h"
#include "npu_hw.h"
#include "npu_hw_access.h"
#include "npu_mgr.h"
/* -------------------------------------------------------------------------
* Function Definitions - Debug
* -------------------------------------------------------------------------
*/
void npu_dump_debug_timeout_stats(struct npu_device *npu_dev)
{
uint32_t reg_val;
reg_val = REGR(npu_dev, REG_FW_JOB_CNT_START);
pr_info("fw jobs execute started count = %d\n", reg_val);
reg_val = REGR(npu_dev, REG_FW_JOB_CNT_END);
pr_info("fw jobs execute finished count = %d\n", reg_val);
reg_val = REGR(npu_dev, REG_NPU_FW_DEBUG_DATA);
pr_info("fw jobs aco parser debug = %d\n", reg_val);
}

View File

@ -0,0 +1,184 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* -------------------------------------------------------------------------
* Includes
* -------------------------------------------------------------------------
*/
#include <linux/debugfs.h>
#include "npu_hw.h"
#include "npu_hw_access.h"
#include "npu_common.h"
/* -------------------------------------------------------------------------
* Defines
* -------------------------------------------------------------------------
*/
#define NPU_LOG_BUF_SIZE 4096
/* -------------------------------------------------------------------------
* Function Prototypes
* -------------------------------------------------------------------------
*/
static int npu_debug_open(struct inode *inode, struct file *file);
static int npu_debug_release(struct inode *inode, struct file *file);
static ssize_t npu_debug_ctrl_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos);
/* -------------------------------------------------------------------------
* Variables
* -------------------------------------------------------------------------
*/
static struct npu_device *g_npu_dev;
static const struct file_operations npu_ctrl_fops = {
.open = npu_debug_open,
.release = npu_debug_release,
.read = NULL,
.write = npu_debug_ctrl_write,
};
/* -------------------------------------------------------------------------
* Function Implementations
* -------------------------------------------------------------------------
*/
static int npu_debug_open(struct inode *inode, struct file *file)
{
/* non-seekable */
file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
file->private_data = inode->i_private;
return 0;
}
static int npu_debug_release(struct inode *inode, struct file *file)
{
return 0;
}
/* -------------------------------------------------------------------------
* Function Implementations - DebugFS Control
* -------------------------------------------------------------------------
*/
static ssize_t npu_debug_ctrl_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos)
{
char buf[24];
struct npu_device *npu_dev = file->private_data;
struct npu_debugfs_ctx *debugfs;
int32_t rc = 0;
uint32_t val;
pr_debug("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
npu_dev = g_npu_dev;
debugfs = &npu_dev->debugfs_ctx;
if (count >= sizeof(buf))
return -EINVAL;
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
buf[count] = 0; /* end of string */
if (count >= 2)
buf[count-1] = 0; /* remove line feed */
if (strcmp(buf, "on") == 0) {
pr_info("triggering fw_init\n");
if (fw_init(npu_dev) != 0)
pr_info("error in fw_init\n");
} else if (strcmp(buf, "off") == 0) {
pr_info("triggering fw_deinit\n");
fw_deinit(npu_dev, false, true);
} else if (strcmp(buf, "ssr") == 0) {
pr_info("trigger error irq\n");
if (npu_enable_core_power(npu_dev))
return -EPERM;
REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_SET(1), 2);
REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_SET(0), 2);
npu_disable_core_power(npu_dev);
} else if (strcmp(buf, "ssr_wdt") == 0) {
pr_info("trigger wdt irq\n");
npu_disable_post_pil_clocks(npu_dev);
} else if (strcmp(buf, "loopback") == 0) {
pr_debug("loopback test\n");
rc = npu_host_loopback_test(npu_dev);
pr_debug("loopback test end: %d\n", rc);
} else {
rc = kstrtou32(buf, 10, &val);
if (rc) {
pr_err("Invalid input for power level settings\n");
} else {
val = min(val, npu_dev->pwrctrl.max_pwrlevel);
npu_dev->pwrctrl.active_pwrlevel = val;
pr_info("setting power state to %d\n", val);
}
}
return count;
}
/* -------------------------------------------------------------------------
* Function Implementations - DebugFS
* -------------------------------------------------------------------------
*/
int npu_debugfs_init(struct npu_device *npu_dev)
{
struct npu_debugfs_ctx *debugfs = &npu_dev->debugfs_ctx;
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
g_npu_dev = npu_dev;
debugfs->root = debugfs_create_dir("npu", NULL);
if (IS_ERR_OR_NULL(debugfs->root)) {
pr_err("debugfs_create_dir for npu failed, error %ld\n",
PTR_ERR(debugfs->root));
return -ENODEV;
}
if (!debugfs_create_file("ctrl", 0644, debugfs->root,
npu_dev, &npu_ctrl_fops)) {
pr_err("debugfs_create_file ctrl fail\n");
goto err;
}
debugfs_create_bool("sys_cache_disable", 0644,
debugfs->root, &(host_ctx->sys_cache_disable));
debugfs_create_u32("fw_dbg_mode", 0644,
debugfs->root, &(host_ctx->fw_dbg_mode));
debugfs_create_u32("fw_state", 0444,
debugfs->root, &(host_ctx->fw_state));
debugfs_create_u32("pwr_level", 0444,
debugfs->root, &(pwr->active_pwrlevel));
debugfs_create_u32("exec_flags", 0644,
debugfs->root, &(host_ctx->exec_flags_override));
return 0;
err:
npu_debugfs_deinit(npu_dev);
return -ENODEV;
}
void npu_debugfs_deinit(struct npu_device *npu_dev)
{
struct npu_debugfs_ctx *debugfs = &npu_dev->debugfs_ctx;
if (!IS_ERR_OR_NULL(debugfs->root)) {
debugfs_remove_recursive(debugfs->root);
debugfs->root = NULL;
}
}

File diff suppressed because it is too large

View File

@ -0,0 +1,176 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _NPU_FIRMWARE_H
#define _NPU_FIRMWARE_H
/* -------------------------------------------------------------------------
* Includes
* -------------------------------------------------------------------------
*/
#include <linux/types.h>
/* -------------------------------------------------------------------------
* Defines
* -------------------------------------------------------------------------
*/
/* NPU Firmware Control/Status Register, written by FW and read by HOST */
#define REG_NPU_FW_CTRL_STATUS NPU_GPR0
/* written by HOST and read by FW for control */
#define REG_NPU_HOST_CTRL_STATUS NPU_GPR1
/* Data value for control */
#define REG_NPU_HOST_CTRL_VALUE NPU_GPR2
/* Simulates an interrupt for FW->HOST, used for pre-silicon */
#define REG_FW_TO_HOST_EVENT NPU_GPR3
/* Read/Written by both host and dsp for sync between driver and dsp */
#define REG_HOST_DSP_CTRL_STATUS NPU_GPR4
/* Data value for debug */
#define REG_NPU_FW_DEBUG_DATA NPU_GPR13
/* Started job count */
#define REG_FW_JOB_CNT_START NPU_GPR14
/* Finished job count */
#define REG_FW_JOB_CNT_END NPU_GPR15
/* NPU FW Control/Status Register */
/* bit fields definitions in CTRL STATUS REG */
#define FW_CTRL_STATUS_IPC_READY_BIT 0
#define FW_CTRL_STATUS_LOG_READY_BIT 1
#define FW_CTRL_STATUS_EXECUTE_THREAD_READY_BIT 2
#define FW_CTRL_STATUS_MAIN_THREAD_READY_BIT 3
#define FW_CTRL_STATUS_LOADED_ACO_BIT 4
#define FW_CTRL_STATUS_EXECUTING_ACO_BIT 5
#define FW_CTRL_STATUS_SHUTDOWN_DONE_BIT 12
#define FW_CTRL_STATUS_STACK_CORRUPT_BIT 13
/* 32 bit values of the bit fields above */
#define FW_CTRL_STATUS_IPC_READY_VAL (1 << FW_CTRL_STATUS_IPC_READY_BIT)
#define FW_CTRL_STATUS_LOG_READY_VAL (1 << FW_CTRL_STATUS_LOG_READY_BIT)
#define FW_CTRL_STATUS_EXECUTE_THREAD_READY_VAL \
(1 << FW_CTRL_STATUS_EXECUTE_THREAD_READY_BIT)
#define FW_CTRL_STATUS_MAIN_THREAD_READY_VAL \
(1 << FW_CTRL_STATUS_MAIN_THREAD_READY_BIT)
#define FW_CTRL_STATUS_LOADED_ACO_VAL \
(1 << FW_CTRL_STATUS_LOADED_ACO_BIT)
#define FW_CTRL_STATUS_EXECUTING_ACO_VAL \
(1 << FW_CTRL_STATUS_EXECUTING_ACO_BIT)
#define FW_CTRL_STATUS_SHUTDOWN_DONE_VAL \
(1 << FW_CTRL_STATUS_SHUTDOWN_DONE_BIT)
#define FW_CTRL_STATUS_STACK_CORRUPT_VAL \
(1 << FW_CTRL_STATUS_STACK_CORRUPT_BIT)
/* NPU HOST Control/Status Register */
/* bit fields definitions in CTRL STATUS REG */
/* Host has programmed IPC address into the REG_NPU_HOST_CTRL_VALUE register */
#define HOST_CTRL_STATUS_IPC_ADDRESS_READY_BIT 0
/* Host has enabled logging during boot */
#define HOST_CTRL_STATUS_BOOT_ENABLE_LOGGING_BIT 1
/* Host has enabled the clk gating of CAL during boot */
#define HOST_CTRL_STATUS_BOOT_ENABLE_CLK_GATE_BIT 2
/* Host requests to pause fw during boot up */
#define HOST_CTRL_STATUS_FW_PAUSE 3
/* Host requests to disable watchdog */
#define HOST_CTRL_STATUS_DISABLE_WDOG_BIT 4
/* 32 bit values of the bit fields above */
#define HOST_CTRL_STATUS_IPC_ADDRESS_READY_VAL \
(1 << HOST_CTRL_STATUS_IPC_ADDRESS_READY_BIT)
#define HOST_CTRL_STATUS_BOOT_ENABLE_LOGGING_VAL \
(1 << HOST_CTRL_STATUS_BOOT_ENABLE_LOGGING_BIT)
#define HOST_CTRL_STATUS_BOOT_ENABLE_CLK_GATE_VAL \
(1 << HOST_CTRL_STATUS_BOOT_ENABLE_CLK_GATE_BIT)
#define HOST_CTRL_STATUS_FW_PAUSE_VAL \
(1 << HOST_CTRL_STATUS_FW_PAUSE)
#define HOST_CTRL_STATUS_DISABLE_WDOG_VAL \
(1 << HOST_CTRL_STATUS_DISABLE_WDOG_BIT)
/* NPU HOST DSP Control/Status Register */
/* notification of power up */
/* following bits are set by host and read by dsp */
#define HOST_DSP_CTRL_STATUS_PWR_UP_BIT 0
/* notification of power dwn */
#define HOST_DSP_CTRL_STATUS_PWR_DWN_BIT 1
/* following bits are set by dsp and read by host */
/* notification of power up acknowledgement */
#define HOST_DSP_CTRL_STATUS_PWR_UP_ACK_BIT 4
/* notification of power down acknowledgement */
#define HOST_DSP_CTRL_STATUS_PWR_DWN_ACK_BIT 5
/* 32 bit values of the bit fields above */
#define HOST_DSP_CTRL_STATUS_PWR_UP_VAL \
(1 << HOST_DSP_CTRL_STATUS_PWR_UP_BIT)
#define HOST_DSP_CTRL_STATUS_PWR_DWN_VAL \
(1 << HOST_DSP_CTRL_STATUS_PWR_DWN_BIT)
#define HOST_DSP_CTRL_STATUS_PWR_UP_ACK_VAL \
(1 << HOST_DSP_CTRL_STATUS_PWR_UP_ACK_BIT)
#define HOST_DSP_CTRL_STATUS_PWR_DWN_ACK_VAL \
(1 << HOST_DSP_CTRL_STATUS_PWR_DWN_ACK_BIT)
/* Queue table header definition */
struct hfi_queue_tbl_header {
uint32_t qtbl_version; /* queue table version number */
uint32_t qtbl_size; /* total tables+queues size in bytes */
uint32_t qtbl_qhdr0_offset; /* offset of the 1st queue header entry */
uint32_t qtbl_qhdr_size; /* queue header size */
uint32_t qtbl_num_q; /* total number of queues */
uint32_t qtbl_num_active_q; /* number of active queues */
};
/* Queue header definition */
struct hfi_queue_header {
uint32_t qhdr_status; /* 0 == inactive, 1 == active */
/* 4 byte-aligned start offset from start of q table */
uint32_t qhdr_start_offset;
/* queue type */
uint32_t qhdr_type;
/* in bytes, value of 0 means packets are variable size.*/
uint32_t qhdr_q_size;
/* size of the Queue packet entries, in bytes, 0 means variable size */
uint32_t qhdr_pkt_size;
uint32_t qhdr_pkt_drop_cnt;
/* receiver watermark in # of queue packets */
uint32_t qhdr_rx_wm;
/* transmitter watermark in # of queue packets */
uint32_t qhdr_tx_wm;
/*
* set to request an interrupt from transmitter
* if qhdr_tx_wm is reached
*/
uint32_t qhdr_rx_req;
/*
* set to request an interrupt from receiver
* if qhdr_rx_wm is reached
*/
uint32_t qhdr_tx_req;
uint32_t qhdr_rx_irq_status; /* Not used */
uint32_t qhdr_tx_irq_status; /* Not used */
uint32_t qhdr_read_idx; /* read index in bytes */
uint32_t qhdr_write_idx; /* write index in bytes */
};
/* in bytes */
#define HFI_QUEUE_TABLE_HEADER_SIZE (sizeof(struct hfi_queue_tbl_header))
#define HFI_QUEUE_HEADER_SIZE (sizeof(struct hfi_queue_header))
#define HFI_QUEUE_TABLE_SIZE (HFI_QUEUE_TABLE_HEADER_SIZE + \
(NPU_HFI_NUMBER_OF_QS * HFI_QUEUE_HEADER_SIZE))
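/*
 * Worked size check from the definitions above: the table header is six
 * u32s (24 bytes) and each queue header fourteen u32s (56 bytes), so with
 * NPU_HFI_NUMBER_OF_QS == 6 the table occupies 24 + 6 * 56 = 360 bytes
 * ahead of the queue payload area.
 */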
/* Queue Indexes */
#define IPC_QUEUE_CMD_HIGH_PRIORITY 0 /* High priority Queue APPS->M0 */
#define IPC_QUEUE_APPS_EXEC 1 /* APPS Execute Queue APPS->M0 */
#define IPC_QUEUE_DSP_EXEC 2 /* DSP Execute Queue DSP->M0 */
#define IPC_QUEUE_APPS_RSP 3 /* APPS Message Queue M0->APPS */
#define IPC_QUEUE_DSP_RSP 4 /* DSP Message Queue DSP->APPS */
#define IPC_QUEUE_LOG 5 /* Log Message Queue M0->APPS */
#define NPU_HFI_NUMBER_OF_QS 6
#define NPU_HFI_NUMBER_OF_ACTIVE_QS 6
#define NPU_HFI_QUEUES_PER_CHANNEL 2
#endif /* _NPU_FIRMWARE_H */

View File

@ -0,0 +1,438 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* -------------------------------------------------------------------------
* Includes
* -------------------------------------------------------------------------
*/
#include "npu_hw_access.h"
#include "npu_mgr.h"
#include "npu_firmware.h"
#include "npu_hw.h"
#include "npu_host_ipc.h"
/* -------------------------------------------------------------------------
* Defines
* -------------------------------------------------------------------------
*/
/* HFI IPC interface */
#define TX_HDR_TYPE 0x01000000
#define RX_HDR_TYPE 0x00010000
#define HFI_QTBL_STATUS_ENABLED 0x00000001
#define QUEUE_TBL_VERSION 0x87654321
/* -------------------------------------------------------------------------
* Data Structures
* -------------------------------------------------------------------------
*/
struct npu_queue_tuple {
uint32_t size;
uint32_t hdr;
uint32_t start_offset;
};
static struct npu_queue_tuple npu_q_setup[6] = {
{ 1024, IPC_QUEUE_CMD_HIGH_PRIORITY | TX_HDR_TYPE | RX_HDR_TYPE, 0},
{ 4096, IPC_QUEUE_APPS_EXEC | TX_HDR_TYPE | RX_HDR_TYPE, 0},
{ 4096, IPC_QUEUE_DSP_EXEC | TX_HDR_TYPE | RX_HDR_TYPE, 0},
{ 4096, IPC_QUEUE_APPS_RSP | TX_HDR_TYPE | RX_HDR_TYPE, 0},
{ 4096, IPC_QUEUE_DSP_RSP | TX_HDR_TYPE | RX_HDR_TYPE, 0},
{ 1024, IPC_QUEUE_LOG | TX_HDR_TYPE | RX_HDR_TYPE, 0},
};
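/*
 * The sizes above give the high-priority command and log rings 1KB each
 * and the execute/response rings 4KB each; start_offset is filled in by
 * npu_host_ipc_init_hfi() once the table layout is computed.
 */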
/* -------------------------------------------------------------------------
* File Scope Function Prototypes
* -------------------------------------------------------------------------
*/
static int npu_host_ipc_init_hfi(struct npu_device *npu_dev);
static int npu_host_ipc_send_cmd_hfi(struct npu_device *npu_dev,
uint32_t q_idx, void *cmd_ptr);
static int npu_host_ipc_read_msg_hfi(struct npu_device *npu_dev,
uint32_t q_idx, uint32_t *msg_ptr);
static int ipc_queue_read(struct npu_device *npu_dev, uint32_t target_que,
uint8_t *packet, uint8_t *is_tx_req_set);
static int ipc_queue_write(struct npu_device *npu_dev, uint32_t target_que,
uint8_t *packet, uint8_t *is_rx_req_set);
/* -------------------------------------------------------------------------
* Function Definitions
* -------------------------------------------------------------------------
*/
static int npu_host_ipc_init_hfi(struct npu_device *npu_dev)
{
int status = 0;
struct hfi_queue_tbl_header *q_tbl_hdr = NULL;
struct hfi_queue_header *q_hdr_arr = NULL;
struct hfi_queue_header *q_hdr = NULL;
void *q_tbl_addr = NULL;
uint32_t reg_val = 0;
uint32_t q_idx = 0;
uint32_t q_tbl_size = sizeof(struct hfi_queue_tbl_header) +
(NPU_HFI_NUMBER_OF_QS * sizeof(struct hfi_queue_header));
uint32_t q_size = 0;
uint32_t cur_start_offset = 0;
reg_val = REGR(npu_dev, REG_NPU_FW_CTRL_STATUS);
/*
* If the firmware is already running and we're just attaching,
* we do not need to do this
*/
if ((reg_val & FW_CTRL_STATUS_LOG_READY_VAL) != 0)
return status;
/* check for valid interface queue table start address */
q_tbl_addr = kzalloc(q_tbl_size, GFP_KERNEL);
if (q_tbl_addr == NULL)
return -ENOMEM;
/* retrieve interface queue table start address */
q_tbl_hdr = q_tbl_addr;
q_hdr_arr = (struct hfi_queue_header *)((uint8_t *)q_tbl_addr +
sizeof(struct hfi_queue_tbl_header));
/* initialize the interface queue table header */
q_tbl_hdr->qtbl_version = QUEUE_TBL_VERSION;
q_tbl_hdr->qtbl_size = q_tbl_size;
q_tbl_hdr->qtbl_qhdr0_offset = sizeof(struct hfi_queue_tbl_header);
q_tbl_hdr->qtbl_qhdr_size = sizeof(struct hfi_queue_header);
q_tbl_hdr->qtbl_num_q = NPU_HFI_NUMBER_OF_QS;
q_tbl_hdr->qtbl_num_active_q = NPU_HFI_NUMBER_OF_ACTIVE_QS;
cur_start_offset = q_tbl_size;
for (q_idx = IPC_QUEUE_CMD_HIGH_PRIORITY;
q_idx <= IPC_QUEUE_LOG; q_idx++) {
q_hdr = &q_hdr_arr[q_idx];
/* queue is active */
q_hdr->qhdr_status = 0x01;
q_hdr->qhdr_start_offset = cur_start_offset;
npu_q_setup[q_idx].start_offset = cur_start_offset;
q_size = npu_q_setup[q_idx].size;
q_hdr->qhdr_type = npu_q_setup[q_idx].hdr;
/* in bytes */
q_hdr->qhdr_q_size = q_size;
/* variable size packets */
q_hdr->qhdr_pkt_size = 0;
q_hdr->qhdr_pkt_drop_cnt = 0;
q_hdr->qhdr_rx_wm = 0x1;
q_hdr->qhdr_tx_wm = 0x1;
/* since queue is initially empty */
q_hdr->qhdr_rx_req = 0x1;
q_hdr->qhdr_tx_req = 0x0;
/* not used */
q_hdr->qhdr_rx_irq_status = 0;
/* not used */
q_hdr->qhdr_tx_irq_status = 0;
q_hdr->qhdr_read_idx = 0;
q_hdr->qhdr_write_idx = 0;
cur_start_offset += q_size;
}
MEMW(npu_dev, IPC_ADDR, (uint8_t *)q_tbl_hdr, q_tbl_size);
kfree(q_tbl_addr);
/* Write in the NPU's address for where IPC starts */
REGW(npu_dev, (uint32_t)REG_NPU_HOST_CTRL_VALUE,
(uint32_t)IPC_MEM_OFFSET_FROM_SSTCM);
/* Set the IPC-address-ready bit */
reg_val = REGR(npu_dev, (uint32_t)REG_NPU_HOST_CTRL_STATUS);
REGW(npu_dev, (uint32_t)REG_NPU_HOST_CTRL_STATUS, reg_val |
HOST_CTRL_STATUS_IPC_ADDRESS_READY_VAL);
return status;
}
static int npu_host_ipc_send_cmd_hfi(struct npu_device *npu_dev,
uint32_t q_idx, void *cmd_ptr)
{
int status = 0;
uint8_t is_rx_req_set = 0;
uint32_t retry_cnt = 5;
status = ipc_queue_write(npu_dev, q_idx, (uint8_t *)cmd_ptr,
&is_rx_req_set);
if (status == -ENOSPC) {
do {
msleep(20);
status = ipc_queue_write(npu_dev, q_idx,
(uint8_t *)cmd_ptr, &is_rx_req_set);
} while ((status == -ENOSPC) && (--retry_cnt > 0));
}
if (status == 0) {
if (is_rx_req_set == 1)
status = INTERRUPT_RAISE_NPU(npu_dev);
}
if (status == 0)
pr_debug("Cmd Msg put on Command Queue - SUCCESSS\n");
else
pr_err("Cmd Msg put on Command Queue - FAILURE\n");
return status;
}
static int npu_host_ipc_read_msg_hfi(struct npu_device *npu_dev,
uint32_t q_idx, uint32_t *msg_ptr)
{
int status = 0;
uint8_t is_tx_req_set;
status = ipc_queue_read(npu_dev, q_idx, (uint8_t *)msg_ptr,
&is_tx_req_set);
if (status == 0) {
/* raise interrupt if qhdr_tx_req is set */
if (is_tx_req_set == 1)
status = INTERRUPT_RAISE_NPU(npu_dev);
}
return status;
}
static int ipc_queue_read(struct npu_device *npu_dev,
uint32_t target_que, uint8_t *packet,
uint8_t *is_tx_req_set)
{
int status = 0;
struct hfi_queue_header queue;
uint32_t packet_size, new_read_idx;
size_t read_ptr;
size_t offset = 0;
offset = (size_t)IPC_ADDR + sizeof(struct hfi_queue_tbl_header) +
target_que * sizeof(struct hfi_queue_header);
if ((packet == NULL) || (is_tx_req_set == NULL))
return -EINVAL;
/* Read the queue */
MEMR(npu_dev, (void *)((size_t)offset), (uint8_t *)&queue,
HFI_QUEUE_HEADER_SIZE);
if (queue.qhdr_type != npu_q_setup[target_que].hdr ||
queue.qhdr_q_size != npu_q_setup[target_que].size ||
queue.qhdr_read_idx >= queue.qhdr_q_size ||
queue.qhdr_write_idx >= queue.qhdr_q_size ||
queue.qhdr_start_offset !=
npu_q_setup[target_que].start_offset) {
pr_err("Invalid Queue header\n");
status = -EIO;
goto exit;
}
/* check if queue is empty */
if (queue.qhdr_read_idx == queue.qhdr_write_idx) {
/*
* set qhdr_rx_req, to inform the sender that the Interrupt
* needs to be raised with the next packet queued
*/
queue.qhdr_rx_req = 1;
*is_tx_req_set = 0;
status = -EPERM;
goto exit;
}
	read_ptr = ((size_t)IPC_ADDR +
		queue.qhdr_start_offset + queue.qhdr_read_idx);
/* Read packet size */
MEMR(npu_dev, (void *)((size_t)read_ptr), packet, 4);
packet_size = *((uint32_t *)packet);
pr_debug("target_que: %d, packet_size: %d\n",
target_que,
packet_size);
if ((packet_size == 0) ||
(packet_size > NPU_IPC_BUF_LENGTH)) {
pr_err("Invalid packet size %d\n", packet_size);
status = -EINVAL;
goto exit;
}
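	/*
	 * The packet may wrap past the end of the ring: read up to the end
	 * of the queue, then pull the remainder from the start of the
	 * queue payload area.
	 */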
new_read_idx = queue.qhdr_read_idx + packet_size;
if (new_read_idx < (queue.qhdr_q_size)) {
MEMR(npu_dev, (void *)((size_t)read_ptr), packet, packet_size);
} else {
new_read_idx -= (queue.qhdr_q_size);
MEMR(npu_dev, (void *)((size_t)read_ptr), packet,
packet_size - new_read_idx);
MEMR(npu_dev, (void *)((size_t)IPC_ADDR +
queue.qhdr_start_offset),
(void *)((size_t)packet + (packet_size-new_read_idx)),
new_read_idx);
}
queue.qhdr_read_idx = new_read_idx;
if (queue.qhdr_read_idx == queue.qhdr_write_idx)
/*
* receiver wants an interrupt from transmitter
* (when next item queued) because queue is empty
*/
queue.qhdr_rx_req = 1;
else
/* clear qhdr_rx_req since the queue is not empty */
queue.qhdr_rx_req = 0;
if (queue.qhdr_tx_req == 1)
/* transmitter requested an interrupt */
*is_tx_req_set = 1;
else
*is_tx_req_set = 0;
exit:
/* Update RX interrupt request -- queue.qhdr_rx_req */
MEMW(npu_dev, (void *)((size_t)offset +
(uint32_t)((size_t)&(queue.qhdr_rx_req) -
(size_t)&queue)), (uint8_t *)&queue.qhdr_rx_req,
sizeof(queue.qhdr_rx_req));
/* Update Read pointer -- queue.qhdr_read_idx */
MEMW(npu_dev, (void *)((size_t)offset + (uint32_t)(
(size_t)&(queue.qhdr_read_idx) - (size_t)&queue)),
(uint8_t *)&queue.qhdr_read_idx, sizeof(queue.qhdr_read_idx));
return status;
}
static int ipc_queue_write(struct npu_device *npu_dev,
uint32_t target_que, uint8_t *packet,
uint8_t *is_rx_req_set)
{
int status = 0;
struct hfi_queue_header queue;
uint32_t packet_size, new_write_idx;
uint32_t empty_space;
void *write_ptr;
uint32_t read_idx;
size_t offset = (size_t)IPC_ADDR +
sizeof(struct hfi_queue_tbl_header) +
target_que * sizeof(struct hfi_queue_header);
if ((packet == NULL) || (is_rx_req_set == NULL))
return -EINVAL;
MEMR(npu_dev, (void *)((size_t)offset), (uint8_t *)&queue,
HFI_QUEUE_HEADER_SIZE);
if (queue.qhdr_type != npu_q_setup[target_que].hdr ||
queue.qhdr_q_size != npu_q_setup[target_que].size ||
queue.qhdr_read_idx >= queue.qhdr_q_size ||
queue.qhdr_write_idx >= queue.qhdr_q_size ||
queue.qhdr_start_offset !=
npu_q_setup[target_que].start_offset) {
pr_err("Invalid Queue header\n");
status = -EIO;
goto exit;
}
packet_size = (*(uint32_t *)packet);
if (packet_size == 0) {
/* assign failed status and return */
status = -EPERM;
goto exit;
}
/* sample Read Idx */
read_idx = queue.qhdr_read_idx;
/* Calculate Empty Space(UWord32) in the Queue */
empty_space = (queue.qhdr_write_idx >= read_idx) ?
((queue.qhdr_q_size) - (queue.qhdr_write_idx - read_idx)) :
(read_idx - queue.qhdr_write_idx);
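	/*
	 * Worked example: with qhdr_q_size == 4096, write_idx == 4000 and
	 * read_idx == 100, empty_space = 4096 - (4000 - 100) = 196 bytes,
	 * so any packet of 196 bytes or more is bounced with -ENOSPC until
	 * the reader catches up.
	 */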
if (empty_space <= packet_size) {
/*
* If Queue is FULL/ no space for message
* set qhdr_tx_req.
*/
queue.qhdr_tx_req = 1;
/*
* Queue is FULL, force raise an interrupt to Receiver
*/
*is_rx_req_set = 1;
status = -ENOSPC;
goto exit;
}
/*
* clear qhdr_tx_req so that receiver does not raise an interrupt
* on reading packets from Queue, since there is space to write
* the next packet
*/
queue.qhdr_tx_req = 0;
new_write_idx = (queue.qhdr_write_idx + packet_size);
write_ptr = (void *)(size_t)((size_t)IPC_ADDR +
queue.qhdr_start_offset + queue.qhdr_write_idx);
if (new_write_idx < queue.qhdr_q_size) {
MEMW(npu_dev, (void *)((size_t)write_ptr), (uint8_t *)packet,
packet_size);
} else {
/* wraparound case */
new_write_idx -= (queue.qhdr_q_size);
MEMW(npu_dev, (void *)((size_t)write_ptr), (uint8_t *)packet,
packet_size - new_write_idx);
MEMW(npu_dev, (void *)((size_t)((size_t)IPC_ADDR +
queue.qhdr_start_offset)), (uint8_t *)(packet +
(packet_size - new_write_idx)), new_write_idx);
}
/* Update qhdr_write_idx */
queue.qhdr_write_idx = new_write_idx;
*is_rx_req_set = (queue.qhdr_rx_req == 1) ? 1 : 0;
/* Update Write pointer -- queue.qhdr_write_idx */
exit:
/* Update TX request -- queue.qhdr_tx_req */
MEMW(npu_dev, (void *)((size_t)(offset + (uint32_t)(
(size_t)&(queue.qhdr_tx_req) - (size_t)&queue))),
&queue.qhdr_tx_req, sizeof(queue.qhdr_tx_req));
MEMW(npu_dev, (void *)((size_t)(offset + (uint32_t)(
(size_t)&(queue.qhdr_write_idx) - (size_t)&queue))),
&queue.qhdr_write_idx, sizeof(queue.qhdr_write_idx));
return status;
}
/* -------------------------------------------------------------------------
* IPC Interface functions
* -------------------------------------------------------------------------
*/
int npu_host_ipc_send_cmd(struct npu_device *npu_dev, uint32_t q_idx,
void *cmd_ptr)
{
return npu_host_ipc_send_cmd_hfi(npu_dev, q_idx, cmd_ptr);
}
int npu_host_ipc_read_msg(struct npu_device *npu_dev, uint32_t q_idx,
uint32_t *msg_ptr)
{
return npu_host_ipc_read_msg_hfi(npu_dev, q_idx, msg_ptr);
}
int npu_host_ipc_pre_init(struct npu_device *npu_dev)
{
return npu_host_ipc_init_hfi(npu_dev);
}
int npu_host_ipc_post_init(struct npu_device *npu_dev)
{
return 0;
}

View File

@ -0,0 +1,464 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef NPU_HOST_IPC_H
#define NPU_HOST_IPC_H
/* -------------------------------------------------------------------------
* Defines
* -------------------------------------------------------------------------
*/
/* Messages sent **to** NPU */
/* IPC Message Commands -- uint32_t */
/* IPC command start base */
#define NPU_IPC_CMD_BASE 0x00000000
/* ipc_cmd_load_pkt */
#define NPU_IPC_CMD_LOAD 0x00000001
/* ipc_cmd_unload_pkt */
#define NPU_IPC_CMD_UNLOAD 0x00000002
/* ipc_cmd_execute_pkt */
#define NPU_IPC_CMD_EXECUTE 0x00000003
/* ipc_cmd_set_logging_state */
#define NPU_IPC_CMD_CONFIG_LOG 0x00000004
#define NPU_IPC_CMD_CONFIG_PERFORMANCE 0x00000005
#define NPU_IPC_CMD_CONFIG_DEBUG 0x00000006
#define NPU_IPC_CMD_SHUTDOWN 0x00000007
/* ipc_cmd_loopback_packet */
#define NPU_IPC_CMD_LOOPBACK 0x00000008
/* ipc_cmd_load_packet_v2_t */
#define NPU_IPC_CMD_LOAD_V2 0x00000009
/* ipc_cmd_execute_packet_v2 */
#define NPU_IPC_CMD_EXECUTE_V2 0x0000000A
/* ipc_cmd_set_property_packet */
#define NPU_IPC_CMD_SET_PROPERTY 0x0000000B
/* ipc_cmd_get_property_packet */
#define NPU_IPC_CMD_GET_PROPERTY 0x0000000C
/* Messages sent **from** NPU */
/* IPC Message Response -- uint32_t */
/* IPC response start base */
#define NPU_IPC_MSG_BASE 0x00010000
/* ipc_msg_load_pkt */
#define NPU_IPC_MSG_LOAD_DONE 0x00010001
/* ipc_msg_header_pkt */
#define NPU_IPC_MSG_UNLOAD_DONE 0x00010002
/* ipc_msg_header_pkt */
#define NPU_IPC_MSG_EXECUTE_DONE 0x00010003
/* ipc_msg_event_notify_pkt */
#define NPU_IPC_MSG_EVENT_NOTIFY 0x00010004
/* ipc_msg_loopback_pkt */
#define NPU_IPC_MSG_LOOPBACK_DONE 0x00010005
/* ipc_msg_execute_pkt_v2 */
#define NPU_IPC_MSG_EXECUTE_V2_DONE 0x00010006
/* ipc_msg_set_property_packet */
#define NPU_IPC_MSG_SET_PROPERTY_DONE 0x00010007
/* ipc_msg_get_property_packet */
#define NPU_IPC_MSG_GET_PROPERTY_DONE 0x00010008
/* ipc_msg_general_notify_pkt */
#define NPU_IPC_MSG_GENERAL_NOTIFY 0x00010010
/* IPC Notify Message Type -- uint32_t */
#define NPU_NOTIFY_DCVS_MODE 0x00002000
/* Logging message size */
/* Number of 32-bit elements for the maximum log message size */
#define NPU_LOG_MSG_MAX_SIZE 4
/* Performance */
/* Performance counters for current network layer */
/* Amount of data read from all the DMA read channels */
#define NPU_PERFORMANCE_DMA_DATA_READ 0x01
/* Amount of data written from all the DMA write channels */
#define NPU_PERFORMANCE_DMA_DATA_WRITTEN 0x02
/* Number of blocks read by DMA channels */
#define NPU_PERFORMANCE_DMA_NUM_BLOCKS_READ 0x03
/* Number of blocks written by DMA channels */
#define NPU_PERFORMANCE_DMA_NUM_BLOCKS_WRITTEN 0x04
/* Number of instructions executed by CAL */
#define NPU_PERFORMANCE_INSTRUCTIONS_CAL 0x05
/* Number of instructions executed by CUB */
#define NPU_PERFORMANCE_INSTRUCTIONS_CUB 0x06
/* Timestamp of start of network load */
#define NPU_PERFORMANCE_TIMESTAMP_LOAD_START 0x07
/* Timestamp of end of network load */
#define NPU_PERFORMANCE_TIMESTAMP_LOAD_END 0x08
/* Timestamp of start of network execute */
#define NPU_PERFORMANCE_TIMESTAMP_EXECUTE_START 0x09
/* Timestamp of end of network execute */
#define NPU_PERFORMANCE_TIMESTAMP_EXECUTE_END 0x10
/* Timestamp of CAL start */
#define NPU_PERFORMANCE_TIMESTAMP_CAL_START 0x11
/* Timestamp of CAL end */
#define NPU_PERFORMANCE_TIMESTAMP_CAL_END 0x12
/* Timestamp of CUB start */
#define NPU_PERFORMANCE_TIMESTAMP_CUB_START 0x13
/* Timestamp of CUB end */
#define NPU_PERFORMANCE_TIMESTAMP_CUB_END 0x14
/* Performance enable */
/* Select which counters you want back per layer */
/* Shutdown */
/* Immediate shutdown, discard any state, etc */
#define NPU_SHUTDOWN_IMMEDIATE 0x01
/* Shutdown after current execution (if any) is completed */
#define NPU_SHUTDOWN_WAIT_CURRENT_EXECUTION 0x02
/* Debug stats */
#define NUM_LAYER_STATS_PER_EXE_MSG_MAX 110
/* DCVS */
#define NPU_DCVS_ACTIVITY_MAX_PERF 0x100
/* -------------------------------------------------------------------------
* Data Structures
* -------------------------------------------------------------------------
*/
/* Command Header - Header for all Messages **TO** NPU */
/*
* command header packet definition for
* messages sent from host->NPU
*/
struct ipc_cmd_header_pkt {
uint32_t size;
uint32_t cmd_type;
uint32_t trans_id;
	uint32_t flags; /* TODO: what flags and why */
};
/* Message Header - Header for all messages **FROM** NPU */
/*
* message header packet definition for
* messages sent from NPU->host
*/
struct ipc_msg_header_pkt {
uint32_t size;
uint32_t msg_type;
uint32_t status;
uint32_t trans_id;
uint32_t flags;
};
/* Execute */
/*
* FIRMWARE
* keep lastNetworkIDRan = uint32
* keep wasLastNetworkChunky = BOOLEAN
*/
/*
* ACO Buffer definition
*/
struct npu_aco_buffer {
/*
* used to track if previous network is the same and already loaded,
* we can save a dma
*/
uint32_t network_id;
/*
* size of header + first chunk ACO buffer -
* this saves a dma by dmaing both header and first chunk
*/
uint32_t buf_size;
/*
* SMMU 32-bit mapped address that the DMA engine can read -
* uses lower 32 bits
*/
uint64_t address;
};
/*
* ACO Buffer V2 definition
*/
struct npu_aco_buffer_v2 {
/*
* used to track if previous network is the same and already loaded,
* we can save a dma
*/
uint32_t network_id;
/*
* size of header + first chunk ACO buffer -
* this saves a dma by dmaing both header and first chunk
*/
uint32_t buf_size;
/*
* SMMU 32-bit mapped address that the DMA engine can read -
* uses lower 32 bits
*/
uint32_t address;
/*
* number of layers in the network
*/
uint32_t num_layers;
};
/*
* ACO Patch Parameters
*/
struct npu_patch_tuple {
uint32_t value;
uint32_t chunk_id;
uint16_t instruction_size_in_bytes;
uint16_t variable_size_in_bits;
uint16_t shift_value_in_bits;
uint32_t loc_offset;
};
/*
* ACO Patch Tuple V2
*/
struct npu_patch_tuple_v2 {
uint32_t value;
uint32_t chunk_id;
uint32_t instruction_size_in_bytes;
uint32_t variable_size_in_bits;
uint32_t shift_value_in_bits;
uint32_t loc_offset;
};
struct npu_patch_params {
uint32_t num_params;
struct npu_patch_tuple param[2];
};
/*
* LOAD command packet definition
*/
struct ipc_cmd_load_pkt {
struct ipc_cmd_header_pkt header;
struct npu_aco_buffer buf_pkt;
};
/*
* LOAD command packet V2 definition
*/
struct ipc_cmd_load_pkt_v2 {
struct ipc_cmd_header_pkt header;
struct npu_aco_buffer_v2 buf_pkt;
uint32_t num_patch_params;
struct npu_patch_tuple_v2 patch_params[];
};
/*
* UNLOAD command packet definition
*/
struct ipc_cmd_unload_pkt {
struct ipc_cmd_header_pkt header;
uint32_t network_hdl;
};
/*
* Execute packet definition
*/
struct ipc_cmd_execute_pkt {
struct ipc_cmd_header_pkt header;
struct npu_patch_params patch_params;
uint32_t network_hdl;
};
struct npu_patch_params_v2 {
uint32_t value;
uint32_t id;
};
/*
* Execute packet V2 definition
*/
struct ipc_cmd_execute_pkt_v2 {
struct ipc_cmd_header_pkt header;
uint32_t network_hdl;
uint32_t num_patch_params;
struct npu_patch_params_v2 patch_params[];
};
/*
* Loopback packet definition
*/
struct ipc_cmd_loopback_pkt {
struct ipc_cmd_header_pkt header;
uint32_t loopbackParams;
};
/*
* Generic property definition
*/
struct ipc_cmd_prop_pkt {
struct ipc_cmd_header_pkt header;
uint32_t prop_id;
uint32_t num_params;
uint32_t network_hdl;
uint32_t prop_param[];
};
/*
* Generic property response packet definition
*/
struct ipc_msg_prop_pkt {
struct ipc_msg_header_pkt header;
uint32_t prop_id;
uint32_t num_params;
uint32_t network_hdl;
uint32_t prop_param[];
};
/*
* Generic notify message packet definition
*/
struct ipc_msg_general_notify_pkt {
struct ipc_msg_header_pkt header;
uint32_t notify_id;
uint32_t num_params;
uint32_t network_hdl;
uint32_t notify_param[];
};
/*
* LOAD response packet definition
*/
struct ipc_msg_load_pkt {
struct ipc_msg_header_pkt header;
uint32_t network_hdl;
};
/*
* UNLOAD response packet definition
*/
struct ipc_msg_unload_pkt {
struct ipc_msg_header_pkt header;
uint32_t network_hdl;
};
/*
* Layer Stats information returned back during EXECUTE_DONE response
*/
struct ipc_layer_stats {
/*
* hardware tick count per layer
*/
uint32_t tick_count;
};
struct ipc_execute_layer_stats {
/*
* total number of layers associated with the execution
*/
uint32_t total_num_layers;
/*
* pointer to each layer stats
*/
struct ipc_layer_stats
layer_stats_list[NUM_LAYER_STATS_PER_EXE_MSG_MAX];
};
struct ipc_execute_stats {
/*
* total e2e IPC tick count during EXECUTE cmd
*/
uint32_t e2e_ipc_tick_count;
/*
* tick count on ACO loading
*/
uint32_t aco_load_tick_count;
/*
* tick count on ACO execution
*/
uint32_t aco_execution_tick_count;
/*
* individual layer stats
*/
struct ipc_execute_layer_stats exe_stats;
};
/*
* EXECUTE response packet definition
*/
struct ipc_msg_execute_pkt {
struct ipc_msg_header_pkt header;
struct ipc_execute_stats stats;
uint32_t network_hdl;
};
/*
* EXECUTE V2 response packet definition
*/
struct ipc_msg_execute_pkt_v2 {
struct ipc_msg_header_pkt header;
uint32_t network_hdl;
uint32_t stats_data[];
};
/*
* LOOPBACK response packet definition
*/
struct ipc_msg_loopback_pkt {
struct ipc_msg_header_pkt header;
uint32_t loopbackParams;
};
/* Logging Related */
/*
* ipc_log_state_t - Logging state
*/
struct ipc_log_state {
uint32_t module_msk;
uint32_t level_msk;
};
struct ipc_cmd_log_state_pkt {
struct ipc_cmd_header_pkt header;
struct ipc_log_state log_state;
};
struct ipc_msg_log_state_pkt {
struct ipc_msg_header_pkt header;
struct ipc_log_state log_state;
};
/*
* Logging message
* This is a message from the NPU that contains the
 * logging message. The values of the four payload words are not exposed;
 * the receiver has to refer to the logging implementation to
 * interpret what they mean and how to parse them
*/
struct ipc_msg_log_pkt {
struct ipc_msg_header_pkt header;
uint32_t log_msg[NPU_LOG_MSG_MAX_SIZE];
};
/* Performance Related */
/*
* Set counter mask of which counters we want
* This is a message from HOST->NPU Firmware
*/
struct ipc_cmd_set_performance_query {
struct ipc_cmd_header_pkt header;
uint32_t cnt_msk;
};
/*
 * Performance counter values reported back per layer
 * This is a message from NPU Firmware->HOST
*/
struct ipc_msg_performance_counters {
struct ipc_cmd_header_pkt header;
uint32_t layer_id;
uint32_t num_tulpes;
/* Array of tuples [HEADER,value] */
uint32_t cnt_tulpes[];
};
/*
* ipc_cmd_shutdown - Shutdown command
*/
struct ipc_cmd_shutdown_pkt {
struct ipc_cmd_header_pkt header;
uint32_t shutdown_flags;
};
#endif /* NPU_HOST_IPC_H */

View File

@ -0,0 +1,53 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef NPU_HW_H
#define NPU_HW_H
/* -------------------------------------------------------------------------
* Defines
* -------------------------------------------------------------------------
*/
#define NPU_HW_VERSION (0x00000000)
#define NPU_MASTERn_IPC_IRQ_OUT(n) (0x00001004+0x1000*(n))
#define NPU_CACHE_ATTR_IDn___POR 0x00011100
#define NPU_CACHE_ATTR_IDn(n) (0x00000800+0x4*(n))
#define NPU_MASTERn_IPC_IRQ_IN_CTRL(n) (0x00001008+0x1000*(n))
#define NPU_MASTER0_IPC_IRQ_IN_CTRL__IRQ_SOURCE_SELECT___S 4
#define NPU_MASTERn_IPC_IRQ_OUT_CTRL(n) (0x00001004+0x1000*(n))
#define NPU_MASTER0_IPC_IRQ_OUT_CTRL__IRQ_TYPE_PULSE 4
#define NPU_GPR0 (0x00000100)
#define NPU_MASTERn_ERROR_IRQ_STATUS(n) (0x00001010+0x1000*(n))
#define NPU_MASTERn_ERROR_IRQ_INCLUDE(n) (0x00001014+0x1000*(n))
#define NPU_MASTERn_ERROR_IRQ_ENABLE(n) (0x00001018+0x1000*(n))
#define NPU_MASTERn_ERROR_IRQ_CLEAR(n) (0x0000101C+0x1000*(n))
#define NPU_MASTERn_ERROR_IRQ_SET(n) (0x00001020+0x1000*(n))
#define NPU_MASTERn_ERROR_IRQ_OWNER(n) (0x00007000+4*(n))
#define NPU_ERROR_IRQ_MASK 0x000000E3
#define NPU_MASTERn_WDOG_IRQ_STATUS(n) (0x00001030+0x1000*(n))
#define NPU_WDOG_BITE_IRQ_STATUS (1 << 1)
#define NPU_MASTERn_WDOG_IRQ_INCLUDE(n) (0x00001034+0x1000*(n))
#define NPU_WDOG_BITE_IRQ_INCLUDE (1 << 1)
#define NPU_MASTERn_WDOG_IRQ_OWNER(n) (0x00007010+4*(n))
#define NPU_WDOG_IRQ_MASK 0x00000002
#define NPU_GPR1 (0x00000104)
#define NPU_GPR2 (0x00000108)
#define NPU_GPR3 (0x0000010C)
#define NPU_GPR4 (0x00000110)
#define NPU_GPR13 (0x00000134)
#define NPU_GPR14 (0x00000138)
#define NPU_GPR15 (0x0000013C)
#define BWMON2_SAMPLING_WINDOW (0x000003A8)
#define BWMON2_BYTE_COUNT_THRESHOLD_HIGH (0x000003AC)
#define BWMON2_BYTE_COUNT_THRESHOLD_MEDIUM (0x000003B0)
#define BWMON2_BYTE_COUNT_THRESHOLD_LOW (0x000003B4)
#define BWMON2_ZONE_ACTIONS (0x000003B8)
#define BWMON2_ZONE_COUNT_THRESHOLD (0x000003BC)
#endif /* NPU_HW_H */

View File

@ -0,0 +1,485 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* -------------------------------------------------------------------------
* Includes
* -------------------------------------------------------------------------
*/
#include <linux/msm_dma_iommu_mapping.h>
#include <soc/qcom/subsystem_restart.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/firmware.h>
#include <linux/qcom_scm.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "npu_hw_access.h"
#include "npu_common.h"
#include "npu_hw.h"
/* -------------------------------------------------------------------------
* Functions - Register
* -------------------------------------------------------------------------
*/
static uint32_t npu_reg_read(void __iomem *base, size_t size, uint32_t off)
{
if (!base) {
pr_err("NULL base address\n");
return 0;
}
if ((off % 4) != 0) {
pr_err("offset %x is not aligned\n", off);
return 0;
}
if (off >= size) {
pr_err("offset exceeds io region %x:%x\n", off, size);
return 0;
}
return readl_relaxed(base + off);
}
static void npu_reg_write(void __iomem *base, size_t size, uint32_t off,
uint32_t val)
{
if (!base) {
pr_err("NULL base address\n");
return;
}
if ((off % 4) != 0) {
pr_err("offset %x is not aligned\n", off);
return;
}
if (off >= size) {
pr_err("offset exceeds io region %x:%x\n", off, size);
return;
}
writel_relaxed(val, base + off);
__iowmb();
}
uint32_t npu_core_reg_read(struct npu_device *npu_dev, uint32_t off)
{
return npu_reg_read(npu_dev->core_io.base, npu_dev->core_io.size, off);
}
void npu_core_reg_write(struct npu_device *npu_dev, uint32_t off, uint32_t val)
{
npu_reg_write(npu_dev->core_io.base, npu_dev->core_io.size,
off, val);
}
uint32_t npu_bwmon_reg_read(struct npu_device *npu_dev, uint32_t off)
{
return npu_reg_read(npu_dev->bwmon_io.base, npu_dev->bwmon_io.size,
off);
}
void npu_bwmon_reg_write(struct npu_device *npu_dev, uint32_t off,
uint32_t val)
{
npu_reg_write(npu_dev->bwmon_io.base, npu_dev->bwmon_io.size,
off, val);
}
uint32_t npu_qfprom_reg_read(struct npu_device *npu_dev, uint32_t off)
{
return npu_reg_read(npu_dev->qfprom_io.base,
npu_dev->qfprom_io.size, off);
}
/* -------------------------------------------------------------------------
* Functions - Memory
* -------------------------------------------------------------------------
*/
void npu_mem_write(struct npu_device *npu_dev, void *dst, void *src,
uint32_t size)
{
size_t dst_off = (size_t)dst;
uint32_t *src_ptr32 = (uint32_t *)src;
uint8_t *src_ptr8 = NULL;
uint32_t i = 0;
uint32_t num = 0;
if (dst_off >= npu_dev->tcm_io.size ||
(npu_dev->tcm_io.size - dst_off) < size) {
pr_err("memory write exceeds io region %x:%x:%x\n",
dst_off, size, npu_dev->tcm_io.size);
return;
}
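	/*
	 * Copy the bulk of the payload as 32-bit words, then mop up any
	 * trailing bytes so unaligned sizes still land fully in TCM before
	 * the closing write barrier.
	 */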
num = size/4;
for (i = 0; i < num; i++) {
writel_relaxed(src_ptr32[i], npu_dev->tcm_io.base + dst_off);
dst_off += 4;
}
if (size%4 != 0) {
src_ptr8 = (uint8_t *)((size_t)src + (num*4));
num = size%4;
for (i = 0; i < num; i++) {
writeb_relaxed(src_ptr8[i], npu_dev->tcm_io.base +
dst_off);
dst_off += 1;
}
}
__iowmb();
}
int32_t npu_mem_read(struct npu_device *npu_dev, void *src, void *dst,
uint32_t size)
{
size_t src_off = (size_t)src;
uint32_t *out32 = (uint32_t *)dst;
uint8_t *out8 = NULL;
uint32_t i = 0;
uint32_t num = 0;
if (src_off >= npu_dev->tcm_io.size ||
(npu_dev->tcm_io.size - src_off) < size) {
pr_err("memory read exceeds io region %x:%x:%x\n",
src_off, size, npu_dev->tcm_io.size);
return 0;
}
num = size/4;
for (i = 0; i < num; i++) {
out32[i] = readl_relaxed(npu_dev->tcm_io.base + src_off);
src_off += 4;
}
if (size%4 != 0) {
out8 = (uint8_t *)((size_t)dst + (num*4));
num = size%4;
for (i = 0; i < num; i++) {
out8[i] = readb_relaxed(npu_dev->tcm_io.base + src_off);
src_off += 1;
}
}
return 0;
}
void *npu_ipc_addr(void)
{
return (void *)(IPC_MEM_OFFSET_FROM_SSTCM);
}
/* -------------------------------------------------------------------------
* Functions - Interrupt
* -------------------------------------------------------------------------
*/
void npu_interrupt_ack(struct npu_device *npu_dev, uint32_t intr_num)
{
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
uint32_t wdg_irq_sts = 0, error_irq_sts = 0;
/* Clear irq state */
REGW(npu_dev, NPU_MASTERn_IPC_IRQ_OUT(0), 0x0);
wdg_irq_sts = REGR(npu_dev, NPU_MASTERn_WDOG_IRQ_STATUS(0));
if (wdg_irq_sts != 0) {
pr_err("wdg irq %x\n", wdg_irq_sts);
host_ctx->wdg_irq_sts |= wdg_irq_sts;
host_ctx->fw_error = true;
}
error_irq_sts = REGR(npu_dev, NPU_MASTERn_ERROR_IRQ_STATUS(0));
error_irq_sts &= REGR(npu_dev, NPU_MASTERn_ERROR_IRQ_ENABLE(0));
if (error_irq_sts != 0) {
REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_CLEAR(0), error_irq_sts);
pr_err("error irq %x\n", error_irq_sts);
host_ctx->err_irq_sts |= error_irq_sts;
host_ctx->fw_error = true;
}
}
int32_t npu_interrupt_raise_m0(struct npu_device *npu_dev)
{
	/*
	 * Bit 4 sets IRQ_SOURCE_SELECT to local; bit 0 triggers a pulse
	 * to NPU_MASTER0_IPC_IN_IRQ0.
	 */
npu_core_reg_write(npu_dev, NPU_MASTERn_IPC_IRQ_IN_CTRL(0), 0x1
<< NPU_MASTER0_IPC_IRQ_IN_CTRL__IRQ_SOURCE_SELECT___S | 0x1);
return 0;
}
int32_t npu_interrupt_raise_dsp(struct npu_device *npu_dev)
{
npu_core_reg_write(npu_dev, NPU_MASTERn_IPC_IRQ_OUT_CTRL(1), 0x8);
return 0;
}
/* -------------------------------------------------------------------------
* Functions - ION Memory
* -------------------------------------------------------------------------
*/
static struct npu_ion_buf *npu_alloc_npu_ion_buffer(struct npu_client
*client, int buf_hdl, uint32_t size)
{
struct npu_ion_buf *ret_val = NULL, *tmp;
struct list_head *pos = NULL;
mutex_lock(&client->list_lock);
list_for_each(pos, &(client->mapped_buffer_list)) {
tmp = list_entry(pos, struct npu_ion_buf, list);
if (tmp->fd == buf_hdl) {
ret_val = tmp;
break;
}
}
if (ret_val) {
/* mapped already, treat as invalid request */
pr_err("ion buf has been mapped\n");
ret_val = NULL;
} else {
ret_val = kzalloc(sizeof(*ret_val), GFP_KERNEL);
if (ret_val) {
ret_val->fd = buf_hdl;
ret_val->size = size;
ret_val->iova = 0;
list_add(&(ret_val->list),
&(client->mapped_buffer_list));
}
}
mutex_unlock(&client->list_lock);
return ret_val;
}
static struct npu_ion_buf *npu_get_npu_ion_buffer(struct npu_client
*client, int buf_hdl)
{
struct list_head *pos = NULL;
struct npu_ion_buf *ret_val = NULL, *tmp;
mutex_lock(&client->list_lock);
list_for_each(pos, &(client->mapped_buffer_list)) {
tmp = list_entry(pos, struct npu_ion_buf, list);
if (tmp->fd == buf_hdl) {
ret_val = tmp;
break;
}
}
mutex_unlock(&client->list_lock);
return ret_val;
}
static void npu_free_npu_ion_buffer(struct npu_client
*client, int buf_hdl)
{
struct list_head *pos = NULL;
struct npu_ion_buf *npu_ion_buf = NULL;
mutex_lock(&client->list_lock);
list_for_each(pos, &(client->mapped_buffer_list)) {
npu_ion_buf = list_entry(pos, struct npu_ion_buf, list);
if (npu_ion_buf->fd == buf_hdl) {
list_del(&npu_ion_buf->list);
kfree(npu_ion_buf);
break;
}
}
mutex_unlock(&client->list_lock);
}
int npu_mem_map(struct npu_client *client, int buf_hdl, uint32_t size,
uint64_t *addr)
{
int ret = 0;
struct npu_device *npu_dev = client->npu_dev;
struct npu_ion_buf *ion_buf = NULL;
struct npu_smmu_ctx *smmu_ctx = &npu_dev->smmu_ctx;
if (buf_hdl == 0)
return -EINVAL;
ion_buf = npu_alloc_npu_ion_buffer(client, buf_hdl, size);
if (!ion_buf) {
pr_err("%s fail to alloc npu_ion_buffer\n", __func__);
ret = -ENOMEM;
return ret;
}
smmu_ctx->attach_cnt++;
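	/* Standard dma-buf import sequence: get -> attach -> map attachment. */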
ion_buf->dma_buf = dma_buf_get(ion_buf->fd);
if (IS_ERR_OR_NULL(ion_buf->dma_buf)) {
pr_err("dma_buf_get failed %d\n", ion_buf->fd);
ret = -ENOMEM;
ion_buf->dma_buf = NULL;
goto map_end;
}
ion_buf->attachment = dma_buf_attach(ion_buf->dma_buf,
&(npu_dev->pdev->dev));
if (IS_ERR(ion_buf->attachment)) {
ret = -ENOMEM;
ion_buf->attachment = NULL;
goto map_end;
}
ion_buf->attachment->dma_map_attrs = DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
ion_buf->table = dma_buf_map_attachment(ion_buf->attachment,
DMA_BIDIRECTIONAL);
if (IS_ERR(ion_buf->table)) {
pr_err("npu dma_buf_map_attachment failed\n");
ret = -ENOMEM;
ion_buf->table = NULL;
goto map_end;
}
ion_buf->iova = ion_buf->table->sgl->dma_address;
ion_buf->size = ion_buf->dma_buf->size;
*addr = ion_buf->iova;
pr_debug("mapped mem addr:0x%llx size:0x%x\n", ion_buf->iova,
ion_buf->size);
map_end:
if (ret)
npu_mem_unmap(client, buf_hdl, 0);
return ret;
}
void npu_mem_invalidate(struct npu_client *client, int buf_hdl)
{
struct npu_device *npu_dev = client->npu_dev;
struct npu_ion_buf *ion_buf = npu_get_npu_ion_buffer(client,
buf_hdl);
if (!ion_buf)
pr_err("%s can't find ion buf\n", __func__);
else
dma_sync_sg_for_cpu(&(npu_dev->pdev->dev), ion_buf->table->sgl,
ion_buf->table->nents, DMA_BIDIRECTIONAL);
}
bool npu_mem_verify_addr(struct npu_client *client, uint64_t addr)
{
struct npu_ion_buf *ion_buf = NULL;
struct list_head *pos = NULL;
bool valid = false;
mutex_lock(&client->list_lock);
list_for_each(pos, &(client->mapped_buffer_list)) {
ion_buf = list_entry(pos, struct npu_ion_buf, list);
if (ion_buf->iova == addr) {
valid = true;
break;
}
}
mutex_unlock(&client->list_lock);
return valid;
}
void npu_mem_unmap(struct npu_client *client, int buf_hdl, uint64_t addr)
{
struct npu_device *npu_dev = client->npu_dev;
struct npu_ion_buf *ion_buf = NULL;
/* clear entry and retrieve the corresponding buffer */
ion_buf = npu_get_npu_ion_buffer(client, buf_hdl);
if (!ion_buf) {
pr_err("%s could not find buffer\n", __func__);
return;
}
if (ion_buf->iova != addr)
pr_warn("unmap address %llu doesn't match %llu\n", addr,
ion_buf->iova);
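	/* Tear down in the reverse order of npu_mem_map(). */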
if (ion_buf->table)
dma_buf_unmap_attachment(ion_buf->attachment, ion_buf->table,
DMA_BIDIRECTIONAL);
if (ion_buf->dma_buf && ion_buf->attachment)
dma_buf_detach(ion_buf->dma_buf, ion_buf->attachment);
if (ion_buf->dma_buf)
dma_buf_put(ion_buf->dma_buf);
npu_dev->smmu_ctx.attach_cnt--;
pr_debug("unmapped mem addr:0x%llx size:0x%x\n", ion_buf->iova,
ion_buf->size);
npu_free_npu_ion_buffer(client, buf_hdl);
}
/* -------------------------------------------------------------------------
* Functions - Features
* -------------------------------------------------------------------------
*/
uint8_t npu_hw_clk_gating_enabled(void)
{
return 1;
}
uint8_t npu_hw_log_enabled(void)
{
return 1;
}
/* -------------------------------------------------------------------------
* Functions - Subsystem/PIL
* -------------------------------------------------------------------------
*/
#define NPU_PAS_ID (23)
int npu_subsystem_get(struct npu_device *npu_dev, const char *fw_name)
{
struct device *dev = npu_dev->device;
const struct firmware *firmware_p;
ssize_t fw_size;
/* load firmware */
int ret = request_firmware(&firmware_p, fw_name, dev);
if (ret < 0) {
pr_err("request_firmware %s failed: %d\n", fw_name, ret);
return ret;
}
fw_size = qcom_mdt_get_size(firmware_p);
if (fw_size < 0 || fw_size > npu_dev->fw_io.mem_size) {
	pr_err("npu fw size invalid, %zd\n", fw_size);
	release_firmware(firmware_p);
	return -EINVAL;
}
/* load the ELF segments to memory */
ret = qcom_mdt_load(dev, firmware_p, fw_name, NPU_PAS_ID,
npu_dev->fw_io.mem_region, npu_dev->fw_io.mem_phys,
npu_dev->fw_io.mem_size, &npu_dev->fw_io.mem_reloc);
release_firmware(firmware_p);
if (ret) {
pr_err("qcom_mdt_load failure, %d\n", ret);
return ret;
}
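	/* Ask TZ to authenticate the loaded image and release the core from reset. */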
ret = qcom_scm_pas_auth_and_reset(NPU_PAS_ID);
if (ret) {
pr_err("failed to authenticate image and release reset\n");
return ret;
}
pr_debug("done pas auth\n");
return 0;
}
void npu_subsystem_put(struct npu_device *npu_dev)
{
int ret = qcom_scm_pas_shutdown(NPU_PAS_ID);
if (ret)
pr_err("failed to shutdown: %d\n", ret);
}

View File

@ -0,0 +1,87 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _NPU_HW_ACCESS_H
#define _NPU_HW_ACCESS_H
/* -------------------------------------------------------------------------
* Includes
* -------------------------------------------------------------------------
*/
#include "npu_common.h"
/* -------------------------------------------------------------------------
* Defines
* -------------------------------------------------------------------------
*/
#define IPC_MEM_OFFSET_FROM_SSTCM 0x00010000
#define SYS_CACHE_SCID 23
#define QFPROM_FMAX_REG_OFFSET 0x000001C8
#define QFPROM_FMAX_BITS_MASK 0x0000000C
#define QFPROM_FMAX_BITS_SHIFT 2
#define REGW(npu_dev, off, val) npu_core_reg_write(npu_dev, off, val)
#define REGR(npu_dev, off) npu_core_reg_read(npu_dev, off)
#define MEMW(npu_dev, dst, src, size) npu_mem_write(npu_dev, (void *)(dst),\
(void *)(src), size)
#define MEMR(npu_dev, src, dst, size) npu_mem_read(npu_dev, (void *)(src),\
(void *)(dst), size)
#define IPC_ADDR npu_ipc_addr()
#define INTERRUPT_ACK(npu_dev, num) npu_interrupt_ack(npu_dev, num)
#define INTERRUPT_RAISE_NPU(npu_dev) npu_interrupt_raise_m0(npu_dev)
#define INTERRUPT_RAISE_DSP(npu_dev) npu_interrupt_raise_dsp(npu_dev)
/* -------------------------------------------------------------------------
* Data Structures
* -------------------------------------------------------------------------
*/
struct npu_device;
struct npu_ion_buf_t;
struct npu_host_ctx;
struct npu_client;
typedef irqreturn_t (*intr_hdlr_fn)(int32_t irq, void *ptr);
typedef void (*wq_hdlr_fn) (struct work_struct *work);
/* -------------------------------------------------------------------------
* Function Prototypes
* -------------------------------------------------------------------------
*/
uint32_t npu_core_reg_read(struct npu_device *npu_dev, uint32_t off);
void npu_core_reg_write(struct npu_device *npu_dev, uint32_t off, uint32_t val);
uint32_t npu_bwmon_reg_read(struct npu_device *npu_dev, uint32_t off);
void npu_bwmon_reg_write(struct npu_device *npu_dev, uint32_t off,
uint32_t val);
void npu_mem_write(struct npu_device *npu_dev, void *dst, void *src,
uint32_t size);
int32_t npu_mem_read(struct npu_device *npu_dev, void *src, void *dst,
uint32_t size);
uint32_t npu_qfprom_reg_read(struct npu_device *npu_dev, uint32_t off);
int npu_mem_map(struct npu_client *client, int buf_hdl, uint32_t size,
uint64_t *addr);
void npu_mem_unmap(struct npu_client *client, int buf_hdl, uint64_t addr);
void npu_mem_invalidate(struct npu_client *client, int buf_hdl);
bool npu_mem_verify_addr(struct npu_client *client, uint64_t addr);
void *npu_ipc_addr(void);
void npu_interrupt_ack(struct npu_device *npu_dev, uint32_t intr_num);
int32_t npu_interrupt_raise_m0(struct npu_device *npu_dev);
int32_t npu_interrupt_raise_dsp(struct npu_device *npu_dev);
uint8_t npu_hw_clk_gating_enabled(void);
uint8_t npu_hw_log_enabled(void);
int npu_enable_irq(struct npu_device *npu_dev);
void npu_disable_irq(struct npu_device *npu_dev);
int npu_enable_sys_cache(struct npu_device *npu_dev);
void npu_disable_sys_cache(struct npu_device *npu_dev);
int npu_subsystem_get(struct npu_device *npu_dev, const char *fw_name);
void npu_subsystem_put(struct npu_device *npu_dev);
#endif /* _NPU_HW_ACCESS_H */
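
A minimal usage sketch for the accessor macros above; the register offset and bit value are hypothetical, not taken from the NPU register map:

/* Hedged sketch: read-modify-write a core register through REGR/REGW. */
static inline void npu_example_set_ctrl_bit(struct npu_device *npu_dev)
{
	uint32_t val = REGR(npu_dev, 0x0010);	/* hypothetical offset */

	REGW(npu_dev, 0x0010, val | 0x1);	/* hypothetical enable bit */
}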

File diff suppressed because it is too large

View File

@ -0,0 +1,147 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _NPU_MGR_H
#define _NPU_MGR_H
/* -------------------------------------------------------------------------
* Includes
* -------------------------------------------------------------------------
*/
#include <linux/spinlock.h>
#include "npu_hw_access.h"
#include "npu_common.h"
/* -------------------------------------------------------------------------
* Defines
* -------------------------------------------------------------------------
*/
#define NW_CMD_TIMEOUT_MS (1000 * 60 * 5) /* set for 5 minutes */
#define NW_CMD_TIMEOUT msecs_to_jiffies(NW_CMD_TIMEOUT_MS)
#define NW_DEBUG_TIMEOUT_MS (1000 * 60 * 30) /* set for 30 minutes */
#define NW_DEBUG_TIMEOUT msecs_to_jiffies(NW_DEBUG_TIMEOUT_MS)
#define FIRMWARE_VERSION 0x00001000
#define MAX_LOADED_NETWORK 32
#define NPU_IPC_BUF_LENGTH 512
#define FW_DBG_MODE_PAUSE (1 << 0)
#define FW_DBG_MODE_INC_TIMEOUT (1 << 1)
#define FW_DBG_DISABLE_WDOG (1 << 2)
#define FW_DBG_ENABLE_LOGGING (1 << 3)
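
The FW_DBG_* bits above appear designed to be OR-ed into the fw_dbg_mode field of struct npu_host_ctx (declared below); a hypothetical combination:

/* Sketch: pause the firmware at boot and keep its watchdog disabled. */
host_ctx->fw_dbg_mode = FW_DBG_MODE_PAUSE | FW_DBG_DISABLE_WDOG;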
/* -------------------------------------------------------------------------
* Data Structures
* -------------------------------------------------------------------------
*/
struct npu_network {
uint64_t id;
int buf_hdl;
uint64_t phy_add;
uint32_t size;
uint32_t first_block_size;
uint32_t network_hdl;
uint32_t priority;
uint32_t cur_perf_mode;
uint32_t init_perf_mode;
uint32_t num_layers;
void *stats_buf;
void __user *stats_buf_u;
uint32_t stats_buf_size;
uint32_t trans_id;
atomic_t ref_cnt;
bool is_valid;
bool is_active;
bool fw_error;
bool cmd_pending;
bool cmd_async;
int cmd_ret_status;
struct completion cmd_done;
struct npu_client *client;
};
enum fw_state {
FW_DISABLED = 0,
FW_ENABLED = 1,
};
struct npu_host_ctx {
struct mutex lock;
void *subsystem_handle;
struct npu_device *npu_dev;
enum fw_state fw_state;
int32_t fw_ref_cnt;
int32_t npu_init_cnt;
int32_t power_vote_num;
struct work_struct irq_work;
struct delayed_work fw_deinit_work;
atomic_t fw_deinit_work_cnt;
struct workqueue_struct *wq;
struct completion misc_done;
struct completion fw_deinit_done;
bool misc_pending;
void *prop_buf;
int32_t network_num;
struct npu_network networks[MAX_LOADED_NETWORK];
bool sys_cache_disable;
uint32_t fw_dbg_mode;
uint32_t exec_flags_override;
uint32_t fw_unload_delay_ms;
atomic_t ipc_trans_id;
atomic_t network_execute_cnt;
int cmd_ret_status;
uint32_t err_irq_sts;
uint32_t wdg_irq_sts;
bool fw_error;
};
struct npu_device;
/* -------------------------------------------------------------------------
* Function Prototypes
* -------------------------------------------------------------------------
*/
int npu_host_init(struct npu_device *npu_dev);
void npu_host_deinit(struct npu_device *npu_dev);
/* Host Driver IPC Interface */
int npu_host_ipc_pre_init(struct npu_device *npu_dev);
int npu_host_ipc_post_init(struct npu_device *npu_dev);
void npu_host_ipc_deinit(struct npu_device *npu_dev);
int npu_host_ipc_send_cmd(struct npu_device *npu_dev, uint32_t queueIndex,
void *pCmd);
int npu_host_ipc_read_msg(struct npu_device *npu_dev, uint32_t queueIndex,
uint32_t *pMsg);
int32_t npu_host_get_info(struct npu_device *npu_dev,
struct msm_npu_get_info_ioctl *get_info_ioctl);
int32_t npu_host_map_buf(struct npu_client *client,
struct msm_npu_map_buf_ioctl *map_ioctl);
int32_t npu_host_unmap_buf(struct npu_client *client,
struct msm_npu_unmap_buf_ioctl *unmap_ioctl);
int32_t npu_host_load_network(struct npu_client *client,
struct msm_npu_load_network_ioctl *load_ioctl);
int32_t npu_host_load_network_v2(struct npu_client *client,
struct msm_npu_load_network_ioctl_v2 *load_ioctl,
struct msm_npu_patch_info_v2 *patch_info);
int32_t npu_host_unload_network(struct npu_client *client,
struct msm_npu_unload_network_ioctl *unload);
int32_t npu_host_exec_network(struct npu_client *client,
struct msm_npu_exec_network_ioctl *exec_ioctl);
int32_t npu_host_exec_network_v2(struct npu_client *client,
struct msm_npu_exec_network_ioctl_v2 *exec_ioctl,
struct msm_npu_patch_buf_info *patch_buf_info);
int32_t npu_host_loopback_test(struct npu_device *npu_dev);
int32_t npu_host_set_fw_property(struct npu_device *npu_dev,
struct msm_npu_property *property);
int32_t npu_host_get_fw_property(struct npu_device *npu_dev,
struct msm_npu_property *property);
void npu_host_cleanup_networks(struct npu_client *client);
int32_t npu_host_set_perf_mode(struct npu_client *client, uint32_t network_hdl,
uint32_t perf_mode);
int32_t npu_host_get_perf_mode(struct npu_client *client, uint32_t network_hdl);
void npu_dump_debug_timeout_stats(struct npu_device *npu_dev);
#endif /* _NPU_MGR_H */

View File

@ -19,7 +19,7 @@
#include <linux/iommu.h>
#include <linux/micrel_phy.h>
#include <linux/rtnetlink.h>
#include <linux/suspend.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@ -171,6 +171,8 @@
void *ipc_emac_log_ctxt;
struct emac_emb_smmu_cb_ctx emac_emb_smmu_ctx = {0};
static int qcom_ethqos_hib_restore(struct device *dev);
static int qcom_ethqos_hib_freeze(struct device *dev);
struct plat_stmmacenet_data *plat_dat;
struct qcom_ethqos *pethqos;
@ -2375,7 +2377,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
} else {
ETHQOSERR("Phy interrupt configuration failed");
}
if (ethqos->emac_ver == EMAC_HW_v2_3_2_RG || ethqos->emac_ver == EMAC_HW_v2_3_1) {
ethqos_pps_irq_config(ethqos);
create_pps_interrupt_device_node(&ethqos->avb_class_a_dev_t,
&ethqos->avb_class_a_cdev,
@ -2422,6 +2424,11 @@ static int qcom_ethqos_remove(struct platform_device *pdev)
int ret;
struct stmmac_priv *priv;
if (of_device_is_compatible(pdev->dev.of_node, "qcom,emac-smmu-embedded")) {
of_platform_depopulate(&pdev->dev);
return 0;
}
ethqos = get_stmmac_bsp_priv(&pdev->dev);
if (!ethqos)
return -ENODEV;
@ -2430,6 +2437,13 @@ static int qcom_ethqos_remove(struct platform_device *pdev)
ret = stmmac_pltfr_remove(pdev);
if (ethqos->rgmii_clk)
clk_disable_unprepare(ethqos->rgmii_clk);
if (priv->plat->has_gmac4 && ethqos->phyaux_clk)
clk_disable_unprepare(ethqos->phyaux_clk);
if (priv->plat->has_gmac4 && ethqos->sgmiref_clk)
clk_disable_unprepare(ethqos->sgmiref_clk);
if (priv->plat->phy_intr_en_extn_stm)
free_irq(ethqos->phy_intr, ethqos);
priv->phy_irq_enabled = false;
@ -2441,9 +2455,21 @@ static int qcom_ethqos_remove(struct platform_device *pdev)
ethqos_disable_regulators(ethqos);
ethqos_clks_config(ethqos, false);
platform_set_drvdata(pdev, NULL);
of_platform_depopulate(&pdev->dev);
return ret;
}
static void qcom_ethqos_shutdown_main(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
if (!dev)
return;
qcom_ethqos_remove(pdev);
}
static int qcom_ethqos_suspend(struct device *dev)
{
struct qcom_ethqos *ethqos;
@ -2456,6 +2482,9 @@ static int qcom_ethqos_suspend(struct device *dev)
return 0;
}
if (pm_suspend_target_state == PM_SUSPEND_MEM)
return qcom_ethqos_hib_freeze(dev);
ethqos = get_stmmac_bsp_priv(dev);
if (!ethqos)
return -ENODEV;
@ -2491,6 +2520,9 @@ static int qcom_ethqos_resume(struct device *dev)
if (of_device_is_compatible(dev->of_node, "qcom,emac-smmu-embedded"))
return 0;
if (pm_suspend_target_state == PM_SUSPEND_MEM)
return qcom_ethqos_hib_restore(dev);
ethqos = get_stmmac_bsp_priv(dev);
if (!ethqos)
@ -2567,8 +2599,38 @@ static int qcom_ethqos_enable_clks(struct qcom_ethqos *ethqos, struct device *de
goto error_rgmii_get;
}
}
if (priv->plat->interface == PHY_INTERFACE_MODE_SGMII ||
priv->plat->interface == PHY_INTERFACE_MODE_USXGMII) {
ethqos->sgmiref_clk = devm_clk_get(dev, "sgmi_ref");
if (IS_ERR(ethqos->sgmiref_clk)) {
dev_warn(dev, "Failed sgmi_ref\n");
ret = PTR_ERR(ethqos->sgmiref_clk);
goto error_sgmi_ref;
} else {
ret = clk_prepare_enable(ethqos->sgmiref_clk);
if (ret)
goto error_sgmi_ref;
}
ethqos->phyaux_clk = devm_clk_get(dev, "phyaux");
if (IS_ERR(ethqos->phyaux_clk)) {
dev_warn(dev, "Failed phyaux\n");
ret = PTR_ERR(ethqos->phyaux_clk);
goto error_phyaux_ref;
} else {
ret = clk_prepare_enable(ethqos->phyaux_clk);
if (ret)
goto error_phyaux_ref;
}
}
return 0;
if (priv->plat->interface == PHY_INTERFACE_MODE_SGMII ||
priv->plat->interface == PHY_INTERFACE_MODE_USXGMII) {
error_phyaux_ref:
clk_disable_unprepare(ethqos->sgmiref_clk);
error_sgmi_ref:
clk_disable_unprepare(ethqos->rgmii_clk);
}
error_rgmii_get:
clk_disable_unprepare(priv->plat->pclk);
error_pclk_get:
@ -2591,6 +2653,12 @@ static void qcom_ethqos_disable_clks(struct qcom_ethqos *ethqos, struct device *
if (ethqos->rgmii_clk)
clk_disable_unprepare(ethqos->rgmii_clk);
if (priv->plat->has_gmac4 && ethqos->phyaux_clk)
clk_disable_unprepare(ethqos->phyaux_clk);
if (priv->plat->has_gmac4 && ethqos->sgmiref_clk)
clk_disable_unprepare(ethqos->sgmiref_clk);
ETHQOSINFO("Exit\n");
}
@ -2622,7 +2690,7 @@ static int qcom_ethqos_hib_restore(struct device *dev)
ret = ethqos_init_gpio(ethqos);
if (ret)
return ret;
ETHQOSINFO("GPIO init failed\n");
ret = qcom_ethqos_enable_clks(ethqos, dev);
if (ret)
@ -2652,11 +2720,6 @@ static int qcom_ethqos_hib_restore(struct device *dev)
#endif /* end of DWC_ETH_QOS_CONFIG_PTP */
if (!netif_running(ndev)) {
rtnl_lock();
@ -2710,6 +2773,8 @@ static int qcom_ethqos_hib_freeze(struct device *dev)
ethqos_free_gpios(ethqos);
ethqos->curr_serdes_speed = 0;
ETHQOSINFO("end\n");
return ret;
@ -2726,6 +2791,7 @@ static const struct dev_pm_ops qcom_ethqos_pm_ops = {
static struct platform_driver qcom_ethqos_driver = {
.probe = qcom_ethqos_probe,
.remove = qcom_ethqos_remove,
.shutdown = qcom_ethqos_shutdown_main,
.driver = {
.name = DRV_NAME,
.pm = &qcom_ethqos_pm_ops,
@ -2783,5 +2849,12 @@ module_init(qcom_ethqos_init_module)
module_exit(qcom_ethqos_exit_module)
#if IS_ENABLED(CONFIG_AQUANTIA_PHY)
MODULE_SOFTDEP("post: aquantia");
#endif
#if IS_ENABLED(CONFIG_MARVELL_PHY)
MODULE_SOFTDEP("post: marvell");
#endif
MODULE_DESCRIPTION("Qualcomm ETHQOS driver");
MODULE_LICENSE("GPL v2");

View File

@ -1188,7 +1188,7 @@ static int qcom_ethqos_serdes_update_sgmii(struct qcom_ethqos *ethqos,
switch (speed) {
case SPEED_1000:
if (ethqos->curr_serdes_speed != SPEED_1000)
ret = qcom_ethqos_serdes_sgmii_1Gb(ethqos);
ethqos->curr_serdes_speed = SPEED_1000;

View File

@ -380,8 +380,8 @@ static int dwmac4_get_hw_feature(void __iomem *ioaddr,
dma_cap->hash_tb_sz = (hw_cap & GMAC_HW_HASH_TB_SZ) >> 24;
dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
// dma_cap->sphen = (hw_cap & GMAC_HW_FEAT_SPHEN) >> 17;
dma_cap->sphen = 0;
dma_cap->addr64 = (hw_cap & GMAC_HW_ADDR64) >> 14;
switch (dma_cap->addr64) {
case 0:

View File

@ -454,6 +454,73 @@ int virtio_mdio_write_c45(struct mii_bus *bus, int addr, int devnum, int regnum,
}
EXPORT_SYMBOL_GPL(virtio_mdio_write_c45);
int virtio_mdio_read_c45_indirect(struct mii_bus *bus, int addr, int regnum)
{
struct phy_remote_access_t *phy_request = NULL;
unsigned long tmp;
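	/*
	 * Request/response over virtio: send the MDIO op to the backend,
	 * then block on the semaphore until it posts phy_reply or the
	 * wait times out.
	 */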
mutex_lock(&emac_mdio_fe_pdev->emac_mdio_fe_lock);
phy_request = &emac_mdio_fe_ctx->tx_msg.request_data;
memset(phy_request, 0, sizeof(*phy_request));
phy_request->mdio_type = MDIO_CLAUSE_45_DIRECT;
phy_request->mdio_op_remote_type = MDIO_REMOTE_OP_TYPE_READ;
phy_request->phyaddr = addr;
phy_request->phydev = mdiobus_c45_devad(regnum);
phy_request->phyreg = mdiobus_c45_regad(regnum);
emac_mdio_fe_ctx->tx_msg.type = VIRTIO_EMAC_MDIO_FE_REQ;
emac_mdio_fe_ctx->tx_msg.len = sizeof(struct fe_to_be_msg);
emac_mdio_fe_xmit(emac_mdio_fe_ctx);
EMAC_MDIO_FE_DBG("Sent VIRTIO_EMAC_MDIO_FE_REQ Event Cmd\n");
emac_mdio_fe_ctx->phy_reply = -1;
tmp = msecs_to_jiffies(WAIT_PHY_REPLY_MAX_TIMEOUT);
if (down_timeout(&emac_mdio_fe_ctx->emac_mdio_fe_sem, tmp) == -ETIME) {
EMAC_MDIO_FE_WARN("Wait for phy reply timeout\n");
mutex_unlock(&emac_mdio_fe_pdev->emac_mdio_fe_lock);
return -1;
}
mutex_unlock(&emac_mdio_fe_pdev->emac_mdio_fe_lock);
return (int)emac_mdio_fe_ctx->phy_reply;
}
EXPORT_SYMBOL_GPL(virtio_mdio_read_c45_indirect);
int virtio_mdio_write_c45_indirect(struct mii_bus *bus, int addr, int regnum, u16 val)
{
struct phy_remote_access_t *phy_request = NULL;
unsigned long tmp;
mutex_lock(&emac_mdio_fe_pdev->emac_mdio_fe_lock);
phy_request = &emac_mdio_fe_ctx->tx_msg.request_data;
memset(phy_request, 0, sizeof(*phy_request));
phy_request->mdio_type = MDIO_CLAUSE_45_DIRECT;
phy_request->mdio_op_remote_type = MDIO_REMOTE_OP_TYPE_WRITE;
phy_request->phyaddr = addr;
phy_request->phydev = mdiobus_c45_devad(regnum);
phy_request->phyreg = mdiobus_c45_regad(regnum);
phy_request->phydata = val;
emac_mdio_fe_ctx->tx_msg.type = VIRTIO_EMAC_MDIO_FE_REQ;
emac_mdio_fe_ctx->tx_msg.len = sizeof(struct fe_to_be_msg);
emac_mdio_fe_xmit(emac_mdio_fe_ctx);
EMAC_MDIO_FE_DBG("Sent VIRTIO_EMAC_MDIO_FE_REQ Event Cmd\n");
emac_mdio_fe_ctx->phy_reply = -1;
tmp = msecs_to_jiffies(WAIT_PHY_REPLY_MAX_TIMEOUT);
if (down_timeout(&emac_mdio_fe_ctx->emac_mdio_fe_sem, tmp) == -ETIME) {
EMAC_MDIO_FE_WARN("Wait for phy reply timeout\n");
mutex_unlock(&emac_mdio_fe_pdev->emac_mdio_fe_lock);
return -1;
}
mutex_unlock(&emac_mdio_fe_pdev->emac_mdio_fe_lock);
return (int)emac_mdio_fe_ctx->phy_reply;
}
EXPORT_SYMBOL_GPL(virtio_mdio_write_c45_indirect);
static int emac_mdio_fe_probe(struct virtio_device *vdev)
{
int ret;
@ -525,5 +592,6 @@ static void __exit emac_mdio_fe_exit(void)
module_init(emac_mdio_fe_init);
module_exit(emac_mdio_fe_exit);
MODULE_SOFTDEP("post: stmmac");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("EMAC Virt MDIO FE Driver");

View File

@ -17,6 +17,10 @@ int virtio_mdio_read_c45(struct mii_bus *bus, int addr, int devnum, int regnum);
int virtio_mdio_write_c45(struct mii_bus *bus, int addr, int devnum, int regnum, u16 val);
int virtio_mdio_read_c45_indirect(struct mii_bus *bus, int addr, int regnum);
int virtio_mdio_write_c45_indirect(struct mii_bus *bus, int addr, int regnum, u16 val);
#else
static inline int virtio_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
@ -39,6 +43,21 @@ static inline int virtio_mdio_read_c45(struct mii_bus *bus, int addr, int devnum
static inline int virtio_mdio_write_c45(struct mii_bus *bus,
int addr, int devnum,
int regnum, u16 val)
{
/* Not enabled */
return 0;
}
static inline int virtio_mdio_read_c45_indirect(struct mii_bus *bus, int addr,
int regnum)
{
/* Not enabled */
return 0;
}
static inline int virtio_mdio_write_c45_indirect(struct mii_bus *bus, int addr,
int regnum, u16 val)
{
/* Not enabled */
return 0;

View File

@ -1821,6 +1821,88 @@ static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 q
return xsk_get_pool_from_qid(priv->dev, queue);
}
static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv, struct stmmac_dma_conf *dma_conf)
{
u32 rx_count = priv->plat->rx_queues_to_use;
u32 queue;
int i;
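	/* Pass 1: return every RX buffer page to the page pool. */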
for (queue = 0; queue < rx_count; queue++) {
struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
if (buf->page) {
page_pool_recycle_direct(rx_q->page_pool, buf->page);
buf->page = NULL;
}
if (priv->sph && buf->sec_page) {
page_pool_recycle_direct(rx_q->page_pool, buf->sec_page);
buf->sec_page = NULL;
}
}
}
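	/* Pass 2: allocate fresh pages and re-program each RX descriptor. */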
for (queue = 0; queue < rx_count; queue++) {
struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
for (i = 0; i < dma_conf->dma_rx_size; i++) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
struct dma_desc *p;
if (priv->extend_desc)
p = &((rx_q->dma_erx + i)->basic);
else
p = rx_q->dma_rx + i;
if (!buf->page) {
buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
if (!buf->page)
goto err_reinit_rx_buffers;
buf->addr = page_pool_get_dma_addr(buf->page);
if (!buf->addr) {
pr_err("buf->addr is NULL\n");
goto err_reinit_rx_buffers;
}
}
if (priv->sph && !buf->sec_page) {
buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
if (!buf->sec_page)
goto err_reinit_rx_buffers;
buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
if (!buf->sec_addr) {
pr_err("buf->sec_addr is NULL\n");
goto err_reinit_rx_buffers;
}
stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
} else {
buf->sec_page = NULL;
stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
}
stmmac_set_desc_addr(priv, p, buf->addr);
if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
stmmac_init_desc3(priv, p);
}
}
return;
err_reinit_rx_buffers:
pr_err(" error in reinit_rx_buffers\n");
do {
dma_free_rx_skbufs(priv, dma_conf, queue);
if (queue == 0)
break;
} while (queue-- > 0);
}
/**
* __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
* @priv: driver private structure
@ -7479,8 +7561,12 @@ int stmmac_dvr_probe(struct device *device,
ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
ndev->vlan_features |= ndev->hw_features;
priv->dma_cap.vlhash = 0;
priv->dma_cap.vlins = 0;
/* Both mac100 and gmac support receive VLAN tag detection */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
priv->dma_cap.vlhash = 0;
priv->dma_cap.vlins = 0;
if (priv->dma_cap.vlhash) {
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
@ -7832,6 +7918,8 @@ int stmmac_resume(struct device *dev)
stmmac_reset_queues_param(priv);
stmmac_reinit_rx_buffers(priv, &priv->dma_conf);
stmmac_free_tx_skbufs(priv);
stmmac_clear_descriptors(priv, &priv->dma_conf);

View File

@ -506,9 +506,12 @@ int stmmac_mdio_register(struct net_device *ndev)
} else {
err = new_bus->read(new_bus, phyaddr, MII_BMSR);
if (err == -EBUSY || !err || err == 0xffff) {
dev_warn(dev, "Invalid PHY address read from dtsi: %d\n",
phyaddr);
new_bus->phy_mask = mdio_bus_data->phy_mask;
err = of_property_read_u32(np, "emac-cl45-phy-addr", &phyaddr);
new_bus->phy_mask = ~(1 << phyaddr);
skip_phy_detect = 1;
new_bus->read = &virtio_mdio_read_c45_indirect;
new_bus->write = &virtio_mdio_write_c45_indirect;
new_bus->probe_capabilities = MDIOBUS_C22_C45;
} else {
new_bus->phy_mask = ~(1 << phyaddr);
skip_phy_detect = 1;

View File

@ -204,6 +204,16 @@ static const struct qfprom_soc_compatible_data niobe_qfprom = {
.nkeepout = ARRAY_SIZE(niobe_qfprom_keepout)
};
static const struct nvmem_keepout pineapple_qfprom_keepout[] = {
{.start = 0, .end = 0x9b},
{.start = 0x9c, .end = 0x1000},
};
static const struct qfprom_soc_compatible_data pineapple_qfprom = {
.keepout = pineapple_qfprom_keepout,
.nkeepout = ARRAY_SIZE(pineapple_qfprom_keepout)
};
/**
* qfprom_disable_fuse_blowing() - Undo enabling of fuse blowing.
* @priv: Our driver data.
@ -547,6 +557,7 @@ static const struct of_device_id qfprom_of_match[] = {
{ .compatible = "qcom,cliffs-qfprom", .data = &cliffs_qfprom},
{ .compatible = "qcom,pitti-qfprom", .data = &pitti_qfprom},
{ .compatible = "qcom,niobe-qfprom", .data = &niobe_qfprom},
{ .compatible = "qcom,pineapple-qfprom", .data = &pineapple_qfprom},
{/* sentinel */},
};
MODULE_DEVICE_TABLE(of, qfprom_of_match);

View File

@ -1284,6 +1284,8 @@ struct msm_pcie_dev_t {
#if IS_ENABLED(CONFIG_I2C)
struct pcie_i2c_ctrl i2c_ctrl;
#endif
bool fmd_enable;
};
struct msm_root_dev_t {
@ -1596,6 +1598,55 @@ int msm_pcie_reg_dump(struct pci_dev *pci_dev, u8 *buff, u32 len)
}
EXPORT_SYMBOL(msm_pcie_reg_dump);
static void msm_pcie_config_perst(struct msm_pcie_dev_t *dev, bool assert)
{
if (dev->fmd_enable) {
pr_err("PCIe: FMD is enabled for RC%d\n", dev->rc_idx);
return;
}
if (assert) {
PCIE_INFO(dev, "PCIe: RC%d: assert PERST\n",
dev->rc_idx);
gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
dev->gpio[MSM_PCIE_GPIO_PERST].on);
} else {
PCIE_INFO(dev, "PCIe: RC%d: de-assert PERST\n",
dev->rc_idx);
gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
}
}
int msm_pcie_fmd_enable(struct pci_dev *pci_dev)
{
struct pci_dev *root_pci_dev;
struct msm_pcie_dev_t *pcie_dev;
root_pci_dev = pcie_find_root_port(pci_dev);
if (!root_pci_dev)
return -ENODEV;
pcie_dev = PCIE_BUS_PRIV_DATA(root_pci_dev->bus);
if (!pcie_dev) {
pr_err("PCIe: did not find RC for pci endpoint device.\n");
return -ENODEV;
}
PCIE_INFO(pcie_dev, "RC%d Enable FMD\n", pcie_dev->rc_idx);
if (pcie_dev->fmd_enable) {
pr_err("PCIe: FMD is already enabled for RC%d\n", pcie_dev->rc_idx);
return 0;
}
if (!gpio_get_value(pcie_dev->gpio[MSM_PCIE_GPIO_PERST].num))
msm_pcie_config_perst(pcie_dev, false);
pcie_dev->fmd_enable = true;
return 0;
}
EXPORT_SYMBOL_GPL(msm_pcie_fmd_enable);
static void msm_pcie_write_reg(void __iomem *base, u32 offset, u32 value)
{
writel_relaxed(value, base + offset);
@ -2432,15 +2483,13 @@ static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
case MSM_PCIE_ASSERT_PERST:
PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: assert PERST\n\n",
dev->rc_idx);
msm_pcie_config_perst(dev, true);
usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);
break;
case MSM_PCIE_DEASSERT_PERST:
PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: de-assert PERST\n\n",
dev->rc_idx);
msm_pcie_config_perst(dev, false);
usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);
break;
case MSM_PCIE_KEEP_RESOURCES_ON:
@ -5764,8 +5813,7 @@ static int msm_pcie_link_train(struct msm_pcie_dev_t *dev)
#endif
PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
dev->rc_idx);
msm_pcie_config_perst(dev, true);
PCIE_ERR(dev, "PCIe RC%d link initialization failed\n",
dev->rc_idx);
return MSM_PCIE_ERROR;
@ -6171,8 +6219,7 @@ static int msm_pcie_enable_link(struct msm_pcie_dev_t *dev)
PCIE_INFO(dev, "PCIe: Release the reset of endpoint of RC%d.\n",
dev->rc_idx);
msm_pcie_config_perst(dev, false);
usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);
ep_up_timeout = jiffies + usecs_to_jiffies(EP_UP_TIMEOUT_US);
@ -6289,10 +6336,8 @@ static int msm_pcie_enable(struct msm_pcie_dev_t *dev)
PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
dev->rc_idx);
msm_pcie_config_perst(dev, true);
usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);
/* enable power */
ret = msm_pcie_vreg_init(dev);
@ -6422,8 +6467,7 @@ static void msm_pcie_disable(struct msm_pcie_dev_t *dev)
PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
dev->rc_idx);
msm_pcie_config_perst(dev, true);
if (dev->phy_power_down_offset)
msm_pcie_write_reg(dev->phy, dev->phy_power_down_offset, 0);
@ -6588,6 +6632,7 @@ int msm_pcie_enumerate(u32 rc_idx)
PCIE_DBG(dev, "Enumerate RC%d\n", rc_idx);
dev->fmd_enable = false;
if (!dev->drv_ready) {
PCIE_DBG(dev,
"PCIe: RC%d: has not been successfully probed yet\n",
@ -7528,7 +7573,7 @@ static void msm_pcie_handle_linkdown(struct msm_pcie_dev_t *dev)
return;
}
if (!dev->suspending && !dev->fmd_enable) {
/* PCIe registers dump on link down */
PCIE_DUMP(dev,
"PCIe:Linkdown IRQ for RC%d Dumping PCIe registers\n",
@ -7550,8 +7595,7 @@ static void msm_pcie_handle_linkdown(struct msm_pcie_dev_t *dev)
/* assert PERST */
if (!(msm_pcie_keep_resources_on & BIT(dev->rc_idx)))
msm_pcie_config_perst(dev, true);
PCIE_ERR(dev, "PCIe link is down for RC%d\n", dev->rc_idx);

View File

@ -23,6 +23,18 @@ config PINCTRL_PINEAPPLE
Say Y here to compile statically, or M here to compile it as a
module. If unsure, say N.
config PINCTRL_NEO
tristate "Qualcomm Technologies Inc NEO pin controller driver"
depends on GPIOLIB && OF
select PINCTRL_MSM
help
	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
	  Qualcomm Technologies Inc Top Level Mode Multiplexer (TLMM) block
	  found on the Qualcomm Technologies Inc NEO platforms.
	  This driver can also be used for a target supporting a secondary VM.
Say Y here to compile statically, or M here to compile it as a module.
If unsure, say N.
config PINCTRL_ANORAK
tristate "Qualcomm Technologies Inc ANORAK pin controller driver"
depends on GPIOLIB && OF
@ -46,6 +58,17 @@ config PINCTRL_NIOBE
Say Y here to compile statically, or M here to compile it as a
module. If unsure, say N.
config PINCTRL_SERAPH
tristate "Qualcomm Technologies, Inc. SERAPH pin controller driver"
depends on GPIOLIB && OF
select PINCTRL_MSM
help
	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
	  Qualcomm Technologies, Inc. Top Level Mode Multiplexer (TLMM) block
	  found on the Qualcomm Technologies, Inc. SERAPH platform.
Say Y here to compile statically, or M here to compile it as a
module. If unsure, say N.
config PINCTRL_CLIFFS
tristate "Qualcomm Technologies, Inc. CLIFFS pin controller driver"
depends on GPIOLIB && OF

View File

@ -2,8 +2,10 @@
# Qualcomm pin control drivers
obj-$(CONFIG_PINCTRL_MSM) += pinctrl-msm.o
obj-$(CONFIG_PINCTRL_PINEAPPLE) += pinctrl-pineapple.o
obj-$(CONFIG_PINCTRL_NEO) += pinctrl-neo.o
obj-$(CONFIG_PINCTRL_ANORAK) += pinctrl-anorak.o
obj-$(CONFIG_PINCTRL_NIOBE) += pinctrl-niobe.o
obj-$(CONFIG_PINCTRL_SERAPH) += pinctrl-seraph.o
obj-$(CONFIG_PINCTRL_CLIFFS) += pinctrl-cliffs.o
obj-$(CONFIG_PINCTRL_KALAMA) += pinctrl-kalama.o
obj-$(CONFIG_PINCTRL_BLAIR) += pinctrl-blair.o

View File

@ -0,0 +1,69 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-msm.h"
#include "pinctrl-neo.h"
static const struct msm_pinctrl_soc_data neo_pinctrl = {
.pins = neo_pins,
.npins = ARRAY_SIZE(neo_pins),
.functions = neo_functions,
.nfunctions = ARRAY_SIZE(neo_functions),
.groups = neo_groups,
.ngroups = ARRAY_SIZE(neo_groups),
.ngpios = 156,
.qup_regs = neo_qup_regs,
.nqup_regs = ARRAY_SIZE(neo_qup_regs),
.wakeirq_map = neo_pdc_map,
.nwakeirq_map = ARRAY_SIZE(neo_pdc_map),
};
static int neo_pinctrl_probe(struct platform_device *pdev)
{
const struct msm_pinctrl_soc_data *pinctrl_data;
pinctrl_data = of_device_get_match_data(&pdev->dev);
if (!pinctrl_data)
return -EINVAL;
return msm_pinctrl_probe(pdev, pinctrl_data);
}
static const struct of_device_id neo_pinctrl_of_match[] = {
{ .compatible = "qcom,neo-pinctrl", .data = &neo_pinctrl},
{ },
};
static struct platform_driver neo_pinctrl_driver = {
.driver = {
.name = "neo-pinctrl",
.of_match_table = neo_pinctrl_of_match,
},
.probe = neo_pinctrl_probe,
.remove = msm_pinctrl_remove,
};
static int __init neo_pinctrl_init(void)
{
return platform_driver_register(&neo_pinctrl_driver);
}
arch_initcall(neo_pinctrl_init);
static void __exit neo_pinctrl_exit(void)
{
platform_driver_unregister(&neo_pinctrl_driver);
}
module_exit(neo_pinctrl_exit);
MODULE_DESCRIPTION("QTI neo pinctrl driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(of, neo_pinctrl_of_match);
MODULE_SOFTDEP("pre: qcom_tlmm_vm_irqchip");

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019, 2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "qcom-reboot-reason: %s: " fmt, __func__
#include <linux/err.h>
#include <linux/init.h>
@ -38,6 +40,7 @@ static struct poweroff_reason reasons[] = {
static int qcom_reboot_reason_reboot(struct notifier_block *this,
unsigned long event, void *ptr)
{
int rc;
char *cmd = ptr;
struct qcom_reboot_reason *reboot = container_of(this,
struct qcom_reboot_reason, reboot_nb);
@ -47,9 +50,11 @@ static int qcom_reboot_reason_reboot(struct notifier_block *this,
return NOTIFY_OK;
for (reason = reasons; reason->cmd; reason++) {
if (!strcmp(cmd, reason->cmd)) {
rc = nvmem_cell_write(reboot->nvmem_cell,
&reason->pon_reason,
sizeof(reason->pon_reason));
if (rc < 0)
pr_err("PON reason store failed, rc=%d\n", rc);
break;
}
}

View File

@ -269,6 +269,7 @@ struct battery_chg_dev {
bool block_tx;
bool ship_mode_en;
bool debug_battery_detected;
bool wls_not_supported;
bool wls_fw_update_reqd;
u32 wls_fw_version;
u16 wls_fw_crc;
@ -1584,13 +1585,19 @@ static int battery_chg_init_psy(struct battery_chg_dev *bcdev)
}
}
if (bcdev->wls_not_supported) {
pr_debug("Wireless charging is not supported\n");
} else {
bcdev->psy_list[PSY_TYPE_WLS].psy =
devm_power_supply_register(bcdev->dev, &wls_psy_desc, &psy_cfg);
if (IS_ERR(bcdev->psy_list[PSY_TYPE_WLS].psy)) {
rc = PTR_ERR(bcdev->psy_list[PSY_TYPE_WLS].psy);
bcdev->psy_list[PSY_TYPE_WLS].psy = NULL;
pr_err("Failed to register wireless power supply, rc=%d\n", rc);
return rc;
}
}
bcdev->psy_list[PSY_TYPE_BATTERY].psy =
@ -2258,6 +2265,44 @@ static struct attribute *battery_class_usb_2_attrs[] = {
};
ATTRIBUTE_GROUPS(battery_class_usb_2);
static struct attribute *battery_class_no_wls_attrs[] = {
&class_attr_soh.attr,
&class_attr_resistance.attr,
&class_attr_moisture_detection_status.attr,
&class_attr_moisture_detection_en.attr,
&class_attr_fake_soc.attr,
&class_attr_ship_mode_en.attr,
&class_attr_restrict_chg.attr,
&class_attr_restrict_cur.attr,
&class_attr_usb_real_type.attr,
&class_attr_usb_typec_compliant.attr,
&class_attr_usb_num_ports.attr,
&class_attr_charge_control_en.attr,
NULL,
};
ATTRIBUTE_GROUPS(battery_class_no_wls);
static struct attribute *battery_class_usb_2_no_wls_attrs[] = {
&class_attr_soh.attr,
&class_attr_resistance.attr,
&class_attr_moisture_detection_status.attr,
&class_attr_moisture_detection_usb_2_status.attr,
&class_attr_moisture_detection_en.attr,
&class_attr_moisture_detection_usb_2_en.attr,
&class_attr_fake_soc.attr,
&class_attr_ship_mode_en.attr,
&class_attr_restrict_chg.attr,
&class_attr_restrict_cur.attr,
&class_attr_usb_real_type.attr,
&class_attr_usb_2_real_type.attr,
&class_attr_usb_typec_compliant.attr,
&class_attr_usb_num_ports.attr,
&class_attr_usb_2_typec_compliant.attr,
&class_attr_charge_control_en.attr,
NULL,
};
ATTRIBUTE_GROUPS(battery_class_usb_2_no_wls);
#ifdef CONFIG_DEBUG_FS
static void battery_chg_add_debugfs(struct battery_chg_dev *bcdev)
{
@ -2286,6 +2331,9 @@ static int battery_chg_parse_dt(struct battery_chg_dev *bcdev)
int i, rc, len;
u32 prev, val;
bcdev->wls_not_supported = of_property_read_bool(node,
"qcom,wireless-charging-not-supported");
of_property_read_string(node, "qcom,wireless-fw-name",
&bcdev->wls_fw_name);
@ -2623,8 +2671,12 @@ static int battery_chg_probe(struct platform_device *pdev)
bcdev->battery_class.name = "qcom-battery";
if (bcdev->num_usb_ports == 2 && bcdev->wls_not_supported)
bcdev->battery_class.class_groups = battery_class_usb_2_no_wls_groups;
else if (bcdev->num_usb_ports == 2)
bcdev->battery_class.class_groups = battery_class_usb_2_groups;
else if (bcdev->wls_not_supported)
bcdev->battery_class.class_groups = battery_class_no_wls_groups;
else
bcdev->battery_class.class_groups = battery_class_groups;

View File

@ -29,8 +29,11 @@
#define PWM_CYC_CFG 0xC
#define PWM_UPDATE 0x10
#define PWM_PERIOD_CNT 0x14
#define PWM_RESET 0x18
#define PWM_FRAME_POLARITY_BIT BIT(0)
#define PWM_FRAME_ROLLOVER_CNT_BIT BIT(4)
#define PWM_FRAME_RESET_BIT BIT(0)
enum {
ENABLE_STATUS0,
@ -42,6 +45,8 @@ enum {
struct pdm_pwm_priv_data {
unsigned int max_channels;
const u16 *status_reg_offsets;
bool pwm_reset_support;
bool pwm_cnt_rollover;
};
/*
@ -54,6 +59,7 @@ struct pdm_pwm_priv_data {
* @current_freq: Current frequency of frame.
* @freq_set: This bool flag is responsible for setting period once per frame.
* @mutex: mutex lock per frame.
* @cnt_rollover_en: This bool flag is used to set rollover bit per frame.
*/
struct pdm_pwm_frames {
u32 frame_id;
@ -66,6 +72,7 @@ struct pdm_pwm_frames {
bool freq_set;
struct mutex frame_lock; /* PWM per frame lock */
struct pdm_pwm_chip *pwm_chip;
bool cnt_rollover_en;
};
/*
@ -100,8 +107,11 @@ static int __pdm_pwm_calc_pwm_frequency(struct pdm_pwm_chip *chip,
unsigned long cyc_cfg, freq;
int ret;
/*
* PWM client can set the period only once if the HW version does
* not support reset functionality.
*/
if (chip->frames[hw_idx].freq_set && !chip->priv_data->pwm_reset_support)
return 0;
freq = PERIOD_TO_HZ(period_ns);
@ -167,18 +177,34 @@ static int pdm_pwm_config(struct pdm_pwm_chip *chip, u32 hw_idx,
mutex_lock(&chip->frames[hw_idx].frame_lock);
/*
 * Set the counter rollover enable bit so that the counter doesn't get
 * stuck during a period-change configuration.
*/
if (chip->priv_data->pwm_cnt_rollover && !chip->frames[hw_idx].cnt_rollover_en) {
regmap_update_bits(chip->regmap, chip->frames[hw_idx].reg_offset + PWM_CTL0,
PWM_FRAME_ROLLOVER_CNT_BIT, PWM_FRAME_ROLLOVER_CNT_BIT);
chip->frames[hw_idx].cnt_rollover_en = true;
}
ret = __pdm_pwm_calc_pwm_frequency(chip, current_period, hw_idx);
if (ret)
goto out;
if (chip->frames[hw_idx].current_period_ns != period_ns) {
pr_err("Period cannot be updated, calculating dutycycle on old period\n");
current_period = chip->frames[hw_idx].current_period_ns;
if (chip->priv_data->pwm_reset_support)
regmap_update_bits(chip->regmap,
chip->frames[hw_idx].reg_offset + PWM_RESET,
PWM_FRAME_RESET_BIT, PWM_FRAME_RESET_BIT);
else {
pr_err("Period cannot be updated, calculating dutycycle on old period\n");
current_period = chip->frames[hw_idx].current_period_ns;
}
}
if (chip->frames[hw_idx].polarity != polarity) {
regmap_update_bits(chip->regmap, chip->frames[hw_idx].reg_offset
+ PWM_CTL0, PWM_FRAME_POLARITY_BIT, polarity);
chip->frames[hw_idx].polarity = polarity;
}
@ -220,21 +246,6 @@ static int pdm_pwm_config(struct pdm_pwm_chip *chip, u32 hw_idx,
return ret;
}
static int pdm_pwm_enable(struct pdm_pwm_chip *chip, struct pwm_device *pwm)
{
u32 ret, val;
@ -305,7 +316,7 @@ static int pdm_pwm_apply(struct pwm_chip *pwm_chip, struct pwm_device *pwm,
pwm_get_state(pwm, &curr_state);
if (state->period < curr_state.period && !chip->priv_data->pwm_reset_support)
return -EINVAL;
if (state->period != curr_state.period ||
@ -331,6 +342,24 @@ static int pdm_pwm_apply(struct pwm_chip *pwm_chip, struct pwm_device *pwm,
return 0;
}
static void pdm_pwm_free(struct pwm_chip *pwm_chip, struct pwm_device *pwm)
{
struct pdm_pwm_chip *chip = container_of(pwm_chip,
struct pdm_pwm_chip, pwm_chip);
u32 hw_idx = pwm->hwpwm;
mutex_lock(&chip->lock);
chip->frames[hw_idx].freq_set = false;
chip->frames[hw_idx].current_period_ns = 0;
chip->frames[hw_idx].current_duty_ns = 0;
chip->frames[hw_idx].cnt_rollover_en = false;
mutex_unlock(&chip->lock);
pdm_pwm_disable(chip, pwm);
}
static const struct pwm_ops pdm_pwm_ops = {
.apply = pdm_pwm_apply,
.free = pdm_pwm_free,
@ -465,7 +494,7 @@ static int get_polarity(struct seq_file *m, void *unused)
u32 temp;
regmap_read(chip->regmap, frame->reg_offset + PWM_CTL0, &temp);
if (PWM_FRAME_POLARITY_BIT & temp)
seq_puts(m, "PWM_POLARITY_INVERSED\n");
else
seq_puts(m, "PWM_POLARITY_NORMAL\n");
@ -672,6 +701,8 @@ static struct pdm_pwm_priv_data pdm_pwm_v2_reg_offsets = {
[ENABLE_STATUS0] = 0xc,
[ENABLE_STATUS1] = 0x10,
},
.pwm_reset_support = true,
.pwm_cnt_rollover = true,
};
static const struct of_device_id pdm_pwm_of_match[] = {

View File

@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. */
/* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved. */
#define pr_fmt(fmt) "ap72200-reg: %s: " fmt, __func__
@ -44,6 +44,8 @@ static int ap72200_vreg_enable(struct regulator_dev *rdev)
struct ap72200_vreg *vreg = rdev_get_drvdata(rdev);
int rc, val;
gpiod_set_value_cansleep(vreg->ena_gpiod, 1);
val = DIV_ROUND_UP(vreg->rdesc.fixed_uV - AP72200_MIN_UV, AP72200_STEP_UV);
/* Set the voltage */
@ -82,6 +84,8 @@ static int ap72200_vreg_disable(struct regulator_dev *rdev)
vreg->is_enabled = false;
gpiod_set_value_cansleep(vreg->ena_gpiod, 0);
return rc;
}
@ -157,9 +161,6 @@ static int ap72200_probe(struct i2c_client *client,
return PTR_ERR(vreg->ena_gpiod);
}
vreg->rdev = devm_regulator_register(vreg->dev, &vreg->rdesc, &reg_config);
if (IS_ERR(vreg->rdev)) {
ret = PTR_ERR(vreg->rdev);

View File

@ -33,7 +33,6 @@ struct qcom_q6v5 {
int ready_irq;
int handover_irq;
int stop_irq;
struct rproc_subdev *ssr_subdev;
@ -43,7 +42,6 @@ struct qcom_q6v5 {
struct completion start_done;
struct completion stop_done;
int crash_reason;

View File

@ -61,6 +61,7 @@ static bool recovery_set_cb;
#define SOCCP_SLEEP_US 100
#define SOCCP_TIMEOUT_US 10000
#define SOCCP_STATE_MASK 0x600
#define SPARE_REG_SOCCP_D0 0x1
#define SOCCP_D0 0x2
#define SOCCP_D1 0x4
#define SOCCP_D3 0x8
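/*
 * Assumption: SOCCP_D0/D1/D3 mirror device power states (D0 fully awake,
 * D3 asleep), inferred from the D3->D0 wake and D0->D3 sleep paths below.
 */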
@ -89,6 +90,7 @@ struct adsp_data {
const char *sysmon_name;
const char *qmp_name;
int ssctl_id;
unsigned int smem_host_id;
bool check_status;
};
@ -125,6 +127,7 @@ struct qcom_adsp {
bool retry_shutdown;
struct icc_path *bus_client;
int crash_reason_smem;
unsigned int smem_host_id;
bool has_aggre2_clk;
bool dma_phys_below_32b;
bool decrypt_shutdown;
@ -162,7 +165,8 @@ struct qcom_adsp {
unsigned int wake_bit;
unsigned int sleep_bit;
int current_users;
void *tcsr_addr;
void *spare_reg_addr;
bool check_status;
};
@ -812,32 +816,37 @@ static int adsp_start(struct rproc *rproc)
return ret;
}
/**
 * rproc_config_check() - Poll a status register for a state change
 * @adsp: per-remoteproc driver data
 * @state: new state of the rproc
 * @addr: address to poll on
 *
 * Polled read on a register with a 100-200us interval and a retry budget
 * of roughly 5-10ms. Returns immediately once the expected value is read
 * back from @addr.
 *
 * Return: 0 if the expected value is read back from the address,
 * -ETIMEDOUT if it was not read before the poll timed out.
 */
static int rproc_config_check(struct qcom_adsp *adsp, u32 state, void *addr)
{
unsigned int retry_num = 50;
u32 val;
do {
usleep_range(SOCCP_SLEEP_US, SOCCP_SLEEP_US + 100);
val = readl(addr);
} while (!(val & state) && --retry_num);
return (val & state) ? 0 : -ETIMEDOUT;
}
static int rproc_config_check_atomic(struct qcom_adsp *adsp, u32 state, void *addr)
{
u32 val;
return readx_poll_timeout_atomic(readl, addr, val,
val == state, SOCCP_SLEEP_US, SOCCP_TIMEOUT_US);
}
@ -853,31 +862,49 @@ static int rproc_find_status_register(struct qcom_adsp *adsp)
{
struct device_node *tcsr;
struct device_node *np = adsp->dev->of_node;
u32 offset;
struct resource res;
u32 offset, addr;
int ret;
void *tcsr_base;
tcsr = of_parse_phandle(np, "soccp-config", 0);
tcsr = of_parse_phandle(np, "soccp-tcsr", 0);
if (!tcsr) {
dev_err(adsp->dev, "Unable to find the soccp config register\n");
return -EINVAL;
}
ret = of_address_to_resource(tcsr, 0, &res);
of_node_put(tcsr);
if (ret) {
dev_err(adsp->dev, "Unable to find the tcsr base addr\n");
return ret;
}
tcsr_base = ioremap_wc(res.start, resource_size(&res));
if (!tcsr_base) {
dev_err(adsp->dev, "Unable to find the tcsr base addr\n");
return -ENOMEM;
}
ret = of_property_read_u32_index(np, "soccp-config", 1, &offset);
ret = of_property_read_u32_index(np, "soccp-tcsr", 1, &offset);
if (ret < 0) {
dev_err(adsp->dev, "Unable to find the tcsr offset addr\n");
dev_err(adsp->dev, "Unable to find the tcsr config offset addr\n");
iounmap(tcsr_base);
return ret;
}
adsp->tcsr_addr = tcsr_base + offset;
ret = of_property_read_u32(np, "soccp-spare", &addr);
if (!addr) {
dev_err(adsp->dev, "Unable to find the running config register\n");
return -EINVAL;
}
adsp->spare_reg_addr = ioremap_wc(addr, 4);
if (!adsp->spare_reg_addr) {
dev_err(adsp->dev, "Unable to find the tcsr base addr\n");
return -ENOMEM;
}
return 0;
}
@ -941,14 +968,18 @@ int rproc_set_state(struct rproc *rproc, bool state)
goto soccp_out;
}
ret = do_bus_scaling(adsp, true);
if (ret) {
dev_err(adsp->q6v5.dev, "failed to set bandwidth request\n");
goto soccp_out;
}
ret = clk_prepare_enable(adsp->xo);
if (ret) {
dev_err(adsp->dev, "failed to enable clks\n");
goto soccp_out;
}
ret = qcom_smem_state_update_bits(adsp->wake_state,
SOCCP_STATE_MASK,
BIT(adsp->wake_bit));
@ -957,21 +988,25 @@ int rproc_set_state(struct rproc *rproc, bool state)
goto soccp_out;
}
ret = rproc_config_check(adsp, SOCCP_D0 | SOCCP_D1, adsp->tcsr_addr);
if (ret) {
	dev_err(adsp->dev, "%s requested D3->D0: soccp failed to update tcsr val=%d\n",
		current->comm, readl(adsp->tcsr_addr));
goto soccp_out;
}
ret = rproc_config_check(adsp, SPARE_REG_SOCCP_D0, adsp->spare_reg_addr);
if (ret) {
	ret = qcom_smem_state_update_bits(adsp->wake_state,
					  SOCCP_STATE_MASK,
					  !BIT(adsp->wake_bit));
	if (ret)
		dev_err(adsp->dev, "failed to clear smem bits after a failed D0 request\n");
	dev_err(adsp->dev, "%s requested D3->D0: soccp failed to update spare reg val=%d\n",
		current->comm, readl(adsp->spare_reg_addr));
	goto soccp_out;
}
adsp->current_users = 1;
} else {
@ -988,14 +1023,26 @@ int rproc_set_state(struct rproc *rproc, bool state)
goto soccp_out;
}
ret = rproc_config_check(adsp, SOCCP_D3, adsp->tcsr_addr);
if (ret) {
ret = qcom_smem_state_update_bits(adsp->sleep_state,
SOCCP_STATE_MASK,
!BIT(adsp->sleep_bit));
if (ret)
dev_err(adsp->dev, "failed to clear smem bits after a failed D3 request\n");
dev_err(adsp->dev, "%s requested D0->D3 failed: TCSR value:%d\n",
current->comm, readl(adsp->tcsr_addr));
goto soccp_out;
}
disable_regulators(adsp);
clk_disable_unprepare(adsp->xo);
ret = do_bus_scaling(adsp, false);
if (ret < 0) {
dev_err(adsp->q6v5.dev, "failed to set bandwidth request\n");
goto soccp_out;
}
adsp->current_users = 0;
}
}
@ -1030,7 +1077,11 @@ static int rproc_panic_handler(struct notifier_block *this,
dev_err(adsp->dev, "failed to update smem bits for D3 to D0\n");
goto done;
}
ret = rproc_config_check_atomic(adsp, SOCCP_D0, adsp->tcsr_addr);
if (ret)
dev_err(adsp->dev, "failed to change to D0\n");
ret = rproc_config_check_atomic(adsp, SPARE_REG_SOCCP_D0, adsp->spare_reg_addr);
if (ret)
dev_err(adsp->dev, "failed to change to D0\n");
done:
@ -1043,11 +1094,13 @@ static void qcom_pas_handover(struct qcom_q6v5 *q6v5)
int ret;
if (adsp->check_status) {
ret = rproc_config_check(adsp, SOCCP_D3, adsp->tcsr_addr);
if (ret)
	dev_err(adsp->dev, "state not changed in handover TCSR val = %d\n",
		readl(adsp->tcsr_addr));
else
	dev_info(adsp->dev, "state changed in handover for soccp! TCSR val = %d\n",
		readl(adsp->tcsr_addr));
}
disable_regulators(adsp);
clk_disable_unprepare(adsp->aggre2_clk);
@ -1094,6 +1147,9 @@ static int adsp_stop(struct rproc *rproc)
if (handover)
qcom_pas_handover(&adsp->q6v5);
if (adsp->smem_host_id)
ret = qcom_smem_bust_hwspin_lock_by_host(adsp->smem_host_id);
if (is_mss_ssr_hyp_assign_en(adsp)) {
add_mpss_dsm_mem_ssr_dump(adsp);
ret = mpss_dsm_hyp_assign_control(adsp, false);
@ -1608,6 +1664,7 @@ static int adsp_probe(struct platform_device *pdev)
goto free_rproc;
adsp->has_aggre2_clk = desc->has_aggre2_clk;
adsp->info_name = desc->sysmon_name;
adsp->smem_host_id = desc->smem_host_id;
adsp->decrypt_shutdown = desc->decrypt_shutdown;
adsp->qmp_name = desc->qmp_name;
adsp->dma_phys_below_32b = desc->dma_phys_below_32b;
@ -1697,26 +1754,9 @@ static int adsp_probe(struct platform_device *pdev)
goto detach_proxy_pds;
}
adsp->q6v5.active_state_ack_irq = platform_get_irq_byname(pdev, "wake-ack");
if (adsp->q6v5.active_state_ack_irq < 0) {
dev_err(&pdev->dev, "failed to acquire readyack irq\n");
goto detach_proxy_pds;
}
ret = devm_request_threaded_irq(&pdev->dev, adsp->q6v5.active_state_ack_irq,
NULL, soccp_running_ack,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
"qcom_q6v5_pas", &adsp->q6v5);
if (ret) {
dev_err(&pdev->dev, "failed to acquire ready ack IRQ\n");
goto detach_proxy_pds;
}
mutex_init(&adsp->adsp_lock);
init_completion(&(adsp->q6v5.running_ack));
adsp->current_users = 0;
}
qcom_q6v5_register_ssr_subdev(&adsp->q6v5, &adsp->ssr_subdev.subdev);
@ -1764,7 +1804,7 @@ static int adsp_probe(struct platform_device *pdev)
mutex_unlock(&q6v5_pas_mutex);
if (adsp->check_status) {
adsp->panic_blk.priority = INT_MAX - 1;
adsp->panic_blk.priority = INT_MAX - 2;
adsp->panic_blk.notifier_call = rproc_panic_handler;
atomic_notifier_chain_register(&panic_notifier_list, &adsp->panic_blk);
}
@ -1973,6 +2013,7 @@ static const struct adsp_data niobe_adsp_resource = {
.sysmon_name = "adsp",
.qmp_name = "adsp",
.ssctl_id = 0x14,
.smem_host_id = 2,
};
static const struct adsp_data cliffs_adsp_resource = {
@ -2231,6 +2272,7 @@ static const struct adsp_data niobe_cdsp_resource = {
.sysmon_name = "cdsp",
.qmp_name = "cdsp",
.ssctl_id = 0x17,
.smem_host_id = 5,
};
static const struct adsp_data cliffs_cdsp_resource = {

View File

@ -1630,7 +1630,7 @@ static irqreturn_t qcom_glink_native_intr(int irq, void *data)
struct qcom_glink *glink = data;
int ret;
ret = qcom_glink_native_rx(glink, 10);
ret = qcom_glink_native_rx(glink, 15);
return (ret) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/delay.h>
@ -51,6 +51,7 @@ enum {
MSG_SSR_AFTER_POWERUP, /* outbound */
MSG_SSR_SETUP, /* inbound */
MSG_SSR_SETUP_ACK, /* outbound */
MSG_INBUF_RECLAIM, /* inbound */
MSG_MAX,
MSG_ERR = 0xff,
};
@ -79,7 +80,7 @@ struct virtio_glink_bridge_dsp_info {
struct notifier_block nb;
void *notifier_handle;
spinlock_t ssr_lock;
struct mutex ssr_lock;
struct list_head node;
};
@ -121,6 +122,7 @@ static int virtio_glink_bridge_msg_type_supported(u32 msg_type)
switch (msg_type) {
case MSG_SETUP:
case MSG_SSR_SETUP:
case MSG_INBUF_RECLAIM:
return true;
default:
return false;
@ -141,7 +143,7 @@ static int virtio_glink_bridge_send_msg(struct virtio_glink_bridge *vgbridge,
msg->label = cpu_to_virtio32(vdev, label);
sg_init_one(&sg, msg, sizeof(*msg));
rc = virtqueue_add_inbuf(vgbridge->vq, &sg, 1, msg, GFP_ATOMIC);
rc = virtqueue_add_inbuf(vgbridge->vq, &sg, 1, msg, GFP_KERNEL);
if (rc) {
dev_err(&vdev->dev, "fail to add input buffer\n");
return rc;
@ -167,7 +169,7 @@ static int virtio_glink_bridge_send_msg_ack(struct virtio_glink_bridge *vgbridge
ack->status = cpu_to_virtio32(vdev, status);
sg_init_one(&sg, ack, sizeof(*ack));
rc = virtqueue_add_inbuf(vgbridge->vq, &sg, 1, ack, GFP_ATOMIC);
rc = virtqueue_add_inbuf(vgbridge->vq, &sg, 1, ack, GFP_KERNEL);
if (rc) {
dev_err(&vdev->dev, "fail to add input buffer\n");
return rc;
@ -197,7 +199,7 @@ static int virtio_glink_bridge_ssr_cb(struct notifier_block *nb,
dsp_info = container_of(nb, struct virtio_glink_bridge_dsp_info, nb);
spin_lock(&dsp_info->ssr_lock);
mutex_lock(&dsp_info->ssr_lock);
dev = &dsp_info->vgbridge->vdev->dev;
dev_info(dev, "received cb state %ld for %s\n", state, dsp_info->label);
@ -212,7 +214,7 @@ static int virtio_glink_bridge_ssr_cb(struct notifier_block *nb,
default:
break;
}
spin_unlock(&dsp_info->ssr_lock);
mutex_unlock(&dsp_info->ssr_lock);
return NOTIFY_DONE;
}
@ -264,6 +266,9 @@ static void virtio_glink_bridge_rx_work(struct work_struct *work)
goto out;
}
if (msg_type == MSG_INBUF_RECLAIM)
return;
msg_ack_type = virtio_glink_bridge_to_msg_ack_type(msg_type);
dsp_info = virtio_glink_bridge_get_dsp_info(vgbridge, label);
@ -273,7 +278,7 @@ static void virtio_glink_bridge_rx_work(struct work_struct *work)
goto out;
}
spin_lock(&dsp_info->ssr_lock);
mutex_lock(&dsp_info->ssr_lock);
switch (msg_type) {
case MSG_SETUP:
@ -333,7 +338,7 @@ static void virtio_glink_bridge_rx_work(struct work_struct *work)
rc = VIRTIO_GLINK_BRIDGE_SUCCESS;
unlock:
virtio_glink_bridge_send_msg_ack(vgbridge, msg_ack_type, label, rc);
spin_unlock(&dsp_info->ssr_lock);
mutex_unlock(&dsp_info->ssr_lock);
return;
out:
virtio_glink_bridge_send_msg_ack(vgbridge, msg_ack_type, label, rc);
@ -383,7 +388,7 @@ static int virtio_glink_bridge_of_parse(struct virtio_glink_bridge *vgbridge)
goto out;
}
spin_lock_init(&dsp_info->ssr_lock);
mutex_init(&dsp_info->ssr_lock);
dsp_info->np = child_np;
list_add_tail(&dsp_info->node, &vgbridge->dsp_infos);
}

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "qcom-bwmon: " fmt
@ -1702,6 +1702,7 @@ void __stop_bw_hwmon(struct bw_hwmon *hw, enum mon_reg_type type)
bwmon_monitor_stop(hw);
mon_irq_disable(m, type);
synchronize_irq(m->irq);
free_irq(m->irq, m);
mon_disable(m, type);
mon_clear(m, true, type);

View File

@ -80,7 +80,7 @@ struct uhab_context *hab_ctx_alloc(int kernel)
ctx->closing = 0;
INIT_LIST_HEAD(&ctx->vchannels);
INIT_LIST_HEAD(&ctx->exp_whse);
INIT_LIST_HEAD(&ctx->imp_whse);
hab_rb_init(&ctx->imp_whse);
INIT_LIST_HEAD(&ctx->exp_rxq);
init_waitqueue_head(&ctx->exp_wq);
@ -167,8 +167,11 @@ void hab_ctx_free_fn(struct uhab_context *ctx)
write_unlock(&ctx->exp_lock);
spin_lock_bh(&ctx->imp_lock);
list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) {
list_del(&exp->node);
for (exp_super = hab_rb_min(&ctx->imp_whse, struct export_desc_super, node);
exp_super != NULL;
exp_super = hab_rb_min(&ctx->imp_whse, struct export_desc_super, node)) {
exp = &exp_super->exp;
hab_rb_remove(&ctx->imp_whse, exp_super);
ctx->import_total--;
pr_debug("leaked imp %d vcid %X for ctx is collected total %d\n",
exp->export_id, exp->vcid_local,

View File

@ -318,7 +318,7 @@ struct uhab_context {
struct list_head exp_rxq;
spinlock_t expq_lock;
struct list_head imp_whse;
HAB_RB_ROOT imp_whse;
spinlock_t imp_lock;
uint32_t import_total;
@ -511,6 +511,8 @@ struct export_desc_super {
enum export_state exp_state;
uint32_t remote_imported;
HAB_RB_ENTRY node;
/*
* exp must be the last member
* because it is a variable length struct with pfns as payload
@ -767,4 +769,8 @@ int hab_stat_log(struct physical_channel **pchans, int pchan_cnt, char *dest,
int hab_stat_buffer_print(char *dest,
int dest_size, const char *fmt, ...);
int hab_create_cdev_node(int mmid_grp_index);
struct export_desc_super *hab_rb_exp_insert(struct rb_root *root, struct export_desc_super *exp_s);
struct export_desc_super *hab_rb_exp_find(struct rb_root *root, struct export_desc_super *key);
#endif /* __HAB_H */

View File

@ -423,6 +423,64 @@ static void reclaim_cleanup(struct work_struct *reclaim_work)
}
}
void hab_rb_init(struct rb_root *root)
{
*root = RB_ROOT;
}
struct export_desc_super *hab_rb_exp_find(struct rb_root *root, struct export_desc_super *key)
{
struct rb_node *node = root->rb_node;
struct export_desc_super *exp_super;
while (node) {
exp_super = rb_entry(node, struct export_desc_super, node);
if (key->exp.export_id < exp_super->exp.export_id)
node = node->rb_left;
else if (key->exp.export_id > exp_super->exp.export_id)
node = node->rb_right;
else {
if (key->exp.pchan < exp_super->exp.pchan)
node = node->rb_left;
else if (key->exp.pchan > exp_super->exp.pchan)
node = node->rb_right;
else
return exp_super;
}
}
return NULL;
}
struct export_desc_super *hab_rb_exp_insert(struct rb_root *root, struct export_desc_super *exp_s)
{
struct rb_node **new = &(root->rb_node), *parent = NULL;
while (*new) {
struct export_desc_super *this = rb_entry(*new, struct export_desc_super, node);
parent = *new;
if (exp_s->exp.export_id < this->exp.export_id)
new = &((*new)->rb_left);
else if (exp_s->exp.export_id > this->exp.export_id)
new = &((*new)->rb_right);
else {
if (exp_s->exp.pchan < this->exp.pchan)
new = &((*new)->rb_left);
else if (exp_s->exp.pchan > this->exp.pchan)
new = &((*new)->rb_right);
else
/* the key must not already exist before an insert */
return this;
}
}
rb_link_node(&exp_s->node, parent, new);
rb_insert_color(&exp_s->node, root);
return NULL;
}
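/*
 * Not part of the diff: a minimal usage sketch of the lookup helper above.
 * (export_id, pchan) forms the composite key, so a caller only needs a
 * zero-initialized export_desc_super on the stack with those two fields
 * set, exactly as hab_mem_import()/hab_mem_unimport() do elsewhere in this
 * diff. The caller is assumed to hold ctx->imp_lock.
 */
static struct export_desc_super *hab_rb_exp_find_example(struct uhab_context *ctx,
		uint32_t export_id, struct physical_channel *pchan)
{
	struct export_desc_super key = {0};

	key.exp.export_id = export_id;
	key.exp.pchan = pchan;

	return hab_rb_exp_find(&ctx->imp_whse, &key);
}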
/* create one more char device for /dev/hab */
#define CDEV_NUM_MAX (MM_ID_MAX / 100 + 1)

View File

@ -470,8 +470,8 @@ int hab_mem_import(struct uhab_context *ctx,
int kernel)
{
int ret = 0, found = 0;
struct export_desc *exp = NULL;
struct export_desc_super *exp_super = NULL;
struct export_desc *export = NULL;
struct export_desc_super *exp_super = NULL, key = {0};
struct virtual_channel *vchan = NULL;
struct hab_header header = HAB_HEADER_INITIALIZER;
struct hab_import_ack expected_ack = {0};
@ -528,59 +528,56 @@ int hab_mem_import(struct uhab_context *ctx,
}
}
key.exp.export_id = param->exportid;
key.exp.pchan = vchan->pchan;
spin_lock_bh(&ctx->imp_lock);
list_for_each_entry(exp, &ctx->imp_whse, node) {
if ((exp->export_id == param->exportid) &&
(exp->pchan == vchan->pchan)) {
exp_super = container_of(exp, struct export_desc_super, exp);
/* not allowed to import one exp desc more than once */
if (exp_super->import_state == EXP_DESC_IMPORTED
|| exp_super->import_state == EXP_DESC_IMPORTING) {
pr_err("vc %x not allowed to import expid %u more than once\n",
vchan->id, exp->export_id);
spin_unlock_bh(&ctx->imp_lock);
ret = -EINVAL;
goto err_imp;
}
/*
* set the flag to avoid another thread getting the exp desc again
* and must be before unlock, otherwise it is no use.
*/
exp_super->import_state = EXP_DESC_IMPORTING;
found = 1;
break;
exp_super = hab_rb_exp_find(&ctx->imp_whse, &key);
if (exp_super) {
/* not allowed to import one exp desc more than once */
if (exp_super->import_state == EXP_DESC_IMPORTED
|| exp_super->import_state == EXP_DESC_IMPORTING) {
export = &exp_super->exp;
pr_err("vc %x not allowed to import one expid %u more than once\n",
vchan->id, export->export_id);
spin_unlock_bh(&ctx->imp_lock);
ret = -EINVAL;
goto err_imp;
}
}
spin_unlock_bh(&ctx->imp_lock);
if (!found) {
pr_err("vc %x fail to get export descriptor from export id %d\n",
vchan->id, param->exportid);
/*
* set the flag to avoid another thread getting the exp desc again
* and must be before unlock, otherwise it is no use.
*/
exp_super->import_state = EXP_DESC_IMPORTING;
found = 1;
} else {
spin_unlock_bh(&ctx->imp_lock);
pr_err("Fail to get export descriptor from export id %d vcid %x\n",
param->exportid, vchan->id);
ret = -ENODEV;
goto err_imp;
}
spin_unlock_bh(&ctx->imp_lock);
if ((exp->payload_count << PAGE_SHIFT) != param->sizebytes) {
export = &exp_super->exp;
if ((export->payload_count << PAGE_SHIFT) != param->sizebytes) {
pr_err("vc %x input size %d don't match buffer size %d\n",
vchan->id, param->sizebytes, exp->payload_count << PAGE_SHIFT);
vchan->id, param->sizebytes, export->payload_count << PAGE_SHIFT);
ret = -EINVAL;
exp_super->import_state = EXP_DESC_INIT;
goto err_imp;
}
ret = habmem_imp_hyp_map(ctx->import_ctx, param, exp, kernel);
ret = habmem_imp_hyp_map(ctx->import_ctx, param, export, kernel);
if (ret) {
pr_err("Import fail on vc %x ret:%d pcnt:%d rem:%d 1st_ref:0x%X\n",
vchan->id, ret, exp->payload_count,
exp->domid_local, *((uint32_t *)exp->payload));
vchan->id, ret, export->payload_count,
export->domid_local, *((uint32_t *)export->payload));
exp_super->import_state = EXP_DESC_INIT;
goto err_imp;
}
exp->import_index = param->index;
exp->kva = kernel ? (void *)param->kva : NULL;
export->import_index = param->index;
export->kva = kernel ? (void *)param->kva : NULL;
exp_super->import_state = EXP_DESC_IMPORTED;
err_imp:
@ -590,10 +587,10 @@ int hab_mem_import(struct uhab_context *ctx,
(found == 1) &&
(ret != 0)) {
/* dma_buf create failure, rollback required */
hab_send_unimport_msg(vchan, exp->export_id);
hab_send_unimport_msg(vchan, export->export_id);
spin_lock_bh(&ctx->imp_lock);
list_del(&exp->node);
hab_rb_remove(&ctx->imp_whse, exp_super);
ctx->import_total--;
spin_unlock_bh(&ctx->imp_lock);
@ -610,8 +607,8 @@ int hab_mem_unimport(struct uhab_context *ctx,
int kernel)
{
int ret = 0, found = 0;
struct export_desc *exp = NULL, *exp_tmp;
struct export_desc_super *exp_super = NULL;
struct export_desc *exp = NULL;
struct export_desc_super *exp_super = NULL, key = {0};
struct virtual_channel *vchan;
if (!ctx || !param)
@ -624,30 +621,27 @@ int hab_mem_unimport(struct uhab_context *ctx,
return -ENODEV;
}
key.exp.export_id = param->exportid;
key.exp.pchan = vchan->pchan;
spin_lock_bh(&ctx->imp_lock);
list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) {
/* same pchan is expected here */
if (exp->export_id == param->exportid &&
exp->pchan == vchan->pchan) {
exp_super = container_of(exp, struct export_desc_super, exp);
/* only successfully imported export desc could be found and released */
if (exp_super->import_state == EXP_DESC_IMPORTED) {
list_del(&exp->node);
ctx->import_total--;
found = 1;
} else
pr_err("vc %x exp id:%u status:%d is found, invalid to unimport\n",
vchan->id, exp->export_id, exp_super->import_state);
break;
}
exp_super = hab_rb_exp_find(&ctx->imp_whse, &key);
if (exp_super) {
/* only successfully imported export desc could be found and released */
if (exp_super->import_state == EXP_DESC_IMPORTED) {
hab_rb_remove(&ctx->imp_whse, exp_super);
ctx->import_total--;
found = 1;
} else
pr_err("vc %x exp id:%u status:%d is found, invalid to unimport\n",
vchan->id, exp_super->exp.export_id, exp_super->import_state);
}
spin_unlock_bh(&ctx->imp_lock);
if (!found)
if (!found) {
ret = -EINVAL;
else {
pr_err("exp id %u unavailable on vc %x\n", param->exportid, vchan->id);
} else {
exp = &exp_super->exp;
ret = habmm_imp_hyp_unmap(ctx->import_ctx, exp, kernel);
if (ret) {
pr_err("unmap fail id:%d pcnt:%d vcid:%d\n",

View File

@ -242,17 +242,23 @@ static void hab_msg_queue(struct virtual_channel *vchan,
}
static int hab_export_enqueue(struct virtual_channel *vchan,
struct export_desc *exp)
struct export_desc *export)
{
struct uhab_context *ctx = vchan->ctx;
struct export_desc_super *exp_super = container_of(export, struct export_desc_super, exp);
int irqs_disabled = irqs_disabled();
struct export_desc_super *ret;
hab_spin_lock(&ctx->imp_lock, irqs_disabled);
list_add_tail(&exp->node, &ctx->imp_whse);
ctx->import_total++;
ret = hab_rb_exp_insert(&ctx->imp_whse, exp_super);
if (ret != NULL)
pr_err("expid %u already exists on vc %x, size %d\n",
export->export_id, vchan->id, PAGE_SIZE * export->payload_count);
else
ctx->import_total++;
hab_spin_unlock(&ctx->imp_lock, irqs_disabled);
return 0;
return (ret == NULL) ? 0 : -EINVAL;
}
/*
@ -544,19 +550,22 @@ static int hab_receive_export_desc(struct physical_channel *pchan,
ack_recvd->ack.export_id = exp_desc->export_id;
ack_recvd->ack.vcid_local = exp_desc->vcid_local;
ack_recvd->ack.vcid_remote = exp_desc->vcid_remote;
ack_recvd->ack.imp_whse_added = 1;
}
hab_export_enqueue(vchan, exp_desc);
ret = hab_export_enqueue(vchan, exp_desc);
if (pchan->mem_proto == 1) {
ack_recvd->ack.imp_whse_added = ret ? 0 : 1;
hab_spin_lock(&vchan->ctx->impq_lock, irqs_disabled);
list_add_tail(&ack_recvd->node, &vchan->ctx->imp_rxq);
hab_spin_unlock(&vchan->ctx->impq_lock, irqs_disabled);
} else
hab_send_export_ack(vchan, pchan, exp_desc);
(void)hab_send_export_ack(vchan, pchan, exp_desc);
return 0;
if (ret)
kfree(exp_desc_super);
return ret;
err_imp:
if (pchan->mem_proto == 1) {

View File

@ -40,6 +40,17 @@
#include <linux/delay.h>
#include <linux/version.h>
#include <linux/devcoredump.h>
void hab_rb_init(struct rb_root *root);
#define hab_rb_remove(root, pos) rb_erase(&(pos)->node, root)
#define hab_rb_min(root, type, node) rb_entry_safe(rb_first(root), type, node)
#define hab_rb_max(root, type, node) rb_entry_safe(rb_last(root), type, node)
#define hab_rb_for_each_entry(pos, n, head, member) \
rbtree_postorder_for_each_entry_safe(pos, n, head, member)
#define HAB_RB_ENTRY struct rb_node
#define HAB_RB_ROOT struct rb_root
#if defined(CONFIG_MSM_VHOST_HAB) || defined(CONFIG_MSM_VIRTIO_HAB)
#include <asm/arch_timer.h>
static inline unsigned long long msm_timer_get_sclk_ticks(void)

View File

@ -121,13 +121,14 @@ static int print_ctx_total_expimp(struct uhab_context *ctx,
struct compressed_pfns *pfn_table = NULL;
int exp_total = 0, imp_total = 0;
int exp_cnt = 0, imp_cnt = 0;
struct export_desc *exp = NULL;
struct export_desc *export = NULL;
struct export_desc_super *exp_super, *exp_super_tmp;
int exim_size = 0;
int ret = 0;
read_lock(&ctx->exp_lock);
list_for_each_entry(exp, &ctx->exp_whse, node) {
pfn_table = (struct compressed_pfns *)exp->payload;
list_for_each_entry(export, &ctx->exp_whse, node) {
pfn_table = (struct compressed_pfns *)export->payload;
exim_size = get_pft_tbl_total_size(pfn_table);
exp_total += exim_size;
exp_cnt++;
@ -135,9 +136,10 @@ static int print_ctx_total_expimp(struct uhab_context *ctx,
read_unlock(&ctx->exp_lock);
spin_lock_bh(&ctx->imp_lock);
list_for_each_entry(exp, &ctx->imp_whse, node) {
if (habmm_imp_hyp_map_check(ctx->import_ctx, exp)) {
pfn_table = (struct compressed_pfns *)exp->payload;
hab_rb_for_each_entry(exp_super, exp_super_tmp, &ctx->imp_whse, node) {
export = &exp_super->exp;
if (habmm_imp_hyp_map_check(ctx->import_ctx, export)) {
pfn_table = (struct compressed_pfns *)export->payload;
exim_size = get_pft_tbl_total_size(pfn_table);
imp_total += exim_size;
imp_cnt++;
@ -146,7 +148,7 @@ static int print_ctx_total_expimp(struct uhab_context *ctx,
spin_unlock_bh(&ctx->imp_lock);
if (exp_cnt || exp_total || imp_cnt || imp_total)
hab_stat_buffer_print(buf, size,
ret = hab_stat_buffer_print(buf, size,
"ctx %d exp %d size %d imp %d size %d\n",
ctx->owner, exp_cnt, exp_total,
imp_cnt, imp_total);
@ -154,26 +156,27 @@ static int print_ctx_total_expimp(struct uhab_context *ctx,
return 0;
read_lock(&ctx->exp_lock);
hab_stat_buffer_print(buf, size, "export[expid:vcid:size]: ");
list_for_each_entry(exp, &ctx->exp_whse, node) {
pfn_table = (struct compressed_pfns *)exp->payload;
ret = hab_stat_buffer_print(buf, size, "export[expid:vcid:size]: ");
list_for_each_entry(export, &ctx->exp_whse, node) {
pfn_table = (struct compressed_pfns *)export->payload;
exim_size = get_pft_tbl_total_size(pfn_table);
hab_stat_buffer_print(buf, size,
"[%d:%x:%d] ", exp->export_id,
exp->vcid_local, exim_size);
ret = hab_stat_buffer_print(buf, size,
"[%d:%x:%d] ", export->export_id,
export->vcid_local, exim_size);
}
hab_stat_buffer_print(buf, size, "\n");
ret = hab_stat_buffer_print(buf, size, "\n");
read_unlock(&ctx->exp_lock);
spin_lock_bh(&ctx->imp_lock);
hab_stat_buffer_print(buf, size, "import[expid:vcid:size]: ");
list_for_each_entry(exp, &ctx->imp_whse, node) {
if (habmm_imp_hyp_map_check(ctx->import_ctx, exp)) {
pfn_table = (struct compressed_pfns *)exp->payload;
ret = hab_stat_buffer_print(buf, size, "import[expid:vcid:size]: ");
hab_rb_for_each_entry(exp_super, exp_super_tmp, &ctx->imp_whse, node) {
export = &exp_super->exp;
if (habmm_imp_hyp_map_check(ctx->import_ctx, export)) {
pfn_table = (struct compressed_pfns *)export->payload;
exim_size = get_pft_tbl_total_size(pfn_table);
hab_stat_buffer_print(buf, size,
"[%d:%x:%d] ", exp->export_id,
exp->vcid_local, exim_size);
ret = hab_stat_buffer_print(buf, size,
"[%d:%x:%d] ", export->export_id,
export->vcid_local, exim_size);
}
}
ret = hab_stat_buffer_print(buf, size, "\n");

View File

@ -21,6 +21,9 @@
#define HAB_VIRTIO_DEVICE_ID_DISPLAY 93
#define HAB_VIRTIO_DEVICE_ID_GRAPHICS 94
#define HAB_VIRTIO_DEVICE_ID_VIDEO 95
#define HAB_VIRTIO_DEVICE_ID_VNW 96
#define HAB_VIRTIO_DEVICE_ID_EXT 97
#define HAB_VIRTIO_DEVICE_ID_GPCE 98
/* all probed virtio_hab stored in this list */
static struct list_head vhab_list = LIST_HEAD_INIT(vhab_list);
@ -39,6 +42,9 @@ static struct virtio_device_tbl {
{ MM_DISP_1, HAB_VIRTIO_DEVICE_ID_DISPLAY, NULL },
{ MM_GFX, HAB_VIRTIO_DEVICE_ID_GRAPHICS, NULL },
{ MM_VID, HAB_VIRTIO_DEVICE_ID_VIDEO, NULL },
{ MM_VNW_1, HAB_VIRTIO_DEVICE_ID_VNW, NULL },
{ MM_EXT_1, HAB_VIRTIO_DEVICE_ID_EXT, NULL },
{ MM_GPCE_1, HAB_VIRTIO_DEVICE_ID_GPCE, NULL },
};
enum pool_type_t {
@ -743,6 +749,18 @@ static int virthab_probe(struct virtio_device *vdev)
mmid_start = MM_VID;
mmid_range = MM_VID_END - MM_VID_START - 1;
virthab_store_vdev(MM_VID, vdev);
} else if (vdev->id.device == HAB_VIRTIO_DEVICE_ID_VNW) {
mmid_start = MM_VNW_1;
mmid_range = MM_VNW_END - MM_VNW_START - 1;
virthab_store_vdev(MM_VNW_1, vdev);
} else if (vdev->id.device == HAB_VIRTIO_DEVICE_ID_EXT) {
mmid_start = MM_EXT_1;
mmid_range = MM_EXT_END - MM_EXT_START - 1;
virthab_store_vdev(MM_EXT_1, vdev);
} else if (vdev->id.device == HAB_VIRTIO_DEVICE_ID_GPCE) {
mmid_start = MM_GPCE_1;
mmid_range = MM_GPCE_END - MM_GPCE_START - 1;
virthab_store_vdev(MM_GPCE_1, vdev);
} else {
pr_err("unknown virtio device is detected %d\n",
vdev->id.device);
@ -878,6 +896,9 @@ static struct virtio_device_id id_table[] = {
{ HAB_VIRTIO_DEVICE_ID_DISPLAY, VIRTIO_DEV_ANY_ID }, /* virtio display */
{ HAB_VIRTIO_DEVICE_ID_GRAPHICS, VIRTIO_DEV_ANY_ID }, /* virtio graphics */
{ HAB_VIRTIO_DEVICE_ID_VIDEO, VIRTIO_DEV_ANY_ID }, /* virtio video */
{ HAB_VIRTIO_DEVICE_ID_VNW, VIRTIO_DEV_ANY_ID }, /* virtio vnw */
{ HAB_VIRTIO_DEVICE_ID_EXT, VIRTIO_DEV_ANY_ID }, /* virtio external */
{ HAB_VIRTIO_DEVICE_ID_GPCE, VIRTIO_DEV_ANY_ID }, /* virtio gpce */
{ 0 },
};

View File

@ -2351,10 +2351,6 @@ static int hgsl_ioctl_mem_alloc(struct file *filep, unsigned long arg)
goto out;
}
/* let the back end aware that this is HGSL allocation */
params.flags &= ~GSL_MEMFLAGS_USERMEM_MASK;
params.flags |= GSL_MEMFLAGS_USERMEM_HGSL_ALLOC;
mem_node->flags = params.flags;
ret = hgsl_sharedmem_alloc(hgsl->dev, params.sizebytes, params.flags, mem_node);
@ -2544,7 +2540,6 @@ static int hgsl_ioctl_mem_map_smmu(struct file *filep, unsigned long arg)
}
params.size = PAGE_ALIGN(params.size);
params.flags &= ~GSL_MEMFLAGS_USERMEM_MASK;
mem_node->flags = params.flags;
mem_node->fd = params.fd;
mem_node->memtype = params.memtype;

View File

@ -69,14 +69,6 @@
#define GSL_MEMFLAGS_GPUIOCOHERENT 0x80000000
#define GSL_MEMFLAGS_CACHEMODE_MASK 0x0C000000
/* external or internal buffer */
#define GSL_MEMFLAGS_USERMEM_HGSL_ALLOC 0x00000020
#define GSL_MEMFLAGS_USERMEM_ASHMEM 0x00000040
#define GSL_MEMFLAGS_USERMEM_ADDR 0x00000060
#define GSL_MEMFLAGS_USERMEM_ION 0x00000080
#define GSL_MEMFLAGS_USERMEM_SHIFT 5
#define GSL_MEMFLAGS_USERMEM_MASK 0x000000e0
/****************************************************************************/
/* cache flags */
/****************************************************************************/

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023,2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/memory.h>
@ -62,11 +62,14 @@ static bool is_rpm_controller;
static DECLARE_BITMAP(movable_bitmap, 1024);
static bool has_pend_offline_req;
static struct workqueue_struct *migrate_wq;
static struct timer_list mem_offline_timeout_timer;
static struct task_struct *offline_trig_task;
#define MODULE_CLASS_NAME "mem-offline"
#define MEMBLOCK_NAME "memory%lu"
#define SEGMENT_NAME "segment%lu"
#define BUF_LEN 100
#define MIGRATE_TIMEOUT_SEC 20
#define OFFLINE_TIMEOUT_SEC 7
struct section_stat {
unsigned long success_count;
@ -477,7 +480,7 @@ static unsigned long get_section_allocated_memory(unsigned long sec_nr)
{
unsigned long block_sz = memory_block_size_bytes();
unsigned long pages_per_blk = block_sz / PAGE_SIZE;
unsigned long tot_free_pages = 0, pfn, end_pfn, flags;
unsigned long tot_free_pages = 0, pfn, end_pfn;
unsigned long used;
struct zone *movable_zone = &NODE_DATA(numa_node_id())->node_zones[ZONE_MOVABLE];
struct page *page;
@ -491,7 +494,6 @@ static unsigned long get_section_allocated_memory(unsigned long sec_nr)
if (!zone_intersects(movable_zone, pfn, pages_per_blk))
return 0;
spin_lock_irqsave(&movable_zone->lock, flags);
while (pfn < end_pfn) {
if (!pfn_valid(pfn) || !PageBuddy(pfn_to_page(pfn))) {
pfn++;
@ -501,13 +503,18 @@ static unsigned long get_section_allocated_memory(unsigned long sec_nr)
tot_free_pages += 1 << page_private(page);
pfn += 1 << page_private(page);
}
spin_unlock_irqrestore(&movable_zone->lock, flags);
used = block_sz - (tot_free_pages * PAGE_SIZE);
return used;
}
static void mem_offline_timeout_cb(struct timer_list *timer)
{
pr_info("mem-offline: SIGALRM is raised to stop the offline operation\n");
send_sig_info(SIGALRM, SEND_SIG_PRIV, offline_trig_task);
}
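/*
 * Not part of the diff text at this point, but descriptive of it: the timer
 * above is armed in the MEM_GOING_OFFLINE case (mod_timer with
 * OFFLINE_TIMEOUT_SEC) and cancelled in both MEM_OFFLINE and
 * MEM_CANCEL_OFFLINE (del_timer_sync), so a stuck offline attempt delivers
 * SIGALRM to the triggering task after 7 seconds, while completed or
 * cancelled attempts do not.
 */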
static int mem_event_callback(struct notifier_block *self,
unsigned long action, void *arg)
{
@ -572,6 +579,8 @@ static int mem_event_callback(struct notifier_block *self,
idx) / sections_per_block].fail_count;
has_pend_offline_req = true;
cancel_work_sync(&fill_movable_zone_work);
offline_trig_task = current;
mod_timer(&mem_offline_timeout_timer, jiffies + (OFFLINE_TIMEOUT_SEC * HZ));
cur = ktime_get();
break;
case MEM_OFFLINE:
@ -592,6 +601,14 @@ static int mem_event_callback(struct notifier_block *self,
pr_debug("mem-offline: Segment %d memblk_bitmap 0x%lx\n",
seg_idx, segment_infos[seg_idx].bitmask_kernel_blk);
totalram_pages_add(memory_block_size_bytes()/PAGE_SIZE);
del_timer_sync(&mem_offline_timeout_timer);
offline_trig_task = NULL;
break;
case MEM_CANCEL_OFFLINE:
pr_debug("mem-offline: MEM_CANCEL_OFFLINE : start = 0x%llx end = 0x%llx\n",
start_addr, end_addr);
del_timer_sync(&mem_offline_timeout_timer);
offline_trig_task = NULL;
break;
case MEM_CANCEL_ONLINE:
pr_info("mem-offline: MEM_CANCEL_ONLINE: start = 0x%llx end = 0x%llx\n",
@ -1832,12 +1849,14 @@ static struct platform_driver mem_offline_driver = {
static int __init mem_module_init(void)
{
timer_setup(&mem_offline_timeout_timer, mem_offline_timeout_cb, 0);
return platform_driver_register(&mem_offline_driver);
}
subsys_initcall(mem_module_init);
static void __exit mem_module_exit(void)
{
del_timer_sync(&mem_offline_timeout_timer);
platform_driver_unregister(&mem_offline_driver);
}
module_exit(mem_module_exit);

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/cache.h>
@ -1153,7 +1153,7 @@ static int md_panic_handler(struct notifier_block *this,
static struct notifier_block md_panic_blk = {
.notifier_call = md_panic_handler,
.priority = INT_MAX - 2, /* < msm watchdog panic notifier */
.priority = INT_MAX - 3, /* < msm watchdog panic notifier */
};
static int md_register_minidump_entry(char *name, u64 virt_addr,

View File

@ -85,6 +85,10 @@ static unsigned short root_swap_dev;
static struct work_struct save_params_work;
static struct completion write_done;
static unsigned char iv[IV_SIZE];
static uint8_t *compressed_blk_array;
static int blk_array_pos;
static unsigned long nr_pages;
static void *auth_slot;
static void init_sg(struct scatterlist *sg, void *data, unsigned int size)
{
@ -301,6 +305,26 @@ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb);
}
/*
* Number of pages compressed at one time. This is in line with UNC_PAGES
* in kernel/power/swap.c.
*/
#define UNCMP_PAGES 32
static uint32_t get_size_of_compression_block_array(unsigned long pages)
{
/*
* Get the max index based on the total number of pages. The compression
* algorithm compresses every UNCMP_PAGES input pages into a variable
* number of output pages; use this to bound the max index.
*/
uint32_t max_index = DIV_ROUND_UP(pages, UNCMP_PAGES);
uint32_t size = ALIGN((max_index * sizeof(*compressed_blk_array)), PAGE_SIZE);
return size;
}
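/*
 * Worked example (not part of the diff), assuming 4 KiB pages: a 4 GiB
 * snapshot image is 1,048,576 pages; DIV_ROUND_UP(1048576, 32) gives
 * 32,768 one-byte entries, and ALIGN(32768, 4096) leaves it at 32,768
 * bytes, i.e. eight pages for the whole block array.
 */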
static void save_auth_and_params_to_disk(struct work_struct *work)
{
int cur_slot;
@ -309,7 +333,7 @@ static void save_auth_and_params_to_disk(struct work_struct *work)
int authslot_count = 0;
int authpage_count = read_authpage_count();
struct hib_bio_batch hb;
int err2;
int err2, i = 0;
hib_init_batch(&hb);
@ -318,6 +342,27 @@ static void save_auth_and_params_to_disk(struct work_struct *work)
*/
params_slot = alloc_swapdev_block(root_swap_dev);
if (auth_slot) {
*(int *)auth_slot = params_slot + 1;
/* Currently bootloader code does the following to
* calculate the authentication slot index.
* authslot = NrMetaPages + NrCopyPages + NrSwapMapPages +
* HDR_SWP_INFO_NUM_PAGES;
*
* However, with compression enabled, we cannot apply the
* above logic to get the authentication slot. So this
* data should be provided to the BL for decryption to work.
*
* In the current implementation, BL doesn't make use of
* the swap_map_pages for restoring the hibernation image. So these pages
* could be used for other purposes. Use this to store the
* authentication slot number. This data will be stored at index as
* that of the first swap_map_page.
*/
write_page(auth_slot, 1, &hb);
}
authpage = authslot_start;
while (authslot_count < authpage_count) {
cur_slot = alloc_swapdev_block(root_swap_dev);
@ -327,6 +372,19 @@ static void save_auth_and_params_to_disk(struct work_struct *work)
}
params->authslot_count = authslot_count;
write_page(params, params_slot, &hb);
/*
* Write the array holding the compressed block count to disk
*/
if (compressed_blk_array) {
uint32_t size = get_size_of_compression_block_array(nr_pages);
for (i = 0; i < size / PAGE_SIZE; i++) {
cur_slot = alloc_swapdev_block(root_swap_dev);
write_page(compressed_blk_array + (i * PAGE_SIZE), cur_slot, &hb);
}
}
err2 = hib_wait_io(&hb);
hib_finish_batch(&hb);
complete_all(&write_done);
@ -457,6 +515,20 @@ void deinit_aes_encrypt(void)
kfree(params);
}
static void cleanup_cmp_blk_array(void)
{
blk_array_pos = 0;
if (compressed_blk_array) {
kvfree((void *)compressed_blk_array);
compressed_blk_array = NULL;
}
if (auth_slot) {
free_page((unsigned long)auth_slot);
auth_slot = NULL;
}
}
static int hibernate_pm_notifier(struct notifier_block *nb,
unsigned long event, void *unused)
{
@ -492,6 +564,7 @@ static int hibernate_pm_notifier(struct notifier_block *nb,
case (PM_POST_HIBERNATION):
deinit_aes_encrypt();
cleanup_cmp_blk_array();
break;
default:
@ -543,6 +616,47 @@ static void init_aes_encrypt(void *data, void *unused)
kfree(params);
}
/*
* Bit (part of swsusp_header_flags) indicating whether the image is
* uncompressed. This is in line with SF_NOCOMPRESS_MODE defined in
* kernel/power/power.h.
*/
#define SF_NOCOMPRESS_MODE 2
static void hibernated_do_mem_alloc(void *data, unsigned long pages,
unsigned int swsusp_header_flags, int *ret)
{
uint32_t size;
/* total no. of pages in the snapshot image */
nr_pages = pages;
if (!(swsusp_header_flags & SF_NOCOMPRESS_MODE)) {
size = get_size_of_compression_block_array(pages);
compressed_blk_array = kvzalloc(size, GFP_KERNEL);
if (!compressed_blk_array) {
*ret = -ENOMEM;
return;
}
/* Allocate memory to hold authentication slot start */
auth_slot = (void *)get_zeroed_page(GFP_KERNEL);
if (!auth_slot) {
pr_err("Failed to allocate page for storing authentication tag slot number\n");
*ret = -ENOMEM;
}
}
}
static void hibernate_save_cmp_len(void *data, size_t cmp_len)
{
uint8_t pages;
pages = DIV_ROUND_UP(cmp_len, PAGE_SIZE);
compressed_blk_array[blk_array_pos++] = pages;
}
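/*
 * Not part of the diff: a uint8_t entry is sufficient here because each
 * entry covers one UNCMP_PAGES chunk (32 pages, 128 KiB assuming 4 KiB
 * pages); even with a worst-case expanding compressor (e.g. LZO's
 * in + in/16 + 64 + 3 bound, an assumption about the algorithm in use)
 * a chunk compresses to at most ~35 pages, well under 255.
 */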
static int __init qcom_secure_hibernattion_init(void)
{
int ret;
@ -551,6 +665,8 @@ static int __init qcom_secure_hibernattion_init(void)
register_trace_android_vh_init_aes_encrypt(init_aes_encrypt, NULL);
register_trace_android_vh_skip_swap_map_write(skip_swap_map_write, NULL);
register_trace_android_vh_post_image_save(save_params_to_disk, NULL);
register_trace_android_vh_hibernate_save_cmp_len(hibernate_save_cmp_len, NULL);
register_trace_android_vh_hibernated_do_mem_alloc(hibernated_do_mem_alloc, NULL);
ret = register_pm_notifier(&pm_nb);
if (ret) {

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022,2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define pr_fmt(fmt) "va-minidump: %s: " fmt, __func__
@ -690,12 +690,12 @@ static int qcom_va_md_elf_panic_handler(struct notifier_block *this,
static struct notifier_block qcom_va_md_panic_blk = {
.notifier_call = qcom_va_md_panic_handler,
.priority = INT_MAX - 3,
.priority = INT_MAX - 4,
};
static struct notifier_block qcom_va_md_elf_panic_blk = {
.notifier_call = qcom_va_md_elf_panic_handler,
.priority = INT_MAX - 4,
.priority = INT_MAX - 5,
};
static int qcom_va_md_reserve_mem(struct device *dev)

View File

@ -1495,7 +1495,7 @@ const struct device *rpmh_rsc_get_device(const char *name, u32 drv_id)
struct rsc_drv_top *rsc_top = rpmh_rsc_get_top_device(name);
int i;
if (IS_ERR(rsc_top))
if (IS_ERR(rsc_top) || strcmp(name, "cam_rsc"))
return ERR_PTR(-ENODEV);
for (i = 0; i < rsc_top->drv_count; i++) {
@ -1748,6 +1748,10 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
drv[i].regs[DRV_SOLVER_CONFIG]);
solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT;
solver_config = solver_config >> DRV_HW_SOLVER_SHIFT;
spin_lock_init(&drv[i].lock);
spin_lock_init(&drv[i].client.cache_lock);
if (of_find_property(dn, "power-domains", NULL)) {
ret = rpmh_rsc_pd_attach(&drv[i]);
if (ret)
@ -1772,7 +1776,6 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
drv[i].regs = rpmh_rsc_reg_offsets_ver_3_0_hw_channel;
}
spin_lock_init(&drv[i].lock);
init_waitqueue_head(&drv[i].tcs_wait);
bitmap_zero(drv[i].tcs_in_use, MAX_TCS_NR);
drv[i].client.non_batch_cache = devm_kcalloc(&pdev->dev, CMD_DB_MAX_RESOURCES,
@ -1795,8 +1798,6 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
if (ret)
return ret;
spin_lock_init(&drv[i].client.cache_lock);
drv[i].ipc_log_ctx = ipc_log_context_create(
RSC_DRV_IPC_LOG_SIZE,
drv[i].name, 0);

View File

@ -12,6 +12,7 @@
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/soc/qcom/qcom_hwspinlock.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/soc/qcom/smem.h>
@ -370,6 +371,32 @@ static struct qcom_smem *__smem;
/* Timeout (ms) for the trylock of remote spinlocks */
#define HWSPINLOCK_TIMEOUT 1000
/* The qcom hwspinlock id is always the smem host id plus one */
#define SMEM_HOST_ID_TO_HWSPINLOCK_ID(__x) ((__x) + 1)
/**
* qcom_smem_bust_hwspin_lock_by_host() - bust the smem hwspinlock for a host
* @host: remote processor id
*
* Busts the hwspin_lock for the given smem host id. This helper is intended
* for remoteproc drivers that manage remoteprocs with an equivalent smem
* driver instance in the remote firmware. Drivers can force a release of the
* smem hwspin_lock if the rproc unexpectedly goes into a bad state.
*
* Context: Process context.
*
* Returns: 0 on success, otherwise negative errno.
*/
int qcom_smem_bust_hwspin_lock_by_host(unsigned int host)
{
/* This function is for remote procs, so ignore SMEM_HOST_APPS */
if (host == SMEM_HOST_APPS || host >= SMEM_HOST_COUNT)
return -EINVAL;
return qcom_hwspinlock_bust(__smem->hwlock, SMEM_HOST_ID_TO_HWSPINLOCK_ID(host));
}
EXPORT_SYMBOL_GPL(qcom_smem_bust_hwspin_lock_by_host);
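/*
 * Not part of the diff: a minimal caller sketch mirroring the adsp_stop()
 * hunk earlier in this diff, where smem_host_id comes from the adsp_data
 * table (2 for the niobe adsp, 5 for the niobe cdsp). The struct and field
 * names are assumptions based on that hunk.
 */
static void example_bust_smem_lock(struct qcom_adsp *adsp)
{
	int ret;

	if (!adsp->smem_host_id)
		return;

	ret = qcom_smem_bust_hwspin_lock_by_host(adsp->smem_host_id);
	if (ret)
		dev_err(adsp->dev, "failed to bust smem hwspinlock for host %u\n",
			adsp->smem_host_id);
}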
static int qcom_smem_alloc_private(struct qcom_smem *smem,
struct smem_partition *part,
unsigned item,

View File

@ -578,6 +578,7 @@ static const struct soc_id soc_id[] = {
{ 565, "BLAIRP" },
{ 629, "NIOBE" },
{ 652, "NIOBE" },
{ 672, "SERAPH" },
{ 577, "PINEAPPLEP" },
{ 578, "BLAIR-LITE" },
{ 605, "SA_MONACOAU_ADAS" },
@ -593,6 +594,7 @@ static const struct soc_id soc_id[] = {
{ 642, "CLIFFSP" },
{ 643, "CLIFFS7P" },
{ 549, "ANORAK" },
{ 554, "NEO-LA" },
};
static struct attribute *msm_custom_socinfo_attrs[MAX_SOCINFO_ATTRS];

View File

@ -191,8 +191,13 @@ static int add_delta_time(
ptr = g_sysmon_stats.sysmon_power_stats_cdsp;
} else if (dsp_id == SLPI) {
ptr = g_sysmon_stats.sysmon_power_stats_slpi;
} else {
return -EINVAL;
}
if (ptr == NULL)
return -EINVAL;
if (ver >= 2) {
powerstats_ticks = (u64)(((u64)ptr->last_update_time_powerstats_msb << 32) |
ptr->last_update_time_powerstats_lsb);

View File

@ -106,6 +106,41 @@ static void q2spi_parse_cr_header(struct q2spi_geni *q2spi, struct msm_gpi_cb co
q2spi_doorbell(q2spi, &cb->q2spi_cr_header_event);
}
/*
* q2spi_check_m_irq_err_status() - check the m_irq error status and, if a
* start sequence error is seen, set the is_start_seq_fail flag to true.
*
* @q2spi: q2spi master device handle
* @cb_status: irq status fields
*
* Return: None
*/
static void q2spi_check_m_irq_err_status(struct q2spi_geni *q2spi, u32 cb_status)
{
/* bits 5 to 12 represent the GP IRQ status */
u32 status = (cb_status & M_GP_IRQ_MASK) >> M_GP_IRQ_ERR_START_BIT;
if (status & Q2SPI_PWR_ON_NACK)
Q2SPI_DEBUG(q2spi, "%s Q2SPI_PWR_ON_NACK\n", __func__);
if (status & Q2SPI_HDR_FAIL)
Q2SPI_DEBUG(q2spi, "%s Q2SPI_HDR_FAIL\n", __func__);
if (status & Q2SPI_HCR_FAIL)
Q2SPI_DEBUG(q2spi, "%s Q2SPI_HCR_FAIL\n", __func__);
if (status & Q2SPI_CHECKSUM_FAIL)
Q2SPI_DEBUG(q2spi, "%s Q2SPI_CHEKSUM_FAIL\n", __func__);
if (status & Q2SPI_START_SEQ_TIMEOUT) {
q2spi->is_start_seq_fail = true;
complete_all(&q2spi->wait_comp_start_fail);
Q2SPI_DEBUG(q2spi, "%s Q2SPI_START_SEQ_TIMEOUT\n", __func__);
}
if (status & Q2SPI_STOP_SEQ_TIMEOUT)
Q2SPI_DEBUG(q2spi, "%s Q2SPI_STOP_SEQ_TIMEOUT\n", __func__);
if (status & Q2SPI_WAIT_PHASE_TIMEOUT)
Q2SPI_DEBUG(q2spi, "%s Q2SPI_WAIT_PHASE_TIMEOUT\n", __func__);
if (status & Q2SPI_CLIENT_EN_NOT_DETECTED)
Q2SPI_DEBUG(q2spi, "%s Q2SPI_CLIENT_EN_NOT_DETECTED\n", __func__);
}
static void q2spi_gsi_tx_callback(void *cb)
{
struct msm_gpi_dma_async_tx_cb_param *cb_param = NULL;
@ -131,6 +166,9 @@ static void q2spi_gsi_tx_callback(void *cb)
if (cb_param->completion_code == MSM_GPI_TCE_UNEXP_ERR) {
Q2SPI_DEBUG(q2spi, "%s Unexpected GSI CB completion code CB status:0x%x\n",
__func__, cb_param->status);
q2spi->gsi->qup_gsi_err = true;
q2spi_check_m_irq_err_status(q2spi, cb_param->status);
complete_all(&q2spi->tx_cb);
return;
} else if (cb_param->completion_code == MSM_GPI_TCE_EOT) {
Q2SPI_DEBUG(q2spi, "%s MSM_GPI_TCE_EOT\n", __func__);
@ -458,41 +496,46 @@ int check_gsi_transfer_completion_db_rx(struct q2spi_geni *q2spi)
int check_gsi_transfer_completion(struct q2spi_geni *q2spi)
{
int i = 0, ret = 0;
unsigned long timeout = 0, xfer_timeout = 0;
unsigned long timeleft = 0, xfer_timeout = 0;
xfer_timeout = XFER_TIMEOUT_OFFSET;
Q2SPI_DEBUG(q2spi, "%s tx_eot:%d rx_eot:%d\n", __func__,
q2spi->gsi->num_tx_eot, q2spi->gsi->num_rx_eot);
for (i = 0 ; i < q2spi->gsi->num_tx_eot; i++) {
timeout =
timeleft =
wait_for_completion_timeout(&q2spi->tx_cb, msecs_to_jiffies(xfer_timeout));
if (!timeout) {
if (!timeleft) {
Q2SPI_DEBUG(q2spi, "%s PID:%d Tx[%d] timeout\n", __func__, current->pid, i);
ret = -ETIMEDOUT;
goto err_gsi_geni_transfer;
} else {
} else if (!q2spi->gsi->qup_gsi_err) {
Q2SPI_DEBUG(q2spi, "%s tx completed\n", __func__);
}
}
for (i = 0 ; i < q2spi->gsi->num_rx_eot; i++) {
timeout =
timeleft =
wait_for_completion_timeout(&q2spi->rx_cb, msecs_to_jiffies(xfer_timeout));
if (!timeout) {
if (!timeleft) {
Q2SPI_DEBUG(q2spi, "%s PID:%d Rx[%d] timeout\n", __func__, current->pid, i);
ret = -ETIMEDOUT;
goto err_gsi_geni_transfer;
} else {
} else if (!q2spi->gsi->qup_gsi_err) {
Q2SPI_DEBUG(q2spi, "%s rx completed\n", __func__);
}
}
err_gsi_geni_transfer:
if (q2spi->gsi->qup_gsi_err || !timeout) {
if (q2spi->gsi->qup_gsi_err || !timeleft) {
ret = -ETIMEDOUT;
Q2SPI_DEBUG(q2spi, "%s Err QUP Gsi Error\n", __func__);
q2spi->gsi->qup_gsi_err = false;
q2spi->setup_config0 = false;
gpi_q2spi_terminate_all(q2spi->gsi->tx_c);
/* Block on TX completion callback for start sequence failure */
wait_for_completion_interruptible_timeout
(&q2spi->wait_comp_start_fail,
msecs_to_jiffies(TIMEOUT_MSECONDS));
if (!q2spi->is_start_seq_fail)
gpi_q2spi_terminate_all(q2spi->gsi->tx_c);
}
return ret;
}
@ -708,6 +751,7 @@ void q2spi_gsi_ch_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb, void *
case MSM_GPI_QUP_CR_HEADER:
/* Update last access time of a device for autosuspend */
pm_runtime_mark_last_busy(q2spi->dev);
q2spi->gsi->qup_gsi_err = false;
q2spi_cr_hdr_event = &cb->q2spi_cr_header_event;
num_crs = q2spi_cr_hdr_event->byte0_len;
if (q2spi_cr_hdr_event->code == Q2SPI_CR_HEADER_LEN_ZERO ||
@ -744,6 +788,11 @@ void q2spi_gsi_ch_ev_cb(struct dma_chan *ch, struct msm_gpi_cb const *cb, void *
if (cb->cb_event == MSM_GPI_QUP_ERROR)
q2spi->gsi->qup_gsi_global_err = true;
if (cb->cb_event == MSM_GPI_QUP_FW_ERROR) {
q2spi_geni_se_dump_regs(q2spi);
gpi_dump_for_geni(q2spi->gsi->tx_c);
}
if (q2spi->gsi->qup_gsi_err)
Q2SPI_DEBUG(q2spi, "%s set qup_gsi_err\n", __func__);
}

View File

@ -1255,11 +1255,12 @@ q2spi_get_dw_offset(struct q2spi_geni *q2spi, enum cmd_type c_type, unsigned int
return offset;
}
int q2spi_frame_lra(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
int q2spi_frame_lra(struct q2spi_geni *q2spi, struct q2spi_request *q2spi_req_ptr,
struct q2spi_packet **q2spi_pkt_ptr, int vtype)
{
struct q2spi_packet *q2spi_pkt;
struct q2spi_host_variant1_pkt *q2spi_hc_var1;
struct q2spi_request q2spi_req = *q2spi_req_ptr;
int ret;
unsigned int dw_offset = 0;
@ -1288,7 +1289,7 @@ int q2spi_frame_lra(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
sizeof(q2spi_hc_var1->data_buf) : q2spi_req.data_len;
memcpy(q2spi_hc_var1->data_buf, q2spi_req.data_buff, q2spi_req.data_len);
q2spi_kfree(q2spi, q2spi_req.data_buff, __LINE__);
q2spi_req.data_buff = NULL;
q2spi_req_ptr->data_buff = NULL;
}
q2spi_hc_var1->flow = MC_FLOW;
q2spi_hc_var1->interrupt = CLIENT_INTERRUPT;
@ -1324,10 +1325,11 @@ int q2spi_frame_lra(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
return q2spi_hc_var1->flow_id;
}
int q2spi_sma_format(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
int q2spi_sma_format(struct q2spi_geni *q2spi, struct q2spi_request *q2spi_req_ptr,
struct q2spi_packet *q2spi_pkt)
{
struct q2spi_host_variant4_5_pkt *q2spi_hc_var5;
struct q2spi_request q2spi_req = *q2spi_req_ptr;
int ret = 0, flow_id;
if (!q2spi) {
@ -1386,7 +1388,7 @@ int q2spi_sma_format(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
q2spi_dump_ipc(q2spi, "sma format var5 data_buf",
(char *)q2spi_hc_var5->data_buf, q2spi_req.data_len);
q2spi_kfree(q2spi, q2spi_req.data_buff, __LINE__);
q2spi_req.data_buff = NULL;
q2spi_req_ptr->data_buff = NULL;
}
if (q2spi_req.flow_id < Q2SPI_END_TID_ID)
q2spi_hc_var5->flow = MC_FLOW;
@ -1528,7 +1530,7 @@ int q2spi_hrf_sleep(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
Q2SPI_DEBUG(q2spi, "%s hrf_req cmd:%d flow_id:%d data_buff:%p\n",
__func__, q2spi_hrf_req->cmd, q2spi_hrf_req->flow_id, q2spi_hrf_req->data_buff);
ret = q2spi_frame_lra(q2spi, *q2spi_hrf_req, &q2spi_pkt, VARIANT_1_LRA);
ret = q2spi_frame_lra(q2spi, q2spi_hrf_req, &q2spi_pkt, VARIANT_1_LRA);
Q2SPI_DEBUG(q2spi, "%s q2spi_hrf_req:%p q2spi_pkt:%p\n",
__func__, q2spi_hrf_req, q2spi_pkt);
if (ret < 0) {
@ -1562,7 +1564,7 @@ int q2spi_hrf_flow(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
Q2SPI_DEBUG(q2spi, "%s addr:0x%x proto:0x%x data_len:0x%x\n",
__func__, q2spi_req.addr, q2spi_req.proto_ind, q2spi_req.data_len);
ret = q2spi_frame_lra(q2spi, *q2spi_hrf_req, &q2spi_pkt, VARIANT_1_HRF);
ret = q2spi_frame_lra(q2spi, q2spi_hrf_req, &q2spi_pkt, VARIANT_1_HRF);
Q2SPI_DEBUG(q2spi, "%s q2spi_hrf_req:%p q2spi_pkt:%p\n",
__func__, q2spi_hrf_req, q2spi_pkt);
if (ret < 0) {
@ -1571,7 +1573,7 @@ int q2spi_hrf_flow(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
}
q2spi_pkt->flow_id = ret;
ret = q2spi_sma_format(q2spi, q2spi_req, q2spi_pkt);
ret = q2spi_sma_format(q2spi, &q2spi_req, q2spi_pkt);
if (ret < 0) {
Q2SPI_DEBUG(q2spi, "%s Err q2spi_sma_format failed ret:%d\n", __func__, ret);
q2spi_unmap_var_bufs(q2spi, q2spi_pkt);
@ -1658,6 +1660,7 @@ bool q2spi_del_pkt_from_tx_queue(struct q2spi_geni *q2spi, struct q2spi_packet *
/*
* q2spi_add_req_to_tx_queue - Add q2spi packets to tx_queue_list
* @q2spi: pointer to q2spi_geni
* @q2spi_req_ptr: pointer to q2spi_request
* @q2spi_pkt_ptr: pointer to q2spi_packet
*
* This function frames the Q2SPI host request based on request type
@ -1665,10 +1668,11 @@ bool q2spi_del_pkt_from_tx_queue(struct q2spi_geni *q2spi, struct q2spi_packet *
*
* Return: 0 on success. Error code on failure.
*/
int q2spi_add_req_to_tx_queue(struct q2spi_geni *q2spi, struct q2spi_request q2spi_req,
int q2spi_add_req_to_tx_queue(struct q2spi_geni *q2spi, struct q2spi_request *q2spi_req_ptr,
struct q2spi_packet **q2spi_pkt_ptr)
{
struct q2spi_packet *q2spi_pkt = NULL;
struct q2spi_request q2spi_req = *q2spi_req_ptr;
int ret = -EINVAL;
if (q2spi->port_release) {
@ -1679,7 +1683,7 @@ int q2spi_add_req_to_tx_queue(struct q2spi_geni *q2spi, struct q2spi_request q2s
q2spi_tx_queue_status(q2spi);
q2spi_print_req_cmd(q2spi, q2spi_req);
if (q2spi_req.cmd == LOCAL_REG_READ || q2spi_req.cmd == LOCAL_REG_WRITE) {
ret = q2spi_frame_lra(q2spi, q2spi_req, &q2spi_pkt, VARIANT_1_LRA);
ret = q2spi_frame_lra(q2spi, q2spi_req_ptr, &q2spi_pkt, VARIANT_1_LRA);
if (ret < 0) {
Q2SPI_DEBUG(q2spi, "%s Err q2spi_frame_lra failed ret:%d\n",
__func__, ret);
@ -1690,7 +1694,7 @@ int q2spi_add_req_to_tx_queue(struct q2spi_geni *q2spi, struct q2spi_request q2s
q2spi_pkt = q2spi_alloc_q2spi_pkt(q2spi, __LINE__);
if (!q2spi_pkt)
return -ENOMEM;
ret = q2spi_sma_format(q2spi, q2spi_req, q2spi_pkt);
ret = q2spi_sma_format(q2spi, q2spi_req_ptr, q2spi_pkt);
if (ret < 0) {
Q2SPI_DEBUG(q2spi, "%s Err q2spi_sma_format failed ret:%d\n",
__func__, ret);
@ -1977,15 +1981,18 @@ static int q2spi_transfer_with_retries(struct q2spi_geni *q2spi, struct q2spi_re
return ret;
} else if (ret == -ETIMEDOUT) {
/* Upon transfer failure's retry here */
Q2SPI_DEBUG(q2spi, "%s ret:%d retry_count:%d retrying cur_q2spi_pkt:%p\n",
__func__, ret, i + 1, cur_q2spi_pkt);
Q2SPI_DEBUG(q2spi, "%s ret:%d retry_count:%d q2spi_pkt:%p db_pending:%d\n",
__func__, ret, i + 1, cur_q2spi_pkt,
atomic_read(&q2spi->doorbell_pending));
if (q2spi->gsi->qup_gsi_global_err) {
Q2SPI_DEBUG(q2spi, "%s GSI global error, No retry\n", __func__);
ret = -EIO;
goto transfer_exit;
}
if (i == 0) {
if (i == 0 && !atomic_read(&q2spi->doorbell_pending) &&
q2spi->is_start_seq_fail) {
q2spi->is_start_seq_fail = false;
ret = q2spi_wakeup_hw_from_sleep(q2spi);
if (ret) {
Q2SPI_DEBUG(q2spi, "%s Err q2spi_wakeup_hw_from_sleep\n",
@ -2026,7 +2033,7 @@ static int q2spi_transfer_with_retries(struct q2spi_geni *q2spi, struct q2spi_re
q2spi_req.data_buff = data_buf;
}
mutex_lock(&q2spi->queue_lock);
flow_id = q2spi_add_req_to_tx_queue(q2spi, q2spi_req, &cur_q2spi_pkt);
flow_id = q2spi_add_req_to_tx_queue(q2spi, &q2spi_req, &cur_q2spi_pkt);
mutex_unlock(&q2spi->queue_lock);
if (flow_id < 0) {
q2spi_kfree(q2spi, data_buf, __LINE__);
@ -2071,7 +2078,7 @@ void q2spi_transfer_abort(struct q2spi_geni *q2spi)
abort_request.cmd = ABORT;
abort_request.sync = 1;
mutex_lock(&q2spi->queue_lock);
ret = q2spi_add_req_to_tx_queue(q2spi, abort_request, &cur_q2spi_abort_pkt);
ret = q2spi_add_req_to_tx_queue(q2spi, &abort_request, &cur_q2spi_abort_pkt);
mutex_unlock(&q2spi->queue_lock);
if (ret < 0) {
Q2SPI_DEBUG(q2spi, "%s Err q2spi_add_req_to_tx_queue ret:%d\n", __func__, ret);
@ -2104,7 +2111,7 @@ void q2spi_transfer_soft_reset(struct q2spi_geni *q2spi)
soft_reset_request.cmd = SOFT_RESET;
soft_reset_request.sync = 1;
mutex_lock(&q2spi->queue_lock);
ret = q2spi_add_req_to_tx_queue(q2spi, soft_reset_request, &cur_q2spi_sr_pkt);
ret = q2spi_add_req_to_tx_queue(q2spi, &soft_reset_request, &cur_q2spi_sr_pkt);
mutex_unlock(&q2spi->queue_lock);
if (ret < 0) {
Q2SPI_DEBUG(q2spi, "%s Err q2spi_add_req_to_tx_queue ret:%d\n", __func__, ret);
@ -2211,7 +2218,7 @@ static ssize_t q2spi_transfer(struct file *filp, const char __user *buf, size_t
return -EINVAL;
}
q2spi = filp->private_data;
Q2SPI_DEBUG(q2spi, "%s Enter PID=%d\n", __func__, current->pid);
Q2SPI_DEBUG(q2spi, "In %s Enter PID=%d\n", __func__, current->pid);
mutex_lock(&q2spi->port_lock);
ret = q2spi_transfer_check(q2spi, &q2spi_req, buf, len);
@ -2260,12 +2267,15 @@ static ssize_t q2spi_transfer(struct file *filp, const char __user *buf, size_t
pm_runtime_set_suspended(q2spi->dev);
goto err;
}
q2spi->is_start_seq_fail = false;
reinit_completion(&q2spi->wait_comp_start_fail);
Q2SPI_DEBUG(q2spi, "%s PM after get_sync count:%d\n", __func__,
atomic_read(&q2spi->dev->power.usage_count));
q2spi_wait_for_doorbell_setup_ready(q2spi);
mutex_lock(&q2spi->queue_lock);
reinit_completion(&q2spi->sma_wr_comp);
flow_id = q2spi_add_req_to_tx_queue(q2spi, q2spi_req, &cur_q2spi_pkt);
flow_id = q2spi_add_req_to_tx_queue(q2spi, &q2spi_req, &cur_q2spi_pkt);
mutex_unlock(&q2spi->queue_lock);
if (flow_id < 0) {
if (q2spi_req.data_buff)
@ -2278,6 +2288,7 @@ static ssize_t q2spi_transfer(struct file *filp, const char __user *buf, size_t
ret = -ENOMEM;
goto err;
}
Q2SPI_DEBUG(q2spi, "%s flow_id:%d\n", __func__, flow_id);
ret = q2spi_transfer_with_retries(q2spi, q2spi_req, cur_q2spi_pkt, len, flow_id, user_buf);
Q2SPI_DEBUG(q2spi, "%s transfer_with_retries ret:%d\n", __func__, ret);
@ -2775,9 +2786,6 @@ static int q2spi_gsi_submit(struct q2spi_packet *q2spi_pkt)
if (ret) {
Q2SPI_DEBUG(q2spi, "%s Err q2spi_setup_gsi_xfer failed: %d\n", __func__, ret);
atomic_set(&q2spi->sma_wr_pending, 0);
atomic_set(&q2spi->doorbell_pending, 0);
q2spi_geni_se_dump_regs(q2spi);
gpi_dump_for_geni(q2spi->gsi->tx_c);
del_timer_sync(&q2spi->slave_sleep_timer);
goto unmap_buf;
}
@ -2788,9 +2796,6 @@ static int q2spi_gsi_submit(struct q2spi_packet *q2spi_pkt)
Q2SPI_DEBUG(q2spi, "%s PID:%d Err completion timeout: %d\n",
__func__, current->pid, ret);
atomic_set(&q2spi->sma_wr_pending, 0);
atomic_set(&q2spi->doorbell_pending, 0);
q2spi_geni_se_dump_regs(q2spi);
gpi_dump_for_geni(q2spi->gsi->tx_c);
del_timer_sync(&q2spi->slave_sleep_timer);
goto unmap_buf;
}
@ -3093,14 +3098,11 @@ int __q2spi_send_messages(struct q2spi_geni *q2spi, void *ptr)
q2spi_pkt->var5_pkt->flow_id);
}
}
if (!cm_flow_pkt && atomic_read(&q2spi->doorbell_pending)) {
atomic_inc(&q2spi->retry);
Q2SPI_DEBUG(q2spi, "%s doorbell pending retry\n", __func__);
complete_all(&q2spi_pkt->bulk_wait);
q2spi_unmap_var_bufs(q2spi, q2spi_pkt);
ret = -EAGAIN;
goto send_msg_exit;
}
if (!cm_flow_pkt && atomic_read(&q2spi->doorbell_pending))
Q2SPI_DEBUG(q2spi, "%s cm_flow_pkt:%d doorbell_pending:%d\n",
__func__, cm_flow_pkt, atomic_read(&q2spi->doorbell_pending));
ret = q2spi_gsi_submit(q2spi_pkt);
if (ret) {
Q2SPI_DEBUG(q2spi, "%s Err q2spi_gsi_submit failed: %d\n", __func__, ret);
@ -3116,6 +3118,9 @@ int __q2spi_send_messages(struct q2spi_geni *q2spi, void *ptr)
atomic_set(&q2spi->sma_rd_pending, 0);
}
/* add 2msec delay for slave to complete sleep process after it received a sleep packet */
if (q2spi_pkt->is_client_sleep_pkt)
usleep_range(2000, 3000);
send_msg_exit:
mutex_unlock(&q2spi->send_msgs_lock);
if (atomic_read(&q2spi->sma_rd_pending))
@ -3533,7 +3538,7 @@ int q2spi_read_reg(struct q2spi_geni *q2spi, int reg_offset)
q2spi_req.data_len = 4; /* In bytes */
Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p &q2spi_pkt=%p\n", __func__, q2spi_pkt, &q2spi_pkt);
ret = q2spi_frame_lra(q2spi, q2spi_req, &q2spi_pkt, VARIANT_1_LRA);
ret = q2spi_frame_lra(q2spi, &q2spi_req, &q2spi_pkt, VARIANT_1_LRA);
Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p flow_id:%d\n", __func__, q2spi_pkt, ret);
if (ret < 0) {
Q2SPI_DEBUG(q2spi, "Err q2spi_frame_lra failed ret:%d\n", ret);
@ -3593,7 +3598,7 @@ static int q2spi_write_reg(struct q2spi_geni *q2spi, int reg_offset, unsigned lo
q2spi_req.addr = reg_offset;
q2spi_req.data_len = 4;
q2spi_req.data_buff = &data;
ret = q2spi_frame_lra(q2spi, q2spi_req, &q2spi_pkt, VARIANT_1_LRA);
ret = q2spi_frame_lra(q2spi, &q2spi_req, &q2spi_pkt, VARIANT_1_LRA);
if (ret < 0) {
Q2SPI_DEBUG(q2spi, "%s Err q2spi_frame_lra failed ret:%d\n", __func__, ret);
return ret;
@ -3790,7 +3795,7 @@ int q2spi_send_system_mem_access(struct q2spi_geni *q2spi, struct q2spi_packet *
q2spi_req.sync = 0;
while (retries--) {
mutex_lock(&q2spi->queue_lock);
ret = q2spi_add_req_to_tx_queue(q2spi, q2spi_req, q2spi_pkt);
ret = q2spi_add_req_to_tx_queue(q2spi, &q2spi_req, q2spi_pkt);
mutex_unlock(&q2spi->queue_lock);
if (ret == -ENOMEM) {
Q2SPI_DEBUG(q2spi, "%s Err ret:%d\n", __func__, ret);
@ -3858,6 +3863,11 @@ void q2spi_find_pkt_by_flow_id(struct q2spi_geni *q2spi, struct q2spi_cr_packet
if (q2spi_pkt) {
Q2SPI_DEBUG(q2spi, "%s Found q2spi_pkt %p with flow_id %d\n",
__func__, q2spi_pkt, flow_id);
if (!atomic_read(&q2spi->sma_wr_pending)) {
atomic_set(&q2spi->sma_wr_pending, 1);
Q2SPI_DEBUG(q2spi, "%s sma_wr_pending set for prev DB\n", __func__);
}
/* wakeup HRF flow which is waiting for this CR doorbell */
complete_all(&q2spi_pkt->wait_for_db);
return;
@ -4414,6 +4424,7 @@ static int q2spi_geni_probe(struct platform_device *pdev)
atomic_set(&q2spi->sma_rd_pending, 0);
init_completion(&q2spi->sma_wr_comp);
init_completion(&q2spi->sma_rd_comp);
init_completion(&q2spi->wait_comp_start_fail);
/* Pre allocate buffers for transfers */
ret = q2spi_pre_alloc_buffers(q2spi);
@ -4563,8 +4574,6 @@ int q2spi_wakeup_slave_through_gpio(struct q2spi_geni *q2spi)
Q2SPI_DEBUG(q2spi, "%s Sending disconnect doorbell only\n", __func__);
atomic_set(&q2spi->slave_in_sleep, 0);
geni_gsi_disconnect_doorbell_stop_ch(q2spi->gsi->tx_c, true);
q2spi_unmap_doorbell_rx_buf(q2spi);
ret = pinctrl_select_state(q2spi->geni_pinctrl, q2spi->geni_gpio_default);
if (ret) {
@ -4603,9 +4612,10 @@ int q2spi_wakeup_slave_through_gpio(struct q2spi_geni *q2spi)
__func__, ret);
return ret;
}
geni_gsi_ch_start(q2spi->gsi->tx_c);
ret = q2spi_map_doorbell_rx_buf(q2spi);
/* add necessary delay to wake up the soc */
usleep_range(5000, 6000);
gpi_q2spi_terminate_all(q2spi->gsi->tx_c);
return ret;
}
@ -4624,34 +4634,42 @@ int q2spi_put_slave_to_sleep(struct q2spi_geni *q2spi)
Q2SPI_DEBUG(q2spi, "%s: PID=%d q2spi_sleep_cmd_enable:%d\n",
__func__, current->pid, q2spi->q2spi_sleep_cmd_enable);
if (!q2spi->q2spi_sleep_cmd_enable)
return 0;
if (atomic_read(&q2spi->slave_in_sleep)) {
Q2SPI_DEBUG(q2spi, "%s: Client in sleep\n", __func__);
return 0;
}
if (mutex_is_locked(&q2spi->port_lock) || q2spi->port_release) {
Q2SPI_DEBUG(q2spi, "%s: port_lock acquired or release is in progress\n", __func__);
return 0;
}
mutex_lock(&q2spi->queue_lock);
if (atomic_read(&q2spi->slave_in_sleep)) {
Q2SPI_DEBUG(q2spi, "%s: Client in sleep\n", __func__);
mutex_unlock(&q2spi->queue_lock);
return 0;
}
atomic_set(&q2spi->slave_in_sleep, 1);
q2spi_req.cmd = Q2SPI_HRF_SLEEP_CMD;
q2spi_req.sync = 1;
mutex_lock(&q2spi->queue_lock);
ret = q2spi_add_req_to_tx_queue(q2spi, q2spi_req, &q2spi_pkt);
ret = q2spi_add_req_to_tx_queue(q2spi, &q2spi_req, &q2spi_pkt);
mutex_unlock(&q2spi->queue_lock);
if (ret < 0) {
Q2SPI_DEBUG(q2spi, "%s Err failed ret:%d\n", __func__, ret);
goto err;
atomic_set(&q2spi->slave_in_sleep, 0);
return ret;
}
Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p tid:%d\n", __func__, q2spi_pkt, q2spi_pkt->xfer->tid);
q2spi_pkt->is_client_sleep_pkt = true;
ret = __q2spi_transfer(q2spi, q2spi_req, q2spi_pkt, 0);
if (ret) {
Q2SPI_DEBUG(q2spi, "%s __q2spi_transfer q2spi_pkt:%p ret%d\n",
__func__, q2spi_pkt, ret);
atomic_set(&q2spi->slave_in_sleep, 0);
Q2SPI_DEBUG(q2spi, "%s q2spi_pkt:%p ret: %d\n", __func__, q2spi_pkt, ret);
if (ret == -ETIMEDOUT)
gpi_q2spi_terminate_all(q2spi->gsi->tx_c);
if (q2spi->port_release) {
Q2SPI_DEBUG(q2spi, "%s Err Port in closed state, return\n", __func__);
return -ENOENT;
@ -4661,12 +4679,8 @@ int q2spi_put_slave_to_sleep(struct q2spi_geni *q2spi)
q2spi_free_xfer_tid(q2spi, q2spi_pkt->xfer->tid);
q2spi_del_pkt_from_tx_queue(q2spi, q2spi_pkt);
q2spi_free_q2spi_pkt(q2spi_pkt, __LINE__);
atomic_set(&q2spi->slave_in_sleep, 1);
/* add 2msec delay for slave to process the sleep packet */
usleep_range(2000, 3000);
Q2SPI_DEBUG(q2spi, "%s: PID=%d End slave_in_sleep:%d\n", __func__, current->pid,
atomic_read(&q2spi->slave_in_sleep));
err:
return ret;
}
@ -4751,6 +4765,7 @@ static int q2spi_geni_runtime_resume(struct device *dev)
Q2SPI_DEBUG(q2spi, "%s Failed to set IRQ wake\n", __func__);
geni_gsi_ch_start(q2spi->gsi->tx_c);
geni_gsi_connect_doorbell(q2spi->gsi->tx_c);
/* Clear is_suspend to map doorbell buffers */
atomic_set(&q2spi->is_suspend, 0);

View File

@ -133,6 +133,18 @@
#define SE_SPI_RX_TRANS_LEN 0x270
#define TRANS_LEN_MSK GENMASK(23, 0)
/* GENI General Purpose Interrupt Status */
#define M_GP_IRQ_ERR_START_BIT 5
#define M_GP_IRQ_MASK GENMASK(12, 5)
#define Q2SPI_PWR_ON_NACK BIT(0)
#define Q2SPI_HDR_FAIL BIT(1)
#define Q2SPI_HCR_FAIL BIT(2)
#define Q2SPI_CHECKSUM_FAIL BIT(3)
#define Q2SPI_START_SEQ_TIMEOUT BIT(4)
#define Q2SPI_STOP_SEQ_TIMEOUT BIT(5)
#define Q2SPI_WAIT_PHASE_TIMEOUT BIT(6)
#define Q2SPI_CLIENT_EN_NOT_DETECTED BIT(7)
/* HRF FLOW Info */
#define HRF_ENTRY_OPCODE 3
#define HRF_ENTRY_TYPE 3
@ -516,6 +528,8 @@ struct q2spi_dma_transfer {
* @q2spi_cr_txn_err: reflects Q2SPI_CR_TRANSACTION_ERROR in CR body
* @q2spi_sleep_cmd_enable: reflects start sending the sleep command to slave
* @q2spi_cr_hdr_err: reflects CR Header incorrect in CR Header
* @is_start_seq_fail: set when the start sequence fails because the slave is not responding
* @wait_comp_start_fail: completion for transfer callback during start sequence failure
*/
struct q2spi_geni {
struct device *wrapper_dev;
@ -622,6 +636,8 @@ struct q2spi_geni {
bool q2spi_cr_txn_err;
bool q2spi_sleep_cmd_enable;
bool q2spi_cr_hdr_err;
bool is_start_seq_fail;
struct completion wait_comp_start_fail;
};
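/*
 * Not part of the diff: how the two members added above work together.
 * q2spi_transfer() clears is_start_seq_fail and reinits
 * wait_comp_start_fail before queuing a transfer; the GSI TX error
 * callback sets the flag and completes wait_comp_start_fail when
 * Q2SPI_START_SEQ_TIMEOUT is reported; check_gsi_transfer_completion()
 * then waits on the completion and skips gpi_q2spi_terminate_all() for
 * start sequence failures, while the retry path wakes the slave via
 * q2spi_wakeup_hw_from_sleep() instead of blindly retrying.
 */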
/**

Some files were not shown because too many files have changed in this diff.