Add 'qcom/opensource/securemsm-kernel/' from commit 'a6005ceed271246683596608e4c56b4d921fb363'

git-subtree-dir: qcom/opensource/securemsm-kernel
git-subtree-mainline: 46e9caf0d0
git-subtree-split: a6005ceed2
Change-Id:
repo: https://git.codelinaro.org/clo/la/platform/vendor/qcom/opensource/securemsm-kernel
tag: LA.VENDOR.14.3.0.r1-17300-lanai.QSSI15.0
Committed by David Wronek on 2024-10-06 16:45:20 +02:00 (commit 587685c687).
92 changed files with 45193 additions and 0 deletions

@@ -0,0 +1,99 @@
headers_src = [
"include/uapi/linux/smc*ke.h",
"include/linux/smc*_object.h",
"include/linux/IClientE*v.h",
"include/linux/smc*_clientenv.h",
]
smcinvoke_headers_out = [
"include/linux/smcinvoke.h",
"include/linux/smcinvoke_object.h",
"include/linux/smci_object.h",
"include/linux/IClientEnv.h",
"include/linux/smci_clientenv.h",
]
smcinvoke_kernel_headers_verbose = "--verbose "
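// The genrule below runs the kernel's headers_install.sh (driven by
// ssg_kernel_headers.py) to sanitize the UAPI headers in headers_src into
// the exported list in smcinvoke_headers_out.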
genrule {
name: "qti_generate_smcinvoke_kernel_headers",
tools: ["headers_install.sh",
"unifdef"
],
tool_files: [
"ssg_kernel_headers.py",
],
srcs: headers_src,
cmd: "python3 -u $(location ssg_kernel_headers.py) " +
smcinvoke_kernel_headers_verbose +
"--header_arch arm64 " +
"--gen_dir $(genDir) " +
"--smcinvoke_headers_to_expose $(locations include/uapi/linux/smc*ke.h) $(locations include/linux/smc*_object.h) $(locations include/linux/IClientE*v.h) $(locations include/linux/smc*_clientenv.h) " +
"--unifdef $(location unifdef) " +
"--headers_install $(location headers_install.sh)",
out: smcinvoke_headers_out,
}
cc_library_headers {
name: "smcinvoke_kernel_headers",
export_include_dirs: ["."] + ["include"] + ["include/uapi"],
generated_headers: ["qti_generate_smcinvoke_kernel_headers"],
export_generated_headers: ["qti_generate_smcinvoke_kernel_headers"],
vendor: true,
recovery_available: true
}
qseecom_headers_src = [
"include/uapi/linux/qsee*om.h",
"include/uapi/linux/qsee*api.h",
]
qseecom_headers_out = [
"include/linux/qseecom.h",
"include/linux/qseecom_api.h",
]
qseecom_kernel_headers_verbose = "--verbose "
genrule {
name: "qti_generate_qseecom_kernel_headers",
tools: ["headers_install.sh",
"unifdef"
],
tool_files: [
"ssg_kernel_headers.py",
],
srcs: qseecom_headers_src,
cmd: "python3 -u $(location ssg_kernel_headers.py) " +
qseecom_kernel_headers_verbose +
"--header_arch arm64 " +
"--gen_dir $(genDir) " +
"--smcinvoke_headers_to_expose $(locations include/uapi/linux/qsee*om.h) $(locations include/uapi/linux/qsee*api.h) " +
"--unifdef $(location unifdef) " +
"--headers_install $(location headers_install.sh)",
out: qseecom_headers_out,
}
cc_library_headers {
name: "qseecom_kernel_headers",
export_include_dirs: ["."] + ["include"] + ["include/uapi"],
generated_headers: ["qti_generate_qseecom_kernel_headers"],
export_generated_headers: ["qti_generate_qseecom_kernel_headers"],
vendor: true,
recovery_available: true
}
cc_library_headers {
name: "smmu_proxy_uapi_header",
vendor_available: true,
export_include_dirs: ["smmu-proxy/include/uapi/"],
}
cc_library_headers {
name: "securemsm_kernel_uapi_headers",
vendor_available: true,
host_supported: true,
export_include_dirs: ["include/uapi"],
}

@@ -0,0 +1,181 @@
# Android makefile for securemsm kernel modules
ENABLE_SECUREMSM_DLKM := true
ENABLE_SECUREMSM_QTEE_DLKM := true
ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
ifeq ($(TARGET_KERNEL_DLKM_SECURE_MSM_OVERRIDE),false)
ENABLE_SECUREMSM_DLKM := false
endif
ifeq ($(TARGET_KERNEL_DLKM_SECUREMSM_QTEE_OVERRIDE),false)
ENABLE_SECUREMSM_QTEE_DLKM := false
endif
endif
ifeq ($(ENABLE_SECUREMSM_DLKM), true)
ENABLE_QCRYPTO_DLKM := true
ENABLE_HDCP_QSEECOM_DLKM := true
ENABLE_QRNG_DLKM := true
ifeq ($(TARGET_USES_SMMU_PROXY), true)
ENABLE_SMMU_PROXY := true
endif #TARGET_USES_SMMU_PROXY
endif #ENABLE_SECUREMSM_DLKM
ifeq ($(ENABLE_SECUREMSM_QTEE_DLKM), true)
ENABLE_SMCINVOKE_DLKM := true
ENABLE_TZLOG_DLKM := true
#Enable Qseecom if TARGET_ENABLE_QSEECOM or TARGET_BOARD_AUTO is set to true
ifneq (, $(filter true, $(TARGET_ENABLE_QSEECOM) $(TARGET_BOARD_AUTO)))
ENABLE_QSEECOM_DLKM := true
endif #TARGET_ENABLE_QSEECOM OR TARGET_BOARD_AUTO
endif #ENABLE_SECUREMSM_QTEE_DLKM
ifeq ($(TARGET_USES_GY), true)
ENABLE_QCRYPTO_DLKM := false
ENABLE_HDCP_QSEECOM_DLKM := false
ENABLE_QRNG_DLKM := false
ENABLE_SMMU_PROXY := false
ENABLE_SMCINVOKE_DLKM := true
ENABLE_TZLOG_DLKM := false
ENABLE_QSEECOM_DLKM := false
endif #TARGET_USES_GY
LOCAL_PATH := $(call my-dir)
VENDOR_OPENSOURCE_DIR ?= vendor/qcom/opensource
VENDOR_COMMON_DIR ?= device/qcom/common
DLKM_DIR := $(TOP)/$(VENDOR_COMMON_DIR)/dlkm
SEC_KERNEL_DIR := $(TOP)/$(VENDOR_OPENSOURCE_DIR)/securemsm-kernel
LOCAL_EXPORT_KO_INCLUDE_DIRS := $(LOCAL_PATH)/include/ \
$(LOCAL_PATH)/include/uapi
SSG_SRC_FILES := \
$(wildcard $(LOCAL_PATH)/*) \
$(wildcard $(LOCAL_PATH)/*/*) \
$(wildcard $(LOCAL_PATH)/*/*/*) \
$(wildcard $(LOCAL_PATH)/*/*/*/*)
LOCAL_MODULE_DDK_BUILD := true
# This is set once per LOCAL_PATH, not per (kernel) module
KBUILD_OPTIONS := SSG_ROOT=$(SEC_KERNEL_DIR)
KBUILD_OPTIONS += BOARD_PLATFORM=$(TARGET_BOARD_PLATFORM)
CONDITIONAL_FLAGS := $(ENABLE_SECUREMSM_QTEE_DLKM) $(ENABLE_SECUREMSM_DLKM)
ifneq (0, $(words $(filter true, $(CONDITIONAL_FLAGS))))
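# $(words $(filter true, ...)) counts how many of the flags are "true";
# the shared Module.symvers below is built when at least one is.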
include $(CLEAR_VARS)
# For incremental compilation
LOCAL_SRC_FILES := $(SSG_SRC_FILES)
LOCAL_MODULE := sec-module-symvers
LOCAL_MODULE_STEM := Module.symvers
LOCAL_MODULE_KBUILD_NAME := Module.symvers
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
endif
ifeq ($(ENABLE_SMCINVOKE_DLKM), true)
include $(CLEAR_VARS)
#LOCAL_SRC_FILES := $(SSG_SRC_FILES)
LOCAL_MODULE := smcinvoke_dlkm.ko
LOCAL_MODULE_KBUILD_NAME := smcinvoke_dlkm.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_HEADER_LIBRARIES := smcinvoke_kernel_headers
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
endif #ENABLE_SMCINVOKE_DLKM
###################################################
###################################################
ifeq ($(ENABLE_TZLOG_DLKM), true)
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(SSG_SRC_FILES)
LOCAL_MODULE := tz_log_dlkm.ko
LOCAL_MODULE_KBUILD_NAME := tz_log_dlkm.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
endif #ENABLE_TZLOG_DLKM
ifeq ($(ENABLE_QSEECOM_DLKM), true)
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(SSG_SRC_FILES)
LOCAL_MODULE := qseecom_dlkm.ko
LOCAL_MODULE_KBUILD_NAME := qseecom_dlkm.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
endif #ENABLE_QSEECOM_DLKM
###################################################
###################################################
ifeq ($(ENABLE_QCRYPTO_DLKM), true)
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(SSG_SRC_FILES)
LOCAL_MODULE := qce50_dlkm.ko
LOCAL_MODULE_KBUILD_NAME := qce50_dlkm.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###################################################
###################################################
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(SSG_SRC_FILES)
LOCAL_MODULE := qcedev-mod_dlkm.ko
LOCAL_MODULE_KBUILD_NAME := qcedev-mod_dlkm.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
###################################################
###################################################
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(SSG_SRC_FILES)
LOCAL_MODULE := qcrypto-msm_dlkm.ko
LOCAL_MODULE_KBUILD_NAME := qcrypto-msm_dlkm.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
endif #ENABLE_QCRYPTO_DLKM
###################################################
###################################################
ifeq ($(ENABLE_HDCP_QSEECOM_DLKM), true)
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(SSG_SRC_FILES)
LOCAL_MODULE := hdcp_qseecom_dlkm.ko
LOCAL_MODULE_KBUILD_NAME := hdcp_qseecom_dlkm.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
endif #ENABLE_HDCP_QSEECOM_DLKM
###################################################
###################################################
ifeq ($(ENABLE_QRNG_DLKM), true)
include $(CLEAR_VARS)
LOCAL_SRC_FILES := $(SSG_SRC_FILES)
LOCAL_MODULE := qrng_dlkm.ko
LOCAL_MODULE_KBUILD_NAME := qrng_dlkm.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
endif #ENABLE_QRNG_DLKM
###################################################
###################################################
ifeq ($(ENABLE_SMMU_PROXY), true)
include $(CLEAR_VARS)
#LOCAL_SRC_FILES := $(SSG_SRC_FILES)
LOCAL_EXPORT_KO_INCLUDE_DIRS := $(LOCAL_PATH)/smmu-proxy/ $(LOCAL_PATH)/
LOCAL_MODULE := smmu_proxy_dlkm.ko
LOCAL_MODULE_KBUILD_NAME := smmu_proxy_dlkm.ko
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_DEBUG_ENABLE := true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
include $(DLKM_DIR)/Build_external_kernelmodule.mk
endif #ENABLE_SMMU_PROXY

@@ -0,0 +1,92 @@
package(
default_visibility = [
"//visibility:public",
],
)
load("//build/kernel/kleaf:kernel.bzl", "ddk_headers")
ddk_headers(
name = "smcinvoke_kernel_headers",
hdrs = glob([
"include/linux/smcinvoke*.h",
"include/linux/smci_o*.h",
"include/uapi/linux/smcinvoke*.h",
"include/linux/IClientE*.h",
"include/linux/smci_c*.h",
"include/smci/interface/IOpener.h",
"include/smci/interface/smci_opener.h",
"include/linux/ITrustedCameraDriver.h",
"include/linux/CTrustedCameraDriver.h",
]),
includes = [
"include",
"include/linux",
"linux",
"include/uapi/linux",
],
)
ddk_headers(
name = "qseecom_kernel_headers",
hdrs = glob([
"include/uapi/linux/qseecom.h",
"include/uapi/linux/qseecom_api.h",
"linux/misc/qseecom_kernel.h",
"linux/misc/qseecom_priv.h",
"linux/misc/qseecomi.h",
]),
includes = ["linux", "include/uapi", "include/uapi/linux"]
)
ddk_headers(
name = "hdcp_qseecom_dlkm",
hdrs = [":smcinvoke_kernel_headers"] + glob([
"linux/*.h",
"include/linux/*.h",
"include/smci/uid/*.h",
"include/smci/interface/*.h",
"linux/misc/*.h",
"config/*.h",
]),
includes = [
".",
"config",
"include",
"linux",
],
)
ddk_headers(
name = "qcedev_local_headers",
hdrs = glob([
"include/uapi/linux/*.h",
"crypto-qti/*.h"
]),
includes = ["include/uapi", "include/uapi/linux", "crypto-qti"]
)
ddk_headers(
name = "smmu_proxy_headers",
hdrs = glob([
"smmu-proxy/*.h",
"smmu-proxy/linux/*.h",
"smmu-proxy/include/uapi/linux/*.h"
]),
includes = [".", "smmu-proxy"],
)
load(":build/pineapple.bzl", "define_pineapple")
load(":build/anorak.bzl", "define_anorak")
load(":build/blair.bzl", "define_blair")
load(":build/sun.bzl", "define_sun")
load(":build/niobe.bzl", "define_niobe")
load(":build/monaco.bzl", "define_monaco")
define_pineapple()
define_anorak()
define_blair()
define_niobe()
define_monaco()
define_sun()
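# Each define_<target>() macro (defined in build/<target>.bzl) instantiates
# the per-chipset DLKM list via define_consolidate_gki_modules().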

@@ -0,0 +1,58 @@
LINUXINCLUDE += -I$(SSG_MODULE_ROOT)/ \
-I$(SSG_MODULE_ROOT)/linux/ \
-I$(SSG_MODULE_ROOT)/include/linux/ \
-I$(SSG_MODULE_ROOT)/include/uapi/ \
-I$(SSG_MODULE_ROOT)/include/uapi/linux/
ifneq ($(CONFIG_ARCH_QTI_VM), y)
LINUXINCLUDE += -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig.h
include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig.conf
endif
#Enable QSEECOM if CONFIG_QTI_QUIN_GVM, CONFIG_ARCH_KHAJE, CONFIG_ARCH_SA8155, CONFIG_ARCH_BLAIR or CONFIG_ARCH_SA6155 is set to y
ifneq (, $(filter y, $(CONFIG_QTI_QUIN_GVM) $(CONFIG_ARCH_KHAJE) $(CONFIG_ARCH_SA8155) $(CONFIG_ARCH_BLAIR) $(CONFIG_ARCH_SA6155)))
include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_qseecom.conf
LINUXINCLUDE += -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_qseecom.h
else
LINUXINCLUDE += -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_qseecom_compat.h
endif
obj-$(CONFIG_QSEECOM) += qseecom_dlkm.o
qseecom_dlkm-objs := qseecom/qseecom.o
include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_smcinvoke.conf
LINUXINCLUDE += -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_smcinvoke.h
obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke_dlkm.o
smcinvoke_dlkm-objs := smcinvoke/smcinvoke_kernel.o smcinvoke/smcinvoke.o
obj-$(CONFIG_QTI_TZ_LOG) += tz_log_dlkm.o
tz_log_dlkm-objs := tz_log/tz_log.o
obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qce50_dlkm.o
qce50_dlkm-objs := crypto-qti/qce50.o
obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev-mod_dlkm.o
qcedev-mod_dlkm-objs := crypto-qti/qcedev.o crypto-qti/qcedev_smmu.o
obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto-msm_dlkm.o
qcrypto-msm_dlkm-objs := crypto-qti/qcrypto.o
obj-$(CONFIG_HDCP_QSEECOM) += hdcp_qseecom_dlkm.o
hdcp_qseecom_dlkm-objs := hdcp/hdcp_main.o hdcp/hdcp_smcinvoke.o hdcp/hdcp_qseecom.o
obj-$(CONFIG_HW_RANDOM_MSM_LEGACY) += qrng_dlkm.o
qrng_dlkm-objs := qrng/msm_rng.o
ifneq (, $(filter y, $(ARCH_QTI_VM) $(CONFIG_ARCH_PINEAPPLE) $(CONFIG_ARCH_SUN) $(CONFIG_ARCH_NIOBE) $(CONFIG_ARCH_ANORAK)))
include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_smmu_proxy.conf
LINUXINCLUDE += -include $(SSG_MODULE_ROOT)/config/sec-kernel_defconfig_smmu_proxy.h
obj-$(CONFIG_QTI_SMMU_PROXY) += smmu_proxy_dlkm.o
smmu_proxy_dlkm-objs := smmu-proxy/qti-smmu-proxy-common.o
ifneq ($(CONFIG_ARCH_QTI_VM), y)
smmu_proxy_dlkm-objs += smmu-proxy/qti-smmu-proxy-pvm.o
else
smmu_proxy_dlkm-objs += smmu-proxy/qti-smmu-proxy-tvm.o
endif
endif

@@ -0,0 +1,12 @@
M=$(PWD)
SSG_MODULE_ROOT=$(KERNEL_SRC)/$(M)
INC=-I/$(M)/linux/*
KBUILD_OPTIONS+=SSG_MODULE_ROOT=$(SSG_MODULE_ROOT)
all: modules
clean:
rm -f *.cmd *.d *.mod *.o *.ko *.mod.c *.mod.o Module.symvers modules.order
%:
$(MAKE) -C $(KERNEL_SRC) M=$(M) $(INC) $@ $(KBUILD_OPTIONS)
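# Illustrative out-of-tree invocation: the Android DLKM build runs
# `make -C $(KERNEL_SRC) M=<this dir> modules SSG_MODULE_ROOT=<this dir>`
# through the catch-all `%` rule above.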

@@ -0,0 +1,21 @@
load(":securemsm_kernel.bzl", "define_consolidate_gki_modules")
def define_anorak():
define_consolidate_gki_modules(
target = "anorak",
modules = [
"smcinvoke_dlkm",
"tz_log_dlkm",
"hdcp_qseecom_dlkm",
"qce50_dlkm",
"qcedev-mod_dlkm",
"qrng_dlkm",
"qcrypto-msm_dlkm",
"smmu_proxy_dlkm",
"qseecom_dlkm"
],
extra_options = [
"CONFIG_QCOM_SMCINVOKE",
"CONFIG_QSEECOM_COMPAT",
],
)

@@ -0,0 +1,21 @@
load(":securemsm_kernel.bzl", "define_consolidate_gki_modules")
def define_blair():
define_consolidate_gki_modules(
target = "blair",
modules = [
"smcinvoke_dlkm",
"tz_log_dlkm",
"hdcp_qseecom_dlkm",
"qce50_dlkm",
"qcedev-mod_dlkm",
"qrng_dlkm",
"qcrypto-msm_dlkm",
"smmu_proxy_dlkm",
"qseecom_dlkm"
],
extra_options = [
"CONFIG_QCOM_SMCINVOKE",
"CONFIG_QSEECOM",
],
)

@@ -0,0 +1,20 @@
load(":securemsm_kernel.bzl", "define_consolidate_gki_modules")
def define_monaco():
define_consolidate_gki_modules(
target = "monaco",
modules = [
"smcinvoke_dlkm",
"tz_log_dlkm",
"hdcp_qseecom_dlkm",
"qce50_dlkm",
"qcedev-mod_dlkm",
"qrng_dlkm",
"qcrypto-msm_dlkm",
"qseecom_dlkm"
],
extra_options = [
"CONFIG_QCOM_SMCINVOKE",
"CONFIG_QSEECOM_COMPAT",
],
)

@@ -0,0 +1,21 @@
load(":securemsm_kernel.bzl", "define_consolidate_gki_modules")
def define_niobe():
define_consolidate_gki_modules(
target = "niobe",
modules = [
"smcinvoke_dlkm",
"tz_log_dlkm",
"hdcp_qseecom_dlkm",
"qce50_dlkm",
"qcedev-mod_dlkm",
"qrng_dlkm",
"qcrypto-msm_dlkm",
"smmu_proxy_dlkm",
"qseecom_dlkm"
],
extra_options = [
"CONFIG_QCOM_SMCINVOKE",
"CONFIG_QSEECOM_COMPAT",
],
)

@@ -0,0 +1,21 @@
load(":securemsm_kernel.bzl", "define_consolidate_gki_modules")
def define_pineapple():
define_consolidate_gki_modules(
target = "pineapple",
modules = [
"smcinvoke_dlkm",
"tz_log_dlkm",
"hdcp_qseecom_dlkm",
"qce50_dlkm",
"qcedev-mod_dlkm",
"qrng_dlkm",
"qcrypto-msm_dlkm",
"smmu_proxy_dlkm",
"qseecom_dlkm"
],
extra_options = [
"CONFIG_QCOM_SMCINVOKE",
"CONFIG_QSEECOM_COMPAT",
],
)

@@ -0,0 +1,21 @@
load(":securemsm_kernel.bzl", "define_consolidate_gki_modules")
def define_sun():
define_consolidate_gki_modules(
target = "sun",
modules = [
"smcinvoke_dlkm",
"tz_log_dlkm",
"qseecom_dlkm",
"hdcp_qseecom_dlkm",
"qce50_dlkm",
"qcedev-mod_dlkm",
"qrng_dlkm",
"qcrypto-msm_dlkm",
"smmu_proxy_dlkm"
],
extra_options = [
"CONFIG_QCOM_SMCINVOKE",
"CONFIG_QSEECOM_COMPAT",
],
)

@@ -0,0 +1,7 @@
export CONFIG_QTI_TZ_LOG=m
export CONFIG_CRYPTO_DEV_QCEDEV=m
export CONFIG_CRYPTO_DEV_QCRYPTO=m
export CONFIG_HDCP_QSEECOM=m
export CONFIG_HW_RANDOM_MSM_LEGACY=m
export CONFIG_QSEECOM_PROXY=m
export CONFIG_QSEECOM=m

@@ -0,0 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define CONFIG_QTI_TZ_LOG 1
#define CONFIG_CRYPTO_DEV_QCEDEV 1
#define CONFIG_CRYPTO_DEV_QCRYPTO 1
#define CONFIG_HDCP_QSEECOM 1
#define CONFIG_HW_RANDOM_MSM_LEGACY 1

@@ -0,0 +1 @@
export CONFIG_QTI_CRYPTO_FDE=m

@@ -0,0 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define CONFIG_QSEECOM 1

@@ -0,0 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define CONFIG_QSEECOM_COMPAT 1

@@ -0,0 +1 @@
export CONFIG_QCOM_SMCINVOKE=m

@@ -0,0 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define CONFIG_QCOM_SMCINVOKE 1

@@ -0,0 +1 @@
export CONFIG_QTI_SMMU_PROXY=m

@@ -0,0 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define CONFIG_QTI_SMMU_PROXY 1

@@ -0,0 +1,39 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _FIPS_STATUS__H
#define _FIPS_STATUS__H
#include <linux/types.h>
#include <linux/ioctl.h>
/**
* enum fips_status - global FIPS140-2 status
* @FIPS140_STATUS_NA:
* Not a FIPS140-2 compliant build. The status will not
* change over the lifetime of such a build.
* @FIPS140_STATUS_PASS_CRYPTO:
* KAT self tests have passed.
* @FIPS140_STATUS_QCRYPTO_ALLOWED:
* The integrity test has passed.
* @FIPS140_STATUS_PASS:
* All tests have passed and the build is in FIPS140-2 mode.
* @FIPS140_STATUS_FAIL:
* One of the tests has failed. This blocks all requests
* to the crypto modules.
*/
enum fips_status {
FIPS140_STATUS_NA = 0,
FIPS140_STATUS_PASS_CRYPTO = 1,
FIPS140_STATUS_QCRYPTO_ALLOWED = 2,
FIPS140_STATUS_PASS = 3,
FIPS140_STATUS_FAIL = 0xFF
};
#endif /* _FIPS_STATUS__H */
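How a consumer might gate traffic on this enum, as the FIPS140_STATUS_FAIL
documentation implies; a minimal sketch in which fips140_get_status() is a
hypothetical query helper, not part of this header:

/* hypothetical helper; the real status source lives in the qcrypto driver */
extern enum fips_status fips140_get_status(void);

static bool crypto_requests_allowed(void)
{
	enum fips_status s = fips140_get_status();

	/* FIPS140_STATUS_FAIL blocks all requests to the crypto modules */
	return s != FIPS140_STATUS_FAIL;
}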

@@ -0,0 +1,999 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* QTI Over the Air (OTA) Crypto driver
*
* Copyright (c) 2010-2014,2017-2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/cache.h>
#include <linux/version.h>
#include "linux/qcota.h"
#include "qce.h"
#include "qce_ota.h"
enum qce_ota_oper_enum {
QCE_OTA_F8_OPER = 0,
QCE_OTA_MPKT_F8_OPER = 1,
QCE_OTA_F9_OPER = 2,
QCE_OTA_VAR_MPKT_F8_OPER = 3,
QCE_OTA_OPER_LAST
};
struct ota_dev_control;
struct ota_async_req {
struct list_head rlist;
struct completion complete;
int err;
enum qce_ota_oper_enum op;
union {
struct qce_f9_req f9_req;
struct qce_f8_req f8_req;
struct qce_f8_multi_pkt_req f8_mp_req;
struct qce_f8_variable_multi_pkt_req f8_v_mp_req;
} req;
unsigned int steps;
struct ota_qce_dev *pqce;
};
/*
* Register ourselves as a char device /dev/qcota0 so the OTA crypto engine
* can be accessed from userspace.
*/
#define QCOTA_DEV "qcota0"
struct ota_dev_control {
/* char device */
struct cdev cdev;
int minor;
struct list_head ready_commands;
unsigned int magic;
struct list_head qce_dev;
spinlock_t lock;
struct mutex register_lock;
bool registered;
uint32_t total_units;
};
struct ota_qce_dev {
struct list_head qlist;
/* qce handle */
void *qce;
/* platform device */
struct platform_device *pdev;
struct ota_async_req *active_command;
struct tasklet_struct done_tasklet;
struct ota_dev_control *podev;
uint32_t unit;
u64 total_req;
u64 err_req;
};
#define OTA_MAGIC 0x4f544143
static long qcota_ioctl(struct file *file,
unsigned int cmd, unsigned long arg);
static int qcota_open(struct inode *inode, struct file *file);
static int qcota_release(struct inode *inode, struct file *file);
static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq);
static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv, int ret);
static const struct file_operations qcota_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = qcota_ioctl,
.open = qcota_open,
.release = qcota_release,
};
static struct ota_dev_control qcota_dev = {
.magic = OTA_MAGIC,
};
static dev_t qcota_device_no;
static struct class *driver_class;
static struct device *class_dev;
#define DEBUG_MAX_FNAME 16
#define DEBUG_MAX_RW_BUF 1024
struct qcota_stat {
u64 f8_req;
u64 f8_mp_req;
u64 f8_v_mp_req;
u64 f9_req;
u64 f8_op_success;
u64 f8_op_fail;
u64 f8_mp_op_success;
u64 f8_mp_op_fail;
u64 f8_v_mp_op_success;
u64 f8_v_mp_op_fail;
u64 f9_op_success;
u64 f9_op_fail;
};
static struct qcota_stat _qcota_stat;
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static int _debug_qcota;
static struct ota_dev_control *qcota_control(void)
{
return &qcota_dev;
}
static int qcota_open(struct inode *inode, struct file *file)
{
struct ota_dev_control *podev;
podev = qcota_control();
if (podev == NULL) {
pr_err("%s: no such device %d\n", __func__,
MINOR(inode->i_rdev));
return -ENOENT;
}
file->private_data = podev;
return 0;
}
static int qcota_release(struct inode *inode, struct file *file)
{
struct ota_dev_control *podev;
podev = file->private_data;
if (podev != NULL && podev->magic != OTA_MAGIC) {
pr_err("%s: invalid handle %pK\n",
__func__, podev);
}
file->private_data = NULL;
return 0;
}
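/*
* Advance a variable multi-packet F8 request to its next packet. Returns
* false when all packets are done or the request has errored. Packet
* buffers are packed back to back in one kernel buffer, each aligned to
* an L1 cache line.
*/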
static bool _next_v_mp_req(struct ota_async_req *areq)
{
unsigned char *p;
if (areq->err)
return false;
if (++areq->steps >= areq->req.f8_v_mp_req.num_pkt)
return false;
p = areq->req.f8_v_mp_req.qce_f8_req.data_in;
p += areq->req.f8_v_mp_req.qce_f8_req.data_len;
p = (uint8_t *) ALIGN(((uintptr_t)p), L1_CACHE_BYTES);
areq->req.f8_v_mp_req.qce_f8_req.data_out = p;
areq->req.f8_v_mp_req.qce_f8_req.data_in = p;
areq->req.f8_v_mp_req.qce_f8_req.data_len =
areq->req.f8_v_mp_req.cipher_iov[areq->steps].size;
areq->req.f8_v_mp_req.qce_f8_req.count_c++;
return true;
}
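/*
* Done-tasklet handler: retires the finished request, advances a variable
* multi-packet F8 request to its next sub-command, and otherwise starts
* the next command from the ready queue.
*/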
static void req_done(unsigned long data)
{
struct ota_qce_dev *pqce = (struct ota_qce_dev *)data;
struct ota_dev_control *podev = pqce->podev;
struct ota_async_req *areq;
unsigned long flags;
struct ota_async_req *new_req = NULL;
int ret = 0;
bool schedule = true;
spin_lock_irqsave(&podev->lock, flags);
areq = pqce->active_command;
if (unlikely(areq == NULL))
pr_err("ota_crypto: %s, no active request\n", __func__);
else if (areq->op == QCE_OTA_VAR_MPKT_F8_OPER) {
if (_next_v_mp_req(areq)) {
/* execute next subcommand */
spin_unlock_irqrestore(&podev->lock, flags);
ret = start_req(pqce, areq);
if (unlikely(ret)) {
areq->err = ret;
schedule = true;
spin_lock_irqsave(&podev->lock, flags);
} else {
areq = NULL;
schedule = false;
}
} else {
/* done with this variable mp req */
schedule = true;
}
}
while (schedule) {
if (!list_empty(&podev->ready_commands)) {
new_req = container_of(podev->ready_commands.next,
struct ota_async_req, rlist);
list_del(&new_req->rlist);
pqce->active_command = new_req;
spin_unlock_irqrestore(&podev->lock, flags);
if (new_req) {
new_req->err = 0;
/* start a new request */
ret = start_req(pqce, new_req);
}
if (unlikely(new_req && ret)) {
new_req->err = ret;
complete(&new_req->complete);
ret = 0;
new_req = NULL;
spin_lock_irqsave(&podev->lock, flags);
} else {
schedule = false;
}
} else {
pqce->active_command = NULL;
spin_unlock_irqrestore(&podev->lock, flags);
schedule = false;
}
}
if (areq)
complete(&areq->complete);
}
static void f9_cb(void *cookie, unsigned char *icv, unsigned char *iv,
int ret)
{
struct ota_async_req *areq = (struct ota_async_req *) cookie;
struct ota_qce_dev *pqce;
pqce = areq->pqce;
areq->req.f9_req.mac_i = *((uint32_t *)icv);
if (ret) {
pqce->err_req++;
areq->err = -ENXIO;
} else
areq->err = 0;
tasklet_schedule(&pqce->done_tasklet);
}
static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv,
int ret)
{
struct ota_async_req *areq = (struct ota_async_req *) cookie;
struct ota_qce_dev *pqce;
pqce = areq->pqce;
if (ret) {
pqce->err_req++;
areq->err = -ENXIO;
} else {
areq->err = 0;
}
tasklet_schedule(&pqce->done_tasklet);
}
static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq)
{
struct qce_f9_req *pf9;
struct qce_f8_multi_pkt_req *p_mp_f8;
struct qce_f8_req *pf8;
int ret = 0;
/* command should be on the podev->active_command */
areq->pqce = pqce;
switch (areq->op) {
case QCE_OTA_F8_OPER:
pf8 = &areq->req.f8_req;
ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
break;
case QCE_OTA_MPKT_F8_OPER:
p_mp_f8 = &areq->req.f8_mp_req;
ret = qce_f8_multi_pkt_req(pqce->qce, p_mp_f8, areq, f8_cb);
break;
case QCE_OTA_F9_OPER:
pf9 = &areq->req.f9_req;
ret = qce_f9_req(pqce->qce, pf9, areq, f9_cb);
break;
case QCE_OTA_VAR_MPKT_F8_OPER:
pf8 = &areq->req.f8_v_mp_req.qce_f8_req;
ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
break;
default:
ret = -ENOTSUPP;
break;
}
areq->err = ret;
pqce->total_req++;
if (ret)
pqce->err_req++;
return ret;
}
static struct ota_qce_dev *schedule_qce(struct ota_dev_control *podev)
{
/* do this function with spinlock set */
struct ota_qce_dev *p;
if (unlikely(list_empty(&podev->qce_dev))) {
pr_err("%s: no valid qce to schedule\n", __func__);
return NULL;
}
list_for_each_entry(p, &podev->qce_dev, qlist) {
if (p->active_command == NULL)
return p;
}
return NULL;
}
static int submit_req(struct ota_async_req *areq, struct ota_dev_control *podev)
{
unsigned long flags;
int ret = 0;
struct qcota_stat *pstat;
struct ota_qce_dev *pqce;
areq->err = 0;
spin_lock_irqsave(&podev->lock, flags);
pqce = schedule_qce(podev);
if (pqce) {
pqce->active_command = areq;
spin_unlock_irqrestore(&podev->lock, flags);
ret = start_req(pqce, areq);
if (ret != 0) {
spin_lock_irqsave(&podev->lock, flags);
pqce->active_command = NULL;
spin_unlock_irqrestore(&podev->lock, flags);
}
} else {
list_add_tail(&areq->rlist, &podev->ready_commands);
spin_unlock_irqrestore(&podev->lock, flags);
}
if (ret == 0)
wait_for_completion(&areq->complete);
pstat = &_qcota_stat;
switch (areq->op) {
case QCE_OTA_F8_OPER:
if (areq->err)
pstat->f8_op_fail++;
else
pstat->f8_op_success++;
break;
case QCE_OTA_MPKT_F8_OPER:
if (areq->err)
pstat->f8_mp_op_fail++;
else
pstat->f8_mp_op_success++;
break;
case QCE_OTA_F9_OPER:
if (areq->err)
pstat->f9_op_fail++;
else
pstat->f9_op_success++;
break;
case QCE_OTA_VAR_MPKT_F8_OPER:
default:
if (areq->err)
pstat->f8_v_mp_op_fail++;
else
pstat->f8_v_mp_op_success++;
break;
}
return areq->err;
}
static long qcota_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
int err = 0;
struct ota_dev_control *podev;
uint8_t *user_src;
uint8_t *user_dst;
uint8_t *k_buf = NULL;
struct ota_async_req areq;
uint32_t total, temp;
struct qcota_stat *pstat;
int i;
uint8_t *p = NULL;
podev = file->private_data;
if (podev == NULL || podev->magic != OTA_MAGIC) {
pr_err("%s: invalid handle %pK\n",
__func__, podev);
return -ENOENT;
}
/* Verify user arguments. */
if (_IOC_TYPE(cmd) != QCOTA_IOC_MAGIC)
return -ENOTTY;
init_completion(&areq.complete);
pstat = &_qcota_stat;
switch (cmd) {
case QCOTA_F9_REQ:
if (!access_ok(VERIFY_WRITE, (void __user *)arg,
sizeof(struct qce_f9_req)))
return -EFAULT;
if (copy_from_user(&areq.req.f9_req, (void __user *)arg,
sizeof(struct qce_f9_req)))
return -EFAULT;
user_src = areq.req.f9_req.message;
if (!access_ok(VERIFY_READ, (void __user *)user_src,
areq.req.f9_req.msize))
return -EFAULT;
if (areq.req.f9_req.msize == 0)
return 0;
k_buf = memdup_user((const void __user *)user_src,
areq.req.f9_req.msize);
if (IS_ERR(k_buf))
return -EFAULT;
areq.req.f9_req.message = k_buf;
areq.op = QCE_OTA_F9_OPER;
pstat->f9_req++;
err = submit_req(&areq, podev);
areq.req.f9_req.message = user_src;
if (err == 0 && copy_to_user((void __user *)arg,
&areq.req.f9_req, sizeof(struct qce_f9_req))) {
err = -EFAULT;
}
kfree(k_buf);
break;
case QCOTA_F8_REQ:
if (!access_ok(VERIFY_WRITE, (void __user *)arg,
sizeof(struct qce_f8_req)))
return -EFAULT;
if (copy_from_user(&areq.req.f8_req, (void __user *)arg,
sizeof(struct qce_f8_req)))
return -EFAULT;
total = areq.req.f8_req.data_len;
user_src = areq.req.f8_req.data_in;
if (user_src != NULL) {
if (!access_ok(VERIFY_READ, (void __user *)
user_src, total))
return -EFAULT;
}
user_dst = areq.req.f8_req.data_out;
if (!access_ok(VERIFY_WRITE, (void __user *)
user_dst, total))
return -EFAULT;
if (!total)
return 0;
k_buf = kmalloc(total, GFP_KERNEL);
if (k_buf == NULL)
return -ENOMEM;
/* k_buf returned from kmalloc should be cache line aligned */
if (user_src && copy_from_user(k_buf,
(void __user *)user_src, total)) {
kfree(k_buf);
return -EFAULT;
}
if (user_src)
areq.req.f8_req.data_in = k_buf;
else
areq.req.f8_req.data_in = NULL;
areq.req.f8_req.data_out = k_buf;
areq.op = QCE_OTA_F8_OPER;
pstat->f8_req++;
err = submit_req(&areq, podev);
if (err == 0 && copy_to_user(user_dst, k_buf, total))
err = -EFAULT;
kfree(k_buf);
break;
case QCOTA_F8_MPKT_REQ:
if (!access_ok(VERIFY_WRITE, (void __user *)arg,
sizeof(struct qce_f8_multi_pkt_req)))
return -EFAULT;
if (copy_from_user(&areq.req.f8_mp_req, (void __user *)arg,
sizeof(struct qce_f8_multi_pkt_req)))
return -EFAULT;
temp = areq.req.f8_mp_req.qce_f8_req.data_len;
if (temp < (uint32_t) areq.req.f8_mp_req.cipher_start +
areq.req.f8_mp_req.cipher_size)
return -EINVAL;
total = (uint32_t) areq.req.f8_mp_req.num_pkt *
areq.req.f8_mp_req.qce_f8_req.data_len;
user_src = areq.req.f8_mp_req.qce_f8_req.data_in;
if (!access_ok(VERIFY_READ, (void __user *)
user_src, total))
return -EFAULT;
user_dst = areq.req.f8_mp_req.qce_f8_req.data_out;
if (!access_ok(VERIFY_WRITE, (void __user *)
user_dst, total))
return -EFAULT;
if (!total)
return 0;
/* k_buf should be cache line aligned */
k_buf = memdup_user((const void __user *)user_src, total);
if (IS_ERR(k_buf))
return -EFAULT;
areq.req.f8_mp_req.qce_f8_req.data_out = k_buf;
areq.req.f8_mp_req.qce_f8_req.data_in = k_buf;
areq.op = QCE_OTA_MPKT_F8_OPER;
pstat->f8_mp_req++;
err = submit_req(&areq, podev);
if (err == 0 && copy_to_user(user_dst, k_buf, total))
err = -EFAULT;
kfree(k_buf);
break;
case QCOTA_F8_V_MPKT_REQ:
if (!access_ok(VERIFY_WRITE, (void __user *)arg,
sizeof(struct qce_f8_variable_multi_pkt_req)))
return -EFAULT;
if (copy_from_user(&areq.req.f8_v_mp_req, (void __user *)arg,
sizeof(struct qce_f8_variable_multi_pkt_req)))
return -EFAULT;
if (areq.req.f8_v_mp_req.num_pkt > MAX_NUM_V_MULTI_PKT)
return -EINVAL;
for (i = 0, total = 0; i < areq.req.f8_v_mp_req.num_pkt; i++) {
if (!access_ok(VERIFY_WRITE, (void __user *)
areq.req.f8_v_mp_req.cipher_iov[i].addr,
areq.req.f8_v_mp_req.cipher_iov[i].size))
return -EFAULT;
total += areq.req.f8_v_mp_req.cipher_iov[i].size;
total = ALIGN(total, L1_CACHE_BYTES);
}
if (!total)
return 0;
k_buf = kmalloc(total, GFP_KERNEL);
if (k_buf == NULL)
return -ENOMEM;
for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
user_src = areq.req.f8_v_mp_req.cipher_iov[i].addr;
if (copy_from_user(p, (void __user *)user_src,
areq.req.f8_v_mp_req.cipher_iov[i].size)) {
kfree(k_buf);
return -EFAULT;
}
p += areq.req.f8_v_mp_req.cipher_iov[i].size;
p = (uint8_t *) ALIGN(((uintptr_t)p),
L1_CACHE_BYTES);
}
areq.req.f8_v_mp_req.qce_f8_req.data_out = k_buf;
areq.req.f8_v_mp_req.qce_f8_req.data_in = k_buf;
areq.req.f8_v_mp_req.qce_f8_req.data_len =
areq.req.f8_v_mp_req.cipher_iov[0].size;
areq.steps = 0;
areq.op = QCE_OTA_VAR_MPKT_F8_OPER;
pstat->f8_v_mp_req++;
err = submit_req(&areq, podev);
if (err != 0) {
kfree(k_buf);
return err;
}
for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
user_dst = areq.req.f8_v_mp_req.cipher_iov[i].addr;
if (copy_to_user(user_dst, p,
areq.req.f8_v_mp_req.cipher_iov[i].size)) {
kfree(k_buf);
return -EFAULT;
}
p += areq.req.f8_v_mp_req.cipher_iov[i].size;
p = (uint8_t *) ALIGN(((uintptr_t)p),
L1_CACHE_BYTES);
}
kfree(k_buf);
break;
default:
return -ENOTTY;
}
return err;
}
static int qcota_probe(struct platform_device *pdev)
{
void *handle = NULL;
int rc = 0;
struct ota_dev_control *podev;
struct ce_hw_support ce_support;
struct ota_qce_dev *pqce;
unsigned long flags;
podev = &qcota_dev;
pqce = kzalloc(sizeof(*pqce), GFP_KERNEL);
if (!pqce)
return -ENOMEM;
rc = alloc_chrdev_region(&qcota_device_no, 0, 1, QCOTA_DEV);
if (rc < 0) {
pr_err("alloc_chrdev_region failed %d\n", rc);
return rc;
}
#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE)
driver_class = class_create(QCOTA_DEV);
#else
driver_class = class_create(THIS_MODULE, QCOTA_DEV);
#endif
if (IS_ERR(driver_class)) {
rc = -ENOMEM;
pr_err("class_create failed %d\n", rc);
goto exit_unreg_chrdev_region;
}
class_dev = device_create(driver_class, NULL, qcota_device_no, NULL,
QCOTA_DEV);
if (IS_ERR(class_dev)) {
rc = -ENOMEM;
pr_err("device_create failed %d\n", rc);
goto exit_destroy_class;
}
cdev_init(&podev->cdev, &qcota_fops);
podev->cdev.owner = THIS_MODULE;
rc = cdev_add(&podev->cdev, MKDEV(MAJOR(qcota_device_no), 0), 1);
if (rc < 0) {
pr_err("cdev_add failed %d\n", rc);
goto exit_destroy_device;
}
podev->minor = 0;
pqce->podev = podev;
pqce->active_command = NULL;
tasklet_init(&pqce->done_tasklet, req_done, (unsigned long)pqce);
/* open qce */
handle = qce_open(pdev, &rc);
if (handle == NULL) {
pr_err("%s: device %s, can not open qce\n",
__func__, pdev->name);
goto exit_del_cdev;
}
if (qce_hw_support(handle, &ce_support) < 0 ||
!ce_support.ota) {
pr_err("%s: device %s, qce does not support ota capability\n",
__func__, pdev->name);
rc = -ENODEV;
goto err;
}
pqce->qce = handle;
pqce->pdev = pdev;
pqce->total_req = 0;
pqce->err_req = 0;
platform_set_drvdata(pdev, pqce);
mutex_lock(&podev->register_lock);
rc = 0;
if (!podev->registered) {
if (rc == 0) {
pqce->unit = podev->total_units;
podev->total_units++;
podev->registered = true;
}
} else {
pqce->unit = podev->total_units;
podev->total_units++;
}
mutex_unlock(&podev->register_lock);
if (rc) {
pr_err("ion: failed to register misc device.\n");
goto err;
}
spin_lock_irqsave(&podev->lock, flags);
list_add_tail(&pqce->qlist, &podev->qce_dev);
spin_unlock_irqrestore(&podev->lock, flags);
return 0;
err:
if (handle)
qce_close(handle);
platform_set_drvdata(pdev, NULL);
tasklet_kill(&pqce->done_tasklet);
exit_del_cdev:
cdev_del(&podev->cdev);
exit_destroy_device:
device_destroy(driver_class, qcota_device_no);
exit_destroy_class:
class_destroy(driver_class);
exit_unreg_chrdev_region:
unregister_chrdev_region(qcota_device_no, 1);
kfree(pqce);
return rc;
}
static int qcota_remove(struct platform_device *pdev)
{
struct ota_dev_control *podev;
struct ota_qce_dev *pqce;
unsigned long flags;
pqce = platform_get_drvdata(pdev);
if (!pqce)
return 0;
if (pqce->qce)
qce_close(pqce->qce);
podev = pqce->podev;
if (!podev)
goto ret;
spin_lock_irqsave(&podev->lock, flags);
list_del(&pqce->qlist);
spin_unlock_irqrestore(&podev->lock, flags);
mutex_lock(&podev->register_lock);
if (--podev->total_units == 0) {
cdev_del(&podev->cdev);
device_destroy(driver_class, qcota_device_no);
class_destroy(driver_class);
unregister_chrdev_region(qcota_device_no, 1);
podev->registered = false;
}
mutex_unlock(&podev->register_lock);
ret:
tasklet_kill(&pqce->done_tasklet);
kfree(pqce);
return 0;
}
static const struct of_device_id qcota_match[] = {
{ .compatible = "qcom,qcota",
},
{}
};
static struct platform_driver qcota_plat_driver = {
.probe = qcota_probe,
.remove = qcota_remove,
.driver = {
.name = "qcota",
.of_match_table = qcota_match,
},
};
static int _disp_stats(void)
{
struct qcota_stat *pstat;
int len = 0;
struct ota_dev_control *podev = &qcota_dev;
unsigned long flags;
struct ota_qce_dev *p;
pstat = &_qcota_stat;
len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
"\nQTI OTA crypto accelerator Statistics:\n");
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 request : %llu\n",
pstat->f8_req);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 operation success : %llu\n",
pstat->f8_op_success);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 operation fail : %llu\n",
pstat->f8_op_fail);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 MP request : %llu\n",
pstat->f8_mp_req);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 MP operation success : %llu\n",
pstat->f8_mp_op_success);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 MP operation fail : %llu\n",
pstat->f8_mp_op_fail);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 Variable MP request : %llu\n",
pstat->f8_v_mp_req);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 Variable MP operation success: %llu\n",
pstat->f8_v_mp_op_success);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 Variable MP operation fail : %llu\n",
pstat->f8_v_mp_op_fail);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F9 request : %llu\n",
pstat->f9_req);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F9 operation success : %llu\n",
pstat->f9_op_success);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F9 operation fail : %llu\n",
pstat->f9_op_fail);
spin_lock_irqsave(&podev->lock, flags);
list_for_each_entry(p, &podev->qce_dev, qlist) {
len += scnprintf(
_debug_read_buf + len,
DEBUG_MAX_RW_BUF - len - 1,
" Engine %4d Req : %llu\n",
p->unit,
p->total_req
);
len += scnprintf(
_debug_read_buf + len,
DEBUG_MAX_RW_BUF - len - 1,
" Engine %4d Req Error : %llu\n",
p->unit,
p->err_req
);
}
spin_unlock_irqrestore(&podev->lock, flags);
return len;
}
static ssize_t _debug_stats_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
int rc = -EINVAL;
int len;
len = _disp_stats();
if (len <= count)
rc = simple_read_from_buffer((void __user *) buf, len,
ppos, (void *) _debug_read_buf, len);
return rc;
}
static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct ota_dev_control *podev = &qcota_dev;
unsigned long flags;
struct ota_qce_dev *p;
memset((char *)&_qcota_stat, 0, sizeof(struct qcota_stat));
spin_lock_irqsave(&podev->lock, flags);
list_for_each_entry(p, &podev->qce_dev, qlist) {
p->total_req = 0;
p->err_req = 0;
}
spin_unlock_irqrestore(&podev->lock, flags);
return count;
}
static const struct file_operations _debug_stats_ops = {
.open = simple_open,
.read = _debug_stats_read,
.write = _debug_stats_write,
};
static int _qcota_debug_init(void)
{
int rc;
char name[DEBUG_MAX_FNAME];
struct dentry *dent;
_debug_dent = debugfs_create_dir("qcota", NULL);
if (IS_ERR(_debug_dent)) {
pr_err("qcota debugfs_create_dir fail, error %ld\n",
PTR_ERR(_debug_dent));
return PTR_ERR(_debug_dent);
}
snprintf(name, DEBUG_MAX_FNAME-1, "stats-0");
_debug_qcota = 0;
dent = debugfs_create_file(name, 0644, _debug_dent,
&_debug_qcota, &_debug_stats_ops);
if (IS_ERR_OR_NULL(dent)) {
pr_err("qcota debugfs_create_file fail, error %ld\n",
PTR_ERR(dent));
rc = dent ? PTR_ERR(dent) : -ENOMEM;
goto err;
}
return 0;
err:
debugfs_remove_recursive(_debug_dent);
return rc;
}
static int __init qcota_init(void)
{
int rc;
struct ota_dev_control *podev;
rc = _qcota_debug_init();
if (rc)
return rc;
podev = &qcota_dev;
INIT_LIST_HEAD(&podev->ready_commands);
INIT_LIST_HEAD(&podev->qce_dev);
spin_lock_init(&podev->lock);
mutex_init(&podev->register_lock);
podev->registered = false;
podev->total_units = 0;
return platform_driver_register(&qcota_plat_driver);
}
static void __exit qcota_exit(void)
{
debugfs_remove_recursive(_debug_dent);
platform_driver_unregister(&qcota_plat_driver);
}
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Ota Crypto driver");
module_init(qcota_init);
module_exit(qcota_exit);
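For reference, a minimal userspace sketch of driving the F9 path exposed by
this driver through /dev/qcota0. It is illustrative only: it assumes the
sanitized linux/qcota.h UAPI header is installed, and it fills in only the
fields this driver touches (message, msize, mac_i); the key, count and
direction fields must be set per that header.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/qcota.h>

int main(void)
{
	struct qce_f9_req req;
	unsigned char msg[64] = { 0 };
	int fd = open("/dev/qcota0", O_RDWR);

	if (fd < 0)
		return 1;
	memset(&req, 0, sizeof(req));
	req.message = msg;       /* copied into a kernel buffer by the ioctl */
	req.msize = sizeof(msg); /* a zero msize returns success immediately */
	/* F9 key/count/direction fields omitted here; see linux/qcota.h */
	if (ioctl(fd, QCOTA_F9_REQ, &req) == 0)
		printf("MAC-I: 0x%08x\n", req.mac_i);
	close(fd);
	return 0;
}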

@@ -0,0 +1,224 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QTI Crypto Engine driver API
*
* Copyright (c) 2010-2021, The Linux Foundation. All rights reserved.
*/
#ifndef __CRYPTO_MSM_QCE_H
#define __CRYPTO_MSM_QCE_H
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/crypto.h>
#include <crypto/skcipher.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
/* SHA digest size in bytes */
#define SHA256_DIGESTSIZE 32
#define SHA1_DIGESTSIZE 20
#define AES_CE_BLOCK_SIZE 16
/* key size in bytes */
#define HMAC_KEY_SIZE (SHA1_DIGESTSIZE) /* hmac-sha1 */
#define SHA_HMAC_KEY_SIZE 64
#define DES_KEY_SIZE 8
#define TRIPLE_DES_KEY_SIZE 24
#define AES128_KEY_SIZE 16
#define AES192_KEY_SIZE 24
#define AES256_KEY_SIZE 32
#define MAX_CIPHER_KEY_SIZE AES256_KEY_SIZE
/* iv length in bytes */
#define AES_IV_LENGTH 16
#define DES_IV_LENGTH 8
#define MAX_IV_LENGTH AES_IV_LENGTH
/* Maximum number of bytes per transfer */
#define QCE_MAX_OPER_DATA 0xFF00
/* Maximum Nonce bytes */
#define MAX_NONCE 16
/* Crypto clock control flags */
#define QCE_CLK_ENABLE_FIRST 1
#define QCE_BW_REQUEST_FIRST 2
#define QCE_CLK_DISABLE_FIRST 3
#define QCE_BW_REQUEST_RESET_FIRST 4
/* default average and peak bw for crypto device */
#define CRYPTO_AVG_BW 384
#define CRYPTO_PEAK_BW 384
typedef void (*qce_comp_func_ptr_t)(void *areq,
unsigned char *icv, unsigned char *iv, int ret);
/* Cipher algorithms supported */
enum qce_cipher_alg_enum {
CIPHER_ALG_DES = 0,
CIPHER_ALG_3DES = 1,
CIPHER_ALG_AES = 2,
CIPHER_ALG_LAST
};
/* Hash and hmac algorithms supported */
enum qce_hash_alg_enum {
QCE_HASH_SHA1 = 0,
QCE_HASH_SHA256 = 1,
QCE_HASH_SHA1_HMAC = 2,
QCE_HASH_SHA256_HMAC = 3,
QCE_HASH_AES_CMAC = 4,
QCE_HASH_LAST
};
/* Cipher encryption/decryption operations */
enum qce_cipher_dir_enum {
QCE_ENCRYPT = 0,
QCE_DECRYPT = 1,
QCE_CIPHER_DIR_LAST
};
/* Cipher algorithms modes */
enum qce_cipher_mode_enum {
QCE_MODE_CBC = 0,
QCE_MODE_ECB = 1,
QCE_MODE_CTR = 2,
QCE_MODE_XTS = 3,
QCE_MODE_CCM = 4,
QCE_CIPHER_MODE_LAST
};
/* Cipher operation type */
enum qce_req_op_enum {
QCE_REQ_ABLK_CIPHER = 0,
QCE_REQ_ABLK_CIPHER_NO_KEY = 1,
QCE_REQ_AEAD = 2,
QCE_REQ_LAST
};
/* Offload operation type */
enum qce_offload_op_enum {
QCE_OFFLOAD_NONE = 0, /* kernel pipe */
QCE_OFFLOAD_HLOS_HLOS = 1,
QCE_OFFLOAD_HLOS_HLOS_1 = 2,
QCE_OFFLOAD_HLOS_CPB = 3,
QCE_OFFLOAD_HLOS_CPB_1 = 4,
QCE_OFFLOAD_CPB_HLOS = 5,
QCE_OFFLOAD_OPER_LAST
};
/* Algorithms/features supported in CE HW engine */
struct ce_hw_support {
bool sha1_hmac_20; /* supports 20-byte HMAC key */
bool sha1_hmac; /* supports max HMAC key of 64 bytes */
bool sha256_hmac; /* supports max HMAC key of 64 bytes */
bool sha_hmac; /* supports SHA1 and SHA256 max HMAC key of 64 bytes */
bool cmac;
bool aes_key_192;
bool aes_xts;
bool aes_ccm;
bool ota;
bool aligned_only;
bool bam;
bool is_shared;
bool hw_key;
bool use_sw_aes_cbc_ecb_ctr_algo;
bool use_sw_aead_algo;
bool use_sw_aes_xts_algo;
bool use_sw_ahash_algo;
bool use_sw_hmac_algo;
bool use_sw_aes_ccm_algo;
bool clk_mgmt_sus_res;
bool req_bw_before_clk;
unsigned int ce_device;
unsigned int ce_hw_instance;
unsigned int max_request;
};
/* Sha operation parameters */
struct qce_sha_req {
qce_comp_func_ptr_t qce_cb; /* call back */
enum qce_hash_alg_enum alg; /* sha algorithm */
unsigned char *digest; /* sha digest */
struct scatterlist *src; /* pointer to scatter list entry */
uint32_t auth_data[4]; /* byte count */
unsigned char *authkey; /* auth key */
unsigned int authklen; /* auth key length */
bool first_blk; /* first block indicator */
bool last_blk; /* last block indicator */
unsigned int size; /* data length in bytes */
void *areq;
unsigned int flags;
int current_req_info;
};
struct qce_req {
enum qce_req_op_enum op; /* operation type */
qce_comp_func_ptr_t qce_cb; /* call back */
void *areq;
enum qce_cipher_alg_enum alg; /* cipher algorithms*/
enum qce_cipher_dir_enum dir; /* encryption? decryption? */
enum qce_cipher_mode_enum mode; /* algorithm mode */
enum qce_hash_alg_enum auth_alg;/* authentication algorithm for aead */
unsigned char *authkey; /* authentication key */
unsigned int authklen; /* authentication key length */
unsigned int authsize; /* authentication tag size */
unsigned char nonce[MAX_NONCE];/* nonce for ccm mode */
unsigned char *assoc; /* Ptr to formatted associated data */
unsigned int assoclen; /* Formatted associated data length */
struct scatterlist *asg; /* Formatted associated data sg */
unsigned char *enckey; /* cipher key */
unsigned int encklen; /* cipher key length */
unsigned char *iv; /* initialization vector */
unsigned int ivsize; /* initialization vector size*/
unsigned int iv_ctr_size; /* iv increment counter size*/
unsigned int cryptlen; /* data length */
unsigned int use_pmem; /* is the data source PMEM-allocated? */
struct qcedev_pmem_info *pmem; /* pointer to pmem_info structure*/
unsigned int flags;
enum qce_offload_op_enum offload_op; /* Offload usecase */
bool is_pattern_valid; /* Is pattern setting required */
unsigned int pattern_info; /* Pattern info for offload operation */
unsigned int block_offset; /* partial first block for AES CTR */
bool is_copy_op; /* copy buffers without crypto ops */
int current_req_info;
};
struct qce_pm_table {
int (*suspend)(void *handle);
int (*resume)(void *handle);
};
extern struct qce_pm_table qce_pm_table;
struct qce_error {
bool no_error;
bool timer_error;
bool key_paused;
bool generic_error;
};
void *qce_open(struct platform_device *pdev, int *rc);
int qce_close(void *handle);
int qce_aead_req(void *handle, struct qce_req *req);
int qce_ablk_cipher_req(void *handle, struct qce_req *req);
int qce_hw_support(void *handle, struct ce_hw_support *support);
int qce_process_sha_req(void *handle, struct qce_sha_req *s_req);
int qce_enable_clk(void *handle);
int qce_disable_clk(void *handle);
void qce_get_driver_stats(void *handle);
void qce_clear_driver_stats(void *handle);
void qce_dump_req(void *handle);
void qce_get_crypto_status(void *handle, struct qce_error *error);
int qce_manage_timeout(void *handle, int req_info);
int qce_set_irqs(void *handle, bool enable);
#endif /* __CRYPTO_MSM_QCE_H */
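The expected call pattern for this API, condensed from qcota_probe() above:
open the engine in probe, verify the capabilities you need, and close it on
any failure (and again on remove). A sketch:

static int example_probe(struct platform_device *pdev)
{
	struct ce_hw_support support;
	int rc = 0;
	void *handle = qce_open(pdev, &rc);

	if (handle == NULL)
		return rc ? rc : -ENODEV;
	if (qce_hw_support(handle, &support) < 0 || !support.ota) {
		qce_close(handle);
		return -ENODEV;
	}
	/* submit work via qce_*_req(); call qce_close() on driver remove */
	return 0;
}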

File diff suppressed because it is too large.

@@ -0,0 +1,256 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DRIVERS_CRYPTO_MSM_QCE50_H_
#define _DRIVERS_CRYPTO_MSM_QCE50_H_
#include "linux/msm-sps.h"
/* MAX Data xfer block size between BAM and CE */
#define MAX_CE_BAM_BURST_SIZE 0x40
#define QCEBAM_BURST_SIZE MAX_CE_BAM_BURST_SIZE
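/*
 * The two macros below translate between kernel virtual and physical
 * addresses within the driver's coherent memory pool (coh_vmem/coh_pmem);
 * x must point inside the pool.
 */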
#define GET_VIRT_ADDR(x) \
((uintptr_t)pce_dev->coh_vmem + \
((uintptr_t)x - (uintptr_t)pce_dev->coh_pmem))
#define GET_PHYS_ADDR(x) \
(phys_addr_t)(((uintptr_t)pce_dev->coh_pmem + \
((uintptr_t)x - (uintptr_t)pce_dev->coh_vmem)))
#define CRYPTO_REG_SIZE 4
#define NUM_OF_CRYPTO_AUTH_IV_REG 16
#define NUM_OF_CRYPTO_CNTR_IV_REG 4
#define NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG 4
#define CRYPTO_TOTAL_REGISTERS_DUMPED 26
#define CRYPTO_RESULT_DUMP_SIZE \
ALIGN((CRYPTO_TOTAL_REGISTERS_DUMPED * CRYPTO_REG_SIZE), \
QCEBAM_BURST_SIZE)
/* QCE max number of descriptor in a descriptor list */
#define QCE_MAX_NUM_DESC 128
#define SPS_MAX_PKT_SIZE (32 * 1024 - 64)
/* default bam ipc log level */
#define QCE_BAM_DEFAULT_IPC_LOGLVL 2
/* State of consumer/producer Pipe */
enum qce_pipe_st_enum {
QCE_PIPE_STATE_IDLE = 0,
QCE_PIPE_STATE_IN_PROG = 1,
QCE_PIPE_STATE_COMP = 2,
QCE_PIPE_STATE_LAST
};
enum qce_xfer_type_enum {
QCE_XFER_HASHING,
QCE_XFER_CIPHERING,
QCE_XFER_AEAD,
QCE_XFER_F8,
QCE_XFER_F9,
QCE_XFER_TYPE_LAST
};
struct qce_sps_ep_conn_data {
struct sps_pipe *pipe;
struct sps_connect connect;
struct sps_register_event event;
};
/* CE Result DUMP format*/
struct ce_result_dump_format {
uint32_t auth_iv[NUM_OF_CRYPTO_AUTH_IV_REG];
uint32_t auth_byte_count[NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG];
uint32_t encr_cntr_iv[NUM_OF_CRYPTO_CNTR_IV_REG];
__be32 status;
__be32 status2;
};
struct qce_cmdlist_info {
unsigned long cmdlist;
struct sps_command_element *crypto_cfg;
struct sps_command_element *crypto_cfg_le;
struct sps_command_element *encr_seg_cfg;
struct sps_command_element *encr_seg_size;
struct sps_command_element *encr_seg_start;
struct sps_command_element *encr_key;
struct sps_command_element *encr_xts_key;
struct sps_command_element *encr_cntr_iv;
struct sps_command_element *encr_ccm_cntr_iv;
struct sps_command_element *encr_mask_0;
struct sps_command_element *encr_mask_1;
struct sps_command_element *encr_mask_2;
struct sps_command_element *encr_mask_3;
struct sps_command_element *encr_xts_du_size;
struct sps_command_element *pattern_info;
struct sps_command_element *block_offset;
struct sps_command_element *auth_seg_cfg;
struct sps_command_element *auth_seg_size;
struct sps_command_element *auth_seg_start;
struct sps_command_element *auth_key;
struct sps_command_element *auth_iv;
struct sps_command_element *auth_nonce_info;
struct sps_command_element *auth_bytecount;
struct sps_command_element *seg_size;
struct sps_command_element *go_proc;
ptrdiff_t size;
};
struct qce_cmdlistptr_ops {
struct qce_cmdlist_info cipher_aes_128_cbc_ctr;
struct qce_cmdlist_info cipher_aes_256_cbc_ctr;
struct qce_cmdlist_info cipher_aes_128_ecb;
struct qce_cmdlist_info cipher_aes_256_ecb;
struct qce_cmdlist_info cipher_aes_128_xts;
struct qce_cmdlist_info cipher_aes_256_xts;
struct qce_cmdlist_info cipher_des_cbc;
struct qce_cmdlist_info cipher_des_ecb;
struct qce_cmdlist_info cipher_3des_cbc;
struct qce_cmdlist_info cipher_3des_ecb;
struct qce_cmdlist_info auth_sha1;
struct qce_cmdlist_info auth_sha256;
struct qce_cmdlist_info auth_sha1_hmac;
struct qce_cmdlist_info auth_sha256_hmac;
struct qce_cmdlist_info auth_aes_128_cmac;
struct qce_cmdlist_info auth_aes_256_cmac;
struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_128;
struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_256;
struct qce_cmdlist_info aead_hmac_sha1_cbc_des;
struct qce_cmdlist_info aead_hmac_sha1_cbc_3des;
struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_128;
struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_256;
struct qce_cmdlist_info aead_hmac_sha256_cbc_des;
struct qce_cmdlist_info aead_hmac_sha256_cbc_3des;
struct qce_cmdlist_info aead_aes_128_ccm;
struct qce_cmdlist_info aead_aes_256_ccm;
struct qce_cmdlist_info cipher_null;
struct qce_cmdlist_info f8_kasumi;
struct qce_cmdlist_info f8_snow3g;
struct qce_cmdlist_info f9_kasumi;
struct qce_cmdlist_info f9_snow3g;
struct qce_cmdlist_info unlock_all_pipes;
};
struct qce_ce_cfg_reg_setting {
uint32_t crypto_cfg_be;
uint32_t crypto_cfg_le;
uint32_t encr_cfg_aes_cbc_128;
uint32_t encr_cfg_aes_cbc_256;
uint32_t encr_cfg_aes_ecb_128;
uint32_t encr_cfg_aes_ecb_256;
uint32_t encr_cfg_aes_xts_128;
uint32_t encr_cfg_aes_xts_256;
uint32_t encr_cfg_aes_ctr_128;
uint32_t encr_cfg_aes_ctr_256;
uint32_t encr_cfg_aes_ccm_128;
uint32_t encr_cfg_aes_ccm_256;
uint32_t encr_cfg_des_cbc;
uint32_t encr_cfg_des_ecb;
uint32_t encr_cfg_3des_cbc;
uint32_t encr_cfg_3des_ecb;
uint32_t encr_cfg_kasumi;
uint32_t encr_cfg_snow3g;
uint32_t auth_cfg_cmac_128;
uint32_t auth_cfg_cmac_256;
uint32_t auth_cfg_sha1;
uint32_t auth_cfg_sha256;
uint32_t auth_cfg_hmac_sha1;
uint32_t auth_cfg_hmac_sha256;
uint32_t auth_cfg_aes_ccm_128;
uint32_t auth_cfg_aes_ccm_256;
uint32_t auth_cfg_aead_sha1_hmac;
uint32_t auth_cfg_aead_sha256_hmac;
uint32_t auth_cfg_kasumi;
uint32_t auth_cfg_snow3g;
/* iv0 - bits 127:96 - CRYPTO_CNTR_MASK_REG0*/
uint32_t encr_cntr_mask_0;
/* iv1 - bits 95:64 - CRYPTO_CNTR_MASK_REG1*/
uint32_t encr_cntr_mask_1;
/* iv2 - bits 63:32 - CRYPTO_CNTR_MASK_REG2*/
uint32_t encr_cntr_mask_2;
/* iv3 - bits 31:0 - CRYPTO_CNTR_MASK_REG*/
uint32_t encr_cntr_mask_3;
};
struct ce_bam_info {
uint32_t bam_irq;
uint32_t bam_mem;
void __iomem *bam_iobase;
uint32_t ce_device;
uint32_t ce_hw_instance;
uint32_t bam_ee;
unsigned int pipe_pair_index[QCE_OFFLOAD_OPER_LAST];
unsigned int src_pipe_index[QCE_OFFLOAD_OPER_LAST];
unsigned int dest_pipe_index[QCE_OFFLOAD_OPER_LAST];
unsigned long bam_handle;
int ce_burst_size;
uint32_t minor_version;
uint32_t major_version;
struct qce_sps_ep_conn_data producer[QCE_OFFLOAD_OPER_LAST];
struct qce_sps_ep_conn_data consumer[QCE_OFFLOAD_OPER_LAST];
};
/* SPS data structure with buffers, command lists & command pointer lists */
struct ce_sps_data {
enum qce_pipe_st_enum producer_state; /* Producer pipe state */
int consumer_status; /* consumer pipe status */
int producer_status; /* producer pipe status */
struct sps_transfer in_transfer;
struct sps_transfer out_transfer;
struct qce_cmdlistptr_ops cmdlistptr;
uint32_t result_dump; /* result dump virtual address */
uint32_t result_dump_null;
uint32_t result_dump_phy; /* result dump physical address (32 bits) */
uint32_t result_dump_null_phy;
uint32_t ignore_buffer; /* ignore buffer virtual address */
struct ce_result_dump_format *result; /* pointer to result dump */
struct ce_result_dump_format *result_null;
};
struct ce_request_info {
atomic_t in_use;
bool in_prog;
enum qce_xfer_type_enum xfer_type;
struct ce_sps_data ce_sps;
qce_comp_func_ptr_t qce_cb; /* qce callback function pointer */
void *user;
void *areq;
int assoc_nents;
struct scatterlist *asg; /* Formatted associated data sg */
int src_nents;
int dst_nents;
dma_addr_t phy_iv_in;
unsigned char dec_iv[16];
int dir;
enum qce_cipher_mode_enum mode;
dma_addr_t phy_ota_src;
dma_addr_t phy_ota_dst;
unsigned int ota_size;
unsigned int req_len;
unsigned int offload_op;
};
struct qce_driver_stats {
int no_of_timeouts;
int no_of_dummy_reqs;
int current_mode;
int outstanding_reqs;
};
#endif /* _DRIVERS_CRYPTO_MSM_QCE50_H_ */

@@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QTI Crypto Engine driver OTA API
*
* Copyright (c) 2010-2020, The Linux Foundation. All rights reserved.
*/
#ifndef __CRYPTO_MSM_QCE_OTA_H
#define __CRYPTO_MSM_QCE_OTA_H
#include <linux/platform_device.h>
#include "linux/qcota.h"
int qce_f8_req(void *handle, struct qce_f8_req *req,
void *cookie, qce_comp_func_ptr_t qce_cb);
int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *req,
void *cookie, qce_comp_func_ptr_t qce_cb);
int qce_f9_req(void *handle, struct qce_f9_req *req,
void *cookie, qce_comp_func_ptr_t qce_cb);
#endif /* __CRYPTO_MSM_QCE_OTA_H */

File diff suppressed because it is too large.

@@ -0,0 +1,443 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* QTI (Qualcomm Technologies, Inc.) CE device driver.
*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include <linux/qcom-dma-mapping.h>
#include <linux/list.h>
#include "linux/qcedev.h"
#include "qcedevi.h"
#include "qcedev_smmu.h"
#include "soc/qcom/secure_buffer.h"
#include <linux/mem-buf.h>
static int qcedev_setup_context_bank(struct context_bank_info *cb,
struct device *dev)
{
if (!dev || !cb) {
pr_err("%s err: invalid input params\n", __func__);
return -EINVAL;
}
cb->dev = dev;
if (!dev->dma_parms) {
dev->dma_parms = devm_kzalloc(dev,
sizeof(*dev->dma_parms), GFP_KERNEL);
if (!dev->dma_parms)
return -ENOMEM;
}
dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64));
return 0;
}
int qcedev_parse_context_bank(struct platform_device *pdev)
{
struct qcedev_control *podev;
struct context_bank_info *cb = NULL;
struct device_node *np = NULL;
int rc = 0;
if (!pdev) {
pr_err("%s err: invalid platform devices\n", __func__);
return -EINVAL;
}
if (!pdev->dev.parent) {
pr_err("%s err: failed to find a parent for %s\n",
__func__, dev_name(&pdev->dev));
return -EINVAL;
}
podev = dev_get_drvdata(pdev->dev.parent);
np = pdev->dev.of_node;
cb = devm_kzalloc(&pdev->dev, sizeof(*cb), GFP_KERNEL);
if (!cb) {
pr_err("%s ERROR = Failed to allocate cb\n", __func__);
return -ENOMEM;
}
INIT_LIST_HEAD(&cb->list);
list_add_tail(&cb->list, &podev->context_banks);
rc = of_property_read_string(np, "label", &cb->name);
if (rc)
pr_debug("%s ERROR = Unable to read label\n", __func__);
cb->is_secure = of_property_read_bool(np, "qcom,secure-context-bank");
rc = qcedev_setup_context_bank(cb, &pdev->dev);
if (rc) {
pr_err("%s err: cannot setup context bank %d\n", __func__, rc);
goto err_setup_cb;
}
return 0;
err_setup_cb:
list_del(&cb->list);
devm_kfree(&pdev->dev, cb);
return rc;
}
struct qcedev_mem_client *qcedev_mem_new_client(enum qcedev_mem_type mtype)
{
struct qcedev_mem_client *mem_client = NULL;
if (mtype != MEM_ION) {
pr_err("%s: err: Mem type not supported\n", __func__);
goto err;
}
mem_client = kzalloc(sizeof(*mem_client), GFP_KERNEL);
if (!mem_client)
goto err;
mem_client->mtype = mtype;
return mem_client;
err:
return NULL;
}
void qcedev_mem_delete_client(struct qcedev_mem_client *mem_client)
{
kfree(mem_client);
}
static bool is_iommu_present(struct qcedev_handle *qce_hndl)
{
return !list_empty(&qce_hndl->cntl->context_banks);
}
static struct context_bank_info *get_context_bank(
struct qcedev_handle *qce_hndl, bool is_secure)
{
struct qcedev_control *podev = qce_hndl->cntl;
struct context_bank_info *cb = NULL, *match = NULL;
list_for_each_entry(cb, &podev->context_banks, list) {
if (cb->is_secure == is_secure) {
match = cb;
break;
}
}
return match;
}
static int ion_map_buffer(struct qcedev_handle *qce_hndl,
struct qcedev_mem_client *mem_client, int fd,
unsigned int fd_size, struct qcedev_reg_buf_info *binfo)
{
int rc = 0;
struct dma_buf *buf = NULL;
struct dma_buf_attachment *attach = NULL;
struct sg_table *table = NULL;
struct context_bank_info *cb = NULL;
buf = dma_buf_get(fd);
if (IS_ERR_OR_NULL(buf))
return -EINVAL;
if (is_iommu_present(qce_hndl)) {
cb = get_context_bank(qce_hndl, !mem_buf_dma_buf_exclusive_owner(buf));
if (!cb) {
pr_err("%s: err: failed to get context bank info\n",
__func__);
rc = -EIO;
goto map_err;
}
/* Prepare a dma buf for dma on the given device */
attach = dma_buf_attach(buf, cb->dev);
if (IS_ERR_OR_NULL(attach)) {
rc = PTR_ERR(attach) ?: -ENOMEM;
pr_err("%s: err: failed to attach dmabuf\n", __func__);
goto map_err;
}
/* Get the scatterlist for the given attachment */
attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (IS_ERR_OR_NULL(table)) {
rc = PTR_ERR(table) ?: -ENOMEM;
pr_err("%s: err: failed to map table\n", __func__);
goto map_table_err;
}
if (table->sgl) {
binfo->ion_buf.iova = sg_dma_address(table->sgl);
binfo->ion_buf.mapped_buf_size = sg_dma_len(table->sgl);
if (binfo->ion_buf.mapped_buf_size < fd_size) {
pr_err("%s: err: mapping failed, size mismatch\n",
__func__);
rc = -ENOMEM;
goto map_sg_err;
}
} else {
pr_err("%s: err: sg list is NULL\n", __func__);
rc = -ENOMEM;
goto map_sg_err;
}
binfo->ion_buf.mapping_info.dev = cb->dev;
binfo->ion_buf.mapping_info.mapping = cb->mapping;
binfo->ion_buf.mapping_info.table = table;
binfo->ion_buf.mapping_info.attach = attach;
binfo->ion_buf.mapping_info.buf = buf;
binfo->ion_buf.ion_fd = fd;
} else {
pr_err("%s: err: smmu not enabled\n", __func__);
rc = -EIO;
goto map_err;
}
return 0;
map_sg_err:
dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
map_table_err:
dma_buf_detach(buf, attach);
map_err:
dma_buf_put(buf);
return rc;
}
static int ion_unmap_buffer(struct qcedev_handle *qce_hndl,
struct qcedev_reg_buf_info *binfo)
{
struct dma_mapping_info *mapping_info = &binfo->ion_buf.mapping_info;
if (is_iommu_present(qce_hndl)) {
dma_buf_unmap_attachment(mapping_info->attach,
mapping_info->table, DMA_BIDIRECTIONAL);
dma_buf_detach(mapping_info->buf, mapping_info->attach);
dma_buf_put(mapping_info->buf);
}
return 0;
}
static int qcedev_map_buffer(struct qcedev_handle *qce_hndl,
struct qcedev_mem_client *mem_client, int fd,
unsigned int fd_size, struct qcedev_reg_buf_info *binfo)
{
int rc = -1;
switch (mem_client->mtype) {
case MEM_ION:
rc = ion_map_buffer(qce_hndl, mem_client, fd, fd_size, binfo);
break;
default:
pr_err("%s: err: Mem type not supported\n", __func__);
break;
}
if (rc)
pr_err("%s: err: failed to map buffer\n", __func__);
return rc;
}
static int qcedev_unmap_buffer(struct qcedev_handle *qce_hndl,
struct qcedev_mem_client *mem_client,
struct qcedev_reg_buf_info *binfo)
{
int rc = -1;
switch (mem_client->mtype) {
case MEM_ION:
rc = ion_unmap_buffer(qce_hndl, binfo);
break;
default:
pr_err("%s: err: Mem type not supported\n", __func__);
break;
}
if (rc)
pr_err("%s: err: failed to unmap buffer\n", __func__);
return rc;
}
int qcedev_check_and_map_buffer(void *handle,
int fd, unsigned int offset, unsigned int fd_size,
unsigned long long *vaddr)
{
bool found = false;
struct qcedev_reg_buf_info *binfo = NULL, *temp = NULL;
struct qcedev_mem_client *mem_client = NULL;
struct qcedev_handle *qce_hndl = handle;
int rc = 0;
unsigned long mapped_size = 0;
if (!handle || !vaddr || fd < 0 || offset >= fd_size) {
pr_err("%s: err: invalid input arguments\n", __func__);
return -EINVAL;
}
if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
pr_err("%s: err: invalid qcedev handle\n", __func__);
return -EINVAL;
}
mem_client = qce_hndl->cntl->mem_client;
if (mem_client->mtype != MEM_ION)
return -EPERM;
/* Check if the buffer fd is already mapped */
mutex_lock(&qce_hndl->registeredbufs.lock);
list_for_each_entry(temp, &qce_hndl->registeredbufs.list, list) {
if (temp->ion_buf.ion_fd == fd) {
found = true;
*vaddr = temp->ion_buf.iova;
mapped_size = temp->ion_buf.mapped_buf_size;
atomic_inc(&temp->ref_count);
break;
}
}
mutex_unlock(&qce_hndl->registeredbufs.lock);
/* If buffer fd is not mapped then create a fresh mapping */
if (!found) {
pr_debug("%s: info: ion fd not registered with driver\n",
__func__);
binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
if (!binfo) {
pr_err("%s: err: failed to allocate binfo\n",
__func__);
rc = -ENOMEM;
goto error;
}
rc = qcedev_map_buffer(qce_hndl, mem_client, fd,
fd_size, binfo);
if (rc) {
pr_err("%s: err: failed to map fd (%d) error = %d\n",
__func__, fd, rc);
goto error;
}
*vaddr = binfo->ion_buf.iova;
mapped_size = binfo->ion_buf.mapped_buf_size;
atomic_inc(&binfo->ref_count);
/* Add buffer mapping information to regd buffer list */
mutex_lock(&qce_hndl->registeredbufs.lock);
list_add_tail(&binfo->list, &qce_hndl->registeredbufs.list);
mutex_unlock(&qce_hndl->registeredbufs.lock);
}
/* Make sure the offset is within the mapped range */
if (offset >= mapped_size) {
pr_err(
"%s: err: Offset (%u) exceeds mapped size(%lu) for fd: %d\n",
__func__, offset, mapped_size, fd);
rc = -ERANGE;
goto unmap;
}
/* return the mapped virtual address adjusted by offset */
*vaddr += offset;
return 0;
unmap:
if (!found) {
qcedev_unmap_buffer(handle, mem_client, binfo);
mutex_lock(&qce_hndl->registeredbufs.lock);
list_del(&binfo->list);
mutex_unlock(&qce_hndl->registeredbufs.lock);
}
error:
kfree(binfo);
return rc;
}
int qcedev_check_and_unmap_buffer(void *handle, int fd)
{
struct qcedev_reg_buf_info *binfo = NULL, *dummy = NULL;
struct qcedev_mem_client *mem_client = NULL;
struct qcedev_handle *qce_hndl = handle;
bool found = false;
if (!handle || fd < 0) {
pr_err("%s: err: invalid input arguments\n", __func__);
return -EINVAL;
}
if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
pr_err("%s: err: invalid qcedev handle\n", __func__);
return -EINVAL;
}
mem_client = qce_hndl->cntl->mem_client;
if (mem_client->mtype != MEM_ION)
return -EPERM;
/* Check if the buffer fd is mapped and present in the regd list. */
mutex_lock(&qce_hndl->registeredbufs.lock);
list_for_each_entry_safe(binfo, dummy,
&qce_hndl->registeredbufs.list, list) {
if (binfo->ion_buf.ion_fd == fd) {
found = true;
atomic_dec(&binfo->ref_count);
/* Unmap only if there are no more references */
if (atomic_read(&binfo->ref_count) == 0) {
qcedev_unmap_buffer(qce_hndl,
mem_client, binfo);
list_del(&binfo->list);
kfree(binfo);
}
break;
}
}
mutex_unlock(&qce_hndl->registeredbufs.lock);
if (!found) {
pr_err("%s: err: calling unmap on unknown fd %d\n",
__func__, fd);
return -EINVAL;
}
return 0;
}
int qcedev_unmap_all_buffers(void *handle)
{
struct qcedev_reg_buf_info *binfo = NULL;
struct qcedev_mem_client *mem_client = NULL;
struct qcedev_handle *qce_hndl = handle;
struct list_head *pos;
if (!handle) {
pr_err("%s: err: invalid input arguments\n", __func__);
return -EINVAL;
}
if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
pr_err("%s: err: invalid qcedev handle\n", __func__);
return -EINVAL;
}
mem_client = qce_hndl->cntl->mem_client;
if (mem_client->mtype != MEM_ION)
return -EPERM;
mutex_lock(&qce_hndl->registeredbufs.lock);
while (!list_empty(&qce_hndl->registeredbufs.list)) {
pos = qce_hndl->registeredbufs.list.next;
binfo = list_entry(pos, struct qcedev_reg_buf_info, list);
if (binfo)
qcedev_unmap_buffer(qce_hndl, mem_client, binfo);
list_del(pos);
kfree(binfo);
}
mutex_unlock(&qce_hndl->registeredbufs.lock);
return 0;
}
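Taken together, these entry points form a small refcounted mapping API: the first map of a dma-buf fd attaches and maps it and records the IOVA, later maps of the same fd only take a reference, and unmap releases the mapping once the count drops to zero. A hedged caller sketch, assuming the struct qcedev_handle comes from the driver's open() path and using illustrative fd/size values:

/* Hedged usage sketch for the refcounted fd-mapping API above.
 * 'handle' is assumed to come from the driver's open() path;
 * 'dmabuf_fd' is a dma-buf fd received from userspace.
 */
static int example_map_use_unmap(struct qcedev_handle *handle,
				 int dmabuf_fd, unsigned int buf_size)
{
	unsigned long long iova = 0;
	int rc;

	/* First call attaches and maps; repeat calls only take a ref. */
	rc = qcedev_check_and_map_buffer(handle, dmabuf_fd, 0, buf_size, &iova);
	if (rc)
		return rc;

	/* ... program 'iova' into a crypto request here ... */

	/* Drops the ref; the dma-buf is detached once the count hits zero. */
	return qcedev_check_and_unmap_buffer(handle, dmabuf_fd);
}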

View File

@ -0,0 +1,81 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QTI (Qualcomm Technologies, Inc.) CE device driver.
*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DRIVERS_CRYPTO_PARSE_H_
#define _DRIVERS_CRYPTO_PARSE_H_
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/iommu.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <linux/msm_ion.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/types.h>
struct context_bank_info {
struct list_head list;
const char *name;
u32 buffer_type;
u32 start_addr;
u32 size;
bool is_secure;
struct device *dev;
struct dma_iommu_mapping *mapping;
};
enum qcedev_mem_type {
MEM_ION,
};
struct qcedev_mem_client {
enum qcedev_mem_type mtype;
};
struct dma_mapping_info {
struct device *dev;
struct dma_iommu_mapping *mapping;
struct sg_table *table;
struct dma_buf_attachment *attach;
struct dma_buf *buf;
};
struct qcedev_ion_buf_info {
struct dma_mapping_info mapping_info;
dma_addr_t iova;
unsigned long mapped_buf_size;
int ion_fd;
};
struct qcedev_reg_buf_info {
struct list_head list;
union {
struct qcedev_ion_buf_info ion_buf;
};
atomic_t ref_count;
};
struct qcedev_buffer_list {
struct list_head list;
struct mutex lock;
};
int qcedev_parse_context_bank(struct platform_device *pdev);
struct qcedev_mem_client *qcedev_mem_new_client(enum qcedev_mem_type mtype);
void qcedev_mem_delete_client(struct qcedev_mem_client *mem_client);
int qcedev_check_and_map_buffer(void *qce_hndl,
int fd, unsigned int offset, unsigned int fd_size,
unsigned long long *vaddr);
int qcedev_check_and_unmap_buffer(void *handle, int fd);
int qcedev_unmap_all_buffers(void *handle);
extern struct qcedev_reg_buf_info *global_binfo_in;
extern struct qcedev_reg_buf_info *global_binfo_out;
extern struct qcedev_reg_buf_info *global_binfo_res;
#endif

View File

@ -0,0 +1,136 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QTI crypto Driver
*
* Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __CRYPTO_MSM_QCEDEVI_H
#define __CRYPTO_MSM_QCEDEVI_H
#include <linux/interrupt.h>
#include <linux/cdev.h>
#include <crypto/hash.h>
#include "qcom_crypto_device.h"
#include "fips_status.h"
#include "qce.h"
#include "qcedev_smmu.h"
#define CACHE_LINE_SIZE 64
#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
enum qcedev_crypto_oper_type {
QCEDEV_CRYPTO_OPER_CIPHER = 0,
QCEDEV_CRYPTO_OPER_SHA = 1,
QCEDEV_CRYPTO_OPER_OFFLOAD_CIPHER = 2,
QCEDEV_CRYPTO_OPER_LAST
};
struct qcedev_handle;
struct qcedev_cipher_req {
struct skcipher_request creq;
void *cookie;
};
struct qcedev_sha_req {
struct ahash_request sreq;
void *cookie;
};
struct qcedev_sha_ctxt {
uint32_t auth_data[4];
uint8_t digest[QCEDEV_MAX_SHA_DIGEST];
uint32_t diglen;
uint8_t trailing_buf[64];
uint32_t trailing_buf_len;
uint8_t first_blk;
uint8_t last_blk;
uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
bool init_done;
};
struct qcedev_async_req {
struct list_head list;
struct completion complete;
enum qcedev_crypto_oper_type op_type;
union {
struct qcedev_cipher_op_req cipher_op_req;
struct qcedev_sha_op_req sha_op_req;
struct qcedev_offload_cipher_op_req offload_cipher_op_req;
};
union {
struct qcedev_cipher_req cipher_req;
struct qcedev_sha_req sha_req;
};
struct qcedev_handle *handle;
int err;
wait_queue_head_t wait_q;
uint16_t state;
bool timed_out;
};
/**********************************************************************
* Register ourselves as a char device so the dev driver can be
* accessed from userspace.
*/
#define QCEDEV_DEV "qce"
struct qcedev_control {
/* CE features supported by platform */
struct msm_ce_hw_support platform_support;
uint32_t ce_lock_count;
uint32_t high_bw_req_count;
/* CE features/algorithms supported by HW engine*/
struct ce_hw_support ce_support;
/* replaced msm_bus with interconnect path */
struct icc_path *icc_path;
/* average and peak bw values for interconnect */
uint32_t icc_avg_bw;
uint32_t icc_peak_bw;
/* char device */
struct cdev cdev;
int minor;
/* qce handle */
void *qce;
/* platform device */
struct platform_device *pdev;
unsigned int magic;
struct list_head ready_commands;
struct qcedev_async_req *active_command;
spinlock_t lock;
struct tasklet_struct done_tasklet;
struct list_head context_banks;
struct qcedev_mem_client *mem_client;
};
struct qcedev_handle {
/* qcedev control handle */
struct qcedev_control *cntl;
/* qce internal sha context */
struct qcedev_sha_ctxt sha_ctxt;
/* qcedev mapped buffer list */
struct qcedev_buffer_list registeredbufs;
};
void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
unsigned char *iv, int ret);
void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
unsigned char *authdata, int ret);
#endif /* __CRYPTO_MSM_QCEDEVI_H */

View File

@ -0,0 +1,19 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2011-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __QCOM_CRYPTO_DEVICE__H
#define __QCOM_CRYPTO_DEVICE__H
#include <linux/types.h>
struct msm_ce_hw_support {
uint32_t ce_shared;
uint32_t shared_ce_resource;
uint32_t hw_key_support;
uint32_t sha_hmac;
};
#endif /* __QCOM_CRYPTO_DEVICE__H */

File diff suppressed because it is too large

View File

@ -0,0 +1,61 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTO_H_
#define _DRIVERS_CRYPTO_MSM_QCRYPTO_H_
#include <linux/crypto.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#define QCRYPTO_CTX_KEY_MASK 0x000000ff
#define QCRYPTO_CTX_USE_HW_KEY 0x00000001
#define QCRYPTO_CTX_USE_PIPE_KEY 0x00000002
#define QCRYPTO_CTX_XTS_MASK 0x0000ff00
#define QCRYPTO_CTX_XTS_DU_SIZE_512B 0x00000100
#define QCRYPTO_CTX_XTS_DU_SIZE_1KB 0x00000200
int qcrypto_cipher_set_device(struct skcipher_request *req, unsigned int dev);
int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev);
int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev);
int qcrypto_cipher_set_flag(struct skcipher_request *req, unsigned int flags);
int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags);
int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags);
int qcrypto_cipher_clear_flag(struct skcipher_request *req,
unsigned int flags);
int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags);
int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags);
struct crypto_engine_entry {
u32 hw_instance;
u32 ce_device;
int shared;
};
int qcrypto_get_num_engines(void);
void qcrypto_get_engine_list(size_t num_engines,
struct crypto_engine_entry *arr);
int qcrypto_cipher_set_device_hw(struct skcipher_request *req,
unsigned int fde_pfe,
unsigned int hw_inst);
struct qcrypto_func_set {
int (*cipher_set)(struct skcipher_request *req,
unsigned int fde_pfe,
unsigned int hw_inst);
int (*cipher_flag)(struct skcipher_request *req, unsigned int flags);
int (*get_num_engines)(void);
void (*get_engine_list)(size_t num_engines,
struct crypto_engine_entry *arr);
};
#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTO_H_ */
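A hedged sketch of how a caller might use the setters above to steer a request to a specific engine and select the pipe key. The engine index 0 and the flag choice are illustrative assumptions; the skcipher_request itself comes from the standard kernel crypto API:

/* Hedged sketch: route an skcipher request to CE engine 0 and request
 * the pipe key instead of key registers. Values are illustrative.
 */
static int example_tag_request(struct skcipher_request *req)
{
	int rc;

	rc = qcrypto_cipher_set_device(req, 0);
	if (rc)
		return rc;

	return qcrypto_cipher_set_flag(req, QCRYPTO_CTX_USE_PIPE_KEY);
}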

View File

@ -0,0 +1,529 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
#define CRYPTO_BAM_CNFG_BITS_REG 0x0007C
#define CRYPTO_BAM_CD_ENABLE 27
#define CRYPTO_BAM_CD_ENABLE_MASK (1 << CRYPTO_BAM_CD_ENABLE)
#define QCE_AUTH_REG_BYTE_COUNT 4
#define CRYPTO_VERSION_REG 0x1A000
#define CRYPTO_DATA_IN0_REG 0x1A010
#define CRYPTO_DATA_IN1_REG 0x1A014
#define CRYPTO_DATA_IN2_REG 0x1A018
#define CRYPTO_DATA_IN3_REG 0x1A01C
#define CRYPTO_DATA_OUT0_REG 0x1A020
#define CRYPTO_DATA_OUT1_REG 0x1A024
#define CRYPTO_DATA_OUT2_REG 0x1A028
#define CRYPTO_DATA_OUT3_REG 0x1A02C
#define CRYPTO_STATUS_REG 0x1A100
#define CRYPTO_STATUS2_REG 0x1A104
#define CRYPTO_STATUS3_REG 0x1A11C
#define CRYPTO_STATUS4_REG 0x1A124
#define CRYPTO_STATUS5_REG 0x1A128
#define CRYPTO_STATUS6_REG 0x1A13C
#define CRYPTO_ENGINES_AVAIL 0x1A108
#define CRYPTO_FIFO_SIZES_REG 0x1A10C
#define CRYPTO_SEG_SIZE_REG 0x1A110
#define CRYPTO_GOPROC_REG 0x1A120
#define CRYPTO_GOPROC_QC_KEY_REG 0x1B000
#define CRYPTO_GOPROC_OEM_KEY_REG 0x1C000
#define CRYPTO_ENCR_SEG_CFG_REG 0x1A200
#define CRYPTO_ENCR_SEG_SIZE_REG 0x1A204
#define CRYPTO_ENCR_SEG_START_REG 0x1A208
#define CRYPTO_DATA_PATT_PROC_CFG_REG 0x1A500
#define CRYPTO_DATA_PARTIAL_BLOCK_PROC_CFG_REG 0x1A504
#define CRYPTO_ENCR_KEY0_REG 0x1D000
#define CRYPTO_ENCR_KEY1_REG 0x1D004
#define CRYPTO_ENCR_KEY2_REG 0x1D008
#define CRYPTO_ENCR_KEY3_REG 0x1D00C
#define CRYPTO_ENCR_KEY4_REG 0x1D010
#define CRYPTO_ENCR_KEY5_REG 0x1D014
#define CRYPTO_ENCR_KEY6_REG 0x1D018
#define CRYPTO_ENCR_KEY7_REG 0x1D01C
#define CRYPTO_ENCR_XTS_KEY0_REG 0x1D020
#define CRYPTO_ENCR_XTS_KEY1_REG 0x1D024
#define CRYPTO_ENCR_XTS_KEY2_REG 0x1D028
#define CRYPTO_ENCR_XTS_KEY3_REG 0x1D02C
#define CRYPTO_ENCR_XTS_KEY4_REG 0x1D030
#define CRYPTO_ENCR_XTS_KEY5_REG 0x1D034
#define CRYPTO_ENCR_XTS_KEY6_REG 0x1D038
#define CRYPTO_ENCR_XTS_KEY7_REG 0x1D03C
#define CRYPTO_ENCR_PIPE0_KEY0_REG 0x1E000
#define CRYPTO_ENCR_PIPE0_KEY1_REG 0x1E004
#define CRYPTO_ENCR_PIPE0_KEY2_REG 0x1E008
#define CRYPTO_ENCR_PIPE0_KEY3_REG 0x1E00C
#define CRYPTO_ENCR_PIPE0_KEY4_REG 0x1E010
#define CRYPTO_ENCR_PIPE0_KEY5_REG 0x1E014
#define CRYPTO_ENCR_PIPE0_KEY6_REG 0x1E018
#define CRYPTO_ENCR_PIPE0_KEY7_REG 0x1E01C
#define CRYPTO_ENCR_PIPE1_KEY0_REG 0x1E020
#define CRYPTO_ENCR_PIPE1_KEY1_REG 0x1E024
#define CRYPTO_ENCR_PIPE1_KEY2_REG 0x1E028
#define CRYPTO_ENCR_PIPE1_KEY3_REG 0x1E02C
#define CRYPTO_ENCR_PIPE1_KEY4_REG 0x1E030
#define CRYPTO_ENCR_PIPE1_KEY5_REG 0x1E034
#define CRYPTO_ENCR_PIPE1_KEY6_REG 0x1E038
#define CRYPTO_ENCR_PIPE1_KEY7_REG 0x1E03C
#define CRYPTO_ENCR_PIPE2_KEY0_REG 0x1E040
#define CRYPTO_ENCR_PIPE2_KEY1_REG 0x1E044
#define CRYPTO_ENCR_PIPE2_KEY2_REG 0x1E048
#define CRYPTO_ENCR_PIPE2_KEY3_REG 0x1E04C
#define CRYPTO_ENCR_PIPE2_KEY4_REG 0x1E050
#define CRYPTO_ENCR_PIPE2_KEY5_REG 0x1E054
#define CRYPTO_ENCR_PIPE2_KEY6_REG 0x1E058
#define CRYPTO_ENCR_PIPE2_KEY7_REG 0x1E05C
#define CRYPTO_ENCR_PIPE3_KEY0_REG 0x1E060
#define CRYPTO_ENCR_PIPE3_KEY1_REG 0x1E064
#define CRYPTO_ENCR_PIPE3_KEY2_REG 0x1E068
#define CRYPTO_ENCR_PIPE3_KEY3_REG 0x1E06C
#define CRYPTO_ENCR_PIPE3_KEY4_REG 0x1E070
#define CRYPTO_ENCR_PIPE3_KEY5_REG 0x1E074
#define CRYPTO_ENCR_PIPE3_KEY6_REG 0x1E078
#define CRYPTO_ENCR_PIPE3_KEY7_REG 0x1E07C
#define CRYPTO_ENCR_PIPE0_XTS_KEY0_REG 0x1E200
#define CRYPTO_ENCR_PIPE0_XTS_KEY1_REG 0x1E204
#define CRYPTO_ENCR_PIPE0_XTS_KEY2_REG 0x1E208
#define CRYPTO_ENCR_PIPE0_XTS_KEY3_REG 0x1E20C
#define CRYPTO_ENCR_PIPE0_XTS_KEY4_REG 0x1E210
#define CRYPTO_ENCR_PIPE0_XTS_KEY5_REG 0x1E214
#define CRYPTO_ENCR_PIPE0_XTS_KEY6_REG 0x1E218
#define CRYPTO_ENCR_PIPE0_XTS_KEY7_REG 0x1E21C
#define CRYPTO_ENCR_PIPE1_XTS_KEY0_REG 0x1E220
#define CRYPTO_ENCR_PIPE1_XTS_KEY1_REG 0x1E224
#define CRYPTO_ENCR_PIPE1_XTS_KEY2_REG 0x1E228
#define CRYPTO_ENCR_PIPE1_XTS_KEY3_REG 0x1E22C
#define CRYPTO_ENCR_PIPE1_XTS_KEY4_REG 0x1E230
#define CRYPTO_ENCR_PIPE1_XTS_KEY5_REG 0x1E234
#define CRYPTO_ENCR_PIPE1_XTS_KEY6_REG 0x1E238
#define CRYPTO_ENCR_PIPE1_XTS_KEY7_REG 0x1E23C
#define CRYPTO_ENCR_PIPE2_XTS_KEY0_REG 0x1E240
#define CRYPTO_ENCR_PIPE2_XTS_KEY1_REG 0x1E244
#define CRYPTO_ENCR_PIPE2_XTS_KEY2_REG 0x1E248
#define CRYPTO_ENCR_PIPE2_XTS_KEY3_REG 0x1E24C
#define CRYPTO_ENCR_PIPE2_XTS_KEY4_REG 0x1E250
#define CRYPTO_ENCR_PIPE2_XTS_KEY5_REG 0x1E254
#define CRYPTO_ENCR_PIPE2_XTS_KEY6_REG 0x1E258
#define CRYPTO_ENCR_PIPE2_XTS_KEY7_REG 0x1E25C
#define CRYPTO_ENCR_PIPE3_XTS_KEY0_REG 0x1E260
#define CRYPTO_ENCR_PIPE3_XTS_KEY1_REG 0x1E264
#define CRYPTO_ENCR_PIPE3_XTS_KEY2_REG 0x1E268
#define CRYPTO_ENCR_PIPE3_XTS_KEY3_REG 0x1E26C
#define CRYPTO_ENCR_PIPE3_XTS_KEY4_REG 0x1E270
#define CRYPTO_ENCR_PIPE3_XTS_KEY5_REG 0x1E274
#define CRYPTO_ENCR_PIPE3_XTS_KEY6_REG 0x1E278
#define CRYPTO_ENCR_PIPE3_XTS_KEY7_REG 0x1E27C
#define CRYPTO_CNTR0_IV0_REG 0x1A20C
#define CRYPTO_CNTR1_IV1_REG 0x1A210
#define CRYPTO_CNTR2_IV2_REG 0x1A214
#define CRYPTO_CNTR3_IV3_REG 0x1A218
#define CRYPTO_CNTR_MASK_REG0 0x1A23C
#define CRYPTO_CNTR_MASK_REG1 0x1A238
#define CRYPTO_CNTR_MASK_REG2 0x1A234
#define CRYPTO_CNTR_MASK_REG 0x1A21C
#define CRYPTO_ENCR_CCM_INT_CNTR0_REG 0x1A220
#define CRYPTO_ENCR_CCM_INT_CNTR1_REG 0x1A224
#define CRYPTO_ENCR_CCM_INT_CNTR2_REG 0x1A228
#define CRYPTO_ENCR_CCM_INT_CNTR3_REG 0x1A22C
#define CRYPTO_ENCR_XTS_DU_SIZE_REG 0x1A230
#define CRYPTO_AUTH_SEG_CFG_REG 0x1A300
#define CRYPTO_AUTH_SEG_SIZE_REG 0x1A304
#define CRYPTO_AUTH_SEG_START_REG 0x1A308
#define CRYPTO_AUTH_KEY0_REG 0x1D040
#define CRYPTO_AUTH_KEY1_REG 0x1D044
#define CRYPTO_AUTH_KEY2_REG 0x1D048
#define CRYPTO_AUTH_KEY3_REG 0x1D04C
#define CRYPTO_AUTH_KEY4_REG 0x1D050
#define CRYPTO_AUTH_KEY5_REG 0x1D054
#define CRYPTO_AUTH_KEY6_REG 0x1D058
#define CRYPTO_AUTH_KEY7_REG 0x1D05C
#define CRYPTO_AUTH_KEY8_REG 0x1D060
#define CRYPTO_AUTH_KEY9_REG 0x1D064
#define CRYPTO_AUTH_KEY10_REG 0x1D068
#define CRYPTO_AUTH_KEY11_REG 0x1D06C
#define CRYPTO_AUTH_KEY12_REG 0x1D070
#define CRYPTO_AUTH_KEY13_REG 0x1D074
#define CRYPTO_AUTH_KEY14_REG 0x1D078
#define CRYPTO_AUTH_KEY15_REG 0x1D07C
#define CRYPTO_AUTH_PIPE0_KEY0_REG 0x1E800
#define CRYPTO_AUTH_PIPE0_KEY1_REG 0x1E804
#define CRYPTO_AUTH_PIPE0_KEY2_REG 0x1E808
#define CRYPTO_AUTH_PIPE0_KEY3_REG 0x1E80C
#define CRYPTO_AUTH_PIPE0_KEY4_REG 0x1E810
#define CRYPTO_AUTH_PIPE0_KEY5_REG 0x1E814
#define CRYPTO_AUTH_PIPE0_KEY6_REG 0x1E818
#define CRYPTO_AUTH_PIPE0_KEY7_REG 0x1E81C
#define CRYPTO_AUTH_PIPE0_KEY8_REG 0x1E820
#define CRYPTO_AUTH_PIPE0_KEY9_REG 0x1E824
#define CRYPTO_AUTH_PIPE0_KEY10_REG 0x1E828
#define CRYPTO_AUTH_PIPE0_KEY11_REG 0x1E82C
#define CRYPTO_AUTH_PIPE0_KEY12_REG 0x1E830
#define CRYPTO_AUTH_PIPE0_KEY13_REG 0x1E834
#define CRYPTO_AUTH_PIPE0_KEY14_REG 0x1E838
#define CRYPTO_AUTH_PIPE0_KEY15_REG 0x1E83C
#define CRYPTO_AUTH_PIPE1_KEY0_REG 0x1E880
#define CRYPTO_AUTH_PIPE1_KEY1_REG 0x1E884
#define CRYPTO_AUTH_PIPE1_KEY2_REG 0x1E888
#define CRYPTO_AUTH_PIPE1_KEY3_REG 0x1E88C
#define CRYPTO_AUTH_PIPE1_KEY4_REG 0x1E890
#define CRYPTO_AUTH_PIPE1_KEY5_REG 0x1E894
#define CRYPTO_AUTH_PIPE1_KEY6_REG 0x1E898
#define CRYPTO_AUTH_PIPE1_KEY7_REG 0x1E89C
#define CRYPTO_AUTH_PIPE1_KEY8_REG 0x1E8A0
#define CRYPTO_AUTH_PIPE1_KEY9_REG 0x1E8A4
#define CRYPTO_AUTH_PIPE1_KEY10_REG 0x1E8A8
#define CRYPTO_AUTH_PIPE1_KEY11_REG 0x1E8AC
#define CRYPTO_AUTH_PIPE1_KEY12_REG 0x1E8B0
#define CRYPTO_AUTH_PIPE1_KEY13_REG 0x1E8B4
#define CRYPTO_AUTH_PIPE1_KEY14_REG 0x1E8B8
#define CRYPTO_AUTH_PIPE1_KEY15_REG 0x1E8BC
#define CRYPTO_AUTH_PIPE2_KEY0_REG 0x1E900
#define CRYPTO_AUTH_PIPE2_KEY1_REG 0x1E904
#define CRYPTO_AUTH_PIPE2_KEY2_REG 0x1E908
#define CRYPTO_AUTH_PIPE2_KEY3_REG 0x1E90C
#define CRYPTO_AUTH_PIPE2_KEY4_REG 0x1E910
#define CRYPTO_AUTH_PIPE2_KEY5_REG 0x1E914
#define CRYPTO_AUTH_PIPE2_KEY6_REG 0x1E918
#define CRYPTO_AUTH_PIPE2_KEY7_REG 0x1E91C
#define CRYPTO_AUTH_PIPE2_KEY8_REG 0x1E920
#define CRYPTO_AUTH_PIPE2_KEY9_REG 0x1E924
#define CRYPTO_AUTH_PIPE2_KEY10_REG 0x1E928
#define CRYPTO_AUTH_PIPE2_KEY11_REG 0x1E92C
#define CRYPTO_AUTH_PIPE2_KEY12_REG 0x1E930
#define CRYPTO_AUTH_PIPE2_KEY13_REG 0x1E934
#define CRYPTO_AUTH_PIPE2_KEY14_REG 0x1E938
#define CRYPTO_AUTH_PIPE2_KEY15_REG 0x1E93C
#define CRYPTO_AUTH_PIPE3_KEY0_REG 0x1E980
#define CRYPTO_AUTH_PIPE3_KEY1_REG 0x1E984
#define CRYPTO_AUTH_PIPE3_KEY2_REG 0x1E988
#define CRYPTO_AUTH_PIPE3_KEY3_REG 0x1E98C
#define CRYPTO_AUTH_PIPE3_KEY4_REG 0x1E990
#define CRYPTO_AUTH_PIPE3_KEY5_REG 0x1E994
#define CRYPTO_AUTH_PIPE3_KEY6_REG 0x1E998
#define CRYPTO_AUTH_PIPE3_KEY7_REG 0x1E99C
#define CRYPTO_AUTH_PIPE3_KEY8_REG 0x1E9A0
#define CRYPTO_AUTH_PIPE3_KEY9_REG 0x1E9A4
#define CRYPTO_AUTH_PIPE3_KEY10_REG 0x1E9A8
#define CRYPTO_AUTH_PIPE3_KEY11_REG 0x1E9AC
#define CRYPTO_AUTH_PIPE3_KEY12_REG 0x1E9B0
#define CRYPTO_AUTH_PIPE3_KEY13_REG 0x1E9B4
#define CRYPTO_AUTH_PIPE3_KEY14_REG 0x1E9B8
#define CRYPTO_AUTH_PIPE3_KEY15_REG 0x1E9BC
#define CRYPTO_AUTH_IV0_REG 0x1A310
#define CRYPTO_AUTH_IV1_REG 0x1A314
#define CRYPTO_AUTH_IV2_REG 0x1A318
#define CRYPTO_AUTH_IV3_REG 0x1A31C
#define CRYPTO_AUTH_IV4_REG 0x1A320
#define CRYPTO_AUTH_IV5_REG 0x1A324
#define CRYPTO_AUTH_IV6_REG 0x1A328
#define CRYPTO_AUTH_IV7_REG 0x1A32C
#define CRYPTO_AUTH_IV8_REG 0x1A330
#define CRYPTO_AUTH_IV9_REG 0x1A334
#define CRYPTO_AUTH_IV10_REG 0x1A338
#define CRYPTO_AUTH_IV11_REG 0x1A33C
#define CRYPTO_AUTH_IV12_REG 0x1A340
#define CRYPTO_AUTH_IV13_REG 0x1A344
#define CRYPTO_AUTH_IV14_REG 0x1A348
#define CRYPTO_AUTH_IV15_REG 0x1A34C
#define CRYPTO_AUTH_INFO_NONCE0_REG 0x1A350
#define CRYPTO_AUTH_INFO_NONCE1_REG 0x1A354
#define CRYPTO_AUTH_INFO_NONCE2_REG 0x1A358
#define CRYPTO_AUTH_INFO_NONCE3_REG 0x1A35C
#define CRYPTO_AUTH_BYTECNT0_REG 0x1A390
#define CRYPTO_AUTH_BYTECNT1_REG 0x1A394
#define CRYPTO_AUTH_BYTECNT2_REG 0x1A398
#define CRYPTO_AUTH_BYTECNT3_REG 0x1A39C
#define CRYPTO_AUTH_EXP_MAC0_REG 0x1A3A0
#define CRYPTO_AUTH_EXP_MAC1_REG 0x1A3A4
#define CRYPTO_AUTH_EXP_MAC2_REG 0x1A3A8
#define CRYPTO_AUTH_EXP_MAC3_REG 0x1A3AC
#define CRYPTO_AUTH_EXP_MAC4_REG 0x1A3B0
#define CRYPTO_AUTH_EXP_MAC5_REG 0x1A3B4
#define CRYPTO_AUTH_EXP_MAC6_REG 0x1A3B8
#define CRYPTO_AUTH_EXP_MAC7_REG 0x1A3BC
#define CRYPTO_CONFIG_REG 0x1A400
#define CRYPTO_PWR_CTRL 0x1A408
#define CRYPTO_DEBUG_ENABLE_REG 0x1AF00
#define CRYPTO_DEBUG_REG 0x1AF04
/* Register bits */
#define CRYPTO_CORE_STEP_REV_MASK 0xFFFF
#define CRYPTO_CORE_STEP_REV 0 /* bit 15-0 */
#define CRYPTO_CORE_MAJOR_REV_MASK 0xFF000000
#define CRYPTO_CORE_MAJOR_REV 24 /* bit 31-24 */
#define CRYPTO_CORE_MINOR_REV_MASK 0xFF0000
#define CRYPTO_CORE_MINOR_REV 16 /* bit 23-16 */
/* status reg */
#define CRYPTO_MAC_FAILED 31
#define CRYPTO_DOUT_SIZE_AVAIL 26 /* bit 30-26 */
#define CRYPTO_DOUT_SIZE_AVAIL_MASK (0x1F << CRYPTO_DOUT_SIZE_AVAIL)
#define CRYPTO_DIN_SIZE_AVAIL 21 /* bit 25-21 */
#define CRYPTO_DIN_SIZE_AVAIL_MASK (0x1F << CRYPTO_DIN_SIZE_AVAIL)
#define CRYPTO_HSD_ERR 20
#define CRYPTO_ACCESS_VIOL 19
#define CRYPTO_PIPE_ACTIVE_ERR 18
#define CRYPTO_CFG_CHNG_ERR 17
#define CRYPTO_DOUT_ERR 16
#define CRYPTO_DIN_ERR 15
#define CRYPTO_AXI_ERR 14
#define CRYPTO_CRYPTO_STATE 10 /* bit 13-10 */
#define CRYPTO_CRYPTO_STATE_MASK (0xF << CRYPTO_CRYPTO_STATE)
#define CRYPTO_ENCR_BUSY 9
#define CRYPTO_AUTH_BUSY 8
#define CRYPTO_DOUT_INTR 7
#define CRYPTO_DIN_INTR 6
#define CRYPTO_OP_DONE_INTR 5
#define CRYPTO_ERR_INTR 4
#define CRYPTO_DOUT_RDY 3
#define CRYPTO_DIN_RDY 2
#define CRYPTO_OPERATION_DONE 1
#define CRYPTO_SW_ERR 0
/* status2 reg */
#define CRYPTO_AXI_EXTRA 1
#define CRYPTO_LOCKED 2
/* config reg */
#define CRYPTO_REQ_SIZE 17 /* bit 20-17 */
#define CRYPTO_REQ_SIZE_MASK (0xF << CRYPTO_REQ_SIZE)
#define CRYPTO_REQ_SIZE_ENUM_1_BEAT 0
#define CRYPTO_REQ_SIZE_ENUM_2_BEAT 1
#define CRYPTO_REQ_SIZE_ENUM_3_BEAT 2
#define CRYPTO_REQ_SIZE_ENUM_4_BEAT 3
#define CRYPTO_REQ_SIZE_ENUM_5_BEAT 4
#define CRYPTO_REQ_SIZE_ENUM_6_BEAT 5
#define CRYPTO_REQ_SIZE_ENUM_7_BEAT 6
#define CRYPTO_REQ_SIZE_ENUM_8_BEAT 7
#define CRYPTO_REQ_SIZE_ENUM_9_BEAT 8
#define CRYPTO_REQ_SIZE_ENUM_10_BEAT 9
#define CRYPTO_REQ_SIZE_ENUM_11_BEAT 10
#define CRYPTO_REQ_SIZE_ENUM_12_BEAT 11
#define CRYPTO_REQ_SIZE_ENUM_13_BEAT 12
#define CRYPTO_REQ_SIZE_ENUM_14_BEAT 13
#define CRYPTO_REQ_SIZE_ENUM_15_BEAT 14
#define CRYPTO_REQ_SIZE_ENUM_16_BEAT 15
#define CRYPTO_MAX_QUEUED_REQ 14 /* bit 16-14 */
#define CRYPTO_MAX_QUEUED_REQ_MASK (0x7 << CRYPTO_MAX_QUEUED_REQ)
#define CRYPTO_ENUM_1_QUEUED_REQS 0
#define CRYPTO_ENUM_2_QUEUED_REQS 1
#define CRYPTO_ENUM_3_QUEUED_REQS 2
#define CRYPTO_IRQ_ENABLES 10 /* bit 13-10 */
#define CRYPTO_IRQ_ENABLES_MASK (0xF << CRYPTO_IRQ_ENABLES)
#define CRYPTO_LITTLE_ENDIAN_MODE 9
#define CRYPTO_LITTLE_ENDIAN_MASK (1 << CRYPTO_LITTLE_ENDIAN_MODE)
#define CRYPTO_PIPE_SET_SELECT 5 /* bit 8-5 */
#define CRYPTO_PIPE_SET_SELECT_MASK (0xF << CRYPTO_PIPE_SET_SELECT)
#define CRYPTO_HIGH_SPD_EN_N 4
#define CRYPTO_MASK_DOUT_INTR 3
#define CRYPTO_MASK_DIN_INTR 2
#define CRYPTO_MASK_OP_DONE_INTR 1
#define CRYPTO_MASK_ERR_INTR 0
/* auth_seg_cfg reg */
#define CRYPTO_COMP_EXP_MAC 24
#define CRYPTO_COMP_EXP_MAC_DISABLED 0
#define CRYPTO_COMP_EXP_MAC_ENABLED 1
#define CRYPTO_F9_DIRECTION 23
#define CRYPTO_F9_DIRECTION_UPLINK 0
#define CRYPTO_F9_DIRECTION_DOWNLINK 1
#define CRYPTO_AUTH_NONCE_NUM_WORDS 20 /* bit 22-20 */
#define CRYPTO_AUTH_NONCE_NUM_WORDS_MASK \
(0x7 << CRYPTO_AUTH_NONCE_NUM_WORDS)
#define CRYPTO_USE_PIPE_KEY_AUTH 19
#define CRYPTO_USE_HW_KEY_AUTH 18
#define CRYPTO_FIRST 17
#define CRYPTO_LAST 16
#define CRYPTO_AUTH_POS 14 /* bit 15 .. 14*/
#define CRYPTO_AUTH_POS_MASK (0x3 << CRYPTO_AUTH_POS)
#define CRYPTO_AUTH_POS_BEFORE 0
#define CRYPTO_AUTH_POS_AFTER 1
#define CRYPTO_AUTH_SIZE 9 /* bits 13 .. 9*/
#define CRYPTO_AUTH_SIZE_MASK (0x1F << CRYPTO_AUTH_SIZE)
#define CRYPTO_AUTH_SIZE_SHA1 0
#define CRYPTO_AUTH_SIZE_SHA256 1
#define CRYPTO_AUTH_SIZE_ENUM_1_BYTES 0
#define CRYPTO_AUTH_SIZE_ENUM_2_BYTES 1
#define CRYPTO_AUTH_SIZE_ENUM_3_BYTES 2
#define CRYPTO_AUTH_SIZE_ENUM_4_BYTES 3
#define CRYPTO_AUTH_SIZE_ENUM_5_BYTES 4
#define CRYPTO_AUTH_SIZE_ENUM_6_BYTES 5
#define CRYPTO_AUTH_SIZE_ENUM_7_BYTES 6
#define CRYPTO_AUTH_SIZE_ENUM_8_BYTES 7
#define CRYPTO_AUTH_SIZE_ENUM_9_BYTES 8
#define CRYPTO_AUTH_SIZE_ENUM_10_BYTES 9
#define CRYPTO_AUTH_SIZE_ENUM_11_BYTES 10
#define CRYPTO_AUTH_SIZE_ENUM_12_BYTES 11
#define CRYPTO_AUTH_SIZE_ENUM_13_BYTES 12
#define CRYPTO_AUTH_SIZE_ENUM_14_BYTES 13
#define CRYPTO_AUTH_SIZE_ENUM_15_BYTES 14
#define CRYPTO_AUTH_SIZE_ENUM_16_BYTES 15
#define CRYPTO_AUTH_MODE 6 /* bit 8 .. 6*/
#define CRYPTO_AUTH_MODE_MASK (0x7 << CRYPTO_AUTH_MODE)
#define CRYPTO_AUTH_MODE_HASH 0
#define CRYPTO_AUTH_MODE_HMAC 1
#define CRYPTO_AUTH_MODE_CCM 0
#define CRYPTO_AUTH_MODE_CMAC 1
#define CRYPTO_AUTH_KEY_SIZE 3 /* bit 5 .. 3*/
#define CRYPTO_AUTH_KEY_SIZE_MASK (0x7 << CRYPTO_AUTH_KEY_SIZE)
#define CRYPTO_AUTH_KEY_SZ_AES128 0
#define CRYPTO_AUTH_KEY_SZ_AES256 2
#define CRYPTO_AUTH_ALG 0 /* bit 2 .. 0*/
#define CRYPTO_AUTH_ALG_MASK 7
#define CRYPTO_AUTH_ALG_NONE 0
#define CRYPTO_AUTH_ALG_SHA 1
#define CRYPTO_AUTH_ALG_AES 2
#define CRYPTO_AUTH_ALG_KASUMI 3
#define CRYPTO_AUTH_ALG_SNOW3G 4
#define CRYPTO_AUTH_ALG_ZUC 5
/* encr_xts_du_size reg */
#define CRYPTO_ENCR_XTS_DU_SIZE 0 /* bit 19-0 */
#define CRYPTO_ENCR_XTS_DU_SIZE_MASK 0xfffff
/* encr_seg_cfg reg */
#define CRYPTO_F8_KEYSTREAM_ENABLE 17 /* bit */
#define CRYPTO_F8_KEYSTREAM_DISABLED 0
#define CRYPTO_F8_KEYSTREAM_ENABLED 1
#define CRYPTO_F8_DIRECTION 16 /* bit */
#define CRYPTO_F8_DIRECTION_UPLINK 0
#define CRYPTO_F8_DIRECTION_DOWNLINK 1
#define CRYPTO_USE_PIPE_KEY_ENCR 15 /* bit */
#define CRYPTO_USE_PIPE_KEY_ENCR_ENABLED 1
#define CRYPTO_USE_KEY_REGISTERS 0
#define CRYPTO_USE_HW_KEY_ENCR 14
#define CRYPTO_USE_KEY_REG 0
#define CRYPTO_USE_HW_KEY 1
#define CRYPTO_LAST_CCM 13
#define CRYPTO_LAST_CCM_XFR 1
#define CRYPTO_INTERM_CCM_XFR 0
#define CRYPTO_CNTR_ALG 11 /* bit 12-11 */
#define CRYPTO_CNTR_ALG_MASK (3 << CRYPTO_CNTR_ALG)
#define CRYPTO_CNTR_ALG_NIST 0
#define CRYPTO_ENCODE 10
#define CRYPTO_ENCR_MODE 6 /* bit 9-6 */
#define CRYPTO_ENCR_MODE_MASK (0xF << CRYPTO_ENCR_MODE)
/* only valid when AES */
#define CRYPTO_ENCR_MODE_ECB 0
#define CRYPTO_ENCR_MODE_CBC 1
#define CRYPTO_ENCR_MODE_CTR 2
#define CRYPTO_ENCR_MODE_XTS 3
#define CRYPTO_ENCR_MODE_CCM 4
#define CRYPTO_ENCR_KEY_SZ 3 /* bit 5-3 */
#define CRYPTO_ENCR_KEY_SZ_MASK (7 << CRYPTO_ENCR_KEY_SZ)
#define CRYPTO_ENCR_KEY_SZ_DES 0
#define CRYPTO_ENCR_KEY_SZ_3DES 1
#define CRYPTO_ENCR_KEY_SZ_AES128 0
#define CRYPTO_ENCR_KEY_SZ_AES256 2
#define CRYPTO_ENCR_ALG 0 /* bit 2-0 */
#define CRYPTO_ENCR_ALG_MASK (7 << CRYPTO_ENCR_ALG)
#define CRYPTO_ENCR_ALG_NONE 0
#define CRYPTO_ENCR_ALG_DES 1
#define CRYPTO_ENCR_ALG_AES 2
#define CRYPTO_ENCR_ALG_KASUMI 4
#define CRYPTO_ENCR_ALG_SNOW_3G 5
#define CRYPTO_ENCR_ALG_ZUC 6
/* goproc reg */
#define CRYPTO_GO 0
#define CRYPTO_CLR_CNTXT 1
#define CRYPTO_RESULTS_DUMP 2
/* F8 definition of CRYPTO_ENCR_CNTR1_IV1 REG */
#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT 16 /* bit 31 - 16 */
#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT_MASK \
(0xffff << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT)
#define CRYPTO_CNTR1_IV1_REG_F8_BEARER 0 /* bit 4 - 0 */
#define CRYPTO_CNTR1_IV1_REG_F8_BEARER_MASK \
(0x1f << CRYPTO_CNTR1_IV1_REG_F8_BEARER)
/* F9 definition of CRYPTO_AUTH_IV4 REG */
#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS 0 /* bit 2 - 0 */
#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS_MASK \
(0x7 << CRYPTO_AUTH_IV4_REG_F9_VALID_BIS)
/* engines_avail */
#define CRYPTO_ENCR_AES_SEL 0
#define CRYPTO_DES_SEL 1
#define CRYPTO_ENCR_SNOW3G_SEL 2
#define CRYPTO_ENCR_KASUMI_SEL 3
#define CRYPTO_SHA_SEL 4
#define CRYPTO_SHA512_SEL 5
#define CRYPTO_AUTH_AES_SEL 6
#define CRYPTO_AUTH_SNOW3G_SEL 7
#define CRYPTO_AUTH_KASUMI_SEL 8
#define CRYPTO_BAM_PIPE_SETS 9 /* bit 12 - 9 */
#define CRYPTO_AXI_WR_BEATS 13 /* bit 18 - 13 */
#define CRYPTO_AXI_RD_BEATS 19 /* bit 24 - 19 */
#define CRYPTO_ENCR_ZUC_SEL 26
#define CRYPTO_AUTH_ZUC_SEL 27
#define CRYPTO_ZUC_ENABLE 28
#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_ */
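The *_ALG, *_MODE, and *_KEY_SZ definitions above are field values rather than pre-shifted masks, so a register word is composed by shifting each value to its field offset. A hedged sketch for an AES-256-CBC encr_seg_cfg value (a real configuration sets further bits, e.g. pipe/HW key selection or CCM handling):

/* Hedged sketch: compose an ENCR_SEG_CFG word for AES-256-CBC.
 * Illustrative only; real setups set additional bits.
 */
static inline u32 example_encr_seg_cfg_aes256_cbc(void)
{
	return (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
	       (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE) |
	       (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ);
}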

View File

@ -0,0 +1,123 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#pragma once
// AUTOGENERATED FILE: DO NOT EDIT
#include <linux/types.h>
#include "smcinvoke_object.h"
#define HDCP1_PROVISION 0
#define HDCP1_VERIFY 1
#define HDCP1_SET_ENCRYPTION 2
#define HDCP1_SET_ENCRYPTION_V2 3
#define HDCP1_SET_KEY 4
#define HDCP1_SET_KEY_V2 5
#define HDCP1_SET_MODE 6
static inline int32_t hdcp1_release(struct Object self)
{
return Object_invoke(self, Object_OP_release, 0, 0);
}
static inline int32_t hdcp1_retain(struct Object self)
{
return Object_invoke(self, Object_OP_retain, 0, 0);
}
static inline int32_t hdcp1_provision(struct Object self, uint32_t keyFormat_val,
const void *key_ptr, size_t key_len,
const void *dps_ptr, size_t dps_len)
{
union ObjectArg a[3] = {{{0, 0}}};
a[0].b = (struct ObjectBuf) {&keyFormat_val, sizeof(uint32_t)};
a[1].bi = (struct ObjectBufIn) {key_ptr, key_len * 1};
a[2].bi = (struct ObjectBufIn) {dps_ptr, dps_len * 1};
return Object_invoke(self, HDCP1_PROVISION, a,
ObjectCounts_pack(3, 0, 0, 0));
}
static inline int32_t hdcp1_verify(struct Object self, uint32_t deviceType_val)
{
union ObjectArg a[1] = {{{0, 0}}};
a[0].b = (struct ObjectBuf) {&deviceType_val, sizeof(uint32_t)};
return Object_invoke(self, HDCP1_VERIFY, a,
ObjectCounts_pack(1, 0, 0, 0));
}
static inline int32_t hdcp1_set_encryption(struct Object self, uint32_t enable_val)
{
union ObjectArg a[1] = {{{0, 0}}};
a[0].b = (struct ObjectBuf) {&enable_val, sizeof(uint32_t)};
return Object_invoke(self, HDCP1_SET_ENCRYPTION, a,
ObjectCounts_pack(1, 0, 0, 0));
}
static inline int32_t hdcp1_set_encryption_v2(struct Object self, uint32_t enable_val,
uint32_t deviceType_val)
{
union ObjectArg a[1] = {{{0, 0}}};
struct {
uint32_t m_enable;
uint32_t m_deviceType;
} i;
a[0].b = (struct ObjectBuf) {&i, 8};
i.m_enable = enable_val;
i.m_deviceType = deviceType_val;
return Object_invoke(self, HDCP1_SET_ENCRYPTION_V2, a,
ObjectCounts_pack(1, 0, 0, 0));
}
static inline int32_t hdcp1_set_key(struct Object self, void *ksv_ptr, size_t ksv_len,
size_t *ksv_lenout)
{
union ObjectArg a[1] = {{{0, 0}}};
int32_t result = 0;
a[0].b = (struct ObjectBuf) {ksv_ptr, ksv_len * 1};
result = Object_invoke(self, HDCP1_SET_KEY, a,
ObjectCounts_pack(0, 1, 0, 0));
*ksv_lenout = a[0].b.size / 1;
return result;
}
static inline int32_t hdcp1_set_key_v2(struct Object self, void *ksv_ptr,
size_t ksv_len, size_t *ksv_lenout,
uint32_t deviceType_val)
{
union ObjectArg a[2] = {{{0, 0}}};
int32_t result = 0;
a[1].b = (struct ObjectBuf) {ksv_ptr, ksv_len * 1};
a[0].b = (struct ObjectBuf) {&deviceType_val, sizeof(uint32_t)};
result = Object_invoke(self, HDCP1_SET_KEY_V2, a,
ObjectCounts_pack(1, 1, 0, 0));
*ksv_lenout = a[1].b.size / 1;
return result;
}
static inline int32_t hdcp1_set_mode(struct Object self, int32_t mode_val)
{
union ObjectArg a[1] = {{{0, 0}}};
a[0].b = (struct ObjectBuf) {&mode_val, sizeof(int32_t)};
return Object_invoke(self, HDCP1_SET_MODE, a,
ObjectCounts_pack(1, 0, 0, 0));
}
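As the stubs above suggest, ObjectCounts_pack() appears to take the counts of input buffers, output buffers, input objects, and output objects, with the ObjectArg array listing input buffers before output buffers. A hedged illustration of the pattern for a hypothetical method with one 32-bit input and one 32-bit output; HDCP1_EXAMPLE_OP is invented for illustration and is not part of the real interface:

#define HDCP1_EXAMPLE_OP 100 /* hypothetical method id, illustration only */
static inline int32_t hdcp1_example_op(struct Object self, uint32_t in_val,
				       uint32_t *out_ptr)
{
	union ObjectArg a[2] = {{{0, 0}}};

	a[0].b = (struct ObjectBuf) {&in_val, sizeof(uint32_t)};  /* input */
	a[1].b = (struct ObjectBuf) {out_ptr, sizeof(uint32_t)};  /* output */
	return Object_invoke(self, HDCP1_EXAMPLE_OP, a,
			     ObjectCounts_pack(1, 1, 0, 0));
}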

View File

@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#pragma once
// AUTOGENERATED FILE: DO NOT EDIT
#include <linux/types.h>
#include "smcinvoke_object.h"
#define IHDCP1OPS_NOTIFY_TOPOLOGY_CHANGE 0
static inline int32_t hdcp1_ops_release(struct Object self)
{
return Object_invoke(self, Object_OP_release, 0, 0);
}
static inline int32_t hdcp1_ops_retain(struct Object self)
{
return Object_invoke(self, Object_OP_retain, 0, 0);
}
static inline int32_t hdcp1_ops_notify_topology_change(struct Object self)
{
return Object_invoke(self, IHDCP1OPS_NOTIFY_TOPOLOGY_CHANGE, 0, 0);
}

View File

@ -0,0 +1,304 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#pragma once
// AUTOGENERATED FILE: DO NOT EDIT
#include <linux/types.h>
#include "smcinvoke_object.h"
#define HDCP2P2_PROVISION_KEY 0
#define HDCP2P2_VERIFY_KEY 1
#define HDCP2P2_TX_INIT 2
#define HDCP2P2_TX_DEINIT 3
#define HDCP2P2_RCVD_MSG 4
#define HDCP2P2_SEND_TIMEOUT 5
#define HDCP2P2_SET_HW_KEY 6
#define HDCP2P2_QUERY_STREAM_TYPE 7
#define HDCP2P2_INIT 8
#define HDCP2P2_DEINIT 9
#define HDCP2P2_VERSION 10
#define HDCP2P2_SESSION_INIT 11
#define HDCP2P2_SESSION_DEINIT 12
#define HDCP2P2_START_AUTH 13
#define HDCP2P2_SESSION_OPEN_STREAM 14
#define HDCP2P2_SESSION_CLOSE_STREAM 15
#define HDCP2P2_FORCE_ENCRYPTION 16
#define HDCP2P2_DELETE_PAIRING_INFO 17
static inline int32_t hdcp2p2_release(struct Object self)
{
return Object_invoke(self, Object_OP_release, 0, 0);
}
static inline int32_t hdcp2p2_retain(struct Object self)
{
return Object_invoke(self, Object_OP_retain, 0, 0);
}
static inline int32_t hdcp2p2_provision_key(struct Object self, const void *key_ptr,
size_t key_len,
const void *dps_ptr,
size_t dps_len)
{
union ObjectArg a[2] = {{{0, 0}}};
a[0].bi = (struct ObjectBufIn) {key_ptr, key_len * 1};
a[1].bi = (struct ObjectBufIn) {dps_ptr, dps_len * 1};
return Object_invoke(self, HDCP2P2_PROVISION_KEY, a,
ObjectCounts_pack(2, 0, 0, 0));
}
static inline int32_t hdcp2p2_verify_key(struct Object self)
{
return Object_invoke(self, HDCP2P2_VERIFY_KEY, 0, 0);
}
static inline int32_t hdcp2p2_tx_init(struct Object self, uint32_t sessionid_val,
uint32_t *ctxhandle_ptr)
{
union ObjectArg a[2] = {{{0, 0}}};
a[0].b = (struct ObjectBuf) {&sessionid_val, sizeof(uint32_t)};
a[1].b = (struct ObjectBuf) {ctxhandle_ptr, sizeof(uint32_t)};
return Object_invoke(self, HDCP2P2_TX_INIT, a,
ObjectCounts_pack(1, 1, 0, 0));
}
static inline int32_t hdcp2p2_tx_deinit(struct Object self, uint32_t ctxhandle_val)
{
union ObjectArg a[1] = {{{0, 0}}};
a[0].b = (struct ObjectBuf) {&ctxhandle_val, sizeof(uint32_t)};
return Object_invoke(self, HDCP2P2_TX_DEINIT, a,
ObjectCounts_pack(1, 0, 0, 0));
}
static inline int32_t hdcp2p2_rcvd_msg(
struct Object self, const void *reqMsg_ptr, size_t reqMsg_len,
uint32_t ctxhandle_val, void *resMsg_ptr, size_t resMsg_len,
size_t *resMsg_lenout, uint32_t *timeout_ptr, uint32_t *flag_ptr, uint32_t *state_ptr)
{
union ObjectArg a[4] = {{{0, 0}}};
int32_t result = 0;
struct {
uint32_t m_timeout;
uint32_t m_flag;
uint32_t m_state;
} o = {0, 0, 0};
a[2].b = (struct ObjectBuf) {&o, 12};
a[0].bi = (struct ObjectBufIn) {reqMsg_ptr, reqMsg_len * 1};
a[1].b = (struct ObjectBuf) {&ctxhandle_val, sizeof(uint32_t)};
a[3].b = (struct ObjectBuf) {resMsg_ptr, resMsg_len * 1};
result = Object_invoke(self, HDCP2P2_RCVD_MSG, a,
ObjectCounts_pack(2, 2, 0, 0));
*resMsg_lenout = a[3].b.size / 1;
*timeout_ptr = o.m_timeout;
*flag_ptr = o.m_flag;
*state_ptr = o.m_state;
return result;
}
static inline int32_t hdcp2p2_send_timeout(struct Object self, uint32_t ctxhandle_val,
void *resMsg_ptr, size_t resMsg_len,
size_t *resMsg_lenout,
uint32_t *timeout_ptr)
{
union ObjectArg a[3] = {{{0, 0}}};
int32_t result = 0;
a[0].b = (struct ObjectBuf) {&ctxhandle_val, sizeof(uint32_t)};
a[1].b = (struct ObjectBuf) {resMsg_ptr, resMsg_len * 1};
a[2].b = (struct ObjectBuf) {timeout_ptr, sizeof(uint32_t)};
result = Object_invoke(self, HDCP2P2_SEND_TIMEOUT, a,
ObjectCounts_pack(1, 2, 0, 0));
*resMsg_lenout = a[1].b.size / 1;
return result;
}
static inline int32_t hdcp2p2_set_hw_key(struct Object self, uint32_t ctxhandle_val)
{
union ObjectArg a[1] = {{{0, 0}}};
a[0].b = (struct ObjectBuf) {&ctxhandle_val, sizeof(uint32_t)};
return Object_invoke(self, HDCP2P2_SET_HW_KEY, a,
ObjectCounts_pack(1, 0, 0, 0));
}
static inline int32_t hdcp2p2_query_stream_type(
struct Object self, uint32_t ctxhandle_val, void *resMsg_ptr, size_t resMsg_len,
size_t *resMsg_lenout, uint32_t *timeout_ptr)
{
union ObjectArg a[3] = {{{0, 0}}};
int32_t result = 0;
a[0].b = (struct ObjectBuf) {&ctxhandle_val, sizeof(uint32_t)};
a[1].b = (struct ObjectBuf) {resMsg_ptr, resMsg_len * 1};
a[2].b = (struct ObjectBuf) {timeout_ptr, sizeof(uint32_t)};
result = Object_invoke(self, HDCP2P2_QUERY_STREAM_TYPE, a,
ObjectCounts_pack(1, 2, 0, 0));
*resMsg_lenout = a[1].b.size / 1;
return result;
}
static inline int32_t hdcp2p2_init(struct Object self, uint32_t clientVersion_val,
uint32_t *appversion_ptr)
{
union ObjectArg a[2] = {{{0, 0}}};
a[0].b = (struct ObjectBuf) {&clientVersion_val, sizeof(uint32_t)};
a[1].b = (struct ObjectBuf) {appversion_ptr, sizeof(uint32_t)};
return Object_invoke(self, HDCP2P2_INIT, a,
ObjectCounts_pack(1, 1, 0, 0));
}
static inline int32_t hdcp2p2_deinit(struct Object self)
{
return Object_invoke(self, HDCP2P2_DEINIT, 0, 0);
}
static inline int32_t hdcp2p2_version(struct Object self, uint32_t *appversion_ptr)
{
union ObjectArg a[1] = {{{0, 0}}};
a[0].b = (struct ObjectBuf) {appversion_ptr, sizeof(uint32_t)};
return Object_invoke(self, HDCP2P2_VERSION, a,
ObjectCounts_pack(0, 1, 0, 0));
}
static inline int32_t hdcp2p2_session_init(struct Object self, uint32_t deviceId_val,
uint32_t *sessionId_ptr)
{
union ObjectArg a[2] = {{{0, 0}}};
a[0].b = (struct ObjectBuf) {&deviceId_val, sizeof(uint32_t)};
a[1].b = (struct ObjectBuf) {sessionId_ptr, sizeof(uint32_t)};
return Object_invoke(self, HDCP2P2_SESSION_INIT, a,
ObjectCounts_pack(1, 1, 0, 0));
}
static inline int32_t hdcp2p2_session_deinit(struct Object self,
uint32_t sessionId_val)
{
union ObjectArg a[1] = {{{0, 0}}};
a[0].b = (struct ObjectBuf) {&sessionId_val, sizeof(uint32_t)};
return Object_invoke(self, HDCP2P2_SESSION_DEINIT, a,
ObjectCounts_pack(1, 0, 0, 0));
}
static inline int32_t hdcp2p2_start_auth(struct Object self, uint32_t ctxhandle_val,
void *resMsg_ptr, size_t resMsg_len,
size_t *resMsg_lenout,
uint32_t *timeout_ptr,
uint32_t *flag_ptr,
uint32_t *tzctxhandle_ptr)
{
union ObjectArg a[3] = {{{0, 0}}};
int32_t result = 0;
struct {
uint32_t m_timeout;
uint32_t m_flag;
uint32_t m_tzctxhandle;
} o = {0, 0, 0};
a[1].b = (struct ObjectBuf) {&o, 12};
a[0].b = (struct ObjectBuf) {&ctxhandle_val, sizeof(uint32_t)};
a[2].b = (struct ObjectBuf) {resMsg_ptr, resMsg_len * 1};
result = Object_invoke(self, HDCP2P2_START_AUTH, a,
ObjectCounts_pack(1, 2, 0, 0));
*resMsg_lenout = a[2].b.size / 1;
*timeout_ptr = o.m_timeout;
*flag_ptr = o.m_flag;
*tzctxhandle_ptr = o.m_tzctxhandle;
return result;
}
static inline int32_t hdcp2p2_session_open_stream(struct Object self,
uint32_t sessionid_val,
uint32_t vcpayloadid_val,
uint32_t stream_number_val,
uint32_t streamMediaType_val,
uint32_t *resStreamId_ptr)
{
union ObjectArg a[2] = {{{0, 0}}};
struct {
uint32_t m_sessionid;
uint32_t m_vcpayloadid;
uint32_t m_stream_number;
uint32_t m_streamMediaType;
} i = {0, 0, 0, 0};
a[0].b = (struct ObjectBuf) {&i, 16};
i.m_sessionid = sessionid_val;
i.m_vcpayloadid = vcpayloadid_val;
i.m_stream_number = stream_number_val;
i.m_streamMediaType = streamMediaType_val;
a[1].b = (struct ObjectBuf) {resStreamId_ptr, sizeof(uint32_t)};
return Object_invoke(self, HDCP2P2_SESSION_OPEN_STREAM, a,
ObjectCounts_pack(1, 1, 0, 0));
}
static inline int32_t hdcp2p2_session_close_stream(struct Object self,
uint32_t sessionid_val,
uint32_t streamId_val)
{
union ObjectArg a[1] = {{{0, 0}}};
struct {
uint32_t m_sessionid;
uint32_t m_streamId;
} i = {0, 0};
a[0].b = (struct ObjectBuf) {&i, 8};
i.m_sessionid = sessionid_val;
i.m_streamId = streamId_val;
return Object_invoke(self, HDCP2P2_SESSION_CLOSE_STREAM, a,
ObjectCounts_pack(1, 0, 0, 0));
}
static inline int32_t hdcp2p2_force_encryption(struct Object self,
uint32_t ctxhandle_val,
uint32_t enable_val)
{
union ObjectArg a[1] = {{{0, 0}}};
struct {
uint32_t m_ctxhandle;
uint32_t m_enable;
} i = {0, 0};
a[0].b = (struct ObjectBuf) {&i, 8};
i.m_ctxhandle = ctxhandle_val;
i.m_enable = enable_val;
return Object_invoke(self, HDCP2P2_FORCE_ENCRYPTION, a,
ObjectCounts_pack(1, 0, 0, 0));
}
static inline int32_t hdcp2p2_delete_pairing_info(struct Object self)
{
return Object_invoke(self, HDCP2P2_DELETE_PAIRING_INFO, 0, 0);
}

View File

@ -0,0 +1,338 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hdcp_main.h"
#include "hdcp_qseecom.h"
#include "hdcp_smcinvoke.h"
struct hdcp_ta_interface ta_interface;
static DEFINE_MUTEX(hdcp1_mutex_g);
static DEFINE_MUTEX(hdcp2_mutex_g);
void select_interface(bool use_smcinvoke)
{
if (use_smcinvoke) {
ta_interface.trusted_app_hdcp1_init = &hdcp1_init_smcinvoke;
ta_interface.trusted_app_hdcp1_feature_supported = &hdcp1_feature_supported_smcinvoke;
ta_interface.trusted_app_hdcp1_set_enc = &hdcp1_set_enc_smcinvoke;
ta_interface.trusted_app_hdcp1_ops_notify = &hdcp1_ops_notify_smcinvoke;
ta_interface.trusted_app_hdcp1_start = &hdcp1_start_smcinvoke;
ta_interface.trusted_app_hdcp1_stop = &hdcp1_stop_smcinvoke;
ta_interface.trusted_app_hdcp2_init = &hdcp2_init_smcinvoke;
ta_interface.trusted_app_hdcp2_deinit = &hdcp2_deinit_smcinvoke;
ta_interface.trusted_app_hdcp2_app_start = &hdcp2_app_start_smcinvoke;
ta_interface.trusted_app_hdcp2_app_start_auth = &hdcp2_app_start_auth_smcinvoke;
ta_interface.trusted_app_hdcp2_app_process_msg = &hdcp2_app_process_msg_smcinvoke;
ta_interface.trusted_app_hdcp2_app_enable_encryption = &hdcp2_app_enable_encryption_smcinvoke;
ta_interface.trusted_app_hdcp2_app_timeout = &hdcp2_app_timeout_smcinvoke;
ta_interface.trusted_app_hdcp2_app_query_stream = &hdcp2_app_query_stream_smcinvoke;
ta_interface.trusted_app_hdcp2_app_stop = &hdcp2_app_stop_smcinvoke;
ta_interface.trusted_app_hdcp2_feature_supported = &hdcp2_feature_supported_smcinvoke;
ta_interface.trusted_app_hdcp2_force_encryption = &hdcp2_force_encryption_smcinvoke;
ta_interface.trusted_app_hdcp2_open_stream = &hdcp2_open_stream_smcinvoke;
ta_interface.trusted_app_hdcp2_close_stream = &hdcp2_close_stream_smcinvoke;
ta_interface.trusted_app_hdcp2_update_app_data = &hdcp2_update_app_data_smcinvoke;
} else {
ta_interface.trusted_app_hdcp1_init = &hdcp1_init_qseecom;
ta_interface.trusted_app_hdcp1_feature_supported = &hdcp1_feature_supported_qseecom;
ta_interface.trusted_app_hdcp1_set_enc = &hdcp1_set_enc_qseecom;
ta_interface.trusted_app_hdcp1_ops_notify = &hdcp1_ops_notify_qseecom;
ta_interface.trusted_app_hdcp1_start = &hdcp1_start_qseecom;
ta_interface.trusted_app_hdcp1_stop = &hdcp1_stop_qseecom;
ta_interface.trusted_app_hdcp2_init = &hdcp2_init_qseecom;
ta_interface.trusted_app_hdcp2_deinit = &hdcp2_deinit_qseecom;
ta_interface.trusted_app_hdcp2_app_start = &hdcp2_app_start_qseecom;
ta_interface.trusted_app_hdcp2_app_start_auth = &hdcp2_app_start_auth_qseecom;
ta_interface.trusted_app_hdcp2_app_process_msg = &hdcp2_app_process_msg_qseecom;
ta_interface.trusted_app_hdcp2_app_timeout = &hdcp2_app_timeout_qseecom;
ta_interface.trusted_app_hdcp2_app_enable_encryption = &hdcp2_app_enable_encryption_qseecom;
ta_interface.trusted_app_hdcp2_app_query_stream = &hdcp2_app_query_stream_qseecom;
ta_interface.trusted_app_hdcp2_app_stop = &hdcp2_app_stop_qseecom;
ta_interface.trusted_app_hdcp2_feature_supported = &hdcp2_feature_supported_qseecom;
ta_interface.trusted_app_hdcp2_force_encryption = &hdcp2_force_encryption_qseecom;
ta_interface.trusted_app_hdcp2_open_stream = &hdcp2_open_stream_qseecom;
ta_interface.trusted_app_hdcp2_close_stream = &hdcp2_close_stream_qseecom;
ta_interface.trusted_app_hdcp2_update_app_data = &hdcp2_update_app_data_qseecom;
}
}
int hdcp1_count_ones(u8 *array, u8 len)
{
int i, j, count = 0;
for (i = 0; i < len; i++)
for (j = 0; j < 8; j++)
count += (((array[i] >> j) & 0x1) ? 1 : 0);
return count;
}
int hdcp1_validate_aksv(u32 aksv_msb, u32 aksv_lsb)
{
int const number_of_ones = 20;
u8 aksv[5] = {0};
pr_debug("AKSV=%02x%08x\n", aksv_msb, aksv_lsb);
aksv[0] = aksv_lsb & 0xFF;
aksv[1] = (aksv_lsb >> 8) & 0xFF;
aksv[2] = (aksv_lsb >> 16) & 0xFF;
aksv[3] = (aksv_lsb >> 24) & 0xFF;
aksv[4] = aksv_msb & 0xFF;
/* check there are 20 ones in AKSV */
if (hdcp1_count_ones(aksv, 5) != number_of_ones) {
pr_err("AKSV bit count failed\n");
return -EINVAL;
}
return 0;
}
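/*
 * Worked example (hedged sketch; values illustrative): 0xAA is
 * 10101010b, so an AKSV of msb 0xAA / lsb 0xAAAAAAAA yields 4 ones in
 * each of the five bytes assembled above, i.e. exactly 20 ones, and
 * validates; the all-ones pattern carries 40 ones and is rejected.
 */
static void __maybe_unused example_aksv_checks(void)
{
	WARN_ON(hdcp1_validate_aksv(0xAA, 0xAAAAAAAA) != 0);       /* 20 ones */
	WARN_ON(hdcp1_validate_aksv(0xFF, 0xFFFFFFFF) != -EINVAL); /* 40 ones */
}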
bool hdcp2_feature_supported(void *data)
{
int ret = 0;
mutex_lock(&hdcp2_mutex_g);
ret = ta_interface.trusted_app_hdcp2_feature_supported(data);
mutex_unlock(&hdcp2_mutex_g);
return ret;
}
EXPORT_SYMBOL_GPL(hdcp2_feature_supported);
int hdcp2_force_encryption(void *ctx, uint32_t enable)
{
int ret = 0;
mutex_lock(&hdcp2_mutex_g);
ret = ta_interface.trusted_app_hdcp2_force_encryption(ctx, enable);
mutex_unlock(&hdcp2_mutex_g);
return ret;
}
EXPORT_SYMBOL_GPL(hdcp2_force_encryption);
int hdcp2_app_comm(void *ctx, enum hdcp2_app_cmd cmd,
struct hdcp2_app_data *app_data)
{
int ret = 0;
uint32_t req_len = 0;
if (!ctx || !app_data) {
pr_err("invalid input\n");
return -EINVAL;
}
req_len = app_data->request.length;
mutex_lock(&hdcp2_mutex_g);
switch (cmd) {
case HDCP2_CMD_START:
ret = ta_interface.trusted_app_hdcp2_app_start(ctx, req_len);
break;
case HDCP2_CMD_START_AUTH:
ret = ta_interface.trusted_app_hdcp2_app_start_auth(ctx, req_len);
break;
case HDCP2_CMD_PROCESS_MSG:
ret = ta_interface.trusted_app_hdcp2_app_process_msg(ctx, req_len);
break;
case HDCP2_CMD_TIMEOUT:
ret = ta_interface.trusted_app_hdcp2_app_timeout(ctx, req_len);
break;
case HDCP2_CMD_EN_ENCRYPTION:
ret = ta_interface.trusted_app_hdcp2_app_enable_encryption(ctx, req_len);
break;
case HDCP2_CMD_QUERY_STREAM:
ret = ta_interface.trusted_app_hdcp2_app_query_stream(ctx, req_len);
break;
case HDCP2_CMD_STOP:
ret = ta_interface.trusted_app_hdcp2_app_stop(ctx);
break;
default:
goto error;
}
if (ret)
goto error;
ret = ta_interface.trusted_app_hdcp2_update_app_data(ctx, app_data);
error:
mutex_unlock(&hdcp2_mutex_g);
return ret;
}
EXPORT_SYMBOL_GPL(hdcp2_app_comm);
int hdcp2_open_stream(void *ctx, uint8_t vc_payload_id, uint8_t stream_number,
uint32_t *stream_id)
{
int ret = 0;
mutex_lock(&hdcp2_mutex_g);
ret = ta_interface.trusted_app_hdcp2_open_stream(ctx, vc_payload_id, stream_number,
stream_id);
mutex_unlock(&hdcp2_mutex_g);
return ret;
}
EXPORT_SYMBOL_GPL(hdcp2_open_stream);
int hdcp2_close_stream(void *ctx, uint32_t stream_id)
{
int ret = 0;
mutex_lock(&hdcp2_mutex_g);
ret = ta_interface.trusted_app_hdcp2_close_stream(ctx, stream_id);
mutex_unlock(&hdcp2_mutex_g);
return ret;
}
EXPORT_SYMBOL_GPL(hdcp2_close_stream);
void *hdcp2_init(u32 device_type)
{
void *data = NULL;
mutex_lock(&hdcp2_mutex_g);
data = ta_interface.trusted_app_hdcp2_init(device_type);
mutex_unlock(&hdcp2_mutex_g);
return data;
}
EXPORT_SYMBOL_GPL(hdcp2_init);
void hdcp2_set_hdcp_key_verify_retries(void *ctx, u32 max_hdcp_key_verify_retries)
{
struct hdcp2_qsee_handle *handle = ctx;
handle->max_hdcp_key_verify_retries = max_hdcp_key_verify_retries;
pr_debug("hdcp2 max_hdcp_key_verify_retries %d\n", handle->max_hdcp_key_verify_retries);
}
EXPORT_SYMBOL_GPL(hdcp2_set_hdcp_key_verify_retries);
void hdcp2_deinit(void *ctx)
{
ta_interface.trusted_app_hdcp2_deinit(ctx);
}
EXPORT_SYMBOL_GPL(hdcp2_deinit);
void *hdcp1_init(void)
{
void *data = NULL;
mutex_lock(&hdcp1_mutex_g);
data = ta_interface.trusted_app_hdcp1_init();
mutex_unlock(&hdcp1_mutex_g);
return data;
}
EXPORT_SYMBOL_GPL(hdcp1_init);
void hdcp1_set_hdcp_key_verify_retries(void *ctx, u32 max_hdcp_key_verify_retries)
{
struct hdcp1_qsee_handle *handle = ctx;
handle->max_hdcp_key_verify_retries = max_hdcp_key_verify_retries;
pr_debug("hdcp1 max_hdcp_key_verify_retries %d\n", handle->max_hdcp_key_verify_retries);
}
EXPORT_SYMBOL_GPL(hdcp1_set_hdcp_key_verify_retries);
void hdcp1_deinit(void *data)
{
kfree(data);
}
EXPORT_SYMBOL_GPL(hdcp1_deinit);
bool hdcp1_feature_supported(void *data)
{
bool supported = false;
mutex_lock(&hdcp1_mutex_g);
supported = ta_interface.trusted_app_hdcp1_feature_supported(data);
mutex_unlock(&hdcp1_mutex_g);
return supported;
}
EXPORT_SYMBOL_GPL(hdcp1_feature_supported);
int hdcp1_set_enc(void *data, bool enable)
{
int ret = 0;
mutex_lock(&hdcp1_mutex_g);
ret = ta_interface.trusted_app_hdcp1_set_enc(data, enable);
mutex_unlock(&hdcp1_mutex_g);
return ret;
}
EXPORT_SYMBOL_GPL(hdcp1_set_enc);
int hdcp1_ops_notify(void *data, void *topo, bool is_authenticated)
{
int ret = 0;
ret = ta_interface.trusted_app_hdcp1_ops_notify(data, topo, is_authenticated);
return ret;
}
EXPORT_SYMBOL_GPL(hdcp1_ops_notify);
int hdcp1_start(void *data, u32 *aksv_msb, u32 *aksv_lsb)
{
int ret = 0;
mutex_lock(&hdcp1_mutex_g);
ret = ta_interface.trusted_app_hdcp1_start(data, aksv_msb, aksv_lsb);
mutex_unlock(&hdcp1_mutex_g);
return ret;
}
EXPORT_SYMBOL_GPL(hdcp1_start);
void hdcp1_stop(void *data)
{
mutex_lock(&hdcp1_mutex_g);
ta_interface.trusted_app_hdcp1_stop(data);
mutex_unlock(&hdcp1_mutex_g);
}
EXPORT_SYMBOL_GPL(hdcp1_stop);
static int __init hdcp_module_init(void)
{
struct device_node *np = NULL;
bool use_smcinvoke = false;
np = of_find_compatible_node(NULL, NULL, "qcom,hdcp");
if (!np) {
/* Select the qseecom interface as the default if the hdcp node
* is not present in the dtsi.
*/
select_interface(use_smcinvoke);
return 0;
}
use_smcinvoke = of_property_read_bool(np, "qcom,use-smcinvoke");
select_interface(use_smcinvoke);
return 0;
}
static void __exit hdcp_module_exit(void)
{
}
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("HDCP driver");
module_init(hdcp_module_init);
module_exit(hdcp_module_exit);
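The exported hdcp2_* symbols above imply a client sequence of init, feature check, app_comm commands, and deinit. A hedged sketch of such a caller; the device_type value and the minimal app_data handling are illustrative assumptions:

/* Hedged caller sketch for the exported HDCP2 API above. */
static int example_hdcp2_session(void)
{
	struct hdcp2_app_data app_data = {0};
	void *ctx;
	int rc;

	ctx = hdcp2_init(0 /* device_type, illustrative */);
	if (!ctx)
		return -ENODEV;

	if (!hdcp2_feature_supported(ctx)) {
		hdcp2_deinit(ctx);
		return -EOPNOTSUPP;
	}

	rc = hdcp2_app_comm(ctx, HDCP2_CMD_START, &app_data);
	/* ... drive PROCESS_MSG/TIMEOUT commands as the sink responds ... */
	hdcp2_app_comm(ctx, HDCP2_CMD_STOP, &app_data);
	hdcp2_deinit(ctx);
	return rc;
}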

View File

@ -0,0 +1,113 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HDCP_MAIN_H__
#define __HDCP_MAIN_H__
#include <linux/cdev.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/hdcp_qseecom.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <misc/qseecom_kernel.h>
#define HDCP2P2_APP_NAME "hdcp2p2"
#define HDCP1_APP_NAME "hdcp1"
#define HDCP1OPS_APP_NAME "ops"
#define HDCPSRM_APP_NAME "hdcpsrm"
#define QSEECOM_SBUFF_SIZE 0x1000
#define MAX_REC_ID_LIST_SIZE 160
#define MAX_TX_MESSAGE_SIZE 129
#define MAX_RX_MESSAGE_SIZE 534
#define MAX_TOPOLOGY_ELEMS 32
#define HDCP1_NOTIFY_TOPOLOGY 1
#define HDCP1_AKSV_SIZE 8
#define HDCP_CLIENT_MAKE_VERSION(maj, min, patch) \
((((maj)&0xFF) << 16) | (((min)&0xFF) << 8) | ((patch)&0xFF))
#define HCDP_TXMTR_GET_MAJOR_VERSION(v) (((v) >> 16) & 0xFF)
#define HCDP_TXMTR_GET_MINOR_VERSION(v) (((v) >> 8) & 0xFF)
#define HCDP_TXMTR_GET_PATCH_VERSION(v) ((v)&0xFF)
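/* Example: HDCP_CLIENT_MAKE_VERSION(2, 1, 0) packs to 0x020100. */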
#define HDCP_CLIENT_MAJOR_VERSION 2
#define HDCP_CLIENT_MINOR_VERSION 1
#define HDCP_CLIENT_PATCH_VERSION 0
#define HDCP_SUCCESS 0
/* Wait 200ms after authentication */
#define SLEEP_FORCE_ENCRYPTION_MS 200
/* Error code when Qseecomd is not up at boot time */
#define QSEECOMD_ERROR -4103
/* Wait for 100ms on every retry to check if Qseecomd is up */
#define SLEEP_QSEECOMD_WAIT_MS 100
#define SLEEP_SET_HW_KEY_MS 300
/* flags set by tz in response message */
#define HDCP_TXMTR_SUBSTATE_INIT 0
#define HDCP_TXMTR_SUBSTATE_WAITING_FOR_RECIEVERID_LIST 1
#define HDCP_TXMTR_SUBSTATE_PROCESSED_RECIEVERID_LIST 2
#define HDCP_TXMTR_SUBSTATE_WAITING_FOR_STREAM_READY_MESSAGE 3
#define HDCP_TXMTR_SUBSTATE_REPEATER_AUTH_COMPLETE 4
enum hdcp_state {
HDCP_STATE_INIT = 0x00,
HDCP_STATE_APP_LOADED = 0x01,
HDCP_STATE_SESSION_INIT = 0x02,
HDCP_STATE_TXMTR_INIT = 0x04,
HDCP_STATE_AUTHENTICATED = 0x08,
HDCP_STATE_ERROR = 0x10
};
struct hdcp_ta_interface
{
void *(*trusted_app_hdcp1_init)(void);
bool (*trusted_app_hdcp1_feature_supported)(void *data);
int (*trusted_app_hdcp1_set_enc)(void *data, bool enable);
int (*trusted_app_hdcp1_ops_notify)(void *data, void *topo,
bool is_authenticated);
int (*trusted_app_hdcp1_start)(void *data, u32 *aksv_msb,
u32 *aksv_lsb);
void (*trusted_app_hdcp1_stop)(void *data);
void *(*trusted_app_hdcp2_init)(u32 device_type);
void (*trusted_app_hdcp2_deinit)(void *ctx);
int (*trusted_app_hdcp2_app_start)(void *ctx, uint32_t req_len);
int (*trusted_app_hdcp2_app_start_auth)(void *ctx, uint32_t req_len);
int (*trusted_app_hdcp2_app_process_msg)(void *ctx, uint32_t req_len);
int (*trusted_app_hdcp2_app_timeout)(void *ctx, uint32_t req_len);
int (*trusted_app_hdcp2_app_enable_encryption)(void *ctx, uint32_t req_len);
int (*trusted_app_hdcp2_app_query_stream)(void *ctx, uint32_t req_len);
int (*trusted_app_hdcp2_app_stop)(void *ctx);
bool (*trusted_app_hdcp2_feature_supported)(void *ctx);
int (*trusted_app_hdcp2_force_encryption)(void *ctx, uint32_t enable);
int (*trusted_app_hdcp2_open_stream)(void *ctx, uint8_t vc_payload_id,
uint8_t stream_number, uint32_t *stream_id);
int (*trusted_app_hdcp2_close_stream)(void *ctx, uint32_t stream_id);
int (*trusted_app_hdcp2_update_app_data)(void *ctx,
struct hdcp2_app_data *app_data);
};
int hdcp1_validate_aksv(u32 aksv_msb, u32 aksv_lsb);
#endif /* __HDCP_MAIN_H__ */
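
The version macros above pack one byte per field; a quick worked example:

/* HDCP_CLIENT_MAKE_VERSION(2, 1, 0) == 0x020100 */
uint32_t v = HDCP_CLIENT_MAKE_VERSION(HDCP_CLIENT_MAJOR_VERSION,
				      HDCP_CLIENT_MINOR_VERSION,
				      HDCP_CLIENT_PATCH_VERSION);
/* HCDP_TXMTR_GET_MAJOR_VERSION(v) == 2,
 * HCDP_TXMTR_GET_MINOR_VERSION(v) == 1,
 * HCDP_TXMTR_GET_PATCH_VERSION(v) == 0
 */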

File diff suppressed because it is too large.

View File

@ -0,0 +1,346 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HDCP_QSEECOM_H__
#define __HDCP_QSEECOM_H__
#include <linux/hdcp_qseecom.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "hdcp_main.h"
/*
 * @max_hdcp_key_verify_retries - Maximum number of key-verification retries.
 *                                Defaults to 0 (no retries, i.e. 0 ms of
 *                                waiting); the actual value is read from
 *                                the dtsi file.
*/
struct hdcp1_qsee_handle {
struct qseecom_handle *qseecom_handle;
struct qseecom_handle *hdcpops_handle;
bool feature_supported;
uint32_t device_type;
enum hdcp_state hdcp_state;
char *app_name;
uint32_t max_hdcp_key_verify_retries;
};
/*
 * If qseecomd starts late and HDCP key verification has
 * already started, the qseecomd_down flag is set to true;
 * it is cleared once qseecomd comes up. The initial
 * assumption is that qseecomd starts in time.
*/
static bool qseecomd_down;
/*
 * @max_hdcp_key_verify_retries - Maximum number of key-verification retries.
 *                                Defaults to 0 (no retries, i.e. 0 ms of
 *                                waiting); the actual value is read from
 *                                the dtsi file.
*/
struct hdcp2_qsee_handle {
struct hdcp2_app_data app_data;
uint32_t tz_ctxhandle;
bool feature_supported;
enum hdcp_state hdcp_state;
struct qseecom_handle *qseecom_handle;
struct qseecom_handle *hdcpsrm_qseecom_handle;
uint32_t session_id;
bool legacy_app;
uint32_t device_type;
char *app_name;
unsigned char *req_buf;
unsigned char *res_buf;
int (*app_init)(struct hdcp2_qsee_handle *handle);
int (*tx_init)(struct hdcp2_qsee_handle *handle);
uint32_t max_hdcp_key_verify_retries;
};
struct hdcp1_key_set_req {
uint32_t commandid;
} __packed;
struct hdcp1_key_set_rsp {
uint32_t commandid;
uint32_t ret;
uint8_t ksv[HDCP1_AKSV_SIZE];
} __packed;
struct hdcp1_ops_notify_req {
uint32_t commandid;
uint32_t device_type;
uint8_t recv_id_list[MAX_REC_ID_LIST_SIZE];
int32_t recv_id_len;
struct hdcp1_topology topology;
bool is_authenticated;
} __packed;
struct hdcp1_ops_notify_rsp {
uint32_t commandid;
uint32_t ret;
} __packed;
struct hdcp1_set_enc_req {
uint32_t commandid;
uint32_t enable;
} __packed;
struct hdcp1_set_enc_rsp {
uint32_t commandid;
uint32_t ret;
} __packed;
struct hdcp1_key_verify_req {
uint32_t commandid;
uint32_t key_type;
} __packed;
struct hdcp1_key_verify_rsp {
uint32_t commandId;
uint32_t ret;
} __packed;
struct hdcp_init_v1_req {
uint32_t commandid;
} __packed;
struct hdcp_init_v1_rsp {
uint32_t status;
uint32_t commandid;
uint32_t ctxhandle;
uint32_t timeout;
uint32_t msglen;
uint8_t message[MAX_TX_MESSAGE_SIZE];
} __packed;
struct hdcp_init_req {
uint32_t commandid;
uint32_t clientversion;
} __packed;
struct hdcp_init_rsp {
uint32_t status;
uint32_t commandid;
uint32_t appversion;
} __packed;
struct hdcp_session_init_req {
uint32_t commandid;
uint32_t deviceid;
} __packed;
struct hdcp_session_init_rsp {
uint32_t status;
uint32_t commandid;
uint32_t sessionid;
} __packed;
struct hdcp_tx_init_v1_req {
uint32_t commandid;
} __packed;
struct hdcp_tx_init_v1_rsp {
uint32_t status;
uint32_t commandid;
uint32_t ctxhandle;
uint32_t timeout;
uint32_t msglen;
uint8_t message[MAX_TX_MESSAGE_SIZE];
} __packed;
struct hdcp_tx_init_req {
uint32_t commandid;
uint32_t sessionid;
} __packed;
struct hdcp_tx_init_rsp {
uint32_t status;
uint32_t commandid;
uint32_t ctxhandle;
} __packed;
struct hdcp_version_req {
uint32_t commandid;
} __packed;
struct hdcp_version_rsp {
uint32_t status;
uint32_t commandId;
uint32_t appversion;
} __packed;
struct hdcp_session_open_stream_req {
uint32_t commandid;
uint32_t sessionid;
uint32_t vcpayloadid;
uint32_t stream_number;
uint32_t streamMediaType;
} __packed;
struct hdcp_session_open_stream_rsp {
uint32_t status;
uint32_t commandid;
uint32_t streamid;
} __packed;
struct hdcp_session_close_stream_req {
uint32_t commandid;
uint32_t sessionid;
uint32_t streamid;
} __packed;
struct hdcp_session_close_stream_rsp {
uint32_t status;
uint32_t commandid;
} __packed;
struct hdcp_force_encryption_req {
uint32_t commandid;
uint32_t ctxhandle;
uint32_t enable;
} __packed;
struct hdcp_force_encryption_rsp {
uint32_t status;
uint32_t commandid;
} __packed;
struct hdcp_tx_deinit_req {
uint32_t commandid;
uint32_t ctxhandle;
} __packed;
struct hdcp_tx_deinit_rsp {
uint32_t status;
uint32_t commandid;
} __packed;
struct hdcp_session_deinit_req {
uint32_t commandid;
uint32_t sessionid;
} __packed;
struct hdcp_session_deinit_rsp {
uint32_t status;
uint32_t commandid;
} __packed;
struct hdcp_deinit_req {
uint32_t commandid;
} __packed;
struct hdcp_deinit_rsp {
uint32_t status;
uint32_t commandid;
} __packed;
struct hdcp_query_stream_type_req {
uint32_t commandid;
uint32_t ctxhandle;
} __packed;
struct hdcp_query_stream_type_rsp {
uint32_t status;
uint32_t commandid;
uint32_t timeout;
uint32_t msglen;
uint8_t msg[MAX_TX_MESSAGE_SIZE];
} __packed;
struct hdcp_set_hw_key_req {
uint32_t commandid;
uint32_t ctxhandle;
} __packed;
struct hdcp_set_hw_key_rsp {
uint32_t status;
uint32_t commandid;
} __packed;
struct hdcp_send_timeout_req {
uint32_t commandid;
uint32_t ctxhandle;
} __packed;
struct hdcp_send_timeout_rsp {
uint32_t status;
uint32_t commandid;
uint32_t timeout;
uint32_t msglen;
uint8_t message[MAX_TX_MESSAGE_SIZE];
} __packed;
struct hdcp_start_auth_req {
uint32_t commandid;
uint32_t ctxHandle;
} __packed;
struct hdcp_start_auth_rsp {
uint32_t status;
uint32_t commandid;
uint32_t ctxhandle;
uint32_t timeout;
uint32_t msglen;
uint8_t message[MAX_TX_MESSAGE_SIZE];
} __packed;
struct hdcp_rcvd_msg_req {
uint32_t commandid;
uint32_t ctxhandle;
uint32_t msglen;
uint8_t msg[MAX_RX_MESSAGE_SIZE];
} __packed;
struct hdcp_rcvd_msg_rsp {
uint32_t status;
uint32_t commandid;
uint32_t state;
uint32_t timeout;
uint32_t flag;
uint32_t msglen;
uint8_t msg[MAX_TX_MESSAGE_SIZE];
} __packed;
struct hdcp_verify_key_req {
uint32_t commandid;
} __packed;
struct hdcp_verify_key_rsp {
uint32_t status;
uint32_t commandId;
} __packed;
#define HDCP1_SET_KEY 202
#define HDCP1_KEY_VERIFY 204
#define HDCP1_SET_ENC 205
/* DP device type */
#define DEVICE_TYPE_DP 0x8002
void *hdcp1_init_qseecom(void);
bool hdcp1_feature_supported_qseecom(void *data);
int hdcp1_set_enc_qseecom(void *data, bool enable);
int hdcp1_ops_notify_qseecom(void *data, void *topo, bool is_authenticated);
int hdcp1_start_qseecom(void *data, u32 *aksv_msb, u32 *aksv_lsb);
void hdcp1_stop_qseecom(void *data);
void *hdcp2_init_qseecom(u32 device_type);
void hdcp2_deinit_qseecom(void *ctx);
int hdcp2_app_start_qseecom(void *ctx, uint32_t req_len);
int hdcp2_app_start_auth_qseecom(void *ctx, uint32_t req_len);
int hdcp2_app_process_msg_qseecom(void *ctx, uint32_t req_len);
int hdcp2_app_timeout_qseecom(void *ctx, uint32_t req_len);
int hdcp2_app_enable_encryption_qseecom(void *ctx, uint32_t req_len);
int hdcp2_app_query_stream_qseecom(void *ctx, uint32_t req_len);
int hdcp2_app_stop_qseecom(void *ctx);
bool hdcp2_feature_supported_qseecom(void *ctx);
int hdcp2_force_encryption_qseecom(void *ctx, uint32_t enable);
int hdcp2_open_stream_qseecom(void *ctx, uint8_t vc_payload_id,
uint8_t stream_number, uint32_t *stream_id);
int hdcp2_close_stream_qseecom(void *ctx, uint32_t stream_id);
int hdcp2_update_app_data_qseecom(void *ctx, struct hdcp2_app_data *app_data);
#endif /* __HDCP_QSEECOM_H__ */
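
Each *_req/*_rsp pair above is exchanged with the trusted app through the shared buffer of a qseecom_handle in a single qseecom_send_command() call (misc/qseecom_kernel.h). A minimal sketch of that pattern, assuming the usual QSEECOM_ALIGN() layout of request and response in the shared buffer; the real marshalling lives in the suppressed hdcp_qseecom.c diff:

static int hdcp1_set_enc_sketch(struct hdcp1_qsee_handle *h, bool enable)
{
	/* request at the start of the shared buffer, response right after it */
	struct hdcp1_set_enc_req *req =
		(struct hdcp1_set_enc_req *)h->qseecom_handle->sbuf;
	struct hdcp1_set_enc_rsp *rsp =
		(struct hdcp1_set_enc_rsp *)(h->qseecom_handle->sbuf +
					     QSEECOM_ALIGN(sizeof(*req)));
	int rc;

	req->commandid = HDCP1_SET_ENC;
	req->enable = enable;
	rc = qseecom_send_command(h->qseecom_handle,
				  req, QSEECOM_ALIGN(sizeof(*req)),
				  rsp, QSEECOM_ALIGN(sizeof(*rsp)));
	return rc ? rc : (int)rsp->ret;
}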

File diff suppressed because it is too large.

View File

@ -0,0 +1,62 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HDCP_SMCINVOKE_H__
#define __HDCP_SMCINVOKE_H__
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <include/linux/smcinvoke_object.h>
#include "hdcp_main.h"
struct hdcp1_smcinvoke_handle {
struct Object hdcp1_app_obj;
struct Object hdcp1_appcontroller_obj;
struct Object hdcp1ops_app_obj;
struct Object hdcp1ops_appcontroller_obj;
bool feature_supported;
uint32_t device_type;
enum hdcp_state hdcp_state;
};
struct hdcp2_smcinvoke_handle {
struct hdcp2_app_data app_data;
uint32_t tz_ctxhandle;
bool feature_supported;
enum hdcp_state hdcp_state;
struct Object hdcp2_app_obj;
struct Object hdcp2_appcontroller_obj;
struct Object hdcpsrm_app_obj;
struct Object hdcpsrm_appcontroller_obj;
uint32_t session_id;
uint32_t device_type;
};
void *hdcp1_init_smcinvoke(void);
bool hdcp1_feature_supported_smcinvoke(void *data);
int hdcp1_set_enc_smcinvoke(void *data, bool enable);
int hdcp1_ops_notify_smcinvoke(void *data, void *topo, bool is_authenticated);
int hdcp1_start_smcinvoke(void *data, u32 *aksv_msb, u32 *aksv_lsb);
void hdcp1_stop_smcinvoke(void *data);
void *hdcp2_init_smcinvoke(u32 device_type);
void hdcp2_deinit_smcinvoke(void *ctx);
int hdcp2_app_start_smcinvoke(void *ctx, uint32_t req_len);
int hdcp2_app_start_auth_smcinvoke(void *ctx, uint32_t req_len);
int hdcp2_app_process_msg_smcinvoke(void *ctx, uint32_t req_len);
int hdcp2_app_timeout_smcinvoke(void *ctx, uint32_t req_len);
int hdcp2_app_enable_encryption_smcinvoke(void *ctx, uint32_t req_len);
int hdcp2_app_query_stream_smcinvoke(void *ctx, uint32_t req_len);
int hdcp2_app_stop_smcinvoke(void *ctx);
bool hdcp2_feature_supported_smcinvoke(void *ctx);
int hdcp2_force_encryption_smcinvoke(void *ctx, uint32_t enable);
int hdcp2_open_stream_smcinvoke(void *ctx, uint8_t vc_payload_id,
uint8_t stream_number, uint32_t *stream_id);
int hdcp2_close_stream_smcinvoke(void *ctx, uint32_t stream_id);
int hdcp2_update_app_data_smcinvoke(void *ctx, struct hdcp2_app_data *app_data);
#endif /* __HDCP_SMCINVOKE_H__ */
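
Both back ends expose the same HDCP 2.x lifecycle. A plausible call order, inferred from the hdcp_state flags in hdcp_main.h; the exact sequencing and the req_len handling are assumptions:

static void hdcp2_lifecycle_sketch(uint32_t req_len)
{
	/* DEVICE_TYPE_DP comes from hdcp_qseecom.h */
	void *ctx = hdcp2_init_smcinvoke(DEVICE_TYPE_DP);

	hdcp2_app_start_smcinvoke(ctx, req_len);	/* app/session/txmtr init */
	hdcp2_app_process_msg_smcinvoke(ctx, req_len);	/* feed sink messages */
	hdcp2_app_enable_encryption_smcinvoke(ctx, req_len);
	/* ...open/close streams, query stream type, handle timeouts... */
	hdcp2_app_stop_smcinvoke(ctx);
	hdcp2_deinit_smcinvoke(ctx);
}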

View File

@ -0,0 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "smcinvoke_object.h"
#define CTrustedCameraDriver_UID 283

View File

@ -0,0 +1,159 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
*
* Copyright (c) 2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#define IClientEnv_OP_open 0
#define IClientEnv_OP_registerLegacy 1
#define IClientEnv_OP_register 2
#define IClientEnv_OP_registerWithWhitelist 3
#define IClientEnv_OP_notifyDomainChange 4
#define IClientEnv_OP_registerWithCredentials 5
#define IClientEnv_OP_loadCmnlibFromBuffer 6
#define IClientEnv_OP_configTaRegion 7
#define IClientEnv_OP_adciAccept 8
#define IClientEnv_OP_adciShutdown 9
#include "smcinvoke_object.h"
static inline int32_t
IClientEnv_release(struct Object self)
{
return Object_invoke(self, Object_OP_release, 0, 0);
}
static inline int32_t
IClientEnv_retain(struct Object self)
{
return Object_invoke(self, Object_OP_retain, 0, 0);
}
static inline int32_t
IClientEnv_open(struct Object self, uint32_t uid_val, struct Object *obj_ptr)
{
union ObjectArg a[2];
int32_t result;
a[0].b = (struct ObjectBuf) { &uid_val, sizeof(uint32_t) };
result = Object_invoke(self, IClientEnv_OP_open, a, ObjectCounts_pack(1, 0, 0, 1));
*obj_ptr = a[1].o;
return result;
}
static inline int32_t
IClientEnv_registerLegacy(struct Object self, const void *credentials_ptr, size_t credentials_len,
struct Object *clientEnv_ptr)
{
union ObjectArg a[2];
int32_t result;
a[0].bi = (struct ObjectBufIn) { credentials_ptr, credentials_len * 1 };
result = Object_invoke(self, IClientEnv_OP_registerLegacy, a,
ObjectCounts_pack(1, 0, 0, 1));
*clientEnv_ptr = a[1].o;
return result;
}
static inline int32_t
IClientEnv_register(struct Object self, struct Object credentials_val,
struct Object *clientEnv_ptr)
{
union ObjectArg a[2];
int32_t result;
a[0].o = credentials_val;
result = Object_invoke(self, IClientEnv_OP_register, a,
ObjectCounts_pack(0, 0, 1, 1));
*clientEnv_ptr = a[1].o;
return result;
}
static inline int32_t
IClientEnv_registerWithWhitelist(struct Object self,
struct Object credentials_val, const uint32_t *uids_ptr,
size_t uids_len, struct Object *clientEnv_ptr)
{
union ObjectArg a[3];
int32_t result;
a[1].o = credentials_val;
a[0].bi = (struct ObjectBufIn) { uids_ptr, uids_len *
sizeof(uint32_t) };
result = Object_invoke(self, IClientEnv_OP_registerWithWhitelist, a,
ObjectCounts_pack(1, 0, 1, 1));
*clientEnv_ptr = a[2].o;
return result;
}
static inline int32_t
IClientEnv_notifyDomainChange(struct Object self)
{
return Object_invoke(self, IClientEnv_OP_notifyDomainChange, 0, 0);
}
static inline int32_t
IClientEnv_registerWithCredentials(struct Object self, struct Object
credentials_val, struct Object *clientEnv_ptr)
{
union ObjectArg a[2]={{{0,0}}};
int32_t result;
a[0].o = credentials_val;
result = Object_invoke(self, IClientEnv_OP_registerWithCredentials, a,
ObjectCounts_pack(0, 0, 1, 1));
*clientEnv_ptr = a[1].o;
return result;
}
static inline int32_t
IClientEnv_loadCmnlibFromBuffer(struct Object self, const void *cmnlibElf_ptr, size_t cmnlibElf_len)
{
union ObjectArg a[1]={{{0,0}}};
a[0].bi = (struct ObjectBufIn) { cmnlibElf_ptr, cmnlibElf_len * 1 };
return Object_invoke(self, IClientEnv_OP_loadCmnlibFromBuffer, a, ObjectCounts_pack(1, 0, 0, 0));
}
static inline int32_t
IClientEnv_configTaRegion(struct Object self, uint64_t appRgnAddr_val, uint32_t appRgnSize_val)
{
union ObjectArg a[1]={{{0,0}}};
struct {
uint64_t m_appRgnAddr;
uint32_t m_appRgnSize;
} i;
a[0].b = (struct ObjectBuf) { &i, 12 };
i.m_appRgnAddr = appRgnAddr_val;
i.m_appRgnSize = appRgnSize_val;
return Object_invoke(self, IClientEnv_OP_configTaRegion, a, ObjectCounts_pack(1, 0, 0, 0));
}
static inline int32_t
IClientEnv_adciAccept(struct Object self)
{
return Object_invoke(self, IClientEnv_OP_adciAccept, 0, 0);
}
static inline int32_t
IClientEnv_adciShutdown(struct Object self)
{
return Object_invoke(self, IClientEnv_OP_adciShutdown, 0, 0);
}
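
Typical kernel-side use is to obtain the root client-env object and then open a service by UID. A minimal sketch built from get_client_env_object() (declared in smcinvoke_object.h) and the CAppLoader_UID constant defined later in this commit:

static int32_t open_app_loader_sketch(struct Object *app_loader)
{
	struct Object client_env = Object_NULL;
	int32_t rc;

	rc = get_client_env_object(&client_env);
	if (!rc) {
		/* CAppLoader_UID == 3, from CAppLoader.h */
		rc = IClientEnv_open(client_env, CAppLoader_UID, app_loader);
		Object_ASSIGN_NULL(client_env);
	}
	return rc;
}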

View File

@ -0,0 +1,130 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "smcinvoke_object.h"
/**
* Struct containing values for programming of domain ID
*
* @version: Version info
* @protect: To protect or reset the lanes
* @csid_hw_idx_mask: Bit position denoting CSID in use
* @cdm_hw_idx_mask: Bit position denoting CDM in use
* @vc_mask: VC mask for identifying domain
* @phy_lane_sel_mask: PHY lane info - contains CPHY, DPHY and PHY ID values
* 0-15 bits -- PHY index
* 16-23 bits -- CPHY lanes
* 24-31 bits -- DPHY lanes
* @reserved: Reserved bit
*/
typedef struct {
uint32_t version;
uint32_t protect;
uint32_t csid_hw_idx_mask;
uint32_t cdm_hw_idx_mask;
uint64_t vc_mask;
uint64_t phy_lane_sel_mask;
uint64_t reserved;
} ITCDriverSensorInfo;
#define ITrustedCameraDriver_ERROR_NOT_ALLOWED 10
#define ITrustedCameraDriver_OP_dynamicProtectSensor 0
#define ITrustedCameraDriver_OP_getVersion 1
#define ITrustedCameraDriver_OP_dynamicConfigureFDPort 3
static inline int32_t
ITrustedCameraDriver_release(struct Object self)
{
return Object_invoke(self, Object_OP_release, 0, 0);
}
static inline int32_t
ITrustedCameraDriver_retain(struct Object self)
{
return Object_invoke(self, Object_OP_retain, 0, 0);
}
/*
* Description: This method allows protecting a camera sensor based on the sensor
* information provided.
*
* In: this - ITrustedCameraDriver object
* In: phy_info_ptr - Camera HW settings required for securing the usecase
* Out: void
* Return: Object_OK on success
* secure camera error codes from seccam_def on failure
*/
static inline int32_t
ITrustedCameraDriver_dynamicProtectSensor(struct Object self,
const ITCDriverSensorInfo *phy_info_ptr)
{
union ObjectArg a[1] = {{{0, 0}}};
a[0].bi = (struct ObjectBufIn) { phy_info_ptr, sizeof(ITCDriverSensorInfo) };
return Object_invoke(self, ITrustedCameraDriver_OP_dynamicProtectSensor, a,
ObjectCounts_pack(1, 0, 0, 0));
}
/*
* Description: Get the current version info
*
* In: this - ITrustedCameraDriver object
* Out: arch_ver_ptr - the pointer of arch version number.
* Out: max_ver_ptr - the pointer of the second part of the version number
* Out: min_ver_ptr - the pointer of the third part of the version number
* Return: Object_OK on success
*/
static inline int32_t
ITrustedCameraDriver_getVersion(struct Object self, uint32_t *arch_ver_ptr,
uint32_t *max_ver_ptr, uint32_t *min_ver_ptr)
{
union ObjectArg a[1] = {{{0, 0}}};
int32_t result;
struct {
uint32_t m_arch_ver;
uint32_t m_max_ver;
uint32_t m_min_ver;
} o = {0};
a[0].b = (struct ObjectBuf) { &o, 12 };
result = Object_invoke(self, ITrustedCameraDriver_OP_getVersion, a,
ObjectCounts_pack(0, 1, 0, 0));
*arch_ver_ptr = o.m_arch_ver;
*max_ver_ptr = o.m_max_ver;
*min_ver_ptr = o.m_min_ver;
return result;
}
/*
* Description: Dynamic configuration to allow secure/non-secure FD port
* on all the CSIDs
*
* In: this - ITrustedCameraDriver object
* In: protect - to secure or non-secure the port
* Out: void
* Return: Object_OK on success
* Object_ERROR on failure
* ITrustedCameraDriver_ERROR_NOT_ALLOWED on request to
* configure FD port even when disabled by OEM
*/
static inline int32_t
ITrustedCameraDriver_dynamicConfigureFDPort(struct Object self, uint32_t protect)
{
union ObjectArg a[1] = {{{0, 0}}};
a[0].b = (struct ObjectBuf) { &protect, sizeof(uint32_t) };
return Object_invoke(self, ITrustedCameraDriver_OP_dynamicConfigureFDPort, a,
ObjectCounts_pack(1, 0, 0, 0));
}
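
A worked example of the phy_lane_sel_mask packing documented above. Every field value is illustrative, and camera_obj stands for a previously opened ITrustedCameraDriver object:

static int32_t protect_sensor_sketch(struct Object camera_obj)
{
	ITCDriverSensorInfo info = {0};

	info.version = 1;			/* illustrative */
	info.protect = 1;			/* protect (not reset) the lanes */
	info.csid_hw_idx_mask = 1U << 0;	/* CSID0 in use */
	info.vc_mask = 0xF;			/* VC0..VC3 */
	/* bits 0-15: PHY index, 16-23: CPHY lanes, 24-31: DPHY lanes */
	info.phy_lane_sel_mask = (0xFULL << 24) | 0x1; /* PHY1, DPHY lanes 0-3 */

	return ITrustedCameraDriver_dynamicProtectSensor(camera_obj, &info);
}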

View File

@ -0,0 +1,108 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SMCI_CLIENTENV_H
#define __SMCI_CLIENTENV_H
#include "smci_object.h"
#include "IClientEnv.h"
#define SMCI_CLIENTENV_OP_OPEN 0
#define SMCI_CLIENTENV_OP_REGISTERLEGACY 1
#define SMCI_CLIENTENV_OP_REGISTER 2
#define SMCI_CLIENTENV_OP_REGISTERWITHWHITELIST 3
#define SMCI_CLIENTENV_OP_NOTIFYDOMAINCHANGE 4
#define SMCI_CLIENTENV_OP_REGISTERWITHCREDENTIALS 5
#define SMCI_CLIENTENV_OP_LOADCMNLIBFROMBUFFER 6
#define SMCI_CLIENTENV_OP_CONFIGTAREGION 7
#define SMCI_CLIENTENV_OP_ADCIACCEPT 8
#define SMCI_CLIENTENV_OP_ADCISUTDOWN 9
static inline int32_t
smci_clientenv_release(struct smci_object self)
{
return IClientEnv_release(self);
}
static inline int32_t
smci_clientenv_retain(struct smci_object self)
{
return IClientEnv_retain(self);
}
static inline int32_t
smci_clientenv_open(struct smci_object self, uint32_t uid_val, struct smci_object *obj_ptr)
{
return IClientEnv_open(self, uid_val, obj_ptr);
}
static inline int32_t
smci_clientenv_registerlegacy(struct smci_object self, const void *credentials_ptr,
size_t credentials_len, struct smci_object *clientenv_ptr)
{
return IClientEnv_registerLegacy(self, credentials_ptr,
credentials_len, clientenv_ptr);
}
static inline int32_t
smci_clientenv_register(struct smci_object self, struct smci_object credentials_val,
struct smci_object *clientenv_ptr)
{
return IClientEnv_register(self, credentials_val,
clientenv_ptr);
}
static inline int32_t
smci_clientenv_registerwithwhitelist(struct smci_object self,
struct smci_object credentials_val, const uint32_t *uids_ptr,
size_t uids_len, struct smci_object *clientenv_ptr)
{
return IClientEnv_registerWithWhitelist(self,
credentials_val, uids_ptr,
uids_len, clientenv_ptr);
}
static inline int32_t
smc_clientenv_notifydomainchange(struct smci_object self)
{
return IClientEnv_notifyDomainChange(self);
}
static inline int32_t
smci_clientenv_registerwithcredentials(struct smci_object self, struct smci_object
credentials_val, struct smci_object *clientenv_ptr)
{
return IClientEnv_registerWithCredentials(self,
credentials_val, clientenv_ptr);
}
static inline int32_t
smci_clientenv_loadcmnlibfrombuffer(struct smci_object self, const void *cmnlibelf_ptr,
size_t cmnlibelf_len)
{
return IClientEnv_loadCmnlibFromBuffer(self, cmnlibelf_ptr, cmnlibelf_len);
}
static inline int32_t
smci_clientenv_configtaregion(struct smci_object self, uint64_t apprgnaddr_val,
uint32_t apprgnsize_val)
{
return IClientEnv_configTaRegion(self, apprgnaddr_val, apprgnsize_val);
}
static inline int32_t
smci_clientenv_adciaccept(struct smci_object self)
{
return IClientEnv_adciAccept(self);
}
static inline int32_t
smci_clientenv_adcishutdown(struct smci_object self)
{
return IClientEnv_adciShutdown(self);
}
#endif /* __SMCI_CLIENTENV_H */

View File

@ -0,0 +1,151 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SMCI_OBJECT_H
#define __SMCI_OBJECT_H
#include <linux/types.h>
#include <linux/firmware.h>
#include <linux/qtee_shmbridge.h>
#include "smcinvoke.h"
#include "smcinvoke_object.h"
/*
* Method bits are not modified by transport layers. These describe the
* method (member function) being requested by the client.
*/
#define SMCI_OBJECT_OP_METHOD_MASK (0x0000FFFFu)
#define SMCI_OBJECT_OP_METHODID(op) ((op) & SMCI_OBJECT_OP_METHOD_MASK)
#define SMCI_OBJECT_OP_RELEASE (SMCI_OBJECT_OP_METHOD_MASK - 0)
#define SMCI_OBJECT_OP_RETAIN (SMCI_OBJECT_OP_METHOD_MASK - 1)
#define SMCI_OBJECT_OP_MAP_REGION 0
#define SMCI_OBJECT_OP_YIELD 1
#define SMCI_OBJECT_OP_SLEEP 2
#define SMCI_OBJECT_COUNTS_MAX_BI 0xF
#define SMCI_OBJECT_COUNTS_MAX_BO 0xF
#define SMCI_OBJECT_COUNTS_MAX_OI 0xF
#define SMCI_OBJECT_COUNTS_MAX_OO 0xF
/* unpack counts */
#define SMCI_OBJECT_COUNTS_NUM_BI(k) ((size_t) (((k) >> 0) & SMCI_OBJECT_COUNTS_MAX_BI))
#define SMCI_OBJECT_COUNTS_NUM_BO(k) ((size_t) (((k) >> 4) & SMCI_OBJECT_COUNTS_MAX_BO))
#define SMCI_OBJECT_COUNTS_NUM_OI(k) ((size_t) (((k) >> 8) & SMCI_OBJECT_COUNTS_MAX_OI))
#define SMCI_OBJECT_COUNTS_NUM_OO(k) ((size_t) (((k) >> 12) & SMCI_OBJECT_COUNTS_MAX_OO))
#define SMCI_OBJECT_COUNTS_NUM_BUFFERS(k) \
(SMCI_OBJECT_COUNTS_NUM_BI(k) + SMCI_OBJECT_COUNTS_NUM_BO(k))
#define SMCI_OBJECT_COUNTS_NUM_OBJECTS(k) \
(SMCI_OBJECT_COUNTS_NUM_OI(k) + SMCI_OBJECT_COUNTS_NUM_OO(k))
/* Indices into args[] */
#define SMCI_OBJECT_COUNTS_INDEX_BI(k) 0
#define SMCI_OBJECT_COUNTS_INDEX_BO(k) \
(SMCI_OBJECT_COUNTS_INDEX_BI(k) + SMCI_OBJECT_COUNTS_NUM_BI(k))
#define SMCI_OBJECT_COUNTS_INDEX_OI(k) \
(SMCI_OBJECT_COUNTS_INDEX_BO(k) + SMCI_OBJECT_COUNTS_NUM_BO(k))
#define SMCI_OBJECT_COUNTS_INDEX_OO(k) \
(SMCI_OBJECT_COUNTS_INDEX_OI(k) + SMCI_OBJECT_COUNTS_NUM_OI(k))
#define SMCI_OBJECT_COUNTS_TOTAL(k) \
(SMCI_OBJECT_COUNTS_INDEX_OO(k) + SMCI_OBJECT_COUNTS_NUM_OO(k))
#define SMCI_OBJECT_COUNTS_PACK(in_bufs, out_bufs, in_objs, out_objs) \
((uint32_t) ((in_bufs) | ((out_bufs) << 4) | \
((in_objs) << 8) | ((out_objs) << 12)))
#define SMCI_OBJECT_COUNTS_INDEX_BUFFERS(k) SMCI_OBJECT_COUNTS_INDEX_BI(k)
/* Object_invoke return codes */
#define SMCI_OBJECT_IS_OK(err) ((err) == 0)
#define SMCI_OBJECT_IS_ERROR(err) ((err) != 0)
/* Generic error codes */
#define SMCI_OBJECT_OK 0 /* non-specific success code */
#define SMCI_OBJECT_ERROR 1 /* non-specific error */
#define SMCI_OBJECT_ERROR_INVALID 2 /* unsupported/unrecognized request */
#define SMCI_OBJECT_ERROR_SIZE_IN 3 /* supplied buffer/string too large */
#define SMCI_OBJECT_ERROR_SIZE_OUT 4 /* supplied output buffer too small */
#define SMCI_OBJECT_ERROR_USERBASE 10 /* start of user-defined error range */
/* Transport layer error codes */
#define SMCI_OBJECT_ERROR_DEFUNCT -90 /* smci_object no longer exists */
#define SMCI_OBJECT_ERROR_ABORT -91 /* calling thread must exit */
#define SMCI_OBJECT_ERROR_BADOBJ -92 /* invalid smci_object context */
#define SMCI_OBJECT_ERROR_NOSLOTS -93 /* caller's smci_object table full */
#define SMCI_OBJECT_ERROR_MAXARGS -94 /* too many args */
#define SMCI_OBJECT_ERROR_MAXDATA -95 /* buffers too large */
#define SMCI_OBJECT_ERROR_UNAVAIL -96 /* the request could not be processed */
#define SMCI_OBJECT_ERROR_KMEM -97 /* kernel out of memory */
#define SMCI_OBJECT_ERROR_REMOTE -98 /* local method sent to remote smci_object */
#define SMCI_OBJECT_ERROR_BUSY -99 /* smci_object is busy */
#define SMCI_OBJECT_ERROR_TIMEOUT -103 /* Call Back smci_object invocation timed out. */
/* smci_objectop */
#define SMCI_OBJECT_OP_LOCAL ((uint32_t) 0x00008000U)
#define SMCI_OBJECT_OP_IS_LOCAL(op) (((op) & SMCI_OBJECT_OP_LOCAL) != 0)
/* smci_object */
#define SMCI_OBJECTCOUNTS_PACK(nbuffersin, nbuffersout, nobjectsin, nobjectsout) \
((uint32_t) ((nbuffersin) | \
((nbuffersout) << 4) | \
((nobjectsin) << 8) | \
((nobjectsout) << 12)))
#define smci_object_arg ObjectArg
#define smci_objectinvoke ObjectInvoke
#define smci_object Object
#define smci_object_buf ObjectBuf
#define smci_object_buf_in ObjectBufIn
static inline int32_t smci_object_invoke(struct smci_object o, uint32_t op,
union smci_object_arg *args, uint32_t k)
{
return Object_invoke(o, op, args, k);
}
#define SMCI_OBJECT_NULL ((struct smci_object){NULL, NULL})
#define SMCI_OBJECT_NOT_RETAINED
#define SMCI_OBJECT_CONSUMED
static inline int32_t smci_object_release(SMCI_OBJECT_CONSUMED struct smci_object o)
{
return Object_release(o);
}
static inline int32_t smci_object_retain(struct smci_object o)
{
return Object_retain(o);
}
#define SMCI_OBJECT_IS_NULL(o) ((o).invoke == NULL)
#define SMCI_OBJECT_RELEASE_IF(o) \
do { \
struct smci_object o_ = (o); \
if (!SMCI_OBJECT_IS_NULL(o_)) \
(void) smci_object_release(o_); \
} while (0)
static inline void smci_object_replace(struct smci_object *loc, struct smci_object obj_new)
{
Object_replace(loc, obj_new);
}
#define SMCI_OBJECT_ASSIGN_NULL(loc) smci_object_replace(&(loc), SMCI_OBJECT_NULL)
#endif /* __SMCI_OBJECT_H */

View File

@ -0,0 +1,110 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UAPI_SMCINVOKE_H_
#define _UAPI_SMCINVOKE_H_
#include <linux/types.h>
#include <linux/ioctl.h>
#define SMCINVOKE_USERSPACE_OBJ_NULL -1
#define DEFAULT_CB_OBJ_THREAD_CNT 4
#define SMCINVOKE_TZ_MIN_BUF_SIZE 4096
struct smcinvoke_buf {
__u64 addr;
__u64 size;
};
struct smcinvoke_obj {
__s64 fd;
__s64 cb_server_fd;
__s64 reserved;
};
union smcinvoke_arg {
struct smcinvoke_buf b;
struct smcinvoke_obj o;
};
/*
* struct smcinvoke_cmd_req: This structure is transparently sent to TEE
* @op - Operation to be performed
 * @counts - number of arguments passed
* @result - result of invoke operation
* @argsize - size of each of arguments
* @args - args is pointer to buffer having all arguments
* @reserved: IN/OUT: Usage is not defined but should be set to 0
*/
struct smcinvoke_cmd_req {
__u32 op;
__u32 counts;
__s32 result;
__u32 argsize;
__u64 args;
__s64 reserved;
};
/*
* struct smcinvoke_accept: structure to process CB req from TEE
* @has_resp: IN: Whether IOCTL is carrying response data
* @result: IN: Outcome of operation op
* @op: OUT: Operation to be performed on target object
* @counts: OUT: Number of arguments, embedded in buffer pointed by
* buf_addr, to complete operation
* @reserved: IN/OUT: Usage is not defined but should be set to 0.
* @argsize: IN: Size of any argument, all of equal size, embedded
* in buffer pointed by buf_addr
* @txn_id: OUT: An id that should be passed as it is for response
* @cbobj_id: OUT: Callback object which is target of operation op
* @buf_len: IN: Len of buffer pointed by buf_addr
* @buf_addr: IN: Buffer containing all arguments which are needed
* to complete operation op
*/
struct smcinvoke_accept {
__u32 has_resp;
__s32 result;
__u32 op;
__u32 counts;
__s32 reserved;
__u32 argsize;
__u64 txn_id;
__s64 cbobj_id;
__u64 buf_len;
__u64 buf_addr;
};
/*
* @cb_buf_size: IN: Max buffer size for any callback obj implemented by client
* @reserved: IN/OUT: Usage is not defined but should be set to 0
*/
struct smcinvoke_server {
__u64 cb_buf_size;
__s64 reserved;
};
#define SMCINVOKE_IOC_MAGIC 0x98
#define SMCINVOKE_IOCTL_INVOKE_REQ \
_IOWR(SMCINVOKE_IOC_MAGIC, 1, struct smcinvoke_cmd_req)
#define SMCINVOKE_IOCTL_ACCEPT_REQ \
_IOWR(SMCINVOKE_IOC_MAGIC, 2, struct smcinvoke_accept)
#define SMCINVOKE_IOCTL_SERVER_REQ \
_IOWR(SMCINVOKE_IOC_MAGIC, 3, struct smcinvoke_server)
#define SMCINVOKE_IOCTL_ACK_LOCAL_OBJ \
_IOWR(SMCINVOKE_IOC_MAGIC, 4, __s64)
/*
* smcinvoke logging buffer is for communicating with the smcinvoke driver additional
* info for debugging to be included in driver's log (if any)
*/
#define SMCINVOKE_LOG_BUF_SIZE 100
#define SMCINVOKE_IOCTL_LOG \
_IOC(_IOC_READ|_IOC_WRITE, SMCINVOKE_IOC_MAGIC, 255, SMCINVOKE_LOG_BUF_SIZE)
#endif /* _UAPI_SMCINVOKE_H_ */
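
A hypothetical userspace sketch of SMCINVOKE_IOCTL_INVOKE_REQ. The device node name, the notion that the device fd stands in for the root object, and the op/UID values are all assumptions, not taken from this commit; counts uses the same nibble packing as ObjectCounts_pack() in smcinvoke_object.h (in-bufs | out-bufs << 4 | in-objs << 8 | out-objs << 12):

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/smcinvoke.h>	/* this header, once installed */

int invoke_sketch(void)
{
	union smcinvoke_arg args[2] = {0};
	struct smcinvoke_cmd_req req = {0};
	uint32_t uid = 3;				/* hypothetical service UID */
	int fd = open("/dev/smcinvoke", O_RDWR);	/* node name assumed */

	if (fd < 0)
		return -1;
	args[0].b.addr = (__u64)(uintptr_t)&uid;	/* one input buffer */
	args[0].b.size = sizeof(uid);
	args[1].o.fd = SMCINVOKE_USERSPACE_OBJ_NULL;	/* one output object slot */
	req.op = 0;				/* hypothetical: IClientEnv open */
	req.counts = 1 | (1 << 12);		/* 1 buffer in, 1 object out */
	req.argsize = sizeof(union smcinvoke_arg);
	req.args = (__u64)(uintptr_t)args;
	return ioctl(fd, SMCINVOKE_IOCTL_INVOKE_REQ, &req); /* result in req.result */
}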

View File

@ -0,0 +1,202 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SMCINVOKE_OBJECT_H
#define __SMCINVOKE_OBJECT_H
#include <linux/types.h>
#include <linux/firmware.h>
#include <linux/qtee_shmbridge.h>
#include "smcinvoke.h"
/*
* Method bits are not modified by transport layers. These describe the
* method (member function) being requested by the client.
*/
#define OBJECT_OP_METHOD_MASK (0x0000FFFFu)
#define OBJECT_OP_METHODID(op) ((op) & OBJECT_OP_METHOD_MASK)
#define OBJECT_OP_RELEASE (OBJECT_OP_METHOD_MASK - 0)
#define OBJECT_OP_RETAIN (OBJECT_OP_METHOD_MASK - 1)
#define OBJECT_OP_MAP_REGION 0
#define OBJECT_OP_YIELD 1
#define OBJECT_OP_SLEEP 2
#define OBJECT_COUNTS_MAX_BI 0xF
#define OBJECT_COUNTS_MAX_BO 0xF
#define OBJECT_COUNTS_MAX_OI 0xF
#define OBJECT_COUNTS_MAX_OO 0xF
/* unpack counts */
#define OBJECT_COUNTS_NUM_BI(k) ((size_t) (((k) >> 0) & OBJECT_COUNTS_MAX_BI))
#define OBJECT_COUNTS_NUM_BO(k) ((size_t) (((k) >> 4) & OBJECT_COUNTS_MAX_BO))
#define OBJECT_COUNTS_NUM_OI(k) ((size_t) (((k) >> 8) & OBJECT_COUNTS_MAX_OI))
#define OBJECT_COUNTS_NUM_OO(k) ((size_t) (((k) >> 12) & OBJECT_COUNTS_MAX_OO))
#define OBJECT_COUNTS_NUM_buffers(k) \
(OBJECT_COUNTS_NUM_BI(k) + OBJECT_COUNTS_NUM_BO(k))
#define OBJECT_COUNTS_NUM_objects(k) \
(OBJECT_COUNTS_NUM_OI(k) + OBJECT_COUNTS_NUM_OO(k))
/* Indices into args[] */
#define OBJECT_COUNTS_INDEX_BI(k) 0
#define OBJECT_COUNTS_INDEX_BO(k) \
(OBJECT_COUNTS_INDEX_BI(k) + OBJECT_COUNTS_NUM_BI(k))
#define OBJECT_COUNTS_INDEX_OI(k) \
(OBJECT_COUNTS_INDEX_BO(k) + OBJECT_COUNTS_NUM_BO(k))
#define OBJECT_COUNTS_INDEX_OO(k) \
(OBJECT_COUNTS_INDEX_OI(k) + OBJECT_COUNTS_NUM_OI(k))
#define OBJECT_COUNTS_TOTAL(k) \
(OBJECT_COUNTS_INDEX_OO(k) + OBJECT_COUNTS_NUM_OO(k))
#define OBJECT_COUNTS_PACK(in_bufs, out_bufs, in_objs, out_objs) \
((uint32_t) ((in_bufs) | ((out_bufs) << 4) | \
((in_objs) << 8) | ((out_objs) << 12)))
#define OBJECT_COUNTS_INDEX_buffers(k) OBJECT_COUNTS_INDEX_BI(k)
/* Object_invoke return codes */
#define OBJECT_isOK(err) ((err) == 0)
#define OBJECT_isERROR(err) ((err) != 0)
/* Generic error codes */
#define OBJECT_OK 0 /* non-specific success code */
#define OBJECT_ERROR 1 /* non-specific error */
#define OBJECT_ERROR_INVALID 2 /* unsupported/unrecognized request */
#define OBJECT_ERROR_SIZE_IN 3 /* supplied buffer/string too large */
#define OBJECT_ERROR_SIZE_OUT 4 /* supplied output buffer too small */
#define OBJECT_ERROR_USERBASE 10 /* start of user-defined error range */
/* Transport layer error codes */
#define OBJECT_ERROR_DEFUNCT -90 /* object no longer exists */
#define OBJECT_ERROR_ABORT -91 /* calling thread must exit */
#define OBJECT_ERROR_BADOBJ -92 /* invalid object context */
#define OBJECT_ERROR_NOSLOTS -93 /* caller's object table full */
#define OBJECT_ERROR_MAXARGS -94 /* too many args */
#define OBJECT_ERROR_MAXDATA -95 /* buffers too large */
#define OBJECT_ERROR_UNAVAIL -96 /* the request could not be processed */
#define OBJECT_ERROR_KMEM -97 /* kernel out of memory */
#define OBJECT_ERROR_REMOTE -98 /* local method sent to remote object */
#define OBJECT_ERROR_BUSY -99 /* Object is busy */
#define Object_ERROR_TIMEOUT -103 /* Call Back Object invocation timed out. */
#define FOR_ARGS(ndxvar, counts, section) \
for (ndxvar = OBJECT_COUNTS_INDEX_##section(counts); \
ndxvar < (OBJECT_COUNTS_INDEX_##section(counts) \
+ OBJECT_COUNTS_NUM_##section(counts)); \
++ndxvar)
/* ObjectOp */
#define ObjectOp_METHOD_MASK ((uint32_t) 0x0000FFFFu)
#define ObjectOp_methodID(op) ((op) & ObjectOp_METHOD_MASK)
#define ObjectOp_LOCAL ((uint32_t) 0x00008000U)
#define ObjectOp_isLocal(op) (((op) & ObjectOp_LOCAL) != 0)
#define Object_OP_release (ObjectOp_METHOD_MASK - 0)
#define Object_OP_retain (ObjectOp_METHOD_MASK - 1)
/* Object */
#define ObjectCounts_pack(nBuffersIn, nBuffersOut, nObjectsIn, nObjectsOut) \
((uint32_t) ((nBuffersIn) | \
((nBuffersOut) << 4) | \
((nObjectsIn) << 8) | \
((nObjectsOut) << 12)))
union ObjectArg;
struct smcinvoke_cmd_req;
typedef int32_t (*ObjectInvoke)(void *h,
uint32_t op,
union ObjectArg *args,
uint32_t counts);
struct Object {
ObjectInvoke invoke;
void *context;
};
struct ObjectBuf {
void *ptr;
size_t size;
};
struct ObjectBufIn {
const void *ptr;
size_t size;
};
union ObjectArg {
struct ObjectBuf b;
struct ObjectBufIn bi;
struct Object o;
};
static inline int32_t Object_invoke(struct Object o, uint32_t op,
union ObjectArg *args, uint32_t k)
{
return o.invoke(o.context, op, args, k);
}
#define Object_NULL ((struct Object){NULL, NULL})
#define OBJECT_NOT_RETAINED
#define OBJECT_CONSUMED
static inline int32_t Object_release(OBJECT_CONSUMED struct Object o)
{
return Object_invoke((o), Object_OP_release, 0, 0);
}
static inline int32_t Object_retain(struct Object o)
{
return Object_invoke((o), Object_OP_retain, 0, 0);
}
#define Object_isNull(o) ((o).invoke == NULL)
#define Object_RELEASE_IF(o) \
do { \
struct Object o_ = (o); \
if (!Object_isNull(o_)) \
(void) Object_release(o_); \
} while (0)
static inline void Object_replace(struct Object *loc, struct Object objNew)
{
if (!Object_isNull(*loc))
Object_release(*loc);
if (!Object_isNull(objNew))
Object_retain(objNew);
*loc = objNew;
}
#define Object_ASSIGN_NULL(loc) Object_replace(&(loc), Object_NULL)
#define SMCINVOKE_INTERFACE_MAX_RETRY 5
#define SMCINVOKE_INTERFACE_BUSY_WAIT_MS 5
int smcinvoke_release_from_kernel_client(int fd);
int get_root_fd(int *root_fd);
int process_invoke_request_from_kernel_client(
int fd, struct smcinvoke_cmd_req *req);
char *firmware_request_from_smcinvoke(const char *appname, size_t *fw_size, struct qtee_shm *shm);
int32_t get_client_env_object(struct Object *clientEnvObj);
#endif /* __SMCINVOKE_OBJECT_H */
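
A struct Object is just an invoke routine plus a context pointer, so a kernel-side callback object reduces to one dispatch function. A minimal sketch; the method number and the refcounting are simplified assumptions:

static int32_t my_cbo_invoke(void *context, uint32_t op,
			     union ObjectArg *args, uint32_t counts)
{
	size_t i;

	switch (ObjectOp_methodID(op)) {
	case Object_OP_retain:
	case Object_OP_release:
		return OBJECT_OK;	/* refcounting elided in this sketch */
	case 0:				/* hypothetical method 0: log inputs */
		FOR_ARGS(i, counts, BI)
			pr_debug("in buf %zu: %zu bytes\n", i, args[i].bi.size);
		return OBJECT_OK;
	default:
		return OBJECT_ERROR_INVALID;
	}
}

static struct Object my_cbo = { my_cbo_invoke, NULL };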

View File

@ -0,0 +1,48 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/** @cond */
#pragma once
#include "smcinvoke_object.h"
#define IAppClient_ERROR_APP_NOT_FOUND INT32_C(10)
#define IAppClient_ERROR_APP_RESTART_FAILED INT32_C(11)
#define IAppClient_ERROR_APP_UNTRUSTED_CLIENT INT32_C(12)
#define IAppClient_ERROR_CLIENT_CRED_PARSING_FAILURE INT32_C(13)
#define IAppClient_ERROR_APP_LOAD_FAILED INT32_C(14)
#define IAppClient_OP_getAppObject 0
static inline int32_t
IAppClient_release(struct Object self)
{
return Object_invoke(self, Object_OP_release, 0, 0);
}
static inline int32_t
IAppClient_retain(struct Object self)
{
return Object_invoke(self, Object_OP_retain, 0, 0);
}
static inline int32_t
IAppClient_getAppObject(struct Object self, const void *appDistName_ptr, size_t appDistName_len, struct Object *obj_ptr)
{
int32_t result;
union ObjectArg a[2];
a[0].bi = (struct ObjectBufIn) { appDistName_ptr, appDistName_len * 1 };
result = Object_invoke(self, IAppClient_OP_getAppObject, a, ObjectCounts_pack(1, 0, 0, 1));
*obj_ptr = a[1].o;
return result;
}

View File

@ -0,0 +1,143 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/** @cond */
#pragma once
#include "smcinvoke_object.h"
#define IAppController_CBO_INTERFACE_WAIT UINT32_C(1)
#define IAppController_ERROR_APP_SUSPENDED INT32_C(10)
#define IAppController_ERROR_APP_BLOCKED_ON_LISTENER INT32_C(11)
#define IAppController_ERROR_APP_UNLOADED INT32_C(12)
#define IAppController_ERROR_APP_IN_USE INT32_C(13)
#define IAppController_ERROR_NOT_SUPPORTED INT32_C(14)
#define IAppController_ERROR_CBO_UNKNOWN INT32_C(15)
#define IAppController_ERROR_APP_UNLOAD_NOT_ALLOWED INT32_C(16)
#define IAppController_ERROR_APP_DISCONNECTED INT32_C(17)
#define IAppController_ERROR_USER_DISCONNECT_REJECTED INT32_C(18)
#define IAppController_ERROR_STILL_RUNNING INT32_C(19)
#define IAppController_OP_openSession 0
#define IAppController_OP_unload 1
#define IAppController_OP_getAppObject 2
#define IAppController_OP_installCBO 3
#define IAppController_OP_disconnect 4
#define IAppController_OP_restart 5
static inline int32_t
IAppController_release(struct Object self)
{
return Object_invoke(self, Object_OP_release, 0, 0);
}
static inline int32_t
IAppController_retain(struct Object self)
{
return Object_invoke(self, Object_OP_retain, 0, 0);
}
static inline int32_t
IAppController_openSession(struct Object self, uint32_t cancelCode_val,
	uint32_t connectionMethod_val, uint32_t connectionData_val,
	uint32_t paramTypes_val, uint32_t exParamTypes_val,
	const void *i1_ptr, size_t i1_len, const void *i2_ptr, size_t i2_len,
	const void *i3_ptr, size_t i3_len, const void *i4_ptr, size_t i4_len,
	void *o1_ptr, size_t o1_len, size_t *o1_lenout,
	void *o2_ptr, size_t o2_len, size_t *o2_lenout,
	void *o3_ptr, size_t o3_len, size_t *o3_lenout,
	void *o4_ptr, size_t o4_len, size_t *o4_lenout,
	struct Object imem1_val, struct Object imem2_val,
	struct Object imem3_val, struct Object imem4_val,
	uint32_t *memrefOutSz1_ptr, uint32_t *memrefOutSz2_ptr,
	uint32_t *memrefOutSz3_ptr, uint32_t *memrefOutSz4_ptr,
	struct Object *session_ptr, uint32_t *retValue_ptr,
	uint32_t *retOrigin_ptr)
{
union ObjectArg a[15];
struct {
uint32_t m_cancelCode;
uint32_t m_connectionMethod;
uint32_t m_connectionData;
uint32_t m_paramTypes;
uint32_t m_exParamTypes;
} i;
struct {
uint32_t m_memrefOutSz1;
uint32_t m_memrefOutSz2;
uint32_t m_memrefOutSz3;
uint32_t m_memrefOutSz4;
uint32_t m_retValue;
uint32_t m_retOrigin;
} o;
int32_t result;
a[0].b = (struct ObjectBuf) { &i, 20 };
a[5].b = (struct ObjectBuf) { &o, 24 };
i.m_cancelCode = cancelCode_val;
i.m_connectionMethod = connectionMethod_val;
i.m_connectionData = connectionData_val;
i.m_paramTypes = paramTypes_val;
i.m_exParamTypes = exParamTypes_val;
a[1].bi = (struct ObjectBufIn) { i1_ptr, i1_len * 1 };
a[2].bi = (struct ObjectBufIn) { i2_ptr, i2_len * 1 };
a[3].bi = (struct ObjectBufIn) { i3_ptr, i3_len * 1 };
a[4].bi = (struct ObjectBufIn) { i4_ptr, i4_len * 1 };
a[6].b = (struct ObjectBuf) { o1_ptr, o1_len * 1 };
a[7].b = (struct ObjectBuf) { o2_ptr, o2_len * 1 };
a[8].b = (struct ObjectBuf) { o3_ptr, o3_len * 1 };
a[9].b = (struct ObjectBuf) { o4_ptr, o4_len * 1 };
a[10].o = imem1_val;
a[11].o = imem2_val;
a[12].o = imem3_val;
a[13].o = imem4_val;
result = Object_invoke(self, IAppController_OP_openSession, a, ObjectCounts_pack(5, 5, 4, 1));
*o1_lenout = a[6].b.size / 1;
*o2_lenout = a[7].b.size / 1;
*o3_lenout = a[8].b.size / 1;
*o4_lenout = a[9].b.size / 1;
*memrefOutSz1_ptr = o.m_memrefOutSz1;
*memrefOutSz2_ptr = o.m_memrefOutSz2;
*memrefOutSz3_ptr = o.m_memrefOutSz3;
*memrefOutSz4_ptr = o.m_memrefOutSz4;
*session_ptr = a[14].o;
*retValue_ptr = o.m_retValue;
*retOrigin_ptr = o.m_retOrigin;
return result;
}
static inline int32_t
IAppController_unload(struct Object self)
{
return Object_invoke(self, IAppController_OP_unload, 0, 0);
}
static inline int32_t
IAppController_getAppObject(struct Object self, struct Object *obj_ptr)
{
union ObjectArg a[1];
int32_t result = Object_invoke(self, IAppController_OP_getAppObject, a, ObjectCounts_pack(0, 0, 0, 1));
*obj_ptr = a[0].o;
return result;
}
static inline int32_t
IAppController_installCBO(struct Object self, uint32_t uid_val, struct Object obj_val)
{
union ObjectArg a[2];
a[0].b = (struct ObjectBuf) { &uid_val, sizeof(uint32_t) };
a[1].o = obj_val;
return Object_invoke(self, IAppController_OP_installCBO, a, ObjectCounts_pack(1, 0, 1, 0));
}
static inline int32_t
IAppController_disconnect(struct Object self)
{
return Object_invoke(self, IAppController_OP_disconnect, 0, 0);
}
static inline int32_t
IAppController_restart(struct Object self)
{
return Object_invoke(self, IAppController_OP_restart, 0, 0);
}
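
openSession() above is the densest marshalling example in this commit: with ObjectCounts_pack(5, 5, 4, 1) the layout of a[15] follows directly from the index macros in smcinvoke_object.h, which is why the scalar in-struct sits at a[0] and the returned session object at a[14]:

/* With k = ObjectCounts_pack(5, 5, 4, 1):
 *   OBJECT_COUNTS_INDEX_BI(k) == 0   -> a[0]..a[4]   input buffers
 *   OBJECT_COUNTS_INDEX_BO(k) == 5   -> a[5]..a[9]   output buffers
 *   OBJECT_COUNTS_INDEX_OI(k) == 10  -> a[10]..a[13] input objects
 *   OBJECT_COUNTS_INDEX_OO(k) == 14  -> a[14]        the session object
 */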

View File

@ -0,0 +1,105 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#pragma once
#include "smcinvoke_object.h"
#include "IAppController.h"
#define IAppLoader_ERROR_INVALID_BUFFER INT32_C(10)
#define IAppLoader_ERROR_PIL_ROLLBACK_FAILURE INT32_C(11)
#define IAppLoader_ERROR_ELF_SIGNATURE_ERROR INT32_C(12)
#define IAppLoader_ERROR_METADATA_INVALID INT32_C(13)
#define IAppLoader_ERROR_MAX_NUM_APPS INT32_C(14)
#define IAppLoader_ERROR_NO_NAME_IN_METADATA INT32_C(15)
#define IAppLoader_ERROR_ALREADY_LOADED INT32_C(16)
#define IAppLoader_ERROR_EMBEDDED_IMAGE_NOT_FOUND INT32_C(17)
#define IAppLoader_ERROR_TZ_HEAP_MALLOC_FAILURE INT32_C(18)
#define IAppLoader_ERROR_TA_APP_REGION_MALLOC_FAILURE INT32_C(19)
#define IAppLoader_ERROR_CLIENT_CRED_PARSING_FAILURE INT32_C(20)
#define IAppLoader_ERROR_APP_UNTRUSTED_CLIENT INT32_C(21)
#define IAppLoader_ERROR_APP_NOT_LOADED INT32_C(22)
#define IAppLoader_ERROR_APP_MAX_CLIENT_CONNECTIONS INT32_C(23)
#define IAppLoader_ERROR_APP_BLACKLISTED INT32_C(24)
#define IAppLoader_OP_loadFromBuffer 0
#define IAppLoader_OP_loadFromRegion 1
#define IAppLoader_OP_loadEmbedded 2
#define IAppLoader_OP_connect 3
static inline int32_t
IAppLoader_release(struct Object self)
{
return Object_invoke(self, Object_OP_release, 0, 0);
}
static inline int32_t
IAppLoader_retain(struct Object self)
{
return Object_invoke(self, Object_OP_retain, 0, 0);
}
static inline int32_t
IAppLoader_loadFromBuffer(struct Object self, const void *appElf_ptr, size_t appElf_len, struct Object *appController_ptr)
{
union ObjectArg a[2];
int32_t result;
a[0].bi = (struct ObjectBufIn) { appElf_ptr, appElf_len * 1 };
result = Object_invoke(self, IAppLoader_OP_loadFromBuffer, a, ObjectCounts_pack(1, 0, 0, 1));
*appController_ptr = a[1].o;
return result;
}
static inline int32_t
IAppLoader_loadFromRegion(struct Object self, struct Object appElf_val, struct Object *appController_ptr)
{
union ObjectArg a[2];
int32_t result;
a[0].o = appElf_val;
result = Object_invoke(self, IAppLoader_OP_loadFromRegion, a, ObjectCounts_pack(0, 0, 1, 1));
*appController_ptr = a[1].o;
return result;
}
static inline int32_t
IAppLoader_loadEmbedded(struct Object self, const void *appName_ptr, size_t appName_len, struct Object *appController_ptr)
{
union ObjectArg a[2];
int32_t result;
a[0].bi = (struct ObjectBufIn) { appName_ptr, appName_len * 1 };
result = Object_invoke(self, IAppLoader_OP_loadEmbedded, a, ObjectCounts_pack(1, 0, 0, 1));
*appController_ptr = a[1].o;
return result;
}
static inline int32_t
IAppLoader_connect(struct Object self, const void *appName_ptr, size_t appName_len, struct Object *appController_ptr)
{
union ObjectArg a[2];
int32_t result;
a[0].bi = (struct ObjectBufIn) { appName_ptr, appName_len * 1 };
result = Object_invoke(self, IAppLoader_OP_connect, a, ObjectCounts_pack(1, 0, 0, 1));
*appController_ptr = a[1].o;
return result;
}
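
Putting the pieces together, loading a trusted app from the kernel plausibly looks like the sketch below: get the client env, open the app loader by UID, fetch the TA image with firmware_request_from_smcinvoke() (declared in smcinvoke_object.h), then hand it to IAppLoader_loadFromBuffer(). The app name and the error handling are illustrative:

static void load_ta_sketch(void)
{
	struct Object client_env = Object_NULL, loader = Object_NULL;
	struct Object controller = Object_NULL;
	struct qtee_shm shm = {0};
	size_t fw_size = 0;
	char *fw;

	if (get_client_env_object(&client_env))
		return;
	if (!IClientEnv_open(client_env, CAppLoader_UID, &loader)) {
		/* the TA blob comes back in QTEE shared memory */
		fw = firmware_request_from_smcinvoke("hdcp2p2", &fw_size, &shm);
		if (fw)
			IAppLoader_loadFromBuffer(loader, fw, fw_size, &controller);
		Object_ASSIGN_NULL(loader);
	}
	Object_ASSIGN_NULL(client_env);
}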

View File

@ -0,0 +1,48 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/** @cond */
#pragma once
#include "smcinvoke_object.h"
/** 0 is not a valid service ID. */
#define IOpener_INVALID_ID UINT32_C(0)
#define IOpener_ERROR_NOT_FOUND INT32_C(10)
#define IOpener_ERROR_PRIVILEGE INT32_C(11)
#define IOpener_ERROR_NOT_SUPPORTED INT32_C(12)
#define IOpener_OP_open 0
static inline int32_t
IOpener_release(struct Object self)
{
return Object_invoke(self, Object_OP_release, 0, 0);
}
static inline int32_t
IOpener_retain(struct Object self)
{
return Object_invoke(self, Object_OP_retain, 0, 0);
}
static inline int32_t
IOpener_open(struct Object self, uint32_t id_val, struct Object *obj_ptr)
{
union ObjectArg a[2];
int32_t result;
a[0].b = (struct ObjectBuf) { &id_val, sizeof(uint32_t) };
result = Object_invoke(self, IOpener_OP_open, a, ObjectCounts_pack(1, 0, 0, 1));
*obj_ptr = a[1].o;
return result;
}

View File

@ -0,0 +1,41 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SMCI_APPCLIENT_H
#define __SMCI_APPCLIENT_H
#include "smci_object.h"
#include "IAppClient.h"
#define SMCI_APPCLIENT_ERROR_APP_NOT_FOUND INT32_C(10)
#define SMCI_APPCLIENT_ERROR_APP_RESTART_FAILED INT32_C(11)
#define SMCI_APPCLIENT_ERROR_APP_UNTRUSTED_CLIENT INT32_C(12)
#define SMCI_APPCLIENT_ERROR_CLIENT_CRED_PARSING_FAILURE INT32_C(13)
#define SMCI_APPCLIENT_ERROR_APP_LOAD_FAILED INT32_C(14)
#define SMCI_APPCLIENT_UID (0x97)
#define SMCI_APPCLIENT_OP_GETAPPOBJECT 0
static inline int32_t
smci_appclient_release(struct smci_object self)
{
return IAppClient_release(self);
}
static inline int32_t
smci_appclient_retain(struct smci_object self)
{
return IAppClient_retain(self);
}
static inline int32_t
smci_appclient_getappobject(struct smci_object self, const void *app_dist_name_ptr,
size_t app_dist_name_len, struct smci_object *obj_ptr)
{
return IAppClient_getAppObject(self, app_dist_name_ptr,
app_dist_name_len, obj_ptr);
}
#endif /* __SMCI_APPCLIENT_H */

View File

@ -0,0 +1,100 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SMCI_APPCONTROLLER_H
#define __SMCI_APPCONTROLLER_H
#include "smci_object.h"
#include "IAppController.h"
#define SMCI_APPCONTROLLER_CBO_INTERFACE_WAIT UINT32_C(1)
#define SMCI_APPCONTROLLER_ERROR_APP_SUSPENDED INT32_C(10)
#define SMCI_APPCONTROLLER_ERROR_APP_BLOCKED_ON_LISTENER INT32_C(11)
#define SMCI_APPCONTROLLER_ERROR_APP_UNLOADED INT32_C(12)
#define SMCI_APPCONTROLLER_ERROR_APP_IN_USE INT32_C(13)
#define SMCI_APPCONTROLLER_ERROR_NOT_SUPPORTED INT32_C(14)
#define SMCI_APPCONTROLLER_ERROR_CBO_UNKNOWN INT32_C(15)
#define SMCI_APPCONTROLLER_ERROR_APP_UNLOAD_NOT_ALLOWED INT32_C(16)
#define SMCI_APPCONTROLLER_ERROR_APP_DISCONNECTED INT32_C(17)
#define SMCI_APPCONTROLLER_ERROR_USER_DISCONNECT_REJECTED INT32_C(18)
#define SMCI_APPCONTROLLER_ERROR_STILL_RUNNING INT32_C(19)
#define SMCI_APPCONTROLLER_OP_OPENSESSION 0
#define SMCI_APPCONTROLLER_OP_UNLOAD 1
#define SMCI_APPCONTROLLER_OP_GETAPPOBJECT 2
#define SMCI_APPCONTROLLER_OP_INSTALLCBO 3
#define SMCI_APPCONTROLLER_OP_DISCONNECT 4
#define SMCI_APPCONTROLLER_OP_RESTART 5
static inline int32_t
smci_appcontroller_release(struct smci_object self)
{
return IAppController_release(self);
}
static inline int32_t
smci_appcontroller_retain(struct smci_object self)
{
return IAppController_retain(self);
}
static inline int32_t
smci_appcontroller_opensession(struct smci_object self, uint32_t cancel_code_val,
uint32_t connection_method_val, uint32_t connection_data_val, uint32_t param_types_val,
uint32_t ex_param_types_val, const void *i1_ptr, size_t i1_len, const void *i2_ptr,
size_t i2_len, const void *i3_ptr, size_t i3_len, const void *i4_ptr, size_t i4_len,
void *o1_ptr, size_t o1_len, size_t *o1_lenout, void *o2_ptr, size_t o2_len,
size_t *o2_lenout, void *o3_ptr, size_t o3_len, size_t *o3_lenout, void *o4_ptr,
size_t o4_len, size_t *o4_lenout, struct smci_object imem1_val,
struct smci_object imem2_val, struct smci_object imem3_val, struct smci_object imem4_val,
uint32_t *memref_out_sz1_ptr, uint32_t *memref_out_sz2_ptr, uint32_t *memref_out_sz3_ptr,
uint32_t *memref_out_sz4_ptr, struct smci_object *session_ptr, uint32_t *ret_value_ptr,
uint32_t *ret_origin_ptr)
{
return IAppController_openSession(self, cancel_code_val,
connection_method_val, connection_data_val, param_types_val,
ex_param_types_val, i1_ptr, i1_len, i2_ptr,
i2_len, i3_ptr, i3_len, i4_ptr, i4_len,
o1_ptr, o1_len, o1_lenout, o2_ptr, o2_len,
o2_lenout, o3_ptr, o3_len, o3_lenout, o4_ptr,
o4_len, o4_lenout, imem1_val,
imem2_val, imem3_val, imem4_val,
memref_out_sz1_ptr, memref_out_sz2_ptr, memref_out_sz3_ptr,
memref_out_sz4_ptr, session_ptr, ret_value_ptr,
ret_origin_ptr);
}
static inline int32_t
smci_appcontroller_unload(struct smci_object self)
{
return IAppController_unload(self);
}
static inline int32_t
smci_appcontroller_getappobject(struct smci_object self, struct smci_object *obj_ptr)
{
return IAppController_getAppObject(self, obj_ptr);
}
static inline int32_t
smci_appcontroller_installcbo(struct smci_object self, uint32_t uid_val, struct smci_object obj_val)
{
return IAppController_installCBO(self, uid_val, obj_val);
}
static inline int32_t
smci_appcontroller_disconnect(struct smci_object self)
{
return IAppController_disconnect(self);
}
static inline int32_t
smci_appcontroller_restart(struct smci_object self)
{
return IAppController_restart(self);
}
#endif /* __SMCI_APPCONTROLLER_H */

View File

@ -0,0 +1,79 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SMCI_APPLOADER_H
#define __SMCI_APPLOADER_H
#include "smci_object.h"
#include "smci_appcontroller.h"
#include "IAppLoader.h"
#define SMCI_APPLOADER_ERROR_INVALID_BUFFER INT32_C(10)
#define SMCI_APPLOADER_ERROR_PIL_ROLLBACK_FAILURE INT32_C(11)
#define SMCI_APPLOADER_ERROR_ELF_SIGNATURE_ERROR INT32_C(12)
#define SMCI_APPLOADER_ERROR_METADATA_INVALID INT32_C(13)
#define SMCI_APPLOADER_ERROR_MAX_NUM_APPS INT32_C(14)
#define SMCI_APPLOADER_ERROR_NO_NAME_IN_METADATA INT32_C(15)
#define SMCI_APPLOADER_ERROR_ALREADY_LOADED INT32_C(16)
#define SMCI_APPLOADER_ERROR_EMBEDDED_IMAGE_NOT_FOUND INT32_C(17)
#define SMCI_APPLOADER_ERROR_TZ_HEAP_MALLOC_FAILURE INT32_C(18)
#define SMCI_APPLOADER_ERROR_TA_APP_REGION_MALLOC_FAILURE INT32_C(19)
#define SMCI_APPLOADER_ERROR_CLIENT_CRED_PARSING_FAILURE INT32_C(20)
#define SMCI_APPLOADER_ERROR_APP_UNTRUSTED_CLIENT INT32_C(21)
#define SMCI_APPLOADER_ERROR_APP_NOT_LOADED INT32_C(22)
#define SMCI_APPLOADER_ERROR_APP_MAX_CLIENT_CONNECTIONS INT32_C(23)
#define SMCI_APPLOADER_ERROR_APP_BLACKLISTED INT32_C(24)
#define SMCI_APPLOADER_OP_LOADFROMBUFFER 0
#define SMCI_APPLOADER_OP_LOADFROMREGION 1
#define SMCI_APPLOADER_OP_LOADEMBEDDED 2
#define SMCI_APPLOADER_OP_CONNECT 3
#define SMCI_APPLOADER_UID (0x3)
static inline int32_t
smci_apploader_release(struct smci_object self)
{
return IAppLoader_release(self);
}
static inline int32_t
smci_apploader_retain(struct smci_object self)
{
return IAppLoader_retain(self);
}
static inline int32_t
smci_apploader_loadfrombuffer(struct smci_object self, const void *appelf_ptr, size_t appelf_len,
struct smci_object *appcontroller_ptr)
{
return IAppLoader_loadFromBuffer(self, appelf_ptr, appelf_len,
appcontroller_ptr);
}
static inline int32_t
smci_apploader_loadfromregion(struct smci_object self, struct smci_object appelf_val,
struct smci_object *appcontroller_ptr)
{
return IAppLoader_loadFromRegion(self, appelf_val,
appcontroller_ptr);
}
static inline int32_t
smci_apploader_loadembedded(struct smci_object self, const void *appname_ptr, size_t appname_len,
struct smci_object *appcontroller_ptr)
{
return IAppLoader_loadEmbedded(self, appname_ptr, appname_len,
appcontroller_ptr);
}
static inline int32_t
smci_apploader_connect(struct smci_object self, const void *appname_ptr, size_t appname_len,
struct smci_object *appcontroller_ptr)
{
return IAppLoader_connect(self, appname_ptr, appname_len,
appcontroller_ptr);
}
#endif /* __SMCI_APPLOADER_H */

View File

@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SMCI_OPENER_H
#define __SMCI_OPENER_H
#include "smci_object.h"
#include "IOpener.h"
/** 0 is not a valid service ID. */
#define SMCI_OPENER_INVALID_ID UINT32_C(0)
#define SMCI_OPENER_ERROR_NOT_FOUND INT32_C(10)
#define SMCI_OPENER_ERROR_PRIVILEGE INT32_C(11)
#define SMCI_OPENER_ERROR_NOT_SUPPORTED INT32_C(12)
#define SMCI_OPENER_OP_OPEN 0
static inline int32_t
smci_opener_release(struct smci_object self)
{
return IOpener_release(self);
}
static inline int32_t
smci_opener_retain(struct smci_object self)
{
return IOpener_retain(self);
}
static inline int32_t
smci_opener_open(struct smci_object self, uint32_t id_val, struct smci_object *obj_ptr)
{
return IOpener_open(self, id_val, obj_ptr);
}
#endif /* __SMCI_OPENER_H */

View File

@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
/**
* @addtogroup CAppClient
* @{
Class CAppClient implements the \link IAppClient \endlink interface.
This class provides an interface to obtain app-provided functionalities.
The class ID `AppClient` is not included in the default privilege set.
*/
#pragma once
#include <smcinvoke_object.h>
#define CAppClient_UID (0x97)

View File

@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#pragma once
#include <include/linux/smcinvoke_object.h>
// This class provides an interface to load Secure Applications in QSEE
#define CAppLoader_UID (3)

View File

@ -0,0 +1,390 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UAPI_QCEDEV__H
#define _UAPI_QCEDEV__H
#include <linux/types.h>
#include <linux/ioctl.h>
#define QCEDEV_MAX_SHA_BLOCK_SIZE 64
#define QCEDEV_MAX_BEARER 31
#define QCEDEV_MAX_KEY_SIZE 64
#define QCEDEV_MAX_IV_SIZE 32
#define QCEDEV_MAX_BUFFERS 16
#define QCEDEV_MAX_SHA_DIGEST 32
#define QCEDEV_USE_PMEM 1
#define QCEDEV_NO_PMEM 0
#define QCEDEV_AES_KEY_128 16
#define QCEDEV_AES_KEY_192 24
#define QCEDEV_AES_KEY_256 32
/**
*qcedev_oper_enum: Operation types
* @QCEDEV_OPER_ENC: Encrypt
* @QCEDEV_OPER_DEC: Decrypt
 * @QCEDEV_OPER_ENC_NO_KEY: Encrypt. The key does not need to be specified
 *                          by the user; it was already set by an external
 *                          processor.
 * @QCEDEV_OPER_DEC_NO_KEY: Decrypt. The key does not need to be specified
 *                          by the user; it was already set by an external
 *                          processor.
*/
enum qcedev_oper_enum {
QCEDEV_OPER_DEC = 0,
QCEDEV_OPER_ENC = 1,
QCEDEV_OPER_DEC_NO_KEY = 2,
QCEDEV_OPER_ENC_NO_KEY = 3,
QCEDEV_OPER_LAST
};
/**
*qcedev_offload_oper_enum: Offload operation types (uses pipe keys)
 * @QCEDEV_OFFLOAD_HLOS_HLOS: Non-secure to non-secure (e.g. audio decode).
 * @QCEDEV_OFFLOAD_HLOS_CPB: Non-secure to secure (e.g. video decode).
 * @QCEDEV_OFFLOAD_CPB_HLOS: Secure to non-secure (e.g. HDCP video encode).
*/
enum qcedev_offload_oper_enum {
QCEDEV_OFFLOAD_HLOS_HLOS = 1,
QCEDEV_OFFLOAD_HLOS_HLOS_1 = 2,
QCEDEV_OFFLOAD_HLOS_CPB = 3,
QCEDEV_OFFLOAD_HLOS_CPB_1 = 4,
QCEDEV_OFFLOAD_CPB_HLOS = 5,
QCEDEV_OFFLOAD_OPER_LAST
};
/**
*qcedev_offload_err_enum: Offload error conditions
* @QCEDEV_OFFLOAD_NO_ERROR: Successful crypto operation.
* @QCEDEV_OFFLOAD_GENERIC_ERROR: Generic error in crypto status.
 * @QCEDEV_OFFLOAD_KEY_TIMER_EXPIRED_ERROR: Pipe key timer expired.
* @QCEDEV_OFFLOAD_KEY_PAUSE_ERROR: Pipe key pause (means GPCE is paused).
*/
enum qcedev_offload_err_enum {
QCEDEV_OFFLOAD_NO_ERROR = 0,
QCEDEV_OFFLOAD_GENERIC_ERROR = 1,
QCEDEV_OFFLOAD_KEY_TIMER_EXPIRED_ERROR = 2,
QCEDEV_OFFLOAD_KEY_PAUSE_ERROR = 3
};
/**
 *qcedev_cipher_alg_enum: Cipher algorithm types
* @QCEDEV_ALG_DES: DES
* @QCEDEV_ALG_3DES: 3DES
* @QCEDEV_ALG_AES: AES
*/
enum qcedev_cipher_alg_enum {
QCEDEV_ALG_DES = 0,
QCEDEV_ALG_3DES = 1,
QCEDEV_ALG_AES = 2,
QCEDEV_ALG_LAST
};
/**
*qcedev_cipher_mode_enum: AES mode
* @QCEDEV_AES_MODE_CBC: CBC
* @QCEDEV_AES_MODE_ECB: ECB
* @QCEDEV_AES_MODE_CTR: CTR
* @QCEDEV_AES_MODE_XTS: XTS
* @QCEDEV_AES_MODE_CCM: CCM
* @QCEDEV_DES_MODE_CBC: CBC
* @QCEDEV_DES_MODE_ECB: ECB
*/
enum qcedev_cipher_mode_enum {
QCEDEV_AES_MODE_CBC = 0,
QCEDEV_AES_MODE_ECB = 1,
QCEDEV_AES_MODE_CTR = 2,
QCEDEV_AES_MODE_XTS = 3,
QCEDEV_AES_MODE_CCM = 4,
QCEDEV_DES_MODE_CBC = 5,
QCEDEV_DES_MODE_ECB = 6,
QCEDEV_AES_DES_MODE_LAST
};
/**
*enum qcedev_sha_alg_enum: Secure Hashing Algorithm
* @QCEDEV_ALG_SHA1: Digest returned: 20 bytes (160 bits)
* @QCEDEV_ALG_SHA256: Digest returned: 32 bytes (256 bit)
* @QCEDEV_ALG_SHA1_HMAC: HMAC returned 20 bytes (160 bits)
* @QCEDEV_ALG_SHA256_HMAC: HMAC returned 32 bytes (256 bit)
* @QCEDEV_ALG_AES_CMAC: Configurable MAC size
*/
enum qcedev_sha_alg_enum {
QCEDEV_ALG_SHA1 = 0,
QCEDEV_ALG_SHA256 = 1,
QCEDEV_ALG_SHA1_HMAC = 2,
QCEDEV_ALG_SHA256_HMAC = 3,
QCEDEV_ALG_AES_CMAC = 4,
QCEDEV_ALG_SHA_ALG_LAST
};
/**
* struct buf_info - Buffer information
* @offset: Offset from the base address of the buffer
* (Used when buffer is allocated using PMEM)
* @vaddr: Virtual buffer address pointer
* @len: Size of the buffer
*/
struct buf_info {
union {
__u32 offset;
__u8 *vaddr;
};
__u32 len;
};
/**
* struct qcedev_vbuf_info - Source and destination Buffer information
* @src: Array of buf_info for input/source
* @dst: Array of buf_info for output/destination
*/
struct qcedev_vbuf_info {
struct buf_info src[QCEDEV_MAX_BUFFERS];
struct buf_info dst[QCEDEV_MAX_BUFFERS];
};
/**
* struct qcedev_pmem_info - Stores PMEM buffer information
* @fd_src: Handle to /dev/adsp_pmem used to allocate
* memory for input/src buffer
* @src: Array of buf_info for input/source
* @fd_dst: Handle to /dev/adsp_pmem used to allocate
* memory for output/dst buffer
* @dst: Array of buf_info for output/destination
*/
struct qcedev_pmem_info {
int fd_src;
struct buf_info src[QCEDEV_MAX_BUFFERS];
int fd_dst;
struct buf_info dst[QCEDEV_MAX_BUFFERS];
};
/**
* struct qcedev_cipher_op_req - Holds the ciphering request information
* @use_pmem (IN): Flag to indicate if buffer source is PMEM
* QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
* @pmem (IN): Stores PMEM buffer information.
* Refer struct qcedev_pmem_info
* @vbuf (IN/OUT): Stores Source and destination Buffer information
* Refer to struct qcedev_vbuf_info
* @data_len (IN): Total Length of input/src and output/dst in bytes
* @in_place_op (IN): Indicates whether the operation is inplace where
* source == destination
* When using PMEM allocated memory, must set this to 1
* @enckey (IN): 128 bits of confidentiality key
* enckey[0] bit 127-120, enckey[1] bit 119-112,..
* enckey[15] bit 7-0
* @encklen (IN): Length of the encryption key (set to 128 bits/16
* bytes in the driver)
* @iv (IN/OUT): Initialisation vector data
* This is updated by the driver, incremented by
* number of blocks encrypted/decrypted.
* @ivlen (IN): Length of the IV
* @byteoffset (IN): Offset in the Cipher BLOCK (applicable and to be set
* for AES-128 CTR mode only)
* @alg (IN): Type of ciphering algorithm: AES/DES/3DES
* @mode (IN): Mode to use with the AES algorithm: ECB/CBC/CTR
* Applicable when using the AES algorithm only
* @op (IN): Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
* QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
*
* If use_pmem is set to 0, the driver assumes that memory was not allocated
* via PMEM; the kernel will allocate memory, copy data from the user space
* buffers (data_src/data_dst), process it, and copy the results back to the
* user space buffers.
*
* If use_pmem is set to 1, the driver assumes that memory was allocated via
* PMEM.
* The kernel driver will use the fd_src to determine the kernel virtual address
* base that maps to the user space virtual address base for the buffer
* allocated in user space.
* The final input/src and output/dst buffer pointer will be determined
* by adding the offsets to the kernel virtual addr.
*
* If use of hardware key is supported in the target, user can configure the
* key parameters (encklen, enckey) to use the hardware key.
* In order to use the hardware key, set encklen to 0 and set the enckey
* data array to 0.
*/
struct qcedev_cipher_op_req {
__u8 use_pmem;
union {
struct qcedev_pmem_info pmem;
struct qcedev_vbuf_info vbuf;
};
__u32 entries;
__u32 data_len;
__u8 in_place_op;
__u8 enckey[QCEDEV_MAX_KEY_SIZE];
__u32 encklen;
__u8 iv[QCEDEV_MAX_IV_SIZE];
__u32 ivlen;
__u32 byteoffset;
enum qcedev_cipher_alg_enum alg;
enum qcedev_cipher_mode_enum mode;
enum qcedev_oper_enum op;
};
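/*
 * Illustrative sketch (not part of this UAPI): a minimal userspace AES-128
 * CBC encrypt request over the virtual-buffer path (use_pmem = 0), assuming
 * this header is included as <linux/qcedev.h> (assumed install path) and
 * that the device node is "/dev/qce" (assumed name; check the target).
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int qcedev_encrypt_cbc128(__u8 *buf, __u32 len,
				 const __u8 key[16], const __u8 iv[16])
{
	struct qcedev_cipher_op_req req;
	int ret, fd = open("/dev/qce", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return -1;
	memset(&req, 0, sizeof(req));
	req.use_pmem = QCEDEV_NO_PMEM;
	req.entries = 1;
	req.vbuf.src[0].vaddr = buf;
	req.vbuf.src[0].len = len;
	req.vbuf.dst[0].vaddr = buf;		/* in-place operation */
	req.vbuf.dst[0].len = len;
	req.data_len = len;
	req.in_place_op = 1;
	memcpy(req.enckey, key, QCEDEV_AES_KEY_128);
	req.encklen = QCEDEV_AES_KEY_128;
	memcpy(req.iv, iv, 16);
	req.ivlen = 16;
	req.alg = QCEDEV_ALG_AES;
	req.mode = QCEDEV_AES_MODE_CBC;
	req.op = QCEDEV_OPER_ENC;
	ret = ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req);
	close(fd);
	return ret;
}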
/**
* struct qcedev_sha_op_req - Holds the hashing request information
* @data (IN): Array of pointers to the data to be hashed
* @entries (IN): Number of buf_info entries in the data array
* @data_len (IN): Length of data to be hashed
* @digest (IN/OUT): Returns the hashed data information
* @diglen (OUT): Size of the hashed/digest data
* @authkey (IN): Pointer to authentication key for HMAC
* @authklen (IN): Size of the authentication key
* @alg (IN): Secure Hash algorithm
*/
struct qcedev_sha_op_req {
struct buf_info data[QCEDEV_MAX_BUFFERS];
__u32 entries;
__u32 data_len;
__u8 digest[QCEDEV_MAX_SHA_DIGEST];
__u32 diglen;
__u8 *authkey;
__u32 authklen;
enum qcedev_sha_alg_enum alg;
};
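/*
 * Illustrative sketch (not part of this UAPI): hashing one buffer with
 * SHA-256 through the init/update/final ioctl sequence, assuming @fd was
 * opened on the qcedev node and this header is included. Error paths are
 * collapsed for brevity.
 */
#include <string.h>
#include <sys/ioctl.h>

static int qcedev_sha256(int fd, __u8 *data, __u32 len, __u8 out[32])
{
	struct qcedev_sha_op_req req;

	memset(&req, 0, sizeof(req));
	req.alg = QCEDEV_ALG_SHA256;
	if (ioctl(fd, QCEDEV_IOCTL_SHA_INIT_REQ, &req))
		return -1;
	req.entries = 1;
	req.data[0].vaddr = data;
	req.data[0].len = len;
	req.data_len = len;
	if (ioctl(fd, QCEDEV_IOCTL_SHA_UPDATE_REQ, &req) ||
	    ioctl(fd, QCEDEV_IOCTL_SHA_FINAL_REQ, &req))
		return -1;
	memcpy(out, req.digest, 32);	/* diglen reports the digest size */
	return 0;
}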
/**
* struct pattern_info - Holds pattern information for pattern-based
* decryption/encryption for AES ECB, counter, and CBC modes.
* @patt_sz (IN): Total number of blocks.
* @proc_data_sz (IN): Number of blocks to be processed.
* @patt_offset (IN): Start of the segment.
*/
struct pattern_info {
__u8 patt_sz;
__u8 proc_data_sz;
__u8 patt_offset;
};
/**
* struct qcedev_offload_cipher_op_req - Holds the offload request information
* @vbuf (IN/OUT): Stores Source and destination Buffer information.
* Refer to struct qcedev_vbuf_info.
* @entries (IN): Number of entries to be processed as part of request.
* @data_len (IN): Total Length of input/src and output/dst in bytes
* @in_place_op (IN): Indicates whether the operation is inplace where
* source == destination.
* @encklen (IN): Length of the encryption key (set to 128 bits/16
* bytes in the driver).
* @iv (IN/OUT): Initialisation vector data
* This is updated by the driver, incremented by
* number of blocks encrypted/decrypted.
* @ivlen (IN): Length of the IV.
* @iv_ctr_size (IN): IV counter increment mask size.
* Driver sets the mask value based on this size.
* @byteoffset (IN): Offset in the Cipher BLOCK (applicable and to be set
* for AES-128 CTR mode only).
* @block_offset (IN): Offset in the block that needs a skip of encrypt/
* decrypt.
* @is_pattern_valid (IN): Indicates the request contains a valid pattern.
* @pattern_info (IN): The pattern to be used for the offload request.
* @is_copy_op (IN): Offload operations sometimes requires a copy between
* secure and non-secure buffers without any encrypt/
* decrypt operations.
* @alg (IN): Type of ciphering algorithm: AES/DES/3DES.
* @mode (IN): Mode use when using AES algorithm: ECB/CBC/CTR.
* Applicable when using AES algorithm only.
* @op (IN): Type of operation.
* Refer to qcedev_offload_oper_enum.
* @err (OUT): Error in crypto status.
* Refer to qcedev_offload_err_enum.
*/
struct qcedev_offload_cipher_op_req {
struct qcedev_vbuf_info vbuf;
__u32 entries;
__u32 data_len;
__u32 in_place_op;
__u32 encklen;
__u8 iv[QCEDEV_MAX_IV_SIZE];
__u32 ivlen;
__u32 iv_ctr_size;
__u32 byteoffset;
__u8 block_offset;
__u8 is_pattern_valid;
__u8 is_copy_op;
__u8 encrypt;
struct pattern_info pattern_info;
enum qcedev_cipher_alg_enum alg;
enum qcedev_cipher_mode_enum mode;
enum qcedev_offload_oper_enum op;
enum qcedev_offload_err_enum err;
};
/**
* struct qfips_verify_t - Holds data for FIPS Integrity test
* @kernel_size (IN): Size of kernel Image
* @kernel (IN): pointer to buffer containing the kernel Image
*/
struct qfips_verify_t {
unsigned int kernel_size;
void *kernel;
};
/**
* struct qcedev_map_buf_req - Holds the mapping request information
* fd (IN): Array of fds.
* num_fds (IN): Number of fds in fd[].
* fd_size (IN): Array of sizes corresponding to each fd in fd[].
* fd_offset (IN): Array of offsets corresponding to each fd in fd[].
* buf_vaddr (OUT): Array of mapped virtual addresses corresponding to
* each fd in fd[].
*/
struct qcedev_map_buf_req {
__s32 fd[QCEDEV_MAX_BUFFERS];
__u32 num_fds;
__u32 fd_size[QCEDEV_MAX_BUFFERS];
__u32 fd_offset[QCEDEV_MAX_BUFFERS];
__u64 buf_vaddr[QCEDEV_MAX_BUFFERS];
};
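/*
 * Illustrative sketch (not part of this UAPI): mapping a single dmabuf/ion
 * fd for use by the crypto driver and returning the mapped address. Assumes
 * @qfd was opened on the qcedev node and this header is included.
 */
#include <sys/ioctl.h>

static int qcedev_map_one_buf(int qfd, __s32 buf_fd, __u32 size, __u64 *vaddr)
{
	struct qcedev_map_buf_req map;

	memset(&map, 0, sizeof(map));
	map.fd[0] = buf_fd;
	map.num_fds = 1;
	map.fd_size[0] = size;
	map.fd_offset[0] = 0;
	if (ioctl(qfd, QCEDEV_IOCTL_MAP_BUF_REQ, &map))
		return -1;
	*vaddr = map.buf_vaddr[0];
	return 0;
}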
/**
* struct qcedev_unmap_buf_req - Holds the unmapping request information
* fd (IN): Array of fds to unmap
* num_fds (IN): Number of fds in fd[].
*/
struct qcedev_unmap_buf_req {
__s32 fd[QCEDEV_MAX_BUFFERS];
__u32 num_fds;
};
struct file;
long qcedev_ioctl(struct file *file,
unsigned int cmd, unsigned long arg);
#define QCEDEV_IOC_MAGIC 0x87
#define QCEDEV_IOCTL_ENC_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 1, struct qcedev_cipher_op_req)
#define QCEDEV_IOCTL_DEC_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 2, struct qcedev_cipher_op_req)
#define QCEDEV_IOCTL_SHA_INIT_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 3, struct qcedev_sha_op_req)
#define QCEDEV_IOCTL_SHA_UPDATE_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 4, struct qcedev_sha_op_req)
#define QCEDEV_IOCTL_SHA_FINAL_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 5, struct qcedev_sha_op_req)
#define QCEDEV_IOCTL_GET_SHA_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 6, struct qcedev_sha_op_req)
#define QCEDEV_IOCTL_LOCK_CE \
_IO(QCEDEV_IOC_MAGIC, 7)
#define QCEDEV_IOCTL_UNLOCK_CE \
_IO(QCEDEV_IOC_MAGIC, 8)
#define QCEDEV_IOCTL_GET_CMAC_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 9, struct qcedev_sha_op_req)
#define QCEDEV_IOCTL_MAP_BUF_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 10, struct qcedev_map_buf_req)
#define QCEDEV_IOCTL_UNMAP_BUF_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 11, struct qcedev_unmap_buf_req)
#define QCEDEV_IOCTL_OFFLOAD_OP_REQ \
_IOWR(QCEDEV_IOC_MAGIC, 12, struct qcedev_offload_cipher_op_req)
#endif /* _UAPI_QCEDEV__H */

View File

@ -0,0 +1,218 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UAPI_QCOTA_H
#define _UAPI_QCOTA_H
#include <linux/types.h>
#include <linux/ioctl.h>
#define QCE_OTA_MAX_BEARER 31
#define OTA_KEY_SIZE 16 /* 128 bits of keys. */
enum qce_ota_dir_enum {
QCE_OTA_DIR_UPLINK = 0,
QCE_OTA_DIR_DOWNLINK = 1,
QCE_OTA_DIR_LAST
};
enum qce_ota_algo_enum {
QCE_OTA_ALGO_KASUMI = 0,
QCE_OTA_ALGO_SNOW3G = 1,
QCE_OTA_ALGO_LAST
};
/**
* struct qce_f8_req - qce f8 request
* @data_in: input packet data stream to be ciphered.
* If NULL, key stream mode operation.
* @data_out: ciphered packets output data.
* @data_len: length of data_in and data_out in bytes.
* @count_c: count-C, ciphering sequence number, 32 bit
* @bearer: 5 bit of radio bearer identifier.
* @ckey: 128 bits of confidentiality key,
* ckey[0] bit 127-120, ckey[1] bit 119-112,.., ckey[15] bit 7-0.
* @direction: uplink or downlink.
* @algorithm: Kasumi, or Snow3G.
*
* If data_in is NULL, the engine will run in a special mode called
* key stream mode. In this special mode, the engine will generate
* key stream output for the number of bytes specified in the
* data_len, based on the input parameters of direction, algorithm,
* ckey, bearer, and count_c. The data_len is restricted to
* a multiple of 16 bytes. The application can then take the
* output stream, exclusive-OR it with the input data stream, and
* generate the final cipher data stream.
*/
struct qce_f8_req {
__u8 *data_in;
__u8 *data_out;
__u16 data_len;
__u32 count_c;
__u8 bearer;
__u8 ckey[OTA_KEY_SIZE];
enum qce_ota_dir_enum direction;
enum qce_ota_algo_enum algorithm;
int current_req_info;
};
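/*
 * Illustrative sketch (not part of this UAPI): key stream mode. data_in is
 * NULL, so the engine emits a key stream that is then XOR-ed with the
 * payload in userspace. @fd is assumed open on the qcota node ("/dev/qcota0"
 * on some targets; the name is an assumption) and this header is assumed to
 * be included.
 */
#include <string.h>
#include <sys/ioctl.h>

static int ota_keystream_xor(int fd, __u8 *payload, __u16 len,
			     const __u8 ckey[OTA_KEY_SIZE],
			     __u32 count_c, __u8 bearer)
{
	struct qce_f8_req req;
	__u8 stream[1024];
	__u16 i;

	if (len > sizeof(stream) || (len % 16))
		return -1;	/* key stream length: multiple of 16 bytes */
	memset(&req, 0, sizeof(req));
	req.data_in = NULL;	/* selects key stream mode */
	req.data_out = stream;
	req.data_len = len;
	req.count_c = count_c;
	req.bearer = bearer;
	memcpy(req.ckey, ckey, OTA_KEY_SIZE);
	req.direction = QCE_OTA_DIR_UPLINK;
	req.algorithm = QCE_OTA_ALGO_SNOW3G;
	if (ioctl(fd, QCOTA_F8_REQ, &req))
		return -1;
	for (i = 0; i < len; i++)	/* final cipher = payload XOR stream */
		payload[i] ^= stream[i];
	return 0;
}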
/**
* struct qce_f8_multi_pkt_req - qce f8 multiple packet request
* Multiple packets of uniform size, sharing the same
* F8 ciphering parameters, can be ciphered in a
* single request.
*
* @num_pkt: number of packets.
*
* @cipher_start: ciphering starts offset within a packet.
*
* @cipher_size: number of bytes to be ciphered within a packet.
*
* @qce_f8_req: description of the packet and F8 parameters.
* The following fields have special meaning for
* multiple packet operation,
*
* @data_len: data_len indicates the length of a packet.
*
* @data_in: packets are concatenated together in a byte
* stream started at data_in.
*
* @data_out: The returned ciphered output for multiple
* packets.
* Each packet ciphered output are concatenated
* together into a byte stream started at data_out.
* Note: in each ciphered packet, the output areas from
* offset 0 to cipher_start-1, and from offset
* cipher_start + cipher_size to data_len - 1, remain
* unaltered from the packet input area.
* @count_c: count-C of the first packet, 32 bit.
*
*
* In one request, multiple packets can be ciphered, and output to the
* data_out stream.
*
* Packet data are laid out contiguously in sequence in data_in,
* and data_out area. Every packet is of identical size.
* If the PDU is not byte aligned, set the data_len value
* to the rounded-up packet size. E.g., for a PDU size of
* 253 bits, set the packet size to 32 bytes. The next packet starts on
* the next byte boundary.
*
* For each packet, data from offset 0 to cipher_start
* will be left unchanged and output to the data_out area.
* This area of the packet can be for the RLC header, which is not
* to be ciphered.
*
* The ciphering of a packet starts from offset cipher_start, for
* cipher_size bytes of data. Data starting from
* offset cipher_start + cipher_size to the end of packet will be left
* unchanged and output to the data_out area.
*
* For each packet the input arguments of bearer, direction,
* ckey, algorithm have to be the same. count_c is the ciphering sequence
* number of the first packet. The 2nd packet's ciphering sequence
* number is assumed to be count_c + 1, the 3rd packet's ciphering sequence
* number is count_c + 2, and so on.
*
*/
struct qce_f8_multi_pkt_req {
__u16 num_pkt;
__u16 cipher_start;
__u16 cipher_size;
struct qce_f8_req qce_f8_req;
};
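/*
 * Illustrative sketch (not part of this UAPI): ciphering three 32-byte
 * packets in one request while leaving a 2-byte header per packet
 * unciphered. @tmpl is a qce_f8_req with keys, bearer, direction, count_c
 * and algorithm already filled in; @pkts holds the concatenated packets.
 */
#include <string.h>
#include <sys/ioctl.h>

static int ota_f8_multi(int fd, __u8 *pkts, const struct qce_f8_req *tmpl)
{
	struct qce_f8_multi_pkt_req mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.num_pkt = 3;
	mreq.cipher_start = 2;		/* skip the per-packet header */
	mreq.cipher_size = 30;		/* bytes ciphered per packet */
	mreq.qce_f8_req = *tmpl;
	mreq.qce_f8_req.data_in = pkts;	/* 3 * 32 bytes, in */
	mreq.qce_f8_req.data_out = pkts;	/* ciphered in place, out */
	mreq.qce_f8_req.data_len = 32;	/* per-packet size in this mode */
	return ioctl(fd, QCOTA_F8_MPKT_REQ, &mreq);
}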
/**
* struct qce_f8_variable_multi_pkt_req - qce f8 multiple packet request
* Multiple packets of variable size, sharing the same
* F8 ciphering parameters, can be ciphered in a
* single request.
*
* @num_pkt: number of packets.
*
* @cipher_iov[]: array of iov of packets to be ciphered.
*
*
* @qce_f8_req: description of the packet and F8 parameters.
* The following fields have special meaning for
* multiple packet operation,
*
* @data_len: ignored.
*
* @data_in: ignored.
*
* @data_out: ignored.
*
* @count_c: count-C of the first packet, 32 bit.
*
*
* In one request, multiple packets can be ciphered.
*
* The i-th packet is defined in cipher_iov[i-1].
* The ciphering of i-th packet starts from offset 0 of the PDU specified
* by cipher_iov[i-1].addr, for cipher_iov[i-1].size bytes of data.
* If the PDU is not byte aligned, set the cipher_iov[i-1].size value
* to the rounded-up packet size. E.g., for a PDU size of
* 253 bits, set the packet size to 32 bytes.
*
* Ciphering is done in place. That is, the ciphering
* input and output data are both in cipher_iov[i-1].addr for the i-th
* packet.
*
* For each packet the input arguments of bearer, direction,
* ckey, algorithm have to be the same. count_c is the ciphering sequence
* number of the first packet. The 2nd packet's ciphering sequence
* number is assumed to be count_c + 1, the 3rd packet's ciphering sequence
* number is count_c + 2, and so on.
*/
#define MAX_NUM_V_MULTI_PKT 20
struct cipher_iov {
unsigned char *addr;
unsigned short size;
};
struct qce_f8_variable_multi_pkt_req {
unsigned short num_pkt;
struct cipher_iov cipher_iov[MAX_NUM_V_MULTI_PKT];
struct qce_f8_req qce_f8_req;
};
/**
* struct qce_f9_req - qce f9 request
* @message: message
* @msize: message size in bytes (including the last partial byte).
* @last_bits: valid bits in the last byte of message.
* @mac_i: 32 bit message authentication code, to be returned.
* @fresh: random 32 bit number, one per user.
* @count_i: 32 bit count-I integrity sequence number.
* @direction: uplink or downlink.
* @ikey: 128 bits of integrity key,
* ikey[0] bit 127-120, ikey[1] bit 119-112,.., ikey[15] bit 7-0.
* @algorithm: Kasumi, or Snow3G.
*/
struct qce_f9_req {
__u8 *message;
__u16 msize;
__u8 last_bits;
__u32 mac_i;
__u32 fresh;
__u32 count_i;
enum qce_ota_dir_enum direction;
__u8 ikey[OTA_KEY_SIZE];
enum qce_ota_algo_enum algorithm;
int current_req_info;
};
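/*
 * Illustrative sketch (not part of this UAPI): computing the 32-bit f9
 * integrity MAC over a byte-aligned message with Kasumi. @fd is assumed
 * open on the qcota node.
 */
#include <string.h>
#include <sys/ioctl.h>

static int ota_f9_mac(int fd, __u8 *msg, __u16 len,
		      const __u8 ikey[OTA_KEY_SIZE],
		      __u32 count_i, __u32 fresh, __u32 *mac)
{
	struct qce_f9_req req;

	memset(&req, 0, sizeof(req));
	req.message = msg;
	req.msize = len;
	req.last_bits = 8;	/* all bits of the last byte are valid */
	req.fresh = fresh;
	req.count_i = count_i;
	req.direction = QCE_OTA_DIR_DOWNLINK;
	memcpy(req.ikey, ikey, OTA_KEY_SIZE);
	req.algorithm = QCE_OTA_ALGO_KASUMI;
	if (ioctl(fd, QCOTA_F9_REQ, &req))
		return -1;
	*mac = req.mac_i;
	return 0;
}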
#define QCOTA_IOC_MAGIC 0x85
#define QCOTA_F8_REQ _IOWR(QCOTA_IOC_MAGIC, 1, struct qce_f8_req)
#define QCOTA_F8_MPKT_REQ _IOWR(QCOTA_IOC_MAGIC, 2, struct qce_f8_multi_pkt_req)
#define QCOTA_F9_REQ _IOWR(QCOTA_IOC_MAGIC, 3, struct qce_f9_req)
#define QCOTA_F8_V_MPKT_REQ _IOWR(QCOTA_IOC_MAGIC, 4,\
struct qce_f8_variable_multi_pkt_req)
#endif /* _UAPI_QCOTA_H */

View File

@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UAPI_QRNG_H_
#define _UAPI_QRNG_H_
#include <linux/types.h>
#include <linux/ioctl.h>
#define QRNG_IOC_MAGIC 0x100
#define QRNG_IOCTL_RESET_BUS_BANDWIDTH\
_IO(QRNG_IOC_MAGIC, 1)
#endif /* _UAPI_QRNG_H_ */

View File

@ -0,0 +1,186 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2017, 2019, 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _QSEECOM_H_
#define _QSEECOM_H_
#pragma message("Warning: This header file will be deprecated in future")
#include <linux/types.h>
#include <linux/ioctl.h>
#define MAX_ION_FD 4
#define MAX_APP_NAME_SIZE 64
#define QSEECOM_HASH_SIZE 32
#define ICE_KEY_SIZE 32
#define ICE_SALT_SIZE 32
/*
* struct qseecom_ion_fd_info - ion fd handle data information
* @fd - ion handle to some memory allocated in user space
* @cmd_buf_offset - command buffer offset
*/
struct qseecom_ion_fd_info {
__s32 fd;
__u32 cmd_buf_offset;
};
enum qseecom_key_management_usage_type {
QSEOS_KM_USAGE_DISK_ENCRYPTION = 0x01,
QSEOS_KM_USAGE_FILE_ENCRYPTION = 0x02,
QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION = 0x03,
QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION = 0x04,
QSEOS_KM_USAGE_MAX
};
struct qseecom_create_key_req {
unsigned char hash32[QSEECOM_HASH_SIZE];
enum qseecom_key_management_usage_type usage;
};
struct qseecom_wipe_key_req {
enum qseecom_key_management_usage_type usage;
int wipe_key_flag;/* 1->remove key from storage (along with clearing the key) */
/* 0->do not remove from storage (clear key only) */
};
struct qseecom_update_key_userinfo_req {
unsigned char current_hash32[QSEECOM_HASH_SIZE];
unsigned char new_hash32[QSEECOM_HASH_SIZE];
enum qseecom_key_management_usage_type usage;
};
#define SHA256_DIGEST_LENGTH (256/8)
/*
* struct qseecom_save_partition_hash_req
* @partition_id - partition id.
* @digest[SHA256_DIGEST_LENGTH] - sha256 digest.
*/
struct qseecom_save_partition_hash_req {
int partition_id; /* in */
char digest[SHA256_DIGEST_LENGTH]; /* in */
};
/*
* struct qseecom_is_es_activated_req
* @is_activated - 1=true, 0=false
*/
struct qseecom_is_es_activated_req {
int is_activated; /* out */
};
/*
* struct qseecom_mdtp_cipher_dip_req
* @in_buf - input buffer
* @in_buf_size - input buffer size
* @out_buf - output buffer
* @out_buf_size - output buffer size
* @direction - 0=encrypt, 1=decrypt
*/
struct qseecom_mdtp_cipher_dip_req {
__u8 *in_buf;
__u32 in_buf_size;
__u8 *out_buf;
__u32 out_buf_size;
__u32 direction;
};
struct qseecom_qteec_req {
void *req_ptr;
__u32 req_len;
void *resp_ptr;
__u32 resp_len;
};
struct qseecom_qteec_modfd_req {
void *req_ptr;
__u32 req_len;
void *resp_ptr;
__u32 resp_len;
struct qseecom_ion_fd_info ifd_data[MAX_ION_FD];
};
#define MAX_CE_PIPE_PAIR_PER_UNIT 3
struct qseecom_ce_pipe_entry {
int valid;
unsigned int ce_num;
unsigned int ce_pipe_pair;
};
struct qseecom_ice_data_t {
int flag;
};
#define MAX_CE_INFO_HANDLE_SIZE 32
struct qseecom_ce_info_req {
unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
unsigned int usage;
unsigned int unit_num;
unsigned int num_ce_pipe_entries;
struct qseecom_ce_pipe_entry ce_pipe_entry[MAX_CE_PIPE_PAIR_PER_UNIT];
};
struct qseecom_ice_key_data_t {
__u8 key[ICE_KEY_SIZE];
__u32 key_len;
__u8 salt[ICE_SALT_SIZE];
__u32 salt_len;
};
struct file;
#define QSEECOM_IOC_MAGIC 0x97
#define QSEECOM_IOCTL_CREATE_KEY_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 17, struct qseecom_create_key_req)
#define QSEECOM_IOCTL_WIPE_KEY_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 18, struct qseecom_wipe_key_req)
#define QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 19, struct qseecom_save_partition_hash_req)
#define QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 20, struct qseecom_is_es_activated_req)
#define QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 24, struct qseecom_update_key_userinfo_req)
#define QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 30, struct qseecom_qteec_modfd_req)
#define QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 31, struct qseecom_qteec_req)
#define QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 32, struct qseecom_qteec_modfd_req)
#define QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 33, struct qseecom_qteec_modfd_req)
#define QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 34, struct qseecom_mdtp_cipher_dip_req)
#define QSEECOM_IOCTL_GET_CE_PIPE_INFO \
_IOWR(QSEECOM_IOC_MAGIC, 40, struct qseecom_ce_info_req)
#define QSEECOM_IOCTL_FREE_CE_PIPE_INFO \
_IOWR(QSEECOM_IOC_MAGIC, 41, struct qseecom_ce_info_req)
#define QSEECOM_IOCTL_QUERY_CE_PIPE_INFO \
_IOWR(QSEECOM_IOC_MAGIC, 42, struct qseecom_ce_info_req)
#define QSEECOM_IOCTL_SET_ICE_INFO \
_IOWR(QSEECOM_IOC_MAGIC, 43, struct qseecom_ice_data_t)
#define QSEECOM_IOCTL_FBE_CLEAR_KEY \
_IOWR(QSEECOM_IOC_MAGIC, 44, struct qseecom_ice_key_data_t)
#endif /* _QSEECOM_H_ */

View File

@ -0,0 +1,196 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2017, 2019, 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _QSEECOM_API_H_
#define _QSEECOM_API_H_
#pragma message("Warning: This header file will be deprecated in future")
#include <linux/types.h>
#include <linux/ioctl.h>
#include "qseecom.h"
/*
* struct qseecom_register_listener_req -
* for register listener ioctl request
* @listener_id - service id (shared between userspace and QSE)
* @ifd_data_fd - ion handle
* @virt_sb_base - shared buffer base in user space
* @sb_size - shared buffer size
*/
struct qseecom_register_listener_req {
__u32 listener_id; /* in */
__s32 ifd_data_fd; /* in */
void *virt_sb_base; /* in */
__u32 sb_size; /* in */
};
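/*
 * Illustrative sketch (not part of this UAPI): registering a listener
 * service. The ion/dmabuf fd for the shared buffer is assumed to be
 * allocated and mmap()ed at @sb_base already, and @qfd open on the
 * qseecom node.
 */
#include <sys/ioctl.h>

static int register_listener(int qfd, __u32 id, __s32 ion_fd,
			     void *sb_base, __u32 sb_size)
{
	struct qseecom_register_listener_req req = {
		.listener_id = id,
		.ifd_data_fd = ion_fd,
		.virt_sb_base = sb_base,
		.sb_size = sb_size,
	};

	return ioctl(qfd, QSEECOM_IOCTL_REGISTER_LISTENER_REQ, &req);
}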
/*
* struct qseecom_send_cmd_req - for send command ioctl request
* @cmd_req_len - command buffer length
* @cmd_req_buf - command buffer
* @resp_len - response buffer length
* @resp_buf - response buffer
*/
struct qseecom_send_cmd_req {
void *cmd_req_buf; /* in */
unsigned int cmd_req_len; /* in */
void *resp_buf; /* in/out */
unsigned int resp_len; /* in/out */
};
/*
* struct qseecom_send_modfd_cmd_req - for send command ioctl request
* @cmd_req_len - command buffer length
* @cmd_req_buf - command buffer
* @resp_len - response buffer length
* @resp_buf - response buffer
* @ifd_data - array of ion fd info (handle to memory allocated in user
* space and the corresponding command buffer offset)
*/
struct qseecom_send_modfd_cmd_req {
void *cmd_req_buf; /* in */
unsigned int cmd_req_len; /* in */
void *resp_buf; /* in/out */
unsigned int resp_len; /* in/out */
struct qseecom_ion_fd_info ifd_data[MAX_ION_FD];
};
/*
* struct qseecom_load_img_req - for sending image length information and
* an ion file descriptor to the qseecom driver. The ion file descriptor is
* used for retrieving the ion file handle and in turn the physical address
* of the image location.
* @mdt_len - Length of the .mdt file in bytes.
* @img_len - Length of the .mdt + .b00 + .. + .bxx image files in bytes
* @ifd_data_fd - Ion file descriptor used when allocating memory.
* @img_name - Name of the image.
* @app_arch - Architecture of the image, i.e. 32-bit or 64-bit app
* @app_id - Returned application id (out)
*/
struct qseecom_load_img_req {
__u32 mdt_len; /* in */
__u32 img_len; /* in */
__s32 ifd_data_fd; /* in */
char img_name[MAX_APP_NAME_SIZE]; /* in */
__u32 app_arch; /* in */
__u32 app_id; /* out*/
};
struct qseecom_set_sb_mem_param_req {
__s32 ifd_data_fd; /* in */
void *virt_sb_base; /* in */
__u32 sb_len; /* in */
};
/*
* struct qseecom_qseos_version_req - get qseos version
* @qseos_version - version number
*/
struct qseecom_qseos_version_req {
unsigned int qseos_version; /* in */
};
/*
* struct qseecom_qseos_app_load_query - verify if app is loaded in qsee
* @app_name[MAX_APP_NAME_SIZE] - name of the app.
* @app_id - app id.
* @app_arch - architecture of the app.
*/
struct qseecom_qseos_app_load_query {
char app_name[MAX_APP_NAME_SIZE]; /* in */
__u32 app_id; /* out */
__u32 app_arch;
};
struct qseecom_send_svc_cmd_req {
__u32 cmd_id;
void *cmd_req_buf; /* in */
unsigned int cmd_req_len; /* in */
void *resp_buf; /* in/out */
unsigned int resp_len; /* in/out */
};
/*
* struct qseecom_send_modfd_listener_resp - for send listener response ioctl
* @resp_buf_ptr - response buffer
* @resp_len - response buffer length
* @ifd_data - array of ion fd info (handle to memory allocated in user
* space and the corresponding command buffer offset)
*/
struct qseecom_send_modfd_listener_resp {
void *resp_buf_ptr; /* in */
unsigned int resp_len; /* in */
struct qseecom_ion_fd_info ifd_data[MAX_ION_FD]; /* in */
};
struct qseecom_sg_entry {
__u32 phys_addr;
__u32 len;
};
struct qseecom_sg_entry_64bit {
__u64 phys_addr;
__u32 len;
} __attribute__ ((packed));
struct file;
#define QSEECOM_IOC_MAGIC 0x97
#define QSEECOM_IOCTL_REGISTER_LISTENER_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 1, struct qseecom_register_listener_req)
#define QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ \
_IO(QSEECOM_IOC_MAGIC, 2)
#define QSEECOM_IOCTL_SEND_CMD_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 3, struct qseecom_send_cmd_req)
#define QSEECOM_IOCTL_SEND_MODFD_CMD_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 4, struct qseecom_send_modfd_cmd_req)
#define QSEECOM_IOCTL_RECEIVE_REQ \
_IO(QSEECOM_IOC_MAGIC, 5)
#define QSEECOM_IOCTL_SEND_RESP_REQ \
_IO(QSEECOM_IOC_MAGIC, 6)
#define QSEECOM_IOCTL_LOAD_APP_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 7, struct qseecom_load_img_req)
#define QSEECOM_IOCTL_SET_MEM_PARAM_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 8, struct qseecom_set_sb_mem_param_req)
#define QSEECOM_IOCTL_UNLOAD_APP_REQ \
_IO(QSEECOM_IOC_MAGIC, 9)
#define QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 10, struct qseecom_qseos_version_req)
#define QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 13, struct qseecom_load_img_req)
#define QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ \
_IO(QSEECOM_IOC_MAGIC, 14)
#define QSEECOM_IOCTL_APP_LOADED_QUERY_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 15, struct qseecom_qseos_app_load_query)
#define QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 16, struct qseecom_send_svc_cmd_req)
#define QSEECOM_IOCTL_SEND_MODFD_RESP \
_IOWR(QSEECOM_IOC_MAGIC, 21, struct qseecom_send_modfd_listener_resp)
#define QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ \
_IOWR(QSEECOM_IOC_MAGIC, 35, struct qseecom_send_modfd_cmd_req)
#define QSEECOM_IOCTL_SEND_MODFD_RESP_64 \
_IOWR(QSEECOM_IOC_MAGIC, 36, struct qseecom_send_modfd_listener_resp)
#endif /* _QSEECOM_API_H_ */

View File

@ -0,0 +1,110 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _UAPI_SMCINVOKE_H_
#define _UAPI_SMCINVOKE_H_
#include <linux/types.h>
#include <linux/ioctl.h>
#define SMCINVOKE_USERSPACE_OBJ_NULL -1
#define DEFAULT_CB_OBJ_THREAD_CNT 4
#define SMCINVOKE_TZ_MIN_BUF_SIZE 4096
struct smcinvoke_buf {
__u64 addr;
__u64 size;
};
struct smcinvoke_obj {
__s64 fd;
__s64 cb_server_fd;
__s64 reserved;
};
union smcinvoke_arg {
struct smcinvoke_buf b;
struct smcinvoke_obj o;
};
/*
* struct smcinvoke_cmd_req: This structure is transparently sent to TEE
* @op - Operation to be performed
* @counts - number of arguments passed
* @result - result of invoke operation
* @argsize - size of each argument
* @args - pointer to a buffer holding all arguments
* @reserved: IN/OUT: Usage is not defined but should be set to 0
*/
struct smcinvoke_cmd_req {
__u32 op;
__u32 counts;
__s32 result;
__u32 argsize;
__u64 args;
__s64 reserved;
};
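/*
 * Illustrative sketch (not part of this UAPI): an invoke with one buffer-in
 * and one buffer-out argument. The encoding of @op and @counts is
 * object-specific and assumed to be supplied by the caller; arguments are
 * laid out in-buffers first, then out-buffers, per the smcinvoke convention.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static int invoke_bi_bo(int fd, __u32 op, __u32 counts,
			void *in, __u64 in_len, void *out, __u64 out_len)
{
	union smcinvoke_arg args[2];
	struct smcinvoke_cmd_req req;

	args[0].b.addr = (__u64)(uintptr_t)in;	/* buffer-in */
	args[0].b.size = in_len;
	args[1].b.addr = (__u64)(uintptr_t)out;	/* buffer-out */
	args[1].b.size = out_len;
	memset(&req, 0, sizeof(req));
	req.op = op;
	req.counts = counts;
	req.argsize = sizeof(union smcinvoke_arg);
	req.args = (__u64)(uintptr_t)args;
	if (ioctl(fd, SMCINVOKE_IOCTL_INVOKE_REQ, &req))
		return -1;
	return req.result;	/* result of the invoke operation */
}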
/*
* struct smcinvoke_accept: structure to process CB req from TEE
* @has_resp: IN: Whether IOCTL is carrying response data
* @result: IN: Outcome of operation op
* @op: OUT: Operation to be performed on target object
* @counts: OUT: Number of arguments, embedded in buffer pointed by
* buf_addr, to complete operation
* @reserved: IN/OUT: Usage is not defined but should be set to 0.
* @argsize: IN: Size of any argument, all of equal size, embedded
* in buffer pointed by buf_addr
* @txn_id: OUT: An id that should be passed as it is for response
* @cbobj_id: OUT: Callback object which is target of operation op
* @buf_len: IN: Len of buffer pointed by buf_addr
* @buf_addr: IN: Buffer containing all arguments which are needed
* to complete operation op
*/
struct smcinvoke_accept {
__u32 has_resp;
__s32 result;
__u32 op;
__u32 counts;
__s32 reserved;
__u32 argsize;
__u64 txn_id;
__s64 cbobj_id;
__u64 buf_len;
__u64 buf_addr;
};
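/*
 * Illustrative sketch (not part of this UAPI): a callback-server worker
 * loop. Each pass delivers the response to the previous request (when
 * has_resp is set) and returns carrying the next request targeted at a
 * local callback object.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

static void cb_server_loop(int server_fd, char *buf, __u64 buf_len)
{
	struct smcinvoke_accept acc;

	memset(&acc, 0, sizeof(acc));
	acc.buf_addr = (__u64)(uintptr_t)buf;
	acc.buf_len = buf_len;
	for (;;) {
		if (ioctl(server_fd, SMCINVOKE_IOCTL_ACCEPT_REQ, &acc))
			break;
		/* dispatch acc.op on object acc.cbobj_id; args are in buf */
		acc.result = 0;		/* outcome of the handled op */
		acc.has_resp = 1;	/* acc.txn_id is passed back as-is */
	}
}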
/*
* struct smcinvoke_server: structure to register a callback server
* @cb_buf_size: IN: Max buffer size for any callback obj implemented by client
* @reserved: IN/OUT: Usage is not defined but should be set to 0
*/
struct smcinvoke_server {
__u64 cb_buf_size;
__s64 reserved;
};
#define SMCINVOKE_IOC_MAGIC 0x98
#define SMCINVOKE_IOCTL_INVOKE_REQ \
_IOWR(SMCINVOKE_IOC_MAGIC, 1, struct smcinvoke_cmd_req)
#define SMCINVOKE_IOCTL_ACCEPT_REQ \
_IOWR(SMCINVOKE_IOC_MAGIC, 2, struct smcinvoke_accept)
#define SMCINVOKE_IOCTL_SERVER_REQ \
_IOWR(SMCINVOKE_IOC_MAGIC, 3, struct smcinvoke_server)
#define SMCINVOKE_IOCTL_ACK_LOCAL_OBJ \
_IOWR(SMCINVOKE_IOC_MAGIC, 4, __s64)
/*
* The smcinvoke logging buffer is used to pass additional debugging info
* to the smcinvoke driver, to be included in the driver's log (if any)
*/
#define SMCINVOKE_LOG_BUF_SIZE 100
#define SMCINVOKE_IOCTL_LOG \
_IOC(_IOC_READ|_IOC_WRITE, SMCINVOKE_IOC_MAGIC, 255, SMCINVOKE_LOG_BUF_SIZE)
#endif /* _UAPI_SMCINVOKE_H_ */

View File

@ -0,0 +1,48 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2021, The Linux Foundation. All rights reserved.
*/
#ifndef __QSEECOM_KERNEL_H_
#define __QSEECOM_KERNEL_H_
#include <linux/types.h>
#define QSEECOM_ALIGN_SIZE 0x40
#define QSEECOM_ALIGN_MASK (QSEECOM_ALIGN_SIZE - 1)
#define QSEECOM_ALIGN(x) \
((x + QSEECOM_ALIGN_MASK) & (~QSEECOM_ALIGN_MASK))
/*
* struct qseecom_handle -
* Handle to the qseecom device for kernel clients
* @dev - qseecom device (opaque)
* @sbuf - shared buffer pointer
* @sbuf_len - shared buffer size
*/
struct qseecom_handle {
void *dev; /* in/out */
unsigned char *sbuf; /* in/out */
uint32_t sbuf_len; /* in/out */
};
int qseecom_start_app(struct qseecom_handle **handle,
char *app_name, uint32_t size);
int qseecom_shutdown_app(struct qseecom_handle **handle);
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len);
int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high);
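/*
 * Illustrative sketch (not part of this API): a kernel-client round trip.
 * "example_ta" is a hypothetical app name. Command and response live in the
 * handle's shared buffer; both lengths are QSEECOM_ALIGN()ed and together
 * must fit in the size passed to qseecom_start_app().
 */
static inline int example_ta_roundtrip(void)
{
	struct qseecom_handle *handle = NULL;
	char app_name[] = "example_ta";	/* hypothetical TA name */
	uint32_t cmd_len = QSEECOM_ALIGN(64);
	uint32_t rsp_len = QSEECOM_ALIGN(64);
	void *cmd, *rsp;
	int ret;

	ret = qseecom_start_app(&handle, app_name, cmd_len + rsp_len);
	if (ret)
		return ret;
	cmd = handle->sbuf;			/* fill the request here */
	rsp = handle->sbuf + cmd_len;
	ret = qseecom_send_command(handle, cmd, cmd_len, rsp, rsp_len);
	qseecom_shutdown_app(&handle);
	return ret;
}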
#if IS_ENABLED(CONFIG_QSEECOM)
int qseecom_process_listener_from_smcinvoke(uint32_t *result,
u64 *response_type, unsigned int *data);
#else
static inline int qseecom_process_listener_from_smcinvoke(uint32_t *result,
u64 *response_type, unsigned int *data)
{
return -EOPNOTSUPP;
}
#endif
#endif /* __QSEECOM_KERNEL_H_ */

View File

@ -0,0 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __QSEECOM_PRIV_H_
#define __QSEECOM_PRIV_H_
#include <linux/types.h>
#if IS_ENABLED(CONFIG_QSEECOM) || IS_ENABLED(CONFIG_ARCH_SA8155)
int qseecom_process_listener_from_smcinvoke(uint32_t *result,
u64 *response_type, unsigned int *data);
#else
static inline int qseecom_process_listener_from_smcinvoke(uint32_t *result,
u64 *response_type, unsigned int *data)
{
return -EOPNOTSUPP;
}
int get_qseecom_kernel_fun_ops(void);
#endif
#endif

View File

@ -0,0 +1,740 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2013-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __QSEECOMI_H_
#define __QSEECOMI_H_
/* we need to include qseecom.h present in securemsm-kernel */
#include "../../include/uapi/linux/qseecom.h"
#define QSEECOM_KEY_ID_SIZE 32
#define QSEOS_RESULT_FAIL_SEND_CMD_NO_THREAD -19 /*0xFFFFFFED*/
#define QSEOS_RESULT_FAIL_UNSUPPORTED_CE_PIPE -63
#define QSEOS_RESULT_FAIL_KS_OP -64
#define QSEOS_RESULT_FAIL_KEY_ID_EXISTS -65
#define QSEOS_RESULT_FAIL_MAX_KEYS -66
#define QSEOS_RESULT_FAIL_SAVE_KS -67
#define QSEOS_RESULT_FAIL_LOAD_KS -68
#define QSEOS_RESULT_FAIL_KS_ALREADY_DONE -69
#define QSEOS_RESULT_FAIL_KEY_ID_DNE -70
#define QSEOS_RESULT_FAIL_INCORRECT_PSWD -71
#define QSEOS_RESULT_FAIL_MAX_ATTEMPT -72
#define QSEOS_RESULT_FAIL_PENDING_OPERATION -73
#define SMCINVOKE_RESULT_INBOUND_REQ_NEEDED 3
enum qseecom_command_scm_resp_type {
QSEOS_APP_ID = 0xEE01,
QSEOS_LISTENER_ID
};
enum qseecom_qceos_cmd_id {
QSEOS_APP_START_COMMAND = 0x01,
QSEOS_APP_SHUTDOWN_COMMAND,
QSEOS_APP_LOOKUP_COMMAND,
QSEOS_REGISTER_LISTENER,
QSEOS_DEREGISTER_LISTENER,
QSEOS_CLIENT_SEND_DATA_COMMAND,
QSEOS_LISTENER_DATA_RSP_COMMAND,
QSEOS_LOAD_EXTERNAL_ELF_COMMAND,
QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND,
QSEOS_GET_APP_STATE_COMMAND,
QSEOS_LOAD_SERV_IMAGE_COMMAND,
QSEOS_UNLOAD_SERV_IMAGE_COMMAND,
QSEOS_APP_REGION_NOTIFICATION,
QSEOS_REGISTER_LOG_BUF_COMMAND,
QSEOS_RPMB_PROVISION_KEY_COMMAND,
QSEOS_RPMB_ERASE_COMMAND,
QSEOS_GENERATE_KEY = 0x11,
QSEOS_DELETE_KEY,
QSEOS_MAX_KEY_COUNT,
QSEOS_SET_KEY,
QSEOS_UPDATE_KEY_USERINFO,
QSEOS_TEE_OPEN_SESSION,
QSEOS_TEE_INVOKE_COMMAND,
QSEOS_TEE_INVOKE_MODFD_COMMAND = QSEOS_TEE_INVOKE_COMMAND,
QSEOS_TEE_CLOSE_SESSION,
QSEOS_TEE_REQUEST_CANCELLATION,
QSEOS_CONTINUE_BLOCKED_REQ_COMMAND,
QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND = 0x1B,
QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST = 0x1C,
QSEOS_TEE_OPEN_SESSION_WHITELIST = 0x1D,
QSEOS_TEE_INVOKE_COMMAND_WHITELIST = 0x1E,
QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST = 0x1F,
QSEOS_FSM_LTEOTA_REQ_CMD = 0x109,
QSEOS_FSM_LTEOTA_REQ_RSP_CMD = 0x110,
QSEOS_FSM_IKE_REQ_CMD = 0x203,
QSEOS_FSM_IKE_REQ_RSP_CMD = 0x204,
QSEOS_FSM_OEM_FUSE_WRITE_ROW = 0x301,
QSEOS_FSM_OEM_FUSE_READ_ROW = 0x302,
QSEOS_FSM_ENCFS_REQ_CMD = 0x403,
QSEOS_FSM_ENCFS_REQ_RSP_CMD = 0x404,
QSEOS_DIAG_FUSE_REQ_CMD = 0x501,
QSEOS_DIAG_FUSE_REQ_RSP_CMD = 0x502,
QSEOS_CMD_MAX = 0xEFFFFFFF
};
enum qseecom_qceos_cmd_status {
QSEOS_RESULT_SUCCESS = 0,
QSEOS_RESULT_INCOMPLETE,
QSEOS_RESULT_BLOCKED_ON_LISTENER,
QSEOS_RESULT_CBACK_REQUEST,
QSEOS_RESULT_FAILURE = 0xFFFFFFFF
};
enum qseecom_pipe_type {
QSEOS_PIPE_ENC = 0x1,
QSEOS_PIPE_ENC_XTS = 0x2,
QSEOS_PIPE_AUTH = 0x4,
QSEOS_PIPE_ENUM_FILL = 0x7FFFFFFF
};
/* QSEE Reentrancy support phase */
enum qseecom_qsee_reentrancy_phase {
QSEE_REENTRANCY_PHASE_0 = 0,
QSEE_REENTRANCY_PHASE_1,
QSEE_REENTRANCY_PHASE_2,
QSEE_REENTRANCY_PHASE_3,
QSEE_REENTRANCY_PHASE_MAX = 0xFF
};
struct qsee_apps_region_info_ireq {
uint32_t qsee_cmd_id;
uint32_t addr;
uint32_t size;
} __packed;
struct qsee_apps_region_info_64bit_ireq {
uint32_t qsee_cmd_id;
uint64_t addr;
uint32_t size;
} __packed;
struct qseecom_check_app_ireq {
uint32_t qsee_cmd_id;
char app_name[MAX_APP_NAME_SIZE];
} __packed;
struct qseecom_load_app_ireq {
uint32_t qsee_cmd_id;
uint32_t mdt_len; /* Length of the mdt file */
uint32_t img_len; /* Length of .bxx and .mdt files */
uint32_t phy_addr; /* phy addr of the start of image */
char app_name[MAX_APP_NAME_SIZE]; /* application name*/
} __packed;
struct qseecom_load_app_64bit_ireq {
uint32_t qsee_cmd_id;
uint32_t mdt_len;
uint32_t img_len;
uint64_t phy_addr;
char app_name[MAX_APP_NAME_SIZE];
} __packed;
struct qseecom_unload_app_ireq {
uint32_t qsee_cmd_id;
uint32_t app_id;
} __packed;
struct qseecom_load_lib_image_ireq {
uint32_t qsee_cmd_id;
uint32_t mdt_len;
uint32_t img_len;
uint32_t phy_addr;
} __packed;
struct qseecom_load_lib_image_64bit_ireq {
uint32_t qsee_cmd_id;
uint32_t mdt_len;
uint32_t img_len;
uint64_t phy_addr;
} __packed;
struct qseecom_unload_lib_image_ireq {
uint32_t qsee_cmd_id;
} __packed;
struct qseecom_register_listener_ireq {
uint32_t qsee_cmd_id;
uint32_t listener_id;
uint32_t sb_ptr;
uint32_t sb_len;
} __packed;
struct qseecom_register_listener_64bit_ireq {
uint32_t qsee_cmd_id;
uint32_t listener_id;
uint64_t sb_ptr;
uint32_t sb_len;
} __packed;
struct qseecom_unregister_listener_ireq {
uint32_t qsee_cmd_id;
uint32_t listener_id;
} __packed;
struct qseecom_client_send_data_ireq {
uint32_t qsee_cmd_id;
uint32_t app_id;
uint32_t req_ptr;
uint32_t req_len;
uint32_t rsp_ptr;/* First 4 bytes should be the return status */
uint32_t rsp_len;
uint32_t sglistinfo_ptr;
uint32_t sglistinfo_len;
} __packed;
struct qseecom_client_send_data_64bit_ireq {
uint32_t qsee_cmd_id;
uint32_t app_id;
uint64_t req_ptr;
uint32_t req_len;
uint64_t rsp_ptr;
uint32_t rsp_len;
uint64_t sglistinfo_ptr;
uint32_t sglistinfo_len;
} __packed;
struct qseecom_reg_log_buf_ireq {
uint32_t qsee_cmd_id;
uint32_t phy_addr;
uint32_t len;
} __packed;
struct qseecom_reg_log_buf_64bit_ireq {
uint32_t qsee_cmd_id;
uint64_t phy_addr;
uint32_t len;
} __packed;
/* send_data resp */
struct qseecom_client_listener_data_irsp {
uint32_t qsee_cmd_id;
uint32_t listener_id;
uint32_t status;
uint32_t sglistinfo_ptr;
uint32_t sglistinfo_len;
} __packed;
struct qseecom_client_listener_data_64bit_irsp {
uint32_t qsee_cmd_id;
uint32_t listener_id;
uint32_t status;
uint64_t sglistinfo_ptr;
uint32_t sglistinfo_len;
} __packed;
/*
* struct qseecom_command_scm_resp - qseecom response buffer
* @result: status, value from enum qseecom_qceos_cmd_status
* @resp_type: type of response, from enum qseecom_command_scm_resp_type
* @data: response data, interpreted according to @resp_type
*/
struct qseecom_command_scm_resp {
uint32_t result;
enum qseecom_command_scm_resp_type resp_type;
unsigned int data;
} __packed;
struct qseecom_rpmb_provision_key {
uint32_t key_type;
};
struct qseecom_client_send_service_ireq {
uint32_t qsee_cmd_id;
uint32_t key_type; /* in */
unsigned int req_len; /* in */
uint32_t rsp_ptr; /* in/out */
unsigned int rsp_len; /* in/out */
} __packed;
struct qseecom_client_send_service_64bit_ireq {
uint32_t qsee_cmd_id;
uint32_t key_type;
unsigned int req_len;
uint64_t rsp_ptr;
unsigned int rsp_len;
} __packed;
struct qseecom_key_generate_ireq {
uint32_t qsee_command_id;
uint32_t flags;
uint8_t key_id[QSEECOM_KEY_ID_SIZE];
uint8_t hash32[QSEECOM_HASH_SIZE];
} __packed;
struct qseecom_key_select_ireq {
uint32_t qsee_command_id;
uint32_t ce;
uint32_t pipe;
uint32_t pipe_type;
uint32_t flags;
uint8_t key_id[QSEECOM_KEY_ID_SIZE];
uint8_t hash32[QSEECOM_HASH_SIZE];
} __packed;
struct qseecom_key_delete_ireq {
uint32_t qsee_command_id;
uint32_t flags;
uint8_t key_id[QSEECOM_KEY_ID_SIZE];
uint8_t hash32[QSEECOM_HASH_SIZE];
} __packed;
struct qseecom_key_userinfo_update_ireq {
uint32_t qsee_command_id;
uint32_t flags;
uint8_t key_id[QSEECOM_KEY_ID_SIZE];
uint8_t current_hash32[QSEECOM_HASH_SIZE];
uint8_t new_hash32[QSEECOM_HASH_SIZE];
} __packed;
struct qseecom_key_max_count_query_ireq {
uint32_t flags;
} __packed;
struct qseecom_key_max_count_query_irsp {
uint32_t max_key_count;
} __packed;
struct qseecom_qteec_ireq {
uint32_t qsee_cmd_id;
uint32_t app_id;
uint32_t req_ptr;
uint32_t req_len;
uint32_t resp_ptr;
uint32_t resp_len;
uint32_t sglistinfo_ptr;
uint32_t sglistinfo_len;
} __packed;
struct qseecom_qteec_64bit_ireq {
uint32_t qsee_cmd_id;
uint32_t app_id;
uint64_t req_ptr;
uint32_t req_len;
uint64_t resp_ptr;
uint32_t resp_len;
uint64_t sglistinfo_ptr;
uint32_t sglistinfo_len;
} __packed;
struct qseecom_client_send_fsm_diag_req {
uint32_t qsee_cmd_id;
uint32_t req_ptr;
uint32_t req_len;
uint32_t rsp_ptr;
uint32_t rsp_len;
} __packed;
struct qseecom_continue_blocked_request_ireq {
uint32_t qsee_cmd_id;
uint32_t app_or_session_id; /*legacy: app_id; smcinvoke: session_id*/
} __packed;
/********** ARMV8 SMC INTERFACE TZ MACRO *******************/
#define TZ_SVC_APP_MGR 1 /* Application management */
#define TZ_SVC_LISTENER 2 /* Listener service management */
#define TZ_SVC_EXTERNAL 3 /* External image loading */
#define TZ_SVC_RPMB 4 /* RPMB */
#define TZ_SVC_KEYSTORE 5 /* Keystore management */
#define TZ_SVC_FUSE 8 /* Fuse services */
#define TZ_SVC_ES 16 /* Enterprise Security */
#define TZ_SVC_MDTP 18 /* Mobile Device Theft */
/*----------------------------------------------------------------------------
* Owning Entity IDs (defined by ARM SMC doc)
* ---------------------------------------------------------------------------
*/
#define TZ_OWNER_ARM 0 /** ARM Architecture call ID */
#define TZ_OWNER_CPU 1 /** CPU service call ID */
#define TZ_OWNER_SIP 2 /** SIP service call ID */
#define TZ_OWNER_OEM 3 /** OEM service call ID */
#define TZ_OWNER_STD 4 /** Standard service call ID */
/** Values 5-47 are reserved for future use */
/** Trusted Application call IDs */
#define TZ_OWNER_TZ_APPS 48
#define TZ_OWNER_TZ_APPS_RESERVED 49
/** Trusted OS Call IDs */
#define TZ_OWNER_QSEE_OS 50
#define TZ_OWNER_MOBI_OS 51
#define TZ_OWNER_OS_RESERVED_3 52
#define TZ_OWNER_OS_RESERVED_4 53
#define TZ_OWNER_OS_RESERVED_5 54
#define TZ_OWNER_OS_RESERVED_6 55
#define TZ_OWNER_OS_RESERVED_7 56
#define TZ_OWNER_OS_RESERVED_8 57
#define TZ_OWNER_OS_RESERVED_9 58
#define TZ_OWNER_OS_RESERVED_10 59
#define TZ_OWNER_OS_RESERVED_11 60
#define TZ_OWNER_OS_RESERVED_12 61
#define TZ_OWNER_OS_RESERVED_13 62
#define TZ_OWNER_OS_RESERVED_14 63
#define TZ_SVC_INFO 6 /* Misc. information services */
/** Trusted Application call groups */
#define TZ_SVC_APP_ID_PLACEHOLDER 0 /* SVC bits will contain App ID */
/** General helper macro to create a bitmask from bits low to high. */
#define TZ_MASK_BITS(h, l) ((0xffffffff >> (32 - ((h - l) + 1))) << l)
/*
* Macro used to define an SMC ID based on the owner ID,
* service ID, and function number.
*/
#define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))
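/*
 * For instance, TZ_OS_APP_START_ID below expands to
 * ((50 & 0x3f) << 24) | ((1 & 0xff) << 8) | 0x01 = 0x32000101:
 * owner TZ_OWNER_QSEE_OS (50), service TZ_SVC_APP_MGR (1), function 0x01.
 */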
#define TZ_SYSCALL_PARAM_NARGS_MASK TZ_MASK_BITS(3, 0)
#define TZ_SYSCALL_PARAM_TYPE_MASK TZ_MASK_BITS(1, 0)
#define TZ_SYSCALL_CREATE_PARAM_ID(nargs, p1, p2, p3, \
p4, p5, p6, p7, p8, p9, p10) \
((nargs&TZ_SYSCALL_PARAM_NARGS_MASK)+ \
((p1&TZ_SYSCALL_PARAM_TYPE_MASK)<<4)+ \
((p2&TZ_SYSCALL_PARAM_TYPE_MASK)<<6)+ \
((p3&TZ_SYSCALL_PARAM_TYPE_MASK)<<8)+ \
((p4&TZ_SYSCALL_PARAM_TYPE_MASK)<<10)+ \
((p5&TZ_SYSCALL_PARAM_TYPE_MASK)<<12)+ \
((p6&TZ_SYSCALL_PARAM_TYPE_MASK)<<14)+ \
((p7&TZ_SYSCALL_PARAM_TYPE_MASK)<<16)+ \
((p8&TZ_SYSCALL_PARAM_TYPE_MASK)<<18)+ \
((p9&TZ_SYSCALL_PARAM_TYPE_MASK)<<20)+ \
((p10&TZ_SYSCALL_PARAM_TYPE_MASK)<<22))
/*
* Macros used to create the Parameter ID associated with the syscall
*/
#define TZ_SYSCALL_CREATE_PARAM_ID_0 0
#define TZ_SYSCALL_CREATE_PARAM_ID_1(p1) \
TZ_SYSCALL_CREATE_PARAM_ID(1, p1, 0, 0, 0, 0, 0, 0, 0, 0, 0)
#define TZ_SYSCALL_CREATE_PARAM_ID_2(p1, p2) \
TZ_SYSCALL_CREATE_PARAM_ID(2, p1, p2, 0, 0, 0, 0, 0, 0, 0, 0)
#define TZ_SYSCALL_CREATE_PARAM_ID_3(p1, p2, p3) \
TZ_SYSCALL_CREATE_PARAM_ID(3, p1, p2, p3, 0, 0, 0, 0, 0, 0, 0)
#define TZ_SYSCALL_CREATE_PARAM_ID_4(p1, p2, p3, p4) \
TZ_SYSCALL_CREATE_PARAM_ID(4, p1, p2, p3, p4, 0, 0, 0, 0, 0, 0)
#define TZ_SYSCALL_CREATE_PARAM_ID_5(p1, p2, p3, p4, p5) \
TZ_SYSCALL_CREATE_PARAM_ID(5, p1, p2, p3, p4, p5, 0, 0, 0, 0, 0)
#define TZ_SYSCALL_CREATE_PARAM_ID_6(p1, p2, p3, p4, p5, p6) \
TZ_SYSCALL_CREATE_PARAM_ID(6, p1, p2, p3, p4, p5, p6, 0, 0, 0, 0)
#define TZ_SYSCALL_CREATE_PARAM_ID_7(p1, p2, p3, p4, p5, p6, p7) \
TZ_SYSCALL_CREATE_PARAM_ID(7, p1, p2, p3, p4, p5, p6, p7, 0, 0, 0)
#define TZ_SYSCALL_CREATE_PARAM_ID_8(p1, p2, p3, p4, p5, p6, p7, p8) \
TZ_SYSCALL_CREATE_PARAM_ID(8, p1, p2, p3, p4, p5, p6, p7, p8, 0, 0)
#define TZ_SYSCALL_CREATE_PARAM_ID_9(p1, p2, p3, p4, p5, p6, p7, p8, p9) \
TZ_SYSCALL_CREATE_PARAM_ID(9, p1, p2, p3, p4, p5, p6, p7, p8, p9, 0)
#define TZ_SYSCALL_CREATE_PARAM_ID_10(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10) \
TZ_SYSCALL_CREATE_PARAM_ID(10, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10)
/*
* Macro used to obtain the Parameter ID associated with the syscall
*/
#define TZ_SYSCALL_GET_PARAM_ID(CMD_ID) CMD_ID ## _PARAM_ID
/** Helper macro to extract the owning entity from the SMC ID. */
#define TZ_SYSCALL_OWNER_ID(r0) ((r0 & TZ_MASK_BITS(29, 24)) >> 24)
/** Helper macro for checking whether an owning entity is of type trusted OS. */
#define IS_OWNER_TRUSTED_OS(owner_id) \
(((owner_id >= 50) && (owner_id <= 63)) ? 1:0)
#define TZ_SYSCALL_PARAM_TYPE_VAL 0x0 /* type of value */
#define TZ_SYSCALL_PARAM_TYPE_BUF_RO 0x1 /* type of buffer RO */
#define TZ_SYSCALL_PARAM_TYPE_BUF_RW 0x2 /* type of buffer RW */
#define TZ_OS_APP_START_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x01)
#define TZ_OS_APP_START_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_3( \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_APP_SHUTDOWN_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x02)
#define TZ_OS_APP_SHUTDOWN_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_APP_LOOKUP_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x03)
#define TZ_OS_APP_LOOKUP_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_2( \
TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_APP_GET_STATE_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x04)
#define TZ_OS_APP_GET_STATE_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_APP_REGION_NOTIFICATION_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x05)
#define TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_2( \
TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_REGISTER_LOG_BUFFER_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x06)
#define TZ_OS_REGISTER_LOG_BUFFER_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_2( \
TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_LOAD_SERVICES_IMAGE_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x07)
#define TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_3( \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_UNLOAD_SERVICES_IMAGE_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x08)
#define TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_0
#define TZ_SECBOOT_GET_FUSE_INFO \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_FUSE, 0x09)
#define TZ_SECBOOT_GET_FUSE_INFO_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_4(\
TZ_SYSCALL_PARAM_TYPE_BUF_RO, \
TZ_SYSCALL_PARAM_TYPE_VAL, \
TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_REGISTER_LISTENER_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x01)
#define TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x06)
#define TZ_OS_REGISTER_LISTENER_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_3( \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_DEREGISTER_LISTENER_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x02)
#define TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_LISTENER_RESPONSE_HANDLER_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x03)
#define TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_2( \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_LOAD_EXTERNAL_IMAGE_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_EXTERNAL, 0x01)
#define TZ_OS_LOAD_EXTERNAL_IMAGE_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_3( \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_APP_QSAPP_SEND_DATA_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
TZ_SVC_APP_ID_PLACEHOLDER, 0x01)
#define TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_5( \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_EXTERNAL, 0x02)
#define TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_0
#define TZ_INFO_IS_SVC_AVAILABLE_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_INFO, 0x01)
#define TZ_INFO_IS_SVC_AVAILABLE_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_INFO_GET_FEATURE_VERSION_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_INFO, 0x03)
#define TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_RPMB_PROVISION_KEY_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x01)
#define TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_RPMB_ERASE_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x02)
#define TZ_OS_RPMB_ERASE_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_0
#define TZ_OS_RPMB_CHECK_PROV_STATUS_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x03)
#define TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_0
#define TZ_OS_KS_GEN_KEY_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x01)
#define TZ_OS_KS_GEN_KEY_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_2( \
TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_KS_DEL_KEY_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x02)
#define TZ_OS_KS_DEL_KEY_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_2( \
TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_KS_GET_MAX_KEYS_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x03)
#define TZ_OS_KS_GET_MAX_KEYS_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_KS_SET_PIPE_KEY_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x04)
#define TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_2( \
TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_KS_UPDATE_KEY_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x05)
#define TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_2( \
TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_ES_SAVE_PARTITION_HASH_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, 0x01)
#define TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_3( \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_APP_GPAPP_OPEN_SESSION_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
TZ_SVC_APP_ID_PLACEHOLDER, 0x02)
#define TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_5( \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_APP_GPAPP_CLOSE_SESSION_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
TZ_SVC_APP_ID_PLACEHOLDER, 0x03)
#define TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_5( \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_APP_GPAPP_INVOKE_COMMAND_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
TZ_SVC_APP_ID_PLACEHOLDER, 0x04)
#define TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_5( \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_APP_GPAPP_REQUEST_CANCELLATION_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
TZ_SVC_APP_ID_PLACEHOLDER, 0x05)
#define TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_5( \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_MDTP_CIPHER_DIP_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_MDTP, 0x1)
#define TZ_MDTP_CIPHER_DIP_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_5( \
TZ_SYSCALL_PARAM_TYPE_BUF_RO, TZ_SYSCALL_PARAM_TYPE_VAL, \
TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \
TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x04)
#define TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x07)
#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
TZ_SVC_APP_ID_PLACEHOLDER, 0x06)
#define TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_7( \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
TZ_SVC_APP_ID_PLACEHOLDER, 0x07)
#define TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_7( \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
TZ_SVC_APP_ID_PLACEHOLDER, 0x09)
#define TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_7( \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL)
#define TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID \
TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x05)
#define TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID \
TZ_SYSCALL_CREATE_PARAM_ID_4( \
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
#endif /* __QSEECOMI_H_ */

View File

@ -0,0 +1,493 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2011-2013, 2015, 2017-2021 The Linux Foundation. All rights
* reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/hw_random.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/of.h>
#include <linux/qrng.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <crypto/internal/rng.h>
#include <linux/interconnect.h>
#include <linux/sched/signal.h>
#include <linux/version.h>
#define DRIVER_NAME "msm_rng"
/* Device specific register offsets */
#define PRNG_DATA_OUT_OFFSET 0x0000
#define PRNG_STATUS_OFFSET 0x0004
#define PRNG_LFSR_CFG_OFFSET 0x0100
#define PRNG_CONFIG_OFFSET 0x0104
/* Device specific register masks and config values */
#define PRNG_LFSR_CFG_MASK 0xFFFF0000
#define PRNG_LFSR_CFG_CLOCKS 0x0000DDDD
#define PRNG_CONFIG_MASK 0xFFFFFFFD
#define PRNG_HW_ENABLE 0x00000002
#define MAX_HW_FIFO_DEPTH 16 /* FIFO is 16 words deep */
#define MAX_HW_FIFO_SIZE (MAX_HW_FIFO_DEPTH * 4) /* FIFO is 32 bits wide */
#define RETRY_MAX_CNT 5 /* max retry times to read register */
#define RETRY_DELAY_INTERVAL 440 /* retry delay interval in us */
struct msm_rng_device {
struct platform_device *pdev;
void __iomem *base;
struct clk *prng_clk;
struct mutex rng_lock;
struct icc_path *icc_path;
};
static struct msm_rng_device msm_rng_device_info;
static struct msm_rng_device *msm_rng_dev_cached;
static struct mutex cached_rng_lock;
static long msm_rng_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
long ret = 0;
switch (cmd) {
case QRNG_IOCTL_RESET_BUS_BANDWIDTH:
pr_debug("calling msm_rng_bus_scale(LOW)\n");
ret = icc_set_bw(msm_rng_device_info.icc_path, 0, 0);
if (ret)
pr_err("failed qrng_reset_bus_bw, ret = %ld\n", ret);
break;
default:
pr_err("Unsupported IOCTL call\n");
break;
}
return ret;
}
/*
 * This function reads the hardware random bit generator directly and
 * returns the data to the caller.
 */
static int msm_rng_direct_read(struct msm_rng_device *msm_rng_dev,
void *data, size_t max)
{
struct platform_device *pdev;
void __iomem *base;
size_t currsize = 0;
u32 val = 0;
u32 *retdata = data;
int ret;
int failed = 0;
pdev = msm_rng_dev->pdev;
base = msm_rng_dev->base;
/* no room for word data */
if (max < 4)
return 0;
mutex_lock(&msm_rng_dev->rng_lock);
if (msm_rng_dev->icc_path) {
ret = icc_set_bw(msm_rng_dev->icc_path, 0, 300000);
if (ret) {
pr_err("bus_scale_client_update_req failed\n");
goto bus_err;
}
}
/* enable PRNG clock */
if (msm_rng_dev->prng_clk) {
ret = clk_prepare_enable(msm_rng_dev->prng_clk);
if (ret) {
pr_err("failed to enable prng clock\n");
goto err;
}
}
/* read random data from h/w */
do {
/* check status bit if data is available */
if (!(readl_relaxed(base + PRNG_STATUS_OFFSET)
& 0x00000001)) {
if (failed++ == RETRY_MAX_CNT) {
if (currsize == 0)
pr_err("Data not available\n");
break;
}
udelay(RETRY_DELAY_INTERVAL);
} else {
/* read FIFO */
val = readl_relaxed(base + PRNG_DATA_OUT_OFFSET);
/* write data back to the caller's pointer */
*(retdata++) = val;
currsize += 4;
/* make sure we stay on 32bit boundary */
if ((max - currsize) < 4)
break;
}
} while (currsize < max);
/* vote to turn off clock */
if (msm_rng_dev->prng_clk)
clk_disable_unprepare(msm_rng_dev->prng_clk);
err:
if (msm_rng_dev->icc_path) {
ret = icc_set_bw(msm_rng_dev->icc_path, 0, 0);
if (ret)
pr_err("bus_scale_client_update_req failed\n");
}
bus_err:
mutex_unlock(&msm_rng_dev->rng_lock);
val = 0L;
return currsize;
}
static int msm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
struct msm_rng_device *msm_rng_dev;
int rv = 0;
msm_rng_dev = (struct msm_rng_device *)rng->priv;
rv = msm_rng_direct_read(msm_rng_dev, data, max);
return rv;
}
static struct hwrng msm_rng = {
.name = DRIVER_NAME,
.read = msm_rng_read,
.quality = 1024,
};
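/*
 * Once hwrng_register() succeeds in probe below, the hw_random framework
 * exposes this generator to userspace via /dev/hwrng.
 */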
static int msm_rng_enable_hw(struct msm_rng_device *msm_rng_dev)
{
unsigned long val = 0;
unsigned long reg_val = 0;
int ret = 0;
if (msm_rng_dev->icc_path) {
ret = icc_set_bw(msm_rng_dev->icc_path, 0, 30000);
if (ret)
pr_err("bus_scale_client_update_req failed\n");
}
/* Enable the PRNG CLK */
if (msm_rng_dev->prng_clk) {
ret = clk_prepare_enable(msm_rng_dev->prng_clk);
if (ret) {
dev_err(&(msm_rng_dev->pdev)->dev,
"failed to enable clock in probe\n");
return -EPERM;
}
}
/* Enable PRNG h/w only if it is NOT ON */
val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET) &
PRNG_HW_ENABLE;
/* PRNG H/W is not ON */
if (val != PRNG_HW_ENABLE) {
val = readl_relaxed(msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET);
val &= PRNG_LFSR_CFG_MASK;
val |= PRNG_LFSR_CFG_CLOCKS;
writel_relaxed(val, msm_rng_dev->base + PRNG_LFSR_CFG_OFFSET);
/* Ensure the LFSR config is written before the PRNG CONFIG register */
mb();
reg_val = readl_relaxed(msm_rng_dev->base + PRNG_CONFIG_OFFSET)
& PRNG_CONFIG_MASK;
reg_val |= PRNG_HW_ENABLE;
writel_relaxed(reg_val, msm_rng_dev->base + PRNG_CONFIG_OFFSET);
/* The PRNG clk should be disabled only after we enable the
* PRNG h/w by writing to the PRNG CONFIG register.
*/
mb();
}
if (msm_rng_dev->prng_clk)
clk_disable_unprepare(msm_rng_dev->prng_clk);
if (msm_rng_dev->icc_path) {
ret = icc_set_bw(msm_rng_dev->icc_path, 0, 0);
if (ret)
pr_err("bus_scale_client_update_req failed\n");
}
return 0;
}
static const struct file_operations msm_rng_fops = {
.unlocked_ioctl = msm_rng_ioctl,
};
static struct class *msm_rng_class;
static struct cdev msm_rng_cdev;
static int msm_rng_probe(struct platform_device *pdev)
{
struct resource *res;
struct msm_rng_device *msm_rng_dev = NULL;
void __iomem *base = NULL;
bool configure_qrng = true;
int error = 0;
struct device *dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "invalid address\n");
error = -EFAULT;
goto err_exit;
}
msm_rng_dev = kzalloc(sizeof(struct msm_rng_device), GFP_KERNEL);
if (!msm_rng_dev) {
error = -ENOMEM;
goto err_exit;
}
base = ioremap(res->start, resource_size(res));
if (!base) {
dev_err(&pdev->dev, "ioremap failed\n");
error = -ENOMEM;
goto err_iomap;
}
msm_rng_dev->base = base;
/* create a handle for clock control */
if (pdev->dev.of_node) {
if (of_property_read_bool(pdev->dev.of_node,
"qcom,no-clock-support"))
msm_rng_dev->prng_clk = NULL;
else
msm_rng_dev->prng_clk = clk_get(&pdev->dev,
"km_clk_src");
}
if (IS_ERR(msm_rng_dev->prng_clk)) {
dev_err(&pdev->dev, "failed to register clock source\n");
error = -ENODEV;
goto err_clk_get;
}
/* save away pdev and register driver data */
msm_rng_dev->pdev = pdev;
platform_set_drvdata(pdev, msm_rng_dev);
if (pdev->dev.of_node) {
msm_rng_dev->icc_path = of_icc_get(&pdev->dev, "data_path");
if (IS_ERR(msm_rng_dev->icc_path)) {
error = PTR_ERR(msm_rng_dev->icc_path);
dev_err(&pdev->dev, "get icc path err %d\n", error);
goto err_icc_get;
}
/* cache the validated icc path for the ioctl handler */
msm_rng_device_info.icc_path = msm_rng_dev->icc_path;
}
/* Enable rng h/w for the targets which can access the entire
* address space of PRNG.
*/
if ((pdev->dev.of_node) && (of_property_read_bool(pdev->dev.of_node,
"qcom,no-qrng-config")))
configure_qrng = false;
if (configure_qrng) {
error = msm_rng_enable_hw(msm_rng_dev);
if (error)
goto err_icc_get;
}
mutex_init(&msm_rng_dev->rng_lock);
mutex_init(&cached_rng_lock);
/* register with hwrng framework */
msm_rng.priv = (unsigned long) msm_rng_dev;
error = hwrng_register(&msm_rng);
if (error) {
dev_err(&pdev->dev, "failed to register hwrng\n");
goto err_reg_hwrng;
}
error = register_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME, &msm_rng_fops);
if (error) {
dev_err(&pdev->dev, "failed to register chrdev\n");
goto err_reg_chrdev;
}
#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE)
msm_rng_class = class_create("msm-rng");
#else
msm_rng_class = class_create(THIS_MODULE, "msm-rng");
#endif
if (IS_ERR(msm_rng_class)) {
pr_err("class_create failed\n");
error = PTR_ERR(msm_rng_class);
goto err_create_cls;
}
dev = device_create(msm_rng_class, NULL, MKDEV(QRNG_IOC_MAGIC, 0),
NULL, "msm-rng");
if (IS_ERR(dev)) {
pr_err("Device create failed\n");
error = PTR_ERR(dev);
goto err_create_dev;
}
cdev_init(&msm_rng_cdev, &msm_rng_fops);
msm_rng_dev_cached = msm_rng_dev;
return error;
err_create_dev:
class_destroy(msm_rng_class);
err_create_cls:
unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
err_reg_chrdev:
hwrng_unregister(&msm_rng);
err_reg_hwrng:
if (msm_rng_dev->icc_path)
icc_put(msm_rng_dev->icc_path);
err_icc_get:
if (msm_rng_dev->prng_clk)
clk_put(msm_rng_dev->prng_clk);
err_clk_get:
iounmap(msm_rng_dev->base);
err_iomap:
kfree_sensitive(msm_rng_dev);
err_exit:
return error;
}
static int msm_rng_remove(struct platform_device *pdev)
{
struct msm_rng_device *msm_rng_dev = platform_get_drvdata(pdev);
cdev_del(&msm_rng_cdev);
device_destroy(msm_rng_class, MKDEV(QRNG_IOC_MAGIC, 0));
class_destroy(msm_rng_class);
unregister_chrdev(QRNG_IOC_MAGIC, DRIVER_NAME);
hwrng_unregister(&msm_rng);
if (msm_rng_dev->prng_clk)
clk_put(msm_rng_dev->prng_clk);
iounmap(msm_rng_dev->base);
platform_set_drvdata(pdev, NULL);
if (msm_rng_dev->icc_path)
icc_put(msm_rng_dev->icc_path);
kfree_sensitive(msm_rng_dev);
msm_rng_dev_cached = NULL;
return 0;
}
static int qrng_get_random(struct crypto_rng *tfm, const u8 *src,
unsigned int slen, u8 *rdata,
unsigned int dlen)
{
int sizeread = 0;
int rv = -EFAULT;
if (!msm_rng_dev_cached) {
pr_err("%s: msm_rng_dev is not initialized\n", __func__);
rv = -ENODEV;
goto err_exit;
}
if (!rdata) {
pr_err("%s: data buffer is null\n", __func__);
rv = -EINVAL;
goto err_exit;
}
if (signal_pending(current) ||
mutex_lock_interruptible(&cached_rng_lock)) {
pr_err("%s: mutex lock interrupted\n", __func__);
rv = -ERESTARTSYS;
goto err_exit;
}
sizeread = msm_rng_direct_read(msm_rng_dev_cached, rdata, dlen);
if (sizeread == dlen)
rv = 0;
mutex_unlock(&cached_rng_lock);
err_exit:
return rv;
}
static int qrng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
{
return 0;
}
static struct rng_alg rng_algs[] = { {
.generate = qrng_get_random,
.seed = qrng_reset,
.seedsize = 0,
.base = {
.cra_name = "qrng",
.cra_driver_name = "fips_hw_qrng",
.cra_priority = 300,
.cra_ctxsize = 0,
.cra_module = THIS_MODULE,
}
} };
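/*
 * Illustrative sketch of a hypothetical in-kernel consumer of the "qrng"
 * algorithm registered above (caller code and buffer size are examples,
 * not part of this driver):
 *
 *	struct crypto_rng *rng = crypto_alloc_rng("qrng", 0, 0);
 *
 *	if (!IS_ERR(rng)) {
 *		u8 buf[32];
 *
 *		if (!crypto_rng_get_bytes(rng, buf, sizeof(buf)))
 *			;	// buf now holds hardware random data
 *		crypto_free_rng(rng);
 *	}
 */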
static const struct of_device_id qrng_match[] = {
{.compatible = "qcom,msm-rng"},
{},
};
static struct platform_driver rng_driver = {
.probe = msm_rng_probe,
.remove = msm_rng_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = qrng_match,
},
};
static int __init msm_rng_init(void)
{
int ret;
msm_rng_dev_cached = NULL;
ret = platform_driver_register(&rng_driver);
if (ret) {
pr_err("%s: platform_driver_register error:%d\n",
__func__, ret);
goto err_exit;
}
ret = crypto_register_rngs(rng_algs, ARRAY_SIZE(rng_algs));
if (ret) {
pr_err("%s: crypto_register_algs error:%d\n",
__func__, ret);
goto err_exit;
}
err_exit:
return ret;
}
module_init(msm_rng_init);
static void __exit msm_rng_exit(void)
{
crypto_unregister_rngs(rng_algs, ARRAY_SIZE(rng_algs));
platform_driver_unregister(&rng_driver);
}
module_exit(msm_rng_exit);
MODULE_DESCRIPTION("QTI MSM Random Number Driver");
MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,145 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _QCOM_INLINE_CRYPTO_ENGINE_H_
#define _QCOM_INLINE_CRYPTO_ENGINE_H_
#include <linux/platform_device.h>
#include <linux/cdev.h>
struct request;
enum ice_cryto_algo_mode {
ICE_CRYPTO_ALGO_MODE_AES_ECB = 0x0,
ICE_CRYPTO_ALGO_MODE_AES_XTS = 0x3,
};
enum ice_crpto_key_size {
ICE_CRYPTO_KEY_SIZE_128 = 0x0,
ICE_CRYPTO_KEY_SIZE_256 = 0x2,
};
enum ice_crpto_key_mode {
ICE_CRYPTO_USE_KEY0_HW_KEY = 0x0,
ICE_CRYPTO_USE_KEY1_HW_KEY = 0x1,
ICE_CRYPTO_USE_LUT_SW_KEY0 = 0x2,
ICE_CRYPTO_USE_LUT_SW_KEY = 0x3
};
#define QCOM_ICE_TYPE_NAME_LEN 8
typedef void (*ice_error_cb)(void *, u32 error);
struct qcom_ice_bus_vote {
uint32_t client_handle;
uint32_t curr_vote;
int min_bw_vote;
int max_bw_vote;
int saved_vote;
bool is_max_bw_needed;
struct device_attribute max_bus_bw;
};
/*
* ICE HW device structure.
*/
struct ice_device {
struct list_head list;
struct device *pdev;
struct cdev cdev;
dev_t device_no;
struct class *driver_class;
void __iomem *mmio;
struct resource *res;
int irq;
bool is_ice_enabled;
bool is_ice_disable_fuse_blown;
ice_error_cb error_cb;
void *host_controller_data; /* UFS/EMMC/other? */
struct list_head clk_list_head;
u32 ice_hw_version;
bool is_ice_clk_available;
char ice_instance_type[QCOM_ICE_TYPE_NAME_LEN];
struct regulator *reg;
bool is_regulator_available;
struct qcom_ice_bus_vote bus_vote;
ktime_t ice_reset_start_time;
ktime_t ice_reset_complete_time;
void *key_table;
};
struct ice_crypto_setting {
enum ice_crpto_key_size key_size;
enum ice_cryto_algo_mode algo_mode;
enum ice_crpto_key_mode key_mode;
short key_index;
};
struct ice_data_setting {
struct ice_crypto_setting crypto_data;
bool sw_forced_context_switch;
bool decr_bypass;
bool encr_bypass;
};
/* MSM ICE Crypto Data Unit of target DUN of Transfer Request */
enum ice_crypto_data_unit {
ICE_CRYPTO_DATA_UNIT_512_B = 0,
ICE_CRYPTO_DATA_UNIT_1_KB = 1,
ICE_CRYPTO_DATA_UNIT_2_KB = 2,
ICE_CRYPTO_DATA_UNIT_4_KB = 3,
ICE_CRYPTO_DATA_UNIT_8_KB = 4,
ICE_CRYPTO_DATA_UNIT_16_KB = 5,
ICE_CRYPTO_DATA_UNIT_32_KB = 6,
ICE_CRYPTO_DATA_UNIT_64_KB = 7,
};
struct qcom_ice_variant_ops *qcom_ice_get_variant_ops(struct device_node *node);
struct platform_device *qcom_ice_get_pdevice(struct device_node *node);
#if IS_ENABLED(CONFIG_CYRPTO_DEV_QCOM_ICE)
int enable_ice_setup(struct ice_device *ice_dev);
int disable_ice_setup(struct ice_device *ice_dev);
int qcom_ice_setup_ice_hw(const char *storage_type, int enable);
void qcom_ice_set_fde_flag(int flag);
struct list_head *get_ice_dev_list(void);
#else
static inline int enable_ice_setup(struct ice_device *ice_dev)
{
return 0;
}
static inline int disable_ice_setup(struct ice_device *ice_dev)
{
return 0;
}
static inline int qcom_ice_setup_ice_hw(const char *storage_type, int enable)
{
return 0;
}
static inline void qcom_ice_set_fde_flag(int flag) {}
static inline struct list_head *get_ice_dev_list(void)
{
return NULL;
}
#endif
struct qcom_ice_variant_ops {
const char *name;
int (*init)(struct platform_device *device_init, void *init_data,
ice_error_cb err);
int (*reset)(struct platform_device *device_reset);
int (*resume)(struct platform_device *device_resume);
int (*suspend)(struct platform_device *device_suspend);
int (*config_start)(struct platform_device *device_start,
struct request *req, struct ice_data_setting *setting,
bool start);
int (*config_end)(struct platform_device *pdev,
struct request *req);
int (*status)(struct platform_device *device_status);
void (*debug)(struct platform_device *device_debug);
};
#endif /* _QCOM_INLINE_CRYPTO_ENGINE_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,112 @@
load(
"//build/kernel/kleaf:kernel.bzl",
"ddk_module",
"kernel_modules_install",
)
load(
":securemsm_modules.bzl",
"securemsm_modules",
"securemsm_modules_by_config",
)
load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir")
def _replace_formatting_codes(target, variant, s):
kernel_build = "{}_{}".format(target, variant)
return s.replace("%b", kernel_build).replace("%t", target)
def _console_print(target, variant, module, message):
if module:
print("{}: {}: securemsm-kernel: {}: {}".format(target, variant, module, message))
else:
print("{}: {}: securemsm-kernel: {} ".format(target, variant, message))
def _get_options(target, variant, target_config_option, modules, extra_options):
all_options = {option: True for option in extra_options}
redundant_options = []
for option in securemsm_modules_by_config:
module_name = securemsm_modules_by_config[option]
if option in all_options:
if module_name in modules:
redundant_options.append(option)
else:
_console_print(target, variant, None, 'WARNING: Config option "{}" corresponds to securemsm module {}, but this module is not listed in module list!'.format(option, module_name))
else:
all_options[option] = True
if target_config_option in all_options:
redundant_options.append(target_config_option)
else:
all_options[target_config_option] = True
if redundant_options:
_console_print(target, variant, None, "INFO: The following options are already declared either by a module or the target, no need to redeclare: \n{}".format("\n".join(redundant_options)))
return all_options
def _get_module_srcs(target, variant, module, options):
srcs = [] + module["default_srcs"] + module["srcs"]
module_path = "{}/".format(module["path"]) if module["path"] else ""
for option in module["config_srcs"]:
srcs.extend(module["config_srcs"][option].get(option in options, []))
globbed_srcs = native.glob(["{}{}".format(module_path, _replace_formatting_codes(target, variant, src)) for src in srcs])
if not globbed_srcs:
_console_print(target, variant, module["name"], "WARNING: Module has no sources attached!")
return globbed_srcs
def define_target_variant_modules(target, variant, modules, extra_options = [], config_option = None):
kernel_build_variant = "{}_{}".format(target, variant)
options = _get_options(target, variant, config_option, modules, extra_options)
module_rules = []
modules = [securemsm_modules[module_name] for module_name in modules]
target_local_defines = ["SMCINVOKE_TRACE_INCLUDE_PATH=../../../{}/smcinvoke".format(native.package_name())]
for config in extra_options:
target_local_defines.append(config)
for module in modules:
rule_name = "{}_{}".format(kernel_build_variant, module["name"])
module_srcs = _get_module_srcs(target, variant, module, options)
ddk_module(
name = rule_name,
kernel_build = "//msm-kernel:{}".format(kernel_build_variant),
srcs = module_srcs,
out = "{}.ko".format(module["name"]),
deps = ["//msm-kernel:all_headers"] + [_replace_formatting_codes(target, variant, dep) for dep in module["deps"]],
hdrs = module["hdrs"],
local_defines = target_local_defines,
copts = module["copts"],
)
module_rules.append(rule_name)
copy_to_dist_dir(
name = "{}_securemsm-kernel_dist".format(kernel_build_variant),
data = module_rules,
dist_dir = "out/target/product/{}/dlkm/lib/modules/".format(target),
flat = True,
wipe_dist_dir = False,
allow_duplicate_filenames = False,
mode_overrides = {"**/*": "644"},
log = "info",
)
kernel_modules_install(
name = "{}_modules_install".format(kernel_build_variant),
kernel_build = "//msm-kernel:{}".format(kernel_build_variant),
kernel_modules = module_rules,
)
def define_consolidate_gki_modules(target, modules, extra_options = [], config_option = None):
define_target_variant_modules(target, "consolidate", modules, extra_options, config_option)
define_target_variant_modules(target, "gki", modules, extra_options, config_option)
define_target_variant_modules(target, "perf", modules, extra_options, config_option)

View File

@ -0,0 +1,71 @@
#Build ssg kernel driver
ENABLE_SECUREMSM_DLKM := true
ENABLE_SECUREMSM_QTEE_DLKM := true
ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
ifeq ($(TARGET_KERNEL_DLKM_SECURE_MSM_OVERRIDE), false)
ENABLE_SECUREMSM_DLKM := false
endif
ifeq ($(TARGET_KERNEL_DLKM_SECUREMSM_QTEE_OVERRIDE), false)
ENABLE_SECUREMSM_QTEE_DLKM := false
endif
endif
ifeq ($(ENABLE_SECUREMSM_DLKM), true)
ENABLE_QCRYPTO_DLKM := true
ENABLE_HDCP_QSEECOM_DLKM := true
ENABLE_QRNG_DLKM := true
ifeq ($(TARGET_USES_SMMU_PROXY), true)
ENABLE_SMMU_PROXY := true
endif #TARGET_USES_SMMU_PROXY
endif #ENABLE_SECUREMSM_DLKM
ifeq ($(ENABLE_SECUREMSM_QTEE_DLKM), true)
ENABLE_SMCINVOKE_DLKM := true
ENABLE_TZLOG_DLKM := true
#Enable Qseecom if TARGET_ENABLE_QSEECOM or TARGET_BOARD_AUTO is set to true
ifneq (, $(filter true, $(TARGET_ENABLE_QSEECOM) $(TARGET_BOARD_AUTO)))
ENABLE_QSEECOM_DLKM := true
endif #TARGET_ENABLE_QSEECOM OR TARGET_BOARD_AUTO
endif #ENABLE_SECUREMSM_QTEE_DLKM
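# e.g. a device makefile (hypothetical) could opt in with:
#   TARGET_ENABLE_QSEECOM := true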
ifeq ($(TARGET_USES_GY), true)
ENABLE_QCRYPTO_DLKM := false
ENABLE_HDCP_QSEECOM_DLKM := false
ENABLE_QRNG_DLKM := false
ENABLE_SMMU_PROXY := false
ENABLE_SMCINVOKE_DLKM := true
ENABLE_TZLOG_DLKM := false
ENABLE_QSEECOM_DLKM := false
endif #TARGET_USES_GY
ifeq ($(ENABLE_QCRYPTO_DLKM), true)
PRODUCT_PACKAGES += qcedev-mod_dlkm.ko
PRODUCT_PACKAGES += qce50_dlkm.ko
PRODUCT_PACKAGES += qcrypto-msm_dlkm.ko
endif #ENABLE_QCRYPTO_DLKM
ifeq ($(ENABLE_HDCP_QSEECOM_DLKM), true)
PRODUCT_PACKAGES += hdcp_qseecom_dlkm.ko
endif #ENABLE_HDCP_QSEECOM_DLKM
ifeq ($(ENABLE_QRNG_DLKM), true)
PRODUCT_PACKAGES += qrng_dlkm.ko
endif #ENABLE_QRNG_DLKM
ifeq ($(ENABLE_SMMU_PROXY), true)
PRODUCT_PACKAGES += smmu_proxy_dlkm.ko
endif #ENABLE_SMMU_PROXY
ifeq ($(ENABLE_SMCINVOKE_DLKM), true)
PRODUCT_PACKAGES += smcinvoke_dlkm.ko
endif #ENABLE_SMCINVOKE_DLKM
ifeq ($(ENABLE_TZLOG_DLKM), true)
PRODUCT_PACKAGES += tz_log_dlkm.ko
endif #ENABLE_TZLOG_DLKM
ifeq ($(ENABLE_QSEECOM_DLKM), true)
PRODUCT_PACKAGES += qseecom_dlkm.ko
endif #ENABLE_QSEECOM_DLKM

View File

@ -0,0 +1,75 @@
ENABLE_SECUREMSM_DLKM := true
ENABLE_SECUREMSM_QTEE_DLKM := true
ifeq ($(TARGET_KERNEL_DLKM_DISABLE), true)
ifeq ($(TARGET_KERNEL_DLKM_SECURE_MSM_OVERRIDE), false)
ENABLE_SECUREMSM_DLKM := false
endif
ifeq ($(TARGET_KERNEL_DLKM_SECUREMSM_QTEE_OVERRIDE), false)
ENABLE_SECUREMSM_QTEE_DLKM := false
endif
endif
ifeq ($(ENABLE_SECUREMSM_DLKM), true)
ENABLE_QCRYPTO_DLKM := true
ENABLE_HDCP_QSEECOM_DLKM := true
ENABLE_QRNG_DLKM := true
ifeq ($(TARGET_USES_SMMU_PROXY), true)
ENABLE_SMMU_PROXY := true
endif #TARGET_USES_SMMU_PROXY
endif #ENABLE_SECUREMSM_DLKM
ifeq ($(ENABLE_SECUREMSM_QTEE_DLKM), true)
ENABLE_SMCINVOKE_DLKM := true
ENABLE_TZLOG_DLKM := true
#Enable Qseecom if TARGET_ENABLE_QSEECOM or TARGET_BOARD_AUTO is set to true
ifneq (, $(filter true, $(TARGET_ENABLE_QSEECOM) $(TARGET_BOARD_AUTO)))
ENABLE_QSEECOM_DLKM := true
endif #TARGET_ENABLE_QSEECOM OR TARGET_BOARD_AUTO
endif #ENABLE_SECUREMSM_QTEE_DLKM
ifeq ($(TARGET_USES_GY), true)
ENABLE_QCRYPTO_DLKM := false
ENABLE_HDCP_QSEECOM_DLKM := false
ENABLE_QRNG_DLKM := false
ENABLE_SMMU_PROXY := false
ENABLE_SMCINVOKE_DLKM := true
ENABLE_TZLOG_DLKM := false
ENABLE_QSEECOM_DLKM := false
endif #TARGET_USES_GY
ifeq ($(ENABLE_QCRYPTO_DLKM), true)
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/qcedev-mod_dlkm.ko \
$(KERNEL_MODULES_OUT)/qcrypto-msm_dlkm.ko \
$(KERNEL_MODULES_OUT)/qce50_dlkm.ko
endif #ENABLE_QCRYPTO_DLKM
ifeq ($(ENABLE_QRNG_DLKM), true)
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/qrng_dlkm.ko
endif #ENABLE_QRNG_DLKM
ifeq ($(ENABLE_HDCP_QSEECOM_DLKM), true)
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/hdcp_qseecom_dlkm.ko
BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/hdcp_qseecom_dlkm.ko
endif #ENABLE_HDCP_QSEECOM_DLKM
ifeq ($(ENABLE_SMMU_PROXY), true)
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smmu_proxy_dlkm.ko
endif #ENABLE_SMMU_PROXY
ifeq ($(ENABLE_SMCINVOKE_DLKM), true)
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko
BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko
BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/smcinvoke_dlkm.ko
endif #ENABLE_SMCINVOKE_DLKM
ifeq ($(ENABLE_TZLOG_DLKM), true)
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/tz_log_dlkm.ko
BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/tz_log_dlkm.ko
endif # ENABLE_TZLOG_DLKM
ifeq ($(ENABLE_QSEECOM_DLKM), true)
BOARD_VENDOR_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/qseecom_dlkm.ko
BOARD_VENDOR_RAMDISK_KERNEL_MODULES += $(KERNEL_MODULES_OUT)/qseecom_dlkm.ko
BOARD_VENDOR_RAMDISK_RECOVERY_KERNEL_MODULES_LOAD += $(KERNEL_MODULES_OUT)/qseecom_dlkm.ko
endif #ENABLE_QSEECOM_DLKM

View File

@ -0,0 +1,160 @@
SMCINVOKE_PATH = "smcinvoke"
QSEECOM_PATH = "qseecom"
TZLOG_PATH = "tz_log"
HDCP_PATH = "hdcp"
QCEDEV_PATH = "crypto-qti"
QRNG_PATH = "qrng"
SMMU_PROXY_PATH = "smmu-proxy"
# This dictionary holds all the securemsm-kernel modules included by calling register_securemsm_module
securemsm_modules = {}
securemsm_modules_by_config = {}
# Registers securemsm module to kernel build system.
# name: The name of the module. The name of the file generated for this module will be {name}.ko.
# path: The path that will be prepended to all sources listed for this module.
# config_option: The config option that will get enabled if this module is enabled. Not all modules have one; this parameter is optional.
# config_srcs: A dictionary of sources to be added to the module depending on whether a configuration option is enabled. The keys of the dictionary are
# the names of config options, and the value takes one of two forms. If it is a list, it is the list of sources to be added to the module if the config
# option is enabled. If the value is another dictionary, sources to be added when the config option is DISABLED can be specified as a list under the False key.
# default_srcs: A list of sources to be added to the module regardless of configuration options.
# deps: A list of kernel_module or ddk_module rules that this module depends on.
def register_securemsm_module(name, path = None, config_option = None, default_srcs = [], config_srcs = {}, deps = [], srcs = [], copts = [], hdrs = []):
processed_config_srcs = {}
for config_src_name in config_srcs:
config_src = config_srcs[config_src_name]
if type(config_src) == "list":
processed_config_srcs[config_src_name] = {True: config_src}
else:
processed_config_srcs[config_src_name] = config_src
module = {
"name": name,
"path": path,
"default_srcs": default_srcs,
"config_srcs": processed_config_srcs,
"config_option": config_option,
"deps": deps,
"copts": copts,
"srcs": srcs,
"hdrs": hdrs,
}
securemsm_modules[name] = module
if config_option:
securemsm_modules_by_config[config_option] = name
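# Illustrative sketch (hypothetical names) of the two config_srcs forms:
#
#     register_securemsm_module(
#         name = "example_dlkm",
#         path = "example",
#         config_option = "CONFIG_EXAMPLE",
#         default_srcs = ["example_main.c"],
#         config_srcs = {
#             # list form: compiled only when the option is enabled
#             "CONFIG_EXAMPLE_EXTRA": ["example_extra.c"],
#             # dict form: the False key lists sources used when disabled
#             "CONFIG_EXAMPLE_FALLBACK": {False: ["example_fallback.c"]},
#         },
#     )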
# ------------------------------------ SECUREMSM MODULE DEFINITIONS ---------------------------------
register_securemsm_module(
name = "qseecom_dlkm",
path = QSEECOM_PATH,
default_srcs = [
"qseecom.c",
"ice.h",
],
deps = [":qseecom_kernel_headers"],
#srcs = ["config/sec-kernel_defconfig_qseecom.h"],
#copts = ["-include", "config/sec-kernel_defconfig_qseecom.h"],
)
register_securemsm_module(
name = "smcinvoke_dlkm",
path = SMCINVOKE_PATH,
default_srcs = [
"smcinvoke.c",
"smcinvoke_kernel.c",
"trace_smcinvoke.h",
"IQSEEComCompat.h",
"smci_qseecomcompat.h",
"IQSEEComCompatAppLoader.h",
"smci_qseecomcompatapploader.h",
],
deps = [":smcinvoke_kernel_headers", ":qseecom_kernel_headers", "%b_qseecom_dlkm"],
hdrs = [":smcinvoke_kernel_headers"],
)
register_securemsm_module(
name = "tz_log_dlkm",
path = TZLOG_PATH,
deps = [":qseecom_kernel_headers"],
default_srcs = ["tz_log.c"],
)
register_securemsm_module(
name = "hdcp_qseecom_dlkm",
path = HDCP_PATH,
default_srcs = [
"hdcp_qseecom.c",
"hdcp_qseecom.h",
"hdcp_main.c",
"smcinvoke_object.h",
"smci_object.h",
"hdcp_main.h",
"hdcp_smcinvoke.c",
"hdcp_smcinvoke.h",
"CAppClient.h",
"CAppLoader.h",
"IAppClient.h",
"smci_appclient.h",
"IAppController.h",
"smci_appcontroller.h",
"IAppLoader.h",
"smci_apploader.h",
"IClientEnv.h",
"smci_clientenv.h",
"IOpener.h",
"smci_opener.h",
"hdcp1.h",
"hdcp1_ops.h",
"hdcp2p2.h",
],
deps = [":hdcp_qseecom_dlkm", "%b_smcinvoke_dlkm", "%b_qseecom_dlkm"],
srcs = ["config/sec-kernel_defconfig.h"],
copts = [
"-include",
"config/sec-kernel_defconfig.h",
],
)
register_securemsm_module(
name = "qce50_dlkm",
path = QCEDEV_PATH,
default_srcs = ["qce50.c"],
deps = [":qcedev_local_headers"],
)
register_securemsm_module(
name = "qcedev-mod_dlkm",
path = QCEDEV_PATH,
default_srcs = [
"qcedev.c",
"qcedev_smmu.c"],
deps = [":qcedev_local_headers",
"%b_qce50_dlkm"],
)
register_securemsm_module(
name = "qrng_dlkm",
path = QRNG_PATH,
default_srcs = ["msm_rng.c"],
deps = [":qcedev_local_headers"],
)
register_securemsm_module(
name = "qcrypto-msm_dlkm",
path = QCEDEV_PATH,
default_srcs = ["qcrypto.c"],
deps = [":qcedev_local_headers",
"%b_qce50_dlkm"],
)
register_securemsm_module(
name = "smmu_proxy_dlkm",
path = SMMU_PROXY_PATH,
srcs = ["qti-smmu-proxy-pvm.c", "qti-smmu-proxy-common.c"],
deps = ["%b_smcinvoke_dlkm", ":smmu_proxy_headers"],
)

View File

@ -0,0 +1,71 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2021 The Linux Foundation. All rights reserved.
*/
#include "smcinvoke_object.h"
#define IQSEEComCompat_ERROR_APP_UNAVAILABLE INT32_C(10)
#define IQSEEComCompat_OP_sendRequest 0
#define IQSEEComCompat_OP_disconnect 1
#define IQSEEComCompat_OP_unload 2
static inline int32_t
IQSEEComCompat_release(struct Object self)
{
return Object_invoke(self, Object_OP_release, 0, 0);
}
static inline int32_t
IQSEEComCompat_retain(struct Object self)
{
return Object_invoke(self, Object_OP_retain, 0, 0);
}
static inline int32_t
IQSEEComCompat_sendRequest(struct Object self,
const void *reqIn_ptr, size_t reqIn_len,
const void *rspIn_ptr, size_t rspIn_len,
void *reqOut_ptr, size_t reqOut_len, size_t *reqOut_lenout,
void *rspOut_ptr, size_t rspOut_len, size_t *rspOut_lenout,
const uint32_t *embeddedBufOffsets_ptr,
size_t embeddedBufOffsets_len, uint32_t is64_val,
struct Object smo1_val, struct Object smo2_val,
struct Object smo3_val, struct Object smo4_val)
{
union ObjectArg a[10];
int32_t result;
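/*
 * Argument slots follow ObjectCounts_pack(4, 2, 4, 0) below:
 * a[0]..a[3] are input buffers, a[4]..a[5] are output buffers and
 * a[6]..a[9] are input objects.
 */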
a[0].bi = (struct ObjectBufIn) { reqIn_ptr, reqIn_len * 1 };
a[1].bi = (struct ObjectBufIn) { rspIn_ptr, rspIn_len * 1 };
a[4].b = (struct ObjectBuf) { reqOut_ptr, reqOut_len * 1 };
a[5].b = (struct ObjectBuf) { rspOut_ptr, rspOut_len * 1 };
a[2].bi = (struct ObjectBufIn) { embeddedBufOffsets_ptr,
embeddedBufOffsets_len * sizeof(uint32_t) };
a[3].b = (struct ObjectBuf) { &is64_val, sizeof(uint32_t) };
a[6].o = smo1_val;
a[7].o = smo2_val;
a[8].o = smo3_val;
a[9].o = smo4_val;
result = Object_invoke(self, IQSEEComCompat_OP_sendRequest, a,
ObjectCounts_pack(4, 2, 4, 0));
*reqOut_lenout = a[4].b.size / 1;
*rspOut_lenout = a[5].b.size / 1;
return result;
}
static inline int32_t
IQSEEComCompat_disconnect(struct Object self)
{
return Object_invoke(self, IQSEEComCompat_OP_disconnect, 0, 0);
}
static inline int32_t
IQSEEComCompat_unload(struct Object self)
{
return Object_invoke(self, IQSEEComCompat_OP_unload, 0, 0);
}

View File

@ -0,0 +1,106 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "smcinvoke_object.h"
#define IQSEEComCompatAppLoader_MAX_FILENAME_LEN UINT32_C(64)
#define IQSEEComCompatAppLoader_ELFCLASS32 UINT32_C(1)
#define IQSEEComCompatAppLoader_ELFCLASS64 UINT32_C(2)
#define IQSEEComCompatAppLoader_ERROR_INVALID_BUFFER INT32_C(10)
#define IQSEEComCompatAppLoader_ERROR_PIL_ROLLBACK_FAILURE INT32_C(11)
#define IQSEEComCompatAppLoader_ERROR_ELF_SIGNATURE_ERROR INT32_C(12)
#define IQSEEComCompatAppLoader_ERROR_METADATA_INVALID INT32_C(13)
#define IQSEEComCompatAppLoader_ERROR_MAX_NUM_APPS INT32_C(14)
#define IQSEEComCompatAppLoader_ERROR_NO_NAME_IN_METADATA INT32_C(15)
#define IQSEEComCompatAppLoader_ERROR_ALREADY_LOADED INT32_C(16)
#define IQSEEComCompatAppLoader_ERROR_EMBEDDED_IMAGE_NOT_FOUND INT32_C(17)
#define IQSEEComCompatAppLoader_ERROR_TZ_HEAP_MALLOC_FAILURE INT32_C(18)
#define IQSEEComCompatAppLoader_ERROR_TA_APP_REGION_MALLOC_FAILURE INT32_C(19)
#define IQSEEComCompatAppLoader_ERROR_CLIENT_CRED_PARSING_FAILURE INT32_C(20)
#define IQSEEComCompatAppLoader_ERROR_APP_UNTRUSTED_CLIENT INT32_C(21)
#define IQSEEComCompatAppLoader_ERROR_APP_BLACKLISTED INT32_C(22)
#define IQSEEComCompatAppLoader_ERROR_APP_NOT_LOADED INT32_C(23)
#define IQSEEComCompatAppLoader_ERROR_NOT_QSEECOM_COMPAT_APP INT32_C(24)
#define IQSEEComCompatAppLoader_ERROR_FILENAME_TOO_LONG INT32_C(25)
#define IQSEEComCompatAppLoader_ERROR_APP_ARCH_NOT_SUPPORTED INT32_C(26)
#define IQSEEComCompatAppLoader_OP_loadFromRegion 0
#define IQSEEComCompatAppLoader_OP_loadFromBuffer 1
#define IQSEEComCompatAppLoader_OP_lookupTA 2
static inline int32_t
IQSEEComCompatAppLoader_release(struct Object self)
{
return Object_invoke(self, Object_OP_release, 0, 0);
}
static inline int32_t
IQSEEComCompatAppLoader_retain(struct Object self)
{
return Object_invoke(self, Object_OP_retain, 0, 0);
}
static inline int32_t
IQSEEComCompatAppLoader_loadFromRegion(struct Object self,
struct Object appElf_val, const void *filename_ptr,
size_t filename_len, struct Object *appCompat_ptr)
{
union ObjectArg a[3];
int32_t result;
a[1].o = appElf_val;
a[0].bi = (struct ObjectBufIn) { filename_ptr, filename_len * 1 };
result = Object_invoke(self, IQSEEComCompatAppLoader_OP_loadFromRegion, a,
ObjectCounts_pack(1, 0, 1, 1));
*appCompat_ptr = a[2].o;
return result;
}
static inline int32_t
IQSEEComCompatAppLoader_loadFromBuffer(struct Object self,
const void *appElf_ptr, size_t appElf_len,
const void *filename_ptr, size_t filename_len,
void *distName_ptr, size_t distName_len,
size_t *distName_lenout, struct Object *appCompat_ptr)
{
union ObjectArg a[4];
int32_t result;
a[0].bi = (struct ObjectBufIn) { appElf_ptr, appElf_len * 1 };
a[1].bi = (struct ObjectBufIn) { filename_ptr, filename_len * 1 };
a[2].b = (struct ObjectBuf) { distName_ptr, distName_len * 1 };
result = Object_invoke(self, IQSEEComCompatAppLoader_OP_loadFromBuffer,
a, ObjectCounts_pack(2, 1, 0, 1));
*distName_lenout = a[2].b.size / 1;
*appCompat_ptr = a[3].o;
return result;
}
static inline int32_t
IQSEEComCompatAppLoader_lookupTA(struct Object self, const void *appName_ptr,
size_t appName_len, struct Object *appCompat_ptr)
{
union ObjectArg a[2];
int32_t result;
a[0].bi = (struct ObjectBufIn) { appName_ptr, appName_len * 1 };
result = Object_invoke(self, IQSEEComCompatAppLoader_OP_lookupTA,
a, ObjectCounts_pack(1, 0, 0, 1));
*appCompat_ptr = a[1].o;
return result;
}

View File

@ -0,0 +1,64 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SMCI_QSEECOMCOMPAT_H
#define __SMCI_QSEECOMCOMPAT_H
#include "smci_object.h"
#include "IQSEEComCompat.h"
#define SMCI_QSEECOMCOMPAT_ERROR_APP_UNAVAILABLE INT32_C(10)
#define SMCI_QSEECOMCOMPAT_OP_SENDREQUEST 0
#define SMCI_QSEECOMCOMPAT_OP_DISCONNECT 1
#define SMCI_QSEECOMCOMPAT_OP_UNLOAD 2
static inline int32_t
smci_qseecomcompat_release(struct smci_object self)
{
return IQSEEComCompat_release(self);
}
static inline int32_t
smci_qseecomcompat_retain(struct smci_object self)
{
return IQSEEComCompat_retain(self);
}
static inline int32_t
smci_qseecomcompat_sendrequest(struct smci_object self,
const void *req_in_ptr, size_t req_in_len,
const void *rsp_in_ptr, size_t rsp_in_len,
void *req_out_ptr, size_t req_out_len, size_t *req_out_lenout,
void *rsp_out_ptr, size_t rsp_out_len, size_t *rsp_out_lenout,
const uint32_t *embedded_buf_offsets_ptr,
size_t embedded_buf_offsets_len, uint32_t is64_val,
struct smci_object smo1_val, struct smci_object smo2_val,
struct smci_object smo3_val, struct smci_object smo4_val)
{
return IQSEEComCompat_sendRequest(self,
req_in_ptr, req_in_len,
rsp_in_ptr, rsp_in_len,
req_out_ptr, req_out_len, req_out_lenout,
rsp_out_ptr, rsp_out_len, rsp_out_lenout,
embedded_buf_offsets_ptr,
embedded_buf_offsets_len, is64_val,
smo1_val, smo2_val,
smo3_val, smo4_val);
}
static inline int32_t
smci_qseecomcompat_disconnect(struct smci_object self)
{
return IQSEEComCompat_disconnect(self);
}
static inline int32_t
smci_qseecomcompat_unload(struct smci_object self)
{
return IQSEEComCompat_unload(self);
}
#endif /* __SMCI_QSEECOMCOMPAT_H */

View File

@ -0,0 +1,83 @@
/* SPDX-License-Identifier: GPL-2.0-only
*
* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __SMCI_QSEECOMCOMPATAPPLOADER_H
#define __SMCI_QSEECOMCOMPATAPPLOADER_H
#include "smci_object.h"
#include "IQSEEComCompatAppLoader.h"
#define SMCI_QSEECOMCOMPATAPPLOADER_MAX_FILENAME_LEN UINT32_C(64)
#define SMCI_QSEECOMCOMPATAPPLOADER_ELFCLASS32 UINT32_C(1)
#define SMCI_QSEECOMCOMPATAPPLOADER_ELFCLASS64 UINT32_C(2)
#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_INVALID_BUFFER INT32_C(10)
#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_PIL_ROLLBACK_FAILURE INT32_C(11)
#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_ELF_SIGNATURE_ERROR INT32_C(12)
#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_METADATA_INVALID INT32_C(13)
#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_MAX_NUM_APPS INT32_C(14)
#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_NO_NAME_IN_METADATA INT32_C(15)
#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_ALREADY_LOADED INT32_C(16)
#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_EMBEDDED_IMAGE_NOT_FOUND INT32_C(17)
#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_TZ_HEAP_MALLOC_FAILURE INT32_C(18)
#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_TA_APP_REGION_MALLOC_FAILURE INT32_C(19)
#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_CLIENT_CRED_PARSING_FAILURE INT32_C(20)
#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_APP_UNTRUSTED_CLIENT INT32_C(21)
#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_APP_BLACKLISTED INT32_C(22)
#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_APP_NOT_LOADED INT32_C(23)
#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_NOT_QSEECOM_COMPAT_APP INT32_C(24)
#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_FILENAME_TOO_LONG INT32_C(25)
#define SMCI_QSEECOMCOMPATAPPLOADER_ERROR_APP_ARCH_NOT_SUPPORTED INT32_C(26)
#define SMCI_QSEECOMCOMPATAPPLOADER_OP_LOADFROMREGION 0
#define SMCI_QSEECOMCOMPATAPPLOADER_OP_LOADFROMBUFFER 1
#define SMCI_QSEECOMCOMPATAPPLOADER_OP_LOOKUPTA 2
static inline int32_t
smci_qseecomcompatapploader_release(struct smci_object self)
{
return IQSEEComCompatAppLoader_release(self);
}
static inline int32_t
smci_qseecomcompatapploader_retain(struct smci_object self)
{
return IQSEEComCompatAppLoader_retain(self);
}
static inline int32_t
smci_qseecomcompatapploader_loadfromregion(struct smci_object self,
struct smci_object app_elf_val, const void *filename_ptr,
size_t filename_len, struct smci_object *app_compat_ptr)
{
return IQSEEComCompatAppLoader_loadFromRegion(self,
app_elf_val, filename_ptr,
filename_len, app_compat_ptr);
}
static inline int32_t
smci_qseecomcompatapploader_loadfrombuffer(struct smci_object self,
const void *app_elf_ptr, size_t app_elf_len,
const void *filename_ptr, size_t filename_len,
void *dist_name_ptr, size_t dist_name_len,
size_t *dist_name_lenout, struct smci_object *app_compat_ptr)
{
return IQSEEComCompatAppLoader_loadFromBuffer(self,
app_elf_ptr, app_elf_len,
filename_ptr, filename_len,
dist_name_ptr, dist_name_len,
dist_name_lenout, app_compat_ptr);
}
static inline int32_t
smci_qseecomcompatapploader_lookupta(struct smci_object self, const void *app_name_ptr,
size_t app_name_len, struct smci_object *app_compat_ptr)
{
return IQSEEComCompatAppLoader_lookupTA(self, app_name_ptr,
app_name_len, app_compat_ptr);
}
#endif /* __SMCI_QSEECOMCOMPATAPPLOADER_H */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,639 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fdtable.h>
#include <linux/anon_inodes.h>
#include <linux/delay.h>
#include <linux/kref.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/elf.h>
#include "smcinvoke.h"
#include "smcinvoke_object.h"
#include "IClientEnv.h"
#if IS_ENABLED(CONFIG_QSEECOM_COMPAT)
#include "IQSEEComCompat.h"
#include "IQSEEComCompatAppLoader.h"
#include "linux/qseecom_api.h"
#if IS_ENABLED(CONFIG_QSEECOM_PROXY)
#include <linux/qseecom_kernel.h>
#else
#include "misc/qseecom_kernel.h"
#endif
#endif
#define MAX_FW_APP_SIZE 256 // Maximum application name size.
#define FILE_EXT_SIZE 5 // Size of a file extension such as ".mbn".
const uint32_t CQSEEComCompatAppLoader_UID = 122;
extern struct device *class_dev;
struct qseecom_compat_context {
void *dev; /* in/out */
unsigned char *sbuf; /* in/out */
uint32_t sbuf_len; /* in/out */
struct qtee_shm shm;
uint8_t app_arch;
struct Object client_env;
struct Object app_loader;
struct Object app_controller;
};
struct tzobject_context {
int fd;
struct kref refs;
};
static int invoke_over_smcinvoke(void *cxt,
uint32_t op,
union ObjectArg *args,
uint32_t counts);
static struct Object tzobject_new(int fd)
{
struct tzobject_context *me =
kzalloc(sizeof(struct tzobject_context), GFP_KERNEL);
if (!me)
return Object_NULL;
kref_init(&me->refs);
me->fd = fd;
pr_debug("%s: me->fd = %d, me->refs = %u\n", __func__,
me->fd, kref_read(&me->refs));
return (struct Object) { invoke_over_smcinvoke, me };
}
static void tzobject_delete(struct kref *refs)
{
struct tzobject_context *me = container_of(refs,
struct tzobject_context, refs);
pr_info("%s: me->fd = %d, me->refs = %d, files = %p\n",
__func__, me->fd, kref_read(&me->refs), current->files);
/*
 * After close_fd() the ref count drops to 0, but smcinvoke_release()
 * has not been called yet, so we first call
 * smcinvoke_release_from_kernel_client() to free the filp and ask TZ
 * to release the object, and only then call close_fd().
 */
smcinvoke_release_from_kernel_client(me->fd);
close_fd(me->fd);
kfree(me);
}
int getObjectFromHandle(int handle, struct Object *obj)
{
int ret = 0;
if (handle == SMCINVOKE_USERSPACE_OBJ_NULL) {
/* NULL object*/
Object_ASSIGN_NULL(*obj);
} else if (handle > SMCINVOKE_USERSPACE_OBJ_NULL) {
*obj = tzobject_new(handle);
if (Object_isNull(*obj))
ret = OBJECT_ERROR_BADOBJ;
} else {
pr_err("CBobj not supported for handle %d\n", handle);
ret = OBJECT_ERROR_BADOBJ;
}
return ret;
}
int getHandleFromObject(struct Object obj, int *handle)
{
int ret = 0;
if (Object_isNull(obj)) {
/* set NULL Object's fd to be -1 */
*handle = SMCINVOKE_USERSPACE_OBJ_NULL;
return ret;
}
if (obj.invoke == invoke_over_smcinvoke) {
struct tzobject_context *ctx = (struct tzobject_context *)(obj.context);
if (ctx != NULL) {
*handle = ctx->fd;
} else {
pr_err("Failed to get tzobject_context obj handle, ret = %d\n", ret);
ret = OBJECT_ERROR_BADOBJ;
}
} else {
pr_err("CBobj not supported\n");
ret = OBJECT_ERROR_BADOBJ;
}
return ret;
}
static int marshalIn(struct smcinvoke_cmd_req *req,
union smcinvoke_arg *argptr,
uint32_t op, union ObjectArg *args,
uint32_t counts)
{
size_t i = 0;
req->op = op;
req->counts = counts;
req->argsize = sizeof(union smcinvoke_arg);
req->args = (uintptr_t)argptr;
FOR_ARGS(i, counts, buffers) {
argptr[i].b.addr = (uintptr_t) args[i].b.ptr;
argptr[i].b.size = args[i].b.size;
}
FOR_ARGS(i, counts, OI) {
int handle = -1, ret;
ret = getHandleFromObject(args[i].o, &handle);
if (ret) {
pr_err("invalid OI[%zu]\n", i);
return OBJECT_ERROR_BADOBJ;
}
argptr[i].o.fd = handle;
}
FOR_ARGS(i, counts, OO) {
argptr[i].o.fd = SMCINVOKE_USERSPACE_OBJ_NULL;
}
return OBJECT_OK;
}
static int marshalOut(struct smcinvoke_cmd_req *req,
union smcinvoke_arg *argptr,
union ObjectArg *args, uint32_t counts,
struct tzobject_context *me)
{
int ret = req->result;
bool failed = false;
size_t i = 0;
argptr = (union smcinvoke_arg *)(uintptr_t)(req->args);
FOR_ARGS(i, counts, BO) {
args[i].b.size = argptr[i].b.size;
}
FOR_ARGS(i, counts, OO) {
ret = getObjectFromHandle(argptr[i].o.fd, &(args[i].o));
if (ret) {
pr_err("Failed to get OO[%zu] from handle = %d\n",
i, (int)argptr[i].o.fd);
failed = true;
break;
}
pr_debug("Succeed to create OO for args[%zu].o, fd = %d\n",
i, (int)argptr[i].o.fd);
}
if (failed) {
FOR_ARGS(i, counts, OO) {
Object_ASSIGN_NULL(args[i].o);
}
/* Only overwrite ret value if invoke result is 0 */
if (ret == 0)
ret = OBJECT_ERROR_BADOBJ;
}
return ret;
}
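/*
 * Invoke a TZ object on behalf of a kernel client: marshal the ObjectArg
 * array into smcinvoke_arg form, forward the request to the smcinvoke
 * driver and unmarshal any output buffers/objects on success.
 */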
static int invoke_over_smcinvoke(void *cxt,
uint32_t op,
union ObjectArg *args,
uint32_t counts)
{
int ret = OBJECT_OK;
struct smcinvoke_cmd_req req = {0, 0, 0, 0, 0};
size_t i = 0;
struct tzobject_context *me = NULL;
uint32_t method;
union smcinvoke_arg *argptr = NULL;
FOR_ARGS(i, counts, OO) {
args[i].o = Object_NULL;
}
me = (struct tzobject_context *)cxt;
method = ObjectOp_methodID(op);
pr_debug("%s: cxt = %p, fd = %d, op = %u, cnt = %x, refs = %u\n",
__func__, me, me->fd, op, counts, kref_read(&me->refs));
if (ObjectOp_isLocal(op)) {
switch (method) {
case Object_OP_retain:
kref_get(&me->refs);
return OBJECT_OK;
case Object_OP_release:
kref_put(&me->refs, tzobject_delete);
return OBJECT_OK;
}
return OBJECT_ERROR_REMOTE;
}
argptr = kcalloc(OBJECT_COUNTS_TOTAL(counts),
sizeof(union smcinvoke_arg), GFP_KERNEL);
if (argptr == NULL)
return OBJECT_ERROR_KMEM;
ret = marshalIn(&req, argptr, op, args, counts);
if (ret)
goto exit;
ret = process_invoke_request_from_kernel_client(me->fd, &req);
if (ret) {
pr_err("INVOKE failed with ret = %d, result = %d\n"
"obj.context = %p, fd = %d, op = %d, counts = 0x%x\n",
ret, req.result, me, me->fd, op, counts);
FOR_ARGS(i, counts, OO) {
struct smcinvoke_obj obj = argptr[i].o;
if (obj.fd >= 0) {
pr_err("Close OO[%zu].fd = %lld\n", i, obj.fd);
close_fd(obj.fd);
}
}
if (ret == -EBUSY) {
ret = OBJECT_ERROR_BUSY;
} else if (ret == -ENOMEM) {
ret = OBJECT_ERROR_KMEM;
} else {
ret = OBJECT_ERROR_UNAVAIL;
}
goto exit;
}
if (!req.result)
ret = marshalOut(&req, argptr, args, counts, me);
exit:
kfree(argptr);
return ret | req.result;
}
int get_root_obj(struct Object *rootObj)
{
int ret = 0;
int root_fd = -1;
ret = get_root_fd(&root_fd);
if (ret) {
pr_err("Failed to get root fd, ret = %d\n", ret);
return ret;
}
*rootObj = tzobject_new(root_fd);
if (Object_isNull(*rootObj)) {
close_fd(root_fd);
ret = -ENOMEM;
}
return ret;
}
/*
* Get a client environment using a NULL credentials Object
*/
int32_t get_client_env_object(struct Object *clientEnvObj)
{
int32_t ret = OBJECT_ERROR;
int retry_count = 0;
struct Object rootObj = Object_NULL;
/* get rootObj */
ret = get_root_obj(&rootObj);
if (ret) {
pr_err("Failed to create rootobj\n");
return ret;
}
/* get client env */
do {
ret = IClientEnv_registerWithCredentials(rootObj,
Object_NULL, clientEnvObj);
if (ret == OBJECT_ERROR_BUSY) {
pr_err("Secure side is busy,will retry after 5 ms, retry_count = %d",retry_count);
msleep(SMCINVOKE_INTERFACE_BUSY_WAIT_MS);
}
} while ((ret == OBJECT_ERROR_BUSY) && (retry_count++ < SMCINVOKE_INTERFACE_MAX_RETRY));
if (ret)
pr_err("Failed to get ClientEnvObject, ret = %d\n", ret);
Object_release(rootObj);
return ret;
}
EXPORT_SYMBOL(get_client_env_object);
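/*
 * Illustrative sketch of a hypothetical kernel client using the helper
 * above (EXAMPLE_SERVICE_UID is a placeholder, not a real service):
 *
 *	struct Object client_env = Object_NULL, service = Object_NULL;
 *
 *	if (!get_client_env_object(&client_env)) {
 *		if (!IClientEnv_open(client_env, EXAMPLE_SERVICE_UID, &service)) {
 *			// ... invoke operations on 'service' ...
 *			Object_release(service);
 *		}
 *		Object_release(client_env);
 *	}
 */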
#if IS_ENABLED(CONFIG_QSEECOM_COMPAT)
static int load_app(struct qseecom_compat_context *cxt, const char *app_name)
{
size_t fw_size = 0;
u8 *imgbuf_va = NULL;
int ret = 0;
char dist_name[MAX_FW_APP_SIZE] = {0};
size_t dist_name_len = 0;
struct qtee_shm shm = {0};
if (strnlen(app_name, MAX_FW_APP_SIZE) == MAX_FW_APP_SIZE) {
pr_err("The app_name (%s) with length %zu is not valid\n",
app_name, strnlen(app_name, MAX_FW_APP_SIZE));
return -EINVAL;
}
ret = IQSEEComCompatAppLoader_lookupTA(cxt->app_loader,
app_name, strlen(app_name), &cxt->app_controller);
if (!ret) {
pr_info("app %s exists\n", app_name);
return ret;
}
imgbuf_va = firmware_request_from_smcinvoke(app_name, &fw_size, &shm);
if (imgbuf_va == NULL) {
pr_err("Failed on firmware_request_from_smcinvoke\n");
return -EINVAL;
}
ret = IQSEEComCompatAppLoader_loadFromBuffer(
cxt->app_loader, imgbuf_va, fw_size,
app_name, strlen(app_name),
dist_name, MAX_FW_APP_SIZE, &dist_name_len,
&cxt->app_controller);
if (ret) {
pr_err("loadFromBuffer failed for app %s, ret = %d\n",
app_name, ret);
goto exit_release_shm;
}
cxt->app_arch = *(uint8_t *)(imgbuf_va + EI_CLASS);
pr_info("%s %d, loaded app %s, dist_name %s, dist_name_len %zu\n",
__func__, __LINE__, app_name, dist_name, dist_name_len);
exit_release_shm:
qtee_shmbridge_free_shm(&shm);
return ret;
}
static int __qseecom_start_app(struct qseecom_handle **handle,
char *app_name, uint32_t size)
{
int ret = 0;
struct qseecom_compat_context *cxt = NULL;
pr_warn("%s, start app %s, size %u\n",
__func__, app_name, size);
if (app_name == NULL || handle == NULL) {
pr_err("app_name is null or invalid handle\n");
return -EINVAL;
}
/* allocate qseecom_compat_context */
cxt = kzalloc(sizeof(struct qseecom_compat_context), GFP_KERNEL);
if (!cxt)
return -ENOMEM;
/* get client env */
ret = get_client_env_object(&cxt->client_env);
if (ret) {
pr_err("failed to get clientEnv when loading app %s, ret %d\n",
app_name, ret);
ret = -EINVAL;
goto exit_free_cxt;
}
/* get apploader with CQSEEComCompatAppLoader_UID */
ret = IClientEnv_open(cxt->client_env, CQSEEComCompatAppLoader_UID,
&cxt->app_loader);
if (ret) {
pr_err("failed to get apploader when loading app %s, ret %d\n",
app_name, ret);
ret = -EINVAL;
goto exit_release_clientenv;
}
/* load app*/
ret = load_app(cxt, app_name);
if (ret) {
pr_err("failed to load app %s, ret = %d\n",
app_name, ret);
ret = -EINVAL;
goto exit_release_apploader;
}
/* Get the physical address of the req/resp buffer */
ret = qtee_shmbridge_allocate_shm(size, &cxt->shm);
if (ret) {
pr_err("qtee_shmbridge_allocate_shm failed, ret :%d\n", ret);
ret = -EINVAL;
goto exit_release_appcontroller;
}
cxt->sbuf = cxt->shm.vaddr;
cxt->sbuf_len = size;
*handle = (struct qseecom_handle *)cxt;
return ret;
exit_release_appcontroller:
Object_release(cxt->app_controller);
exit_release_apploader:
Object_release(cxt->app_loader);
exit_release_clientenv:
Object_release(cxt->client_env);
exit_free_cxt:
kfree(cxt);
return ret;
}
static int __qseecom_shutdown_app(struct qseecom_handle **handle)
{
struct qseecom_compat_context *cxt = NULL;
if ((handle == NULL) || (*handle == NULL)) {
pr_err("Handle is NULL\n");
return -EINVAL;
}
cxt = (struct qseecom_compat_context *)(*handle);
qtee_shmbridge_free_shm(&cxt->shm);
Object_release(cxt->app_controller);
Object_release(cxt->app_loader);
Object_release(cxt->client_env);
kfree(cxt);
*handle = NULL;
return 0;
}
static int __qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
struct qseecom_compat_context *cxt =
(struct qseecom_compat_context *)handle;
size_t out_len = 0;
pr_debug("%s, sbuf_len %u, rbuf_len %u\n",
__func__, sbuf_len, rbuf_len);
if (!handle || !send_buf || !resp_buf || !sbuf_len || !rbuf_len) {
pr_err("One of params is invalid. %s, handle %p, send_buf %p,resp_buf %p,sbuf_len %u, rbuf_len %u\n",
__func__, handle, send_buf, resp_buf, sbuf_len, rbuf_len);
return -EINVAL;
}
return IQSEEComCompat_sendRequest(cxt->app_controller,
send_buf, sbuf_len,
resp_buf, rbuf_len,
send_buf, sbuf_len, &out_len,
resp_buf, rbuf_len, &out_len,
NULL, 0, /* embedded offset array */
(cxt->app_arch == ELFCLASS64),
Object_NULL, Object_NULL,
Object_NULL, Object_NULL);
}
#if IS_ENABLED(CONFIG_QSEECOM_PROXY)
const static struct qseecom_drv_ops qseecom_driver_ops = {
.qseecom_send_command = __qseecom_send_command,
.qseecom_start_app = __qseecom_start_app,
.qseecom_shutdown_app = __qseecom_shutdown_app,
};
int get_qseecom_kernel_fun_ops(void)
{
return provide_qseecom_kernel_fun_ops(&qseecom_driver_ops);
}
#else
int qseecom_start_app(struct qseecom_handle **handle,
char *app_name, uint32_t size)
{
return __qseecom_start_app(handle, app_name, size);
}
EXPORT_SYMBOL(qseecom_start_app);
int qseecom_shutdown_app(struct qseecom_handle **handle)
{
return __qseecom_shutdown_app(handle);
}
EXPORT_SYMBOL(qseecom_shutdown_app);
int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
{
return __qseecom_send_command(handle, send_buf, sbuf_len,
resp_buf, rbuf_len);
}
EXPORT_SYMBOL(qseecom_send_command);
#endif
#endif
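/*
 * Load a split TA image (<appname>.b00 ... <appname>.bNN), reassemble the
 * segments into one contiguous shmbridge buffer and return its virtual
 * address, or NULL on failure. *fw_size is set to the total image size.
 */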
char *firmware_request_from_smcinvoke(const char *appname, size_t *fw_size, struct qtee_shm *shm)
{
int rc = 0;
const struct firmware *fw_entry = NULL, *fw_entry00 = NULL, *fw_entrylast = NULL;
char fw_name[MAX_FW_APP_SIZE + FILE_EXT_SIZE] = "\0";
int num_images = 0, phi = 0;
unsigned char app_arch = 0;
u8 *img_data_ptr = NULL;
size_t bufferOffset = 0, phdr_table_offset = 0;
size_t *offset = NULL;
Elf32_Phdr phdr32;
Elf64_Phdr phdr64;
struct elf32_hdr *ehdr = NULL;
struct elf64_hdr *ehdr64 = NULL;
/* load b00*/
snprintf(fw_name, sizeof(fw_name), "%s.b00", appname);
rc = firmware_request_nowarn(&fw_entry00, fw_name, class_dev);
if (rc) {
pr_err("Load %s failed, ret:%d\n", fw_name, rc);
return NULL;
}
app_arch = *(unsigned char *)(fw_entry00->data + EI_CLASS);
/* Get the offsets of the split images from the ELF program headers */
if (app_arch == ELFCLASS32) {
ehdr = (struct elf32_hdr *)fw_entry00->data;
num_images = ehdr->e_phnum;
offset = kcalloc(num_images, sizeof(size_t), GFP_KERNEL);
if (offset == NULL)
goto release_fw_entry00;
phdr_table_offset = (size_t) ehdr->e_phoff;
for (phi = 1; phi < num_images; ++phi) {
bufferOffset = phdr_table_offset + phi * sizeof(Elf32_Phdr);
phdr32 = *(Elf32_Phdr *)(fw_entry00->data + bufferOffset);
offset[phi] = (size_t)phdr32.p_offset;
}
} else if (app_arch == ELFCLASS64) {
ehdr64 = (struct elf64_hdr *)fw_entry00->data;
num_images = ehdr64->e_phnum;
offset = kcalloc(num_images, sizeof(size_t), GFP_KERNEL);
if (offset == NULL)
goto release_fw_entry00;
phdr_table_offset = (size_t) ehdr64->e_phoff;
for (phi = 1; phi < num_images; ++phi) {
bufferOffset = phdr_table_offset + phi * sizeof(Elf64_Phdr);
phdr64 = *(Elf64_Phdr *)(fw_entry00->data + bufferOffset);
offset[phi] = (size_t)phdr64.p_offset;
}
} else {
pr_err("QSEE %s app, arch %u is not supported\n", appname, app_arch);
goto release_fw_entry00;
}
/* Find the size of the last split bin image */
snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, num_images-1);
rc = firmware_request_nowarn(&fw_entrylast, fw_name, class_dev);
if (rc) {
pr_err("Failed to locate blob %s\n", fw_name);
goto release_fw_entry00;
}
/* Total image size = offset of the last split image + size of the last split image */
*fw_size = fw_entrylast->size + offset[num_images-1];
/* Allocate memory for the buffer that will hold the reassembled image */
rc = qtee_shmbridge_allocate_shm((*fw_size), shm);
if (rc) {
pr_err("smbridge alloc failed for size: %zu\n", *fw_size);
goto release_fw_entrylast;
}
img_data_ptr = shm->vaddr;
/*
* Copy contents of split bins to the buffer
*/
memcpy(img_data_ptr, fw_entry00->data, fw_entry00->size);
for (phi = 1; phi < num_images-1; phi++) {
snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, phi);
rc = firmware_request_nowarn(&fw_entry, fw_name, class_dev);
if (rc) {
pr_err("Failed to locate blob %s\n", fw_name);
qtee_shmbridge_free_shm(shm);
img_data_ptr = NULL;
goto release_fw_entrylast;
}
memcpy(img_data_ptr + offset[phi], fw_entry->data, fw_entry->size);
release_firmware(fw_entry);
fw_entry = NULL;
}
memcpy(img_data_ptr + offset[phi], fw_entrylast->data, fw_entrylast->size);
release_fw_entrylast:
release_firmware(fw_entrylast);
release_fw_entry00:
release_firmware(fw_entry00);
kfree(offset);
return img_data_ptr;
}
EXPORT_SYMBOL(firmware_request_from_smcinvoke);

View File

@ -0,0 +1,502 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM smcinvoke
#if !defined(_TRACE_SMCINVOKE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SMCINVOKE_H
#include <linux/types.h>
#include <linux/tracepoint.h>
#include "smcinvoke.h"
TRACE_EVENT(put_pending_cbobj_locked,
TP_PROTO(uint16_t srvr_id, uint16_t obj_id),
TP_ARGS(srvr_id, obj_id),
TP_STRUCT__entry(
__field(uint16_t, srvr_id)
__field(uint16_t, obj_id)
),
TP_fast_assign(
__entry->srvr_id = srvr_id;
__entry->obj_id = obj_id;
),
TP_printk("srvr_id=0x%x obj_id=0x%x",
__entry->srvr_id, __entry->obj_id)
);
TRACE_EVENT(release_mem_obj_locked,
TP_PROTO(uint32_t tzhandle, size_t buf_len),
TP_ARGS(tzhandle, buf_len),
TP_STRUCT__entry(
__field(uint32_t, tzhandle)
__field(size_t, buf_len)
),
TP_fast_assign(
__entry->tzhandle = tzhandle;
__entry->buf_len = buf_len;
),
TP_printk("tzhandle=0x%08x, buf_len=%zu",
__entry->tzhandle, __entry->buf_len)
);
TRACE_EVENT(invoke_cmd_handler,
TP_PROTO(int cmd, uint64_t response_type, int32_t result, int ret),
TP_ARGS(cmd, response_type, result, ret),
TP_STRUCT__entry(
__field(int, cmd)
__field(uint64_t, response_type)
__field(int32_t, result)
__field(int, ret)
),
TP_fast_assign(
__entry->response_type = response_type;
__entry->result = result;
__entry->ret = ret;
__entry->cmd = cmd;
),
TP_printk("cmd=0x%x (%d), response_type=%llu, result=0x%x (%d), ret=%d",
__entry->cmd, __entry->cmd, __entry->response_type,
__entry->result, __entry->result, __entry->ret)
);
TRACE_EVENT(process_tzcb_req_handle,
TP_PROTO(uint32_t tzhandle, uint32_t op, uint32_t counts),
TP_ARGS(tzhandle, op, counts),
TP_STRUCT__entry(
__field(uint32_t, tzhandle)
__field(uint32_t, op)
__field(uint32_t, counts)
),
TP_fast_assign(
__entry->tzhandle = tzhandle;
__entry->op = op;
__entry->counts = counts;
),
TP_printk("tzhandle=0x%08x op=0x%02x counts=0x%04x",
__entry->tzhandle, __entry->op, __entry->counts)
);
TRACE_EVENT(process_tzcb_req_wait,
TP_PROTO(uint32_t tzhandle, int cbobj_retries, uint32_t txn_id, pid_t pid, pid_t tgid,
uint16_t server_state, uint16_t server_id, unsigned int cb_reqs_inflight),
TP_ARGS(tzhandle, cbobj_retries, txn_id, pid, tgid, server_state, server_id,
cb_reqs_inflight),
TP_STRUCT__entry(
__field(uint32_t, tzhandle)
__field(int, cbobj_retries)
__field(uint32_t, txn_id)
__field(pid_t, pid)
__field(pid_t, tgid)
__field(uint16_t, server_state)
__field(uint16_t, server_id)
__field(unsigned int, cb_reqs_inflight)
),
TP_fast_assign(
__entry->tzhandle = tzhandle;
__entry->cbobj_retries = cbobj_retries;
__entry->txn_id = txn_id;
__entry->pid = pid;
__entry->tgid = tgid;
__entry->server_state = server_state;
__entry->server_id = server_id;
__entry->cb_reqs_inflight = cb_reqs_inflight;
),
TP_printk("tzhandle=0x%08x, retries=%d, txn_id=%d, pid %x,tid %x, srvr state=%d, server_id=0x%x, cb_reqs_inflight=%d",
__entry->tzhandle, __entry->cbobj_retries, __entry->txn_id,
__entry->pid, __entry->tgid, __entry->server_state,
__entry->server_id, __entry->cb_reqs_inflight)
);
TRACE_EVENT(process_tzcb_req_result,
TP_PROTO(int32_t result, uint32_t tzhandle, uint32_t op, uint32_t counts,
unsigned int cb_reqs_inflight),
TP_ARGS(result, tzhandle, op, counts, cb_reqs_inflight),
TP_STRUCT__entry(
__field(int32_t, result)
__field(uint32_t, tzhandle)
__field(uint32_t, op)
__field(uint32_t, counts)
__field(unsigned int, cb_reqs_inflight)
),
TP_fast_assign(
__entry->result = result;
__entry->tzhandle = tzhandle;
__entry->op = op;
__entry->counts = counts;
__entry->cb_reqs_inflight = cb_reqs_inflight;
),
TP_printk("result=%d tzhandle=0x%08x op=0x%02x counts=0x%04x, cb_reqs_inflight=%d",
__entry->result, __entry->tzhandle, __entry->op, __entry->counts,
__entry->cb_reqs_inflight)
);
TRACE_EVENT(marshal_out_invoke_req,
TP_PROTO(int i, uint32_t tzhandle, uint16_t server, uint32_t fd),
TP_ARGS(i, tzhandle, server, fd),
TP_STRUCT__entry(
__field(int, i)
__field(uint32_t, tzhandle)
__field(uint16_t, server)
__field(uint32_t, fd)
),
TP_fast_assign(
__entry->i = i;
__entry->tzhandle = tzhandle;
__entry->server = server;
__entry->fd = fd;
),
TP_printk("OO[%d]: tzhandle=0x%x server=0x%x fd=0x%x",
__entry->i, __entry->tzhandle, __entry->server, __entry->fd)
);
TRACE_EVENT(prepare_send_scm_msg,
TP_PROTO(uint64_t response_type, int32_t result),
TP_ARGS(response_type, result),
TP_STRUCT__entry(
__field(uint64_t, response_type)
__field(int32_t, result)
),
TP_fast_assign(
__entry->response_type = response_type;
__entry->result = result;
),
TP_printk("response_type=%llu (%llu), result=0x%x (%d)",
__entry->response_type, __entry->response_type,
__entry->result, __entry->result)
);
TRACE_EVENT(marshal_in_invoke_req,
TP_PROTO(int i, int64_t fd, int32_t cb_server_fd, uint32_t tzhandle),
TP_ARGS(i, fd, cb_server_fd, tzhandle),
TP_STRUCT__entry(
__field(int, i)
__field(int64_t, fd)
__field(int32_t, cb_server_fd)
__field(uint32_t, tzhandle)
),
TP_fast_assign(
__entry->i = i;
__entry->fd = fd;
__entry->cb_server_fd = cb_server_fd;
__entry->tzhandle = tzhandle;
),
TP_printk("OI[%d]: fd=%lld cb_server_fd=0x%x tzhandle=0x%x",
__entry->i, __entry->fd, __entry->cb_server_fd, __entry->tzhandle)
);
TRACE_EVENT(marshal_in_tzcb_req_handle,
TP_PROTO(uint32_t tzhandle, int srvr_id, int32_t cbobj_id, uint32_t op, uint32_t counts),
TP_ARGS(tzhandle, srvr_id, cbobj_id, op, counts),
TP_STRUCT__entry(
__field(uint32_t, tzhandle)
__field(int, srvr_id)
__field(int32_t, cbobj_id)
__field(uint32_t, op)
__field(uint32_t, counts)
),
TP_fast_assign(
__entry->tzhandle = tzhandle;
__entry->srvr_id = srvr_id;
__entry->cbobj_id = cbobj_id;
__entry->op = op;
__entry->counts = counts;
),
TP_printk("tzhandle=0x%x srvr_id=0x%x cbobj_id=0x%08x op=0x%02x counts=0x%04x",
__entry->tzhandle, __entry->srvr_id, __entry->cbobj_id,
__entry->op, __entry->counts)
);
TRACE_EVENT(marshal_in_tzcb_req_fd,
TP_PROTO(int i, uint32_t tzhandle, int srvr_id, int32_t fd),
TP_ARGS(i, tzhandle, srvr_id, fd),
TP_STRUCT__entry(
__field(int, i)
__field(uint32_t, tzhandle)
__field(int, srvr_id)
__field(int32_t, fd)
),
TP_fast_assign(
__entry->i = i;
__entry->tzhandle = tzhandle;
__entry->srvr_id = srvr_id;
__entry->fd = fd;
),
TP_printk("OI[%d]: tzhandle=0x%x srvr_id=0x%x fd=0x%x",
__entry->i, __entry->tzhandle, __entry->srvr_id, __entry->fd)
);
TRACE_EVENT(marshal_out_tzcb_req,
TP_PROTO(int i, int32_t fd, int32_t cb_server_fd, uint32_t tzhandle),
TP_ARGS(i, fd, cb_server_fd, tzhandle),
TP_STRUCT__entry(
__field(int, i)
__field(int32_t, fd)
__field(int32_t, cb_server_fd)
__field(uint32_t, tzhandle)
),
TP_fast_assign(
__entry->i = i;
__entry->fd = fd;
__entry->cb_server_fd = cb_server_fd;
__entry->tzhandle = tzhandle;
),
TP_printk("OO[%d]: fd=0x%x cb_server_fd=0x%x tzhandle=0x%x",
__entry->i, __entry->fd, __entry->cb_server_fd, __entry->tzhandle)
);
TRACE_EVENT(process_invoke_req_tzhandle,
TP_PROTO(uint32_t tzhandle, uint32_t op, uint32_t counts),
TP_ARGS(tzhandle, op, counts),
TP_STRUCT__entry(
__field(uint32_t, tzhandle)
__field(uint32_t, op)
__field(uint32_t, counts)
),
TP_fast_assign(
__entry->tzhandle = tzhandle;
__entry->op = op;
__entry->counts = counts;
),
TP_printk("tzhandle=0x%08x op=0x%02x counts=0x%04x",
__entry->tzhandle, __entry->op, __entry->counts)
);
TRACE_EVENT(process_invoke_req_result,
TP_PROTO(int ret, int32_t result, uint32_t tzhandle, uint32_t op, uint32_t counts),
TP_ARGS(ret, result, tzhandle, op, counts),
TP_STRUCT__entry(
__field(int, ret)
__field(int32_t, result)
__field(uint32_t, tzhandle)
__field(uint32_t, op)
__field(uint32_t, counts)
),
TP_fast_assign(
__entry->ret = ret;
__entry->result = result;
__entry->tzhandle = tzhandle;
__entry->op = op;
__entry->counts = counts;
),
TP_printk("ret=%d result=%d tzhandle=0x%08x op=0x%02x counts=0x%04x",
__entry->ret, __entry->result, __entry->tzhandle,
__entry->op, __entry->counts)
);
TRACE_EVENT(process_log_info,
TP_PROTO(char *buf, uint32_t context_type, uint32_t tzhandle),
TP_ARGS(buf, context_type, tzhandle),
TP_STRUCT__entry(
__string(str, buf)
__field(uint32_t, context_type)
__field(uint32_t, tzhandle)
),
TP_fast_assign(
__assign_str(str, buf);
__entry->context_type = context_type;
__entry->tzhandle = tzhandle;
),
TP_printk("%s context_type=%d tzhandle=0x%08x",
__get_str(str),
__entry->context_type, __entry->tzhandle)
);
TRACE_EVENT_CONDITION(smcinvoke_ioctl,
TP_PROTO(unsigned int cmd, long ret),
TP_ARGS(cmd, ret),
TP_CONDITION(ret),
TP_STRUCT__entry(
__field(unsigned int, cmd)
__field(long, ret)
),
TP_fast_assign(
__entry->cmd = cmd;
__entry->ret = ret;
),
TP_printk("cmd=%s ret=%ld",
__print_symbolic(__entry->cmd,
{SMCINVOKE_IOCTL_INVOKE_REQ, "SMCINVOKE_IOCTL_INVOKE_REQ"},
{SMCINVOKE_IOCTL_ACCEPT_REQ, "SMCINVOKE_IOCTL_ACCEPT_REQ"},
{SMCINVOKE_IOCTL_SERVER_REQ, "SMCINVOKE_IOCTL_SERVER_REQ"},
{SMCINVOKE_IOCTL_ACK_LOCAL_OBJ, "SMCINVOKE_IOCTL_ACK_LOCAL_OBJ"},
{SMCINVOKE_IOCTL_LOG, "SMCINVOKE_IOCTL_LOG"}
), __entry->ret)
);
TRACE_EVENT(smcinvoke_create_bridge,
TP_PROTO(uint64_t shmbridge_handle, uint16_t mem_region_id),
TP_ARGS(shmbridge_handle, mem_region_id),
TP_STRUCT__entry(
__field(uint64_t, shmbridge_handle)
__field(uint16_t, mem_region_id)
),
TP_fast_assign(
__entry->shmbridge_handle = shmbridge_handle;
__entry->mem_region_id = mem_region_id;
),
TP_printk("created shm bridge handle %llu for mem_region_id %u",
__entry->shmbridge_handle, __entry->mem_region_id)
);
TRACE_EVENT(status,
TP_PROTO(const char *func, const char *status),
TP_ARGS(func, status),
TP_STRUCT__entry(
__string(str, func)
__string(str2, status)
),
TP_fast_assign(
__assign_str(str, func);
__assign_str(str2, status);
),
TP_printk("%s status=%s", __get_str(str), __get_str(str2))
);
TRACE_EVENT(process_accept_req_has_response,
TP_PROTO(pid_t pid, pid_t tgid),
TP_ARGS(pid, tgid),
TP_STRUCT__entry(
__field(pid_t, pid)
__field(pid_t, tgid)
),
TP_fast_assign(
__entry->pid = pid;
__entry->tgid = tgid;
),
TP_printk("pid=0x%x, tgid=0x%x", __entry->pid, __entry->tgid)
);
TRACE_EVENT(process_accept_req_ret,
TP_PROTO(pid_t pid, pid_t tgid, int ret),
TP_ARGS(pid, tgid, ret),
TP_STRUCT__entry(
__field(pid_t, pid)
__field(pid_t, tgid)
__field(int, ret)
),
TP_fast_assign(
__entry->pid = pid;
__entry->tgid = tgid;
__entry->ret = ret;
),
TP_printk("pid=0x%x tgid=0x%x ret=%d", __entry->pid, __entry->tgid, __entry->ret)
);
TRACE_EVENT(process_accept_req_placed,
TP_PROTO(pid_t pid, pid_t tgid),
TP_ARGS(pid, tgid),
TP_STRUCT__entry(
__field(pid_t, pid)
__field(pid_t, tgid)
),
TP_fast_assign(
__entry->pid = pid;
__entry->tgid = tgid;
),
TP_printk("pid=0x%x, tgid=0x%x", __entry->pid, __entry->tgid)
);
TRACE_EVENT(process_invoke_request_from_kernel_client,
TP_PROTO(int fd, struct file *filp, int f_count),
TP_ARGS(fd, filp, f_count),
TP_STRUCT__entry(
__field(int, fd)
__field(struct file*, filp)
__field(int, f_count)
),
TP_fast_assign(
__entry->fd = fd;
__entry->filp = filp;
__entry->f_count = f_count;
),
TP_printk("fd=%d, filp=%p, f_count=%d",
__entry->fd,
__entry->filp,
__entry->f_count)
);
TRACE_EVENT(smcinvoke_release_filp,
TP_PROTO(struct files_struct *files, struct file *filp,
int f_count, uint32_t context_type),
TP_ARGS(files, filp, f_count, context_type),
TP_STRUCT__entry(
__field(struct files_struct*, files)
__field(struct file*, filp)
__field(int, f_count)
__field(uint32_t, context_type)
),
TP_fast_assign(
__entry->files = files;
__entry->filp = filp;
__entry->f_count = f_count;
__entry->context_type = context_type;
),
TP_printk("files=%p, filp=%p, f_count=%u, cxt_type=%d",
__entry->files,
__entry->filp,
__entry->f_count,
__entry->context_type)
);
TRACE_EVENT(smcinvoke_release_from_kernel_client,
TP_PROTO(struct files_struct *files, struct file *filp, int f_count),
TP_ARGS(files, filp, f_count),
TP_STRUCT__entry(
__field(struct files_struct*, files)
__field(struct file*, filp)
__field(int, f_count)
),
TP_fast_assign(
__entry->files = files;
__entry->filp = filp;
__entry->f_count = f_count;
),
TP_printk("files=%p, filp=%p, f_count=%u",
__entry->files,
__entry->filp,
__entry->f_count)
);
TRACE_EVENT(smcinvoke_release,
TP_PROTO(struct files_struct *files, struct file *filp,
int f_count, void *private_data),
TP_ARGS(files, filp, f_count, private_data),
TP_STRUCT__entry(
__field(struct files_struct*, files)
__field(struct file*, filp)
__field(int, f_count)
__field(void*, private_data)
),
TP_fast_assign(
__entry->files = files;
__entry->filp = filp;
__entry->f_count = f_count;
__entry->private_data = private_data;
),
TP_printk("files=%p, filp=%p, f_count=%d, private_data=%p",
__entry->files,
__entry->filp,
__entry->f_count,
__entry->private_data)
);
#endif /* _TRACE_SMCINVOKE_H */
/*
* Path must be relative to location of 'define_trace.h' header in kernel
* Define path if not defined in bazel file
*/
#ifndef SMCINVOKE_TRACE_INCLUDE_PATH
#define SMCINVOKE_TRACE_INCLUDE_PATH ../../../../vendor/qcom/opensource/securemsm-kernel/smcinvoke
#endif
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH SMCINVOKE_TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace_smcinvoke
/* This part must be outside protection */
#include <trace/define_trace.h>
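The tracepoints above follow the standard Linux TRACE_EVENT pattern: each definition expands into a trace_<name>() emitter once a single translation unit defines CREATE_TRACE_POINTS before including this header. A minimal sketch of that usage, with illustrative argument values (the real call sites live in the smcinvoke driver itself):

#define CREATE_TRACE_POINTS
#include "trace_smcinvoke.h"

static void example_emit(void)
{
	/* A plain TRACE_EVENT records whenever the event is enabled */
	trace_process_invoke_req_tzhandle(0x12345678, 0x03, 0x0045);

	/*
	 * TRACE_EVENT_CONDITION(smcinvoke_ioctl, ...) records only when
	 * TP_CONDITION(ret) is nonzero, i.e. for failing ioctls.
	 */
	trace_smcinvoke_ioctl(SMCINVOKE_IOCTL_LOG, -EINVAL);
}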


@@ -0,0 +1,58 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __QTI_SMMU_PROXY_UAPI_H_
#define __QTI_SMMU_PROXY_UAPI_H_
#include <linux/types.h>
#define QTI_SMMU_PROXY_CAMERA_CB 0
#define QTI_SMMU_PROXY_DISPLAY_CB 1
#define QTI_SMMU_PROXY_EVA_CB 2
#define QTI_SMMU_PROXY_IOC_BASE 0x55
struct csf_version {
__u32 arch_ver;
__u32 max_ver;
__u32 min_ver;
__u32 padding;
};
#define QTI_SMMU_PROXY_GET_VERSION_IOCTL _IOR(QTI_SMMU_PROXY_IOC_BASE, 0, \
struct csf_version)
struct smmu_proxy_acl_ctl {
__u32 dma_buf_fd;
__u32 padding;
};
#define QTI_SMMU_PROXY_AC_LOCK_BUFFER _IOW(QTI_SMMU_PROXY_IOC_BASE, 1, \
struct smmu_proxy_acl_ctl)
#define QTI_SMMU_PROXY_AC_UNLOCK_BUFFER _IOW(QTI_SMMU_PROXY_IOC_BASE, 2, \
struct smmu_proxy_acl_ctl)
struct smmu_proxy_wipe_buf_ctl {
__u64 context_bank_id_array;
__u32 num_cb_ids;
__u32 padding;
};
#define QTI_SMMU_PROXY_WIPE_BUFFERS _IOW(QTI_SMMU_PROXY_IOC_BASE, 3, \
struct smmu_proxy_wipe_buf_ctl)
struct smmu_proxy_get_dma_buf_ctl {
/*
* memparcel_hdl only needs to be 32-bit for Gunyah, but a 64-bit value
* is needed to remain forward compatible with FF-A.
*/
__u64 memparcel_hdl;
__u32 dma_buf_fd;
__u32 padding;
};
#define QTI_SMMU_PROXY_GET_DMA_BUF _IOWR(QTI_SMMU_PROXY_IOC_BASE, 4, \
struct smmu_proxy_get_dma_buf_ctl)
#endif /* __QTI_SMMU_PROXY_UAPI_H_ */
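For illustration, a hedged userspace sketch that queries the CSF version through this UAPI. The /dev/qti-smmu-proxy node name follows from the device_create() call later in this commit; the installed header path is an assumption, and error handling is abbreviated:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/qti-smmu-proxy.h>

int main(void)
{
	struct csf_version ver;
	int fd = open("/dev/qti-smmu-proxy", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, QTI_SMMU_PROXY_GET_VERSION_IOCTL, &ver) == 0)
		printf("CSF %u.%u.%u\n", ver.arch_ver, ver.max_ver, ver.min_ver);
	close(fd);
	return 0;
}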


@@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __QTI_SMMU_PROXY_H_
#define __QTI_SMMU_PROXY_H_
#include <linux/dma-buf.h>
#include <linux/scatterlist.h>
#include <linux/align.h>
#include <smmu-proxy/include/uapi/linux/qti-smmu-proxy.h>
#define SMMU_PROXY_MEM_ALIGNMENT (1 << 21)
int smmu_proxy_get_csf_version(struct csf_version *csf_version);
#endif /* __QTI_SMMU_PROXY_H_ */
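A sketch of how an in-kernel client might use the one exported helper above to decide whether the proxy path applies; the 2.5.x test mirrors the sender probe logic later in this commit, and the fallback policy on error is purely illustrative:

#include <linux/qti-smmu-proxy.h>

static bool example_smmu_proxy_needed(void)
{
	struct csf_version ver;

	if (smmu_proxy_get_csf_version(&ver))
		return false; /* illustrative: treat lookup failure as no proxy */

	/* The proxy map/unmap callbacks are only registered for CSF 2.5.x */
	return ver.arch_ver == 2 && ver.max_ver == 5;
}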


@@ -0,0 +1,113 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/cdev.h>
#include <linux/version.h>
#include "qti-smmu-proxy-common.h"
#include "smcinvoke_object.h"
#include "../include/linux/ITrustedCameraDriver.h"
#include "../include/linux/CTrustedCameraDriver.h"
#include "../include/linux/IClientEnv.h"
#define SMMU_PROXY_MAX_DEVS 1
static dev_t smmu_proxy_dev_no;
static struct class *smmu_proxy_class;
static struct cdev smmu_proxy_char_dev;
static struct csf_version cached_csf_version;
int smmu_proxy_get_csf_version(struct csf_version *csf_version)
{
int ret;
struct Object client_env = {0};
struct Object sc_object;
/* Assumption: if cached_csf_version.arch_ver != 0, the other fields are set too */
if (cached_csf_version.arch_ver != 0) {
csf_version->arch_ver = cached_csf_version.arch_ver;
csf_version->max_ver = cached_csf_version.max_ver;
csf_version->min_ver = cached_csf_version.min_ver;
return 0;
}
ret = get_client_env_object(&client_env);
if (ret) {
pr_err("%s: Failed to get env object rc: %d\n", __func__,
ret);
return ret;
}
ret = IClientEnv_open(client_env, CTrustedCameraDriver_UID, &sc_object);
if (ret) {
pr_err("%s: Failed to get seccam object rc: %d\n", __func__,
ret);
Object_release(client_env);
return ret;
}
ret = ITrustedCameraDriver_getVersion(sc_object, &csf_version->arch_ver,
&csf_version->max_ver,
&csf_version->min_ver);
Object_release(sc_object);
Object_release(client_env);
/*
* arch_ver is written last so that a concurrent caller that observes a
* nonzero arch_ver also sees fully initialized min_ver and max_ver.
*/
cached_csf_version.min_ver = csf_version->min_ver;
cached_csf_version.max_ver = csf_version->max_ver;
cached_csf_version.arch_ver = csf_version->arch_ver;
return ret;
}
EXPORT_SYMBOL(smmu_proxy_get_csf_version);
int smmu_proxy_create_dev(const struct file_operations *fops)
{
int ret;
struct device *class_dev;
ret = alloc_chrdev_region(&smmu_proxy_dev_no, 0, SMMU_PROXY_MAX_DEVS,
"qti-smmu-proxy");
if (ret < 0)
return ret;
#if (KERNEL_VERSION(6, 3, 0) <= LINUX_VERSION_CODE)
smmu_proxy_class = class_create("qti-smmu-proxy");
#else
smmu_proxy_class = class_create(THIS_MODULE, "qti-smmu-proxy");
#endif
if (IS_ERR(smmu_proxy_class)) {
ret = PTR_ERR(smmu_proxy_class);
goto err_class_create;
}
cdev_init(&smmu_proxy_char_dev, fops);
ret = cdev_add(&smmu_proxy_char_dev, smmu_proxy_dev_no,
SMMU_PROXY_MAX_DEVS);
if (ret < 0)
goto err_cdev_add;
class_dev = device_create(smmu_proxy_class, NULL, smmu_proxy_dev_no, NULL,
"qti-smmu-proxy");
if (IS_ERR(class_dev)) {
ret = PTR_ERR(class_dev);
goto err_dev_create;
}
return 0;
err_dev_create:
cdev_del(&smmu_proxy_char_dev);
err_cdev_add:
class_destroy(smmu_proxy_class);
err_class_create:
unregister_chrdev_region(smmu_proxy_dev_no, SMMU_PROXY_MAX_DEVS);
return ret;
}
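smmu_proxy_create_dev() leaves the ioctl surface to its caller. A minimal sketch of the file_operations a caller would hand in, matching the shape both the sender and receiver variants use below (example_ioctl and example_fops are hypothetical names):

static long example_ioctl(struct file *filp, unsigned int cmd,
			  unsigned long arg)
{
	return -ENOTTY; /* dispatch on cmd here */
}

static const struct file_operations example_fops = {
	.unlocked_ioctl = example_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};

/* In probe: ret = smmu_proxy_create_dev(&example_fops); */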


@@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __QTI_SMMU_PROXY_COMMON_H_
#define __QTI_SMMU_PROXY_COMMON_H_
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/cdev.h>
#include <linux/dma-buf.h>
#include <linux/mem-buf.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/gunyah/gh_msgq.h>
#include "qti-smmu-proxy-msgq.h"
#include "linux/qti-smmu-proxy.h"
union smmu_proxy_ioctl_arg {
struct csf_version csf_version;
struct smmu_proxy_acl_ctl acl_ctl;
struct smmu_proxy_wipe_buf_ctl wipe_buf_ctl;
struct smmu_proxy_get_dma_buf_ctl get_dma_buf_ctl;
};
int smmu_proxy_create_dev(const struct file_operations *fops);
#endif /* __QTI_SMMU_PROXY_COMMON_H_ */


@@ -0,0 +1,107 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef SMMU_PROXY_MSGQ_H
#define SMMU_PROXY_MSGQ_H
#include <linux/gunyah/gh_rm_drv.h>
/**
* enum smmu_proxy_msg_type: Message types used by the SMMU proxy driver for
* communication.
* @SMMU_PROXY_MAP: The message is a request to map memory into the VM's
* SMMU.
* @SMMU_PROXY_MAP_RESP: The message is a response from a remote VM to a
* mapping request issued by the receiving VM
* @SMMU_PROXY_UNMAP: The message is a request to unmap some previously
* SMMU-mapped memory from the VM
* @SMMU_PROXY_UNMAP_RESP: The message is a response from a remote VM to an
* unmapping request issued by the receiving VM
* @SMMU_PROXY_ERR_RESP: The message is a response from a remote VM to give
* a generic error response for a prior message sent to the remote VM
*/
enum smmu_proxy_msg_type {
SMMU_PROXY_MAP,
SMMU_PROXY_MAP_RESP,
SMMU_PROXY_UNMAP,
SMMU_PROXY_UNMAP_RESP,
SMMU_PROXY_ERR_RESP,
SMMU_PROXY_MSG_MAX,
};
/**
* struct smmu_proxy_msg_hdr: The header for SMMU proxy messages
* @msg_type: The type of message.
* @msg_size: The size of message.
*/
struct smmu_proxy_msg_hdr {
u32 msg_type;
u32 msg_size;
} __packed;
/**
* struct smmu_proxy_resp_hdr: The header for responses to SMMU proxy messages
* @msg_type: The type of message.
* @msg_size: The size of message.
* @ret: Return code from remote VM
*/
struct smmu_proxy_resp_hdr {
u32 msg_type;
u32 msg_size;
s32 ret;
} __packed;
/**
* struct smmu_proxy_map_req: The message format for an SMMU mapping request from
* another VM.
* @hdr: Message header
* @hdl: The memparcel handle associated with the memory to be mapped in the SMMU
* of the relevant VM
* @cb_id: Context bank ID that we will map the memory associated with @hdl to
* @acl_desc: A GH ACL descriptor that describes the VMIDs that will be
* accessing the memory, as well as what permissions each VMID will have.
*/
struct smmu_proxy_map_req {
struct smmu_proxy_msg_hdr hdr;
u32 hdl;
u32 cb_id;
struct gh_acl_desc acl_desc;
} __packed;
/**
* struct smmu_proxy_map_resp: The message format for an SMMU mapping
* request response.
* @hdr: Response header
* @iova: IOVA of mapped memory
* @mapping_len: Length of the IOMMU IOVA mapping
*/
struct smmu_proxy_map_resp {
struct smmu_proxy_resp_hdr hdr;
u64 iova;
u64 mapping_len;
} __packed;
/**
* struct smmu_proxy_unmap_req: The message format for an SMMU unmapping request from
* another VM.
* @hdr: Message header
* @hdl: The memparcel handle associated with the memory to be mapped in the SMMU
* of the relevant VM
*/
struct smmu_proxy_unmap_req {
struct smmu_proxy_msg_hdr hdr;
u32 hdl;
} __packed;
/**
* struct smmu_proxy_unmap_resp: The message format for an SMMU unmapping
* request response.
* @hdr: Response header
*/
struct smmu_proxy_unmap_resp {
struct smmu_proxy_resp_hdr hdr;
} __packed;
#endif /* SMMU_PROXY_MSGQ_H */
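Because smmu_proxy_map_req ends in a flexible ACL array, its msg_size is computed with offsetof() over the populated entries rather than sizeof(). A hedged sketch of framing a two-entry request the way the sender below does (frame_map_req and its argument values are illustrative):

#include <linux/slab.h>
#include "qti-smmu-proxy-msgq.h"

static struct smmu_proxy_map_req *frame_map_req(u32 hdl, u32 cb_id)
{
	unsigned int n = 2; /* e.g. TVM + OEM VM, as in the sender */
	size_t len = offsetof(struct smmu_proxy_map_req,
			      acl_desc.acl_entries[n]);
	struct smmu_proxy_map_req *req = kzalloc(len, GFP_KERNEL);

	if (!req)
		return NULL;
	req->hdr.msg_type = SMMU_PROXY_MAP;
	req->hdr.msg_size = len;
	req->hdl = hdl;
	req->cb_id = cb_id;
	req->acl_desc.n_acl_entries = n;
	/* caller fills acl_entries[0..n-1] with VMID/permission pairs */
	return req;
}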


@@ -0,0 +1,323 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "qti-smmu-proxy-common.h"
#include <linux/qti-smmu-proxy-callbacks.h>
#include <linux/qcom-dma-mapping.h>
#include <linux/of.h>
static void *msgq_hdl;
DEFINE_MUTEX(sender_mutex);
static const struct file_operations smmu_proxy_dev_fops;
int smmu_proxy_unmap(void *data)
{
struct dma_buf *dmabuf;
void *buf;
size_t size;
int ret;
struct smmu_proxy_unmap_req *req;
struct smmu_proxy_unmap_resp *resp;
mutex_lock(&sender_mutex);
buf = kzalloc(GH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
pr_err("%s: Failed to allocate memory!\n", __func__);
goto out;
}
req = buf;
dmabuf = data;
ret = mem_buf_dma_buf_get_memparcel_hdl(dmabuf, &req->hdl);
if (ret) {
pr_err("%s: Failed to get memparcel handle rc: %d\n", __func__, ret);
goto free_buf;
}
req->hdr.msg_type = SMMU_PROXY_UNMAP;
req->hdr.msg_size = sizeof(*req);
ret = gh_msgq_send(msgq_hdl, (void *) req, req->hdr.msg_size, 0);
if (ret < 0) {
pr_err("%s: failed to send message rc: %d\n", __func__, ret);
goto free_buf;
}
/*
* No need to validate size - gh_msgq_recv() ensures that sizeof(*resp) <
* GH_MSGQ_MAX_MSG_SIZE_BYTES
*/
ret = gh_msgq_recv(msgq_hdl, buf, sizeof(*resp), &size, 0);
if (ret < 0) {
pr_err_ratelimited("%s: failed to receive message rc: %d\n", __func__, ret);
goto free_buf;
}
resp = buf;
if (resp->hdr.ret) {
ret = resp->hdr.ret;
pr_err("%s: Unmap call failed on remote VM, rc: %d\n", __func__,
resp->hdr.ret);
}
free_buf:
kfree(buf);
out:
mutex_unlock(&sender_mutex);
return ret;
}
int smmu_proxy_map(struct device *client_dev, struct sg_table *proxy_iova,
struct dma_buf *dmabuf)
{
void *buf;
size_t size;
int ret = 0;
int n_acl_entries, i;
int vmids[2] = { VMID_TVM, VMID_OEMVM };
int perms[2] = { PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE};
struct csf_version csf_version;
struct mem_buf_lend_kernel_arg arg = {0};
struct smmu_proxy_map_req *req;
struct smmu_proxy_map_resp *resp;
ret = smmu_proxy_get_csf_version(&csf_version);
if (ret) {
return ret;
}
/*
* We enter this function iff the CSF version is 2.5.*. If CSF 2.5.1
* is in use, we set n_acl_entries to two, in order to assign this
* memory to the TVM and OEM VM. If CSF 2.5.0 is in use, we just assign
* it to the TVM.
*/
n_acl_entries = csf_version.min_ver == 1 ? 2 : 1;
mutex_lock(&sender_mutex);
buf = kzalloc(GH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto out;
}
if (mem_buf_dma_buf_exclusive_owner(dmabuf)) {
arg.vmids = vmids;
arg.perms = perms;
arg.nr_acl_entries = n_acl_entries;
ret = mem_buf_lend(dmabuf, &arg);
if (ret) {
pr_err("%s: Failed to lend buf rc: %d\n", __func__, ret);
goto free_buf;
}
}
/* Prepare the message */
req = buf;
req->acl_desc.n_acl_entries = n_acl_entries;
for (i = 0; i < n_acl_entries; i++) {
req->acl_desc.acl_entries[i].vmid = vmids[i];
req->acl_desc.acl_entries[i].perms = perms[i];
}
ret = mem_buf_dma_buf_get_memparcel_hdl(dmabuf, &req->hdl);
if (ret) {
pr_err("%s: Failed to get memparcel handle rc: %d\n", __func__, ret);
goto free_buf;
}
ret = of_property_read_u32(client_dev->of_node,
"qti,smmu-proxy-cb-id",
&req->cb_id);
if (ret) {
dev_err(client_dev, "%s: Err reading 'qti,smmu-proxy-cb-id' rc: %d\n",
__func__, ret);
goto free_buf;
}
req->hdr.msg_type = SMMU_PROXY_MAP;
req->hdr.msg_size = offsetof(struct smmu_proxy_map_req,
acl_desc.acl_entries[n_acl_entries]);
ret = gh_msgq_send(msgq_hdl, (void *) req, req->hdr.msg_size, 0);
if (ret < 0) {
pr_err("%s: failed to send message rc: %d\n", __func__, ret);
goto free_buf;
}
/*
* No need to validate size - gh_msgq_recv() ensures that sizeof(*resp) <
* GH_MSGQ_MAX_MSG_SIZE_BYTES
*/
ret = gh_msgq_recv(msgq_hdl, buf, sizeof(*resp), &size, 0);
if (ret < 0) {
pr_err_ratelimited("%s: failed to receive message rc: %d\n", __func__, ret);
goto free_buf;
}
resp = buf;
if (resp->hdr.ret) {
ret = resp->hdr.ret;
pr_err_ratelimited("%s: Map call failed on remote VM, rc: %d\n", __func__,
resp->hdr.ret);
goto free_buf;
}
ret = mem_buf_dma_buf_set_destructor(dmabuf, smmu_proxy_unmap, dmabuf);
if (ret) {
pr_err_ratelimited("%s: Failed to set vmperm destructor, rc: %d\n",
__func__, ret);
goto free_buf;
}
sg_dma_address(proxy_iova->sgl) = resp->iova;
sg_dma_len(proxy_iova->sgl) = resp->mapping_len;
/*
* We set the number of entries to one here, as we only allow the mapping to go
* through on the TVM if the sg_table returned by dma_buf_map_attachment has one
* entry.
*/
proxy_iova->nents = 1;
free_buf:
kfree(buf);
out:
mutex_unlock(&sender_mutex);
return ret;
}
void smmu_proxy_unmap_nop(struct device *client_dev, struct sg_table *table,
struct dma_buf *dmabuf)
{
}
static long smmu_proxy_dev_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
unsigned int dir = _IOC_DIR(cmd);
union smmu_proxy_ioctl_arg ioctl_arg;
int ret;
if (_IOC_SIZE(cmd) > sizeof(ioctl_arg))
return -EINVAL;
if (copy_from_user(&ioctl_arg, (void __user *)arg, _IOC_SIZE(cmd)))
return -EFAULT;
if (!(dir & _IOC_WRITE))
memset(&ioctl_arg, 0, sizeof(ioctl_arg));
switch (cmd) {
case QTI_SMMU_PROXY_GET_VERSION_IOCTL:
{
struct csf_version *csf_version =
&ioctl_arg.csf_version;
ret = smmu_proxy_get_csf_version(csf_version);
if (ret)
return ret;
break;
}
default:
return -ENOTTY;
}
if (dir & _IOC_READ) {
if (copy_to_user((void __user *)arg, &ioctl_arg,
_IOC_SIZE(cmd)))
return -EFAULT;
}
return 0;
}
static const struct file_operations smmu_proxy_dev_fops = {
.unlocked_ioctl = smmu_proxy_dev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
static int sender_probe_handler(struct platform_device *pdev)
{
int ret;
struct csf_version csf_version;
msgq_hdl = gh_msgq_register(GH_MSGQ_LABEL_SMMU_PROXY);
if (IS_ERR(msgq_hdl)) {
ret = PTR_ERR(msgq_hdl);
pr_err("%s: Queue registration failed rc: %ld!\n", __func__, PTR_ERR(msgq_hdl));
return ret;
}
ret = smmu_proxy_get_csf_version(&csf_version);
if (ret) {
pr_err("%s: Failed to get CSF version rc: %d\n", __func__, ret);
goto free_msgq;
}
if (csf_version.arch_ver == 2 && csf_version.max_ver == 0) {
ret = qti_smmu_proxy_register_callbacks(NULL, NULL);
} else if (csf_version.arch_ver == 2 && csf_version.max_ver == 5) {
ret = qti_smmu_proxy_register_callbacks(smmu_proxy_map, smmu_proxy_unmap_nop);
} else {
pr_err("%s: Invalid CSF version: %d.%d\n", __func__, csf_version.arch_ver,
csf_version.max_ver);
ret = -EINVAL;
goto free_msgq;
}
if (ret) {
pr_err("%s: Failed to set SMMU proxy callbacks rc: %d\n", __func__, ret);
goto free_msgq;
}
ret = smmu_proxy_create_dev(&smmu_proxy_dev_fops);
if (ret) {
pr_err("%s: Failed to create character device rc: %d\n", __func__,
ret);
goto set_callbacks_null;
}
return 0;
set_callbacks_null:
qti_smmu_proxy_register_callbacks(NULL, NULL);
free_msgq:
gh_msgq_unregister(msgq_hdl);
return ret;
}
static const struct of_device_id smmu_proxy_match_table[] = {
{.compatible = "smmu-proxy-sender"},
{},
};
static struct platform_driver smmu_proxy_driver = {
.probe = sender_probe_handler,
.driver = {
.name = "qti-smmu-proxy",
.of_match_table = smmu_proxy_match_table,
},
};
int __init init_smmu_proxy_driver(void)
{
return platform_driver_register(&smmu_proxy_driver);
}
module_init(init_smmu_proxy_driver);
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL v2");


@@ -0,0 +1,775 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kthread.h>
#include <linux/qcom-iommu-util.h>
#include <dt-bindings/arm/msm/qti-smmu-proxy-dt-ids.h>
#include "qti-smmu-proxy-common.h"
#define RECEIVER_COMPAT_STR "smmu-proxy-receiver"
#define CB_COMPAT_STR "smmu-proxy-cb"
static void *msgq_hdl;
struct smmu_proxy_buffer_cb_info {
bool mapped;
struct dma_buf_attachment *attachment;
struct sg_table *sg_table;
};
struct smmu_proxy_buffer_state {
bool locked;
struct smmu_proxy_buffer_cb_info cb_info[QTI_SMMU_PROXY_CB_IDS_LEN];
struct dma_buf *dmabuf;
};
static DEFINE_MUTEX(buffer_state_lock);
static DEFINE_XARRAY(buffer_state_arr);
static unsigned int cb_map_counts[QTI_SMMU_PROXY_CB_IDS_LEN] = { 0 };
struct device *cb_devices[QTI_SMMU_PROXY_CB_IDS_LEN] = { 0 };
struct task_struct *receiver_msgq_handler_thread;
static int zero_dma_buf(struct dma_buf *dmabuf)
{
int ret;
struct iosys_map vmap_struct = {0};
ret = dma_buf_vmap(dmabuf, &vmap_struct);
if (ret) {
pr_err("%s: dma_buf_vmap() failed with %d\n", __func__, ret);
return ret;
}
/* Use DMA_TO_DEVICE since we are not reading anything */
ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
if (ret) {
pr_err("%s: dma_buf_begin_cpu_access() failed with %d\n", __func__, ret);
goto unmap;
}
memset(vmap_struct.vaddr, 0, dmabuf->size);
ret = dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
if (ret)
pr_err("%s: dma_buf_end_cpu_access() failed with %d\n", __func__, ret);
unmap:
dma_buf_vunmap(dmabuf, &vmap_struct);
if (ret)
pr_err("%s: Failed to properly zero the DMA-BUF\n", __func__);
return ret;
}
static int iommu_unmap_and_relinquish(u32 hdl)
{
int cb_id, ret = 0;
struct smmu_proxy_buffer_state *buf_state;
mutex_lock(&buffer_state_lock);
buf_state = xa_load(&buffer_state_arr, hdl);
if (!buf_state) {
pr_err("%s: handle 0x%x unknown to proxy driver!\n", __func__, hdl);
ret = -EINVAL;
goto out;
}
if (buf_state->locked) {
pr_err("%s: handle 0x%x is locked!\n", __func__, hdl);
ret = -EINVAL;
goto out;
}
for (cb_id = 0; cb_id < QTI_SMMU_PROXY_CB_IDS_LEN; cb_id++) {
if (buf_state->cb_info[cb_id].mapped) {
dma_buf_unmap_attachment(buf_state->cb_info[cb_id].attachment,
buf_state->cb_info[cb_id].sg_table,
DMA_BIDIRECTIONAL);
dma_buf_detach(buf_state->dmabuf,
buf_state->cb_info[cb_id].attachment);
buf_state->cb_info[cb_id].mapped = false;
/* If nothing left is mapped for this CB, unprogram its SMR */
cb_map_counts[cb_id]--;
if (!cb_map_counts[cb_id]) {
ret = qcom_iommu_sid_switch(cb_devices[cb_id], SID_RELEASE);
if (ret) {
pr_err("%s: Failed to unprogram SMR for cb_id %d rc: %d\n",
__func__, cb_id, ret);
break;
}
}
}
}
ret = zero_dma_buf(buf_state->dmabuf);
if (!ret) {
dma_buf_put(buf_state->dmabuf);
flush_delayed_fput();
}
xa_erase(&buffer_state_arr, hdl);
kfree(buf_state);
out:
mutex_unlock(&buffer_state_lock);
return ret;
}
static int process_unmap_request(struct smmu_proxy_unmap_req *req, size_t size)
{
struct smmu_proxy_unmap_resp *resp;
int ret = 0;
resp = kzalloc(sizeof(*resp), GFP_KERNEL);
if (!resp) {
pr_err("%s: Failed to allocate memory for response\n", __func__);
return -ENOMEM;
}
ret = iommu_unmap_and_relinquish(req->hdl);
resp->hdr.msg_type = SMMU_PROXY_UNMAP_RESP;
resp->hdr.msg_size = sizeof(*resp);
resp->hdr.ret = ret;
ret = gh_msgq_send(msgq_hdl, resp, resp->hdr.msg_size, 0);
if (ret < 0)
pr_err("%s: failed to send response to mapping request rc: %d\n", __func__, ret);
else
pr_debug("%s: response to mapping request sent\n", __func__);
kfree(resp);
return ret;
}
static inline
struct sg_table *retrieve_and_iommu_map(struct mem_buf_retrieve_kernel_arg *retrieve_arg,
u32 cb_id)
{
int ret;
struct dma_buf *dmabuf;
bool new_buf = false;
struct smmu_proxy_buffer_state *buf_state;
struct dma_buf_attachment *attachment;
struct sg_table *table;
if (cb_id >= QTI_SMMU_PROXY_CB_IDS_LEN) {
pr_err("%s: CB ID %d too large\n", __func__, cb_id);
return ERR_PTR(-EINVAL);
}
if (!cb_devices[cb_id]) {
pr_err("%s: CB of ID %d not defined\n", __func__, cb_id);
return ERR_PTR(-EINVAL);
}
mutex_lock(&buffer_state_lock);
buf_state = xa_load(&buffer_state_arr, retrieve_arg->memparcel_hdl);
if (buf_state) {
if (buf_state->cb_info[cb_id].mapped) {
table = buf_state->cb_info[cb_id].sg_table;
goto unlock;
}
if (buf_state->locked) {
pr_err("%s: handle 0x%x is locked!\n", __func__,
retrieve_arg->memparcel_hdl);
ret = -EINVAL;
goto unlock_err;
}
dmabuf = buf_state->dmabuf;
} else {
new_buf = true;
dmabuf = mem_buf_retrieve(retrieve_arg);
if (IS_ERR(dmabuf)) {
ret = PTR_ERR(dmabuf);
pr_err("%s: Failed to retrieve DMA-BUF rc: %d\n", __func__, ret);
goto unlock_err;
}
ret = zero_dma_buf(dmabuf);
if (ret) {
pr_err("%s: Failed to zero the DMA-BUF rc: %d\n", __func__, ret);
goto free_buf;
}
buf_state = kzalloc(sizeof(*buf_state), GFP_KERNEL);
if (!buf_state) {
pr_err("%s: Unable to allocate memory for buf_state\n",
__func__);
ret = -ENOMEM;
goto free_buf;
}
buf_state->dmabuf = dmabuf;
}
attachment = dma_buf_attach(dmabuf, cb_devices[cb_id]);
if (IS_ERR(attachment)) {
ret = PTR_ERR(attachment);
pr_err("%s: Failed to attach rc: %d\n", __func__, ret);
goto free_buf_state;
}
table = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
if (IS_ERR(table)) {
ret = PTR_ERR(table);
pr_err("%s: Failed to map rc: %d\n", __func__, ret);
goto detach;
}
if (table->nents != 1) {
ret = -EINVAL;
pr_err("%s: Buffer not mapped as one segment!\n", __func__);
goto unmap;
}
buf_state->cb_info[cb_id].mapped = true;
buf_state->cb_info[cb_id].attachment = attachment;
buf_state->cb_info[cb_id].sg_table = table;
if (!cb_map_counts[cb_id]) {
ret = qcom_iommu_sid_switch(cb_devices[cb_id], SID_ACQUIRE);
if (ret) {
pr_err("%s: Failed to program SMRs for cb_id %d rc: %d\n", __func__,
cb_id, ret);
goto unmap;
}
}
cb_map_counts[cb_id]++;
ret = xa_err(xa_store(&buffer_state_arr, retrieve_arg->memparcel_hdl, buf_state,
GFP_KERNEL));
if (ret < 0) {
pr_err("%s: Failed to store new buffer in xarray rc: %d\n", __func__,
ret);
goto dec_cb_map_count;
}
unlock:
mutex_unlock(&buffer_state_lock);
return table;
dec_cb_map_count:
cb_map_counts[cb_id]--;
if (!cb_map_counts[cb_id]) {
ret = qcom_iommu_sid_switch(cb_devices[cb_id], SID_RELEASE);
if (ret)
pr_err("%s: Failed to unprogram SMR for cb_id %d rc: %d\n",
__func__, cb_id, ret);
}
unmap:
dma_buf_unmap_attachment(attachment, table, DMA_BIDIRECTIONAL);
detach:
dma_buf_detach(dmabuf, attachment);
free_buf_state:
if (new_buf)
kfree(buf_state);
free_buf:
if (new_buf)
dma_buf_put(dmabuf);
unlock_err:
mutex_unlock(&buffer_state_lock);
return ERR_PTR(ret);
}
static int process_map_request(struct smmu_proxy_map_req *req, size_t size)
{
struct smmu_proxy_map_resp *resp;
int ret = 0;
u32 n_acl_entries = req->acl_desc.n_acl_entries;
size_t map_req_len = offsetof(struct smmu_proxy_map_req,
acl_desc.acl_entries[n_acl_entries]);
struct mem_buf_retrieve_kernel_arg retrieve_arg = {0};
int i;
struct sg_table *table;
/*
* Last entry of smmu_proxy_map_req is an array of arbitrary length.
* Validate that the number of entries fits within the buffer given
* to us by the message queue.
*/
if (map_req_len > size) {
pr_err("%s: Reported size of smmu_proxy_map_request (%ld bytes) greater than message size given by message queue (%ld bytes)\n",
__func__, map_req_len, size);
return -EINVAL;
}
resp = kzalloc(sizeof(*resp), GFP_KERNEL);
if (!resp) {
pr_err("%s: Failed to allocate memory for response\n", __func__);
return -ENOMEM;
}
retrieve_arg.vmids = kmalloc_array(n_acl_entries, sizeof(*retrieve_arg.vmids), GFP_KERNEL);
if (!retrieve_arg.vmids) {
ret = -ENOMEM;
goto free_resp;
}
retrieve_arg.perms = kmalloc_array(n_acl_entries, sizeof(*retrieve_arg.perms), GFP_KERNEL);
if (!retrieve_arg.perms) {
ret = -ENOMEM;
goto free_vmids;
}
retrieve_arg.fd_flags = O_RDWR;
retrieve_arg.memparcel_hdl = req->hdl;
retrieve_arg.sender_vmid = VMID_HLOS;
retrieve_arg.nr_acl_entries = n_acl_entries;
for (i = 0; i < n_acl_entries; i++) {
retrieve_arg.vmids[i] = req->acl_desc.acl_entries[i].vmid;
retrieve_arg.perms[i] = req->acl_desc.acl_entries[i].perms;
}
table = retrieve_and_iommu_map(&retrieve_arg, req->cb_id);
if (IS_ERR(table)) {
ret = PTR_ERR(table);
goto free_perms;
}
resp->hdr.msg_type = SMMU_PROXY_MAP_RESP;
resp->hdr.msg_size = sizeof(*resp);
resp->hdr.ret = ret;
resp->iova = sg_dma_address(table->sgl);
resp->mapping_len = sg_dma_len(table->sgl);
ret = gh_msgq_send(msgq_hdl, resp, resp->hdr.msg_size, 0);
if (ret < 0) {
pr_err("%s: failed to send response to mapping request rc: %d\n", __func__, ret);
iommu_unmap_and_relinquish(req->hdl);
} else {
pr_debug("%s: response to mapping request sent\n", __func__);
}
free_perms:
kfree(retrieve_arg.perms);
free_vmids:
kfree(retrieve_arg.vmids);
free_resp:
kfree(resp);
return ret;
}
static void smmu_proxy_process_msg(void *buf, size_t size)
{
struct smmu_proxy_msg_hdr *msg_hdr = buf;
struct smmu_proxy_resp_hdr *resp;
int ret = -EINVAL;
if (size < sizeof(*msg_hdr) || msg_hdr->msg_size != size) {
pr_err("%s: message received is not of a proper size: 0x%lx, 0x:%x\n",
__func__, size, msg_hdr->msg_size);
goto handle_err;
}
switch (msg_hdr->msg_type) {
case SMMU_PROXY_MAP:
ret = process_map_request(buf, size);
break;
case SMMU_PROXY_UNMAP:
ret = process_unmap_request(buf, size);
break;
default:
pr_err("%s: received message of unknown type: %d\n", __func__,
msg_hdr->msg_type);
}
if (!ret)
return;
handle_err:
resp = kzalloc(sizeof(*resp), GFP_KERNEL);
if (!resp) {
pr_err("%s: Failed to allocate memory for response\n", __func__);
return;
}
resp->msg_type = SMMU_PROXY_ERR_RESP;
resp->msg_size = sizeof(*resp);
resp->ret = ret;
ret = gh_msgq_send(msgq_hdl, resp, resp->msg_size, 0);
if (ret < 0)
pr_err("%s: failed to send error response rc: %d\n", __func__, ret);
else
pr_debug("%s: response to mapping request sent\n", __func__);
kfree(resp);
}
static int receiver_msgq_handler(void *msgq_hdl)
{
void *buf;
size_t size;
int ret;
buf = kzalloc(GH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
if (!buf)
return -ENOMEM;
while (!kthread_should_stop()) {
ret = gh_msgq_recv(msgq_hdl, buf, GH_MSGQ_MAX_MSG_SIZE_BYTES, &size, 0);
if (ret < 0) {
pr_err_ratelimited("%s failed to receive message rc: %d\n", __func__, ret);
} else {
smmu_proxy_process_msg(buf, size);
}
}
kfree(buf);
return 0;
}
static int smmu_proxy_ac_lock_toggle(int dma_buf_fd, bool lock)
{
int ret = 0;
struct smmu_proxy_buffer_state *buf_state;
struct dma_buf *dmabuf;
u32 handle;
dmabuf = dma_buf_get(dma_buf_fd);
if (IS_ERR(dmabuf)) {
pr_err("%s: unable to get dma-buf from FD %d, rc: %ld\n", __func__,
dma_buf_fd, PTR_ERR(dmabuf));
return PTR_ERR(dmabuf);
}
ret = mem_buf_dma_buf_get_memparcel_hdl(dmabuf, &handle);
if (ret) {
pr_err("%s: Failed to get memparcel handle rc: %d\n", __func__, ret);
goto free_buf;
}
mutex_lock(&buffer_state_lock);
buf_state = xa_load(&buffer_state_arr, handle);
if (!buf_state) {
pr_err("%s: handle 0x%x unknown to proxy driver!\n", __func__, handle);
ret = -EINVAL;
goto out;
}
if (buf_state->locked == lock) {
pr_err("%s: handle 0x%x already %s!\n", __func__, handle,
lock ? "locked" : "unlocked");
ret = -EINVAL;
goto out;
}
buf_state->locked = lock;
out:
mutex_unlock(&buffer_state_lock);
free_buf:
dma_buf_put(dmabuf);
return ret;
}
/*
* Iterate over all buffers mapped to context bank @context_bank_id, and zero
* out the buffers. If there is a single error for any buffer, we bail out with
* an error and disregard the rest of the buffers mapped to @context_bank_id.
*/
int smmu_proxy_clear_all_buffers(void __user *context_bank_id_array,
__u32 num_cb_ids)
{
unsigned long handle;
struct smmu_proxy_buffer_state *buf_state;
__u32 cb_ids[QTI_SMMU_PROXY_CB_IDS_LEN];
int i, ret = 0;
bool found_mapped_cb;
/* Checking this allows us to keep cb_id_arr fixed in length */
if (num_cb_ids > QTI_SMMU_PROXY_CB_IDS_LEN) {
pr_err("%s: Invalid number of CB IDs: %u\n", __func__, num_cb_ids);
return -EINVAL;
}
ret = copy_struct_from_user(&cb_ids, sizeof(cb_ids), context_bank_id_array,
sizeof(cb_ids));
if (ret) {
pr_err("%s: Failed to get CB IDs from user space rc %d\n", __func__, ret);
return ret;
}
for (i = 0; i < num_cb_ids; i++) {
if (cb_ids[i] >= QTI_SMMU_PROXY_CB_IDS_LEN) {
pr_err("%s: Invalid CB ID of %u at pos %d\n", __func__, cb_ids[i], i);
return -EINVAL;
}
}
mutex_lock(&buffer_state_lock);
xa_for_each(&buffer_state_arr, handle, buf_state) {
found_mapped_cb = false;
for (i = 0; i < num_cb_ids; i++) {
if (buf_state->cb_info[cb_ids[i]].mapped) {
found_mapped_cb = true;
break;
}
}
if (!found_mapped_cb)
continue;
ret = zero_dma_buf(buf_state->dmabuf);
if (ret) {
pr_err("%s: dma_buf_vmap() failed with %d\n", __func__, ret);
break;
}
}
mutex_unlock(&buffer_state_lock);
return ret;
}
static int smmu_proxy_get_dma_buf(struct smmu_proxy_get_dma_buf_ctl *get_dma_buf_ctl)
{
struct smmu_proxy_buffer_state *buf_state;
int fd, ret = 0;
mutex_lock(&buffer_state_lock);
buf_state = xa_load(&buffer_state_arr, get_dma_buf_ctl->memparcel_hdl);
if (!buf_state) {
pr_err("%s: handle 0x%llx unknown to proxy driver!\n", __func__,
get_dma_buf_ctl->memparcel_hdl);
ret = -EINVAL;
goto out;
}
get_dma_buf(buf_state->dmabuf);
fd = dma_buf_fd(buf_state->dmabuf, O_CLOEXEC);
if (fd < 0) {
ret = fd;
pr_err("%s: Failed to install FD for dma-buf rc: %d\n", __func__,
ret);
dma_buf_put(buf_state->dmabuf);
} else {
get_dma_buf_ctl->dma_buf_fd = fd;
}
out:
mutex_unlock(&buffer_state_lock);
return ret;
}
static long smmu_proxy_dev_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
unsigned int dir = _IOC_DIR(cmd);
union smmu_proxy_ioctl_arg ioctl_arg;
int ret;
if (_IOC_SIZE(cmd) > sizeof(ioctl_arg))
return -EINVAL;
if (copy_from_user(&ioctl_arg, (void __user *)arg, _IOC_SIZE(cmd)))
return -EFAULT;
if (!(dir & _IOC_WRITE))
memset(&ioctl_arg, 0, sizeof(ioctl_arg));
switch (cmd) {
case QTI_SMMU_PROXY_AC_LOCK_BUFFER:
{
struct smmu_proxy_acl_ctl *acl_ctl =
&ioctl_arg.acl_ctl;
ret = smmu_proxy_ac_lock_toggle(acl_ctl->dma_buf_fd, true);
if (ret)
return ret;
break;
}
case QTI_SMMU_PROXY_AC_UNLOCK_BUFFER:
{
struct smmu_proxy_acl_ctl *acl_ctl =
&ioctl_arg.acl_ctl;
ret = smmu_proxy_ac_lock_toggle(acl_ctl->dma_buf_fd, false);
if (ret)
return ret;
break;
}
case QTI_SMMU_PROXY_WIPE_BUFFERS:
{
struct smmu_proxy_wipe_buf_ctl *wipe_buf_ctl =
&ioctl_arg.wipe_buf_ctl;
ret = smmu_proxy_clear_all_buffers((void *) wipe_buf_ctl->context_bank_id_array,
wipe_buf_ctl->num_cb_ids);
break;
}
case QTI_SMMU_PROXY_GET_DMA_BUF:
{
ret = smmu_proxy_get_dma_buf(&ioctl_arg.get_dma_buf_ctl);
break;
}
default:
return -ENOTTY;
}
if (dir & _IOC_READ) {
if (copy_to_user((void __user *)arg, &ioctl_arg,
_IOC_SIZE(cmd)))
return -EFAULT;
}
return 0;
}
static const struct file_operations smmu_proxy_dev_fops = {
.unlocked_ioctl = smmu_proxy_dev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
static int receiver_probe_handler(struct device *dev)
{
int ret = 0;
msgq_hdl = gh_msgq_register(GH_MSGQ_LABEL_SMMU_PROXY);
if (IS_ERR(msgq_hdl)) {
ret = PTR_ERR(msgq_hdl);
dev_err(dev, "Queue registration failed: %ld!\n", PTR_ERR(msgq_hdl));
return ret;
}
receiver_msgq_handler_thread = kthread_run(receiver_msgq_handler, msgq_hdl,
"smmu_proxy_msgq_handler");
if (IS_ERR(receiver_msgq_handler_thread)) {
ret = PTR_ERR(receiver_msgq_handler_thread);
dev_err(dev, "Failed to launch receiver_msgq_handler thread: %ld\n",
PTR_ERR(receiver_msgq_handler_thread));
goto free_msgq;
}
ret = smmu_proxy_create_dev(&smmu_proxy_dev_fops);
if (ret) {
pr_err("Failed to create character device with error %d\n", ret);
goto free_kthread;
}
return 0;
free_kthread:
kthread_stop(receiver_msgq_handler_thread);
free_msgq:
gh_msgq_unregister(msgq_hdl);
return ret;
}
static int proxy_fault_handler(struct iommu_domain *domain, struct device *dev,
unsigned long iova, int flags, void *token)
{
dev_err(dev, "Context fault with IOVA %lx and fault flags %d\n", iova, flags);
return -EINVAL;
}
static int cb_probe_handler(struct device *dev)
{
int ret;
unsigned int context_bank_id;
struct iommu_domain *domain;
ret = of_property_read_u32(dev->of_node, "qti,cb-id", &context_bank_id);
if (ret) {
dev_err(dev, "Failed to read qti,cb-id property for device\n");
return -EINVAL;
}
if (context_bank_id >= QTI_SMMU_PROXY_CB_IDS_LEN) {
dev_err(dev, "Invalid CB ID: %u\n", context_bank_id);
return -EINVAL;
}
if (cb_devices[context_bank_id]) {
dev_err(dev, "Context bank %u is already populated\n", context_bank_id);
return -EINVAL;
}
ret = dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "Failed to set segment size\n");
return ret;
}
ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
dev_err(dev, "Failed to set DMA-MASK\n");
return ret;
}
domain = iommu_get_domain_for_dev(dev);
if (IS_ERR_OR_NULL(domain)) {
dev_err(dev, "%s: Failed to get iommu domain\n", __func__);
return -EINVAL;
}
iommu_set_fault_handler(domain, proxy_fault_handler, NULL);
cb_devices[context_bank_id] = dev;
return 0;
}
static int smmu_proxy_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
if (of_device_is_compatible(dev->of_node, CB_COMPAT_STR)) {
return cb_probe_handler(dev);
} else if (of_device_is_compatible(dev->of_node, RECEIVER_COMPAT_STR)) {
return receiver_probe_handler(dev);
} else {
return -EINVAL;
}
}
static const struct of_device_id smmu_proxy_match_table[] = {
{.compatible = RECEIVER_COMPAT_STR},
{.compatible = CB_COMPAT_STR},
{},
};
static struct platform_driver smmu_proxy_driver = {
.probe = smmu_proxy_probe,
.driver = {
.name = "qti-smmu-proxy",
.of_match_table = smmu_proxy_match_table,
},
};
int __init init_smmu_proxy_driver(void)
{
int ret;
struct csf_version csf_version;
ret = smmu_proxy_get_csf_version(&csf_version);
if (ret) {
pr_err("%s: Unable to get CSF version\n", __func__);
return ret;
}
if (csf_version.arch_ver == 2 && csf_version.max_ver == 0) {
pr_err("%s: CSF 2.5 not in use, not loading module\n", __func__);
return -EINVAL;
}
return platform_driver_register(&smmu_proxy_driver);
}
module_init(init_smmu_proxy_driver);
MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL v2");


@@ -0,0 +1,113 @@
# Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
# Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import filecmp
import os
import re
import subprocess
import sys
def run_headers_install(verbose, gen_dir, headers_install, unifdef, prefix, h):
if not h.startswith(prefix):
print('error: expected prefix [%s] on header [%s]' % (prefix, h))
return False
# out_h joins the header's relative path (computed in gen_smcinvoke_headers())
# onto gen_dir, e.g. out/soong/.temp/sbox/<temp hash value>/out/linux/smcinvoke.h.
# After the build completes, the exposed headers can be found under:
# out/soong/.intermediates/.../qti_generate_smcinvoke_kernel_headers/gen/
if 'include/uapi' in h:
out_h = os.path.join(gen_dir,'include', h[len(prefix):])
else:
out_h = os.path.join(gen_dir, h[len(prefix):])
(out_h_dirname, out_h_basename) = os.path.split(out_h)
env = os.environ.copy()
env["LOC_UNIFDEF"] = unifdef
cmd = ["sh", headers_install, h, out_h]
if verbose:
print('run_headers_install: cmd is %s' % cmd)
result = subprocess.call(cmd, env=env)
if result != 0:
print('error: run_headers_install: cmd %s failed %d' % (cmd, result))
return False
return True
def gen_smcinvoke_headers(verbose, gen_dir, headers_install, unifdef, smcinvoke_headers_to_expose):
error_count = 0
# smcinvoke_headers_to_expose is a list of individual header paths to expose.
# They are passed via Android.bp variable substitution: $(locations <label>),
# e.g. $(locations linux/*.h). Note that <label> must be a rule that locates
# the file; it cannot be the file itself.
for h in smcinvoke_headers_to_expose:
# h is the path relative to the repo root directory securemsm-kernel, e.g.
# <parent directory structure>/securemsm-kernel/linux/smcinvoke.h.
# Split the string and keep only the directory structure we want to expose,
# i.e. linux/smcinvoke.h.
topDirectory = 'securemsm-kernel'
if 'include/uapi' in h:
directorySplitLocation = '/'+ topDirectory +'/'
smcinvoke_headers_to_expose_prefix = os.path.join(h.split(directorySplitLocation)[0], topDirectory, 'include', 'uapi') + os.sep
if not run_headers_install(verbose, gen_dir, headers_install, unifdef, smcinvoke_headers_to_expose_prefix, h):
error_count += 1
else:
directorySplitLocation = '/'+ topDirectory +'/'
smcinvoke_headers_to_expose_prefix = os.path.join(h.split(directorySplitLocation)[0], topDirectory) + os.sep
if not run_headers_install(verbose, gen_dir, headers_install, unifdef, smcinvoke_headers_to_expose_prefix, h):
error_count += 1
return error_count
def main():
"""Parse command line arguments and perform top level control."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Arguments that apply to every invocation of this script.
parser.add_argument(
'--verbose', action='store_true',
help='Print output that describes the workings of this script.')
parser.add_argument(
'--header_arch', required=True,
help='The arch for which to generate headers.')
parser.add_argument(
'--gen_dir', required=True,
help='Where to place the generated files.')
parser.add_argument(
'--smcinvoke_headers_to_expose', required=True, nargs='*',
help='The list of smcinvoke header files.')
parser.add_argument(
'--headers_install', required=True,
help='The headers_install tool to process input headers.')
parser.add_argument(
'--unifdef',
required=True,
help='The unifdef tool used by headers_install.')
args = parser.parse_args()
if args.verbose:
print('header_arch [%s]' % args.header_arch)
print('gen_dir [%s]' % args.gen_dir)
print('smcinvoke_headers_to_expose [%s]' % args.smcinvoke_headers_to_expose)
print('headers_install [%s]' % args.headers_install)
print('unifdef [%s]' % args.unifdef)
return gen_smcinvoke_headers(args.verbose, args.gen_dir,
args.headers_install, args.unifdef, args.smcinvoke_headers_to_expose)
if __name__ == '__main__':
sys.exit(main())

File diff suppressed because it is too large