soc: qcom: Added support for virtualized FBE

Virtualized FBE is a snapshot from the msm-5.15 branch, taken at:

commit 4d0e75a8e326 ("soc: qcom: Added support for virtualized FBE")

Modified the virtio block crypto APIs according to the latest
implementation.

Change-Id: Ifee4bbd481b5a034cba5312216a6a05272895191
Signed-off-by: Santosh Dronamraju <quic_sdronamr@quicinc.com>
Author: Santosh Dronamraju
Date: 2023-11-01 10:56:01 +05:30
parent bced539cab
commit b39e3411a2
8 changed files with 638 additions and 10 deletions


@@ -383,6 +383,15 @@ config VIRTIO_BLK
	  This is the virtual block driver for virtio. It can be used with
	  QEMU based VMMs (like KVM or Xen). Say Y or M.

config VIRTIO_BLK_QTI_CRYPTO
	tristate "Vendor specific VIRTIO Crypto Engine Support"
	depends on VIRTIO_BLK && QTI_CRYPTO_VIRTUALIZATION
	help
	  Enable storage inline crypto engine support for guest virtual
	  machines. Enabling this allows the kernel to use crypto operations
	  defined and implemented by QTI.
	  Say Y or M.

config BLK_DEV_RBD
	tristate "Rados block device (RBD)"
	depends on INET && BLOCK


@@ -24,6 +24,7 @@ obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
obj-$(CONFIG_VIRTIO_BLK_QTI_CRYPTO) += virtio_blk_qti_crypto.o
obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o
obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o


@@ -21,6 +21,10 @@
#ifdef CONFIG_GH_VIRTIO_DEBUG
#include <trace/events/gh_virtio_frontend.h>
#endif
#if IS_ENABLED(CONFIG_QTI_CRYPTO_VIRTUALIZATION)
#include <linux/blk-crypto-profile.h>
#include "virtio_blk_qti_crypto.h"
#endif

#define PART_BITS 4
#define VQ_NAME_LEN 16
@@ -104,6 +108,7 @@ struct virtio_blk_ice_info {
	u64 data_unit_num;
} __packed;
#endif

struct virtblk_req {
	struct virtio_blk_outhdr out_hdr;
#if IS_ENABLED(CONFIG_QTI_CRYPTO_VIRTUALIZATION)
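
Only the tail of struct virtio_blk_ice_info is visible in the hunk above; the remaining fields can be inferred from virtblk_get_ice_info() later in this patch. A hedged reconstruction (field widths other than data_unit_num are assumptions):

/* Hedged reconstruction; only data_unit_num appears in the hunk above.
 * activate and ice_slot are inferred from virtblk_get_ice_info();
 * the exact widths and padding here are assumptions.
 */
struct virtio_blk_ice_info {
	u8  activate;		/* request wants inline crypto */
	u8  reserved[3];
	u32 ice_slot;		/* virtual keyslot programmed via the BE */
	u64 data_unit_num;	/* IV / DUN for the request */
} __packed;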
@@ -139,23 +144,19 @@ static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
	struct scatterlist hdr, status, *sgs[3];
	unsigned int num_out = 0, num_in = 0;
#if IS_ENABLED(CONFIG_QTI_CRYPTO_VIRTUALIZATION)
	/* Backend (HOST) expects to receive encryption info via extended
	 * structure when ICE negotiation is successful, which will be used
	 * by the backend ufs/sdhci host controller to program the descriptors
	 * as per spec standards to enable the encryption on read/write
	 * of data from/to disk.
	 */
	size_t const hdr_size = virtio_has_feature(vq->vdev, VIRTIO_BLK_F_ICE_IV) ?
				sizeof(vbr->out_hdr) + sizeof(vbr->ice_info) :
				sizeof(vbr->out_hdr);
	sg_init_one(&hdr, &vbr->out_hdr, hdr_size);
#else
	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
#endif
	sgs[num_out++] = &hdr;

	if (vbr->sg_table.nents) {
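
Restated as a standalone helper, the sizing rule above is: the host receives the extended header only when the VIRTIO_BLK_F_ICE_IV feature bit was negotiated. A sketch (example_hdr_size is illustrative, not part of the patch):

/* Sketch only; mirrors the ternary in virtblk_add_req() above.
 * example_hdr_size is hypothetical and not part of this patch.
 */
static size_t example_hdr_size(struct virtqueue *vq, struct virtblk_req *vbr)
{
	if (virtio_has_feature(vq->vdev, VIRTIO_BLK_F_ICE_IV))
		return sizeof(vbr->out_hdr) + sizeof(vbr->ice_info);
	return sizeof(vbr->out_hdr);
}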
@@ -372,6 +373,25 @@ static blk_status_t virtblk_fail_to_queue(struct request *req, int rc)
	}
}

#if IS_ENABLED(CONFIG_QTI_CRYPTO_VIRTUALIZATION)
static void virtblk_get_ice_info(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	/* Check whether or not the request needs inline crypto operations */
	if (!req || !req->crypt_keyslot) {
		/* ICE is not activated */
		vbr->ice_info.activate = false;
	} else {
		vbr->ice_info.ice_slot = blk_crypto_keyslot_index(req->crypt_keyslot);
		/* ICE is activated - successful flow */
		vbr->ice_info.activate = true;
		/* data unit number, i.e. the IV value */
		vbr->ice_info.data_unit_num = req->crypt_ctx->bc_dun[0];
	}
}
#endif

static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
				    struct virtio_blk *vblk,
				    struct request *req,
@@ -384,6 +404,10 @@ static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
	if (unlikely(status))
		return status;

#if IS_ENABLED(CONFIG_QTI_CRYPTO_VIRTUALIZATION)
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_ICE_IV))
		virtblk_get_ice_info(req);
#endif
	num = virtblk_map_data(hctx, req, vbr);
	if (unlikely(num < 0))
		return virtblk_fail_to_queue(req, -ENOMEM);
@@ -1198,8 +1222,17 @@ static int virtblk_probe(struct virtio_device *vdev)
	}

	virtblk_update_capacity(vblk, false);
#if IS_ENABLED(CONFIG_QTI_CRYPTO_VIRTUALIZATION)
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_ICE_IV)) {
		dev_notice(&vdev->dev, "%s\n", vblk->disk->disk_name);
		/* Initialize the supported crypto capabilities */
		err = virtblk_init_crypto_qti_spec(&vblk->vdev->dev);
		if (!err)
			virtblk_crypto_qti_crypto_register(vblk->disk->queue);
	}
#endif
	virtio_device_ready(vdev);

	err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
	if (err)
		goto out_cleanup_disk;


@@ -0,0 +1,168 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * virtio block crypto ops QTI implementation.
 *
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/crypto_qti_virt.h>
#include <linux/blk-crypto-profile.h>

/* Keyslot manager for virtual IO */
static struct blk_crypto_profile virtio_crypto_profile;
/* Initialize the crypto profile only once */
static bool is_crypto_profile_initalized;
/* Max number of ICE slots available to the guest VM */
static uint32_t num_ice_slots;

void virtblk_crypto_qti_crypto_register(struct request_queue *q)
{
	blk_crypto_register(&virtio_crypto_profile, q);
}
EXPORT_SYMBOL_GPL(virtblk_crypto_qti_crypto_register);

static inline bool virtblk_keyslot_valid(unsigned int slot)
{
	/*
	 * Slot numbers range from 0 to the max number of slots
	 * available for the VM.
	 */
	return slot < num_ice_slots;
}
static int virtblk_crypto_qti_keyslot_program(struct blk_crypto_profile *profile,
					      const struct blk_crypto_key *key,
					      unsigned int slot)
{
	int err = 0;

	if (!virtblk_keyslot_valid(slot)) {
		pr_err("%s: key slot is not valid\n", __func__);
		return -EINVAL;
	}
	err = crypto_qti_virt_program_key(key, slot);
	if (err) {
		pr_err("%s: program key failed with error %d\n",
		       __func__, err);
		err = crypto_qti_virt_invalidate_key(slot);
		if (err) {
			pr_err("%s: invalidate key failed with error %d\n",
			       __func__, err);
			return err;
		}
	}
	return err;
}

static int virtblk_crypto_qti_keyslot_evict(struct blk_crypto_profile *profile,
					    const struct blk_crypto_key *key,
					    unsigned int slot)
{
	int err = 0;

	if (!virtblk_keyslot_valid(slot)) {
		pr_err("%s: key slot is not valid\n", __func__);
		return -EINVAL;
	}
	err = crypto_qti_virt_invalidate_key(slot);
	if (err) {
		pr_err("%s: evict key failed with error %d\n",
		       __func__, err);
		return err;
	}
	return err;
}
static int virtblk_crypto_qti_derive_raw_secret(struct blk_crypto_profile *profile,
						const u8 *eph_key,
						size_t eph_key_size,
						u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE])
{
	int err = 0;

	if (eph_key_size <= BLK_CRYPTO_SW_SECRET_SIZE) {
		pr_err("%s: Invalid eph_key_size: %zu\n",
		       __func__, eph_key_size);
		return -EINVAL;
	}

	if (eph_key_size > 64) {
		err = crypto_qti_virt_derive_raw_secret_platform(eph_key,
								 eph_key_size,
								 sw_secret,
								 BLK_CRYPTO_SW_SECRET_SIZE);
	} else {
		memcpy(sw_secret, eph_key, BLK_CRYPTO_SW_SECRET_SIZE);
	}
	return err;
}

static const struct blk_crypto_ll_ops virtio_blk_qti_crypto_ops = {
	.keyslot_program	= virtblk_crypto_qti_keyslot_program,
	.keyslot_evict		= virtblk_crypto_qti_keyslot_evict,
	.derive_sw_secret	= virtblk_crypto_qti_derive_raw_secret,
};
int virtblk_init_crypto_qti_spec(struct device *dev)
{
	int err = 0;
	unsigned int crypto_modes_supported[BLK_ENCRYPTION_MODE_MAX];

	memset(crypto_modes_supported, 0, sizeof(crypto_modes_supported));

	/* For the virtualization driver, the actual capabilities of UFS/eMMC
	 * for the different encryption modes are determined in the back end
	 * (host operating system), so fetch them from there. The received
	 * capabilities are fed to the keyslot manager as input.
	 */
	err = crypto_qti_virt_get_crypto_capabilities(crypto_modes_supported,
						      sizeof(crypto_modes_supported));
	if (err) {
		pr_err("crypto_qti_virt_get_crypto_capabilities failed error = %d\n", err);
		return err;
	}
	/* Get the max number of ICE slots for the guest VM */
	err = crypto_qti_virt_ice_get_info(&num_ice_slots);
	if (err) {
		pr_err("crypto_qti_virt_ice_get_info failed error = %d\n", err);
		return err;
	}
	/* Return early if the crypto profile is already initialized */
	if (is_crypto_profile_initalized)
		return 0;

	/* Create the keyslot manager that will manage the keyslots for
	 * all virtual disks.
	 */
	err = devm_blk_crypto_profile_init(dev, &virtio_crypto_profile, num_ice_slots);
	if (err) {
		pr_err("%s: crypto profile initialization failed\n", __func__);
		return err;
	}
	is_crypto_profile_initalized = true;

	virtio_crypto_profile.ll_ops = virtio_blk_qti_crypto_ops;
	/* This value is supposed to come from the host based on the storage
	 * type; the hard-coded value will be removed later.
	 */
	virtio_crypto_profile.max_dun_bytes_supported = 8;
	virtio_crypto_profile.key_types_supported = BLK_CRYPTO_KEY_TYPE_HW_WRAPPED;
	virtio_crypto_profile.dev = dev;
	memcpy(virtio_crypto_profile.modes_supported, crypto_modes_supported,
	       sizeof(crypto_modes_supported));

	pr_info("%s: crypto profile initialized.\n", __func__);
	return err;
}
EXPORT_SYMBOL_GPL(virtblk_init_crypto_qti_spec);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto Virtual library for storage encryption");
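
The size policy in virtblk_crypto_qti_derive_raw_secret() is easy to misread; a hedged restatement (the 32- and 64-byte thresholds are taken from the code above, and example_key_is_hw_wrapped is illustrative):

/* example_key_is_hw_wrapped is hypothetical; thresholds are from
 * virtblk_crypto_qti_derive_raw_secret() above: keys larger than
 * 64 bytes are treated as HW-wrapped and derived by the back end,
 * 33..64-byte keys are raw keys whose leading 32 bytes become the
 * secret, and anything <= 32 bytes is rejected with -EINVAL.
 */
static bool example_key_is_hw_wrapped(size_t eph_key_size)
{
	return eph_key_size > 64;
}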


@@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#ifndef _VIRTIO_BLK_QTI_CRYPTO_H
#define _VIRTIO_BLK_QTI_CRYPTO_H

#include <linux/device.h>
#include <linux/blkdev.h>

/**
 * virtblk_init_crypto_qti_spec() - initializes the supported crypto
 * capabilities and creates the crypto profile to manage keyslots for
 * virtual disks.
 *
 * @dev: device that owns the crypto profile
 *
 * Return: zero on success, else a -errno value
 */
int virtblk_init_crypto_qti_spec(struct device *dev);

/**
 * virtblk_crypto_qti_crypto_register() - sets up the crypto profile in
 * the virtual disk's request_queue.
 *
 * @q: virtual disk request queue
 */
void virtblk_crypto_qti_crypto_register(struct request_queue *q);

#endif /* _VIRTIO_BLK_QTI_CRYPTO_H */
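
A hedged sketch of how these two entry points are meant to be wired, mirroring the virtblk_probe() changes earlier in this commit (example_virtblk_ice_setup is illustrative, not part of the patch):

/* Illustrative wiring only; mirrors this patch's virtblk_probe() changes.
 * example_virtblk_ice_setup is hypothetical.
 */
static int example_virtblk_ice_setup(struct virtio_device *vdev,
				     struct gendisk *disk)
{
	int err;

	/* Only wire up ICE when the feature bit was negotiated */
	if (!virtio_has_feature(vdev, VIRTIO_BLK_F_ICE_IV))
		return 0;

	/* Query back-end capabilities and create the shared profile once */
	err = virtblk_init_crypto_qti_spec(&vdev->dev);
	if (err)
		return err;

	/* Attach the profile so blk-crypto can program/evict keyslots */
	virtblk_crypto_qti_crypto_register(disk->queue);
	return 0;
}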


@@ -65,6 +65,7 @@ crypto-qti-$(CONFIG_QTI_HW_KEY_MANAGER) += crypto-qti-hwkm.o
obj-$(CONFIG_QTI_HW_KEY_MANAGER) += hwkm.o
crypto-qti-$(CONFIG_QTI_HW_KEY_MANAGER_V1) += crypto-qti-hwkm.o
obj-$(CONFIG_QTI_HW_KEY_MANAGER_V1) += hwkm_v1.o
obj-$(CONFIG_QTI_CRYPTO_VIRTUALIZATION) += crypto-qti-virt.o
obj-$(CONFIG_MSM_TMECOM_QMP) += tmecom/
obj-$(CONFIG_MSM_HAB) += hab/
obj-$(CONFIG_QCOM_HGSL) += hgsl/


@@ -0,0 +1,289 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto virtual library for storage encryption.
 *
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/string.h>
#include <linux/habmm.h>
#include <linux/crypto_qti_virt.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <linux/completion.h>

#define RESERVE_SIZE		(36 * sizeof(uint16_t))
#define SECRET_SIZE		(32)
#define HAB_TIMEOUT_MS		(50000)
/* FBE request command ids */
#define FBE_GET_MAX_SLOTS	(7)
#define FBE_SET_KEY_V2		(8)
#define FBE_CLEAR_KEY_V2	(9)
#define FBE_DERIVE_RAW_SECRET	(10)
#define FBE_GET_CRYPTO_CAPABILITIES (11)
#define FBE_VERIFY_CRYPTO_CAPS	(12)
#define MAX_CRYPTO_MODES_SUPPORTED (4)

struct fbe_derive_secret {
	uint8_t wrapped_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
	uint32_t wrapped_key_size;
};

struct fbe_request_v2_t {
	uint8_t reserve[RESERVE_SIZE]; /* for compatibility */
	uint32_t cmd;
	uint8_t key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
	uint32_t key_size;
	uint32_t virt_slot;
	uint32_t data_unit_size;
	enum blk_crypto_mode_num crypto_mode;
	struct fbe_derive_secret derive_raw_secret;
};

struct fbe_v2_resp {
	int32_t status;
	uint8_t secret_key[SECRET_SIZE];
	uint32_t crypto_modes_supported[MAX_CRYPTO_MODES_SUPPORTED];
};

struct fbe_req_args {
	struct fbe_request_v2_t req;
	struct fbe_v2_resp response;
	int32_t ret;
};

static struct completion send_fbe_req_done;
static int32_t send_fbe_req_hab(void *arg)
{
	int ret = 0;
	uint32_t status_size;
	uint32_t handle;
	struct fbe_req_args *req_args = (struct fbe_req_args *)arg;

	do {
		if (!req_args) {
			pr_err("%s Null input\n", __func__);
			ret = -EINVAL;
			break;
		}

		ret = habmm_socket_open(&handle, MM_FDE_1, 0, 0);
		if (ret) {
			pr_err("habmm_socket_open failed with ret = %d\n", ret);
			break;
		}

		ret = habmm_socket_send(handle, &req_args->req, sizeof(struct fbe_request_v2_t), 0);
		if (ret) {
			pr_err("habmm_socket_send failed, ret = 0x%x\n", ret);
			break;
		}

		do {
			status_size = sizeof(struct fbe_v2_resp);
			ret = habmm_socket_recv(handle, &req_args->response, &status_size, 0,
						HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE);
		} while (-EINTR == ret);
		if (ret) {
			pr_err("habmm_socket_recv failed, ret = 0x%x\n", ret);
			break;
		}

		if (status_size != sizeof(struct fbe_v2_resp)) {
			pr_err("habmm_socket_recv expected size: %zu, actual = %u\n",
			       sizeof(struct fbe_v2_resp),
			       status_size);
			ret = -E2BIG;
			break;
		}

		ret = habmm_socket_close(handle);
		if (ret) {
			pr_err("habmm_socket_close failed with ret = %d\n", ret);
			break;
		}
	} while (0);

	req_args->ret = ret;
	complete(&send_fbe_req_done);
	return 0;
}
static void send_fbe_req(struct fbe_req_args *arg)
{
	struct task_struct *thread;

	init_completion(&send_fbe_req_done);
	arg->response.status = 0;

	thread = kthread_run(send_fbe_req_hab, arg, "send_fbe_req");
	if (IS_ERR(thread)) {
		arg->ret = -1;
		return;
	}

	if (wait_for_completion_interruptible_timeout(
		&send_fbe_req_done, msecs_to_jiffies(HAB_TIMEOUT_MS)) <= 0) {
		pr_err("%s: timeout hit\n", __func__);
		kthread_stop(thread);
		arg->ret = -ETIME;
		return;
	}
}
int crypto_qti_virt_ice_get_info(uint32_t *total_num_slots)
{
	struct fbe_req_args arg;

	if (!total_num_slots) {
		pr_err("%s Null input\n", __func__);
		return -EINVAL;
	}

	arg.req.cmd = FBE_GET_MAX_SLOTS;
	send_fbe_req(&arg);
	if (arg.ret || arg.response.status < 0) {
		pr_err("send_fbe_req_v2 failed with ret = %d, max_slots = %d\n",
		       arg.ret, arg.response.status);
		return -ECOMM;
	}

	*total_num_slots = (uint32_t)arg.response.status;
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_qti_virt_ice_get_info);

static int verify_crypto_capabilities(enum blk_crypto_mode_num crypto_mode,
				      unsigned int data_unit_size)
{
	struct fbe_req_args arg;

	arg.req.cmd = FBE_VERIFY_CRYPTO_CAPS;
	arg.req.crypto_mode = crypto_mode;
	arg.req.data_unit_size = data_unit_size;
	send_fbe_req(&arg);
	if (arg.ret || arg.response.status < 0) {
		pr_err("send_fbe_req_v2 failed with ret = %d, status = %d\n",
		       arg.ret, arg.response.status);
		return -EINVAL;
	}

	return arg.response.status;
}
int crypto_qti_virt_program_key(const struct blk_crypto_key *key,
				unsigned int slot)
{
	struct fbe_req_args arg;
	int ret = 0;

	if (!key)
		return -EINVAL;

	/* For the virtualization driver, the actual capabilities of UFS/eMMC
	 * for the different encryption modes are determined in the back end
	 * (host operating system), so send the details to the back end and
	 * let it verify the given capabilities.
	 */
	ret = verify_crypto_capabilities(key->crypto_cfg.crypto_mode,
					 key->crypto_cfg.data_unit_size);
	if (ret)
		return -EINVAL;

	/* Program the key */
	arg.req.cmd = FBE_SET_KEY_V2;
	arg.req.virt_slot = slot;
	arg.req.key_size = key->size;
	memcpy(&(arg.req.key[0]), key->raw, key->size);

	send_fbe_req(&arg);
	if (arg.ret || arg.response.status) {
		pr_err("send_fbe_req_v2 failed with ret = %d, status = %d\n",
		       arg.ret, arg.response.status);
		return -ECOMM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_qti_virt_program_key);

int crypto_qti_virt_invalidate_key(unsigned int slot)
{
	struct fbe_req_args arg;

	arg.req.cmd = FBE_CLEAR_KEY_V2;
	arg.req.virt_slot = slot;

	send_fbe_req(&arg);
	if (arg.ret || arg.response.status) {
		pr_err("send_fbe_req_v2 failed with ret = %d, status = %d\n",
		       arg.ret, arg.response.status);
		return -ECOMM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_qti_virt_invalidate_key);
int crypto_qti_virt_get_crypto_capabilities(unsigned int *crypto_modes_supported,
					    uint32_t crypto_array_size)
{
	struct fbe_req_args arg;

	/* Trim one element to stay compatible with the BE (Back End) layout */
	crypto_array_size -= sizeof(uint32_t);

	arg.req.cmd = FBE_GET_CRYPTO_CAPABILITIES;

	send_fbe_req(&arg);
	if (arg.ret || arg.response.status) {
		pr_err("send_fbe_req_v2 failed with ret = %d, status = %d\n",
		       arg.ret, arg.response.status);
		return -ECOMM;
	}

	memcpy(crypto_modes_supported, &(arg.response.crypto_modes_supported[0]),
	       crypto_array_size);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_qti_virt_get_crypto_capabilities);
int crypto_qti_virt_derive_raw_secret_platform(const u8 *wrapped_key,
					       unsigned int wrapped_key_size,
					       u8 *secret,
					       unsigned int secret_size)
{
	struct fbe_req_args arg;

	arg.req.cmd = FBE_DERIVE_RAW_SECRET;
	memcpy(&(arg.req.derive_raw_secret.wrapped_key[0]), wrapped_key,
	       wrapped_key_size);
	arg.req.derive_raw_secret.wrapped_key_size = wrapped_key_size;

	send_fbe_req(&arg);
	if (arg.ret || arg.response.status) {
		pr_err("send_fbe_req_v2 failed with ret = %d, status = %d\n",
		       arg.ret, arg.response.status);
		return -EINVAL;
	}

	memcpy(secret, &(arg.response.secret_key[0]), secret_size);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_qti_virt_derive_raw_secret_platform);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto Virtual library for storage encryption");
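
A hedged usage sketch of the capabilities query, matching how virtblk_init_crypto_qti_spec() calls it (example_query_caps is illustrative, not part of the patch):

/* Hedged usage sketch; example_query_caps is hypothetical. The array is
 * indexed by enum blk_crypto_mode_num and sized in bytes, as in
 * virtblk_init_crypto_qti_spec().
 */
static int example_query_caps(void)
{
	unsigned int modes[BLK_ENCRYPTION_MODE_MAX] = { 0 };
	int err;

	err = crypto_qti_virt_get_crypto_capabilities(modes, sizeof(modes));
	if (err)
		return err;

	/* Each entry now holds the per-mode capabilities reported by the
	 * host; the init code copies them into
	 * blk_crypto_profile.modes_supported.
	 */
	return 0;
}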


@@ -0,0 +1,97 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#ifndef _CRYPTO_QTI_VIRT_H
#define _CRYPTO_QTI_VIRT_H

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/blk-crypto.h>
//#include <linux/blk-crypto-profile.h>

//#define RAW_SECRET_SIZE 32

#if IS_ENABLED(CONFIG_QTI_CRYPTO_VIRTUALIZATION)
/**
 * crypto_qti_virt_program_key() - sends the key and virtual slot info
 * to the Back End (BE); the BE programs the key into the specified
 * keyslot in the inline encryption hardware.
 *
 * @key: Actual key or wrapped key
 * @slot: virtual slot
 *
 * Return: zero on success, else a -errno value
 */
int crypto_qti_virt_program_key(const struct blk_crypto_key *key,
				unsigned int slot);
/**
 * crypto_qti_virt_invalidate_key() - sends the virtual slot info to the
 * Back End (BE); the BE evicts the key from the specified keyslot in
 * the hardware.
 *
 * @slot: virtual slot
 *
 * Return: zero on success, else a -errno value
 */
int crypto_qti_virt_invalidate_key(unsigned int slot);
/**
 * crypto_qti_virt_derive_raw_secret_platform() - derives the software
 * secret from a wrapped key.
 *
 * @wrapped_key: The wrapped key
 * @wrapped_key_size: Size of the wrapped key in bytes
 * @secret: (output) the software secret
 * @secret_size: the number of secret bytes to derive
 *
 * Return: zero on success, else a -errno value
 */
int crypto_qti_virt_derive_raw_secret_platform(const u8 *wrapped_key,
					       unsigned int wrapped_key_size,
					       u8 *secret,
					       unsigned int secret_size);
/**
 * crypto_qti_virt_ice_get_info() - determines the total number of
 * available keyslots for the virtual machine.
 *
 * @total_num_slots: (output) filled with the max number of slots.
 *
 * Return: zero on success, else a -errno value
 */
int crypto_qti_virt_ice_get_info(uint32_t *total_num_slots);
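
/**
 * crypto_qti_virt_get_crypto_capabilities() - queries the Back End (BE)
 * for the crypto capabilities supported by the storage hardware.
 *
 * @crypto_modes_supported: (output) array of per-mode capabilities
 * @crypto_array_size: size of the array in bytes
 *
 * Return: zero on success, else a -errno value
 */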
int crypto_qti_virt_get_crypto_capabilities(unsigned int *crypto_modes_supported,
					    uint32_t crypto_array_size);
#else
static inline int crypto_qti_virt_program_key(const struct blk_crypto_key *key,
					      unsigned int slot)
{
	return -EOPNOTSUPP;
}
static inline int crypto_qti_virt_invalidate_key(unsigned int slot)
{
	return -EOPNOTSUPP;
}
static inline int crypto_qti_virt_derive_raw_secret_platform(
						const u8 *wrapped_key,
						unsigned int wrapped_key_size,
						u8 *secret,
						unsigned int secret_size)
{
	return -EOPNOTSUPP;
}
static inline int crypto_qti_virt_ice_get_info(uint32_t *total_num_slots)
{
	return -EOPNOTSUPP;
}
static inline int crypto_qti_virt_get_crypto_capabilities(unsigned int *crypto_modes_supported,
							   uint32_t crypto_array_size)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_QTI_CRYPTO_VIRTUALIZATION */
#endif /* _CRYPTO_QTI_VIRT_H */
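
For orientation, a hedged sketch of a keyslot program/evict round trip through this interface, as the keyslot ops in virtio_blk_qti_crypto.c drive it (example_key_cycle is illustrative, not part of the patch):

/* Hedged illustration: a program/evict cycle against a virtual slot.
 * example_key_cycle is a hypothetical caller, not part of this patch.
 */
static int example_key_cycle(const struct blk_crypto_key *key,
			     unsigned int slot)
{
	int err;

	err = crypto_qti_virt_program_key(key, slot);	/* FBE_SET_KEY_V2 */
	if (err)
		return err;

	/* ... I/O runs with ice_info.ice_slot == slot ... */

	return crypto_qti_virt_invalidate_key(slot);	/* FBE_CLEAR_KEY_V2 */
}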