crypto: msm: Add QTI crypto drivers

This is a snapshot of the crypto drivers as of msm-4.19
commit 52d84be6ae6e151ba ("crypto: msm: qcrypto: Fix null
pointer dereference error in qcrypto driver")
with changes to comply with msm-5.4 formatting requirements.

Change-Id: Ic566a15e41511d229a9e476c03962753da583946
Signed-off-by: Zhen Kong <zkong@codeaurora.org>
17 changed files with 17043 additions and 0 deletions

drivers/crypto/Kconfig

@@ -626,6 +626,48 @@ config CRYPTO_DEV_QCE
hardware. To compile this driver as a module, choose M here. The
module will be called qcrypto.
config CRYPTO_DEV_QCOM_MSM_QCE
tristate "QTI Crypto Engine (QCE) module"
depends on ARCH_QCOM
help
This driver supports QTI Crypto Engine accelerator hardware, which
is present on SDM845, etc. This is the core crypto driver which adds
CE5.0 functionality. To compile this driver as a module, choose
M here. The module will be called qce50.
config CRYPTO_DEV_QCRYPTO
tristate "QTI Crypto accelerator"
depends on ARCH_QCOM
select CRYPTO_DES
select CRYPTO_ALGAPI
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
help
This driver supports QTI crypto acceleration
for kernel clients. To compile this driver as a module,
choose M here: the module will be called qcrypto. Select Y
to build it into the kernel.
config CRYPTO_DEV_QCEDEV
tristate "QCEDEV Interface to CE module"
depends on ARCH_QCOM
help
This driver supports the QTI QCEDEV Crypto Engine 5.0,
exposing the QCE hardware accelerator to userspace
via IOCTLs.
To compile this driver as a module, choose M here: the
module will be called qcedev.
config CRYPTO_DEV_OTA_CRYPTO
tristate "OTA Crypto module"
depends on ARCH_QCOM
help
This driver supports QTI OTA Crypto on FSM9xxx devices.
To compile this driver as a module, choose M here: the
module will be called ota_crypto. Select Y to build it
into the kernel.
config CRYPTO_DEV_QCOM_RNG
tristate "Qualcomm Random Number Generator Driver"
depends on ARCH_QCOM || COMPILE_TEST
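
For reference, a minimal sketch of how these options could be enabled in a board defconfig (a usage illustration, not part of this patch; modular vs. built-in is a product decision):

CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=m
CONFIG_CRYPTO_DEV_QCEDEV=m
CONFIG_CRYPTO_DEV_QCRYPTO=m
CONFIG_CRYPTO_DEV_OTA_CRYPTO=m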

drivers/crypto/Makefile

@@ -34,6 +34,7 @@ obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o
obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += msm/
obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o

drivers/crypto/msm/Makefile

@@ -0,0 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += qce50.o
obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev.o
obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev_smmu.o
obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto.o

drivers/crypto/msm/ota_crypto.c

@@ -0,0 +1,994 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* QTI Over the Air (OTA) Crypto driver
*
* Copyright (c) 2010-2014,2017-2020 The Linux Foundation. All rights reserved.
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/cache.h>
#include <linux/qcota.h>
#include "qce.h"
#include "qce_ota.h"
enum qce_ota_oper_enum {
QCE_OTA_F8_OPER = 0,
QCE_OTA_MPKT_F8_OPER = 1,
QCE_OTA_F9_OPER = 2,
QCE_OTA_VAR_MPKT_F8_OPER = 3,
QCE_OTA_OPER_LAST
};
struct ota_dev_control;
struct ota_async_req {
struct list_head rlist;
struct completion complete;
int err;
enum qce_ota_oper_enum op;
union {
struct qce_f9_req f9_req;
struct qce_f8_req f8_req;
struct qce_f8_multi_pkt_req f8_mp_req;
struct qce_f8_variable_multi_pkt_req f8_v_mp_req;
} req;
unsigned int steps;
struct ota_qce_dev *pqce;
};
/*
* Register ourselves as a char device, /dev/qcota0, so that the OTA
* engine can be accessed from userspace.
*/
#define QCOTA_DEV "qcota0"
struct ota_dev_control {
/* char device */
struct cdev cdev;
int minor;
struct list_head ready_commands;
unsigned int magic;
struct list_head qce_dev;
spinlock_t lock;
struct mutex register_lock;
bool registered;
uint32_t total_units;
};
struct ota_qce_dev {
struct list_head qlist;
/* qce handle */
void *qce;
/* platform device */
struct platform_device *pdev;
struct ota_async_req *active_command;
struct tasklet_struct done_tasklet;
struct ota_dev_control *podev;
uint32_t unit;
u64 total_req;
u64 err_req;
};
#define OTA_MAGIC 0x4f544143
static long qcota_ioctl(struct file *file,
unsigned int cmd, unsigned long arg);
static int qcota_open(struct inode *inode, struct file *file);
static int qcota_release(struct inode *inode, struct file *file);
static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq);
static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv, int ret);
static const struct file_operations qcota_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = qcota_ioctl,
.open = qcota_open,
.release = qcota_release,
};
static struct ota_dev_control qcota_dev = {
.magic = OTA_MAGIC,
};
static dev_t qcota_device_no;
static struct class *driver_class;
static struct device *class_dev;
#define DEBUG_MAX_FNAME 16
#define DEBUG_MAX_RW_BUF 1024
struct qcota_stat {
u64 f8_req;
u64 f8_mp_req;
u64 f8_v_mp_req;
u64 f9_req;
u64 f8_op_success;
u64 f8_op_fail;
u64 f8_mp_op_success;
u64 f8_mp_op_fail;
u64 f8_v_mp_op_success;
u64 f8_v_mp_op_fail;
u64 f9_op_success;
u64 f9_op_fail;
};
static struct qcota_stat _qcota_stat;
static struct dentry *_debug_dent;
static char _debug_read_buf[DEBUG_MAX_RW_BUF];
static int _debug_qcota;
static struct ota_dev_control *qcota_control(void)
{
return &qcota_dev;
}
static int qcota_open(struct inode *inode, struct file *file)
{
struct ota_dev_control *podev;
podev = qcota_control();
if (podev == NULL) {
pr_err("%s: no such device %d\n", __func__,
MINOR(inode->i_rdev));
return -ENOENT;
}
file->private_data = podev;
return 0;
}
static int qcota_release(struct inode *inode, struct file *file)
{
struct ota_dev_control *podev;
podev = file->private_data;
if (podev != NULL && podev->magic != OTA_MAGIC) {
pr_err("%s: invalid handle %pK\n",
__func__, podev);
}
file->private_data = NULL;
return 0;
}
static bool _next_v_mp_req(struct ota_async_req *areq)
{
unsigned char *p;
if (areq->err)
return false;
if (++areq->steps >= areq->req.f8_v_mp_req.num_pkt)
return false;
p = areq->req.f8_v_mp_req.qce_f8_req.data_in;
p += areq->req.f8_v_mp_req.qce_f8_req.data_len;
p = (uint8_t *) ALIGN(((uintptr_t)p), L1_CACHE_BYTES);
areq->req.f8_v_mp_req.qce_f8_req.data_out = p;
areq->req.f8_v_mp_req.qce_f8_req.data_in = p;
areq->req.f8_v_mp_req.qce_f8_req.data_len =
areq->req.f8_v_mp_req.cipher_iov[areq->steps].size;
areq->req.f8_v_mp_req.qce_f8_req.count_c++;
return true;
}
static void req_done(unsigned long data)
{
struct ota_qce_dev *pqce = (struct ota_qce_dev *)data;
struct ota_dev_control *podev = pqce->podev;
struct ota_async_req *areq;
unsigned long flags;
struct ota_async_req *new_req = NULL;
int ret = 0;
bool schedule = true;
spin_lock_irqsave(&podev->lock, flags);
areq = pqce->active_command;
if (unlikely(areq == NULL))
pr_err("ota_crypto: %s, no active request\n", __func__);
else if (areq->op == QCE_OTA_VAR_MPKT_F8_OPER) {
if (_next_v_mp_req(areq)) {
/* execute next subcommand */
spin_unlock_irqrestore(&podev->lock, flags);
ret = start_req(pqce, areq);
if (unlikely(ret)) {
areq->err = ret;
schedule = true;
spin_lock_irqsave(&podev->lock, flags);
} else {
areq = NULL;
schedule = false;
}
} else {
/* done with this variable mp req */
schedule = true;
}
}
while (schedule) {
if (!list_empty(&podev->ready_commands)) {
new_req = container_of(podev->ready_commands.next,
struct ota_async_req, rlist);
list_del(&new_req->rlist);
pqce->active_command = new_req;
spin_unlock_irqrestore(&podev->lock, flags);
if (new_req) {
new_req->err = 0;
/* start a new request */
ret = start_req(pqce, new_req);
}
if (unlikely(new_req && ret)) {
new_req->err = ret;
complete(&new_req->complete);
ret = 0;
new_req = NULL;
spin_lock_irqsave(&podev->lock, flags);
} else {
schedule = false;
}
} else {
pqce->active_command = NULL;
spin_unlock_irqrestore(&podev->lock, flags);
schedule = false;
}
}
if (areq)
complete(&areq->complete);
}
static void f9_cb(void *cookie, unsigned char *icv, unsigned char *iv,
int ret)
{
struct ota_async_req *areq = (struct ota_async_req *) cookie;
struct ota_qce_dev *pqce;
pqce = areq->pqce;
areq->req.f9_req.mac_i = *((uint32_t *)icv);
if (ret) {
pqce->err_req++;
areq->err = -ENXIO;
} else
areq->err = 0;
tasklet_schedule(&pqce->done_tasklet);
}
static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv,
int ret)
{
struct ota_async_req *areq = (struct ota_async_req *) cookie;
struct ota_qce_dev *pqce;
pqce = areq->pqce;
if (ret) {
pqce->err_req++;
areq->err = -ENXIO;
} else {
areq->err = 0;
}
tasklet_schedule(&pqce->done_tasklet);
}
static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq)
{
struct qce_f9_req *pf9;
struct qce_f8_multi_pkt_req *p_mp_f8;
struct qce_f8_req *pf8;
int ret = 0;
/* command should be on the podev->active_command */
areq->pqce = pqce;
switch (areq->op) {
case QCE_OTA_F8_OPER:
pf8 = &areq->req.f8_req;
ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
break;
case QCE_OTA_MPKT_F8_OPER:
p_mp_f8 = &areq->req.f8_mp_req;
ret = qce_f8_multi_pkt_req(pqce->qce, p_mp_f8, areq, f8_cb);
break;
case QCE_OTA_F9_OPER:
pf9 = &areq->req.f9_req;
ret = qce_f9_req(pqce->qce, pf9, areq, f9_cb);
break;
case QCE_OTA_VAR_MPKT_F8_OPER:
pf8 = &areq->req.f8_v_mp_req.qce_f8_req;
ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
break;
default:
ret = -ENOTSUPP;
break;
}
areq->err = ret;
pqce->total_req++;
if (ret)
pqce->err_req++;
return ret;
}
static struct ota_qce_dev *schedule_qce(struct ota_dev_control *podev)
{
/* do this function with spinlock set */
struct ota_qce_dev *p;
if (unlikely(list_empty(&podev->qce_dev))) {
pr_err("%s: no valid qce to schedule\n", __func__);
return NULL;
}
list_for_each_entry(p, &podev->qce_dev, qlist) {
if (p->active_command == NULL)
return p;
}
return NULL;
}
static int submit_req(struct ota_async_req *areq, struct ota_dev_control *podev)
{
unsigned long flags;
int ret = 0;
struct qcota_stat *pstat;
struct ota_qce_dev *pqce;
areq->err = 0;
spin_lock_irqsave(&podev->lock, flags);
pqce = schedule_qce(podev);
if (pqce) {
pqce->active_command = areq;
spin_unlock_irqrestore(&podev->lock, flags);
ret = start_req(pqce, areq);
if (ret != 0) {
spin_lock_irqsave(&podev->lock, flags);
pqce->active_command = NULL;
spin_unlock_irqrestore(&podev->lock, flags);
}
} else {
list_add_tail(&areq->rlist, &podev->ready_commands);
spin_unlock_irqrestore(&podev->lock, flags);
}
if (ret == 0)
wait_for_completion(&areq->complete);
pstat = &_qcota_stat;
switch (areq->op) {
case QCE_OTA_F8_OPER:
if (areq->err)
pstat->f8_op_fail++;
else
pstat->f8_op_success++;
break;
case QCE_OTA_MPKT_F8_OPER:
if (areq->err)
pstat->f8_mp_op_fail++;
else
pstat->f8_mp_op_success++;
break;
case QCE_OTA_F9_OPER:
if (areq->err)
pstat->f9_op_fail++;
else
pstat->f9_op_success++;
break;
case QCE_OTA_VAR_MPKT_F8_OPER:
default:
if (areq->err)
pstat->f8_v_mp_op_fail++;
else
pstat->f8_v_mp_op_success++;
break;
}
return areq->err;
}
static long qcota_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
int err = 0;
struct ota_dev_control *podev;
uint8_t *user_src;
uint8_t *user_dst;
uint8_t *k_buf = NULL;
struct ota_async_req areq;
uint32_t total, temp;
struct qcota_stat *pstat;
int i;
uint8_t *p = NULL;
podev = file->private_data;
if (podev == NULL || podev->magic != OTA_MAGIC) {
pr_err("%s: invalid handle %pK\n",
__func__, podev);
return -ENOENT;
}
/* Verify user arguments. */
if (_IOC_TYPE(cmd) != QCOTA_IOC_MAGIC)
return -ENOTTY;
init_completion(&areq.complete);
pstat = &_qcota_stat;
switch (cmd) {
case QCOTA_F9_REQ:
if (!access_ok((void __user *)arg,
sizeof(struct qce_f9_req)))
return -EFAULT;
if (copy_from_user(&areq.req.f9_req, (void __user *)arg,
sizeof(struct qce_f9_req)))
return -EFAULT;
user_src = areq.req.f9_req.message;
if (!access_ok((void __user *)user_src,
areq.req.f9_req.msize))
return -EFAULT;
if (areq.req.f9_req.msize == 0)
return 0;
k_buf = memdup_user((const void __user *)user_src,
areq.req.f9_req.msize);
if (IS_ERR(k_buf))
return -EFAULT;
areq.req.f9_req.message = k_buf;
areq.op = QCE_OTA_F9_OPER;
pstat->f9_req++;
err = submit_req(&areq, podev);
areq.req.f9_req.message = user_src;
if (err == 0 && copy_to_user((void __user *)arg,
&areq.req.f9_req, sizeof(struct qce_f9_req))) {
err = -EFAULT;
}
kfree(k_buf);
break;
case QCOTA_F8_REQ:
if (!access_ok((void __user *)arg,
sizeof(struct qce_f8_req)))
return -EFAULT;
if (copy_from_user(&areq.req.f8_req, (void __user *)arg,
sizeof(struct qce_f8_req)))
return -EFAULT;
total = areq.req.f8_req.data_len;
user_src = areq.req.f8_req.data_in;
if (user_src != NULL) {
if (!access_ok((void __user *)
user_src, total))
return -EFAULT;
}
user_dst = areq.req.f8_req.data_out;
if (!access_ok((void __user *)
user_dst, total))
return -EFAULT;
if (!total)
return 0;
k_buf = kmalloc(total, GFP_KERNEL);
if (k_buf == NULL)
return -ENOMEM;
/* k_buf returned from kmalloc should be cache line aligned */
if (user_src && copy_from_user(k_buf,
(void __user *)user_src, total)) {
kfree(k_buf);
return -EFAULT;
}
if (user_src)
areq.req.f8_req.data_in = k_buf;
else
areq.req.f8_req.data_in = NULL;
areq.req.f8_req.data_out = k_buf;
areq.op = QCE_OTA_F8_OPER;
pstat->f8_req++;
err = submit_req(&areq, podev);
if (err == 0 && copy_to_user(user_dst, k_buf, total))
err = -EFAULT;
kfree(k_buf);
break;
case QCOTA_F8_MPKT_REQ:
if (!access_ok((void __user *)arg,
sizeof(struct qce_f8_multi_pkt_req)))
return -EFAULT;
if (copy_from_user(&areq.req.f8_mp_req, (void __user *)arg,
sizeof(struct qce_f8_multi_pkt_req)))
return -EFAULT;
temp = areq.req.f8_mp_req.qce_f8_req.data_len;
if (temp < (uint32_t) areq.req.f8_mp_req.cipher_start +
areq.req.f8_mp_req.cipher_size)
return -EINVAL;
total = (uint32_t) areq.req.f8_mp_req.num_pkt *
areq.req.f8_mp_req.qce_f8_req.data_len;
user_src = areq.req.f8_mp_req.qce_f8_req.data_in;
if (!access_ok((void __user *)
user_src, total))
return -EFAULT;
user_dst = areq.req.f8_mp_req.qce_f8_req.data_out;
if (!access_ok((void __user *)
user_dst, total))
return -EFAULT;
if (!total)
return 0;
/* k_buf should be cache line aligned */
k_buf = memdup_user((const void __user *)user_src, total);
if (IS_ERR(k_buf))
return -EFAULT;
areq.req.f8_mp_req.qce_f8_req.data_out = k_buf;
areq.req.f8_mp_req.qce_f8_req.data_in = k_buf;
areq.op = QCE_OTA_MPKT_F8_OPER;
pstat->f8_mp_req++;
err = submit_req(&areq, podev);
if (err == 0 && copy_to_user(user_dst, k_buf, total))
err = -EFAULT;
kfree(k_buf);
break;
case QCOTA_F8_V_MPKT_REQ:
if (!access_ok((void __user *)arg,
sizeof(struct qce_f8_variable_multi_pkt_req)))
return -EFAULT;
if (copy_from_user(&areq.req.f8_v_mp_req, (void __user *)arg,
sizeof(struct qce_f8_variable_multi_pkt_req)))
return -EFAULT;
if (areq.req.f8_v_mp_req.num_pkt > MAX_NUM_V_MULTI_PKT)
return -EINVAL;
for (i = 0, total = 0; i < areq.req.f8_v_mp_req.num_pkt; i++) {
if (!access_ok((void __user *)
areq.req.f8_v_mp_req.cipher_iov[i].addr,
areq.req.f8_v_mp_req.cipher_iov[i].size))
return -EFAULT;
total += areq.req.f8_v_mp_req.cipher_iov[i].size;
total = ALIGN(total, L1_CACHE_BYTES);
}
if (!total)
return 0;
k_buf = kmalloc(total, GFP_KERNEL);
if (k_buf == NULL)
return -ENOMEM;
for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
user_src = areq.req.f8_v_mp_req.cipher_iov[i].addr;
if (copy_from_user(p, (void __user *)user_src,
areq.req.f8_v_mp_req.cipher_iov[i].size)) {
kfree(k_buf);
return -EFAULT;
}
p += areq.req.f8_v_mp_req.cipher_iov[i].size;
p = (uint8_t *) ALIGN(((uintptr_t)p),
L1_CACHE_BYTES);
}
areq.req.f8_v_mp_req.qce_f8_req.data_out = k_buf;
areq.req.f8_v_mp_req.qce_f8_req.data_in = k_buf;
areq.req.f8_v_mp_req.qce_f8_req.data_len =
areq.req.f8_v_mp_req.cipher_iov[0].size;
areq.steps = 0;
areq.op = QCE_OTA_VAR_MPKT_F8_OPER;
pstat->f8_v_mp_req++;
err = submit_req(&areq, podev);
if (err != 0) {
kfree(k_buf);
return err;
}
for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
user_dst = areq.req.f8_v_mp_req.cipher_iov[i].addr;
if (copy_to_user(user_dst, p,
areq.req.f8_v_mp_req.cipher_iov[i].size)) {
kfree(k_buf);
return -EFAULT;
}
p += areq.req.f8_v_mp_req.cipher_iov[i].size;
p = (uint8_t *) ALIGN(((uintptr_t)p),
L1_CACHE_BYTES);
}
kfree(k_buf);
break;
default:
return -ENOTTY;
}
return err;
}
static int qcota_probe(struct platform_device *pdev)
{
void *handle = NULL;
int rc = 0;
struct ota_dev_control *podev;
struct ce_hw_support ce_support;
struct ota_qce_dev *pqce;
unsigned long flags;
podev = &qcota_dev;
pqce = kzalloc(sizeof(*pqce), GFP_KERNEL);
if (!pqce)
return -ENOMEM;
rc = alloc_chrdev_region(&qcota_device_no, 0, 1, QCOTA_DEV);
if (rc < 0) {
pr_err("alloc_chrdev_region failed %d\n", rc);
kfree(pqce);
return rc;
}
driver_class = class_create(THIS_MODULE, QCOTA_DEV);
if (IS_ERR(driver_class)) {
rc = -ENOMEM;
pr_err("class_create failed %d\n", rc);
goto exit_unreg_chrdev_region;
}
class_dev = device_create(driver_class, NULL, qcota_device_no, NULL,
QCOTA_DEV);
if (IS_ERR(class_dev)) {
rc = -ENOMEM;
pr_err("device_create failed %d\n", rc);
goto exit_destroy_class;
}
}
cdev_init(&podev->cdev, &qcota_fops);
podev->cdev.owner = THIS_MODULE;
rc = cdev_add(&podev->cdev, MKDEV(MAJOR(qcota_device_no), 0), 1);
if (rc < 0) {
pr_err("cdev_add failed %d\n", rc);
goto exit_destroy_device;
}
podev->minor = 0;
pqce->podev = podev;
pqce->active_command = NULL;
tasklet_init(&pqce->done_tasklet, req_done, (unsigned long)pqce);
/* open qce */
handle = qce_open(pdev, &rc);
if (handle == NULL) {
pr_err("%s: device %s, can not open qce\n",
__func__, pdev->name);
goto exit_del_cdev;
}
if (qce_hw_support(handle, &ce_support) < 0 ||
!ce_support.ota) {
pr_err("%s: device %s, qce does not support ota capability\n",
__func__, pdev->name);
rc = -ENODEV;
goto err;
}
pqce->qce = handle;
pqce->pdev = pdev;
pqce->total_req = 0;
pqce->err_req = 0;
platform_set_drvdata(pdev, pqce);
mutex_lock(&podev->register_lock);
pqce->unit = podev->total_units;
podev->total_units++;
podev->registered = true;
mutex_unlock(&podev->register_lock);
spin_lock_irqsave(&podev->lock, flags);
list_add_tail(&pqce->qlist, &podev->qce_dev);
spin_unlock_irqrestore(&podev->lock, flags);
return 0;
err:
if (handle)
qce_close(handle);
platform_set_drvdata(pdev, NULL);
tasklet_kill(&pqce->done_tasklet);
exit_del_cdev:
cdev_del(&podev->cdev);
exit_destroy_device:
device_destroy(driver_class, qcota_device_no);
exit_destroy_class:
class_destroy(driver_class);
exit_unreg_chrdev_region:
unregister_chrdev_region(qcota_device_no, 1);
kfree(pqce);
return rc;
}
static int qcota_remove(struct platform_device *pdev)
{
struct ota_dev_control *podev;
struct ota_qce_dev *pqce;
unsigned long flags;
pqce = platform_get_drvdata(pdev);
if (!pqce)
return 0;
if (pqce->qce)
qce_close(pqce->qce);
podev = pqce->podev;
if (!podev)
goto ret;
spin_lock_irqsave(&podev->lock, flags);
list_del(&pqce->qlist);
spin_unlock_irqrestore(&podev->lock, flags);
mutex_lock(&podev->register_lock);
if (--podev->total_units == 0) {
cdev_del(&podev->cdev);
device_destroy(driver_class, qcota_device_no);
class_destroy(driver_class);
unregister_chrdev_region(qcota_device_no, 1);
podev->registered = false;
}
mutex_unlock(&podev->register_lock);
ret:
tasklet_kill(&pqce->done_tasklet);
kfree(pqce);
return 0;
}
static const struct of_device_id qcota_match[] = {
{ .compatible = "qcom,qcota",
},
{}
};
static struct platform_driver qcota_plat_driver = {
.probe = qcota_probe,
.remove = qcota_remove,
.driver = {
.name = "qcota",
.of_match_table = qcota_match,
},
};
static int _disp_stats(void)
{
struct qcota_stat *pstat;
int len = 0;
struct ota_dev_control *podev = &qcota_dev;
unsigned long flags;
struct ota_qce_dev *p;
pstat = &_qcota_stat;
len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
"\nQTI OTA crypto accelerator Statistics:\n");
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 request : %llu\n",
pstat->f8_req);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 operation success : %llu\n",
pstat->f8_op_success);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 operation fail : %llu\n",
pstat->f8_op_fail);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 MP request : %llu\n",
pstat->f8_mp_req);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 MP operation success : %llu\n",
pstat->f8_mp_op_success);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 MP operation fail : %llu\n",
pstat->f8_mp_op_fail);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 Variable MP request : %llu\n",
pstat->f8_v_mp_req);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 Variable MP operation success: %llu\n",
pstat->f8_v_mp_op_success);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F8 Variable MP operation fail : %llu\n",
pstat->f8_v_mp_op_fail);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F9 request : %llu\n",
pstat->f9_req);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F9 operation success : %llu\n",
pstat->f9_op_success);
len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
" F9 operation fail : %llu\n",
pstat->f9_op_fail);
spin_lock_irqsave(&podev->lock, flags);
list_for_each_entry(p, &podev->qce_dev, qlist) {
len += scnprintf(
_debug_read_buf + len,
DEBUG_MAX_RW_BUF - len - 1,
" Engine %4d Req : %llu\n",
p->unit,
p->total_req
);
len += scnprintf(
_debug_read_buf + len,
DEBUG_MAX_RW_BUF - len - 1,
" Engine %4d Req Error : %llu\n",
p->unit,
p->err_req
);
}
spin_unlock_irqrestore(&podev->lock, flags);
return len;
}
static ssize_t _debug_stats_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
int rc = -EINVAL;
int len;
len = _disp_stats();
if (len <= count)
rc = simple_read_from_buffer((void __user *) buf, len,
ppos, (void *) _debug_read_buf, len);
return rc;
}
static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct ota_dev_control *podev = &qcota_dev;
unsigned long flags;
struct ota_qce_dev *p;
memset((char *)&_qcota_stat, 0, sizeof(struct qcota_stat));
spin_lock_irqsave(&podev->lock, flags);
list_for_each_entry(p, &podev->qce_dev, qlist) {
p->total_req = 0;
p->err_req = 0;
}
spin_unlock_irqrestore(&podev->lock, flags);
return count;
}
static const struct file_operations _debug_stats_ops = {
.open = simple_open,
.read = _debug_stats_read,
.write = _debug_stats_write,
};
static int _qcota_debug_init(void)
{
int rc;
char name[DEBUG_MAX_FNAME];
struct dentry *dent;
_debug_dent = debugfs_create_dir("qcota", NULL);
if (IS_ERR(_debug_dent)) {
pr_err("qcota debugfs_create_dir fail, error %ld\n",
PTR_ERR(_debug_dent));
return PTR_ERR(_debug_dent);
}
snprintf(name, DEBUG_MAX_FNAME-1, "stats-0");
_debug_qcota = 0;
dent = debugfs_create_file(name, 0644, _debug_dent,
&_debug_qcota, &_debug_stats_ops);
if (IS_ERR(dent)) {
pr_err("qcota debugfs_create_file fail, error %ld\n",
PTR_ERR(dent));
rc = PTR_ERR(dent);
goto err;
}
return 0;
err:
debugfs_remove_recursive(_debug_dent);
return rc;
}
static int __init qcota_init(void)
{
int rc;
struct ota_dev_control *podev;
rc = _qcota_debug_init();
if (rc)
return rc;
podev = &qcota_dev;
INIT_LIST_HEAD(&podev->ready_commands);
INIT_LIST_HEAD(&podev->qce_dev);
spin_lock_init(&podev->lock);
mutex_init(&podev->register_lock);
podev->registered = false;
podev->total_units = 0;
return platform_driver_register(&qcota_plat_driver);
}
static void __exit qcota_exit(void)
{
debugfs_remove_recursive(_debug_dent);
platform_driver_unregister(&qcota_plat_driver);
}
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QTI Ota Crypto driver");
module_init(qcota_init);
module_exit(qcota_exit);
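
A minimal userspace sketch of the F9 (integrity) path above, hedged: it populates only the fields this file demonstrably uses (message, msize, mac_i); the key, count, and direction fields defined in <linux/qcota.h> are assumed to exist and are left zeroed here.

/* Hypothetical userspace client for /dev/qcota0 (illustration only). */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/qcota.h>

int f9_mac_example(uint8_t *msg, uint16_t len, uint32_t *mac_out)
{
	struct qce_f9_req req;
	int fd = open("/dev/qcota0", O_RDWR);

	if (fd < 0)
		return -1;
	memset(&req, 0, sizeof(req));
	req.message = msg;	/* copied in by the driver via memdup_user() */
	req.msize = len;	/* key/count/direction fields left zeroed here */
	if (ioctl(fd, QCOTA_F9_REQ, &req) < 0) {	/* blocks until f9_cb runs */
		close(fd);
		return -1;
	}
	*mac_out = req.mac_i;	/* MAC-I written back by the driver */
	close(fd);
	return 0;
}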

drivers/crypto/msm/qce.h

@@ -0,0 +1,194 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QTI Crypto Engine driver API
*
* Copyright (c) 2010-2020, The Linux Foundation. All rights reserved.
*/
#ifndef __CRYPTO_MSM_QCE_H
#define __CRYPTO_MSM_QCE_H
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
/* SHA digest size in bytes */
#define SHA256_DIGESTSIZE 32
#define SHA1_DIGESTSIZE 20
#define AES_CE_BLOCK_SIZE 16
/* key size in bytes */
#define HMAC_KEY_SIZE (SHA1_DIGESTSIZE) /* hmac-sha1 */
#define SHA_HMAC_KEY_SIZE 64
#define DES_KEY_SIZE 8
#define TRIPLE_DES_KEY_SIZE 24
#define AES128_KEY_SIZE 16
#define AES192_KEY_SIZE 24
#define AES256_KEY_SIZE 32
#define MAX_CIPHER_KEY_SIZE AES256_KEY_SIZE
/* iv length in bytes */
#define AES_IV_LENGTH 16
#define DES_IV_LENGTH 8
#define MAX_IV_LENGTH AES_IV_LENGTH
/* Maximum number of bytes per transfer */
#define QCE_MAX_OPER_DATA 0xFF00
/* Maximum Nonce bytes */
#define MAX_NONCE 16
/* Crypto clock control flags */
#define QCE_CLK_ENABLE_FIRST 1
#define QCE_BW_REQUEST_FIRST 2
#define QCE_CLK_DISABLE_FIRST 3
#define QCE_BW_REQUEST_RESET_FIRST 4
/* interconnect average and peak bw for crypto device */
#define CRYPTO_AVG_BW 393600
#define CRYPTO_PEAK_BW 393600
typedef void (*qce_comp_func_ptr_t)(void *areq,
unsigned char *icv, unsigned char *iv, int ret);
/* Cipher algorithms supported */
enum qce_cipher_alg_enum {
CIPHER_ALG_DES = 0,
CIPHER_ALG_3DES = 1,
CIPHER_ALG_AES = 2,
CIPHER_ALG_LAST
};
/* Hash and hmac algorithms supported */
enum qce_hash_alg_enum {
QCE_HASH_SHA1 = 0,
QCE_HASH_SHA256 = 1,
QCE_HASH_SHA1_HMAC = 2,
QCE_HASH_SHA256_HMAC = 3,
QCE_HASH_AES_CMAC = 4,
QCE_HASH_LAST
};
/* Cipher encryption/decryption operations */
enum qce_cipher_dir_enum {
QCE_ENCRYPT = 0,
QCE_DECRYPT = 1,
QCE_CIPHER_DIR_LAST
};
/* Cipher algorithms modes */
enum qce_cipher_mode_enum {
QCE_MODE_CBC = 0,
QCE_MODE_ECB = 1,
QCE_MODE_CTR = 2,
QCE_MODE_XTS = 3,
QCE_MODE_CCM = 4,
QCE_CIPHER_MODE_LAST
};
/* Cipher operation type */
enum qce_req_op_enum {
QCE_REQ_ABLK_CIPHER = 0,
QCE_REQ_ABLK_CIPHER_NO_KEY = 1,
QCE_REQ_AEAD = 2,
QCE_REQ_LAST
};
/* Algorithms/features supported in CE HW engine */
struct ce_hw_support {
bool sha1_hmac_20; /* supports 20-byte HMAC key */
bool sha1_hmac; /* supports max HMAC key of 64 bytes */
bool sha256_hmac; /* supports max HMAC key of 64 bytes */
bool sha_hmac; /* supports SHA1 and SHA256 max HMAC key of 64 bytes */
bool cmac;
bool aes_key_192;
bool aes_xts;
bool aes_ccm;
bool ota;
bool aligned_only;
bool bam;
bool is_shared;
bool hw_key;
bool use_sw_aes_cbc_ecb_ctr_algo;
bool use_sw_aead_algo;
bool use_sw_aes_xts_algo;
bool use_sw_ahash_algo;
bool use_sw_hmac_algo;
bool use_sw_aes_ccm_algo;
bool clk_mgmt_sus_res;
bool req_bw_before_clk;
unsigned int ce_device;
unsigned int ce_hw_instance;
unsigned int max_request;
};
/* Sha operation parameters */
struct qce_sha_req {
qce_comp_func_ptr_t qce_cb; /* call back */
enum qce_hash_alg_enum alg; /* sha algorithm */
unsigned char *digest; /* sha digest */
struct scatterlist *src; /* pointer to scatter list entry */
uint32_t auth_data[4]; /* byte count */
unsigned char *authkey; /* auth key */
unsigned int authklen; /* auth key length */
bool first_blk; /* first block indicator */
bool last_blk; /* last block indicator */
unsigned int size; /* data length in bytes */
void *areq;
unsigned int flags;
};
struct qce_req {
enum qce_req_op_enum op; /* operation type */
qce_comp_func_ptr_t qce_cb; /* call back */
void *areq;
enum qce_cipher_alg_enum alg; /* cipher algorithms*/
enum qce_cipher_dir_enum dir; /* encryption? decryption? */
enum qce_cipher_mode_enum mode; /* algorithm mode */
enum qce_hash_alg_enum auth_alg;/* authentication algorithm for aead */
unsigned char *authkey; /* authentication key */
unsigned int authklen; /* authentication key length */
unsigned int authsize; /* authentication tag (ICV) length */
unsigned char nonce[MAX_NONCE];/* nonce for ccm mode */
unsigned char *assoc; /* Ptr to formatted associated data */
unsigned int assoclen; /* Formatted associated data length */
struct scatterlist *asg; /* Formatted associated data sg */
unsigned char *enckey; /* cipher key */
unsigned int encklen; /* cipher key length */
unsigned char *iv; /* initialization vector */
unsigned int ivsize; /* initialization vector size*/
unsigned int cryptlen; /* data length */
unsigned int use_pmem; /* is source of data PMEM allocated? */
struct qcedev_pmem_info *pmem; /* pointer to pmem_info structure*/
unsigned int flags;
};
struct qce_pm_table {
int (*suspend)(void *handle);
int (*resume)(void *handle);
};
extern struct qce_pm_table qce_pm_table;
void *qce_open(struct platform_device *pdev, int *rc);
int qce_close(void *handle);
int qce_aead_req(void *handle, struct qce_req *req);
int qce_ablk_cipher_req(void *handle, struct qce_req *req);
int qce_hw_support(void *handle, struct ce_hw_support *support);
int qce_process_sha_req(void *handle, struct qce_sha_req *s_req);
int qce_enable_clk(void *handle);
int qce_disable_clk(void *handle);
void qce_get_driver_stats(void *handle);
void qce_clear_driver_stats(void *handle);
void qce_dump_req(void *handle);
#endif /* __CRYPTO_MSM_QCE_H */
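
To make the calling convention concrete, here is a hedged in-kernel sketch of one AES-256-CBC request through this API. It assumes, based on struct qce_req above and on how clients like qcrypto drive this interface, that the engine takes its data scatterlists from the ablkcipher_request passed in areq (qce50.c itself is suppressed in this diff); error handling is trimmed. This is an illustration, not code from qcrypto.c.

/* Sketch of a QCE kernel client (illustration only). */
static void my_cipher_done(void *areq, unsigned char *icv,
			   unsigned char *iv, int ret)
{
	/* areq is the cookie set below; signal whoever is waiting */
}

static int my_encrypt_once(struct platform_device *pdev,
			   struct ablkcipher_request *ablk_req,
			   u8 *key, u8 *iv, unsigned int len)
{
	struct qce_req req = {};
	void *handle;
	int rc;

	handle = qce_open(pdev, &rc);
	if (!handle)
		return rc;
	req.op = QCE_REQ_ABLK_CIPHER;
	req.alg = CIPHER_ALG_AES;
	req.mode = QCE_MODE_CBC;
	req.dir = QCE_ENCRYPT;
	req.enckey = key;
	req.encklen = AES256_KEY_SIZE;
	req.iv = iv;
	req.ivsize = AES_IV_LENGTH;
	req.cryptlen = len;
	req.qce_cb = my_cipher_done;
	req.areq = ablk_req;	/* assumed to carry the src/dst sg lists */
	rc = qce_ablk_cipher_req(handle, &req);
	/* ... wait for my_cipher_done() before touching the data ... */
	qce_close(handle);
	return rc;
}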

drivers/crypto/msm/qce50.c (6186 lines; diff suppressed because it is too large)

drivers/crypto/msm/qce50.h

@@ -0,0 +1,239 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DRIVERS_CRYPTO_MSM_QCE50_H_
#define _DRIVERS_CRYPTO_MSM_QCE50_H_
#include <linux/msm-sps.h>
/* MAX Data xfer block size between BAM and CE */
#define MAX_CE_BAM_BURST_SIZE 0x40
#define QCEBAM_BURST_SIZE MAX_CE_BAM_BURST_SIZE
#define GET_VIRT_ADDR(x) \
((uintptr_t)pce_dev->coh_vmem + \
((uintptr_t)x - (uintptr_t)pce_dev->coh_pmem))
#define GET_PHYS_ADDR(x) \
(phys_addr_t)(((uintptr_t)pce_dev->coh_pmem + \
((uintptr_t)x - (uintptr_t)pce_dev->coh_vmem)))
#define CRYPTO_REG_SIZE 4
#define NUM_OF_CRYPTO_AUTH_IV_REG 16
#define NUM_OF_CRYPTO_CNTR_IV_REG 4
#define NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG 4
#define CRYPTO_TOTAL_REGISTERS_DUMPED 26
#define CRYPTO_RESULT_DUMP_SIZE \
ALIGN((CRYPTO_TOTAL_REGISTERS_DUMPED * CRYPTO_REG_SIZE), \
QCEBAM_BURST_SIZE)
/* QCE max number of descriptor in a descriptor list */
#define QCE_MAX_NUM_DESC 128
#define SPS_MAX_PKT_SIZE (32 * 1024 - 64)
/* default bam ipc log level */
#define QCE_BAM_DEFAULT_IPC_LOGLVL 2
/* State of consumer/producer Pipe */
enum qce_pipe_st_enum {
QCE_PIPE_STATE_IDLE = 0,
QCE_PIPE_STATE_IN_PROG = 1,
QCE_PIPE_STATE_COMP = 2,
QCE_PIPE_STATE_LAST
};
enum qce_xfer_type_enum {
QCE_XFER_HASHING,
QCE_XFER_CIPHERING,
QCE_XFER_AEAD,
QCE_XFER_F8,
QCE_XFER_F9,
QCE_XFER_TYPE_LAST
};
struct qce_sps_ep_conn_data {
struct sps_pipe *pipe;
struct sps_connect connect;
struct sps_register_event event;
};
/* CE Result DUMP format*/
struct ce_result_dump_format {
uint32_t auth_iv[NUM_OF_CRYPTO_AUTH_IV_REG];
uint32_t auth_byte_count[NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG];
uint32_t encr_cntr_iv[NUM_OF_CRYPTO_CNTR_IV_REG];
__be32 status;
__be32 status2;
};
struct qce_cmdlist_info {
unsigned long cmdlist;
struct sps_command_element *crypto_cfg;
struct sps_command_element *encr_seg_cfg;
struct sps_command_element *encr_seg_size;
struct sps_command_element *encr_seg_start;
struct sps_command_element *encr_key;
struct sps_command_element *encr_xts_key;
struct sps_command_element *encr_cntr_iv;
struct sps_command_element *encr_ccm_cntr_iv;
struct sps_command_element *encr_mask;
struct sps_command_element *encr_xts_du_size;
struct sps_command_element *auth_seg_cfg;
struct sps_command_element *auth_seg_size;
struct sps_command_element *auth_seg_start;
struct sps_command_element *auth_key;
struct sps_command_element *auth_iv;
struct sps_command_element *auth_nonce_info;
struct sps_command_element *auth_bytecount;
struct sps_command_element *seg_size;
struct sps_command_element *go_proc;
ptrdiff_t size;
};
struct qce_cmdlistptr_ops {
struct qce_cmdlist_info cipher_aes_128_cbc_ctr;
struct qce_cmdlist_info cipher_aes_256_cbc_ctr;
struct qce_cmdlist_info cipher_aes_128_ecb;
struct qce_cmdlist_info cipher_aes_256_ecb;
struct qce_cmdlist_info cipher_aes_128_xts;
struct qce_cmdlist_info cipher_aes_256_xts;
struct qce_cmdlist_info cipher_des_cbc;
struct qce_cmdlist_info cipher_des_ecb;
struct qce_cmdlist_info cipher_3des_cbc;
struct qce_cmdlist_info cipher_3des_ecb;
struct qce_cmdlist_info auth_sha1;
struct qce_cmdlist_info auth_sha256;
struct qce_cmdlist_info auth_sha1_hmac;
struct qce_cmdlist_info auth_sha256_hmac;
struct qce_cmdlist_info auth_aes_128_cmac;
struct qce_cmdlist_info auth_aes_256_cmac;
struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_128;
struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_256;
struct qce_cmdlist_info aead_hmac_sha1_cbc_des;
struct qce_cmdlist_info aead_hmac_sha1_cbc_3des;
struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_128;
struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_256;
struct qce_cmdlist_info aead_hmac_sha256_cbc_des;
struct qce_cmdlist_info aead_hmac_sha256_cbc_3des;
struct qce_cmdlist_info aead_aes_128_ccm;
struct qce_cmdlist_info aead_aes_256_ccm;
struct qce_cmdlist_info cipher_null;
struct qce_cmdlist_info f8_kasumi;
struct qce_cmdlist_info f8_snow3g;
struct qce_cmdlist_info f9_kasumi;
struct qce_cmdlist_info f9_snow3g;
struct qce_cmdlist_info unlock_all_pipes;
};
struct qce_ce_cfg_reg_setting {
uint32_t crypto_cfg_be;
uint32_t crypto_cfg_le;
uint32_t encr_cfg_aes_cbc_128;
uint32_t encr_cfg_aes_cbc_256;
uint32_t encr_cfg_aes_ecb_128;
uint32_t encr_cfg_aes_ecb_256;
uint32_t encr_cfg_aes_xts_128;
uint32_t encr_cfg_aes_xts_256;
uint32_t encr_cfg_aes_ctr_128;
uint32_t encr_cfg_aes_ctr_256;
uint32_t encr_cfg_aes_ccm_128;
uint32_t encr_cfg_aes_ccm_256;
uint32_t encr_cfg_des_cbc;
uint32_t encr_cfg_des_ecb;
uint32_t encr_cfg_3des_cbc;
uint32_t encr_cfg_3des_ecb;
uint32_t encr_cfg_kasumi;
uint32_t encr_cfg_snow3g;
uint32_t auth_cfg_cmac_128;
uint32_t auth_cfg_cmac_256;
uint32_t auth_cfg_sha1;
uint32_t auth_cfg_sha256;
uint32_t auth_cfg_hmac_sha1;
uint32_t auth_cfg_hmac_sha256;
uint32_t auth_cfg_aes_ccm_128;
uint32_t auth_cfg_aes_ccm_256;
uint32_t auth_cfg_aead_sha1_hmac;
uint32_t auth_cfg_aead_sha256_hmac;
uint32_t auth_cfg_kasumi;
uint32_t auth_cfg_snow3g;
};
struct ce_bam_info {
uint32_t bam_irq;
uint32_t bam_mem;
void __iomem *bam_iobase;
uint32_t ce_device;
uint32_t ce_hw_instance;
uint32_t bam_ee;
unsigned int pipe_pair_index;
unsigned int src_pipe_index;
unsigned int dest_pipe_index;
unsigned long bam_handle;
int ce_burst_size;
uint32_t minor_version;
struct qce_sps_ep_conn_data producer;
struct qce_sps_ep_conn_data consumer;
};
/* SPS data structure with buffers, command lists & command pointer lists */
struct ce_sps_data {
enum qce_pipe_st_enum producer_state; /* Producer pipe state */
int consumer_status; /* consumer pipe status */
int producer_status; /* producer pipe status */
struct sps_transfer in_transfer;
struct sps_transfer out_transfer;
struct qce_cmdlistptr_ops cmdlistptr;
uint32_t result_dump; /* result dump virtual address */
uint32_t result_dump_null;
uint32_t result_dump_phy; /* result dump physical address (32 bits) */
uint32_t result_dump_null_phy;
uint32_t ignore_buffer; /* ignore buffer virtual address */
struct ce_result_dump_format *result; /* pointer to result dump */
struct ce_result_dump_format *result_null;
};
struct ce_request_info {
atomic_t in_use;
bool in_prog;
enum qce_xfer_type_enum xfer_type;
struct ce_sps_data ce_sps;
qce_comp_func_ptr_t qce_cb; /* qce callback function pointer */
void *user;
void *areq;
int assoc_nents;
struct scatterlist *asg; /* Formatted associated data sg */
int src_nents;
int dst_nents;
dma_addr_t phy_iv_in;
unsigned char dec_iv[16];
int dir;
enum qce_cipher_mode_enum mode;
dma_addr_t phy_ota_src;
dma_addr_t phy_ota_dst;
unsigned int ota_size;
unsigned int req_len;
};
struct qce_driver_stats {
int no_of_timeouts;
int no_of_dummy_reqs;
int current_mode;
int outstanding_reqs;
};
#endif /* _DRIVERS_CRYPTO_MSM_QCE50_H_ */

drivers/crypto/msm/qce_ota.h

@@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QTI Crypto Engine driver OTA API
*
* Copyright (c) 2010-2020, The Linux Foundation. All rights reserved.
*/
#ifndef __CRYPTO_MSM_QCE_OTA_H
#define __CRYPTO_MSM_QCE_OTA_H
#include <linux/platform_device.h>
#include <linux/qcota.h>
int qce_f8_req(void *handle, struct qce_f8_req *req,
void *cookie, qce_comp_func_ptr_t qce_cb);
int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *req,
void *cookie, qce_comp_func_ptr_t qce_cb);
int qce_f9_req(void *handle, struct qce_f9_req *req,
void *cookie, qce_comp_func_ptr_t qce_cb);
#endif /* __CRYPTO_MSM_QCE_OTA_H */
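
These three entry points are exactly what start_req() in ota_crypto.c dispatches to. Below is a hedged sketch of an in-place F8 ciphering call; the key, bearer, and direction fields of struct qce_f8_req come from the UAPI in <linux/qcota.h> and are not shown here.

/* Illustrative F8 caller mirroring start_req() in ota_crypto.c. */
static void my_f8_done(void *cookie, unsigned char *icv,
		       unsigned char *iv, int ret)
{
	/* cookie is the third argument passed to qce_f8_req() below */
}

static int my_run_f8(void *qce_handle, struct qce_f8_req *req,
		     u8 *buf, u32 len)
{
	req->data_in = buf;	/* in-place: input and output share buf */
	req->data_out = buf;
	req->data_len = len;
	return qce_f8_req(qce_handle, req, req /* cookie */, my_f8_done);
}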

drivers/crypto/msm/qcedev.c (2320 lines; diff suppressed because it is too large)

drivers/crypto/msm/qcedev_smmu.c

@@ -0,0 +1,445 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* QTI (Qualcomm Technologies, Inc.) CE device driver.
*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*/
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/qcedev.h>
#include "qcedevi.h"
#include "qcedev_smmu.h"
#include "soc/qcom/secure_buffer.h"
static int qcedev_setup_context_bank(struct context_bank_info *cb,
struct device *dev)
{
if (!dev || !cb) {
pr_err("%s err: invalid input params\n", __func__);
return -EINVAL;
}
cb->dev = dev;
if (!dev->dma_parms) {
dev->dma_parms = devm_kzalloc(dev,
sizeof(*dev->dma_parms), GFP_KERNEL);
if (!dev->dma_parms)
return -ENOMEM;
}
dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64));
return 0;
}
int qcedev_parse_context_bank(struct platform_device *pdev)
{
struct qcedev_control *podev;
struct context_bank_info *cb = NULL;
struct device_node *np = NULL;
int rc = 0;
if (!pdev) {
pr_err("%s err: invalid platform devices\n", __func__);
return -EINVAL;
}
if (!pdev->dev.parent) {
pr_err("%s err: failed to find a parent for %s\n",
__func__, dev_name(&pdev->dev));
return -EINVAL;
}
podev = dev_get_drvdata(pdev->dev.parent);
np = pdev->dev.of_node;
cb = devm_kzalloc(&pdev->dev, sizeof(*cb), GFP_KERNEL);
if (!cb) {
pr_err("%s ERROR = Failed to allocate cb\n", __func__);
return -ENOMEM;
}
INIT_LIST_HEAD(&cb->list);
list_add_tail(&cb->list, &podev->context_banks);
rc = of_property_read_string(np, "label", &cb->name);
if (rc)
pr_debug("%s ERROR = Unable to read label\n", __func__);
cb->is_secure = of_property_read_bool(np, "qcom,secure-context-bank");
rc = qcedev_setup_context_bank(cb, &pdev->dev);
if (rc) {
pr_err("%s err: cannot setup context bank %d\n", __func__, rc);
goto err_setup_cb;
}
return 0;
err_setup_cb:
list_del(&cb->list);
devm_kfree(&pdev->dev, cb);
return rc;
}
struct qcedev_mem_client *qcedev_mem_new_client(enum qcedev_mem_type mtype)
{
struct qcedev_mem_client *mem_client = NULL;
if (mtype != MEM_ION) {
pr_err("%s: err: Mem type not supported\n", __func__);
goto err;
}
mem_client = kzalloc(sizeof(*mem_client), GFP_KERNEL);
if (!mem_client)
goto err;
mem_client->mtype = mtype;
return mem_client;
err:
return NULL;
}
void qcedev_mem_delete_client(struct qcedev_mem_client *mem_client)
{
kfree(mem_client);
}
static bool is_iommu_present(struct qcedev_handle *qce_hndl)
{
return !list_empty(&qce_hndl->cntl->context_banks);
}
static struct context_bank_info *get_context_bank(
struct qcedev_handle *qce_hndl, bool is_secure)
{
struct qcedev_control *podev = qce_hndl->cntl;
struct context_bank_info *cb = NULL, *match = NULL;
list_for_each_entry(cb, &podev->context_banks, list) {
if (cb->is_secure == is_secure) {
match = cb;
break;
}
}
return match;
}
static int ion_map_buffer(struct qcedev_handle *qce_hndl,
struct qcedev_mem_client *mem_client, int fd,
unsigned int fd_size, struct qcedev_reg_buf_info *binfo)
{
unsigned long ion_flags = 0;
int rc = 0;
struct dma_buf *buf = NULL;
struct dma_buf_attachment *attach = NULL;
struct sg_table *table = NULL;
struct context_bank_info *cb = NULL;
buf = dma_buf_get(fd);
if (IS_ERR_OR_NULL(buf))
return -EINVAL;
rc = dma_buf_get_flags(buf, &ion_flags);
if (rc) {
pr_err("%s: err: failed to get ion flags: %d\n", __func__, rc);
goto map_err;
}
if (is_iommu_present(qce_hndl)) {
cb = get_context_bank(qce_hndl, ion_flags & ION_FLAG_SECURE);
if (!cb) {
pr_err("%s: err: failed to get context bank info\n",
__func__);
rc = -EIO;
goto map_err;
}
/* Prepare a dma buf for dma on the given device */
attach = dma_buf_attach(buf, cb->dev);
if (IS_ERR_OR_NULL(attach)) {
rc = PTR_ERR(attach) ?: -ENOMEM;
pr_err("%s: err: failed to attach dmabuf\n", __func__);
goto map_err;
}
/* Get the scatterlist for the given attachment */
attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
if (IS_ERR_OR_NULL(table)) {
rc = PTR_ERR(table) ?: -ENOMEM;
pr_err("%s: err: failed to map table\n", __func__);
goto map_table_err;
}
if (table->sgl) {
binfo->ion_buf.iova = sg_dma_address(table->sgl);
binfo->ion_buf.mapped_buf_size = sg_dma_len(table->sgl);
if (binfo->ion_buf.mapped_buf_size < fd_size) {
pr_err("%s: err: mapping failed, size mismatch\n",
__func__);
rc = -ENOMEM;
goto map_sg_err;
}
} else {
pr_err("%s: err: sg list is NULL\n", __func__);
rc = -ENOMEM;
goto map_sg_err;
}
binfo->ion_buf.mapping_info.dev = cb->dev;
binfo->ion_buf.mapping_info.mapping = cb->mapping;
binfo->ion_buf.mapping_info.table = table;
binfo->ion_buf.mapping_info.attach = attach;
binfo->ion_buf.mapping_info.buf = buf;
binfo->ion_buf.ion_fd = fd;
} else {
pr_err("%s: err: smmu not enabled\n", __func__);
rc = -EIO;
goto map_err;
}
return 0;
map_sg_err:
dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
map_table_err:
dma_buf_detach(buf, attach);
map_err:
dma_buf_put(buf);
return rc;
}
static int ion_unmap_buffer(struct qcedev_handle *qce_hndl,
struct qcedev_reg_buf_info *binfo)
{
struct dma_mapping_info *mapping_info = &binfo->ion_buf.mapping_info;
if (is_iommu_present(qce_hndl)) {
dma_buf_unmap_attachment(mapping_info->attach,
mapping_info->table, DMA_BIDIRECTIONAL);
dma_buf_detach(mapping_info->buf, mapping_info->attach);
dma_buf_put(mapping_info->buf);
}
return 0;
}
static int qcedev_map_buffer(struct qcedev_handle *qce_hndl,
struct qcedev_mem_client *mem_client, int fd,
unsigned int fd_size, struct qcedev_reg_buf_info *binfo)
{
int rc = -1;
switch (mem_client->mtype) {
case MEM_ION:
rc = ion_map_buffer(qce_hndl, mem_client, fd, fd_size, binfo);
break;
default:
pr_err("%s: err: Mem type not supported\n", __func__);
break;
}
if (rc)
pr_err("%s: err: failed to map buffer\n", __func__);
return rc;
}
static int qcedev_unmap_buffer(struct qcedev_handle *qce_hndl,
struct qcedev_mem_client *mem_client,
struct qcedev_reg_buf_info *binfo)
{
int rc = -1;
switch (mem_client->mtype) {
case MEM_ION:
rc = ion_unmap_buffer(qce_hndl, binfo);
break;
default:
pr_err("%s: err: Mem type not supported\n", __func__);
break;
}
if (rc)
pr_err("%s: err: failed to unmap buffer\n", __func__);
return rc;
}
int qcedev_check_and_map_buffer(void *handle,
int fd, unsigned int offset, unsigned int fd_size,
unsigned long long *vaddr)
{
bool found = false;
struct qcedev_reg_buf_info *binfo = NULL, *temp = NULL;
struct qcedev_mem_client *mem_client = NULL;
struct qcedev_handle *qce_hndl = handle;
int rc = 0;
unsigned long mapped_size = 0;
if (!handle || !vaddr || fd < 0 || offset >= fd_size) {
pr_err("%s: err: invalid input arguments\n", __func__);
return -EINVAL;
}
if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
pr_err("%s: err: invalid qcedev handle\n", __func__);
return -EINVAL;
}
mem_client = qce_hndl->cntl->mem_client;
if (mem_client->mtype != MEM_ION)
return -EPERM;
/* Check if the buffer fd is already mapped */
mutex_lock(&qce_hndl->registeredbufs.lock);
list_for_each_entry(temp, &qce_hndl->registeredbufs.list, list) {
if (temp->ion_buf.ion_fd == fd) {
found = true;
*vaddr = temp->ion_buf.iova;
mapped_size = temp->ion_buf.mapped_buf_size;
atomic_inc(&temp->ref_count);
break;
}
}
mutex_unlock(&qce_hndl->registeredbufs.lock);
/* If buffer fd is not mapped then create a fresh mapping */
if (!found) {
pr_debug("%s: info: ion fd not registered with driver\n",
__func__);
binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
if (!binfo) {
pr_err("%s: err: failed to allocate binfo\n",
__func__);
rc = -ENOMEM;
goto error;
}
rc = qcedev_map_buffer(qce_hndl, mem_client, fd,
fd_size, binfo);
if (rc) {
pr_err("%s: err: failed to map fd (%d) error = %d\n",
__func__, fd, rc);
goto error;
}
*vaddr = binfo->ion_buf.iova;
mapped_size = binfo->ion_buf.mapped_buf_size;
atomic_inc(&binfo->ref_count);
/* Add buffer mapping information to regd buffer list */
mutex_lock(&qce_hndl->registeredbufs.lock);
list_add_tail(&binfo->list, &qce_hndl->registeredbufs.list);
mutex_unlock(&qce_hndl->registeredbufs.lock);
}
/* Make sure the offset is within the mapped range */
if (offset >= mapped_size) {
pr_err(
"%s: err: Offset (%u) exceeds mapped size(%lu) for fd: %d\n",
__func__, offset, mapped_size, fd);
rc = -ERANGE;
goto unmap;
}
/* return the mapped virtual address adjusted by offset */
*vaddr += offset;
return 0;
unmap:
if (!found) {
mutex_lock(&qce_hndl->registeredbufs.lock);
list_del(&binfo->list);
mutex_unlock(&qce_hndl->registeredbufs.lock);
qcedev_unmap_buffer(handle, mem_client, binfo);
}
error:
kfree(binfo);
return rc;
}
int qcedev_check_and_unmap_buffer(void *handle, int fd)
{
struct qcedev_reg_buf_info *binfo = NULL, *dummy = NULL;
struct qcedev_mem_client *mem_client = NULL;
struct qcedev_handle *qce_hndl = handle;
bool found = false;
if (!handle || fd < 0) {
pr_err("%s: err: invalid input arguments\n", __func__);
return -EINVAL;
}
if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
pr_err("%s: err: invalid qcedev handle\n", __func__);
return -EINVAL;
}
mem_client = qce_hndl->cntl->mem_client;
if (mem_client->mtype != MEM_ION)
return -EPERM;
/* Check if the buffer fd is mapped and present in the regd list. */
mutex_lock(&qce_hndl->registeredbufs.lock);
list_for_each_entry_safe(binfo, dummy,
&qce_hndl->registeredbufs.list, list) {
if (binfo->ion_buf.ion_fd == fd) {
found = true;
atomic_dec(&binfo->ref_count);
/* Unmap only if there are no more references */
if (atomic_read(&binfo->ref_count) == 0) {
qcedev_unmap_buffer(qce_hndl,
mem_client, binfo);
list_del(&binfo->list);
kfree(binfo);
}
break;
}
}
mutex_unlock(&qce_hndl->registeredbufs.lock);
if (!found) {
pr_err("%s: err: calling unmap on unknown fd %d\n",
__func__, fd);
return -EINVAL;
}
return 0;
}
int qcedev_unmap_all_buffers(void *handle)
{
struct qcedev_reg_buf_info *binfo = NULL;
struct qcedev_mem_client *mem_client = NULL;
struct qcedev_handle *qce_hndl = handle;
struct list_head *pos;
if (!handle) {
pr_err("%s: err: invalid input arguments\n", __func__);
return -EINVAL;
}
if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
pr_err("%s: err: invalid qcedev handle\n", __func__);
return -EINVAL;
}
mem_client = qce_hndl->cntl->mem_client;
if (mem_client->mtype != MEM_ION)
return -EPERM;
mutex_lock(&qce_hndl->registeredbufs.lock);
while (!list_empty(&qce_hndl->registeredbufs.list)) {
pos = qce_hndl->registeredbufs.list.next;
binfo = list_entry(pos, struct qcedev_reg_buf_info, list);
if (binfo)
qcedev_unmap_buffer(qce_hndl, mem_client, binfo);
list_del(pos);
kfree(binfo);
}
mutex_unlock(&qce_hndl->registeredbufs.lock);
return 0;
}
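
A short sketch of the intended call flow (an assumption about how qcedev.c, suppressed above, consumes these helpers): resolve a dma-buf fd to an engine-visible IOVA, use it, then drop the reference.

/* Illustration only; 'qce_hndl' is the per-open struct qcedev_handle. */
static int use_fd_for_dma(struct qcedev_handle *qce_hndl, int fd,
			  unsigned int offset, unsigned int size)
{
	unsigned long long iova = 0;
	int rc;

	rc = qcedev_check_and_map_buffer(qce_hndl, fd, offset, size, &iova);
	if (rc)
		return rc;	/* -EINVAL, -ENOMEM or -ERANGE per the code above */
	/* ... program 'iova' into the request handed to the crypto engine ... */
	return qcedev_check_and_unmap_buffer(qce_hndl, fd);
}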

drivers/crypto/msm/qcedev_smmu.h

@@ -0,0 +1,82 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QTI (Qualcomm Technologies, Inc.) CE device driver.
*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DRIVERS_CRYPTO_PARSE_H_
#define _DRIVERS_CRYPTO_PARSE_H_
#include <linux/dma-iommu.h>
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/iommu.h>
#include <linux/msm_dma_iommu_mapping.h>
#include <linux/msm_ion.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/types.h>
struct context_bank_info {
struct list_head list;
const char *name;
u32 buffer_type;
u32 start_addr;
u32 size;
bool is_secure;
struct device *dev;
struct dma_iommu_mapping *mapping;
};
enum qcedev_mem_type {
MEM_ION,
};
struct qcedev_mem_client {
enum qcedev_mem_type mtype;
};
struct dma_mapping_info {
struct device *dev;
struct dma_iommu_mapping *mapping;
struct sg_table *table;
struct dma_buf_attachment *attach;
struct dma_buf *buf;
};
struct qcedev_ion_buf_info {
struct dma_mapping_info mapping_info;
dma_addr_t iova;
unsigned long mapped_buf_size;
int ion_fd;
};
struct qcedev_reg_buf_info {
struct list_head list;
union {
struct qcedev_ion_buf_info ion_buf;
};
atomic_t ref_count;
};
struct qcedev_buffer_list {
struct list_head list;
struct mutex lock;
};
int qcedev_parse_context_bank(struct platform_device *pdev);
struct qcedev_mem_client *qcedev_mem_new_client(enum qcedev_mem_type mtype);
void qcedev_mem_delete_client(struct qcedev_mem_client *mem_client);
int qcedev_check_and_map_buffer(void *qce_hndl,
int fd, unsigned int offset, unsigned int fd_size,
unsigned long long *vaddr);
int qcedev_check_and_unmap_buffer(void *handle, int fd);
int qcedev_unmap_all_buffers(void *handle);
extern struct qcedev_reg_buf_info *global_binfo_in;
extern struct qcedev_reg_buf_info *global_binfo_out;
extern struct qcedev_reg_buf_info *global_binfo_res;
#endif

drivers/crypto/msm/qcedevi.h

@@ -0,0 +1,126 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* QTI crypto Driver
*
* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
*/
#ifndef __CRYPTO_MSM_QCEDEVI_H
#define __CRYPTO_MSM_QCEDEVI_H
#include <linux/interrupt.h>
#include <linux/cdev.h>
#include <crypto/hash.h>
#include <linux/platform_data/qcom_crypto_device.h>
#include <linux/fips_status.h>
#include "qce.h"
#include "qcedev_smmu.h"
#define CACHE_LINE_SIZE 32
#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
enum qcedev_crypto_oper_type {
QCEDEV_CRYPTO_OPER_CIPHER = 0,
QCEDEV_CRYPTO_OPER_SHA = 1,
QCEDEV_CRYPTO_OPER_LAST
};
struct qcedev_handle;
struct qcedev_cipher_req {
struct ablkcipher_request creq;
void *cookie;
};
struct qcedev_sha_req {
struct ahash_request sreq;
void *cookie;
};
struct qcedev_sha_ctxt {
uint32_t auth_data[4];
uint8_t digest[QCEDEV_MAX_SHA_DIGEST];
uint32_t diglen;
uint8_t trailing_buf[64];
uint32_t trailing_buf_len;
uint8_t first_blk;
uint8_t last_blk;
uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
bool init_done;
};
struct qcedev_async_req {
struct list_head list;
struct completion complete;
enum qcedev_crypto_oper_type op_type;
union {
struct qcedev_cipher_op_req cipher_op_req;
struct qcedev_sha_op_req sha_op_req;
};
union {
struct qcedev_cipher_req cipher_req;
struct qcedev_sha_req sha_req;
};
struct qcedev_handle *handle;
int err;
};
/**********************************************************************
* Register ourselves as a char device to be able to access the dev driver
* from userspace.
*/
#define QCEDEV_DEV "qce"
struct qcedev_control {
/* CE features supported by platform */
struct msm_ce_hw_support platform_support;
uint32_t ce_lock_count;
uint32_t high_bw_req_count;
/* CE features/algorithms supported by HW engine*/
struct ce_hw_support ce_support;
/* replaced msm_bus with interconnect path */
struct icc_path *icc_path;
/* char device */
struct cdev cdev;
int minor;
/* qce handle */
void *qce;
/* platform device */
struct platform_device *pdev;
unsigned int magic;
struct list_head ready_commands;
struct qcedev_async_req *active_command;
spinlock_t lock;
struct tasklet_struct done_tasklet;
struct list_head context_banks;
struct qcedev_mem_client *mem_client;
};
struct qcedev_handle {
/* qcedev control handle */
struct qcedev_control *cntl;
/* qce internal sha context*/
struct qcedev_sha_ctxt sha_ctxt;
/* qcedev mapped buffer list */
struct qcedev_buffer_list registeredbufs;
};
void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
unsigned char *iv, int ret);
void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
unsigned char *authdata, int ret);
#endif /* __CRYPTO_MSM_QCEDEVI_H */

drivers/crypto/msm/qcrypto.c (5575 lines; diff suppressed because it is too large)

drivers/crypto/msm/qcryptohw_50.h

@@ -0,0 +1,521 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
#define CRYPTO_BAM_CNFG_BITS_REG 0x0007C
#define CRYPTO_BAM_CD_ENABLE 27
#define CRYPTO_BAM_CD_ENABLE_MASK (1 << CRYPTO_BAM_CD_ENABLE)
#define QCE_AUTH_REG_BYTE_COUNT 4
#define CRYPTO_VERSION_REG 0x1A000
#define CRYPTO_DATA_IN0_REG 0x1A010
#define CRYPTO_DATA_IN1_REG 0x1A014
#define CRYPTO_DATA_IN2_REG 0x1A018
#define CRYPTO_DATA_IN3_REG 0x1A01C
#define CRYPTO_DATA_OUT0_REG 0x1A020
#define CRYPTO_DATA_OUT1_REG 0x1A024
#define CRYPTO_DATA_OUT2_REG 0x1A028
#define CRYPTO_DATA_OUT3_REG 0x1A02C
#define CRYPTO_STATUS_REG 0x1A100
#define CRYPTO_STATUS2_REG 0x1A104
#define CRYPTO_ENGINES_AVAIL 0x1A108
#define CRYPTO_FIFO_SIZES_REG 0x1A10C
#define CRYPTO_SEG_SIZE_REG 0x1A110
#define CRYPTO_GOPROC_REG 0x1A120
#define CRYPTO_GOPROC_QC_KEY_REG 0x1B000
#define CRYPTO_GOPROC_OEM_KEY_REG 0x1C000
#define CRYPTO_ENCR_SEG_CFG_REG 0x1A200
#define CRYPTO_ENCR_SEG_SIZE_REG 0x1A204
#define CRYPTO_ENCR_SEG_START_REG 0x1A208
#define CRYPTO_ENCR_KEY0_REG 0x1D000
#define CRYPTO_ENCR_KEY1_REG 0x1D004
#define CRYPTO_ENCR_KEY2_REG 0x1D008
#define CRYPTO_ENCR_KEY3_REG 0x1D00C
#define CRYPTO_ENCR_KEY4_REG 0x1D010
#define CRYPTO_ENCR_KEY5_REG 0x1D014
#define CRYPTO_ENCR_KEY6_REG 0x1D018
#define CRYPTO_ENCR_KEY7_REG 0x1D01C
#define CRYPTO_ENCR_XTS_KEY0_REG 0x1D020
#define CRYPTO_ENCR_XTS_KEY1_REG 0x1D024
#define CRYPTO_ENCR_XTS_KEY2_REG 0x1D028
#define CRYPTO_ENCR_XTS_KEY3_REG 0x1D02C
#define CRYPTO_ENCR_XTS_KEY4_REG 0x1D030
#define CRYPTO_ENCR_XTS_KEY5_REG 0x1D034
#define CRYPTO_ENCR_XTS_KEY6_REG 0x1D038
#define CRYPTO_ENCR_XTS_KEY7_REG 0x1D03C
#define CRYPTO_ENCR_PIPE0_KEY0_REG 0x1E000
#define CRYPTO_ENCR_PIPE0_KEY1_REG 0x1E004
#define CRYPTO_ENCR_PIPE0_KEY2_REG 0x1E008
#define CRYPTO_ENCR_PIPE0_KEY3_REG 0x1E00C
#define CRYPTO_ENCR_PIPE0_KEY4_REG 0x1E010
#define CRYPTO_ENCR_PIPE0_KEY5_REG 0x1E014
#define CRYPTO_ENCR_PIPE0_KEY6_REG 0x1E018
#define CRYPTO_ENCR_PIPE0_KEY7_REG 0x1E01C
#define CRYPTO_ENCR_PIPE1_KEY0_REG 0x1E020
#define CRYPTO_ENCR_PIPE1_KEY1_REG 0x1E024
#define CRYPTO_ENCR_PIPE1_KEY2_REG 0x1E028
#define CRYPTO_ENCR_PIPE1_KEY3_REG 0x1E02C
#define CRYPTO_ENCR_PIPE1_KEY4_REG 0x1E030
#define CRYPTO_ENCR_PIPE1_KEY5_REG 0x1E034
#define CRYPTO_ENCR_PIPE1_KEY6_REG 0x1E038
#define CRYPTO_ENCR_PIPE1_KEY7_REG 0x1E03C
#define CRYPTO_ENCR_PIPE2_KEY0_REG 0x1E040
#define CRYPTO_ENCR_PIPE2_KEY1_REG 0x1E044
#define CRYPTO_ENCR_PIPE2_KEY2_REG 0x1E048
#define CRYPTO_ENCR_PIPE2_KEY3_REG 0x1E04C
#define CRYPTO_ENCR_PIPE2_KEY4_REG 0x1E050
#define CRYPTO_ENCR_PIPE2_KEY5_REG 0x1E054
#define CRYPTO_ENCR_PIPE2_KEY6_REG 0x1E058
#define CRYPTO_ENCR_PIPE2_KEY7_REG 0x1E05C
#define CRYPTO_ENCR_PIPE3_KEY0_REG 0x1E060
#define CRYPTO_ENCR_PIPE3_KEY1_REG 0x1E064
#define CRYPTO_ENCR_PIPE3_KEY2_REG 0x1E068
#define CRYPTO_ENCR_PIPE3_KEY3_REG 0x1E06C
#define CRYPTO_ENCR_PIPE3_KEY4_REG 0x1E070
#define CRYPTO_ENCR_PIPE3_KEY5_REG 0x1E074
#define CRYPTO_ENCR_PIPE3_KEY6_REG 0x1E078
#define CRYPTO_ENCR_PIPE3_KEY7_REG 0x1E07C
#define CRYPTO_ENCR_PIPE0_XTS_KEY0_REG 0x1E200
#define CRYPTO_ENCR_PIPE0_XTS_KEY1_REG 0x1E204
#define CRYPTO_ENCR_PIPE0_XTS_KEY2_REG 0x1E208
#define CRYPTO_ENCR_PIPE0_XTS_KEY3_REG 0x1E20C
#define CRYPTO_ENCR_PIPE0_XTS_KEY4_REG 0x1E210
#define CRYPTO_ENCR_PIPE0_XTS_KEY5_REG 0x1E214
#define CRYPTO_ENCR_PIPE0_XTS_KEY6_REG 0x1E218
#define CRYPTO_ENCR_PIPE0_XTS_KEY7_REG 0x1E21C
#define CRYPTO_ENCR_PIPE1_XTS_KEY0_REG 0x1E220
#define CRYPTO_ENCR_PIPE1_XTS_KEY1_REG 0x1E224
#define CRYPTO_ENCR_PIPE1_XTS_KEY2_REG 0x1E228
#define CRYPTO_ENCR_PIPE1_XTS_KEY3_REG 0x1E22C
#define CRYPTO_ENCR_PIPE1_XTS_KEY4_REG 0x1E230
#define CRYPTO_ENCR_PIPE1_XTS_KEY5_REG 0x1E234
#define CRYPTO_ENCR_PIPE1_XTS_KEY6_REG 0x1E238
#define CRYPTO_ENCR_PIPE1_XTS_KEY7_REG 0x1E23C
#define CRYPTO_ENCR_PIPE2_XTS_KEY0_REG 0x1E240
#define CRYPTO_ENCR_PIPE2_XTS_KEY1_REG 0x1E244
#define CRYPTO_ENCR_PIPE2_XTS_KEY2_REG 0x1E248
#define CRYPTO_ENCR_PIPE2_XTS_KEY3_REG 0x1E24C
#define CRYPTO_ENCR_PIPE2_XTS_KEY4_REG 0x1E250
#define CRYPTO_ENCR_PIPE2_XTS_KEY5_REG 0x1E254
#define CRYPTO_ENCR_PIPE2_XTS_KEY6_REG 0x1E258
#define CRYPTO_ENCR_PIPE2_XTS_KEY7_REG 0x1E25C
#define CRYPTO_ENCR_PIPE3_XTS_KEY0_REG 0x1E260
#define CRYPTO_ENCR_PIPE3_XTS_KEY1_REG 0x1E264
#define CRYPTO_ENCR_PIPE3_XTS_KEY2_REG 0x1E268
#define CRYPTO_ENCR_PIPE3_XTS_KEY3_REG 0x1E26C
#define CRYPTO_ENCR_PIPE3_XTS_KEY4_REG 0x1E270
#define CRYPTO_ENCR_PIPE3_XTS_KEY5_REG 0x1E274
#define CRYPTO_ENCR_PIPE3_XTS_KEY6_REG 0x1E278
#define CRYPTO_ENCR_PIPE3_XTS_KEY7_REG 0x1E27C
#define CRYPTO_CNTR0_IV0_REG 0x1A20C
#define CRYPTO_CNTR1_IV1_REG 0x1A210
#define CRYPTO_CNTR2_IV2_REG 0x1A214
#define CRYPTO_CNTR3_IV3_REG 0x1A218
#define CRYPTO_CNTR_MASK_REG0 0x1A23C
#define CRYPTO_CNTR_MASK_REG1 0x1A238
#define CRYPTO_CNTR_MASK_REG2 0x1A234
#define CRYPTO_CNTR_MASK_REG 0x1A21C
#define CRYPTO_ENCR_CCM_INT_CNTR0_REG 0x1A220
#define CRYPTO_ENCR_CCM_INT_CNTR1_REG 0x1A224
#define CRYPTO_ENCR_CCM_INT_CNTR2_REG 0x1A228
#define CRYPTO_ENCR_CCM_INT_CNTR3_REG 0x1A22C
#define CRYPTO_ENCR_XTS_DU_SIZE_REG 0x1A230
#define CRYPTO_AUTH_SEG_CFG_REG 0x1A300
#define CRYPTO_AUTH_SEG_SIZE_REG 0x1A304
#define CRYPTO_AUTH_SEG_START_REG 0x1A308
#define CRYPTO_AUTH_KEY0_REG 0x1D040
#define CRYPTO_AUTH_KEY1_REG 0x1D044
#define CRYPTO_AUTH_KEY2_REG 0x1D048
#define CRYPTO_AUTH_KEY3_REG 0x1D04C
#define CRYPTO_AUTH_KEY4_REG 0x1D050
#define CRYPTO_AUTH_KEY5_REG 0x1D054
#define CRYPTO_AUTH_KEY6_REG 0x1D058
#define CRYPTO_AUTH_KEY7_REG 0x1D05C
#define CRYPTO_AUTH_KEY8_REG 0x1D060
#define CRYPTO_AUTH_KEY9_REG 0x1D064
#define CRYPTO_AUTH_KEY10_REG 0x1D068
#define CRYPTO_AUTH_KEY11_REG 0x1D06C
#define CRYPTO_AUTH_KEY12_REG 0x1D070
#define CRYPTO_AUTH_KEY13_REG 0x1D074
#define CRYPTO_AUTH_KEY14_REG 0x1D078
#define CRYPTO_AUTH_KEY15_REG 0x1D07C
#define CRYPTO_AUTH_PIPE0_KEY0_REG 0x1E800
#define CRYPTO_AUTH_PIPE0_KEY1_REG 0x1E804
#define CRYPTO_AUTH_PIPE0_KEY2_REG 0x1E808
#define CRYPTO_AUTH_PIPE0_KEY3_REG 0x1E80C
#define CRYPTO_AUTH_PIPE0_KEY4_REG 0x1E810
#define CRYPTO_AUTH_PIPE0_KEY5_REG 0x1E814
#define CRYPTO_AUTH_PIPE0_KEY6_REG 0x1E818
#define CRYPTO_AUTH_PIPE0_KEY7_REG 0x1E81C
#define CRYPTO_AUTH_PIPE0_KEY8_REG 0x1E820
#define CRYPTO_AUTH_PIPE0_KEY9_REG 0x1E824
#define CRYPTO_AUTH_PIPE0_KEY10_REG 0x1E828
#define CRYPTO_AUTH_PIPE0_KEY11_REG 0x1E82C
#define CRYPTO_AUTH_PIPE0_KEY12_REG 0x1E830
#define CRYPTO_AUTH_PIPE0_KEY13_REG 0x1E834
#define CRYPTO_AUTH_PIPE0_KEY14_REG 0x1E838
#define CRYPTO_AUTH_PIPE0_KEY15_REG 0x1E83C
#define CRYPTO_AUTH_PIPE1_KEY0_REG 0x1E880
#define CRYPTO_AUTH_PIPE1_KEY1_REG 0x1E884
#define CRYPTO_AUTH_PIPE1_KEY2_REG 0x1E888
#define CRYPTO_AUTH_PIPE1_KEY3_REG 0x1E88C
#define CRYPTO_AUTH_PIPE1_KEY4_REG 0x1E890
#define CRYPTO_AUTH_PIPE1_KEY5_REG 0x1E894
#define CRYPTO_AUTH_PIPE1_KEY6_REG 0x1E898
#define CRYPTO_AUTH_PIPE1_KEY7_REG 0x1E89C
#define CRYPTO_AUTH_PIPE1_KEY8_REG 0x1E8A0
#define CRYPTO_AUTH_PIPE1_KEY9_REG 0x1E8A4
#define CRYPTO_AUTH_PIPE1_KEY10_REG 0x1E8A8
#define CRYPTO_AUTH_PIPE1_KEY11_REG 0x1E8AC
#define CRYPTO_AUTH_PIPE1_KEY12_REG 0x1E8B0
#define CRYPTO_AUTH_PIPE1_KEY13_REG 0x1E8B4
#define CRYPTO_AUTH_PIPE1_KEY14_REG 0x1E8B8
#define CRYPTO_AUTH_PIPE1_KEY15_REG 0x1E8BC
#define CRYPTO_AUTH_PIPE2_KEY0_REG 0x1E900
#define CRYPTO_AUTH_PIPE2_KEY1_REG 0x1E904
#define CRYPTO_AUTH_PIPE2_KEY2_REG 0x1E908
#define CRYPTO_AUTH_PIPE2_KEY3_REG 0x1E90C
#define CRYPTO_AUTH_PIPE2_KEY4_REG 0x1E910
#define CRYPTO_AUTH_PIPE2_KEY5_REG 0x1E914
#define CRYPTO_AUTH_PIPE2_KEY6_REG 0x1E918
#define CRYPTO_AUTH_PIPE2_KEY7_REG 0x1E91C
#define CRYPTO_AUTH_PIPE2_KEY8_REG 0x1E920
#define CRYPTO_AUTH_PIPE2_KEY9_REG 0x1E924
#define CRYPTO_AUTH_PIPE2_KEY10_REG 0x1E928
#define CRYPTO_AUTH_PIPE2_KEY11_REG 0x1E92C
#define CRYPTO_AUTH_PIPE2_KEY12_REG 0x1E930
#define CRYPTO_AUTH_PIPE2_KEY13_REG 0x1E934
#define CRYPTO_AUTH_PIPE2_KEY14_REG 0x1E938
#define CRYPTO_AUTH_PIPE2_KEY15_REG 0x1E93C
#define CRYPTO_AUTH_PIPE3_KEY0_REG 0x1E980
#define CRYPTO_AUTH_PIPE3_KEY1_REG 0x1E984
#define CRYPTO_AUTH_PIPE3_KEY2_REG 0x1E988
#define CRYPTO_AUTH_PIPE3_KEY3_REG 0x1E98C
#define CRYPTO_AUTH_PIPE3_KEY4_REG 0x1E990
#define CRYPTO_AUTH_PIPE3_KEY5_REG 0x1E994
#define CRYPTO_AUTH_PIPE3_KEY6_REG 0x1E998
#define CRYPTO_AUTH_PIPE3_KEY7_REG 0x1E99C
#define CRYPTO_AUTH_PIPE3_KEY8_REG 0x1E9A0
#define CRYPTO_AUTH_PIPE3_KEY9_REG 0x1E9A4
#define CRYPTO_AUTH_PIPE3_KEY10_REG 0x1E9A8
#define CRYPTO_AUTH_PIPE3_KEY11_REG 0x1E9AC
#define CRYPTO_AUTH_PIPE3_KEY12_REG 0x1E9B0
#define CRYPTO_AUTH_PIPE3_KEY13_REG 0x1E9B4
#define CRYPTO_AUTH_PIPE3_KEY14_REG 0x1E9B8
#define CRYPTO_AUTH_PIPE3_KEY15_REG 0x1E9BC
#define CRYPTO_AUTH_IV0_REG 0x1A310
#define CRYPTO_AUTH_IV1_REG 0x1A314
#define CRYPTO_AUTH_IV2_REG 0x1A318
#define CRYPTO_AUTH_IV3_REG 0x1A31C
#define CRYPTO_AUTH_IV4_REG 0x1A320
#define CRYPTO_AUTH_IV5_REG 0x1A324
#define CRYPTO_AUTH_IV6_REG 0x1A328
#define CRYPTO_AUTH_IV7_REG 0x1A32C
#define CRYPTO_AUTH_IV8_REG 0x1A330
#define CRYPTO_AUTH_IV9_REG 0x1A334
#define CRYPTO_AUTH_IV10_REG 0x1A338
#define CRYPTO_AUTH_IV11_REG 0x1A33C
#define CRYPTO_AUTH_IV12_REG 0x1A340
#define CRYPTO_AUTH_IV13_REG 0x1A344
#define CRYPTO_AUTH_IV14_REG 0x1A348
#define CRYPTO_AUTH_IV15_REG 0x1A34C
#define CRYPTO_AUTH_INFO_NONCE0_REG 0x1A350
#define CRYPTO_AUTH_INFO_NONCE1_REG 0x1A354
#define CRYPTO_AUTH_INFO_NONCE2_REG 0x1A358
#define CRYPTO_AUTH_INFO_NONCE3_REG 0x1A35C
#define CRYPTO_AUTH_BYTECNT0_REG 0x1A390
#define CRYPTO_AUTH_BYTECNT1_REG 0x1A394
#define CRYPTO_AUTH_BYTECNT2_REG 0x1A398
#define CRYPTO_AUTH_BYTECNT3_REG 0x1A39C
#define CRYPTO_AUTH_EXP_MAC0_REG 0x1A3A0
#define CRYPTO_AUTH_EXP_MAC1_REG 0x1A3A4
#define CRYPTO_AUTH_EXP_MAC2_REG 0x1A3A8
#define CRYPTO_AUTH_EXP_MAC3_REG 0x1A3AC
#define CRYPTO_AUTH_EXP_MAC4_REG 0x1A3B0
#define CRYPTO_AUTH_EXP_MAC5_REG 0x1A3B4
#define CRYPTO_AUTH_EXP_MAC6_REG 0x1A3B8
#define CRYPTO_AUTH_EXP_MAC7_REG 0x1A3BC
#define CRYPTO_CONFIG_REG 0x1A400
#define CRYPTO_DEBUG_ENABLE_REG 0x1AF00
#define CRYPTO_DEBUG_REG 0x1AF04
/* Register bits */
#define CRYPTO_CORE_STEP_REV_MASK 0xFFFF
#define CRYPTO_CORE_STEP_REV 0 /* bit 15-0 */
#define CRYPTO_CORE_MAJOR_REV_MASK 0xFF000000
#define CRYPTO_CORE_MAJOR_REV 24 /* bit 31-24 */
#define CRYPTO_CORE_MINOR_REV_MASK 0xFF0000
#define CRYPTO_CORE_MINOR_REV 16 /* bit 23-16 */
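/*
 * Illustrative sketch, not part of the original header: decoding the
 * revision fields from a version-register readout. The iobase pointer
 * and a CRYPTO_VERSION_REG offset are assumed to be defined elsewhere
 * in the driver.
 *
 *	u32 rev  = readl_relaxed(iobase + CRYPTO_VERSION_REG);
 *	u32 maj  = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
 *	u32 min  = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
 *	u32 step = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;
 */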
/* status reg */
#define CRYPTO_MAC_FAILED 31
#define CRYPTO_DOUT_SIZE_AVAIL 26 /* bit 30-26 */
#define CRYPTO_DOUT_SIZE_AVAIL_MASK (0x1F << CRYPTO_DOUT_SIZE_AVAIL)
#define CRYPTO_DIN_SIZE_AVAIL 21 /* bit 25-21 */
#define CRYPTO_DIN_SIZE_AVAIL_MASK (0x1F << CRYPTO_DIN_SIZE_AVAIL)
#define CRYPTO_HSD_ERR 20
#define CRYPTO_ACCESS_VIOL 19
#define CRYPTO_PIPE_ACTIVE_ERR 18
#define CRYPTO_CFG_CHNG_ERR 17
#define CRYPTO_DOUT_ERR 16
#define CRYPTO_DIN_ERR 15
#define CRYPTO_AXI_ERR 14
#define CRYPTO_CRYPTO_STATE 10 /* bit 13-10 */
#define CRYPTO_CRYPTO_STATE_MASK (0xF << CRYPTO_CRYPTO_STATE)
#define CRYPTO_ENCR_BUSY 9
#define CRYPTO_AUTH_BUSY 8
#define CRYPTO_DOUT_INTR 7
#define CRYPTO_DIN_INTR 6
#define CRYPTO_OP_DONE_INTR 5
#define CRYPTO_ERR_INTR 4
#define CRYPTO_DOUT_RDY 3
#define CRYPTO_DIN_RDY 2
#define CRYPTO_OPERATION_DONE 1
#define CRYPTO_SW_ERR 0
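/*
 * Illustrative sketch, not part of the original header: the status-reg
 * values above are bit positions, so a readout is tested with shifts.
 * A CRYPTO_STATUS_REG offset is assumed to be defined elsewhere.
 *
 *	u32 status = readl_relaxed(iobase + CRYPTO_STATUS_REG);
 *	bool mac_failed = status & (1 << CRYPTO_MAC_FAILED);
 *	bool op_done    = status & (1 << CRYPTO_OPERATION_DONE);
 *	u32  state      = (status & CRYPTO_CRYPTO_STATE_MASK) >>
 *				CRYPTO_CRYPTO_STATE;
 */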
/* status2 reg */
#define CRYPTO_AXI_EXTRA 1
#define CRYPTO_LOCKED 2
/* config reg */
#define CRYPTO_REQ_SIZE 17 /* bit 20-17 */
#define CRYPTO_REQ_SIZE_MASK (0xF << CRYPTO_REQ_SIZE)
#define CRYPTO_REQ_SIZE_ENUM_1_BEAT 0
#define CRYPTO_REQ_SIZE_ENUM_2_BEAT 1
#define CRYPTO_REQ_SIZE_ENUM_3_BEAT 2
#define CRYPTO_REQ_SIZE_ENUM_4_BEAT 3
#define CRYPTO_REQ_SIZE_ENUM_5_BEAT 4
#define CRYPTO_REQ_SIZE_ENUM_6_BEAT 5
#define CRYPTO_REQ_SIZE_ENUM_7_BEAT 6
#define CRYPTO_REQ_SIZE_ENUM_8_BEAT 7
#define CRYPTO_REQ_SIZE_ENUM_9_BEAT 8
#define CRYPTO_REQ_SIZE_ENUM_10_BEAT 9
#define CRYPTO_REQ_SIZE_ENUM_11_BEAT 10
#define CRYPTO_REQ_SIZE_ENUM_12_BEAT 11
#define CRYPTO_REQ_SIZE_ENUM_13_BEAT 12
#define CRYPTO_REQ_SIZE_ENUM_14_BEAT 13
#define CRYPTO_REQ_SIZE_ENUM_15_BEAT 14
#define CRYPTO_REQ_SIZE_ENUM_16_BEAT 15
#define CRYPTO_MAX_QUEUED_REQ 14 /* bit 16-14 */
#define CRYPTO_MAX_QUEUED_REQ_MASK (0x7 << CRYPTO_MAX_QUEUED_REQ)
#define CRYPTO_ENUM_1_QUEUED_REQS 0
#define CRYPTO_ENUM_2_QUEUED_REQS 1
#define CRYPTO_ENUM_3_QUEUED_REQS 2
#define CRYPTO_IRQ_ENABLES 10 /* bit 13-10 */
#define CRYPTO_IRQ_ENABLES_MASK (0xF << CRYPTO_IRQ_ENABLES)
#define CRYPTO_LITTLE_ENDIAN_MODE 9
#define CRYPTO_LITTLE_ENDIAN_MASK (1 << CRYPTO_LITTLE_ENDIAN_MODE)
#define CRYPTO_PIPE_SET_SELECT 5 /* bit 8-5 */
#define CRYPTO_PIPE_SET_SELECT_MASK (0xF << CRYPTO_PIPE_SET_SELECT)
#define CRYPTO_HIGH_SPD_EN_N 4
#define CRYPTO_MASK_DOUT_INTR 3
#define CRYPTO_MASK_DIN_INTR 2
#define CRYPTO_MASK_OP_DONE_INTR 1
#define CRYPTO_MASK_ERR_INTR 0
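/*
 * Illustrative sketch, not part of the original header: composing a
 * config-reg value from the fields above, here 16-beat bus requests on
 * pipe set 0, with the DIN/DOUT interrupts masked and the error and
 * operation-done interrupts left enabled.
 *
 *	u32 cfg = (CRYPTO_REQ_SIZE_ENUM_16_BEAT << CRYPTO_REQ_SIZE) |
 *		  (0 << CRYPTO_PIPE_SET_SELECT) |
 *		  (1 << CRYPTO_MASK_DOUT_INTR) |
 *		  (1 << CRYPTO_MASK_DIN_INTR);
 *	writel_relaxed(cfg, iobase + CRYPTO_CONFIG_REG);
 */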
/* auth_seg_cfg reg */
#define CRYPTO_COMP_EXP_MAC 24
#define CRYPTO_COMP_EXP_MAC_DISABLED 0
#define CRYPTO_COMP_EXP_MAC_ENABLED 1
#define CRYPTO_F9_DIRECTION 23
#define CRYPTO_F9_DIRECTION_UPLINK 0
#define CRYPTO_F9_DIRECTION_DOWNLINK 1
#define CRYPTO_AUTH_NONCE_NUM_WORDS 20 /* bit 22-20 */
#define CRYPTO_AUTH_NONCE_NUM_WORDS_MASK \
(0x7 << CRYPTO_AUTH_NONCE_NUM_WORDS)
#define CRYPTO_USE_PIPE_KEY_AUTH 19
#define CRYPTO_USE_HW_KEY_AUTH 18
#define CRYPTO_FIRST 17
#define CRYPTO_LAST 16
#define CRYPTO_AUTH_POS 14 /* bit 15 .. 14*/
#define CRYPTO_AUTH_POS_MASK (0x3 << CRYPTO_AUTH_POS)
#define CRYPTO_AUTH_POS_BEFORE 0
#define CRYPTO_AUTH_POS_AFTER 1
#define CRYPTO_AUTH_SIZE 9 /* bits 13 .. 9*/
#define CRYPTO_AUTH_SIZE_MASK (0x1F << CRYPTO_AUTH_SIZE)
#define CRYPTO_AUTH_SIZE_SHA1 0
#define CRYPTO_AUTH_SIZE_SHA256 1
#define CRYPTO_AUTH_SIZE_ENUM_1_BYTES 0
#define CRYPTO_AUTH_SIZE_ENUM_2_BYTES 1
#define CRYPTO_AUTH_SIZE_ENUM_3_BYTES 2
#define CRYPTO_AUTH_SIZE_ENUM_4_BYTES 3
#define CRYPTO_AUTH_SIZE_ENUM_5_BYTES 4
#define CRYPTO_AUTH_SIZE_ENUM_6_BYTES 5
#define CRYPTO_AUTH_SIZE_ENUM_7_BYTES 6
#define CRYPTO_AUTH_SIZE_ENUM_8_BYTES 7
#define CRYPTO_AUTH_SIZE_ENUM_9_BYTES 8
#define CRYPTO_AUTH_SIZE_ENUM_10_BYTES 9
#define CRYPTO_AUTH_SIZE_ENUM_11_BYTES 10
#define CRYPTO_AUTH_SIZE_ENUM_12_BYTES 11
#define CRYPTO_AUTH_SIZE_ENUM_13_BYTES 12
#define CRYPTO_AUTH_SIZE_ENUM_14_BYTES 13
#define CRYPTO_AUTH_SIZE_ENUM_15_BYTES 14
#define CRYPTO_AUTH_SIZE_ENUM_16_BYTES 15
#define CRYPTO_AUTH_MODE 6 /* bit 8 .. 6*/
#define CRYPTO_AUTH_MODE_MASK (0x7 << CRYPTO_AUTH_MODE)
#define CRYPTO_AUTH_MODE_HASH 0
#define CRYPTO_AUTH_MODE_HMAC 1
#define CRYPTO_AUTH_MODE_CCM 0
#define CRYPTO_AUTH_MODE_CMAC 1
#define CRYPTO_AUTH_KEY_SIZE 3 /* bit 5 .. 3*/
#define CRYPTO_AUTH_KEY_SIZE_MASK (0x7 << CRYPTO_AUTH_KEY_SIZE)
#define CRYPTO_AUTH_KEY_SZ_AES128 0
#define CRYPTO_AUTH_KEY_SZ_AES256 2
#define CRYPTO_AUTH_ALG 0 /* bit 2 .. 0*/
#define CRYPTO_AUTH_ALG_MASK 7
#define CRYPTO_AUTH_ALG_NONE 0
#define CRYPTO_AUTH_ALG_SHA 1
#define CRYPTO_AUTH_ALG_AES 2
#define CRYPTO_AUTH_ALG_KASUMI 3
#define CRYPTO_AUTH_ALG_SNOW3G 4
#define CRYPTO_AUTH_ALG_ZUC 5
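/*
 * Illustrative sketch, not part of the original header: an auth segment
 * config for a final HMAC-SHA-256 pass, with authentication positioned
 * before encryption.
 *
 *	u32 acfg = (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
 *		   (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE) |
 *		   (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
 *		   (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS) |
 *		   (1 << CRYPTO_LAST);
 *	writel_relaxed(acfg, iobase + CRYPTO_AUTH_SEG_CFG_REG);
 */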
/* encr_xts_du_size reg */
#define CRYPTO_ENCR_XTS_DU_SIZE 0 /* bit 19-0 */
#define CRYPTO_ENCR_XTS_DU_SIZE_MASK 0xfffff
/* encr_seg_cfg reg */
#define CRYPTO_F8_KEYSTREAM_ENABLE 17 /* bit */
#define CRYPTO_F8_KEYSTREAM_DISABLED 0
#define CRYPTO_F8_KEYSTREAM_ENABLED 1
#define CRYPTO_F8_DIRECTION 16 /* bit */
#define CRYPTO_F8_DIRECTION_UPLINK 0
#define CRYPTO_F8_DIRECTION_DOWNLINK 1
#define CRYPTO_USE_PIPE_KEY_ENCR 15 /* bit */
#define CRYPTO_USE_PIPE_KEY_ENCR_ENABLED 1
#define CRYPTO_USE_KEY_REGISTERS 0
#define CRYPTO_USE_HW_KEY_ENCR 14
#define CRYPTO_USE_KEY_REG 0
#define CRYPTO_USE_HW_KEY 1
#define CRYPTO_LAST_CCM 13
#define CRYPTO_LAST_CCM_XFR 1
#define CRYPTO_INTERM_CCM_XFR 0
#define CRYPTO_CNTR_ALG 11 /* bit 12-11 */
#define CRYPTO_CNTR_ALG_MASK (3 << CRYPTO_CNTR_ALG)
#define CRYPTO_CNTR_ALG_NIST 0
#define CRYPTO_ENCODE 10
#define CRYPTO_ENCR_MODE 6 /* bit 9-6 */
#define CRYPTO_ENCR_MODE_MASK (0xF << CRYPTO_ENCR_MODE)
/* only valid when AES */
#define CRYPTO_ENCR_MODE_ECB 0
#define CRYPTO_ENCR_MODE_CBC 1
#define CRYPTO_ENCR_MODE_CTR 2
#define CRYPTO_ENCR_MODE_XTS 3
#define CRYPTO_ENCR_MODE_CCM 4
#define CRYPTO_ENCR_KEY_SZ 3 /* bit 5-3 */
#define CRYPTO_ENCR_KEY_SZ_MASK (7 << CRYPTO_ENCR_KEY_SZ)
#define CRYPTO_ENCR_KEY_SZ_DES 0
#define CRYPTO_ENCR_KEY_SZ_3DES 1
#define CRYPTO_ENCR_KEY_SZ_AES128 0
#define CRYPTO_ENCR_KEY_SZ_AES256 2
#define CRYPTO_ENCR_ALG 0 /* bit 2-0 */
#define CRYPTO_ENCR_ALG_MASK (7 << CRYPTO_ENCR_ALG)
#define CRYPTO_ENCR_ALG_NONE 0
#define CRYPTO_ENCR_ALG_DES 1
#define CRYPTO_ENCR_ALG_AES 2
#define CRYPTO_ENCR_ALG_KASUMI 4
#define CRYPTO_ENCR_ALG_SNOW_3G 5
#define CRYPTO_ENCR_ALG_ZUC 6
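/*
 * Illustrative sketch, not part of the original header: an encr segment
 * config selecting AES-256 in CBC mode, with the key taken from the
 * ENCR_KEYn registers rather than a pipe or HW key.
 *
 *	u32 ecfg = (CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
 *		   (CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
 *		   (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE) |
 *		   (CRYPTO_USE_KEY_REGISTERS << CRYPTO_USE_PIPE_KEY_ENCR);
 *	writel_relaxed(ecfg, iobase + CRYPTO_ENCR_SEG_CFG_REG);
 */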
/* goproc reg */
#define CRYPTO_GO 0
#define CRYPTO_CLR_CNTXT 1
#define CRYPTO_RESULTS_DUMP 2
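/*
 * Illustrative sketch, not part of the original header: kicking off a
 * queued operation with context clear and results dump requested.
 * A CRYPTO_GOPROC_REG offset is assumed to be defined elsewhere.
 *
 *	writel_relaxed((1 << CRYPTO_GO) | (1 << CRYPTO_CLR_CNTXT) |
 *		       (1 << CRYPTO_RESULTS_DUMP),
 *		       iobase + CRYPTO_GOPROC_REG);
 */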
/* F8 definition of CRYPTO_ENCR_CNTR1_IV1 REG */
#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT 16 /* bit 31 - 16 */
#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT_MASK \
(0xffff << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT)
#define CRYPTO_CNTR1_IV1_REG_F8_BEARER 0 /* bit 4 - 0 */
#define CRYPTO_CNTR1_IV1_REG_F8_BEARER_MASK \
(0x1f << CRYPTO_CNTR1_IV1_REG_F8_BEARER)
/* F9 definition of CRYPTO_AUTH_IV4 REG */
#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS 0 /* bit 2 - 0 */
#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS_MASK \
(0x7 << CRYPTO_AUTH_IV4_REG_F9_VALID_BIS)
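/*
 * Illustrative sketch, not part of the original header: packing the F8
 * packet count and bearer into CNTR1_IV1, and the number of valid bits
 * of the last F9 message byte into AUTH_IV4.
 *
 *	u32 iv1 = ((pkt_cnt << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT) &
 *			CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT_MASK) |
 *		  ((bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER) &
 *			CRYPTO_CNTR1_IV1_REG_F8_BEARER_MASK);
 *	u32 iv4 = (last_bits << CRYPTO_AUTH_IV4_REG_F9_VALID_BIS) &
 *			CRYPTO_AUTH_IV4_REG_F9_VALID_BIS_MASK;
 */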
/* engines_avail */
#define CRYPTO_ENCR_AES_SEL 0
#define CRYPTO_DES_SEL 1
#define CRYPTO_ENCR_SNOW3G_SEL 2
#define CRYPTO_ENCR_KASUMI_SEL 3
#define CRYPTO_SHA_SEL 4
#define CRYPTO_SHA512_SEL 5
#define CRYPTO_AUTH_AES_SEL 6
#define CRYPTO_AUTH_SNOW3G_SEL 7
#define CRYPTO_AUTH_KASUMI_SEL 8
#define CRYPTO_BAM_PIPE_SETS 9 /* bit 12 - 9 */
#define CRYPTO_AXI_WR_BEATS 13 /* bit 18 - 13 */
#define CRYPTO_AXI_RD_BEATS 19 /* bit 24 - 19 */
#define CRYPTO_ENCR_ZUC_SEL 26
#define CRYPTO_AUTH_ZUC_SEL 27
#define CRYPTO_ZUC_ENABLE 28
#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_ */


@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2011-2020, The Linux Foundation. All rights reserved.
*/
#ifndef __QCOM_CRYPTO_DEVICE__H
#define __QCOM_CRYPTO_DEVICE__H
struct msm_ce_hw_support {
uint32_t ce_shared;
uint32_t shared_ce_resource;
uint32_t hw_key_support;
uint32_t sha_hmac;
};
#endif /* __QCOM_CRYPTO_DEVICE__H */

include/linux/qcrypto.h

@ -0,0 +1,60 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTO_H_
#define _DRIVERS_CRYPTO_MSM_QCRYPTO_H_
#include <linux/crypto.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <crypto/aead.h>
#define QCRYPTO_CTX_KEY_MASK 0x000000ff
#define QCRYPTO_CTX_USE_HW_KEY 0x00000001
#define QCRYPTO_CTX_USE_PIPE_KEY 0x00000002
#define QCRYPTO_CTX_XTS_MASK 0x0000ff00
#define QCRYPTO_CTX_XTS_DU_SIZE_512B 0x00000100
#define QCRYPTO_CTX_XTS_DU_SIZE_1KB 0x00000200
int qcrypto_cipher_set_device(struct skcipher_request *req, unsigned int dev);
int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev);
int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev);
int qcrypto_cipher_set_flag(struct skcipher_request *req, unsigned int flags);
int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags);
int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags);
int qcrypto_cipher_clear_flag(struct skcipher_request *req,
unsigned int flags);
int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags);
int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags);
struct crypto_engine_entry {
u32 hw_instance;
u32 ce_device;
int shared;
};
int qcrypto_get_num_engines(void);
void qcrypto_get_engine_list(size_t num_engines,
struct crypto_engine_entry *arr);
int qcrypto_cipher_set_device_hw(struct skcipher_request *req,
unsigned int fde_pfe,
unsigned int hw_inst);
struct qcrypto_func_set {
int (*cipher_set)(struct skcipher_request *req,
unsigned int fde_pfe,
unsigned int hw_inst);
int (*cipher_flag)(struct skcipher_request *req, unsigned int flags);
int (*get_num_engines)(void);
void (*get_engine_list)(size_t num_engines,
struct crypto_engine_entry *arr);
};
#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTO_H_ */
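As an illustration of the engine-enumeration API above, a kernel client could query and walk the engine list roughly as follows. This is a minimal sketch, assuming <linux/slab.h> for the allocation; the allocation pattern is the caller's choice and not mandated by this header:

	int n = qcrypto_get_num_engines();

	if (n > 0) {
		struct crypto_engine_entry *list;

		list = kcalloc(n, sizeof(*list), GFP_KERNEL);
		if (list) {
			qcrypto_get_engine_list(n, list);
			/* inspect hw_instance, ce_device, shared here */
			kfree(list);
		}
	}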

include/uapi/linux/qcota.h

@ -0,0 +1,215 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
*/
#ifndef _UAPI_QCOTA_H
#define _UAPI_QCOTA_H
#include <linux/types.h>
#include <linux/ioctl.h>
#define QCE_OTA_MAX_BEARER 31
#define OTA_KEY_SIZE 16 /* 128 bits of keys. */
enum qce_ota_dir_enum {
QCE_OTA_DIR_UPLINK = 0,
QCE_OTA_DIR_DOWNLINK = 1,
QCE_OTA_DIR_LAST
};
enum qce_ota_algo_enum {
QCE_OTA_ALGO_KASUMI = 0,
QCE_OTA_ALGO_SNOW3G = 1,
QCE_OTA_ALGO_LAST
};
/**
* struct qce_f8_req - qce f8 request
* @data_in: packet input data stream to be ciphered.
* If NULL, the engine runs in key stream mode.
* @data_out: ciphered packets output data.
* @data_len: length of data_in and data_out in bytes.
* @count_c: count-C, ciphering sequence number, 32 bit
* @bearer: 5 bit of radio bearer identifier.
* @ckey: 128 bits of confidentiality key,
* ckey[0] bit 127-120, ckey[1] bit 119-112,.., ckey[15] bit 7-0.
* @direction: uplink or downlink.
* @algorithm: Kasumi, or Snow3G.
*
* If data_in is NULL, the engine will run in a special mode called
* key stream mode. In this special mode, the engine will generate
* key stream output for the number of bytes specified in the
* data_len, based on the input parameters of direction, algorithm,
* ckey, bearer, and count_c. The data_len is restricted to
* a multiple of 16 bytes. The application can then exclusive-or the
* output stream with the input data stream to generate the final
* cipher data stream.
*/
struct qce_f8_req {
uint8_t *data_in;
uint8_t *data_out;
uint16_t data_len;
uint32_t count_c;
uint8_t bearer;
uint8_t ckey[OTA_KEY_SIZE];
enum qce_ota_dir_enum direction;
enum qce_ota_algo_enum algorithm;
};
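/*
 * Illustrative sketch, not part of the original header: requesting a
 * 64-byte key stream (data_in == NULL selects key stream mode; 64 is a
 * multiple of 16) and XOR-ing it into a payload in user space. fd is
 * an open handle on the OTA crypto device node.
 *
 *	struct qce_f8_req req = {
 *		.data_in   = NULL,
 *		.data_out  = ks_buf,
 *		.data_len  = 64,
 *		.count_c   = count_c,
 *		.bearer    = bearer,
 *		.direction = QCE_OTA_DIR_UPLINK,
 *		.algorithm = QCE_OTA_ALGO_SNOW3G,
 *	};
 *	memcpy(req.ckey, ckey, OTA_KEY_SIZE);
 *	if (ioctl(fd, QCOTA_F8_REQ, &req) == 0)
 *		for (i = 0; i < 64; i++)
 *			payload[i] ^= ks_buf[i];
 */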
/**
* struct qce_f8_multi_pkt_req - qce f8 multiple packet request
* Multiple packets of uniform size, sharing the same
* F8 ciphering parameters, can be ciphered in a
* single request.
*
* @num_pkt: number of packets.
*
* @cipher_start: ciphering starts offset within a packet.
*
* @cipher_size: number of bytes to be ciphered within a packet.
*
* @qce_f8_req: description of the packet and F8 parameters.
* The following fields have special meaning for
* multiple packet operation,
*
* @data_len: data_len indicates the length of a packet.
*
* @data_in: packets are concatenated together in a byte
* stream starting at data_in.
*
* @data_out: The returned ciphered output for multiple
* packets.
* The ciphered output of each packet is concatenated
* into a byte stream starting at data_out.
* Note, for each packet the output area from
* offset 0 to cipher_start-1, and from offset
* cipher_start + cipher_size to data_len-1, remains
* unaltered from the packet input area.
* @count_c: count-C of the first packet, 32 bit.
*
*
* In one request, multiple packets can be ciphered, and output to the
* data_out stream.
*
* Packet data are laid out contiguously, in sequence, in the
* data_in and data_out areas. Every packet is of identical size.
* If the PDU is not byte aligned, set data_len to the rounded-up
* packet size; e.g. for a PDU size of 253 bits, set the packet
* size to 32 bytes. The next packet then starts on
* the next byte boundary.
*
* For each packet, data from offset 0 to cipher_start
* will be left unchanged and output to the data_out area.
* This area of the packet can be for the RLC header, which is not
* to be ciphered.
*
* The ciphering of a packet starts from offset cipher_start, for
* cipher_size bytes of data. Data starting from
* offset cipher_start + cipher_size to the end of packet will be left
* unchanged and output to the data_out area.
*
* For each packet, the input arguments bearer, direction,
* ckey, and algorithm must be the same. count_c is the ciphering
* sequence number of the first packet; the 2nd packet's ciphering
* sequence number is assumed to be count_c + 1, the 3rd packet's
* count_c + 2, and so on.
*
*/
struct qce_f8_multi_pkt_req {
uint16_t num_pkt;
uint16_t cipher_start;
uint16_t cipher_size;
struct qce_f8_req qce_f8_req;
};
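/*
 * Illustrative sketch, not part of the original header: three 32-byte
 * packets laid out back to back, ciphering bytes 4..31 of each so that
 * a 4-byte header passes through unchanged.
 *
 *	struct qce_f8_multi_pkt_req mreq = {
 *		.num_pkt      = 3,
 *		.cipher_start = 4,
 *		.cipher_size  = 28,
 *	};
 *	mreq.qce_f8_req.data_in  = in_buf;	(3 * 32 bytes total)
 *	mreq.qce_f8_req.data_out = out_buf;
 *	mreq.qce_f8_req.data_len = 32;		(per-packet size)
 *	(fill count_c, bearer, ckey, direction, algorithm as for F8)
 *	ioctl(fd, QCOTA_F8_MPKT_REQ, &mreq);
 */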
/**
* struct qce_f8_variable_multi_pkt_req - qce f8 multiple packet request
* Multiple packets of variable size, sharing the same
* F8 ciphering parameters, can be ciphered in a
* single request.
*
* @num_pkt: number of packets.
*
* @cipher_iov[]: array of iov of packets to be ciphered.
*
*
* @qce_f8_req: description of the packet and F8 parameters.
* The following fields have special meaning for
* multiple packet operation,
*
* @data_len: ignored.
*
* @data_in: ignored.
*
* @data_out: ignored.
*
* @count_c: count-C of the first packet, 32 bit.
*
*
* In one request, multiple packets can be ciphered.
*
* The i-th packet is defined in cipher_iov[i-1].
* The ciphering of the i-th packet starts at offset 0 of the PDU
* specified by cipher_iov[i-1].addr, for cipher_iov[i-1].size bytes
* of data. If the PDU is not byte aligned, set cipher_iov[i-1].size
* to the rounded-up packet size; e.g. for a PDU size of
* 253 bits, set the packet size to 32 bytes.
*
* Ciphering is done in place. That is, the ciphering
* input and output data are both in cipher_iov[i-1].addr for the i-th
* packet.
*
* For each packet, the input arguments bearer, direction,
* ckey, and algorithm must be the same. count_c is the ciphering
* sequence number of the first packet; the 2nd packet's ciphering
* sequence number is assumed to be count_c + 1, the 3rd packet's
* count_c + 2, and so on.
*/
#define MAX_NUM_V_MULTI_PKT 20
struct cipher_iov {
unsigned char *addr;
unsigned short size;
};
struct qce_f8_variable_multi_pkt_req {
unsigned short num_pkt;
struct cipher_iov cipher_iov[MAX_NUM_V_MULTI_PKT];
struct qce_f8_req qce_f8_req;
};
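/*
 * Illustrative sketch, not part of the original header: two PDUs of
 * different sizes ciphered in place through cipher_iov[].
 *
 *	struct qce_f8_variable_multi_pkt_req vreq = { .num_pkt = 2 };
 *
 *	vreq.cipher_iov[0].addr = pdu0;
 *	vreq.cipher_iov[0].size = 40;
 *	vreq.cipher_iov[1].addr = pdu1;
 *	vreq.cipher_iov[1].size = 32;
 *	(fill vreq.qce_f8_req.count_c, bearer, ckey, direction,
 *	 algorithm as for F8; data_in, data_out and data_len are ignored)
 *	ioctl(fd, QCOTA_F8_V_MPKT_REQ, &vreq);
 */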
/**
* struct qce_f9_req - qce f9 request
* @message: message
* @msize: message size in bytes (include the last partial byte).
* @last_bits: valid bits in the last byte of message.
* @mac_i: 32 bit message authentication code, to be returned.
* @fresh: random 32 bit number, one per user.
* @count_i: 32 bit count-I integrity sequence number.
* @direction: uplink or downlink.
* @ikey: 128 bits of integrity key,
* ikey[0] bit 127-120, ikey[1] bit 119-112,.., ikey[15] bit 7-0.
* @algorithm: Kasumi, or Snow3G.
*/
struct qce_f9_req {
uint8_t *message;
uint16_t msize;
uint8_t last_bits;
uint32_t mac_i;
uint32_t fresh;
uint32_t count_i;
enum qce_ota_dir_enum direction;
uint8_t ikey[OTA_KEY_SIZE];
enum qce_ota_algo_enum algorithm;
};
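/*
 * Illustrative sketch, not part of the original header: computing the
 * MAC-I over a downlink message whose last byte carries 3 valid bits.
 *
 *	struct qce_f9_req f9 = {
 *		.message   = msg,
 *		.msize     = msg_len,
 *		.last_bits = 3,
 *		.fresh     = fresh,
 *		.count_i   = count_i,
 *		.direction = QCE_OTA_DIR_DOWNLINK,
 *		.algorithm = QCE_OTA_ALGO_KASUMI,
 *	};
 *	memcpy(f9.ikey, ikey, OTA_KEY_SIZE);
 *	if (ioctl(fd, QCOTA_F9_REQ, &f9) == 0)
 *		mac = f9.mac_i;
 */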
#define QCOTA_IOC_MAGIC 0x85
#define QCOTA_F8_REQ _IOWR(QCOTA_IOC_MAGIC, 1, struct qce_f8_req)
#define QCOTA_F8_MPKT_REQ _IOWR(QCOTA_IOC_MAGIC, 2, struct qce_f8_multi_pkt_req)
#define QCOTA_F9_REQ _IOWR(QCOTA_IOC_MAGIC, 3, struct qce_f9_req)
#define QCOTA_F8_V_MPKT_REQ _IOWR(QCOTA_IOC_MAGIC, 4,\
struct qce_f8_variable_multi_pkt_req)
#endif /* _UAPI_QCOTA_H */
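A user-space client ties this together by opening the OTA crypto device node and issuing the ioctls above. A minimal sketch, assuming the ota_crypto driver exposes a /dev/qcota0 node (the node name is an assumption):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/qcota.h>

	int fd = open("/dev/qcota0", O_RDWR);

	if (fd >= 0) {
		struct qce_f8_req req;

		/* fill req as described in struct qce_f8_req above */
		if (ioctl(fd, QCOTA_F8_REQ, &req) < 0)
			perror("QCOTA_F8_REQ");
		close(fd);
	}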