Merge "soc: qcom: hab: snapshot of HAB driver"

qctecmdr 2023-04-04 16:20:30 -07:00 committed by Gerrit - the friendly Code Review server
commit 96c702befb
44 changed files with 11675 additions and 0 deletions


@@ -239,6 +239,7 @@ config QCOM_QMI_HELPERS
source "drivers/soc/qcom/memshare/Kconfig"
source "drivers/soc/qcom/hab/Kconfig"
config QCOM_RMTFS_MEM
tristate "Qualcomm Remote Filesystem memory driver"
depends on ARCH_QCOM


@@ -64,6 +64,7 @@ crypto-qti-$(CONFIG_QTI_CRYPTO_TZ) += crypto-qti-tz.o
crypto-qti-$(CONFIG_QTI_HW_KEY_MANAGER) += crypto-qti-hwkm.o
obj-$(CONFIG_QTI_HW_KEY_MANAGER) += hwkm.o
obj-$(CONFIG_MSM_TMECOM_QMP) += tmecom/
obj-$(CONFIG_MSM_HAB) += hab/
obj-$(CONFIG_QCOM_MINIDUMP) += minidump.o
minidump-y += msm_minidump.o minidump_log.o
minidump-$(CONFIG_QCOM_MINIDUMP_PANIC_MEMORY_INFO) += minidump_memory.o


@@ -0,0 +1,43 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# HAB configuration
#
config MSM_HAB
tristate "Enable Hypervisor ABstraction Layer"
depends on QCOM_DMABUF_HEAPS
select WANT_DEV_COREDUMP
help
The HAB (Hypervisor ABstraction) driver provides message
transmission and memory-sharing services between different OSes.
Internally, HAB uses specific communication mechanisms
provided by the underlying hypervisor.
It is required for the virtualization support of some multimedia
and platform devices on MSM devices.
config MSM_HAB_DEFAULT_VMID
int
default 2
help
The default HAB VMID.
It is not used when another configuration source, such as the
device tree, is present.
config MSM_VIRTIO_HAB
bool "Enable hab plugin for virtio hypervisor"
depends on VIRTIO
depends on MSM_HAB
help
HAB plugin for virtio devices on the guest OS.
The plugin runs on the guest OS and relies on
virtio queues to transport HAB messages and
communicate with the host OS.
config MSM_VHOST_HAB
bool "Enable hab plugin for vhost hypervisor"
select VHOST
depends on MSM_HAB
help
HAB plugin for the vhost device on the host OS.
The plugin runs on the host OS and relies on the
vhost framework to transport HAB messages
and communicate with the guest OS.


@@ -0,0 +1,53 @@
# SPDX-License-Identifier: GPL-2.0-only
CFLAGS_hab.o := -I$(src)
# Common code
msm_hab-objs = \
hab.o \
hab_msg.o \
hab_vchan.o \
hab_pchan.o \
hab_open.o \
hab_mimex.o \
hab_pipe.o \
hab_parser.o \
hab_stat.o
# Common code in Linux
msm_hab-objs += \
khab.o \
hab_linux.o \
hab_mem_linux.o \
khab_test.o
# Hypervisor specific
ifdef CONFIG_GHS_VMM
msm_hab-objs += \
ghs_comm.o \
ghs_comm_linux.o \
hab_ghs.o \
hab_ghs_linux.o
else
ifdef CONFIG_MSM_VIRTIO_HAB
msm_hab-objs += hab_virtio.o
else
ifdef CONFIG_MSM_VHOST_HAB
ccflags-y += -I$(srctree)/drivers/vhost
msm_hab-objs += hab_vhost.o
else
ifdef CONFIG_QTI_QUIN_GVM
msm_hab-objs += \
qvm_comm.o \
qvm_comm_linux.o \
hab_qvm.o \
hab_qvm_linux.o
else
msm_hab-objs += \
hab_comm.o \
hyp_stub.o
endif
endif
endif
endif
obj-$(CONFIG_MSM_HAB) += msm_hab.o


@@ -0,0 +1,139 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
#include "hab_ghs.h"
int physical_channel_read(struct physical_channel *pchan,
void *payload,
size_t read_size)
{
struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
/* size in header is only for payload excluding the header itself */
if (dev->read_size < read_size + sizeof(struct hab_header)) {
pr_warn("read %zd is less than requested %zd plus header %zd\n",
dev->read_size, read_size, sizeof(struct hab_header));
read_size = 0;
} else {
/* always skip the header */
memcpy(payload, (unsigned char *)dev->read_data +
sizeof(struct hab_header) + dev->read_offset, read_size);
dev->read_offset += read_size;
}
return read_size;
}
int physical_channel_send(struct physical_channel *pchan,
struct hab_header *header,
void *payload)
{
size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
GIPC_Result result;
uint8_t *msg;
int irqs_disabled;
if (!dev) {
pr_err("no send pchan %s has been de-alloced msg for %zd bytes\n",
pchan->name);
return -ENODEV;
}
irqs_disabled = irqs_disabled();
hab_spin_lock(&dev->io_lock, irqs_disabled);
result = hab_gipc_wait_to_send(dev->endpoint);
if (result != GIPC_Success) {
hab_spin_unlock(&dev->io_lock, irqs_disabled);
pr_err("failed to wait to send %d\n", result);
return -EBUSY;
}
result = GIPC_PrepareMessage(dev->endpoint, sizebytes+sizeof(*header),
(void **)&msg);
if (result == GIPC_Full) {
hab_spin_unlock(&dev->io_lock, irqs_disabled);
/* need to wait for space! */
pr_err("failed to reserve send msg for %zd bytes\n",
sizebytes+sizeof(*header));
return -EBUSY;
} else if (result != GIPC_Success) {
hab_spin_unlock(&dev->io_lock, irqs_disabled);
pr_err("failed to send due to error %d\n", result);
return -ENOMEM;
}
if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_PROFILE) {
struct timespec64 ts = {0};
struct habmm_xing_vm_stat *pstat =
(struct habmm_xing_vm_stat *)payload;
ktime_get_ts64(&ts);
pstat->tx_sec = ts.tv_sec;
pstat->tx_usec = ts.tv_nsec/NSEC_PER_USEC;
}
memcpy(msg, header, sizeof(*header));
if (sizebytes)
memcpy(msg+sizeof(*header), payload, sizebytes);
result = GIPC_IssueMessage(dev->endpoint, sizebytes+sizeof(*header),
header->id_type);
hab_spin_unlock(&dev->io_lock, irqs_disabled);
if (result != GIPC_Success) {
pr_err("send error %d, sz %zd, id type %x, size %x\n",
result, sizebytes+sizeof(*header),
header->id_type, header->payload_size);
return -EAGAIN;
}
return 0;
}
void physical_channel_rx_dispatch_common(unsigned long physical_channel)
{
struct hab_header header;
struct physical_channel *pchan =
(struct physical_channel *)physical_channel;
struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
GIPC_Result result;
int irqs_disabled;
if (!dev) {
pr_err("no recv pchan %s has been de-alloced msg for %zd bytes\n",
pchan->name);
return;
}
irqs_disabled = irqs_disabled();
hab_spin_lock(&pchan->rxbuf_lock, irqs_disabled);
while (1) {
dev->read_size = 0;
dev->read_offset = 0;
result = GIPC_ReceiveMessage(dev->endpoint,
dev->read_data,
GIPC_RECV_BUFF_SIZE_BYTES,
&dev->read_size,
&header.id_type);
if (result == GIPC_Success || dev->read_size > 0) {
/* handle corrupted msg? */
hab_msg_recv(pchan, dev->read_data);
continue;
} else if (result == GIPC_Empty) {
/* no more pending msg */
break;
}
pr_err("recv unhandled result %d, size %zd\n",
result, dev->read_size);
break;
}
hab_spin_unlock(&pchan->rxbuf_lock, irqs_disabled);
}
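For orientation, here is a minimal sketch (not part of this commit) of how a caller is expected to drive the send path above: it fills a struct hab_header with the helper macros declared in hab.h (shown further below) and passes the payload separately. The use of vchan->otherend_id and vchan->session_id for the header fields is an illustrative assumption.

#include "hab.h"

/* Illustrative sketch only; not driver code. */
static int example_send(struct physical_channel *pchan,
                        struct virtual_channel *vchan,
                        void *payload, uint32_t bytes)
{
        struct hab_header header = HAB_HEADER_INITIALIZER;

        HAB_HEADER_SET_SIZE(header, bytes);
        HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_MSG);
        HAB_HEADER_SET_ID(header, vchan->otherend_id);
        HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);

        /* copies header + payload into one GIPC message and issues it */
        return physical_channel_send(pchan, &header, payload);
}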


@@ -0,0 +1,41 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
#include "hab_ghs.h"
inline int hab_gipc_wait_to_send(GIPC_Endpoint endpoint)
{
(void)endpoint;
return GIPC_Success;
}
void physical_channel_rx_dispatch(unsigned long physical_channel)
{
struct physical_channel *pchan =
(struct physical_channel *)physical_channel;
struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
uint32_t events;
unsigned long flags;
spin_lock_irqsave(&pchan->rxbuf_lock, flags);
events = kgipc_dequeue_events(dev->endpoint);
spin_unlock_irqrestore(&pchan->rxbuf_lock, flags);
if (events & (GIPC_EVENT_RESET))
pr_err("hab gipc %s remote vmid %d RESET\n",
dev->name, pchan->vmid_remote);
if (events & (GIPC_EVENT_RESETINPROGRESS))
pr_err("hab gipc %s remote vmid %d RESETINPROGRESS\n",
dev->name, pchan->vmid_remote);
if (events & (GIPC_EVENT_RECEIVEREADY))
physical_channel_rx_dispatch_common(physical_channel);
if (events & (GIPC_EVENT_SENDREADY))
pr_debug("kgipc send ready\n");
}

drivers/soc/qcom/hab/hab.c (new file, 1062 lines): diff suppressed because it is too large.

drivers/soc/qcom/hab/hab.h (new file, 643 lines)

@@ -0,0 +1,643 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HAB_H
#define __HAB_H
#include "hab_os.h" /* OS-specific part in the core header file */
enum hab_payload_type {
HAB_PAYLOAD_TYPE_MSG = 0x0,
HAB_PAYLOAD_TYPE_INIT,
HAB_PAYLOAD_TYPE_INIT_ACK,
HAB_PAYLOAD_TYPE_INIT_DONE,
HAB_PAYLOAD_TYPE_EXPORT,
HAB_PAYLOAD_TYPE_EXPORT_ACK,
HAB_PAYLOAD_TYPE_PROFILE,
HAB_PAYLOAD_TYPE_CLOSE,
HAB_PAYLOAD_TYPE_INIT_CANCEL,
HAB_PAYLOAD_TYPE_SCHE_MSG,
HAB_PAYLOAD_TYPE_SCHE_MSG_ACK,
HAB_PAYLOAD_TYPE_SCHE_RESULT_REQ,
HAB_PAYLOAD_TYPE_SCHE_RESULT_RSP,
HAB_PAYLOAD_TYPE_MAX,
};
#define LOOPBACK_DOM 0xFF
/*
* Tuning required. If there are multiple clients, an aged previous
* "request" might be discarded
*/
#define Q_AGE_THRESHOLD 1000000
/* the names must match the dtsi entries when a real HYP framework is used */
#define DEVICE_AUD1_NAME "hab_aud1"
#define DEVICE_AUD2_NAME "hab_aud2"
#define DEVICE_AUD3_NAME "hab_aud3"
#define DEVICE_AUD4_NAME "hab_aud4"
#define DEVICE_CAM1_NAME "hab_cam1"
#define DEVICE_CAM2_NAME "hab_cam2"
#define DEVICE_DISP1_NAME "hab_disp1"
#define DEVICE_DISP2_NAME "hab_disp2"
#define DEVICE_DISP3_NAME "hab_disp3"
#define DEVICE_DISP4_NAME "hab_disp4"
#define DEVICE_DISP5_NAME "hab_disp5"
#define DEVICE_GFX_NAME "hab_ogles"
#define DEVICE_VID_NAME "hab_vid"
#define DEVICE_VID2_NAME "hab_vid2"
#define DEVICE_VID3_NAME "hab_vid3"
#define DEVICE_MISC_NAME "hab_misc"
#define DEVICE_QCPE1_NAME "hab_qcpe_vm1"
#define DEVICE_CLK1_NAME "hab_clock_vm1"
#define DEVICE_CLK2_NAME "hab_clock_vm2"
#define DEVICE_FDE1_NAME "hab_fde1"
#define DEVICE_BUFFERQ1_NAME "hab_bufferq1"
#define DEVICE_DATA1_NAME "hab_data_network1"
#define DEVICE_DATA2_NAME "hab_data_network2"
#define DEVICE_HSI2S1_NAME "hab_hsi2s1"
#define DEVICE_XVM1_NAME "hab_xvm1"
#define DEVICE_XVM2_NAME "hab_xvm2"
#define DEVICE_XVM3_NAME "hab_xvm3"
#define DEVICE_VNW1_NAME "hab_vnw1"
#define DEVICE_EXT1_NAME "hab_ext1"
#define HABCFG_MMID_NUM 26
#define HAB_MMID_ALL_AREA 0
/* make sure the concatenated name is shorter than this value */
#define MAX_VMID_NAME_SIZE 30
/*
* The maximum value of payload_count in struct export_desc
* Max u32_t size_bytes from hab_ioctl.h(0xFFFFFFFF) / page size(0x1000)
*/
#define MAX_EXP_PAYLOAD_COUNT 0xFFFFF
#define HABCFG_FILE_SIZE_MAX 256
#define HABCFG_MMID_AREA_MAX (MM_ID_MAX/100)
#define HABCFG_VMID_MAX 16
#define HABCFG_VMID_INVALID (-1)
#define HABCFG_VMID_DONT_CARE (-2)
#define HABCFG_ID_LINE_LIMIT ","
#define HABCFG_ID_VMID "VMID="
#define HABCFG_ID_BE "BE="
#define HABCFG_ID_FE "FE="
#define HABCFG_ID_MMID "MMID="
#define HABCFG_ID_RANGE "-"
#define HABCFG_ID_DONTCARE "X"
#define HABCFG_FOUND_VMID 1
#define HABCFG_FOUND_FE_MMIDS 2
#define HABCFG_FOUND_BE_MMIDS 3
#define HABCFG_FOUND_NOTHING (-1)
#define HABCFG_BE_FALSE 0
#define HABCFG_BE_TRUE 1
#define HABCFG_GET_VMID(_local_cfg_, _vmid_) \
((settings)->vmid_mmid_list[_vmid_].vmid)
#define HABCFG_GET_MMID(_local_cfg_, _vmid_, _mmid_) \
((settings)->vmid_mmid_list[_vmid_].mmid[_mmid_])
#define HABCFG_GET_BE(_local_cfg_, _vmid_, _mmid_) \
((settings)->vmid_mmid_list[_vmid_].is_listener[_mmid_])
struct hab_header {
uint32_t id_type;
uint32_t payload_size;
uint32_t session_id;
uint32_t signature;
uint32_t sequence;
} __packed;
/* "Size" of the HAB_HEADER_ID and HAB_VCID_ID must match */
#define HAB_HEADER_TYPE_SHIFT 16
#define HAB_HEADER_ID_SHIFT 20
/*
* On HQX platforms, the maximum payload size is
* PIPE_SHMEM_SIZE - sizeof(hab_header)
* 500KB is big enough for now and leave a margin for other usage
*/
#define HAB_HEADER_SIZE_MAX 0x0007D000
#define HAB_HEADER_TYPE_MASK 0x000F0000
#define HAB_HEADER_ID_MASK 0xFFF00000
#define HAB_HEADER_INITIALIZER {0}
#define HAB_MMID_GET_MAJOR(mmid) (mmid & 0xFFFF)
#define HAB_MMID_GET_MINOR(mmid) ((mmid>>16) & 0xFF)
#define HAB_VCID_ID_SHIFT 0
#define HAB_VCID_DOMID_SHIFT 12
#define HAB_VCID_MMID_SHIFT 20
#define HAB_VCID_ID_MASK 0x00000FFF
#define HAB_VCID_DOMID_MASK 0x000FF000
#define HAB_VCID_MMID_MASK 0xFFF00000
#define HAB_VCID_GET_ID(vcid) \
(((vcid) & HAB_VCID_ID_MASK) >> HAB_VCID_ID_SHIFT)
#define HAB_HEADER_SET_SESSION_ID(header, sid) \
((header).session_id = (sid))
#define HAB_HEADER_SET_SIZE(header, size) \
((header).payload_size = (size))
#define HAB_HEADER_SET_TYPE(header, type) \
((header).id_type = ((header).id_type & \
(~HAB_HEADER_TYPE_MASK)) | \
(((type) << HAB_HEADER_TYPE_SHIFT) & \
HAB_HEADER_TYPE_MASK))
#define HAB_HEADER_SET_ID(header, id) \
((header).id_type = ((header).id_type & \
(~HAB_HEADER_ID_MASK)) | \
((HAB_VCID_GET_ID(id) << HAB_HEADER_ID_SHIFT) & \
HAB_HEADER_ID_MASK))
#define HAB_HEADER_GET_SIZE(header) \
((header).payload_size)
#define HAB_HEADER_GET_TYPE(header) \
(((header).id_type & \
HAB_HEADER_TYPE_MASK) >> HAB_HEADER_TYPE_SHIFT)
#define HAB_HEADER_GET_ID(header) \
((((header).id_type & HAB_HEADER_ID_MASK) >> \
(HAB_HEADER_ID_SHIFT - HAB_VCID_ID_SHIFT)) & HAB_VCID_ID_MASK)
#define HAB_HEADER_GET_SESSION_ID(header) ((header).session_id)
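/*
 * Illustrative note (added for clarity, not in the original header):
 * id_type packs the payload type and the low 12 bits of the vcid, e.g.
 *   HAB_HEADER_SET_TYPE(h, HAB_PAYLOAD_TYPE_EXPORT);  type -> bits 16..19
 *   HAB_HEADER_SET_ID(h, vcid);                       vcid -> bits 20..31
 * HAB_HEADER_GET_ID() reverses this by shifting right by
 * (HAB_HEADER_ID_SHIFT - HAB_VCID_ID_SHIFT) and masking with
 * HAB_VCID_ID_MASK to recover the 12-bit id.
 */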
#define HAB_HS_TIMEOUT (10*1000*1000)
#define HAB_HEAD_SIGNATURE 0xBEE1BEE1
struct physical_channel {
struct list_head node;
char name[MAX_VMID_NAME_SIZE];
int is_be;
struct kref refcount;
struct hab_device *habdev;
struct idr vchan_idr;
spinlock_t vid_lock;
struct idr expid_idr;
spinlock_t expid_lock;
void *hyp_data;
int dom_id; /* BE role: remote vmid; FE role: don't care */
int vmid_local; /* from DT or hab_config */
int vmid_remote;
char vmname_local[12]; /* from DT */
char vmname_remote[12];
int closed;
spinlock_t rxbuf_lock;
/* debug only */
uint32_t sequence_tx;
uint32_t sequence_rx;
uint32_t status;
/* vchans on this pchan */
struct list_head vchannels;
int vcnt;
rwlock_t vchans_lock;
};
/* this payload has to be used together with type */
struct hab_open_send_data {
int vchan_id;
int sub_id;
int open_id;
int ver_fe;
int ver_be;
int reserved;
};
struct hab_open_request {
int type;
struct physical_channel *pchan;
struct hab_open_send_data xdata;
};
struct hab_open_node {
struct hab_open_request request;
struct list_head node;
int64_t age; /* sec */
};
struct hab_export_ack {
uint32_t export_id;
int32_t vcid_local;
int32_t vcid_remote;
};
struct hab_export_ack_recvd {
struct hab_export_ack ack;
struct list_head node;
int age;
};
struct hab_message {
struct list_head node;
size_t sizebytes;
bool scatter;
uint32_t sequence_rx;
uint32_t data[];
};
/* for all the pchans of same kind */
struct hab_device {
char name[MAX_VMID_NAME_SIZE];
uint32_t id;
struct list_head pchannels;
int pchan_cnt;
rwlock_t pchan_lock;
struct list_head openq_list; /* received */
spinlock_t openlock;
wait_queue_head_t openq;
int openq_cnt;
};
struct uhab_context {
struct list_head node; /* managed by the driver */
struct kref refcount;
struct list_head vchannels;
int vcnt;
struct list_head exp_whse;
uint32_t export_total;
wait_queue_head_t exp_wq;
struct list_head exp_rxq;
rwlock_t exp_lock;
spinlock_t expq_lock;
struct list_head imp_whse;
spinlock_t imp_lock;
uint32_t import_total;
void *import_ctx;
struct list_head pending_open; /* sent to remote */
int pending_cnt;
rwlock_t ctx_lock;
int closing;
int kernel;
int owner;
int lb_be; /* loopback only */
};
/*
* array describing the VM and its MMID configuration, i.e. what this
* side is connected to; it describes a pchan's remote side
*/
struct vmid_mmid_desc {
int vmid; /* remote vmid */
int mmid[HABCFG_MMID_AREA_MAX+1]; /* selected or not */
int is_listener[HABCFG_MMID_AREA_MAX+1]; /* yes or no */
};
struct local_vmid {
int32_t self; /* only this field is for local */
struct vmid_mmid_desc vmid_mmid_list[HABCFG_VMID_MAX];
};
struct hab_driver {
struct device *dev; /* mmid dev list */
struct cdev cdev;
dev_t major;
struct class *class;
int ndevices;
struct hab_device *devp;
struct uhab_context *kctx;
struct list_head uctx_list;
int ctx_cnt;
spinlock_t drvlock;
struct list_head imp_list;
int imp_cnt;
spinlock_t imp_lock;
struct local_vmid settings; /* parser results */
int b_server_dom;
int b_loopback_be; /* only allow 2 apps simultaneously 1 fe 1 be */
int b_loopback;
void *hyp_priv; /* hypervisor plug-in storage */
void *hab_vmm_handle;
};
struct virtual_channel {
struct list_head node; /* for ctx */
struct list_head pnode; /* for pchan */
/*
* refcount is used to track the references from hab core to the virtual
* channel such as references from physical channels,
* i.e. references from the "other" side
*/
struct kref refcount;
struct physical_channel *pchan;
struct uhab_context *ctx;
struct list_head rx_list;
wait_queue_head_t rx_queue;
spinlock_t rx_lock;
int id;
int otherend_id;
int otherend_closed;
uint32_t session_id;
/*
* set when a local close() is called explicitly. The vchan could still be
* in use on the hab-recv-msg() path (2) when close() is called (1);
* this is handled the same way as when close() is not called and there is
* no message path
*/
int closed;
int forked; /* if fork is detected and assume only once */
/* stats */
uint64_t tx_cnt; /* total succeeded tx */
uint64_t rx_cnt; /* total succeeded rx */
int rx_inflight; /* rx in progress/blocking */
};
/*
* Struct shared between local and remote; its contents are composed by
* the exporter, and the importer only writes to pdata and the local
* (exporter) domID
*/
struct export_desc {
uint32_t export_id;
int readonly;
uint64_t import_index;
struct virtual_channel *vchan; /* vchan could be freed earlier */
struct uhab_context *ctx;
struct physical_channel *pchan;
int32_t vcid_local;
int32_t vcid_remote;
int domid_local;
int domid_remote;
int flags;
struct list_head node;
void *kva;
int payload_count;
unsigned char payload[1];
} __packed;
struct export_desc_super {
struct kref refcount;
void *platform_data;
unsigned long offset;
struct export_desc exp;
};
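/*
 * Illustrative note (added, not in the original header): an export_desc is
 * embedded in an export_desc_super, so the wrapper can be recovered with
 * container_of(), e.g.
 *   struct export_desc_super *exp_super =
 *           container_of(exp, struct export_desc_super, exp);
 * This is a sketch of the usual pattern; the core's actual usage is in
 * hab.c, whose diff is suppressed above.
 */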
int hab_vchan_open(struct uhab_context *ctx,
unsigned int mmid, int32_t *vcid,
int32_t timeout, uint32_t flags);
int hab_vchan_close(struct uhab_context *ctx,
int32_t vcid);
long hab_vchan_send(struct uhab_context *ctx,
int vcid,
size_t sizebytes,
void *data,
unsigned int flags);
int hab_vchan_recv(struct uhab_context *ctx,
struct hab_message **msg,
int vcid,
int *rsize,
unsigned int timeout,
unsigned int flags);
void hab_vchan_stop(struct virtual_channel *vchan);
void hab_vchans_stop(struct physical_channel *pchan);
void hab_vchan_stop_notify(struct virtual_channel *vchan);
void hab_vchans_empty_wait(int vmid);
int hab_mem_export(struct uhab_context *ctx,
struct hab_export *param, int kernel);
int hab_mem_import(struct uhab_context *ctx,
struct hab_import *param, int kernel);
int hab_mem_unexport(struct uhab_context *ctx,
struct hab_unexport *param, int kernel);
void habmem_export_get(struct export_desc_super *exp_super);
int habmem_export_put(struct export_desc_super *exp_super);
int hab_mem_unimport(struct uhab_context *ctx,
struct hab_unimport *param, int kernel);
void habmem_remove_export(struct export_desc *exp);
/* memory hypervisor framework plugin I/F */
struct export_desc_super *habmem_add_export(
struct virtual_channel *vchan,
int sizebytes,
uint32_t flags);
int habmem_hyp_grant_user(struct virtual_channel *vchan,
unsigned long address,
int page_count,
int flags,
int remotedom,
int *compressed,
int *compressed_size,
int *export_id);
int habmem_hyp_grant(struct virtual_channel *vchan,
unsigned long address,
int page_count,
int flags,
int remotedom,
int *compressed,
int *compressed_size,
int *export_id);
int habmem_hyp_revoke(void *expdata, uint32_t count);
int habmem_exp_release(struct export_desc_super *exp_super);
void *habmem_imp_hyp_open(void);
void habmem_imp_hyp_close(void *priv, int kernel);
int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param,
struct export_desc *exp, int kernel);
int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp, int kernel);
int habmem_imp_hyp_mmap(struct file *flip, struct vm_area_struct *vma);
int habmm_imp_hyp_map_check(void *imp_ctx, struct export_desc *exp);
void hab_msg_free(struct hab_message *message);
int hab_msg_dequeue(struct virtual_channel *vchan,
struct hab_message **msg, int *rsize, unsigned int timeout,
unsigned int flags);
int hab_msg_recv(struct physical_channel *pchan,
struct hab_header *header);
void hab_open_request_init(struct hab_open_request *request,
int type,
struct physical_channel *pchan,
int vchan_id,
int sub_id,
int open_id);
int hab_open_request_send(struct hab_open_request *request);
int hab_open_request_add(struct physical_channel *pchan,
size_t sizebytes, int request_type);
void hab_open_request_free(struct hab_open_request *request);
int hab_open_listen(struct uhab_context *ctx,
struct hab_device *dev,
struct hab_open_request *listen,
struct hab_open_request **recv_request,
int ms_timeout);
struct virtual_channel *hab_vchan_alloc(struct uhab_context *ctx,
struct physical_channel *pchan, int openid);
struct virtual_channel *hab_vchan_get(struct physical_channel *pchan,
struct hab_header *header);
void hab_vchan_put(struct virtual_channel *vchan);
struct virtual_channel *hab_get_vchan_fromvcid(int32_t vcid,
struct uhab_context *ctx, int ignore_remote);
struct physical_channel *hab_pchan_alloc(struct hab_device *habdev,
int otherend_id);
struct physical_channel *hab_pchan_find_domid(struct hab_device *dev,
int dom_id);
int hab_vchan_find_domid(struct virtual_channel *vchan);
void hab_pchan_get(struct physical_channel *pchan);
void hab_pchan_put(struct physical_channel *pchan);
struct uhab_context *hab_ctx_alloc(int kernel);
void hab_ctx_free(struct kref *ref);
static inline void hab_ctx_get(struct uhab_context *ctx)
{
if (ctx)
kref_get(&ctx->refcount);
}
static inline void hab_ctx_put(struct uhab_context *ctx)
{
if (ctx)
kref_put(&ctx->refcount, hab_ctx_free);
}
void hab_send_close_msg(struct virtual_channel *vchan);
int hab_hypervisor_register(void);
int hab_hypervisor_register_os(void);
int hab_hypervisor_unregister_os(void);
void hab_hypervisor_unregister(void);
void hab_hypervisor_unregister_common(void);
int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
int vmid_remote, struct hab_device *mmid_device);
int habhyp_commdev_dealloc(void *commdev);
void habhyp_commdev_dealloc_os(void *commdev);
int habhyp_commdev_create_dispatcher(struct physical_channel *pchan);
int physical_channel_read(struct physical_channel *pchan,
void *payload,
size_t read_size);
int physical_channel_send(struct physical_channel *pchan,
struct hab_header *header,
void *payload);
void physical_channel_rx_dispatch(unsigned long physical_channel);
void physical_channel_rx_dispatch_common(unsigned long physical_channel);
int loopback_pchan_create(struct hab_device *dev, char *pchan_name);
int hab_parse(struct local_vmid *settings);
int do_hab_parse(void);
int fill_default_gvm_settings(struct local_vmid *settings,
int vmid_local, int mmid_start, int mmid_end);
bool hab_is_loopback(void);
int hab_vchan_query(struct uhab_context *ctx, int32_t vcid, uint64_t *ids,
char *names, size_t name_size, uint32_t flags);
struct hab_device *find_hab_device(unsigned int mm_id);
unsigned int get_refcnt(struct kref ref);
int hab_open_pending_enter(struct uhab_context *ctx,
struct physical_channel *pchan,
struct hab_open_node *pending);
int hab_open_pending_exit(struct uhab_context *ctx,
struct physical_channel *pchan,
struct hab_open_node *pending);
int hab_open_cancel_notify(struct hab_open_request *request);
int hab_open_receive_cancel(struct physical_channel *pchan,
size_t sizebytes);
int hab_stat_init(struct hab_driver *drv);
int hab_stat_deinit(struct hab_driver *drv);
int hab_stat_show_vchan(struct hab_driver *drv, char *buf, int sz);
int hab_stat_show_ctx(struct hab_driver *drv, char *buf, int sz);
int hab_stat_show_expimp(struct hab_driver *drv, int pid, char *buf, int sz);
int hab_stat_init_sub(struct hab_driver *drv);
int hab_stat_deinit_sub(struct hab_driver *drv);
static inline void hab_spin_lock(spinlock_t *lock, int irqs_disabled)
{
if (irqs_disabled)
spin_lock(lock);
else
spin_lock_bh(lock);
}
static inline void hab_spin_unlock(spinlock_t *lock, int irqs_disabled)
{
if (irqs_disabled)
spin_unlock(lock);
else
spin_unlock_bh(lock);
}
static inline void hab_write_lock(rwlock_t *lock, int irqs_disabled)
{
if (irqs_disabled)
write_lock(lock);
else
write_lock_bh(lock);
}
static inline void hab_write_unlock(rwlock_t *lock, int irqs_disabled)
{
if (irqs_disabled)
write_unlock(lock);
else
write_unlock_bh(lock);
}
/* Global singleton HAB instance */
extern struct hab_driver hab_driver;
int dump_hab_get_file_name(char *file_time, int ft_size);
int dump_hab_open(void);
void dump_hab_close(void);
int dump_hab_buf(void *buf, int size);
void hab_pipe_read_dump(struct physical_channel *pchan);
void dump_hab(int mmid);
void dump_hab_wq(struct physical_channel *pchan);
int hab_stat_log(struct physical_channel **pchans, int pchan_cnt, char *dest,
int dest_size);
int hab_stat_buffer_print(char *dest,
int dest_size, const char *fmt, ...);
#endif /* __HAB_H */


@@ -0,0 +1,263 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
struct loopback_msg {
struct list_head node;
int payload_size;
struct hab_header header;
char payload[];
};
struct lb_thread_struct {
int stop; /* set by creator */
int bexited; /* set by thread */
void *data; /* thread private data */
};
struct loopback_dev {
spinlock_t io_lock;
struct list_head msg_list;
int msg_cnt;
struct task_struct *kthread; /* creator's thread handle */
struct lb_thread_struct thread_data; /* thread private data */
wait_queue_head_t thread_queue;
struct loopback_msg *current_msg;
};
static int lb_thread_queue_empty(struct loopback_dev *dev)
{
int ret;
spin_lock_bh(&dev->io_lock);
ret = list_empty(&dev->msg_list);
spin_unlock_bh(&dev->io_lock);
return ret;
}
int lb_kthread(void *d)
{
struct lb_thread_struct *p = (struct lb_thread_struct *)d;
struct physical_channel *pchan = (struct physical_channel *)p->data;
struct loopback_dev *dev = pchan->hyp_data;
int ret = 0;
while (!p->stop) {
schedule();
ret = wait_event_interruptible(dev->thread_queue,
!lb_thread_queue_empty(dev) ||
p->stop);
spin_lock_bh(&dev->io_lock);
while (!list_empty(&dev->msg_list)) {
struct loopback_msg *msg = NULL;
msg = list_first_entry(&dev->msg_list,
struct loopback_msg, node);
dev->current_msg = msg;
list_del(&msg->node);
dev->msg_cnt--;
ret = hab_msg_recv(pchan, &msg->header);
if (ret) {
pr_err("failed %d msg handling sz %d header %d %d %d, %d %X %d, total %d\n",
ret, msg->payload_size,
HAB_HEADER_GET_ID(msg->header),
HAB_HEADER_GET_TYPE(msg->header),
HAB_HEADER_GET_SIZE(msg->header),
msg->header.session_id,
msg->header.signature,
msg->header.sequence, dev->msg_cnt);
}
kfree(msg);
dev->current_msg = NULL;
}
spin_unlock_bh(&dev->io_lock);
}
p->bexited = 1;
pr_debug("exit kthread\n");
return 0;
}
int physical_channel_send(struct physical_channel *pchan,
struct hab_header *header,
void *payload)
{
int size = HAB_HEADER_GET_SIZE(*header); /* payload size */
struct timespec64 ts = {0};
struct loopback_msg *msg = NULL;
struct loopback_dev *dev = pchan->hyp_data;
msg = kmalloc(size + sizeof(*msg), GFP_KERNEL);
if (!msg)
return -ENOMEM;
memcpy(&msg->header, header, sizeof(*header));
msg->payload_size = size; /* payload size could be zero */
if (size && payload) {
if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_PROFILE) {
struct habmm_xing_vm_stat *pstat =
(struct habmm_xing_vm_stat *)payload;
ktime_get_ts64(&ts);
pstat->tx_sec = ts.tv_sec;
pstat->tx_usec = ts.tv_nsec/NSEC_PER_USEC;
}
memcpy(msg->payload, payload, size);
}
spin_lock_bh(&dev->io_lock);
list_add_tail(&msg->node, &dev->msg_list);
dev->msg_cnt++;
spin_unlock_bh(&dev->io_lock);
wake_up_interruptible(&dev->thread_queue);
return 0;
}
/* loopback read is only used during open */
int physical_channel_read(struct physical_channel *pchan,
void *payload,
size_t read_size)
{
struct loopback_dev *dev = pchan->hyp_data;
struct loopback_msg *msg = dev->current_msg;
if (read_size) {
if (read_size != msg->payload_size) {
pr_err("read size mismatch requested %zd, received %d\n",
read_size, msg->payload_size);
memcpy(payload, msg->payload, min(((int)read_size),
msg->payload_size));
} else {
memcpy(payload, msg->payload, read_size);
}
} else {
read_size = 0;
}
return read_size;
}
/* pchan is directly added into the hab_device */
int loopback_pchan_create(struct hab_device *dev, char *pchan_name)
{
int result;
struct physical_channel *pchan = NULL;
struct loopback_dev *lb_dev = NULL;
pchan = hab_pchan_alloc(dev, LOOPBACK_DOM);
if (!pchan) {
result = -ENOMEM;
goto err;
}
pchan->closed = 0;
strscpy(pchan->name, pchan_name, sizeof(pchan->name));
lb_dev = kzalloc(sizeof(*lb_dev), GFP_KERNEL);
if (!lb_dev) {
result = -ENOMEM;
goto err;
}
spin_lock_init(&lb_dev->io_lock);
INIT_LIST_HEAD(&lb_dev->msg_list);
init_waitqueue_head(&lb_dev->thread_queue);
lb_dev->thread_data.data = pchan;
lb_dev->kthread = kthread_run(lb_kthread, &lb_dev->thread_data,
pchan->name);
if (IS_ERR(lb_dev->kthread)) {
result = PTR_ERR(lb_dev->kthread);
pr_err("failed to create kthread for %s, ret %d\n",
pchan->name, result);
goto err;
}
pchan->hyp_data = lb_dev;
return 0;
err:
kfree(lb_dev);
kfree(pchan);
return result;
}
void physical_channel_rx_dispatch(unsigned long data)
{
}
int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
int vmid_remote, struct hab_device *mmid_device)
{
struct physical_channel *pchan;
int ret = loopback_pchan_create(mmid_device, name);
if (ret) {
pr_err("failed to create %s pchan in mmid device %s, ret %d, pchan cnt %d\n",
name, mmid_device->name, ret, mmid_device->pchan_cnt);
*commdev = NULL;
} else {
pr_debug("loopback physical channel on %s return %d, loopback mode(%d), total pchan %d\n",
name, ret, hab_driver.b_loopback,
mmid_device->pchan_cnt);
pchan = hab_pchan_find_domid(mmid_device,
HABCFG_VMID_DONT_CARE);
*commdev = pchan;
hab_pchan_put(pchan);
pr_debug("pchan %s vchans %d refcnt %d\n",
pchan->name, pchan->vcnt, get_refcnt(pchan->refcount));
}
return ret;
}
int habhyp_commdev_dealloc(void *commdev)
{
struct physical_channel *pchan = commdev;
struct loopback_dev *dev = pchan->hyp_data;
struct loopback_msg *msg, *tmp;
int ret;
spin_lock_bh(&dev->io_lock);
if (!list_empty(&dev->msg_list) || dev->msg_cnt) {
pr_err("pchan %s msg leak cnt %d\n", pchan->name, dev->msg_cnt);
list_for_each_entry_safe(msg, tmp, &dev->msg_list, node) {
list_del(&msg->node);
dev->msg_cnt--;
kfree(msg);
}
pr_debug("pchan %s msg cnt %d now\n",
pchan->name, dev->msg_cnt);
}
spin_unlock_bh(&dev->io_lock);
dev->thread_data.stop = 1;
ret = kthread_stop(dev->kthread);
while (!dev->thread_data.bexited)
schedule();
dev->kthread = NULL;
/* hyp_data is freed in pchan */
if (get_refcnt(pchan->refcount) > 1) {
pr_warn("potential leak pchan %s vchans %d refcnt %d\n",
pchan->name, pchan->vcnt, get_refcnt(pchan->refcount));
}
hab_pchan_put((struct physical_channel *)commdev);
return 0;
}


@@ -0,0 +1,196 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
#include "hab_ghs.h"
#define GIPC_VM_SET_CNT 22
/* same vmid assignment for all the VMs; it must match dt_gipc_path_name */
static int mmid_order[GIPC_VM_SET_CNT] = {
MM_AUD_1,
MM_AUD_2,
MM_AUD_3,
MM_AUD_4,
MM_CAM_1,
MM_CAM_2,
MM_DISP_1,
MM_DISP_2,
MM_DISP_3,
MM_DISP_4,
MM_DISP_5,
MM_GFX,
MM_VID,
MM_MISC,
MM_QCPE_VM1,
MM_VID_2, /* newly recycled */
0,
0,
MM_CLK_VM1,
MM_CLK_VM2,
MM_FDE_1,
MM_BUFFERQ_1,
};
struct ghs_vmm_plugin_info_s ghs_vmm_plugin_info = {
dt_gipc_path_name,
mmid_order,
0,
0,
};
int get_dt_name_idx(int vmid_base, int mmid,
struct ghs_vmm_plugin_info_s *plugin_info)
{
int idx = -1;
int i;
if (vmid_base < 0 || vmid_base > plugin_info->probe_cnt /
GIPC_VM_SET_CNT) {
pr_err("vmid %d overflow expected max %d\n", vmid_base,
plugin_info->probe_cnt / GIPC_VM_SET_CNT);
return idx;
}
for (i = 0; i < GIPC_VM_SET_CNT; i++) {
if (mmid == plugin_info->mmid_dt_mapping[i]) {
idx = vmid_base * GIPC_VM_SET_CNT + i;
if (idx > plugin_info->probe_cnt) {
pr_err("dt name idx %d overflow max %d\n",
idx, plugin_info->probe_cnt);
idx = -1;
}
break;
}
}
return idx;
}
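/*
 * Illustrative example (added, not in the original source): with
 * probe_cnt == ARRAY_SIZE(dt_gipc_path_name) == 22 (a single VM set),
 * vmid_base 0 and mmid MM_CAM_1 (index 4 in mmid_order) resolve to
 * idx = 0 * GIPC_VM_SET_CNT + 4, i.e. the "testgipc5" alias that
 * hab_gipc_ep_attach() later reads from the /aliases node.
 */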
/* static struct physical_channel *habhyp_commdev_alloc(int id) */
int habhyp_commdev_alloc(void **commdev, int is_be, char *name, int vmid_remote,
struct hab_device *mmid_device)
{
struct ghs_vdev *dev = NULL;
struct ghs_vdev_os *dev_os = NULL;
struct physical_channel *pchan = NULL;
struct physical_channel **ppchan = (struct physical_channel **)commdev;
int ret = 0;
if (ghs_vmm_plugin_info.curr > ghs_vmm_plugin_info.probe_cnt) {
pr_err("too many commdev alloc %d, supported is %d\n",
ghs_vmm_plugin_info.curr,
ghs_vmm_plugin_info.probe_cnt);
ret = -ENOENT;
goto err;
}
/* common part for hyp_data */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
ret = -ENOMEM;
pr_err("allocate struct ghs_vdev failed %zu bytes on pchan %s\n",
sizeof(*dev), name);
goto err;
}
memset(dev, 0, sizeof(*dev));
/* os specific part for hyp_data */
dev_os = kzalloc(sizeof(*dev_os), GFP_KERNEL);
if (!dev_os) {
ret = -ENOMEM;
pr_err("allocate ghs_vdev_os failed %zu bytes on pchan %s\n",
sizeof(*dev_os), name);
goto err;
}
dev->os_data = dev_os;
spin_lock_init(&dev->io_lock);
/*
* TODO: ExtractEndpoint is in ghs_comm.c because it blocks.
* Extract and Request should be in roughly the same spot
*/
ret = hab_gipc_ep_attach(is_be, name, vmid_remote, mmid_device, dev);
if (ret)
goto err;
/* add pchan into the mmid_device list */
pchan = hab_pchan_alloc(mmid_device, vmid_remote);
if (!pchan) {
pr_err("hab_pchan_alloc failed for %s, cnt %d\n",
mmid_device->name, mmid_device->pchan_cnt);
ret = -ENOMEM;
goto err;
}
pchan->closed = 0;
pchan->hyp_data = (void *)dev;
pchan->is_be = is_be;
strscpy(dev->name, name, sizeof(dev->name));
strscpy(pchan->name, name, sizeof(pchan->name));
*ppchan = pchan;
dev->read_data = kmalloc(GIPC_RECV_BUFF_SIZE_BYTES, GFP_KERNEL);
if (!dev->read_data) {
ret = -ENOMEM;
goto err;
}
ret = habhyp_commdev_create_dispatcher(pchan);
if (ret)
goto err;
/* this value could be more than devp total */
ghs_vmm_plugin_info.curr++;
return 0;
err:
hab_pchan_put(pchan);
kfree(dev);
kfree(dev_os);
return ret;
}
int habhyp_commdev_dealloc(void *commdev)
{
struct physical_channel *pchan = (struct physical_channel *)commdev;
struct ghs_vdev *dev = pchan->hyp_data;
/* os specific deallocation for this commdev */
habhyp_commdev_dealloc_os(commdev);
kfree(dev->read_data);
kfree(dev->os_data);
kfree(dev);
pchan->closed = 1;
pchan->hyp_data = NULL;
if (get_refcnt(pchan->refcount) > 1) {
pr_warn("potential leak pchan %s vchans %d refcnt %d\n",
pchan->name, pchan->vcnt, get_refcnt(pchan->refcount));
}
hab_pchan_put(pchan);
return 0;
}
void hab_hypervisor_unregister(void)
{
pr_debug("total %d\n", hab_driver.ndevices);
hab_hypervisor_unregister_common();
ghs_vmm_plugin_info.curr = 0;
}
int hab_hypervisor_register(void)
{
int ret = 0;
/* os-specific registration work */
ret = hab_hypervisor_register_os();
return ret;
}


@@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HAB_GHS_H
#define __HAB_GHS_H
#include "hab_ghs_os.h"
#define GIPC_RECV_BUFF_SIZE_BYTES (256*1024)
struct ghs_vdev {
int be;
void *read_data; /* buffer to receive from gipc */
size_t read_size;
int read_offset;
GIPC_Endpoint endpoint;
spinlock_t io_lock;
char name[32];
struct ghs_vdev_os *os_data; /* os-specific for this comm dev */
};
struct ghs_vmm_plugin_info_s {
const char * const *dt_name;
int *mmid_dt_mapping;
int curr;
int probe_cnt;
};
extern struct ghs_vmm_plugin_info_s ghs_vmm_plugin_info;
extern const char * const dt_gipc_path_name[];
int get_dt_name_idx(int vmid_base, int mmid,
struct ghs_vmm_plugin_info_s *plugin_info);
int hab_gipc_wait_to_send(GIPC_Endpoint endpoint);
int hab_gipc_ep_attach(int is_be, char *name, int vmid_remote,
struct hab_device *mmid_device, struct ghs_vdev *dev);
#endif /* __HAB_GHS_H */


@@ -0,0 +1,161 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
#include "hab_ghs.h"
const char * const dt_gipc_path_name[] = {
"testgipc1",
"testgipc2",
"testgipc3",
"testgipc4",
"testgipc5",
"testgipc6",
"testgipc7",
"testgipc8",
"testgipc9",
"testgipc10",
"testgipc11",
"testgipc12",
"testgipc13",
"testgipc14",
"testgipc15",
"testgipc16",
"testgipc17",
"testgipc18",
"testgipc19",
"testgipc20",
"testgipc21",
"testgipc22",
};
static void ghs_irq_handler(void *cookie)
{
struct physical_channel *pchan = cookie;
struct ghs_vdev *dev =
(struct ghs_vdev *) (pchan ? pchan->hyp_data : NULL);
if (dev)
tasklet_hi_schedule(&dev->os_data->task);
}
int hab_gipc_ep_attach(int is_be, char *name, int vmid_remote,
struct hab_device *mmid_device, struct ghs_vdev *dev)
{
int dt_name_idx = 0;
int ret = 0;
if (is_be) {
/* role is backend */
dev->be = 1;
} else {
/* role is FE */
struct device_node *gvh_dn;
gvh_dn = of_find_node_by_path("/aliases");
if (gvh_dn) {
const char *ep_path = NULL;
struct device_node *ep_dn = NULL;
dt_name_idx = get_dt_name_idx(vmid_remote,
mmid_device->id,
&ghs_vmm_plugin_info);
if (dt_name_idx < 0) {
pr_err("failed to find %s for vmid %d ret %d\n",
mmid_device->name,
mmid_device->id,
dt_name_idx);
of_node_put(gvh_dn);
ret = -ENOENT;
goto exit;
}
ret = of_property_read_string(gvh_dn,
ghs_vmm_plugin_info.dt_name[dt_name_idx],
&ep_path);
if (ret) {
pr_err("failed to read endpoint str ret %d\n",
ret);
of_node_put(gvh_dn);
ret = -ENOENT;
goto exit;
}
of_node_put(gvh_dn);
ep_dn = of_find_node_by_path(ep_path);
if (ep_dn) {
dev->endpoint = kgipc_endpoint_alloc(ep_dn);
of_node_put(ep_dn);
if (IS_ERR(dev->endpoint)) {
ret = PTR_ERR(dev->endpoint);
pr_err("alloc failed %d %s ret %d\n",
dt_name_idx, mmid_device->name,
ret);
} else {
pr_debug("gipc ep found for %d %s\n",
dt_name_idx, mmid_device->name);
}
} else {
pr_err("of_parse_phandle failed id %d %s\n",
dt_name_idx, mmid_device->name);
ret = -ENOENT;
}
} else {
pr_err("of_find_compatible_node failed id %d %s\n",
dt_name_idx, mmid_device->name);
ret = -ENOENT;
}
}
exit:
return ret;
}
int habhyp_commdev_create_dispatcher(struct physical_channel *pchan)
{
struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
int ret = 0;
tasklet_init(&dev->os_data->task, physical_channel_rx_dispatch,
(unsigned long) pchan);
ret = kgipc_endpoint_start_with_irq_callback(dev->endpoint,
ghs_irq_handler,
pchan);
if (ret)
pr_err("irq alloc failed id: %d %s, ret: %d\n",
ghs_vmm_plugin_info.curr, pchan->name, ret);
else
pr_debug("ep irq handler started for %d %s, ret %d\n",
ghs_vmm_plugin_info.curr, pchan->name, ret);
return ret;
}
void habhyp_commdev_dealloc_os(void *commdev)
{
struct physical_channel *pchan = (struct physical_channel *)commdev;
struct ghs_vdev *dev = pchan->hyp_data;
kgipc_endpoint_free(dev->endpoint);
}
int hab_hypervisor_register_os(void)
{
ghs_vmm_plugin_info.probe_cnt = ARRAY_SIZE(dt_gipc_path_name);
hab_driver.b_server_dom = 0;
return 0;
}
void dump_hab_wq(struct physical_channel *pchan) {}
void hab_pipe_read_dump(struct physical_channel *pchan) {}
int hab_stat_log(struct physical_channel **pchans, int pchan_cnt, char *dest,
int dest_size)
{
return 0;
}


@@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HAB_GHS_OS_H
#define __HAB_GHS_OS_H
#include <ghs_vmm/kgipc.h>
struct ghs_vdev_os {
struct tasklet_struct task;
};
#endif /* __HAB_GHS_OS_H */


@@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HAB_GRANTABLE_H
#define __HAB_GRANTABLE_H
/* Grantable should be common between exporter and importer */
struct grantable {
unsigned long pfn;
};
struct compressed_pfns {
unsigned long first_pfn;
int nregions;
struct region {
int size;
int space;
} region[];
};
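/*
 * Illustrative example (added, not in the original header): a 6-page
 * buffer at PFNs {100, 101, 102, 200, 201, 300} is encoded as
 *   first_pfn = 100
 *   nregions  = 3
 *   region[0] = { .size = 3, .space = 97 }  // gap of 97 pfns up to 200
 *   region[1] = { .size = 2, .space = 98 }  // gap of 98 pfns up to 300
 *   region[2] = { .size = 1, .space = 0 }
 * pages_list_create() in hab_mem_linux.c walks this table to rebuild the
 * importer-side page array.
 */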
#endif /* __HAB_GRANTABLE_H */


@@ -0,0 +1,430 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/of_device.h>
#include "hab.h"
unsigned int get_refcnt(struct kref ref)
{
return kref_read(&ref);
}
static int hab_open(struct inode *inodep, struct file *filep)
{
int result = 0;
struct uhab_context *ctx;
ctx = hab_ctx_alloc(0);
if (!ctx) {
pr_err("hab_ctx_alloc failed\n");
filep->private_data = NULL;
return -ENOMEM;
}
ctx->owner = task_pid_nr(current);
filep->private_data = ctx;
pr_debug("ctx owner %d refcnt %d\n", ctx->owner,
get_refcnt(ctx->refcount));
return result;
}
static int hab_release(struct inode *inodep, struct file *filep)
{
struct uhab_context *ctx = filep->private_data;
struct virtual_channel *vchan, *tmp;
struct hab_open_node *node;
if (!ctx)
return 0;
pr_debug("inode %pK, filep %pK ctx %pK\n", inodep, filep, ctx);
write_lock(&ctx->ctx_lock);
/* notify remote side on vchan closing */
list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) {
/* local close starts */
vchan->closed = 1;
list_del(&vchan->node); /* vchan is not in this ctx anymore */
ctx->vcnt--;
write_unlock(&ctx->ctx_lock);
hab_vchan_stop_notify(vchan);
hab_vchan_put(vchan); /* there is a lock inside */
write_lock(&ctx->ctx_lock);
}
/* notify remote side on pending open */
list_for_each_entry(node, &ctx->pending_open, node) {
/* no touch to the list itself. it is allocated on the stack */
if (hab_open_cancel_notify(&node->request))
pr_err("failed to send open cancel vcid %x subid %d openid %d pchan %s\n",
node->request.xdata.vchan_id,
node->request.xdata.sub_id,
node->request.xdata.open_id,
node->request.pchan->habdev->name);
}
write_unlock(&ctx->ctx_lock);
hab_ctx_put(ctx);
filep->private_data = NULL;
return 0;
}
static long hab_copy_data(struct hab_message *msg, struct hab_recv *recv_param)
{
long ret = 0;
int i = 0;
void **scatter_buf = (void **)msg->data;
uint64_t dest = 0U;
if (unlikely(msg->scatter)) {
/* The maximum size of msg is limited in hab_msg_alloc */
for (i = 0; i < msg->sizebytes / PAGE_SIZE; i++) {
dest = (uint64_t)(recv_param->data) + (uint64_t)(i * PAGE_SIZE);
if (copy_to_user((void __user *)dest,
scatter_buf[i],
PAGE_SIZE)) {
pr_err("copy_to_user failed: vc=%x size=%d\n",
recv_param->vcid, (int)msg->sizebytes);
recv_param->sizebytes = 0;
ret = -EFAULT;
break;
}
}
if ((ret != -EFAULT) && (msg->sizebytes % PAGE_SIZE)) {
dest = (uint64_t)(recv_param->data) + (uint64_t)(i * PAGE_SIZE);
if (copy_to_user((void __user *)dest,
scatter_buf[i],
msg->sizebytes % PAGE_SIZE)) {
pr_err("copy_to_user failed: vc=%x size=%d\n",
recv_param->vcid, (int)msg->sizebytes);
recv_param->sizebytes = 0;
ret = -EFAULT;
}
}
} else {
if (copy_to_user((void __user *)recv_param->data,
msg->data,
msg->sizebytes)) {
pr_err("copy_to_user failed: vc=%x size=%d\n",
recv_param->vcid, (int)msg->sizebytes);
recv_param->sizebytes = 0;
ret = -EFAULT;
}
}
return ret;
}
static long hab_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct uhab_context *ctx = (struct uhab_context *)filep->private_data;
struct hab_open *open_param;
struct hab_close *close_param;
struct hab_recv *recv_param;
struct hab_send *send_param;
struct hab_info *info_param;
struct hab_message *msg = NULL;
void *send_data;
unsigned char data[256] = { 0 };
long ret = 0;
char names[30] = { 0 };
if (_IOC_SIZE(cmd) && (cmd & IOC_IN)) {
if (_IOC_SIZE(cmd) > sizeof(data))
return -EINVAL;
if (copy_from_user(data, (void __user *)arg, _IOC_SIZE(cmd))) {
pr_err("copy_from_user failed cmd=%x size=%d\n",
cmd, _IOC_SIZE(cmd));
return -EFAULT;
}
}
switch (cmd) {
case IOCTL_HAB_VC_OPEN:
open_param = (struct hab_open *)data;
ret = hab_vchan_open(ctx, open_param->mmid,
&open_param->vcid,
open_param->timeout,
open_param->flags);
break;
case IOCTL_HAB_VC_CLOSE:
close_param = (struct hab_close *)data;
ret = hab_vchan_close(ctx, close_param->vcid);
break;
case IOCTL_HAB_SEND:
send_param = (struct hab_send *)data;
if (send_param->sizebytes > (uint32_t)(HAB_HEADER_SIZE_MAX)) {
ret = -EINVAL;
break;
}
send_data = kzalloc(send_param->sizebytes, GFP_KERNEL);
if (!send_data) {
ret = -ENOMEM;
break;
}
if (copy_from_user(send_data, (void __user *)send_param->data,
send_param->sizebytes)) {
ret = -EFAULT;
} else {
ret = hab_vchan_send(ctx, send_param->vcid,
send_param->sizebytes,
send_data,
send_param->flags);
}
kfree(send_data);
break;
case IOCTL_HAB_RECV:
recv_param = (struct hab_recv *)data;
if (!recv_param->data) {
ret = -EINVAL;
break;
}
ret = hab_vchan_recv(ctx, &msg, recv_param->vcid,
&recv_param->sizebytes, recv_param->timeout,
recv_param->flags);
if (msg) {
if (ret == 0)
ret = hab_copy_data(msg, recv_param);
else
pr_warn("vcid %X recv failed %d and msg is still of %zd bytes\n",
recv_param->vcid, (int)ret, msg->sizebytes);
hab_msg_free(msg);
}
break;
case IOCTL_HAB_VC_EXPORT:
ret = hab_mem_export(ctx, (struct hab_export *)data, 0);
break;
case IOCTL_HAB_VC_IMPORT:
ret = hab_mem_import(ctx, (struct hab_import *)data, 0);
break;
case IOCTL_HAB_VC_UNEXPORT:
ret = hab_mem_unexport(ctx, (struct hab_unexport *)data, 0);
break;
case IOCTL_HAB_VC_UNIMPORT:
ret = hab_mem_unimport(ctx, (struct hab_unimport *)data, 0);
break;
case IOCTL_HAB_VC_QUERY:
info_param = (struct hab_info *)data;
if (!info_param->names || !info_param->namesize ||
info_param->namesize > sizeof(names)) {
pr_err("wrong param for vm info vcid %X, names %llX, sz %d\n",
info_param->vcid, info_param->names,
info_param->namesize);
ret = -EINVAL;
break;
}
ret = hab_vchan_query(ctx, info_param->vcid,
(uint64_t *)&info_param->ids,
names, info_param->namesize, 0);
if (!ret) {
if (copy_to_user((void __user *)info_param->names,
names,
info_param->namesize)) {
pr_err("copy_to_user failed: vc=%x size=%d\n",
info_param->vcid,
info_param->namesize*2);
info_param->namesize = 0;
ret = -EFAULT;
}
}
break;
default:
ret = -ENOIOCTLCMD;
}
if (_IOC_SIZE(cmd) && (cmd & IOC_OUT))
if (copy_to_user((void __user *) arg, data, _IOC_SIZE(cmd))) {
pr_err("copy_to_user failed: cmd=%x\n", cmd);
ret = -EFAULT;
}
return ret;
}
static long hab_compat_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg)
{
return hab_ioctl(filep, cmd, arg);
}
static const struct file_operations hab_fops = {
.owner = THIS_MODULE,
.open = hab_open,
.release = hab_release,
.mmap = habmem_imp_hyp_mmap,
.unlocked_ioctl = hab_ioctl,
.compat_ioctl = hab_compat_ioctl
};
/*
* These sg map functions are pass-through because the memory backing the
* sg list is already accessible to the kernel, as it comes from the
* dedicated shared VM pool
*/
static int hab_map_sg(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
unsigned long attrs)
{
/* return nelems directly */
return nelems;
}
static void hab_unmap_sg(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir,
unsigned long attrs)
{
/* Do nothing */
}
static const struct dma_map_ops hab_dma_ops = {
.map_sg = hab_map_sg,
.unmap_sg = hab_unmap_sg,
};
static int hab_power_down_callback(
struct notifier_block *nfb, unsigned long action, void *data)
{
switch (action) {
case SYS_DOWN:
case SYS_HALT:
case SYS_POWER_OFF:
pr_debug("reboot called %ld\n", action);
hab_hypervisor_unregister(); /* only for single VM guest */
break;
}
pr_debug("reboot called %ld done\n", action);
return NOTIFY_DONE;
}
static struct notifier_block hab_reboot_notifier = {
.notifier_call = hab_power_down_callback,
};
static int __init hab_init(void)
{
int result;
dev_t dev;
result = alloc_chrdev_region(&hab_driver.major, 0, 1, "hab");
if (result < 0) {
pr_err("alloc_chrdev_region failed: %d\n", result);
return result;
}
cdev_init(&hab_driver.cdev, &hab_fops);
hab_driver.cdev.owner = THIS_MODULE;
hab_driver.cdev.ops = &hab_fops;
dev = MKDEV(MAJOR(hab_driver.major), 0);
result = cdev_add(&hab_driver.cdev, dev, 1);
if (result < 0) {
unregister_chrdev_region(dev, 1);
pr_err("cdev_add failed: %d\n", result);
return result;
}
hab_driver.class = class_create(THIS_MODULE, "hab");
if (IS_ERR(hab_driver.class)) {
result = PTR_ERR(hab_driver.class);
pr_err("class_create failed: %d\n", result);
goto err;
}
hab_driver.dev = device_create(hab_driver.class, NULL,
dev, &hab_driver, "hab");
if (IS_ERR(hab_driver.dev)) {
result = PTR_ERR(hab_driver.dev);
pr_err("device_create failed: %d\n", result);
goto err;
}
result = register_reboot_notifier(&hab_reboot_notifier);
if (result)
pr_err("failed to register reboot notifier %d\n", result);
/* read in hab config, then configure pchans */
result = do_hab_parse();
if (!result) {
hab_driver.kctx = hab_ctx_alloc(1);
if (!hab_driver.kctx) {
pr_err("hab_ctx_alloc failed\n");
result = -ENOMEM;
hab_hypervisor_unregister();
goto err;
} else {
/* First, try to configure system dma_ops */
result = dma_coerce_mask_and_coherent(
hab_driver.dev,
DMA_BIT_MASK(64));
/* System dma_ops failed, fallback to dma_ops of hab */
if (result) {
pr_warn("config system dma_ops failed %d, fallback to hab\n",
result);
hab_driver.dev->bus = NULL;
set_dma_ops(hab_driver.dev, &hab_dma_ops);
}
}
}
hab_stat_init(&hab_driver);
return result;
err:
if (!IS_ERR_OR_NULL(hab_driver.dev))
device_destroy(hab_driver.class, dev);
if (!IS_ERR_OR_NULL(hab_driver.class))
class_destroy(hab_driver.class);
cdev_del(&hab_driver.cdev);
unregister_chrdev_region(dev, 1);
pr_err("Error in hab init, result %d\n", result);
return result;
}
static void __exit hab_exit(void)
{
dev_t dev;
hab_hypervisor_unregister();
hab_stat_deinit(&hab_driver);
hab_ctx_put(hab_driver.kctx);
dev = MKDEV(MAJOR(hab_driver.major), 0);
device_destroy(hab_driver.class, dev);
class_destroy(hab_driver.class);
cdev_del(&hab_driver.cdev);
unregister_chrdev_region(dev, 1);
unregister_reboot_notifier(&hab_reboot_notifier);
pr_debug("hab exit called\n");
}
#if IS_MODULE(CONFIG_MSM_HAB)
module_init(hab_init);
#else
subsys_initcall(hab_init);
#endif
module_exit(hab_exit);
MODULE_DESCRIPTION("Hypervisor abstraction layer");
MODULE_LICENSE("GPL");
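For reference, a hedged userspace sketch (not part of this commit) of the ioctl flow implemented by hab_ioctl() above. The structure fields are inferred from how the handler dereferences them; the uapi header path and the /dev/hab node name (derived from device_create(..., "hab")) are assumptions for illustration.

/* Illustrative only: open a virtual channel, send a buffer, receive a reply. */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hab_ioctl.h> /* assumed install path of the HAB uapi header */

static int hab_echo_example(unsigned int mmid)
{
        struct hab_open open_param = { .mmid = mmid, .timeout = 1000 };
        struct hab_send send_param = { 0 };
        struct hab_recv recv_param = { 0 };
        struct hab_close close_param = { 0 };
        char buf[64] = "hello";
        int fd, ret;

        fd = open("/dev/hab", O_RDWR); /* node name assumed from device_create() */
        if (fd < 0)
                return -1;

        ret = ioctl(fd, IOCTL_HAB_VC_OPEN, &open_param);
        if (ret)
                goto out;

        send_param.vcid = open_param.vcid;
        send_param.data = (uintptr_t)buf; /* handler copies from this user address */
        send_param.sizebytes = strlen(buf) + 1;
        ret = ioctl(fd, IOCTL_HAB_SEND, &send_param);
        if (ret)
                goto close_vc;

        recv_param.vcid = open_param.vcid;
        recv_param.data = (uintptr_t)buf;
        recv_param.sizebytes = sizeof(buf);
        recv_param.timeout = 1000;
        ret = ioctl(fd, IOCTL_HAB_RECV, &recv_param);

close_vc:
        close_param.vcid = open_param.vcid;
        ioctl(fd, IOCTL_HAB_VC_CLOSE, &close_param);
out:
        close(fd);
        return ret;
}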


@@ -0,0 +1,963 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
#include <linux/fdtable.h>
#include <linux/dma-buf.h>
#include "hab_grantable.h"
enum hab_page_list_type {
HAB_PAGE_LIST_IMPORT = 0x1,
HAB_PAGE_LIST_EXPORT
};
struct pages_list {
struct list_head list;
struct page **pages;
long npages;
void *vmapping;
uint32_t userflags;
int32_t export_id;
int32_t vcid;
struct physical_channel *pchan;
uint32_t type;
struct kref refcount;
};
struct importer_context {
struct file *filp;
};
struct exp_platform_data {
void *dmabuf;
void *attach;
void *sg_table;
};
static struct dma_buf_ops dma_buf_ops;
static struct pages_list *pages_list_create(
struct export_desc *exp,
uint32_t userflags)
{
struct page **pages = NULL;
struct compressed_pfns *pfn_table =
(struct compressed_pfns *)exp->payload;
struct pages_list *pglist = NULL;
unsigned long pfn;
int i, j, k = 0, size;
unsigned long region_total_page = 0;
if (!pfn_table)
return ERR_PTR(-EINVAL);
pfn = pfn_table->first_pfn;
if (pfn_valid(pfn) == 0 || page_is_ram(pfn) == 0) {
pr_err("imp sanity failed pfn %lx valid %d ram %d pchan %s\n",
pfn, pfn_valid(pfn),
page_is_ram(pfn), exp->pchan->name);
return ERR_PTR(-EINVAL);
}
size = exp->payload_count * sizeof(struct page *);
pages = vmalloc(size);
if (!pages)
return ERR_PTR(-ENOMEM);
pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
if (!pglist) {
vfree(pages);
return ERR_PTR(-ENOMEM);
}
for (i = 0; i < pfn_table->nregions; i++) {
if (pfn_table->region[i].size <= 0) {
pr_err("pfn_table->region[%d].size %d is less than 1\n",
i, pfn_table->region[i].size);
goto err_region_total_page;
}
region_total_page += pfn_table->region[i].size;
if (region_total_page > exp->payload_count) {
pr_err("payload_count %d but region_total_page %lu\n",
exp->payload_count, region_total_page);
goto err_region_total_page;
}
for (j = 0; j < pfn_table->region[i].size; j++) {
pages[k] = pfn_to_page(pfn+j);
k++;
}
pfn += pfn_table->region[i].size + pfn_table->region[i].space;
}
if (region_total_page != exp->payload_count) {
pr_err("payload_count %d and region_total_page %lu are not equal\n",
exp->payload_count, region_total_page);
goto err_region_total_page;
}
pglist->pages = pages;
pglist->npages = exp->payload_count;
pglist->userflags = userflags;
pglist->export_id = exp->export_id;
pglist->vcid = exp->vcid_remote;
pglist->pchan = exp->pchan;
kref_init(&pglist->refcount);
return pglist;
err_region_total_page:
vfree(pages);
kfree(pglist);
return ERR_PTR(-EINVAL);
}
static void pages_list_add(struct pages_list *pglist)
{
spin_lock_bh(&hab_driver.imp_lock);
list_add_tail(&pglist->list, &hab_driver.imp_list);
hab_driver.imp_cnt++;
spin_unlock_bh(&hab_driver.imp_lock);
}
static void pages_list_remove(struct pages_list *pglist)
{
spin_lock_bh(&hab_driver.imp_lock);
list_del(&pglist->list);
hab_driver.imp_cnt--;
spin_unlock_bh(&hab_driver.imp_lock);
}
static void pages_list_destroy(struct kref *refcount)
{
struct pages_list *pglist = container_of(refcount,
struct pages_list, refcount);
if (pglist->vmapping) {
vunmap(pglist->vmapping);
pglist->vmapping = NULL;
}
/* the imported pages are no longer used; remove them from the import list */
if (pglist->type == HAB_PAGE_LIST_IMPORT)
pages_list_remove(pglist);
vfree(pglist->pages);
kfree(pglist);
}
static void pages_list_get(struct pages_list *pglist)
{
kref_get(&pglist->refcount);
}
static int pages_list_put(struct pages_list *pglist)
{
return kref_put(&pglist->refcount, pages_list_destroy);
}
static struct pages_list *pages_list_lookup(
uint32_t export_id,
struct physical_channel *pchan,
bool get_pages_list)
{
struct pages_list *pglist = NULL, *tmp = NULL;
spin_lock_bh(&hab_driver.imp_lock);
list_for_each_entry_safe(pglist, tmp, &hab_driver.imp_list, list) {
if (pglist->export_id == export_id &&
pglist->pchan == pchan) {
if (get_pages_list)
pages_list_get(pglist);
spin_unlock_bh(&hab_driver.imp_lock);
return pglist;
}
}
spin_unlock_bh(&hab_driver.imp_lock);
return NULL;
}
static int match_file(const void *p, struct file *file, unsigned int fd)
{
/*
* We must return fd + 1 because iterate_fd stops searching on
* non-zero return, but 0 is a valid fd.
*/
return (p == file) ? (fd + 1) : 0;
}
static struct dma_buf *habmem_get_dma_buf_from_va(unsigned long address,
int page_count,
unsigned long *offset)
{
struct vm_area_struct *vma = NULL;
struct dma_buf *dmabuf = NULL;
int rc = 0;
int fd = -1;
mmap_read_lock(current->mm);
vma = find_vma(current->mm, address);
if (!vma || !vma->vm_file) {
pr_err("cannot find vma\n");
rc = -EBADF;
goto pro_end;
}
/* Look for the fd that matches this vma's file */
fd = iterate_fd(current->files, 0, match_file, vma->vm_file);
if (fd == 0) {
pr_err("iterate_fd failed\n");
rc = -EBADF;
goto pro_end;
}
dmabuf = dma_buf_get(fd - 1);
if (IS_ERR_OR_NULL(dmabuf)) {
pr_err("dma_buf_get failed fd %d ret %pK\n", fd, dmabuf);
rc = -EBADF;
goto pro_end;
}
*offset = address - vma->vm_start;
pro_end:
mmap_read_unlock(current->mm);
return rc < 0 ? ERR_PTR(rc) : dmabuf;
}
static struct dma_buf *habmem_get_dma_buf_from_uva(unsigned long address,
int page_count)
{
struct page **pages = NULL;
int i, ret = 0;
struct dma_buf *dmabuf = NULL;
struct pages_list *pglist = NULL;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
pages = vmalloc((page_count * sizeof(struct page *)));
if (!pages) {
ret = -ENOMEM;
goto err;
}
pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
if (!pglist) {
ret = -ENOMEM;
goto err;
}
mmap_read_lock(current->mm);
ret = get_user_pages(address, page_count, 0, pages, NULL);
mmap_read_unlock(current->mm);
if (ret <= 0) {
ret = -EINVAL;
pr_err("get %d user pages failed %d\n",
page_count, ret);
goto err;
}
pglist->pages = pages;
pglist->npages = page_count;
pglist->type = HAB_PAGE_LIST_EXPORT;
kref_init(&pglist->refcount);
exp_info.ops = &dma_buf_ops;
exp_info.size = pglist->npages << PAGE_SHIFT;
exp_info.flags = O_RDWR;
exp_info.priv = pglist;
dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
for (i = 0; i < page_count; i++)
put_page(pages[i]);
pr_err("export to dmabuf failed\n");
ret = PTR_ERR(dmabuf);
goto err;
}
return dmabuf;
err:
vfree(pages);
kfree(pglist);
return ERR_PTR(ret);
}
static int habmem_compress_pfns(
struct export_desc_super *exp_super,
struct compressed_pfns *pfns,
uint32_t *data_size)
{
int ret = 0;
struct exp_platform_data *platform_data =
(struct exp_platform_data *) exp_super->platform_data;
struct dma_buf *dmabuf =
(struct dma_buf *) platform_data->dmabuf;
int page_count = exp_super->exp.payload_count;
struct pages_list *pglist = NULL;
struct page **pages = NULL;
int i = 0, j = 0;
int region_size = 1;
struct scatterlist *s = NULL;
struct sg_table *sg_table = NULL;
struct dma_buf_attachment *attach = NULL;
struct page *page = NULL, *pre_page = NULL;
unsigned long page_offset;
uint32_t spage_size = 0;
if (IS_ERR_OR_NULL(dmabuf) || !pfns || !data_size)
return -EINVAL;
pr_debug("page_count %d\n", page_count);
/* DMA buffer from fd */
if (dmabuf->ops != &dma_buf_ops) {
attach = dma_buf_attach(dmabuf, hab_driver.dev);
if (IS_ERR_OR_NULL(attach)) {
pr_err("dma_buf_attach failed %d\n", -EBADF);
ret = -EBADF;
goto err;
}
sg_table = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
if (IS_ERR_OR_NULL(sg_table)) {
pr_err("dma_buf_map_attachment failed %d\n", -EBADF);
ret = -EBADF;
goto err;
}
		/* store the sg table and attachment so habmem_exp_release() can free them */
platform_data->attach = attach;
platform_data->sg_table = sg_table;
page_offset = exp_super->offset >> PAGE_SHIFT;
pr_debug("page_offset %lu\n", page_offset);
for_each_sg(sg_table->sgl, s, sg_table->nents, i) {
spage_size = s->length >> PAGE_SHIFT;
if (page_offset >= spage_size) {
page_offset -= spage_size;
continue;
}
page = sg_page(s);
if (j == 0) {
pfns->first_pfn = page_to_pfn(nth_page(page,
page_offset));
} else {
pfns->region[j-1].space =
page_to_pfn(nth_page(page, 0)) -
page_to_pfn(pre_page) - 1;
pr_debug("j %d, space %d, ppfn %lu, pfn %lu\n",
j, pfns->region[j-1].space,
page_to_pfn(pre_page),
page_to_pfn(nth_page(page, 0)));
}
pfns->region[j].size = spage_size - page_offset;
if (pfns->region[j].size >= page_count) {
pfns->region[j].size = page_count;
pfns->region[j].space = 0;
break;
}
page_count -= pfns->region[j].size;
pre_page = nth_page(page, pfns->region[j].size - 1);
page_offset = 0;
j++;
}
pfns->nregions = j+1;
} else {
pglist = dmabuf->priv;
pages = pglist->pages;
pfns->first_pfn = page_to_pfn(pages[0]);
for (i = 1; i < page_count; i++) {
if ((page_to_pfn(pages[i]) - 1) ==
page_to_pfn(pages[i-1])) {
region_size++;
} else {
pfns->region[j].size = region_size;
pfns->region[j].space =
page_to_pfn(pages[i]) -
page_to_pfn(pages[i-1]) - 1;
j++;
region_size = 1;
}
}
pfns->region[j].size = region_size;
pfns->region[j].space = 0;
pfns->nregions = j+1;
}
*data_size = sizeof(struct compressed_pfns) +
sizeof(struct region) * pfns->nregions;
pr_debug("first_pfn %lu, nregions %d, data_size %u\n",
pfns->first_pfn, pfns->nregions, *data_size);
return 0;
err:
if (!IS_ERR_OR_NULL(attach)) {
if (!IS_ERR_OR_NULL(sg_table))
dma_buf_unmap_attachment(attach,
sg_table,
DMA_TO_DEVICE);
dma_buf_detach(dmabuf, attach);
}
return ret;
}
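/*
 * Illustration of the encoding produced above (the values are made up, not
 * part of the driver): for a 5-page buffer whose pages sit at pfns
 * 100, 101, 102, 200, 201, habmem_compress_pfns() yields
 *
 *	first_pfn = 100
 *	region[0] = { .size = 3, .space = 97 }	(pfns 100..102, gap to 200)
 *	region[1] = { .size = 2, .space = 0 }	(pfns 200..201, last run)
 *	nregions  = 2
 *
 * Each region records a run of physically contiguous pages plus the gap, in
 * pages, to the next run, so the importer can rebuild the full page list
 * starting from first_pfn alone.
 */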
static int habmem_add_export_compress(struct virtual_channel *vchan,
unsigned long offset,
int page_count,
void *buf,
int flags,
int *payload_size,
int *export_id)
{
int ret = 0;
struct export_desc *exp = NULL;
struct export_desc_super *exp_super = NULL;
struct exp_platform_data *platform_data = NULL;
struct compressed_pfns *pfns = NULL;
uint32_t sizebytes = sizeof(*exp_super) +
sizeof(struct compressed_pfns) +
page_count * sizeof(struct region);
pr_debug("exp_desc %zu, comp_pfns %zu, region %zu, page_count %d\n",
sizeof(struct export_desc),
sizeof(struct compressed_pfns),
sizeof(struct region), page_count);
exp_super = habmem_add_export(vchan,
sizebytes,
flags);
if (IS_ERR_OR_NULL(exp_super)) {
ret = -ENOMEM;
goto err_add_exp;
}
exp = &exp_super->exp;
exp->payload_count = page_count;
platform_data = kzalloc(
sizeof(struct exp_platform_data),
GFP_KERNEL);
if (!platform_data) {
ret = -ENOMEM;
goto err_alloc;
}
platform_data->dmabuf = buf;
exp_super->offset = offset;
exp_super->platform_data = (void *)platform_data;
kref_init(&exp_super->refcount);
pfns = (struct compressed_pfns *)&exp->payload[0];
ret = habmem_compress_pfns(exp_super, pfns, payload_size);
if (ret) {
pr_err("hab compressed pfns failed %d\n", ret);
*payload_size = 0;
goto err_compress_pfns;
}
*export_id = exp->export_id;
return 0;
err_compress_pfns:
kfree(platform_data);
err_alloc:
spin_lock(&vchan->pchan->expid_lock);
idr_remove(&vchan->pchan->expid_idr, exp->export_id);
spin_unlock(&vchan->pchan->expid_lock);
vfree(exp_super);
err_add_exp:
dma_buf_put((struct dma_buf *)buf);
return ret;
}
/*
* exporter - grant & revoke
 * generate a shareable page list based on a CPU-friendly virtual "address".
 * The result is stored as an array in ppdata and returned to the caller.
 * A page size of 4KB is assumed.
*/
int habmem_hyp_grant_user(struct virtual_channel *vchan,
unsigned long address,
int page_count,
int flags,
int remotedom,
int *compressed,
int *payload_size,
int *export_id)
{
int ret = 0;
struct dma_buf *dmabuf = NULL;
unsigned long off = 0;
if (HABMM_EXP_MEM_TYPE_DMA & flags)
dmabuf = habmem_get_dma_buf_from_va(address,
page_count, &off);
else if (HABMM_EXPIMP_FLAGS_FD & flags)
dmabuf = dma_buf_get(address);
else
dmabuf = habmem_get_dma_buf_from_uva(address, page_count);
if (IS_ERR_OR_NULL(dmabuf))
return -EINVAL;
ret = habmem_add_export_compress(vchan,
off,
page_count,
dmabuf,
flags,
payload_size,
export_id);
return ret;
}
/*
* exporter - grant & revoke
 * generate a shareable page list based on a CPU-friendly virtual "address".
 * The result is stored as an array in ppdata and returned to the caller.
 * A page size of 4KB is assumed.
*/
int habmem_hyp_grant(struct virtual_channel *vchan,
unsigned long address,
int page_count,
int flags,
int remotedom,
int *compressed,
int *payload_size,
int *export_id)
{
int ret = 0;
void *kva = (void *)(uintptr_t)address;
int is_vmalloc = is_vmalloc_addr(kva);
struct page **pages = NULL;
int i;
struct dma_buf *dmabuf = NULL;
struct pages_list *pglist = NULL;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
if (HABMM_EXPIMP_FLAGS_DMABUF & flags) {
dmabuf = (struct dma_buf *)address;
if (dmabuf)
get_dma_buf(dmabuf);
} else if (HABMM_EXPIMP_FLAGS_FD & flags)
dmabuf = dma_buf_get(address);
else { /*Input is kva;*/
		pages = vmalloc(page_count * sizeof(struct page *));
if (!pages) {
ret = -ENOMEM;
goto err;
}
pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
if (!pglist) {
ret = -ENOMEM;
goto err;
}
pglist->pages = pages;
pglist->npages = page_count;
pglist->type = HAB_PAGE_LIST_EXPORT;
pglist->pchan = vchan->pchan;
pglist->vcid = vchan->id;
kref_init(&pglist->refcount);
for (i = 0; i < page_count; i++) {
kva = (void *)(uintptr_t)(address + i*PAGE_SIZE);
if (is_vmalloc)
pages[i] = vmalloc_to_page(kva);
else
pages[i] = virt_to_page(kva);
}
exp_info.ops = &dma_buf_ops;
exp_info.size = pglist->npages << PAGE_SHIFT;
exp_info.flags = O_RDWR;
exp_info.priv = pglist;
dmabuf = dma_buf_export(&exp_info);
}
if (IS_ERR_OR_NULL(dmabuf)) {
		pr_err("dmabuf get failed %ld\n", PTR_ERR(dmabuf));
ret = -EINVAL;
goto err;
}
ret = habmem_add_export_compress(vchan,
0,
page_count,
dmabuf,
flags,
payload_size,
export_id);
return ret;
err:
vfree(pages);
kfree(pglist);
return ret;
}
int habmem_exp_release(struct export_desc_super *exp_super)
{
struct exp_platform_data *platform_data =
(struct exp_platform_data *)exp_super->platform_data;
struct dma_buf *dmabuf =
(struct dma_buf *) platform_data->dmabuf;
struct dma_buf_attachment *attach = NULL;
struct sg_table *sg_table = NULL;
if (!IS_ERR_OR_NULL(dmabuf)) {
attach = (struct dma_buf_attachment *) platform_data->attach;
if (!IS_ERR_OR_NULL(attach)) {
sg_table = (struct sg_table *) platform_data->sg_table;
if (!IS_ERR_OR_NULL(sg_table))
dma_buf_unmap_attachment(attach,
sg_table,
DMA_TO_DEVICE);
dma_buf_detach(dmabuf, attach);
}
dma_buf_put(dmabuf);
} else
pr_debug("release failed, dmabuf is null!!!\n");
kfree(platform_data);
return 0;
}
int habmem_hyp_revoke(void *expdata, uint32_t count)
{
return 0;
}
void *habmem_imp_hyp_open(void)
{
struct importer_context *priv = NULL;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return NULL;
return priv;
}
void habmem_imp_hyp_close(void *imp_ctx, int kernel)
{
struct importer_context *priv = imp_ctx;
if (!priv)
return;
kfree(priv);
}
static struct sg_table *hab_mem_map_dma_buf(
struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
struct dma_buf *dmabuf = attachment->dmabuf;
struct pages_list *pglist = dmabuf->priv;
struct sg_table *sgt;
struct scatterlist *sg;
int i;
int ret = 0;
struct page **pages = pglist->pages;
sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!sgt)
return ERR_PTR(-ENOMEM);
ret = sg_alloc_table(sgt, pglist->npages, GFP_KERNEL);
if (ret) {
kfree(sgt);
return ERR_PTR(-ENOMEM);
}
for_each_sg(sgt->sgl, sg, pglist->npages, i) {
sg_set_page(sg, pages[i], PAGE_SIZE, 0);
}
return sgt;
}
static void hab_mem_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *sgt,
enum dma_data_direction direction)
{
sg_free_table(sgt);
kfree(sgt);
}
static vm_fault_t hab_map_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct page *page = NULL;
struct pages_list *pglist = NULL;
unsigned long offset, fault_offset;
int page_idx;
if (vma == NULL)
return VM_FAULT_SIGBUS;
offset = vma->vm_pgoff << PAGE_SHIFT;
	/* offset of the faulting address within the backing buffer */
fault_offset =
(unsigned long)vmf->address - vma->vm_start + offset;
page_idx = fault_offset>>PAGE_SHIFT;
pglist = vma->vm_private_data;
if (page_idx < 0 || page_idx >= pglist->npages) {
pr_err("Out of page array! page_idx %d, pg cnt %ld\n",
page_idx, pglist->npages);
return VM_FAULT_SIGBUS;
}
page = pglist->pages[page_idx];
get_page(page);
vmf->page = page;
return 0;
}
static void hab_map_open(struct vm_area_struct *vma)
{
struct pages_list *pglist =
(struct pages_list *)vma->vm_private_data;
pages_list_get(pglist);
}
static void hab_map_close(struct vm_area_struct *vma)
{
struct pages_list *pglist =
(struct pages_list *)vma->vm_private_data;
pages_list_put(pglist);
vma->vm_private_data = NULL;
}
static const struct vm_operations_struct habmem_vm_ops = {
.fault = hab_map_fault,
.open = hab_map_open,
.close = hab_map_close,
};
static vm_fault_t hab_buffer_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct pages_list *pglist = vma->vm_private_data;
pgoff_t page_offset;
int ret;
page_offset = ((unsigned long)vmf->address - vma->vm_start) >>
PAGE_SHIFT;
	if (page_offset >= pglist->npages)
return VM_FAULT_SIGBUS;
ret = vm_insert_page(vma, (unsigned long)vmf->address,
pglist->pages[page_offset]);
switch (ret) {
case 0:
case -EBUSY:
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
case -EFAULT:
case -EINVAL:
return VM_FAULT_SIGBUS;
default:
WARN_ON(1);
return VM_FAULT_SIGBUS;
}
}
static void hab_buffer_open(struct vm_area_struct *vma)
{
}
static void hab_buffer_close(struct vm_area_struct *vma)
{
}
static const struct vm_operations_struct hab_buffer_vm_ops = {
.fault = hab_buffer_fault,
.open = hab_buffer_open,
.close = hab_buffer_close,
};
static int hab_mem_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct pages_list *pglist = dmabuf->priv;
uint32_t obj_size = pglist->npages << PAGE_SHIFT;
	if (vma == NULL)
		return -EINVAL;
/* Check for valid size. */
if (obj_size < vma->vm_end - vma->vm_start)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &hab_buffer_vm_ops;
vma->vm_private_data = pglist;
vma->vm_flags |= VM_MIXEDMAP;
if (!(pglist->userflags & HABMM_IMPORT_FLAGS_CACHED))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
return 0;
}
static void hab_mem_dma_buf_release(struct dma_buf *dmabuf)
{
struct pages_list *pglist = dmabuf->priv;
pages_list_put(pglist);
}
static int hab_mem_dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
struct pages_list *pglist = dmabuf->priv;
if (!pglist->vmapping) {
pglist->vmapping = vmap(pglist->pages,
pglist->npages,
VM_IOREMAP,
pgprot_writecombine(PAGE_KERNEL));
if (!pglist->vmapping)
return -ENOMEM;
}
iosys_map_set_vaddr(map, pglist->vmapping);
return 0;
}
static void hab_mem_dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
struct pages_list *pglist = dmabuf->priv;
/* sanity check */
if (map->vaddr != pglist->vmapping)
pr_warn("vunmap pass-in %pK != at-hand %pK\n",
map->vaddr, pglist->vmapping);
if (pglist->vmapping) {
vunmap(pglist->vmapping);
pglist->vmapping = NULL;
}
}
static struct dma_buf_ops dma_buf_ops = {
.cache_sgt_mapping = true,
.map_dma_buf = hab_mem_map_dma_buf,
.unmap_dma_buf = hab_mem_unmap_dma_buf,
.mmap = hab_mem_mmap,
.release = hab_mem_dma_buf_release,
.vmap = hab_mem_dma_buf_vmap,
.vunmap = hab_mem_dma_buf_vunmap,
};
static struct dma_buf *habmem_import_to_dma_buf(
struct physical_channel *pchan,
struct export_desc *exp,
uint32_t userflags)
{
struct pages_list *pglist = NULL;
struct dma_buf *dmabuf = NULL;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
pglist = pages_list_lookup(exp->export_id, pchan, true);
if (pglist)
goto buffer_ready;
pglist = pages_list_create(exp, userflags);
if (IS_ERR(pglist))
return (void *)pglist;
pages_list_add(pglist);
pglist->type = HAB_PAGE_LIST_IMPORT;
buffer_ready:
exp_info.ops = &dma_buf_ops;
exp_info.size = pglist->npages << PAGE_SHIFT;
exp_info.flags = O_RDWR;
exp_info.priv = pglist;
dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
pr_err("export to dmabuf failed, exp %d, pchan %s\n",
exp->export_id, pchan->name);
pages_list_put(pglist);
}
return dmabuf;
}
int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param,
struct export_desc *exp, int kernel)
{
int fd = -1;
struct dma_buf *dma_buf = NULL;
struct physical_channel *pchan = exp->pchan;
dma_buf = habmem_import_to_dma_buf(pchan, exp, param->flags);
if (IS_ERR_OR_NULL(dma_buf))
return -EINVAL;
if (kernel) {
param->kva = (uint64_t)dma_buf;
} else {
fd = dma_buf_fd(dma_buf, O_CLOEXEC);
if (fd < 0) {
pr_err("dma buf to fd failed\n");
dma_buf_put(dma_buf);
return -EINVAL;
}
param->kva = (uint64_t)fd;
}
return 0;
}
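/*
 * Sketch of the two import flavours handled above (field usage is taken from
 * the code; the example fd value is made up): for a kernel importer,
 * param->kva carries the struct dma_buf pointer itself and is later released
 * through dma_buf_put() in habmm_imp_hyp_unmap(); for a user importer,
 * param->kva carries a freshly installed O_CLOEXEC fd (e.g. 7) that
 * userspace can mmap or hand to other dma-buf aware drivers.
 */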
int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp, int kernel)
{
/* dma_buf is the only supported format in khab */
if (kernel)
dma_buf_put((struct dma_buf *)exp->kva);
return 0;
}
int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma)
{
return -EFAULT;
}
int habmm_imp_hyp_map_check(void *imp_ctx, struct export_desc *exp)
{
struct pages_list *pglist = NULL;
int found = 0;
pglist = pages_list_lookup(exp->export_id, exp->pchan, false);
if (pglist)
found = 1;
return found;
}
MODULE_IMPORT_NS(DMA_BUF);

View File

@ -0,0 +1,460 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
#include "hab_grantable.h"
/*
* use physical channel to send export parcel
* local remote
* send(export) --> IRQ store to export warehouse
* wait(export ack) <-- send(export ack)
 * the actual data consists of the following three parts, listed in order:
* 1. header (uint32_t) vcid|type|size
* 2. export parcel (full struct)
* 3. full contents in export->pdata
*/
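/*
 * Illustration of the resulting wire layout (struct sizes are whatever the
 * definitions give; the two-region count is just an example): an EXPORT
 * message for a buffer compressed into two pfn regions is sent as
 *
 *	struct hab_header	vcid | HAB_PAYLOAD_TYPE_EXPORT | size
 *	struct export_desc	export_id, vcid_local/remote, domid_local/remote, ...
 *	struct compressed_pfns	first_pfn, nregions = 2
 *	struct region[2]	{ size, space } pairs
 *
 * where the size field in the header counts everything after the header,
 * i.e. sizeof(struct export_desc) + sizeof(struct compressed_pfns) +
 * 2 * sizeof(struct region).
 */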
static int hab_export_ack_find(struct uhab_context *ctx,
struct hab_export_ack *expect_ack, struct virtual_channel *vchan)
{
int ret = 0;
struct hab_export_ack_recvd *ack_recvd, *tmp;
spin_lock_bh(&ctx->expq_lock);
list_for_each_entry_safe(ack_recvd, tmp, &ctx->exp_rxq, node) {
if ((ack_recvd->ack.export_id == expect_ack->export_id &&
ack_recvd->ack.vcid_local == expect_ack->vcid_local &&
ack_recvd->ack.vcid_remote == expect_ack->vcid_remote)
|| vchan->otherend_closed) {
list_del(&ack_recvd->node);
kfree(ack_recvd);
ret = 1;
break;
}
ack_recvd->age++;
if (ack_recvd->age > Q_AGE_THRESHOLD) {
list_del(&ack_recvd->node);
kfree(ack_recvd);
}
}
spin_unlock_bh(&ctx->expq_lock);
return ret;
}
static int hab_export_ack_wait(struct uhab_context *ctx,
struct hab_export_ack *expect_ack, struct virtual_channel *vchan)
{
int ret;
ret = wait_event_interruptible_timeout(ctx->exp_wq,
hab_export_ack_find(ctx, expect_ack, vchan),
HAB_HS_TIMEOUT);
if (!ret || (ret == -ERESTARTSYS))
ret = -EAGAIN;
else if (vchan->otherend_closed)
ret = -ENODEV;
else if (ret > 0)
ret = 0;
return ret;
}
/*
 * Get an id from the free list first; if none is available, a new id is
 * generated. Once generated, an id is never erased.
 * Assumption: no handshake or memory map/unmap happens in this helper.
*/
struct export_desc_super *habmem_add_export(
struct virtual_channel *vchan,
int sizebytes,
uint32_t flags)
{
	struct export_desc *exp = NULL;
	struct export_desc_super *exp_super = NULL;
	int id = 0;

	if (!vchan || !sizebytes)
		return NULL;
	exp_super = vzalloc(sizebytes);
	if (!exp_super)
		return NULL;
	exp = &exp_super->exp;
	idr_preload(GFP_KERNEL);
	spin_lock(&vchan->pchan->expid_lock);
	id = idr_alloc(&vchan->pchan->expid_idr, exp, 1, 0, GFP_NOWAIT);
	spin_unlock(&vchan->pchan->expid_lock);
	idr_preload_end();
	if (id < 0) {
		pr_err("failed to allocate export id, ret %d\n", id);
		vfree(exp_super);
		return NULL;
	}
	exp->export_id = id;
exp->readonly = flags;
exp->vcid_local = vchan->id;
exp->vcid_remote = vchan->otherend_id;
exp->domid_local = vchan->pchan->vmid_local;
exp->domid_remote = vchan->pchan->vmid_remote;
return exp_super;
}
void habmem_remove_export(struct export_desc *exp)
{
struct uhab_context *ctx = NULL;
struct export_desc_super *exp_super =
container_of(exp,
struct export_desc_super,
exp);
if (!exp || !exp->ctx) {
if (exp)
pr_err("invalid info in exp %pK ctx %pK\n",
exp, exp->ctx);
else
pr_err("invalid exp\n");
return;
}
ctx = exp->ctx;
write_lock(&ctx->exp_lock);
ctx->export_total--;
write_unlock(&ctx->exp_lock);
exp->ctx = NULL;
habmem_export_put(exp_super);
}
static void habmem_export_destroy(struct kref *refcount)
{
struct physical_channel *pchan = NULL;
struct export_desc_super *exp_super =
container_of(
refcount,
struct export_desc_super,
refcount);
struct export_desc *exp = NULL;
if (!exp_super) {
pr_err("invalid exp_super\n");
return;
}
exp = &exp_super->exp;
if (!exp || !exp->pchan) {
if (exp)
pr_err("invalid info in exp %pK pchan %pK\n",
exp, exp->pchan);
else
pr_err("invalid exp\n");
return;
}
pchan = exp->pchan;
spin_lock(&pchan->expid_lock);
idr_remove(&pchan->expid_idr, exp->export_id);
spin_unlock(&pchan->expid_lock);
habmem_exp_release(exp_super);
vfree(exp_super);
}
/*
 * store the parcel in the warehouse, then send the parcel to the remote side.
 * Both the exporter-composed export descriptor and the grant reference ids
 * are sent to the importer side as one message.
*/
static int habmem_export_vchan(struct uhab_context *ctx,
struct virtual_channel *vchan,
int payload_size,
uint32_t flags,
uint32_t export_id)
{
int ret;
struct export_desc *exp = NULL;
uint32_t sizebytes = sizeof(*exp) + payload_size;
struct hab_export_ack expected_ack = {0};
struct hab_header header = HAB_HEADER_INITIALIZER;
if (sizebytes > (uint32_t)HAB_HEADER_SIZE_MAX) {
pr_err("exp message too large, %u bytes, max is %d\n",
sizebytes, HAB_HEADER_SIZE_MAX);
return -EINVAL;
}
exp = idr_find(&vchan->pchan->expid_idr, export_id);
if (!exp) {
pr_err("export vchan failed: exp_id %d, pchan %s\n",
export_id, vchan->pchan->name);
return -EINVAL;
}
pr_debug("sizebytes including exp_desc: %u = %zu + %d\n",
sizebytes, sizeof(*exp), payload_size);
HAB_HEADER_SET_SIZE(header, sizebytes);
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_EXPORT);
HAB_HEADER_SET_ID(header, vchan->otherend_id);
HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
ret = physical_channel_send(vchan->pchan, &header, exp);
if (ret != 0) {
pr_err("failed to export payload to the remote %d\n", ret);
return ret;
}
expected_ack.export_id = exp->export_id;
expected_ack.vcid_local = exp->vcid_local;
expected_ack.vcid_remote = exp->vcid_remote;
ret = hab_export_ack_wait(ctx, &expected_ack, vchan);
if (ret != 0) {
pr_err("failed to receive remote export ack %d on vc %x\n",
ret, vchan->id);
return ret;
}
exp->pchan = vchan->pchan;
exp->vchan = vchan;
exp->ctx = ctx;
write_lock(&ctx->exp_lock);
ctx->export_total++;
list_add_tail(&exp->node, &ctx->exp_whse);
write_unlock(&ctx->exp_lock);
return ret;
}
void habmem_export_get(struct export_desc_super *exp_super)
{
kref_get(&exp_super->refcount);
}
int habmem_export_put(struct export_desc_super *exp_super)
{
return kref_put(&exp_super->refcount, habmem_export_destroy);
}
int hab_mem_export(struct uhab_context *ctx,
struct hab_export *param,
int kernel)
{
int ret = 0;
unsigned int payload_size = 0;
uint32_t export_id = 0;
struct virtual_channel *vchan;
int page_count;
int compressed = 0;
if (!ctx || !param || !param->sizebytes
|| ((param->sizebytes % PAGE_SIZE) != 0)
|| (!param->buffer && !(HABMM_EXPIMP_FLAGS_FD & param->flags))
)
return -EINVAL;
vchan = hab_get_vchan_fromvcid(param->vcid, ctx, 0);
if (!vchan || !vchan->pchan) {
ret = -ENODEV;
goto err;
}
page_count = param->sizebytes/PAGE_SIZE;
if (kernel) {
ret = habmem_hyp_grant(vchan,
(unsigned long)param->buffer,
page_count,
param->flags,
vchan->pchan->dom_id,
&compressed,
&payload_size,
&export_id);
} else {
ret = habmem_hyp_grant_user(vchan,
(unsigned long)param->buffer,
page_count,
param->flags,
vchan->pchan->dom_id,
&compressed,
&payload_size,
&export_id);
}
if (ret < 0) {
pr_err("habmem_hyp_grant vc %x failed size=%d ret=%d\n",
param->vcid, payload_size, ret);
goto err;
}
ret = habmem_export_vchan(ctx,
vchan,
payload_size,
param->flags,
export_id);
param->exportid = export_id;
err:
if (vchan)
hab_vchan_put(vchan);
return ret;
}
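/*
 * Summary of the export path above (descriptive only): the caller fills
 * struct hab_export with vcid, buffer, a page-multiple sizebytes and flags;
 * hab_mem_export() then resolves the vchan from the vcid, pins or wraps the
 * buffer and builds the compressed pfn table through habmem_hyp_grant() or
 * habmem_hyp_grant_user(), sends the descriptor and waits for the remote ack
 * in habmem_export_vchan(), and finally returns the allocated export id in
 * param->exportid.
 */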
int hab_mem_unexport(struct uhab_context *ctx,
struct hab_unexport *param,
int kernel)
{
int ret = 0, found = 0;
struct export_desc *exp = NULL, *tmp = NULL;
struct virtual_channel *vchan;
if (!ctx || !param)
return -EINVAL;
/* refcnt on the access */
vchan = hab_get_vchan_fromvcid(param->vcid, ctx, 1);
if (!vchan || !vchan->pchan) {
ret = -ENODEV;
goto err_novchan;
}
write_lock(&ctx->exp_lock);
list_for_each_entry_safe(exp, tmp, &ctx->exp_whse, node) {
if (param->exportid == exp->export_id &&
vchan->pchan == exp->pchan) {
list_del(&exp->node);
found = 1;
break;
}
}
write_unlock(&ctx->exp_lock);
if (!found) {
ret = -EINVAL;
goto err_novchan;
}
ret = habmem_hyp_revoke(exp->payload, exp->payload_count);
if (ret) {
pr_err("Error found in revoke grant with ret %d\n", ret);
goto err_novchan;
}
habmem_remove_export(exp);
err_novchan:
if (vchan)
hab_vchan_put(vchan);
return ret;
}
int hab_mem_import(struct uhab_context *ctx,
struct hab_import *param,
int kernel)
{
int ret = 0, found = 0;
struct export_desc *exp = NULL;
struct virtual_channel *vchan;
if (!ctx || !param)
return -EINVAL;
vchan = hab_get_vchan_fromvcid(param->vcid, ctx, 0);
if (!vchan || !vchan->pchan) {
ret = -ENODEV;
goto err_imp;
}
spin_lock_bh(&ctx->imp_lock);
list_for_each_entry(exp, &ctx->imp_whse, node) {
if ((exp->export_id == param->exportid) &&
(exp->pchan == vchan->pchan)) {
found = 1;
break;
}
}
spin_unlock_bh(&ctx->imp_lock);
if (!found) {
pr_err("Fail to get export descriptor from export id %d\n",
param->exportid);
ret = -ENODEV;
goto err_imp;
}
if ((exp->payload_count << PAGE_SHIFT) != param->sizebytes) {
		pr_err("input size %d does not match buffer size %d\n",
param->sizebytes, exp->payload_count << PAGE_SHIFT);
ret = -EINVAL;
goto err_imp;
}
ret = habmem_imp_hyp_map(ctx->import_ctx, param, exp, kernel);
if (ret) {
pr_err("Import fail ret:%d pcnt:%d rem:%d 1st_ref:0x%X\n",
ret, exp->payload_count,
exp->domid_local, *((uint32_t *)exp->payload));
goto err_imp;
}
exp->import_index = param->index;
exp->kva = kernel ? (void *)param->kva : NULL;
err_imp:
if (vchan)
hab_vchan_put(vchan);
return ret;
}
int hab_mem_unimport(struct uhab_context *ctx,
struct hab_unimport *param,
int kernel)
{
int ret = 0, found = 0;
struct export_desc *exp = NULL, *exp_tmp;
struct virtual_channel *vchan;
if (!ctx || !param)
return -EINVAL;
vchan = hab_get_vchan_fromvcid(param->vcid, ctx, 1);
if (!vchan || !vchan->pchan) {
if (vchan)
hab_vchan_put(vchan);
return -ENODEV;
}
spin_lock_bh(&ctx->imp_lock);
list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) {
if (exp->export_id == param->exportid &&
exp->pchan == vchan->pchan) {
/* same pchan is expected here */
list_del(&exp->node);
ctx->import_total--;
found = 1;
break;
}
}
spin_unlock_bh(&ctx->imp_lock);
if (!found)
ret = -EINVAL;
else {
ret = habmm_imp_hyp_unmap(ctx->import_ctx, exp, kernel);
if (ret) {
pr_err("unmap fail id:%d pcnt:%d vcid:%d\n",
exp->export_id, exp->payload_count, exp->vcid_remote);
}
param->kva = (uint64_t)exp->kva;
kfree(exp);
}
if (vchan)
hab_vchan_put(vchan);
return ret;
}

View File

@ -0,0 +1,604 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
#include "hab_grantable.h"
static int hab_rx_queue_empty(struct virtual_channel *vchan)
{
int ret = 0;
int irqs_disabled = irqs_disabled();
hab_spin_lock(&vchan->rx_lock, irqs_disabled);
ret = list_empty(&vchan->rx_list);
hab_spin_unlock(&vchan->rx_lock, irqs_disabled);
return ret;
}
static struct hab_message*
hab_scatter_msg_alloc(struct physical_channel *pchan, size_t sizebytes)
{
struct hab_message *message = NULL;
int i = 0;
int allocated = 0;
bool failed = false;
void **scatter_buf = NULL;
uint32_t total_num, page_num = 0U;
/* The scatter routine is only for the message larger than one page size */
if (sizebytes <= PAGE_SIZE)
return NULL;
page_num = sizebytes >> PAGE_SHIFT;
total_num = (sizebytes % PAGE_SIZE == 0) ? page_num : (page_num + 1);
message = kzalloc(sizeof(struct hab_message)
+ (total_num * sizeof(void *)), GFP_ATOMIC);
if (!message)
return NULL;
message->scatter = true;
scatter_buf = (void **)message->data;
/*
	 * All receive buffers need to be prepared before the actual receive.
	 * If we received into each page as soon as it was allocated, we could
	 * not guarantee that the next allocation succeeds, and part of the
	 * message would be stuck in the channel if an allocation failed
	 * half way.
*/
for (i = 0; i < page_num; i++) {
scatter_buf[i] = kzalloc(PAGE_SIZE, GFP_ATOMIC);
if (scatter_buf[i] == NULL) {
failed = true;
allocated = i;
break;
}
}
if ((!failed) && (sizebytes % PAGE_SIZE != 0)) {
scatter_buf[i] = kzalloc(sizebytes % PAGE_SIZE, GFP_ATOMIC);
if (scatter_buf[i] == NULL) {
failed = true;
allocated = i;
}
}
if (!failed) {
for (i = 0; i < sizebytes / PAGE_SIZE; i++)
message->sizebytes += physical_channel_read(pchan,
scatter_buf[i], PAGE_SIZE);
if (sizebytes % PAGE_SIZE)
message->sizebytes += physical_channel_read(pchan,
scatter_buf[i], sizebytes % PAGE_SIZE);
message->sequence_rx = pchan->sequence_rx;
} else {
for (i = 0; i < allocated; i++)
kfree(scatter_buf[i]);
kfree(message);
message = NULL;
}
return message;
}
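/*
 * Worked example for the scatter allocation above (the message size is made
 * up): a 9 KiB message with 4 KiB pages gives page_num = 2 and total_num = 3,
 * so message->data holds an array of three pointers: two PAGE_SIZE buffers
 * plus one 1 KiB tail buffer. The physical_channel_read() calls then fill
 * them in order, and hab_msg_free() below walks the same array to release
 * them.
 */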
static struct hab_message*
hab_msg_alloc(struct physical_channel *pchan, size_t sizebytes)
{
struct hab_message *message;
if (sizebytes > HAB_HEADER_SIZE_MAX) {
		pr_err("pchan %s recv size too large %zd\n",
pchan->name, sizebytes);
return NULL;
}
message = kzalloc(sizeof(*message) + sizebytes, GFP_ATOMIC);
if (!message)
/*
		 * A big buffer allocation may fail when memory is fragmented.
		 * Instead of one large contiguous allocation, try one page at a time.
*/
message = hab_scatter_msg_alloc(pchan, sizebytes);
else {
message->sizebytes =
physical_channel_read(pchan, message->data, sizebytes);
message->sequence_rx = pchan->sequence_rx;
}
return message;
}
void hab_msg_free(struct hab_message *message)
{
int i = 0;
uint32_t page_num = 0U;
void **scatter_buf = NULL;
if (unlikely(message->scatter)) {
scatter_buf = (void **)message->data;
page_num = message->sizebytes >> PAGE_SHIFT;
if (message->sizebytes % PAGE_SIZE)
page_num++;
for (i = 0; i < page_num; i++)
kfree(scatter_buf[i]);
}
kfree(message);
}
int
hab_msg_dequeue(struct virtual_channel *vchan, struct hab_message **msg,
int *rsize, unsigned int timeout, unsigned int flags)
{
struct hab_message *message = NULL;
/*
	 * 1. When the caller sets the non-blocking flag and the rx_list is empty,
	 *    or the rx_list was non-empty but, due to a race, is empty again by
	 *    the time the lock is taken, ret keeps its default value.
	 * 2. When wait_event_* returns because of a timeout while the condition
	 *    is still not met, ret is set to 0.
	 * If the default value of ret were 0, these two cases would be hard to
	 * tell apart (or would need redundant code), so the default is -EAGAIN,
	 * which makes the distinction straightforward.
*/
int ret = -EAGAIN;
int wait = !(flags & HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING);
int interruptible = !(flags & HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE);
int timeout_flag = flags & HABMM_SOCKET_RECV_FLAGS_TIMEOUT;
int irqs_disabled = irqs_disabled();
if (wait) {
/* we will wait forever if timeout_flag not set */
if (!timeout_flag)
timeout = UINT_MAX;
if (hab_rx_queue_empty(vchan)) {
if (interruptible)
ret = wait_event_interruptible_timeout(vchan->rx_queue,
!hab_rx_queue_empty(vchan) ||
vchan->otherend_closed,
msecs_to_jiffies(timeout));
else
ret = wait_event_timeout(vchan->rx_queue,
!hab_rx_queue_empty(vchan) ||
vchan->otherend_closed,
msecs_to_jiffies(timeout));
}
}
/*
	 * Return all messages received before the remote close. The list must
	 * be checked for emptiness again here, since other threads may have
	 * dequeued entries in the meantime.
*/
hab_spin_lock(&vchan->rx_lock, irqs_disabled);
if (!list_empty(&vchan->rx_list)) {
message = list_first_entry(&vchan->rx_list,
struct hab_message, node);
if (message) {
if (*rsize >= message->sizebytes) {
/* msg can be safely retrieved in full */
list_del(&message->node);
ret = 0;
*rsize = message->sizebytes;
} else {
pr_err("vcid %x rcv buf too small %d < %zd\n",
vchan->id, *rsize,
message->sizebytes);
/*
* Here we return the actual message size in RxQ instead of 0,
* so that the hab client can re-receive the message with the
* correct message size.
*/
*rsize = message->sizebytes;
message = NULL;
ret = -EOVERFLOW; /* come back again */
}
}
} else {
/* no message received */
*rsize = 0;
if (vchan->otherend_closed)
ret = -ENODEV;
else if (ret == -ERESTARTSYS)
ret = -EINTR;
else if (ret == 0) {
pr_info("timeout! vcid: %x\n", vchan->id);
ret = -ETIMEDOUT;
} else {
pr_debug("EAGAIN: ret = %d, flags = %x\n", ret, flags);
ret = -EAGAIN;
}
}
hab_spin_unlock(&vchan->rx_lock, irqs_disabled);
*msg = message;
return ret;
}
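/*
 * Sketch of how a caller is expected to drive hab_msg_dequeue() (the calling
 * convention is inferred from the return codes above; variable names are
 * made up):
 *
 *	int rsize = buf_capacity;
 *	ret = hab_msg_dequeue(vchan, &msg, &rsize, 0,
 *			HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING);
 *	ret == 0          : msg is valid, rsize holds the actual message size
 *	ret == -EAGAIN    : nothing queued, or the message was raced away
 *	ret == -EOVERFLOW : buffer too small, rsize now holds the needed size,
 *	                    so the caller can retry with a larger buffer
 *	ret == -ETIMEDOUT / -EINTR / -ENODEV as reported above
 */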
static void hab_msg_queue(struct virtual_channel *vchan,
struct hab_message *message)
{
int irqs_disabled = irqs_disabled();
hab_spin_lock(&vchan->rx_lock, irqs_disabled);
list_add_tail(&message->node, &vchan->rx_list);
hab_spin_unlock(&vchan->rx_lock, irqs_disabled);
wake_up(&vchan->rx_queue);
}
static int hab_export_enqueue(struct virtual_channel *vchan,
struct export_desc *exp)
{
struct uhab_context *ctx = vchan->ctx;
int irqs_disabled = irqs_disabled();
hab_spin_lock(&ctx->imp_lock, irqs_disabled);
list_add_tail(&exp->node, &ctx->imp_whse);
ctx->import_total++;
hab_spin_unlock(&ctx->imp_lock, irqs_disabled);
return 0;
}
static int hab_send_export_ack(struct virtual_channel *vchan,
struct physical_channel *pchan,
struct export_desc *exp)
{
struct hab_export_ack exp_ack = {
.export_id = exp->export_id,
.vcid_local = exp->vcid_local,
.vcid_remote = exp->vcid_remote
};
struct hab_header header = HAB_HEADER_INITIALIZER;
HAB_HEADER_SET_SIZE(header, sizeof(exp_ack));
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_EXPORT_ACK);
HAB_HEADER_SET_ID(header, exp->vcid_local);
HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
return physical_channel_send(pchan, &header, &exp_ack);
}
static int hab_receive_create_export_ack(struct physical_channel *pchan,
struct uhab_context *ctx, size_t sizebytes)
{
struct hab_export_ack_recvd *ack_recvd =
kzalloc(sizeof(*ack_recvd), GFP_ATOMIC);
int irqs_disabled = irqs_disabled();
if (!ack_recvd)
return -ENOMEM;
if (sizeof(ack_recvd->ack) != sizebytes)
		pr_err("%s exp ack size %zu does not match arrived size %zu\n",
pchan->name, sizeof(ack_recvd->ack), sizebytes);
if (sizebytes > sizeof(ack_recvd->ack)) {
pr_err("pchan %s read size too large %zd %zd\n",
pchan->name, sizebytes, sizeof(ack_recvd->ack));
kfree(ack_recvd);
return -EINVAL;
}
/*
* If the hab version on remote side is different with local side,
* the size of the ack structure may differ. Under this circumstance,
* the sizebytes is still trusted. Thus, we need to read it out and
* drop the mismatched ack message from channel.
* Dropping such message could avoid the [payload][header][payload]
* data layout which will make the whole channel unusable.
	 * For security reasons, though, we cannot do this when sizebytes is
	 * larger than expected.
*/
if (physical_channel_read(pchan,
&ack_recvd->ack,
sizebytes) != sizebytes) {
kfree(ack_recvd);
return -EIO;
}
/* add ack_recvd node into rx queue only if the sizebytes is expected */
if (sizeof(ack_recvd->ack) == sizebytes) {
hab_spin_lock(&ctx->expq_lock, irqs_disabled);
list_add_tail(&ack_recvd->node, &ctx->exp_rxq);
hab_spin_unlock(&ctx->expq_lock, irqs_disabled);
} else {
kfree(ack_recvd);
return -EINVAL;
}
return 0;
}
static void hab_msg_drop(struct physical_channel *pchan, size_t sizebytes)
{
uint8_t *data = NULL;
if (sizebytes > HAB_HEADER_SIZE_MAX) {
pr_err("%s read size too large %zd\n", pchan->name, sizebytes);
return;
}
data = kmalloc(sizebytes, GFP_ATOMIC);
if (data == NULL)
return;
physical_channel_read(pchan, data, sizebytes);
kfree(data);
}
int hab_msg_recv(struct physical_channel *pchan,
struct hab_header *header)
{
int ret = 0;
struct hab_message *message;
struct hab_device *dev = pchan->habdev;
size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
uint32_t payload_type = HAB_HEADER_GET_TYPE(*header);
uint32_t vchan_id = HAB_HEADER_GET_ID(*header);
uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header);
struct virtual_channel *vchan = NULL;
struct export_desc *exp_desc;
struct timespec64 ts = {0};
unsigned long long rx_mpm_tv;
size_t exp_desc_size_expected = 0;
struct compressed_pfns *pfn_table = NULL;
/* get the local virtual channel if it isn't an open message */
if (payload_type != HAB_PAYLOAD_TYPE_INIT &&
payload_type != HAB_PAYLOAD_TYPE_INIT_ACK &&
payload_type != HAB_PAYLOAD_TYPE_INIT_DONE &&
payload_type != HAB_PAYLOAD_TYPE_INIT_CANCEL) {
/* sanity check the received message */
if (payload_type >= HAB_PAYLOAD_TYPE_MAX ||
vchan_id > (HAB_HEADER_ID_MASK >> HAB_HEADER_ID_SHIFT)
|| !vchan_id || !session_id) {
pr_err("@@ %s Invalid msg type %d vcid %x bytes %zx sn %d\n",
pchan->name, payload_type,
vchan_id, sizebytes, session_id);
dump_hab_wq(pchan);
}
/*
* need both vcid and session_id to be accurate.
* this is from pchan instead of ctx
*/
vchan = hab_vchan_get(pchan, header);
if (!vchan) {
pr_debug("vchan not found type %d vcid %x sz %zx sesn %d\n",
payload_type, vchan_id, sizebytes, session_id);
if (sizebytes) {
hab_msg_drop(pchan, sizebytes);
				pr_err("%s msg dropped type %d size %zd vcid %X session id %d\n",
pchan->name, payload_type,
sizebytes, vchan_id,
session_id);
}
return -EINVAL;
} else if (vchan->otherend_closed) {
hab_vchan_put(vchan);
pr_info("vchan remote is closed payload type %d, vchan id %x, sizebytes %zx, session %d\n",
payload_type, vchan_id,
sizebytes, session_id);
if (sizebytes) {
hab_msg_drop(pchan, sizebytes);
pr_err("%s message %d dropped remote close, session id %d\n",
pchan->name, payload_type,
session_id);
}
return -ENODEV;
}
} else {
if (sizebytes != sizeof(struct hab_open_send_data)) {
pr_err("%s Invalid open req type %d vcid %x bytes %zx session %d\n",
pchan->name, payload_type, vchan_id,
sizebytes, session_id);
if (sizebytes) {
hab_msg_drop(pchan, sizebytes);
pr_err("%s msg %d dropped unknown reason session id %d\n",
pchan->name,
payload_type,
session_id);
dump_hab_wq(pchan);
}
return -ENODEV;
}
}
switch (payload_type) {
case HAB_PAYLOAD_TYPE_MSG:
case HAB_PAYLOAD_TYPE_SCHE_RESULT_REQ:
case HAB_PAYLOAD_TYPE_SCHE_RESULT_RSP:
message = hab_msg_alloc(pchan, sizebytes);
if (!message)
break;
hab_msg_queue(vchan, message);
break;
case HAB_PAYLOAD_TYPE_INIT:
case HAB_PAYLOAD_TYPE_INIT_ACK:
case HAB_PAYLOAD_TYPE_INIT_DONE:
ret = hab_open_request_add(pchan, sizebytes, payload_type);
if (ret) {
pr_err("%s open request add failed, ret %d, payload type %d, sizebytes %zx\n",
pchan->name, ret, payload_type, sizebytes);
break;
}
wake_up_interruptible(&dev->openq);
break;
case HAB_PAYLOAD_TYPE_INIT_CANCEL:
pr_info("remote open cancel header vcid %X session %d local %d remote %d\n",
vchan_id, session_id, pchan->vmid_local,
pchan->vmid_remote);
ret = hab_open_receive_cancel(pchan, sizebytes);
if (ret)
pr_err("%s open cancel handling failed ret %d vcid %X session %d\n",
pchan->name, ret, vchan_id, session_id);
break;
case HAB_PAYLOAD_TYPE_EXPORT:
exp_desc_size_expected = sizeof(struct export_desc)
+ sizeof(struct compressed_pfns);
if (sizebytes > (size_t)(HAB_HEADER_SIZE_MAX) ||
sizebytes < exp_desc_size_expected) {
pr_err("%s exp size too large/small %zu header %zu\n",
pchan->name, sizebytes, sizeof(*exp_desc));
break;
}
pr_debug("%s exp payload %zu bytes\n",
pchan->name, sizebytes);
exp_desc = kzalloc(sizebytes, GFP_ATOMIC);
if (!exp_desc)
break;
if (physical_channel_read(pchan, exp_desc, sizebytes) !=
sizebytes) {
pr_err("%s corrupted exp expect %zd bytes vcid %X remote %X open %d!\n",
pchan->name, sizebytes, vchan->id,
vchan->otherend_id, vchan->session_id);
kfree(exp_desc);
break;
}
if (pchan->vmid_local != exp_desc->domid_remote ||
pchan->vmid_remote != exp_desc->domid_local)
pr_err("%s corrupted vmid %d != %d %d != %d\n",
pchan->name, pchan->vmid_local, exp_desc->domid_remote,
pchan->vmid_remote, exp_desc->domid_local);
exp_desc->domid_remote = pchan->vmid_remote;
exp_desc->domid_local = pchan->vmid_local;
exp_desc->pchan = pchan;
/*
* We should do all the checks here.
* But in order to improve performance, we put the
* checks related to exp->payload_count and pfn_table->region[i].size
* into function pages_list_create. So any potential usage of such data
* from the remote side after the checks here and before the checks in
* pages_list_create needs to add some more checks if necessary.
*/
pfn_table = (struct compressed_pfns *)exp_desc->payload;
if (pfn_table->nregions <= 0 ||
(pfn_table->nregions > SIZE_MAX / sizeof(struct region)) ||
(SIZE_MAX - exp_desc_size_expected <
pfn_table->nregions * sizeof(struct region))) {
pr_err("%s nregions is too large or negative, nregions:%d!\n",
pchan->name, pfn_table->nregions);
kfree(exp_desc);
break;
}
if (pfn_table->nregions > exp_desc->payload_count) {
pr_err("%s nregions %d greater than payload_count %d\n",
pchan->name, pfn_table->nregions, exp_desc->payload_count);
kfree(exp_desc);
break;
}
if (exp_desc->payload_count > MAX_EXP_PAYLOAD_COUNT) {
pr_err("payload_count out of range: %d size overflow\n",
exp_desc->payload_count);
kfree(exp_desc);
break;
}
exp_desc_size_expected += pfn_table->nregions * sizeof(struct region);
if (sizebytes != exp_desc_size_expected) {
pr_err("%s exp size not equal %zu expect %zu\n",
pchan->name, sizebytes, exp_desc_size_expected);
kfree(exp_desc);
break;
}
hab_export_enqueue(vchan, exp_desc);
hab_send_export_ack(vchan, pchan, exp_desc);
break;
case HAB_PAYLOAD_TYPE_EXPORT_ACK:
ret = hab_receive_create_export_ack(pchan, vchan->ctx,
sizebytes);
if (ret) {
pr_err("%s failed to handled export ack %d\n",
pchan->name, ret);
break;
}
wake_up_interruptible(&vchan->ctx->exp_wq);
break;
case HAB_PAYLOAD_TYPE_CLOSE:
/* remote request close */
pr_debug("remote close vcid %pK %X other id %X session %d refcnt %d\n",
vchan, vchan->id, vchan->otherend_id,
session_id, get_refcnt(vchan->refcount));
hab_vchan_stop(vchan);
break;
case HAB_PAYLOAD_TYPE_PROFILE:
ktime_get_ts64(&ts);
if (sizebytes < sizeof(struct habmm_xing_vm_stat)) {
			pr_err("%s received size %zd is smaller than the expected minimum %zd\n",
pchan->name, sizebytes, sizeof(struct habmm_xing_vm_stat));
break;
}
/* pull down the incoming data */
message = hab_msg_alloc(pchan, sizebytes);
if (!message)
			pr_err("%s failed to allocate msg, arrived msg will be lost\n",
pchan->name);
else {
struct habmm_xing_vm_stat *pstat =
(struct habmm_xing_vm_stat *)message->data;
pstat->rx_sec = ts.tv_sec;
pstat->rx_usec = ts.tv_nsec/NSEC_PER_USEC;
hab_msg_queue(vchan, message);
}
break;
case HAB_PAYLOAD_TYPE_SCHE_MSG:
case HAB_PAYLOAD_TYPE_SCHE_MSG_ACK:
if (sizebytes < sizeof(unsigned long long)) {
			pr_err("%s received size %zd is smaller than the expected minimum %zd\n",
pchan->name, sizebytes, sizeof(unsigned long long));
break;
}
rx_mpm_tv = msm_timer_get_sclk_ticks();
/* pull down the incoming data */
message = hab_msg_alloc(pchan, sizebytes);
if (!message)
			pr_err("%s failed to allocate msg, arrived msg will be lost\n",
pchan->name);
else {
((unsigned long long *)message->data)[0] = rx_mpm_tv;
hab_msg_queue(vchan, message);
}
break;
default:
pr_err("%s unknown msg received, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
pchan->name, payload_type, vchan_id,
sizebytes, session_id);
break;
}
if (vchan)
hab_vchan_put(vchan);
return ret;
}

View File

@ -0,0 +1,303 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
#define HAB_OPEN_REQ_EXPIRE_TIME_S (3600*10)
void hab_open_request_init(struct hab_open_request *request,
int type,
struct physical_channel *pchan,
int vchan_id,
int sub_id,
int open_id)
{
request->type = type;
request->pchan = pchan;
request->xdata.vchan_id = vchan_id;
request->xdata.sub_id = sub_id;
request->xdata.open_id = open_id;
}
int hab_open_request_send(struct hab_open_request *request)
{
struct hab_header header = HAB_HEADER_INITIALIZER;
HAB_HEADER_SET_SIZE(header, sizeof(struct hab_open_send_data));
HAB_HEADER_SET_TYPE(header, request->type);
return physical_channel_send(request->pchan, &header, &request->xdata);
}
/*
* called when remote sends in open-request.
* The sanity of the arg sizebytes is ensured by its caller hab_msg_recv.
* The sizebytes should be equal to sizeof(struct hab_open_send_data)
*/
int hab_open_request_add(struct physical_channel *pchan,
size_t sizebytes, int request_type)
{
struct hab_open_node *node;
struct hab_device *dev = pchan->habdev;
struct hab_open_request *request;
struct timespec64 ts = {0};
int irqs_disabled = irqs_disabled();
node = kzalloc(sizeof(*node), GFP_ATOMIC);
if (!node)
return -ENOMEM;
request = &node->request;
	if (physical_channel_read(pchan, &request->xdata, sizebytes)
		!= sizebytes) {
		kfree(node);
		return -EIO;
	}
request->type = request_type;
request->pchan = pchan;
ktime_get_ts64(&ts);
node->age = ts.tv_sec + HAB_OPEN_REQ_EXPIRE_TIME_S +
ts.tv_nsec/NSEC_PER_SEC;
hab_pchan_get(pchan);
hab_spin_lock(&dev->openlock, irqs_disabled);
list_add_tail(&node->node, &dev->openq_list);
dev->openq_cnt++;
hab_spin_unlock(&dev->openlock, irqs_disabled);
return 0;
}
/* local only */
static int hab_open_request_find(struct uhab_context *ctx,
struct hab_device *dev,
struct hab_open_request *listen,
struct hab_open_request **recv_request)
{
struct hab_open_node *node, *tmp;
struct hab_open_request *request;
struct timespec64 ts = {0};
int ret = 0;
if (ctx->closing ||
(listen->pchan && listen->pchan->closed)) {
*recv_request = NULL;
return 1;
}
spin_lock_bh(&dev->openlock);
if (list_empty(&dev->openq_list))
goto done;
ktime_get_ts64(&ts);
list_for_each_entry_safe(node, tmp, &dev->openq_list, node) {
request = (struct hab_open_request *)node;
if ((request->type == listen->type ||
request->type == HAB_PAYLOAD_TYPE_INIT_CANCEL) &&
(request->xdata.sub_id == listen->xdata.sub_id) &&
(!listen->xdata.open_id ||
request->xdata.open_id == listen->xdata.open_id) &&
(!listen->pchan ||
request->pchan == listen->pchan)) {
list_del(&node->node);
dev->openq_cnt--;
*recv_request = request;
ret = 1;
break;
}
if (node->age < (int64_t)ts.tv_sec + ts.tv_nsec/NSEC_PER_SEC) {
			pr_warn("open request expired, type %d sub %d open %d\n",
				request->type, request->xdata.sub_id,
				request->xdata.open_id);
			list_del(&node->node);
			dev->openq_cnt--;
			hab_open_request_free(request);
}
}
done:
spin_unlock_bh(&dev->openlock);
return ret;
}
void hab_open_request_free(struct hab_open_request *request)
{
if (request) {
hab_pchan_put(request->pchan);
kfree(request);
} else
pr_err("empty request found\n");
}
int hab_open_listen(struct uhab_context *ctx,
struct hab_device *dev,
struct hab_open_request *listen,
struct hab_open_request **recv_request,
int ms_timeout)
{
int ret = 0;
if (!ctx || !listen || !recv_request) {
pr_err("listen failed ctx %pK listen %pK request %pK\n",
ctx, listen, recv_request);
return -EINVAL;
}
*recv_request = NULL;
if (ms_timeout > 0) { /* be case */
ms_timeout = msecs_to_jiffies(ms_timeout);
ret = wait_event_interruptible_timeout(dev->openq,
hab_open_request_find(ctx, dev, listen, recv_request),
ms_timeout);
if (!ret) {
pr_debug("%s timeout in open listen\n", dev->name);
ret = -EAGAIN; /* condition not met */
} else if (-ERESTARTSYS == ret) {
pr_warn("something failed in open listen ret %d\n",
ret);
ret = -EINTR; /* condition not met */
} else if (ret > 0)
ret = 0; /* condition met */
} else { /* fe case */
ret = wait_event_interruptible(dev->openq,
hab_open_request_find(ctx, dev, listen, recv_request));
if (ctx->closing) {
pr_warn("local closing during open ret %d\n", ret);
ret = -ENODEV;
} else if (-ERESTARTSYS == ret) {
pr_warn("local interrupted ret %d\n", ret);
ret = -EINTR;
}
}
return ret;
}
/*
* called when receiving remote's cancel init from FE or init-ack from BE.
* The sanity of the arg sizebytes is ensured by its caller hab_msg_recv.
* The sizebytes should be equal to sizeof(struct hab_open_send_data)
*/
int hab_open_receive_cancel(struct physical_channel *pchan,
size_t sizebytes)
{
struct hab_device *dev = pchan->habdev;
struct hab_open_send_data data = {0};
struct hab_open_request *request;
struct hab_open_node *node, *tmp;
int bfound = 0;
struct timespec64 ts = {0};
int irqs_disabled = irqs_disabled();
if (physical_channel_read(pchan, &data, sizebytes) != sizebytes)
return -EIO;
hab_spin_lock(&dev->openlock, irqs_disabled);
list_for_each_entry_safe(node, tmp, &dev->openq_list, node) {
request = &node->request;
/* check if open request has been serviced or not */
if ((request->type == HAB_PAYLOAD_TYPE_INIT ||
request->type == HAB_PAYLOAD_TYPE_INIT_ACK) &&
(request->xdata.sub_id == data.sub_id) &&
(request->xdata.open_id == data.open_id) &&
(request->xdata.vchan_id == data.vchan_id)) {
list_del(&node->node);
kfree(node);
dev->openq_cnt--;
pr_info("open cancelled on pchan %s vcid %x subid %d openid %d\n",
pchan->name, data.vchan_id,
data.sub_id, data.open_id);
/* found un-serviced open request, delete it */
bfound = 1;
break;
}
}
hab_spin_unlock(&dev->openlock, irqs_disabled);
if (!bfound) {
pr_info("init waiting is in-flight. vcid %x sub %d open %d\n",
data.vchan_id, data.sub_id, data.open_id);
/* add cancel to the openq to let the waiting open bail out */
node = kzalloc(sizeof(*node), GFP_ATOMIC);
if (!node)
return -ENOMEM;
request = &node->request;
request->type = HAB_PAYLOAD_TYPE_INIT_CANCEL;
request->pchan = pchan;
request->xdata.vchan_id = data.vchan_id;
request->xdata.sub_id = data.sub_id;
request->xdata.open_id = data.open_id;
request->xdata.ver_fe = data.ver_fe;
request->xdata.ver_be = data.ver_be;
ktime_get_ts64(&ts);
node->age = ts.tv_sec + HAB_OPEN_REQ_EXPIRE_TIME_S +
ts.tv_nsec/NSEC_PER_SEC;
/* put when this node is handled in open path */
hab_pchan_get(pchan);
hab_spin_lock(&dev->openlock, irqs_disabled);
list_add_tail(&node->node, &dev->openq_list);
dev->openq_cnt++;
hab_spin_unlock(&dev->openlock, irqs_disabled);
wake_up_interruptible(&dev->openq);
}
return 0;
}
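/*
 * Sketch of the open/cancel exchange serviced here and in hab_msg_recv()
 * (the FE/BE roles are inferred from the "fe case"/"be case" comments in
 * hab_open_listen(), so treat the direction of each message as an
 * assumption):
 *
 *	FE                                   BE
 *	INIT(vchan_id, sub_id, open_id)  -->  queued via hab_open_request_add()
 *	                                 <--  INIT_ACK
 *	INIT_DONE                        -->
 *
 * If either side gives up, it sends INIT_CANCEL; hab_open_receive_cancel()
 * above then either deletes the matching un-serviced request from
 * openq_list or, if the local open is still waiting, queues a cancel node so
 * the waiter in hab_open_listen() can bail out.
 */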
/* called locally to send a cancel of the pending open to the remote side */
int hab_open_cancel_notify(struct hab_open_request *request)
{
struct hab_header header = HAB_HEADER_INITIALIZER;
HAB_HEADER_SET_SIZE(header, sizeof(struct hab_open_send_data));
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_INIT_CANCEL);
return physical_channel_send(request->pchan, &header, &request->xdata);
}
int hab_open_pending_enter(struct uhab_context *ctx,
struct physical_channel *pchan,
struct hab_open_node *pending)
{
write_lock(&ctx->ctx_lock);
list_add_tail(&pending->node, &ctx->pending_open);
ctx->pending_cnt++;
write_unlock(&ctx->ctx_lock);
return 0;
}
int hab_open_pending_exit(struct uhab_context *ctx,
struct physical_channel *pchan,
struct hab_open_node *pending)
{
struct hab_open_node *node, *tmp;
int ret = -ENOENT;
write_lock(&ctx->ctx_lock);
list_for_each_entry_safe(node, tmp, &ctx->pending_open, node) {
if ((node->request.type == pending->request.type) &&
(node->request.pchan
== pending->request.pchan) &&
(node->request.xdata.vchan_id
== pending->request.xdata.vchan_id) &&
(node->request.xdata.sub_id
== pending->request.xdata.sub_id) &&
(node->request.xdata.open_id
== pending->request.xdata.open_id)) {
list_del(&node->node);
ctx->pending_cnt--;
ret = 0;
}
}
write_unlock(&ctx->ctx_lock);
return ret;
}

View File

@ -0,0 +1,53 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HAB_OS_H
#define __HAB_OS_H
#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt) "hab:%s:%d " fmt, __func__, __LINE__
#include <linux/types.h>
#include <linux/habmm.h>
#include <linux/hab_ioctl.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/dma-map-ops.h>
#include <linux/jiffies.h>
#include <linux/reboot.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/version.h>
#include <linux/devcoredump.h>
#if IS_ENABLED(CONFIG_QGKI_MSM_BOOT_TIME_MARKER)
#include <soc/qcom/boot_stats.h>
#else
static inline unsigned long long msm_timer_get_sclk_ticks(void)
{
return 0;
}
#endif
#endif /*__HAB_OS_H*/

View File

@ -0,0 +1,167 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2018, 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
#include <linux/of.h>
/*
 * Set a valid mmid value in tbl to mark the entry as valid. All inputs here
 * are normalized to 1-based integers.
*/
static int fill_vmid_mmid_tbl(struct vmid_mmid_desc *tbl, int32_t vm_start,
int32_t vm_range, int32_t mmid_start,
int32_t mmid_range, int32_t be)
{
int i, j;
for (i = vm_start; i < vm_start+vm_range; i++) {
tbl[i].vmid = i; /* set valid vmid value to make it usable */
for (j = mmid_start; j < mmid_start + mmid_range; j++) {
/* sanity check */
if (tbl[i].mmid[j] != HABCFG_VMID_INVALID) {
pr_err("overwrite previous setting vmid %d, mmid %d, be %d\n",
i, j, tbl[i].is_listener[j]);
}
tbl[i].mmid[j] = j;
tbl[i].is_listener[j] = be; /* BE IS listen */
}
}
return 0;
}
void dump_settings(struct local_vmid *settings)
{
pr_debug("self vmid is %d\n", settings->self);
}
#ifdef CONFIG_MSM_VHOST_HAB
int fill_default_gvm_settings(struct local_vmid *settings, int vmid_default,
int mmid_start, int mmid_end)
{
int32_t be = HABCFG_BE_TRUE;
int32_t range = 1;
int32_t vmremote = vmid_default;
/* default gvm always talks to host as vm0 */
settings->self = 0;
return fill_vmid_mmid_tbl(settings->vmid_mmid_list, vmremote, range,
mmid_start/100, (mmid_end-mmid_start)/100+1, be);
}
#else
int fill_default_gvm_settings(struct local_vmid *settings, int vmid_local,
int mmid_start, int mmid_end)
{
int32_t be = HABCFG_BE_FALSE;
int32_t range = 1;
int32_t vmremote = 0; /* default to host[0] as local is guest[2] */
settings->self = vmid_local;
/* default gvm always talks to host as vm0 */
return fill_vmid_mmid_tbl(settings->vmid_mmid_list, vmremote, range,
mmid_start/100, (mmid_end-mmid_start)/100+1, be);
}
#endif
/* device tree based parser */
static int hab_parse_dt(struct local_vmid *settings)
{
int result, i;
struct device_node *hab_node = NULL;
struct device_node *mmid_grp_node = NULL;
const char *role = NULL;
int tmp = -1, vmids_num;
u32 vmids[16];
int32_t grp_start_id, be;
/* parse device tree*/
pr_debug("parsing hab node in device tree...\n");
hab_node = of_find_compatible_node(NULL, NULL, "qcom,hab");
if (!hab_node) {
pr_err("no hab device tree node\n");
return -ENODEV;
}
/* read the local vmid of this VM, like 0 for host, 1 for AGL GVM */
result = of_property_read_u32(hab_node, "vmid", &tmp);
if (result) {
pr_err("failed to read local vmid, result = %d\n", result);
return result;
}
pr_debug("local vmid = %d\n", tmp);
settings->self = tmp;
for_each_child_of_node(hab_node, mmid_grp_node) {
/* read the group starting id */
result = of_property_read_u32(mmid_grp_node,
"grp-start-id", &tmp);
if (result) {
pr_err("failed to read grp-start-id, result = %d\n",
result);
return result;
}
pr_debug("grp-start-id = %d\n", tmp);
grp_start_id = tmp;
/* read the role(fe/be) of these pchans in this mmid group */
result = of_property_read_string(mmid_grp_node, "role", &role);
if (result) {
pr_err("failed to get role, result = %d\n", result);
return result;
}
pr_debug("local role of this mmid group is %s\n", role);
if (!strcmp(role, "be"))
be = 1;
else
be = 0;
/* read the remote vmids for these pchans in this mmid group */
		vmids_num = of_property_count_elems_of_size(mmid_grp_node,
				"remote-vmids", sizeof(u32));
		if (vmids_num < 0 || vmids_num > ARRAY_SIZE(vmids)) {
			pr_err("invalid remote-vmids count %d\n", vmids_num);
			return -EINVAL;
		}
		result = of_property_read_u32_array(mmid_grp_node,
				"remote-vmids", vmids, vmids_num);
if (result) {
pr_err("failed to read remote-vmids, result = %d\n",
result);
return result;
}
for (i = 0; i < vmids_num; i++) {
pr_debug("vmids_num = %d, vmids[%d] = %d\n",
vmids_num, i, vmids[i]);
result = fill_vmid_mmid_tbl(
settings->vmid_mmid_list,
vmids[i], 1,
grp_start_id/100, 1, be);
if (result) {
pr_err("fill_vmid_mmid_tbl failed\n");
return result;
}
}
}
dump_settings(settings);
return 0;
}
/*
* 0: successful
 * negative: various failure codes
*/
int hab_parse(struct local_vmid *settings)
{
int ret;
ret = hab_parse_dt(settings);
return ret;
}
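/*
 * Example of a device-tree fragment accepted by hab_parse_dt() above (the
 * property names are taken from the parser; the node names and values are
 * made up for illustration):
 *
 *	hab {
 *		compatible = "qcom,hab";
 *		vmid = <2>;
 *		mmidgrp100 {
 *			grp-start-id = <100>;
 *			role = "fe";
 *			remote-vmids = <0>;
 *		};
 *	};
 *
 * i.e. this VM is vmid 2 and the MMID group starting at 100 uses FE-role
 * pchans towards remote vmid 0.
 */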

View File

@ -0,0 +1,100 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
struct physical_channel *
hab_pchan_alloc(struct hab_device *habdev, int otherend_id)
{
struct physical_channel *pchan = kzalloc(sizeof(*pchan), GFP_KERNEL);
if (!pchan)
return NULL;
idr_init(&pchan->vchan_idr);
spin_lock_init(&pchan->vid_lock);
idr_init(&pchan->expid_idr);
spin_lock_init(&pchan->expid_lock);
kref_init(&pchan->refcount);
pchan->habdev = habdev;
pchan->dom_id = otherend_id;
pchan->closed = 1;
pchan->hyp_data = NULL;
INIT_LIST_HEAD(&pchan->vchannels);
rwlock_init(&pchan->vchans_lock);
spin_lock_init(&pchan->rxbuf_lock);
write_lock_bh(&habdev->pchan_lock);
list_add_tail(&pchan->node, &habdev->pchannels);
habdev->pchan_cnt++;
write_unlock_bh(&habdev->pchan_lock);
return pchan;
}
static void hab_pchan_free(struct kref *ref)
{
struct physical_channel *pchan =
container_of(ref, struct physical_channel, refcount);
struct virtual_channel *vchan;
pr_debug("pchan %s refcnt %d\n", pchan->name,
get_refcnt(pchan->refcount));
write_lock_bh(&pchan->habdev->pchan_lock);
list_del(&pchan->node);
pchan->habdev->pchan_cnt--;
write_unlock_bh(&pchan->habdev->pchan_lock);
/* check vchan leaking */
read_lock(&pchan->vchans_lock);
list_for_each_entry(vchan, &pchan->vchannels, pnode) {
/* no logging on the owner. it might have been gone */
pr_warn("leaking vchan id %X remote %X refcnt %d\n",
vchan->id, vchan->otherend_id,
get_refcnt(vchan->refcount));
}
read_unlock(&pchan->vchans_lock);
kfree(pchan);
}
struct physical_channel *
hab_pchan_find_domid(struct hab_device *dev, int dom_id)
{
	struct physical_channel *pchan = NULL, *tmp = NULL;

	read_lock_bh(&dev->pchan_lock);
	list_for_each_entry(tmp, &dev->pchannels, node) {
		if (tmp->dom_id == dom_id || dom_id == HABCFG_VMID_DONT_CARE) {
			pchan = tmp;
			break;
		}
	}
	if (!pchan)
		pr_err("no pchan found for requested dom_id %d\n", dom_id);
	if (pchan && !kref_get_unless_zero(&pchan->refcount))
		pchan = NULL;
read_unlock_bh(&dev->pchan_lock);
return pchan;
}
void hab_pchan_get(struct physical_channel *pchan)
{
if (pchan)
kref_get(&pchan->refcount);
}
void hab_pchan_put(struct physical_channel *pchan)
{
if (pchan)
kref_put(&pchan->refcount, hab_pchan_free);
}

View File

@ -0,0 +1,256 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
#include "hab_pipe.h"
size_t hab_pipe_calc_required_bytes(const uint32_t shared_buf_size)
{
return sizeof(struct hab_pipe)
+ (2 * (sizeof(struct hab_shared_buf) + shared_buf_size));
}
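/*
 * Illustration of the layout implied by the size calculation above (the
 * 4096-byte value is just an example): with shared_buf_size = 4096 the pipe
 * area is
 *
 *	[ struct hab_pipe ]
 *	[ struct hab_shared_buf A ][ 4096 bytes data ]	top tx / bottom rx
 *	[ struct hab_shared_buf B ][ 4096 bytes data ]	top rx / bottom tx
 *
 * which matches how hab_pipe_init() below hands buf_a/buf_b to the two
 * endpoints.
 */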
/*
 * The tx and rx ring buffer pointers must be stored in a non-shared/local
 * area, and only such pointers (inaccessible from the remote, untrusted side)
 * may be used to read/write the shared ring buffer region. Reasons to keep
 * them local:
 * 1. Local ring buffer pointers are of no use to the remote side.
 * 2. Storing and using them in the shared buffer would be an information
 *    disclosure risk.
 * 3. Furthermore, the untrusted peer could modify them deliberately, causing
 *    arbitrary/OOB access on the local side.
*/
struct hab_pipe_endpoint *hab_pipe_init(struct hab_pipe *pipe,
struct hab_shared_buf **tx_buf_p,
struct hab_shared_buf **rx_buf_p,
struct dbg_items **itms,
const uint32_t shared_buf_size, int top)
{
struct hab_pipe_endpoint *ep = NULL;
struct hab_shared_buf *buf_a = NULL;
struct hab_shared_buf *buf_b = NULL;
struct dbg_items *its = NULL;
if (!pipe || !tx_buf_p || !rx_buf_p)
return NULL;
/* debug only */
its = kzalloc(sizeof(struct dbg_items), GFP_KERNEL);
buf_a = (struct hab_shared_buf *) pipe->buf_base;
buf_b = (struct hab_shared_buf *) (pipe->buf_base
+ sizeof(struct hab_shared_buf) + shared_buf_size);
if (top) {
ep = &pipe->top;
memset(ep, 0, sizeof(*ep));
*tx_buf_p = buf_a;
*rx_buf_p = buf_b;
pipe->legacy_buf_a = NULL;
} else {
ep = &pipe->bottom;
memset(ep, 0, sizeof(*ep));
*tx_buf_p = buf_b;
*rx_buf_p = buf_a;
memset(buf_b, 0, sizeof(struct hab_shared_buf));
memset(buf_a, 0, sizeof(struct hab_shared_buf));
buf_a->size = shared_buf_size;
buf_b->size = shared_buf_size;
pipe->legacy_buf_b = NULL;
pipe->legacy_total_size = 0;
}
*itms = its;
return ep;
}
uint32_t hab_pipe_write(struct hab_pipe_endpoint *ep,
struct hab_shared_buf *sh_buf,
const uint32_t buf_size,
unsigned char *p, uint32_t num_bytes)
{
/* Save a copy for index and count to avoid ToC-ToU issue */
uint32_t ep_tx_index = ep->tx_info.index;
uint32_t ep_tx_wr_count = ep->tx_info.wr_count;
uint32_t sh_buf_rd_count = sh_buf->rd_count;
uint32_t space = 0U;
uint32_t count1, count2;
if (buf_size < (ep_tx_wr_count - sh_buf_rd_count)) {
pr_err("rd/wr counter error wr:%u rd:%u\n",
ep_tx_wr_count, sh_buf_rd_count);
return 0;
}
space = buf_size - (ep_tx_wr_count - sh_buf_rd_count);
if (!p || num_bytes > space || num_bytes == 0) {
pr_err("****can not write to pipe p %pK to-write %d space available %d\n",
p, num_bytes, space);
return 0;
}
asm volatile("dmb ish" ::: "memory");
if ((buf_size < ep_tx_index) || (buf_size < num_bytes)) {
pr_err("index in tx ep is out of boundary or number of bytes is larger than the ring buffer size\n");
return 0;
}
count1 = (num_bytes <= (buf_size - ep_tx_index))
? num_bytes : (buf_size - ep_tx_index);
count2 = num_bytes - count1;
if (count1 > 0) {
memcpy((void *)&sh_buf->data[ep_tx_index], p, count1);
ep_tx_wr_count += count1;
ep_tx_index += count1;
if (ep_tx_index >= buf_size)
ep_tx_index = 0;
}
if (count2 > 0) {/* handle buffer wrapping */
memcpy((void *)&sh_buf->data[ep_tx_index],
p + count1, count2);
ep_tx_wr_count += count2;
ep_tx_index += count2;
if (ep_tx_index >= buf_size)
ep_tx_index = 0;
}
ep->tx_info.wr_count = ep_tx_wr_count;
ep->tx_info.index = ep_tx_index;
return num_bytes;
}
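/*
* For illustration (numbers are hypothetical, not from the driver): with
* buf_size = 16, index = 12 and num_bytes = 6, count1 = min(6, 16 - 12) = 4
* bytes go to data[12..15], the index wraps to 0, and count2 = 2 bytes land
* in data[0..1]; wr_count advances by the full 6.
*/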
/* Updates the write index which is shared with the other VM */
void hab_pipe_write_commit(struct hab_pipe_endpoint *ep,
struct hab_shared_buf *sh_buf)
{
/* Must commit data before incrementing count */
asm volatile("dmb ishst" ::: "memory");
sh_buf->wr_count = ep->tx_info.wr_count;
}
#define HAB_HEAD_CLEAR 0xCC
uint32_t hab_pipe_read(struct hab_pipe_endpoint *ep,
struct hab_shared_buf *sh_buf,
const uint32_t buf_size,
unsigned char *p, uint32_t size, uint32_t clear)
{
/* Save a copy for index to avoid ToC-ToU issue */
uint32_t ep_rx_index = ep->rx_info.index;
/* mb to guarantee wr_count is updated after contents are written */
uint32_t avail = sh_buf->wr_count - sh_buf->rd_count;
uint32_t count1, count2, to_read;
uint32_t index_saved = ep_rx_index; /* store original for retry */
static uint8_t signature_mismatch;
if (!p || avail == 0 || size == 0 || ep_rx_index > buf_size)
return 0;
asm volatile("dmb ishld" ::: "memory");
/* read the smaller of what is available and what was requested */
to_read = (avail < size) ? avail : size;
/*
* Generally, the available size should be equal to the expected read size.
* But when calling hab_msg_drop() during message recv, the available size may
* be less than the expected size.
*/
if (to_read < size)
pr_info("less data available %d than requested %d\n",
avail, size);
count1 = (to_read <= (buf_size - ep_rx_index)) ? to_read :
(buf_size - ep_rx_index);
count2 = to_read - count1;
if (count1 > 0) {
memcpy(p, (void *)&sh_buf->data[ep_rx_index], count1);
ep_rx_index += count1;
if (ep_rx_index >= buf_size)
ep_rx_index = 0;
}
if (count2 > 0) { /* handle buffer wrapping */
memcpy(p + count1, (void *)&sh_buf->data[ep_rx_index],
count2);
ep_rx_index += count2;
}
ep->rx_info.index = ep_rx_index;
if (count1 + count2) {
struct hab_header *head = (struct hab_header *)p;
int retry_cnt = 0;
if (clear && (size == sizeof(*head))) {
retry:
if (unlikely(head->signature != 0xBEE1BEE1)) {
pr_debug("hab head corruption detected at %pK buf %pK %08X %08X %08X %08X %08X rd %d wr %d index %X saved %X retry %d\n",
head, &sh_buf->data[0],
head->id_type,
head->payload_size,
head->session_id,
head->signature, head->sequence,
sh_buf->rd_count, sh_buf->wr_count,
ep->rx_info.index, index_saved,
retry_cnt);
if (retry_cnt++ <= 1000) {
memcpy(p, &sh_buf->data[index_saved],
count1);
if (count2)
memcpy(&p[count1],
&sh_buf->data[ep_rx_index - count2],
count2);
if (!signature_mismatch)
goto retry;
} else
pr_err("quit retry after %d time may fail %X %X %X %X %X rd %d wr %d index %X\n",
retry_cnt, head->id_type,
head->payload_size,
head->session_id,
head->signature,
head->sequence,
sh_buf->rd_count,
sh_buf->wr_count,
ep->rx_info.index);
signature_mismatch = 1;
} else
signature_mismatch = 0;
}
/* If the signature has mismatched,
* don't increment the shared buffer index.
*/
if (signature_mismatch) {
ep->rx_info.index = index_saved + 1;
if (ep->rx_info.index >= sh_buf->size)
ep->rx_info.index = 0;
to_read = (retry_cnt < 1000) ? 0xFFFFFFFE : 0xFFFFFFFF;
}
/* Must commit data before incrementing count */
asm volatile("dmb ish" ::: "memory");
sh_buf->rd_count += (signature_mismatch) ? 1 : count1 + count2;
}
return to_read;
}
void hab_pipe_rxinfo(struct hab_pipe_endpoint *ep,
struct hab_shared_buf *sh_buf,
uint32_t *rd_cnt,
uint32_t *wr_cnt, uint32_t *idx)
{
*idx = ep->rx_info.index;
*rd_cnt = sh_buf->rd_count;
*wr_cnt = sh_buf->wr_count;
}

View File

@ -0,0 +1,84 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef HAB_PIPE_H
#define HAB_PIPE_H
struct hab_shared_buf {
uint32_t rd_count; /* volatile cannot be used here */
uint32_t wr_count; /* volatile cannot be used here */
uint32_t size;
unsigned char data[]; /* volatile cannot be used here */
};
/* debug only */
struct dbg_item {
uint32_t rd_cnt;
uint32_t wr_cnt;
void *va; /* local for read or write */
uint32_t index; /* local */
uint32_t sz; /* size in */
uint32_t ret; /* actual bytes read */
};
#define DBG_ITEM_SIZE 20
struct dbg_items {
struct dbg_item it[DBG_ITEM_SIZE];
int idx;
};
struct hab_pipe_endpoint {
struct {
uint32_t wr_count;
uint32_t index;
struct hab_shared_buf *legacy_sh_buf;
} tx_info;
struct {
uint32_t index;
struct hab_shared_buf *legacy_sh_buf;
} rx_info;
};
struct hab_pipe {
struct hab_pipe_endpoint top;
struct hab_pipe_endpoint bottom;
/* Legacy debugging metadata, replaced by dbg_itms from qvm_channel */
struct hab_shared_buf *legacy_buf_a; /* top TX, bottom RX */
struct hab_shared_buf *legacy_buf_b; /* top RX, bottom TX */
size_t legacy_total_size;
unsigned char buf_base[];
};
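/*
* A sketch of the layout implied by hab_pipe_calc_required_bytes() and
* hab_pipe_init() (illustrative summary only):
*   struct hab_pipe header (top/bottom endpoints, legacy fields)
*   buf_base: shared buf A header + shared_buf_size data bytes
*             shared buf B header + shared_buf_size data bytes
* The "top" endpoint transmits on A and receives on B; "bottom" is the
* mirror image.
*/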
size_t hab_pipe_calc_required_bytes(const uint32_t shared_buf_size);
struct hab_pipe_endpoint *hab_pipe_init(struct hab_pipe *pipe,
struct hab_shared_buf **tx_buf_p,
struct hab_shared_buf **rx_buf_p,
struct dbg_items **itms,
const uint32_t shared_buf_size, int top);
uint32_t hab_pipe_write(struct hab_pipe_endpoint *ep,
struct hab_shared_buf *sh_buf,
const uint32_t buf_size,
unsigned char *p, uint32_t num_bytes);
void hab_pipe_write_commit(struct hab_pipe_endpoint *ep,
struct hab_shared_buf *sh_buf);
uint32_t hab_pipe_read(struct hab_pipe_endpoint *ep,
struct hab_shared_buf *sh_buf,
const uint32_t buf_size,
unsigned char *p, uint32_t size, uint32_t clear);
/* debug only */
void hab_pipe_rxinfo(struct hab_pipe_endpoint *ep,
struct hab_shared_buf *sh_buf,
uint32_t *rd_cnt,
uint32_t *wr_cnt, uint32_t *idx);
#endif /* HAB_PIPE_H */

View File

@ -0,0 +1,247 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
#include "hab_qvm.h"
/*
* this is for platforms that do not provide probe features. the size should
* match the hab device side (all mmids)
*/
static struct shmem_irq_config pchan_factory_settings[] = {
{0x1b000000, 7},
{0x1b001000, 8},
{0x1b002000, 9},
{0x1b003000, 10},
{0x1b004000, 11},
{0x1b005000, 12},
{0x1b006000, 13},
{0x1b007000, 14},
{0x1b008000, 15},
{0x1b009000, 16},
{0x1b00a000, 17},
{0x1b00b000, 18},
{0x1b00c000, 19},
{0x1b00d000, 20},
{0x1b00e000, 21},
{0x1b00f000, 22},
{0x1b010000, 23},
{0x1b011000, 24},
{0x1b012000, 25},
{0x1b013000, 26},
{0x1b014000, 27},
{0x1b015000, 28},
{0x1b016000, 29},
{0x1b017000, 30},
{0x1b018000, 31},
{0x1b019000, 32},
{0x1b01a000, 33},
{0x1b01b000, 34},
};
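/*
* Each entry above pairs a guest_shm factory page (4 KB apart, starting at
* 0x1b000000) with an interrupt number (7..34), one per pchan/MMID; it is
* only consulted when no probe data (e.g. device tree) is available.
*/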
struct qvm_plugin_info qvm_priv_info = {
pchan_factory_settings,
ARRAY_SIZE(pchan_factory_settings),
0,
ARRAY_SIZE(pchan_factory_settings)
};
/*
* this is common but only for guest
*/
uint64_t get_guest_ctrl_paddr(struct qvm_channel *dev,
unsigned long factory_addr, int irq, const char *name, uint32_t pages)
{
int i;
unsigned long factory_va;
pr_debug("name = %s, factory paddr = 0x%lx, irq %d, pages %d\n",
name, factory_addr, irq, pages);
/* get guest factory's va */
factory_va = hab_shmem_factory_va(factory_addr);
dev->guest_factory = (struct guest_shm_factory *)factory_va;
if (dev->guest_factory->signature != GUEST_SHM_SIGNATURE) {
pr_err("signature error: %ld != %llu, factory addr %lx\n",
GUEST_SHM_SIGNATURE, dev->guest_factory->signature,
factory_addr);
iounmap(dev->guest_factory);
return 0;
}
dev->guest_intr = dev->guest_factory->vector;
/*
* Set the name field on the factory page to identify the shared memory
* region
*/
for (i = 0; i < strlen(name) && i < GUEST_SHM_MAX_NAME - 1; i++)
dev->guest_factory->name[i] = name[i];
dev->guest_factory->name[i] = (char) 0;
guest_shm_create(dev->guest_factory, pages);
/* See if we successfully created/attached to the region. */
if (dev->guest_factory->status != GSS_OK) {
pr_err("create failed: %d\n", dev->guest_factory->status);
iounmap(dev->guest_factory);
return 0;
}
pr_debug("shm creation size %x, paddr=%llx, vector %d, dev %pK\n",
dev->guest_factory->size,
dev->guest_factory->shmem,
dev->guest_intr,
dev);
dev->factory_addr = factory_addr;
dev->irq = irq;
return dev->guest_factory->shmem;
}
void hab_pipe_reset(struct physical_channel *pchan)
{
struct hab_pipe_endpoint *pipe_ep;
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
pipe_ep = hab_pipe_init(dev->pipe, &dev->tx_buf,
&dev->rx_buf, &dev->dbg_itms, PIPE_SHMEM_SIZE,
pchan->is_be ? 0 : 1);
if (dev->pipe_ep != pipe_ep)
pr_warn("The pipe endpoint must not change\n");
}
/*
* allocate hypervisor plug-in specific resource for pchan, and call hab pchan
* alloc common function. hab driver struct is directly accessed.
* commdev: pointer to store the pchan address
* id: index to hab_device (mmids)
* is_be: pchan local endpoint role
* name: pchan name
* return: status 0: success, otherwise: failures
*/
int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
int vmid_remote, struct hab_device *mmid_device)
{
struct qvm_channel *dev = NULL;
struct qvm_channel_os *dev_os = NULL;
struct physical_channel **pchan = (struct physical_channel **)commdev;
int ret = 0;
char *shmdata;
uint32_t pipe_alloc_size =
hab_pipe_calc_required_bytes(PIPE_SHMEM_SIZE);
uint32_t pipe_alloc_pages =
(pipe_alloc_size + PAGE_SIZE - 1) / PAGE_SIZE;
pr_debug("%s: pipe_alloc_size is %d\n", __func__, pipe_alloc_size);
/* allocate common part for the commdev */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
ret = -ENOMEM;
goto err;
}
/* allocate the os-specific data for the commdev */
dev_os = kzalloc(sizeof(*dev_os), GFP_KERNEL);
if (!dev_os) {
ret = -ENOMEM;
goto err;
}
dev->os_data = dev_os;
spin_lock_init(&dev->io_lock);
/*
* create/attach to the shmem region, and get back the
* shmem data vaddr
*/
shmdata = hab_shmem_attach(dev, name, pipe_alloc_pages);
if (IS_ERR(shmdata)) {
ret = PTR_ERR(shmdata);
goto err;
}
dev->pipe = (struct hab_pipe *)shmdata;
pr_debug("\"%s\": pipesize %d, addr 0x%pK, be %d\n", name,
pipe_alloc_size, dev->pipe, is_be);
dev->pipe_ep = hab_pipe_init(dev->pipe, &dev->tx_buf, &dev->rx_buf,
&dev->dbg_itms, PIPE_SHMEM_SIZE, is_be ? 0 : 1);
/* newly created pchan is added to mmid device list */
*pchan = hab_pchan_alloc(mmid_device, vmid_remote);
if (!(*pchan)) {
ret = -ENOMEM;
goto err;
}
(*pchan)->closed = 0;
(*pchan)->hyp_data = (void *)dev;
strscpy((*pchan)->name, name, MAX_VMID_NAME_SIZE);
(*pchan)->is_be = is_be;
ret = habhyp_commdev_create_dispatcher(*pchan);
if (ret < 0)
goto err;
return ret;
err:
pr_err("%s failed\n", __func__);
if (*commdev)
habhyp_commdev_dealloc(*commdev);
return ret;
}
int habhyp_commdev_dealloc(void *commdev)
{
struct physical_channel *pchan = (struct physical_channel *)commdev;
struct qvm_channel *dev = pchan->hyp_data;
/* os specific deallocation for this commdev */
habhyp_commdev_dealloc_os(commdev);
if (get_refcnt(pchan->refcount) > 1) {
pr_warn("potential leak pchan %s vchans %d refcnt %d\n",
pchan->name, pchan->vcnt,
get_refcnt(pchan->refcount));
}
kfree(dev->os_data);
kfree(dev);
hab_pchan_put(pchan);
return 0;
}
int hab_hypervisor_register(void)
{
int ret = 0;
/* os-specific registration work */
ret = hab_hypervisor_register_os();
if (ret)
goto done;
pr_info("initializing for %s VM\n", hab_driver.b_server_dom ?
"host" : "guest");
hab_driver.hyp_priv = &qvm_priv_info;
done:
return ret;
}
void hab_hypervisor_unregister(void)
{
hab_hypervisor_unregister_os();
}

View File

@ -0,0 +1,73 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HAB_QNX_H
#define __HAB_QNX_H
#include "hab.h"
#include "hab_pipe.h"
#include "hab_qvm_os.h"
struct qvm_channel {
int be;
struct hab_pipe *pipe;
struct hab_pipe_endpoint *pipe_ep;
struct hab_shared_buf *tx_buf;
struct hab_shared_buf *rx_buf;
struct dbg_items *dbg_itms;
spinlock_t io_lock;
/* common but only for guest */
struct guest_shm_factory *guest_factory;
struct guest_shm_control *guest_ctrl;
/* cached guest ctrl idx value to prevent trap when accessed */
uint32_t idx;
/* Guest VM */
unsigned int guest_intr;
unsigned int guest_iid;
unsigned int factory_addr;
unsigned int irq;
/* os-specific part */
struct qvm_channel_os *os_data;
/* debug only */
struct workqueue_struct *wq;
struct work_data {
struct work_struct work;
int data; /* free to modify */
} wdata;
char *side_buf; /* to store the contents from hab-pipe */
};
/* This is common but only for guest in HQX */
struct shmem_irq_config {
unsigned long factory_addr; /* from gvm settings when provided */
int irq; /* from gvm settings when provided */
};
struct qvm_plugin_info {
struct shmem_irq_config *pchan_settings;
int setting_size;
int curr;
int probe_cnt;
};
extern struct qvm_plugin_info qvm_priv_info;
/* Shared mem size in each direction for communication pipe */
#define PIPE_SHMEM_SIZE (512 * 1024)
void hab_pipe_reset(struct physical_channel *pchan);
void habhyp_notify(void *commdev);
unsigned long hab_shmem_factory_va(unsigned long factory_addr);
char *hab_shmem_attach(struct qvm_channel *dev, const char *name,
uint32_t pages);
uint64_t get_guest_ctrl_paddr(struct qvm_channel *dev,
unsigned long factory_addr, int irq, const char *name, uint32_t pages);
#endif /* __HAB_QNX_H */

View File

@ -0,0 +1,281 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
#include "hab_qvm.h"
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
static int hab_shmem_remove(struct platform_device *pdev)
{
return 0;
}
static void hab_shmem_shutdown(struct platform_device *pdev)
{
}
static const struct of_device_id hab_shmem_match_table[] = {
{.compatible = "qvm,guest_shm"},
{},
};
/* this happens before hypervisor register */
static int hab_shmem_probe(struct platform_device *pdev)
{
int irq = 0;
struct resource *mem;
void __iomem *shmem_base = NULL;
int ret = 0;
/* hab in one GVM will not have more pchans than one VM is allowed */
if (qvm_priv_info.probe_cnt >= hab_driver.ndevices) {
pr_err("no more channel, current %d, maximum %d\n",
qvm_priv_info.probe_cnt, hab_driver.ndevices);
return -ENODEV;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
pr_err("no interrupt for the channel %d, error %d\n",
qvm_priv_info.probe_cnt, irq);
return irq;
}
qvm_priv_info.pchan_settings[qvm_priv_info.probe_cnt].irq = irq;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
pr_err("can not get io mem resource for channel %d\n",
qvm_priv_info.probe_cnt);
return -EINVAL;
}
shmem_base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(shmem_base)) {
pr_err("ioremap failed for channel %d, mem %pK\n",
qvm_priv_info.probe_cnt, mem);
return -EINVAL;
}
qvm_priv_info.pchan_settings[qvm_priv_info.probe_cnt].factory_addr
= (unsigned long)((uintptr_t)shmem_base);
pr_debug("pchan idx %d, hab irq=%d shmem_base=%pK, mem %pK\n",
qvm_priv_info.probe_cnt, irq, shmem_base, mem);
qvm_priv_info.probe_cnt++;
return ret;
}
static struct platform_driver hab_shmem_driver = {
.probe = hab_shmem_probe,
.remove = hab_shmem_remove,
.shutdown = hab_shmem_shutdown,
.driver = {
.name = "hab_shmem",
.of_match_table = of_match_ptr(hab_shmem_match_table),
},
};
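/*
* A hypothetical node this driver could bind to (values are illustrative;
* the real nodes are supplied by the GVM platform):
*
*   hab_shmem@1b000000 {
*           compatible = "qvm,guest_shm";
*           reg = <0x1b000000 0x1000>;
*           interrupts = <0 7 4>;
*   };
*/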
static int hab_shmem_init(void)
{
qvm_priv_info.probe_cnt = 0;
return platform_driver_register(&hab_shmem_driver);
}
static void hab_shmem_exit(void)
{
platform_driver_unregister(&hab_shmem_driver);
qvm_priv_info.probe_cnt = 0;
}
int hab_hypervisor_register_os(void)
{
hab_driver.b_server_dom = 0;
hab_shmem_init();
return 0;
}
int hab_hypervisor_unregister_os(void)
{
hab_shmem_exit();
return 0;
}
void habhyp_commdev_dealloc_os(void *commdev)
{
struct physical_channel *pchan = (struct physical_channel *)commdev;
struct qvm_channel *dev = pchan->hyp_data;
dev->guest_ctrl->detach = 0;
}
static irqreturn_t shm_irq_handler(int irq, void *_pchan)
{
irqreturn_t rc = IRQ_NONE;
struct physical_channel *pchan = (struct physical_channel *) _pchan;
struct qvm_channel *dev =
(struct qvm_channel *) (pchan ? pchan->hyp_data : NULL);
if (dev && dev->guest_ctrl) {
int status = dev->guest_ctrl->status;
if (status & 0xffff) { /* source bitmask indicator */
rc = IRQ_HANDLED;
tasklet_hi_schedule(&dev->os_data->task);
}
}
return rc;
}
/* debug only */
static void work_func(struct work_struct *work)
{
struct qvm_channel *dev = container_of(work, struct qvm_channel, wdata.work);
dump_hab(dev->wdata.data);
}
int habhyp_commdev_create_dispatcher(struct physical_channel *pchan)
{
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
int ret;
tasklet_init(&dev->os_data->task, physical_channel_rx_dispatch,
(unsigned long) pchan);
/* debug */
dev->wq = create_workqueue("wq_dump");
INIT_WORK(&dev->wdata.work, work_func);
dev->wdata.data = 0; /* let the caller wait */
dev->side_buf = kzalloc(PIPE_SHMEM_SIZE, GFP_KERNEL);
pr_debug("request_irq: irq = %d, pchan name = %s\n",
dev->irq, pchan->name);
ret = request_irq(dev->irq, shm_irq_handler, IRQF_SHARED |
IRQF_NO_SUSPEND, pchan->name, pchan);
if (ret)
pr_err("request_irq for %s failed: %d\n",
pchan->name, ret);
return ret;
}
/* Debug: critical section? */
void hab_pipe_read_dump(struct physical_channel *pchan)
{
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
char str[250];
int i;
struct dbg_items *its = dev->dbg_itms;
struct hab_shared_buf *sh_buf = dev->rx_buf;
uint32_t buf_size = PIPE_SHMEM_SIZE;
snprintf(str, sizeof(str),
"index 0x%X rd_cnt %d wr_cnt %d size %d data_addr %lX",
dev->pipe_ep->rx_info.index,
sh_buf->rd_count,
sh_buf->wr_count,
sh_buf->size,
&sh_buf->data[0]);
dump_hab_buf(str, strlen(str)+1);
/* trace history buffer dump */
snprintf(str, sizeof(str), "dbg hist buffer index %d\n", its->idx);
dump_hab_buf(str, strlen(str)+1);
for (i = 0; i < DBG_ITEM_SIZE; i++) {
struct dbg_item *it = &its->it[i];
snprintf(str, sizeof(str),
"it %d: rd %d wr %d va %lX index 0x%X size %d ret %d\n",
i, it->rd_cnt, it->wr_cnt, it->va, it->index, it->sz, it->ret);
dump_hab_buf(str, strlen(str)+1);
}
/* !!!! to end the readable string */
str[0] = str[1] = str[2] = str[3] = 33;
dump_hab_buf(str, 4); /* separator */
dump_hab_buf((void *)sh_buf->data, buf_size);
str[0] = str[1] = str[2] = str[3] = str[4] = str[5] = str[6] =
str[7] = 33; /* !!!! to end the readable string */
dump_hab_buf(str, 16); /* separator */
dump_hab_buf(dev->side_buf, buf_size);
}
void dump_hab_wq(struct physical_channel *pchan)
{
struct qvm_channel *dev = pchan->hyp_data;
dev->wdata.data = pchan->habdev->id;
queue_work(dev->wq, &dev->wdata.work);
}
int hab_stat_log(struct physical_channel **pchans, int pchan_cnt, char *dest,
int dest_size)
{
return 0;
};
/* The input is already va now */
inline unsigned long hab_shmem_factory_va(unsigned long factory_addr)
{
return factory_addr;
}
/* to get the shmem data region virtual address */
char *hab_shmem_attach(struct qvm_channel *dev, const char *name,
uint32_t pipe_alloc_pages)
{
struct qvm_plugin_info *qvm_priv = hab_driver.hyp_priv;
uint64_t paddr;
char *shmdata;
int ret = 0;
/* no more vdev-shmem regions available for additional pchans, per the 1:1 rule */
if (qvm_priv->curr >= qvm_priv->probe_cnt) {
pr_err("pchan guest factory setting %d overflow probe cnt %d\n",
qvm_priv->curr, qvm_priv->probe_cnt);
ret = -1;
goto err;
}
paddr = get_guest_ctrl_paddr(dev,
qvm_priv->pchan_settings[qvm_priv->curr].factory_addr,
qvm_priv->pchan_settings[qvm_priv->curr].irq,
name,
pipe_alloc_pages);
dev->guest_ctrl = memremap(paddr,
(dev->guest_factory->size + 1) * PAGE_SIZE, MEMREMAP_WB);
/* page size should be 4KB */
if (!dev->guest_ctrl) {
ret = -ENOMEM;
goto err;
}
shmdata = (char *)dev->guest_ctrl + PAGE_SIZE;
pr_debug("ctrl page 0x%llx mapped at 0x%pK, idx %d\n",
paddr, dev->guest_ctrl, dev->guest_ctrl->idx);
pr_debug("data buffer mapped at 0x%pK\n", shmdata);
dev->idx = dev->guest_ctrl->idx;
qvm_priv->curr++;
return shmdata;
err:
return ERR_PTR(ret);
}
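/*
* Note on the mapping above: the guest_shm region is assumed to start with
* one control page (struct guest_shm_control) followed by
* dev->guest_factory->size data pages, so shmdata points one PAGE_SIZE past
* guest_ctrl and becomes the hab_pipe base.
*/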

View File

@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HAB_QVM_OS_H
#define __HAB_QVM_OS_H
#include <linux/guest_shm.h>
#include <linux/stddef.h>
struct qvm_channel_os {
struct tasklet_struct task;
};
#endif /*__HAB_QVM_OS_H*/

View File

@ -0,0 +1,298 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
#include "hab_grantable.h"
#define MAX_LINE_SIZE 128
int hab_stat_init(struct hab_driver *driver)
{
return hab_stat_init_sub(driver);
}
int hab_stat_deinit(struct hab_driver *driver)
{
return hab_stat_deinit_sub(driver);
}
/*
* If all goes well, the return value is the length of the formatted print
* concatenated onto the original dest string.
*/
int hab_stat_buffer_print(char *dest,
int dest_size, const char *fmt, ...)
{
va_list args;
char line[MAX_LINE_SIZE];
int ret;
va_start(args, fmt);
ret = vsnprintf(line, sizeof(line), fmt, args);
va_end(args);
if (ret > 0)
ret = strlcat(dest, line, dest_size);
return ret;
}
int hab_stat_show_vchan(struct hab_driver *driver,
char *buf, int size)
{
int i, ret = 0;
ret = strscpy(buf, "", size);
for (i = 0; i < driver->ndevices; i++) {
struct hab_device *dev = &driver->devp[i];
struct physical_channel *pchan;
struct virtual_channel *vc;
read_lock_bh(&dev->pchan_lock);
list_for_each_entry(pchan, &dev->pchannels, node) {
if (!pchan->vcnt)
continue;
ret = hab_stat_buffer_print(buf, size,
"nm %s r %d lc %d rm %d sq_t %d sq_r %d st 0x%x vn %d:\n",
pchan->name, pchan->is_be, pchan->vmid_local,
pchan->vmid_remote, pchan->sequence_tx,
pchan->sequence_rx, pchan->status, pchan->vcnt);
read_lock(&pchan->vchans_lock);
list_for_each_entry(vc, &pchan->vchannels, pnode) {
ret = hab_stat_buffer_print(buf, size,
"%08X(%d:%d:%lu:%lu:%d) ", vc->id,
get_refcnt(vc->refcount),
vc->otherend_closed,
(unsigned long)vc->tx_cnt,
(unsigned long)vc->rx_cnt,
vc->rx_inflight);
}
ret = hab_stat_buffer_print(buf, size, "\n");
read_unlock(&pchan->vchans_lock);
}
read_unlock_bh(&dev->pchan_lock);
}
return ret;
}
int hab_stat_show_ctx(struct hab_driver *driver,
char *buf, int size)
{
int ret = 0;
struct uhab_context *ctx;
ret = strscpy(buf, "", size);
spin_lock_bh(&hab_driver.drvlock);
ret = hab_stat_buffer_print(buf, size,
"Total contexts %d\n",
driver->ctx_cnt);
list_for_each_entry(ctx, &hab_driver.uctx_list, node) {
ret = hab_stat_buffer_print(buf, size,
"ctx %d K %d close %d vc %d exp %d imp %d open %d ref %d\n",
ctx->owner, ctx->kernel, ctx->closing,
ctx->vcnt, ctx->export_total,
ctx->import_total, ctx->pending_cnt,
get_refcnt(ctx->refcount));
}
spin_unlock_bh(&hab_driver.drvlock);
return ret;
}
static int get_pft_tbl_total_size(struct compressed_pfns *pfn_table)
{
int i, total_size = 0;
for (i = 0; i < pfn_table->nregions; i++)
total_size += pfn_table->region[i].size * PAGE_SIZE;
return total_size;
}
static int print_ctx_total_expimp(struct uhab_context *ctx,
char *buf, int size)
{
struct compressed_pfns *pfn_table = NULL;
int exp_total = 0, imp_total = 0;
int exp_cnt = 0, imp_cnt = 0;
struct export_desc *exp = NULL;
int exim_size = 0;
int ret = 0;
read_lock(&ctx->exp_lock);
list_for_each_entry(exp, &ctx->exp_whse, node) {
pfn_table = (struct compressed_pfns *)exp->payload;
exim_size = get_pft_tbl_total_size(pfn_table);
exp_total += exim_size;
exp_cnt++;
}
read_unlock(&ctx->exp_lock);
spin_lock_bh(&ctx->imp_lock);
list_for_each_entry(exp, &ctx->imp_whse, node) {
if (habmm_imp_hyp_map_check(ctx->import_ctx, exp)) {
pfn_table = (struct compressed_pfns *)exp->payload;
exim_size = get_pft_tbl_total_size(pfn_table);
imp_total += exim_size;
imp_cnt++;
}
}
spin_unlock_bh(&ctx->imp_lock);
if (exp_cnt || exp_total || imp_cnt || imp_total)
hab_stat_buffer_print(buf, size,
"ctx %d exp %d size %d imp %d size %d\n",
ctx->owner, exp_cnt, exp_total,
imp_cnt, imp_total);
else
return 0;
read_lock(&ctx->exp_lock);
hab_stat_buffer_print(buf, size, "export[expid:vcid:size]: ");
list_for_each_entry(exp, &ctx->exp_whse, node) {
pfn_table = (struct compressed_pfns *)exp->payload;
exim_size = get_pft_tbl_total_size(pfn_table);
hab_stat_buffer_print(buf, size,
"[%d:%x:%d] ", exp->export_id,
exp->vcid_local, exim_size);
}
hab_stat_buffer_print(buf, size, "\n");
read_unlock(&ctx->exp_lock);
spin_lock_bh(&ctx->imp_lock);
hab_stat_buffer_print(buf, size, "import[expid:vcid:size]: ");
list_for_each_entry(exp, &ctx->imp_whse, node) {
if (habmm_imp_hyp_map_check(ctx->import_ctx, exp)) {
pfn_table = (struct compressed_pfns *)exp->payload;
exim_size = get_pft_tbl_total_size(pfn_table);
hab_stat_buffer_print(buf, size,
"[%d:%x:%d] ", exp->export_id,
exp->vcid_local, exim_size);
}
}
ret = hab_stat_buffer_print(buf, size, "\n");
spin_unlock_bh(&ctx->imp_lock);
return ret;
}
int hab_stat_show_expimp(struct hab_driver *driver,
int pid, char *buf, int size)
{
struct uhab_context *ctx = NULL;
int ret = 0;
struct virtual_channel *vchan = NULL;
int mmid = 0;
struct physical_channel *pchans[HABCFG_MMID_NUM];
int pchan_count = 0;
(void)driver;
ret = strscpy(buf, "", size);
spin_lock_bh(&hab_driver.drvlock);
list_for_each_entry(ctx, &hab_driver.uctx_list, node) {
if (pid == ctx->owner) {
ret = print_ctx_total_expimp(ctx, buf, size);
list_for_each_entry(vchan, &ctx->vchannels, node) {
if (vchan->pchan->habdev->id != mmid) {
mmid = vchan->pchan->habdev->id;
pchans[pchan_count++] = vchan->pchan;
if (pchan_count >= HABCFG_MMID_NUM)
break;
}
}
break;
}
}
spin_unlock_bh(&hab_driver.drvlock);
/* print pchannel status, drvlock is not required */
if (pchan_count > 0)
ret = hab_stat_log(pchans, pchan_count, buf, size);
return ret;
}
#define HAB_PIPE_DUMP_FILE_NAME "/sdcard/habpipe-"
#define HAB_PIPE_DUMP_FILE_EXT ".dat"
#define HAB_PIPEDUMP_SIZE (768*1024*4)
static char *filp;
static int pipedump_idx;
int dump_hab_open(void)
{
int rc = 0;
char file_path[256];
char file_time[100];
rc = dump_hab_get_file_name(file_time, sizeof(file_time));
strscpy(file_path, HAB_PIPE_DUMP_FILE_NAME, sizeof(file_path));
strlcat(file_path, file_time, sizeof(file_path));
strlcat(file_path, HAB_PIPE_DUMP_FILE_EXT, sizeof(file_path));
filp = vmalloc(HAB_PIPEDUMP_SIZE);
if (!filp) {
rc = -ENOMEM;
pr_err("failed to create pipe dump buffer rc %d\n", rc);
} else {
pr_info("hab pipe dump buffer opened %s\n", file_path);
pipedump_idx = 0;
dump_hab_buf(file_path, strlen(file_path)); /* id first */
}
return rc;
}
void dump_hab_close(void)
{
pr_info("pipe dump content size %d completed\n", pipedump_idx);
/* transfer buffer ownership to devcoredump */
filp = NULL;
pipedump_idx = 0;
}
int dump_hab_buf(void *buf, int size)
{
if (!buf || !size || size > HAB_PIPEDUMP_SIZE - pipedump_idx) {
pr_err("wrong parameters buf %pK size %d allowed %d\n",
buf, size, HAB_PIPEDUMP_SIZE - pipedump_idx);
return 0;
}
memcpy(&filp[pipedump_idx], buf, size);
pipedump_idx += size;
return size;
}
void dump_hab(int mmid)
{
struct physical_channel *pchan = NULL;
int i = 0;
char str[8] = {35, 35, 35, 35, 35, 35, 35, 35}; /* ## */
dump_hab_open();
for (i = 0; i < hab_driver.ndevices; i++) {
struct hab_device *habdev = &hab_driver.devp[i];
if (habdev->id == mmid) {
list_for_each_entry(pchan, &habdev->pchannels, node) {
if (pchan->vcnt > 0) {
pr_info("***** dump pchan %s vcnt %d *****\n",
pchan->name, pchan->vcnt);
hab_pipe_read_dump(pchan);
break;
}
}
dump_hab_buf(str, 8); /* separator */
}
}
dev_coredumpv(hab_driver.dev, filp, pipedump_idx, GFP_KERNEL);
dump_hab_close();
}
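/*
* Since the dump is handed to dev_coredumpv() above, it can typically be
* read back through the devcoredump class device (e.g.
* /sys/class/devcoredump/devcd<N>/data) rather than the legacy /sdcard path
* used only to build the file name.
*/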

View File

@ -0,0 +1,160 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hab
#if !defined(_TRACE_HAB_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HAB_H
#include <linux/tracepoint.h>
#include "hab.h"
/* send path */
TRACE_EVENT(hab_pchan_send_start,
TP_PROTO(struct physical_channel *pchan),
TP_ARGS(pchan),
TP_STRUCT__entry(
__array(char, pchan_name, MAX_VMID_NAME_SIZE)
__field(uint32_t, seq_tx)
__field(unsigned long long, mpm_tv)
),
TP_fast_assign(
memcpy(__entry->pchan_name, pchan->name, MAX_VMID_NAME_SIZE);
__entry->seq_tx = pchan->sequence_tx + 1;
__entry->mpm_tv = msm_timer_get_sclk_ticks();
),
TP_printk("PTI:%s:%u:%llu\n", __entry->pchan_name,
__entry->seq_tx, __entry->mpm_tv)
);
TRACE_EVENT(hab_pchan_send_done,
TP_PROTO(struct physical_channel *pchan),
TP_ARGS(pchan),
TP_STRUCT__entry(
__array(char, pchan_name, MAX_VMID_NAME_SIZE)
__field(uint32_t, seq_tx)
__field(unsigned long long, mpm_tv)
),
TP_fast_assign(
memcpy(__entry->pchan_name, pchan->name, MAX_VMID_NAME_SIZE);
__entry->seq_tx = pchan->sequence_tx;
__entry->mpm_tv = msm_timer_get_sclk_ticks();
),
TP_printk("PTO:%s:%u:%llu\n", __entry->pchan_name,
__entry->seq_tx, __entry->mpm_tv)
);
TRACE_EVENT(hab_vchan_send_start,
TP_PROTO(struct virtual_channel *vchan),
TP_ARGS(vchan),
TP_STRUCT__entry(
__array(char, pchan_name, MAX_VMID_NAME_SIZE)
__field(uint32_t, seq_tx)
__field(unsigned long long, mpm_tv)
),
TP_fast_assign(
memcpy(__entry->pchan_name, vchan->pchan->name,
MAX_VMID_NAME_SIZE);
__entry->seq_tx = vchan->pchan->sequence_tx + 1;
__entry->mpm_tv = msm_timer_get_sclk_ticks();
),
TP_printk("VTI:%s:%u:%llu\n", __entry->pchan_name,
__entry->seq_tx, __entry->mpm_tv)
);
TRACE_EVENT(hab_vchan_send_done,
TP_PROTO(struct virtual_channel *vchan),
TP_ARGS(vchan),
TP_STRUCT__entry(
__array(char, pchan_name, MAX_VMID_NAME_SIZE)
__field(uint32_t, seq_tx)
__field(unsigned long long, mpm_tv)
),
TP_fast_assign(
memcpy(__entry->pchan_name, vchan->pchan->name,
MAX_VMID_NAME_SIZE);
__entry->seq_tx = vchan->pchan->sequence_tx;
__entry->mpm_tv = msm_timer_get_sclk_ticks();
),
TP_printk("VTO:%s:%u:%llu\n", __entry->pchan_name,
__entry->seq_tx, __entry->mpm_tv)
);
/* receive path */
TRACE_EVENT(hab_pchan_recv_start,
TP_PROTO(struct physical_channel *pchan),
TP_ARGS(pchan),
TP_STRUCT__entry(
__array(char, pchan_name, MAX_VMID_NAME_SIZE)
__field(uint32_t, seq_rx)
__field(unsigned long long, mpm_tv)
),
TP_fast_assign(
memcpy(__entry->pchan_name, pchan->name, MAX_VMID_NAME_SIZE);
__entry->seq_rx = pchan->sequence_rx;
__entry->mpm_tv = msm_timer_get_sclk_ticks();
),
TP_printk("PRI:%s:%u:%llu\n", __entry->pchan_name,
__entry->seq_rx, __entry->mpm_tv)
);
TRACE_EVENT(hab_vchan_recv_done,
TP_PROTO(struct virtual_channel *vchan,
struct hab_message *msg),
TP_ARGS(vchan, msg),
TP_STRUCT__entry(
__array(char, pchan_name, MAX_VMID_NAME_SIZE)
__field(uint32_t, seq_rx)
__field(unsigned long long, mpm_tv)
),
TP_fast_assign(
memcpy(__entry->pchan_name, vchan->pchan->name,
MAX_VMID_NAME_SIZE);
__entry->seq_rx = msg->sequence_rx;
__entry->mpm_tv = msm_timer_get_sclk_ticks();
),
TP_printk("VRO:%s:%u:%llu\n", __entry->pchan_name,
__entry->seq_rx, __entry->mpm_tv)
);
#endif /* _TRACE_HAB_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE hab_trace_os
#include <trace/define_trace.h>

View File

@ -0,0 +1,299 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
struct virtual_channel *
hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan,
int openid)
{
int id;
struct virtual_channel *vchan;
if (!pchan || !ctx)
return NULL;
vchan = kzalloc(sizeof(*vchan), GFP_KERNEL);
if (!vchan)
return NULL;
/* This should be the first thing we do in this function */
idr_preload(GFP_KERNEL);
spin_lock_bh(&pchan->vid_lock);
id = idr_alloc(&pchan->vchan_idr, vchan, 1,
(HAB_VCID_ID_MASK >> HAB_VCID_ID_SHIFT) + 1, GFP_NOWAIT);
spin_unlock_bh(&pchan->vid_lock);
idr_preload_end();
if (id <= 0) {
pr_err("idr failed %d\n", id);
kfree(vchan);
return NULL;
}
mb(); /* id generation must be complete before pchan_get */
hab_pchan_get(pchan);
vchan->pchan = pchan;
/* vchan needs both vcid and openid to be properly located */
vchan->session_id = openid;
write_lock(&pchan->vchans_lock);
list_add_tail(&vchan->pnode, &pchan->vchannels);
pchan->vcnt++;
write_unlock(&pchan->vchans_lock);
vchan->id = ((id << HAB_VCID_ID_SHIFT) & HAB_VCID_ID_MASK) |
((pchan->habdev->id << HAB_VCID_MMID_SHIFT) &
HAB_VCID_MMID_MASK) |
((pchan->dom_id << HAB_VCID_DOMID_SHIFT) &
HAB_VCID_DOMID_MASK);
spin_lock_init(&vchan->rx_lock);
INIT_LIST_HEAD(&vchan->rx_list);
init_waitqueue_head(&vchan->rx_queue);
kref_init(&vchan->refcount);
vchan->otherend_closed = pchan->closed;
hab_ctx_get(ctx);
vchan->ctx = ctx;
return vchan;
}
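/*
* The vcid composed above packs three fields (exact shifts/masks live in
* hab.h): the per-pchan idr value in HAB_VCID_ID, the hab device (service)
* id in HAB_VCID_MMID and the remote domain id in HAB_VCID_DOMID, so a vcid
* alone is enough to locate the owning pchan and service.
*/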
static void
hab_vchan_free(struct kref *ref)
{
struct virtual_channel *vchan =
container_of(ref, struct virtual_channel, refcount);
struct hab_message *message, *msg_tmp;
struct physical_channel *pchan = vchan->pchan;
struct uhab_context *ctx = vchan->ctx;
struct virtual_channel *vc, *vc_tmp;
int irqs_disabled = irqs_disabled();
hab_spin_lock(&vchan->rx_lock, irqs_disabled);
list_for_each_entry_safe(message, msg_tmp, &vchan->rx_list, node) {
list_del(&message->node);
hab_msg_free(message);
}
hab_spin_unlock(&vchan->rx_lock, irqs_disabled);
/* release vchan from pchan. no more msg for this vchan */
hab_write_lock(&pchan->vchans_lock, irqs_disabled);
list_for_each_entry_safe(vc, vc_tmp, &pchan->vchannels, pnode) {
if (vchan == vc) {
list_del(&vc->pnode);
/* the ref is held in case the pchan is freed */
pchan->vcnt--;
break;
}
}
hab_write_unlock(&pchan->vchans_lock, irqs_disabled);
/* the release vchan from ctx was done earlier in vchan close() */
hab_ctx_put(ctx); /* now ctx is not needed from this vchan's view */
/* release the idr last so the same id will not be reused too early */
hab_spin_lock(&pchan->vid_lock, irqs_disabled);
idr_remove(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan->id));
hab_spin_unlock(&pchan->vid_lock, irqs_disabled);
hab_pchan_put(pchan); /* no more need for pchan from this vchan */
kfree(vchan);
}
/*
* only for msg recv path to retrieve vchan from vcid and openid based on
* pchan's vchan list
*/
struct virtual_channel*
hab_vchan_get(struct physical_channel *pchan, struct hab_header *header)
{
struct virtual_channel *vchan;
uint32_t vchan_id = HAB_HEADER_GET_ID(*header);
uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header);
size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
uint32_t payload_type = HAB_HEADER_GET_TYPE(*header);
int irqs_disabled = irqs_disabled();
hab_spin_lock(&pchan->vid_lock, irqs_disabled);
vchan = idr_find(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan_id));
if (vchan) {
if (vchan->session_id != session_id)
/*
* skip it if the session is different even though the vcid
* is the same
*/
vchan = NULL;
else if (!vchan->otherend_id /*&& !vchan->session_id*/) {
/*
* an unpaired vchan can be fetched right after it is
* alloc'ed, so it has to be skipped when searching
* for the remote msg's target
*/
pr_warn("vcid %x is not paired yet session %d refcnt %d type %d sz %zd\n",
vchan->id, vchan->otherend_id,
get_refcnt(vchan->refcount),
payload_type, sizebytes);
vchan = NULL;
} else if (vchan->otherend_closed || vchan->closed) {
pr_debug("closed already remote %d local %d vcid %x remote %x session %d refcnt %d header %x session %d type %d sz %zd\n",
vchan->otherend_closed, vchan->closed,
vchan->id, vchan->otherend_id,
vchan->session_id, get_refcnt(vchan->refcount),
vchan_id, session_id, payload_type, sizebytes);
vchan = NULL;
} else if (!kref_get_unless_zero(&vchan->refcount)) {
/*
* this happens when refcnt is already zero
* (put from other thread) or there is an actual error
*/
pr_err("failed to inc vcid %pK %x remote %x session %d refcnt %d header %x session %d type %d sz %zd\n",
vchan, vchan->id, vchan->otherend_id,
vchan->session_id, get_refcnt(vchan->refcount),
vchan_id, session_id, payload_type, sizebytes);
vchan = NULL;
}
}
hab_spin_unlock(&pchan->vid_lock, irqs_disabled);
return vchan;
}
/* wake up local waiting Q, so stop-vchan can be processed */
void hab_vchan_stop(struct virtual_channel *vchan)
{
if (vchan) {
vchan->otherend_closed = 1;
wake_up(&vchan->rx_queue);
if (vchan->ctx)
wake_up_interruptible(&vchan->ctx->exp_wq);
else
pr_err("NULL ctx for vchan %x\n", vchan->id);
}
}
void hab_vchans_stop(struct physical_channel *pchan)
{
struct virtual_channel *vchan, *tmp;
read_lock(&pchan->vchans_lock);
list_for_each_entry_safe(vchan, tmp, &pchan->vchannels, pnode) {
hab_vchan_stop(vchan);
}
read_unlock(&pchan->vchans_lock);
}
/* send vchan close to remote and stop receiving anything locally */
void hab_vchan_stop_notify(struct virtual_channel *vchan)
{
hab_send_close_msg(vchan);
hab_vchan_stop(vchan);
}
static int hab_vchans_per_pchan_empty(struct physical_channel *pchan)
{
int empty;
read_lock(&pchan->vchans_lock);
empty = list_empty(&pchan->vchannels);
if (!empty) {
struct virtual_channel *vchan;
int vcnt = pchan->vcnt;
list_for_each_entry(vchan, &pchan->vchannels, pnode) {
/* discount open-pending unpaired vchan */
if (!vchan->session_id)
vcnt--;
else
pr_err("vchan %pK %x rm %x sn %d rf %d clsd %d rm clsd %d\n",
vchan, vchan->id,
vchan->otherend_id,
vchan->session_id,
get_refcnt(vchan->refcount),
vchan->closed, vchan->otherend_closed);
}
if (!vcnt)
empty = 1; /* unpaired vchans can exist at init time */
}
read_unlock(&pchan->vchans_lock);
return empty;
}
static int hab_vchans_empty(int vmid)
{
int i, empty = 1;
struct physical_channel *pchan;
struct hab_device *hab_dev;
for (i = 0; i < hab_driver.ndevices; i++) {
hab_dev = &hab_driver.devp[i];
read_lock_bh(&hab_dev->pchan_lock);
list_for_each_entry(pchan, &hab_dev->pchannels, node) {
if (pchan->vmid_remote == vmid) {
if (!hab_vchans_per_pchan_empty(pchan)) {
empty = 0;
pr_info("vmid %d %s's vchans are not closed\n",
vmid, pchan->name);
break;
}
}
}
read_unlock_bh(&hab_dev->pchan_lock);
}
return empty;
}
/*
* block until all vchans of a given GVM are explicitly closed
* with habmm_socket_close() by hab clients themselves
*/
void hab_vchans_empty_wait(int vmid)
{
pr_info("waiting for GVM%d's sockets closure\n", vmid);
while (!hab_vchans_empty(vmid))
usleep_range(10000, 12000);
pr_info("all of GVM%d's sockets are closed\n", vmid);
}
int hab_vchan_find_domid(struct virtual_channel *vchan)
{
return vchan ? vchan->pchan->dom_id : -1;
}
void hab_vchan_put(struct virtual_channel *vchan)
{
if (vchan)
kref_put(&vchan->refcount, hab_vchan_free);
}
int hab_vchan_query(struct uhab_context *ctx, int32_t vcid, uint64_t *ids,
char *names, size_t name_size, uint32_t flags)
{
struct virtual_channel *vchan;
vchan = hab_get_vchan_fromvcid(vcid, ctx, 1);
if (!vchan)
return -EINVAL;
if (vchan->otherend_closed) {
hab_vchan_put(vchan);
return -ENODEV;
}
*ids = vchan->pchan->vmid_local |
((uint64_t)vchan->pchan->vmid_remote) << 32;
names[0] = 0;
names[name_size/2] = 0;
hab_vchan_put(vchan);
return 0;
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,130 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __HAB_VIRTIO_H
#define __HAB_VIRTIO_H
#include "hab.h"
enum {
HAB_PCHAN_TX_VQ = 0, /* receive data from gvm */
HAB_PCHAN_RX_VQ, /* send data to gvm */
HAB_PCHAN_VQ_MAX,
};
/* cross link between probe and comm-dev-alloc */
struct vq_pchan {
uint32_t mmid;
struct physical_channel *pchan;
struct virtio_hab *vhab;
unsigned int index[HAB_PCHAN_VQ_MAX]; /* vring index */
struct virtqueue *vq[HAB_PCHAN_VQ_MAX];
spinlock_t lock[HAB_PCHAN_VQ_MAX]; /* per pchan lock */
wait_queue_head_t out_wq;
struct tasklet_struct task; /* for rxq only */
void *s_pool;
struct list_head s_list; /* small buffer available out list */
int s_cnt;
void *m_pool;
struct list_head m_list; /* medium buffer available out list */
int m_cnt;
void *l_pool;
struct list_head l_list; /* large buffer available out list */
int l_cnt;
void *in_pool;
struct list_head in_list; /* only used for init then stored in vqs */
int in_cnt;
void *read_data; /* recved buf should be one of the in bufs */
size_t read_size;
int read_offset;
bool pchan_ready;
};
typedef void (*vq_callback)(struct virtqueue *);
struct virtio_hab {
struct virtio_device *vdev; /* the actual virtio device probed */
uint32_t mmid_start; /* starting mmid for this virthab */
int mmid_range; /* total mmid used in this virthab, it might cross mmid groups */
/* in case vqs are not start from zero to support all the needs of one
* virtio device, and it always starts after "other" vqs
*/
int vqs_offset;
struct virtqueue **vqs; /* holds total # of vqs for all the pchans. 2 vqs per pchan */
vq_callback_t **cbs; /* each vqs callback */
char **names; /* each vqs' names */
struct vq_pchan *vqpchans; /* total # of pchans */
spinlock_t mlock; /* master lock for all the pchans */
bool ready; /* overall device ready flag */
struct list_head node; /* list of all probed virtio hab */
};
/*
* this commdev has two parts, the pchan for hab driver created in commdev alloc,
* and, virtio dev and vqs created during virtio probe.
* commdev might happen earlier than virtio probe
* one kind of hab driver for one kind of virtio device. within this one pair
* there is one list/array of pchans/commdevs
*/
struct virtio_pchan_link {
uint32_t mmid;
struct physical_channel *pchan; /* link back to hab driver */
struct vq_pchan *vpc; /* link back to the virtio probe result */
struct virtio_hab *vhab; /* this is initialized during virtio probe */
};
#ifdef CONFIG_MSM_VIRTIO_HAB
int virthab_queue_inbufs(struct virtio_hab *vh, int alloc);
int virthab_alloc(struct virtio_device *vdev, struct virtio_hab **pvh,
uint32_t mmid_start, int mmid_range);
int virthab_init_vqs_pre(struct virtio_hab *vh);
int virthab_init_vqs_post(struct virtio_hab *vh);
struct virtio_device *virthab_get_vdev(int32_t mmid);
#else
static inline int virthab_queue_inbufs(struct virtio_hab *vh, int alloc)
{
return -ENODEV;
}
static inline int virthab_alloc(struct virtio_device *vdev, struct virtio_hab **pvh,
uint32_t mmid_start, int mmid_range)
{
return -ENODEV;
}
static inline int virthab_init_vqs_pre(struct virtio_hab *vh)
{
return -ENODEV;
}
static inline int virthab_init_vqs_post(struct virtio_hab *vh)
{
return -ENODEV;
}
static inline struct virtio_device *virthab_get_vdev(int32_t mmid)
{
return NULL;
}
#endif
#endif /* __HAB_VIRTIO_H */

View File

@ -0,0 +1,18 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
int hab_hypervisor_register(void)
{
hab_driver.b_loopback = 1;
return 0;
}
void hab_hypervisor_unregister(void)
{
hab_hypervisor_unregister_common();
}

drivers/soc/qcom/hab/khab.c
View File

@ -0,0 +1,169 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
#include <linux/module.h>
int32_t habmm_socket_open(int32_t *handle, uint32_t mm_ip_id,
uint32_t timeout, uint32_t flags)
{
return hab_vchan_open(hab_driver.kctx, mm_ip_id, handle,
timeout, flags);
}
EXPORT_SYMBOL(habmm_socket_open);
int32_t habmm_socket_close(int32_t handle)
{
return hab_vchan_close(hab_driver.kctx, handle);
}
EXPORT_SYMBOL(habmm_socket_close);
int32_t habmm_socket_send(int32_t handle, void *src_buff,
uint32_t size_bytes, uint32_t flags)
{
struct hab_send param = {0};
param.vcid = handle;
param.data = (uint64_t)(uintptr_t)src_buff;
param.sizebytes = size_bytes;
param.flags = flags;
return hab_vchan_send(hab_driver.kctx, handle,
size_bytes, src_buff, flags);
}
EXPORT_SYMBOL(habmm_socket_send);
int32_t habmm_socket_recv(int32_t handle, void *dst_buff, uint32_t *size_bytes,
uint32_t timeout, uint32_t flags)
{
int ret = 0;
struct hab_message *msg = NULL;
void **scatter_buf = NULL;
int i = 0;
if (!size_bytes || !dst_buff)
return -EINVAL;
ret = hab_vchan_recv(hab_driver.kctx, &msg, handle, size_bytes, timeout, flags);
if (ret == 0 && msg) {
if (unlikely(msg->scatter)) {
scatter_buf = (void **)msg->data;
/* The maximum size of msg is limited in hab_msg_alloc */
for (i = 0; i < msg->sizebytes / PAGE_SIZE; i++)
memcpy((char *)((uint64_t)dst_buff
+ (uint64_t)(i * PAGE_SIZE)), scatter_buf[i], PAGE_SIZE);
if (msg->sizebytes % PAGE_SIZE)
memcpy((char *)((uint64_t)dst_buff
+ (uint64_t)(i * PAGE_SIZE)), scatter_buf[i],
msg->sizebytes % PAGE_SIZE);
} else
memcpy(dst_buff, msg->data, msg->sizebytes);
} else if (ret && msg) {
pr_warn("vcid %X recv failed %d but msg is still received %zd bytes\n",
handle, ret, msg->sizebytes);
}
if (msg)
hab_msg_free(msg);
return ret;
}
EXPORT_SYMBOL(habmm_socket_recv);
int32_t habmm_export(int32_t handle, void *buff_to_share, uint32_t size_bytes,
uint32_t *export_id, uint32_t flags)
{
int ret;
struct hab_export param = {0};
if (!export_id)
return -EINVAL;
param.vcid = handle;
param.buffer = (uint64_t)(uintptr_t)buff_to_share;
param.sizebytes = size_bytes;
param.flags = flags;
ret = hab_mem_export(hab_driver.kctx, &param, 1);
*export_id = param.exportid;
return ret;
}
EXPORT_SYMBOL(habmm_export);
int32_t habmm_unexport(int32_t handle, uint32_t export_id, uint32_t flags)
{
struct hab_unexport param = {0};
param.vcid = handle;
param.exportid = export_id;
return hab_mem_unexport(hab_driver.kctx, &param, 1);
}
EXPORT_SYMBOL(habmm_unexport);
int32_t habmm_import(int32_t handle, void **buff_shared, uint32_t size_bytes,
uint32_t export_id, uint32_t flags)
{
int ret;
struct hab_import param = {0};
if (!buff_shared)
return -EINVAL;
param.vcid = handle;
param.sizebytes = size_bytes;
param.exportid = export_id;
param.flags = flags;
ret = hab_mem_import(hab_driver.kctx, &param, 1);
if (!ret)
*buff_shared = (void *)(uintptr_t)param.kva;
return ret;
}
EXPORT_SYMBOL(habmm_import);
int32_t habmm_unimport(int32_t handle,
uint32_t export_id,
void *buff_shared,
uint32_t flags)
{
struct hab_unimport param = {0};
param.vcid = handle;
param.exportid = export_id;
param.kva = (uint64_t)(uintptr_t)buff_shared;
return hab_mem_unimport(hab_driver.kctx, &param, 1);
}
EXPORT_SYMBOL(habmm_unimport);
int32_t habmm_socket_query(int32_t handle,
struct hab_socket_info *info,
uint32_t flags)
{
int ret;
uint64_t ids;
char nm[VMNAME_SIZE * 2];
if (!info)
return -EINVAL;
ret = hab_vchan_query(hab_driver.kctx, handle, &ids, nm, sizeof(nm), 1);
if (!ret) {
info->vmid_local = ids & 0xFFFFFFFF;
info->vmid_remote = (ids & 0xFFFFFFFF00000000UL) >> 32;
strscpy(info->vmname_local, nm, sizeof(info->vmname_local));
strscpy(info->vmname_remote, &nm[sizeof(info->vmname_local)],
sizeof(info->vmname_remote));
}
return ret;
}
EXPORT_SYMBOL(habmm_socket_query);
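#if 0
/*
* Illustrative kernel-client sketch of the habmm_* API above; not part of
* the driver. The MMID value and error handling are placeholders.
*/
static int habmm_example(void)
{
int32_t handle = 0;
char msg[] = "ping";
char reply[64];
uint32_t sz = sizeof(reply);
int ret;
ret = habmm_socket_open(&handle, 501 /* placeholder MMID */, 0, 0);
if (ret)
return ret;
ret = habmm_socket_send(handle, msg, sizeof(msg), 0);
if (!ret)
ret = habmm_socket_recv(handle, reply, &sz, 0, 0);
habmm_socket_close(handle);
return ret;
}
#endif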

View File

@ -0,0 +1,409 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
#include <linux/rtc.h>
#if !defined CONFIG_GHS_VMM && defined(CONFIG_QTI_QUIN_GVM)
#include <linux/cacheflush.h>
#include <linux/list.h>
#include "hab_pipe.h"
#include "hab_qvm.h"
#include "khab_test.h"
static char g_perf_test_result[256];
enum hab_perf_test_type {
HAB_SHMM_THGPUT = 0x0,
};
#define HAB_PERF_TEST_MMID 802
#define PERF_TEST_ITERATION 50
#define MEM_READ_ITERATION 30
static int hab_shmm_throughput_test(void)
{
struct hab_device *habDev;
struct qvm_channel *dev;
struct hab_shared_buf *sh_buf;
struct physical_channel *pchan;
ktime_t start_time = 0, end_time = 0;
int i, counter;
void *test_data;
unsigned char *source_data, *shmm_adr;
register int sum;
register int *pp, *lastone;
int throughput[3][2] = { {0} };
int latency[6][PERF_TEST_ITERATION];
int ret = 0, tmp, size;
habDev = find_hab_device(HAB_PERF_TEST_MMID);
if (!habDev || list_empty(&(habDev->pchannels))) {
ret = -ENOMEM;
return ret;
}
pchan = list_first_entry(&(habDev->pchannels),
struct physical_channel, node);
dev = pchan->hyp_data;
if (!dev) {
ret = -EPERM;
return ret;
}
sh_buf = dev->tx_buf;
/* pChannel is of 128k, we use 64k to test */
size = 0x10000;
if (!sh_buf) {
pr_err("Share buffer address is empty, exit the perf test\n");
ret = -ENOMEM;
return ret;
}
shmm_adr = (unsigned char *)sh_buf->data;
test_data = kzalloc(size, GFP_ATOMIC);
if (!test_data) {
ret = -ENOMEM;
return ret;
}
source_data = kzalloc(size, GFP_ATOMIC);
if (!source_data) {
ret = -ENOMEM;
kfree(test_data);
return ret;
}
for (i = 0; i < PERF_TEST_ITERATION; i++) {
/* Normal memory copy latency */
flush_cache_all();
start_time = ktime_get();
memcpy(test_data, source_data, size);
end_time = ktime_get();
latency[0][i] = ktime_us_delta(end_time, start_time);
/* Share memory copy latency */
flush_cache_all();
start_time = ktime_get();
memcpy(shmm_adr, source_data, size);
end_time = ktime_get();
latency[1][i] = ktime_us_delta(end_time, start_time);
/* Normal memory read latency */
counter = MEM_READ_ITERATION;
sum = 0;
latency[2][i] = 0;
flush_cache_all();
while (counter-- > 0) {
pp = test_data;
lastone = (int *)((char *)test_data + size - 512);
start_time = ktime_get();
while (pp <= lastone) {
sum +=
pp[0] + pp[4] + pp[8] + pp[12]
+ pp[16] + pp[20] + pp[24] + pp[28]
+ pp[32] + pp[36] + pp[40] + pp[44]
+ pp[48] + pp[52] + pp[56] + pp[60]
+ pp[64] + pp[68] + pp[72] + pp[76]
+ pp[80] + pp[84] + pp[88] + pp[92]
+ pp[96] + pp[100] + pp[104]
+ pp[108] + pp[112]
+ pp[116] + pp[120]
+ pp[124];
pp += 128;
}
end_time = ktime_get();
latency[2][i] += ktime_us_delta(end_time, start_time);
flush_cache_all();
}
/* Share memory read latency*/
counter = MEM_READ_ITERATION;
sum = 0;
latency[3][i] = 0;
while (counter-- > 0) {
pp = (int *)shmm_adr;
lastone = (int *)(shmm_adr + size - 512);
start_time = ktime_get();
while (pp <= lastone) {
sum +=
pp[0] + pp[4] + pp[8] + pp[12]
+ pp[16] + pp[20] + pp[24] + pp[28]
+ pp[32] + pp[36] + pp[40] + pp[44]
+ pp[48] + pp[52] + pp[56] + pp[60]
+ pp[64] + pp[68] + pp[72] + pp[76]
+ pp[80] + pp[84] + pp[88] + pp[92]
+ pp[96] + pp[100] + pp[104]
+ pp[108] + pp[112]
+ pp[116] + pp[120]
+ pp[124];
pp += 128;
}
end_time = ktime_get();
latency[3][i] += ktime_us_delta(end_time, start_time);
flush_cache_all();
}
/* Normal memory write latency */
flush_cache_all();
start_time = ktime_get();
memset(test_data, 'c', size);
end_time = ktime_get();
latency[4][i] = ktime_us_delta(end_time, start_time);
/* Share memory write latency */
flush_cache_all();
start_time = ktime_get();
memset(shmm_adr, 'c', size);
end_time = ktime_get();
latency[5][i] = ktime_us_delta(end_time, start_time);
}
/* Calculate normal memory copy throughput by average */
tmp = 0;
for (i = 0; i < PERF_TEST_ITERATION; i++)
tmp += latency[0][i];
throughput[0][0] = (tmp != 0) ? size*PERF_TEST_ITERATION/tmp : 0;
/* Calculate share memory copy throughput by average */
tmp = 0;
for (i = 0; i < PERF_TEST_ITERATION; i++)
tmp += latency[1][i];
throughput[0][1] = (tmp != 0) ? size*PERF_TEST_ITERATION/tmp : 0;
/* Calculate normal memory read throughput by average */
tmp = 0;
for (i = 0; i < PERF_TEST_ITERATION; i++)
tmp += latency[2][i];
throughput[1][0] = (tmp != 0) ?
size*PERF_TEST_ITERATION*MEM_READ_ITERATION/tmp : 0;
/* Calculate share memory read throughput by average */
tmp = 0;
for (i = 0; i < PERF_TEST_ITERATION; i++)
tmp += latency[3][i];
throughput[1][1] = (tmp != 0) ?
size*PERF_TEST_ITERATION*MEM_READ_ITERATION/tmp : 0;
/* Calculate normal memory write throughput by average */
tmp = 0;
for (i = 0; i < PERF_TEST_ITERATION; i++)
tmp += latency[4][i];
throughput[2][0] = (tmp != 0) ?
size*PERF_TEST_ITERATION/tmp : 0;
/* Calculate share memory write throughput by average */
tmp = 0;
for (i = 0; i < PERF_TEST_ITERATION; i++)
tmp += latency[5][i];
throughput[2][1] = (tmp != 0) ?
size*PERF_TEST_ITERATION/tmp : 0;
kfree(test_data);
kfree(source_data);
snprintf(g_perf_test_result, sizeof(g_perf_test_result),
"cpy(%d,%d)/read(%d,%d)/write(%d,%d)",
throughput[0][0], throughput[0][1], throughput[1][0],
throughput[1][1], throughput[2][0], throughput[2][1]);
return ret;
}
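/*
* Unit note for the averages above: tmp accumulates microseconds over
* PERF_TEST_ITERATION runs, so size*PERF_TEST_ITERATION/tmp is bytes per
* microsecond, roughly MB/s. For example, copying 64 KB in an average of
* 64 us reports about 1024 (~1 GB/s).
*/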
int hab_perf_test(long testId)
{
int ret;
switch (testId) {
case HAB_SHMM_THGPUT:
ret = hab_shmm_throughput_test();
break;
default:
pr_err("Invalid performance test ID %ld\n", testId);
ret = -EINVAL;
}
return ret;
}
static int kick_hab_perf_test(const char *val, const struct kernel_param *kp);
static int get_hab_perf_result(char *buffer, const struct kernel_param *kp);
module_param_call(perf_test, kick_hab_perf_test, get_hab_perf_result,
NULL, 0600);
static int kick_hab_perf_test(const char *val, const struct kernel_param *kp)
{
long testId;
int err = kstrtol(val, 10, &testId);
if (err)
return err;
memset(g_perf_test_result, 0, sizeof(g_perf_test_result));
return hab_perf_test(testId);
}
static int get_hab_perf_result(char *buffer, const struct kernel_param *kp)
{
return strscpy(buffer, g_perf_test_result,
strlen(g_perf_test_result)+1);
}
#endif
static struct kobject *hab_kobject;
static int vchan_stat;
static int context_stat;
static int pid_stat;
static ssize_t vchan_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return hab_stat_show_vchan(&hab_driver, buf, PAGE_SIZE);
}
static ssize_t vchan_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int ret;
ret = sscanf(buf, "%du", &vchan_stat);
if (ret < 1) {
pr_err("failed to read anything from input %d\n", ret);
return 0;
} else
return count;
}
static ssize_t ctx_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return hab_stat_show_ctx(&hab_driver, buf, PAGE_SIZE);
}
static ssize_t ctx_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int ret;
ret = sscanf(buf, "%du", &context_stat);
if (ret < 1) {
pr_err("failed to read anything from input %d\n", ret);
return 0;
} else
return count;
}
static ssize_t expimp_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return hab_stat_show_expimp(&hab_driver, pid_stat, buf, PAGE_SIZE);
}
static ssize_t expimp_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int ret = -1;
char str[36] = {0};
struct uhab_context *ctx = NULL;
struct virtual_channel *vchan = NULL;
if (buf) {
ret = sscanf(buf, "%35s", str);
if (ret < 1) {
pr_err("failed to read anything from input %d\n", ret);
return -EINVAL;
}
} else
return -EINVAL;
if (strnlen(str, strlen("dump_pipe")) == strlen("dump_pipe") &&
strcmp(str, "dump_pipe") == 0) {
/* string terminator is ignored */
list_for_each_entry(ctx, &hab_driver.uctx_list, node) {
if (ctx->owner == pid_stat) {
vchan = list_first_entry(&ctx->vchannels,
struct virtual_channel, node);
if (vchan) {
dump_hab_wq(vchan->pchan); /* user context */
break;
}
}
}
return count;
}
ret = sscanf(buf, "%du", &pid_stat);
if (ret < 1)
pr_err("failed to read anything from input %d\n", ret);
else
return count; /* good result stored */
return -EEXIST;
}
static struct kobj_attribute vchan_attribute = __ATTR(vchan_stat, 0660,
vchan_show,
vchan_store);
static struct kobj_attribute ctx_attribute = __ATTR(context_stat, 0660,
ctx_show,
ctx_store);
static struct kobj_attribute expimp_attribute = __ATTR(pid_stat, 0660,
expimp_show,
expimp_store);
int hab_stat_init_sub(struct hab_driver *driver)
{
int result;
hab_kobject = kobject_create_and_add("hab", kernel_kobj);
if (!hab_kobject)
return -ENOMEM;
result = sysfs_create_file(hab_kobject, &vchan_attribute.attr);
if (result)
pr_debug("cannot add vchan in /sys/kernel/hab %d\n", result);
result = sysfs_create_file(hab_kobject, &ctx_attribute.attr);
if (result)
pr_debug("cannot add ctx in /sys/kernel/hab %d\n", result);
result = sysfs_create_file(hab_kobject, &expimp_attribute.attr);
if (result)
pr_debug("cannot add expimp in /sys/kernel/hab %d\n", result);
return result;
}
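/*
 * Illustrative sysfs usage sketch for the nodes created above (paths assume
 * the default kernel_kobj location, i.e. /sys/kernel/hab):
 *
 *   cat /sys/kernel/hab/vchan_stat             # dump virtual channel stats
 *   cat /sys/kernel/hab/context_stat           # dump context stats
 *   echo <pid> > /sys/kernel/hab/pid_stat      # select a process of interest
 *   cat /sys/kernel/hab/pid_stat               # dump its export/import records
 *   echo dump_pipe > /sys/kernel/hab/pid_stat  # dump the pchan workqueue
 */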
int hab_stat_deinit_sub(struct hab_driver *driver)
{
sysfs_remove_file(hab_kobject, &vchan_attribute.attr);
sysfs_remove_file(hab_kobject, &ctx_attribute.attr);
sysfs_remove_file(hab_kobject, &expimp_attribute.attr);
kobject_put(hab_kobject);
return 0;
}
int dump_hab_get_file_name(char *file_time, int ft_size)
{
struct timespec64 time = {0};
unsigned long local_time;
struct rtc_time tm;
ktime_get_real_ts64(&time);
local_time = (unsigned long)(time.tv_sec - sys_tz.tz_minuteswest * 60);
rtc_time64_to_tm(local_time, &tm);
snprintf(file_time, ft_size, "%04d_%02d_%02d-%02d_%02d_%02d",
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
tm.tm_min, tm.tm_sec);
return 0;
}
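/*
 * Example of the resulting string (illustrative): for a local time of
 * 2023-04-04 16:20:30 the buffer holds "2023_04_04-16_20_30".
 */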

View File

@ -0,0 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __KHAB_TEST_H
#define __KHAB_TEST_H
int hab_perf_test(long testId);
#endif /* __KHAB_TEST_H */

View File

@ -0,0 +1,201 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab.h"
#include "hab_qvm.h"
#include "hab_trace_os.h"
static unsigned long long xvm_sche_tx_tv_buffer[2];
static void pipe_read_trace(struct qvm_channel *dev,
int size, int ret)
{
struct hab_pipe_endpoint *ep = dev->pipe_ep;
struct hab_shared_buf *sh_buf = dev->rx_buf;
struct dbg_items *its = dev->dbg_itms;
struct dbg_item *it = &its->it[its->idx];
it->rd_cnt = sh_buf->rd_count;
it->wr_cnt = sh_buf->wr_count;
it->va = (void *)&sh_buf->data[ep->rx_info.index];
it->index = ep->rx_info.index;
it->sz = size;
it->ret = ret;
its->idx++;
if (its->idx >= DBG_ITEM_SIZE)
its->idx = 0;
}
/* this is only used to read payload, never the head! */
int physical_channel_read(struct physical_channel *pchan,
void *payload,
size_t read_size)
{
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
if (dev) {
int ret = hab_pipe_read(dev->pipe_ep,
dev->rx_buf, PIPE_SHMEM_SIZE,
payload, read_size, 0);
/* log */
pipe_read_trace(dev, read_size, ret);
return ret;
} else
return 0;
}
int physical_channel_send(struct physical_channel *pchan,
struct hab_header *header,
void *payload)
{
size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
size_t total_size = sizeof(*header) + sizebytes;
uint32_t buf_size = PIPE_SHMEM_SIZE;
int irqs_disabled = irqs_disabled();
if (total_size > buf_size)
return -EINVAL; /* too much data for ring */
hab_spin_lock(&dev->io_lock, irqs_disabled);
trace_hab_pchan_send_start(pchan);
if ((buf_size -
(dev->pipe_ep->tx_info.wr_count -
dev->tx_buf->rd_count)) < total_size) {
hab_spin_unlock(&dev->io_lock, irqs_disabled);
return -EAGAIN; /* not enough free space */
}
header->sequence = pchan->sequence_tx + 1;
header->signature = HAB_HEAD_SIGNATURE;
if (hab_pipe_write(dev->pipe_ep, dev->tx_buf, buf_size,
(unsigned char *)header,
sizeof(*header)) != sizeof(*header)) {
hab_spin_unlock(&dev->io_lock, irqs_disabled);
pr_err("***incompleted pchan send id-type %x size %x session %d seq# %d\n",
header->id_type, header->payload_size,
header->session_id,
header->sequence);
return -EIO;
}
if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_PROFILE) {
struct timespec64 ts = {0};
struct habmm_xing_vm_stat *pstat =
(struct habmm_xing_vm_stat *)payload;
if (pstat) {
ktime_get_ts64(&ts);
pstat->tx_sec = ts.tv_sec;
pstat->tx_usec = ts.tv_nsec/NSEC_PER_USEC;
} else {
hab_spin_unlock(&dev->io_lock, irqs_disabled);
pr_err("***incompleted pchan send prof id-type %x size %x session %d seq# %d\n",
header->id_type, header->payload_size,
header->session_id,
header->sequence);
return -EINVAL;
}
} else if (HAB_HEADER_GET_TYPE(*header)
== HAB_PAYLOAD_TYPE_SCHE_RESULT_REQ) {
((unsigned long long *)payload)[0] = xvm_sche_tx_tv_buffer[0];
} else if (HAB_HEADER_GET_TYPE(*header)
== HAB_PAYLOAD_TYPE_SCHE_RESULT_RSP) {
((unsigned long long *)payload)[2] = xvm_sche_tx_tv_buffer[1];
}
if (sizebytes) {
if (hab_pipe_write(dev->pipe_ep, dev->tx_buf, buf_size,
(unsigned char *)payload,
sizebytes) != sizebytes) {
hab_spin_unlock(&dev->io_lock, irqs_disabled);
pr_err("***incompleted pchan send id-type %x size %x session %d seq# %d\n",
header->id_type, header->payload_size,
header->session_id,
header->sequence);
return -EIO;
}
}
hab_pipe_write_commit(dev->pipe_ep, dev->tx_buf);
/* locally +1 as late as possible but before unlock */
++pchan->sequence_tx;
trace_hab_pchan_send_done(pchan);
hab_spin_unlock(&dev->io_lock, irqs_disabled);
if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_SCHE_MSG)
xvm_sche_tx_tv_buffer[0] = msm_timer_get_sclk_ticks();
else if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_SCHE_MSG_ACK)
xvm_sche_tx_tv_buffer[1] = msm_timer_get_sclk_ticks();
habhyp_notify(dev);
return 0;
}
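/*
 * Shared-memory ring sketch (illustrative, restating the checks above):
 * wr_count and rd_count are free-running byte counters, so with a ring of
 * PIPE_SHMEM_SIZE bytes the producer may only write when
 *
 *   PIPE_SHMEM_SIZE - (tx_info.wr_count - tx_buf->rd_count) >=
 *           sizeof(struct hab_header) + payload_size
 *
 * The header and payload are written back to back, hab_pipe_write_commit()
 * publishes the new write count, and habhyp_notify() kicks the peer.
 */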
void physical_channel_rx_dispatch(unsigned long data)
{
struct hab_header header;
struct physical_channel *pchan = (struct physical_channel *)data;
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
int irqs_disabled = irqs_disabled();
uint32_t buf_size = PIPE_SHMEM_SIZE;
hab_spin_lock(&pchan->rxbuf_lock, irqs_disabled);
while (1) {
uint32_t rd, wr, idx;
int ret;
ret = hab_pipe_read(dev->pipe_ep,
dev->rx_buf, buf_size,
(unsigned char *)&header,
sizeof(header), 1); /* clear head after read */
/* debug */
pipe_read_trace(dev, sizeof(header), ret);
if (ret == 0xFFFFFFFF) { /* signature mismatched first time */
hab_pipe_rxinfo(dev->pipe_ep, dev->rx_buf, &rd, &wr, &idx);
pr_err("!!!!! HAB signature mismatch expect %X received %X, id_type %X size %X session %X sequence %X\n",
HAB_HEAD_SIGNATURE, header.signature,
header.id_type,
header.payload_size,
header.session_id,
header.sequence);
pr_err("!!!!! rxinfo rd %d wr %d index %X\n",
rd, wr, idx);
memcpy(dev->side_buf,
(void *)&dev->rx_buf->data[0],
buf_size);
hab_spin_unlock(&pchan->rxbuf_lock, irqs_disabled);
/* cannot run in elevated context */
dump_hab_wq(pchan);
hab_spin_lock(&pchan->rxbuf_lock, irqs_disabled);
} else if (ret == 0xFFFFFFFE) { /* continuous signature mismatches */
continue;
} else if (ret != sizeof(header))
break; /* no data available */
pchan->sequence_rx = header.sequence;
/* log msg recv timestamp: enter pchan dispatcher */
trace_hab_pchan_recv_start(pchan);
hab_msg_recv(pchan, &header);
}
hab_spin_unlock(&pchan->rxbuf_lock, irqs_disabled);
}
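/*
 * Receive-path sketch (illustrative): the dispatcher keeps pulling
 * sizeof(struct hab_header) bytes out of the ring; hab_msg_recv() then
 * consumes the payload that follows (physical_channel_read() above is the
 * payload-only reader). The sentinel return values 0xFFFFFFFF (first
 * signature mismatch, dump state) and 0xFFFFFFFE (subsequent mismatches,
 * keep scanning) come from hab_pipe_read().
 */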

View File

@ -0,0 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "hab_qvm.h"
inline void habhyp_notify(void *commdev)
{
struct qvm_channel *dev = (struct qvm_channel *)commdev;
if (dev && dev->guest_ctrl)
dev->guest_ctrl->notify = ~0;
}

140
include/linux/guest_shm.h Normal file
View File

@ -0,0 +1,140 @@
/*
* Copyright 2018, QNX Software Systems Limited (QSS).
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Additional Patent Grant
*
* QSS hereby grants to you a perpetual, worldwide, non-exclusive,
* no-charge, irrevocable (except as stated in this section) patent
* license to make, have made, use, offer to sell, sell, import,
* transfer, and otherwise run, modify and propagate the contents of this
* header file (Implementation) , where such license applies
* only to those patent claims, both currently owned by QSS and
* acquired in the future, licensable by QSS that are necessarily
* infringed by this Implementation. This grant does
* not include claims that would be infringed only as a consequence of
* further modification of this Implementation. If you or your agent or
* exclusive licensee institute or order or agree to the institution of
* patent litigation against any entity (including a cross-claim or
* counterclaim in a lawsuit) alleging that this Implementation constitutes
* direct or contributory patent infringement, or inducement of patent
* infringement, then any patent rights granted to you under this license for
* this Implementation shall terminate as of the date such litigation is filed.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*/
/**
* @file
* definitions guest shared memory device
*/
#ifndef _QVM_GUEST_SHM_H
#define _QVM_GUEST_SHM_H
#ifdef __linux__
#include <linux/types.h>
#else
#include <stdint.h>
#endif
/*
* Temporary VID definition until the updated <pci/pci_id.h> propagates around
*/
#define PCI_VID_BlackBerry_QNX 0x1C05
#define PCI_DID_QNX_GUEST_SHM 0x0001
/** status of last creation request */
enum guest_shm_status {
GSS_OK, /**< creation succeeded */
GSS_UNKNOWN_FAILURE, /**< creation failed for an unknown reason */
GSS_NOMEM, /**< creation failed due to lack of memory */
GSS_CLIENT_MAX, /**< creation failed due to region already being used by the maximum number of guests */
GSS_ILLEGAL_NAME, /**< creation failed due to illegal region name */
GSS_NO_PERMISSION, /**< creation failed due to lack of permission */
GSS_DOES_NOT_EXIST, /**< A find request failed */
};
/** Maximum number of clients allowed to connect to a shared memory region */
#define GUEST_SHM_MAX_CLIENTS 16
/** Maximum length allowed for region name */
#define GUEST_SHM_MAX_NAME 32
/** Signature value to verify that vdev is present */
#define GUEST_SHM_SIGNATURE 0x4d534732474d5651
/** Register layout for factory registers */
struct guest_shm_factory {
uint64_t signature; /**< == GUEST_SHM_SIGNATURE (R/O) */
uint64_t shmem; /**< shared memory paddr (R/O) */
uint32_t vector; /**< interrupt vector number (R/O) */
uint32_t status; /**< status of last creation (R/O) */
uint32_t size; /**< requested size in 4K pages, write causes creation */
char name[GUEST_SHM_MAX_NAME]; /**< name of shared memory region */
uint32_t find; /**< find an existing shared memory connection */
};
/** Register layout for a region control page */
struct guest_shm_control {
uint32_t status; /**< lower 16 bits: pending notification bitset, upper 16 bits: current active clients (R/O) */
uint32_t idx; /**< connection index for this client (R/O) */
uint32_t notify; /**< write a bitset of clients to notify */
uint32_t detach; /**< write here to detach from the shared memory region */
};
static inline void
guest_shm_create(volatile struct guest_shm_factory *const __factory, unsigned const __size) {
/* Surround the size assignment with memory barriers so that
* the compiler doesn't try to shift the assignment before/after
* necessary bits (e.g. setting the name of the region) */
asm volatile( "" ::: "memory");
__factory->size = __size;
asm volatile( "" ::: "memory");
}
static inline void
guest_shm_find(volatile struct guest_shm_factory *const __factory, unsigned const __find_num) {
/* Surround the find assignment with memory barriers so that
* the compiler doesn't try to shift the assignment before/after
* necessary bits (e.g. setting the name of the region) */
asm volatile( "" ::: "memory");
__factory->find = __find_num;
asm volatile( "" ::: "memory");
}
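/*
 * Illustrative creation sequence (a sketch, not part of this header; the
 * factory pointer is assumed to be a mapping of the device's factory
 * registers, and "habshm"/npages are placeholder values):
 *
 *   if (factory->signature == GUEST_SHM_SIGNATURE) {
 *           unsigned i;
 *
 *           for (i = 0; i < sizeof("habshm"); i++)
 *                   factory->name[i] = "habshm"[i];
 *           guest_shm_create(factory, npages); // write to size triggers creation
 *           if (factory->status == GSS_OK)
 *                   pr_info("region at paddr %llx, irq vector %u\n",
 *                           factory->shmem, factory->vector);
 *   }
 */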
#endif
#if defined(__QNXNTO__) && defined(__USESRCVERSION)
#include <sys/srcversion.h>
__SRCVERSION("$URL$ $Rev$")
#endif

375
include/linux/habmm.h Normal file
View File

@ -0,0 +1,375 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef HABMM_H
#define HABMM_H
#include "linux/habmmid.h"
#define HAB_API_VER_DEF(_MAJOR_, _MINOR_) \
((_MAJOR_&0xFF)<<16 | (_MINOR_&0xFFF))
#define HAB_API_VER HAB_API_VER_DEF(1, 0)
#include <linux/types.h>
/* habmm_socket_open
*
* Description:
*
* Establish a communication channel between Virtual Machines. Blocks
* until the connection is established between sender and receiver.
* Client can call this API multiple times with the same name to connect
* to the same communication channel; the function returns a different context
* for every open, for proper resource allocation and client identification.
*
* Params:
* out handle - An opaque handle associated with a successful virtual channel
* creation
* in MM_ID - multimedia ID used to allocate the physical channels to
* service all the virtual channels created through this open
* in timeout - timeout value specified by the client to avoid blocking forever
* in flags - future extension
*
* Return:
* status (success/failure/timeout)
*
*/
/* single FE-BE connection multi-to-multi point to point matching (default) */
#define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE 0x00000000
/* one BE for one domU */
#define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_DOMU 0x00000001
/* one BE for all the domUs */
#define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_MULTI_DOMUS 0x00000002
int32_t habmm_socket_open(int32_t *handle, uint32_t mm_ip_id,
uint32_t timeout, uint32_t flags);
/* habmm_socket_close
*
* Description:
*
* Tear down the virtual channel that was established through habmm_socket_open
* and release all resources associated with it.
*
* Params:
*
* in handle - handle to the virtual channel created by habmm_socket_open
*
* Return:
* status - (success/failure)
*
*
*/
int32_t habmm_socket_close(int32_t handle);
/* habmm_socket_send
*
* Description:
*
* Send data over the virtual channel
*
* Params:
*
* in handle - handle created by habmm_socket_open
* in src_buff - data to be sent across the virtual channel
* inout size_bytes - size of the data to be sent. Either the whole packet is
* sent or not
* in flags - future extension
*
* Return:
* status (success/fail/disconnected)
*
*/
/* Non-blocking mode: function will return immediately with HAB_AGAIN
* if the send operation cannot be completed without blocking.
*/
#define HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING 0x00000001
/* Collect cross-VM stats: client provides stat-buffer large enough to allow 2
* sets of a 2-uint64_t pair to collect seconds and nano-seconds at the
* beginning of the stat-buffer. Stats are collected when the stat-buffer leaves
* VM1, then enters VM2
*/
#define HABMM_SOCKET_SEND_FLAGS_XING_VM_STAT 0x00000002
/* start to measure cross-vm schedule latency: VM1 send msg with this flag
* to VM2 to kick off the measurement. In the hab driver level, the VM1 hab
* driver shall record the time of schedule out with mpm_timer, and buffer
* it for later usage. The VM2 hab driver shall record the time of schedule
* in with mpm_timer and pass it to "habtest" application.
*/
#define HABMM_SOCKET_XVM_SCHE_TEST 0x00000004
/* VM2 responds this message to VM1 for HABMM_SOCKET_XVM_SCHE_TEST.
* In the hab driver level, the VM2 hab driver shall record the time of schedule
* out with mpm_timer, and buffer it for later usage; the VM1 hab driver
* shall record the time of schedule in with mpm_timer and pass it to "habtest"
* application.
*/
#define HABMM_SOCKET_XVM_SCHE_TEST_ACK 0x00000008
/* VM1 sends this message to VM2 asking it to collect all the mpm_timer values
* to calculate the scheduling latency between VM1 and VM2. In the hab driver
* level, the VM1 hab driver shall save the previously stored scheduling-out
* time to the message buffer
*/
#define HABMM_SOCKET_XVM_SCHE_RESULT_REQ 0x00000010
/* VM2 responds with this message to VM1 for HABMM_SOCKET_XVM_SCHE_RESULT_REQ.
* In the habtest application level, VM2 shall save the previously stored
* scheduling-in time into the message buffer; in the hab driver level, VM2
* shall save the previously stored scheduling-out time to the message
* buffer.
*/
#define HABMM_SOCKET_XVM_SCHE_RESULT_RSP 0x00000020
struct habmm_xing_vm_stat {
uint64_t tx_sec;
uint64_t tx_usec;
uint64_t rx_sec;
uint64_t rx_usec;
};
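/* Illustrative use of HABMM_SOCKET_SEND_FLAGS_XING_VM_STAT (a sketch; the
 * message layout beyond the leading stat block is the client's choice):
 *
 *   struct {
 *           struct habmm_xing_vm_stat stat; // filled in by HAB on TX and RX
 *           char payload[64];
 *   } msg = {0};
 *
 *   habmm_socket_send(handle, &msg, sizeof(msg),
 *                     HABMM_SOCKET_SEND_FLAGS_XING_VM_STAT);
 */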
int32_t habmm_socket_send(int32_t handle, void *src_buff, uint32_t size_bytes,
uint32_t flags);
/* habmm_socket_recv
*
* Description:
*
* Receive data over the virtual channel created by habmm_socket_open.
* Blocking until actual data is received or timeout value expires
*
* Params:
*
* in handle - communication channel created by habmm_socket_open
* inout dst_buff - buffer pointer to store received data
* inout size_bytes - size of the dst_buff. Returned value shows the actual
* bytes received.
* in timeout - timeout value specified by the client to avoid forever blocking,
* The unit of measurement is ms.
* 0 is immediately timeout; -1 is forever blocking.
* in flags - details as below.
*
*
* Return:
* status (success/failure/timeout/disconnected)
*
*/
/* Non-blocking mode: function will return immediately if there is no data
* available.
*/
#define HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING 0x00000001
/* In the blocking mode, this flag is used to indicate it is an
* uninterruptible blocking call.
*/
#define HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE 0x00000002
/* Enable the timeout function. This flag is used to indicate that the timeout
* function takes effect. Note that the timeout parameter is meaningful only if
* this flag is set; otherwise the timeout parameter is ignored.
* In addition, when the HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING flag is set,
* the current flag is ignored.
*/
#define HABMM_SOCKET_RECV_FLAGS_TIMEOUT 0x00000004
int32_t habmm_socket_recv(int32_t handle, void *dst_buff, uint32_t *size_bytes,
uint32_t timeout, uint32_t flags);
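/* Illustrative open/send/recv/close flow (a sketch; MM_MISC, the zero
 * timeouts and the fixed-size buffers are placeholder choices):
 *
 *   int32_t vc = 0;
 *   char req[32] = "ping";
 *   char rsp[32] = {0};
 *   uint32_t rsp_sz = sizeof(rsp);
 *
 *   if (!habmm_socket_open(&vc, MM_MISC, 0, 0)) {
 *           habmm_socket_send(vc, req, sizeof(req), 0);
 *           habmm_socket_recv(vc, rsp, &rsp_sz, 0, 0);
 *           habmm_socket_close(vc);
 *   }
 */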
/* habmm_socket_sendto
*
* Description:
*
* This is for backend only. Send data over the virtual channel to remote
* frontend virtual channel for multi-FEs-to-single-BE model when
* the BE virtual channel is created using
* HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_DOMU or
* HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_MULTI_DOMUS
*
* Params:
*
* in handle - handle created by habmm_socket_open
* in src_buff - data to be sent across the virtual channel
* inout size_bytes - size of the data to be sent. The packet is fully sent on
* success, or not sent at all upon any failure
* in remote_handle - the destination of this send using remote FE's virtual
* channel handle
* in flags - future extension
*
* Return:
* status (success/fail/disconnected)
*/
int32_t habmm_socket_sendto(int32_t handle, void *src_buff, uint32_t size_bytes,
int32_t remote_handle, uint32_t flags);
/* habmm_socket_recvfrom
*
* Description:
*
* Receive data over the virtual channel created by habmm_socket_open.
* Returned is the remote FE's virtual channel handle to be used for sendto.
* Blocking until actual data is received or timeout value expires. This is for
* BE running in multi-FEs-to-single-BE model when the BE virtual channel is
* created using HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_DOMU or
* HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_MULTI_DOMUS.
*
* Params:
*
* in handle - communication channel created by habmm_socket_open
* inout dst_buff - buffer pointer to store received data
* inout size_bytes - size of the dst_buff. Returned value shows the actual
* bytes received.
* in timeout - timeout value specified by the client to avoid blocking forever
* in remote_handle - the FE who sent this message through the
* connected virtual channel to BE.
* in flags - future extension
*
* Return:
* status (success/failure/timeout/disconnected)
*
*/
int32_t habmm_socket_recvfrom(int32_t handle, void *dst_buff,
uint32_t *size_bytes, uint32_t timeout,
int32_t *remote_handle, uint32_t flags);
/* exporting memory type DMA : This is platform dependent for user mode. If it
* does exist, HAB needs to use DMA method to retrieve the memory for exporting.
* If it does not exist, this flag is ignored.
*/
#define HABMM_EXP_MEM_TYPE_DMA 0x00000001
/*
* this flag is used for export from dma_buf fd or import to dma_buf fd
*/
#define HABMM_EXPIMP_FLAGS_FD 0x00010000
#define HABMM_EXPIMP_FLAGS_DMABUF 0x00020000
#define HAB_MAX_EXPORT_SIZE 0x8000000
/*
* Description:
*
* Prepare the sharing of the buffer on the exporter side. The returned
* reference ID needs to be sent to the importer separately.
* During the send, HAB will attach the actual exported buffer information.
* The exporting is per process space.
*
* Params:
*
* in handle - communication channel created by habmm_socket_open
* in buff_to_share - buffer to be exported
* in size_bytes - size of the exporting buffer in bytes
* out export_id - to be returned by this call upon success
* in flags - future extension
*
* Return:
* status (success/failure)
*
*/
int32_t habmm_export(int32_t handle, void *buff_to_share, uint32_t size_bytes,
uint32_t *export_id, uint32_t flags);
/*
* Description:
*
* Free any allocated resource associated with this export ID on the local side.
* Params:
*
* in handle - communication channel created by habmm_socket_open
* in export_id - all resource allocated with export_id are to be freed
* in flags - future extension
*
* Return:
* status (success/failure)
*
*/
int32_t habmm_unexport(int32_t handle, uint32_t export_id, uint32_t flags);
/*
* Description:
*
* Import the exporter's shared reference ID.
* The importing is per process space.
*
* Params:
*
* in handle - communication channel created by habmm_socket_open
* out buff_shared - buffer to be imported. returned upon success
* in size_bytes - size of the imported buffer in bytes. It should match the
* original exported buffer size
* in export_id - received when exporter sent its exporting ID through
* habmm_socket_send() previously
* in flags - future extension
*
* Return:
* status (success/failure)
*
*/
/* Non-blocking mode: function will return immediately if there is no data
* available. Supported only for kernel clients.
*/
#define HABMM_IMPORT_FLAGS_CACHED 0x00000001
int32_t habmm_import(int32_t handle, void **buff_shared, uint32_t size_bytes,
uint32_t export_id, uint32_t flags);
/*
* Description:
*
* Release any resource associated with the export ID on the importer side.
*
* Params:
*
* in handle - communication channel created by habmm_socket_open
* in export_id - received when exporter sent its exporting ID through
* habmm_socket_send() previously
* in buff_shared - received from habmm_import() together with export_id
* in flags - future extension
*
* Return:
* status (success/failure)
*
*/
int32_t habmm_unimport(int32_t handle, uint32_t export_id, void *buff_shared,
uint32_t flags);
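/* Illustrative export/import pairing (a sketch; how export_id travels to the
 * importer - here via habmm_socket_send() - is the client's choice):
 *
 *   Exporter:
 *     habmm_export(vc, buf, size, &export_id, 0);
 *     habmm_socket_send(vc, &export_id, sizeof(export_id), 0);
 *     ...
 *     habmm_unexport(vc, export_id, 0);
 *
 *   Importer:
 *     habmm_socket_recv(vc, &export_id, &id_sz, 0, 0);
 *     habmm_import(vc, &va, size, export_id, 0);
 *     ...
 *     habmm_unimport(vc, export_id, va, 0);
 */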
/*
* Description:
*
* Query various information of the opened hab socket.
*
* Params:
*
* in handle - communication channel created by habmm_socket_open
* in habmm_socket_info - retrieve socket information regarding local and remote
* VMs
* in flags - future extension
*
* Return:
* status (success/failure)
*
*/
#define VMNAME_SIZE 12
struct hab_socket_info {
int32_t vmid_remote; /* habmm's vmid */
int32_t vmid_local;
/* name from hypervisor framework if available */
char vmname_remote[VMNAME_SIZE];
char vmname_local[VMNAME_SIZE];
};
int32_t habmm_socket_query(int32_t handle, struct hab_socket_info *info,
uint32_t flags);
#endif /* HABMM_H */

View File

@ -0,0 +1,110 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2016-2018, 2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _HAB_IOCTL_H
#define _HAB_IOCTL_H
#include <linux/types.h>
struct hab_send {
__u64 data;
__s32 vcid;
__u32 sizebytes;
__u32 flags;
};
struct hab_recv {
__u64 data;
__s32 vcid;
__u32 sizebytes;
__u32 timeout;
__u32 flags;
};
struct hab_open {
__s32 vcid;
__u32 mmid;
__u32 timeout;
__u32 flags;
};
struct hab_close {
__s32 vcid;
__u32 flags;
};
struct hab_export {
__u64 buffer;
__s32 vcid;
__u32 sizebytes;
__u32 exportid;
__u32 flags;
};
struct hab_import {
__u64 index;
__u64 kva;
__s32 vcid;
__u32 sizebytes;
__u32 exportid;
__u32 flags;
};
struct hab_unexport {
__s32 vcid;
__u32 exportid;
__u32 flags;
};
struct hab_unimport {
__s32 vcid;
__u32 exportid;
__u64 kva;
__u32 flags;
};
struct hab_info {
__s32 vcid;
__u64 ids; /* high part remote; low part local */
__u64 names;
__u32 namesize; /* single name length */
__u32 flags;
};
struct vhost_hab_config {
__u8 vm_name[32];
};
#define HAB_IOC_TYPE 0x0A
#define IOCTL_HAB_SEND \
_IOW(HAB_IOC_TYPE, 0x2, struct hab_send)
#define IOCTL_HAB_RECV \
_IOWR(HAB_IOC_TYPE, 0x3, struct hab_recv)
#define IOCTL_HAB_VC_OPEN \
_IOWR(HAB_IOC_TYPE, 0x4, struct hab_open)
#define IOCTL_HAB_VC_CLOSE \
_IOW(HAB_IOC_TYPE, 0x5, struct hab_close)
#define IOCTL_HAB_VC_EXPORT \
_IOWR(HAB_IOC_TYPE, 0x6, struct hab_export)
#define IOCTL_HAB_VC_IMPORT \
_IOWR(HAB_IOC_TYPE, 0x7, struct hab_import)
#define IOCTL_HAB_VC_UNEXPORT \
_IOW(HAB_IOC_TYPE, 0x8, struct hab_unexport)
#define IOCTL_HAB_VC_UNIMPORT \
_IOW(HAB_IOC_TYPE, 0x9, struct hab_unimport)
#define IOCTL_HAB_VC_QUERY \
_IOWR(HAB_IOC_TYPE, 0xA, struct hab_info)
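/* Illustrative user-space call sequence (a sketch; the device node name is a
 * placeholder and depends on the platform integration, as do mmid, buf and
 * len):
 *
 *   struct hab_open open_arg = { .mmid = mmid, .timeout = ~0U, .flags = 0 };
 *   int fd = open("/dev/hab-...", O_RDWR);
 *
 *   if (fd >= 0 && !ioctl(fd, IOCTL_HAB_VC_OPEN, &open_arg)) {
 *           struct hab_send send_arg = {
 *                   .vcid = open_arg.vcid,
 *                   .data = (__u64)(uintptr_t)buf,
 *                   .sizebytes = len,
 *           };
 *           ioctl(fd, IOCTL_HAB_SEND, &send_arg);
 *           ioctl(fd, IOCTL_HAB_VC_CLOSE,
 *                 &(struct hab_close){ .vcid = open_arg.vcid });
 *   }
 */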
#endif /* _HAB_IOCTL_H */

View File

@ -0,0 +1,87 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef HABMMID_H
#define HABMMID_H
#define HAB_MMID_CREATE(major, minor) ((major&0xFFFF) | ((minor&0xFF)<<16))
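/* Example (illustrative): HAB_MMID_CREATE(MM_GFX, 2) packs major 401 (0x191)
 * into the low 16 bits and minor 2 into bits 16..23, i.e. 0x00020191.
 */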
#define MM_AUD_START 100
#define MM_AUD_1 101
#define MM_AUD_2 102
#define MM_AUD_3 103
#define MM_AUD_4 104
#define MM_AUD_END 105
#define MM_CAM_START 200
#define MM_CAM_1 201
#define MM_CAM_2 202
#define MM_CAM_END 203
#define MM_DISP_START 300
#define MM_DISP_1 301
#define MM_DISP_2 302
#define MM_DISP_3 303
#define MM_DISP_4 304
#define MM_DISP_5 305
#define MM_DISP_END 306
#define MM_GFX_START 400
#define MM_GFX 401
#define MM_GFX_END 402
#define MM_VID_START 500
#define MM_VID 501
#define MM_VID_2 502
#define MM_VID_3 503
#define MM_VID_END 504
#define MM_MISC_START 600
#define MM_MISC 601
#define MM_MISC_END 602
#define MM_QCPE_START 700
#define MM_QCPE_VM1 701
#define MM_QCPE_END 702
#define MM_CLK_START 800
#define MM_CLK_VM1 801
#define MM_CLK_VM2 802
#define MM_CLK_END 803
#define MM_FDE_START 900
#define MM_FDE_1 901
#define MM_FDE_END 902
#define MM_BUFFERQ_START 1000
#define MM_BUFFERQ_1 1001
#define MM_BUFFERQ_END 1002
#define MM_DATA_START 1100
#define MM_DATA_NETWORK_1 1101
#define MM_DATA_NETWORK_2 1102
#define MM_DATA_END 1103
#define MM_HSI2S_START 1200
#define MM_HSI2S_1 1201
#define MM_HSI2S_END 1202
#define MM_XVM_START 1300
#define MM_XVM_1 1301
#define MM_XVM_2 1302
#define MM_XVM_3 1303
#define MM_XVM_END 1304
#define MM_VNW_START 1400
#define MM_VNW_1 1401
#define MM_VNW_END 1402
#define MM_EXT_START 1500
#define MM_EXT_1 1501
#define MM_EXT_END 1502
#define MM_ID_MAX 1503
#endif /* HABMMID_H */