soc: qcom: add HAB driver

Add snapshot for HAB from msm-4.14 commit b1ff2848541a
 (soc: qcom: hab: fix memory leak).

Change-Id: I88cdfd2c3530320b80a64f294b59611d4a80cc08
Signed-off-by: Yong Ding <yongding@codeaurora.org>

@@ -601,6 +601,8 @@ config QCOM_GLINK_PKT
This enables userspace clients to read and write to
some glink packet channels.
source "drivers/soc/qcom/hab/Kconfig"
config MSM_PERFORMANCE
tristate "msm performance driver to support userspace fmin/fmax request"
help

@@ -90,6 +90,7 @@ obj-$(CONFIG_QTI_CRYPTO_TZ) += crypto-qti-tz.o
obj-$(CONFIG_QTI_HW_KEY_MANAGER) += hwkm.o crypto-qti-hwkm.o
obj-$(CONFIG_QCOM_WDT_CORE) += qcom_wdt_core.o
obj-$(CONFIG_QCOM_SOC_WATCHDOG) += qcom_soc_wdt.o
obj-$(CONFIG_MSM_HAB) += hab/
ifdef CONFIG_DEBUG_FS
obj-$(CONFIG_MSM_RPM_SMD) += rpm-smd-debug.o
endif

@@ -0,0 +1,22 @@
# SPDX-License-Identifier: GPL-2.0-only
#
# HAB configuration
#
config MSM_HAB
bool "Enable Hypervisor ABstraction Layer"
select WANT_DEV_COREDUMP
help
The HAB (Hypervisor ABstraction) driver provides message
transmission and memory sharing services among different OSes.
Internally, HAB relies on a communication mechanism specific to
the underlying hypervisor.
It is required to support virtualization of some multimedia
and platform devices on MSM targets.
config MSM_HAB_DEFAULT_VMID
int
default 2
help
The default HAB VMID.
It is not used when another configuration source, e.g., the
device tree, is available.

@@ -0,0 +1,39 @@
# SPDX-License-Identifier: GPL-2.0-only
msm_hab-objs = \
hab.o \
hab_msg.o \
hab_vchan.o \
hab_pchan.o \
hab_open.o \
hab_mimex.o \
hab_pipe.o \
hab_parser.o \
hab_stat.o
msm_hab_linux-objs = \
khab.o \
hab_linux.o \
hab_mem_linux.o \
khab_test.o
ifdef CONFIG_GHS_VMM
msm_hab_hyp-objs = \
ghs_comm.o \
ghs_comm_linux.o \
hab_ghs.o \
hab_ghs_linux.o
else
ifdef CONFIG_QTI_QUIN_GVM
msm_hab_hyp-objs = \
qvm_comm.o \
qvm_comm_linux.o \
hab_qvm.o \
hab_qvm_linux.o
else
msm_hab_hyp-objs = \
hab_comm.o \
hyp_stub.o
endif
endif
obj-$(CONFIG_MSM_HAB) += msm_hab.o msm_hab_linux.o msm_hab_hyp.o

@@ -0,0 +1,122 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
#include "hab_ghs.h"
int physical_channel_read(struct physical_channel *pchan,
void *payload,
size_t read_size)
{
struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
/* size in header is only for payload excluding the header itself */
if (dev->read_size < read_size + sizeof(struct hab_header)) {
pr_warn("read %zd is less than requested %zd plus header %zd\n",
dev->read_size, read_size, sizeof(struct hab_header));
read_size = dev->read_size;
}
/* always skip the header */
memcpy(payload, (unsigned char *)dev->read_data +
sizeof(struct hab_header) + dev->read_offset, read_size);
dev->read_offset += read_size;
return read_size;
}
int physical_channel_send(struct physical_channel *pchan,
struct hab_header *header,
void *payload)
{
size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
GIPC_Result result;
uint8_t *msg;
int irqs_disabled = irqs_disabled();
hab_spin_lock(&dev->io_lock, irqs_disabled);
result = hab_gipc_wait_to_send(dev->endpoint);
if (result != GIPC_Success) {
hab_spin_unlock(&dev->io_lock, irqs_disabled);
pr_err("failed to wait to send %d\n", result);
return -EBUSY;
}
result = GIPC_PrepareMessage(dev->endpoint, sizebytes+sizeof(*header),
(void **)&msg);
if (result == GIPC_Full) {
hab_spin_unlock(&dev->io_lock, irqs_disabled);
/* need to wait for space! */
pr_err("failed to reserve send msg for %zd bytes\n",
sizebytes+sizeof(*header));
return -EBUSY;
} else if (result != GIPC_Success) {
hab_spin_unlock(&dev->io_lock, irqs_disabled);
pr_err("failed to send due to error %d\n", result);
return -ENOMEM;
}
if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_PROFILE) {
struct timespec64 ts = {0};
struct habmm_xing_vm_stat *pstat =
(struct habmm_xing_vm_stat *)payload;
ktime_get_ts64(&ts);
pstat->tx_sec = ts.tv_sec;
pstat->tx_usec = ts.tv_nsec/NSEC_PER_USEC;
}
memcpy(msg, header, sizeof(*header));
if (sizebytes)
memcpy(msg+sizeof(*header), payload, sizebytes);
result = GIPC_IssueMessage(dev->endpoint, sizebytes+sizeof(*header),
header->id_type_size);
hab_spin_unlock(&dev->io_lock, irqs_disabled);
if (result != GIPC_Success) {
pr_err("send error %d, sz %zd, prot %x\n",
result, sizebytes+sizeof(*header),
header->id_type_size);
return -EAGAIN;
}
return 0;
}
void physical_channel_rx_dispatch_common(unsigned long physical_channel)
{
struct hab_header header;
struct physical_channel *pchan =
(struct physical_channel *)physical_channel;
struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
GIPC_Result result;
int irqs_disabled = irqs_disabled();
hab_spin_lock(&pchan->rxbuf_lock, irqs_disabled);
while (1) {
dev->read_size = 0;
dev->read_offset = 0;
result = GIPC_ReceiveMessage(dev->endpoint,
dev->read_data,
GIPC_RECV_BUFF_SIZE_BYTES,
&dev->read_size,
&header.id_type_size);
if (result == GIPC_Success || dev->read_size > 0) {
/* handle corrupted msg? */
hab_msg_recv(pchan, dev->read_data);
continue;
} else if (result == GIPC_Empty) {
/* no more pending msg */
break;
}
pr_err("recv unhandled result %d, size %zd\n",
result, dev->read_size);
break;
}
hab_spin_unlock(&pchan->rxbuf_lock, irqs_disabled);
}
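
On the wire, each GIPC message is one struct hab_header immediately followed by HAB_HEADER_GET_SIZE(*header) bytes of payload; physical_channel_read() above always skips the header again on the receive side. A minimal sender sketch (hypothetical helper, not part of this commit) built on the accessor macros from hab.h:

/*
 * Hypothetical helper: pack a header with the hab.h accessor
 * macros, then push header + payload out as one GIPC message.
 * vchan/pchan setup is assumed to have happened elsewhere.
 */
static int example_send(struct physical_channel *pchan,
		struct virtual_channel *vchan,
		void *payload, size_t bytes)
{
	struct hab_header header = HAB_HEADER_INITIALIZER;

	HAB_HEADER_SET_SIZE(header, bytes);	/* payload bytes only */
	HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_MSG);
	HAB_HEADER_SET_ID(header, vchan->otherend_id);
	HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);

	return physical_channel_send(pchan, &header, payload);
}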

@@ -0,0 +1,40 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
#include "hab_ghs.h"
inline int hab_gipc_wait_to_send(GIPC_Endpoint endpoint)
{
(void)endpoint;
return GIPC_Success;
}
void physical_channel_rx_dispatch(unsigned long physical_channel)
{
struct physical_channel *pchan =
(struct physical_channel *)physical_channel;
struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
uint32_t events;
unsigned long flags;
spin_lock_irqsave(&pchan->rxbuf_lock, flags);
events = kgipc_dequeue_events(dev->endpoint);
spin_unlock_irqrestore(&pchan->rxbuf_lock, flags);
if (events & (GIPC_EVENT_RESET))
pr_err("hab gipc %s remote vmid %d RESET\n",
dev->name, pchan->vmid_remote);
if (events & (GIPC_EVENT_RESETINPROGRESS))
pr_err("hab gipc %s remote vmid %d RESETINPROGRESS\n",
dev->name, pchan->vmid_remote);
if (events & (GIPC_EVENT_RECEIVEREADY))
physical_channel_rx_dispatch_common(physical_channel);
if (events & (GIPC_EVENT_SENDREADY))
pr_debug("kgipc send ready\n");
}

drivers/soc/qcom/hab/hab.c: new file, 1070 lines (diff suppressed because it is too large)

drivers/soc/qcom/hab/hab.h: new file, 613 lines

@@ -0,0 +1,613 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*/
#ifndef __HAB_H
#define __HAB_H
#include "hab_os.h" /* OS-specific part in the core header file */
enum hab_payload_type {
HAB_PAYLOAD_TYPE_MSG = 0x0,
HAB_PAYLOAD_TYPE_INIT,
HAB_PAYLOAD_TYPE_INIT_ACK,
HAB_PAYLOAD_TYPE_INIT_DONE,
HAB_PAYLOAD_TYPE_EXPORT,
HAB_PAYLOAD_TYPE_EXPORT_ACK,
HAB_PAYLOAD_TYPE_PROFILE,
HAB_PAYLOAD_TYPE_CLOSE,
HAB_PAYLOAD_TYPE_INIT_CANCEL,
HAB_PAYLOAD_TYPE_SCHE_MSG,
HAB_PAYLOAD_TYPE_SCHE_MSG_ACK,
HAB_PAYLOAD_TYPE_SCHE_RESULT_REQ,
HAB_PAYLOAD_TYPE_SCHE_RESULT_RSP,
HAB_PAYLOAD_TYPE_MAX,
};
#define LOOPBACK_DOM 0xFF
/*
* Tuning required. If there are multiple clients, the aging of previous
* "request" might be discarded
*/
#define Q_AGE_THRESHOLD 1000000
/* match the name to the dtsi entry when running on a real HYP framework */
#define DEVICE_AUD1_NAME "hab_aud1"
#define DEVICE_AUD2_NAME "hab_aud2"
#define DEVICE_AUD3_NAME "hab_aud3"
#define DEVICE_AUD4_NAME "hab_aud4"
#define DEVICE_CAM1_NAME "hab_cam1"
#define DEVICE_CAM2_NAME "hab_cam2"
#define DEVICE_DISP1_NAME "hab_disp1"
#define DEVICE_DISP2_NAME "hab_disp2"
#define DEVICE_DISP3_NAME "hab_disp3"
#define DEVICE_DISP4_NAME "hab_disp4"
#define DEVICE_DISP5_NAME "hab_disp5"
#define DEVICE_GFX_NAME "hab_ogles"
#define DEVICE_VID_NAME "hab_vid"
#define DEVICE_VID2_NAME "hab_vid2"
#define DEVICE_MISC_NAME "hab_misc"
#define DEVICE_QCPE1_NAME "hab_qcpe_vm1"
#define DEVICE_CLK1_NAME "hab_clock_vm1"
#define DEVICE_CLK2_NAME "hab_clock_vm2"
#define DEVICE_FDE1_NAME "hab_fde1"
#define DEVICE_BUFFERQ1_NAME "hab_bufferq1"
#define DEVICE_DATA1_NAME "hab_data_network1"
#define DEVICE_DATA2_NAME "hab_data_network2"
#define DEVICE_HSI2S1_NAME "hab_hsi2s1"
/* make sure the concatenated name is shorter than this value */
#define MAX_VMID_NAME_SIZE 30
#define HABCFG_FILE_SIZE_MAX 256
#define HABCFG_MMID_AREA_MAX (MM_ID_MAX/100)
#define HABCFG_VMID_MAX 16
#define HABCFG_VMID_INVALID (-1)
#define HABCFG_VMID_DONT_CARE (-2)
#define HABCFG_ID_LINE_LIMIT ","
#define HABCFG_ID_VMID "VMID="
#define HABCFG_ID_BE "BE="
#define HABCFG_ID_FE "FE="
#define HABCFG_ID_MMID "MMID="
#define HABCFG_ID_RANGE "-"
#define HABCFG_ID_DONTCARE "X"
#define HABCFG_FOUND_VMID 1
#define HABCFG_FOUND_FE_MMIDS 2
#define HABCFG_FOUND_BE_MMIDS 3
#define HABCFG_FOUND_NOTHING (-1)
#define HABCFG_BE_FALSE 0
#define HABCFG_BE_TRUE 1
#define HABCFG_GET_VMID(_local_cfg_, _vmid_) \
((settings)->vmid_mmid_list[_vmid_].vmid)
#define HABCFG_GET_MMID(_local_cfg_, _vmid_, _mmid_) \
((settings)->vmid_mmid_list[_vmid_].mmid[_mmid_])
#define HABCFG_GET_BE(_local_cfg_, _vmid_, _mmid_) \
((settings)->vmid_mmid_list[_vmid_].is_listener[_mmid_])
struct hab_header {
uint32_t id_type_size;
uint32_t session_id;
uint32_t signature;
uint32_t sequence;
} __packed;
/* "Size" of the HAB_HEADER_ID and HAB_VCID_ID must match */
#define HAB_HEADER_SIZE_SHIFT 0
#define HAB_HEADER_TYPE_SHIFT 16
#define HAB_HEADER_ID_SHIFT 20
#define HAB_HEADER_SIZE_MASK 0x0000FFFF
#define HAB_HEADER_TYPE_MASK 0x000F0000
#define HAB_HEADER_ID_MASK 0xFFF00000
#define HAB_HEADER_INITIALIZER {0}
#define HAB_MMID_GET_MAJOR(mmid) (mmid & 0xFFFF)
#define HAB_MMID_GET_MINOR(mmid) ((mmid>>16) & 0xFF)
#define HAB_VCID_ID_SHIFT 0
#define HAB_VCID_DOMID_SHIFT 12
#define HAB_VCID_MMID_SHIFT 20
#define HAB_VCID_ID_MASK 0x00000FFF
#define HAB_VCID_DOMID_MASK 0x000FF000
#define HAB_VCID_MMID_MASK 0xFFF00000
#define HAB_VCID_GET_ID(vcid) \
(((vcid) & HAB_VCID_ID_MASK) >> HAB_VCID_ID_SHIFT)
#define HAB_HEADER_SET_SESSION_ID(header, sid) \
((header).session_id = (sid))
#define HAB_HEADER_SET_SIZE(header, size) \
((header).id_type_size = ((header).id_type_size & \
(~HAB_HEADER_SIZE_MASK)) | \
(((size) << HAB_HEADER_SIZE_SHIFT) & \
HAB_HEADER_SIZE_MASK))
#define HAB_HEADER_SET_TYPE(header, type) \
((header).id_type_size = ((header).id_type_size & \
(~HAB_HEADER_TYPE_MASK)) | \
(((type) << HAB_HEADER_TYPE_SHIFT) & \
HAB_HEADER_TYPE_MASK))
#define HAB_HEADER_SET_ID(header, id) \
((header).id_type_size = ((header).id_type_size & \
(~HAB_HEADER_ID_MASK)) | \
((HAB_VCID_GET_ID(id) << HAB_HEADER_ID_SHIFT) & \
HAB_HEADER_ID_MASK))
#define HAB_HEADER_GET_SIZE(header) \
(((header).id_type_size & \
HAB_HEADER_SIZE_MASK) >> HAB_HEADER_SIZE_SHIFT)
#define HAB_HEADER_GET_TYPE(header) \
(((header).id_type_size & \
HAB_HEADER_TYPE_MASK) >> HAB_HEADER_TYPE_SHIFT)
#define HAB_HEADER_GET_ID(header) \
((((header).id_type_size & HAB_HEADER_ID_MASK) >> \
(HAB_HEADER_ID_SHIFT - HAB_VCID_ID_SHIFT)) & HAB_VCID_ID_MASK)
#define HAB_HEADER_GET_SESSION_ID(header) ((header).session_id)
#define HAB_HS_TIMEOUT (10*1000*1000)
struct physical_channel {
struct list_head node;
char name[MAX_VMID_NAME_SIZE];
int is_be;
struct kref refcount;
struct hab_device *habdev;
struct idr vchan_idr;
spinlock_t vid_lock;
struct idr expid_idr;
spinlock_t expid_lock;
void *hyp_data;
int dom_id; /* BE role: remote vmid; FE role: don't care */
int vmid_local; /* from DT or hab_config */
int vmid_remote;
char vmname_local[12]; /* from DT */
char vmname_remote[12];
int closed;
spinlock_t rxbuf_lock;
/* debug only */
uint32_t sequence_tx;
uint32_t sequence_rx;
uint32_t status;
/* vchans on this pchan */
struct list_head vchannels;
int vcnt;
rwlock_t vchans_lock;
};
/* this payload has to be used together with type */
struct hab_open_send_data {
int vchan_id;
int sub_id;
int open_id;
int ver_fe;
int ver_be;
int reserved;
};
struct hab_open_request {
int type;
struct physical_channel *pchan;
struct hab_open_send_data xdata;
};
struct hab_open_node {
struct hab_open_request request;
struct list_head node;
int64_t age; /* sec */
};
struct hab_export_ack {
uint32_t export_id;
int32_t vcid_local;
int32_t vcid_remote;
};
struct hab_export_ack_recvd {
struct hab_export_ack ack;
struct list_head node;
int age;
};
struct hab_message {
struct list_head node;
size_t sizebytes;
uint32_t data[];
};
/* for all the pchans of same kind */
struct hab_device {
char name[MAX_VMID_NAME_SIZE];
uint32_t id;
struct list_head pchannels;
int pchan_cnt;
spinlock_t pchan_lock;
struct list_head openq_list; /* received */
spinlock_t openlock;
wait_queue_head_t openq;
int openq_cnt;
};
struct uhab_context {
struct list_head node; /* managed by the driver */
struct kref refcount;
struct list_head vchannels;
int vcnt;
struct list_head exp_whse;
uint32_t export_total;
wait_queue_head_t exp_wq;
struct list_head exp_rxq;
rwlock_t exp_lock;
spinlock_t expq_lock;
struct list_head imp_whse;
spinlock_t imp_lock;
uint32_t import_total;
void *import_ctx;
struct list_head pending_open; /* sent to remote */
int pending_cnt;
rwlock_t ctx_lock;
int closing;
int kernel;
int owner;
int lb_be; /* loopback only */
};
/*
* array describing the VM and its MMID configuration as seen from
* the peer it is connected to, i.e. it describes a pchan's remote side
*/
struct vmid_mmid_desc {
int vmid; /* remote vmid */
int mmid[HABCFG_MMID_AREA_MAX+1]; /* selected or not */
int is_listener[HABCFG_MMID_AREA_MAX+1]; /* yes or no */
};
struct local_vmid {
int32_t self; /* only this field is for local */
struct vmid_mmid_desc vmid_mmid_list[HABCFG_VMID_MAX];
};
struct hab_driver {
struct device *dev; /* mmid dev list */
struct cdev cdev;
dev_t major;
struct class *class;
int ndevices;
struct hab_device *devp;
struct uhab_context *kctx;
struct list_head uctx_list;
int ctx_cnt;
spinlock_t drvlock;
struct list_head imp_list;
int imp_cnt;
spinlock_t imp_lock;
struct local_vmid settings; /* parser results */
int b_server_dom;
int b_loopback_be; /* only allow 2 apps simultaneously 1 fe 1 be */
int b_loopback;
void *hyp_priv; /* hypervisor plug-in storage */
void *hab_vmm_handle;
};
struct virtual_channel {
struct list_head node; /* for ctx */
struct list_head pnode; /* for pchan */
/*
* refcount is used to track the references from hab core to the virtual
* channel such as references from physical channels,
* i.e. references from the "other" side
*/
struct kref refcount;
struct physical_channel *pchan;
struct uhab_context *ctx;
struct list_head rx_list;
wait_queue_head_t rx_queue;
spinlock_t rx_lock;
int id;
int otherend_id;
int otherend_closed;
uint32_t session_id;
/*
* set when local close() is called explicitly. The vchan could be
* in use on the hab-recv-msg() path (2) when close() is called (1);
* this is then the same case as close() not being called on the
* no-msg path
*/
int closed;
int forked; /* set if fork is detected; assumed to happen only once */
};
/*
* Struct shared between local and remote; contents
* are composed by the exporter, the importer only writes
* to pdata and the local (exporter) domID
*/
struct export_desc {
uint32_t export_id;
int readonly;
uint64_t import_index;
struct virtual_channel *vchan; /* vchan could be freed earlier */
struct uhab_context *ctx;
struct physical_channel *pchan;
int32_t vcid_local;
int32_t vcid_remote;
int domid_local;
int domid_remote;
int flags;
struct list_head node;
void *kva;
int payload_count;
unsigned char payload[1];
} __packed;
struct export_desc_super {
struct kref refcount;
void *platform_data;
unsigned long offset;
struct export_desc exp;
};
int hab_vchan_open(struct uhab_context *ctx,
unsigned int mmid, int32_t *vcid,
int32_t timeout, uint32_t flags);
int hab_vchan_close(struct uhab_context *ctx,
int32_t vcid);
long hab_vchan_send(struct uhab_context *ctx,
int vcid,
size_t sizebytes,
void *data,
unsigned int flags);
int hab_vchan_recv(struct uhab_context *ctx,
struct hab_message **msg,
int vcid,
int *rsize,
unsigned int flags);
void hab_vchan_stop(struct virtual_channel *vchan);
void hab_vchans_stop(struct physical_channel *pchan);
void hab_vchan_stop_notify(struct virtual_channel *vchan);
void hab_vchans_empty_wait(int vmid);
int hab_mem_export(struct uhab_context *ctx,
struct hab_export *param, int kernel);
int hab_mem_import(struct uhab_context *ctx,
struct hab_import *param, int kernel);
int hab_mem_unexport(struct uhab_context *ctx,
struct hab_unexport *param, int kernel);
void habmem_export_get(struct export_desc_super *exp_super);
int habmem_export_put(struct export_desc_super *exp_super);
int hab_mem_unimport(struct uhab_context *ctx,
struct hab_unimport *param, int kernel);
void habmem_remove_export(struct export_desc *exp);
/* memory hypervisor framework plugin I/F */
struct export_desc_super *habmem_add_export(
struct virtual_channel *vchan,
int sizebytes,
uint32_t flags);
int habmem_hyp_grant_user(struct virtual_channel *vchan,
unsigned long address,
int page_count,
int flags,
int remotedom,
int *compressed,
int *compressed_size,
int *export_id);
int habmem_hyp_grant(struct virtual_channel *vchan,
unsigned long address,
int page_count,
int flags,
int remotedom,
int *compressed,
int *compressed_size,
int *export_id);
int habmem_hyp_revoke(void *expdata, uint32_t count);
int habmem_exp_release(struct export_desc_super *exp_super);
void *habmem_imp_hyp_open(void);
void habmem_imp_hyp_close(void *priv, int kernel);
int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param,
struct export_desc *exp, int kernel);
int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp, int kernel);
int habmem_imp_hyp_mmap(struct file *flip, struct vm_area_struct *vma);
int habmm_imp_hyp_map_check(void *imp_ctx, struct export_desc *exp);
void hab_msg_free(struct hab_message *message);
int hab_msg_dequeue(struct virtual_channel *vchan,
struct hab_message **msg, int *rsize, unsigned int flags);
int hab_msg_recv(struct physical_channel *pchan,
struct hab_header *header);
void hab_open_request_init(struct hab_open_request *request,
int type,
struct physical_channel *pchan,
int vchan_id,
int sub_id,
int open_id);
int hab_open_request_send(struct hab_open_request *request);
int hab_open_request_add(struct physical_channel *pchan,
size_t sizebytes, int request_type);
void hab_open_request_free(struct hab_open_request *request);
int hab_open_listen(struct uhab_context *ctx,
struct hab_device *dev,
struct hab_open_request *listen,
struct hab_open_request **recv_request,
int ms_timeout);
struct virtual_channel *hab_vchan_alloc(struct uhab_context *ctx,
struct physical_channel *pchan, int openid);
struct virtual_channel *hab_vchan_get(struct physical_channel *pchan,
struct hab_header *header);
void hab_vchan_put(struct virtual_channel *vchan);
struct virtual_channel *hab_get_vchan_fromvcid(int32_t vcid,
struct uhab_context *ctx, int ignore_remote);
struct physical_channel *hab_pchan_alloc(struct hab_device *habdev,
int otherend_id);
struct physical_channel *hab_pchan_find_domid(struct hab_device *dev,
int dom_id);
int hab_vchan_find_domid(struct virtual_channel *vchan);
void hab_pchan_get(struct physical_channel *pchan);
void hab_pchan_put(struct physical_channel *pchan);
struct uhab_context *hab_ctx_alloc(int kernel);
void hab_ctx_free(struct kref *ref);
static inline void hab_ctx_get(struct uhab_context *ctx)
{
if (ctx)
kref_get(&ctx->refcount);
}
static inline void hab_ctx_put(struct uhab_context *ctx)
{
if (ctx)
kref_put(&ctx->refcount, hab_ctx_free);
}
void hab_send_close_msg(struct virtual_channel *vchan);
int hab_hypervisor_register(void);
int hab_hypervisor_register_os(void);
void hab_hypervisor_unregister(void);
void hab_hypervisor_unregister_common(void);
int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
int vmid_remote, struct hab_device *mmid_device);
int habhyp_commdev_dealloc(void *commdev);
void habhyp_commdev_dealloc_os(void *commdev);
int habhyp_commdev_create_dispatcher(struct physical_channel *pchan);
int physical_channel_read(struct physical_channel *pchan,
void *payload,
size_t read_size);
int physical_channel_send(struct physical_channel *pchan,
struct hab_header *header,
void *payload);
void physical_channel_rx_dispatch(unsigned long physical_channel);
void physical_channel_rx_dispatch_common(unsigned long physical_channel);
int loopback_pchan_create(struct hab_device *dev, char *pchan_name);
int hab_parse(struct local_vmid *settings);
int do_hab_parse(void);
int fill_default_gvm_settings(struct local_vmid *settings,
int vmid_local, int mmid_start, int mmid_end);
bool hab_is_loopback(void);
int hab_vchan_query(struct uhab_context *ctx, int32_t vcid, uint64_t *ids,
char *names, size_t name_size, uint32_t flags);
struct hab_device *find_hab_device(unsigned int mm_id);
unsigned int get_refcnt(struct kref ref);
int hab_open_pending_enter(struct uhab_context *ctx,
struct physical_channel *pchan,
struct hab_open_node *pending);
int hab_open_pending_exit(struct uhab_context *ctx,
struct physical_channel *pchan,
struct hab_open_node *pending);
int hab_open_cancel_notify(struct hab_open_request *request);
int hab_open_receive_cancel(struct physical_channel *pchan,
size_t sizebytes);
int hab_stat_init(struct hab_driver *drv);
int hab_stat_deinit(struct hab_driver *drv);
int hab_stat_show_vchan(struct hab_driver *drv, char *buf, int sz);
int hab_stat_show_ctx(struct hab_driver *drv, char *buf, int sz);
int hab_stat_show_expimp(struct hab_driver *drv, int pid, char *buf, int sz);
int hab_stat_init_sub(struct hab_driver *drv);
int hab_stat_deinit_sub(struct hab_driver *drv);
static inline void hab_spin_lock(spinlock_t *lock, int irqs_disabled)
{
if (irqs_disabled)
spin_lock(lock);
else
spin_lock_bh(lock);
}
static inline void hab_spin_unlock(spinlock_t *lock, int irqs_disabled)
{
if (irqs_disabled)
spin_unlock(lock);
else
spin_unlock_bh(lock);
}
static inline void hab_write_lock(rwlock_t *lock, int irqs_disabled)
{
if (irqs_disabled)
write_lock(lock);
else
write_lock_bh(lock);
}
static inline void hab_write_unlock(rwlock_t *lock, int irqs_disabled)
{
if (irqs_disabled)
write_unlock(lock);
else
write_unlock_bh(lock);
}
/* Global singleton HAB instance */
extern struct hab_driver hab_driver;
int dump_hab_get_file_name(char *file_time, int ft_size);
int dump_hab_open(void);
void dump_hab_close(void);
int dump_hab_buf(void *buf, int size);
void hab_pipe_read_dump(struct physical_channel *pchan);
void dump_hab(void);
void dump_hab_wq(void *hyp_data);
#endif /* __HAB_H */
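
The shift/mask macros above pack the vchan id, payload type, and payload size into the single id_type_size word: id in bits 31:20, type in bits 19:16, size in bits 15:0. A worked example, with values chosen purely for illustration:

/*
 *   31         20 19   16 15            0
 *  +-------------+-------+---------------+
 *  |  vchan id   | type  | payload size  |
 *  +-------------+-------+---------------+
 */
struct hab_header h = HAB_HEADER_INITIALIZER;

HAB_HEADER_SET_SIZE(h, 0x40);				/* 64-byte payload */
HAB_HEADER_SET_TYPE(h, HAB_PAYLOAD_TYPE_EXPORT);	/* type 4 */
HAB_HEADER_SET_ID(h, 0x00300123);			/* vcid whose id field is 0x123 */

/*
 * h.id_type_size is now (0x123 << 20) | (4 << 16) | 0x40 = 0x12340040,
 * and HAB_HEADER_GET_SIZE/TYPE/ID recover 0x40, 4 and 0x123.
 */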

@@ -0,0 +1,262 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
struct loopback_msg {
struct list_head node;
int payload_size;
struct hab_header header;
char payload[];
};
struct lb_thread_struct {
int stop; /* set by creator */
int bexited; /* set by thread */
void *data; /* thread private data */
};
struct loopback_dev {
spinlock_t io_lock;
struct list_head msg_list;
int msg_cnt;
struct task_struct *kthread; /* creator's thread handle */
struct lb_thread_struct thread_data; /* thread private data */
wait_queue_head_t thread_queue;
struct loopback_msg *current_msg;
};
static int lb_thread_queue_empty(struct loopback_dev *dev)
{
int ret;
spin_lock_bh(&dev->io_lock);
ret = list_empty(&dev->msg_list);
spin_unlock_bh(&dev->io_lock);
return ret;
}
int lb_kthread(void *d)
{
struct lb_thread_struct *p = (struct lb_thread_struct *)d;
struct physical_channel *pchan = (struct physical_channel *)p->data;
struct loopback_dev *dev = pchan->hyp_data;
int ret = 0;
while (!p->stop) {
schedule();
ret = wait_event_interruptible(dev->thread_queue,
!lb_thread_queue_empty(dev) ||
p->stop);
spin_lock_bh(&dev->io_lock);
while (!list_empty(&dev->msg_list)) {
struct loopback_msg *msg = NULL;
msg = list_first_entry(&dev->msg_list,
struct loopback_msg, node);
dev->current_msg = msg;
list_del(&msg->node);
dev->msg_cnt--;
ret = hab_msg_recv(pchan, &msg->header);
if (ret) {
pr_err("failed %d msg handling sz %d header %d %d %d, %d %X %d, total %d\n",
ret, msg->payload_size,
HAB_HEADER_GET_ID(msg->header),
HAB_HEADER_GET_TYPE(msg->header),
HAB_HEADER_GET_SIZE(msg->header),
msg->header.session_id,
msg->header.signature,
msg->header.sequence, dev->msg_cnt);
}
kfree(msg);
dev->current_msg = NULL;
}
spin_unlock_bh(&dev->io_lock);
}
p->bexited = 1;
pr_debug("exit kthread\n");
return 0;
}
int physical_channel_send(struct physical_channel *pchan,
struct hab_header *header,
void *payload)
{
int size = HAB_HEADER_GET_SIZE(*header); /* payload size */
struct timespec64 ts = {0};
struct loopback_msg *msg = NULL;
struct loopback_dev *dev = pchan->hyp_data;
msg = kmalloc(size + sizeof(*msg), GFP_KERNEL);
if (!msg)
return -ENOMEM;
memcpy(&msg->header, header, sizeof(*header));
msg->payload_size = size; /* payload size could be zero */
if (size && payload) {
if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_PROFILE) {
struct habmm_xing_vm_stat *pstat =
(struct habmm_xing_vm_stat *)payload;
ktime_get_ts64(&ts);
pstat->tx_sec = ts.tv_sec;
pstat->tx_usec = ts.tv_nsec/NSEC_PER_USEC;
}
memcpy(msg->payload, payload, size);
}
spin_lock_bh(&dev->io_lock);
list_add_tail(&msg->node, &dev->msg_list);
dev->msg_cnt++;
spin_unlock_bh(&dev->io_lock);
wake_up_interruptible(&dev->thread_queue);
return 0;
}
/* loopback read is only used during open */
int physical_channel_read(struct physical_channel *pchan,
void *payload,
size_t read_size)
{
struct loopback_dev *dev = pchan->hyp_data;
struct loopback_msg *msg = dev->current_msg;
if (read_size) {
if (read_size != msg->payload_size) {
pr_err("read size mismatch requested %zd, received %d\n",
read_size, msg->payload_size);
memcpy(payload, msg->payload, min(((int)read_size),
msg->payload_size));
} else {
memcpy(payload, msg->payload, read_size);
}
} else {
read_size = 0;
}
return read_size;
}
/* pchan is directly added into the hab_device */
int loopback_pchan_create(struct hab_device *dev, char *pchan_name)
{
int result;
struct physical_channel *pchan = NULL;
struct loopback_dev *lb_dev = NULL;
pchan = hab_pchan_alloc(dev, LOOPBACK_DOM);
if (!pchan) {
result = -ENOMEM;
goto err;
}
pchan->closed = 0;
strlcpy(pchan->name, pchan_name, sizeof(pchan->name));
lb_dev = kzalloc(sizeof(*lb_dev), GFP_KERNEL);
if (!lb_dev) {
result = -ENOMEM;
goto err;
}
spin_lock_init(&lb_dev->io_lock);
INIT_LIST_HEAD(&lb_dev->msg_list);
init_waitqueue_head(&lb_dev->thread_queue);
lb_dev->thread_data.data = pchan;
lb_dev->kthread = kthread_run(lb_kthread, &lb_dev->thread_data,
pchan->name);
if (IS_ERR(lb_dev->kthread)) {
result = PTR_ERR(lb_dev->kthread);
pr_err("failed to create kthread for %s, ret %d\n",
pchan->name, result);
goto err;
}
pchan->hyp_data = lb_dev;
return 0;
err:
kfree(lb_dev);
kfree(pchan);
return result;
}
void physical_channel_rx_dispatch(unsigned long data)
{
}
int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
int vmid_remote, struct hab_device *mmid_device)
{
struct physical_channel *pchan;
int ret = loopback_pchan_create(mmid_device, name);
if (ret) {
pr_err("failed to create %s pchan in mmid device %s, ret %d, pchan cnt %d\n",
name, mmid_device->name, ret, mmid_device->pchan_cnt);
*commdev = NULL;
} else {
pr_debug("loopback physical channel on %s return %d, loopback mode(%d), total pchan %d\n",
name, ret, hab_driver.b_loopback,
mmid_device->pchan_cnt);
pchan = hab_pchan_find_domid(mmid_device,
HABCFG_VMID_DONT_CARE);
*commdev = pchan;
hab_pchan_put(pchan);
pr_debug("pchan %s vchans %d refcnt %d\n",
pchan->name, pchan->vcnt, get_refcnt(pchan->refcount));
}
return ret;
}
int habhyp_commdev_dealloc(void *commdev)
{
struct physical_channel *pchan = commdev;
struct loopback_dev *dev = pchan->hyp_data;
struct loopback_msg *msg, *tmp;
int ret;
spin_lock_bh(&dev->io_lock);
if (!list_empty(&dev->msg_list) || dev->msg_cnt) {
pr_err("pchan %s msg leak cnt %d\n", pchan->name, dev->msg_cnt);
list_for_each_entry_safe(msg, tmp, &dev->msg_list, node) {
list_del(&msg->node);
dev->msg_cnt--;
kfree(msg);
}
pr_debug("pchan %s msg cnt %d now\n",
pchan->name, dev->msg_cnt);
}
spin_unlock_bh(&dev->io_lock);
dev->thread_data.stop = 1;
ret = kthread_stop(dev->kthread);
while (!dev->thread_data.bexited)
schedule();
dev->kthread = NULL;
/* hyp_data is freed in pchan */
if (get_refcnt(pchan->refcount) > 1) {
pr_warn("potential leak pchan %s vchans %d refcnt %d\n",
pchan->name, pchan->vcnt, get_refcnt(pchan->refcount));
}
hab_pchan_put((struct physical_channel *)commdev);
return 0;
}
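
The loopback pchan is a queue-and-wake pattern: physical_channel_send() copies header + payload into a loopback_msg, enqueues it under io_lock, and wakes thread_queue; lb_kthread() drains the list and feeds each header straight back into hab_msg_recv(), so FE and BE can be exercised within a single VM. A self-contained sketch of the same pattern (illustrative only, not part of this commit):

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct demo_dev {
	spinlock_t lock;
	struct list_head q;
	wait_queue_head_t wq;
	int stop;
};

struct demo_msg {
	struct list_head node;
	int data;
};

static int demo_thread(void *d)
{
	struct demo_dev *dev = d;

	while (!dev->stop) {
		wait_event_interruptible(dev->wq,
				!list_empty(&dev->q) || dev->stop);
		spin_lock_bh(&dev->lock);
		while (!list_empty(&dev->q)) {
			struct demo_msg *m = list_first_entry(&dev->q,
					struct demo_msg, node);

			list_del(&m->node);
			/* deliver m->data here, as hab_msg_recv() is used above */
			kfree(m);
		}
		spin_unlock_bh(&dev->lock);
	}
	return 0;
}

The driver's variant additionally tracks msg_cnt and dev->current_msg so that physical_channel_read(), which only runs during open, can peek at the message currently being dispatched.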

@@ -0,0 +1,192 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
#include "hab_ghs.h"
#define GIPC_VM_SET_CNT 22
/* same vmid assignment for all the VMs. It should match dt_gipc_path_name */
static int mmid_order[GIPC_VM_SET_CNT] = {
MM_AUD_1,
MM_AUD_2,
MM_AUD_3,
MM_AUD_4,
MM_CAM_1,
MM_CAM_2,
MM_DISP_1,
MM_DISP_2,
MM_DISP_3,
MM_DISP_4,
MM_DISP_5,
MM_GFX,
MM_VID,
MM_MISC,
MM_QCPE_VM1,
MM_VID_2, /* newly recycled */
0,
0,
MM_CLK_VM1,
MM_CLK_VM2,
MM_FDE_1,
MM_BUFFERQ_1,
};
struct ghs_vmm_plugin_info_s ghs_vmm_plugin_info = {
dt_gipc_path_name,
mmid_order,
0,
0,
};
int get_dt_name_idx(int vmid_base, int mmid,
struct ghs_vmm_plugin_info_s *plugin_info)
{
int idx = -1;
int i;
if (vmid_base < 0 || vmid_base > plugin_info->probe_cnt /
GIPC_VM_SET_CNT) {
pr_err("vmid %d overflow expected max %d\n", vmid_base,
plugin_info->probe_cnt / GIPC_VM_SET_CNT);
return idx;
}
for (i = 0; i < GIPC_VM_SET_CNT; i++) {
if (mmid == plugin_info->mmid_dt_mapping[i]) {
idx = vmid_base * GIPC_VM_SET_CNT + i;
if (idx > plugin_info->probe_cnt) {
pr_err("dt name idx %d overflow max %d\n",
idx, plugin_info->probe_cnt);
idx = -1;
}
break;
}
}
return idx;
}
/* static struct physical_channel *habhyp_commdev_alloc(int id) */
int habhyp_commdev_alloc(void **commdev, int is_be, char *name, int vmid_remote,
struct hab_device *mmid_device)
{
struct ghs_vdev *dev = NULL;
struct ghs_vdev_os *dev_os = NULL;
struct physical_channel *pchan = NULL;
struct physical_channel **ppchan = (struct physical_channel **)commdev;
int ret = 0;
if (ghs_vmm_plugin_info.curr > ghs_vmm_plugin_info.probe_cnt) {
pr_err("too many commdev alloc %d, supported is %d\n",
ghs_vmm_plugin_info.curr,
ghs_vmm_plugin_info.probe_cnt);
ret = -ENOENT;
goto err;
}
/* common part for hyp_data */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
ret = -ENOMEM;
pr_err("allocate struct ghs_vdev failed %zu bytes on pchan %s\n",
sizeof(*dev), name);
goto err;
}
memset(dev, 0, sizeof(*dev));
/* os specific part for hyp_data */
dev_os = kzalloc(sizeof(*dev_os), GFP_KERNEL);
if (!dev_os) {
ret = -ENOMEM;
pr_err("allocate ghs_vdev_os failed %zu bytes on pchan %s\n",
sizeof(*dev_os), name);
goto err;
}
dev->os_data = dev_os;
spin_lock_init(&dev->io_lock);
/*
* TODO: ExtractEndpoint is in ghs_comm.c because it blocks.
* Extract and Request should be in roughly the same spot
*/
ret = hab_gipc_ep_attach(is_be, name, vmid_remote, mmid_device, dev);
if (ret)
goto err;
/* add pchan into the mmid_device list */
pchan = hab_pchan_alloc(mmid_device, vmid_remote);
if (!pchan) {
pr_err("hab_pchan_alloc failed for %s, cnt %d\n",
mmid_device->name, mmid_device->pchan_cnt);
ret = -ENOMEM;
goto err;
}
pchan->closed = 0;
pchan->hyp_data = (void *)dev;
pchan->is_be = is_be;
strlcpy(dev->name, name, sizeof(dev->name));
strlcpy(pchan->name, name, sizeof(pchan->name));
*ppchan = pchan;
dev->read_data = kmalloc(GIPC_RECV_BUFF_SIZE_BYTES, GFP_KERNEL);
if (!dev->read_data) {
ret = -ENOMEM;
goto err;
}
ret = habhyp_commdev_create_dispatcher(pchan);
if (ret)
goto err;
/* this value could be more than devp total */
ghs_vmm_plugin_info.curr++;
return 0;
err:
hab_pchan_put(pchan);
kfree(dev);
kfree(dev_os);
return ret;
}
int habhyp_commdev_dealloc(void *commdev)
{
struct physical_channel *pchan = (struct physical_channel *)commdev;
struct ghs_vdev *dev = pchan->hyp_data;
/* os specific deallocation for this commdev */
habhyp_commdev_dealloc_os(commdev);
kfree(dev->read_data);
kfree(dev->os_data);
kfree(dev);
if (get_refcnt(pchan->refcount) > 1) {
pr_warn("potential leak pchan %s vchans %d refcnt %d\n",
pchan->name, pchan->vcnt, get_refcnt(pchan->refcount));
}
hab_pchan_put(pchan);
return 0;
}
void hab_hypervisor_unregister(void)
{
pr_debug("total %d\n", hab_driver.ndevices);
hab_hypervisor_unregister_common();
ghs_vmm_plugin_info.curr = 0;
}
int hab_hypervisor_register(void)
{
int ret = 0;
/* os-specific registration work */
ret = hab_hypervisor_register_os();
return ret;
}
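
get_dt_name_idx() flattens (vmid_base, mmid) into an index into the dt_gipc_path_name table (defined in the next file): the slot of mmid within mmid_order, offset by vmid_base * GIPC_VM_SET_CNT. A worked example against the tables in this commit:

/*
 * MM_DISP_1 sits at slot 6 of mmid_order, so with vmid_base 0:
 *   idx = 0 * GIPC_VM_SET_CNT + 6 = 6   ->  "testgipc7"
 * With probe_cnt == 22 (a single VM set), vmid_base 1 passes the
 * first range check but its idx (22 + slot) then trips the
 * per-index overflow check and returns -1.
 */
int idx = get_dt_name_idx(0, MM_DISP_1, &ghs_vmm_plugin_info);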

@@ -0,0 +1,39 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __HAB_GHS_H
#define __HAB_GHS_H
#include "hab_ghs_os.h"
#define GIPC_RECV_BUFF_SIZE_BYTES (32*1024)
struct ghs_vdev {
int be;
void *read_data; /* buffer to receive from gipc */
size_t read_size;
int read_offset;
GIPC_Endpoint endpoint;
spinlock_t io_lock;
char name[32];
struct ghs_vdev_os *os_data; /* os-specific for this comm dev */
};
struct ghs_vmm_plugin_info_s {
const char * const *dt_name;
int *mmid_dt_mapping;
int curr;
int probe_cnt;
};
extern struct ghs_vmm_plugin_info_s ghs_vmm_plugin_info;
extern const char * const dt_gipc_path_name[];
int get_dt_name_idx(int vmid_base, int mmid,
struct ghs_vmm_plugin_info_s *plugin_info);
int hab_gipc_wait_to_send(GIPC_Endpoint endpoint);
int hab_gipc_ep_attach(int is_be, char *name, int vmid_remote,
struct hab_device *mmid_device, struct ghs_vdev *dev);
#endif /* __HAB_GHS_H */

@@ -0,0 +1,152 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
#include "hab_ghs.h"
const char * const dt_gipc_path_name[] = {
"testgipc1",
"testgipc2",
"testgipc3",
"testgipc4",
"testgipc5",
"testgipc6",
"testgipc7",
"testgipc8",
"testgipc9",
"testgipc10",
"testgipc11",
"testgipc12",
"testgipc13",
"testgipc14",
"testgipc15",
"testgipc16",
"testgipc17",
"testgipc18",
"testgipc19",
"testgipc20",
"testgipc21",
"testgipc22",
};
static void ghs_irq_handler(void *cookie)
{
struct physical_channel *pchan = cookie;
struct ghs_vdev *dev =
(struct ghs_vdev *) (pchan ? pchan->hyp_data : NULL);
if (dev)
tasklet_hi_schedule(&dev->os_data->task);
}
int hab_gipc_ep_attach(int is_be, char *name, int vmid_remote,
struct hab_device *mmid_device, struct ghs_vdev *dev)
{
int dt_name_idx = 0;
int ret = 0;
if (is_be) {
/* role is backend */
dev->be = 1;
} else {
/* role is FE */
struct device_node *gvh_dn;
gvh_dn = of_find_node_by_path("/aliases");
if (gvh_dn) {
const char *ep_path = NULL;
struct device_node *ep_dn = NULL;
dt_name_idx = get_dt_name_idx(vmid_remote,
mmid_device->id,
&ghs_vmm_plugin_info);
if (dt_name_idx < 0) {
pr_err("failed to find %s for vmid %d ret %d\n",
mmid_device->name,
mmid_device->id,
dt_name_idx);
of_node_put(gvh_dn);
ret = -ENOENT;
goto exit;
}
ret = of_property_read_string(gvh_dn,
ghs_vmm_plugin_info.dt_name[dt_name_idx],
&ep_path);
if (ret) {
pr_err("failed to read endpoint str ret %d\n",
ret);
of_node_put(gvh_dn);
ret = -ENOENT;
goto exit;
}
of_node_put(gvh_dn);
ep_dn = of_find_node_by_path(ep_path);
if (ep_dn) {
dev->endpoint = kgipc_endpoint_alloc(ep_dn);
of_node_put(ep_dn);
if (IS_ERR(dev->endpoint)) {
ret = PTR_ERR(dev->endpoint);
pr_err("alloc failed %d %s ret %d\n",
dt_name_idx, mmid_device->name,
ret);
} else {
pr_debug("gipc ep found for %d %s\n",
dt_name_idx, mmid_device->name);
}
} else {
pr_err("of_parse_phandle failed id %d %s\n",
dt_name_idx, mmid_device->name);
ret = -ENOENT;
}
} else {
pr_err("of_find_compatible_node failed id %d %s\n",
dt_name_idx, mmid_device->name);
ret = -ENOENT;
}
}
exit:
return ret;
}
int habhyp_commdev_create_dispatcher(struct physical_channel *pchan)
{
struct ghs_vdev *dev = (struct ghs_vdev *)pchan->hyp_data;
int ret = 0;
tasklet_init(&dev->os_data->task, physical_channel_rx_dispatch,
(unsigned long) pchan);
ret = kgipc_endpoint_start_with_irq_callback(dev->endpoint,
ghs_irq_handler,
pchan);
if (ret)
pr_err("irq alloc failed id: %d %s, ret: %d\n",
ghs_vmm_plugin_info.curr, pchan->name, ret);
else
pr_debug("ep irq handler started for %d %s, ret %d\n",
ghs_vmm_plugin_info.curr, pchan->name, ret);
return ret;
}
void habhyp_commdev_dealloc_os(void *commdev)
{
struct physical_channel *pchan = (struct physical_channel *)commdev;
struct ghs_vdev *dev = pchan->hyp_data;
kgipc_endpoint_free(dev->endpoint);
}
int hab_hypervisor_register_os(void)
{
ghs_vmm_plugin_info.probe_cnt = ARRAY_SIZE(dt_gipc_path_name);
hab_driver.b_server_dom = 0;
return 0;
}

@@ -0,0 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#ifndef __HAB_GHS_OS_H
#define __HAB_GHS_OS_H
#include <ghs_vmm/kgipc.h>
struct ghs_vdev_os {
struct tasklet_struct task;
};
#endif /* __HAB_GHS_OS_H */

@@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*/
#ifndef __HAB_GRANTABLE_H
#define __HAB_GRANTABLE_H
/* Grantable should be common between exporter and importer */
struct grantable {
unsigned long pfn;
};
struct compressed_pfns {
unsigned long first_pfn;
int nregions;
struct region {
int size;
int space;
} region[];
};
#endif /* __HAB_GRANTABLE_H */
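
compressed_pfns is a run-length encoding of a page list: each region holds "size" contiguous pfns followed by a gap of "space" pfns before the next run (habmem_compress_pfns() later in this commit computes space as next_pfn - prev_pfn - 1). A worked example:

/*
 * Pages at pfns {100, 101, 102, 110, 111} encode as:
 *   first_pfn = 100
 *   nregions  = 2
 *   region[0] = { .size = 3, .space = 7 }   (pfns 103..109 skipped)
 *   region[1] = { .size = 2, .space = 0 }
 *
 * The importer rebuilds the page array by walking the regions,
 * exactly as pages_list_create() does:
 */
unsigned long pfn = pfn_table->first_pfn;
int i, j, k = 0;

for (i = 0; i < pfn_table->nregions; i++) {
	for (j = 0; j < pfn_table->region[i].size; j++)
		pages[k++] = pfn_to_page(pfn + j);
	pfn += pfn_table->region[i].size + pfn_table->region[i].space;
}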

@@ -0,0 +1,371 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
unsigned int get_refcnt(struct kref ref)
{
return kref_read(&ref);
}
static int hab_open(struct inode *inodep, struct file *filep)
{
int result = 0;
struct uhab_context *ctx;
ctx = hab_ctx_alloc(0);
if (!ctx) {
pr_err("hab_ctx_alloc failed\n");
filep->private_data = NULL;
return -ENOMEM;
}
ctx->owner = task_pid_nr(current);
filep->private_data = ctx;
pr_debug("ctx owner %d refcnt %d\n", ctx->owner,
get_refcnt(ctx->refcount));
return result;
}
static int hab_release(struct inode *inodep, struct file *filep)
{
struct uhab_context *ctx = filep->private_data;
struct virtual_channel *vchan, *tmp;
struct hab_open_node *node;
if (!ctx)
return 0;
pr_debug("inode %pK, filep %pK ctx %pK\n", inodep, filep, ctx);
write_lock(&ctx->ctx_lock);
/* notify remote side on vchan closing */
list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) {
/* local close starts */
vchan->closed = 1;
list_del(&vchan->node); /* vchan is not in this ctx anymore */
ctx->vcnt--;
write_unlock(&ctx->ctx_lock);
hab_vchan_stop_notify(vchan);
hab_vchan_put(vchan); /* there is a lock inside */
write_lock(&ctx->ctx_lock);
}
/* notify remote side on pending open */
list_for_each_entry(node, &ctx->pending_open, node) {
/* do not touch the list itself; it is allocated on the stack */
if (hab_open_cancel_notify(&node->request))
pr_err("failed to send open cancel vcid %x subid %d openid %d pchan %s\n",
node->request.xdata.vchan_id,
node->request.xdata.sub_id,
node->request.xdata.open_id,
node->request.pchan->habdev->name);
}
write_unlock(&ctx->ctx_lock);
hab_ctx_put(ctx);
filep->private_data = NULL;
return 0;
}
static long hab_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct uhab_context *ctx = (struct uhab_context *)filep->private_data;
struct hab_open *open_param;
struct hab_close *close_param;
struct hab_recv *recv_param;
struct hab_send *send_param;
struct hab_info *info_param;
struct hab_message *msg = NULL;
void *send_data;
unsigned char data[256] = { 0 };
long ret = 0;
char names[30];
if (_IOC_SIZE(cmd) && (cmd & IOC_IN)) {
if (_IOC_SIZE(cmd) > sizeof(data))
return -EINVAL;
if (copy_from_user(data, (void __user *)arg, _IOC_SIZE(cmd))) {
pr_err("copy_from_user failed cmd=%x size=%d\n",
cmd, _IOC_SIZE(cmd));
return -EFAULT;
}
}
switch (cmd) {
case IOCTL_HAB_VC_OPEN:
open_param = (struct hab_open *)data;
ret = hab_vchan_open(ctx, open_param->mmid,
&open_param->vcid,
open_param->timeout,
open_param->flags);
break;
case IOCTL_HAB_VC_CLOSE:
close_param = (struct hab_close *)data;
ret = hab_vchan_close(ctx, close_param->vcid);
break;
case IOCTL_HAB_SEND:
send_param = (struct hab_send *)data;
if (send_param->sizebytes > (uint32_t)(HAB_HEADER_SIZE_MASK)) {
ret = -EINVAL;
break;
}
send_data = kzalloc(send_param->sizebytes, GFP_KERNEL);
if (!send_data) {
ret = -ENOMEM;
break;
}
if (copy_from_user(send_data, (void __user *)send_param->data,
send_param->sizebytes)) {
ret = -EFAULT;
} else {
ret = hab_vchan_send(ctx, send_param->vcid,
send_param->sizebytes,
send_data,
send_param->flags);
}
kfree(send_data);
break;
case IOCTL_HAB_RECV:
recv_param = (struct hab_recv *)data;
if (!recv_param->data) {
ret = -EINVAL;
break;
}
ret = hab_vchan_recv(ctx, &msg, recv_param->vcid,
&recv_param->sizebytes, recv_param->flags);
if (ret == 0 && msg) {
if (copy_to_user((void __user *)recv_param->data,
msg->data,
msg->sizebytes)) {
pr_err("copy_to_user failed: vc=%x size=%d\n",
recv_param->vcid, (int)msg->sizebytes);
recv_param->sizebytes = 0;
ret = -EFAULT;
}
} else if (ret && msg) {
pr_warn("vcid %X recv failed %d and msg is still of %zd bytes\n",
recv_param->vcid, (int)ret, msg->sizebytes);
}
if (msg)
hab_msg_free(msg);
break;
case IOCTL_HAB_VC_EXPORT:
ret = hab_mem_export(ctx, (struct hab_export *)data, 0);
break;
case IOCTL_HAB_VC_IMPORT:
ret = hab_mem_import(ctx, (struct hab_import *)data, 0);
break;
case IOCTL_HAB_VC_UNEXPORT:
ret = hab_mem_unexport(ctx, (struct hab_unexport *)data, 0);
break;
case IOCTL_HAB_VC_UNIMPORT:
ret = hab_mem_unimport(ctx, (struct hab_unimport *)data, 0);
break;
case IOCTL_HAB_VC_QUERY:
info_param = (struct hab_info *)data;
if (!info_param->names || !info_param->namesize ||
info_param->namesize > sizeof(names)) {
pr_err("wrong param for vm info vcid %X, names %llX, sz %d\n",
info_param->vcid, info_param->names,
info_param->namesize);
ret = -EINVAL;
break;
}
ret = hab_vchan_query(ctx, info_param->vcid,
(uint64_t *)&info_param->ids,
names, info_param->namesize, 0);
if (!ret) {
if (copy_to_user((void __user *)info_param->names,
names,
info_param->namesize)) {
pr_err("copy_to_user failed: vc=%x size=%d\n",
info_param->vcid,
info_param->namesize*2);
info_param->namesize = 0;
ret = -EFAULT;
}
}
break;
default:
ret = -ENOIOCTLCMD;
}
if (_IOC_SIZE(cmd) && (cmd & IOC_OUT))
if (copy_to_user((void __user *) arg, data, _IOC_SIZE(cmd))) {
pr_err("copy_to_user failed: cmd=%x\n", cmd);
ret = -EFAULT;
}
return ret;
}
static long hab_compat_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg)
{
return hab_ioctl(filep, cmd, arg);
}
static const struct file_operations hab_fops = {
.owner = THIS_MODULE,
.open = hab_open,
.release = hab_release,
.mmap = habmem_imp_hyp_mmap,
.unlocked_ioctl = hab_ioctl,
.compat_ioctl = hab_compat_ioctl
};
/*
* These map sg functions are pass-through because the memory backing the
* sg list is already accessible to the kernel, as it comes from the
* dedicated shared VM pool
*/
static int hab_map_sg(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
unsigned long attrs)
{
/* return nelems directly */
return nelems;
}
static void hab_unmap_sg(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir,
unsigned long attrs)
{
/*Do nothing */
}
static const struct dma_map_ops hab_dma_ops = {
.map_sg = hab_map_sg,
.unmap_sg = hab_unmap_sg,
};
static int hab_power_down_callback(
struct notifier_block *nfb, unsigned long action, void *data)
{
switch (action) {
case SYS_DOWN:
case SYS_HALT:
case SYS_POWER_OFF:
pr_debug("reboot called %ld\n", action);
hab_hypervisor_unregister(); /* only for single VM guest */
break;
}
pr_debug("reboot called %ld done\n", action);
return NOTIFY_DONE;
}
static struct notifier_block hab_reboot_notifier = {
.notifier_call = hab_power_down_callback,
};
static int __init hab_init(void)
{
int result;
dev_t dev;
result = alloc_chrdev_region(&hab_driver.major, 0, 1, "hab");
if (result < 0) {
pr_err("alloc_chrdev_region failed: %d\n", result);
return result;
}
cdev_init(&hab_driver.cdev, &hab_fops);
hab_driver.cdev.owner = THIS_MODULE;
hab_driver.cdev.ops = &hab_fops;
dev = MKDEV(MAJOR(hab_driver.major), 0);
result = cdev_add(&hab_driver.cdev, dev, 1);
if (result < 0) {
unregister_chrdev_region(dev, 1);
pr_err("cdev_add failed: %d\n", result);
return result;
}
hab_driver.class = class_create(THIS_MODULE, "hab");
if (IS_ERR(hab_driver.class)) {
result = PTR_ERR(hab_driver.class);
pr_err("class_create failed: %d\n", result);
goto err;
}
hab_driver.dev = device_create(hab_driver.class, NULL,
dev, &hab_driver, "hab");
if (IS_ERR(hab_driver.dev)) {
result = PTR_ERR(hab_driver.dev);
pr_err("device_create failed: %d\n", result);
goto err;
}
result = register_reboot_notifier(&hab_reboot_notifier);
if (result)
pr_err("failed to register reboot notifier %d\n", result);
/* read in hab config, then configure pchans */
result = do_hab_parse();
if (!result) {
hab_driver.kctx = hab_ctx_alloc(1);
if (!hab_driver.kctx) {
pr_err("hab_ctx_alloc failed\n");
result = -ENOMEM;
hab_hypervisor_unregister();
goto err;
} else
set_dma_ops(hab_driver.dev, &hab_dma_ops);
}
hab_stat_init(&hab_driver);
return result;
err:
if (!IS_ERR_OR_NULL(hab_driver.dev))
device_destroy(hab_driver.class, dev);
if (!IS_ERR_OR_NULL(hab_driver.class))
class_destroy(hab_driver.class);
cdev_del(&hab_driver.cdev);
unregister_chrdev_region(dev, 1);
pr_err("Error in hab init, result %d\n", result);
return result;
}
static void __exit hab_exit(void)
{
dev_t dev;
hab_hypervisor_unregister();
hab_stat_deinit(&hab_driver);
hab_ctx_put(hab_driver.kctx);
dev = MKDEV(MAJOR(hab_driver.major), 0);
device_destroy(hab_driver.class, dev);
class_destroy(hab_driver.class);
cdev_del(&hab_driver.cdev);
unregister_chrdev_region(dev, 1);
unregister_reboot_notifier(&hab_reboot_notifier);
pr_debug("hab exit called\n");
}
subsys_initcall(hab_init);
module_exit(hab_exit);
MODULE_DESCRIPTION("Hypervisor abstraction layer");
MODULE_LICENSE("GPL v2");
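
Everything above is exposed through a single /dev/hab char device: hab_ioctl() copies the argument in, dispatches on the IOCTL_HAB_* command, and copies the result back out. A hypothetical userspace sketch; the uapi header defining IOCTL_HAB_* and these structs is not part of this excerpt, so the field names below are inferred from the handler:

/* hypothetical usage; uapi definitions assumed, error checks trimmed */
int fd = open("/dev/hab", O_RDWR);
struct hab_open xopen = { .mmid = MM_GFX, .timeout = 0, .flags = 0 };

ioctl(fd, IOCTL_HAB_VC_OPEN, &xopen);	/* fills in xopen.vcid */

char buf[64] = "hello";
struct hab_send xsend = {
	.vcid = xopen.vcid,
	.data = (uintptr_t)buf,
	.sizebytes = sizeof(buf),
	.flags = 0,
};
ioctl(fd, IOCTL_HAB_SEND, &xsend);

struct hab_close xclose = { .vcid = xopen.vcid };
ioctl(fd, IOCTL_HAB_VC_CLOSE, &xclose);
close(fd);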

@@ -0,0 +1,889 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
#include <linux/fdtable.h>
#include <linux/dma-buf.h>
#include "hab_grantable.h"
enum hab_page_list_type {
HAB_PAGE_LIST_IMPORT = 0x1,
HAB_PAGE_LIST_EXPORT
};
struct pages_list {
struct list_head list;
struct page **pages;
long npages;
void *vmapping;
uint32_t userflags;
int32_t export_id;
int32_t vcid;
struct physical_channel *pchan;
uint32_t type;
struct kref refcount;
};
struct importer_context {
struct file *filp;
};
static struct dma_buf_ops dma_buf_ops;
static struct pages_list *pages_list_create(
struct export_desc *exp,
uint32_t userflags)
{
struct page **pages = NULL;
struct compressed_pfns *pfn_table =
(struct compressed_pfns *)exp->payload;
struct pages_list *pglist = NULL;
unsigned long pfn;
int i, j, k = 0, size;
if (!pfn_table)
return ERR_PTR(-EINVAL);
pfn = pfn_table->first_pfn;
if (pfn_valid(pfn) == 0 || page_is_ram(pfn) == 0) {
pr_err("imp sanity failed pfn %lx valid %d ram %d pchan %s\n",
pfn, pfn_valid(pfn),
page_is_ram(pfn), exp->pchan->name);
return ERR_PTR(-EINVAL);
}
size = exp->payload_count * sizeof(struct page *);
pages = kmalloc(size, GFP_KERNEL);
if (!pages)
return ERR_PTR(-ENOMEM);
pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
if (!pglist) {
kfree(pages);
return ERR_PTR(-ENOMEM);
}
for (i = 0; i < pfn_table->nregions; i++) {
for (j = 0; j < pfn_table->region[i].size; j++) {
pages[k] = pfn_to_page(pfn+j);
k++;
}
pfn += pfn_table->region[i].size + pfn_table->region[i].space;
}
pglist->pages = pages;
pglist->npages = exp->payload_count;
pglist->userflags = userflags;
pglist->export_id = exp->export_id;
pglist->vcid = exp->vcid_remote;
pglist->pchan = exp->pchan;
kref_init(&pglist->refcount);
return pglist;
}
static void pages_list_add(struct pages_list *pglist)
{
spin_lock_bh(&hab_driver.imp_lock);
list_add_tail(&pglist->list, &hab_driver.imp_list);
hab_driver.imp_cnt++;
spin_unlock_bh(&hab_driver.imp_lock);
}
static void pages_list_remove(struct pages_list *pglist)
{
spin_lock_bh(&hab_driver.imp_lock);
list_del(&pglist->list);
hab_driver.imp_cnt--;
spin_unlock_bh(&hab_driver.imp_lock);
}
static void pages_list_destroy(struct kref *refcount)
{
struct pages_list *pglist = container_of(refcount,
struct pages_list, refcount);
if (pglist->vmapping) {
vunmap(pglist->vmapping);
pglist->vmapping = NULL;
}
/* the imported pages are no longer used; notify the remote */
if (pglist->type == HAB_PAGE_LIST_IMPORT)
pages_list_remove(pglist);
kfree(pglist->pages);
kfree(pglist);
}
static void pages_list_get(struct pages_list *pglist)
{
kref_get(&pglist->refcount);
}
static int pages_list_put(struct pages_list *pglist)
{
return kref_put(&pglist->refcount, pages_list_destroy);
}
static struct pages_list *pages_list_lookup(
uint32_t export_id,
struct physical_channel *pchan,
bool get_pages_list)
{
struct pages_list *pglist = NULL, *tmp = NULL;
spin_lock_bh(&hab_driver.imp_lock);
list_for_each_entry_safe(pglist, tmp, &hab_driver.imp_list, list) {
if (pglist->export_id == export_id &&
pglist->pchan == pchan) {
if (get_pages_list)
pages_list_get(pglist);
spin_unlock_bh(&hab_driver.imp_lock);
return pglist;
}
}
spin_unlock_bh(&hab_driver.imp_lock);
return NULL;
}
static int match_file(const void *p, struct file *file, unsigned int fd)
{
/*
* We must return fd + 1 because iterate_fd stops searching on
* non-zero return, but 0 is a valid fd.
*/
return (p == file) ? (fd + 1) : 0;
}
static struct dma_buf *habmem_get_dma_buf_from_va(unsigned long address,
int page_count,
unsigned long *offset)
{
struct vm_area_struct *vma = NULL;
struct dma_buf *dmabuf = NULL;
int rc = 0;
int fd = -1;
down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, address);
if (!vma || !vma->vm_file) {
pr_err("cannot find vma\n");
rc = -EBADF;
goto pro_end;
}
/* Look for the fd that matches the vma file */
fd = iterate_fd(current->files, 0, match_file, vma->vm_file);
if (fd == 0) {
pr_err("iterate_fd failed\n");
rc = -EBADF;
goto pro_end;
}
dmabuf = dma_buf_get(fd - 1);
if (IS_ERR_OR_NULL(dmabuf)) {
pr_err("dma_buf_get failed fd %d ret %pK\n", fd, dmabuf);
rc = -EBADF;
goto pro_end;
}
*offset = address - vma->vm_start;
pro_end:
up_read(&current->mm->mmap_sem);
return rc < 0 ? ERR_PTR(rc) : dmabuf;
}
static struct dma_buf *habmem_get_dma_buf_from_uva(unsigned long address,
int page_count)
{
struct page **pages = NULL;
int i, ret = 0;
struct dma_buf *dmabuf = NULL;
struct pages_list *pglist = NULL;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
ret = -ENOMEM;
goto err;
}
pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
if (!pglist) {
ret = -ENOMEM;
goto err;
}
down_read(&current->mm->mmap_sem);
ret = get_user_pages(address, page_count, 0, pages, NULL);
up_read(&current->mm->mmap_sem);
if (ret <= 0) {
ret = -EINVAL;
pr_err("get %d user pages failed %d\n",
page_count, ret);
goto err;
}
pglist->pages = pages;
pglist->npages = page_count;
pglist->type = HAB_PAGE_LIST_EXPORT;
kref_init(&pglist->refcount);
exp_info.ops = &dma_buf_ops;
exp_info.size = pglist->npages << PAGE_SHIFT;
exp_info.flags = O_RDWR;
exp_info.priv = pglist;
dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
for (i = 0; i < page_count; i++)
put_page(pages[i]);
pr_err("export to dmabuf failed\n");
ret = PTR_ERR(dmabuf);
goto err;
}
return dmabuf;
err:
kfree(pages);
kfree(pglist);
return ERR_PTR(ret);
}
static int habmem_compress_pfns(
struct export_desc_super *exp_super,
struct compressed_pfns *pfns,
uint32_t *data_size)
{
int ret = 0;
struct dma_buf *dmabuf = exp_super->platform_data;
int page_count = exp_super->exp.payload_count;
struct pages_list *pglist = NULL;
struct page **pages = NULL;
int i = 0, j = 0;
int region_size = 1;
struct scatterlist *s = NULL;
struct sg_table *sg_table = NULL;
struct dma_buf_attachment *attach = NULL;
struct page *page = NULL, *pre_page = NULL;
unsigned long page_offset;
uint32_t spage_size = 0;
if (IS_ERR_OR_NULL(dmabuf) || !pfns || !data_size)
return -EINVAL;
/* DMA buffer from fd */
if (dmabuf->ops != &dma_buf_ops) {
attach = dma_buf_attach(dmabuf, hab_driver.dev);
if (IS_ERR_OR_NULL(attach)) {
pr_err("dma_buf_attach failed %d\n", -EBADF);
ret = -EBADF;
goto err;
}
sg_table = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
if (IS_ERR_OR_NULL(sg_table)) {
pr_err("dma_buf_map_attachment failed %d\n", -EBADF);
ret = -EBADF;
goto err;
}
page_offset = exp_super->offset >> PAGE_SHIFT;
for_each_sg(sg_table->sgl, s, sg_table->nents, i) {
spage_size = s->length >> PAGE_SHIFT;
if (page_offset >= spage_size) {
page_offset -= spage_size;
continue;
}
page = sg_page(s);
if (j == 0) {
pfns->first_pfn = page_to_pfn(nth_page(page,
page_offset));
} else {
pfns->region[j-1].space =
page_to_pfn(nth_page(page, 0)) -
page_to_pfn(pre_page) - 1;
}
pfns->region[j].size = spage_size - page_offset;
if (pfns->region[j].size >= page_count) {
pfns->region[j].size = page_count;
pfns->region[j].space = 0;
break;
}
page_count -= pfns->region[j].size;
pre_page = nth_page(page, pfns->region[j].size - 1);
page_offset = 0;
j++;
}
pfns->nregions = j+1;
} else {
pglist = dmabuf->priv;
pages = pglist->pages;
pfns->first_pfn = page_to_pfn(pages[0]);
for (i = 1; i < page_count; i++) {
if ((page_to_pfn(pages[i]) - 1) ==
page_to_pfn(pages[i-1])) {
region_size++;
} else {
pfns->region[j].size = region_size;
pfns->region[j].space =
page_to_pfn(pages[i]) -
page_to_pfn(pages[i-1]) - 1;
j++;
region_size = 1;
}
}
pfns->region[j].size = region_size;
pfns->region[j].space = 0;
pfns->nregions = j+1;
}
*data_size = sizeof(struct compressed_pfns) +
sizeof(struct region) * pfns->nregions;
err:
if (!IS_ERR_OR_NULL(sg_table))
dma_buf_unmap_attachment(attach, sg_table, DMA_TO_DEVICE);
if (!IS_ERR_OR_NULL(attach))
dma_buf_detach(dmabuf, attach);
/* TODO: This dma buffer should not be put here,
* but currently display depends on this put for recycling,
* so we put the dma buffer here to ensure there is no memleak.
* We can remove this once display has a fix.
*/
if (HABMM_EXP_MEM_TYPE_DMA & exp_super->exp.readonly) {
if (!IS_ERR_OR_NULL(dmabuf)
&& dmabuf->ops != &dma_buf_ops) {
dma_buf_put(dmabuf);
exp_super->platform_data = NULL;
}
}
return ret;
}
static int habmem_add_export_compress(struct virtual_channel *vchan,
unsigned long offset,
int page_count,
void *buf,
int flags,
int *payload_size,
int *export_id)
{
int ret = 0;
struct export_desc *exp = NULL;
struct export_desc_super *exp_super = NULL;
struct compressed_pfns *pfns = NULL;
uint32_t sizebytes = sizeof(*exp_super) +
sizeof(struct compressed_pfns) +
page_count * sizeof(struct region);
exp_super = habmem_add_export(vchan,
sizebytes,
flags);
if (!exp_super) {
dma_buf_put((struct dma_buf *)buf);
ret = -ENOMEM;
goto err;
}
exp = &exp_super->exp;
exp->payload_count = page_count;
exp_super->platform_data = buf;
exp_super->offset = offset;
kref_init(&exp_super->refcount);
pfns = (struct compressed_pfns *)&exp->payload[0];
ret = habmem_compress_pfns(exp_super, pfns, payload_size);
if (ret) {
pr_err("hab compressed pfns failed %d\n", ret);
kfree(exp_super);
dma_buf_put((struct dma_buf *)buf);
*payload_size = 0;
goto err;
}
*export_id = exp->export_id;
err:
return ret;
}
/*
* exporter - grant & revoke
* generate a shareable page list based on a CPU-friendly virtual "address".
* The result as an array is stored in ppdata to return to the caller.
* 4KB page size is assumed
*/
int habmem_hyp_grant_user(struct virtual_channel *vchan,
unsigned long address,
int page_count,
int flags,
int remotedom,
int *compressed,
int *payload_size,
int *export_id)
{
int ret = 0;
struct dma_buf *dmabuf = NULL;
unsigned long off = 0;
if (HABMM_EXP_MEM_TYPE_DMA & flags)
dmabuf = habmem_get_dma_buf_from_va(address,
page_count, &off);
else if (HABMM_EXPIMP_FLAGS_FD & flags)
dmabuf = dma_buf_get(address);
else
dmabuf = habmem_get_dma_buf_from_uva(address, page_count);
if (IS_ERR_OR_NULL(dmabuf))
return -EINVAL;
ret = habmem_add_export_compress(vchan,
off,
page_count,
dmabuf,
flags,
payload_size,
export_id);
return ret;
}
/*
* exporter - grant & revoke
* generate a shareable page list based on a CPU-friendly virtual "address".
* The result as an array is stored in ppdata to return to the caller.
* 4KB page size is assumed
*/
int habmem_hyp_grant(struct virtual_channel *vchan,
unsigned long address,
int page_count,
int flags,
int remotedom,
int *compressed,
int *payload_size,
int *export_id)
{
int ret = 0;
void *kva = (void *)(uintptr_t)address;
int is_vmalloc = is_vmalloc_addr(kva);
struct page **pages = NULL;
int i;
struct dma_buf *dmabuf = NULL;
struct pages_list *pglist = NULL;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
if (HABMM_EXPIMP_FLAGS_DMABUF & flags) {
dmabuf = (struct dma_buf *)address;
if (dmabuf)
get_dma_buf(dmabuf);
} else if (HABMM_EXPIMP_FLAGS_FD & flags)
dmabuf = dma_buf_get(address);
else { /*Input is kva;*/
pages = kmalloc_array(page_count,
sizeof(struct page *),
GFP_KERNEL);
if (!pages) {
ret = -ENOMEM;
goto err;
}
pglist = kzalloc(sizeof(*pglist), GFP_KERNEL);
if (!pglist) {
ret = -ENOMEM;
goto err;
}
pglist->pages = pages;
pglist->npages = page_count;
pglist->type = HAB_PAGE_LIST_EXPORT;
pglist->pchan = vchan->pchan;
pglist->vcid = vchan->id;
kref_init(&pglist->refcount);
for (i = 0; i < page_count; i++) {
kva = (void *)(uintptr_t)(address + i*PAGE_SIZE);
if (is_vmalloc)
pages[i] = vmalloc_to_page(kva);
else
pages[i] = virt_to_page(kva);
}
exp_info.ops = &dma_buf_ops;
exp_info.size = pglist->npages << PAGE_SHIFT;
exp_info.flags = O_RDWR;
exp_info.priv = pglist;
dmabuf = dma_buf_export(&exp_info);
}
if (IS_ERR_OR_NULL(dmabuf)) {
pr_err("dmabuf get failed %d\n", PTR_ERR(dmabuf));
ret = -EINVAL;
goto err;
}
ret = habmem_add_export_compress(vchan,
0,
page_count,
dmabuf,
flags,
payload_size,
export_id);
return ret;
err:
kfree(pages);
kfree(pglist);
return ret;
}
int habmem_exp_release(struct export_desc_super *exp_super)
{
struct dma_buf *dmabuf =
(struct dma_buf *) exp_super->platform_data;
if (!IS_ERR_OR_NULL(dmabuf))
dma_buf_put(dmabuf);
else
pr_debug("release failed, dmabuf is null!!!\n");
return 0;
}
int habmem_hyp_revoke(void *expdata, uint32_t count)
{
return 0;
}
void *habmem_imp_hyp_open(void)
{
struct importer_context *priv = NULL;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return NULL;
return priv;
}
void habmem_imp_hyp_close(void *imp_ctx, int kernel)
{
struct importer_context *priv = imp_ctx;
if (!priv)
return;
kfree(priv);
}
static struct sg_table *hab_mem_map_dma_buf(
struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
struct dma_buf *dmabuf = attachment->dmabuf;
struct pages_list *pglist = dmabuf->priv;
struct sg_table *sgt;
struct scatterlist *sg;
int i;
int ret = 0;
struct page **pages = pglist->pages;
sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!sgt)
return ERR_PTR(-ENOMEM);
ret = sg_alloc_table(sgt, pglist->npages, GFP_KERNEL);
if (ret) {
kfree(sgt);
return ERR_PTR(-ENOMEM);
}
for_each_sg(sgt->sgl, sg, pglist->npages, i) {
sg_set_page(sg, pages[i], PAGE_SIZE, 0);
}
return sgt;
}
static void hab_mem_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *sgt,
enum dma_data_direction direction)
{
sg_free_table(sgt);
kfree(sgt);
}
static vm_fault_t hab_map_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct page *page = NULL;
struct pages_list *pglist = NULL;
unsigned long offset, fault_offset;
int page_idx;
if (vma == NULL)
return VM_FAULT_SIGBUS;
offset = vma->vm_pgoff << PAGE_SHIFT;
/* PHY address */
fault_offset =
(unsigned long)vmf->address - vma->vm_start + offset;
page_idx = fault_offset>>PAGE_SHIFT;
pglist = vma->vm_private_data;
if (page_idx < 0 || page_idx >= pglist->npages) {
pr_err("Out of page array! page_idx %d, pg cnt %ld\n",
page_idx, pglist->npages);
return VM_FAULT_SIGBUS;
}
page = pglist->pages[page_idx];
get_page(page);
vmf->page = page;
return 0;
}
static void hab_map_open(struct vm_area_struct *vma)
{
struct pages_list *pglist =
(struct pages_list *)vma->vm_private_data;
pages_list_get(pglist);
}
static void hab_map_close(struct vm_area_struct *vma)
{
struct pages_list *pglist =
(struct pages_list *)vma->vm_private_data;
pages_list_put(pglist);
vma->vm_private_data = NULL;
}
static const struct vm_operations_struct habmem_vm_ops = {
.fault = hab_map_fault,
.open = hab_map_open,
.close = hab_map_close,
};
static vm_fault_t hab_buffer_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct pages_list *pglist = vma->vm_private_data;
pgoff_t page_offset;
int ret;
page_offset = ((unsigned long)vmf->address - vma->vm_start) >>
PAGE_SHIFT;
if (page_offset >= pglist->npages)
return VM_FAULT_SIGBUS;
ret = vm_insert_page(vma, (unsigned long)vmf->address,
pglist->pages[page_offset]);
switch (ret) {
case 0:
case -EBUSY:
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
case -EFAULT:
case -EINVAL:
return VM_FAULT_SIGBUS;
default:
WARN_ON(1);
return VM_FAULT_SIGBUS;
}
}
static void hab_buffer_open(struct vm_area_struct *vma)
{
}
static void hab_buffer_close(struct vm_area_struct *vma)
{
}
static const struct vm_operations_struct hab_buffer_vm_ops = {
.fault = hab_buffer_fault,
.open = hab_buffer_open,
.close = hab_buffer_close,
};
static int hab_mem_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
struct pages_list *pglist = dmabuf->priv;
uint32_t obj_size = pglist->npages << PAGE_SHIFT;
/* mmap returns a negative errno on failure, not a vm_fault_t */
if (vma == NULL)
return -EINVAL;
/* Check for valid size. */
if (obj_size < vma->vm_end - vma->vm_start)
return -EINVAL;
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &hab_buffer_vm_ops;
vma->vm_private_data = pglist;
vma->vm_flags |= VM_MIXEDMAP;
if (!(pglist->userflags & HABMM_IMPORT_FLAGS_CACHED))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
return 0;
}
static void hab_mem_dma_buf_release(struct dma_buf *dmabuf)
{
struct pages_list *pglist = dmabuf->priv;
pages_list_put(pglist);
}
static void *hab_mem_dma_buf_kmap(struct dma_buf *dmabuf,
unsigned long offset)
{
return NULL;
}
static void hab_mem_dma_buf_kunmap(struct dma_buf *dmabuf,
unsigned long offset,
void *ptr)
{
}
static void *hab_mem_dma_buf_vmap(struct dma_buf *dmabuf)
{
struct pages_list *pglist = dmabuf->priv;
if (!pglist->vmapping)
pglist->vmapping = vmap(pglist->pages,
pglist->npages,
VM_IOREMAP,
pgprot_writecombine(PAGE_KERNEL));
return pglist->vmapping;
}
static void hab_mem_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
struct pages_list *pglist = dmabuf->priv;
if (pglist->vmapping) {
vunmap(pglist->vmapping);
pglist->vmapping = NULL;
}
}
static struct dma_buf_ops dma_buf_ops = {
.map_dma_buf = hab_mem_map_dma_buf,
.unmap_dma_buf = hab_mem_unmap_dma_buf,
.mmap = hab_mem_mmap,
.release = hab_mem_dma_buf_release,
.map = hab_mem_dma_buf_kmap,
.unmap = hab_mem_dma_buf_kunmap,
.vmap = hab_mem_dma_buf_vmap,
.vunmap = hab_mem_dma_buf_vunmap,
};
static struct dma_buf *habmem_import_to_dma_buf(
struct physical_channel *pchan,
struct export_desc *exp,
uint32_t userflags)
{
struct pages_list *pglist = NULL;
struct dma_buf *dmabuf = NULL;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
pglist = pages_list_lookup(exp->export_id, pchan, true);
if (pglist)
goto buffer_ready;
pglist = pages_list_create(exp, userflags);
if (IS_ERR(pglist))
return (void *)pglist;
pages_list_add(pglist);
pglist->type = HAB_PAGE_LIST_IMPORT;
buffer_ready:
exp_info.ops = &dma_buf_ops;
exp_info.size = pglist->npages << PAGE_SHIFT;
exp_info.flags = O_RDWR;
exp_info.priv = pglist;
dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf))
pr_err("export to dmabuf failed, exp %d, pchan %s\n",
exp->export_id, pchan->name);
return dmabuf;
}
int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param,
struct export_desc *exp, int kernel)
{
int fd = -1;
struct dma_buf *dma_buf = NULL;
struct physical_channel *pchan = exp->pchan;
dma_buf = habmem_import_to_dma_buf(pchan, exp, param->flags);
if (IS_ERR_OR_NULL(dma_buf))
return -EINVAL;
if (kernel) {
param->kva = (uint64_t)dma_buf;
} else {
fd = dma_buf_fd(dma_buf, O_CLOEXEC);
if (fd < 0) {
pr_err("dma buf to fd failed\n");
dma_buf_put(dma_buf);
return -EINVAL;
}
param->kva = (uint64_t)fd;
}
return 0;
}
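/*
* Usage sketch (illustrative, not taken from this driver): a kernel
* importer (kernel != 0) receives the dma_buf pointer back in
* param->kva, while a user importer gets an installable fd instead:
*
*	if (!habmem_imp_hyp_map(ctx->import_ctx, &param, exp, 1))
*		dbuf = (struct dma_buf *)(uintptr_t)param.kva;
*/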
int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp, int kernel)
{
return 0;
}
int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma)
{
return -EFAULT;
}
int habmm_imp_hyp_map_check(void *imp_ctx, struct export_desc *exp)
{
struct pages_list *pglist = NULL;
int found = 0;
pglist = pages_list_lookup(exp->export_id, exp->pchan, false);
if (pglist)
found = 1;
return found;
}

View File

@ -0,0 +1,447 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
#include "hab_grantable.h"
/*
* use the physical channel to send the export parcel
*
*   local                       remote
*   send(export)      -->       IRQ: store to export warehouse
*   wait(export ack)  <--       send(export ack)
*
* the actual data consists of the following 3 parts listed in order:
* 1. header (uint32_t) vcid|type|size
* 2. export parcel (full struct)
* 3. full contents in export->pdata
*/
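/*
* Resulting wire image, as a sketch (field names follow the code below;
* illustrative, not an authoritative format spec):
*
*	struct hab_header  header;	vcid | type | size
*	struct export_desc exp;		the export parcel itself
*	...payload[]			payload_size bytes of page data
*
* so HAB_HEADER_GET_SIZE() seen by the receiver equals
* sizeof(struct export_desc) + payload_size.
*/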
static int hab_export_ack_find(struct uhab_context *ctx,
struct hab_export_ack *expect_ack, struct virtual_channel *vchan)
{
int ret = 0;
struct hab_export_ack_recvd *ack_recvd, *tmp;
spin_lock_bh(&ctx->expq_lock);
list_for_each_entry_safe(ack_recvd, tmp, &ctx->exp_rxq, node) {
if ((ack_recvd->ack.export_id == expect_ack->export_id &&
ack_recvd->ack.vcid_local == expect_ack->vcid_local &&
ack_recvd->ack.vcid_remote == expect_ack->vcid_remote)
|| vchan->otherend_closed) {
list_del(&ack_recvd->node);
kfree(ack_recvd);
ret = 1;
break;
}
ack_recvd->age++;
if (ack_recvd->age > Q_AGE_THRESHOLD) {
list_del(&ack_recvd->node);
kfree(ack_recvd);
}
}
spin_unlock_bh(&ctx->expq_lock);
return ret;
}
static int hab_export_ack_wait(struct uhab_context *ctx,
struct hab_export_ack *expect_ack, struct virtual_channel *vchan)
{
int ret;
ret = wait_event_interruptible_timeout(ctx->exp_wq,
hab_export_ack_find(ctx, expect_ack, vchan),
HAB_HS_TIMEOUT);
if (!ret || (ret == -ERESTARTSYS))
ret = -EAGAIN;
else if (vchan->otherend_closed)
ret = -ENODEV;
else if (ret > 0)
ret = 0;
return ret;
}
/*
* Get id from free list first. if not available, new id is generated.
* Once generated it will not be erased
* assumptions: no handshake or memory map/unmap in this helper function
*/
struct export_desc_super *habmem_add_export(
struct virtual_channel *vchan,
int sizebytes,
uint32_t flags)
{
struct uhab_context *ctx = NULL;
struct export_desc *exp = NULL;
struct export_desc_super *exp_super = NULL;
if (!vchan || !sizebytes)
return NULL;
exp_super = kzalloc(sizebytes, GFP_KERNEL);
if (!exp_super)
return NULL;
exp = &exp_super->exp;
idr_preload(GFP_KERNEL);
spin_lock(&vchan->pchan->expid_lock);
exp->export_id =
idr_alloc(&vchan->pchan->expid_idr, exp, 1, 0, GFP_NOWAIT);
spin_unlock(&vchan->pchan->expid_lock);
idr_preload_end();
exp->readonly = flags;
exp->vchan = vchan;
exp->vcid_local = vchan->id;
exp->vcid_remote = vchan->otherend_id;
exp->domid_local = vchan->pchan->vmid_local;
exp->domid_remote = vchan->pchan->vmid_remote;
exp->ctx = vchan->ctx;
exp->pchan = vchan->pchan;
ctx = vchan->ctx;
write_lock(&ctx->exp_lock);
ctx->export_total++;
list_add_tail(&exp->node, &ctx->exp_whse);
write_unlock(&ctx->exp_lock);
return exp_super;
}
void habmem_remove_export(struct export_desc *exp)
{
struct uhab_context *ctx = NULL;
struct export_desc_super *exp_super =
container_of(exp,
struct export_desc_super,
exp);
if (!exp || !exp->ctx) {
if (exp)
pr_err("invalid info in exp %pK ctx %pK\n",
exp, exp->ctx);
else
pr_err("invalid exp\n");
return;
}
ctx = exp->ctx;
ctx->export_total--;
exp->ctx = NULL;
habmem_export_put(exp_super);
}
static void habmem_export_destroy(struct kref *refcount)
{
struct physical_channel *pchan = NULL;
struct export_desc_super *exp_super =
container_of(
refcount,
struct export_desc_super,
refcount);
struct export_desc *exp = NULL;
if (!exp_super) {
pr_err("invalid exp_super\n");
return;
}
exp = &exp_super->exp;
if (!exp || !exp->pchan) {
if (exp)
pr_err("invalid info in exp %pK pchan %pK\n",
exp, exp->pchan);
else
pr_err("invalid exp\n");
return;
}
pchan = exp->pchan;
spin_lock(&pchan->expid_lock);
idr_remove(&pchan->expid_idr, exp->export_id);
spin_unlock(&pchan->expid_lock);
habmem_exp_release(exp_super);
kfree(exp_super);
}
/*
* store the parcel to the warehouse, then send the parcel to remote side
* both exporter composed export descriptor and the grantrefids are sent
* as one msg to the importer side
*/
static int habmem_export_vchan(struct uhab_context *ctx,
struct virtual_channel *vchan,
int payload_size,
uint32_t flags,
uint32_t export_id)
{
int ret;
struct export_desc *exp;
uint32_t sizebytes = sizeof(*exp) + payload_size;
struct hab_export_ack expected_ack = {0};
struct hab_header header = HAB_HEADER_INITIALIZER;
exp = idr_find(&vchan->pchan->expid_idr, export_id);
if (!exp) {
pr_err("export vchan failed: exp_id %d, pchan %s\n",
export_id, vchan->pchan->name);
return -EINVAL;
}
HAB_HEADER_SET_SIZE(header, sizebytes);
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_EXPORT);
HAB_HEADER_SET_ID(header, vchan->otherend_id);
HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
ret = physical_channel_send(vchan->pchan, &header, exp);
if (ret != 0) {
pr_err("failed to export payload to the remote %d\n", ret);
return ret;
}
expected_ack.export_id = exp->export_id;
expected_ack.vcid_local = exp->vcid_local;
expected_ack.vcid_remote = exp->vcid_remote;
ret = hab_export_ack_wait(ctx, &expected_ack, vchan);
if (ret != 0) {
pr_err("failed to receive remote export ack %d on vc %x\n",
ret, vchan->id);
return ret;
}
return ret;
}
void habmem_export_get(struct export_desc_super *exp_super)
{
kref_get(&exp_super->refcount);
}
int habmem_export_put(struct export_desc_super *exp_super)
{
return kref_put(&exp_super->refcount, habmem_export_destroy);
}
int hab_mem_export(struct uhab_context *ctx,
struct hab_export *param,
int kernel)
{
int ret = 0;
unsigned int payload_size = 0;
uint32_t export_id = 0;
struct virtual_channel *vchan;
int page_count;
int compressed = 0;
if (!ctx || !param || !param->buffer)
return -EINVAL;
vchan = hab_get_vchan_fromvcid(param->vcid, ctx, 0);
if (!vchan || !vchan->pchan) {
ret = -ENODEV;
goto err;
}
page_count = param->sizebytes/PAGE_SIZE;
if (kernel) {
ret = habmem_hyp_grant(vchan,
(unsigned long)param->buffer,
page_count,
param->flags,
vchan->pchan->dom_id,
&compressed,
&payload_size,
&export_id);
} else {
ret = habmem_hyp_grant_user(vchan,
(unsigned long)param->buffer,
page_count,
param->flags,
vchan->pchan->dom_id,
&compressed,
&payload_size,
&export_id);
}
if (ret < 0) {
pr_err("habmem_hyp_grant vc %x failed size=%d ret=%d\n",
param->vcid, payload_size, ret);
goto err;
}
ret = habmem_export_vchan(ctx,
vchan,
payload_size,
param->flags,
export_id);
param->exportid = export_id;
err:
if (vchan)
hab_vchan_put(vchan);
return ret;
}
int hab_mem_unexport(struct uhab_context *ctx,
struct hab_unexport *param,
int kernel)
{
int ret = 0, found = 0;
struct export_desc *exp = NULL, *tmp = NULL;
struct virtual_channel *vchan;
if (!ctx || !param)
return -EINVAL;
/* refcnt on the access */
vchan = hab_get_vchan_fromvcid(param->vcid, ctx, 1);
if (!vchan || !vchan->pchan) {
ret = -ENODEV;
goto err_novchan;
}
write_lock(&ctx->exp_lock);
list_for_each_entry_safe(exp, tmp, &ctx->exp_whse, node) {
if (param->exportid == exp->export_id &&
vchan->pchan == exp->pchan) {
list_del(&exp->node);
found = 1;
break;
}
}
write_unlock(&ctx->exp_lock);
if (!found) {
ret = -EINVAL;
goto err_novchan;
}
ret = habmem_hyp_revoke(exp->payload, exp->payload_count);
if (ret) {
pr_err("Error found in revoke grant with ret %d\n", ret);
goto err_novchan;
}
habmem_remove_export(exp);
err_novchan:
if (vchan)
hab_vchan_put(vchan);
return ret;
}
int hab_mem_import(struct uhab_context *ctx,
struct hab_import *param,
int kernel)
{
int ret = 0, found = 0;
struct export_desc *exp = NULL;
struct virtual_channel *vchan;
if (!ctx || !param)
return -EINVAL;
vchan = hab_get_vchan_fromvcid(param->vcid, ctx, 0);
if (!vchan || !vchan->pchan) {
ret = -ENODEV;
goto err_imp;
}
spin_lock_bh(&ctx->imp_lock);
list_for_each_entry(exp, &ctx->imp_whse, node) {
if ((exp->export_id == param->exportid) &&
(exp->pchan == vchan->pchan)) {
found = 1;
break;
}
}
spin_unlock_bh(&ctx->imp_lock);
if (!found) {
pr_err("Fail to get export descriptor from export id %d\n",
param->exportid);
ret = -ENODEV;
goto err_imp;
}
/* check the size only after the descriptor is known to be valid */
if ((exp->payload_count << PAGE_SHIFT) != param->sizebytes) {
pr_err("input size %d doesn't match buffer size %d\n",
param->sizebytes, exp->payload_count << PAGE_SHIFT);
ret = -EINVAL;
goto err_imp;
}
ret = habmem_imp_hyp_map(ctx->import_ctx, param, exp, kernel);
if (ret) {
pr_err("Import fail ret:%d pcnt:%d rem:%d 1st_ref:0x%X\n",
ret, exp->payload_count,
exp->domid_local, *((uint32_t *)exp->payload));
goto err_imp;
}
exp->import_index = param->index;
exp->kva = kernel ? (void *)param->kva : NULL;
err_imp:
if (vchan)
hab_vchan_put(vchan);
return ret;
}
int hab_mem_unimport(struct uhab_context *ctx,
struct hab_unimport *param,
int kernel)
{
int ret = 0, found = 0;
struct export_desc *exp = NULL, *exp_tmp;
struct virtual_channel *vchan;
if (!ctx || !param)
return -EINVAL;
vchan = hab_get_vchan_fromvcid(param->vcid, ctx, 1);
if (!vchan || !vchan->pchan) {
if (vchan)
hab_vchan_put(vchan);
return -ENODEV;
}
spin_lock_bh(&ctx->imp_lock);
list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) {
if (exp->export_id == param->exportid &&
exp->pchan == vchan->pchan) {
/* same pchan is expected here */
list_del(&exp->node);
ctx->import_total--;
found = 1;
break;
}
}
spin_unlock_bh(&ctx->imp_lock);
if (!found)
ret = -EINVAL;
else {
ret = habmm_imp_hyp_unmap(ctx->import_ctx, exp, kernel);
if (ret) {
pr_err("unmap fail id:%d pcnt:%d vcid:%d\n",
exp->export_id, exp->payload_count, exp->vcid_remote);
}
param->kva = (uint64_t)exp->kva;
kfree(exp);
}
if (vchan)
hab_vchan_put(vchan);
return ret;
}

View File

@ -0,0 +1,397 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
static int hab_rx_queue_empty(struct virtual_channel *vchan)
{
int ret;
int irqs_disabled = irqs_disabled();
hab_spin_lock(&vchan->rx_lock, irqs_disabled);
ret = list_empty(&vchan->rx_list);
hab_spin_unlock(&vchan->rx_lock, irqs_disabled);
return ret;
}
static struct hab_message*
hab_msg_alloc(struct physical_channel *pchan, size_t sizebytes)
{
struct hab_message *message;
if (sizebytes > HAB_HEADER_SIZE_MASK) {
pr_err("pchan %s send size too large %zd\n",
pchan->name, sizebytes);
return NULL;
}
message = kzalloc(sizeof(*message) + sizebytes, GFP_ATOMIC);
if (!message)
return NULL;
message->sizebytes =
physical_channel_read(pchan, message->data, sizebytes);
return message;
}
void hab_msg_free(struct hab_message *message)
{
kfree(message);
}
int
hab_msg_dequeue(struct virtual_channel *vchan, struct hab_message **msg,
int *rsize, unsigned int flags)
{
struct hab_message *message = NULL;
int ret = 0;
int wait = !(flags & HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING);
int interruptible = !(flags & HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE);
int irqs_disabled = irqs_disabled();
if (wait) {
if (hab_rx_queue_empty(vchan)) {
if (interruptible)
ret = wait_event_interruptible(vchan->rx_queue,
!hab_rx_queue_empty(vchan) ||
vchan->otherend_closed);
else
wait_event(vchan->rx_queue,
!hab_rx_queue_empty(vchan) ||
vchan->otherend_closed);
}
}
/*
* return all messages received before the remote close, and
* re-check for emptiness since other threads may have dequeued
* entries in the meantime
*/
hab_spin_lock(&vchan->rx_lock, irqs_disabled);
if ((!ret || (ret == -ERESTARTSYS)) && !list_empty(&vchan->rx_list)) {
message = list_first_entry(&vchan->rx_list,
struct hab_message, node);
if (message) {
if (*rsize >= message->sizebytes) {
/* msg can be safely retrieved in full */
list_del(&message->node);
ret = 0;
*rsize = message->sizebytes;
} else {
pr_err("vcid %x rcv buf too small %d < %zd\n",
vchan->id, *rsize,
message->sizebytes);
*rsize = message->sizebytes;
message = NULL;
ret = -EOVERFLOW; /* come back again */
}
}
} else
/* no message received, retain the original status */
*rsize = 0;
hab_spin_unlock(&vchan->rx_lock, irqs_disabled);
*msg = message;
return ret;
}
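/*
* Caller-side sketch (illustrative, not taken from this file): on
* -EOVERFLOW the message stays queued and *rsize reports the required
* size, so the caller may retry with a larger buffer; on success the
* returned message must be released with hab_msg_free().
*/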
static void hab_msg_queue(struct virtual_channel *vchan,
struct hab_message *message)
{
int irqs_disabled = irqs_disabled();
hab_spin_lock(&vchan->rx_lock, irqs_disabled);
list_add_tail(&message->node, &vchan->rx_list);
hab_spin_unlock(&vchan->rx_lock, irqs_disabled);
wake_up(&vchan->rx_queue);
}
static int hab_export_enqueue(struct virtual_channel *vchan,
struct export_desc *exp)
{
struct uhab_context *ctx = vchan->ctx;
int irqs_disabled = irqs_disabled();
hab_spin_lock(&ctx->imp_lock, irqs_disabled);
list_add_tail(&exp->node, &ctx->imp_whse);
ctx->import_total++;
hab_spin_unlock(&ctx->imp_lock, irqs_disabled);
return 0;
}
static int hab_send_export_ack(struct virtual_channel *vchan,
struct physical_channel *pchan,
struct export_desc *exp)
{
struct hab_export_ack exp_ack = {
.export_id = exp->export_id,
.vcid_local = exp->vcid_local,
.vcid_remote = exp->vcid_remote
};
struct hab_header header = HAB_HEADER_INITIALIZER;
HAB_HEADER_SET_SIZE(header, sizeof(exp_ack));
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_EXPORT_ACK);
HAB_HEADER_SET_ID(header, exp->vcid_local);
HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
return physical_channel_send(pchan, &header, &exp_ack);
}
static int hab_receive_create_export_ack(struct physical_channel *pchan,
struct uhab_context *ctx, size_t sizebytes)
{
struct hab_export_ack_recvd *ack_recvd =
kzalloc(sizeof(*ack_recvd), GFP_ATOMIC);
int irqs_disabled = irqs_disabled();
if (!ack_recvd)
return -ENOMEM;
if (sizeof(ack_recvd->ack) != sizebytes)
pr_err("%s exp ack size %zu is not as arrived %zu\n",
pchan->name, sizeof(ack_recvd->ack), sizebytes);
if (sizebytes > sizeof(ack_recvd->ack)) {
pr_err("pchan %s read size too large %zd %zd\n",
pchan->name, sizebytes, sizeof(ack_recvd->ack));
kfree(ack_recvd);
return -EINVAL;
}
if (physical_channel_read(pchan,
&ack_recvd->ack,
sizebytes) != sizebytes) {
kfree(ack_recvd);
return -EIO;
}
hab_spin_lock(&ctx->expq_lock, irqs_disabled);
list_add_tail(&ack_recvd->node, &ctx->exp_rxq);
hab_spin_unlock(&ctx->expq_lock, irqs_disabled);
return 0;
}
static void hab_msg_drop(struct physical_channel *pchan, size_t sizebytes)
{
uint8_t *data = NULL;
if (sizebytes > HAB_HEADER_SIZE_MASK) {
pr_err("%s read size too large %zd\n", pchan->name, sizebytes);
return;
}
data = kmalloc(sizebytes, GFP_ATOMIC);
if (data == NULL)
return;
physical_channel_read(pchan, data, sizebytes);
kfree(data);
}
int hab_msg_recv(struct physical_channel *pchan,
struct hab_header *header)
{
int ret = 0;
struct hab_message *message;
struct hab_device *dev = pchan->habdev;
size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
uint32_t payload_type = HAB_HEADER_GET_TYPE(*header);
uint32_t vchan_id = HAB_HEADER_GET_ID(*header);
uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header);
struct virtual_channel *vchan = NULL;
struct export_desc *exp_desc;
struct timespec64 ts = {0};
unsigned long long rx_mpm_tv;
/* get the local virtual channel if it isn't an open message */
if (payload_type != HAB_PAYLOAD_TYPE_INIT &&
payload_type != HAB_PAYLOAD_TYPE_INIT_ACK &&
payload_type != HAB_PAYLOAD_TYPE_INIT_DONE &&
payload_type != HAB_PAYLOAD_TYPE_INIT_CANCEL) {
/* sanity check the received message */
if (payload_type >= HAB_PAYLOAD_TYPE_MAX ||
vchan_id > (HAB_HEADER_ID_MASK >> HAB_HEADER_ID_SHIFT)
|| !vchan_id || !session_id) {
pr_err("@@ %s Invalid msg type %d vcid %x bytes %zx sn %d\n",
pchan->name, payload_type,
vchan_id, sizebytes, session_id);
dump_hab_wq(pchan->hyp_data);
}
/*
* need both vcid and session_id to be accurate.
* this is from pchan instead of ctx
*/
vchan = hab_vchan_get(pchan, header);
if (!vchan) {
pr_debug("vchan not found type %d vcid %x sz %zx sesn %d\n",
payload_type, vchan_id, sizebytes, session_id);
if (sizebytes) {
hab_msg_drop(pchan, sizebytes);
pr_err("%s msg dropped type %d size %d vcid %X session id %d\n",
pchan->name, payload_type,
sizebytes, vchan_id,
session_id);
}
return -EINVAL;
} else if (vchan->otherend_closed) {
hab_vchan_put(vchan);
pr_info("vchan remote is closed payload type %d, vchan id %x, sizebytes %zx, session %d\n",
payload_type, vchan_id,
sizebytes, session_id);
if (sizebytes) {
hab_msg_drop(pchan, sizebytes);
pr_err("%s message %d dropped remote close, session id %d\n",
pchan->name, payload_type,
session_id);
}
return -ENODEV;
}
} else {
if (sizebytes != sizeof(struct hab_open_send_data)) {
pr_err("%s Invalid open req type %d vcid %x bytes %zx session %d\n",
pchan->name, payload_type, vchan_id,
sizebytes, session_id);
if (sizebytes) {
hab_msg_drop(pchan, sizebytes);
pr_err("%s msg %d dropped unknown reason session id %d\n",
pchan->name,
payload_type,
session_id);
dump_hab_wq(pchan->hyp_data);
}
return -ENODEV;
}
}
switch (payload_type) {
case HAB_PAYLOAD_TYPE_MSG:
case HAB_PAYLOAD_TYPE_SCHE_RESULT_REQ:
case HAB_PAYLOAD_TYPE_SCHE_RESULT_RSP:
message = hab_msg_alloc(pchan, sizebytes);
if (!message)
break;
hab_msg_queue(vchan, message);
break;
case HAB_PAYLOAD_TYPE_INIT:
case HAB_PAYLOAD_TYPE_INIT_ACK:
case HAB_PAYLOAD_TYPE_INIT_DONE:
ret = hab_open_request_add(pchan, sizebytes, payload_type);
if (ret) {
pr_err("%s open request add failed, ret %d, payload type %d, sizebytes %zx\n",
pchan->name, ret, payload_type, sizebytes);
break;
}
wake_up_interruptible(&dev->openq);
break;
case HAB_PAYLOAD_TYPE_INIT_CANCEL:
pr_info("remote open cancel header vcid %X session %d local %d remote %d\n",
vchan_id, session_id, pchan->vmid_local,
pchan->vmid_remote);
ret = hab_open_receive_cancel(pchan, sizebytes);
if (ret)
pr_err("%s open cancel handling failed ret %d vcid %X session %d\n",
pchan->name, ret, vchan_id, session_id);
break;
case HAB_PAYLOAD_TYPE_EXPORT:
if (sizebytes > HAB_HEADER_SIZE_MASK) {
pr_err("%s exp size too large %zd header %zd\n",
pchan->name, sizebytes, sizeof(*exp_desc));
break;
}
exp_desc = kzalloc(sizebytes, GFP_ATOMIC);
if (!exp_desc)
break;
if (physical_channel_read(pchan, exp_desc, sizebytes) !=
sizebytes) {
pr_err("%s corrupted exp expect %zd bytes vcid %X remote %X open %d!\n",
pchan->name, sizebytes, vchan->id,
vchan->otherend_id, vchan->session_id);
kfree(exp_desc);
break;
}
if (pchan->vmid_local != exp_desc->domid_remote ||
pchan->vmid_remote != exp_desc->domid_local)
pr_err("%s corrupted vmid %d != %d %d != %d\n",
pchan->name, pchan->vmid_local, exp_desc->domid_remote,
pchan->vmid_remote, exp_desc->domid_local);
exp_desc->domid_remote = pchan->vmid_remote;
exp_desc->domid_local = pchan->vmid_local;
exp_desc->pchan = pchan;
hab_export_enqueue(vchan, exp_desc);
hab_send_export_ack(vchan, pchan, exp_desc);
break;
case HAB_PAYLOAD_TYPE_EXPORT_ACK:
ret = hab_receive_create_export_ack(pchan, vchan->ctx,
sizebytes);
if (ret) {
pr_err("%s failed to handled export ack %d\n",
pchan->name, ret);
break;
}
wake_up_interruptible(&vchan->ctx->exp_wq);
break;
case HAB_PAYLOAD_TYPE_CLOSE:
/* remote request close */
pr_debug("remote close vcid %pK %X other id %X session %d refcnt %d\n",
vchan, vchan->id, vchan->otherend_id,
session_id, get_refcnt(vchan->refcount));
hab_vchan_stop(vchan);
break;
case HAB_PAYLOAD_TYPE_PROFILE:
ktime_get_ts64(&ts);
/* pull down the incoming data */
message = hab_msg_alloc(pchan, sizebytes);
if (!message)
pr_err("%s failed to allocate msg Arrived msg will be lost\n",
pchan->name);
else {
struct habmm_xing_vm_stat *pstat =
(struct habmm_xing_vm_stat *)message->data;
pstat->rx_sec = ts.tv_sec;
pstat->rx_usec = ts.tv_nsec/NSEC_PER_USEC;
hab_msg_queue(vchan, message);
}
break;
case HAB_PAYLOAD_TYPE_SCHE_MSG:
case HAB_PAYLOAD_TYPE_SCHE_MSG_ACK:
rx_mpm_tv = msm_timer_get_sclk_ticks();
/* pull down the incoming data */
message = hab_msg_alloc(pchan, sizebytes);
if (!message)
pr_err("%s failed to allocate msg Arrived msg will be lost\n",
pchan->name);
else {
((unsigned long long *)message->data)[0] = rx_mpm_tv;
hab_msg_queue(vchan, message);
}
break;
default:
pr_err("%s unknown msg received, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
pchan->name, payload_type, vchan_id,
sizebytes, session_id);
break;
}
if (vchan)
hab_vchan_put(vchan);
return ret;
}

View File

@ -0,0 +1,303 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
#define HAB_OPEN_REQ_EXPIRE_TIME_S (3600*10)
void hab_open_request_init(struct hab_open_request *request,
int type,
struct physical_channel *pchan,
int vchan_id,
int sub_id,
int open_id)
{
request->type = type;
request->pchan = pchan;
request->xdata.vchan_id = vchan_id;
request->xdata.sub_id = sub_id;
request->xdata.open_id = open_id;
}
int hab_open_request_send(struct hab_open_request *request)
{
struct hab_header header = HAB_HEADER_INITIALIZER;
HAB_HEADER_SET_SIZE(header, sizeof(struct hab_open_send_data));
HAB_HEADER_SET_TYPE(header, request->type);
return physical_channel_send(request->pchan, &header, &request->xdata);
}
/* called when remote sends in open-request */
int hab_open_request_add(struct physical_channel *pchan,
size_t sizebytes, int request_type)
{
struct hab_open_node *node;
struct hab_device *dev = pchan->habdev;
struct hab_open_request *request;
struct timespec64 ts = {0};
int irqs_disabled = irqs_disabled();
if (sizebytes > HAB_HEADER_SIZE_MASK) {
pr_err("pchan %s request size too large %zd\n",
pchan->name, sizebytes);
return -EINVAL;
}
node = kzalloc(sizeof(*node), GFP_ATOMIC);
if (!node)
return -ENOMEM;
request = &node->request;
if (physical_channel_read(pchan, &request->xdata, sizebytes)
!= sizebytes) {
kfree(node);
return -EIO;
}
request->type = request_type;
request->pchan = pchan;
ktime_get_ts64(&ts);
node->age = ts.tv_sec + HAB_OPEN_REQ_EXPIRE_TIME_S +
ts.tv_nsec/NSEC_PER_SEC;
hab_pchan_get(pchan);
hab_spin_lock(&dev->openlock, irqs_disabled);
list_add_tail(&node->node, &dev->openq_list);
dev->openq_cnt++;
hab_spin_unlock(&dev->openlock, irqs_disabled);
return 0;
}
/* local only */
static int hab_open_request_find(struct uhab_context *ctx,
struct hab_device *dev,
struct hab_open_request *listen,
struct hab_open_request **recv_request)
{
struct hab_open_node *node, *tmp;
struct hab_open_request *request;
struct timespec64 ts = {0};
int ret = 0;
if (ctx->closing ||
(listen->pchan && listen->pchan->closed)) {
*recv_request = NULL;
return 1;
}
spin_lock_bh(&dev->openlock);
if (list_empty(&dev->openq_list))
goto done;
ktime_get_ts64(&ts);
list_for_each_entry_safe(node, tmp, &dev->openq_list, node) {
request = (struct hab_open_request *)node;
if ((request->type == listen->type ||
request->type == HAB_PAYLOAD_TYPE_INIT_CANCEL) &&
(request->xdata.sub_id == listen->xdata.sub_id) &&
(!listen->xdata.open_id ||
request->xdata.open_id == listen->xdata.open_id) &&
(!listen->pchan ||
request->pchan == listen->pchan)) {
list_del(&node->node);
dev->openq_cnt--;
*recv_request = request;
ret = 1;
break;
}
if (node->age < (int64_t)ts.tv_sec + ts.tv_nsec/NSEC_PER_SEC) {
pr_warn("open request type %d sub %d open %d\n",
request->type, request->xdata.sub_id,
request->xdata.sub_id);
list_del(&node->node);
hab_open_request_free(request);
}
}
done:
spin_unlock_bh(&dev->openlock);
return ret;
}
void hab_open_request_free(struct hab_open_request *request)
{
if (request) {
hab_pchan_put(request->pchan);
kfree(request);
} else
pr_err("empty request found\n");
}
int hab_open_listen(struct uhab_context *ctx,
struct hab_device *dev,
struct hab_open_request *listen,
struct hab_open_request **recv_request,
int ms_timeout)
{
int ret = 0;
if (!ctx || !listen || !recv_request) {
pr_err("listen failed ctx %pK listen %pK request %pK\n",
ctx, listen, recv_request);
return -EINVAL;
}
*recv_request = NULL;
if (ms_timeout > 0) { /* be case */
ms_timeout = msecs_to_jiffies(ms_timeout);
ret = wait_event_interruptible_timeout(dev->openq,
hab_open_request_find(ctx, dev, listen, recv_request),
ms_timeout);
if (!ret) {
pr_debug("%s timeout in open listen\n", dev->name);
ret = -EAGAIN; /* condition not met */
} else if (-ERESTARTSYS == ret) {
pr_warn("something failed in open listen ret %d\n",
ret);
ret = -EINTR; /* condition not met */
} else if (ret > 0)
ret = 0; /* condition met */
} else { /* fe case */
ret = wait_event_interruptible(dev->openq,
hab_open_request_find(ctx, dev, listen, recv_request));
if (ctx->closing) {
pr_warn("local closing during open ret %d\n", ret);
ret = -ENODEV;
} else if (-ERESTARTSYS == ret) {
pr_warn("local interrupted ret %d\n", ret);
ret = -EINTR;
}
}
return ret;
}
/* called when receives remote's cancel init from FE or init-ack from BE */
int hab_open_receive_cancel(struct physical_channel *pchan,
size_t sizebytes)
{
struct hab_device *dev = pchan->habdev;
struct hab_open_send_data data;
struct hab_open_request *request;
struct hab_open_node *node, *tmp;
int bfound = 0;
struct timespec64 ts = {0};
int irqs_disabled = irqs_disabled();
if (sizebytes > HAB_HEADER_SIZE_MASK) {
pr_err("pchan %s cancel size too large %zd\n",
pchan->name, sizebytes);
return -EINVAL;
}
if (physical_channel_read(pchan, &data, sizebytes) != sizebytes)
return -EIO;
hab_spin_lock(&dev->openlock, irqs_disabled);
list_for_each_entry_safe(node, tmp, &dev->openq_list, node) {
request = &node->request;
/* check if open request has been serviced or not */
if ((request->type == HAB_PAYLOAD_TYPE_INIT ||
request->type == HAB_PAYLOAD_TYPE_INIT_ACK) &&
(request->xdata.sub_id == data.sub_id) &&
(request->xdata.open_id == data.open_id) &&
(request->xdata.vchan_id == data.vchan_id)) {
list_del(&node->node);
dev->openq_cnt--;
pr_info("open cancelled on pchan %s vcid %x subid %d openid %d\n",
pchan->name, data.vchan_id,
data.sub_id, data.open_id);
/* found un-serviced open request, delete it */
bfound = 1;
break;
}
}
hab_spin_unlock(&dev->openlock, irqs_disabled);
if (!bfound) {
pr_info("init waiting is in-flight. vcid %x sub %d open %d\n",
data.vchan_id, data.sub_id, data.open_id);
/* add cancel to the openq to let the waiting open bail out */
node = kzalloc(sizeof(*node), GFP_ATOMIC);
if (!node)
return -ENOMEM;
request = &node->request;
request->type = HAB_PAYLOAD_TYPE_INIT_CANCEL;
request->pchan = pchan;
request->xdata.vchan_id = data.vchan_id;
request->xdata.sub_id = data.sub_id;
request->xdata.open_id = data.open_id;
ktime_get_ts64(&ts);
node->age = ts.tv_sec + HAB_OPEN_REQ_EXPIRE_TIME_S +
ts.tv_nsec/NSEC_PER_SEC;
/* put when this node is handled in open path */
hab_pchan_get(pchan);
hab_spin_lock(&dev->openlock, irqs_disabled);
list_add_tail(&node->node, &dev->openq_list);
dev->openq_cnt++;
hab_spin_unlock(&dev->openlock, irqs_disabled);
wake_up_interruptible(&dev->openq);
}
return 0;
}
/* calls locally to send cancel pending open to remote */
int hab_open_cancel_notify(struct hab_open_request *request)
{
struct hab_header header = HAB_HEADER_INITIALIZER;
HAB_HEADER_SET_SIZE(header, sizeof(struct hab_open_send_data));
HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_INIT_CANCEL);
return physical_channel_send(request->pchan, &header, &request->xdata);
}
int hab_open_pending_enter(struct uhab_context *ctx,
struct physical_channel *pchan,
struct hab_open_node *pending)
{
write_lock(&ctx->ctx_lock);
list_add_tail(&pending->node, &ctx->pending_open);
ctx->pending_cnt++;
write_unlock(&ctx->ctx_lock);
return 0;
}
int hab_open_pending_exit(struct uhab_context *ctx,
struct physical_channel *pchan,
struct hab_open_node *pending)
{
struct hab_open_node *node, *tmp;
int ret = -ENOENT;
write_lock(&ctx->ctx_lock);
list_for_each_entry_safe(node, tmp, &ctx->pending_open, node) {
if ((node->request.type == pending->request.type) &&
(node->request.pchan
== pending->request.pchan) &&
(node->request.xdata.vchan_id
== pending->request.xdata.vchan_id) &&
(node->request.xdata.sub_id
== pending->request.xdata.sub_id) &&
(node->request.xdata.open_id
== pending->request.xdata.open_id)) {
list_del(&node->node);
ctx->pending_cnt--;
ret = 0;
}
}
write_unlock(&ctx->ctx_lock);
return ret;
}

View File

@ -0,0 +1,53 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#ifndef __HAB_OS_H
#define __HAB_OS_H
#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt) "hab:%s:%d " fmt, __func__, __LINE__
#include <linux/types.h>
#include <linux/habmm.h>
#include <linux/hab_ioctl.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/cdev.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
#include <linux/idr.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/jiffies.h>
#include <linux/reboot.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/version.h>
#include <linux/devcoredump.h>
#ifdef CONFIG_MSM_BOOT_STATS
#include <soc/qcom/boot_stats.h>
#else
static inline unsigned long long msm_timer_get_sclk_ticks(void)
{
return 0;
}
#endif
#endif /*__HAB_OS_H*/

View File

@ -0,0 +1,150 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
#include <linux/of.h>
/*
* set a valid mmid value in tbl to mark the entry as usable. All inputs
* here are normalized to 1-based integers
*/
static int fill_vmid_mmid_tbl(struct vmid_mmid_desc *tbl, int32_t vm_start,
int32_t vm_range, int32_t mmid_start,
int32_t mmid_range, int32_t be)
{
int i, j;
for (i = vm_start; i < vm_start+vm_range; i++) {
tbl[i].vmid = i; /* set valid vmid value to make it usable */
for (j = mmid_start; j < mmid_start + mmid_range; j++) {
/* sanity check */
if (tbl[i].mmid[j] != HABCFG_VMID_INVALID) {
pr_err("overwrite previous setting vmid %d, mmid %d, be %d\n",
i, j, tbl[i].is_listener[j]);
}
tbl[i].mmid[j] = j;
tbl[i].is_listener[j] = be; /* BE IS listen */
}
}
return 0;
}
void dump_settings(struct local_vmid *settings)
{
pr_debug("self vmid is %d\n", settings->self);
}
int fill_default_gvm_settings(struct local_vmid *settings, int vmid_local,
int mmid_start, int mmid_end)
{
int32_t be = HABCFG_BE_FALSE;
int32_t range = 1;
int32_t vmremote = 0; /* default to host[0] as local is guest[2] */
settings->self = vmid_local;
/* default gvm always talks to host as vm0 */
return fill_vmid_mmid_tbl(settings->vmid_mmid_list, vmremote, range,
mmid_start/100, (mmid_end-mmid_start)/100+1, be);
}
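/*
* Worked example (values assumed for illustration): with vmid_local = 2,
* mmid_start = 100 and mmid_end = 2800, the table is filled for remote
* vm 0 covering mmid indices mmid_start/100 = 1 through index 28, i.e.
* (mmid_end - mmid_start)/100 + 1 = 28 entries, all marked FE
* (be = HABCFG_BE_FALSE).
*/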
/* device tree based parser */
static int hab_parse_dt(struct local_vmid *settings)
{
int result, i;
struct device_node *hab_node = NULL;
struct device_node *mmid_grp_node = NULL;
const char *role = NULL;
int tmp = -1, vmids_num;
u32 vmids[16];
int32_t grp_start_id, be;
/* parse device tree*/
pr_debug("parsing hab node in device tree...\n");
hab_node = of_find_compatible_node(NULL, NULL, "qcom,hab");
if (!hab_node) {
pr_err("no hab device tree node\n");
return -ENODEV;
}
/* read the local vmid of this VM, like 0 for host, 1 for AGL GVM */
result = of_property_read_u32(hab_node, "vmid", &tmp);
if (result) {
pr_err("failed to read local vmid, result = %d\n", result);
return result;
}
pr_debug("local vmid = %d\n", tmp);
settings->self = tmp;
for_each_child_of_node(hab_node, mmid_grp_node) {
/* read the group starting id */
result = of_property_read_u32(mmid_grp_node,
"grp-start-id", &tmp);
if (result) {
pr_err("failed to read grp-start-id, result = %d\n",
result);
return result;
}
pr_debug("grp-start-id = %d\n", tmp);
grp_start_id = tmp;
/* read the role(fe/be) of these pchans in this mmid group */
result = of_property_read_string(mmid_grp_node, "role", &role);
if (result) {
pr_err("failed to get role, result = %d\n", result);
return result;
}
pr_debug("local role of this mmid group is %s\n", role);
if (!strcmp(role, "be"))
be = 1;
else
be = 0;
/* read the remote vmids for these pchans in this mmid group */
vmids_num = of_property_count_elems_of_size(mmid_grp_node,
"remote-vmids", sizeof(u32));
if (vmids_num < 0 || vmids_num > (int)ARRAY_SIZE(vmids)) {
pr_err("invalid remote-vmids count %d\n", vmids_num);
return -EINVAL;
}
result = of_property_read_u32_array(mmid_grp_node,
"remote-vmids", vmids, vmids_num);
if (result) {
pr_err("failed to read remote-vmids, result = %d\n",
result);
return result;
}
for (i = 0; i < vmids_num; i++) {
pr_debug("vmids_num = %d, vmids[%d] = %d\n",
vmids_num, i, vmids[i]);
result = fill_vmid_mmid_tbl(
settings->vmid_mmid_list,
vmids[i], 1,
grp_start_id/100, 1, be);
if (result) {
pr_err("fill_vmid_mmid_tbl failed\n");
return result;
}
}
}
dump_settings(settings);
return 0;
}
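/*
* Example of a device tree node this parser accepts (a sketch; the
* property names come from the code above, the values and node names
* are illustrative):
*
*	hab {
*		compatible = "qcom,hab";
*		vmid = <2>;
*		mmidgrp100 {
*			grp-start-id = <100>;
*			role = "fe";
*			remote-vmids = <0>;
*		};
*	};
*/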
/*
* 0: successful
* negative: various failure core
*/
int hab_parse(struct local_vmid *settings)
{
int ret;
ret = hab_parse_dt(settings);
return ret;
}

View File

@ -0,0 +1,100 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
struct physical_channel *
hab_pchan_alloc(struct hab_device *habdev, int otherend_id)
{
struct physical_channel *pchan = kzalloc(sizeof(*pchan), GFP_KERNEL);
if (!pchan)
return NULL;
idr_init(&pchan->vchan_idr);
spin_lock_init(&pchan->vid_lock);
idr_init(&pchan->expid_idr);
spin_lock_init(&pchan->expid_lock);
kref_init(&pchan->refcount);
pchan->habdev = habdev;
pchan->dom_id = otherend_id;
pchan->closed = 1;
pchan->hyp_data = NULL;
INIT_LIST_HEAD(&pchan->vchannels);
rwlock_init(&pchan->vchans_lock);
spin_lock_init(&pchan->rxbuf_lock);
spin_lock_bh(&habdev->pchan_lock);
list_add_tail(&pchan->node, &habdev->pchannels);
habdev->pchan_cnt++;
spin_unlock_bh(&habdev->pchan_lock);
return pchan;
}
static void hab_pchan_free(struct kref *ref)
{
struct physical_channel *pchan =
container_of(ref, struct physical_channel, refcount);
struct virtual_channel *vchan;
pr_debug("pchan %s refcnt %d\n", pchan->name,
get_refcnt(pchan->refcount));
spin_lock_bh(&pchan->habdev->pchan_lock);
list_del(&pchan->node);
pchan->habdev->pchan_cnt--;
spin_unlock_bh(&pchan->habdev->pchan_lock);
/* check vchan leaking */
read_lock(&pchan->vchans_lock);
list_for_each_entry(vchan, &pchan->vchannels, pnode) {
/* no logging on the owner. it might have been gone */
pr_warn("leaking vchan id %X remote %X refcnt %d\n",
vchan->id, vchan->otherend_id,
get_refcnt(vchan->refcount));
}
read_unlock(&pchan->vchans_lock);
kfree(pchan->hyp_data);
kfree(pchan);
}
struct physical_channel *
hab_pchan_find_domid(struct hab_device *dev, int dom_id)
{
struct physical_channel *pchan = NULL, *p;
spin_lock_bh(&dev->pchan_lock);
list_for_each_entry(p, &dev->pchannels, node)
if (p->dom_id == dom_id || dom_id == HABCFG_VMID_DONT_CARE) {
pchan = p;
break;
}
/*
* guard the no-match/empty-list case: the loop cursor is not a
* valid pchan once the loop completes without a break
*/
if (!pchan)
pr_err("no pchan matches requested dom_id %d\n", dom_id);
else if (!kref_get_unless_zero(&pchan->refcount))
pchan = NULL;
spin_unlock_bh(&dev->pchan_lock);
return pchan;
}
void hab_pchan_get(struct physical_channel *pchan)
{
if (pchan)
kref_get(&pchan->refcount);
}
void hab_pchan_put(struct physical_channel *pchan)
{
if (pchan)
kref_put(&pchan->refcount, hab_pchan_free);
}

View File

@ -0,0 +1,193 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
#include "hab_pipe.h"
size_t hab_pipe_calc_required_bytes(uint32_t shared_buf_size)
{
return sizeof(struct hab_pipe)
+ (2 * (sizeof(struct hab_shared_buf) + shared_buf_size));
}
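/*
* Shared memory layout implied by the sizing above (illustrative):
*
*	| struct hab_pipe | buf_a hdr | buf_a data | buf_b hdr | buf_b data |
*
* buf_a carries top-to-bottom traffic (top TX, bottom RX) and buf_b the
* reverse, matching the endpoint wiring in hab_pipe_init() below.
*/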
struct hab_pipe_endpoint *hab_pipe_init(struct hab_pipe *pipe,
uint32_t shared_buf_size, int top)
{
struct hab_pipe_endpoint *ep = NULL;
struct hab_shared_buf *buf_a;
struct hab_shared_buf *buf_b;
/* debug only */
struct dbg_items *its;
if (!pipe)
return NULL;
its = kzalloc(sizeof(struct dbg_items), GFP_KERNEL);
buf_a = (struct hab_shared_buf *) pipe->buf_base;
buf_b = (struct hab_shared_buf *) (pipe->buf_base
+ sizeof(struct hab_shared_buf) + shared_buf_size);
if (top) {
ep = &pipe->top;
memset(ep, 0, sizeof(*ep));
ep->tx_info.sh_buf = buf_a;
ep->rx_info.sh_buf = buf_b;
} else {
ep = &pipe->bottom;
memset(ep, 0, sizeof(*ep));
ep->tx_info.sh_buf = buf_b;
ep->rx_info.sh_buf = buf_a;
memset(ep->tx_info.sh_buf, 0, sizeof(struct hab_shared_buf));
memset(ep->rx_info.sh_buf, 0, sizeof(struct hab_shared_buf));
ep->tx_info.sh_buf->size = shared_buf_size;
ep->rx_info.sh_buf->size = shared_buf_size;
pipe->buf_a = buf_a;
pipe->buf_b = buf_b;
pipe->total_size =
hab_pipe_calc_required_bytes(shared_buf_size);
}
pipe->buf_a = (struct hab_shared_buf *)its;
return ep;
}
uint32_t hab_pipe_write(struct hab_pipe_endpoint *ep,
unsigned char *p, uint32_t num_bytes)
{
struct hab_shared_buf *sh_buf = ep->tx_info.sh_buf;
uint32_t space =
(sh_buf->size - (ep->tx_info.wr_count - sh_buf->rd_count));
uint32_t count1, count2;
if (!p || num_bytes > space || num_bytes == 0) {
pr_err("****can not write to pipe p %pK to-write %d space available %d\n",
p, num_bytes, space);
return 0;
}
asm volatile("dmb ish" ::: "memory");
count1 = (num_bytes <= (sh_buf->size - ep->tx_info.index)) ? num_bytes :
(sh_buf->size - ep->tx_info.index);
count2 = num_bytes - count1;
if (count1 > 0) {
memcpy((void *)&sh_buf->data[ep->tx_info.index], p, count1);
ep->tx_info.wr_count += count1;
ep->tx_info.index += count1;
if (ep->tx_info.index >= sh_buf->size)
ep->tx_info.index = 0;
}
if (count2 > 0) {/* handle buffer wrapping */
memcpy((void *)&sh_buf->data[ep->tx_info.index],
p + count1, count2);
ep->tx_info.wr_count += count2;
ep->tx_info.index += count2;
if (ep->tx_info.index >= sh_buf->size)
ep->tx_info.index = 0;
}
return num_bytes;
}
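/*
* Wrap-around example (numbers are illustrative): with sh_buf->size = 8
* and tx index = 6, writing 4 bytes yields count1 = 2 (offsets 6..7,
* index wraps to 0) and count2 = 2 (offsets 0..1), leaving index = 2.
*/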
/* Updates the write index which is shared with the other VM */
void hab_pipe_write_commit(struct hab_pipe_endpoint *ep)
{
struct hab_shared_buf *sh_buf = ep->tx_info.sh_buf;
/* Must commit data before incrementing count */
asm volatile("dmb ishst" ::: "memory");
sh_buf->wr_count = ep->tx_info.wr_count;
}
#define HAB_HEAD_CLEAR 0xCC
uint32_t hab_pipe_read(struct hab_pipe_endpoint *ep,
unsigned char *p, uint32_t size, uint32_t clear)
{
struct hab_shared_buf *sh_buf = ep->rx_info.sh_buf;
/* mb to guarantee wr_count is updated after contents are written */
uint32_t avail = sh_buf->wr_count - sh_buf->rd_count;
uint32_t count1, count2, to_read;
uint32_t index_saved = ep->rx_info.index; /* store original for retry */
if (!p || avail == 0 || size == 0)
return 0;
asm volatile("dmb ishld" ::: "memory");
/* error if available is less than size and available is not zero */
to_read = (avail < size) ? avail : size;
if (to_read < size) /* only provide exact read size, not less */
pr_err("less data available %d than requested %d\n",
avail, size);
count1 = (to_read <= (sh_buf->size - ep->rx_info.index)) ? to_read :
(sh_buf->size - ep->rx_info.index);
count2 = to_read - count1;
if (count1 > 0) {
memcpy(p, (void *)&sh_buf->data[ep->rx_info.index], count1);
ep->rx_info.index += count1;
if (ep->rx_info.index >= sh_buf->size)
ep->rx_info.index = 0;
}
if (count2 > 0) { /* handle buffer wrapping */
memcpy(p + count1, (void *)&sh_buf->data[ep->rx_info.index],
count2);
ep->rx_info.index += count2;
}
if (count1 + count2) {
struct hab_header *head = (struct hab_header *)p;
int retry_cnt = 0;
if (clear && (size == sizeof(*head))) {
retry:
if (unlikely(head->signature != 0xBEE1BEE1)) {
pr_err("hab head corruption detected at %pK buf %pK %08X %08X %08X %08X rd %d wr %d index %X saved %X retry %d\n",
head, &sh_buf->data[0],
head->id_type_size, head->session_id,
head->signature, head->sequence,
sh_buf->rd_count, sh_buf->wr_count,
ep->rx_info.index, index_saved,
retry_cnt);
if (retry_cnt++ <= 1000) {
memcpy(p, &sh_buf->data[index_saved],
count1);
if (count2)
memcpy(&p[count1],
&sh_buf->data[ep->rx_info.index - count2],
count2);
goto retry;
} else
pr_err("quit retry after %d time may fail %X %X %X %X rd %d wr %d index %X\n",
retry_cnt, head->id_type_size,
head->session_id,
head->signature,
head->sequence,
sh_buf->rd_count,
sh_buf->wr_count,
ep->rx_info.index);
}
}
/* Must commit data before incrementing count */
asm volatile("dmb ish" ::: "memory");
sh_buf->rd_count += count1 + count2;
}
return to_read;
}
void hab_pipe_rxinfo(struct hab_pipe_endpoint *ep, uint32_t *rd_cnt,
uint32_t *wr_cnt, uint32_t *idx)
{
struct hab_shared_buf *sh_buf = ep->rx_info.sh_buf;
*idx = ep->rx_info.index;
*rd_cnt = sh_buf->rd_count;
*wr_cnt = sh_buf->wr_count;
}

View File

@ -0,0 +1,73 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef HAB_PIPE_H
#define HAB_PIPE_H
struct hab_shared_buf {
uint32_t rd_count; /* volatile cannot be used here */
uint32_t wr_count; /* volatile cannot be used here */
uint32_t size;
unsigned char data[]; /* volatile cannot be used here */
};
/* debug only */
struct dbg_item {
uint32_t rd_cnt;
uint32_t wr_cnt;
void *va; /* local for read or write */
uint32_t index; /* local */
uint32_t sz; /* size in */
uint32_t ret; /* actual bytes read */
};
#define DBG_ITEM_SIZE 20
struct dbg_items {
struct dbg_item it[DBG_ITEM_SIZE];
int idx;
};
struct hab_pipe_endpoint {
struct {
uint32_t wr_count;
uint32_t index;
struct hab_shared_buf *sh_buf;
} tx_info;
struct {
uint32_t index;
struct hab_shared_buf *sh_buf;
} rx_info;
};
struct hab_pipe {
struct hab_pipe_endpoint top;
struct hab_pipe_endpoint bottom;
/* For debugging only */
struct hab_shared_buf *buf_a; /* top TX, bottom RX */
struct hab_shared_buf *buf_b; /* top RX, bottom TX */
size_t total_size;
unsigned char buf_base[];
};
size_t hab_pipe_calc_required_bytes(uint32_t shared_buf_size);
struct hab_pipe_endpoint *hab_pipe_init(struct hab_pipe *pipe,
uint32_t shared_buf_size, int top);
uint32_t hab_pipe_write(struct hab_pipe_endpoint *ep,
unsigned char *p, uint32_t num_bytes);
void hab_pipe_write_commit(struct hab_pipe_endpoint *ep);
uint32_t hab_pipe_read(struct hab_pipe_endpoint *ep,
unsigned char *p, uint32_t size, uint32_t clear);
/* debug only */
void hab_pipe_rxinfo(struct hab_pipe_endpoint *ep, uint32_t *rd_cnt,
uint32_t *wr_cnt, uint32_t *idx);
#endif /* HAB_PIPE_H */

View File

@ -0,0 +1,242 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
#include "hab_qvm.h"
/*
* this is for platforms that do not provide probe features. the size
* should match the hab device side (all mmids)
*/
static struct shmem_irq_config pchan_factory_settings[] = {
{0x1b000000, 7},
{0x1b001000, 8},
{0x1b002000, 9},
{0x1b003000, 10},
{0x1b004000, 11},
{0x1b005000, 12},
{0x1b006000, 13},
{0x1b007000, 14},
{0x1b008000, 15},
{0x1b009000, 16},
{0x1b00a000, 17},
{0x1b00b000, 18},
{0x1b00c000, 19},
{0x1b00d000, 20},
{0x1b00e000, 21},
{0x1b00f000, 22},
{0x1b010000, 23},
{0x1b011000, 24},
{0x1b012000, 25},
{0x1b013000, 26},
{0x1b014000, 27},
{0x1b015000, 28},
{0x1b016000, 29},
{0x1b017000, 30},
};
struct qvm_plugin_info qvm_priv_info = {
pchan_factory_settings,
ARRAY_SIZE(pchan_factory_settings),
0,
ARRAY_SIZE(pchan_factory_settings)
};
/*
* this is common but only for guest
*/
uint64_t get_guest_ctrl_paddr(struct qvm_channel *dev,
unsigned long factory_addr, int irq, const char *name, uint32_t pages)
{
int i;
unsigned long factory_va;
pr_debug("name = %s, factory paddr = 0x%lx, irq %d, pages %d\n",
name, factory_addr, irq, pages);
/* get guest factory's va */
factory_va = hab_shmem_factory_va(factory_addr);
dev->guest_factory = (struct guest_shm_factory *)factory_va;
if (dev->guest_factory->signature != GUEST_SHM_SIGNATURE) {
pr_err("signature error: %ld != %llu, factory addr %lx\n",
GUEST_SHM_SIGNATURE, dev->guest_factory->signature,
factory_addr);
iounmap(dev->guest_factory);
return 0;
}
dev->guest_intr = dev->guest_factory->vector;
/*
* Set the name field on the factory page to identify the shared memory
* region
*/
for (i = 0; i < strlen(name) && i < GUEST_SHM_MAX_NAME - 1; i++)
dev->guest_factory->name[i] = name[i];
dev->guest_factory->name[i] = (char) 0;
guest_shm_create(dev->guest_factory, pages);
/* See if we successfully created/attached to the region. */
if (dev->guest_factory->status != GSS_OK) {
pr_err("create failed: %d\n", dev->guest_factory->status);
iounmap(dev->guest_factory);
return 0;
}
pr_debug("shm creation size %x, paddr=%llx, vector %d, dev %pK\n",
dev->guest_factory->size,
dev->guest_factory->shmem,
dev->guest_intr,
dev);
dev->factory_addr = factory_addr;
dev->irq = irq;
return dev->guest_factory->shmem;
}
void hab_pipe_reset(struct physical_channel *pchan)
{
struct hab_pipe_endpoint *pipe_ep;
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
pipe_ep = hab_pipe_init(dev->pipe, PIPE_SHMEM_SIZE,
pchan->is_be ? 0 : 1);
if (dev->pipe_ep != pipe_ep)
pr_warn("The pipe endpoint must not change\n");
}
/*
* allocate hypervisor plug-in specific resource for pchan, and call hab pchan
* alloc common function. hab driver struct is directly accessed.
* commdev: pointer to store the pchan address
* id: index to hab_device (mmids)
* is_be: pchan local endpoint role
* name: pchan name
* return: status 0: success, otherwise: failures
*/
int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
int vmid_remote, struct hab_device *mmid_device)
{
struct qvm_channel *dev = NULL;
struct qvm_channel_os *dev_os = NULL;
struct physical_channel **pchan = (struct physical_channel **)commdev;
int ret = 0;
char *shmdata;
uint32_t pipe_alloc_size =
hab_pipe_calc_required_bytes(PIPE_SHMEM_SIZE);
uint32_t pipe_alloc_pages =
(pipe_alloc_size + PAGE_SIZE - 1) / PAGE_SIZE;
pr_debug("%s: pipe_alloc_size is %d\n", __func__, pipe_alloc_size);
/* allocate common part for the commdev */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
ret = -ENOMEM;
goto err;
}
/* allocate the os-specific data for the commdev */
dev_os = kzalloc(sizeof(*dev_os), GFP_KERNEL);
if (!dev_os) {
ret = -ENOMEM;
goto err;
}
dev->os_data = dev_os;
spin_lock_init(&dev->io_lock);
/*
* create/attach to the shmem region, and get back the
* shmem data vaddr
*/
shmdata = hab_shmem_attach(dev, name, pipe_alloc_pages);
if (IS_ERR(shmdata)) {
ret = PTR_ERR(shmdata);
goto err;
}
dev->pipe = (struct hab_pipe *)shmdata;
pr_debug("\"%s\": pipesize %d, addr 0x%pK, be %d\n", name,
pipe_alloc_size, dev->pipe, is_be);
dev->pipe_ep = hab_pipe_init(dev->pipe, PIPE_SHMEM_SIZE,
is_be ? 0 : 1);
/* newly created pchan is added to mmid device list */
*pchan = hab_pchan_alloc(mmid_device, vmid_remote);
if (!(*pchan)) {
ret = -ENOMEM;
goto err;
}
(*pchan)->closed = 0;
(*pchan)->hyp_data = (void *)dev;
strlcpy((*pchan)->name, name, MAX_VMID_NAME_SIZE);
(*pchan)->is_be = is_be;
ret = habhyp_commdev_create_dispatcher(*pchan);
if (ret < 0)
goto err;
return ret;
err:
pr_err("%s failed\n", __func__);
if (*commdev) {
habhyp_commdev_dealloc(*commdev);
} else {
/* pchan was never created; free the partial commdev directly */
kfree(dev_os);
kfree(dev);
}
return ret;
}
int habhyp_commdev_dealloc(void *commdev)
{
struct physical_channel *pchan = (struct physical_channel *)commdev;
struct qvm_channel *dev = pchan->hyp_data;
/* os specific deallocation for this commdev */
habhyp_commdev_dealloc_os(commdev);
if (get_refcnt(pchan->refcount) > 1) {
pr_warn("potential leak pchan %s vchans %d refcnt %d\n",
pchan->name, pchan->vcnt,
get_refcnt(pchan->refcount));
}
kfree(dev->os_data);
kfree(dev);
if (pchan)
hab_pchan_put(pchan);
return 0;
}
int hab_hypervisor_register(void)
{
int ret = 0;
/* os-specific registration work */
ret = hab_hypervisor_register_os();
if (ret)
goto done;
pr_info("initializing for %s VM\n", hab_driver.b_server_dom ?
"host" : "guest");
hab_driver.hyp_priv = &qvm_priv_info;
done:
return ret;
}
void hab_hypervisor_unregister(void)
{
pr_info("unregistration is called, but do nothing\n");
}

View File

@ -0,0 +1,69 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __HAB_QNX_H
#define __HAB_QNX_H
#include "hab.h"
#include "hab_pipe.h"
#include "hab_qvm_os.h"
struct qvm_channel {
int be;
struct hab_pipe *pipe;
struct hab_pipe_endpoint *pipe_ep;
spinlock_t io_lock;
/* common but only for guest */
struct guest_shm_factory *guest_factory;
struct guest_shm_control *guest_ctrl;
/* cached guest ctrl idx value to prevent trap when accessed */
uint32_t idx;
/* Guest VM */
unsigned int guest_intr;
unsigned int guest_iid;
unsigned int factory_addr;
unsigned int irq;
/* os-specific part */
struct qvm_channel_os *os_data;
/* debug only */
struct workqueue_struct *wq;
struct work_data {
struct work_struct work;
int data; /* free to modify */
} wdata;
char *side_buf; /* to store the contents from hab-pipe */
};
/* This is common but only for guest in HQX */
struct shmem_irq_config {
unsigned long factory_addr; /* from gvm settings when provided */
int irq; /* from gvm settings when provided */
};
struct qvm_plugin_info {
struct shmem_irq_config *pchan_settings;
int setting_size;
int curr;
int probe_cnt;
};
extern struct qvm_plugin_info qvm_priv_info;
/* Shared mem size in each direction for communication pipe */
#define PIPE_SHMEM_SIZE (128 * 1024)
void hab_pipe_reset(struct physical_channel *pchan);
void habhyp_notify(void *commdev);
unsigned long hab_shmem_factory_va(unsigned long factory_addr);
char *hab_shmem_attach(struct qvm_channel *dev, const char *name,
uint32_t pages);
uint64_t get_guest_ctrl_paddr(struct qvm_channel *dev,
unsigned long factory_addr, int irq, const char *name, uint32_t pages);
#endif /* __HAB_QNX_H */

View File

@ -0,0 +1,271 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
#include "hab_qvm.h"
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
int hab_hypervisor_register_os(void)
{
hab_driver.b_server_dom = 0;
return 0;
}
void habhyp_commdev_dealloc_os(void *commdev)
{
struct physical_channel *pchan = (struct physical_channel *)commdev;
struct qvm_channel *dev = pchan->hyp_data;
dev->guest_ctrl->detach = 0;
}
static irqreturn_t shm_irq_handler(int irq, void *_pchan)
{
irqreturn_t rc = IRQ_NONE;
struct physical_channel *pchan = (struct physical_channel *) _pchan;
struct qvm_channel *dev =
(struct qvm_channel *) (pchan ? pchan->hyp_data : NULL);
if (dev && dev->guest_ctrl) {
int status = dev->guest_ctrl->status;
if (status & 0xffff) { /* source bitmask indicator */
rc = IRQ_HANDLED;
tasklet_hi_schedule(&dev->os_data->task);
}
}
return rc;
}
/* debug only */
static void work_func(struct work_struct *work)
{
struct work_data *wdata = (struct work_data *) work;
dump_hab();
wdata->data = 1; /* done, now unblock tasklet */
}
int habhyp_commdev_create_dispatcher(struct physical_channel *pchan)
{
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
int ret;
tasklet_init(&dev->os_data->task, physical_channel_rx_dispatch,
(unsigned long) pchan);
/* debug */
dev->wq = create_workqueue("wq_dump");
INIT_WORK(&dev->wdata.work, work_func);
dev->wdata.data = 0; /* let the caller wait */
dev->side_buf = kzalloc(PIPE_SHMEM_SIZE, GFP_KERNEL);
pr_debug("request_irq: irq = %d, pchan name = %s\n",
dev->irq, pchan->name);
ret = request_irq(dev->irq, shm_irq_handler, IRQF_SHARED |
IRQF_NO_SUSPEND, pchan->name, pchan);
if (ret)
pr_err("request_irq for %s failed: %d\n",
pchan->name, ret);
return ret;
}
/* Debug: critical section? */
void hab_pipe_read_dump(struct physical_channel *pchan)
{
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
char str[250];
int i;
struct dbg_items *its = (struct dbg_items *)dev->pipe->buf_a;
snprintf(str, sizeof(str),
"index 0x%X rd_cnt %d wr_cnt %d size %d data_addr %lX",
dev->pipe_ep->rx_info.index,
dev->pipe_ep->rx_info.sh_buf->rd_count,
dev->pipe_ep->rx_info.sh_buf->wr_count,
dev->pipe_ep->rx_info.sh_buf->size,
&dev->pipe_ep->rx_info.sh_buf->data[0]);
dump_hab_buf(str, strlen(str)+1);
/* trace history buffer dump */
snprintf(str, sizeof(str), "dbg hist buffer index %d\n", its->idx);
dump_hab_buf(str, strlen(str)+1);
for (i = 0; i < DBG_ITEM_SIZE; i++) {
struct dbg_item *it = &its->it[i];
snprintf(str, sizeof(str),
"it %d: rd %d wr %d va %lX index 0x%X size %d ret %d\n",
i, it->rd_cnt, it->wr_cnt, it->va, it->index, it->sz, it->ret);
dump_hab_buf(str, strlen(str)+1);
}
/* !!!! to end the readable string */
str[0] = str[1] = str[2] = str[3] = 33;
dump_hab_buf(str, 4); /* separator */
dump_hab_buf((void *)dev->pipe_ep->rx_info.sh_buf->data,
dev->pipe_ep->rx_info.sh_buf->size);
str[0] = str[1] = str[2] = str[3] = str[4] = str[5] = str[6] =
str[7] = 33; /* !!!! to end the readable string */
dump_hab_buf(str, 16); /* separator */
dump_hab_buf(dev->side_buf, dev->pipe_ep->rx_info.sh_buf->size);
}
void dump_hab_wq(void *hyp_data)
{
struct qvm_channel *dev = (struct qvm_channel *)hyp_data;
queue_work(dev->wq, &dev->wdata.work);
dev->wdata.data = 0; /* reset it back */
}
/* The input is already va now */
inline unsigned long hab_shmem_factory_va(unsigned long factory_addr)
{
return factory_addr;
}
/* to get the shmem data region virtual address */
char *hab_shmem_attach(struct qvm_channel *dev, const char *name,
uint32_t pipe_alloc_pages)
{
struct qvm_plugin_info *qvm_priv = hab_driver.hyp_priv;
uint64_t paddr;
char *shmdata;
int ret = 0;
/* no more vdev-shmem available for another pchan, given the 1:1 rule */
if (qvm_priv->curr >= qvm_priv->probe_cnt) {
pr_err("pchan guest factory setting %d overflow probe cnt %d\n",
qvm_priv->curr, qvm_priv->probe_cnt);
ret = -ENOSPC;
goto err;
}
paddr = get_guest_ctrl_paddr(dev,
qvm_priv->pchan_settings[qvm_priv->curr].factory_addr,
qvm_priv->pchan_settings[qvm_priv->curr].irq,
name,
pipe_alloc_pages);
dev->guest_ctrl = memremap(paddr,
(dev->guest_factory->size + 1) * PAGE_SIZE, MEMREMAP_WB);
/* page size should be 4KB */
if (!dev->guest_ctrl) {
ret = -ENOMEM;
goto err;
}
shmdata = (char *)dev->guest_ctrl + PAGE_SIZE;
pr_debug("ctrl page 0x%llx mapped at 0x%pK, idx %d\n",
paddr, dev->guest_ctrl, dev->guest_ctrl->idx);
pr_debug("data buffer mapped at 0x%pK\n", shmdata);
dev->idx = dev->guest_ctrl->idx;
qvm_priv->curr++;
return shmdata;
err:
return ERR_PTR(ret);
}
/* this happens before hypervisor register */
static int hab_shmem_probe(struct platform_device *pdev)
{
int irq = 0;
struct resource *mem;
void __iomem *shmem_base = NULL;
int ret = 0;
/* hab in one GVM will not have more pchans than one VM is allowed */
if (qvm_priv_info.probe_cnt >= hab_driver.ndevices) {
pr_err("no more channel, current %d, maximum %d\n",
qvm_priv_info.probe_cnt, hab_driver.ndevices);
return -ENODEV;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
pr_err("no interrupt for the channel %d, error %d\n",
qvm_priv_info.probe_cnt, irq);
return irq;
}
qvm_priv_info.pchan_settings[qvm_priv_info.probe_cnt].irq = irq;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
pr_err("can not get io mem resource for channel %d\n",
qvm_priv_info.probe_cnt);
return -EINVAL;
}
shmem_base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(shmem_base)) {
pr_err("ioremap failed for channel %d, mem %pK\n",
qvm_priv_info.probe_cnt, mem);
return -EINVAL;
}
qvm_priv_info.pchan_settings[qvm_priv_info.probe_cnt].factory_addr
= (unsigned long)((uintptr_t)shmem_base);
pr_debug("pchan idx %d, hab irq=%d shmem_base=%pK, mem %pK\n",
qvm_priv_info.probe_cnt, irq, shmem_base, mem);
qvm_priv_info.probe_cnt++;
return ret;
}
static int hab_shmem_remove(struct platform_device *pdev)
{
return 0;
}
static void hab_shmem_shutdown(struct platform_device *pdev)
{
}
static const struct of_device_id hab_shmem_match_table[] = {
{.compatible = "qvm,guest_shm"},
{},
};
static struct platform_driver hab_shmem_driver = {
.probe = hab_shmem_probe,
.remove = hab_shmem_remove,
.shutdown = hab_shmem_shutdown,
.driver = {
.name = "hab_shmem",
.of_match_table = of_match_ptr(hab_shmem_match_table),
},
};
static int __init hab_shmem_init(void)
{
qvm_priv_info.probe_cnt = 0;
return platform_driver_register(&hab_shmem_driver);
}
static void __exit hab_shmem_exit(void)
{
platform_driver_unregister(&hab_shmem_driver);
qvm_priv_info.probe_cnt = 0;
}
core_initcall(hab_shmem_init);
module_exit(hab_shmem_exit);
MODULE_DESCRIPTION("Hypervisor shared memory driver");
MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#ifndef __HAB_QVM_OS_H
#define __HAB_QVM_OS_H
#include <linux/guest_shm.h>
#include <linux/stddef.h>
struct qvm_channel_os {
struct tasklet_struct task;
};
#endif /*__HAB_QVM_OS_H*/

View File

@ -0,0 +1,254 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
#include "hab_grantable.h"
#define MAX_LINE_SIZE 128
int hab_stat_init(struct hab_driver *driver)
{
return hab_stat_init_sub(driver);
}
int hab_stat_deinit(struct hab_driver *driver)
{
return hab_stat_deinit_sub(driver);
}
/*
* On success, the return value is the total length of the formatted line
* appended onto the original dest string (strlcat semantics).
*/
static int hab_stat_buffer_print(char *dest,
int dest_size, const char *fmt, ...)
{
va_list args;
char line[MAX_LINE_SIZE];
int ret;
va_start(args, fmt);
ret = vsnprintf(line, sizeof(line), fmt, args);
va_end(args);
if (ret > 0)
ret = strlcat(dest, line, dest_size);
return ret;
}
int hab_stat_show_vchan(struct hab_driver *driver,
char *buf, int size)
{
int i, ret = 0;
ret = strlcpy(buf, "", size);
for (i = 0; i < driver->ndevices; i++) {
struct hab_device *dev = &driver->devp[i];
struct physical_channel *pchan;
struct virtual_channel *vc;
spin_lock_bh(&dev->pchan_lock);
list_for_each_entry(pchan, &dev->pchannels, node) {
if (!pchan->vcnt)
continue;
ret = hab_stat_buffer_print(buf, size,
"nm %s r %d lc %d rm %d sq_t %d sq_r %d st 0x%x vn %d:\n",
pchan->name, pchan->is_be, pchan->vmid_local,
pchan->vmid_remote, pchan->sequence_tx,
pchan->sequence_rx, pchan->status, pchan->vcnt);
read_lock(&pchan->vchans_lock);
list_for_each_entry(vc, &pchan->vchannels, pnode) {
ret = hab_stat_buffer_print(buf, size,
"%08X(%d:%d) ", vc->id,
get_refcnt(vc->refcount),
vc->otherend_closed);
}
ret = hab_stat_buffer_print(buf, size, "\n");
read_unlock(&pchan->vchans_lock);
}
spin_unlock_bh(&dev->pchan_lock);
}
return ret;
}
int hab_stat_show_ctx(struct hab_driver *driver,
char *buf, int size)
{
int ret = 0;
struct uhab_context *ctx;
ret = strlcpy(buf, "", size);
spin_lock_bh(&hab_driver.drvlock);
ret = hab_stat_buffer_print(buf, size,
"Total contexts %d\n",
driver->ctx_cnt);
list_for_each_entry(ctx, &hab_driver.uctx_list, node) {
ret = hab_stat_buffer_print(buf, size,
"ctx %d K %d close %d vc %d exp %d imp %d open %d\n",
ctx->owner, ctx->kernel, ctx->closing,
ctx->vcnt, ctx->export_total,
ctx->import_total, ctx->pending_cnt);
}
spin_unlock_bh(&hab_driver.drvlock);
return ret;
}
static int get_pft_tbl_total_size(struct compressed_pfns *pfn_table)
{
int i, total_size = 0;
for (i = 0; i < pfn_table->nregions; i++)
total_size += pfn_table->region[i].size * PAGE_SIZE;
return total_size;
}
static int print_ctx_total_expimp(struct uhab_context *ctx,
char *buf, int size)
{
struct compressed_pfns *pfn_table = NULL;
int exp_total = 0, imp_total = 0;
int exp_cnt = 0, imp_cnt = 0;
struct export_desc *exp = NULL;
int exim_size = 0;
read_lock(&ctx->exp_lock);
hab_stat_buffer_print(buf, size, "export[expid:vcid:size]: ");
list_for_each_entry(exp, &ctx->exp_whse, node) {
pfn_table = (struct compressed_pfns *)exp->payload;
exim_size = get_pft_tbl_total_size(pfn_table);
exp_total += exim_size;
exp_cnt++;
hab_stat_buffer_print(buf, size,
"[%d:%x:%d] ", exp->export_id,
exp->vcid_local, exim_size);
}
hab_stat_buffer_print(buf, size, "\n");
read_unlock(&ctx->exp_lock);
spin_lock_bh(&ctx->imp_lock);
hab_stat_buffer_print(buf, size, "import[expid:vcid:size]: ");
list_for_each_entry(exp, &ctx->imp_whse, node) {
if (habmm_imp_hyp_map_check(ctx->import_ctx, exp)) {
pfn_table = (struct compressed_pfns *)exp->payload;
exim_size = get_pft_tbl_total_size(pfn_table);
imp_total += exim_size;
imp_cnt++;
hab_stat_buffer_print(buf, size,
"[%d:%x:%d] ", exp->export_id,
exp->vcid_local, exim_size);
}
}
hab_stat_buffer_print(buf, size, "\n");
spin_unlock_bh(&ctx->imp_lock);
if (exp_cnt || exp_total || imp_cnt || imp_total)
return hab_stat_buffer_print(buf, size,
"ctx %d exp %d size %d imp %d size %d\n",
ctx->owner, exp_cnt, exp_total,
imp_cnt, imp_total);
else
return 0;
}
int hab_stat_show_expimp(struct hab_driver *driver,
int pid, char *buf, int size)
{
struct uhab_context *ctx;
int ret;
ret = strlcpy(buf, "", size);
spin_lock_bh(&hab_driver.drvlock);
list_for_each_entry(ctx, &hab_driver.uctx_list, node) {
if (pid == ctx->owner)
ret = print_ctx_total_expimp(ctx, buf, size);
}
spin_unlock_bh(&hab_driver.drvlock);
return ret;
}
#define HAB_PIPE_DUMP_FILE_NAME "/sdcard/habpipe-"
#define HAB_PIPE_DUMP_FILE_EXT ".dat"
#define HAB_PIPEDUMP_SIZE (768*1024)
static char *filp;
static int pipedump_idx;
int dump_hab_open(void)
{
int rc = 0;
char file_path[256];
char file_time[100];
rc = dump_hab_get_file_name(file_time, sizeof(file_time));
strlcpy(file_path, HAB_PIPE_DUMP_FILE_NAME, sizeof(file_path));
strlcat(file_path, file_time, sizeof(file_path));
strlcat(file_path, HAB_PIPE_DUMP_FILE_EXT, sizeof(file_path));
filp = vmalloc(HAB_PIPEDUMP_SIZE);
if (!filp) {
rc = -ENOMEM;
pr_err("failed to create pipe dump buffer rc %d\n", rc);
} else {
pr_info("hab pipe dump buffer opened %s\n", file_path);
pipedump_idx = 0;
dump_hab_buf(file_path, strlen(file_path)); /* id first */
}
return rc;
}
void dump_hab_close(void)
{
pr_info("pipe dump content size %d completed\n", pipedump_idx);
/* transfer buffer ownership to devcoredump */
filp = NULL;
pipedump_idx = 0;
}
int dump_hab_buf(void *buf, int size)
{
if (!buf || !size || size > HAB_PIPEDUMP_SIZE - pipedump_idx) {
pr_err("wrong parameters buf %pK size %d allowed %d\n",
buf, size, HAB_PIPEDUMP_SIZE - pipedump_idx);
return 0;
}
memcpy(&filp[pipedump_idx], buf, size);
pipedump_idx += size;
return size;
}
void dump_hab(void)
{
struct physical_channel *pchan = NULL;
int i = 0;
char str[8] = {35, 35, 35, 35, 35, 35, 35, 35}; /* ## */
if (dump_hab_open())
return;
for (i = 0; i < hab_driver.ndevices; i++) {
struct hab_device *habdev = &hab_driver.devp[i];
/* only care about gfx and misc */
if (habdev->id == MM_GFX || habdev->id == MM_MISC) {
list_for_each_entry(pchan, &habdev->pchannels, node) {
if (pchan->vcnt > 0) {
pr_info("***** dump pchan %s vcnt %d *****\n",
pchan->name, pchan->vcnt);
hab_pipe_read_dump(pchan);
}
}
dump_hab_buf(str, 8); /* separator */
}
}
dev_coredumpv(hab_driver.dev, filp, pipedump_idx, GFP_KERNEL);
dump_hab_close();
}
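/*
* Once handed to dev_coredumpv(), the dump buffer is owned and later freed
* by the devcoredump core. From user space it typically surfaces as a
* devcoredump node (sketch; the node index varies at runtime):
*
*	cat /sys/class/devcoredump/devcd1/data > habpipe.dat
*	echo 1 > /sys/class/devcoredump/devcd1/data	(dismiss the dump)
*/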

View File

@ -0,0 +1,299 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
struct virtual_channel *
hab_vchan_alloc(struct uhab_context *ctx, struct physical_channel *pchan,
int openid)
{
int id;
struct virtual_channel *vchan;
if (!pchan || !ctx)
return NULL;
vchan = kzalloc(sizeof(*vchan), GFP_KERNEL);
if (!vchan)
return NULL;
/* This should be the first thing we do in this function */
idr_preload(GFP_KERNEL);
spin_lock_bh(&pchan->vid_lock);
id = idr_alloc(&pchan->vchan_idr, vchan, 1,
(HAB_VCID_ID_MASK >> HAB_VCID_ID_SHIFT) + 1, GFP_NOWAIT);
spin_unlock_bh(&pchan->vid_lock);
idr_preload_end();
if (id <= 0) {
pr_err("idr failed %d\n", id);
kfree(vchan);
return NULL;
}
mb(); /* id generation must be done before pchan_get */
hab_pchan_get(pchan);
vchan->pchan = pchan;
/* a vchan needs both vcid and openid to be properly located */
vchan->session_id = openid;
write_lock(&pchan->vchans_lock);
list_add_tail(&vchan->pnode, &pchan->vchannels);
pchan->vcnt++;
write_unlock(&pchan->vchans_lock);
vchan->id = ((id << HAB_VCID_ID_SHIFT) & HAB_VCID_ID_MASK) |
((pchan->habdev->id << HAB_VCID_MMID_SHIFT) &
HAB_VCID_MMID_MASK) |
((pchan->dom_id << HAB_VCID_DOMID_SHIFT) &
HAB_VCID_DOMID_MASK);
spin_lock_init(&vchan->rx_lock);
INIT_LIST_HEAD(&vchan->rx_list);
init_waitqueue_head(&vchan->rx_queue);
kref_init(&vchan->refcount);
vchan->otherend_closed = pchan->closed;
hab_ctx_get(ctx);
vchan->ctx = ctx;
return vchan;
}
static void
hab_vchan_free(struct kref *ref)
{
struct virtual_channel *vchan =
container_of(ref, struct virtual_channel, refcount);
struct hab_message *message, *msg_tmp;
struct physical_channel *pchan = vchan->pchan;
struct uhab_context *ctx = vchan->ctx;
struct virtual_channel *vc, *vc_tmp;
int irqs_disabled = irqs_disabled();
hab_spin_lock(&vchan->rx_lock, irqs_disabled);
list_for_each_entry_safe(message, msg_tmp, &vchan->rx_list, node) {
list_del(&message->node);
hab_msg_free(message);
}
hab_spin_unlock(&vchan->rx_lock, irqs_disabled);
/* release vchan from pchan. no more msg for this vchan */
hab_write_lock(&pchan->vchans_lock, irqs_disabled);
list_for_each_entry_safe(vc, vc_tmp, &pchan->vchannels, pnode) {
if (vchan == vc) {
list_del(&vc->pnode);
/* the ref is held in case the pchan is freed */
pchan->vcnt--;
break;
}
}
hab_write_unlock(&pchan->vchans_lock, irqs_disabled);
/* the release vchan from ctx was done earlier in vchan close() */
hab_ctx_put(ctx); /* now ctx is not needed from this vchan's view */
/* release the idr entry last so the same id will not be reused early */
hab_spin_lock(&pchan->vid_lock, irqs_disabled);
idr_remove(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan->id));
hab_spin_unlock(&pchan->vid_lock, irqs_disabled);
hab_pchan_put(pchan); /* no more need for pchan from this vchan */
kfree(vchan);
}
/*
* only for msg recv path to retrieve vchan from vcid and openid based on
* pchan's vchan list
*/
struct virtual_channel*
hab_vchan_get(struct physical_channel *pchan, struct hab_header *header)
{
struct virtual_channel *vchan;
uint32_t vchan_id = HAB_HEADER_GET_ID(*header);
uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header);
size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
uint32_t payload_type = HAB_HEADER_GET_TYPE(*header);
int irqs_disabled = irqs_disabled();
hab_spin_lock(&pchan->vid_lock, irqs_disabled);
vchan = idr_find(&pchan->vchan_idr, HAB_VCID_GET_ID(vchan_id));
if (vchan) {
if (vchan->session_id != session_id)
/*
* skip it if the session is different even though
* the vcid is the same
*/
vchan = NULL;
else if (!vchan->otherend_id /*&& !vchan->session_id*/) {
/*
* an unpaired vchan can be fetched right after it is
* alloc'ed, so it has to be skipped during the search
* for a remote msg
*/
pr_warn("vcid %x is not paired yet session %d refcnt %d type %d sz %zd\n",
vchan->id, vchan->otherend_id,
get_refcnt(vchan->refcount),
payload_type, sizebytes);
vchan = NULL;
} else if (vchan->otherend_closed || vchan->closed) {
pr_debug("closed already remote %d local %d vcid %x remote %x session %d refcnt %d header %x session %d type %d sz %zd\n",
vchan->otherend_closed, vchan->closed,
vchan->id, vchan->otherend_id,
vchan->session_id, get_refcnt(vchan->refcount),
vchan_id, session_id, payload_type, sizebytes);
vchan = NULL;
} else if (!kref_get_unless_zero(&vchan->refcount)) {
/*
* this happens when refcnt is already zero
* (put from other thread) or there is an actual error
*/
pr_err("failed to inc vcid %pK %x remote %x session %d refcnt %d header %x session %d type %d sz %zd\n",
vchan, vchan->id, vchan->otherend_id,
vchan->session_id, get_refcnt(vchan->refcount),
vchan_id, session_id, payload_type, sizebytes);
vchan = NULL;
}
}
hab_spin_unlock(&pchan->vid_lock, irqs_disabled);
return vchan;
}
/* wake up local waiting Q, so stop-vchan can be processed */
void hab_vchan_stop(struct virtual_channel *vchan)
{
if (vchan) {
vchan->otherend_closed = 1;
wake_up(&vchan->rx_queue);
if (vchan->ctx)
wake_up_interruptible(&vchan->ctx->exp_wq);
else
pr_err("NULL ctx for vchan %x\n", vchan->id);
}
}
void hab_vchans_stop(struct physical_channel *pchan)
{
struct virtual_channel *vchan, *tmp;
read_lock(&pchan->vchans_lock);
list_for_each_entry_safe(vchan, tmp, &pchan->vchannels, pnode) {
hab_vchan_stop(vchan);
}
read_unlock(&pchan->vchans_lock);
}
/* send vchan close to remote and stop receiving anything locally */
void hab_vchan_stop_notify(struct virtual_channel *vchan)
{
hab_send_close_msg(vchan);
hab_vchan_stop(vchan);
}
static int hab_vchans_per_pchan_empty(struct physical_channel *pchan)
{
int empty;
read_lock(&pchan->vchans_lock);
empty = list_empty(&pchan->vchannels);
if (!empty) {
struct virtual_channel *vchan;
int vcnt = pchan->vcnt;
list_for_each_entry(vchan, &pchan->vchannels, pnode) {
/* discount open-pending unpaired vchan */
if (!vchan->session_id)
vcnt--;
else
pr_err("vchan %pK %x rm %x sn %d rf %d clsd %d rm clsd %d\n",
vchan, vchan->id,
vchan->otherend_id,
vchan->session_id,
get_refcnt(vchan->refcount),
vchan->closed, vchan->otherend_closed);
}
if (!vcnt)
empty = 1; /* unpaired vchans can exist at init time */
}
read_unlock(&pchan->vchans_lock);
return empty;
}
static int hab_vchans_empty(int vmid)
{
int i, empty = 1;
struct physical_channel *pchan;
struct hab_device *hab_dev;
for (i = 0; i < hab_driver.ndevices; i++) {
hab_dev = &hab_driver.devp[i];
spin_lock_bh(&hab_dev->pchan_lock);
list_for_each_entry(pchan, &hab_dev->pchannels, node) {
if (pchan->vmid_remote == vmid) {
if (!hab_vchans_per_pchan_empty(pchan)) {
empty = 0;
spin_unlock_bh(&hab_dev->pchan_lock);
pr_info("vmid %d %s's vchans are not closed\n",
vmid, pchan->name);
break;
}
}
}
spin_unlock_bh(&hab_dev->pchan_lock);
}
return empty;
}
/*
* block until all vchans of a given GVM are explicitly closed
* with habmm_socket_close() by hab clients themselves
*/
void hab_vchans_empty_wait(int vmid)
{
pr_info("waiting for GVM%d's sockets closure\n", vmid);
while (!hab_vchans_empty(vmid))
usleep_range(10000, 12000);
pr_info("all of GVM%d's sockets are closed\n", vmid);
}
int hab_vchan_find_domid(struct virtual_channel *vchan)
{
return vchan ? vchan->pchan->dom_id : -1;
}
void hab_vchan_put(struct virtual_channel *vchan)
{
if (vchan)
kref_put(&vchan->refcount, hab_vchan_free);
}
int hab_vchan_query(struct uhab_context *ctx, int32_t vcid, uint64_t *ids,
char *names, size_t name_size, uint32_t flags)
{
struct virtual_channel *vchan;
vchan = hab_get_vchan_fromvcid(vcid, ctx, 1);
if (!vchan)
return -EINVAL;
if (vchan->otherend_closed) {
hab_vchan_put(vchan);
return -ENODEV;
}
*ids = vchan->pchan->vmid_local |
((uint64_t)vchan->pchan->vmid_remote) << 32;
names[0] = 0;
names[name_size/2] = 0;
hab_vchan_put(vchan);
return 0;
}

View File

@ -0,0 +1,17 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
int hab_hypervisor_register(void)
{
hab_driver.b_loopback = 1;
return 0;
}
void hab_hypervisor_unregister(void)
{
hab_hypervisor_unregister_common();
}

drivers/soc/qcom/hab/khab.c
View File

@ -0,0 +1,152 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
#include <linux/module.h>
int32_t habmm_socket_open(int32_t *handle, uint32_t mm_ip_id,
uint32_t timeout, uint32_t flags)
{
return hab_vchan_open(hab_driver.kctx, mm_ip_id, handle,
timeout, flags);
}
EXPORT_SYMBOL(habmm_socket_open);
int32_t habmm_socket_close(int32_t handle)
{
return hab_vchan_close(hab_driver.kctx, handle);
}
EXPORT_SYMBOL(habmm_socket_close);
int32_t habmm_socket_send(int32_t handle, void *src_buff,
uint32_t size_bytes, uint32_t flags)
{
return hab_vchan_send(hab_driver.kctx, handle,
size_bytes, src_buff, flags);
}
EXPORT_SYMBOL(habmm_socket_send);
int32_t habmm_socket_recv(int32_t handle, void *dst_buff, uint32_t *size_bytes,
uint32_t timeout, uint32_t flags)
{
int ret = 0;
struct hab_message *msg = NULL;
if (!size_bytes || !dst_buff)
return -EINVAL;
ret = hab_vchan_recv(hab_driver.kctx, &msg, handle, size_bytes, flags);
if (ret == 0 && msg)
memcpy(dst_buff, msg->data, msg->sizebytes);
else if (ret && msg)
pr_warn("vcid %X recv failed %d but msg is still received %zd bytes\n",
handle, ret, msg->sizebytes);
if (msg)
hab_msg_free(msg);
return ret;
}
EXPORT_SYMBOL(habmm_socket_recv);
int32_t habmm_export(int32_t handle, void *buff_to_share, uint32_t size_bytes,
uint32_t *export_id, uint32_t flags)
{
int ret;
struct hab_export param = {0};
if (!export_id)
return -EINVAL;
param.vcid = handle;
param.buffer = (uint64_t)(uintptr_t)buff_to_share;
param.sizebytes = size_bytes;
param.flags = flags;
ret = hab_mem_export(hab_driver.kctx, &param, 1);
*export_id = param.exportid;
return ret;
}
EXPORT_SYMBOL(habmm_export);
int32_t habmm_unexport(int32_t handle, uint32_t export_id, uint32_t flags)
{
struct hab_unexport param = {0};
param.vcid = handle;
param.exportid = export_id;
return hab_mem_unexport(hab_driver.kctx, &param, 1);
}
EXPORT_SYMBOL(habmm_unexport);
int32_t habmm_import(int32_t handle, void **buff_shared, uint32_t size_bytes,
uint32_t export_id, uint32_t flags)
{
int ret;
struct hab_import param = {0};
if (!buff_shared)
return -EINVAL;
param.vcid = handle;
param.sizebytes = size_bytes;
param.exportid = export_id;
param.flags = flags;
ret = hab_mem_import(hab_driver.kctx, &param, 1);
if (!ret)
*buff_shared = (void *)(uintptr_t)param.kva;
return ret;
}
EXPORT_SYMBOL(habmm_import);
int32_t habmm_unimport(int32_t handle,
uint32_t export_id,
void *buff_shared,
uint32_t flags)
{
struct hab_unimport param = {0};
param.vcid = handle;
param.exportid = export_id;
param.kva = (uint64_t)(uintptr_t)buff_shared;
return hab_mem_unimport(hab_driver.kctx, &param, 1);
}
EXPORT_SYMBOL(habmm_unimport);
int32_t habmm_socket_query(int32_t handle,
struct hab_socket_info *info,
uint32_t flags)
{
int ret;
uint64_t ids;
char nm[VMNAME_SIZE * 2];
if (!info)
return -EINVAL;
ret = hab_vchan_query(hab_driver.kctx, handle, &ids, nm, sizeof(nm), 1);
if (!ret) {
info->vmid_local = ids & 0xFFFFFFFF;
info->vmid_remote = (ids & 0xFFFFFFFF00000000UL) >> 32;
strlcpy(info->vmname_local, nm, sizeof(info->vmname_local));
strlcpy(info->vmname_remote, &nm[sizeof(info->vmname_local)],
sizeof(info->vmname_remote));
}
return ret;
}
EXPORT_SYMBOL(habmm_socket_query);

View File

@ -0,0 +1,391 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
#if !defined CONFIG_GHS_VMM && defined(CONFIG_QTI_QUIN_GVM)
#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/rtc.h>
#include "hab_pipe.h"
#include "hab_qvm.h"
#include "khab_test.h"
static char g_perf_test_result[256];
enum hab_perf_test_type {
HAB_SHMM_THGPUT = 0x0,
};
#define HAB_PERF_TEST_MMID 802
#define PERF_TEST_ITERATION 50
#define MEM_READ_ITERATION 30
static int hab_shmm_throughput_test(void)
{
struct hab_device *habDev;
struct qvm_channel *dev;
struct hab_shared_buf *sh_buf;
struct physical_channel *pchan;
ktime_t start_time = 0, end_time = 0;
int i, counter;
void *test_data;
unsigned char *source_data, *shmm_adr;
register int sum;
register int *pp, *lastone;
int throughput[3][2] = { {0} };
int latency[6][PERF_TEST_ITERATION];
int ret = 0, tmp, size;
habDev = find_hab_device(HAB_PERF_TEST_MMID);
if (!habDev || list_empty(&(habDev->pchannels))) {
ret = -ENODEV;
return ret;
}
pchan = list_first_entry(&(habDev->pchannels),
struct physical_channel, node);
dev = pchan->hyp_data;
if (!dev) {
ret = -EPERM;
return ret;
}
sh_buf = dev->pipe_ep->tx_info.sh_buf;
/* the pchan shared buffer is 128k; use 64k for the test */
size = 0x10000;
if (!sh_buf) {
pr_err("Share buffer address is empty, exit the perf test\n");
ret = -ENOMEM;
return ret;
}
shmm_adr = (unsigned char *)sh_buf->data;
test_data = kzalloc(size, GFP_ATOMIC);
if (!test_data) {
ret = -ENOMEM;
return ret;
}
source_data = kzalloc(size, GFP_ATOMIC);
if (!source_data) {
kfree(test_data);
ret = -ENOMEM;
return ret;
}
for (i = 0; i < PERF_TEST_ITERATION; i++) {
/* Normal memory copy latency */
flush_cache_all();
start_time = ktime_get();
memcpy(test_data, source_data, size);
end_time = ktime_get();
latency[0][i] = ktime_us_delta(end_time, start_time);
/* Share memory copy latency */
flush_cache_all();
start_time = ktime_get();
memcpy(shmm_adr, source_data, size);
end_time = ktime_get();
latency[1][i] = ktime_us_delta(end_time, start_time);
/* Normal memory read latency */
counter = MEM_READ_ITERATION;
sum = 0;
latency[2][i] = 0;
flush_cache_all();
while (counter-- > 0) {
pp = test_data;
lastone = (int *)((char *)test_data + size - 512);
start_time = ktime_get();
while (pp <= lastone) {
sum +=
pp[0] + pp[4] + pp[8] + pp[12]
+ pp[16] + pp[20] + pp[24] + pp[28]
+ pp[32] + pp[36] + pp[40] + pp[44]
+ pp[48] + pp[52] + pp[56] + pp[60]
+ pp[64] + pp[68] + pp[72] + pp[76]
+ pp[80] + pp[84] + pp[88] + pp[92]
+ pp[96] + pp[100] + pp[104]
+ pp[108] + pp[112]
+ pp[116] + pp[120]
+ pp[124];
pp += 128;
}
end_time = ktime_get();
latency[2][i] += ktime_us_delta(end_time, start_time);
flush_cache_all();
}
/* Share memory read latency*/
counter = MEM_READ_ITERATION;
sum = 0;
latency[3][i] = 0;
while (counter-- > 0) {
pp = (int *)shmm_adr;
lastone = (int *)(shmm_adr + size - 512);
start_time = ktime_get();
while (pp <= lastone) {
sum +=
pp[0] + pp[4] + pp[8] + pp[12]
+ pp[16] + pp[20] + pp[24] + pp[28]
+ pp[32] + pp[36] + pp[40] + pp[44]
+ pp[48] + pp[52] + pp[56] + pp[60]
+ pp[64] + pp[68] + pp[72] + pp[76]
+ pp[80] + pp[84] + pp[88] + pp[92]
+ pp[96] + pp[100] + pp[104]
+ pp[108] + pp[112]
+ pp[116] + pp[120]
+ pp[124];
pp += 128;
}
end_time = ktime_get();
latency[3][i] += ktime_us_delta(end_time, start_time);
flush_cache_all();
}
/* Normal memory write latency */
flush_cache_all();
start_time = ktime_get();
memset(test_data, 'c', size);
end_time = ktime_get();
latency[4][i] = ktime_us_delta(end_time, start_time);
/* Share memory write latency */
flush_cache_all();
start_time = ktime_get();
memset(shmm_adr, 'c', size);
end_time = ktime_get();
latency[5][i] = ktime_us_delta(end_time, start_time);
}
/* Calculate normal memory copy throughput by average */
tmp = 0;
for (i = 0; i < PERF_TEST_ITERATION; i++)
tmp += latency[0][i];
throughput[0][0] = (tmp != 0) ? size*PERF_TEST_ITERATION/tmp : 0;
/* Calculate share memory copy throughput by average */
tmp = 0;
for (i = 0; i < PERF_TEST_ITERATION; i++)
tmp += latency[1][i];
throughput[0][1] = (tmp != 0) ? size*PERF_TEST_ITERATION/tmp : 0;
/* Calculate normal memory read throughput by average */
tmp = 0;
for (i = 0; i < PERF_TEST_ITERATION; i++)
tmp += latency[2][i];
throughput[1][0] = (tmp != 0) ?
size*PERF_TEST_ITERATION*MEM_READ_ITERATION/tmp : 0;
/* Calculate share memory read throughput by average */
tmp = 0;
for (i = 0; i < PERF_TEST_ITERATION; i++)
tmp += latency[3][i];
throughput[1][1] = (tmp != 0) ?
size*PERF_TEST_ITERATION*MEM_READ_ITERATION/tmp : 0;
/* Calculate normal memory write throughput by average */
tmp = 0;
for (i = 0; i < PERF_TEST_ITERATION; i++)
tmp += latency[4][i];
throughput[2][0] = (tmp != 0) ?
size*PERF_TEST_ITERATION/tmp : 0;
/* Calculate share memory write throughput by average */
tmp = 0;
for (i = 0; i < PERF_TEST_ITERATION; i++)
tmp += latency[5][i];
throughput[2][1] = (tmp != 0) ?
size*PERF_TEST_ITERATION/tmp : 0;
kfree(test_data);
kfree(source_data);
snprintf(g_perf_test_result, sizeof(g_perf_test_result),
"cpy(%d,%d)/read(%d,%d)/write(%d,%d)",
throughput[0][0], throughput[0][1], throughput[1][0],
throughput[1][1], throughput[2][0], throughput[2][1]);
return ret;
}
int hab_perf_test(long testId)
{
int ret;
switch (testId) {
case HAB_SHMM_THGPUT:
ret = hab_shmm_throughput_test();
break;
default:
pr_err("Invalid performance test ID %ld\n", testId);
ret = -EINVAL;
}
return ret;
}
static int kick_hab_perf_test(const char *val, const struct kernel_param *kp);
static int get_hab_perf_result(char *buffer, const struct kernel_param *kp);
module_param_call(perf_test, kick_hab_perf_test, get_hab_perf_result,
NULL, 0600);
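/*
* A minimal way to drive this test (sketch; assumes these objects build into
* the msm_hab_linux module per the Makefile, so the parameter appears under
* /sys/module/msm_hab_linux/parameters):
*
*	echo 0 > /sys/module/msm_hab_linux/parameters/perf_test
*	cat /sys/module/msm_hab_linux/parameters/perf_test
*
* where 0 selects HAB_SHMM_THGPUT and the result string reports copy, read
* and write throughput for normal vs shared memory.
*/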
static int kick_hab_perf_test(const char *val, const struct kernel_param *kp)
{
long testId;
int err = kstrtol(val, 10, &testId);
if (err)
return err;
memset(g_perf_test_result, 0, sizeof(g_perf_test_result));
return hab_perf_test(testId);
}
static int get_hab_perf_result(char *buffer, const struct kernel_param *kp)
{
return scnprintf(buffer, PAGE_SIZE, "%s", g_perf_test_result);
}
#endif
static struct kobject *hab_kobject;
static int vchan_stat;
static int context_stat;
static int pid_stat;
static ssize_t vchan_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return hab_stat_show_vchan(&hab_driver, buf, PAGE_SIZE);
}
static ssize_t vchan_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int ret;
ret = sscanf(buf, "%du", &vchan_stat);
if (ret < 1) {
pr_err("failed to read anything from input %d\n", ret);
return 0;
} else
return vchan_stat;
}
static ssize_t ctx_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return hab_stat_show_ctx(&hab_driver, buf, PAGE_SIZE);
}
static ssize_t ctx_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int ret;
ret = sscanf(buf, "%du", &context_stat);
if (ret < 1) {
pr_err("failed to read anything from input %d\n", ret);
return 0;
} else
return context_stat;
}
static ssize_t expimp_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return hab_stat_show_expimp(&hab_driver, pid_stat, buf, PAGE_SIZE);
}
static ssize_t expimp_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t count)
{
int ret;
char str[36] = {0};
ret = sscanf(buf, "%35s", str);
if (ret < 1)
pr_err("failed to read anything from input %d\n", ret);
if (strnlen(str, strlen("dump_pipe")) == strlen("dump_pipe") &&
strcmp(str, "dump_pipe") == 0) {
/* string terminator is ignored */
dump_hab();
return strlen("dump_pipe");
}
ret = sscanf(buf, "%du", &pid_stat);
if (ret < 1)
pr_err("failed to read anything from input %d\n", ret);
else
return pid_stat; /* good result stored */
return -EEXIST;
}
static struct kobj_attribute vchan_attribute = __ATTR(vchan_stat, 0660,
vchan_show,
vchan_store);
static struct kobj_attribute ctx_attribute = __ATTR(context_stat, 0660,
ctx_show,
ctx_store);
static struct kobj_attribute expimp_attribute = __ATTR(pid_stat, 0660,
expimp_show,
expimp_store);
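/*
* The attributes above land under /sys/kernel/hab once hab_stat_init_sub()
* has run; a typical inspection session (sketch) looks like:
*
*	cat /sys/kernel/hab/vchan_stat		# pchan/vchan summary
*	cat /sys/kernel/hab/context_stat	# per-context totals
*	echo <pid> > /sys/kernel/hab/pid_stat	# select a process
*	cat /sys/kernel/hab/pid_stat		# read its export/import list
*	echo dump_pipe > /sys/kernel/hab/pid_stat	# trigger dump_hab()
*/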
int hab_stat_init_sub(struct hab_driver *driver)
{
int result;
hab_kobject = kobject_create_and_add("hab", kernel_kobj);
if (!hab_kobject)
return -ENOMEM;
result = sysfs_create_file(hab_kobject, &vchan_attribute.attr);
if (result)
pr_debug("cannot add vchan in /sys/kernel/hab %d\n", result);
result = sysfs_create_file(hab_kobject, &ctx_attribute.attr);
if (result)
pr_debug("cannot add ctx in /sys/kernel/hab %d\n", result);
result = sysfs_create_file(hab_kobject, &expimp_attribute.attr);
if (result)
pr_debug("cannot add expimp in /sys/kernel/hab %d\n", result);
return result;
}
int hab_stat_deinit_sub(struct hab_driver *driver)
{
sysfs_remove_file(hab_kobject, &vchan_attribute.attr);
sysfs_remove_file(hab_kobject, &ctx_attribute.attr);
sysfs_remove_file(hab_kobject, &expimp_attribute.attr);
kobject_put(hab_kobject);
return 0;
}
int dump_hab_get_file_name(char *file_time, int ft_size)
{
struct timespec64 time = {0};
unsigned long local_time;
struct rtc_time tm;
ktime_get_real_ts64(&time);
local_time = (unsigned long)(time.tv_sec - sys_tz.tz_minuteswest * 60);
rtc_time_to_tm(local_time, &tm);
snprintf(file_time, ft_size, "%04d_%02d_%02d-%02d_%02d_%02d",
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour,
tm.tm_min, tm.tm_sec);
return 0;
}

View File

@ -0,0 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#ifndef __KHAB_TEST_H
#define __KHAB_TEST_H
int hab_perf_test(long testId);
#endif /* __KHAB_TEST_H */

View File

@ -0,0 +1,181 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#include "hab.h"
#include "hab_qvm.h"
static unsigned long long xvm_sche_tx_tv_buffer[2];
static void pipe_read_trace(struct hab_pipe *pipe, struct hab_pipe_endpoint *ep,
int size, int ret)
{
struct hab_shared_buf *sh_buf = ep->rx_info.sh_buf;
struct dbg_items *its = (struct dbg_items *)pipe->buf_a;
struct dbg_item *it = &its->it[its->idx];
it->rd_cnt = sh_buf->rd_count;
it->wr_cnt = sh_buf->wr_count;
it->va = (void *)&sh_buf->data[ep->rx_info.index];
it->index = ep->rx_info.index;
it->sz = size;
it->ret = ret;
its->idx++;
if (its->idx >= DBG_ITEM_SIZE)
its->idx = 0;
}
/* this is only used to read payload, never the head! */
int physical_channel_read(struct physical_channel *pchan,
void *payload,
size_t read_size)
{
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
if (dev) {
int ret = hab_pipe_read(dev->pipe_ep, payload, read_size, 0);
/* log */
pipe_read_trace(dev->pipe, dev->pipe_ep, read_size, ret);
return ret;
} else
return 0;
}
#define HAB_HEAD_SIGNATURE 0xBEE1BEE1
int physical_channel_send(struct physical_channel *pchan,
struct hab_header *header,
void *payload)
{
size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
size_t total_size = sizeof(*header) + sizebytes;
int irqs_disabled = irqs_disabled();
if (total_size > dev->pipe_ep->tx_info.sh_buf->size)
return -EINVAL; /* too much data for ring */
hab_spin_lock(&dev->io_lock, irqs_disabled);
if ((dev->pipe_ep->tx_info.sh_buf->size -
(dev->pipe_ep->tx_info.wr_count -
dev->pipe_ep->tx_info.sh_buf->rd_count)) < total_size) {
hab_spin_unlock(&dev->io_lock, irqs_disabled);
return -EAGAIN; /* not enough free space */
}
header->sequence = pchan->sequence_tx + 1;
header->signature = HAB_HEAD_SIGNATURE;
if (hab_pipe_write(dev->pipe_ep,
(unsigned char *)header,
sizeof(*header)) != sizeof(*header)) {
hab_spin_unlock(&dev->io_lock, irqs_disabled);
pr_err("***incompleted pchan send id-type-size %x session %d seq# %d\n",
header->id_type_size, header->session_id,
header->sequence);
return -EIO;
}
if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_PROFILE) {
struct timespec64 ts = {0};
struct habmm_xing_vm_stat *pstat =
(struct habmm_xing_vm_stat *)payload;
if (pstat) {
ktime_get_ts64(&ts);
pstat->tx_sec = ts.tv_sec;
pstat->tx_usec = ts.tv_nsec/NSEC_PER_USEC;
} else {
hab_spin_unlock(&dev->io_lock, irqs_disabled);
pr_err("***incompleted pchan send prof id-type-size %x session %d seq# %d\n",
header->id_type_size, header->session_id,
header->sequence);
return -EINVAL;
}
} else if (HAB_HEADER_GET_TYPE(*header)
== HAB_PAYLOAD_TYPE_SCHE_RESULT_REQ) {
((unsigned long long *)payload)[0] = xvm_sche_tx_tv_buffer[0];
} else if (HAB_HEADER_GET_TYPE(*header)
== HAB_PAYLOAD_TYPE_SCHE_RESULT_RSP) {
((unsigned long long *)payload)[2] = xvm_sche_tx_tv_buffer[1];
}
if (sizebytes) {
if (hab_pipe_write(dev->pipe_ep,
(unsigned char *)payload,
sizebytes) != sizebytes) {
hab_spin_unlock(&dev->io_lock, irqs_disabled);
pr_err("***incompleted pchan send id-type-size %x session %d seq# %d\n",
header->id_type_size, header->session_id,
header->sequence);
return -EIO;
}
}
hab_pipe_write_commit(dev->pipe_ep);
hab_spin_unlock(&dev->io_lock, irqs_disabled);
if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_SCHE_MSG)
xvm_sche_tx_tv_buffer[0] = msm_timer_get_sclk_ticks();
else if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_SCHE_MSG_ACK)
xvm_sche_tx_tv_buffer[1] = msm_timer_get_sclk_ticks();
habhyp_notify(dev);
++pchan->sequence_tx;
return 0;
}
void physical_channel_rx_dispatch(unsigned long data)
{
struct hab_header header;
struct physical_channel *pchan = (struct physical_channel *)data;
struct qvm_channel *dev = (struct qvm_channel *)pchan->hyp_data;
int irqs_disabled = irqs_disabled();
int i;
hab_spin_lock(&pchan->rxbuf_lock, irqs_disabled);
i = 0;
while (1) {
uint32_t rd, wr, idx;
int ret;
ret = hab_pipe_read(dev->pipe_ep,
(unsigned char *)&header,
sizeof(header), 1); /* clear head after read */
/* debug */
pipe_read_trace(dev->pipe, dev->pipe_ep, sizeof(header), ret);
if (ret != sizeof(header))
break; /* no data available */
hab_pipe_rxinfo(dev->pipe_ep, &rd, &wr, &idx);
if (header.signature != HAB_HEAD_SIGNATURE) {
pr_err("!!!!! HAB signature mismatch expect %X received %X, id_type_size %X session %X sequence %X i %d\n",
HAB_HEAD_SIGNATURE, header.signature,
header.id_type_size,
header.session_id,
header.sequence, i);
pr_err("!!!!! rxinfo rd %d wr %d index %X\n",
rd, wr, idx);
memcpy(dev->side_buf,
(void *)&dev->pipe_ep->rx_info.sh_buf->data[0],
dev->pipe_ep->rx_info.sh_buf->size);
hab_spin_unlock(&pchan->rxbuf_lock, irqs_disabled);
/* cannot run in elevated context */
dump_hab_wq(dev);
hab_spin_lock(&pchan->rxbuf_lock, irqs_disabled);
}
pchan->sequence_rx = header.sequence;
hab_msg_recv(pchan, &header);
i++;
}
hab_spin_unlock(&pchan->rxbuf_lock, irqs_disabled);
}

View File

@ -0,0 +1,13 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#include "hab_qvm.h"
inline void habhyp_notify(void *commdev)
{
struct qvm_channel *dev = (struct qvm_channel *)commdev;
if (dev && dev->guest_ctrl)
dev->guest_ctrl->notify = ~0;
}

View File

@ -642,6 +642,8 @@ gen_headers_out_arm = [
"linux/xilinx-v4l2-controls.h",
"linux/zorro.h",
"linux/zorro_ids.h",
"linux/hab_ioctl.h",
"linux/habmmid.h",
"media/msm_cvp_private.h",
"media/radio-iris-commands.h",
"media/radio-iris.h",

View File

@ -637,6 +637,8 @@ gen_headers_out_arm64 = [
"linux/xilinx-v4l2-controls.h",
"linux/zorro.h",
"linux/zorro_ids.h",
"linux/hab_ioctl.h",
"linux/habmmid.h",
"media/msm_cvp_private.h",
"media/radio-iris-commands.h",
"media/radio-iris.h",

include/linux/habmm.h
View File

@ -0,0 +1,364 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
*/
#ifndef HABMM_H
#define HABMM_H
#include "linux/habmmid.h"
#define HAB_API_VER_DEF(_MAJOR_, _MINOR_) \
((_MAJOR_&0xFF)<<16 | (_MINOR_&0xFFF))
#define HAB_API_VER HAB_API_VER_DEF(1, 0)
#include <linux/types.h>
/* habmm_socket_open
*
* Description:
*
* Establish a communication channel between Virtual Machines. Blocks
* until the connection is established between sender and receiver.
* A client can call this API multiple times with the same name to connect
* to the same communication channel; the function returns a different context
* for every open, for proper resource allocation and client identification.
*
* Params:
* out handle - an opaque handle associated with a successful virtual channel
* creation
* in mm_ip_id - multimedia ID used to allocate the physical channels to
* service all the virtual channels created through this open
* in timeout - timeout value specified by the client to avoid blocking forever
* in flags - future extension
*
* Return:
* status (success/failure/timeout)
*
*/
/* single FE-BE connection multi-to-multi point to point matching (default) */
#define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_FE 0x00000000
/* one BE for one domU */
#define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_DOMU 0x00000001
/* one BE for all the domUs */
#define HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_MULTI_DOMUS 0x00000002
int32_t habmm_socket_open(int32_t *handle, uint32_t mm_ip_id,
uint32_t timeout, uint32_t flags);
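/* Sketch of a kernel client opening a channel on the misc MMID; the timeout
* value and error handling below are illustrative only:
*
*	int32_t vc = 0;
*	int32_t ret = habmm_socket_open(&vc, MM_MISC, 0, 0);
*
*	if (ret)
*		pr_err("hab open failed %d\n", ret);
*/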
/* habmm_socket_close
*
* Description:
*
* Tear down the virtual channel that was established through habmm_socket_open
* and release all resources associated with it.
*
* Params:
*
* in handle - handle to the virtual channel created by habmm_socket_open
*
* Return:
* status - (success/failure)
*
*
*/
int32_t habmm_socket_close(int32_t handle);
/* habmm_socket_send
*
* Description:
*
* Send data over the virtual channel
*
* Params:
*
* in handle - handle created by habmm_socket_open
* in src_buff - data to be sent across the virtual channel
* inout size_bytes - size of the data to be sent. Either the whole packet is
* sent or none of it
* in flags - future extension
*
* Return:
* status (success/fail/disconnected)
*
*/
/* Non-blocking mode: function will return immediately with HAB_AGAIN
* if the send operation cannot be completed without blocking.
*/
#define HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING 0x00000001
/* Collect cross-VM stats: client provides stat-buffer large enough to allow 2
* sets of a 2-uint64_t pair to collect seconds and nano-seconds at the
* beginning of the stat-buffer. Stats are collected when the stat-buffer leaves
* VM1, then enters VM2
*/
#define HABMM_SOCKET_SEND_FLAGS_XING_VM_STAT 0x00000002
/* start to measure cross-vm schedule latency: VM1 send msg with this flag
* to VM2 to kick off the measurement. In the hab driver level, the VM1 hab
* driver shall record the time of schedule out with mpm_timer, and buffer
* it for later usage. The VM2 hab driver shall record the time of schedule
* in with mpm_timer and pass it to "habtest" application.
*/
#define HABMM_SOCKET_XVM_SCHE_TEST 0x00000004
/* VM2 responds this message to VM1 for HABMM_SOCKET_XVM_SCHE_TEST.
* In the hab driver level, the VM2 hab driver shall record the time of schedule
* out with mpm_timer, and buffer it for later usage; the VM1 hab driver
* shall record the time of schedule in with mpm_timer and pass it to "habtest"
* application.
*/
#define HABMM_SOCKET_XVM_SCHE_TEST_ACK 0x00000008
/* VM1 sends this message to VM2 asking it to collect all the mpm_timer values
 * to calculate the scheduling latency between VM1 and VM2. In the hab driver
 * level, the VM1 hab driver shall save the previously stored scheduling-out
 * time to the message buffer
*/
#define HABMM_SOCKET_XVM_SCHE_RESULT_REQ 0x00000010
/* VM2 responds with this message to VM1 for HABMM_SOCKET_XVM_SCHE_RESULT_REQ.
 * In the habtest application level, VM2 shall save the previously stored
 * scheduling-in time into the message buffer; in the hab driver level, VM2
 * shall save the previously stored scheduling-out time to the message
 * buffer.
*/
#define HABMM_SOCKET_XVM_SCHE_RESULT_RSP 0x00000020
struct habmm_xing_vm_stat {
uint64_t tx_sec;
uint64_t tx_usec;
uint64_t rx_sec;
uint64_t rx_usec;
};
int32_t habmm_socket_send(int32_t handle, void *src_buff, uint32_t size_bytes,
uint32_t flags);
/* habmm_socket_recv
*
* Description:
*
* Receive data over the virtual channel created by habmm_socket_open.
* Blocking until actual data is received or timeout value expires
*
* Params:
*
* in handle - communication channel created by habmm_socket_open
* inout dst_buff - buffer pointer to store received data
* inout size_bytes - size of dst_buff; the returned value shows the actual
* bytes received
* in timeout - timeout value specified by the client to avoid blocking forever
* in flags - future extension
*
*
* Return:
* status (success/failure/timeout/disconnected)
*
*/
/* Non-blocking mode: function will return immediately if there is no data
* available. Supported only for kernel clients.
*/
#define HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING 0x00000001
/* In the blocking mode, this flag is used to indicate it is an
* uninterruptible blocking call.
*/
#define HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE 0x00000002
int32_t habmm_socket_recv(int32_t handle, void *dst_buff, uint32_t *size_bytes,
uint32_t timeout, uint32_t flags);
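/* A minimal send/receive round trip over the channel opened above (sketch;
* buffer sizes and error handling are illustrative). On success rx_size
* holds the actual number of bytes received:
*
*	char tx[16] = "ping";
*	char rx[16];
*	uint32_t rx_size = sizeof(rx);
*
*	ret = habmm_socket_send(vc, tx, sizeof(tx), 0);
*	if (!ret)
*		ret = habmm_socket_recv(vc, rx, &rx_size, 0, 0);
*/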
/* habmm_socket_sendto
*
* Description:
*
* This is for backend only. Send data over the virtual channel to remote
* frontend virtual channel for multi-FEs-to-single-BE model when
* the BE virtual channel is created using
* HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_DOMU or
* HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_MULTI_DOMUS
*
* Params:
*
* in handle - handle created by habmm_socket_open
* in src_buff - data to be sent across the virtual channel
* inout size_bytes - size of the data to be sent. The packet is fully sent on
* success, or not sent at all upon any failure
* in remote_handle - the destination of this send using remote FE's virtual
* channel handle
* in flags - future extension
*
* Return:
* status (success/fail/disconnected)
*/
int32_t habmm_socket_sendto(int32_t handle, void *src_buff, uint32_t size_bytes,
int32_t remote_handle, uint32_t flags);
/* habmm_socket_recvfrom
*
* Description:
*
* Receive data over the virtual channel created by habmm_socket_open.
* Returned is the remote FE's virtual channel handle to be used for sendto.
* Blocking until actual data is received or timeout value expires. This is for
* BE running in multi-FEs-to-single-BE model when the BE virtual channel is
* created using HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_SINGLE_DOMU or
* HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_MULTI_DOMUS.
*
* Params:
*
* in handle - communication channel created by habmm_socket_open
* inout dst_buff - buffer pointer to store received data
* inout size_bytes - size of dst_buff; the returned value shows the actual
* bytes received
* in timeout - timeout value specified by the client to avoid blocking forever
* out remote_handle - handle of the FE that sent this message through the
* connected virtual channel to the BE
* in flags - future extension
*
* Return:
* status (success/failure/timeout/disconnected)
*
*/
int32_t habmm_socket_recvfrom(int32_t handle, void *dst_buff,
uint32_t *size_bytes, uint32_t timeout,
int32_t *remote_handle, uint32_t flags);
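/* Sketch of the multi-FE-to-single-BE pattern: the BE opens its channel with
* HABMM_SOCKET_OPEN_FLAGS_SINGLE_BE_MULTI_DOMUS, learns the sender from
* recvfrom, and answers the same FE via sendto (all names illustrative):
*
*	int32_t fe = 0;
*	uint32_t sz = sizeof(rx);
*
*	ret = habmm_socket_recvfrom(vc, rx, &sz, 0, &fe, 0);
*	if (!ret)
*		ret = habmm_socket_sendto(vc, rx, sz, fe, 0);
*/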
/* exporting memory type DMA : This is platform dependent for user mode. If it
* does exist, HAB needs to use DMA method to retrieve the memory for exporting.
* If it does not exist, this flag is ignored.
*/
#define HABMM_EXP_MEM_TYPE_DMA 0x00000001
/*
* this flag is used for export from dma_buf fd or import to dma_buf fd
*/
#define HABMM_EXPIMP_FLAGS_FD 0x00010000
#define HABMM_EXPIMP_FLAGS_DMABUF 0x00020000
#define HAB_MAX_EXPORT_SIZE 0x8000000
/*
* Description:
*
* Prepare the sharing of the buffer on the exporter side. The returned
* reference ID needs to be sent to importer separately.
* During sending the HAB will attach the actual exporting buffer information.
* The exporting is per process space.
*
* Params:
*
* in handle - communication channel created by habmm_socket_open
* in buff_to_share - buffer to be exported
* in size_bytes - size of the exporting buffer in bytes
* out export_id - to be returned by this call upon success
* in flags - future extension
*
* Return:
* status (success/failure)
*
*/
int32_t habmm_export(int32_t handle, void *buff_to_share, uint32_t size_bytes,
uint32_t *export_id, uint32_t flags);
/*
* Description:
*
* Free any allocated resource associated with this export ID on the local side.
* Params:
*
* in handle - communication channel created by habmm_socket_open
* in export_id - all resource allocated with export_id are to be freed
* in flags - future extension
*
* Return:
* status (success/failure)
*
*/
int32_t habmm_unexport(int32_t handle, uint32_t export_id, uint32_t flags);
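/* Exporter-side flow (sketch): export a buffer, pass the returned ID to the
* peer over the message path, and unexport once the peer is done with it:
*
*	uint32_t exp_id = 0;
*
*	ret = habmm_export(vc, buf, size, &exp_id, 0);
*	if (!ret)
*		ret = habmm_socket_send(vc, &exp_id, sizeof(exp_id), 0);
*	...
*	habmm_unexport(vc, exp_id, 0);
*/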
/*
* Description:
*
* Import the exporter's shared reference ID.
* The importing is per process space.
*
* Params:
*
* in handle - communication channel created by habmm_socket_open
* out buff_shared - buffer to be imported. returned upon success
* in size_bytes - size of the imported buffer in bytes. It should match the
* original exported buffer size
* in export_id - received when exporter sent its exporting ID through
* habmm_socket_send() previously
* in flags - future extension
*
* Return:
* status (success/failure)
*
*/
/* Map the imported buffer with cached attributes on the importer side. */
#define HABMM_IMPORT_FLAGS_CACHED 0x00000001
int32_t habmm_import(int32_t handle, void **buff_shared, uint32_t size_bytes,
uint32_t export_id, uint32_t flags);
/*
* Description:
*
* Release any resource associated with the export ID on the importer side.
*
* Params:
*
* in handle - communication channel created by habmm_socket_open
* in export_id - received when exporter sent its exporting ID through
* habmm_socket_send() previously
* in buff_shared - received from habmm_import() together with export_id
* in flags - future extension
*
* Return:
* status (success/failure)
*
*/
int32_t habmm_unimport(int32_t handle, uint32_t export_id, void *buff_shared,
uint32_t flags);
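/* Importer-side flow (sketch), mirroring the exporter above: receive the ID,
* import it to get a local mapping, then unimport to release the mapping:
*
*	void *shared = NULL;
*	uint32_t exp_id = 0, sz = sizeof(exp_id);
*
*	ret = habmm_socket_recv(vc, &exp_id, &sz, 0, 0);
*	if (!ret)
*		ret = habmm_import(vc, &shared, size, exp_id, 0);
*	...
*	habmm_unimport(vc, exp_id, shared, 0);
*/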
/*
* Description:
*
* Query various information of the opened hab socket.
*
* Params:
*
* in handle - communication channel created by habmm_socket_open
* out info - retrieved socket information regarding the local and remote
* VMs
* in flags - future extension
*
* Return:
* status (success/failure)
*
*/
#define VMNAME_SIZE 12
struct hab_socket_info {
int32_t vmid_remote; /* habmm's vmid */
int32_t vmid_local;
/* name from hypervisor framework if available */
char vmname_remote[VMNAME_SIZE];
char vmname_local[VMNAME_SIZE];
};
int32_t habmm_socket_query(int32_t handle, struct hab_socket_info *info,
uint32_t flags);
#endif /* HABMM_H */

View File

@ -0,0 +1,104 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _HAB_IOCTL_H
#define _HAB_IOCTL_H
#include <linux/types.h>
struct hab_send {
__u64 data;
__s32 vcid;
__u32 sizebytes;
__u32 flags;
};
struct hab_recv {
__u64 data;
__s32 vcid;
__u32 sizebytes;
__u32 flags;
};
struct hab_open {
__s32 vcid;
__u32 mmid;
__u32 timeout;
__u32 flags;
};
struct hab_close {
__s32 vcid;
__u32 flags;
};
struct hab_export {
__u64 buffer;
__s32 vcid;
__u32 sizebytes;
__u32 exportid;
__u32 flags;
};
struct hab_import {
__u64 index;
__u64 kva;
__s32 vcid;
__u32 sizebytes;
__u32 exportid;
__u32 flags;
};
struct hab_unexport {
__s32 vcid;
__u32 exportid;
__u32 flags;
};
struct hab_unimport {
__s32 vcid;
__u32 exportid;
__u64 kva;
__u32 flags;
};
struct hab_info {
__s32 vcid;
__u64 ids; /* high part remote; low part local */
__u64 names;
__u32 namesize; /* single name length */
__u32 flags;
};
#define HAB_IOC_TYPE 0x0A
#define IOCTL_HAB_SEND \
_IOW(HAB_IOC_TYPE, 0x2, struct hab_send)
#define IOCTL_HAB_RECV \
_IOWR(HAB_IOC_TYPE, 0x3, struct hab_recv)
#define IOCTL_HAB_VC_OPEN \
_IOWR(HAB_IOC_TYPE, 0x4, struct hab_open)
#define IOCTL_HAB_VC_CLOSE \
_IOW(HAB_IOC_TYPE, 0x5, struct hab_close)
#define IOCTL_HAB_VC_EXPORT \
_IOWR(HAB_IOC_TYPE, 0x6, struct hab_export)
#define IOCTL_HAB_VC_IMPORT \
_IOWR(HAB_IOC_TYPE, 0x7, struct hab_import)
#define IOCTL_HAB_VC_UNEXPORT \
_IOW(HAB_IOC_TYPE, 0x8, struct hab_unexport)
#define IOCTL_HAB_VC_UNIMPORT \
_IOW(HAB_IOC_TYPE, 0x9, struct hab_unimport)
#define IOCTL_HAB_VC_QUERY \
_IOWR(HAB_IOC_TYPE, 0xA, struct hab_info)
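/* User-space clients reach the same functionality through these ioctls on
* the hab character device (sketch; the device node name is an assumption).
* On success open_param.vcid holds the new virtual channel id:
*
*	struct hab_open open_param = { .vcid = 0, .mmid = 601,
*			.timeout = 0, .flags = 0 };
*	int fd = open("/dev/hab", O_RDWR);
*	int ret = (fd >= 0) ? ioctl(fd, IOCTL_HAB_VC_OPEN, &open_param) : -1;
*/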
#endif /* _HAB_IOCTL_H */

View File

@ -0,0 +1,71 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*/
#ifndef HABMMID_H
#define HABMMID_H
#define HAB_MMID_CREATE(major, minor) ((major&0xFFFF) | ((minor&0xFF)<<16))
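/* e.g. HAB_MMID_CREATE(401, 1) packs major 401 (0x191) with minor 1 into
* 0x10191; the majors below are the defined group bases (illustrative use).
*/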
#define MM_AUD_START 100
#define MM_AUD_1 101
#define MM_AUD_2 102
#define MM_AUD_3 103
#define MM_AUD_4 104
#define MM_AUD_END 105
#define MM_CAM_START 200
#define MM_CAM_1 201
#define MM_CAM_2 202
#define MM_CAM_END 203
#define MM_DISP_START 300
#define MM_DISP_1 301
#define MM_DISP_2 302
#define MM_DISP_3 303
#define MM_DISP_4 304
#define MM_DISP_5 305
#define MM_DISP_END 306
#define MM_GFX_START 400
#define MM_GFX 401
#define MM_GFX_END 402
#define MM_VID_START 500
#define MM_VID 501
#define MM_VID_2 502
#define MM_VID_END 503
#define MM_MISC_START 600
#define MM_MISC 601
#define MM_MISC_END 602
#define MM_QCPE_START 700
#define MM_QCPE_VM1 701
#define MM_QCPE_END 702
#define MM_CLK_START 800
#define MM_CLK_VM1 801
#define MM_CLK_VM2 802
#define MM_CLK_END 803
#define MM_FDE_START 900
#define MM_FDE_1 901
#define MM_FDE_END 902
#define MM_BUFFERQ_START 1000
#define MM_BUFFERQ_1 1001
#define MM_BUFFERQ_END 1002
#define MM_DATA_START 1100
#define MM_DATA_NETWORK_1 1101
#define MM_DATA_NETWORK_2 1102
#define MM_DATA_END 1103
#define MM_HSI2S_START 1200
#define MM_HSI2S_1 1201
#define MM_HSI2S_END 1202
#define MM_ID_MAX 1203
#endif /* HABMMID_H */