Haven: Rename hypervisor to gunyah
Rename 'haven' to 'gunyah' and 'hh' to 'gh'. The following strings are left unchanged because the hypervisor or userspace depends on them: 'haven-hypervisor', 'qcom,hh-watchdog', 'qcom,haven-message-queue', 'qcom,haven-vm-id', 'hh_virtio_backend'. To preserve history, this change only modifies file contents; it does not rename any files.

Change-Id: I4fa3e3fb00d4a53b44b6aec81b4c22cbeb5c6853
Signed-off-by: Cong Zhang <congzhan@codeaurora.org>
commit 52892f7d3b
parent a815b09be6
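The rename itself is mechanical, which is what makes a content-only change workable. As a minimal illustration of the substitution rules stated in the message above, the Python sketch below applies the haven-to-gunyah and hh-to-gh renames while shielding the five exempted strings. It is an illustration under stated assumptions, not the tooling actually used for this change, and the script name and invocation are hypothetical.

#!/usr/bin/env python3
"""Illustrative sketch of the rename rules in the commit message above.

Renames 'haven' -> 'gunyah' and the 'hh'/'HH' identifier prefixes to
'gh'/'GH' in file contents only (file names stay put, to preserve
history), while shielding the strings that the hypervisor or userspace
still depends on. Not the tooling actually used for this change.
"""
import sys

# Strings that must survive the rename (hypervisor/userspace dependencies).
KEEP = [
    "haven-hypervisor",
    "qcom,hh-watchdog",
    "qcom,haven-message-queue",
    "qcom,haven-vm-id",
    "hh_virtio_backend",
]

# Ordered, naive substring rules; good enough for kernel identifiers in a
# sketch, but not a parser-grade rename.
RULES = [
    ("HAVEN", "GUNYAH"),
    ("Haven", "Gunyah"),
    ("haven", "gunyah"),
    ("HH_", "GH_"),
    ("hh_", "gh_"),
]

def rename(text: str) -> str:
    # Shield the exempted strings with placeholders before substituting.
    for i, word in enumerate(KEEP):
        text = text.replace(word, f"\x00{i}\x00")
    for old, new in RULES:
        text = text.replace(old, new)
    # Restore the shielded strings.
    for i, word in enumerate(KEEP):
        text = text.replace(f"\x00{i}\x00", word)
    return text

if __name__ == "__main__":
    for path in sys.argv[1:]:
        with open(path, encoding="utf-8", errors="surrogateescape") as f:
            updated = rename(f.read())
        with open(path, "w", encoding="utf-8", errors="surrogateescape") as f:
            f.write(updated)

Run over the affected files (for example "python3 rename_sketch.py $(git grep -lE 'haven|hh_|HH_')", a hypothetical invocation), it produces a content-only change of the shape shown in the diff below.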
@@ -1,32 +1,32 @@
 What:           /sys/hypervisor/type
 Date:           April 2020
 KernelVersion:  5.4.25
-Description:    If running under Haven:
+Description:    If running under Gunyah:
                 Type of hypervisor:
-                "haven": Haven hypervisor
+                "gunyah": Gunyah hypervisor
 
 What:           /sys/hypervisor/version/api
 Date:           April 2020
 KernelVersion:  5.4.25
-Description:    If running under Haven:
-                The Haven API version.
+Description:    If running under Gunyah:
+                The Gunyah API version.
 
 What:           /sys/hypervisor/version/variant
 Date:           April 2020
 KernelVersion:  5.4.25
-Description:    If running under Haven:
-                The Haven variant (build) version.
+Description:    If running under Gunyah:
+                The Gunyah variant (build) version.
 
-What:           /sys/kernel/debug/haven/trace_set
+What:           /sys/kernel/debug/gunyah/trace_set
 Date:           April 2020
 KernelVersion:  5.4.25
-Description:    If running under Haven with Trace Control support:
+Description:    If running under Gunyah with Trace Control support:
                 When reading, gets active trace class flags as a hexadecimal.
                 When writing, adds to the trace class flags.
 
-What:           /sys/kernel/debug/haven/trace_clear
+What:           /sys/kernel/debug/gunyah/trace_clear
 Date:           April 2020
 KernelVersion:  5.4.25
-Description:    If running under Haven with Trace Control support:
+Description:    If running under Gunyah with Trace Control support:
                 When reading, gets active trace class flags as a hexadecimal.
                 When writing, clears the specified trace class flags.
@@ -118,7 +118,7 @@ CONFIG_DMABUF_HEAPS=y
 CONFIG_UIO=y
 CONFIG_VIRTIO_MMIO=y
 CONFIG_VIRTIO_MMIO_SWIOTLB=y
-CONFIG_HH_VIRTIO_DEBUG=y
+CONFIG_GH_VIRTIO_DEBUG=y
 CONFIG_IOMMU_LIMIT_IOVA_ALIGNMENT=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_VENDOR_HOOKS=y
arch/arm64/configs/vendor/neo.config (vendored, 26 changed lines)
@@ -25,18 +25,18 @@ CONFIG_DRM_TTM=y
 CONFIG_DRM_TTM_DMA_PAGE_POOL=y
 CONFIG_FB_CMDLINE=y
 CONFIG_GKI_HIDDEN_DRM_CONFIGS=y
-CONFIG_HAVEN_DRIVERS=y
+CONFIG_GUNYAH_DRIVERS=y
 CONFIG_HDMI=y
-CONFIG_HH_CTRL=y
-CONFIG_HH_DBL=y
-CONFIG_HH_IRQ_LEND=y
-CONFIG_HH_MEM_NOTIFIER=y
-CONFIG_HH_MSGQ=y
-CONFIG_HH_RM_DRV=y
-# CONFIG_HH_VIRTIO_BACKEND is not set
-CONFIG_HH_VIRT_WATCHDOG=y
-CONFIG_HVC_HAVEN=y
-CONFIG_HVC_HAVEN_CONSOLE=y
+CONFIG_GH_CTRL=y
+CONFIG_GH_DBL=y
+CONFIG_GH_IRQ_LEND=y
+CONFIG_GH_MEM_NOTIFIER=y
+CONFIG_GH_MSGQ=y
+CONFIG_GH_RM_DRV=y
+# CONFIG_GH_VIRTIO_BACKEND is not set
+CONFIG_GH_VIRT_WATCHDOG=y
+CONFIG_HVC_GUNYAH=y
+CONFIG_HVC_GUNYAH_CONSOLE=y
 CONFIG_I2C_ALGOBIT=y
 # CONFIG_IPQ_APSS_PLL is not set
 # CONFIG_IPQ_GCC_4019 is not set
@@ -94,7 +94,7 @@ CONFIG_MODULE_SIG_SHA512=y
 # CONFIG_MSM_MMCC_8998 is not set
 # CONFIG_MSM_VIDEOCC_LAHAINA is not set
 # CONFIG_MSM_VIDEOCC_WAIPIO is not set
-# CONFIG_NEURON_CH_HAVEN is not set
+# CONFIG_NEURON_CH_GUNYAH is not set
 # CONFIG_PHY_CADENCE_SIERRA is not set
 # CONFIG_QCOM_A53PLL is not set
 # CONFIG_QCOM_CLK_APCC_MSM8996 is not set
@@ -119,7 +119,7 @@ CONFIG_QCOM_WDT_CORE=y
 # CONFIG_QCS_Q6SSTOP_404 is not set
 # CONFIG_QCS_TURING_404 is not set
 CONFIG_QRTR=y
-CONFIG_QRTR_HAVEN=y
+CONFIG_QRTR_GUNYAH=y
 CONFIG_QRTR_NODE_ID=20
 CONFIG_QRTR_WAKEUP_MS=500
 CONFIG_REGMAP=y
arch/arm64/configs/vendor/waipio_GKI.config (vendored, 26 changed lines)
@@ -36,18 +36,18 @@ CONFIG_EDAC_QCOM=m
 CONFIG_EDAC_QCOM_LLCC_PANIC_ON_UE=y
 # CONFIG_FTRACE_STARTUP_TEST is not set
 CONFIG_FTS_TRUSTED_TOUCH=y
-CONFIG_HAVEN_DRIVERS=y
+CONFIG_GUNYAH_DRIVERS=y
 CONFIG_HDCP_QSEECOM=m
-CONFIG_HH_CTRL=m
-CONFIG_HH_DBL=m
-CONFIG_HH_IRQ_LEND=m
-CONFIG_HH_MEM_NOTIFIER=m
-CONFIG_HH_MSGQ=m
-CONFIG_HH_RM_DRV=m
-CONFIG_HH_VIRTIO_BACKEND=m
-CONFIG_HH_VIRT_WATCHDOG=m
-CONFIG_HVC_HAVEN=m
-# CONFIG_HVC_HAVEN_CONSOLE is not set
+CONFIG_GH_CTRL=m
+CONFIG_GH_DBL=m
+CONFIG_GH_IRQ_LEND=m
+CONFIG_GH_MEM_NOTIFIER=m
+CONFIG_GH_MSGQ=m
+CONFIG_GH_RM_DRV=m
+CONFIG_GH_VIRTIO_BACKEND=m
+CONFIG_GH_VIRT_WATCHDOG=m
+CONFIG_HVC_GUNYAH=m
+# CONFIG_HVC_GUNYAH_CONSOLE is not set
 CONFIG_HWMON=m
 CONFIG_HWSPINLOCK_QCOM=m
 CONFIG_HW_RANDOM_MSM_LEGACY=m
@@ -98,7 +98,7 @@ CONFIG_MSM_SPCOM=m
 CONFIG_MSM_SPSS_UTILS=m
 CONFIG_MSM_SYSSTATS=m
 CONFIG_MSM_VIDEOCC_WAIPIO=m
-# CONFIG_NEURON_CH_HAVEN is not set
+# CONFIG_NEURON_CH_GUNYAH is not set
 CONFIG_NFC_QTI_I2C=m
 CONFIG_NOP_USB_XCEIV=m
 CONFIG_NVMEM_SPMI_SDAM=m
@@ -203,7 +203,7 @@ CONFIG_QCOM_WATCHDOG_WAKEUP_ENABLE=y
 CONFIG_QCOM_WDT_CORE=m
 CONFIG_QPNP_PBS=m
 CONFIG_QRTR=m
-CONFIG_QRTR_HAVEN=m
+CONFIG_QRTR_GUNYAH=m
 CONFIG_QRTR_MHI=m
 CONFIG_QRTR_SMD=m
 CONFIG_QSEECOM=m
arch/arm64/configs/vendor/waipio_tuivm.config (vendored, 26 changed lines)
@@ -36,19 +36,19 @@ CONFIG_FTS_TRUSTED_TOUCH=y
 CONFIG_GENERIC_PINCONF=y
 CONFIG_GKI_HIDDEN_DRM_CONFIGS=y
 CONFIG_GPIOLIB_IRQCHIP=y
-CONFIG_HAVEN_DRIVERS=y
+CONFIG_GUNYAH_DRIVERS=y
 CONFIG_HAVE_NET_DSA=y
 CONFIG_HDMI=y
-CONFIG_HH_CTRL=y
-CONFIG_HH_DBL=y
-CONFIG_HH_IRQ_LEND=y
-CONFIG_HH_MEM_NOTIFIER=y
-CONFIG_HH_MSGQ=y
-CONFIG_HH_RM_DRV=y
-# CONFIG_HH_VIRTIO_BACKEND is not set
-CONFIG_HH_VIRT_WATCHDOG=y
-CONFIG_HVC_HAVEN=y
-CONFIG_HVC_HAVEN_CONSOLE=y
+CONFIG_GH_CTRL=y
+CONFIG_GH_DBL=y
+CONFIG_GH_IRQ_LEND=y
+CONFIG_GH_MEM_NOTIFIER=y
+CONFIG_GH_MSGQ=y
+CONFIG_GH_RM_DRV=y
+# CONFIG_GH_VIRTIO_BACKEND is not set
+CONFIG_GH_VIRT_WATCHDOG=y
+CONFIG_HVC_GUNYAH=y
+CONFIG_HVC_GUNYAH_CONSOLE=y
 CONFIG_I2C_ALGOBIT=y
 CONFIG_I2C_MSM_GENI=y
 CONFIG_INET=y
@@ -129,7 +129,7 @@ CONFIG_MSM_GPI_DMA_DEBUG=y
 # CONFIG_NET_NCSI is not set
 # CONFIG_NET_PKTGEN is not set
 # CONFIG_NET_SWITCHDEV is not set
-# CONFIG_NEURON_CH_HAVEN is not set
+# CONFIG_NEURON_CH_GUNYAH is not set
 # CONFIG_OPENVSWITCH is not set
 # CONFIG_PHY_CADENCE_SIERRA is not set
 CONFIG_PINCONF=y
@@ -163,7 +163,7 @@ CONFIG_QCOM_WDT_CORE=y
 # CONFIG_QPNP_PBS is not set
 # CONFIG_QPNP_REVID is not set
 CONFIG_QRTR=y
-CONFIG_QRTR_HAVEN=y
+CONFIG_QRTR_GUNYAH=y
 CONFIG_QRTR_NODE_ID=20
 CONFIG_QRTR_WAKEUP_MS=500
 # CONFIG_RDS is not set
@@ -1,23 +1,23 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  */
-#ifndef __ASM_HH_HCALL_H
-#define __ASM_HH_HCALL_H
+#ifndef __ASM_GH_HCALL_H
+#define __ASM_GH_HCALL_H
 
 #include <linux/types.h>
 
-#include <linux/haven/hcall_common.h>
+#include <linux/gunyah/hcall_common.h>
 
 /**
- * _hh_hcall: Performs an AArch64-specific call into hypervisor using Haven ABI
+ * _gh_hcall: Performs an AArch64-specific call into hypervisor using Gunyah ABI
  * @hcall_num: Hypercall function ID to invoke
  * @args: Hypercall argument registers
  * @resp: Pointer to location to store response
  */
-static inline int _hh_hcall(const hh_hcall_fnid_t hcall_num,
-                const struct hh_hcall_args args,
-                struct hh_hcall_resp *resp)
+static inline int _gh_hcall(const gh_hcall_fnid_t hcall_num,
+                const struct gh_hcall_args args,
+                struct gh_hcall_resp *resp)
 {
         uint64_t _x18;
 
@@ -465,7 +465,7 @@ config NEURON_APP_BLOCK_CLIENT
         select NEURON_PROT_BLOCK_CLIENT
         help
           Neuron is a device-sharing framework which is used by guests of the
-          haven hypervisor to serve or access shared I/O devices and other
+          gunyah hypervisor to serve or access shared I/O devices and other
           inter-VM services.
 
           This option enables the Neuron block client, which can access block
@@ -482,7 +482,7 @@ config NEURON_APP_BLOCK_SERVER
         select NEURON_PROT_BLOCK_SERVER
         help
           Neuron is a device-sharing framework which is used by guests of the
-          haven hypervisors to serve or access shared I/O devices and other
+          gunyah hypervisors to serve or access shared I/O devices and other
           inter-VM services.
 
           This option enables the Neuron block device server, which exposes a
@@ -16,8 +16,8 @@
 #include <linux/blk-mq-virtio.h>
 #include <linux/numa.h>
 #include <uapi/linux/virtio_ring.h>
-#ifdef CONFIG_HH_VIRTIO_DEBUG
-#include <trace/events/hh_virtio_frontend.h>
+#ifdef CONFIG_GH_VIRTIO_DEBUG
+#include <trace/events/gh_virtio_frontend.h>
 #endif
 
 #define PART_BITS 4
@@ -191,7 +191,7 @@ static void virtblk_done(struct virtqueue *vq)
 
                 if (likely(!blk_should_fake_timeout(req->q)))
                         blk_mq_complete_request(req);
-#ifdef CONFIG_HH_VIRTIO_DEBUG
+#ifdef CONFIG_GH_VIRTIO_DEBUG
                 trace_virtio_block_done(vq->vdev->index, req_op(req), blk_rq_pos(req));
 #endif
                 req_done = true;
@@ -284,7 +284,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
 
         spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
         err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
-#ifdef CONFIG_HH_VIRTIO_DEBUG
+#ifdef CONFIG_GH_VIRTIO_DEBUG
         trace_virtio_block_submit(vblk->vqs[qid].vq->vdev->index,
                 vbr->out_hdr.type, vbr->out_hdr.sector, vbr->out_hdr.ioprio, err, num);
 #endif
@@ -2,7 +2,7 @@
  *
  * FocalTech TouchScreen driver.
  *
- * Copyright (c) 2012-2019, FocalTech Systems, Ltd., all rights reserved.
+ * Copyright (c) 2012-2019, 2021, FocalTech Systems, Ltd., all rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -61,10 +61,10 @@
 #include <linux/uaccess.h>
 #include <linux/kobject.h>
 #include <linux/sysfs.h>
-#include "linux/haven/hh_irq_lend.h"
-#include "linux/haven/hh_msgq.h"
-#include "linux/haven/hh_mem_notifier.h"
-#include "linux/haven/hh_rm_drv.h"
+#include "linux/gunyah/gh_irq_lend.h"
+#include "linux/gunyah/gh_msgq.h"
+#include "linux/gunyah/gh_mem_notifier.h"
+#include "linux/gunyah/gh_rm_drv.h"
 #include <linux/sort.h>
 #endif
 
@@ -144,32 +144,32 @@ static void fts_ts_register_for_panel_events(struct device_node *dp,
 
 static void fts_ts_trusted_touch_abort_handler(struct fts_ts_data *fts_data,
                         int error);
-static struct hh_acl_desc *fts_ts_vm_get_acl(enum hh_vm_names vm_name)
+static struct gh_acl_desc *fts_ts_vm_get_acl(enum gh_vm_names vm_name)
 {
-        struct hh_acl_desc *acl_desc;
-        hh_vmid_t vmid;
+        struct gh_acl_desc *acl_desc;
+        gh_vmid_t vmid;
 
-        hh_rm_get_vmid(vm_name, &vmid);
+        gh_rm_get_vmid(vm_name, &vmid);
 
-        acl_desc = kzalloc(offsetof(struct hh_acl_desc, acl_entries[1]),
+        acl_desc = kzalloc(offsetof(struct gh_acl_desc, acl_entries[1]),
                         GFP_KERNEL);
         if (!acl_desc)
                 return ERR_PTR(ENOMEM);
 
         acl_desc->n_acl_entries = 1;
         acl_desc->acl_entries[0].vmid = vmid;
-        acl_desc->acl_entries[0].perms = HH_RM_ACL_R | HH_RM_ACL_W;
+        acl_desc->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W;
 
         return acl_desc;
 }
 
-static struct hh_sgl_desc *fts_ts_vm_get_sgl(
+static struct gh_sgl_desc *fts_ts_vm_get_sgl(
                 struct trusted_touch_vm_info *vm_info)
 {
-        struct hh_sgl_desc *sgl_desc;
+        struct gh_sgl_desc *sgl_desc;
         int i;
 
-        sgl_desc = kzalloc(offsetof(struct hh_sgl_desc,
+        sgl_desc = kzalloc(offsetof(struct gh_sgl_desc,
                 sgl_entries[vm_info->iomem_list_size]), GFP_KERNEL);
         if (!sgl_desc)
                 return ERR_PTR(ENOMEM);
@@ -198,8 +198,8 @@ static int fts_ts_populate_vm_info(struct fts_ts_data *fts_data)
         }
 
         fts_data->vm_info = vm_info;
-        vm_info->irq_label = HH_IRQ_LABEL_TRUSTED_TOUCH;
-        vm_info->vm_name = HH_TRUSTED_VM;
+        vm_info->irq_label = GH_IRQ_LABEL_TRUSTED_TOUCH;
+        vm_info->vm_name = GH_TRUSTED_VM;
         rc = of_property_read_u32(np, "focaltech,trusted-touch-spi-irq",
                         &vm_info->hw_irq);
         if (rc) {
@@ -279,7 +279,7 @@ static void fts_ts_destroy_vm_info(struct fts_ts_data *fts_data)
 static void fts_ts_vm_deinit(struct fts_ts_data *fts_data)
 {
         if (fts_data->vm_info->mem_cookie)
-                hh_mem_notifier_unregister(fts_data->vm_info->mem_cookie);
+                gh_mem_notifier_unregister(fts_data->vm_info->mem_cookie);
         fts_ts_destroy_vm_info(fts_data);
 }
 
@@ -359,14 +359,14 @@ static void fts_ts_trusted_touch_set_tvm_driver_state(struct fts_ts_data *fts_da
 
 static int fts_ts_sgl_cmp(const void *a, const void *b)
 {
-        struct hh_sgl_entry *left = (struct hh_sgl_entry *)a;
-        struct hh_sgl_entry *right = (struct hh_sgl_entry *)b;
+        struct gh_sgl_entry *left = (struct gh_sgl_entry *)a;
+        struct gh_sgl_entry *right = (struct gh_sgl_entry *)b;
 
         return (left->ipa_base - right->ipa_base);
 }
 
-static int fts_ts_vm_compare_sgl_desc(struct hh_sgl_desc *expected,
-                struct hh_sgl_desc *received)
+static int fts_ts_vm_compare_sgl_desc(struct gh_sgl_desc *expected,
+                struct gh_sgl_desc *received)
 {
         int idx;
 
@@ -378,8 +378,8 @@ static int fts_ts_vm_compare_sgl_desc(struct hh_sgl_desc *expected,
                 sizeof(expected->sgl_entries[0]), fts_ts_sgl_cmp, NULL);
 
         for (idx = 0; idx < expected->n_sgl_entries; idx++) {
-                struct hh_sgl_entry *left = &expected->sgl_entries[idx];
-                struct hh_sgl_entry *right = &received->sgl_entries[idx];
+                struct gh_sgl_entry *left = &expected->sgl_entries[idx];
+                struct gh_sgl_entry *right = &received->sgl_entries[idx];
 
                 if ((left->ipa_base != right->ipa_base) ||
                                 (left->size != right->size)) {
@@ -414,8 +414,8 @@ static int fts_ts_vm_handle_vm_hardware(struct fts_ts_data *fts_data)
 static void fts_ts_trusted_touch_tvm_vm_mode_enable(struct fts_ts_data *fts_data)
 {
 
-        struct hh_sgl_desc *sgl_desc, *expected_sgl_desc;
-        struct hh_acl_desc *acl_desc;
+        struct gh_sgl_desc *sgl_desc, *expected_sgl_desc;
+        struct gh_acl_desc *acl_desc;
         struct irq_data *irq_data;
         int rc = 0;
         int irq = 0;
@@ -428,19 +428,19 @@ static void fts_ts_trusted_touch_tvm_vm_mode_enable(struct fts_ts_data
                 return;
         }
 
-        acl_desc = fts_ts_vm_get_acl(HH_TRUSTED_VM);
+        acl_desc = fts_ts_vm_get_acl(GH_TRUSTED_VM);
         if (IS_ERR(acl_desc)) {
                 pr_err("failed to populated acl data:rc=%d\n",
                                 PTR_ERR(acl_desc));
                 goto accept_fail;
         }
 
-        sgl_desc = hh_rm_mem_accept(fts_data->vm_info->vm_mem_handle,
-                        HH_RM_MEM_TYPE_IO,
-                        HH_RM_TRANS_TYPE_LEND,
-                        HH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS |
-                        HH_RM_MEM_ACCEPT_VALIDATE_LABEL |
-                        HH_RM_MEM_ACCEPT_DONE, TRUSTED_TOUCH_MEM_LABEL,
+        sgl_desc = gh_rm_mem_accept(fts_data->vm_info->vm_mem_handle,
+                        GH_RM_MEM_TYPE_IO,
+                        GH_RM_TRANS_TYPE_LEND,
+                        GH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS |
+                        GH_RM_MEM_ACCEPT_VALIDATE_LABEL |
+                        GH_RM_MEM_ACCEPT_DONE, TRUSTED_TOUCH_MEM_LABEL,
                         acl_desc, NULL, NULL, 0);
         if (IS_ERR_OR_NULL(sgl_desc)) {
                 pr_err("failed to do mem accept :rc=%d\n",
@@ -470,7 +470,7 @@ static void fts_ts_trusted_touch_tvm_vm_mode_enable(struct fts_ts_data
         kfree(expected_sgl_desc);
         kfree(acl_desc);
 
-        irq = hh_irq_accept(fts_data->vm_info->irq_label, -1, IRQ_TYPE_EDGE_RISING);
+        irq = gh_irq_accept(fts_data->vm_info->irq_label, -1, IRQ_TYPE_EDGE_RISING);
         fts_trusted_touch_intr_gpio_toggle(fts_data, false);
         if (irq < 0) {
                 pr_err("failed to accept irq\n");
@@ -515,7 +515,7 @@ static void fts_ts_trusted_touch_tvm_vm_mode_enable(struct fts_ts_data
 }
 static void fts_ts_vm_irq_on_lend_callback(void *data,
                                         unsigned long notif_type,
-                                        enum hh_irq_label label)
+                                        enum gh_irq_label label)
 {
         struct fts_ts_data *fts_data = data;
 
@@ -530,15 +530,15 @@ static void fts_ts_vm_irq_on_lend_callback(void *data,
         }
 }
 
-static void fts_ts_vm_mem_on_lend_handler(enum hh_mem_notifier_tag tag,
+static void fts_ts_vm_mem_on_lend_handler(enum gh_mem_notifier_tag tag,
                 unsigned long notif_type, void *entry_data, void *notif_msg)
 {
-        struct hh_rm_notif_mem_shared_payload *payload;
+        struct gh_rm_notif_mem_shared_payload *payload;
         struct trusted_touch_vm_info *vm_info;
         struct fts_ts_data *fts_data;
 
-        if (notif_type != HH_RM_NOTIF_MEM_SHARED ||
-                        tag != HH_MEM_NOTIFIER_TAG_TOUCH) {
+        if (notif_type != GH_RM_NOTIF_MEM_SHARED ||
+                        tag != GH_MEM_NOTIFIER_TAG_TOUCH) {
                 pr_err("Invalid command passed from rm\n");
                 return;
         }
@@ -555,8 +555,8 @@ static void fts_ts_vm_mem_on_lend_handler(enum hh_mem_notifier_tag tag,
                 return;
         }
 
-        payload = (struct hh_rm_notif_mem_shared_payload *)notif_msg;
-        if (payload->trans_type != HH_RM_TRANS_TYPE_LEND ||
+        payload = (struct gh_rm_notif_mem_shared_payload *)notif_msg;
+        if (payload->trans_type != GH_RM_TRANS_TYPE_LEND ||
                         payload->label != TRUSTED_TOUCH_MEM_LABEL) {
                 pr_err("Invalid label or transaction type\n");
                 return;
@@ -584,13 +584,13 @@ static int fts_ts_vm_mem_release(struct fts_ts_data *fts_data)
                 return -EINVAL;
         }
 
-        rc = hh_rm_mem_release(fts_data->vm_info->vm_mem_handle, 0);
+        rc = gh_rm_mem_release(fts_data->vm_info->vm_mem_handle, 0);
         if (rc)
                 pr_err("VM mem release failed: rc=%d\n", rc);
 
-        rc = hh_rm_mem_notify(fts_data->vm_info->vm_mem_handle,
-                        HH_RM_MEM_NOTIFY_OWNER_RELEASED,
-                        HH_MEM_NOTIFIER_TAG_TOUCH, 0);
+        rc = gh_rm_mem_notify(fts_data->vm_info->vm_mem_handle,
+                        GH_RM_MEM_NOTIFY_OWNER_RELEASED,
+                        GH_MEM_NOTIFIER_TAG_TOUCH, 0);
         if (rc)
                 pr_err("Failed to notify mem release to PVM: rc=%d\n");
         pr_debug("vm mem release succeded\n");
@@ -612,7 +612,7 @@ static void fts_ts_trusted_touch_tvm_vm_mode_disable(struct fts_ts_dat
         fts_ts_trusted_touch_set_tvm_driver_state(fts_data,
                                 TVM_INTERRUPT_DISABLED);
 
-        rc = hh_irq_release(fts_data->vm_info->irq_label);
+        rc = gh_irq_release(fts_data->vm_info->irq_label);
         if (rc) {
                 pr_err("Failed to release irq rc:%d\n", rc);
                 goto error;
@@ -620,7 +620,7 @@ static void fts_ts_trusted_touch_tvm_vm_mode_disable(struct fts_ts_dat
                 fts_ts_trusted_touch_set_tvm_driver_state(fts_data,
                                         TVM_IRQ_RELEASED);
         }
-        rc = hh_irq_release_notify(fts_data->vm_info->irq_label);
+        rc = gh_irq_release_notify(fts_data->vm_info->irq_label);
         if (rc)
                 pr_err("Failed to notify release irq rc:%d\n", rc);
 
@@ -709,10 +709,10 @@ static void fts_ts_trusted_touch_abort_tvm(struct fts_ts_data *fts_data)
                 fts_irq_disable();
         case TVM_IRQ_ACCEPTED:
         case TVM_INTERRUPT_DISABLED:
-                rc = hh_irq_release(fts_data->vm_info->irq_label);
+                rc = gh_irq_release(fts_data->vm_info->irq_label);
                 if (rc)
                         pr_err("Failed to release irq rc:%d\n", rc);
-                rc = hh_irq_release_notify(fts_data->vm_info->irq_label);
+                rc = gh_irq_release_notify(fts_data->vm_info->irq_label);
                 if (rc)
                         pr_err("Failed to notify irq release rc:%d\n", rc);
         case TVM_I2C_SESSION_ACQUIRED:
@@ -778,14 +778,14 @@ static void fts_ts_trusted_touch_abort_pvm(struct fts_ts_data *fts_data)
         case PVM_ALL_RESOURCES_RELEASE_NOTIFIED:
         case PVM_IRQ_LENT:
         case PVM_IRQ_LENT_NOTIFIED:
-                rc = hh_irq_reclaim(fts_data->vm_info->irq_label);
+                rc = gh_irq_reclaim(fts_data->vm_info->irq_label);
                 if (rc)
                         pr_err("failed to reclaim irq on pvm rc:%d\n", rc);
         case PVM_IRQ_RECLAIMED:
         case PVM_IOMEM_LENT:
         case PVM_IOMEM_LENT_NOTIFIED:
         case PVM_IOMEM_RELEASE_NOTIFIED:
-                rc = hh_rm_mem_reclaim(fts_data->vm_info->vm_mem_handle, 0);
+                rc = gh_rm_mem_reclaim(fts_data->vm_info->vm_mem_handle, 0);
                 if (rc)
                         pr_err("failed to reclaim iomem on pvm rc:%d\n", rc);
                 fts_data->vm_info->vm_mem_handle = 0;
@@ -876,11 +876,11 @@ static void fts_ts_bus_put(struct fts_ts_data *fts_data)
         fts_ts_enable_reg(fts_data, false);
 }
 
-static struct hh_notify_vmid_desc *fts_ts_vm_get_vmid(hh_vmid_t vmid)
+static struct gh_notify_vmid_desc *fts_ts_vm_get_vmid(gh_vmid_t vmid)
 {
-        struct hh_notify_vmid_desc *vmid_desc;
+        struct gh_notify_vmid_desc *vmid_desc;
 
-        vmid_desc = kzalloc(offsetof(struct hh_notify_vmid_desc,
+        vmid_desc = kzalloc(offsetof(struct gh_notify_vmid_desc,
                         vmid_entries[1]), GFP_KERNEL);
         if (!vmid_desc)
                 return ERR_PTR(ENOMEM);
@@ -903,7 +903,7 @@ static void fts_trusted_touch_pvm_vm_mode_disable(struct fts_ts_data *fts_data)
                         PVM_ALL_RESOURCES_RELEASE_NOTIFIED)
                 pr_err("all release notifications are not received yet\n");
 
-        rc = hh_irq_reclaim(fts_data->vm_info->irq_label);
+        rc = gh_irq_reclaim(fts_data->vm_info->irq_label);
         if (rc) {
                 pr_err("failed to reclaim irq on pvm rc:%d\n", rc);
                 goto error;
@@ -911,7 +911,7 @@ static void fts_trusted_touch_pvm_vm_mode_disable(struct fts_ts_data *fts_data)
         fts_ts_trusted_touch_set_pvm_driver_state(fts_data,
                                 PVM_IRQ_RECLAIMED);
 
-        rc = hh_rm_mem_reclaim(fts_data->vm_info->vm_mem_handle, 0);
+        rc = gh_rm_mem_reclaim(fts_data->vm_info->vm_mem_handle, 0);
         if (rc) {
                 pr_err("Trusted touch VM mem reclaim failed rc:%d\n", rc);
                 goto error;
@@ -936,11 +936,11 @@ static void fts_trusted_touch_pvm_vm_mode_disable(struct fts_ts_data *fts_data)
 
 static void fts_ts_vm_irq_on_release_callback(void *data,
                                         unsigned long notif_type,
-                                        enum hh_irq_label label)
+                                        enum gh_irq_label label)
 {
         struct fts_ts_data *fts_data = data;
 
-        if (notif_type != HH_RM_NOTIF_VM_IRQ_RELEASED) {
+        if (notif_type != GH_RM_NOTIF_VM_IRQ_RELEASED) {
                 pr_err("invalid notification type\n");
                 return;
         }
@@ -955,19 +955,19 @@ static void fts_ts_vm_irq_on_release_callback(void *data,
         }
 }
 
-static void fts_ts_vm_mem_on_release_handler(enum hh_mem_notifier_tag tag,
+static void fts_ts_vm_mem_on_release_handler(enum gh_mem_notifier_tag tag,
                 unsigned long notif_type, void *entry_data, void *notif_msg)
 {
-        struct hh_rm_notif_mem_released_payload *release_payload;
+        struct gh_rm_notif_mem_released_payload *release_payload;
         struct trusted_touch_vm_info *vm_info;
         struct fts_ts_data *fts_data;
 
-        if (notif_type != HH_RM_NOTIF_MEM_RELEASED) {
+        if (notif_type != GH_RM_NOTIF_MEM_RELEASED) {
                 pr_err(" Invalid notification type\n");
                 return;
         }
 
-        if (tag != HH_MEM_NOTIFIER_TAG_TOUCH) {
+        if (tag != GH_MEM_NOTIFIER_TAG_TOUCH) {
                 pr_err(" Invalid tag\n");
                 return;
         }
@@ -984,7 +984,7 @@ static void fts_ts_vm_mem_on_release_handler(enum hh_mem_notifier_tag
                 return;
         }
 
-        release_payload = (struct hh_rm_notif_mem_released_payload *)notif_msg;
+        release_payload = (struct gh_rm_notif_mem_released_payload *)notif_msg;
         if (release_payload->mem_handle != vm_info->vm_mem_handle) {
                 pr_err("Invalid mem handle detected\n");
                 return;
@@ -1002,14 +1002,14 @@ static void fts_ts_vm_mem_on_release_handler(enum hh_mem_notifier_tag
 
 static int fts_ts_vm_mem_lend(struct fts_ts_data *fts_data)
 {
-        struct hh_acl_desc *acl_desc;
-        struct hh_sgl_desc *sgl_desc;
-        struct hh_notify_vmid_desc *vmid_desc;
-        hh_memparcel_handle_t mem_handle;
-        hh_vmid_t trusted_vmid;
+        struct gh_acl_desc *acl_desc;
+        struct gh_sgl_desc *sgl_desc;
+        struct gh_notify_vmid_desc *vmid_desc;
+        gh_memparcel_handle_t mem_handle;
+        gh_vmid_t trusted_vmid;
         int rc = 0;
 
-        acl_desc = fts_ts_vm_get_acl(HH_TRUSTED_VM);
+        acl_desc = fts_ts_vm_get_acl(GH_TRUSTED_VM);
         if (IS_ERR(acl_desc)) {
                 pr_err("Failed to get acl of IO memories for Trusted touch\n");
                 PTR_ERR(acl_desc);
@@ -1024,7 +1024,7 @@ static int fts_ts_vm_mem_lend(struct fts_ts_data *fts_data)
                 goto sgl_error;
         }
 
-        rc = hh_rm_mem_lend(HH_RM_MEM_TYPE_IO, 0, TRUSTED_TOUCH_MEM_LABEL,
+        rc = gh_rm_mem_lend(GH_RM_MEM_TYPE_IO, 0, TRUSTED_TOUCH_MEM_LABEL,
                         acl_desc, sgl_desc, NULL, &mem_handle);
         if (rc) {
                 pr_err("Failed to lend IO memories for Trusted touch rc:%d\n",
@@ -1036,12 +1036,12 @@ static int fts_ts_vm_mem_lend(struct fts_ts_data *fts_data)
 
         fts_ts_trusted_touch_set_pvm_driver_state(fts_data, PVM_IOMEM_LENT);
 
-        hh_rm_get_vmid(HH_TRUSTED_VM, &trusted_vmid);
+        gh_rm_get_vmid(GH_TRUSTED_VM, &trusted_vmid);
 
         vmid_desc = fts_ts_vm_get_vmid(trusted_vmid);
 
-        rc = hh_rm_mem_notify(mem_handle, HH_RM_MEM_NOTIFY_RECIPIENT_SHARED,
-                        HH_MEM_NOTIFIER_TAG_TOUCH, vmid_desc);
+        rc = gh_rm_mem_notify(mem_handle, GH_RM_MEM_NOTIFY_RECIPIENT_SHARED,
+                        GH_MEM_NOTIFIER_TAG_TOUCH, vmid_desc);
         if (rc) {
                 pr_err("Failed to notify mem lend to hypervisor rc:%d\n", rc);
                 goto vmid_error;
@@ -1085,7 +1085,7 @@ static int fts_ts_trusted_touch_pvm_vm_mode_enable(struct fts_ts_data *fts_data)
                 goto error;
         }
 
-        rc = hh_irq_lend_v2(vm_info->irq_label, vm_info->vm_name,
+        rc = gh_irq_lend_v2(vm_info->irq_label, vm_info->vm_name,
                 fts_data->irq, &fts_ts_vm_irq_on_release_callback, fts_data);
         if (rc) {
                 pr_err("Failed to lend irq\n");
@@ -1095,7 +1095,7 @@ static int fts_ts_trusted_touch_pvm_vm_mode_enable(struct fts_ts_data *fts_data)
         pr_info("vm irq lend succeded for irq:%d\n", fts_data->irq);
         fts_ts_trusted_touch_set_pvm_driver_state(fts_data, PVM_IRQ_LENT);
 
-        rc = hh_irq_lend_notify(vm_info->irq_label);
+        rc = gh_irq_lend_notify(vm_info->irq_label);
         if (rc) {
                 pr_err("Failed to notify irq\n");
                 goto error;
@@ -1189,7 +1189,7 @@ static int fts_ts_vm_init(struct fts_ts_data *fts_data)
 
         vm_info = fts_data->vm_info;
 #ifdef CONFIG_ARCH_QTI_VM
-        mem_cookie = hh_mem_notifier_register(HH_MEM_NOTIFIER_TAG_TOUCH,
+        mem_cookie = gh_mem_notifier_register(GH_MEM_NOTIFIER_TAG_TOUCH,
                         fts_ts_vm_mem_on_lend_handler, fts_data);
         if (!mem_cookie) {
                 pr_err("Failed to register on lend mem notifier\n");
@@ -1197,12 +1197,12 @@ static int fts_ts_vm_init(struct fts_ts_data *fts_data)
                 goto init_fail;
         }
         vm_info->mem_cookie = mem_cookie;
-        rc = hh_irq_wait_for_lend_v2(vm_info->irq_label, HH_PRIMARY_VM,
+        rc = gh_irq_wait_for_lend_v2(vm_info->irq_label, GH_PRIMARY_VM,
                         &fts_ts_vm_irq_on_lend_callback, fts_data);
         mutex_init(&fts_data->vm_info->tvm_state_mutex);
         fts_ts_trusted_touch_set_tvm_driver_state(fts_data, TRUSTED_TOUCH_TVM_INIT);
 #else
-        mem_cookie = hh_mem_notifier_register(HH_MEM_NOTIFIER_TAG_TOUCH,
+        mem_cookie = gh_mem_notifier_register(GH_MEM_NOTIFIER_TAG_TOUCH,
                         fts_ts_vm_mem_on_release_handler, fts_data);
         if (!mem_cookie) {
                 pr_err("Failed to register on release mem notifier\n");
@@ -2,7 +2,7 @@
  *
  * FocalTech TouchScreen driver.
  *
- * Copyright (c) 2012-2019, Focaltech Ltd. All rights reserved.
+ * Copyright (c) 2012-2019, 2021, Focaltech Ltd. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -61,7 +61,7 @@
 #include <linux/sched.h>
 #include <linux/kthread.h>
 #include <linux/dma-mapping.h>
-#include <linux/haven/hh_irq_lend.h>
+#include <linux/gunyah/gh_irq_lend.h>
 #include "focaltech_common.h"
 
 /*****************************************************************************
@@ -209,10 +209,10 @@ enum trusted_touch_tvm_states {
 #define TRUSTED_TOUCH_EVENT_NOTIFICATIONS_PENDING 5
 
 struct trusted_touch_vm_info {
-        enum hh_irq_label irq_label;
-        enum hh_vm_names vm_name;
+        enum gh_irq_label irq_label;
+        enum gh_vm_names vm_name;
         u32 hw_irq;
-        hh_memparcel_handle_t vm_mem_handle;
+        gh_memparcel_handle_t vm_mem_handle;
         u32 *iomem_bases;
         u32 *iomem_sizes;
         u32 iomem_list_size;
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (C) 2016-2019, STMicroelectronics Limited.
+ * Copyright (C) 2016-2019, 2021, STMicroelectronics Limited.
  * Authors: AMG(Analog Mems Group)
  *
  *          marco.cali@st.com
@@ -67,10 +67,10 @@
 #include <linux/uaccess.h>
 #include <linux/kobject.h>
 #include <linux/sysfs.h>
-#include "linux/haven/hh_irq_lend.h"
-#include "linux/haven/hh_msgq.h"
-#include "linux/haven/hh_mem_notifier.h"
-#include "linux/haven/hh_rm_drv.h"
+#include "linux/gunyah/gh_irq_lend.h"
+#include "linux/gunyah/gh_msgq.h"
+#include "linux/gunyah/gh_mem_notifier.h"
+#include "linux/gunyah/gh_rm_drv.h"
 #include <linux/sort.h>
 #endif
 
@@ -124,31 +124,31 @@ static int fts_probe_delayed(struct fts_ts_info *info);
 
 #ifdef CONFIG_ST_TRUSTED_TOUCH
 
-static struct hh_acl_desc *fts_vm_get_acl(enum hh_vm_names vm_name)
+static struct gh_acl_desc *fts_vm_get_acl(enum gh_vm_names vm_name)
 {
-        struct hh_acl_desc *acl_desc;
-        hh_vmid_t vmid;
+        struct gh_acl_desc *acl_desc;
+        gh_vmid_t vmid;
 
-        hh_rm_get_vmid(vm_name, &vmid);
+        gh_rm_get_vmid(vm_name, &vmid);
 
-        acl_desc = kzalloc(offsetof(struct hh_acl_desc, acl_entries[1]),
+        acl_desc = kzalloc(offsetof(struct gh_acl_desc, acl_entries[1]),
                         GFP_KERNEL);
         if (!acl_desc)
                 return ERR_PTR(ENOMEM);
 
         acl_desc->n_acl_entries = 1;
         acl_desc->acl_entries[0].vmid = vmid;
-        acl_desc->acl_entries[0].perms = HH_RM_ACL_R | HH_RM_ACL_W;
+        acl_desc->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W;
 
         return acl_desc;
 }
 
-static struct hh_sgl_desc *fts_vm_get_sgl(struct trusted_touch_vm_info *vm_info)
+static struct gh_sgl_desc *fts_vm_get_sgl(struct trusted_touch_vm_info *vm_info)
 {
-        struct hh_sgl_desc *sgl_desc;
+        struct gh_sgl_desc *sgl_desc;
         int i;
 
-        sgl_desc = kzalloc(offsetof(struct hh_sgl_desc,
+        sgl_desc = kzalloc(offsetof(struct gh_sgl_desc,
                         sgl_entries[vm_info->iomem_list_size]), GFP_KERNEL);
         if (!sgl_desc)
                 return ERR_PTR(ENOMEM);
@@ -177,8 +177,8 @@ static int fts_populate_vm_info(struct fts_ts_info *info)
         }
 
         info->vm_info = vm_info;
-        vm_info->irq_label = HH_IRQ_LABEL_TRUSTED_TOUCH;
-        vm_info->vm_name = HH_TRUSTED_VM;
+        vm_info->irq_label = GH_IRQ_LABEL_TRUSTED_TOUCH;
+        vm_info->vm_name = GH_TRUSTED_VM;
         rc = of_property_read_u32(np, "st,trusted-touch-spi-irq",
                         &vm_info->hw_irq);
         if (rc) {
@@ -258,7 +258,7 @@ static void fts_destroy_vm_info(struct fts_ts_info *info)
 static void fts_vm_deinit(struct fts_ts_info *info)
 {
         if (info->vm_info->mem_cookie)
-                hh_mem_notifier_unregister(info->vm_info->mem_cookie);
+                gh_mem_notifier_unregister(info->vm_info->mem_cookie);
         fts_destroy_vm_info(info);
 }
 
@@ -268,14 +268,14 @@ static void fts_trusted_touch_vm_mode_disable(struct fts_ts_info *info);
 
 static int fts_sgl_cmp(const void *a, const void *b)
 {
-        struct hh_sgl_entry *left = (struct hh_sgl_entry *)a;
-        struct hh_sgl_entry *right = (struct hh_sgl_entry *)b;
+        struct gh_sgl_entry *left = (struct gh_sgl_entry *)a;
+        struct gh_sgl_entry *right = (struct gh_sgl_entry *)b;
 
         return (left->ipa_base - right->ipa_base);
 }
 
-static int fts_vm_compare_sgl_desc(struct hh_sgl_desc *expected,
-                struct hh_sgl_desc *received)
+static int fts_vm_compare_sgl_desc(struct gh_sgl_desc *expected,
+                struct gh_sgl_desc *received)
 {
         int idx;
 
@@ -287,8 +287,8 @@ static int fts_vm_compare_sgl_desc(struct hh_sgl_desc *expected,
                 sizeof(expected->sgl_entries[0]), fts_sgl_cmp, NULL);
 
         for (idx = 0; idx < expected->n_sgl_entries; idx++) {
-                struct hh_sgl_entry *left = &expected->sgl_entries[idx];
-                struct hh_sgl_entry *right = &received->sgl_entries[idx];
+                struct gh_sgl_entry *left = &expected->sgl_entries[idx];
+                struct gh_sgl_entry *right = &received->sgl_entries[idx];
 
                 if ((left->ipa_base != right->ipa_base) ||
                                 (left->size != right->size)) {
@@ -323,7 +323,7 @@ static int fts_vm_handle_vm_hardware(struct fts_ts_info *info)
 
 static void fts_vm_irq_on_lend_callback(void *data,
                                         unsigned long notif_type,
-                                        enum hh_irq_label label)
+                                        enum gh_irq_label label)
 {
         struct fts_ts_info *info = data;
         struct irq_data *irq_data;
@@ -331,7 +331,7 @@ static void fts_vm_irq_on_lend_callback(void *data,
         int const resource_timeout = msecs_to_jiffies(2000);
         int rc = 0;
 
-        irq = hh_irq_accept(info->vm_info->irq_label, -1, IRQ_TYPE_LEVEL_HIGH);
+        irq = gh_irq_accept(info->vm_info->irq_label, -1, IRQ_TYPE_LEVEL_HIGH);
         if (irq < 0) {
                 pr_err("failed to accept irq\n");
                 goto irq_fail;
@@ -371,18 +371,18 @@ static void fts_vm_irq_on_lend_callback(void *data,
                 fts_trusted_touch_vm_mode_disable(info);
 }
 
-static void fts_vm_mem_on_lend_handler(enum hh_mem_notifier_tag tag,
+static void fts_vm_mem_on_lend_handler(enum gh_mem_notifier_tag tag,
                 unsigned long notif_type, void *entry_data, void *notif_msg)
 {
-        struct hh_rm_notif_mem_shared_payload *payload;
-        struct hh_sgl_desc *sgl_desc, *expected_sgl_desc;
-        struct hh_acl_desc *acl_desc;
+        struct gh_rm_notif_mem_shared_payload *payload;
+        struct gh_sgl_desc *sgl_desc, *expected_sgl_desc;
+        struct gh_acl_desc *acl_desc;
         struct trusted_touch_vm_info *vm_info;
         struct fts_ts_info *info;
         int rc = 0;
 
-        if (notif_type != HH_RM_NOTIF_MEM_SHARED ||
-                        tag != HH_MEM_NOTIFIER_TAG_TOUCH) {
+        if (notif_type != GH_RM_NOTIF_MEM_SHARED ||
+                        tag != GH_MEM_NOTIFIER_TAG_TOUCH) {
                 pr_err("Invalid command passed from rm\n");
                 return;
         }
@@ -399,25 +399,25 @@ static void fts_vm_mem_on_lend_handler(enum hh_mem_notifier_tag tag,
                 return;
         }
 
-        payload = (struct hh_rm_notif_mem_shared_payload *)notif_msg;
-        if (payload->trans_type != HH_RM_TRANS_TYPE_LEND ||
+        payload = (struct gh_rm_notif_mem_shared_payload *)notif_msg;
+        if (payload->trans_type != GH_RM_TRANS_TYPE_LEND ||
                         payload->label != TRUSTED_TOUCH_MEM_LABEL) {
                 pr_err("Invalid label or transaction type\n");
                 goto onlend_fail;
         }
 
-        acl_desc = fts_vm_get_acl(HH_TRUSTED_VM);
+        acl_desc = fts_vm_get_acl(GH_TRUSTED_VM);
         if (IS_ERR(acl_desc)) {
                 pr_err("failed to populated acl data:rc=%d\n",
                                 PTR_ERR(acl_desc));
                 goto onlend_fail;
         }
 
-        sgl_desc = hh_rm_mem_accept(payload->mem_handle, HH_RM_MEM_TYPE_IO,
-                        HH_RM_TRANS_TYPE_LEND,
-                        HH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS |
-                        HH_RM_MEM_ACCEPT_VALIDATE_LABEL |
-                        HH_RM_MEM_ACCEPT_DONE, payload->label, acl_desc,
+        sgl_desc = gh_rm_mem_accept(payload->mem_handle, GH_RM_MEM_TYPE_IO,
+                        GH_RM_TRANS_TYPE_LEND,
+                        GH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS |
+                        GH_RM_MEM_ACCEPT_VALIDATE_LABEL |
+                        GH_RM_MEM_ACCEPT_DONE, payload->label, acl_desc,
                         NULL, NULL, 0);
         if (IS_ERR_OR_NULL(sgl_desc)) {
                 pr_err("failed to do mem accept :rc=%d\n",
@@ -459,13 +459,13 @@ static int fts_vm_mem_release(struct fts_ts_info *info)
 {
         int rc = 0;
 
-        rc = hh_rm_mem_release(info->vm_info->vm_mem_handle, 0);
+        rc = gh_rm_mem_release(info->vm_info->vm_mem_handle, 0);
         if (rc)
                 pr_err("VM mem release failed: rc=%d\n", rc);
 
-        rc = hh_rm_mem_notify(info->vm_info->vm_mem_handle,
-                        HH_RM_MEM_NOTIFY_OWNER_RELEASED,
-                        HH_MEM_NOTIFIER_TAG_TOUCH, 0);
+        rc = gh_rm_mem_notify(info->vm_info->vm_mem_handle,
+                        GH_RM_MEM_NOTIFY_OWNER_RELEASED,
+                        GH_MEM_NOTIFIER_TAG_TOUCH, 0);
         if (rc)
                 pr_err("Failed to notify mem release to PVM: rc=%d\n");
 
@@ -494,13 +494,13 @@ static void fts_trusted_touch_vm_mode_disable(struct fts_ts_info *info)
         }
 
         if (atomic_read(&info->vm_info->tvm_owns_irq)) {
-                rc = hh_irq_release(info->vm_info->irq_label);
+                rc = gh_irq_release(info->vm_info->irq_label);
                 if (rc)
                         pr_err("Failed to release irq rc:%d\n", rc);
                 else
                         atomic_set(&info->vm_info->tvm_owns_irq, 0);
 
-                rc = hh_irq_release_notify(info->vm_info->irq_label);
+                rc = gh_irq_release_notify(info->vm_info->irq_label);
                 if (rc)
                         pr_err("Failed to notify release irq rc:%d\n", rc);
         }
@@ -602,11 +602,11 @@ static void fts_bus_put(struct fts_ts_info *info)
         mutex_unlock(&info->fts_clk_io_ctrl_mutex);
 }
 
-static struct hh_notify_vmid_desc *fts_vm_get_vmid(hh_vmid_t vmid)
+static struct gh_notify_vmid_desc *fts_vm_get_vmid(gh_vmid_t vmid)
 {
-        struct hh_notify_vmid_desc *vmid_desc;
+        struct gh_notify_vmid_desc *vmid_desc;
 
-        vmid_desc = kzalloc(offsetof(struct hh_notify_vmid_desc,
+        vmid_desc = kzalloc(offsetof(struct gh_notify_vmid_desc,
                         vmid_entries[1]), GFP_KERNEL);
         if (!vmid_desc)
                 return ERR_PTR(ENOMEM);
@@ -632,28 +632,28 @@ static void fts_trusted_touch_complete(struct fts_ts_info *info)
 
 static void fts_vm_irq_on_release_callback(void *data,
                                         unsigned long notif_type,
-                                        enum hh_irq_label label)
+                                        enum gh_irq_label label)
 {
         struct fts_ts_info *info = data;
         int rc = 0;
 
-        rc = hh_irq_reclaim(info->vm_info->irq_label);
+        rc = gh_irq_reclaim(info->vm_info->irq_label);
         if (rc)
                 pr_err("failed to reclaim irq on pvm rc:%d\n", rc);
         else
                 atomic_set(&info->vm_info->pvm_owns_irq, 1);
 }
 
-static void fts_vm_mem_on_release_handler(enum hh_mem_notifier_tag tag,
+static void fts_vm_mem_on_release_handler(enum gh_mem_notifier_tag tag,
                 unsigned long notif_type, void *entry_data, void *notif_msg)
 {
-        struct hh_rm_notif_mem_released_payload *payload;
+        struct gh_rm_notif_mem_released_payload *payload;
         struct trusted_touch_vm_info *vm_info;
         struct fts_ts_info *info;
         int rc = 0;
 
-        if (notif_type != HH_RM_NOTIF_MEM_RELEASED ||
-                        tag != HH_MEM_NOTIFIER_TAG_TOUCH) {
+        if (notif_type != GH_RM_NOTIF_MEM_RELEASED ||
+                        tag != GH_MEM_NOTIFIER_TAG_TOUCH) {
                 pr_err(" Invalid tag or command passed\n");
                 return;
         }
@@ -663,7 +663,7 @@ static void fts_vm_mem_on_release_handler(enum hh_mem_notifier_tag tag,
                 return;
         }
 
-        payload = (struct hh_rm_notif_mem_released_payload *)notif_msg;
+        payload = (struct gh_rm_notif_mem_released_payload *)notif_msg;
         info = (struct fts_ts_info *)entry_data;
         vm_info = info->vm_info;
         if (!vm_info) {
@@ -676,7 +676,7 @@ static void fts_vm_mem_on_release_handler(enum hh_mem_notifier_tag tag,
                 return;
         }
 
-        rc = hh_rm_mem_reclaim(payload->mem_handle, 0);
+        rc = gh_rm_mem_reclaim(payload->mem_handle, 0);
         if (rc) {
                 pr_err("Trusted touch VM mem release failed rc:%d\n", rc);
                 return;
@@ -687,14 +687,14 @@ static void fts_vm_mem_on_release_handler(enum hh_mem_notifier_tag tag,
 
 static int fts_vm_mem_lend(struct fts_ts_info *info)
 {
-        struct hh_acl_desc *acl_desc;
-        struct hh_sgl_desc *sgl_desc;
-        struct hh_notify_vmid_desc *vmid_desc;
-        hh_memparcel_handle_t mem_handle;
-        hh_vmid_t trusted_vmid;
+        struct gh_acl_desc *acl_desc;
+        struct gh_sgl_desc *sgl_desc;
+        struct gh_notify_vmid_desc *vmid_desc;
+        gh_memparcel_handle_t mem_handle;
+        gh_vmid_t trusted_vmid;
         int rc = 0;
 
-        acl_desc = fts_vm_get_acl(HH_TRUSTED_VM);
+        acl_desc = fts_vm_get_acl(GH_TRUSTED_VM);
         if (IS_ERR(acl_desc)) {
                 pr_err("Failed to get acl of IO memories for Trusted touch\n");
                 PTR_ERR(acl_desc);
@@ -709,7 +709,7 @@ static int fts_vm_mem_lend(struct fts_ts_info *info)
                 goto sgl_error;
         }
 
-        rc = hh_rm_mem_lend(HH_RM_MEM_TYPE_IO, 0, TRUSTED_TOUCH_MEM_LABEL,
+        rc = gh_rm_mem_lend(GH_RM_MEM_TYPE_IO, 0, TRUSTED_TOUCH_MEM_LABEL,
                         acl_desc, sgl_desc, NULL, &mem_handle);
         if (rc) {
                 pr_err("Failed to lend IO memories for Trusted touch rc:%d\n",
@@ -717,12 +717,12 @@ static int fts_vm_mem_lend(struct fts_ts_info *info)
                 goto error;
         }
 
-        hh_rm_get_vmid(HH_TRUSTED_VM, &trusted_vmid);
+        gh_rm_get_vmid(GH_TRUSTED_VM, &trusted_vmid);
 
         vmid_desc = fts_vm_get_vmid(trusted_vmid);
 
-        rc = hh_rm_mem_notify(mem_handle, HH_RM_MEM_NOTIFY_RECIPIENT_SHARED,
-                        HH_MEM_NOTIFIER_TAG_TOUCH, vmid_desc);
+        rc = gh_rm_mem_notify(mem_handle, GH_RM_MEM_NOTIFY_RECIPIENT_SHARED,
+                        GH_MEM_NOTIFIER_TAG_TOUCH, vmid_desc);
         if (rc) {
                 pr_err("Failed to notify mem lend to hypervisor rc:%d\n", rc);
                 goto vmid_error;
@@ -763,7 +763,7 @@ static int fts_trusted_touch_vm_mode_enable(struct fts_ts_info *info)
         }
         atomic_set(&vm_info->pvm_owns_iomem, 0);
 
-        rc = hh_irq_lend_v2(vm_info->irq_label, vm_info->vm_name,
+        rc = gh_irq_lend_v2(vm_info->irq_label, vm_info->vm_name,
                 info->client->irq, &fts_vm_irq_on_release_callback, info);
         if (rc) {
                 pr_err("Failed to lend irq\n");
@@ -771,7 +771,7 @@ static int fts_trusted_touch_vm_mode_enable(struct fts_ts_info *info)
         }
         atomic_set(&vm_info->pvm_owns_irq, 0);
 
-        rc = hh_irq_lend_notify(vm_info->irq_label);
+        rc = gh_irq_lend_notify(vm_info->irq_label);
         if (rc) {
                 pr_err("Failed to notify irq\n");
                 return -EINVAL;
@@ -839,7 +839,7 @@ static int fts_vm_init(struct fts_ts_info *info)
 
         vm_info = info->vm_info;
 #ifdef CONFIG_ARCH_QTI_VM
-        mem_cookie = hh_mem_notifier_register(HH_MEM_NOTIFIER_TAG_TOUCH,
+        mem_cookie = gh_mem_notifier_register(GH_MEM_NOTIFIER_TAG_TOUCH,
                         fts_vm_mem_on_lend_handler, info);
         if (!mem_cookie) {
                 pr_err("Failed to register on lend mem notifier\n");
@@ -847,13 +847,13 @@ static int fts_vm_init(struct fts_ts_info *info)
                 goto init_fail;
         }
         vm_info->mem_cookie = mem_cookie;
-        rc = hh_irq_wait_for_lend_v2(vm_info->irq_label, HH_PRIMARY_VM,
+        rc = gh_irq_wait_for_lend_v2(vm_info->irq_label, GH_PRIMARY_VM,
                         &fts_vm_irq_on_lend_callback, info);
         atomic_set(&vm_info->tvm_owns_irq, 0);
         atomic_set(&vm_info->tvm_owns_iomem, 0);
         init_completion(&info->resource_checkpoint);
 #else
-        mem_cookie = hh_mem_notifier_register(HH_MEM_NOTIFIER_TAG_TOUCH,
+        mem_cookie = gh_mem_notifier_register(GH_MEM_NOTIFIER_TAG_TOUCH,
                         fts_vm_mem_on_release_handler, info);
         if (!mem_cookie) {
                 pr_err("Failed to register on release mem notifier\n");
@@ -2,7 +2,7 @@
 /*
  * FTS Capacitive touch screen controller (FingerTipS)
  *
- * Copyright (C) 2016-2019, STMicroelectronics Limited.
+ * Copyright (C) 2016-2019, 2021, STMicroelectronics Limited.
  * Authors: AMG(Analog Mems Group) <marco.cali@st.com>
 *
 *
@@ -25,7 +25,7 @@
 /*#include <linux/wakelock.h>*/
 #include <linux/pm_wakeup.h>
 #include <linux/timekeeping.h>
-#include <linux/haven/hh_irq_lend.h>
+#include <linux/gunyah/gh_irq_lend.h>
 
 #include "fts_lib/ftsSoftware.h"
 #include "fts_lib/ftsHardware.h"
@@ -221,10 +221,10 @@ enum trusted_touch_mode_config {
 #define TRUSTED_TOUCH_MEM_LABEL 0x7
 
 struct trusted_touch_vm_info {
-        enum hh_irq_label irq_label;
-        enum hh_vm_names vm_name;
+        enum gh_irq_label irq_label;
+        enum gh_vm_names vm_name;
         u32 hw_irq;
-        hh_memparcel_handle_t vm_mem_handle;
+        gh_memparcel_handle_t vm_mem_handle;
         u32 *iomem_bases;
         u32 *iomem_sizes;
         u32 iomem_list_size;
@@ -14,7 +14,7 @@
 #include <linux/cpumask.h>
 #include <linux/sched.h>
 #include <linux/workqueue.h>
-#include <linux/haven/hh_rm_drv.h>
+#include <linux/gunyah/gh_rm_drv.h>
 #include <linux/cpu.h>
 #include <linux/of_address.h>
 #include <linux/qcom_scm.h>
@@ -29,12 +29,12 @@
 #define NUM_RESERVED_CPUS 2
 
 const static struct {
-        enum hh_vm_names val;
+        enum gh_vm_names val;
         const char *str;
 } conversion[] = {
-        {HH_PRIMARY_VM, "pvm"},
-        {HH_TRUSTED_VM, "trustedvm"},
-        {HH_CPUSYS_VM, "cpusys_vm"},
+        {GH_PRIMARY_VM, "pvm"},
+        {GH_TRUSTED_VM, "trustedvm"},
+        {GH_CPUSYS_VM, "cpusys_vm"},
 };
 
 static struct kobj_type guestvm_kobj_type = {
@@ -107,7 +107,7 @@ static void guestvm_timer_callback(struct timer_list *t)
         complete(&isolation_done);
 }
 
-static inline enum hh_vm_names get_hh_vm_name(const char *str)
+static inline enum gh_vm_names get_gh_vm_name(const char *str)
 {
         int vmid;
 
@@ -115,20 +115,20 @@ static inline enum hh_vm_names get_hh_vm_name(const char *str)
                 if (!strcmp(str, conversion[vmid].str))
                         return conversion[vmid].val;
         }
-        return HH_VM_MAX;
+        return GH_VM_MAX;
 }
 
 static int guestvm_loader_nb_handler(struct notifier_block *this,
                                 unsigned long cmd, void *data)
 {
         struct guestvm_loader_private *priv;
-        struct hh_rm_notif_vm_status_payload *vm_status_payload = data;
+        struct gh_rm_notif_vm_status_payload *vm_status_payload = data;
         u8 vm_status = vm_status_payload->vm_status;
         int ret;
 
         priv = container_of(this, struct guestvm_loader_private, guestvm_nb);
 
-        if (cmd != HH_RM_NOTIF_VM_STATUS)
+        if (cmd != GH_RM_NOTIF_VM_STATUS)
                 return NOTIFY_DONE;
 
         if (priv->vmid != vm_status_payload->vmid) {
@@ -144,23 +144,23 @@ static int guestvm_loader_nb_handler(struct notifier_block *this,
          * and DBL.
          */
         switch (vm_status) {
-        case HH_RM_VM_STATUS_READY:
-                priv->vm_status = HH_RM_VM_STATUS_READY;
-                ret = hh_rm_populate_hyp_res(vm_status_payload->vmid, priv->vm_name);
+        case GH_RM_VM_STATUS_READY:
+                priv->vm_status = GH_RM_VM_STATUS_READY;
+                ret = gh_rm_populate_hyp_res(vm_status_payload->vmid, priv->vm_name);
                 if (ret < 0) {
                         dev_err(priv->dev, "Failed to get hyp resources for vmid = %d ret = %d\n",
                                 vm_status_payload->vmid, ret);
                         complete_all(&priv->vm_start);
                         return NOTIFY_DONE;
                 }
-                ret = hh_rm_get_vm_id_info(get_hh_vm_name(priv->vm_name),
+                ret = gh_rm_get_vm_id_info(get_gh_vm_name(priv->vm_name),
                                 priv->vmid);
                 if (ret < 0)
                         dev_err(priv->dev, "Couldn't obtain VM ID info.\n");
 
                 complete_all(&priv->vm_start);
                 break;
-        case HH_RM_VM_STATUS_RUNNING:
+        case GH_RM_VM_STATUS_RUNNING:
                 dev_info(priv->dev, "vm(%d) started running\n", vm_status_payload->vmid);
                 break;
         default:
@@ -257,8 +257,8 @@ static ssize_t guestvm_loader_start(struct kobject *kobj,
         }
 
         if (boot) {
-                priv->vm_status = HH_RM_VM_STATUS_INIT;
-                ret = hh_rm_vm_alloc_vmid(get_hh_vm_name(priv->vm_name),
+                priv->vm_status = GH_RM_VM_STATUS_INIT;
+                ret = gh_rm_vm_alloc_vmid(get_gh_vm_name(priv->vm_name),
                                         &priv->vmid);
                 if (ret < 0) {
                         dev_err(priv->dev, "Couldn't allocate VMID.\n");
@@ -278,7 +278,7 @@ static ssize_t guestvm_loader_start(struct kobject *kobj,
                 return count;
         }
 
-        priv->vm_status = HH_RM_VM_STATUS_RUNNING;
+        priv->vm_status = GH_RM_VM_STATUS_RUNNING;
 
         if (priv->iso_needed) {
                 INIT_WORK(&unisolation_work, guestvm_unisolate_work);
@@ -288,7 +288,7 @@ static ssize_t guestvm_loader_start(struct kobject *kobj,
                         msecs_to_jiffies(guestvm_unisolate_timeout));
         }
 
-        ret = hh_rm_vm_start(priv->vmid);
+        ret = gh_rm_vm_start(priv->vmid);
         if (ret)
                 dev_err(priv->dev, "VM start failed for vmid = %d ret = %d\n",
                         priv->vmid, ret);
@@ -347,7 +347,7 @@ static int guestvm_loader_probe(struct platform_device *pdev)
         init_completion(&isolation_done);
         priv->guestvm_nb.notifier_call = guestvm_loader_nb_handler;
         priv->guestvm_nb.priority = 1;
-        ret = hh_rm_register_notifier(&priv->guestvm_nb);
+        ret = gh_rm_register_notifier(&priv->guestvm_nb);
         if (ret)
                 return ret;
 
@@ -380,7 +380,7 @@ static int guestvm_loader_probe(struct platform_device *pdev)
         timer_setup(&guestvm_cpu_isolate_timer, guestvm_timer_callback, 0);
 
 no_iso:
-        priv->vm_status = HH_RM_VM_STATUS_NO_STATE;
+        priv->vm_status = GH_RM_VM_STATUS_NO_STATE;
         return 0;
 
 error_return:
@ -23,9 +23,9 @@
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/cpu.h>
|
||||
|
||||
#include <linux/haven/hcall.h>
|
||||
#include <linux/haven/hh_errno.h>
|
||||
#include <linux/haven/hh_rm_drv.h>
|
||||
#include <linux/gunyah/hcall.h>
|
||||
#include <linux/gunyah/gh_errno.h>
|
||||
#include <linux/gunyah/gh_rm_drv.h>
|
||||
|
||||
#include <linux/sched/walt.h>
|
||||
|
||||
@ -44,9 +44,9 @@ static DEFINE_PER_CPU(unsigned int, qos_min_freq);
|
||||
*
|
||||
*/
|
||||
struct hyp_core_ctl_cpu_map {
|
||||
hh_capid_t cap_id;
|
||||
hh_label_t pcpu;
|
||||
hh_label_t curr_pcpu;
|
||||
gh_capid_t cap_id;
|
||||
gh_label_t pcpu;
|
||||
gh_label_t curr_pcpu;
|
||||
};
|
||||
|
||||
/**
|
||||
@ -80,7 +80,7 @@ struct hyp_core_ctl_data {
|
||||
#include "hyp_core_ctl_trace.h"
|
||||
|
||||
static struct hyp_core_ctl_data *the_hcd;
|
||||
static struct hyp_core_ctl_cpu_map hh_cpumap[NR_CPUS];
|
||||
static struct hyp_core_ctl_cpu_map gh_cpumap[NR_CPUS];
|
||||
static bool is_vcpu_info_populated;
|
||||
static bool init_done;
|
||||
static int nr_vcpus;
|
||||
@ -194,7 +194,7 @@ static void finalize_reservation(struct hyp_core_ctl_data *hcd, cpumask_t *temp)
|
||||
* maintained in vcpu_adjust_mask and processed in the 2nd pass.
*/
for (i = 0; i < MAX_RESERVE_CPUS; i++) {
if (hcd->cpumap[i].cap_id == HH_CAPID_INVAL)
if (hcd->cpumap[i].cap_id == GH_CAPID_INVAL)
break;

orig_cpu = hcd->cpumap[i].pcpu;
@ -211,9 +211,9 @@ static void finalize_reservation(struct hyp_core_ctl_data *hcd, cpumask_t *temp)
* is available in final_reserved_cpus. so restore
* the assignment.
*/
err = hh_hcall_vcpu_affinity_set(hcd->cpumap[i].cap_id,
err = gh_hcall_vcpu_affinity_set(hcd->cpumap[i].cap_id,
orig_cpu);
if (err != HH_ERROR_OK) {
if (err != GH_ERROR_OK) {
pr_err("restore: fail to assign pcpu for vcpu#%d err=%d cap_id=%llu cpu=%d\n",
i, err, hcd->cpumap[i].cap_id, orig_cpu);
continue;
@ -256,9 +256,9 @@ static void finalize_reservation(struct hyp_core_ctl_data *hcd, cpumask_t *temp)
replacement_cpu = cpumask_any(temp);
cpumask_clear_cpu(replacement_cpu, temp);

err = hh_hcall_vcpu_affinity_set(hcd->cpumap[i].cap_id,
err = gh_hcall_vcpu_affinity_set(hcd->cpumap[i].cap_id,
replacement_cpu);
if (err != HH_ERROR_OK) {
if (err != GH_ERROR_OK) {
pr_err("adjust: fail to assign pcpu for vcpu#%d err=%d cap_id=%llu cpu=%d\n",
i, err, hcd->cpumap[i].cap_id, replacement_cpu);
continue;
@ -666,12 +666,12 @@ static void hyp_core_ctl_init_reserve_cpus(struct hyp_core_ctl_data *hcd)
cpumask_clear(&hcd->reserve_cpus);

for (i = 0; i < MAX_RESERVE_CPUS; i++) {
if (hh_cpumap[i].cap_id == HH_CAPID_INVAL)
if (gh_cpumap[i].cap_id == GH_CAPID_INVAL)
break;

hcd->cpumap[i].cap_id = hh_cpumap[i].cap_id;
hcd->cpumap[i].pcpu = hh_cpumap[i].pcpu;
hcd->cpumap[i].curr_pcpu = hh_cpumap[i].curr_pcpu;
hcd->cpumap[i].cap_id = gh_cpumap[i].cap_id;
hcd->cpumap[i].pcpu = gh_cpumap[i].pcpu;
hcd->cpumap[i].curr_pcpu = gh_cpumap[i].curr_pcpu;
cpumask_set_cpu(hcd->cpumap[i].pcpu, &hcd->reserve_cpus);
pr_debug("vcpu%u map to pcpu%u\n", i, hcd->cpumap[i].pcpu);
}
@ -685,7 +685,7 @@ static void hyp_core_ctl_init_reserve_cpus(struct hyp_core_ctl_data *hcd)
* Called when vm_status is STATUS_READY, multiple times before status
* moves to STATUS_RUNNING
*/
static int hh_vcpu_populate_affinity_info(hh_label_t cpu_idx, hh_capid_t cap_id)
static int gh_vcpu_populate_affinity_info(gh_label_t cpu_idx, gh_capid_t cap_id)
{
if (!init_done) {
pr_err("Driver probe failed\n");
@ -693,9 +693,9 @@ static int hh_vcpu_populate_affinity_info(hh_label_t cpu_idx, hh_capid_t cap_id)
}

if (!is_vcpu_info_populated) {
hh_cpumap[nr_vcpus].cap_id = cap_id;
hh_cpumap[nr_vcpus].pcpu = cpu_idx;
hh_cpumap[nr_vcpus].curr_pcpu = cpu_idx;
gh_cpumap[nr_vcpus].cap_id = cap_id;
gh_cpumap[nr_vcpus].pcpu = cpu_idx;
gh_cpumap[nr_vcpus].curr_pcpu = cpu_idx;

nr_vcpus++;
pr_debug("cpu_index:%u vcpu_cap_id:%llu nr_vcpus:%d\n",
@ -705,14 +705,14 @@ static int hh_vcpu_populate_affinity_info(hh_label_t cpu_idx, hh_capid_t cap_id)
return 0;
}

static int hh_vcpu_done_populate_affinity_info(struct notifier_block *nb,
static int gh_vcpu_done_populate_affinity_info(struct notifier_block *nb,
unsigned long cmd, void *data)
{
struct hh_rm_notif_vm_status_payload *vm_status_payload = data;
struct gh_rm_notif_vm_status_payload *vm_status_payload = data;
u8 vm_status = vm_status_payload->vm_status;

if (cmd == HH_RM_NOTIF_VM_STATUS &&
vm_status == HH_RM_VM_STATUS_RUNNING &&
if (cmd == GH_RM_NOTIF_VM_STATUS &&
vm_status == GH_RM_VM_STATUS_RUNNING &&
!is_vcpu_info_populated) {
mutex_lock(&the_hcd->reservation_mutex);
hyp_core_ctl_init_reserve_cpus(the_hcd);
@ -723,8 +723,8 @@ static int hh_vcpu_done_populate_affinity_info(struct notifier_block *nb,
return NOTIFY_DONE;
}

static struct notifier_block hh_vcpu_nb = {
.notifier_call = hh_vcpu_done_populate_affinity_info,
static struct notifier_block gh_vcpu_nb = {
.notifier_call = gh_vcpu_done_populate_affinity_info,
};

static void hyp_core_ctl_enable(bool enable)
@ -814,7 +814,7 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
"Vcpu to Pcpu mappings:\n");

for (i = 0; i < MAX_RESERVE_CPUS; i++) {
if (hcd->cpumap[i].cap_id == HH_CAPID_INVAL)
if (hcd->cpumap[i].cap_id == GH_CAPID_INVAL)
break;

count += scnprintf(buf + count, PAGE_SIZE - count,
@ -1023,15 +1023,15 @@ static int hyp_core_ctl_probe(struct platform_device *pdev)
struct hyp_core_ctl_data *hcd;
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

ret = hh_rm_set_vcpu_affinity_cb(&hh_vcpu_populate_affinity_info);
ret = gh_rm_set_vcpu_affinity_cb(&gh_vcpu_populate_affinity_info);
if (ret) {
pr_err("fail to set the vcpu affinity callback\n");
return ret;
}

ret = hh_rm_register_notifier(&hh_vcpu_nb);
ret = gh_rm_register_notifier(&gh_vcpu_nb);
if (ret) {
pr_err("fail to register hh_rm_notifier\n");
pr_err("fail to register gh_rm_notifier\n");
goto reset_cb;
}

@ -1077,9 +1077,9 @@ static int hyp_core_ctl_probe(struct platform_device *pdev)
free_hcd:
kfree(hcd);
unregister_rm_notifier:
hh_rm_unregister_notifier(&hh_vcpu_nb);
gh_rm_unregister_notifier(&gh_vcpu_nb);
reset_cb:
hh_rm_set_vcpu_affinity_cb(NULL);
gh_rm_set_vcpu_affinity_cb(NULL);

return ret;
}
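
The restore and adjust paths above both funnel through the same hypercall. A minimal sketch of that pattern, assuming only the gh_hcall_vcpu_affinity_set(), GH_CAPID_INVAL, and GH_ERROR_OK names visible in this patch; the pin_reserved_vcpus() helper itself is hypothetical:

/* Hypothetical helper; mirrors the restore/adjust loops above. */
static int pin_reserved_vcpus(struct hyp_core_ctl_data *hcd, int pcpu)
{
	int i, err;

	for (i = 0; i < MAX_RESERVE_CPUS; i++) {
		if (hcd->cpumap[i].cap_id == GH_CAPID_INVAL)
			break;

		/* Ask the hypervisor to run this vCPU on pcpu */
		err = gh_hcall_vcpu_affinity_set(hcd->cpumap[i].cap_id, pcpu);
		if (err != GH_ERROR_OK)
			return -EIO;	/* errno mapping assumed */
	}

	return 0;
}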

@ -2,7 +2,7 @@

config QCOM_MEM_BUF
tristate "Qualcomm Technologies, Inc. Memory Buffer Sharing Driver"
depends on HH_MSGQ && HH_RM_DRV
depends on GH_MSGQ && GH_RM_DRV
select QCOM_MEM_BUF_DEV
help
Add support for lending memory from one virtual machine to another.

@ -10,7 +10,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/haven/hh_rm_drv.h>
#include <linux/gunyah/gh_rm_drv.h>
#include <soc/qcom/secure_buffer.h>

#include <linux/mem-buf.h>
@ -80,10 +80,10 @@ EXPORT_SYMBOL(mem_buf_unassign_mem);
int mem_buf_retrieve_memparcel_hdl(struct sg_table *sgt,
int *dst_vmids, int *dst_perms,
u32 nr_acl_entries,
hh_memparcel_handle_t *memparcel_hdl)
gh_memparcel_handle_t *memparcel_hdl)
{
struct hh_sgl_desc *sgl_desc;
struct hh_acl_desc *acl_desc;
struct gh_sgl_desc *sgl_desc;
struct gh_acl_desc *acl_desc;
unsigned int i, nr_sg_entries;
struct scatterlist *sg;
int ret;
@ -94,13 +94,13 @@ int mem_buf_retrieve_memparcel_hdl(struct sg_table *sgt,
return -EINVAL;

nr_sg_entries = sgt->nents;
sgl_desc_size = offsetof(struct hh_sgl_desc,
sgl_desc_size = offsetof(struct gh_sgl_desc,
sgl_entries[nr_sg_entries]);
sgl_desc = kzalloc(sgl_desc_size, GFP_KERNEL);
if (!sgl_desc)
return -ENOMEM;

acl_desc_size = offsetof(struct hh_acl_desc,
acl_desc_size = offsetof(struct gh_acl_desc,
acl_entries[nr_acl_entries]);
acl_desc = kzalloc(acl_desc_size, GFP_KERNEL);
if (!acl_desc) {
@ -121,11 +121,11 @@ int mem_buf_retrieve_memparcel_hdl(struct sg_table *sgt,
}

ret = hh_rm_mem_qcom_lookup_sgl(HH_RM_MEM_TYPE_NORMAL, 0, acl_desc,
ret = gh_rm_mem_qcom_lookup_sgl(GH_RM_MEM_TYPE_NORMAL, 0, acl_desc,
sgl_desc, NULL, memparcel_hdl);
trace_lookup_sgl(sgl_desc, ret, *memparcel_hdl);
if (ret < 0)
pr_err("%s: hh_rm_mem_qcom_lookup_sgl failure rc: %d\n",
pr_err("%s: gh_rm_mem_qcom_lookup_sgl failure rc: %d\n",
__func__, ret);

kfree(acl_desc);
@ -134,34 +134,34 @@ int mem_buf_retrieve_memparcel_hdl(struct sg_table *sgt,
}
EXPORT_SYMBOL(mem_buf_retrieve_memparcel_hdl);

static int mem_buf_get_mem_xfer_type(struct hh_acl_desc *acl_desc)
static int mem_buf_get_mem_xfer_type(struct gh_acl_desc *acl_desc)
{
u32 i, nr_acl_entries = acl_desc->n_acl_entries;

for (i = 0; i < nr_acl_entries; i++)
if (acl_desc->acl_entries[i].vmid == VMID_HLOS &&
acl_desc->acl_entries[i].perms != 0)
return HH_RM_TRANS_TYPE_SHARE;
return GH_RM_TRANS_TYPE_SHARE;

return HH_RM_TRANS_TYPE_LEND;
return GH_RM_TRANS_TYPE_LEND;
}

/*
* FIXME: hh_rm_mem_accept uses kmemdup, which isn't right for large buffers.
* FIXME: gh_rm_mem_accept uses kmemdup, which isn't right for large buffers.
*/
struct hh_sgl_desc *mem_buf_map_mem_s2(hh_memparcel_handle_t memparcel_hdl,
struct hh_acl_desc *acl_desc)
struct gh_sgl_desc *mem_buf_map_mem_s2(gh_memparcel_handle_t memparcel_hdl,
struct gh_acl_desc *acl_desc)
{
struct hh_sgl_desc *sgl_desc;
struct gh_sgl_desc *sgl_desc;

if (!acl_desc)
return ERR_PTR(-EINVAL);

pr_debug("%s: adding CPU MMU stage 2 mappings\n", __func__);
sgl_desc = hh_rm_mem_accept(memparcel_hdl, HH_RM_MEM_TYPE_NORMAL,
sgl_desc = gh_rm_mem_accept(memparcel_hdl, GH_RM_MEM_TYPE_NORMAL,
mem_buf_get_mem_xfer_type(acl_desc),
HH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS |
HH_RM_MEM_ACCEPT_DONE, 0, acl_desc, NULL,
GH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS |
GH_RM_MEM_ACCEPT_DONE, 0, acl_desc, NULL,
NULL, 0);
if (IS_ERR(sgl_desc)) {
pr_err("%s failed to map memory in stage 2 rc: %d\n", __func__,
@ -174,12 +174,12 @@ struct hh_sgl_desc *mem_buf_map_mem_s2(hh_memparcel_handle_t memparcel_hdl,
}
EXPORT_SYMBOL(mem_buf_map_mem_s2);
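
Taken together, mem_buf_map_mem_s2() and mem_buf_unmap_mem_s2() bracket a memparcel's lifetime on the receiving VM. A hedged sketch of that pairing, assuming an ACL descriptor already built by the caller; the demo function name is illustrative only:

/* Illustrative only; real callers also map stage 1 and build an sg_table. */
static int demo_accept_then_release(gh_memparcel_handle_t hdl,
				    struct gh_acl_desc *acl_desc)
{
	struct gh_sgl_desc *sgl_desc;

	sgl_desc = mem_buf_map_mem_s2(hdl, acl_desc);
	if (IS_ERR(sgl_desc))
		return PTR_ERR(sgl_desc);

	/* ... use the IPA ranges in sgl_desc->sgl_entries[] ... */

	return mem_buf_unmap_mem_s2(hdl);
}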

int mem_buf_unmap_mem_s2(hh_memparcel_handle_t memparcel_hdl)
int mem_buf_unmap_mem_s2(gh_memparcel_handle_t memparcel_hdl)
{
int ret;

pr_debug("%s: removing CPU MMU stage 2 mappings\n", __func__);
ret = hh_rm_mem_release(memparcel_hdl, 0);
ret = gh_rm_mem_release(memparcel_hdl, 0);

if (ret < 0)
pr_err("%s: Failed to release memparcel hdl: 0x%lx rc: %d\n",
@ -191,7 +191,7 @@ int mem_buf_unmap_mem_s2(hh_memparcel_handle_t memparcel_hdl)
}
EXPORT_SYMBOL(mem_buf_unmap_mem_s2);

int mem_buf_map_mem_s1(struct hh_sgl_desc *sgl_desc)
int mem_buf_map_mem_s1(struct gh_sgl_desc *sgl_desc)
{
u64 base, size;
int i, ret;
@ -221,7 +221,7 @@ int mem_buf_map_mem_s1(struct hh_sgl_desc *sgl_desc)
}
EXPORT_SYMBOL(mem_buf_map_mem_s1);

int mem_buf_unmap_mem_s1(struct hh_sgl_desc *sgl_desc)
int mem_buf_unmap_mem_s1(struct gh_sgl_desc *sgl_desc)
{
u64 base, size;
int i, ret;

@ -9,7 +9,7 @@
#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/dma-buf.h>
#include <linux/haven/hh_rm_drv.h>
#include <linux/gunyah/gh_rm_drv.h>
#include <linux/mem-buf.h>
#include <linux/slab.h>
#include <linux/dma-heap.h>
@ -28,29 +28,29 @@ int mem_buf_unassign_mem(struct sg_table *sgt, int *src_vmids,
int mem_buf_retrieve_memparcel_hdl(struct sg_table *sgt,
int *dst_vmids, int *dst_perms,
u32 nr_acl_entries,
hh_memparcel_handle_t *memparcel_hdl);
struct hh_sgl_desc *mem_buf_map_mem_s2(hh_memparcel_handle_t memparcel_hdl,
struct hh_acl_desc *acl_desc);
int mem_buf_unmap_mem_s2(hh_memparcel_handle_t memparcel_hdl);
gh_memparcel_handle_t *memparcel_hdl);
struct gh_sgl_desc *mem_buf_map_mem_s2(gh_memparcel_handle_t memparcel_hdl,
struct gh_acl_desc *acl_desc);
int mem_buf_unmap_mem_s2(gh_memparcel_handle_t memparcel_hdl);

/* Memory Hotplug */
int mem_buf_map_mem_s1(struct hh_sgl_desc *sgl_desc);
int mem_buf_unmap_mem_s1(struct hh_sgl_desc *sgl_desc);
int mem_buf_map_mem_s1(struct gh_sgl_desc *sgl_desc);
int mem_buf_unmap_mem_s1(struct gh_sgl_desc *sgl_desc);

#define MEM_BUF_API_HYP_ASSIGN BIT(0)
#define MEM_BUF_API_HAVEN BIT(1)
#define MEM_BUF_API_GUNYAH BIT(1)

/*
* @vmid - id assigned by hypervisor to uniquely identify a VM
* @hh_id - id used to request the real vmid from the kernel
* haven driver. This is a legacy field which should eventually be
* @gh_id - id used to request the real vmid from the kernel
* gunyah driver. This is a legacy field which should eventually be
* removed once a better design is present.
* @allowed_api - Some vms may use a different hypervisor interface.
*/
struct mem_buf_vm {
const char *name;
u16 vmid;
enum hh_vm_names hh_id;
enum gh_vm_names gh_id;
u32 allowed_api;
struct cdev cdev;
struct device dev;

@ -27,7 +27,7 @@ int current_vmid;
static struct mem_buf_vm vm_ ## _lname = { \
.name = "qcom," #_lname, \
.vmid = VMID_ ## _uname, \
.hh_id = HH_VM_MAX, \
.gh_id = GH_VM_MAX, \
.allowed_api = MEM_BUF_API_HYP_ASSIGN, \
}

@ -46,15 +46,15 @@ PERIPHERAL_VM(CP_CDSP, cp_cdsp);
static struct mem_buf_vm vm_trusted_vm = {
.name = "qcom,trusted_vm",
/* Vmid via dynamic lookup */
.hh_id = HH_TRUSTED_VM,
.allowed_api = MEM_BUF_API_HAVEN,
.gh_id = GH_TRUSTED_VM,
.allowed_api = MEM_BUF_API_GUNYAH,
};

static struct mem_buf_vm vm_hlos = {
.name = "qcom,hlos",
.vmid = VMID_HLOS,
.hh_id = HH_VM_MAX,
.allowed_api = MEM_BUF_API_HYP_ASSIGN | MEM_BUF_API_HAVEN,
.gh_id = GH_VM_MAX,
.allowed_api = MEM_BUF_API_HYP_ASSIGN | MEM_BUF_API_GUNYAH,
};

struct mem_buf_vm *pdata_array[] = {
@ -101,8 +101,8 @@ static const struct file_operations mem_buf_vm_fops = {
static struct mem_buf_vm *find_vm_by_vmid(int vmid)
{
struct mem_buf_vm *vm;
enum hh_vm_names vm_name;
hh_vmid_t hh_vmid;
enum gh_vm_names vm_name;
gh_vmid_t gh_vmid;
unsigned long idx;
int ret;

@ -110,20 +110,20 @@ static struct mem_buf_vm *find_vm_by_vmid(int vmid)
if (vm)
return vm;

for (vm_name = HH_PRIMARY_VM; vm_name < HH_VM_MAX; vm_name++) {
ret = hh_rm_get_vmid(vm_name, &hh_vmid);
for (vm_name = GH_PRIMARY_VM; vm_name < GH_VM_MAX; vm_name++) {
ret = gh_rm_get_vmid(vm_name, &gh_vmid);
if (ret)
return ERR_PTR(ret);

if (hh_vmid == vmid)
if (gh_vmid == vmid)
break;
}

if (vm_name == HH_VM_MAX)
if (vm_name == GH_VM_MAX)
return ERR_PTR(-EINVAL);

xa_for_each(&mem_buf_vm_minors, idx, vm) {
if (vm->hh_id == vm_name)
if (vm->gh_id == vm_name)
return vm;
}
WARN_ON(1);
@ -155,7 +155,7 @@ int mem_buf_vm_get_backend_api(int *vmids, unsigned int nr_acl_entries)
if (allowed_api & MEM_BUF_API_HYP_ASSIGN)
return MEM_BUF_API_HYP_ASSIGN;
else
return MEM_BUF_API_HAVEN;
return MEM_BUF_API_GUNYAH;
}

int mem_buf_fd_to_vmid(int fd)
@ -163,7 +163,7 @@ int mem_buf_fd_to_vmid(int fd)
int ret = -EINVAL;
struct mem_buf_vm *vm;
struct file *file;
hh_vmid_t vmid;
gh_vmid_t vmid;

file = fget(fd);
if (!file)
@ -176,14 +176,14 @@ int mem_buf_fd_to_vmid(int fd)
}

vm = file->private_data;
if (vm->hh_id == HH_VM_MAX) {
if (vm->gh_id == GH_VM_MAX) {
fput(file);
return vm->vmid;
}

ret = hh_rm_get_vmid(vm->hh_id, &vmid);
ret = gh_rm_get_vmid(vm->gh_id, &vmid);
if (ret)
pr_err("hh_rm_get_vmid %d failed\n", vm->hh_id);
pr_err("gh_rm_get_vmid %d failed\n", vm->gh_id);
fput(file);
return ret ? ret : vmid;
}
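
Both lookups above lean on gh_rm_get_vmid() to translate a well-known gh_vm_names value into the vmid the hypervisor actually assigned. A minimal sketch, assuming only names visible in this patch; the wrapper itself is hypothetical:

/* Hypothetical wrapper around the dynamic lookup used above. */
static int demo_trusted_vmid(void)
{
	gh_vmid_t vmid;
	int ret;

	ret = gh_rm_get_vmid(GH_TRUSTED_VM, &vmid);
	if (ret)
		return ret;

	return vmid;
}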

@ -232,7 +232,7 @@ static int mem_buf_vm_add(struct mem_buf_vm *new_vm)
dev_set_drvdata(dev, new_vm);
dev_set_name(dev, "%s", new_vm->name);

if (new_vm->hh_id == HH_VM_MAX) {
if (new_vm->gh_id == GH_VM_MAX) {
ret = xa_err(xa_store(&mem_buf_vms, new_vm->vmid, new_vm, GFP_KERNEL));
if (ret)
goto err_xa_store;
@ -288,7 +288,7 @@ static int mem_buf_vm_add_self(void)
/* Create an aliased name */
self->name = "qcom,self";
self->vmid = vm->vmid;
self->hh_id = vm->hh_id;
self->gh_id = vm->gh_id;
self->allowed_api = vm->allowed_api;

ret = mem_buf_vm_add(self);

@ -9,8 +9,8 @@
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/haven/hh_rm_drv.h>
#include <linux/haven/hh_msgq.h>
#include <linux/gunyah/gh_rm_drv.h>
#include <linux/gunyah/gh_msgq.h>
#include <linux/ion.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
@ -54,11 +54,11 @@ static LIST_HEAD(mem_buf_list);
static DEFINE_MUTEX(mem_buf_idr_mutex);
static DEFINE_IDR(mem_buf_txn_idr);
static struct task_struct *mem_buf_msgq_recv_thr;
static void *mem_buf_hh_msgq_hdl;
static void *mem_buf_gh_msgq_hdl;
static struct workqueue_struct *mem_buf_wq;

static size_t mem_buf_get_sgl_buf_size(struct hh_sgl_desc *sgl_desc);
static struct sg_table *dup_hh_sgl_desc_to_sgt(struct hh_sgl_desc *sgl_desc);
static size_t mem_buf_get_sgl_buf_size(struct gh_sgl_desc *sgl_desc);
static struct sg_table *dup_gh_sgl_desc_to_sgt(struct gh_sgl_desc *sgl_desc);
static int mem_buf_acl_to_vmid_perms_list(unsigned int nr_acl_entries,
const void __user *acl_entries,
int **dst_vmids, int **dst_perms,
@ -119,7 +119,7 @@ struct mem_buf_xfer_mem {
void *mem_type_data;
struct sg_table *mem_sgt;
bool secure_alloc;
hh_memparcel_handle_t hdl;
gh_memparcel_handle_t hdl;
struct list_head entry;
u32 nr_acl_entries;
int *dst_vmids;
@ -130,9 +130,9 @@ struct mem_buf_xfer_mem {
* struct mem_buf_desc - Internal data structure, which contains information
* about a particular memory buffer.
* @size: The size of the memory buffer
* @acl_desc: A HH ACL descriptor that describes the VMIDs that have access to
* @acl_desc: A GH ACL descriptor that describes the VMIDs that have access to
* the memory, as well as the permissions each VMID has.
* @sgl_desc: An HH SG-List descriptor that describes the IPAs of the memory
* @sgl_desc: A GH SG-List descriptor that describes the IPAs of the memory
* associated with the memory buffer that was allocated from another VM.
* @memparcel_hdl: The handle associated with the memparcel that represents the
* memory buffer.
@ -148,9 +148,9 @@ struct mem_buf_xfer_mem {
*/
struct mem_buf_desc {
size_t size;
struct hh_acl_desc *acl_desc;
struct hh_sgl_desc *sgl_desc;
hh_memparcel_handle_t memparcel_hdl;
struct gh_acl_desc *acl_desc;
struct gh_sgl_desc *sgl_desc;
gh_memparcel_handle_t memparcel_hdl;
enum mem_buf_mem_type src_mem_type;
void *src_data;
enum mem_buf_mem_type dst_mem_type;
@ -188,7 +188,7 @@ static int mem_buf_msg_send(void *msg, size_t msg_size)
{
int ret;

ret = hh_msgq_send(mem_buf_hh_msgq_hdl, msg, msg_size, 0);
ret = gh_msgq_send(mem_buf_gh_msgq_hdl, msg, msg_size, 0);
if (ret < 0)
pr_err("%s: failed to send allocation request rc: %d\n",
__func__, ret);
@ -305,7 +305,7 @@ static void mem_buf_rmt_free_mem(struct mem_buf_xfer_mem *xfer_mem)
mem_buf_rmt_free_dmaheap_mem(xfer_mem);
}

static int mem_buf_hh_acl_desc_to_vmid_perm_list(struct hh_acl_desc *acl_desc,
static int mem_buf_gh_acl_desc_to_vmid_perm_list(struct gh_acl_desc *acl_desc,
int **vmids, int **perms)
{
int *vmids_arr = NULL, *perms_arr = NULL;
@ -338,25 +338,25 @@ static int mem_buf_hh_acl_desc_to_vmid_perm_list(struct hh_acl_desc *acl_desc,
return 0;
}

static struct hh_acl_desc *mem_buf_vmid_perm_list_to_hh_acl(int *vmids, int *perms,
static struct gh_acl_desc *mem_buf_vmid_perm_list_to_gh_acl(int *vmids, int *perms,
unsigned int nr_acl_entries)
{
struct hh_acl_desc *hh_acl;
struct gh_acl_desc *gh_acl;
size_t size;
unsigned int i;

size = offsetof(struct hh_acl_desc, acl_entries[nr_acl_entries]);
hh_acl = kmalloc(size, GFP_KERNEL);
if (!hh_acl)
size = offsetof(struct gh_acl_desc, acl_entries[nr_acl_entries]);
gh_acl = kmalloc(size, GFP_KERNEL);
if (!gh_acl)
return ERR_PTR(-ENOMEM);

hh_acl->n_acl_entries = nr_acl_entries;
gh_acl->n_acl_entries = nr_acl_entries;
for (i = 0; i < nr_acl_entries; i++) {
hh_acl->acl_entries[i].vmid = vmids[i];
hh_acl->acl_entries[i].perms = perms[i];
gh_acl->acl_entries[i].vmid = vmids[i];
gh_acl->acl_entries[i].perms = perms[i];
}

return hh_acl;
return gh_acl;
}
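
The same sizing idiom recurs throughout this file: gh_acl_desc and gh_sgl_desc end in flexible arrays, so allocations are sized with offsetof() over the last entry needed. A hedged sketch of that pattern; the entry count, helper name, and perms value are illustrative, and the perms encoding is not defined in this patch:

/* Hypothetical two-entry ACL built the same way as above. */
static struct gh_acl_desc *demo_two_entry_acl(int vmid0, int vmid1, int perms)
{
	size_t size = offsetof(struct gh_acl_desc, acl_entries[2]);
	struct gh_acl_desc *acl = kzalloc(size, GFP_KERNEL);

	if (!acl)
		return NULL;

	acl->n_acl_entries = 2;
	acl->acl_entries[0].vmid = vmid0;
	acl->acl_entries[0].perms = perms;	/* encoding assumed */
	acl->acl_entries[1].vmid = vmid1;
	acl->acl_entries[1].perms = perms;
	return acl;
}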

static
@ -419,7 +419,7 @@ struct mem_buf_xfer_mem *mem_buf_prep_xfer_mem(void *req_msg)
xfer_mem->size = req->size;
xfer_mem->mem_type = req->src_mem_type;
xfer_mem->nr_acl_entries = req->acl_desc.n_acl_entries;
ret = mem_buf_hh_acl_desc_to_vmid_perm_list(&req->acl_desc,
ret = mem_buf_gh_acl_desc_to_vmid_perm_list(&req->acl_desc,
&xfer_mem->dst_vmids,
&xfer_mem->dst_perms);
if (ret) {
@ -542,7 +542,7 @@ static void mem_buf_alloc_req_work(struct work_struct *work)

resp_msg->ret = ret;
trace_send_alloc_resp_msg(resp_msg);
ret = hh_msgq_send(mem_buf_hh_msgq_hdl, resp_msg, sizeof(*resp_msg), 0);
ret = gh_msgq_send(mem_buf_gh_msgq_hdl, resp_msg, sizeof(*resp_msg), 0);

/*
* Free the buffer regardless of the return value as the hypervisor
@ -571,7 +571,7 @@ static void mem_buf_relinquish_work(struct work_struct *work)
struct mem_buf_xfer_mem *xfer_mem_iter, *tmp, *xfer_mem = NULL;
struct mem_buf_rmt_msg *rmt_msg = to_rmt_msg(work);
struct mem_buf_alloc_relinquish *relinquish_msg = rmt_msg->msg;
hh_memparcel_handle_t hdl = relinquish_msg->hdl;
gh_memparcel_handle_t hdl = relinquish_msg->hdl;

trace_receive_relinquish_msg(relinquish_msg);
mutex_lock(&mem_buf_xfer_mem_list_lock);
@ -595,7 +595,7 @@ static void mem_buf_relinquish_work(struct work_struct *work)
}

static int mem_buf_decode_alloc_resp(void *buf, size_t size,
hh_memparcel_handle_t *ret_hdl)
gh_memparcel_handle_t *ret_hdl)
{
struct mem_buf_alloc_resp *alloc_resp = buf;

@ -621,7 +621,7 @@ static void mem_buf_process_alloc_resp(struct mem_buf_msg_hdr *hdr, void *buf,
size_t size)
{
struct mem_buf_txn *txn;
hh_memparcel_handle_t hdl;
gh_memparcel_handle_t hdl;

mutex_lock(&mem_buf_idr_mutex);
txn = idr_find(&mem_buf_txn_idr, hdr->txn_id);
@ -690,12 +690,12 @@ static int mem_buf_msgq_recv_fn(void *unused)
int ret;

while (!kthread_should_stop()) {
buf = kzalloc(HH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
buf = kzalloc(GH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
if (!buf)
continue;

ret = hh_msgq_recv(mem_buf_hh_msgq_hdl, buf,
HH_MSGQ_MAX_MSG_SIZE_BYTES, &size, 0);
ret = gh_msgq_recv(mem_buf_gh_msgq_hdl, buf,
GH_MSGQ_MAX_MSG_SIZE_BYTES, &size, 0);
if (ret < 0) {
kfree(buf);
pr_err_ratelimited("%s failed to receive message rc: %d\n",
@ -748,7 +748,7 @@ static void *mem_buf_construct_alloc_req(struct mem_buf_desc *membuf,
req->hdr.msg_type = MEM_BUF_ALLOC_REQ;
req->size = membuf->size;
req->src_mem_type = membuf->src_mem_type;
acl_desc_size = offsetof(struct hh_acl_desc,
acl_desc_size = offsetof(struct gh_acl_desc,
acl_entries[nr_acl_entries]);
memcpy(&req->acl_desc, membuf->acl_desc, acl_desc_size);

@ -765,7 +765,7 @@ static int mem_buf_request_mem(struct mem_buf_desc *membuf)
struct mem_buf_txn txn;
void *alloc_req_msg;
size_t msg_size;
hh_memparcel_handle_t resp_hdl;
gh_memparcel_handle_t resp_hdl;
int ret;

ret = mem_buf_init_txn(&txn, &resp_hdl);
@ -814,7 +814,7 @@ static void mem_buf_relinquish_mem(u32 memparcel_hdl)
msg->hdl = memparcel_hdl;

trace_send_relinquish_msg(msg);
ret = hh_msgq_send(mem_buf_hh_msgq_hdl, msg, sizeof(*msg), 0);
ret = gh_msgq_send(mem_buf_gh_msgq_hdl, msg, sizeof(*msg), 0);

/*
* Free the buffer regardless of the return value as the hypervisor
@ -946,20 +946,20 @@ static bool is_valid_mem_buf_perms(u32 mem_buf_perms)
static int mem_buf_vmid_to_vmid(u32 mem_buf_vmid)
{
int ret;
hh_vmid_t vmid;
enum hh_vm_names vm_name;
gh_vmid_t vmid;
enum gh_vm_names vm_name;

if (!is_valid_mem_buf_vmid(mem_buf_vmid))
return -EINVAL;

if (mem_buf_vmid == MEM_BUF_VMID_PRIMARY_VM)
vm_name = HH_PRIMARY_VM;
vm_name = GH_PRIMARY_VM;
else if (mem_buf_vmid == MEM_BUF_VMID_TRUSTED_VM)
vm_name = HH_TRUSTED_VM;
vm_name = GH_TRUSTED_VM;
else
return -EINVAL;

ret = hh_rm_get_vmid(vm_name, &vmid);
ret = gh_rm_get_vmid(vm_name, &vmid);
if (!ret)
return vmid;
return ret;
@ -1093,7 +1093,7 @@ static void *mem_buf_alloc(struct mem_buf_allocation_data *alloc_data)
int ret;
struct file *filp;
struct mem_buf_desc *membuf;
struct hh_sgl_desc *sgl_desc;
struct gh_sgl_desc *sgl_desc;

if (!(mem_buf_capability & MEM_BUF_CAP_CONSUMER))
return ERR_PTR(-EOPNOTSUPP);
@ -1111,7 +1111,7 @@ static void *mem_buf_alloc(struct mem_buf_allocation_data *alloc_data)

pr_debug("%s: mem buf alloc begin\n", __func__);
membuf->size = ALIGN(alloc_data->size, MEM_BUF_MHP_ALIGNMENT);
membuf->acl_desc = mem_buf_vmid_perm_list_to_hh_acl(
membuf->acl_desc = mem_buf_vmid_perm_list_to_gh_acl(
alloc_data->vmids, alloc_data->perms,
alloc_data->nr_acl_entries);
if (IS_ERR(membuf->acl_desc)) {
@ -1270,8 +1270,8 @@ struct dma_buf *mem_buf_retrieve(struct mem_buf_retrieve_kernel_arg *arg)
{
int ret;
struct qcom_sg_buffer *buffer;
struct hh_acl_desc *acl_desc;
struct hh_sgl_desc *sgl_desc;
struct gh_acl_desc *acl_desc;
struct gh_sgl_desc *sgl_desc;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct dma_buf *dmabuf;
struct sg_table *sgt;
@ -1289,11 +1289,11 @@ struct dma_buf *mem_buf_retrieve(struct mem_buf_retrieve_kernel_arg *arg)
if (!buffer)
return ERR_PTR(-ENOMEM);

acl_desc = mem_buf_vmid_perm_list_to_hh_acl(arg->vmids, arg->perms,
acl_desc = mem_buf_vmid_perm_list_to_gh_acl(arg->vmids, arg->perms,
arg->nr_acl_entries);
if (IS_ERR(acl_desc)) {
ret = PTR_ERR(acl_desc);
goto err_hh_acl;
goto err_gh_acl;
}

sgl_desc = mem_buf_map_mem_s2(arg->memparcel_hdl, acl_desc);
@ -1306,7 +1306,7 @@ struct dma_buf *mem_buf_retrieve(struct mem_buf_retrieve_kernel_arg *arg)
if (ret < 0)
goto err_map_mem_s1;

sgt = dup_hh_sgl_desc_to_sgt(sgl_desc);
sgt = dup_gh_sgl_desc_to_sgt(sgl_desc);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto err_dup_sgt;
@ -1344,7 +1344,7 @@ struct dma_buf *mem_buf_retrieve(struct mem_buf_retrieve_kernel_arg *arg)
mem_buf_unmap_mem_s2(arg->memparcel_hdl);
err_map_s2:
kfree(acl_desc);
err_hh_acl:
err_gh_acl:
kfree(buffer);
return ERR_PTR(ret);
}
@ -1500,7 +1500,7 @@ static int mem_buf_acl_to_vmid_perms_list(unsigned int nr_acl_entries,
return ret;
}

static size_t mem_buf_get_sgl_buf_size(struct hh_sgl_desc *sgl_desc)
static size_t mem_buf_get_sgl_buf_size(struct gh_sgl_desc *sgl_desc)
{
size_t size = 0;
unsigned int i;
@ -1511,7 +1511,7 @@ static size_t mem_buf_get_sgl_buf_size(struct hh_sgl_desc *sgl_desc)
return size;
}

static struct sg_table *dup_hh_sgl_desc_to_sgt(struct hh_sgl_desc *sgl_desc)
static struct sg_table *dup_gh_sgl_desc_to_sgt(struct gh_sgl_desc *sgl_desc)
{
struct sg_table *new_table;
int ret, i;
@ -1779,9 +1779,9 @@ static int mem_buf_msgq_probe(struct platform_device *pdev)
goto err_kthread_create;
}

mem_buf_hh_msgq_hdl = hh_msgq_register(HH_MSGQ_LABEL_MEMBUF);
if (IS_ERR(mem_buf_hh_msgq_hdl)) {
ret = PTR_ERR(mem_buf_hh_msgq_hdl);
mem_buf_gh_msgq_hdl = gh_msgq_register(GH_MSGQ_LABEL_MEMBUF);
if (IS_ERR(mem_buf_gh_msgq_hdl)) {
ret = PTR_ERR(mem_buf_gh_msgq_hdl);
if (ret != -EPROBE_DEFER)
dev_err(dev,
"Message queue registration failed: rc: %d\n",
@ -1807,8 +1807,8 @@ static int mem_buf_msgq_probe(struct platform_device *pdev)
err_dev_create:
cdev_del(&mem_buf_char_dev);
err_cdev_add:
hh_msgq_unregister(mem_buf_hh_msgq_hdl);
mem_buf_hh_msgq_hdl = NULL;
gh_msgq_unregister(mem_buf_gh_msgq_hdl);
mem_buf_gh_msgq_hdl = NULL;
err_msgq_register:
kthread_stop(mem_buf_msgq_recv_thr);
mem_buf_msgq_recv_thr = NULL;
@ -1834,8 +1834,8 @@ static int mem_buf_msgq_remove(struct platform_device *pdev)

device_destroy(mem_buf_class, mem_buf_dev_no);
cdev_del(&mem_buf_char_dev);
hh_msgq_unregister(mem_buf_hh_msgq_hdl);
mem_buf_hh_msgq_hdl = NULL;
gh_msgq_unregister(mem_buf_gh_msgq_hdl);
mem_buf_gh_msgq_hdl = NULL;
kthread_stop(mem_buf_msgq_recv_thr);
mem_buf_msgq_recv_thr = NULL;
destroy_workqueue(mem_buf_wq);

@ -19,7 +19,7 @@ struct mem_buf_vmperm {
unsigned int max_acl_entries;
struct dma_buf *dmabuf;
struct sg_table *sgt;
hh_memparcel_handle_t memparcel_hdl;
gh_memparcel_handle_t memparcel_hdl;
struct mutex lock;
mem_buf_dma_buf_destructor dtor;
void *dtor_data;
@ -136,7 +136,7 @@ static struct mem_buf_vmperm *mem_buf_vmperm_alloc_flags(

/* Must be freed via mem_buf_vmperm_release. */
struct mem_buf_vmperm *mem_buf_vmperm_alloc_accept(struct sg_table *sgt,
hh_memparcel_handle_t memparcel_hdl)
gh_memparcel_handle_t memparcel_hdl)
{
int vmids[1];
int perms[1];
@ -195,33 +195,33 @@ static int __mem_buf_vmperm_reclaim(struct mem_buf_vmperm *vmperm)
return 0;
}

static struct hh_sgl_desc *dup_sgt_to_hh_sgl_desc(struct sg_table *sgt)
static struct gh_sgl_desc *dup_sgt_to_gh_sgl_desc(struct sg_table *sgt)
{
struct hh_sgl_desc *hh_sgl;
struct gh_sgl_desc *gh_sgl;
size_t size;
int i;
struct scatterlist *sg;

size = offsetof(struct hh_sgl_desc, sgl_entries[sgt->orig_nents]);
hh_sgl = kvmalloc(size, GFP_KERNEL);
if (!hh_sgl)
size = offsetof(struct gh_sgl_desc, sgl_entries[sgt->orig_nents]);
gh_sgl = kvmalloc(size, GFP_KERNEL);
if (!gh_sgl)
return ERR_PTR(-ENOMEM);

hh_sgl->n_sgl_entries = sgt->orig_nents;
gh_sgl->n_sgl_entries = sgt->orig_nents;
for_each_sgtable_sg(sgt, sg, i) {
hh_sgl->sgl_entries[i].ipa_base = sg_phys(sg);
hh_sgl->sgl_entries[i].size = sg->length;
gh_sgl->sgl_entries[i].ipa_base = sg_phys(sg);
gh_sgl->sgl_entries[i].size = sg->length;
}

return hh_sgl;
return gh_sgl;
}

static int mem_buf_vmperm_relinquish(struct mem_buf_vmperm *vmperm)
{
int ret;
struct hh_sgl_desc *sgl_desc;
struct gh_sgl_desc *sgl_desc;

sgl_desc = dup_sgt_to_hh_sgl_desc(vmperm->sgt);
sgl_desc = dup_sgt_to_gh_sgl_desc(vmperm->sgt);
if (IS_ERR(sgl_desc))
return PTR_ERR(sgl_desc);

@ -453,7 +453,7 @@ int mem_buf_lend_internal(struct dma_buf *dmabuf,
if (api < 0)
return -EINVAL;

if (api == MEM_BUF_API_HAVEN) {
if (api == MEM_BUF_API_GUNYAH) {
/* Due to hyp-assign batching */
if (sgt->nents > 1) {
pr_err_ratelimited("Operation requires physically contiguous memory\n");
@ -517,7 +517,7 @@ int mem_buf_lend_internal(struct dma_buf *dmabuf,
goto err_assign;
}

if (api == MEM_BUF_API_HAVEN) {
if (api == MEM_BUF_API_GUNYAH) {
ret = mem_buf_retrieve_memparcel_hdl(vmperm->sgt, arg->vmids,
arg->perms, arg->nr_acl_entries,
&arg->memparcel_hdl);

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
*/

#undef TRACE_SYSTEM
@ -13,7 +13,7 @@
#include <linux/mem-buf.h>

#ifdef CREATE_TRACE_POINTS
static void __maybe_unused hh_acl_to_vmid_perms(struct hh_acl_desc *acl_desc,
static void __maybe_unused gh_acl_to_vmid_perms(struct gh_acl_desc *acl_desc,
u16 *vmids, u8 *perms)
{
unsigned int i;
@ -25,7 +25,7 @@ static void __maybe_unused hh_acl_to_vmid_perms(struct hh_acl_desc *acl_desc,
}

static void __maybe_unused
hh_sgl_to_ipa_bases_sizes(struct hh_sgl_desc *sgl_desc,
gh_sgl_to_ipa_bases_sizes(struct gh_sgl_desc *sgl_desc,
u64 *ipa_bases, u64 *sizes)
{
unsigned int i;
@ -61,7 +61,7 @@ TRACE_EVENT(mem_buf_alloc_info,

TP_PROTO(size_t size, enum mem_buf_mem_type src_mem_type,
enum mem_buf_mem_type dst_mem_type,
struct hh_acl_desc *acl_desc),
struct gh_acl_desc *acl_desc),

TP_ARGS(size, src_mem_type, dst_mem_type, acl_desc),

@ -79,7 +79,7 @@ TRACE_EVENT(mem_buf_alloc_info,
__assign_str(src_type, mem_type_to_str(src_mem_type));
__assign_str(dst_type, mem_type_to_str(dst_mem_type));
__entry->nr_acl_entries = acl_desc->n_acl_entries;
hh_acl_to_vmid_perms(acl_desc, __get_dynamic_array(vmids),
gh_acl_to_vmid_perms(acl_desc, __get_dynamic_array(vmids),
__get_dynamic_array(perms));
),

@ -115,7 +115,7 @@ DECLARE_EVENT_CLASS(alloc_req_msg_class,
__entry->size = req->size;
__assign_str(src_type, mem_type_to_str(req->src_mem_type));
__entry->nr_acl_entries = req->acl_desc.n_acl_entries;
hh_acl_to_vmid_perms(&req->acl_desc, __get_dynamic_array(vmids),
gh_acl_to_vmid_perms(&req->acl_desc, __get_dynamic_array(vmids),
__get_dynamic_array(perms));
),

@ -151,7 +151,7 @@ DECLARE_EVENT_CLASS(relinquish_req_msg_class,

TP_STRUCT__entry(
__string(msg_type, msg_type_to_str(rel_req->hdr.msg_type))
__field(hh_memparcel_handle_t, hdl)
__field(gh_memparcel_handle_t, hdl)
),

TP_fast_assign(
@ -188,7 +188,7 @@ DECLARE_EVENT_CLASS(alloc_resp_class,
__field(u32, txn_id)
__string(msg_type, msg_type_to_str(resp->hdr.msg_type))
__field(s32, ret)
__field(hh_memparcel_handle_t, hdl)
__field(gh_memparcel_handle_t, hdl)
),

TP_fast_assign(
@ -220,8 +220,8 @@ DEFINE_EVENT(alloc_resp_class, receive_alloc_resp_msg,

TRACE_EVENT(lookup_sgl,

TP_PROTO(struct hh_sgl_desc *sgl_desc, int ret,
hh_memparcel_handle_t hdl),
TP_PROTO(struct gh_sgl_desc *sgl_desc, int ret,
gh_memparcel_handle_t hdl),

TP_ARGS(sgl_desc, ret, hdl),

@ -230,12 +230,12 @@ TRACE_EVENT(lookup_sgl,
__dynamic_array(u64, ipa_bases, sgl_desc->n_sgl_entries)
__dynamic_array(u64, sizes, sgl_desc->n_sgl_entries)
__field(int, ret)
__field(hh_memparcel_handle_t, hdl)
__field(gh_memparcel_handle_t, hdl)
),

TP_fast_assign(
__entry->nr_sgl_entries = sgl_desc->n_sgl_entries;
hh_sgl_to_ipa_bases_sizes(sgl_desc,
gh_sgl_to_ipa_bases_sizes(sgl_desc,
__get_dynamic_array(ipa_bases),
__get_dynamic_array(sizes));
__entry->ret = ret;
@ -254,12 +254,12 @@ TRACE_EVENT(lookup_sgl,

TRACE_EVENT(map_mem_s2,

TP_PROTO(hh_memparcel_handle_t hdl, struct hh_sgl_desc *sgl_desc),
TP_PROTO(gh_memparcel_handle_t hdl, struct gh_sgl_desc *sgl_desc),

TP_ARGS(hdl, sgl_desc),

TP_STRUCT__entry(
__field(hh_memparcel_handle_t, hdl)
__field(gh_memparcel_handle_t, hdl)
__field(u16, nr_sgl_entries)
__dynamic_array(u64, ipa_bases, sgl_desc->n_sgl_entries)
__dynamic_array(u64, sizes, sgl_desc->n_sgl_entries)
@ -268,7 +268,7 @@ TRACE_EVENT(map_mem_s2,
TP_fast_assign(
__entry->hdl = hdl;
__entry->nr_sgl_entries = sgl_desc->n_sgl_entries;
hh_sgl_to_ipa_bases_sizes(sgl_desc,
gh_sgl_to_ipa_bases_sizes(sgl_desc,
__get_dynamic_array(ipa_bases),
__get_dynamic_array(sizes));
),

@ -108,21 +108,21 @@ config HVC_DCC_SERIALIZE_SMP
output FIFOs that all cores will use. Reads and writes from/to DCC
are handled by a workqueue that runs only core 0.

config HVC_HAVEN
tristate "Haven tty support"
depends on HH_RM_DRV
config HVC_GUNYAH
tristate "Gunyah tty support"
depends on GH_RM_DRV
select HVC_DRIVER
help
This console exposes communication with other
virtual machines in the Haven hypervisor. This
virtual machines in the Gunyah hypervisor. This
option may also be used as an early console
to another VM.
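
The RM console calls that back this tty are visible in hvc_gunyah.c below; a hedged sketch of the open/write/flush/close sequence, with the banner helper itself being illustrative and error handling trimmed:

/* Hypothetical caller of the RM console API used by this driver. */
static int demo_console_banner(gh_vmid_t vmid)
{
	static const char msg[] = "hello from the primary VM\n";
	int ret;

	ret = gh_rm_console_open(vmid);
	if (ret)
		return ret;

	ret = gh_rm_console_write(vmid, msg, sizeof(msg) - 1);
	if (!ret)
		ret = gh_rm_console_flush(vmid);

	gh_rm_console_close(vmid);
	return ret;
}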

config HVC_HAVEN_CONSOLE
bool "Haven console support"
depends on HVC_HAVEN
config HVC_GUNYAH_CONSOLE
bool "Gunyah console support"
depends on HVC_GUNYAH
help
Select this option to allow Haven tty
Select this option to allow Gunyah tty
as a boot console communicating with
the primary VM. You still need to specify
earlycon and console parameters.

@ -4,7 +4,7 @@ obj-$(CONFIG_HVC_OPAL) += hvc_opal.o hvsi_lib.o
obj-$(CONFIG_HVC_OLD_HVSI) += hvsi.o
obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
obj-$(CONFIG_HVC_DCC) += hvc_dcc.o
obj-$(CONFIG_HVC_HAVEN) += hvc_haven.o
obj-$(CONFIG_HVC_GUNYAH) += hvc_gunyah.o
obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
obj-$(CONFIG_HVC_IRQ) += hvc_irq.o
obj-$(CONFIG_HVC_XEN) += hvc_xen.o

@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/

#define pr_fmt(fmt) "hvc_haven: " fmt
#define pr_fmt(fmt) "hvc_gunyah: " fmt

#include <linux/console.h>
#include <linux/init.h>
@ -14,16 +14,16 @@
#include <linux/printk.h>
#include <linux/workqueue.h>

#include <linux/haven/hh_msgq.h>
#include <linux/haven/hh_common.h>
#include <linux/haven/hh_rm_drv.h>
#include <linux/gunyah/gh_msgq.h>
#include <linux/gunyah/gh_common.h>
#include <linux/gunyah/gh_rm_drv.h>

#include "hvc_console.h"

/*
* Note: hvc_alloc follows first-come, first-served for assigning
* numbers to registered hvc instances. Thus, the following assignments occur
* when both DCC and HAVEN consoles are compiled:
* when both DCC and GUNYAH consoles are compiled:
* | DCC connected | DCC not connected
* (dcc) | hvc0 | (not present)
* SELF | hvc1 | hvc0
@ -32,49 +32,49 @@
* "DCC connected" means a DCC terminal is open with device
*/

#define HVC_HH_VTERM_COOKIE 0x474E5948
#define HVC_GH_VTERM_COOKIE 0x474E5948
/* # of payload bytes that can fit in a 1-fragment CONSOLE_WRITE message */
#define HH_HVC_WRITE_MSG_SIZE ((1 * (HH_MSGQ_MAX_MSG_SIZE_BYTES - 8)) - 4)
#define GH_HVC_WRITE_MSG_SIZE ((1 * (GH_MSGQ_MAX_MSG_SIZE_BYTES - 8)) - 4)

struct hh_hvc_prv {
struct gh_hvc_prv {
struct hvc_struct *hvc;
enum hh_vm_names vm_name;
enum gh_vm_names vm_name;
DECLARE_KFIFO(get_fifo, char, 1024);
DECLARE_KFIFO(put_fifo, char, 1024);
struct work_struct put_work;
};

static DEFINE_SPINLOCK(fifo_lock);
static struct hh_hvc_prv hh_hvc_data[HH_VM_MAX];
static struct gh_hvc_prv gh_hvc_data[GH_VM_MAX];

static inline int hh_vm_name_to_vtermno(enum hh_vm_names vmname)
static inline int gh_vm_name_to_vtermno(enum gh_vm_names vmname)
{
return vmname + HVC_HH_VTERM_COOKIE;
return vmname + HVC_GH_VTERM_COOKIE;
}

static inline int vtermno_to_hh_vm_name(int vtermno)
static inline int vtermno_to_gh_vm_name(int vtermno)
{
return vtermno - HVC_HH_VTERM_COOKIE;
return vtermno - HVC_GH_VTERM_COOKIE;
}

static int hh_hvc_notify_console_chars(struct notifier_block *this,
static int gh_hvc_notify_console_chars(struct notifier_block *this,
unsigned long cmd, void *data)
{
struct hh_rm_notif_vm_console_chars *msg = data;
enum hh_vm_names vm_name;
struct gh_rm_notif_vm_console_chars *msg = data;
enum gh_vm_names vm_name;
int ret;

if (cmd != HH_RM_NOTIF_VM_CONSOLE_CHARS)
if (cmd != GH_RM_NOTIF_VM_CONSOLE_CHARS)
return NOTIFY_DONE;

ret = hh_rm_get_vm_name(msg->vmid, &vm_name);
ret = gh_rm_get_vm_name(msg->vmid, &vm_name);
if (ret) {
pr_warn_ratelimited("don't know VMID %d ret: %d\n", msg->vmid,
ret);
return NOTIFY_OK;
}

ret = kfifo_in_spinlocked(&hh_hvc_data[vm_name].get_fifo,
ret = kfifo_in_spinlocked(&gh_hvc_data[vm_name].get_fifo,
msg->bytes, msg->num_bytes,
&fifo_lock);

@ -85,22 +85,22 @@ static int hh_hvc_notify_console_chars(struct notifier_block *this,
pr_warn_ratelimited("dropped %d bytes from VM%d - full fifo\n",
msg->num_bytes - ret, vm_name);

if (hvc_poll(hh_hvc_data[vm_name].hvc))
if (hvc_poll(gh_hvc_data[vm_name].hvc))
hvc_kick();

return NOTIFY_OK;
}

static void hh_hvc_put_work_fn(struct work_struct *ws)
static void gh_hvc_put_work_fn(struct work_struct *ws)
{
hh_vmid_t vmid;
char buf[HH_HVC_WRITE_MSG_SIZE];
gh_vmid_t vmid;
char buf[GH_HVC_WRITE_MSG_SIZE];
int count, ret;
struct hh_hvc_prv *prv = container_of(ws, struct hh_hvc_prv, put_work);
struct gh_hvc_prv *prv = container_of(ws, struct gh_hvc_prv, put_work);

ret = hh_rm_get_vmid(prv->vm_name, &vmid);
ret = gh_rm_get_vmid(prv->vm_name, &vmid);
if (ret) {
pr_warn_once("%s: hh_rm_get_vmid failed for %d: %d\n",
pr_warn_once("%s: gh_rm_get_vmid failed for %d: %d\n",
__func__, prv->vm_name, ret);
return;
}
@ -111,205 +111,205 @@ static void hh_hvc_put_work_fn(struct work_struct *ws)
if (count <= 0)
continue;

ret = hh_rm_console_write(vmid, buf, count);
ret = gh_rm_console_write(vmid, buf, count);
if (ret) {
pr_warn_once("%s hh_rm_console_write failed for %d: %d\n",
pr_warn_once("%s gh_rm_console_write failed for %d: %d\n",
__func__, prv->vm_name, ret);
break;
}
}
}

static int hh_hvc_get_chars(uint32_t vtermno, char *buf, int count)
static int gh_hvc_get_chars(uint32_t vtermno, char *buf, int count)
{
int vm_name = vtermno_to_hh_vm_name(vtermno);
int vm_name = vtermno_to_gh_vm_name(vtermno);

if (vm_name < 0 || vm_name >= HH_VM_MAX)
if (vm_name < 0 || vm_name >= GH_VM_MAX)
return -EINVAL;

return kfifo_out_spinlocked(&hh_hvc_data[vm_name].get_fifo,
return kfifo_out_spinlocked(&gh_hvc_data[vm_name].get_fifo,
buf, count, &fifo_lock);
}

static int hh_hvc_put_chars(uint32_t vtermno, const char *buf, int count)
static int gh_hvc_put_chars(uint32_t vtermno, const char *buf, int count)
{
int ret, vm_name = vtermno_to_hh_vm_name(vtermno);
int ret, vm_name = vtermno_to_gh_vm_name(vtermno);

if (vm_name < 0 || vm_name >= HH_VM_MAX)
if (vm_name < 0 || vm_name >= GH_VM_MAX)
return -EINVAL;

ret = kfifo_in_spinlocked(&hh_hvc_data[vm_name].put_fifo,
ret = kfifo_in_spinlocked(&gh_hvc_data[vm_name].put_fifo,
buf, count, &fifo_lock);
if (ret > 0)
schedule_work(&hh_hvc_data[vm_name].put_work);
schedule_work(&gh_hvc_data[vm_name].put_work);
return ret;
}

static int hh_hvc_flush(uint32_t vtermno, bool wait)
static int gh_hvc_flush(uint32_t vtermno, bool wait)
{
int ret, vm_name = vtermno_to_hh_vm_name(vtermno);
hh_vmid_t vmid;
int ret, vm_name = vtermno_to_gh_vm_name(vtermno);
gh_vmid_t vmid;

/* RM calls will all sleep. A flush without waiting isn't possible */
if (!wait)
return 0;
might_sleep();

if (vm_name < 0 || vm_name >= HH_VM_MAX)
if (vm_name < 0 || vm_name >= GH_VM_MAX)
return -EINVAL;

ret = hh_rm_get_vmid(vm_name, &vmid);
ret = gh_rm_get_vmid(vm_name, &vmid);
if (ret)
return ret;

if (cancel_work_sync(&hh_hvc_data[vm_name].put_work)) {
if (cancel_work_sync(&gh_hvc_data[vm_name].put_work)) {
/* flush the fifo */
hh_hvc_put_work_fn(&hh_hvc_data[vm_name].put_work);
gh_hvc_put_work_fn(&gh_hvc_data[vm_name].put_work);
}

return hh_rm_console_flush(vmid);
return gh_rm_console_flush(vmid);
}

static int hh_hvc_notify_add(struct hvc_struct *hp, int vm_name)
static int gh_hvc_notify_add(struct hvc_struct *hp, int vm_name)
{
int ret;
hh_vmid_t vmid;
gh_vmid_t vmid;

#ifdef CONFIG_HVC_HAVEN_CONSOLE
#ifdef CONFIG_HVC_GUNYAH_CONSOLE
/* tty layer is opening, but kernel has already opened for printk */
if (vm_name == HH_SELF_VM)
if (vm_name == GH_SELF_VM)
return 0;
#endif /* CONFIG_HVC_HAVEN_CONSOLE */
#endif /* CONFIG_HVC_GUNYAH_CONSOLE */

ret = hh_rm_get_vmid(vm_name, &vmid);
ret = gh_rm_get_vmid(vm_name, &vmid);
if (ret) {
pr_err("%s: hh_rm_get_vmid failed for %d: %d\n", __func__,
pr_err("%s: gh_rm_get_vmid failed for %d: %d\n", __func__,
vm_name, ret);
return ret;
}

return hh_rm_console_open(vmid);
return gh_rm_console_open(vmid);
}

static void hh_hvc_notify_del(struct hvc_struct *hp, int vm_name)
static void gh_hvc_notify_del(struct hvc_struct *hp, int vm_name)
{
int ret;
hh_vmid_t vmid;
gh_vmid_t vmid;

if (vm_name < 0 || vm_name >= HH_VM_MAX)
if (vm_name < 0 || vm_name >= GH_VM_MAX)
return;

#ifdef CONFIG_HVC_HAVEN_CONSOLE
#ifdef CONFIG_HVC_GUNYAH_CONSOLE
/* tty layer is closing, but kernel is still using for printk. */
if (vm_name == HH_SELF_VM)
if (vm_name == GH_SELF_VM)
return;
#endif /* CONFIG_HVC_HAVEN_CONSOLE */
#endif /* CONFIG_HVC_GUNYAH_CONSOLE */

if (cancel_work_sync(&hh_hvc_data[vm_name].put_work)) {
if (cancel_work_sync(&gh_hvc_data[vm_name].put_work)) {
/* flush the fifo */
hh_hvc_put_work_fn(&hh_hvc_data[vm_name].put_work);
gh_hvc_put_work_fn(&gh_hvc_data[vm_name].put_work);
}

ret = hh_rm_get_vmid(vm_name, &vmid);
ret = gh_rm_get_vmid(vm_name, &vmid);
if (ret)
return;

ret = hh_rm_console_close(vmid);
ret = gh_rm_console_close(vmid);

if (ret)
pr_err("%s: failed close VM%d console - %d\n", __func__,
vm_name, ret);

kfifo_reset(&hh_hvc_data[vm_name].get_fifo);
kfifo_reset(&gh_hvc_data[vm_name].get_fifo);
}

static struct notifier_block hh_hvc_nb = {
.notifier_call = hh_hvc_notify_console_chars,
static struct notifier_block gh_hvc_nb = {
.notifier_call = gh_hvc_notify_console_chars,
};

static const struct hv_ops hh_hv_ops = {
.get_chars = hh_hvc_get_chars,
.put_chars = hh_hvc_put_chars,
.flush = hh_hvc_flush,
.notifier_add = hh_hvc_notify_add,
.notifier_del = hh_hvc_notify_del,
static const struct hv_ops gh_hv_ops = {
.get_chars = gh_hvc_get_chars,
.put_chars = gh_hvc_put_chars,
.flush = gh_hvc_flush,
.notifier_add = gh_hvc_notify_add,
.notifier_del = gh_hvc_notify_del,
};

#ifdef CONFIG_HVC_HAVEN_CONSOLE
static int __init hvc_hh_console_init(void)
#ifdef CONFIG_HVC_GUNYAH_CONSOLE
static int __init hvc_gh_console_init(void)
{
int ret;

/* Need to call RM CONSOLE_OPEN before console can be used */
ret = hh_rm_console_open(0);
ret = gh_rm_console_open(0);
if (ret)
return ret;

ret = hvc_instantiate(hh_vm_name_to_vtermno(HH_SELF_VM), 0,
&hh_hv_ops);
ret = hvc_instantiate(gh_vm_name_to_vtermno(GH_SELF_VM), 0,
&gh_hv_ops);

return ret < 0 ? -ENODEV : 0;
}
#else
static int __init hvc_hh_console_init(void)
static int __init hvc_gh_console_init(void)
{
return 0;
}
#endif /* CONFIG_HVC_HAVEN_CONSOLE */
#endif /* CONFIG_HVC_GUNYAH_CONSOLE */

static int __init hvc_hh_init(void)
static int __init hvc_gh_init(void)
{
int i, ret = 0;
struct hh_hvc_prv *prv;
struct gh_hvc_prv *prv;

/* Must initialize fifos and work before calling hvc_hh_console_init */
for (i = 0; i < HH_VM_MAX; i++) {
prv = &hh_hvc_data[i];
/* Must initialize fifos and work before calling hvc_gh_console_init */
for (i = 0; i < GH_VM_MAX; i++) {
prv = &gh_hvc_data[i];
prv->vm_name = i;
INIT_KFIFO(prv->get_fifo);
INIT_KFIFO(prv->put_fifo);
INIT_WORK(&prv->put_work, hh_hvc_put_work_fn);
INIT_WORK(&prv->put_work, gh_hvc_put_work_fn);
}

/* Must instantiate console before calling hvc_alloc */
hvc_hh_console_init();
hvc_gh_console_init();

for (i = 0; i < HH_VM_MAX; i++) {
prv = &hh_hvc_data[i];
prv->hvc = hvc_alloc(hh_vm_name_to_vtermno(i), i, &hh_hv_ops,
for (i = 0; i < GH_VM_MAX; i++) {
prv = &gh_hvc_data[i];
prv->hvc = hvc_alloc(gh_vm_name_to_vtermno(i), i, &gh_hv_ops,
256);
ret = PTR_ERR_OR_ZERO(prv->hvc);
if (ret)
goto bail;
}

ret = hh_rm_register_notifier(&hh_hvc_nb);
ret = gh_rm_register_notifier(&gh_hvc_nb);
if (ret)
goto bail;

return 0;
bail:
for (--i; i >= 0; i--) {
hvc_remove(hh_hvc_data[i].hvc);
hh_hvc_data[i].hvc = NULL;
hvc_remove(gh_hvc_data[i].hvc);
gh_hvc_data[i].hvc = NULL;
}
return ret;
}
late_initcall(hvc_hh_init);
late_initcall(hvc_gh_init);

static __exit void hvc_hh_exit(void)
static __exit void hvc_gh_exit(void)
{
int i;

hh_rm_unregister_notifier(&hh_hvc_nb);
gh_rm_unregister_notifier(&gh_hvc_nb);

for (i = 0; i < HH_VM_MAX; i++)
if (hh_hvc_data[i].hvc) {
hvc_remove(hh_hvc_data[i].hvc);
hh_hvc_data[i].hvc = NULL;
for (i = 0; i < GH_VM_MAX; i++)
if (gh_hvc_data[i].hvc) {
hvc_remove(gh_hvc_data[i].hvc);
gh_hvc_data[i].hvc = NULL;
}
}
module_exit(hvc_hh_exit);
module_exit(hvc_gh_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Haven Hypervisor Console Driver");
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Gunyah Hypervisor Console Driver");

@ -32,7 +32,7 @@ config FSL_HV_MANAGER
partition shuts down.

source "drivers/virt/vboxguest/Kconfig"
source "drivers/virt/haven/Kconfig"
source "drivers/virt/gunyah/Kconfig"

source "drivers/virt/nitro_enclaves/Kconfig"
endif

@ -5,6 +5,6 @@

obj-$(CONFIG_FSL_HV_MANAGER) += fsl_hypervisor.o
obj-y += vboxguest/
obj-y += haven/
obj-y += gunyah/

obj-$(CONFIG_NITRO_ENCLAVES) += nitro_enclaves/

@ -1,44 +1,44 @@
# SPDX-License-Identifier: GPL-2.0-only

menuconfig HAVEN_DRIVERS
bool "Haven Virtualization drivers"
menuconfig GUNYAH_DRIVERS
bool "Gunyah Virtualization drivers"
depends on ARM64
help
The Haven drivers are the helper interfaces that run on the
The Gunyah drivers are the helper interfaces that run on the
virtual machines that provide support such as memory/device
sharing, IRQ sharing, IPC/signalling mechanisms, and so on.

Say Y here to enable the drivers needed to work in the Haven
Say Y here to enable the drivers needed to work in the Gunyah
virtualization environment.

If you say N, all options in this submenu will be skipped and disabled.

if HAVEN_DRIVERS
if GUNYAH_DRIVERS

config HH_CTRL
tristate "Create Haven entries under /sys/hypervisor"
config GH_CTRL
tristate "Create Gunyah entries under /sys/hypervisor"
depends on SYSFS
select SYS_HYPERVISOR
help
Create entries under /sys/hypervisor for the Haven hypervisor.
Create entries under /sys/hypervisor for the Gunyah hypervisor.
The driver also provides a facility for controlling
hypervisor debug features.
See Documentation/ABI/testing/sysfs-hypervisor-haven for more details.
See Documentation/ABI/testing/sysfs-hypervisor-gunyah for more details.

config HH_MSGQ
tristate "Haven Message Queue driver"
config GH_MSGQ
tristate "Gunyah Message Queue driver"
help
Haven offers message-queues as one of the IPC mechanisms to
Gunyah offers message-queues as one of the IPC mechanisms to
communicate among the Virtual Machines. The message queue drivers
run on the virtual machines to provide an interface to the clients
who wish to communicate with other clients on a different VM. Currently,
the services offered by the drivers are simply to send and receive
messages in a blocking manner.
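
A hedged sketch of the blocking send/receive flow this help text describes, using only the calls and constants that appear in the mem-buf changes above; the label choice, helper name, and error handling are simplified:

/* Hypothetical client that echoes one message back to its peer VM. */
static int demo_msgq_echo(void)
{
	void *hdl, *buf;
	size_t size;
	int ret;

	hdl = gh_msgq_register(GH_MSGQ_LABEL_MEMBUF);
	if (IS_ERR(hdl))
		return PTR_ERR(hdl);

	buf = kzalloc(GH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Blocks until a message arrives, then blocks on the reply */
	ret = gh_msgq_recv(hdl, buf, GH_MSGQ_MAX_MSG_SIZE_BYTES, &size, 0);
	if (ret >= 0)
		ret = gh_msgq_send(hdl, buf, size, 0);

	kfree(buf);
	return ret;
}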

config HH_RM_DRV
tristate "Haven Resource Manager driver"
config GH_RM_DRV
tristate "Gunyah Resource Manager driver"
help
The Haven Resource Manager driver is used to communicate with the
The Gunyah Resource Manager driver is used to communicate with the
Resource Manager Virtual Machine (RM-VM). The RM-VM acts as a mediator
and provides numerous services to the other VMs running in the system,
such as notifying when a particular VM is up, resource (IRQ/device)
@ -48,53 +48,53 @@ config HH_RM_DRV
interface to other drivers in order to obtain the services provided by
the RM-VM.
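
A minimal sketch of how a client consumes RM notifications, mirroring the notifier blocks registered in the hyp_core_ctl and hvc changes above; the callback and block names are illustrative:

static int demo_vm_status_cb(struct notifier_block *nb,
			     unsigned long cmd, void *data)
{
	struct gh_rm_notif_vm_status_payload *payload = data;

	/* Only react once the peer VM reports it is running */
	if (cmd == GH_RM_NOTIF_VM_STATUS &&
	    payload->vm_status == GH_RM_VM_STATUS_RUNNING)
		pr_info("VM is now running\n");

	return NOTIFY_DONE;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_vm_status_cb,
};

/* Registered/unregistered with gh_rm_register_notifier()/gh_rm_unregister_notifier(). */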

config HH_DBL
tristate "Haven Doorbell driver"
config GH_DBL
tristate "Gunyah Doorbell driver"
help
Haven offers a simple inter-VM (Virtual Machine) communication
Gunyah offers a simple inter-VM (Virtual Machine) communication
mechanism through the use of doorbell interrupts. A single doorbell
instance provides unidirectional communication between two VMs and
acts as either a source (Tx) or a receiver (Rx). Individual VMs make
use of these doorbells by calling the send and/or receive primitives
exposed by the driver to trigger an interrupt to each other and
exchange data.

config HH_IRQ_LEND
tristate "Haven IRQ Lending Framework"
depends on HH_RM_DRV
config GH_IRQ_LEND
tristate "Gunyah IRQ Lending Framework"
depends on GH_RM_DRV
help
Haven Resource Manager permits interrupts to be shared between
Gunyah Resource Manager permits interrupts to be shared between
virtual machines. This config enables a framework which
supports sharing these interrupts. It follows the RM-recommended
protocol.

config HH_MEM_NOTIFIER
tristate "Haven Memory Resource Notification Framework"
depends on HH_RM_DRV
config GH_MEM_NOTIFIER
tristate "Gunyah Memory Resource Notification Framework"
depends on GH_RM_DRV
help
The Haven Resource Manager allows for different memory resources
The Gunyah Resource Manager allows for different memory resources
to be transferred across virtual machines with different notification
labels assigned to each resource to aid in distinguishing them.
Enabling the Haven Memory Resource Notification Framework provides an
Enabling the Gunyah Memory Resource Notification Framework provides an
interface for clients to transmit memory resources between virtual
machines, and register callbacks that get invoked only when
notifications pertaining to their memory resources arrive.

config HH_VIRT_WATCHDOG
tristate "Haven Virtual Watchdog Driver"
config GH_VIRT_WATCHDOG
tristate "Gunyah Virtual Watchdog Driver"
depends on QCOM_WDT_CORE
help
This enables the Qualcomm Technologies, Inc. watchdog module for
the Haven hypervisor. It provides an interface to perform watchdog
the Gunyah hypervisor. It provides an interface to perform watchdog
actions such as setting the bark/bite time and also petting the
watchdog in the hypervisor.

config HH_VIRTIO_BACKEND
tristate "Haven Virtio Backend driver"
depends on HH_RM_DRV
config GH_VIRTIO_BACKEND
tristate "Gunyah Virtio Backend driver"
depends on GH_RM_DRV
help
This driver helps an application that implements a virtio backend
driver to communicate with its frontend counterpart in a guest OS
running on top of the Haven hypervisor. One device node per virtual
running on top of the Gunyah hypervisor. One device node per virtual
|
||||
machine is created for all backend devices of the VM. Say y or m here
|
||||
to enable. If unsure, say n.
|
||||
|
||||
|
@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_HH_CTRL) += hh_ctrl.o
obj-$(CONFIG_HH_MSGQ) += hh_msgq.o
obj-$(CONFIG_HH_RM_DRV) += hh_rm_drv.o
hh_rm_drv-y := hh_rm_core.o hh_rm_iface.o
obj-$(CONFIG_HH_DBL) += hh_dbl.o
obj-$(CONFIG_HH_IRQ_LEND) += hh_irq_lend.o
obj-$(CONFIG_HH_MEM_NOTIFIER) += hh_mem_notifier.o
obj-$(CONFIG_HH_VIRT_WATCHDOG)+= hh_virt_wdt.o
obj-$(CONFIG_HH_VIRTIO_BACKEND) += hh_virtio_backend.o
obj-$(CONFIG_GH_CTRL) += gh_ctrl.o
obj-$(CONFIG_GH_MSGQ) += gh_msgq.o
obj-$(CONFIG_GH_RM_DRV) += gh_rm_drv.o
gh_rm_drv-y := gh_rm_core.o gh_rm_iface.o
obj-$(CONFIG_GH_DBL) += gh_dbl.o
obj-$(CONFIG_GH_IRQ_LEND) += gh_irq_lend.o
obj-$(CONFIG_GH_MEM_NOTIFIER) += gh_mem_notifier.o
obj-$(CONFIG_GH_VIRT_WATCHDOG)+= gh_virt_wdt.o
obj-$(CONFIG_GH_VIRTIO_BACKEND) += gh_virtio_backend.o

@ -3,7 +3,7 @@
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/

#define pr_fmt(fmt) "haven: " fmt
#define pr_fmt(fmt) "gunyah: " fmt

#include <linux/arm-smccc.h>
#include <linux/debugfs.h>
@ -12,8 +12,8 @@
#include <linux/of.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/haven/hcall.h>
#include <linux/haven/hh_errno.h>
#include <linux/gunyah/hcall.h>
#include <linux/gunyah/gh_errno.h>

#define QC_HYP_SMCCC_CALL_UID \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
@ -27,29 +27,29 @@
#define QC_HYP_UID2 0x946f609b
#define QC_HYP_UID3 0x54539de6

#define HH_API_INFO_API_VERSION(x) (((x) >> 0) & 0x3fff)
#define HH_API_INFO_BIG_ENDIAN(x) (((x) >> 14) & 1)
#define HH_API_INFO_IS_64BIT(x) (((x) >> 15) & 1)
#define HH_API_INFO_VARIANT(x) (((x) >> 56) & 0xff)
#define GH_API_INFO_API_VERSION(x) (((x) >> 0) & 0x3fff)
#define GH_API_INFO_BIG_ENDIAN(x) (((x) >> 14) & 1)
#define GH_API_INFO_IS_64BIT(x) (((x) >> 15) & 1)
#define GH_API_INFO_VARIANT(x) (((x) >> 56) & 0xff)

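/*
 * Worked example (editor's illustration, not part of this change): for an
 * api_info value of 0x0100000000000001, GH_API_INFO_API_VERSION() extracts
 * bits [13:0] and yields 1, while GH_API_INFO_VARIANT() extracts bits
 * [63:56] and also yields 1.
 */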
#define HH_IDENTIFY_PARTITION_CSPACE(x) (((x) >> 0) & 1)
#define HH_IDENTIFY_DOORBELL(x) (((x) >> 1) & 1)
#define HH_IDENTIFY_MSGQUEUE(x) (((x) >> 2) & 1)
#define HH_IDENTIFY_VIC(x) (((x) >> 3) & 1)
#define HH_IDENTIFY_VPM(x) (((x) >> 4) & 1)
#define HH_IDENTIFY_VCPU(x) (((x) >> 5) & 1)
#define HH_IDENTIFY_MEMEXTENT(x) (((x) >> 6) & 1)
#define HH_IDENTIFY_TRACE_CTRL(x) (((x) >> 7) & 1)
#define HH_IDENTIFY_ROOTVM_CHANNEL(x) (((x) >> 16) & 1)
#define HH_IDENTIFY_SCHEDULER(x) (((x) >> 28) & 0xf)
#define GH_IDENTIFY_PARTITION_CSPACE(x) (((x) >> 0) & 1)
#define GH_IDENTIFY_DOORBELL(x) (((x) >> 1) & 1)
#define GH_IDENTIFY_MSGQUEUE(x) (((x) >> 2) & 1)
#define GH_IDENTIFY_VIC(x) (((x) >> 3) & 1)
#define GH_IDENTIFY_VPM(x) (((x) >> 4) & 1)
#define GH_IDENTIFY_VCPU(x) (((x) >> 5) & 1)
#define GH_IDENTIFY_MEMEXTENT(x) (((x) >> 6) & 1)
#define GH_IDENTIFY_TRACE_CTRL(x) (((x) >> 7) & 1)
#define GH_IDENTIFY_ROOTVM_CHANNEL(x) (((x) >> 16) & 1)
#define GH_IDENTIFY_SCHEDULER(x) (((x) >> 28) & 0xf)

static bool qc_hyp_calls;
static struct hh_hcall_hyp_identify_resp haven_api;
static struct gh_hcall_hyp_identify_resp gunyah_api;

static ssize_t type_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buffer)
{
return scnprintf(buffer, PAGE_SIZE, "haven\n");
return scnprintf(buffer, PAGE_SIZE, "gunyah\n");
}
static struct kobj_attribute type_attr = __ATTR_RO(type);

@ -57,7 +57,7 @@ static ssize_t api_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buffer)
{
return scnprintf(buffer, PAGE_SIZE, "%d\n",
(int)HH_API_INFO_API_VERSION(haven_api.api_info));
(int)GH_API_INFO_API_VERSION(gunyah_api.api_info));
}
static struct kobj_attribute api_attr = __ATTR_RO(api);

@ -65,7 +65,7 @@ static ssize_t variant_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buffer)
{
return scnprintf(buffer, PAGE_SIZE, "%d\n",
(int)HH_API_INFO_VARIANT(haven_api.api_info));
(int)GH_API_INFO_VARIANT(gunyah_api.api_info));
}
static struct kobj_attribute variant_attr = __ATTR_RO(variant);

@ -77,7 +77,7 @@ static const struct attribute_group version_group = {
.attrs = version_attrs,
};

static int __init hh_sysfs_register(void)
static int __init gh_sysfs_register(void)
{
int ret;

@ -88,7 +88,7 @@ static int __init hh_sysfs_register(void)
return sysfs_create_group(hypervisor_kobj, &version_group);
}

static void __exit hh_sysfs_unregister(void)
static void __exit gh_sysfs_unregister(void)
{
sysfs_remove_file(hypervisor_kobj, &type_attr.attr);
sysfs_remove_group(hypervisor_kobj, &version_group);
@ -105,99 +105,99 @@ static void __exit hh_sysfs_unregister(void)
#define ENABLE 1
#define DISABLE 0

static struct dentry *hh_dbgfs_dir;
static struct dentry *gh_dbgfs_dir;
static int hyp_uart_enable;

static void hh_control_hyp_uart(int val)
static void gh_control_hyp_uart(int val)
{
switch (val) {
case ENABLE:
if (!hyp_uart_enable) {
hyp_uart_enable = val;
pr_info("Haven: enabling HYP UART\n");
pr_info("Gunyah: enabling HYP UART\n");
arm_smccc_1_1_smc(QC_HYP_SMCCC_UART_ENABLE, NULL);
} else {
pr_info("Haven: HYP UART already enabled\n");
pr_info("Gunyah: HYP UART already enabled\n");
}
break;
case DISABLE:
if (hyp_uart_enable) {
hyp_uart_enable = val;
pr_info("Haven: disabling HYP UART\n");
pr_info("Gunyah: disabling HYP UART\n");
arm_smccc_1_1_smc(QC_HYP_SMCCC_UART_DISABLE, NULL);
} else {
pr_info("Haven: HYP UART already disabled\n");
pr_info("Gunyah: HYP UART already disabled\n");
}
break;
default:
pr_info("Haven: supported values disable(0)/enable(1)\n");
pr_info("Gunyah: supported values disable(0)/enable(1)\n");
}
}

static int hh_dbgfs_trace_class_set(void *data, u64 val)
static int gh_dbgfs_trace_class_set(void *data, u64 val)
{
return hh_remap_error(hh_hcall_trace_update_class_flags(val, 0, NULL));
return gh_remap_error(gh_hcall_trace_update_class_flags(val, 0, NULL));
}

static int hh_dbgfs_trace_class_clear(void *data, u64 val)
static int gh_dbgfs_trace_class_clear(void *data, u64 val)
{
return hh_remap_error(hh_hcall_trace_update_class_flags(0, val, NULL));
return gh_remap_error(gh_hcall_trace_update_class_flags(0, val, NULL));
}

static int hh_dbgfs_trace_class_get(void *data, u64 *val)
static int gh_dbgfs_trace_class_get(void *data, u64 *val)
{
*val = 0;
return hh_remap_error(hh_hcall_trace_update_class_flags(0, 0, val));
return gh_remap_error(gh_hcall_trace_update_class_flags(0, 0, val));
}

static int hh_dbgfs_hyp_uart_set(void *data, u64 val)
static int gh_dbgfs_hyp_uart_set(void *data, u64 val)
{
hh_control_hyp_uart(val);
gh_control_hyp_uart(val);
return 0;
}

static int hh_dbgfs_hyp_uart_get(void *data, u64 *val)
static int gh_dbgfs_hyp_uart_get(void *data, u64 *val)
{
*val = hyp_uart_enable;
return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(hh_dbgfs_trace_class_set_fops,
hh_dbgfs_trace_class_get,
hh_dbgfs_trace_class_set,
DEFINE_DEBUGFS_ATTRIBUTE(gh_dbgfs_trace_class_set_fops,
gh_dbgfs_trace_class_get,
gh_dbgfs_trace_class_set,
"0x%llx\n");

DEFINE_DEBUGFS_ATTRIBUTE(hh_dbgfs_trace_class_clear_fops,
hh_dbgfs_trace_class_get,
hh_dbgfs_trace_class_clear,
DEFINE_DEBUGFS_ATTRIBUTE(gh_dbgfs_trace_class_clear_fops,
gh_dbgfs_trace_class_get,
gh_dbgfs_trace_class_clear,
"0x%llx\n");

DEFINE_DEBUGFS_ATTRIBUTE(hh_dbgfs_hyp_uart_ctrl_fops,
hh_dbgfs_hyp_uart_get,
hh_dbgfs_hyp_uart_set,
DEFINE_DEBUGFS_ATTRIBUTE(gh_dbgfs_hyp_uart_ctrl_fops,
gh_dbgfs_hyp_uart_get,
gh_dbgfs_hyp_uart_set,
"0x%llx\n");

static int __init hh_dbgfs_register(void)
static int __init gh_dbgfs_register(void)
{
struct dentry *dentry;

hh_dbgfs_dir = debugfs_create_dir("haven", NULL);
if (IS_ERR_OR_NULL(hh_dbgfs_dir))
return PTR_ERR(hh_dbgfs_dir);
gh_dbgfs_dir = debugfs_create_dir("gunyah", NULL);
if (IS_ERR_OR_NULL(gh_dbgfs_dir))
return PTR_ERR(gh_dbgfs_dir);

if (HH_IDENTIFY_TRACE_CTRL(haven_api.flags[0])) {
dentry = debugfs_create_file("trace_set", 0600, hh_dbgfs_dir,
NULL, &hh_dbgfs_trace_class_set_fops);
if (GH_IDENTIFY_TRACE_CTRL(gunyah_api.flags[0])) {
dentry = debugfs_create_file("trace_set", 0600, gh_dbgfs_dir,
NULL, &gh_dbgfs_trace_class_set_fops);
if (IS_ERR(dentry))
return PTR_ERR(dentry);

dentry = debugfs_create_file("trace_clear", 0600, hh_dbgfs_dir,
NULL, &hh_dbgfs_trace_class_clear_fops);
dentry = debugfs_create_file("trace_clear", 0600, gh_dbgfs_dir,
NULL, &gh_dbgfs_trace_class_clear_fops);
if (IS_ERR(dentry))
return PTR_ERR(dentry);

dentry = debugfs_create_file("hyp_uart_ctrl", 0600, hh_dbgfs_dir,
NULL, &hh_dbgfs_hyp_uart_ctrl_fops);
dentry = debugfs_create_file("hyp_uart_ctrl", 0600, gh_dbgfs_dir,
NULL, &gh_dbgfs_hyp_uart_ctrl_fops);
if (IS_ERR(dentry))
return PTR_ERR(dentry);
}
@ -205,16 +205,16 @@ static int __init hh_dbgfs_register(void)
return 0;
}

static void __exit hh_dbgfs_unregister(void)
static void __exit gh_dbgfs_unregister(void)
{
debugfs_remove_recursive(hh_dbgfs_dir);
debugfs_remove_recursive(gh_dbgfs_dir);
}
#else /* !defined (CONFIG_DEBUG_FS) */
static inline int hh_dbgfs_register(void) { return 0; }
static inline int hh_dbgfs_unregister(void) { return 0; }
static inline int gh_dbgfs_register(void) { return 0; }
static inline int gh_dbgfs_unregister(void) { return 0; }
#endif

static int __init hh_ctrl_init(void)
static int __init gh_ctrl_init(void)
{
int ret;
struct device_node *hyp;
@ -227,9 +227,9 @@ static int __init hh_ctrl_init(void)
return 0;
}

(void)hh_hcall_hyp_identify(&haven_api);
(void)gh_hcall_hyp_identify(&gunyah_api);

if (HH_API_INFO_API_VERSION(haven_api.api_info) != 1) {
if (GH_API_INFO_API_VERSION(gunyah_api.api_info) != 1) {
pr_err("unknown version\n");
return 0;
}
@ -241,27 +241,27 @@ static int __init hh_ctrl_init(void)
qc_hyp_calls = true;

if (qc_hyp_calls) {
ret = hh_sysfs_register();
ret = gh_sysfs_register();
if (ret)
return ret;

ret = hh_dbgfs_register();
ret = gh_dbgfs_register();
if (ret)
pr_warn("failed to register dbgfs: %d\n", ret);
} else {
pr_info("Haven: no QC HYP interface detected\n");
pr_info("Gunyah: no QC HYP interface detected\n");
}

return 0;
}
module_init(hh_ctrl_init);
module_init(gh_ctrl_init);

static void __exit hh_ctrl_exit(void)
static void __exit gh_ctrl_exit(void)
{
hh_sysfs_unregister();
hh_dbgfs_unregister();
gh_sysfs_unregister();
gh_dbgfs_unregister();
}
module_exit(hh_ctrl_exit);
module_exit(gh_ctrl_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Haven Hypervisor Control Driver");
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Gunyah Hypervisor Control Driver");

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*
*/

@ -9,26 +9,26 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/haven/hh_dbl.h>
#include <linux/haven/hh_errno.h>
#include <linux/haven/hcall.h>
#include <linux/gunyah/gh_dbl.h>
#include <linux/gunyah/gh_errno.h>
#include <linux/gunyah/hcall.h>

struct hh_dbl_desc {
enum hh_dbl_label label;
struct gh_dbl_desc {
enum gh_dbl_label label;
};

enum hh_dbl_dir {
HH_DBL_DIRECTION_TX,
HH_DBL_DIRECTION_RX
enum gh_dbl_dir {
GH_DBL_DIRECTION_TX,
GH_DBL_DIRECTION_RX
};

struct hh_dbl_cap_table {
struct hh_dbl_desc *client_desc;
struct gh_dbl_cap_table {
struct gh_dbl_desc *client_desc;
spinlock_t cap_entry_lock;
hh_capid_t tx_cap_id;
gh_capid_t tx_cap_id;
int tx_reg_done;

hh_capid_t rx_cap_id;
gh_capid_t rx_cap_id;
int rx_irq;
int rx_reg_done;
const char *rx_irq_name;
@ -37,26 +37,26 @@ struct hh_dbl_cap_table {
wait_queue_head_t cap_wq;
};

static bool hh_dbl_initialized;
static struct hh_dbl_cap_table hh_dbl_cap_table[HH_DBL_LABEL_MAX];
static bool gh_dbl_initialized;
static struct gh_dbl_cap_table gh_dbl_cap_table[GH_DBL_LABEL_MAX];

/**
* hh_dbl_validate_params - Validate doorbell common parameters
* gh_dbl_validate_params - Validate doorbell common parameters
*/
static int hh_dbl_validate_params(struct hh_dbl_desc *client_desc,
enum hh_dbl_dir dir, const unsigned long flags)
static int gh_dbl_validate_params(struct gh_dbl_desc *client_desc,
enum gh_dbl_dir dir, const unsigned long flags)
{
struct hh_dbl_cap_table *cap_table_entry;
struct gh_dbl_cap_table *cap_table_entry;
int ret;

if (IS_ERR_OR_NULL(client_desc))
return -EINVAL;

/* Check if the client has manipulated the label */
if (client_desc->label < 0 || client_desc->label >= HH_DBL_LABEL_MAX)
if (client_desc->label < 0 || client_desc->label >= GH_DBL_LABEL_MAX)
return -EINVAL;

cap_table_entry = &hh_dbl_cap_table[client_desc->label];
cap_table_entry = &gh_dbl_cap_table[client_desc->label];

spin_lock(&cap_table_entry->cap_entry_lock);

@ -68,18 +68,18 @@ static int hh_dbl_validate_params(struct hh_dbl_desc *client_desc,

/*
* Invalid rx_cap_id and tx_cap_id values mean one of two things:
* either the "hh_dbl_populate_cap_info()" call from RM is not over,
* either the "gh_dbl_populate_cap_info()" call from RM is not over,
* or there is no doorbell set up for Tx or Rx.
*/
if (dir == HH_DBL_DIRECTION_RX) {
if (dir == GH_DBL_DIRECTION_RX) {
if (!cap_table_entry->rx_reg_done) {
ret = -EINVAL;
goto err;
}

if ((cap_table_entry->rx_cap_id == HH_CAPID_INVAL) &&
(flags & HH_DBL_NONBLOCK)) {
if ((cap_table_entry->rx_cap_id == GH_CAPID_INVAL) &&
(flags & GH_DBL_NONBLOCK)) {
ret = -EAGAIN;
goto err;
}
@ -87,7 +87,7 @@ static int hh_dbl_validate_params(struct hh_dbl_desc *client_desc,
spin_unlock(&cap_table_entry->cap_entry_lock);

if (wait_event_interruptible(cap_table_entry->cap_wq,
cap_table_entry->rx_cap_id != HH_CAPID_INVAL))
cap_table_entry->rx_cap_id != GH_CAPID_INVAL))
return -ERESTARTSYS;

} else {
@ -96,8 +96,8 @@ static int hh_dbl_validate_params(struct hh_dbl_desc *client_desc,
goto err;
}

if ((cap_table_entry->tx_cap_id == HH_CAPID_INVAL) &&
(flags & HH_DBL_NONBLOCK)) {
if ((cap_table_entry->tx_cap_id == GH_CAPID_INVAL) &&
(flags & GH_DBL_NONBLOCK)) {
ret = -EAGAIN;
goto err;
}
@ -105,7 +105,7 @@ static int hh_dbl_validate_params(struct hh_dbl_desc *client_desc,
spin_unlock(&cap_table_entry->cap_entry_lock);

if (wait_event_interruptible(cap_table_entry->cap_wq,
cap_table_entry->tx_cap_id != HH_CAPID_INVAL))
cap_table_entry->tx_cap_id != GH_CAPID_INVAL))
return -ERESTARTSYS;

}
@ -117,11 +117,11 @@ static int hh_dbl_validate_params(struct hh_dbl_desc *client_desc,
}

/**
* hh_dbl_read_and_clean - Atomically read and clear the flags in a doorbell
* gh_dbl_read_and_clean - Atomically read and clear the flags in a doorbell
* @client_desc: client handle to identify the doorbell object
* @clear_flags: clear the bits mentioned in the clear_flags
* @flags: Optional flags to pass to send the data. For the list of flags,
* see linux/haven/hh_dbl.h
* see linux/gunyah/gh_dbl.h
*
* Reads and clears the flags of the Doorbell object. If there is a pending
* bound virtual interrupt, it will be de-asserted.
@ -130,45 +130,45 @@ static int hh_dbl_validate_params(struct hh_dbl_desc *client_desc,
* 0 on success, @clear_flags contains the doorbell’s previous unmasked flags
* before the @clear_flags were removed.
*/
int hh_dbl_read_and_clean(void *dbl_client_desc, hh_dbl_flags_t *clear_flags,
int gh_dbl_read_and_clean(void *dbl_client_desc, gh_dbl_flags_t *clear_flags,
const unsigned long flags)
{
struct hh_dbl_cap_table *cap_table_entry;
struct hh_hcall_dbl_recv_resp recv_resp;
struct hh_dbl_desc *client_desc = dbl_client_desc;
int ret, hh_ret;
struct gh_dbl_cap_table *cap_table_entry;
struct gh_hcall_dbl_recv_resp recv_resp;
struct gh_dbl_desc *client_desc = dbl_client_desc;
int ret, gh_ret;

if (!clear_flags)
return -EINVAL;

ret = hh_dbl_validate_params(client_desc, HH_DBL_DIRECTION_RX, flags);
ret = gh_dbl_validate_params(client_desc, GH_DBL_DIRECTION_RX, flags);
if (ret)
return ret;

cap_table_entry = &hh_dbl_cap_table[client_desc->label];
cap_table_entry = &gh_dbl_cap_table[client_desc->label];

hh_ret = hh_hcall_dbl_recv(cap_table_entry->rx_cap_id,
gh_ret = gh_hcall_dbl_recv(cap_table_entry->rx_cap_id,
*clear_flags, &recv_resp);

ret = hh_remap_error(hh_ret);
ret = gh_remap_error(gh_ret);
if (ret != 0)
pr_err("%s: Hypercall failed, ret = %d\n", __func__, hh_ret);
pr_err("%s: Hypercall failed, ret = %d\n", __func__, gh_ret);
else
*clear_flags = recv_resp.old_flags;

return ret;
}
EXPORT_SYMBOL(hh_dbl_read_and_clean);
EXPORT_SYMBOL(gh_dbl_read_and_clean);
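/*
 * Illustrative sketch (editor's addition, not part of this change): a
 * receiver might drain its doorbell like this; the descriptor is assumed
 * to come from a prior gh_dbl_rx_register() call.
 */
static void example_drain_doorbell(void *dbl_desc)
{
	gh_dbl_flags_t flags = ~0ULL;	/* request that all flags be cleared */

	/* Non-blocking: fails with -EAGAIN if the Rx capability is not ready */
	if (!gh_dbl_read_and_clean(dbl_desc, &flags, GH_DBL_NONBLOCK))
		pr_debug("doorbell flags before clear: %#llx\n", flags);
}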

/**
* hh_dbl_set_mask - Set doorbell object mask
* gh_dbl_set_mask - Set doorbell object mask
* @client_desc: client handle to identify the doorbell object
* @enable_mask: The mask of flags that will cause an assertion of
* the doorbell's bound virtual interrupt
* @ack_mask: Controls which flags should be automatically cleared
* when the interrupt is asserted
* @flags: Optional flags to pass to send the data. For the list of flags,
* see linux/haven/hh_dbl.h
* see linux/gunyah/gh_dbl.h
*
* Sets the Doorbell object's masks. A doorbell object has two masks
* which are configured by the receiver to control which flags it is
@ -177,37 +177,37 @@ EXPORT_SYMBOL(hh_dbl_read_and_clean);
* Returns:
* 0 on success
*/
int hh_dbl_set_mask(void *dbl_client_desc, hh_dbl_flags_t enable_mask,
hh_dbl_flags_t ack_mask, const unsigned long flags)
int gh_dbl_set_mask(void *dbl_client_desc, gh_dbl_flags_t enable_mask,
gh_dbl_flags_t ack_mask, const unsigned long flags)
{
struct hh_dbl_cap_table *cap_table_entry;
struct hh_dbl_desc *client_desc = dbl_client_desc;
int ret, hh_ret;
struct gh_dbl_cap_table *cap_table_entry;
struct gh_dbl_desc *client_desc = dbl_client_desc;
int ret, gh_ret;

ret = hh_dbl_validate_params(client_desc, HH_DBL_DIRECTION_RX, flags);
ret = gh_dbl_validate_params(client_desc, GH_DBL_DIRECTION_RX, flags);
if (ret)
return ret;

cap_table_entry = &hh_dbl_cap_table[client_desc->label];
cap_table_entry = &gh_dbl_cap_table[client_desc->label];

hh_ret = hh_hcall_dbl_mask(cap_table_entry->rx_cap_id,
gh_ret = gh_hcall_dbl_mask(cap_table_entry->rx_cap_id,
enable_mask, ack_mask);

ret = hh_remap_error(hh_ret);
ret = gh_remap_error(gh_ret);
if (ret != 0)
pr_err("%s: Hypercall failed ret = %d\n", __func__, hh_ret);
pr_err("%s: Hypercall failed ret = %d\n", __func__, gh_ret);

return ret;
}
EXPORT_SYMBOL(hh_dbl_set_mask);
EXPORT_SYMBOL(gh_dbl_set_mask);
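/*
 * Illustrative sketch (editor's addition, not part of this change): a
 * receiver that only cares about flag 0 could enable just that bit and
 * have it acknowledged automatically when the vIRQ fires.
 */
static int example_mask_flag0(void *dbl_desc)
{
	return gh_dbl_set_mask(dbl_desc, 0x1, 0x1, GH_DBL_NONBLOCK);
}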

/**
* hh_dbl_send - Set flags in the doorbell
* gh_dbl_send - Set flags in the doorbell
* @client_desc: client handle to identify the doorbell object
* @newflags: flags to set in the doorbell. These flags, along with the
* doorbell's enable_mask, decide whether to raise a vIRQ or not.
* @flags: Optional flags to pass to send the data. For the list of flags,
* see linux/haven/hh_dbl.h
* see linux/gunyah/gh_dbl.h
*
* Set flags in the doorbell. If following the send, the set of enabled flags
* as defined by the bitwise-AND of the doorbell flags with the EnableMask,
@ -217,42 +217,42 @@ EXPORT_SYMBOL(hh_dbl_set_mask);
* 0 on success, @newflags contains the doorbell’s previous unmasked flags
* before the @newflags were added.
*/
int hh_dbl_send(void *dbl_client_desc, hh_dbl_flags_t *newflags,
int gh_dbl_send(void *dbl_client_desc, gh_dbl_flags_t *newflags,
unsigned long flags)
{
struct hh_dbl_cap_table *cap_table_entry;
struct hh_hcall_dbl_send_resp send_resp;
struct hh_dbl_desc *client_desc = dbl_client_desc;
int ret, hh_ret;
struct gh_dbl_cap_table *cap_table_entry;
struct gh_hcall_dbl_send_resp send_resp;
struct gh_dbl_desc *client_desc = dbl_client_desc;
int ret, gh_ret;

if (!newflags)
return -EINVAL;

ret = hh_dbl_validate_params(client_desc, HH_DBL_DIRECTION_TX, flags);
ret = gh_dbl_validate_params(client_desc, GH_DBL_DIRECTION_TX, flags);
if (ret)
return ret;

cap_table_entry = &hh_dbl_cap_table[client_desc->label];
cap_table_entry = &gh_dbl_cap_table[client_desc->label];

hh_ret = hh_hcall_dbl_send(cap_table_entry->tx_cap_id, *newflags,
gh_ret = gh_hcall_dbl_send(cap_table_entry->tx_cap_id, *newflags,
&send_resp);

ret = hh_remap_error(hh_ret);
ret = gh_remap_error(gh_ret);
if (ret != 0)
pr_err("%s: Hypercall failed ret = %d\n", __func__, hh_ret);
pr_err("%s: Hypercall failed ret = %d\n", __func__, gh_ret);
else
*newflags = send_resp.old_flags;

return ret;
}
EXPORT_SYMBOL(hh_dbl_send);
EXPORT_SYMBOL(gh_dbl_send);
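/*
 * Illustrative Tx-side sketch (editor's addition, not part of this change):
 * register against a label and ring the peer by setting flag 0. The label
 * value comes from enum gh_dbl_label and is a placeholder here.
 */
static int example_ring_peer(enum gh_dbl_label label)
{
	gh_dbl_flags_t newflags = 0x1;	/* assert flag 0 */
	void *desc;

	desc = gh_dbl_tx_register(label);
	if (IS_ERR_OR_NULL(desc))
		return PTR_ERR(desc);

	return gh_dbl_send(desc, &newflags, GH_DBL_NONBLOCK);
}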

/**
* hh_dbl_reset - clear all the flags of the doorbell and sets all bits in
* gh_dbl_reset - clear all the flags of the doorbell and sets all bits in
* the Doorbell’s mask.
* @client_desc: client handle to identify the doorbell object
* @flags: Optional flags to pass to send the data. For the list of flags,
* see linux/haven/hh_dbl.h
* see linux/gunyah/gh_dbl.h
*
* Clears all the flags of the doorbell and sets all bits in the doorbell’s
* mask. If there is a pending bound virtual interrupt, it will be de-asserted.
@ -260,31 +260,31 @@ EXPORT_SYMBOL(hh_dbl_send);
* Returns:
* 0 on success
*/
int hh_dbl_reset(void *dbl_client_desc, const unsigned long flags)
int gh_dbl_reset(void *dbl_client_desc, const unsigned long flags)
{
struct hh_dbl_cap_table *cap_table_entry;
struct hh_dbl_desc *client_desc = dbl_client_desc;
int ret, hh_ret;
struct gh_dbl_cap_table *cap_table_entry;
struct gh_dbl_desc *client_desc = dbl_client_desc;
int ret, gh_ret;

ret = hh_dbl_validate_params(client_desc, HH_DBL_DIRECTION_RX, flags);
ret = gh_dbl_validate_params(client_desc, GH_DBL_DIRECTION_RX, flags);
if (ret)
return ret;

cap_table_entry = &hh_dbl_cap_table[client_desc->label];
cap_table_entry = &gh_dbl_cap_table[client_desc->label];

hh_ret = hh_hcall_dbl_reset(cap_table_entry->rx_cap_id);
gh_ret = gh_hcall_dbl_reset(cap_table_entry->rx_cap_id);

ret = hh_remap_error(hh_ret);
ret = gh_remap_error(gh_ret);
if (ret != 0)
pr_err("%s: Hypercall failed ret = %d\n", __func__, hh_ret);
pr_err("%s: Hypercall failed ret = %d\n", __func__, gh_ret);

return ret;
}
EXPORT_SYMBOL(hh_dbl_reset);
EXPORT_SYMBOL(gh_dbl_reset);

static irqreturn_t hh_dbl_rx_callback_thread(int irq, void *data)
static irqreturn_t gh_dbl_rx_callback_thread(int irq, void *data)
{
struct hh_dbl_cap_table *cap_table_entry = data;
struct gh_dbl_cap_table *cap_table_entry = data;

if (!cap_table_entry->rx_callback)
return IRQ_HANDLED;
@ -294,7 +294,7 @@ static irqreturn_t hh_dbl_rx_callback_thread(int irq, void *data)
}

/**
* hh_dbl_tx_register: Register as a Tx client to use the doorbell
* gh_dbl_tx_register: Register as a Tx client to use the doorbell
* @label: The label associated to the doorbell that the client wants
* to use to send a message to the other VM.
*
@ -304,19 +304,19 @@ static irqreturn_t hh_dbl_rx_callback_thread(int irq, void *data)
* the return value using IS_ERR_OR_NULL() and PTR_ERR() to extract the error
* code.
*/
void *hh_dbl_tx_register(enum hh_dbl_label label)
void *gh_dbl_tx_register(enum gh_dbl_label label)
{
struct hh_dbl_cap_table *cap_table_entry;
struct hh_dbl_desc *client_desc;
struct gh_dbl_cap_table *cap_table_entry;
struct gh_dbl_desc *client_desc;
int ret;

if (label < 0 || label >= HH_DBL_LABEL_MAX)
if (label < 0 || label >= GH_DBL_LABEL_MAX)
return ERR_PTR(-EINVAL);

if (!hh_dbl_initialized)
if (!gh_dbl_initialized)
return ERR_PTR(-EPROBE_DEFER);

cap_table_entry = &hh_dbl_cap_table[label];
cap_table_entry = &gh_dbl_cap_table[label];

spin_lock(&cap_table_entry->cap_entry_lock);

@ -350,10 +350,10 @@ void *hh_dbl_tx_register(enum hh_dbl_label label)
spin_unlock(&cap_table_entry->cap_entry_lock);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(hh_dbl_tx_register);
EXPORT_SYMBOL(gh_dbl_tx_register);

/**
* hh_dbl_rx_register: Register as a Rx client to use the doorbell
* gh_dbl_rx_register: Register as a Rx client to use the doorbell
* @label: The label associated to the doorbell that the client wants
* to read a message.
* @rx_cb: Callback of the client when there is a vIRQ on doorbell
@ -366,19 +366,19 @@ EXPORT_SYMBOL(hh_dbl_tx_register);
* code.
*/

void *hh_dbl_rx_register(enum hh_dbl_label label, dbl_rx_cb_t rx_cb, void *priv)
void *gh_dbl_rx_register(enum gh_dbl_label label, dbl_rx_cb_t rx_cb, void *priv)
{
struct hh_dbl_cap_table *cap_table_entry;
struct hh_dbl_desc *client_desc;
struct gh_dbl_cap_table *cap_table_entry;
struct gh_dbl_desc *client_desc;
int ret;

if (label < 0 || label >= HH_DBL_LABEL_MAX)
if (label < 0 || label >= GH_DBL_LABEL_MAX)
return ERR_PTR(-EINVAL);

if (!hh_dbl_initialized)
if (!gh_dbl_initialized)
return ERR_PTR(-EPROBE_DEFER);

cap_table_entry = &hh_dbl_cap_table[label];
cap_table_entry = &gh_dbl_cap_table[label];

spin_lock(&cap_table_entry->cap_entry_lock);

@ -417,29 +417,29 @@ void *hh_dbl_rx_register(enum hh_dbl_label label, dbl_rx_cb_t rx_cb, void *priv)
spin_unlock(&cap_table_entry->cap_entry_lock);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(hh_dbl_rx_register);
EXPORT_SYMBOL(gh_dbl_rx_register);
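/*
 * Illustrative Rx-side usage (editor's addition, not part of this change):
 *
 *	desc = gh_dbl_rx_register(label, my_rx_cb, my_priv);
 *	if (IS_ERR_OR_NULL(desc))
 *		return PTR_ERR(desc);
 *
 * my_rx_cb and my_priv are placeholders; the callback runs from the
 * doorbell's threaded IRQ handler whenever the peer rings.
 */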

/**
* hh_dbl_tx_unregister: Unregister Tx client to use the doorbell
* @client_desc: The descriptor that was passed via hh_dbl_tx_register() or
* hh_dbl_rx_register()
* gh_dbl_tx_unregister: Unregister Tx client to use the doorbell
* @client_desc: The descriptor that was passed via gh_dbl_tx_register() or
* gh_dbl_rx_register()
*
* The function returns 0 if the client was unregistered successfully. Else,
* -EINVAL for invalid arguments.
*/
int hh_dbl_tx_unregister(void *dbl_client_desc)
int gh_dbl_tx_unregister(void *dbl_client_desc)
{
struct hh_dbl_desc *client_desc = dbl_client_desc;
struct hh_dbl_cap_table *cap_table_entry;
struct gh_dbl_desc *client_desc = dbl_client_desc;
struct gh_dbl_cap_table *cap_table_entry;

if (IS_ERR_OR_NULL(client_desc))
return -EINVAL;

/* Check if the client has manipulated the label */
if (client_desc->label < 0 || client_desc->label >= HH_DBL_LABEL_MAX)
if (client_desc->label < 0 || client_desc->label >= GH_DBL_LABEL_MAX)
return -EINVAL;

cap_table_entry = &hh_dbl_cap_table[client_desc->label];
cap_table_entry = &gh_dbl_cap_table[client_desc->label];

spin_lock(&cap_table_entry->cap_entry_lock);

@ -467,29 +467,29 @@ int hh_dbl_tx_unregister(void *dbl_client_desc)

return 0;
}
EXPORT_SYMBOL(hh_dbl_tx_unregister);
EXPORT_SYMBOL(gh_dbl_tx_unregister);

/**
* hh_dbl_rx_unregister: Unregister Rx client to use the doorbell
* @client_desc: The descriptor that was passed via hh_dbl_tx_register() or
* hh_dbl_rx_register()
* gh_dbl_rx_unregister: Unregister Rx client to use the doorbell
* @client_desc: The descriptor that was passed via gh_dbl_tx_register() or
* gh_dbl_rx_register()
*
* The function returns 0 if the client was unregistered successfully. Else,
* -EINVAL for invalid arguments.
*/
int hh_dbl_rx_unregister(void *dbl_client_desc)
int gh_dbl_rx_unregister(void *dbl_client_desc)
{
struct hh_dbl_desc *client_desc = dbl_client_desc;
struct hh_dbl_cap_table *cap_table_entry;
struct gh_dbl_desc *client_desc = dbl_client_desc;
struct gh_dbl_cap_table *cap_table_entry;

if (IS_ERR_OR_NULL(client_desc))
return -EINVAL;

/* Check if the client has manipulated the label */
if (client_desc->label < 0 || client_desc->label >= HH_DBL_LABEL_MAX)
if (client_desc->label < 0 || client_desc->label >= GH_DBL_LABEL_MAX)
return -EINVAL;

cap_table_entry = &hh_dbl_cap_table[client_desc->label];
cap_table_entry = &gh_dbl_cap_table[client_desc->label];

spin_lock(&cap_table_entry->cap_entry_lock);

@ -520,29 +520,29 @@ int hh_dbl_rx_unregister(void *dbl_client_desc)

return 0;
}
EXPORT_SYMBOL(hh_dbl_rx_unregister);
EXPORT_SYMBOL(gh_dbl_rx_unregister);

/**
* This API is called by the RM driver to populate doorbell objects
*/
int hh_dbl_populate_cap_info(enum hh_dbl_label label, u64 cap_id,
int gh_dbl_populate_cap_info(enum gh_dbl_label label, u64 cap_id,
int direction, int rx_irq)
{
struct hh_dbl_cap_table *cap_table_entry;
struct gh_dbl_cap_table *cap_table_entry;
int ret = 0;

if (!hh_dbl_initialized)
if (!gh_dbl_initialized)
return -EAGAIN;

if (label < 0 || label >= HH_DBL_LABEL_MAX) {
if (label < 0 || label >= GH_DBL_LABEL_MAX) {
pr_err("%s: Invalid label passed\n", __func__);
return -EINVAL;
}

cap_table_entry = &hh_dbl_cap_table[label];
cap_table_entry = &gh_dbl_cap_table[label];

switch (direction) {
case HH_DBL_DIRECTION_TX:
case GH_DBL_DIRECTION_TX:
/* No interrupt should be associated with a Tx doorbell */
if (rx_irq > 0) {
pr_err("%s: No IRQ associated for Tx doorbell!\n",
@ -560,7 +560,7 @@ int hh_dbl_populate_cap_info(enum hh_dbl_label label, u64 cap_id,
pr_debug("%s: label: %d; tx_cap_id: %llu; dir: %d; rx_irq: %d\n",
__func__, label, cap_id, direction, rx_irq);
break;
case HH_DBL_DIRECTION_RX:
case GH_DBL_DIRECTION_RX:
if (rx_irq <= 0) {
pr_err("%s: Invalid IRQ number for Rx doorbell\n",
__func__);
@ -571,7 +571,7 @@ int hh_dbl_populate_cap_info(enum hh_dbl_label label, u64 cap_id,
cap_table_entry->rx_irq = rx_irq;
ret = request_threaded_irq(cap_table_entry->rx_irq,
NULL,
hh_dbl_rx_callback_thread,
gh_dbl_rx_callback_thread,
IRQF_ONESHOT | IRQF_TRIGGER_RISING,
cap_table_entry->rx_irq_name,
cap_table_entry);
@ -601,56 +601,56 @@ int hh_dbl_populate_cap_info(enum hh_dbl_label label, u64 cap_id,
err:
return ret;
}
EXPORT_SYMBOL(hh_dbl_populate_cap_info);
EXPORT_SYMBOL(gh_dbl_populate_cap_info);

static void hh_dbl_cleanup(int begin_idx)
static void gh_dbl_cleanup(int begin_idx)
{
struct hh_dbl_cap_table *cap_table_entry;
struct gh_dbl_cap_table *cap_table_entry;
int i;

if (begin_idx >= HH_DBL_LABEL_MAX)
begin_idx = HH_DBL_LABEL_MAX - 1;
if (begin_idx >= GH_DBL_LABEL_MAX)
begin_idx = GH_DBL_LABEL_MAX - 1;

for (i = begin_idx; i >= 0; i--) {
cap_table_entry = &hh_dbl_cap_table[i];
cap_table_entry = &gh_dbl_cap_table[i];
kfree(cap_table_entry->rx_irq_name);
}
}

static int __init hh_dbl_init(void)
static int __init gh_dbl_init(void)
{
struct hh_dbl_cap_table *entry;
struct gh_dbl_cap_table *entry;
int ret;
int i;

for (i = 0; i < HH_DBL_LABEL_MAX; i++) {
entry = &hh_dbl_cap_table[i];
for (i = 0; i < GH_DBL_LABEL_MAX; i++) {
entry = &gh_dbl_cap_table[i];
spin_lock_init(&entry->cap_entry_lock);
init_waitqueue_head(&entry->cap_wq);
entry->tx_cap_id = HH_CAPID_INVAL;
entry->rx_cap_id = HH_CAPID_INVAL;
entry->rx_irq_name = kasprintf(GFP_KERNEL, "hh_dbl_rx_%d", i);
entry->tx_cap_id = GH_CAPID_INVAL;
entry->rx_cap_id = GH_CAPID_INVAL;
entry->rx_irq_name = kasprintf(GFP_KERNEL, "gh_dbl_rx_%d", i);
if (!entry->rx_irq_name) {
ret = -ENOMEM;
goto err;
}
}

hh_dbl_initialized = true;
gh_dbl_initialized = true;

return 0;

err:
hh_dbl_cleanup(i);
gh_dbl_cleanup(i);
return ret;
}
module_init(hh_dbl_init);
module_init(gh_dbl_init);

static void __exit hh_dbl_exit(void)
static void __exit gh_dbl_exit(void)
{
hh_dbl_cleanup(HH_DBL_LABEL_MAX - 1);
gh_dbl_cleanup(GH_DBL_LABEL_MAX - 1);
}
module_exit(hh_dbl_exit);
module_exit(gh_dbl_exit);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Haven Doorbell Driver");
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Gunyah Doorbell Driver");
MODULE_LICENSE("GPL v2");

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*
*/

@ -9,73 +9,73 @@
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <linux/haven/hh_irq_lend.h>
#include <linux/haven/hh_rm_drv.h>
#include <linux/gunyah/gh_irq_lend.h>
#include <linux/gunyah/gh_rm_drv.h>

#include "hh_rm_drv_private.h"
#include "gh_rm_drv_private.h"

struct hh_irq_entry {
hh_vmid_t vmid;
enum hh_vm_names vm_name;
hh_irq_handle_fn_v2 v2_handle;
hh_irq_handle_fn handle;
struct gh_irq_entry {
gh_vmid_t vmid;
enum gh_vm_names vm_name;
gh_irq_handle_fn_v2 v2_handle;
gh_irq_handle_fn handle;
void *data;

enum {
HH_IRQ_STATE_NONE,
GH_IRQ_STATE_NONE,

HH_IRQ_STATE_WAIT_RELEASE_OR_ACCEPT,
HH_IRQ_STATE_WAIT_LEND,
GH_IRQ_STATE_WAIT_RELEASE_OR_ACCEPT,
GH_IRQ_STATE_WAIT_LEND,

/* notification states */
HH_IRQ_STATE_RELEASED, /* svm -> pvm */
HH_IRQ_STATE_ACCEPTED, /* svm -> pvm */
HH_IRQ_STATE_LENT, /* pvm -> svm */
GH_IRQ_STATE_RELEASED, /* svm -> pvm */
GH_IRQ_STATE_ACCEPTED, /* svm -> pvm */
GH_IRQ_STATE_LENT, /* pvm -> svm */
} state;
hh_virq_handle_t virq_handle;
gh_virq_handle_t virq_handle;
};

static struct hh_irq_entry hh_irq_entries[HH_IRQ_LABEL_MAX];
static DEFINE_SPINLOCK(hh_irq_lend_lock);
static struct gh_irq_entry gh_irq_entries[GH_IRQ_LABEL_MAX];
static DEFINE_SPINLOCK(gh_irq_lend_lock);

static int hh_irq_released_accepted_nb_handler(struct notifier_block *this,
static int gh_irq_released_accepted_nb_handler(struct notifier_block *this,
unsigned long cmd, void *data)
{
unsigned long flags;
enum hh_irq_label label;
struct hh_irq_entry *entry;
struct hh_rm_notif_vm_irq_released_payload *released;
struct hh_rm_notif_vm_irq_accepted_payload *accepted;
enum gh_irq_label label;
struct gh_irq_entry *entry;
struct gh_rm_notif_vm_irq_released_payload *released;
struct gh_rm_notif_vm_irq_accepted_payload *accepted;

if (cmd != HH_RM_NOTIF_VM_IRQ_RELEASED &&
cmd != HH_RM_NOTIF_VM_IRQ_ACCEPTED)
if (cmd != GH_RM_NOTIF_VM_IRQ_RELEASED &&
cmd != GH_RM_NOTIF_VM_IRQ_ACCEPTED)
return NOTIFY_DONE;

spin_lock_irqsave(&hh_irq_lend_lock, flags);
for (label = 0; label < HH_IRQ_LABEL_MAX; label++) {
entry = &hh_irq_entries[label];
spin_lock_irqsave(&gh_irq_lend_lock, flags);
for (label = 0; label < GH_IRQ_LABEL_MAX; label++) {
entry = &gh_irq_entries[label];

if (entry->state != HH_IRQ_STATE_WAIT_RELEASE_OR_ACCEPT &&
entry->state != HH_IRQ_STATE_ACCEPTED)
if (entry->state != GH_IRQ_STATE_WAIT_RELEASE_OR_ACCEPT &&
entry->state != GH_IRQ_STATE_ACCEPTED)
continue;

switch (cmd) {
case HH_RM_NOTIF_VM_IRQ_RELEASED:
case GH_RM_NOTIF_VM_IRQ_RELEASED:
released = data;
if (released->virq_handle == entry->virq_handle) {
entry->state = HH_IRQ_STATE_RELEASED;
spin_unlock_irqrestore(&hh_irq_lend_lock,
entry->state = GH_IRQ_STATE_RELEASED;
spin_unlock_irqrestore(&gh_irq_lend_lock,
flags);
entry->v2_handle(entry->data, cmd, label);
return NOTIFY_OK;
}

break;
case HH_RM_NOTIF_VM_IRQ_ACCEPTED:
case GH_RM_NOTIF_VM_IRQ_ACCEPTED:
accepted = data;
if (accepted->virq_handle == entry->virq_handle) {
entry->state = HH_IRQ_STATE_ACCEPTED;
spin_unlock_irqrestore(&hh_irq_lend_lock,
entry->state = GH_IRQ_STATE_ACCEPTED;
spin_unlock_irqrestore(&gh_irq_lend_lock,
flags);
entry->v2_handle(entry->data, cmd, label);
return NOTIFY_OK;
@ -84,50 +84,50 @@ static int hh_irq_released_accepted_nb_handler(struct notifier_block *this,
break;
}
}
spin_unlock_irqrestore(&hh_irq_lend_lock, flags);
spin_unlock_irqrestore(&gh_irq_lend_lock, flags);

return NOTIFY_DONE;
}

static struct notifier_block hh_irq_released_accepted_nb = {
.notifier_call = hh_irq_released_accepted_nb_handler,
static struct notifier_block gh_irq_released_accepted_nb = {
.notifier_call = gh_irq_released_accepted_nb_handler,
};

static int hh_irq_lent_nb_handler(struct notifier_block *this,
static int gh_irq_lent_nb_handler(struct notifier_block *this,
unsigned long cmd, void *data)
{
unsigned long flags;
enum hh_irq_label label;
enum hh_vm_names owner_name;
struct hh_irq_entry *entry;
struct hh_rm_notif_vm_irq_lent_payload *lent = data;
enum gh_irq_label label;
enum gh_vm_names owner_name;
struct gh_irq_entry *entry;
struct gh_rm_notif_vm_irq_lent_payload *lent = data;
int ret;

if (cmd != HH_RM_NOTIF_VM_IRQ_LENT)
if (cmd != GH_RM_NOTIF_VM_IRQ_LENT)
return NOTIFY_DONE;

ret = hh_rm_get_vm_name(lent->owner_vmid, &owner_name);
ret = gh_rm_get_vm_name(lent->owner_vmid, &owner_name);
if (ret) {
pr_warn_ratelimited("%s: unknown name for vmid: %d\n", __func__,
lent->owner_vmid);
return ret;
}

spin_lock_irqsave(&hh_irq_lend_lock, flags);
for (label = 0; label < HH_IRQ_LABEL_MAX; label++) {
entry = &hh_irq_entries[label];
if (entry->state != HH_IRQ_STATE_WAIT_LEND &&
entry->state != HH_IRQ_STATE_LENT)
spin_lock_irqsave(&gh_irq_lend_lock, flags);
for (label = 0; label < GH_IRQ_LABEL_MAX; label++) {
entry = &gh_irq_entries[label];
if (entry->state != GH_IRQ_STATE_WAIT_LEND &&
entry->state != GH_IRQ_STATE_LENT)
continue;

if (label == lent->virq_label &&
(entry->vm_name == HH_VM_MAX ||
(entry->vm_name == GH_VM_MAX ||
entry->vm_name == owner_name)) {
entry->vmid = lent->owner_vmid;
entry->virq_handle = lent->virq_handle;

entry->state = HH_IRQ_STATE_LENT;
spin_unlock_irqrestore(&hh_irq_lend_lock,
entry->state = GH_IRQ_STATE_LENT;
spin_unlock_irqrestore(&gh_irq_lend_lock,
flags);

entry->v2_handle(entry->data, cmd, label);
@ -135,17 +135,17 @@ static int hh_irq_lent_nb_handler(struct notifier_block *this,
return NOTIFY_OK;
}
}
spin_unlock_irqrestore(&hh_irq_lend_lock, flags);
spin_unlock_irqrestore(&gh_irq_lend_lock, flags);

return NOTIFY_DONE;
}

static struct notifier_block hh_irq_lent_nb = {
.notifier_call = hh_irq_lent_nb_handler,
static struct notifier_block gh_irq_lent_nb = {
.notifier_call = gh_irq_lent_nb_handler,
};

/**
* hh_irq_lend_v2: Lend a hardware interrupt to another VM
* gh_irq_lend_v2: Lend a hardware interrupt to another VM
* @label: vIRQ high-level label
* @name: VM name to send interrupt to
* @irq: Linux IRQ number to lend
@ -155,45 +155,45 @@ static struct notifier_block hh_irq_lent_nb = {
* Returns 0 on success, along with the handle corresponding to the Linux IRQ#.
* Returns < 0 on error
*/
int hh_irq_lend_v2(enum hh_irq_label label, enum hh_vm_names name,
int irq, hh_irq_handle_fn_v2 cb_handle, void *data)
int gh_irq_lend_v2(enum gh_irq_label label, enum gh_vm_names name,
int irq, gh_irq_handle_fn_v2 cb_handle, void *data)
{
int ret, virq;
unsigned long flags;
struct hh_irq_entry *entry;
struct gh_irq_entry *entry;

if (label >= HH_IRQ_LABEL_MAX || !cb_handle)
if (label >= GH_IRQ_LABEL_MAX || !cb_handle)
return -EINVAL;

entry = &hh_irq_entries[label];
entry = &gh_irq_entries[label];

if (hh_rm_irq_to_virq(irq, &virq))
if (gh_rm_irq_to_virq(irq, &virq))
return -EINVAL;

spin_lock_irqsave(&hh_irq_lend_lock, flags);
if (entry->state != HH_IRQ_STATE_NONE) {
spin_unlock_irqrestore(&hh_irq_lend_lock, flags);
spin_lock_irqsave(&gh_irq_lend_lock, flags);
if (entry->state != GH_IRQ_STATE_NONE) {
spin_unlock_irqrestore(&gh_irq_lend_lock, flags);
return -EINVAL;
}

ret = hh_rm_get_vmid(name, &entry->vmid);
ret = gh_rm_get_vmid(name, &entry->vmid);
if (ret) {
entry->state = HH_IRQ_STATE_NONE;
spin_unlock_irqrestore(&hh_irq_lend_lock, flags);
entry->state = GH_IRQ_STATE_NONE;
spin_unlock_irqrestore(&gh_irq_lend_lock, flags);
return ret;
}

entry->v2_handle = cb_handle;
entry->data = data;
entry->state = HH_IRQ_STATE_WAIT_RELEASE_OR_ACCEPT;
spin_unlock_irqrestore(&hh_irq_lend_lock, flags);
entry->state = GH_IRQ_STATE_WAIT_RELEASE_OR_ACCEPT;
spin_unlock_irqrestore(&gh_irq_lend_lock, flags);

return hh_rm_vm_irq_lend(entry->vmid, virq, label, &entry->virq_handle);
return gh_rm_vm_irq_lend(entry->vmid, virq, label, &entry->virq_handle);
}
EXPORT_SYMBOL(hh_irq_lend_v2);
EXPORT_SYMBOL(gh_irq_lend_v2);
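/*
 * Illustrative lender-side sketch (editor's addition, not part of this
 * change). The GH_TRUSTED_VM name and the callback body are assumptions
 * for the example.
 */
static void example_on_irq_event(void *data, unsigned long notif_type,
				 enum gh_irq_label label)
{
	pr_debug("IRQ lend event %lu on label %d\n", notif_type, label);
}

static int example_lend_irq(enum gh_irq_label label, int irq)
{
	int ret;

	ret = gh_irq_lend_v2(label, GH_TRUSTED_VM, irq,
			     example_on_irq_event, NULL);
	if (ret)
		return ret;

	/* Tell the borrower the handle is ready to be accepted */
	return gh_irq_lend_notify(label);
}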

/**
* hh_irq_lend: Lend a hardware interrupt to another VM
* gh_irq_lend: Lend a hardware interrupt to another VM
* @label: vIRQ high-level label
* @name: VM name to send interrupt to
* @irq: Linux IRQ number to lend
@ -203,123 +203,123 @@ EXPORT_SYMBOL(hh_irq_lend_v2);
* Returns 0 on success, along with the handle corresponding to the Linux IRQ#.
* Returns < 0 on error
*/
int hh_irq_lend(enum hh_irq_label label, enum hh_vm_names name,
int irq, hh_irq_handle_fn cb_handle, void *data)
int gh_irq_lend(enum gh_irq_label label, enum gh_vm_names name,
int irq, gh_irq_handle_fn cb_handle, void *data)
{
struct hh_irq_entry *entry;
struct gh_irq_entry *entry;

if (label >= HH_IRQ_LABEL_MAX || !cb_handle)
if (label >= GH_IRQ_LABEL_MAX || !cb_handle)
return -EINVAL;

entry = &hh_irq_entries[label];
entry = &gh_irq_entries[label];
entry->handle = cb_handle;

return 0;
}
EXPORT_SYMBOL(hh_irq_lend);
EXPORT_SYMBOL(gh_irq_lend);

/**
* hh_irq_lend_notify: Pass the irq handle to the other VM for accept
* gh_irq_lend_notify: Pass the irq handle to the other VM for accept
* @label: vIRQ high-level label
*
* Returns 0 on success, < 0 on error
*/
int hh_irq_lend_notify(enum hh_irq_label label)
int gh_irq_lend_notify(enum gh_irq_label label)
{
struct hh_irq_entry *entry;
struct gh_irq_entry *entry;

if (label >= HH_IRQ_LABEL_MAX)
if (label >= GH_IRQ_LABEL_MAX)
return -EINVAL;

entry = &hh_irq_entries[label];
if (entry->state == HH_IRQ_STATE_NONE)
entry = &gh_irq_entries[label];
if (entry->state == GH_IRQ_STATE_NONE)
return -EINVAL;

return hh_rm_vm_irq_lend_notify(entry->vmid, entry->virq_handle);
return gh_rm_vm_irq_lend_notify(entry->vmid, entry->virq_handle);
}
EXPORT_SYMBOL(hh_irq_lend_notify);
EXPORT_SYMBOL(gh_irq_lend_notify);

/**
* hh_irq_reclaim: Reclaim a hardware interrupt after the other VM
* gh_irq_reclaim: Reclaim a hardware interrupt after the other VM
* has released it.
* @label: vIRQ high-level label
*
* This function should be called inside or after the on_release()
* callback from hh_irq_lend.
* This function is not thread-safe. Do not race with another hh_irq_reclaim
* callback from gh_irq_lend.
* This function is not thread-safe. Do not race with another gh_irq_reclaim
* with the same label.
*/
int hh_irq_reclaim(enum hh_irq_label label)
int gh_irq_reclaim(enum gh_irq_label label)
{
int ret;
struct hh_irq_entry *entry;
struct gh_irq_entry *entry;

if (label >= HH_IRQ_LABEL_MAX)
if (label >= GH_IRQ_LABEL_MAX)
return -EINVAL;

entry = &hh_irq_entries[label];
entry = &gh_irq_entries[label];

if (entry->state != HH_IRQ_STATE_WAIT_RELEASE_OR_ACCEPT &&
(entry->state != HH_IRQ_STATE_RELEASED))
if (entry->state != GH_IRQ_STATE_WAIT_RELEASE_OR_ACCEPT &&
(entry->state != GH_IRQ_STATE_RELEASED))
return -EINVAL;

ret = hh_rm_vm_irq_reclaim(entry->virq_handle);
ret = gh_rm_vm_irq_reclaim(entry->virq_handle);
if (!ret)
entry->state = HH_IRQ_STATE_NONE;
entry->state = GH_IRQ_STATE_NONE;
return ret;
}
EXPORT_SYMBOL(hh_irq_reclaim);
EXPORT_SYMBOL(gh_irq_reclaim);
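/*
 * Illustrative sketch (editor's addition, not part of this change): the
 * lender typically reclaims the IRQ from its callback once the borrower
 * signals a release.
 */
static void example_on_released(void *data, unsigned long notif_type,
				enum gh_irq_label label)
{
	if (notif_type == GH_RM_NOTIF_VM_IRQ_RELEASED)
		gh_irq_reclaim(label);
}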

/**
* hh_irq_wait_for_lend_v2: Register to claim a lent interrupt from another VM
* gh_irq_wait_for_lend_v2: Register to claim a lent interrupt from another VM
* @label: vIRQ high-level label
* @name: Lender's VM name. If you don't care, then use HH_VM_MAX
* @name: Lender's VM name. If you don't care, then use GH_VM_MAX
* @on_lend: callback to invoke when the other VM lends the interrupt
* @data: Argument to pass to on_lend
*/
int hh_irq_wait_for_lend_v2(enum hh_irq_label label, enum hh_vm_names name,
hh_irq_handle_fn_v2 on_lend, void *data)
int gh_irq_wait_for_lend_v2(enum gh_irq_label label, enum gh_vm_names name,
gh_irq_handle_fn_v2 on_lend, void *data)
{
unsigned long flags;
struct hh_irq_entry *entry;
struct gh_irq_entry *entry;

if (label >= HH_IRQ_LABEL_MAX || !on_lend)
if (label >= GH_IRQ_LABEL_MAX || !on_lend)
return -EINVAL;

entry = &hh_irq_entries[label];
entry = &gh_irq_entries[label];

spin_lock_irqsave(&hh_irq_lend_lock, flags);
if (entry->state != HH_IRQ_STATE_NONE) {
spin_unlock_irqrestore(&hh_irq_lend_lock, flags);
spin_lock_irqsave(&gh_irq_lend_lock, flags);
if (entry->state != GH_IRQ_STATE_NONE) {
spin_unlock_irqrestore(&gh_irq_lend_lock, flags);
return -EINVAL;
}

entry->vm_name = name;
entry->v2_handle = on_lend;
entry->data = data;
entry->state = HH_IRQ_STATE_WAIT_LEND;
spin_unlock_irqrestore(&hh_irq_lend_lock, flags);
entry->state = GH_IRQ_STATE_WAIT_LEND;
spin_unlock_irqrestore(&gh_irq_lend_lock, flags);

return 0;
}
EXPORT_SYMBOL(hh_irq_wait_for_lend_v2);
EXPORT_SYMBOL(gh_irq_wait_for_lend_v2);
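/*
 * Illustrative borrower-side sketch (editor's addition, not part of this
 * change): accept the vIRQ once the lend notification arrives. The IRQ
 * type and control flow are assumptions for the example.
 */
static void example_on_lend(void *data, unsigned long notif_type,
			    enum gh_irq_label label)
{
	int irq;

	if (notif_type != GH_RM_NOTIF_VM_IRQ_LENT)
		return;

	/* Let RM pick the Linux IRQ number (-1), rising-edge triggered */
	irq = gh_irq_accept(label, -1, IRQ_TYPE_EDGE_RISING);
	if (irq >= 0)
		gh_irq_accept_notify(label);
}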

/**
* hh_irq_wait_for_lend: Register to claim a lent interrupt from another VM
* gh_irq_wait_for_lend: Register to claim a lent interrupt from another VM
* @label: vIRQ high-level label
* @name: Lender's VM name. If you don't care, then use HH_VM_MAX
* @name: Lender's VM name. If you don't care, then use GH_VM_MAX
* @on_lend: callback to invoke when the other VM lends the interrupt
* @data: Argument to pass to on_lend
*/
int hh_irq_wait_for_lend(enum hh_irq_label label, enum hh_vm_names name,
hh_irq_handle_fn on_lend, void *data)
int gh_irq_wait_for_lend(enum gh_irq_label label, enum gh_vm_names name,
gh_irq_handle_fn on_lend, void *data)
{
return 0;
}
EXPORT_SYMBOL(hh_irq_wait_for_lend);
EXPORT_SYMBOL(gh_irq_wait_for_lend);

/**
* hh_irq_accept: Register to receive interrupts with a lent vIRQ
* gh_irq_accept: Register to receive interrupts with a lent vIRQ
* @label: vIRQ high-level label
* @irq: Linux IRQ# to associate the vIRQ with. If you don't care, use -1
* @type: IRQ flags to use when allowing RM to choose the IRQ. If irq parameter
@ -328,128 +328,128 @@ EXPORT_SYMBOL(hh_irq_wait_for_lend);
* Returns the Linux IRQ# that the vIRQ was registered to on success.
* Returns <0 on error
* This function is not thread-safe w.r.t. IRQ lend state. Do not race with
* with hh_irq_release or another hh_irq_accept with the same label.
* gh_irq_release or another gh_irq_accept with the same label.
*/
int hh_irq_accept(enum hh_irq_label label, int irq, int type)
int gh_irq_accept(enum gh_irq_label label, int irq, int type)
{
struct hh_irq_entry *entry;
struct gh_irq_entry *entry;
u32 virq;

if (label >= HH_IRQ_LABEL_MAX)
if (label >= GH_IRQ_LABEL_MAX)
return -EINVAL;

entry = &hh_irq_entries[label];
entry = &gh_irq_entries[label];

if (entry->state != HH_IRQ_STATE_LENT)
if (entry->state != GH_IRQ_STATE_LENT)
return -EINVAL;

if (irq != -1) {
if (hh_rm_irq_to_virq(irq, &virq))
if (gh_rm_irq_to_virq(irq, &virq))
return -EINVAL;
} else
virq = -1;

virq = hh_rm_vm_irq_accept(entry->virq_handle, virq);
virq = gh_rm_vm_irq_accept(entry->virq_handle, virq);
if (virq < 0)
return virq;

if (irq == -1)
irq = hh_rm_virq_to_irq(virq, type);
irq = gh_rm_virq_to_irq(virq, type);

entry->state = HH_IRQ_STATE_ACCEPTED;
entry->state = GH_IRQ_STATE_ACCEPTED;
return irq;
}
EXPORT_SYMBOL(hh_irq_accept);
EXPORT_SYMBOL(gh_irq_accept);

/**
* hh_irq_accept_notify: Notify the lender VM (pvm) that the IRQ is accepted
* gh_irq_accept_notify: Notify the lender VM (pvm) that the IRQ is accepted
* @label: vIRQ high-level label
*
* Returns 0 on success, <0 on error.
* This function is not thread-safe w.r.t. IRQ lend state. Do not race with
* hh_irq_release or another hh_irq_accept with the same label.
* gh_irq_release or another gh_irq_accept with the same label.
*/
int hh_irq_accept_notify(enum hh_irq_label label)
int gh_irq_accept_notify(enum gh_irq_label label)
{
struct hh_irq_entry *entry;
struct gh_irq_entry *entry;

if (label >= HH_IRQ_LABEL_MAX)
if (label >= GH_IRQ_LABEL_MAX)
return -EINVAL;

entry = &hh_irq_entries[label];
entry = &gh_irq_entries[label];

if (entry->state != HH_IRQ_STATE_ACCEPTED)
if (entry->state != GH_IRQ_STATE_ACCEPTED)
return -EINVAL;

return hh_rm_vm_irq_accept_notify(entry->vmid,
return gh_rm_vm_irq_accept_notify(entry->vmid,
entry->virq_handle);
}
EXPORT_SYMBOL(hh_irq_accept_notify);
EXPORT_SYMBOL(gh_irq_accept_notify);

/**
* hh_irq_release: Release a lent interrupt
* gh_irq_release: Release a lent interrupt
* @label: vIRQ high-level label
* This function is not thread-safe w.r.t. IRQ lend state. Do not race with
* with hh_irq_accept or another hh_irq_release with the same label.
* gh_irq_accept or another gh_irq_release with the same label.
*/
int hh_irq_release(enum hh_irq_label label)
int gh_irq_release(enum gh_irq_label label)
{
int ret;
struct hh_irq_entry *entry;
struct gh_irq_entry *entry;

if (label >= HH_IRQ_LABEL_MAX)
if (label >= GH_IRQ_LABEL_MAX)
return -EINVAL;

entry = &hh_irq_entries[label];
entry = &gh_irq_entries[label];

if (entry->state != HH_IRQ_STATE_ACCEPTED)
if (entry->state != GH_IRQ_STATE_ACCEPTED)
return -EINVAL;

ret = hh_rm_vm_irq_release(entry->virq_handle);
ret = gh_rm_vm_irq_release(entry->virq_handle);
if (!ret)
entry->state = HH_IRQ_STATE_WAIT_LEND;
entry->state = GH_IRQ_STATE_WAIT_LEND;
return ret;
}
EXPORT_SYMBOL(hh_irq_release);
EXPORT_SYMBOL(gh_irq_release);
|
||||
int hh_irq_release_notify(enum hh_irq_label label)
|
||||
int gh_irq_release_notify(enum gh_irq_label label)
|
||||
{
|
||||
struct hh_irq_entry *entry;
|
||||
struct gh_irq_entry *entry;
|
||||
|
||||
if (label >= HH_IRQ_LABEL_MAX)
|
||||
if (label >= GH_IRQ_LABEL_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
entry = &hh_irq_entries[label];
|
||||
entry = &gh_irq_entries[label];
|
||||
|
||||
if (entry->state != HH_IRQ_STATE_ACCEPTED &&
|
||||
entry->state != HH_IRQ_STATE_WAIT_LEND)
|
||||
if (entry->state != GH_IRQ_STATE_ACCEPTED &&
|
||||
entry->state != GH_IRQ_STATE_WAIT_LEND)
|
||||
return -EINVAL;
|
||||
|
||||
return hh_rm_vm_irq_release_notify(entry->vmid,
|
||||
return gh_rm_vm_irq_release_notify(entry->vmid,
|
||||
entry->virq_handle);
|
||||
}
|
||||
EXPORT_SYMBOL(hh_irq_release_notify);
|
||||
EXPORT_SYMBOL(gh_irq_release_notify);
|
||||
|
||||
static int __init hh_irq_lend_init(void)
|
||||
static int __init gh_irq_lend_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = hh_rm_register_notifier(&hh_irq_lent_nb);
|
||||
ret = gh_rm_register_notifier(&gh_irq_lent_nb);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return hh_rm_register_notifier(&hh_irq_released_accepted_nb);
|
||||
return gh_rm_register_notifier(&gh_irq_released_accepted_nb);
|
||||
}
|
||||
module_init(hh_irq_lend_init);
|
||||
module_init(gh_irq_lend_init);
|
||||
|
||||
static void hh_irq_lend_exit(void)
|
||||
static void gh_irq_lend_exit(void)
|
||||
{
|
||||
hh_rm_unregister_notifier(&hh_irq_lent_nb);
|
||||
hh_rm_unregister_notifier(&hh_irq_released_accepted_nb);
|
||||
gh_rm_unregister_notifier(&gh_irq_lent_nb);
|
||||
gh_rm_unregister_notifier(&gh_irq_released_accepted_nb);
|
||||
}
|
||||
module_exit(hh_irq_lend_exit);
|
||||
module_exit(gh_irq_lend_exit);
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Haven IRQ Lending Library");
|
||||
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Gunyah IRQ Lending Library");
|
||||
|
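The release side mirrors the accept path; again, GH_IRQ_LABEL_SAMPLE is a hypothetical label used only for illustration:

/* Hypothetical consumer: hand a previously accepted vIRQ back.
 * gh_irq_release() moves the entry back to the WAIT_LEND state;
 * gh_irq_release_notify() then informs the lending VM so it can reclaim.
 */
static int sample_release_lent_irq(void)
{
        int ret;

        ret = gh_irq_release(GH_IRQ_LABEL_SAMPLE);
        if (ret)
                return ret;

        return gh_irq_release_notify(GH_IRQ_LABEL_SAMPLE);
}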
@ -1,10 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/haven/hh_mem_notifier.h>
#include <linux/gunyah/gh_mem_notifier.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
@ -12,22 +12,22 @@
#include <linux/slab.h>

struct mem_notifier_entry {
        hh_mem_notifier_handler handler;
        gh_mem_notifier_handler handler;
        void *data;
};

static DEFINE_MUTEX(mem_notifier_entries_lock);
static struct mem_notifier_entry mem_notifier_entries[HH_MEM_NOTIFIER_TAG_MAX];
static struct mem_notifier_entry mem_notifier_entries[GH_MEM_NOTIFIER_TAG_MAX];

static bool hh_mem_notifier_tag_valid(enum hh_mem_notifier_tag tag)
static bool gh_mem_notifier_tag_valid(enum gh_mem_notifier_tag tag)
{
        return tag >= 0 && tag < HH_MEM_NOTIFIER_TAG_MAX;
        return tag >= 0 && tag < GH_MEM_NOTIFIER_TAG_MAX;
}

/**
 * hh_mem_notifier_register: Bind a callback and arbitrary data to a particular
 * gh_mem_notifier_register: Bind a callback and arbitrary data to a particular
 *                           notification tag. The callback will be invoked when
 *                           the Haven MEM_SHARED and MEM_RELEASED notifications
 *                           the Gunyah MEM_SHARED and MEM_RELEASED notifications
 *                           involving the tag that was registered with arrive
 *                           at the VM.
 * @tag: The tag for which the caller would like to receive MEM_SHARED and
@ -44,12 +44,12 @@ static bool hh_mem_notifier_tag_valid(enum hh_mem_notifier_tag tag)
 * errors. The cookie must be used when unregistering the handler from the
 * tag.
 */
void *hh_mem_notifier_register(enum hh_mem_notifier_tag tag,
                               hh_mem_notifier_handler handler, void *data)
void *gh_mem_notifier_register(enum gh_mem_notifier_tag tag,
                               gh_mem_notifier_handler handler, void *data)
{
        struct mem_notifier_entry *entry;

        if (!hh_mem_notifier_tag_valid(tag) || !handler)
        if (!gh_mem_notifier_tag_valid(tag) || !handler)
                return ERR_PTR(-EINVAL);

        mutex_lock(&mem_notifier_entries_lock);
@ -64,19 +64,19 @@ void *hh_mem_notifier_register(enum hh_mem_notifier_tag tag,

        return entry;
}
EXPORT_SYMBOL(hh_mem_notifier_register);
EXPORT_SYMBOL(gh_mem_notifier_register);

/**
 * hh_mem_notifier_unregister: Unregister for memory notifier notifications
 * gh_mem_notifier_unregister: Unregister for memory notifier notifications
 *                             with respect to a particular tag.
 * @cookie: The cookie returned by hh_mem_notifier_register
 * @cookie: The cookie returned by gh_mem_notifier_register
 *
 * On success, the function will unbind the handler specified in
 * hh_mem_notifier_register from the tag, preventing the handler from being
 * gh_mem_notifier_register from the tag, preventing the handler from being
 * invoked when subsequent MEM_SHARED/MEM_RELEASED notifications pertaining
 * to the tag arrive.
 */
void hh_mem_notifier_unregister(void *cookie)
void gh_mem_notifier_unregister(void *cookie)
{
        struct mem_notifier_entry *entry = cookie;

@ -88,36 +88,36 @@ void hh_mem_notifier_unregister(void *cookie)
        entry->data = NULL;
        mutex_unlock(&mem_notifier_entries_lock);
}
EXPORT_SYMBOL(hh_mem_notifier_unregister);
EXPORT_SYMBOL(gh_mem_notifier_unregister);

static enum hh_mem_notifier_tag hh_mem_notifier_get_tag(unsigned long action,
static enum gh_mem_notifier_tag gh_mem_notifier_get_tag(unsigned long action,
                                                        void *msg)
{
        if (action == HH_RM_NOTIF_MEM_SHARED)
        if (action == GH_RM_NOTIF_MEM_SHARED)
                return
                ((struct hh_rm_notif_mem_shared_payload *)msg)->mem_info_tag;
        else if (action == HH_RM_NOTIF_MEM_RELEASED)
                ((struct gh_rm_notif_mem_shared_payload *)msg)->mem_info_tag;
        else if (action == GH_RM_NOTIF_MEM_RELEASED)
                return
                ((struct hh_rm_notif_mem_released_payload *)msg)->mem_info_tag;
                ((struct gh_rm_notif_mem_released_payload *)msg)->mem_info_tag;

        return ((struct hh_rm_notif_mem_accepted_payload *)msg)->mem_info_tag;
        return ((struct gh_rm_notif_mem_accepted_payload *)msg)->mem_info_tag;
}

static int hh_mem_notifier_call(struct notifier_block *nb, unsigned long action,
static int gh_mem_notifier_call(struct notifier_block *nb, unsigned long action,
                                void *msg)
{
        struct mem_notifier_entry *entry;
        enum hh_mem_notifier_tag tag;
        hh_mem_notifier_handler handler = NULL;
        enum gh_mem_notifier_tag tag;
        gh_mem_notifier_handler handler = NULL;
        void *data;

        if ((action != HH_RM_NOTIF_MEM_SHARED) &&
            (action != HH_RM_NOTIF_MEM_RELEASED) &&
            (action != HH_RM_NOTIF_MEM_ACCEPTED))
        if ((action != GH_RM_NOTIF_MEM_SHARED) &&
            (action != GH_RM_NOTIF_MEM_RELEASED) &&
            (action != GH_RM_NOTIF_MEM_ACCEPTED))
                return NOTIFY_DONE;

        tag = hh_mem_notifier_get_tag(action, msg);
        if (!hh_mem_notifier_tag_valid(tag))
        tag = gh_mem_notifier_get_tag(action, msg);
        if (!gh_mem_notifier_tag_valid(tag))
                return NOTIFY_DONE;

        mutex_lock(&mem_notifier_entries_lock);
@ -132,26 +132,26 @@ static int hh_mem_notifier_call(struct notifier_block *nb, unsigned long action,
        return NOTIFY_OK;
}

static struct notifier_block hh_mem_notifier_blk = {
        .notifier_call = hh_mem_notifier_call,
static struct notifier_block gh_mem_notifier_blk = {
        .notifier_call = gh_mem_notifier_call,
};

static int __init hh_mem_notifier_init(void)
static int __init gh_mem_notifier_init(void)
{
        int ret = hh_rm_register_notifier(&hh_mem_notifier_blk);
        int ret = gh_rm_register_notifier(&gh_mem_notifier_blk);

        if (ret)
                pr_err("%s: registration with RM notifier failed rc: %d\n",
                       __func__, ret);
        return ret;
}
module_init(hh_mem_notifier_init);
module_init(gh_mem_notifier_init);

static void __exit hh_mem_notifier_exit(void)
static void __exit gh_mem_notifier_exit(void)
{
        hh_rm_unregister_notifier(&hh_mem_notifier_blk);
        gh_rm_unregister_notifier(&gh_mem_notifier_blk);
}
module_exit(hh_mem_notifier_exit);
module_exit(gh_mem_notifier_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Haven Memory Notifier");
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Gunyah Memory Notifier");
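A minimal registration sketch for this notifier follows. GH_MEM_NOTIFIER_TAG_SAMPLE is a made-up tag, and the handler prototype is an assumption; the authoritative gh_mem_notifier_handler typedef lives in linux/gunyah/gh_mem_notifier.h:

/* Hypothetical client of the memory notifier. The handler signature
 * below is assumed for illustration, not taken from this diff.
 */
static void sample_mem_handler(enum gh_mem_notifier_tag tag,
                               unsigned long action, void *msg, void *data)
{
        /* React to MEM_SHARED/MEM_RELEASED/MEM_ACCEPTED for this tag. */
}

static void *sample_cookie;

static int sample_mem_notifier_register(void)
{
        sample_cookie = gh_mem_notifier_register(GH_MEM_NOTIFIER_TAG_SAMPLE,
                                                 sample_mem_handler, NULL);
        return IS_ERR(sample_cookie) ? PTR_ERR(sample_cookie) : 0;
}

static void sample_mem_notifier_unregister(void)
{
        gh_mem_notifier_unregister(sample_cookie);
}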
@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 *
 */

@ -12,23 +12,23 @@
#include <linux/interrupt.h>
#include <linux/ratelimit.h>

#include <linux/haven/hcall.h>
#include <linux/haven/hh_msgq.h>
#include <linux/haven/hh_errno.h>
#include <linux/gunyah/hcall.h>
#include <linux/gunyah/gh_msgq.h>
#include <linux/gunyah/gh_errno.h>

/* HVC call specific mask: 0 to 31 */
#define HH_MSGQ_HVC_FLAGS_MASK GENMASK_ULL(31, 0)
#define GH_MSGQ_HVC_FLAGS_MASK GENMASK_ULL(31, 0)

struct hh_msgq_desc {
        enum hh_msgq_label label;
struct gh_msgq_desc {
        enum gh_msgq_label label;
};

struct hh_msgq_cap_table {
        struct hh_msgq_desc *client_desc;
struct gh_msgq_cap_table {
        struct gh_msgq_desc *client_desc;
        spinlock_t cap_entry_lock;

        hh_capid_t tx_cap_id;
        hh_capid_t rx_cap_id;
        gh_capid_t tx_cap_id;
        gh_capid_t rx_cap_id;
        int tx_irq;
        int rx_irq;
        const char *tx_irq_name;
@ -42,12 +42,12 @@ struct hh_msgq_cap_table {
        wait_queue_head_t rx_wq;
};

static bool hh_msgq_initialized;
static struct hh_msgq_cap_table hh_msgq_cap_table[HH_MSGQ_LABEL_MAX];
static bool gh_msgq_initialized;
static struct gh_msgq_cap_table gh_msgq_cap_table[GH_MSGQ_LABEL_MAX];

static irqreturn_t hh_msgq_rx_isr(int irq, void *dev)
static irqreturn_t gh_msgq_rx_isr(int irq, void *dev)
{
        struct hh_msgq_cap_table *cap_table_entry = dev;
        struct gh_msgq_cap_table *cap_table_entry = dev;

        spin_lock(&cap_table_entry->rx_lock);
        cap_table_entry->rx_empty = false;
@ -58,9 +58,9 @@ static irqreturn_t hh_msgq_rx_isr(int irq, void *dev)
        return IRQ_HANDLED;
}

static irqreturn_t hh_msgq_tx_isr(int irq, void *dev)
static irqreturn_t gh_msgq_tx_isr(int irq, void *dev)
{
        struct hh_msgq_cap_table *cap_table_entry = dev;
        struct gh_msgq_cap_table *cap_table_entry = dev;

        spin_lock(&cap_table_entry->tx_lock);
        cap_table_entry->tx_full = false;
@ -71,53 +71,53 @@ static irqreturn_t hh_msgq_tx_isr(int irq, void *dev)
        return IRQ_HANDLED;
}

static int __hh_msgq_recv(struct hh_msgq_cap_table *cap_table_entry,
static int __gh_msgq_recv(struct gh_msgq_cap_table *cap_table_entry,
                          void *buff, size_t buff_size,
                          size_t *recv_size, u64 rx_flags)
{
        struct hh_hcall_msgq_recv_resp resp = {};
        struct gh_hcall_msgq_recv_resp resp = {};
        unsigned long flags;
        int hh_ret;
        int gh_ret;
        int ret = 0;

        /* Discard the driver specific flags, and keep only HVC specifics */
        rx_flags &= HH_MSGQ_HVC_FLAGS_MASK;
        rx_flags &= GH_MSGQ_HVC_FLAGS_MASK;

        spin_lock_irqsave(&cap_table_entry->rx_lock, flags);
        hh_ret = hh_hcall_msgq_recv(cap_table_entry->rx_cap_id, buff,
        gh_ret = gh_hcall_msgq_recv(cap_table_entry->rx_cap_id, buff,
                                    buff_size, &resp);

        switch (hh_ret) {
        case HH_ERROR_OK:
        switch (gh_ret) {
        case GH_ERROR_OK:
                *recv_size = resp.recv_size;
                cap_table_entry->rx_empty = !resp.not_empty;
                ret = 0;
                break;
        case HH_ERROR_MSGQUEUE_EMPTY:
        case GH_ERROR_MSGQUEUE_EMPTY:
                cap_table_entry->rx_empty = true;
                ret = -EAGAIN;
                break;
        default:
                ret = hh_remap_error(hh_ret);
                ret = gh_remap_error(gh_ret);
        }

        spin_unlock_irqrestore(&cap_table_entry->rx_lock, flags);

        if (ret != 0 && ret != -EAGAIN)
                pr_err("%s: Failed to recv from msgq. Hypercall error: %d\n",
                       __func__, hh_ret);
                       __func__, gh_ret);

        return ret;
}

/**
 * hh_msgq_recv: Receive a message from the client running on a different VM
 * @client_desc: The client descriptor that was obtained via hh_msgq_register()
 * gh_msgq_recv: Receive a message from the client running on a different VM
 * @client_desc: The client descriptor that was obtained via gh_msgq_register()
 * @buff: Pointer to the buffer where the received data must be placed
 * @buff_size: The size of the buffer space available
 * @recv_size: The actual amount of data that is copied into buff
 * @flags: Optional flags to pass to receive the data. For the list of flags,
 *         see linux/haven/hh_msgq.h
 *         see linux/gunyah/gh_msgq.h
 *
 * The function returns 0 if the data is successfully received and recv_size
 * would contain the actual amount of data copied into buff.
@ -129,21 +129,21 @@ static int __hh_msgq_recv(struct hh_msgq_cap_table *cap_table_entry,
 * Note: this function may sleep and should not be called from interrupt
 * context
 */
int hh_msgq_recv(void *msgq_client_desc,
int gh_msgq_recv(void *msgq_client_desc,
                 void *buff, size_t buff_size,
                 size_t *recv_size, unsigned long flags)
{
        struct hh_msgq_desc *client_desc = msgq_client_desc;
        struct hh_msgq_cap_table *cap_table_entry;
        struct gh_msgq_desc *client_desc = msgq_client_desc;
        struct gh_msgq_cap_table *cap_table_entry;
        int ret;

        if (!client_desc || !buff || !buff_size || !recv_size)
                return -EINVAL;

        if (buff_size > HH_MSGQ_MAX_MSG_SIZE_BYTES)
        if (buff_size > GH_MSGQ_MAX_MSG_SIZE_BYTES)
                return -E2BIG;

        cap_table_entry = &hh_msgq_cap_table[client_desc->label];
        cap_table_entry = &gh_msgq_cap_table[client_desc->label];

        spin_lock(&cap_table_entry->cap_entry_lock);

@ -153,8 +153,8 @@ int hh_msgq_recv(void *msgq_client_desc,
                goto err;
        }

        if ((cap_table_entry->rx_cap_id == HH_CAPID_INVAL) &&
            (flags & HH_MSGQ_NONBLOCK)) {
        if ((cap_table_entry->rx_cap_id == GH_CAPID_INVAL) &&
            (flags & GH_MSGQ_NONBLOCK)) {
                pr_err_ratelimited(
                        "%s: Recv info for label %d not yet initialized\n",
                        __func__, client_desc->label);
@ -165,7 +165,7 @@ int hh_msgq_recv(void *msgq_client_desc,
                spin_unlock(&cap_table_entry->cap_entry_lock);

                if (wait_event_interruptible(cap_table_entry->rx_wq,
                                cap_table_entry->rx_cap_id != HH_CAPID_INVAL))
                                cap_table_entry->rx_cap_id != GH_CAPID_INVAL))
                        return -ERESTARTSYS;

                spin_lock(&cap_table_entry->cap_entry_lock);
@ -180,19 +180,19 @@ int hh_msgq_recv(void *msgq_client_desc,
        spin_unlock(&cap_table_entry->cap_entry_lock);

        do {
                if (cap_table_entry->rx_empty && (flags & HH_MSGQ_NONBLOCK))
                if (cap_table_entry->rx_empty && (flags & GH_MSGQ_NONBLOCK))
                        return -EAGAIN;

                if (wait_event_interruptible(cap_table_entry->rx_wq,
                                             !cap_table_entry->rx_empty))
                        return -ERESTARTSYS;

                ret = __hh_msgq_recv(cap_table_entry, buff, buff_size,
                ret = __gh_msgq_recv(cap_table_entry, buff, buff_size,
                                     recv_size, flags);
        } while (ret == -EAGAIN);

        if (!ret)
                print_hex_dump_debug("hh_msgq_recv: ", DUMP_PREFIX_OFFSET,
                print_hex_dump_debug("gh_msgq_recv: ", DUMP_PREFIX_OFFSET,
                                     4, 1, buff, *recv_size, false);

        return ret;
@ -201,75 +201,75 @@ int hh_msgq_recv(void *msgq_client_desc,
        spin_unlock(&cap_table_entry->cap_entry_lock);
        return ret;
}
EXPORT_SYMBOL(hh_msgq_recv);
EXPORT_SYMBOL(gh_msgq_recv);
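A sketch of a blocking receive against an already-registered descriptor (obtained from gh_msgq_register(), shown further below); only functions and macros visible in this diff are used:

/* Hypothetical receiver: block until a message arrives, then log its size. */
static int sample_msgq_recv(void *desc)
{
        size_t recv_size;
        void *buf;
        int ret;

        buf = kzalloc(GH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* flags == 0: sleep until data is available. */
        ret = gh_msgq_recv(desc, buf, GH_MSGQ_MAX_MSG_SIZE_BYTES,
                           &recv_size, 0);
        if (!ret)
                pr_info("sample_msgq: received %zu bytes\n", recv_size);

        kfree(buf);
        return ret;
}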
static int __hh_msgq_send(struct hh_msgq_cap_table *cap_table_entry,
static int __gh_msgq_send(struct gh_msgq_cap_table *cap_table_entry,
                          void *buff, size_t size, u64 tx_flags)
{
        struct hh_hcall_msgq_send_resp resp = {};
        struct gh_hcall_msgq_send_resp resp = {};
        unsigned long flags;
        int hh_ret;
        int gh_ret;
        int ret = 0;

        /* Discard the driver specific flags, and keep only HVC specifics */
        tx_flags &= HH_MSGQ_HVC_FLAGS_MASK;
        tx_flags &= GH_MSGQ_HVC_FLAGS_MASK;

        print_hex_dump_debug("hh_msgq_send: ", DUMP_PREFIX_OFFSET,
        print_hex_dump_debug("gh_msgq_send: ", DUMP_PREFIX_OFFSET,
                             4, 1, buff, size, false);

        spin_lock_irqsave(&cap_table_entry->tx_lock, flags);
        hh_ret = hh_hcall_msgq_send(cap_table_entry->tx_cap_id,
        gh_ret = gh_hcall_msgq_send(cap_table_entry->tx_cap_id,
                                    size, buff, tx_flags, &resp);

        switch (hh_ret) {
        case HH_ERROR_OK:
        switch (gh_ret) {
        case GH_ERROR_OK:
                cap_table_entry->tx_full = !resp.not_full;
                ret = 0;
                break;
        case HH_ERROR_MSGQUEUE_FULL:
        case GH_ERROR_MSGQUEUE_FULL:
                cap_table_entry->tx_full = true;
                ret = -EAGAIN;
                break;
        default:
                ret = hh_remap_error(hh_ret);
                ret = gh_remap_error(gh_ret);
        }

        spin_unlock_irqrestore(&cap_table_entry->tx_lock, flags);

        if (ret != 0 && ret != -EAGAIN)
                pr_err("%s: Failed to send on msgq. Hypercall error: %d\n",
                       __func__, hh_ret);
                       __func__, gh_ret);

        return ret;
}

/**
 * hh_msgq_send: Send a message to the client on a different VM
 * @client_desc: The client descriptor that was obtained via hh_msgq_register()
 * gh_msgq_send: Send a message to the client on a different VM
 * @client_desc: The client descriptor that was obtained via gh_msgq_register()
 * @buff: Pointer to the buffer that needs to be sent
 * @size: The size of the buffer
 * @flags: Optional flags to pass to send the data. For the list of flags,
 *         see linux/haven/hh_msgq.h
 *         see linux/gunyah/gh_msgq.h
 *
 * The function returns -EINVAL if the caller passes invalid arguments,
 * -EAGAIN if the message queue is not yet ready to communicate, and -EPERM if
 * the caller doesn't have permissions to send the data.
 *
 */
int hh_msgq_send(void *msgq_client_desc,
int gh_msgq_send(void *msgq_client_desc,
                 void *buff, size_t size, unsigned long flags)
{
        struct hh_msgq_desc *client_desc = msgq_client_desc;
        struct hh_msgq_cap_table *cap_table_entry;
        struct gh_msgq_desc *client_desc = msgq_client_desc;
        struct gh_msgq_cap_table *cap_table_entry;
        int ret;

        if (!client_desc || !buff || !size)
                return -EINVAL;

        if (size > HH_MSGQ_MAX_MSG_SIZE_BYTES)
        if (size > GH_MSGQ_MAX_MSG_SIZE_BYTES)
                return -E2BIG;

        cap_table_entry = &hh_msgq_cap_table[client_desc->label];
        cap_table_entry = &gh_msgq_cap_table[client_desc->label];

        spin_lock(&cap_table_entry->cap_entry_lock);

@ -279,8 +279,8 @@ int hh_msgq_send(void *msgq_client_desc,
                goto err;
        }

        if ((cap_table_entry->tx_cap_id == HH_CAPID_INVAL) &&
            (flags & HH_MSGQ_NONBLOCK)) {
        if ((cap_table_entry->tx_cap_id == GH_CAPID_INVAL) &&
            (flags & GH_MSGQ_NONBLOCK)) {
                pr_err_ratelimited(
                        "%s: Send info for label %d not yet initialized\n",
                        __func__, client_desc->label);
@ -291,7 +291,7 @@ int hh_msgq_send(void *msgq_client_desc,
                spin_unlock(&cap_table_entry->cap_entry_lock);

                if (wait_event_interruptible(cap_table_entry->tx_wq,
                                cap_table_entry->tx_cap_id != HH_CAPID_INVAL))
                                cap_table_entry->tx_cap_id != GH_CAPID_INVAL))
                        return -ERESTARTSYS;

                spin_lock(&cap_table_entry->cap_entry_lock);
@ -306,14 +306,14 @@ int hh_msgq_send(void *msgq_client_desc,
        spin_unlock(&cap_table_entry->cap_entry_lock);

        do {
                if (cap_table_entry->tx_full && (flags & HH_MSGQ_NONBLOCK))
                if (cap_table_entry->tx_full && (flags & GH_MSGQ_NONBLOCK))
                        return -EAGAIN;

                if (wait_event_interruptible(cap_table_entry->tx_wq,
                                             !cap_table_entry->tx_full))
                        return -ERESTARTSYS;

                ret = __hh_msgq_send(cap_table_entry, buff, size, flags);
                ret = __gh_msgq_send(cap_table_entry, buff, size, flags);
        } while (ret == -EAGAIN);

        return ret;
@ -321,10 +321,10 @@ int hh_msgq_send(void *msgq_client_desc,
        spin_unlock(&cap_table_entry->cap_entry_lock);
        return ret;
}
EXPORT_SYMBOL(hh_msgq_send);
EXPORT_SYMBOL(gh_msgq_send);

/**
 * hh_msgq_register: Register as a client to the use the message queue
 * gh_msgq_register: Register as a client to the use the message queue
 * @label: The label associated to the message queue that the client wants
 *         to communicate
 *
@ -334,18 +334,18 @@ EXPORT_SYMBOL(hh_msgq_send);
 * the return value using IS_ERR_OR_NULL() and PTR_ERR() to extract the error
 * code.
 */
void *hh_msgq_register(enum hh_msgq_label label)
void *gh_msgq_register(enum gh_msgq_label label)
{
        struct hh_msgq_cap_table *cap_table_entry;
        struct hh_msgq_desc *client_desc;
        struct gh_msgq_cap_table *cap_table_entry;
        struct gh_msgq_desc *client_desc;

        if (!hh_msgq_initialized)
        if (!gh_msgq_initialized)
                return ERR_PTR(-EPROBE_DEFER);

        if (label < 0 || label >= HH_MSGQ_LABEL_MAX)
        if (label < 0 || label >= GH_MSGQ_LABEL_MAX)
                return ERR_PTR(-EINVAL);

        cap_table_entry = &hh_msgq_cap_table[label];
        cap_table_entry = &gh_msgq_cap_table[label];

        spin_lock(&cap_table_entry->cap_entry_lock);

@ -367,28 +367,28 @@ void *hh_msgq_register(enum hh_msgq_label label)
        cap_table_entry->client_desc = client_desc;
        spin_unlock(&cap_table_entry->cap_entry_lock);

        pr_info("hh_msgq: Registered client for label: %d\n", label);
        pr_info("gh_msgq: Registered client for label: %d\n", label);

        return client_desc;
}
EXPORT_SYMBOL(hh_msgq_register);
EXPORT_SYMBOL(gh_msgq_register);
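And a sketch of the sender side, registering against a hypothetical GH_MSGQ_LABEL_SAMPLE label (real labels come from enum gh_msgq_label) and issuing a non-blocking send:

/* Hypothetical sender: register, try a non-blocking send, clean up. */
static int sample_msgq_send(void)
{
        static const char msg[] = "hello";
        void *desc;
        int ret;

        desc = gh_msgq_register(GH_MSGQ_LABEL_SAMPLE);
        if (IS_ERR_OR_NULL(desc))
                return desc ? PTR_ERR(desc) : -EINVAL;

        /* GH_MSGQ_NONBLOCK: return -EAGAIN instead of sleeping if full. */
        ret = gh_msgq_send(desc, (void *)msg, sizeof(msg), GH_MSGQ_NONBLOCK);

        gh_msgq_unregister(desc);
        return ret;
}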
/**
 * hh_msgq_unregister: Unregister as a client to the use the message queue
 * @client_desc: The descriptor that was passed via hh_msgq_register()
 * gh_msgq_unregister: Unregister as a client to the use the message queue
 * @client_desc: The descriptor that was passed via gh_msgq_register()
 *
 * The function returns 0 is the client was unregistered successfully. Else,
 * -EINVAL for invalid arguments.
 */
int hh_msgq_unregister(void *msgq_client_desc)
int gh_msgq_unregister(void *msgq_client_desc)
{
        struct hh_msgq_desc *client_desc = msgq_client_desc;
        struct hh_msgq_cap_table *cap_table_entry;
        struct gh_msgq_desc *client_desc = msgq_client_desc;
        struct gh_msgq_cap_table *cap_table_entry;

        if (!client_desc)
                return -EINVAL;

        cap_table_entry = &hh_msgq_cap_table[client_desc->label];
        cap_table_entry = &gh_msgq_cap_table[client_desc->label];

        spin_lock(&cap_table_entry->cap_entry_lock);

@ -411,18 +411,18 @@ int hh_msgq_unregister(void *msgq_client_desc)

        return 0;
}
EXPORT_SYMBOL(hh_msgq_unregister);
EXPORT_SYMBOL(gh_msgq_unregister);

int hh_msgq_populate_cap_info(enum hh_msgq_label label, u64 cap_id,
int gh_msgq_populate_cap_info(enum gh_msgq_label label, u64 cap_id,
                              int direction, int irq)
{
        struct hh_msgq_cap_table *cap_table_entry;
        struct gh_msgq_cap_table *cap_table_entry;
        int ret;

        if (!hh_msgq_initialized)
        if (!gh_msgq_initialized)
                return -EAGAIN;

        if (label < 0 || label >= HH_MSGQ_LABEL_MAX) {
        if (label < 0 || label >= GH_MSGQ_LABEL_MAX) {
                pr_err("%s: Invalid label passed\n", __func__);
                return -EINVAL;
        }
@ -432,10 +432,10 @@ int hh_msgq_populate_cap_info(enum hh_msgq_label label, u64 cap_id,
                return -ENXIO;
        }

        cap_table_entry = &hh_msgq_cap_table[label];
        cap_table_entry = &gh_msgq_cap_table[label];

        if (direction == HH_MSGQ_DIRECTION_TX) {
                ret = request_irq(irq, hh_msgq_tx_isr, 0,
        if (direction == GH_MSGQ_DIRECTION_TX) {
                ret = request_irq(irq, gh_msgq_tx_isr, 0,
                                  cap_table_entry->tx_irq_name, cap_table_entry);
                if (ret < 0)
                        goto err;
@ -446,8 +446,8 @@ int hh_msgq_populate_cap_info(enum hh_msgq_label label, u64 cap_id,
                spin_unlock(&cap_table_entry->cap_entry_lock);

                wake_up_interruptible(&cap_table_entry->tx_wq);
        } else if (direction == HH_MSGQ_DIRECTION_RX) {
                ret = request_irq(irq, hh_msgq_rx_isr, 0,
        } else if (direction == GH_MSGQ_DIRECTION_RX) {
                ret = request_irq(irq, gh_msgq_rx_isr, 0,
                                  cap_table_entry->rx_irq_name, cap_table_entry);
                if (ret < 0)
                        goto err;
@ -475,10 +475,10 @@ int hh_msgq_populate_cap_info(enum hh_msgq_label label, u64 cap_id,
err:
        return ret;
}
EXPORT_SYMBOL(hh_msgq_populate_cap_info);
EXPORT_SYMBOL(gh_msgq_populate_cap_info);

static int hh_msgq_probe_direction(struct platform_device *pdev,
                                   enum hh_msgq_label label, int direction, int idx)
static int gh_msgq_probe_direction(struct platform_device *pdev,
                                   enum gh_msgq_label label, int direction, int idx)
{
        int irq, ret;
        u64 capid;
@ -496,10 +496,10 @@ static int hh_msgq_probe_direction(struct platform_device *pdev,
                return ret;
        }

        return hh_msgq_populate_cap_info(label, capid, direction, irq);
        return gh_msgq_populate_cap_info(label, capid, direction, irq);
}

int hh_msgq_probe(struct platform_device *pdev, enum hh_msgq_label label)
int gh_msgq_probe(struct platform_device *pdev, enum gh_msgq_label label)
{
        int ret, idx = 0;
        struct device_node *node = pdev->dev.of_node;
@ -508,7 +508,7 @@ int hh_msgq_probe(struct platform_device *pdev, enum hh_msgq_label label)
        duplex = of_property_read_bool(node, "qcom,is-full-duplex");

        if (duplex || of_property_read_bool(node, "qcom,is-sender")) {
                ret = hh_msgq_probe_direction(pdev, label, HH_MSGQ_DIRECTION_TX,
                ret = gh_msgq_probe_direction(pdev, label, GH_MSGQ_DIRECTION_TX,
                                              idx);
                if (ret)
                        return ret;
@ -516,7 +516,7 @@ int hh_msgq_probe(struct platform_device *pdev, enum hh_msgq_label label)
        }

        if (duplex || of_property_read_bool(node, "qcom,is-receiver")) {
                ret = hh_msgq_probe_direction(pdev, label, HH_MSGQ_DIRECTION_RX,
                ret = gh_msgq_probe_direction(pdev, label, GH_MSGQ_DIRECTION_RX,
                                              idx);
                if (ret)
                        return ret;
@ -524,35 +524,35 @@ int hh_msgq_probe(struct platform_device *pdev, enum hh_msgq_label label)

        return 0;
}
EXPORT_SYMBOL(hh_msgq_probe);
EXPORT_SYMBOL(gh_msgq_probe);

static void hh_msgq_cleanup(int begin_idx)
static void gh_msgq_cleanup(int begin_idx)
{
        struct hh_msgq_cap_table *cap_table_entry;
        struct gh_msgq_cap_table *cap_table_entry;
        int i;

        if (begin_idx >= HH_MSGQ_LABEL_MAX)
                begin_idx = HH_MSGQ_LABEL_MAX - 1;
        if (begin_idx >= GH_MSGQ_LABEL_MAX)
                begin_idx = GH_MSGQ_LABEL_MAX - 1;

        for (i = begin_idx; i >= 0; i--) {
                cap_table_entry = &hh_msgq_cap_table[i];
                cap_table_entry = &gh_msgq_cap_table[i];

                kfree(cap_table_entry->tx_irq_name);
                kfree(cap_table_entry->rx_irq_name);
        }
}

static int __init hh_msgq_init(void)
static int __init gh_msgq_init(void)
{
        struct hh_msgq_cap_table *cap_table_entry;
        struct gh_msgq_cap_table *cap_table_entry;
        int ret;
        int i;

        for (i = 0; i < HH_MSGQ_LABEL_MAX; i++) {
                cap_table_entry = &hh_msgq_cap_table[i];
        for (i = 0; i < GH_MSGQ_LABEL_MAX; i++) {
                cap_table_entry = &gh_msgq_cap_table[i];

                cap_table_entry->tx_cap_id = HH_CAPID_INVAL;
                cap_table_entry->rx_cap_id = HH_CAPID_INVAL;
                cap_table_entry->tx_cap_id = GH_CAPID_INVAL;
                cap_table_entry->rx_cap_id = GH_CAPID_INVAL;
                cap_table_entry->tx_full = false;
                cap_table_entry->rx_empty = true;
                init_waitqueue_head(&cap_table_entry->tx_wq);
@ -562,34 +562,34 @@ static int __init hh_msgq_init(void)
                spin_lock_init(&cap_table_entry->cap_entry_lock);

                cap_table_entry->tx_irq_name = kasprintf(GFP_KERNEL,
                                                         "hh_msgq_tx_%d", i);
                                                         "gh_msgq_tx_%d", i);
                if (!cap_table_entry->tx_irq_name) {
                        ret = -ENOMEM;
                        goto err;
                }

                cap_table_entry->rx_irq_name = kasprintf(GFP_KERNEL,
                                                         "hh_msgq_rx_%d", i);
                                                         "gh_msgq_rx_%d", i);
                if (!cap_table_entry->rx_irq_name) {
                        ret = -ENOMEM;
                        goto err;
                }
        }

        hh_msgq_initialized = true;
        gh_msgq_initialized = true;
        return 0;

err:
        hh_msgq_cleanup(i);
        gh_msgq_cleanup(i);
        return ret;
}
module_init(hh_msgq_init);
module_init(gh_msgq_init);

static void __exit hh_msgq_exit(void)
static void __exit gh_msgq_exit(void)
{
        hh_msgq_cleanup(HH_MSGQ_LABEL_MAX - 1);
        gh_msgq_cleanup(GH_MSGQ_LABEL_MAX - 1);
}
module_exit(hh_msgq_exit);
module_exit(gh_msgq_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Haven Message Queue Driver");
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Gunyah Message Queue Driver");
File diff suppressed because it is too large
@ -3,19 +3,19 @@
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#ifndef __HH_RM_DRV_PRIVATE_H
#define __HH_RM_DRV_PRIVATE_H
#ifndef __GH_RM_DRV_PRIVATE_H
#define __GH_RM_DRV_PRIVATE_H

#include <linux/types.h>

#include <linux/haven/hh_msgq.h>
#include <linux/haven/hh_rm_drv.h>
#include <linux/haven/hh_common.h>
#include <linux/gunyah/gh_msgq.h>
#include <linux/gunyah/gh_rm_drv.h>
#include <linux/gunyah/gh_common.h>

extern bool hh_rm_core_initialized;
extern bool gh_rm_core_initialized;

/* Resource Manager Header */
struct hh_rm_rpc_hdr {
struct gh_rm_rpc_hdr {
        u8 version:4,
           hdr_words:4;
        u8 type:2,
@ -25,14 +25,14 @@ struct hh_rm_rpc_hdr {
} __packed;

/* Standard reply header */
struct hh_rm_rpc_reply_hdr {
        struct hh_rm_rpc_hdr rpc_hdr;
struct gh_rm_rpc_reply_hdr {
        struct gh_rm_rpc_hdr rpc_hdr;
        u32 err_code;
} __packed;

/* VM specific properties to be cached */
struct hh_vm_property {
        hh_vmid_t vmid;
struct gh_vm_property {
        gh_vmid_t vmid;
        u8 *guid;
        char *uri;
        char *name;
@ -40,141 +40,141 @@ struct hh_vm_property {
};

/* RPC Header versions */
#define HH_RM_RPC_HDR_VERSION_ONE 0x1
#define GH_RM_RPC_HDR_VERSION_ONE 0x1

/* RPC Header words */
#define HH_RM_RPC_HDR_WORDS 0x2
#define GH_RM_RPC_HDR_WORDS 0x2

/* RPC Message types */
#define HH_RM_RPC_TYPE_CONT 0x0
#define HH_RM_RPC_TYPE_REQ 0x1
#define HH_RM_RPC_TYPE_RPLY 0x2
#define HH_RM_RPC_TYPE_NOTIF 0x3
#define GH_RM_RPC_TYPE_CONT 0x0
#define GH_RM_RPC_TYPE_REQ 0x1
#define GH_RM_RPC_TYPE_RPLY 0x2
#define GH_RM_RPC_TYPE_NOTIF 0x3

/* RPC Message IDs */
/* Call type Message IDs that has a request/reply pattern */
/* Message IDs: Informative */
#define HH_RM_RPC_MSG_ID_CALL_GET_IDENT 0x00000001
#define HH_RM_RPC_MSG_ID_CALL_GET_FEATURES 0x00000002
#define GH_RM_RPC_MSG_ID_CALL_GET_IDENT 0x00000001
#define GH_RM_RPC_MSG_ID_CALL_GET_FEATURES 0x00000002

/* Message IDs: Memory management */
#define HH_RM_RPC_MSG_ID_CALL_MEM_ACCEPT 0x51000011
#define HH_RM_RPC_MSG_ID_CALL_MEM_LEND 0x51000012
#define HH_RM_RPC_MSG_ID_CALL_MEM_SHARE 0x51000013
#define HH_RM_RPC_MSG_ID_CALL_MEM_RELEASE 0x51000014
#define HH_RM_RPC_MSG_ID_CALL_MEM_RECLAIM 0x51000015
#define HH_RM_RPC_MSG_ID_CALL_MEM_NOTIFY 0x51000017
#define GH_RM_RPC_MSG_ID_CALL_MEM_ACCEPT 0x51000011
#define GH_RM_RPC_MSG_ID_CALL_MEM_LEND 0x51000012
#define GH_RM_RPC_MSG_ID_CALL_MEM_SHARE 0x51000013
#define GH_RM_RPC_MSG_ID_CALL_MEM_RELEASE 0x51000014
#define GH_RM_RPC_MSG_ID_CALL_MEM_RECLAIM 0x51000015
#define GH_RM_RPC_MSG_ID_CALL_MEM_NOTIFY 0x51000017

/* Message IDs: extensions for hyp-assign */
#define HH_RM_RPC_MSG_ID_CALL_MEM_QCOM_LOOKUP_SGL 0x5100001A
#define GH_RM_RPC_MSG_ID_CALL_MEM_QCOM_LOOKUP_SGL 0x5100001A

/* Message IDs: VM Management */
#define HH_RM_RPC_MSG_ID_CALL_VM_ALLOCATE 0x56000001
#define HH_RM_RPC_MSG_ID_CALL_VM_START 0x56000004
#define GH_RM_RPC_MSG_ID_CALL_VM_ALLOCATE 0x56000001
#define GH_RM_RPC_MSG_ID_CALL_VM_START 0x56000004

/* Message IDs: VM Query */
#define HH_RM_RPC_MSG_ID_CALL_VM_GET_ID 0x56000010
#define HH_RM_RPC_MSG_ID_CALL_VM_GET_STATE 0x56000017
#define HH_RM_RPC_MSG_ID_CALL_VM_GET_HYP_RESOURCES 0x56000020
#define HH_RM_RPC_MSG_ID_CALL_VM_LOOKUP_HYP_CAPIDS 0x56000021
#define HH_RM_RPC_MSG_ID_CALL_VM_LOOKUP_HYP_IRQS 0X56000022
#define GH_RM_RPC_MSG_ID_CALL_VM_GET_ID 0x56000010
#define GH_RM_RPC_MSG_ID_CALL_VM_GET_STATE 0x56000017
#define GH_RM_RPC_MSG_ID_CALL_VM_GET_HYP_RESOURCES 0x56000020
#define GH_RM_RPC_MSG_ID_CALL_VM_LOOKUP_HYP_CAPIDS 0x56000021
#define GH_RM_RPC_MSG_ID_CALL_VM_LOOKUP_HYP_IRQS 0X56000022

/* Message IDs: VM Configuration */
#define HH_RM_RPC_MSG_ID_CALL_VM_IRQ_ACCEPT 0x56000050
#define HH_RM_RPC_MSG_ID_CALL_VM_IRQ_LEND 0x56000051
#define HH_RM_RPC_MSG_ID_CALL_VM_IRQ_RELEASE 0x56000052
#define HH_RM_RPC_MSG_ID_CALL_VM_IRQ_RECLAIM 0x56000053
#define HH_RM_RPC_MSG_ID_CALL_VM_IRQ_NOTIFY 0x56000054
#define HH_RM_RPC_MSG_ID_CALL_VM_IRQ_UNMAP 0x56000055
#define GH_RM_RPC_MSG_ID_CALL_VM_IRQ_ACCEPT 0x56000050
#define GH_RM_RPC_MSG_ID_CALL_VM_IRQ_LEND 0x56000051
#define GH_RM_RPC_MSG_ID_CALL_VM_IRQ_RELEASE 0x56000052
#define GH_RM_RPC_MSG_ID_CALL_VM_IRQ_RECLAIM 0x56000053
#define GH_RM_RPC_MSG_ID_CALL_VM_IRQ_NOTIFY 0x56000054
#define GH_RM_RPC_MSG_ID_CALL_VM_IRQ_UNMAP 0x56000055

/* Message IDs: VM Services */
#define HH_RM_RPC_MSG_ID_CALL_VM_SET_STATUS 0x56000080
#define HH_RM_RPC_MSG_ID_CALL_VM_CONSOLE_OPEN 0x56000081
#define HH_RM_RPC_MSG_ID_CALL_VM_CONSOLE_CLOSE 0x56000082
#define HH_RM_RPC_MSG_ID_CALL_VM_CONSOLE_WRITE 0x56000083
#define HH_RM_RPC_MSG_ID_CALL_VM_CONSOLE_FLUSH 0x56000084
#define GH_RM_RPC_MSG_ID_CALL_VM_SET_STATUS 0x56000080
#define GH_RM_RPC_MSG_ID_CALL_VM_CONSOLE_OPEN 0x56000081
#define GH_RM_RPC_MSG_ID_CALL_VM_CONSOLE_CLOSE 0x56000082
#define GH_RM_RPC_MSG_ID_CALL_VM_CONSOLE_WRITE 0x56000083
#define GH_RM_RPC_MSG_ID_CALL_VM_CONSOLE_FLUSH 0x56000084

/* Message IDs: VM-Host Query */
#define HH_RM_RPC_MSG_ID_CALL_VM_HOST_GET_TYPE 0x560000A0
#define GH_RM_RPC_MSG_ID_CALL_VM_HOST_GET_TYPE 0x560000A0

/* End Call type Message IDs */
/* End RPC Message IDs */

/* Call: VM_ALLOCATE */
struct hh_vm_allocate_req_payload {
        hh_vmid_t vmid;
struct gh_vm_allocate_req_payload {
        gh_vmid_t vmid;
        u16 reserved;
} __packed;

struct hh_vm_allocate_resp_payload {
struct gh_vm_allocate_resp_payload {
        u32 vmid;
} __packed;

/* Call: VM_START */
struct hh_vm_start_req_payload {
        hh_vmid_t vmid;
struct gh_vm_start_req_payload {
        gh_vmid_t vmid;
        u16 reserved;
} __packed;

struct hh_vm_start_resp_payload {
struct gh_vm_start_resp_payload {
        u32 response;
} __packed;

/* Call: CONSOLE_OPEN, CONSOLE_CLOSE, CONSOLE_FLUSH */
struct hh_vm_console_common_req_payload {
        hh_vmid_t vmid;
struct gh_vm_console_common_req_payload {
        gh_vmid_t vmid;
        u16 reserved0;
} __packed;

/* Call: CONSOLE_WRITE */
struct hh_vm_console_write_req_payload {
        hh_vmid_t vmid;
struct gh_vm_console_write_req_payload {
        gh_vmid_t vmid;
        u16 num_bytes;
        u8 data[0];
} __packed;

/* Call: GET_ID */
#define HH_RM_ID_TYPE_GUID 0
#define HH_RM_ID_TYPE_URI 1
#define HH_RM_ID_TYPE_NAME 2
#define HH_RM_ID_TYPE_SIGN_AUTH 3
#define GH_RM_ID_TYPE_GUID 0
#define GH_RM_ID_TYPE_URI 1
#define GH_RM_ID_TYPE_NAME 2
#define GH_RM_ID_TYPE_SIGN_AUTH 3

struct hh_vm_get_id_req_payload {
        hh_vmid_t vmid;
struct gh_vm_get_id_req_payload {
        gh_vmid_t vmid;
        u16 reserved;
} __packed;

struct hh_vm_get_id_resp_entry {
struct gh_vm_get_id_resp_entry {
        u8 id_type;
        u8 reserved;
        u16 id_size;
        void *id_info;
} __packed;

struct hh_vm_get_id_resp_payload {
struct gh_vm_get_id_resp_payload {
        u32 n_id_entries;
        struct hh_vm_get_id_resp_entry resp_entries[];
        struct gh_vm_get_id_resp_entry resp_entries[];
} __packed;

/* Message ID headers */
/* Call: VM_GET_HYP_RESOURCES */
#define HH_RM_RES_TYPE_DB_TX 0
#define HH_RM_RES_TYPE_DB_RX 1
#define HH_RM_RES_TYPE_MQ_TX 2
#define HH_RM_RES_TYPE_MQ_RX 3
#define HH_RM_RES_TYPE_VCPU 4
#define HH_RM_RES_TYPE_VPMGRP 5
#define HH_RM_RES_TYPE_VIRTIO_MMIO 6
#define GH_RM_RES_TYPE_DB_TX 0
#define GH_RM_RES_TYPE_DB_RX 1
#define GH_RM_RES_TYPE_MQ_TX 2
#define GH_RM_RES_TYPE_MQ_RX 3
#define GH_RM_RES_TYPE_VCPU 4
#define GH_RM_RES_TYPE_VPMGRP 5
#define GH_RM_RES_TYPE_VIRTIO_MMIO 6

struct hh_vm_get_hyp_res_req_payload {
        hh_vmid_t vmid;
struct gh_vm_get_hyp_res_req_payload {
        gh_vmid_t vmid;
        u16 reserved;
} __packed;

struct hh_vm_get_hyp_res_resp_entry {
struct gh_vm_get_hyp_res_resp_entry {
        u8 res_type;
        u8 reserved;
        hh_vmid_t partner_vmid;
        gh_vmid_t partner_vmid;
        u32 resource_handle;
        u32 resource_label;
        u32 cap_id_low;
@ -187,50 +187,50 @@ struct hh_vm_get_hyp_res_resp_entry {
        u32 size_high;
} __packed;

struct hh_vm_get_hyp_res_resp_payload {
struct gh_vm_get_hyp_res_resp_payload {
        u32 n_resource_entries;
        struct hh_vm_get_hyp_res_resp_entry resp_entries[];
        struct gh_vm_get_hyp_res_resp_entry resp_entries[];
} __packed;

/* Call: VM_IRQ_ACCEPT */
struct hh_vm_irq_accept_req_payload {
        hh_virq_handle_t virq_handle;
struct gh_vm_irq_accept_req_payload {
        gh_virq_handle_t virq_handle;
        s32 virq;
} __packed;

struct hh_vm_irq_accept_resp_payload {
struct gh_vm_irq_accept_resp_payload {
        s32 virq;
} __packed;

/* Call: VM_IRQ_LEND */
struct hh_vm_irq_lend_req_payload {
        hh_vmid_t vmid;
struct gh_vm_irq_lend_req_payload {
        gh_vmid_t vmid;
        u16 reserved;
        s32 virq;
        s32 label;
} __packed;

struct hh_vm_irq_lend_resp_payload {
        hh_virq_handle_t virq;
struct gh_vm_irq_lend_resp_payload {
        gh_virq_handle_t virq;
} __packed;

/* Call: VM_IRQ_NOTIFY */
#define HH_VM_IRQ_NOTIFY_FLAGS_LENT BIT(0)
#define HH_VM_IRQ_NOTIFY_FLAGS_RELEASED BIT(1)
#define HH_VM_IRQ_NOTIFY_FLAGS_ACCEPTED BIT(2)
#define GH_VM_IRQ_NOTIFY_FLAGS_LENT BIT(0)
#define GH_VM_IRQ_NOTIFY_FLAGS_RELEASED BIT(1)
#define GH_VM_IRQ_NOTIFY_FLAGS_ACCEPTED BIT(2)

/* Call: VM_IRQ_RELEASE */
struct hh_vm_irq_release_req_payload {
        hh_virq_handle_t virq_handle;
struct gh_vm_irq_release_req_payload {
        gh_virq_handle_t virq_handle;
} __packed;

/* Call: VM_IRQ_RECLAIM */
struct hh_vm_irq_reclaim_req_payload {
        hh_virq_handle_t virq_handle;
struct gh_vm_irq_reclaim_req_payload {
        gh_virq_handle_t virq_handle;
} __packed;

struct hh_vm_irq_notify_req_payload {
        hh_virq_handle_t virq;
struct gh_vm_irq_notify_req_payload {
        gh_virq_handle_t virq;
        u8 flags;
        u8 reserved0;
        u16 reserved1;
@ -238,7 +238,7 @@ struct hh_vm_irq_notify_req_payload {
                u16 num_vmids;
                u16 reserved;
                struct __packed {
                        hh_vmid_t vmid;
                        gh_vmid_t vmid;
                        u16 reserved;
                } vmids[0];
        } optional[0];
@ -250,19 +250,19 @@ struct hh_vm_irq_notify_req_payload {
 * to simplify allocation and treatment of packets with multiple flexible
 * array members.
 */
struct hh_mem_qcom_lookup_sgl_req_payload_hdr {
struct gh_mem_qcom_lookup_sgl_req_payload_hdr {
        u32 mem_type:8;
        u32 reserved:24;
        hh_label_t label;
        gh_label_t label;
} __packed;

struct hh_mem_qcom_lookup_sgl_resp_payload {
        hh_memparcel_handle_t memparcel_handle;
struct gh_mem_qcom_lookup_sgl_resp_payload {
        gh_memparcel_handle_t memparcel_handle;
} __packed;

/* Call: MEM_RELEASE/MEM_RECLAIM */
struct hh_mem_release_req_payload {
        hh_memparcel_handle_t memparcel_handle;
struct gh_mem_release_req_payload {
        gh_memparcel_handle_t memparcel_handle;
        u32 flags:8;
        u32 reserved:24;
} __packed;
@ -274,8 +274,8 @@ struct hh_mem_release_req_payload {
 * to simplify allocation and treatment of packets with multiple flexible
 * array members.
 */
struct hh_mem_accept_req_payload_hdr {
        hh_memparcel_handle_t memparcel_handle;
struct gh_mem_accept_req_payload_hdr {
        gh_memparcel_handle_t memparcel_handle;
        u8 mem_type;
        u8 trans_type;
        u8 flags;
@ -283,7 +283,7 @@ struct hh_mem_accept_req_payload_hdr {
        u32 validate_label;
} __packed;

struct hh_mem_accept_resp_payload {
struct gh_mem_accept_resp_payload {
        u16 n_sgl_entries;
        u16 reserved;
} __packed;
@ -295,7 +295,7 @@ struct hh_mem_accept_resp_payload {
 * to simplify allocation and treatment of packets with multiple flexible
 * array members.
 */
struct hh_mem_share_req_payload_hdr {
struct gh_mem_share_req_payload_hdr {
        u8 mem_type;
        u8 reserved1;
        u8 flags;
@ -303,30 +303,30 @@ struct hh_mem_share_req_payload_hdr {
        u32 label;
} __packed;

struct hh_mem_share_resp_payload {
        hh_memparcel_handle_t memparcel_handle;
struct gh_mem_share_resp_payload {
        gh_memparcel_handle_t memparcel_handle;
} __packed;

/* Call: MEM_NOTIFY */
struct hh_mem_notify_req_payload {
        hh_memparcel_handle_t memparcel_handle;
struct gh_mem_notify_req_payload {
        gh_memparcel_handle_t memparcel_handle;
        u32 flags:8;
        u32 reserved1:24;
        hh_label_t mem_info_tag;
        gh_label_t mem_info_tag;
} __packed;

/* End Message ID headers */

/* Common function declerations */
int hh_update_vm_prop_table(enum hh_vm_names vm_name,
                            struct hh_vm_property *vm_prop);
void *hh_rm_call(hh_rm_msgid_t message_id,
int gh_update_vm_prop_table(enum gh_vm_names vm_name,
                            struct gh_vm_property *vm_prop);
void *gh_rm_call(gh_rm_msgid_t message_id,
                 void *req_buff, size_t req_buff_size,
                 size_t *resp_buff_size, int *reply_err_code);
struct hh_vm_get_id_resp_entry *
hh_rm_vm_get_id(hh_vmid_t vmid, u32 *out_n_entries);
struct hh_vm_get_hyp_res_resp_entry *
hh_rm_vm_get_hyp_res(hh_vmid_t vmid, u32 *out_n_entries);
int hh_msgq_populate_cap_info(enum hh_msgq_label label, u64 cap_id,
struct gh_vm_get_id_resp_entry *
gh_rm_vm_get_id(gh_vmid_t vmid, u32 *out_n_entries);
struct gh_vm_get_hyp_res_resp_entry *
gh_rm_vm_get_hyp_res(gh_vmid_t vmid, u32 *out_n_entries);
int gh_msgq_populate_cap_info(enum gh_msgq_label label, u64 cap_id,
                              int direction, int irq);
#endif /* __HH_RM_DRV_PRIVATE_H */
#endif /* __GH_RM_DRV_PRIVATE_H */
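As a rough illustration of how these definitions compose, a request header might be filled in as below. This is a sketch only: the remaining gh_rm_rpc_hdr bitfields (and the driver's real marshalling inside gh_rm_call()) are not visible in this diff, so only the fields shown above are touched:

/* Sketch: populate the visible gh_rm_rpc_hdr fields for a request.
 * The message ID (e.g. GH_RM_RPC_MSG_ID_CALL_VM_START) and sequence
 * information live in header words elided from this hunk.
 */
static void sample_fill_rpc_hdr(struct gh_rm_rpc_hdr *hdr)
{
        hdr->version = GH_RM_RPC_HDR_VERSION_ONE;
        hdr->hdr_words = GH_RM_RPC_HDR_WORDS;
        hdr->type = GH_RM_RPC_TYPE_REQ;
}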
File diff suppressed because it is too large
@ -1,12 +1,12 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
|
||||
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <soc/qcom/watchdog.h>
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/haven/hh_errno.h>
|
||||
#include <linux/gunyah/gh_errno.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mod_devicetable.h>
|
||||
#include <linux/module.h>
|
||||
@ -26,7 +26,7 @@
|
||||
#define VIRT_WDT_NO_CHANGE 0xFFFF
|
||||
|
||||
/**
|
||||
* hh_wdt_call() - Sends ARM SMCCC 1.1 Calls to the hypervisor
|
||||
* gh_wdt_call() - Sends ARM SMCCC 1.1 Calls to the hypervisor
|
||||
*
|
||||
* @smc_id: The smc id needed to interact with the watchdog in the hypervisor
|
||||
* @arg1: A u32 value to be sent to the the hypervisor
|
||||
@ -39,7 +39,7 @@
|
||||
*
|
||||
* return: 0 on success, negative errno on failure.
|
||||
*/
|
||||
static struct arm_smccc_res hh_wdt_call(u32 smc_id, u32 arg1,
|
||||
static struct arm_smccc_res gh_wdt_call(u32 smc_id, u32 arg1,
|
||||
u16 arg2, u16 arg3)
|
||||
{
|
||||
struct arm_smccc_res res;
|
||||
@ -54,7 +54,7 @@ static struct arm_smccc_res hh_wdt_call(u32 smc_id, u32 arg1,
|
||||
}
|
||||
|
||||
/**
|
||||
* hh_set_wdt_bark() - Sets the bark time for the virtual watchdog
|
||||
* gh_set_wdt_bark() - Sets the bark time for the virtual watchdog
|
||||
*
|
||||
* @time: A u32 value to be converted to milliseconds (u16)
|
||||
* @wdog_dd: The qcom watchdog data structure
|
||||
@ -65,16 +65,16 @@ static struct arm_smccc_res hh_wdt_call(u32 smc_id, u32 arg1,
|
||||
*
|
||||
* return: 0 on success, negative errno on failure.
|
||||
*/
|
||||
static int hh_set_wdt_bark(u32 time, struct msm_watchdog_data *wdog_dd)
|
||||
static int gh_set_wdt_bark(u32 time, struct msm_watchdog_data *wdog_dd)
|
||||
{
|
||||
struct arm_smccc_res res;
|
||||
int hret, ret;
|
||||
u16 bark_time;
|
||||
|
||||
bark_time = (u16) time;
|
||||
res = hh_wdt_call(VIRT_WDT_SET_TIME, 0, bark_time, VIRT_WDT_NO_CHANGE);
|
||||
res = gh_wdt_call(VIRT_WDT_SET_TIME, 0, bark_time, VIRT_WDT_NO_CHANGE);
|
||||
hret = res.a0;
|
||||
ret = hh_remap_error(hret);
|
||||
ret = gh_remap_error(hret);
|
||||
if (hret) {
|
||||
dev_err(wdog_dd->dev, "failed to set bark time for vDOG, hret = %d ret = %d\n",
|
||||
hret, ret);
|
||||
@ -84,7 +84,7 @@ static int hh_set_wdt_bark(u32 time, struct msm_watchdog_data *wdog_dd)
|
||||
}
|
||||
|
||||
/**
|
||||
* hh_set_wdt_bite() - Sets the bite time for the virtual watchdog
|
||||
* gh_set_wdt_bite() - Sets the bite time for the virtual watchdog
|
||||
*
|
||||
* @time: A u32 value to be converted to milliseconds (u16)
|
||||
* @wdog_dd: The qcom watchdog data structure
|
||||
@ -95,16 +95,16 @@ static int hh_set_wdt_bark(u32 time, struct msm_watchdog_data *wdog_dd)
|
||||
*
|
||||
* return: 0 on success, negative errno on failure.
|
||||
*/
|
||||
static int hh_set_wdt_bite(u32 time, struct msm_watchdog_data *wdog_dd)
|
||||
static int gh_set_wdt_bite(u32 time, struct msm_watchdog_data *wdog_dd)
|
||||
{
|
||||
struct arm_smccc_res res;
|
||||
int hret, ret;
|
||||
u16 bite_time;
|
||||
|
||||
bite_time = (u16) time;
|
||||
res = hh_wdt_call(VIRT_WDT_SET_TIME, 0, VIRT_WDT_NO_CHANGE, bite_time);
|
||||
res = gh_wdt_call(VIRT_WDT_SET_TIME, 0, VIRT_WDT_NO_CHANGE, bite_time);
|
||||
hret = res.a0;
|
||||
ret = hh_remap_error(hret);
|
||||
ret = gh_remap_error(hret);
|
||||
if (hret) {
|
||||
dev_err(wdog_dd->dev, "failed to set bite time for vWDOG, hret = %d ret = %d\n",
|
||||
hret, ret);
|
||||
@ -114,7 +114,7 @@ static int hh_set_wdt_bite(u32 time, struct msm_watchdog_data *wdog_dd)
|
||||
}
|
||||
|
||||
/**
|
||||
* hh_reset_wdt() - Resets the virtual watchdog timer
|
||||
* gh_reset_wdt() - Resets the virtual watchdog timer
|
||||
*
|
||||
* @wdog_dd: The qcom watchdog data structure
|
||||
*
|
||||
@ -122,14 +122,14 @@ static int hh_set_wdt_bite(u32 time, struct msm_watchdog_data *wdog_dd)
|
||||
*
|
||||
* return: 0 on success, negative errno on failure.
|
||||
*/
|
||||
static int hh_reset_wdt(struct msm_watchdog_data *wdog_dd)
|
||||
static int gh_reset_wdt(struct msm_watchdog_data *wdog_dd)
|
||||
{
|
||||
struct arm_smccc_res res;
|
||||
int hret, ret;
|
||||
|
||||
res = hh_wdt_call(VIRT_WDT_PET, 0, 0, 0);
|
||||
res = gh_wdt_call(VIRT_WDT_PET, 0, 0, 0);
|
||||
hret = res.a0;
|
||||
ret = hh_remap_error(hret);
|
||||
ret = gh_remap_error(hret);
|
||||
if (hret) {
|
||||
dev_err(wdog_dd->dev, "failed to reset vWDOG, hret = %d ret = %d\n",
|
||||
hret, ret);
|
||||
@ -139,7 +139,7 @@ static int hh_reset_wdt(struct msm_watchdog_data *wdog_dd)
|
||||
}
|
||||
|
||||
/**
|
||||
* hh_enable_wdt() - Enables the virtual watchdog
|
||||
* gh_enable_wdt() - Enables the virtual watchdog
|
||||
*
|
||||
* @wdog_dd: The qcom watchdog data structure
|
||||
* @state: state value to send to watchdog
|
||||
@ -155,7 +155,7 @@ static int hh_reset_wdt(struct msm_watchdog_data *wdog_dd)
|
||||
*
|
||||
* return: 0 on success, negative errno on failure.
|
||||
*/
|
||||
static int hh_enable_wdt(u32 state, struct msm_watchdog_data *wdog_dd)
|
||||
static int gh_enable_wdt(u32 state, struct msm_watchdog_data *wdog_dd)
|
||||
{
|
||||
struct arm_smccc_res res;
|
||||
int hret, ret;
|
||||
@ -164,9 +164,9 @@ static int hh_enable_wdt(u32 state, struct msm_watchdog_data *wdog_dd)
dev_err(wdog_dd->dev, "vWDT already enabled\n");
return 0;
}
res = hh_wdt_call(VIRT_WDT_CONTROL, 3, 0, 0);
res = gh_wdt_call(VIRT_WDT_CONTROL, 3, 0, 0);
hret = res.a0;
ret = hh_remap_error(hret);
ret = gh_remap_error(hret);
if (hret) {
dev_err(wdog_dd->dev, "failed enabling vWDOG, hret = %d ret = %d\n",
hret, ret);
@ -176,7 +176,7 @@ static int hh_enable_wdt(u32 state, struct msm_watchdog_data *wdog_dd)
}

/**
* hh_disable_wdt() - Disables the virtual watchdog
* gh_disable_wdt() - Disables the virtual watchdog
*
* @wdog_dd: The qcom watchdog data structure
*
@ -191,7 +191,7 @@ static int hh_enable_wdt(u32 state, struct msm_watchdog_data *wdog_dd)
*
* return: 0 on success, negative errno on failure.
*/
static int hh_disable_wdt(struct msm_watchdog_data *wdog_dd)
static int gh_disable_wdt(struct msm_watchdog_data *wdog_dd)
{
struct arm_smccc_res res;
int hret, ret;
@ -200,9 +200,9 @@ static int hh_disable_wdt(struct msm_watchdog_data *wdog_dd)
dev_err(wdog_dd->dev, "vWDT already disabled\n");
return 0;
}
res = hh_wdt_call(VIRT_WDT_CONTROL, 2, 0, 0);
res = gh_wdt_call(VIRT_WDT_CONTROL, 2, 0, 0);
hret = res.a0;
ret = hh_remap_error(hret);
ret = gh_remap_error(hret);
if (hret) {
dev_err(wdog_dd->dev, "failed disabling VDOG, hret = %d ret = %d\n",
hret, ret);
@ -212,7 +212,7 @@ static int hh_disable_wdt(struct msm_watchdog_data *wdog_dd)
}

/**
* hh_get_wdt_status() - Displays the status of the virtual watchdog
* gh_get_wdt_status() - Displays the status of the virtual watchdog
*
* @wdog_dd: The qcom watchdog data structure
*
@ -220,14 +220,14 @@ static int hh_disable_wdt(struct msm_watchdog_data *wdog_dd)
*
* return: 0 on success, negative errno on failure.
*/
static int hh_show_wdt_status(struct msm_watchdog_data *wdog_dd)
static int gh_show_wdt_status(struct msm_watchdog_data *wdog_dd)
{
struct arm_smccc_res res;
int hret, ret;

res = hh_wdt_call(VIRT_WDT_STATUS, 0, 0, 0);
res = gh_wdt_call(VIRT_WDT_STATUS, 0, 0, 0);
hret = res.a0;
ret = hh_remap_error(hret);
ret = gh_remap_error(hret);
if (hret) {
dev_err(wdog_dd->dev, "failed to get vWDOG status, hret = %d ret = %d\n",
hret, ret);
@ -240,55 +240,55 @@ static int hh_show_wdt_status(struct msm_watchdog_data *wdog_dd)
return ret;
}

static struct qcom_wdt_ops hh_wdt_ops = {
.set_bark_time = hh_set_wdt_bark,
.set_bite_time = hh_set_wdt_bite,
.reset_wdt = hh_reset_wdt,
.enable_wdt = hh_enable_wdt,
.disable_wdt = hh_disable_wdt,
.show_wdt_status = hh_show_wdt_status
static struct qcom_wdt_ops gh_wdt_ops = {
.set_bark_time = gh_set_wdt_bark,
.set_bite_time = gh_set_wdt_bite,
.reset_wdt = gh_reset_wdt,
.enable_wdt = gh_enable_wdt,
.disable_wdt = gh_disable_wdt,
.show_wdt_status = gh_show_wdt_status
};

static int hh_wdt_probe(struct platform_device *pdev)
static int gh_wdt_probe(struct platform_device *pdev)
{
struct msm_watchdog_data *wdog_dd;

wdog_dd = devm_kzalloc(&pdev->dev, sizeof(*wdog_dd), GFP_KERNEL);
if (!wdog_dd)
return -ENOMEM;
wdog_dd->ops = &hh_wdt_ops;
wdog_dd->ops = &gh_wdt_ops;

return qcom_wdt_register(pdev, wdog_dd, "hh-watchdog");
return qcom_wdt_register(pdev, wdog_dd, "gh-watchdog");
}

static const struct dev_pm_ops hh_wdt_dev_pm_ops = {
static const struct dev_pm_ops gh_wdt_dev_pm_ops = {
#ifdef CONFIG_PM_SLEEP
.suspend_late = qcom_wdt_pet_suspend,
.resume_early = qcom_wdt_pet_resume,
#endif
};

static const struct of_device_id hh_wdt_match_table[] = {
static const struct of_device_id gh_wdt_match_table[] = {
{ .compatible = "qcom,hh-watchdog" },
{}
};

static struct platform_driver hh_wdt_driver = {
.probe = hh_wdt_probe,
static struct platform_driver gh_wdt_driver = {
.probe = gh_wdt_probe,
.remove = qcom_wdt_remove,
.driver = {
.name = "hh-watchdog",
.pm = &hh_wdt_dev_pm_ops,
.of_match_table = hh_wdt_match_table,
.name = "gh-watchdog",
.pm = &gh_wdt_dev_pm_ops,
.of_match_table = gh_wdt_match_table,
},
};

static int __init init_watchdog(void)
{
return platform_driver_register(&hh_wdt_driver);
return platform_driver_register(&gh_wdt_driver);
}

#if IS_MODULE(CONFIG_HH_VIRT_WATCHDOG)
#if IS_MODULE(CONFIG_GH_VIRT_WATCHDOG)
module_init(init_watchdog);
#else
pure_initcall(init_watchdog);
@ -296,8 +296,8 @@ pure_initcall(init_watchdog);

static __exit void exit_watchdog(void)
{
platform_driver_unregister(&hh_wdt_driver);
platform_driver_unregister(&gh_wdt_driver);
}
module_exit(exit_watchdog);
MODULE_DESCRIPTION("QCOM Haven Watchdog Driver");
MODULE_DESCRIPTION("QCOM Gunyah Watchdog Driver");
MODULE_LICENSE("GPL v2");

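Every watchdog path above follows one call shape: issue the SMCCC-based hypercall, then fold the hypervisor status returned in res.a0 into a Linux errno with gh_remap_error(). A minimal sketch of that shape, assuming the driver-local gh_wdt_call() helper and the VIRT_WDT_CONTROL opcodes seen above (3 = enable, 2 = disable):

#include <linux/arm-smccc.h>

/* Sketch only; mirrors the gh_enable_wdt()/gh_disable_wdt() flow above. */
static int gh_wdt_control(struct msm_watchdog_data *wdog_dd, u32 op)
{
	struct arm_smccc_res res;
	int hret, ret;

	res = gh_wdt_call(VIRT_WDT_CONTROL, op, 0, 0); /* driver-local helper */
	hret = res.a0;
	ret = gh_remap_error(hret); /* Gunyah status -> negative errno */
	if (hret)
		dev_err(wdog_dd->dev, "vWDT op %u failed, hret = %d ret = %d\n",
			op, hret, ret);
	return ret;
}
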
@ -24,36 +24,36 @@
#include <linux/eventfd.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/hh_virtio_backend.h>
#include <linux/gh_virtio_backend.h>
#include <linux/of_irq.h>
#include <uapi/linux/virtio_mmio.h>
#include <linux/haven/hcall.h>
#include <linux/haven/hh_rm_drv.h>
#include <linux/gunyah/hcall.h>
#include <linux/gunyah/gh_rm_drv.h>
#include <linux/pgtable.h>
#include <soc/qcom/secure_buffer.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>

#define CREATE_TRACE_POINTS
#include <trace/events/hh_virtio_backend.h>
#include <trace/events/gh_virtio_backend.h>
#undef CREATE_TRACE_POINTS

#define MAX_DEVICE_NAME 32
#define MAX_VM_NAME 32
#define MAX_CDEV_NAME 64
#define MAX_VM_DEVICES 32
#define VIRTIO_BE_CLASS "hh_virtio_backend"
#define VIRTIO_BE_CLASS "gh_virtio_backend"
#define MAX_QUEUES 4
#define MAX_IO_CONTEXTS MAX_QUEUES

#define VIRTIO_PRINT_MARKER "virtio_backend"

#define assert_virq hh_hcall_virtio_mmio_backend_assert_virq
#define set_dev_features hh_hcall_virtio_mmio_backend_set_dev_features
#define set_queue_num_max hh_hcall_virtio_mmio_backend_set_queue_num_max
#define get_drv_features hh_hcall_virtio_mmio_backend_get_drv_features
#define get_queue_info hh_hcall_virtio_mmio_backend_get_queue_info
#define get_event hh_hcall_virtio_mmio_backend_get_event
#define ack_reset hh_hcall_virtio_mmio_backend_ack_reset
#define assert_virq gh_hcall_virtio_mmio_backend_assert_virq
#define set_dev_features gh_hcall_virtio_mmio_backend_set_dev_features
#define set_queue_num_max gh_hcall_virtio_mmio_backend_set_queue_num_max
#define get_drv_features gh_hcall_virtio_mmio_backend_get_drv_features
#define get_queue_info gh_hcall_virtio_mmio_backend_get_queue_info
#define get_event gh_hcall_virtio_mmio_backend_get_event
#define ack_reset gh_hcall_virtio_mmio_backend_ack_reset

static DEFINE_MUTEX(vm_mutex);
static DEFINE_IDA(vm_minor_id);
@ -64,7 +64,7 @@ static dev_t vbe_dev;

struct shared_memory {
struct resource r;
u32 haven_label, shm_memparcel;
u32 gunyah_label, shm_memparcel;
};

struct virt_machine {
@ -124,7 +124,7 @@ struct virtio_backend_device {
u32 features[2];
u32 queue_num_max[MAX_QUEUES];
struct mutex mutex;
hh_capid_t cap_id;
gh_capid_t cap_id;
/* Backend program supplied config data */
char *config_data;
u32 config_size;
@ -197,7 +197,7 @@ static int vb_dev_irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode,
if (flags & EPOLLIN) {
int rc = assert_virq(vb_dev->cap_id, 1);

trace_hh_virtio_backend_irq_inj(vb_dev->label, rc);
trace_gh_virtio_backend_irq_inj(vb_dev->label, rc);
}

if (flags & EPOLLHUP)
@ -306,7 +306,7 @@ static void signal_vqs(struct virtio_backend_device *vb_dev)
if ((vb_dev->vdev_event_data & flags) && vb_dev->ioctx[i].ctx) {
eventfd_signal(vb_dev->ioctx[i].ctx, 1);
vb_dev->vdev_event_data &= ~flags;
trace_hh_virtio_backend_queue_notify(vb_dev->label, i);
trace_gh_virtio_backend_queue_notify(vb_dev->label, i);
}
}
}
@ -324,7 +324,7 @@ static long virtio_backend_ioctl(struct file *file, unsigned int cmd,
struct virtio_ack_reset r;
struct virtio_config_data d;
struct virtio_queue_info qi;
struct hh_hcall_virtio_queue_info qinfo;
struct gh_hcall_virtio_queue_info qinfo;
struct virtio_driver_features df;
struct virtio_event ve;
u64 features;
@ -338,7 +338,7 @@ static long virtio_backend_ioctl(struct file *file, unsigned int cmd,
return -EINVAL;

switch (cmd) {
case HH_SET_APP_READY:
case GH_SET_APP_READY:
spin_lock(&vm->vb_dev_lock);
vm->app_ready = 1;
if (vm->waiting_for_app_ready)
@ -347,13 +347,13 @@ static long virtio_backend_ioctl(struct file *file, unsigned int cmd,
pr_debug("%s: App is ready!!\n", VIRTIO_PRINT_MARKER);
break;

case HH_GET_SHARED_MEMORY_SIZE:
case GH_GET_SHARED_MEMORY_SIZE:
if (copy_to_user(argp, &vm->shmem_size,
sizeof(vm->shmem_size)))
return -EFAULT;
break;

case HH_IOEVENTFD:
case GH_IOEVENTFD:
if (copy_from_user(&efd, argp, sizeof(efd)))
return -EFAULT;

@ -373,7 +373,7 @@ static long virtio_backend_ioctl(struct file *file, unsigned int cmd,

return ret;

case HH_IRQFD:
case GH_IRQFD:
if (copy_from_user(&ifd, argp, sizeof(ifd)))
return -EFAULT;

@ -393,7 +393,7 @@ static long virtio_backend_ioctl(struct file *file, unsigned int cmd,

return ret;

case HH_WAIT_FOR_EVENT:
case GH_WAIT_FOR_EVENT:
if (copy_from_user(&ve, argp, sizeof(ve)))
return -EFAULT;

@ -459,7 +459,7 @@ static long virtio_backend_ioctl(struct file *file, unsigned int cmd,

spin_unlock_irqrestore(&vb_dev->lock, flags);

trace_hh_virtio_backend_wait_event(vb_dev->label, vb_dev->cur_event,
trace_gh_virtio_backend_wait_event(vb_dev->label, vb_dev->cur_event,
org_event, vb_dev->cur_event_data, org_data);

if (!vb_dev->cur_event)
@ -475,7 +475,7 @@ static long virtio_backend_ioctl(struct file *file, unsigned int cmd,

break;

case HH_GET_DRIVER_FEATURES:
case GH_GET_DRIVER_FEATURES:
if (copy_from_user(&df, argp, sizeof(df)))
return -EFAULT;

@ -506,7 +506,7 @@ static long virtio_backend_ioctl(struct file *file, unsigned int cmd,

break;

case HH_GET_QUEUE_INFO:
case GH_GET_QUEUE_INFO:
if (copy_from_user(&qi, argp, sizeof(qi)))
return -EFAULT;

@ -545,7 +545,7 @@ static long virtio_backend_ioctl(struct file *file, unsigned int cmd,

break;

case HH_ACK_DRIVER_OK:
case GH_ACK_DRIVER_OK:
label = (u32) arg;

if (!label)
@ -561,7 +561,7 @@ static long virtio_backend_ioctl(struct file *file, unsigned int cmd,

break;

case HH_ACK_RESET:
case GH_ACK_RESET:
if (copy_from_user(&r, argp, sizeof(r)))
return -EFAULT;

@ -578,7 +578,7 @@ static long virtio_backend_ioctl(struct file *file, unsigned int cmd,
pr_debug("%s: ack_reset for label %x!\n", VIRTIO_PRINT_MARKER, r.label);
break;

case HH_SET_DEVICE_FEATURES:
case GH_SET_DEVICE_FEATURES:
if (copy_from_user(&f, argp, sizeof(f)))
return -EFAULT;

@ -596,7 +596,7 @@ static long virtio_backend_ioctl(struct file *file, unsigned int cmd,
f.label, f.features_sel, f.features);
break;

case HH_SET_QUEUE_NUM_MAX:
case GH_SET_QUEUE_NUM_MAX:
if (copy_from_user(&q, argp, sizeof(q)))
return -EFAULT;

@ -616,7 +616,7 @@ static long virtio_backend_ioctl(struct file *file, unsigned int cmd,

break;

case HH_GET_DRIVER_CONFIG_DATA:
case GH_GET_DRIVER_CONFIG_DATA:
if (copy_from_user(&d, argp, sizeof(d)))
return -EFAULT;

@ -639,7 +639,7 @@ static long virtio_backend_ioctl(struct file *file, unsigned int cmd,
vb_dev_put(vb_dev);
return ret;

case HH_SET_DEVICE_CONFIG_DATA:
case GH_SET_DEVICE_CONFIG_DATA:
if (copy_from_user(&d, argp, sizeof(d)))
return -EFAULT;

@ -971,12 +971,12 @@ note_shared_buffers(struct device_node *np, struct virt_machine *vm)
return -EINVAL;
}

ret = of_property_read_u32(snp, "haven-label",
&vm->shmem[idx].haven_label);
ret = of_property_read_u32(snp, "gunyah-label",
&vm->shmem[idx].gunyah_label);
if (ret) {
of_node_put(snp);
kfree(vm->shmem);
pr_err("%s: haven-label property absent at index %d\n",
pr_err("%s: gunyah-label property absent at index %d\n",
VIRTIO_PRINT_MARKER, idx);
return -EINVAL;
}
@ -1063,7 +1063,7 @@ static struct virt_machine *find_vm_by_name(const char *vm_name)
return v;
}

static int hh_virtio_backend_probe(struct platform_device *pdev)
static int gh_virtio_backend_probe(struct platform_device *pdev)
{
int ret;
struct device_node *np = pdev->dev.of_node, *vm_np;
@ -1159,7 +1159,7 @@ static int hh_virtio_backend_probe(struct platform_device *pdev)
return 0;
}

static int __exit hh_virtio_backend_remove(struct platform_device *pdev)
static int __exit gh_virtio_backend_remove(struct platform_device *pdev)
{
struct virtio_backend_device *vb_dev = platform_get_drvdata(pdev);
struct virt_machine *vm;
@ -1247,7 +1247,7 @@ static irqreturn_t vdev_interrupt(int irq, void *data)
unsigned long flags;

ret = get_event(vb_dev->cap_id, &event_data, &event);
trace_hh_virtio_backend_irq(vb_dev->label, event, event_data, ret);
trace_gh_virtio_backend_irq(vb_dev->label, event, event_data, ret);
if (ret || !event)
return IRQ_HANDLED;

@ -1268,7 +1268,7 @@ static irqreturn_t vdev_interrupt(int irq, void *data)
}

static int
unshare_a_vm_buffer(hh_vmid_t self, hh_vmid_t peer, struct resource *r)
unshare_a_vm_buffer(gh_vmid_t self, gh_vmid_t peer, struct resource *r)
{
u32 src_vmlist[2] = {self, peer};
int dst_vmlist[1] = {self};
@ -1284,20 +1284,20 @@ unshare_a_vm_buffer(hh_vmid_t self, hh_vmid_t peer, struct resource *r)
return ret;
}

static int share_a_vm_buffer(hh_vmid_t self, hh_vmid_t peer, int haven_label,
static int share_a_vm_buffer(gh_vmid_t self, gh_vmid_t peer, int gunyah_label,
struct resource *r, u32 *shm_memparcel)
{
u32 src_vmlist[1] = {self};
int dst_vmlist[2] = {self, peer};
int dst_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE};
struct hh_acl_desc *acl;
struct hh_sgl_desc *sgl;
struct gh_acl_desc *acl;
struct gh_sgl_desc *sgl;
int ret;

acl = kzalloc(offsetof(struct hh_acl_desc, acl_entries[2]), GFP_KERNEL);
acl = kzalloc(offsetof(struct gh_acl_desc, acl_entries[2]), GFP_KERNEL);
if (!acl)
return -ENOMEM;
sgl = kzalloc(offsetof(struct hh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
if (!sgl) {
kfree(acl);
return -ENOMEM;
@ -1315,15 +1315,15 @@ static int share_a_vm_buffer(hh_vmid_t self, hh_vmid_t peer, int haven_label,

acl->n_acl_entries = 2;
acl->acl_entries[0].vmid = (u16)self;
acl->acl_entries[0].perms = HH_RM_ACL_R | HH_RM_ACL_W;
acl->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W;
acl->acl_entries[1].vmid = (u16)peer;
acl->acl_entries[1].perms = HH_RM_ACL_R | HH_RM_ACL_W;
acl->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W;

sgl->n_sgl_entries = 1;
sgl->sgl_entries[0].ipa_base = r->start;
sgl->sgl_entries[0].size = resource_size(r);
ret = hh_rm_mem_qcom_lookup_sgl(HH_RM_MEM_TYPE_NORMAL,
haven_label, acl, sgl, NULL, shm_memparcel);
ret = gh_rm_mem_qcom_lookup_sgl(GH_RM_MEM_TYPE_NORMAL,
gunyah_label, acl, sgl, NULL, shm_memparcel);
if (ret) {
pr_err("%s: lookup_sgl failed %d\n", VIRTIO_PRINT_MARKER, ret);
unshare_a_vm_buffer(self, peer, r);
@ -1335,17 +1335,17 @@ static int share_a_vm_buffer(hh_vmid_t self, hh_vmid_t peer, int haven_label,
return ret;
}

static int share_vm_buffers(struct virt_machine *vm, hh_vmid_t peer)
static int share_vm_buffers(struct virt_machine *vm, gh_vmid_t peer)
{
int i, ret;
hh_vmid_t self_vmid;
gh_vmid_t self_vmid;

ret = hh_rm_get_vmid(HH_PRIMARY_VM, &self_vmid);
ret = gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid);
if (ret)
return ret;

for (i = 0; i < vm->shmem_entries; ++i) {
ret = share_a_vm_buffer(self_vmid, peer, vm->shmem[i].haven_label,
ret = share_a_vm_buffer(self_vmid, peer, vm->shmem[i].gunyah_label,
&vm->shmem[i].r, &vm->shmem[i].shm_memparcel);
if (ret) {
i--;
@ -1363,8 +1363,8 @@ static int share_vm_buffers(struct virt_machine *vm, hh_vmid_t peer)
return ret;
}

static int hh_virtio_mmio_init(hh_vmid_t vmid, const char *vm_name, hh_label_t label,
hh_capid_t cap_id, int linux_irq, u64 base, u64 size)
static int gh_virtio_mmio_init(gh_vmid_t vmid, const char *vm_name, gh_label_t label,
gh_capid_t cap_id, int linux_irq, u64 base, u64 size)
{
struct virt_machine *vm;
struct virtio_backend_device *vb_dev;
@ -1496,21 +1496,21 @@ VIRTIO_PRINT_MARKER, label);
return 0;
}

static const struct of_device_id hh_virtio_backend_match_table[] = {
static const struct of_device_id gh_virtio_backend_match_table[] = {
{ .compatible = "qcom,virtio_backend" },
{ },
};

static struct platform_driver hh_virtio_backend_driver = {
.probe = hh_virtio_backend_probe,
.remove = hh_virtio_backend_remove,
static struct platform_driver gh_virtio_backend_driver = {
.probe = gh_virtio_backend_probe,
.remove = gh_virtio_backend_remove,
.driver = {
.name = "hh_virtio_backend",
.of_match_table = hh_virtio_backend_match_table,
.name = "gh_virtio_backend",
.of_match_table = gh_virtio_backend_match_table,
},
};

static int __init hh_virtio_backend_init(void)
static int __init gh_virtio_backend_init(void)
{
int ret;

@ -1518,13 +1518,13 @@ static int __init hh_virtio_backend_init(void)
if (ret)
return ret;

ret = hh_rm_set_virtio_mmio_cb(hh_virtio_mmio_init);
ret = gh_rm_set_virtio_mmio_cb(gh_virtio_mmio_init);
if (ret) {
vb_devclass_deinit();
return ret;
}

ret = platform_driver_register(&hh_virtio_backend_driver);
ret = platform_driver_register(&gh_virtio_backend_driver);
if (ret) {
gh_rm_unset_virtio_mmio_cb();
vb_devclass_deinit();
@ -1532,15 +1532,15 @@ static int __init hh_virtio_backend_init(void)

return ret;
}
module_init(hh_virtio_backend_init);
module_init(gh_virtio_backend_init);

static void __exit hh_virtio_backend_exit(void)
static void __exit gh_virtio_backend_exit(void)
{
gh_rm_unset_virtio_mmio_cb();
platform_driver_unregister(&hh_virtio_backend_driver);
platform_driver_unregister(&gh_virtio_backend_driver);
vb_devclass_deinit();
}
module_exit(hh_virtio_backend_exit);
module_exit(gh_virtio_backend_exit);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Haven Virtio Backend driver");
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Gunyah Virtio Backend driver");
MODULE_LICENSE("GPL v2");

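For orientation, a userspace backend program would drive the renamed ioctl interface above roughly as follows. This is a hypothetical sketch: the device node name and the size type are assumptions, and the GH_* command macros come from the backend's uapi header, which is not part of this hunk.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
/* plus the uapi header that defines the GH_* ioctl commands (assumed) */

int main(void)
{
	unsigned long long shmem_size = 0; /* assumed width of vm->shmem_size */
	int fd = open("/dev/gh_virtio_backend0", O_RDWR); /* assumed node name */

	if (fd < 0)
		return 1;
	if (ioctl(fd, GH_GET_SHARED_MEMORY_SIZE, &shmem_size) == 0)
		printf("shared memory: %llu bytes\n", shmem_size);
	ioctl(fd, GH_SET_APP_READY, 0); /* tell the driver the backend is up */
	return 0;
}
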
@ -133,8 +133,8 @@ config VIRTIO_MMIO_SWIOTLB
shared with another virtual machine which is hosting virtio
backend drivers.

config HH_VIRTIO_DEBUG
bool "Enable debug features for virtio front-end drivers on Haven Hypervisor"
config GH_VIRTIO_DEBUG
bool "Enable debug features for virtio front-end drivers on Gunyah Hypervisor"
depends on VIRTIO_MMIO
help
Activating this feature provides additional tracepoints in various

@ -70,9 +70,9 @@
#include <uapi/linux/virtio_mmio.h>
#include <linux/virtio_ring.h>

#ifdef CONFIG_HH_VIRTIO_DEBUG
#ifdef CONFIG_GH_VIRTIO_DEBUG
#define CREATE_TRACE_POINTS
#include <trace/events/hh_virtio_frontend.h>
#include <trace/events/gh_virtio_frontend.h>
#undef CREATE_TRACE_POINTS
#endif

@ -297,7 +297,7 @@ static bool vm_notify(struct virtqueue *vq)
{
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);

#ifdef CONFIG_HH_VIRTIO_DEBUG
#ifdef CONFIG_GH_VIRTIO_DEBUG
trace_virtio_mmio_vm_notify(vq->vdev->index, vq->index);
#endif
/* We write the queue's selector into the notification register to
@ -317,7 +317,7 @@ static irqreturn_t vm_interrupt(int irq, void *opaque)

/* Read and acknowledge interrupts */
status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
#ifdef CONFIG_HH_VIRTIO_DEBUG
#ifdef CONFIG_GH_VIRTIO_DEBUG
trace_virtio_mmio_vm_interrupt(vm_dev->vdev.index, status);
#endif

@ -11,8 +11,8 @@
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_HH_VIRTIO_DEBUG
#include <trace/events/hh_virtio_frontend.h>
#ifdef CONFIG_GH_VIRTIO_DEBUG
#include <trace/events/gh_virtio_frontend.h>
#endif
#include <xen/xen.h>

@ -564,7 +564,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,

pr_debug("Added buffer head %i to %p\n", head, vq);
END_USE(vq);
#ifdef CONFIG_HH_VIRTIO_DEBUG
#ifdef CONFIG_GH_VIRTIO_DEBUG
trace_virtio_vring_split_add(_vq->vdev->index, head,
vq->split.avail_idx_shadow-1, descs_used, vq->vq.num_free);
#endif
@ -656,7 +656,7 @@ static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
/* Plus final descriptor */
vq->vq.num_free++;

#ifdef CONFIG_HH_VIRTIO_DEBUG
#ifdef CONFIG_GH_VIRTIO_DEBUG
trace_virtio_detach_buf(vq->vq.vdev->index, vq->free_head, vq->vq.num_free);
#endif

@ -708,7 +708,7 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
return NULL;
}

#ifdef CONFIG_HH_VIRTIO_DEBUG
#ifdef CONFIG_GH_VIRTIO_DEBUG
trace_virtio_get_buf_ctx_split(_vq->vdev->index, vq->last_used_idx,
virtio16_to_cpu(vq->vq.vdev, vq->split.vring.used->idx));
#endif

@ -1,29 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
#ifndef __HH_HCALL_H
#define __HH_HCALL_H
#ifndef __GH_HCALL_H
#define __GH_HCALL_H

#include <linux/err.h>
#include <linux/types.h>

#include <linux/haven/hcall_common.h>
#include <linux/haven/hh_common.h>
#include <asm/haven/hcall.h>
#include <linux/gunyah/hcall_common.h>
#include <linux/gunyah/gh_common.h>
#include <asm/gunyah/hcall.h>

struct hh_hcall_hyp_identify_resp {
struct gh_hcall_hyp_identify_resp {
u64 api_info;
u64 flags[3];
};

static inline int hh_hcall_hyp_identify(struct hh_hcall_hyp_identify_resp *resp)
static inline int gh_hcall_hyp_identify(struct gh_hcall_hyp_identify_resp *resp)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x6000,
(struct hh_hcall_args){ 0 },
ret = _gh_hcall(0x6000,
(struct gh_hcall_args){ 0 },
&_resp);

if (resp) {
@ -36,44 +36,44 @@ static inline int hh_hcall_hyp_identify(struct hh_hcall_hyp_identify_resp *resp)
return 0;
}

static inline int hh_hcall_dbl_bind(hh_capid_t dbl_capid, hh_capid_t vic_capid,
hh_virq_handle_t virq_info)
static inline int gh_hcall_dbl_bind(gh_capid_t dbl_capid, gh_capid_t vic_capid,
gh_virq_handle_t virq_info)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x6010,
(struct hh_hcall_args){ dbl_capid, vic_capid, virq_info },
ret = _gh_hcall(0x6010,
(struct gh_hcall_args){ dbl_capid, vic_capid, virq_info },
&_resp);

return ret;
}

static inline int hh_hcall_dbl_unbind(hh_capid_t dbl_capid)
static inline int gh_hcall_dbl_unbind(gh_capid_t dbl_capid)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x6011,
(struct hh_hcall_args){ dbl_capid },
ret = _gh_hcall(0x6011,
(struct gh_hcall_args){ dbl_capid },
&_resp);

return ret;
}

struct hh_hcall_dbl_send_resp {
struct gh_hcall_dbl_send_resp {
u64 old_flags;
};

static inline int hh_hcall_dbl_send(hh_capid_t dbl_capid,
hh_dbl_flags_t new_flags,
struct hh_hcall_dbl_send_resp *resp)
static inline int gh_hcall_dbl_send(gh_capid_t dbl_capid,
gh_dbl_flags_t new_flags,
struct gh_hcall_dbl_send_resp *resp)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x6012,
(struct hh_hcall_args){ dbl_capid, new_flags },
ret = _gh_hcall(0x6012,
(struct gh_hcall_args){ dbl_capid, new_flags },
&_resp);

if (!ret && resp)
@ -82,19 +82,19 @@ static inline int hh_hcall_dbl_send(hh_capid_t dbl_capid,
return ret;
}

struct hh_hcall_dbl_recv_resp {
struct gh_hcall_dbl_recv_resp {
u64 old_flags;
};

static inline int hh_hcall_dbl_recv(hh_capid_t dbl_capid,
hh_dbl_flags_t clear_flags,
struct hh_hcall_dbl_recv_resp *resp)
static inline int gh_hcall_dbl_recv(gh_capid_t dbl_capid,
gh_dbl_flags_t clear_flags,
struct gh_hcall_dbl_recv_resp *resp)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x6013,
(struct hh_hcall_args){ dbl_capid, clear_flags },
ret = _gh_hcall(0x6013,
(struct gh_hcall_args){ dbl_capid, clear_flags },
&_resp);

if (!ret && resp)
@ -103,97 +103,97 @@ static inline int hh_hcall_dbl_recv(hh_capid_t dbl_capid,
return ret;
}

static inline int hh_hcall_dbl_reset(hh_capid_t dbl_capid)
static inline int gh_hcall_dbl_reset(gh_capid_t dbl_capid)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x6014,
(struct hh_hcall_args){ dbl_capid },
ret = _gh_hcall(0x6014,
(struct gh_hcall_args){ dbl_capid },
&_resp);

return ret;
}

static inline int hh_hcall_dbl_mask(hh_capid_t dbl_capid,
hh_dbl_flags_t enable_mask,
hh_dbl_flags_t ack_mask)
static inline int gh_hcall_dbl_mask(gh_capid_t dbl_capid,
gh_dbl_flags_t enable_mask,
gh_dbl_flags_t ack_mask)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x6015,
(struct hh_hcall_args){ dbl_capid, enable_mask, ack_mask },
ret = _gh_hcall(0x6015,
(struct gh_hcall_args){ dbl_capid, enable_mask, ack_mask },
&_resp);

return ret;
}

static inline int hh_hcall_msgq_bind_send(hh_capid_t msgq_capid,
hh_capid_t vic_capid,
hh_virq_handle_t virq_info)
static inline int gh_hcall_msgq_bind_send(gh_capid_t msgq_capid,
gh_capid_t vic_capid,
gh_virq_handle_t virq_info)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x6017,
(struct hh_hcall_args){ msgq_capid, vic_capid, virq_info },
ret = _gh_hcall(0x6017,
(struct gh_hcall_args){ msgq_capid, vic_capid, virq_info },
&_resp);

return ret;
}

static inline int hh_hcall_msgq_bind_recv(hh_capid_t msgq_capid,
hh_capid_t vic_capid,
hh_virq_handle_t virq_info)
static inline int gh_hcall_msgq_bind_recv(gh_capid_t msgq_capid,
gh_capid_t vic_capid,
gh_virq_handle_t virq_info)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x6018,
(struct hh_hcall_args){ msgq_capid, vic_capid, virq_info },
ret = _gh_hcall(0x6018,
(struct gh_hcall_args){ msgq_capid, vic_capid, virq_info },
&_resp);

return ret;
}

static inline int hh_hcall_msgq_unbind_send(hh_capid_t msgq_capid)
static inline int gh_hcall_msgq_unbind_send(gh_capid_t msgq_capid)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x6019,
(struct hh_hcall_args){ msgq_capid },
ret = _gh_hcall(0x6019,
(struct gh_hcall_args){ msgq_capid },
&_resp);

return ret;
}

static inline int hh_hcall_msgq_unbind_recv(hh_capid_t msgq_capid)
static inline int gh_hcall_msgq_unbind_recv(gh_capid_t msgq_capid)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x601A,
(struct hh_hcall_args){ msgq_capid },
ret = _gh_hcall(0x601A,
(struct gh_hcall_args){ msgq_capid },
&_resp);

return ret;
}

struct hh_hcall_msgq_send_resp {
struct gh_hcall_msgq_send_resp {
bool not_full;
};

static inline int hh_hcall_msgq_send(hh_capid_t msgq_capid, size_t size,
static inline int gh_hcall_msgq_send(gh_capid_t msgq_capid, size_t size,
void *data, u64 send_flags,
struct hh_hcall_msgq_send_resp *resp)
struct gh_hcall_msgq_send_resp *resp)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x601B,
(struct hh_hcall_args){ msgq_capid, size, (unsigned long)data,
ret = _gh_hcall(0x601B,
(struct gh_hcall_args){ msgq_capid, size, (unsigned long)data,
send_flags },
&_resp);

@ -203,20 +203,20 @@ static inline int hh_hcall_msgq_send(hh_capid_t msgq_capid, size_t size,
return ret;
}

struct hh_hcall_msgq_recv_resp {
struct gh_hcall_msgq_recv_resp {
size_t recv_size;
bool not_empty;
};

static inline int hh_hcall_msgq_recv(hh_capid_t msgq_capid, void *buffer,
static inline int gh_hcall_msgq_recv(gh_capid_t msgq_capid, void *buffer,
size_t max_size,
struct hh_hcall_msgq_recv_resp *resp)
struct gh_hcall_msgq_recv_resp *resp)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x601C,
(struct hh_hcall_args){ msgq_capid, (unsigned long)buffer,
ret = _gh_hcall(0x601C,
(struct gh_hcall_args){ msgq_capid, (unsigned long)buffer,
max_size },
&_resp);

@ -228,84 +228,84 @@ static inline int hh_hcall_msgq_recv(hh_capid_t msgq_capid, void *buffer,
return ret;
}

static inline int hh_hcall_msgq_flush(hh_capid_t msgq_capid)
static inline int gh_hcall_msgq_flush(gh_capid_t msgq_capid)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x601D,
(struct hh_hcall_args){ msgq_capid },
ret = _gh_hcall(0x601D,
(struct gh_hcall_args){ msgq_capid },
&_resp);

return ret;
}

static inline int hh_hcall_msgq_configure_send(hh_capid_t msgq_capid,
static inline int gh_hcall_msgq_configure_send(gh_capid_t msgq_capid,
long not_full_threshold,
long not_full_delay)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x601F,
(struct hh_hcall_args){ msgq_capid, not_full_threshold,
ret = _gh_hcall(0x601F,
(struct gh_hcall_args){ msgq_capid, not_full_threshold,
not_full_delay, -1 },
&_resp);

return ret;
}

static inline int hh_hcall_msgq_configure_recv(hh_capid_t msgq_capid,
static inline int gh_hcall_msgq_configure_recv(gh_capid_t msgq_capid,
long not_empty_threshold,
long not_empty_delay)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x6020,
(struct hh_hcall_args){ msgq_capid, not_empty_threshold,
ret = _gh_hcall(0x6020,
(struct gh_hcall_args){ msgq_capid, not_empty_threshold,
not_empty_delay, -1 },
&_resp);

return ret;
}

static inline int hh_hcall_vcpu_affinity_set(hh_capid_t vcpu_capid,
static inline int gh_hcall_vcpu_affinity_set(gh_capid_t vcpu_capid,
uint32_t cpu_index)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x603d,
(struct hh_hcall_args){ vcpu_capid, cpu_index, -1 },
ret = _gh_hcall(0x603d,
(struct gh_hcall_args){ vcpu_capid, cpu_index, -1 },
&_resp);

return ret;
}

static inline int hh_hcall_vpm_group_get_state(u64 vpmg_capid,
static inline int gh_hcall_vpm_group_get_state(u64 vpmg_capid,
uint64_t *vpmg_state)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x6045,
(struct hh_hcall_args){ vpmg_capid, 0 },
ret = _gh_hcall(0x6045,
(struct gh_hcall_args){ vpmg_capid, 0 },
&_resp);
*vpmg_state = _resp.resp1;

return ret;
}

static inline int hh_hcall_trace_update_class_flags(
static inline int gh_hcall_trace_update_class_flags(
uint64_t set_flags, uint64_t clear_flags,
uint64_t *new_flags)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x603f,
(struct hh_hcall_args){ set_flags, clear_flags, 0 },
ret = _gh_hcall(0x603f,
(struct gh_hcall_args){ set_flags, clear_flags, 0 },
&_resp);

if (!ret && new_flags)
@ -315,55 +315,55 @@ static inline int hh_hcall_trace_update_class_flags(
}

static inline int
hh_hcall_virtio_mmio_backend_assert_virq(hh_capid_t capid, u64 int_status)
gh_hcall_virtio_mmio_backend_assert_virq(gh_capid_t capid, u64 int_status)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x604e,
(struct hh_hcall_args){ capid, int_status, 0 },
ret = _gh_hcall(0x604e,
(struct gh_hcall_args){ capid, int_status, 0 },
&_resp);

return ret;
}

static inline int
hh_hcall_virtio_mmio_backend_set_dev_features(hh_capid_t capid,
gh_hcall_virtio_mmio_backend_set_dev_features(gh_capid_t capid,
u64 features_sel, u64 features)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x604f,
(struct hh_hcall_args){ capid, features_sel,
ret = _gh_hcall(0x604f,
(struct gh_hcall_args){ capid, features_sel,
features, 0 }, &_resp);

return ret;
}

static inline int
hh_hcall_virtio_mmio_backend_set_queue_num_max(hh_capid_t capid,
gh_hcall_virtio_mmio_backend_set_queue_num_max(gh_capid_t capid,
u64 queue_sel, u64 queue_num_max)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x6050,
(struct hh_hcall_args){ capid, queue_sel,
ret = _gh_hcall(0x6050,
(struct gh_hcall_args){ capid, queue_sel,
queue_num_max, 0 }, &_resp);

return ret;
}

static inline int
hh_hcall_virtio_mmio_backend_get_drv_features(hh_capid_t capid,
gh_hcall_virtio_mmio_backend_get_drv_features(gh_capid_t capid,
u64 features_sel, u64 *features)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x6051,
(struct hh_hcall_args){ capid, features_sel, 0},
ret = _gh_hcall(0x6051,
(struct gh_hcall_args){ capid, features_sel, 0},
&_resp);

if (!ret && features)
@ -372,7 +372,7 @@ hh_hcall_virtio_mmio_backend_get_drv_features(hh_capid_t capid,
return ret;
}

struct hh_hcall_virtio_queue_info {
struct gh_hcall_virtio_queue_info {
u64 queue_num;
u64 queue_ready;
u64 queue_desc;
@ -381,14 +381,14 @@ struct hh_hcall_virtio_queue_info {
};

static inline int
hh_hcall_virtio_mmio_backend_get_queue_info(hh_capid_t capid,
u64 queue_sel, struct hh_hcall_virtio_queue_info *queue_info)
gh_hcall_virtio_mmio_backend_get_queue_info(gh_capid_t capid,
u64 queue_sel, struct gh_hcall_virtio_queue_info *queue_info)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x6052,
(struct hh_hcall_args){ capid, queue_sel, 0},
ret = _gh_hcall(0x6052,
(struct gh_hcall_args){ capid, queue_sel, 0},
&_resp);

if (!ret && queue_info) {
@ -403,14 +403,14 @@ hh_hcall_virtio_mmio_backend_get_queue_info(hh_capid_t capid,
}

static inline int
hh_hcall_virtio_mmio_backend_get_event(hh_capid_t capid,
gh_hcall_virtio_mmio_backend_get_event(gh_capid_t capid,
u64 *event_data, u64 *event)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x6053,
(struct hh_hcall_args){ capid, 0},
ret = _gh_hcall(0x6053,
(struct gh_hcall_args){ capid, 0},
&_resp);

if (!ret && event_data)
@ -422,13 +422,13 @@ hh_hcall_virtio_mmio_backend_get_event(hh_capid_t capid,
}

static inline int
hh_hcall_virtio_mmio_backend_ack_reset(hh_capid_t capid)
gh_hcall_virtio_mmio_backend_ack_reset(gh_capid_t capid)
{
int ret;
struct hh_hcall_resp _resp = {0};
struct gh_hcall_resp _resp = {0};

ret = _hh_hcall(0x6054,
(struct hh_hcall_args){ capid, 0},
ret = _gh_hcall(0x6054,
(struct gh_hcall_args){ capid, 0},
&_resp);

return ret;

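Every wrapper in this header follows the same pattern: marshal arguments into a gh_hcall_args, trap with _gh_hcall() using the function's hypercall ID, and unpack gh_hcall_resp. A minimal caller sketch using only declarations visible above:

#include <linux/printk.h>
#include <linux/gunyah/hcall.h>

static int gh_identify_example(void)
{
	struct gh_hcall_hyp_identify_resp resp;
	int ret = gh_hcall_hyp_identify(&resp);

	if (!ret)
		pr_info("Gunyah api_info: %#llx\n",
			(unsigned long long)resp.api_info);
	return ret;
}
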
@ -1,20 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
#ifndef __HH_HCALL_COMMON_H
#define __HH_HCALL_COMMON_H
#ifndef __GH_HCALL_COMMON_H
#define __GH_HCALL_COMMON_H

#include <linux/types.h>

struct hh_hcall_args {
struct gh_hcall_args {
unsigned long arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7;
};

struct hh_hcall_resp {
struct gh_hcall_resp {
unsigned long resp0, resp1, resp2, resp3, resp4, resp5, resp6, resp7;
};

typedef u16 hh_hcall_fnid_t;
typedef u16 gh_hcall_fnid_t;

#endif

@ -4,40 +4,40 @@
*
*/

#ifndef __HH_COMMON_H
#define __HH_COMMON_H
#ifndef __GH_COMMON_H
#define __GH_COMMON_H

#include <linux/types.h>

/* Common Haven types */
typedef u16 hh_vmid_t;
typedef u32 hh_rm_msgid_t;
typedef u32 hh_virq_handle_t;
typedef u32 hh_label_t;
typedef u32 hh_memparcel_handle_t;
typedef u64 hh_capid_t;
typedef u64 hh_dbl_flags_t;
/* Common Gunyah types */
typedef u16 gh_vmid_t;
typedef u32 gh_rm_msgid_t;
typedef u32 gh_virq_handle_t;
typedef u32 gh_label_t;
typedef u32 gh_memparcel_handle_t;
typedef u64 gh_capid_t;
typedef u64 gh_dbl_flags_t;

struct hh_vminfo {
struct gh_vminfo {
u8 *guid;
char *uri;
char *name;
char *sign_auth;
};

/* Common Haven macros */
#define HH_CAPID_INVAL U64_MAX
/* Common Gunyah macros */
#define GH_CAPID_INVAL U64_MAX

enum hh_vm_names {
enum gh_vm_names {
/*
* HH_SELF_VM is an alias for VMID 0. Useful for RM APIs which allow
* GH_SELF_VM is an alias for VMID 0. Useful for RM APIs which allow
* operations on current VM such as console
*/
HH_SELF_VM,
HH_PRIMARY_VM,
HH_TRUSTED_VM,
HH_CPUSYS_VM,
HH_VM_MAX
GH_SELF_VM,
GH_PRIMARY_VM,
GH_TRUSTED_VM,
GH_CPUSYS_VM,
GH_VM_MAX
};

#endif

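The gh_vm_names values are used wherever a resource-manager API needs to address a VM, as share_vm_buffers() does earlier in this change. A one-function usage sketch (gh_rm_get_vmid() is declared in gh_rm_drv.h; the include paths follow the gunyah directory used elsewhere in this change):

#include <linux/gunyah/gh_common.h>
#include <linux/gunyah/gh_rm_drv.h>

static int primary_vmid_example(gh_vmid_t *vmid)
{
	/* resolve the primary VM's VMID, exactly as share_vm_buffers() does */
	return gh_rm_get_vmid(GH_PRIMARY_VM, vmid);
}
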
@ -4,91 +4,91 @@
*
*/

#ifndef __HH_DBL_H
#define __HH_DBL_H
#ifndef __GH_DBL_H
#define __GH_DBL_H

#include "hh_common.h"
#include "gh_common.h"

typedef void (*dbl_rx_cb_t)(int irq, void *priv_data);

enum hh_dbl_label {
HH_DBL_TUI_LABEL,
HH_DBL_TUI_NEURON_BLK0,
HH_DBL_TUI_NEURON_BLK1,
HH_DBL_TUI_QRTR,
HH_DBL_LABEL_MAX
enum gh_dbl_label {
GH_DBL_TUI_LABEL,
GH_DBL_TUI_NEURON_BLK0,
GH_DBL_TUI_NEURON_BLK1,
GH_DBL_TUI_QRTR,
GH_DBL_LABEL_MAX
};

/* Possible flags to pass for send, set_mask, read, reset */
#define HH_DBL_NONBLOCK BIT(32)
#define GH_DBL_NONBLOCK BIT(32)

#if IS_ENABLED(CONFIG_HH_DBL)
void *hh_dbl_tx_register(enum hh_dbl_label label);
void *hh_dbl_rx_register(enum hh_dbl_label label, dbl_rx_cb_t rx_cb,
#if IS_ENABLED(CONFIG_GH_DBL)
void *gh_dbl_tx_register(enum gh_dbl_label label);
void *gh_dbl_rx_register(enum gh_dbl_label label, dbl_rx_cb_t rx_cb,
void *priv);

int hh_dbl_tx_unregister(void *dbl_client_desc);
int hh_dbl_rx_unregister(void *dbl_client_desc);
int gh_dbl_tx_unregister(void *dbl_client_desc);
int gh_dbl_rx_unregister(void *dbl_client_desc);

int hh_dbl_send(void *dbl_client_desc, uint64_t *newflags,
int gh_dbl_send(void *dbl_client_desc, uint64_t *newflags,
const unsigned long flags);
int hh_dbl_set_mask(void *dbl_client_desc, hh_dbl_flags_t enable_mask,
hh_dbl_flags_t ack_mask, const unsigned long flags);
int hh_dbl_read_and_clean(void *dbl_client_desc, hh_dbl_flags_t *clear_flags,
int gh_dbl_set_mask(void *dbl_client_desc, gh_dbl_flags_t enable_mask,
gh_dbl_flags_t ack_mask, const unsigned long flags);
int gh_dbl_read_and_clean(void *dbl_client_desc, gh_dbl_flags_t *clear_flags,
const unsigned long flags);
int hh_dbl_reset(void *dbl_client_desc, const unsigned long flags);
int hh_dbl_populate_cap_info(enum hh_dbl_label label, u64 cap_id,
int gh_dbl_reset(void *dbl_client_desc, const unsigned long flags);
int gh_dbl_populate_cap_info(enum gh_dbl_label label, u64 cap_id,
int direction, int rx_irq);
#else
static inline void *hh_dbl_tx_register(enum hh_dbl_label label)
static inline void *gh_dbl_tx_register(enum gh_dbl_label label)
{
return ERR_PTR(-ENODEV);
}

static inline void *hh_dbl_rx_register(enum hh_dbl_label label,
static inline void *gh_dbl_rx_register(enum gh_dbl_label label,
dbl_rx_cb_t rx_cb,
void *priv)
{
return ERR_PTR(-ENODEV);
}

static inline int hh_dbl_tx_unregister(void *dbl_client_desc)
static inline int gh_dbl_tx_unregister(void *dbl_client_desc)
{
return -EINVAL;
}

static inline int hh_dbl_rx_unregister(void *dbl_client_desc)
static inline int gh_dbl_rx_unregister(void *dbl_client_desc)
{
return -EINVAL;
}

static inline int hh_dbl_send(void *dbl_client_desc, uint64_t *newflags,
static inline int gh_dbl_send(void *dbl_client_desc, uint64_t *newflags,
const unsigned long flags)
{
return -EINVAL;
}

static inline int hh_dbl_set_mask(void *dbl_client_desc,
hh_dbl_flags_t enable_mask,
hh_dbl_flags_t ack_mask,
static inline int gh_dbl_set_mask(void *dbl_client_desc,
gh_dbl_flags_t enable_mask,
gh_dbl_flags_t ack_mask,
const unsigned long flags)
{
return -EINVAL;
}

static inline int hh_dbl_read_and_clean(void *dbl_client_desc,
hh_dbl_flags_t *clear_flags,
static inline int gh_dbl_read_and_clean(void *dbl_client_desc,
gh_dbl_flags_t *clear_flags,
const unsigned long flags)
{
return -EINVAL;
}

static inline int hh_dbl_reset(void *dbl_client_desc, const unsigned long flags)
static inline int gh_dbl_reset(void *dbl_client_desc, const unsigned long flags)
{
return -EINVAL;
}

static inline int hh_dbl_populate_cap_info(enum hh_dbl_label label, u64 cap_id,
static inline int gh_dbl_populate_cap_info(enum gh_dbl_label label, u64 cap_id,
int direction, int rx_irq)
{
return -EINVAL;

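A minimal sketch of the renamed doorbell API: register the TX side of a label and ring it. The label choice, the payload bit, and the header path are assumptions; the declarations themselves are the ones above.

#include <linux/bits.h>
#include <linux/err.h>
#include <linux/gunyah/gh_dbl.h>	/* assumed install path for this header */

static int gh_dbl_ring_example(void)
{
	uint64_t newflags = BIT(0);	/* assumed payload bit */
	void *tx = gh_dbl_tx_register(GH_DBL_TUI_LABEL);

	if (IS_ERR(tx))
		return PTR_ERR(tx);
	return gh_dbl_send(tx, &newflags, GH_DBL_NONBLOCK);
}
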
@ -1,72 +1,72 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*
*/

#ifndef __HH_ERRNO_H
#define __HH_ERRNO_H
#ifndef __GH_ERRNO_H
#define __GH_ERRNO_H

#include <linux/errno.h>

#define HH_ERROR_OK 0
#define HH_ERROR_UNIMPLEMENTED -1
#define GH_ERROR_OK 0
#define GH_ERROR_UNIMPLEMENTED -1

#define HH_ERROR_ARG_INVAL 1
#define HH_ERROR_ARG_SIZE 2
#define HH_ERROR_ARG_ALIGN 3
#define GH_ERROR_ARG_INVAL 1
#define GH_ERROR_ARG_SIZE 2
#define GH_ERROR_ARG_ALIGN 3

#define HH_ERROR_NOMEM 10
#define GH_ERROR_NOMEM 10

#define HH_ERROR_ADDR_OVFL 20
#define HH_ERROR_ADDR_UNFL 21
#define HH_ERROR_ADDR_INVAL 22
#define GH_ERROR_ADDR_OVFL 20
#define GH_ERROR_ADDR_UNFL 21
#define GH_ERROR_ADDR_INVAL 22

#define HH_ERROR_DENIED 30
#define HH_ERROR_BUSY 31
#define HH_ERROR_IDLE 32
#define GH_ERROR_DENIED 30
#define GH_ERROR_BUSY 31
#define GH_ERROR_IDLE 32

#define HH_ERROR_IRQ_BOUND 40
#define HH_ERROR_IRQ_UNBOUND 41
#define GH_ERROR_IRQ_BOUND 40
#define GH_ERROR_IRQ_UNBOUND 41

#define HH_ERROR_CSPACE_CAP_NULL 50
#define HH_ERROR_CSPACE_CAP_REVOKED 51
#define HH_ERROR_CSPACE_WRONG_OBJ_TYPE 52
#define HH_ERROR_CSPACE_INSUF_RIGHTS 53
#define HH_ERROR_CSPACE_FULL 54
#define GH_ERROR_CSPACE_CAP_NULL 50
#define GH_ERROR_CSPACE_CAP_REVOKED 51
#define GH_ERROR_CSPACE_WRONG_OBJ_TYPE 52
#define GH_ERROR_CSPACE_INSUF_RIGHTS 53
#define GH_ERROR_CSPACE_FULL 54

#define HH_ERROR_MSGQUEUE_EMPTY 60
#define HH_ERROR_MSGQUEUE_FULL 61
#define GH_ERROR_MSGQUEUE_EMPTY 60
#define GH_ERROR_MSGQUEUE_FULL 61

static inline int hh_remap_error(int hh_error)
static inline int gh_remap_error(int gh_error)
{
switch (hh_error) {
case HH_ERROR_OK:
switch (gh_error) {
case GH_ERROR_OK:
return 0;
case HH_ERROR_NOMEM:
case GH_ERROR_NOMEM:
return -ENOMEM;
case HH_ERROR_DENIED:
case HH_ERROR_CSPACE_CAP_NULL:
case HH_ERROR_CSPACE_CAP_REVOKED:
case HH_ERROR_CSPACE_WRONG_OBJ_TYPE:
case HH_ERROR_CSPACE_INSUF_RIGHTS:
case HH_ERROR_CSPACE_FULL:
case GH_ERROR_DENIED:
case GH_ERROR_CSPACE_CAP_NULL:
case GH_ERROR_CSPACE_CAP_REVOKED:
case GH_ERROR_CSPACE_WRONG_OBJ_TYPE:
case GH_ERROR_CSPACE_INSUF_RIGHTS:
case GH_ERROR_CSPACE_FULL:
return -EACCES;
case HH_ERROR_BUSY:
case HH_ERROR_IDLE:
case GH_ERROR_BUSY:
case GH_ERROR_IDLE:
return -EBUSY;
case HH_ERROR_IRQ_BOUND:
case HH_ERROR_IRQ_UNBOUND:
case HH_ERROR_MSGQUEUE_FULL:
case HH_ERROR_MSGQUEUE_EMPTY:
case GH_ERROR_IRQ_BOUND:
case GH_ERROR_IRQ_UNBOUND:
case GH_ERROR_MSGQUEUE_FULL:
case GH_ERROR_MSGQUEUE_EMPTY:
return -EPERM;
case HH_ERROR_UNIMPLEMENTED:
case HH_ERROR_ARG_INVAL:
case HH_ERROR_ARG_SIZE:
case HH_ERROR_ARG_ALIGN:
case HH_ERROR_ADDR_OVFL:
case HH_ERROR_ADDR_UNFL:
case HH_ERROR_ADDR_INVAL:
case GH_ERROR_UNIMPLEMENTED:
case GH_ERROR_ARG_INVAL:
case GH_ERROR_ARG_SIZE:
case GH_ERROR_ARG_ALIGN:
case GH_ERROR_ADDR_OVFL:
case GH_ERROR_ADDR_UNFL:
case GH_ERROR_ADDR_INVAL:
default:
return -EINVAL;
}

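gh_remap_error() is the single funnel through which the drivers in this change (the watchdog and virtio backend above) translate positive Gunyah status codes into negative errnos. A usage sketch, assuming this header is included:

/* GH_ERROR_NOMEM -> -ENOMEM, GH_ERROR_BUSY -> -EBUSY, unknown -> -EINVAL */
static int gh_status_to_errno_example(void)
{
	return gh_remap_error(GH_ERROR_MSGQUEUE_FULL);	/* yields -EPERM */
}
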
@ -1,39 +1,39 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*
*/

#ifndef __HH_IRQ_LEND_H
#define __HH_IRQ_LEND_H
#ifndef __GH_IRQ_LEND_H
#define __GH_IRQ_LEND_H

#include <linux/types.h>

#include "hh_common.h"
#include "hh_rm_drv.h"
#include "gh_common.h"
#include "gh_rm_drv.h"

enum hh_irq_label {
HH_IRQ_LABEL_SDE,
HH_IRQ_LABEL_TRUSTED_TOUCH,
HH_IRQ_LABEL_MAX
enum gh_irq_label {
GH_IRQ_LABEL_SDE,
GH_IRQ_LABEL_TRUSTED_TOUCH,
GH_IRQ_LABEL_MAX
};

typedef void (*hh_irq_handle_fn)(void *req, enum hh_irq_label label);
typedef void (*hh_irq_handle_fn_v2)(void *req, unsigned long notif_type,
enum hh_irq_label label);
int hh_irq_lend(enum hh_irq_label label, enum hh_vm_names name,
int hw_irq, hh_irq_handle_fn cb_handle, void *data);
int hh_irq_lend_v2(enum hh_irq_label label, enum hh_vm_names name,
int hw_irq, hh_irq_handle_fn_v2 cb_handle, void *data);
int hh_irq_lend_notify(enum hh_irq_label label);
int hh_irq_reclaim(enum hh_irq_label label);
int hh_irq_wait_for_lend(enum hh_irq_label label, enum hh_vm_names name,
hh_irq_handle_fn on_lend, void *data);
int hh_irq_wait_for_lend_v2(enum hh_irq_label label, enum hh_vm_names name,
hh_irq_handle_fn_v2 on_lend, void *data);
int hh_irq_accept(enum hh_irq_label label, int irq, int type);
int hh_irq_accept_notify(enum hh_irq_label label);
int hh_irq_release(enum hh_irq_label label);
int hh_irq_release_notify(enum hh_irq_label label);
typedef void (*gh_irq_handle_fn)(void *req, enum gh_irq_label label);
typedef void (*gh_irq_handle_fn_v2)(void *req, unsigned long notif_type,
enum gh_irq_label label);
int gh_irq_lend(enum gh_irq_label label, enum gh_vm_names name,
int hw_irq, gh_irq_handle_fn cb_handle, void *data);
int gh_irq_lend_v2(enum gh_irq_label label, enum gh_vm_names name,
int hw_irq, gh_irq_handle_fn_v2 cb_handle, void *data);
int gh_irq_lend_notify(enum gh_irq_label label);
int gh_irq_reclaim(enum gh_irq_label label);
int gh_irq_wait_for_lend(enum gh_irq_label label, enum gh_vm_names name,
gh_irq_handle_fn on_lend, void *data);
int gh_irq_wait_for_lend_v2(enum gh_irq_label label, enum gh_vm_names name,
gh_irq_handle_fn_v2 on_lend, void *data);
int gh_irq_accept(enum gh_irq_label label, int irq, int type);
int gh_irq_accept_notify(enum gh_irq_label label);
int gh_irq_release(enum gh_irq_label label);
int gh_irq_release_notify(enum gh_irq_label label);

#endif

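A sketch of the lend flow these declarations imply: the owner lends a hardware IRQ to a peer VM and then notifies it. The label/VM pairing, the callback body, and the header path are assumptions.

#include <linux/gunyah/gh_irq_lend.h>	/* assumed install path */

static void sde_lend_cb(void *req, enum gh_irq_label label)
{
	/* peer accepted or released the IRQ; reclaim when appropriate */
}

static int lend_sde_irq(int hw_irq)
{
	int ret = gh_irq_lend(GH_IRQ_LABEL_SDE, GH_TRUSTED_VM, hw_irq,
			      sde_lend_cb, NULL);
	if (ret)
		return ret;
	return gh_irq_lend_notify(GH_IRQ_LABEL_SDE);
}
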
@ -1,41 +1,41 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*
*/

#ifndef __HH_MEM_NOTIFIER_H
#define __HH_MEM_NOTIFIER_H
#ifndef __GH_MEM_NOTIFIER_H
#define __GH_MEM_NOTIFIER_H

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/haven/hh_rm_drv.h>
#include <linux/gunyah/gh_rm_drv.h>
#include <linux/types.h>

enum hh_mem_notifier_tag {
HH_MEM_NOTIFIER_TAG_DISPLAY,
HH_MEM_NOTIFIER_TAG_TOUCH,
HH_MEM_NOTIFIER_TAG_MAX
enum gh_mem_notifier_tag {
GH_MEM_NOTIFIER_TAG_DISPLAY,
GH_MEM_NOTIFIER_TAG_TOUCH,
GH_MEM_NOTIFIER_TAG_MAX
};

typedef void (*hh_mem_notifier_handler)(enum hh_mem_notifier_tag tag,
typedef void (*gh_mem_notifier_handler)(enum gh_mem_notifier_tag tag,
unsigned long notif_type,
void *entry_data, void *notif_msg);

#if IS_ENABLED(CONFIG_HH_MEM_NOTIFIER)
void *hh_mem_notifier_register(enum hh_mem_notifier_tag tag,
hh_mem_notifier_handler notif_handler,
#if IS_ENABLED(CONFIG_GH_MEM_NOTIFIER)
void *gh_mem_notifier_register(enum gh_mem_notifier_tag tag,
gh_mem_notifier_handler notif_handler,
void *data);
void hh_mem_notifier_unregister(void *cookie);
void gh_mem_notifier_unregister(void *cookie);
#else
static void *hh_mem_notifier_register(enum hh_mem_notifier_tag tag,
hh_mem_notifier_handler notif_handler,
static void *gh_mem_notifier_register(enum gh_mem_notifier_tag tag,
gh_mem_notifier_handler notif_handler,
void *data)
{
return ERR_PTR(-ENOTSUPP);
}

static void hh_mem_notifier_unregister(void *cookie)
static void gh_mem_notifier_unregister(void *cookie)
{
}
#endif

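A registration sketch for the renamed notifier, assuming the display tag, an empty handler, and the same assumed install path as the other gunyah headers:

#include <linux/err.h>
#include <linux/gunyah/gh_mem_notifier.h>	/* assumed install path */

static void disp_mem_handler(enum gh_mem_notifier_tag tag,
			     unsigned long notif_type,
			     void *entry_data, void *notif_msg)
{
	/* e.g. react to GH_RM_NOTIF_MEM_SHARED for the display memparcel */
}

static void *disp_cookie;

static int disp_notifier_init(void)
{
	disp_cookie = gh_mem_notifier_register(GH_MEM_NOTIFIER_TAG_DISPLAY,
					       disp_mem_handler, NULL);
	return PTR_ERR_OR_ZERO(disp_cookie);
}
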
@ -4,67 +4,67 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef __HH_MSGQ_H
|
||||
#define __HH_MSGQ_H
|
||||
#ifndef __GH_MSGQ_H
|
||||
#define __GH_MSGQ_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
||||
#include "hh_common.h"
|
||||
#include "gh_common.h"
|
||||
|
||||
enum hh_msgq_label {
|
||||
HH_MSGQ_LABEL_RM,
|
||||
HH_MSGQ_LABEL_MEMBUF,
|
||||
HH_MSGQ_LABEL_DISPLAY,
|
||||
HH_MSGQ_LABEL_MAX
|
||||
enum gh_msgq_label {
|
||||
GH_MSGQ_LABEL_RM,
|
||||
GH_MSGQ_LABEL_MEMBUF,
|
||||
GH_MSGQ_LABEL_DISPLAY,
|
||||
GH_MSGQ_LABEL_MAX
|
||||
};
|
||||
|
||||
#define HH_MSGQ_MAX_MSG_SIZE_BYTES 240
|
||||
#define GH_MSGQ_MAX_MSG_SIZE_BYTES 240
|
||||
|
||||
#define HH_MSGQ_DIRECTION_TX 0
|
||||
#define HH_MSGQ_DIRECTION_RX 1
|
||||
#define GH_MSGQ_DIRECTION_TX 0
|
||||
#define GH_MSGQ_DIRECTION_RX 1
|
||||
|
||||
/* Possible flags to pass for Tx or Rx */
|
||||
#define HH_MSGQ_TX_PUSH BIT(0)
|
||||
#define HH_MSGQ_NONBLOCK BIT(32)
|
||||
#define GH_MSGQ_TX_PUSH BIT(0)
|
||||
#define GH_MSGQ_NONBLOCK BIT(32)
|
||||
|
||||
#if IS_ENABLED(CONFIG_HH_MSGQ)
|
||||
void *hh_msgq_register(enum hh_msgq_label label);
|
||||
int hh_msgq_unregister(void *msgq_client_desc);
|
||||
int hh_msgq_send(void *msgq_client_desc,
|
||||
#if IS_ENABLED(CONFIG_GH_MSGQ)
|
||||
void *gh_msgq_register(enum gh_msgq_label label);
|
||||
int gh_msgq_unregister(void *msgq_client_desc);
|
||||
int gh_msgq_send(void *msgq_client_desc,
|
||||
void *buff, size_t size, unsigned long flags);
|
||||
int hh_msgq_recv(void *msgq_client_desc,
|
||||
int gh_msgq_recv(void *msgq_client_desc,
|
||||
void *buff, size_t buff_size,
|
||||
size_t *recv_size, unsigned long flags);
|
||||
|
||||
int hh_msgq_populate_cap_info(enum hh_msgq_label label, u64 cap_id,
|
||||
int gh_msgq_populate_cap_info(enum gh_msgq_label label, u64 cap_id,
|
||||
int direction, int irq);
|
||||
int hh_msgq_probe(struct platform_device *pdev, enum hh_msgq_label label);
|
||||
int gh_msgq_probe(struct platform_device *pdev, enum gh_msgq_label label);
|
||||
#else
|
||||
static inline void *hh_msgq_register(enum hh_msgq_label label)
|
||||
static inline void *gh_msgq_register(enum gh_msgq_label label)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline int hh_msgq_unregister(void *msgq_client_desc)
|
||||
static inline int gh_msgq_unregister(void *msgq_client_desc)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline int hh_msgq_send(void *msgq_client_desc,
|
||||
static inline int gh_msgq_send(void *msgq_client_desc,
|
||||
void *buff, size_t size, unsigned long flags)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline int hh_msgq_recv(void *msgq_client_desc,
|
||||
static inline int gh_msgq_recv(void *msgq_client_desc,
|
||||
void *buff, size_t buff_size,
|
||||
size_t *recv_size, unsigned long flags)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline int hh_msgq_populate_cap_info(enum hh_msgq_label label,
|
||||
static inline int gh_msgq_populate_cap_info(enum gh_msgq_label label,
|
||||
u64 cap_id,
|
||||
int direction,
|
||||
int irq)
|
||||
@ -72,8 +72,8 @@ static inline int hh_msgq_populate_cap_info(enum hh_msgq_label label,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline int hh_msgq_probe(struct platform_device *pdev,
|
||||
enum hh_msgq_label label)
|
||||
static inline int gh_msgq_probe(struct platform_device *pdev,
|
||||
enum gh_msgq_label label)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
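To make the renamed message-queue surface concrete, a minimal sketch of a blocking receive-and-echo using only the functions declared above; the label choice is arbitrary and error handling is trimmed.

/* Illustrative only: echo one message over a Gunyah message queue. */
static int example_msgq_echo(void)
{
	char buf[GH_MSGQ_MAX_MSG_SIZE_BYTES];
	size_t recv_size;
	void *client;
	int ret;

	client = gh_msgq_register(GH_MSGQ_LABEL_DISPLAY);
	if (IS_ERR(client))
		return PTR_ERR(client);

	ret = gh_msgq_recv(client, buf, sizeof(buf), &recv_size, 0);
	if (!ret)
		ret = gh_msgq_send(client, buf, recv_size, GH_MSGQ_TX_PUSH);

	gh_msgq_unregister(client);
	return ret;
}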
@@ -4,53 +4,53 @@
*
*/

#ifndef __HH_RM_DRV_H
#define __HH_RM_DRV_H
#ifndef __GH_RM_DRV_H
#define __GH_RM_DRV_H

#include <linux/types.h>
#include <linux/notifier.h>

#include "hh_common.h"
#include "gh_common.h"

/* Notification type Message IDs */
/* Memory APIs */
#define HH_RM_NOTIF_MEM_SHARED 0x51100011
#define HH_RM_NOTIF_MEM_RELEASED 0x51100012
#define HH_RM_NOTIF_MEM_ACCEPTED 0x51100013
#define GH_RM_NOTIF_MEM_SHARED 0x51100011
#define GH_RM_NOTIF_MEM_RELEASED 0x51100012
#define GH_RM_NOTIF_MEM_ACCEPTED 0x51100013

#define HH_RM_MEM_TYPE_NORMAL 0
#define HH_RM_MEM_TYPE_IO 1
#define GH_RM_MEM_TYPE_NORMAL 0
#define GH_RM_MEM_TYPE_IO 1

#define HH_RM_TRANS_TYPE_DONATE 0
#define HH_RM_TRANS_TYPE_LEND 1
#define HH_RM_TRANS_TYPE_SHARE 2
#define GH_RM_TRANS_TYPE_DONATE 0
#define GH_RM_TRANS_TYPE_LEND 1
#define GH_RM_TRANS_TYPE_SHARE 2

#define HH_RM_ACL_X BIT(0)
#define HH_RM_ACL_W BIT(1)
#define HH_RM_ACL_R BIT(2)
#define GH_RM_ACL_X BIT(0)
#define GH_RM_ACL_W BIT(1)
#define GH_RM_ACL_R BIT(2)

#define HH_RM_MEM_RELEASE_CLEAR BIT(0)
#define HH_RM_MEM_RECLAIM_CLEAR BIT(0)
#define GH_RM_MEM_RELEASE_CLEAR BIT(0)
#define GH_RM_MEM_RECLAIM_CLEAR BIT(0)

#define HH_RM_MEM_ACCEPT_VALIDATE_SANITIZED BIT(0)
#define HH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS BIT(1)
#define HH_RM_MEM_ACCEPT_VALIDATE_LABEL BIT(2)
#define HH_RM_MEM_ACCEPT_DONE BIT(7)
#define GH_RM_MEM_ACCEPT_VALIDATE_SANITIZED BIT(0)
#define GH_RM_MEM_ACCEPT_VALIDATE_ACL_ATTRS BIT(1)
#define GH_RM_MEM_ACCEPT_VALIDATE_LABEL BIT(2)
#define GH_RM_MEM_ACCEPT_DONE BIT(7)

#define HH_RM_MEM_SHARE_SANITIZE BIT(0)
#define HH_RM_MEM_LEND_SANITIZE BIT(0)
#define GH_RM_MEM_SHARE_SANITIZE BIT(0)
#define GH_RM_MEM_LEND_SANITIZE BIT(0)

#define HH_RM_MEM_NOTIFY_RECIPIENT_SHARED BIT(0)
#define HH_RM_MEM_NOTIFY_RECIPIENT HH_RM_MEM_NOTIFY_RECIPIENT_SHARED
#define HH_RM_MEM_NOTIFY_OWNER_RELEASED BIT(1)
#define HH_RM_MEM_NOTIFY_OWNER HH_RM_MEM_NOTIFY_OWNER_RELEASED
#define HH_RM_MEM_NOTIFY_OWNER_ACCEPTED BIT(2)
#define GH_RM_MEM_NOTIFY_RECIPIENT_SHARED BIT(0)
#define GH_RM_MEM_NOTIFY_RECIPIENT GH_RM_MEM_NOTIFY_RECIPIENT_SHARED
#define GH_RM_MEM_NOTIFY_OWNER_RELEASED BIT(1)
#define GH_RM_MEM_NOTIFY_OWNER GH_RM_MEM_NOTIFY_OWNER_RELEASED
#define GH_RM_MEM_NOTIFY_OWNER_ACCEPTED BIT(2)

struct hh_rm_mem_shared_acl_entry;
struct hh_rm_mem_shared_sgl_entry;
struct hh_rm_mem_shared_attr_entry;
struct gh_rm_mem_shared_acl_entry;
struct gh_rm_mem_shared_sgl_entry;
struct gh_rm_mem_shared_attr_entry;

struct hh_rm_notif_mem_shared_payload {
struct gh_rm_notif_mem_shared_payload {
u32 mem_handle;
u8 mem_type;
u8 trans_type;
@@ -59,389 +59,389 @@ struct hh_rm_notif_mem_shared_payload {
u16 owner_vmid;
u16 reserved2;
u32 label;
hh_label_t mem_info_tag;
gh_label_t mem_info_tag;
/* TODO: How to arrange multiple variable length struct arrays? */
} __packed;

struct hh_rm_mem_shared_acl_entry {
struct gh_rm_mem_shared_acl_entry {
u16 acl_vmid;
u8 acl_rights;
u8 reserved;
} __packed;

struct hh_rm_mem_shared_sgl_entry {
struct gh_rm_mem_shared_sgl_entry {
u32 sgl_size_low;
u32 sgl_size_high;
} __packed;

struct hh_rm_mem_shared_attr_entry {
struct gh_rm_mem_shared_attr_entry {
u16 attributes;
u16 attributes_vmid;
} __packed;

struct hh_rm_notif_mem_released_payload {
struct gh_rm_notif_mem_released_payload {
u32 mem_handle;
u16 participant_vmid;
u16 reserved;
hh_label_t mem_info_tag;
gh_label_t mem_info_tag;
} __packed;

struct hh_rm_notif_mem_accepted_payload {
struct gh_rm_notif_mem_accepted_payload {
u32 mem_handle;
u16 participant_vmid;
u16 reserved;
hh_label_t mem_info_tag;
gh_label_t mem_info_tag;
} __packed;

struct hh_acl_entry {
struct gh_acl_entry {
u16 vmid;
u8 perms;
u8 reserved;
} __packed;

struct hh_sgl_entry {
struct gh_sgl_entry {
u64 ipa_base;
u64 size;
} __packed;

struct hh_mem_attr_entry {
struct gh_mem_attr_entry {
u16 attr;
u16 vmid;
} __packed;

struct hh_acl_desc {
struct gh_acl_desc {
u32 n_acl_entries;
struct hh_acl_entry acl_entries[];
struct gh_acl_entry acl_entries[];
} __packed;

struct hh_sgl_desc {
struct gh_sgl_desc {
u16 n_sgl_entries;
u16 reserved;
struct hh_sgl_entry sgl_entries[];
struct gh_sgl_entry sgl_entries[];
} __packed;

struct hh_mem_attr_desc {
struct gh_mem_attr_desc {
u16 n_mem_attr_entries;
u16 reserved;
struct hh_mem_attr_entry attr_entries[];
struct gh_mem_attr_entry attr_entries[];
} __packed;

struct hh_notify_vmid_entry {
struct gh_notify_vmid_entry {
u16 vmid;
u16 reserved;
} __packed;

struct hh_notify_vmid_desc {
struct gh_notify_vmid_desc {
u16 n_vmid_entries;
u16 reserved;
struct hh_notify_vmid_entry vmid_entries[];
struct gh_notify_vmid_entry vmid_entries[];
} __packed;
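Since every descriptor above ends in a flexible array member, callers size allocations with offsetof() rather than sizeof(); a minimal sketch, with the VMIDs and buffer bounds as stand-in values (the Neuron channel code later in this change follows the same pattern):

/* Illustrative only: two-entry ACL plus a one-entry SG list. */
struct gh_acl_desc *acl;
struct gh_sgl_desc *sgl;

acl = kzalloc(offsetof(struct gh_acl_desc, acl_entries[2]), GFP_KERNEL);
sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
if (acl && sgl) {
	acl->n_acl_entries = 2;
	acl->acl_entries[0].vmid = self_vmid;	/* stand-in VMIDs */
	acl->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W;
	acl->acl_entries[1].vmid = peer_vmid;
	acl->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W;

	sgl->n_sgl_entries = 1;
	sgl->sgl_entries[0].ipa_base = buf_base;	/* stand-in region */
	sgl->sgl_entries[0].size = buf_size;
}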
/* VM APIs */
#define HH_RM_NOTIF_VM_STATUS 0x56100008
#define HH_RM_NOTIF_VM_IRQ_LENT 0x56100011
#define HH_RM_NOTIF_VM_IRQ_RELEASED 0x56100012
#define HH_RM_NOTIF_VM_IRQ_ACCEPTED 0x56100013
#define GH_RM_NOTIF_VM_STATUS 0x56100008
#define GH_RM_NOTIF_VM_IRQ_LENT 0x56100011
#define GH_RM_NOTIF_VM_IRQ_RELEASED 0x56100012
#define GH_RM_NOTIF_VM_IRQ_ACCEPTED 0x56100013

#define HH_RM_VM_STATUS_NO_STATE 0
#define HH_RM_VM_STATUS_INIT 1
#define HH_RM_VM_STATUS_READY 2
#define HH_RM_VM_STATUS_RUNNING 3
#define HH_RM_VM_STATUS_PAUSED 4
#define HH_RM_VM_STATUS_SHUTDOWN 5
#define HH_RM_VM_STATUS_SHUTOFF 6
#define HH_RM_VM_STATUS_CRASHED 7
#define HH_RM_VM_STATUS_INIT_FAILED 8
#define GH_RM_VM_STATUS_NO_STATE 0
#define GH_RM_VM_STATUS_INIT 1
#define GH_RM_VM_STATUS_READY 2
#define GH_RM_VM_STATUS_RUNNING 3
#define GH_RM_VM_STATUS_PAUSED 4
#define GH_RM_VM_STATUS_SHUTDOWN 5
#define GH_RM_VM_STATUS_SHUTOFF 6
#define GH_RM_VM_STATUS_CRASHED 7
#define GH_RM_VM_STATUS_INIT_FAILED 8

#define HH_RM_OS_STATUS_NONE 0
#define HH_RM_OS_STATUS_EARLY_BOOT 1
#define HH_RM_OS_STATUS_BOOT 2
#define HH_RM_OS_STATUS_INIT 3
#define HH_RM_OS_STATUS_RUN 4
#define GH_RM_OS_STATUS_NONE 0
#define GH_RM_OS_STATUS_EARLY_BOOT 1
#define GH_RM_OS_STATUS_BOOT 2
#define GH_RM_OS_STATUS_INIT 3
#define GH_RM_OS_STATUS_RUN 4

struct hh_rm_notif_vm_status_payload {
hh_vmid_t vmid;
struct gh_rm_notif_vm_status_payload {
gh_vmid_t vmid;
u16 reserved;
u8 vm_status;
u8 os_status;
u16 app_status;
} __packed;

struct hh_rm_notif_vm_irq_lent_payload {
hh_vmid_t owner_vmid;
struct gh_rm_notif_vm_irq_lent_payload {
gh_vmid_t owner_vmid;
u16 reserved;
hh_virq_handle_t virq_handle;
hh_label_t virq_label;
gh_virq_handle_t virq_handle;
gh_label_t virq_label;
} __packed;

struct hh_rm_notif_vm_irq_released_payload {
hh_virq_handle_t virq_handle;
struct gh_rm_notif_vm_irq_released_payload {
gh_virq_handle_t virq_handle;
} __packed;

struct hh_rm_notif_vm_irq_accepted_payload {
hh_virq_handle_t virq_handle;
struct gh_rm_notif_vm_irq_accepted_payload {
gh_virq_handle_t virq_handle;
} __packed;

/* VM Services */
#define HH_RM_NOTIF_VM_CONSOLE_CHARS 0X56100080
#define GH_RM_NOTIF_VM_CONSOLE_CHARS 0X56100080

struct hh_rm_notif_vm_console_chars {
hh_vmid_t vmid;
struct gh_rm_notif_vm_console_chars {
gh_vmid_t vmid;
u16 num_bytes;
u8 bytes[0];
} __packed;

struct notifier_block;

typedef int (*hh_virtio_mmio_cb_t)(hh_vmid_t peer, const char *vm_name,
hh_label_t label, hh_capid_t cap_id, int linux_irq, u64 base, u64 size);
typedef int (*hh_vcpu_affinity_cb_t)(hh_label_t label, hh_capid_t cap_id);
typedef int (*gh_virtio_mmio_cb_t)(gh_vmid_t peer, const char *vm_name,
gh_label_t label, gh_capid_t cap_id, int linux_irq, u64 base, u64 size);
typedef int (*gh_vcpu_affinity_cb_t)(gh_label_t label, gh_capid_t cap_id);

#if IS_ENABLED(CONFIG_HH_RM_DRV)
#if IS_ENABLED(CONFIG_GH_RM_DRV)
/* RM client registration APIs */
int hh_rm_register_notifier(struct notifier_block *nb);
int hh_rm_unregister_notifier(struct notifier_block *nb);
int gh_rm_register_notifier(struct notifier_block *nb);
int gh_rm_unregister_notifier(struct notifier_block *nb);

/* Client APIs for IRQ management */
int hh_rm_virq_to_irq(u32 virq, u32 type);
int hh_rm_irq_to_virq(int irq, u32 *virq);
int gh_rm_virq_to_irq(u32 virq, u32 type);
int gh_rm_irq_to_virq(int irq, u32 *virq);

int hh_rm_vm_irq_lend(hh_vmid_t vmid,
int gh_rm_vm_irq_lend(gh_vmid_t vmid,
int virq,
int label,
hh_virq_handle_t *virq_handle);
int hh_rm_vm_irq_lend_notify(hh_vmid_t vmid, hh_virq_handle_t virq_handle);
int hh_rm_vm_irq_accept(hh_virq_handle_t virq_handle, int virq);
int hh_rm_vm_irq_accept_notify(hh_vmid_t vmid, hh_virq_handle_t virq_handle);
int hh_rm_vm_irq_release(hh_virq_handle_t virq_handle);
int hh_rm_vm_irq_release_notify(hh_vmid_t vmid, hh_virq_handle_t virq_handle);
gh_virq_handle_t *virq_handle);
int gh_rm_vm_irq_lend_notify(gh_vmid_t vmid, gh_virq_handle_t virq_handle);
int gh_rm_vm_irq_accept(gh_virq_handle_t virq_handle, int virq);
int gh_rm_vm_irq_accept_notify(gh_vmid_t vmid, gh_virq_handle_t virq_handle);
int gh_rm_vm_irq_release(gh_virq_handle_t virq_handle);
int gh_rm_vm_irq_release_notify(gh_vmid_t vmid, gh_virq_handle_t virq_handle);

int hh_rm_vm_irq_reclaim(hh_virq_handle_t virq_handle);
int gh_rm_vm_irq_reclaim(gh_virq_handle_t virq_handle);

int hh_rm_set_virtio_mmio_cb(hh_virtio_mmio_cb_t fnptr);
int gh_rm_set_virtio_mmio_cb(gh_virtio_mmio_cb_t fnptr);
void gh_rm_unset_virtio_mmio_cb(void);
int hh_rm_set_vcpu_affinity_cb(hh_vcpu_affinity_cb_t fnptr);
int gh_rm_set_vcpu_affinity_cb(gh_vcpu_affinity_cb_t fnptr);

/* Client APIs for VM management */
int hh_rm_vm_alloc_vmid(enum hh_vm_names vm_name, int *vmid);
int hh_rm_get_vmid(enum hh_vm_names vm_name, hh_vmid_t *vmid);
int hh_rm_get_vm_name(hh_vmid_t vmid, enum hh_vm_names *vm_name);
int hh_rm_get_vminfo(enum hh_vm_names vm_name, struct hh_vminfo *vminfo);
int hh_rm_vm_start(int vmid);
int hh_rm_get_vm_id_info(enum hh_vm_names vm_name, hh_vmid_t vmid);
int gh_rm_vm_alloc_vmid(enum gh_vm_names vm_name, int *vmid);
int gh_rm_get_vmid(enum gh_vm_names vm_name, gh_vmid_t *vmid);
int gh_rm_get_vm_name(gh_vmid_t vmid, enum gh_vm_names *vm_name);
int gh_rm_get_vminfo(enum gh_vm_names vm_name, struct gh_vminfo *vminfo);
int gh_rm_vm_start(int vmid);
int gh_rm_get_vm_id_info(enum gh_vm_names vm_name, gh_vmid_t vmid);

/* Client APIs for VM query */
int hh_rm_populate_hyp_res(hh_vmid_t vmid, const char *vm_name);
int gh_rm_populate_hyp_res(gh_vmid_t vmid, const char *vm_name);

/* Client APIs for VM Services */
int hh_rm_console_open(hh_vmid_t vmid);
int hh_rm_console_close(hh_vmid_t vmid);
int hh_rm_console_write(hh_vmid_t vmid, const char *buf, size_t size);
int hh_rm_console_flush(hh_vmid_t vmid);
int hh_rm_mem_qcom_lookup_sgl(u8 mem_type, hh_label_t label,
struct hh_acl_desc *acl_desc,
struct hh_sgl_desc *sgl_desc,
struct hh_mem_attr_desc *mem_attr_desc,
hh_memparcel_handle_t *handle);
int hh_rm_mem_release(hh_memparcel_handle_t handle, u8 flags);
int hh_rm_mem_reclaim(hh_memparcel_handle_t handle, u8 flags);
struct hh_sgl_desc *hh_rm_mem_accept(hh_memparcel_handle_t handle, u8 mem_type,
u8 trans_type, u8 flags, hh_label_t label,
struct hh_acl_desc *acl_desc,
struct hh_sgl_desc *sgl_desc,
struct hh_mem_attr_desc *mem_attr_desc,
int gh_rm_console_open(gh_vmid_t vmid);
int gh_rm_console_close(gh_vmid_t vmid);
int gh_rm_console_write(gh_vmid_t vmid, const char *buf, size_t size);
int gh_rm_console_flush(gh_vmid_t vmid);
int gh_rm_mem_qcom_lookup_sgl(u8 mem_type, gh_label_t label,
struct gh_acl_desc *acl_desc,
struct gh_sgl_desc *sgl_desc,
struct gh_mem_attr_desc *mem_attr_desc,
gh_memparcel_handle_t *handle);
int gh_rm_mem_release(gh_memparcel_handle_t handle, u8 flags);
int gh_rm_mem_reclaim(gh_memparcel_handle_t handle, u8 flags);
struct gh_sgl_desc *gh_rm_mem_accept(gh_memparcel_handle_t handle, u8 mem_type,
u8 trans_type, u8 flags, gh_label_t label,
struct gh_acl_desc *acl_desc,
struct gh_sgl_desc *sgl_desc,
struct gh_mem_attr_desc *mem_attr_desc,
u16 map_vmid);
int hh_rm_mem_share(u8 mem_type, u8 flags, hh_label_t label,
struct hh_acl_desc *acl_desc, struct hh_sgl_desc *sgl_desc,
struct hh_mem_attr_desc *mem_attr_desc,
hh_memparcel_handle_t *handle);
int hh_rm_mem_lend(u8 mem_type, u8 flags, hh_label_t label,
struct hh_acl_desc *acl_desc, struct hh_sgl_desc *sgl_desc,
struct hh_mem_attr_desc *mem_attr_desc,
hh_memparcel_handle_t *handle);
int hh_rm_mem_notify(hh_memparcel_handle_t handle, u8 flags,
hh_label_t mem_info_tag,
struct hh_notify_vmid_desc *vmid_desc);
int gh_rm_mem_share(u8 mem_type, u8 flags, gh_label_t label,
struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
struct gh_mem_attr_desc *mem_attr_desc,
gh_memparcel_handle_t *handle);
int gh_rm_mem_lend(u8 mem_type, u8 flags, gh_label_t label,
struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
struct gh_mem_attr_desc *mem_attr_desc,
gh_memparcel_handle_t *handle);
int gh_rm_mem_notify(gh_memparcel_handle_t handle, u8 flags,
gh_label_t mem_info_tag,
struct gh_notify_vmid_desc *vmid_desc);
#else
/* RM client register notifications APIs */
static inline int hh_rm_register_notifier(struct notifier_block *nb)
static inline int gh_rm_register_notifier(struct notifier_block *nb)
{
return -ENODEV;
}

static inline int hh_rm_unregister_notifier(struct notifier_block *nb)
static inline int gh_rm_unregister_notifier(struct notifier_block *nb)
{
return -ENODEV;
}

/* Client APIs for IRQ management */
static inline int hh_rm_virq_to_irq(u32 virq)
static inline int gh_rm_virq_to_irq(u32 virq, u32 type)
{
return -EINVAL;
}

static inline int hh_rm_vm_irq_lend(hh_vmid_t vmid,
static inline int gh_rm_vm_irq_lend(gh_vmid_t vmid,
int virq,
int label,
hh_virq_handle_t *virq_handle)
gh_virq_handle_t *virq_handle)
{
return -EINVAL;
}

static inline int hh_rm_irq_to_virq(int irq, u32 *virq)
static inline int gh_rm_irq_to_virq(int irq, u32 *virq)
{
return -EINVAL;
}

static inline int hh_rm_vm_irq_lend_notify(hh_vmid_t vmid,
hh_virq_handle_t virq_handle)
static inline int gh_rm_vm_irq_lend_notify(gh_vmid_t vmid,
gh_virq_handle_t virq_handle)
{
return -EINVAL;
}

static inline int hh_rm_vm_irq_accept(hh_virq_handle_t virq_handle, int virq)
static inline int gh_rm_vm_irq_accept(gh_virq_handle_t virq_handle, int virq)
{
return -EINVAL;
}

static inline int hh_rm_vm_irq_accept_notify(hh_vmid_t vmid,
hh_virq_handle_t virq_handle)
static inline int gh_rm_vm_irq_accept_notify(gh_vmid_t vmid,
gh_virq_handle_t virq_handle)
{
return -EINVAL;
}

static inline int hh_rm_vm_irq_release(hh_virq_handle_t virq_handle)
static inline int gh_rm_vm_irq_release(gh_virq_handle_t virq_handle)
{
return -EINVAL;
}

static inline int hh_rm_vm_irq_release_notify(hh_vmid_t vmid,
hh_virq_handle_t virq_handle)
static inline int gh_rm_vm_irq_release_notify(gh_vmid_t vmid,
gh_virq_handle_t virq_handle)
{
return -EINVAL;
}

static inline int hh_rm_vm_irq_reclaim(hh_virq_handle_t virq_handle)
static inline int gh_rm_vm_irq_reclaim(gh_virq_handle_t virq_handle)
{
return -EINVAL;
}

/* Client APIs for VM management */
static inline int hh_rm_vm_alloc_vmid(enum hh_vm_names vm_name, int *vmid)
static inline int gh_rm_vm_alloc_vmid(enum gh_vm_names vm_name, int *vmid)
{
return -EINVAL;
}

static inline int hh_rm_get_vmid(enum hh_vm_names vm_name, hh_vmid_t *vmid)
static inline int gh_rm_get_vmid(enum gh_vm_names vm_name, gh_vmid_t *vmid)
{
return -EINVAL;
}

static inline int hh_rm_get_vm_name(hh_vmid_t vmid, enum hh_vm_names *vm_name)
static inline int gh_rm_get_vm_name(gh_vmid_t vmid, enum gh_vm_names *vm_name)
{
return -EINVAL;
}

static inline int hh_rm_get_vminfo(enum hh_vm_names vm_name, struct hh_vminfo *vminfo);
static inline int gh_rm_get_vminfo(enum gh_vm_names vm_name, struct gh_vminfo *vminfo)
{
return -EINVAL;
}

static inline int hh_rm_vm_start(int vmid)
static inline int gh_rm_vm_start(int vmid)
{
return -EINVAL;
}

static inline int hh_rm_get_vm_id_info(enum hh_vm_names vm_name, hh_vmid_t vmid)
static inline int gh_rm_get_vm_id_info(enum gh_vm_names vm_name, gh_vmid_t vmid)
{
return -EINVAL;
}

/* Client APIs for VM query */
static inline int hh_rm_populate_hyp_res(hh_vmid_t vmid, const char *vm_name)
static inline int gh_rm_populate_hyp_res(gh_vmid_t vmid, const char *vm_name)
{
return -EINVAL;
}

/* Client APIs for VM Services */
static inline int hh_rm_console_open(hh_vmid_t vmid)
static inline int gh_rm_console_open(gh_vmid_t vmid)
{
return -EINVAL;
}

static inline int hh_rm_console_close(hh_vmid_t vmid)
static inline int gh_rm_console_close(gh_vmid_t vmid)
{
return -EINVAL;
}

static inline int hh_rm_console_write(hh_vmid_t vmid, const char *buf,
static inline int gh_rm_console_write(gh_vmid_t vmid, const char *buf,
size_t size)
{
return -EINVAL;
}

static inline int hh_rm_console_flush(hh_vmid_t vmid)
static inline int gh_rm_console_flush(gh_vmid_t vmid)
{
return -EINVAL;
}

static inline int hh_rm_mem_qcom_lookup_sgl(u8 mem_type, hh_label_t label,
struct hh_acl_desc *acl_desc,
struct hh_sgl_desc *sgl_desc,
struct hh_mem_attr_desc *mem_attr_desc,
hh_memparcel_handle_t *handle)
static inline int gh_rm_mem_qcom_lookup_sgl(u8 mem_type, gh_label_t label,
struct gh_acl_desc *acl_desc,
struct gh_sgl_desc *sgl_desc,
struct gh_mem_attr_desc *mem_attr_desc,
gh_memparcel_handle_t *handle)
{
return -EINVAL;
}

static inline int hh_rm_mem_release(hh_memparcel_handle_t handle, u8 flags)
static inline int gh_rm_mem_release(gh_memparcel_handle_t handle, u8 flags)
{
return -EINVAL;
}

static inline int hh_rm_mem_reclaim(hh_memparcel_handle_t handle, u8 flags)
static inline int gh_rm_mem_reclaim(gh_memparcel_handle_t handle, u8 flags)
{
return -EINVAL;
}

static inline struct hh_sgl_desc *hh_rm_mem_accept(hh_memparcel_handle_t handle,
static inline struct gh_sgl_desc *gh_rm_mem_accept(gh_memparcel_handle_t handle,
u8 mem_type,
u8 trans_type, u8 flags, hh_label_t label,
struct hh_acl_desc *acl_desc,
struct hh_sgl_desc *sgl_desc,
struct hh_mem_attr_desc *mem_attr_desc,
u8 trans_type, u8 flags, gh_label_t label,
struct gh_acl_desc *acl_desc,
struct gh_sgl_desc *sgl_desc,
struct gh_mem_attr_desc *mem_attr_desc,
u16 map_vmid)
{
return ERR_PTR(-EINVAL);
}

static inline int hh_rm_mem_share(u8 mem_type, u8 flags, hh_label_t label,
struct hh_acl_desc *acl_desc, struct hh_sgl_desc *sgl_desc,
struct hh_mem_attr_desc *mem_attr_desc,
hh_memparcel_handle_t *handle)
static inline int gh_rm_mem_share(u8 mem_type, u8 flags, gh_label_t label,
struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
struct gh_mem_attr_desc *mem_attr_desc,
gh_memparcel_handle_t *handle)
{
return -EINVAL;
}

static inline int hh_rm_mem_lend(u8 mem_type, u8 flags, hh_label_t label,
struct hh_acl_desc *acl_desc, struct hh_sgl_desc *sgl_desc,
struct hh_mem_attr_desc *mem_attr_desc,
hh_memparcel_handle_t *handle)
static inline int gh_rm_mem_lend(u8 mem_type, u8 flags, gh_label_t label,
struct gh_acl_desc *acl_desc, struct gh_sgl_desc *sgl_desc,
struct gh_mem_attr_desc *mem_attr_desc,
gh_memparcel_handle_t *handle)
{
return -EINVAL;
}

static inline int hh_rm_mem_notify(hh_memparcel_handle_t handle, u8 flags,
hh_label_t mem_info_tag,
struct hh_notify_vmid_desc *vmid_desc)
static inline int gh_rm_mem_notify(gh_memparcel_handle_t handle, u8 flags,
gh_label_t mem_info_tag,
struct gh_notify_vmid_desc *vmid_desc)
{
return -EINVAL;
}

static inline int hh_rm_set_virtio_mmio_cb(hh_virtio_mmio_cb_t fnptr)
static inline int gh_rm_set_virtio_mmio_cb(gh_virtio_mmio_cb_t fnptr)
{
return -EINVAL;
}
@@ -451,7 +451,7 @@ static inline void gh_rm_unset_virtio_mmio_cb(void)

}

static inline int hh_rm_set_vcpu_affinity_cb(hh_vcpu_affinity_cb fnptr)
static inline int gh_rm_set_vcpu_affinity_cb(gh_vcpu_affinity_cb_t fnptr)
{
return -EINVAL;
}
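Pulling those declarations together, a minimal sketch of the share-and-notify sequence the renamed RM API supports; the descriptors are assumed to be built as shown earlier, and the flag choices here are illustrative.

/* Illustrative only: share a memparcel, then notify the recipient. */
static int example_share_and_notify(gh_label_t label,
				    struct gh_acl_desc *acl,
				    struct gh_sgl_desc *sgl,
				    struct gh_notify_vmid_desc *vmid_desc)
{
	gh_memparcel_handle_t handle;
	int ret;

	ret = gh_rm_mem_share(GH_RM_MEM_TYPE_NORMAL, 0, label,
			      acl, sgl, NULL, &handle);
	if (ret)
		return ret;

	return gh_rm_mem_notify(handle, GH_RM_MEM_NOTIFY_RECIPIENT_SHARED,
				label, vmid_desc);
}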
@@ -7,7 +7,7 @@
#define _MEM_BUF_EXPORTER_H

#include <linux/dma-buf.h>
#include <linux/haven/hh_rm_drv.h>
#include <linux/gunyah/gh_rm_drv.h>
#include <soc/qcom/secure_buffer.h>

int mem_buf_dma_buf_attach(struct dma_buf *dmabuf,
@@ -59,7 +59,7 @@ struct mem_buf_vmperm *mem_buf_vmperm_alloc_staticvm(struct sg_table *sgt, int *
* A dmabuf in the "MEMACCEPT" state.
*/
struct mem_buf_vmperm *mem_buf_vmperm_alloc_accept(struct sg_table *sgt,
hh_memparcel_handle_t memparcel_hdl);
gh_memparcel_handle_t memparcel_hdl);

/*
@@ -8,7 +8,7 @@

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/haven/hh_rm_drv.h>
#include <linux/gunyah/gh_rm_drv.h>
#include <linux/types.h>
#include <linux/dma-buf.h>
#include <uapi/linux/mem-buf.h>
@@ -52,7 +52,7 @@ struct mem_buf_msg_hdr {
* @hdr: Message header
* @size: The size of the memory allocation to be performed on the remote VM.
* @src_mem_type: The type of memory that the remote VM should allocate.
* @acl_desc: A HH ACL descriptor that describes the VMIDs that will be
* @acl_desc: A GH ACL descriptor that describes the VMIDs that will be
* accessing the memory, as well as what permissions each VMID will have.
*
* NOTE: Certain memory types require additional information for the remote VM
@@ -65,7 +65,7 @@ struct mem_buf_alloc_req {
struct mem_buf_msg_hdr hdr;
u64 size;
u32 src_mem_type;
struct hh_acl_desc acl_desc;
struct gh_acl_desc acl_desc;
} __packed;

/**
@@ -153,7 +153,7 @@ struct mem_buf_lend_kernel_arg {
unsigned int nr_acl_entries;
int *vmids;
int *perms;
hh_memparcel_handle_t memparcel_hdl;
gh_memparcel_handle_t memparcel_hdl;
};

int mem_buf_lend(struct dma_buf *dmabuf,
@@ -174,7 +174,7 @@ struct mem_buf_retrieve_kernel_arg {
unsigned int nr_acl_entries;
int *vmids;
int *perms;
hh_memparcel_handle_t memparcel_hdl;
gh_memparcel_handle_t memparcel_hdl;
int fd_flags;
};
struct dma_buf *mem_buf_retrieve(struct mem_buf_retrieve_kernel_arg *arg);
@@ -3,14 +3,14 @@
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hh_virtio_backend
#define TRACE_SYSTEM gh_virtio_backend

#if !defined(_TRACE_HH_VIRTIO_BACKEND_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HH_VIRTIO_BACKEND_H
#if !defined(_TRACE_GH_VIRTIO_BACKEND_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GH_VIRTIO_BACKEND_H

#include <linux/tracepoint.h>

TRACE_EVENT(hh_virtio_backend_irq_inj,
TRACE_EVENT(gh_virtio_backend_irq_inj,

TP_PROTO(int label, int rc),

@@ -29,7 +29,7 @@ TRACE_EVENT(hh_virtio_backend_irq_inj,
TP_printk("device %d inj_irq rc %d", __entry->label, __entry->rc)
);

TRACE_EVENT(hh_virtio_backend_queue_notify,
TRACE_EVENT(gh_virtio_backend_queue_notify,

TP_PROTO(int label, int qno),

@@ -48,7 +48,7 @@ TRACE_EVENT(hh_virtio_backend_queue_notify,
TP_printk("device %d queue_notify on %d", __entry->label, __entry->qno)
);

TRACE_EVENT(hh_virtio_backend_wait_event,
TRACE_EVENT(gh_virtio_backend_wait_event,

TP_PROTO(int label, int cur_event, int org_event, int cur_event_data, int org_event_data),

@@ -75,7 +75,7 @@ TRACE_EVENT(hh_virtio_backend_wait_event,
__entry->cur_event_data, __entry->org_event_data)
);

TRACE_EVENT(hh_virtio_backend_irq,
TRACE_EVENT(gh_virtio_backend_irq,

TP_PROTO(int label, int event, int event_data, int rc),

@@ -100,7 +100,7 @@ TRACE_EVENT(hh_virtio_backend_irq,
);

#endif /* _TRACE_HH_VIRTIO_BACKEND_H */
#endif /* _TRACE_GH_VIRTIO_BACKEND_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
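After the rename, call sites emit these events through the usual trace_<name>() wrappers that TRACE_EVENT generates; a one-line illustration with stand-in values:

/* Illustrative only: label and rc are stand-in values. */
trace_gh_virtio_backend_irq_inj(label, rc);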
@@ -3,10 +3,10 @@
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hh_virtio_frontend
#define TRACE_SYSTEM gh_virtio_frontend

#if !defined(_TRACE_HH_VIRTIO_FRONTEND_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HH_VIRTIO_FRONTEND_H
#if !defined(_TRACE_GH_VIRTIO_FRONTEND_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_GH_VIRTIO_FRONTEND_H

#include <linux/tracepoint.h>

@@ -173,7 +173,7 @@ TRACE_EVENT(virtio_block_submit,
__entry->ioprio, __entry->num, __entry->err)
);

#endif /* _TRACE_HH_VIRTIO_FRONTEND_H */
#endif /* _TRACE_GH_VIRTIO_FRONTEND_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (c) 2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/

#ifndef _UAPI_LINUX_VIRTIO_BACKEND_H
@@ -28,28 +28,28 @@
/*
* IOCTLs supported by virtio backend driver
*/
#define HH_GET_SHARED_MEMORY_SIZE _IOR(VIRTIO_BE_IOC_MAGIC, 1, __u64)
#define HH_IOEVENTFD _IOW(VIRTIO_BE_IOC_MAGIC, 2, \
#define GH_GET_SHARED_MEMORY_SIZE _IOR(VIRTIO_BE_IOC_MAGIC, 1, __u64)
#define GH_IOEVENTFD _IOW(VIRTIO_BE_IOC_MAGIC, 2, \
struct virtio_eventfd)
#define HH_IRQFD _IOW(VIRTIO_BE_IOC_MAGIC, 3, \
#define GH_IRQFD _IOW(VIRTIO_BE_IOC_MAGIC, 3, \
struct virtio_irqfd)
#define HH_WAIT_FOR_EVENT _IOWR(VIRTIO_BE_IOC_MAGIC, 4, \
#define GH_WAIT_FOR_EVENT _IOWR(VIRTIO_BE_IOC_MAGIC, 4, \
struct virtio_event)
#define HH_SET_DEVICE_FEATURES _IOW(VIRTIO_BE_IOC_MAGIC, 5, \
#define GH_SET_DEVICE_FEATURES _IOW(VIRTIO_BE_IOC_MAGIC, 5, \
struct virtio_dev_features)
#define HH_SET_QUEUE_NUM_MAX _IOW(VIRTIO_BE_IOC_MAGIC, 6, \
#define GH_SET_QUEUE_NUM_MAX _IOW(VIRTIO_BE_IOC_MAGIC, 6, \
struct virtio_queue_max)
#define HH_SET_DEVICE_CONFIG_DATA _IOW(VIRTIO_BE_IOC_MAGIC, 7, \
#define GH_SET_DEVICE_CONFIG_DATA _IOW(VIRTIO_BE_IOC_MAGIC, 7, \
struct virtio_config_data)
#define HH_GET_DRIVER_CONFIG_DATA _IOWR(VIRTIO_BE_IOC_MAGIC, 8, \
#define GH_GET_DRIVER_CONFIG_DATA _IOWR(VIRTIO_BE_IOC_MAGIC, 8, \
struct virtio_config_data)
#define HH_GET_QUEUE_INFO _IOWR(VIRTIO_BE_IOC_MAGIC, 9, \
#define GH_GET_QUEUE_INFO _IOWR(VIRTIO_BE_IOC_MAGIC, 9, \
struct virtio_queue_info)
#define HH_GET_DRIVER_FEATURES _IOWR(VIRTIO_BE_IOC_MAGIC, 10, \
#define GH_GET_DRIVER_FEATURES _IOWR(VIRTIO_BE_IOC_MAGIC, 10, \
struct virtio_driver_features)
#define HH_ACK_DRIVER_OK _IOWR(VIRTIO_BE_IOC_MAGIC, 11, __u32)
#define HH_SET_APP_READY _IO(VIRTIO_BE_IOC_MAGIC, 12)
#define HH_ACK_RESET _IOW(VIRTIO_BE_IOC_MAGIC, 13, struct virtio_ack_reset)
#define GH_ACK_DRIVER_OK _IOWR(VIRTIO_BE_IOC_MAGIC, 11, __u32)
#define GH_SET_APP_READY _IO(VIRTIO_BE_IOC_MAGIC, 12)
#define GH_ACK_RESET _IOW(VIRTIO_BE_IOC_MAGIC, 13, struct virtio_ack_reset)

struct virtio_ack_reset {
__u32 label;
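From userspace the renamed ioctls are used in the ordinary way; a minimal sketch that queries the shared-memory size, where the device node path and the installed header location are stand-ins (this change defines neither):

/* Illustrative only: device path and header path are stand-ins. */
#include <stdio.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/virtio_backend.h>	/* assumed installed UAPI header */

int main(void)
{
	__u64 shmem_size = 0;
	int fd = open("/dev/gh_virtio_backend", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, GH_GET_SHARED_MEMORY_SIZE, &shmem_size) == 0)
		printf("shared memory: %llu bytes\n",
		       (unsigned long long)shmem_size);
	return 0;
}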
@@ -13,11 +13,11 @@ deferred-free-helper.ko
dispcc-waipio.ko
gcc-waipio.ko
gdsc-regulator.ko
hh_ctrl.ko
hh_dbl.ko
hh_msgq.ko
hh_rm_drv.ko
hh_virt_wdt.ko
gh_ctrl.ko
gh_dbl.ko
gh_msgq.ko
gh_rm_drv.ko
gh_virt_wdt.ko
icc-bcm-voter.ko
icc-debug.ko
icc-rpmh.ko
@@ -5,7 +5,7 @@ config NEURON
tristate "Support for Neuron device-sharing framework"
help
This option enables the Neuron device-sharing framework. It is used
by guests of the haven hypervisors to serve or access shared I/O
by guests of the Gunyah hypervisor to serve or access shared I/O
devices and other inter-VM services. The Neuron framework is composed
of three buses that represent different layers in the framework
(channel, protocol, application).

@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
config NEURON_CH_HAVEN
tristate "Shared memory channel drivers for Haven guests"
depends on OF && HAVEN_DRIVERS
config NEURON_CH_GUNYAH
tristate "Shared memory channel drivers for Gunyah guests"
depends on OF && GUNYAH_DRIVERS
help
This option enables receive and send Neuron channel drivers that use
a shared memory buffer and a pair of notification objects to
communicate with another VM. This is the Haven version.
communicate with another VM. This is the Gunyah version.

If unsure, say Y.

@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NEURON_CH_HAVEN) += ch_haven_recv.o
obj-$(CONFIG_NEURON_CH_HAVEN) += ch_haven_send.o
obj-$(CONFIG_NEURON_CH_GUNYAH) += ch_gunyah_recv.o
obj-$(CONFIG_NEURON_CH_GUNYAH) += ch_gunyah_send.o
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/neuron.h>
|
||||
#include <asm-generic/barrier.h>
|
||||
#include <linux/haven/hh_rm_drv.h>
|
||||
#include <linux/haven/hh_dbl.h>
|
||||
#include <linux/gunyah/gh_rm_drv.h>
|
||||
#include <linux/gunyah/gh_dbl.h>
|
||||
#include <soc/qcom/secure_buffer.h>
|
||||
#include "ch_mq_shmem_common.h"
|
||||
|
||||
@ -91,19 +91,19 @@ static inline int ring_read_msg(struct neuron_msg_queue *msgq,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int channel_hh_kick(struct neuron_mq_data_priv *priv)
|
||||
static inline int channel_gh_kick(struct neuron_mq_data_priv *priv)
|
||||
{
|
||||
hh_dbl_flags_t dbl_mask = CH_DBL_MASK;
|
||||
gh_dbl_flags_t dbl_mask = CH_DBL_MASK;
|
||||
int ret;
|
||||
|
||||
ret = hh_dbl_send(priv->tx_dbl, &dbl_mask, 0);
|
||||
ret = gh_dbl_send(priv->tx_dbl, &dbl_mask, 0);
|
||||
if (ret)
|
||||
pr_err("failed to raise virq to the sender %d\n", ret);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t channel_hh_receivev(struct neuron_channel *channel_dev,
|
||||
static ssize_t channel_gh_receivev(struct neuron_channel *channel_dev,
|
||||
struct buffer_list buf)
|
||||
{
|
||||
ssize_t ret;
|
||||
@ -177,13 +177,13 @@ static ssize_t channel_hh_receivev(struct neuron_channel *channel_dev,
|
||||
if (left_space < space_for_next && new_left_space >= space_for_next) {
|
||||
dev_dbg(&channel_dev->dev, "Waking the sender up");
|
||||
/* wake up the sender */
|
||||
channel_hh_kick(priv);
|
||||
channel_gh_kick(priv);
|
||||
}
|
||||
|
||||
return (ssize_t)len;
|
||||
}
|
||||
|
||||
static ssize_t channel_hh_receive(struct neuron_channel *channel_dev,
|
||||
static ssize_t channel_gh_receive(struct neuron_channel *channel_dev,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct buffer_list buf = {
|
||||
@ -191,10 +191,10 @@ static ssize_t channel_hh_receive(struct neuron_channel *channel_dev,
|
||||
.offset = 0,
|
||||
.size = skb->len
|
||||
};
|
||||
return channel_hh_receivev(channel_dev, buf);
|
||||
return channel_gh_receivev(channel_dev, buf);
|
||||
}
|
||||
|
||||
static void channel_hh_cb(int irq, void *data)
|
||||
static void channel_gh_cb(int irq, void *data)
|
||||
{
|
||||
struct neuron_mq_data_priv *priv = data;
|
||||
|
||||
@ -240,7 +240,7 @@ static int msgq_init(struct neuron_mq_data_priv *priv)
|
||||
hdr->max_msg_size = channel->max_size;
|
||||
|
||||
/* Notify the sender that the channel has been reset. */
|
||||
channel_hh_kick(priv);
|
||||
channel_gh_kick(priv);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -279,7 +279,7 @@ static int channel_sync_thread(void *data)
|
||||
shm_clear_header(priv);
|
||||
|
||||
/* Notify the sender that shared memory header has been initialized. */
|
||||
channel_hh_kick(priv);
|
||||
channel_gh_kick(priv);
|
||||
|
||||
/* Waiting for the sender's readiness. */
|
||||
wait_event_killable(priv->wait_q, smp_load_acquire(&hdr->head) != -1 ||
|
||||
@ -296,14 +296,14 @@ static int channel_sync_thread(void *data)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int channel_hh_share_mem(struct neuron_mq_data_priv *priv,
|
||||
hh_vmid_t self, hh_vmid_t peer)
|
||||
static int channel_gh_share_mem(struct neuron_mq_data_priv *priv,
|
||||
gh_vmid_t self, gh_vmid_t peer)
|
||||
{
|
||||
u32 src_vmlist[1] = {self};
|
||||
int dst_vmlist[2] = {self, peer};
|
||||
int dst_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE};
|
||||
struct hh_acl_desc *acl;
|
||||
struct hh_sgl_desc *sgl;
|
||||
struct gh_acl_desc *acl;
|
||||
struct gh_sgl_desc *sgl;
|
||||
int ret;
|
||||
|
||||
ret = hyp_assign_phys(priv->buffer.start, resource_size(&priv->buffer),
|
||||
@ -315,10 +315,10 @@ static int channel_hh_share_mem(struct neuron_mq_data_priv *priv,
|
||||
return ret;
|
||||
}
|
||||
|
||||
acl = kzalloc(offsetof(struct hh_acl_desc, acl_entries[2]), GFP_KERNEL);
|
||||
acl = kzalloc(offsetof(struct gh_acl_desc, acl_entries[2]), GFP_KERNEL);
|
||||
if (!acl)
|
||||
return -ENOMEM;
|
||||
sgl = kzalloc(offsetof(struct hh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
|
||||
sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
|
||||
if (!sgl) {
|
||||
kfree(acl);
|
||||
return -ENOMEM;
|
||||
@ -326,15 +326,15 @@ static int channel_hh_share_mem(struct neuron_mq_data_priv *priv,
|
||||
|
||||
acl->n_acl_entries = 2;
|
||||
acl->acl_entries[0].vmid = (u16)self;
|
||||
acl->acl_entries[0].perms = HH_RM_ACL_R | HH_RM_ACL_W;
|
||||
acl->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W;
|
||||
acl->acl_entries[1].vmid = (u16)peer;
|
||||
acl->acl_entries[1].perms = HH_RM_ACL_R | HH_RM_ACL_W;
|
||||
acl->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W;
|
||||
|
||||
sgl->n_sgl_entries = 1;
|
||||
sgl->sgl_entries[0].ipa_base = priv->buffer.start;
|
||||
sgl->sgl_entries[0].size = resource_size(&priv->buffer);
|
||||
ret = hh_rm_mem_qcom_lookup_sgl(HH_RM_MEM_TYPE_NORMAL,
|
||||
priv->haven_label,
|
||||
ret = gh_rm_mem_qcom_lookup_sgl(GH_RM_MEM_TYPE_NORMAL,
|
||||
priv->gunyah_label,
|
||||
acl, sgl, NULL,
|
||||
&priv->shm_memparcel);
|
||||
kfree(acl);
|
||||
@ -343,39 +343,39 @@ static int channel_hh_share_mem(struct neuron_mq_data_priv *priv,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int channel_hh_rm_cb(struct notifier_block *nb, unsigned long cmd,
|
||||
static int channel_gh_rm_cb(struct notifier_block *nb, unsigned long cmd,
|
||||
void *data)
|
||||
{
|
||||
struct hh_rm_notif_vm_status_payload *vm_status_payload;
|
||||
struct gh_rm_notif_vm_status_payload *vm_status_payload;
|
||||
struct neuron_mq_data_priv *priv;
|
||||
hh_vmid_t peer_vmid;
|
||||
hh_vmid_t self_vmid;
|
||||
gh_vmid_t peer_vmid;
|
||||
gh_vmid_t self_vmid;
|
||||
|
||||
priv = container_of(nb, struct neuron_mq_data_priv, rm_nb);
|
||||
|
||||
if (cmd != HH_RM_NOTIF_VM_STATUS)
|
||||
if (cmd != GH_RM_NOTIF_VM_STATUS)
|
||||
return NOTIFY_DONE;
|
||||
|
||||
vm_status_payload = data;
|
||||
if (vm_status_payload->vm_status != HH_RM_VM_STATUS_READY)
|
||||
if (vm_status_payload->vm_status != GH_RM_VM_STATUS_READY)
|
||||
return NOTIFY_DONE;
|
||||
if (hh_rm_get_vmid(priv->peer_name, &peer_vmid))
|
||||
if (gh_rm_get_vmid(priv->peer_name, &peer_vmid))
|
||||
return NOTIFY_DONE;
|
||||
if (hh_rm_get_vmid(HH_PRIMARY_VM, &self_vmid))
|
||||
if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
|
||||
return NOTIFY_DONE;
|
||||
if (peer_vmid != vm_status_payload->vmid)
|
||||
return NOTIFY_DONE;
|
||||
|
||||
if (channel_hh_share_mem(priv, self_vmid, peer_vmid))
|
||||
if (channel_gh_share_mem(priv, self_vmid, peer_vmid))
|
||||
pr_err("%s: failed to share memory\n", __func__);
|
||||
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
static struct device_node *
|
||||
channel_hh_svm_of_parse(struct neuron_mq_data_priv *priv, struct device *dev)
|
||||
channel_gh_svm_of_parse(struct neuron_mq_data_priv *priv, struct device *dev)
|
||||
{
|
||||
const char *compat = "qcom,neuron-channel-haven-shmem-gen";
|
||||
const char *compat = "qcom,neuron-channel-gunyah-shmem-gen";
|
||||
struct device_node *np = NULL;
|
||||
struct device_node *shm_np;
|
||||
u32 label;
|
||||
@ -387,7 +387,7 @@ channel_hh_svm_of_parse(struct neuron_mq_data_priv *priv, struct device *dev)
|
||||
of_node_put(np);
|
||||
continue;
|
||||
}
|
||||
if (label == priv->haven_label)
|
||||
if (label == priv->gunyah_label)
|
||||
break;
|
||||
|
||||
of_node_put(np);
|
||||
@ -403,7 +403,7 @@ channel_hh_svm_of_parse(struct neuron_mq_data_priv *priv, struct device *dev)
|
||||
return shm_np;
|
||||
}
|
||||
|
||||
static int channel_hh_map_memory(struct neuron_mq_data_priv *priv,
|
||||
static int channel_gh_map_memory(struct neuron_mq_data_priv *priv,
|
||||
struct device *dev)
|
||||
{
|
||||
struct device_node *np;
|
||||
@ -413,7 +413,7 @@ static int channel_hh_map_memory(struct neuron_mq_data_priv *priv,
|
||||
|
||||
np = of_parse_phandle(dev->of_node, "shared-buffer", 0);
|
||||
if (!np) {
|
||||
np = channel_hh_svm_of_parse(priv, dev);
|
||||
np = channel_gh_svm_of_parse(priv, dev);
|
||||
if (!np) {
|
||||
dev_err(dev, "cant parse shared mem node!\n");
|
||||
return -EINVAL;
|
||||
@ -454,22 +454,22 @@ static int channel_hh_map_memory(struct neuron_mq_data_priv *priv,
|
||||
ret = of_property_read_u32(dev->of_node, "peer-name",
|
||||
&priv->peer_name);
|
||||
if (ret)
|
||||
priv->peer_name = HH_SELF_VM;
|
||||
priv->peer_name = GH_SELF_VM;
|
||||
|
||||
priv->rm_nb.notifier_call = channel_hh_rm_cb;
|
||||
priv->rm_nb.notifier_call = channel_gh_rm_cb;
|
||||
priv->rm_nb.priority = INT_MAX;
|
||||
hh_rm_register_notifier(&priv->rm_nb);
|
||||
gh_rm_register_notifier(&priv->rm_nb);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int channel_hh_probe(struct neuron_channel *cdev)
|
||||
static int channel_gh_probe(struct neuron_channel *cdev)
|
||||
{
|
||||
struct device_node *node = cdev->dev.of_node;
|
||||
struct device *dev = &cdev->dev;
|
||||
struct neuron_mq_data_priv *priv;
|
||||
enum hh_dbl_label dbl_label;
|
||||
enum gh_dbl_label dbl_label;
|
||||
int ret;
|
||||
|
||||
if (!node)
|
||||
@ -481,31 +481,31 @@ static int channel_hh_probe(struct neuron_channel *cdev)
|
||||
priv->dev = cdev;
|
||||
init_waitqueue_head(&priv->wait_q);
|
||||
|
||||
ret = of_property_read_u32(node, "haven-label", &priv->haven_label);
|
||||
ret = of_property_read_u32(node, "gunyah-label", &priv->gunyah_label);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to read label info %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = channel_hh_map_memory(priv, dev);
|
||||
ret = channel_gh_map_memory(priv, dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to map memory %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Get outgoing haven doorbell information */
|
||||
dbl_label = priv->haven_label;
|
||||
priv->tx_dbl = hh_dbl_tx_register(dbl_label);
|
||||
/* Get outgoing gunyah doorbell information */
|
||||
dbl_label = priv->gunyah_label;
|
||||
priv->tx_dbl = gh_dbl_tx_register(dbl_label);
|
||||
if (IS_ERR_OR_NULL(priv->tx_dbl)) {
|
||||
ret = PTR_ERR(priv->tx_dbl);
|
||||
dev_err(dev, "failed to get haven tx dbl %d\n", ret);
|
||||
dev_err(dev, "failed to get gunyah tx dbl %d\n", ret);
|
||||
goto fail_tx_dbl;
|
||||
}
|
||||
|
||||
priv->rx_dbl = hh_dbl_rx_register(dbl_label, channel_hh_cb, priv);
|
||||
priv->rx_dbl = gh_dbl_rx_register(dbl_label, channel_gh_cb, priv);
|
||||
if (IS_ERR_OR_NULL(priv->rx_dbl)) {
|
||||
ret = PTR_ERR(priv->rx_dbl);
|
||||
dev_err(dev, "failed to get haven rx dbl %d\n", ret);
|
||||
dev_err(dev, "failed to get gunyah rx dbl %d\n", ret);
|
||||
goto fail_rx_dbl;
|
||||
}
|
||||
/* Start the thread for syncing with the sender. */
|
||||
@ -517,13 +517,13 @@ static int channel_hh_probe(struct neuron_channel *cdev)
|
||||
return 0;
|
||||
|
||||
fail_rx_dbl:
|
||||
hh_dbl_tx_unregister(priv->tx_dbl);
|
||||
gh_dbl_tx_unregister(priv->tx_dbl);
|
||||
fail_tx_dbl:
|
||||
iounmap(priv->base);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void channel_hh_remove(struct neuron_channel *cdev)
|
||||
static void channel_gh_remove(struct neuron_channel *cdev)
|
||||
{
|
||||
struct neuron_mq_data_priv *priv = dev_get_drvdata(&cdev->dev);
|
||||
|
||||
@ -533,34 +533,34 @@ static void channel_hh_remove(struct neuron_channel *cdev)
|
||||
devm_release_mem_region(&cdev->dev, priv->buffer.start,
|
||||
resource_size(&priv->buffer));
|
||||
|
||||
hh_dbl_tx_unregister(priv->tx_dbl);
|
||||
hh_dbl_rx_unregister(priv->rx_dbl);
|
||||
gh_dbl_tx_unregister(priv->tx_dbl);
|
||||
gh_dbl_rx_unregister(priv->rx_dbl);
|
||||
}
|
||||
|
||||
static const struct of_device_id channel_hh_match[] = {
|
||||
{ .compatible = "qcom,neuron-channel-haven-shmem" },
|
||||
static const struct of_device_id channel_gh_match[] = {
|
||||
{ .compatible = "qcom,neuron-channel-gunyah-shmem" },
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, channel_hh_match);
|
||||
MODULE_DEVICE_TABLE(of, channel_gh_match);
|
||||
|
||||
static struct neuron_channel_driver channel_hh_recv_driver = {
|
||||
static struct neuron_channel_driver channel_gh_recv_driver = {
|
||||
.driver = {
|
||||
.name = "ch_haven_recv",
|
||||
.of_match_table = channel_hh_match,
|
||||
.name = "ch_gunyah_recv",
|
||||
.of_match_table = channel_gh_match,
|
||||
},
|
||||
.type = NEURON_CHANNEL_MESSAGE_QUEUE,
|
||||
.direction = NEURON_CHANNEL_RECEIVE,
|
||||
.receive_msgv = channel_hh_receivev,
|
||||
.receive_msg = channel_hh_receive,
|
||||
.probe = channel_hh_probe,
|
||||
.remove = channel_hh_remove,
|
||||
.receive_msgv = channel_gh_receivev,
|
||||
.receive_msg = channel_gh_receive,
|
||||
.probe = channel_gh_probe,
|
||||
.remove = channel_gh_remove,
|
||||
};
|
||||
|
||||
static int __init channel_hh_init(void)
|
||||
static int __init channel_gh_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = neuron_register_channel_driver(&channel_hh_recv_driver);
|
||||
ret = neuron_register_channel_driver(&channel_gh_recv_driver);
|
||||
if (ret < 0) {
|
||||
pr_err("Failed to register driver:%d\n", ret);
|
||||
return ret;
|
||||
@ -569,13 +569,13 @@ static int __init channel_hh_init(void)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __exit channel_hh_exit(void)
|
||||
static void __exit channel_gh_exit(void)
|
||||
{
|
||||
neuron_unregister_channel_driver(&channel_hh_recv_driver);
|
||||
neuron_unregister_channel_driver(&channel_gh_recv_driver);
|
||||
}
|
||||
module_init(channel_hh_init);
|
||||
module_exit(channel_hh_exit);
|
||||
module_init(channel_gh_init);
|
||||
module_exit(channel_gh_exit);
|
||||
|
||||

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Neuron channel haven shared memory receiver driver");
MODULE_DESCRIPTION("Neuron channel Gunyah shared memory receiver driver");
@@ -19,8 +19,8 @@
#include <linux/kthread.h>
#include <linux/neuron.h>
#include <asm-generic/barrier.h>
#include <linux/haven/hh_rm_drv.h>
#include <linux/haven/hh_dbl.h>
#include <linux/gunyah/gh_rm_drv.h>
#include <linux/gunyah/gh_dbl.h>
#include <soc/qcom/secure_buffer.h>
#include "ch_mq_shmem_common.h"

@@ -65,19 +65,19 @@ static inline void ring_write_msg(struct neuron_msg_queue *msgq,
msgq->offset = offset;
}

static inline int channel_hh_kick(struct neuron_mq_data_priv *priv)
static inline int channel_gh_kick(struct neuron_mq_data_priv *priv)
{
hh_dbl_flags_t dbl_mask = CH_DBL_MASK;
gh_dbl_flags_t dbl_mask = CH_DBL_MASK;
int ret;

ret = hh_dbl_send(priv->tx_dbl, &dbl_mask, 0);
ret = gh_dbl_send(priv->tx_dbl, &dbl_mask, 0);
if (ret)
pr_err("failed to raise virq to the sender %d\n", ret);

return ret;
}

static int channel_hh_sendv(struct neuron_channel *channel_dev,
static int channel_gh_sendv(struct neuron_channel *channel_dev,
struct buffer_list buf)
{
size_t space, space_needed, len;
@@ -141,12 +141,12 @@ static int channel_hh_sendv(struct neuron_channel *channel_dev,
smp_store_release(msgq->headp, msgq->offset);

if (prev_empty)
channel_hh_kick(priv);
channel_gh_kick(priv);

return 0;
}

static int channel_hh_send(struct neuron_channel *channel_dev,
static int channel_gh_send(struct neuron_channel *channel_dev,
struct sk_buff *skb)
{
struct buffer_list buf = {
@@ -155,10 +155,10 @@ static int channel_hh_send(struct neuron_channel *channel_dev,
.size = skb->len
};

return channel_hh_sendv(channel_dev, buf);
return channel_gh_sendv(channel_dev, buf);
}

static void channel_hh_cb(int irq, void *data)
static void channel_gh_cb(int irq, void *data)
{
struct neuron_mq_data_priv *priv = data;

@@ -250,7 +250,7 @@ static int channel_sync_thread(void *data)
/* Make sure the memory writing is in order. */
mb();
hdr->head = -1;
channel_hh_kick(priv);
channel_gh_kick(priv);

/* Waiting for tail_offset being updated by the send. */
wait_event_killable(priv->wait_q, kthread_should_stop() ||
@@ -276,21 +276,21 @@ static int channel_sync_thread(void *data)
return -EINVAL;
}

channel_hh_kick(priv);
channel_gh_kick(priv);

neuron_channel_wakeup(priv->dev);

return 0;
}

static int channel_hh_share_mem(struct neuron_mq_data_priv *priv,
hh_vmid_t self, hh_vmid_t peer)
static int channel_gh_share_mem(struct neuron_mq_data_priv *priv,
gh_vmid_t self, gh_vmid_t peer)
{
u32 src_vmlist[1] = {self};
int dst_vmlist[2] = {self, peer};
int dst_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE};
struct hh_acl_desc *acl;
struct hh_sgl_desc *sgl;
struct gh_acl_desc *acl;
struct gh_sgl_desc *sgl;
int ret;

ret = hyp_assign_phys(priv->buffer.start, resource_size(&priv->buffer),
@@ -302,10 +302,10 @@ static int channel_hh_share_mem(struct neuron_mq_data_priv *priv,
return ret;
}

acl = kzalloc(offsetof(struct hh_acl_desc, acl_entries[2]), GFP_KERNEL);
acl = kzalloc(offsetof(struct gh_acl_desc, acl_entries[2]), GFP_KERNEL);
if (!acl)
return -ENOMEM;
sgl = kzalloc(offsetof(struct hh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
if (!sgl) {
kfree(acl);
return -ENOMEM;
@@ -313,15 +313,15 @@ static int channel_hh_share_mem(struct neuron_mq_data_priv *priv,

acl->n_acl_entries = 2;
acl->acl_entries[0].vmid = (u16)self;
acl->acl_entries[0].perms = HH_RM_ACL_R | HH_RM_ACL_W;
acl->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W;
acl->acl_entries[1].vmid = (u16)peer;
acl->acl_entries[1].perms = HH_RM_ACL_R | HH_RM_ACL_W;
acl->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W;

sgl->n_sgl_entries = 1;
sgl->sgl_entries[0].ipa_base = priv->buffer.start;
sgl->sgl_entries[0].size = resource_size(&priv->buffer);
ret = hh_rm_mem_qcom_lookup_sgl(HH_RM_MEM_TYPE_NORMAL,
priv->haven_label,
ret = gh_rm_mem_qcom_lookup_sgl(GH_RM_MEM_TYPE_NORMAL,
priv->gunyah_label,
acl, sgl, NULL,
&priv->shm_memparcel);
kfree(acl);
@@ -330,37 +330,37 @@ static int channel_hh_share_mem(struct neuron_mq_data_priv *priv,
return ret;
}

static int channel_hh_rm_cb(struct notifier_block *nb, unsigned long cmd,
static int channel_gh_rm_cb(struct notifier_block *nb, unsigned long cmd,
void *data)
{
struct hh_rm_notif_vm_status_payload *vm_status_payload = data;
struct gh_rm_notif_vm_status_payload *vm_status_payload = data;
struct neuron_mq_data_priv *priv;
hh_vmid_t peer_vmid;
hh_vmid_t self_vmid;
gh_vmid_t peer_vmid;
gh_vmid_t self_vmid;

priv = container_of(nb, struct neuron_mq_data_priv, rm_nb);
if (cmd != HH_RM_NOTIF_VM_STATUS)
if (cmd != GH_RM_NOTIF_VM_STATUS)
return NOTIFY_DONE;

if (vm_status_payload->vm_status != HH_RM_VM_STATUS_READY)
if (vm_status_payload->vm_status != GH_RM_VM_STATUS_READY)
return NOTIFY_DONE;
if (hh_rm_get_vmid(priv->peer_name, &peer_vmid))
if (gh_rm_get_vmid(priv->peer_name, &peer_vmid))
return NOTIFY_DONE;
if (hh_rm_get_vmid(HH_PRIMARY_VM, &self_vmid))
if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
return NOTIFY_DONE;
if (peer_vmid != vm_status_payload->vmid)
return NOTIFY_DONE;

if (channel_hh_share_mem(priv, self_vmid, peer_vmid))
if (channel_gh_share_mem(priv, self_vmid, peer_vmid))
pr_err("%s: failed to share memory\n", __func__);

return NOTIFY_DONE;
}

static struct device_node *
channel_hh_svm_of_parse(struct neuron_mq_data_priv *priv, struct device *dev)
channel_gh_svm_of_parse(struct neuron_mq_data_priv *priv, struct device *dev)
{
const char *compat = "qcom,neuron-channel-haven-shmem-gen";
const char *compat = "qcom,neuron-channel-gunyah-shmem-gen";
struct device_node *np = NULL;
struct device_node *shm_np;
u32 label;
@@ -372,7 +372,7 @@ channel_hh_svm_of_parse(struct neuron_mq_data_priv *priv, struct device *dev)
of_node_put(np);
continue;
}
if (label == priv->haven_label)
if (label == priv->gunyah_label)
break;

of_node_put(np);
@@ -388,7 +388,7 @@ channel_hh_svm_of_parse(struct neuron_mq_data_priv *priv, struct device *dev)
return shm_np;
}

static int channel_hh_map_memory(struct neuron_mq_data_priv *priv,
static int channel_gh_map_memory(struct neuron_mq_data_priv *priv,
struct device *dev)
{
struct device_node *np;
@@ -398,7 +398,7 @@ static int channel_hh_map_memory(struct neuron_mq_data_priv *priv,

np = of_parse_phandle(dev->of_node, "shared-buffer", 0);
if (!np) {
np = channel_hh_svm_of_parse(priv, dev);
np = channel_gh_svm_of_parse(priv, dev);
if (!np) {
dev_err(dev, "cant parse shared mem node!\n");
return -EINVAL;
@@ -439,22 +439,22 @@ static int channel_hh_map_memory(struct neuron_mq_data_priv *priv,
ret = of_property_read_u32(dev->of_node, "peer-name",
&priv->peer_name);
if (ret)
priv->peer_name = HH_SELF_VM;
priv->peer_name = GH_SELF_VM;

priv->rm_nb.notifier_call = channel_hh_rm_cb;
priv->rm_nb.notifier_call = channel_gh_rm_cb;
priv->rm_nb.priority = INT_MAX;
hh_rm_register_notifier(&priv->rm_nb);
gh_rm_register_notifier(&priv->rm_nb);
}

return 0;
}

static int channel_hh_probe(struct neuron_channel *cdev)
static int channel_gh_probe(struct neuron_channel *cdev)
{
struct device_node *node = cdev->dev.of_node;
struct device *dev = &cdev->dev;
struct neuron_mq_data_priv *priv;
enum hh_dbl_label dbl_label;
enum gh_dbl_label dbl_label;
int ret;

if (!node)
@@ -466,29 +466,29 @@ static int channel_hh_probe(struct neuron_channel *cdev)
priv->dev = cdev;
init_waitqueue_head(&priv->wait_q);

ret = of_property_read_u32(node, "haven-label", &priv->haven_label);
ret = of_property_read_u32(node, "gunyah-label", &priv->gunyah_label);
if (ret) {
dev_err(dev, "failed to read label info %d\n", ret);
return ret;
}

ret = channel_hh_map_memory(priv, dev);
ret = channel_gh_map_memory(priv, dev);
if (ret)
return ret;

/* Get outgoing haven doorbell information */
dbl_label = priv->haven_label;
priv->tx_dbl = hh_dbl_tx_register(dbl_label);
/* Get outgoing gunyah doorbell information */
dbl_label = priv->gunyah_label;
priv->tx_dbl = gh_dbl_tx_register(dbl_label);
if (IS_ERR_OR_NULL(priv->tx_dbl)) {
ret = PTR_ERR(priv->tx_dbl);
dev_err(dev, "failed to get haven tx dbl %d\n", ret);
dev_err(dev, "failed to get gunyah tx dbl %d\n", ret);
goto fail_tx_dbl;
}

priv->rx_dbl = hh_dbl_rx_register(dbl_label, channel_hh_cb, priv);
priv->rx_dbl = gh_dbl_rx_register(dbl_label, channel_gh_cb, priv);
if (IS_ERR_OR_NULL(priv->rx_dbl)) {
ret = PTR_ERR(priv->rx_dbl);
dev_err(dev, "failed to get haven rx dbl %d\n", ret);
dev_err(dev, "failed to get gunyah rx dbl %d\n", ret);
goto fail_rx_dbl;
}
/* Start the thread for syncing with the receiver. */
@@ -499,13 +499,13 @@ static int channel_hh_probe(struct neuron_channel *cdev)
return 0;

fail_rx_dbl:
hh_dbl_tx_unregister(priv->tx_dbl);
gh_dbl_tx_unregister(priv->tx_dbl);
fail_tx_dbl:
iounmap(priv->base);
return ret;
}

static void channel_hh_remove(struct neuron_channel *cdev)
static void channel_gh_remove(struct neuron_channel *cdev)
{
struct neuron_mq_data_priv *priv = dev_get_drvdata(&cdev->dev);

@@ -515,34 +515,34 @@ static void channel_hh_remove(struct neuron_channel *cdev)
devm_release_mem_region(&cdev->dev, priv->buffer.start,
resource_size(&priv->buffer));

hh_dbl_tx_unregister(priv->tx_dbl);
hh_dbl_rx_unregister(priv->rx_dbl);
gh_dbl_tx_unregister(priv->tx_dbl);
gh_dbl_rx_unregister(priv->rx_dbl);
}

static const struct of_device_id channel_hh_match[] = {
{ .compatible = "qcom,neuron-channel-haven-shmem" },
static const struct of_device_id channel_gh_match[] = {
{ .compatible = "qcom,neuron-channel-gunyah-shmem" },
{},
};
MODULE_DEVICE_TABLE(of, channel_hh_match);
MODULE_DEVICE_TABLE(of, channel_gh_match);

static struct neuron_channel_driver channel_hh_send_driver = {
static struct neuron_channel_driver channel_gh_send_driver = {
.driver = {
.name = "ch_haven_send",
.of_match_table = channel_hh_match,
.name = "ch_gunyah_send",
.of_match_table = channel_gh_match,
},
.type = NEURON_CHANNEL_MESSAGE_QUEUE,
.direction = NEURON_CHANNEL_SEND,
.send_msgv = channel_hh_sendv,
.send_msg = channel_hh_send,
.probe = channel_hh_probe,
.remove = channel_hh_remove,
.send_msgv = channel_gh_sendv,
.send_msg = channel_gh_send,
.probe = channel_gh_probe,
.remove = channel_gh_remove,
};

static int __init channel_hh_init(void)
static int __init channel_gh_init(void)
{
int ret;

ret = neuron_register_channel_driver(&channel_hh_send_driver);
ret = neuron_register_channel_driver(&channel_gh_send_driver);
if (ret < 0) {
pr_err("Failed to register driver: %d\n", ret);
return ret;
@@ -551,12 +551,12 @@ static int __init channel_hh_init(void)
return 0;
}

static void __exit channel_hh_exit(void)
static void __exit channel_gh_exit(void)
{
neuron_unregister_channel_driver(&channel_hh_send_driver);
neuron_unregister_channel_driver(&channel_gh_send_driver);
}
module_init(channel_hh_init);
module_exit(channel_hh_exit);
module_init(channel_gh_init);
module_exit(channel_gh_exit);
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_DESCRIPTION("Neuron channel haven shared memory send driver");
|
||||
MODULE_DESCRIPTION("Neuron channel gunyah shared memory send driver");
|
||||
|
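The hh_dbl_* → gh_dbl_* rename above is purely mechanical; the doorbell pattern itself does not change. A minimal sketch of that pattern, assuming only the gh_dbl.h calls visible in this diff (gh_dbl_tx_register(), gh_dbl_rx_register(), gh_dbl_send(), gh_dbl_tx_unregister(), GH_DBL_NONBLOCK); MY_DBL_MASK, my_rx_cb() and my_dbl_setup() are hypothetical stand-ins, not part of this change:

/* Sketch: register both ends of one doorbell label, then kick the peer. */
#include <linux/err.h>
#include <linux/gunyah/gh_dbl.h>

#define MY_DBL_MASK 0x1			/* hypothetical flag bit */

static void *my_tx_dbl, *my_rx_dbl;

/* Runs when the peer VM rings our doorbell: drain the shared fifo here. */
static void my_rx_cb(int irq, void *data)
{
}

static int my_dbl_setup(enum gh_dbl_label label, void *priv)
{
	gh_dbl_flags_t mask = MY_DBL_MASK;

	my_tx_dbl = gh_dbl_tx_register(label);
	if (IS_ERR_OR_NULL(my_tx_dbl))
		return PTR_ERR(my_tx_dbl);

	my_rx_dbl = gh_dbl_rx_register(label, my_rx_cb, priv);
	if (IS_ERR_OR_NULL(my_rx_dbl)) {
		gh_dbl_tx_unregister(my_tx_dbl);
		return PTR_ERR(my_rx_dbl);
	}

	/* Tell the peer, without blocking, that data is pending. */
	return gh_dbl_send(my_tx_dbl, &mask, GH_DBL_NONBLOCK);
}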
@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2020 The Linux Foundation. All rights reserved. */
/* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved. */

/* Neuron Message Queue transport layer header file */
#ifndef __NEURON_MQ_H
@ -78,15 +78,15 @@ struct neuron_mq_data_priv {
u32 interrupt_counter;
/* name of peer vm */
u32 peer_name;
/* label to get haven resources like doorbell and shm */
u32 haven_label;
/* haven tx doorbell descriptor */
/* label to get gunyah resources like doorbell and shm */
u32 gunyah_label;
/* gunyah tx doorbell descriptor */
void *tx_dbl;
/* haven rx doorbell descriptor */
/* gunyah rx doorbell descriptor */
void *rx_dbl;
/* memparcel handle after assigning label to shared memory */
u32 shm_memparcel;
/* haven rm status notifier block */
/* gunyah rm status notifier block */
struct notifier_block rm_nb;
/* pointer to the device structure */
struct neuron_channel *dev;
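Both drivers touched by this change defer memory sharing until the resource manager reports the peer VM ready, as the rm_cb hunks show; the rm_nb field above is the hook. A condensed sketch of that flow, assuming only the gh_rm_drv.h symbols that appear in this diff (gh_rm_register_notifier(), GH_RM_NOTIF_VM_STATUS, GH_RM_VM_STATUS_READY, gh_rm_get_vmid(), GH_PRIMARY_VM, GH_SELF_VM); my_peer_name and my_share_mem() are hypothetical stand-ins:

#include <linux/gunyah/gh_rm_drv.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static u32 my_peer_name = GH_SELF_VM;	/* usually read from a "peer-name" DT property */

static int my_share_mem(gh_vmid_t self, gh_vmid_t peer);	/* hypothetical */

static int my_rm_cb(struct notifier_block *nb, unsigned long cmd, void *data)
{
	struct gh_rm_notif_vm_status_payload *status = data;
	gh_vmid_t peer_vmid, self_vmid;

	/* Act only on the "peer VM is ready" status notification. */
	if (cmd != GH_RM_NOTIF_VM_STATUS)
		return NOTIFY_DONE;
	if (status->vm_status != GH_RM_VM_STATUS_READY)
		return NOTIFY_DONE;
	if (gh_rm_get_vmid(my_peer_name, &peer_vmid))
		return NOTIFY_DONE;
	if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
		return NOTIFY_DONE;
	if (status->vmid != peer_vmid)
		return NOTIFY_DONE;

	/* Both vmids are known: share the fifo memory with the peer. */
	if (my_share_mem(self_vmid, peer_vmid))
		pr_err("%s: failed to share memory\n", __func__);

	return NOTIFY_DONE;
}

static struct notifier_block my_rm_nb = {
	.notifier_call	= my_rm_cb,
	.priority	= INT_MAX,	/* both drivers register at highest priority */
};

/* In probe, on the primary VM only: gh_rm_register_notifier(&my_rm_nb); */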
@ -59,12 +59,12 @@ config QRTR_MHI
MHI transport fakes synchronous sends by waiting for the uplink
callback from the MHI framework before returning to qrtr core.

config QRTR_HAVEN
tristate "Haven IPC Router channels"
config QRTR_GUNYAH
tristate "Gunyah IPC Router channels"
help
Say Y here to support a fifo based ipcrouter channel with haven
hypervisor signaling. The haven transport layer enables IPC
Say Y here to support a fifo based ipcrouter channel with gunyah
hypervisor signaling. The gunyah transport layer enables IPC
Router communication between two virtual machines. The transport
uses dynamically shared memory and haven doorbells.
uses dynamically shared memory and gunyah doorbells.

endif # QRTR

@ -7,5 +7,5 @@ obj-$(CONFIG_QRTR_TUN) += qrtr-tun.o
qrtr-tun-y := tun.o
obj-$(CONFIG_QRTR_MHI) += qrtr-mhi.o
qrtr-mhi-y := mhi.o
obj-$(CONFIG_QRTR_HAVEN) += qrtr-haven.o
qrtr-haven-y := haven.o
obj-$(CONFIG_QRTR_GUNYAH) += qrtr-gunyah.o
qrtr-gunyah-y := gunyah.o
256 net/qrtr/haven.c
@ -11,17 +11,17 @@
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/haven/hh_rm_drv.h>
#include <linux/haven/hh_dbl.h>
#include <linux/gunyah/gh_rm_drv.h>
#include <linux/gunyah/gh_dbl.h>
#include <soc/qcom/secure_buffer.h>
#include "qrtr.h"

#define HAVEN_MAGIC_KEY 0x24495043 /* "$IPC" */
#define GUNYAH_MAGIC_KEY 0x24495043 /* "$IPC" */
#define FIFO_SIZE 0x4000
#define FIFO_FULL_RESERVE 8
#define FIFO_0_START 0x1000
#define FIFO_1_START (FIFO_0_START + FIFO_SIZE)
#define HAVEN_MAGIC_IDX 0x0
#define GUNYAH_MAGIC_IDX 0x0
#define TAIL_0_IDX 0x1
#define HEAD_0_IDX 0x2
#define TAIL_1_IDX 0x3
@ -32,13 +32,13 @@

#define MAX_PKT_SZ SZ_64K

struct haven_ring {
struct gunyah_ring {
void *buf;
size_t len;
u32 offset;
};

struct haven_pipe {
struct gunyah_pipe {
__le32 *tail;
__le32 *head;
__le32 *read_notify;
@ -48,7 +48,7 @@ struct haven_pipe {
};

/**
* qrtr_haven_dev - qrtr haven transport structure
* qrtr_gunyah_dev - qrtr gunyah transport structure
* @ep: qrtr endpoint specific info.
* @dev: device from platform_device.
* @pkt: buf for reading from fifo.
@ -59,16 +59,16 @@ struct haven_pipe {
* @master: primary vm indicator.
* @peer_name: name of vm peer.
* @rm_nb: notifier block for vm status from rm
* @label: label for haven resources
* @label: label for gunyah resources
* @tx_dbl: doorbell for tx notifications.
* @rx_dbl: doorbell for rx notifications.
* @tx_pipe: TX haven specific info.
* @rx_pipe: RX haven specific info.
* @tx_pipe: TX gunyah specific info.
* @rx_pipe: RX gunyah specific info.
*/
struct qrtr_haven_dev {
struct qrtr_gunyah_dev {
struct qrtr_endpoint ep;
struct device *dev;
struct haven_ring ring;
struct gunyah_ring ring;

struct resource res;
u32 memparcel;
@ -83,19 +83,19 @@ struct qrtr_haven_dev {
void *rx_dbl;
struct work_struct work;

struct haven_pipe tx_pipe;
struct haven_pipe rx_pipe;
struct gunyah_pipe tx_pipe;
struct gunyah_pipe rx_pipe;
wait_queue_head_t tx_avail_notify;
};

static void qrtr_haven_read(struct qrtr_haven_dev *qdev);
static void qrtr_gunyah_read(struct qrtr_gunyah_dev *qdev);

static void qrtr_haven_kick(struct qrtr_haven_dev *qdev)
static void qrtr_gunyah_kick(struct qrtr_gunyah_dev *qdev)
{
hh_dbl_flags_t dbl_mask = QRTR_DBL_MASK;
gh_dbl_flags_t dbl_mask = QRTR_DBL_MASK;
int ret;

ret = hh_dbl_send(qdev->tx_dbl, &dbl_mask, HH_DBL_NONBLOCK);
ret = gh_dbl_send(qdev->tx_dbl, &dbl_mask, GH_DBL_NONBLOCK);
if (ret) {
dev_err(qdev->dev, "failed to raise doorbell %d\n", ret);
if (!qdev->master)
@ -103,21 +103,21 @@ static void qrtr_haven_kick(struct qrtr_haven_dev *qdev)
}
}

static void qrtr_haven_retry_work(struct work_struct *work)
static void qrtr_gunyah_retry_work(struct work_struct *work)
{
struct qrtr_haven_dev *qdev = container_of(work, struct qrtr_haven_dev,
struct qrtr_gunyah_dev *qdev = container_of(work, struct qrtr_gunyah_dev,
work);
hh_dbl_flags_t dbl_mask = QRTR_DBL_MASK;
gh_dbl_flags_t dbl_mask = QRTR_DBL_MASK;

hh_dbl_send(qdev->tx_dbl, &dbl_mask, 0);
gh_dbl_send(qdev->tx_dbl, &dbl_mask, 0);
}

static void qrtr_haven_cb(int irq, void *data)
static void qrtr_gunyah_cb(int irq, void *data)
{
qrtr_haven_read((struct qrtr_haven_dev *)data);
qrtr_gunyah_read((struct qrtr_gunyah_dev *)data);
}

static size_t haven_rx_avail(struct haven_pipe *pipe)
static size_t gunyah_rx_avail(struct gunyah_pipe *pipe)
{
size_t len;
u32 head;
@ -137,8 +137,8 @@ static size_t haven_rx_avail(struct haven_pipe *pipe)
return len;
}

static void haven_rx_peak(struct haven_pipe *pipe, void *data,
unsigned int offset, size_t count)
static void gunyah_rx_peak(struct gunyah_pipe *pipe, void *data,
unsigned int offset, size_t count)
{
size_t len;
u32 tail;
@ -156,7 +156,7 @@ static void haven_rx_peak(struct haven_pipe *pipe, void *data,
memcpy_fromio(data + len, pipe->fifo, (count - len));
}

static void haven_rx_advance(struct haven_pipe *pipe, size_t count)
static void gunyah_rx_advance(struct gunyah_pipe *pipe, size_t count)
{
u32 tail;

@ -169,7 +169,7 @@ static void haven_rx_advance(struct haven_pipe *pipe, size_t count)
*pipe->tail = cpu_to_le32(tail);
}

static size_t haven_tx_avail(struct haven_pipe *pipe)
static size_t gunyah_tx_avail(struct gunyah_pipe *pipe)
{
u32 avail;
u32 head;
@ -191,8 +191,8 @@ static size_t haven_tx_avail(struct haven_pipe *pipe)
return avail;
}

static void haven_tx_write(struct haven_pipe *pipe,
const void *data, size_t count)
static void gunyah_tx_write(struct gunyah_pipe *pipe, const void *data,
size_t count)
{
size_t len;
u32 head;
@ -216,32 +216,32 @@ static void haven_tx_write(struct haven_pipe *pipe,
*pipe->head = cpu_to_le32(head);
}

static void haven_set_tx_notify(struct qrtr_haven_dev *qdev)
static void gunyah_set_tx_notify(struct qrtr_gunyah_dev *qdev)
{
*qdev->tx_pipe.read_notify = cpu_to_le32(1);
}

static void haven_clr_tx_notify(struct qrtr_haven_dev *qdev)
static void gunyah_clr_tx_notify(struct qrtr_gunyah_dev *qdev)
{
*qdev->tx_pipe.read_notify = 0;
}

static bool haven_get_read_notify(struct qrtr_haven_dev *qdev)
static bool gunyah_get_read_notify(struct qrtr_gunyah_dev *qdev)
{
return le32_to_cpu(*qdev->rx_pipe.read_notify);
}

static void haven_wait_for_tx_avail(struct qrtr_haven_dev *qdev)
static void gunyah_wait_for_tx_avail(struct qrtr_gunyah_dev *qdev)
{
haven_set_tx_notify(qdev);
gunyah_set_tx_notify(qdev);
wait_event_timeout(qdev->tx_avail_notify,
haven_tx_avail(&qdev->tx_pipe), 10 * HZ);
gunyah_tx_avail(&qdev->tx_pipe), 10 * HZ);
}
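The four notify helpers above implement the transmit flow control used by the send path below. A condensed sketch of the handshake, reconstructed from gunyah_set_tx_notify(), gunyah_wait_for_tx_avail() and the kick-back in qrtr_gunyah_read() further down; only the calls are taken from this diff, the interleaving shown is an assumption:

/*
 * Writer (fifo full)                      Reader
 * ------------------                      ------
 * *tx_pipe.read_notify = 1;
 * wait_event_timeout(tx_avail_notify,
 *                    gunyah_tx_avail(),
 *                    10 * HZ);            drains fifo, advances tail
 *                                         sees *rx_pipe.read_notify != 0
 *                                         gh_dbl_send()       -> doorbell
 * doorbell cb -> qrtr_gunyah_read()
 *            -> wake_up_all()
 * gunyah_tx_avail() now nonzero; writing resumes,
 * gunyah_clr_tx_notify() once the message is fully queued
 */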
/* from qrtr to haven */
static int qrtr_haven_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
/* from qrtr to gunyah */
static int qrtr_gunyah_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
{
struct qrtr_haven_dev *qdev;
struct qrtr_gunyah_dev *qdev;
size_t tx_avail;
int chunk_size;
int left_size;
@ -249,7 +249,7 @@ static int qrtr_haven_send(struct qrtr_endpoint *ep, struct sk_buff *skb)

int rc;

qdev = container_of(ep, struct qrtr_haven_dev, ep);
qdev = container_of(ep, struct qrtr_gunyah_dev, ep);

rc = skb_linearize(skb);
if (rc) {
@ -260,9 +260,9 @@ static int qrtr_haven_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
left_size = skb->len;
offset = 0;
while (left_size > 0) {
tx_avail = haven_tx_avail(&qdev->tx_pipe);
tx_avail = gunyah_tx_avail(&qdev->tx_pipe);
if (!tx_avail) {
haven_wait_for_tx_avail(qdev);
gunyah_wait_for_tx_avail(qdev);
continue;
}
if (tx_avail < left_size)
@ -270,40 +270,40 @@ static int qrtr_haven_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
else
chunk_size = left_size;

haven_tx_write(&qdev->tx_pipe, skb->data + offset, chunk_size);
gunyah_tx_write(&qdev->tx_pipe, skb->data + offset, chunk_size);
offset += chunk_size;
left_size -= chunk_size;

qrtr_haven_kick(qdev);
qrtr_gunyah_kick(qdev);
}
haven_clr_tx_notify(qdev);
gunyah_clr_tx_notify(qdev);
kfree_skb(skb);

return 0;
}

static void qrtr_haven_read_new(struct qrtr_haven_dev *qdev)
static void qrtr_gunyah_read_new(struct qrtr_gunyah_dev *qdev)
{
struct haven_ring *ring = &qdev->ring;
struct gunyah_ring *ring = &qdev->ring;
size_t rx_avail;
size_t pkt_len;
u32 hdr[8];
int rc;
size_t hdr_len = sizeof(hdr);

haven_rx_peak(&qdev->rx_pipe, &hdr, 0, hdr_len);
gunyah_rx_peak(&qdev->rx_pipe, &hdr, 0, hdr_len);
pkt_len = qrtr_peek_pkt_size((void *)&hdr);
if ((int)pkt_len < 0 || pkt_len > MAX_PKT_SZ) {
dev_err(qdev->dev, "invalid pkt_len %zu\n", pkt_len);
return;
}

rx_avail = haven_rx_avail(&qdev->rx_pipe);
rx_avail = gunyah_rx_avail(&qdev->rx_pipe);
if (rx_avail > pkt_len)
rx_avail = pkt_len;

haven_rx_peak(&qdev->rx_pipe, ring->buf, 0, rx_avail);
haven_rx_advance(&qdev->rx_pipe, rx_avail);
gunyah_rx_peak(&qdev->rx_pipe, ring->buf, 0, rx_avail);
gunyah_rx_advance(&qdev->rx_pipe, rx_avail);

if (rx_avail == pkt_len) {
rc = qrtr_endpoint_post(&qdev->ep, ring->buf, pkt_len);
@ -315,18 +315,18 @@ static void qrtr_haven_read_new(struct qrtr_haven_dev *qdev)
}
}

static void qrtr_haven_read_frag(struct qrtr_haven_dev *qdev)
static void qrtr_gunyah_read_frag(struct qrtr_gunyah_dev *qdev)
{
struct haven_ring *ring = &qdev->ring;
struct gunyah_ring *ring = &qdev->ring;
size_t rx_avail;
int rc;

rx_avail = haven_rx_avail(&qdev->rx_pipe);
rx_avail = gunyah_rx_avail(&qdev->rx_pipe);
if (rx_avail + ring->offset > ring->len)
rx_avail = ring->len - ring->offset;

haven_rx_peak(&qdev->rx_pipe, ring->buf + ring->offset, 0, rx_avail);
haven_rx_advance(&qdev->rx_pipe, rx_avail);
gunyah_rx_peak(&qdev->rx_pipe, ring->buf + ring->offset, 0, rx_avail);
gunyah_rx_advance(&qdev->rx_pipe, rx_avail);

if (rx_avail + ring->offset == ring->len) {
rc = qrtr_endpoint_post(&qdev->ep, ring->buf, ring->len);
@ -339,29 +339,29 @@ static void qrtr_haven_read_frag(struct qrtr_haven_dev *qdev)
}
}

static void qrtr_haven_read(struct qrtr_haven_dev *qdev)
static void qrtr_gunyah_read(struct qrtr_gunyah_dev *qdev)
{
wake_up_all(&qdev->tx_avail_notify);

while (haven_rx_avail(&qdev->rx_pipe)) {
while (gunyah_rx_avail(&qdev->rx_pipe)) {
if (qdev->ring.offset)
qrtr_haven_read_frag(qdev);
qrtr_gunyah_read_frag(qdev);
else
qrtr_haven_read_new(qdev);
qrtr_gunyah_read_new(qdev);

if (haven_get_read_notify(qdev))
qrtr_haven_kick(qdev);
if (gunyah_get_read_notify(qdev))
qrtr_gunyah_kick(qdev);
}
}
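The receive side keeps its reassembly state in gunyah_ring.offset, so a packet larger than what one doorbell callback delivers is stitched together across calls. A sketch of the dispatch, condensed from qrtr_gunyah_read() above (comment form only; every name is from this diff):

/*
 * doorbell cb -> qrtr_gunyah_read():
 *   wake_up_all(&tx_avail_notify);     unblock a writer stuck in
 *                                      gunyah_wait_for_tx_avail()
 *   while (gunyah_rx_avail(&rx_pipe)) {
 *     ring.offset == 0 ?
 *       qrtr_gunyah_read_new() :       peek the header, size the packet
 *       qrtr_gunyah_read_frag();       append to the partial packet
 *     if (gunyah_get_read_notify())    peer asked to hear when we drain
 *       qrtr_gunyah_kick();            ring its doorbell back
 *   }
 */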
static int qrtr_haven_share_mem(struct qrtr_haven_dev *qdev,
hh_vmid_t self, hh_vmid_t peer)
static int qrtr_gunyah_share_mem(struct qrtr_gunyah_dev *qdev, gh_vmid_t self,
gh_vmid_t peer)
{
u32 src_vmlist[1] = {self};
int dst_vmlist[2] = {self, peer};
int dst_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE};
struct hh_acl_desc *acl;
struct hh_sgl_desc *sgl;
struct gh_acl_desc *acl;
struct gh_sgl_desc *sgl;
int ret;

ret = hyp_assign_phys(qdev->res.start, resource_size(&qdev->res),
@ -373,24 +373,24 @@ static int qrtr_haven_share_mem(struct qrtr_haven_dev *qdev,
return ret;
}

acl = kzalloc(offsetof(struct hh_acl_desc, acl_entries[2]), GFP_KERNEL);
acl = kzalloc(offsetof(struct gh_acl_desc, acl_entries[2]), GFP_KERNEL);
if (!acl)
return -ENOMEM;
sgl = kzalloc(offsetof(struct hh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
sgl = kzalloc(offsetof(struct gh_sgl_desc, sgl_entries[1]), GFP_KERNEL);
if (!sgl) {
kfree(acl);
return -ENOMEM;
}
acl->n_acl_entries = 2;
acl->acl_entries[0].vmid = (u16)self;
acl->acl_entries[0].perms = HH_RM_ACL_R | HH_RM_ACL_W;
acl->acl_entries[0].perms = GH_RM_ACL_R | GH_RM_ACL_W;
acl->acl_entries[1].vmid = (u16)peer;
acl->acl_entries[1].perms = HH_RM_ACL_R | HH_RM_ACL_W;
acl->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W;

sgl->n_sgl_entries = 1;
sgl->sgl_entries[0].ipa_base = qdev->res.start;
sgl->sgl_entries[0].size = resource_size(&qdev->res);
ret = hh_rm_mem_qcom_lookup_sgl(HH_RM_MEM_TYPE_NORMAL,
ret = gh_rm_mem_qcom_lookup_sgl(GH_RM_MEM_TYPE_NORMAL,
qdev->label,
acl, sgl, NULL,
&qdev->memparcel);
@ -400,44 +400,44 @@ static int qrtr_haven_share_mem(struct qrtr_haven_dev *qdev,
return ret;
}

static int qrtr_haven_rm_cb(struct notifier_block *nb, unsigned long cmd,
void *data)
static int qrtr_gunyah_rm_cb(struct notifier_block *nb, unsigned long cmd,
void *data)
{
struct hh_rm_notif_vm_status_payload *vm_status_payload;
struct qrtr_haven_dev *qdev;
hh_vmid_t peer_vmid;
hh_vmid_t self_vmid;
struct gh_rm_notif_vm_status_payload *vm_status_payload;
struct qrtr_gunyah_dev *qdev;
gh_vmid_t peer_vmid;
gh_vmid_t self_vmid;

qdev = container_of(nb, struct qrtr_haven_dev, rm_nb);
qdev = container_of(nb, struct qrtr_gunyah_dev, rm_nb);

if (cmd != HH_RM_NOTIF_VM_STATUS)
if (cmd != GH_RM_NOTIF_VM_STATUS)
return NOTIFY_DONE;

vm_status_payload = data;
if (vm_status_payload->vm_status != HH_RM_VM_STATUS_READY)
if (vm_status_payload->vm_status != GH_RM_VM_STATUS_READY)
return NOTIFY_DONE;
if (hh_rm_get_vmid(qdev->peer_name, &peer_vmid))
if (gh_rm_get_vmid(qdev->peer_name, &peer_vmid))
return NOTIFY_DONE;
if (hh_rm_get_vmid(HH_PRIMARY_VM, &self_vmid))
if (gh_rm_get_vmid(GH_PRIMARY_VM, &self_vmid))
return NOTIFY_DONE;
if (peer_vmid != vm_status_payload->vmid)
return NOTIFY_DONE;

if (qrtr_haven_share_mem(qdev, self_vmid, peer_vmid))
if (qrtr_gunyah_share_mem(qdev, self_vmid, peer_vmid))
pr_err("%s: failed to share memory\n", __func__);

return NOTIFY_DONE;
}

/**
* qrtr_haven_fifo_init() - init haven xprt configs
* qrtr_gunyah_fifo_init() - init gunyah xprt configs
*
* @return: 0 on success, standard Linux error codes on error.
*
* This function is called to initialize the haven XPRT pointer with
* the haven XPRT configurations either from device tree or static arrays.
* This function is called to initialize the gunyah XPRT pointer with
* the gunyah XPRT configurations either from device tree or static arrays.
*/
static void qrtr_haven_fifo_init(struct qrtr_haven_dev *qdev)
static void qrtr_gunyah_fifo_init(struct qrtr_gunyah_dev *qdev)
{
__le32 *descs;

@ -445,7 +445,7 @@ static void qrtr_haven_fifo_init(struct qrtr_haven_dev *qdev)
memset(qdev->base, 0, sizeof(*descs) * 10);

descs = qdev->base;
descs[HAVEN_MAGIC_IDX] = HAVEN_MAGIC_KEY;
descs[GUNYAH_MAGIC_IDX] = GUNYAH_MAGIC_KEY;

if (qdev->master) {
qdev->tx_pipe.tail = &descs[TAIL_0_IDX];
@ -479,9 +479,9 @@ static void qrtr_haven_fifo_init(struct qrtr_haven_dev *qdev)
*qdev->rx_pipe.tail = 0;
}
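qrtr_gunyah_fifo_init() above lays out a small descriptor table at the base of the shared buffer. A sketch of that layout, reconstructed from the #defines and init code in this diff; the words past TAIL_1_IDX (the mirror head index and the read_notify words) are elided by the hunk, so their placement here is an assumption:

/*
 * Shared buffer layout (descs is an array of __le32; ten words are cleared):
 *
 *   byte offset   contents
 *   -----------   --------
 *   0x0           descs[GUNYAH_MAGIC_IDX] = GUNYAH_MAGIC_KEY ("$IPC")
 *   0x4           descs[TAIL_0_IDX]  - fifo 0 tail index
 *   0x8           descs[HEAD_0_IDX]  - fifo 0 head index
 *   0xc           descs[TAIL_1_IDX]  - fifo 1 tail index
 *   0x10..0x27    remaining words (fifo 1 head, read_notify, ...) - assumed
 *   0x1000        FIFO_0_START: FIFO_SIZE (0x4000) bytes of fifo 0 data
 *   0x5000        FIFO_1_START (= FIFO_0_START + FIFO_SIZE)
 *
 * The master takes fifo 0 as its tx pipe (tx_pipe.tail = &descs[TAIL_0_IDX]);
 * the non-master side presumably takes the mirrored assignment.
 */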
static struct device_node *qrtr_haven_svm_of_parse(struct qrtr_haven_dev *qdev)
static struct device_node *qrtr_gunyah_svm_of_parse(struct qrtr_gunyah_dev *qdev)
{
const char *compat = "qcom,qrtr-haven-gen";
const char *compat = "qcom,qrtr-gunyah-gen";
struct device_node *np = NULL;
struct device_node *shm_np;
u32 label;
@ -509,7 +509,7 @@ static struct device_node *qrtr_haven_svm_of_parse(struct qrtr_haven_dev *qdev)
return shm_np;
}

static int qrtr_haven_map_memory(struct qrtr_haven_dev *qdev)
static int qrtr_gunyah_map_memory(struct qrtr_gunyah_dev *qdev)
{
struct device *dev = qdev->dev;
struct device_node *np;
@ -518,7 +518,7 @@ static int qrtr_haven_map_memory(struct qrtr_haven_dev *qdev)

np = of_parse_phandle(dev->of_node, "shared-buffer", 0);
if (!np) {
np = qrtr_haven_svm_of_parse(qdev);
np = qrtr_gunyah_svm_of_parse(qdev);
if (!np) {
dev_err(dev, "cant parse shared mem node!\n");
return -EINVAL;
@ -544,20 +544,20 @@ static int qrtr_haven_map_memory(struct qrtr_haven_dev *qdev)
}

/**
* qrtr_haven_probe() - Probe a haven xprt
* qrtr_gunyah_probe() - Probe a gunyah xprt
*
* @pdev: Platform device corresponding to haven xprt.
* @pdev: Platform device corresponding to gunyah xprt.
*
* @return: 0 on success, standard Linux error codes on error.
*
* This function is called when the underlying device tree driver registers
* a platform device, mapped to a haven transport.
* a platform device, mapped to a gunyah transport.
*/
static int qrtr_haven_probe(struct platform_device *pdev)
static int qrtr_gunyah_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct qrtr_haven_dev *qdev;
enum hh_dbl_label dbl_label;
struct qrtr_gunyah_dev *qdev;
enum gh_dbl_label dbl_label;
int ret;

qdev = devm_kzalloc(&pdev->dev, sizeof(*qdev), GFP_KERNEL);
@ -570,90 +570,90 @@ static int qrtr_haven_probe(struct platform_device *pdev)
if (!qdev->ring.buf)
return -ENOMEM;

ret = of_property_read_u32(node, "haven-label", &qdev->label);
ret = of_property_read_u32(node, "gunyah-label", &qdev->label);
if (ret) {
dev_err(qdev->dev, "failed to read label info %d\n", ret);
return ret;
}
qdev->master = of_property_read_bool(node, "qcom,master");

ret = qrtr_haven_map_memory(qdev);
ret = qrtr_gunyah_map_memory(qdev);
if (ret)
return ret;

qrtr_haven_fifo_init(qdev);
qrtr_gunyah_fifo_init(qdev);
init_waitqueue_head(&qdev->tx_avail_notify);

if (qdev->master) {
ret = of_property_read_u32(node, "peer-name", &qdev->peer_name);
if (ret)
qdev->peer_name = HH_SELF_VM;
qdev->peer_name = GH_SELF_VM;

qdev->rm_nb.notifier_call = qrtr_haven_rm_cb;
qdev->rm_nb.notifier_call = qrtr_gunyah_rm_cb;
qdev->rm_nb.priority = INT_MAX;
hh_rm_register_notifier(&qdev->rm_nb);
gh_rm_register_notifier(&qdev->rm_nb);
}

dbl_label = qdev->label;
qdev->tx_dbl = hh_dbl_tx_register(dbl_label);
qdev->tx_dbl = gh_dbl_tx_register(dbl_label);
if (IS_ERR_OR_NULL(qdev->tx_dbl)) {
ret = PTR_ERR(qdev->tx_dbl);
dev_err(qdev->dev, "failed to get haven tx dbl %d\n", ret);
dev_err(qdev->dev, "failed to get gunyah tx dbl %d\n", ret);
return ret;
}
INIT_WORK(&qdev->work, qrtr_haven_retry_work);
INIT_WORK(&qdev->work, qrtr_gunyah_retry_work);

qdev->rx_dbl = hh_dbl_rx_register(dbl_label, qrtr_haven_cb, qdev);
qdev->rx_dbl = gh_dbl_rx_register(dbl_label, qrtr_gunyah_cb, qdev);
if (IS_ERR_OR_NULL(qdev->rx_dbl)) {
ret = PTR_ERR(qdev->rx_dbl);
dev_err(qdev->dev, "failed to get haven rx dbl %d\n", ret);
dev_err(qdev->dev, "failed to get gunyah rx dbl %d\n", ret);
goto fail_rx_dbl;
}

qdev->ep.xmit = qrtr_haven_send;
qdev->ep.xmit = qrtr_gunyah_send;
ret = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NET_ID_AUTO, false);
if (ret)
goto register_fail;

if (haven_rx_avail(&qdev->rx_pipe))
qrtr_haven_read(qdev);
if (gunyah_rx_avail(&qdev->rx_pipe))
qrtr_gunyah_read(qdev);

return 0;

register_fail:
hh_dbl_rx_unregister(qdev->rx_dbl);
gh_dbl_rx_unregister(qdev->rx_dbl);
fail_rx_dbl:
cancel_work_sync(&qdev->work);
hh_dbl_tx_unregister(qdev->tx_dbl);
gh_dbl_tx_unregister(qdev->tx_dbl);

return ret;
}

static int qrtr_haven_remove(struct platform_device *pdev)
static int qrtr_gunyah_remove(struct platform_device *pdev)
{
struct qrtr_haven_dev *qdev = dev_get_drvdata(&pdev->dev);
struct qrtr_gunyah_dev *qdev = dev_get_drvdata(&pdev->dev);

cancel_work_sync(&qdev->work);
hh_dbl_tx_unregister(qdev->tx_dbl);
hh_dbl_rx_unregister(qdev->rx_dbl);
gh_dbl_tx_unregister(qdev->tx_dbl);
gh_dbl_rx_unregister(qdev->rx_dbl);

return 0;
}

static const struct of_device_id qrtr_haven_match_table[] = {
{ .compatible = "qcom,qrtr-haven" },
static const struct of_device_id qrtr_gunyah_match_table[] = {
{ .compatible = "qcom,qrtr-gunyah" },
{}
};

static struct platform_driver qrtr_haven_driver = {
static struct platform_driver qrtr_gunyah_driver = {
.driver = {
.name = "qcom_haven_qrtr",
.of_match_table = qrtr_haven_match_table,
.name = "qcom_gunyah_qrtr",
.of_match_table = qrtr_gunyah_match_table,
},
.probe = qrtr_haven_probe,
.remove = qrtr_haven_remove,
.probe = qrtr_gunyah_probe,
.remove = qrtr_gunyah_remove,
};
module_platform_driver(qrtr_haven_driver);
module_platform_driver(qrtr_gunyah_driver);

MODULE_DESCRIPTION("QTI IPC-Router Haven interface driver");
MODULE_DESCRIPTION("QTI IPC-Router Gunyah interface driver");
MODULE_LICENSE("GPL v2");