Merge commit 'b7e8439a2334bb613b38e97eb48712f683b423a1' into HEAD

* commit 'b7e8439a2334bb613b38e97eb48712f683b423a1':
  ANDROID: Snap to android14-6.1-2023-06

Conflicts:
    android/abi_gki_aarch64.stg

Change-Id: I790794c34ced4ea629957ee33d9d1fefe7637dc1
Signed-off-by: jianzhou <quic_jianzhou@quicinc.com>
Signed-off-by: Elliot Berman <quic_eberman@quicinc.com>
Signed-off-by: Guru Das Srinagesh <quic_gurus@quicinc.com>
parent 49157c1372
commit 4ab178cfc4
@@ -1 +0,0 @@
per-file sysfs-fs-f2fs=file:/fs/f2fs/OWNERS
@@ -264,6 +264,16 @@ Description:
		attached to the port will not be detected, initialized,
		or enumerated.

What:		/sys/bus/usb/devices/.../<hub_interface>/port<X>/state
Date:		June 2023
Contact:	Roy Luo <royluo@google.com>
Description:
		Indicates current state of the USB device attached to the port.
		Valid states are: 'not-attached', 'attached', 'powered',
		'reconnecting', 'unauthenticated', 'default', 'addressed',
		'configured', and 'suspended'. This file supports poll() to
		monitor the state change from user space.

What:		/sys/bus/usb/devices/.../power/usb2_lpm_l1_timeout
Date:		May 2013
Contact:	Mathias Nyman <mathias.nyman@linux.intel.com>
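The new `state` attribute above is documented as pollable. As a minimal sketch of how user space typically monitors such a sysfs attribute (the exact port path below is a hypothetical example, not taken from the commit):

/* Sketch: monitor a pollable sysfs attribute such as port<X>/state.
 * The device path is an assumption for illustration only.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	ssize_t n;
	int fd = open("/sys/bus/usb/devices/usb1/1-0:1.0/port1/state", O_RDONLY);

	if (fd < 0)
		return 1;

	/* Arm the notification: an initial read is required before polling. */
	read(fd, buf, sizeof(buf) - 1);

	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

		/* sysfs attributes signal value changes via POLLPRI */
		if (poll(&pfd, 1, -1) < 0)
			break;

		/* Re-read from the start to fetch the new value. */
		lseek(fd, 0, SEEK_SET);
		n = read(fd, buf, sizeof(buf) - 1);
		if (n <= 0)
			break;
		buf[n] = '\0';
		printf("port state: %s", buf);
	}

	close(fd);
	return 0;
}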
@@ -1 +0,0 @@
per-file f2fs**=file:/fs/f2fs/OWNERS
@@ -136,7 +136,7 @@ Code  Seq#    Include File                                 Comments
'F'   DD     video/sstfb.h                                           conflict!
'G'   00-3F  drivers/misc/sgi-gru/grulib.h                           conflict!
'G'   00-0F  xen/gntalloc.h, xen/gntdev.h                            conflict!
'G'   00-0f  linux/gunyah.h                                          conflict!
'G'   00-0F  linux/gunyah.h                                          conflict!
'H'   00-7F  linux/hiddev.h                                          conflict!
'H'   00-0F  linux/hidraw.h                                          conflict!
'H'   01     linux/mei.h                                             conflict!
@@ -7,14 +7,12 @@ Virtual Machine Manager
The Gunyah Virtual Machine Manager is a Linux driver to support launching
virtual machines using Gunyah.

Except for some basic information about the location of initial binaries,
most of the configuration about a Gunyah virtual machine is described in the
VM's devicetree. The devicetree is generated by userspace. Interacting with the
virtual machine is still done via the kernel and VM configuration requires some
of the corresponding functionality to be set up in the kernel. For instance,
sharing userspace memory with a VM is done via the `GH_VM_SET_USER_MEM_REGION`_
ioctl. The VM itself is configured to use the memory region via the
devicetree.
Configuration of a Gunyah virtual machine is done via a devicetree. When the VM
is launched, memory is provided by the host VM which contains the devicetree.
Gunyah reads the devicetree to configure the memory map and create resources
such as vCPUs for the VM. Memory can be shared with the VM with
`GH_VM_SET_USER_MEM_REGION`_. Userspace can interact with the resources in Linux
by adding "functions" to the VM.

Gunyah Functions
================
@@ -56,6 +54,9 @@ GH_CREATE_VM
~~~~~~~~~~~~

Creates a Gunyah VM. The argument is reserved for future use and must be 0.
A successful call will return a Gunyah VM file descriptor. See
`Gunyah VM API Descriptions`_ for list of IOCTLs that can be made on this file
file descriptor.

Gunyah VM API Descriptions
--------------------------
@@ -70,8 +71,8 @@ unique per virtual machine.

While VMM is guest-agnostic and allows runtime addition of memory regions,
Linux guest virtual machines do not support accepting memory regions at runtime.
Thus, memory regions should be provided before starting the VM and the VM must
be configured to accept these at boot-up.
Thus, for Linux guests, memory regions should be provided before starting the VM
and the VM must be configured via the devicetree to accept these at boot-up.

The guest physical address is used by Linux kernel to check that the requested
user regions do not overlap and to help find the corresponding memory region
@@ -87,7 +88,7 @@ GH_VM_SET_DTB_CONFIG
~~~~~~~~~~~~~~~~~~~~

This ioctl sets the location of the VM's devicetree blob and is used by Gunyah
Resource Manager to allocate resources. The guest physical memory should be part
Resource Manager to allocate resources. The guest physical memory must be part
of the primary memory parcel provided to the VM prior to GH_VM_START.

.. kernel-doc:: include/uapi/linux/gunyah.h
@@ -104,7 +105,7 @@ GH_VM_ADD_FUNCTION
This ioctl registers a Gunyah VM function with the VM manager. The VM function
is described with a &struct gh_fn_desc.type and some arguments for that type.
Typically, the function is added before the VM starts, but the function doesn't
"operate" until the VM starts with `GH_VM_START`_. For example, vCPU ioclts will
"operate" until the VM starts with `GH_VM_START`_. For example, vCPU ioctls will
all return an error until the VM starts because the vCPUs don't exist until the
VM is started. This allows the VMM to set up all the kernel functions needed for
the VM *before* the VM starts.
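To make the ioctl flow described above concrete, here is a minimal, hedged userspace sketch of the VM lifecycle (create the VM, provide a memory parcel, point at the DTB, start). The device node path and the exact field names of struct gh_userspace_memory_region / struct gh_vm_dtb_config are assumptions for illustration; the authoritative UAPI lives in include/uapi/linux/gunyah.h.

/* Sketch of the Gunyah VMM ioctl flow described above.
 * Device path, flag names and structure fields are assumptions; see
 * include/uapi/linux/gunyah.h for the real definitions.
 */
#include <fcntl.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/gunyah.h>

static int launch_vm(void *guest_mem, size_t mem_size,
		     size_t dtb_off, size_t dtb_size)
{
	int gh_fd, vm_fd;
	struct gh_userspace_memory_region region = {
		.label = 0,
		.flags = GH_MEM_ALLOW_READ | GH_MEM_ALLOW_WRITE | GH_MEM_ALLOW_EXEC,
		.guest_phys_addr = 0x80000000,             /* assumed guest PA */
		.memory_size = mem_size,
		.userspace_addr = (__u64)(unsigned long)guest_mem,
	};
	struct gh_vm_dtb_config dtb = {
		.guest_phys_addr = 0x80000000 + dtb_off,   /* inside the parcel above */
		.size = dtb_size,
	};

	gh_fd = open("/dev/gunyah", O_RDWR);               /* assumed device node */
	if (gh_fd < 0)
		return -1;

	vm_fd = ioctl(gh_fd, GH_CREATE_VM, 0);             /* argument must be 0 */
	if (vm_fd < 0)
		return -1;

	/* Memory must be provided before GH_VM_START, and the guest devicetree
	 * must describe the same region. */
	if (ioctl(vm_fd, GH_VM_SET_USER_MEM_REGION, &region) ||
	    ioctl(vm_fd, GH_VM_SET_DTB_CONFIG, &dtb) ||
	    ioctl(vm_fd, GH_VM_START))
		return -1;

	return vm_fd;
}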
@@ -1,13 +1,12 @@
# The full list of approvers is defined in
# https://android.googlesource.com/kernel/common/+/refs/meta/config/OWNERS
set noparent

# The following OWNERS are defined at the top level to improve the OWNERS
# suggestions through any user interface. Consider those people the ones that
# can help with finding the best person to review.
adelva@google.com
gregkh@google.com
maennich@google.com
saravanak@google.com
smuckle@google.com
surenb@google.com
tkjos@google.com
# GKI Dr. No Enforcement is active on this branch. Approval of one of the Dr.
# No reviewers is required following a regular CodeReview+2 vote of a code
# reviewer.
#
# See the GKI release documentation (go/gki-dr-no) for further details.
#
# The expanded list of reviewers can be found at:
# https://android.googlesource.com/kernel/common/+/android-mainline/OWNERS_DrNo

include kernel/common:android-mainline:/OWNERS_DrNo
@@ -1,13 +0,0 @@
# If we ever add another OWNERS above this directory, it's likely to be
# more permissive, so don't inherit from it
set noparent
include kernel/common:android-mainline:/OWNERS_DrNo

# Downstream boards maintained directly in this manifest branch
per-file abi_gki_aarch64_cuttlefish = adelva@google.com, rammuthiah@google.com
per-file abi_gki_aarch64_goldfish = rkir@google.com

# per-file for review purposes
per-file gki_system_dlkm_modules = ramjiyani@google.com
per-file abi_gki_protected_exports_* = ramjiyani@google.com
per-file gki_*_protected_modules = ramjiyani@google.com
(File diff suppressed because it is too large.)
@@ -54,6 +54,7 @@
__printk_ratelimit
prepare_to_wait_exclusive
proc_symlink
public_key_verify_signature
radix_tree_lookup_slot
radix_tree_replace_slot
_raw_write_trylock
@@ -244,3 +244,14 @@
#required by mi_mempool.ko
__traceiter_android_vh_madvise_cold_pageout_skip
__tracepoint_android_vh_madvise_cold_pageout_skip

#required by n_gsm.ko
tty_write_room
tty_port_tty_set
tty_register_device
tty_hung_up_p
tty_name
tty_port_block_til_ready
tty_port_close_start
tty_port_lower_dtr_rts
tty_port_close_end
@@ -27,6 +27,7 @@ drivers/net/usb/usbnet.ko
drivers/usb/class/cdc-acm.ko
drivers/usb/serial/ftdi_sio.ko
drivers/usb/serial/usbserial.ko
kernel/kheaders.ko
lib/crypto/libarc4.ko
mm/zsmalloc.ko
net/6lowpan/6lowpan.ko
@@ -1 +0,0 @@
include ../arm64/OWNERS
@@ -1,4 +0,0 @@
per-file crypto/**=file:/crypto/OWNERS
per-file {include,kernel,kvm,lib}/**=mzyngier@google.com,willdeacon@google.com
per-file mm/**=file:/mm/OWNERS
per-file net/**=file:/net/OWNERS
@@ -94,6 +94,7 @@ CONFIG_MODULE_SIG_PROTECT=y
CONFIG_MODPROBE_PATH="/system/bin/modprobe"
CONFIG_BLK_DEV_ZONED=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_CGROUP_IOCOST=y
CONFIG_BLK_INLINE_ENCRYPTION=y
CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
CONFIG_IOSCHED_BFQ=y
@@ -182,6 +183,7 @@ CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
CONFIG_NETFILTER_XT_MATCH_BPF=y
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
@@ -16,13 +16,15 @@ bool arch_is_gh_guest(void)
{
	struct arm_smccc_res res;
	uuid_t uuid;
	u32 *up;

	arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);

	((u32 *)&uuid.b[0])[0] = lower_32_bits(res.a0);
	((u32 *)&uuid.b[0])[1] = lower_32_bits(res.a1);
	((u32 *)&uuid.b[0])[2] = lower_32_bits(res.a2);
	((u32 *)&uuid.b[0])[3] = lower_32_bits(res.a3);
	up = (u32 *)&uuid.b[0];
	up[0] = lower_32_bits(res.a0);
	up[1] = lower_32_bits(res.a1);
	up[2] = lower_32_bits(res.a2);
	up[3] = lower_32_bits(res.a3);

	return uuid_equal(&uuid, &GUNYAH_UUID);
}
@ -8,16 +8,11 @@
|
||||
#define __ASM_EXCEPTION_H
|
||||
|
||||
#include <asm/esr.h>
|
||||
#include <asm/kprobes.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
#define __exception_irq_entry __irq_entry
|
||||
#else
|
||||
#define __exception_irq_entry __kprobes
|
||||
#endif
|
||||
|
||||
static inline unsigned long disr_to_esr(u64 disr)
|
||||
{
|
||||
|
@ -72,7 +72,10 @@ typedef u64 kvm_pte_t;
|
||||
|
||||
#define KVM_PTE_LEAF_ATTR_HI_S1_XN BIT(54)
|
||||
|
||||
#define KVM_PTE_LEAF_ATTR_HI_S2_XN BIT(54)
|
||||
#define KVM_PTE_LEAF_ATTR_HI_S2_XN_PXN 1
|
||||
#define KVM_PTE_LEAF_ATTR_HI_S2_XN_UXN 3
|
||||
#define KVM_PTE_LEAF_ATTR_HI_S2_XN_XN 2
|
||||
#define KVM_PTE_LEAF_ATTR_HI_S2_XN GENMASK(54, 53)
|
||||
|
||||
static inline bool kvm_pte_valid(kvm_pte_t pte)
|
||||
{
|
||||
@ -167,6 +170,11 @@ struct kvm_pgtable_mm_ops {
|
||||
void (*icache_inval_pou)(void *addr, size_t size);
|
||||
};
|
||||
|
||||
static inline kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
|
||||
{
|
||||
return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
|
||||
}
|
||||
|
||||
/**
|
||||
* enum kvm_pgtable_stage2_flags - Stage-2 page-table flags.
|
||||
* @KVM_PGTABLE_S2_NOFWB: Don't enforce Normal-WB even if the CPUs have
|
||||
@ -184,7 +192,9 @@ enum kvm_pgtable_stage2_flags {
|
||||
* @KVM_PGTABLE_PROT_W: Write permission.
|
||||
* @KVM_PGTABLE_PROT_R: Read permission.
|
||||
* @KVM_PGTABLE_PROT_DEVICE: Device attributes.
|
||||
* @KVM_PGTABLE_PROT_NC: Normal non-cacheable attributes.
|
||||
* @KVM_PGTABLE_PROT_NC: Normal non-cacheable attributes.
|
||||
* @KVM_PGTABLE_PROT_PXN: Privileged execute-never.
|
||||
* @KVM_PGTABLE_PROT_UXN: Unprivileged execute-never.
|
||||
* @KVM_PGTABLE_PROT_SW0: Software bit 0.
|
||||
* @KVM_PGTABLE_PROT_SW1: Software bit 1.
|
||||
* @KVM_PGTABLE_PROT_SW2: Software bit 2.
|
||||
@ -197,6 +207,8 @@ enum kvm_pgtable_prot {
|
||||
|
||||
KVM_PGTABLE_PROT_DEVICE = BIT(3),
|
||||
KVM_PGTABLE_PROT_NC = BIT(4),
|
||||
KVM_PGTABLE_PROT_PXN = BIT(5),
|
||||
KVM_PGTABLE_PROT_UXN = BIT(6),
|
||||
|
||||
KVM_PGTABLE_PROT_SW0 = BIT(55),
|
||||
KVM_PGTABLE_PROT_SW1 = BIT(56),
|
||||
@ -490,6 +502,21 @@ int kvm_pgtable_stage2_annotate(struct kvm_pgtable *pgt, u64 addr, u64 size,
|
||||
*/
|
||||
int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
|
||||
|
||||
/**
|
||||
* kvm_pgtable_stage2_reclaim_leaves() - Attempt to reclaim leaf page-table
|
||||
* pages by coalescing table entries into
|
||||
* block mappings.
|
||||
* @pgt: Page-table structure initialised by kvm_pgtable_stage2_init*().
|
||||
* @addr: Intermediate physical address from which to reclaim leaves.
|
||||
* @size: Size of the range.
|
||||
*
|
||||
* The offset of @addr within a page is ignored and @size is rounded-up to
|
||||
* the next page boundary.
|
||||
*
|
||||
* Return: 0 on success, negative error code on failure.
|
||||
*/
|
||||
int kvm_pgtable_stage2_reclaim_leaves(struct kvm_pgtable *pgt, u64 addr, u64 size);
|
||||
|
||||
/**
|
||||
* kvm_pgtable_stage2_wrprotect() - Write-protect guest stage-2 address range
|
||||
* without TLB invalidation.
|
||||
|
@ -93,8 +93,6 @@ int __pkvm_iommu_register(unsigned long dev_id, unsigned long drv_id,
|
||||
int __pkvm_iommu_pm_notify(unsigned long dev_id,
|
||||
enum pkvm_iommu_pm_event event);
|
||||
int __pkvm_iommu_finalize(int err);
|
||||
int pkvm_iommu_host_stage2_adjust_range(phys_addr_t addr, phys_addr_t *start,
|
||||
phys_addr_t *end);
|
||||
bool pkvm_iommu_host_dabt_handler(struct kvm_cpu_context *host_ctxt, u32 esr,
|
||||
phys_addr_t fault_pa);
|
||||
void pkvm_iommu_host_stage2_idmap(phys_addr_t start, phys_addr_t end,
|
||||
|
@ -72,6 +72,8 @@ int __pkvm_host_share_hyp(u64 pfn);
|
||||
int __pkvm_host_unshare_hyp(u64 pfn);
|
||||
int __pkvm_host_reclaim_page(struct pkvm_hyp_vm *vm, u64 pfn, u64 ipa);
|
||||
int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
|
||||
int ___pkvm_host_donate_hyp(u64 pfn, u64 nr_pages, bool accept_mmio);
|
||||
int __pkvm_host_donate_hyp_locked(u64 pfn, u64 nr_pages);
|
||||
int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
|
||||
int __pkvm_host_share_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu);
|
||||
int __pkvm_host_donate_guest(u64 pfn, u64 gfn, struct pkvm_hyp_vcpu *vcpu);
|
||||
|
@ -392,6 +392,7 @@ int __pkvm_iommu_register(unsigned long dev_id, unsigned long drv_id,
|
||||
.id = dev_id,
|
||||
.ops = drv->ops,
|
||||
.pa = dev_pa,
|
||||
.va = hyp_phys_to_virt(dev_pa),
|
||||
.size = dev_size,
|
||||
.flags = flags,
|
||||
};
|
||||
@ -421,22 +422,11 @@ int __pkvm_iommu_register(unsigned long dev_id, unsigned long drv_id,
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
/*
|
||||
* Unmap the device's MMIO range from host stage-2. If registration
|
||||
* is successful, future attempts to re-map will be blocked by
|
||||
* pkvm_iommu_host_stage2_adjust_range.
|
||||
*/
|
||||
ret = host_stage2_unmap_reg_locked(dev_pa, dev_size);
|
||||
ret = __pkvm_host_donate_hyp_locked(hyp_phys_to_pfn(dev_pa),
|
||||
PAGE_ALIGN(dev_size) >> PAGE_SHIFT);
|
||||
if (ret)
|
||||
goto out_free;
|
||||
|
||||
/* Create EL2 mapping for the device. */
|
||||
ret = __pkvm_create_private_mapping(dev_pa, dev_size,
|
||||
PAGE_HYP_DEVICE, (unsigned long *)(&dev->va));
|
||||
if (ret){
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
/* Register device and prevent host from mapping the MMIO range. */
|
||||
list_add_tail(&dev->list, &iommu_list);
|
||||
if (dev->parent)
|
||||
@ -495,39 +485,6 @@ int __pkvm_iommu_pm_notify(unsigned long dev_id, enum pkvm_iommu_pm_event event)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check host memory access against IOMMUs' MMIO regions.
|
||||
* Returns -EPERM if the address is within the bounds of a registered device.
|
||||
* Otherwise returns zero and adjusts boundaries of the new mapping to avoid
|
||||
* MMIO regions of registered IOMMUs.
|
||||
*/
|
||||
int pkvm_iommu_host_stage2_adjust_range(phys_addr_t addr, phys_addr_t *start,
|
||||
phys_addr_t *end)
|
||||
{
|
||||
struct pkvm_iommu *dev;
|
||||
phys_addr_t new_start = *start;
|
||||
phys_addr_t new_end = *end;
|
||||
phys_addr_t dev_start, dev_end;
|
||||
|
||||
assert_host_component_locked();
|
||||
|
||||
list_for_each_entry(dev, &iommu_list, list) {
|
||||
dev_start = dev->pa;
|
||||
dev_end = dev_start + dev->size;
|
||||
|
||||
if (addr < dev_start)
|
||||
new_end = min(new_end, dev_start);
|
||||
else if (addr >= dev_end)
|
||||
new_start = max(new_start, dev_end);
|
||||
else
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
*start = new_start;
|
||||
*end = new_end;
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool pkvm_iommu_host_dabt_handler(struct kvm_cpu_context *host_ctxt, u32 esr,
|
||||
phys_addr_t pa)
|
||||
{
|
||||
|
@ -79,10 +79,35 @@ static void hyp_unlock_component(void)
|
||||
hyp_spin_unlock(&pkvm_pgd_lock);
|
||||
}
|
||||
|
||||
static void assert_host_can_alloc(void)
|
||||
{
|
||||
/* We can always get back to the host from guest context */
|
||||
if (read_sysreg(vttbr_el2) != kvm_get_vttbr(&host_mmu.arch.mmu))
|
||||
return;
|
||||
|
||||
/*
|
||||
* An error code must be returned to EL1 to handle memory allocation
|
||||
* failures cleanly. That's doable for explicit calls into higher
|
||||
* ELs, but not so much for other EL2 entry reasons such as mem aborts.
|
||||
* Thankfully we don't need memory allocation in these cases by
|
||||
* construction, so let's enforce the invariant.
|
||||
*/
|
||||
switch (ESR_ELx_EC(read_sysreg(esr_el2))) {
|
||||
case ESR_ELx_EC_HVC64:
|
||||
case ESR_ELx_EC_SMC64:
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
}
|
||||
}
|
||||
|
||||
static void *host_s2_zalloc_pages_exact(size_t size)
|
||||
{
|
||||
void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
|
||||
void *addr;
|
||||
|
||||
assert_host_can_alloc();
|
||||
|
||||
addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
|
||||
hyp_split_page(hyp_virt_to_page(addr));
|
||||
|
||||
/*
|
||||
@ -97,6 +122,8 @@ static void *host_s2_zalloc_pages_exact(size_t size)
|
||||
|
||||
static void *host_s2_zalloc_page(void *pool)
|
||||
{
|
||||
assert_host_can_alloc();
|
||||
|
||||
return hyp_alloc_pages(pool, 0);
|
||||
}
|
||||
|
||||
@ -146,6 +173,27 @@ static void prepare_host_vtcr(void)
|
||||
id_aa64mmfr1_el1_sys_val, phys_shift);
|
||||
}
|
||||
|
||||
static int prepopulate_host_stage2(void)
|
||||
{
|
||||
struct memblock_region *reg;
|
||||
u64 addr = 0;
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < hyp_memblock_nr; i++) {
|
||||
reg = &hyp_memory[i];
|
||||
ret = host_stage2_idmap_locked(addr, reg->base - addr, PKVM_HOST_MMIO_PROT, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
ret = host_stage2_idmap_locked(reg->base, reg->size, PKVM_HOST_MEM_PROT, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
addr = reg->base + reg->size;
|
||||
}
|
||||
|
||||
return host_stage2_idmap_locked(addr, BIT(host_mmu.pgt.ia_bits) - addr, PKVM_HOST_MMIO_PROT,
|
||||
false);
|
||||
}
|
||||
|
||||
int kvm_host_prepare_stage2(void *pgt_pool_base)
|
||||
{
|
||||
struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
|
||||
@ -172,7 +220,7 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
|
||||
mmu->pgt = &host_mmu.pgt;
|
||||
atomic64_set(&mmu->vmid.id, 0);
|
||||
|
||||
return 0;
|
||||
return prepopulate_host_stage2();
|
||||
}
|
||||
|
||||
static bool guest_stage2_force_pte_cb(u64 addr, u64 end,
|
||||
@ -398,7 +446,7 @@ int host_stage2_unmap_reg_locked(phys_addr_t start, u64 size)
|
||||
|
||||
hyp_assert_lock_held(&host_mmu.lock);
|
||||
|
||||
ret = kvm_pgtable_stage2_unmap(&host_mmu.pgt, start, size);
|
||||
ret = kvm_pgtable_stage2_reclaim_leaves(&host_mmu.pgt, start, size);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -466,6 +514,11 @@ static enum kvm_pgtable_prot default_host_prot(bool is_memory)
|
||||
return is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;
|
||||
}
|
||||
|
||||
static enum kvm_pgtable_prot default_hyp_prot(phys_addr_t phys)
|
||||
{
|
||||
return addr_is_memory(phys) ? PAGE_HYP : PAGE_HYP_DEVICE;
|
||||
}
|
||||
|
||||
bool addr_is_memory(phys_addr_t phys)
|
||||
{
|
||||
struct kvm_mem_range range;
|
||||
@ -763,22 +816,15 @@ static int host_stage2_idmap(struct kvm_vcpu_fault_info *fault, u64 addr)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Adjust against IOMMU devices first. host_stage2_adjust_range() should
|
||||
* be called last for proper alignment.
|
||||
*/
|
||||
if (!is_memory) {
|
||||
ret = pkvm_iommu_host_stage2_adjust_range(addr, &range.start,
|
||||
&range.end);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = host_stage2_adjust_range(addr, &range, level);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return host_stage2_idmap_locked(range.start, range.end - range.start, prot, false);
|
||||
/*
|
||||
* We're guaranteed not to require memory allocation by construction,
|
||||
* no need to bother even trying to recycle pages.
|
||||
*/
|
||||
return __host_stage2_idmap(range.start, range.end, prot, false);
|
||||
}
|
||||
|
||||
static void (*illegal_abt_notifier)(struct kvm_cpu_context *host_ctxt);
|
||||
@ -972,7 +1018,7 @@ static enum pkvm_page_state host_get_page_state(kvm_pte_t pte, u64 addr)
|
||||
if (is_memory && hyp_phys_to_page(addr)->flags & MODULE_OWNED_PAGE)
|
||||
return PKVM_MODULE_DONT_TOUCH;
|
||||
|
||||
if (!addr_is_allowed_memory(addr))
|
||||
if (is_memory && !addr_is_allowed_memory(addr))
|
||||
return PKVM_NOPAGE;
|
||||
|
||||
if (!kvm_pte_valid(pte) && pte)
|
||||
@ -1186,8 +1232,10 @@ static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
|
||||
enum kvm_pgtable_prot perms)
|
||||
{
|
||||
u64 size = tx->nr_pages * PAGE_SIZE;
|
||||
phys_addr_t phys = hyp_virt_to_phys((void *)addr);
|
||||
enum kvm_pgtable_prot prot = default_hyp_prot(phys);
|
||||
|
||||
if (perms != PAGE_HYP)
|
||||
if (!addr_is_memory(phys) || perms != prot)
|
||||
return -EPERM;
|
||||
|
||||
if (__hyp_ack_skip_pgtable_check(tx))
|
||||
@ -1242,8 +1290,10 @@ static int hyp_complete_donation(u64 addr,
|
||||
const struct pkvm_mem_transition *tx)
|
||||
{
|
||||
void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
|
||||
enum kvm_pgtable_prot prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
|
||||
phys_addr_t phys = hyp_virt_to_phys(start);
|
||||
enum kvm_pgtable_prot prot = default_hyp_prot(phys);
|
||||
|
||||
prot = pkvm_mkstate(prot, PKVM_PAGE_OWNED);
|
||||
return pkvm_create_mappings_locked(start, end, prot);
|
||||
}
|
||||
|
||||
@ -1280,7 +1330,7 @@ static int guest_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
|
||||
{
|
||||
u64 size = tx->nr_pages * PAGE_SIZE;
|
||||
|
||||
if (perms != KVM_PGTABLE_PROT_RWX)
|
||||
if (!addr_is_memory(tx->completer.guest.phys) || perms != KVM_PGTABLE_PROT_RWX)
|
||||
return -EPERM;
|
||||
|
||||
return __guest_check_page_state_range(tx->completer.guest.hyp_vcpu,
|
||||
@ -1291,6 +1341,9 @@ static int guest_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
|
||||
{
|
||||
u64 size = tx->nr_pages * PAGE_SIZE;
|
||||
|
||||
if (!addr_is_memory(tx->completer.guest.phys))
|
||||
return -EPERM;
|
||||
|
||||
return __guest_check_page_state_range(tx->completer.guest.hyp_vcpu,
|
||||
addr, size, PKVM_NOPAGE);
|
||||
}
|
||||
@ -1776,7 +1829,7 @@ int __pkvm_host_share_hyp(u64 pfn)
|
||||
.id = PKVM_ID_HYP,
|
||||
},
|
||||
},
|
||||
.completer_prot = PAGE_HYP,
|
||||
.completer_prot = default_hyp_prot(host_addr),
|
||||
};
|
||||
|
||||
host_lock_component();
|
||||
@ -1873,7 +1926,7 @@ int __pkvm_host_unshare_hyp(u64 pfn)
|
||||
.id = PKVM_ID_HYP,
|
||||
},
|
||||
},
|
||||
.completer_prot = PAGE_HYP,
|
||||
.completer_prot = default_hyp_prot(host_addr),
|
||||
};
|
||||
|
||||
host_lock_component();
|
||||
@ -1888,6 +1941,27 @@ int __pkvm_host_unshare_hyp(u64 pfn)
|
||||
}
|
||||
|
||||
int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
|
||||
{
|
||||
return ___pkvm_host_donate_hyp(pfn, nr_pages, false);
|
||||
}
|
||||
|
||||
int ___pkvm_host_donate_hyp(u64 pfn, u64 nr_pages, bool accept_mmio)
|
||||
{
|
||||
phys_addr_t start = hyp_pfn_to_phys(pfn);
|
||||
phys_addr_t end = start + (nr_pages << PAGE_SHIFT);
|
||||
int ret;
|
||||
|
||||
if (!accept_mmio && !range_is_memory(start, end))
|
||||
return -EPERM;
|
||||
|
||||
host_lock_component();
|
||||
ret = __pkvm_host_donate_hyp_locked(pfn, nr_pages);
|
||||
host_unlock_component();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int __pkvm_host_donate_hyp_locked(u64 pfn, u64 nr_pages)
|
||||
{
|
||||
int ret;
|
||||
u64 host_addr = hyp_pfn_to_phys(pfn);
|
||||
@ -1908,13 +1982,12 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
|
||||
},
|
||||
};
|
||||
|
||||
host_lock_component();
|
||||
hyp_assert_lock_held(&host_mmu.lock);
|
||||
hyp_lock_component();
|
||||
|
||||
ret = do_donate(&donation);
|
||||
|
||||
hyp_unlock_component();
|
||||
host_unlock_component();
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -1964,15 +2037,19 @@ static int restrict_host_page_perms(u64 addr, kvm_pte_t pte, u32 level, enum kvm
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define MODULE_PROT_ALLOWLIST (KVM_PGTABLE_PROT_RWX | \
|
||||
KVM_PGTABLE_PROT_NC | \
|
||||
KVM_PGTABLE_PROT_PXN | \
|
||||
KVM_PGTABLE_PROT_UXN)
|
||||
int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot)
|
||||
{
|
||||
u64 addr = hyp_pfn_to_phys(pfn);
|
||||
struct hyp_page *page;
|
||||
struct hyp_page *page = NULL;
|
||||
kvm_pte_t pte;
|
||||
u32 level;
|
||||
int ret;
|
||||
|
||||
if ((prot & KVM_PGTABLE_PROT_RWX) != prot || !addr_is_memory(addr))
|
||||
if ((prot & MODULE_PROT_ALLOWLIST) != prot)
|
||||
return -EINVAL;
|
||||
|
||||
host_lock_component();
|
||||
@ -1980,6 +2057,14 @@ int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot)
|
||||
if (ret)
|
||||
goto unlock;
|
||||
|
||||
/*
|
||||
* There is no hyp_vmemmap covering MMIO regions, which makes tracking
|
||||
* of module-owned MMIO regions hard, so we trust the modules not to
|
||||
* mess things up.
|
||||
*/
|
||||
if (!addr_is_memory(addr))
|
||||
goto update;
|
||||
|
||||
ret = -EPERM;
|
||||
page = hyp_phys_to_page(addr);
|
||||
|
||||
@ -1994,14 +2079,15 @@ int module_change_host_page_prot(u64 pfn, enum kvm_pgtable_prot prot)
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
if (prot == KVM_PGTABLE_PROT_RWX)
|
||||
update:
|
||||
if (prot == default_host_prot(!!page))
|
||||
ret = host_stage2_set_owner_locked(addr, PAGE_SIZE, PKVM_ID_HOST);
|
||||
else if (!prot)
|
||||
ret = host_stage2_set_owner_locked(addr, PAGE_SIZE, PKVM_ID_PROTECTED);
|
||||
else
|
||||
ret = restrict_host_page_perms(addr, pte, level, prot);
|
||||
|
||||
if (ret)
|
||||
if (ret || !page)
|
||||
goto unlock;
|
||||
|
||||
if (prot != KVM_PGTABLE_PROT_RWX)
|
||||
|
@ -77,6 +77,11 @@ void __pkvm_close_module_registration(void)
|
||||
*/
|
||||
}
|
||||
|
||||
static int __pkvm_module_host_donate_hyp(u64 pfn, u64 nr_pages)
|
||||
{
|
||||
return ___pkvm_host_donate_hyp(pfn, nr_pages, true);
|
||||
}
|
||||
|
||||
const struct pkvm_module_ops module_ops = {
|
||||
.create_private_mapping = __pkvm_create_private_mapping,
|
||||
.alloc_module_va = __pkvm_alloc_module_va,
|
||||
@ -99,7 +104,7 @@ const struct pkvm_module_ops module_ops = {
|
||||
.register_illegal_abt_notifier = __pkvm_register_illegal_abt_notifier,
|
||||
.register_psci_notifier = __pkvm_register_psci_notifier,
|
||||
.register_hyp_panic_notifier = __pkvm_register_hyp_panic_notifier,
|
||||
.host_donate_hyp = __pkvm_host_donate_hyp,
|
||||
.host_donate_hyp = __pkvm_module_host_donate_hyp,
|
||||
.hyp_donate_host = __pkvm_hyp_donate_host,
|
||||
.host_share_hyp = __pkvm_host_share_hyp,
|
||||
.host_unshare_hyp = __pkvm_host_unshare_hyp,
|
||||
|
@ -277,6 +277,29 @@ static int fix_hyp_pgtable_refcnt_walker(u64 addr, u64 end, u32 level,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pin_table_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
|
||||
enum kvm_pgtable_walk_flags flag, void * const arg)
|
||||
{
|
||||
struct kvm_pgtable_mm_ops *mm_ops = arg;
|
||||
kvm_pte_t pte = *ptep;
|
||||
|
||||
if (kvm_pte_valid(pte))
|
||||
mm_ops->get_page(kvm_pte_follow(pte, mm_ops));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pin_host_tables(void)
|
||||
{
|
||||
struct kvm_pgtable_walker walker = {
|
||||
.cb = pin_table_walker,
|
||||
.flags = KVM_PGTABLE_WALK_TABLE_POST,
|
||||
.arg = &host_mmu.mm_ops,
|
||||
};
|
||||
|
||||
return kvm_pgtable_walk(&host_mmu.pgt, 0, BIT(host_mmu.pgt.ia_bits), &walker);
|
||||
}
|
||||
|
||||
static int fix_host_ownership(void)
|
||||
{
|
||||
struct kvm_pgtable_walker walker = {
|
||||
@ -357,10 +380,6 @@ void __noreturn __pkvm_init_finalise(void)
|
||||
};
|
||||
pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;
|
||||
|
||||
ret = fix_host_ownership();
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = fix_hyp_pgtable_refcnt();
|
||||
if (ret)
|
||||
goto out;
|
||||
@ -369,10 +388,18 @@ void __noreturn __pkvm_init_finalise(void)
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = fix_host_ownership();
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = unmap_protected_regions();
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = pin_host_tables();
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = hyp_ffa_init(ffa_proxy_pages);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
@ -76,11 +76,6 @@ static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
|
||||
return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
|
||||
}
|
||||
|
||||
static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
|
||||
{
|
||||
return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
|
||||
}
|
||||
|
||||
static void kvm_clear_pte(kvm_pte_t *ptep)
|
||||
{
|
||||
WRITE_ONCE(*ptep, 0);
|
||||
@ -281,7 +276,8 @@ static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
|
||||
kvm_pte_t attr;
|
||||
u32 mtype;
|
||||
|
||||
if (!(prot & KVM_PGTABLE_PROT_R) || (device && nc))
|
||||
if (!(prot & KVM_PGTABLE_PROT_R) || (device && nc) ||
|
||||
(prot & (KVM_PGTABLE_PROT_PXN | KVM_PGTABLE_PROT_UXN)))
|
||||
return -EINVAL;
|
||||
|
||||
if (device)
|
||||
@ -570,16 +566,15 @@ static bool stage2_has_fwb(struct kvm_pgtable *pgt)
|
||||
#define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))
|
||||
|
||||
static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
|
||||
kvm_pte_t *ptep)
|
||||
kvm_pte_t *ptep)
|
||||
{
|
||||
u64 exec_type = KVM_PTE_LEAF_ATTR_HI_S2_XN_XN;
|
||||
bool device = prot & KVM_PGTABLE_PROT_DEVICE;
|
||||
u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;
|
||||
bool nc = prot & KVM_PGTABLE_PROT_NC;
|
||||
enum kvm_pgtable_prot exec_prot;
|
||||
kvm_pte_t attr;
|
||||
|
||||
if (device && nc)
|
||||
return -EINVAL;
|
||||
|
||||
if (device)
|
||||
attr = KVM_S2_MEMATTR(pgt, DEVICE_nGnRE);
|
||||
else if (nc)
|
||||
@ -587,11 +582,23 @@ static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot p
|
||||
else
|
||||
attr = KVM_S2_MEMATTR(pgt, NORMAL);
|
||||
|
||||
if (!(prot & KVM_PGTABLE_PROT_X))
|
||||
attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
|
||||
else if (device)
|
||||
return -EINVAL;
|
||||
exec_prot = prot & (KVM_PGTABLE_PROT_X | KVM_PGTABLE_PROT_PXN | KVM_PGTABLE_PROT_UXN);
|
||||
switch(exec_prot) {
|
||||
case KVM_PGTABLE_PROT_X:
|
||||
goto set_ap;
|
||||
case KVM_PGTABLE_PROT_PXN:
|
||||
exec_type = KVM_PTE_LEAF_ATTR_HI_S2_XN_PXN;
|
||||
break;
|
||||
case KVM_PGTABLE_PROT_UXN:
|
||||
exec_type = KVM_PTE_LEAF_ATTR_HI_S2_XN_UXN;
|
||||
break;
|
||||
default:
|
||||
if (exec_prot)
|
||||
return -EINVAL;
|
||||
}
|
||||
attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_HI_S2_XN, exec_type);
|
||||
|
||||
set_ap:
|
||||
if (prot & KVM_PGTABLE_PROT_R)
|
||||
attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
|
||||
|
||||
@ -617,8 +624,21 @@ enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte)
|
||||
prot |= KVM_PGTABLE_PROT_R;
|
||||
if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W)
|
||||
prot |= KVM_PGTABLE_PROT_W;
|
||||
if (!(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN))
|
||||
switch(FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, pte)) {
|
||||
case 0:
|
||||
prot |= KVM_PGTABLE_PROT_X;
|
||||
break;
|
||||
case KVM_PTE_LEAF_ATTR_HI_S2_XN_PXN:
|
||||
prot |= KVM_PGTABLE_PROT_PXN;
|
||||
break;
|
||||
case KVM_PTE_LEAF_ATTR_HI_S2_XN_UXN:
|
||||
prot |= KVM_PGTABLE_PROT_UXN;
|
||||
break;
|
||||
case KVM_PTE_LEAF_ATTR_HI_S2_XN_XN:
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
}
|
||||
|
||||
return prot;
|
||||
}
|
||||
@ -660,7 +680,9 @@ static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
|
||||
|
||||
static bool stage2_pte_executable(kvm_pte_t pte)
|
||||
{
|
||||
return kvm_pte_valid(pte) && !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
|
||||
kvm_pte_t xn = FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, pte);
|
||||
|
||||
return kvm_pte_valid(pte) && xn != KVM_PTE_LEAF_ATTR_HI_S2_XN_XN;
|
||||
}
|
||||
|
||||
static bool stage2_leaf_mapping_allowed(u64 addr, u64 end, u32 level,
|
||||
@ -1017,6 +1039,30 @@ int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
|
||||
return kvm_pgtable_walk(pgt, addr, size, &walker);
|
||||
}
|
||||
|
||||
static int stage2_reclaim_leaf_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
|
||||
enum kvm_pgtable_walk_flags flag, void * const arg)
|
||||
{
|
||||
stage2_coalesce_walk_table_post(addr, end, level, ptep, arg);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kvm_pgtable_stage2_reclaim_leaves(struct kvm_pgtable *pgt, u64 addr, u64 size)
|
||||
{
|
||||
struct stage2_map_data map_data = {
|
||||
.phys = KVM_PHYS_INVALID,
|
||||
.mmu = pgt->mmu,
|
||||
.mm_ops = pgt->mm_ops,
|
||||
};
|
||||
struct kvm_pgtable_walker walker = {
|
||||
.cb = stage2_reclaim_leaf_walker,
|
||||
.arg = &map_data,
|
||||
.flags = KVM_PGTABLE_WALK_TABLE_POST,
|
||||
};
|
||||
|
||||
return kvm_pgtable_walk(pgt, addr, size, &walker);
|
||||
}
|
||||
|
||||
struct stage2_attr_data {
|
||||
kvm_pte_t attr_set;
|
||||
kvm_pte_t attr_clr;
|
||||
@ -1135,7 +1181,7 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
|
||||
u32 level;
|
||||
kvm_pte_t set = 0, clr = 0;
|
||||
|
||||
if (prot & KVM_PTE_LEAF_ATTR_HI_SW)
|
||||
if (prot & !KVM_PGTABLE_PROT_RWX)
|
||||
return -EINVAL;
|
||||
|
||||
if (prot & KVM_PGTABLE_PROT_R)
|
||||
|
@ -82,6 +82,27 @@ WORKAROUND_CAVIUM_TX2_219_TVM
|
||||
WORKAROUND_CLEAN_CACHE
|
||||
WORKAROUND_DEVICE_LOAD_ACQUIRE
|
||||
WORKAROUND_NVIDIA_CARMEL_CNP
|
||||
WORKAROUND_NXP_ERR050104
|
||||
WORKAROUND_QCOM_FALKOR_E1003
|
||||
WORKAROUND_REPEAT_TLBI
|
||||
WORKAROUND_SPECULATIVE_AT
|
||||
ANDROID_KABI_RESERVE_01
|
||||
ANDROID_KABI_RESERVE_02
|
||||
ANDROID_KABI_RESERVE_03
|
||||
ANDROID_KABI_RESERVE_04
|
||||
ANDROID_KABI_RESERVE_05
|
||||
ANDROID_KABI_RESERVE_06
|
||||
ANDROID_KABI_RESERVE_07
|
||||
ANDROID_KABI_RESERVE_08
|
||||
ANDROID_KABI_RESERVE_09
|
||||
ANDROID_KABI_RESERVE_10
|
||||
ANDROID_KABI_RESERVE_11
|
||||
ANDROID_KABI_RESERVE_12
|
||||
ANDROID_KABI_RESERVE_13
|
||||
ANDROID_KABI_RESERVE_14
|
||||
ANDROID_KABI_RESERVE_15
|
||||
ANDROID_KABI_RESERVE_16
|
||||
ANDROID_KABI_RESERVE_17
|
||||
ANDROID_KABI_RESERVE_18
|
||||
ANDROID_KABI_RESERVE_19
|
||||
ANDROID_KABI_RESERVE_20
|
||||
|
@ -1,3 +0,0 @@
|
||||
per-file crypto/**=file:/crypto/OWNERS
|
||||
per-file mm/**=file:/mm/OWNERS
|
||||
per-file net/**=file:/net/OWNERS
|
@ -89,6 +89,7 @@ CONFIG_MODULE_SIG=y
|
||||
CONFIG_MODULE_SIG_PROTECT=y
|
||||
CONFIG_BLK_DEV_ZONED=y
|
||||
CONFIG_BLK_DEV_THROTTLING=y
|
||||
CONFIG_BLK_CGROUP_IOCOST=y
|
||||
CONFIG_BLK_INLINE_ENCRYPTION=y
|
||||
CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
|
||||
CONFIG_IOSCHED_BFQ=y
|
||||
@ -177,6 +178,7 @@ CONFIG_NETFILTER_XT_TARGET_SECMARK=y
|
||||
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
|
||||
CONFIG_NETFILTER_XT_MATCH_BPF=y
|
||||
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
|
||||
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
|
||||
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
|
||||
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
|
||||
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
|
||||
|
@ -1,2 +0,0 @@
|
||||
bvanassche@google.com
|
||||
jaegeuk@google.com
|
@ -1,6 +1,6 @@
|
||||
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.constants
|
||||
|
||||
KMI_GENERATION=8
|
||||
KMI_GENERATION=11
|
||||
|
||||
LLVM=1
|
||||
DEPMOD=depmod
|
||||
|
@ -1 +0,0 @@
|
||||
ardb@google.com
|
@ -1,6 +0,0 @@
|
||||
per-file base/**=gregkh@google.com,saravanak@google.com
|
||||
per-file block/**=akailash@google.com
|
||||
per-file md/**=akailash@google.com,paullawrence@google.com
|
||||
per-file net/**=file:/net/OWNERS
|
||||
per-file scsi/**=bvanassche@google.com,jaegeuk@google.com
|
||||
per-file {tty,usb}/**=gregkh@google.com
|
@ -213,8 +213,8 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
|
||||
mm = alloc->mm;
|
||||
|
||||
if (mm) {
|
||||
mmap_read_lock(mm);
|
||||
vma = vma_lookup(mm, alloc->vma_addr);
|
||||
mmap_write_lock(mm);
|
||||
vma = alloc->vma;
|
||||
}
|
||||
|
||||
if (!vma && need_mm) {
|
||||
@ -271,7 +271,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
|
||||
trace_binder_alloc_page_end(alloc, index);
|
||||
}
|
||||
if (mm) {
|
||||
mmap_read_unlock(mm);
|
||||
mmap_write_unlock(mm);
|
||||
mmput(mm);
|
||||
}
|
||||
return 0;
|
||||
@ -304,21 +304,24 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
|
||||
}
|
||||
err_no_vma:
|
||||
if (mm) {
|
||||
mmap_read_unlock(mm);
|
||||
mmap_write_unlock(mm);
|
||||
mmput(mm);
|
||||
}
|
||||
return vma ? -ENOMEM : -ESRCH;
|
||||
}
|
||||
|
||||
static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
|
||||
struct vm_area_struct *vma)
|
||||
{
|
||||
/* pairs with smp_load_acquire in binder_alloc_get_vma() */
|
||||
smp_store_release(&alloc->vma, vma);
|
||||
}
|
||||
|
||||
static inline struct vm_area_struct *binder_alloc_get_vma(
|
||||
struct binder_alloc *alloc)
|
||||
{
|
||||
struct vm_area_struct *vma = NULL;
|
||||
|
||||
if (alloc->vma_addr)
|
||||
vma = vma_lookup(alloc->mm, alloc->vma_addr);
|
||||
|
||||
return vma;
|
||||
/* pairs with smp_store_release in binder_alloc_set_vma() */
|
||||
return smp_load_acquire(&alloc->vma);
|
||||
}
|
||||
|
||||
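The binder change above relies on the standard release/acquire publication pattern: the writer fully initialises an object and then publishes the pointer with a release store, so a reader doing an acquire load sees either NULL or a completely initialised object. A minimal, self-contained sketch of that pattern in portable C11 atomics (hypothetical names; in the diff above smp_store_release()/smp_load_acquire() play these roles):

/* Release/acquire publication, mirrored with C11 atomics. */
#include <stdatomic.h>
#include <stddef.h>

struct mapping {
	unsigned long start;
	unsigned long size;
};

static struct mapping map_storage;
static _Atomic(struct mapping *) published_map;

/* Writer: initialise the object completely, then publish it. */
static void publish(unsigned long start, unsigned long size)
{
	map_storage.start = start;
	map_storage.size = size;
	/* Release: all stores above become visible before the pointer does. */
	atomic_store_explicit(&published_map, &map_storage, memory_order_release);
}

/* Reader: sees either NULL or a fully initialised object. */
static const struct mapping *lookup(void)
{
	/* Acquire: pairs with the release store in publish(). */
	return atomic_load_explicit(&published_map, memory_order_acquire);
}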
static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid)
|
||||
@ -381,15 +384,13 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
|
||||
size_t size, data_offsets_size;
|
||||
int ret;
|
||||
|
||||
mmap_read_lock(alloc->mm);
|
||||
/* Check binder_alloc is fully initialized */
|
||||
if (!binder_alloc_get_vma(alloc)) {
|
||||
mmap_read_unlock(alloc->mm);
|
||||
binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
|
||||
"%d: binder_alloc_buf, no vma\n",
|
||||
alloc->pid);
|
||||
return ERR_PTR(-ESRCH);
|
||||
}
|
||||
mmap_read_unlock(alloc->mm);
|
||||
|
||||
data_offsets_size = ALIGN(data_size, sizeof(void *)) +
|
||||
ALIGN(offsets_size, sizeof(void *));
|
||||
@ -780,7 +781,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
|
||||
buffer->free = 1;
|
||||
binder_insert_free_buffer(alloc, buffer);
|
||||
alloc->free_async_space = alloc->buffer_size / 2;
|
||||
alloc->vma_addr = vma->vm_start;
|
||||
|
||||
/* Signal binder_alloc is fully initialized */
|
||||
binder_alloc_set_vma(alloc, vma);
|
||||
|
||||
return 0;
|
||||
|
||||
@ -810,8 +813,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
|
||||
|
||||
buffers = 0;
|
||||
mutex_lock(&alloc->mutex);
|
||||
BUG_ON(alloc->vma_addr &&
|
||||
vma_lookup(alloc->mm, alloc->vma_addr));
|
||||
BUG_ON(alloc->vma);
|
||||
|
||||
while ((n = rb_first(&alloc->allocated_buffers))) {
|
||||
buffer = rb_entry(n, struct binder_buffer, rb_node);
|
||||
@ -918,25 +920,17 @@ void binder_alloc_print_pages(struct seq_file *m,
|
||||
* Make sure the binder_alloc is fully initialized, otherwise we might
|
||||
* read inconsistent state.
|
||||
*/
|
||||
|
||||
mmap_read_lock(alloc->mm);
|
||||
if (binder_alloc_get_vma(alloc) == NULL) {
|
||||
mmap_read_unlock(alloc->mm);
|
||||
goto uninitialized;
|
||||
if (binder_alloc_get_vma(alloc) != NULL) {
|
||||
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
|
||||
page = &alloc->pages[i];
|
||||
if (!page->page_ptr)
|
||||
free++;
|
||||
else if (list_empty(&page->lru))
|
||||
active++;
|
||||
else
|
||||
lru++;
|
||||
}
|
||||
}
|
||||
|
||||
mmap_read_unlock(alloc->mm);
|
||||
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
|
||||
page = &alloc->pages[i];
|
||||
if (!page->page_ptr)
|
||||
free++;
|
||||
else if (list_empty(&page->lru))
|
||||
active++;
|
||||
else
|
||||
lru++;
|
||||
}
|
||||
|
||||
uninitialized:
|
||||
mutex_unlock(&alloc->mutex);
|
||||
seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
|
||||
seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
|
||||
@ -971,7 +965,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
|
||||
*/
|
||||
void binder_alloc_vma_close(struct binder_alloc *alloc)
|
||||
{
|
||||
alloc->vma_addr = 0;
|
||||
binder_alloc_set_vma(alloc, NULL);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -75,7 +75,7 @@ struct binder_lru_page {
|
||||
/**
|
||||
* struct binder_alloc - per-binder proc state for binder allocator
|
||||
* @mutex: protects binder_alloc fields
|
||||
* @vma_addr: vm_area_struct->vm_start passed to mmap_handler
|
||||
* @vma: vm_area_struct passed to mmap_handler
|
||||
* (invariant after mmap)
|
||||
* @mm: copy of task->mm (invariant after open)
|
||||
* @buffer: base of per-proc address space mapped via mmap
|
||||
@ -99,7 +99,7 @@ struct binder_lru_page {
|
||||
*/
|
||||
struct binder_alloc {
|
||||
struct mutex mutex;
|
||||
unsigned long vma_addr;
|
||||
struct vm_area_struct *vma;
|
||||
struct mm_struct *mm;
|
||||
void __user *buffer;
|
||||
struct list_head buffers;
|
||||
|
@ -287,7 +287,7 @@ void binder_selftest_alloc(struct binder_alloc *alloc)
|
||||
if (!binder_selftest_run)
|
||||
return;
|
||||
mutex_lock(&binder_selftest_lock);
|
||||
if (!binder_selftest_run || !alloc->vma_addr)
|
||||
if (!binder_selftest_run || !alloc->vma)
|
||||
goto done;
|
||||
pr_info("STARTED\n");
|
||||
binder_selftest_alloc_offset(alloc, end_offset, 0);
|
||||
|
@ -1416,7 +1416,9 @@ static void platform_remove(struct device *_dev)
|
||||
struct platform_driver *drv = to_platform_driver(_dev->driver);
|
||||
struct platform_device *dev = to_platform_device(_dev);
|
||||
|
||||
if (drv->remove) {
|
||||
if (drv->remove_new) {
|
||||
drv->remove_new(dev);
|
||||
} else if (drv->remove) {
|
||||
int ret = drv->remove(dev);
|
||||
|
||||
if (ret)
|
||||
|
@ -43,6 +43,8 @@ static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
|
||||
/* entry point from firmware to arch asm code */
|
||||
static unsigned long sdei_entry_point;
|
||||
|
||||
static int sdei_hp_state;
|
||||
|
||||
struct sdei_event {
|
||||
/* These three are protected by the sdei_list_lock */
|
||||
struct list_head list;
|
||||
@ -301,8 +303,6 @@ int sdei_mask_local_cpu(void)
|
||||
{
|
||||
int err;
|
||||
|
||||
WARN_ON_ONCE(preemptible());
|
||||
|
||||
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
|
||||
if (err && err != -EIO) {
|
||||
pr_warn_once("failed to mask CPU[%u]: %d\n",
|
||||
@ -315,6 +315,7 @@ int sdei_mask_local_cpu(void)
|
||||
|
||||
static void _ipi_mask_cpu(void *ignored)
|
||||
{
|
||||
WARN_ON_ONCE(preemptible());
|
||||
sdei_mask_local_cpu();
|
||||
}
|
||||
|
||||
@ -322,8 +323,6 @@ int sdei_unmask_local_cpu(void)
|
||||
{
|
||||
int err;
|
||||
|
||||
WARN_ON_ONCE(preemptible());
|
||||
|
||||
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
|
||||
if (err && err != -EIO) {
|
||||
pr_warn_once("failed to unmask CPU[%u]: %d\n",
|
||||
@ -336,6 +335,7 @@ int sdei_unmask_local_cpu(void)
|
||||
|
||||
static void _ipi_unmask_cpu(void *ignored)
|
||||
{
|
||||
WARN_ON_ONCE(preemptible());
|
||||
sdei_unmask_local_cpu();
|
||||
}
|
||||
|
||||
@ -343,6 +343,8 @@ static void _ipi_private_reset(void *ignored)
|
||||
{
|
||||
int err;
|
||||
|
||||
WARN_ON_ONCE(preemptible());
|
||||
|
||||
err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
|
||||
NULL);
|
||||
if (err && err != -EIO)
|
||||
@ -389,8 +391,6 @@ static void _local_event_enable(void *data)
|
||||
int err;
|
||||
struct sdei_crosscall_args *arg = data;
|
||||
|
||||
WARN_ON_ONCE(preemptible());
|
||||
|
||||
err = sdei_api_event_enable(arg->event->event_num);
|
||||
|
||||
sdei_cross_call_return(arg, err);
|
||||
@ -479,8 +479,6 @@ static void _local_event_unregister(void *data)
|
||||
int err;
|
||||
struct sdei_crosscall_args *arg = data;
|
||||
|
||||
WARN_ON_ONCE(preemptible());
|
||||
|
||||
err = sdei_api_event_unregister(arg->event->event_num);
|
||||
|
||||
sdei_cross_call_return(arg, err);
|
||||
@ -561,8 +559,6 @@ static void _local_event_register(void *data)
|
||||
struct sdei_registered_event *reg;
|
||||
struct sdei_crosscall_args *arg = data;
|
||||
|
||||
WARN_ON(preemptible());
|
||||
|
||||
reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
|
||||
err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
|
||||
reg, 0, 0);
|
||||
@ -717,6 +713,8 @@ static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
|
||||
{
|
||||
int rv;
|
||||
|
||||
WARN_ON_ONCE(preemptible());
|
||||
|
||||
switch (action) {
|
||||
case CPU_PM_ENTER:
|
||||
rv = sdei_mask_local_cpu();
|
||||
@ -765,7 +763,7 @@ static int sdei_device_freeze(struct device *dev)
|
||||
int err;
|
||||
|
||||
/* unregister private events */
|
||||
cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
|
||||
cpuhp_remove_state(sdei_entry_point);
|
||||
|
||||
err = sdei_unregister_shared();
|
||||
if (err)
|
||||
@ -786,12 +784,15 @@ static int sdei_device_thaw(struct device *dev)
|
||||
return err;
|
||||
}
|
||||
|
||||
err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
|
||||
err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
|
||||
&sdei_cpuhp_up, &sdei_cpuhp_down);
|
||||
if (err)
|
||||
if (err < 0) {
|
||||
pr_warn("Failed to re-register CPU hotplug notifier...\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
return err;
|
||||
sdei_hp_state = err;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sdei_device_restore(struct device *dev)
|
||||
@ -823,7 +824,7 @@ static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
|
||||
* We are going to reset the interface, after this there is no point
|
||||
* doing work when we take CPUs offline.
|
||||
*/
|
||||
cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
|
||||
cpuhp_remove_state(sdei_hp_state);
|
||||
|
||||
sdei_platform_reset();
|
||||
|
||||
@ -1003,13 +1004,15 @@ static int sdei_probe(struct platform_device *pdev)
|
||||
goto remove_cpupm;
|
||||
}
|
||||
|
||||
err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
|
||||
err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
|
||||
&sdei_cpuhp_up, &sdei_cpuhp_down);
|
||||
if (err) {
|
||||
if (err < 0) {
|
||||
pr_warn("Failed to register CPU hotplug notifier...\n");
|
||||
goto remove_reboot;
|
||||
}
|
||||
|
||||
sdei_hp_state = err;
|
||||
|
||||
return 0;
|
||||
|
||||
remove_reboot:
|
||||
|
@ -96,8 +96,9 @@ static int gh_msgq_send_data(struct mbox_chan *chan, void *data)
|
||||
if (gh_error == GH_ERROR_OK) {
|
||||
if (!ready)
|
||||
return 0;
|
||||
} else
|
||||
} else {
|
||||
dev_err(msgq->mbox.dev, "Failed to send data: %d (%d)\n", gh_error, msgq->last_ret);
|
||||
}
|
||||
|
||||
/**
|
||||
* We can send more messages. Mailbox framework requires that tx done
|
||||
@ -165,6 +166,8 @@ int gh_msgq_init(struct device *parent, struct gh_msgq *msgq, struct mbox_client
|
||||
if (ret)
|
||||
goto err_tx_ghrsc;
|
||||
|
||||
enable_irq_wake(msgq->tx_ghrsc->irq);
|
||||
|
||||
tasklet_setup(&msgq->txdone_tasklet, gh_msgq_txdone_tasklet);
|
||||
}
|
||||
|
||||
@ -175,6 +178,8 @@ int gh_msgq_init(struct device *parent, struct gh_msgq *msgq, struct mbox_client
|
||||
IRQF_ONESHOT, "gh_msgq_rx", msgq);
|
||||
if (ret)
|
||||
goto err_tx_irq;
|
||||
|
||||
enable_irq_wake(msgq->rx_ghrsc->irq);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -193,6 +198,8 @@ EXPORT_SYMBOL_GPL(gh_msgq_init);
|
||||
|
||||
void gh_msgq_remove(struct gh_msgq *msgq)
|
||||
{
|
||||
mbox_free_channel(gh_msgq_chan(msgq));
|
||||
|
||||
if (msgq->rx_ghrsc)
|
||||
free_irq(msgq->rx_ghrsc->irq, msgq);
|
||||
|
||||
|
@ -2018,6 +2018,19 @@ bool usb_device_is_owned(struct usb_device *udev)
|
||||
return !!hub->ports[udev->portnum - 1]->port_owner;
|
||||
}
|
||||
|
||||
static void update_port_device_state(struct usb_device *udev)
|
||||
{
|
||||
struct usb_hub *hub;
|
||||
struct usb_port *port_dev;
|
||||
|
||||
if (udev->parent) {
|
||||
hub = usb_hub_to_struct_hub(udev->parent);
|
||||
port_dev = hub->ports[udev->portnum - 1];
|
||||
WRITE_ONCE(port_dev->state, udev->state);
|
||||
sysfs_notify_dirent(port_dev->state_kn);
|
||||
}
|
||||
}
|
||||
|
||||
static void recursively_mark_NOTATTACHED(struct usb_device *udev)
|
||||
{
|
||||
struct usb_hub *hub = usb_hub_to_struct_hub(udev);
|
||||
@ -2030,6 +2043,7 @@ static void recursively_mark_NOTATTACHED(struct usb_device *udev)
|
||||
if (udev->state == USB_STATE_SUSPENDED)
|
||||
udev->active_duration -= jiffies;
|
||||
udev->state = USB_STATE_NOTATTACHED;
|
||||
update_port_device_state(udev);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2086,6 +2100,7 @@ void usb_set_device_state(struct usb_device *udev,
|
||||
udev->state != USB_STATE_SUSPENDED)
|
||||
udev->active_duration += jiffies;
|
||||
udev->state = new_state;
|
||||
update_port_device_state(udev);
|
||||
} else
|
||||
recursively_mark_NOTATTACHED(udev);
|
||||
spin_unlock_irqrestore(&device_state_lock, flags);
|
||||
|
@ -84,6 +84,8 @@ struct usb_hub {
|
||||
* @peer: related usb2 and usb3 ports (share the same connector)
|
||||
* @req: default pm qos request for hubs without port power control
|
||||
* @connect_type: port's connect type
|
||||
* @state: device state of the usb device attached to the port
|
||||
* @state_kn: kernfs_node of the sysfs attribute that accesses @state
|
||||
* @location: opaque representation of platform connector location
|
||||
* @status_lock: synchronize port_event() vs usb_port_{suspend|resume}
|
||||
* @portnum: port index num based one
|
||||
@ -98,6 +100,8 @@ struct usb_port {
|
||||
struct usb_port *peer;
|
||||
struct dev_pm_qos_request *req;
|
||||
enum usb_port_connect_type connect_type;
|
||||
enum usb_device_state state;
|
||||
struct kernfs_node *state_kn;
|
||||
usb_port_location_t location;
|
||||
struct mutex status_lock;
|
||||
u32 over_current_count;
|
||||
|
@ -133,6 +133,16 @@ static ssize_t connect_type_show(struct device *dev,
|
||||
}
|
||||
static DEVICE_ATTR_RO(connect_type);
|
||||
|
||||
static ssize_t state_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct usb_port *port_dev = to_usb_port(dev);
|
||||
enum usb_device_state state = READ_ONCE(port_dev->state);
|
||||
|
||||
return sysfs_emit(buf, "%s\n", usb_state_string(state));
|
||||
}
|
||||
static DEVICE_ATTR_RO(state);
|
||||
|
||||
static ssize_t over_current_count_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
@ -232,6 +242,7 @@ static DEVICE_ATTR_RW(usb3_lpm_permit);
|
||||
|
||||
static struct attribute *port_dev_attrs[] = {
|
||||
&dev_attr_connect_type.attr,
|
||||
&dev_attr_state.attr,
|
||||
&dev_attr_location.attr,
|
||||
&dev_attr_quirks.attr,
|
||||
&dev_attr_over_current_count.attr,
|
||||
@ -677,19 +688,24 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
|
||||
return retval;
|
||||
}
|
||||
|
||||
port_dev->state_kn = sysfs_get_dirent(port_dev->dev.kobj.sd, "state");
|
||||
if (!port_dev->state_kn) {
|
||||
dev_err(&port_dev->dev, "failed to sysfs_get_dirent 'state'\n");
|
||||
retval = -ENODEV;
|
||||
goto err_unregister;
|
||||
}
|
||||
|
||||
/* Set default policy of port-poweroff disabled. */
|
||||
retval = dev_pm_qos_add_request(&port_dev->dev, port_dev->req,
|
||||
DEV_PM_QOS_FLAGS, PM_QOS_FLAG_NO_POWER_OFF);
|
||||
if (retval < 0) {
|
||||
device_unregister(&port_dev->dev);
|
||||
return retval;
|
||||
goto err_put_kn;
|
||||
}
|
||||
|
||||
retval = component_add(&port_dev->dev, &connector_ops);
|
||||
if (retval) {
|
||||
dev_warn(&port_dev->dev, "failed to add component\n");
|
||||
device_unregister(&port_dev->dev);
|
||||
return retval;
|
||||
goto err_put_kn;
|
||||
}
|
||||
|
||||
find_and_link_peer(hub, port1);
|
||||
@ -726,6 +742,13 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
|
||||
port_dev->req = NULL;
|
||||
}
|
||||
return 0;
|
||||
|
||||
err_put_kn:
|
||||
sysfs_put(port_dev->state_kn);
|
||||
err_unregister:
|
||||
device_unregister(&port_dev->dev);
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
||||
void usb_hub_remove_port_device(struct usb_hub *hub, int port1)
|
||||
@ -737,5 +760,6 @@ void usb_hub_remove_port_device(struct usb_hub *hub, int port1)
|
||||
if (peer)
|
||||
unlink_peers(port_dev, peer);
|
||||
component_del(&port_dev->dev, &connector_ops);
|
||||
sysfs_put(port_dev->state_kn);
|
||||
device_unregister(&port_dev->dev);
|
||||
}
|
||||
|
@ -37,10 +37,14 @@ static struct bus_type gadget_bus_type;
|
||||
* @vbus: for udcs who care about vbus status, this value is real vbus status;
|
||||
* for udcs who do not care about vbus status, this value is always true
|
||||
* @started: the UDC's started state. True if the UDC had started.
|
||||
* @connect_lock: protects udc->vbus, udc->started, gadget->connect, gadget->deactivate related
|
||||
* functions. usb_gadget_connect_locked, usb_gadget_disconnect_locked,
|
||||
* usb_udc_connect_control_locked, usb_gadget_udc_start_locked, usb_gadget_udc_stop_locked are
|
||||
* called with this lock held.
|
||||
* @allow_connect: Indicates whether UDC is allowed to be pulled up.
|
||||
* Set/cleared by gadget_(un)bind_driver() after gadget driver is bound or
|
||||
* unbound.
|
||||
* @connect_lock: protects udc->started, gadget->connect,
|
||||
* gadget->allow_connect and gadget->deactivate. The routines
|
||||
* usb_gadget_connect_locked(), usb_gadget_disconnect_locked(),
|
||||
* usb_udc_connect_control_locked(), usb_gadget_udc_start_locked() and
|
||||
* usb_gadget_udc_stop_locked() are called with this lock held.
|
||||
*
|
||||
* This represents the internal data structure which is used by the UDC-class
|
||||
* to hold information about udc driver and gadget together.
|
||||
@ -52,6 +56,8 @@ struct usb_udc {
|
||||
struct list_head list;
|
||||
bool vbus;
|
||||
bool started;
|
||||
bool allow_connect;
|
||||
struct work_struct vbus_work;
|
||||
struct mutex connect_lock;
|
||||
};
|
||||
|
||||
@ -665,7 +671,6 @@ int usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
|
||||
|
||||
/* Internal version of usb_gadget_connect needs to be called with connect_lock held. */
|
||||
static int usb_gadget_connect_locked(struct usb_gadget *gadget)
|
||||
__must_hold(&gadget->udc->connect_lock)
|
||||
{
|
||||
@ -676,12 +681,12 @@ static int usb_gadget_connect_locked(struct usb_gadget *gadget)
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (gadget->deactivated || !gadget->udc->started) {
|
||||
if (gadget->deactivated || !gadget->udc->allow_connect || !gadget->udc->started) {
|
||||
/*
|
||||
* If gadget is deactivated we only save new state.
|
||||
* Gadget will be connected automatically after activation.
|
||||
*
|
||||
* udc first needs to be started before gadget can be pulled up.
|
||||
* If the gadget isn't usable (because it is deactivated,
|
||||
* unbound, or not yet started), we only save the new state.
|
||||
* The gadget will be connected automatically when it is
|
||||
* activated/bound/started.
|
||||
*/
|
||||
gadget->connected = true;
|
||||
goto out;
|
||||
@ -719,7 +724,6 @@ int usb_gadget_connect(struct usb_gadget *gadget)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(usb_gadget_connect);
|
||||
|
||||
/* Internal version of usb_gadget_disconnect needs to be called with connect_lock held. */
|
||||
static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
|
||||
__must_hold(&gadget->udc->connect_lock)
|
||||
{
|
||||
@ -737,8 +741,6 @@ static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
|
||||
/*
|
||||
* If gadget is deactivated we only save new state.
|
||||
* Gadget will stay disconnected after activation.
|
||||
*
|
||||
* udc should have been started before gadget being pulled down.
|
||||
*/
|
||||
gadget->connected = false;
|
||||
goto out;
|
||||
@ -799,10 +801,10 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (gadget->deactivated)
|
||||
goto out;
|
||||
|
||||
mutex_lock(&gadget->udc->connect_lock);
|
||||
if (gadget->deactivated)
|
||||
goto unlock;
|
||||
|
||||
if (gadget->connected) {
|
||||
ret = usb_gadget_disconnect_locked(gadget);
|
||||
if (ret)
|
||||
@ -818,7 +820,6 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&gadget->udc->connect_lock);
|
||||
out:
|
||||
trace_usb_gadget_deactivate(gadget, ret);
|
||||
|
||||
return ret;
|
||||
@ -838,10 +839,10 @@ int usb_gadget_activate(struct usb_gadget *gadget)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (!gadget->deactivated)
|
||||
goto out;
|
||||
|
||||
mutex_lock(&gadget->udc->connect_lock);
|
||||
if (!gadget->deactivated)
|
||||
goto unlock;
|
||||
|
||||
gadget->deactivated = false;
|
||||
|
||||
/*
|
||||
@ -852,7 +853,8 @@ int usb_gadget_activate(struct usb_gadget *gadget)
|
||||
ret = usb_gadget_connect_locked(gadget);
|
||||
mutex_unlock(&gadget->udc->connect_lock);
|
||||
|
||||
out:
|
||||
unlock:
|
||||
mutex_unlock(&gadget->udc->connect_lock);
|
||||
trace_usb_gadget_activate(gadget, ret);
|
||||
|
||||
return ret;
|
||||
@ -1094,12 +1096,21 @@ EXPORT_SYMBOL_GPL(usb_gadget_set_state);
|
||||
/* Acquire connect_lock before calling this function. */
|
||||
static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock)
|
||||
{
|
||||
if (udc->vbus && udc->started)
|
||||
if (udc->vbus)
|
||||
usb_gadget_connect_locked(udc->gadget);
|
||||
else
|
||||
usb_gadget_disconnect_locked(udc->gadget);
|
||||
}
|
||||
|
||||
static void vbus_event_work(struct work_struct *work)
|
||||
{
|
||||
struct usb_udc *udc = container_of(work, struct usb_udc, vbus_work);
|
||||
|
||||
mutex_lock(&udc->connect_lock);
|
||||
usb_udc_connect_control_locked(udc);
|
||||
mutex_unlock(&udc->connect_lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* usb_udc_vbus_handler - updates the udc core vbus status, and try to
|
||||
* connect or disconnect gadget
|
||||
@ -1108,17 +1119,23 @@ static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc
|
||||
*
|
||||
* The udc driver calls it when it wants to connect or disconnect gadget
|
||||
* according to vbus status.
|
||||
*
|
||||
* This function can be invoked from interrupt context by irq handlers of
|
||||
* the gadget drivers, however, usb_udc_connect_control() has to run in
|
||||
* non-atomic context due to the following:
|
||||
* a. Some of the gadget driver implementations expect the ->pullup
|
||||
* callback to be invoked in non-atomic context.
|
||||
* b. usb_gadget_disconnect() acquires udc_lock which is a mutex.
|
||||
* Hence offload invocation of usb_udc_connect_control() to workqueue.
|
||||
*/
|
||||
void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
|
||||
{
|
||||
struct usb_udc *udc = gadget->udc;
|
||||
|
||||
mutex_lock(&udc->connect_lock);
|
||||
if (udc) {
|
||||
udc->vbus = status;
|
||||
usb_udc_connect_control_locked(udc);
|
||||
schedule_work(&udc->vbus_work);
|
||||
}
|
||||
mutex_unlock(&udc->connect_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(usb_udc_vbus_handler);
|
||||
|
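Illustrative only, not part of this change: with usb_udc_vbus_handler() reduced to recording the new state and scheduling vbus_work, a UDC driver can call it straight from its VBUS interrupt handler. The my_udc structure and the register-read helper below are hypothetical; usb_udc_vbus_handler() and its (gadget, bool) signature come from the code above.

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/usb/gadget.h>

struct my_udc {
	struct usb_gadget gadget;
	void __iomem *regs;
};

/* Placeholder: real hardware would read its VBUS status register here. */
static bool my_udc_vbus_present(struct my_udc *udc)
{
	return readl(udc->regs) & BIT(0);
}

static irqreturn_t my_udc_vbus_irq(int irq, void *data)
{
	struct my_udc *udc = data;

	/*
	 * Safe from hard-IRQ context after this patch: the handler only stores
	 * the new VBUS state and schedules vbus_work, which then performs the
	 * connect/disconnect under connect_lock in process context.
	 */
	usb_udc_vbus_handler(&udc->gadget, my_udc_vbus_present(udc));
	return IRQ_HANDLED;
}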
||||
@ -1351,6 +1368,7 @@ int usb_add_gadget(struct usb_gadget *gadget)
|
||||
mutex_lock(&udc_lock);
|
||||
list_add_tail(&udc->list, &udc_list);
|
||||
mutex_unlock(&udc_lock);
|
||||
INIT_WORK(&udc->vbus_work, vbus_event_work);
|
||||
|
||||
ret = device_add(&udc->dev);
|
||||
if (ret)
|
||||
@ -1482,6 +1500,7 @@ void usb_del_gadget(struct usb_gadget *gadget)
|
||||
flush_work(&gadget->work);
|
||||
device_del(&gadget->dev);
|
||||
ida_free(&gadget_id_numbers, gadget->id_number);
|
||||
cancel_work_sync(&udc->vbus_work);
|
||||
device_unregister(&udc->dev);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(usb_del_gadget);
|
||||
@ -1553,6 +1572,7 @@ static int gadget_bind_driver(struct device *dev)
|
||||
goto err_start;
|
||||
}
|
||||
usb_gadget_enable_async_callbacks(udc);
|
||||
udc->allow_connect = true;
|
||||
usb_udc_connect_control_locked(udc);
|
||||
mutex_unlock(&udc->connect_lock);
|
||||
|
||||
@ -1585,6 +1605,8 @@ static void gadget_unbind_driver(struct device *dev)
|
||||
|
||||
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
|
||||
|
||||
udc->allow_connect = false;
|
||||
cancel_work_sync(&udc->vbus_work);
|
||||
mutex_lock(&udc->connect_lock);
|
||||
usb_gadget_disconnect_locked(gadget);
|
||||
usb_gadget_disable_async_callbacks(udc);
|
||||
|
@ -535,8 +535,13 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
|
||||
cmd->status == COMP_COMMAND_RING_STOPPED) {
|
||||
xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
|
||||
ret = -ETIME;
|
||||
goto cmd_cleanup;
|
||||
}
|
||||
|
||||
ret = xhci_vendor_sync_dev_ctx(xhci, slot_id);
|
||||
if (ret)
|
||||
xhci_warn(xhci, "Sync device context failed, ret=%d\n", ret);
|
||||
|
||||
cmd_cleanup:
|
||||
xhci_free_command(xhci, cmd);
|
||||
return ret;
|
||||
@ -1824,6 +1829,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xhci_bus_suspend);
|
||||
|
||||
/*
|
||||
* Workaround for missing Cold Attach Status (CAS) if device re-plugged in S3.
|
||||
@ -1968,6 +1974,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
|
||||
spin_unlock_irqrestore(&xhci->lock, flags);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xhci_bus_resume);
|
||||
|
||||
unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd)
|
||||
{
|
||||
|
@ -65,7 +65,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
|
||||
return seg;
|
||||
}
|
||||
|
||||
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
|
||||
void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
|
||||
{
|
||||
if (seg->trbs) {
|
||||
dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
|
||||
@ -74,8 +74,9 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
|
||||
kfree(seg->bounce_buf);
|
||||
kfree(seg);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xhci_segment_free);
|
||||
|
||||
static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
|
||||
void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
|
||||
struct xhci_segment *first)
|
||||
{
|
||||
struct xhci_segment *seg;
|
||||
@ -96,9 +97,9 @@ static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
|
||||
* DMA address of the next segment. The caller needs to set any Link TRB
|
||||
* related flags, such as End TRB, Toggle Cycle, and no snoop.
|
||||
*/
|
||||
static void xhci_link_segments(struct xhci_segment *prev,
|
||||
struct xhci_segment *next,
|
||||
enum xhci_ring_type type, bool chain_links)
|
||||
void xhci_link_segments(struct xhci_segment *prev,
|
||||
struct xhci_segment *next,
|
||||
enum xhci_ring_type type, bool chain_links)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
@ -118,6 +119,7 @@ static void xhci_link_segments(struct xhci_segment *prev,
|
||||
prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xhci_link_segments);
|
||||
|
||||
/*
|
||||
* Link the ring to the new segments.
|
||||
@ -256,7 +258,7 @@ static int xhci_update_stream_segment_mapping(
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void xhci_remove_stream_mapping(struct xhci_ring *ring)
|
||||
void xhci_remove_stream_mapping(struct xhci_ring *ring)
|
||||
{
|
||||
struct xhci_segment *seg;
|
||||
|
||||
@ -269,6 +271,7 @@ static void xhci_remove_stream_mapping(struct xhci_ring *ring)
|
||||
seg = seg->next;
|
||||
} while (seg != ring->first_seg);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xhci_remove_stream_mapping);
|
||||
|
||||
static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
|
||||
{
|
||||
@ -317,6 +320,7 @@ void xhci_initialize_ring_info(struct xhci_ring *ring,
|
||||
*/
|
||||
ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xhci_initialize_ring_info);
|
||||
|
||||
/* Allocate segments and link them for a ring */
|
||||
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
|
||||
@ -362,6 +366,54 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void xhci_vendor_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
|
||||
{
|
||||
struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
|
||||
|
||||
if (ops && ops->free_container_ctx)
|
||||
ops->free_container_ctx(xhci, ctx);
|
||||
}
|
||||
|
||||
static void xhci_vendor_alloc_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
|
||||
int type, gfp_t flags)
|
||||
{
|
||||
struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
|
||||
|
||||
if (ops && ops->alloc_container_ctx)
|
||||
ops->alloc_container_ctx(xhci, ctx, type, flags);
|
||||
}
|
||||
|
||||
static struct xhci_ring *xhci_vendor_alloc_transfer_ring(struct xhci_hcd *xhci,
|
||||
u32 endpoint_type, enum xhci_ring_type ring_type,
|
||||
unsigned int max_packet, gfp_t mem_flags)
|
||||
{
|
||||
struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
|
||||
|
||||
if (ops && ops->alloc_transfer_ring)
|
||||
return ops->alloc_transfer_ring(xhci, endpoint_type, ring_type,
|
||||
max_packet, mem_flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void xhci_vendor_free_transfer_ring(struct xhci_hcd *xhci,
|
||||
struct xhci_virt_device *virt_dev, unsigned int ep_index)
|
||||
{
|
||||
struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
|
||||
|
||||
if (ops && ops->free_transfer_ring)
|
||||
ops->free_transfer_ring(xhci, virt_dev, ep_index);
|
||||
}
|
||||
|
||||
bool xhci_vendor_is_usb_offload_enabled(struct xhci_hcd *xhci,
|
||||
struct xhci_virt_device *virt_dev, unsigned int ep_index)
|
||||
{
|
||||
struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
|
||||
|
||||
if (ops && ops->is_usb_offload_enabled)
|
||||
return ops->is_usb_offload_enabled(xhci, virt_dev, ep_index);
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Create a new ring with zero or more segments.
|
||||
*
|
||||
@ -414,7 +466,11 @@ void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
|
||||
struct xhci_virt_device *virt_dev,
|
||||
unsigned int ep_index)
|
||||
{
|
||||
xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
|
||||
if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, ep_index))
|
||||
xhci_vendor_free_transfer_ring(xhci, virt_dev, ep_index);
|
||||
else
|
||||
xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
|
||||
|
||||
virt_dev->eps[ep_index].ring = NULL;
|
||||
}
|
||||
|
||||
@ -472,6 +528,7 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
|
||||
{
|
||||
struct xhci_container_ctx *ctx;
|
||||
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
|
||||
struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
|
||||
|
||||
if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
|
||||
return NULL;
|
||||
@ -485,7 +542,12 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
|
||||
if (type == XHCI_CTX_TYPE_INPUT)
|
||||
ctx->size += CTX_SIZE(xhci->hcc_params);
|
||||
|
||||
ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
|
||||
if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) &&
|
||||
(ops && ops->alloc_container_ctx))
|
||||
xhci_vendor_alloc_container_ctx(xhci, ctx, type, flags);
|
||||
else
|
||||
ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
|
||||
|
||||
if (!ctx->bytes) {
|
||||
kfree(ctx);
|
||||
return NULL;
|
||||
@ -496,9 +558,16 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
|
||||
void xhci_free_container_ctx(struct xhci_hcd *xhci,
|
||||
struct xhci_container_ctx *ctx)
|
||||
{
|
||||
struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
|
||||
|
||||
if (!ctx)
|
||||
return;
|
||||
dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
|
||||
if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) &&
|
||||
(ops && ops->free_container_ctx))
|
||||
xhci_vendor_free_container_ctx(xhci, ctx);
|
||||
else
|
||||
dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
|
||||
|
||||
kfree(ctx);
|
||||
}
|
||||
|
||||
@ -520,6 +589,7 @@ struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
|
||||
return (struct xhci_slot_ctx *)
|
||||
(ctx->bytes + CTX_SIZE(xhci->hcc_params));
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xhci_get_slot_ctx);
|
||||
|
||||
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
|
||||
struct xhci_container_ctx *ctx,
|
||||
@ -887,7 +957,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
|
||||
|
||||
for (i = 0; i < 31; i++) {
|
||||
if (dev->eps[i].ring)
|
||||
xhci_ring_free(xhci, dev->eps[i].ring);
|
||||
xhci_free_endpoint_ring(xhci, dev, i);
|
||||
if (dev->eps[i].stream_info)
|
||||
xhci_free_stream_info(xhci,
|
||||
dev->eps[i].stream_info);
|
||||
@ -1489,8 +1559,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
|
||||
mult = 0;
|
||||
|
||||
/* Set up the endpoint ring */
|
||||
virt_dev->eps[ep_index].new_ring =
|
||||
xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
|
||||
if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, ep_index) &&
|
||||
usb_endpoint_xfer_isoc(&ep->desc)) {
|
||||
virt_dev->eps[ep_index].new_ring =
|
||||
xhci_vendor_alloc_transfer_ring(xhci, endpoint_type, ring_type,
|
||||
max_packet, mem_flags);
|
||||
} else {
|
||||
virt_dev->eps[ep_index].new_ring =
|
||||
xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
|
||||
}
|
||||
|
||||
if (!virt_dev->eps[ep_index].new_ring)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -1838,6 +1916,24 @@ void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xhci_free_erst);
|
||||
|
||||
static struct xhci_device_context_array *xhci_vendor_alloc_dcbaa(
|
||||
struct xhci_hcd *xhci, gfp_t flags)
|
||||
{
|
||||
struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
|
||||
|
||||
if (ops && ops->alloc_dcbaa)
|
||||
return ops->alloc_dcbaa(xhci, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void xhci_vendor_free_dcbaa(struct xhci_hcd *xhci)
|
||||
{
|
||||
struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
|
||||
|
||||
if (ops && ops->free_dcbaa)
|
||||
ops->free_dcbaa(xhci);
|
||||
}
|
||||
|
||||
void xhci_mem_cleanup(struct xhci_hcd *xhci)
|
||||
{
|
||||
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
|
||||
@ -1889,9 +1985,13 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
|
||||
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
|
||||
"Freed medium stream array pool");
|
||||
|
||||
if (xhci->dcbaa)
|
||||
dma_free_coherent(dev, sizeof(*xhci->dcbaa),
|
||||
xhci->dcbaa, xhci->dcbaa->dma);
|
||||
if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0)) {
|
||||
xhci_vendor_free_dcbaa(xhci);
|
||||
} else {
|
||||
if (xhci->dcbaa)
|
||||
dma_free_coherent(dev, sizeof(*xhci->dcbaa),
|
||||
xhci->dcbaa, xhci->dcbaa->dma);
|
||||
}
|
||||
xhci->dcbaa = NULL;
|
||||
|
||||
scratchpad_free(xhci);
|
||||
@ -1972,7 +2072,7 @@ static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
|
||||
}
|
||||
|
||||
/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
|
||||
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
|
||||
int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
|
||||
{
|
||||
struct {
|
||||
dma_addr_t input_dma;
|
||||
@ -2092,6 +2192,7 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
|
||||
xhci_dbg(xhci, "TRB math tests passed.\n");
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xhci_check_trb_in_td_math);
|
||||
|
||||
static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
|
||||
{
|
||||
@ -2428,15 +2529,21 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
|
||||
* xHCI section 5.4.6 - Device Context array must be
|
||||
* "physically contiguous and 64-byte (cache line) aligned".
|
||||
*/
|
||||
xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
|
||||
flags);
|
||||
if (!xhci->dcbaa)
|
||||
goto fail;
|
||||
xhci->dcbaa->dma = dma;
|
||||
if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0)) {
|
||||
xhci->dcbaa = xhci_vendor_alloc_dcbaa(xhci, flags);
|
||||
if (!xhci->dcbaa)
|
||||
goto fail;
|
||||
} else {
|
||||
xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
|
||||
flags);
|
||||
if (!xhci->dcbaa)
|
||||
goto fail;
|
||||
xhci->dcbaa->dma = dma;
|
||||
}
|
||||
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
|
||||
"// Device context base array address = 0x%llx (DMA), %p (virt)",
|
||||
(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
|
||||
xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
|
||||
xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr);
|
||||
|
||||
/*
|
||||
* Initialize the ring segment pool. The ring must be a contiguous
|
||||
|
@ -173,6 +173,43 @@ static const struct of_device_id usb_xhci_of_match[] = {
|
||||
MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
|
||||
#endif
|
||||
|
||||
static struct xhci_plat_priv_overwrite xhci_plat_vendor_overwrite;
|
||||
|
||||
int xhci_plat_register_vendor_ops(struct xhci_vendor_ops *vendor_ops)
|
||||
{
|
||||
if (vendor_ops == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
xhci_plat_vendor_overwrite.vendor_ops = vendor_ops;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xhci_plat_register_vendor_ops);
|
||||
|
||||
static int xhci_vendor_init(struct xhci_hcd *xhci)
|
||||
{
|
||||
struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
|
||||
struct xhci_plat_priv *priv = xhci_to_priv(xhci);
|
||||
|
||||
if (xhci_plat_vendor_overwrite.vendor_ops)
|
||||
ops = priv->vendor_ops = xhci_plat_vendor_overwrite.vendor_ops;
|
||||
|
||||
if (ops && ops->vendor_init)
|
||||
return ops->vendor_init(xhci);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void xhci_vendor_cleanup(struct xhci_hcd *xhci)
|
||||
{
|
||||
struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
|
||||
struct xhci_plat_priv *priv = xhci_to_priv(xhci);
|
||||
|
||||
if (ops && ops->vendor_cleanup)
|
||||
ops->vendor_cleanup(xhci);
|
||||
|
||||
priv->vendor_ops = NULL;
|
||||
}
|
||||
|
||||
static int xhci_plat_probe(struct platform_device *pdev)
|
||||
{
|
||||
const struct xhci_plat_priv *priv_match;
|
||||
@ -317,6 +354,10 @@ static int xhci_plat_probe(struct platform_device *pdev)
|
||||
goto disable_clk;
|
||||
}
|
||||
|
||||
ret = xhci_vendor_init(xhci);
|
||||
if (ret)
|
||||
goto disable_usb_phy;
|
||||
|
||||
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
|
||||
|
||||
if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
|
||||
@ -410,6 +451,9 @@ static int xhci_plat_remove(struct platform_device *dev)
|
||||
if (shared_hcd)
|
||||
usb_put_hcd(shared_hcd);
|
||||
|
||||
xhci_vendor_cleanup(xhci);
|
||||
|
||||
usb_put_hcd(shared_hcd);
|
||||
clk_disable_unprepare(clk);
|
||||
clk_disable_unprepare(reg_clk);
|
||||
usb_put_hcd(hcd);
|
||||
|
@ -13,6 +13,9 @@
|
||||
struct xhci_plat_priv {
|
||||
const char *firmware_name;
|
||||
unsigned long long quirks;
|
||||
struct xhci_vendor_ops *vendor_ops;
|
||||
struct xhci_vendor_data *vendor_data;
|
||||
int (*plat_setup)(struct usb_hcd *);
|
||||
void (*plat_start)(struct usb_hcd *);
|
||||
int (*init_quirk)(struct usb_hcd *);
|
||||
int (*suspend_quirk)(struct usb_hcd *);
|
||||
@ -21,4 +24,11 @@ struct xhci_plat_priv {
|
||||
|
||||
#define hcd_to_xhci_priv(h) ((struct xhci_plat_priv *)hcd_to_xhci(h)->priv)
|
||||
#define xhci_to_priv(x) ((struct xhci_plat_priv *)(x)->priv)
|
||||
|
||||
struct xhci_plat_priv_overwrite {
|
||||
struct xhci_vendor_ops *vendor_ops;
|
||||
};
|
||||
|
||||
int xhci_plat_register_vendor_ops(struct xhci_vendor_ops *vendor_ops);
|
||||
|
||||
#endif /* _XHCI_PLAT_H */
|
||||
|
@ -25,6 +25,7 @@
|
||||
#include "xhci-trace.h"
|
||||
#include "xhci-debugfs.h"
|
||||
#include "xhci-dbgcap.h"
|
||||
#include "xhci-plat.h"
|
||||
|
||||
#define DRIVER_AUTHOR "Sarah Sharp"
|
||||
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
|
||||
@ -1675,6 +1676,11 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (xhci_vendor_usb_offload_skip_urb(xhci, urb)) {
|
||||
xhci_dbg(xhci, "skip urb for usb offload\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
if (usb_endpoint_xfer_isoc(&urb->ep->desc))
|
||||
num_tds = urb->number_of_packets;
|
||||
else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
|
||||
@ -3015,6 +3021,14 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
|
||||
xhci_finish_resource_reservation(xhci, ctrl_ctx);
|
||||
spin_unlock_irqrestore(&xhci->lock, flags);
|
||||
}
|
||||
if (ret)
|
||||
goto failed;
|
||||
|
||||
ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
|
||||
if (ret)
|
||||
xhci_warn(xhci, "sync device context failed, ret=%d", ret);
|
||||
|
||||
failed:
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -3158,7 +3172,11 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
|
||||
for (i = 0; i < 31; i++) {
|
||||
if (virt_dev->eps[i].new_ring) {
|
||||
xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
|
||||
xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
|
||||
if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, i))
|
||||
xhci_vendor_free_transfer_ring(xhci, virt_dev, i);
|
||||
else
|
||||
xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
|
||||
|
||||
virt_dev->eps[i].new_ring = NULL;
|
||||
}
|
||||
}
|
||||
@ -3319,6 +3337,13 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
|
||||
|
||||
wait_for_completion(stop_cmd->completion);
|
||||
|
||||
err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
|
||||
if (err) {
|
||||
xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
|
||||
__func__, err);
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&xhci->lock, flags);
|
||||
|
||||
/* config ep command clears toggle if add and drop ep flags are set */
|
||||
@ -3350,6 +3375,11 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
|
||||
|
||||
wait_for_completion(cfg_cmd->completion);
|
||||
|
||||
err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
|
||||
if (err)
|
||||
xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
|
||||
__func__, err);
|
||||
|
||||
xhci_free_command(xhci, cfg_cmd);
|
||||
cleanup:
|
||||
xhci_free_command(xhci, stop_cmd);
|
||||
@ -3895,6 +3925,13 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
|
||||
/* Wait for the Reset Device command to finish */
|
||||
wait_for_completion(reset_device_cmd->completion);
|
||||
|
||||
ret = xhci_vendor_sync_dev_ctx(xhci, slot_id);
|
||||
if (ret) {
|
||||
xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
|
||||
__func__, ret);
|
||||
goto command_cleanup;
|
||||
}
|
||||
|
||||
/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
|
||||
* unless we tried to reset a slot ID that wasn't enabled,
|
||||
* or the device wasn't in the addressed or configured state.
|
||||
@ -4144,6 +4181,14 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
|
||||
xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
|
||||
goto disable_slot;
|
||||
}
|
||||
|
||||
ret = xhci_vendor_sync_dev_ctx(xhci, slot_id);
|
||||
if (ret) {
|
||||
xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
|
||||
__func__, ret);
|
||||
goto disable_slot;
|
||||
}
|
||||
|
||||
vdev = xhci->devs[slot_id];
|
||||
slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
|
||||
trace_xhci_alloc_dev(slot_ctx);
|
||||
@ -4274,6 +4319,13 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
|
||||
/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
|
||||
wait_for_completion(command->completion);
|
||||
|
||||
ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
|
||||
if (ret) {
|
||||
xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
|
||||
__func__, ret);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* FIXME: From section 4.3.4: "Software shall be responsible for timing
|
||||
* the SetAddress() "recovery interval" required by USB and aborting the
|
||||
* command on a timeout.
|
||||
@ -4358,10 +4410,11 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
|
||||
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
|
||||
{
|
||||
return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xhci_address_device);
|
||||
|
||||
static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
|
||||
{
|
||||
@ -4426,6 +4479,14 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
|
||||
if (ret) {
|
||||
spin_unlock_irqrestore(&xhci->lock, flags);
|
||||
xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
|
||||
__func__, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
|
||||
spin_unlock_irqrestore(&xhci->lock, flags);
|
||||
|
||||
@ -4453,6 +4514,30 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct xhci_vendor_ops *xhci_vendor_get_ops(struct xhci_hcd *xhci)
|
||||
{
|
||||
return xhci_to_priv(xhci)->vendor_ops;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xhci_vendor_get_ops);
|
||||
|
||||
int xhci_vendor_sync_dev_ctx(struct xhci_hcd *xhci, unsigned int slot_id)
|
||||
{
|
||||
struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
|
||||
|
||||
if (ops && ops->sync_dev_ctx)
|
||||
return ops->sync_dev_ctx(xhci, slot_id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool xhci_vendor_usb_offload_skip_urb(struct xhci_hcd *xhci, struct urb *urb)
|
||||
{
|
||||
struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
|
||||
|
||||
if (ops && ops->usb_offload_skip_urb)
|
||||
return ops->usb_offload_skip_urb(xhci, urb);
|
||||
return false;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
|
||||
/* BESL to HIRD Encoding array for USB2 LPM */
|
||||
@ -5182,6 +5267,15 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
ret = xhci_vendor_sync_dev_ctx(xhci, hdev->slot_id);
|
||||
if (ret) {
|
||||
xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
|
||||
__func__, ret);
|
||||
xhci_free_command(xhci, config_cmd);
|
||||
spin_unlock_irqrestore(&xhci->lock, flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
|
||||
ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
|
||||
slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
|
||||
@ -5525,6 +5619,12 @@ void xhci_init_driver(struct hc_driver *drv,
|
||||
drv->reset_bandwidth = over->reset_bandwidth;
|
||||
if (over->update_hub_device)
|
||||
drv->update_hub_device = over->update_hub_device;
|
||||
if (over->address_device)
|
||||
drv->address_device = over->address_device;
|
||||
if (over->bus_suspend)
|
||||
drv->bus_suspend = over->bus_suspend;
|
||||
if (over->bus_resume)
|
||||
drv->bus_resume = over->bus_resume;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xhci_init_driver);
|
||||
|
@ -1963,6 +1963,14 @@ struct xhci_driver_overrides {
|
||||
void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
|
||||
int (*update_hub_device)(struct usb_hcd *hcd, struct usb_device *hdev,
|
||||
struct usb_tt *tt, gfp_t mem_flags);
|
||||
int (*address_device)(struct usb_hcd *hcd, struct usb_device *udev);
|
||||
int (*bus_suspend)(struct usb_hcd *hcd);
|
||||
int (*bus_resume)(struct usb_hcd *hcd);
|
||||
|
||||
ANDROID_KABI_RESERVE(1);
|
||||
ANDROID_KABI_RESERVE(2);
|
||||
ANDROID_KABI_RESERVE(3);
|
||||
ANDROID_KABI_RESERVE(4);
|
||||
};
|
||||
|
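For context (a sketch, not something this patch adds): a glue driver consumes the new override slots through xhci_init_driver(). The my_* names are placeholders; xhci_init_driver(), xhci_address_device() and struct xhci_driver_overrides are the symbols touched above, and the include path is assumed for an in-tree driver.

#include <linux/module.h>
#include "xhci.h"	/* include path assumed for an in-tree glue driver */

static int my_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	/* Vendor-specific pre/post work could wrap the generic helper here. */
	return xhci_address_device(hcd, udev);
}

static const struct xhci_driver_overrides my_overrides __initconst = {
	.address_device = my_address_device,
};

static struct hc_driver my_xhci_hc_driver;

static int __init my_glue_init(void)
{
	/* Fills my_xhci_hc_driver with the generic xHCI ops, then applies the overrides. */
	xhci_init_driver(&my_xhci_hc_driver, &my_overrides);
	return 0;
}
module_init(my_glue_init);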
||||
#define XHCI_CFC_DELAY 10
|
||||
@ -2083,6 +2091,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
|
||||
struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
|
||||
unsigned int num_segs, unsigned int cycle_state,
|
||||
enum xhci_ring_type type, unsigned int max_packet, gfp_t flags);
|
||||
void xhci_remove_stream_mapping(struct xhci_ring *ring);
|
||||
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
|
||||
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
|
||||
unsigned int num_trbs, gfp_t flags);
|
||||
@ -2144,6 +2153,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
|
||||
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
|
||||
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
|
||||
struct usb_tt *tt, gfp_t mem_flags);
|
||||
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
|
||||
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
|
||||
int xhci_ext_cap_init(struct xhci_hcd *xhci);
|
||||
|
||||
@ -2251,6 +2261,52 @@ static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
|
||||
urb->stream_id);
|
||||
}
|
||||
|
||||
/**
 * struct xhci_vendor_ops - function callbacks for vendor specific operations
 * @vendor_init: called for vendor init process
 * @vendor_cleanup: called for vendor cleanup process
 * @is_usb_offload_enabled: called to check if usb offload enabled
 * @alloc_dcbaa: called when allocating vendor specific dcbaa
 * @free_dcbaa: called to free vendor specific dcbaa
 * @alloc_transfer_ring: called when remote transfer ring allocation is required
 * @free_transfer_ring: called to free vendor specific transfer ring
 * @sync_dev_ctx: called when synchronization for device context is required
 * @usb_offload_skip_urb: skip urb control for offloading
 * @alloc_container_ctx: called when allocating vendor specific container context
 * @free_container_ctx: called to free vendor specific container context
 */
|
||||
struct xhci_vendor_ops {
|
||||
int (*vendor_init)(struct xhci_hcd *xhci);
|
||||
void (*vendor_cleanup)(struct xhci_hcd *xhci);
|
||||
bool (*is_usb_offload_enabled)(struct xhci_hcd *xhci,
|
||||
struct xhci_virt_device *vdev,
|
||||
unsigned int ep_index);
|
||||
|
||||
struct xhci_device_context_array *(*alloc_dcbaa)(struct xhci_hcd *xhci,
|
||||
gfp_t flags);
|
||||
void (*free_dcbaa)(struct xhci_hcd *xhci);
|
||||
|
||||
struct xhci_ring *(*alloc_transfer_ring)(struct xhci_hcd *xhci,
|
||||
u32 endpoint_type, enum xhci_ring_type ring_type,
|
||||
unsigned int max_packet, gfp_t mem_flags);
|
||||
void (*free_transfer_ring)(struct xhci_hcd *xhci,
|
||||
struct xhci_virt_device *virt_dev, unsigned int ep_index);
|
||||
int (*sync_dev_ctx)(struct xhci_hcd *xhci, unsigned int slot_id);
|
||||
bool (*usb_offload_skip_urb)(struct xhci_hcd *xhci, struct urb *urb);
|
||||
void (*alloc_container_ctx)(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
|
||||
int type, gfp_t flags);
|
||||
void (*free_container_ctx)(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
|
||||
};
|
||||
|
||||
struct xhci_vendor_ops *xhci_vendor_get_ops(struct xhci_hcd *xhci);
|
||||
|
||||
int xhci_vendor_sync_dev_ctx(struct xhci_hcd *xhci, unsigned int slot_id);
|
||||
bool xhci_vendor_usb_offload_skip_urb(struct xhci_hcd *xhci, struct urb *urb);
|
||||
void xhci_vendor_free_transfer_ring(struct xhci_hcd *xhci,
|
||||
struct xhci_virt_device *virt_dev, unsigned int ep_index);
|
||||
bool xhci_vendor_is_usb_offload_enabled(struct xhci_hcd *xhci,
|
||||
struct xhci_virt_device *virt_dev, unsigned int ep_index);
|
||||
|
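As an illustration of how these hooks are meant to be consumed (not code added by this patch): a vendor offload module registers its ops before the xHCI platform driver probes, so that xhci_vendor_init() picks them up. The my_* names below are hypothetical; xhci_plat_register_vendor_ops() and the callback signatures come from this series, and a real implementation would also have to supply the allocation hooks (alloc_dcbaa, alloc_transfer_ring, alloc_container_ctx) that the core calls once offload is reported as enabled.

#include <linux/module.h>
#include "xhci.h"	/* include paths assumed for an in-tree vendor module */
#include "xhci-plat.h"

static bool my_is_usb_offload_enabled(struct xhci_hcd *xhci,
				      struct xhci_virt_device *vdev,
				      unsigned int ep_index)
{
	/* Offload policy is vendor-specific; this sketch enables it everywhere. */
	return true;
}

static struct xhci_vendor_ops my_vendor_ops = {
	.is_usb_offload_enabled = my_is_usb_offload_enabled,
	/* .alloc_dcbaa, .alloc_transfer_ring, .sync_dev_ctx, ... as needed */
};

static int __init my_offload_init(void)
{
	/* Must run before xhci-plat probes so xhci_vendor_init() finds these ops. */
	return xhci_plat_register_vendor_ops(&my_vendor_ops);
}
module_init(my_offload_init);

MODULE_LICENSE("GPL");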
||||
/*
|
||||
* TODO: As per spec Isochronous IDT transmissions are supported. We bypass
|
||||
* them anyways as we were unable to find a device that matches the
|
||||
|
@ -9,7 +9,7 @@
|
||||
|
||||
#include "rsc_mgr.h"
|
||||
|
||||
static struct gh_rm_platform_ops *rm_platform_ops;
|
||||
static const struct gh_rm_platform_ops *rm_platform_ops;
|
||||
static DECLARE_RWSEM(rm_platform_ops_lock);
|
||||
|
||||
int gh_rm_platform_pre_mem_share(struct gh_rm *rm, struct gh_rm_mem_parcel *mem_parcel)
|
||||
@ -36,7 +36,7 @@ int gh_rm_platform_post_mem_reclaim(struct gh_rm *rm, struct gh_rm_mem_parcel *m
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(gh_rm_platform_post_mem_reclaim);
|
||||
|
||||
int gh_rm_register_platform_ops(struct gh_rm_platform_ops *platform_ops)
|
||||
int gh_rm_register_platform_ops(const struct gh_rm_platform_ops *platform_ops)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
@ -50,7 +50,7 @@ int gh_rm_register_platform_ops(struct gh_rm_platform_ops *platform_ops)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(gh_rm_register_platform_ops);
|
||||
|
||||
void gh_rm_unregister_platform_ops(struct gh_rm_platform_ops *platform_ops)
|
||||
void gh_rm_unregister_platform_ops(const struct gh_rm_platform_ops *platform_ops)
|
||||
{
|
||||
down_write(&rm_platform_ops_lock);
|
||||
if (rm_platform_ops == platform_ops)
|
||||
@ -61,10 +61,10 @@ EXPORT_SYMBOL_GPL(gh_rm_unregister_platform_ops);
|
||||
|
||||
static void _devm_gh_rm_unregister_platform_ops(void *data)
|
||||
{
|
||||
gh_rm_unregister_platform_ops(data);
|
||||
gh_rm_unregister_platform_ops((const struct gh_rm_platform_ops *)data);
|
||||
}
|
||||
|
||||
int devm_gh_rm_register_platform_ops(struct device *dev, struct gh_rm_platform_ops *ops)
|
||||
int devm_gh_rm_register_platform_ops(struct device *dev, const struct gh_rm_platform_ops *ops)
|
||||
{
|
||||
int ret;
|
||||
|
||||
@ -72,7 +72,7 @@ int devm_gh_rm_register_platform_ops(struct device *dev, struct gh_rm_platform_o
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return devm_add_action(dev, _devm_gh_rm_unregister_platform_ops, ops);
|
||||
return devm_add_action(dev, _devm_gh_rm_unregister_platform_ops, (void *)ops);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(devm_gh_rm_register_platform_ops);
|
||||
|
||||
|
@ -38,36 +38,40 @@ static int qcom_scm_gh_rm_pre_mem_share(void *rm, struct gh_rm_mem_parcel *mem_p
|
||||
new_perms[n].perm |= QCOM_SCM_PERM_READ;
|
||||
}
|
||||
|
||||
src = (1ull << QCOM_SCM_VMID_HLOS);
|
||||
src = BIT_ULL(QCOM_SCM_VMID_HLOS);
|
||||
|
||||
for (i = 0; i < mem_parcel->n_mem_entries; i++) {
|
||||
src_cpy = src;
|
||||
ret = qcom_scm_assign_mem(le64_to_cpu(mem_parcel->mem_entries[i].phys_addr),
|
||||
le64_to_cpu(mem_parcel->mem_entries[i].size),
|
||||
&src_cpy, new_perms, mem_parcel->n_acl_entries);
|
||||
if (ret) {
|
||||
src = 0;
|
||||
for (n = 0; n < mem_parcel->n_acl_entries; n++) {
|
||||
vmid = le16_to_cpu(mem_parcel->acl_entries[n].vmid);
|
||||
if (vmid <= QCOM_SCM_MAX_MANAGED_VMID)
|
||||
src |= (1ull << vmid);
|
||||
else
|
||||
src |= (1ull << QCOM_SCM_RM_MANAGED_VMID);
|
||||
}
|
||||
|
||||
new_perms[0].vmid = QCOM_SCM_VMID_HLOS;
|
||||
|
||||
for (i--; i >= 0; i--) {
|
||||
src_cpy = src;
|
||||
WARN_ON_ONCE(qcom_scm_assign_mem(
|
||||
le64_to_cpu(mem_parcel->mem_entries[i].phys_addr),
|
||||
le64_to_cpu(mem_parcel->mem_entries[i].size),
|
||||
&src_cpy, new_perms, 1));
|
||||
}
|
||||
if (ret)
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!ret)
|
||||
goto out;
|
||||
|
||||
src = 0;
|
||||
for (n = 0; n < mem_parcel->n_acl_entries; n++) {
|
||||
vmid = le16_to_cpu(mem_parcel->acl_entries[n].vmid);
|
||||
if (vmid <= QCOM_SCM_MAX_MANAGED_VMID)
|
||||
src |= BIT_ULL(vmid);
|
||||
else
|
||||
src |= BIT_ULL(QCOM_SCM_RM_MANAGED_VMID);
|
||||
}
|
||||
|
||||
new_perms[0].vmid = QCOM_SCM_VMID_HLOS;
|
||||
|
||||
for (i--; i >= 0; i--) {
|
||||
src_cpy = src;
|
||||
WARN_ON_ONCE(qcom_scm_assign_mem(
|
||||
le64_to_cpu(mem_parcel->mem_entries[i].phys_addr),
|
||||
le64_to_cpu(mem_parcel->mem_entries[i].size),
|
||||
&src_cpy, new_perms, 1));
|
||||
}
|
||||
|
||||
out:
|
||||
kfree(new_perms);
|
||||
return ret;
|
||||
}
|
||||
@ -117,13 +121,15 @@ static bool gh_has_qcom_extensions(void)
|
||||
{
|
||||
struct arm_smccc_res res;
|
||||
uuid_t uuid;
|
||||
u32 *up;
|
||||
|
||||
arm_smccc_1_1_smc(GH_QCOM_EXT_CALL_UUID_ID, &res);
|
||||
|
||||
((u32 *)&uuid.b[0])[0] = lower_32_bits(res.a0);
|
||||
((u32 *)&uuid.b[0])[1] = lower_32_bits(res.a1);
|
||||
((u32 *)&uuid.b[0])[2] = lower_32_bits(res.a2);
|
||||
((u32 *)&uuid.b[0])[3] = lower_32_bits(res.a3);
|
||||
up = (u32 *)&uuid.b[0];
|
||||
up[0] = lower_32_bits(res.a0);
|
||||
up[1] = lower_32_bits(res.a1);
|
||||
up[2] = lower_32_bits(res.a2);
|
||||
up[3] = lower_32_bits(res.a3);
|
||||
|
||||
return uuid_equal(&uuid, &QCOM_EXT_UUID);
|
||||
}
|
||||
|
@ -335,6 +335,8 @@ static bool gh_vcpu_populate(struct gh_vm_resource_ticket *ticket, struct gh_res
|
||||
if (ret)
|
||||
pr_warn("Failed to request vcpu irq %d: %d", vcpu->rsc->irq, ret);
|
||||
|
||||
enable_irq_wake(vcpu->rsc->irq);
|
||||
|
||||
out:
|
||||
mutex_unlock(&vcpu->run_lock);
|
||||
return !ret;
|
||||
|
@ -123,7 +123,7 @@ struct gh_rm_connection {
|
||||
|
||||
/**
|
||||
* struct gh_rm - private data for communicating w/Gunyah resource manager
|
||||
* @dev: pointer to device
|
||||
* @dev: pointer to RM platform device
|
||||
* @tx_ghrsc: message queue resource to TX to RM
|
||||
* @rx_ghrsc: message queue resource to RX from RM
|
||||
* @msgq: mailbox instance of TX/RX resources above
|
||||
@ -160,10 +160,10 @@ struct gh_rm {
|
||||
};
|
||||
|
||||
/**
|
||||
* gh_rm_remap_error() - Remap Gunyah resource manager errors into a Linux error code
|
||||
* gh_rm_error_remap() - Remap Gunyah resource manager errors into a Linux error code
|
||||
* @rm_error: "Standard" return value from Gunyah resource manager
|
||||
*/
|
||||
static inline int gh_rm_remap_error(enum gh_rm_error rm_error)
|
||||
static inline int gh_rm_error_remap(enum gh_rm_error rm_error)
|
||||
{
|
||||
switch (rm_error) {
|
||||
case GH_RM_ERROR_OK:
|
||||
@ -226,7 +226,7 @@ static int gh_rm_irq_domain_alloc(struct irq_domain *d, unsigned int virq, unsig
|
||||
void *arg)
|
||||
{
|
||||
struct gh_irq_chip_data *chip_data, *spec = arg;
|
||||
struct irq_fwspec parent_fwspec;
|
||||
struct irq_fwspec parent_fwspec = {};
|
||||
struct gh_rm *rm = d->host_data;
|
||||
u32 gh_virq = spec->gh_virq;
|
||||
int ret;
|
||||
@ -309,7 +309,9 @@ struct gh_resource *gh_rm_alloc_resource(struct gh_rm *rm, struct gh_rm_hyp_reso
|
||||
if (ret < 0) {
|
||||
dev_err(rm->dev,
|
||||
"Failed to allocate interrupt for resource %d label: %d: %d\n",
|
||||
ghrsc->type, ghrsc->rm_label, ghrsc->irq);
|
||||
ghrsc->type, ghrsc->rm_label, ret);
|
||||
kfree(ghrsc);
|
||||
return NULL;
|
||||
} else {
|
||||
ghrsc->irq = ret;
|
||||
}
|
||||
@ -417,7 +419,7 @@ static void gh_rm_process_notif(struct gh_rm *rm, void *msg, size_t msg_size)
|
||||
rm->active_rx_connection = connection;
|
||||
}
|
||||
|
||||
static void gh_rm_process_rply(struct gh_rm *rm, void *msg, size_t msg_size)
|
||||
static void gh_rm_process_reply(struct gh_rm *rm, void *msg, size_t msg_size)
|
||||
{
|
||||
struct gh_rm_rpc_reply_hdr *reply_hdr = msg;
|
||||
struct gh_rm_connection *connection;
|
||||
@ -514,7 +516,7 @@ static void gh_rm_msgq_rx_data(struct mbox_client *cl, void *mssg)
|
||||
gh_rm_process_notif(rm, msg, msg_size);
|
||||
break;
|
||||
case RM_RPC_TYPE_REPLY:
|
||||
gh_rm_process_rply(rm, msg, msg_size);
|
||||
gh_rm_process_reply(rm, msg, msg_size);
|
||||
break;
|
||||
case RM_RPC_TYPE_CONTINUATION:
|
||||
gh_rm_process_cont(rm, rm->active_rx_connection, msg, msg_size);
|
||||
@ -665,10 +667,10 @@ int gh_rm_call(void *_rm, u32 message_id, const void *req_buf, size_t req_buf_si
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
|
||||
/* Wait for response */
|
||||
ret = wait_for_completion_interruptible(&connection->reply.seq_done);
|
||||
if (ret)
|
||||
goto out;
|
||||
/* Wait for response. Uninterruptible because rollback based on what RM did to VM
|
||||
* requires us to know how RM handled the call.
|
||||
*/
|
||||
wait_for_completion(&connection->reply.seq_done);
|
||||
|
||||
/* Check for internal (kernel) error waiting for the response */
|
||||
if (connection->reply.ret) {
|
||||
@ -682,8 +684,7 @@ int gh_rm_call(void *_rm, u32 message_id, const void *req_buf, size_t req_buf_si
|
||||
if (connection->reply.rm_error != GH_RM_ERROR_OK) {
|
||||
dev_warn(rm->dev, "RM rejected message %08x. Error: %d\n", message_id,
|
||||
connection->reply.rm_error);
|
||||
dump_stack();
|
||||
ret = gh_rm_remap_error(connection->reply.rm_error);
|
||||
ret = gh_rm_error_remap(connection->reply.rm_error);
|
||||
kfree(connection->payload);
|
||||
goto out;
|
||||
}
|
||||
@ -913,7 +914,6 @@ static int gh_rm_drv_probe(struct platform_device *pdev)
|
||||
err_irq_domain:
|
||||
irq_domain_remove(rm->irq_domain);
|
||||
err_msgq:
|
||||
mbox_free_channel(gh_msgq_chan(&rm->msgq));
|
||||
gh_msgq_remove(&rm->msgq);
|
||||
err_cache:
|
||||
kmem_cache_destroy(rm->cache);
|
||||
@ -928,7 +928,6 @@ static int gh_rm_drv_remove(struct platform_device *pdev)
|
||||
auxiliary_device_uninit(&rm->adev);
|
||||
misc_deregister(&rm->miscdev);
|
||||
irq_domain_remove(rm->irq_domain);
|
||||
mbox_free_channel(gh_msgq_chan(&rm->msgq));
|
||||
gh_msgq_remove(&rm->msgq);
|
||||
kmem_cache_destroy(rm->cache);
|
||||
|
||||
|
@ -139,7 +139,7 @@ static int _gh_rm_mem_append(struct gh_rm *rm, u32 mem_handle, bool end_append,
|
||||
return -ENOMEM;
|
||||
|
||||
req_header = msg;
|
||||
mem_section = (void *)req_header + sizeof(struct gh_rm_mem_append_req_header);
|
||||
mem_section = (void *)(req_header + 1);
|
||||
|
||||
req_header->mem_handle = cpu_to_le32(mem_handle);
|
||||
if (end_append)
|
||||
|
@ -31,13 +31,10 @@ static void gh_vm_put_function(struct gh_vm_function *fn)
|
||||
static struct gh_vm_function *gh_vm_get_function(u32 type)
|
||||
{
|
||||
struct gh_vm_function *fn;
|
||||
int r;
|
||||
|
||||
fn = xa_load(&gh_vm_functions, type);
|
||||
if (!fn) {
|
||||
r = request_module("ghfunc:%d", type);
|
||||
if (r)
|
||||
return ERR_PTR(r > 0 ? -r : r);
|
||||
request_module("ghfunc:%d", type);
|
||||
|
||||
fn = xa_load(&gh_vm_functions, type);
|
||||
}
|
||||
@ -617,7 +614,7 @@ static int gh_vm_ensure_started(struct gh_vm *ghvm)
|
||||
if (ret)
|
||||
return ret;
|
||||
/** gh_vm_start() is guaranteed to bring status out of
|
||||
* GH_RM_VM_STATUS_LOAD, thus inifitely recursive call is not
|
||||
* GH_RM_VM_STATUS_LOAD, thus infinitely recursive call is not
|
||||
* possible
|
||||
*/
|
||||
return gh_vm_ensure_started(ghvm);
|
||||
@ -668,10 +665,6 @@ static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
if (overflows_type(dtb_config.guest_phys_addr + dtb_config.size, u64))
|
||||
return -EOVERFLOW;
|
||||
|
||||
/* Gunyah requires that dtb_config is page aligned */
|
||||
if (!PAGE_ALIGNED(dtb_config.guest_phys_addr) || !PAGE_ALIGNED(dtb_config.size))
|
||||
return -EINVAL;
|
||||
|
||||
ghvm->dtb_config = dtb_config;
|
||||
|
||||
r = 0;
|
||||
|
@ -14,9 +14,7 @@
|
||||
|
||||
static bool pages_are_mergeable(struct page *a, struct page *b)
|
||||
{
|
||||
if (page_to_pfn(a) + 1 != page_to_pfn(b))
|
||||
return false;
|
||||
return true;
|
||||
return page_to_pfn(a) + 1 == page_to_pfn(b);
|
||||
}
|
||||
|
||||
static bool gh_vm_mem_overlap(struct gh_vm_mem *a, u64 addr, u64 size)
|
||||
|
@ -1 +0,0 @@
|
||||
jaegeuk@google.com
|
@ -1 +0,0 @@
|
||||
balsini@google.com
|
@ -1,2 +0,0 @@
|
||||
akailash@google.com
|
||||
paullawrence@google.com
|
@ -1 +0,0 @@
|
||||
per-file net/**=file:/net/OWNERS
|
@ -1,4 +0,0 @@
|
||||
per-file bio.h=file:/block/OWNERS
|
||||
per-file blk*.h=file:/block/OWNERS
|
||||
per-file f2fs**=file:/fs/f2fs/OWNERS
|
||||
per-file net**=file:/net/OWNERS
|
@ -161,7 +161,6 @@ enum cpuhp_state {
|
||||
CPUHP_AP_PERF_X86_CSTATE_STARTING,
|
||||
CPUHP_AP_PERF_XTENSA_STARTING,
|
||||
CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
|
||||
CPUHP_AP_ARM_SDEI_STARTING,
|
||||
CPUHP_AP_ARM_VFP_STARTING,
|
||||
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
|
||||
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
|
||||
|
@ -167,15 +167,15 @@ struct gh_rm_platform_ops {
|
||||
};
|
||||
|
||||
#if IS_ENABLED(CONFIG_GUNYAH_PLATFORM_HOOKS)
|
||||
int gh_rm_register_platform_ops(struct gh_rm_platform_ops *platform_ops);
|
||||
void gh_rm_unregister_platform_ops(struct gh_rm_platform_ops *platform_ops);
|
||||
int devm_gh_rm_register_platform_ops(struct device *dev, struct gh_rm_platform_ops *ops);
|
||||
int gh_rm_register_platform_ops(const struct gh_rm_platform_ops *platform_ops);
|
||||
void gh_rm_unregister_platform_ops(const struct gh_rm_platform_ops *platform_ops);
|
||||
int devm_gh_rm_register_platform_ops(struct device *dev, const struct gh_rm_platform_ops *ops);
|
||||
#else
|
||||
static inline int gh_rm_register_platform_ops(struct gh_rm_platform_ops *platform_ops)
|
||||
static inline int gh_rm_register_platform_ops(const struct gh_rm_platform_ops *platform_ops)
|
||||
{ return 0; }
|
||||
static inline void gh_rm_unregister_platform_ops(struct gh_rm_platform_ops *platform_ops) { }
|
||||
static inline void gh_rm_unregister_platform_ops(const struct gh_rm_platform_ops *platform_ops) { }
|
||||
static inline int devm_gh_rm_register_platform_ops(struct device *dev,
|
||||
struct gh_rm_platform_ops *ops) { return 0; }
|
||||
const struct gh_rm_platform_ops *ops) { return 0; }
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
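Illustrative only, not taken from this patch: with the constified API above, a platform integration can keep its hook table in rodata and rely on the devm_ helper for unregistration. The my_* names are placeholders, the header name is assumed, and the hook members are left empty because only the registration calls appear in this hunk.

#include <linux/platform_device.h>
#include <linux/gunyah_rsc_mgr.h>	/* header name assumed */

/* The ops table can now live in rodata. */
static const struct gh_rm_platform_ops my_gh_plat_ops = {
	/* platform-specific pre/post memory share hooks would be set here */
};

static int my_gh_plat_probe(struct platform_device *pdev)
{
	/* The devm_ variant drops the registration automatically on unbind. */
	return devm_gh_rm_register_platform_ops(&pdev->dev, &my_gh_plat_ops);
}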
@ -21,6 +21,16 @@ int __must_check gh_vm_get(struct gh_vm *ghvm);
|
||||
void gh_vm_put(struct gh_vm *ghvm);
|
||||
|
||||
struct gh_vm_function_instance;
|
||||
/**
|
||||
* struct gh_vm_function - Represents a function type
|
||||
* @type: value from &enum gh_fn_type
|
||||
* @name: friendly name for debug purposes
|
||||
* @mod: owner of the function type
|
||||
* @bind: Called when a new function of this type has been allocated.
|
||||
* @unbind: Called when the function instance is being destroyed.
|
||||
* @compare: Compare function instance @f's argument to the provided arg.
|
||||
* Return true if they are equivalent. Used on GH_VM_REMOVE_FUNCTION.
|
||||
*/
|
||||
struct gh_vm_function {
|
||||
u32 type;
|
||||
const char *name;
|
||||
@ -84,9 +94,26 @@ void gh_vm_function_unregister(struct gh_vm_function *f);
|
||||
module_gh_vm_function(_name); \
|
||||
MODULE_ALIAS_GH_VM_FUNCTION(_type, _idx)
|
||||
|
||||
/**
 * struct gh_vm_resource_ticket - Represents a ticket to reserve exclusive access to VM resource(s)
 * @vm_list: for @gh_vm->resource_tickets
 * @resources: List of resource(s) associated with this ticket (members are from @gh_resource->list)
 * @resource_type: Type of resource this ticket reserves
 * @label: Label of the resource from resource manager this ticket reserves.
 * @owner: owner of the ticket
 * @populate: callback provided by the ticket owner and called when a resource is found that
 *            matches @resource_type and @label. Note that this callback could be called
 *            multiple times if userspace created multiple resources with the same type/label.
 *            This callback may also have significant delay after gh_vm_add_resource_ticket()
 *            since gh_vm_add_resource_ticket() could be called before the VM starts.
 * @unpopulate: callback provided by the ticket owner and called when the ticket owner should
 *              no longer use the resource provided in the argument. When unpopulate() returns,
 *              the ticket owner should not be able to use the resource any more as the resource
 *              might be freed.
 */
|
||||
struct gh_vm_resource_ticket {
|
||||
struct list_head vm_list; /* for gh_vm's resource tickets list */
|
||||
struct list_head resources; /* resources associated with this ticket */
|
||||
struct list_head vm_list;
|
||||
struct list_head resources;
|
||||
enum gh_resource_type resource_type;
|
||||
u32 label;
|
||||
|
||||
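A consumer of this API (for example a VM function driver) would fill in a ticket roughly as follows. This is a sketch only: the populate/unpopulate prototypes, the .owner type, the GH_RESOURCE_TYPE_BELL_TX value and the exact gh_vm_add_resource_ticket() signature are assumptions inferred from the kernel-doc above, not copied from this hunk.

#include <linux/module.h>

/* Assumed callback prototypes; only the struct members are shown in this hunk. */
static bool my_populate(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc)
{
	/* Claim the resource, e.g. stash ghrsc and request its IRQ. */
	return true;
}

static void my_unpopulate(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc)
{
	/* Stop using ghrsc; it may be freed once this returns. */
}

static struct gh_vm_resource_ticket my_ticket = {
	.resource_type = GH_RESOURCE_TYPE_BELL_TX,	/* illustrative type, assumed */
	.label = 0,
	.owner = THIS_MODULE,
	.populate = my_populate,
	.unpopulate = my_unpopulate,
};

/* Assumed registration call, named in the kernel-doc above: */
/* gh_vm_add_resource_ticket(ghvm, &my_ticket); */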
|
@ -213,7 +213,18 @@ extern void platform_device_put(struct platform_device *pdev);
|
||||
|
||||
struct platform_driver {
|
||||
int (*probe)(struct platform_device *);
|
||||
|
||||
/*
 * Traditionally the remove callback returned an int which however is
 * ignored by the driver core. This led to wrong expectations by driver
 * authors who thought returning an error code was a valid error
 * handling strategy. To convert to a callback returning void, new
 * drivers should implement .remove_new() until the conversion is done
 * that eventually makes .remove() return void.
 */
|
||||
int (*remove)(struct platform_device *);
|
||||
void (*remove_new)(struct platform_device *);
|
||||
|
||||
void (*shutdown)(struct platform_device *);
|
||||
int (*suspend)(struct platform_device *, pm_message_t state);
|
||||
int (*resume)(struct platform_device *);
|
||||
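To make the conversion concrete (a sketch, not part of this hunk), a new-style driver implements the void-returning callback; the my_* names are placeholders.

#include <linux/module.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	return 0;
}

static void my_remove(struct platform_device *pdev)
{
	/* Release resources here; no return value, so no misleading error path. */
}

static struct platform_driver my_driver = {
	.probe = my_probe,
	.remove_new = my_remove,
	.driver = {
		.name = "my-driver",
	},
};
module_platform_driver(my_driver);

MODULE_LICENSE("GPL");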
|
@ -4,6 +4,7 @@
|
||||
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/alarmtimer.h>
|
||||
#include <linux/timerqueue.h>
|
||||
|
||||
@ -62,16 +63,18 @@ static inline int clockid_to_fd(const clockid_t clk)
|
||||
* cpu_timer - Posix CPU timer representation for k_itimer
|
||||
* @node: timerqueue node to queue in the task/sig
|
||||
* @head: timerqueue head on which this timer is queued
|
||||
* @task: Pointer to target task
|
||||
* @pid: Pointer to target task PID
|
||||
* @elist: List head for the expiry list
|
||||
* @firing: Timer is currently firing
|
||||
* @handling: Pointer to the task which handles expiry
|
||||
*/
|
||||
struct cpu_timer {
|
||||
struct timerqueue_node node;
|
||||
struct timerqueue_head *head;
|
||||
struct pid *pid;
|
||||
struct list_head elist;
|
||||
int firing;
|
||||
struct timerqueue_node node;
|
||||
struct timerqueue_head *head;
|
||||
struct pid *pid;
|
||||
struct list_head elist;
|
||||
int firing;
|
||||
struct task_struct __rcu *handling;
|
||||
};
|
||||
|
||||
static inline bool cpu_timer_enqueue(struct timerqueue_head *head,
|
||||
@ -135,10 +138,12 @@ struct posix_cputimers {
|
||||
/**
|
||||
* posix_cputimers_work - Container for task work based posix CPU timer expiry
|
||||
* @work: The task work to be scheduled
|
||||
* @mutex: Mutex held around expiry in context of this task work
|
||||
* @scheduled: @work has been scheduled already, no further processing
|
||||
*/
|
||||
struct posix_cputimers_work {
|
||||
struct callback_head work;
|
||||
struct mutex mutex;
|
||||
unsigned int scheduled;
|
||||
};
|
||||
|
||||
|
@ -71,7 +71,6 @@ struct sk_psock_link {
|
||||
};
|
||||
|
||||
struct sk_psock_work_state {
|
||||
struct sk_buff *skb;
|
||||
u32 len;
|
||||
u32 off;
|
||||
};
|
||||
@ -105,7 +104,7 @@ struct sk_psock {
|
||||
struct proto *sk_proto;
|
||||
struct mutex work_mutex;
|
||||
struct sk_psock_work_state work_state;
|
||||
struct work_struct work;
|
||||
struct delayed_work work;
|
||||
struct rcu_work rwork;
|
||||
};
|
||||
|
||||
|
@ -157,7 +157,17 @@ struct usb_phy {
|
||||
*/
|
||||
enum usb_charger_type (*charger_detect)(struct usb_phy *x);
|
||||
|
||||
/*
 * Slot 0 here is reserved for a notify_port_status callback addition that narrowly
 * missed the ABI freeze deadline due to upstream review discussions. See
 * https://lore.kernel.org/linux-usb/20230607062500.24669-1-stanley_chang@realtek.com/
 * for details. All other slots are for "normal" future ABI breaks in LTS updates.
 */
|
||||
ANDROID_KABI_RESERVE(0);
|
||||
ANDROID_KABI_RESERVE(1);
|
||||
ANDROID_KABI_RESERVE(2);
|
||||
ANDROID_KABI_RESERVE(3);
|
||||
ANDROID_KABI_RESERVE(4);
|
||||
};
|
||||
|
||||
/* for board-specific init logic */
|
||||
|
@ -335,6 +335,7 @@ enum {
|
||||
enum {
|
||||
HCI_SETUP,
|
||||
HCI_CONFIG,
|
||||
HCI_DEBUGFS_CREATED,
|
||||
HCI_AUTO_OFF,
|
||||
HCI_RFKILLED,
|
||||
HCI_MGMT,
|
||||
|
@ -514,6 +514,7 @@ struct hci_dev {
|
||||
struct work_struct cmd_sync_work;
|
||||
struct list_head cmd_sync_work_list;
|
||||
struct mutex cmd_sync_work_lock;
|
||||
struct mutex unregister_lock;
|
||||
struct work_struct cmd_sync_cancel_work;
|
||||
struct work_struct reenable_adv_work;
|
||||
|
||||
|
@ -186,7 +186,7 @@ struct pneigh_entry {
|
||||
netdevice_tracker dev_tracker;
|
||||
u32 flags;
|
||||
u8 protocol;
|
||||
u8 key[];
|
||||
u32 key[];
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -54,7 +54,7 @@ struct netns_sysctl_ipv6 {
|
||||
int seg6_flowlabel;
|
||||
u32 ioam6_id;
|
||||
u64 ioam6_id_wide;
|
||||
bool skip_notify_on_dev_down;
|
||||
int skip_notify_on_dev_down;
|
||||
u8 fib_notify_on_flag_change;
|
||||
ANDROID_KABI_RESERVE(1);
|
||||
};
|
||||
|
@ -335,6 +335,7 @@ struct sk_filter;
|
||||
* @sk_cgrp_data: cgroup data for this cgroup
|
||||
* @sk_memcg: this socket's memory cgroup association
|
||||
* @sk_write_pending: a write to stream socket waits to start
|
||||
* @sk_wait_pending: number of threads blocked on this socket
|
||||
* @sk_state_change: callback to indicate change in the state of the sock
|
||||
* @sk_data_ready: callback to indicate there is data to be processed
|
||||
* @sk_write_space: callback to indicate there is bf sending space available
|
||||
@ -427,6 +428,7 @@ struct sock {
|
||||
unsigned int sk_napi_id;
|
||||
#endif
|
||||
int sk_rcvbuf;
|
||||
int sk_wait_pending;
|
||||
|
||||
struct sk_filter __rcu *sk_filter;
|
||||
union {
|
||||
@ -1182,6 +1184,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
|
||||
|
||||
#define sk_wait_event(__sk, __timeo, __condition, __wait) \
|
||||
({ int __rc; \
|
||||
__sk->sk_wait_pending++; \
|
||||
release_sock(__sk); \
|
||||
__rc = __condition; \
|
||||
if (!__rc) { \
|
||||
@ -1191,6 +1194,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
|
||||
} \
|
||||
sched_annotate_sleep(); \
|
||||
lock_sock(__sk); \
|
||||
__sk->sk_wait_pending--; \
|
||||
__rc = __condition; \
|
||||
__rc; \
|
||||
})
|
||||
|
@ -1467,6 +1467,8 @@ static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
|
||||
}
|
||||
|
||||
void tcp_cleanup_rbuf(struct sock *sk, int copied);
|
||||
void __tcp_cleanup_rbuf(struct sock *sk, int copied);
|
||||
|
||||
|
||||
/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
|
||||
* If 87.5 % (7/8) of the space has been consumed, we want to override
|
||||
@ -2291,6 +2293,14 @@ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
|
||||
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
|
||||
#endif /* CONFIG_BPF_SYSCALL */
|
||||
|
||||
#ifdef CONFIG_INET
|
||||
void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
|
||||
#else
|
||||
static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
|
||||
struct sk_msg *msg, u32 bytes, int flags);
|
||||
#endif /* CONFIG_NET_SOCK_MSG */
|
||||
|
@ -1 +0,0 @@
|
||||
per-file f2fs**=file:/fs/f2fs/OWNERS
|
@ -161,8 +161,9 @@ DECLARE_RESTRICTED_HOOK(android_rvh_tick_entry,
|
||||
TP_ARGS(rq), 1);
|
||||
|
||||
DECLARE_RESTRICTED_HOOK(android_rvh_schedule,
|
||||
TP_PROTO(struct task_struct *prev, struct task_struct *next, struct rq *rq),
|
||||
TP_ARGS(prev, next, rq), 1);
|
||||
TP_PROTO(unsigned int sched_mode, struct task_struct *prev,
|
||||
struct task_struct *next, struct rq *rq),
|
||||
TP_ARGS(sched_mode, prev, next, rq), 1);
|
||||
|
||||
DECLARE_RESTRICTED_HOOK(android_rvh_sched_cpu_starting,
|
||||
TP_PROTO(int cpu),
|
||||
|
@ -1,3 +0,0 @@
|
||||
per-file f2fs**=file:/fs/f2fs/OWNERS
|
||||
per-file fuse**=file:/fs/fuse/OWNERS
|
||||
per-file net**=file:/net/OWNERS
|
@ -28,7 +28,7 @@
|
||||
#define _BITUL(x) (_UL(1) << (x))
|
||||
#define _BITULL(x) (_ULL(1) << (x))
|
||||
|
||||
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (typeof(x))(a) - 1)
|
||||
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
|
||||
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
|
||||
|
||||
#define __KERNEL_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
|
||||
|
@ -1,4 +0,0 @@
|
||||
connoro@google.com
|
||||
elavila@google.com
|
||||
qperret@google.com
|
||||
tkjos@google.com
|
@ -6638,7 +6638,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
|
||||
rq->last_seen_need_resched_ns = 0;
|
||||
#endif
|
||||
|
||||
trace_android_rvh_schedule(prev, next, rq);
|
||||
trace_android_rvh_schedule(sched_mode, prev, next, rq);
|
||||
if (likely(prev != next)) {
|
||||
rq->nr_switches++;
|
||||
/*
|
||||
|
@ -5011,7 +5011,7 @@ static void android_vh_scheduler_tick(void *unused, struct rq *rq)
|
||||
walt_lb_tick(rq);
|
||||
}
|
||||
|
||||
static void android_rvh_schedule(void *unused, struct task_struct *prev,
|
||||
static void android_rvh_schedule(void *unused, unsigned int sched_mode, struct task_struct *prev,
|
||||
struct task_struct *next, struct rq *rq)
|
||||
{
|
||||
u64 wallclock;
|
||||
|
@ -847,6 +847,8 @@ static u64 collect_timerqueue(struct timerqueue_head *head,
return expires;

ctmr->firing = 1;
/* See posix_cpu_timer_wait_running() */
rcu_assign_pointer(ctmr->handling, current);
cpu_timer_dequeue(ctmr);
list_add_tail(&ctmr->elist, firing);
}
@ -1162,7 +1164,49 @@ static void handle_posix_cpu_timers(struct task_struct *tsk);
#ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
static void posix_cpu_timers_work(struct callback_head *work)
{
struct posix_cputimers_work *cw = container_of(work, typeof(*cw), work);

mutex_lock(&cw->mutex);
handle_posix_cpu_timers(current);
mutex_unlock(&cw->mutex);
}

/*
* Invoked from the posix-timer core when a cancel operation failed because
* the timer is marked firing. The caller holds rcu_read_lock(), which
* protects the timer and the task which is expiring it from being freed.
*/
static void posix_cpu_timer_wait_running(struct k_itimer *timr)
{
struct task_struct *tsk = rcu_dereference(timr->it.cpu.handling);

/* Has the handling task completed expiry already? */
if (!tsk)
return;

/* Ensure that the task cannot go away */
get_task_struct(tsk);
/* Now drop the RCU protection so the mutex can be locked */
rcu_read_unlock();
/* Wait on the expiry mutex */
mutex_lock(&tsk->posix_cputimers_work.mutex);
/* Release it immediately again. */
mutex_unlock(&tsk->posix_cputimers_work.mutex);
/* Drop the task reference. */
put_task_struct(tsk);
/* Relock RCU so the callsite is balanced */
rcu_read_lock();
}

static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
{
/* Ensure that timr->it.cpu.handling task cannot go away */
rcu_read_lock();
spin_unlock_irq(&timr->it_lock);
posix_cpu_timer_wait_running(timr);
rcu_read_unlock();
/* @timr is on stack and is valid */
spin_lock_irq(&timr->it_lock);
}

/*
@ -1178,6 +1222,7 @@ void clear_posix_cputimers_work(struct task_struct *p)
sizeof(p->posix_cputimers_work.work));
init_task_work(&p->posix_cputimers_work.work,
posix_cpu_timers_work);
mutex_init(&p->posix_cputimers_work.mutex);
p->posix_cputimers_work.scheduled = false;
}

@ -1256,6 +1301,18 @@ static inline void __run_posix_cpu_timers(struct task_struct *tsk)
lockdep_posixtimer_exit();
}

static void posix_cpu_timer_wait_running(struct k_itimer *timr)
{
cpu_relax();
}

static void posix_cpu_timer_wait_running_nsleep(struct k_itimer *timr)
{
spin_unlock_irq(&timr->it_lock);
cpu_relax();
spin_lock_irq(&timr->it_lock);
}

static inline bool posix_cpu_timers_work_scheduled(struct task_struct *tsk)
{
return false;
@ -1364,6 +1421,8 @@ static void handle_posix_cpu_timers(struct task_struct *tsk)
*/
if (likely(cpu_firing >= 0))
cpu_timer_fire(timer);
/* See posix_cpu_timer_wait_running() */
rcu_assign_pointer(timer->it.cpu.handling, NULL);
spin_unlock(&timer->it_lock);
}
}
@ -1498,23 +1557,16 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
expires = cpu_timer_getexpires(&timer.it.cpu);
error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
if (!error) {
/*
* Timer is now unarmed, deletion can not fail.
*/
/* Timer is now unarmed, deletion can not fail. */
posix_cpu_timer_del(&timer);
} else {
while (error == TIMER_RETRY) {
posix_cpu_timer_wait_running_nsleep(&timer);
error = posix_cpu_timer_del(&timer);
}
}
spin_unlock_irq(&timer.it_lock);

while (error == TIMER_RETRY) {
/*
* We need to handle case when timer was or is in the
* middle of firing. In other cases we already freed
* resources.
*/
spin_lock_irq(&timer.it_lock);
error = posix_cpu_timer_del(&timer);
spin_unlock_irq(&timer.it_lock);
}
spin_unlock_irq(&timer.it_lock);

if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
/*
@ -1624,6 +1676,7 @@ const struct k_clock clock_posix_cpu = {
.timer_del = posix_cpu_timer_del,
.timer_get = posix_cpu_timer_get,
.timer_rearm = posix_cpu_timer_rearm,
.timer_wait_running = posix_cpu_timer_wait_running,
};

const struct k_clock clock_process = {
@ -846,6 +846,10 @@ static struct k_itimer *timer_wait_running(struct k_itimer *timer,
rcu_read_lock();
unlock_timer(timer, *flags);

/*
* kc->timer_wait_running() might drop RCU lock. So @timer
* cannot be touched anymore after the function returns!
*/
if (!WARN_ON_ONCE(!kc->timer_wait_running))
kc->timer_wait_running(timer);

@ -1,2 +0,0 @@
lorenzo@google.com
maze@google.com
@ -2685,7 +2685,9 @@ void hci_unregister_dev(struct hci_dev *hdev)
{
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

mutex_lock(&hdev->unregister_lock);
hci_dev_set_flag(hdev, HCI_UNREGISTER);
mutex_unlock(&hdev->unregister_lock);

write_lock(&hci_dev_list_lock);
list_del(&hdev->list);

@ -629,6 +629,7 @@ void hci_cmd_sync_init(struct hci_dev *hdev)
INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
mutex_init(&hdev->cmd_sync_work_lock);
mutex_init(&hdev->unregister_lock);

INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
@ -688,14 +689,19 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
void *data, hci_cmd_sync_work_destroy_t destroy)
{
struct hci_cmd_sync_work_entry *entry;
int err = 0;

if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
return -ENODEV;
mutex_lock(&hdev->unregister_lock);
if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
err = -ENODEV;
goto unlock;
}

entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;

if (!entry) {
err = -ENOMEM;
goto unlock;
}
entry->func = func;
entry->data = data;
entry->destroy = destroy;
@ -706,7 +712,9 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,

queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);

return 0;
unlock:
mutex_unlock(&hdev->unregister_lock);
return err;
}
EXPORT_SYMBOL(hci_cmd_sync_queue);

@ -4483,6 +4491,9 @@ static int hci_init_sync(struct hci_dev *hdev)
!hci_dev_test_flag(hdev, HCI_CONFIG))
return 0;

if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED))
return 0;

hci_debugfs_create_common(hdev);

if (lmp_bredr_capable(hdev))
@ -480,8 +480,6 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
msg_rx = sk_psock_peek_msg(psock);
}
out:
if (psock->work_state.skb && copied > 0)
schedule_work(&psock->work);
return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
@ -623,42 +621,33 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,

static void sk_psock_skb_state(struct sk_psock *psock,
struct sk_psock_work_state *state,
struct sk_buff *skb,
int len, int off)
{
spin_lock_bh(&psock->ingress_lock);
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
state->skb = skb;
state->len = len;
state->off = off;
} else {
sock_drop(psock->sk, skb);
}
spin_unlock_bh(&psock->ingress_lock);
}

static void sk_psock_backlog(struct work_struct *work)
{
struct sk_psock *psock = container_of(work, struct sk_psock, work);
struct delayed_work *dwork = to_delayed_work(work);
struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
struct sk_psock_work_state *state = &psock->work_state;
struct sk_buff *skb = NULL;
u32 len = 0, off = 0;
bool ingress;
u32 len, off;
int ret;

mutex_lock(&psock->work_mutex);
if (unlikely(state->skb)) {
spin_lock_bh(&psock->ingress_lock);
skb = state->skb;
if (unlikely(state->len)) {
len = state->len;
off = state->off;
state->skb = NULL;
spin_unlock_bh(&psock->ingress_lock);
}
if (skb)
goto start;

while ((skb = skb_dequeue(&psock->ingress_skb))) {
while ((skb = skb_peek(&psock->ingress_skb))) {
len = skb->len;
off = 0;
if (skb_bpf_strparser(skb)) {
@ -667,7 +656,6 @@ static void sk_psock_backlog(struct work_struct *work)
off = stm->offset;
len = stm->full_len;
}
start:
ingress = skb_bpf_ingress(skb);
skb_bpf_redirect_clear(skb);
do {
@ -677,22 +665,28 @@ static void sk_psock_backlog(struct work_struct *work)
len, ingress);
if (ret <= 0) {
if (ret == -EAGAIN) {
sk_psock_skb_state(psock, state, skb,
len, off);
sk_psock_skb_state(psock, state, len, off);

/* Delay slightly to prioritize any
* other work that might be here.
*/
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
schedule_delayed_work(&psock->work, 1);
goto end;
}
/* Hard errors break pipe and stop xmit. */
sk_psock_report_error(psock, ret ? -ret : EPIPE);
sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
sock_drop(psock->sk, skb);
goto end;
}
off += ret;
len -= ret;
} while (len);

if (!ingress)
skb = skb_dequeue(&psock->ingress_skb);
if (!ingress) {
kfree_skb(skb);
}
}
end:
mutex_unlock(&psock->work_mutex);
@ -733,7 +727,7 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
INIT_LIST_HEAD(&psock->link);
spin_lock_init(&psock->link_lock);

INIT_WORK(&psock->work, sk_psock_backlog);
INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
mutex_init(&psock->work_mutex);
INIT_LIST_HEAD(&psock->ingress_msg);
spin_lock_init(&psock->ingress_lock);
@ -785,11 +779,6 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock)
skb_bpf_redirect_clear(skb);
sock_drop(psock->sk, skb);
}
kfree_skb(psock->work_state.skb);
/* We null the skb here to ensure that calls to sk_psock_backlog
* do not pick up the free'd skb.
*/
psock->work_state.skb = NULL;
__sk_psock_purge_ingress_msg(psock);
}

@ -808,7 +797,6 @@ void sk_psock_stop(struct sk_psock *psock)
spin_lock_bh(&psock->ingress_lock);
sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
sk_psock_cork_free(psock);
__sk_psock_zap_ingress(psock);
spin_unlock_bh(&psock->ingress_lock);
}

@ -822,7 +810,8 @@ static void sk_psock_destroy(struct work_struct *work)

sk_psock_done_strp(psock);

cancel_work_sync(&psock->work);
cancel_delayed_work_sync(&psock->work);
__sk_psock_zap_ingress(psock);
mutex_destroy(&psock->work_mutex);

psock_progs_drop(&psock->progs);
@ -937,7 +926,7 @@ static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
}

skb_queue_tail(&psock_other->ingress_skb, skb);
schedule_work(&psock_other->work);
schedule_delayed_work(&psock_other->work, 0);
spin_unlock_bh(&psock_other->ingress_lock);
return 0;
}
@ -989,10 +978,8 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
err = -EIO;
sk_other = psock->sk;
if (sock_flag(sk_other, SOCK_DEAD) ||
!sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
skb_bpf_redirect_clear(skb);
!sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
goto out_free;
}

skb_bpf_set_ingress(skb);

@ -1017,22 +1004,23 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
spin_lock_bh(&psock->ingress_lock);
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
skb_queue_tail(&psock->ingress_skb, skb);
schedule_work(&psock->work);
schedule_delayed_work(&psock->work, 0);
err = 0;
}
spin_unlock_bh(&psock->ingress_lock);
if (err < 0) {
skb_bpf_redirect_clear(skb);
if (err < 0)
goto out_free;
}
}
break;
case __SK_REDIRECT:
tcp_eat_skb(psock->sk, skb);
err = sk_psock_skb_redirect(psock, skb);
break;
case __SK_DROP:
default:
out_free:
skb_bpf_redirect_clear(skb);
tcp_eat_skb(psock->sk, skb);
sock_drop(psock->sk, skb);
}

@ -1048,7 +1036,7 @@ static void sk_psock_write_space(struct sock *sk)
psock = sk_psock(sk);
if (likely(psock)) {
if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
schedule_work(&psock->work);
schedule_delayed_work(&psock->work, 0);
write_space = psock->saved_write_space;
}
rcu_read_unlock();
@ -1077,8 +1065,7 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
skb_dst_drop(skb);
skb_bpf_redirect_clear(skb);
ret = bpf_prog_run_pin_on_cpu(prog, skb);
if (ret == SK_PASS)
skb_bpf_set_strparser(skb);
skb_bpf_set_strparser(skb);
ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
skb->sk = NULL;
}
@ -1180,12 +1167,11 @@ static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
int ret = __SK_DROP;
int len = skb->len;

skb_get(skb);

rcu_read_lock();
psock = sk_psock(sk);
if (unlikely(!psock)) {
len = 0;
tcp_eat_skb(sk, skb);
sock_drop(sk, skb);
goto out;
}
@ -1209,10 +1195,20 @@ static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
static void sk_psock_verdict_data_ready(struct sock *sk)
{
struct socket *sock = sk->sk_socket;
int copied;

if (unlikely(!sock || !sock->ops || !sock->ops->read_skb))
return;
sock->ops->read_skb(sk, sk_psock_verdict_recv);
copied = sock->ops->read_skb(sk, sk_psock_verdict_recv);
if (copied >= 0) {
struct sk_psock *psock;

rcu_read_lock();
psock = sk_psock(sk);
if (psock)
psock->saved_data_ready(sk);
rcu_read_unlock();
}
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
@ -1624,9 +1624,10 @@ void sock_map_close(struct sock *sk, long timeout)
rcu_read_unlock();
sk_psock_stop(psock);
release_sock(sk);
cancel_work_sync(&psock->work);
cancel_delayed_work_sync(&psock->work);
sk_psock_put(sk, psock);
}

/* Make sure we do not recurse. This is a bug.
* Leak the socket instead of crashing on a stack overflow.
*/

@ -589,6 +589,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)

add_wait_queue(sk_sleep(sk), &wait);
sk->sk_write_pending += writebias;
sk->sk_wait_pending++;

/* Basic assumption: if someone sets sk->sk_err, he _must_
* change state of the socket from TCP_SYN_*.
@ -604,6 +605,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
}
remove_wait_queue(sk_sleep(sk), &wait);
sk->sk_write_pending -= writebias;
sk->sk_wait_pending--;
return timeo;
}
|
||||
if (newsk) {
|
||||
struct inet_connection_sock *newicsk = inet_csk(newsk);
|
||||
|
||||
newsk->sk_wait_pending = 0;
|
||||
inet_sk_set_state(newsk, TCP_SYN_RECV);
|
||||
newicsk->icsk_bind_hash = NULL;
|
||||
newicsk->icsk_bind2_hash = NULL;
|
||||
|
@ -1568,7 +1568,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
|
||||
* calculation of whether or not we must ACK for the sake of
|
||||
* a window update.
|
||||
*/
|
||||
static void __tcp_cleanup_rbuf(struct sock *sk, int copied)
|
||||
void __tcp_cleanup_rbuf(struct sock *sk, int copied)
|
||||
{
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
bool time_to_ack = false;
|
||||
@ -1770,7 +1770,6 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
|
||||
WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
|
||||
tcp_flags = TCP_SKB_CB(skb)->tcp_flags;
|
||||
used = recv_actor(sk, skb);
|
||||
consume_skb(skb);
|
||||
if (used < 0) {
|
||||
if (!copied)
|
||||
copied = used;
|
||||
@ -1784,14 +1783,6 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
|
||||
break;
|
||||
}
|
||||
}
|
||||
WRITE_ONCE(tp->copied_seq, seq);
|
||||
|
||||
tcp_rcv_space_adjust(sk);
|
||||
|
||||
/* Clean up data we have read: This will do ACK frames. */
|
||||
if (copied > 0)
|
||||
__tcp_cleanup_rbuf(sk, copied);
|
||||
|
||||
return copied;
|
||||
}
|
||||
EXPORT_SYMBOL(tcp_read_skb);
|
||||
@ -3086,6 +3077,12 @@ int tcp_disconnect(struct sock *sk, int flags)
|
||||
int old_state = sk->sk_state;
|
||||
u32 seq;
|
||||
|
||||
/* Deny disconnect if other threads are blocked in sk_wait_event()
|
||||
* or inet_wait_for_connect().
|
||||
*/
|
||||
if (sk->sk_wait_pending)
|
||||
return -EBUSY;
|
||||
|
||||
if (old_state != TCP_CLOSE)
|
||||
tcp_set_state(sk, TCP_CLOSE);
|
||||
|
||||
|
@ -11,6 +11,24 @@
#include <net/inet_common.h>
#include <net/tls.h>

void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tcp;
int copied;

if (!skb || !skb->len || !sk_is_tcp(sk))
return;

if (skb_bpf_strparser(skb))
return;

tcp = tcp_sk(sk);
copied = tcp->copied_seq + skb->len;
WRITE_ONCE(tcp->copied_seq, copied);
tcp_rcv_space_adjust(sk);
__tcp_cleanup_rbuf(sk, skb->len);
}

static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
struct sk_msg *msg, u32 apply_bytes, int flags)
{
@ -174,14 +192,34 @@ static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
return ret;
}

static bool is_next_msg_fin(struct sk_psock *psock)
{
struct scatterlist *sge;
struct sk_msg *msg_rx;
int i;

msg_rx = sk_psock_peek_msg(psock);
i = msg_rx->sg.start;
sge = sk_msg_elem(msg_rx, i);
if (!sge->length) {
struct sk_buff *skb = msg_rx->skb;

if (skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
return true;
}
return false;
}

static int tcp_bpf_recvmsg_parser(struct sock *sk,
struct msghdr *msg,
size_t len,
int flags,
int *addr_len)
{
struct tcp_sock *tcp = tcp_sk(sk);
u32 seq = tcp->copied_seq;
struct sk_psock *psock;
int copied;
int copied = 0;

if (unlikely(flags & MSG_ERRQUEUE))
return inet_recv_error(sk, msg, len, addr_len);
@ -194,8 +232,43 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
return tcp_recvmsg(sk, msg, len, flags, addr_len);

lock_sock(sk);

/* We may have received data on the sk_receive_queue pre-accept and
* then we can not use read_skb in this context because we haven't
* assigned a sk_socket yet so have no link to the ops. The work-around
* is to check the sk_receive_queue and in these cases read skbs off
* queue again. The read_skb hook is not running at this point because
* of lock_sock so we avoid having multiple runners in read_skb.
*/
if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
tcp_data_ready(sk);
/* This handles the ENOMEM errors if we both receive data
* pre accept and are already under memory pressure. At least
* let user know to retry.
*/
if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
copied = -EAGAIN;
goto out;
}
}

msg_bytes_ready:
copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
/* The typical case for EFAULT is the socket was gracefully
* shutdown with a FIN pkt. So check here the other case is
* some error on copy_page_to_iter which would be unexpected.
* On fin return correct return code to zero.
*/
if (copied == -EFAULT) {
bool is_fin = is_next_msg_fin(psock);

if (is_fin) {
copied = 0;
seq++;
goto out;
}
}
seq += copied;
if (!copied) {
long timeo;
int data;
@ -233,6 +306,10 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
copied = -EAGAIN;
}
out:
WRITE_ONCE(tcp->copied_seq, seq);
tcp_rcv_space_adjust(sk);
if (copied > 0)
__tcp_cleanup_rbuf(sk, copied);
release_sock(sk);
sk_psock_put(sk, psock);
return copied;
@ -1806,7 +1806,7 @@ EXPORT_SYMBOL(__skb_recv_udp);
int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
struct sk_buff *skb;
int err, copied;
int err;

try_again:
skb = skb_recv_udp(sk, MSG_DONTWAIT, &err);
@ -1825,10 +1825,7 @@ int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
}

WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
copied = recv_actor(sk, skb);
kfree_skb(skb);

return copied;
return recv_actor(sk, skb);
}
EXPORT_SYMBOL(udp_read_skb);
@ -541,6 +541,19 @@ int tipc_bearer_mtu(struct net *net, u32 bearer_id)
return mtu;
}

int tipc_bearer_min_mtu(struct net *net, u32 bearer_id)
{
int mtu = TIPC_MIN_BEARER_MTU;
struct tipc_bearer *b;

rcu_read_lock();
b = bearer_get(net, bearer_id);
if (b)
mtu += b->encap_hlen;
rcu_read_unlock();
return mtu;
}

/* tipc_bearer_xmit_skb - sends buffer to destination over bearer
*/
void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
@ -1138,8 +1151,8 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
#ifdef CONFIG_TIPC_MEDIA_UDP
if (tipc_udp_mtu_bad(nla_get_u32
(props[TIPC_NLA_PROP_MTU]))) {
if (nla_get_u32(props[TIPC_NLA_PROP_MTU]) <
b->encap_hlen + TIPC_MIN_BEARER_MTU) {
NL_SET_ERR_MSG(info->extack,
"MTU value is out-of-range");
return -EINVAL;

@ -146,6 +146,7 @@ struct tipc_media {
* @identity: array index of this bearer within TIPC bearer array
* @disc: ptr to link setup request
* @net_plane: network plane ('A' through 'H') currently associated with bearer
* @encap_hlen: encap headers length
* @up: bearer up flag (bit 0)
* @refcnt: tipc_bearer reference counter
*
@ -170,6 +171,7 @@ struct tipc_bearer {
u32 identity;
struct tipc_discoverer *disc;
char net_plane;
u16 encap_hlen;
unsigned long up;
refcount_t refcnt;
};
@ -232,6 +234,7 @@ int tipc_bearer_setup(void);
void tipc_bearer_cleanup(void);
void tipc_bearer_stop(struct net *net);
int tipc_bearer_mtu(struct net *net, u32 bearer_id);
int tipc_bearer_min_mtu(struct net *net, u32 bearer_id);
bool tipc_bearer_bcast_support(struct net *net, u32 bearer_id);
void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
struct sk_buff *skb,

@ -2200,7 +2200,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
struct tipc_msg *hdr = buf_msg(skb);
struct tipc_gap_ack_blks *ga = NULL;
bool reply = msg_probe(hdr), retransmitted = false;
u32 dlen = msg_data_sz(hdr), glen = 0;
u32 dlen = msg_data_sz(hdr), glen = 0, msg_max;
u16 peers_snd_nxt = msg_next_sent(hdr);
u16 peers_tol = msg_link_tolerance(hdr);
u16 peers_prio = msg_linkprio(hdr);
@ -2239,6 +2239,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
switch (mtyp) {
case RESET_MSG:
case ACTIVATE_MSG:
msg_max = msg_max_pkt(hdr);
if (msg_max < tipc_bearer_min_mtu(l->net, l->bearer_id))
break;
/* Complete own link name with peer's interface name */
if_name = strrchr(l->name, ':') + 1;
if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
@ -2283,8 +2286,8 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
l->peer_session = msg_session(hdr);
l->in_session = true;
l->peer_bearer_id = msg_bearer_id(hdr);
if (l->mtu > msg_max_pkt(hdr))
l->mtu = msg_max_pkt(hdr);
if (l->mtu > msg_max)
l->mtu = msg_max;
break;

case STATE_MSG:
@ -738,8 +738,8 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
udp_conf.local_ip.s_addr = local.ipv4.s_addr;
udp_conf.use_udp_checksums = false;
ub->ifindex = dev->ifindex;
if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
sizeof(struct udphdr))) {
b->encap_hlen = sizeof(struct iphdr) + sizeof(struct udphdr);
if (tipc_mtu_bad(dev, b->encap_hlen)) {
err = -EINVAL;
goto err;
}
@ -760,6 +760,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
else
udp_conf.local_ip6 = local.ipv6;
ub->ifindex = dev->ifindex;
b->encap_hlen = sizeof(struct ipv6hdr) + sizeof(struct udphdr);
b->mtu = 1280;
#endif
} else {
@ -2552,7 +2552,7 @@ static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
struct unix_sock *u = unix_sk(sk);
struct sk_buff *skb;
int err, copied;
int err;

mutex_lock(&u->iolock);
skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
@ -2560,10 +2560,7 @@ static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
if (!skb)
return err;

copied = recv_actor(sk, skb);
kfree_skb(skb);

return copied;
return recv_actor(sk, skb);
}

/*
@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/

#include <stdlib.h>
@ -21,6 +21,11 @@

#include <linux/gunyah.h>

#define DEFAULT_GUEST_BASE 0x80000000
#define DEFAULT_GUEST_SIZE 0x6400000 /* 100 MiB */
#define DEFAULT_DTB_OFFSET 0x45f0000 /* 70MiB - 64 KiB */
#define DEFAULT_RAMDISK_OFFSET 0x4600000 /* 70MiB */

struct vm_config {
int image_fd;
int dtb_fd;
@ -29,7 +34,6 @@ struct vm_config {
uint64_t guest_base;
uint64_t guest_size;

uint64_t image_offset;
off_t image_size;
uint64_t dtb_offset;
off_t dtb_size;
@ -44,7 +48,6 @@ static struct option options[] = {
{ "ramdisk", optional_argument, NULL, 'r' },
{ "base", optional_argument, NULL, 'B' },
{ "size", optional_argument, NULL, 'S' },
{ "image_offset", optional_argument, NULL, 'I' },
{ "dtb_offset", optional_argument, NULL, 'D' },
{ "ramdisk_offset", optional_argument, NULL, 'R' },
{ }
@ -58,12 +61,12 @@ static void print_help(char *cmd)
" --image, -i <image> VM image file to load (e.g. a kernel Image) [Required]\n"
" --dtb, -d <dtb> Devicetree file to load [Required]\n"
" --ramdisk, -r <ramdisk> Ramdisk file to load\n"
" --base, -B <address> Set the base address of guest's memory [Default: 0x80000000]\n"
" --size, -S <number> The number of bytes large to make the guest's memory [Default: 0x6400000 (100 MB)]\n"
" --image_offset, -I <number> Offset into guest memory to load the VM image file [Default: 0x10000]\n"
" --dtb_offset, -D <number> Offset into guest memory to load the DTB [Default: 0]\n"
" --ramdisk_offset, -R <number> Offset into guest memory to load a ramdisk [Default: 0x4600000]\n"
, cmd);
" --base, -B <address> Set the base address of guest's memory [Default: 0x%08x]\n"
" --size, -S <number> The number of bytes large to make the guest's memory [Default: 0x%08x]\n"
" --dtb_offset, -D <number> Offset into guest memory to load the DTB [Default: 0x%08x]\n"
" --ramdisk_offset, -R <number> Offset into guest memory to load a ramdisk [Default: 0x%08x]\n"
, cmd, DEFAULT_GUEST_BASE, DEFAULT_GUEST_SIZE,
DEFAULT_DTB_OFFSET, DEFAULT_RAMDISK_OFFSET);
}

int main(int argc, char **argv)
@ -74,18 +77,19 @@ int main(int argc, char **argv)
char *guest_mem;
struct vm_config config = {
/* Defaults good enough to boot static kernel and a basic ramdisk */
.image_fd = -1,
.dtb_fd = -1,
.ramdisk_fd = -1,
.guest_base = 0x80000000,
.guest_size = 0x6400000, /* 100 MB */
.image_offset = 0,
.dtb_offset = 0x45f0000,
.ramdisk_offset = 0x4600000, /* put at +70MB (30MB for ramdisk) */
.guest_base = DEFAULT_GUEST_BASE,
.guest_size = DEFAULT_GUEST_SIZE,
.dtb_offset = DEFAULT_DTB_OFFSET,
.ramdisk_offset = DEFAULT_RAMDISK_OFFSET,
};
struct stat st;
int opt, optidx, ret = 0;
long l;

while ((opt = getopt_long(argc, argv, "hi:d:r:B:S:I:D:R:c:", options, &optidx)) != -1) {
while ((opt = getopt_long(argc, argv, "hi:d:r:B:S:D:R:c:", options, &optidx)) != -1) {
switch (opt) {
case 'i':
config.image_fd = open(optarg, O_RDONLY | O_CLOEXEC);
@ -139,14 +143,6 @@ int main(int argc, char **argv)
}
config.guest_size = l;
break;
case 'I':
l = strtol(optarg, NULL, 0);
if (l == LONG_MIN) {
perror("Failed to parse image offset");
return -1;
}
config.image_offset = l;
break;
case 'D':
l = strtol(optarg, NULL, 0);
if (l == LONG_MIN) {
@ -172,13 +168,13 @@ int main(int argc, char **argv)
}
}

if (!config.image_fd || !config.dtb_fd) {
if (config.image_fd == -1 || config.dtb_fd == -1) {
print_help(argv[0]);
return -1;
}

if (config.image_offset + config.image_size > config.guest_size) {
fprintf(stderr, "Image offset and size puts it outside guest memory. Make image smaller or increase guest memory size.\n");
if (config.image_size > config.guest_size) {
fprintf(stderr, "Image size puts it outside guest memory. Make image smaller or increase guest memory size.\n");
return -1;
}

@ -222,7 +218,7 @@ int main(int argc, char **argv)
return -1;
}

if (read(config.image_fd, guest_mem + config.image_offset, config.image_size) < 0) {
if (read(config.image_fd, guest_mem, config.image_size) < 0) {
perror("Failed to read image into guest memory");
return -1;
}
@ -264,7 +260,7 @@ int main(int argc, char **argv)
}

while (1)
sleep(10);
pause();

return 0;
}
@ -712,7 +712,7 @@ static int fsl_micfil_probe(struct platform_device *pdev)
ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
if (ret) {
dev_err(&pdev->dev, "failed to pcm register\n");
return ret;
goto err_pm_disable;
}

fsl_micfil_dai.capture.formats = micfil->soc->formats;
@ -722,9 +722,20 @@ static int fsl_micfil_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to register component %s\n",
fsl_micfil_component.name);
goto err_pm_disable;
}

return ret;

err_pm_disable:
pm_runtime_disable(&pdev->dev);

return ret;
}

static void fsl_micfil_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
}

static int __maybe_unused fsl_micfil_runtime_suspend(struct device *dev)
@ -785,6 +796,7 @@ static const struct dev_pm_ops fsl_micfil_pm_ops = {

static struct platform_driver fsl_micfil_driver = {
.probe = fsl_micfil_probe,
.remove_new = fsl_micfil_remove,
.driver = {
.name = "fsl-micfil-dai",
.pm = &fsl_micfil_pm_ops,