Merge 'android14-6.1' into 'android14-6.1-lts'
This catches the -lts branch up with all of the recent changes that have
gone into the non-lts branch, INCLUDING the ABI update which we want here
to ensure that we do NOT break any newly added dependent symbols (and to
bring back in the reverts that were required before the ABI break). This
includes the following commits:

0a859e781c ANDROID: GKI: Include kheaders in gki_system_dlkm_modules
35fe0d393f ANDROID: 6/16/2023 KMI update
fcc5e942e0 ANDROID: sched: Add parameter to android_rvh_schedule.
d4dd4d9f19 ANDROID: GKI: provide more padding for struct usb_phy
bb9c879726 ANDROID: GKI enable CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
a7b1da66a2 FROMGIT: arm64: set __exception_irq_entry with __irq_entry as a default
d73b3af21f ANDROID: GKI: update symbol list file for xiaomi
73185e2d4e ANDROID: Remove all but top-level OWNERS
1090306d3d ANDROID: Enable GKI Dr. No Enforcement
16c18c497d ANDROID: 6/16/2023 KMI update
fcc32be061 ANDROID: virt: gunyah: Sync with latest platform ops
69a3ec73e4 FROMGIT: usb: gadget: udc: core: Prevent soft_connect_store() race
18b677ffae FROMGIT: usb: gadget: udc: core: Offload usb_udc_vbus_handler processing
a1741f9c45 UPSTREAM: Bluetooth: fix debugfs registration
d890debdaf UPSTREAM: Bluetooth: hci_sync: add lock to protect HCI_UNREGISTER
855c5479cb UPSTREAM: net/ipv6: fix bool/int mismatch for skip_notify_on_dev_down
b0fa6dd29a UPSTREAM: neighbour: fix unaligned access to pneigh_entry
1707d64dab UPSTREAM: tcp: deny tcp_disconnect() when threads are waiting
a7cd7a3dd7 ANDROID: sound: usb: Add vendor's hooking interface
2c6f80378c ANDROID: GKI: USB: XHCI: add Android ABI padding to struct xhci_driver_overrides
cd3b5ff535 ANDROID: usb: host: add address_device to xhci overrides
e3ff5d6bf0 UPSTREAM: bpf, sockmap: Avoid potential NULL dereference in sk_psock_verdict_data_ready()
07873e75c6 UPSTREAM: bpf, sockmap: Incorrectly handling copied_seq
e218734b1b UPSTREAM: bpf, sockmap: Wake up polling after data copy
f9cc0b7f9b UPSTREAM: bpf, sockmap: TCP data stall on recv before accept
028591f2c8 UPSTREAM: bpf, sockmap: Handle fin correctly
e69ad7c838 UPSTREAM: bpf, sockmap: Improved check for empty queue
ecfcbe21d7 UPSTREAM: bpf, sockmap: Reschedule is now done through backlog
42fcf3b6df UPSTREAM: bpf, sockmap: Convert schedule_work into delayed_work
a59051006b UPSTREAM: bpf, sockmap: Pass skb ownership through read_skb
86409bb4e1 ANDROID: virt: gunyah: Sync with latest Gunyah patches
705a9b5feb ANDROID: virt: gunyah: Sync with latest documentation and sample
60662882b7 FROMLIST: usb: xhci-plat: add xhci_plat_priv_overwrite
6496f6cfbb ANDROID: usb: host: export symbols for xhci hooks usage
90ab8e7f98 ANDROID: usb: host: add xhci hooks for USB offload

Change-Id: I895db08515a0bc14c4548bb28b61acb1414a94cd
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit ed6634a559
@@ -1 +0,0 @@
per-file sysfs-fs-f2fs=file:/fs/f2fs/OWNERS
@@ -1 +0,0 @@
per-file f2fs**=file:/fs/f2fs/OWNERS
@@ -136,7 +136,7 @@ Code  Seq#    Include File                                           Comments
'F'   DD     video/sstfb.h                                           conflict!
'G'   00-3F  drivers/misc/sgi-gru/grulib.h                           conflict!
'G'   00-0F  xen/gntalloc.h, xen/gntdev.h                            conflict!
'G'   00-0f  linux/gunyah.h                                          conflict!
'G'   00-0F  linux/gunyah.h                                          conflict!
'H'   00-7F  linux/hiddev.h                                          conflict!
'H'   00-0F  linux/hidraw.h                                          conflict!
'H'   01     linux/mei.h                                             conflict!
@@ -7,14 +7,12 @@ Virtual Machine Manager
The Gunyah Virtual Machine Manager is a Linux driver to support launching
virtual machines using Gunyah.

Except for some basic information about the location of initial binaries,
most of the configuration about a Gunyah virtual machine is described in the
VM's devicetree. The devicetree is generated by userspace. Interacting with the
virtual machine is still done via the kernel and VM configuration requires some
of the corresponding functionality to be set up in the kernel. For instance,
sharing userspace memory with a VM is done via the `GH_VM_SET_USER_MEM_REGION`_
ioctl. The VM itself is configured to use the memory region via the
devicetree.
Configuration of a Gunyah virtual machine is done via a devicetree. When the VM
is launched, memory is provided by the host VM which contains the devicetree.
Gunyah reads the devicetree to configure the memory map and create resources
such as vCPUs for the VM. Memory can be shared with the VM with
`GH_VM_SET_USER_MEM_REGION`_. Userspace can interact with the resources in Linux
by adding "functions" to the VM.

Gunyah Functions
================
@@ -56,6 +54,9 @@ GH_CREATE_VM
~~~~~~~~~~~~

Creates a Gunyah VM. The argument is reserved for future use and must be 0.
A successful call will return a Gunyah VM file descriptor. See
`Gunyah VM API Descriptions`_ for a list of IOCTLs that can be made on this
file descriptor.

Gunyah VM API Descriptions
--------------------------
@@ -70,8 +71,8 @@ unique per virtual machine.

While VMM is guest-agnostic and allows runtime addition of memory regions,
Linux guest virtual machines do not support accepting memory regions at runtime.
Thus, memory regions should be provided before starting the VM and the VM must
be configured to accept these at boot-up.
Thus, for Linux guests, memory regions should be provided before starting the VM
and the VM must be configured via the devicetree to accept these at boot-up.

The guest physical address is used by Linux kernel to check that the requested
user regions do not overlap and to help find the corresponding memory region
@@ -87,7 +88,7 @@ GH_VM_SET_DTB_CONFIG
~~~~~~~~~~~~~~~~~~~~

This ioctl sets the location of the VM's devicetree blob and is used by Gunyah
Resource Manager to allocate resources. The guest physical memory should be part
Resource Manager to allocate resources. The guest physical memory must be part
of the primary memory parcel provided to the VM prior to GH_VM_START.

.. kernel-doc:: include/uapi/linux/gunyah.h
@@ -104,7 +105,7 @@ GH_VM_ADD_FUNCTION
This ioctl registers a Gunyah VM function with the VM manager. The VM function
is described with a &struct gh_fn_desc.type and some arguments for that type.
Typically, the function is added before the VM starts, but the function doesn't
"operate" until the VM starts with `GH_VM_START`_. For example, vCPU ioclts will
"operate" until the VM starts with `GH_VM_START`_. For example, vCPU ioctls will
all return an error until the VM starts because the vCPUs don't exist until the
VM is started. This allows the VMM to set up all the kernel functions needed for
the VM *before* the VM starts.
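As a side note on the documentation above, a minimal userspace sketch of the
described launch flow might look as follows. This is hypothetical: only
GH_CREATE_VM, GH_VM_SET_DTB_CONFIG and GH_VM_START come from the text; the
/dev/gunyah device node path, the struct field names and the example addresses
are assumptions.

    /* Hypothetical sketch of the VM launch flow described above. */
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/gunyah.h>

    int launch_vm(void)
    {
        int gunyah_fd, vm_fd;

        gunyah_fd = open("/dev/gunyah", O_RDWR); /* device node path assumed */
        if (gunyah_fd < 0)
            return -1;

        /* The argument is reserved for future use and must be 0. */
        vm_fd = ioctl(gunyah_fd, GH_CREATE_VM, 0);
        if (vm_fd < 0)
            return -1;

        /* Point the Resource Manager at the DTB inside the primary memory
         * parcel; this must happen before GH_VM_START. Field names assumed
         * from include/uapi/linux/gunyah.h. */
        struct gh_vm_dtb_config dtb_config = {
            .guest_phys_addr = 0x80000000, /* example guest PA */
            .size = 0x10000,
        };
        if (ioctl(vm_fd, GH_VM_SET_DTB_CONFIG, &dtb_config) < 0)
            return -1;

        return ioctl(vm_fd, GH_VM_START, 0);
    }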
OWNERS (23 lines changed)
@@ -1,13 +1,12 @@
# The full list of approvers is defined in
# https://android.googlesource.com/kernel/common/+/refs/meta/config/OWNERS
set noparent

# The following OWNERS are defined at the top level to improve the OWNERS
# suggestions through any user interface. Consider those people the ones that
# can help with finding the best person to review.
adelva@google.com
gregkh@google.com
maennich@google.com
saravanak@google.com
smuckle@google.com
surenb@google.com
tkjos@google.com
# GKI Dr. No Enforcement is active on this branch. Approval of one of the Dr.
# No reviewers is required following a regular CodeReview+2 vote of a code
# reviewer.
#
# See the GKI release documentation (go/gki-dr-no) for further details.
#
# The expanded list of reviewers can be found at:
# https://android.googlesource.com/kernel/common/+/android-mainline/OWNERS_DrNo

include kernel/common:android-mainline:/OWNERS_DrNo
@@ -1,13 +0,0 @@
# If we ever add another OWNERS above this directory, it's likely to be
# more permissive, so don't inherit from it
set noparent
include kernel/common:android-mainline:/OWNERS_DrNo

# Downstream boards maintained directly in this manifest branch
per-file abi_gki_aarch64_cuttlefish = adelva@google.com, rammuthiah@google.com
per-file abi_gki_aarch64_goldfish = rkir@google.com

# per-file for review purposes
per-file gki_system_dlkm_modules = ramjiyani@google.com
per-file abi_gki_protected_exports_* = ramjiyani@google.com
per-file gki_*_protected_modules = ramjiyani@google.com
[File diff suppressed because it is too large]
@@ -244,3 +244,14 @@
#required by mi_mempool.ko
__traceiter_android_vh_madvise_cold_pageout_skip
__tracepoint_android_vh_madvise_cold_pageout_skip

#required by n_gsm.ko
tty_write_room
tty_port_tty_set
tty_register_device
tty_hung_up_p
tty_name
tty_port_block_til_ready
tty_port_close_start
tty_port_lower_dtr_rts
tty_port_close_end
@@ -27,6 +27,7 @@ drivers/net/usb/usbnet.ko
drivers/usb/class/cdc-acm.ko
drivers/usb/serial/ftdi_sio.ko
drivers/usb/serial/usbserial.ko
kernel/kheaders.ko
lib/crypto/libarc4.ko
mm/zsmalloc.ko
net/6lowpan/6lowpan.ko
@@ -1 +0,0 @@
include ../arm64/OWNERS
@@ -1,4 +0,0 @@
per-file crypto/**=file:/crypto/OWNERS
per-file {include,kernel,kvm,lib}/**=mzyngier@google.com,willdeacon@google.com
per-file mm/**=file:/mm/OWNERS
per-file net/**=file:/net/OWNERS
@@ -183,6 +183,7 @@ CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
CONFIG_NETFILTER_XT_MATCH_BPF=y
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
@@ -16,13 +16,15 @@ bool arch_is_gh_guest(void)
{
    struct arm_smccc_res res;
    uuid_t uuid;
    u32 *up;

    arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);

    ((u32 *)&uuid.b[0])[0] = lower_32_bits(res.a0);
    ((u32 *)&uuid.b[0])[1] = lower_32_bits(res.a1);
    ((u32 *)&uuid.b[0])[2] = lower_32_bits(res.a2);
    ((u32 *)&uuid.b[0])[3] = lower_32_bits(res.a3);
    up = (u32 *)&uuid.b[0];
    up[0] = lower_32_bits(res.a0);
    up[1] = lower_32_bits(res.a1);
    up[2] = lower_32_bits(res.a2);
    up[3] = lower_32_bits(res.a3);

    return uuid_equal(&uuid, &GUNYAH_UUID);
}
@@ -8,16 +8,11 @@
#define __ASM_EXCEPTION_H

#include <asm/esr.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>

#include <linux/interrupt.h>

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry __irq_entry
#else
#define __exception_irq_entry __kprobes
#endif

static inline unsigned long disr_to_esr(u64 disr)
{
@@ -1,3 +0,0 @@
per-file crypto/**=file:/crypto/OWNERS
per-file mm/**=file:/mm/OWNERS
per-file net/**=file:/net/OWNERS
@@ -178,6 +178,7 @@ CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
CONFIG_NETFILTER_XT_MATCH_BPF=y
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
@@ -1,2 +0,0 @@
bvanassche@google.com
jaegeuk@google.com
@@ -1,6 +1,6 @@
. ${ROOT_DIR}/${KERNEL_DIR}/build.config.constants

KMI_GENERATION=9
KMI_GENERATION=11

LLVM=1
DEPMOD=depmod
@@ -1 +0,0 @@
ardb@google.com
@@ -1,6 +0,0 @@
per-file base/**=gregkh@google.com,saravanak@google.com
per-file block/**=akailash@google.com
per-file md/**=akailash@google.com,paullawrence@google.com
per-file net/**=file:/net/OWNERS
per-file scsi/**=bvanassche@google.com,jaegeuk@google.com
per-file {tty,usb}/**=gregkh@google.com
@@ -96,8 +96,9 @@ static int gh_msgq_send_data(struct mbox_chan *chan, void *data)
    if (gh_error == GH_ERROR_OK) {
        if (!ready)
            return 0;
    } else
    } else {
        dev_err(msgq->mbox.dev, "Failed to send data: %d (%d)\n", gh_error, msgq->last_ret);
    }

    /**
     * We can send more messages. Mailbox framework requires that tx done
@@ -165,6 +166,8 @@ int gh_msgq_init(struct device *parent, struct gh_msgq *msgq, struct mbox_client
        if (ret)
            goto err_tx_ghrsc;

        enable_irq_wake(msgq->tx_ghrsc->irq);

        tasklet_setup(&msgq->txdone_tasklet, gh_msgq_txdone_tasklet);
    }
@@ -175,6 +178,8 @@ int gh_msgq_init(struct device *parent, struct gh_msgq *msgq, struct mbox_client
                IRQF_ONESHOT, "gh_msgq_rx", msgq);
        if (ret)
            goto err_tx_irq;

        enable_irq_wake(msgq->rx_ghrsc->irq);
    }

    return 0;
@@ -193,6 +198,8 @@ EXPORT_SYMBOL_GPL(gh_msgq_init);

void gh_msgq_remove(struct gh_msgq *msgq)
{
    mbox_free_channel(gh_msgq_chan(msgq));

    if (msgq->rx_ghrsc)
        free_irq(msgq->rx_ghrsc->irq, msgq);
@@ -37,6 +37,14 @@ static struct bus_type gadget_bus_type;
 * @vbus: for udcs who care about vbus status, this value is real vbus status;
 * for udcs who do not care about vbus status, this value is always true
 * @started: the UDC's started state. True if the UDC had started.
 * @allow_connect: Indicates whether UDC is allowed to be pulled up.
 * Set/cleared by gadget_(un)bind_driver() after gadget driver is bound or
 * unbound.
 * @connect_lock: protects udc->started, gadget->connect,
 * gadget->allow_connect and gadget->deactivate. The routines
 * usb_gadget_connect_locked(), usb_gadget_disconnect_locked(),
 * usb_udc_connect_control_locked(), usb_gadget_udc_start_locked() and
 * usb_gadget_udc_stop_locked() are called with this lock held.
 *
 * This represents the internal data structure which is used by the UDC-class
 * to hold information about udc driver and gadget together.
@@ -48,6 +56,9 @@ struct usb_udc {
    struct list_head list;
    bool vbus;
    bool started;
    bool allow_connect;
    struct work_struct vbus_work;
    struct mutex connect_lock;
};

static struct class *udc_class;
@@ -660,17 +671,8 @@ int usb_gadget_vbus_disconnect(struct usb_gadget *gadget)
}
EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);

/**
 * usb_gadget_connect - software-controlled connect to USB host
 * @gadget:the peripheral being connected
 *
 * Enables the D+ (or potentially D-) pullup. The host will start
 * enumerating this gadget when the pullup is active and a VBUS session
 * is active (the link is powered).
 *
 * Returns zero on success, else negative errno.
 */
int usb_gadget_connect(struct usb_gadget *gadget)
static int usb_gadget_connect_locked(struct usb_gadget *gadget)
    __must_hold(&gadget->udc->connect_lock)
{
    int ret = 0;

@@ -679,10 +681,12 @@ int usb_gadget_connect(struct usb_gadget *gadget)
        goto out;
    }

    if (gadget->deactivated) {
    if (gadget->deactivated || !gadget->udc->allow_connect || !gadget->udc->started) {
        /*
         * If gadget is deactivated we only save new state.
         * Gadget will be connected automatically after activation.
         * If the gadget isn't usable (because it is deactivated,
         * unbound, or not yet started), we only save the new state.
         * The gadget will be connected automatically when it is
         * activated/bound/started.
         */
        gadget->connected = true;
        goto out;
@@ -697,22 +701,31 @@ int usb_gadget_connect(struct usb_gadget *gadget)

    return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_connect);

/**
 * usb_gadget_disconnect - software-controlled disconnect from USB host
 * @gadget:the peripheral being disconnected
 * usb_gadget_connect - software-controlled connect to USB host
 * @gadget:the peripheral being connected
 *
 * Disables the D+ (or potentially D-) pullup, which the host may see
 * as a disconnect (when a VBUS session is active). Not all systems
 * support software pullup controls.
 *
 * Following a successful disconnect, invoke the ->disconnect() callback
 * for the current gadget driver so that UDC drivers don't need to.
 * Enables the D+ (or potentially D-) pullup. The host will start
 * enumerating this gadget when the pullup is active and a VBUS session
 * is active (the link is powered).
 *
 * Returns zero on success, else negative errno.
 */
int usb_gadget_disconnect(struct usb_gadget *gadget)
int usb_gadget_connect(struct usb_gadget *gadget)
{
    int ret;

    mutex_lock(&gadget->udc->connect_lock);
    ret = usb_gadget_connect_locked(gadget);
    mutex_unlock(&gadget->udc->connect_lock);

    return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_connect);

static int usb_gadget_disconnect_locked(struct usb_gadget *gadget)
    __must_hold(&gadget->udc->connect_lock)
{
    int ret = 0;

@@ -724,7 +737,7 @@ int usb_gadget_disconnect(struct usb_gadget *gadget)
    if (!gadget->connected)
        goto out;

    if (gadget->deactivated) {
    if (gadget->deactivated || !gadget->udc->started) {
        /*
         * If gadget is deactivated we only save new state.
         * Gadget will stay disconnected after activation.
@@ -747,6 +760,30 @@ int usb_gadget_disconnect(struct usb_gadget *gadget)

    return ret;
}

/**
 * usb_gadget_disconnect - software-controlled disconnect from USB host
 * @gadget:the peripheral being disconnected
 *
 * Disables the D+ (or potentially D-) pullup, which the host may see
 * as a disconnect (when a VBUS session is active). Not all systems
 * support software pullup controls.
 *
 * Following a successful disconnect, invoke the ->disconnect() callback
 * for the current gadget driver so that UDC drivers don't need to.
 *
 * Returns zero on success, else negative errno.
 */
int usb_gadget_disconnect(struct usb_gadget *gadget)
{
    int ret;

    mutex_lock(&gadget->udc->connect_lock);
    ret = usb_gadget_disconnect_locked(gadget);
    mutex_unlock(&gadget->udc->connect_lock);

    return ret;
}
EXPORT_SYMBOL_GPL(usb_gadget_disconnect);

/**
@@ -764,13 +801,14 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
{
    int ret = 0;

    mutex_lock(&gadget->udc->connect_lock);
    if (gadget->deactivated)
        goto out;
        goto unlock;

    if (gadget->connected) {
        ret = usb_gadget_disconnect(gadget);
        ret = usb_gadget_disconnect_locked(gadget);
        if (ret)
            goto out;
            goto unlock;

        /*
         * If gadget was being connected before deactivation, we want
@@ -780,7 +818,8 @@ int usb_gadget_deactivate(struct usb_gadget *gadget)
    }
    gadget->deactivated = true;

out:
unlock:
    mutex_unlock(&gadget->udc->connect_lock);
    trace_usb_gadget_deactivate(gadget, ret);

    return ret;
@@ -800,8 +839,9 @@ int usb_gadget_activate(struct usb_gadget *gadget)
{
    int ret = 0;

    mutex_lock(&gadget->udc->connect_lock);
    if (!gadget->deactivated)
        goto out;
        goto unlock;

    gadget->deactivated = false;

@@ -810,9 +850,11 @@ int usb_gadget_activate(struct usb_gadget *gadget)
     * while it was being deactivated, we call usb_gadget_connect().
     */
    if (gadget->connected)
        ret = usb_gadget_connect(gadget);
        ret = usb_gadget_connect_locked(gadget);
    mutex_unlock(&gadget->udc->connect_lock);

out:
unlock:
    mutex_unlock(&gadget->udc->connect_lock);
    trace_usb_gadget_activate(gadget, ret);

    return ret;
@@ -1051,12 +1093,22 @@ EXPORT_SYMBOL_GPL(usb_gadget_set_state);

/* ------------------------------------------------------------------------- */

static void usb_udc_connect_control(struct usb_udc *udc)
/* Acquire connect_lock before calling this function. */
static void usb_udc_connect_control_locked(struct usb_udc *udc) __must_hold(&udc->connect_lock)
{
    if (udc->vbus)
        usb_gadget_connect(udc->gadget);
        usb_gadget_connect_locked(udc->gadget);
    else
        usb_gadget_disconnect(udc->gadget);
        usb_gadget_disconnect_locked(udc->gadget);
}

static void vbus_event_work(struct work_struct *work)
{
    struct usb_udc *udc = container_of(work, struct usb_udc, vbus_work);

    mutex_lock(&udc->connect_lock);
    usb_udc_connect_control_locked(udc);
    mutex_unlock(&udc->connect_lock);
}

/**
@@ -1067,6 +1119,14 @@ static void usb_udc_connect_control(struct usb_udc *udc)
 *
 * The udc driver calls it when it wants to connect or disconnect gadget
 * according to vbus status.
 *
 * This function can be invoked from interrupt context by irq handlers of
 * the gadget drivers, however, usb_udc_connect_control() has to run in
 * non-atomic context due to the following:
 * a. Some of the gadget driver implementations expect the ->pullup
 *    callback to be invoked in non-atomic context.
 * b. usb_gadget_disconnect() acquires udc_lock which is a mutex.
 * Hence offload invocation of usb_udc_connect_control() to workqueue.
 */
void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)
{
@@ -1074,7 +1134,7 @@ void usb_udc_vbus_handler(struct usb_gadget *gadget, bool status)

    if (udc) {
        udc->vbus = status;
        usb_udc_connect_control(udc);
        schedule_work(&udc->vbus_work);
    }
}
EXPORT_SYMBOL_GPL(usb_udc_vbus_handler);
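The comment above explains why usb_udc_vbus_handler() now only records the
VBUS state and defers the actual connect/disconnect to vbus_work. A hedged
sketch of the call site this enables, i.e. a UDC driver reporting VBUS from
its interrupt handler; the driver struct, register layout and helper names
are placeholders, only usb_udc_vbus_handler() comes from this diff:

    /* Hypothetical UDC driver fragment: safe in atomic context because
     * usb_udc_vbus_handler() just stores the state and schedules work. */
    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/usb/gadget.h>

    struct example_udc {                    /* hypothetical driver state */
        struct usb_gadget gadget;
        void __iomem *regs;
    };

    static irqreturn_t example_udc_vbus_irq(int irq, void *dev_id)
    {
        struct example_udc *udc = dev_id;
        /* Placeholder: read the controller's VBUS status bit. */
        bool vbus = !!(readl(udc->regs) & BIT(0));

        usb_udc_vbus_handler(&udc->gadget, vbus);
        return IRQ_HANDLED;
    }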
@@ -1097,7 +1157,7 @@ void usb_gadget_udc_reset(struct usb_gadget *gadget,
EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);

/**
 * usb_gadget_udc_start - tells usb device controller to start up
 * usb_gadget_udc_start_locked - tells usb device controller to start up
 * @udc: The UDC to be started
 *
 * This call is issued by the UDC Class driver when it's about
@@ -1108,8 +1168,11 @@ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
 * necessary to have it powered on.
 *
 * Returns zero on success, else negative errno.
 *
 * Caller should acquire connect_lock before invoking this function.
 */
static inline int usb_gadget_udc_start(struct usb_udc *udc)
static inline int usb_gadget_udc_start_locked(struct usb_udc *udc)
    __must_hold(&udc->connect_lock)
{
    int ret;

@@ -1126,7 +1189,7 @@ static inline int usb_gadget_udc_start(struct usb_udc *udc)
}

/**
 * usb_gadget_udc_stop - tells usb device controller we don't need it anymore
 * usb_gadget_udc_stop_locked - tells usb device controller we don't need it anymore
 * @udc: The UDC to be stopped
 *
 * This call is issued by the UDC Class driver after calling
@@ -1135,8 +1198,11 @@ static inline int usb_gadget_udc_start(struct usb_udc *udc)
 * The details are implementation specific, but it can go as
 * far as powering off UDC completely and disable its data
 * line pullups.
 *
 * Caller should acquire connect lock before invoking this function.
 */
static inline void usb_gadget_udc_stop(struct usb_udc *udc)
static inline void usb_gadget_udc_stop_locked(struct usb_udc *udc)
    __must_hold(&udc->connect_lock)
{
    if (!udc->started) {
        dev_err(&udc->dev, "UDC had already stopped\n");
@@ -1295,12 +1361,14 @@ int usb_add_gadget(struct usb_gadget *gadget)

    udc->gadget = gadget;
    gadget->udc = udc;
    mutex_init(&udc->connect_lock);

    udc->started = false;

    mutex_lock(&udc_lock);
    list_add_tail(&udc->list, &udc_list);
    mutex_unlock(&udc_lock);
    INIT_WORK(&udc->vbus_work, vbus_event_work);

    ret = device_add(&udc->dev);
    if (ret)
@@ -1432,6 +1500,7 @@ void usb_del_gadget(struct usb_gadget *gadget)
    flush_work(&gadget->work);
    device_del(&gadget->dev);
    ida_free(&gadget_id_numbers, gadget->id_number);
    cancel_work_sync(&udc->vbus_work);
    device_unregister(&udc->dev);
}
EXPORT_SYMBOL_GPL(usb_del_gadget);
@@ -1496,11 +1565,16 @@ static int gadget_bind_driver(struct device *dev)
    if (ret)
        goto err_bind;

    ret = usb_gadget_udc_start(udc);
    if (ret)
    mutex_lock(&udc->connect_lock);
    ret = usb_gadget_udc_start_locked(udc);
    if (ret) {
        mutex_unlock(&udc->connect_lock);
        goto err_start;
    }
    usb_gadget_enable_async_callbacks(udc);
    usb_udc_connect_control(udc);
    udc->allow_connect = true;
    usb_udc_connect_control_locked(udc);
    mutex_unlock(&udc->connect_lock);

    kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
    return 0;
@@ -1531,12 +1605,16 @@ static void gadget_unbind_driver(struct device *dev)

    kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);

    usb_gadget_disconnect(gadget);
    udc->allow_connect = false;
    cancel_work_sync(&udc->vbus_work);
    mutex_lock(&udc->connect_lock);
    usb_gadget_disconnect_locked(gadget);
    usb_gadget_disable_async_callbacks(udc);
    if (gadget->irq)
        synchronize_irq(gadget->irq);
    udc->driver->unbind(gadget);
    usb_gadget_udc_stop(udc);
    usb_gadget_udc_stop_locked(udc);
    mutex_unlock(&udc->connect_lock);

    mutex_lock(&udc_lock);
    driver->is_bound = false;
@@ -1622,11 +1700,15 @@ static ssize_t soft_connect_store(struct device *dev,
    }

    if (sysfs_streq(buf, "connect")) {
        usb_gadget_udc_start(udc);
        usb_gadget_connect(udc->gadget);
        mutex_lock(&udc->connect_lock);
        usb_gadget_udc_start_locked(udc);
        usb_gadget_connect_locked(udc->gadget);
        mutex_unlock(&udc->connect_lock);
    } else if (sysfs_streq(buf, "disconnect")) {
        usb_gadget_disconnect(udc->gadget);
        usb_gadget_udc_stop(udc);
        mutex_lock(&udc->connect_lock);
        usb_gadget_disconnect_locked(udc->gadget);
        usb_gadget_udc_stop_locked(udc);
        mutex_unlock(&udc->connect_lock);
    } else {
        dev_err(dev, "unsupported command '%s'\n", buf);
        ret = -EINVAL;
@@ -535,8 +535,13 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
            cmd->status == COMP_COMMAND_RING_STOPPED) {
        xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n");
        ret = -ETIME;
        goto cmd_cleanup;
    }

    ret = xhci_vendor_sync_dev_ctx(xhci, slot_id);
    if (ret)
        xhci_warn(xhci, "Sync device context failed, ret=%d\n", ret);

cmd_cleanup:
    xhci_free_command(xhci, cmd);
    return ret;
@@ -1824,6 +1829,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)

    return 0;
}
EXPORT_SYMBOL_GPL(xhci_bus_suspend);

/*
 * Workaround for missing Cold Attach Status (CAS) if device re-plugged in S3.
@@ -1968,6 +1974,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
    spin_unlock_irqrestore(&xhci->lock, flags);
    return 0;
}
EXPORT_SYMBOL_GPL(xhci_bus_resume);

unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd)
{
@@ -65,7 +65,7 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
    return seg;
}

static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
    if (seg->trbs) {
        dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
@@ -74,8 +74,9 @@ static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
    kfree(seg->bounce_buf);
    kfree(seg);
}
EXPORT_SYMBOL_GPL(xhci_segment_free);

static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
        struct xhci_segment *first)
{
    struct xhci_segment *seg;
@@ -96,9 +97,9 @@ static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
 * DMA address of the next segment. The caller needs to set any Link TRB
 * related flags, such as End TRB, Toggle Cycle, and no snoop.
 */
static void xhci_link_segments(struct xhci_segment *prev,
        struct xhci_segment *next,
        enum xhci_ring_type type, bool chain_links)
void xhci_link_segments(struct xhci_segment *prev,
        struct xhci_segment *next,
        enum xhci_ring_type type, bool chain_links)
{
    u32 val;

@@ -118,6 +119,7 @@ static void xhci_link_segments(struct xhci_segment *prev,
        prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
    }
}
EXPORT_SYMBOL_GPL(xhci_link_segments);

/*
 * Link the ring to the new segments.
@@ -256,7 +258,7 @@ static int xhci_update_stream_segment_mapping(
    return ret;
}

static void xhci_remove_stream_mapping(struct xhci_ring *ring)
void xhci_remove_stream_mapping(struct xhci_ring *ring)
{
    struct xhci_segment *seg;

@@ -269,6 +271,7 @@ static void xhci_remove_stream_mapping(struct xhci_ring *ring)
        seg = seg->next;
    } while (seg != ring->first_seg);
}
EXPORT_SYMBOL_GPL(xhci_remove_stream_mapping);

static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
{
@@ -317,6 +320,7 @@ void xhci_initialize_ring_info(struct xhci_ring *ring,
     */
    ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
}
EXPORT_SYMBOL_GPL(xhci_initialize_ring_info);

/* Allocate segments and link them for a ring */
static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
@@ -362,6 +366,54 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
    return 0;
}

static void xhci_vendor_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
{
    struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);

    if (ops && ops->free_container_ctx)
        ops->free_container_ctx(xhci, ctx);
}

static void xhci_vendor_alloc_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
        int type, gfp_t flags)
{
    struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);

    if (ops && ops->alloc_container_ctx)
        ops->alloc_container_ctx(xhci, ctx, type, flags);
}

static struct xhci_ring *xhci_vendor_alloc_transfer_ring(struct xhci_hcd *xhci,
        u32 endpoint_type, enum xhci_ring_type ring_type,
        unsigned int max_packet, gfp_t mem_flags)
{
    struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);

    if (ops && ops->alloc_transfer_ring)
        return ops->alloc_transfer_ring(xhci, endpoint_type, ring_type,
                max_packet, mem_flags);
    return 0;
}

void xhci_vendor_free_transfer_ring(struct xhci_hcd *xhci,
        struct xhci_virt_device *virt_dev, unsigned int ep_index)
{
    struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);

    if (ops && ops->free_transfer_ring)
        ops->free_transfer_ring(xhci, virt_dev, ep_index);
}

bool xhci_vendor_is_usb_offload_enabled(struct xhci_hcd *xhci,
        struct xhci_virt_device *virt_dev, unsigned int ep_index)
{
    struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);

    if (ops && ops->is_usb_offload_enabled)
        return ops->is_usb_offload_enabled(xhci, virt_dev, ep_index);
    return false;
}

/*
 * Create a new ring with zero or more segments.
 *
@@ -414,7 +466,11 @@ void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
        struct xhci_virt_device *virt_dev,
        unsigned int ep_index)
{
    xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
    if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, ep_index))
        xhci_vendor_free_transfer_ring(xhci, virt_dev, ep_index);
    else
        xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);

    virt_dev->eps[ep_index].ring = NULL;
}

@@ -472,6 +528,7 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
{
    struct xhci_container_ctx *ctx;
    struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
    struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);

    if ((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT))
        return NULL;
@@ -485,7 +542,12 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
    if (type == XHCI_CTX_TYPE_INPUT)
        ctx->size += CTX_SIZE(xhci->hcc_params);

    ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);
    if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) &&
            (ops && ops->alloc_container_ctx))
        xhci_vendor_alloc_container_ctx(xhci, ctx, type, flags);
    else
        ctx->bytes = dma_pool_zalloc(xhci->device_pool, flags, &ctx->dma);

    if (!ctx->bytes) {
        kfree(ctx);
        return NULL;
@@ -496,9 +558,16 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
void xhci_free_container_ctx(struct xhci_hcd *xhci,
        struct xhci_container_ctx *ctx)
{
    struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);

    if (!ctx)
        return;
    dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
    if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0) &&
            (ops && ops->free_container_ctx))
        xhci_vendor_free_container_ctx(xhci, ctx);
    else
        dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);

    kfree(ctx);
}

@@ -520,6 +589,7 @@ struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci,
    return (struct xhci_slot_ctx *)
        (ctx->bytes + CTX_SIZE(xhci->hcc_params));
}
EXPORT_SYMBOL_GPL(xhci_get_slot_ctx);

struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
        struct xhci_container_ctx *ctx,
@@ -887,7 +957,7 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)

    for (i = 0; i < 31; i++) {
        if (dev->eps[i].ring)
            xhci_ring_free(xhci, dev->eps[i].ring);
            xhci_free_endpoint_ring(xhci, dev, i);
        if (dev->eps[i].stream_info)
            xhci_free_stream_info(xhci,
                    dev->eps[i].stream_info);
@@ -1489,8 +1559,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
        mult = 0;

    /* Set up the endpoint ring */
    virt_dev->eps[ep_index].new_ring =
        xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
    if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, ep_index) &&
            usb_endpoint_xfer_isoc(&ep->desc)) {
        virt_dev->eps[ep_index].new_ring =
            xhci_vendor_alloc_transfer_ring(xhci, endpoint_type, ring_type,
                    max_packet, mem_flags);
    } else {
        virt_dev->eps[ep_index].new_ring =
            xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
    }

    if (!virt_dev->eps[ep_index].new_ring)
        return -ENOMEM;

@@ -1838,6 +1916,24 @@ void xhci_free_erst(struct xhci_hcd *xhci, struct xhci_erst *erst)
}
EXPORT_SYMBOL_GPL(xhci_free_erst);

static struct xhci_device_context_array *xhci_vendor_alloc_dcbaa(
        struct xhci_hcd *xhci, gfp_t flags)
{
    struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);

    if (ops && ops->alloc_dcbaa)
        return ops->alloc_dcbaa(xhci, flags);
    return 0;
}

static void xhci_vendor_free_dcbaa(struct xhci_hcd *xhci)
{
    struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);

    if (ops && ops->free_dcbaa)
        ops->free_dcbaa(xhci);
}

void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
    struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
@@ -1889,9 +1985,13 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
    xhci_dbg_trace(xhci, trace_xhci_dbg_init,
            "Freed medium stream array pool");

    if (xhci->dcbaa)
        dma_free_coherent(dev, sizeof(*xhci->dcbaa),
                xhci->dcbaa, xhci->dcbaa->dma);
    if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0)) {
        xhci_vendor_free_dcbaa(xhci);
    } else {
        if (xhci->dcbaa)
            dma_free_coherent(dev, sizeof(*xhci->dcbaa),
                    xhci->dcbaa, xhci->dcbaa->dma);
    }
    xhci->dcbaa = NULL;

    scratchpad_free(xhci);
@@ -1972,7 +2072,7 @@ static int xhci_test_trb_in_td(struct xhci_hcd *xhci,
}

/* TRB math checks for xhci_trb_in_td(), using the command and event rings. */
static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
{
    struct {
        dma_addr_t input_dma;
@@ -2092,6 +2192,7 @@ static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci)
    xhci_dbg(xhci, "TRB math tests passed.\n");
    return 0;
}
EXPORT_SYMBOL_GPL(xhci_check_trb_in_td_math);

static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
@@ -2428,15 +2529,21 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
     * xHCI section 5.4.6 - Device Context array must be
     * "physically contiguous and 64-byte (cache line) aligned".
     */
    xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
            flags);
    if (!xhci->dcbaa)
        goto fail;
    xhci->dcbaa->dma = dma;
    if (xhci_vendor_is_usb_offload_enabled(xhci, NULL, 0)) {
        xhci->dcbaa = xhci_vendor_alloc_dcbaa(xhci, flags);
        if (!xhci->dcbaa)
            goto fail;
    } else {
        xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma,
                flags);
        if (!xhci->dcbaa)
            goto fail;
        xhci->dcbaa->dma = dma;
    }
    xhci_dbg_trace(xhci, trace_xhci_dbg_init,
            "// Device context base array address = 0x%llx (DMA), %p (virt)",
            (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
    xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
    xhci_write_64(xhci, xhci->dcbaa->dma, &xhci->op_regs->dcbaa_ptr);

    /*
     * Initialize the ring segment pool. The ring must be a contiguous
@@ -173,6 +173,43 @@ static const struct of_device_id usb_xhci_of_match[] = {
MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
#endif

static struct xhci_plat_priv_overwrite xhci_plat_vendor_overwrite;

int xhci_plat_register_vendor_ops(struct xhci_vendor_ops *vendor_ops)
{
    if (vendor_ops == NULL)
        return -EINVAL;

    xhci_plat_vendor_overwrite.vendor_ops = vendor_ops;

    return 0;
}
EXPORT_SYMBOL_GPL(xhci_plat_register_vendor_ops);

static int xhci_vendor_init(struct xhci_hcd *xhci)
{
    struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
    struct xhci_plat_priv *priv = xhci_to_priv(xhci);

    if (xhci_plat_vendor_overwrite.vendor_ops)
        ops = priv->vendor_ops = xhci_plat_vendor_overwrite.vendor_ops;

    if (ops && ops->vendor_init)
        return ops->vendor_init(xhci);
    return 0;
}

static void xhci_vendor_cleanup(struct xhci_hcd *xhci)
{
    struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);
    struct xhci_plat_priv *priv = xhci_to_priv(xhci);

    if (ops && ops->vendor_cleanup)
        ops->vendor_cleanup(xhci);

    priv->vendor_ops = NULL;
}

static int xhci_plat_probe(struct platform_device *pdev)
{
    const struct xhci_plat_priv *priv_match;
@@ -317,6 +354,10 @@ static int xhci_plat_probe(struct platform_device *pdev)
        goto disable_clk;
    }

    ret = xhci_vendor_init(xhci);
    if (ret)
        goto disable_usb_phy;

    hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);

    if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
@@ -410,6 +451,9 @@ static int xhci_plat_remove(struct platform_device *dev)
    if (shared_hcd)
        usb_put_hcd(shared_hcd);

    xhci_vendor_cleanup(xhci);

    usb_put_hcd(shared_hcd);
    clk_disable_unprepare(clk);
    clk_disable_unprepare(reg_clk);
    usb_put_hcd(hcd);
@@ -13,6 +13,9 @@
struct xhci_plat_priv {
    const char *firmware_name;
    unsigned long long quirks;
    struct xhci_vendor_ops *vendor_ops;
    struct xhci_vendor_data *vendor_data;
    int (*plat_setup)(struct usb_hcd *);
    void (*plat_start)(struct usb_hcd *);
    int (*init_quirk)(struct usb_hcd *);
    int (*suspend_quirk)(struct usb_hcd *);
@@ -21,4 +24,11 @@ struct xhci_plat_priv {

#define hcd_to_xhci_priv(h) ((struct xhci_plat_priv *)hcd_to_xhci(h)->priv)
#define xhci_to_priv(x) ((struct xhci_plat_priv *)(x)->priv)

struct xhci_plat_priv_overwrite {
    struct xhci_vendor_ops *vendor_ops;
};

int xhci_plat_register_vendor_ops(struct xhci_vendor_ops *vendor_ops);

#endif	/* _XHCI_PLAT_H */
@@ -25,6 +25,7 @@
#include "xhci-trace.h"
#include "xhci-debugfs.h"
#include "xhci-dbgcap.h"
#include "xhci-plat.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
@@ -1675,6 +1676,11 @@ static int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flag
        return -ENODEV;
    }

    if (xhci_vendor_usb_offload_skip_urb(xhci, urb)) {
        xhci_dbg(xhci, "skip urb for usb offload\n");
        return -EOPNOTSUPP;
    }

    if (usb_endpoint_xfer_isoc(&urb->ep->desc))
        num_tds = urb->number_of_packets;
    else if (usb_endpoint_is_bulk_out(&urb->ep->desc) &&
@@ -3015,6 +3021,14 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
        xhci_finish_resource_reservation(xhci, ctrl_ctx);
        spin_unlock_irqrestore(&xhci->lock, flags);
    }
    if (ret)
        goto failed;

    ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
    if (ret)
        xhci_warn(xhci, "sync device context failed, ret=%d", ret);

failed:
    return ret;
}
@@ -3158,7 +3172,11 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
    for (i = 0; i < 31; i++) {
        if (virt_dev->eps[i].new_ring) {
            xhci_debugfs_remove_endpoint(xhci, virt_dev, i);
            xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
            if (xhci_vendor_is_usb_offload_enabled(xhci, virt_dev, i))
                xhci_vendor_free_transfer_ring(xhci, virt_dev, i);
            else
                xhci_ring_free(xhci, virt_dev->eps[i].new_ring);

            virt_dev->eps[i].new_ring = NULL;
        }
    }
@@ -3319,6 +3337,13 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,

    wait_for_completion(stop_cmd->completion);

    err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
    if (err) {
        xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
                __func__, err);
        goto cleanup;
    }

    spin_lock_irqsave(&xhci->lock, flags);

    /* config ep command clears toggle if add and drop ep flags are set */
@@ -3350,6 +3375,11 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,

    wait_for_completion(cfg_cmd->completion);

    err = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
    if (err)
        xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
                __func__, err);

    xhci_free_command(xhci, cfg_cmd);
cleanup:
    xhci_free_command(xhci, stop_cmd);
@@ -3895,6 +3925,13 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd,
    /* Wait for the Reset Device command to finish */
    wait_for_completion(reset_device_cmd->completion);

    ret = xhci_vendor_sync_dev_ctx(xhci, slot_id);
    if (ret) {
        xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
                __func__, ret);
        goto command_cleanup;
    }

    /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
     * unless we tried to reset a slot ID that wasn't enabled,
     * or the device wasn't in the addressed or configured state.
@@ -4144,6 +4181,14 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
        xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
        goto disable_slot;
    }

    ret = xhci_vendor_sync_dev_ctx(xhci, slot_id);
    if (ret) {
        xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
                __func__, ret);
        goto disable_slot;
    }

    vdev = xhci->devs[slot_id];
    slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
    trace_xhci_alloc_dev(slot_ctx);
@@ -4274,6 +4319,13 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
    /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
    wait_for_completion(command->completion);

    ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
    if (ret) {
        xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
                __func__, ret);
        goto out;
    }

    /* FIXME: From section 4.3.4: "Software shall be responsible for timing
     * the SetAddress() "recovery interval" required by USB and aborting the
     * command on a timeout.
@@ -4358,10 +4410,11 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
    return ret;
}

static int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
    return xhci_setup_device(hcd, udev, SETUP_CONTEXT_ADDRESS);
}
EXPORT_SYMBOL_GPL(xhci_address_device);

static int xhci_enable_device(struct usb_hcd *hcd, struct usb_device *udev)
{
@@ -4426,6 +4479,14 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
        return -ENOMEM;
    }

    ret = xhci_vendor_sync_dev_ctx(xhci, udev->slot_id);
    if (ret) {
        spin_unlock_irqrestore(&xhci->lock, flags);
        xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
                __func__, ret);
        return ret;
    }

    xhci_slot_copy(xhci, command->in_ctx, virt_dev->out_ctx);
    spin_unlock_irqrestore(&xhci->lock, flags);

@@ -4453,6 +4514,30 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
    return ret;
}

struct xhci_vendor_ops *xhci_vendor_get_ops(struct xhci_hcd *xhci)
{
    return xhci_to_priv(xhci)->vendor_ops;
}
EXPORT_SYMBOL_GPL(xhci_vendor_get_ops);

int xhci_vendor_sync_dev_ctx(struct xhci_hcd *xhci, unsigned int slot_id)
{
    struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);

    if (ops && ops->sync_dev_ctx)
        return ops->sync_dev_ctx(xhci, slot_id);
    return 0;
}

bool xhci_vendor_usb_offload_skip_urb(struct xhci_hcd *xhci, struct urb *urb)
{
    struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci);

    if (ops && ops->usb_offload_skip_urb)
        return ops->usb_offload_skip_urb(xhci, urb);
    return false;
}

#ifdef CONFIG_PM

/* BESL to HIRD Encoding array for USB2 LPM */
@@ -5182,6 +5267,15 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
        return -ENOMEM;
    }

    ret = xhci_vendor_sync_dev_ctx(xhci, hdev->slot_id);
    if (ret) {
        xhci_warn(xhci, "%s: Failed to sync device context failed, err=%d",
                __func__, ret);
        xhci_free_command(xhci, config_cmd);
        spin_unlock_irqrestore(&xhci->lock, flags);
        return ret;
    }

    xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
    ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
    slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
@@ -5525,6 +5619,12 @@ void xhci_init_driver(struct hc_driver *drv,
            drv->reset_bandwidth = over->reset_bandwidth;
        if (over->update_hub_device)
            drv->update_hub_device = over->update_hub_device;
        if (over->address_device)
            drv->address_device = over->address_device;
        if (over->bus_suspend)
            drv->bus_suspend = over->bus_suspend;
        if (over->bus_resume)
            drv->bus_resume = over->bus_resume;
    }
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
@@ -1963,6 +1963,14 @@ struct xhci_driver_overrides {
    void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
    int (*update_hub_device)(struct usb_hcd *hcd, struct usb_device *hdev,
            struct usb_tt *tt, gfp_t mem_flags);
    int (*address_device)(struct usb_hcd *hcd, struct usb_device *udev);
    int (*bus_suspend)(struct usb_hcd *hcd);
    int (*bus_resume)(struct usb_hcd *hcd);

    ANDROID_KABI_RESERVE(1);
    ANDROID_KABI_RESERVE(2);
    ANDROID_KABI_RESERVE(3);
    ANDROID_KABI_RESERVE(4);
};

#define	XHCI_CFC_DELAY		10
@@ -2083,6 +2091,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
        unsigned int num_segs, unsigned int cycle_state,
        enum xhci_ring_type type, unsigned int max_packet, gfp_t flags);
void xhci_remove_stream_mapping(struct xhci_ring *ring);
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
        unsigned int num_trbs, gfp_t flags);
@@ -2144,6 +2153,7 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
        struct usb_tt *tt, gfp_t mem_flags);
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
int xhci_ext_cap_init(struct xhci_hcd *xhci);

@@ -2251,6 +2261,52 @@ static inline struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
            urb->stream_id);
}

/**
 * struct xhci_vendor_ops - function callbacks for vendor specific operations
 * @vendor_init: called for vendor init process
 * @vendor_cleanup: called for vendor cleanup process
 * @is_usb_offload_enabled: called to check if usb offload enabled
 * @alloc_dcbaa: called when allocating vendor specific dcbaa
 * @free_dcbaa: called to free vendor specific dcbaa
 * @alloc_transfer_ring: called when remote transfer ring allocation is required
 * @free_transfer_ring: called to free vendor specific transfer ring
 * @sync_dev_ctx: called when synchronization for device context is required
 * @usb_offload_skip_urb: skip urb control for offloading
 * @alloc_container_ctx: called when allocating vendor specific container context
 * @free_container_ctx: called to free vendor specific container context
 */
struct xhci_vendor_ops {
    int (*vendor_init)(struct xhci_hcd *xhci);
    void (*vendor_cleanup)(struct xhci_hcd *xhci);
    bool (*is_usb_offload_enabled)(struct xhci_hcd *xhci,
            struct xhci_virt_device *vdev,
            unsigned int ep_index);

    struct xhci_device_context_array *(*alloc_dcbaa)(struct xhci_hcd *xhci,
            gfp_t flags);
    void (*free_dcbaa)(struct xhci_hcd *xhci);

    struct xhci_ring *(*alloc_transfer_ring)(struct xhci_hcd *xhci,
            u32 endpoint_type, enum xhci_ring_type ring_type,
            unsigned int max_packet, gfp_t mem_flags);
    void (*free_transfer_ring)(struct xhci_hcd *xhci,
            struct xhci_virt_device *virt_dev, unsigned int ep_index);
    int (*sync_dev_ctx)(struct xhci_hcd *xhci, unsigned int slot_id);
    bool (*usb_offload_skip_urb)(struct xhci_hcd *xhci, struct urb *urb);
    void (*alloc_container_ctx)(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
            int type, gfp_t flags);
    void (*free_container_ctx)(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
};

struct xhci_vendor_ops *xhci_vendor_get_ops(struct xhci_hcd *xhci);

int xhci_vendor_sync_dev_ctx(struct xhci_hcd *xhci, unsigned int slot_id);
bool xhci_vendor_usb_offload_skip_urb(struct xhci_hcd *xhci, struct urb *urb);
void xhci_vendor_free_transfer_ring(struct xhci_hcd *xhci,
        struct xhci_virt_device *virt_dev, unsigned int ep_index);
bool xhci_vendor_is_usb_offload_enabled(struct xhci_hcd *xhci,
        struct xhci_virt_device *virt_dev, unsigned int ep_index);

/*
 * TODO: As per spec Isochronous IDT transmissions are supported. We bypass
 * them anyways as we where unable to find a device that matches the
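Taken together, the xhci hooks above let a vendor offload module plug its
callbacks into xhci-plat before the controller probes. A minimal sketch of
how such a module might wire this up, using only the struct xhci_vendor_ops
fields and xhci_plat_register_vendor_ops() added in this series; the module
name and callback bodies are placeholders:

    /* Hypothetical vendor module: register offload callbacks with the
     * xhci-plat glue. Unset callbacks fall back to the default
     * (non-offload) allocation paths shown in xhci-mem.c above. */
    #include <linux/module.h>
    #include "xhci.h"
    #include "xhci-plat.h"

    static int example_vendor_init(struct xhci_hcd *xhci)
    {
        /* Placeholder: allocate vendor_data, set up offload IPC, etc. */
        return 0;
    }

    static bool example_offload_enabled(struct xhci_hcd *xhci,
                                        struct xhci_virt_device *vdev,
                                        unsigned int ep_index)
    {
        /* In practice this would check for e.g. isoc audio endpoints. */
        return true;
    }

    static struct xhci_vendor_ops example_ops = {
        .vendor_init = example_vendor_init,
        .is_usb_offload_enabled = example_offload_enabled,
    };

    static int __init example_offload_init(void)
    {
        /* Must run before xhci-plat probes so xhci_vendor_init() sees it. */
        return xhci_plat_register_vendor_ops(&example_ops);
    }
    module_init(example_offload_init);
    MODULE_LICENSE("GPL");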
@@ -9,7 +9,7 @@

#include "rsc_mgr.h"

static struct gh_rm_platform_ops *rm_platform_ops;
static const struct gh_rm_platform_ops *rm_platform_ops;
static DECLARE_RWSEM(rm_platform_ops_lock);

int gh_rm_platform_pre_mem_share(struct gh_rm *rm, struct gh_rm_mem_parcel *mem_parcel)
@@ -36,7 +36,7 @@ int gh_rm_platform_post_mem_reclaim(struct gh_rm *rm, struct gh_rm_mem_parcel *m
}
EXPORT_SYMBOL_GPL(gh_rm_platform_post_mem_reclaim);

int gh_rm_register_platform_ops(struct gh_rm_platform_ops *platform_ops)
int gh_rm_register_platform_ops(const struct gh_rm_platform_ops *platform_ops)
{
    int ret = 0;

@@ -50,7 +50,7 @@ int gh_rm_register_platform_ops(struct gh_rm_platform_ops *platform_ops)
}
EXPORT_SYMBOL_GPL(gh_rm_register_platform_ops);

void gh_rm_unregister_platform_ops(struct gh_rm_platform_ops *platform_ops)
void gh_rm_unregister_platform_ops(const struct gh_rm_platform_ops *platform_ops)
{
    down_write(&rm_platform_ops_lock);
    if (rm_platform_ops == platform_ops)
@@ -61,10 +61,10 @@ EXPORT_SYMBOL_GPL(gh_rm_unregister_platform_ops);

static void _devm_gh_rm_unregister_platform_ops(void *data)
{
    gh_rm_unregister_platform_ops(data);
    gh_rm_unregister_platform_ops((const struct gh_rm_platform_ops *)data);
}

int devm_gh_rm_register_platform_ops(struct device *dev, struct gh_rm_platform_ops *ops)
int devm_gh_rm_register_platform_ops(struct device *dev, const struct gh_rm_platform_ops *ops)
{
    int ret;

@@ -72,7 +72,7 @@ int devm_gh_rm_register_platform_ops(struct device *dev, struct gh_rm_platform_o
    if (ret)
        return ret;

    return devm_add_action(dev, _devm_gh_rm_unregister_platform_ops, ops);
    return devm_add_action(dev, _devm_gh_rm_unregister_platform_ops, (void *)ops);
}
EXPORT_SYMBOL_GPL(devm_gh_rm_register_platform_ops);
@ -38,36 +38,40 @@ static int qcom_scm_gh_rm_pre_mem_share(void *rm, struct gh_rm_mem_parcel *mem_p
		new_perms[n].perm |= QCOM_SCM_PERM_READ;
	}

	src = (1ull << QCOM_SCM_VMID_HLOS);
	src = BIT_ULL(QCOM_SCM_VMID_HLOS);

	for (i = 0; i < mem_parcel->n_mem_entries; i++) {
		src_cpy = src;
		ret = qcom_scm_assign_mem(le64_to_cpu(mem_parcel->mem_entries[i].phys_addr),
					  le64_to_cpu(mem_parcel->mem_entries[i].size),
					  &src_cpy, new_perms, mem_parcel->n_acl_entries);
		if (ret) {
			src = 0;
			for (n = 0; n < mem_parcel->n_acl_entries; n++) {
				vmid = le16_to_cpu(mem_parcel->acl_entries[n].vmid);
				if (vmid <= QCOM_SCM_MAX_MANAGED_VMID)
					src |= (1ull << vmid);
				else
					src |= (1ull << QCOM_SCM_RM_MANAGED_VMID);
			}

			new_perms[0].vmid = QCOM_SCM_VMID_HLOS;

			for (i--; i >= 0; i--) {
				src_cpy = src;
				WARN_ON_ONCE(qcom_scm_assign_mem(
						le64_to_cpu(mem_parcel->mem_entries[i].phys_addr),
						le64_to_cpu(mem_parcel->mem_entries[i].size),
						&src_cpy, new_perms, 1));
			}
		if (ret)
			break;
		}
	}

	if (!ret)
		goto out;

	src = 0;
	for (n = 0; n < mem_parcel->n_acl_entries; n++) {
		vmid = le16_to_cpu(mem_parcel->acl_entries[n].vmid);
		if (vmid <= QCOM_SCM_MAX_MANAGED_VMID)
			src |= BIT_ULL(vmid);
		else
			src |= BIT_ULL(QCOM_SCM_RM_MANAGED_VMID);
	}

	new_perms[0].vmid = QCOM_SCM_VMID_HLOS;

	for (i--; i >= 0; i--) {
		src_cpy = src;
		WARN_ON_ONCE(qcom_scm_assign_mem(
				le64_to_cpu(mem_parcel->mem_entries[i].phys_addr),
				le64_to_cpu(mem_parcel->mem_entries[i].size),
				&src_cpy, new_perms, 1));
	}

out:
	kfree(new_perms);
	return ret;
}
@ -117,13 +121,15 @@ static bool gh_has_qcom_extensions(void)
{
	struct arm_smccc_res res;
	uuid_t uuid;
	u32 *up;

	arm_smccc_1_1_smc(GH_QCOM_EXT_CALL_UUID_ID, &res);

	((u32 *)&uuid.b[0])[0] = lower_32_bits(res.a0);
	((u32 *)&uuid.b[0])[1] = lower_32_bits(res.a1);
	((u32 *)&uuid.b[0])[2] = lower_32_bits(res.a2);
	((u32 *)&uuid.b[0])[3] = lower_32_bits(res.a3);
	up = (u32 *)&uuid.b[0];
	up[0] = lower_32_bits(res.a0);
	up[1] = lower_32_bits(res.a1);
	up[2] = lower_32_bits(res.a2);
	up[3] = lower_32_bits(res.a3);

	return uuid_equal(&uuid, &QCOM_EXT_UUID);
}
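For readers unfamiliar with the helper: BIT_ULL() comes from <linux/bits.h> and expands to (1ULL << (n)), so the conversion in the hunk above is purely cosmetic. A minimal illustration, reusing the same operand as the diff:

#include <linux/bits.h>

/* The two initializers build the same single-bit u64 mask; BIT_ULL()
 * spells out the intent and avoids a mistyped integer suffix. */
u64 open_coded = 1ull << QCOM_SCM_VMID_HLOS;
u64 helper = BIT_ULL(QCOM_SCM_VMID_HLOS);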
@ -335,6 +335,8 @@ static bool gh_vcpu_populate(struct gh_vm_resource_ticket *ticket, struct gh_res
	if (ret)
		pr_warn("Failed to request vcpu irq %d: %d", vcpu->rsc->irq, ret);

	enable_irq_wake(vcpu->rsc->irq);

out:
	mutex_unlock(&vcpu->run_lock);
	return !ret;
@ -123,7 +123,7 @@ struct gh_rm_connection {

/**
 * struct gh_rm - private data for communicating w/Gunyah resource manager
 * @dev: pointer to device
 * @dev: pointer to RM platform device
 * @tx_ghrsc: message queue resource to TX to RM
 * @rx_ghrsc: message queue resource to RX from RM
 * @msgq: mailbox instance of TX/RX resources above
@ -160,10 +160,10 @@ struct gh_rm {
};

/**
 * gh_rm_remap_error() - Remap Gunyah resource manager errors into a Linux error code
 * gh_rm_error_remap() - Remap Gunyah resource manager errors into a Linux error code
 * @rm_error: "Standard" return value from Gunyah resource manager
 */
static inline int gh_rm_remap_error(enum gh_rm_error rm_error)
static inline int gh_rm_error_remap(enum gh_rm_error rm_error)
{
	switch (rm_error) {
	case GH_RM_ERROR_OK:
@ -226,7 +226,7 @@ static int gh_rm_irq_domain_alloc(struct irq_domain *d, unsigned int virq, unsig
				  void *arg)
{
	struct gh_irq_chip_data *chip_data, *spec = arg;
	struct irq_fwspec parent_fwspec;
	struct irq_fwspec parent_fwspec = {};
	struct gh_rm *rm = d->host_data;
	u32 gh_virq = spec->gh_virq;
	int ret;
@ -309,7 +309,9 @@ struct gh_resource *gh_rm_alloc_resource(struct gh_rm *rm, struct gh_rm_hyp_reso
	if (ret < 0) {
		dev_err(rm->dev,
			"Failed to allocate interrupt for resource %d label: %d: %d\n",
			ghrsc->type, ghrsc->rm_label, ghrsc->irq);
			ghrsc->type, ghrsc->rm_label, ret);
		kfree(ghrsc);
		return NULL;
	} else {
		ghrsc->irq = ret;
	}
@ -417,7 +419,7 @@ static void gh_rm_process_notif(struct gh_rm *rm, void *msg, size_t msg_size)
	rm->active_rx_connection = connection;
}

static void gh_rm_process_rply(struct gh_rm *rm, void *msg, size_t msg_size)
static void gh_rm_process_reply(struct gh_rm *rm, void *msg, size_t msg_size)
{
	struct gh_rm_rpc_reply_hdr *reply_hdr = msg;
	struct gh_rm_connection *connection;
@ -514,7 +516,7 @@ static void gh_rm_msgq_rx_data(struct mbox_client *cl, void *mssg)
		gh_rm_process_notif(rm, msg, msg_size);
		break;
	case RM_RPC_TYPE_REPLY:
		gh_rm_process_rply(rm, msg, msg_size);
		gh_rm_process_reply(rm, msg, msg_size);
		break;
	case RM_RPC_TYPE_CONTINUATION:
		gh_rm_process_cont(rm, rm->active_rx_connection, msg, msg_size);
@ -665,10 +667,10 @@ int gh_rm_call(void *_rm, u32 message_id, const void *req_buf, size_t req_buf_si
	if (ret < 0)
		goto out;

	/* Wait for response */
	ret = wait_for_completion_interruptible(&connection->reply.seq_done);
	if (ret)
		goto out;
	/* Wait for response. Uninterruptible because rollback based on what RM did to VM
	 * requires us to know how RM handled the call.
	 */
	wait_for_completion(&connection->reply.seq_done);

	/* Check for internal (kernel) error waiting for the response */
	if (connection->reply.ret) {
@ -682,8 +684,7 @@ int gh_rm_call(void *_rm, u32 message_id, const void *req_buf, size_t req_buf_si
	if (connection->reply.rm_error != GH_RM_ERROR_OK) {
		dev_warn(rm->dev, "RM rejected message %08x. Error: %d\n", message_id,
			connection->reply.rm_error);
		dump_stack();
		ret = gh_rm_remap_error(connection->reply.rm_error);
		ret = gh_rm_error_remap(connection->reply.rm_error);
		kfree(connection->payload);
		goto out;
	}
@ -913,7 +914,6 @@ static int gh_rm_drv_probe(struct platform_device *pdev)
err_irq_domain:
	irq_domain_remove(rm->irq_domain);
err_msgq:
	mbox_free_channel(gh_msgq_chan(&rm->msgq));
	gh_msgq_remove(&rm->msgq);
err_cache:
	kmem_cache_destroy(rm->cache);
@ -928,7 +928,6 @@ static int gh_rm_drv_remove(struct platform_device *pdev)
	auxiliary_device_uninit(&rm->adev);
	misc_deregister(&rm->miscdev);
	irq_domain_remove(rm->irq_domain);
	mbox_free_channel(gh_msgq_chan(&rm->msgq));
	gh_msgq_remove(&rm->msgq);
	kmem_cache_destroy(rm->cache);

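The gh_rm_call() hunk above changes the wait semantics, not just the spelling. A condensed sketch of the behavioural difference (illustrative fragment, not the driver's code):

/* The interruptible wait can return -ERESTARTSYS on a signal, leaving the
 * caller unsure whether RM acted on the request; the uninterruptible wait
 * always blocks until the reply has been processed. */
if (wait_for_completion_interruptible(&done))
	return -ERESTARTSYS;	/* reply may still arrive later: RM state unknown */

wait_for_completion(&done);	/* never returns early; RM outcome is known */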
@ -139,7 +139,7 @@ static int _gh_rm_mem_append(struct gh_rm *rm, u32 mem_handle, bool end_append,
		return -ENOMEM;

	req_header = msg;
	mem_section = (void *)req_header + sizeof(struct gh_rm_mem_append_req_header);
	mem_section = (void *)(req_header + 1);

	req_header->mem_handle = cpu_to_le32(mem_handle);
	if (end_append)
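Why the replacement above is equivalent: pointer arithmetic on a typed pointer scales by the pointee size. A small fragment spelling this out:

/* Both expressions point at the first byte after the header;
 * (req_header + 1) advances by sizeof(*req_header) automatically,
 * so the explicit sizeof() addition was redundant. */
struct gh_rm_mem_append_req_header *req_header = msg;
void *a = (void *)req_header + sizeof(struct gh_rm_mem_append_req_header);
void *b = (void *)(req_header + 1);	/* same address as 'a' */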
@ -31,13 +31,10 @@ static void gh_vm_put_function(struct gh_vm_function *fn)
static struct gh_vm_function *gh_vm_get_function(u32 type)
{
	struct gh_vm_function *fn;
	int r;

	fn = xa_load(&gh_vm_functions, type);
	if (!fn) {
		r = request_module("ghfunc:%d", type);
		if (r)
			return ERR_PTR(r > 0 ? -r : r);
		request_module("ghfunc:%d", type);

		fn = xa_load(&gh_vm_functions, type);
	}
@ -617,7 +614,7 @@ static int gh_vm_ensure_started(struct gh_vm *ghvm)
	if (ret)
		return ret;
	/** gh_vm_start() is guaranteed to bring status out of
	 * GH_RM_VM_STATUS_LOAD, thus inifitely recursive call is not
	 * GH_RM_VM_STATUS_LOAD, thus infinitely recursive call is not
	 * possible
	 */
	return gh_vm_ensure_started(ghvm);
@ -668,10 +665,6 @@ static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
	if (overflows_type(dtb_config.guest_phys_addr + dtb_config.size, u64))
		return -EOVERFLOW;

	/* Gunyah requires that dtb_config is page aligned */
	if (!PAGE_ALIGNED(dtb_config.guest_phys_addr) || !PAGE_ALIGNED(dtb_config.size))
		return -EINVAL;

	ghvm->dtb_config = dtb_config;

	r = 0;
@ -14,9 +14,7 @@

static bool pages_are_mergeable(struct page *a, struct page *b)
{
	if (page_to_pfn(a) + 1 != page_to_pfn(b))
		return false;
	return true;
	return page_to_pfn(a) + 1 == page_to_pfn(b);
}

static bool gh_vm_mem_overlap(struct gh_vm_mem *a, u64 addr, u64 size)
@ -1 +0,0 @@
jaegeuk@google.com
@ -1 +0,0 @@
balsini@google.com
@ -1,2 +0,0 @@
akailash@google.com
paullawrence@google.com
@ -1 +0,0 @@
per-file net/**=file:/net/OWNERS
@ -1,4 +0,0 @@
per-file bio.h=file:/block/OWNERS
per-file blk*.h=file:/block/OWNERS
per-file f2fs**=file:/fs/f2fs/OWNERS
per-file net**=file:/net/OWNERS
@ -167,15 +167,15 @@ struct gh_rm_platform_ops {
};

#if IS_ENABLED(CONFIG_GUNYAH_PLATFORM_HOOKS)
int gh_rm_register_platform_ops(struct gh_rm_platform_ops *platform_ops);
void gh_rm_unregister_platform_ops(struct gh_rm_platform_ops *platform_ops);
int devm_gh_rm_register_platform_ops(struct device *dev, struct gh_rm_platform_ops *ops);
int gh_rm_register_platform_ops(const struct gh_rm_platform_ops *platform_ops);
void gh_rm_unregister_platform_ops(const struct gh_rm_platform_ops *platform_ops);
int devm_gh_rm_register_platform_ops(struct device *dev, const struct gh_rm_platform_ops *ops);
#else
static inline int gh_rm_register_platform_ops(struct gh_rm_platform_ops *platform_ops)
static inline int gh_rm_register_platform_ops(const struct gh_rm_platform_ops *platform_ops)
{ return 0; }
static inline void gh_rm_unregister_platform_ops(struct gh_rm_platform_ops *platform_ops) { }
static inline void gh_rm_unregister_platform_ops(const struct gh_rm_platform_ops *platform_ops) { }
static inline int devm_gh_rm_register_platform_ops(struct device *dev,
		struct gh_rm_platform_ops *ops) { return 0; }
		const struct gh_rm_platform_ops *ops) { return 0; }
#endif

#endif
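The const-ification above lets the ops table live in rodata. A hypothetical caller-side sketch; the field name is inferred from the pre_mem_share helper in the earlier hunks and the callback bodies are invented:

static int my_pre_share(void *rm, struct gh_rm_mem_parcel *parcel)
{
	return 0;	/* platform-specific access grant would go here */
}

static const struct gh_rm_platform_ops my_plat_ops = {
	.pre_mem_share = my_pre_share,	/* field name inferred, see lead-in */
};

static int my_probe(struct platform_device *pdev)
{
	/* devm variant unregisters automatically on driver detach */
	return devm_gh_rm_register_platform_ops(&pdev->dev, &my_plat_ops);
}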
@ -21,6 +21,16 @@ int __must_check gh_vm_get(struct gh_vm *ghvm);
void gh_vm_put(struct gh_vm *ghvm);

struct gh_vm_function_instance;
/**
 * struct gh_vm_function - Represents a function type
 * @type: value from &enum gh_fn_type
 * @name: friendly name for debug purposes
 * @mod: owner of the function type
 * @bind: Called when a new function of this type has been allocated.
 * @unbind: Called when the function instance is being destroyed.
 * @compare: Compare function instance @f's argument to the provided arg.
 *           Return true if they are equivalent. Used on GH_VM_REMOVE_FUNCTION.
 */
struct gh_vm_function {
	u32 type;
	const char *name;
@ -84,9 +94,26 @@ void gh_vm_function_unregister(struct gh_vm_function *f);
	module_gh_vm_function(_name); \
	MODULE_ALIAS_GH_VM_FUNCTION(_type, _idx)

/**
 * struct gh_vm_resource_ticket - Represents a ticket to reserve exclusive access to VM resource(s)
 * @vm_list: for @gh_vm->resource_tickets
 * @resources: List of resource(s) associated with this ticket (members are from @gh_resource->list)
 * @resource_type: Type of resource this ticket reserves
 * @label: Label of the resource from resource manager this ticket reserves.
 * @owner: owner of the ticket
 * @populate: callback provided by the ticket owner and called when a resource is found that
 *            matches @resource_type and @label. Note that this callback could be called
 *            multiple times if userspace created multiple resources with the same type/label.
 *            This callback may also have significant delay after gh_vm_add_resource_ticket()
 *            since gh_vm_add_resource_ticket() could be called before the VM starts.
 * @unpopulate: callback provided by the ticket owner and called when the ticket owner should
 *              no longer use the resource provided in the argument. When unpopulate() returns,
 *              the ticket owner should not be able to use the resource any more as the resource
 *              might be freed.
 */
struct gh_vm_resource_ticket {
	struct list_head vm_list; /* for gh_vm's resource tickets list */
	struct list_head resources; /* resources associated with this ticket */
	struct list_head vm_list;
	struct list_head resources;
	enum gh_resource_type resource_type;
	u32 label;
@ -71,7 +71,6 @@ struct sk_psock_link {
};

struct sk_psock_work_state {
	struct sk_buff *skb;
	u32 len;
	u32 off;
};
@ -105,7 +104,7 @@ struct sk_psock {
	struct proto *sk_proto;
	struct mutex work_mutex;
	struct sk_psock_work_state work_state;
	struct work_struct work;
	struct delayed_work work;
	struct rcu_work rwork;
};

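The work_struct to delayed_work switch above follows a mechanical pattern that recurs throughout the sockmap hunks later in this merge. A sketch on a made-up driver struct:

struct my_driver {
	struct delayed_work work;	/* was: struct work_struct work */
};

static void my_work_fn(struct work_struct *work)
{
	/* container_of() now goes through the embedded work_struct */
	struct delayed_work *dwork = to_delayed_work(work);
	struct my_driver *drv = container_of(dwork, struct my_driver, work);

	(void)drv;	/* real work would happen here */
}

/* And the call sites change in lockstep:
 * INIT_DELAYED_WORK(&drv->work, my_work_fn);
 * schedule_delayed_work(&drv->work, 0);   - delay 0 behaves like schedule_work()
 * cancel_delayed_work_sync(&drv->work);   - replaces cancel_work_sync()
 */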
@ -157,7 +157,17 @@ struct usb_phy {
	 */
	enum usb_charger_type (*charger_detect)(struct usb_phy *x);

	/*
	 * Slot 0 here is reserved for a notify_port_status callback addition that narrowly
	 * missed the ABI freeze deadline due to upstream review discussions. See
	 * https://lore.kernel.org/linux-usb/20230607062500.24669-1-stanley_chang@realtek.com/
	 * for details. All other slots are for "normal" future ABI breaks in LTS updates.
	 */
	ANDROID_KABI_RESERVE(0);
	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
};

/* for board-specific init logic */
@ -350,6 +350,7 @@ enum {
enum {
	HCI_SETUP,
	HCI_CONFIG,
	HCI_DEBUGFS_CREATED,
	HCI_AUTO_OFF,
	HCI_RFKILLED,
	HCI_MGMT,

@ -514,6 +514,7 @@ struct hci_dev {
	struct work_struct cmd_sync_work;
	struct list_head cmd_sync_work_list;
	struct mutex cmd_sync_work_lock;
	struct mutex unregister_lock;
	struct work_struct cmd_sync_cancel_work;
	struct work_struct reenable_adv_work;

@ -186,7 +186,7 @@ struct pneigh_entry {
	netdevice_tracker dev_tracker;
	u32 flags;
	u8 protocol;
	u8 key[];
	u32 key[];
};

/*
@ -54,7 +54,7 @@ struct netns_sysctl_ipv6 {
	int seg6_flowlabel;
	u32 ioam6_id;
	u64 ioam6_id_wide;
	bool skip_notify_on_dev_down;
	int skip_notify_on_dev_down;
	u8 fib_notify_on_flag_change;
	ANDROID_KABI_RESERVE(1);
};
@ -335,6 +335,7 @@ struct sk_filter;
 * @sk_cgrp_data: cgroup data for this cgroup
 * @sk_memcg: this socket's memory cgroup association
 * @sk_write_pending: a write to stream socket waits to start
 * @sk_wait_pending: number of threads blocked on this socket
 * @sk_state_change: callback to indicate change in the state of the sock
 * @sk_data_ready: callback to indicate there is data to be processed
 * @sk_write_space: callback to indicate there is bf sending space available
@ -427,6 +428,7 @@ struct sock {
	unsigned int sk_napi_id;
#endif
	int sk_rcvbuf;
	int sk_wait_pending;

	struct sk_filter __rcu *sk_filter;
	union {
@ -1190,6 +1192,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)

#define sk_wait_event(__sk, __timeo, __condition, __wait)	\
	({	int __rc;					\
		__sk->sk_wait_pending++;			\
		release_sock(__sk);				\
		__rc = __condition;				\
		if (!__rc) {					\
@ -1199,6 +1202,7 @@ static inline void sock_rps_reset_rxhash(struct sock *sk)
		}						\
		sched_annotate_sleep();				\
		lock_sock(__sk);				\
		__sk->sk_wait_pending--;			\
		__rc = __condition;				\
		__rc;						\
	})

@ -1468,6 +1468,8 @@ static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
}

void tcp_cleanup_rbuf(struct sock *sk, int copied);
void __tcp_cleanup_rbuf(struct sock *sk, int copied);


/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
 * If 87.5 % (7/8) of the space has been consumed, we want to override
@ -2292,6 +2294,14 @@ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore);
void tcp_bpf_clone(const struct sock *sk, struct sock *newsk);
#endif /* CONFIG_BPF_SYSCALL */

#ifdef CONFIG_INET
void tcp_eat_skb(struct sock *sk, struct sk_buff *skb);
#else
static inline void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
{
}
#endif

int tcp_bpf_sendmsg_redir(struct sock *sk, bool ingress,
			  struct sk_msg *msg, u32 bytes, int flags);
#endif /* CONFIG_NET_SOCK_MSG */
@ -1 +0,0 @@
per-file f2fs**=file:/fs/f2fs/OWNERS
@ -161,8 +161,9 @@ DECLARE_RESTRICTED_HOOK(android_rvh_tick_entry,
	TP_ARGS(rq), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_schedule,
	TP_PROTO(struct task_struct *prev, struct task_struct *next, struct rq *rq),
	TP_ARGS(prev, next, rq), 1);
	TP_PROTO(unsigned int sched_mode, struct task_struct *prev,
		struct task_struct *next, struct rq *rq),
	TP_ARGS(sched_mode, prev, next, rq), 1);

DECLARE_RESTRICTED_HOOK(android_rvh_sched_cpu_starting,
	TP_PROTO(int cpu),
@ -1,3 +0,0 @@
per-file f2fs**=file:/fs/f2fs/OWNERS
per-file fuse**=file:/fs/fuse/OWNERS
per-file net**=file:/net/OWNERS
@ -1,4 +0,0 @@
connoro@google.com
elavila@google.com
qperret@google.com
tkjos@google.com
@ -6638,7 +6638,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
	rq->last_seen_need_resched_ns = 0;
#endif

	trace_android_rvh_schedule(prev, next, rq);
	trace_android_rvh_schedule(sched_mode, prev, next, rq);
	if (likely(prev != next)) {
		rq->nr_switches++;
		/*
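Vendor modules attaching to the extended hook must take the new sched_mode parameter. A hypothetical handler, assuming the usual restricted-vendor-hook convention where DECLARE_RESTRICTED_HOOK() generates a register_trace_android_rvh_schedule() registration symbol and handlers receive a leading void *data argument:

static void my_schedule_hook(void *data, unsigned int sched_mode,
			     struct task_struct *prev,
			     struct task_struct *next, struct rq *rq)
{
	/* sched_mode tells the vendor hook how __schedule() was entered
	 * (e.g. preemption vs. voluntary switch) */
}

static int __init my_hook_init(void)
{
	return register_trace_android_rvh_schedule(my_schedule_hook, NULL);
}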
@ -1,2 +0,0 @@
lorenzo@google.com
maze@google.com
@ -2685,7 +2685,9 @@ void hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	mutex_lock(&hdev->unregister_lock);
	hci_dev_set_flag(hdev, HCI_UNREGISTER);
	mutex_unlock(&hdev->unregister_lock);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
@ -629,6 +629,7 @@ void hci_cmd_sync_init(struct hci_dev *hdev)
	INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
	INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
	mutex_init(&hdev->cmd_sync_work_lock);
	mutex_init(&hdev->unregister_lock);

	INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
	INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
@ -688,14 +689,19 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
		       void *data, hci_cmd_sync_work_destroy_t destroy)
{
	struct hci_cmd_sync_work_entry *entry;
	int err = 0;

	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return -ENODEV;
	mutex_lock(&hdev->unregister_lock);
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
		err = -ENODEV;
		goto unlock;
	}

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	if (!entry) {
		err = -ENOMEM;
		goto unlock;
	}
	entry->func = func;
	entry->data = data;
	entry->destroy = destroy;
@ -706,7 +712,9 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,

	queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);

	return 0;
unlock:
	mutex_unlock(&hdev->unregister_lock);
	return err;
}
EXPORT_SYMBOL(hci_cmd_sync_queue);

@ -4484,6 +4492,9 @@ static int hci_init_sync(struct hci_dev *hdev)
	    !hci_dev_test_flag(hdev, HCI_CONFIG))
		return 0;

	if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED))
		return 0;

	hci_debugfs_create_common(hdev);

	if (lmp_bredr_capable(hdev))
@ -480,8 +480,6 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		msg_rx = sk_psock_peek_msg(psock);
	}
out:
	if (psock->work_state.skb && copied > 0)
		schedule_work(&psock->work);
	return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);
@ -623,42 +621,33 @@ static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,

static void sk_psock_skb_state(struct sk_psock *psock,
			       struct sk_psock_work_state *state,
			       struct sk_buff *skb,
			       int len, int off)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
		state->skb = skb;
		state->len = len;
		state->off = off;
	} else {
		sock_drop(psock->sk, skb);
	}
	spin_unlock_bh(&psock->ingress_lock);
}

static void sk_psock_backlog(struct work_struct *work)
{
	struct sk_psock *psock = container_of(work, struct sk_psock, work);
	struct delayed_work *dwork = to_delayed_work(work);
	struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb = NULL;
	u32 len = 0, off = 0;
	bool ingress;
	u32 len, off;
	int ret;

	mutex_lock(&psock->work_mutex);
	if (unlikely(state->skb)) {
		spin_lock_bh(&psock->ingress_lock);
		skb = state->skb;
	if (unlikely(state->len)) {
		len = state->len;
		off = state->off;
		state->skb = NULL;
		spin_unlock_bh(&psock->ingress_lock);
	}
	if (skb)
		goto start;

	while ((skb = skb_dequeue(&psock->ingress_skb))) {
	while ((skb = skb_peek(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
		if (skb_bpf_strparser(skb)) {
@ -667,7 +656,6 @@ static void sk_psock_backlog(struct work_struct *work)
			off = stm->offset;
			len = stm->full_len;
		}
start:
		ingress = skb_bpf_ingress(skb);
		skb_bpf_redirect_clear(skb);
		do {
@ -677,22 +665,28 @@ static void sk_psock_backlog(struct work_struct *work)
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					sk_psock_skb_state(psock, state, skb,
							   len, off);
					sk_psock_skb_state(psock, state, len, off);

					/* Delay slightly to prioritize any
					 * other work that might be here.
					 */
					if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
						schedule_delayed_work(&psock->work, 1);
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				sock_drop(psock->sk, skb);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		if (!ingress)
		skb = skb_dequeue(&psock->ingress_skb);
		if (!ingress) {
			kfree_skb(skb);
		}
	}
end:
	mutex_unlock(&psock->work_mutex);
@ -733,7 +727,7 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_WORK(&psock->work, sk_psock_backlog);
	INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
	mutex_init(&psock->work_mutex);
	INIT_LIST_HEAD(&psock->ingress_msg);
	spin_lock_init(&psock->ingress_lock);
@ -785,11 +779,6 @@ static void __sk_psock_zap_ingress(struct sk_psock *psock)
		skb_bpf_redirect_clear(skb);
		sock_drop(psock->sk, skb);
	}
	kfree_skb(psock->work_state.skb);
	/* We null the skb here to ensure that calls to sk_psock_backlog
	 * do not pick up the free'd skb.
	 */
	psock->work_state.skb = NULL;
	__sk_psock_purge_ingress_msg(psock);
}

@ -808,7 +797,6 @@ void sk_psock_stop(struct sk_psock *psock)
	spin_lock_bh(&psock->ingress_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
	sk_psock_cork_free(psock);
	__sk_psock_zap_ingress(psock);
	spin_unlock_bh(&psock->ingress_lock);
}

@ -822,7 +810,8 @@ static void sk_psock_destroy(struct work_struct *work)

	sk_psock_done_strp(psock);

	cancel_work_sync(&psock->work);
	cancel_delayed_work_sync(&psock->work);
	__sk_psock_zap_ingress(psock);
	mutex_destroy(&psock->work_mutex);

	psock_progs_drop(&psock->progs);
@ -937,7 +926,7 @@ static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_work(&psock_other->work);
	schedule_delayed_work(&psock_other->work, 0);
	spin_unlock_bh(&psock_other->ingress_lock);
	return 0;
}
@ -989,10 +978,8 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
		err = -EIO;
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			skb_bpf_redirect_clear(skb);
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			goto out_free;
		}

		skb_bpf_set_ingress(skb);

@ -1017,22 +1004,23 @@ static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
			spin_lock_bh(&psock->ingress_lock);
			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
				skb_queue_tail(&psock->ingress_skb, skb);
				schedule_work(&psock->work);
				schedule_delayed_work(&psock->work, 0);
				err = 0;
			}
			spin_unlock_bh(&psock->ingress_lock);
			if (err < 0) {
				skb_bpf_redirect_clear(skb);
			if (err < 0)
				goto out_free;
			}
		}
		break;
	case __SK_REDIRECT:
		tcp_eat_skb(psock->sk, skb);
		err = sk_psock_skb_redirect(psock, skb);
		break;
	case __SK_DROP:
	default:
out_free:
		skb_bpf_redirect_clear(skb);
		tcp_eat_skb(psock->sk, skb);
		sock_drop(psock->sk, skb);
	}

@ -1048,7 +1036,7 @@ static void sk_psock_write_space(struct sock *sk)
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_work(&psock->work);
			schedule_delayed_work(&psock->work, 0);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
@ -1077,8 +1065,7 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		if (ret == SK_PASS)
			skb_bpf_set_strparser(skb);
		skb_bpf_set_strparser(skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
@ -1180,12 +1167,11 @@ static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
	int ret = __SK_DROP;
	int len = skb->len;

	skb_get(skb);

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		tcp_eat_skb(sk, skb);
		sock_drop(sk, skb);
		goto out;
	}
@ -1209,10 +1195,20 @@ static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	int copied;

	if (unlikely(!sock || !sock->ops || !sock->ops->read_skb))
		return;
	sock->ops->read_skb(sk, sk_psock_verdict_recv);
	copied = sock->ops->read_skb(sk, sk_psock_verdict_recv);
	if (copied >= 0) {
		struct sk_psock *psock;

		rcu_read_lock();
		psock = sk_psock(sk);
		if (psock)
			psock->saved_data_ready(sk);
		rcu_read_unlock();
	}
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
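The key ownership change in sk_psock_backlog() above is the switch from dequeue-then-process to peek-then-dequeue: on -EAGAIN the skb now stays queued instead of being parked in work_state. The pattern reduced to a toy helper, with try_consume() standing in for sk_psock_handle_skb():

static int try_consume(struct sk_buff *skb)
{
	return skb->len ? 0 : -EAGAIN;	/* placeholder policy */
}

static void drain(struct sk_buff_head *queue)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(queue))) {	/* look, but keep it queued */
		if (try_consume(skb) == -EAGAIN)
			break;			/* still queued: safe to retry later */
		skb = skb_dequeue(queue);	/* fully consumed: unlink... */
		kfree_skb(skb);			/* ...and release */
	}
}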
@ -1624,9 +1624,10 @@ void sock_map_close(struct sock *sk, long timeout)
	rcu_read_unlock();
	sk_psock_stop(psock);
	release_sock(sk);
	cancel_work_sync(&psock->work);
	cancel_delayed_work_sync(&psock->work);
	sk_psock_put(sk, psock);
}

/* Make sure we do not recurse. This is a bug.
 * Leak the socket instead of crashing on a stack overflow.
 */
@ -589,6 +589,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)

	add_wait_queue(sk_sleep(sk), &wait);
	sk->sk_write_pending += writebias;
	sk->sk_wait_pending++;

	/* Basic assumption: if someone sets sk->sk_err, he _must_
	 * change state of the socket from TCP_SYN_*.
@ -604,6 +605,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	sk->sk_write_pending -= writebias;
	sk->sk_wait_pending--;
	return timeo;
}

@ -1143,6 +1143,7 @@ struct sock *inet_csk_clone_lock(const struct sock *sk,
	if (newsk) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_wait_pending = 0;
		inet_sk_set_state(newsk, TCP_SYN_RECV);
		newicsk->icsk_bind_hash = NULL;
		newicsk->icsk_bind2_hash = NULL;
@ -1570,7 +1570,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
static void __tcp_cleanup_rbuf(struct sock *sk, int copied)
void __tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool time_to_ack = false;
@ -1772,7 +1772,6 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
		WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
		tcp_flags = TCP_SKB_CB(skb)->tcp_flags;
		used = recv_actor(sk, skb);
		consume_skb(skb);
		if (used < 0) {
			if (!copied)
				copied = used;
@ -1786,14 +1785,6 @@ int tcp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
			break;
		}
	}
	WRITE_ONCE(tp->copied_seq, seq);

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied > 0)
		__tcp_cleanup_rbuf(sk, copied);

	return copied;
}
EXPORT_SYMBOL(tcp_read_skb);
@ -3088,6 +3079,12 @@ int tcp_disconnect(struct sock *sk, int flags)
	int old_state = sk->sk_state;
	u32 seq;

	/* Deny disconnect if other threads are blocked in sk_wait_event()
	 * or inet_wait_for_connect().
	 */
	if (sk->sk_wait_pending)
		return -EBUSY;

	if (old_state != TCP_CLOSE)
		tcp_set_state(sk, TCP_CLOSE);

@ -11,6 +11,24 @@
#include <net/inet_common.h>
#include <net/tls.h>

void tcp_eat_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tcp;
	int copied;

	if (!skb || !skb->len || !sk_is_tcp(sk))
		return;

	if (skb_bpf_strparser(skb))
		return;

	tcp = tcp_sk(sk);
	copied = tcp->copied_seq + skb->len;
	WRITE_ONCE(tcp->copied_seq, copied);
	tcp_rcv_space_adjust(sk);
	__tcp_cleanup_rbuf(sk, skb->len);
}

static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
			   struct sk_msg *msg, u32 apply_bytes, int flags)
{
@ -174,14 +192,34 @@ static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
	return ret;
}

static bool is_next_msg_fin(struct sk_psock *psock)
{
	struct scatterlist *sge;
	struct sk_msg *msg_rx;
	int i;

	msg_rx = sk_psock_peek_msg(psock);
	i = msg_rx->sg.start;
	sge = sk_msg_elem(msg_rx, i);
	if (!sge->length) {
		struct sk_buff *skb = msg_rx->skb;

		if (skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
			return true;
	}
	return false;
}

static int tcp_bpf_recvmsg_parser(struct sock *sk,
				  struct msghdr *msg,
				  size_t len,
				  int flags,
				  int *addr_len)
{
	struct tcp_sock *tcp = tcp_sk(sk);
	u32 seq = tcp->copied_seq;
	struct sk_psock *psock;
	int copied;
	int copied = 0;

	if (unlikely(flags & MSG_ERRQUEUE))
		return inet_recv_error(sk, msg, len, addr_len);
@ -194,8 +232,43 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
		return tcp_recvmsg(sk, msg, len, flags, addr_len);

	lock_sock(sk);

	/* We may have received data on the sk_receive_queue pre-accept and
	 * then we can not use read_skb in this context because we haven't
	 * assigned a sk_socket yet so have no link to the ops. The work-around
	 * is to check the sk_receive_queue and in these cases read skbs off
	 * queue again. The read_skb hook is not running at this point because
	 * of lock_sock so we avoid having multiple runners in read_skb.
	 */
	if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
		tcp_data_ready(sk);
		/* This handles the ENOMEM errors if we both receive data
		 * pre accept and are already under memory pressure. At least
		 * let user know to retry.
		 */
		if (unlikely(!skb_queue_empty(&sk->sk_receive_queue))) {
			copied = -EAGAIN;
			goto out;
		}
	}

msg_bytes_ready:
	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
	/* The typical case for EFAULT is the socket was gracefully
	 * shutdown with a FIN pkt. So check here the other case is
	 * some error on copy_page_to_iter which would be unexpected.
	 * On fin return correct return code to zero.
	 */
	if (copied == -EFAULT) {
		bool is_fin = is_next_msg_fin(psock);

		if (is_fin) {
			copied = 0;
			seq++;
			goto out;
		}
	}
	seq += copied;
	if (!copied) {
		long timeo;
		int data;
@ -233,6 +306,10 @@ static int tcp_bpf_recvmsg_parser(struct sock *sk,
		copied = -EAGAIN;
	}
out:
	WRITE_ONCE(tcp->copied_seq, seq);
	tcp_rcv_space_adjust(sk);
	if (copied > 0)
		__tcp_cleanup_rbuf(sk, copied);
	release_sock(sk);
	sk_psock_put(sk, psock);
	return copied;
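The copied_seq bookkeeping that both tcp_eat_skb() and tcp_bpf_recvmsg_parser() perform above follows one pattern, shown here in miniature as an illustrative helper (not kernel code):

static void publish_progress(struct tcp_sock *tcp, int copied)
{
	u32 seq = tcp->copied_seq;	/* snapshot, taken under lock_sock() */

	seq += copied;			/* advance by bytes handed to userspace */
	WRITE_ONCE(tcp->copied_seq, seq); /* pairs with READ_ONCE() in lockless
					   * readers such as tcp_inq() hints */
}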
@ -1806,7 +1806,7 @@ EXPORT_SYMBOL(__skb_recv_udp);
int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	int err, copied;
	int err;

try_again:
	skb = skb_recv_udp(sk, MSG_DONTWAIT, &err);
@ -1825,10 +1825,7 @@ int udp_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
	}

	WARN_ON_ONCE(!skb_set_owner_sk_safe(skb, sk));
	copied = recv_actor(sk, skb);
	kfree_skb(skb);

	return copied;
	return recv_actor(sk, skb);
}
EXPORT_SYMBOL(udp_read_skb);

@ -2552,7 +2552,7 @@ static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
	struct unix_sock *u = unix_sk(sk);
	struct sk_buff *skb;
	int err, copied;
	int err;

	mutex_lock(&u->iolock);
	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
@ -2560,10 +2560,7 @@ static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
	if (!skb)
		return err;

	copied = recv_actor(sk, skb);
	kfree_skb(skb);

	return copied;
	return recv_actor(sk, skb);
}

/*
@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <stdlib.h>
@ -21,6 +21,11 @@

#include <linux/gunyah.h>

#define DEFAULT_GUEST_BASE	0x80000000
#define DEFAULT_GUEST_SIZE	0x6400000  /* 100 MiB */
#define DEFAULT_DTB_OFFSET	0x45f0000  /* 70MiB - 64 KiB */
#define DEFAULT_RAMDISK_OFFSET	0x4600000  /* 70MiB */

struct vm_config {
	int image_fd;
	int dtb_fd;
@ -29,7 +34,6 @@ struct vm_config {
	uint64_t guest_base;
	uint64_t guest_size;

	uint64_t image_offset;
	off_t image_size;
	uint64_t dtb_offset;
	off_t dtb_size;
@ -44,7 +48,6 @@ static struct option options[] = {
	{ "ramdisk", optional_argument, NULL, 'r' },
	{ "base", optional_argument, NULL, 'B' },
	{ "size", optional_argument, NULL, 'S' },
	{ "image_offset", optional_argument, NULL, 'I' },
	{ "dtb_offset", optional_argument, NULL, 'D' },
	{ "ramdisk_offset", optional_argument, NULL, 'R' },
	{ }
@ -58,12 +61,12 @@ static void print_help(char *cmd)
	"  --image, -i <image>		VM image file to load (e.g. a kernel Image) [Required]\n"
	"  --dtb, -d <dtb>		Devicetree file to load [Required]\n"
	"  --ramdisk, -r <ramdisk>	Ramdisk file to load\n"
	"  --base, -B <address>		Set the base address of guest's memory [Default: 0x80000000]\n"
	"  --size, -S <number>		The number of bytes large to make the guest's memory [Default: 0x6400000 (100 MB)]\n"
	"  --image_offset, -I <number>	Offset into guest memory to load the VM image file [Default: 0x10000]\n"
	"  --dtb_offset, -D <number>	Offset into guest memory to load the DTB [Default: 0]\n"
	"  --ramdisk_offset, -R <number>	Offset into guest memory to load a ramdisk [Default: 0x4600000]\n"
	, cmd);
	"  --base, -B <address>		Set the base address of guest's memory [Default: 0x%08x]\n"
	"  --size, -S <number>		The number of bytes large to make the guest's memory [Default: 0x%08x]\n"
	"  --dtb_offset, -D <number>	Offset into guest memory to load the DTB [Default: 0x%08x]\n"
	"  --ramdisk_offset, -R <number>	Offset into guest memory to load a ramdisk [Default: 0x%08x]\n"
	, cmd, DEFAULT_GUEST_BASE, DEFAULT_GUEST_SIZE,
	DEFAULT_DTB_OFFSET, DEFAULT_RAMDISK_OFFSET);
}

int main(int argc, char **argv)
@ -74,18 +77,19 @@ int main(int argc, char **argv)
	char *guest_mem;
	struct vm_config config = {
		/* Defaults good enough to boot static kernel and a basic ramdisk */
		.image_fd = -1,
		.dtb_fd = -1,
		.ramdisk_fd = -1,
		.guest_base = 0x80000000,
		.guest_size = 0x6400000, /* 100 MB */
		.image_offset = 0,
		.dtb_offset = 0x45f0000,
		.ramdisk_offset = 0x4600000, /* put at +70MB (30MB for ramdisk) */
		.guest_base = DEFAULT_GUEST_BASE,
		.guest_size = DEFAULT_GUEST_SIZE,
		.dtb_offset = DEFAULT_DTB_OFFSET,
		.ramdisk_offset = DEFAULT_RAMDISK_OFFSET,
	};
	struct stat st;
	int opt, optidx, ret = 0;
	long l;

	while ((opt = getopt_long(argc, argv, "hi:d:r:B:S:I:D:R:c:", options, &optidx)) != -1) {
	while ((opt = getopt_long(argc, argv, "hi:d:r:B:S:D:R:c:", options, &optidx)) != -1) {
		switch (opt) {
		case 'i':
			config.image_fd = open(optarg, O_RDONLY | O_CLOEXEC);
@ -139,14 +143,6 @@ int main(int argc, char **argv)
			}
			config.guest_size = l;
			break;
		case 'I':
			l = strtol(optarg, NULL, 0);
			if (l == LONG_MIN) {
				perror("Failed to parse image offset");
				return -1;
			}
			config.image_offset = l;
			break;
		case 'D':
			l = strtol(optarg, NULL, 0);
			if (l == LONG_MIN) {
@ -172,13 +168,13 @@ int main(int argc, char **argv)
		}
	}

	if (!config.image_fd || !config.dtb_fd) {
	if (config.image_fd == -1 || config.dtb_fd == -1) {
		print_help(argv[0]);
		return -1;
	}

	if (config.image_offset + config.image_size > config.guest_size) {
		fprintf(stderr, "Image offset and size puts it outside guest memory. Make image smaller or increase guest memory size.\n");
	if (config.image_size > config.guest_size) {
		fprintf(stderr, "Image size puts it outside guest memory. Make image smaller or increase guest memory size.\n");
		return -1;
	}

@ -222,7 +218,7 @@ int main(int argc, char **argv)
		return -1;
	}

	if (read(config.image_fd, guest_mem + config.image_offset, config.image_size) < 0) {
	if (read(config.image_fd, guest_mem, config.image_size) < 0) {
		perror("Failed to read image into guest memory");
		return -1;
	}
@ -264,7 +260,7 @@ int main(int argc, char **argv)
	}

	while (1)
		sleep(10);
	pause();

	return 0;
}
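One fix in the sample above is easy to miss: the old guard "if (!config.image_fd)" treats descriptor 0 as "missing", but 0 is a perfectly valid file descriptor. The corrected idiom, shown as a standalone userspace fragment:

#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	int fd = -1;			/* sentinel, mirrors .image_fd = -1 */

	fd = open("Image", O_RDONLY);	/* may legitimately return 0 */
	if (fd == -1)			/* correct test; "if (!fd)" is not */
		perror("open");
	return 0;
}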
@ -119,6 +119,56 @@ MODULE_PARM_DESC(skip_validation, "Skip unit descriptor validation (default: no)
static DEFINE_MUTEX(register_mutex);
static struct snd_usb_audio *usb_chip[SNDRV_CARDS];
static struct usb_driver usb_audio_driver;
static struct snd_usb_audio_vendor_ops *usb_vendor_ops;

int snd_vendor_set_ops(struct snd_usb_audio_vendor_ops *ops)
{
	if ((!ops->set_interface) ||
	    (!ops->set_pcm_intf) ||
	    (!ops->set_pcm_connection))
		return -EINVAL;

	usb_vendor_ops = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(snd_vendor_set_ops);

struct snd_usb_audio_vendor_ops *snd_vendor_get_ops(void)
{
	return usb_vendor_ops;
}

int snd_vendor_set_interface(struct usb_device *udev,
			     struct usb_host_interface *intf,
			     int iface, int alt)
{
	struct snd_usb_audio_vendor_ops *ops = snd_vendor_get_ops();

	if (ops)
		return ops->set_interface(udev, intf, iface, alt);
	return 0;
}

int snd_vendor_set_pcm_intf(struct usb_interface *intf, int iface, int alt,
			    int direction, struct snd_usb_substream *subs)
{
	struct snd_usb_audio_vendor_ops *ops = snd_vendor_get_ops();

	if (ops)
		return ops->set_pcm_intf(intf, iface, alt, direction, subs);
	return 0;
}

int snd_vendor_set_pcm_connection(struct usb_device *udev,
				  enum snd_vendor_pcm_open_close onoff,
				  int direction)
{
	struct snd_usb_audio_vendor_ops *ops = snd_vendor_get_ops();

	if (ops)
		return ops->set_pcm_connection(udev, onoff, direction);
	return 0;
}

/*
 * disconnect streams
@ -216,4 +216,19 @@ struct snd_usb_stream {
	struct list_head list;
};

int snd_vendor_set_ops(struct snd_usb_audio_vendor_ops *vendor_ops);
struct snd_usb_audio_vendor_ops *snd_vendor_get_ops(void);
int snd_vendor_set_interface(struct usb_device *udev,
			     struct usb_host_interface *alts,
			     int iface, int alt);
int snd_vendor_set_rate(int iface, int rate, int alt);
int snd_vendor_set_pcm_intf(struct usb_interface *intf, int iface, int alt,
			    int direction, struct snd_usb_substream *subs);
int snd_vendor_set_pcm_connection(struct usb_device *udev,
				  enum snd_vendor_pcm_open_close onoff,
				  int direction);
int snd_vendor_set_pcm_binterval(const struct audioformat *fp,
				 const struct audioformat *found,
				 int *cur_attr, int *attr);

#endif /* __USBAUDIO_CARD_H */
@ -641,6 +641,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
	struct snd_usb_audio *chip = subs->stream->chip;
	int retry = 0;
	int ret;
	struct usb_interface *iface;

	ret = snd_usb_lock_shutdown(chip);
	if (ret < 0)
@ -653,6 +654,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
 again:
	if (subs->sync_endpoint) {
		ret = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);

		if (ret < 0)
			goto unlock;
	}
@ -664,6 +666,14 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
		snd_usb_set_format_quirk(subs, subs->cur_audiofmt);
	ret = 0;

	iface = usb_ifnum_to_if(chip->dev, subs->data_endpoint->iface);

	ret = snd_vendor_set_pcm_intf(iface, subs->data_endpoint->iface,
				      subs->data_endpoint->altsetting,
				      subs->direction, subs);
	if (!ret)
		goto unlock;

	/* reset the pointer */
	subs->buffer_bytes = frames_to_bytes(runtime, runtime->buffer_size);
	subs->inflight_bytes = 0;
@ -1162,6 +1172,11 @@ static int snd_usb_pcm_open(struct snd_pcm_substream *substream)
	struct snd_usb_substream *subs = &as->substream[direction];
	int ret;

	ret = snd_vendor_set_pcm_connection(subs->dev, SOUND_PCM_OPEN,
					    direction);
	if (ret)
		return ret;

	runtime->hw = snd_usb_hardware;
	/* need an explicit sync to catch applptr update in low-latency mode */
	if (direction == SNDRV_PCM_STREAM_PLAYBACK &&
@ -1195,6 +1210,11 @@ static int snd_usb_pcm_close(struct snd_pcm_substream *substream)
	struct snd_usb_substream *subs = &as->substream[direction];
	int ret;

	ret = snd_vendor_set_pcm_connection(subs->dev, SOUND_PCM_CLOSE,
					    direction);
	if (ret)
		return ret;

	snd_media_stop_pipeline(subs);

	if (!snd_usb_lock_shutdown(subs->stream->chip)) {
@ -1228,6 +1228,8 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip,
		snd_usb_init_pitch(chip, fp);
		snd_usb_init_sample_rate(chip, fp, fp->rate_max);
		usb_set_interface(chip->dev, iface_no, altno);
		if (protocol > UAC_VERSION_1)
			snd_vendor_set_interface(chip->dev, alts, iface_no, 0);
	}
	return 0;
}
@ -20,6 +20,7 @@

struct media_device;
struct media_intf_devnode;
struct snd_usb_substream;

#define MAX_CARD_INTERFACES	16

@ -210,4 +211,37 @@ extern bool snd_usb_skip_validation;
#define QUIRK_FLAG_FORCE_IFACE_RESET	(1U << 20)
#define QUIRK_FLAG_FIXED_RATE		(1U << 21)

struct audioformat;

enum snd_vendor_pcm_open_close {
	SOUND_PCM_CLOSE = 0,
	SOUND_PCM_OPEN,
};

/**
 * struct snd_usb_audio_vendor_ops - function callbacks for USB audio accelerators
 * @set_interface: called when an interface is initialized
 * @set_pcm_intf: called when the pcm interface is set
 * @set_pcm_connection: called when pcm is opened/closed
 *
 * Set of callbacks for some accelerated USB audio streaming hardware.
 *
 * TODO: make this USB host-controller specific, right now this only works for
 * one USB controller in the system at a time, which is only realistic for
 * self-contained systems like phones.
 */
struct snd_usb_audio_vendor_ops {
	int (*set_interface)(struct usb_device *udev,
			     struct usb_host_interface *alts,
			     int iface, int alt);
	int (*set_pcm_intf)(struct usb_interface *intf, int iface, int alt,
			    int direction, struct snd_usb_substream *subs);
	int (*set_pcm_connection)(struct usb_device *udev,
				  enum snd_vendor_pcm_open_close onoff,
				  int direction);
	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);
	ANDROID_KABI_RESERVE(3);
	ANDROID_KABI_RESERVE(4);
};
#endif /* __USBAUDIO_H */
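A hypothetical vendor module handing its callback table to the core; the three callbacks stubbed here are exactly the ones snd_vendor_set_ops() in the card.c hunk rejects as mandatory, while the function bodies and names are invented:

static int my_set_interface(struct usb_device *udev,
			    struct usb_host_interface *alts, int iface, int alt)
{
	return 0;
}

static int my_set_pcm_intf(struct usb_interface *intf, int iface, int alt,
			   int direction, struct snd_usb_substream *subs)
{
	return 0;
}

static int my_set_pcm_connection(struct usb_device *udev,
				 enum snd_vendor_pcm_open_close onoff,
				 int direction)
{
	return 0;
}

static struct snd_usb_audio_vendor_ops my_audio_ops = {
	.set_interface = my_set_interface,
	.set_pcm_intf = my_set_pcm_intf,
	.set_pcm_connection = my_set_pcm_connection,
};

/* in the vendor driver's init path: snd_vendor_set_ops(&my_audio_ops); */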
@ -1,2 +0,0 @@
# include OWNERS from the authoritative android-mainline branch
include kernel/common:android-mainline:/tools/testing/selftests/filesystems/incfs/OWNERS
@ -1 +0,0 @@
file:/fs/incfs/OWNERS