Merge remote-tracking branch into HEAD
* keystone/mirror-android14-6.1: (63 commits)
  ANDROID: GKI: update the ABI symbol list
  ANDROID: freezer: Add vendor hook to freezer for GKI purpose.
  ANDROID: freezer: export the freezer_cgrp_subsys for GKI purpose.
  ANDROID: GKI: update the ABI symbol list
  ANDROID: Add vendor hooks for binder perf tuning
  ANDROID: Add vendor hooks to signal.
  ANDROID: Update the ABI symbol list
  ANDROID: page_pinner: add missing page_pinner_put_page
  ANDROID: page_pinner: prevent pp_buffer uninitialized access
  ANDROID: page_pinner: prevent pp_buffer access before initialization
  ANDROID: mm: fix use-after free of page_ext in page_pinner
  ANDROID: mm: introduce page_pinner
  ANDROID: abi_gki_aarch64_qcom: Add gh_rm_register_platform_ops
  ANDROID: gunyah: Sync remaining gunyah drivers with latest
  ANDROID: gunyah: Sync with latest "mailbox: Add Gunyah message queue mailbox"
  ANDROID: gunyah: Sync with latest "gunyah: Common types and error codes for Gunyah hypercalls"
  ANDROID: gunyah: Sync with latest hypercalls
  ANDROID: gunyah: Sync with latest documentation and UAPI
  ANDROID: gunyah: Sync with latest "firmware: qcom_scm: Register Gunyah platform ops"
  BACKPORT: firmware: qcom_scm: Use fixed width src vm bitmap
  ...

Change-Id: I8c82b8ddecaa7f30ff816b51ef08003059a97a6c
Signed-off-by: keystone-kernel-automerger <keystone-kernel-automerger@google.com>
commit 69910e7ad6
@@ -43,6 +43,7 @@ filegroup(
         "android/abi_gki_aarch64_oplus",
         "android/abi_gki_aarch64_pixel",
         "android/abi_gki_aarch64_qcom",
+        "android/abi_gki_aarch64_unisoc",
         "android/abi_gki_aarch64_virtual_device",
         "android/abi_gki_aarch64_vivo",
         "android/abi_gki_aarch64_xiaomi",
@@ -12,7 +12,7 @@ most of the configuration about a Gunyah virtual machine is described in the
 VM's devicetree. The devicetree is generated by userspace. Interacting with the
 virtual machine is still done via the kernel and VM configuration requires some
 of the corresponding functionality to be set up in the kernel. For instance,
-sharing userspace memory with a VM is done via the GH_VM_SET_USER_MEM_REGION
+sharing userspace memory with a VM is done via the `GH_VM_SET_USER_MEM_REGION`_
 ioctl. The VM itself is configured to use the memory region via the
 devicetree.
 
@@ -22,13 +22,13 @@ Gunyah Functions
 Components of a Gunyah VM's configuration that need kernel configuration are
 called "functions" and are built on top of a framework. Functions are identified
 by a string and have some argument(s) to configure them. They are typically
-created by the `GH_VM_ADD_FUNCTION` ioctl.
+created by the `GH_VM_ADD_FUNCTION`_ ioctl.
 
 Functions typically will always do at least one of these operations:
 
 1. Create resource ticket(s). Resource tickets allow a function to register
    itself as the client for a Gunyah resource (e.g. doorbell or vCPU) and
-   the function is given the pointer to the `struct gh_resource` when the
+   the function is given the pointer to the &struct gh_resource when the
    VM is starting.
 
 2. Register IO handler(s). IO handlers allow a function to handle stage-2 faults
@@ -46,7 +46,7 @@ IOCTLs and userspace VMM flows
 
 The kernel exposes a char device interface at /dev/gunyah.
 
-To create a VM, use the GH_CREATE_VM ioctl. A successful call will return a
+To create a VM, use the `GH_CREATE_VM`_ ioctl. A successful call will return a
 "Gunyah VM" file descriptor.
 
 /dev/gunyah API Descriptions
@@ -75,22 +75,13 @@ be configured to accept these at boot-up.
 
 The guest physical address is used by Linux kernel to check that the requested
 user regions do not overlap and to help find the corresponding memory region
-for calls like GH_VM_SET_DTB_CONFIG. It must be page aligned.
+for calls like `GH_VM_SET_DTB_CONFIG`_. It must be page aligned.
 
-memory_size and userspace_addr must be page-aligned.
-
-The flags field of gh_userspace_memory_region accepts the following bits. All
-other bits must be 0 and are reserved for future use. The ioctl will return
--EINVAL if an unsupported bit is detected.
-
-- GH_MEM_ALLOW_READ/GH_MEM_ALLOW_WRITE/GH_MEM_ALLOW_EXEC sets read/write/exec
-  permissions for the guest, respectively.
-
-To add a memory region, call GH_VM_SET_USER_MEM_REGION with fields set as
+To add a memory region, call `GH_VM_SET_USER_MEM_REGION`_ with fields set as
 described above.
 
 .. kernel-doc:: include/uapi/linux/gunyah.h
-   :identifiers: gh_userspace_memory_region
+   :identifiers: gh_userspace_memory_region gh_mem_flags
 
 GH_VM_SET_DTB_CONFIG
 ~~~~~~~~~~~~~~~~~~~~
@@ -111,20 +102,20 @@ GH_VM_ADD_FUNCTION
 ~~~~~~~~~~~~~~~~~~
 
 This ioctl registers a Gunyah VM function with the VM manager. The VM function
-is described with a `type` string and some arguments for that type. Typically,
-the function is added before the VM starts, but the function doesn't "operate"
-until the VM starts with GH_VM_START: e.g. vCPU ioclts will all return an error
-until the VM starts because the vCPUs don't exist until the VM is started. This
-allows the VMM to set up all the kernel functionality needed for the VM *before*
-the VM starts.
+is described with a &struct gh_fn_desc.type and some arguments for that type.
+Typically, the function is added before the VM starts, but the function doesn't
+"operate" until the VM starts with `GH_VM_START`_. For example, vCPU ioclts will
+all return an error until the VM starts because the vCPUs don't exist until the
+VM is started. This allows the VMM to set up all the kernel functions needed for
+the VM *before* the VM starts.
 
 .. kernel-doc:: include/uapi/linux/gunyah.h
-   :identifiers: gh_fn_desc
+   :identifiers: gh_fn_desc gh_fn_type
 
-The possible types are documented below:
+The argument types are documented below:
 
 .. kernel-doc:: include/uapi/linux/gunyah.h
-   :identifiers: GH_FN_VCPU gh_fn_vcpu_arg GH_FN_IRQFD gh_fn_irqfd_arg GH_FN_IOEVENTFD gh_fn_ioeventfd_arg
+   :identifiers: gh_fn_vcpu_arg gh_fn_irqfd_arg gh_irqfd_flags gh_fn_ioeventfd_arg gh_ioeventfd_flags
 
 Gunyah VCPU API Descriptions
 ----------------------------
@@ -137,15 +128,15 @@ GH_VCPU_RUN
 This ioctl is used to run a guest virtual cpu. While there are no
 explicit parameters, there is an implicit parameter block that can be
 obtained by mmap()ing the vcpu fd at offset 0, with the size given by
-GH_VCPU_MMAP_SIZE. The parameter block is formatted as a 'struct
+`GH_VCPU_MMAP_SIZE`_. The parameter block is formatted as a 'struct
 gh_vcpu_run' (see below).
 
 GH_VCPU_MMAP_SIZE
 ~~~~~~~~~~~~~~~~~
 
-The GH_VCPU_RUN ioctl communicates with userspace via a shared
+The `GH_VCPU_RUN`_ ioctl communicates with userspace via a shared
 memory region. This ioctl returns the size of that region. See the
-GH_VCPU_RUN documentation for details.
+`GH_VCPU_RUN`_ documentation for details.
 
 .. kernel-doc:: include/uapi/linux/gunyah.h
-   :identifiers: gh_vcpu_run gh_vm_exit_info
+   :identifiers: gh_vcpu_exit gh_vcpu_run gh_vm_status gh_vm_exit_info
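Taken together, the ioctls documented above form a short userspace flow: create the VM, set memory and the DTB config, add functions, start the VM, then run its vCPUs. Below is a minimal, hypothetical C sketch of that flow. It assumes the UAPI in <linux/gunyah.h> as described in this document; field names such as .label and .exit_reason are assumptions drawn from that header, and all error handling is elided.

/* Hedged sketch of the documented VMM flow; GH_* names per this document,
 * struct layouts assumed from <linux/gunyah.h>. Not a reference implementation.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/gunyah.h>

int run_guest(void *guest_mem, size_t mem_size, __u64 guest_phys,
              __u64 dtb_off, __u64 dtb_size)
{
        int gunyah = open("/dev/gunyah", O_RDWR);
        int vm = ioctl(gunyah, GH_CREATE_VM, 0);   /* returns "Gunyah VM" fd */

        struct gh_userspace_memory_region region = {
                .label = 0,                        /* assumed field */
                .flags = GH_MEM_ALLOW_READ | GH_MEM_ALLOW_WRITE | GH_MEM_ALLOW_EXEC,
                .guest_phys_addr = guest_phys,     /* must be page aligned */
                .memory_size = mem_size,
                .userspace_addr = (__u64)guest_mem,
        };
        ioctl(vm, GH_VM_SET_USER_MEM_REGION, &region);

        struct gh_vm_dtb_config dtb = {            /* assumed struct name */
                .guest_phys_addr = guest_phys + dtb_off,
                .size = dtb_size,
        };
        ioctl(vm, GH_VM_SET_DTB_CONFIG, &dtb);

        /* Add a vCPU function before the VM starts; it only "operates" after
         * GH_VM_START, as the text above explains. */
        struct gh_fn_vcpu_arg vcpu_arg = { .id = 0 };
        struct gh_fn_desc fn = {
                .type = GH_FN_VCPU,
                .arg_size = sizeof(vcpu_arg),
                .arg = (__u64)&vcpu_arg,
        };
        int vcpu = ioctl(vm, GH_VM_ADD_FUNCTION, &fn);

        ioctl(vm, GH_VM_START, 0);

        /* The vCPU's shared parameter block is mmap()ed at offset 0. */
        size_t run_sz = ioctl(vcpu, GH_VCPU_MMAP_SIZE, 0);
        struct gh_vcpu_run *run = mmap(NULL, run_sz, PROT_READ | PROT_WRITE,
                                       MAP_SHARED, vcpu, 0);
        while (ioctl(vcpu, GH_VCPU_RUN, 0) == 0) {
                /* inspect run->exit_reason / gh_vm_exit_info here (assumed names) */
        }
        return 0;
}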
@@ -7790,6 +7790,11 @@ pointer_reference {
   kind: POINTER
   pointee_type_id: 0x6976b87f
 }
+pointer_reference {
+  id: 0x10de2fab
+  kind: POINTER
+  pointee_type_id: 0x69385830
+}
 pointer_reference {
   id: 0x10e15e7e
   kind: POINTER
@@ -28460,6 +28465,11 @@ typedef {
   name: "bh_end_io_t"
   referred_type_id: 0x17592b81
 }
+typedef {
+  id: 0x95ef30d6
+  name: "binder_size_t"
+  referred_type_id: 0xedf277ba
+}
 typedef {
   id: 0x86d95287
   name: "binder_uintptr_t"
@@ -51593,6 +51603,11 @@ member {
   type_id: 0xd359db99
   offset: 192
 }
+member {
+  id: 0x9ad307bc
+  name: "buf"
+  type_id: 0xc8c766a0
+}
 member {
   id: 0x9aeff0bf
   name: "buf"
@@ -51795,6 +51810,11 @@ member {
   name: "buffer"
   type_id: 0x8e7b8b93
 }
+member {
+  id: 0x3358d289
+  name: "buffer"
+  type_id: 0x86d95287
+}
 member {
   id: 0x335a61cc
   name: "buffer"
@@ -58866,6 +58886,12 @@ member {
   type_id: 0xd3c80119
   offset: 24256
 }
+member {
+  id: 0x5406c379
+  name: "code"
+  type_id: 0xe62ebf07
+  offset: 128
+}
 member {
   id: 0x5406c75b
   name: "code"
@@ -61532,6 +61558,12 @@ member {
   type_id: 0x86d95287
   offset: 768
 }
+member {
+  id: 0x58cc89fb
+  name: "cookie"
+  type_id: 0x86d95287
+  offset: 64
+}
 member {
   id: 0x14bf8247
   name: "cookie1"
@@ -66156,6 +66188,12 @@ member {
   type_id: 0x41fadac3
   offset: 64
 }
+member {
+  id: 0xffd54088
+  name: "data"
+  type_id: 0x4765767f
+  offset: 384
+}
 member {
   id: 0xffd88cf6
   name: "data"
@@ -66487,6 +66525,12 @@ member {
   type_id: 0x5d8155a5
   offset: 40
 }
+member {
+  id: 0x569ccc07
+  name: "data_size"
+  type_id: 0x95ef30d6
+  offset: 256
+}
 member {
   id: 0x56c02652
   name: "data_size"
@@ -92895,6 +92939,11 @@ member {
   type_id: 0xe62ebf07
   offset: 128
 }
+member {
+  id: 0xb805bbb2
+  name: "handle"
+  type_id: 0xe62ebf07
+}
 member {
   id: 0xb80b9f8f
   name: "handle"
@@ -134069,12 +134118,24 @@ member {
   type_id: 0x8c43dc29
   offset: 320
 }
+member {
+  id: 0x35690218
+  name: "offsets"
+  type_id: 0x86d95287
+  offset: 64
+}
 member {
   id: 0x3572f05b
   name: "offsets"
   type_id: 0x9d2c4a95
   offset: 832
 }
+member {
+  id: 0xaa221c83
+  name: "offsets_size"
+  type_id: 0x95ef30d6
+  offset: 320
+}
 member {
   id: 0xaa43c86e
   name: "offsets_size"
@@ -149099,6 +149160,11 @@ member {
   type_id: 0x86d95287
   offset: 704
 }
+member {
+  id: 0x46761387
+  name: "ptr"
+  type_id: 0x86d95287
+}
 member {
   id: 0x46c17f73
   name: "ptr"
@@ -149119,6 +149185,11 @@ member {
   name: "ptr"
   type_id: 0x2f5073a5
 }
+member {
+  id: 0x46e47dff
+  name: "ptr"
+  type_id: 0x14b72a39
+}
 member {
   id: 0x46e877b0
   name: "ptr"
@@ -165827,6 +165898,18 @@ member {
   type_id: 0xe90b32b7
   offset: 928
 }
+member {
+  id: 0x38f30d00
+  name: "sender_euid"
+  type_id: 0xba3f457a
+  offset: 224
+}
+member {
+  id: 0xd3bc24ef
+  name: "sender_pid"
+  type_id: 0x763389c7
+  offset: 192
+}
 member {
   id: 0xc26d459e
   name: "sendmsg"
@@ -180727,6 +180810,11 @@ member {
   type_id: 0x6720d32f
   offset: 67008
 }
+member {
+  id: 0xb3da3bc9
+  name: "target"
+  type_id: 0x78ea2ea9
+}
 member {
   id: 0x30229734
   name: "target_alloc"
@@ -201501,6 +201589,15 @@ struct_union {
     member_id: 0x9152ae19
   }
 }
+struct_union {
+  id: 0x14b72a39
+  kind: STRUCT
+  definition {
+    bytesize: 16
+    member_id: 0x3358d289
+    member_id: 0x35690218
+  }
+}
 struct_union {
   id: 0x14e96bc2
   kind: STRUCT
@@ -204130,6 +204227,15 @@ struct_union {
     member_id: 0x39141955
   }
 }
+struct_union {
+  id: 0x4765767f
+  kind: UNION
+  definition {
+    bytesize: 16
+    member_id: 0x46e47dff
+    member_id: 0x9ad307bc
+  }
+}
 struct_union {
   id: 0x47d8e06a
   kind: UNION
@@ -206809,6 +206915,15 @@ struct_union {
     member_id: 0x72454096
   }
 }
+struct_union {
+  id: 0x78ea2ea9
+  kind: UNION
+  definition {
+    bytesize: 8
+    member_id: 0xb805bbb2
+    member_id: 0x46761387
+  }
+}
 struct_union {
   id: 0x791ba47c
   kind: UNION
@@ -209041,6 +209156,23 @@ struct_union {
     member_id: 0xed700768
   }
 }
+struct_union {
+  id: 0x69385830
+  kind: STRUCT
+  name: "binder_transaction_data"
+  definition {
+    bytesize: 64
+    member_id: 0xb3da3bc9
+    member_id: 0x58cc89fb
+    member_id: 0x5406c379
+    member_id: 0x2d8ea701
+    member_id: 0xd3bc24ef
+    member_id: 0x38f30d00
+    member_id: 0x569ccc07
+    member_id: 0xaa221c83
+    member_id: 0xffd54088
+  }
+}
 struct_union {
   id: 0x5fed90c9
   kind: STRUCT
@@ -285979,6 +286111,11 @@ function {
   parameter_id: 0x3f222c68
   parameter_id: 0x15b54c6f
 }
+function {
+  id: 0x1f07d2c7
+  return_type_id: 0x48b5725f
+  parameter_id: 0x3e7e0d52
+}
 function {
   id: 0x1f0d7714
   return_type_id: 0x48b5725f
@@ -294940,6 +295077,11 @@ function {
   parameter_id: 0xf435685e
   parameter_id: 0x0efc9002
 }
+function {
+  id: 0x921f607b
+  return_type_id: 0x6720d32f
+  parameter_id: 0x3e7e0d52
+}
 function {
   id: 0x9220b9bd
   return_type_id: 0x6720d32f
@@ -300127,6 +300269,14 @@ function {
   parameter_id: 0x6720d32f
   parameter_id: 0x2ec35650
 }
+function {
+  id: 0x98731419
+  return_type_id: 0x6720d32f
+  parameter_id: 0x18bd6530
+  parameter_id: 0xf435685e
+  parameter_id: 0x379d63b0
+  parameter_id: 0x6720d32f
+}
 function {
   id: 0x987349b3
   return_type_id: 0x6720d32f
@@ -301988,6 +302138,14 @@ function {
   parameter_id: 0x18bd6530
   parameter_id: 0x6720d32f
 }
+function {
+  id: 0x9a340b23
+  return_type_id: 0x6720d32f
+  parameter_id: 0x18bd6530
+  parameter_id: 0x6720d32f
+  parameter_id: 0x1d19a9d5
+  parameter_id: 0x1d19a9d5
+}
 function {
   id: 0x9a343225
   return_type_id: 0x6720d32f
@@ -305570,6 +305728,15 @@ function {
   parameter_id: 0x18bd6530
   parameter_id: 0x386883b9
 }
+function {
+  id: 0x9bd6fb19
+  return_type_id: 0x6720d32f
+  parameter_id: 0x18bd6530
+  parameter_id: 0x1f8dbf97
+  parameter_id: 0x1f8dbf97
+  parameter_id: 0x24373219
+  parameter_id: 0x10de2fab
+}
 function {
   id: 0x9bd6ff14
   return_type_id: 0x6720d32f
@@ -313074,6 +313241,12 @@ function {
   return_type_id: 0x4585663f
   parameter_id: 0x31fa879c
 }
+function {
+  id: 0xc2e99087
+  return_type_id: 0x0ab9fa4c
+  parameter_id: 0x0258f96e
+  parameter_id: 0x4585663f
+}
 function {
   id: 0xc3320c3e
   return_type_id: 0x4585663f
@@ -318162,6 +318335,15 @@ elf_symbol {
   type_id: 0x9c203488
   full_name: "__page_mapcount"
 }
+elf_symbol {
+  id: 0xe595f8f9
+  name: "__page_pinner_put_page"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0x9c81e126
+  type_id: 0x11388634
+  full_name: "__page_pinner_put_page"
+}
 elf_symbol {
   id: 0xdc9a73c7
   name: "__pagevec_release"
@@ -319422,6 +319604,15 @@ elf_symbol {
   type_id: 0x9bdbdcc4
   full_name: "__traceiter_android_rvh_prepare_prio_fork"
 }
+elf_symbol {
+  id: 0x13f466b7
+  name: "__traceiter_android_rvh_refrigerator"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0x811d8704
+  type_id: 0x9a1a471c
+  full_name: "__traceiter_android_rvh_refrigerator"
+}
 elf_symbol {
   id: 0xe3e24295
   name: "__traceiter_android_rvh_replace_next_task_fair"
@@ -319935,6 +320126,15 @@ elf_symbol {
   type_id: 0x9b2d3bb4
   full_name: "__traceiter_android_vh_audio_usb_offload_connect"
 }
+elf_symbol {
+  id: 0x530ad17d
+  name: "__traceiter_android_vh_binder_alloc_new_buf_locked"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0x2a27381c
+  type_id: 0x98731419
+  full_name: "__traceiter_android_vh_binder_alloc_new_buf_locked"
+}
 elf_symbol {
   id: 0xbebf7d98
   name: "__traceiter_android_vh_binder_free_proc"
@@ -320007,6 +320207,15 @@ elf_symbol {
   type_id: 0x9bd88151
   full_name: "__traceiter_android_vh_binder_read_done"
 }
+elf_symbol {
+  id: 0x2d244867
+  name: "__traceiter_android_vh_binder_reply"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0x0ec641ea
+  type_id: 0x9bd6fb19
+  full_name: "__traceiter_android_vh_binder_reply"
+}
 elf_symbol {
   id: 0xc6c9353c
   name: "__traceiter_android_vh_binder_restore_priority"
@@ -320052,6 +320261,15 @@ elf_symbol {
   type_id: 0x9bd88151
   full_name: "__traceiter_android_vh_binder_thread_release"
 }
+elf_symbol {
+  id: 0xf6faffcb
+  name: "__traceiter_android_vh_binder_trans"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0x1570346e
+  type_id: 0x9bd6fb19
+  full_name: "__traceiter_android_vh_binder_trans"
+}
 elf_symbol {
   id: 0x5cf60b10
   name: "__traceiter_android_vh_binder_transaction_init"
@@ -320232,6 +320450,15 @@ elf_symbol {
   type_id: 0x9a35263f
   full_name: "__traceiter_android_vh_do_futex"
 }
+elf_symbol {
+  id: 0x1cc3aec5
+  name: "__traceiter_android_vh_do_send_sig_info"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0xd6ea1719
+  type_id: 0x9a340b23
+  full_name: "__traceiter_android_vh_do_send_sig_info"
+}
 elf_symbol {
   id: 0x9dbd7b92
   name: "__traceiter_android_vh_do_wake_up_sync"
@@ -321780,6 +322007,15 @@ elf_symbol {
   type_id: 0x18ccbd2c
   full_name: "__tracepoint_android_rvh_prepare_prio_fork"
 }
+elf_symbol {
+  id: 0x3b6248c1
+  name: "__tracepoint_android_rvh_refrigerator"
+  is_defined: true
+  symbol_type: OBJECT
+  crc: 0xaa020dd1
+  type_id: 0x18ccbd2c
+  full_name: "__tracepoint_android_rvh_refrigerator"
+}
 elf_symbol {
   id: 0x18bac297
   name: "__tracepoint_android_rvh_replace_next_task_fair"
@@ -322293,6 +322529,15 @@ elf_symbol {
   type_id: 0x18ccbd2c
   full_name: "__tracepoint_android_vh_audio_usb_offload_connect"
 }
+elf_symbol {
+  id: 0xc8703937
+  name: "__tracepoint_android_vh_binder_alloc_new_buf_locked"
+  is_defined: true
+  symbol_type: OBJECT
+  crc: 0x02c7faf0
+  type_id: 0x18ccbd2c
+  full_name: "__tracepoint_android_vh_binder_alloc_new_buf_locked"
+}
 elf_symbol {
   id: 0xf32898c6
   name: "__tracepoint_android_vh_binder_free_proc"
@@ -322365,6 +322610,15 @@ elf_symbol {
   type_id: 0x18ccbd2c
   full_name: "__tracepoint_android_vh_binder_read_done"
 }
+elf_symbol {
+  id: 0x6de9ac69
+  name: "__tracepoint_android_vh_binder_reply"
+  is_defined: true
+  symbol_type: OBJECT
+  crc: 0x39132f3d
+  type_id: 0x18ccbd2c
+  full_name: "__tracepoint_android_vh_binder_reply"
+}
 elf_symbol {
   id: 0x57a9a36a
   name: "__tracepoint_android_vh_binder_restore_priority"
@@ -322410,6 +322664,15 @@ elf_symbol {
   type_id: 0x18ccbd2c
   full_name: "__tracepoint_android_vh_binder_thread_release"
 }
+elf_symbol {
+  id: 0xec7035fd
+  name: "__tracepoint_android_vh_binder_trans"
+  is_defined: true
+  symbol_type: OBJECT
+  crc: 0x2bda2355
+  type_id: 0x18ccbd2c
+  full_name: "__tracepoint_android_vh_binder_trans"
+}
 elf_symbol {
   id: 0xa9d55136
   name: "__tracepoint_android_vh_binder_transaction_init"
@@ -322590,6 +322853,15 @@ elf_symbol {
   type_id: 0x18ccbd2c
   full_name: "__tracepoint_android_vh_do_futex"
 }
+elf_symbol {
+  id: 0x82ce823f
+  name: "__tracepoint_android_vh_do_send_sig_info"
+  is_defined: true
+  symbol_type: OBJECT
+  crc: 0x692a21ea
+  type_id: 0x18ccbd2c
+  full_name: "__tracepoint_android_vh_do_send_sig_info"
+}
 elf_symbol {
   id: 0xe2d7542c
   name: "__tracepoint_android_vh_do_wake_up_sync"
@@ -332172,6 +332444,15 @@ elf_symbol {
   type_id: 0x9d27e8b1
   full_name: "devm_hwspin_lock_register"
 }
+elf_symbol {
+  id: 0xe896baa8
+  name: "devm_hwspin_lock_request_specific"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0x36d01cc2
+  type_id: 0xc2e99087
+  full_name: "devm_hwspin_lock_request_specific"
+}
 elf_symbol {
   id: 0xa29138c1
   name: "devm_i2c_new_dummy_device"
@@ -339345,6 +339626,15 @@ elf_symbol {
   type_id: 0x8e47c273
   full_name: "freezer_active"
 }
+elf_symbol {
+  id: 0x3918f832
  name: "freezer_cgrp_subsys"
+  is_defined: true
+  symbol_type: OBJECT
+  crc: 0x85c823b4
+  type_id: 0x00571446
+  full_name: "freezer_cgrp_subsys"
+}
 elf_symbol {
   id: 0x238d82fe
   name: "freezing_slow_path"
@@ -340680,6 +340970,24 @@ elf_symbol {
   type_id: 0x977a5487
   full_name: "gh_rm_notifier_unregister"
 }
+elf_symbol {
+  id: 0x62906068
+  name: "gh_rm_register_platform_ops"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0xa577ae43
+  type_id: 0x921f607b
+  full_name: "gh_rm_register_platform_ops"
+}
+elf_symbol {
+  id: 0x35ffaad3
+  name: "gh_rm_unregister_platform_ops"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0x5759f053
+  type_id: 0x1f07d2c7
+  full_name: "gh_rm_unregister_platform_ops"
+}
 elf_symbol {
   id: 0x5a582da8
   name: "gic_nonsecure_priorities"
@@ -351561,6 +351869,15 @@ elf_symbol {
   type_id: 0x6a8ce717
   full_name: "page_mapping"
 }
+elf_symbol {
+  id: 0x44e50ff8
+  name: "page_pinner_inited"
+  is_defined: true
+  symbol_type: OBJECT
+  crc: 0xacfe4142
+  type_id: 0x8e47c273
+  full_name: "page_pinner_inited"
+}
 elf_symbol {
   id: 0xec5c680b
   name: "page_pool_alloc_pages"
@@ -359220,6 +359537,15 @@ elf_symbol {
   type_id: 0x1c822746
   full_name: "sdhci_enable_clk"
 }
+elf_symbol {
+  id: 0x5ab300fb
+  name: "sdhci_enable_v4_mode"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0x80ef52da
+  type_id: 0x1ec711b9
+  full_name: "sdhci_enable_v4_mode"
+}
 elf_symbol {
   id: 0x33d40ef0
   name: "sdhci_execute_tuning"
@@ -359265,6 +359591,24 @@ elf_symbol {
   type_id: 0x1f5b92f5
   full_name: "sdhci_remove_host"
 }
+elf_symbol {
+  id: 0xdc85b8be
+  name: "sdhci_request"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0x4885a3af
+  type_id: 0x1dbab156
+  full_name: "sdhci_request"
+}
+elf_symbol {
+  id: 0xd1dc3f24
+  name: "sdhci_request_atomic"
+  is_defined: true
+  symbol_type: FUNCTION
+  crc: 0x30c50c36
+  type_id: 0x90a203ea
+  full_name: "sdhci_request_atomic"
+}
 elf_symbol {
   id: 0xa6a2da07
   name: "sdhci_reset"
@@ -373945,6 +374289,10 @@ symbols {
     key: "__page_mapcount"
     value: 0x8d43f7e0
   }
+  symbol {
+    key: "__page_pinner_put_page"
+    value: 0xe595f8f9
+  }
   symbol {
     key: "__pagevec_release"
     value: 0xdc9a73c7
@@ -374505,6 +374853,10 @@ symbols {
     key: "__traceiter_android_rvh_prepare_prio_fork"
    value: 0xeccbc3c1
   }
+  symbol {
+    key: "__traceiter_android_rvh_refrigerator"
+    value: 0x13f466b7
+  }
  symbol {
     key: "__traceiter_android_rvh_replace_next_task_fair"
     value: 0xe3e24295
@@ -374733,6 +375085,10 @@ symbols {
     key: "__traceiter_android_vh_audio_usb_offload_connect"
     value: 0x528da532
   }
+  symbol {
+    key: "__traceiter_android_vh_binder_alloc_new_buf_locked"
+    value: 0x530ad17d
+  }
   symbol {
     key: "__traceiter_android_vh_binder_free_proc"
     value: 0xbebf7d98
@@ -374765,6 +375121,10 @@ symbols {
     key: "__traceiter_android_vh_binder_read_done"
     value: 0x5c1ee0c5
   }
+  symbol {
+    key: "__traceiter_android_vh_binder_reply"
+    value: 0x2d244867
+  }
   symbol {
     key: "__traceiter_android_vh_binder_restore_priority"
     value: 0xc6c9353c
@@ -374785,6 +375145,10 @@ symbols {
     key: "__traceiter_android_vh_binder_thread_release"
     value: 0x25f13dbe
   }
+  symbol {
+    key: "__traceiter_android_vh_binder_trans"
+    value: 0xf6faffcb
+  }
   symbol {
     key: "__traceiter_android_vh_binder_transaction_init"
     value: 0x5cf60b10
@@ -374865,6 +375229,10 @@ symbols {
     key: "__traceiter_android_vh_do_futex"
     value: 0xd593b3ef
   }
+  symbol {
+    key: "__traceiter_android_vh_do_send_sig_info"
+    value: 0x1cc3aec5
+  }
   symbol {
     key: "__traceiter_android_vh_do_wake_up_sync"
     value: 0x9dbd7b92
@@ -375553,6 +375921,10 @@ symbols {
     key: "__tracepoint_android_rvh_prepare_prio_fork"
     value: 0x50605d97
   }
+  symbol {
+    key: "__tracepoint_android_rvh_refrigerator"
+    value: 0x3b6248c1
+  }
   symbol {
     key: "__tracepoint_android_rvh_replace_next_task_fair"
     value: 0x18bac297
@@ -375781,6 +376153,10 @@ symbols {
     key: "__tracepoint_android_vh_audio_usb_offload_connect"
     value: 0xfb7cdd24
   }
+  symbol {
+    key: "__tracepoint_android_vh_binder_alloc_new_buf_locked"
+    value: 0xc8703937
+  }
   symbol {
     key: "__tracepoint_android_vh_binder_free_proc"
     value: 0xf32898c6
@@ -375813,6 +376189,10 @@ symbols {
     key: "__tracepoint_android_vh_binder_read_done"
     value: 0x54aac8cb
   }
+  symbol {
+    key: "__tracepoint_android_vh_binder_reply"
+    value: 0x6de9ac69
+  }
   symbol {
     key: "__tracepoint_android_vh_binder_restore_priority"
     value: 0x57a9a36a
@@ -375833,6 +376213,10 @@ symbols {
     key: "__tracepoint_android_vh_binder_thread_release"
     value: 0x2fce8f78
   }
+  symbol {
+    key: "__tracepoint_android_vh_binder_trans"
+    value: 0xec7035fd
+  }
   symbol {
     key: "__tracepoint_android_vh_binder_transaction_init"
     value: 0xa9d55136
@@ -375913,6 +376297,10 @@ symbols {
     key: "__tracepoint_android_vh_do_futex"
     value: 0x9fe99d05
   }
+  symbol {
+    key: "__tracepoint_android_vh_do_send_sig_info"
+    value: 0x82ce823f
+  }
   symbol {
     key: "__tracepoint_android_vh_do_wake_up_sync"
     value: 0xe2d7542c
@@ -380177,6 +380565,10 @@ symbols {
     key: "devm_hwspin_lock_register"
     value: 0x40c3a63b
   }
+  symbol {
+    key: "devm_hwspin_lock_request_specific"
+    value: 0xe896baa8
+  }
   symbol {
     key: "devm_i2c_new_dummy_device"
     value: 0xa29138c1
@@ -383357,6 +383749,10 @@ symbols {
     key: "freezer_active"
     value: 0x2ed5588c
   }
+  symbol {
+    key: "freezer_cgrp_subsys"
+    value: 0x3918f832
+  }
   symbol {
     key: "freezing_slow_path"
     value: 0x238d82fe
@@ -383949,6 +384345,14 @@ symbols {
     key: "gh_rm_notifier_unregister"
     value: 0x3049a5e5
   }
+  symbol {
+    key: "gh_rm_register_platform_ops"
+    value: 0x62906068
+  }
+  symbol {
+    key: "gh_rm_unregister_platform_ops"
+    value: 0x35ffaad3
+  }
   symbol {
     key: "gic_nonsecure_priorities"
     value: 0x5a582da8
@@ -388785,6 +389189,10 @@ symbols {
     key: "page_mapping"
     value: 0x4f3e5356
   }
+  symbol {
+    key: "page_pinner_inited"
+    value: 0x44e50ff8
+  }
   symbol {
     key: "page_pool_alloc_pages"
     value: 0xec5c680b
@@ -392189,6 +392597,10 @@ symbols {
     key: "sdhci_enable_clk"
     value: 0x6febaf59
   }
+  symbol {
+    key: "sdhci_enable_v4_mode"
+    value: 0x5ab300fb
+  }
   symbol {
     key: "sdhci_execute_tuning"
     value: 0x33d40ef0
@@ -392209,6 +392621,14 @@ symbols {
     key: "sdhci_remove_host"
     value: 0xad3b5931
   }
+  symbol {
+    key: "sdhci_request"
+    value: 0xdc85b8be
+  }
+  symbol {
+    key: "sdhci_request_atomic"
+    value: 0xd1dc3f24
+  }
   symbol {
     key: "sdhci_reset"
     value: 0xa6a2da07
@@ -1324,6 +1324,8 @@
 page_frag_alloc_align
 __page_frag_cache_drain
 page_frag_free
+page_pinner_inited
+__page_pinner_put_page
 panic
 panic_notifier_list
 param_array_ops
@@ -1225,6 +1225,8 @@
 gh_rm_call
 gh_rm_notifier_register
 gh_rm_notifier_unregister
+gh_rm_register_platform_ops
+gh_rm_unregister_platform_ops
 gic_nonsecure_priorities
 gov_attr_set_init
 gov_attr_set_put
android/abi_gki_aarch64_unisoc (new file, 289 lines)
@@ -0,0 +1,289 @@
+[abi_symbol_list]
+# commonly used symbols
+  alt_cb_patch_nops
+  arm64_use_ng_mappings
+  clk_disable
+  clk_enable
+  clk_get_rate
+  clk_prepare
+  clk_unprepare
+  __const_udelay
+  debugfs_create_dir
+  debugfs_create_file
+  debugfs_remove
+  _dev_err
+  dev_err_probe
+  dev_get_regmap
+  _dev_info
+  devm_clk_get
+  devm_gpiochip_add_data_with_key
+  devm_ioremap_resource
+  devm_kmalloc
+  devm_platform_ioremap_resource
+  devm_regulator_register
+  devm_request_threaded_irq
+  devm_spi_register_controller
+  _dev_warn
+  gpiochip_disable_irq
+  gpiochip_enable_irq
+  gpiochip_get_data
+  gpiochip_irq_relres
+  gpiochip_irq_reqres
+  handle_bad_irq
+  handle_edge_irq
+  handle_level_irq
+  irq_get_irq_data
+  __irq_resolve_mapping
+  kfree
+  __kmalloc
+  kmalloc_caches
+  kmalloc_trace
+  ktime_get
+  ktime_get_mono_fast_ns
+  __list_add_valid
+  __list_del_entry_valid
+  log_post_read_mmio
+  log_post_write_mmio
+  log_read_mmio
+  log_write_mmio
+  memcpy
+  memstart_addr
+  module_layout
+  __mutex_init
+  mutex_lock
+  mutex_unlock
+  of_alias_get_id
+  of_device_get_match_data
+  of_property_read_variable_u32_array
+  __platform_driver_register
+  platform_driver_unregister
+  platform_get_irq
+  platform_get_resource
+  __pm_runtime_disable
+  pm_runtime_enable
+  pm_runtime_set_autosuspend_delay
+  __pm_runtime_set_status
+  __pm_runtime_suspend
+  __pm_runtime_use_autosuspend
+  _printk
+  put_device
+  __put_task_struct
+  _raw_spin_lock_irqsave
+  _raw_spin_unlock_irqrestore
+  regmap_read
+  regmap_update_bits_base
+  regmap_write
+  regulator_disable_regmap
+  regulator_enable_regmap
+  regulator_get_voltage_sel_regmap
+  regulator_is_enabled_regmap
+  regulator_list_voltage_linear
+  regulator_set_voltage_sel_regmap
+  seq_lseek
+  seq_printf
+  seq_puts
+  seq_read
+  sg_next
+  single_open
+  single_release
+  __spi_alloc_controller
+  __stack_chk_fail
+  strcmp
+  usleep_range_state
+
+# required by clk-sprd.ko
+  clk_hw_get_num_parents
+  clk_hw_get_parent
+  clk_hw_is_enabled
+  __clk_mux_determine_rate
+  device_node_to_regmap
+  devm_clk_hw_register
+  devm_of_clk_add_hw_provider
+  __devm_regmap_init_mmio_clk
+  divider_get_val
+  divider_recalc_rate
+  divider_round_rate_parent
+  of_clk_hw_onecell_get
+  of_device_is_compatible
+  of_find_property
+  of_get_parent
+  syscon_regmap_lookup_by_phandle
+  __udelay
+
+# required by gpio-eic-sprd.ko
+  generic_handle_irq
+  gpiochip_find
+
+# required by gpio-pmic-eic-sprd.ko
+  _find_next_bit
+  handle_nested_irq
+
+# required by gpio-sprd.ko
+  generic_handle_domain_irq
+  __platform_driver_probe
+
+# required by mmc_hsq.ko
+  finish_wait
+  init_wait_entry
+  __init_waitqueue_head
+  mmc_cqe_request_done
+  prepare_to_wait_event
+  queue_work_on
+  _raw_spin_lock_irq
+  _raw_spin_unlock_irq
+  schedule
+  schedule_timeout
+  system_wq
+  __wake_up
+
+# required by pwm-sprd.ko
+  clk_bulk_disable
+  clk_bulk_enable
+  clk_bulk_prepare
+  clk_bulk_unprepare
+  devm_clk_bulk_get
+  of_property_read_string_helper
+  pwmchip_add
+  pwmchip_remove
+
+# required by sc2730-regulator.ko
+  generic_file_llseek
+  regulator_map_voltage_linear
+  simple_attr_open
+  simple_attr_read
+  simple_attr_release
+  simple_attr_write
+
+# required by sdhci-sprd.ko
+  clk_round_rate
+  devm_pinctrl_get
+  mmc_of_parse
+  mmc_regulator_set_vqmmc
+  mmc_request_done
+  pinctrl_lookup_state
+  pinctrl_select_state
+  pm_runtime_force_resume
+  pm_runtime_force_suspend
+  __sdhci_add_host
+  sdhci_cleanup_host
+  sdhci_enable_clk
+  sdhci_enable_v4_mode
+  sdhci_pltfm_free
+  sdhci_pltfm_init
+  sdhci_remove_host
+  sdhci_request
+  sdhci_request_atomic
+  sdhci_reset
+  sdhci_runtime_resume_host
+  sdhci_runtime_suspend_host
+  sdhci_set_bus_width
+  sdhci_setup_host
+
+# required by spi-sprd-adi.ko
+  _dev_emerg
+  devm_hwspin_lock_request_specific
+  __hwspin_lock_timeout
+  __hwspin_unlock
+  of_get_next_child
+  of_get_property
+  of_hwspin_lock_get_id
+  register_restart_handler
+  strncmp
+  unregister_restart_handler
+
+# required by spi-sprd.ko
+  clk_set_parent
+  complete
+  dma_release_channel
+  dma_request_chan
+  __init_swait_queue_head
+  __pm_runtime_resume
+  spi_controller_suspend
+  spi_finalize_current_transfer
+  wait_for_completion
+
+# required by sprd-sc27xx-spi.ko
+  device_set_wakeup_capable
+  device_wakeup_enable
+  devm_of_platform_populate
+  devm_regmap_add_irq_chip
+  __devm_regmap_init
+  driver_unregister
+  irq_set_irq_wake
+  __spi_register_driver
+  spi_sync
+
+# required by sprd_hwspinlock.ko
+  devm_add_action
+  devm_hwspin_lock_register
+
+# required by sprd_power_manager.ko
+  del_timer
+  fortify_panic
+  init_timer_key
+  jiffies
+  jiffies_to_msecs
+  mod_timer
+  __msecs_to_jiffies
+  __pm_relax
+  __pm_stay_awake
+  register_pm_notifier
+  register_reboot_notifier
+  snprintf
+  strnlen
+  unregister_pm_notifier
+  unregister_reboot_notifier
+  wakeup_source_add
+  wakeup_source_create
+
+# required by system_heap.ko
+  __alloc_pages
+  dma_buf_export
+  dma_heap_add
+  dma_heap_get_dev
+  dma_heap_get_name
+  dma_map_sgtable
+  dma_set_coherent_mask
+  dma_set_mask
+  dma_sync_sg_for_cpu
+  dma_sync_sg_for_device
+  dma_unmap_sg_attrs
+  __free_pages
+  remap_pfn_range
+  sg_alloc_table
+  sg_free_table
+  __sg_page_iter_next
+  __sg_page_iter_start
+  vfree
+  vmalloc
+  vmap
+  vunmap
+
+# required by ums512-clk.ko
+  clk_fixed_factor_ops
+  device_get_match_data
+  devm_reset_controller_register
+
+# required by unisoc-iommu.ko
+  blocking_notifier_call_chain
+  blocking_notifier_chain_register
+  dma_alloc_attrs
+  dma_free_attrs
+  gen_pool_add_owner
+  gen_pool_alloc_algo_owner
+  gen_pool_avail
+  gen_pool_create
+  gen_pool_destroy
+  gen_pool_free_owner
+  __get_free_pages
+  ioremap_prot
+  iounmap
+  kimage_voffset
+  kmalloc_large
+  memset32
+  memset
+  of_address_to_resource
+  of_count_phandle_with_args
+  of_match_node
+  __of_parse_phandle_with_args
+  of_property_read_string
@@ -299,6 +299,8 @@
 page_frag_alloc_align
 __page_frag_cache_drain
 page_frag_free
+page_pinner_inited
+__page_pinner_put_page
 param_ops_bool
 param_ops_charp
 param_ops_int
@@ -167,6 +167,11 @@
 __tracepoint_android_rvh_dequeue_task_fair
 __tracepoint_android_rvh_entity_tick
 
+#required by millet.ko
+__traceiter_android_rvh_refrigerator
+__tracepoint_android_rvh_refrigerator
+freezer_cgrp_subsys
+
 #required by metis.ko module
 __traceiter_android_vh_rwsem_read_wait_start
 __traceiter_android_vh_rwsem_write_wait_start
@@ -185,3 +190,17 @@
 __tracepoint_android_rvh_set_cpus_allowed_comm
 __tracepoint_android_rvh_dequeue_task
 cpuset_cpus_allowed
+
+#required by millet.ko
+__traceiter_android_vh_binder_wait_for_work
+__traceiter_android_vh_do_send_sig_info
+__traceiter_android_vh_binder_preset
+__traceiter_android_vh_binder_trans
+__traceiter_android_vh_binder_reply
+__traceiter_android_vh_binder_alloc_new_buf_locked
+__tracepoint_android_vh_binder_wait_for_work
+__tracepoint_android_vh_do_send_sig_info
+__tracepoint_android_vh_binder_preset
+__tracepoint_android_vh_binder_trans
+__tracepoint_android_vh_binder_reply
+__tracepoint_android_vh_binder_alloc_new_buf_locked
@@ -538,6 +538,7 @@ CONFIG_DMABUF_HEAPS_PAGE_POOL=y
 CONFIG_UIO=y
 CONFIG_VIRT_DRIVERS=y
 CONFIG_GUNYAH=y
+# CONFIG_GUNYAH_QCOM_PLATFORM is not set
 CONFIG_GUNYAH_VCPU=y
 CONFIG_GUNYAH_IRQFD=y
 CONFIG_GUNYAH_IOEVENTFD=y
@@ -700,6 +701,7 @@ CONFIG_UBSAN_LOCAL_BOUNDS=y
 # CONFIG_UBSAN_BOOL is not set
 # CONFIG_UBSAN_ENUM is not set
 CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_PINNER=y
 CONFIG_DEBUG_STACK_USAGE=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_KASAN=y
@@ -8,18 +8,14 @@
 #include <linux/gunyah.h>
 #include <linux/uuid.h>
 
-static const uuid_t gh_known_uuids[] = {
-        /* Qualcomm's version of Gunyah {19bd54bd-0b37-571b-946f-609b54539de6} */
-        UUID_INIT(0x19bd54bd, 0x0b37, 0x571b, 0x94, 0x6f, 0x60, 0x9b, 0x54, 0x53, 0x9d, 0xe6),
-        /* Standard version of Gunyah {c1d58fcd-a453-5fdb-9265-ce36673d5f14} */
-        UUID_INIT(0xc1d58fcd, 0xa453, 0x5fdb, 0x92, 0x65, 0xce, 0x36, 0x67, 0x3d, 0x5f, 0x14),
-};
+/* {c1d58fcd-a453-5fdb-9265-ce36673d5f14} */
+static const uuid_t GUNYAH_UUID =
+        UUID_INIT(0xc1d58fcd, 0xa453, 0x5fdb, 0x92, 0x65, 0xce, 0x36, 0x67, 0x3d, 0x5f, 0x14);
 
 bool arch_is_gh_guest(void)
 {
         struct arm_smccc_res res;
         uuid_t uuid;
-        int i;
 
         arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res);
 
@@ -28,11 +24,7 @@ bool arch_is_gh_guest(void)
         ((u32 *)&uuid.b[0])[2] = lower_32_bits(res.a2);
         ((u32 *)&uuid.b[0])[3] = lower_32_bits(res.a3);
 
-        for (i = 0; i < ARRAY_SIZE(gh_known_uuids); i++)
-                if (uuid_equal(&uuid, &gh_known_uuids[i]))
-                        return true;
-
-        return false;
+        return uuid_equal(&uuid, &GUNYAH_UUID);
 }
 EXPORT_SYMBOL_GPL(arch_is_gh_guest);
 
@@ -71,7 +63,7 @@ enum gh_error gh_hypercall_bell_send(u64 capid, u64 new_flags, u64 *old_flags)
 
         arm_smccc_1_1_hvc(GH_HYPERCALL_BELL_SEND, capid, new_flags, 0, &res);
 
-        if (res.a0 == GH_ERROR_OK)
+        if (res.a0 == GH_ERROR_OK && old_flags)
                 *old_flags = res.a1;
 
         return res.a0;
@@ -88,7 +80,7 @@ enum gh_error gh_hypercall_bell_set_mask(u64 capid, u64 enable_mask, u64 ack_mas
 }
 EXPORT_SYMBOL_GPL(gh_hypercall_bell_set_mask);
 
-enum gh_error gh_hypercall_msgq_send(u64 capid, size_t size, void *buff, int tx_flags, bool *ready)
+enum gh_error gh_hypercall_msgq_send(u64 capid, size_t size, void *buff, u64 tx_flags, bool *ready)
 {
         struct arm_smccc_res res;
 
@@ -134,7 +126,7 @@ enum gh_error gh_hypercall_vcpu_run(u64 capid, u64 *resume_data,
         arm_smccc_1_2_hvc(&args, &res);
 
         if (res.a0 == GH_ERROR_OK) {
-                resp->state = res.a1;
+                resp->sized_state = res.a1;
                 resp->state_data[0] = res.a2;
                 resp->state_data[1] = res.a3;
                 resp->state_data[2] = res.a4;
@@ -1,16 +1,17 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
-#ifndef __ASM_GUNYAH_H_
-#define __ASM_GUNYAH_H_
+#ifndef _ASM_GUNYAH_H
+#define _ASM_GUNYAH_H
 
 #include <linux/irq.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 
 static inline int arch_gh_fill_irq_fwspec_params(u32 virq, struct irq_fwspec *fwspec)
 {
-        if (virq < 32 || virq > 1019)
+        /* Assume that Gunyah gave us an SPI; defensively check it */
+        if (WARN_ON(virq < 32 || virq > 1019))
                 return -EINVAL;
 
         fwspec->param_count = 3;
@@ -15,6 +15,8 @@
 #define EMITS_PT_NOTE
 #endif
 
+#define RUNTIME_DISCARD_EXIT
+
 #include <asm-generic/vmlinux.lds.h>
 
 #undef mips
@@ -74,9 +74,7 @@ CFLAGS_string.o += $(PURGATORY_CFLAGS)
 CFLAGS_REMOVE_ctype.o += $(PURGATORY_CFLAGS_REMOVE)
 CFLAGS_ctype.o += $(PURGATORY_CFLAGS)
 
-AFLAGS_REMOVE_entry.o += -Wa,-gdwarf-2
-AFLAGS_REMOVE_memcpy.o += -Wa,-gdwarf-2
-AFLAGS_REMOVE_memset.o += -Wa,-gdwarf-2
+asflags-remove-y += $(foreach x, -g -gdwarf-4 -gdwarf-5, $(x) -Wa,$(x))
 
 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
         $(call if_changed,ld)
@@ -69,8 +69,7 @@ CFLAGS_sha256.o += $(PURGATORY_CFLAGS)
 CFLAGS_REMOVE_string.o += $(PURGATORY_CFLAGS_REMOVE)
 CFLAGS_string.o += $(PURGATORY_CFLAGS)
 
-AFLAGS_REMOVE_setup-x86_$(BITS).o += -Wa,-gdwarf-2
-AFLAGS_REMOVE_entry64.o += -Wa,-gdwarf-2
+asflags-remove-y += $(foreach x, -g -gdwarf-4 -gdwarf-5, $(x) -Wa,$(x))
 
 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
         $(call if_changed,ld)
@@ -3187,6 +3187,7 @@ static void binder_transaction(struct binder_proc *proc,
                 target_proc = target_thread->proc;
                 target_proc->tmp_ref++;
                 binder_inner_proc_unlock(target_thread->proc);
+                trace_android_vh_binder_reply(target_proc, proc, thread, tr);
         } else {
                 if (tr->target.handle) {
                         struct binder_ref *ref;
@@ -3249,6 +3250,7 @@ static void binder_transaction(struct binder_proc *proc,
                         return_error_line = __LINE__;
                         goto err_invalid_target_handle;
                 }
+                trace_android_vh_binder_trans(target_proc, proc, thread, tr);
                 if (security_binder_transaction(proc->cred,
                                                 target_proc->cred) < 0) {
                         binder_txn_error("%d:%d transaction credentials failed\n",
@@ -25,6 +25,7 @@
 #include <linux/sizes.h>
 #include "binder_alloc.h"
 #include "binder_trace.h"
+#include <trace/hooks/binder.h>
 
 struct list_lru binder_alloc_lru;
 
@@ -406,6 +407,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
                               alloc->pid, extra_buffers_size);
                 return ERR_PTR(-EINVAL);
         }
+        trace_android_vh_binder_alloc_new_buf_locked(size, &alloc->free_async_space, is_async);
         if (is_async &&
             alloc->free_async_space < size + sizeof(struct binder_buffer)) {
                 binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
@@ -61,11 +61,13 @@
 #include <trace/hooks/audio_usboffload.h>
 #include <trace/hooks/typec.h>
 #include <trace/hooks/user.h>
+#include <trace/hooks/signal.h>
 
 /*
  * Export tracepoints that act as a bare tracehook (ie: have no trace event
  * associated with them) to allow external modules to probe them.
  */
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_refrigerator);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_arch_set_freq_scale);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_is_fpsimd_save);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_transaction_init);
@@ -73,6 +75,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_priority_skip);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_set_priority);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_restore_priority);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_wakeup_ilocked);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_send_sig_info);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_start);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_finish);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rtmutex_wait_start);
@@ -230,3 +233,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_preset);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_uid);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_user);
 EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpuset_fork);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_alloc_new_buf_locked);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_reply);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_trans);
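These EXPORT_TRACEPOINT_SYMBOL_GPL() lines exist so that out-of-tree vendor modules can attach probes to the hooks. As a hedged sketch (assuming the register_trace_* helpers that the Android vendor-hook DECLARE_HOOK machinery generates, with the probe signature implied by the binder.c call sites above), a module such as the millet.ko named in the xiaomi symbol list might do:

/* Illustrative vendor module; hook/probe names assumed from this diff. */
#include <linux/module.h>
#include <trace/hooks/binder.h>

/* Mirrors the call site:
 * trace_android_vh_binder_trans(target_proc, proc, thread, tr).
 * The leading void *data is supplied by the tracepoint core.
 */
static void probe_binder_trans(void *data, struct binder_proc *target_proc,
                               struct binder_proc *proc,
                               struct binder_thread *thread,
                               struct binder_transaction_data *tr)
{
        /* vendor-specific binder perf accounting would go here */
}

static int __init millet_init(void)
{
        return register_trace_android_vh_binder_trans(probe_binder_trans, NULL);
}

static void __exit millet_exit(void)
{
        unregister_trace_android_vh_binder_trans(probe_binder_trans, NULL);
}

module_init(millet_init);
module_exit(millet_exit);
MODULE_LICENSE("GPL");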
@@ -898,7 +898,7 @@ static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
  * Return negative errno on failure or 0 on success with @srcvm updated.
  */
 int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
-                        unsigned int *srcvm,
+                        u64 *srcvm,
                         const struct qcom_scm_vmperm *newvm,
                         unsigned int dest_cnt)
 {
@@ -915,9 +915,9 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
         __le32 *src;
         void *ptr;
         int ret, i, b;
-        unsigned long srcvm_bits = *srcvm;
+        u64 srcvm_bits = *srcvm;
 
-        src_sz = hweight_long(srcvm_bits) * sizeof(*src);
+        src_sz = hweight64(srcvm_bits) * sizeof(*src);
         mem_to_map_sz = sizeof(*mem_to_map);
         dest_sz = dest_cnt * sizeof(*destvm);
         ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
@@ -930,8 +930,10 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
         /* Fill source vmid detail */
         src = ptr;
         i = 0;
-        for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
-                src[i++] = cpu_to_le32(b);
+        for (b = 0; b < BITS_PER_TYPE(u64); b++) {
+                if (srcvm_bits & BIT(b))
+                        src[i++] = cpu_to_le32(b);
+        }
 
         /* Fill details of mem buff to map */
         mem_to_map = ptr + ALIGN(src_sz, SZ_64);
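The motivation for the fixed-width type: the source-VM set is a bitmap indexed by VMID, VMIDs can be larger than 31, and unsigned long is only 32 bits on 32-bit kernels, so the old for_each_set_bit() over BITS_PER_LONG could silently drop high VMIDs. A self-contained userspace sketch of the new iteration, with BIT()/BITS_PER_TYPE() re-created locally for illustration:

#include <stdint.h>
#include <stdio.h>

#define BIT(b)           (1ULL << (b))
#define BITS_PER_TYPE(t) (sizeof(t) * 8)

/* Walk a fixed-width u64 VMID bitmap the way the new loop does,
 * emitting each set bit position (the VMID).
 */
int main(void)
{
        uint64_t srcvm_bits = BIT(3) | BIT(45); /* VMID 45 would not fit a 32-bit long */

        for (unsigned int b = 0; b < BITS_PER_TYPE(uint64_t); b++)
                if (srcvm_bits & BIT(b))
                        printf("source VMID %u\n", b);
        return 0;
}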
@@ -360,7 +360,6 @@ fpga_bridge_register(struct device *parent, const char *name,
         bridge->dev.parent = parent;
         bridge->dev.of_node = parent->of_node;
         bridge->dev.id = id;
-        of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev);
 
         ret = dev_set_name(&bridge->dev, "br%d", id);
         if (ret)
@@ -372,6 +371,8 @@ fpga_bridge_register(struct device *parent, const char *name,
                 return ERR_PTR(ret);
         }
 
+        of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev);
+
         return bridge;
 
 error_device:
@@ -653,6 +653,9 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
         if (!src->enabled_types || !src->funcs->set)
                 return -EINVAL;
 
+        if (WARN_ON(!amdgpu_irq_enabled(adev, src, type)))
+                return -EINVAL;
+
         if (atomic_dec_and_test(&src->enabled_types[type]))
                 return amdgpu_irq_update(adev, src, type);
 
@@ -167,10 +167,21 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
         if (rc)
                 return rc;
 
-        irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+        if (amdgpu_in_reset(adev)) {
+                irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+                /* During gpu-reset we disable and then enable vblank irq, so
+                 * don't use amdgpu_irq_get/put() to avoid refcount change.
+                 */
+                if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+                        rc = -EBUSY;
+        } else {
+                rc = (enable)
+                        ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id)
+                        : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id);
+        }
 
-        if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
-                return -EBUSY;
+        if (rc)
+                return rc;
 
 skip:
         if (amdgpu_in_reset(adev))
@@ -222,7 +222,7 @@ struct _vcs_dpi_ip_params_st dcn3_15_ip = {
         .maximum_dsc_bits_per_component = 10,
         .dsc422_native_support = false,
         .is_line_buffer_bpp_fixed = true,
-        .line_buffer_fixed_bpp = 49,
+        .line_buffer_fixed_bpp = 48,
         .line_buffer_size_bits = 789504,
         .max_line_buffer_lines = 12,
         .writeback_interface_buffer_size_kbytes = 90,
@@ -165,7 +165,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
                DP_AUX_CH_CTL_TIME_OUT_MAX |
                DP_AUX_CH_CTL_RECEIVE_ERROR |
                (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
-               DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
+               DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(24) |
                DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
 
         if (intel_tc_port_in_tbt_alt_mode(dig_port))
@@ -51,7 +51,7 @@ int i915_user_extensions(struct i915_user_extension __user *ext,
                         return err;
 
                 if (get_user(next, &ext->next_extension) ||
-                    overflows_type(next, ext))
+                    overflows_type(next, uintptr_t))
                         return -EFAULT;
 
                 ext = u64_to_user_ptr(next);
@@ -111,10 +111,6 @@ bool i915_error_injected(void);
 #define range_overflows_end_t(type, start, size, max) \
         range_overflows_end((type)(start), (type)(size), (type)(max))
 
-/* Note we don't consider signbits :| */
-#define overflows_type(x, T) \
-        (sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
-
 #define ptr_mask_bits(ptr, n) ({ \
         unsigned long __v = (unsigned long)(ptr); \
         (typeof(ptr))(__v & -BIT(n)); \
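For context, the removed i915-local overflows_type() only compared bit widths, which is why the caller above now tests against uintptr_t rather than against the pointer variable itself. A standalone sketch of the removed macro's behavior, with the macro copied from the deleted lines and the rest illustrative:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_TYPE(T) (sizeof(T) * 8)
/* The removed helper: x "overflows" T if x is wider than T and has bits
 * set above T's width (sign bits deliberately ignored, as the old comment
 * noted).
 */
#define overflows_type(x, T) \
        (sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))

int main(void)
{
        uint64_t next = UINT64_C(1) << 40;

        /* On an LP64 target this prints 0: a u64 value fits uintptr_t. */
        printf("%d\n", !!overflows_type(next, uintptr_t));
        /* A 32-bit destination type overflows for the same value: prints 1. */
        printf("%d\n", !!overflows_type(next, uint32_t));
        return 0;
}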
@@ -840,6 +840,8 @@ static void vop2_enable(struct vop2 *vop2)
                 return;
         }
 
+        regcache_sync(vop2->map);
+
         if (vop2->data->soc_id == 3566)
                 vop2_writel(vop2, RK3568_OTP_WIN_EN, 1);
 
@@ -868,6 +870,8 @@ static void vop2_disable(struct vop2 *vop2)
 
         pm_runtime_put_sync(vop2->dev);
 
+        regcache_mark_dirty(vop2->map);
+
         clk_disable_unprepare(vop2->aclk);
         clk_disable_unprepare(vop2->hclk);
 }
@@ -1409,7 +1409,7 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio,
         trig = devm_iio_trigger_alloc(&indio->dev, "%s-dev%d-%s", indio->name,
                                       iio_device_id(indio), trigger_name);
         if (!trig)
-                return NULL;
+                return ERR_PTR(-ENOMEM);
 
         trig->dev.parent = indio->dev.parent;
         iio_trigger_set_drvdata(trig, indio);
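The change matters because callers of at91_adc_allocate_trigger() follow the kernel's ERR_PTR convention: NULL does not register as a failure under IS_ERR(), so returning it on allocation failure looked like success. A small userspace re-creation of that convention shows why:

#include <stdio.h>
#include <stdint.h>

/* Userspace re-creation of the kernel's ERR_PTR encoding, for illustration:
 * errors live in the last page of the address space, so IS_ERR(NULL) is false
 * and a NULL return would sail past the caller's error check.
 */
#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(intptr_t)(err))
#define PTR_ERR(ptr)    ((long)(intptr_t)(ptr))
#define IS_ERR(ptr)     ((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

int main(void)
{
        void *null_ret = NULL;
        void *err_ret = ERR_PTR(-12 /* -ENOMEM */);

        printf("IS_ERR(NULL) = %d\n", IS_ERR(null_ret));        /* 0: looks like success */
        printf("IS_ERR(ERR_PTR(-ENOMEM)) = %d, err = %ld\n",
               IS_ERR(err_ret), PTR_ERR(err_ret));              /* 1, -12 */
        return 0;
}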
@@ -296,6 +296,12 @@ static int pegasus_probe(struct usb_interface *intf,
         pegasus->intf = intf;
 
         pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
+        /* Sanity check that pipe's type matches endpoint's type */
+        if (usb_pipe_type_check(dev, pipe)) {
+                error = -EINVAL;
+                goto err_free_mem;
+        }
+
         pegasus->data_len = usb_maxpacket(dev, pipe);
 
         pegasus->data = usb_alloc_coherent(dev, pegasus->data_len, GFP_KERNEL,
@ -30,7 +30,8 @@ static irqreturn_t gh_msgq_rx_irq_handler(int irq, void *data)
|
||||
dev_warn(msgq->mbox.dev, "Failed to receive data: %d\n", gh_error);
|
||||
break;
|
||||
}
|
||||
mbox_chan_received_data(gh_msgq_chan(msgq), &rx_data);
|
||||
if (likely(gh_msgq_chan(msgq)->cl))
|
||||
mbox_chan_received_data(gh_msgq_chan(msgq), &rx_data);
}

return IRQ_HANDLED;
@ -62,6 +63,9 @@ static int gh_msgq_send_data(struct mbox_chan *chan, void *data)
enum gh_error gh_error;
bool ready;

if (!msgq->tx_ghrsc)
return -EOPNOTSUPP;

if (msgq_data->push)
tx_flags |= GH_HYPERCALL_MSGQ_TX_FLAGS_PUSH;

@ -80,7 +84,7 @@ static int gh_msgq_send_data(struct mbox_chan *chan, void *data)
* framework, then no other messages can be sent and nobody will know
* to retry this message.
*/
msgq->last_ret = gh_remap_error(gh_error);
msgq->last_ret = gh_error_remap(gh_error);

/**
* This message was successfully sent, but message queue isn't ready to
@ -112,7 +116,7 @@ static struct mbox_chan_ops gh_msgq_ops = {

/**
* gh_msgq_init() - Initialize a Gunyah message queue with an mbox_client
* @parent: optional, device parent used for the mailbox controller
* @parent: device parent used for the mailbox controller
* @msgq: Pointer to the gh_msgq to initialize
* @cl: A mailbox client to bind to the mailbox channel that the message queue creates
* @tx_ghrsc: optional, the transmission side of the message queue
@ -139,66 +143,68 @@ int gh_msgq_init(struct device *parent, struct gh_msgq *msgq, struct mbox_client
(rx_ghrsc && rx_ghrsc->type != GH_RESOURCE_TYPE_MSGQ_RX))
return -EINVAL;

msgq->tx_ghrsc = tx_ghrsc;
msgq->rx_ghrsc = rx_ghrsc;

msgq->mbox.dev = parent;
msgq->mbox.ops = &gh_msgq_ops;
msgq->mbox.num_chans = 1;
msgq->mbox.txdone_irq = true;
msgq->mbox.chans = &msgq->mbox_chan;

if (msgq->tx_ghrsc) {
ret = mbox_controller_register(&msgq->mbox);
if (ret)
return ret;

ret = mbox_bind_client(gh_msgq_chan(msgq), cl);
if (ret)
goto err_mbox;

if (tx_ghrsc) {
msgq->tx_ghrsc = tx_ghrsc;

ret = request_irq(msgq->tx_ghrsc->irq, gh_msgq_tx_irq_handler, 0, "gh_msgq_tx",
msgq);
if (ret)
goto err_chans;
goto err_tx_ghrsc;

tasklet_setup(&msgq->txdone_tasklet, gh_msgq_txdone_tasklet);
}

if (msgq->rx_ghrsc) {
if (rx_ghrsc) {
msgq->rx_ghrsc = rx_ghrsc;

ret = request_threaded_irq(msgq->rx_ghrsc->irq, NULL, gh_msgq_rx_irq_handler,
IRQF_ONESHOT, "gh_msgq_rx", msgq);
if (ret)
goto err_tx_irq;
}

tasklet_setup(&msgq->txdone_tasklet, gh_msgq_txdone_tasklet);

ret = mbox_controller_register(&msgq->mbox);
if (ret)
goto err_rx_irq;

ret = mbox_bind_client(gh_msgq_chan(msgq), cl);
if (ret)
goto err_mbox;

return 0;
err_mbox:
mbox_controller_unregister(&msgq->mbox);
err_rx_irq:
if (msgq->rx_ghrsc)
free_irq(msgq->rx_ghrsc->irq, msgq);
err_tx_irq:
if (msgq->tx_ghrsc)
free_irq(msgq->tx_ghrsc->irq, msgq);
err_chans:
kfree(msgq->mbox.chans);

msgq->rx_ghrsc = NULL;
err_tx_ghrsc:
msgq->tx_ghrsc = NULL;
err_mbox:
mbox_controller_unregister(&msgq->mbox);
return ret;
}
EXPORT_SYMBOL_GPL(gh_msgq_init);

void gh_msgq_remove(struct gh_msgq *msgq)
{
tasklet_kill(&msgq->txdone_tasklet);
mbox_controller_unregister(&msgq->mbox);

if (msgq->rx_ghrsc)
free_irq(msgq->rx_ghrsc->irq, msgq);

if (msgq->tx_ghrsc)
if (msgq->tx_ghrsc) {
tasklet_kill(&msgq->txdone_tasklet);
free_irq(msgq->tx_ghrsc->irq, msgq);
}

kfree(msgq->mbox.chans);
mbox_controller_unregister(&msgq->mbox);

msgq->rx_ghrsc = NULL;
msgq->tx_ghrsc = NULL;
}
EXPORT_SYMBOL_GPL(gh_msgq_remove);

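The reworked gh_msgq_init() above registers the mailbox controller and binds the client before requesting the per-direction IRQs, and the renamed error labels unwind those steps in reverse. A minimal caller might look like the sketch below; the two trailing resource parameters (cut off in the hunk header) and the gunyah header name are assumptions, and the client callbacks are hypothetical placeholders.

/* Hedged sketch of a gh_msgq_init() caller; not taken from this commit. */
#include <linux/gunyah.h>		/* header name is an assumption */
#include <linux/mailbox_client.h>

static void example_rx(struct mbox_client *cl, void *mssg)
{
	/* handle the received message queue data here */
}

static void example_tx_done(struct mbox_client *cl, void *mssg, int r)
{
	/* message queue signalled TX-done (txdone_irq = true above) */
}

static struct gh_msgq example_msgq;
static struct mbox_client example_cl = {
	.rx_callback = example_rx,
	.tx_done = example_tx_done,
};

static int example_msgq_setup(struct device *dev, struct gh_resource *tx,
			      struct gh_resource *rx)
{
	example_cl.dev = dev;
	/* Either resource may be NULL; only the side provided is wired up */
	return gh_msgq_init(dev, &example_msgq, &example_cl, tx, rx);
}
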
@ -410,6 +410,7 @@ static struct memstick_dev *memstick_alloc_card(struct memstick_host *host)
return card;
err_out:
host->card = old_card;
kfree_const(card->dev.kobj.name);
kfree(card);
return NULL;
}
@ -468,8 +469,10 @@ static void memstick_check(struct work_struct *work)
put_device(&card->dev);
host->card = NULL;
}
} else
} else {
kfree_const(card->dev.kobj.name);
kfree(card);
}
}

out_power_off:

@ -247,7 +247,7 @@ struct fastrpc_channel_ctx {
int domain_id;
int sesscount;
int vmcount;
u32 perms;
u64 perms;
struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS];
struct rpmsg_device *rpdev;
struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
@ -303,7 +303,7 @@ static void fastrpc_free_map(struct kref *ref)
perm.vmid = QCOM_SCM_VMID_HLOS;
perm.perm = QCOM_SCM_PERM_RWX;
err = qcom_scm_assign_mem(map->phys, map->size,
&(map->fl->cctx->vmperms[0].vmid), &perm, 1);
&map->fl->cctx->perms, &perm, 1);
if (err) {
dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
map->phys, map->size, err);
@ -754,10 +754,8 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
* If subsystem VMIDs are defined in DTSI, then do
* hyp_assign from HLOS to those VM(s)
*/
unsigned int perms = BIT(QCOM_SCM_VMID_HLOS);

map->attr = attr;
err = qcom_scm_assign_mem(map->phys, (u64)map->size, &perms,
err = qcom_scm_assign_mem(map->phys, (u64)map->size, &fl->cctx->perms,
fl->cctx->vmperms, fl->cctx->vmcount);
if (err) {
dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",

@ -351,8 +351,6 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
*/
case MMC_TIMING_SD_HS:
case MMC_TIMING_MMC_HS:
case MMC_TIMING_UHS_SDR12:
case MMC_TIMING_UHS_SDR25:
val &= ~SDHCI_CTRL_HISPD;
}
}

@ -28,7 +28,7 @@ static int ath10k_qmi_map_msa_permission(struct ath10k_qmi *qmi,
{
struct qcom_scm_vmperm dst_perms[3];
struct ath10k *ar = qmi->ar;
unsigned int src_perms;
u64 src_perms;
u32 perm_count;
int ret;

@ -60,7 +60,7 @@ static int ath10k_qmi_unmap_msa_permission(struct ath10k_qmi *qmi,
{
struct qcom_scm_vmperm dst_perms;
struct ath10k *ar = qmi->ar;
unsigned int src_perms;
u64 src_perms;
int ret;

src_perms = BIT(QCOM_SCM_VMID_MSS_MSA) | BIT(QCOM_SCM_VMID_WLAN);

@ -230,8 +230,8 @@ struct q6v5 {
bool has_qaccept_regs;
bool has_ext_cntl_regs;
bool has_vq6;
int mpss_perm;
int mba_perm;
u64 mpss_perm;
u64 mba_perm;
const char *hexagon_mdt_image;
int version;
};
@ -407,7 +407,7 @@ static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds,
}
}

static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm,
static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, u64 *current_perm,
bool local, bool remote, phys_addr_t addr,
size_t size)
{
@ -939,7 +939,7 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw,
unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
dma_addr_t phys;
void *metadata;
int mdata_perm;
u64 mdata_perm;
int xferop_ret;
size_t size;
void *ptr;

@ -30,7 +30,7 @@ struct qcom_rmtfs_mem {

unsigned int client_id;

unsigned int perms;
u64 perms;
};

static ssize_t qcom_rmtfs_mem_show(struct device *dev,

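The fastrpc, ath10k, q6v5 and rmtfs hunks above all make the same type change: the source-VM bitmap handed to qcom_scm_assign_mem() becomes a fixed-width u64. The call rewrites the bitmap in place on success, which is why these drivers now keep it in a u64 field and pass its address back on the next ownership transition. A sketch of the resulting calling convention (the address and permissions chosen here are placeholders):

/* Hedged sketch of the u64 source-VM bitmap convention. */
#include <linux/bits.h>
#include <linux/qcom_scm.h>

static int example_assign(phys_addr_t addr, size_t size)
{
	u64 srcvm = BIT(QCOM_SCM_VMID_HLOS);	/* current owner(s) */
	struct qcom_scm_vmperm dstvm = {
		.vmid = QCOM_SCM_VMID_MSS_MSA,
		.perm = QCOM_SCM_PERM_RW,
	};

	/* On success, srcvm is rewritten to BIT(QCOM_SCM_VMID_MSS_MSA),
	 * ready to be passed back when reassigning the memory to HLOS.
	 */
	return qcom_scm_assign_mem(addr, size, &srcvm, &dstvm, 1);
}
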
@ -327,6 +327,11 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused)
unsigned int current_mode;
unsigned long flags;
u32 reg;
int ret;

ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;

spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_GSTS);
@ -345,6 +350,8 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused)
}
spin_unlock_irqrestore(&dwc->lock, flags);

pm_runtime_put_sync(dwc->dev);

return 0;
}

@ -390,6 +397,11 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = s->private;
unsigned long flags;
u32 reg;
int ret;

ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;

spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
@ -409,6 +421,8 @@ static int dwc3_mode_show(struct seq_file *s, void *unused)
seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg));
}

pm_runtime_put_sync(dwc->dev);

return 0;
}

@ -458,6 +472,11 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = s->private;
unsigned long flags;
u32 reg;
int ret;

ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;

spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
@ -488,6 +507,8 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused)
seq_printf(s, "UNKNOWN %d\n", reg);
}

pm_runtime_put_sync(dwc->dev);

return 0;
}

@ -504,6 +525,7 @@ static ssize_t dwc3_testmode_write(struct file *file,
unsigned long flags;
u32 testmode = 0;
char buf[32];
int ret;

if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
@ -521,10 +543,16 @@ static ssize_t dwc3_testmode_write(struct file *file,
else
testmode = 0;

ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;

spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_set_test_mode(dwc, testmode);
spin_unlock_irqrestore(&dwc->lock, flags);

pm_runtime_put_sync(dwc->dev);

return count;
}

@ -543,12 +571,18 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
enum dwc3_link_state state;
u32 reg;
u8 speed;
int ret;

ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;

spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_GSTS);
if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
seq_puts(s, "Not available\n");
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return 0;
}

@ -561,6 +595,8 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused)
dwc3_gadget_hs_link_string(state));
spin_unlock_irqrestore(&dwc->lock, flags);

pm_runtime_put_sync(dwc->dev);

return 0;
}

@ -579,6 +615,7 @@ static ssize_t dwc3_link_state_write(struct file *file,
char buf[32];
u32 reg;
u8 speed;
int ret;

if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
@ -598,10 +635,15 @@ static ssize_t dwc3_link_state_write(struct file *file,
else
return -EINVAL;

ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;

spin_lock_irqsave(&dwc->lock, flags);
reg = dwc3_readl(dwc->regs, DWC3_GSTS);
if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) {
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return -EINVAL;
}

@ -611,12 +653,15 @@ static ssize_t dwc3_link_state_write(struct file *file,
if (speed < DWC3_DSTS_SUPERSPEED &&
state != DWC3_LINK_STATE_RECOV) {
spin_unlock_irqrestore(&dwc->lock, flags);
pm_runtime_put_sync(dwc->dev);
return -EINVAL;
}

dwc3_gadget_set_link_state(dwc, state);
spin_unlock_irqrestore(&dwc->lock, flags);

pm_runtime_put_sync(dwc->dev);

return count;
}

@ -640,6 +685,11 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
unsigned long flags;
u32 mdwidth;
u32 val;
int ret;

ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;

spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_TXFIFO);
@ -652,6 +702,8 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused)
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);

pm_runtime_put_sync(dwc->dev);

return 0;
}

@ -662,6 +714,11 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
unsigned long flags;
u32 mdwidth;
u32 val;
int ret;

ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;

spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_RXFIFO);
@ -674,6 +731,8 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused)
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);

pm_runtime_put_sync(dwc->dev);

return 0;
}

@ -683,12 +742,19 @@ static int dwc3_tx_request_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
int ret;

ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;

spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_TXREQQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);

pm_runtime_put_sync(dwc->dev);

return 0;
}

@ -698,12 +764,19 @@ static int dwc3_rx_request_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
int ret;

ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;

spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_RXREQQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);

pm_runtime_put_sync(dwc->dev);

return 0;
}

@ -713,12 +786,19 @@ static int dwc3_rx_info_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
int ret;

ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;

spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_RXINFOQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);

pm_runtime_put_sync(dwc->dev);

return 0;
}

@ -728,12 +808,19 @@ static int dwc3_descriptor_fetch_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
int ret;

ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;

spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_DESCFETCHQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);

pm_runtime_put_sync(dwc->dev);

return 0;
}

@ -743,12 +830,19 @@ static int dwc3_event_queue_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
u32 val;
int ret;

ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;

spin_lock_irqsave(&dwc->lock, flags);
val = dwc3_core_fifo_space(dep, DWC3_EVENTQ);
seq_printf(s, "%u\n", val);
spin_unlock_irqrestore(&dwc->lock, flags);

pm_runtime_put_sync(dwc->dev);

return 0;
}

@ -793,6 +887,11 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused)
struct dwc3 *dwc = dep->dwc;
unsigned long flags;
int i;
int ret;

ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;

spin_lock_irqsave(&dwc->lock, flags);
if (dep->number <= 1) {
@ -822,6 +921,8 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused)
out:
spin_unlock_irqrestore(&dwc->lock, flags);

pm_runtime_put_sync(dwc->dev);

return 0;
}

@ -834,6 +935,11 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused)
u32 lower_32_bits;
u32 upper_32_bits;
u32 reg;
int ret;

ret = pm_runtime_resume_and_get(dwc->dev);
if (ret < 0)
return ret;

spin_lock_irqsave(&dwc->lock, flags);
reg = DWC3_GDBGLSPMUX_EPSELECT(dep->number);
@ -846,6 +952,8 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused)
seq_printf(s, "0x%016llx\n", ep_info);
spin_unlock_irqrestore(&dwc->lock, flags);

pm_runtime_put_sync(dwc->dev);

return 0;
}

@ -905,6 +1013,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
dwc->regset->regs = dwc3_regs;
dwc->regset->nregs = ARRAY_SIZE(dwc3_regs);
dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START;
dwc->regset->dev = dwc->dev;

root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root);
dwc->debug_root = root;

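Every dwc3 debugfs handler in the hunks above gains the same bracket: pm_runtime_resume_and_get() before any register access and pm_runtime_put_sync() on every exit path, including the early error returns. Condensed into one sketch (dwc3_example_show() is not any single function from the diff):

/* The shape each dwc3 debugfs handler takes after this change. */
static int dwc3_example_show(struct seq_file *s, void *unused)
{
	struct dwc3 *dwc = s->private;
	unsigned long flags;
	u32 reg;
	int ret;

	/* Power the controller up, and keep it up, before touching registers */
	ret = pm_runtime_resume_and_get(dwc->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&dwc->lock, flags);
	reg = dwc3_readl(dwc->regs, DWC3_GSTS);
	seq_printf(s, "%08x\n", reg);
	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Drop the reference taken above; may suspend the controller again */
	pm_runtime_put_sync(dwc->dev);

	return 0;
}
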
@ -6,6 +6,7 @@ config GUNYAH
depends on MAILBOX
select GUNYAH_PLATFORM_HOOKS
select AUXILIARY_BUS
imply GUNYAH_QCOM_PLATFORM if ARCH_QCOM
help
The Gunyah drivers are the helper interfaces that run in a guest VM
such as basic inter-VM IPC and signaling mechanisms, and higher level
@ -17,6 +18,18 @@ config GUNYAH
config GUNYAH_PLATFORM_HOOKS
tristate

config GUNYAH_QCOM_PLATFORM
tristate "Support for Gunyah on Qualcomm platforms"
depends on GUNYAH
select GUNYAH_PLATFORM_HOOKS
select QCOM_SCM
help
Enable support for interacting with Gunyah on Qualcomm
platforms. Interaction with Qualcomm firmware requires
extra platform-specific support.

Say Y/M here to use Gunyah on Qualcomm platforms.

config GUNYAH_VCPU
tristate "Runnable Gunyah vCPUs"
depends on GUNYAH

@ -1,9 +1,10 @@
# SPDX-License-Identifier: GPL-2.0

obj-$(CONFIG_GUNYAH_PLATFORM_HOOKS) += gunyah_platform_hooks.o
obj-$(CONFIG_GUNYAH_QCOM_PLATFORM) += gunyah_qcom.o

gunyah_rsc_mgr-y += rsc_mgr.o rsc_mgr_rpc.o vm_mgr.o vm_mgr_mm.o
obj-$(CONFIG_GUNYAH) += gunyah_rsc_mgr.o
gunyah-y += rsc_mgr.o rsc_mgr_rpc.o vm_mgr.o vm_mgr_mm.o
obj-$(CONFIG_GUNYAH) += gunyah.o

obj-$(CONFIG_GUNYAH_VCPU) += gunyah_vcpu.o
obj-$(CONFIG_GUNYAH_IRQFD) += gunyah_irqfd.o

@ -35,13 +35,17 @@ static struct gh_vm_io_handler_ops io_ops = {
static long gh_ioeventfd_bind(struct gh_vm_function_instance *f)
{
const struct gh_fn_ioeventfd_arg *args = f->argp;
struct eventfd_ctx *ctx = NULL;
struct gh_ioeventfd *iofd;
struct eventfd_ctx *ctx;
int ret;

if (f->arg_size != sizeof(*args))
return -EINVAL;

/* All other flag bits are reserved for future use */
if (args->flags & ~GH_IOEVENTFD_FLAGS_DATAMATCH)
return -EINVAL;

/* must be natural-word sized, or 0 to ignore length */
switch (args->len) {
case 0:
@ -55,15 +59,11 @@ static long gh_ioeventfd_bind(struct gh_vm_function_instance *f)
}

/* check for range overflow */
if (args->addr + args->len < args->addr)
if (overflows_type(args->addr + args->len, u64))
return -EINVAL;

/* ioeventfd with no length can't be combined with DATAMATCH */
if (!args->len && (args->flags & GH_IOEVENTFD_DATAMATCH))
return -EINVAL;

/* All other flag bits are reserved for future use */
if (args->flags & ~GH_IOEVENTFD_DATAMATCH)
if (!args->len && (args->flags & GH_IOEVENTFD_FLAGS_DATAMATCH))
return -EINVAL;

ctx = eventfd_ctx_fdget(args->fd);
@ -81,7 +81,7 @@ static long gh_ioeventfd_bind(struct gh_vm_function_instance *f)

iofd->ctx = ctx;

if (args->flags & GH_IOEVENTFD_DATAMATCH) {
if (args->flags & GH_IOEVENTFD_FLAGS_DATAMATCH) {
iofd->io_handler.datamatch = true;
iofd->io_handler.len = args->len;
iofd->io_handler.data = args->datamatch;
@ -111,7 +111,20 @@ static void gh_ioevent_unbind(struct gh_vm_function_instance *f)
kfree(iofd);
}

DECLARE_GH_VM_FUNCTION_INIT(ioeventfd, GH_FN_IOEVENTFD,
gh_ioeventfd_bind, gh_ioevent_unbind);
MODULE_DESCRIPTION("Gunyah ioeventfds");
static bool gh_ioevent_compare(const struct gh_vm_function_instance *f,
const void *arg, size_t size)
{
const struct gh_fn_ioeventfd_arg *instance = f->argp,
*other = arg;

if (sizeof(*other) != size)
return false;

return instance->addr == other->addr;
}

DECLARE_GH_VM_FUNCTION_INIT(ioeventfd, GH_FN_IOEVENTFD, 3,
gh_ioeventfd_bind, gh_ioevent_unbind,
gh_ioevent_compare);
MODULE_DESCRIPTION("Gunyah ioeventfd VM Function");
MODULE_LICENSE("GPL");

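DECLARE_GH_VM_FUNCTION_INIT() now takes an extra numeric argument (3 here; the irqfd and vcpu functions below pass 2 and 1, and the diff does not show what the number encodes) plus a compare callback, which the function-removal path (gh_vm_rm_function_instance() further below) uses instead of a raw memcmp() of the argument buffer. A compare callback for a hypothetical new function type would follow the same shape:

/* Hedged sketch; "widget" and struct gh_fn_widget_arg are hypothetical. */
struct gh_fn_widget_arg {
	u32 label;
};

static bool gh_widget_compare(const struct gh_vm_function_instance *f,
			      const void *arg, size_t size)
{
	const struct gh_fn_widget_arg *instance = f->argp, *other = arg;

	/* A removal request with a mismatched argument size can never match */
	if (sizeof(*other) != size)
		return false;

	/* Two instances are "the same" when their labels match */
	return instance->label == other->label;
}
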
@ -30,13 +30,11 @@ static int irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, v
{
struct gh_irqfd *irqfd = container_of(wait, struct gh_irqfd, wait);
__poll_t flags = key_to_poll(key);
u64 enable_mask = GH_BELL_NONBLOCK;
u64 old_flags;
int ret = 0;

if (flags & EPOLLIN) {
if (irqfd->ghrsc) {
ret = gh_hypercall_bell_send(irqfd->ghrsc->capid, enable_mask, &old_flags);
ret = gh_hypercall_bell_send(irqfd->ghrsc->capid, 1, NULL);
if (ret)
pr_err_ratelimited("Failed to inject interrupt %d: %d\n",
irqfd->ticket.label, ret);
@ -54,28 +52,33 @@ static void irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh, p
add_wait_queue(wqh, &irq_ctx->wait);
}

static int gh_irqfd_populate(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc)
static bool gh_irqfd_populate(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc)
{
struct gh_irqfd *irqfd = container_of(ticket, struct gh_irqfd, ticket);
u64 enable_mask = GH_BELL_NONBLOCK;
u64 ack_mask = ~0;
int ret = 0;
int ret;

if (irqfd->ghrsc) {
pr_warn("irqfd%d already got a Gunyah resource. Check if multiple resources with same label were configured.\n",
irqfd->ticket.label);
return -1;
return false;
}

irqfd->ghrsc = ghrsc;
if (irqfd->level) {
ret = gh_hypercall_bell_set_mask(irqfd->ghrsc->capid, enable_mask, ack_mask);
/* Configure the bell to trigger when bit 0 is asserted (see
* irq_wakeup) and for the bell to automatically clear bit 0 once
* received by the VM (ack_mask). We need to make sure bit 0 is
* cleared right away, otherwise the line will never be deasserted.
* Emulating an edge trigger interrupt does not need to set either
* mask because the irq is listed only once per gh_hypercall_bell_send.
*/
ret = gh_hypercall_bell_set_mask(irqfd->ghrsc->capid, 1, 1);
if (ret)
pr_warn("irq %d couldn't be set as level triggered. Might cause IRQ storm if asserted\n",
irqfd->ticket.label);
}

return 0;
return true;
}

static void gh_irqfd_unpopulate(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc)
@ -98,7 +101,7 @@ static long gh_irqfd_bind(struct gh_vm_function_instance *f)
return -EINVAL;

/* All other flag bits are reserved for future use */
if (args->flags & ~GH_IRQFD_LEVEL)
if (args->flags & ~GH_IRQFD_FLAGS_LEVEL)
return -EINVAL;

irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
@ -120,7 +123,7 @@ static long gh_irqfd_bind(struct gh_vm_function_instance *f)
goto err_fdput;
}

if (args->flags & GH_IRQFD_LEVEL)
if (args->flags & GH_IRQFD_FLAGS_LEVEL)
irqfd->level = true;

init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
@ -159,6 +162,19 @@ static void gh_irqfd_unbind(struct gh_vm_function_instance *f)
kfree(irqfd);
}

DECLARE_GH_VM_FUNCTION_INIT(irqfd, GH_FN_IRQFD, gh_irqfd_bind, gh_irqfd_unbind);
MODULE_DESCRIPTION("Gunyah irqfds");
static bool gh_irqfd_compare(const struct gh_vm_function_instance *f,
const void *arg, size_t size)
{
const struct gh_fn_irqfd_arg *instance = f->argp,
*other = arg;

if (sizeof(*other) != size)
return false;

return instance->label == other->label;
}

DECLARE_GH_VM_FUNCTION_INIT(irqfd, GH_FN_IRQFD, 2, gh_irqfd_bind, gh_irqfd_unbind,
gh_irqfd_compare);
MODULE_DESCRIPTION("Gunyah irqfd VM Function");
MODULE_LICENSE("GPL");

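On the userspace side an irqfd is driven through an ordinary eventfd: a nonzero 8-byte write makes the descriptor readable, the wait-queue entry above observes EPOLLIN, and gh_hypercall_bell_send() rings the doorbell into the VM. A sketch, assuming the eventfd was already registered as an irqfd for the VM:

/* Hedged userspace sketch; "irqfd_efd" is an eventfd previously
 * registered as this VM's irqfd.
 */
#include <stdint.h>
#include <unistd.h>

static int assert_vm_irq(int irqfd_efd)
{
	uint64_t one = 1;

	/* eventfd counters are written as 8 bytes; a nonzero add makes the
	 * fd readable, which the kernel side observes as EPOLLIN.
	 */
	if (write(irqfd_efd, &one, sizeof(one)) != (ssize_t)sizeof(one))
		return -1;
	return 0;
}
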
147
drivers/virt/gunyah/gunyah_qcom.c
Normal file
@ -0,0 +1,147 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/

#include <linux/arm-smccc.h>
#include <linux/gunyah_rsc_mgr.h>
#include <linux/module.h>
#include <linux/qcom_scm.h>
#include <linux/types.h>
#include <linux/uuid.h>

#define QCOM_SCM_RM_MANAGED_VMID 0x3A
#define QCOM_SCM_MAX_MANAGED_VMID 0x3F

static int qcom_scm_gh_rm_pre_mem_share(struct gh_rm *rm, struct gh_rm_mem_parcel *mem_parcel)
{
struct qcom_scm_vmperm *new_perms;
u64 src, src_cpy;
int ret = 0, i, n;
u16 vmid;

new_perms = kcalloc(mem_parcel->n_acl_entries, sizeof(*new_perms), GFP_KERNEL);
if (!new_perms)
return -ENOMEM;

for (n = 0; n < mem_parcel->n_acl_entries; n++) {
vmid = le16_to_cpu(mem_parcel->acl_entries[n].vmid);
if (vmid <= QCOM_SCM_MAX_MANAGED_VMID)
new_perms[n].vmid = vmid;
else
new_perms[n].vmid = QCOM_SCM_RM_MANAGED_VMID;
if (mem_parcel->acl_entries[n].perms & GH_RM_ACL_X)
new_perms[n].perm |= QCOM_SCM_PERM_EXEC;
if (mem_parcel->acl_entries[n].perms & GH_RM_ACL_W)
new_perms[n].perm |= QCOM_SCM_PERM_WRITE;
if (mem_parcel->acl_entries[n].perms & GH_RM_ACL_R)
new_perms[n].perm |= QCOM_SCM_PERM_READ;
}

src = (1ull << QCOM_SCM_VMID_HLOS);

for (i = 0; i < mem_parcel->n_mem_entries; i++) {
src_cpy = src;
ret = qcom_scm_assign_mem(le64_to_cpu(mem_parcel->mem_entries[i].ipa_base),
le64_to_cpu(mem_parcel->mem_entries[i].size),
&src_cpy, new_perms, mem_parcel->n_acl_entries);
if (ret) {
src = 0;
for (n = 0; n < mem_parcel->n_acl_entries; n++) {
vmid = le16_to_cpu(mem_parcel->acl_entries[n].vmid);
if (vmid <= QCOM_SCM_MAX_MANAGED_VMID)
src |= (1ull << vmid);
else
src |= (1ull << QCOM_SCM_RM_MANAGED_VMID);
}

new_perms[0].vmid = QCOM_SCM_VMID_HLOS;

for (i--; i >= 0; i--) {
src_cpy = src;
WARN_ON_ONCE(qcom_scm_assign_mem(
le64_to_cpu(mem_parcel->mem_entries[i].ipa_base),
le64_to_cpu(mem_parcel->mem_entries[i].size),
&src_cpy, new_perms, 1));
}
break;
}
}

kfree(new_perms);
return ret;
}

static int qcom_scm_gh_rm_post_mem_reclaim(struct gh_rm *rm, struct gh_rm_mem_parcel *mem_parcel)
{
struct qcom_scm_vmperm new_perms;
u64 src = 0, src_cpy;
int ret = 0, i, n;
u16 vmid;

new_perms.vmid = QCOM_SCM_VMID_HLOS;
new_perms.perm = QCOM_SCM_PERM_EXEC | QCOM_SCM_PERM_WRITE | QCOM_SCM_PERM_READ;

for (n = 0; n < mem_parcel->n_acl_entries; n++) {
vmid = le16_to_cpu(mem_parcel->acl_entries[n].vmid);
if (vmid <= QCOM_SCM_MAX_MANAGED_VMID)
src |= (1ull << vmid);
else
src |= (1ull << QCOM_SCM_RM_MANAGED_VMID);
}

for (i = 0; i < mem_parcel->n_mem_entries; i++) {
src_cpy = src;
ret = qcom_scm_assign_mem(le64_to_cpu(mem_parcel->mem_entries[i].ipa_base),
le64_to_cpu(mem_parcel->mem_entries[i].size),
&src_cpy, &new_perms, 1);
WARN_ON_ONCE(ret);
}

return ret;
}

static struct gh_rm_platform_ops qcom_scm_gh_rm_platform_ops = {
.pre_mem_share = qcom_scm_gh_rm_pre_mem_share,
.post_mem_reclaim = qcom_scm_gh_rm_post_mem_reclaim,
};

/* {19bd54bd-0b37-571b-946f-609b54539de6} */
static const uuid_t QCOM_EXT_UUID =
UUID_INIT(0x19bd54bd, 0x0b37, 0x571b, 0x94, 0x6f, 0x60, 0x9b, 0x54, 0x53, 0x9d, 0xe6);

#define GH_QCOM_EXT_CALL_UUID_ID ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
ARM_SMCCC_OWNER_VENDOR_HYP, 0x3f01)

static bool gh_has_qcom_extensions(void)
{
struct arm_smccc_res res;
uuid_t uuid;

arm_smccc_1_1_smc(GH_QCOM_EXT_CALL_UUID_ID, &res);

((u32 *)&uuid.b[0])[0] = lower_32_bits(res.a0);
((u32 *)&uuid.b[0])[1] = lower_32_bits(res.a1);
((u32 *)&uuid.b[0])[2] = lower_32_bits(res.a2);
((u32 *)&uuid.b[0])[3] = lower_32_bits(res.a3);

return uuid_equal(&uuid, &QCOM_EXT_UUID);
}

static int __init qcom_gh_platform_hooks_register(void)
{
if (!gh_has_qcom_extensions())
return -ENODEV;

return gh_rm_register_platform_ops(&qcom_scm_gh_rm_platform_ops);
}

static void __exit qcom_gh_platform_hooks_unregister(void)
{
gh_rm_unregister_platform_ops(&qcom_scm_gh_rm_platform_ops);
}

module_init(qcom_gh_platform_hooks_register);
module_exit(qcom_gh_platform_hooks_unregister);
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Platform Hooks for Gunyah");
MODULE_LICENSE("GPL");

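The new gunyah_qcom.c registers its SCM-backed hooks only after probing for the Qualcomm vendor-hyp UUID, so loading the module on other hardware fails cleanly with -ENODEV. A different platform would supply its own pair of callbacks through the same interface; a minimal hypothetical skeleton ("foo" is a stand-in, only the gh_rm_platform_ops interface is taken from the file above):

#include <linux/gunyah_rsc_mgr.h>
#include <linux/module.h>

static int foo_pre_mem_share(struct gh_rm *rm, struct gh_rm_mem_parcel *mem_parcel)
{
	/* Make the parcel's backing memory accessible to the other VM(s) */
	return 0;
}

static int foo_post_mem_reclaim(struct gh_rm *rm, struct gh_rm_mem_parcel *mem_parcel)
{
	/* Undo whatever foo_pre_mem_share() did */
	return 0;
}

static struct gh_rm_platform_ops foo_platform_ops = {
	.pre_mem_share = foo_pre_mem_share,
	.post_mem_reclaim = foo_post_mem_reclaim,
};

static int __init foo_platform_hooks_register(void)
{
	return gh_rm_register_platform_ops(&foo_platform_ops);
}
module_init(foo_platform_hooks_register);

static void __exit foo_platform_hooks_unregister(void)
{
	gh_rm_unregister_platform_ops(&foo_platform_ops);
}
module_exit(foo_platform_hooks_unregister);

MODULE_LICENSE("GPL");
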
@ -41,19 +41,6 @@ struct gh_vcpu {
struct kref kref;
};

/* VCPU is ready to run */
#define GH_VCPU_STATE_READY 0
/* VCPU is sleeping until an interrupt arrives */
#define GH_VCPU_STATE_EXPECTS_WAKEUP 1
/* VCPU is powered off */
#define GH_VCPU_STATE_POWERED_OFF 2
/* VCPU is blocked in EL2 for unspecified reason */
#define GH_VCPU_STATE_BLOCKED 3
/* VCPU has returned for MMIO READ */
#define GH_VCPU_ADDRSPACE_VMMIO_READ 4
/* VCPU has returned for MMIO WRITE */
#define GH_VCPU_ADDRSPACE_VMMIO_WRITE 5

static void vcpu_release(struct kref *kref)
{
struct gh_vcpu *vcpu = container_of(kref, struct gh_vcpu, kref);
@ -81,6 +68,9 @@ static bool gh_handle_mmio(struct gh_vcpu *vcpu,
len = vcpu_run_resp->state_data[1],
data = vcpu_run_resp->state_data[2];

if (WARN_ON(len > sizeof(u64)))
len = sizeof(u64);

if (vcpu_run_resp->state == GH_VCPU_ADDRSPACE_VMMIO_READ) {
vcpu->vcpu_run->mmio.is_write = 0;
/* Record that we need to give vCPU user's supplied value next gh_vcpu_run() */
@ -188,6 +178,8 @@ static int gh_vcpu_run(struct gh_vcpu *vcpu)
vcpu->state = GH_VCPU_READY;
break;
case GH_VCPU_MMIO_READ:
if (unlikely(vcpu->mmio_read_len > sizeof(state_data[0])))
vcpu->mmio_read_len = sizeof(state_data[0]);
memcpy(&state_data[0], vcpu->vcpu_run->mmio.data, vcpu->mmio_read_len);
vcpu->state = GH_VCPU_READY;
break;
@ -205,7 +197,6 @@ static int gh_vcpu_run(struct gh_vcpu *vcpu)

gh_error = gh_hypercall_vcpu_run(vcpu->rsc->capid, state_data, &vcpu_run_resp);
if (gh_error == GH_ERROR_OK) {
ret = 0;
switch (vcpu_run_resp.state) {
case GH_VCPU_STATE_READY:
if (need_resched())
@ -245,15 +236,15 @@ static int gh_vcpu_run(struct gh_vcpu *vcpu)
break;
default:
pr_warn_ratelimited("Unknown vCPU state: %llx\n",
vcpu_run_resp.state);
vcpu_run_resp.sized_state);
schedule();
break;
}
} else if (gh_error == GH_ERROR_RETRY) {
schedule();
ret = 0;
} else
ret = gh_remap_error(gh_error);
} else {
ret = gh_error_remap(gh_error);
}
}

out:
@ -323,14 +314,16 @@ static const struct file_operations gh_vcpu_fops = {
.mmap = gh_vcpu_mmap,
};

static int gh_vcpu_populate(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc)
static bool gh_vcpu_populate(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc)
{
struct gh_vcpu *vcpu = container_of(ticket, struct gh_vcpu, ticket);
int ret;

mutex_lock(&vcpu->run_lock);
if (vcpu->rsc) {
ret = -1;
pr_warn("vcpu%d already got a Gunyah resource. Check if multiple resources with same label were configured.\n",
vcpu->ticket.label);
ret = -EEXIST;
goto out;
}

@ -344,7 +337,7 @@ static int gh_vcpu_populate(struct gh_vm_resource_ticket *ticket, struct gh_reso

out:
mutex_unlock(&vcpu->run_lock);
return ret;
return !ret;
}

static void gh_vcpu_unpopulate(struct gh_vm_resource_ticket *ticket,
@ -399,15 +392,9 @@ static long gh_vcpu_bind(struct gh_vm_function_instance *f)
if (r)
goto err_destroy_page;

fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
r = fd;
goto err_remove_vcpu;
}

if (!gh_vm_get(f->ghvm)) {
r = -ENODEV;
goto err_put_fd;
goto err_remove_resource_ticket;
}
vcpu->ghvm = f->ghvm;

@ -421,23 +408,30 @@ static long gh_vcpu_bind(struct gh_vm_function_instance *f)
goto err_put_gh_vm;

kref_get(&vcpu->kref);
snprintf(name, sizeof(name), "gh-vcpu:%d", vcpu->ticket.label);

fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0) {
r = fd;
goto err_notifier;
}

snprintf(name, sizeof(name), "gh-vcpu:%u", vcpu->ticket.label);
file = anon_inode_getfile(name, &gh_vcpu_fops, vcpu, O_RDWR);
if (IS_ERR(file)) {
r = PTR_ERR(file);
goto err_notifier;
goto err_put_fd;
}

fd_install(fd, file);

return fd;
err_put_fd:
put_unused_fd(fd);
err_notifier:
gh_rm_notifier_unregister(f->rm, &vcpu->nb);
err_put_gh_vm:
gh_vm_put(vcpu->ghvm);
err_put_fd:
put_unused_fd(fd);
err_remove_vcpu:
err_remove_resource_ticket:
gh_vm_remove_resource_ticket(f->ghvm, &vcpu->ticket);
err_destroy_page:
free_page((unsigned long)vcpu->vcpu_run);
@ -457,6 +451,18 @@ static void gh_vcpu_unbind(struct gh_vm_function_instance *f)
kref_put(&vcpu->kref, vcpu_release);
}

DECLARE_GH_VM_FUNCTION_INIT(vcpu, GH_FN_VCPU, gh_vcpu_bind, gh_vcpu_unbind);
MODULE_DESCRIPTION("Gunyah vCPU Driver");
static bool gh_vcpu_compare(const struct gh_vm_function_instance *f,
const void *arg, size_t size)
{
const struct gh_fn_vcpu_arg *instance = f->argp,
*other = arg;

if (sizeof(*other) != size)
return false;

return instance->id == other->id;
}

DECLARE_GH_VM_FUNCTION_INIT(vcpu, GH_FN_VCPU, 1, gh_vcpu_bind, gh_vcpu_unbind, gh_vcpu_compare);
MODULE_DESCRIPTION("Gunyah vCPU Function");
MODULE_LICENSE("GPL");

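The reordering in gh_vcpu_bind() above follows the standard anon-inode rule: allocate the fd number after every other failable step, because fd_install() cannot be undone while put_unused_fd() can. The generic shape of that pattern, as a sketch (the name string, fops and private data are placeholders):

#include <linux/anon_inodes.h>
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/file.h>

static int example_create_fd(const struct file_operations *fops, void *priv)
{
	struct file *file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	file = anon_inode_getfile("example", fops, priv, O_RDWR);
	if (IS_ERR(file)) {
		/* fd number not yet visible to userspace, safe to return it */
		put_unused_fd(fd);
		return PTR_ERR(file);
	}

	fd_install(fd, file);	/* point of no return */
	return fd;
}
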
@ -126,7 +126,8 @@ struct gh_rm_connection {
* @dev: pointer to device
* @tx_ghrsc: message queue resource to TX to RM
* @rx_ghrsc: message queue resource to RX from RM
* @msgq: mailbox instance of above
* @msgq: mailbox instance of TX/RX resources above
* @msgq_client: mailbox client of above msgq
* @active_rx_connection: ongoing gh_rm_connection for which we're receiving fragments
* @last_tx_ret: return value of last mailbox tx
* @call_xarray: xarray to allocate & lookup sequence IDs for Request/Response flows
@ -160,7 +161,7 @@ struct gh_rm {

/**
* gh_rm_remap_error() - Remap Gunyah resource manager errors into a Linux error code
* @gh_error: "Standard" return value from Gunyah resource manager
* @rm_error: "Standard" return value from Gunyah resource manager
*/
static inline int gh_rm_remap_error(enum gh_rm_error rm_error)
{
@ -230,7 +231,7 @@ static int gh_rm_irq_domain_alloc(struct irq_domain *d, unsigned int virq, unsig
u32 gh_virq = spec->gh_virq;
int ret;

if (nr_irqs != 1 || gh_virq == U32_MAX)
if (nr_irqs != 1)
return -EINVAL;

chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
@ -263,16 +264,13 @@ static int gh_rm_irq_domain_alloc(struct irq_domain *d, unsigned int virq, unsig

static void gh_rm_irq_domain_free_single(struct irq_domain *d, unsigned int virq)
{
struct gh_irq_chip_data *chip_data;
struct irq_data *irq_data;

irq_data = irq_domain_get_irq_data(d, virq);
if (!irq_data)
return;

chip_data = irq_data->chip_data;

kfree(chip_data);
kfree(irq_data->chip_data);
irq_data->chip_data = NULL;
}

@ -292,6 +290,7 @@ static const struct irq_domain_ops gh_rm_irq_domain_ops = {
struct gh_resource *gh_rm_alloc_resource(struct gh_rm *rm, struct gh_rm_hyp_resource *hyp_resource)
{
struct gh_resource *ghrsc;
int ret;

ghrsc = kzalloc(sizeof(*ghrsc), GFP_KERNEL);
if (!ghrsc)
@ -301,17 +300,18 @@ struct gh_resource *gh_rm_alloc_resource(struct gh_rm *rm, struct gh_rm_hyp_reso
ghrsc->capid = le64_to_cpu(hyp_resource->cap_id);
ghrsc->irq = IRQ_NOTCONNECTED;
ghrsc->rm_label = le32_to_cpu(hyp_resource->resource_label);
if (hyp_resource->virq && le32_to_cpu(hyp_resource->virq) != U32_MAX) {
if (hyp_resource->virq) {
struct gh_irq_chip_data irq_data = {
.gh_virq = le32_to_cpu(hyp_resource->virq),
};

ghrsc->irq = irq_domain_alloc_irqs(rm->irq_domain, 1, NUMA_NO_NODE, &irq_data);
if (ghrsc->irq < 0) {
ret = irq_domain_alloc_irqs(rm->irq_domain, 1, NUMA_NO_NODE, &irq_data);
if (ret < 0) {
dev_err(rm->dev,
"Failed to allocate interrupt for resource %d label: %d: %d\n",
ghrsc->type, ghrsc->rm_label, ghrsc->irq);
ghrsc->irq = IRQ_NOTCONNECTED;
} else {
ghrsc->irq = ret;
}
}

@ -379,9 +379,9 @@ static void gh_rm_notif_work(struct work_struct *work)
notification.work);
struct gh_rm *rm = connection->notification.rm;

blocking_notifier_call_chain(&rm->nh, connection->msg_id, connection->payload);
blocking_notifier_call_chain(&rm->nh, le32_to_cpu(connection->msg_id), connection->payload);

gh_rm_put(rm);
put_device(rm->dev);
kfree(connection->payload);
kfree(connection);
}
@ -402,14 +402,14 @@ static void gh_rm_process_notif(struct gh_rm *rm, void *msg, size_t msg_size)
connection->type = RM_RPC_TYPE_NOTIF;
connection->msg_id = hdr->msg_id;

gh_rm_get(rm);
get_device(rm->dev);
connection->notification.rm = rm;
INIT_WORK(&connection->notification.work, gh_rm_notif_work);

ret = gh_rm_init_connection_payload(connection, msg, sizeof(*hdr), msg_size);
if (ret) {
dev_err(rm->dev, "Failed to initialize connection for notification: %d\n", ret);
gh_rm_put(rm);
put_device(rm->dev);
kfree(connection);
return;
}
@ -483,7 +483,7 @@ static void gh_rm_try_complete_connection(struct gh_rm *rm)
schedule_work(&connection->notification.work);
break;
default:
dev_err_ratelimited(rm->dev, "Invalid message type (%d) received\n",
dev_err_ratelimited(rm->dev, "Invalid message type (%u) received\n",
connection->type);
gh_rm_abort_connection(rm);
break;
@ -537,11 +537,11 @@ static void gh_rm_msgq_tx_done(struct mbox_client *cl, void *mssg, int r)
}

static int gh_rm_send_request(struct gh_rm *rm, u32 message_id,
const void *req_buff, size_t req_buf_size,
const void *req_buf, size_t req_buf_size,
struct gh_rm_connection *connection)
{
size_t buf_size_remaining = req_buf_size;
const void *req_buf_curr = req_buff;
const void *req_buf_curr = req_buf;
struct gh_msgq_tx_data *msg;
struct gh_rm_rpc_hdr *hdr, hdr_template;
u32 cont_fragments = 0;
@ -550,8 +550,8 @@ static int gh_rm_send_request(struct gh_rm *rm, u32 message_id,
int ret;

if (req_buf_size > GH_RM_MAX_NUM_FRAGMENTS * GH_RM_MAX_MSG_SIZE) {
dev_warn(rm->dev, "Limit exceeded for the number of fragments: %u\n",
cont_fragments);
dev_warn(rm->dev, "Limit (%lu bytes) exceeded for the maximum message size: %lu\n",
GH_RM_MAX_NUM_FRAGMENTS * GH_RM_MAX_MSG_SIZE, req_buf_size);
dump_stack();
return -E2BIG;
}
@ -561,7 +561,7 @@ static int gh_rm_send_request(struct gh_rm *rm, u32 message_id,

hdr_template.api = RM_RPC_API;
hdr_template.type = FIELD_PREP(RM_RPC_TYPE_MASK, RM_RPC_TYPE_REQUEST) |
FIELD_PREP(RM_RPC_FRAGMENTS_MASK, cont_fragments);
FIELD_PREP(RM_RPC_FRAGMENTS_MASK, cont_fragments);
hdr_template.seq = cpu_to_le16(connection->reply.seq);
hdr_template.msg_id = cpu_to_le32(message_id);

@ -569,7 +569,6 @@ static int gh_rm_send_request(struct gh_rm *rm, u32 message_id,
if (ret)
return ret;

/* Consider also the 'request' packet for the loop count */
do {
msg = kmem_cache_zalloc(rm->cache, GFP_KERNEL);
if (!msg) {
@ -578,11 +577,11 @@ static int gh_rm_send_request(struct gh_rm *rm, u32 message_id,
}

/* Fill header */
hdr = (struct gh_rm_rpc_hdr *)msg->data;
hdr = (struct gh_rm_rpc_hdr *)&msg->data[0];
*hdr = hdr_template;

/* Copy payload */
payload = hdr + 1;
payload = &msg->data[0] + sizeof(*hdr);
payload_size = min(buf_size_remaining, GH_RM_MAX_MSG_SIZE);
memcpy(payload, req_buf_curr, payload_size);
req_buf_curr += payload_size;
@ -616,23 +615,23 @@ static int gh_rm_send_request(struct gh_rm *rm, u32 message_id,
* gh_rm_call: Achieve request-response type communication with RPC
* @rm: Pointer to Gunyah resource manager internal data
* @message_id: The RM RPC message-id
* @req_buff: Request buffer that contains the payload
* @req_buf: Request buffer that contains the payload
* @req_buf_size: Total size of the payload
* @resp_buf: Pointer to a response buffer
* @resp_buf_size: Size of the response buffer
*
* Make a request to the RM-VM and wait for reply back. For a successful
* Make a request to the Resource Manager and wait for reply back. For a successful
* response, the function returns the payload. The size of the payload is set in
* resp_buf_size. The resp_buf should be freed by the caller when 0 is returned
* resp_buf_size. The resp_buf must be freed by the caller when 0 is returned
* and resp_buf_size != 0.
*
* req_buff should be not NULL for req_buf_size >0. If req_buf_size == 0,
* req_buff *can* be NULL and no additional payload is sent.
* req_buf should be not NULL for req_buf_size >0. If req_buf_size == 0,
* req_buf *can* be NULL and no additional payload is sent.
*
* Context: Process context. Will sleep waiting for reply.
* Return: 0 on success. <0 if error.
*/
int gh_rm_call(struct gh_rm *rm, u32 message_id, void *req_buff, size_t req_buf_size,
int gh_rm_call(struct gh_rm *rm, u32 message_id, void *req_buf, size_t req_buf_size,
void **resp_buf, size_t *resp_buf_size)
{
struct gh_rm_connection *connection;
@ -640,7 +639,7 @@ int gh_rm_call(struct gh_rm *rm, u32 message_id, void *req_buff, size_t req_buf_
int ret;

/* message_id 0 is reserved. req_buf_size implies req_buf is not NULL */
if (!message_id || (!req_buff && req_buf_size) || !rm)
if (!rm || !message_id || (!req_buf && req_buf_size))
return -EINVAL;


@ -661,7 +660,7 @@ int gh_rm_call(struct gh_rm *rm, u32 message_id, void *req_buff, size_t req_buf_
connection->reply.seq = lower_16_bits(seq_id);

/* Send the request to the Resource Manager */
ret = gh_rm_send_request(rm, message_id, req_buff, req_buf_size, connection);
ret = gh_rm_send_request(rm, message_id, req_buf, req_buf_size, connection);
if (ret < 0)
goto out;

@ -10,7 +10,7 @@
#include <linux/types.h>

struct gh_rm;
int gh_rm_call(struct gh_rm *rsc_mgr, u32 message_id, void *req_buff, size_t req_buf_size,
int gh_rm_call(struct gh_rm *rsc_mgr, u32 message_id, void *req_buf, size_t req_buf_size,
void **resp_buf, size_t *resp_buf_size);

int gh_rm_platform_pre_mem_share(struct gh_rm *rm, struct gh_rm_mem_parcel *mem_parcel);

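With the parameter rename in place, a typical gh_rm_call() user allocates nothing up front and takes ownership of the response buffer on success, per the kernel-doc above. A hedged sketch; the message ID and request struct are hypothetical:

#include <linux/slab.h>

#define EXAMPLE_MSG_ID	0x00010001	/* hypothetical RM RPC message-id */

struct example_req {
	__le32 value;
} __packed;

static int example_rm_request(struct gh_rm *rm, u32 value)
{
	struct example_req req = { .value = cpu_to_le32(value) };
	size_t resp_size;
	void *resp;
	int ret;

	ret = gh_rm_call(rm, EXAMPLE_MSG_ID, &req, sizeof(req), &resp, &resp_size);
	if (ret)
		return ret;

	/* ... interpret the payload in resp here ... */

	/* Caller owns resp when 0 is returned and resp_size != 0 */
	if (resp_size)
		kfree(resp);
	return 0;
}
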
@ -60,7 +60,7 @@ struct gh_rm_mem_release_req {
} __packed;

/* Call: MEM_APPEND */
#define GH_MEM_APPEND_REQ_FLAGS_END BIT(0)
#define GH_MEM_APPEND_REQ_FLAGS_END BIT(0)

struct gh_rm_mem_append_req_header {
__le32 mem_handle;
@ -76,7 +76,7 @@ struct gh_rm_vm_alloc_vmid_resp {
} __packed;

/* Call: VM_STOP */
#define GH_RM_VM_STOP_FLAG_FORCE_STOP BIT(0)
#define GH_RM_VM_STOP_FLAG_FORCE_STOP BIT(0)

#define GH_RM_VM_STOP_REASON_FORCE_STOP 3

@ -184,6 +184,7 @@ static int gh_rm_mem_append(struct gh_rm *rm, u32 mem_handle,
static int gh_rm_mem_lend_common(struct gh_rm *rm, u32 message_id, struct gh_rm_mem_parcel *p)
{
size_t msg_size = 0, initial_mem_entries = p->n_mem_entries, resp_size;
size_t acl_section_size, mem_section_size;
struct gh_rm_mem_share_req_acl_section *acl_section;
struct gh_rm_mem_share_req_mem_section *mem_section;
struct gh_rm_mem_share_req_header *req_header;
@ -199,6 +200,8 @@ static int gh_rm_mem_lend_common(struct gh_rm *rm, u32 message_id, struct gh_rm_
if (initial_mem_entries > GH_RM_MAX_MEM_ENTRIES)
initial_mem_entries = GH_RM_MAX_MEM_ENTRIES;

acl_section_size = struct_size(acl_section, entries, p->n_acl_entries);
mem_section_size = struct_size(mem_section, entries, initial_mem_entries);
/* The format of the message goes:
* request header
* ACL entries (which VMs get what kind of access to this memory parcel)
@ -206,8 +209,8 @@ static int gh_rm_mem_lend_common(struct gh_rm *rm, u32 message_id, struct gh_rm_
* Memory attributes (currently unused, we'll hard-code the size to 0)
*/
msg_size += sizeof(struct gh_rm_mem_share_req_header);
msg_size += struct_size(acl_section, entries, p->n_acl_entries);
msg_size += struct_size(mem_section, entries, initial_mem_entries);
msg_size += acl_section_size;
msg_size += mem_section_size;
msg_size += sizeof(u32); /* for memory attributes, currently unused */

msg = kzalloc(msg_size, GFP_KERNEL);
@ -222,8 +225,8 @@ static int gh_rm_mem_lend_common(struct gh_rm *rm, u32 message_id, struct gh_rm_

req_header = msg;
acl_section = (void *)req_header + sizeof(*req_header);
mem_section = (void *)acl_section + struct_size(acl_section, entries, p->n_acl_entries);
attr_section = (void *)mem_section + struct_size(mem_section, entries, initial_mem_entries);
mem_section = (void *)acl_section + acl_section_size;
attr_section = (void *)mem_section + mem_section_size;

req_header->mem_type = p->mem_type;
if (initial_mem_entries != p->n_mem_entries)
@ -231,11 +234,12 @@ static int gh_rm_mem_lend_common(struct gh_rm *rm, u32 message_id, struct gh_rm_
req_header->label = cpu_to_le32(p->label);

acl_section->n_entries = cpu_to_le32(p->n_acl_entries);
memcpy(acl_section->entries, p->acl_entries, sizeof(*(p->acl_entries)) * p->n_acl_entries);
memcpy(acl_section->entries, p->acl_entries,
flex_array_size(acl_section, entries, p->n_acl_entries));

mem_section->n_entries = cpu_to_le16(initial_mem_entries);
memcpy(mem_section->entries, p->mem_entries,
sizeof(*(p->mem_entries)) * initial_mem_entries);
flex_array_size(mem_section, entries, initial_mem_entries));

/* Set n_entries for memory attribute section to 0 */
*attr_section = 0;
@ -249,6 +253,7 @@ static int gh_rm_mem_lend_common(struct gh_rm *rm, u32 message_id, struct gh_rm_
}

p->mem_handle = le32_to_cpu(*resp);
kfree(resp);

if (initial_mem_entries != p->n_mem_entries) {
ret = gh_rm_mem_append(rm, p->mem_handle,
@ -260,14 +265,13 @@ static int gh_rm_mem_lend_common(struct gh_rm *rm, u32 message_id, struct gh_rm_
}
}

kfree(resp);
return ret;
}

/**
* gh_rm_mem_lend() - Lend memory to other virtual machines.
* @rm: Handle to a Gunyah resource manager
* @parcel: Package the memory information of the memory to be lent.
* @parcel: Information about the memory to be lent.
*
* Lending removes Linux's access to the memory while the memory parcel is lent.
*/
@ -280,7 +284,7 @@ int gh_rm_mem_lend(struct gh_rm *rm, struct gh_rm_mem_parcel *parcel)
/**
* gh_rm_mem_share() - Share memory with other virtual machines.
* @rm: Handle to a Gunyah resource manager
* @parcel: Package the memory information of the memory to be shared.
* @parcel: Information about the memory to be shared.
*
* Sharing keeps Linux's access to the memory while the memory parcel is shared.
*/
@ -292,7 +296,7 @@ int gh_rm_mem_share(struct gh_rm *rm, struct gh_rm_mem_parcel *parcel)
/**
* gh_rm_mem_reclaim() - Reclaim a memory parcel
* @rm: Handle to a Gunyah resource manager
* @parcel: Package the memory information of the memory to be reclaimed.
* @parcel: Information about the memory to be reclaimed.
*
* RM maps the associated memory back into the stage-2 page tables of the owner VM.
*/
@ -304,7 +308,7 @@ int gh_rm_mem_reclaim(struct gh_rm *rm, struct gh_rm_mem_parcel *parcel)
int ret;

ret = gh_rm_call(rm, GH_RM_RPC_MEM_RECLAIM, &req, sizeof(req), NULL, NULL);
/* Do not call platform mem reclaim hooks: the reclaim didn't happen*/
/* Only call the platform mem reclaim hooks if we reclaimed the memory */
if (ret)
return ret;

@ -344,7 +348,7 @@ EXPORT_SYMBOL_GPL(gh_rm_vm_set_firmware_mem);
int gh_rm_alloc_vmid(struct gh_rm *rm, u16 vmid)
{
struct gh_rm_vm_common_vmid_req req_payload = {
.vmid = vmid,
.vmid = cpu_to_le16(vmid),
};
struct gh_rm_vm_alloc_vmid_resp *resp_payload;
size_t resp_size;
@ -366,7 +370,7 @@ int gh_rm_alloc_vmid(struct gh_rm *rm, u16 vmid)
}

/**
* gh_rm_dealloc_vmid() - Dispose the VMID
* gh_rm_dealloc_vmid() - Dispose of a VMID
* @rm: Handle to a Gunyah resource manager
* @vmid: VM identifier allocated with gh_rm_alloc_vmid
*/
@ -376,11 +380,11 @@ int gh_rm_dealloc_vmid(struct gh_rm *rm, u16 vmid)
}

/**
* gh_rm_vm_reset() - Reset the VM's resources
* gh_rm_vm_reset() - Reset a VM's resources
* @rm: Handle to a Gunyah resource manager
* @vmid: VM identifier allocated with gh_rm_alloc_vmid
*
* While tearing down the VM, request RM to clean up all the VM resources
* As part of tearing down the VM, request RM to clean up all the VM resources
* associated with the VM. Only after this, Linux can clean up all the
* references it maintains to resources.
*/
@ -390,7 +394,7 @@ int gh_rm_vm_reset(struct gh_rm *rm, u16 vmid)
}

/**
* gh_rm_vm_start() - Move the VM into "ready to run" state
* gh_rm_vm_start() - Move a VM into "ready to run" state
* @rm: Handle to a Gunyah resource manager
* @vmid: VM identifier allocated with gh_rm_alloc_vmid
*
@ -432,9 +436,7 @@ int gh_rm_vm_stop(struct gh_rm *rm, u16 vmid)
* @image_size: Size of the VM image
* @dtb_offset: Start address of the devicetree binary with VM configuration,
* relative to start of memparcel.
* @dtb_size: Maximum size of devicetree binary. Resource manager applies
* an overlay to the DTB and dtb_size should include room for
* the overlay.
* @dtb_size: Maximum size of devicetree binary.
*/
int gh_rm_vm_configure(struct gh_rm *rm, u16 vmid, enum gh_rm_vm_auth_mechanism auth_mechanism,
u32 mem_handle, u64 image_offset, u64 image_size, u64 dtb_offset, u64 dtb_size)
@ -470,6 +472,7 @@ int gh_rm_vm_init(struct gh_rm *rm, u16 vmid)
* @rm: Handle to a Gunyah resource manager
* @vmid: VMID of the other VM to get the resources of
* @resources: Set by gh_rm_get_hyp_resources and contains the returned hypervisor resources.
* Caller must free the resources pointer if successful.
*/
int gh_rm_get_hyp_resources(struct gh_rm *rm, u16 vmid,
struct gh_rm_hyp_resources **resources)

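The gh_rm_mem_lend_common() hunks above convert the open-coded "sizeof(entry) * n" arithmetic to struct_size() and flex_array_size(), and reuse the computed section sizes for both the allocation and the pointer arithmetic so the two can no longer drift apart. Both helpers come from <linux/overflow.h> and saturate instead of wrapping on overflow. A self-contained sketch with a hypothetical section layout:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_section {		/* hypothetical wire format */
	__le32 n_entries;
	struct {
		__le16 vmid;
		u8 perms;
	} __packed entries[];
} __packed;

static struct example_section *example_pack(const void *src, u32 n)
{
	struct example_section *s;

	/* header plus n trailing elements, overflow-checked */
	s = kzalloc(struct_size(s, entries, n), GFP_KERNEL);
	if (!s)
		return NULL;

	s->n_entries = cpu_to_le32(n);
	/* size of the flexible array alone, matching the memcpy()s above */
	memcpy(s->entries, src, flex_array_size(s, entries, n));
	return s;
}
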
@ -19,47 +19,27 @@

#include "vm_mgr.h"

static DEFINE_XARRAY(functions);
static void gh_vm_free(struct work_struct *work);

int gh_vm_function_register(struct gh_vm_function *fn)
static DEFINE_XARRAY(gh_vm_functions);

static void gh_vm_put_function(struct gh_vm_function *fn)
{
if (!fn->bind || !fn->unbind)
return -EINVAL;

return xa_err(xa_store(&functions, fn->type, fn, GFP_KERNEL));
module_put(fn->mod);
}
EXPORT_SYMBOL_GPL(gh_vm_function_register);

static void gh_vm_remove_function_instance(struct gh_vm_function_instance *inst)
__must_hold(&inst->ghvm->fn_lock)
{
inst->fn->unbind(inst);
list_del(&inst->vm_list);
module_put(inst->fn->mod);
kfree(inst->argp);
kfree(inst);
}

void gh_vm_function_unregister(struct gh_vm_function *fn)
{
/* Expecting unregister to only come when unloading a module */
WARN_ON(fn->mod && module_refcount(fn->mod));
xa_erase(&functions, fn->type);
}
EXPORT_SYMBOL_GPL(gh_vm_function_unregister);

static struct gh_vm_function *gh_vm_get_function(u32 type)
{
struct gh_vm_function *fn;
int r;

fn = xa_load(&functions, type);
fn = xa_load(&gh_vm_functions, type);
if (!fn) {
r = request_module("ghfunc:%d", type);
if (r)
return ERR_PTR(r);
return ERR_PTR(r > 0 ? -r : r);

fn = xa_load(&functions, type);
fn = xa_load(&gh_vm_functions, type);
}

if (!fn || !try_module_get(fn->mod))
@ -68,14 +48,36 @@ static struct gh_vm_function *gh_vm_get_function(u32 type)
return fn;
}

static long gh_vm_add_function(struct gh_vm *ghvm, struct gh_fn_desc *f)
static void gh_vm_remove_function_instance(struct gh_vm_function_instance *inst)
__must_hold(&inst->ghvm->fn_lock)
{
inst->fn->unbind(inst);
list_del(&inst->vm_list);
gh_vm_put_function(inst->fn);
kfree(inst->argp);
kfree(inst);
}

static void gh_vm_remove_functions(struct gh_vm *ghvm)
{
struct gh_vm_function_instance *inst, *iiter;

mutex_lock(&ghvm->fn_lock);
list_for_each_entry_safe(inst, iiter, &ghvm->functions, vm_list) {
gh_vm_remove_function_instance(inst);
}
mutex_unlock(&ghvm->fn_lock);
}

static long gh_vm_add_function_instance(struct gh_vm *ghvm, struct gh_fn_desc *f)
{
struct gh_vm_function_instance *inst;
void __user *argp;
long r = 0;

if (f->arg_size > GH_FN_MAX_ARG_SIZE) {
dev_err(ghvm->parent, "%s: arg_size > %d\n", __func__, GH_FN_MAX_ARG_SIZE);
dev_err_ratelimited(ghvm->parent, "%s: arg_size > %d\n",
__func__, GH_FN_MAX_ARG_SIZE);
return -EINVAL;
}

@ -110,7 +112,8 @@ static long gh_vm_add_function(struct gh_vm *ghvm, struct gh_fn_desc *f)
mutex_lock(&ghvm->fn_lock);
r = inst->fn->bind(inst);
if (r < 0) {
module_put(inst->fn->mod);
mutex_unlock(&ghvm->fn_lock);
gh_vm_put_function(inst->fn);
goto free_arg;
}

@ -125,7 +128,7 @@ static long gh_vm_add_function(struct gh_vm *ghvm, struct gh_fn_desc *f)
return r;
}

static long gh_vm_rm_function(struct gh_vm *ghvm, struct gh_fn_desc *f)
static long gh_vm_rm_function_instance(struct gh_vm *ghvm, struct gh_fn_desc *f)
{
struct gh_vm_function_instance *inst, *iter;
void __user *user_argp;
@ -150,11 +153,13 @@ static long gh_vm_rm_function(struct gh_vm *ghvm, struct gh_fn_desc *f)
goto out;
}

r = -ENOENT;
list_for_each_entry_safe(inst, iter, &ghvm->functions, vm_list) {
if (inst->fn->type == f->type &&
f->arg_size == inst->arg_size &&
!memcmp(argp, inst->argp, f->arg_size))
inst->fn->compare(inst, argp, f->arg_size)) {
gh_vm_remove_function_instance(inst);
r = 0;
}
}

kfree(argp);
@ -165,14 +170,31 @@ static long gh_vm_rm_function(struct gh_vm *ghvm, struct gh_fn_desc *f)
return r;
}

int gh_vm_function_register(struct gh_vm_function *fn)
{
if (!fn->bind || !fn->unbind)
return -EINVAL;

return xa_err(xa_store(&gh_vm_functions, fn->type, fn, GFP_KERNEL));
}
EXPORT_SYMBOL_GPL(gh_vm_function_register);

void gh_vm_function_unregister(struct gh_vm_function *fn)
{
/* Expecting unregister to only come when unloading a module */
WARN_ON(fn->mod && module_refcount(fn->mod));
xa_erase(&gh_vm_functions, fn->type);
}
EXPORT_SYMBOL_GPL(gh_vm_function_unregister);

int gh_vm_add_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *ticket)
{
struct gh_vm_resource_ticket *iter;
struct gh_resource *ghrsc;
struct gh_resource *ghrsc, *rsc_iter;
int ret = 0;

mutex_lock(&ghvm->resources_lock);
list_for_each_entry(iter, &ghvm->resource_tickets, list) {
list_for_each_entry(iter, &ghvm->resource_tickets, vm_list) {
if (iter->resource_type == ticket->resource_type && iter->label == ticket->label) {
ret = -EEXIST;
goto out;
@ -184,12 +206,12 @@ int gh_vm_add_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *
goto out;
}

list_add(&ticket->list, &ghvm->resource_tickets);
list_add(&ticket->vm_list, &ghvm->resource_tickets);
INIT_LIST_HEAD(&ticket->resources);

list_for_each_entry(ghrsc, &ghvm->resources, list) {
list_for_each_entry_safe(ghrsc, rsc_iter, &ghvm->resources, list) {
if (ghrsc->type == ticket->resource_type && ghrsc->rm_label == ticket->label) {
if (!ticket->populate(ticket, ghrsc))
if (ticket->populate(ticket, ghrsc))
list_move(&ghrsc->list, &ticket->resources);
}
}
@ -210,7 +232,7 @@ void gh_vm_remove_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_tick
}

module_put(ticket->owner);
list_del(&ticket->list);
list_del(&ticket->vm_list);
mutex_unlock(&ghvm->resources_lock);
}
EXPORT_SYMBOL_GPL(gh_vm_remove_resource_ticket);
@ -220,12 +242,17 @@ static void gh_vm_add_resource(struct gh_vm *ghvm, struct gh_resource *ghrsc)
struct gh_vm_resource_ticket *ticket;

mutex_lock(&ghvm->resources_lock);
list_for_each_entry(ticket, &ghvm->resource_tickets, list) {
list_for_each_entry(ticket, &ghvm->resource_tickets, vm_list) {
if (ghrsc->type == ticket->resource_type && ghrsc->rm_label == ticket->label) {
if (!ticket->populate(ticket, ghrsc)) {
if (ticket->populate(ticket, ghrsc))
list_add(&ghrsc->list, &ticket->resources);
goto found;
}
else
list_add(&ghrsc->list, &ghvm->resources);
/* unconditional -- we prevent multiple identical
* resource tickets so there will not be some other
* ticket elsewhere in the list if populate() failed.
*/
goto found;
}
}
list_add(&ghrsc->list, &ghvm->resources);
@ -233,6 +260,26 @@ static void gh_vm_add_resource(struct gh_vm *ghvm, struct gh_resource *ghrsc)
mutex_unlock(&ghvm->resources_lock);
}

static void gh_vm_clean_resources(struct gh_vm *ghvm)
{
struct gh_vm_resource_ticket *ticket, *titer;
struct gh_resource *ghrsc, *riter;

mutex_lock(&ghvm->resources_lock);
if (!list_empty(&ghvm->resource_tickets)) {
dev_warn(ghvm->parent, "Dangling resource tickets:\n");
list_for_each_entry_safe(ticket, titer, &ghvm->resource_tickets, vm_list) {
dev_warn(ghvm->parent, " %pS\n", ticket->populate);
gh_vm_remove_resource_ticket(ghvm, ticket);
}
}

list_for_each_entry_safe(ghrsc, riter, &ghvm->resources, list) {
|
||||
gh_rm_free_resource(ghrsc);
|
||||
}
|
||||
mutex_unlock(&ghvm->resources_lock);
|
||||
}
|
||||
|
||||
static int _gh_vm_io_handler_compare(const struct rb_node *node, const struct rb_node *parent)
|
||||
{
|
||||
struct gh_vm_io_handler *n = container_of(node, struct gh_vm_io_handler, node);
|
||||
@ -248,9 +295,16 @@ static int _gh_vm_io_handler_compare(const struct rb_node *node, const struct rb
|
||||
return -1;
|
||||
if (n->len > p->len)
|
||||
return 1;
|
||||
if (n->datamatch < p->datamatch)
|
||||
/* one of the io handlers doesn't have datamatch and the other does.
|
||||
* For purposes of comparison, that makes them identical since the
|
||||
* one that doesn't have datamatch will cover the same handler that
|
||||
* does.
|
||||
*/
|
||||
if (n->datamatch != p->datamatch)
|
||||
return 0;
|
||||
if (n->data < p->data)
|
||||
return -1;
|
||||
if (n->datamatch > p->datamatch)
|
||||
if (n->data > p->data)
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
@ -273,7 +327,8 @@ static struct gh_vm_io_handler *gh_vm_mgr_find_io_hdlr(struct gh_vm *ghvm, u64 a
|
||||
struct gh_vm_io_handler key = {
|
||||
.addr = addr,
|
||||
.len = len,
|
||||
.datamatch = data,
|
||||
.datamatch = true,
|
||||
.data = data,
|
||||
};
|
||||
struct rb_node *node;
|
||||
|
||||
@ -331,7 +386,7 @@ static int gh_vm_rm_notification_status(struct gh_vm *ghvm, void *data)
|
||||
{
|
||||
struct gh_rm_vm_status_payload *payload = data;
|
||||
|
||||
if (payload->vmid != ghvm->vmid)
|
||||
if (le16_to_cpu(payload->vmid) != ghvm->vmid)
|
||||
return NOTIFY_OK;
|
||||
|
||||
/* All other state transitions are synchronous to a corresponding RM call */
|
||||
@ -349,7 +404,7 @@ static int gh_vm_rm_notification_exited(struct gh_vm *ghvm, void *data)
|
||||
{
|
||||
struct gh_rm_vm_exited_payload *payload = data;
|
||||
|
||||
if (payload->vmid != ghvm->vmid)
|
||||
if (le16_to_cpu(payload->vmid) != ghvm->vmid)
|
||||
return NOTIFY_OK;
|
||||
|
||||
down_write(&ghvm->status_lock);
|
||||
@ -359,6 +414,7 @@ static int gh_vm_rm_notification_exited(struct gh_vm *ghvm, void *data)
|
||||
memcpy(&ghvm->exit_info.reason, payload->exit_reason,
|
||||
min(GH_VM_MAX_EXIT_REASON_SIZE, ghvm->exit_info.reason_size));
|
||||
up_write(&ghvm->status_lock);
|
||||
wake_up(&ghvm->vm_status_wait);
|
||||
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
@ -387,146 +443,38 @@ static void gh_vm_stop(struct gh_vm *ghvm)
|
||||
if (ret)
|
||||
dev_warn(ghvm->parent, "Failed to stop VM: %d\n", ret);
|
||||
}
|
||||
|
||||
ghvm->vm_status = GH_RM_VM_STATUS_EXITED;
|
||||
up_write(&ghvm->status_lock);
|
||||
|
||||
wait_event(ghvm->vm_status_wait, ghvm->vm_status == GH_RM_VM_STATUS_EXITED);
|
||||
}
|
||||
|
||||
static void gh_vm_free(struct work_struct *work)
|
||||
{
|
||||
struct gh_vm *ghvm = container_of(work, struct gh_vm, free_work);
|
||||
struct gh_vm_function_instance *inst, *iiter;
|
||||
struct gh_vm_resource_ticket *ticket, *titer;
|
||||
struct gh_resource *ghrsc, *riter;
|
||||
struct gh_vm_mem *mapping, *tmp;
|
||||
int ret;
|
||||
|
||||
switch (ghvm->vm_status) {
|
||||
case GH_RM_VM_STATUS_RUNNING:
|
||||
gh_vm_stop(ghvm);
|
||||
fallthrough;
|
||||
case GH_RM_VM_STATUS_INIT_FAILED:
|
||||
case GH_RM_VM_STATUS_EXITED:
|
||||
mutex_lock(&ghvm->fn_lock);
|
||||
list_for_each_entry_safe(inst, iiter, &ghvm->functions, vm_list) {
|
||||
gh_vm_remove_function_instance(inst);
|
||||
}
|
||||
mutex_unlock(&ghvm->fn_lock);
|
||||
|
||||
mutex_lock(&ghvm->resources_lock);
|
||||
if (!list_empty(&ghvm->resource_tickets)) {
|
||||
dev_warn(ghvm->parent, "Dangling resource tickets:\n");
|
||||
list_for_each_entry_safe(ticket, titer, &ghvm->resource_tickets, list) {
|
||||
dev_warn(ghvm->parent, " %pS\n", ticket->populate);
|
||||
gh_vm_remove_resource_ticket(ghvm, ticket);
|
||||
}
|
||||
}
|
||||
|
||||
list_for_each_entry_safe(ghrsc, riter, &ghvm->resources, list) {
|
||||
gh_rm_free_resource(ghrsc);
|
||||
}
|
||||
mutex_unlock(&ghvm->resources_lock);
|
||||
|
||||
/* vm_status == LOAD if user creates VM, but then destroys it
|
||||
* without ever trying to start it. In that case, we have only
|
||||
* allocated VMID. Clean up functions (above), memory (below),
|
||||
* and dealloc vmid (below), but no call gh_rm_vm_reset().
|
||||
*/
|
||||
if (ghvm->vm_status != GH_RM_VM_STATUS_LOAD) {
|
||||
ret = gh_rm_vm_reset(ghvm->rm, ghvm->vmid);
|
||||
if (ret)
|
||||
dev_err(ghvm->parent, "Failed to reset the vm: %d\n", ret);
|
||||
wait_event(ghvm->vm_status_wait, ghvm->vm_status == GH_RM_VM_STATUS_RESET);
|
||||
}
|
||||
|
||||
mutex_lock(&ghvm->mm_lock);
|
||||
list_for_each_entry_safe(mapping, tmp, &ghvm->memory_mappings, list) {
|
||||
gh_vm_mem_reclaim(ghvm, mapping);
|
||||
kfree(mapping);
|
||||
}
|
||||
mutex_unlock(&ghvm->mm_lock);
|
||||
fallthrough;
|
||||
case GH_RM_VM_STATUS_NO_STATE:
|
||||
ret = gh_rm_dealloc_vmid(ghvm->rm, ghvm->vmid);
|
||||
if (ret)
|
||||
dev_warn(ghvm->parent, "Failed to deallocate vmid: %d\n", ret);
|
||||
|
||||
gh_rm_notifier_unregister(ghvm->rm, &ghvm->nb);
|
||||
gh_rm_put(ghvm->rm);
|
||||
kfree(ghvm);
|
||||
break;
|
||||
default:
|
||||
dev_err(ghvm->parent, "VM is unknown state: %d. VM will not be cleaned up.\n",
|
||||
ghvm->vm_status);
|
||||
|
||||
gh_rm_notifier_unregister(ghvm->rm, &ghvm->nb);
|
||||
gh_rm_put(ghvm->rm);
|
||||
kfree(ghvm);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void _gh_vm_put(struct kref *kref)
|
||||
{
|
||||
struct gh_vm *ghvm = container_of(kref, struct gh_vm, kref);
|
||||
|
||||
/* VM will be reset and make RM calls which can interruptible sleep.
|
||||
* Defer to a work so this thread can receive signal.
|
||||
*/
|
||||
schedule_work(&ghvm->free_work);
|
||||
}
|
||||
|
||||
int __must_check gh_vm_get(struct gh_vm *ghvm)
|
||||
{
|
||||
return kref_get_unless_zero(&ghvm->kref);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(gh_vm_get);
|
||||
|
||||
void gh_vm_put(struct gh_vm *ghvm)
|
||||
{
|
||||
kref_put(&ghvm->kref, _gh_vm_put);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(gh_vm_put);
|
||||
|
||||
static __must_check struct gh_vm *gh_vm_alloc(struct gh_rm *rm)
|
||||
{
|
||||
struct gh_vm *ghvm;
|
||||
int vmid, ret;
|
||||
|
||||
vmid = gh_rm_alloc_vmid(rm, 0);
|
||||
if (vmid < 0)
|
||||
return ERR_PTR(vmid);
|
||||
|
||||
ghvm = kzalloc(sizeof(*ghvm), GFP_KERNEL);
|
||||
if (!ghvm) {
|
||||
gh_rm_dealloc_vmid(rm, vmid);
|
||||
if (!ghvm)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
ghvm->parent = gh_rm_get(rm);
|
||||
ghvm->vmid = vmid;
|
||||
ghvm->vmid = GH_VMID_INVAL;
|
||||
ghvm->rm = rm;
|
||||
|
||||
init_waitqueue_head(&ghvm->vm_status_wait);
|
||||
ghvm->nb.notifier_call = gh_vm_rm_notification;
|
||||
ret = gh_rm_notifier_register(rm, &ghvm->nb);
|
||||
if (ret) {
|
||||
gh_rm_put(rm);
|
||||
gh_rm_dealloc_vmid(rm, vmid);
|
||||
kfree(ghvm);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
mmgrab(current->mm);
|
||||
ghvm->mm = current->mm;
|
||||
mutex_init(&ghvm->mm_lock);
|
||||
INIT_LIST_HEAD(&ghvm->memory_mappings);
|
||||
init_rwsem(&ghvm->status_lock);
|
||||
init_waitqueue_head(&ghvm->vm_status_wait);
|
||||
INIT_WORK(&ghvm->free_work, gh_vm_free);
|
||||
kref_init(&ghvm->kref);
|
||||
mutex_init(&ghvm->resources_lock);
|
||||
INIT_LIST_HEAD(&ghvm->resources);
|
||||
INIT_LIST_HEAD(&ghvm->resource_tickets);
|
||||
init_rwsem(&ghvm->mmio_handler_lock);
|
||||
ghvm->mmio_handler_root = RB_ROOT;
|
||||
INIT_LIST_HEAD(&ghvm->functions);
|
||||
ghvm->vm_status = GH_RM_VM_STATUS_LOAD;
|
||||
ghvm->vm_status = GH_RM_VM_STATUS_NO_STATE;
|
||||
|
||||
return ghvm;
|
||||
}
|
||||
@ -541,13 +489,27 @@ static int gh_vm_start(struct gh_vm *ghvm)
|
||||
int ret, i, n;
|
||||
|
||||
down_write(&ghvm->status_lock);
|
||||
if (ghvm->vm_status != GH_RM_VM_STATUS_LOAD) {
|
||||
if (ghvm->vm_status != GH_RM_VM_STATUS_NO_STATE) {
|
||||
up_write(&ghvm->status_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
ghvm->nb.notifier_call = gh_vm_rm_notification;
|
||||
ret = gh_rm_notifier_register(ghvm->rm, &ghvm->nb);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
ret = gh_rm_alloc_vmid(ghvm->rm, 0);
|
||||
if (ret < 0) {
|
||||
gh_rm_notifier_unregister(ghvm->rm, &ghvm->nb);
|
||||
goto err;
|
||||
}
|
||||
ghvm->vmid = ret;
|
||||
ghvm->vm_status = GH_RM_VM_STATUS_LOAD;
|
||||
|
||||
mutex_lock(&ghvm->mm_lock);
|
||||
list_for_each_entry(mapping, &ghvm->memory_mappings, list) {
|
||||
mapping->parcel.acl_entries[0].vmid = cpu_to_le16(ghvm->vmid);
|
||||
switch (mapping->share_type) {
|
||||
case VM_MEM_LEND:
|
||||
ret = gh_rm_mem_lend(ghvm->rm, &mapping->parcel);
|
||||
@ -559,8 +521,8 @@ static int gh_vm_start(struct gh_vm *ghvm)
|
||||
if (ret) {
|
||||
dev_warn(ghvm->parent, "Failed to %s parcel %d: %d\n",
|
||||
mapping->share_type == VM_MEM_LEND ? "lend" : "share",
|
||||
mapping->parcel.label,
|
||||
ret);
|
||||
mapping->parcel.label, ret);
|
||||
mutex_unlock(&ghvm->mm_lock);
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
@ -602,11 +564,12 @@ static int gh_vm_start(struct gh_vm *ghvm)
|
||||
}
|
||||
|
||||
ret = gh_rm_vm_init(ghvm->rm, ghvm->vmid);
|
||||
ghvm->vm_status = GH_RM_VM_STATUS_RESET;
|
||||
if (ret) {
|
||||
ghvm->vm_status = GH_RM_VM_STATUS_INIT_FAILED;
|
||||
dev_warn(ghvm->parent, "Failed to initialize VM: %d\n", ret);
|
||||
goto err;
|
||||
}
|
||||
ghvm->vm_status = GH_RM_VM_STATUS_READY;
|
||||
|
||||
ret = gh_rm_get_hyp_resources(ghvm->rm, ghvm->vmid, &resources);
|
||||
if (ret) {
|
||||
@ -634,7 +597,6 @@ static int gh_vm_start(struct gh_vm *ghvm)
|
||||
up_write(&ghvm->status_lock);
|
||||
return ret;
|
||||
err:
|
||||
ghvm->vm_status = GH_RM_VM_STATUS_INIT_FAILED;
|
||||
/* gh_vm_free will handle releasing resources and reclaiming memory */
|
||||
up_write(&ghvm->status_lock);
|
||||
return ret;
|
||||
@ -649,11 +611,11 @@ static int gh_vm_ensure_started(struct gh_vm *ghvm)
|
||||
return ret;
|
||||
|
||||
/* Unlikely because VM is typically started */
|
||||
if (unlikely(ghvm->vm_status == GH_RM_VM_STATUS_LOAD)) {
|
||||
if (unlikely(ghvm->vm_status == GH_RM_VM_STATUS_NO_STATE)) {
|
||||
up_read(&ghvm->status_lock);
|
||||
ret = gh_vm_start(ghvm);
|
||||
if (ret)
|
||||
goto out;
|
||||
return ret;
|
||||
/** gh_vm_start() is guaranteed to bring status out of
|
||||
* GH_RM_VM_STATUS_LOAD, thus inifitely recursive call is not
|
||||
* possible
|
||||
@ -665,7 +627,6 @@ static int gh_vm_ensure_started(struct gh_vm *ghvm)
|
||||
if (unlikely(ghvm->vm_status != GH_RM_VM_STATUS_RUNNING))
|
||||
ret = -ENODEV;
|
||||
|
||||
out:
|
||||
up_read(&ghvm->status_lock);
|
||||
return ret;
|
||||
}
|
||||
@ -684,6 +645,10 @@ static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
case GH_VM_SET_USER_MEM_REGION: {
|
||||
struct gh_userspace_memory_region region;
|
||||
|
||||
/* only allow owner task to add memory */
|
||||
if (ghvm->mm != current->mm)
|
||||
return -EPERM;
|
||||
|
||||
if (copy_from_user(®ion, argp, sizeof(region)))
|
||||
return -EFAULT;
|
||||
|
||||
@ -700,10 +665,13 @@ static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
if (copy_from_user(&dtb_config, argp, sizeof(dtb_config)))
|
||||
return -EFAULT;
|
||||
|
||||
dtb_config.size = PAGE_ALIGN(dtb_config.size);
|
||||
if (dtb_config.guest_phys_addr + dtb_config.size < dtb_config.guest_phys_addr)
|
||||
if (overflows_type(dtb_config.guest_phys_addr + dtb_config.size, u64))
|
||||
return -EOVERFLOW;
|
||||
|
||||
/* Gunyah requires that dtb_config is page aligned */
|
||||
if (!PAGE_ALIGNED(dtb_config.guest_phys_addr) || !PAGE_ALIGNED(dtb_config.size))
|
||||
return -EINVAL;
|
||||
|
||||
ghvm->dtb_config = dtb_config;
|
||||
|
||||
r = 0;
|
||||
@ -728,21 +696,16 @@ static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
if (copy_from_user(&f, argp, sizeof(f)))
|
||||
return -EFAULT;
|
||||
|
||||
r = gh_vm_add_function(ghvm, &f);
|
||||
r = gh_vm_add_function_instance(ghvm, &f);
|
||||
break;
|
||||
}
|
||||
case GH_VM_REMOVE_FUNCTION: {
|
||||
struct gh_fn_desc *f;
|
||||
struct gh_fn_desc f;
|
||||
|
||||
f = kzalloc(sizeof(*f), GFP_KERNEL);
|
||||
if (!f)
|
||||
return -ENOMEM;
|
||||
|
||||
if (copy_from_user(f, argp, sizeof(*f)))
|
||||
if (copy_from_user(&f, argp, sizeof(f)))
|
||||
return -EFAULT;
|
||||
|
||||
r = gh_vm_rm_function(ghvm, f);
|
||||
kfree(f);
|
||||
r = gh_vm_rm_function_instance(ghvm, &f);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
@ -753,6 +716,63 @@ static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
return r;
|
||||
}
|
||||
|
||||
static void gh_vm_free(struct work_struct *work)
|
||||
{
|
||||
struct gh_vm *ghvm = container_of(work, struct gh_vm, free_work);
|
||||
int ret;
|
||||
|
||||
if (ghvm->vm_status == GH_RM_VM_STATUS_RUNNING)
|
||||
gh_vm_stop(ghvm);
|
||||
|
||||
gh_vm_remove_functions(ghvm);
|
||||
gh_vm_clean_resources(ghvm);
|
||||
|
||||
if (ghvm->vm_status != GH_RM_VM_STATUS_NO_STATE &&
|
||||
ghvm->vm_status != GH_RM_VM_STATUS_LOAD &&
|
||||
ghvm->vm_status != GH_RM_VM_STATUS_RESET) {
|
||||
ret = gh_rm_vm_reset(ghvm->rm, ghvm->vmid);
|
||||
if (ret)
|
||||
dev_err(ghvm->parent, "Failed to reset the vm: %d\n", ret);
|
||||
wait_event(ghvm->vm_status_wait, ghvm->vm_status == GH_RM_VM_STATUS_RESET);
|
||||
}
|
||||
|
||||
gh_vm_mem_reclaim(ghvm);
|
||||
|
||||
if (ghvm->vm_status > GH_RM_VM_STATUS_NO_STATE) {
|
||||
gh_rm_notifier_unregister(ghvm->rm, &ghvm->nb);
|
||||
|
||||
ret = gh_rm_dealloc_vmid(ghvm->rm, ghvm->vmid);
|
||||
if (ret)
|
||||
dev_warn(ghvm->parent, "Failed to deallocate vmid: %d\n", ret);
|
||||
}
|
||||
|
||||
gh_rm_put(ghvm->rm);
|
||||
mmdrop(ghvm->mm);
|
||||
kfree(ghvm);
|
||||
}
|
||||
|
||||
int __must_check gh_vm_get(struct gh_vm *ghvm)
|
||||
{
|
||||
return kref_get_unless_zero(&ghvm->kref);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(gh_vm_get);
|
||||
|
||||
static void _gh_vm_put(struct kref *kref)
|
||||
{
|
||||
struct gh_vm *ghvm = container_of(kref, struct gh_vm, kref);
|
||||
|
||||
/* VM will be reset and make RM calls which can interruptible sleep.
|
||||
* Defer to a work so this thread can receive signal.
|
||||
*/
|
||||
schedule_work(&ghvm->free_work);
|
||||
}
|
||||
|
||||
void gh_vm_put(struct gh_vm *ghvm)
|
||||
{
|
||||
kref_put(&ghvm->kref, _gh_vm_put);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(gh_vm_put);
|
||||
|
||||
static int gh_vm_release(struct inode *inode, struct file *filp)
|
||||
{
|
||||
struct gh_vm *ghvm = filp->private_data;
|
||||
@ -802,7 +822,7 @@ static long gh_dev_ioctl_create_vm(struct gh_rm *rm, unsigned long arg)
|
||||
err_put_fd:
|
||||
put_unused_fd(fd);
|
||||
err_destroy_vm:
|
||||
gh_vm_free(&ghvm->free_work);
|
||||
gh_vm_put(ghvm);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -3,8 +3,8 @@
|
||||
* Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef _GH_PRIV_VM_MGR_H
|
||||
#define _GH_PRIV_VM_MGR_H
|
||||
#ifndef _GH_VM_MGR_H
|
||||
#define _GH_VM_MGR_H
|
||||
|
||||
#include <linux/gunyah_rsc_mgr.h>
|
||||
#include <linux/gunyah_vm_mgr.h>
|
||||
@ -50,6 +50,7 @@ struct gh_vm {
|
||||
|
||||
struct work_struct free_work;
|
||||
struct kref kref;
|
||||
struct mm_struct *mm; /* userspace tied to this vm */
|
||||
struct mutex mm_lock;
|
||||
struct list_head memory_mappings;
|
||||
struct mutex fn_lock;
|
||||
@ -62,9 +63,7 @@ struct gh_vm {
|
||||
};
|
||||
|
||||
int gh_vm_mem_alloc(struct gh_vm *ghvm, struct gh_userspace_memory_region *region, bool lend);
|
||||
void gh_vm_mem_reclaim(struct gh_vm *ghvm, struct gh_vm_mem *mapping);
|
||||
int gh_vm_mem_free(struct gh_vm *ghvm, u32 label);
|
||||
struct gh_vm_mem *gh_vm_mem_find_by_label(struct gh_vm *ghvm, u32 label);
|
||||
void gh_vm_mem_reclaim(struct gh_vm *ghvm);
|
||||
struct gh_vm_mem *gh_vm_mem_find_by_addr(struct gh_vm *ghvm, u64 guest_phys_addr, u32 size);
|
||||
|
||||
int gh_vm_mmio_write(struct gh_vm *ghvm, u64 addr, u32 len, u64 data);
|
||||
|
@ -12,6 +12,21 @@
|
||||
|
||||
#include "vm_mgr.h"
|
||||
|
||||
static bool pages_are_mergeable(struct page *a, struct page *b)
|
||||
{
|
||||
if (page_to_pfn(a) + 1 != page_to_pfn(b))
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool gh_vm_mem_overlap(struct gh_vm_mem *a, u64 addr, u64 size)
|
||||
{
|
||||
u64 a_end = a->guest_phys_addr + (a->npages << PAGE_SHIFT);
|
||||
u64 end = addr + size;
|
||||
|
||||
return a->guest_phys_addr < end && addr < a_end;
|
||||
}
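
The new gh_vm_mem_overlap() helper treats a mapping as the half-open range [guest_phys_addr, guest_phys_addr + (npages << PAGE_SHIFT)); two ranges intersect exactly when each starts before the other ends. A minimal standalone sketch of the same predicate, with illustrative values that are not from the patch:

	/* Editor's sketch: [0x1000, 0x3000) and [0x2000, 0x4000) overlap because
	 * 0x1000 < 0x4000 and 0x2000 < 0x3000; [0x1000, 0x2000) and [0x2000, 0x3000)
	 * do not, since the first ends exactly where the second begins.
	 */
	static bool ranges_overlap(u64 a_start, u64 a_size, u64 b_start, u64 b_size)
	{
		return a_start < b_start + b_size && b_start < a_start + a_size;
	}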

static struct gh_vm_mem *__gh_vm_mem_find_by_label(struct gh_vm *ghvm, u32 label)
	__must_hold(&ghvm->mm_lock)
{
@ -24,10 +39,10 @@ static struct gh_vm_mem *__gh_vm_mem_find_by_label(struct gh_vm *ghvm, u32 label
	return NULL;
}

void gh_vm_mem_reclaim(struct gh_vm *ghvm, struct gh_vm_mem *mapping)
static void gh_vm_mem_reclaim_mapping(struct gh_vm *ghvm, struct gh_vm_mem *mapping)
	__must_hold(&ghvm->mm_lock)
{
	int i, ret = 0;
	int ret = 0;

	if (mapping->parcel.mem_handle != GH_MEM_HANDLE_INVAL) {
		ret = gh_rm_mem_reclaim(ghvm->rm, &mapping->parcel);
@ -36,9 +51,10 @@ void gh_vm_mem_reclaim(struct gh_vm *ghvm, struct gh_vm_mem *mapping)
				mapping->parcel.label, ret);
	}

	if (!ret)
		for (i = 0; i < mapping->npages; i++)
			unpin_user_page(mapping->pages[i]);
	if (!ret) {
		unpin_user_pages(mapping->pages, mapping->npages);
		account_locked_vm(ghvm->mm, mapping->npages, false);
	}

	kfree(mapping->pages);
	kfree(mapping->parcel.acl_entries);
@ -47,21 +63,32 @@ void gh_vm_mem_reclaim(struct gh_vm *ghvm, struct gh_vm_mem *mapping)
	list_del(&mapping->list);
}

void gh_vm_mem_reclaim(struct gh_vm *ghvm)
{
	struct gh_vm_mem *mapping, *tmp;

	mutex_lock(&ghvm->mm_lock);

	list_for_each_entry_safe(mapping, tmp, &ghvm->memory_mappings, list) {
		gh_vm_mem_reclaim_mapping(ghvm, mapping);
		kfree(mapping);
	}

	mutex_unlock(&ghvm->mm_lock);
}

struct gh_vm_mem *gh_vm_mem_find_by_addr(struct gh_vm *ghvm, u64 guest_phys_addr, u32 size)
{
	struct gh_vm_mem *mapping = NULL;
	int ret;
	struct gh_vm_mem *mapping;

	ret = mutex_lock_interruptible(&ghvm->mm_lock);
	if (ret)
		return ERR_PTR(ret);
	if (overflows_type(guest_phys_addr + size, u64))
		return NULL;

	mutex_lock(&ghvm->mm_lock);

	list_for_each_entry(mapping, &ghvm->memory_mappings, list) {
		if (guest_phys_addr >= mapping->guest_phys_addr &&
		    (guest_phys_addr + size <= mapping->guest_phys_addr +
		    (mapping->npages << PAGE_SHIFT))) {
		if (gh_vm_mem_overlap(mapping, guest_phys_addr, size))
			goto unlock;
		}
	}

	mapping = NULL;
@ -70,36 +97,22 @@ struct gh_vm_mem *gh_vm_mem_find_by_addr(struct gh_vm *ghvm, u64 guest_phys_addr
	return mapping;
}

struct gh_vm_mem *gh_vm_mem_find_by_label(struct gh_vm *ghvm, u32 label)
{
	struct gh_vm_mem *mapping;
	int ret;

	ret = mutex_lock_interruptible(&ghvm->mm_lock);
	if (ret)
		return ERR_PTR(ret);

	mapping = __gh_vm_mem_find_by_label(ghvm, label);
	mutex_unlock(&ghvm->mm_lock);

	return mapping ? : ERR_PTR(-ENODEV);
}

int gh_vm_mem_alloc(struct gh_vm *ghvm, struct gh_userspace_memory_region *region, bool lend)
{
	struct gh_vm_mem *mapping, *tmp_mapping;
	struct gh_rm_mem_entry *mem_entries;
	phys_addr_t curr_page, prev_page;
	struct page *curr_page, *prev_page;
	struct gh_rm_mem_parcel *parcel;
	int i, j, pinned, ret = 0;
	unsigned int gup_flags;
	size_t entry_size;
	u16 vmid;

	if (!region->memory_size || !PAGE_ALIGNED(region->memory_size) ||
	    !PAGE_ALIGNED(region->userspace_addr) || !PAGE_ALIGNED(region->guest_phys_addr))
	    !PAGE_ALIGNED(region->userspace_addr) ||
	    !PAGE_ALIGNED(region->guest_phys_addr))
		return -EINVAL;

	if (region->guest_phys_addr + region->memory_size < region->guest_phys_addr)
	if (overflows_type(region->guest_phys_addr + region->memory_size, u64))
		return -EOVERFLOW;

	ret = mutex_lock_interruptible(&ghvm->mm_lock);
@ -108,53 +121,55 @@ int gh_vm_mem_alloc(struct gh_vm *ghvm, struct gh_userspace_memory_region *regio

	mapping = __gh_vm_mem_find_by_label(ghvm, region->label);
	if (mapping) {
		mutex_unlock(&ghvm->mm_lock);
		return -EEXIST;
		ret = -EEXIST;
		goto unlock;
	}

	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		mutex_unlock(&ghvm->mm_lock);
		return -ENOMEM;
	}

	mapping->parcel.label = region->label;
	mapping->guest_phys_addr = region->guest_phys_addr;
	mapping->npages = region->memory_size >> PAGE_SHIFT;
	parcel = &mapping->parcel;
	parcel->mem_handle = GH_MEM_HANDLE_INVAL; /* to be filled later by mem_share/mem_lend */
	parcel->mem_type = GH_RM_MEM_TYPE_NORMAL;

	/* Check for overlap */
	list_for_each_entry(tmp_mapping, &ghvm->memory_mappings, list) {
		if (!((mapping->guest_phys_addr + (mapping->npages << PAGE_SHIFT) <=
		       tmp_mapping->guest_phys_addr) ||
		      (mapping->guest_phys_addr >=
		       tmp_mapping->guest_phys_addr + (tmp_mapping->npages << PAGE_SHIFT)))) {
		if (gh_vm_mem_overlap(tmp_mapping, region->guest_phys_addr,
				      region->memory_size)) {
			ret = -EEXIST;
			goto free_mapping;
			goto unlock;
		}
	}

	list_add(&mapping->list, &ghvm->memory_mappings);
	mapping = kzalloc(sizeof(*mapping), GFP_KERNEL_ACCOUNT);
	if (!mapping) {
		ret = -ENOMEM;
		goto unlock;
	}

	mapping->pages = kcalloc(mapping->npages, sizeof(*mapping->pages), GFP_KERNEL);
	mapping->guest_phys_addr = region->guest_phys_addr;
	mapping->npages = region->memory_size >> PAGE_SHIFT;
	parcel = &mapping->parcel;
	parcel->label = region->label;
	parcel->mem_handle = GH_MEM_HANDLE_INVAL; /* to be filled later by mem_share/mem_lend */
	parcel->mem_type = GH_RM_MEM_TYPE_NORMAL;

	ret = account_locked_vm(ghvm->mm, mapping->npages, true);
	if (ret)
		goto free_mapping;

	mapping->pages = kcalloc(mapping->npages, sizeof(*mapping->pages), GFP_KERNEL_ACCOUNT);
	if (!mapping->pages) {
		ret = -ENOMEM;
		mapping->npages = 0; /* update npages for reclaim */
		goto reclaim;
		goto unlock_pages;
	}

	gup_flags = FOLL_LONGTERM;
	if (region->flags & GH_MEM_ALLOW_WRITE)
		gup_flags |= FOLL_WRITE;

	pinned = pin_user_pages_fast(region->userspace_addr, mapping->npages,
				     FOLL_WRITE | FOLL_LONGTERM, mapping->pages);
				     gup_flags, mapping->pages);
	if (pinned < 0) {
		ret = pinned;
		mapping->npages = 0; /* update npages for reclaim */
		goto reclaim;
		goto free_pages;
	} else if (pinned != mapping->npages) {
		ret = -EFAULT;
		mapping->npages = pinned; /* update npages for reclaim */
		goto reclaim;
		goto unpin_pages;
	}

	if (lend) {
@ -164,15 +179,16 @@ int gh_vm_mem_alloc(struct gh_vm *ghvm, struct gh_userspace_memory_region *regio
		parcel->n_acl_entries = 2;
		mapping->share_type = VM_MEM_SHARE;
	}
	parcel->acl_entries = kcalloc(parcel->n_acl_entries, sizeof(*parcel->acl_entries),
				      GFP_KERNEL);
	parcel->acl_entries = kcalloc(parcel->n_acl_entries,
				      sizeof(*parcel->acl_entries), GFP_KERNEL);
	if (!parcel->acl_entries) {
		ret = -ENOMEM;
		goto reclaim;
		goto unpin_pages;
	}

	parcel->acl_entries[0].vmid = cpu_to_le16(ghvm->vmid);

	/* acl_entries[0].vmid will be this VM's vmid. We'll fill it when the
	 * VM is starting and we know the VM's vmid.
	 */
	if (region->flags & GH_MEM_ALLOW_READ)
		parcel->acl_entries[0].perms |= GH_RM_ACL_R;
	if (region->flags & GH_MEM_ALLOW_WRITE)
@ -180,78 +196,66 @@ int gh_vm_mem_alloc(struct gh_vm *ghvm, struct gh_userspace_memory_region *regio
	if (region->flags & GH_MEM_ALLOW_EXEC)
		parcel->acl_entries[0].perms |= GH_RM_ACL_X;

	if (mapping->share_type == VM_MEM_SHARE) {
	if (!lend) {
		ret = gh_rm_get_vmid(ghvm->rm, &vmid);
		if (ret)
			goto reclaim;
			goto free_acl;

		parcel->acl_entries[1].vmid = cpu_to_le16(vmid);
		/* Host assumed to have all these permissions. Gunyah will not
		 * grant new permissions if host actually had less than RWX
		 */
		parcel->acl_entries[1].perms |= GH_RM_ACL_R | GH_RM_ACL_W | GH_RM_ACL_X;
		parcel->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W | GH_RM_ACL_X;
	}

	mem_entries = kcalloc(mapping->npages, sizeof(*mem_entries), GFP_KERNEL);
	if (!mem_entries) {
	parcel->n_mem_entries = 1;
	for (i = 1; i < mapping->npages; i++) {
		if (!pages_are_mergeable(mapping->pages[i - 1], mapping->pages[i]))
			parcel->n_mem_entries++;
	}

	parcel->mem_entries = kcalloc(parcel->n_mem_entries,
				      sizeof(parcel->mem_entries[0]),
				      GFP_KERNEL_ACCOUNT);
	if (!parcel->mem_entries) {
		ret = -ENOMEM;
		goto reclaim;
		goto free_acl;
	}

	/* reduce number of entries by combining contiguous pages into single memory entry */
	prev_page = page_to_phys(mapping->pages[0]);
	mem_entries[0].ipa_base = cpu_to_le64(prev_page);
	prev_page = mapping->pages[0];
	parcel->mem_entries[0].ipa_base = cpu_to_le64(page_to_phys(prev_page));
	entry_size = PAGE_SIZE;
	for (i = 1, j = 0; i < mapping->npages; i++) {
		curr_page = page_to_phys(mapping->pages[i]);
		if (curr_page - prev_page == PAGE_SIZE) {
		curr_page = mapping->pages[i];
		if (pages_are_mergeable(prev_page, curr_page)) {
			entry_size += PAGE_SIZE;
		} else {
			mem_entries[j].size = cpu_to_le64(entry_size);
			parcel->mem_entries[j].size = cpu_to_le64(entry_size);
			j++;
			mem_entries[j].ipa_base = cpu_to_le64(curr_page);
			parcel->mem_entries[j].ipa_base =
				cpu_to_le64(page_to_phys(curr_page));
			entry_size = PAGE_SIZE;
		}

		prev_page = curr_page;
	}
	mem_entries[j].size = cpu_to_le64(entry_size);

	parcel->n_mem_entries = j + 1;
	parcel->mem_entries = kmemdup(mem_entries, sizeof(*mem_entries) * parcel->n_mem_entries,
				      GFP_KERNEL);
	kfree(mem_entries);
	if (!parcel->mem_entries) {
		ret = -ENOMEM;
		goto reclaim;
	}
	parcel->mem_entries[j].size = cpu_to_le64(entry_size);

	list_add(&mapping->list, &ghvm->memory_mappings);
	mutex_unlock(&ghvm->mm_lock);
	return 0;
reclaim:
	gh_vm_mem_reclaim(ghvm, mapping);
free_acl:
	kfree(parcel->acl_entries);
unpin_pages:
	unpin_user_pages(mapping->pages, pinned);
free_pages:
	kfree(mapping->pages);
unlock_pages:
	account_locked_vm(ghvm->mm, mapping->npages, false);
free_mapping:
	kfree(mapping);
	mutex_unlock(&ghvm->mm_lock);
	return ret;
}

int gh_vm_mem_free(struct gh_vm *ghvm, u32 label)
{
	struct gh_vm_mem *mapping;
	int ret;

	ret = mutex_lock_interruptible(&ghvm->mm_lock);
	if (ret)
		return ret;

	mapping = __gh_vm_mem_find_by_label(ghvm, label);
	if (!mapping)
		goto out;

	gh_vm_mem_reclaim(ghvm, mapping);
	kfree(mapping);
out:
unlock:
	mutex_unlock(&ghvm->mm_lock);
	return ret;
}
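
The mem_entries construction in gh_vm_mem_alloc() above is effectively a run-length encoding of the pinned pages: a first pass counts the runs of PFN-contiguous pages with pages_are_mergeable(), and a second pass emits one {ipa_base, size} entry per run. A hedged standalone sketch of that two-pass shape over a plain PFN array (the names here are illustrative, not kernel API):

	#include <stdint.h>
	#include <stddef.h>

	struct mem_entry {
		uint64_t base;		/* first pfn of the run */
		uint64_t npages;	/* length of the run */
	};

	/* Count runs first so the entry array can be sized exactly, then fill it;
	 * the caller provides `out` sized to the run count.
	 */
	static size_t coalesce_pfns(const uint64_t *pfns, size_t n, struct mem_entry *out)
	{
		size_t i, j = 0;

		if (!n)
			return 0;
		out[0].base = pfns[0];
		out[0].npages = 1;
		for (i = 1; i < n; i++) {
			if (pfns[i] == pfns[i - 1] + 1) {
				out[j].npages++;	/* extend the current run */
			} else {
				j++;			/* start a new run */
				out[j].base = pfns[i];
				out[j].npages = 1;
			}
		}
		return j + 1;
	}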

@ -974,6 +974,16 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
			continue;
		}

		/*
		 * If wb_tryget fails, the wb has been shutdown, skip it.
		 *
		 * Pin @wb so that it stays on @bdi->wb_list. This allows
		 * continuing iteration from @wb after dropping and
		 * regrabbing rcu read lock.
		 */
		if (!wb_tryget(wb))
			continue;

		/* alloc failed, execute synchronously using on-stack fallback */
		work = &fallback_work;
		*work = *base_work;
@ -982,13 +992,6 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
		work->done = &fallback_work_done;

		wb_queue_work(wb, work);

		/*
		 * Pin @wb so that it stays on @bdi->wb_list. This allows
		 * continuing iteration from @wb after dropping and
		 * regrabbing rcu read lock.
		 */
		wb_get(wb);
		last_wb = wb;

		rcu_read_unlock();

@ -430,6 +430,23 @@ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
	return 0;
}

/**
 * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area
 * @sci: segment constructor object
 *
 * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of
 * the current segment summary block.
 */
static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci)
{
	struct nilfs_segsum_pointer *ssp;

	ssp = sci->sc_blk_cnt > 0 ? &sci->sc_binfo_ptr : &sci->sc_finfo_ptr;
	if (ssp->offset < ssp->bh->b_size)
		memset(ssp->bh->b_data + ssp->offset, 0,
		       ssp->bh->b_size - ssp->offset);
}

static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
{
	sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks;
@ -438,6 +455,7 @@ static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci)
		 * The current segment is filled up
		 * (internal code)
		 */
		nilfs_segctor_zeropad_segsum(sci);
		sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg);
		return nilfs_segctor_reset_segment_buffer(sci);
	}
@ -542,6 +560,7 @@ static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci,
			goto retry;
		}
		if (unlikely(required)) {
			nilfs_segctor_zeropad_segsum(sci);
			err = nilfs_segbuf_extend_segsum(segbuf);
			if (unlikely(err))
				goto failed;
@ -1531,6 +1550,7 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
		nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
		sci->sc_stage = prev_stage;
	}
	nilfs_segctor_zeropad_segsum(sci);
	nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile);
	return 0;

@ -236,6 +236,7 @@ static inline void *offset_to_ptr(const int *off)
 * bool and also pointer types.
 */
#define is_signed_type(type) (((type)(-1)) < (__force type)1)
#define is_unsigned_type(type) (!is_signed_type(type))

/*
 * This is needed in functions which generate the stack canary, see

@ -14,13 +14,13 @@
#include <linux/mailbox_client.h>
#include <linux/types.h>

/* Follows resource manager's resource types for VM_GET_HYP_RESOURCES */
/* Matches resource manager's resource types for VM_GET_HYP_RESOURCES RPC */
enum gh_resource_type {
	GH_RESOURCE_TYPE_BELL_TX	= 0,
	GH_RESOURCE_TYPE_BELL_RX	= 1,
	GH_RESOURCE_TYPE_MSGQ_TX	= 2,
	GH_RESOURCE_TYPE_MSGQ_RX	= 3,
	GH_RESOURCE_TYPE_VCPU		= 4,
	GH_RESOURCE_TYPE_VCPU		= 4,
};

struct gh_resource {
@ -28,21 +28,15 @@ struct gh_resource {
	u64 capid;
	unsigned int irq;

	/* To help allocator in vm manager */
	struct list_head list;
	u32 rm_label;
};

/**
 * Gunyah Doorbells
 */
#define GH_BELL_NONBLOCK	BIT(32)

/**
 * Gunyah Message Queues
 */

#define GH_MSGQ_MAX_MSG_SIZE	240
#define GH_MSGQ_MAX_MSG_SIZE	240

struct gh_msgq_tx_data {
	size_t length;
@ -115,10 +109,10 @@ enum gh_error {
};

/**
 * gh_remap_error() - Remap Gunyah hypervisor errors into a Linux error code
 * gh_error_remap() - Remap Gunyah hypervisor errors into a Linux error code
 * @gh_error: Gunyah hypercall return value
 */
static inline int gh_remap_error(enum gh_error gh_error)
static inline int gh_error_remap(enum gh_error gh_error)
{
	switch (gh_error) {
	case GH_ERROR_OK:
@ -149,16 +143,17 @@ static inline int gh_remap_error(enum gh_error gh_error)
}

enum gh_api_feature {
	GH_FEATURE_DOORBELL = 1,
	GH_FEATURE_MSGQUEUE = 2,
	GH_FEATURE_VCPU = 5,
	GH_FEATURE_MEMEXTENT = 6,
	GH_FEATURE_DOORBELL	= 1,
	GH_FEATURE_MSGQUEUE	= 2,
	GH_FEATURE_VCPU		= 5,
	GH_FEATURE_MEMEXTENT	= 6,
};

bool arch_is_gh_guest(void);

#define GH_API_V1 1

/* Other bits reserved for future use and will be zero */
#define GH_API_INFO_API_VERSION_MASK	GENMASK_ULL(13, 0)
#define GH_API_INFO_BIG_ENDIAN		BIT_ULL(14)
#define GH_API_INFO_IS_64BIT		BIT_ULL(15)
@ -181,12 +176,28 @@ enum gh_error gh_hypercall_bell_set_mask(u64 capid, u64 enable_mask, u64 ack_mas

#define GH_HYPERCALL_MSGQ_TX_FLAGS_PUSH		BIT(0)

enum gh_error gh_hypercall_msgq_send(u64 capid, size_t size, void *buff, int tx_flags, bool *ready);
enum gh_error gh_hypercall_msgq_send(u64 capid, size_t size, void *buff, u64 tx_flags, bool *ready);
enum gh_error gh_hypercall_msgq_recv(u64 capid, void *buff, size_t size, size_t *recv_size,
				     bool *ready);

struct gh_hypercall_vcpu_run_resp {
	u64 state;
	union {
		enum {
			/* VCPU is ready to run */
			GH_VCPU_STATE_READY		= 0,
			/* VCPU is sleeping until an interrupt arrives */
			GH_VCPU_STATE_EXPECTS_WAKEUP	= 1,
			/* VCPU is powered off */
			GH_VCPU_STATE_POWERED_OFF	= 2,
			/* VCPU is blocked in EL2 for unspecified reason */
			GH_VCPU_STATE_BLOCKED		= 3,
			/* VCPU has returned for MMIO READ */
			GH_VCPU_ADDRSPACE_VMMIO_READ	= 4,
			/* VCPU has returned for MMIO WRITE */
			GH_VCPU_ADDRSPACE_VMMIO_WRITE	= 5,
		} state;
		u64 sized_state;
	};
	u64 state_data[3];
};

@ -10,12 +10,12 @@
#include <linux/notifier.h>
#include <linux/gunyah.h>

#define GH_VMID_INVAL	U16_MAX
#define GH_VMID_INVAL		U16_MAX
#define GH_MEM_HANDLE_INVAL	U32_MAX

struct gh_rm;
int gh_rm_call(struct gh_rm *rm, u32 message_id, void *req_buff, size_t req_buff_size,
	       void **resp_buf, size_t *resp_buff_size);
int gh_rm_call(struct gh_rm *rm, u32 message_id, void *req_buf, size_t req_buf_size,
	       void **resp_buf, size_t *resp_buf_size);
int gh_rm_notifier_register(struct gh_rm *rm, struct notifier_block *nb);
int gh_rm_notifier_unregister(struct gh_rm *rm, struct notifier_block *nb);
struct device *gh_rm_get(struct gh_rm *rm);
@ -31,12 +31,6 @@ struct gh_rm_vm_exited_payload {
#define GH_RM_NOTIFICATION_VM_EXITED		0x56100001

enum gh_rm_vm_status {
	/**
	 * RM doesn't have a state where load partially failed because
	 * only Linux
	 */
	GH_RM_VM_STATUS_LOAD_FAILED	= -1,

	GH_RM_VM_STATUS_NO_STATE	= 0,
	GH_RM_VM_STATUS_INIT		= 1,
	GH_RM_VM_STATUS_READY		= 2,
@ -81,16 +75,16 @@ enum gh_rm_mem_type {
};

/*
 * struct gh_rm_mem_parcel - Package info about memory to be lent/shared/donated/reclaimed
 * struct gh_rm_mem_parcel - Info about memory to be lent/shared/donated/reclaimed
 * @mem_type: The type of memory: normal (DDR) or IO
 * @label: A client-specified identifier which can be used by the other VMs to identify the purpose
 *         of the memory parcel.
 * @n_acl_entries: Count of the number of entries in the @acl_entries array.
 * @acl_entries: An array of access control entries. Each entry specifies a VM and what access
 *               is allowed for the memory parcel.
 * @n_acl_entries: Count of the number of entries in the `acl_entries` array.
 * @mem_entries: An list of regions to be associated with the memory parcel. Addresses should be
 * @n_mem_entries: Count of the number of entries in the @mem_entries array.
 * @mem_entries: An array of regions to be associated with the memory parcel. Addresses should be
 *               (intermediate) physical addresses from Linux's perspective.
 * @n_mem_entries: Count of the number of entries in the `mem_entries` array.
 * @mem_handle: On success, filled with memory handle that RM allocates for this memory parcel
 */
struct gh_rm_mem_parcel {

@ -27,6 +27,7 @@ struct gh_vm_function {
	struct module *mod;
	long (*bind)(struct gh_vm_function_instance *f);
	void (*unbind)(struct gh_vm_function_instance *f);
	bool (*compare)(const struct gh_vm_function_instance *f, const void *arg, size_t size);
};

/**
@ -53,31 +54,44 @@ struct gh_vm_function_instance {
int gh_vm_function_register(struct gh_vm_function *f);
void gh_vm_function_unregister(struct gh_vm_function *f);

#define DECLARE_GH_VM_FUNCTION(_name, _type, _bind, _unbind)	\
	static struct gh_vm_function _name = {		\
/* Since the function identifiers were setup in a uapi header as an
 * enum and we do not want to change that, the user must supply the expanded
 * constant as well and the compiler checks they are the same.
 * See also MODULE_ALIAS_RDMA_NETLINK.
 */
#define MODULE_ALIAS_GH_VM_FUNCTION(_type, _idx)		\
	static inline void __maybe_unused __chk##_idx(void)	\
	{							\
		BUILD_BUG_ON(_type != _idx);			\
	}							\
	MODULE_ALIAS("ghfunc:" __stringify(_idx))

#define DECLARE_GH_VM_FUNCTION(_name, _type, _bind, _unbind, _compare)	\
	static struct gh_vm_function _name = {		\
		.type = _type,				\
		.name = __stringify(_name),		\
		.mod = THIS_MODULE,			\
		.bind = _bind,				\
		.unbind = _unbind,			\
	};						\
	MODULE_ALIAS("ghfunc:"__stringify(_type))
		.compare = _compare,			\
	}

#define module_gh_vm_function(__gf)			\
	module_driver(__gf, gh_vm_function_register, gh_vm_function_unregister)

#define DECLARE_GH_VM_FUNCTION_INIT(_name, _type, _bind, _unbind)	\
	DECLARE_GH_VM_FUNCTION(_name, _type, _bind, _unbind);	\
	module_gh_vm_function(_name)
#define DECLARE_GH_VM_FUNCTION_INIT(_name, _type, _idx, _bind, _unbind, _compare)	\
	DECLARE_GH_VM_FUNCTION(_name, _type, _bind, _unbind, _compare);	\
	module_gh_vm_function(_name);			\
	MODULE_ALIAS_GH_VM_FUNCTION(_type, _idx)
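
Together these macros declare the function, register it on module init, and emit the "ghfunc:<idx>" modalias that lets gh_vm_get_function() demand-load the backend via request_module(). A hedged sketch of a backend module using them; the bind/unbind/compare bodies are placeholders, not the in-tree vCPU driver, and the GH_FN_VCPU/1 pairing mirrors the uapi enum:

	/* Editor's sketch of a function backend module. */
	#include <linux/module.h>
	#include <linux/string.h>
	#include <linux/gunyah_vm_mgr.h>

	static long my_fn_bind(struct gh_vm_function_instance *f)
	{
		/* claim resources, register resource tickets, etc. */
		return 0;
	}

	static void my_fn_unbind(struct gh_vm_function_instance *f)
	{
		/* release whatever bind() acquired */
	}

	static bool my_fn_compare(const struct gh_vm_function_instance *f,
				  const void *arg, size_t size)
	{
		return size == f->arg_size && !memcmp(f->argp, arg, size);
	}

	DECLARE_GH_VM_FUNCTION_INIT(my_fn, GH_FN_VCPU, 1,
				    my_fn_bind, my_fn_unbind, my_fn_compare);
	MODULE_LICENSE("GPL");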

struct gh_vm_resource_ticket {
	struct list_head list; /* for gh_vm's resources list */
	struct list_head resources; /* for gh_resources's list */
	struct list_head vm_list; /* for gh_vm's resource tickets list */
	struct list_head resources; /* resources associated with this ticket */
	enum gh_resource_type resource_type;
	u32 label;

	struct module *owner;
	int (*populate)(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc);
	bool (*populate)(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc);
	void (*unpopulate)(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc);
};
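
With populate() now returning bool, a ticket owner signals acceptance of a resource by returning true, and the VM manager moves the matching gh_resource onto ticket->resources. A hedged sketch of registering a ticket; the field names follow the struct above, while the label value and callback bodies are illustrative only:

	/* Editor's sketch: a function backend claiming a TX doorbell by label. */
	static bool my_populate(struct gh_vm_resource_ticket *ticket,
				struct gh_resource *ghrsc)
	{
		/* returning true accepts ghrsc; it moves onto ticket->resources */
		return true;
	}

	static void my_unpopulate(struct gh_vm_resource_ticket *ticket,
				  struct gh_resource *ghrsc)
	{
		/* stop using ghrsc; the VM manager frees it afterwards */
	}

	static struct gh_vm_resource_ticket my_ticket = {
		.resource_type = GH_RESOURCE_TYPE_BELL_TX,
		.label = 7,		/* must match the label in the VM's devicetree */
		.owner = THIS_MODULE,
		.populate = my_populate,
		.unpopulate = my_unpopulate,
	};

	/* typically called from a function's bind() callback: */
	/*	ret = gh_vm_add_resource_ticket(inst->ghvm, &my_ticket);	*/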

@ -28,6 +28,7 @@
#include <linux/sched.h>
#include <linux/pgtable.h>
#include <linux/kasan.h>
#include <linux/page_pinner.h>
#include <linux/memremap.h>

struct mempolicy;
@ -760,8 +761,13 @@ static inline unsigned int folio_order(struct folio *folio)
 */
static inline int put_page_testzero(struct page *page)
{
	int ret;

	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
	return page_ref_dec_and_test(page);
	ret = page_ref_dec_and_test(page);
	page_pinner_put_page(page);

	return ret;
}

static inline int folio_put_testzero(struct folio *folio)

@ -128,6 +128,53 @@ static inline bool __must_check __must_check_overflow(bool overflow)
		 (*_d >> _to_shift) != _a);		\
}))

#define __overflows_type_constexpr(x, T) (			\
	is_unsigned_type(typeof(x)) ?				\
		(x) > type_max(typeof(T)) :			\
	is_unsigned_type(typeof(T)) ?				\
		(x) < 0 || (x) > type_max(typeof(T)) :		\
	(x) < type_min(typeof(T)) || (x) > type_max(typeof(T)))

#define __overflows_type(x, T) ({	\
	typeof(T) v = 0;		\
	check_add_overflow((x), v, &v);	\
})

/**
 * overflows_type - helper for checking the overflows between value, variables,
 *		    or data type
 *
 * @n: source constant value or variable to be checked
 * @T: destination variable or data type proposed to store @n
 *
 * Compares the @n expression for whether or not it can safely fit in
 * the storage of the type in @T. @n and @T can have different types.
 * If @n is a constant expression, this will also resolve to a constant
 * expression.
 *
 * Returns: true if overflow can occur, false otherwise.
 */
#define overflows_type(n, T)					\
	__builtin_choose_expr(__is_constexpr(n),		\
			      __overflows_type_constexpr(n, T),	\
			      __overflows_type(n, T))

/**
 * castable_to_type - like __same_type(), but also allows for casted literals
 *
 * @n: variable or constant value
 * @T: variable or data type
 *
 * Unlike the __same_type() macro, this allows a constant value as the
 * first argument. If this value would not overflow into an assignment
 * of the second argument's type, it returns true. Otherwise, this falls
 * back to __same_type().
 */
#define castable_to_type(n, T)						\
	__builtin_choose_expr(__is_constexpr(n),			\
			      !__overflows_type_constexpr(n, T),	\
			      __same_type(n, T))
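
overflows_type() collapses to a constant expression for constant inputs, so it works in compile-time assertions as well as in runtime guards like the gh_vm_mem_alloc() and DTB-config checks earlier in this patch. A hedged usage sketch with illustrative values:

	/* Editor's sketch: one compile-time and one runtime use of the helper. */
	static int check_len(u64 len)
	{
		/* compile-time: assert PAGE_SIZE can be stored in a u32 */
		BUILD_BUG_ON(overflows_type(PAGE_SIZE, u32));

		/* runtime: true when len cannot be stored in a u32 */
		if (overflows_type(len, u32))
			return -EOVERFLOW;
		return 0;
	}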

/**
 * size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX
 * @factor1: first factor

@ -19,6 +19,10 @@ struct page_ext_operations {
enum page_ext_flags {
	PAGE_EXT_OWNER,
	PAGE_EXT_OWNER_ALLOCATED,
#if defined(CONFIG_PAGE_PINNER)
	/* page migration failed */
	PAGE_EXT_PINNER_MIGRATION_FAILED,
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
	PAGE_EXT_YOUNG,
	PAGE_EXT_IDLE,

include/linux/page_pinner.h (new file, 48 lines)
@ -0,0 +1,48 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PAGE_PINNER_H
#define __LINUX_PAGE_PINNER_H

#include <linux/jump_label.h>

#ifdef CONFIG_PAGE_PINNER
extern struct static_key_false page_pinner_inited;
extern struct static_key_true failure_tracking;
extern struct page_ext_operations page_pinner_ops;

extern void __free_page_pinner(struct page *page, unsigned int order);
void __page_pinner_failure_detect(struct page *page);
void __page_pinner_put_page(struct page *page);

static inline void free_page_pinner(struct page *page, unsigned int order)
{
	if (static_branch_unlikely(&page_pinner_inited))
		__free_page_pinner(page, order);
}

static inline void page_pinner_put_page(struct page *page)
{
	if (!static_branch_unlikely(&page_pinner_inited))
		return;

	__page_pinner_put_page(page);
}

static inline void page_pinner_failure_detect(struct page *page)
{
	if (!static_branch_unlikely(&page_pinner_inited))
		return;

	__page_pinner_failure_detect(page);
}
#else
static inline void free_page_pinner(struct page *page, unsigned int order)
{
}
static inline void page_pinner_put_page(struct page *page)
{
}
static inline void page_pinner_failure_detect(struct page *page)
{
}
#endif /* CONFIG_PAGE_PINNER */
#endif /* __LINUX_PAGE_PINNER_H */

@ -96,7 +96,7 @@ extern int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
					  u32 cp_nonpixel_start,
					  u32 cp_nonpixel_size);
extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
			       unsigned int *src,
			       u64 *src,
			       const struct qcom_scm_vmperm *newvm,
			       unsigned int dest_cnt);

@ -614,6 +614,7 @@ struct nft_set_binding {
};

enum nft_trans_phase;
void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set);
void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
			      struct nft_set_binding *binding,
			      enum nft_trans_phase phase);

@ -70,6 +70,19 @@ DECLARE_HOOK(android_vh_binder_select_worklist_ilocked,
	TP_PROTO(struct list_head **list, struct binder_thread *thread, struct binder_proc *proc,
		int wait_for_proc_work),
	TP_ARGS(list, thread, proc, wait_for_proc_work));
DECLARE_HOOK(android_vh_binder_alloc_new_buf_locked,
	TP_PROTO(size_t size, size_t *free_async_space, int is_async),
	TP_ARGS(size, free_async_space, is_async));
struct binder_transaction_data;
DECLARE_HOOK(android_vh_binder_reply,
	TP_PROTO(struct binder_proc *target_proc, struct binder_proc *proc,
		struct binder_thread *thread, struct binder_transaction_data *tr),
	TP_ARGS(target_proc, proc, thread, tr));
DECLARE_HOOK(android_vh_binder_trans,
	TP_PROTO(struct binder_proc *target_proc, struct binder_proc *proc,
		struct binder_thread *thread, struct binder_transaction_data *tr),
	TP_ARGS(target_proc, proc, thread, tr));

#endif /* _TRACE_HOOK_BINDER_H */
/* This part must be outside protection */
#include <trace/define_trace.h>

@ -12,6 +12,10 @@ DECLARE_HOOK(android_vh_cgroup_set_task,
	TP_PROTO(int ret, struct task_struct *task),
	TP_ARGS(ret, task));

DECLARE_RESTRICTED_HOOK(android_rvh_refrigerator,
	TP_PROTO(bool f),
	TP_ARGS(f), 1);

struct cgroup_subsys;
struct cgroup_taskset;
DECLARE_HOOK(android_vh_cgroup_attach,

include/trace/hooks/signal.h (new file, 16 lines)
@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM signal
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH trace/hooks
#if !defined(_TRACE_HOOK_SIGNAL_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_SIGNAL_H
#include <trace/hooks/vendor_hooks.h>

struct task_struct;
DECLARE_HOOK(android_vh_do_send_sig_info,
	TP_PROTO(int sig, struct task_struct *killer, struct task_struct *dst),
	TP_ARGS(sig, killer, dst));
#endif /* _TRACE_HOOK_SIGNAL_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
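
DECLARE_HOOK() is built on the tracepoint machinery, so (assuming the usual Android vendor-hook code generation) a vendor module attaches to the new hook with the generated register_trace_android_vh_* helper. A hedged sketch of a consumer; the probe body and module boilerplate are illustrative:

	/* Editor's sketch of a vendor module attaching to the new signal hook. */
	#include <linux/module.h>
	#include <trace/hooks/signal.h>

	static void my_send_sig_probe(void *data, int sig,
				      struct task_struct *killer,
				      struct task_struct *dst)
	{
		/* e.g. record fatal signals sent to watched tasks */
	}

	static int __init my_hook_init(void)
	{
		return register_trace_android_vh_do_send_sig_info(my_send_sig_probe, NULL);
	}
	module_init(my_hook_init);
	MODULE_LICENSE("GPL");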

@ -3,8 +3,8 @@
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef _UAPI_LINUX_GUNYAH
#define _UAPI_LINUX_GUNYAH
#ifndef _UAPI_LINUX_GUNYAH_H
#define _UAPI_LINUX_GUNYAH_H

/*
 * Userspace interface for /dev/gunyah - gunyah based virtual machine
@ -24,14 +24,22 @@
 * ioctls for VM fds
 */

#define GH_MEM_ALLOW_READ	(1UL << 0)
#define GH_MEM_ALLOW_WRITE	(1UL << 1)
#define GH_MEM_ALLOW_EXEC	(1UL << 2)
/**
 * enum gh_mem_flags - Possible flags on &struct gh_userspace_memory_region
 * @GH_MEM_ALLOW_READ: Allow guest to read the memory
 * @GH_MEM_ALLOW_WRITE: Allow guest to write to the memory
 * @GH_MEM_ALLOW_EXEC: Allow guest to execute instructions in the memory
 */
enum gh_mem_flags {
	GH_MEM_ALLOW_READ	= 1UL << 0,
	GH_MEM_ALLOW_WRITE	= 1UL << 1,
	GH_MEM_ALLOW_EXEC	= 1UL << 2,
};

/**
 * struct gh_userspace_memory_region - Userspace memory description for GH_VM_SET_USER_MEM_REGION
 * @label: Unique identifer to the region.
 * @flags: Flags for memory parcel behavior
 * @label: Identifier to the region which is unique to the VM.
 * @flags: Flags for memory parcel behavior. See &enum gh_mem_flags.
 * @guest_phys_addr: Location of the memory region in guest's memory space (page-aligned)
 * @memory_size: Size of the region (page-aligned)
 * @userspace_addr: Location of the memory region in caller (userspace)'s memory
@ -52,7 +60,9 @@ struct gh_userspace_memory_region {

/**
 * struct gh_vm_dtb_config - Set the location of the VM's devicetree blob
 * @guest_phys_addr: Address of the VM's devicetree in guest memory.
 * @size: Maximum size of the devicetree.
 * @size: Maximum size of the devicetree including space for overlays.
 *        Resource manager applies an overlay to the DTB and dtb_size should
 *        include room for the overlay. A page of memory is typically plenty.
 */
struct gh_vm_dtb_config {
	__u64 guest_phys_addr;
@ -63,67 +73,61 @@ struct gh_vm_dtb_config {
#define GH_VM_START		_IO(GH_IOCTL_TYPE, 0x3)
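
In userspace these pieces compose into the basic VMM flow: create a VM, describe its memory and devicetree, then start it. A hedged sketch with error handling trimmed; the GH_VM_SET_DTB_CONFIG name is inferred from the dtb_config ioctl handler earlier in this patch, and the addresses and buffer names are illustrative:

	/* Editor's sketch of the userspace side; not a complete VMM. */
	int gunyah_fd = open("/dev/gunyah", O_RDWR);
	int vm_fd = ioctl(gunyah_fd, GH_CREATE_VM, 0);

	struct gh_userspace_memory_region region = {
		.label = 0,
		.flags = GH_MEM_ALLOW_READ | GH_MEM_ALLOW_WRITE | GH_MEM_ALLOW_EXEC,
		.guest_phys_addr = 0x80000000,
		.memory_size = guest_mem_size,		/* page-aligned */
		.userspace_addr = (__u64)guest_mem,	/* page-aligned mmap() buffer */
	};
	ioctl(vm_fd, GH_VM_SET_USER_MEM_REGION, &region);

	struct gh_vm_dtb_config dtb_config = {
		.guest_phys_addr = 0x80000000 + guest_mem_size - 0x10000,
		.size = 0x10000,	/* leaves room for the RM's overlay */
	};
	ioctl(vm_fd, GH_VM_SET_DTB_CONFIG, &dtb_config);

	ioctl(vm_fd, GH_VM_START);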

/**
 * GH_FN_VCPU - create a vCPU instance to control a vCPU
 * enum gh_fn_type - Valid types of Gunyah VM functions
 * @GH_FN_VCPU: create a vCPU instance to control a vCPU
 *              &struct gh_fn_desc.arg is a pointer to &struct gh_fn_vcpu_arg
 *              Return: file descriptor to manipulate the vcpu.
 * @GH_FN_IRQFD: register eventfd to assert a Gunyah doorbell
 *               &struct gh_fn_desc.arg is a pointer to &struct gh_fn_irqfd_arg
 * @GH_FN_IOEVENTFD: register ioeventfd to trigger when VM faults on parameter
 *                   &struct gh_fn_desc.arg is a pointer to &struct gh_fn_ioeventfd_arg
 */
enum gh_fn_type {
	GH_FN_VCPU = 1,
	GH_FN_IRQFD,
	GH_FN_IOEVENTFD,
};

#define GH_FN_MAX_ARG_SIZE		256

/**
 * struct gh_fn_vcpu_arg - Arguments to create a vCPU.
 * @id: vcpu id
 *
 * gh_fn_desc is filled with &struct gh_fn_vcpu_arg
 * Create this function with &GH_VM_ADD_FUNCTION using type &GH_FN_VCPU.
 *
 * The vcpu type will register with the VM Manager to expect to control
 * vCPU number `vcpu_id`. It returns a file descriptor allowing interaction with
 * the vCPU. See the Gunyah vCPU API description sections for interacting with
 * the Gunyah vCPU file descriptors.
 *
 * Return: file descriptor to manipulate the vcpu. See GH_VCPU_* ioctls
 */
#define GH_FN_VCPU		1

/**
 * GH_FN_IRQFD - register eventfd to assert a Gunyah doorbell
 *
 * gh_fn_desc is filled with gh_fn_irqfd_arg
 *
 * Allows setting an eventfd to directly trigger a guest interrupt.
 * irqfd.fd specifies the file descriptor to use as the eventfd.
 * irqfd.label corresponds to the doorbell label used in the guest VM's devicetree.
 *
 * Return: 0
 */
#define GH_FN_IRQFD		2

/**
 * GH_FN_IOEVENTFD - register ioeventfd to trigger when VM faults on parameter
 *
 * gh_fn_desc is filled with gh_fn_ioeventfd_arg
 *
 * Attaches an ioeventfd to a legal mmio address within the guest. A guest write
 * in the registered address will signal the provided event instead of triggering
 * an exit on the GH_VCPU_RUN ioctl.
 *
 * If GH_IOEVENTFD_DATAMATCH flag is set, the event will be signaled only if the
 * written value to the registered address is equal to datamatch in
 * struct gh_fn_ioeventfd_arg.
 *
 * Return: 0
 */
#define GH_FN_IOEVENTFD		3

#define GH_FN_MAX_ARG_SIZE	256

/**
 * struct gh_fn_vcpu_arg - Arguments to create a vCPU
 * @id: vcpu id
 */
struct gh_fn_vcpu_arg {
	__u32 id;
};

#define GH_IRQFD_LEVEL		(1UL << 0)
/**
 * enum gh_irqfd_flags - flags for use in gh_fn_irqfd_arg
 * @GH_IRQFD_FLAGS_LEVEL: make the interrupt operate like a level triggered
 *                        interrupt on guest side. Triggering IRQFD before
 *                        guest handles the interrupt causes interrupt to
 *                        stay asserted.
 */
enum gh_irqfd_flags {
	GH_IRQFD_FLAGS_LEVEL	= 1UL << 0,
};

/**
 * struct gh_fn_irqfd_arg - Arguments to create an irqfd function
 * struct gh_fn_irqfd_arg - Arguments to create an irqfd function.
 *
 * Create this function with &GH_VM_ADD_FUNCTION using type &GH_FN_IRQFD.
 *
 * Allows setting an eventfd to directly trigger a guest interrupt.
 * irqfd.fd specifies the file descriptor to use as the eventfd.
 * irqfd.label corresponds to the doorbell label used in the guest VM's devicetree.
 *
 * @fd: an eventfd which when written to will raise a doorbell
 * @label: Label of the doorbell created on the guest VM
 * @flags: GH_IRQFD_LEVEL configures the corresponding doorbell to behave
 *         like a level triggered interrupt.
 * @flags: see &enum gh_irqfd_flags
 * @padding: padding bytes
 */
struct gh_fn_irqfd_arg {
@ -133,7 +137,15 @@ struct gh_fn_irqfd_arg {
	__u32 padding;
};

#define GH_IOEVENTFD_DATAMATCH		(1UL << 0)
/**
 * enum gh_ioeventfd_flags - flags for use in gh_fn_ioeventfd_arg
 * @GH_IOEVENTFD_FLAGS_DATAMATCH: the event will be signaled only if the
 *                                written value to the registered address is
 *                                equal to &struct gh_fn_ioeventfd_arg.datamatch
 */
enum gh_ioeventfd_flags {
	GH_IOEVENTFD_FLAGS_DATAMATCH	= 1UL << 0,
};

/**
 * struct gh_fn_ioeventfd_arg - Arguments to create an ioeventfd function
@ -141,10 +153,14 @@ struct gh_fn_irqfd_arg {
 * @addr: Address in guest memory
 * @len: Length of access
 * @fd: When ioeventfd is matched, this eventfd is written
 * @flags: If GH_IOEVENTFD_DATAMATCH flag is set, the event will be signaled
 *         only if the written value to the registered address is equal to
 *         @datamatch
 * @flags: See &enum gh_ioeventfd_flags
 * @padding: padding bytes
 *
 * Create this function with &GH_VM_ADD_FUNCTION using type &GH_FN_IOEVENTFD.
 *
 * Attaches an ioeventfd to a legal mmio address within the guest. A guest write
 * in the registered address will signal the provided event instead of triggering
 * an exit on the GH_VCPU_RUN ioctl.
 */
struct gh_fn_ioeventfd_arg {
	__u64 datamatch;
@ -157,9 +173,10 @@ struct gh_fn_ioeventfd_arg {

/**
 * struct gh_fn_desc - Arguments to create a VM function
 * @type: Type of the function. See GH_FN_* macro for supported types
 * @type: Type of the function. See &enum gh_fn_type.
 * @arg_size: Size of argument to pass to the function. arg_size <= GH_FN_MAX_ARG_SIZE
 * @arg: Value or pointer to argument given to the function
 * @arg: Pointer to argument given to the function. See &enum gh_fn_type for expected
 *       arguments for a function type.
 */
struct gh_fn_desc {
	__u32 type;
@ -170,13 +187,21 @@ struct gh_fn_desc {
#define GH_VM_ADD_FUNCTION	_IOW(GH_IOCTL_TYPE, 0x4, struct gh_fn_desc)
#define GH_VM_REMOVE_FUNCTION	_IOW(GH_IOCTL_TYPE, 0x7, struct gh_fn_desc)
|
||||
|
||||
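For illustration only (not part of this patch): a minimal userspace sketch of adding a function with GH_VM_ADD_FUNCTION, assuming a VM fd returned by GH_CREATE_VM and the declarations above. The helper name add_irqfd and the label value are hypothetical; the struct fields follow the UAPI shown in this hunk.

/* Hypothetical sketch: attach an eventfd to a guest doorbell. */
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>

static int add_irqfd(int vm_fd, uint32_t label)
{
        struct gh_fn_irqfd_arg irqfd = { 0 };
        struct gh_fn_desc desc = { 0 };
        int efd = eventfd(0, 0);        /* written by the VMM to ring the doorbell */

        if (efd < 0)
                return -1;
        irqfd.fd = efd;
        irqfd.label = label;            /* must match a doorbell label in the VM devicetree */
        desc.type = GH_FN_IRQFD;
        desc.arg_size = sizeof(irqfd);
        desc.arg = (__u64)(uintptr_t)&irqfd;
        return ioctl(vm_fd, GH_VM_ADD_FUNCTION, &desc);        /* 0 on success */
}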
/*
* ioctls for vCPU fds
*/

/**
* enum gh_vm_status - Stores status reason why VM is not runnable (exited).
* @GH_VM_STATUS_LOAD_FAILED: VM didn't start because it couldn't be loaded.
* @GH_VM_STATUS_EXITED: VM requested shutdown/reboot.
* Use &struct gh_vm_exit_info.reason for further details.
* @GH_VM_STATUS_CRASHED: VM state is unknown and has crashed.
*/
enum gh_vm_status {
GH_VM_STATUS_LOAD_FAILED = 1,
#define GH_VM_STATUS_LOAD_FAILED GH_VM_STATUS_LOAD_FAILED
GH_VM_STATUS_EXITED = 2,
#define GH_VM_STATUS_EXITED GH_VM_STATUS_EXITED
GH_VM_STATUS_CRASHED = 3,
#define GH_VM_STATUS_CRASHED GH_VM_STATUS_CRASHED
};
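An aside on the enum-plus-#define pattern above: it follows the common UAPI convention of defining each enumerator as itself so that userspace can feature-test values with the preprocessor, which a bare enum does not allow. A hedged sketch of the intended use:

#ifdef GH_VM_STATUS_CRASHED
        /* header is new enough to know about crashed VMs */
#endif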
/*
@ -203,9 +228,20 @@ struct gh_vm_exit_info {
__u8 reason[GH_VM_MAX_EXIT_REASON_SIZE];
};

#define GH_VCPU_EXIT_UNKNOWN 0
#define GH_VCPU_EXIT_MMIO 1
#define GH_VCPU_EXIT_STATUS 2
/**
* enum gh_vcpu_exit - Stores reason why &GH_VCPU_RUN ioctl recently exited with status 0
* @GH_VCPU_EXIT_UNKNOWN: Not used, status != 0
* @GH_VCPU_EXIT_MMIO: vCPU performed a read or write that could not be handled
* by hypervisor or Linux. Use @struct gh_vcpu_run.mmio for
* details of the read/write.
* @GH_VCPU_EXIT_STATUS: vCPU not able to run because the VM has exited.
* Use @struct gh_vcpu_run.status for why VM has exited.
*/
enum gh_vcpu_exit {
GH_VCPU_EXIT_UNKNOWN,
GH_VCPU_EXIT_MMIO,
GH_VCPU_EXIT_STATUS,
};

/**
* struct gh_vcpu_run - Application code obtains a pointer to the gh_vcpu_run
@ -213,19 +249,19 @@ struct gh_vm_exit_info {
* @immediate_exit: polled when scheduling the vcpu. If set, immediately returns -EINTR.
* @padding: padding bytes
* @exit_reason: Set when GH_VCPU_RUN returns successfully and gives reason why
* GH_VCPU_RUN has stopped running the vCPU.
* GH_VCPU_RUN has stopped running the vCPU. See &enum gh_vcpu_exit.
* @mmio: Used when exit_reason == GH_VCPU_EXIT_MMIO
* The guest has faulted on a memory-mapped I/O instruction that
* couldn't be satisfied by gunyah.
* @mmio.phys_addr: Address guest tried to access
* @mmio.data: the value that was written if `is_write == 1`. Filled by
* user for reads (`is_wite == 0`).
* user for reads (`is_write == 0`).
* @mmio.len: Length of write. Only the first `len` bytes of `data`
* are considered by Gunyah.
* @mmio.is_write: 1 if VM tried to perform a write, 0 for a read
* @status: Used when exit_reason == GH_VCPU_EXIT_STATUS.
* The guest VM is no longer runnable. This struct informs why.
* @status.status: See `enum gh_vm_status` for possible values
* @status.status: See &enum gh_vm_status for possible values
* @status.exit_info: Used when status == GH_VM_STATUS_EXITED
*/
struct gh_vcpu_run {
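To tie the pieces together, a hedged userspace sketch of the vCPU loop implied by the kernel-doc above. It assumes the gh_vcpu_run page is mapped from the vCPU fd (a KVM-style convention suggested by "Application code obtains a pointer"); handle_mmio_write() and handle_mmio_read() are hypothetical.

static void run_vcpu(int vcpu_fd)
{
        struct gh_vcpu_run *run = mmap(NULL, getpagesize(),
                                       PROT_READ | PROT_WRITE,
                                       MAP_SHARED, vcpu_fd, 0);

        while (ioctl(vcpu_fd, GH_VCPU_RUN, 0) == 0) {
                switch (run->exit_reason) {
                case GH_VCPU_EXIT_MMIO:
                        /* satisfy the faulting access, then re-enter the vCPU */
                        if (run->mmio.is_write)
                                handle_mmio_write(run->mmio.phys_addr,
                                                  run->mmio.data, run->mmio.len);
                        else
                                handle_mmio_read(run->mmio.phys_addr,
                                                 run->mmio.data, run->mmio.len);
                        break;
                case GH_VCPU_EXIT_STATUS:
                        /* VM no longer runnable; see enum gh_vm_status */
                        return;
                }
        }
}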
init/Kconfig
@ -892,18 +892,14 @@ config CC_IMPLICIT_FALLTHROUGH
default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5)
default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough)

# Currently, disable gcc-11,12 array-bounds globally.
# We may want to target only particular configurations some day.
# Currently, disable gcc-11+ array-bounds globally.
# It's still broken in gcc-13, so no upper bound yet.
config GCC11_NO_ARRAY_BOUNDS
def_bool y

config GCC12_NO_ARRAY_BOUNDS
def_bool y

config CC_NO_ARRAY_BOUNDS
bool
default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC_VERSION < 120000 && GCC11_NO_ARRAY_BOUNDS
default y if CC_IS_GCC && GCC_VERSION >= 120000 && GCC_VERSION < 130000 && GCC12_NO_ARRAY_BOUNDS
default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC11_NO_ARRAY_BOUNDS

#
# For architectures that know their GCC __int128 support is sound
@ -475,3 +475,4 @@ struct cgroup_subsys freezer_cgrp_subsys = {
.fork = freezer_fork,
.legacy_cftypes = files,
};
EXPORT_SYMBOL_GPL(freezer_cgrp_subsys);

@ -12,6 +12,9 @@
#include <linux/freezer.h>
#include <linux/kthread.h>

#undef CREATE_TRACE_POINTS
#include <trace/hooks/cgroup.h>

/* total number of freezing conditions in effect */
DEFINE_STATIC_KEY_FALSE(freezer_active);
EXPORT_SYMBOL(freezer_active);
@ -75,6 +78,7 @@ bool __refrigerator(bool check_kthr_stop)

spin_lock_irq(&freezer_lock);
freeze = freezing(current) && !(check_kthr_stop && kthread_should_stop());
trace_android_rvh_refrigerator(pm_nosig_freezing);
spin_unlock_irq(&freezer_lock);

if (!freeze)

@ -56,6 +56,8 @@
#include <asm/cacheflush.h>
#include <asm/syscall.h> /* for syscall_get_* */

#undef CREATE_TRACE_POINTS
#include <trace/hooks/signal.h>
/*
* SLAB caches for signal bits.
*/
@ -1291,7 +1293,7 @@ int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p
{
unsigned long flags;
int ret = -ESRCH;

trace_android_vh_do_send_sig_info(sig, current, p);
if (lock_task_sighand(p, &flags)) {
ret = send_signal_locked(sig, info, p, type);
unlock_task_sighand(p, &flags);
kernel/sys.c
@ -666,6 +666,7 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
struct cred *new;
int retval;
kuid_t kruid, keuid, ksuid;
bool ruid_new, euid_new, suid_new;

kruid = make_kuid(ns, ruid);
keuid = make_kuid(ns, euid);
@ -680,25 +681,29 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
if ((suid != (uid_t) -1) && !uid_valid(ksuid))
return -EINVAL;

old = current_cred();

/* check for no-op */
if ((ruid == (uid_t) -1 || uid_eq(kruid, old->uid)) &&
(euid == (uid_t) -1 || (uid_eq(keuid, old->euid) &&
uid_eq(keuid, old->fsuid))) &&
(suid == (uid_t) -1 || uid_eq(ksuid, old->suid)))
return 0;

ruid_new = ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
!uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid);
euid_new = euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
!uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid);
suid_new = suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
!uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid);
if ((ruid_new || euid_new || suid_new) &&
!ns_capable_setid(old->user_ns, CAP_SETUID))
return -EPERM;

new = prepare_creds();
if (!new)
return -ENOMEM;

old = current_cred();

retval = -EPERM;
if (!ns_capable_setid(old->user_ns, CAP_SETUID)) {
if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
!uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
goto error;
if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
!uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
goto error;
if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
!uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
goto error;
}

if (ruid != (uid_t) -1) {
new->uid = kruid;
if (!uid_eq(kruid, old->uid)) {
@ -763,6 +768,7 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
struct cred *new;
int retval;
kgid_t krgid, kegid, ksgid;
bool rgid_new, egid_new, sgid_new;

krgid = make_kgid(ns, rgid);
kegid = make_kgid(ns, egid);
@ -775,23 +781,28 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
return -EINVAL;

old = current_cred();

/* check for no-op */
if ((rgid == (gid_t) -1 || gid_eq(krgid, old->gid)) &&
(egid == (gid_t) -1 || (gid_eq(kegid, old->egid) &&
gid_eq(kegid, old->fsgid))) &&
(sgid == (gid_t) -1 || gid_eq(ksgid, old->sgid)))
return 0;

rgid_new = rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
!gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid);
egid_new = egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
!gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid);
sgid_new = sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
!gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid);
if ((rgid_new || egid_new || sgid_new) &&
!ns_capable_setid(old->user_ns, CAP_SETGID))
return -EPERM;

new = prepare_creds();
if (!new)
return -ENOMEM;
old = current_cred();

retval = -EPERM;
if (!ns_capable_setid(old->user_ns, CAP_SETGID)) {
if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
!gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
goto error;
if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
!gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
goto error;
if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
!gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
goto error;
}

if (rgid != (gid_t) -1)
new->gid = krgid;
@ -377,6 +377,7 @@ obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o
obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o
obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
obj-$(CONFIG_IS_SIGNED_TYPE_KUNIT_TEST) += is_signed_type_kunit.o
CFLAGS_overflow_kunit.o = $(call cc-disable-warning, tautological-constant-out-of-range-compare)
obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable)
obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o
@ -1293,26 +1293,21 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
node = mas->alloc;
node->request_count = 0;
while (requested) {
max_req = MAPLE_ALLOC_SLOTS;
if (node->node_count) {
unsigned int offset = node->node_count;

slots = (void **)&node->slot[offset];
max_req -= offset;
} else {
slots = (void **)&node->slot;
}

max_req = MAPLE_ALLOC_SLOTS - node->node_count;
slots = (void **)&node->slot[node->node_count];
max_req = min(requested, max_req);
count = mt_alloc_bulk(gfp, max_req, slots);
if (!count)
goto nomem_bulk;

if (node->node_count == 0) {
node->slot[0]->node_count = 0;
node->slot[0]->request_count = 0;
}

node->node_count += count;
allocated += count;
node = node->slot[0];
node->node_count = 0;
node->request_count = 0;
requested -= count;
}
mas->alloc->total = allocated;
@ -4968,7 +4963,8 @@ static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min)
* Return: True if found in a leaf, false otherwise.
*
*/
static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
unsigned long *gap_min, unsigned long *gap_max)
{
enum maple_type type = mte_node_type(mas->node);
struct maple_node *node = mas_mn(mas);
@ -5033,8 +5029,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)

if (unlikely(ma_is_leaf(type))) {
mas->offset = offset;
mas->min = min;
mas->max = min + gap - 1;
*gap_min = min;
*gap_max = min + gap - 1;
return true;
}

@ -5058,10 +5054,10 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
{
enum maple_type type = mte_node_type(mas->node);
unsigned long pivot, min, gap = 0;
unsigned char offset;
unsigned long *gaps;
unsigned long *pivots = ma_pivots(mas_mn(mas), type);
void __rcu **slots = ma_slots(mas_mn(mas), type);
unsigned char offset, data_end;
unsigned long *gaps, *pivots;
void __rcu **slots;
struct maple_node *node;
bool found = false;

if (ma_is_dense(type)) {
@ -5069,13 +5065,15 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
return true;
}

gaps = ma_gaps(mte_to_node(mas->node), type);
node = mas_mn(mas);
pivots = ma_pivots(node, type);
slots = ma_slots(node, type);
gaps = ma_gaps(node, type);
offset = mas->offset;
min = mas_safe_min(mas, pivots, offset);
for (; offset < mt_slots[type]; offset++) {
pivot = mas_safe_pivot(mas, pivots, offset, type);
if (offset && !pivot)
break;
data_end = ma_data_end(node, type, pivots, mas->max);
for (; offset <= data_end; offset++) {
pivot = mas_logical_pivot(mas, pivots, offset, type);

/* Not within lower bounds */
if (mas->index > pivot)
@ -5310,6 +5308,9 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
unsigned long *pivots;
enum maple_type mt;

if (min >= max)
return -EINVAL;

if (mas_is_start(mas))
mas_start(mas);
else if (mas->offset >= 2)
@ -5364,6 +5365,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
{
struct maple_enode *last = mas->node;

if (min >= max)
return -EINVAL;

if (mas_is_start(mas)) {
mas_start(mas);
mas->offset = mas_data_end(mas);
@ -5383,7 +5387,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
mas->index = min;
mas->last = max;

while (!mas_rev_awalk(mas, size)) {
while (!mas_rev_awalk(mas, size, &min, &max)) {
if (last == mas->node) {
if (!mas_rewind_node(mas))
return -EBUSY;
@ -5398,17 +5402,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
return -EBUSY;

/*
* mas_rev_awalk() has set mas->min and mas->max to the gap values. If
* the maximum is outside the window we are searching, then use the last
* location in the search.
* mas->max and mas->min is the range of the gap.
* mas->index and mas->last are currently set to the search range.
*/

/* Trim the upper limit to the max. */
if (mas->max <= mas->last)
mas->last = mas->max;
if (max <= mas->last)
mas->last = max;

mas->index = mas->last - size + 1;
return 0;
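For concreteness, a worked example of the trimming step above, with values assumed for illustration:

/* Request: gap of size = 3 within the search window [min = 0, max = 100],
 * so mas->last = 100. Suppose mas_rev_awalk() reports a gap spanning
 * [90, 120] through *gap_min/*gap_max, i.e. min = 90 and max = 120 after
 * the walk. Since max (120) > mas->last (100), mas->last keeps the window
 * limit 100, and mas->index = 100 - 3 + 1 = 98: the returned range is
 * [98, 100].
 */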
@ -736,6 +736,384 @@ static void overflow_size_helpers_test(struct kunit *test)
#undef check_one_size_helper
}

static void overflows_type_test(struct kunit *test)
{
int count = 0;
unsigned int var;

#define __TEST_OVERFLOWS_TYPE(func, arg1, arg2, of) do { \
bool __of = func(arg1, arg2); \
KUNIT_EXPECT_EQ_MSG(test, __of, of, \
"expected " #func "(" #arg1 ", " #arg2 " to%s overflow\n",\
of ? "" : " not"); \
count++; \
} while (0)

/* Args are: first type, second type, value, overflow expected */
#define TEST_OVERFLOWS_TYPE(__t1, __t2, v, of) do { \
__t1 t1 = (v); \
__t2 t2; \
__TEST_OVERFLOWS_TYPE(__overflows_type, t1, t2, of); \
__TEST_OVERFLOWS_TYPE(__overflows_type, t1, __t2, of); \
__TEST_OVERFLOWS_TYPE(__overflows_type_constexpr, t1, t2, of); \
__TEST_OVERFLOWS_TYPE(__overflows_type_constexpr, t1, __t2, of);\
} while (0)

TEST_OVERFLOWS_TYPE(u8, u8, U8_MAX, false);
TEST_OVERFLOWS_TYPE(u8, u16, U8_MAX, false);
TEST_OVERFLOWS_TYPE(u8, s8, U8_MAX, true);
TEST_OVERFLOWS_TYPE(u8, s8, S8_MAX, false);
TEST_OVERFLOWS_TYPE(u8, s8, (u8)S8_MAX + 1, true);
TEST_OVERFLOWS_TYPE(u8, s16, U8_MAX, false);
TEST_OVERFLOWS_TYPE(s8, u8, S8_MAX, false);
TEST_OVERFLOWS_TYPE(s8, u8, -1, true);
TEST_OVERFLOWS_TYPE(s8, u8, S8_MIN, true);
TEST_OVERFLOWS_TYPE(s8, u16, S8_MAX, false);
TEST_OVERFLOWS_TYPE(s8, u16, -1, true);
TEST_OVERFLOWS_TYPE(s8, u16, S8_MIN, true);
TEST_OVERFLOWS_TYPE(s8, u32, S8_MAX, false);
TEST_OVERFLOWS_TYPE(s8, u32, -1, true);
TEST_OVERFLOWS_TYPE(s8, u32, S8_MIN, true);
#if BITS_PER_LONG == 64
TEST_OVERFLOWS_TYPE(s8, u64, S8_MAX, false);
TEST_OVERFLOWS_TYPE(s8, u64, -1, true);
TEST_OVERFLOWS_TYPE(s8, u64, S8_MIN, true);
#endif
TEST_OVERFLOWS_TYPE(s8, s8, S8_MAX, false);
TEST_OVERFLOWS_TYPE(s8, s8, S8_MIN, false);
TEST_OVERFLOWS_TYPE(s8, s16, S8_MAX, false);
TEST_OVERFLOWS_TYPE(s8, s16, S8_MIN, false);
TEST_OVERFLOWS_TYPE(u16, u8, U8_MAX, false);
TEST_OVERFLOWS_TYPE(u16, u8, (u16)U8_MAX + 1, true);
TEST_OVERFLOWS_TYPE(u16, u8, U16_MAX, true);
TEST_OVERFLOWS_TYPE(u16, s8, S8_MAX, false);
TEST_OVERFLOWS_TYPE(u16, s8, (u16)S8_MAX + 1, true);
TEST_OVERFLOWS_TYPE(u16, s8, U16_MAX, true);
TEST_OVERFLOWS_TYPE(u16, s16, S16_MAX, false);
TEST_OVERFLOWS_TYPE(u16, s16, (u16)S16_MAX + 1, true);
TEST_OVERFLOWS_TYPE(u16, s16, U16_MAX, true);
TEST_OVERFLOWS_TYPE(u16, u32, U16_MAX, false);
TEST_OVERFLOWS_TYPE(u16, s32, U16_MAX, false);
TEST_OVERFLOWS_TYPE(s16, u8, U8_MAX, false);
TEST_OVERFLOWS_TYPE(s16, u8, (s16)U8_MAX + 1, true);
TEST_OVERFLOWS_TYPE(s16, u8, -1, true);
TEST_OVERFLOWS_TYPE(s16, u8, S16_MIN, true);
TEST_OVERFLOWS_TYPE(s16, u16, S16_MAX, false);
TEST_OVERFLOWS_TYPE(s16, u16, -1, true);
TEST_OVERFLOWS_TYPE(s16, u16, S16_MIN, true);
TEST_OVERFLOWS_TYPE(s16, u32, S16_MAX, false);
TEST_OVERFLOWS_TYPE(s16, u32, -1, true);
TEST_OVERFLOWS_TYPE(s16, u32, S16_MIN, true);
#if BITS_PER_LONG == 64
TEST_OVERFLOWS_TYPE(s16, u64, S16_MAX, false);
TEST_OVERFLOWS_TYPE(s16, u64, -1, true);
TEST_OVERFLOWS_TYPE(s16, u64, S16_MIN, true);
#endif
TEST_OVERFLOWS_TYPE(s16, s8, S8_MAX, false);
TEST_OVERFLOWS_TYPE(s16, s8, S8_MIN, false);
TEST_OVERFLOWS_TYPE(s16, s8, (s16)S8_MAX + 1, true);
TEST_OVERFLOWS_TYPE(s16, s8, (s16)S8_MIN - 1, true);
TEST_OVERFLOWS_TYPE(s16, s8, S16_MAX, true);
TEST_OVERFLOWS_TYPE(s16, s8, S16_MIN, true);
TEST_OVERFLOWS_TYPE(s16, s16, S16_MAX, false);
TEST_OVERFLOWS_TYPE(s16, s16, S16_MIN, false);
TEST_OVERFLOWS_TYPE(s16, s32, S16_MAX, false);
TEST_OVERFLOWS_TYPE(s16, s32, S16_MIN, false);
TEST_OVERFLOWS_TYPE(u32, u8, U8_MAX, false);
TEST_OVERFLOWS_TYPE(u32, u8, (u32)U8_MAX + 1, true);
TEST_OVERFLOWS_TYPE(u32, u8, U32_MAX, true);
TEST_OVERFLOWS_TYPE(u32, s8, S8_MAX, false);
TEST_OVERFLOWS_TYPE(u32, s8, (u32)S8_MAX + 1, true);
TEST_OVERFLOWS_TYPE(u32, s8, U32_MAX, true);
TEST_OVERFLOWS_TYPE(u32, u16, U16_MAX, false);
TEST_OVERFLOWS_TYPE(u32, u16, U16_MAX + 1, true);
TEST_OVERFLOWS_TYPE(u32, u16, U32_MAX, true);
TEST_OVERFLOWS_TYPE(u32, s16, S16_MAX, false);
TEST_OVERFLOWS_TYPE(u32, s16, (u32)S16_MAX + 1, true);
TEST_OVERFLOWS_TYPE(u32, s16, U32_MAX, true);
TEST_OVERFLOWS_TYPE(u32, u32, U32_MAX, false);
TEST_OVERFLOWS_TYPE(u32, s32, S32_MAX, false);
TEST_OVERFLOWS_TYPE(u32, s32, U32_MAX, true);
TEST_OVERFLOWS_TYPE(u32, s32, (u32)S32_MAX + 1, true);
#if BITS_PER_LONG == 64
TEST_OVERFLOWS_TYPE(u32, u64, U32_MAX, false);
TEST_OVERFLOWS_TYPE(u32, s64, U32_MAX, false);
#endif
TEST_OVERFLOWS_TYPE(s32, u8, U8_MAX, false);
TEST_OVERFLOWS_TYPE(s32, u8, (s32)U8_MAX + 1, true);
TEST_OVERFLOWS_TYPE(s32, u16, S32_MAX, true);
TEST_OVERFLOWS_TYPE(s32, u8, -1, true);
TEST_OVERFLOWS_TYPE(s32, u8, S32_MIN, true);
TEST_OVERFLOWS_TYPE(s32, u16, U16_MAX, false);
TEST_OVERFLOWS_TYPE(s32, u16, (s32)U16_MAX + 1, true);
TEST_OVERFLOWS_TYPE(s32, u16, S32_MAX, true);
TEST_OVERFLOWS_TYPE(s32, u16, -1, true);
TEST_OVERFLOWS_TYPE(s32, u16, S32_MIN, true);
TEST_OVERFLOWS_TYPE(s32, u32, S32_MAX, false);
TEST_OVERFLOWS_TYPE(s32, u32, -1, true);
TEST_OVERFLOWS_TYPE(s32, u32, S32_MIN, true);
#if BITS_PER_LONG == 64
TEST_OVERFLOWS_TYPE(s32, u64, S32_MAX, false);
TEST_OVERFLOWS_TYPE(s32, u64, -1, true);
TEST_OVERFLOWS_TYPE(s32, u64, S32_MIN, true);
#endif
TEST_OVERFLOWS_TYPE(s32, s8, S8_MAX, false);
TEST_OVERFLOWS_TYPE(s32, s8, S8_MIN, false);
TEST_OVERFLOWS_TYPE(s32, s8, (s32)S8_MAX + 1, true);
TEST_OVERFLOWS_TYPE(s32, s8, (s32)S8_MIN - 1, true);
TEST_OVERFLOWS_TYPE(s32, s8, S32_MAX, true);
TEST_OVERFLOWS_TYPE(s32, s8, S32_MIN, true);
TEST_OVERFLOWS_TYPE(s32, s16, S16_MAX, false);
TEST_OVERFLOWS_TYPE(s32, s16, S16_MIN, false);
TEST_OVERFLOWS_TYPE(s32, s16, (s32)S16_MAX + 1, true);
TEST_OVERFLOWS_TYPE(s32, s16, (s32)S16_MIN - 1, true);
TEST_OVERFLOWS_TYPE(s32, s16, S32_MAX, true);
TEST_OVERFLOWS_TYPE(s32, s16, S32_MIN, true);
TEST_OVERFLOWS_TYPE(s32, s32, S32_MAX, false);
TEST_OVERFLOWS_TYPE(s32, s32, S32_MIN, false);
#if BITS_PER_LONG == 64
TEST_OVERFLOWS_TYPE(s32, s64, S32_MAX, false);
TEST_OVERFLOWS_TYPE(s32, s64, S32_MIN, false);
TEST_OVERFLOWS_TYPE(u64, u8, U64_MAX, true);
TEST_OVERFLOWS_TYPE(u64, u8, U8_MAX, false);
TEST_OVERFLOWS_TYPE(u64, u8, (u64)U8_MAX + 1, true);
TEST_OVERFLOWS_TYPE(u64, u16, U64_MAX, true);
TEST_OVERFLOWS_TYPE(u64, u16, U16_MAX, false);
TEST_OVERFLOWS_TYPE(u64, u16, (u64)U16_MAX + 1, true);
TEST_OVERFLOWS_TYPE(u64, u32, U64_MAX, true);
TEST_OVERFLOWS_TYPE(u64, u32, U32_MAX, false);
TEST_OVERFLOWS_TYPE(u64, u32, (u64)U32_MAX + 1, true);
TEST_OVERFLOWS_TYPE(u64, u64, U64_MAX, false);
TEST_OVERFLOWS_TYPE(u64, s8, S8_MAX, false);
TEST_OVERFLOWS_TYPE(u64, s8, (u64)S8_MAX + 1, true);
TEST_OVERFLOWS_TYPE(u64, s8, U64_MAX, true);
TEST_OVERFLOWS_TYPE(u64, s16, S16_MAX, false);
TEST_OVERFLOWS_TYPE(u64, s16, (u64)S16_MAX + 1, true);
TEST_OVERFLOWS_TYPE(u64, s16, U64_MAX, true);
TEST_OVERFLOWS_TYPE(u64, s32, S32_MAX, false);
TEST_OVERFLOWS_TYPE(u64, s32, (u64)S32_MAX + 1, true);
TEST_OVERFLOWS_TYPE(u64, s32, U64_MAX, true);
TEST_OVERFLOWS_TYPE(u64, s64, S64_MAX, false);
TEST_OVERFLOWS_TYPE(u64, s64, U64_MAX, true);
TEST_OVERFLOWS_TYPE(u64, s64, (u64)S64_MAX + 1, true);
TEST_OVERFLOWS_TYPE(s64, u8, S64_MAX, true);
TEST_OVERFLOWS_TYPE(s64, u8, S64_MIN, true);
TEST_OVERFLOWS_TYPE(s64, u8, -1, true);
TEST_OVERFLOWS_TYPE(s64, u8, U8_MAX, false);
TEST_OVERFLOWS_TYPE(s64, u8, (s64)U8_MAX + 1, true);
TEST_OVERFLOWS_TYPE(s64, u16, S64_MAX, true);
TEST_OVERFLOWS_TYPE(s64, u16, S64_MIN, true);
TEST_OVERFLOWS_TYPE(s64, u16, -1, true);
TEST_OVERFLOWS_TYPE(s64, u16, U16_MAX, false);
TEST_OVERFLOWS_TYPE(s64, u16, (s64)U16_MAX + 1, true);
TEST_OVERFLOWS_TYPE(s64, u32, S64_MAX, true);
TEST_OVERFLOWS_TYPE(s64, u32, S64_MIN, true);
TEST_OVERFLOWS_TYPE(s64, u32, -1, true);
TEST_OVERFLOWS_TYPE(s64, u32, U32_MAX, false);
TEST_OVERFLOWS_TYPE(s64, u32, (s64)U32_MAX + 1, true);
TEST_OVERFLOWS_TYPE(s64, u64, S64_MAX, false);
TEST_OVERFLOWS_TYPE(s64, u64, S64_MIN, true);
TEST_OVERFLOWS_TYPE(s64, u64, -1, true);
TEST_OVERFLOWS_TYPE(s64, s8, S8_MAX, false);
TEST_OVERFLOWS_TYPE(s64, s8, S8_MIN, false);
TEST_OVERFLOWS_TYPE(s64, s8, (s64)S8_MAX + 1, true);
TEST_OVERFLOWS_TYPE(s64, s8, (s64)S8_MIN - 1, true);
TEST_OVERFLOWS_TYPE(s64, s8, S64_MAX, true);
TEST_OVERFLOWS_TYPE(s64, s16, S16_MAX, false);
TEST_OVERFLOWS_TYPE(s64, s16, S16_MIN, false);
TEST_OVERFLOWS_TYPE(s64, s16, (s64)S16_MAX + 1, true);
TEST_OVERFLOWS_TYPE(s64, s16, (s64)S16_MIN - 1, true);
TEST_OVERFLOWS_TYPE(s64, s16, S64_MAX, true);
TEST_OVERFLOWS_TYPE(s64, s32, S32_MAX, false);
TEST_OVERFLOWS_TYPE(s64, s32, S32_MIN, false);
TEST_OVERFLOWS_TYPE(s64, s32, (s64)S32_MAX + 1, true);
TEST_OVERFLOWS_TYPE(s64, s32, (s64)S32_MIN - 1, true);
TEST_OVERFLOWS_TYPE(s64, s32, S64_MAX, true);
TEST_OVERFLOWS_TYPE(s64, s64, S64_MAX, false);
TEST_OVERFLOWS_TYPE(s64, s64, S64_MIN, false);
#endif

/* Check for macro side-effects. */
var = INT_MAX - 1;
__TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, false);
__TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, false);
__TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, true);
var = INT_MAX - 1;
__TEST_OVERFLOWS_TYPE(overflows_type, var++, int, false);
__TEST_OVERFLOWS_TYPE(overflows_type, var++, int, false);
__TEST_OVERFLOWS_TYPE(overflows_type, var++, int, true);

kunit_info(test, "%d overflows_type() tests finished\n", count);
#undef TEST_OVERFLOWS_TYPE
#undef __TEST_OVERFLOWS_TYPE
}
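Stepping back from the test matrix, a hedged sketch of how the helper being exercised here is meant to be used in kernel code; the value source and error path below are hypothetical, only the overflows_type() call reflects the API under test:

u64 requested = get_requested_size();  /* hypothetical source of a size */
int len;

if (overflows_type(requested, len))   /* true if the value cannot fit in 'len' */
        return -ERANGE;
len = requested;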
static void same_type_test(struct kunit *test)
{
int count = 0;
int var;

#define TEST_SAME_TYPE(t1, t2, same) do { \
typeof(t1) __t1h = type_max(t1); \
typeof(t1) __t1l = type_min(t1); \
typeof(t2) __t2h = type_max(t2); \
typeof(t2) __t2l = type_min(t2); \
KUNIT_EXPECT_EQ(test, true, __same_type(t1, __t1h)); \
KUNIT_EXPECT_EQ(test, true, __same_type(t1, __t1l)); \
KUNIT_EXPECT_EQ(test, true, __same_type(__t1h, t1)); \
KUNIT_EXPECT_EQ(test, true, __same_type(__t1l, t1)); \
KUNIT_EXPECT_EQ(test, true, __same_type(t2, __t2h)); \
KUNIT_EXPECT_EQ(test, true, __same_type(t2, __t2l)); \
KUNIT_EXPECT_EQ(test, true, __same_type(__t2h, t2)); \
KUNIT_EXPECT_EQ(test, true, __same_type(__t2l, t2)); \
KUNIT_EXPECT_EQ(test, same, __same_type(t1, t2)); \
KUNIT_EXPECT_EQ(test, same, __same_type(t2, __t1h)); \
KUNIT_EXPECT_EQ(test, same, __same_type(t2, __t1l)); \
KUNIT_EXPECT_EQ(test, same, __same_type(__t1h, t2)); \
KUNIT_EXPECT_EQ(test, same, __same_type(__t1l, t2)); \
KUNIT_EXPECT_EQ(test, same, __same_type(t1, __t2h)); \
KUNIT_EXPECT_EQ(test, same, __same_type(t1, __t2l)); \
KUNIT_EXPECT_EQ(test, same, __same_type(__t2h, t1)); \
KUNIT_EXPECT_EQ(test, same, __same_type(__t2l, t1)); \
} while (0)

#if BITS_PER_LONG == 64
# define TEST_SAME_TYPE64(base, t, m) TEST_SAME_TYPE(base, t, m)
#else
# define TEST_SAME_TYPE64(base, t, m) do { } while (0)
#endif

#define TEST_TYPE_SETS(base, mu8, mu16, mu32, ms8, ms16, ms32, mu64, ms64) \
do { \
TEST_SAME_TYPE(base, u8, mu8); \
TEST_SAME_TYPE(base, u16, mu16); \
TEST_SAME_TYPE(base, u32, mu32); \
TEST_SAME_TYPE(base, s8, ms8); \
TEST_SAME_TYPE(base, s16, ms16); \
TEST_SAME_TYPE(base, s32, ms32); \
TEST_SAME_TYPE64(base, u64, mu64); \
TEST_SAME_TYPE64(base, s64, ms64); \
} while (0)

TEST_TYPE_SETS(u8, true, false, false, false, false, false, false, false);
TEST_TYPE_SETS(u16, false, true, false, false, false, false, false, false);
TEST_TYPE_SETS(u32, false, false, true, false, false, false, false, false);
TEST_TYPE_SETS(s8, false, false, false, true, false, false, false, false);
TEST_TYPE_SETS(s16, false, false, false, false, true, false, false, false);
TEST_TYPE_SETS(s32, false, false, false, false, false, true, false, false);
#if BITS_PER_LONG == 64
TEST_TYPE_SETS(u64, false, false, false, false, false, false, true, false);
TEST_TYPE_SETS(s64, false, false, false, false, false, false, false, true);
#endif

/* Check for macro side-effects. */
var = 4;
KUNIT_EXPECT_EQ(test, var, 4);
KUNIT_EXPECT_TRUE(test, __same_type(var++, int));
KUNIT_EXPECT_EQ(test, var, 4);
KUNIT_EXPECT_TRUE(test, __same_type(int, var++));
KUNIT_EXPECT_EQ(test, var, 4);
KUNIT_EXPECT_TRUE(test, __same_type(var++, var++));
KUNIT_EXPECT_EQ(test, var, 4);

kunit_info(test, "%d __same_type() tests finished\n", count);

#undef TEST_TYPE_SETS
#undef TEST_SAME_TYPE64
#undef TEST_SAME_TYPE
}

static void castable_to_type_test(struct kunit *test)
{
int count = 0;

#define TEST_CASTABLE_TO_TYPE(arg1, arg2, pass) do { \
bool __pass = castable_to_type(arg1, arg2); \
KUNIT_EXPECT_EQ_MSG(test, __pass, pass, \
"expected castable_to_type(" #arg1 ", " #arg2 ") to%s pass\n",\
pass ? "" : " not"); \
count++; \
} while (0)

TEST_CASTABLE_TO_TYPE(16, u8, true);
TEST_CASTABLE_TO_TYPE(16, u16, true);
TEST_CASTABLE_TO_TYPE(16, u32, true);
TEST_CASTABLE_TO_TYPE(16, s8, true);
TEST_CASTABLE_TO_TYPE(16, s16, true);
TEST_CASTABLE_TO_TYPE(16, s32, true);
TEST_CASTABLE_TO_TYPE(-16, s8, true);
TEST_CASTABLE_TO_TYPE(-16, s16, true);
TEST_CASTABLE_TO_TYPE(-16, s32, true);
#if BITS_PER_LONG == 64
TEST_CASTABLE_TO_TYPE(16, u64, true);
TEST_CASTABLE_TO_TYPE(-16, s64, true);
#endif

#define TEST_CASTABLE_TO_TYPE_VAR(width) do { \
u ## width u ## width ## var = 0; \
s ## width s ## width ## var = 0; \
\
/* Constant expressions that fit types. */ \
TEST_CASTABLE_TO_TYPE(type_max(u ## width), u ## width, true); \
TEST_CASTABLE_TO_TYPE(type_min(u ## width), u ## width, true); \
TEST_CASTABLE_TO_TYPE(type_max(u ## width), u ## width ## var, true); \
TEST_CASTABLE_TO_TYPE(type_min(u ## width), u ## width ## var, true); \
TEST_CASTABLE_TO_TYPE(type_max(s ## width), s ## width, true); \
TEST_CASTABLE_TO_TYPE(type_min(s ## width), s ## width, true); \
TEST_CASTABLE_TO_TYPE(type_max(s ## width), s ## width ## var, true); \
TEST_CASTABLE_TO_TYPE(type_min(u ## width), s ## width ## var, true); \
/* Constant expressions that do not fit types. */ \
TEST_CASTABLE_TO_TYPE(type_max(u ## width), s ## width, false); \
TEST_CASTABLE_TO_TYPE(type_max(u ## width), s ## width ## var, false); \
TEST_CASTABLE_TO_TYPE(type_min(s ## width), u ## width, false); \
TEST_CASTABLE_TO_TYPE(type_min(s ## width), u ## width ## var, false); \
/* Non-constant expression with mismatched type. */ \
TEST_CASTABLE_TO_TYPE(s ## width ## var, u ## width, false); \
TEST_CASTABLE_TO_TYPE(u ## width ## var, s ## width, false); \
} while (0)

#define TEST_CASTABLE_TO_TYPE_RANGE(width) do { \
unsigned long big = U ## width ## _MAX; \
signed long small = S ## width ## _MIN; \
u ## width u ## width ## var = 0; \
s ## width s ## width ## var = 0; \
\
/* Constant expression in range. */ \
TEST_CASTABLE_TO_TYPE(U ## width ## _MAX, u ## width, true); \
TEST_CASTABLE_TO_TYPE(U ## width ## _MAX, u ## width ## var, true); \
TEST_CASTABLE_TO_TYPE(S ## width ## _MIN, s ## width, true); \
TEST_CASTABLE_TO_TYPE(S ## width ## _MIN, s ## width ## var, true); \
/* Constant expression out of range. */ \
TEST_CASTABLE_TO_TYPE((unsigned long)U ## width ## _MAX + 1, u ## width, false); \
TEST_CASTABLE_TO_TYPE((unsigned long)U ## width ## _MAX + 1, u ## width ## var, false); \
TEST_CASTABLE_TO_TYPE((signed long)S ## width ## _MIN - 1, s ## width, false); \
TEST_CASTABLE_TO_TYPE((signed long)S ## width ## _MIN - 1, s ## width ## var, false); \
/* Non-constant expression with mismatched type. */ \
TEST_CASTABLE_TO_TYPE(big, u ## width, false); \
TEST_CASTABLE_TO_TYPE(big, u ## width ## var, false); \
TEST_CASTABLE_TO_TYPE(small, s ## width, false); \
TEST_CASTABLE_TO_TYPE(small, s ## width ## var, false); \
} while (0)

TEST_CASTABLE_TO_TYPE_VAR(8);
TEST_CASTABLE_TO_TYPE_VAR(16);
TEST_CASTABLE_TO_TYPE_VAR(32);
#if BITS_PER_LONG == 64
TEST_CASTABLE_TO_TYPE_VAR(64);
#endif

TEST_CASTABLE_TO_TYPE_RANGE(8);
TEST_CASTABLE_TO_TYPE_RANGE(16);
#if BITS_PER_LONG == 64
TEST_CASTABLE_TO_TYPE_RANGE(32);
#endif
kunit_info(test, "%d castable_to_type() tests finished\n", count);

#undef TEST_CASTABLE_TO_TYPE_RANGE
#undef TEST_CASTABLE_TO_TYPE_VAR
#undef TEST_CASTABLE_TO_TYPE
}

static struct kunit_case overflow_test_cases[] = {
KUNIT_CASE(u8_u8__u8_overflow_test),
KUNIT_CASE(s8_s8__s8_overflow_test),
@ -755,6 +1133,9 @@ static struct kunit_case overflow_test_cases[] = {
KUNIT_CASE(shift_nonsense_test),
KUNIT_CASE(overflow_allocation_test),
KUNIT_CASE(overflow_size_helpers_test),
KUNIT_CASE(overflows_type_test),
KUNIT_CASE(same_type_test),
KUNIT_CASE(castable_to_type_test),
{}
};
@ -119,6 +119,22 @@ config PAGE_TABLE_CHECK_ENFORCED

If unsure say "n".

config PAGE_PINNER
bool "Track page pinner"
depends on DEBUG_KERNEL && STACKTRACE_SUPPORT
select DEBUG_FS
select STACKTRACE
select STACKDEPOT
select PAGE_EXTENSION
help
This keeps track of which call chain pinned a page, which may
help to find page migration failures. Even if you include this
feature in your build, it is disabled by default. Pass
"page_pinner=on" on the kernel command line to enable it. It eats
a fair amount of memory if enabled.

If unsure, say N.

config PAGE_POISONING
bool "Poison pages after freeing"
help
@ -109,6 +109,7 @@ obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
obj-$(CONFIG_DEBUG_RODATA_TEST) += rodata_test.o
obj-$(CONFIG_DEBUG_VM_PGTABLE) += debug_vm_pgtable.o
obj-$(CONFIG_PAGE_OWNER) += page_owner.o
obj-$(CONFIG_PAGE_PINNER) += page_pinner.o
obj-$(CONFIG_CLEANCACHE) += cleancache.o
obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
obj-$(CONFIG_ZPOOL) += zpool.o
@ -380,6 +380,15 @@ static LIST_HEAD(offline_cgwbs);
static void cleanup_offline_cgwbs_workfn(struct work_struct *work);
static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn);

static void cgwb_free_rcu(struct rcu_head *rcu_head)
{
struct bdi_writeback *wb = container_of(rcu_head,
struct bdi_writeback, rcu);

percpu_ref_exit(&wb->refcnt);
kfree(wb);
}

static void cgwb_release_workfn(struct work_struct *work)
{
struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
@ -402,11 +411,10 @@ static void cgwb_release_workfn(struct work_struct *work)
list_del(&wb->offline_node);
spin_unlock_irq(&cgwb_lock);

percpu_ref_exit(&wb->refcnt);
wb_exit(wb);
bdi_put(bdi);
WARN_ON_ONCE(!list_empty(&wb->b_attached));
kfree_rcu(wb, rcu);
call_rcu(&wb->rcu, cgwb_free_rcu);
}

static void cgwb_release(struct percpu_ref *refcnt)
@ -1438,6 +1438,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
if (memcg_kmem_enabled() && PageMemcgKmem(page))
__memcg_kmem_uncharge_page(page, order);
reset_page_owner(page, order);
free_page_pinner(page, order);
page_table_check_free(page, order);
return false;
}
@ -1478,6 +1479,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
page_cpupid_reset_last(page);
page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
reset_page_owner(page, order);
free_page_pinner(page, order);
page_table_check_free(page, order);

if (!PageHighMem(page)) {
@ -9310,8 +9312,17 @@ int __alloc_contig_migrate_range(struct compact_control *cc,

lru_cache_enable();
if (ret < 0) {
if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY)
if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) {
struct page *page;

alloc_contig_dump_pages(&cc->migratepages);
list_for_each_entry(page, &cc->migratepages, lru) {
/* The page will be freed by putback_movable_pages soon */
if (page_count(page) == 1)
continue;
page_pinner_failure_detect(page);
}
}
putback_movable_pages(&cc->migratepages);
return ret;
}
@ -7,6 +7,7 @@
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_pinner.h>
#include <linux/page_idle.h>
#include <linux/page_table_check.h>
#include <linux/rcupdate.h>
@ -81,6 +82,9 @@ static struct page_ext_operations *page_ext_ops[] __initdata = {
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
&page_idle_ops,
#endif
#ifdef CONFIG_PAGE_PINNER
&page_pinner_ops,
#endif
#ifdef CONFIG_PAGE_TABLE_CHECK
&page_table_check_ops,
#endif
@ -9,6 +9,7 @@
#include <linux/memory.h>
#include <linux/hugetlb.h>
#include <linux/page_owner.h>
#include <linux/page_pinner.h>
#include <linux/migrate.h>
#include "internal.h"

@ -666,6 +667,8 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,

out:
trace_test_pages_isolated(start_pfn, end_pfn, pfn);
if (pfn < end_pfn)
page_pinner_failure_detect(pfn_to_page(pfn));

return ret;
}
mm/page_pinner.c (new file)
@ -0,0 +1,434 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memblock.h>
#include <linux/stacktrace.h>
#include <linux/page_pinner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/sched/clock.h>

#include "internal.h"

#define PAGE_PINNER_STACK_DEPTH 16
static unsigned long pp_buf_size = 4096;

struct page_pinner {
depot_stack_handle_t handle;
u64 ts_usec;
atomic_t count;
};

enum pp_state {
PP_PUT,
PP_FREE,
PP_FAIL_DETECTED,
};

struct captured_pinner {
depot_stack_handle_t handle;
union {
u64 ts_usec;
u64 elapsed;
};

/* struct page fields */
unsigned long pfn;
int count;
int mapcount;
struct address_space *mapping;
unsigned long flags;
enum pp_state state;
};

struct page_pinner_buffer {
spinlock_t lock;
unsigned long index;
struct captured_pinner *buffer;
};

/* alloc_contig failed pinner */
static struct page_pinner_buffer pp_buffer;

static bool page_pinner_enabled;
DEFINE_STATIC_KEY_FALSE(page_pinner_inited);
EXPORT_SYMBOL_GPL(page_pinner_inited);

DEFINE_STATIC_KEY_TRUE(failure_tracking);

static depot_stack_handle_t failure_handle;

static int __init early_page_pinner_param(char *buf)
{
page_pinner_enabled = true;
return 0;
}
early_param("page_pinner", early_page_pinner_param);

static bool need_page_pinner(void)
{
return page_pinner_enabled;
}

static noinline void register_failure_stack(void)
{
unsigned long entries[4];
unsigned int nr_entries;

nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
failure_handle = stack_depot_save(entries, nr_entries, GFP_KERNEL);
}

static void init_page_pinner(void)
{
if (!page_pinner_enabled)
return;

pp_buffer.buffer = kvmalloc_array(pp_buf_size, sizeof(*pp_buffer.buffer),
GFP_KERNEL);
if (!pp_buffer.buffer) {
pr_info("page_pinner disabled due to failure of buffer allocation\n");
return;
}

spin_lock_init(&pp_buffer.lock);
pp_buffer.index = 0;

register_failure_stack();
static_branch_enable(&page_pinner_inited);
}

struct page_ext_operations page_pinner_ops = {
.size = sizeof(struct page_pinner),
.need = need_page_pinner,
.init = init_page_pinner,
};

static inline struct page_pinner *get_page_pinner(struct page_ext *page_ext)
{
return (void *)page_ext + page_pinner_ops.offset;
}

static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
unsigned long entries[PAGE_PINNER_STACK_DEPTH];
depot_stack_handle_t handle;
unsigned int nr_entries;

nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2);
handle = stack_depot_save(entries, nr_entries, flags);
if (!handle)
handle = failure_handle;

return handle;
}

static void capture_page_state(struct page *page,
struct captured_pinner *record)
{
record->flags = page->flags;
record->mapping = page_mapping(page);
record->pfn = page_to_pfn(page);
record->count = page_count(page);
record->mapcount = page_mapcount(page);
}

static void add_record(struct page_pinner_buffer *pp_buf,
struct captured_pinner *record)
{
unsigned long flags;
unsigned int idx;

spin_lock_irqsave(&pp_buf->lock, flags);
idx = pp_buf->index++;
pp_buf->index %= pp_buf_size;
pp_buf->buffer[idx] = *record;
spin_unlock_irqrestore(&pp_buf->lock, flags);
}

void __free_page_pinner(struct page *page, unsigned int order)
{
struct page_pinner *page_pinner;
struct page_ext *page_ext;
int i;

/* free_page could be called before buffer is initialized */
if (!pp_buffer.buffer)
return;

page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;

for (i = 0; i < (1 << order); i++) {
struct captured_pinner record;

if (!test_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags))
continue;

page_pinner = get_page_pinner(page_ext);

record.handle = save_stack(GFP_NOWAIT|__GFP_NOWARN);
record.ts_usec = (u64)ktime_to_us(ktime_get_boottime());
record.state = PP_FREE;
capture_page_state(page, &record);

add_record(&pp_buffer, &record);

atomic_set(&page_pinner->count, 0);
page_pinner->ts_usec = 0;
clear_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags);
page_ext = page_ext_next(page_ext);
}
page_ext_put(page_ext);
}

static ssize_t
print_page_pinner(char __user *buf, size_t count, struct captured_pinner *record)
{
int ret;
unsigned long *entries;
unsigned int nr_entries;
char *kbuf;

count = min_t(size_t, count, PAGE_SIZE);
kbuf = kmalloc(count, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;

if (record->state == PP_PUT) {
ret = snprintf(kbuf, count, "At least, pinned for %llu us\n",
record->elapsed);
} else {
u64 ts_usec = record->ts_usec;
unsigned long rem_usec = do_div(ts_usec, 1000000);

ret = snprintf(kbuf, count,
"%s [%5lu.%06lu]\n",
record->state == PP_FREE ? "Freed at" :
"Failure detected at",
(unsigned long)ts_usec, rem_usec);
}

if (ret >= count)
goto err;

/* Print information relevant to grouping pages by mobility */
ret += snprintf(kbuf + ret, count - ret,
"PFN 0x%lx Block %lu count %d mapcount %d mapping %pS Flags %#lx(%pGp)\n",
record->pfn,
record->pfn >> pageblock_order,
record->count, record->mapcount,
record->mapping,
record->flags, &record->flags);

if (ret >= count)
goto err;

nr_entries = stack_depot_fetch(record->handle, &entries);
ret += stack_trace_snprint(kbuf + ret, count - ret, entries,
nr_entries, 0);
if (ret >= count)
goto err;

ret += snprintf(kbuf + ret, count - ret, "\n");
if (ret >= count)
goto err;

if (copy_to_user(buf, kbuf, ret))
ret = -EFAULT;

kfree(kbuf);
return ret;

err:
kfree(kbuf);
return -ENOMEM;
}

void __page_pinner_failure_detect(struct page *page)
{
struct page_ext *page_ext;
struct page_pinner *page_pinner;
struct captured_pinner record;
u64 now;

if (!static_branch_unlikely(&failure_tracking))
return;

page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;

if (test_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags)) {
page_ext_put(page_ext);
return;
}

now = (u64)ktime_to_us(ktime_get_boottime());
page_pinner = get_page_pinner(page_ext);
if (!page_pinner->ts_usec)
page_pinner->ts_usec = now;
set_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags);
record.handle = save_stack(GFP_NOWAIT|__GFP_NOWARN);
record.ts_usec = now;
record.state = PP_FAIL_DETECTED;
capture_page_state(page, &record);

add_record(&pp_buffer, &record);
page_ext_put(page_ext);
}
EXPORT_SYMBOL_GPL(__page_pinner_failure_detect);

void __page_pinner_put_page(struct page *page)
{
struct page_ext *page_ext;
struct page_pinner *page_pinner;
struct captured_pinner record;
u64 now, ts_usec;

if (!static_branch_unlikely(&failure_tracking))
return;

page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;

if (!test_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags)) {
page_ext_put(page_ext);
return;
}

page_pinner = get_page_pinner(page_ext);
record.handle = save_stack(GFP_NOWAIT|__GFP_NOWARN);
now = (u64)ktime_to_us(ktime_get_boottime());
ts_usec = page_pinner->ts_usec;

if (now > ts_usec)
record.elapsed = now - ts_usec;
else
record.elapsed = 0;
record.state = PP_PUT;
capture_page_state(page, &record);

add_record(&pp_buffer, &record);
page_ext_put(page_ext);
}
EXPORT_SYMBOL_GPL(__page_pinner_put_page);

static ssize_t read_buffer(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
u64 tmp;
loff_t i, idx;
struct captured_pinner record;
unsigned long flags;

if (!static_branch_unlikely(&failure_tracking))
return -EINVAL;

if (*ppos >= pp_buf_size)
return 0;

i = *ppos;
*ppos = i + 1;

/*
* reading the records in the reverse order with newest one
* being read first followed by older ones
*/
tmp = pp_buffer.index - 1 - i + pp_buf_size;
idx = do_div(tmp, pp_buf_size);

spin_lock_irqsave(&pp_buffer.lock, flags);
record = pp_buffer.buffer[idx];
spin_unlock_irqrestore(&pp_buffer.lock, flags);
if (!record.handle)
return 0;

return print_page_pinner(buf, count, &record);
}
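A worked example of the reverse-index arithmetic above, with assumed values:

/* Assume pp_buf_size = 4096 and pp_buffer.index = 10 (slots 0..9 written).
 *   i = 0:  tmp = 10 - 1 - 0 + 4096 = 4105; idx = 4105 % 4096 = 9 (newest)
 *   i = 9:  tmp = 4096;                     idx = 0              (oldest)
 *   i = 10: idx = 4095, an unwritten slot; record.handle == 0 ends the dump.
 */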
static const struct file_operations proc_buffer_operations = {
.read = read_buffer,
};

static int failure_tracking_set(void *data, u64 val)
{
bool on;

on = (bool)val;
if (on)
static_branch_enable(&failure_tracking);
else
static_branch_disable(&failure_tracking);
return 0;
}

static int failure_tracking_get(void *data, u64 *val)
{
*val = static_branch_unlikely(&failure_tracking);
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(failure_tracking_fops,
failure_tracking_get,
failure_tracking_set, "%llu\n");

static int buffer_size_set(void *data, u64 val)
{
unsigned long flags;
struct captured_pinner *new, *old;

new = kvmalloc_array(val, sizeof(*new), GFP_KERNEL);
if (!new)
return -ENOMEM;

spin_lock_irqsave(&pp_buffer.lock, flags);
old = pp_buffer.buffer;
pp_buffer.buffer = new;
pp_buffer.index = 0;
pp_buf_size = val;
spin_unlock_irqrestore(&pp_buffer.lock, flags);
kvfree(old);

return 0;
}

static int buffer_size_get(void *data, u64 *val)
{
*val = pp_buf_size;
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(buffer_size_fops,
buffer_size_get,
buffer_size_set, "%llu\n");

static int __init page_pinner_init(void)
{
struct dentry *pp_debugfs_root;

if (!static_branch_unlikely(&page_pinner_inited))
return 0;

pr_info("page_pinner enabled\n");

pp_debugfs_root = debugfs_create_dir("page_pinner", NULL);

debugfs_create_file("buffer", 0444,
pp_debugfs_root, NULL,
&proc_buffer_operations);

debugfs_create_file("failure_tracking", 0644,
pp_debugfs_root, NULL,
&failure_tracking_fops);

debugfs_create_file("buffer_size", 0644,
pp_debugfs_root, NULL,
&buffer_size_fops);
return 0;
}
late_initcall(page_pinner_init)
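With the debugfs files registered above, records can be pulled from userspace. A minimal sketch, under the assumptions that the kernel booted with page_pinner=on and that debugfs is mounted at /sys/kernel/debug:

/* Hypothetical dumper: each read() returns one record, newest first. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd = open("/sys/kernel/debug/page_pinner/buffer", O_RDONLY);

        if (fd < 0)
                return 1;
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);
        close(fd);
        return 0;
}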
@ -278,6 +278,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
			 const struct dccp_hdr *dh, const unsigned int len);

void dccp_destruct_common(struct sock *sk);
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
void dccp_destroy_sock(struct sock *sk);

@ -1004,6 +1004,12 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
	.sockaddr_len	= sizeof(struct sockaddr_in6),
};

static void dccp_v6_sk_destruct(struct sock *sk)
{
	dccp_destruct_common(sk);
	inet6_sock_destruct(sk);
}

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */

@ -1016,17 +1022,12 @@ static int dccp_v6_init_sock(struct sock *sk)
		if (unlikely(!dccp_v6_ctl_sock_initialized))
			dccp_v6_ctl_sock_initialized = 1;
		inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;
		sk->sk_destruct = dccp_v6_sk_destruct;
	}

	return err;
}

static void dccp_v6_destroy_sock(struct sock *sk)
{
	dccp_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

static struct timewait_sock_ops dccp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct dccp6_timewait_sock),
};

@ -1049,7 +1050,7 @@ static struct proto dccp_v6_prot = {
	.accept		= inet_csk_accept,
	.get_port	= inet_csk_get_port,
	.shutdown	= dccp_shutdown,
	.destroy	= dccp_v6_destroy_sock,
	.destroy	= dccp_destroy_sock,
	.orphan_count	= &dccp_orphan_count,
	.max_header	= MAX_DCCP_HEADER,
	.obj_size	= sizeof(struct dccp6_sock),

@ -171,12 +171,18 @@ const char *dccp_packet_name(const int type)

EXPORT_SYMBOL_GPL(dccp_packet_name);

static void dccp_sk_destruct(struct sock *sk)
void dccp_destruct_common(struct sock *sk)
{
	struct dccp_sock *dp = dccp_sk(sk);

	ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
	dp->dccps_hc_tx_ccid = NULL;
}
EXPORT_SYMBOL_GPL(dccp_destruct_common);

static void dccp_sk_destruct(struct sock *sk)
{
	dccp_destruct_common(sk);
	inet_sock_destruct(sk);
}

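A recurring shape in the dccp and sctp hunks of this merge: the sk_destruct work is split into a protocol-common helper plus thin per-family wrappers that chain into the generic inet destructor, replacing inet6_destroy_sock() calls in the ->destroy path. A minimal sketch of that pattern, using a hypothetical protocol "foo" (none of the foo_* names are from the patch; inet_sock_destruct() and inet6_sock_destruct() are the real generic destructors):

/* Hypothetical protocol "foo": free private state once, then chain
 * into the matching address-family destructor.
 */
static void foo_destruct_common(struct sock *sk)
{
	/* release foo's per-socket resources here */
}

static void foo_sk_destruct(struct sock *sk)		/* IPv4 sockets */
{
	foo_destruct_common(sk);
	inet_sock_destruct(sk);
}

static void foo_v6_sk_destruct(struct sock *sk)		/* IPv6 sockets */
{
	foo_destruct_common(sk);
	inet6_sock_destruct(sk);
}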
@ -114,6 +114,7 @@ void inet6_sock_destruct(struct sock *sk)
	inet6_cleanup_sock(sk);
	inet_sock_destruct(sk);
}
EXPORT_SYMBOL_GPL(inet6_sock_destruct);

static int inet6_create(struct net *net, struct socket *sock, int protocol,
			int kern)

@ -23,11 +23,6 @@
#include <linux/bpf-cgroup.h>
#include <net/ping.h>

static void ping_v6_destroy(struct sock *sk)
{
	inet6_destroy_sock(sk);
}

/* Compatibility glue so we can support IPv6 when it's compiled as a module */
static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len,
				 int *addr_len)

@ -205,7 +200,6 @@ struct proto pingv6_prot = {
	.owner		= THIS_MODULE,
	.init		= ping_init_sock,
	.close		= ping_close,
	.destroy	= ping_v6_destroy,
	.pre_connect	= ping_v6_pre_connect,
	.connect	= ip6_datagram_connect_v6_only,
	.disconnect	= __udp_disconnect,

@ -1175,8 +1175,6 @@ static void raw6_destroy(struct sock *sk)
	lock_sock(sk);
	ip6_flush_pending_frames(sk);
	release_sock(sk);

	inet6_destroy_sock(sk);
}

static int rawv6_init_sk(struct sock *sk)

@ -1951,12 +1951,6 @@ static int tcp_v6_init_sock(struct sock *sk)
	return 0;
}

static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
static void get_openreq6(struct seq_file *seq,

@ -2149,7 +2143,7 @@ struct proto tcpv6_prot = {
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,

@ -1668,8 +1668,6 @@ void udpv6_destroy_sock(struct sock *sk)
			udp_encap_disable();
		}
	}

	inet6_destroy_sock(sk);
}

/*

@ -257,8 +257,6 @@ static void l2tp_ip6_destroy_sock(struct sock *sk)

	if (tunnel)
		l2tp_tunnel_delete(tunnel);

	inet6_destroy_sock(sk);
}

static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)

@ -3939,12 +3939,6 @@ static const struct proto_ops mptcp_v6_stream_ops = {

static struct proto mptcp_v6_prot;

static void mptcp_v6_destroy(struct sock *sk)
{
	mptcp_destroy(sk);
	inet6_destroy_sock(sk);
}

static struct inet_protosw mptcp_v6_protosw = {
	.type		= SOCK_STREAM,
	.protocol	= IPPROTO_MPTCP,

@ -3960,7 +3954,6 @@ int __init mptcp_proto_v6_init(void)
	mptcp_v6_prot = mptcp_prot;
	strcpy(mptcp_v6_prot.name, "MPTCPv6");
	mptcp_v6_prot.slab = NULL;
	mptcp_v6_prot.destroy = mptcp_v6_destroy;
	mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock);

	err = proto_register(&mptcp_v6_prot, 1);
@ -4936,12 +4936,24 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
	}
}

void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
{
	if (nft_set_is_anonymous(set))
		nft_clear(ctx->net, set);

	set->use++;
}
EXPORT_SYMBOL_GPL(nf_tables_activate_set);

void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
			      struct nft_set_binding *binding,
			      enum nft_trans_phase phase)
{
	switch (phase) {
	case NFT_TRANS_PREPARE:
		if (nft_set_is_anonymous(set))
			nft_deactivate_next(ctx->net, set);

		set->use--;
		return;
	case NFT_TRANS_ABORT:

@ -342,7 +342,7 @@ static void nft_dynset_activate(const struct nft_ctx *ctx,
{
	struct nft_dynset *priv = nft_expr_priv(expr);

	priv->set->use++;
	nf_tables_activate_set(ctx, priv->set);
}

static void nft_dynset_destroy(const struct nft_ctx *ctx,

@ -167,7 +167,7 @@ static void nft_lookup_activate(const struct nft_ctx *ctx,
{
	struct nft_lookup *priv = nft_expr_priv(expr);

	priv->set->use++;
	nf_tables_activate_set(ctx, priv->set);
}

static void nft_lookup_destroy(const struct nft_ctx *ctx,

@ -184,7 +184,7 @@ static void nft_objref_map_activate(const struct nft_ctx *ctx,
{
	struct nft_objref_map *priv = nft_expr_priv(expr);

	priv->set->use++;
	nf_tables_activate_set(ctx, priv->set);
}

static void nft_objref_map_destroy(const struct nft_ctx *ctx,
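The nft_dynset, nft_lookup and nft_objref hunks above are one mechanical change: each ->activate callback stops open-coding priv->set->use++ and routes through the new nf_tables_activate_set() helper, which additionally makes anonymous sets live again via nft_clear(). A sketch with a hypothetical expression type (the example_* names are not from the patch; nft_expr_priv() and the callback signature are the real nf_tables interfaces):

struct nft_example {
	struct nft_set *set;
};

static void nft_example_activate(const struct nft_ctx *ctx,
				 const struct nft_expr *expr)
{
	struct nft_example *priv = nft_expr_priv(expr);

	/* bumps priv->set->use and, for anonymous sets, nft_clear()s them */
	nf_tables_activate_set(ctx, priv->set);
}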
@ -5102,13 +5102,17 @@ static void sctp_destroy_sock(struct sock *sk)
}

/* Triggered when there are no references on the socket anymore */
static void sctp_destruct_sock(struct sock *sk)
static void sctp_destruct_common(struct sock *sk)
{
	struct sctp_sock *sp = sctp_sk(sk);

	/* Free up the HMAC transform. */
	crypto_free_shash(sp->hmac);
}

static void sctp_destruct_sock(struct sock *sk)
{
	sctp_destruct_common(sk);
	inet_sock_destruct(sk);
}

@ -9431,7 +9435,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
	sctp_sk(newsk)->reuse = sp->reuse;

	newsk->sk_shutdown = sk->sk_shutdown;
	newsk->sk_destruct = sctp_destruct_sock;
	newsk->sk_destruct = sk->sk_destruct;
	newsk->sk_family = sk->sk_family;
	newsk->sk_protocol = IPPROTO_SCTP;
	newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;

@ -9666,11 +9670,20 @@ struct proto sctp_prot = {

#if IS_ENABLED(CONFIG_IPV6)

#include <net/transp_v6.h>
static void sctp_v6_destroy_sock(struct sock *sk)
static void sctp_v6_destruct_sock(struct sock *sk)
{
	sctp_destroy_sock(sk);
	inet6_destroy_sock(sk);
	sctp_destruct_common(sk);
	inet6_sock_destruct(sk);
}

static int sctp_v6_init_sock(struct sock *sk)
{
	int ret = sctp_init_sock(sk);

	if (!ret)
		sk->sk_destruct = sctp_v6_destruct_sock;

	return ret;
}

struct proto sctpv6_prot = {

@ -9680,8 +9693,8 @@ struct proto sctpv6_prot = {
	.disconnect	= sctp_disconnect,
	.accept		= sctp_accept,
	.ioctl		= sctp_ioctl,
	.init		= sctp_init_sock,
	.destroy	= sctp_v6_destroy_sock,
	.init		= sctp_v6_init_sock,
	.destroy	= sctp_destroy_sock,
	.shutdown	= sctp_shutdown,
	.setsockopt	= sctp_setsockopt,
	.getsockopt	= sctp_getsockopt,
@ -56,8 +56,8 @@ static void print_help(char *cmd)
		"Usage: %s <options>\n"
		"	--help, -h			this menu\n"
		"	--image, -i <image>		VM image file to load (e.g. a kernel Image) [Required]\n"
		"	--dtb, -d <dtb>			Devicetree to load [Required]\n"
		"	--ramdisk, -r <ramdisk>		Ramdisk to load\n"
		"	--dtb, -d <dtb>			Devicetree file to load [Required]\n"
		"	--ramdisk, -r <ramdisk>		Ramdisk file to load\n"
		"	--base, -B <address>		Set the base address of guest's memory [Default: 0x80000000]\n"
		"	--size, -S <number>		The number of bytes large to make the guest's memory [Default: 0x6400000 (100 MB)]\n"
		"	--image_offset, -I <number>	Offset into guest memory to load the VM image file [Default: 0x10000]\n"

@ -625,7 +625,7 @@ int main(int argc, char **argv)
	p = strrchr(argv[1], '/');
	p = p ? p + 1 : argv[1];
	grammar_name = strdup(p);
	if (!p) {
	if (!grammar_name) {
		perror(NULL);
		exit(1);
	}
@ -209,14 +209,19 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component,
		be_chan = soc_component_to_pcm(component_be)->chan[substream->stream];
		tmp_chan = be_chan;
	}
	if (!tmp_chan)
		tmp_chan = dma_request_slave_channel(dev_be, tx ? "tx" : "rx");
	if (!tmp_chan) {
		tmp_chan = dma_request_chan(dev_be, tx ? "tx" : "rx");
		if (IS_ERR(tmp_chan)) {
			dev_err(dev, "failed to request DMA channel for Back-End\n");
			return -EINVAL;
		}
	}

	/*
	 * An EDMA DEV_TO_DEV channel is fixed and bound with DMA event of each
	 * peripheral, unlike SDMA channel that is allocated dynamically. So no
	 * need to configure dma_request and dma_request2, but get dma_chan of
	 * Back-End device directly via dma_request_slave_channel.
	 * Back-End device directly via dma_request_chan.
	 */
	if (!asrc->use_edma) {
		/* Get DMA request of Back-End */
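The hunk above swaps the deprecated dma_request_slave_channel(), which returns NULL on failure, for dma_request_chan(), which returns an ERR_PTR() and so must be checked with IS_ERR(). A minimal sketch of the new calling convention (the example_* function and the "rx" channel name are placeholders, not from the patch):

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Request an "rx" slave channel for 'dev' and report errors as errno. */
static int example_request_rx(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan = dma_request_chan(dev, "rx");

	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* preserves e.g. -EPROBE_DEFER */

	*out = chan;
	return 0;
}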
@ -1541,7 +1541,7 @@ static const struct fsl_sai_soc_data fsl_sai_imx8qm_data = {
	.use_imx_pcm = true,
	.use_edma = true,
	.fifo_depth = 64,
	.pins = 1,
	.pins = 4,
	.reg_offset = 0,
	.mclk0_is_mclk1 = false,
	.flags = 0,

@ -183,6 +183,7 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
	const struct sof_ipc_tplg_ops *tplg_ops = sdev->ipc->ops->tplg;
	pm_message_t pm_state;
	u32 target_state = snd_sof_dsp_power_target(sdev);
	u32 old_state = sdev->dsp_power_state.state;
	int ret;

	/* do nothing if dsp suspend callback is not set */

@ -192,7 +193,12 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
	if (runtime_suspend && !sof_ops(sdev)->runtime_suspend)
		return 0;

	if (tplg_ops && tplg_ops->tear_down_all_pipelines)
	/* we need to tear down pipelines only if the DSP hardware is
	 * active, which happens for PCI devices. if the device is
	 * suspended, it is brought back to full power and then
	 * suspended again
	 */
	if (tplg_ops && tplg_ops->tear_down_all_pipelines && (old_state == SOF_DSP_PM_D0))
		tplg_ops->tear_down_all_pipelines(sdev, false);

	if (sdev->fw_state != SOF_FW_BOOT_COMPLETE)

@ -847,7 +847,7 @@ int main(int argc, char **argv)
		if (cull & CULL_PID || filter & FILTER_PID)
			fprintf(fout, ", PID %d", list[i].pid);
		if (cull & CULL_TGID || filter & FILTER_TGID)
			fprintf(fout, ", TGID %d", list[i].pid);
			fprintf(fout, ", TGID %d", list[i].tgid);
		if (cull & CULL_COMM || filter & FILTER_COMM)
			fprintf(fout, ", task_comm_name: %s", list[i].comm);
		if (cull & CULL_ALLOCATOR) {
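The last hunk, from what appears to be the page-owner sorting tool, fixes a copy-paste bug: the TGID column printed list[i].pid, so it always duplicated the PID. A tiny standalone illustration of why the two fields differ (hypothetical values, not from the tool):

#include <stdio.h>

/* Hypothetical record mirroring the pid/tgid split above: threads of
 * one process share a TGID but carry distinct PIDs.
 */
struct rec { int pid; int tgid; };

int main(void)
{
	struct rec worker = { .pid = 1235, .tgid = 1234 };

	/* before the fix both columns showed 1235 */
	printf("PID %d, TGID %d\n", worker.pid, worker.tgid);
	return 0;
}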