Merge branch 'android12-5.10' into branch 'android12-5.10-lts'

Bring the LTS branch up to sync with the non-LTS branch. This contains the following commits:

* 36e3dadee0 FROMLIST: binder: check offset alignment in binder_get_object()
* 750e8cd3eb ANDROID: enable CONFIG_USB_XHCI_PCI_RENESAS in gki_defconfig
* 17d3242550 Merge tag 'android12-5.10.209_r00' into android12-5.10
* 42ae2a9c8c UPSTREAM: usb: dwc3: core: set force_gen1 bit in USB31 devices if max speed is SS
* a0b6273ec3 ANDROID: userfaultfd: abort uffdio ops if mmap_lock is contended
* e3aabbf867 ANDROID: userfaultfd: add MMAP_TRYLOCK mode for COPY/ZEROPAGE
* 0ebc4699bd UPSTREAM: coresight: etm4x: Remove bogous __exit annotation for some functions
* 40173a1428 UPSTREAM: ASoC: hdmi-codec: register hpd callback on component probe
* 438106d0e1 UPSTREAM: usb: typec: tcpm: Fix NULL pointer dereference in tcpm_pd_svdm()
* 4955298720 UPSTREAM: mm/damon/vaddr-test: fix memory leak in damon_do_test_apply_three_regions()
* 5d0d2934a6 UPSTREAM: task_work: add kerneldoc annotation for 'data' argument
* b4c0e45118 UPSTREAM: x86/purgatory: Remove LTO flags
* 78254cf9c0 UPSTREAM: tcpm: Avoid soft reset when partner does not support get_status
* f91d081f60 UPSTREAM: block/mq-deadline: use correct way to throttling write requests
* d60601c3dd UPSTREAM: usb: typec: tcpm: Fix response to vsafe0V event
* ce364c0d34 UPSTREAM: clk: Fix memory leak in devm_clk_notifier_register()
* eebfedfd22 UPSTREAM: selftests: damon: add config file
* d93f79522f ANDROID: abi_gki_aarch64_qcom: Export trace_android_vh_try_fixup_sea
* f211036cf3 ANDROID: arm64: Call fixup_exception() within do_sea()
* a905086c85 ANDROID: userfaultfd: allow SPF for UFFD_FEATURE_SIGBUS on private+anon
* 9171439117 UPSTREAM: usb: dwc3: gadget: Handle EP0 request dequeuing properly
* 5e21de099e UPSTREAM: usb: dwc3: gadget: Refactor EP0 forced stall/restart into a separate API
* f6c4f779c6 ANDROID: GKI: Update symbols to symbol list
* 94b9d8e513 ANDROID: add hooks into blk-mq-sched.c for customized I/O scheduler
* 6912e0c8e9 ANDROID: add hooks into blk-ma-tag.c for customized I/O scheduler
* f76294a655 ANDROID: add hooks into blk-flush.c for customized I/O scheduler
* d269992366 ANDROID: add hooks into blk-core.c for customized I/O scheduler
* ccbc7f8808 ANDROID: add hooks into blk-mq.c for customized I/O scheduler
* 34338029ab ANDROID: add hooks into bio.c for customized I/O scheduler
* b2e58fe5d7 ANDROID: ABI: Update oplus symbol list
* 517fdcc1f8 ANDROID: binder: Add vendor hook to fix priority restore
* 0b886c607a ANDROID: GKI: Update symbol list
* ca7dabaf67 ANDROID: Add vendor hook for task exiting routine
* 0415c5ccfe UPSTREAM: netfilter: nft_set_rbtree: skip end interval element from gc
* 92d263f290 ANDROID: GKI: Update oplus symbol list
* 2a717853b3 UPSTREAM: usb: gadget: uvc: set v4l2_dev->dev in f_uvc
* 44817294cf ANDROID: mm: Fix VMA ref count after fast-mremap

Change-Id: I2b2128e5e94585032ec538c4c551213933a1472e
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit bb4ccced84
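The ANDROID I/O-scheduler commits listed above all rely on the vendor-hook convention that recurs throughout the block-layer hunks below: the core function fires an android_vh_*/android_rvh_* hook with a skip flag, and its default logic runs only when no registered probe sets that flag. A minimal stand-alone sketch of the convention, in plain C with illustrative names (nothing here is taken verbatim from the diff):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for a registered android_vh_* probe; a vendor module may
     * set *skip = true to take over the operation entirely. */
    static void vendor_hook_example(bool *skip)
    {
            *skip = true; /* pretend a vendor implementation handled it */
    }

    static int default_path_example(void)
    {
            bool skip = false;

            vendor_hook_example(&skip); /* hook runs before the default code */
            if (skip)
                    return 0;       /* vendor handled it; bypass the default path */

            puts("default kernel behaviour");
            return 1;
    }

    int main(void)
    {
            return default_path_example();
    }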
@ -2976,6 +2976,7 @@
|
||||
__traceiter_android_vh_free_oem_binder_struct
|
||||
__traceiter_android_vh_binder_special_task
|
||||
__traceiter_android_vh_binder_free_buf
|
||||
__traceiter_android_vh_binder_buffer_release
|
||||
__tracepoint_android_rvh_account_irq
|
||||
__tracepoint_android_rvh_after_enqueue_task
|
||||
__tracepoint_android_rvh_build_perf_domains
|
||||
@ -3257,6 +3258,7 @@
|
||||
__tracepoint_android_vh_free_oem_binder_struct
|
||||
__tracepoint_android_vh_binder_special_task
|
||||
__tracepoint_android_vh_binder_free_buf
|
||||
__tracepoint_android_vh_binder_buffer_release
|
||||
trace_print_array_seq
|
||||
trace_print_flags_seq
|
||||
trace_print_hex_seq
|
||||
@ -3721,4 +3723,6 @@
|
||||
xhci_ring_cmd_db
|
||||
xhci_ring_free
|
||||
xhci_trb_virt_to_dma
|
||||
xt_register_match
|
||||
xt_unregister_match
|
||||
zero_pfn
|
||||
|
@ -2607,6 +2607,7 @@
|
||||
__traceiter_android_vh_show_suspend_epoch_val
|
||||
__traceiter_android_vh_subpage_dma_contig_alloc
|
||||
__traceiter_android_vh_timer_calc_index
|
||||
__traceiter_android_vh_try_fixup_sea
|
||||
__traceiter_android_vh_ufs_check_int_errors
|
||||
__traceiter_android_vh_ufs_clock_scaling
|
||||
__traceiter_android_vh_ufs_compl_command
|
||||
@ -2735,6 +2736,7 @@
|
||||
__tracepoint_android_vh_show_suspend_epoch_val
|
||||
__tracepoint_android_vh_subpage_dma_contig_alloc
|
||||
__tracepoint_android_vh_timer_calc_index
|
||||
__tracepoint_android_vh_try_fixup_sea
|
||||
__tracepoint_android_vh_ufs_check_int_errors
|
||||
__tracepoint_android_vh_ufs_clock_scaling
|
||||
__tracepoint_android_vh_ufs_compl_command
|
||||
|
@ -46,6 +46,26 @@
|
||||
__traceiter_android_vh_unuse_swap_page
|
||||
__traceiter_android_vh_waiting_for_page_migration
|
||||
__traceiter_android_vh_should_end_madvise
|
||||
__traceiter_android_vh_exit_check
|
||||
__traceiter_android_vh_bio_free
|
||||
__traceiter_android_rvh_internal_blk_mq_alloc_request
|
||||
__traceiter_android_vh_internal_blk_mq_free_request
|
||||
__traceiter_android_vh_blk_mq_complete_request
|
||||
__traceiter_android_vh_blk_mq_add_to_requeue_list
|
||||
__traceiter_android_rvh_blk_mq_delay_run_hw_queue
|
||||
__traceiter_android_vh_blk_mq_run_hw_queue
|
||||
__traceiter_android_vh_blk_mq_insert_request
|
||||
__traceiter_android_rvh_blk_mq_alloc_rq_map
|
||||
__traceiter_android_rvh_blk_mq_init_allocated_queue
|
||||
__traceiter_android_vh_blk_mq_exit_queue
|
||||
__traceiter_android_vh_blk_mq_alloc_tag_set
|
||||
__traceiter_android_rvh_blk_allocated_queue_init
|
||||
__traceiter_android_rvh_blk_flush_plug_list
|
||||
__traceiter_android_vh_blk_alloc_flush_queue
|
||||
__traceiter_android_vh_blk_mq_all_tag_iter
|
||||
__traceiter_android_vh_blk_mq_queue_tag_busy_iter
|
||||
__traceiter_android_vh_blk_mq_free_tags
|
||||
__traceiter_android_vh_blk_mq_sched_insert_request
|
||||
__tracepoint_android_rvh_alloc_si
|
||||
__tracepoint_android_rvh_alloc_swap_slot_cache
|
||||
__tracepoint_android_rvh_drain_slots_cache_cpu
|
||||
@ -82,4 +102,24 @@
|
||||
__tracepoint_android_vh_unuse_swap_page
|
||||
__tracepoint_android_vh_waiting_for_page_migration
|
||||
__tracepoint_android_vh_should_end_madvise
|
||||
__tracepoint_android_vh_exit_check
|
||||
__tracepoint_android_vh_bio_free
|
||||
__tracepoint_android_rvh_internal_blk_mq_alloc_request
|
||||
__tracepoint_android_vh_internal_blk_mq_free_request
|
||||
__tracepoint_android_vh_blk_mq_complete_request
|
||||
__tracepoint_android_vh_blk_mq_add_to_requeue_list
|
||||
__tracepoint_android_rvh_blk_mq_delay_run_hw_queue
|
||||
__tracepoint_android_vh_blk_mq_run_hw_queue
|
||||
__tracepoint_android_vh_blk_mq_insert_request
|
||||
__tracepoint_android_rvh_blk_mq_alloc_rq_map
|
||||
__tracepoint_android_rvh_blk_mq_init_allocated_queue
|
||||
__tracepoint_android_vh_blk_mq_exit_queue
|
||||
__tracepoint_android_vh_blk_mq_alloc_tag_set
|
||||
__tracepoint_android_rvh_blk_allocated_queue_init
|
||||
__tracepoint_android_rvh_blk_flush_plug_list
|
||||
__tracepoint_android_vh_blk_alloc_flush_queue
|
||||
__tracepoint_android_vh_blk_mq_all_tag_iter
|
||||
__tracepoint_android_vh_blk_mq_queue_tag_busy_iter
|
||||
__tracepoint_android_vh_blk_mq_free_tags
|
||||
__tracepoint_android_vh_blk_mq_sched_insert_request
|
||||
zero_pfn
|
||||
|
@@ -478,6 +478,7 @@ CONFIG_USB_HIDDEV=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_PCI_RENESAS=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
@@ -728,6 +728,11 @@ static int do_sea(unsigned long far, unsigned int esr, struct pt_regs *regs)
{
const struct fault_info *inf;
unsigned long siaddr;
bool can_fixup = false;

trace_android_vh_try_fixup_sea(far, esr, regs, &can_fixup);
if (can_fixup && fixup_exception(regs))
return 0;

inf = esr_to_fault_info(esr);
@@ -429,6 +429,7 @@ CONFIG_HID_WIIMOTE=y
CONFIG_USB_HIDDEV=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_PCI_RENESAS=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
@@ -19,6 +19,10 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS
# optimization flags.
KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))

# When LTO is enabled, llvm emits many text sections, which is not supported
# by kexec. Remove -flto=* flags.
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS))

# When linking purgatory.ro with -r unresolved symbols are not checked,
# also link a purgatory.chk binary without -r to check for unresolved symbols.
PURGATORY_LDFLAGS := -e purgatory_start -nostdlib -z nodefaultlib
@@ -21,6 +21,7 @@
#include <linux/blk-crypto.h>

#include <trace/events/block.h>
#include <trace/hooks/block.h>
#include "blk.h"
#include "blk-rq-qos.h"

@@ -252,6 +253,7 @@ static void bio_free(struct bio *bio)
struct bio_set *bs = bio->bi_pool;
void *p;

trace_android_vh_bio_free(bio);
bio_uninit(bio);

if (bs) {
@@ -66,6 +66,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_merge);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_requeue);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_complete);

#undef CREATE_TRACE_POINTS
#include <trace/hooks/block.h>

DEFINE_IDA(blk_queue_ida);

/*
@@ -522,6 +525,7 @@ struct request_queue *blk_alloc_queue(int node_id)
{
struct request_queue *q;
int ret;
bool skip = false;

q = kmem_cache_alloc_node(blk_requestq_cachep,
GFP_KERNEL | __GFP_ZERO, node_id);
@@ -585,6 +589,10 @@ struct request_queue *blk_alloc_queue(int node_id)
blk_set_default_limits(&q->limits);
q->nr_requests = BLKDEV_MAX_RQ;

trace_android_rvh_blk_allocated_queue_init(&skip, q);
if (skip)
goto fail_ref;

return q;

fail_ref:
@@ -1761,6 +1769,7 @@ EXPORT_SYMBOL(blk_check_plugged);

void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
trace_android_rvh_blk_flush_plug_list(plug, from_schedule);
flush_plug_callbacks(plug, from_schedule);

if (!list_empty(&plug->mq_list))
@@ -467,11 +467,13 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask)
}
EXPORT_SYMBOL(blkdev_issue_flush);

#include <trace/hooks/block.h>
struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
gfp_t flags)
{
struct blk_flush_queue *fq;
int rq_sz = sizeof(struct request);
bool skip = false;

fq = kzalloc_node(sizeof(*fq), flags, node);
if (!fq)
@@ -479,8 +481,12 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,

spin_lock_init(&fq->mq_flush_lock);

rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
fq->flush_rq = kzalloc_node(rq_sz, flags, node);
trace_android_vh_blk_alloc_flush_queue(&skip, cmd_size, flags, node,
fq);
if (!skip) {
rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
fq->flush_rq = kzalloc_node(rq_sz, flags, node);
}
if (!fq->flush_rq)
goto fail_rq;
@@ -422,6 +422,7 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
return false;
}

#include <trace/hooks/block.h>
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
bool run_queue, bool async)
{
@@ -429,10 +430,13 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
struct elevator_queue *e = q->elevator;
struct blk_mq_ctx *ctx = rq->mq_ctx;
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
bool skip = false;

WARN_ON(e && (rq->tag != BLK_MQ_NO_TAG));

if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
trace_android_vh_blk_mq_sched_insert_request(&skip, rq);

if (!skip && blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
/*
* Firstly normal IO request is inserted to scheduler queue or
* sw queue, meantime we add flush request to dispatch queue(
@@ -15,6 +15,8 @@
#include "blk-mq.h"
#include "blk-mq-tag.h"

#include <trace/hooks/blk_mq.h>

/*
* If a previously inactive queue goes active, bump the active user count.
* We need to do this before try to allocate driver tag, then even if fail
@@ -336,8 +338,13 @@ static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
bool skip = false;

WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

trace_android_vh_blk_mq_all_tag_iter(&skip, tags, fn, priv);
if (skip)
return;
if (tags->nr_reserved_tags)
bt_tags_for_each(tags, tags->breserved_tags, fn, priv,
flags | BT_TAG_ITER_RESERVED);
@@ -438,6 +445,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
{
struct blk_mq_hw_ctx *hctx;
int i;
bool skip = false;

/*
* __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
@@ -457,6 +465,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
if (!blk_mq_hw_queue_mapped(hctx))
continue;

trace_android_vh_blk_mq_queue_tag_busy_iter(&skip, hctx, fn,
priv);
if (skip)
continue;

if (tags->nr_reserved_tags)
bt_for_each(hctx, tags->breserved_tags, fn, priv, true);
bt_for_each(hctx, tags->bitmap_tags, fn, priv, false);
@@ -556,6 +569,12 @@ struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,

void blk_mq_free_tags(struct blk_mq_tags *tags, unsigned int flags)
{
bool skip = false;

trace_android_vh_blk_mq_free_tags(&skip, tags);
if (skip)
return;

if (!(flags & BLK_MQ_F_TAG_HCTX_SHARED)) {
sbitmap_queue_free(tags->bitmap_tags);
sbitmap_queue_free(tags->breserved_tags);
@ -353,6 +353,7 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
|
||||
struct elevator_queue *e = q->elevator;
|
||||
u64 alloc_time_ns = 0;
|
||||
unsigned int tag;
|
||||
bool skip = false;
|
||||
|
||||
/* alloc_time includes depth and tag waits */
|
||||
if (blk_queue_rq_alloc_time(q))
|
||||
@ -384,7 +385,9 @@ static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data)
|
||||
* case just retry the hctx assignment and tag allocation as CPU hotplug
|
||||
* should have migrated us to an online CPU by now.
|
||||
*/
|
||||
tag = blk_mq_get_tag(data);
|
||||
trace_android_rvh_internal_blk_mq_alloc_request(&skip, &tag, data);
|
||||
if (!skip)
|
||||
tag = blk_mq_get_tag(data);
|
||||
if (tag == BLK_MQ_NO_TAG) {
|
||||
if (data->flags & BLK_MQ_REQ_NOWAIT)
|
||||
return NULL;
|
||||
@ -496,12 +499,17 @@ static void __blk_mq_free_request(struct request *rq)
|
||||
struct blk_mq_ctx *ctx = rq->mq_ctx;
|
||||
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
|
||||
const int sched_tag = rq->internal_tag;
|
||||
bool skip = false;
|
||||
|
||||
blk_crypto_free_request(rq);
|
||||
blk_pm_mark_last_busy(rq);
|
||||
rq->mq_hctx = NULL;
|
||||
if (rq->tag != BLK_MQ_NO_TAG)
|
||||
blk_mq_put_tag(hctx->tags, ctx, rq->tag);
|
||||
|
||||
trace_android_vh_internal_blk_mq_free_request(&skip, rq, hctx);
|
||||
if (!skip) {
|
||||
if (rq->tag != BLK_MQ_NO_TAG)
|
||||
blk_mq_put_tag(hctx->tags, ctx, rq->tag);
|
||||
}
|
||||
if (sched_tag != BLK_MQ_NO_TAG)
|
||||
blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
|
||||
blk_mq_sched_restart(hctx);
|
||||
@ -701,6 +709,11 @@ EXPORT_SYMBOL_GPL(blk_mq_complete_request_remote);
|
||||
**/
|
||||
void blk_mq_complete_request(struct request *rq)
|
||||
{
|
||||
bool skip = false;
|
||||
|
||||
trace_android_vh_blk_mq_complete_request(&skip, rq);
|
||||
if (skip)
|
||||
return;
|
||||
if (!blk_mq_complete_request_remote(rq))
|
||||
rq->q->mq_ops->complete(rq);
|
||||
}
|
||||
@ -827,7 +840,12 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
|
||||
{
|
||||
struct request_queue *q = rq->q;
|
||||
unsigned long flags;
|
||||
bool skip = false;
|
||||
|
||||
trace_android_vh_blk_mq_add_to_requeue_list(&skip, rq,
|
||||
kick_requeue_list);
|
||||
if (skip)
|
||||
return;
|
||||
/*
|
||||
* We abuse this flag that is otherwise used by the I/O scheduler to
|
||||
* request head insertion from the workqueue.
|
||||
@ -1593,9 +1611,15 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
|
||||
static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
|
||||
unsigned long msecs)
|
||||
{
|
||||
bool skip = false;
|
||||
|
||||
if (unlikely(blk_mq_hctx_stopped(hctx)))
|
||||
return;
|
||||
|
||||
trace_android_rvh_blk_mq_delay_run_hw_queue(&skip, hctx, async);
|
||||
if (skip)
|
||||
return;
|
||||
|
||||
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
|
||||
int cpu = get_cpu();
|
||||
if (cpumask_test_cpu(cpu, hctx->cpumask)) {
|
||||
@ -1651,6 +1675,7 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
|
||||
blk_mq_hctx_has_pending(hctx);
|
||||
hctx_unlock(hctx, srcu_idx);
|
||||
|
||||
trace_android_vh_blk_mq_run_hw_queue(&need_run, hctx);
|
||||
if (need_run)
|
||||
__blk_mq_delay_run_hw_queue(hctx, async, 0);
|
||||
}
|
||||
@ -1877,9 +1902,14 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
|
||||
bool at_head)
|
||||
{
|
||||
struct blk_mq_ctx *ctx = rq->mq_ctx;
|
||||
bool skip = false;
|
||||
|
||||
lockdep_assert_held(&ctx->lock);
|
||||
|
||||
trace_android_vh_blk_mq_insert_request(&skip, hctx, rq);
|
||||
if (skip)
|
||||
return;
|
||||
|
||||
__blk_mq_insert_req_list(hctx, rq, at_head);
|
||||
blk_mq_hctx_mark_pending(hctx, ctx);
|
||||
}
|
||||
@ -2419,12 +2449,15 @@ struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
|
||||
{
|
||||
struct blk_mq_tags *tags;
|
||||
int node;
|
||||
bool skip = false;
|
||||
|
||||
node = blk_mq_hw_queue_to_node(&set->map[HCTX_TYPE_DEFAULT], hctx_idx);
|
||||
if (node == NUMA_NO_NODE)
|
||||
node = set->numa_node;
|
||||
|
||||
tags = blk_mq_init_tags(nr_tags, reserved_tags, node, flags);
|
||||
trace_android_rvh_blk_mq_alloc_rq_map(&skip, &tags, set, node, flags);
|
||||
if (!skip)
|
||||
tags = blk_mq_init_tags(nr_tags, reserved_tags, node, flags);
|
||||
if (!tags)
|
||||
return NULL;
|
||||
|
||||
@ -3362,6 +3395,8 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
|
||||
blk_mq_add_queue_tag_set(set, q);
|
||||
blk_mq_map_swqueue(q);
|
||||
|
||||
trace_android_rvh_blk_mq_init_allocated_queue(q);
|
||||
|
||||
if (elevator_init)
|
||||
elevator_init_mq(q);
|
||||
|
||||
@ -3385,6 +3420,7 @@ void blk_mq_exit_queue(struct request_queue *q)
|
||||
{
|
||||
struct blk_mq_tag_set *set = q->tag_set;
|
||||
|
||||
trace_android_vh_blk_mq_exit_queue(q);
|
||||
/* Checks hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED. */
|
||||
blk_mq_exit_hw_queues(q, set, set->nr_hw_queues);
|
||||
/* May clear BLK_MQ_F_TAG_QUEUE_SHARED in hctx->flags. */
|
||||
@ -3575,6 +3611,8 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
|
||||
if (ret)
|
||||
goto out_free_mq_map;
|
||||
|
||||
trace_android_vh_blk_mq_alloc_tag_set(set);
|
||||
|
||||
ret = blk_mq_alloc_map_and_requests(set);
|
||||
if (ret)
|
||||
goto out_free_mq_map;
|
||||
|
@@ -549,8 +549,9 @@ static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
struct request_queue *q = hctx->queue;
struct deadline_data *dd = q->elevator->elevator_data;
struct blk_mq_tags *tags = hctx->sched_tags;
unsigned int shift = tags->bitmap_tags->sb.shift;

dd->async_depth = max(1UL, 3 * q->nr_requests / 4);
dd->async_depth = max(1U, 3 * (1U << shift) / 4);

sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
}
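For a concrete sense of the mq-deadline change above: with a sched_tags bitmap shift of 6, 1U << 6 = 64 and dd->async_depth = max(1U, 3 * 64 / 4) = 48, so the write throttle is now derived from the scheduler-tag bitmap that sbitmap_queue_min_shallow_depth() is applied to, rather than from q->nr_requests.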
@@ -1800,8 +1800,10 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;

read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
if (offset > buffer->data_size || read_size < sizeof(*hdr))
if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
!IS_ALIGNED(offset, sizeof(u32)))
return 0;

if (u) {
if (copy_from_user(object, u + offset, read_size))
return 0;
@@ -3830,12 +3832,14 @@ binder_free_buf(struct binder_proc *proc,
struct binder_buffer *buffer, bool is_failure)
{
bool enqueue_task = true;
bool has_transaction = false;

trace_android_vh_binder_free_buf(proc, thread, buffer);
binder_inner_proc_lock(proc);
if (buffer->transaction) {
buffer->transaction->buffer = NULL;
buffer->transaction = NULL;
has_transaction = true;
}
binder_inner_proc_unlock(proc);
if (buffer->async_transaction && buffer->target_node) {
@@ -3859,6 +3863,8 @@ binder_free_buf(struct binder_proc *proc,
}
binder_node_inner_unlock(buf_node);
}
trace_android_vh_binder_buffer_release(proc, thread, buffer,
has_transaction);
trace_binder_transaction_buffer_release(buffer);
binder_release_entire_buffer(proc, thread, buffer, is_failure);
binder_alloc_free_buf(&proc->alloc, buffer);
@ -45,6 +45,7 @@
|
||||
#include <trace/hooks/thermal.h>
|
||||
#include <trace/hooks/ufshcd.h>
|
||||
#include <trace/hooks/block.h>
|
||||
#include <trace/hooks/blk_mq.h>
|
||||
#include <trace/hooks/cgroup.h>
|
||||
#include <trace/hooks/sys.h>
|
||||
#include <trace/hooks/traps.h>
|
||||
@ -218,6 +219,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_die_kernel_fault);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sea);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_mem_abort);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sp_pc_abort);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_try_fixup_sea);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_iommu_setup_dma_ops);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_setup_dma_ops);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_alloc_iova);
|
||||
@ -506,6 +508,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_transaction_received);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_oem_binder_struct);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_special_task);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_free_buf);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_buffer_release);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_perf_huristic_ctrl);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_command_post_change);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_abort_success_ctrl);
|
||||
@ -515,6 +518,26 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_err_check_ctrl);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_err_print_ctrl);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_vmscan_kswapd_done);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_swap_page_spf);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_check);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_bio_free);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_internal_blk_mq_alloc_request);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_internal_blk_mq_free_request);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_complete_request);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_add_to_requeue_list);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_blk_mq_delay_run_hw_queue);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_run_hw_queue);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_insert_request);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_blk_mq_alloc_rq_map);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_blk_mq_init_allocated_queue);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_exit_queue);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_alloc_tag_set);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_blk_allocated_queue_init);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_blk_flush_plug_list);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_alloc_flush_queue);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_all_tag_iter);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_queue_tag_busy_iter);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_free_tags);
|
||||
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_blk_mq_sched_insert_request);
|
||||
/*
|
||||
* For type visibility
|
||||
*/
|
||||
|
@@ -4579,6 +4579,7 @@ int devm_clk_notifier_register(struct device *dev, struct clk *clk,
if (!ret) {
devres->clk = clk;
devres->nb = nb;
devres_add(dev, devres);
} else {
devres_free(devres);
}
@@ -2052,7 +2052,7 @@ static void clear_etmdrvdata(void *info)
etmdrvdata[cpu] = NULL;
}

static void __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
static void etm4_remove_dev(struct etmv4_drvdata *drvdata)
{
etm_perf_symlink(drvdata->csdev, false);
/*
@@ -2074,7 +2074,7 @@ static void __exit etm4_remove_dev(struct etmv4_drvdata *drvdata)
coresight_unregister(drvdata->csdev);
}

static void __exit etm4_remove_amba(struct amba_device *adev)
static void etm4_remove_amba(struct amba_device *adev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(&adev->dev);

@@ -2082,7 +2082,7 @@ static void __exit etm4_remove_amba(struct amba_device *adev)
etm4_remove_dev(drvdata);
}

static int __exit etm4_remove_platform_dev(struct platform_device *pdev)
static int etm4_remove_platform_dev(struct platform_device *pdev)
{
int ret = 0;
struct etmv4_drvdata *drvdata = dev_get_drvdata(&pdev->dev);
@@ -1121,6 +1121,18 @@ static int dwc3_core_init(struct dwc3 *dwc)
}
}

/*
* Modify this for all supported Super Speed ports when
* multiport support is added.
*/
if (hw_mode != DWC3_GHWPARAMS0_MODE_GADGET &&
(DWC3_IP_IS(DWC31)) &&
dwc->maximum_speed == USB_SPEED_SUPER) {
reg = dwc3_readl(dwc->regs, DWC3_LLUCTL);
reg |= DWC3_LLUCTL_FORCE_GEN1;
dwc3_writel(dwc->regs, DWC3_LLUCTL, reg);
}

return 0;

err4:
@@ -170,6 +170,8 @@
#define DWC3_OEVTEN 0xcc0C
#define DWC3_OSTS 0xcc10

#define DWC3_LLUCTL 0xd024

/* Bit fields */

/* Global SoC Bus Configuration INCRx Register 0 */
@@ -633,6 +635,9 @@
#define DWC3_OSTS_VBUSVLD BIT(1)
#define DWC3_OSTS_CONIDSTS BIT(0)

/* Force Gen1 speed on Gen2 link */
#define DWC3_LLUCTL_FORCE_GEN1 BIT(10)

/* Structures */

struct dwc3_trb;
@ -139,6 +139,24 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
static void dwc3_ep0_reset_state(struct dwc3 *dwc)
|
||||
{
|
||||
unsigned int dir;
|
||||
|
||||
if (dwc->ep0state != EP0_SETUP_PHASE) {
|
||||
dir = !!dwc->ep0_expect_in;
|
||||
if (dwc->ep0state == EP0_DATA_PHASE)
|
||||
dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
|
||||
else
|
||||
dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
|
||||
|
||||
dwc->eps[0]->trb_enqueue = 0;
|
||||
dwc->eps[1]->trb_enqueue = 0;
|
||||
|
||||
dwc3_ep0_stall_and_restart(dwc);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* dwc3_ep_inc_trb - increment a trb index.
|
||||
* @index: Pointer to the TRB index to increment.
|
||||
@ -2068,7 +2086,17 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
|
||||
|
||||
list_for_each_entry(r, &dep->pending_list, list) {
|
||||
if (r == req) {
|
||||
dwc3_gadget_giveback(dep, req, -ECONNRESET);
|
||||
/*
|
||||
* Explicitly check for EP0/1 as dequeue for those
|
||||
* EPs need to be handled differently. Control EP
|
||||
* only deals with one USB req, and giveback will
|
||||
* occur during dwc3_ep0_stall_and_restart(). EP0
|
||||
* requests are never added to started_list.
|
||||
*/
|
||||
if (dep->number > 1)
|
||||
dwc3_gadget_giveback(dep, req, -ECONNRESET);
|
||||
else
|
||||
dwc3_ep0_reset_state(dwc);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
@ -2547,16 +2575,9 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
|
||||
ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
|
||||
msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
|
||||
if (ret == 0) {
|
||||
unsigned int dir;
|
||||
|
||||
dev_warn(dwc->dev, "wait for SETUP phase timed out\n");
|
||||
spin_lock_irqsave(&dwc->lock, flags);
|
||||
dir = !!dwc->ep0_expect_in;
|
||||
if (dwc->ep0state == EP0_DATA_PHASE)
|
||||
dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
|
||||
else
|
||||
dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
|
||||
dwc3_ep0_stall_and_restart(dwc);
|
||||
dwc3_ep0_reset_state(dwc);
|
||||
spin_unlock_irqrestore(&dwc->lock, flags);
|
||||
}
|
||||
}
|
||||
@ -3849,16 +3870,7 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
|
||||
dwc->setup_packet_pending = false;
|
||||
usb_gadget_set_state(dwc->gadget, USB_STATE_NOTATTACHED);
|
||||
|
||||
if (dwc->ep0state != EP0_SETUP_PHASE) {
|
||||
unsigned int dir;
|
||||
|
||||
dir = !!dwc->ep0_expect_in;
|
||||
if (dwc->ep0state == EP0_DATA_PHASE)
|
||||
dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
|
||||
else
|
||||
dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
|
||||
dwc3_ep0_stall_and_restart(dwc);
|
||||
}
|
||||
dwc3_ep0_reset_state(dwc);
|
||||
}
|
||||
|
||||
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
|
||||
@ -3912,20 +3924,7 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
|
||||
* phase. So ensure that EP0 is in setup phase by issuing a stall
|
||||
* and restart if EP0 is not in setup phase.
|
||||
*/
|
||||
if (dwc->ep0state != EP0_SETUP_PHASE) {
|
||||
unsigned int dir;
|
||||
|
||||
dir = !!dwc->ep0_expect_in;
|
||||
if (dwc->ep0state == EP0_DATA_PHASE)
|
||||
dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
|
||||
else
|
||||
dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
|
||||
|
||||
dwc->eps[0]->trb_enqueue = 0;
|
||||
dwc->eps[1]->trb_enqueue = 0;
|
||||
|
||||
dwc3_ep0_stall_and_restart(dwc);
|
||||
}
|
||||
dwc3_ep0_reset_state(dwc);
|
||||
|
||||
/*
|
||||
* In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a
|
||||
|
@@ -419,6 +419,7 @@ uvc_register_video(struct uvc_device *uvc)

/* TODO reference counting. */
uvc->vdev.v4l2_dev = &uvc->v4l2_dev;
uvc->vdev.v4l2_dev->dev = &cdev->gadget->dev;
uvc->vdev.fops = &uvc_v4l2_fops;
uvc->vdev.ioctl_ops = &uvc_v4l2_ioctl_ops;
uvc->vdev.release = video_device_release_empty;
@ -1624,6 +1624,9 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
|
||||
if (PD_VDO_VID(p[0]) != USB_SID_PD)
|
||||
break;
|
||||
|
||||
if (IS_ERR_OR_NULL(port->partner))
|
||||
break;
|
||||
|
||||
if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
|
||||
typec_partner_set_svdm_version(port->partner,
|
||||
PD_VDO_SVDM_VER(p[0]));
|
||||
@ -2709,6 +2712,13 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
|
||||
port->sink_cap_done = true;
|
||||
tcpm_set_state(port, ready_state(port), 0);
|
||||
break;
|
||||
/*
|
||||
* Some port partners do not support GET_STATUS, avoid soft reset the link to
|
||||
* prevent redundant power re-negotiation
|
||||
*/
|
||||
case GET_STATUS_SEND:
|
||||
tcpm_set_state(port, ready_state(port), 0);
|
||||
break;
|
||||
case SRC_READY:
|
||||
case SNK_READY:
|
||||
if (port->vdm_state > VDM_STATE_READY) {
|
||||
@ -5333,6 +5343,10 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
|
||||
/* Do nothing, vbus drop expected */
|
||||
break;
|
||||
|
||||
case SNK_HARD_RESET_WAIT_VBUS:
|
||||
/* Do nothing, its OK to receive vbus off events */
|
||||
break;
|
||||
|
||||
default:
|
||||
if (port->pwr_role == TYPEC_SINK && port->attached)
|
||||
tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port));
|
||||
@ -5384,6 +5398,9 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
|
||||
case SNK_DEBOUNCED:
|
||||
/*Do nothing, still waiting for VSAFE5V for connect */
|
||||
break;
|
||||
case SNK_HARD_RESET_WAIT_VBUS:
|
||||
/* Do nothing, its OK to receive vbus off events */
|
||||
break;
|
||||
default:
|
||||
if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
|
||||
tcpm_set_state(port, SNK_UNATTACHED, 0);
|
||||
|
fs/userfaultfd.c (105 lines changed)
@ -70,6 +70,7 @@ struct userfaultfd_ctx {
|
||||
bool mmap_changing;
|
||||
/* mm with one ore more vmas attached to this userfaultfd_ctx */
|
||||
struct mm_struct *mm;
|
||||
struct rcu_head rcu_head;
|
||||
};
|
||||
|
||||
struct userfaultfd_fork_ctx {
|
||||
@ -155,6 +156,13 @@ static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
|
||||
refcount_inc(&ctx->refcount);
|
||||
}
|
||||
|
||||
static void __free_userfaultfd_ctx(struct rcu_head *head)
|
||||
{
|
||||
struct userfaultfd_ctx *ctx = container_of(head, struct userfaultfd_ctx,
|
||||
rcu_head);
|
||||
kmem_cache_free(userfaultfd_ctx_cachep, ctx);
|
||||
}
|
||||
|
||||
/**
|
||||
* userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
|
||||
* context.
|
||||
@ -175,7 +183,7 @@ static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
|
||||
VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
|
||||
VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
|
||||
mmdrop(ctx->mm);
|
||||
kmem_cache_free(userfaultfd_ctx_cachep, ctx);
|
||||
call_rcu(&ctx->rcu_head, __free_userfaultfd_ctx);
|
||||
}
|
||||
}
|
||||
|
||||
@ -349,6 +357,24 @@ static inline long userfaultfd_get_blocking_state(unsigned int flags)
|
||||
return TASK_UNINTERRUPTIBLE;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
|
||||
bool userfaultfd_using_sigbus(struct vm_area_struct *vma)
|
||||
{
|
||||
struct userfaultfd_ctx *ctx;
|
||||
bool ret;
|
||||
|
||||
/*
|
||||
* Do it inside RCU section to ensure that the ctx doesn't
|
||||
* disappear under us.
|
||||
*/
|
||||
rcu_read_lock();
|
||||
ctx = rcu_dereference(vma->vm_userfaultfd_ctx.ctx);
|
||||
ret = ctx && (ctx->features & UFFD_FEATURE_SIGBUS);
|
||||
rcu_read_unlock();
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The locking rules involved in returning VM_FAULT_RETRY depending on
|
||||
* FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
|
||||
@ -393,7 +419,8 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
|
||||
*/
|
||||
mmap_assert_locked(mm);
|
||||
|
||||
ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
|
||||
ctx = rcu_dereference_protected(vmf->vma->vm_userfaultfd_ctx.ctx,
|
||||
lockdep_is_held(&mm->mmap_lock));
|
||||
if (!ctx)
|
||||
goto out;
|
||||
|
||||
@ -610,8 +637,10 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
|
||||
/* the various vma->vm_userfaultfd_ctx still points to it */
|
||||
mmap_write_lock(mm);
|
||||
for (vma = mm->mmap; vma; vma = vma->vm_next)
|
||||
if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
|
||||
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
|
||||
if (rcu_access_pointer(vma->vm_userfaultfd_ctx.ctx) ==
|
||||
release_new_ctx) {
|
||||
rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx,
|
||||
NULL);
|
||||
vma->vm_flags &= ~__VM_UFFD_FLAGS;
|
||||
}
|
||||
mmap_write_unlock(mm);
|
||||
@ -641,10 +670,13 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
|
||||
struct userfaultfd_ctx *ctx = NULL, *octx;
|
||||
struct userfaultfd_fork_ctx *fctx;
|
||||
|
||||
octx = vma->vm_userfaultfd_ctx.ctx;
|
||||
octx = rcu_dereference_protected(
|
||||
vma->vm_userfaultfd_ctx.ctx,
|
||||
lockdep_is_held(&vma->vm_mm->mmap_lock));
|
||||
|
||||
if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
|
||||
vm_write_begin(vma);
|
||||
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
|
||||
rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, NULL);
|
||||
WRITE_ONCE(vma->vm_flags,
|
||||
vma->vm_flags & ~__VM_UFFD_FLAGS);
|
||||
vm_write_end(vma);
|
||||
@ -683,7 +715,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
|
||||
list_add_tail(&fctx->list, fcs);
|
||||
}
|
||||
|
||||
vma->vm_userfaultfd_ctx.ctx = ctx;
|
||||
rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, ctx);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -716,7 +748,8 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
|
||||
{
|
||||
struct userfaultfd_ctx *ctx;
|
||||
|
||||
ctx = vma->vm_userfaultfd_ctx.ctx;
|
||||
ctx = rcu_dereference_protected(vma->vm_userfaultfd_ctx.ctx,
|
||||
lockdep_is_held(&vma->vm_mm->mmap_lock));
|
||||
|
||||
if (!ctx)
|
||||
return;
|
||||
@ -727,7 +760,7 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
|
||||
WRITE_ONCE(ctx->mmap_changing, true);
|
||||
} else {
|
||||
/* Drop uffd context if remap feature not enabled */
|
||||
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
|
||||
rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, NULL);
|
||||
vma->vm_flags &= ~__VM_UFFD_FLAGS;
|
||||
}
|
||||
}
|
||||
@ -764,7 +797,8 @@ bool userfaultfd_remove(struct vm_area_struct *vma,
|
||||
struct userfaultfd_ctx *ctx;
|
||||
struct userfaultfd_wait_queue ewq;
|
||||
|
||||
ctx = vma->vm_userfaultfd_ctx.ctx;
|
||||
ctx = rcu_dereference_protected(vma->vm_userfaultfd_ctx.ctx,
|
||||
lockdep_is_held(&mm->mmap_lock));
|
||||
if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
|
||||
return true;
|
||||
|
||||
@ -802,7 +836,9 @@ int userfaultfd_unmap_prep(struct vm_area_struct *vma,
|
||||
{
|
||||
for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
|
||||
struct userfaultfd_unmap_ctx *unmap_ctx;
|
||||
struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;
|
||||
struct userfaultfd_ctx *ctx =
|
||||
rcu_dereference_protected(vma->vm_userfaultfd_ctx.ctx,
|
||||
lockdep_is_held(&vma->vm_mm->mmap_lock));
|
||||
|
||||
if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
|
||||
has_unmap_ctx(ctx, unmaps, start, end))
|
||||
@ -867,10 +903,13 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
|
||||
mmap_write_lock(mm);
|
||||
prev = NULL;
|
||||
for (vma = mm->mmap; vma; vma = vma->vm_next) {
|
||||
struct userfaultfd_ctx *cur_uffd_ctx =
|
||||
rcu_dereference_protected(vma->vm_userfaultfd_ctx.ctx,
|
||||
lockdep_is_held(&mm->mmap_lock));
|
||||
cond_resched();
|
||||
BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
|
||||
BUG_ON(!!cur_uffd_ctx ^
|
||||
!!(vma->vm_flags & __VM_UFFD_FLAGS));
|
||||
if (vma->vm_userfaultfd_ctx.ctx != ctx) {
|
||||
if (cur_uffd_ctx != ctx) {
|
||||
prev = vma;
|
||||
continue;
|
||||
}
|
||||
@ -887,7 +926,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
|
||||
prev = vma;
|
||||
vm_write_begin(vma);
|
||||
WRITE_ONCE(vma->vm_flags, new_flags);
|
||||
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
|
||||
rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, NULL);
|
||||
vm_write_end(vma);
|
||||
}
|
||||
mmap_write_unlock(mm);
|
||||
@ -1350,9 +1389,12 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
|
||||
found = false;
|
||||
basic_ioctls = false;
|
||||
for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
|
||||
struct userfaultfd_ctx *cur_uffd_ctx =
|
||||
rcu_dereference_protected(cur->vm_userfaultfd_ctx.ctx,
|
||||
lockdep_is_held(&mm->mmap_lock));
|
||||
cond_resched();
|
||||
|
||||
BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
|
||||
BUG_ON(!!cur_uffd_ctx ^
|
||||
!!(cur->vm_flags & __VM_UFFD_FLAGS));
|
||||
|
||||
/* check not compatible vmas */
|
||||
@ -1395,8 +1437,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
|
||||
* wouldn't know which one to deliver the userfaults to.
|
||||
*/
|
||||
ret = -EBUSY;
|
||||
if (cur->vm_userfaultfd_ctx.ctx &&
|
||||
cur->vm_userfaultfd_ctx.ctx != ctx)
|
||||
if (cur_uffd_ctx && cur_uffd_ctx != ctx)
|
||||
goto out_unlock;
|
||||
|
||||
/*
|
||||
@ -1414,18 +1455,20 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
|
||||
|
||||
ret = 0;
|
||||
do {
|
||||
struct userfaultfd_ctx *cur_uffd_ctx =
|
||||
rcu_dereference_protected(vma->vm_userfaultfd_ctx.ctx,
|
||||
lockdep_is_held(&mm->mmap_lock));
|
||||
cond_resched();
|
||||
|
||||
BUG_ON(!vma_can_userfault(vma, vm_flags));
|
||||
BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
|
||||
vma->vm_userfaultfd_ctx.ctx != ctx);
|
||||
BUG_ON(cur_uffd_ctx && cur_uffd_ctx != ctx);
|
||||
WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
|
||||
|
||||
/*
|
||||
* Nothing to do: this vma is already registered into this
|
||||
* userfaultfd and with the right tracking mode too.
|
||||
*/
|
||||
if (vma->vm_userfaultfd_ctx.ctx == ctx &&
|
||||
if (cur_uffd_ctx == ctx &&
|
||||
(vma->vm_flags & vm_flags) == vm_flags)
|
||||
goto skip;
|
||||
|
||||
@ -1461,7 +1504,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
|
||||
*/
|
||||
vm_write_begin(vma);
|
||||
WRITE_ONCE(vma->vm_flags, new_flags);
|
||||
vma->vm_userfaultfd_ctx.ctx = ctx;
|
||||
rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, ctx);
|
||||
vm_write_end(vma);
|
||||
|
||||
if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
|
||||
@ -1561,7 +1604,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
|
||||
for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
|
||||
cond_resched();
|
||||
|
||||
BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
|
||||
BUG_ON(!!rcu_access_pointer(cur->vm_userfaultfd_ctx.ctx) ^
|
||||
!!(cur->vm_flags & __VM_UFFD_FLAGS));
|
||||
|
||||
/*
|
||||
@ -1583,6 +1626,9 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
|
||||
|
||||
ret = 0;
|
||||
do {
|
||||
struct userfaultfd_ctx *cur_uffd_ctx =
|
||||
rcu_dereference_protected(vma->vm_userfaultfd_ctx.ctx,
|
||||
lockdep_is_held(&mm->mmap_lock));
|
||||
cond_resched();
|
||||
|
||||
BUG_ON(!vma_can_userfault(vma, vma->vm_flags));
|
||||
@ -1591,7 +1637,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
|
||||
* Nothing to do: this vma is already registered into this
|
||||
* userfaultfd and with the right tracking mode too.
|
||||
*/
|
||||
if (!vma->vm_userfaultfd_ctx.ctx)
|
||||
if (!cur_uffd_ctx)
|
||||
goto skip;
|
||||
|
||||
WARN_ON(!(vma->vm_flags & VM_MAYWRITE));
|
||||
@ -1610,7 +1656,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
|
||||
struct userfaultfd_wake_range range;
|
||||
range.start = start;
|
||||
range.len = vma_end - start;
|
||||
wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
|
||||
wake_userfault(cur_uffd_ctx, &range);
|
||||
}
|
||||
|
||||
new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
|
||||
@ -1641,7 +1687,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
|
||||
*/
|
||||
vm_write_begin(vma);
|
||||
WRITE_ONCE(vma->vm_flags, new_flags);
|
||||
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
|
||||
rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, NULL);
|
||||
vm_write_end(vma);
|
||||
|
||||
skip:
|
||||
@ -1723,7 +1769,9 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
|
||||
ret = -EINVAL;
|
||||
if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
|
||||
goto out;
|
||||
if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|UFFDIO_COPY_MODE_WP))
|
||||
if (uffdio_copy.mode & ~(UFFDIO_COPY_MODE_DONTWAKE|
|
||||
UFFDIO_COPY_MODE_WP|
|
||||
UFFDIO_COPY_MODE_MMAP_TRYLOCK))
|
||||
goto out;
|
||||
if (mmget_not_zero(ctx->mm)) {
|
||||
ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
|
||||
@ -1774,13 +1822,14 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
|
||||
if (ret)
|
||||
goto out;
|
||||
ret = -EINVAL;
|
||||
if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
|
||||
if (uffdio_zeropage.mode & ~(UFFDIO_ZEROPAGE_MODE_DONTWAKE|
|
||||
UFFDIO_ZEROPAGE_MODE_MMAP_TRYLOCK))
|
||||
goto out;
|
||||
|
||||
if (mmget_not_zero(ctx->mm)) {
|
||||
ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
|
||||
uffdio_zeropage.range.len,
|
||||
&ctx->mmap_changing);
|
||||
&ctx->mmap_changing, uffdio_zeropage.mode);
|
||||
mmput(ctx->mm);
|
||||
} else {
|
||||
return -ESRCH;
|
||||
|
@@ -292,7 +292,7 @@ struct vm_region {
#ifdef CONFIG_USERFAULTFD
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) { NULL, })
struct vm_userfaultfd_ctx {
struct userfaultfd_ctx *ctx;
struct userfaultfd_ctx __rcu *ctx;
};
#else /* CONFIG_USERFAULTFD */
#define NULL_VM_UFFD_CTX ((struct vm_userfaultfd_ctx) {})
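The __rcu annotation on ctx is what drives the fs/userfaultfd.c and mm/userfaultfd.c changes elsewhere in this diff: once the pointer is RCU-managed, readers go through rcu_dereference()/rcu_access_pointer() and writers through rcu_assign_pointer(), with the context freed via call_rcu(), so that the lockless UFFD_FEATURE_SIGBUS check in the speculative-fault path can safely look at it.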
@ -33,9 +33,15 @@
|
||||
#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
|
||||
#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
|
||||
|
||||
static_assert(UFFDIO_ZEROPAGE_MODE_MMAP_TRYLOCK == UFFDIO_COPY_MODE_MMAP_TRYLOCK);
|
||||
#define UFFDIO_MODE_MMAP_TRYLOCK UFFDIO_COPY_MODE_MMAP_TRYLOCK
|
||||
|
||||
extern int sysctl_unprivileged_userfaultfd;
|
||||
|
||||
extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
|
||||
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
|
||||
extern bool userfaultfd_using_sigbus(struct vm_area_struct *vma);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* The mode of operation for __mcopy_atomic and its helpers.
|
||||
@ -62,9 +68,8 @@ extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
|
||||
unsigned long src_start, unsigned long len,
|
||||
bool *mmap_changing, __u64 mode);
|
||||
extern ssize_t mfill_zeropage(struct mm_struct *dst_mm,
|
||||
unsigned long dst_start,
|
||||
unsigned long len,
|
||||
bool *mmap_changing);
|
||||
unsigned long dst_start, unsigned long len,
|
||||
bool *mmap_changing, __u64 mode);
|
||||
extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start,
|
||||
unsigned long len, bool *mmap_changing);
|
||||
extern int mwriteprotect_range(struct mm_struct *dst_mm,
|
||||
@ -75,7 +80,7 @@ extern int mwriteprotect_range(struct mm_struct *dst_mm,
|
||||
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
|
||||
struct vm_userfaultfd_ctx vm_ctx)
|
||||
{
|
||||
return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
|
||||
return rcu_access_pointer(vma->vm_userfaultfd_ctx.ctx) == vm_ctx.ctx;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -154,6 +159,13 @@ static inline vm_fault_t handle_userfault(struct vm_fault *vmf,
|
||||
return VM_FAULT_SIGBUS;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
|
||||
static inline bool userfaultfd_using_sigbus(struct vm_area_struct *vma)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
|
||||
struct vm_userfaultfd_ctx vm_ctx)
|
||||
{
|
||||
|
@@ -140,6 +140,10 @@ DECLARE_HOOK(android_vh_binder_free_buf,
TP_PROTO(struct binder_proc *proc, struct binder_thread *thread,
struct binder_buffer *buffer),
TP_ARGS(proc, thread, buffer));
DECLARE_HOOK(android_vh_binder_buffer_release,
TP_PROTO(struct binder_proc *proc, struct binder_thread *thread,
struct binder_buffer *buffer, bool has_transaction),
TP_ARGS(proc, thread, buffer, has_transaction));
/* macro versions of hooks are no longer required */

#endif /* _TRACE_HOOK_BINDER_H */
include/trace/hooks/blk_mq.h (new file, 33 lines)
@@ -0,0 +1,33 @@
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM blk_mq

#define TRACE_INCLUDE_PATH trace/hooks

#if !defined(_TRACE_HOOK_BLK_MQ_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_HOOK_BLK_MQ_H

#include <trace/hooks/vendor_hooks.h>

struct blk_mq_tag_set;
struct blk_mq_hw_ctx;

DECLARE_HOOK(android_vh_blk_mq_all_tag_iter,
TP_PROTO(bool *skip, struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
void *priv),
TP_ARGS(skip, tags, fn, priv));

DECLARE_HOOK(android_vh_blk_mq_queue_tag_busy_iter,
TP_PROTO(bool *skip, struct blk_mq_hw_ctx *hctx, busy_iter_fn * fn,
void *priv),
TP_ARGS(skip, hctx, fn, priv));

DECLARE_HOOK(android_vh_blk_mq_free_tags,
TP_PROTO(bool *skip, struct blk_mq_tags *tags),
TP_ARGS(skip, tags));

#endif /* _TRACE_HOOK_BLK_MQ_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
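DECLARE_HOOK() builds each of these on a tracepoint, so a vendor module would normally attach through the generated register_trace_android_vh_<name>() helper, with the probe receiving the registration data pointer first and then the TP_PROTO arguments. A rough sketch for the android_vh_blk_mq_free_tags hook declared above (the helper name follows the pattern the hook macros generate; the module scaffolding and names are illustrative, not from the diff):

    #include <linux/module.h>
    #include <trace/hooks/blk_mq.h>

    /* Probe: void *data from registration, then the TP_PROTO arguments. */
    static void oem_blk_mq_free_tags(void *data, bool *skip,
                                     struct blk_mq_tags *tags)
    {
            /* A vendor I/O scheduler that manages these tags itself would set
             * *skip = true here so the default blk_mq_free_tags() body is bypassed. */
    }

    static int __init oem_iosched_init(void)
    {
            return register_trace_android_vh_blk_mq_free_tags(oem_blk_mq_free_tags,
                                                               NULL);
    }
    module_init(oem_iosched_init);
    MODULE_LICENSE("GPL");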
@ -14,14 +14,20 @@
|
||||
struct blk_mq_tags;
|
||||
struct blk_mq_alloc_data;
|
||||
struct blk_mq_tag_set;
|
||||
struct blk_mq_hw_ctx;
|
||||
#else
|
||||
/* struct blk_mq_tags */
|
||||
#include <../block/blk-mq-tag.h>
|
||||
/* struct blk_mq_alloc_data */
|
||||
#include <../block/blk-mq.h>
|
||||
/* struct blk_mq_tag_set */
|
||||
/* struct blk_mq_tag_set struct blk_mq_hw_ctx*/
|
||||
#include <linux/blk-mq.h>
|
||||
#endif /* __GENKSYMS__ */
|
||||
struct bio;
|
||||
struct request_queue;
|
||||
struct request;
|
||||
struct blk_plug;
|
||||
struct blk_flush_queue;
|
||||
|
||||
DECLARE_HOOK(android_vh_blk_alloc_rqs,
|
||||
TP_PROTO(size_t *rq_size, struct blk_mq_tag_set *set,
|
||||
@ -33,6 +39,84 @@ DECLARE_HOOK(android_vh_blk_rq_ctx_init,
|
||||
struct blk_mq_alloc_data *data, u64 alloc_time_ns),
|
||||
TP_ARGS(rq, tags, data, alloc_time_ns));
|
||||
|
||||
DECLARE_HOOK(android_vh_bio_free,
|
||||
TP_PROTO(struct bio *bio),
|
||||
TP_ARGS(bio));
|
||||
|
||||
DECLARE_RESTRICTED_HOOK(android_rvh_internal_blk_mq_alloc_request,
|
||||
TP_PROTO(bool *skip, int *tag, struct blk_mq_alloc_data *data),
|
||||
TP_ARGS(skip, tag, data), 1);
|
||||
|
||||
DECLARE_HOOK(android_vh_internal_blk_mq_free_request,
|
||||
TP_PROTO(bool *skip, struct request *rq, struct blk_mq_hw_ctx *hctx),
|
||||
TP_ARGS(skip, rq, hctx));
|
||||
|
||||
DECLARE_HOOK(android_vh_blk_mq_complete_request,
|
||||
TP_PROTO(bool *skip, struct request *rq),
|
||||
TP_ARGS(skip, rq));
|
||||
|
||||
DECLARE_HOOK(android_vh_blk_mq_add_to_requeue_list,
|
||||
TP_PROTO(bool *skip, struct request *rq, bool kick_requeue_list),
|
||||
TP_ARGS(skip, rq, kick_requeue_list));
|
||||
|
||||
DECLARE_HOOK(android_vh_blk_mq_get_driver_tag,
|
||||
TP_PROTO(struct request *rq),
|
||||
TP_ARGS(rq));
|
||||
|
||||
DECLARE_RESTRICTED_HOOK(android_rvh_blk_mq_delay_run_hw_queue,
|
||||
TP_PROTO(bool *skip, struct blk_mq_hw_ctx *hctx, bool async),
|
||||
TP_ARGS(skip, hctx, async), 1);
|
||||
|
||||
DECLARE_HOOK(android_vh_blk_mq_run_hw_queue,
|
||||
TP_PROTO(bool *need_run, struct blk_mq_hw_ctx *hctx),
|
||||
TP_ARGS(need_run, hctx));
|
||||
|
||||
DECLARE_HOOK(android_vh_blk_mq_insert_request,
|
||||
TP_PROTO(bool *skip, struct blk_mq_hw_ctx *hctx, struct request *rq),
|
||||
TP_ARGS(skip, hctx, rq));
|
||||
|
||||
DECLARE_RESTRICTED_HOOK(android_rvh_blk_mq_alloc_rq_map,
|
||||
TP_PROTO(bool *skip, struct blk_mq_tags **tags,
|
||||
struct blk_mq_tag_set *set, int node, unsigned int flags),
|
||||
TP_ARGS(skip, tags, set, node, flags), 1);
|
||||
|
||||
DECLARE_HOOK(android_vh_blk_mq_hctx_notify_dead,
|
||||
TP_PROTO(bool *skip, struct blk_mq_hw_ctx *hctx),
|
||||
TP_ARGS(skip, hctx));
|
||||
|
||||
DECLARE_RESTRICTED_HOOK(android_rvh_blk_mq_init_allocated_queue,
|
||||
TP_PROTO(struct request_queue *q),
|
||||
TP_ARGS(q), 1);
|
||||
|
||||
DECLARE_HOOK(android_vh_blk_mq_exit_queue,
|
||||
TP_PROTO(struct request_queue *q),
|
||||
TP_ARGS(q));
|
||||
|
||||
DECLARE_HOOK(android_vh_blk_mq_alloc_tag_set,
|
||||
TP_PROTO(struct blk_mq_tag_set *set),
|
||||
TP_ARGS(set));
|
||||
|
||||
DECLARE_HOOK(android_vh_blk_mq_update_nr_requests,
|
||||
TP_PROTO(bool *skip, struct request_queue *q),
|
||||
TP_ARGS(skip, q));
|
||||
|
||||
DECLARE_RESTRICTED_HOOK(android_rvh_blk_allocated_queue_init,
|
||||
TP_PROTO(bool *skip, struct request_queue *q),
|
||||
TP_ARGS(skip, q), 1);
|
||||
|
||||
DECLARE_RESTRICTED_HOOK(android_rvh_blk_flush_plug_list,
|
||||
TP_PROTO(struct blk_plug *plug, bool from_schedule),
|
||||
TP_ARGS(plug, from_schedule), 1);
|
||||
|
||||
DECLARE_HOOK(android_vh_blk_alloc_flush_queue,
|
||||
TP_PROTO(bool *skip, int cmd_size, int flags, int node,
|
||||
struct blk_flush_queue *fq),
|
||||
TP_ARGS(skip, cmd_size, flags, node, fq));
|
||||
|
||||
DECLARE_HOOK(android_vh_blk_mq_sched_insert_request,
|
||||
TP_PROTO(bool *skip, struct request *rq),
|
||||
TP_ARGS(skip, rq));
|
||||
|
||||
#endif /* _TRACE_HOOK_BLOCK_H */
|
||||
|
||||
/* This part must be outside protection */
|
||||
|
@@ -104,7 +104,9 @@ DECLARE_HOOK(android_vh_percpu_rwsem_wq_add,
TP_PROTO(struct percpu_rw_semaphore *sem, bool reader),
TP_ARGS(sem, reader));

DECLARE_HOOK(android_vh_exit_check,
TP_PROTO(struct task_struct *tsk, long code, int group_dead),
TP_ARGS(tsk, code, group_dead));
/* macro versions of hooks are no longer required */

#endif /* _TRACE_HOOK_DTASK_H */
@@ -40,6 +40,11 @@ DECLARE_HOOK(android_vh_handle_tlb_conf,

/* macro versions of hooks are no longer required */

DECLARE_HOOK(android_vh_try_fixup_sea,
TP_PROTO(unsigned long addr, unsigned long esr, struct pt_regs *regs,
bool *can_fixup),
TP_ARGS(addr, esr, regs, can_fixup));

#endif /* _TRACE_HOOK_FAULT_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
@@ -237,6 +237,7 @@ struct uffdio_copy {
* according to the uffdio_register.ioctls.
*/
#define UFFDIO_COPY_MODE_WP ((__u64)1<<1)
#define UFFDIO_COPY_MODE_MMAP_TRYLOCK ((__u64)1<<63)
__u64 mode;

/*
@@ -249,6 +250,7 @@ struct uffdio_copy {
struct uffdio_zeropage {
struct uffdio_range range;
#define UFFDIO_ZEROPAGE_MODE_DONTWAKE ((__u64)1<<0)
#define UFFDIO_ZEROPAGE_MODE_MMAP_TRYLOCK ((__u64)1<<63)
__u64 mode;

/*
@@ -70,6 +70,7 @@
#include <asm/unistd.h>
#include <asm/mmu_context.h>
#include <trace/hooks/mm.h>
#include <trace/hooks/dtask.h>

/*
* The default value should be high enough to not crash a system that randomly
@@ -820,6 +821,7 @@ void __noreturn do_exit(long code)
sync_mm_rss(tsk->mm);
acct_update_integrals(tsk);
group_dead = atomic_dec_and_test(&tsk->signal->live);
trace_android_vh_exit_check(current, code, group_dead);
if (group_dead) {
/*
* If the last thread of global init has exited, panic
@@ -65,6 +65,7 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
* task_work_cancel_match - cancel a pending work added by task_work_add()
* @task: the task which should execute the work
* @match: match function to call
* @data: data to be passed in to match function
*
* RETURNS:
* The found work or NULL if not found.
@@ -152,6 +152,8 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
}

damon_destroy_target(t);
}

/*
mm/memory.c (34 lines changed)
@ -5040,6 +5040,7 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
|
||||
pud_t pudval;
|
||||
int seq;
|
||||
vm_fault_t ret;
|
||||
bool uffd_missing_sigbus = false;
|
||||
|
||||
/* Clear flags that may lead to release the mmap_sem to retry */
|
||||
flags &= ~(FAULT_FLAG_ALLOW_RETRY|FAULT_FLAG_KILLABLE);
|
||||
@@ -5052,20 +5053,31 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
 		return VM_FAULT_RETRY;
 	}
 
-	if (!vmf_allows_speculation(&vmf))
-		return VM_FAULT_RETRY;
-
 	vmf.vma_flags = READ_ONCE(vmf.vma->vm_flags);
 	vmf.vma_page_prot = READ_ONCE(vmf.vma->vm_page_prot);
 
 #ifdef CONFIG_USERFAULTFD
-	/* Can't call userland page fault handler in the speculative path */
+	/*
+	 * Only support SPF for SIGBUS+MISSING userfaults in private anonymous
+	 * VMAs. Rest all should be retried with mmap_lock.
+	 */
 	if (unlikely(vmf.vma_flags & __VM_UFFD_FLAGS)) {
-		trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
-		return VM_FAULT_RETRY;
+		uffd_missing_sigbus = vma_is_anonymous(vmf.vma) &&
+					(vmf.vma_flags & VM_UFFD_MISSING) &&
+					userfaultfd_using_sigbus(vmf.vma);
+		if (!uffd_missing_sigbus) {
+			trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
+			return VM_FAULT_RETRY;
+		}
+		/* Not having anon_vma implies that the PTE is missing */
+		if (!vmf.vma->anon_vma)
+			return VM_FAULT_SIGBUS;
 	}
 #endif
 
+	if (!vmf_allows_speculation(&vmf))
+		return VM_FAULT_RETRY;
+
 	if (vmf.vma_flags & VM_GROWSDOWN || vmf.vma_flags & VM_GROWSUP) {
 		/*
 		 * This could be detected by the check address against VMA's
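For reference, the userfaultfd configuration this speculative-fault fast path targets (private anonymous VMA, MISSING mode, UFFD_FEATURE_SIGBUS) can be set up from userspace roughly as follows. A minimal sketch using the standard userfaultfd API; error handling is abbreviated and the helper name is illustrative.

/* Private anon mapping registered for MISSING faults with UFFD_FEATURE_SIGBUS. */
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int setup_sigbus_uffd_region(void **out_addr, size_t len)
{
	struct uffdio_api api = { .api = UFFD_API, .features = UFFD_FEATURE_SIGBUS };
	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
	void *addr;

	if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api))
		return -1;

	addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED)
		return -1;

	struct uffdio_register reg = {
		.range = { .start = (unsigned long)addr, .len = len },
		.mode = UFFDIO_REGISTER_MODE_MISSING,
	};
	if (ioctl(uffd, UFFDIO_REGISTER, &reg))
		return -1;

	*out_addr = addr;
	return uffd;	/* a fault on an unpopulated page now raises SIGBUS */
}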
@@ -5183,6 +5195,9 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
 
 	local_irq_enable();
 
+	if (!vmf.pte && uffd_missing_sigbus)
+		return VM_FAULT_SIGBUS;
+
 	/*
 	 * We need to re-validate the VMA after checking the bounds, otherwise
 	 * we might have a false positive on the bounds.
@@ -5216,7 +5231,12 @@ static vm_fault_t ___handle_speculative_fault(struct mm_struct *mm,
 out_walk:
 	trace_spf_vma_notsup(_RET_IP_, vmf.vma, address);
 	local_irq_enable();
-	return VM_FAULT_RETRY;
+	/*
+	 * Failing page-table walk is similar to page-missing so give an
+	 * opportunity to SIGBUS+MISSING userfault to handle it before retrying
+	 * with mmap_lock
+	 */
+	return uffd_missing_sigbus ? VM_FAULT_SIGBUS : VM_FAULT_RETRY;
 
 out_segv:
 	trace_spf_vma_access(_RET_IP_, vmf.vma, address);
@@ -217,7 +217,7 @@ static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)
 	 * If we have the only reference, swap the refcount to -1. This
 	 * will prevent other concurrent references by get_vma() for SPFs.
 	 */
-	return atomic_cmpxchg(&vma->vm_ref_count, 1, -1) == 1;
+	return atomic_cmpxchg_acquire(&vma->vm_ref_count, 1, -1) == 1;
 }
 
 /*
@@ -225,12 +225,13 @@ static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)
  */
 static inline void unlock_vma_ref_count(struct vm_area_struct *vma)
 {
+	int old = atomic_xchg_release(&vma->vm_ref_count, 1);
+
 	/*
 	 * This should only be called after a corresponding,
 	 * successful trylock_vma_ref_count().
 	 */
-	VM_BUG_ON_VMA(atomic_cmpxchg(&vma->vm_ref_count, -1, 1) != -1,
-		      vma);
+	VM_BUG_ON_VMA(old != -1, vma);
 }
 #else /* !CONFIG_SPECULATIVE_PAGE_FAULT */
 static inline bool trylock_vma_ref_count(struct vm_area_struct *vma)
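The acquire/release pairing matters because work done while the refcount is held at -1 must not be reordered outside that window, and the unlock must publish it before the count reads 1 again. A minimal userspace analogue of the same pattern using C11 atomics; the names and the protected counter are illustrative, not the kernel code.

/* Userspace analogue of the trylock/unlock refcount pairing above. */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int ref_count = 1;	/* 1 = only reference, -1 = exclusively held */
static int protected_data;

static bool trylock_ref(void)
{
	int expected = 1;

	/* Acquire on success: later accesses cannot float above the swap. */
	return atomic_compare_exchange_strong_explicit(&ref_count, &expected, -1,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void unlock_ref(void)
{
	/* Release: earlier writes are visible before the count returns to 1. */
	atomic_exchange_explicit(&ref_count, 1, memory_order_release);
}

void move_data(int new_value)
{
	if (!trylock_ref())
		return;		/* another reference exists; caller retries later */
	protected_data = new_value;
	unlock_ref();
}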
@@ -42,7 +42,7 @@ struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
 	 * enforce the VM_MAYWRITE check done at uffd registration
 	 * time.
 	 */
-	if (!dst_vma->vm_userfaultfd_ctx.ctx)
+	if (!rcu_access_pointer(dst_vma->vm_userfaultfd_ctx.ctx))
 		return NULL;
 
 	return dst_vma;
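The switch to rcu_access_pointer() is the idiomatic way to test an __rcu-annotated pointer for NULL without claiming an RCU read-side dereference. A generic sketch of that distinction; the structure and field names below are illustrative, not the userfaultfd code.

/* rcu_access_pointer() reads only the pointer value (e.g. a NULL test);
 * rcu_dereference() is required before following the pointer and must be
 * done under rcu_read_lock().
 */
#include <linux/rcupdate.h>
#include <linux/types.h>

struct example_ctx {
	int flags;
};

struct example_obj {
	struct example_ctx __rcu *ctx;
};

static bool example_has_ctx(struct example_obj *obj)
{
	/* Value test only: no dereference, no read-side section needed. */
	return rcu_access_pointer(obj->ctx) != NULL;
}

static int example_ctx_flags(struct example_obj *obj)
{
	struct example_ctx *ctx;
	int flags = 0;

	rcu_read_lock();
	ctx = rcu_dereference(obj->ctx);	/* safe to follow under RCU */
	if (ctx)
		flags = ctx->flags;
	rcu_read_unlock();

	return flags;
}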
@@ -559,14 +559,19 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
 	copied = 0;
 	page = NULL;
 retry:
-	mmap_read_lock(dst_mm);
+	err = -EAGAIN;
+	if (mode & UFFDIO_MODE_MMAP_TRYLOCK) {
+		if (!mmap_read_trylock(dst_mm))
+			goto out;
+	} else {
+		mmap_read_lock(dst_mm);
+	}
 
 	/*
 	 * If memory mappings are changing because of non-cooperative
 	 * operation (e.g. mremap) running in parallel, bail out and
 	 * request the user to retry later
 	 */
-	err = -EAGAIN;
 	if (mmap_changing && READ_ONCE(*mmap_changing))
 		goto out_unlock;
@@ -659,6 +664,15 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
 		if (unlikely(err == -ENOENT)) {
 			void *page_kaddr;
 
+			/*
+			 * Return early due to mmap_lock contention only after
+			 * some pages are copied to ensure that jank sensitive
+			 * threads don't keep retrying for progress-critical
+			 * pages.
+			 */
+			if (copied && mmap_lock_is_contended(dst_mm))
+				break;
+
 			mmap_read_unlock(dst_mm);
 			BUG_ON(!page);
@@ -683,6 +697,9 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm,
 
 			if (fatal_signal_pending(current))
 				err = -EINTR;
+
+			if (mmap_lock_is_contended(dst_mm))
+				err = -EAGAIN;
 		}
 		if (err)
 			break;
@@ -708,10 +725,10 @@ ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
 }
 
 ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long start,
-		       unsigned long len, bool *mmap_changing)
+		       unsigned long len, bool *mmap_changing, __u64 mode)
 {
 	return __mcopy_atomic(dst_mm, start, 0, len, MCOPY_ATOMIC_ZEROPAGE,
-			      mmap_changing, 0);
+			      mmap_changing, mode);
 }
 
 ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long start,
@@ -237,7 +237,7 @@ static void nft_rbtree_gc_remove(struct net *net, struct nft_set *set,
 
 static const struct nft_rbtree_elem *
 nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
-		   struct nft_rbtree_elem *rbe, u8 genmask)
+		   struct nft_rbtree_elem *rbe)
 {
 	struct nft_set *set = (struct nft_set *)__set;
 	struct rb_node *prev = rb_prev(&rbe->node);
@@ -256,7 +256,7 @@ nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv,
 	while (prev) {
 		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
 		if (nft_rbtree_interval_end(rbe_prev) &&
-		    nft_set_elem_active(&rbe_prev->ext, genmask))
+		    nft_set_elem_active(&rbe_prev->ext, NFT_GENMASK_ANY))
 			break;
 
 		prev = rb_prev(prev);
@@ -367,7 +367,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 	    nft_set_elem_active(&rbe->ext, cur_genmask)) {
 		const struct nft_rbtree_elem *removed_end;
 
-		removed_end = nft_rbtree_gc_elem(set, priv, rbe, genmask);
+		removed_end = nft_rbtree_gc_elem(set, priv, rbe);
 		if (IS_ERR(removed_end))
 			return PTR_ERR(removed_end);
@@ -849,18 +849,13 @@ static int hdmi_codec_set_jack(struct snd_soc_component *component,
 			       void *data)
 {
 	struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
-	int ret = -ENOTSUPP;
 
 	if (hcp->hcd.ops->hook_plugged_cb) {
 		hcp->jack = jack;
-		ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent,
-						    hcp->hcd.data,
-						    plugged_cb,
-						    component->dev);
-		if (ret)
-			hcp->jack = NULL;
+		return 0;
 	}
-	return ret;
+
+	return -ENOTSUPP;
 }
 
 static int hdmi_dai_spdif_probe(struct snd_soc_dai *dai)
@@ -944,6 +939,21 @@ static int hdmi_of_xlate_dai_id(struct snd_soc_component *component,
 	return ret;
 }
 
+static int hdmi_probe(struct snd_soc_component *component)
+{
+	struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
+	int ret = 0;
+
+	if (hcp->hcd.ops->hook_plugged_cb) {
+		ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent,
+						    hcp->hcd.data,
+						    plugged_cb,
+						    component->dev);
+	}
+
+	return ret;
+}
+
 static void hdmi_remove(struct snd_soc_component *component)
 {
 	struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
@@ -954,6 +964,7 @@ static void hdmi_remove(struct snd_soc_component *component)
 }
 
 static const struct snd_soc_component_driver hdmi_driver = {
+	.probe			= hdmi_probe,
 	.remove			= hdmi_remove,
 	.dapm_widgets		= hdmi_widgets,
 	.num_dapm_widgets	= ARRAY_SIZE(hdmi_widgets),
7
tools/testing/selftests/damon/config
Normal file
@@ -0,0 +1,7 @@
+CONFIG_DAMON=y
+CONFIG_DAMON_SYSFS=y
+CONFIG_DAMON_DBGFS=y
+CONFIG_DAMON_PADDR=y
+CONFIG_DAMON_VADDR=y
+CONFIG_DAMON_RECLAIM=y
+CONFIG_DAMON_LRU_SORT=y