Merge branch 'android12-5.10' into branch 'android12-5.10-lts'
Catch up on a number of bugfixes and abi updates in the android12-5.10
branch. This merge contains the following commits:

6af2483a2f BACKPORT: ravb: Fix use-after-free issue in ravb_tx_timeout_work()
d43bb6d288 UPSTREAM: ravb: Fix up dma_free_coherent() call in ravb_remove()
e6ddd6b656 UPSTREAM: netfilter: ipset: Fix race between IPSET_CMD_CREATE and IPSET_CMD_SWAP
e39ee80b1a UPSTREAM: net: xfrm: Fix xfrm_address_filter OOB read
40439d12b8 UPSTREAM: igb: set max size RX buffer when store bad packet is enabled
44ddc37b10 UPSTREAM: netfilter: nfnetlink_osf: avoid OOB read
ede2f9b7e5 ANDROID: abi_gki_aarch64_qcom: Add wait_for_device_probe symbol
db2f2cb88d UPSTREAM: netfilter: xt_sctp: validate the flag_info count
cba41df427 UPSTREAM: netfilter: xt_u32: validate user space input
4865c22b17 UPSTREAM: net/sched: Retire rsvp classifier
f8424e6555 UPSTREAM: ipv4: fix null-deref in ipv4_link_failure
cd7744312f UPSTREAM: netfilter: nf_tables: disallow rule removal from chain binding
6cd0cd3893 UPSTREAM: netfilter: nf_tables: report use refcount overflow
99d2841c81 UPSTREAM: usb: gadget: ncm: Handle decoding of multiple NTB's in unwrap call
3cf9365660 Merge tag 'android12-5.10.189_r00' into android12-5.10
785004172f ANDROID: ABI: Update oplus symbol list
d3961f624b ANDROID: vendor_hooks: Add hooks for oem percpu-rwsem optimaton
180ea0f6d4 UPSTREAM: net: sched: sch_qfq: Fix UAF in qfq_dequeue()
6b6202b762 UPSTREAM: net/sched: sch_hfsc: Ensure inner classes have fsc curve
ff86e87c7e FROMGIT: f2fs: split initial and dynamic conditions for extent_cache
e22810eb27 ANDROID: GKI: Update moto symbols list
3718ea1e77 ANDROID: vendor_hook: Add hook to skip swapcache
909a83a97d ANDROID: GKI: Update symbols to symbol list
6356ed35b9 ANDROID: add vendor hook of add/delete/iterate node for swap_avail_heads
bd34b88730 UPSTREAM: tty: n_gsm: fix the UAF caused by race condition in gsm_cleanup_mux
7ba85ae103 UPSTREAM: netfilter: nf_tables: prevent OOB access in nft_byteorder_eval
9f68d2976a UPSTREAM: net/sched: sch_qfq: account for stab overhead in qfq_enqueue
88525fa7d9 ANDROID: GKI update xiaomi symbols list
1a6995cf14 UPSTREAM: af_unix: Fix null-ptr-deref in unix_stream_sendpage().
37d958fa88 UPSTREAM: USB: dwc3: fix use-after-free on core driver unbind
6388400ba5 UPSTREAM: xhci: Fix incorrect tracking of free space on transfer rings
bc78acae08 UPSTREAM: kfence: avoid passing -g for test
b3bad92280 UPSTREAM: coresight: etm4x: Do not access TRCIDR1 for identification
b155bf3086 UPSTREAM: usb: typec: tcpm: fix warning when handle discover_identity message
72a7bbb4a3 UPSTREAM: f2fs: fix to update age extent in f2fs_do_zero_range()
1e1986dd44 UPSTREAM: f2fs: fix to update age extent correctly during truncation
3e9ba88034 BACKPORT: f2fs: fix to do sanity check on extent cache correctly
e95427763f UPSTREAM: net/sched: sch_qfq: refactor parsing of netlink parameters
216e24b08c BACKPORT: f2fs: don't reset unchangable mount option in f2fs_remount()
a96eb7c2ca ANDROID: arm64: dts: qcom: sdm845-db845c: Do not point MDSS to the boot splash memory region
ad067abfe1 BACKPORT: net: nfc: Fix use-after-free caused by nfc_llcp_find_local
00c7bec87c ANDROID: ABI: Update oplus symbol list (ashmem: is_ashmem_file: Export is_ashmem_file function which will be used by the minidump module to get ashmem info.)
300d1ff660 ANDROID: GKI: Update abi_gki_aarch64_qcom for page_owner symbols
a9c0f62260 ANDROID: mm: Export page_owner_inited and __set_page_owner
d26e92d681 FROMGIT: pstore/ram: Check start of empty przs during init
966df87a17 UPSTREAM: exfat: check if filename entries exceeds max filename length
dcb17f36f3 BACKPORT: FROMGIT: netfilter: nfnetlink_log: always add a timestamp
a8b58500cb FROMGIT: arm64: dts: qcom: sdm845-db845c: Mark cont splash memory region as reserved
8001debfc1 UPSTREAM: media: usb: siano: Fix warning due to null work_func_t function pointer
8e682bb18a UPSTREAM: Bluetooth: L2CAP: Fix use-after-free in l2cap_sock_ready_cb
14ce45e3c7 ANDROID: ABI: Update oplus symbol list
98a66e87c1 ANDROID: Export symbols to do reverse mapping within memcg in kernel modules.
8af1bc5622 ANDROID: GKI: export symbols to modify lru stats
b019a989a2 UPSTREAM: net: tap_open(): set sk_uid from current_fsuid()
169c9f103f UPSTREAM: net: tun_chr_open(): set sk_uid from current_fsuid()
819a8605da UPSTREAM: netfilter: nf_tables: disallow rule addition to bound chain via NFTA_RULE_CHAIN_ID
f2545eebf0 BACKPORT: UPSTREAM: usb: dwc3: gadget: Execute gadget stop after halting the controller
6d38ae2f4c UPSTREAM: usb: dwc3: gadget: Stall and restart EP0 if host is unresponsive
6f01e099d8 UPSTREAM: net/sched: cls_route: No longer copy tcf_result on update to avoid use-after-free
0ebe76176b UPSTREAM: net/sched: cls_fw: No longer copy tcf_result on update to avoid use-after-free
45edbf4058 UPSTREAM: net/sched: cls_u32: No longer copy tcf_result on update to avoid use-after-free
e172f5cfc0 ANDROID: GKI: update xiaomi symbol list
28b82089b2 UPSTREAM: netfilter: nf_tables: skip bound chain on rule flush
bad8adda41 UPSTREAM: net/sched: cls_fw: Fix improper refcount update leads to use-after-free
84f8556570 UPSTREAM: tty: n_gsm: fix UAF in gsm_cleanup_mux
0fa8d8cd99 UPSTREAM: netfilter: nft_set_pipapo: fix improper element removal
6efa28db9a BACKPORT: FROMGIT: irqchip/gic-v3: Workaround for GIC-700 erratum 2941627
ec3f57af0a ANDROID: vendor_hook: fix the error record position of mutex
028e0fb3a4 FROMGIT: fs: drop_caches: draining pages before dropping caches
67f65c7764 ANDROID: GKI: Update symbols to symbol list
8e164a95fb ANDROID: GKI: Update symbols to symbol list
c4191be802 ANDROID: GKI: add symbol list file for moto
2427afff1e UPSTREAM: gfs2: Don't deref jdesc in evict
e933b7be1c UPSTREAM: media: dvb-core: Fix kernel WARNING for blocking operation in wait_event*()

Change-Id: I7b09cba3db8ae584fd0821bfbecf945e7d0bf9a4
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
@@ -113,6 +113,9 @@ stable kernels.
| ARM            | MMU-500         | #841119,826419  | N/A                         |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| ARM            | GIC-700         | #2941627        | ARM64_ERRATUM_2941627       |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
| Broadcom       | Brahma-B53      | N/A             | ARM64_ERRATUM_845719        |
+----------------+-----------------+-----------------+-----------------------------+
| Broadcom       | Brahma-B53      | N/A             | ARM64_ERRATUM_843419        |

File diff suppressed because it is too large

android/abi_gki_aarch64_moto (new file, 6 lines)
@@ -0,0 +1,6 @@
[abi_symbol_list]
  swp_swap_info
  __traceiter_android_vh_skip_swapcache
  __traceiter_android_vh_tune_mmap_readaround
  __tracepoint_android_vh_skip_swapcache
  __tracepoint_android_vh_tune_mmap_readaround

@@ -1357,6 +1357,7 @@
  irq_work_queue
  irq_work_queue_on
  irq_work_sync
  is_ashmem_file
  is_dma_buf_file
  isolate_and_split_free_page
  isolate_anon_lru_page
@@ -1549,6 +1550,7 @@
  __memcat_p
  memcg_kmem_enabled_key
  mem_cgroup_from_id
  mem_cgroup_update_lru_size
  memchr
  memchr_inv
  memcmp
@@ -1616,6 +1618,7 @@
  mmc_send_tuning
  mmput
  mod_delayed_work_on
  __mod_lruvec_state
  mod_node_page_state
  mod_timer
  mod_timer_pending
@@ -1815,6 +1818,7 @@
  __page_mapcount
  page_mapping
  __page_pinner_migration_failed
  page_referenced
  page_symlink
  page_to_lruvec
  panic
@@ -2253,6 +2257,7 @@
  rndis_set_param_vendor
  rndis_signal_connect
  rndis_uninit
  root_mem_cgroup
  root_task_group
  round_jiffies
  round_jiffies_relative
@@ -2879,6 +2884,7 @@
  __traceiter_android_vh_process_killed
  __traceiter_android_vh_record_mutex_lock_starttime
  __traceiter_android_vh_record_pcpu_rwsem_starttime
  __traceiter_android_vh_percpu_rwsem_wq_add
  __traceiter_android_vh_record_rtmutex_lock_starttime
  __traceiter_android_vh_record_rwsem_lock_starttime
  __traceiter_android_vh_remove_vmalloc_stack
@@ -3146,6 +3152,7 @@
  __tracepoint_android_vh_process_killed
  __tracepoint_android_vh_record_mutex_lock_starttime
  __tracepoint_android_vh_record_pcpu_rwsem_starttime
  __tracepoint_android_vh_percpu_rwsem_wq_add
  __tracepoint_android_vh_record_rtmutex_lock_starttime
  __tracepoint_android_vh_record_rwsem_lock_starttime
  __tracepoint_android_vh_remove_vmalloc_stack

@@ -1659,6 +1659,7 @@
  overflowuid
  page_endio
  page_mapping
  page_owner_inited
  __page_pinner_migration_failed
  __pagevec_release
  panic
@@ -2187,6 +2188,7 @@
  set_normalized_timespec64
  set_page_dirty_lock
  __SetPageMovable
  __set_page_owner
  set_task_cpu
  set_user_nice
  sg_alloc_table
@@ -3041,6 +3043,7 @@
  wait_for_completion_interruptible_timeout
  wait_for_completion_killable
  wait_for_completion_timeout
  wait_for_device_probe
  wait_on_page_bit
  __wait_rcu_gp
  wait_woken

@@ -9,12 +9,16 @@
  swap_alloc_cluster
  swapcache_free_entries
  swap_type_to_swap_info
  blkcg_schedule_throttle
  __traceiter_android_rvh_alloc_si
  __traceiter_android_rvh_alloc_swap_slot_cache
  __traceiter_android_rvh_drain_slots_cache_cpu
  __traceiter_android_rvh_free_swap_slot
  __traceiter_android_rvh_get_swap_page
  __traceiter_android_rvh_handle_pte_fault_end
  __traceiter_android_vh_add_to_avail_list
  __traceiter_android_vh_del_from_avail_list
  __traceiter_android_vh___cgroup_throttle_swaprate
  __traceiter_android_vh_account_swap_pages
  __traceiter_android_vh_alloc_si
  __traceiter_android_vh_alloc_swap_slot_cache
@@ -28,6 +32,7 @@
  __traceiter_android_vh_get_swap_page
  __traceiter_android_vh_handle_pte_fault_end
  __traceiter_android_vh_inactive_is_low
  __traceiter_android_vh_swap_avail_heads_init
  __traceiter_android_vh_init_swap_info_struct
  __traceiter_android_vh_migrate_page_states
  __traceiter_android_vh_page_isolated_for_reclaim
@@ -46,6 +51,9 @@
  __tracepoint_android_rvh_free_swap_slot
  __tracepoint_android_rvh_get_swap_page
  __tracepoint_android_rvh_handle_pte_fault_end
  __tracepoint_android_vh_add_to_avail_list
  __tracepoint_android_vh_del_from_avail_list
  __tracepoint_android_vh___cgroup_throttle_swaprate
  __tracepoint_android_vh_account_swap_pages
  __tracepoint_android_vh_alloc_si
  __tracepoint_android_vh_alloc_swap_slot_cache
@@ -59,6 +67,7 @@
  __tracepoint_android_vh_get_swap_page
  __tracepoint_android_vh_handle_pte_fault_end
  __tracepoint_android_vh_inactive_is_low
  __tracepoint_android_vh_swap_avail_heads_init
  __tracepoint_android_vh_init_swap_info_struct
  __tracepoint_android_vh_migrate_page_states
  __tracepoint_android_vh_page_isolated_for_reclaim

@@ -62,6 +62,14 @@
  ufshcd_update_evt_hist
  utf16s_to_utf8s
  wait_for_completion_io_timeout
  nr_free_buffer_pages
  mmc_set_blocklen

#required by mq-deadline module
  blk_mq_debugfs_rq_show
  seq_list_start
  seq_list_next
  __blk_mq_debugfs_rq_show

#required by cs35l41 module
  regcache_drop_region

android/abi_gki_aarch64_zebra (new file, 4 lines)
@@ -0,0 +1,4 @@
[abi_symbol_list]
  __traceiter_android_vh_wakeup_bypass
  __tracepoint_android_vh_wakeup_bypass
  tty_termios_hw_change

@@ -48,6 +48,7 @@ static inline u32 read_ ## a64(void) \
	return read_sysreg(a32); \
} \

CPUIF_MAP(ICC_EOIR1, ICC_EOIR1_EL1)
CPUIF_MAP(ICC_PMR, ICC_PMR_EL1)
CPUIF_MAP(ICC_AP0R0, ICC_AP0R0_EL1)
CPUIF_MAP(ICC_AP0R1, ICC_AP0R1_EL1)

@@ -85,6 +85,14 @@ hdmi_con: endpoint {
		};
	};

	reserved-memory {
		/* Cont splash region set up by the bootloader */
		cont_splash_mem: framebuffer@9d400000 {
			reg = <0x0 0x9d400000 0x0 0x2400000>;
			no-map;
		};
	};

	lt9611_1v8: lt9611-vdd18-regulator {
		compatible = "regulator-fixed";
		regulator-name = "LT9611_1V8";

@@ -1794,6 +1794,7 @@ void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
	current->use_memdelay = use_memdelay;
	set_notify_resume(current);
}
EXPORT_SYMBOL_GPL(blkcg_schedule_throttle);

/**
 * blkcg_add_delay - add delay to this blkg

@@ -22,6 +22,7 @@ android/abi_gki_aarch64_hikey960
android/abi_gki_aarch64_honor
android/abi_gki_aarch64_imx
android/abi_gki_aarch64_lenovo
android/abi_gki_aarch64_moto
android/abi_gki_aarch64_mtk
android/abi_gki_aarch64_nothing
android/abi_gki_aarch64_oplus
@@ -31,6 +32,7 @@ android/abi_gki_aarch64_unisoc
android/abi_gki_aarch64_virtual_device
android/abi_gki_aarch64_vivo
android/abi_gki_aarch64_xiaomi
android/abi_gki_aarch64_zebra
android/abi_gki_aarch64_asus
android/abi_gki_aarch64_transsion
android/abi_gki_aarch64_tuxera

@@ -106,6 +106,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_wakeup_ilocked);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_send_sig_info);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_process_killed);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_killed_process);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_percpu_rwsem_wq_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_wake);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_write_finished);
@@ -468,12 +469,16 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_free_swap_slot);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_swap_slot);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_get_swap_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_get_swap_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_add_to_avail_list);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_del_from_avail_list);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh___cgroup_throttle_swaprate);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_cold_or_pageout);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_isolated_for_reclaim);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_inactive_is_low);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_snapshot_refaults);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_account_swap_pages);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_unuse_swap_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_swap_avail_heads_init);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_init_swap_info_struct);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_si_swapinfo);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_alloc_si);
@@ -492,6 +497,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dma_buf_stats_teardown);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_cold_or_pageout_abort);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_compact_finished);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_wakeup_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_skip_swapcache);
/*
 * For type visibility
 */

@@ -960,25 +960,21 @@ static bool etm4_init_iomem_access(struct etmv4_drvdata *drvdata,
				   struct csdev_access *csa)
{
	u32 devarch = readl_relaxed(drvdata->base + TRCDEVARCH);
	u32 idr1 = readl_relaxed(drvdata->base + TRCIDR1);

	/*
	 * All ETMs must implement TRCDEVARCH to indicate that
	 * the component is an ETMv4. To support any broken
	 * implementations we fall back to TRCIDR1 check, which
	 * is not really reliable.
	 * the component is an ETMv4. Even though TRCIDR1 also
	 * contains the information, it is part of the "Trace"
	 * register and must be accessed with the OSLK cleared,
	 * with MMIO. But we cannot touch the OSLK until we are
	 * sure this is an ETM. So rely only on the TRCDEVARCH.
	 */
	if ((devarch & ETM_DEVARCH_ID_MASK) == ETM_DEVARCH_ETMv4x_ARCH) {
		drvdata->arch = etm_devarch_to_arch(devarch);
	} else {
		pr_warn("CPU%d: ETM4x incompatible TRCDEVARCH: %x, falling back to TRCIDR1\n",
			smp_processor_id(), devarch);

		if (ETM_TRCIDR1_ARCH_MAJOR(idr1) != ETM_TRCIDR1_ARCH_ETMv4)
			return false;
		drvdata->arch = etm_trcidr_to_arch(idr1);
	if ((devarch & ETM_DEVARCH_ID_MASK) != ETM_DEVARCH_ETMv4x_ARCH) {
		pr_warn_once("TRCDEVARCH doesn't match ETMv4 architecture\n");
		return false;
	}

	drvdata->arch = etm_devarch_to_arch(devarch);
	*csa = CSDEV_ACCESS_IOMEM(drvdata->base);
	return true;
}

@@ -668,14 +668,12 @@
 * TRCDEVARCH - CoreSight architected register
 *   - Bits[15:12] - Major version
 *   - Bits[19:16] - Minor version
 * TRCIDR1 - ETM architected register
 *   - Bits[11:8] - Major version
 *   - Bits[7:4] - Minor version
 * We must rely on TRCDEVARCH for the version information,
 * however we don't want to break the support for potential
 * old implementations which might not implement it. Thus
 * we fall back to TRCIDR1 if TRCDEVARCH is not implemented
 * for memory mapped components.
 *
 * We must rely only on TRCDEVARCH for the version information. Even though,
 * TRCIDR1 also provides the architecture version, it is a "Trace" register
 * and as such must be accessed only with Trace power domain ON. This may
 * not be available at probe time.
 *
 * Now to make certain decisions easier based on the version
 * we use an internal representation of the version in the
 * driver, as follows :
@@ -701,12 +699,6 @@ static inline u8 etm_devarch_to_arch(u32 devarch)
				ETM_DEVARCH_REVISION(devarch));
}

static inline u8 etm_trcidr_to_arch(u32 trcidr1)
{
	return ETM_ARCH_VERSION(ETM_TRCIDR1_ARCH_MAJOR(trcidr1),
				ETM_TRCIDR1_ARCH_MINOR(trcidr1));
}

enum etm_impdef_type {
	ETM4_IMPDEF_HISI_CORE_COMMIT,
	ETM4_IMPDEF_FEATURE_MAX,

@@ -51,6 +51,8 @@ struct redist_region {
	bool single_redist;
};

static DEFINE_STATIC_KEY_FALSE(gic_arm64_2941627_erratum);

static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

@@ -542,9 +544,39 @@ static void gic_irq_nmi_teardown(struct irq_data *d)
	gic_irq_set_prio(d, GICD_INT_DEF_PRI);
}

static bool gic_arm64_erratum_2941627_needed(struct irq_data *d)
{
	enum gic_intid_range range;

	if (!static_branch_unlikely(&gic_arm64_2941627_erratum))
		return false;

	range = get_intid_range(d);

	/*
	 * The workaround is needed if the IRQ is an SPI and
	 * the target cpu is different from the one we are
	 * executing on.
	 */
	return (range == SPI_RANGE || range == ESPI_RANGE) &&
		!cpumask_test_cpu(raw_smp_processor_id(),
				  irq_data_get_effective_affinity_mask(d));
}

static void gic_eoi_irq(struct irq_data *d)
{
	gic_write_eoir(gic_irq(d));
	write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
	isb();

	if (gic_arm64_erratum_2941627_needed(d)) {
		/*
		 * Make sure the GIC stream deactivate packet
		 * issued by ICC_EOIR1_EL1 has completed before
		 * deactivating through GICD_IACTIVER.
		 */
		dsb(sy);
		gic_poke_irq(d, GICD_ICACTIVER);
	}
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
@@ -555,7 +587,11 @@ static void gic_eoimode1_eoi_irq(struct irq_data *d)
	 */
	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;
	gic_write_dir(gic_irq(d));

	if (!gic_arm64_erratum_2941627_needed(d))
		gic_write_dir(gic_irq(d));
	else
		gic_poke_irq(d, GICD_ICACTIVER);
}

static int gic_set_type(struct irq_data *d, unsigned int type)
@@ -1642,6 +1678,12 @@ static bool gic_enable_quirk_hip06_07(void *data)
	return false;
}

static bool gic_enable_quirk_arm64_2941627(void *data)
{
	static_branch_enable(&gic_arm64_2941627_erratum);
	return true;
}

static const struct gic_quirk gic_quirks[] = {
	{
		.desc = "GICv3: Qualcomm MSM8996 broken firmware",
@@ -1678,6 +1720,25 @@ static const struct gic_quirk gic_quirks[] = {
		.mask = 0xe8f00fff,
		.init = gic_enable_quirk_cavium_38539,
	},
	{
		/*
		 * GIC-700: 2941627 workaround - IP variant [0,1]
		 *
		 */
		.desc = "GICv3: ARM64 erratum 2941627",
		.iidr = 0x0400043b,
		.mask = 0xff0e0fff,
		.init = gic_enable_quirk_arm64_2941627,
	},
	{
		/*
		 * GIC-700: 2941627 workaround - IP variant [2]
		 */
		.desc = "GICv3: ARM64 erratum 2941627",
		.iidr = 0x0402043b,
		.mask = 0xff0f0fff,
		.init = gic_enable_quirk_arm64_2941627,
	},
	{
	}
};

@@ -2179,8 +2179,10 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)

	/* Free up any link layer users and finally the control channel */
	for (i = NUM_DLCI - 1; i >= 0; i--)
		if (gsm->dlci[i])
		if (gsm->dlci[i]) {
			gsm_dlci_release(gsm->dlci[i]);
			gsm->dlci[i] = NULL;
		}
	mutex_unlock(&gsm->mutex);
	/* Now wipe the queues */
	tty_ldisc_flush(gsm->tty);

@@ -1741,6 +1741,11 @@ static int dwc3_remove(struct platform_device *pdev)
	pm_runtime_allow(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	/*
	 * HACK: Clear the driver data, which is currently accessed by parent
	 * glue drivers, before allowing the parent to suspend.
	 */
	platform_set_drvdata(pdev, NULL);
	pm_runtime_set_suspended(&pdev->dev);

	dwc3_free_event_buffers(dwc);

@@ -2512,29 +2512,17 @@ static int __dwc3_gadget_start(struct dwc3 *dwc);
static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc->connected = false;

	/*
	 * Per databook, when we want to stop the gadget, if a control transfer
	 * is still in process, complete it and get the core into setup phase.
	 * Attempt to end pending SETUP status phase, and not wait for the
	 * function to do so.
	 */
	if (dwc->ep0state != EP0_SETUP_PHASE) {
		int ret;

		if (dwc->delayed_status)
			dwc3_ep0_send_delayed_status(dwc);

		reinit_completion(&dwc->ep0_in_setup);

		spin_unlock_irqrestore(&dwc->lock, flags);
		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
		spin_lock_irqsave(&dwc->lock, flags);
		if (ret == 0)
			dev_warn(dwc->dev, "timed out waiting for SETUP phase\n");
	}
	if (dwc->delayed_status)
		dwc3_ep0_send_delayed_status(dwc);

	/*
	 * In the Synopsys DesignWare Cores USB3 Databook Rev. 3.30a
@@ -2544,9 +2532,35 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
	 * bit.
	 */
	dwc3_stop_active_transfers(dwc);
	__dwc3_gadget_stop(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	/*
	 * Per databook, when we want to stop the gadget, if a control transfer
	 * is still in process, complete it and get the core into setup phase.
	 * In case the host is unresponsive to a SETUP transaction, forcefully
	 * stall the transfer, and move back to the SETUP phase, so that any
	 * pending endxfers can be executed.
	 */
	if (dwc->ep0state != EP0_SETUP_PHASE) {
		reinit_completion(&dwc->ep0_in_setup);

		ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
				msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
		if (ret == 0) {
			unsigned int dir;

			dev_warn(dwc->dev, "wait for SETUP phase timed out\n");
			spin_lock_irqsave(&dwc->lock, flags);
			dir = !!dwc->ep0_expect_in;
			if (dwc->ep0state == EP0_DATA_PHASE)
				dwc3_ep0_end_control_data(dwc, dwc->eps[dir]);
			else
				dwc3_ep0_end_control_data(dwc, dwc->eps[!dir]);
			dwc3_ep0_stall_and_restart(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
		}
	}

	/*
	 * Note: if the GEVNTCOUNT indicates events in the event buffer, the
	 * driver needs to acknowledge them before the controller can halt.
@@ -2554,7 +2568,19 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc)
	 * remaining event generated by the controller while polling for
	 * DSTS.DEVCTLHLT.
	 */
	return dwc3_gadget_run_stop(dwc, false, false);
	ret = dwc3_gadget_run_stop(dwc, false, false);

	/*
	 * Stop the gadget after controller is halted, so that if needed, the
	 * events to update EP0 state can still occur while the run/stop
	 * routine polls for the halted state. DEVTEN is cleared as part of
	 * gadget stop.
	 */
	spin_lock_irqsave(&dwc->lock, flags);
	__dwc3_gadget_stop(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)

@@ -277,6 +277,26 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
	trace_xhci_inc_enq(ring);
}

static int xhci_num_trbs_to(struct xhci_segment *start_seg, union xhci_trb *start,
			    struct xhci_segment *end_seg, union xhci_trb *end,
			    unsigned int num_segs)
{
	union xhci_trb *last_on_seg;
	int num = 0;
	int i = 0;

	do {
		if (start_seg == end_seg && end >= start)
			return num + (end - start);
		last_on_seg = &start_seg->trbs[TRBS_PER_SEGMENT - 1];
		num += last_on_seg - start;
		start_seg = start_seg->next;
		start = start_seg->trbs;
	} while (i++ <= num_segs);

	return -EINVAL;
}

/*
 * Check to see if there's room to enqueue num_trbs on the ring and make sure
 * enqueue pointer will not advance into dequeue segment. See rules above.
@@ -2209,6 +2229,7 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
		     u32 trb_comp_code)
{
	struct xhci_ep_ctx *ep_ctx;
	int trbs_freed;

	ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);

@@ -2280,9 +2301,15 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep,
	}

	/* Update ring dequeue pointer */
	trbs_freed = xhci_num_trbs_to(ep_ring->deq_seg, ep_ring->dequeue,
				      td->last_trb_seg, td->last_trb,
				      ep_ring->num_segs);
	if (trbs_freed < 0)
		xhci_dbg(xhci, "Failed to count freed trbs at TD finish\n");
	else
		ep_ring->num_trbs_free += trbs_freed;
	ep_ring->dequeue = td->last_trb;
	ep_ring->deq_seg = td->last_trb_seg;
	ep_ring->num_trbs_free += td->num_trbs - 1;
	inc_deq(xhci, ep_ring);

	return xhci_td_cleanup(xhci, td, ep_ring, td->status);

@@ -1444,10 +1444,18 @@ static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
			   const u32 *data, int cnt)
{
	u32 vdo_hdr = port->vdo_data[0];

	WARN_ON(!mutex_is_locked(&port->lock));

	/* Make sure we are not still processing a previous VDM packet */
	WARN_ON(port->vdm_state > VDM_STATE_DONE);
	/* If is sending discover_identity, handle received message first */
	if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
		port->send_discover = true;
		mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
	} else {
		/* Make sure we are not still processing a previous VDM packet */
		WARN_ON(port->vdm_state > VDM_STATE_DONE);
	}

	port->vdo_count = cnt + 1;
	port->vdo_data[0] = header;
@@ -1965,11 +1973,13 @@ static void vdm_run_state_machine(struct tcpm_port *port)
		switch (PD_VDO_CMD(vdo_hdr)) {
		case CMD_DISCOVER_IDENT:
			res = tcpm_ams_start(port, DISCOVER_IDENTITY);
			if (res == 0)
			if (res == 0) {
				port->send_discover = false;
			else if (res == -EAGAIN)
			} else if (res == -EAGAIN) {
				port->vdo_data[0] = 0;
				mod_send_discover_delayed_work(port,
							       SEND_DISCOVER_RETRY_MS);
			}
			break;
		case CMD_DISCOVER_SVID:
			res = tcpm_ams_start(port, DISCOVER_SVIDS);
@@ -2052,6 +2062,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
		unsigned long timeout;

		port->vdm_retries = 0;
		port->vdo_data[0] = 0;
		port->vdm_state = VDM_STATE_BUSY;
		timeout = vdm_ready_timeout(vdo_hdr);
		mod_vdm_delayed_work(port, timeout);

@@ -9,6 +9,7 @@
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>
#include <linux/swap.h>
#include "internal.h"

/* A global variable is a bit ugly, but it keeps the code simple */
@@ -58,6 +59,7 @@ int drop_caches_sysctl_handler(struct ctl_table *table, int write,
	static int stfu;

	if (sysctl_drop_caches & 1) {
		lru_add_drain_all();
		iterate_supers(drop_pagecache_sb, NULL);
		count_vm_event(DROP_PAGECACHE);
	}

@@ -41,41 +41,14 @@ static void __set_extent_info(struct extent_info *ei,
	}
}

static bool __may_read_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!test_opt(sbi, READ_EXTENT_CACHE))
		return false;
	if (is_inode_flag_set(inode, FI_NO_EXTENT))
		return false;
	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
			!f2fs_sb_has_readonly(sbi))
		return false;
	return S_ISREG(inode->i_mode);
}

static bool __may_age_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!test_opt(sbi, AGE_EXTENT_CACHE))
		return false;
	/* don't cache block age info for cold file */
	if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
		return false;
	if (file_is_cold(inode))
		return false;

	return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
}

static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
{
	if (type == EX_READ)
		return __may_read_extent_tree(inode);
	else if (type == EX_BLOCK_AGE)
		return __may_age_extent_tree(inode);
		return test_opt(F2FS_I_SB(inode), READ_EXTENT_CACHE) &&
			S_ISREG(inode->i_mode);
	if (type == EX_BLOCK_AGE)
		return test_opt(F2FS_I_SB(inode), AGE_EXTENT_CACHE) &&
			(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode));
	return false;
}

@@ -88,7 +61,22 @@ static bool __may_extent_tree(struct inode *inode, enum extent_type type)
	if (list_empty(&F2FS_I_SB(inode)->s_list))
		return false;

	return __init_may_extent_tree(inode, type);
	if (!__init_may_extent_tree(inode, type))
		return false;

	if (type == EX_READ) {
		if (is_inode_flag_set(inode, FI_NO_EXTENT))
			return false;
		if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
				!f2fs_sb_has_readonly(F2FS_I_SB(inode)))
			return false;
	} else if (type == EX_BLOCK_AGE) {
		if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
			return false;
		if (file_is_cold(inode))
			return false;
	}
	return true;
}

static void __try_update_largest_extent(struct extent_tree *et,

@@ -608,7 +608,7 @@ void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_read_extent_cache_range(dn, fofs, 0, len);
		f2fs_update_age_extent_cache_range(dn, fofs, nr_free);
		f2fs_update_age_extent_cache_range(dn, fofs, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;
@@ -1432,6 +1432,7 @@ static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
	}

	f2fs_update_read_extent_cache_range(dn, start, 0, index - start);
	f2fs_update_age_extent_cache_range(dn, start, index - start);

	return ret;
}

@@ -401,11 +401,6 @@ static int do_read_inode(struct inode *inode)
		fi->i_inline_xattr_size = 0;
	}

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		return -EFSCORRUPTED;
	}

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);
@@ -472,6 +467,11 @@ static int do_read_inode(struct inode *inode)
	f2fs_init_read_extent_tree(inode, node_page);
	f2fs_init_age_extent_tree(inode);

	if (!sanity_check_inode(inode, node_page)) {
		f2fs_put_page(node_page, 1);
		return -EFSCORRUPTED;
	}

	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);

@@ -1927,9 +1927,17 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
	return 0;
}

static void default_options(struct f2fs_sb_info *sbi)
static void default_options(struct f2fs_sb_info *sbi, bool remount)
{
	/* init some FS parameters */
	if (!remount) {
		set_opt(sbi, READ_EXTENT_CACHE);
		clear_opt(sbi, DISABLE_CHECKPOINT);

		if (f2fs_hw_support_discard(sbi) || f2fs_hw_should_discard(sbi))
			set_opt(sbi, DISCARD);
	}

	if (f2fs_sb_has_readonly(sbi))
		F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
	else
@@ -1953,14 +1961,11 @@
	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, READ_EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	clear_opt(sbi, DISABLE_CHECKPOINT);
	set_opt(sbi, MERGE_CHECKPOINT);
	F2FS_OPTION(sbi).unusable_cap = 0;
	sbi->sb->s_flags |= SB_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	set_opt(sbi, DISCARD);
	if (f2fs_sb_has_blkzoned(sbi))
		F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
	else
@@ -2124,7 +2129,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
		clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi);
	default_options(sbi, true);

	/* parse mount options */
	err = parse_options(sb, data, true);
@@ -3891,7 +3896,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
	sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
					 sizeof(raw_super->uuid));

	default_options(sbi);
	default_options(sbi, false);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {

@@ -99,6 +99,12 @@ DECLARE_HOOK(android_vh_record_pcpu_rwsem_starttime,
	TP_PROTO(struct task_struct *tsk, unsigned long settime_jiffies),
	TP_ARGS(tsk, settime_jiffies));

struct percpu_rw_semaphore;
DECLARE_HOOK(android_vh_percpu_rwsem_wq_add,
	TP_PROTO(struct percpu_rw_semaphore *sem, bool reader),
	TP_ARGS(sem, reader));


/* macro versions of hooks are no longer required */

#endif /* _TRACE_HOOK_DTASK_H */

@@ -287,6 +287,15 @@ DECLARE_HOOK(android_vh_get_swap_page,
	TP_PROTO(struct page *page, swp_entry_t *entry,
		 struct swap_slots_cache *cache, bool *found),
	TP_ARGS(page, entry, cache, found));
DECLARE_HOOK(android_vh_add_to_avail_list,
	TP_PROTO(struct swap_info_struct *p, bool *skip),
	TP_ARGS(p, skip));
DECLARE_HOOK(android_vh_del_from_avail_list,
	TP_PROTO(struct swap_info_struct *p, bool *skip),
	TP_ARGS(p, skip));
DECLARE_HOOK(android_vh___cgroup_throttle_swaprate,
	TP_PROTO(int nid, bool *skip),
	TP_ARGS(nid, skip));
DECLARE_HOOK(android_vh_madvise_cold_or_pageout,
	TP_PROTO(struct vm_area_struct *vma, bool *allow_shared),
	TP_ARGS(vma, allow_shared));
@@ -299,6 +308,9 @@ DECLARE_HOOK(android_vh_account_swap_pages,
DECLARE_HOOK(android_vh_unuse_swap_page,
	TP_PROTO(struct swap_info_struct *si, struct page *page),
	TP_ARGS(si, page));
DECLARE_HOOK(android_vh_swap_avail_heads_init,
	TP_PROTO(struct plist_head *swap_avail_heads),
	TP_ARGS(swap_avail_heads));
DECLARE_HOOK(android_vh_init_swap_info_struct,
	TP_PROTO(struct swap_info_struct *p, struct plist_head *swap_avail_heads),
	TP_ARGS(p, swap_avail_heads));
@@ -344,6 +356,9 @@ DECLARE_HOOK(android_vh_compact_finished,
DECLARE_HOOK(android_vh_madvise_cold_or_pageout_abort,
	TP_PROTO(struct vm_area_struct *vma, bool *abort_madvise),
	TP_ARGS(vma, abort_madvise));
DECLARE_HOOK(android_vh_skip_swapcache,
	TP_PROTO(swp_entry_t entry, bool *skip),
	TP_ARGS(entry, skip));
/* macro versions of hooks are no longer required */

#endif /* _TRACE_HOOK_MM_H */

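For context only (this is not part of the merge): a minimal sketch of how a vendor module typically attaches to one of the vendor hooks declared above. DECLARE_HOOK() is built on top of regular tracepoints, so a register_trace_android_vh_<name>() helper is generated for each hook; the module name, handler name, and the policy below are hypothetical.

#include <linux/module.h>
#include <linux/swap.h>
#include <trace/hooks/mm.h>

/*
 * Hypothetical handler: the first argument is the registration cookie,
 * the rest mirror the hook's TP_PROTO(swp_entry_t entry, bool *skip).
 */
static void example_skip_swapcache(void *data, swp_entry_t entry, bool *skip)
{
	/* Example policy only: never skip the swapcache. */
	*skip = false;
}

static int __init example_init(void)
{
	/* register_trace_<hook>() is generated by DECLARE_HOOK(). */
	return register_trace_android_vh_skip_swapcache(example_skip_swapcache,
							NULL);
}

static void __exit example_exit(void)
{
	unregister_trace_android_vh_skip_swapcache(example_skip_swapcache,
						   NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
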
@@ -761,8 +761,10 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
	if (__mutex_unlock_fast(lock)) {
		trace_android_vh_record_mutex_lock_starttime(current, 0);
		return;
	}
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
	trace_android_vh_record_mutex_lock_starttime(current, 0);

@@ -166,6 +166,7 @@ static void percpu_rwsem_wait(struct percpu_rw_semaphore *sem, bool reader)
	if (wait) {
		wq_entry.flags |= WQ_FLAG_EXCLUSIVE | reader * WQ_FLAG_CUSTOM;
		__add_wait_queue_entry_tail(&sem->waiters, &wq_entry);
		trace_android_vh_percpu_rwsem_wq_add(sem, reader);
	}
	spin_unlock_irq(&sem->waiters.lock);

@@ -2,5 +2,5 @@

obj-$(CONFIG_KFENCE) := core.o report.o

CFLAGS_kfence_test.o := -g -fno-omit-frame-pointer -fno-optimize-sibling-calls
CFLAGS_kfence_test.o := -fno-omit-frame-pointer -fno-optimize-sibling-calls
obj-$(CONFIG_KFENCE_KUNIT_TEST) += kfence_test.o

@@ -73,6 +73,7 @@ struct cgroup_subsys memory_cgrp_subsys __read_mostly;
EXPORT_SYMBOL(memory_cgrp_subsys);

struct mem_cgroup *root_mem_cgroup __read_mostly;
EXPORT_SYMBOL_GPL(root_mem_cgroup);

/* Active memory cgroup to use from an interrupt context */
DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg);
@@ -858,6 +859,7 @@ void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
	if (!mem_cgroup_disabled())
		__mod_memcg_lruvec_state(lruvec, idx, val);
}
EXPORT_SYMBOL_GPL(__mod_lruvec_state);

void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
{
@@ -1442,6 +1444,7 @@ void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
	if (nr_pages > 0)
		*lru_size += nr_pages;
}
EXPORT_SYMBOL_GPL(mem_cgroup_update_lru_size);

/**
 * mem_cgroup_margin - calculate chargeable space of a memory cgroup

@@ -3663,8 +3663,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)

	if (!page) {
		struct swap_info_struct *si = swp_swap_info(entry);
		bool skip_swapcache = false;

		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
		trace_android_vh_skip_swapcache(entry, &skip_swapcache);
		if ((data_race(si->flags & SWP_SYNCHRONOUS_IO) || skip_swapcache) &&
		    __swap_count(entry) == 1) {
			/* skip swapcache */
			gfp_t flags = GFP_HIGHUSER_MOVABLE;

@@ -33,6 +33,7 @@ struct page_owner {

bool page_owner_enabled;
DEFINE_STATIC_KEY_FALSE(page_owner_inited);
EXPORT_SYMBOL_GPL(page_owner_inited);

static depot_stack_handle_t dummy_handle;
static depot_stack_handle_t failure_handle;
@@ -222,6 +223,7 @@ noinline void __set_page_owner(struct page *page, unsigned int order,
	__set_page_owner_handle(page, page_ext, handle, order, gfp_mask);
	page_ext_put(page_ext);
}
EXPORT_SYMBOL_GPL(__set_page_owner);

void __set_page_owner_migrate_reason(struct page *page, int reason)
{

@@ -915,6 +915,7 @@ int page_referenced(struct page *page,

	return rwc.contended ? -1 : pra.referenced;
}
EXPORT_SYMBOL_GPL(page_referenced);

static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			     unsigned long address, void *arg)

@@ -675,6 +675,12 @@ static void __del_from_avail_list(struct swap_info_struct *p)

static void del_from_avail_list(struct swap_info_struct *p)
{
	bool skip = false;

	trace_android_vh_del_from_avail_list(p, &skip);
	if (skip)
		return;

	spin_lock(&swap_avail_lock);
	__del_from_avail_list(p);
	spin_unlock(&swap_avail_lock);
@@ -700,6 +706,11 @@ static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
static void add_to_avail_list(struct swap_info_struct *p)
{
	int nid;
	bool skip = false;

	trace_android_vh_add_to_avail_list(p, &skip);
	if (skip)
		return;

	spin_lock(&swap_avail_lock);
	for_each_node(nid) {
@@ -3394,6 +3405,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
	if (swap_flags & SWAP_FLAG_PREFER)
		prio =
		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;

	trace_android_vh_swap_avail_heads_init(swap_avail_heads);
	enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);

	trace_android_vh_init_swap_info_struct(p, swap_avail_heads);
@@ -3848,6 +3861,7 @@ void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
	struct swap_info_struct *si, *next;
	int nid = page_to_nid(page);
	bool skip = false;

	if (!(gfp_mask & __GFP_IO))
		return;
@@ -3862,6 +3876,10 @@ void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
	if (current->throttle_queue)
		return;

	trace_android_vh___cgroup_throttle_swaprate(nid, &skip);
	if (skip)
		return;

	spin_lock(&swap_avail_lock);
	plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
				  avail_lists[nid]) {

@@ -583,9 +583,9 @@ __build_packet_message(struct nfnl_log_net *log,
			goto nla_put_failure;
	}

	if (hooknum <= NF_INET_FORWARD && skb->tstamp) {
	if (hooknum <= NF_INET_FORWARD) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timespec64 kts = ktime_to_timespec64(skb->tstamp);
		struct timespec64 kts = ktime_to_timespec64(skb->tstamp ?: ktime_get_real());
		ts.sec = cpu_to_be64(kts.tv_sec);
		ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC);

@@ -1020,6 +1020,10 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		if (parent == NULL)
			return -ENOENT;
	}
	if (!(parent->cl_flags & HFSC_FSC) && parent != &q->root) {
		NL_SET_ERR_MSG(extack, "Invalid parent - parent class must have FSC");
		return -EINVAL;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;