Merge branch 'android12-5.10' into branch 'android12-5.10-lts'

Sync up with android12-5.10 for the following commits:

5545801f5c ANDROID: abi_gki_aarch64_qcom: Add android_vh_madvise_cold_or_pageout
d195c9f2bb ANDROID: force struct page_vma_mapped_walk to be defined in KMI
464a3706e6 Merge "Merge tag 'android12-5.10.136_r00' into android12-5.10" into android12-5.10
18cd39b706 Merge tag 'android12-5.10.136_r00' into android12-5.10
6d04d8ce90 ANDROID: vendor_hooks: Allow shared pages reclaim via MADV_PAGEOUT
2d8afda40e UPSTREAM: usb: gadget: mass_storage: Fix cdrom data transfers on MAC-OS
4135365b5d ANDROID: GKI: Update symbols to symbol list
c6f7a0ebd8 ANDROID: make sure all types for hooks are defined in KMI
b9ac329a83 ANDROID: force struct selinux_state to be defined in KMI
b71060e6eb BACKPORT: erofs: fix use-after-free of on-stack io[]
ecf5583fc7 ANDROID: GKI: Update symbols to symbol list
5c5b7a4da6 ANDROID: vendor_hook: rename the the name of hooks
2c625a20c0 ANDROID: ABI: Add extcon_get_property_capability symbol
72b1f9fd16 Revert "ANDROID: arm64: debug-monitors: export break hook APIs"
cc51dcbc60 Revert "ANDROID: vendor_hooks:vendor hook for __alloc_pages_slowpath."
9072e986bd Revert "ANDROID: Export functions to be used with dma_map_ops in modules"
2fc96f32ee FROMLIST: f2fs: let FI_OPU_WRITE override FADVISE_COLD_BIT
06b301069f ANDROID: remove unused xhci_get_endpoint_address export
bcf6dddd97 ANDROID: incfs: Add check for ATTR_KILL_SUID and ATTR_MODE in incfs_setattr
d915364e92 ANDROID: GKI: Update symbols to symbol list
db2516ff46 ANDROID: vendor_hooks: Add hooks for lookaround
feedd14d14 Revert "Revert "ANDROID: add for tuning readahead size""
9252f4d58b ANDROID: transsion: Update the ABI xml and symbol list
f50f24e781 ANDROID: vendor_hooks: Add hooks for lookaround
c762f435c0 BACKPORT: dm verity: set DM_TARGET_IMMUTABLE feature flag
2bd9e6cddc BACKPORT: pipe: Fix missing lock in pipe_resize_ring()
d7586fa209 BACKPORT: KVM: x86: avoid calling x86 emulator without a decoded instruction
cee231f83b ANDROID: GKI: add symbols in android/abi_gki_aarch64_oplus
8aaba3c5a1 BACKPORT: watchqueue: make sure to serialize 'wqueue->defunct' properly
7351343bc8 ANDROID: GKI: Update symbol list for Exynos SoC
9527907814 UPSTREAM: usb: dwc3: gadget: Avoid duplicate requests to enable Run/Stop
bda2986f13 UPSTREAM: usb: typec: ucsi: Acknowledge the GET_ERROR_STATUS command completion
eef3b6ff41 BACKPORT: scsi: ufs: core: Increase fDeviceInit poll frequency
eaa7364bf7 FROMGIT: f2fs: increase the limit for reserve_root
42aa1955c2 FROMGIT: f2fs: complete checkpoints during remount
1c5313a9f7 FROMGIT: f2fs: flush pending checkpoints when freezing super
604f2f5656 BACKPORT: f2fs: don't get FREEZE lock in f2fs_evict_inode in frozen fs
594835143a BACKPORT: f2fs: introduce F2FS_IPU_HONOR_OPU_WRITE ipu policy
85aff72329 Revert "ANDROID: GKI: signal: Export for __lock_task_sighand"
22b447e9bd BACKPORT: f2fs: invalidate meta pages only for post_read required inode
fa0cdb5b9d BACKPORT: f2fs: fix to invalidate META_MAPPING before DIO write
2301307412 BACKPORT: f2fs: invalidate META_MAPPING before IPU/DIO write
99f0160022 ANDROID: mm: page_pinner: use page_ext_get/put() to work with page_ext
2b3f9b8187 FROMLIST: mm: fix use-after free of page_ext after race with memory-offline
dec2f52d08 ANDROID: vendor_hooks:vendor hook for __alloc_pages_slowpath.
bc08447eb7 ANDROID: GKI: rockchip: add symbol netif_set_xps_queue
3f90d4f1f3 ANDROID: GKI: Update symbol list
7b0822a261 Revert "ANDROID: vendor_hooks: tune reclaim scan type for specified mem_cgroup"
425c0f18ed ANDROID: Fix a build warning inside early_memblock_nomap
84a0d243b6 ANDROID: mm/memory_hotplug: Fix error path handling
98e5fb34d1 Revert "ANDROID: add for tuning readahead size"
486580ffb5 Revert "ANDROID: vendor_hooks: Add hooks for mutex"

Update the .xml file to add the following new symbols that are now
being tracked in the android12-5.10 branch:

Leaf changes summary: 40 artifacts changed
Changed leaf types summary: 0 leaf type changed
Removed/Changed/Added functions summary: 1 Removed, 0 Changed, 18 Added functions
Removed/Changed/Added variables summary: 1 Removed, 0 Changed, 20 Added variables

1 Removed function:

  [D] 'function int __traceiter_android_vh_record_percpu_rwsem_lock_starttime(void*, task_struct*, unsigned long int)'

18 Added functions:

  [A] 'function void __page_frag_cache_drain(page*, unsigned int)'
  [A] 'function int __traceiter_android_vh_check_page_look_around_ref(void*, page*, int*)'
  [A] 'function int __traceiter_android_vh_do_futex(void*, int, unsigned int*, u32*)'
  [A] 'function int __traceiter_android_vh_futex_wait_end(void*, unsigned int, u32)'
  [A] 'function int __traceiter_android_vh_futex_wait_start(void*, unsigned int, u32)'
  [A] 'function int __traceiter_android_vh_futex_wake_this(void*, int, int, int, task_struct*)'
  [A] 'function int __traceiter_android_vh_futex_wake_traverse_plist(void*, plist_head*, int*, futex_key, u32)'
  [A] 'function int __traceiter_android_vh_futex_wake_up_q_finish(void*, int, int)'
  [A] 'function int __traceiter_android_vh_look_around(void*, page_vma_mapped_walk*, page*, vm_area_struct*, int*)'
  [A] 'function int __traceiter_android_vh_look_around_migrate_page(void*, page*, page*)'
  [A] 'function int __traceiter_android_vh_ra_tuning_max_page(void*, readahead_control*, unsigned long int*)'
  [A] 'function int __traceiter_android_vh_record_pcpu_rwsem_starttime(void*, task_struct*, unsigned long int)'
  [A] 'function int __traceiter_android_vh_test_clear_look_around_ref(void*, page*)'
  [A] 'function int extcon_get_property_capability(extcon_dev*, unsigned int, unsigned int)'
  [A] 'function int netif_set_xps_queue(net_device*, const cpumask*, u16)'
  [A] 'function void* page_frag_alloc(page_frag_cache*, unsigned int, gfp_t)'
  [A] 'function void page_frag_free(void*)'
  [A] 'function bool rng_is_initialized()'

1 Removed variable:

  [D] 'tracepoint __tracepoint_android_vh_record_percpu_rwsem_lock_starttime'

20 Added variables:

  [A] 'const gic_chip_data* GKI_struct_gic_chip_data'
  [A] 'selinux_state* GKI_struct_selinux_state'
  [A] 'const swap_slots_cache* GKI_struct_swap_slots_cache'
  [A] 'tracepoint __tracepoint_android_vh_check_page_look_around_ref'
  [A] 'tracepoint __tracepoint_android_vh_do_futex'
  [A] 'tracepoint __tracepoint_android_vh_futex_wait_end'
  [A] 'tracepoint __tracepoint_android_vh_futex_wait_start'
  [A] 'tracepoint __tracepoint_android_vh_futex_wake_this'
  [A] 'tracepoint __tracepoint_android_vh_futex_wake_traverse_plist'
  [A] 'tracepoint __tracepoint_android_vh_futex_wake_up_q_finish'
  [A] 'tracepoint __tracepoint_android_vh_look_around'
  [A] 'tracepoint __tracepoint_android_vh_look_around_migrate_page'
  [A] 'tracepoint __tracepoint_android_vh_madvise_cold_or_pageout'
  [A] 'tracepoint __tracepoint_android_vh_ra_tuning_max_page'
  [A] 'tracepoint __tracepoint_android_vh_record_pcpu_rwsem_starttime'
  [A] 'tracepoint __tracepoint_android_vh_test_clear_look_around_ref'
  [A] 'tracepoint __tracepoint_net_dev_queue'
  [A] 'tracepoint __tracepoint_net_dev_xmit'
  [A] 'tracepoint __tracepoint_netif_receive_skb'
  [A] 'tracepoint __tracepoint_netif_rx'

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: I300286a8e658793249f37797cd2ede7555dfacc9

@ -56,8 +56,9 @@ Description: Controls the in-place-update policy.
0x04 F2FS_IPU_UTIL
0x08 F2FS_IPU_SSR_UTIL
0x10 F2FS_IPU_FSYNC
0x20 F2FS_IPU_ASYNC,
0x20 F2FS_IPU_ASYNC
0x40 F2FS_IPU_NOCACHE
0x80 F2FS_IPU_HONOR_OPU_WRITE
==== =================
Refer segment.h for details.
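For example (illustrative arithmetic only, not part of the documented table): the small-volume tuning that this change applies in fs/f2fs/super.c combines the new bit with the force policy, which, with F2FS_IPU_FORCE being bit 0, works out to

	ipu_policy = (1 << F2FS_IPU_FORCE) | (1 << F2FS_IPU_HONOR_OPU_WRITE)
	           = 0x01 | 0x80
	           = 0x81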
@ -431,6 +432,7 @@ Description: Show status of f2fs superblock in real time.
0x800 SBI_QUOTA_SKIP_FLUSH skip flushing quota in current CP
0x1000 SBI_QUOTA_NEED_REPAIR quota file may be corrupted
0x2000 SBI_IS_RESIZEFS resizefs is in process
0x4000 SBI_IS_FREEZING freefs is in process
====== ===================== =================================
What: /sys/fs/f2fs/<disk>/ckpt_thread_ioprio

File diff suppressed because it is too large.


@ -144,6 +144,7 @@
clk_set_rate
clk_unprepare
clockevents_config_and_register
clocks_calc_mult_shift
__clocksource_register_scale
__close_fd
cma_alloc
@ -198,6 +199,8 @@
cpufreq_table_index_unsorted
cpufreq_this_cpu_can_update
cpufreq_unregister_notifier
cpu_hotplug_disable
cpu_hotplug_enable
__cpuhp_remove_state
__cpuhp_setup_state
__cpuhp_setup_state_cpuslocked
@ -235,6 +238,7 @@
crypto_shash_update
crypto_unregister_alg
crypto_unregister_scomp
csum_ipv6_magic
csum_partial
csum_tcpudp_nofold
_ctype
@ -385,6 +389,7 @@
dev_pm_opp_add
dev_pm_opp_disable
dev_pm_opp_find_freq_ceil
dev_pm_opp_find_freq_ceil_by_volt
dev_pm_opp_find_freq_exact
dev_pm_opp_find_freq_floor
dev_pm_opp_get_freq
@ -428,6 +433,7 @@
dma_buf_export
dma_buf_fd
dma_buf_get
dma_buf_get_flags
dma_buf_map_attachment
dma_buf_mmap
dma_buf_move_notify
@ -509,6 +515,7 @@
drm_atomic_add_affected_connectors
drm_atomic_add_affected_planes
drm_atomic_commit
drm_atomic_get_connector_state
drm_atomic_get_crtc_state
drm_atomic_get_plane_state
drm_atomic_get_private_obj_state
@ -552,6 +559,7 @@
drm_atomic_set_fb_for_plane
drm_atomic_set_mode_for_crtc
drm_atomic_state_alloc
drm_atomic_state_clear
__drm_atomic_state_free
drm_bridge_add
drm_bridge_attach
@ -584,6 +592,7 @@
drm_crtc_vblank_off
drm_crtc_vblank_on
drm_crtc_vblank_put
drm_crtc_wait_one_vblank
drm_cvt_mode
__drm_dbg
__drm_debug
@ -745,6 +754,7 @@
drm_vma_node_allow
drm_vma_node_is_allowed
drm_vma_node_revoke
drm_wait_one_vblank
drm_writeback_connector_init
drm_writeback_queue_job
drm_writeback_signal_completion
@ -824,6 +834,7 @@
__get_free_pages
get_net_ns_by_fd
get_net_ns_by_pid
get_options
get_random_bytes
get_random_u32
__get_task_comm
@ -966,6 +977,7 @@
input_set_capability
input_unregister_device
input_unregister_handle
int_pow
int_sqrt
iomem_resource
iommu_alloc_resv_region
@ -975,6 +987,7 @@
iommu_device_sysfs_remove
iommu_device_unlink
iommu_device_unregister
iommu_dma_enable_best_fit_algo
iommu_dma_reserve_iova
iommu_domain_alloc
iommu_fwspec_add_ids
@ -1009,6 +1022,7 @@
irq_domain_set_info
irq_domain_xlate_onetwocell
irq_domain_xlate_twocell
irq_do_set_affinity
irq_find_mapping
irq_get_irqchip_state
irq_get_irq_data
@ -1038,6 +1052,7 @@
kasan_flag_enabled
kasprintf
kernel_kobj
kernfs_path_from_node
kern_mount
kern_unmount
key_create_or_update
@ -1074,6 +1089,8 @@
kobject_uevent
kobject_uevent_env
krealloc
kset_create_and_add
kset_unregister
kstat
kstrdup
kstrndup
@ -1091,6 +1108,7 @@
kstrtoull_from_user
ksys_sync_helper
kthread_bind
kthread_bind_mask
kthread_cancel_delayed_work_sync
kthread_cancel_work_sync
kthread_create_on_node
@ -1120,6 +1138,8 @@
kvfree
kvfree_call_rcu
kvmalloc_node
led_classdev_register_ext
led_classdev_unregister
__list_add_valid
__list_del_entry_valid
list_sort
@ -1144,6 +1164,7 @@
__memcpy_toio
memdup_user
memmove
memory_read_from_buffer
memparse
memremap
memset
@ -1159,6 +1180,7 @@
mii_nway_restart
mipi_dsi_attach
mipi_dsi_compression_mode
mipi_dsi_dcs_get_display_brightness
mipi_dsi_dcs_read
mipi_dsi_dcs_set_column_address
mipi_dsi_dcs_set_display_brightness
@ -1316,6 +1338,7 @@
of_property_read_string_helper
of_property_read_u32_index
of_property_read_u64
of_property_read_u64_index
of_property_read_variable_u16_array
of_property_read_variable_u32_array
of_property_read_variable_u8_array
@ -1331,6 +1354,9 @@
oops_in_progress
orderly_poweroff
page_endio
page_frag_alloc
__page_frag_cache_drain
page_frag_free
page_mapping
__page_pinner_migration_failed
panic
@ -1378,9 +1404,11 @@
pci_read_config_dword
pci_read_config_word
__pci_register_driver
pci_release_regions
pci_release_resource
pci_rescan_bus
pci_resize_resource
pci_restore_msi_state
pci_restore_state
pci_save_state
pci_set_master
@ -1388,6 +1416,7 @@
pci_store_saved_state
pci_unmap_rom
pci_unregister_driver
pci_wake_from_d3
pci_write_config_dword
pci_write_config_word
PDE_DATA
@ -1418,6 +1447,7 @@
pinctrl_utils_free_map
pin_get_name
pin_user_pages
pin_user_pages_fast
pin_user_pages_remote
platform_bus_type
platform_device_add
@ -1506,6 +1536,8 @@
pwm_set_chip_data
queue_delayed_work_on
queue_work_on
radix_tree_delete
radix_tree_lookup
radix_tree_tagged
___ratelimit
raw_notifier_call_chain
@ -1647,11 +1679,13 @@
sched_set_normal
sched_setscheduler
sched_setscheduler_nocheck
sched_uclamp_used
schedule
schedule_timeout
schedule_timeout_interruptible
scnprintf
scsi_block_when_processing_errors
scsi_dma_unmap
scsi_eh_ready_devs
__scsi_execute
scsi_print_sense_hdr
@ -1712,6 +1746,7 @@
skb_realloc_headroom
skb_trim
smp_call_function
smp_call_function_any
smp_call_function_many
smp_call_function_single
smp_call_on_cpu
@ -1783,6 +1818,7 @@
snd_soc_info_volsw_range
snd_soc_info_volsw_sx
snd_soc_info_xr_sx
snd_soc_lookup_component
snd_soc_new_compress
snd_soc_of_get_dai_link_codecs
snd_soc_of_get_dai_name
@ -1832,6 +1868,7 @@
__stack_chk_guard
static_key_slow_dec
static_key_slow_inc
stop_machine
stop_one_cpu_nowait
stpcpy
strcasecmp
@ -1903,7 +1940,15 @@
tasklet_init
tasklet_kill
__tasklet_schedule
tasklet_setup
task_rq_lock
tcp_register_congestion_control
tcp_reno_cong_avoid
tcp_reno_ssthresh
tcp_reno_undo_cwnd
tcp_slow_start
tcp_unregister_congestion_control
thermal_cdev_update
thermal_cooling_device_unregister
thermal_of_cooling_device_register
thermal_zone_device_disable
@ -1925,11 +1970,13 @@
trace_event_reg
trace_handle_return
__traceiter_android_rvh_can_migrate_task
__traceiter_android_rvh_cpu_cgroup_attach
__traceiter_android_rvh_cpu_cgroup_can_attach
__traceiter_android_rvh_dequeue_task
__traceiter_android_rvh_enqueue_task
__traceiter_android_rvh_find_lowest_rq
__traceiter_android_rvh_find_new_ilb
__traceiter_android_rvh_flush_task
__traceiter_android_rvh_gic_v3_set_affinity
__traceiter_android_rvh_post_init_entity_util_avg
__traceiter_android_rvh_replace_next_task_fair
@ -1937,18 +1984,24 @@
__traceiter_android_rvh_sched_newidle_balance
__traceiter_android_rvh_sched_nohz_balancer_kick
__traceiter_android_rvh_sched_rebalance_domains
__traceiter_android_rvh_schedule
__traceiter_android_rvh_select_fallback_rq
__traceiter_android_rvh_select_task_rq_fair
__traceiter_android_rvh_select_task_rq_rt
__traceiter_android_rvh_wake_up_new_task
__traceiter_android_vh_cgroup_attach
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_do_wake_up_sync
__traceiter_android_vh_ipi_stop
__traceiter_android_vh_is_fpsimd_save
__traceiter_android_vh_scheduler_tick
__traceiter_android_vh_set_wake_flags
__traceiter_android_vh_show_mem
__traceiter_android_vh_ufs_check_int_errors
__traceiter_android_vh_ufs_compl_command
__traceiter_android_vh_ufs_prepare_command
__traceiter_android_vh_ufs_send_tm_command
__traceiter_cpu_idle
__traceiter_device_pm_callback_end
__traceiter_device_pm_callback_start
@ -1976,11 +2029,13 @@
__traceiter_workqueue_execute_start
trace_output_call
__tracepoint_android_rvh_can_migrate_task
__tracepoint_android_rvh_cpu_cgroup_attach
__tracepoint_android_rvh_cpu_cgroup_can_attach
__tracepoint_android_rvh_dequeue_task
__tracepoint_android_rvh_enqueue_task
__tracepoint_android_rvh_find_lowest_rq
__tracepoint_android_rvh_find_new_ilb
__tracepoint_android_rvh_flush_task
__tracepoint_android_rvh_gic_v3_set_affinity
__tracepoint_android_rvh_post_init_entity_util_avg
__tracepoint_android_rvh_replace_next_task_fair
@ -1988,18 +2043,24 @@
__tracepoint_android_rvh_sched_newidle_balance
__tracepoint_android_rvh_sched_nohz_balancer_kick
__tracepoint_android_rvh_sched_rebalance_domains
__tracepoint_android_rvh_schedule
__tracepoint_android_rvh_select_fallback_rq
__tracepoint_android_rvh_select_task_rq_fair
__tracepoint_android_rvh_select_task_rq_rt
__tracepoint_android_rvh_wake_up_new_task
__tracepoint_android_vh_cgroup_attach
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_do_wake_up_sync
__tracepoint_android_vh_ipi_stop
__tracepoint_android_vh_is_fpsimd_save
__tracepoint_android_vh_scheduler_tick
__tracepoint_android_vh_set_wake_flags
__tracepoint_android_vh_show_mem
__tracepoint_android_vh_ufs_check_int_errors
__tracepoint_android_vh_ufs_compl_command
__tracepoint_android_vh_ufs_prepare_command
__tracepoint_android_vh_ufs_send_tm_command
__tracepoint_cpu_idle
__tracepoint_device_pm_callback_end
__tracepoint_device_pm_callback_start
@ -2129,6 +2190,7 @@
unregister_shrinker
up
update_devfreq
update_rq_clock
up_read
up_write
usb_add_function
@ -2301,6 +2363,7 @@
wakeup_source_add
wakeup_source_destroy
wakeup_source_register
wakeup_source_remove
wakeup_source_unregister
__wake_up_sync
__wake_up_sync_key
@ -2322,6 +2385,7 @@
xhci_gen_setup
xhci_get_endpoint_index
xhci_get_ep_ctx
xhci_get_slot_ctx
xhci_init_driver
xhci_initialize_ring_info
xhci_link_segments


@ -112,6 +112,7 @@
_raw_spin_lock
_raw_spin_unlock
refcount_warn_saturate
rng_is_initialized
scatterwalk_ffwd
scatterwalk_map_and_copy
sg_init_one


@ -2684,6 +2684,7 @@
__traceiter_android_rvh_build_perf_domains
__traceiter_android_rvh_can_migrate_task
__traceiter_android_rvh_check_preempt_wakeup
__traceiter_android_rvh_check_preempt_tick
__traceiter_android_rvh_cpu_cgroup_attach
__traceiter_android_rvh_cpu_cgroup_online
__traceiter_android_rvh_cpu_overutilized
@ -2783,6 +2784,8 @@
__traceiter_android_vh_cpu_idle_enter
__traceiter_android_vh_cpu_idle_exit
__traceiter_android_vh_cpu_up
__traceiter_android_vh_check_page_look_around_ref
__traceiter_android_vh_do_futex
__traceiter_android_vh_do_send_sig_info
__traceiter_android_vh_drain_all_pages_bypass
__traceiter_android_vh_em_cpu_energy
@ -2797,6 +2800,11 @@
__traceiter_android_vh_ftrace_oops_exit
__traceiter_android_vh_ftrace_size_check
__traceiter_android_vh_futex_sleep_start
__traceiter_android_vh_futex_wait_end
__traceiter_android_vh_futex_wait_start
__traceiter_android_vh_futex_wake_this
__traceiter_android_vh_futex_wake_traverse_plist
__traceiter_android_vh_futex_wake_up_q_finish
__traceiter_android_vh_get_from_fragment_pool
__traceiter_android_vh_gpio_block_read
__traceiter_android_vh_handle_failed_page_trylock
@ -2810,6 +2818,8 @@
__traceiter_android_vh_killed_process
__traceiter_android_vh_kmalloc_slab
__traceiter_android_vh_logbuf
__traceiter_android_vh_look_around
__traceiter_android_vh_look_around_migrate_page
__traceiter_android_vh_mem_cgroup_alloc
__traceiter_android_vh_mem_cgroup_css_offline
__traceiter_android_vh_mem_cgroup_css_online
@ -2843,7 +2853,7 @@
__traceiter_android_vh_record_mutex_lock_starttime
__traceiter_android_vh_record_rtmutex_lock_starttime
__traceiter_android_vh_record_rwsem_lock_starttime
__traceiter_android_vh_record_percpu_rwsem_lock_starttime
__traceiter_android_vh_record_pcpu_rwsem_starttime
__traceiter_android_vh_rmqueue
__traceiter_android_vh_rwsem_init
__traceiter_android_vh_rwsem_mark_wake_readers
@ -2911,12 +2921,14 @@
__traceiter_suspend_resume
__traceiter_task_newtask
__traceiter_task_rename
__traceiter_android_vh_test_clear_look_around_ref
__traceiter_xhci_urb_giveback
__tracepoint_android_rvh_account_irq
__tracepoint_android_rvh_after_enqueue_task
__tracepoint_android_rvh_build_perf_domains
__tracepoint_android_rvh_can_migrate_task
__tracepoint_android_rvh_check_preempt_wakeup
__tracepoint_android_rvh_check_preempt_tick
__tracepoint_android_rvh_cpu_cgroup_attach
__tracepoint_android_rvh_cpu_cgroup_online
__tracepoint_android_rvh_cpu_overutilized
@ -3016,6 +3028,8 @@
__tracepoint_android_vh_cpu_idle_enter
__tracepoint_android_vh_cpu_idle_exit
__tracepoint_android_vh_cpu_up
__tracepoint_android_vh_check_page_look_around_ref
__tracepoint_android_vh_do_futex
__tracepoint_android_vh_do_send_sig_info
__tracepoint_android_vh_drain_all_pages_bypass
__tracepoint_android_vh_em_cpu_energy
@ -3030,6 +3044,11 @@
__tracepoint_android_vh_ftrace_oops_exit
__tracepoint_android_vh_ftrace_size_check
__tracepoint_android_vh_futex_sleep_start
__tracepoint_android_vh_futex_wait_end
__tracepoint_android_vh_futex_wait_start
__tracepoint_android_vh_futex_wake_this
__tracepoint_android_vh_futex_wake_traverse_plist
__tracepoint_android_vh_futex_wake_up_q_finish
__tracepoint_android_vh_get_from_fragment_pool
__tracepoint_android_vh_gpio_block_read
__tracepoint_android_vh_handle_failed_page_trylock
@ -3043,6 +3062,8 @@
__tracepoint_android_vh_killed_process
__tracepoint_android_vh_kmalloc_slab
__tracepoint_android_vh_logbuf
__tracepoint_android_vh_look_around
__tracepoint_android_vh_look_around_migrate_page
__tracepoint_android_vh_mem_cgroup_alloc
__tracepoint_android_vh_mem_cgroup_css_offline
__tracepoint_android_vh_mem_cgroup_css_online
@ -3076,7 +3097,7 @@
__tracepoint_android_vh_record_mutex_lock_starttime
__tracepoint_android_vh_record_rtmutex_lock_starttime
__tracepoint_android_vh_record_rwsem_lock_starttime
__tracepoint_android_vh_record_percpu_rwsem_lock_starttime
__tracepoint_android_vh_record_pcpu_rwsem_starttime
__tracepoint_android_vh_rmqueue
__tracepoint_android_vh_rwsem_init
__tracepoint_android_vh_rwsem_mark_wake_readers
@ -3116,6 +3137,7 @@
__tracepoint_android_vh_tune_inactive_ratio
__tracepoint_android_vh_tune_scan_type
__tracepoint_android_vh_tune_swappiness
__tracepoint_android_vh_test_clear_look_around_ref
__tracepoint_android_vh_ufs_compl_command
__tracepoint_android_vh_ufs_send_command
__tracepoint_android_vh_ufs_send_tm_command
@ -3128,6 +3150,10 @@
__tracepoint_ipi_entry
__tracepoint_ipi_raise
__tracepoint_irq_handler_entry
__tracepoint_net_dev_queue
__tracepoint_net_dev_xmit
__tracepoint_netif_receive_skb
__tracepoint_netif_rx
__tracepoint_pelt_se_tp
tracepoint_probe_register
tracepoint_probe_register_prio
@ -3610,3 +3636,4 @@
xhci_ring_cmd_db
xhci_ring_free
xhci_trb_virt_to_dma
zero_pfn


@ -881,6 +881,7 @@
extcon_get_edev_name
extcon_get_extcon_dev
extcon_get_property
extcon_get_property_capability
extcon_get_state
extcon_register_notifier
extcon_set_state_sync
@ -2580,6 +2581,7 @@
__traceiter_android_vh_jiffies_update
__traceiter_android_vh_logbuf
__traceiter_android_vh_logbuf_pr_cont
__tracepoint_android_vh_madvise_cold_or_pageout
__traceiter_android_vh_printk_hotplug
__traceiter_android_vh_rproc_recovery
__traceiter_android_vh_rproc_recovery_set


@ -1400,6 +1400,9 @@
mmc_sw_reset
mmc_wait_for_req
netdev_update_features
netif_napi_add
__netif_napi_del
netif_set_xps_queue
__netlink_kernel_create
netlink_kernel_release
nla_append


@ -34,6 +34,7 @@
__traceiter_android_vh_alloc_si
__traceiter_android_vh_free_pages
__traceiter_android_vh_set_shmem_page_flag
__traceiter_android_vh_ra_tuning_max_page
__tracepoint_android_vh_handle_pte_fault_end
__tracepoint_android_vh_cow_user_page
__tracepoint_android_vh_swapin_add_anon_rmap
@ -57,4 +58,5 @@
__tracepoint_android_vh_si_swapinfo
__tracepoint_android_vh_alloc_si
__tracepoint_android_vh_free_pages
__tracepoint_android_vh_set_shmem_page_flag
__tracepoint_android_vh_set_shmem_page_flag
__tracepoint_android_vh_ra_tuning_max_page


@ -0,0 +1,6 @@
[abi_symbol_list]
# for type visibility
GKI_struct_selinux_state
GKI_struct_gic_chip_data
GKI_struct_swap_slots_cache


@ -283,13 +283,11 @@ void register_user_break_hook(struct break_hook *hook)
{
register_debug_hook(&hook->node, &user_break_hook);
}
EXPORT_SYMBOL_GPL(register_user_break_hook);
void unregister_user_break_hook(struct break_hook *hook)
{
unregister_debug_hook(&hook->node);
}
EXPORT_SYMBOL_GPL(unregister_user_break_hook);
void register_kernel_break_hook(struct break_hook *hook)
{
@ -301,7 +299,6 @@ void unregister_kernel_break_hook(struct break_hook *hook)
{
unregister_debug_hook(&hook->node);
}
EXPORT_SYMBOL_GPL(unregister_kernel_break_hook);
static int call_break_hook(struct pt_regs *regs, unsigned int esr)
{


@ -10,6 +10,7 @@ ABI_DEFINITION=android/abi_gki_aarch64.xml
TIDY_ABI=1
KMI_SYMBOL_LIST=android/abi_gki_aarch64
ADDITIONAL_KMI_SYMBOL_LISTS="
android/abi_gki_aarch64_type_visibility
android/abi_gki_aarch64_core
android/abi_gki_aarch64_db845c
android/abi_gki_aarch64_exynos


@ -211,7 +211,7 @@ UBSAN_SANITIZE_jitterentropy-fips.o = n
# module that is in scope for FIPS 140-2 certification
#
crypto-fips-objs := drbg.o ecb.o cbc.o ctr.o cts.o gcm.o xts.o hmac.o cmac.o \
memneq.o gf128mul.o aes_generic.o lib-crypto-aes.o \
gf128mul.o aes_generic.o lib-crypto-aes.o \
jitterentropy.o jitterentropy-kcapi.o \
sha1_generic.o sha256_generic.o sha512_generic.o \
lib-sha1.o lib-crypto-sha256.o


@ -6,6 +6,10 @@
* Copyright 2020 Google LLC
*/
#ifndef __GENKSYMS__
#include <linux/rmap.h>
#endif
#define CREATE_TRACE_POINTS
#include <trace/hooks/vendor_hooks.h>
#include <linux/tracepoint.h>
@ -53,7 +57,6 @@
#include <trace/hooks/logbuf.h>
#include <trace/hooks/vmscan.h>
#include <trace/hooks/psi.h>
#include <trace/hooks/selinux.h>
#include <trace/hooks/hung_task.h>
#include <trace/hooks/mmc_core.h>
#include <trace/hooks/v4l2core.h>
@ -249,7 +252,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_build_sched_domains);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alter_mutex_list_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_unlock_slowpath);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_unlock_slowpath_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_start_check_new_owner);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_wake_finish);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_undefinstr);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_ptrauth_fault);
@ -268,7 +270,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_revert_creds);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_mutex_lock_starttime);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_rtmutex_lock_starttime);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_rwsem_lock_starttime);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_percpu_rwsem_lock_starttime);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_record_pcpu_rwsem_starttime);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_memory_x);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_memory_nx);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_memory_ro);
@ -337,7 +339,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dequeue_task_fair);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sched_stat_runtime_rt);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_prepare_update_load_avg_se);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_finish_update_load_avg_se);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_selinux_is_initialized);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_inactive_ratio);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_update_topology_flags_workfn);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_of_i2c_get_board_info);
@ -430,7 +431,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_read_done);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_handle_tlb_conf);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_shrink_node_memcgs);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ra_tuning_max_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_memcg_scan_type);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_handle_pte_fault_end);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cow_user_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_swapin_add_anon_rmap);
@ -445,6 +445,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_drain_slots_cache_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_swap_slot_cache);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_swap_slot);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_get_swap_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_cold_or_pageout);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_isolated_for_reclaim);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_inactive_is_low);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_snapshot_refaults);
@ -456,3 +457,20 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_si);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_pages);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_shmem_page_flag);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sched_pelt_multiplier);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_page_look_around_ref);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_look_around);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_look_around_migrate_page);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_test_clear_look_around_ref);
/*
* For type visibility
*/
#ifdef CONFIG_ARM64
#include <linux/irqchip/arm-gic-v3.h>
const struct gic_chip_data *GKI_struct_gic_chip_data;
EXPORT_SYMBOL_GPL(GKI_struct_gic_chip_data);
#endif
#include <linux/swap_slots.h>
const struct swap_slots_cache *GKI_struct_swap_slots_cache;
EXPORT_SYMBOL_GPL(GKI_struct_swap_slots_cache);


@ -1253,6 +1253,7 @@ static struct target_type verity_target = {
.name = "verity",
.features = DM_TARGET_IMMUTABLE,
.version = {1, 7, 0},
.features = DM_TARGET_IMMUTABLE,
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,


@ -4385,7 +4385,7 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
if (!flag_res)
break;
usleep_range(5000, 10000);
usleep_range(500, 1000);
} while (ktime_before(ktime_get(), timeout));
if (err) {


@ -1334,7 +1334,6 @@ unsigned int xhci_get_endpoint_address(unsigned int ep_index)
unsigned int direction = ep_index % 2 ? USB_DIR_OUT : USB_DIR_IN;
return direction | number;
}
EXPORT_SYMBOL_GPL(xhci_get_endpoint_address);
/* Find the flag for this endpoint (for use in the control context). Use the
* endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is


@ -789,12 +789,9 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
/* wake up the caller thread for sync decompression */
if (sync) {
unsigned long flags;
spin_lock_irqsave(&io->u.wait.lock, flags);
if (!atomic_add_return(bios, &io->pending_bios))
wake_up_locked(&io->u.wait);
spin_unlock_irqrestore(&io->u.wait.lock, flags);
complete(&io->u.done);
return;
}
@ -1214,7 +1211,7 @@ jobqueue_init(struct super_block *sb,
} else {
fg_out:
q = fgq;
init_waitqueue_head(&fgq->u.wait);
init_completion(&fgq->u.done);
atomic_set(&fgq->pending_bios, 0);
}
q->sb = sb;
@ -1377,8 +1374,7 @@ static void z_erofs_runqueue(struct super_block *sb,
return;
/* wait until all bios are completed */
io_wait_event(io[JQ_SUBMIT].u.wait,
!atomic_read(&io[JQ_SUBMIT].pending_bios));
wait_for_completion_io(&io[JQ_SUBMIT].u.done);
/* handle synchronous decompress queue in the caller context */
z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);


@ -90,7 +90,7 @@ struct z_erofs_decompressqueue {
z_erofs_next_pcluster_t head;
union {
wait_queue_head_t wait;
struct completion done;
struct work_struct work;
} u;
};


@ -1863,15 +1863,27 @@ int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi)
void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi)
{
struct ckpt_req_control *cprc = &sbi->cprc_info;
struct task_struct *ckpt_task;
if (cprc->f2fs_issue_ckpt) {
struct task_struct *ckpt_task = cprc->f2fs_issue_ckpt;
if (!cprc->f2fs_issue_ckpt)
return;
cprc->f2fs_issue_ckpt = NULL;
kthread_stop(ckpt_task);
ckpt_task = cprc->f2fs_issue_ckpt;
cprc->f2fs_issue_ckpt = NULL;
kthread_stop(ckpt_task);
flush_remained_ckpt_reqs(sbi, NULL);
}
f2fs_flush_ckpt_thread(sbi);
}
void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi)
{
struct ckpt_req_control *cprc = &sbi->cprc_info;
flush_remained_ckpt_reqs(sbi, NULL);
/* Let's wait for the previous dispatched checkpoint. */
while (atomic_read(&cprc->queued_ckpt))
io_schedule_timeout(DEFAULT_IO_TIMEOUT);
}
void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi)


@ -2479,6 +2479,9 @@ static inline bool check_inplace_update_policy(struct inode *inode,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
unsigned int policy = SM_I(sbi)->ipu_policy;
if (policy & (0x1 << F2FS_IPU_HONOR_OPU_WRITE) &&
is_inode_flag_set(inode, FI_OPU_WRITE))
return false;
if (policy & (0x1 << F2FS_IPU_FORCE))
return true;
if (policy & (0x1 << F2FS_IPU_SSR) && f2fs_need_SSR(sbi))
@ -2521,7 +2524,7 @@ bool f2fs_should_update_inplace(struct inode *inode, struct f2fs_io_info *fio)
return true;
/* if this is cold file, we should overwrite to avoid fragmentation */
if (file_is_cold(inode))
if (file_is_cold(inode) && !is_inode_flag_set(inode, FI_OPU_WRITE))
return true;
return check_inplace_update_policy(inode, fio);
@ -2549,6 +2552,9 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
return true;
if (is_inode_flag_set(inode, FI_OPU_WRITE))
return true;
if (fio) {
if (page_private_gcing(fio->page))
return true;
@ -2714,6 +2720,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
.submitted = false,
.compr_blocks = compr_blocks,
.need_lock = LOCK_RETRY,
.post_read = f2fs_post_read_required(inode),
.io_type = io_type,
.io_wbc = wbc,
.bio = bio,
@ -3172,8 +3179,8 @@ static int __f2fs_write_data_pages(struct address_space *mapping,
f2fs_available_free_memory(sbi, DIRTY_DENTS))
goto skip_write;
/* skip writing during file defragment */
if (is_inode_flag_set(inode, FI_DO_DEFRAG))
/* skip writing in file defragment preparing stage */
if (is_inode_flag_set(inode, FI_SKIP_WRITES))
goto skip_write;
trace_f2fs_writepages(mapping->host, wbc, DATA);
@ -3955,6 +3962,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
f2fs_down_write(&F2FS_I(inode)->i_mmap_sem);
set_inode_flag(inode, FI_ALIGNED_WRITE);
set_inode_flag(inode, FI_OPU_WRITE);
for (; secidx < end_sec; secidx++) {
f2fs_down_write(&sbi->pin_sem);
@ -3963,7 +3971,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false);
f2fs_unlock_op(sbi);
set_inode_flag(inode, FI_DO_DEFRAG);
set_inode_flag(inode, FI_SKIP_WRITES);
for (blkofs = 0; blkofs < blk_per_sec; blkofs++) {
struct page *page;
@ -3980,7 +3988,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
f2fs_put_page(page, 1);
}
clear_inode_flag(inode, FI_DO_DEFRAG);
clear_inode_flag(inode, FI_SKIP_WRITES);
ret = filemap_fdatawrite(inode->i_mapping);
@ -3991,7 +3999,8 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk,
}
done:
clear_inode_flag(inode, FI_DO_DEFRAG);
clear_inode_flag(inode, FI_SKIP_WRITES);
clear_inode_flag(inode, FI_OPU_WRITE);
clear_inode_flag(inode, FI_ALIGNED_WRITE);
f2fs_up_write(&F2FS_I(inode)->i_mmap_sem);


@ -707,7 +707,8 @@ enum {
FI_DROP_CACHE, /* drop dirty page cache */
FI_DATA_EXIST, /* indicate data exists */
FI_INLINE_DOTS, /* indicate inline dot dentries */
FI_DO_DEFRAG, /* indicate defragment is running */
FI_SKIP_WRITES, /* should skip data page writeback */
FI_OPU_WRITE, /* used for opu per file */
FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */
FI_NO_PREALLOC, /* indicate skipped preallocated blocks */
FI_HOT_DATA, /* indicate file is hot */
@ -1161,6 +1162,7 @@ struct f2fs_io_info {
bool retry; /* need to reallocate block address */
int compr_blocks; /* # of compressed block addresses */
bool encrypted; /* indicate file is encrypted */
bool post_read; /* require post read */
enum iostat_type io_type; /* io type */
struct writeback_control *io_wbc; /* writeback control */
struct bio **bio; /* bio for ipu */
@ -1245,6 +1247,7 @@ enum {
SBI_QUOTA_SKIP_FLUSH, /* skip flushing quota in current CP */
SBI_QUOTA_NEED_REPAIR, /* quota file may be corrupted */
SBI_IS_RESIZEFS, /* resizefs is in process */
SBI_IS_FREEZING, /* freezefs is in process */
};
enum {
@ -3629,6 +3632,7 @@ unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
* checkpoint.c
*/
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
void f2fs_flush_ckpt_thread(struct f2fs_sb_info *sbi);
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index);


@ -2597,10 +2597,6 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
bool fragmented = false;
int err;
/* if in-place-update policy is enabled, don't waste time here */
if (f2fs_should_update_inplace(inode, NULL))
return -EINVAL;
pg_start = range->start >> PAGE_SHIFT;
pg_end = (range->start + range->len) >> PAGE_SHIFT;
@ -2608,6 +2604,13 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
inode_lock(inode);
/* if in-place-update policy is enabled, don't waste time here */
set_inode_flag(inode, FI_OPU_WRITE);
if (f2fs_should_update_inplace(inode, NULL)) {
err = -EINVAL;
goto out;
}
/* writeback all dirty pages in the range */
err = filemap_write_and_wait_range(inode->i_mapping, range->start,
range->start + range->len - 1);
@ -2689,7 +2692,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
goto check;
}
set_inode_flag(inode, FI_DO_DEFRAG);
set_inode_flag(inode, FI_SKIP_WRITES);
idx = map.m_lblk;
while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
@ -2714,15 +2717,16 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
if (map.m_lblk < pg_end && cnt < blk_per_seg)
goto do_map;
clear_inode_flag(inode, FI_DO_DEFRAG);
clear_inode_flag(inode, FI_SKIP_WRITES);
err = filemap_fdatawrite(inode->i_mapping);
if (err)
goto out;
}
clear_out:
clear_inode_flag(inode, FI_DO_DEFRAG);
clear_inode_flag(inode, FI_SKIP_WRITES);
out:
clear_inode_flag(inode, FI_OPU_WRITE);
inode_unlock(inode);
if (!err)
range->len = (u64)total << PAGE_SHIFT;


@ -763,7 +763,8 @@ void f2fs_evict_inode(struct inode *inode)
f2fs_remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
f2fs_remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);
sb_start_intwrite(inode->i_sb);
if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
sb_start_intwrite(inode->i_sb);
set_inode_flag(inode, FI_NO_ALLOC);
i_size_write(inode, 0);
retry:
@ -808,7 +809,8 @@ void f2fs_evict_inode(struct inode *inode)
if (dquot_initialize_needed(inode))
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
}
sb_end_intwrite(inode->i_sb);
if (!is_sbi_flag_set(sbi, SBI_IS_FREEZING))
sb_end_intwrite(inode->i_sb);
no_delete:
dquot_drop(inode);


@ -3571,6 +3571,10 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
goto drop_bio;
}
if (fio->post_read)
invalidate_mapping_pages(META_MAPPING(sbi),
fio->new_blkaddr, fio->new_blkaddr);
stat_inc_inplace_blocks(fio->sbi);
if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE)))
@ -3750,10 +3754,16 @@ void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
block_t len)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
block_t i;
if (!f2fs_post_read_required(inode))
return;
for (i = 0; i < len; i++)
f2fs_wait_on_block_writeback(inode, blkaddr + i);
invalidate_mapping_pages(META_MAPPING(sbi), blkaddr, blkaddr + len - 1);
}
static int read_compacted_summaries(struct f2fs_sb_info *sbi)


@ -659,7 +659,9 @@ static inline int utilization(struct f2fs_sb_info *sbi)
* pages over min_fsync_blocks. (=default option)
* F2FS_IPU_ASYNC - do IPU given by asynchronous write requests.
* F2FS_IPU_NOCACHE - disable IPU bio cache.
* F2FS_IPUT_DISABLE - disable IPU. (=default option in LFS mode)
* F2FS_IPU_HONOR_OPU_WRITE - use OPU write prior to IPU write if inode has
* FI_OPU_WRITE flag.
* F2FS_IPU_DISABLE - disable IPU. (=default option in LFS mode)
*/
#define DEF_MIN_IPU_UTIL 70
#define DEF_MIN_FSYNC_BLOCKS 8
@ -675,6 +677,7 @@ enum {
F2FS_IPU_FSYNC,
F2FS_IPU_ASYNC,
F2FS_IPU_NOCACHE,
F2FS_IPU_HONOR_OPU_WRITE,
};
static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,


@ -299,10 +299,10 @@ static void f2fs_destroy_casefold_cache(void) { }
static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
{
block_t limit = min((sbi->user_block_count << 1) / 1000,
block_t limit = min((sbi->user_block_count >> 3),
sbi->user_block_count - sbi->reserved_blocks);
/* limit is 0.2% */
/* limit is 12.5% */
if (test_opt(sbi, RESERVE_ROOT) &&
F2FS_OPTION(sbi).root_reserved_blocks > limit) {
F2FS_OPTION(sbi).root_reserved_blocks = limit;
@ -1555,14 +1555,17 @@ static int f2fs_freeze(struct super_block *sb)
if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
return -EINVAL;
/* ensure no checkpoint required */
if (!llist_empty(&F2FS_SB(sb)->cprc_info.issue_list))
return -EINVAL;
/* Let's flush checkpoints and stop the thread. */
f2fs_flush_ckpt_thread(F2FS_SB(sb));
/* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */
set_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
return 0;
}
static int f2fs_unfreeze(struct super_block *sb)
{
clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
return 0;
}
@ -2025,6 +2028,9 @@ static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
f2fs_up_write(&sbi->gc_lock);
f2fs_sync_fs(sbi->sb, 1);
/* Let's ensure there's no pending checkpoint anymore */
f2fs_flush_ckpt_thread(sbi);
}
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
@ -2184,6 +2190,9 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
f2fs_stop_ckpt_thread(sbi);
need_restart_ckpt = true;
} else {
/* Flush if the prevous checkpoint, if exists. */
f2fs_flush_ckpt_thread(sbi);
err = f2fs_start_ckpt_thread(sbi);
if (err) {
f2fs_err(sbi,
@ -3773,7 +3782,8 @@ static void f2fs_tuning_parameters(struct f2fs_sb_info *sbi)
if (sm_i->main_segments <= SMALL_VOLUME_SEGMENTS) {
F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE;
sm_i->dcc_info->discard_granularity = 1;
sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
sm_i->ipu_policy = 1 << F2FS_IPU_FORCE |
1 << F2FS_IPU_HONOR_OPU_WRITE;
}
sbi->readdir_ra = 1;


@ -1592,6 +1592,10 @@ static int incfs_setattr(struct dentry *dentry, struct iattr *ia)
if (ia->ia_valid & ATTR_SIZE)
return -EINVAL;
if ((ia->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID)) &&
(ia->ia_valid & ATTR_MODE))
return -EINVAL;
if (!di)
return -EINVAL;
backing_dentry = di->backing_path.dentry;


@ -56,8 +56,9 @@ static inline void page_ext_init(void)
{
}
#endif
struct page_ext *lookup_page_ext(const struct page *page);
extern struct page_ext *page_ext_get(struct page *page);
extern void page_ext_put(struct page_ext *page_ext);
static inline struct page_ext *page_ext_next(struct page_ext *curr)
{
@ -73,11 +74,6 @@ static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
{
}
static inline struct page_ext *lookup_page_ext(const struct page *page)
{
return NULL;
}
static inline void page_ext_init(void)
{
}
@ -85,5 +81,14 @@ static inline void page_ext_init(void)
static inline void page_ext_init_flatmem(void)
{
}
static inline struct page_ext *page_ext_get(struct page *page)
{
return NULL;
}
static inline void page_ext_put(struct page_ext *page_ext)
{
}
#endif /* CONFIG_PAGE_EXTENSION */
#endif /* __LINUX_PAGE_EXT_H */


@ -47,62 +47,77 @@ extern struct page_ext_operations page_idle_ops;
static inline bool page_is_young(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
bool page_young;
if (unlikely(!page_ext))
return false;
return test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
page_young = test_bit(PAGE_EXT_YOUNG, &page_ext->flags);
page_ext_put(page_ext);
return page_young;
}
static inline void set_page_young(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;
set_bit(PAGE_EXT_YOUNG, &page_ext->flags);
page_ext_put(page_ext);
}
static inline bool test_and_clear_page_young(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
bool page_young;
if (unlikely(!page_ext))
return false;
return test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
page_young = test_and_clear_bit(PAGE_EXT_YOUNG, &page_ext->flags);
page_ext_put(page_ext);
return page_young;
}
static inline bool page_is_idle(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
bool page_idle;
if (unlikely(!page_ext))
return false;
return test_bit(PAGE_EXT_IDLE, &page_ext->flags);
page_idle = test_bit(PAGE_EXT_IDLE, &page_ext->flags);
page_ext_put(page_ext);
return page_idle;
}
static inline void set_page_idle(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;
set_bit(PAGE_EXT_IDLE, &page_ext->flags);
page_ext_put(page_ext);
}
static inline void clear_page_idle(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;
clear_bit(PAGE_EXT_IDLE, &page_ext->flags);
page_ext_put(page_ext);
}
#endif /* CONFIG_64BIT */


@ -9,7 +9,7 @@
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>
void _trace_android_vh_record_percpu_rwsem_lock_starttime(
void _trace_android_vh_record_pcpu_rwsem_starttime(
struct task_struct *tsk, unsigned long settime);
struct percpu_rw_semaphore {
@ -76,7 +76,7 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
* bleeding the critical section out.
*/
preempt_enable();
_trace_android_vh_record_percpu_rwsem_lock_starttime(current, jiffies);
_trace_android_vh_record_pcpu_rwsem_starttime(current, jiffies);
}
static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
@ -98,7 +98,7 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
*/
if (ret) {
_trace_android_vh_record_percpu_rwsem_lock_starttime(current, jiffies);
_trace_android_vh_record_pcpu_rwsem_starttime(current, jiffies);
rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
}
@ -107,7 +107,7 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
_trace_android_vh_record_percpu_rwsem_lock_starttime(current, 0);
_trace_android_vh_record_pcpu_rwsem_starttime(current, 0);
rwsem_release(&sem->dep_map, _RET_IP_);
preempt_disable();

View File

@ -68,9 +68,6 @@ DECLARE_HOOK(android_vh_mutex_unlock_slowpath,
DECLARE_HOOK(android_vh_mutex_unlock_slowpath_end,
TP_PROTO(struct mutex *lock, struct task_struct *next),
TP_ARGS(lock, next));
DECLARE_HOOK(android_vh_mutex_start_check_new_owner,
TP_PROTO(struct mutex *lock),
TP_ARGS(lock));
DECLARE_HOOK(android_vh_record_mutex_lock_starttime,
TP_PROTO(struct task_struct *tsk, unsigned long settime_jiffies),
TP_ARGS(tsk, settime_jiffies));
@ -80,9 +77,10 @@ DECLARE_HOOK(android_vh_record_rtmutex_lock_starttime,
DECLARE_HOOK(android_vh_record_rwsem_lock_starttime,
TP_PROTO(struct task_struct *tsk, unsigned long settime_jiffies),
TP_ARGS(tsk, settime_jiffies));
DECLARE_HOOK(android_vh_record_percpu_rwsem_lock_starttime,
DECLARE_HOOK(android_vh_record_pcpu_rwsem_starttime,
TP_PROTO(struct task_struct *tsk, unsigned long settime_jiffies),
TP_ARGS(tsk, settime_jiffies));
/* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_DTASK_H */


@ -44,6 +44,7 @@ struct readahead_control;
#endif /* __GENKSYMS__ */
struct cma;
struct swap_slots_cache;
struct page_vma_mapped_walk;
DECLARE_RESTRICTED_HOOK(android_rvh_set_skip_swapcache_flags,
TP_PROTO(gfp_t *flags),
@ -236,6 +237,9 @@ DECLARE_HOOK(android_vh_get_swap_page,
TP_PROTO(struct page *page, swp_entry_t *entry,
struct swap_slots_cache *cache, bool *found),
TP_ARGS(page, entry, cache, found));
DECLARE_HOOK(android_vh_madvise_cold_or_pageout,
TP_PROTO(struct vm_area_struct *vma, bool *allow_shared),
TP_ARGS(vma, allow_shared));
DECLARE_HOOK(android_vh_page_isolated_for_reclaim,
TP_PROTO(struct mm_struct *mm, struct page *page),
TP_ARGS(mm, page));
@ -263,6 +267,16 @@ DECLARE_HOOK(android_vh_set_shmem_page_flag,
DECLARE_HOOK(android_vh_remove_vmalloc_stack,
TP_PROTO(struct vm_struct *vm),
TP_ARGS(vm));
DECLARE_HOOK(android_vh_test_clear_look_around_ref,
TP_PROTO(struct page *page),
TP_ARGS(page));
DECLARE_HOOK(android_vh_look_around_migrate_page,
TP_PROTO(struct page *old_page, struct page *new_page),
TP_ARGS(old_page, new_page));
DECLARE_HOOK(android_vh_look_around,
TP_PROTO(struct page_vma_mapped_walk *pvmw, struct page *page,
struct vm_area_struct *vma, int *referenced),
TP_ARGS(pvmw, page, vma, referenced));
/* macro versions of hooks are no longer required */
#endif /* _TRACE_HOOK_MM_H */
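For reference, a vendor module consumes a hook declared this way by registering a probe through the register_trace_<hook>() helper that DECLARE_HOOK generates. The sketch below is illustrative only and not part of this commit (the probe, init and module names are invented); it opts shared pages into MADV_PAGEOUT reclaim, which mm/madvise.c further down reads back through the allow_shared flag.

#include <linux/module.h>
#include <linux/mm.h>
#include <trace/hooks/mm.h>

/* Hypothetical probe: allow reclaim of shared pages for every VMA. */
static void probe_madvise_cold_or_pageout(void *unused,
					  struct vm_area_struct *vma,
					  bool *allow_shared)
{
	*allow_shared = true;
}

static int __init demo_vh_init(void)
{
	/* Registration helper generated by DECLARE_HOOK(); returns 0 on success. */
	return register_trace_android_vh_madvise_cold_or_pageout(
			probe_madvise_cold_or_pageout, NULL);
}
module_init(demo_vh_init);
MODULE_LICENSE("GPL");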


@ -43,9 +43,6 @@ DECLARE_HOOK(android_vh_page_trylock_clear,
DECLARE_HOOK(android_vh_shrink_node_memcgs,
TP_PROTO(struct mem_cgroup *memcg, bool *skip),
TP_ARGS(memcg, skip));
DECLARE_HOOK(android_vh_tune_memcg_scan_type,
TP_PROTO(struct mem_cgroup *memcg, char *scan_type),
TP_ARGS(memcg, scan_type));
DECLARE_HOOK(android_vh_inactive_is_low,
TP_PROTO(unsigned long gb, unsigned long *inactive_ratio,
enum lru_list inactive_lru, bool *skip),
@ -53,6 +50,9 @@ DECLARE_HOOK(android_vh_inactive_is_low,
DECLARE_HOOK(android_vh_snapshot_refaults,
TP_PROTO(struct lruvec *target_lruvec),
TP_ARGS(target_lruvec));
DECLARE_HOOK(android_vh_check_page_look_around_ref,
TP_PROTO(struct page *page, int *skip),
TP_ARGS(page, skip));
#endif /* _TRACE_HOOK_VMSCAN_H */
/* This part must be outside protection */
#include <trace/define_trace.h>


@ -43,7 +43,6 @@ u64 dma_direct_get_required_mask(struct device *dev)
return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
EXPORT_SYMBOL_GPL(dma_direct_get_required_mask);
static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
u64 *phys_limit)
@ -313,7 +312,6 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
dma_free_contiguous(dev, page, size);
return NULL;
}
EXPORT_SYMBOL_GPL(dma_direct_alloc);
void dma_direct_free_pages(struct device *dev, size_t size,
struct page *page, dma_addr_t dma_addr,
@ -331,7 +329,6 @@ void dma_direct_free_pages(struct device *dev, size_t size,
dma_free_contiguous(dev, page, size);
}
EXPORT_SYMBOL_GPL(dma_direct_free);
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
defined(CONFIG_SWIOTLB)


@ -27,7 +27,6 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
return ret;
}
EXPORT_SYMBOL_GPL(dma_common_get_sgtable);
/*
* Create userspace mapping for the DMA-coherent memory.
@ -58,7 +57,6 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
return -ENXIO;
#endif /* CONFIG_MMU */
}
EXPORT_SYMBOL_GPL(dma_common_mmap);
struct page *dma_common_alloc_pages(struct device *dev, size_t size,
dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)


@ -1053,7 +1053,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
goto err;
}
trace_android_vh_mutex_start_check_new_owner(lock);
spin_unlock(&lock->wait_lock);
schedule_preempt_disabled();


@ -13,17 +13,17 @@
#include <trace/hooks/dtask.h>
/*
* trace_android_vh_record_percpu_rwsem_lock_starttime is called in
* trace_android_vh_record_pcpu_rwsem_starttime is called in
* include/linux/percpu-rwsem.h by including include/hooks/dtask.h, which
* will result to build-err. So we create
* func:_trace_android_vh_record_percpu_rwsem_lock_starttime for percpu-rwsem.h to call.
* func:_trace_android_vh_record_pcpu_rwsem_starttime for percpu-rwsem.h to call.
*/
void _trace_android_vh_record_percpu_rwsem_lock_starttime(struct task_struct *tsk,
void _trace_android_vh_record_pcpu_rwsem_starttime(struct task_struct *tsk,
unsigned long settime)
{
trace_android_vh_record_percpu_rwsem_lock_starttime(tsk, settime);
trace_android_vh_record_pcpu_rwsem_starttime(tsk, settime);
}
EXPORT_SYMBOL_GPL(_trace_android_vh_record_percpu_rwsem_lock_starttime);
EXPORT_SYMBOL_GPL(_trace_android_vh_record_pcpu_rwsem_starttime);
int __percpu_init_rwsem(struct percpu_rw_semaphore *sem,
const char *name, struct lock_class_key *key)
@ -252,13 +252,13 @@ void percpu_down_write(struct percpu_rw_semaphore *sem)
/* Wait for all active readers to complete. */
rcuwait_wait_event(&sem->writer, readers_active_check(sem), TASK_UNINTERRUPTIBLE);
trace_android_vh_record_percpu_rwsem_lock_starttime(current, jiffies);
trace_android_vh_record_pcpu_rwsem_starttime(current, jiffies);
}
EXPORT_SYMBOL_GPL(percpu_down_write);
void percpu_up_write(struct percpu_rw_semaphore *sem)
{
trace_android_vh_record_percpu_rwsem_lock_starttime(current, 0);
trace_android_vh_record_pcpu_rwsem_starttime(current, 0);
rwsem_release(&sem->dep_map, _RET_IP_);
/*


@ -1400,7 +1400,6 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
return sighand;
}
EXPORT_SYMBOL_GPL(__lock_task_sighand);
/*
* send signal info to all the members of a group


@ -319,10 +319,12 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
spinlock_t *ptl;
struct page *page = NULL;
LIST_HEAD(page_list);
bool allow_shared = false;
if (fatal_signal_pending(current))
return -EINTR;
trace_android_vh_madvise_cold_or_pageout(vma, &allow_shared);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (pmd_trans_huge(*pmd)) {
pmd_t orig_pmd;
@ -438,7 +440,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
}
/* Do not interfere with other mappings of this page */
if (page_mapcount(page) != 1)
if (!allow_shared && page_mapcount(page) != 1)
continue;
VM_BUG_ON_PAGE(PageTransCompound(page), page);


@ -1907,8 +1907,7 @@ early_param("memblock", early_memblock);
static int __init early_memblock_nomap(char *str)
{
kstrtobool(str, &memblock_nomap_remove);
return 0;
return kstrtobool(str, &memblock_nomap_remove);
}
early_param("android12_only.will_be_removed_soon.memblock_nomap_remove", early_memblock_nomap);


@ -1156,14 +1156,22 @@ int add_memory_subsection(int nid, u64 start, u64 size)
ret = arch_add_memory(nid, start, size, &params);
if (ret) {
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
memblock_remove(start, size);
pr_err("%s failed to add subsection start 0x%llx size 0x%llx\n",
__func__, start, size);
goto err_add_memory;
}
mem_hotplug_done();
return ret;
err_add_memory:
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
memblock_remove(start, size);
mem_hotplug_done();
release_memory_resource(res);
return ret;
}
EXPORT_SYMBOL_GPL(add_memory_subsection);


@ -56,6 +56,7 @@
#include <trace/events/migrate.h>
#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>
#include <trace/hooks/vmscan.h>
#include "internal.h"
@ -606,6 +607,7 @@ void migrate_page_states(struct page *newpage, struct page *page)
SetPageChecked(newpage);
if (PageMappedToDisk(page))
SetPageMappedToDisk(newpage);
trace_android_vh_look_around_migrate_page(page, newpage);
/* Move dirty on pages not done by migrate_page_move_mapping() */
if (PageDirty(page))


@ -72,6 +72,7 @@
#include <linux/padata.h>
#include <linux/khugepaged.h>
#include <trace/hooks/mm.h>
#include <trace/hooks/vmscan.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
@ -2403,6 +2404,7 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags
set_page_pfmemalloc(page);
else
clear_page_pfmemalloc(page);
trace_android_vh_test_clear_look_around_ref(page);
}
/*


@ -8,7 +8,7 @@
#include <linux/kmemleak.h>
#include <linux/page_owner.h>
#include <linux/page_idle.h>
#include <linux/rcupdate.h>
/*
* struct page extension
*
@ -58,6 +58,10 @@
* can utilize this callback to initialize the state of it correctly.
*/
#ifdef CONFIG_SPARSEMEM
#define PAGE_EXT_INVALID (0x1)
#endif
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
static bool need_page_idle(void)
{
@ -117,6 +121,49 @@ static inline struct page_ext *get_entry(void *base, unsigned long index)
return base + page_ext_size * index;
}
/**
* page_ext_get() - Get the extended information for a page.
* @page: The page we're interested in.
*
* Ensures that the page_ext will remain valid until page_ext_put()
* is called.
*
* Return: NULL if no page_ext exists for this page.
* Context: Any context. Caller may not sleep until they have called
* page_ext_put().
*/
struct page_ext *page_ext_get(struct page *page)
{
struct page_ext *page_ext;
rcu_read_lock();
page_ext = lookup_page_ext(page);
if (!page_ext) {
rcu_read_unlock();
return NULL;
}
return page_ext;
}
/**
* page_ext_put() - Working with page extended information is done.
* @page_ext - Page extended information received from page_ext_get().
*
* The page extended information of the page may not be valid after this
* function is called.
*
* Return: None.
* Context: Any context with corresponding page_ext_get() is called.
*/
void page_ext_put(struct page_ext *page_ext)
{
if (unlikely(!page_ext))
return;
rcu_read_unlock();
}
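/*
 * Illustrative usage sketch, not part of this change: callers bracket any
 * page_ext access with page_ext_get()/page_ext_put() so the page_ext table
 * cannot be freed by memory offlining in between (the pair holds the RCU
 * read lock). The helper name below is hypothetical; it mirrors the
 * page_idle.h conversions earlier in this patch.
 */
static inline bool page_ext_test_flag(struct page *page, int bit)
{
	struct page_ext *page_ext = page_ext_get(page);
	bool ret;

	if (unlikely(!page_ext))
		return false;

	ret = test_bit(bit, &page_ext->flags);
	page_ext_put(page_ext);

	return ret;
}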
#if !defined(CONFIG_SPARSEMEM)
@ -131,6 +178,7 @@ struct page_ext *lookup_page_ext(const struct page *page)
unsigned long index;
struct page_ext *base;
WARN_ON_ONCE(!rcu_read_lock_held());
base = NODE_DATA(page_to_nid(page))->node_page_ext;
/*
* The sanity checks the page allocator does upon freeing a
@ -200,20 +248,27 @@ void __init page_ext_init_flatmem(void)
}
#else /* CONFIG_FLAT_NODE_MEM_MAP */
static bool page_ext_invalid(struct page_ext *page_ext)
{
return !page_ext || (((unsigned long)page_ext & PAGE_EXT_INVALID) == PAGE_EXT_INVALID);
}
struct page_ext *lookup_page_ext(const struct page *page)
{
unsigned long pfn = page_to_pfn(page);
struct mem_section *section = __pfn_to_section(pfn);
struct page_ext *page_ext = READ_ONCE(section->page_ext);
WARN_ON_ONCE(!rcu_read_lock_held());
/*
* The sanity checks the page allocator does upon freeing a
* page can reach here before the page_ext arrays are
* allocated when feeding a range of pages to the allocator
* for the first time during bootup or memory hotplug.
*/
if (!section->page_ext)
if (page_ext_invalid(page_ext))
return NULL;
return get_entry(section->page_ext, pfn);
return get_entry(page_ext, pfn);
}
EXPORT_SYMBOL_GPL(lookup_page_ext);
@ -293,9 +348,30 @@ static void __free_page_ext(unsigned long pfn)
ms = __pfn_to_section(pfn);
if (!ms || !ms->page_ext)
return;
base = get_entry(ms->page_ext, pfn);
base = READ_ONCE(ms->page_ext);
/*
* page_ext here can be valid while doing the roll back
* operation in online_page_ext().
*/
if (page_ext_invalid(base))
base = (void *)base - PAGE_EXT_INVALID;
WRITE_ONCE(ms->page_ext, NULL);
base = get_entry(base, pfn);
free_page_ext(base);
ms->page_ext = NULL;
}
static void __invalidate_page_ext(unsigned long pfn)
{
struct mem_section *ms;
void *val;
ms = __pfn_to_section(pfn);
if (!ms || !ms->page_ext)
return;
val = (void *)ms->page_ext + PAGE_EXT_INVALID;
WRITE_ONCE(ms->page_ext, val);
}
static int __meminit online_page_ext(unsigned long start_pfn,
@ -338,6 +414,20 @@ static int __meminit offline_page_ext(unsigned long start_pfn,
start = SECTION_ALIGN_DOWN(start_pfn);
end = SECTION_ALIGN_UP(start_pfn + nr_pages);
/*
* Freeing of page_ext is done in 3 steps to avoid
* use-after-free of it:
* 1) Traverse all the sections and mark their page_ext
* as invalid.
* 2) Wait for all the existing users of page_ext who
* started before invalidation to finish.
* 3) Free the page_ext.
*/
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
__invalidate_page_ext(pfn);
synchronize_rcu();
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION)
__free_page_ext(pfn);
return 0;
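Editor's note: the three-step comment above is the core of the use-after-free fix. A rough sketch of the race it closes, with timing purely illustrative:

	/*
	 * CPU A (page_ext user)             CPU B (memory offline)
	 * page_ext_get()
	 *   rcu_read_lock()
	 *   lookup_page_ext() -> valid
	 *                                    __invalidate_page_ext()
	 *                                      (new lookups now return NULL)
	 *   ... uses page_ext ...            synchronize_rcu()
	 * page_ext_put()                       (waits for CPU A to finish)
	 *   rcu_read_unlock()
	 *                                    __free_page_ext()  -> safe
	 */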

View File

@ -173,7 +173,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
page_ext = lookup_page_ext(page);
page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;
for (i = 0; i < (1 << order); i++) {
@ -183,6 +183,7 @@ void __reset_page_owner(struct page *page, unsigned int order)
page_owner->free_ts_nsec = free_ts_nsec;
page_ext = page_ext_next(page_ext);
}
page_ext_put(page_ext);
}
static inline void __set_page_owner_handle(struct page *page,
@ -210,19 +211,21 @@ static inline void __set_page_owner_handle(struct page *page,
noinline void __set_page_owner(struct page *page, unsigned int order,
gfp_t gfp_mask)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext;
depot_stack_handle_t handle;
handle = save_stack(gfp_mask);
page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;
handle = save_stack(gfp_mask);
__set_page_owner_handle(page, page_ext, handle, order, gfp_mask);
page_ext_put(page_ext);
}
void __set_page_owner_migrate_reason(struct page *page, int reason)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
struct page_owner *page_owner;
if (unlikely(!page_ext))
@ -230,12 +233,13 @@ void __set_page_owner_migrate_reason(struct page *page, int reason)
page_owner = get_page_owner(page_ext);
page_owner->last_migrate_reason = reason;
page_ext_put(page_ext);
}
void __split_page_owner(struct page *page, unsigned int nr)
{
int i;
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
struct page_owner *page_owner;
if (unlikely(!page_ext))
@ -246,17 +250,25 @@ void __split_page_owner(struct page *page, unsigned int nr)
page_owner->order = 0;
page_ext = page_ext_next(page_ext);
}
page_ext_put(page_ext);
}
void __copy_page_owner(struct page *oldpage, struct page *newpage)
{
struct page_ext *old_ext = lookup_page_ext(oldpage);
struct page_ext *new_ext = lookup_page_ext(newpage);
struct page_ext *old_ext;
struct page_ext *new_ext;
struct page_owner *old_page_owner, *new_page_owner;
if (unlikely(!old_ext || !new_ext))
old_ext = page_ext_get(oldpage);
if (unlikely(!old_ext))
return;
new_ext = page_ext_get(newpage);
if (unlikely(!new_ext)) {
page_ext_put(old_ext);
return;
}
old_page_owner = get_page_owner(old_ext);
new_page_owner = get_page_owner(new_ext);
new_page_owner->order = old_page_owner->order;
@ -279,6 +291,8 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
*/
__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
__set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
page_ext_put(new_ext);
page_ext_put(old_ext);
}
void pagetypeinfo_showmixedcount_print(struct seq_file *m,
@ -335,12 +349,12 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
if (PageReserved(page))
continue;
page_ext = lookup_page_ext(page);
page_ext = page_ext_get(page);
if (unlikely(!page_ext))
continue;
if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
continue;
goto ext_put_continue;
page_owner = get_page_owner(page_ext);
page_mt = gfp_migratetype(page_owner->gfp_mask);
@ -351,9 +365,12 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
count[pageblock_mt]++;
pfn = block_end_pfn;
page_ext_put(page_ext);
break;
}
pfn += (1UL << page_owner->order) - 1;
ext_put_continue:
page_ext_put(page_ext);
}
}
@ -432,7 +449,7 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
void __dump_page_owner(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get((void *)page);
struct page_owner *page_owner;
depot_stack_handle_t handle;
unsigned long *entries;
@ -451,6 +468,7 @@ void __dump_page_owner(struct page *page)
if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
pr_alert("page_owner info is not present (never set?)\n");
page_ext_put(page_ext);
return;
}
@ -483,6 +501,7 @@ void __dump_page_owner(struct page *page)
if (page_owner->last_migrate_reason != -1)
pr_alert("page has been migrated, last migrate reason: %s\n",
migrate_reason_names[page_owner->last_migrate_reason]);
page_ext_put(page_ext);
}
static ssize_t
@ -508,6 +527,14 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
/* Find an allocated page */
for (; pfn < max_pfn; pfn++) {
/*
* This temporary page_owner is required so
* that we can avoid the context switches while holding
* the rcu lock and copying the page owner information to
* user through copy_to_user() or GFP_KERNEL allocations.
*/
struct page_owner page_owner_tmp;
/*
* If the new page is in a new MAX_ORDER_NR_PAGES area,
* validate the area as existing, skip it if not
@ -530,7 +557,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
continue;
}
page_ext = lookup_page_ext(page);
page_ext = page_ext_get(page);
if (unlikely(!page_ext))
continue;
@ -539,14 +566,14 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
* because we don't hold the zone lock.
*/
if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
continue;
goto ext_put_continue;
/*
* Although we do have the info about past allocation of free
* pages, it's not relevant for current memory usage.
*/
if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
continue;
goto ext_put_continue;
page_owner = get_page_owner(page_ext);
@ -555,7 +582,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
* would inflate the stats.
*/
if (!IS_ALIGNED(pfn, 1 << page_owner->order))
continue;
goto ext_put_continue;
/*
* Access to page_ext->handle isn't synchronous so we should
@ -563,13 +590,17 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
*/
handle = READ_ONCE(page_owner->handle);
if (!handle)
continue;
goto ext_put_continue;
/* Record the next PFN to read in the file offset */
*ppos = (pfn - min_low_pfn) + 1;
page_owner_tmp = *page_owner;
page_ext_put(page_ext);
return print_page_owner(buf, count, pfn, page,
page_owner, handle);
&page_owner_tmp, handle);
ext_put_continue:
page_ext_put(page_ext);
}
return 0;
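Editor's note on the read_page_owner() change above: print_page_owner() ends in copy_to_user() and can allocate with GFP_KERNEL, both of which may sleep, so they must not run inside the RCU read-side section that page_ext_get() opened. Snapshotting the owner data onto the stack lets the function drop page_ext first; a simplified shape of the hunk, not a literal copy:

	page_owner_tmp = *page_owner;   /* snapshot while page_ext is pinned */
	page_ext_put(page_ext);         /* leave the RCU read-side section   */
	return print_page_owner(buf, count, pfn, page,
				&page_owner_tmp, handle); /* may now sleep */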
@ -627,18 +658,20 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
if (PageReserved(page))
continue;
page_ext = lookup_page_ext(page);
page_ext = page_ext_get(page);
if (unlikely(!page_ext))
continue;
/* Maybe overlapping zone */
if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
continue;
goto ext_put_continue;
/* Found early allocated page */
__set_page_owner_handle(page, page_ext, early_handle,
0, 0);
count++;
ext_put_continue:
page_ext_put(page_ext);
}
cond_resched();
}

View File

@ -162,7 +162,7 @@ void __reset_page_pinner(struct page *page, unsigned int order, bool free)
struct page_ext *page_ext;
int i;
page_ext = lookup_page_ext(page);
page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;
@ -184,6 +184,7 @@ void __reset_page_pinner(struct page *page, unsigned int order, bool free)
clear_bit(PAGE_EXT_GET, &page_ext->flags);
page_ext = page_ext_next(page_ext);
}
page_ext_put(page_ext);
}
static inline void __set_page_pinner_handle(struct page *page,
@ -206,14 +207,16 @@ static inline void __set_page_pinner_handle(struct page *page,
noinline void __set_page_pinner(struct page *page, unsigned int order)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext;
depot_stack_handle_t handle;
handle = save_stack(GFP_NOWAIT|__GFP_NOWARN);
page_ext = page_ext_get(page);
if (unlikely(!page_ext))
return;
handle = save_stack(GFP_NOWAIT|__GFP_NOWARN);
__set_page_pinner_handle(page, page_ext, handle, order);
page_ext_put(page_ext);
}
static ssize_t
@ -279,7 +282,7 @@ print_page_pinner(bool longterm, char __user *buf, size_t count, struct captured
void __dump_page_pinner(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
struct page_pinner *page_pinner;
depot_stack_handle_t handle;
unsigned long *entries;
@ -300,6 +303,7 @@ void __dump_page_pinner(struct page *page)
count = atomic_read(&page_pinner->count);
if (!count) {
pr_alert("page_pinner info is not present (never set?)\n");
page_ext_put(page_ext);
return;
}
@ -323,11 +327,12 @@ void __dump_page_pinner(struct page *page)
nr_entries = stack_depot_fetch(handle, &entries);
stack_trace_print(entries, nr_entries, 0);
}
page_ext_put(page_ext);
}
void __page_pinner_migration_failed(struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_ext *page_ext = page_ext_get(page);
struct captured_pinner record;
unsigned long flags;
unsigned int idx;
@ -335,9 +340,12 @@ void __page_pinner_migration_failed(struct page *page)
if (unlikely(!page_ext))
return;
if (!test_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags))
if (!test_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags)) {
page_ext_put(page_ext);
return;
}
page_ext_put(page_ext);
record.handle = save_stack(GFP_NOWAIT|__GFP_NOWARN);
record.ts_usec = ktime_to_us(ktime_get_boottime());
capture_page_state(page, &record);
@ -359,10 +367,11 @@ void __page_pinner_mark_migration_failed_pages(struct list_head *page_list)
/* The page will be freed by putback_movable_pages soon */
if (page_count(page) == 1)
continue;
page_ext = lookup_page_ext(page);
page_ext = page_ext_get(page);
if (unlikely(!page_ext))
continue;
__set_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags);
page_ext_put(page_ext);
__page_pinner_migration_failed(page);
}
}

View File

@ -789,6 +789,7 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
}
if (pvmw.pte) {
trace_android_vh_look_around(&pvmw, page, vma, &referenced);
if (ptep_clear_flush_young_notify(vma, address,
pvmw.pte)) {
/*

View File

@ -1022,12 +1022,16 @@ static enum page_references page_check_references(struct page *page,
unsigned long vm_flags;
bool should_protect = false;
bool trylock_fail = false;
int ret = 0;
trace_android_vh_page_should_be_protected(page, &should_protect);
if (unlikely(should_protect))
return PAGEREF_ACTIVATE;
trace_android_vh_page_trylock_set(page);
trace_android_vh_check_page_look_around_ref(page, &ret);
if (ret)
return ret;
referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
&vm_flags);
referenced_page = TestClearPageReferenced(page);
@ -2397,7 +2401,6 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
denominator = ap + fp;
out:
trace_android_vh_tune_scan_type((char *)(&scan_balance));
trace_android_vh_tune_memcg_scan_type(memcg, (char *)(&scan_balance));
for_each_evictable_lru(lru) {
int file = is_file_lru(lru);
unsigned long lruvec_size;

View File

@ -58,3 +58,9 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kfree_skb);
EXPORT_TRACEPOINT_SYMBOL_GPL(napi_poll);
EXPORT_TRACEPOINT_SYMBOL_GPL(tcp_send_reset);
EXPORT_TRACEPOINT_SYMBOL_GPL(net_dev_queue);
EXPORT_TRACEPOINT_SYMBOL_GPL(net_dev_xmit);
EXPORT_TRACEPOINT_SYMBOL_GPL(netif_receive_skb);
EXPORT_TRACEPOINT_SYMBOL_GPL(netif_rx);
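Editor's note: exporting these tracepoint symbols lets out-of-tree (e.g. vendor) modules attach probes to them. A minimal sketch, assuming a module that only wants to observe netif_rx; the probe name and what it does with the skb are hypothetical.

	#include <linux/module.h>
	#include <linux/skbuff.h>
	#include <trace/events/net.h>

	static void probe_netif_rx(void *ignore, struct sk_buff *skb)
	{
		/* Observe the skb; tracepoint probes must not sleep. */
	}

	/* In module init / exit respectively: */
	register_trace_netif_rx(probe_netif_rx, NULL);
	unregister_trace_netif_rx(probe_netif_rx, NULL);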

View File

@ -10,6 +10,8 @@ selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o \
ss/ebitmap.o ss/hashtab.o ss/symtab.o ss/sidtab.o ss/avtab.o \
ss/policydb.o ss/services.o ss/conditional.o ss/mls.o ss/context.o
selinux-$(CONFIG_ANDROID_VENDOR_HOOKS) += vendor_hooks.o
selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
selinux-$(CONFIG_NETLABEL) += netlabel.o

View File

@ -0,0 +1,22 @@
// SPDX-License-Identifier: GPL-2.0-only
/* vendor_hooks.c
*
* Copyright 2022 Google LLC
*/
#ifndef __GENKSYMS__
#include "security.h"
#endif
#define CREATE_TRACE_POINTS
#include <trace/hooks/vendor_hooks.h>
#include <linux/tracepoint.h>
#include <trace/hooks/selinux.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_selinux_is_initialized);
/*
* For type visibility
*/
struct selinux_state *GKI_struct_selinux_state;
EXPORT_SYMBOL_GPL(GKI_struct_selinux_state);
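Editor's note: the exported GKI_struct_selinux_state pointer is never dereferenced at runtime; its only purpose is to pull the full definition of struct selinux_state into the KMI so the ABI tooling records the type for vendor modules. The idiom generalizes; a sketch with a placeholder type name:

	/* Force "struct foo" (placeholder name) into the KMI: an exported,
	 * otherwise-unused pointer makes the ABI tooling emit the full
	 * type definition instead of a forward declaration.
	 */
	struct foo *GKI_struct_foo;
	EXPORT_SYMBOL_GPL(GKI_struct_foo);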