From a5e46b0f3c0509fd641f81be12082dcc98d22857 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Sat, 17 Jun 2023 19:50:24 -0600 Subject: [PATCH 001/163] UPSTREAM: io_uring/poll: serialize poll linked timer start with poll removal Commit ef7dfac51d8ed961b742218f526bd589f3900a59 upstream. We selectively grab the ctx->uring_lock for poll update/removal, but we really should grab it from the start to fully synchronize with linked timeouts. Normally this is indeed the case, but if requests are forced async by the application, we don't fully cover removal and timer disarm within the uring_lock. Make this simpler by having consistent locking state for poll removal. Bug: 290270326 Cc: stable@vger.kernel.org # 6.1+ Reported-by: Querijn Voet Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman (cherry picked from commit 24f473769e7ecf35e2772469a063d5a8bbca6f63) Signed-off-by: Lee Jones Change-Id: I6632b7d78493b0dfc0fb26204d34823045c03f72 --- io_uring/poll.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/io_uring/poll.c b/io_uring/poll.c index 4788073ec45d..869e1d2a4413 100644 --- a/io_uring/poll.c +++ b/io_uring/poll.c @@ -993,8 +993,9 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags) struct io_hash_bucket *bucket; struct io_kiocb *preq; int ret2, ret = 0; - bool locked; + bool locked = true; + io_ring_submit_lock(ctx, issue_flags); preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket); ret2 = io_poll_disarm(preq); if (bucket) @@ -1006,12 +1007,10 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags) goto out; } - io_ring_submit_lock(ctx, issue_flags); preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket); ret2 = io_poll_disarm(preq); if (bucket) spin_unlock(&bucket->lock); - io_ring_submit_unlock(ctx, issue_flags); if (ret2) { ret = ret2; goto out; @@ -1035,7 +1034,7 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags) if (poll_update->update_user_data) 
preq->cqe.user_data = poll_update->new_user_data; - ret2 = io_poll_add(preq, issue_flags); + ret2 = io_poll_add(preq, issue_flags & ~IO_URING_F_UNLOCKED); /* successfully updated, don't complete poll request */ if (!ret2 || ret2 == -EIOCBQUEUED) goto out; @@ -1043,9 +1042,9 @@ int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags) req_set_fail(preq); io_req_set_res(preq, -ECANCELED, 0); - locked = !(issue_flags & IO_URING_F_UNLOCKED); io_req_task_complete(preq, &locked); out: + io_ring_submit_unlock(ctx, issue_flags); if (ret < 0) { req_set_fail(req); return ret; From 77ae3e7bb8cef4b25cc0d8a9e75905001b55407f Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 30 Jun 2023 14:19:52 -0700 Subject: [PATCH 002/163] FROMGIT: swap: remove remnants of polling from read_swap_cache_async Patch series "Per-VMA lock support for swap and userfaults", v7. When per-VMA locks were introduced in [1] several types of page faults would still fall back to mmap_lock to keep the patchset simple. Among them are swap and userfault pages. The main reason for skipping those cases was the fact that mmap_lock could be dropped while handling these faults and that required additional logic to be implemented. Implement the mechanism to allow per-VMA locks to be dropped for these cases. First, change handle_mm_fault to drop per-VMA locks when returning VM_FAULT_RETRY or VM_FAULT_COMPLETED to be consistent with the way mmap_lock is handled. Then change folio_lock_or_retry to accept vm_fault and return vm_fault_t which simplifies later patches. Finally allow swap and uffd page faults to be handled under per-VMA locks by dropping per-VMA and retrying, the same way it's done under mmap_lock. Naturally, once VMA lock is dropped that VMA should be assumed unstable and can't be used. This patch (of 6): Commit [1] introduced IO polling support duding swapin to reduce swap read latency for block devices that can be polled. However later commit [2] removed polling support. 
Therefore it seems safe to remove do_poll parameter in read_swap_cache_async and always call swap_readpage with synchronous=false waiting for IO completion in folio_lock_or_retry. [1] commit 23955622ff8d ("swap: add block io poll in swapin path") [2] commit 9650b453a3d4 ("block: ignore RWF_HIPRI hint for sync dio") Link: https://lkml.kernel.org/r/20230630211957.1341547-1-surenb@google.com Link: https://lkml.kernel.org/r/20230630211957.1341547-2-surenb@google.com Signed-off-by: Suren Baghdasaryan Suggested-by: "Huang, Ying" Reviewed-by: "Huang, Ying" Reviewed-by: Christoph Hellwig Cc: Alistair Popple Cc: Al Viro Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickins Cc: Jan Kara Cc: Johannes Weiner Cc: Josef Bacik Cc: Laurent Dufour Cc: Liam R. Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Michal Hocko Cc: Michel Lespinasse Cc: Minchan Kim Cc: Pavel Tatashin Cc: Peter Xu Cc: Punit Agrawal Cc: Vlastimil Babka Cc: Yu Zhao Signed-off-by: Andrew Morton (cherry picked from commit 4296c6a817b421061d6e0b9c654c7d4d5a038a5b https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 161210518 Change-Id: I3d647ba4d6093f4e3db2c4ff759e5ce59b45b0e1 Signed-off-by: Suren Baghdasaryan --- mm/madvise.c | 4 ++-- mm/swap.h | 1 - mm/swap_state.c | 12 +++++------- 3 files changed, 7 insertions(+), 10 deletions(-) diff --git a/mm/madvise.c b/mm/madvise.c index f49a62a35827..42c5a65e1c2d 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -223,7 +223,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, trace_android_vh_madvise_swapin_walk_pmd_entry(entry); page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE, - vma, index, false, &splug); + vma, index, &splug); if (page) put_page(page); } @@ -259,7 +259,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma, rcu_read_unlock(); page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE, - NULL, 0, false, &splug); + NULL, 0, 
&splug); if (page) put_page(page); diff --git a/mm/swap.h b/mm/swap.h index cc08c459c619..9ad061576192 100644 --- a/mm/swap.h +++ b/mm/swap.h @@ -46,7 +46,6 @@ struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index); struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, unsigned long addr, - bool do_poll, struct swap_iocb **plug); struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, diff --git a/mm/swap_state.c b/mm/swap_state.c index 438d0676c5be..3e7db8ea40f3 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -515,15 +515,14 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, */ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, - unsigned long addr, bool do_poll, - struct swap_iocb **plug) + unsigned long addr, struct swap_iocb **plug) { bool page_was_allocated; struct page *retpage = __read_swap_cache_async(entry, gfp_mask, vma, addr, &page_was_allocated); if (page_was_allocated) - swap_readpage(retpage, do_poll, plug); + swap_readpage(retpage, false, plug); return retpage; } @@ -618,7 +617,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, struct swap_info_struct *si = swp_swap_info(entry); struct blk_plug plug; struct swap_iocb *splug = NULL; - bool do_poll = true, page_allocated; + bool page_allocated; struct vm_area_struct *vma = vmf->vma; unsigned long addr = vmf->address; @@ -626,7 +625,6 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, if (!mask) goto skip; - do_poll = false; /* Read a page_cluster sized and aligned cluster around offset. 
*/ start_offset = offset & ~mask; end_offset = offset | mask; @@ -658,7 +656,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, lru_add_drain(); /* Push any new pages onto the LRU now */ skip: /* The page was likely read above, so no need for plugging here */ - return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL); + return read_swap_cache_async(entry, gfp_mask, vma, addr, NULL); } int init_swap_address_space(unsigned int type, unsigned long nr_pages) @@ -832,7 +830,7 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask, skip: /* The page was likely read above, so no need for plugging here */ return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address, - ra_info.win == 1, NULL); + NULL); } /** From 4a207efbe0b5b01cee3aabd804916e1968eabafb Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 30 Jun 2023 14:19:53 -0700 Subject: [PATCH 003/163] FROMGIT: mm: add missing VM_FAULT_RESULT_TRACE name for VM_FAULT_COMPLETED VM_FAULT_RESULT_TRACE should contain an element for every vm_fault_reason to be used as flag_array inside trace_print_flags_seq(). The element for VM_FAULT_COMPLETED is missing, add it. Link: https://lkml.kernel.org/r/20230630211957.1341547-3-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Peter Xu Cc: Alistair Popple Cc: Al Viro Cc: Christian Brauner Cc: Christoph Hellwig Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Jan Kara Cc: Johannes Weiner Cc: Josef Bacik Cc: Laurent Dufour Cc: Liam R. 
Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Michal Hocko Cc: Michel Lespinasse Cc: Minchan Kim Cc: Pavel Tatashin Cc: Punit Agrawal Cc: Vlastimil Babka Cc: Yu Zhao Signed-off-by: Andrew Morton (cherry picked from commit 4669552b64a6cf9ba2b48cf719879867efadcd8b https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 161210518 Change-Id: Icef851c27ab1ea8e85c7fccc26b26480b9c42443 Signed-off-by: Suren Baghdasaryan --- include/linux/mm_types.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 248151aa6be9..00f5715a28c7 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -942,7 +942,8 @@ enum vm_fault_reason { { VM_FAULT_RETRY, "RETRY" }, \ { VM_FAULT_FALLBACK, "FALLBACK" }, \ { VM_FAULT_DONE_COW, "DONE_COW" }, \ - { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" } + { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" }, \ + { VM_FAULT_COMPLETED, "COMPLETED" } struct vm_special_mapping { const char *name; /* The name, e.g. "[vdso]". */ From 7bfd71d29806aec1473bfb1ece0e35780251dd79 Mon Sep 17 00:00:00 2001 From: Will McVicker Date: Wed, 19 Jul 2023 11:39:13 -0700 Subject: [PATCH 004/163] ANDROID: GKI: Update protected exports Run `bazel run @//common:kernel_aarch64_abi_update_protected_exports` on latest kernel to clean up the protected exports list. This is blocking updating the ABI since this list needs to be accurate before updating the ABI. 
Bug: 287170531 Change-Id: I8173060087cad060314ae0e494e30b71052e1d8f Signed-off-by: Will McVicker --- android/abi_gki_protected_exports_aarch64 | 2 -- android/abi_gki_protected_exports_x86_64 | 2 -- 2 files changed, 4 deletions(-) diff --git a/android/abi_gki_protected_exports_aarch64 b/android/abi_gki_protected_exports_aarch64 index e4792af0a0ef..7d97572e6175 100644 --- a/android/abi_gki_protected_exports_aarch64 +++ b/android/abi_gki_protected_exports_aarch64 @@ -336,12 +336,10 @@ wpan_phy_new wpan_phy_register wpan_phy_unregister wwan_create_port -wwan_get_debugfs_dir wwan_port_get_drvdata wwan_port_rx wwan_port_txoff wwan_port_txon -wwan_put_debugfs_dir wwan_register_ops wwan_remove_port wwan_unregister_ops \ No newline at end of file diff --git a/android/abi_gki_protected_exports_x86_64 b/android/abi_gki_protected_exports_x86_64 index e4792af0a0ef..7d97572e6175 100644 --- a/android/abi_gki_protected_exports_x86_64 +++ b/android/abi_gki_protected_exports_x86_64 @@ -336,12 +336,10 @@ wpan_phy_new wpan_phy_register wpan_phy_unregister wwan_create_port -wwan_get_debugfs_dir wwan_port_get_drvdata wwan_port_rx wwan_port_txoff wwan_port_txon -wwan_put_debugfs_dir wwan_register_ops wwan_remove_port wwan_unregister_ops \ No newline at end of file From 62ef90de0d67c6f068671850108dc98aa7238bdb Mon Sep 17 00:00:00 2001 From: Will McVicker Date: Wed, 19 Jul 2023 11:30:33 -0700 Subject: [PATCH 005/163] ANDROID: GKI: Update the pixel symbol list These symbols are part of supporting Pixel devices on GKI kernels. 
1 function symbol(s) added 'struct gpio_desc* devm_gpiod_get_index_optional(struct device*, const char*, unsigned int, enum gpiod_flags)' Bug: 279090118 Change-Id: I1bb36d65f928fac53e0a3dbdc2c0559349d5fc42 Signed-off-by: Will McVicker --- android/abi_gki_aarch64.stg | 10 ++++++++++ android/abi_gki_aarch64_pixel | 24 ++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 41f0eecb2f04..0dcb7d98a33a 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -334772,6 +334772,15 @@ elf_symbol { type_id: 0x5f3cfa16 full_name: "devm_gpiod_get_index" } +elf_symbol { + id: 0x241e9d4d + name: "devm_gpiod_get_index_optional" + is_defined: true + symbol_type: FUNCTION + crc: 0xf71fb74b + type_id: 0x5f3cfa16 + full_name: "devm_gpiod_get_index_optional" +} elf_symbol { id: 0xf6b9516e name: "devm_gpiod_get_optional" @@ -379092,6 +379101,7 @@ interface { symbol_id: 0x097ab520 symbol_id: 0xccb2ecff symbol_id: 0xd0f2d980 + symbol_id: 0x241e9d4d symbol_id: 0xf6b9516e symbol_id: 0xa2b20c15 symbol_id: 0x0ea63f59 diff --git a/android/abi_gki_aarch64_pixel b/android/abi_gki_aarch64_pixel index 73ea56df4c25..ec1294998154 100644 --- a/android/abi_gki_aarch64_pixel +++ b/android/abi_gki_aarch64_pixel @@ -369,15 +369,19 @@ devm_clk_put devm_device_add_group devm_device_add_groups + devm_device_remove_group __devm_drm_dev_alloc devm_drm_panel_bridge_add_typed devm_extcon_dev_allocate devm_extcon_dev_register devm_free_irq + devm_fwnode_gpiod_get_index + devm_fwnode_pwm_get devm_gen_pool_create devm_gpiochip_add_data_with_key devm_gpiod_get devm_gpiod_get_array + devm_gpiod_get_index_optional devm_gpiod_get_optional devm_gpiod_put_array devm_gpio_request @@ -396,6 +400,7 @@ devm_kmemdup devm_kstrdup devm_kstrdup_const + devm_led_classdev_register_ext devm_mfd_add_devices devm_nvmem_register __devm_of_phy_provider_register @@ -410,6 +415,7 @@ devm_platform_ioremap_resource 
devm_platform_ioremap_resource_byname devm_power_supply_register + devm_pwm_get devm_regmap_add_irq_chip __devm_regmap_init __devm_regmap_init_i2c @@ -962,6 +968,7 @@ int_to_scsilun iomem_resource iommu_alloc_resv_region + iommu_attach_device iommu_attach_device_pasid iommu_attach_group iommu_detach_device_pasid @@ -1124,6 +1131,7 @@ kvmalloc_node led_classdev_register_ext led_classdev_unregister + led_init_default_state_get __list_add_valid __list_del_entry_valid list_sort @@ -1505,6 +1513,7 @@ __put_task_struct put_unused_fd put_vaddr_frames + pwm_apply_state queue_delayed_work_on queue_work_on radix_tree_delete_item @@ -1607,6 +1616,7 @@ regulator_map_voltage_linear regulator_notifier_call_chain regulator_put + regulator_set_active_discharge_regmap regulator_set_voltage regulator_set_voltage_sel_regmap regulator_unregister @@ -1997,10 +2007,17 @@ __traceiter_device_pm_callback_end __traceiter_device_pm_callback_start __traceiter_gpu_mem_total + __traceiter_hrtimer_expire_entry + __traceiter_hrtimer_expire_exit + __traceiter_irq_handler_entry + __traceiter_irq_handler_exit __traceiter_mmap_lock_acquire_returned __traceiter_mmap_lock_released __traceiter_mmap_lock_start_locking + __traceiter_sched_switch __traceiter_suspend_resume + __traceiter_workqueue_execute_end + __traceiter_workqueue_execute_start trace_output_call __tracepoint_android_rvh_typec_tcpci_get_vbus __tracepoint_android_vh_cpu_idle_enter @@ -2025,12 +2042,19 @@ __tracepoint_device_pm_callback_end __tracepoint_device_pm_callback_start __tracepoint_gpu_mem_total + __tracepoint_hrtimer_expire_entry + __tracepoint_hrtimer_expire_exit + __tracepoint_irq_handler_entry + __tracepoint_irq_handler_exit __tracepoint_mmap_lock_acquire_returned __tracepoint_mmap_lock_released __tracepoint_mmap_lock_start_locking tracepoint_probe_register tracepoint_probe_unregister + __tracepoint_sched_switch __tracepoint_suspend_resume + __tracepoint_workqueue_execute_end + __tracepoint_workqueue_execute_start 
trace_print_array_seq trace_print_bitmask_seq trace_print_flags_seq From 6ca2ff04a1b85c0145c965ab8562cd8072b4f3f5 Mon Sep 17 00:00:00 2001 From: davidchao Date: Mon, 1 Feb 2021 16:46:13 +0800 Subject: [PATCH 006/163] ANDROID: thermal: Add vendor thermal genl check Add vendor enable_thermal_genl_check logic. Filter on-die tz genl event. To avoid thermal-hal being woken up all the time by thermal genl events, only the selected thermal_zone and cooling_device can send events from kernel. Bug: 170682696 Bug: 291846209 Test: boot and thermal-hal can receive thermal genl events from kernel Change-Id: Idb3f4b07a2a2740c01d8785910878bfe6edc832d Signed-off-by: davidchao Signed-off-by: Will McVicker --- drivers/android/vendor_hooks.c | 1 + drivers/thermal/thermal_netlink.c | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/drivers/android/vendor_hooks.c b/drivers/android/vendor_hooks.c index 482830505f1b..0a30c8cbe7bd 100644 --- a/drivers/android/vendor_hooks.c +++ b/drivers/android/vendor_hooks.c @@ -312,3 +312,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_madvise_cold_pageout_skip); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rmqueue_smallest_bypass); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_one_page_bypass); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_regmap_update); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_enable_thermal_genl_check); diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c index e2d78a996b5f..468eaae5ca82 100644 --- a/drivers/thermal/thermal_netlink.c +++ b/drivers/thermal/thermal_netlink.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include "thermal_core.h" @@ -274,6 +275,11 @@ static int thermal_genl_send_event(enum thermal_genl_event event, struct sk_buff *msg; int ret = -EMSGSIZE; void *hdr; + int enable_thermal_genl = 1; + + trace_android_vh_enable_thermal_genl_check(event, p->tz_id, &enable_thermal_genl); + if (!enable_thermal_genl) + return 0; msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (!msg) 
From 3a8999c6830ef79a8d00301b633ae06899fc9b96 Mon Sep 17 00:00:00 2001 From: Will McVicker Date: Wed, 19 Jul 2023 15:22:06 -0700 Subject: [PATCH 007/163] ANDROID: GKI: Update pixel symbol list for thermal Add the following symbol to allow vendor module to filter on-die tz genl event. This helps avoid thermal-hal being woken up all the time by thermal genl events, only the selected thermal_zone and cooling_device can send events from kernel. 1 function symbol(s) added 'int __traceiter_android_vh_enable_thermal_genl_check(void*, int, int, int*)' 1 variable symbol(s) added 'struct tracepoint __tracepoint_android_vh_enable_thermal_genl_check' Bug: 291846209 Change-Id: I763595ff1366196c6a16ff57d608042743fbe9fd Signed-off-by: Will McVicker --- android/abi_gki_aarch64.stg | 28 ++++++++++++++++++++++++++++ android/abi_gki_aarch64_pixel | 2 ++ 2 files changed, 30 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 0dcb7d98a33a..2d8406a00dcd 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -301904,6 +301904,14 @@ function { parameter_id: 0x6720d32f parameter_id: 0x3c2755a3 } +function { + id: 0x9a2abc7b + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x6720d32f + parameter_id: 0x6720d32f + parameter_id: 0x13580d6c +} function { id: 0x9a2af11b return_type_id: 0x6720d32f @@ -321219,6 +321227,15 @@ elf_symbol { type_id: 0x9bdc9aae full_name: "__traceiter_android_vh_dup_task_struct" } +elf_symbol { + id: 0xdcaa59a3 + name: "__traceiter_android_vh_enable_thermal_genl_check" + is_defined: true + symbol_type: FUNCTION + crc: 0xc39a1e16 + type_id: 0x9a2abc7b + full_name: "__traceiter_android_vh_enable_thermal_genl_check" +} elf_symbol { id: 0x7ebac47a name: "__traceiter_android_vh_enable_thermal_power_throttle" @@ -324369,6 +324386,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_vh_dup_task_struct" } +elf_symbol { + id: 0x54b2cd01 + name: 
"__tracepoint_android_vh_enable_thermal_genl_check" + is_defined: true + symbol_type: OBJECT + crc: 0x29cc54bf + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_enable_thermal_genl_check" +} elf_symbol { id: 0x188eab44 name: "__tracepoint_android_vh_enable_thermal_power_throttle" @@ -377594,6 +377620,7 @@ interface { symbol_id: 0x9dbd7b92 symbol_id: 0x42312ccc symbol_id: 0xf432d1c9 + symbol_id: 0xdcaa59a3 symbol_id: 0x7ebac47a symbol_id: 0xf586d5b6 symbol_id: 0x1f554c2a @@ -377944,6 +377971,7 @@ interface { symbol_id: 0xe2d7542c symbol_id: 0x988719fa symbol_id: 0x732a182b + symbol_id: 0x54b2cd01 symbol_id: 0x188eab44 symbol_id: 0xe7584e1c symbol_id: 0x0d418d38 diff --git a/android/abi_gki_aarch64_pixel b/android/abi_gki_aarch64_pixel index ec1294998154..dbd893b4d26b 100644 --- a/android/abi_gki_aarch64_pixel +++ b/android/abi_gki_aarch64_pixel @@ -1987,6 +1987,7 @@ __traceiter_android_rvh_typec_tcpci_get_vbus __traceiter_android_vh_cpu_idle_enter __traceiter_android_vh_cpu_idle_exit + __traceiter_android_vh_enable_thermal_genl_check __traceiter_android_vh_ipi_stop __traceiter_android_vh_scheduler_tick __traceiter_android_vh_sysrq_crash @@ -2022,6 +2023,7 @@ __tracepoint_android_rvh_typec_tcpci_get_vbus __tracepoint_android_vh_cpu_idle_enter __tracepoint_android_vh_cpu_idle_exit + __tracepoint_android_vh_enable_thermal_genl_check __tracepoint_android_vh_ipi_stop __tracepoint_android_vh_scheduler_tick __tracepoint_android_vh_sysrq_crash From f930b82d1651fbd9e00d0a96d56c4ba3555b7b41 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Tue, 18 Jul 2023 11:17:20 -0700 Subject: [PATCH 008/163] FROMLIST: fuse: revalidate: don't invalidate if interrupted If the LOOKUP request triggered from fuse_dentry_revalidate() is interrupted, then the dentry will be invalidated, possibly resulting in submounts being unmounted. 
Reported-by: Xu Rongbo Fixes: 9e6268db496a ("[PATCH] FUSE - read-write operations") Cc: Signed-off-by: Miklos Szeredi Bug: 282905757 Link: https://lore.kernel.org/all/CAJfpegswN_CJJ6C3RZiaK6rpFmNyWmXfaEpnQUJ42KCwNF5tWw@mail.gmail.com/ Signed-off-by: Paul Lawrence (cherry picked from https://android-review.googlesource.com/q/commit:ae5b9259d42efa1bbd42d807fd3b3a991ddb51be) Merged-In: I8c62f5aeeb450de78c6a38a6f8728c900a0fc9bd Change-Id: I8c62f5aeeb450de78c6a38a6f8728c900a0fc9bd --- fs/fuse/dir.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 933e4a727505..076a0bddef8f 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -321,7 +321,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags) spin_unlock(&fi->lock); } kfree(forget); - if (ret == -ENOMEM) + if (ret == -ENOMEM || ret == -EINTR) goto out; if (ret || fuse_invalid_attr(&outarg.attr) || fuse_stale_inode(inode, outarg.generation, &outarg.attr)) From 7dd60ce8046d1e791519c4128eff3018e3507ec6 Mon Sep 17 00:00:00 2001 From: lijun14 Date: Tue, 18 Jul 2023 20:09:31 +0800 Subject: [PATCH 009/163] ANDROID: vendor_hooks: add vendor hook to support SAGT Add vendor hook of android_rvh_before_do_sched_yield Bug: 291726037 Change-Id: I1f2d65739a297812f279b83085e3680e40d4cb6e Signed-off-by: lijun14 --- include/trace/hooks/sched.h | 4 ++++ kernel/sched/core.c | 5 +++++ kernel/sched/vendor_hooks.c | 1 + 3 files changed, 10 insertions(+) diff --git a/include/trace/hooks/sched.h b/include/trace/hooks/sched.h index 7b5ab104d7fe..811f07f7be61 100644 --- a/include/trace/hooks/sched.h +++ b/include/trace/hooks/sched.h @@ -255,6 +255,10 @@ DECLARE_RESTRICTED_HOOK(android_rvh_do_sched_yield, TP_PROTO(struct rq *rq), TP_ARGS(rq), 1); +DECLARE_RESTRICTED_HOOK(android_rvh_before_do_sched_yield, + TP_PROTO(long *unused), + TP_ARGS(unused), 1); + DECLARE_HOOK(android_vh_free_task, TP_PROTO(struct task_struct *p), TP_ARGS(p)); diff --git a/kernel/sched/core.c 
b/kernel/sched/core.c index c9d59630444a..53faabdb3950 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -8446,6 +8446,11 @@ static void do_sched_yield(void) { struct rq_flags rf; struct rq *rq; + long skip = 0; + + trace_android_rvh_before_do_sched_yield(&skip); + if (skip) + return; rq = this_rq_lock_irq(&rf); diff --git a/kernel/sched/vendor_hooks.c b/kernel/sched/vendor_hooks.c index e1e2601fd84f..d8d945fc20e3 100644 --- a/kernel/sched/vendor_hooks.c +++ b/kernel/sched/vendor_hooks.c @@ -75,6 +75,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_is_cpu_allowed); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_get_nohz_timer_target); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_getaffinity); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sched_yield); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_before_do_sched_yield); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_fork_init); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ttwu_cond); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_schedule_bug); From 25a11995fb3795bb2b4a559216a4336952ff7f4d Mon Sep 17 00:00:00 2001 From: lijun14 Date: Tue, 18 Jul 2023 20:02:25 +0800 Subject: [PATCH 010/163] ANDROID: GKI: add ABI symbol for xiaomi abi symbol(s) added : __traceiter_android_rvh_before_do_sched_yield __tracepoint_android_rvh_before_do_sched_yield Bug: 291726037 Change-Id: I16278b0ca8eac03976543e27f21d82c3cec92af8 Signed-off-by: lijun14 --- android/abi_gki_aarch64.stg | 26 ++++++++++++++++++++++++++ android/abi_gki_aarch64_xiaomi | 4 ++++ 2 files changed, 30 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 2d8406a00dcd..2f172f835e4e 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -304498,6 +304498,12 @@ function { parameter_id: 0x2e029f76 parameter_id: 0x13580d6c } +function { + id: 0x9b79f498 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x3593bec8 +} function { id: 0x9b79f513 return_type_id: 0x6720d32f @@ -319751,6 +319757,15 @@ 
elf_symbol { type_id: 0x9b2ba01c full_name: "__traceiter_android_rvh_audio_usb_offload_disconnect" } +elf_symbol { + id: 0x144db0a1 + name: "__traceiter_android_rvh_before_do_sched_yield" + is_defined: true + symbol_type: FUNCTION + crc: 0xce266c8e + type_id: 0x9b79f498 + full_name: "__traceiter_android_rvh_before_do_sched_yield" +} elf_symbol { id: 0x192bbbd5 name: "__traceiter_android_rvh_build_perf_domains" @@ -322910,6 +322925,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_rvh_audio_usb_offload_disconnect" } +elf_symbol { + id: 0xd7757253 + name: "__tracepoint_android_rvh_before_do_sched_yield" + is_defined: true + symbol_type: OBJECT + crc: 0x94abc138 + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_rvh_before_do_sched_yield" +} elf_symbol { id: 0x1e8a7e23 name: "__tracepoint_android_rvh_build_perf_domains" @@ -377456,6 +377480,7 @@ interface { symbol_id: 0xb3d70eab symbol_id: 0x0b48afa1 symbol_id: 0x48420da9 + symbol_id: 0x144db0a1 symbol_id: 0x192bbbd5 symbol_id: 0xadc13d20 symbol_id: 0xc93c7d6d @@ -377807,6 +377832,7 @@ interface { symbol_id: 0xcd36f539 symbol_id: 0x748c1fd7 symbol_id: 0xaf461bff + symbol_id: 0xd7757253 symbol_id: 0x1e8a7e23 symbol_id: 0xfe3875f6 symbol_id: 0x60b5a917 diff --git a/android/abi_gki_aarch64_xiaomi b/android/abi_gki_aarch64_xiaomi index b0e34e78b178..8209fb5955f2 100644 --- a/android/abi_gki_aarch64_xiaomi +++ b/android/abi_gki_aarch64_xiaomi @@ -306,3 +306,7 @@ __tracepoint_android_vh_rmqueue_smallest_bypass __traceiter_android_vh_free_one_page_bypass __tracepoint_android_vh_free_one_page_bypass + +# required by SAGT module + __traceiter_android_rvh_before_do_sched_yield + __tracepoint_android_rvh_before_do_sched_yield From d51e21b3941585c584f81edad7cd9818f6f86d5f Mon Sep 17 00:00:00 2001 From: John Scheible Date: Thu, 20 Jul 2023 12:24:34 -0700 Subject: [PATCH 011/163] ANDROID: ABI: Update pixel symbol list 1 function symbol(s) added 'void iommu_detach_device(struct iommu_domain *, struct 
device *)' Bug: 292121811 Change-Id: I7087f815af2a57d538484c66e0a4ab97e137c586 Signed-off-by: John Scheible --- android/abi_gki_aarch64_pixel | 1 + 1 file changed, 1 insertion(+) diff --git a/android/abi_gki_aarch64_pixel b/android/abi_gki_aarch64_pixel index dbd893b4d26b..b50236e61182 100644 --- a/android/abi_gki_aarch64_pixel +++ b/android/abi_gki_aarch64_pixel @@ -971,6 +971,7 @@ iommu_attach_device iommu_attach_device_pasid iommu_attach_group + iommu_detach_device iommu_detach_device_pasid iommu_device_register iommu_device_sysfs_add From 701f85c2a19d776f84469d34b4ade2ff95f34612 Mon Sep 17 00:00:00 2001 From: Lu Wang Date: Thu, 20 Jul 2023 08:49:13 +0800 Subject: [PATCH 012/163] ANDROID: abi_gki_aarch64_qcom: Update QCOM symbol list Update QCOM symbol list for walt vendor hook. Symbols added: __traceiter_android_rvh_before_do_sched_yield __tracepoint_android_rvh_before_do_sched_yield Bug: 291683326 Signed-off-by: Lu Wang Change-Id: I3fe2fb40f3da4ff6079e64d7badb4e9e63ee6248 --- android/abi_gki_aarch64_qcom | 2 ++ 1 file changed, 2 insertions(+) diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom index df8f0db3cc5c..ec0a9c3f3bb3 100644 --- a/android/abi_gki_aarch64_qcom +++ b/android/abi_gki_aarch64_qcom @@ -3262,6 +3262,7 @@ __traceiter_android_rvh_after_dequeue_task __traceiter_android_rvh_after_enqueue_task __traceiter_android_rvh_audio_usb_offload_disconnect + __traceiter_android_rvh_before_do_sched_yield __traceiter_android_rvh_build_perf_domains __traceiter_android_rvh_can_migrate_task __traceiter_android_rvh_check_preempt_tick @@ -3405,6 +3406,7 @@ __tracepoint_android_rvh_after_dequeue_task __tracepoint_android_rvh_after_enqueue_task __tracepoint_android_rvh_audio_usb_offload_disconnect + __tracepoint_android_rvh_before_do_sched_yield __tracepoint_android_rvh_build_perf_domains __tracepoint_android_rvh_can_migrate_task __tracepoint_android_rvh_check_preempt_tick From 8bb470d6377f77b1b47ea1b130cee8a30755cfa8 Mon Sep 17 00:00:00 2001 
From: Takashi Iwai Date: Fri, 12 May 2023 16:18:00 +0100 Subject: [PATCH 013/163] UPSTREAM: media: dvb-core: Fix kernel WARNING for blocking operation in wait_event*() [ Upstream commit b8c75e4a1b325ea0a9433fa8834be97b5836b946 ] Using a semaphore in the wait_event*() condition is no good idea. It hits a kernel WARN_ON() at prepare_to_wait_event() like: do not call blocking ops when !TASK_RUNNING; state=1 set at prepare_to_wait_event+0x6d/0x690 For avoiding the potential deadlock, rewrite to an open-coded loop instead. Unlike the loop in wait_event*(), this uses wait_woken() after the condition check, hence the task state stays consistent. CVE-2023-31084 was assigned to this bug. Link: https://lore.kernel.org/r/CA+UBctCu7fXn4q41O_3=id1+OdyQ85tZY1x+TkT-6OVBL6KAUw@mail.gmail.com/ Bug: 290204413 Link: https://lore.kernel.org/linux-media/20230512151800.1874-1-tiwai@suse.de Reported-by: Yu Hao Closes: https://nvd.nist.gov/vuln/detail/CVE-2023-31084 Signed-off-by: Takashi Iwai Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin (cherry picked from commit d0088ea444e676a0c75551efe183bee4a3d2cfc8) Signed-off-by: Lee Jones Change-Id: Id7cefa46b7d4189a0311e7e763b1c9be7ba9bdbd --- drivers/media/dvb-core/dvb_frontend.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index c41a7e5c2b92..fce0e2094078 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c @@ -293,14 +293,22 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe, } if (events->eventw == events->eventr) { - int ret; + struct wait_queue_entry wait; + int ret = 0; if (flags & O_NONBLOCK) return -EWOULDBLOCK; - ret = wait_event_interruptible(events->wait_queue, - dvb_frontend_test_event(fepriv, events)); - + init_waitqueue_entry(&wait, current); + add_wait_queue(&events->wait_queue, &wait); + while (!dvb_frontend_test_event(fepriv, events)) { + 
wait_woken(&wait, TASK_INTERRUPTIBLE, 0); + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + } + remove_wait_queue(&events->wait_queue, &wait); if (ret < 0) return ret; } From ca372ba9e750c31045f366e262db01ef8663c7aa Mon Sep 17 00:00:00 2001 From: lambert wang Date: Fri, 21 Jul 2023 08:24:24 +0800 Subject: [PATCH 014/163] ANDROID: GKI: Update mtk ABI symbol list 6 function symbol(s) added 'struct device* device_find_child_by_name(struct device*, const char*)' 'void pci_free_irq(struct pci_dev*, unsigned int, void*)' 'int pci_request_irq(struct pci_dev*, unsigned int, irq_handler_t, irq_handler_t, void*, const char*, ...)' 'int pm_schedule_suspend(struct device*, unsigned int)' 'int rtnl_configure_link(struct net_device*, const struct ifinfomsg*)' 'struct net_device* rtnl_create_link(struct net*, const char*, unsigned char, const struct rtnl_link_ops*, struct nlattr**, struct netlink_ext_ack*)' in which: * device_find_child_by_name/rtnl_configure_link/rtnl_create_link are needed by wwan.ko. 
Bug: 291865296 Change-Id: I38b8a69313667318944387e1a19287b9c01cafee Signed-off-by: zhaoping shu Signed-off-by: xiayu zhang Signed-off-by: ivan yang Signed-off-by: lambert wang --- android/abi_gki_aarch64.stg | 153 ++++++++++++++++++++++++++++++++++++ android/abi_gki_aarch64_mtk | 24 ++++++ 2 files changed, 177 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 2f172f835e4e..291c26e645b8 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -27431,6 +27431,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xd06de2a9 } +pointer_reference { + id: 0x3e8d7c9a + kind: POINTER + pointee_type_id: 0xd07514f5 +} pointer_reference { id: 0x3e8e572f kind: POINTER @@ -30856,6 +30861,11 @@ qualified { qualifier: CONST qualified_type_id: 0x30a49fb4 } +qualified { + id: 0xd07514f5 + qualifier: CONST + qualified_type_id: 0x3193e55d +} qualified { id: 0xd08a0c68 qualifier: CONST @@ -39256,6 +39266,12 @@ member { name: "__i_nlink" type_id: 0x4585663f } +member { + id: 0xed0c7bc2 + name: "__ifi_pad" + type_id: 0x5d8155a5 + offset: 8 +} member { id: 0x1d90045d name: "__iter_idx" @@ -100711,6 +100727,35 @@ member { type_id: 0x0fa767da offset: 448 } +member { + id: 0x49aaa6b3 + name: "ifi_change" + type_id: 0x4585663f + offset: 96 +} +member { + id: 0xccfc5cc3 + name: "ifi_family" + type_id: 0x5d8155a5 +} +member { + id: 0xe5765ac4 + name: "ifi_flags" + type_id: 0x4585663f + offset: 64 +} +member { + id: 0x95c94564 + name: "ifi_index" + type_id: 0x6720d32f + offset: 32 +} +member { + id: 0x58fe8d2e + name: "ifi_type" + type_id: 0xc93e017b + offset: 16 +} member { id: 0x9056806a name: "ifindex" @@ -224281,6 +224326,20 @@ struct_union { member_id: 0x95dac005 } } +struct_union { + id: 0x3193e55d + kind: STRUCT + name: "ifinfomsg" + definition { + bytesize: 16 + member_id: 0xccfc5cc3 + member_id: 0xed0c7bc2 + member_id: 0x58fe8d2e + member_id: 0x95c94564 + member_id: 0xe5765ac4 + member_id: 0x49aaa6b3 + } +} struct_union { 
id: 0x41f1df67 kind: STRUCT @@ -280106,6 +280165,13 @@ function { return_type_id: 0x48b5725f parameter_id: 0x15a30023 } +function { + id: 0x15f1cac1 + return_type_id: 0x48b5725f + parameter_id: 0x11e6864c + parameter_id: 0x4585663f + parameter_id: 0x18bd6530 +} function { id: 0x15f330e3 return_type_id: 0x48b5725f @@ -294238,6 +294304,12 @@ function { parameter_id: 0x32a623d7 parameter_id: 0x3e10b518 } +function { + id: 0x91d35e28 + return_type_id: 0x6720d32f + parameter_id: 0x32a623d7 + parameter_id: 0x3e8d7c9a +} function { id: 0x91d5541c return_type_id: 0x6720d32f @@ -300498,6 +300570,17 @@ function { return_type_id: 0x6720d32f parameter_id: 0x156a41de } +function { + id: 0x98da7fb1 + return_type_id: 0x6720d32f + parameter_id: 0x11e6864c + parameter_id: 0x4585663f + parameter_id: 0xd92b1d75 + parameter_id: 0xd92b1d75 + parameter_id: 0x18bd6530 + parameter_id: 0x3e10b518 + parameter_id: 0xa52a0930 +} function { id: 0x98de7695 return_type_id: 0x6720d32f @@ -316675,6 +316758,16 @@ function { return_type_id: 0x6d7f5ff6 parameter_id: 0x040d1b01 } +function { + id: 0xffb4ff33 + return_type_id: 0x32a623d7 + parameter_id: 0x0ca27481 + parameter_id: 0x3e10b518 + parameter_id: 0x5d8155a5 + parameter_id: 0x337b7b81 + parameter_id: 0x0277bf8a + parameter_id: 0x07dcdbe1 +} function { id: 0xffbaa126 return_type_id: 0x32a623d7 @@ -334048,6 +334141,15 @@ elf_symbol { type_id: 0xadd088bd full_name: "device_find_child" } +elf_symbol { + id: 0x01805ccc + name: "device_find_child_by_name" + is_defined: true + symbol_type: FUNCTION + crc: 0xfe92284a + type_id: 0xad414cb1 + full_name: "device_find_child_by_name" +} elf_symbol { id: 0xd81e7ab3 name: "device_for_each_child" @@ -355516,6 +355618,15 @@ elf_symbol { type_id: 0x578fa618 full_name: "pci_find_next_capability" } +elf_symbol { + id: 0x08190210 + name: "pci_free_irq" + is_defined: true + symbol_type: FUNCTION + crc: 0xdaf171ab + type_id: 0x15f1cac1 + full_name: "pci_free_irq" +} elf_symbol { id: 0x8ffabaa9 name: 
"pci_free_irq_vectors" @@ -355813,6 +355924,15 @@ elf_symbol { type_id: 0x185a3adc full_name: "pci_remove_root_bus" } +elf_symbol { + id: 0xf6896e34 + name: "pci_request_irq" + is_defined: true + symbol_type: FUNCTION + crc: 0x17161b4b + type_id: 0x98da7fb1 + full_name: "pci_request_irq" +} elf_symbol { id: 0x324ff23b name: "pci_request_region" @@ -358108,6 +358228,15 @@ elf_symbol { type_id: 0x1192ec84 full_name: "pm_runtime_set_autosuspend_delay" } +elf_symbol { + id: 0xe263dcb4 + name: "pm_schedule_suspend" + is_defined: true + symbol_type: FUNCTION + crc: 0xe5c44150 + type_id: 0x9c00c8ec + full_name: "pm_schedule_suspend" +} elf_symbol { id: 0x59caaeac name: "pm_stay_awake" @@ -362014,6 +362143,24 @@ elf_symbol { type_id: 0x90657259 full_name: "rtc_valid_tm" } +elf_symbol { + id: 0x19b7aeab + name: "rtnl_configure_link" + is_defined: true + symbol_type: FUNCTION + crc: 0x85c289df + type_id: 0x91d35e28 + full_name: "rtnl_configure_link" +} +elf_symbol { + id: 0xfccc22f4 + name: "rtnl_create_link" + is_defined: true + symbol_type: FUNCTION + crc: 0x817f3567 + type_id: 0xffb4ff33 + full_name: "rtnl_create_link" +} elf_symbol { id: 0x50b92bc4 name: "rtnl_is_locked" @@ -379069,6 +379216,7 @@ interface { symbol_id: 0xe85fa1f1 symbol_id: 0xe6df6df5 symbol_id: 0x0b165427 + symbol_id: 0x01805ccc symbol_id: 0xd81e7ab3 symbol_id: 0x3b013a69 symbol_id: 0x0576df29 @@ -381452,6 +381600,7 @@ interface { symbol_id: 0x27f20808 symbol_id: 0x63876663 symbol_id: 0xdea420f5 + symbol_id: 0x08190210 symbol_id: 0x8ffabaa9 symbol_id: 0x50bce06e symbol_id: 0x133a7a3e @@ -381485,6 +381634,7 @@ interface { symbol_id: 0x5b0002a1 symbol_id: 0x2c8694e0 symbol_id: 0xa21a61f0 + symbol_id: 0xf6896e34 symbol_id: 0x324ff23b symbol_id: 0xde0961b5 symbol_id: 0x93ed1ac4 @@ -381740,6 +381890,7 @@ interface { symbol_id: 0x878b97bb symbol_id: 0x1f3f17bd symbol_id: 0x53f4166f + symbol_id: 0xe263dcb4 symbol_id: 0x59caaeac symbol_id: 0x64f92138 symbol_id: 0x2e13b831 @@ -382174,6 +382325,8 @@ interface { 
symbol_id: 0xa4ad8391 symbol_id: 0x5c1197ba symbol_id: 0x22e1072c + symbol_id: 0x19b7aeab + symbol_id: 0xfccc22f4 symbol_id: 0x50b92bc4 symbol_id: 0x8c0dd14a symbol_id: 0x3480e8df diff --git a/android/abi_gki_aarch64_mtk b/android/abi_gki_aarch64_mtk index e3872c033f45..9a933b3f74c6 100644 --- a/android/abi_gki_aarch64_mtk +++ b/android/abi_gki_aarch64_mtk @@ -395,6 +395,7 @@ device_del device_destroy device_find_child + device_find_child_by_name device_for_each_child device_get_child_node_count device_get_match_data @@ -1200,6 +1201,7 @@ ip_send_check __ipv6_addr_type ipv6_dev_find + ipv6_ext_hdr ipv6_skip_exthdr ipv6_stub __irq_apply_affinity_hint @@ -1337,10 +1339,14 @@ kthread_flush_work kthread_flush_worker __kthread_init_worker + kthread_park + kthread_parkme kthread_queue_delayed_work kthread_queue_work + kthread_should_park kthread_should_stop kthread_stop + kthread_unpark kthread_worker_fn ktime_get ktime_get_coarse_with_offset @@ -1692,6 +1698,7 @@ out_of_line_wait_on_bit_timeout overflowuid page_endio + page_frag_free page_pinner_inited __page_pinner_put_page page_pool_alloc_pages @@ -1715,6 +1722,7 @@ param_ops_uint param_ops_ullong param_ops_ulong + param_ops_ushort param_set_bool param_set_charp param_set_uint @@ -1722,18 +1730,25 @@ pci_alloc_irq_vectors_affinity pci_ats_supported pci_bus_type + pci_clear_master pci_device_group + pci_device_is_present pci_dev_put pci_disable_ats pci_disable_device + pcie_capability_clear_and_set_word + pcie_capability_read_word pci_enable_ats pci_find_ext_capability + pci_free_irq pci_free_irq_vectors pci_generic_config_read32 pci_generic_config_write32 pci_get_slot pci_host_probe pci_irq_vector + pci_load_and_free_saved_state + pci_load_saved_state pci_lock_rescan_remove pcim_enable_device pcim_iomap_regions @@ -1747,10 +1762,12 @@ pci_read_config_word __pci_register_driver pci_remove_root_bus + pci_request_irq pci_restore_state pci_save_state pci_set_master pci_stop_root_bus + pci_store_saved_state 
pci_unlock_rescan_remove pci_unregister_driver pci_write_config_dword @@ -1869,6 +1886,7 @@ __pm_runtime_set_status __pm_runtime_suspend __pm_runtime_use_autosuspend + pm_schedule_suspend __pm_stay_awake pm_stay_awake pm_suspend_default_s2idle @@ -1933,9 +1951,11 @@ queue_delayed_work_on queue_work_on radix_tree_delete + radix_tree_gang_lookup radix_tree_insert radix_tree_lookup radix_tree_maybe_preload + radix_tree_next_chunk radix_tree_tagged ___ratelimit raw_notifier_call_chain @@ -2123,6 +2143,8 @@ rtc_tm_to_time64 rtc_update_irq rtc_valid_tm + rtnl_configure_link + rtnl_create_link rtnl_is_locked rtnl_link_register rtnl_link_unregister @@ -2526,6 +2548,7 @@ timecounter_init timecounter_read timer_of_init + timer_reduce timer_unstable_counter_workaround topology_clear_scale_freq_source topology_update_thermal_pressure @@ -2733,6 +2756,7 @@ __tracepoint_task_newtask trace_print_array_seq trace_print_flags_seq + trace_print_hex_seq trace_print_symbols_seq __trace_puts trace_raw_output_prep From 13e8071ce0cf668209c94953c0148cf8f7d41d2a Mon Sep 17 00:00:00 2001 From: Xiaopeng Bai Date: Thu, 20 Jul 2023 17:15:11 +0800 Subject: [PATCH 015/163] ANDROID: update symbol list for unisoc regmap vendor hook 1 function symbol(s) added 'int __traceiter_android_vh_regmap_update(void*, const struct regmap_config*, struct regmap*)' 1 variable symbol(s) added 'struct tracepoint __tracepoint_android_vh_regmap_update' Bug: 232965613 Change-Id: I9b184315493eacc433b85cd36c3d1c34992b188b Signed-off-by: Xiaopeng Bai --- android/abi_gki_aarch64.stg | 27 +++++++++++++++++++++++++++ android/abi_gki_aarch64_unisoc | 2 ++ 2 files changed, 29 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 291c26e645b8..24f524b3e4c2 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -304086,6 +304086,13 @@ function { parameter_id: 0x188b9e81 parameter_id: 0x3ea31487 } +function { + id: 0x9b5a0fe0 + return_type_id: 0x6720d32f + 
parameter_id: 0x18bd6530 + parameter_id: 0x3df7e337 + parameter_id: 0x09a83f1c +} function { id: 0x9b5aa874 return_type_id: 0x6720d32f @@ -321965,6 +321972,15 @@ elf_symbol { type_id: 0x9bd7019d full_name: "__traceiter_android_vh_record_rwsem_lock_starttime" } +elf_symbol { + id: 0xe2d75052 + name: "__traceiter_android_vh_regmap_update" + is_defined: true + symbol_type: FUNCTION + crc: 0x70e6bb0c + type_id: 0x9b5a0fe0 + full_name: "__traceiter_android_vh_regmap_update" +} elf_symbol { id: 0x8d62858f name: "__traceiter_android_vh_rmqueue_smallest_bypass" @@ -325133,6 +325149,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_vh_record_rwsem_lock_starttime" } +elf_symbol { + id: 0x13b2fb38 + name: "__tracepoint_android_vh_regmap_update" + is_defined: true + symbol_type: OBJECT + crc: 0xcf37e88a + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_regmap_update" +} elf_symbol { id: 0x04365139 name: "__tracepoint_android_vh_rmqueue_smallest_bypass" @@ -377862,6 +377887,7 @@ interface { symbol_id: 0x0fa39b81 symbol_id: 0x92518ec5 symbol_id: 0x9792c22e + symbol_id: 0xe2d75052 symbol_id: 0x8d62858f symbol_id: 0xcef5d79f symbol_id: 0x91384eff @@ -378214,6 +378240,7 @@ interface { symbol_id: 0xef7ad117 symbol_id: 0x4568ff8f symbol_id: 0xe918e2ec + symbol_id: 0x13b2fb38 symbol_id: 0x04365139 symbol_id: 0xd94bc301 symbol_id: 0x3fc5ffc9 diff --git a/android/abi_gki_aarch64_unisoc b/android/abi_gki_aarch64_unisoc index bb8ad74d357d..867436314590 100644 --- a/android/abi_gki_aarch64_unisoc +++ b/android/abi_gki_aarch64_unisoc @@ -714,6 +714,7 @@ __traceiter_android_vh_get_thermal_zone_device __traceiter_android_vh_modify_thermal_request_freq __traceiter_android_vh_modify_thermal_target_freq + __traceiter_android_vh_regmap_update __traceiter_android_vh_scheduler_tick __traceiter_android_vh_thermal_power_cap __traceiter_android_vh_thermal_register @@ -792,6 +793,7 @@ __tracepoint_android_vh_get_thermal_zone_device 
__tracepoint_android_vh_modify_thermal_request_freq __tracepoint_android_vh_modify_thermal_target_freq + __tracepoint_android_vh_regmap_update __tracepoint_android_vh_scheduler_tick __tracepoint_android_vh_thermal_power_cap __tracepoint_android_vh_thermal_register From 7ed895f6b7924f1afebe4bf78f66342e694f8f6e Mon Sep 17 00:00:00 2001 From: Ramji Jiyani Date: Sun, 23 Jul 2023 08:28:40 +0000 Subject: [PATCH 016/163] ANDROID: GKI: Add Android ABI padding to wwan_ops Try to mitigate potential future api changes by adding a padding to struct wwan_ops. Fixes: 214e6f268b6a ("ANDROID: GKI: Add WWAN as GKI protected module") Bug: 287170531 Test: bazel run //common:kernel_aarch64_dist & TH Change-Id: I0a6f8a801503228af11dc227ce703b886a74f288 Signed-off-by: Ramji Jiyani --- include/linux/wwan.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/wwan.h b/include/linux/wwan.h index 5ce2acf444fb..7c2d5db089a1 100644 --- a/include/linux/wwan.h +++ b/include/linux/wwan.h @@ -7,6 +7,7 @@ #include #include #include +#include /** * enum wwan_port_type - WWAN port types @@ -165,6 +166,9 @@ struct wwan_ops { u32 if_id, struct netlink_ext_ack *extack); void (*dellink)(void *ctxt, struct net_device *dev, struct list_head *head); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); }; int wwan_register_ops(struct device *parent, const struct wwan_ops *ops, From dd567c60ff3525ce38d45b0f2c612c82a010a114 Mon Sep 17 00:00:00 2001 From: Ramji Jiyani Date: Mon, 24 Jul 2023 07:15:49 +0000 Subject: [PATCH 017/163] ANDROID: GKI: Add Android ABI padding to wwan_port_ops Try to mitigate potential future api changes by adding a padding to struct wwan_port_ops. 
Fixes: 214e6f268b6a ("ANDROID: GKI: Add WWAN as GKI protected module") Bug: 287170531 Test: bazel run //common:kernel_aarch64_dist & TH Change-Id: I5589d9739ee547a3eb66ded432284691cf962023 Signed-off-by: Ramji Jiyani --- include/linux/wwan.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/linux/wwan.h b/include/linux/wwan.h index 7c2d5db089a1..e3ea3c12c588 100644 --- a/include/linux/wwan.h +++ b/include/linux/wwan.h @@ -61,6 +61,9 @@ struct wwan_port_ops { int (*tx_blocking)(struct wwan_port *port, struct sk_buff *skb); __poll_t (*tx_poll)(struct wwan_port *port, struct file *filp, poll_table *wait); + + ANDROID_KABI_RESERVE(1); + ANDROID_KABI_RESERVE(2); }; /** From 15a4b0d726c4ccd7c8b09cb97a54e4bce59c7f1d Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Thu, 20 Jul 2023 16:50:01 -0700 Subject: [PATCH 018/163] ANDROID: set kmi_symbol_list_add_only for Kleaf builds. On KMI frozen branches, symbols may no longer be removed from KMI symbol lists. This change sets kmi_symbol_list_add_only=true for Kleaf builds. 
Test: Treehugger Bug: 292106238 Change-Id: I74cf98ebad2705b92468c996e9b3b472447e8203 Signed-off-by: Yifan Hong --- BUILD.bazel | 2 ++ 1 file changed, 2 insertions(+) diff --git a/BUILD.bazel b/BUILD.bazel index b148c002b107..2a95ec15d4d2 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -76,6 +76,7 @@ define_common_kernels(target_configs = { "kmi_symbol_list_strict_mode": True, "module_implicit_outs": COMMON_GKI_MODULES_LIST, "kmi_symbol_list": "android/abi_gki_aarch64", + "kmi_symbol_list_add_only": True, "additional_kmi_symbol_lists": [":aarch64_additional_kmi_symbol_lists"], "protected_exports_list": "android/abi_gki_protected_exports_aarch64", "protected_modules_list": "android/gki_aarch64_protected_modules", @@ -90,6 +91,7 @@ define_common_kernels(target_configs = { "kmi_symbol_list_strict_mode": False, "module_implicit_outs": COMMON_GKI_MODULES_LIST, "kmi_symbol_list": "android/abi_gki_aarch64", + "kmi_symbol_list_add_only": True, "additional_kmi_symbol_lists": [":aarch64_additional_kmi_symbol_lists"], "protected_exports_list": "android/abi_gki_protected_exports_aarch64", "protected_modules_list": "android/gki_aarch64_protected_modules", From 0abc74db1acf858c4baeeb648335ba2f3b8a231b Mon Sep 17 00:00:00 2001 From: Ramji Jiyani Date: Thu, 20 Jul 2023 18:15:27 -0700 Subject: [PATCH 019/163] ANDROID: GKI: Move GKI module headers to generated includes Change build time generated GKI module headers location From :- kernel/module/gki_module_*.h To :- include/generated/gki_module_*.h This prevents the kernel source from being contaminated. By placing the header files in a generated directory, the default filters that ignore certain files will work without any special handling required. 
Bug: 286529877 Test: Manual verification & TH Change-Id: Ie247d1c132ddae54906de2e2850e95d7ae9edd50 Signed-off-by: Ramji Jiyani (cherry picked from commit e9cba885543fc50a5b59ff7234d02b74a380573c) --- kernel/module/Makefile | 8 ++++---- kernel/module/gki_module.c | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/kernel/module/Makefile b/kernel/module/Makefile index a23e93c6ef10..458cb6e44e85 100644 --- a/kernel/module/Makefile +++ b/kernel/module/Makefile @@ -25,12 +25,12 @@ obj-$(CONFIG_MODULE_UNLOAD_TAINT_TRACKING) += tracking.o # ANDROID: GKI: Generate headerfiles required for gki_module.o # # Dependencies on generated files need to be listed explicitly -$(obj)/gki_module.o: $(obj)/gki_module_protected_exports.h \ - $(obj)/gki_module_unprotected.h +$(obj)/gki_module.o: include/generated/gki_module_protected_exports.h \ + include/generated/gki_module_unprotected.h ALL_KMI_SYMBOLS := all_kmi_symbols -$(obj)/gki_module_unprotected.h: $(ALL_KMI_SYMBOLS) \ +include/generated/gki_module_unprotected.h: $(ALL_KMI_SYMBOLS) \ $(srctree)/scripts/gen_gki_modules_headers.sh $(Q)$(CONFIG_SHELL) $(srctree)/scripts/gen_gki_modules_headers.sh $@ \ "$(srctree)" \ @@ -48,7 +48,7 @@ else ABI_PROTECTED_EXPORTS_FILE := $(wildcard $(srctree)/android/abi_gki_protected_exports_$(ARCH)) endif -$(obj)/gki_module_protected_exports.h: $(ABI_PROTECTED_EXPORTS_FILE) \ +include/generated/gki_module_protected_exports.h: $(ABI_PROTECTED_EXPORTS_FILE) \ $(srctree)/scripts/gen_gki_modules_headers.sh $(Q)$(CONFIG_SHELL) $(srctree)/scripts/gen_gki_modules_headers.sh $@ \ "$(srctree)" \ diff --git a/kernel/module/gki_module.c b/kernel/module/gki_module.c index 4f124f9a14ec..65a2883b539e 100644 --- a/kernel/module/gki_module.c +++ b/kernel/module/gki_module.c @@ -16,8 +16,8 @@ * gki_module_protected_exports.h -- Symbols protected from _export_ by unsigned modules * gki_module_unprotected.h -- Symbols allowed to _access_ by unsigned modules */ -#include 
"gki_module_protected_exports.h" -#include "gki_module_unprotected.h" +#include +#include #define MAX_STRCMP_LEN (max(MAX_UNPROTECTED_NAME_LEN, MAX_PROTECTED_EXPORTS_NAME_LEN)) From 17a080d04ef4475fc1cbac904aac942ee808dbb3 Mon Sep 17 00:00:00 2001 From: Samuel Gosselin Date: Sat, 22 Jul 2023 20:05:10 +0000 Subject: [PATCH 020/163] ANDROID: ABI: Update pixel symbol list 1 function symbol(s) added 'int extcon_set_property_sync(struct extcon_dev *, unsigned int, unsigned int, union extcon_property_value)' Bug: 289529571 Change-Id: I93a0a0e10540147fa2f626c6b1e8aa62800f58cb Signed-off-by: Samuel Gosselin --- android/abi_gki_aarch64.stg | 10 ++++++++++ android/abi_gki_aarch64_pixel | 1 + 2 files changed, 11 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 24f524b3e4c2..6922b6bf54ba 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -341753,6 +341753,15 @@ elf_symbol { type_id: 0x98851295 full_name: "extcon_set_property_capability" } +elf_symbol { + id: 0xb1dfbb02 + name: "extcon_set_property_sync" + is_defined: true + symbol_type: FUNCTION + crc: 0x710595c3 + type_id: 0x98850898 + full_name: "extcon_set_property_sync" +} elf_symbol { id: 0xacc42253 name: "extcon_set_state" @@ -380084,6 +380093,7 @@ interface { symbol_id: 0x3f648037 symbol_id: 0xc75616d8 symbol_id: 0x467358e5 + symbol_id: 0xb1dfbb02 symbol_id: 0xacc42253 symbol_id: 0x0a446897 symbol_id: 0xb107d2cd diff --git a/android/abi_gki_aarch64_pixel b/android/abi_gki_aarch64_pixel index b50236e61182..4ae71b6faf29 100644 --- a/android/abi_gki_aarch64_pixel +++ b/android/abi_gki_aarch64_pixel @@ -748,6 +748,7 @@ extcon_register_notifier extcon_set_property extcon_set_property_capability + extcon_set_property_sync extcon_set_state_sync extcon_unregister_notifier fasync_helper From 6eb48b89a513f80885fee2bffda0780aba392b23 Mon Sep 17 00:00:00 2001 From: kamasali Satyanarayan Date: Wed, 19 Jul 2023 14:57:13 +0530 Subject: [PATCH 021/163] ANDROID: GKI: 
Update abi_gki_aarch64_qcom Update abi_gki_aarch64_qcom with symbols needed for GCM_AES feature. Leaf changes summary: 3 artifacts changed Changed leaf types summary: 0 leaf type changed Removed/Changed/Added functions summary: 0 Removed, 0 Changed, 2 Added functions Removed/Changed/Added variables summary: 0 Removed, 0 Changed, 0 Added variables 2 Added functions: [A] 'function void crypto_inc(u8*, unsigned int)' [A] 'function void gf128mul_lle(be128*, const be128*)' 1 Added function symbol not referenced by debug info: [A] copy_page Bug: 279879797 Change-Id: I4b735b3517a4cd41c94731577a2b5ba6febaceed Signed-off-by: kamasali Satyanarayan --- android/abi_gki_aarch64.stg | 80 ++++++++++++++++++++++++++++++++++++ android/abi_gki_aarch64_qcom | 3 ++ 2 files changed, 83 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 6922b6bf54ba..44579ad07a54 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -12896,6 +12896,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x83286178 } +pointer_reference { + id: 0x2a5e3596 + kind: POINTER + pointee_type_id: 0x833830c5 +} pointer_reference { id: 0x2a5ed1c1 kind: POINTER @@ -23786,6 +23791,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xfcc23ab7 } +pointer_reference { + id: 0x35a7c1c3 + kind: POINTER + pointee_type_id: 0xfcdfe193 +} pointer_reference { id: 0x35ab6b03 kind: POINTER @@ -28341,6 +28351,11 @@ typedef { name: "bdaddr_t" referred_type_id: 0x0b690cd3 } +typedef { + id: 0x833830c5 + name: "be128" + referred_type_id: 0x3857842d +} typedef { id: 0x117ba19a name: "bh_end_io_t" @@ -33166,6 +33181,11 @@ qualified { qualifier: CONST qualified_type_id: 0x834f5c57 } +qualified { + id: 0xfcdfe193 + qualifier: CONST + qualified_type_id: 0x833830c5 +} qualified { id: 0xfd03f127 qualifier: CONST @@ -40221,6 +40241,11 @@ member { name: "a" type_id: 0xb02b353a } +member { + id: 0x80a3e3d3 + name: "a" + type_id: 0x7877cd32 +} member { id: 0x80f2085f name: 
"a" @@ -49151,6 +49176,12 @@ member { name: "b" type_id: 0x38df449f } +member { + id: 0x4ce00bb0 + name: "b" + type_id: 0x7877cd32 + offset: 64 +} member { id: 0x4cf8b2b0 name: "b" @@ -202748,6 +202779,15 @@ struct_union { member_id: 0x80f2085f } } +struct_union { + id: 0x3857842d + kind: STRUCT + definition { + bytesize: 16 + member_id: 0x80a3e3d3 + member_id: 0x4ce00bb0 + } +} struct_union { id: 0x3876ab11 kind: STRUCT @@ -277470,6 +277510,12 @@ function { return_type_id: 0x48b5725f parameter_id: 0x049d4e97 } +function { + id: 0x11bf8d42 + return_type_id: 0x48b5725f + parameter_id: 0x00c72527 + parameter_id: 0x4585663f +} function { id: 0x11c013b1 return_type_id: 0x48b5725f @@ -282808,6 +282854,12 @@ function { return_type_id: 0x48b5725f parameter_id: 0x2936263d } +function { + id: 0x1ad943f1 + return_type_id: 0x48b5725f + parameter_id: 0x2a5e3596 + parameter_id: 0x35a7c1c3 +} function { id: 0x1ad9d0a2 return_type_id: 0x48b5725f @@ -331299,6 +331351,13 @@ elf_symbol { type_id: 0x11228b4e full_name: "copy_highpage" } +elf_symbol { + id: 0xc1167624 + name: "copy_page" + is_defined: true + symbol_type: FUNCTION + crc: 0x4d0d163d +} elf_symbol { id: 0xd89255c2 name: "cpu_all_bits" @@ -332240,6 +332299,15 @@ elf_symbol { type_id: 0x9112accf full_name: "crypto_has_alg" } +elf_symbol { + id: 0x80e1f666 + name: "crypto_inc" + is_defined: true + symbol_type: FUNCTION + crc: 0x3ef051c8 + type_id: 0x11bf8d42 + full_name: "crypto_inc" +} elf_symbol { id: 0x62173925 name: "crypto_init_queue" @@ -343601,6 +343669,15 @@ elf_symbol { type_id: 0x11a59ba3 full_name: "getboottime64" } +elf_symbol { + id: 0x112db471 + name: "gf128mul_lle" + is_defined: true + symbol_type: FUNCTION + crc: 0x9e13f6f6 + type_id: 0x1ad943f1 + full_name: "gf128mul_lle" +} elf_symbol { id: 0xfe79963a name: "gfn_to_pfn_memslot" @@ -378933,6 +379010,7 @@ interface { symbol_id: 0x9e7d8d76 symbol_id: 0x610edc84 symbol_id: 0xd71898b4 + symbol_id: 0xc1167624 symbol_id: 0xd89255c2 symbol_id: 0x962b6a68 
symbol_id: 0x33bbeca6 @@ -379038,6 +379116,7 @@ interface { symbol_id: 0xbf39e9a5 symbol_id: 0x4d4a15b0 symbol_id: 0xfc625698 + symbol_id: 0x80e1f666 symbol_id: 0x62173925 symbol_id: 0x053cd2eb symbol_id: 0xd1471c13 @@ -380298,6 +380377,7 @@ interface { symbol_id: 0x4ba4e06f symbol_id: 0xbac82e84 symbol_id: 0xa8319a8c + symbol_id: 0x112db471 symbol_id: 0xfe79963a symbol_id: 0xbc19d975 symbol_id: 0x6dc59ee7 diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom index ec0a9c3f3bb3..f1155a5fb2b8 100644 --- a/android/abi_gki_aarch64_qcom +++ b/android/abi_gki_aarch64_qcom @@ -340,6 +340,7 @@ contig_page_data _copy_from_iter copy_from_kernel_nofault + copy_page __copy_overflow _copy_to_iter __cpu_active_mask @@ -429,6 +430,7 @@ crypto_get_default_rng crypto_has_ahash crypto_has_alg + crypto_inc crypto_init_queue __crypto_memneq crypto_put_default_rng @@ -1232,6 +1234,7 @@ get_user_ifreq get_user_pages get_zeroed_page + gf128mul_lle gh_rm_call gh_rm_notifier_register gh_rm_notifier_unregister From 0ee75a672ca5043d1465ac017c2d70879fbbf8f0 Mon Sep 17 00:00:00 2001 From: Konstantin Komarov Date: Mon, 10 Oct 2022 13:15:33 +0300 Subject: [PATCH 022/163] UPSTREAM: fs/ntfs3: Check fields while reading commit 0e8235d28f3a0e9eda9f02ff67ee566d5f42b66b upstream. Added new functions index_hdr_check and index_buf_check. Now we check all stuff for correctness while reading from disk. Also fixed bug with stale nfs data. 
Bug: 286390611 Reported-by: van fantasy Signed-off-by: Konstantin Komarov Fixes: 82cae269cfa95 ("fs/ntfs3: Add initialization of super block") Signed-off-by: Lee Jones Signed-off-by: Greg Kroah-Hartman (cherry picked from commit 000a9a72efa4a9df289bab9c9e8ba1639c72e0d6) Signed-off-by: Lee Jones Change-Id: I2b17511acdef8617aea3fecb45d2f11e49145097 --- fs/ntfs3/index.c | 84 ++++++++++++++++++++++++++++++---- fs/ntfs3/inode.c | 18 ++++---- fs/ntfs3/ntfs_fs.h | 4 +- fs/ntfs3/run.c | 7 ++- fs/ntfs3/xattr.c | 109 +++++++++++++++++++++++++++++---------------- 5 files changed, 164 insertions(+), 58 deletions(-) diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c index c27b4fe57513..24a26744a691 100644 --- a/fs/ntfs3/index.c +++ b/fs/ntfs3/index.c @@ -605,11 +605,58 @@ static const struct NTFS_DE *hdr_insert_head(struct INDEX_HDR *hdr, return e; } +/* + * index_hdr_check + * + * return true if INDEX_HDR is valid + */ +static bool index_hdr_check(const struct INDEX_HDR *hdr, u32 bytes) +{ + u32 end = le32_to_cpu(hdr->used); + u32 tot = le32_to_cpu(hdr->total); + u32 off = le32_to_cpu(hdr->de_off); + + if (!IS_ALIGNED(off, 8) || tot > bytes || end > tot || + off + sizeof(struct NTFS_DE) > end) { + /* incorrect index buffer. */ + return false; + } + + return true; +} + +/* + * index_buf_check + * + * return true if INDEX_BUFFER seems is valid + */ +static bool index_buf_check(const struct INDEX_BUFFER *ib, u32 bytes, + const CLST *vbn) +{ + const struct NTFS_RECORD_HEADER *rhdr = &ib->rhdr; + u16 fo = le16_to_cpu(rhdr->fix_off); + u16 fn = le16_to_cpu(rhdr->fix_num); + + if (bytes <= offsetof(struct INDEX_BUFFER, ihdr) || + rhdr->sign != NTFS_INDX_SIGNATURE || + fo < sizeof(struct INDEX_BUFFER) + /* Check index buffer vbn. */ + || (vbn && *vbn != le64_to_cpu(ib->vbn)) || (fo % sizeof(short)) || + fo + fn * sizeof(short) >= bytes || + fn != ((bytes >> SECTOR_SHIFT) + 1)) { + /* incorrect index buffer. 
*/ + return false; + } + + return index_hdr_check(&ib->ihdr, + bytes - offsetof(struct INDEX_BUFFER, ihdr)); +} + void fnd_clear(struct ntfs_fnd *fnd) { int i; - for (i = 0; i < fnd->level; i++) { + for (i = fnd->level - 1; i >= 0; i--) { struct indx_node *n = fnd->nodes[i]; if (!n) @@ -820,9 +867,16 @@ int indx_init(struct ntfs_index *indx, struct ntfs_sb_info *sbi, u32 t32; const struct INDEX_ROOT *root = resident_data(attr); + t32 = le32_to_cpu(attr->res.data_size); + if (t32 <= offsetof(struct INDEX_ROOT, ihdr) || + !index_hdr_check(&root->ihdr, + t32 - offsetof(struct INDEX_ROOT, ihdr))) { + goto out; + } + /* Check root fields. */ if (!root->index_block_clst) - return -EINVAL; + goto out; indx->type = type; indx->idx2vbn_bits = __ffs(root->index_block_clst); @@ -834,19 +888,19 @@ int indx_init(struct ntfs_index *indx, struct ntfs_sb_info *sbi, if (t32 < sbi->cluster_size) { /* Index record is smaller than a cluster, use 512 blocks. */ if (t32 != root->index_block_clst * SECTOR_SIZE) - return -EINVAL; + goto out; /* Check alignment to a cluster. */ if ((sbi->cluster_size >> SECTOR_SHIFT) & (root->index_block_clst - 1)) { - return -EINVAL; + goto out; } indx->vbn2vbo_bits = SECTOR_SHIFT; } else { /* Index record must be a multiple of cluster size. */ if (t32 != root->index_block_clst << sbi->cluster_bits) - return -EINVAL; + goto out; indx->vbn2vbo_bits = sbi->cluster_bits; } @@ -854,7 +908,14 @@ int indx_init(struct ntfs_index *indx, struct ntfs_sb_info *sbi, init_rwsem(&indx->run_lock); indx->cmp = get_cmp_func(root); - return indx->cmp ? 
0 : -EINVAL; + if (!indx->cmp) + goto out; + + return 0; + +out: + ntfs_set_state(sbi, NTFS_DIRTY_DIRTY); + return -EINVAL; } static struct indx_node *indx_new(struct ntfs_index *indx, @@ -1012,6 +1073,13 @@ int indx_read(struct ntfs_index *indx, struct ntfs_inode *ni, CLST vbn, goto out; ok: + if (!index_buf_check(ib, bytes, &vbn)) { + ntfs_inode_err(&ni->vfs_inode, "directory corrupted"); + ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR); + err = -EINVAL; + goto out; + } + if (err == -E_NTFS_FIXUP) { ntfs_write_bh(ni->mi.sbi, &ib->rhdr, &in->nb, 0); err = 0; @@ -1599,9 +1667,9 @@ static int indx_insert_into_root(struct ntfs_index *indx, struct ntfs_inode *ni, if (err) { /* Restore root. */ - if (mi_resize_attr(mi, attr, -ds_root)) + if (mi_resize_attr(mi, attr, -ds_root)) { memcpy(attr, a_root, asize); - else { + } else { /* Bug? */ ntfs_set_state(sbi, NTFS_DIRTY_ERROR); } diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c index 22152300e60c..ece7daa2266a 100644 --- a/fs/ntfs3/inode.c +++ b/fs/ntfs3/inode.c @@ -81,7 +81,7 @@ static struct inode *ntfs_read_mft(struct inode *inode, le16_to_cpu(ref->seq), le16_to_cpu(rec->seq)); goto out; } else if (!is_rec_inuse(rec)) { - err = -EINVAL; + err = -ESTALE; ntfs_err(sb, "Inode r=%x is not in use!", (u32)ino); goto out; } @@ -92,8 +92,10 @@ static struct inode *ntfs_read_mft(struct inode *inode, goto out; } - if (!is_rec_base(rec)) - goto Ok; + if (!is_rec_base(rec)) { + err = -EINVAL; + goto out; + } /* Record should contain $I30 root. 
*/ is_dir = rec->flags & RECORD_FLAG_DIR; @@ -466,7 +468,6 @@ static struct inode *ntfs_read_mft(struct inode *inode, inode->i_flags |= S_NOSEC; } -Ok: if (ino == MFT_REC_MFT && !sb->s_root) sbi->mft.ni = NULL; @@ -520,6 +521,9 @@ struct inode *ntfs_iget5(struct super_block *sb, const struct MFT_REF *ref, _ntfs_bad_inode(inode); } + if (IS_ERR(inode) && name) + ntfs_set_state(sb->s_fs_info, NTFS_DIRTY_ERROR); + return inode; } @@ -1635,10 +1639,8 @@ struct inode *ntfs_create_inode(struct user_namespace *mnt_userns, ntfs_remove_reparse(sbi, IO_REPARSE_TAG_SYMLINK, &new_de->ref); out5: - if (S_ISDIR(mode) || run_is_empty(&ni->file.run)) - goto out4; - - run_deallocate(sbi, &ni->file.run, false); + if (!S_ISDIR(mode)) + run_deallocate(sbi, &ni->file.run, false); out4: clear_rec_inuse(rec); diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h index ca8b4d273feb..60c944d2811d 100644 --- a/fs/ntfs3/ntfs_fs.h +++ b/fs/ntfs3/ntfs_fs.h @@ -794,12 +794,12 @@ int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf, u32 run_buf_size, CLST *packed_vcns); int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino, CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf, - u32 run_buf_size); + int run_buf_size); #ifdef NTFS3_CHECK_FREE_CLST int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino, CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf, - u32 run_buf_size); + int run_buf_size); #else #define run_unpack_ex run_unpack #endif diff --git a/fs/ntfs3/run.c b/fs/ntfs3/run.c index aaaa0d3d35a2..12d8682f33b5 100644 --- a/fs/ntfs3/run.c +++ b/fs/ntfs3/run.c @@ -919,12 +919,15 @@ int run_pack(const struct runs_tree *run, CLST svcn, CLST len, u8 *run_buf, */ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino, CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf, - u32 run_buf_size) + int run_buf_size) { u64 prev_lcn, vcn64, lcn, next_vcn; const u8 *run_last, *run_0; bool is_mft = ino == MFT_REC_MFT; + if 
(run_buf_size < 0) + return -EINVAL; + /* Check for empty. */ if (evcn + 1 == svcn) return 0; @@ -1046,7 +1049,7 @@ int run_unpack(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino, */ int run_unpack_ex(struct runs_tree *run, struct ntfs_sb_info *sbi, CLST ino, CLST svcn, CLST evcn, CLST vcn, const u8 *run_buf, - u32 run_buf_size) + int run_buf_size) { int ret, err; CLST next_vcn, lcn, len; diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c index ea582b4fe1d9..884781e423e1 100644 --- a/fs/ntfs3/xattr.c +++ b/fs/ntfs3/xattr.c @@ -42,28 +42,26 @@ static inline size_t packed_ea_size(const struct EA_FULL *ea) * Assume there is at least one xattr in the list. */ static inline bool find_ea(const struct EA_FULL *ea_all, u32 bytes, - const char *name, u8 name_len, u32 *off) + const char *name, u8 name_len, u32 *off, u32 *ea_sz) { - *off = 0; + u32 ea_size; - if (!ea_all || !bytes) + *off = 0; + if (!ea_all) return false; - for (;;) { + for (; *off < bytes; *off += ea_size) { const struct EA_FULL *ea = Add2Ptr(ea_all, *off); - u32 next_off = *off + unpacked_ea_size(ea); - - if (next_off > bytes) - return false; - + ea_size = unpacked_ea_size(ea); if (ea->name_len == name_len && - !memcmp(ea->name, name, name_len)) + !memcmp(ea->name, name, name_len)) { + if (ea_sz) + *ea_sz = ea_size; return true; - - *off = next_off; - if (next_off >= bytes) - return false; + } } + + return false; } /* @@ -74,12 +72,12 @@ static inline bool find_ea(const struct EA_FULL *ea_all, u32 bytes, static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea, size_t add_bytes, const struct EA_INFO **info) { - int err; + int err = -EINVAL; struct ntfs_sb_info *sbi = ni->mi.sbi; struct ATTR_LIST_ENTRY *le = NULL; struct ATTRIB *attr_info, *attr_ea; void *ea_p; - u32 size; + u32 size, off, ea_size; static_assert(le32_to_cpu(ATTR_EA_INFO) < le32_to_cpu(ATTR_EA)); @@ -96,24 +94,31 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea, *info = resident_data_ex(attr_info, 
sizeof(struct EA_INFO)); if (!*info) - return -EINVAL; + goto out; /* Check Ea limit. */ size = le32_to_cpu((*info)->size); - if (size > sbi->ea_max_size) - return -EFBIG; + if (size > sbi->ea_max_size) { + err = -EFBIG; + goto out; + } - if (attr_size(attr_ea) > sbi->ea_max_size) - return -EFBIG; + if (attr_size(attr_ea) > sbi->ea_max_size) { + err = -EFBIG; + goto out; + } + + if (!size) { + /* EA info persists, but xattr is empty. Looks like EA problem. */ + goto out; + } /* Allocate memory for packed Ea. */ ea_p = kmalloc(size_add(size, add_bytes), GFP_NOFS); if (!ea_p) return -ENOMEM; - if (!size) { - /* EA info persists, but xattr is empty. Looks like EA problem. */ - } else if (attr_ea->non_res) { + if (attr_ea->non_res) { struct runs_tree run; run_init(&run); @@ -124,24 +129,52 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea, run_close(&run); if (err) - goto out; + goto out1; } else { void *p = resident_data_ex(attr_ea, size); - if (!p) { - err = -EINVAL; - goto out; - } + if (!p) + goto out1; memcpy(ea_p, p, size); } memset(Add2Ptr(ea_p, size), 0, add_bytes); + + /* Check all attributes for consistency. */ + for (off = 0; off < size; off += ea_size) { + const struct EA_FULL *ef = Add2Ptr(ea_p, off); + u32 bytes = size - off; + + /* Check if we can use field ea->size. */ + if (bytes < sizeof(ef->size)) + goto out1; + + if (ef->size) { + ea_size = le32_to_cpu(ef->size); + if (ea_size > bytes) + goto out1; + continue; + } + + /* Check if we can use fields ef->name_len and ef->elength. 
*/ + if (bytes < offsetof(struct EA_FULL, name)) + goto out1; + + ea_size = ALIGN(struct_size(ef, name, + 1 + ef->name_len + + le16_to_cpu(ef->elength)), + 4); + if (ea_size > bytes) + goto out1; + } + *ea = ea_p; return 0; -out: +out1: kfree(ea_p); - *ea = NULL; +out: + ntfs_set_state(sbi, NTFS_DIRTY_DIRTY); return err; } @@ -163,6 +196,7 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer, const struct EA_FULL *ea; u32 off, size; int err; + int ea_size; size_t ret; err = ntfs_read_ea(ni, &ea_all, 0, &info); @@ -175,8 +209,9 @@ static ssize_t ntfs_list_ea(struct ntfs_inode *ni, char *buffer, size = le32_to_cpu(info->size); /* Enumerate all xattrs. */ - for (ret = 0, off = 0; off < size; off += unpacked_ea_size(ea)) { + for (ret = 0, off = 0; off < size; off += ea_size) { ea = Add2Ptr(ea_all, off); + ea_size = unpacked_ea_size(ea); if (buffer) { if (ret + ea->name_len + 1 > bytes_per_buffer) { @@ -227,7 +262,8 @@ static int ntfs_get_ea(struct inode *inode, const char *name, size_t name_len, goto out; /* Enumerate all xattrs. 
*/ - if (!find_ea(ea_all, le32_to_cpu(info->size), name, name_len, &off)) { + if (!find_ea(ea_all, le32_to_cpu(info->size), name, name_len, &off, + NULL)) { err = -ENODATA; goto out; } @@ -269,7 +305,7 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name, struct EA_FULL *new_ea; struct EA_FULL *ea_all = NULL; size_t add, new_pack; - u32 off, size; + u32 off, size, ea_sz; __le16 size_pack; struct ATTRIB *attr; struct ATTR_LIST_ENTRY *le; @@ -304,9 +340,8 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name, size_pack = ea_info.size_pack; } - if (info && find_ea(ea_all, size, name, name_len, &off)) { + if (info && find_ea(ea_all, size, name, name_len, &off, &ea_sz)) { struct EA_FULL *ea; - size_t ea_sz; if (flags & XATTR_CREATE) { err = -EEXIST; @@ -329,8 +364,6 @@ static noinline int ntfs_set_ea(struct inode *inode, const char *name, if (ea->flags & FILE_NEED_EA) le16_add_cpu(&ea_info.count, -1); - ea_sz = unpacked_ea_size(ea); - le16_add_cpu(&ea_info.size_pack, 0 - packed_ea_size(ea)); memmove(ea, Add2Ptr(ea, ea_sz), size - off - ea_sz); From 1bb5e7fb374bcc59a940f3eb3dab1a1195ddbbca Mon Sep 17 00:00:00 2001 From: Venkata Rao Kakani Date: Tue, 18 Jul 2023 09:51:21 +0530 Subject: [PATCH 023/163] ANDROID: abi_gki_aarch64_qcom: update abi Update the qcom symbol list for iommu_group_remove_device Symbols added: iommu_group_remove_device Bug: 291567032 Change-Id: Ie53809a8b22259db07cc43b008a7fe5b324e3e65 Signed-off-by: Venkata Rao Kakani --- android/abi_gki_aarch64.stg | 10 ++++++++++ android/abi_gki_aarch64_qcom | 1 + 2 files changed, 11 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 44579ad07a54..e30c34f73e88 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -347323,6 +347323,15 @@ elf_symbol { type_id: 0x7ceab5d7 full_name: "iommu_group_ref_get" } +elf_symbol { + id: 0x87342c78 + name: "iommu_group_remove_device" + is_defined: true + symbol_type: FUNCTION + 
crc: 0x65e2cdf3 + type_id: 0x100e6fc8 + full_name: "iommu_group_remove_device" +} elf_symbol { id: 0x1f9ceb72 name: "iommu_group_set_iommudata" @@ -380783,6 +380792,7 @@ interface { symbol_id: 0xadf1bba5 symbol_id: 0x1a299344 symbol_id: 0xe52a90e5 + symbol_id: 0x87342c78 symbol_id: 0x1f9ceb72 symbol_id: 0x119c23e5 symbol_id: 0x9aea043a diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom index f1155a5fb2b8..b2ac2bce8d43 100644 --- a/android/abi_gki_aarch64_qcom +++ b/android/abi_gki_aarch64_qcom @@ -1544,6 +1544,7 @@ iommu_group_get_iommudata iommu_group_put iommu_group_ref_get + iommu_group_remove_device iommu_group_set_iommudata iommu_iova_to_phys iommu_map From f091cc74342604b2a2b0bb64114ccb5605e88737 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 16 Jun 2023 14:45:22 +0200 Subject: [PATCH 024/163] UPSTREAM: netfilter: nf_tables: fix chain binding transaction logic [ Upstream commit 4bedf9eee016286c835e3d8fa981ddece5338795 ] Add bound flag to rule and chain transactions as in 6a0a8d10a366 ("netfilter: nf_tables: use-after-free in failing rule with bound set") to skip them in case that the chain is already bound from the abort path. This patch fixes an imbalance in the chain use refcnt that triggers a WARN_ON on the table and chain destroy path. This patch also disallows nested chain bindings, which is not supported from userspace. The logic to deal with chain binding in nft_data_hold() and nft_data_release() is not correct. The NFT_TRANS_PREPARE state needs a special handling in case a chain is bound but next expressions in the same rule fail to initialize as described by 1240eb93f061 ("netfilter: nf_tables: incorrect error path handling with NFT_MSG_NEWRULE"). 
The chain is left bound if rule construction fails, so the objects stored in this chain (and the chain itself) are released by the transaction records from the abort path, follow up patch ("netfilter: nf_tables: add NFT_TRANS_PREPARE_ERROR to deal with bound set/chain") completes this error handling. When deleting an existing rule, chain bound flag is set off so the rule expression .destroy path releases the objects. Bug: 292097846 Fixes: d0e2c7de92c7 ("netfilter: nf_tables: add NFT_CHAIN_BINDING") Signed-off-by: Pablo Neira Ayuso Signed-off-by: Sasha Levin (cherry picked from commit 891cd2edddc76c58e842706ad27e2ff96000bd5d) Signed-off-by: Lee Jones Change-Id: I8a8cf012e9e6fd0d0081f3f7616c9cf31ea02989 --- include/net/netfilter/nf_tables.h | 21 +++++++- net/netfilter/nf_tables_api.c | 86 +++++++++++++++++++----------- net/netfilter/nft_immediate.c | 87 +++++++++++++++++++++++++++---- 3 files changed, 153 insertions(+), 41 deletions(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index a1ccf1276f3e..cfc9aa5c059f 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1000,7 +1000,10 @@ static inline struct nft_userdata *nft_userdata(const struct nft_rule *rule) return (void *)&rule->data[rule->dlen]; } -void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule); +void nft_rule_expr_activate(const struct nft_ctx *ctx, struct nft_rule *rule); +void nft_rule_expr_deactivate(const struct nft_ctx *ctx, struct nft_rule *rule, + enum nft_trans_phase phase); +void nf_tables_rule_destroy(const struct nft_ctx *ctx, struct nft_rule *rule); static inline void nft_set_elem_update_expr(const struct nft_set_ext *ext, struct nft_regs *regs, @@ -1083,6 +1086,7 @@ int nft_setelem_validate(const struct nft_ctx *ctx, struct nft_set *set, const struct nft_set_iter *iter, struct nft_set_elem *elem); int nft_set_catchall_validate(const struct nft_ctx *ctx, struct nft_set *set); +int 
nf_tables_bind_chain(const struct nft_ctx *ctx, struct nft_chain *chain); enum nft_chain_types { NFT_CHAIN_T_DEFAULT = 0, @@ -1119,11 +1123,17 @@ int nft_chain_validate_dependency(const struct nft_chain *chain, int nft_chain_validate_hooks(const struct nft_chain *chain, unsigned int hook_flags); +static inline bool nft_chain_binding(const struct nft_chain *chain) +{ + return chain->flags & NFT_CHAIN_BINDING; +} + static inline bool nft_chain_is_bound(struct nft_chain *chain) { return (chain->flags & NFT_CHAIN_BINDING) && chain->bound; } +int nft_chain_add(struct nft_table *table, struct nft_chain *chain); void nft_chain_del(struct nft_chain *chain); void nf_tables_chain_destroy(struct nft_ctx *ctx); @@ -1558,6 +1568,7 @@ struct nft_trans_rule { struct nft_rule *rule; struct nft_flow_rule *flow; u32 rule_id; + bool bound; }; #define nft_trans_rule(trans) \ @@ -1566,6 +1577,8 @@ struct nft_trans_rule { (((struct nft_trans_rule *)trans->data)->flow) #define nft_trans_rule_id(trans) \ (((struct nft_trans_rule *)trans->data)->rule_id) +#define nft_trans_rule_bound(trans) \ + (((struct nft_trans_rule *)trans->data)->bound) struct nft_trans_set { struct nft_set *set; @@ -1590,13 +1603,17 @@ struct nft_trans_set { (((struct nft_trans_set *)trans->data)->gc_int) struct nft_trans_chain { + struct nft_chain *chain; bool update; char *name; struct nft_stats __percpu *stats; u8 policy; + bool bound; u32 chain_id; }; +#define nft_trans_chain(trans) \ + (((struct nft_trans_chain *)trans->data)->chain) #define nft_trans_chain_update(trans) \ (((struct nft_trans_chain *)trans->data)->update) #define nft_trans_chain_name(trans) \ @@ -1605,6 +1622,8 @@ struct nft_trans_chain { (((struct nft_trans_chain *)trans->data)->stats) #define nft_trans_chain_policy(trans) \ (((struct nft_trans_chain *)trans->data)->policy) +#define nft_trans_chain_bound(trans) \ + (((struct nft_trans_chain *)trans->data)->bound) #define nft_trans_chain_id(trans) \ (((struct nft_trans_chain 
*)trans->data)->chain_id) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 7a2cc24e9a33..c8786b24ab42 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -195,6 +195,48 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set) } } +static void nft_chain_trans_bind(const struct nft_ctx *ctx, struct nft_chain *chain) +{ + struct nftables_pernet *nft_net; + struct net *net = ctx->net; + struct nft_trans *trans; + + if (!nft_chain_binding(chain)) + return; + + nft_net = nft_pernet(net); + list_for_each_entry_reverse(trans, &nft_net->commit_list, list) { + switch (trans->msg_type) { + case NFT_MSG_NEWCHAIN: + if (nft_trans_chain(trans) == chain) + nft_trans_chain_bound(trans) = true; + break; + case NFT_MSG_NEWRULE: + if (trans->ctx.chain == chain) + nft_trans_rule_bound(trans) = true; + break; + } + } +} + +int nf_tables_bind_chain(const struct nft_ctx *ctx, struct nft_chain *chain) +{ + if (!nft_chain_binding(chain)) + return 0; + + if (nft_chain_binding(ctx->chain)) + return -EOPNOTSUPP; + + if (chain->bound) + return -EBUSY; + + chain->bound = true; + chain->use++; + nft_chain_trans_bind(ctx, chain); + + return 0; +} + static int nft_netdev_register_hooks(struct net *net, struct list_head *hook_list) { @@ -340,8 +382,9 @@ static struct nft_trans *nft_trans_chain_add(struct nft_ctx *ctx, int msg_type) ntohl(nla_get_be32(ctx->nla[NFTA_CHAIN_ID])); } } - + nft_trans_chain(trans) = ctx->chain; nft_trans_commit_list_add_tail(ctx->net, trans); + return trans; } @@ -359,8 +402,7 @@ static int nft_delchain(struct nft_ctx *ctx) return 0; } -static void nft_rule_expr_activate(const struct nft_ctx *ctx, - struct nft_rule *rule) +void nft_rule_expr_activate(const struct nft_ctx *ctx, struct nft_rule *rule) { struct nft_expr *expr; @@ -373,9 +415,8 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx, } } -static void nft_rule_expr_deactivate(const struct nft_ctx *ctx, - struct 
nft_rule *rule, - enum nft_trans_phase phase) +void nft_rule_expr_deactivate(const struct nft_ctx *ctx, struct nft_rule *rule, + enum nft_trans_phase phase) { struct nft_expr *expr; @@ -2188,7 +2229,7 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family, return 0; } -static int nft_chain_add(struct nft_table *table, struct nft_chain *chain) +int nft_chain_add(struct nft_table *table, struct nft_chain *chain) { int err; @@ -3315,8 +3356,7 @@ static int nf_tables_getrule(struct sk_buff *skb, const struct nfnl_info *info, return err; } -static void nf_tables_rule_destroy(const struct nft_ctx *ctx, - struct nft_rule *rule) +void nf_tables_rule_destroy(const struct nft_ctx *ctx, struct nft_rule *rule) { struct nft_expr *expr, *next; @@ -3333,7 +3373,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx, kfree(rule); } -void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule) +static void nf_tables_rule_release(const struct nft_ctx *ctx, struct nft_rule *rule) { nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE); nf_tables_rule_destroy(ctx, rule); @@ -6447,7 +6487,6 @@ static int nf_tables_newsetelem(struct sk_buff *skb, void nft_data_hold(const struct nft_data *data, enum nft_data_types type) { struct nft_chain *chain; - struct nft_rule *rule; if (type == NFT_DATA_VERDICT) { switch (data->verdict.code) { @@ -6455,15 +6494,6 @@ void nft_data_hold(const struct nft_data *data, enum nft_data_types type) case NFT_GOTO: chain = data->verdict.chain; chain->use++; - - if (!nft_chain_is_bound(chain)) - break; - - chain->table->use++; - list_for_each_entry(rule, &chain->rules, list) - chain->use++; - - nft_chain_add(chain->table, chain); break; } } @@ -9325,7 +9355,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) kfree(nft_trans_chain_name(trans)); nft_trans_destroy(trans); } else { - if (nft_chain_is_bound(trans->ctx.chain)) { + if (nft_trans_chain_bound(trans)) { 
nft_trans_destroy(trans); break; } @@ -9342,6 +9372,10 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_trans_destroy(trans); break; case NFT_MSG_NEWRULE: + if (nft_trans_rule_bound(trans)) { + nft_trans_destroy(trans); + break; + } trans->ctx.chain->use--; list_del_rcu(&nft_trans_rule(trans)->list); nft_rule_expr_deactivate(&trans->ctx, @@ -9893,22 +9927,12 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, static void nft_verdict_uninit(const struct nft_data *data) { struct nft_chain *chain; - struct nft_rule *rule; switch (data->verdict.code) { case NFT_JUMP: case NFT_GOTO: chain = data->verdict.chain; chain->use--; - - if (!nft_chain_is_bound(chain)) - break; - - chain->table->use--; - list_for_each_entry(rule, &chain->rules, list) - chain->use--; - - nft_chain_del(chain); break; } } diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index 5f28b21abc7d..457fc1e21841 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c @@ -76,11 +76,9 @@ static int nft_immediate_init(const struct nft_ctx *ctx, switch (priv->data.verdict.code) { case NFT_JUMP: case NFT_GOTO: - if (nft_chain_is_bound(chain)) { - err = -EBUSY; - goto err1; - } - chain->bound = true; + err = nf_tables_bind_chain(ctx, chain); + if (err < 0) + return err; break; default: break; @@ -98,6 +96,31 @@ static void nft_immediate_activate(const struct nft_ctx *ctx, const struct nft_expr *expr) { const struct nft_immediate_expr *priv = nft_expr_priv(expr); + const struct nft_data *data = &priv->data; + struct nft_ctx chain_ctx; + struct nft_chain *chain; + struct nft_rule *rule; + + if (priv->dreg == NFT_REG_VERDICT) { + switch (data->verdict.code) { + case NFT_JUMP: + case NFT_GOTO: + chain = data->verdict.chain; + if (!nft_chain_binding(chain)) + break; + + chain_ctx = *ctx; + chain_ctx.chain = chain; + + list_for_each_entry(rule, &chain->rules, list) + nft_rule_expr_activate(&chain_ctx, rule); 
+ + nft_clear(ctx->net, chain); + break; + default: + break; + } + } return nft_data_hold(&priv->data, nft_dreg_to_type(priv->dreg)); } @@ -107,6 +130,40 @@ static void nft_immediate_deactivate(const struct nft_ctx *ctx, enum nft_trans_phase phase) { const struct nft_immediate_expr *priv = nft_expr_priv(expr); + const struct nft_data *data = &priv->data; + struct nft_ctx chain_ctx; + struct nft_chain *chain; + struct nft_rule *rule; + + if (priv->dreg == NFT_REG_VERDICT) { + switch (data->verdict.code) { + case NFT_JUMP: + case NFT_GOTO: + chain = data->verdict.chain; + if (!nft_chain_binding(chain)) + break; + + chain_ctx = *ctx; + chain_ctx.chain = chain; + + list_for_each_entry(rule, &chain->rules, list) + nft_rule_expr_deactivate(&chain_ctx, rule, phase); + + switch (phase) { + case NFT_TRANS_PREPARE: + nft_deactivate_next(ctx->net, chain); + break; + default: + nft_chain_del(chain); + chain->bound = false; + chain->table->use--; + break; + } + break; + default: + break; + } + } if (phase == NFT_TRANS_COMMIT) return; @@ -131,15 +188,27 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx, case NFT_GOTO: chain = data->verdict.chain; - if (!nft_chain_is_bound(chain)) + if (!nft_chain_binding(chain)) break; + /* Rule construction failed, but chain is already bound: + * let the transaction records release this chain and its rules. + */ + if (chain->bound) { + chain->use--; + break; + } + + /* Rule has been deleted, release chain and its rules. 
*/ chain_ctx = *ctx; chain_ctx.chain = chain; - list_for_each_entry_safe(rule, n, &chain->rules, list) - nf_tables_rule_release(&chain_ctx, rule); - + chain->use--; + list_for_each_entry_safe(rule, n, &chain->rules, list) { + chain->use--; + list_del(&rule->list); + nf_tables_rule_destroy(&chain_ctx, rule); + } nf_tables_chain_destroy(&chain_ctx); break; default: From fcdea346bb076b07d4b7a697311217bd8b12e21a Mon Sep 17 00:00:00 2001 From: M A Ramdhan Date: Wed, 5 Jul 2023 12:15:30 -0400 Subject: [PATCH 025/163] UPSTREAM: net/sched: cls_fw: Fix improper refcount update leads to use-after-free [ Upstream commit 0323bce598eea038714f941ce2b22541c46d488f ] In the event of a failure in tcf_change_indev(), fw_set_parms() will immediately return an error after incrementing or decrementing reference counter in tcf_bind_filter(). If attacker can control reference counter to zero and make reference freed, leading to use after free. In order to prevent this, move the point of possible failure above the point where the TC_FW_CLASSID is handled. 
Bug: 292252062 Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reported-by: M A Ramdhan Signed-off-by: M A Ramdhan Acked-by: Jamal Hadi Salim Reviewed-by: Pedro Tammela Message-ID: <20230705161530.52003-1-ramdhan@starlabs.sg> Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin (cherry picked from commit c91fb29bb07ee4dd40aabd1e41f19c0f92ac3199) Signed-off-by: Lee Jones Change-Id: I9bf6f540b4eb23ea5641fb3efe6f3e621d7b6151 --- net/sched/cls_fw.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index a32351da968c..1212b057b129 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -210,11 +210,6 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp, if (err < 0) return err; - if (tb[TCA_FW_CLASSID]) { - f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]); - tcf_bind_filter(tp, &f->res, base); - } - if (tb[TCA_FW_INDEV]) { int ret; ret = tcf_change_indev(net, tb[TCA_FW_INDEV], extack); @@ -231,6 +226,11 @@ static int fw_set_parms(struct net *net, struct tcf_proto *tp, } else if (head->mask != 0xFFFFFFFF) return err; + if (tb[TCA_FW_CLASSID]) { + f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]); + tcf_bind_filter(tp, &f->res, base); + } + return 0; } From 342aff08ae23fc8432a2e83887841ce0e83a623c Mon Sep 17 00:00:00 2001 From: Jacky Liu Date: Tue, 25 Jul 2023 17:45:12 +0800 Subject: [PATCH 026/163] ANDROID: cgroup: Cleanup android_rvh_cgroup_force_kthread_migration android_rvh_cgroup_force_kthread_migration was removed by commit b0ea1feeefe0 ("Revert "ANDROID: cgroup: Add android_rvh_cgroup_force_kthread_migration"") but was then accidentally added back by commit 5f657b04f4f2 ("ANDROID: subsystem-specific vendor_hooks.c for sched"). It's not working, remove it again. 
Fixes: 5f657b04f4f2 ("ANDROID: subsystem-specific vendor_hooks.c for sched") Change-Id: Ia2d39824df2340f6b83050b2805a052ffa57f171 Signed-off-by: Jacky Liu --- include/trace/hooks/cgroup.h | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/include/trace/hooks/cgroup.h b/include/trace/hooks/cgroup.h index dc6b47dcb21d..a50e6abc55ee 100644 --- a/include/trace/hooks/cgroup.h +++ b/include/trace/hooks/cgroup.h @@ -21,11 +21,7 @@ DECLARE_RESTRICTED_HOOK(android_rvh_refrigerator, DECLARE_HOOK(android_vh_cgroup_attach, TP_PROTO(struct cgroup_subsys *ss, struct cgroup_taskset *tset), - TP_ARGS(ss, tset)) -DECLARE_RESTRICTED_HOOK(android_rvh_cgroup_force_kthread_migration, - TP_PROTO(struct task_struct *tsk, struct cgroup *dst_cgrp, bool *force_migration), - TP_ARGS(tsk, dst_cgrp, force_migration), 1); - + TP_ARGS(ss, tset)); DECLARE_RESTRICTED_HOOK(android_rvh_cpuset_fork, TP_PROTO(struct task_struct *p, bool *inherit_cpus), From 6c48edb9c92dfe3bba35fe56622bb0c0ee6b2b76 Mon Sep 17 00:00:00 2001 From: Cixi Geng Date: Tue, 25 Jul 2023 14:55:05 +0800 Subject: [PATCH 027/163] ANDROID: GKI: add function symbols for unisoc INFO: 10 function symbol(s) added 'void drm_send_event_timestamp_locked(struct drm_device*, struct drm_pending_event*, ktime_t)' 'int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device*, u16)' 'int of_get_drm_display_mode(struct device_node*, struct drm_display_mode*, u32*, int)' 'int regmap_get_reg_stride(struct regmap*)' 'struct regulator_dev* regulator_register(struct device*, const struct regulator_desc*, const struct regulator_config*)' 'struct snd_kcontrol* snd_ctl_find_id(struct snd_card*, struct snd_ctl_elem_id*)' 'int snd_info_get_line(struct snd_info_buffer*, char*, int)' 'unsigned int snd_pcm_rate_bit_to_rate(unsigned int)' 'unsigned int snd_pcm_rate_to_rate_bit(unsigned int)' 'void tty_port_link_device(struct tty_port*, struct tty_driver*, unsigned int)' Bug: 292812341 Change-Id: 
Ibaed96732ac53f824d4d12fb6ecad7bd63fcea8f Signed-off-by: Cixi Geng --- android/abi_gki_aarch64.stg | 135 +++++++++++++++++++++++++++++++++ android/abi_gki_aarch64_unisoc | 14 ++++ 2 files changed, 149 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index e30c34f73e88..a8fc00774738 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -280801,6 +280801,13 @@ function { parameter_id: 0x105c8027 parameter_id: 0x914dbfdc } +function { + id: 0x16cab29d + return_type_id: 0x48b5725f + parameter_id: 0x1a1051a0 + parameter_id: 0x31e58fe0 + parameter_id: 0x4585663f +} function { id: 0x16cc357f return_type_id: 0x48b5725f @@ -285186,6 +285193,13 @@ function { parameter_id: 0x4585663f parameter_id: 0x6720d32f } +function { + id: 0x1e4106e1 + return_type_id: 0x48b5725f + parameter_id: 0x3b04bead + parameter_id: 0x07027638 + parameter_id: 0x11c404ba +} function { id: 0x1e4577e0 return_type_id: 0x48b5725f @@ -286568,6 +286582,12 @@ function { parameter_id: 0x32fddfe5 parameter_id: 0xf641dd8a } +function { + id: 0x20151959 + return_type_id: 0x2b8f13b3 + parameter_id: 0x33f8b54b + parameter_id: 0x0bf1a65b +} function { id: 0x209ae488 return_type_id: 0x37f9fd94 @@ -292279,6 +292299,14 @@ function { parameter_id: 0x92233392 parameter_id: 0x2e0f9112 } +function { + id: 0x9038705c + return_type_id: 0x6720d32f + parameter_id: 0x347303b4 + parameter_id: 0x2afee447 + parameter_id: 0x38d23361 + parameter_id: 0x6720d32f +} function { id: 0x9038edd5 return_type_id: 0x6720d32f @@ -300673,6 +300701,13 @@ function { parameter_id: 0x17047654 parameter_id: 0x295c7202 } +function { + id: 0x98e6779b + return_type_id: 0x6720d32f + parameter_id: 0x15b54c6f + parameter_id: 0x0483e6f8 + parameter_id: 0x6720d32f +} function { id: 0x98e6a470 return_type_id: 0x6720d32f @@ -340822,6 +340857,15 @@ elf_symbol { type_id: 0x1e4577e0 full_name: "drm_send_event_locked" } +elf_symbol { + id: 0xb701b4b1 + name: "drm_send_event_timestamp_locked" + 
is_defined: true + symbol_type: FUNCTION + crc: 0x2e16a8ee + type_id: 0x1e4106e1 + full_name: "drm_send_event_timestamp_locked" +} elf_symbol { id: 0x19652f5f name: "drm_set_preferred_mode" @@ -351667,6 +351711,15 @@ elf_symbol { type_id: 0x165fcf63 full_name: "mipi_dsi_picture_parameter_set" } +elf_symbol { + id: 0x6d579aaf + name: "mipi_dsi_set_maximum_return_packet_size" + is_defined: true + symbol_type: FUNCTION + crc: 0x24bb881a + type_id: 0x9d9d4f0f + full_name: "mipi_dsi_set_maximum_return_packet_size" +} elf_symbol { id: 0xe4059d72 name: "misc_deregister" @@ -354187,6 +354240,15 @@ elf_symbol { type_id: 0x91f5fad8 full_name: "of_get_display_timing" } +elf_symbol { + id: 0x05a46d27 + name: "of_get_drm_display_mode" + is_defined: true + symbol_type: FUNCTION + crc: 0x884bdf07 + type_id: 0x9038705c + full_name: "of_get_drm_display_mode" +} elf_symbol { id: 0xe3de7018 name: "of_get_i2c_adapter_by_node" @@ -360580,6 +360642,15 @@ elf_symbol { type_id: 0xaf453ff9 full_name: "regmap_get_device" } +elf_symbol { + id: 0x700d1b28 + name: "regmap_get_reg_stride" + is_defined: true + symbol_type: FUNCTION + crc: 0xb435d7cf + type_id: 0x9feaece8 + full_name: "regmap_get_reg_stride" +} elf_symbol { id: 0x248856c2 name: "regmap_get_val_bytes" @@ -361039,6 +361110,15 @@ elf_symbol { type_id: 0x10f3d61f full_name: "regulator_put" } +elf_symbol { + id: 0xfd977d86 + name: "regulator_register" + is_defined: true + symbol_type: FUNCTION + crc: 0xa01989d0 + type_id: 0xf5978397 + full_name: "regulator_register" +} elf_symbol { id: 0xddb9ed35 name: "regulator_register_notifier" @@ -365126,6 +365206,15 @@ elf_symbol { type_id: 0x9bbebc0c full_name: "snd_ctl_enum_info" } +elf_symbol { + id: 0xfc7ac85d + name: "snd_ctl_find_id" + is_defined: true + symbol_type: FUNCTION + crc: 0x0f5523c6 + type_id: 0x20151959 + full_name: "snd_ctl_find_id" +} elf_symbol { id: 0x6aca9744 name: "snd_ctl_new1" @@ -365270,6 +365359,15 @@ elf_symbol { type_id: 0x1f50da89 full_name: "snd_info_free_entry" } 
+elf_symbol { + id: 0x257f1e06 + name: "snd_info_get_line" + is_defined: true + symbol_type: FUNCTION + crc: 0x24a94b26 + type_id: 0x98e6779b + full_name: "snd_info_get_line" +} elf_symbol { id: 0x5e6e4a8e name: "snd_info_register" @@ -365549,6 +365647,24 @@ elf_symbol { type_id: 0x15b600dd full_name: "snd_pcm_period_elapsed" } +elf_symbol { + id: 0x11b8b797 + name: "snd_pcm_rate_bit_to_rate" + is_defined: true + symbol_type: FUNCTION + crc: 0xff6104d0 + type_id: 0xdfba2774 + full_name: "snd_pcm_rate_bit_to_rate" +} +elf_symbol { + id: 0x19ea44b2 + name: "snd_pcm_rate_to_rate_bit" + is_defined: true + symbol_type: FUNCTION + crc: 0xb9638db4 + type_id: 0xdfba2774 + full_name: "snd_pcm_rate_to_rate_bit" +} elf_symbol { id: 0xba998ee2 name: "snd_pcm_set_managed_buffer" @@ -370169,6 +370285,15 @@ elf_symbol { type_id: 0x9bc8ded8 full_name: "tty_port_install" } +elf_symbol { + id: 0x3ed74db1 + name: "tty_port_link_device" + is_defined: true + symbol_type: FUNCTION + crc: 0xebd3061e + type_id: 0x16cab29d + full_name: "tty_port_link_device" +} elf_symbol { id: 0x8c3087ea name: "tty_port_lower_dtr_rts" @@ -380070,6 +380195,7 @@ interface { symbol_id: 0x879ed3f8 symbol_id: 0xd7bee2cf symbol_id: 0x7826a8f0 + symbol_id: 0xb701b4b1 symbol_id: 0x19652f5f symbol_id: 0x78ae9c1c symbol_id: 0x3e16ebdf @@ -381275,6 +381401,7 @@ interface { symbol_id: 0x596b8466 symbol_id: 0xd9f124cf symbol_id: 0xdca2a3c4 + symbol_id: 0x6d579aaf symbol_id: 0xe4059d72 symbol_id: 0x842903b7 symbol_id: 0x354e8904 @@ -381555,6 +381682,7 @@ interface { symbol_id: 0xe2b0e5a5 symbol_id: 0xb8036e9c symbol_id: 0xe36e392a + symbol_id: 0x05a46d27 symbol_id: 0xe3de7018 symbol_id: 0x26fb2401 symbol_id: 0xec79392b @@ -382265,6 +382393,7 @@ interface { symbol_id: 0x6cde79b4 symbol_id: 0xd68bae0f symbol_id: 0x2b688ec7 + symbol_id: 0x700d1b28 symbol_id: 0x248856c2 symbol_id: 0x6ff192fd symbol_id: 0x3deea824 @@ -382316,6 +382445,7 @@ interface { symbol_id: 0x2804801a symbol_id: 0x4893b166 symbol_id: 0xbf6a903f + 
symbol_id: 0xfd977d86 symbol_id: 0xddb9ed35 symbol_id: 0x21d8367b symbol_id: 0xce959ab5 @@ -382770,6 +382900,7 @@ interface { symbol_id: 0x83c5422c symbol_id: 0xff4bd5dc symbol_id: 0x1adae35c + symbol_id: 0xfc7ac85d symbol_id: 0x6aca9744 symbol_id: 0x6b08a95c symbol_id: 0x238c5442 @@ -382786,6 +382917,7 @@ interface { symbol_id: 0x8a143ba0 symbol_id: 0x47548cf4 symbol_id: 0xfa53e7be + symbol_id: 0x257f1e06 symbol_id: 0x5e6e4a8e symbol_id: 0x32ffb327 symbol_id: 0x3491ba62 @@ -382817,6 +382949,8 @@ interface { symbol_id: 0x2c61b358 symbol_id: 0x352feb2c symbol_id: 0xf21d6619 + symbol_id: 0x11b8b797 + symbol_id: 0x19ea44b2 symbol_id: 0xba998ee2 symbol_id: 0x74420600 symbol_id: 0x92edca7e @@ -383331,6 +383465,7 @@ interface { symbol_id: 0x604f0f0b symbol_id: 0x5b997ef3 symbol_id: 0x6a405f9b + symbol_id: 0x3ed74db1 symbol_id: 0x8c3087ea symbol_id: 0x4e9dfcab symbol_id: 0x4a92dfd1 diff --git a/android/abi_gki_aarch64_unisoc b/android/abi_gki_aarch64_unisoc index 867436314590..ac818f2e495f 100644 --- a/android/abi_gki_aarch64_unisoc +++ b/android/abi_gki_aarch64_unisoc @@ -574,6 +574,8 @@ skb_unlink sk_error_report sk_free + snd_ctl_find_id + snd_info_get_line snprintf sock_alloc_send_pskb sock_create_kern @@ -1578,6 +1580,11 @@ spi_controller_suspend spi_finalize_current_transfer +# required by sprd-audio-codec.ko + regulator_register + snd_pcm_rate_bit_to_rate + snd_pcm_rate_to_rate_bit + # required by sprd-bc1p2.ko kthread_flush_worker __kthread_init_worker @@ -1662,14 +1669,18 @@ drm_poll drm_read drm_release + drm_send_event_timestamp_locked drm_vblank_init mipi_dsi_host_register mipi_dsi_host_unregister + mipi_dsi_set_maximum_return_packet_size of_drm_find_bridge + of_get_drm_display_mode of_graph_get_port_by_id of_graph_get_remote_node __platform_register_drivers platform_unregister_drivers + regmap_get_reg_stride # required by sprd-iommu.ko iommu_device_register @@ -1761,6 +1772,9 @@ devm_watchdog_register_device watchdog_init_timeout +# required by sprdbt_tty.ko 
+ tty_port_link_device + # required by sysdump.ko android_rvh_probe_register input_close_device From 58870404915576c071d6f2a8388a3daa97837df6 Mon Sep 17 00:00:00 2001 From: Zhang Shurong Date: Sun, 25 Jun 2023 00:16:49 +0800 Subject: [PATCH 028/163] UPSTREAM: fbdev: fix potential OOB read in fast_imageblit() commit c2d22806aecb24e2de55c30a06e5d6eb297d161d upstream. There is a potential OOB read at fast_imageblit, for "colortab[(*src >> 4)]" can become a negative value due to "const char *s = image->data, *src". This change makes sure the index for colortab always positive or zero. Similar commit: https://patchwork.kernel.org/patch/11746067 Potential bug report: https://groups.google.com/g/syzkaller-bugs/c/9ubBXKeKXf4/m/k-QXy4UgAAAJ Signed-off-by: Zhang Shurong Cc: stable@vger.kernel.org Signed-off-by: Helge Deller Signed-off-by: Greg Kroah-Hartman Change-Id: I8ae18dbee926cc8dcf5bac4dec584071e7bdb739 (cherry picked from commit c2d22806aecb24e2de55c30a06e5d6eb297d161d) Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/core/sysimgblt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c index 335e92b813fc..665ef7a0a249 100644 --- a/drivers/video/fbdev/core/sysimgblt.c +++ b/drivers/video/fbdev/core/sysimgblt.c @@ -189,7 +189,7 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p, u32 fgx = fgcolor, bgx = bgcolor, bpp = p->var.bits_per_pixel; u32 ppw = 32/bpp, spitch = (image->width + 7)/8; u32 bit_mask, eorx, shift; - const char *s = image->data, *src; + const u8 *s = image->data, *src; u32 *dst; const u32 *tab; size_t tablen; From af2d741bf3d9a11e28b2a7785199829039bdfc12 Mon Sep 17 00:00:00 2001 From: Oliver Hartkopp Date: Wed, 7 Jun 2023 09:27:08 +0200 Subject: [PATCH 029/163] UPSTREAM: can: isotp: isotp_sendmsg(): fix return error fix on TX path commit e38910c0072b541a91954682c8b074a93e57c09b upstream. 
With commit d674a8f123b4 ("can: isotp: isotp_sendmsg(): fix return error on FC timeout on TX path") the missing correct return value in the case of a protocol error was introduced. But the way the error value has been read and sent to the user space does not follow the common scheme to clear the error after reading which is provided by the sock_error() function. This leads to an error report at the following write() attempt although everything should be working. Fixes: d674a8f123b4 ("can: isotp: isotp_sendmsg(): fix return error on FC timeout on TX path") Reported-by: Carsten Schmidt Signed-off-by: Oliver Hartkopp Link: https://lore.kernel.org/all/20230607072708.38809-1-socketcan@hartkopp.net Cc: stable@vger.kernel.org Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman Change-Id: I6cb85ee1e6fdc609991c383e4f6fc71ea3c68c3a (cherry picked from commit e38910c0072b541a91954682c8b074a93e57c09b) Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- net/can/isotp.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/can/isotp.c b/net/can/isotp.c index 5761d4ab839d..82280ac70df9 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -1079,8 +1079,9 @@ static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) if (err) goto err_event_drop; - if (sk->sk_err) - return -sk->sk_err; + err = sock_error(sk); + if (err) + return err; } return size; From 0e477a82e6d9e013a7f26cba2a2012179d28ac74 Mon Sep 17 00:00:00 2001 From: Ludvig Michaelsson Date: Wed, 21 Jun 2023 13:17:43 +0200 Subject: [PATCH 030/163] UPSTREAM: HID: hidraw: fix data race on device refcount commit 944ee77dc6ec7b0afd8ec70ffc418b238c92f12b upstream. The hidraw_open() function increments the hidraw device reference counter. The counter has no dedicated synchronization mechanism, resulting in a potential data race when concurrently opening a device. 
The race is a regression introduced by commit 8590222e4b02 ("HID: hidraw: Replace hidraw device table mutex with a rwsem"). While minors_rwsem is intended to protect the hidraw_table itself, by instead acquiring the lock for writing, the reference counter is also protected. This is symmetrical to hidraw_release(). Link: https://github.com/systemd/systemd/issues/27947 Fixes: 8590222e4b02 ("HID: hidraw: Replace hidraw device table mutex with a rwsem") Cc: stable@vger.kernel.org Signed-off-by: Ludvig Michaelsson Link: https://lore.kernel.org/r/20230621-hidraw-race-v1-1-a58e6ac69bab@yubico.com Signed-off-by: Benjamin Tissoires Signed-off-by: Greg Kroah-Hartman Change-Id: I312349145e8f2d55ea2182b94a3b3293b839818d (cherry picked from commit 879e79c3aead41b8aa2e91164354b30bd1c4ef3b) Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- drivers/hid/hidraw.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 197b1e7bf029..b617aada50b0 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -272,7 +272,12 @@ static int hidraw_open(struct inode *inode, struct file *file) goto out; } - down_read(&minors_rwsem); + /* + * Technically not writing to the hidraw_table but a write lock is + * required to protect the device refcount. This is symmetrical to + * hidraw_release(). 
+ */ + down_write(&minors_rwsem); if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { err = -ENODEV; goto out_unlock; @@ -301,7 +306,7 @@ static int hidraw_open(struct inode *inode, struct file *file) spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags); file->private_data = list; out_unlock: - up_read(&minors_rwsem); + up_write(&minors_rwsem); out: if (err < 0) kfree(list); From d45a054f9ced465b079ad95619f562d2894317e3 Mon Sep 17 00:00:00 2001 From: Mike Hommey Date: Sun, 18 Jun 2023 08:09:57 +0900 Subject: [PATCH 031/163] UPSTREAM: HID: logitech-hidpp: add HIDPP_QUIRK_DELAYED_INIT for the T651. commit 5fe251112646d8626818ea90f7af325bab243efa upstream. commit 498ba2069035 ("HID: logitech-hidpp: Don't restart communication if not necessary") put restarting communication behind that flag, and this was apparently necessary on the T651, but the flag was not set for it. Fixes: 498ba2069035 ("HID: logitech-hidpp: Don't restart communication if not necessary") Cc: stable@vger.kernel.org Signed-off-by: Mike Hommey Link: https://lore.kernel.org/r/20230617230957.6mx73th4blv7owqk@glandium.org Signed-off-by: Benjamin Tissoires Signed-off-by: Greg Kroah-Hartman (cherry picked from commit a536383ef030b15ace93b2ca865c4132a1fd8794) Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman Change-Id: Ic57d1d450ee4474cff51efca3d9b9607de6693d7 --- drivers/hid/hid-logitech-hidpp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index e906ee375298..f1d5b7c38abb 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -4299,7 +4299,7 @@ static const struct hid_device_id hidpp_devices[] = { { /* wireless touchpad T651 */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651), - .driver_data = HIDPP_QUIRK_CLASS_WTP }, + .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT }, { /* Mouse Logitech Anywhere MX */ 
LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 }, { /* Mouse logitech M560 */ From 41b30362e99114d4d7ef753c612322471813ae39 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Sat, 17 Jun 2023 20:47:08 -0400 Subject: [PATCH 032/163] BACKPORT: mm/mmap: Fix error path in do_vmi_align_munmap() commit 606c812eb1d5b5fb0dd9e330ca94b52d7c227830 upstream The error unrolling was leaving the VMAs detached in many cases and leaving the locked_vm statistic altered, and skipping the unrolling entirely in the case of the vma tree write failing. Fix the error path by re-attaching the detached VMAs and adding the necessary goto for the failed vma tree write, and fix the locked_vm statistic by only updating after the vma tree write succeeds. Fixes: 763ecb035029 ("mm: remove the vma linked list") Reported-by: Vegard Nossum Signed-off-by: Liam R. Howlett Signed-off-by: Linus Torvalds [ dwmw2: Strictly, the original patch wasn't *re-attaching* the detached VMAs. They *were* still attached but just had the 'detached' flag set, which is an optimisation. Which doesn't exist in 6.3, so drop that. Also drop the call to vma_start_write() which came in with the per-VMA locking in 6.4. ] [ dwmw2 (6.1): It's do_mas_align_munmap() here. And has two call sites for the now-removed munmap_sidetree() function. Inline them both rather then trying to backport various dependencies with potentially subtle interactions. 
] Signed-off-by: David Woodhouse Signed-off-by: Greg Kroah-Hartman [surenb: added needed vma_start_write and vma_vma_mark_detached calls] Signed-off-by: Suren Baghdasaryan Change-Id: I1e42347ecf9eb46077739a267ac00264f94fa59a --- mm/mmap.c | 38 +++++++++++++++++--------------------- 1 file changed, 17 insertions(+), 21 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 36f129f23dfb..a0af03d35e8c 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2359,21 +2359,6 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, return __split_vma(mm, vma, addr, new_below); } -static inline int munmap_sidetree(struct vm_area_struct *vma, int count, - struct ma_state *mas_detach) -{ - vma_start_write(vma); - mas_set(mas_detach, count); - if (mas_store_gfp(mas_detach, vma, GFP_KERNEL)) - return -ENOMEM; - - vma_mark_detached(vma, true); - if (vma->vm_flags & VM_LOCKED) - vma->vm_mm->locked_vm -= vma_pages(vma); - - return 0; -} - /* * do_mas_align_munmap() - munmap the aligned region from @start to @end. * @mas: The maple_state, ideally set up to alter the correct tree location. 
@@ -2395,6 +2380,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma, struct maple_tree mt_detach; int count = 0; int error = -ENOMEM; + unsigned long locked_vm = 0; MA_STATE(mas_detach, &mt_detach, 0, 0); mt_init_flags(&mt_detach, mas->tree->ma_flags & MT_FLAGS_LOCK_MASK); mt_set_external_lock(&mt_detach, &mm->mmap_lock); @@ -2450,18 +2436,27 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma, mas_set(mas, end); split = mas_prev(mas, 0); - error = munmap_sidetree(split, count, &mas_detach); + vma_start_write(split); + mas_set(&mas_detach, count); + error = mas_store_gfp(&mas_detach, split, GFP_KERNEL); if (error) - goto munmap_sidetree_failed; + goto munmap_gather_failed; + vma_mark_detached(split, true); + if (split->vm_flags & VM_LOCKED) + locked_vm += vma_pages(split); count++; if (vma == next) vma = split; break; } - error = munmap_sidetree(next, count, &mas_detach); - if (error) - goto munmap_sidetree_failed; + vma_start_write(next); + mas_set(&mas_detach, count); + if (mas_store_gfp(&mas_detach, next, GFP_KERNEL)) + goto munmap_gather_failed; + vma_mark_detached(next, true); + if (next->vm_flags & VM_LOCKED) + locked_vm += vma_pages(next); count++; if (unlikely(uf)) { @@ -2519,6 +2514,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma, if (mas_store_gfp(mas, NULL, GFP_KERNEL)) return -ENOMEM; + mm->locked_vm -= locked_vm; mm->map_count -= count; /* * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or @@ -2550,7 +2546,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma, return downgrade ? 
1 : 0; userfaultfd_error: -munmap_sidetree_failed: +munmap_gather_failed: end_split_failed: __mt_destroy(&mt_detach); start_split_failed: From 466448f55f0b6e14b6b1ab874ebe9a704fb7b821 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Wed, 28 Jun 2023 10:55:03 +0100 Subject: [PATCH 033/163] BACKPORT: mm/mmap: Fix error return in do_vmi_align_munmap() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 6c26bd4384da24841bac4f067741bbca18b0fb74 upstream, If mas_store_gfp() in the gather loop failed, the 'error' variable that ultimately gets returned was not being set. In many cases, its original value of -ENOMEM was still in place, and that was fine. But if VMAs had been split at the start or end of the range, then 'error' could be zero. Change to the 'error = foo(); if (error) goto …' idiom to fix the bug. Also clean up a later case which avoided the same bug by *explicitly* setting error = -ENOMEM right before calling the function that might return -ENOMEM. In a final cosmetic change, move the 'Point of no return' comment to *after* the goto. That's been in the wrong place since the preallocation was removed, and this new error path was added. Fixes: 606c812eb1d5 ("mm/mmap: Fix error path in do_vmi_align_munmap()") Signed-off-by: David Woodhouse Cc: stable@vger.kernel.org Reviewed-by: Greg Kroah-Hartman Reviewed-by: Liam R. 
Howlett Signed-off-by: David Woodhouse Signed-off-by: Greg Kroah-Hartman (cherry picked from commit 42a018a796d1eedb0d7c38b2778ef3dbf05aca36) Signed-off-by: Greg Kroah-Hartman Change-Id: I5da7b1e126968e174e733d45ff24439089de60af --- mm/mmap.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm/mmap.c b/mm/mmap.c index a0af03d35e8c..751fcf6037b3 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2452,7 +2452,8 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma, } vma_start_write(next); mas_set(&mas_detach, count); - if (mas_store_gfp(&mas_detach, next, GFP_KERNEL)) + error = mas_store_gfp(&mas_detach, next, GFP_KERNEL); + if (error) goto munmap_gather_failed; vma_mark_detached(next, true); if (next->vm_flags & VM_LOCKED) From a2dff37b0c2e589d376dfb3a1a49dfb0b9326db0 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Fri, 21 Oct 2022 13:01:19 -0700 Subject: [PATCH 034/163] UPSTREAM: mm, hwpoison: try to recover from copy-on write faults commit a873dfe1032a132bf89f9e19a6ac44f5a0b78754 upstream. Patch series "Copy-on-write poison recovery", v3. Part 1 deals with the process that triggered the copy on write fault with a store to a shared read-only page. That process is send a SIGBUS with the usual machine check decoration to specify the virtual address of the lost page, together with the scope. Part 2 sets up to asynchronously take the page with the uncorrected error offline to prevent additional machine check faults. H/t to Miaohe Lin and Shuai Xue for pointing me to the existing function to queue a call to memory_failure(). On x86 there is some duplicate reporting (because the error is also signalled by the memory controller as well as by the core that triggered the machine check). 
Console logs look like this: This patch (of 2): If the kernel is copying a page as the result of a copy-on-write fault and runs into an uncorrectable error, Linux will crash because it does not have recovery code for this case where poison is consumed by the kernel. It is easy to set up a test case. Just inject an error into a private page, fork(2), and have the child process write to the page. I wrapped that neatly into a test at: git://git.kernel.org/pub/scm/linux/kernel/git/aegl/ras-tools.git just enable ACPI error injection and run: # ./einj_mem-uc -f copy-on-write Add a new copy_user_highpage_mc() function that uses copy_mc_to_kernel() on architectures where that is available (currently x86 and powerpc). When an error is detected during the page copy, return VM_FAULT_HWPOISON to caller of wp_page_copy(). This propagates up the call stack. Both x86 and powerpc have code in their fault handler to deal with this code by sending a SIGBUS to the application. Note that this patch avoids a system crash and signals the process that triggered the copy-on-write action. It does not take any action for the memory error that is still in the shared page. To handle that a call to memory_failure() is needed. But this cannot be done from wp_page_copy() because it holds mmap_lock(). Perhaps the architecture fault handlers can deal with this loose end in a subsequent patch? On Intel/x86 this loose end will often be handled automatically because the memory controller provides an additional notification of the h/w poison in memory, the handler for this will call memory_failure(). This isn't a 100% solution. If there are multiple errors, not all may be logged in this way. 
[tony.luck@intel.com: add call to kmsan_unpoison_memory(), per Miaohe Lin] Link: https://lkml.kernel.org/r/20221031201029.102123-2-tony.luck@intel.com Link: https://lkml.kernel.org/r/20221021200120.175753-1-tony.luck@intel.com Link: https://lkml.kernel.org/r/20221021200120.175753-2-tony.luck@intel.com Signed-off-by: Tony Luck Reviewed-by: Dan Williams Reviewed-by: Naoya Horiguchi Reviewed-by: Miaohe Lin Reviewed-by: Alexander Potapenko Tested-by: Shuai Xue Cc: Christophe Leroy Cc: Matthew Wilcox (Oracle) Cc: Michael Ellerman Cc: Nicholas Piggin Signed-off-by: Andrew Morton Igned-off-by: Jane Chu Signed-off-by: Greg Kroah-Hartman Change-Id: I7c35cd47de59611fcc0550b0a7fd4e3911bbb110 (cherry-picked from commit 4af5960d7cd46c3834f65b75577b775cbcd0f7b2) Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- include/linux/highmem.h | 26 ++++++++++++++++++++++++++ mm/memory.c | 30 ++++++++++++++++++++---------- 2 files changed, 46 insertions(+), 10 deletions(-) diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 94b50dc0e131..a2d4e15464c3 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -319,6 +319,32 @@ static inline void copy_user_highpage(struct page *to, struct page *from, #endif +#ifdef copy_mc_to_kernel +static inline int copy_mc_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) +{ + unsigned long ret; + char *vfrom, *vto; + + vfrom = kmap_local_page(from); + vto = kmap_local_page(to); + ret = copy_mc_to_kernel(vto, vfrom, PAGE_SIZE); + if (!ret) + kmsan_unpoison_memory(page_address(to), PAGE_SIZE); + kunmap_local(vto); + kunmap_local(vfrom); + + return ret; +} +#else +static inline int copy_mc_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) +{ + copy_user_highpage(to, from, vaddr, vma); + return 0; +} +#endif + #ifndef __HAVE_ARCH_COPY_HIGHPAGE static inline void copy_highpage(struct page *to, struct page 
*from) diff --git a/mm/memory.c b/mm/memory.c index 16063c490b7f..b52469032045 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2851,10 +2851,16 @@ static inline int pte_unmap_same(struct vm_fault *vmf) return same; } -static inline bool __wp_page_copy_user(struct page *dst, struct page *src, - struct vm_fault *vmf) +/* + * Return: + * 0: copied succeeded + * -EHWPOISON: copy failed due to hwpoison in source page + * -EAGAIN: copied failed (some other reason) + */ +static inline int __wp_page_copy_user(struct page *dst, struct page *src, + struct vm_fault *vmf) { - bool ret; + int ret; void *kaddr; void __user *uaddr; bool locked = false; @@ -2863,8 +2869,9 @@ static inline bool __wp_page_copy_user(struct page *dst, struct page *src, unsigned long addr = vmf->address; if (likely(src)) { - copy_user_highpage(dst, src, addr, vma); - return true; + if (copy_mc_user_highpage(dst, src, addr, vma)) + return -EHWPOISON; + return 0; } /* @@ -2891,7 +2898,7 @@ static inline bool __wp_page_copy_user(struct page *dst, struct page *src, * and update local tlb only */ update_mmu_tlb(vma, addr, vmf->pte); - ret = false; + ret = -EAGAIN; goto pte_unlock; } @@ -2916,7 +2923,7 @@ static inline bool __wp_page_copy_user(struct page *dst, struct page *src, if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { /* The PTE changed under us, update local tlb */ update_mmu_tlb(vma, addr, vmf->pte); - ret = false; + ret = -EAGAIN; goto pte_unlock; } @@ -2935,7 +2942,7 @@ static inline bool __wp_page_copy_user(struct page *dst, struct page *src, } } - ret = true; + ret = 0; pte_unlock: if (locked) @@ -3107,6 +3114,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) pte_t entry; int page_copied = 0; struct mmu_notifier_range range; + int ret; delayacct_wpcopy_start(); @@ -3124,19 +3132,21 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) if (!new_page) goto oom; - if (!__wp_page_copy_user(new_page, old_page, vmf)) { + ret = __wp_page_copy_user(new_page, old_page, vmf); + if (ret) { /* 
* COW failed, if the fault was solved by other, * it's fine. If not, userspace would re-fault on * the same address and we will handle the fault * from the second attempt. + * The -EHWPOISON case will not be retried. */ put_page(new_page); if (old_page) put_page(old_page); delayacct_wpcopy_end(); - return 0; + return ret == -EHWPOISON ? VM_FAULT_HWPOISON : 0; } kmsan_copy_page_meta(new_page, old_page); } From 53048f151cd72ef552a3c7e853f8e9726f46fc76 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Fri, 21 Oct 2022 13:01:20 -0700 Subject: [PATCH 035/163] BACKPORT: mm, hwpoison: when copy-on-write hits poison, take page offline commit d302c2398ba269e788a4f37ae57c07a7fcabaa42 upstream. Cannot call memory_failure() directly from the fault handler because mmap_lock (and others) are held. It is important, but not urgent, to mark the source page as h/w poisoned and unmap it from other tasks. Use memory_failure_queue() to request a call to memory_failure() for the page with the error. Also provide a stub version for CONFIG_MEMORY_FAILURE=n Link: https://lkml.kernel.org/r/20221021200120.175753-3-tony.luck@intel.com Signed-off-by: Tony Luck Reviewed-by: Miaohe Lin Cc: Christophe Leroy Cc: Dan Williams Cc: Matthew Wilcox (Oracle) Cc: Michael Ellerman Cc: Naoya Horiguchi Cc: Nicholas Piggin Cc: Shuai Xue Signed-off-by: Andrew Morton [ Due to missing commits e591ef7d96d6e ("mm,hwpoison,hugetlb,memory_hotplug: hotremove memory section with hwpoisoned hugepage") 5033091de814a ("mm/hwpoison: introduce per-memory_block hwpoison counter") The impact of e591ef7d96d6e is its introduction of an additional flag in __get_huge_page_for_hwpoison() that serves as an indication a hwpoisoned hugetlb page should have its migratable bit cleared. The impact of 5033091de814a is contexual. Resolve by ignoring both missing commits. 
- jane] Signed-off-by: Jane Chu Signed-off-by: Greg Kroah-Hartman Change-Id: Ica2c1970fe3cdfa9dc7d3f288e1e6a90378a9764 (cherry-picked from commit 84f077802e56ae43f4b6c6eb9ad59b19df9db374) Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- include/linux/mm.h | 5 ++++- mm/memory.c | 4 +++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index ab2f33910d06..718bb0f8446c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3480,7 +3480,6 @@ enum mf_flags { int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index, unsigned long count, int mf_flags); extern int memory_failure(unsigned long pfn, int flags); -extern void memory_failure_queue(unsigned long pfn, int flags); extern void memory_failure_queue_kick(int cpu); extern int unpoison_memory(unsigned long pfn); extern int sysctl_memory_failure_early_kill; @@ -3489,8 +3488,12 @@ extern void shake_page(struct page *p); extern atomic_long_t num_poisoned_pages __read_mostly; extern int soft_offline_page(unsigned long pfn, int flags); #ifdef CONFIG_MEMORY_FAILURE +extern void memory_failure_queue(unsigned long pfn, int flags); extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags); #else +static inline void memory_failure_queue(unsigned long pfn, int flags) +{ +} static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags) { return 0; diff --git a/mm/memory.c b/mm/memory.c index b52469032045..d31c23419631 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2869,8 +2869,10 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src, unsigned long addr = vmf->address; if (likely(src)) { - if (copy_mc_user_highpage(dst, src, addr, vma)) + if (copy_mc_user_highpage(dst, src, addr, vma)) { + memory_failure_queue(page_to_pfn(src), 0); return -EHWPOISON; + } return 0; } From 5c9836e66ddee801c4f7140f1081b1b517b40548 Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Tue, 2 May 2023 
19:53:50 +0200 Subject: [PATCH 036/163] UPSTREAM: x86/microcode/AMD: Load late on both threads too commit a32b0f0db3f396f1c9be2fe621e77c09ec3d8e7d upstream. Do the same as early loading - load on both threads. Signed-off-by: Borislav Petkov (AMD) Cc: Link: https://lore.kernel.org/r/20230605141332.25948-1-bp@alien8.de Signed-off-by: Greg Kroah-Hartman Change-Id: I857794a1b78974200aad02098a31c41576aed562 (cherry-picked from commit 94a69d6999419cd21365111b4493070182712299) Signed-off-by: Suren Baghdasaryan --- arch/x86/kernel/cpu/microcode/amd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 461e45d85add..9a3092ec9b27 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -705,7 +705,7 @@ static enum ucode_state apply_microcode_amd(int cpu) rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); /* need to apply patch? */ - if (rev >= mc_amd->hdr.patch_id) { + if (rev > mc_amd->hdr.patch_id) { ret = UCODE_OK; goto out; } From ba2ccba8634863235fbffa7350049de21c18eed5 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 26 Apr 2023 18:37:00 +0200 Subject: [PATCH 037/163] UPSTREAM: x86/smp: Make stop_other_cpus() more robust commit 1f5e7eb7868e42227ac426c96d437117e6e06e8e upstream. Tony reported intermittent lockups on poweroff. His analysis identified the wbinvd() in stop_this_cpu() as the culprit. This was added to ensure that on SME enabled machines a kexec() does not leave any stale data in the caches when switching from encrypted to non-encrypted mode or vice versa. That wbinvd() is conditional on the SME feature bit which is read directly from CPUID. But that readout does not check whether the CPUID leaf is available or not. If it's not available the CPU will return the value of the highest supported leaf instead. Depending on the content the "SME" bit might be set or not. That's incorrect but harmless. 
Making the CPUID readout conditional makes the observed hangs go away, but it does not fix the underlying problem: CPU0 CPU1 stop_other_cpus() send_IPIs(REBOOT); stop_this_cpu() while (num_online_cpus() > 1); set_online(false); proceed... -> hang wbinvd() WBINVD is an expensive operation and if multiple CPUs issue it at the same time the resulting delays are even larger. But CPU0 already observed num_online_cpus() going down to 1 and proceeds which causes the system to hang. This issue exists independent of WBINVD, but the delays caused by WBINVD make it more prominent. Make this more robust by adding a cpumask which is initialized to the online CPU mask before sending the IPIs and CPUs clear their bit in stop_this_cpu() after the WBINVD completed. Check for that cpumask to become empty in stop_other_cpus() instead of watching num_online_cpus(). The cpumask cannot plug all holes either, but it's better than a raw counter and allows to restrict the NMI fallback IPI to be sent only the CPUs which have not reported within the timeout window. 
Fixes: 08f253ec3767 ("x86/cpu: Clear SME feature flag when not in use") Reported-by: Tony Battersby Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov (AMD) Reviewed-by: Ashok Raj Cc: stable@vger.kernel.org Link: https://lore.kernel.org/all/3817d810-e0f1-8ef8-0bbd-663b919ca49b@cybernetics.com Link: https://lore.kernel.org/r/87h6r770bv.ffs@tglx Signed-off-by: Greg Kroah-Hartman Change-Id: I7154624285f081ac2f54617fb7b9f9cdd6b4f2e0 (cherry-picked from commit edadebb349e89461109643dd92ee986e01a47aa1) Signed-off-by: Suren Baghdasaryan --- arch/x86/include/asm/cpu.h | 2 ++ arch/x86/kernel/process.c | 23 ++++++++++++-- arch/x86/kernel/smp.c | 62 +++++++++++++++++++++++++------------- 3 files changed, 64 insertions(+), 23 deletions(-) diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index b472ef76826a..37639a2d9c34 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -96,4 +96,6 @@ static inline bool intel_cpu_signatures_match(unsigned int s1, unsigned int p1, extern u64 x86_read_arch_cap_msr(void); +extern struct cpumask cpus_stop_mask; + #endif /* _ASM_X86_CPU_H */ diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index e436c9c1ef3b..93df1f0feeb1 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -744,13 +744,23 @@ bool xen_set_default_idle(void) } #endif +struct cpumask cpus_stop_mask; + void __noreturn stop_this_cpu(void *dummy) { + unsigned int cpu = smp_processor_id(); + local_irq_disable(); + /* - * Remove this CPU: + * Remove this CPU from the online mask and disable it + * unconditionally. This might be redundant in case that the reboot + * vector was handled late and stop_other_cpus() sent an NMI. + * + * According to SDM and APM NMIs can be accepted even after soft + * disabling the local APIC. 
*/ - set_cpu_online(smp_processor_id(), false); + set_cpu_online(cpu, false); disable_local_APIC(); mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); @@ -768,6 +778,15 @@ void __noreturn stop_this_cpu(void *dummy) */ if (cpuid_eax(0x8000001f) & BIT(0)) native_wbinvd(); + + /* + * This brings a cache line back and dirties it, but + * native_stop_other_cpus() will overwrite cpus_stop_mask after it + * observed that all CPUs reported stop. This write will invalidate + * the related cache line on this CPU. + */ + cpumask_clear_cpu(cpu, &cpus_stop_mask); + for (;;) { /* * Use native_halt() so that memory contents don't change diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 375b33ecafa2..935bc6562fa4 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -146,31 +147,43 @@ static int register_stop_handler(void) static void native_stop_other_cpus(int wait) { - unsigned long flags; - unsigned long timeout; + unsigned int cpu = smp_processor_id(); + unsigned long flags, timeout; if (reboot_force) return; - /* - * Use an own vector here because smp_call_function - * does lots of things not suitable in a panic situation. - */ + /* Only proceed if this is the first CPU to reach this code */ + if (atomic_cmpxchg(&stopping_cpu, -1, cpu) != -1) + return; /* - * We start by using the REBOOT_VECTOR irq. - * The irq is treated as a sync point to allow critical - * regions of code on other cpus to release their spin locks - * and re-enable irqs. Jumping straight to an NMI might - * accidentally cause deadlocks with further shutdown/panic - * code. By syncing, we give the cpus up to one second to - * finish their work before we force them off with the NMI. + * 1) Send an IPI on the reboot vector to all other CPUs. + * + * The other CPUs should react on it after leaving critical + * sections and re-enabling interrupts. 
They might still hold + * locks, but there is nothing which can be done about that. + * + * 2) Wait for all other CPUs to report that they reached the + * HLT loop in stop_this_cpu() + * + * 3) If #2 timed out send an NMI to the CPUs which did not + * yet report + * + * 4) Wait for all other CPUs to report that they reached the + * HLT loop in stop_this_cpu() + * + * #3 can obviously race against a CPU reaching the HLT loop late. + * That CPU will have reported already and the "have all CPUs + * reached HLT" condition will be true despite the fact that the + * other CPU is still handling the NMI. Again, there is no + * protection against that as "disabled" APICs still respond to + * NMIs. */ - if (num_online_cpus() > 1) { - /* did someone beat us here? */ - if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1) - return; + cpumask_copy(&cpus_stop_mask, cpu_online_mask); + cpumask_clear_cpu(cpu, &cpus_stop_mask); + if (!cpumask_empty(&cpus_stop_mask)) { /* sync above data before sending IRQ */ wmb(); @@ -183,12 +196,12 @@ static void native_stop_other_cpus(int wait) * CPUs reach shutdown state. */ timeout = USEC_PER_SEC; - while (num_online_cpus() > 1 && timeout--) + while (!cpumask_empty(&cpus_stop_mask) && timeout--) udelay(1); } /* if the REBOOT_VECTOR didn't work, try with the NMI */ - if (num_online_cpus() > 1) { + if (!cpumask_empty(&cpus_stop_mask)) { /* * If NMI IPI is enabled, try to register the stop handler * and send the IPI. In any case try to wait for the other @@ -200,7 +213,8 @@ static void native_stop_other_cpus(int wait) pr_emerg("Shutting down cpus with NMI\n"); - apic_send_IPI_allbutself(NMI_VECTOR); + for_each_cpu(cpu, &cpus_stop_mask) + apic->send_IPI(cpu, NMI_VECTOR); } /* * Don't wait longer than 10 ms if the caller didn't @@ -208,7 +222,7 @@ static void native_stop_other_cpus(int wait) * one or more CPUs do not reach shutdown state. 
*/ timeout = USEC_PER_MSEC * 10; - while (num_online_cpus() > 1 && (wait || timeout--)) + while (!cpumask_empty(&cpus_stop_mask) && (wait || timeout--)) udelay(1); } @@ -216,6 +230,12 @@ static void native_stop_other_cpus(int wait) disable_local_APIC(); mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); local_irq_restore(flags); + + /* + * Ensure that the cpus_stop_mask cache lines are invalidated on + * the other CPUs. See comment vs. SME in stop_this_cpu(). + */ + cpumask_clear(&cpus_stop_mask); } /* From 6744547e9534eda6a3c91923e1ff287af68380f0 Mon Sep 17 00:00:00 2001 From: Tony Battersby Date: Thu, 15 Jun 2023 22:33:52 +0200 Subject: [PATCH 038/163] UPSTREAM: x86/smp: Dont access non-existing CPUID leaf commit 9b040453d4440659f33dc6f0aa26af418ebfe70b upstream. stop_this_cpu() tests CPUID leaf 0x8000001f::EAX unconditionally. Intel CPUs return the content of the highest supported leaf when a non-existing leaf is read, while AMD CPUs return all zeros for unsupported leafs. So the result of the test on Intel CPUs is lottery. While harmless it's incorrect and causes the conditional wbinvd() to be issued where not required. Check whether the leaf is supported before reading it. 
[ tglx: Adjusted changelog ] Fixes: 08f253ec3767 ("x86/cpu: Clear SME feature flag when not in use") Signed-off-by: Tony Battersby Signed-off-by: Thomas Gleixner Reviewed-by: Mario Limonciello Reviewed-by: Borislav Petkov (AMD) Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/3817d810-e0f1-8ef8-0bbd-663b919ca49b@cybernetics.com Link: https://lore.kernel.org/r/20230615193330.322186388@linutronix.de Signed-off-by: Greg Kroah-Hartman Change-Id: Idc8aa8137c9044642f02ec157d18d035359f88ea (cherry-picked from commit e47037d28b7398d7a8f1a3e071087ea9dbfcebf5) Signed-off-by: Suren Baghdasaryan --- arch/x86/kernel/process.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 93df1f0feeb1..279b5e9be80f 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c @@ -748,6 +748,7 @@ struct cpumask cpus_stop_mask; void __noreturn stop_this_cpu(void *dummy) { + struct cpuinfo_x86 *c = this_cpu_ptr(&cpu_info); unsigned int cpu = smp_processor_id(); local_irq_disable(); @@ -762,7 +763,7 @@ void __noreturn stop_this_cpu(void *dummy) */ set_cpu_online(cpu, false); disable_local_APIC(); - mcheck_cpu_clear(this_cpu_ptr(&cpu_info)); + mcheck_cpu_clear(c); /* * Use wbinvd on processors that support SME. This provides support @@ -776,7 +777,7 @@ void __noreturn stop_this_cpu(void *dummy) * Test the CPUID bit directly because the machine might've cleared * X86_FEATURE_SME due to cmdline options. */ - if (cpuid_eax(0x8000001f) & BIT(0)) + if (c->extended_cpuid_level >= 0x8000001f && (cpuid_eax(0x8000001f) & BIT(0))) native_wbinvd(); /* From d8cb0365cbc724b76f680af68351aa15e20d1d3b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 15 Jun 2023 22:33:54 +0200 Subject: [PATCH 039/163] UPSTREAM: x86/smp: Remove pointless wmb()s from native_stop_other_cpus() commit 2affa6d6db28855e6340b060b809c23477aa546e upstream. The wmb()s before sending the IPIs are not synchronizing anything. 
If at all then the apic IPI functions have to provide or act as appropriate barriers. Remove these cargo cult barriers which have no explanation of what they are synchronizing. Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov (AMD) Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230615193330.378358382@linutronix.de Signed-off-by: Greg Kroah-Hartman Change-Id: I7541e4c7c65f9bed9b1f28d6c858473986dd50b4 (cherry-picked from commit 50a1abc67702f76968162402d8fb113dd6e22f31) Signed-off-by: Suren Baghdasaryan --- arch/x86/kernel/smp.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 935bc6562fa4..d842875f986f 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -184,9 +184,6 @@ static void native_stop_other_cpus(int wait) cpumask_clear_cpu(cpu, &cpus_stop_mask); if (!cpumask_empty(&cpus_stop_mask)) { - /* sync above data before sending IRQ */ - wmb(); - apic_send_IPI_allbutself(REBOOT_VECTOR); /* @@ -208,9 +205,6 @@ static void native_stop_other_cpus(int wait) * CPUs to stop. */ if (!smp_no_nmi_ipi && !register_stop_handler()) { - /* Sync above data before sending IRQ */ - wmb(); - pr_emerg("Shutting down cpus with NMI\n"); for_each_cpu(cpu, &cpus_stop_mask) From 26260c4bd1d03cf04c535f6bd9a5ed59ccd6b919 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 15 Jun 2023 22:33:55 +0200 Subject: [PATCH 040/163] UPSTREAM: x86/smp: Use dedicated cache-line for mwait_play_dead() commit f9c9987bf52f4e42e940ae217333ebb5a4c3b506 upstream. Monitoring idletask::thread_info::flags in mwait_play_dead() has been an obvious choice as all what is needed is a cache line which is not written by other CPUs. But there is a use case where a "dead" CPU needs to be brought out of MWAIT: kexec(). This is required as kexec() can overwrite text, pagetables, stacks and the monitored cacheline of the original kernel. 
The latter causes MWAIT to resume execution which obviously causes havoc on the kexec kernel which results usually in triple faults. Use a dedicated per CPU storage to prepare for that. Signed-off-by: Thomas Gleixner Reviewed-by: Ashok Raj Reviewed-by: Borislav Petkov (AMD) Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230615193330.434553750@linutronix.de Signed-off-by: Greg Kroah-Hartman Change-Id: I7cbfcec2d4e1bde18a9c45a7ccb7897ccaad7bd3 (cherry-picked from commit 6d3b2e0aef6c0118596928f697cb4471f6258a26) Signed-off-by: Suren Baghdasaryan --- arch/x86/kernel/smpboot.c | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 3f3ea0287f69..b96f983e64cc 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -99,6 +99,17 @@ EXPORT_PER_CPU_SYMBOL(cpu_die_map); DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); EXPORT_PER_CPU_SYMBOL(cpu_info); +struct mwait_cpu_dead { + unsigned int control; + unsigned int status; +}; + +/* + * Cache line aligned data for mwait_play_dead(). Separate on purpose so + * that it's unlikely to be touched by other CPUs. + */ +static DEFINE_PER_CPU_ALIGNED(struct mwait_cpu_dead, mwait_cpu_dead); + /* Logical package management. We might want to allocate that dynamically */ unsigned int __max_logical_packages __read_mostly; EXPORT_SYMBOL(__max_logical_packages); @@ -1746,10 +1757,10 @@ EXPORT_SYMBOL_GPL(cond_wakeup_cpu0); */ static inline void mwait_play_dead(void) { + struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead); unsigned int eax, ebx, ecx, edx; unsigned int highest_cstate = 0; unsigned int highest_subcstate = 0; - void *mwait_ptr; int i; if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || @@ -1784,13 +1795,6 @@ static inline void mwait_play_dead(void) (highest_subcstate - 1); } - /* - * This should be a memory location in a cache line which is - * unlikely to be touched by other processors. 
The actual - * content is immaterial as it is not actually modified in any way. - */ - mwait_ptr = ¤t_thread_info()->flags; - wbinvd(); while (1) { @@ -1802,9 +1806,9 @@ static inline void mwait_play_dead(void) * case where we return around the loop. */ mb(); - clflush(mwait_ptr); + clflush(md); mb(); - __monitor(mwait_ptr, 0, 0); + __monitor(md, 0, 0); mb(); __mwait(eax, 0); From 19dd4101e0e419fe89d538abfb5c1fc3f65ca238 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 15 Jun 2023 22:33:57 +0200 Subject: [PATCH 041/163] UPSTREAM: x86/smp: Cure kexec() vs. mwait_play_dead() breakage commit d7893093a7417527c0d73c9832244e65c9d0114f upstream. TLDR: It's a mess. When kexec() is executed on a system with offline CPUs, which are parked in mwait_play_dead() it can end up in a triple fault during the bootup of the kexec kernel or cause hard to diagnose data corruption. The reason is that kexec() eventually overwrites the previous kernel's text, page tables, data and stack. If it writes to the cache line which is monitored by a previously offlined CPU, MWAIT resumes execution and ends up executing the wrong text, dereferencing overwritten page tables or corrupting the kexec kernels data. Cure this by bringing the offlined CPUs out of MWAIT into HLT. Write to the monitored cache line of each offline CPU, which makes MWAIT resume execution. The written control word tells the offlined CPUs to issue HLT, which does not have the MWAIT problem. That does not help, if a stray NMI, MCE or SMI hits the offlined CPUs as those make it come out of HLT. A follow up change will put them into INIT, which protects at least against NMI and SMI. 
Fixes: ea53069231f9 ("x86, hotplug: Use mwait to offline a processor, fix the legacy case") Reported-by: Ashok Raj Signed-off-by: Thomas Gleixner Tested-by: Ashok Raj Reviewed-by: Ashok Raj Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230615193330.492257119@linutronix.de Signed-off-by: Greg Kroah-Hartman Change-Id: I80035e671b55732ac3d56c71dc53364e82238fe2 (cherry-picked from commit 0af4750eaaeda20bc2ce8da414d85cc1653ae240) Signed-off-by: Suren Baghdasaryan --- arch/x86/include/asm/smp.h | 2 ++ arch/x86/kernel/smp.c | 5 ++++ arch/x86/kernel/smpboot.c | 59 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 66 insertions(+) diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index a73bced40e24..b3b34032ef23 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -132,6 +132,8 @@ void wbinvd_on_cpu(int cpu); int wbinvd_on_all_cpus(void); void cond_wakeup_cpu0(void); +void smp_kick_mwait_play_dead(void); + void native_smp_send_reschedule(int cpu); void native_send_call_func_ipi(const struct cpumask *mask); void native_send_call_func_single_ipi(int cpu); diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index d842875f986f..174d6232b87f 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -157,6 +158,10 @@ static void native_stop_other_cpus(int wait) if (atomic_cmpxchg(&stopping_cpu, -1, cpu) != -1) return; + /* For kexec, ensure that offline CPUs are out of MWAIT and in HLT */ + if (kexec_in_progress) + smp_kick_mwait_play_dead(); + /* * 1) Send an IPI on the reboot vector to all other CPUs. 
* diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index b96f983e64cc..f32ee967414e 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -53,6 +53,7 @@ #include #include #include +#include #include #include #include @@ -104,6 +105,9 @@ struct mwait_cpu_dead { unsigned int status; }; +#define CPUDEAD_MWAIT_WAIT 0xDEADBEEF +#define CPUDEAD_MWAIT_KEXEC_HLT 0x4A17DEAD + /* * Cache line aligned data for mwait_play_dead(). Separate on purpose so * that it's unlikely to be touched by other CPUs. @@ -166,6 +170,10 @@ static void smp_callin(void) { int cpuid; + /* Mop up eventual mwait_play_dead() wreckage */ + this_cpu_write(mwait_cpu_dead.status, 0); + this_cpu_write(mwait_cpu_dead.control, 0); + /* * If waken up by an INIT in an 82489DX configuration * cpu_callout_mask guarantees we don't get here before @@ -1795,6 +1803,10 @@ static inline void mwait_play_dead(void) (highest_subcstate - 1); } + /* Set up state for the kexec() hack below */ + md->status = CPUDEAD_MWAIT_WAIT; + md->control = CPUDEAD_MWAIT_WAIT; + wbinvd(); while (1) { @@ -1812,10 +1824,57 @@ static inline void mwait_play_dead(void) mb(); __mwait(eax, 0); + if (READ_ONCE(md->control) == CPUDEAD_MWAIT_KEXEC_HLT) { + /* + * Kexec is about to happen. Don't go back into mwait() as + * the kexec kernel might overwrite text and data including + * page tables and stack. So mwait() would resume when the + * monitor cache line is written to and then the CPU goes + * south due to overwritten text, page tables and stack. + * + * Note: This does _NOT_ protect against a stray MCE, NMI, + * SMI. They will resume execution at the instruction + * following the HLT instruction and run into the problem + * which this is trying to prevent. + */ + WRITE_ONCE(md->status, CPUDEAD_MWAIT_KEXEC_HLT); + while(1) + native_halt(); + } + cond_wakeup_cpu0(); } } +/* + * Kick all "offline" CPUs out of mwait on kexec(). See comment in + * mwait_play_dead(). 
+ */ +void smp_kick_mwait_play_dead(void) +{ + u32 newstate = CPUDEAD_MWAIT_KEXEC_HLT; + struct mwait_cpu_dead *md; + unsigned int cpu, i; + + for_each_cpu_andnot(cpu, cpu_present_mask, cpu_online_mask) { + md = per_cpu_ptr(&mwait_cpu_dead, cpu); + + /* Does it sit in mwait_play_dead() ? */ + if (READ_ONCE(md->status) != CPUDEAD_MWAIT_WAIT) + continue; + + /* Wait up to 5ms */ + for (i = 0; READ_ONCE(md->status) != newstate && i < 1000; i++) { + /* Bring it out of mwait */ + WRITE_ONCE(md->control, newstate); + udelay(5); + } + + if (READ_ONCE(md->status) != newstate) + pr_err_once("CPU%u is stuck in mwait_play_dead()\n", cpu); + } +} + void hlt_play_dead(void) { if (__this_cpu_read(cpu_info.x86) >= 4) From 66b5ad35078b3b86cc048755ab8bc109694d5740 Mon Sep 17 00:00:00 2001 From: Peng Zhang Date: Sat, 6 May 2023 10:47:52 +0800 Subject: [PATCH 042/163] BACKPORT: maple_tree: fix potential out-of-bounds access in mas_wr_end_piv() commit cd00dd2585c4158e81fdfac0bbcc0446afbad26d upstream. Check the write offset end bounds before using it as the offset into the pivot array. This avoids a possible out-of-bounds access on the pivot array if the write extends to the last slot in the node, in which case the node maximum should be used as the end pivot. akpm: this doesn't affect any current callers, but new users of mapletree may encounter this problem if backported into earlier kernels, so let's fix it in -stable kernels in case of this. Link: https://lkml.kernel.org/r/20230506024752.2550-1-zhangpeng.00@bytedance.com Fixes: 54a611b60590 ("Maple Tree: add new data structure") Signed-off-by: Peng Zhang Reviewed-by: Liam R. 
Howlett Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman Change-Id: I992549af25fa9c22f587893d004002d2e004d317 (cherry-picked from commit 4e2ad53ababeaac44d71162650984abfe783960c) Signed-off-by: Suren Baghdasaryan --- lib/maple_tree.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 24b31aee7880..b0a828f863c3 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -4322,11 +4322,13 @@ static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas) { wr_mas->end_piv = wr_mas->r_max; - while ((wr_mas->mas->last > wr_mas->end_piv) && - (wr_mas->offset_end < wr_mas->node_end)) - wr_mas->end_piv = wr_mas->pivots[++wr_mas->offset_end]; + while ((wr_mas->offset_end < wr_mas->node_end) && + (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end])) + wr_mas->offset_end++; - if (wr_mas->mas->last > wr_mas->end_piv) + if (wr_mas->offset_end < wr_mas->node_end) + wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end]; + else wr_mas->end_piv = wr_mas->mas->max; if (!wr_mas->entry) From 3803ae4a2832706860970b8a81591bd0c7798636 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 15 Jun 2023 15:17:36 -0700 Subject: [PATCH 043/163] BACKPORT: mm: introduce new 'lock_mm_and_find_vma()' page fault helper commit c2508ec5a58db67093f4fb8bf89a9a7c53a109e9 upstream. .. and make x86 use it. This basically extracts the existing x86 "find and expand faulting vma" code, but extends it to also take the mmap lock for writing in case we actually do need to expand the vma. We've historically short-circuited that case, and have some rather ugly special logic to serialize the stack segment expansion (since we only hold the mmap lock for reading) that doesn't match the normal VM locking. That slight violation of locking worked well, right up until it didn't: the maple tree code really does want proper locking even for simple extension of an existing vma. 
So extract the code for "look up the vma of the fault" from x86, fix it up to do the necessary write locking, and make it available as a helper function for other architectures that can use the common helper. Note: I say "common helper", but it really only handles the normal stack-grows-down case. Which is all architectures except for PA-RISC and IA64. So some rare architectures can't use the helper, but if they care they'll just need to open-code this logic. It's also worth pointing out that this code really would like to have an optimistic "mmap_upgrade_trylock()" to make it quicker to go from a read-lock (for the common case) to taking the write lock (for having to extend the vma) in the normal single-threaded situation where there is no other locking activity. But that _is_ all the very uncommon special case, so while it would be nice to have such an operation, it probably doesn't matter in reality. I did put in the skeleton code for such a possible future expansion, even if it only acts as pseudo-documentation for what we're doing. 
Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman [surenb: this one is taken from 6.4.y stable branch] Change-Id: I6e16e6751245ac24adcbe78114bc57c726463acb (cherry-picked from commit d6a5c7a1a6e52d4c46fe181237ca96cd46a42386) Signed-off-by: Suren Baghdasaryan --- arch/x86/Kconfig | 1 + arch/x86/mm/fault.c | 52 +------------------ include/linux/mm.h | 2 + mm/Kconfig | 4 ++ mm/memory.c | 121 ++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 130 insertions(+), 50 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index ab3ce4ea05d0..22cdc45031d4 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -272,6 +272,7 @@ config X86 select HAVE_GENERIC_VDSO select HOTPLUG_SMT if SMP select IRQ_FORCED_THREADING + select LOCK_MM_AND_FIND_VMA select NEED_PER_CPU_EMBED_FIRST_CHUNK select NEED_PER_CPU_PAGE_FIRST_CHUNK select NEED_SG_DMA_LENGTH diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 6551d6249f8a..8a74089d9f2e 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -901,12 +901,6 @@ __bad_area(struct pt_regs *regs, unsigned long error_code, __bad_area_nosemaphore(regs, error_code, address, pkey, si_code); } -static noinline void -bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address) -{ - __bad_area(regs, error_code, address, 0, SEGV_MAPERR); -} - static inline bool bad_area_access_from_pkeys(unsigned long error_code, struct vm_area_struct *vma) { @@ -1387,51 +1381,10 @@ void do_user_addr_fault(struct pt_regs *regs, lock_mmap: #endif /* CONFIG_PER_VMA_LOCK */ - /* - * Kernel-mode access to the user address space should only occur - * on well-defined single instructions listed in the exception - * tables. But, an erroneous kernel fault occurring outside one of - * those areas which also holds mmap_lock might deadlock attempting - * to validate the fault against the address space. - * - * Only do the expensive exception table search when we might be at - * risk of a deadlock. 
This happens if we - * 1. Failed to acquire mmap_lock, and - * 2. The access did not originate in userspace. - */ - if (unlikely(!mmap_read_trylock(mm))) { - if (!user_mode(regs) && !search_exception_tables(regs->ip)) { - /* - * Fault from code in kernel from - * which we do not expect faults. - */ - bad_area_nosemaphore(regs, error_code, address); - return; - } retry: - mmap_read_lock(mm); - } else { - /* - * The above down_read_trylock() might have succeeded in - * which case we'll have missed the might_sleep() from - * down_read(): - */ - might_sleep(); - } - - vma = find_vma(mm, address); + vma = lock_mm_and_find_vma(mm, address, regs); if (unlikely(!vma)) { - bad_area(regs, error_code, address); - return; - } - if (likely(vma->vm_start <= address)) - goto good_area; - if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { - bad_area(regs, error_code, address); - return; - } - if (unlikely(expand_stack(vma, address))) { - bad_area(regs, error_code, address); + bad_area_nosemaphore(regs, error_code, address); return; } @@ -1439,7 +1392,6 @@ void do_user_addr_fault(struct pt_regs *regs, * Ok, we have a good vm_area for this memory access, so * we can handle it.. 
*/ -good_area: if (unlikely(access_error(error_code, vma))) { bad_area_access_error(regs, error_code, address, vma); return; diff --git a/include/linux/mm.h b/include/linux/mm.h index 718bb0f8446c..fa3de0b51a29 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2117,6 +2117,8 @@ void unmap_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t nr, bool even_cows); void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows); +struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, + unsigned long address, struct pt_regs *regs); #else static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags, diff --git a/mm/Kconfig b/mm/Kconfig index a58632a9fbd9..985ed3d2adbd 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -1196,6 +1196,10 @@ config PER_VMA_LOCK This feature allows locking each virtual memory area separately when handling page faults instead of taking mmap_lock. +config LOCK_MM_AND_FIND_VMA + bool + depends on !STACK_GROWSUP + source "mm/damon/Kconfig" endmenu diff --git a/mm/memory.c b/mm/memory.c index d31c23419631..78a9e3fb0e65 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5287,6 +5287,127 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, } EXPORT_SYMBOL_GPL(handle_mm_fault); +#ifdef CONFIG_LOCK_MM_AND_FIND_VMA +#include + +static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs) +{ + /* Even if this succeeds, make it clear we *might* have slept */ + if (likely(mmap_read_trylock(mm))) { + might_sleep(); + return true; + } + + if (regs && !user_mode(regs)) { + unsigned long ip = instruction_pointer(regs); + if (!search_exception_tables(ip)) + return false; + } + + mmap_read_lock(mm); + return true; +} + +static inline bool mmap_upgrade_trylock(struct mm_struct *mm) +{ + /* + * We don't have this operation yet. 
+ * + * It should be easy enough to do: it's basically a + * atomic_long_try_cmpxchg_acquire() + * from RWSEM_READER_BIAS -> RWSEM_WRITER_LOCKED, but + * it also needs the proper lockdep magic etc. + */ + return false; +} + +static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs *regs) +{ + mmap_read_unlock(mm); + if (regs && !user_mode(regs)) { + unsigned long ip = instruction_pointer(regs); + if (!search_exception_tables(ip)) + return false; + } + mmap_write_lock(mm); + return true; +} + +/* + * Helper for page fault handling. + * + * This is kind of equivalend to "mmap_read_lock()" followed + * by "find_extend_vma()", except it's a lot more careful about + * the locking (and will drop the lock on failure). + * + * For example, if we have a kernel bug that causes a page + * fault, we don't want to just use mmap_read_lock() to get + * the mm lock, because that would deadlock if the bug were + * to happen while we're holding the mm lock for writing. + * + * So this checks the exception tables on kernel faults in + * order to only do this all for instructions that are actually + * expected to fault. + * + * We can also actually take the mm lock for writing if we + * need to extend the vma, which helps the VM layer a lot. + */ +struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, + unsigned long addr, struct pt_regs *regs) +{ + struct vm_area_struct *vma; + + if (!get_mmap_lock_carefully(mm, regs)) + return NULL; + + vma = find_vma(mm, addr); + if (likely(vma && (vma->vm_start <= addr))) + return vma; + + /* + * Well, dang. We might still be successful, but only + * if we can extend a vma to do so. + */ + if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) { + mmap_read_unlock(mm); + return NULL; + } + + /* + * We can try to upgrade the mmap lock atomically, + * in which case we can continue to use the vma + * we already looked up. 
+ * + * Otherwise we'll have to drop the mmap lock and + * re-take it, and also look up the vma again, + * re-checking it. + */ + if (!mmap_upgrade_trylock(mm)) { + if (!upgrade_mmap_lock_carefully(mm, regs)) + return NULL; + + vma = find_vma(mm, addr); + if (!vma) + goto fail; + if (vma->vm_start <= addr) + goto success; + if (!(vma->vm_flags & VM_GROWSDOWN)) + goto fail; + } + + if (expand_stack(vma, addr)) + goto fail; + +success: + mmap_write_downgrade(mm); + return vma; + +fail: + mmap_write_unlock(mm); + return NULL; +} +#endif + #ifdef CONFIG_PER_VMA_LOCK /* * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be From 1e114e6efac1254b0a5c11c1a8f9b6d7bdec0d7f Mon Sep 17 00:00:00 2001 From: Vincent Donnefort Date: Fri, 21 Jul 2023 16:48:06 +0100 Subject: [PATCH 044/163] ANDROID: KVM: arm64: Fix memory ordering for pKVM module callbacks Registration of module callbacks for the pKVM hypervisor is lockless thanks to the use of a cmpxchg. Problem, a CPU can speculatively execute an indirect branch and speculatively read variables used in that branch. We then need to order the memory access between variables potentially set in the driver init (before the callback registration happen) and the call to that registered callback. e.g. in the case of the serial. CPU0: CPU1: driver_init(): hyp_serial_enabled() base_addr = 0xdeadbeef; enabled = __hyp_putc barrier(); barrier(); ops->register_serial_driver(putc); if (enabled) __hyp_putc(); /* read base_addr */ This is the same for the SMC and PSCI handler callbacks. The abort and fault callbacks are not impacted: the driver init can only happen before the kernel is deprivileged i.e. before the host stage-2 is in place and then before any of those callbacks can be triggered. Instead of a full barrier, we can use the acquire/release semantics: relaxing cmpxchg to cmpxchg_release in the registration path and use a load_acquire in hyp_serial_enabled(). 
Bug: 292470326 Change-Id: I4b5fe3713fe40cc5ab42ea0e9cdf54e8315dfb44 Signed-off-by: Vincent Donnefort --- arch/arm64/kvm/hyp/nvhe/hyp-main.c | 9 +++++++-- arch/arm64/kvm/hyp/nvhe/psci-relay.c | 9 +++++++-- arch/arm64/kvm/hyp/nvhe/serial.c | 10 ++++++++-- 3 files changed, 22 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index 16abb1e0de08..48e20ad5ff38 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -39,7 +39,12 @@ static bool (*default_trap_handler)(struct kvm_cpu_context *host_ctxt); int __pkvm_register_host_smc_handler(bool (*cb)(struct kvm_cpu_context *)) { - return cmpxchg(&default_host_smc_handler, NULL, cb) ? -EBUSY : 0; + /* + * Paired with smp_load_acquire(&default_host_smc_handler) in + * handle_host_smc(). Ensure memory stores happening during a pKVM module + * init are observed before executing the callback. + */ + return cmpxchg_release(&default_host_smc_handler, NULL, cb) ? 
-EBUSY : 0; } int __pkvm_register_default_trap_handler(bool (*cb)(struct kvm_cpu_context *)) @@ -1376,7 +1381,7 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt) handled = kvm_host_psci_handler(host_ctxt); if (!handled) handled = kvm_host_ffa_handler(host_ctxt); - if (!handled && READ_ONCE(default_host_smc_handler)) + if (!handled && smp_load_acquire(&default_host_smc_handler)) handled = default_host_smc_handler(host_ctxt); if (!handled) __kvm_hyp_host_forward_smc(host_ctxt); diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c index d4825b6140ba..f8db5445b530 100644 --- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c +++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c @@ -28,14 +28,19 @@ struct kvm_host_psci_config __ro_after_init kvm_host_psci_config; static void (*pkvm_psci_notifier)(enum pkvm_psci_notification, struct kvm_cpu_context *); static void pkvm_psci_notify(enum pkvm_psci_notification notif, struct kvm_cpu_context *host_ctxt) { - if (READ_ONCE(pkvm_psci_notifier)) + if (smp_load_acquire(&pkvm_psci_notifier)) pkvm_psci_notifier(notif, host_ctxt); } #ifdef CONFIG_MODULES int __pkvm_register_psci_notifier(void (*cb)(enum pkvm_psci_notification, struct kvm_cpu_context *)) { - return cmpxchg(&pkvm_psci_notifier, NULL, cb) ? -EBUSY : 0; + /* + * Paired with smp_load_acquire(&pkvm_psci_notifier) in + * pkvm_psci_notify(). Ensure memory stores hapenning during a pKVM module + * init are observed before executing the callback. + */ + return cmpxchg_release(&pkvm_psci_notifier, NULL, cb) ? 
-EBUSY : 0; } #endif diff --git a/arch/arm64/kvm/hyp/nvhe/serial.c b/arch/arm64/kvm/hyp/nvhe/serial.c index 0b2cf3b6d6a5..475ebf4ba7de 100644 --- a/arch/arm64/kvm/hyp/nvhe/serial.c +++ b/arch/arm64/kvm/hyp/nvhe/serial.c @@ -35,7 +35,8 @@ static inline void __hyp_putx4n(unsigned long x, int n) static inline bool hyp_serial_enabled(void) { - return !!READ_ONCE(__hyp_putc); + /* Paired with __pkvm_register_serial_driver()'s cmpxchg */ + return !!smp_load_acquire(&__hyp_putc); } void hyp_puts(const char *s) @@ -64,5 +65,10 @@ void hyp_putc(char c) int __pkvm_register_serial_driver(void (*cb)(char)) { - return cmpxchg(&__hyp_putc, NULL, cb) ? -EBUSY : 0; + /* + * Paired with smp_load_acquire(&__hyp_putc) in + * hyp_serial_enabled(). Ensure memory stores hapenning during a pKVM + * module init are observed before executing the callback. + */ + return cmpxchg_release(&__hyp_putc, NULL, cb) ? -EBUSY : 0; } From ec419af28fddc79ed28fc81928b49d46679c623d Mon Sep 17 00:00:00 2001 From: lambert wang Date: Wed, 26 Jul 2023 16:28:48 +0800 Subject: [PATCH 045/163] ANDROID: devlink: Select CONFIG_NET_DEVLINK in Kconfig.gki Select hidden Kconfig: NET_DEVLINK. Required by device drivers to provide unified interface to expose device info, capture coredump and perform device flash. Bug: 283707518 Change-Id: I1cc5b7dce36c79549cd7f1d9b755f7bab3973f0e Signed-off-by: michael cai Signed-off-by: lambert wang --- init/Kconfig.gki | 1 + 1 file changed, 1 insertion(+) diff --git a/init/Kconfig.gki b/init/Kconfig.gki index 29eb1eefbd3d..081b1cdc9c7e 100644 --- a/init/Kconfig.gki +++ b/init/Kconfig.gki @@ -201,6 +201,7 @@ config GKI_HIDDEN_NET_CONFIGS bool "Hidden networking configuration needed for GKI" select PAGE_POOL select NET_PTP_CLASSIFY + select NET_DEVLINK help Dummy config option used to enable the networking hidden config, required by various SoC platforms. 
From b61f298c0d9acd229a3ad9d240dad78c6efbdce4 Mon Sep 17 00:00:00 2001 From: lambert wang Date: Wed, 26 Jul 2023 17:46:28 +0800 Subject: [PATCH 046/163] ANDROID: GKI: Add ABI symbol list(devlink) for MTK 17 function symbol(s) added 'bool device_remove_file_self(struct device*, const struct device_attribute*)' 'struct devlink* devlink_alloc_ns(const struct devlink_ops*, size_t, struct net*, struct device*)' 'void devlink_flash_update_status_notify(struct devlink*, const char*, const char*, unsigned long, unsigned long)' 'int devlink_fmsg_binary_pair_nest_end(struct devlink_fmsg*)' 'int devlink_fmsg_binary_pair_nest_start(struct devlink_fmsg*, const char*)' 'int devlink_fmsg_binary_put(struct devlink_fmsg*, const void*, u16)' 'void devlink_free(struct devlink*)' 'int devlink_health_report(struct devlink_health_reporter*, const char*, void*)' 'struct devlink_health_reporter* devlink_health_reporter_create(struct devlink*, const struct devlink_health_reporter_ops*, u64, void*)' 'void devlink_health_reporter_destroy(struct devlink_health_reporter*)' 'void* devlink_health_reporter_priv(struct devlink_health_reporter*)' 'void devlink_health_reporter_state_update(struct devlink_health_reporter*, enum devlink_health_reporter_state)' 'void* devlink_priv(struct devlink*)' 'struct devlink_region* devlink_region_create(struct devlink*, const struct devlink_region_ops*, u32, u64)' 'void devlink_region_destroy(struct devlink_region*)' 'void devlink_register(struct devlink*)' 'void devlink_unregister(struct devlink*)' type 'struct devlink' changed was only declared, is now fully defined type 'struct devlink_linecard' changed was only declared, is now fully defined Bug: 283707518 Change-Id: I686fd14c13863c27b3dfdb29cd7c6b6d5a0a3127 Signed-off-by: lambert wang Signed-off-by: iven yang Signed-off-by: michael cai --- android/abi_gki_aarch64.stg | 2838 ++++++++++++++++++++++++++++++++++- android/abi_gki_aarch64_mtk | 17 + 2 files changed, 2834 insertions(+), 21 deletions(-) diff --git 
a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index a8fc00774738..13de9dcc4631 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -1301,6 +1301,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x39185662 } +pointer_reference { + id: 0x04d7fcdd + kind: POINTER + pointee_type_id: 0x391f15ea +} pointer_reference { id: 0x04dad728 kind: POINTER @@ -2671,6 +2676,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x03f06751 } +pointer_reference { + id: 0x0a70ce1b + kind: POINTER + pointee_type_id: 0x0383def3 +} pointer_reference { id: 0x0a747547 kind: POINTER @@ -4046,6 +4056,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x19832066 } +pointer_reference { + id: 0x0cf3d8fe + kind: POINTER + pointee_type_id: 0x198f8565 +} pointer_reference { id: 0x0cf80951 kind: POINTER @@ -4241,6 +4256,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x1e3b8bac } +pointer_reference { + id: 0x0d1f55de + kind: POINTER + pointee_type_id: 0x1e3db1e5 +} pointer_reference { id: 0x0d20d38c kind: POINTER @@ -4276,6 +4296,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x1edfcb66 } +pointer_reference { + id: 0x0d27dc9d + kind: POINTER + pointee_type_id: 0x1edf94e9 +} pointer_reference { id: 0x0d2958f6 kind: POINTER @@ -5346,6 +5371,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x12725ea0 } +pointer_reference { + id: 0x0e0dc148 + kind: POINTER + pointee_type_id: 0x1277e3bd +} pointer_reference { id: 0x0e0dc9f3 kind: POINTER @@ -5511,6 +5541,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x129eb456 } +pointer_reference { + id: 0x0e38185b + kind: POINTER + pointee_type_id: 0x12a087f3 +} pointer_reference { id: 0x0e395200 kind: POINTER @@ -5571,6 +5606,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x13525403 } +pointer_reference { + id: 0x0e44c87b + kind: POINTER + pointee_type_id: 0x1353c771 +} pointer_reference { id: 0x0e44f9f8 kind: POINTER @@ -5641,6 +5681,11 @@ pointer_reference { 
kind: POINTER pointee_type_id: 0x13797fb7 } +pointer_reference { + id: 0x0e4e7ccb + kind: POINTER + pointee_type_id: 0x137915b0 +} pointer_reference { id: 0x0e4f7e58 kind: POINTER @@ -6921,6 +6966,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x17ce1ca0 } +pointer_reference { + id: 0x0f657fc1 + kind: POINTER + pointee_type_id: 0x17d51999 +} pointer_reference { id: 0x0f677ef9 kind: POINTER @@ -8091,6 +8141,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x664d9cb2 } +pointer_reference { + id: 0x130aa721 + kind: POINTER + pointee_type_id: 0x666a7a1b +} pointer_reference { id: 0x13185ce8 kind: POINTER @@ -8836,6 +8891,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x76e78cd9 } +pointer_reference { + id: 0x173696bc + kind: POINTER + pointee_type_id: 0x769abc6d +} pointer_reference { id: 0x1740e61d kind: POINTER @@ -8976,6 +9036,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x755b6647 } +pointer_reference { + id: 0x17d51999 + kind: POINTER + pointee_type_id: 0x751480f9 +} pointer_reference { id: 0x17dabdcd kind: POINTER @@ -9436,6 +9501,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x4d320115 } +pointer_reference { + id: 0x19df035f + kind: POINTER + pointee_type_id: 0x4d3cebe1 +} pointer_reference { id: 0x19e0c64c kind: POINTER @@ -9721,6 +9791,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x475137a2 } +pointer_reference { + id: 0x1b4a1f75 + kind: POINTER + pointee_type_id: 0x47689b48 +} pointer_reference { id: 0x1b4ce091 kind: POINTER @@ -10826,6 +10901,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x551d0131 } +pointer_reference { + id: 0x1fdf8df4 + kind: POINTER + pointee_type_id: 0x553ed14c +} pointer_reference { id: 0x1fe51930 kind: POINTER @@ -11616,6 +11696,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xa57d1db8 } +pointer_reference { + id: 0x23d822f9 + kind: POINTER + pointee_type_id: 0xa5206d7a +} pointer_reference { id: 0x23d902f6 kind: POINTER @@ -11936,11 +12021,6 @@ 
pointer_reference { kind: POINTER pointee_type_id: 0xbceec39a } -pointer_reference { - id: 0x25b57283 - kind: POINTER - pointee_type_id: 0xbc952c91 -} pointer_reference { id: 0x25b73daa kind: POINTER @@ -12371,6 +12451,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xb4a26738 } +pointer_reference { + id: 0x27c7b3cb + kind: POINTER + pointee_type_id: 0xb55e29b2 +} pointer_reference { id: 0x27cf73eb kind: POINTER @@ -12806,16 +12891,16 @@ pointer_reference { kind: POINTER pointee_type_id: 0x8de7c9fe } +pointer_reference { + id: 0x29ef8105 + kind: POINTER + pointee_type_id: 0x8dfee289 +} pointer_reference { id: 0x29f9c70b kind: POINTER pointee_type_id: 0x8da7fab2 } -pointer_reference { - id: 0x2a0586b2 - kind: POINTER - pointee_type_id: 0x8256fc56 -} pointer_reference { id: 0x2a0a605f kind: POINTER @@ -15771,6 +15856,26 @@ pointer_reference { kind: POINTER pointee_type_id: 0x99be88a0 } +pointer_reference { + id: 0x2d000b85 + kind: POINTER + pointee_type_id: 0x9e40c88b +} +pointer_reference { + id: 0x2d00157c + kind: POINTER + pointee_type_id: 0x9e40b36c +} +pointer_reference { + id: 0x2d004103 + kind: POINTER + pointee_type_id: 0x9e41e293 +} +pointer_reference { + id: 0x2d004a69 + kind: POINTER + pointee_type_id: 0x9e41cf39 +} pointer_reference { id: 0x2d008d0e kind: POINTER @@ -15781,21 +15886,61 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9e448e2a } +pointer_reference { + id: 0x2d018e8d + kind: POINTER + pointee_type_id: 0x9e46dca9 +} +pointer_reference { + id: 0x2d01e009 + kind: POINTER + pointee_type_id: 0x9e4766bb +} pointer_reference { id: 0x2d0240fc kind: POINTER pointee_type_id: 0x9e49e56e } +pointer_reference { + id: 0x2d02e4d4 + kind: POINTER + pointee_type_id: 0x9e4b75cd +} +pointer_reference { + id: 0x2d033017 + kind: POINTER + pointee_type_id: 0x9e4c26c1 +} pointer_reference { id: 0x2d036f50 kind: POINTER pointee_type_id: 0x9e4d5bdd } +pointer_reference { + id: 0x2d0429c2 + kind: POINTER + pointee_type_id: 0x9e504197 +} 
+pointer_reference { + id: 0x2d044ee7 + kind: POINTER + pointee_type_id: 0x9e51dd03 +} +pointer_reference { + id: 0x2d046cf4 + kind: POINTER + pointee_type_id: 0x9e51554f +} pointer_reference { id: 0x2d04a781 kind: POINTER pointee_type_id: 0x9e52789b } +pointer_reference { + id: 0x2d0679a1 + kind: POINTER + pointee_type_id: 0x9e590019 +} pointer_reference { id: 0x2d06e5cd kind: POINTER @@ -15811,6 +15956,16 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9e5ef2fc } +pointer_reference { + id: 0x2d081f17 + kind: POINTER + pointee_type_id: 0x9e609ac2 +} +pointer_reference { + id: 0x2d085064 + kind: POINTER + pointee_type_id: 0x9e61a70c +} pointer_reference { id: 0x2d08a576 kind: POINTER @@ -15826,6 +15981,16 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9e663e24 } +pointer_reference { + id: 0x2d0a0361 + kind: POINTER + pointee_type_id: 0x9e68eb19 +} +pointer_reference { + id: 0x2d0ab1eb + kind: POINTER + pointee_type_id: 0x9e6a2131 +} pointer_reference { id: 0x2d0b7a8e kind: POINTER @@ -15851,6 +16016,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9e792fe6 } +pointer_reference { + id: 0x2d0e9268 + kind: POINTER + pointee_type_id: 0x9e7aaf3f +} pointer_reference { id: 0x2d0fdd7c kind: POINTER @@ -15886,6 +16056,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9e026d74 } +pointer_reference { + id: 0x2d11ffb5 + kind: POINTER + pointee_type_id: 0x9e071849 +} pointer_reference { id: 0x2d126960 kind: POINTER @@ -15901,6 +16076,16 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9e11d049 } +pointer_reference { + id: 0x2d154530 + kind: POINTER + pointee_type_id: 0x9e15f25c +} +pointer_reference { + id: 0x2d161d5c + kind: POINTER + pointee_type_id: 0x9e1893ee +} pointer_reference { id: 0x2d164af6 kind: POINTER @@ -15911,6 +16096,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9e19651e } +pointer_reference { + id: 0x2d16aeaf + kind: POINTER + pointee_type_id: 0x9e1a5c22 +} pointer_reference { id: 0x2d16b2fd kind: POINTER 
@@ -15971,6 +16161,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9e2ef574 } +pointer_reference { + id: 0x2d1c1d12 + kind: POINTER + pointee_type_id: 0x9e3092d5 +} pointer_reference { id: 0x2d1c7478 kind: POINTER @@ -16301,6 +16496,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9ebfed71 } +pointer_reference { + id: 0x2d3ffbc6 + kind: POINTER + pointee_type_id: 0x9ebf0984 +} pointer_reference { id: 0x2d4051be kind: POINTER @@ -16406,6 +16606,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9f6f73ed } +pointer_reference { + id: 0x2d4c6881 + kind: POINTER + pointee_type_id: 0x9f71449b +} pointer_reference { id: 0x2d4eaa35 kind: POINTER @@ -16421,6 +16626,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9f7fc723 } +pointer_reference { + id: 0x2d4fcd25 + kind: POINTER + pointee_type_id: 0x9f7fd20b +} pointer_reference { id: 0x2d50c295 kind: POINTER @@ -16461,6 +16671,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9f1dcaea } +pointer_reference { + id: 0x2d594ead + kind: POINTER + pointee_type_id: 0x9f25dc29 +} pointer_reference { id: 0x2d59c606 kind: POINTER @@ -16691,6 +16906,16 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9f87b817 } +pointer_reference { + id: 0x2d7368ed + kind: POINTER + pointee_type_id: 0x9f8d452b +} +pointer_reference { + id: 0x2d739f19 + kind: POINTER + pointee_type_id: 0x9f8e9af9 +} pointer_reference { id: 0x2d7549b3 kind: POINTER @@ -16731,6 +16956,16 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9fa3b723 } +pointer_reference { + id: 0x2d7ab985 + kind: POINTER + pointee_type_id: 0x9faa0088 +} +pointer_reference { + id: 0x2d7ac448 + kind: POINTER + pointee_type_id: 0x9fabf7be +} pointer_reference { id: 0x2d7ae3a5 kind: POINTER @@ -16931,6 +17166,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9c7d7af2 } +pointer_reference { + id: 0x2d8fd28a + kind: POINTER + pointee_type_id: 0x9c7facb4 +} pointer_reference { id: 0x2d8fdd2c kind: POINTER @@ -17216,6 +17456,11 @@ 
pointer_reference { kind: POINTER pointee_type_id: 0x9cf48276 } +pointer_reference { + id: 0x2dae0c91 + kind: POINTER + pointee_type_id: 0x9cf8d4d9 +} pointer_reference { id: 0x2dae560d kind: POINTER @@ -17951,6 +18196,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9da05299 } +pointer_reference { + id: 0x2df84197 + kind: POINTER + pointee_type_id: 0x9da1e0c3 +} pointer_reference { id: 0x2df9d3a1 kind: POINTER @@ -18116,21 +18366,56 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9266fead } +pointer_reference { + id: 0x2e0a2548 + kind: POINTER + pointee_type_id: 0x926873bc +} +pointer_reference { + id: 0x2e0a26c5 + kind: POINTER + pointee_type_id: 0x92687d88 +} pointer_reference { id: 0x2e0a4508 kind: POINTER pointee_type_id: 0x9269f2bc } +pointer_reference { + id: 0x2e0ab0cf + kind: POINTER + pointee_type_id: 0x926a25a2 +} +pointer_reference { + id: 0x2e0abaa7 + kind: POINTER + pointee_type_id: 0x926a0c03 +} +pointer_reference { + id: 0x2e0abbc6 + kind: POINTER + pointee_type_id: 0x926a0987 +} pointer_reference { id: 0x2e0ac0be kind: POINTER pointee_type_id: 0x926be467 } +pointer_reference { + id: 0x2e0ad762 + kind: POINTER + pointee_type_id: 0x926bbb17 +} pointer_reference { id: 0x2e0b4b4b kind: POINTER pointee_type_id: 0x926dcbb0 } +pointer_reference { + id: 0x2e0b7ad3 + kind: POINTER + pointee_type_id: 0x926d0dd0 +} pointer_reference { id: 0x2e0b807c kind: POINTER @@ -18186,6 +18471,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x920d4b76 } +pointer_reference { + id: 0x2e1466a0 + kind: POINTER + pointee_type_id: 0x92117c1d +} pointer_reference { id: 0x2e152fbb kind: POINTER @@ -18276,6 +18566,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x92c2d86d } +pointer_reference { + id: 0x2e20981d + kind: POINTER + pointee_type_id: 0x92c286e9 +} pointer_reference { id: 0x2e215a2d kind: POINTER @@ -18341,6 +18636,16 @@ pointer_reference { kind: POINTER pointee_type_id: 0x92f4a3cb } +pointer_reference { + id: 0x2e2ef220 + kind: 
POINTER + pointee_type_id: 0x92fb2e1e +} +pointer_reference { + id: 0x2e2f03e7 + kind: POINTER + pointee_type_id: 0x92fce902 +} pointer_reference { id: 0x2e2ff28a kind: POINTER @@ -18451,6 +18756,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9352af9c } +pointer_reference { + id: 0x2e477ad3 + kind: POINTER + pointee_type_id: 0x935d0dd3 +} pointer_reference { id: 0x2e47a18c kind: POINTER @@ -18551,6 +18861,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9313a933 } +pointer_reference { + id: 0x2e54f1b8 + kind: POINTER + pointee_type_id: 0x9313207d +} pointer_reference { id: 0x2e5535c9 kind: POINTER @@ -18581,6 +18896,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x931806a8 } +pointer_reference { + id: 0x2e5686ce + kind: POINTER + pointee_type_id: 0x931afda4 +} pointer_reference { id: 0x2e56b2c6 kind: POINTER @@ -18731,6 +19051,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x93907678 } +pointer_reference { + id: 0x2e7505e0 + kind: POINTER + pointee_type_id: 0x9394f11e +} pointer_reference { id: 0x2e76071a kind: POINTER @@ -18786,6 +19111,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x93b941f7 } +pointer_reference { + id: 0x2e7ffbfe + kind: POINTER + pointee_type_id: 0x93bf0967 +} pointer_reference { id: 0x2e804bb7 kind: POINTER @@ -21516,6 +21846,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xeba4823c } +pointer_reference { + id: 0x307eba5c + kind: POINTER + pointee_type_id: 0xebba0fee +} pointer_reference { id: 0x30887d17 kind: POINTER @@ -21781,6 +22116,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xeec4d3d8 } +pointer_reference { + id: 0x3121a074 + kind: POINTER + pointee_type_id: 0xeec6674e +} pointer_reference { id: 0x31287056 kind: POINTER @@ -22056,6 +22396,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xed225d89 } +pointer_reference { + id: 0x31d9e79a + kind: POINTER + pointee_type_id: 0xed2778f4 +} pointer_reference { id: 0x31da1e83 kind: POINTER @@ -22211,6 +22556,11 
@@ pointer_reference { kind: POINTER pointee_type_id: 0xe3420903 } +pointer_reference { + id: 0x3247ae94 + kind: POINTER + pointee_type_id: 0xe35e5ccd +} pointer_reference { id: 0x324a2d7b kind: POINTER @@ -23006,6 +23356,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xe5ac437d } +pointer_reference { + id: 0x33fd261b + kind: POINTER + pointee_type_id: 0xe5b47ef3 +} pointer_reference { id: 0x34016e82 kind: POINTER @@ -23751,6 +24106,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xfc53e789 } +pointer_reference { + id: 0x358546f6 + kind: POINTER + pointee_type_id: 0xfc55fd47 +} pointer_reference { id: 0x358a1c52 kind: POINTER @@ -23836,6 +24196,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xfc8ba4d2 } +pointer_reference { + id: 0x35b3ea42 + kind: POINTER + pointee_type_id: 0xfc8f4f95 +} pointer_reference { id: 0x35bad1e5 kind: POINTER @@ -24631,6 +24996,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xf7e2e2fa } +pointer_reference { + id: 0x376c8705 + kind: POINTER + pointee_type_id: 0xf7f2fa8a +} pointer_reference { id: 0x376d0d4f kind: POINTER @@ -25431,6 +25801,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xce9dd989 } +pointer_reference { + id: 0x39388fd3 + kind: POINTER + pointee_type_id: 0xcea2d9d1 +} pointer_reference { id: 0x393f044a kind: POINTER @@ -25586,6 +25961,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xcc1251bf } +pointer_reference { + id: 0x3999579d + kind: POINTER + pointee_type_id: 0xcc25b8e9 +} pointer_reference { id: 0x399f63b7 kind: POINTER @@ -26131,6 +26511,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xc60bc51e } +pointer_reference { + id: 0x3b143836 + kind: POINTER + pointee_type_id: 0xc6100647 +} pointer_reference { id: 0x3b19594b kind: POINTER @@ -26261,6 +26646,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xc77aa47c } +pointer_reference { + id: 0x3b5017f2 + kind: POINTER + pointee_type_id: 0xc700b957 +} pointer_reference { id: 0x3b54fdd3 kind: 
POINTER @@ -26506,6 +26896,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xda6429b7 } +pointer_reference { + id: 0x3c0cf46a + kind: POINTER + pointee_type_id: 0xda733736 +} pointer_reference { id: 0x3c0e9e79 kind: POINTER @@ -26616,6 +27011,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xdb0ebdcd } +pointer_reference { + id: 0x3c53e119 + kind: POINTER + pointee_type_id: 0xdb0f62fb +} pointer_reference { id: 0x3c53eefd kind: POINTER @@ -26921,6 +27321,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xdf4d92db } +pointer_reference { + id: 0x3d46e073 + kind: POINTER + pointee_type_id: 0xdf5b6752 +} pointer_reference { id: 0x3d4bf55f kind: POINTER @@ -27941,6 +28346,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xd559247f } +pointer_reference { + id: 0x3fca7642 + kind: POINTER + pointee_type_id: 0xd5693f95 +} pointer_reference { id: 0x3fd0b78c kind: POINTER @@ -30331,6 +30741,11 @@ qualified { qualifier: CONST qualified_type_id: 0x6798ba36 } +qualified { + id: 0xc6100647 + qualifier: CONST + qualified_type_id: 0x6807af97 +} qualified { id: 0xc62583b0 qualifier: CONST @@ -31406,6 +31821,11 @@ qualified { qualifier: CONST qualified_type_id: 0x19d71054 } +qualified { + id: 0xda733736 + qualifier: CONST + qualified_type_id: 0x198b6a50 +} qualified { id: 0xda7a9f79 qualifier: CONST @@ -31431,6 +31851,11 @@ qualified { qualifier: CONST qualified_type_id: 0x1c7ac324 } +qualified { + id: 0xdb0f62fb + qualifier: CONST + qualified_type_id: 0x1c7a3d65 +} qualified { id: 0xdb10d97a qualifier: CONST @@ -31691,6 +32116,11 @@ qualified { qualifier: CONST qualified_type_id: 0x0d6c22fa } +qualified { + id: 0xdf5b6752 + qualifier: CONST + qualified_type_id: 0x0d2a2bc0 +} qualified { id: 0xdf6f33e0 qualifier: CONST @@ -32026,6 +32456,11 @@ qualified { qualifier: CONST qualified_type_id: 0xe6f6bb7d } +qualified { + id: 0xe5b47ef3 + qualifier: CONST + qualified_type_id: 0xe6964d46 +} qualified { id: 0xe5d16cd3 qualifier: CONST @@ -32326,6 +32761,11 
@@ qualified { qualifier: CONST qualified_type_id: 0xded5be7b } +qualified { + id: 0xebba0fee + qualifier: CONST + qualified_type_id: 0xdeaf8933 +} qualified { id: 0xebc0e0d9 qualifier: CONST @@ -32431,6 +32871,11 @@ qualified { qualifier: CONST qualified_type_id: 0xc460028a } +qualified { + id: 0xed2778f4 + qualifier: CONST + qualified_type_id: 0xc4da5559 +} qualified { id: 0xed32e285 qualifier: CONST @@ -32531,6 +32976,11 @@ qualified { qualifier: CONST qualified_type_id: 0xcaab19b4 } +qualified { + id: 0xeec6674e + qualifier: CONST + qualified_type_id: 0xcb5e2bb0 +} qualified { id: 0xeee127c5 qualifier: CONST @@ -32906,6 +33356,11 @@ qualified { qualifier: CONST qualified_type_id: 0xae5a9933 } +qualified { + id: 0xf7f2fa8a + qualifier: CONST + qualified_type_id: 0xaf8c5ca0 +} qualified { id: 0xf824f490 qualifier: CONST @@ -38417,6 +38872,11 @@ member { type_id: 0x7edb75e7 offset: 96 } +member { + id: 0x3c8c9122 + type_id: 0x7d1c3635 + offset: 256 +} member { id: 0x3cac7aec type_id: 0x7d9f9ba1 @@ -39928,6 +40388,12 @@ member { name: "_msg" type_id: 0x3e10b518 } +member { + id: 0x3e75936d + name: "_net" + type_id: 0xb335d16f + offset: 3136 +} member { id: 0x3e759921 name: "_net" @@ -48600,12 +49066,24 @@ member { offset: 65 bitsize: 1 } +member { + id: 0x773b8f3f + name: "auto_dump" + type_id: 0x6d7f5ff6 + offset: 904 +} member { id: 0xaa0ea302 name: "auto_flowlabels" type_id: 0x295c7202 offset: 632 } +member { + id: 0xea4e825c + name: "auto_recover" + type_id: 0x6d7f5ff6 + offset: 896 +} member { id: 0xc5fa3041 name: "auto_runtime_pm" @@ -51712,6 +52190,12 @@ member { type_id: 0x326dfde1 offset: 256 } +member { + id: 0x4a47d75f + name: "bitwidth" + type_id: 0x4585663f + offset: 96 +} member { id: 0x9cc8208d name: "bkops" @@ -57175,6 +57659,12 @@ member { type_id: 0x208118b2 offset: 8640 } +member { + id: 0x08477548 + name: "cell_size" + type_id: 0xc9082b19 + offset: 96 +} member { id: 0xc909929f name: "cells" @@ -61334,6 +61824,12 @@ member { name: "common" 
type_id: 0xfb3bb098 } +member { + id: 0x999f46a7 + name: "comp" + type_id: 0x3fcbf304 + offset: 3648 +} member { id: 0x999f496f name: "comp" @@ -61927,6 +62423,12 @@ member { type_id: 0x35aebc23 offset: 64 } +member { + id: 0xe5d87fc5 + name: "component" + type_id: 0x3e10b518 + offset: 64 +} member { id: 0xe5eb95e0 name: "component" @@ -63226,6 +63728,12 @@ member { name: "controller" type_id: 0xc9082b19 } +member { + id: 0xd8dc9c99 + name: "controller" + type_id: 0xc9082b19 + offset: 64 +} member { id: 0x608f2d5b name: "controller_data" @@ -63238,6 +63746,13 @@ member { type_id: 0x18bd6530 offset: 7552 } +member { + id: 0x1db19505 + name: "controller_valid" + type_id: 0x295c7202 + offset: 145 + bitsize: 1 +} member { id: 0x474fe3e5 name: "controls" @@ -66368,6 +66883,12 @@ member { name: "cur_seq" type_id: 0x92233392 } +member { + id: 0x80fbe8a7 + name: "cur_snapshots" + type_id: 0xc9082b19 + offset: 864 +} member { id: 0x8b31830e name: "cur_stack" @@ -71057,6 +71578,12 @@ member { type_id: 0x0d27055d offset: 128 } +member { + id: 0xa4170b6e + name: "destructor" + type_id: 0x0d27dc9d + offset: 64 +} member { id: 0xa4b58e4f name: "destructor" @@ -71673,6 +72200,12 @@ member { type_id: 0x0258f96e offset: 2880 } +member { + id: 0xce3bb522 + name: "dev" + type_id: 0x0258f96e + offset: 3072 +} member { id: 0xce3bb837 name: "dev" @@ -73020,15 +73553,26 @@ member { offset: 6816 } member { - id: 0x5685a7bf + id: 0x56ace115 name: "devlink" - type_id: 0x25b57283 + type_id: 0x0cf3d8fe offset: 256 } member { - id: 0x5685afdd + id: 0x56ace1be name: "devlink" - type_id: 0x25b57283 + type_id: 0x0cf3d8fe +} +member { + id: 0x56ace87c + name: "devlink" + type_id: 0x0cf3d8fe + offset: 128 +} +member { + id: 0x56ace977 + name: "devlink" + type_id: 0x0cf3d8fe offset: 192 } member { @@ -73036,6 +73580,12 @@ member { name: "devlink_port" type_id: 0x3b68ec61 } +member { + id: 0xeb76e483 + name: "devlink_port" + type_id: 0x3b68ec61 + offset: 320 +} member { id: 0xf45f2394 name: 
"devlink_rate" @@ -73321,6 +73871,12 @@ member { type_id: 0x295c7202 offset: 224 } +member { + id: 0x30b83368 + name: "diagnose" + type_id: 0x2e2f03e7 + offset: 192 +} member { id: 0xc3552be5 name: "dialed_frequency" @@ -75684,6 +76240,18 @@ member { offset: 1034 bitsize: 1 } +member { + id: 0x32a55865 + name: "dpipe_headers" + type_id: 0x27c7b3cb + offset: 1472 +} +member { + id: 0x3d9266c0 + name: "dpipe_table_list" + type_id: 0xd3c80119 + offset: 448 +} member { id: 0x95152d3f name: "dplen" @@ -77276,6 +77844,12 @@ member { type_id: 0x2f1d9bf5 offset: 960 } +member { + id: 0x0a7d4966 + name: "dump" + type_id: 0x2e2ef220 + offset: 128 +} member { id: 0x0a7e5e7c name: "dump" @@ -77312,6 +77886,24 @@ member { type_id: 0x425c572c offset: 7808 } +member { + id: 0x210293e8 + name: "dump_fmsg" + type_id: 0x23d822f9 + offset: 384 +} +member { + id: 0x33fc8cf5 + name: "dump_lock" + type_id: 0xa7c362b0 + offset: 448 +} +member { + id: 0x352c4ca4 + name: "dump_real_ts" + type_id: 0x92233392 + offset: 1024 +} member { id: 0xc82958b0 name: "dump_segments" @@ -77336,6 +77928,12 @@ member { type_id: 0x2f288c5c offset: 832 } +member { + id: 0x1e719a84 + name: "dump_ts" + type_id: 0x92233392 + offset: 960 +} member { id: 0xa92e0890 name: "dump_vendor_regs" @@ -80470,6 +81068,12 @@ member { offset: 6530 bitsize: 1 } +member { + id: 0x0b6ea206 + name: "error_count" + type_id: 0x92233392 + offset: 1088 +} member { id: 0x0b9ba55b name: "error_count" @@ -80672,6 +81276,42 @@ member { type_id: 0x39470e64 offset: 256 } +member { + id: 0x2d2ca64a + name: "eswitch_encap_mode_get" + type_id: 0x2d046cf4 + offset: 1408 +} +member { + id: 0x0e5d040f + name: "eswitch_encap_mode_set" + type_id: 0x2df84197 + offset: 1472 +} +member { + id: 0xff091101 + name: "eswitch_inline_mode_get" + type_id: 0x2d3ffbc6 + offset: 1280 +} +member { + id: 0xf81283dc + name: "eswitch_inline_mode_set" + type_id: 0x2d161d5c + offset: 1344 +} +member { + id: 0x67ecafb3 + name: "eswitch_mode_get" + type_id: 
0x2d11ffb5 + offset: 1152 +} +member { + id: 0x06f3214d + name: "eswitch_mode_set" + type_id: 0x2dae0c91 + offset: 1216 +} member { id: 0xca94b2d4 name: "eth_tp_mdix" @@ -84127,6 +84767,12 @@ member { type_id: 0x92233392 offset: 7744 } +member { + id: 0xc58a570e + name: "features" + type_id: 0x92233392 + offset: 2496 +} member { id: 0xc5a16345 name: "features" @@ -84946,6 +85592,12 @@ member { type_id: 0x7dbd382e offset: 352 } +member { + id: 0x784a4b9b + name: "fields" + type_id: 0x19df035f + offset: 128 +} member { id: 0x788054c9 name: "fields" @@ -84969,6 +85621,12 @@ member { type_id: 0x2db9a683 offset: 256 } +member { + id: 0x361d56de + name: "fields_count" + type_id: 0x4585663f + offset: 192 +} member { id: 0x53870b05 name: "fiemap" @@ -87515,6 +88173,12 @@ member { type_id: 0x0d9a8731 offset: 1600 } +member { + id: 0x5ef8d6d5 + name: "flash_update" + type_id: 0x2d16aeaf + offset: 1600 +} member { id: 0x4ad49fc7 name: "flat" @@ -87551,6 +88215,11 @@ member { type_id: 0x03e0374b offset: 64 } +member { + id: 0xdf2fd72b + name: "flavour" + type_id: 0x03e0374b +} member { id: 0x9a18c6a2 name: "flc_flock" @@ -90800,6 +91469,11 @@ member { type_id: 0x0faae5b1 offset: 96 } +member { + id: 0x91e86fed + name: "fw" + type_id: 0x33011141 +} member { id: 0x51f9c40d name: "fw_download" @@ -91410,6 +92084,18 @@ member { name: "generic" type_id: 0x09dc021e } +member { + id: 0x4257f83b + name: "generic" + type_id: 0x6d7f5ff6 + offset: 80 +} +member { + id: 0x4257f846 + name: "generic" + type_id: 0x6d7f5ff6 + offset: 64 +} member { id: 0x42d6db26 name: "generic" @@ -93283,6 +93969,12 @@ member { type_id: 0xe742397c offset: 576 } +member { + id: 0x55f132cc + name: "global" + type_id: 0x6d7f5ff6 + offset: 224 +} member { id: 0x9e65d647 name: "global" @@ -93614,6 +94306,12 @@ member { type_id: 0x1f3595db offset: 128 } +member { + id: 0x62d28eb5 + name: "graceful_period" + type_id: 0x92233392 + offset: 832 +} member { id: 0x0277381e name: "graft" @@ -96096,6 +96794,17 @@ member { 
name: "headers" type_id: 0x0524ca5a } +member { + id: 0x614c39f2 + name: "headers" + type_id: 0x0f657fc1 +} +member { + id: 0x958f08da + name: "headers_count" + type_id: 0x4585663f + offset: 64 +} member { id: 0x3e17b5ef name: "headroom" @@ -96143,6 +96852,12 @@ member { type_id: 0xc9082b19 offset: 896 } +member { + id: 0x768a691a + name: "health_state" + type_id: 0x295c7202 + offset: 912 +} member { id: 0x049198e2 name: "heap" @@ -99677,6 +100392,18 @@ member { name: "id" type_id: 0x914dbfdc } +member { + id: 0xccbe65b2 + name: "id" + type_id: 0x914dbfdc + offset: 80 +} +member { + id: 0xccbe65cf + name: "id" + type_id: 0x914dbfdc + offset: 64 +} member { id: 0xccc14505 name: "id" @@ -101945,6 +102672,11 @@ member { type_id: 0xe62ebf07 offset: 128 } +member { + id: 0xadf00094 + name: "index" + type_id: 0xc9082b19 +} member { id: 0xadf006ed name: "index" @@ -102232,6 +102964,12 @@ member { type_id: 0x4585663f offset: 224 } +member { + id: 0xe0c4d82e + name: "info_get" + type_id: 0x2d0679a1 + offset: 1536 +} member { id: 0x2f8cd3f3 name: "info_ident" @@ -102607,6 +103345,12 @@ member { offset: 608 bitsize: 1 } +member { + id: 0x3ecaad84 + name: "init_action" + type_id: 0x322b7a90 + offset: 32 +} member { id: 0xc9830a15 name: "init_addr" @@ -102637,6 +103381,12 @@ member { type_id: 0x0c7ce901 offset: 64 } +member { + id: 0xb36eb0c7 + name: "init_burst" + type_id: 0x92233392 + offset: 128 +} member { id: 0xed4a99de name: "init_callback" @@ -102697,6 +103447,12 @@ member { type_id: 0x2f05fd8b offset: 128 } +member { + id: 0xa4c7afe2 + name: "init_group_id" + type_id: 0x914dbfdc + offset: 192 +} member { id: 0xf587a1e9 name: "init_hctx" @@ -102751,12 +103507,24 @@ member { type_id: 0xc9082b19 offset: 864 } +member { + id: 0xf53cc107 + name: "init_policer_id" + type_id: 0xc9082b19 + offset: 96 +} member { id: 0x015559ae name: "init_qp_minus26" type_id: 0x901eaf6a offset: 32 } +member { + id: 0x2b41bb63 + name: "init_rate" + type_id: 0x92233392 + offset: 64 +} member { 
id: 0xf8fed40c name: "init_ready" @@ -107345,6 +108113,11 @@ member { name: "item" type_id: 0x3f8fe745 } +member { + id: 0x990317f0 + name: "item_list" + type_id: 0xd3c80119 +} member { id: 0xa70d7362 name: "item_ptr" @@ -109876,6 +110649,12 @@ member { type_id: 0x33756485 offset: 5504 } +member { + id: 0x41fdc1b3 + name: "last_recovery_ts" + type_id: 0x92233392 + offset: 1216 +} member { id: 0xc6b95840 name: "last_reset" @@ -111831,11 +112610,23 @@ member { offset: 576 } member { - id: 0xd1c402eb + id: 0xd1e477a3 name: "linecard" - type_id: 0x2a0586b2 + type_id: 0x0a70ce1b offset: 2688 } +member { + id: 0x2bafedce + name: "linecard_list" + type_id: 0xd3c80119 + offset: 1920 +} +member { + id: 0x66da3728 + name: "linecards_lock" + type_id: 0xa7c362b0 + offset: 2048 +} member { id: 0xc4fd637a name: "linedur_ns" @@ -113642,6 +114433,12 @@ member { type_id: 0xa7c362b0 offset: 1152 } +member { + id: 0x2d4b3330 + name: "lock" + type_id: 0xa7c362b0 + offset: 3200 +} member { id: 0x2d4b353f name: "lock" @@ -113891,6 +114688,12 @@ member { type_id: 0x1b44744f offset: 1280 } +member { + id: 0x5f5e621d + name: "lock_key" + type_id: 0x475137a2 + offset: 3584 +} member { id: 0x5f5e64df name: "lock_key" @@ -116469,6 +117272,12 @@ member { type_id: 0xc9082b19 offset: 3008 } +member { + id: 0x8b645ad4 + name: "mapping_type" + type_id: 0xe09c0936 + offset: 128 +} member { id: 0x4971df2a name: "mappings" @@ -117548,6 +118357,12 @@ member { type_id: 0x4585663f offset: 96 } +member { + id: 0x9c062023 + name: "max_burst" + type_id: 0x92233392 + offset: 320 +} member { id: 0x9c5d053e name: "max_burst" @@ -118494,6 +119309,12 @@ member { type_id: 0x33756485 offset: 320 } +member { + id: 0x5cea623b + name: "max_rate" + type_id: 0x92233392 + offset: 192 +} member { id: 0xf3d009b3 name: "max_ratio" @@ -118846,6 +119667,12 @@ member { type_id: 0x295c7202 offset: 672 } +member { + id: 0x2a2e382b + name: "max_snapshots" + type_id: 0xc9082b19 + offset: 832 +} member { id: 0x9213bcc2 name: 
"max_socks" @@ -120761,6 +121588,12 @@ member { type_id: 0x1e937ceb offset: 704 } +member { + id: 0xd492332a + name: "metadata_cap" + type_id: 0xc9082b19 + offset: 224 +} member { id: 0xf7b6b92a name: "metadata_ops" @@ -121220,6 +122053,12 @@ member { type_id: 0xc9082b19 offset: 96 } +member { + id: 0x7b580056 + name: "min_burst" + type_id: 0x92233392 + offset: 384 +} member { id: 0x603d49ac name: "min_capacity" @@ -121481,6 +122320,12 @@ member { type_id: 0x295c7202 offset: 8 } +member { + id: 0x7843c8a8 + name: "min_rate" + type_id: 0x92233392 + offset: 256 +} member { id: 0x78e29322 name: "min_rate" @@ -124049,6 +124894,11 @@ member { type_id: 0x0483e6f8 offset: 384 } +member { + id: 0xe2057d5b + name: "msg" + type_id: 0x054f691a +} member { id: 0xe260f8b5 name: "msg" @@ -127330,6 +128180,12 @@ member { type_id: 0x4585663f offset: 384 } +member { + id: 0xe3a3de7e + name: "nested_devlink" + type_id: 0x0cf3d8fe + offset: 1024 +} member { id: 0x5f6b4acf name: "nested_policy" @@ -135861,6 +136717,12 @@ member { type_id: 0x3d1ec847 offset: 64 } +member { + id: 0xafb646e8 + name: "ops" + type_id: 0x3d46e073 + offset: 256 +} member { id: 0xafb6613a name: "ops" @@ -136022,6 +136884,12 @@ member { type_id: 0x32e20efe offset: 17088 } +member { + id: 0xafba248b + name: "ops" + type_id: 0x3121a074 + offset: 2432 +} member { id: 0xafba3fb1 name: "ops" @@ -136056,6 +136924,11 @@ member { name: "ops" type_id: 0x31c93a7f } +member { + id: 0xafbad944 + name: "ops" + type_id: 0x31d9e79a +} member { id: 0xafbb0869 name: "ops" @@ -136086,6 +136959,12 @@ member { type_id: 0x3068cb56 offset: 64 } +member { + id: 0xafbb76d0 + name: "ops" + type_id: 0x307eba5c + offset: 192 +} member { id: 0xafbba914 name: "ops" @@ -137182,6 +138061,12 @@ member { type_id: 0x6720d32f offset: 224 } +member { + id: 0xc099cc3c + name: "overwrite_mask" + type_id: 0xc9082b19 + offset: 128 +} member { id: 0x9e1f254a name: "overwrite_state" @@ -138776,6 +139661,12 @@ member { type_id: 0x6720d32f offset: 640 } 
+member { + id: 0xb8b3f43c + name: "param_list" + type_id: 0xd3c80119 + offset: 704 +} member { id: 0xb8ba1efc name: "param_lock" @@ -141590,6 +142481,12 @@ member { type_id: 0x6e1fde8f offset: 64 } +member { + id: 0x6980d678 + name: "pfnum" + type_id: 0x914dbfdc + offset: 128 +} member { id: 0x88830a16 name: "pg" @@ -144369,6 +145266,11 @@ member { type_id: 0x4585663f offset: 64 } +member { + id: 0xe52eef97 + name: "pool_type" + type_id: 0xa7051d2f +} member { id: 0x6a296ea1 name: "pools" @@ -144468,6 +145370,12 @@ member { type_id: 0x44b60e20 offset: 384 } +member { + id: 0x48c075a0 + name: "port" + type_id: 0x3b68ec61 + offset: 64 +} member { id: 0x48cc7358 name: "port" @@ -144572,6 +145480,36 @@ member { name: "port_data" type_id: 0x18bd6530 } +member { + id: 0x9e84141c + name: "port_del" + type_id: 0x2d7ac448 + offset: 2560 +} +member { + id: 0xa5c2f472 + name: "port_fn_state_get" + type_id: 0x2e54f1b8 + offset: 2624 +} +member { + id: 0xeb46d6f6 + name: "port_fn_state_set" + type_id: 0x2e20981d + offset: 2688 +} +member { + id: 0x2e72e7e0 + name: "port_function_hw_addr_get" + type_id: 0x2e477ad3 + offset: 2368 +} +member { + id: 0x85af182d + name: "port_function_hw_addr_set" + type_id: 0x2e7ffbfe + offset: 2432 +} member { id: 0x97b320de name: "port_handed_over" @@ -144590,6 +145528,19 @@ member { type_id: 0xc9082b19 offset: 64 } +member { + id: 0xbf1e816a + name: "port_index" + type_id: 0x4585663f + offset: 32 +} +member { + id: 0x0317cd78 + name: "port_index_valid" + type_id: 0x295c7202 + offset: 144 + bitsize: 1 +} member { id: 0x1e504a04 name: "port_info" @@ -144620,6 +145571,12 @@ member { type_id: 0x0baa70a7 offset: 8256 } +member { + id: 0xcd6874e1 + name: "port_list" + type_id: 0xd3c80119 + offset: 64 +} member { id: 0x7daf8d82 name: "port_lock" @@ -144632,6 +145589,12 @@ member { type_id: 0x3e10b518 offset: 1216 } +member { + id: 0x90793300 + name: "port_new" + type_id: 0x2d044ee7 + offset: 2496 +} member { id: 0xe64fd4ef name: "port_num" @@ 
-144655,6 +145618,11 @@ member { name: "port_number" type_id: 0xc9082b19 } +member { + id: 0x217694f6 + name: "port_ops" + type_id: 0x33fd261b +} member { id: 0x2418a130 name: "port_power" @@ -144703,6 +145671,12 @@ member { type_id: 0x3bdc1cb2 offset: 8064 } +member { + id: 0xa22f8c42 + name: "port_split" + type_id: 0x2d000b85 + offset: 384 +} member { id: 0x2083fe73 name: "port_status" @@ -144750,6 +145724,18 @@ member { type_id: 0x2efc853f offset: 256 } +member { + id: 0xf43affcb + name: "port_type_set" + type_id: 0x2e5686ce + offset: 320 +} +member { + id: 0x5f87545a + name: "port_unsplit" + type_id: 0x2d0429c2 + offset: 448 +} member { id: 0x4b84c909 name: "port_usb" @@ -147057,6 +148043,12 @@ member { type_id: 0x18bd6530 offset: 448 } +member { + id: 0x59303e30 + name: "priv" + type_id: 0x391f15ea + offset: 64 +} member { id: 0x5935516c name: "priv" @@ -147105,6 +148097,12 @@ member { type_id: 0xc8e4d7d1 offset: 59520 } +member { + id: 0x59c303be + name: "priv" + type_id: 0xca2a51af + offset: 4096 +} member { id: 0x59c3092e name: "priv" @@ -149130,6 +150128,11 @@ member { type_id: 0x0483e6f8 offset: 320 } +member { + id: 0xd3c8aa61 + name: "provision" + type_id: 0x2d4c6881 +} member { id: 0x61ba1604 name: "proxy_ndp" @@ -149865,6 +150868,12 @@ member { type_id: 0x0d572692 offset: 256 } +member { + id: 0x82e1aed1 + name: "putting_binary" + type_id: 0x6d7f5ff6 + offset: 128 +} member { id: 0x26fa53f2 name: "putx64" @@ -151578,6 +152587,30 @@ member { type_id: 0x295c7202 offset: 104 } +member { + id: 0x7f7f584f + name: "rate_leaf_parent_set" + type_id: 0x2d0a0361 + offset: 3136 +} +member { + id: 0x54806d5b + name: "rate_leaf_tx_max_set" + type_id: 0x2d154530 + offset: 2816 +} +member { + id: 0x049c1f00 + name: "rate_leaf_tx_share_set" + type_id: 0x2d154530 + offset: 2752 +} +member { + id: 0x4134711d + name: "rate_list" + type_id: 0xd3c80119 + offset: 192 +} member { id: 0xd51bd1ef name: "rate_matching" @@ -151638,6 +152671,36 @@ member { type_id: 0x4585663f 
offset: 1472 } +member { + id: 0x3d9c68f5 + name: "rate_node_del" + type_id: 0x2d1c1d12 + offset: 3072 +} +member { + id: 0x573e57ed + name: "rate_node_new" + type_id: 0x2d081f17 + offset: 3008 +} +member { + id: 0x069fcabf + name: "rate_node_parent_set" + type_id: 0x2d0a0361 + offset: 3200 +} +member { + id: 0xeff7e2c9 + name: "rate_node_tx_max_set" + type_id: 0x2d154530 + offset: 2944 +} +member { + id: 0x7b7f9a93 + name: "rate_node_tx_share_set" + type_id: 0x2d154530 + offset: 2880 +} member { id: 0xbc979606 name: "rate_num" @@ -152513,6 +153576,12 @@ member { type_id: 0xe3222f5b offset: 1408 } +member { + id: 0x95dacba7 + name: "rcu" + type_id: 0xe3222f5b + offset: 3904 +} member { id: 0x95dacce8 name: "rcu" @@ -153799,6 +154868,12 @@ member { type_id: 0x74d29cf1 offset: 32 } +member { + id: 0x8a4b19d4 + name: "recover" + type_id: 0x2e1466a0 + offset: 64 +} member { id: 0x8a6b6b4e name: "recover" @@ -153816,6 +154891,12 @@ member { name: "recover_bus" type_id: 0x2fb994f1 } +member { + id: 0x629c6b33 + name: "recovery_count" + type_id: 0x92233392 + offset: 1152 +} member { id: 0xe75c7f73 name: "recovery_disabled" @@ -154514,17 +155595,35 @@ member { type_id: 0xa722c13e offset: 800 } +member { + id: 0x05243818 + name: "refcount" + type_id: 0xa722c13e + offset: 224 +} member { id: 0x05243b3c name: "refcount" type_id: 0xa722c13e } +member { + id: 0x05243b72 + name: "refcount" + type_id: 0xa722c13e + offset: 3616 +} member { id: 0x05243c4b name: "refcount" type_id: 0xa722c13e offset: 448 } +member { + id: 0x05243d41 + name: "refcount" + type_id: 0xa722c13e + offset: 1280 +} member { id: 0x053332c8 name: "refcount" @@ -155135,6 +156234,12 @@ member { type_id: 0x92233392 offset: 448 } +member { + id: 0x4fd311c5 + name: "region_list" + type_id: 0xd3c80119 + offset: 832 +} member { id: 0x4fd31bbd name: "region_list" @@ -155967,6 +157072,42 @@ member { type_id: 0x0c868f26 offset: 1536 } +member { + id: 0x83cb909a + name: "reload_actions" + type_id: 0x33756485 + offset: 
64 +} +member { + id: 0xf8981c0e + name: "reload_down" + type_id: 0x2d594ead + offset: 192 +} +member { + id: 0xf5250b55 + name: "reload_failed" + type_id: 0x295c7202 + offset: 3584 + bitsize: 1 +} +member { + id: 0xb0509801 + name: "reload_limits" + type_id: 0x33756485 + offset: 128 +} +member { + id: 0x4492b926 + name: "reload_stats" + type_id: 0x93e3596e +} +member { + id: 0xa5ab7067 + name: "reload_up" + type_id: 0x2d8fd28a + offset: 256 +} member { id: 0x88c18b9a name: "relocs" @@ -156082,6 +157223,12 @@ member { type_id: 0xd3c80119 offset: 30080 } +member { + id: 0xadd03a27 + name: "remote_reload_stats" + type_id: 0x93e3596e + offset: 192 +} member { id: 0xa3daa863 name: "remote_sdu_itime" @@ -156760,12 +157907,24 @@ member { type_id: 0x384f7d7c offset: 1064 } +member { + id: 0x6ac681cd + name: "reporter_list" + type_id: 0xd3c80119 + offset: 960 +} member { id: 0x6ac6877c name: "reporter_list" type_id: 0xd3c80119 offset: 2112 } +member { + id: 0xefdb08b4 + name: "reporters_lock" + type_id: 0xa7c362b0 + offset: 1088 +} member { id: 0xefdb0c31 name: "reporters_lock" @@ -158801,6 +159960,12 @@ member { type_id: 0xb522cc16 offset: 9024 } +member { + id: 0x17c3228f + name: "resource_list" + type_id: 0xd3c80119 + offset: 576 +} member { id: 0x19740c14 name: "resources" @@ -163573,6 +164738,12 @@ member { type_id: 0x4aaca7b4 offset: 192 } +member { + id: 0xebfb4e21 + name: "same_provision" + type_id: 0x358546f6 + offset: 128 +} member { id: 0x9b9fa9a3 name: "same_root" @@ -163997,6 +165168,72 @@ member { type_id: 0xd3c80119 offset: 8448 } +member { + id: 0xe1b6bbe3 + name: "sb_list" + type_id: 0xd3c80119 + offset: 320 +} +member { + id: 0x1197fb66 + name: "sb_occ_max_clear" + type_id: 0x2d7ab985 + offset: 960 +} +member { + id: 0x80e35930 + name: "sb_occ_port_pool_get" + type_id: 0x2e0a2548 + offset: 1024 +} +member { + id: 0xa9d0bb48 + name: "sb_occ_snapshot" + type_id: 0x2d7ab985 + offset: 896 +} +member { + id: 0xde69b5bc + name: "sb_occ_tc_port_bind_get" + 
type_id: 0x2e0abaa7 + offset: 1088 +} +member { + id: 0x93cb2df4 + name: "sb_pool_get" + type_id: 0x2d739f19 + offset: 512 +} +member { + id: 0xeaf07952 + name: "sb_pool_set" + type_id: 0x2d7368ed + offset: 576 +} +member { + id: 0x78a8c4ae + name: "sb_port_pool_get" + type_id: 0x2e0a26c5 + offset: 640 +} +member { + id: 0x6f535872 + name: "sb_port_pool_set" + type_id: 0x2e0ad762 + offset: 704 +} +member { + id: 0x001ba0a6 + name: "sb_tc_pool_bind_get" + type_id: 0x2e0abbc6 + offset: 768 +} +member { + id: 0x07349e82 + name: "sb_tc_pool_bind_set" + type_id: 0x2e0ab0cf + offset: 832 +} member { id: 0x7ac264e0 name: "sbc" @@ -165842,6 +167079,18 @@ member { type_id: 0x0d9c47fd offset: 1408 } +member { + id: 0x561fd50f + name: "selftest_check" + type_id: 0x35b3ea42 + offset: 3264 +} +member { + id: 0xd95b0d92 + name: "selftest_run" + type_id: 0x1fdf8df4 + offset: 3328 +} member { id: 0xf53d14f9 name: "sem" @@ -168597,6 +169846,19 @@ member { type_id: 0x4585663f offset: 96 } +member { + id: 0x4c6b273e + name: "sfnum" + type_id: 0xc9082b19 + offset: 96 +} +member { + id: 0x38a7e1c1 + name: "sfnum_valid" + type_id: 0x295c7202 + offset: 146 + bitsize: 1 +} member { id: 0x65a2356f name: "sfp_bus" @@ -170358,6 +171620,12 @@ member { type_id: 0xc9082b19 offset: 224 } +member { + id: 0xd9b71962 + name: "size" + type_id: 0xc9082b19 + offset: 32 +} member { id: 0xd9b71c90 name: "size" @@ -170394,6 +171662,12 @@ member { type_id: 0x9565759f offset: 64 } +member { + id: 0xd9ec35e7 + name: "size" + type_id: 0x92233392 + offset: 896 +} member { id: 0xd9ec3683 name: "size" @@ -172229,6 +173503,18 @@ member { type_id: 0xe52a3418 offset: 416 } +member { + id: 0x0c805fde + name: "snapshot" + type_id: 0x2d0e9268 + offset: 128 +} +member { + id: 0x0c832449 + name: "snapshot" + type_id: 0x2e7505e0 + offset: 128 +} member { id: 0x0cc029ea name: "snapshot" @@ -172241,6 +173527,24 @@ member { type_id: 0x228d4605 offset: 1984 } +member { + id: 0xc24fadb5 + name: "snapshot_ids" + type_id: 
0x80c20070 + offset: 2560 +} +member { + id: 0xcffde3b6 + name: "snapshot_list" + type_id: 0xd3c80119 + offset: 704 +} +member { + id: 0xcbc193d6 + name: "snapshot_lock" + type_id: 0xa7c362b0 + offset: 320 +} member { id: 0xae699d06 name: "snd" @@ -175182,6 +176486,12 @@ member { type_id: 0x4585663f offset: 7616 } +member { + id: 0x72b72416 + name: "state" + type_id: 0x44d985d5 + offset: 384 +} member { id: 0x72c0803e name: "state" @@ -175545,6 +176855,12 @@ member { type_id: 0x2360e10b offset: 2496 } +member { + id: 0x46ecc05d + name: "state_lock" + type_id: 0xa7c362b0 + offset: 448 +} member { id: 0xeccd8227 name: "state_machine" @@ -175876,6 +177192,12 @@ member { type_id: 0x2456537c offset: 256 } +member { + id: 0xb9583af1 + name: "stats" + type_id: 0x2d51c138 + offset: 2688 +} member { id: 0xb95cdf4e name: "stats" @@ -177885,6 +179207,11 @@ member { type_id: 0xb914bfab offset: 2944 } +member { + id: 0x9fc14365 + name: "supported_flash_update_params" + type_id: 0xc9082b19 +} member { id: 0x44116555 name: "supported_hw" @@ -181985,6 +183312,12 @@ member { name: "test" type_id: 0x2e3696f7 } +member { + id: 0x8db5ea4d + name: "test" + type_id: 0x2e0b7ad3 + offset: 256 +} member { id: 0x8db6a427 name: "test" @@ -182424,6 +183757,12 @@ member { type_id: 0x33756485 offset: 64 } +member { + id: 0x6ce0bb82 + name: "threshold_type" + type_id: 0xcbcc8512 + offset: 64 +} member { id: 0x3b00e790 name: "thresholds" @@ -184781,6 +186120,90 @@ member { type_id: 0x295c7202 offset: 1080 } +member { + id: 0x21a1a972 + name: "trap_action_set" + type_id: 0x2d004a69 + offset: 1792 +} +member { + id: 0x3c859cc4 + name: "trap_drop_counter_get" + type_id: 0x2d018e8d + offset: 2048 +} +member { + id: 0x24edb8c2 + name: "trap_fini" + type_id: 0x0e44c87b + offset: 1728 +} +member { + id: 0xb252ef85 + name: "trap_group_action_set" + type_id: 0x2d00157c + offset: 1984 +} +member { + id: 0x2b3b3947 + name: "trap_group_init" + type_id: 0x2d033017 + offset: 1856 +} +member { + id: 0x1877dc92 
+ name: "trap_group_list" + type_id: 0xd3c80119 + offset: 1664 +} +member { + id: 0x3a9bd5f4 + name: "trap_group_set" + type_id: 0x2d004103 + offset: 1920 +} +member { + id: 0x37b507df + name: "trap_init" + type_id: 0x2d02e4d4 + offset: 1664 +} +member { + id: 0x10281c82 + name: "trap_list" + type_id: 0xd3c80119 + offset: 1536 +} +member { + id: 0x5e35aa2e + name: "trap_policer_counter_get" + type_id: 0x2d0ab1eb + offset: 2304 +} +member { + id: 0x1ba59c6d + name: "trap_policer_fini" + type_id: 0x0e4e7ccb + offset: 2176 +} +member { + id: 0x950fbc14 + name: "trap_policer_init" + type_id: 0x2d085064 + offset: 2112 +} +member { + id: 0x9516c9d8 + name: "trap_policer_list" + type_id: 0xd3c80119 + offset: 1792 +} +member { + id: 0xc8b0c1d4 + name: "trap_policer_set" + type_id: 0x2d01e009 + offset: 2240 +} member { id: 0xc911730d name: "trb_address_map" @@ -187586,6 +189009,12 @@ member { name: "type" type_id: 0x3e10b518 } +member { + id: 0x5c68c671 + name: "type" + type_id: 0x3e10b518 + offset: 832 +} member { id: 0x5c68cd02 name: "type" @@ -187870,6 +189299,11 @@ member { type_id: 0xe62ebf07 offset: 64 } +member { + id: 0x5cbbb022 + name: "type" + type_id: 0xed655c73 +} member { id: 0x5cbbc962 name: "type" @@ -188340,12 +189774,36 @@ member { type_id: 0x86a931f9 offset: 192 } +member { + id: 0xf49ccf9e + name: "types" + type_id: 0x0e38185b + offset: 896 +} member { id: 0xf49ecb47 name: "types" type_id: 0x0c3286fb offset: 64 } +member { + id: 0x6e3b6a83 + name: "types_count" + type_id: 0x3999579d + offset: 192 +} +member { + id: 0x6e477688 + name: "types_count" + type_id: 0x4585663f + offset: 960 +} +member { + id: 0x88117783 + name: "types_get" + type_id: 0x0e0dc148 + offset: 256 +} member { id: 0x3f590b7c name: "types_mask" @@ -189937,6 +191395,12 @@ member { type_id: 0x33756485 offset: 2816 } +member { + id: 0x1f617d86 + name: "unprovision" + type_id: 0x2d4fcd25 + offset: 64 +} member { id: 0x8ea4f872 name: "unreg_list" @@ -193731,6 +195195,18 @@ member { name: 
"version" type_id: 0x914dbfdc } +member { + id: 0x8e9e82fd + name: "version_cb" + type_id: 0x0d1f55de + offset: 64 +} +member { + id: 0xca2aa534 + name: "version_cb_priv" + type_id: 0x18bd6530 + offset: 128 +} member { id: 0x88d1b3ba name: "version_get" @@ -206252,6 +207728,15 @@ struct_union { member_id: 0xe0feb68b } } +struct_union { + id: 0x7d1c3635 + kind: UNION + definition { + bytesize: 8 + member_id: 0xafbad944 + member_id: 0x217694f6 + } +} struct_union { id: 0x7d93a8c7 kind: UNION @@ -214144,14 +215629,260 @@ struct_union { } } struct_union { - id: 0xbc952c91 + id: 0x198f8565 kind: STRUCT name: "devlink" + definition { + bytesize: 512 + member_id: 0xadf00094 + member_id: 0xcd6874e1 + member_id: 0x4134711d + member_id: 0xe1b6bbe3 + member_id: 0x3d9266c0 + member_id: 0x17c3228f + member_id: 0xb8b3f43c + member_id: 0x4fd311c5 + member_id: 0x6ac681cd + member_id: 0xefdb08b4 + member_id: 0x32a55865 + member_id: 0x10281c82 + member_id: 0x1877dc92 + member_id: 0x9516c9d8 + member_id: 0x2bafedce + member_id: 0x66da3728 + member_id: 0xafba248b + member_id: 0xc58a570e + member_id: 0xc24fadb5 + member_id: 0xb9583af1 + member_id: 0xce3bb522 + member_id: 0x3e75936d + member_id: 0x2d4b3330 + member_id: 0x5f5e621d + member_id: 0xf5250b55 + member_id: 0x05243b72 + member_id: 0x999f46a7 + member_id: 0x95dacba7 + member_id: 0x59c303be + } } struct_union { - id: 0x8256fc56 + id: 0x2d51c138 + kind: STRUCT + name: "devlink_dev_stats" + definition { + bytesize: 48 + member_id: 0x4492b926 + member_id: 0xadd03a27 + } +} +struct_union { + id: 0x4d3cebe1 + kind: STRUCT + name: "devlink_dpipe_field" + definition { + bytesize: 24 + member_id: 0x0de57ce8 + member_id: 0xcc6aad16 + member_id: 0x4a47d75f + member_id: 0x8b645ad4 + } +} +struct_union { + id: 0x751480f9 + kind: STRUCT + name: "devlink_dpipe_header" + definition { + bytesize: 32 + member_id: 0x0de57ce8 + member_id: 0xcc6aad16 + member_id: 0x784a4b9b + member_id: 0x361d56de + member_id: 0x55f132cc + } +} +struct_union { + id: 
0xb55e29b2 + kind: STRUCT + name: "devlink_dpipe_headers" + definition { + bytesize: 16 + member_id: 0x614c39f2 + member_id: 0x958f08da + } +} +struct_union { + id: 0x8dfee289 + kind: STRUCT + name: "devlink_flash_update_params" + definition { + bytesize: 24 + member_id: 0x91e86fed + member_id: 0xe5d87fc5 + member_id: 0xc099cc3c + } +} +struct_union { + id: 0xa5206d7a + kind: STRUCT + name: "devlink_fmsg" + definition { + bytesize: 24 + member_id: 0x990317f0 + member_id: 0x82e1aed1 + } +} +struct_union { + id: 0xd5693f95 + kind: STRUCT + name: "devlink_health_reporter" + definition { + bytesize: 168 + member_id: 0x7c00ef52 + member_id: 0x59119163 + member_id: 0xafbb76d0 + member_id: 0x56ace115 + member_id: 0xeb76e483 + member_id: 0x210293e8 + member_id: 0x33fc8cf5 + member_id: 0x62d28eb5 + member_id: 0xea4e825c + member_id: 0x773b8f3f + member_id: 0x768a691a + member_id: 0x1e719a84 + member_id: 0x352c4ca4 + member_id: 0x0b6ea206 + member_id: 0x629c6b33 + member_id: 0x41fdc1b3 + member_id: 0x05243d41 + } +} +struct_union { + id: 0xdeaf8933 + kind: STRUCT + name: "devlink_health_reporter_ops" + definition { + bytesize: 40 + member_id: 0x0ddfefbb + member_id: 0x8a4b19d4 + member_id: 0x0a7d4966 + member_id: 0x30b83368 + member_id: 0x8db5ea4d + } +} +struct_union { + id: 0xcea2d9d1 + kind: STRUCT + name: "devlink_info_req" + definition { + bytesize: 24 + member_id: 0xe2057d5b + member_id: 0x8e9e82fd + member_id: 0xca2aa534 + } +} +struct_union { + id: 0x0383def3 kind: STRUCT name: "devlink_linecard" + definition { + bytesize: 136 + member_id: 0x7c00ef52 + member_id: 0x56ace87c + member_id: 0xad7c8510 + member_id: 0x05243818 + member_id: 0xafb646e8 + member_id: 0x59119f66 + member_id: 0x72b72416 + member_id: 0x46ecc05d + member_id: 0x5c68c671 + member_id: 0xf49ccf9e + member_id: 0x6e477688 + member_id: 0xe3a3de7e + } +} +struct_union { + id: 0x0d2a2bc0 + kind: STRUCT + name: "devlink_linecard_ops" + definition { + bytesize: 40 + member_id: 0xd3c8aa61 + member_id: 
0x1f617d86 + member_id: 0xebfb4e21 + member_id: 0x6e3b6a83 + member_id: 0x88117783 + } +} +struct_union { + id: 0x12a087f3 + kind: STRUCT + name: "devlink_linecard_type" + definition { + bytesize: 16 + member_id: 0x5c68c5cb + member_id: 0x59303e30 + } +} +struct_union { + id: 0xcb5e2bb0 + kind: STRUCT + name: "devlink_ops" + definition { + bytesize: 424 + member_id: 0x9fc14365 + member_id: 0x83cb909a + member_id: 0xb0509801 + member_id: 0xf8981c0e + member_id: 0xa5ab7067 + member_id: 0xf43affcb + member_id: 0xa22f8c42 + member_id: 0x5f87545a + member_id: 0x93cb2df4 + member_id: 0xeaf07952 + member_id: 0x78a8c4ae + member_id: 0x6f535872 + member_id: 0x001ba0a6 + member_id: 0x07349e82 + member_id: 0xa9d0bb48 + member_id: 0x1197fb66 + member_id: 0x80e35930 + member_id: 0xde69b5bc + member_id: 0x67ecafb3 + member_id: 0x06f3214d + member_id: 0xff091101 + member_id: 0xf81283dc + member_id: 0x2d2ca64a + member_id: 0x0e5d040f + member_id: 0xe0c4d82e + member_id: 0x5ef8d6d5 + member_id: 0x37b507df + member_id: 0x24edb8c2 + member_id: 0x21a1a972 + member_id: 0x2b3b3947 + member_id: 0x3a9bd5f4 + member_id: 0xb252ef85 + member_id: 0x3c859cc4 + member_id: 0x950fbc14 + member_id: 0x1ba59c6d + member_id: 0xc8b0c1d4 + member_id: 0x5e35aa2e + member_id: 0x2e72e7e0 + member_id: 0x85af182d + member_id: 0x90793300 + member_id: 0x9e84141c + member_id: 0xa5c2f472 + member_id: 0xeb46d6f6 + member_id: 0x049c1f00 + member_id: 0x54806d5b + member_id: 0x7b7f9a93 + member_id: 0xeff7e2c9 + member_id: 0x573e57ed + member_id: 0x3d9c68f5 + member_id: 0x7f7f584f + member_id: 0x069fcabf + member_id: 0x561fd50f + member_id: 0xd95b0d92 + } } struct_union { id: 0xc7e35718 @@ -214161,7 +215892,7 @@ struct_union { bytesize: 344 member_id: 0x7c00ef52 member_id: 0x4fd31bbd - member_id: 0x5685a7bf + member_id: 0x56ace115 member_id: 0xad7c8a1e member_id: 0xc5a12bbd member_id: 0x5c46dd88 @@ -214176,7 +215907,7 @@ struct_union { member_id: 0x6ac6877c member_id: 0xefdb0c31 member_id: 0xf45f2394 - member_id: 
0xd1c402eb + member_id: 0xd1e477a3 } } struct_union { @@ -214193,6 +215924,22 @@ struct_union { member_id: 0x357170d2 } } +struct_union { + id: 0x6807af97 + kind: STRUCT + name: "devlink_port_new_attrs" + definition { + bytesize: 20 + member_id: 0xdf2fd72b + member_id: 0xbf1e816a + member_id: 0xd8dc9c99 + member_id: 0x4c6b273e + member_id: 0x6980d678 + member_id: 0x0317cd78 + member_id: 0x1db19505 + member_id: 0x38a7e1c1 + } +} struct_union { id: 0xf0f5f897 kind: STRUCT @@ -214238,6 +215985,18 @@ struct_union { member_id: 0xd69a643e } } +struct_union { + id: 0xe6964d46 + kind: STRUCT + name: "devlink_port_region_ops" + definition { + bytesize: 32 + member_id: 0x0de57ce8 + member_id: 0xa4170b6e + member_id: 0x0c832449 + member_id: 0x59119068 + } +} struct_union { id: 0x1777d31b kind: STRUCT @@ -214246,7 +216005,7 @@ struct_union { bytesize: 80 member_id: 0x7c00ef52 member_id: 0x5cf2824f - member_id: 0x5685afdd + member_id: 0x56ace977 member_id: 0x5911980a member_id: 0xd3866153 member_id: 0x6d8a9147 @@ -214254,6 +216013,89 @@ struct_union { member_id: 0x38c01e38 } } +struct_union { + id: 0x47689b48 + kind: STRUCT + name: "devlink_region" + definition { + bytesize: 120 + member_id: 0x56ace1be + member_id: 0x48c075a0 + member_id: 0x7c00e690 + member_id: 0x3c8c9122 + member_id: 0xcbc193d6 + member_id: 0xcffde3b6 + member_id: 0x2a2e382b + member_id: 0x80fbe8a7 + member_id: 0xd9ec35e7 + } +} +struct_union { + id: 0xc4da5559 + kind: STRUCT + name: "devlink_region_ops" + definition { + bytesize: 32 + member_id: 0x0de57ce8 + member_id: 0xa4170b6e + member_id: 0x0c805fde + member_id: 0x59119068 + } +} +struct_union { + id: 0xe35e5ccd + kind: STRUCT + name: "devlink_sb_pool_info" + definition { + bytesize: 16 + member_id: 0xe52eef97 + member_id: 0xd9b71962 + member_id: 0x6ce0bb82 + member_id: 0x08477548 + } +} +struct_union { + id: 0x1c7a3d65 + kind: STRUCT + name: "devlink_trap" + definition { + bytesize: 32 + member_id: 0x5cbbb022 + member_id: 0x3ecaad84 + member_id: 
0x4257f846 + member_id: 0xccbe65b2 + member_id: 0x0de5752a + member_id: 0xa4c7afe2 + member_id: 0xd492332a + } +} +struct_union { + id: 0x198b6a50 + kind: STRUCT + name: "devlink_trap_group" + definition { + bytesize: 16 + member_id: 0x0de57ce8 + member_id: 0xccbe65cf + member_id: 0x4257f83b + member_id: 0xf53cc107 + } +} +struct_union { + id: 0xaf8c5ca0 + kind: STRUCT + name: "devlink_trap_policer" + definition { + bytesize: 56 + member_id: 0xcce624ba + member_id: 0x2b41bb63 + member_id: 0xb36eb0c7 + member_id: 0x5cea623b + member_id: 0x7843c8a8 + member_id: 0x9c062023 + member_id: 0x7b580056 + } +} struct_union { id: 0xd1d07704 kind: STRUCT @@ -263806,6 +265648,104 @@ enumeration { } } } +enumeration { + id: 0xe09c0936 + name: "devlink_dpipe_field_mapping_type" + definition { + underlying_type_id: 0x4585663f + enumerator { + name: "DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE" + } + enumerator { + name: "DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX" + value: 1 + } + } +} +enumeration { + id: 0xc700b957 + name: "devlink_eswitch_encap_mode" + definition { + underlying_type_id: 0x4585663f + enumerator { + name: "DEVLINK_ESWITCH_ENCAP_MODE_NONE" + } + enumerator { + name: "DEVLINK_ESWITCH_ENCAP_MODE_BASIC" + value: 1 + } + } +} +enumeration { + id: 0x1afd1fe8 + name: "devlink_health_reporter_state" + definition { + underlying_type_id: 0x4585663f + enumerator { + name: "DEVLINK_HEALTH_REPORTER_STATE_HEALTHY" + } + enumerator { + name: "DEVLINK_HEALTH_REPORTER_STATE_ERROR" + value: 1 + } + } +} +enumeration { + id: 0x49f89a6d + name: "devlink_info_version_type" + definition { + underlying_type_id: 0x4585663f + enumerator { + name: "DEVLINK_INFO_VERSION_TYPE_NONE" + } + enumerator { + name: "DEVLINK_INFO_VERSION_TYPE_COMPONENT" + value: 1 + } + } +} +enumeration { + id: 0x44d985d5 + name: "devlink_linecard_state" + definition { + underlying_type_id: 0x4585663f + enumerator { + name: "DEVLINK_LINECARD_STATE_UNSPEC" + } + enumerator { + name: "DEVLINK_LINECARD_STATE_UNPROVISIONED" + 
value: 1 + } + enumerator { + name: "DEVLINK_LINECARD_STATE_UNPROVISIONING" + value: 2 + } + enumerator { + name: "DEVLINK_LINECARD_STATE_PROVISIONING" + value: 3 + } + enumerator { + name: "DEVLINK_LINECARD_STATE_PROVISIONING_FAILED" + value: 4 + } + enumerator { + name: "DEVLINK_LINECARD_STATE_PROVISIONED" + value: 5 + } + enumerator { + name: "DEVLINK_LINECARD_STATE_ACTIVE" + value: 6 + } + enumerator { + name: "__DEVLINK_LINECARD_STATE_MAX" + value: 7 + } + enumerator { + name: "DEVLINK_LINECARD_STATE_MAX" + value: 6 + } + } +} enumeration { id: 0x03e0374b name: "devlink_port_flavour" @@ -263844,6 +265784,34 @@ enumeration { } } } +enumeration { + id: 0x769abc6d + name: "devlink_port_fn_opstate" + definition { + underlying_type_id: 0x4585663f + enumerator { + name: "DEVLINK_PORT_FN_OPSTATE_DETACHED" + } + enumerator { + name: "DEVLINK_PORT_FN_OPSTATE_ATTACHED" + value: 1 + } + } +} +enumeration { + id: 0x666a7a1b + name: "devlink_port_fn_state" + definition { + underlying_type_id: 0x4585663f + enumerator { + name: "DEVLINK_PORT_FN_STATE_INACTIVE" + } + enumerator { + name: "DEVLINK_PORT_FN_STATE_ACTIVE" + value: 1 + } + } +} enumeration { id: 0x100964d4 name: "devlink_port_type" @@ -263880,6 +265848,136 @@ enumeration { } } } +enumeration { + id: 0xb38a8bec + name: "devlink_reload_action" + definition { + underlying_type_id: 0x4585663f + enumerator { + name: "DEVLINK_RELOAD_ACTION_UNSPEC" + } + enumerator { + name: "DEVLINK_RELOAD_ACTION_DRIVER_REINIT" + value: 1 + } + enumerator { + name: "DEVLINK_RELOAD_ACTION_FW_ACTIVATE" + value: 2 + } + enumerator { + name: "__DEVLINK_RELOAD_ACTION_MAX" + value: 3 + } + enumerator { + name: "DEVLINK_RELOAD_ACTION_MAX" + value: 2 + } + } +} +enumeration { + id: 0x35c4d162 + name: "devlink_reload_limit" + definition { + underlying_type_id: 0x4585663f + enumerator { + name: "DEVLINK_RELOAD_LIMIT_UNSPEC" + } + enumerator { + name: "DEVLINK_RELOAD_LIMIT_NO_RESET" + value: 1 + } + enumerator { + name: 
"__DEVLINK_RELOAD_LIMIT_MAX" + value: 2 + } + enumerator { + name: "DEVLINK_RELOAD_LIMIT_MAX" + value: 1 + } + } +} +enumeration { + id: 0xa7051d2f + name: "devlink_sb_pool_type" + definition { + underlying_type_id: 0x4585663f + enumerator { + name: "DEVLINK_SB_POOL_TYPE_INGRESS" + } + enumerator { + name: "DEVLINK_SB_POOL_TYPE_EGRESS" + value: 1 + } + } +} +enumeration { + id: 0xcbcc8512 + name: "devlink_sb_threshold_type" + definition { + underlying_type_id: 0x4585663f + enumerator { + name: "DEVLINK_SB_THRESHOLD_TYPE_STATIC" + } + enumerator { + name: "DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC" + value: 1 + } + } +} +enumeration { + id: 0xba990d57 + name: "devlink_selftest_status" + definition { + underlying_type_id: 0x4585663f + enumerator { + name: "DEVLINK_SELFTEST_STATUS_SKIP" + } + enumerator { + name: "DEVLINK_SELFTEST_STATUS_PASS" + value: 1 + } + enumerator { + name: "DEVLINK_SELFTEST_STATUS_FAIL" + value: 2 + } + } +} +enumeration { + id: 0x322b7a90 + name: "devlink_trap_action" + definition { + underlying_type_id: 0x4585663f + enumerator { + name: "DEVLINK_TRAP_ACTION_DROP" + } + enumerator { + name: "DEVLINK_TRAP_ACTION_TRAP" + value: 1 + } + enumerator { + name: "DEVLINK_TRAP_ACTION_MIRROR" + value: 2 + } + } +} +enumeration { + id: 0xed655c73 + name: "devlink_trap_type" + definition { + underlying_type_id: 0x4585663f + enumerator { + name: "DEVLINK_TRAP_TYPE_DROP" + } + enumerator { + name: "DEVLINK_TRAP_TYPE_EXCEPTION" + value: 1 + } + enumerator { + name: "DEVLINK_TRAP_TYPE_CONTROL" + value: 2 + } + } +} enumeration { id: 0x69efc53e name: "df_reason" @@ -278005,6 +280103,15 @@ function { return_type_id: 0x48b5725f parameter_id: 0x0bbe1c3e } +function { + id: 0x1277e3bd + return_type_id: 0x48b5725f + parameter_id: 0x0a70ce1b + parameter_id: 0x18bd6530 + parameter_id: 0x4585663f + parameter_id: 0x051414e1 + parameter_id: 0x04d7fcdd +} function { id: 0x127987a5 return_type_id: 0x48b5725f @@ -278530,6 +280637,22 @@ function { parameter_id: 0x054f691a 
parameter_id: 0x391f15ea } +function { + id: 0x1353a05d + return_type_id: 0x48b5725f + parameter_id: 0x0cf3d8fe + parameter_id: 0x3e10b518 + parameter_id: 0x3e10b518 + parameter_id: 0x33756485 + parameter_id: 0x33756485 +} +function { + id: 0x1353c771 + return_type_id: 0x48b5725f + parameter_id: 0x0cf3d8fe + parameter_id: 0x3c53e119 + parameter_id: 0x18bd6530 +} function { id: 0x13544dcf return_type_id: 0x48b5725f @@ -278662,6 +280785,12 @@ function { parameter_id: 0x18bd6530 parameter_id: 0x6720d32f } +function { + id: 0x137915b0 + return_type_id: 0x48b5725f + parameter_id: 0x0cf3d8fe + parameter_id: 0x376c8705 +} function { id: 0x13797fb7 return_type_id: 0x48b5725f @@ -278767,6 +280896,11 @@ function { parameter_id: 0xe02e14d6 parameter_id: 0xf435685e } +function { + id: 0x13a4a7ac + return_type_id: 0x48b5725f + parameter_id: 0x0cf3d8fe +} function { id: 0x13a62397 return_type_id: 0x48b5725f @@ -280353,6 +282487,11 @@ function { return_type_id: 0x48b5725f parameter_id: 0x1b7b196f } +function { + id: 0x164ad64e + return_type_id: 0x48b5725f + parameter_id: 0x1b4a1f75 +} function { id: 0x164c5933 return_type_id: 0x48b5725f @@ -285186,6 +287325,13 @@ function { parameter_id: 0x3b04bead parameter_id: 0x18bd6530 } +function { + id: 0x1e3db1e5 + return_type_id: 0x48b5725f + parameter_id: 0x3e10b518 + parameter_id: 0x49f89a6d + parameter_id: 0x18bd6530 +} function { id: 0x1e3f491e return_type_id: 0x48b5725f @@ -285823,6 +287969,12 @@ function { return_type_id: 0x48b5725f parameter_id: 0x3e6239e1 } +function { + id: 0x1f01387c + return_type_id: 0x48b5725f + parameter_id: 0x3fca7642 + parameter_id: 0x1afd1fe8 +} function { id: 0x1f060fb8 return_type_id: 0x48b5725f @@ -286185,6 +288337,11 @@ function { return_type_id: 0x48b5725f parameter_id: 0x3fc475cd } +function { + id: 0x1f6acc03 + return_type_id: 0x48b5725f + parameter_id: 0x3fca7642 +} function { id: 0x1f729ba1 return_type_id: 0x48b5725f @@ -286791,6 +288948,14 @@ function { parameter_id: 0x6720d32f parameter_id: 
0x6720d32f } +function { + id: 0x2720cd28 + return_type_id: 0x3fca7642 + parameter_id: 0x0cf3d8fe + parameter_id: 0x307eba5c + parameter_id: 0x92233392 + parameter_id: 0x18bd6530 +} function { id: 0x27d025a4 return_type_id: 0x40e51470 @@ -289032,6 +291197,13 @@ function { return_type_id: 0x08faf209 parameter_id: 0x11e6864c } +function { + id: 0x553ed14c + return_type_id: 0xba990d57 + parameter_id: 0x0cf3d8fe + parameter_id: 0x4585663f + parameter_id: 0x07dcdbe1 +} function { id: 0x55423178 return_type_id: 0x18bd6530 @@ -289057,6 +291229,11 @@ function { return_type_id: 0x18bd6530 parameter_id: 0x0c48c037 } +function { + id: 0x55aa47ce + return_type_id: 0x18bd6530 + parameter_id: 0x0cf3d8fe +} function { id: 0x55afd20f return_type_id: 0x2131312a @@ -289462,6 +291639,11 @@ function { parameter_id: 0x30cfc1c2 parameter_id: 0xf1a6dfed } +function { + id: 0x59642c61 + return_type_id: 0x18bd6530 + parameter_id: 0x3fca7642 +} function { id: 0x596454e5 return_type_id: 0x92233392 @@ -289710,6 +291892,14 @@ function { parameter_id: 0xf435685e parameter_id: 0x6720d32f } +function { + id: 0x5bbe2188 + return_type_id: 0x0cf3d8fe + parameter_id: 0x3121a074 + parameter_id: 0xf435685e + parameter_id: 0x0ca27481 + parameter_id: 0x0258f96e +} function { id: 0x5bd76b9c return_type_id: 0x18bd6530 @@ -294882,6 +297072,13 @@ function { return_type_id: 0x6720d32f parameter_id: 0x3e360385 } +function { + id: 0x92117c1d + return_type_id: 0x6720d32f + parameter_id: 0x3fca7642 + parameter_id: 0x18bd6530 + parameter_id: 0x07dcdbe1 +} function { id: 0x92121eb9 return_type_id: 0x6720d32f @@ -295278,6 +297475,23 @@ function { parameter_id: 0x3176a085 parameter_id: 0xeeed68e6 } +function { + id: 0x926873bc + return_type_id: 0x6720d32f + parameter_id: 0x3b68ec61 + parameter_id: 0x4585663f + parameter_id: 0x914dbfdc + parameter_id: 0x38d23361 + parameter_id: 0x38d23361 +} +function { + id: 0x92687d88 + return_type_id: 0x6720d32f + parameter_id: 0x3b68ec61 + parameter_id: 0x4585663f + parameter_id: 
0x914dbfdc + parameter_id: 0x38d23361 +} function { id: 0x92697f90 return_type_id: 0x6720d32f @@ -295288,11 +297502,57 @@ function { return_type_id: 0x3e10b518 parameter_id: 0x00b7947f } +function { + id: 0x926a0987 + return_type_id: 0x6720d32f + parameter_id: 0x3b68ec61 + parameter_id: 0x4585663f + parameter_id: 0x914dbfdc + parameter_id: 0xa7051d2f + parameter_id: 0x2ec35650 + parameter_id: 0x38d23361 +} +function { + id: 0x926a0c03 + return_type_id: 0x6720d32f + parameter_id: 0x3b68ec61 + parameter_id: 0x4585663f + parameter_id: 0x914dbfdc + parameter_id: 0xa7051d2f + parameter_id: 0x38d23361 + parameter_id: 0x38d23361 +} +function { + id: 0x926a25a2 + return_type_id: 0x6720d32f + parameter_id: 0x3b68ec61 + parameter_id: 0x4585663f + parameter_id: 0x914dbfdc + parameter_id: 0xa7051d2f + parameter_id: 0x914dbfdc + parameter_id: 0xc9082b19 + parameter_id: 0x07dcdbe1 +} +function { + id: 0x926bbb17 + return_type_id: 0x6720d32f + parameter_id: 0x3b68ec61 + parameter_id: 0x4585663f + parameter_id: 0x914dbfdc + parameter_id: 0xc9082b19 + parameter_id: 0x07dcdbe1 +} function { id: 0x926be467 return_type_id: 0x6720d32f parameter_id: 0x3fac1d22 } +function { + id: 0x926d0dd0 + return_type_id: 0x6720d32f + parameter_id: 0x3fca7642 + parameter_id: 0x07dcdbe1 +} function { id: 0x926dcbb0 return_type_id: 0x6720d32f @@ -295448,6 +297708,13 @@ function { parameter_id: 0x3f37d9d5 parameter_id: 0x32a063f3 } +function { + id: 0x928c1332 + return_type_id: 0x6720d32f + parameter_id: 0x3fca7642 + parameter_id: 0x3e10b518 + parameter_id: 0x18bd6530 +} function { id: 0x928d6faf return_type_id: 0x6720d32f @@ -295665,6 +297932,13 @@ function { parameter_id: 0x3e10b518 parameter_id: 0x38040a6c } +function { + id: 0x92c286e9 + return_type_id: 0x6720d32f + parameter_id: 0x3b68ec61 + parameter_id: 0x666a7a1b + parameter_id: 0x07dcdbe1 +} function { id: 0x92c2d86d return_type_id: 0x6720d32f @@ -295931,6 +298205,14 @@ function { parameter_id: 0x3e6396e0 parameter_id: 0x386badcf } +function { 
+ id: 0x92fb2e1e + return_type_id: 0x6720d32f + parameter_id: 0x3fca7642 + parameter_id: 0x23d822f9 + parameter_id: 0x18bd6530 + parameter_id: 0x07dcdbe1 +} function { id: 0x92fc5924 return_type_id: 0x6720d32f @@ -295944,6 +298226,13 @@ function { parameter_id: 0x3e10b518 parameter_id: 0x3e10b518 } +function { + id: 0x92fce902 + return_type_id: 0x6720d32f + parameter_id: 0x3fca7642 + parameter_id: 0x23d822f9 + parameter_id: 0x07dcdbe1 +} function { id: 0x92fcfc63 return_type_id: 0x6720d32f @@ -296059,6 +298348,14 @@ function { parameter_id: 0x4585663f parameter_id: 0x00c72527 } +function { + id: 0x9313207d + return_type_id: 0x6720d32f + parameter_id: 0x3b68ec61 + parameter_id: 0x130aa721 + parameter_id: 0x173696bc + parameter_id: 0x07dcdbe1 +} function { id: 0x9313a933 return_type_id: 0x6720d32f @@ -296154,6 +298451,12 @@ function { parameter_id: 0x6720d32f parameter_id: 0x3e6239e1 } +function { + id: 0x931afda4 + return_type_id: 0x6720d32f + parameter_id: 0x3b68ec61 + parameter_id: 0x100964d4 +} function { id: 0x931d2209 return_type_id: 0x6720d32f @@ -296345,6 +298648,14 @@ function { parameter_id: 0x3f37d9d5 parameter_id: 0x4585663f } +function { + id: 0x935d0dd3 + return_type_id: 0x6720d32f + parameter_id: 0x3b68ec61 + parameter_id: 0x00c72527 + parameter_id: 0x13580d6c + parameter_id: 0x07dcdbe1 +} function { id: 0x93627fe0 return_type_id: 0x6720d32f @@ -296537,6 +298848,14 @@ function { parameter_id: 0x39a8be0c parameter_id: 0x1e8e5a79 } +function { + id: 0x9394f11e + return_type_id: 0x6720d32f + parameter_id: 0x3b68ec61 + parameter_id: 0x33fd261b + parameter_id: 0x07dcdbe1 + parameter_id: 0x0aa1f0ee +} function { id: 0x93980968 return_type_id: 0x6720d32f @@ -296682,6 +299001,14 @@ function { parameter_id: 0x3b4ce03a parameter_id: 0x3b4ce03a } +function { + id: 0x93bf0967 + return_type_id: 0x6720d32f + parameter_id: 0x3b68ec61 + parameter_id: 0x3f0185ef + parameter_id: 0x6720d32f + parameter_id: 0x07dcdbe1 +} function { id: 0x93bf967f return_type_id: 
0x6720d32f @@ -297955,6 +300282,11 @@ function { parameter_id: 0x508a987d parameter_id: 0x2ac2dd67 } +function { + id: 0x9576eb91 + return_type_id: 0x6720d32f + parameter_id: 0x23d822f9 +} function { id: 0x957964bc return_type_id: 0x6720d32f @@ -298030,6 +300362,12 @@ function { parameter_id: 0x3f44b979 parameter_id: 0x3e10b518 } +function { + id: 0x958ea945 + return_type_id: 0x6720d32f + parameter_id: 0x23d822f9 + parameter_id: 0x3e10b518 +} function { id: 0x958f9102 return_type_id: 0x6720d32f @@ -298227,6 +300565,13 @@ function { parameter_id: 0x23f09c34 parameter_id: 0x334c07d5 } +function { + id: 0x95b6c4a9 + return_type_id: 0x6720d32f + parameter_id: 0x23d822f9 + parameter_id: 0x391f15ea + parameter_id: 0x914dbfdc +} function { id: 0x95b74be6 return_type_id: 0x6720d32f @@ -307323,6 +309668,15 @@ function { parameter_id: 0x04ca9246 parameter_id: 0x33756485 } +function { + id: 0x9c7facb4 + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0xb38a8bec + parameter_id: 0x35c4d162 + parameter_id: 0x38d23361 + parameter_id: 0x07dcdbe1 +} function { id: 0x9c814f78 return_type_id: 0x6720d32f @@ -308017,6 +310371,13 @@ function { parameter_id: 0x6d7f5ff6 parameter_id: 0x33d0e528 } +function { + id: 0x9cf8d4d9 + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x914dbfdc + parameter_id: 0x07dcdbe1 +} function { id: 0x9cf9beaa return_type_id: 0x6720d32f @@ -309202,6 +311563,13 @@ function { parameter_id: 0x2c982451 parameter_id: 0x13bdf349 } +function { + id: 0x9da1e0c3 + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0xc700b957 + parameter_id: 0x07dcdbe1 +} function { id: 0x9da4bde5 return_type_id: 0x6720d32f @@ -310120,6 +312488,12 @@ function { parameter_id: 0x3e10b518 parameter_id: 0xf1a6dfed } +function { + id: 0x9e071849 + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x2ec35650 +} function { id: 0x9e08dfbb return_type_id: 0x6720d32f @@ -310155,6 +312529,21 @@ function { 
parameter_id: 0x36c97631 parameter_id: 0x6720d32f } +function { + id: 0x9e15f25c + return_type_id: 0x6720d32f + parameter_id: 0x0f4dcd61 + parameter_id: 0x18bd6530 + parameter_id: 0x92233392 + parameter_id: 0x07dcdbe1 +} +function { + id: 0x9e1893ee + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x295c7202 + parameter_id: 0x07dcdbe1 +} function { id: 0x9e19651e return_type_id: 0x6720d32f @@ -310177,6 +312566,13 @@ function { parameter_id: 0x0db3ac0f parameter_id: 0x3d8951f4 } +function { + id: 0x9e1a5c22 + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x29ef8105 + parameter_id: 0x07dcdbe1 +} function { id: 0x9e1dd697 return_type_id: 0x6720d32f @@ -310281,6 +312677,13 @@ function { parameter_id: 0xc9082b19 parameter_id: 0x07dcdbe1 } +function { + id: 0x9e3092d5 + return_type_id: 0x6720d32f + parameter_id: 0x0f4dcd61 + parameter_id: 0x18bd6530 + parameter_id: 0x07dcdbe1 +} function { id: 0x9e31377c return_type_id: 0x6720d32f @@ -310315,6 +312718,38 @@ function { return_type_id: 0x6720d32f parameter_id: 0x0effc5a1 } +function { + id: 0x9e40b36c + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x3c0cf46a + parameter_id: 0x322b7a90 + parameter_id: 0x07dcdbe1 +} +function { + id: 0x9e40c88b + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x3b68ec61 + parameter_id: 0x4585663f + parameter_id: 0x07dcdbe1 +} +function { + id: 0x9e41cf39 + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x3c53e119 + parameter_id: 0x322b7a90 + parameter_id: 0x07dcdbe1 +} +function { + id: 0x9e41e293 + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x3c0cf46a + parameter_id: 0x376c8705 + parameter_id: 0x07dcdbe1 +} function { id: 0x9e41ea47 return_type_id: 0x6720d32f @@ -310336,12 +312771,62 @@ function { parameter_id: 0x452ab998 parameter_id: 0x6720d32f } +function { + id: 0x9e46dca9 + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 
0x3c53e119 + parameter_id: 0x2e18f543 +} +function { + id: 0x9e4766bb + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x376c8705 + parameter_id: 0x92233392 + parameter_id: 0x92233392 + parameter_id: 0x07dcdbe1 +} function { id: 0x9e49e56e return_type_id: 0x6720d32f parameter_id: 0x0b7c4f67 parameter_id: 0x4585663f } +function { + id: 0x9e4b75cd + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x3c53e119 + parameter_id: 0x18bd6530 +} +function { + id: 0x9e4c26c1 + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x3c0cf46a +} +function { + id: 0x9e504197 + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x3b68ec61 + parameter_id: 0x07dcdbe1 +} +function { + id: 0x9e51554f + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x3b5017f2 +} +function { + id: 0x9e51dd03 + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x3b143836 + parameter_id: 0x07dcdbe1 + parameter_id: 0x1bf16028 +} function { id: 0x9e52789b return_type_id: 0x6720d32f @@ -310363,6 +312848,13 @@ function { parameter_id: 0x1479c6e7 parameter_id: 0x2d8ee262 } +function { + id: 0x9e590019 + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x39388fd3 + parameter_id: 0x07dcdbe1 +} function { id: 0x9e5980cd return_type_id: 0x6720d32f @@ -310392,6 +312884,19 @@ function { return_type_id: 0x6720d32f parameter_id: 0x0f78474f } +function { + id: 0x9e609ac2 + return_type_id: 0x6720d32f + parameter_id: 0x0f4dcd61 + parameter_id: 0x0cbf60eb + parameter_id: 0x07dcdbe1 +} +function { + id: 0x9e61a70c + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x376c8705 +} function { id: 0x9e61ffc7 return_type_id: 0x6720d32f @@ -310423,6 +312928,15 @@ function { parameter_id: 0x33b77109 parameter_id: 0x0277bf8a } +function { + id: 0x9e68eb19 + return_type_id: 0x6720d32f + parameter_id: 0x0f4dcd61 + parameter_id: 0x0f4dcd61 + parameter_id: 0x18bd6530 + 
parameter_id: 0x18bd6530 + parameter_id: 0x07dcdbe1 +} function { id: 0x9e69dafa return_type_id: 0x6720d32f @@ -310432,6 +312946,13 @@ function { parameter_id: 0x4585663f parameter_id: 0x11d941b8 } +function { + id: 0x9e6a2131 + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x376c8705 + parameter_id: 0x2e18f543 +} function { id: 0x9e6bce91 return_type_id: 0x6720d32f @@ -310527,6 +313048,14 @@ function { parameter_id: 0x0db3ac0f parameter_id: 0x25653b02 } +function { + id: 0x9e7aaf3f + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x31d9e79a + parameter_id: 0x07dcdbe1 + parameter_id: 0x0aa1f0ee +} function { id: 0x9e7f936c return_type_id: 0x6720d32f @@ -310721,6 +313250,12 @@ function { parameter_id: 0x0effc5a1 parameter_id: 0x2060db23 } +function { + id: 0x9ebf0984 + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x00c72527 +} function { id: 0x9ebfed71 return_type_id: 0x6720d32f @@ -311222,6 +313757,15 @@ function { parameter_id: 0x097315c2 parameter_id: 0x3e10b518 } +function { + id: 0x9f25dc29 + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x6d7f5ff6 + parameter_id: 0xb38a8bec + parameter_id: 0x35c4d162 + parameter_id: 0x07dcdbe1 +} function { id: 0x9f25fe02 return_type_id: 0x6720d32f @@ -311556,6 +314100,15 @@ function { parameter_id: 0x0beab59b parameter_id: 0x054f691a } +function { + id: 0x9f71449b + return_type_id: 0x6720d32f + parameter_id: 0x0a70ce1b + parameter_id: 0x18bd6530 + parameter_id: 0x3e10b518 + parameter_id: 0x391f15ea + parameter_id: 0x07dcdbe1 +} function { id: 0x9f72f53e return_type_id: 0x6720d32f @@ -311606,6 +314159,13 @@ function { return_type_id: 0x6720d32f parameter_id: 0x0bfc9031 } +function { + id: 0x9f7fd20b + return_type_id: 0x6720d32f + parameter_id: 0x0a70ce1b + parameter_id: 0x18bd6530 + parameter_id: 0x07dcdbe1 +} function { id: 0x9f808c95 return_type_id: 0x6720d32f @@ -311678,6 +314238,24 @@ function { parameter_id: 0xf435685e 
parameter_id: 0x3e10b518 } +function { + id: 0x9f8d452b + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x4585663f + parameter_id: 0x914dbfdc + parameter_id: 0xc9082b19 + parameter_id: 0xcbcc8512 + parameter_id: 0x07dcdbe1 +} +function { + id: 0x9f8e9af9 + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x4585663f + parameter_id: 0x914dbfdc + parameter_id: 0x3247ae94 +} function { id: 0x9f93bc17 return_type_id: 0x6720d32f @@ -311769,6 +314347,12 @@ function { parameter_id: 0x08a8dfa4 parameter_id: 0x0258f96e } +function { + id: 0x9faa0088 + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x4585663f +} function { id: 0x9fab680a return_type_id: 0x6720d32f @@ -311776,6 +314360,13 @@ function { parameter_id: 0xc9082b19 parameter_id: 0xc9082b19 } +function { + id: 0x9fabf7be + return_type_id: 0x6720d32f + parameter_id: 0x0cf3d8fe + parameter_id: 0x4585663f + parameter_id: 0x07dcdbe1 +} function { id: 0x9fac2fbc return_type_id: 0x6720d32f @@ -314149,6 +316740,12 @@ function { parameter_id: 0x0a134144 parameter_id: 0x3360dff4 } +function { + id: 0xcc25b8e9 + return_type_id: 0x4585663f + parameter_id: 0x0a70ce1b + parameter_id: 0x18bd6530 +} function { id: 0xcc787cc3 return_type_id: 0x2efe8065 @@ -315143,6 +317740,14 @@ function { parameter_id: 0x10673339 parameter_id: 0x23e856d0 } +function { + id: 0xe97c10c0 + return_type_id: 0x1b4a1f75 + parameter_id: 0x0cf3d8fe + parameter_id: 0x31d9e79a + parameter_id: 0xc9082b19 + parameter_id: 0x92233392 +} function { id: 0xea3d26bb return_type_id: 0x2e8ed696 @@ -316463,6 +319068,14 @@ function { return_type_id: 0x368487be parameter_id: 0x368487be } +function { + id: 0xfc55fd47 + return_type_id: 0x6d7f5ff6 + parameter_id: 0x0a70ce1b + parameter_id: 0x18bd6530 + parameter_id: 0x3e10b518 + parameter_id: 0x391f15ea +} function { id: 0xfc59f36a return_type_id: 0x26e55184 @@ -316515,6 +319128,13 @@ function { parameter_id: 0x1e820193 parameter_id: 0x15a30023 } +function 
{ + id: 0xfc8f4f95 + return_type_id: 0x6d7f5ff6 + parameter_id: 0x0cf3d8fe + parameter_id: 0x4585663f + parameter_id: 0x07dcdbe1 +} function { id: 0xfca015af return_type_id: 0x6d7f5ff6 @@ -316732,6 +319352,12 @@ function { parameter_id: 0x03942c7a parameter_id: 0x3fd547b8 } +function { + id: 0xfec047b0 + return_type_id: 0x6d7f5ff6 + parameter_id: 0x0258f96e + parameter_id: 0x3c88bbfa +} function { id: 0xfec3d248 return_type_id: 0x6d7f5ff6 @@ -334548,6 +337174,15 @@ elf_symbol { type_id: 0x10fc4d27 full_name: "device_remove_file" } +elf_symbol { + id: 0x22e51db4 + name: "device_remove_file_self" + is_defined: true + symbol_type: FUNCTION + crc: 0x7f6f4aa9 + type_id: 0xfec047b0 + full_name: "device_remove_file_self" +} elf_symbol { id: 0x5a62c5df name: "device_remove_groups" @@ -334656,6 +337291,150 @@ elf_symbol { type_id: 0x9d16dd74 full_name: "device_wakeup_enable" } +elf_symbol { + id: 0x884a3a76 + name: "devlink_alloc_ns" + is_defined: true + symbol_type: FUNCTION + crc: 0x81bd8c7b + type_id: 0x5bbe2188 + full_name: "devlink_alloc_ns" +} +elf_symbol { + id: 0xb54be30e + name: "devlink_flash_update_status_notify" + is_defined: true + symbol_type: FUNCTION + crc: 0x04c41c60 + type_id: 0x1353a05d + full_name: "devlink_flash_update_status_notify" +} +elf_symbol { + id: 0xead962b7 + name: "devlink_fmsg_binary_pair_nest_end" + is_defined: true + symbol_type: FUNCTION + crc: 0x5358864e + type_id: 0x9576eb91 + full_name: "devlink_fmsg_binary_pair_nest_end" +} +elf_symbol { + id: 0xfb50f564 + name: "devlink_fmsg_binary_pair_nest_start" + is_defined: true + symbol_type: FUNCTION + crc: 0x11df0e75 + type_id: 0x958ea945 + full_name: "devlink_fmsg_binary_pair_nest_start" +} +elf_symbol { + id: 0xff600ca5 + name: "devlink_fmsg_binary_put" + is_defined: true + symbol_type: FUNCTION + crc: 0x15510a89 + type_id: 0x95b6c4a9 + full_name: "devlink_fmsg_binary_put" +} +elf_symbol { + id: 0x266ac51c + name: "devlink_free" + is_defined: true + symbol_type: FUNCTION + crc: 0x660eb6bd + 
type_id: 0x13a4a7ac + full_name: "devlink_free" +} +elf_symbol { + id: 0xa8e06dd7 + name: "devlink_health_report" + is_defined: true + symbol_type: FUNCTION + crc: 0x93edef07 + type_id: 0x928c1332 + full_name: "devlink_health_report" +} +elf_symbol { + id: 0x52e65741 + name: "devlink_health_reporter_create" + is_defined: true + symbol_type: FUNCTION + crc: 0x0d26f5c4 + type_id: 0x2720cd28 + full_name: "devlink_health_reporter_create" +} +elf_symbol { + id: 0x2069fc41 + name: "devlink_health_reporter_destroy" + is_defined: true + symbol_type: FUNCTION + crc: 0x850bb6db + type_id: 0x1f6acc03 + full_name: "devlink_health_reporter_destroy" +} +elf_symbol { + id: 0x0abe7457 + name: "devlink_health_reporter_priv" + is_defined: true + symbol_type: FUNCTION + crc: 0xe40bb23e + type_id: 0x59642c61 + full_name: "devlink_health_reporter_priv" +} +elf_symbol { + id: 0x70ca4fad + name: "devlink_health_reporter_state_update" + is_defined: true + symbol_type: FUNCTION + crc: 0x2b4509dd + type_id: 0x1f01387c + full_name: "devlink_health_reporter_state_update" +} +elf_symbol { + id: 0xa164371a + name: "devlink_priv" + is_defined: true + symbol_type: FUNCTION + crc: 0x6e3347ec + type_id: 0x55aa47ce + full_name: "devlink_priv" +} +elf_symbol { + id: 0xb4634233 + name: "devlink_region_create" + is_defined: true + symbol_type: FUNCTION + crc: 0x6110ed39 + type_id: 0xe97c10c0 + full_name: "devlink_region_create" +} +elf_symbol { + id: 0x0a058c0b + name: "devlink_region_destroy" + is_defined: true + symbol_type: FUNCTION + crc: 0xa410a295 + type_id: 0x164ad64e + full_name: "devlink_region_destroy" +} +elf_symbol { + id: 0x5603c10b + name: "devlink_register" + is_defined: true + symbol_type: FUNCTION + crc: 0xc498bdc9 + type_id: 0x13a4a7ac + full_name: "devlink_register" +} +elf_symbol { + id: 0x7520d018 + name: "devlink_unregister" + is_defined: true + symbol_type: FUNCTION + crc: 0x946c0028 + type_id: 0x13a4a7ac + full_name: "devlink_unregister" +} elf_symbol { id: 0xde9ec7ca name: 
"devm_add_action" @@ -379496,6 +382275,7 @@ interface { symbol_id: 0x589e892d symbol_id: 0x25bf4477 symbol_id: 0x5b8e8574 + symbol_id: 0x22e51db4 symbol_id: 0x5a62c5df symbol_id: 0x20c43211 symbol_id: 0xcdcce9e8 @@ -379508,6 +382288,22 @@ interface { symbol_id: 0x440b32de symbol_id: 0x96ffcda6 symbol_id: 0x4b1a4683 + symbol_id: 0x884a3a76 + symbol_id: 0xb54be30e + symbol_id: 0xead962b7 + symbol_id: 0xfb50f564 + symbol_id: 0xff600ca5 + symbol_id: 0x266ac51c + symbol_id: 0xa8e06dd7 + symbol_id: 0x52e65741 + symbol_id: 0x2069fc41 + symbol_id: 0x0abe7457 + symbol_id: 0x70ca4fad + symbol_id: 0xa164371a + symbol_id: 0xb4634233 + symbol_id: 0x0a058c0b + symbol_id: 0x5603c10b + symbol_id: 0x7520d018 symbol_id: 0xde9ec7ca symbol_id: 0xa2a47944 symbol_id: 0x97ae66e9 diff --git a/android/abi_gki_aarch64_mtk b/android/abi_gki_aarch64_mtk index 9a933b3f74c6..bac4ddf87c67 100644 --- a/android/abi_gki_aarch64_mtk +++ b/android/abi_gki_aarch64_mtk @@ -416,6 +416,7 @@ device_release_driver device_remove_bin_file device_remove_file + device_remove_file_self device_rename __device_reset device_set_of_node_from_dev @@ -429,6 +430,22 @@ _dev_info __dev_kfree_skb_any __dev_kfree_skb_irq + devlink_alloc_ns + devlink_flash_update_status_notify + devlink_fmsg_binary_pair_nest_end + devlink_fmsg_binary_pair_nest_start + devlink_fmsg_binary_put + devlink_free + devlink_health_report + devlink_health_reporter_create + devlink_health_reporter_destroy + devlink_health_reporter_priv + devlink_health_reporter_state_update + devlink_priv + devlink_region_create + devlink_region_destroy + devlink_register + devlink_unregister dev_load devm_add_action __devm_alloc_percpu From 7edb035c79ac74397f9f09cf3775e6e751c3cd40 Mon Sep 17 00:00:00 2001 From: Kyongho Cho Date: Mon, 24 Jul 2023 13:17:51 -0700 Subject: [PATCH 047/163] ANDROID: drm/ttm: export ttm_tt_unpopulate() Xclipse GPU driver depends on TTM for graphics buffer allocation and management. 
It is required by customers to add graphics memory swap to improve overall memory efficiency. However TTM's swap feature can't be used since it selects victim buffer by LRU and we can't choose a specific buffer to swap. Xclipse GPU driver implements its own swap feature by means of APIs of TTM. But the problem is TTM's buffer allocations statistics in ttm_tt.c which are local to that file. Whenever a graphic buffer is swapped out, the size of total page allocation should be decreased but it is not possible from the outside of ttm_tt.c. If the statistics is not maintained well, TTM ends up swapping out TTM buffers globally which is unexpected. Bug: 291101811 Change-Id: I143c705834bcc196432c3ef59b49c9ec31f2e971 Signed-off-by: Kyongho Cho --- drivers/gpu/drm/ttm/ttm_tt.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index d505603930a7..c9c297af53a7 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -370,6 +370,7 @@ void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm) ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED; } +EXPORT_SYMBOL_GPL(ttm_tt_unpopulate); #ifdef CONFIG_DEBUG_FS From db2c29e53dfe6a11d4565b5a462c6a640bf714ed Mon Sep 17 00:00:00 2001 From: Kyongho Cho Date: Thu, 13 Jul 2023 00:21:16 -0700 Subject: [PATCH 048/163] ANDROID: ABI: update symbol list for Xclipse GPU 1 function symbol(s) added 'void ttm_tt_unpopulate(struct ttm_device*, struct ttm_tt*)' Bug: 291101811 Change-Id: I0be29227b37734304f00fc7b8e2612a0fa6c3fff Signed-off-by: Kyongho Cho --- android/abi_gki_aarch64.stg | 10 ++++++++++ android/abi_gki_aarch64_exynos | 1 + 2 files changed, 11 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 13de9dcc4631..ec63c0c4c9fe 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -372812,6 +372812,15 @@ elf_symbol { type_id: 0x1210f89b full_name: "ttm_tt_fini" } +elf_symbol { + id: 0xcbabaff3 
+ name: "ttm_tt_unpopulate" + is_defined: true + symbol_type: FUNCTION + crc: 0xa5cacfb1 + type_id: 0x14b4088f + full_name: "ttm_tt_unpopulate" +} elf_symbol { id: 0x0b4dd20d name: "tty_chars_in_buffer" @@ -386233,6 +386242,7 @@ interface { symbol_id: 0x677985f3 symbol_id: 0x6c2259cd symbol_id: 0xacf009d6 + symbol_id: 0xcbabaff3 symbol_id: 0x0b4dd20d symbol_id: 0xae3ac3f6 symbol_id: 0xa7c71d5a diff --git a/android/abi_gki_aarch64_exynos b/android/abi_gki_aarch64_exynos index a3b16126c54e..e30927aa26b1 100644 --- a/android/abi_gki_aarch64_exynos +++ b/android/abi_gki_aarch64_exynos @@ -2304,6 +2304,7 @@ ttm_resource_manager_usage ttm_sg_tt_init ttm_tt_fini + ttm_tt_unpopulate vm_get_page_prot __wake_up_locked ww_mutex_lock_interruptible From 5e4a5dc82033d5b47e98b285906b366a864bb8a5 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Fri, 9 Jun 2023 23:11:39 -0700 Subject: [PATCH 049/163] BACKPORT: blk-crypto: use dynamic lock class for blk_crypto_profile::lock When a device-mapper device is passing through the inline encryption support of an underlying device, calls to blk_crypto_evict_key() take the blk_crypto_profile::lock of the device-mapper device, then take the blk_crypto_profile::lock of the underlying device (nested). This isn't a real deadlock, but it causes a lockdep report because there is only one lock class for all instances of this lock. Lockdep subclasses don't really work here because the hierarchy of block devices is dynamic and could have more than 2 levels. Instead, register a dynamic lock class for each blk_crypto_profile, and associate that with the lock. 
This avoids false-positive lockdep reports like the following: ============================================ WARNING: possible recursive locking detected 6.4.0-rc5 #2 Not tainted -------------------------------------------- fscryptctl/1421 is trying to acquire lock: ffffff80829ca418 (&profile->lock){++++}-{3:3}, at: __blk_crypto_evict_key+0x44/0x1c0 but task is already holding lock: ffffff8086b68ca8 (&profile->lock){++++}-{3:3}, at: __blk_crypto_evict_key+0xc8/0x1c0 other info that might help us debug this: Possible unsafe locking scenario: CPU0 ---- lock(&profile->lock); lock(&profile->lock); *** DEADLOCK *** May be due to missing lock nesting notation Fixes: 1b2628397058 ("block: Keyslot Manager for Inline Encryption") Reported-by: Bart Van Assche Signed-off-by: Eric Biggers Reviewed-by: Bart Van Assche Link: https://lore.kernel.org/r/20230610061139.212085-1-ebiggers@kernel.org Signed-off-by: Jens Axboe Bug: 286427075 (cherry picked from commit 2fb48d88e77f29bf9d278f25bcfe82cf59a0e09b) (added '#ifdef CONFIG_LOCKDEP' to keep the KMI tooling happy) Change-Id: I21c0f941a36663c956a5c89324813bbaac0633ef Signed-off-by: Eric Biggers --- block/blk-crypto-profile.c | 16 +++++++++++++++- include/linux/blk-crypto-profile.h | 3 +++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/block/blk-crypto-profile.c b/block/blk-crypto-profile.c index fe550725c777..7cdef1bee6f7 100644 --- a/block/blk-crypto-profile.c +++ b/block/blk-crypto-profile.c @@ -79,7 +79,18 @@ int blk_crypto_profile_init(struct blk_crypto_profile *profile, unsigned int slot_hashtable_size; memset(profile, 0, sizeof(*profile)); + + /* + * profile->lock of an underlying device can nest inside profile->lock + * of a device-mapper device, so use a dynamic lock class to avoid + * false-positive lockdep reports. 
+ */ +#ifdef CONFIG_LOCKDEP + lockdep_register_key(&profile->lockdep_key); + __init_rwsem(&profile->lock, "&profile->lock", &profile->lockdep_key); +#else init_rwsem(&profile->lock); +#endif if (num_slots == 0) return 0; @@ -89,7 +100,7 @@ int blk_crypto_profile_init(struct blk_crypto_profile *profile, profile->slots = kvcalloc(num_slots, sizeof(profile->slots[0]), GFP_KERNEL); if (!profile->slots) - return -ENOMEM; + goto err_destroy; profile->num_slots = num_slots; @@ -443,6 +454,9 @@ void blk_crypto_profile_destroy(struct blk_crypto_profile *profile) { if (!profile) return; +#ifdef CONFIG_LOCKDEP + lockdep_unregister_key(&profile->lockdep_key); +#endif kvfree(profile->slot_hashtable); kvfree_sensitive(profile->slots, sizeof(profile->slots[0]) * profile->num_slots); diff --git a/include/linux/blk-crypto-profile.h b/include/linux/blk-crypto-profile.h index 8b30d04ef008..794f608a8994 100644 --- a/include/linux/blk-crypto-profile.h +++ b/include/linux/blk-crypto-profile.h @@ -131,6 +131,9 @@ struct blk_crypto_profile { * keyslots while ensuring that they can't be changed concurrently. */ struct rw_semaphore lock; +#ifdef CONFIG_LOCKDEP + struct lock_class_key lockdep_key; +#endif /* List of idle slots, with least recently used slot at front */ wait_queue_head_t idle_slots_wait_queue; From 544ae28cf6b57bcc2050973c1ee4eeaad1f87a1b Mon Sep 17 00:00:00 2001 From: xieliujie Date: Mon, 10 Jul 2023 20:05:46 +0800 Subject: [PATCH 050/163] ANDROID: Inherit "user-aware property" across rtmutex. Since upstream commit 715f7f9ece46 ("locking/rtmutex: Squash !RT tasks to DEFAULT_PRIO"), non-rt tasks do not inherit the nice-priority values across rt_mutexes. This removes the minor (and indirect) priority-inheritance that rt-mutexes provided for CFS tasks. Though without priority inheritance, time-bounded priority inversion can occur between CFS tasks of different nice priorities / cgroup limitations. 
The proxy-execution efforts are a work-in-progress to resolve this upstream, but in the meantime it is left to vendor hooks to provide a near term solution to avoid priority inversion between CFS tasks. In our oem scheduler, if a CFS thread has an "user-aware property", we will always pick it even if it's vruntime is bigger than the smallest one in runqueue. That's why the trace_android_rvh_replace_next_task_fair vendorhook was added previously in commit 53e809978443 ("ANDROID: vendor_hooks: Add hooks for scheduler"). Thus for our oem scheduler, important CFS tasks(like RenderThread) are marked with the "user-aware property" in their struct task_struct. If those tasks are blocked on an rtmutex, we want to allow the "user-aware property" to be inherited to lock owner, so it will be selected to run immediately to release the lock. To support this, we need new hooks to map "user-aware property" into different rtmutex_waiter prio and update the owner's "user-aware property" if needed. Thus these additional vendor hooks are needed. In the future, once an generalized upstream solution for CFS priority inheritance is in place, this will no longer be needed. 
Bug: 290585456 Change-Id: I6521ed2086b147400a54da6b84a324baf16bc649 Signed-off-by: xieliujie --- drivers/android/vendor_hooks.c | 2 ++ include/trace/hooks/dtask.h | 11 ++++++++++- include/trace/hooks/sched.h | 4 ++++ kernel/locking/rtmutex.c | 6 ++++++ kernel/sched/core.c | 6 ++++-- kernel/sched/vendor_hooks.c | 1 + 6 files changed, 27 insertions(+), 3 deletions(-) diff --git a/drivers/android/vendor_hooks.c b/drivers/android/vendor_hooks.c index 0a30c8cbe7bd..35baee2a710d 100644 --- a/drivers/android/vendor_hooks.c +++ b/drivers/android/vendor_hooks.c @@ -88,6 +88,8 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_send_sig_info); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_start); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_finish); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_init); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_task_blocks_on_rtmutex); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rtmutex_waiter_prio); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rtmutex_wait_start); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rtmutex_wait_finish); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_opt_spin_start); diff --git a/include/trace/hooks/dtask.h b/include/trace/hooks/dtask.h index cbf3dfd38d36..1552b71c1792 100644 --- a/include/trace/hooks/dtask.h +++ b/include/trace/hooks/dtask.h @@ -91,7 +91,16 @@ DECLARE_HOOK(android_vh_alter_mutex_list_add, DECLARE_HOOK(android_vh_mutex_unlock_slowpath, TP_PROTO(struct mutex *lock), TP_ARGS(lock)); - +struct rt_mutex_waiter; +struct ww_acquire_ctx; +DECLARE_HOOK(android_vh_task_blocks_on_rtmutex, + TP_PROTO(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter, + struct task_struct *task, struct ww_acquire_ctx *ww_ctx, + unsigned int *chwalk), + TP_ARGS(lock, waiter, task, ww_ctx, chwalk)); +DECLARE_HOOK(android_vh_rtmutex_waiter_prio, + TP_PROTO(struct task_struct *task, int *waiter_prio), + TP_ARGS(task, waiter_prio)); #endif /* _TRACE_HOOK_DTASK_H */ /* This part must be outside protection */ diff --git 
a/include/trace/hooks/sched.h b/include/trace/hooks/sched.h index 811f07f7be61..4cc3f0cded7b 100644 --- a/include/trace/hooks/sched.h +++ b/include/trace/hooks/sched.h @@ -52,6 +52,10 @@ DECLARE_RESTRICTED_HOOK(android_rvh_finish_prio_fork, TP_PROTO(struct task_struct *p), TP_ARGS(p), 1); +DECLARE_RESTRICTED_HOOK(android_rvh_rtmutex_force_update, + TP_PROTO(struct task_struct *p, struct task_struct *pi_task, int *update), + TP_ARGS(p, pi_task, update), 1); + DECLARE_RESTRICTED_HOOK(android_rvh_rtmutex_prepare_setprio, TP_PROTO(struct task_struct *p, struct task_struct *pi_task), TP_ARGS(p, pi_task), 1); diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 229ce6bc7d62..351716fe9138 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -327,6 +327,11 @@ static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock, static __always_inline int __waiter_prio(struct task_struct *task) { int prio = task->prio; + int waiter_prio = 0; + + trace_android_vh_rtmutex_waiter_prio(task, &waiter_prio); + if (waiter_prio > 0) + return waiter_prio; if (!rt_prio(prio)) return DEFAULT_PRIO; @@ -1151,6 +1156,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock, if (owner == task && !(build_ww_mutex() && ww_ctx)) return -EDEADLK; + trace_android_vh_task_blocks_on_rtmutex(lock, waiter, task, ww_ctx, &chwalk); raw_spin_lock(&task->pi_lock); waiter->task = task; waiter->lock = lock; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 53faabdb3950..95843540088b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7043,15 +7043,17 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) const struct sched_class *prev_class; struct rq_flags rf; struct rq *rq; + int update = 0; trace_android_rvh_rtmutex_prepare_setprio(p, pi_task); /* XXX used to be waiter->prio, not waiter->task->prio */ prio = __rt_effective_prio(pi_task, p->normal_prio); + trace_android_rvh_rtmutex_force_update(p, 
pi_task, &update); /* * If nothing changed; bail early. */ - if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) + if (!update && p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) return; rq = __task_rq_lock(p, &rf); @@ -7071,7 +7073,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) /* * For FIFO/RR we only need to set prio, if that matches we're done. */ - if (prio == p->prio && !dl_prio(prio)) + if (!update && prio == p->prio && !dl_prio(prio)) goto out_unlock; /* diff --git a/kernel/sched/vendor_hooks.c b/kernel/sched/vendor_hooks.c index d8d945fc20e3..f528b1f6cbb9 100644 --- a/kernel/sched/vendor_hooks.c +++ b/kernel/sched/vendor_hooks.c @@ -22,6 +22,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_can_migrate_task); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_lowest_rq); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_prepare_prio_fork); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_finish_prio_fork); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_rtmutex_force_update); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_rtmutex_prepare_setprio); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_user_nice); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_setscheduler); From cf70cb4f1f14ec29fa153152a60b66b5e0e5049f Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 15 Jun 2023 16:17:48 -0700 Subject: [PATCH 051/163] UPSTREAM: mm: make the page fault mmap locking killable commit eda0047296a16d65a7f2bc60a408f70d178b2014 upstream. This is done as a separate patch from introducing the new lock_mm_and_find_vma() helper, because while it's an obvious change, it's not what x86 used to do in this area. We already abort the page fault on fatal signals anyway, so why should we wait for the mmap lock only to then abort later? With the new helper function that returns without the lock held on failure anyway, this is particularly easy and straightforward. 
Signed-off-by: Linus Torvalds Signed-off-by: Samuel Mendoza-Jonas Signed-off-by: David Woodhouse Signed-off-by: Greg Kroah-Hartman Change-Id: I9730b4543265a20253cbfc02de135cc77927f821 (cherry picked from commit eda0047296a16d65a7f2bc60a408f70d178b2014) Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- mm/memory.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 78a9e3fb0e65..cd3cfc7753a9 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5304,8 +5304,7 @@ static inline bool get_mmap_lock_carefully(struct mm_struct *mm, struct pt_regs return false; } - mmap_read_lock(mm); - return true; + return !mmap_read_lock_killable(mm); } static inline bool mmap_upgrade_trylock(struct mm_struct *mm) @@ -5329,8 +5328,7 @@ static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_r if (!search_exception_tables(ip)) return false; } - mmap_write_lock(mm); - return true; + return !mmap_write_lock_killable(mm); } /* From 89298b8b3ce6a5de2167929c07c7e00df7df0efa Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 15 Jun 2023 17:11:44 -0700 Subject: [PATCH 052/163] BACKPORT: arm64/mm: Convert to using lock_mm_and_find_vma() commit ae870a68b5d13d67cf4f18d47bb01ee3fee40acb upstream. This converts arm64 to use the new page fault helper. It was very straightforward, but still needed a fix for the "obvious" conversion I initially did. Thanks to Suren for the fix and testing. Fixed-and-tested-by: Suren Baghdasaryan Unnecessary-code-removal-by: Liam R. 
Howlett Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman [surenb: this one is taken from 6.4.y stable branch] Change-Id: Ibda94ca9b3893b8961e1d6536c854c0aee559a6b (cherry picked from commit ae870a68b5d13d67cf4f18d47bb01ee3fee40acb) Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- arch/arm64/Kconfig | 1 + arch/arm64/mm/fault.c | 47 ++++++++----------------------------------- 2 files changed, 9 insertions(+), 39 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index bf11f89de29a..7dafeacab872 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -216,6 +216,7 @@ config ARM64 select IRQ_DOMAIN select IRQ_FORCED_THREADING select KASAN_VMALLOC if KASAN + select LOCK_MM_AND_FIND_VMA select MODULES_USE_ELF_RELA select NEED_DMA_MAP_STATE select NEED_SG_DMA_LENGTH diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 49e08cc145c0..16d8206e0470 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -502,27 +502,14 @@ static void do_bad_area(unsigned long far, unsigned long esr, #define VM_FAULT_BADMAP 0x010000 #define VM_FAULT_BADACCESS 0x020000 -static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr, +static vm_fault_t __do_page_fault(struct mm_struct *mm, + struct vm_area_struct *vma, unsigned long addr, unsigned int mm_flags, unsigned long vm_flags, struct pt_regs *regs) { - struct vm_area_struct *vma = find_vma(mm, addr); - - if (unlikely(!vma)) - return VM_FAULT_BADMAP; - /* * Ok, we have a good vm_area for this memory access, so we can handle * it. - */ - if (unlikely(vma->vm_start > addr)) { - if (!(vma->vm_flags & VM_GROWSDOWN)) - return VM_FAULT_BADMAP; - if (expand_stack(vma, addr)) - return VM_FAULT_BADMAP; - } - - /* * Check that the permissions on the VMA allow for the fault which * occurred. 
*/ @@ -643,31 +630,15 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, } lock_mmap: #endif /* CONFIG_PER_VMA_LOCK */ - /* - * As per x86, we may deadlock here. However, since the kernel only - * validly references user space from well defined areas of the code, - * we can bug out early if this is from code which shouldn't. - */ - if (!mmap_read_trylock(mm)) { - if (!user_mode(regs) && !search_exception_tables(regs->pc)) - goto no_context; + retry: - mmap_read_lock(mm); - } else { - /* - * The above mmap_read_trylock() might have succeeded in which - * case, we'll have missed the might_sleep() from down_read(). - */ - might_sleep(); -#ifdef CONFIG_DEBUG_VM - if (!user_mode(regs) && !search_exception_tables(regs->pc)) { - mmap_read_unlock(mm); - goto no_context; - } -#endif + vma = lock_mm_and_find_vma(mm, addr, regs); + if (unlikely(!vma)) { + fault = VM_FAULT_BADMAP; + goto done; } - fault = __do_page_fault(mm, addr, mm_flags, vm_flags, regs); + fault = __do_page_fault(mm, vma, addr, mm_flags, vm_flags, regs); /* Quick path to respond to signals */ if (fault_signal_pending(fault, regs)) { @@ -686,9 +657,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, } mmap_read_unlock(mm); -#ifdef CONFIG_PER_VMA_LOCK done: -#endif /* * Handle the "normal" (no error) case first. */ From 1016faf509997c4c280f12e762c1df10530d8548 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 4 Jul 2023 01:00:03 +0000 Subject: [PATCH 053/163] BACKPORT: arch/arm64/mm/fault: Fix undeclared variable error in do_page_fault() commit 24be4d0b46bb0c3c1dc7bacd30957d6144a70dfc upstream. Commit ae870a68b5d1 ("arm64/mm: Convert to using lock_mm_and_find_vma()") made do_page_fault() to use 'vma' even if CONFIG_PER_VMA_LOCK is not defined, but the declaration is still in the ifdef. 
As a result, building kernel without the config fails with undeclared variable error as below: arch/arm64/mm/fault.c: In function 'do_page_fault': arch/arm64/mm/fault.c:624:2: error: 'vma' undeclared (first use in this function); did you mean 'vmap'? 624 | vma = lock_mm_and_find_vma(mm, addr, regs); | ^~~ | vmap Fix it by moving the declaration out of the ifdef. Fixes: ae870a68b5d1 ("arm64/mm: Convert to using lock_mm_and_find_vma()") Signed-off-by: SeongJae Park Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman [surenb: this one is taken from 6.4.y stable branch] Change-Id: Iba3153aa67f2dab347e4bc04a09c566b47cf4f63 (cherry picked from commit 24be4d0b46bb0c3c1dc7bacd30957d6144a70dfc) Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- arch/arm64/mm/fault.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 16d8206e0470..e34b46785150 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -541,9 +541,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, unsigned long vm_flags; unsigned int mm_flags = FAULT_FLAG_DEFAULT; unsigned long addr = untagged_addr(far); -#ifdef CONFIG_PER_VMA_LOCK struct vm_area_struct *vma; -#endif if (kprobe_page_fault(regs, esr)) return 0; From 9cdce804c05a3c377bb053720837f678802d5fc8 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 16 Jun 2023 15:51:29 +1000 Subject: [PATCH 054/163] UPSTREAM: powerpc/mm: Convert to using lock_mm_and_find_vma() commit e6fe228c4ffafdfc970cf6d46883a1f481baf7ea upstream. 
Signed-off-by: Michael Ellerman Signed-off-by: Linus Torvalds Signed-off-by: Samuel Mendoza-Jonas Signed-off-by: David Woodhouse Signed-off-by: Greg Kroah-Hartman Change-Id: Ifeaee70ad1bdb9e583aaba137526cc49e2ecf8be (cherry picked from commit e6fe228c4ffafdfc970cf6d46883a1f481baf7ea) Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/Kconfig | 1 + arch/powerpc/mm/fault.c | 39 +++------------------------------------ 2 files changed, 4 insertions(+), 36 deletions(-) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 2b1141645d9e..6050e6e10d32 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -257,6 +257,7 @@ config PPC select IRQ_DOMAIN select IRQ_FORCED_THREADING select KASAN_VMALLOC if KASAN && MODULES + select LOCK_MM_AND_FIND_VMA select MMU_GATHER_PAGE_SIZE select MMU_GATHER_RCU_TABLE_FREE select MMU_GATHER_MERGE_VMAS diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 531177a4ee08..5bfdf6ecfa96 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -84,11 +84,6 @@ static int __bad_area(struct pt_regs *regs, unsigned long address, int si_code) return __bad_area_nosemaphore(regs, address, si_code); } -static noinline int bad_area(struct pt_regs *regs, unsigned long address) -{ - return __bad_area(regs, address, SEGV_MAPERR); -} - static noinline int bad_access_pkey(struct pt_regs *regs, unsigned long address, struct vm_area_struct *vma) { @@ -515,40 +510,12 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address, * we will deadlock attempting to validate the fault against the * address space. Luckily the kernel only validly references user * space from well defined areas of code, which are listed in the - * exceptions table. - * - * As the vast majority of faults will be valid we will only perform - * the source reference check when there is a possibility of a deadlock. - * Attempt to lock the address space, if we cannot we then validate the - * source. 
If this is invalid we can skip the address space check, - * thus avoiding the deadlock. + * exceptions table. lock_mm_and_find_vma() handles that logic. */ - if (unlikely(!mmap_read_trylock(mm))) { - if (!is_user && !search_exception_tables(regs->nip)) - return bad_area_nosemaphore(regs, address); - retry: - mmap_read_lock(mm); - } else { - /* - * The above down_read_trylock() might have succeeded in - * which case we'll have missed the might_sleep() from - * down_read(): - */ - might_sleep(); - } - - vma = find_vma(mm, address); + vma = lock_mm_and_find_vma(mm, address, regs); if (unlikely(!vma)) - return bad_area(regs, address); - - if (unlikely(vma->vm_start > address)) { - if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) - return bad_area(regs, address); - - if (unlikely(expand_stack(vma, address))) - return bad_area(regs, address); - } + return bad_area_nosemaphore(regs, address); if (unlikely(access_pkey_error(is_write, is_exec, (error_code & DSISR_KEYFAULT), vma))) From 053053fc68a49eda92d11b668acff2d41397b183 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 22 Jun 2023 18:47:40 +0200 Subject: [PATCH 055/163] UPSTREAM: mips/mm: Convert to using lock_mm_and_find_vma() commit 4bce37a68ff884e821a02a731897a8119e0c37b7 upstream. 
Signed-off-by: Ben Hutchings Signed-off-by: Linus Torvalds Signed-off-by: Samuel Mendoza-Jonas Signed-off-by: David Woodhouse Signed-off-by: Greg Kroah-Hartman Change-Id: Ie1ec8bd98c52086790adcd691370a76d135a333e (cherry picked from commit 4bce37a68ff884e821a02a731897a8119e0c37b7) Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- arch/mips/Kconfig | 1 + arch/mips/mm/fault.c | 12 ++---------- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index b26b77673c2c..fecb681ff264 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -93,6 +93,7 @@ config MIPS select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP select IRQ_FORCED_THREADING select ISA if EISA + select LOCK_MM_AND_FIND_VMA select MODULES_USE_ELF_REL if MODULES select MODULES_USE_ELF_RELA if MODULES && 64BIT select PERF_USE_VMALLOC diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index a27045f5a556..d7878208bd3f 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -99,21 +99,13 @@ static void __do_page_fault(struct pt_regs *regs, unsigned long write, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: - mmap_read_lock(mm); - vma = find_vma(mm, address); + vma = lock_mm_and_find_vma(mm, address, regs); if (!vma) - goto bad_area; - if (vma->vm_start <= address) - goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (expand_stack(vma, address)) - goto bad_area; + goto bad_area_nosemaphore; /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. */ -good_area: si_code = SEGV_ACCERR; if (write) { From 9f136450af1aed45996d29cd333fbcd81fd104c8 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 22 Jun 2023 20:18:18 +0200 Subject: [PATCH 056/163] UPSTREAM: riscv/mm: Convert to using lock_mm_and_find_vma() commit 7267ef7b0b77f4ed23b7b3c87d8eca7bd9c2d007 upstream. 
Signed-off-by: Ben Hutchings Signed-off-by: Linus Torvalds [6.1: Kconfig context] Signed-off-by: Samuel Mendoza-Jonas Signed-off-by: David Woodhouse Signed-off-by: Greg Kroah-Hartman Change-Id: I601c5e4625e0357be7043026359aa85e5a63ade1 (cherry picked from commit 7267ef7b0b77f4ed23b7b3c87d8eca7bd9c2d007) Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- arch/riscv/Kconfig | 1 + arch/riscv/mm/fault.c | 31 +++++++++++++------------------ 2 files changed, 14 insertions(+), 18 deletions(-) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 06b9b2f60b9f..45d52d465e1d 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -113,6 +113,7 @@ config RISCV select HAVE_RSEQ select IRQ_DOMAIN select IRQ_FORCED_THREADING + select LOCK_MM_AND_FIND_VMA select MODULES_USE_ELF_RELA if MODULES select MODULE_SECTIONS if MODULES select OF diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index eb0774d9c03b..274bc6dd839f 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -83,13 +83,13 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f BUG(); } -static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr) +static inline void +bad_area_nosemaphore(struct pt_regs *regs, int code, unsigned long addr) { /* * Something tried to access memory that isn't in our memory map. * Fix it, but check if it's kernel or user first. 
*/ - mmap_read_unlock(mm); /* User mode accesses just cause a SIGSEGV */ if (user_mode(regs)) { do_trap(regs, SIGSEGV, code, addr); @@ -99,6 +99,15 @@ static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code no_context(regs, addr); } +static inline void +bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, + unsigned long addr) +{ + mmap_read_unlock(mm); + + bad_area_nosemaphore(regs, code, addr); +} + static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr) { pgd_t *pgd, *pgd_k; @@ -281,23 +290,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs) else if (cause == EXC_INST_PAGE_FAULT) flags |= FAULT_FLAG_INSTRUCTION; retry: - mmap_read_lock(mm); - vma = find_vma(mm, addr); + vma = lock_mm_and_find_vma(mm, addr, regs); if (unlikely(!vma)) { tsk->thread.bad_cause = cause; - bad_area(regs, mm, code, addr); - return; - } - if (likely(vma->vm_start <= addr)) - goto good_area; - if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { - tsk->thread.bad_cause = cause; - bad_area(regs, mm, code, addr); - return; - } - if (unlikely(expand_stack(vma, addr))) { - tsk->thread.bad_cause = cause; - bad_area(regs, mm, code, addr); + bad_area_nosemaphore(regs, code, addr); return; } @@ -305,7 +301,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs) * Ok, we have a good vm_area for this memory access, so * we can handle it. */ -good_area: code = SEGV_ACCERR; if (unlikely(access_error(cause, vma))) { From add0a1ea04ff78872b380fe9fd700b64114da678 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Thu, 22 Jun 2023 21:24:30 +0200 Subject: [PATCH 057/163] UPSTREAM: arm/mm: Convert to using lock_mm_and_find_vma() commit 8b35ca3e45e35a26a21427f35d4093606e93ad0a upstream. arm has an additional check for address < FIRST_USER_ADDRESS before expanding the stack. Since FIRST_USER_ADDRESS is defined everywhere (generally as 0), move that check to the generic expand_downwards(). 
Signed-off-by: Ben Hutchings Signed-off-by: Linus Torvalds Signed-off-by: Samuel Mendoza-Jonas Signed-off-by: David Woodhouse Signed-off-by: Greg Kroah-Hartman Change-Id: Ie1090f587090ef16de4bce224bbc52334bfe78fa (cherry picked from commit 8b35ca3e45e35a26a21427f35d4093606e93ad0a) Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- arch/arm/Kconfig | 1 + arch/arm/mm/fault.c | 63 ++++++++++----------------------------------- mm/mmap.c | 2 +- 3 files changed, 16 insertions(+), 50 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index a08c9d092a33..0202e48e7a20 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -122,6 +122,7 @@ config ARM select HAVE_UID16 select HAVE_VIRT_CPU_ACCOUNTING_GEN select IRQ_FORCED_THREADING + select LOCK_MM_AND_FIND_VMA select MODULES_USE_ELF_REL select NEED_DMA_MAP_STATE select OF_EARLY_FLATTREE if OF diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index de988cba9a4b..b0db85310331 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -231,37 +231,11 @@ static inline bool is_permission_fault(unsigned int fsr) return false; } -static vm_fault_t __kprobes -__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int flags, - unsigned long vma_flags, struct pt_regs *regs) -{ - struct vm_area_struct *vma = find_vma(mm, addr); - if (unlikely(!vma)) - return VM_FAULT_BADMAP; - - if (unlikely(vma->vm_start > addr)) { - if (!(vma->vm_flags & VM_GROWSDOWN)) - return VM_FAULT_BADMAP; - if (addr < FIRST_USER_ADDRESS) - return VM_FAULT_BADMAP; - if (expand_stack(vma, addr)) - return VM_FAULT_BADMAP; - } - - /* - * ok, we have a good vm_area for this memory access, check the - * permissions on the VMA allow for the fault which occurred. 
- */ - if (!(vma->vm_flags & vma_flags)) - return VM_FAULT_BADACCESS; - - return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs); -} - static int __kprobes do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; int sig, code; vm_fault_t fault; unsigned int flags = FAULT_FLAG_DEFAULT; @@ -300,31 +274,21 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); - /* - * As per x86, we may deadlock here. However, since the kernel only - * validly references user space from well defined areas of the code, - * we can bug out early if this is from code which shouldn't. - */ - if (!mmap_read_trylock(mm)) { - if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc)) - goto no_context; retry: - mmap_read_lock(mm); - } else { - /* - * The above down_read_trylock() might have succeeded in - * which case, we'll have missed the might_sleep() from - * down_read() - */ - might_sleep(); -#ifdef CONFIG_DEBUG_VM - if (!user_mode(regs) && - !search_exception_tables(regs->ARM_pc)) - goto no_context; -#endif + vma = lock_mm_and_find_vma(mm, addr, regs); + if (unlikely(!vma)) { + fault = VM_FAULT_BADMAP; + goto bad_area; } - fault = __do_page_fault(mm, addr, flags, vm_flags, regs); + /* + * ok, we have a good vm_area for this memory access, check the + * permissions on the VMA allow for the fault which occurred. + */ + if (!(vma->vm_flags & vm_flags)) + fault = VM_FAULT_BADACCESS; + else + fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs); /* If we need to retry but a fatal signal is pending, handle the * signal first. 
We do not need to release the mmap_lock because @@ -355,6 +319,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS)))) return 0; +bad_area: /* * If we are in kernel mode at this point, we * have no context to handle this fault with. diff --git a/mm/mmap.c b/mm/mmap.c index 751fcf6037b3..4eaf4762978c 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2089,7 +2089,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address) int error = 0; address &= PAGE_MASK; - if (address < mmap_min_addr) + if (address < mmap_min_addr || address < FIRST_USER_ADDRESS) return -EPERM; /* Enforce stack_guard_gap */ From 6c33246824a5d258beaded8d7c98f932d76f928a Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 24 Jun 2023 10:55:38 -0700 Subject: [PATCH 058/163] UPSTREAM: mm/fault: convert remaining simple cases to lock_mm_and_find_vma() commit a050ba1e7422f2cc60ff8bfde3f96d34d00cb585 upstream. This does the simple pattern conversion of alpha, arc, csky, hexagon, loongarch, nios2, sh, sparc32, and xtensa to the lock_mm_and_find_vma() helper. They all have the regular fault handling pattern without odd special cases. The remaining architectures all have something that keeps us from a straightforward conversion: ia64 and parisc have stacks that can grow both up as well as down (and ia64 has special address region checks). And m68k, microblaze, openrisc, sparc64, and um end up having extra rules about only expanding the stack down a limited amount below the user space stack pointer. That is something that x86 used to do too (long long ago), and it probably could just be skipped, but it still makes the conversion less than trivial. Note that this conversion was done manually and with the exception of alpha without any build testing, because I have a fairly limited cross-building environment. The cases are all simple, and I went through the changes several times, but... 
Signed-off-by: Linus Torvalds Signed-off-by: Samuel Mendoza-Jonas Signed-off-by: David Woodhouse Signed-off-by: Greg Kroah-Hartman Change-Id: I93e4ce3cb077329e202699a16db576be3a40285b (cherry picked from commit a050ba1e7422f2cc60ff8bfde3f96d34d00cb585) Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- arch/alpha/Kconfig | 1 + arch/alpha/mm/fault.c | 13 +++---------- arch/arc/Kconfig | 1 + arch/arc/mm/fault.c | 11 +++-------- arch/csky/Kconfig | 1 + arch/csky/mm/fault.c | 22 +++++----------------- arch/hexagon/Kconfig | 1 + arch/hexagon/mm/vm_fault.c | 18 ++++-------------- arch/loongarch/Kconfig | 1 + arch/loongarch/mm/fault.c | 16 ++++++---------- arch/nios2/Kconfig | 1 + arch/nios2/mm/fault.c | 17 ++--------------- arch/sh/Kconfig | 1 + arch/sh/mm/fault.c | 17 ++--------------- arch/sparc/Kconfig | 1 + arch/sparc/mm/fault_32.c | 32 ++++++++------------------------ arch/xtensa/Kconfig | 1 + arch/xtensa/mm/fault.c | 14 +++----------- 18 files changed, 45 insertions(+), 124 deletions(-) diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 97fce7386b00..d95d82abdf29 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -28,6 +28,7 @@ config ALPHA select GENERIC_SMP_IDLE_THREAD select HAVE_ARCH_AUDITSYSCALL select HAVE_MOD_ARCH_SPECIFIC + select LOCK_MM_AND_FIND_VMA select MODULES_USE_ELF_RELA select ODD_RT_SIGACTION select OLD_SIGSUSPEND diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index ef427a6bdd1a..2b49aa94e4de 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c @@ -119,20 +119,12 @@ do_page_fault(unsigned long address, unsigned long mmcsr, flags |= FAULT_FLAG_USER; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: - mmap_read_lock(mm); - vma = find_vma(mm, address); + vma = lock_mm_and_find_vma(mm, address, regs); if (!vma) - goto bad_area; - if (vma->vm_start <= address) - goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (expand_stack(vma, address)) - goto 
bad_area; + goto bad_area_nosemaphore; /* Ok, we have a good vm_area for this memory access, so we can handle it. */ - good_area: si_code = SEGV_ACCERR; if (cause < 0) { if (!(vma->vm_flags & VM_EXEC)) @@ -189,6 +181,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr, bad_area: mmap_read_unlock(mm); + bad_area_nosemaphore: if (user_mode(regs)) goto do_sigsegv; diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index d9a13ccf89a3..cb1074f74c3f 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -41,6 +41,7 @@ config ARC select HAVE_PERF_EVENTS select HAVE_SYSCALL_TRACEPOINTS select IRQ_DOMAIN + select LOCK_MM_AND_FIND_VMA select MODULES_USE_ELF_RELA select OF select OF_EARLY_FLATTREE diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index 5ca59a482632..f59e722d147f 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c @@ -113,15 +113,9 @@ void do_page_fault(unsigned long address, struct pt_regs *regs) perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: - mmap_read_lock(mm); - - vma = find_vma(mm, address); + vma = lock_mm_and_find_vma(mm, address, regs); if (!vma) - goto bad_area; - if (unlikely(address < vma->vm_start)) { - if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address)) - goto bad_area; - } + goto bad_area_nosemaphore; /* * vm_area is good, now check permissions for this memory access @@ -161,6 +155,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs) bad_area: mmap_read_unlock(mm); +bad_area_nosemaphore: /* * Major/minor page fault accounting * (in case of retry we only land here once) diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index adee6ab36862..742009123fd5 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -96,6 +96,7 @@ config CSKY select HAVE_RSEQ select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS + select LOCK_MM_AND_FIND_VMA select MAY_HAVE_SPARSE_IRQ select MODULES_USE_ELF_RELA if MODULES select OF diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c index 
e15f736cca4b..ae9781b7d92e 100644 --- a/arch/csky/mm/fault.c +++ b/arch/csky/mm/fault.c @@ -97,13 +97,12 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f BUG(); } -static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr) +static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr) { /* * Something tried to access memory that isn't in our memory map. * Fix it, but check if it's kernel or user first. */ - mmap_read_unlock(mm); /* User mode accesses just cause a SIGSEGV */ if (user_mode(regs)) { do_trap(regs, SIGSEGV, code, addr); @@ -238,20 +237,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs) if (is_write(regs)) flags |= FAULT_FLAG_WRITE; retry: - mmap_read_lock(mm); - vma = find_vma(mm, addr); + vma = lock_mm_and_find_vma(mm, address, regs); if (unlikely(!vma)) { - bad_area(regs, mm, code, addr); - return; - } - if (likely(vma->vm_start <= addr)) - goto good_area; - if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { - bad_area(regs, mm, code, addr); - return; - } - if (unlikely(expand_stack(vma, addr))) { - bad_area(regs, mm, code, addr); + bad_area_nosemaphore(regs, mm, code, addr); return; } @@ -259,11 +247,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs) * Ok, we have a good vm_area for this memory access, so * we can handle it. 
*/ -good_area: code = SEGV_ACCERR; if (unlikely(access_error(regs, vma))) { - bad_area(regs, mm, code, addr); + mmap_read_unlock(mm); + bad_area_nosemaphore(regs, mm, code, addr); return; } diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 54eadf265178..6726f4941015 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -28,6 +28,7 @@ config HEXAGON select GENERIC_SMP_IDLE_THREAD select STACKTRACE_SUPPORT select GENERIC_CLOCKEVENTS_BROADCAST + select LOCK_MM_AND_FIND_VMA select MODULES_USE_ELF_RELA select GENERIC_CPU_DEVICES select ARCH_WANT_LD_ORPHAN_WARN diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c index f73c7cbfe326..583b08727166 100644 --- a/arch/hexagon/mm/vm_fault.c +++ b/arch/hexagon/mm/vm_fault.c @@ -57,21 +57,10 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs) perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: - mmap_read_lock(mm); - vma = find_vma(mm, address); - if (!vma) - goto bad_area; + vma = lock_mm_and_find_vma(mm, address, regs); + if (unlikely(!vma)) + goto bad_area_nosemaphore; - if (vma->vm_start <= address) - goto good_area; - - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - - if (expand_stack(vma, address)) - goto bad_area; - -good_area: /* Address space is OK. Now check access rights. 
*/ si_code = SEGV_ACCERR; @@ -140,6 +129,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs) bad_area: mmap_read_unlock(mm); +bad_area_nosemaphore: if (user_mode(regs)) { force_sig_fault(SIGSEGV, si_code, (void __user *)address); return; diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index 903096bd87f8..51d738ac12e5 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -107,6 +107,7 @@ config LOONGARCH select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP select IRQ_FORCED_THREADING select IRQ_LOONGARCH_CPU + select LOCK_MM_AND_FIND_VMA select MMU_GATHER_MERGE_VMAS if MMU select MODULES_USE_ELF_RELA if MODULES select NEED_PER_CPU_EMBED_FIRST_CHUNK diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c index 1ccd53655cab..b829ab911a17 100644 --- a/arch/loongarch/mm/fault.c +++ b/arch/loongarch/mm/fault.c @@ -166,22 +166,18 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: - mmap_read_lock(mm); - vma = find_vma(mm, address); - if (!vma) - goto bad_area; - if (vma->vm_start <= address) - goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (!expand_stack(vma, address)) - goto good_area; + vma = lock_mm_and_find_vma(mm, address, regs); + if (unlikely(!vma)) + goto bad_area_nosemaphore; + goto good_area; + /* * Something tried to access memory that isn't in our memory map.. * Fix it, but check if it's kernel or user first.. 
*/ bad_area: mmap_read_unlock(mm); +bad_area_nosemaphore: do_sigsegv(regs, write, address, si_code); return; diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index a582f72104f3..1fb78865a459 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -16,6 +16,7 @@ config NIOS2 select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_KGDB select IRQ_DOMAIN + select LOCK_MM_AND_FIND_VMA select MODULES_USE_ELF_RELA select OF select OF_EARLY_FLATTREE diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c index edaca0a6c1c1..71939fb28c2e 100644 --- a/arch/nios2/mm/fault.c +++ b/arch/nios2/mm/fault.c @@ -86,27 +86,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); - if (!mmap_read_trylock(mm)) { - if (!user_mode(regs) && !search_exception_tables(regs->ea)) - goto bad_area_nosemaphore; retry: - mmap_read_lock(mm); - } - - vma = find_vma(mm, address); + vma = lock_mm_and_find_vma(mm, address, regs); if (!vma) - goto bad_area; - if (vma->vm_start <= address) - goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (expand_stack(vma, address)) - goto bad_area; + goto bad_area_nosemaphore; /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. 
*/ -good_area: code = SEGV_ACCERR; switch (cause) { diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 5f220e903e5a..8e4d1f757bcc 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -56,6 +56,7 @@ config SUPERH select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS select IRQ_FORCED_THREADING + select LOCK_MM_AND_FIND_VMA select MODULES_USE_ELF_RELA select NEED_SG_DMA_LENGTH select NO_DMA if !MMU && !DMA_COHERENT diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index acd2f5e50bfc..06e6b4952924 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c @@ -439,21 +439,9 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, } retry: - mmap_read_lock(mm); - - vma = find_vma(mm, address); + vma = lock_mm_and_find_vma(mm, address, regs); if (unlikely(!vma)) { - bad_area(regs, error_code, address); - return; - } - if (likely(vma->vm_start <= address)) - goto good_area; - if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) { - bad_area(regs, error_code, address); - return; - } - if (unlikely(expand_stack(vma, address))) { - bad_area(regs, error_code, address); + bad_area_nosemaphore(regs, error_code, address); return; } @@ -461,7 +449,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, * Ok, we have a good vm_area for this memory access, so * we can handle it.. 
*/ -good_area: if (unlikely(access_error(error_code, vma))) { bad_area_access_error(regs, error_code, address); return; diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 84437a4c6545..dbb1760cbe8c 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -56,6 +56,7 @@ config SPARC32 select DMA_DIRECT_REMAP select GENERIC_ATOMIC64 select HAVE_UID16 + select LOCK_MM_AND_FIND_VMA select OLD_SIGACTION select ZONE_DMA diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 91259f291c54..aef2aebe2379 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -143,28 +143,19 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write, if (pagefault_disabled() || !mm) goto no_context; + if (!from_user && address >= PAGE_OFFSET) + goto no_context; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: - mmap_read_lock(mm); - - if (!from_user && address >= PAGE_OFFSET) - goto bad_area; - - vma = find_vma(mm, address); + vma = lock_mm_and_find_vma(mm, address, regs); if (!vma) - goto bad_area; - if (vma->vm_start <= address) - goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (expand_stack(vma, address)) - goto bad_area; + goto bad_area_nosemaphore; /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. 
*/ -good_area: code = SEGV_ACCERR; if (write) { if (!(vma->vm_flags & VM_WRITE)) @@ -318,17 +309,9 @@ static void force_user_fault(unsigned long address, int write) code = SEGV_MAPERR; - mmap_read_lock(mm); - vma = find_vma(mm, address); + vma = lock_mm_and_find_vma(mm, address, regs); if (!vma) - goto bad_area; - if (vma->vm_start <= address) - goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (expand_stack(vma, address)) - goto bad_area; -good_area: + goto bad_area_nosemaphore; code = SEGV_ACCERR; if (write) { if (!(vma->vm_flags & VM_WRITE)) @@ -347,6 +330,7 @@ static void force_user_fault(unsigned long address, int write) return; bad_area: mmap_read_unlock(mm); +bad_area_nosemaphore: __do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address); return; diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index bcb0c5d2abc2..6d3c9257aa13 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -49,6 +49,7 @@ config XTENSA select HAVE_SYSCALL_TRACEPOINTS select HAVE_VIRT_CPU_ACCOUNTING_GEN select IRQ_DOMAIN + select LOCK_MM_AND_FIND_VMA select MODULES_USE_ELF_RELA select PERF_USE_VMALLOC select TRACE_IRQFLAGS_SUPPORT diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index 8c781b05c0bd..d89b193c779f 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -130,23 +130,14 @@ void do_page_fault(struct pt_regs *regs) perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: - mmap_read_lock(mm); - vma = find_vma(mm, address); - + vma = lock_mm_and_find_vma(mm, address, regs); if (!vma) - goto bad_area; - if (vma->vm_start <= address) - goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (expand_stack(vma, address)) - goto bad_area; + goto bad_area_nosemaphore; /* Ok, we have a good vm_area for this memory access, so * we can handle it.. 
*/ -good_area: code = SEGV_ACCERR; if (is_write) { @@ -205,6 +196,7 @@ void do_page_fault(struct pt_regs *regs) */ bad_area: mmap_read_unlock(mm); +bad_area_nosemaphore: if (user_mode(regs)) { current->thread.bad_vaddr = address; current->thread.error_code = is_write; From 4087cac574fe843a1ad527fddb8a29ac854d669c Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 24 Jun 2023 11:17:05 -0700 Subject: [PATCH 059/163] UPSTREAM: powerpc/mm: convert coprocessor fault to lock_mm_and_find_vma() commit 2cd76c50d0b41cec5c87abfcdf25b236a2793fb6 upstream. This is one of the simple cases, except there's no pt_regs pointer. Which is fine, as lock_mm_and_find_vma() is set up to work fine with a NULL pt_regs. Powerpc already enabled LOCK_MM_AND_FIND_VMA for the main CPU faulting, so we can just use the helper without any extra work. Signed-off-by: Linus Torvalds Signed-off-by: Samuel Mendoza-Jonas Signed-off-by: David Woodhouse Signed-off-by: Greg Kroah-Hartman Change-Id: I5736f498b2f45625e46554520d3aeb679e680907 (cherry picked from commit 2cd76c50d0b41cec5c87abfcdf25b236a2793fb6) Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/mm/copro_fault.c | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c index 7c507fb48182..f49fd873df8d 100644 --- a/arch/powerpc/mm/copro_fault.c +++ b/arch/powerpc/mm/copro_fault.c @@ -33,19 +33,11 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, if (mm->pgd == NULL) return -EFAULT; - mmap_read_lock(mm); - ret = -EFAULT; - vma = find_vma(mm, ea); + vma = lock_mm_and_find_vma(mm, ea, NULL); if (!vma) - goto out_unlock; - - if (ea < vma->vm_start) { - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto out_unlock; - if (expand_stack(vma, ea)) - goto out_unlock; - } + return -EFAULT; + ret = -EFAULT; is_write = dsisr & DSISR_ISSTORE; if (is_write) { if (!(vma->vm_flags & VM_WRITE)) From 
1afccd42559716b6872319d97fb86343c89fb107 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Fri, 16 Jun 2023 15:58:54 -0700 Subject: [PATCH 060/163] UPSTREAM: mm: make find_extend_vma() fail if write lock not held commit f440fa1ac955e2898893f9301568435eb5cdfc4b upstream. Make calls to extend_vma() and find_extend_vma() fail if the write lock is required. To avoid making this a flag-day event, this still allows the old read-locking case for the trivial situations, and passes in a flag to say "is it write-locked". That way write-lockers can say "yes, I'm being careful", and legacy users will continue to work in all the common cases until they have been fully converted to the new world order. Co-Developed-by: Matthew Wilcox (Oracle) Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Liam R. Howlett Signed-off-by: Linus Torvalds Signed-off-by: Samuel Mendoza-Jonas Signed-off-by: David Woodhouse Signed-off-by: Greg Kroah-Hartman Change-Id: If12d2d68429b6d71393f02d5ed7e6939c3cd5405 (cherry picked from commit f440fa1ac955e2898893f9301568435eb5cdfc4b) Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- fs/binfmt_elf.c | 6 +++--- fs/exec.c | 5 +++-- include/linux/mm.h | 10 +++++++--- mm/memory.c | 2 +- mm/mmap.c | 50 ++++++++++++++++++++++++++++++---------------- mm/nommu.c | 3 ++- 6 files changed, 49 insertions(+), 27 deletions(-) diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 444302afc673..5688c3e6adc1 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -315,10 +315,10 @@ create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec, * Grow the stack manually; some architectures have a limit on how * far ahead a user-space access may be in order to grow the stack. 
*/ - if (mmap_read_lock_killable(mm)) + if (mmap_write_lock_killable(mm)) return -EINTR; - vma = find_extend_vma(mm, bprm->p); - mmap_read_unlock(mm); + vma = find_extend_vma_locked(mm, bprm->p, true); + mmap_write_unlock(mm); if (!vma) return -EFAULT; diff --git a/fs/exec.c b/fs/exec.c index 2d6bca1cda6e..c495f0f636e1 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -203,7 +203,8 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, #ifdef CONFIG_STACK_GROWSUP if (write) { - ret = expand_downwards(bprm->vma, pos); + /* We claim to hold the lock - nobody to race with */ + ret = expand_downwards(bprm->vma, pos, true); if (ret < 0) return NULL; } @@ -854,7 +855,7 @@ int setup_arg_pages(struct linux_binprm *bprm, stack_base = vma->vm_start - stack_expand; #endif current->mm->start_stack = bprm->p; - ret = expand_stack(vma, stack_base); + ret = expand_stack_locked(vma, stack_base, true); if (ret) ret = -EFAULT; diff --git a/include/linux/mm.h b/include/linux/mm.h index fa3de0b51a29..0d1c8a97cffa 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2995,11 +2995,13 @@ extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf); extern unsigned long stack_guard_gap; /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ -extern int expand_stack(struct vm_area_struct *vma, unsigned long address); +int expand_stack_locked(struct vm_area_struct *vma, unsigned long address, + bool write_locked); +#define expand_stack(vma,addr) expand_stack_locked(vma,addr,false) /* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */ -extern int expand_downwards(struct vm_area_struct *vma, - unsigned long address); +int expand_downwards(struct vm_area_struct *vma, unsigned long address, + bool write_locked); #if VM_GROWSUP extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); #else @@ -3100,6 +3102,8 @@ unsigned long change_prot_numa(struct vm_area_struct *vma, #endif struct vm_area_struct 
*find_extend_vma(struct mm_struct *, unsigned long addr); +struct vm_area_struct *find_extend_vma_locked(struct mm_struct *, + unsigned long addr, bool write_locked); int remap_pfn_range(struct vm_area_struct *, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t); int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, diff --git a/mm/memory.c b/mm/memory.c index cd3cfc7753a9..038f6bf49429 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5393,7 +5393,7 @@ struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, goto fail; } - if (expand_stack(vma, addr)) + if (expand_stack_locked(vma, addr, true)) goto fail; success: diff --git a/mm/mmap.c b/mm/mmap.c index 4eaf4762978c..e0de931f1d2c 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1988,7 +1988,8 @@ static int acct_stack_growth(struct vm_area_struct *vma, * PA-RISC uses this for its stack; IA64 for its Register Backing Store. * vma is the last one with address > vma->vm_end. Have to extend vma. */ -int expand_upwards(struct vm_area_struct *vma, unsigned long address) +int expand_upwards(struct vm_area_struct *vma, unsigned long address, + bool write_locked) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *next; @@ -2012,6 +2013,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) if (gap_addr < address || gap_addr > TASK_SIZE) gap_addr = TASK_SIZE; + if (!write_locked) + return -EAGAIN; next = find_vma_intersection(mm, vma->vm_end, gap_addr); if (next && vma_is_accessible(next)) { if (!(next->vm_flags & VM_GROWSUP)) @@ -2081,7 +2084,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) /* * vma is the first one with address < vma->vm_start. Have to extend vma. 
*/ -int expand_downwards(struct vm_area_struct *vma, unsigned long address) +int expand_downwards(struct vm_area_struct *vma, unsigned long address, + bool write_locked) { struct mm_struct *mm = vma->vm_mm; MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start); @@ -2095,10 +2099,13 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address) /* Enforce stack_guard_gap */ prev = mas_prev(&mas, 0); /* Check that both stack segments have the same anon_vma? */ - if (prev && !(prev->vm_flags & VM_GROWSDOWN) && - vma_is_accessible(prev)) { - if (address - prev->vm_end < stack_guard_gap) + if (prev) { + if (!(prev->vm_flags & VM_GROWSDOWN) && + vma_is_accessible(prev) && + (address - prev->vm_end < stack_guard_gap)) return -ENOMEM; + if (!write_locked && (prev->vm_end == address)) + return -EAGAIN; } mas_set_range(&mas, address, vma->vm_end - 1); @@ -2177,13 +2184,14 @@ static int __init cmdline_parse_stack_guard_gap(char *p) __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); #ifdef CONFIG_STACK_GROWSUP -int expand_stack(struct vm_area_struct *vma, unsigned long address) +int expand_stack_locked(struct vm_area_struct *vma, unsigned long address, + bool write_locked) { - return expand_upwards(vma, address); + return expand_upwards(vma, address, write_locked); } -struct vm_area_struct * -find_extend_vma(struct mm_struct *mm, unsigned long addr) +struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, + unsigned long addr, bool write_locked) { struct vm_area_struct *vma, *prev; @@ -2191,20 +2199,25 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) vma = find_vma_prev(mm, addr, &prev); if (vma && (vma->vm_start <= addr)) return vma; - if (!prev || expand_stack(prev, addr)) + if (!prev) + return NULL; + if (expand_stack_locked(prev, addr, write_locked)) return NULL; if (prev->vm_flags & VM_LOCKED) populate_vma_page_range(prev, addr, prev->vm_end, NULL); return prev; } #else -int expand_stack(struct vm_area_struct *vma, unsigned 
long address) +int expand_stack_locked(struct vm_area_struct *vma, unsigned long address, + bool write_locked) { - return expand_downwards(vma, address); + if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) + return -EINVAL; + return expand_downwards(vma, address, write_locked); } -struct vm_area_struct * -find_extend_vma(struct mm_struct *mm, unsigned long addr) +struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, + unsigned long addr, bool write_locked) { struct vm_area_struct *vma; unsigned long start; @@ -2215,10 +2228,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) return NULL; if (vma->vm_start <= addr) return vma; - if (!(vma->vm_flags & VM_GROWSDOWN)) - return NULL; start = vma->vm_start; - if (expand_stack(vma, addr)) + if (expand_stack_locked(vma, addr, write_locked)) return NULL; if (vma->vm_flags & VM_LOCKED) populate_vma_page_range(vma, addr, start, NULL); @@ -2226,6 +2237,11 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) } #endif +struct vm_area_struct *find_extend_vma(struct mm_struct *mm, + unsigned long addr) +{ + return find_extend_vma_locked(mm, addr, false); +} EXPORT_SYMBOL_GPL(find_extend_vma); /* diff --git a/mm/nommu.c b/mm/nommu.c index 14ffd4c668fe..f92438102b1d 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -694,7 +694,8 @@ struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) * expand a stack to a given address * - not supported under NOMMU conditions */ -int expand_stack(struct vm_area_struct *vma, unsigned long address) +int expand_stack_locked(struct vm_area_struct *vma, unsigned long address, + bool write_locked) { return -ENOMEM; } From c8ad906849609399c347b2f35130315ef025d945 Mon Sep 17 00:00:00 2001 From: jianzhou Date: Mon, 24 Jul 2023 08:16:59 -0700 Subject: [PATCH 061/163] ANDROID: abi_gki_aarch64_qcom: ufshcd_mcq_poll_cqe_lock Symbols added: ufshcd_mcq_poll_cqe_lock Bug: 292490611 Change-Id: I0e26f360c56d302f9f980c9d43b7a3cc80d3a616 Signed-off-by: jianzhou --- 
android/abi_gki_aarch64_qcom | 1 + 1 file changed, 1 insertion(+) diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom index b2ac2bce8d43..fcebe5582b85 100644 --- a/android/abi_gki_aarch64_qcom +++ b/android/abi_gki_aarch64_qcom @@ -3648,6 +3648,7 @@ ufshcd_hold ufshcd_mcq_config_esi ufshcd_mcq_enable_esi + ufshcd_mcq_poll_cqe_lock ufshcd_mcq_poll_cqe_nolock ufshcd_mcq_write_cqis ufshcd_pltfrm_init From 74efdc0966f53275bfb22ccda2e526c164b92790 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Mon, 19 Jun 2023 11:34:15 -0700 Subject: [PATCH 062/163] BACKPORT: execve: expand new process stack manually ahead of time commit f313c51d26aa87e69633c9b46efb37a930faca71 upstream. This is a small step towards a model where GUP itself would not expand the stack, and any user that needs GUP to not look up existing mappings, but actually expand on them, would have to do so manually before-hand, and with the mm lock held for writing. It turns out that execve() already did almost exactly that, except it didn't take the mm lock at all (it's single-threaded so no locking technically needed, but it could cause lockdep errors). And it only did it for the CONFIG_STACK_GROWSUP case, since in that case GUP has obviously never expanded the stack downwards. So just make that CONFIG_STACK_GROWSUP case do the right thing with locking, and enable it generally. This will eventually help GUP, and in the meantime avoids a special case and the lockdep issue. 
Signed-off-by: Linus Torvalds [6.1 Minor context from still having FOLL_FORCE flags set] Signed-off-by: Samuel Mendoza-Jonas Signed-off-by: David Woodhouse Signed-off-by: Greg Kroah-Hartman Change-Id: I24c652740dcfc674b0aef8e09ef72f09ad61254c (cherry picked from commit f313c51d26aa87e69633c9b46efb37a930faca71) Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- fs/exec.c | 37 +++++++++++++++++++++---------------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/fs/exec.c b/fs/exec.c index c495f0f636e1..763b03c89614 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -198,34 +198,39 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, int write) { struct page *page; + struct vm_area_struct *vma = bprm->vma; + struct mm_struct *mm = bprm->mm; int ret; - unsigned int gup_flags = FOLL_FORCE; -#ifdef CONFIG_STACK_GROWSUP - if (write) { - /* We claim to hold the lock - nobody to race with */ - ret = expand_downwards(bprm->vma, pos, true); - if (ret < 0) + /* + * Avoid relying on expanding the stack down in GUP (which + * does not work for STACK_GROWSUP anyway), and just do it + * by hand ahead of time. + */ + if (write && pos < vma->vm_start) { + mmap_write_lock(mm); + ret = expand_downwards(vma, pos, true); + if (unlikely(ret < 0)) { + mmap_write_unlock(mm); return NULL; - } -#endif - - if (write) - gup_flags |= FOLL_WRITE; + } + mmap_write_downgrade(mm); + } else + mmap_read_lock(mm); /* * We are doing an exec(). 'current' is the process - * doing the exec and bprm->mm is the new process's mm. + * doing the exec and 'mm' is the new process's mm. */ - mmap_read_lock(bprm->mm); - ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags, + ret = get_user_pages_remote(mm, pos, 1, + write ? 
FOLL_WRITE : 0, &page, NULL, NULL); - mmap_read_unlock(bprm->mm); + mmap_read_unlock(mm); if (ret <= 0) return NULL; if (write) - acct_arg_size(bprm, vma_pages(bprm->vma)); + acct_arg_size(bprm, vma_pages(vma)); return page; } From 188ce9572f119dd669d7408d154ffa6ad3029ad2 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 24 Jun 2023 13:45:51 -0700 Subject: [PATCH 063/163] BACKPORT: mm: always expand the stack with the mmap write lock held commit 8d7071af890768438c14db6172cc8f9f4d04e184 upstream This finishes the job of always holding the mmap write lock when extending the user stack vma, and removes the 'write_locked' argument from the vm helper functions again. For some cases, we just avoid expanding the stack at all: drivers and page pinning really shouldn't be extending any stacks. Let's see if any strange users really wanted that. It's worth noting that architectures that weren't converted to the new lock_mm_and_find_vma() helper function are left using the legacy "expand_stack()" function, but it has been changed to drop the mmap_lock and take it for writing while expanding the vma. This makes it fairly straightforward to convert the remaining architectures. As a result of dropping and re-taking the lock, the calling conventions for this function have also changed, since the old vma may no longer be valid. So it will now return the new vma if successful, and NULL - and the lock dropped - if the area could not be extended. 
Signed-off-by: Linus Torvalds [6.1: Patch drivers/iommu/io-pgfault.c instead] Signed-off-by: Samuel Mendoza-Jonas Signed-off-by: David Woodhouse Signed-off-by: Greg Kroah-Hartman [surenb: change in io-pgfault.c was done in iommu-sva.c] Change-Id: Icdcdded08d7ad4eda8fae1120a3c8b3d957516c1 (cherry picked from commit 8d7071af890768438c14db6172cc8f9f4d04e184) Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- arch/ia64/mm/fault.c | 36 ++--------- arch/m68k/mm/fault.c | 9 ++- arch/microblaze/mm/fault.c | 5 +- arch/openrisc/mm/fault.c | 5 +- arch/parisc/mm/fault.c | 23 ++++--- arch/s390/mm/fault.c | 5 +- arch/sparc/mm/fault_64.c | 8 ++- arch/um/kernel/trap.c | 11 ++-- drivers/iommu/amd/iommu_v2.c | 4 +- drivers/iommu/iommu-sva.c | 2 +- fs/binfmt_elf.c | 2 +- fs/exec.c | 4 +- include/linux/mm.h | 16 ++--- mm/gup.c | 6 +- mm/memory.c | 10 ++- mm/mmap.c | 121 ++++++++++++++++++++++++++++------- mm/nommu.c | 18 ++---- 17 files changed, 169 insertions(+), 116 deletions(-) diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index ef78c2d66cdd..99a09abe1d2c 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -110,10 +110,12 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re * register backing store that needs to expand upwards, in * this case vma will be null, but prev_vma will ne non-null */ - if (( !vma && prev_vma ) || (address < vma->vm_start) ) - goto check_expansion; + if (( !vma && prev_vma ) || (address < vma->vm_start) ) { + vma = expand_stack(mm, address); + if (!vma) + goto bad_area_nosemaphore; + } - good_area: code = SEGV_ACCERR; /* OK, we've got a good vm_area for this memory area. 
Check the access permissions: */ @@ -174,35 +176,9 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re mmap_read_unlock(mm); return; - check_expansion: - if (!(prev_vma && (prev_vma->vm_flags & VM_GROWSUP) && (address == prev_vma->vm_end))) { - if (!vma) - goto bad_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start) - || REGION_OFFSET(address) >= RGN_MAP_LIMIT) - goto bad_area; - if (expand_stack(vma, address)) - goto bad_area; - } else { - vma = prev_vma; - if (REGION_NUMBER(address) != REGION_NUMBER(vma->vm_start) - || REGION_OFFSET(address) >= RGN_MAP_LIMIT) - goto bad_area; - /* - * Since the register backing store is accessed sequentially, - * we disallow growing it by more than a page at a time. - */ - if (address > vma->vm_end + PAGE_SIZE - sizeof(long)) - goto bad_area; - if (expand_upwards(vma, address)) - goto bad_area; - } - goto good_area; - bad_area: mmap_read_unlock(mm); + bad_area_nosemaphore: if ((isr & IA64_ISR_SP) || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) { diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index 4d2837eb3e2a..6f62af8e293a 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -105,8 +105,9 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, if (address + 256 < rdusp()) goto map_err; } - if (expand_stack(vma, address)) - goto map_err; + vma = expand_stack(mm, address); + if (!vma) + goto map_err_nosemaphore; /* * Ok, we have a good vm_area for this memory access, so @@ -193,10 +194,12 @@ int do_page_fault(struct pt_regs *regs, unsigned long address, goto send_sig; map_err: + mmap_read_unlock(mm); +map_err_nosemaphore: current->thread.signo = SIGSEGV; current->thread.code = SEGV_MAPERR; current->thread.faddr = address; - goto send_sig; + return send_fault_sig(regs); acc_err: current->thread.signo = SIGSEGV; diff --git a/arch/microblaze/mm/fault.c 
b/arch/microblaze/mm/fault.c index 5c40c3ebe52f..a409bb3f09f7 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c @@ -192,8 +192,9 @@ void do_page_fault(struct pt_regs *regs, unsigned long address, && (kernel_mode(regs) || !store_updates_sp(regs))) goto bad_area; } - if (expand_stack(vma, address)) - goto bad_area; + vma = expand_stack(mm, address); + if (!vma) + goto bad_area_nosemaphore; good_area: code = SEGV_ACCERR; diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index b4762d66e9ef..e3ad46d02fbd 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c @@ -127,8 +127,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address, if (address + PAGE_SIZE < regs->sp) goto bad_area; } - if (expand_stack(vma, address)) - goto bad_area; + vma = expand_stack(mm, address); + if (!vma) + goto bad_area_nosemaphore; /* * Ok, we have a good vm_area for this memory access, so diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 869204e97ec9..1843b493910c 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -288,15 +288,19 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, retry: mmap_read_lock(mm); vma = find_vma_prev(mm, address, &prev_vma); - if (!vma || address < vma->vm_start) - goto check_expansion; + if (!vma || address < vma->vm_start) { + if (!prev || !(prev->vm_flags & VM_GROWSUP)) + goto bad_area; + vma = expand_stack(mm, address); + if (!vma) + goto bad_area_nosemaphore; + } + /* * Ok, we have a good vm_area for this memory access. We still need to * check the access permissions. */ -good_area: - if ((vma->vm_flags & acc_type) != acc_type) goto bad_area; @@ -342,17 +346,13 @@ void do_page_fault(struct pt_regs *regs, unsigned long code, mmap_read_unlock(mm); return; -check_expansion: - vma = prev_vma; - if (vma && (expand_stack(vma, address) == 0)) - goto good_area; - /* * Something tried to access memory that isn't in our memory map.. 
*/ bad_area: mmap_read_unlock(mm); +bad_area_nosemaphore: if (user_mode(regs)) { int signo, si_code; @@ -444,7 +444,7 @@ handle_nadtlb_fault(struct pt_regs *regs) { unsigned long insn = regs->iir; int breg, treg, xreg, val = 0; - struct vm_area_struct *vma, *prev_vma; + struct vm_area_struct *vma; struct task_struct *tsk; struct mm_struct *mm; unsigned long address; @@ -480,7 +480,7 @@ handle_nadtlb_fault(struct pt_regs *regs) /* Search for VMA */ address = regs->ior; mmap_read_lock(mm); - vma = find_vma_prev(mm, address, &prev_vma); + vma = vma_lookup(mm, address); mmap_read_unlock(mm); /* @@ -489,7 +489,6 @@ handle_nadtlb_fault(struct pt_regs *regs) */ acc_type = (insn & 0x40) ? VM_WRITE : VM_READ; if (vma - && address >= vma->vm_start && (vma->vm_flags & acc_type) == acc_type) val = 1; } diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 16223095045e..98a0091bb097 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -453,8 +453,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access) if (unlikely(vma->vm_start > address)) { if (!(vma->vm_flags & VM_GROWSDOWN)) goto out_up; - if (expand_stack(vma, address)) - goto out_up; + vma = expand_stack(mm, address); + if (!vma) + goto out; } /* diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 4acc12eafbf5..df685a241855 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -383,8 +383,9 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) goto bad_area; } } - if (expand_stack(vma, address)) - goto bad_area; + vma = expand_stack(mm, address); + if (!vma) + goto bad_area_nosemaphore; /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. @@ -482,8 +483,9 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) * Fix it, but check if it's kernel or user first.. 
*/ bad_area: - insn = get_fault_insn(regs, insn); mmap_read_unlock(mm); +bad_area_nosemaphore: + insn = get_fault_insn(regs, insn); handle_kernel_fault: do_kernel_fault(regs, si_code, fault_code, insn, address); diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index d3ce21c4ca32..6d8ae86ae978 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -47,14 +47,15 @@ int handle_page_fault(unsigned long address, unsigned long ip, vma = find_vma(mm, address); if (!vma) goto out; - else if (vma->vm_start <= address) + if (vma->vm_start <= address) goto good_area; - else if (!(vma->vm_flags & VM_GROWSDOWN)) + if (!(vma->vm_flags & VM_GROWSDOWN)) goto out; - else if (is_user && !ARCH_IS_STACKGROW(address)) - goto out; - else if (expand_stack(vma, address)) + if (is_user && !ARCH_IS_STACKGROW(address)) goto out; + vma = expand_stack(mm, address); + if (!vma) + goto out_nosemaphore; good_area: *code_out = SEGV_ACCERR; diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index 9f7fab49a5a9..75355ddca657 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -485,8 +485,8 @@ static void do_fault(struct work_struct *work) flags |= FAULT_FLAG_REMOTE; mmap_read_lock(mm); - vma = find_extend_vma(mm, address); - if (!vma || address < vma->vm_start) + vma = vma_lookup(mm, address); + if (!vma) /* failed to get a vma in the right range */ goto out; diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c index 24bf9b2b58aa..ed5f11eb92e5 100644 --- a/drivers/iommu/iommu-sva.c +++ b/drivers/iommu/iommu-sva.c @@ -203,7 +203,7 @@ iommu_sva_handle_iopf(struct iommu_fault *fault, void *data) mmap_read_lock(mm); - vma = find_extend_vma(mm, prm->addr); + vma = vma_lookup(mm, prm->addr); if (!vma) /* Unmapped area */ goto out_put_mm; diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 5688c3e6adc1..e6c9c0e08448 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -317,7 +317,7 @@ create_elf_tables(struct linux_binprm 
*bprm, const struct elfhdr *exec, */ if (mmap_write_lock_killable(mm)) return -EINTR; - vma = find_extend_vma_locked(mm, bprm->p, true); + vma = find_extend_vma_locked(mm, bprm->p); mmap_write_unlock(mm); if (!vma) return -EFAULT; diff --git a/fs/exec.c b/fs/exec.c index 763b03c89614..ef93a4d5911b 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -209,7 +209,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, */ if (write && pos < vma->vm_start) { mmap_write_lock(mm); - ret = expand_downwards(vma, pos, true); + ret = expand_downwards(vma, pos); if (unlikely(ret < 0)) { mmap_write_unlock(mm); return NULL; @@ -860,7 +860,7 @@ int setup_arg_pages(struct linux_binprm *bprm, stack_base = vma->vm_start - stack_expand; #endif current->mm->start_stack = bprm->p; - ret = expand_stack_locked(vma, stack_base, true); + ret = expand_stack_locked(vma, stack_base); if (ret) ret = -EFAULT; diff --git a/include/linux/mm.h b/include/linux/mm.h index 0d1c8a97cffa..a9a1b9f9f97c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2995,18 +2995,11 @@ extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf); extern unsigned long stack_guard_gap; /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */ -int expand_stack_locked(struct vm_area_struct *vma, unsigned long address, - bool write_locked); -#define expand_stack(vma,addr) expand_stack_locked(vma,addr,false) +int expand_stack_locked(struct vm_area_struct *vma, unsigned long address); +struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr); /* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */ -int expand_downwards(struct vm_area_struct *vma, unsigned long address, - bool write_locked); -#if VM_GROWSUP -extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); -#else - #define expand_upwards(vma, address) (0) -#endif +int expand_downwards(struct vm_area_struct *vma, unsigned long address); /* Look up the first VMA 
which satisfies addr < vm_end, NULL if none. */ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); @@ -3101,9 +3094,8 @@ unsigned long change_prot_numa(struct vm_area_struct *vma, unsigned long start, unsigned long end); #endif -struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); struct vm_area_struct *find_extend_vma_locked(struct mm_struct *, - unsigned long addr, bool write_locked); + unsigned long addr); int remap_pfn_range(struct vm_area_struct *, unsigned long addr, unsigned long pfn, unsigned long size, pgprot_t); int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, diff --git a/mm/gup.c b/mm/gup.c index 028f3b4e8c3f..f4911ddd3070 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1182,7 +1182,7 @@ static long __get_user_pages(struct mm_struct *mm, /* first iteration or cross vma bound */ if (!vma || start >= vma->vm_end) { - vma = find_extend_vma(mm, start); + vma = vma_lookup(mm, start); if (!vma && in_gate_area(mm, start)) { ret = get_gate_page(mm, start & PAGE_MASK, gup_flags, &vma, @@ -1351,8 +1351,8 @@ int fixup_user_fault(struct mm_struct *mm, fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; retry: - vma = find_extend_vma(mm, address); - if (!vma || address < vma->vm_start) + vma = vma_lookup(mm, address); + if (!vma) return -EFAULT; if (!vma_permits_fault(vma, fault_flags)) diff --git a/mm/memory.c b/mm/memory.c index 038f6bf49429..8f225f33a85c 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5393,7 +5393,7 @@ struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm, goto fail; } - if (expand_stack_locked(vma, addr, true)) + if (expand_stack_locked(vma, addr)) goto fail; success: @@ -5738,6 +5738,14 @@ int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, if (mmap_read_lock_killable(mm)) return 0; + /* We might need to expand the stack to access it */ + vma = vma_lookup(mm, addr); + if (!vma) { + vma = expand_stack(mm, addr); + if (!vma) 
+ return 0; + } + /* ignore errors, just check how much was successfully transferred */ while (len) { int bytes, ret, offset; diff --git a/mm/mmap.c b/mm/mmap.c index e0de931f1d2c..75703bcea8a7 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1988,8 +1988,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, * PA-RISC uses this for its stack; IA64 for its Register Backing Store. * vma is the last one with address > vma->vm_end. Have to extend vma. */ -int expand_upwards(struct vm_area_struct *vma, unsigned long address, - bool write_locked) +static int expand_upwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *next; @@ -2013,8 +2012,6 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address, if (gap_addr < address || gap_addr > TASK_SIZE) gap_addr = TASK_SIZE; - if (!write_locked) - return -EAGAIN; next = find_vma_intersection(mm, vma->vm_end, gap_addr); if (next && vma_is_accessible(next)) { if (!(next->vm_flags & VM_GROWSUP)) @@ -2083,15 +2080,18 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address, /* * vma is the first one with address < vma->vm_start. Have to extend vma. + * mmap_lock held for writing. 
*/ -int expand_downwards(struct vm_area_struct *vma, unsigned long address, - bool write_locked) +int expand_downwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start); struct vm_area_struct *prev; int error = 0; + if (!(vma->vm_flags & VM_GROWSDOWN)) + return -EFAULT; + address &= PAGE_MASK; if (address < mmap_min_addr || address < FIRST_USER_ADDRESS) return -EPERM; @@ -2104,8 +2104,6 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address, vma_is_accessible(prev) && (address - prev->vm_end < stack_guard_gap)) return -ENOMEM; - if (!write_locked && (prev->vm_end == address)) - return -EAGAIN; } mas_set_range(&mas, address, vma->vm_end - 1); @@ -2184,14 +2182,12 @@ static int __init cmdline_parse_stack_guard_gap(char *p) __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap); #ifdef CONFIG_STACK_GROWSUP -int expand_stack_locked(struct vm_area_struct *vma, unsigned long address, - bool write_locked) +int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) { - return expand_upwards(vma, address, write_locked); + return expand_upwards(vma, address); } -struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, - unsigned long addr, bool write_locked) +struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma, *prev; @@ -2201,23 +2197,21 @@ struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, return vma; if (!prev) return NULL; - if (expand_stack_locked(prev, addr, write_locked)) + if (expand_stack_locked(prev, addr)) return NULL; if (prev->vm_flags & VM_LOCKED) populate_vma_page_range(prev, addr, prev->vm_end, NULL); return prev; } #else -int expand_stack_locked(struct vm_area_struct *vma, unsigned long address, - bool write_locked) +int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) { if (unlikely(!(vma->vm_flags & 
VM_GROWSDOWN))) return -EINVAL; - return expand_downwards(vma, address, write_locked); + return expand_downwards(vma, address); } -struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, - unsigned long addr, bool write_locked) +struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr) { struct vm_area_struct *vma; unsigned long start; @@ -2229,7 +2223,7 @@ struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, if (vma->vm_start <= addr) return vma; start = vma->vm_start; - if (expand_stack_locked(vma, addr, write_locked)) + if (expand_stack_locked(vma, addr)) return NULL; if (vma->vm_flags & VM_LOCKED) populate_vma_page_range(vma, addr, start, NULL); @@ -2237,12 +2231,91 @@ struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, } #endif -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, - unsigned long addr) +/* + * IA64 has some horrid mapping rules: it can expand both up and down, + * but with various special rules. + * + * We'll get rid of this architecture eventually, so the ugliness is + * temporary. + */ +#ifdef CONFIG_IA64 +static inline bool vma_expand_ok(struct vm_area_struct *vma, unsigned long addr) { - return find_extend_vma_locked(mm, addr, false); + return REGION_NUMBER(addr) == REGION_NUMBER(vma->vm_start) && + REGION_OFFSET(addr) < RGN_MAP_LIMIT; +} + +/* + * IA64 stacks grow down, but there's a special register backing store + * that can grow up. Only sequentially, though, so the new address must + * match vm_end. 
+ */ +static inline int vma_expand_up(struct vm_area_struct *vma, unsigned long addr) +{ + if (!vma_expand_ok(vma, addr)) + return -EFAULT; + if (vma->vm_end != (addr & PAGE_MASK)) + return -EFAULT; + return expand_upwards(vma, addr); +} + +static inline bool vma_expand_down(struct vm_area_struct *vma, unsigned long addr) +{ + if (!vma_expand_ok(vma, addr)) + return -EFAULT; + return expand_downwards(vma, addr); +} + +#elif defined(CONFIG_STACK_GROWSUP) + +#define vma_expand_up(vma,addr) expand_upwards(vma, addr) +#define vma_expand_down(vma, addr) (-EFAULT) + +#else + +#define vma_expand_up(vma,addr) (-EFAULT) +#define vma_expand_down(vma, addr) expand_downwards(vma, addr) + +#endif + +/* + * expand_stack(): legacy interface for page faulting. Don't use unless + * you have to. + * + * This is called with the mm locked for reading, drops the lock, takes + * the lock for writing, tries to look up a vma again, expands it if + * necessary, and downgrades the lock to reading again. + * + * If no vma is found or it can't be expanded, it returns NULL and has + * dropped the lock. 
+ */ +struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr) +{ + struct vm_area_struct *vma, *prev; + + mmap_read_unlock(mm); + if (mmap_write_lock_killable(mm)) + return NULL; + + vma = find_vma_prev(mm, addr, &prev); + if (vma && vma->vm_start <= addr) + goto success; + + if (prev && !vma_expand_up(prev, addr)) { + vma = prev; + goto success; + } + + if (vma && !vma_expand_down(vma, addr)) + goto success; + + mmap_write_unlock(mm); + return NULL; + +success: + mmap_write_downgrade(mm); + return vma; } -EXPORT_SYMBOL_GPL(find_extend_vma); /* * Ok - we have the memory areas we should free on a maple tree so release them, diff --git a/mm/nommu.c b/mm/nommu.c index f92438102b1d..30cc1228bd06 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -681,25 +681,21 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) } EXPORT_SYMBOL(find_vma); -/* - * find a VMA - * - we don't extend stack VMAs under NOMMU conditions - */ -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) -{ - return find_vma(mm, addr); -} - /* * expand a stack to a given address * - not supported under NOMMU conditions */ -int expand_stack_locked(struct vm_area_struct *vma, unsigned long address, - bool write_locked) +int expand_stack_locked(struct vm_area_struct *vma, unsigned long addr) { return -ENOMEM; } +struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr) +{ + mmap_read_unlock(mm); + return NULL; +} + /* * look up the first VMA exactly that exactly matches addr * - should be called with mm->mmap_lock at least held readlocked From c0ba567af11a6fdb7686fb43a60b9e0bd6c6a744 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 27 Jul 2023 09:49:15 +0000 Subject: [PATCH 064/163] ANDROID: GKI: bring back find_extend_vma() In commit 8d7071af8907 ("mm: always expand the stack with the mmap write lock held"), find_extend_vma() was no longer being used in the tree, so it was removed. 
Unfortunately some GKI external module is using this, so bring it back to allow things to continue to work. Bug: 161946584 Fixes: 8d7071af8907 ("mm: always expand the stack with the mmap write lock held") Change-Id: I6f1fb1fd8193625fe3dac0bbc5b0aff653b3d879 Cc: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- include/linux/mm.h | 1 + mm/mmap.c | 14 ++++++++++++++ 2 files changed, 15 insertions(+) diff --git a/include/linux/mm.h b/include/linux/mm.h index a9a1b9f9f97c..62ce759bdcff 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3094,6 +3094,7 @@ unsigned long change_prot_numa(struct vm_area_struct *vma, unsigned long start, unsigned long end); #endif +struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); struct vm_area_struct *find_extend_vma_locked(struct mm_struct *, unsigned long addr); int remap_pfn_range(struct vm_area_struct *, unsigned long addr, diff --git a/mm/mmap.c b/mm/mmap.c index 75703bcea8a7..a6857acce59f 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2231,6 +2231,20 @@ struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned lon } #endif +/* + * ANDROID: Reintroduce find_extend_vma() as it's still used by some external + * modules. It was removed in commit 8d7071af8907 ("mm: always expand the + * stack with the mmap write lock held") + * In the future, everyone should just move to use the correct function instead + * of this old, legacy one. + */ +struct vm_area_struct *find_extend_vma(struct mm_struct *mm, + unsigned long addr) +{ + return find_extend_vma_locked(mm, addr); +} +EXPORT_SYMBOL_GPL(find_extend_vma); + /* * IA64 has some horrid mapping rules: it can expand both up and down, * but with various special rules. 
From 05f7c7fe72fe3ac5b1cfdc3f4cfebf87f2f16442 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Sat, 8 Jul 2023 12:12:10 -0700 Subject: [PATCH 065/163] UPSTREAM: mm: lock a vma before stack expansion With recent changes necessitating mmap_lock to be held for write while expanding a stack, per-VMA locks should follow the same rules and be write-locked to prevent page faults into the VMA being expanded. Add the necessary locking. Cc: stable@vger.kernel.org Signed-off-by: Suren Baghdasaryan Signed-off-by: Linus Torvalds (cherry picked from commit c137381f71aec755fbf47cd4e9bd4dce752c054c) Change-Id: I3e6a8c89c1fb7c0669e1232176bb04ea6b09bc0a Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- mm/mmap.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mm/mmap.c b/mm/mmap.c index a6857acce59f..bf2298de8a0f 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2029,6 +2029,8 @@ static int expand_upwards(struct vm_area_struct *vma, unsigned long address) return -ENOMEM; } + /* Lock the VMA before expanding to prevent concurrent page faults */ + vma_start_write(vma); /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_lock in read mode. We need the @@ -2116,6 +2118,8 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address) return -ENOMEM; } + /* Lock the VMA before expanding to prevent concurrent page faults */ + vma_start_write(vma); /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_lock in read mode. We need the From e3601b25aeaefac9529f4d965ca359b03c459ba9 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Sat, 8 Jul 2023 12:12:11 -0700 Subject: [PATCH 066/163] UPSTREAM: mm: lock newly mapped VMA which can be modified after it becomes visible mmap_region adds a newly created VMA into VMA tree and might modify it afterwards before dropping the mmap_lock. 
This poses a problem for page faults handled under per-VMA locks because they don't take the mmap_lock and can stumble on this VMA while it's still being modified. Currently this does not pose a problem since post-addition modifications are done only for file-backed VMAs, which are not handled under per-VMA lock. However, once support for handling file-backed page faults with per-VMA locks is added, this will become a race. Fix this by write-locking the VMA before inserting it into the VMA tree. Other places where a new VMA is added into VMA tree do not modify it after the insertion, so do not need the same locking. Cc: stable@vger.kernel.org Signed-off-by: Suren Baghdasaryan Signed-off-by: Linus Torvalds (cherry picked from commit 33313a747e81af9f31d0d45de78c9397fa3655eb) Change-Id: I3bb6a7bc8dd579e11f9c18cbc8e4a6e7279bbfb2 Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- mm/mmap.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mm/mmap.c b/mm/mmap.c index bf2298de8a0f..26cff775c25a 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2902,6 +2902,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr, if (vma->vm_file) i_mmap_lock_write(vma->vm_file->f_mapping); + /* Lock the VMA since it is modified after insertion into VMA tree */ + vma_start_write(vma); mas_store_prealloc(&mas, vma); mm->map_count++; if (vma->vm_file) { From 0d9960403cb0fa936ab8dc34462ec446f3699c2e Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Sat, 8 Jul 2023 12:12:12 -0700 Subject: [PATCH 067/163] UPSTREAM: fork: lock VMAs of the parent process when forking MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When forking a child process, the parent write-protects anonymous pages and COW-shares them with the child being forked using copy_present_pte(). We must not take any concurrent page faults on the source vma's as they are being processed, as we expect both the vma and the pte's behind it to be stable. 
For example, the anon_vma_fork() expects the parents vma->anon_vma to not change during the vma copy. A concurrent page fault on a page newly marked read-only by the page copy might trigger wp_page_copy() and a anon_vma_prepare(vma) on the source vma, defeating the anon_vma_clone() that wasn't done because the parent vma originally didn't have an anon_vma, but we now might end up copying a pte entry for a page that has one. Before the per-vma lock based changes, the mmap_lock guaranteed exclusion with concurrent page faults. But now we need to do a vma_start_write() to make sure no concurrent faults happen on this vma while it is being processed. This fix can potentially regress some fork-heavy workloads. Kernel build time did not show noticeable regression on a 56-core machine while a stress test mapping 10000 VMAs and forking 5000 times in a tight loop shows ~5% regression. If such fork time regression is unacceptable, disabling CONFIG_PER_VMA_LOCK should restore its performance. Further optimizations are possible if this regression proves to be problematic. 
Suggested-by: David Hildenbrand Reported-by: Jiri Slaby Closes: https://lore.kernel.org/all/dbdef34c-3a07-5951-e1ae-e9c6e3cdf51b@kernel.org/ Reported-by: Holger Hoffstätte Closes: https://lore.kernel.org/all/b198d649-f4bf-b971-31d0-e8433ec2a34c@applied-asynchrony.com/ Reported-by: Jacob Young Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217624 Fixes: 0bff0aaea03e ("x86/mm: try VMA lock-based page fault handling first") Cc: stable@vger.kernel.org Signed-off-by: Suren Baghdasaryan Signed-off-by: Linus Torvalds (cherry picked from commit fb49c455323ff8319a123dd312be9082c49a23a5) Change-Id: Ic5aa9dc51a888b5b0319ec4ec6d2941424573ca0 Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- kernel/fork.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/fork.c b/kernel/fork.c index 67d61842d6b8..b890209ae115 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -694,6 +694,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, mas_for_each(&old_mas, mpnt, ULONG_MAX) { struct file *file; + vma_start_write(mpnt); if (mpnt->vm_flags & VM_DONTCOPY) { vm_stat_account(mm, mpnt->vm_flags, -vma_pages(mpnt)); continue; From 371f8d901ab5b93cae16b0506121e8aa8bcc83ba Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Sat, 8 Jul 2023 16:04:00 -0700 Subject: [PATCH 068/163] UPSTREAM: mm: lock newly mapped VMA with corrected ordering Lockdep is certainly right to complain about (&vma->vm_lock->lock){++++}-{3:3}, at: vma_start_write+0x2d/0x3f but task is already holding lock: (&mapping->i_mmap_rwsem){+.+.}-{3:3}, at: mmap_region+0x4dc/0x6db Invert those to the usual ordering. 
Fixes: 33313a747e81 ("mm: lock newly mapped VMA which can be modified after it becomes visible") Cc: stable@vger.kernel.org Signed-off-by: Hugh Dickins Tested-by: Suren Baghdasaryan Signed-off-by: Linus Torvalds (cherry picked from commit 1c7873e3364570ec89343ff4877e0f27a7b21a61) Change-Id: I85f9cfb6ee8f3d9fefda5518c5637a7dff64bac3 Signed-off-by: Suren Baghdasaryan Signed-off-by: Greg Kroah-Hartman --- mm/mmap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 26cff775c25a..9a61b1ce8b76 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2899,11 +2899,11 @@ unsigned long mmap_region(struct file *file, unsigned long addr, goto free_vma; } + /* Lock the VMA since it is modified after insertion into VMA tree */ + vma_start_write(vma); if (vma->vm_file) i_mmap_lock_write(vma->vm_file->f_mapping); - /* Lock the VMA since it is modified after insertion into VMA tree */ - vma_start_write(vma); mas_store_prealloc(&mas, vma); mm->map_count++; if (vma->vm_file) { From a89e2cbbc02613cbdfcc8b32efe499ecbd4bbabe Mon Sep 17 00:00:00 2001 From: wangshuai12 Date: Tue, 11 Jul 2023 16:08:29 +0800 Subject: [PATCH 069/163] ANDROID: GKI: update xiaomi symbol list 1 function symbol(s) added 'int __blk_mq_debugfs_rq_show(struct seq_file*, struct request*)' Bug: 290730657 Change-Id: Ib3711e9e875e3d6ccc809a87c607fae149159a58 Signed-off-by: wangshuai12 --- android/abi_gki_aarch64.stg | 16 ++++++++++++++++ android/abi_gki_aarch64_xiaomi | 6 ++++++ 2 files changed, 22 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index ec63c0c4c9fe..1e43b6fb21c5 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -309488,6 +309488,12 @@ function { parameter_id: 0x3e10b518 parameter_id: 0xe5e56f65 } +function { + id: 0x9c639284 + return_type_id: 0x6720d32f + parameter_id: 0x0665e6b6 + parameter_id: 0x1e820193 +} function { id: 0x9c660c95 return_type_id: 0x6720d32f @@ -319797,6 +319803,15 @@ elf_symbol { 
type_id: 0x475eeec2 full_name: "__blk_mq_alloc_disk" } +elf_symbol { + id: 0xcc33f78c + name: "__blk_mq_debugfs_rq_show" + is_defined: true + symbol_type: FUNCTION + crc: 0xe3d3f445 + type_id: 0x9c639284 + full_name: "__blk_mq_debugfs_rq_show" +} elf_symbol { id: 0x01badff0 name: "__blk_mq_end_request" @@ -380352,6 +380367,7 @@ interface { symbol_id: 0xbceb9c07 symbol_id: 0xe70766b6 symbol_id: 0xb339c336 + symbol_id: 0xcc33f78c symbol_id: 0x01badff0 symbol_id: 0x4df0b385 symbol_id: 0x35aa1afd diff --git a/android/abi_gki_aarch64_xiaomi b/android/abi_gki_aarch64_xiaomi index 8209fb5955f2..f89ba44c4afe 100644 --- a/android/abi_gki_aarch64_xiaomi +++ b/android/abi_gki_aarch64_xiaomi @@ -218,6 +218,12 @@ kernfs_path_from_node blkcg_activate_policy +#required by mq-deadline module + blk_mq_debugfs_rq_show + seq_list_start + seq_list_next + __blk_mq_debugfs_rq_show + #required by metis.ko module __traceiter_android_vh_rwsem_read_wait_start __traceiter_android_vh_rwsem_write_wait_start From d3b37a712ab7a5d73a2c66156debe2e3d48df007 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Tue, 4 Jul 2023 17:50:34 +0200 Subject: [PATCH 070/163] BACKPORT: FROMGIT: irqchip/gic-v3: Workaround for GIC-700 erratum 2941627 GIC700 erratum 2941627 may cause GIC-700 missing SPIs wake requests when SPIs are deactivated while targeting a sleeping CPU - ie a CPU for which the redistributor: GICR_WAKER.ProcessorSleep == 1 This runtime situation can happen if an SPI that has been activated on a core is retargeted to a different core, it becomes pending and the target core subsequently enters a power state quiescing the respective redistributor. When this situation is hit, the de-activation carried out on the core that activated the SPI (through either ICC_EOIR1_EL1 or ICC_DIR_EL1 register writes) does not trigger a wake requests for the sleeping GIC redistributor even if the SPI is pending. 
Work around the erratum by de-activating the SPI using the redistributor GICD_ICACTIVER register if the runtime conditions require it (ie the IRQ was retargeted between activation and de-activation). Bug: 292459437 Change-Id: Ide915b8c925a631a7fc9ccebca19d9175def162e Signed-off-by: Lorenzo Pieralisi Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230704155034.148262-1-lpieralisi@kernel.org (cherry picked from commit 6fe5c68ee6a1aae0ef291a56001e7888de547fa2 https://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms.git irq/irqchip-fixes) Signed-off-by: Carlos Galo --- Documentation/arm64/silicon-errata.rst | 3 ++ drivers/irqchip/irq-gic-v3.c | 62 +++++++++++++++++++++++++- 2 files changed, 64 insertions(+), 1 deletion(-) diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst index 808ade4cc008..3283f49006d6 100644 --- a/Documentation/arm64/silicon-errata.rst +++ b/Documentation/arm64/silicon-errata.rst @@ -139,6 +139,9 @@ stable kernels. 
| ARM | MMU-500 | #841119,826419 | N/A | +----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+ +| ARM | GIC-700 | #2941627 | ARM64_ERRATUM_2941627 | ++----------------+-----------------+-----------------+-----------------------------+ ++----------------+-----------------+-----------------+-----------------------------+ | Broadcom | Brahma-B53 | N/A | ARM64_ERRATUM_845719 | +----------------+-----------------+-----------------+-----------------------------+ | Broadcom | Brahma-B53 | N/A | ARM64_ERRATUM_843419 | diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 1684d19d46fb..a9d3d0e45d28 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -50,6 +50,8 @@ struct redist_region { static struct gic_chip_data_v3 gic_data __read_mostly; static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); +static DEFINE_STATIC_KEY_FALSE(gic_arm64_2941627_erratum); + #define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer)) #define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U) #define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer) @@ -547,10 +549,39 @@ static void gic_irq_nmi_teardown(struct irq_data *d) gic_irq_set_prio(d, GICD_INT_DEF_PRI); } +static bool gic_arm64_erratum_2941627_needed(struct irq_data *d) +{ + enum gic_intid_range range; + + if (!static_branch_unlikely(&gic_arm64_2941627_erratum)) + return false; + + range = get_intid_range(d); + + /* + * The workaround is needed if the IRQ is an SPI and + * the target cpu is different from the one we are + * executing on. 
+ */ + return (range == SPI_RANGE || range == ESPI_RANGE) && + !cpumask_test_cpu(raw_smp_processor_id(), + irq_data_get_effective_affinity_mask(d)); +} + static void gic_eoi_irq(struct irq_data *d) { write_gicreg(gic_irq(d), ICC_EOIR1_EL1); isb(); + + if (gic_arm64_erratum_2941627_needed(d)) { + /* + * Make sure the GIC stream deactivate packet + * issued by ICC_EOIR1_EL1 has completed before + * deactivating through GICD_IACTIVER. + */ + dsb(sy); + gic_poke_irq(d, GICD_ICACTIVER); + } } static void gic_eoimode1_eoi_irq(struct irq_data *d) @@ -561,7 +592,11 @@ static void gic_eoimode1_eoi_irq(struct irq_data *d) */ if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d)) return; - gic_write_dir(gic_irq(d)); + + if (!gic_arm64_erratum_2941627_needed(d)) + gic_write_dir(gic_irq(d)); + else + gic_poke_irq(d, GICD_ICACTIVER); } static int gic_set_type(struct irq_data *d, unsigned int type) @@ -1747,6 +1782,12 @@ static bool gic_enable_quirk_hip06_07(void *data) return false; } +static bool gic_enable_quirk_arm64_2941627(void *data) +{ + static_branch_enable(&gic_arm64_2941627_erratum); + return true; +} + static const struct gic_quirk gic_quirks[] = { { .desc = "GICv3: Qualcomm MSM8996 broken firmware", @@ -1778,6 +1819,25 @@ static const struct gic_quirk gic_quirks[] = { .mask = 0xe8f00fff, .init = gic_enable_quirk_cavium_38539, }, + { + /* + * GIC-700: 2941627 workaround - IP variant [0,1] + * + */ + .desc = "GICv3: ARM64 erratum 2941627", + .iidr = 0x0400043b, + .mask = 0xff0e0fff, + .init = gic_enable_quirk_arm64_2941627, + }, + { + /* + * GIC-700: 2941627 workaround - IP variant [2] + */ + .desc = "GICv3: ARM64 erratum 2941627", + .iidr = 0x0402043b, + .mask = 0xff0f0fff, + .init = gic_enable_quirk_arm64_2941627, + }, { } }; From 890b1aabb1f6796da62141334728b8f5e9faac2a Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Wed, 26 Jul 2023 23:41:03 +0200 Subject: [PATCH 071/163] BACKPORT: mm: lock_vma_under_rcu() must check vma->anon_vma under vma lock 
lock_vma_under_rcu() tries to guarantee that __anon_vma_prepare() can't be called in the VMA-locked page fault path by ensuring that vma->anon_vma is set. However, this check happens before the VMA is locked, which means a concurrent move_vma() can concurrently call unlink_anon_vmas(), which disassociates the VMA's anon_vma. This means we can get UAF in the following scenario: THREAD 1 THREAD 2 ======== ======== lock_vma_under_rcu() rcu_read_lock() mas_walk() check vma->anon_vma mremap() syscall move_vma() vma_start_write() unlink_anon_vmas() handle_mm_fault() __handle_mm_fault() handle_pte_fault() do_pte_missing() do_anonymous_page() anon_vma_prepare() __anon_vma_prepare() find_mergeable_anon_vma() mas_walk() [looks up VMA X] munmap() syscall (deletes VMA X) reusable_anon_vma() [called on freed VMA X] This is a security bug if you can hit it, although an attacker would have to win two races at once where the first race window is only a few instructions wide. This patch is based on some previous discussion with Linus Torvalds on the security list. 
Cc: stable@vger.kernel.org Fixes: 5e31275cc997 ("mm: add per-VMA lock and helper functions to control it") Signed-off-by: Jann Horn Signed-off-by: Linus Torvalds Bug: 293665307 (cherry picked from commit 657b5146955eba331e01b9a6ae89ce2e716ba306) [surenb: removed vma_is_tcp() call not present in 6.1] Change-Id: I4bd91e1db337ff35eb7c1d436f4372944556dd7d Signed-off-by: Suren Baghdasaryan --- mm/memory.c | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 8f225f33a85c..e9b7cd28ae02 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5428,27 +5428,28 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, if (!vma_is_anonymous(vma)) goto inval; - /* find_mergeable_anon_vma uses adjacent vmas which are not locked */ - if (!vma->anon_vma) - goto inval; - if (!vma_start_read(vma)) goto inval; + /* + * find_mergeable_anon_vma uses adjacent vmas which are not locked. + * This check must happen after vma_start_read(); otherwise, a + * concurrent mremap() with MREMAP_DONTUNMAP could dissociate the VMA + * from its anon_vma. + */ + if (unlikely(!vma->anon_vma)) + goto inval_end_read; + /* * Due to the possibility of userfault handler dropping mmap_lock, avoid * it for now and fall back to page fault handling under mmap_lock. 
*/ - if (userfaultfd_armed(vma)) { - vma_end_read(vma); - goto inval; - } + if (userfaultfd_armed(vma)) + goto inval_end_read; /* Check since vm_start/vm_end might change before we lock the VMA */ - if (unlikely(address < vma->vm_start || address >= vma->vm_end)) { - vma_end_read(vma); - goto inval; - } + if (unlikely(address < vma->vm_start || address >= vma->vm_end)) + goto inval_end_read; /* Check if the VMA got isolated after we found it */ if (vma->detached) { @@ -5460,6 +5461,9 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, rcu_read_unlock(); return vma; + +inval_end_read: + vma_end_read(vma); inval: rcu_read_unlock(); count_vm_vma_lock_event(VMA_LOCK_ABORT); From f5c707dc65dfa366e13c2edf25e44378d6e302eb Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Fri, 28 Jul 2023 06:13:21 +0200 Subject: [PATCH 072/163] UPSTREAM: mm/mempolicy: Take VMA lock before replacing policy mbind() calls down into vma_replace_policy() without taking the per-VMA locks, replaces the VMA's vma->vm_policy pointer, and frees the old policy. That's bad; a concurrent page fault might still be using the old policy (in vma_alloc_folio()), resulting in use-after-free. Normally this will manifest as a use-after-free read first, but it can result in memory corruption, including because vma_alloc_folio() can call mpol_cond_put() on the freed policy, which conditionally changes the policy's refcount member. This bug is specific to CONFIG_NUMA, but it does also affect non-NUMA systems as long as the kernel was built with CONFIG_NUMA. 
Signed-off-by: Jann Horn Reviewed-by: Suren Baghdasaryan Fixes: 5e31275cc997 ("mm: add per-VMA lock and helper functions to control it") Cc: stable@kernel.org Signed-off-by: Linus Torvalds Bug: 293665307 (cherry picked from commit 6c21e066f9256ea1df6f88768f6ae1080b7cf509) Change-Id: I2e3a4ee8bad97457ee3e127694f0609e7a240a2f Signed-off-by: Suren Baghdasaryan --- mm/mempolicy.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index f940395667c8..cd2fc238c24d 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -384,8 +384,10 @@ void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new) VMA_ITERATOR(vmi, mm, 0); mmap_write_lock(mm); - for_each_vma(vmi, vma) + for_each_vma(vmi, vma) { + vma_start_write(vma); mpol_rebind_policy(vma->vm_policy, new); + } mmap_write_unlock(mm); } @@ -759,6 +761,8 @@ static int vma_replace_policy(struct vm_area_struct *vma, struct mempolicy *old; struct mempolicy *new; + vma_assert_write_locked(vma); + pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n", vma->vm_start, vma->vm_end, vma->vm_pgoff, vma->vm_ops, vma->vm_file, @@ -1259,6 +1263,8 @@ static long do_mbind(unsigned long start, unsigned long len, nodemask_t *nmask, unsigned long flags) { struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + struct vma_iterator vmi; struct mempolicy *new; unsigned long end; int err; @@ -1320,6 +1326,14 @@ static long do_mbind(unsigned long start, unsigned long len, if (err) goto mpol_out; + /* + * Lock the VMAs before scanning for pages to migrate, to ensure we don't + * miss a concurrently inserted page. 
+ */ + vma_iter_init(&vmi, mm, start); + for_each_vma_range(vmi, vma, end) + vma_start_write(vma); + ret = queue_pages_range(mm, start, end, nmask, flags | MPOL_MF_INVERT, &pagelist); @@ -1546,6 +1560,7 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le break; } + vma_start_write(vma); new->home_node = home_node; err = mbind_range(mm, vmstart, vmend, new); mpol_put(new); From e341d2312c5faec4b5652650a87f2c2e8ef8e818 Mon Sep 17 00:00:00 2001 From: xieliujie Date: Mon, 31 Jul 2023 09:49:44 +0800 Subject: [PATCH 073/163] ANDROID: ABI: Update oplus symbol list 3 function symbol(s) added 'int __traceiter_android_rvh_rtmutex_force_update(void*, struct task_struct*, struct task_struct*, int*)' 'int __traceiter_android_vh_rtmutex_waiter_prio(void*, struct task_struct*, int*)' 'int __traceiter_android_vh_task_blocks_on_rtmutex(void*, struct rt_mutex_base*, struct rt_mutex_waiter*, struct task_struct*, struct ww_acquire_ctx*, unsigned int*)' 3 variable symbol(s) added 'struct tracepoint __tracepoint_android_rvh_rtmutex_force_update' 'struct tracepoint __tracepoint_android_vh_rtmutex_waiter_prio' 'struct tracepoint __tracepoint_android_vh_task_blocks_on_rtmutex' Bug: 290585456 Change-Id: I4af3d1c8df44822b7f5fd5d5682e65d7c6c4dcc3 Signed-off-by: xieliujie --- android/abi_gki_aarch64.stg | 85 +++++++++++++++++++++++++++++++++++ android/abi_gki_aarch64_oplus | 16 ++++--- 2 files changed, 96 insertions(+), 5 deletions(-) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 1e43b6fb21c5..8e0c51ae4146 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -308677,6 +308677,14 @@ function { parameter_id: 0x1d19a9d5 parameter_id: 0x1c3dbe5a } +function { + id: 0x9bdcd7ce + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x1d19a9d5 + parameter_id: 0x1d19a9d5 + parameter_id: 0x13580d6c +} function { id: 0x9bdcf60d return_type_id: 0x6720d32f @@ -308783,6 +308791,13 @@ function { 
parameter_id: 0x1d5bae2a parameter_id: 0x11cfee5a } +function { + id: 0x9bdf0ac7 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x1d19a9d5 + parameter_id: 0x13580d6c +} function { id: 0x9bdfa419 return_type_id: 0x6720d32f @@ -308877,6 +308892,16 @@ function { parameter_id: 0x2a670b41 parameter_id: 0x1c898f28 } +function { + id: 0x9be67f35 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x1013df15 + parameter_id: 0x27d4bd81 + parameter_id: 0x1d19a9d5 + parameter_id: 0x2c32dd96 + parameter_id: 0x1bf16028 +} function { id: 0x9be6a9ad return_type_id: 0x6720d32f @@ -323116,6 +323141,15 @@ elf_symbol { type_id: 0x9b427bba full_name: "__traceiter_android_rvh_revert_creds" } +elf_symbol { + id: 0xf0ffb4d4 + name: "__traceiter_android_rvh_rtmutex_force_update" + is_defined: true + symbol_type: FUNCTION + crc: 0xe3eba434 + type_id: 0x9bdcd7ce + full_name: "__traceiter_android_rvh_rtmutex_force_update" +} elf_symbol { id: 0xd90a9a58 name: "__traceiter_android_rvh_rtmutex_prepare_setprio" @@ -324754,6 +324788,15 @@ elf_symbol { type_id: 0x9beff51f full_name: "__traceiter_android_vh_rtmutex_wait_start" } +elf_symbol { + id: 0xc56d7179 + name: "__traceiter_android_vh_rtmutex_waiter_prio" + is_defined: true + symbol_type: FUNCTION + crc: 0x40a0002c + type_id: 0x9bdf0ac7 + full_name: "__traceiter_android_vh_rtmutex_waiter_prio" +} elf_symbol { id: 0x5858f827 name: "__traceiter_android_vh_rwsem_can_spin_on_owner" @@ -325060,6 +325103,15 @@ elf_symbol { type_id: 0x9bcd4ff7 full_name: "__traceiter_android_vh_sysrq_crash" } +elf_symbol { + id: 0xdd9dd67b + name: "__traceiter_android_vh_task_blocks_on_rtmutex" + is_defined: true + symbol_type: FUNCTION + crc: 0x698af67b + type_id: 0x9be67f35 + full_name: "__traceiter_android_vh_task_blocks_on_rtmutex" +} elf_symbol { id: 0x6befbf23 name: "__traceiter_android_vh_thermal_power_cap" @@ -326293,6 +326345,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: 
"__tracepoint_android_rvh_revert_creds" } +elf_symbol { + id: 0xf2fd13ea + name: "__tracepoint_android_rvh_rtmutex_force_update" + is_defined: true + symbol_type: OBJECT + crc: 0xa86a5262 + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_rvh_rtmutex_force_update" +} elf_symbol { id: 0x69e37d02 name: "__tracepoint_android_rvh_rtmutex_prepare_setprio" @@ -327931,6 +327992,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_vh_rtmutex_wait_start" } +elf_symbol { + id: 0xeaebbadf + name: "__tracepoint_android_vh_rtmutex_waiter_prio" + is_defined: true + symbol_type: OBJECT + crc: 0x0fbb21e2 + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_rtmutex_waiter_prio" +} elf_symbol { id: 0xe471b8d5 name: "__tracepoint_android_vh_rwsem_can_spin_on_owner" @@ -328237,6 +328307,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_vh_sysrq_crash" } +elf_symbol { + id: 0xe5bf742d + name: "__tracepoint_android_vh_task_blocks_on_rtmutex" + is_defined: true + symbol_type: OBJECT + crc: 0x5494b8bf + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_task_blocks_on_rtmutex" +} elf_symbol { id: 0x6f25dd05 name: "__tracepoint_android_vh_thermal_power_cap" @@ -380735,6 +380814,7 @@ interface { symbol_id: 0xe3e24295 symbol_id: 0xaedef3a2 symbol_id: 0xde725472 + symbol_id: 0xf0ffb4d4 symbol_id: 0xd90a9a58 symbol_id: 0xbf64b0b6 symbol_id: 0xb25ca194 @@ -380917,6 +380997,7 @@ interface { symbol_id: 0x91384eff symbol_id: 0x3ef508a2 symbol_id: 0xfb1b8d64 + symbol_id: 0xc56d7179 symbol_id: 0x5858f827 symbol_id: 0xb1847a6f symbol_id: 0x958d8cdb @@ -380951,6 +381032,7 @@ interface { symbol_id: 0x58e7556b symbol_id: 0x2ecf85e9 symbol_id: 0x34a01a22 + symbol_id: 0xdd9dd67b symbol_id: 0x6befbf23 symbol_id: 0x226cc38b symbol_id: 0xeecc1529 @@ -381088,6 +381170,7 @@ interface { symbol_id: 0x18bac297 symbol_id: 0x1a849f34 symbol_id: 0x3f328d3c + symbol_id: 0xf2fd13ea symbol_id: 0x69e37d02 symbol_id: 0xeda5c5b0 symbol_id: 0x3cd58ada @@ 
-381270,6 +381353,7 @@ interface { symbol_id: 0x3fc5ffc9 symbol_id: 0xa3915d70 symbol_id: 0xf01f02ea + symbol_id: 0xeaebbadf symbol_id: 0xe471b8d5 symbol_id: 0x84628825 symbol_id: 0x8d0ce77d @@ -381304,6 +381388,7 @@ interface { symbol_id: 0x39e68fed symbol_id: 0xefb9e5a3 symbol_id: 0x3fe0157c + symbol_id: 0xe5bf742d symbol_id: 0x6f25dd05 symbol_id: 0xa5c71571 symbol_id: 0xfa3284c7 diff --git a/android/abi_gki_aarch64_oplus b/android/abi_gki_aarch64_oplus index e06e98d72020..ea61781c1543 100644 --- a/android/abi_gki_aarch64_oplus +++ b/android/abi_gki_aarch64_oplus @@ -86,6 +86,7 @@ tcf_exts_validate tcf_queue_work __traceiter_android_rvh_post_init_entity_util_avg + __traceiter_android_rvh_rtmutex_force_update __traceiter_android_vh_account_process_tick_gran __traceiter_android_vh_account_task_time __traceiter_android_vh_do_futex @@ -99,11 +100,6 @@ __traceiter_android_vh_record_pcpu_rwsem_starttime __traceiter_android_vh_record_rtmutex_lock_starttime __traceiter_android_vh_record_rwsem_lock_starttime - __tracepoint_android_vh_record_mutex_lock_starttime - __tracepoint_android_vh_record_pcpu_rwsem_starttime - __tracepoint_android_vh_record_rtmutex_lock_starttime - __tracepoint_android_vh_record_rwsem_lock_starttime - __trace_puts __traceiter_android_vh_alter_mutex_list_add __traceiter_android_vh_binder_free_proc __traceiter_android_vh_binder_has_work_ilocked @@ -136,6 +132,7 @@ __traceiter_android_vh_cleanup_old_buffers_bypass __traceiter_android_vh_dm_bufio_shrink_scan_bypass __traceiter_android_vh_mutex_unlock_slowpath + __traceiter_android_vh_rtmutex_waiter_prio __traceiter_android_vh_rwsem_can_spin_on_owner __traceiter_android_vh_rwsem_opt_spin_finish __traceiter_android_vh_rwsem_opt_spin_start @@ -143,6 +140,7 @@ __traceiter_android_vh_sched_stat_runtime_rt __traceiter_android_vh_shrink_node_memcgs __traceiter_android_vh_sync_txn_recvd + __traceiter_android_vh_task_blocks_on_rtmutex __traceiter_block_bio_queue __traceiter_block_getrq 
__traceiter_block_rq_complete @@ -157,6 +155,7 @@ __traceiter_sched_waking __traceiter_task_rename __tracepoint_android_rvh_post_init_entity_util_avg + __tracepoint_android_rvh_rtmutex_force_update __tracepoint_android_vh_account_process_tick_gran __tracepoint_android_vh_account_task_time __tracepoint_android_vh_alter_mutex_list_add @@ -198,6 +197,11 @@ __tracepoint_android_vh_cleanup_old_buffers_bypass __tracepoint_android_vh_dm_bufio_shrink_scan_bypass __tracepoint_android_vh_mutex_unlock_slowpath + __tracepoint_android_vh_record_mutex_lock_starttime + __tracepoint_android_vh_record_pcpu_rwsem_starttime + __tracepoint_android_vh_record_rtmutex_lock_starttime + __tracepoint_android_vh_record_rwsem_lock_starttime + __tracepoint_android_vh_rtmutex_waiter_prio __tracepoint_android_vh_rwsem_can_spin_on_owner __tracepoint_android_vh_rwsem_opt_spin_finish __tracepoint_android_vh_rwsem_opt_spin_start @@ -205,6 +209,7 @@ __tracepoint_android_vh_sched_stat_runtime_rt __tracepoint_android_vh_shrink_node_memcgs __tracepoint_android_vh_sync_txn_recvd + __tracepoint_android_vh_task_blocks_on_rtmutex __tracepoint_block_bio_queue __tracepoint_block_getrq __tracepoint_block_rq_complete @@ -218,6 +223,7 @@ __tracepoint_sched_stat_wait __tracepoint_sched_waking __tracepoint_task_rename + __trace_puts try_to_free_mem_cgroup_pages typec_mux_get_drvdata unregister_memory_notifier From 84ac22a0d31aec44cc2bc7d9e5bd94bf95c4c32d Mon Sep 17 00:00:00 2001 From: Daniel Rosenberg Date: Thu, 4 May 2023 15:43:42 -0700 Subject: [PATCH 074/163] ANDROID: fuse-bpf: Add partial ioctl support This adds passthrough only support for ioctls with fuse-bpf. compat_ioctls will return -ENOTTY. 
Bug: 279519292 Test: F2fsMiscTest#testAtomicWrite Change-Id: Ia3052e465d87dc1d15ae13955fba8a7f93bc387b Signed-off-by: Daniel Rosenberg --- fs/fuse/backing.c | 13 +++++++++++++ fs/fuse/fuse_i.h | 2 ++ fs/fuse/ioctl.c | 9 +++++++++ 3 files changed, 24 insertions(+) diff --git a/fs/fuse/backing.c b/fs/fuse/backing.c index 931c3397133c..1e403a090581 100644 --- a/fs/fuse/backing.c +++ b/fs/fuse/backing.c @@ -966,6 +966,19 @@ void *fuse_file_write_iter_finalize(struct fuse_bpf_args *fa, return ERR_PTR(fwio->ret); } +long fuse_backing_ioctl(struct file *file, unsigned int command, unsigned long arg, int flags) +{ + struct fuse_file *ff = file->private_data; + long ret; + + if (flags & FUSE_IOCTL_COMPAT) + ret = -ENOTTY; + else + ret = vfs_ioctl(ff->backing_file, command, arg); + + return ret; +} + int fuse_file_flock_backing(struct file *file, int cmd, struct file_lock *fl) { struct fuse_file *ff = file->private_data; diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 475442c9ad7e..90d38da7f4f6 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -1664,6 +1664,8 @@ int fuse_file_write_iter_backing(struct fuse_bpf_args *fa, void *fuse_file_write_iter_finalize(struct fuse_bpf_args *fa, struct kiocb *iocb, struct iov_iter *from); +long fuse_backing_ioctl(struct file *file, unsigned int command, unsigned long arg, int flags); + int fuse_file_flock_backing(struct file *file, int cmd, struct file_lock *fl); ssize_t fuse_backing_mmap(struct file *file, struct vm_area_struct *vma); diff --git a/fs/fuse/ioctl.c b/fs/fuse/ioctl.c index 8ba1545e01f9..d266f640266f 100644 --- a/fs/fuse/ioctl.c +++ b/fs/fuse/ioctl.c @@ -353,6 +353,15 @@ long fuse_ioctl_common(struct file *file, unsigned int cmd, if (fuse_is_bad(inode)) return -EIO; +#ifdef CONFIG_FUSE_BPF + { + struct fuse_file *ff = file->private_data; + + /* TODO - this is simply passthrough, not a proper BPF filter */ + if (ff->backing_file) + return fuse_backing_ioctl(file, cmd, arg, flags); + } +#endif return 
fuse_do_ioctl(file, cmd, arg, flags); } From 4bbda90bd8754e4bed1a5e4f5acb1212e8275fb1 Mon Sep 17 00:00:00 2001 From: Paul Lawrence Date: Mon, 24 Jul 2023 13:45:45 -0700 Subject: [PATCH 075/163] ANDROID: fuse-bpf: Fix flock test compile error Bug: 293161755 Test: fuse_test compiles Signed-off-by: Paul Lawrence Change-Id: I249672bab85966e20a26018f65f135fe15c6eff5 --- tools/testing/selftests/filesystems/fuse/fuse_test.c | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/testing/selftests/filesystems/fuse/fuse_test.c b/tools/testing/selftests/filesystems/fuse/fuse_test.c index bdb70e23b349..fc0d83cd1332 100644 --- a/tools/testing/selftests/filesystems/fuse/fuse_test.c +++ b/tools/testing/selftests/filesystems/fuse/fuse_test.c @@ -1345,7 +1345,6 @@ static int flock_test(const char *mount_dir) int fuse_dev = -1; int fd = -1, fd2 = -1; int backing_fd = -1; - char *addr = NULL; TEST(src_fd = open(ft_src, O_DIRECTORY | O_RDONLY | O_CLOEXEC), src_fd != -1); From 6aef06abbad42c3d1b4588ac31e042f967304be7 Mon Sep 17 00:00:00 2001 From: Paul Lawrence Date: Wed, 26 Jul 2023 15:03:41 -0700 Subject: [PATCH 076/163] ANDROID: fuse-bpf: Check inode not null fuse_iget_backing returns an inode or null, not a ERR_PTR. 
So check it's not NULL Also make sure we put the inode if d_splice_alias fails Bug: 293349757 Test: fuse_test runs Signed-off-by: Paul Lawrence Change-Id: I1eadad32f80bab6730e461412b4b7ab4d6c56bf2 --- fs/fuse/backing.c | 17 ++++++++++------- fs/fuse/dir.c | 26 ++++++++++++-------------- 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/fs/fuse/backing.c b/fs/fuse/backing.c index 1e403a090581..652c5f3560f4 100644 --- a/fs/fuse/backing.c +++ b/fs/fuse/backing.c @@ -252,8 +252,8 @@ int fuse_create_open_backing( inode = fuse_iget_backing(dir->i_sb, target_nodeid, get_fuse_dentry(entry)->backing_path.dentry->d_inode); - if (IS_ERR(inode)) { - err = PTR_ERR(inode); + if (!inode) { + err = -EIO; goto out; } @@ -269,10 +269,12 @@ int fuse_create_open_backing( goto out; } + inode = NULL; entry = newent ? newent : entry; err = finish_open(file, entry, fuse_open_file_backing); out: + iput(inode); dput(backing_dentry); return err; } @@ -1240,7 +1242,7 @@ struct dentry *fuse_lookup_finalize(struct fuse_bpf_args *fa, struct inode *dir, { struct fuse_dentry *fd; struct dentry *bd; - struct inode *inode, *backing_inode; + struct inode *inode = NULL, *backing_inode; struct inode *d_inode = entry->d_inode; struct fuse_entry_out *feo = fa->out_args[0].value; struct fuse_entry_bpf_out *febo = fa->out_args[1].value; @@ -1271,9 +1273,8 @@ struct dentry *fuse_lookup_finalize(struct fuse_bpf_args *fa, struct inode *dir, target_nodeid = get_fuse_inode(d_inode)->nodeid; inode = fuse_iget_backing(dir->i_sb, target_nodeid, backing_inode); - - if (IS_ERR(inode)) { - ret = ERR_PTR(PTR_ERR(inode)); + if (!inode) { + ret = ERR_PTR(-EIO); goto out; } @@ -1290,9 +1291,11 @@ struct dentry *fuse_lookup_finalize(struct fuse_bpf_args *fa, struct inode *dir, } get_fuse_inode(inode)->nodeid = feo->nodeid; - ret = d_splice_alias(inode, entry); + if (!IS_ERR(ret)) + inode = NULL; out: + iput(inode); if (feb->backing_file) fput(feb->backing_file); return ret; diff --git a/fs/fuse/dir.c
b/fs/fuse/dir.c index 076a0bddef8f..c70a9f722074 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -504,7 +504,6 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name if (name->len > FUSE_NAME_MAX) goto out; - forget = fuse_alloc_forget(); err = -ENOMEM; if (!forget) @@ -523,32 +522,34 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name err = -ENOENT; if (!entry) - goto out_queue_forget; + goto out_put_forget; err = -EINVAL; backing_file = bpf_arg.backing_file; if (!backing_file) - goto out_queue_forget; + goto out_put_forget; if (IS_ERR(backing_file)) { err = PTR_ERR(backing_file); - goto out_queue_forget; + goto out_put_forget; } backing_inode = backing_file->f_inode; *inode = fuse_iget_backing(sb, outarg->nodeid, backing_inode); if (!*inode) - goto out; + goto out_put_forget; err = fuse_handle_backing(&bpf_arg, &get_fuse_inode(*inode)->backing_inode, &get_fuse_dentry(entry)->backing_path); - if (err) - goto out; - - err = fuse_handle_bpf_prog(&bpf_arg, NULL, &get_fuse_inode(*inode)->bpf); - if (err) - goto out; + if (!err) + err = fuse_handle_bpf_prog(&bpf_arg, NULL, + &get_fuse_inode(*inode)->bpf); + if (err) { + iput(*inode); + *inode = NULL; + goto out_put_forget; + } } else #endif { @@ -568,9 +569,6 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name } err = -ENOMEM; -#ifdef CONFIG_FUSE_BPF -out_queue_forget: -#endif if (!*inode && outarg->nodeid) { fuse_queue_forget(fm->fc, forget, outarg->nodeid, 1); goto out; From 74d9daa59a1e6a1207f7a198cfb8bbc49fd98872 Mon Sep 17 00:00:00 2001 From: Paul Lawrence Date: Tue, 25 Jul 2023 11:18:38 -0700 Subject: [PATCH 077/163] ANDROID: fuse-bpf: Add bpf to negative fuse_dentry Store the results of a negative lookup in the fuse_dentry so later opcodes can use them to create files Bug: 291705489 Test: fuse_test passes Signed-off-by: Paul Lawrence Change-Id: I725e714a1d6ce43f24431d07c24e96349ef1a55c --- fs/fuse/backing.c | 64 
+++++++++++++++++++++++------------------------ fs/fuse/dir.c | 5 ++++ fs/fuse/fuse_i.h | 6 +++++ 3 files changed, 43 insertions(+), 32 deletions(-) diff --git a/fs/fuse/backing.c b/fs/fuse/backing.c index 652c5f3560f4..fa3d8ef72974 100644 --- a/fs/fuse/backing.c +++ b/fs/fuse/backing.c @@ -1240,60 +1240,60 @@ int fuse_handle_bpf_prog(struct fuse_entry_bpf *feb, struct inode *parent, struct dentry *fuse_lookup_finalize(struct fuse_bpf_args *fa, struct inode *dir, struct dentry *entry, unsigned int flags) { - struct fuse_dentry *fd; - struct dentry *bd; + struct fuse_dentry *fuse_entry; + struct dentry *backing_entry; struct inode *inode = NULL, *backing_inode; - struct inode *d_inode = entry->d_inode; + struct inode *entry_inode = entry->d_inode; struct fuse_entry_out *feo = fa->out_args[0].value; struct fuse_entry_bpf_out *febo = fa->out_args[1].value; - struct fuse_entry_bpf *feb = container_of(febo, struct fuse_entry_bpf, out); + struct fuse_entry_bpf *feb = container_of(febo, struct fuse_entry_bpf, + out); int error = -1; u64 target_nodeid = 0; - struct dentry *ret; + struct dentry *ret = NULL; - fd = get_fuse_dentry(entry); - if (!fd) { + fuse_entry = get_fuse_dentry(entry); + if (!fuse_entry) { ret = ERR_PTR(-EIO); goto out; } - bd = fd->backing_path.dentry; - if (!bd) { + backing_entry = fuse_entry->backing_path.dentry; + if (!backing_entry) { ret = ERR_PTR(-ENOENT); goto out; } - backing_inode = bd->d_inode; - if (!backing_inode) { - ret = 0; - goto out; - } + if (entry_inode) + target_nodeid = get_fuse_inode(entry_inode)->nodeid; - if (d_inode) - target_nodeid = get_fuse_inode(d_inode)->nodeid; + backing_inode = backing_entry->d_inode; + if (backing_inode) + inode = fuse_iget_backing(dir->i_sb, target_nodeid, + backing_inode); - inode = fuse_iget_backing(dir->i_sb, target_nodeid, backing_inode); - if (!inode) { - ret = ERR_PTR(-EIO); - goto out; - } - - error = fuse_handle_bpf_prog(feb, dir, &get_fuse_inode(inode)->bpf); + error = inode ? 
+ fuse_handle_bpf_prog(feb, dir, &get_fuse_inode(inode)->bpf) : + fuse_handle_bpf_prog(feb, dir, &fuse_entry->bpf); if (error) { ret = ERR_PTR(error); goto out; } - error = fuse_handle_backing(feb, &get_fuse_inode(inode)->backing_inode, &fd->backing_path); - if (error) { - ret = ERR_PTR(error); - goto out; - } + if (inode) { + error = fuse_handle_backing(feb, + &get_fuse_inode(inode)->backing_inode, + &fuse_entry->backing_path); + if (error) { + ret = ERR_PTR(error); + goto out; + } - get_fuse_inode(inode)->nodeid = feo->nodeid; - ret = d_splice_alias(inode, entry); - if (!IS_ERR(ret)) - inode = NULL; + get_fuse_inode(inode)->nodeid = feo->nodeid; + ret = d_splice_alias(inode, entry); + if (!IS_ERR(ret)) + inode = NULL; + } out: iput(inode); if (feb->backing_file) diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index c70a9f722074..59dee8d5b578 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -364,9 +364,14 @@ static void fuse_dentry_release(struct dentry *dentry) { struct fuse_dentry *fd = dentry->d_fsdata; +#ifdef CONFIG_FUSE_BPF if (fd && fd->backing_path.dentry) path_put(&fd->backing_path); + if (fd && fd->bpf) + bpf_prog_put(fd->bpf); +#endif + kfree_rcu(fd, rcu); } #endif diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 90d38da7f4f6..723d462a54eb 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -76,7 +76,13 @@ struct fuse_dentry { u64 time; struct rcu_head rcu; }; + +#ifdef CONFIG_FUSE_BPF struct path backing_path; + + /* bpf program *only* set for negative dentries */ + struct bpf_prog *bpf; +#endif }; static inline struct fuse_dentry *get_fuse_dentry(const struct dentry *entry) From 295e779e8f896de94df662a2c0e20d60ad69f6b2 Mon Sep 17 00:00:00 2001 From: Paul Lawrence Date: Tue, 25 Jul 2023 11:20:11 -0700 Subject: [PATCH 078/163] ANDROID: fuse-bpf: Use stored bpf for create_open create_open would always take its parent directory's bpf for the created object. Modify to use the bpf stored in fuse_dentry which is set by lookup. 
Bug: 291705489 Test: fuse_test passes, adb push file /sdcard/Android/data works Signed-off-by: Paul Lawrence Change-Id: I0a1ea2a291a8fdf67923f1827176b2ea96bd4c2d --- fs/fuse/backing.c | 16 ++++---- .../selftests/filesystems/fuse/fuse_test.c | 39 +++++++++++++++++++ .../selftests/filesystems/fuse/test_bpf.c | 26 +++++++++++++ 3 files changed, 73 insertions(+), 8 deletions(-) diff --git a/fs/fuse/backing.c b/fs/fuse/backing.c index fa3d8ef72974..9df0535ad20f 100644 --- a/fs/fuse/backing.c +++ b/fs/fuse/backing.c @@ -208,6 +208,7 @@ int fuse_create_open_backing( struct file *file, unsigned int flags, umode_t mode) { struct fuse_inode *dir_fuse_inode = get_fuse_inode(dir); + struct fuse_dentry *fuse_entry = get_fuse_dentry(entry); struct fuse_dentry *dir_fuse_dentry = get_fuse_dentry(entry->d_parent); struct dentry *backing_dentry = NULL; struct inode *inode = NULL; @@ -239,19 +240,19 @@ int fuse_create_open_backing( if (err) goto out; - if (get_fuse_dentry(entry)->backing_path.dentry) - path_put(&get_fuse_dentry(entry)->backing_path); - get_fuse_dentry(entry)->backing_path = (struct path) { + if (fuse_entry->backing_path.dentry) + path_put(&fuse_entry->backing_path); + fuse_entry->backing_path = (struct path) { .mnt = dir_fuse_dentry->backing_path.mnt, .dentry = backing_dentry, }; - path_get(&get_fuse_dentry(entry)->backing_path); + path_get(&fuse_entry->backing_path); if (d_inode) target_nodeid = get_fuse_inode(d_inode)->nodeid; inode = fuse_iget_backing(dir->i_sb, target_nodeid, - get_fuse_dentry(entry)->backing_path.dentry->d_inode); + fuse_entry->backing_path.dentry->d_inode); if (!inode) { err = -EIO; goto out; @@ -259,9 +260,8 @@ int fuse_create_open_backing( if (get_fuse_inode(inode)->bpf) bpf_prog_put(get_fuse_inode(inode)->bpf); - get_fuse_inode(inode)->bpf = dir_fuse_inode->bpf; - if (get_fuse_inode(inode)->bpf) - bpf_prog_inc(dir_fuse_inode->bpf); + get_fuse_inode(inode)->bpf = fuse_entry->bpf; + fuse_entry->bpf = NULL; newent = d_splice_alias(inode, 
entry); if (IS_ERR(newent)) { diff --git a/tools/testing/selftests/filesystems/fuse/fuse_test.c b/tools/testing/selftests/filesystems/fuse/fuse_test.c index fc0d83cd1332..0bf1f030cbcd 100644 --- a/tools/testing/selftests/filesystems/fuse/fuse_test.c +++ b/tools/testing/selftests/filesystems/fuse/fuse_test.c @@ -2009,6 +2009,44 @@ static int bpf_test_lookup_postfilter(const char *mount_dir) return result; } +/** + * Test that a file made via create_and_open correctly gets the bpf assigned + * from the negative lookup + * bpf blocks file open, but also removes itself from children + * This test will fail if the 'remove' is unsuccessful + */ +static int bpf_test_create_and_remove_bpf(const char *mount_dir) +{ + const char *file = "file"; + + int result = TEST_FAILURE; + int src_fd = -1; + int bpf_fd = -1; + int fuse_dev = -1; + int fd = -1; + int fd2 = -1; + + TEST(src_fd = open(ft_src, O_DIRECTORY | O_RDONLY | O_CLOEXEC), + src_fd != -1); + TESTEQUAL(install_elf_bpf("test_bpf.bpf", "test_create_remove", &bpf_fd, + NULL, NULL), 0); + TESTEQUAL(mount_fuse_no_init(mount_dir, bpf_fd, src_fd, &fuse_dev), 0); + TEST(fd = s_creat(s_path(s(mount_dir), s(file)), 0777), + fd != -1); + TEST(fd2 = s_open(s_path(s(mount_dir), s(file)), O_RDONLY), + fd2 != -1); + + result = TEST_SUCCESS; +out: + close(fd2); + close(fd); + close(fuse_dev); + close(bpf_fd); + close(src_fd); + umount(mount_dir); + return result; +} + static void parse_range(const char *ranges, bool *run_test, size_t tests) { size_t i; @@ -2136,6 +2174,7 @@ int main(int argc, char *argv[]) MAKE_TEST(bpf_test_revalidate_handle_backing_fd), MAKE_TEST(bpf_test_lookup_postfilter), MAKE_TEST(flock_test), + MAKE_TEST(bpf_test_create_and_remove_bpf), }; #undef MAKE_TEST diff --git a/tools/testing/selftests/filesystems/fuse/test_bpf.c b/tools/testing/selftests/filesystems/fuse/test_bpf.c index 032cb1178f9f..e02bdb4a9380 100644 --- a/tools/testing/selftests/filesystems/fuse/test_bpf.c +++ 
b/tools/testing/selftests/filesystems/fuse/test_bpf.c @@ -505,3 +505,29 @@ int lookuppostfilter_test(struct fuse_bpf_args *fa) return FUSE_BPF_BACKING; } } + +SEC("test_create_remove") +int createremovebpf_test(struct fuse_bpf_args *fa) +{ + switch (fa->opcode) { + case FUSE_LOOKUP | FUSE_PREFILTER: { + return FUSE_BPF_BACKING | FUSE_BPF_POST_FILTER; + } + + case FUSE_LOOKUP | FUSE_POSTFILTER: { + struct fuse_entry_bpf_out *febo = fa->out_args[1].value; + + febo->bpf_action = FUSE_ACTION_REMOVE; + return 0; + } + + case FUSE_OPEN | FUSE_PREFILTER: { + return -EIO; + } + + default: + return FUSE_BPF_BACKING; + } +} + + From 7beed73af0664057745502e2af1b48edcc4ef834 Mon Sep 17 00:00:00 2001 From: Ramji Jiyani Date: Thu, 27 Jul 2023 17:57:09 +0000 Subject: [PATCH 079/163] ANDROID: GKI: Create symbol files in include/config Create input symbol files to generate GKI modules header under include/config. By placing files in this generated directory, the default filters that ignore certain files will work without any special handling required, and they will also be available to inspect after the build to inspect for the debugging purposes. 
abi_gki_protected_exports: Input for gki_module_protected_exports.h From :- ${objtree}/abi_gki_protected_exports To :- include/config/abi_gki_protected_exports all_kmi_symbols: Input for gki_module_unprotected.h - Rename to abi_gki_kmi_symbols From :- all_kmi_symbols To :- include/config/abi_gki_kmi_symbols Bug: 286529877 Test: TH Test: Manual verification of the generated files Change-Id: Iafa10631e7712a8e1e87a2f56cfd614de6b1053a Signed-off-by: Ramji Jiyani --- kernel/module/Makefile | 2 +- scripts/gen_gki_modules_headers.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/module/Makefile b/kernel/module/Makefile index 458cb6e44e85..5d9035643b9a 100644 --- a/kernel/module/Makefile +++ b/kernel/module/Makefile @@ -28,7 +28,7 @@ obj-$(CONFIG_MODULE_UNLOAD_TAINT_TRACKING) += tracking.o $(obj)/gki_module.o: include/generated/gki_module_protected_exports.h \ include/generated/gki_module_unprotected.h -ALL_KMI_SYMBOLS := all_kmi_symbols +ALL_KMI_SYMBOLS := include/config/abi_gki_kmi_symbols include/generated/gki_module_unprotected.h: $(ALL_KMI_SYMBOLS) \ $(srctree)/scripts/gen_gki_modules_headers.sh diff --git a/scripts/gen_gki_modules_headers.sh b/scripts/gen_gki_modules_headers.sh index 3aa221a058f4..ca435f49b62f 100755 --- a/scripts/gen_gki_modules_headers.sh +++ b/scripts/gen_gki_modules_headers.sh @@ -108,7 +108,7 @@ if [ "$(basename "${TARGET}")" = "gki_module_unprotected.h" ]; then generate_header "${TARGET}" "${GKI_VENDOR_SYMBOLS}" "unprotected" else # Sorted list of exported symbols - GKI_EXPORTED_SYMBOLS="${objtree}/abi_gki_protected_exports" + GKI_EXPORTED_SYMBOLS="include/config/abi_gki_protected_exports" if [ -z "${SYMBOL_LIST}" ]; then # Create empty list if ARCH doesn't have protected exports From 343b85ecadc2dae0780b1c7a76fa3dcb38468fbb Mon Sep 17 00:00:00 2001 From: Ming Qian Date: Wed, 22 Mar 2023 05:13:04 +0000 Subject: [PATCH 080/163] UPSTREAM: media: Add P012 and P012M video format P012 is a YUV format with 12-bits per 
component with interleaved UV, like NV12, expanded to 16 bits. Data in the 12 high bits, zeros in the 4 low bits, arranged in little endian order. And P012M has two non contiguous planes. Bug: 293213303 Change-Id: I1fbfa7c445bc682766f479cca07eb8cb16cbb44f (cherry picked from commit aa1080404200694aace5989f99664ca75e73b03d) Signed-off-by: Ming Qian Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jindong Yue --- .../media/v4l/pixfmt-yuv-planar.rst | 94 +++++++++++++++++++ drivers/media/v4l2-core/v4l2-common.c | 2 + drivers/media/v4l2-core/v4l2-ioctl.c | 2 + include/uapi/linux/videodev2.h | 2 + 4 files changed, 100 insertions(+) diff --git a/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst b/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst index f1d5bb7b806d..72324274f20c 100644 --- a/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst +++ b/Documentation/userspace-api/media/v4l/pixfmt-yuv-planar.rst @@ -123,6 +123,20 @@ All components are stored with the same number of bits per component. - Cb, Cr - Yes - 4x4 tiles + * - V4L2_PIX_FMT_P012 + - 'P012' + - 12 + - 4:2:0 + - Cb, Cr + - Yes + - Linear + * - V4L2_PIX_FMT_P012M + - 'PM12' + - 12 + - 4:2:0 + - Cb, Cr + - No + - Linear * - V4L2_PIX_FMT_NV16 - 'NV16' - 8 @@ -586,6 +600,86 @@ Data in the 10 high bits, zeros in the 6 low bits, arranged in little endian ord - Cb\ :sub:`11` - Cr\ :sub:`11` +.. _V4L2-PIX-FMT-P012: +.. _V4L2-PIX-FMT-P012M: + +P012 and P012M +-------------- + +P012 is like NV12 with 12 bits per component, expanded to 16 bits. +Data in the 12 high bits, zeros in the 4 low bits, arranged in little endian order. + +.. 
flat-table:: Sample 4x4 P012 Image + :header-rows: 0 + :stub-columns: 0 + + * - start + 0: + - Y'\ :sub:`00` + - Y'\ :sub:`01` + - Y'\ :sub:`02` + - Y'\ :sub:`03` + * - start + 8: + - Y'\ :sub:`10` + - Y'\ :sub:`11` + - Y'\ :sub:`12` + - Y'\ :sub:`13` + * - start + 16: + - Y'\ :sub:`20` + - Y'\ :sub:`21` + - Y'\ :sub:`22` + - Y'\ :sub:`23` + * - start + 24: + - Y'\ :sub:`30` + - Y'\ :sub:`31` + - Y'\ :sub:`32` + - Y'\ :sub:`33` + * - start + 32: + - Cb\ :sub:`00` + - Cr\ :sub:`00` + - Cb\ :sub:`01` + - Cr\ :sub:`01` + * - start + 40: + - Cb\ :sub:`10` + - Cr\ :sub:`10` + - Cb\ :sub:`11` + - Cr\ :sub:`11` + +.. flat-table:: Sample 4x4 P012M Image + :header-rows: 0 + :stub-columns: 0 + + * - start0 + 0: + - Y'\ :sub:`00` + - Y'\ :sub:`01` + - Y'\ :sub:`02` + - Y'\ :sub:`03` + * - start0 + 8: + - Y'\ :sub:`10` + - Y'\ :sub:`11` + - Y'\ :sub:`12` + - Y'\ :sub:`13` + * - start0 + 16: + - Y'\ :sub:`20` + - Y'\ :sub:`21` + - Y'\ :sub:`22` + - Y'\ :sub:`23` + * - start0 + 24: + - Y'\ :sub:`30` + - Y'\ :sub:`31` + - Y'\ :sub:`32` + - Y'\ :sub:`33` + * - + * - start1 + 0: + - Cb\ :sub:`00` + - Cr\ :sub:`00` + - Cb\ :sub:`01` + - Cr\ :sub:`01` + * - start1 + 8: + - Cb\ :sub:`10` + - Cr\ :sub:`10` + - Cb\ :sub:`11` + - Cr\ :sub:`11` + Fully Planar YUV Formats ======================== diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c index 40f56e044640..a5e8ba370d33 100644 --- a/drivers/media/v4l2-core/v4l2-common.c +++ b/drivers/media/v4l2-core/v4l2-common.c @@ -267,6 +267,7 @@ const struct v4l2_format_info *v4l2_format_info(u32 format) { .format = V4L2_PIX_FMT_NV24, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_NV42, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_P010, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 2, 
0, 0 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_P012, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .hdiv = 2, .vdiv = 2 }, { .format = V4L2_PIX_FMT_YUV410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 }, { .format = V4L2_PIX_FMT_YVU410, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 4, .vdiv = 4 }, @@ -292,6 +293,7 @@ const struct v4l2_format_info *v4l2_format_info(u32 format) { .format = V4L2_PIX_FMT_NV21M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 }, { .format = V4L2_PIX_FMT_NV16M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 }, { .format = V4L2_PIX_FMT_NV61M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_P012M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 2, .comp_planes = 2, .bpp = { 2, 4, 0, 0 }, .hdiv = 2, .vdiv = 2 }, /* Bayer RGB formats */ { .format = V4L2_PIX_FMT_SBGGR8, .pixel_enc = V4L2_PIXEL_ENC_BAYER, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 }, diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 91ae25e092e2..15fba2e074f4 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -1354,6 +1354,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_NV24: descr = "Y/UV 4:4:4"; break; case V4L2_PIX_FMT_NV42: descr = "Y/VU 4:4:4"; break; case V4L2_PIX_FMT_P010: descr = "10-bit Y/UV 4:2:0"; break; + case V4L2_PIX_FMT_P012: descr = "12-bit Y/UV 4:2:0"; break; case V4L2_PIX_FMT_NV12_4L4: descr = "Y/UV 4:2:0 (4x4 Linear)"; break; case V4L2_PIX_FMT_NV12_16L16: descr = "Y/UV 4:2:0 (16x16 Linear)"; break; case 
V4L2_PIX_FMT_NV12_32L32: descr = "Y/UV 4:2:0 (32x32 Linear)"; break; @@ -1364,6 +1365,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_NV61M: descr = "Y/VU 4:2:2 (N-C)"; break; case V4L2_PIX_FMT_NV12MT: descr = "Y/UV 4:2:0 (64x32 MB, N-C)"; break; case V4L2_PIX_FMT_NV12MT_16X16: descr = "Y/UV 4:2:0 (16x16 MB, N-C)"; break; + case V4L2_PIX_FMT_P012M: descr = "12-bit Y/UV 4:2:0 (N-C)"; break; case V4L2_PIX_FMT_YUV420M: descr = "Planar YUV 4:2:0 (N-C)"; break; case V4L2_PIX_FMT_YVU420M: descr = "Planar YVU 4:2:0 (N-C)"; break; case V4L2_PIX_FMT_YUV422M: descr = "Planar YUV 4:2:2 (N-C)"; break; diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index d67a7ff0da9a..2f121ada6c31 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -626,12 +626,14 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_NV24 v4l2_fourcc('N', 'V', '2', '4') /* 24 Y/CbCr 4:4:4 */ #define V4L2_PIX_FMT_NV42 v4l2_fourcc('N', 'V', '4', '2') /* 24 Y/CrCb 4:4:4 */ #define V4L2_PIX_FMT_P010 v4l2_fourcc('P', '0', '1', '0') /* 24 Y/CbCr 4:2:0 10-bit per component */ +#define V4L2_PIX_FMT_P012 v4l2_fourcc('P', '0', '1', '2') /* 24 Y/CbCr 4:2:0 12-bit per component */ /* two non contiguous planes - one Y, one Cr + Cb interleaved */ #define V4L2_PIX_FMT_NV12M v4l2_fourcc('N', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 */ #define V4L2_PIX_FMT_NV21M v4l2_fourcc('N', 'M', '2', '1') /* 21 Y/CrCb 4:2:0 */ #define V4L2_PIX_FMT_NV16M v4l2_fourcc('N', 'M', '1', '6') /* 16 Y/CbCr 4:2:2 */ #define V4L2_PIX_FMT_NV61M v4l2_fourcc('N', 'M', '6', '1') /* 16 Y/CrCb 4:2:2 */ +#define V4L2_PIX_FMT_P012M v4l2_fourcc('P', 'M', '1', '2') /* 24 Y/CbCr 4:2:0 12-bit per component */ /* three planes - Y Cb, Cr */ #define V4L2_PIX_FMT_YUV410 v4l2_fourcc('Y', 'U', 'V', '9') /* 9 YUV 4:1:0 */ From ca7b45b12894126ffa637c5ebf57153348831f55 Mon Sep 17 00:00:00 2001 From: Ming Qian Date: Wed, 22 Mar 2023 05:13:05 +0000 Subject: [PATCH 081/163] UPSTREAM: media: Add 
Y012 video format Y012 is a luma-only formats with 12-bits per pixel, expanded to 16bits. Data in the 12 high bits, zeros in the 4 low bits, arranged in little endian order. Bug: 293213303 Change-Id: I1a8f73162932e0760aabbe44525d7c74ace9f7bd (cherry picked from commit a490ea68444084ec0368c019e11ee4a7e5c8bb13) Signed-off-by: Ming Qian Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jindong Yue --- .../userspace-api/media/v4l/pixfmt-yuv-luma.rst | 15 +++++++++++++++ drivers/media/v4l2-core/v4l2-ioctl.c | 1 + include/uapi/linux/videodev2.h | 1 + 3 files changed, 17 insertions(+) diff --git a/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst b/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst index 6a387f9df3ba..26fd46fa4971 100644 --- a/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst +++ b/Documentation/userspace-api/media/v4l/pixfmt-yuv-luma.rst @@ -103,6 +103,17 @@ are often referred to as greyscale formats. - ... - ... + * .. _V4L2-PIX-FMT-Y012: + + - ``V4L2_PIX_FMT_Y012`` + - 'Y012' + + - Y'\ :sub:`0`\ [3:0] `0000` + - Y'\ :sub:`0`\ [11:4] + - ... + - ... + - ... + * .. _V4L2-PIX-FMT-Y14: - ``V4L2_PIX_FMT_Y14`` @@ -146,3 +157,7 @@ are often referred to as greyscale formats. than 16 bits. For example, 10 bits per pixel uses values in the range 0 to 1023. For the IPU3_Y10 format 25 pixels are packed into 32 bytes, which leaves the 6 most significant bits of the last byte padded with 0. + + For Y012 and Y12 formats, Y012 places its data in the 12 high bits, with + padding zeros in the 4 low bits, in contrast to the Y12 format, which has + its padding located in the most significant bits of the 16 bit word. 
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 15fba2e074f4..5393e07fdfba 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -1309,6 +1309,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_Y6: descr = "6-bit Greyscale"; break; case V4L2_PIX_FMT_Y10: descr = "10-bit Greyscale"; break; case V4L2_PIX_FMT_Y12: descr = "12-bit Greyscale"; break; + case V4L2_PIX_FMT_Y012: descr = "12-bit Greyscale (bits 15-4)"; break; case V4L2_PIX_FMT_Y14: descr = "14-bit Greyscale"; break; case V4L2_PIX_FMT_Y16: descr = "16-bit Greyscale"; break; case V4L2_PIX_FMT_Y16_BE: descr = "16-bit Greyscale BE"; break; diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 2f121ada6c31..4a78d1bec619 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -583,6 +583,7 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_Y6 v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */ #define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */ #define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */ +#define V4L2_PIX_FMT_Y012 v4l2_fourcc('Y', '0', '1', '2') /* 12 Greyscale */ #define V4L2_PIX_FMT_Y14 v4l2_fourcc('Y', '1', '4', ' ') /* 14 Greyscale */ #define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */ #define V4L2_PIX_FMT_Y16_BE v4l2_fourcc_be('Y', '1', '6', ' ') /* 16 Greyscale BE */ From 0f3f7a21aff380830d4b1c1224a8a3b55ef64524 Mon Sep 17 00:00:00 2001 From: Tomi Valkeinen Date: Wed, 21 Dec 2022 11:24:43 +0200 Subject: [PATCH 082/163] UPSTREAM: media: Add Y210, Y212 and Y216 formats Add Y210, Y212 and Y216 formats. 
Bug: 293213303 Change-Id: I2d580dd82481f6a1364dfcedfd918e82d25ac211 (cherry picked from commit 0dc1d7a79a8d13e316d3b168e9fc57e376099c7a) Signed-off-by: Tomi Valkeinen Reviewed-by: Laurent Pinchart Acked-by: Mauro Carvalho Chehab Acked-by: Hans Verkuil Signed-off-by: Laurent Pinchart Signed-off-by: Jindong Yue --- .../media/v4l/pixfmt-packed-yuv.rst | 49 ++++++++++++++++++- drivers/media/v4l2-core/v4l2-ioctl.c | 3 ++ include/uapi/linux/videodev2.h | 8 +++ 3 files changed, 58 insertions(+), 2 deletions(-) diff --git a/Documentation/userspace-api/media/v4l/pixfmt-packed-yuv.rst b/Documentation/userspace-api/media/v4l/pixfmt-packed-yuv.rst index bf283a1b5581..24a771542059 100644 --- a/Documentation/userspace-api/media/v4l/pixfmt-packed-yuv.rst +++ b/Documentation/userspace-api/media/v4l/pixfmt-packed-yuv.rst @@ -262,7 +262,12 @@ the second byte and Y'\ :sub:`7-0` in the third byte. ================= These formats, commonly referred to as YUYV or YUY2, subsample the chroma -components horizontally by 2, storing 2 pixels in 4 bytes. +components horizontally by 2, storing 2 pixels in a container. The container +is 32-bits for 8-bit formats, and 64-bits for 10+-bit formats. + +The packed YUYV formats with more than 8 bits per component are stored as four +16-bit little-endian words. Each word's most significant bits contain one +component, and the least significant bits are zero padding. .. raw:: latex @@ -270,7 +275,7 @@ components horizontally by 2, storing 2 pixels in 4 bytes. .. tabularcolumns:: |p{3.4cm}|p{1.2cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}| -.. flat-table:: Packed YUV 4:2:2 Formats +.. flat-table:: Packed YUV 4:2:2 Formats in 32-bit container :header-rows: 1 :stub-columns: 0 @@ -337,6 +342,46 @@ components horizontally by 2, storing 2 pixels in 4 bytes. - Y'\ :sub:`3` - Cb\ :sub:`2` +.. tabularcolumns:: |p{3.4cm}|p{1.2cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}|p{0.8cm}| + +.. 
flat-table:: Packed YUV 4:2:2 Formats in 64-bit container + :header-rows: 1 + :stub-columns: 0 + + * - Identifier + - Code + - Word 0 + - Word 1 + - Word 2 + - Word 3 + * .. _V4L2-PIX-FMT-Y210: + + - ``V4L2_PIX_FMT_Y210`` + - 'Y210' + + - Y'\ :sub:`0` (bits 15-6) + - Cb\ :sub:`0` (bits 15-6) + - Y'\ :sub:`1` (bits 15-6) + - Cr\ :sub:`0` (bits 15-6) + * .. _V4L2-PIX-FMT-Y212: + + - ``V4L2_PIX_FMT_Y212`` + - 'Y212' + + - Y'\ :sub:`0` (bits 15-4) + - Cb\ :sub:`0` (bits 15-4) + - Y'\ :sub:`1` (bits 15-4) + - Cr\ :sub:`0` (bits 15-4) + * .. _V4L2-PIX-FMT-Y216: + + - ``V4L2_PIX_FMT_Y216`` + - 'Y216' + + - Y'\ :sub:`0` (bits 15-0) + - Cb\ :sub:`0` (bits 15-0) + - Y'\ :sub:`1` (bits 15-0) + - Cr\ :sub:`0` (bits 15-0) + .. raw:: latex \normalsize diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 5393e07fdfba..69a7afcb59da 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -1451,6 +1451,9 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_NV12M_8L128: descr = "NV12M (8x128 Linear)"; break; case V4L2_PIX_FMT_NV12_10BE_8L128: descr = "10-bit NV12 (8x128 Linear, BE)"; break; case V4L2_PIX_FMT_NV12M_10BE_8L128: descr = "10-bit NV12M (8x128 Linear, BE)"; break; + case V4L2_PIX_FMT_Y210: descr = "10-bit YUYV Packed"; break; + case V4L2_PIX_FMT_Y212: descr = "12-bit YUYV Packed"; break; + case V4L2_PIX_FMT_Y216: descr = "16-bit YUYV Packed"; break; default: /* Compressed formats */ diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 4a78d1bec619..1b5a20bb54bf 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -619,6 +619,14 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_YUVX32 v4l2_fourcc('Y', 'U', 'V', 'X') /* 32 YUVX-8-8-8-8 */ #define V4L2_PIX_FMT_M420 v4l2_fourcc('M', '4', '2', '0') /* 12 YUV 4:2:0 2 lines y, 1 line uv interleaved */ +/* + * YCbCr packed format. 
For each Y2xx format, xx bits of valid data occupy the MSBs + * of the 16 bit components, and 16-xx bits of zero padding occupy the LSBs. + */ +#define V4L2_PIX_FMT_Y210 v4l2_fourcc('Y', '2', '1', '0') /* 32 YUYV 4:2:2 */ +#define V4L2_PIX_FMT_Y212 v4l2_fourcc('Y', '2', '1', '2') /* 32 YUYV 4:2:2 */ +#define V4L2_PIX_FMT_Y216 v4l2_fourcc('Y', '2', '1', '6') /* 32 YUYV 4:2:2 */ + /* two planes -- one Y, one Cr + Cb interleaved */ #define V4L2_PIX_FMT_NV12 v4l2_fourcc('N', 'V', '1', '2') /* 12 Y/CbCr 4:2:0 */ #define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */ From b2cf7e426877526a9b58741261e529c8689b6dd1 Mon Sep 17 00:00:00 2001 From: Ming Qian Date: Wed, 22 Mar 2023 05:13:06 +0000 Subject: [PATCH 083/163] UPSTREAM: media: Add Y212 v4l2 format info Y212 is a YUV format with 12-bits per component like YUYV, expanded to 16bits. Data in the 12 high bits, zeros in the 4 low bits, arranged in little endian order. Add the missing v4l2 foramt info of Y212 Bug: 293213303 Change-Id: Ibdf9bb3a3f1eb895da9eca52d115e08b656b5153 (cherry picked from commit a178dd3bbecc3e26dfc2c72b6fe64d9bf7749de2) Signed-off-by: Ming Qian Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jindong Yue --- drivers/media/v4l2-core/v4l2-common.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c index a5e8ba370d33..21ace56fac04 100644 --- a/drivers/media/v4l2-core/v4l2-common.c +++ b/drivers/media/v4l2-core/v4l2-common.c @@ -258,6 +258,7 @@ const struct v4l2_format_info *v4l2_format_info(u32 format) { .format = V4L2_PIX_FMT_YVYU, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 }, { .format = V4L2_PIX_FMT_UYVY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 }, { .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, 
.comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_Y212, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 }, /* YUV planar formats */ { .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 }, From 892293272c10c65024c6c8bb50d18ade089f75f4 Mon Sep 17 00:00:00 2001 From: Ming Qian Date: Wed, 22 Mar 2023 05:13:07 +0000 Subject: [PATCH 084/163] UPSTREAM: media: Add YUV48_12 video format YUV48_12 is a YUV format with 12-bits per component like YUV24, expanded to 16bits. Data in the 12 high bits, zeros in the 4 low bits, arranged in little endian order. [hverkuil: replaced a . by ,] Bug: 293213303 Change-Id: I12e6f02b99918a429224320da2127d6b4d777584 (cherry picked from commit 99c954967762976b15265ea383354095e1ed1efa) Signed-off-by: Ming Qian Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jindong Yue --- .../media/v4l/pixfmt-packed-yuv.rst | 28 +++++++++++++++++++ drivers/media/v4l2-core/v4l2-common.c | 1 + drivers/media/v4l2-core/v4l2-ioctl.c | 1 + include/uapi/linux/videodev2.h | 1 + 4 files changed, 31 insertions(+) diff --git a/Documentation/userspace-api/media/v4l/pixfmt-packed-yuv.rst b/Documentation/userspace-api/media/v4l/pixfmt-packed-yuv.rst index 24a771542059..9f111ed594d2 100644 --- a/Documentation/userspace-api/media/v4l/pixfmt-packed-yuv.rst +++ b/Documentation/userspace-api/media/v4l/pixfmt-packed-yuv.rst @@ -257,6 +257,34 @@ the second byte and Y'\ :sub:`7-0` in the third byte. - The padding bits contain undefined values that must be ignored by all applications and drivers. +The next table lists the packed YUV 4:4:4 formats with 12 bits per component. +Expand the bits per component to 16 bits, data in the high bits, zeros in the low bits, +arranged in little endian order, storing 1 pixel in 6 bytes. + +.. 
flat-table:: Packed YUV 4:4:4 Image Formats (12bpc) + :header-rows: 1 + :stub-columns: 0 + + * - Identifier + - Code + - Byte 1-0 + - Byte 3-2 + - Byte 5-4 + - Byte 7-6 + - Byte 9-8 + - Byte 11-10 + + * .. _V4L2-PIX-FMT-YUV48-12: + + - ``V4L2_PIX_FMT_YUV48_12`` + - 'Y312' + + - Y'\ :sub:`0` + - Cb\ :sub:`0` + - Cr\ :sub:`0` + - Y'\ :sub:`1` + - Cb\ :sub:`1` + - Cr\ :sub:`1` 4:2:2 Subsampling ================= diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c index 21ace56fac04..da313a0637de 100644 --- a/drivers/media/v4l2-core/v4l2-common.c +++ b/drivers/media/v4l2-core/v4l2-common.c @@ -259,6 +259,7 @@ const struct v4l2_format_info *v4l2_format_info(u32 format) { .format = V4L2_PIX_FMT_UYVY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 }, { .format = V4L2_PIX_FMT_VYUY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 }, { .format = V4L2_PIX_FMT_Y212, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_YUV48_12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 }, /* YUV planar formats */ { .format = V4L2_PIX_FMT_NV12, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 }, diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 69a7afcb59da..508854959135 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -1348,6 +1348,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_YUV420: descr = "Planar YUV 4:2:0"; break; case V4L2_PIX_FMT_HI240: descr = "8-bit Dithered RGB (BTTV)"; break; case V4L2_PIX_FMT_M420: descr = "YUV 4:2:0 (M420)"; break; + case V4L2_PIX_FMT_YUV48_12: descr = "12-bit 
YUV 4:4:4 Packed"; break; case V4L2_PIX_FMT_NV12: descr = "Y/UV 4:2:0"; break; case V4L2_PIX_FMT_NV21: descr = "Y/VU 4:2:0"; break; case V4L2_PIX_FMT_NV16: descr = "Y/UV 4:2:2"; break; diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 1b5a20bb54bf..f3ee34d37e75 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -618,6 +618,7 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_YUVA32 v4l2_fourcc('Y', 'U', 'V', 'A') /* 32 YUVA-8-8-8-8 */ #define V4L2_PIX_FMT_YUVX32 v4l2_fourcc('Y', 'U', 'V', 'X') /* 32 YUVX-8-8-8-8 */ #define V4L2_PIX_FMT_M420 v4l2_fourcc('M', '4', '2', '0') /* 12 YUV 4:2:0 2 lines y, 1 line uv interleaved */ +#define V4L2_PIX_FMT_YUV48_12 v4l2_fourcc('Y', '3', '1', '2') /* 48 YUV 4:4:4 12-bit per component */ /* * YCbCr packed format. For each Y2xx format, xx bits of valid data occupy the MSBs From 86e2e8fd053e50693a7c3f1b1230dded1b322381 Mon Sep 17 00:00:00 2001 From: Ming Qian Date: Wed, 22 Mar 2023 05:13:08 +0000 Subject: [PATCH 085/163] BACKPORT: media: Add BGR48_12 video format BGR48_12 is a reversed RGB format with 12 bits per component like BGR24, expanded to 16bits. Data in the 12 high bits, zeros in the 4 low bits, arranged in little endian order. 
Bug: 293213303 Change-Id: I27d14a33c8e2b4847a63ea05b285786766949ebf (cherry picked from commit da0b7a400e4f39726c3c383f377fb51dbd8b0c71) [Jindong: Fixed conflicts in .rst file and v4l2-ioctl.c] Signed-off-by: Ming Qian Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jindong Yue --- .../userspace-api/media/v4l/pixfmt-rgb.rst | 33 +++++++++++++++++++ drivers/media/v4l2-core/v4l2-common.c | 1 + drivers/media/v4l2-core/v4l2-ioctl.c | 1 + include/uapi/linux/videodev2.h | 3 ++ 4 files changed, 38 insertions(+) diff --git a/Documentation/userspace-api/media/v4l/pixfmt-rgb.rst b/Documentation/userspace-api/media/v4l/pixfmt-rgb.rst index 30f51cd33f99..aadcfeea8b06 100644 --- a/Documentation/userspace-api/media/v4l/pixfmt-rgb.rst +++ b/Documentation/userspace-api/media/v4l/pixfmt-rgb.rst @@ -762,6 +762,39 @@ nomenclature that instead use the order of components as seen in a 24- or \normalsize +12 Bits Per Component +============================== + +These formats store an RGB triplet in six or eight bytes, with 12 bits per component. +Expand the bits per component to 16 bits, data in the high bits, zeros in the low bits, +arranged in little endian order. + +.. raw:: latex + + \small + +.. flat-table:: RGB Formats With 12 Bits Per Component + :header-rows: 1 + + * - Identifier + - Code + - Byte 1-0 + - Byte 3-2 + - Byte 5-4 + - Byte 7-6 + * .. _V4L2-PIX-FMT-BGR48-12: + + - ``V4L2_PIX_FMT_BGR48_12`` + - 'B312' + + - B\ :sub:`15-4` + - G\ :sub:`15-4` + - R\ :sub:`15-4` + - + +.. 
raw:: latex + + \normalsize Deprecated RGB Formats ====================== diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c index da313a0637de..16d3c91c7da2 100644 --- a/drivers/media/v4l2-core/v4l2-common.c +++ b/drivers/media/v4l2-core/v4l2-common.c @@ -252,6 +252,7 @@ const struct v4l2_format_info *v4l2_format_info(u32 format) { .format = V4L2_PIX_FMT_RGB565, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_BGR666, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_BGR48_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 }, /* YUV packed formats */ { .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 }, diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 508854959135..0b00530a29ac 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -1304,6 +1304,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_BGRX32: descr = "32-bit XBGR 8-8-8-8"; break; case V4L2_PIX_FMT_RGBA32: descr = "32-bit RGBA 8-8-8-8"; break; case V4L2_PIX_FMT_RGBX32: descr = "32-bit RGBX 8-8-8-8"; break; + case V4L2_PIX_FMT_BGR48_12: descr = "12-bit Depth BGR"; break; case V4L2_PIX_FMT_GREY: descr = "8-bit Greyscale"; break; case V4L2_PIX_FMT_Y4: descr = "4-bit Greyscale"; break; case V4L2_PIX_FMT_Y6: descr = "6-bit Greyscale"; break; diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index f3ee34d37e75..a8b7071e0416 100644 --- 
a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -577,6 +577,9 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_ARGB32 v4l2_fourcc('B', 'A', '2', '4') /* 32 ARGB-8-8-8-8 */ #define V4L2_PIX_FMT_XRGB32 v4l2_fourcc('B', 'X', '2', '4') /* 32 XRGB-8-8-8-8 */ +/* RGB formats (6 or 8 bytes per pixel) */ +#define V4L2_PIX_FMT_BGR48_12 v4l2_fourcc('B', '3', '1', '2') /* 48 BGR 12-bit per component */ + /* Grey formats */ #define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */ #define V4L2_PIX_FMT_Y4 v4l2_fourcc('Y', '0', '4', ' ') /* 4 Greyscale */ From 126ef64cbaae66a10171e8aecbe4327f677e0ab2 Mon Sep 17 00:00:00 2001 From: Ming Qian Date: Wed, 22 Mar 2023 05:13:09 +0000 Subject: [PATCH 086/163] UPSTREAM: media: Add ABGR64_12 video format ABGR64_12 is a reversed RGB format with alpha channel last, 12 bits per component like ABGR32, expanded to 16bits. Data in the 12 high bits, zeros in the 4 low bits, arranged in little endian order. Bug: 293213303 Change-Id: Idc4e1100c9e2134a48b594151e3398f6436b010d (cherry picked from commit 302b988ca03d83da0a7e006a57efda646c30f978) Signed-off-by: Ming Qian Signed-off-by: Hans Verkuil Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Jindong Yue --- Documentation/userspace-api/media/v4l/pixfmt-rgb.rst | 9 +++++++++ drivers/media/v4l2-core/v4l2-common.c | 1 + drivers/media/v4l2-core/v4l2-ioctl.c | 1 + include/uapi/linux/videodev2.h | 1 + 4 files changed, 12 insertions(+) diff --git a/Documentation/userspace-api/media/v4l/pixfmt-rgb.rst b/Documentation/userspace-api/media/v4l/pixfmt-rgb.rst index aadcfeea8b06..4b8cbbc77b1b 100644 --- a/Documentation/userspace-api/media/v4l/pixfmt-rgb.rst +++ b/Documentation/userspace-api/media/v4l/pixfmt-rgb.rst @@ -791,6 +791,15 @@ arranged in little endian order. - G\ :sub:`15-4` - R\ :sub:`15-4` - + * .. _V4L2-PIX-FMT-ABGR64-12: + + - ``V4L2_PIX_FMT_ABGR64_12`` + - 'B412' + + - B\ :sub:`15-4` + - G\ :sub:`15-4` + - R\ :sub:`15-4` + - A\ :sub:`15-4` .. 
raw:: latex diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c index 16d3c91c7da2..3c5ab5ecd678 100644 --- a/drivers/media/v4l2-core/v4l2-common.c +++ b/drivers/media/v4l2-core/v4l2-common.c @@ -253,6 +253,7 @@ const struct v4l2_format_info *v4l2_format_info(u32 format) { .format = V4L2_PIX_FMT_RGB555, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_BGR666, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 4, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 }, { .format = V4L2_PIX_FMT_BGR48_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 6, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 }, + { .format = V4L2_PIX_FMT_ABGR64_12, .pixel_enc = V4L2_PIXEL_ENC_RGB, .mem_planes = 1, .comp_planes = 1, .bpp = { 8, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 }, /* YUV packed formats */ { .format = V4L2_PIX_FMT_YUYV, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 2, 0, 0, 0 }, .hdiv = 2, .vdiv = 1 }, diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index 0b00530a29ac..3a4785b3b59a 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -1305,6 +1305,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt) case V4L2_PIX_FMT_RGBA32: descr = "32-bit RGBA 8-8-8-8"; break; case V4L2_PIX_FMT_RGBX32: descr = "32-bit RGBX 8-8-8-8"; break; case V4L2_PIX_FMT_BGR48_12: descr = "12-bit Depth BGR"; break; + case V4L2_PIX_FMT_ABGR64_12: descr = "12-bit Depth BGRA"; break; case V4L2_PIX_FMT_GREY: descr = "8-bit Greyscale"; break; case V4L2_PIX_FMT_Y4: descr = "4-bit Greyscale"; break; case V4L2_PIX_FMT_Y6: descr = "6-bit Greyscale"; break; diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index a8b7071e0416..b43f3bbd55ee 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ 
-579,6 +579,7 @@ struct v4l2_pix_format { /* RGB formats (6 or 8 bytes per pixel) */ #define V4L2_PIX_FMT_BGR48_12 v4l2_fourcc('B', '3', '1', '2') /* 48 BGR 12-bit per component */ +#define V4L2_PIX_FMT_ABGR64_12 v4l2_fourcc('B', '4', '1', '2') /* 64 BGRA 12-bit per component */ /* Grey formats */ #define V4L2_PIX_FMT_GREY v4l2_fourcc('G', 'R', 'E', 'Y') /* 8 Greyscale */ From ad0b008167cb40ec67be83937739e6ae5447e4bf Mon Sep 17 00:00:00 2001 From: sunshijie Date: Thu, 27 Jul 2023 11:50:56 +0800 Subject: [PATCH 087/163] FROMGIT: erofs: fix wrong primary bvec selection on deduplicated extents When handling deduplicated compressed data, there can be multiple decompressed extents pointing to the same compressed data in one shot. In such cases, the bvecs which belong to the longest extent will be selected as the primary bvecs for real decompressors to decode and the other duplicated bvecs will be directly copied from the primary bvecs. Previously, only relative offsets of the longest extent were checked to decompress the primary bvecs. On rare occasions, it can be incorrect if there are several extents with the same start relative offset. As a result, some short bvecs could be selected for decompression and then cause data corruption. For example, as Shijie Sun reported off-list, considering the following extents of a file: 117: 903345.. 915250 | 11905 : 385024.. 389120 | 4096 ... 119: 919729.. 930323 | 10594 : 385024.. 389120 | 4096 ... 124: 968881.. 980786 | 11905 : 385024.. 389120 | 4096 The start relative offset is the same: 2225, but extent 119 (919729.. 930323) is shorter than the others. Let's restrict the bvec length in addition to the start offset if bvecs are not full. 
Reported-by: Shijie Sun Fixes: 5c2a64252c5d ("erofs: introduce partial-referenced pclusters") Tested-by Shijie Sun Reviewed-by: Yue Hu Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20230719065459.60083-1-hsiangkao@linux.alibaba.com (cherry picked from commit 7d15c91a75aae55767f368e8abbabd7cedf4ec94 https://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git dev) Bug: 293245292 Change-Id: Ic8ded9b2d3592ffd0863f4f0d2ac4ae6a1821a1b Signed-off-by: sunshijie --- fs/erofs/zdata.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index bc4971ee26d2..3d1b88efb075 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1013,9 +1013,11 @@ static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be, struct z_erofs_bvec *bvec) { struct z_erofs_bvec_item *item; + unsigned int pgnr; - if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK)) { - unsigned int pgnr; + if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK) && + (bvec->end == PAGE_SIZE || + bvec->offset + bvec->end == be->pcl->length)) { pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT; DBG_BUGON(pgnr >= be->nr_pages); From 3bd3d137019f24d73379e14e9a5384cd72d567a6 Mon Sep 17 00:00:00 2001 From: Jindong Yue Date: Tue, 1 Aug 2023 17:49:42 +0900 Subject: [PATCH 088/163] ANDROID: ABI: Update symbol list for imx 2 function symbol(s) added 'bool kthread_freezable_should_stop(bool*)' 'int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings*, const struct v4l2_dv_timings_cap*, v4l2_check_dv_timings_fnc*, void*)' Bug: 283014063 Change-Id: Ib4f8f9c67277501dcaa2fa5d8f2867d5fa670de3 Signed-off-by: Jindong Yue --- android/abi_gki_aarch64.stg | 59 +++++++++++++++++++++++++++++++++++++ android/abi_gki_aarch64_imx | 19 ++++++++++++ 2 files changed, 78 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 8e0c51ae4146..ab918ec5c587 100644 --- a/android/abi_gki_aarch64.stg +++ 
b/android/abi_gki_aarch64.stg @@ -18091,6 +18091,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9de31a69 } +pointer_reference { + id: 0x2de928d9 + kind: POINTER + pointee_type_id: 0x9de445fa +} pointer_reference { id: 0x2de9a54b kind: POINTER @@ -26806,6 +26811,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xc5725d9a } +pointer_reference { + id: 0x3bcd0c02 + kind: POINTER + pointee_type_id: 0xc574d697 +} pointer_reference { id: 0x3bd2bf42 kind: POINTER @@ -30356,6 +30366,11 @@ typedef { name: "uuid_t" referred_type_id: 0x0b526877 } +typedef { + id: 0x9de445fa + name: "v4l2_check_dv_timings_fnc" + referred_type_id: 0xf2553153 +} typedef { id: 0x7c355df7 name: "v4l2_ctrl_notify_fnc" @@ -30711,6 +30726,11 @@ qualified { qualifier: CONST qualified_type_id: 0x658ec0e0 } +qualified { + id: 0xc574d697 + qualifier: CONST + qualified_type_id: 0x6594ecd4 +} qualified { id: 0xc596e113 qualifier: CONST @@ -295072,6 +295092,14 @@ function { return_type_id: 0x6720d32f parameter_id: 0x34e62f02 } +function { + id: 0x90bd2dd7 + return_type_id: 0x6720d32f + parameter_id: 0x376789dd + parameter_id: 0x3bcd0c02 + parameter_id: 0x2de928d9 + parameter_id: 0x18bd6530 +} function { id: 0x90bfa7c3 return_type_id: 0x6720d32f @@ -318270,6 +318298,12 @@ function { return_type_id: 0x6d7f5ff6 parameter_id: 0x33d0e528 } +function { + id: 0xf2553153 + return_type_id: 0x6d7f5ff6 + parameter_id: 0x324e7f0f + parameter_id: 0x18bd6530 +} function { id: 0xf25d597f return_type_id: 0x6d7f5ff6 @@ -319009,6 +319043,11 @@ function { parameter_id: 0x11cfee5a parameter_id: 0x064d6086 } +function { + id: 0xfad7a092 + return_type_id: 0x6d7f5ff6 + parameter_id: 0x11cfee5a +} function { id: 0xfaddfa97 return_type_id: 0x6d7f5ff6 @@ -352419,6 +352458,15 @@ elf_symbol { type_id: 0x1dbb8bb2 full_name: "kthread_flush_worker" } +elf_symbol { + id: 0x2fbecafd + name: "kthread_freezable_should_stop" + is_defined: true + symbol_type: FUNCTION + crc: 0xca7d8764 + type_id: 0xfad7a092 + full_name: 
"kthread_freezable_should_stop" +} elf_symbol { id: 0x49232ca9 name: "kthread_mod_delayed_work" @@ -376938,6 +376986,15 @@ elf_symbol { type_id: 0x10e93841 full_name: "v4l2_device_unregister_subdev" } +elf_symbol { + id: 0x5c266e47 + name: "v4l2_enum_dv_timings_cap" + is_defined: true + symbol_type: FUNCTION + crc: 0x922ecd29 + type_id: 0x90bd2dd7 + full_name: "v4l2_enum_dv_timings_cap" +} elf_symbol { id: 0xd40ec4d6 name: "v4l2_event_dequeue" @@ -384066,6 +384123,7 @@ interface { symbol_id: 0xeae01788 symbol_id: 0x84839142 symbol_id: 0xa9c37a1d + symbol_id: 0x2fbecafd symbol_id: 0x49232ca9 symbol_id: 0xec609d3e symbol_id: 0x44f92a6d @@ -386791,6 +386849,7 @@ interface { symbol_id: 0xdc3fca57 symbol_id: 0x23051526 symbol_id: 0xad9b8781 + symbol_id: 0x5c266e47 symbol_id: 0xd40ec4d6 symbol_id: 0xcd00be9c symbol_id: 0xef302a24 diff --git a/android/abi_gki_aarch64_imx b/android/abi_gki_aarch64_imx index ac16191a3545..478cb5cab475 100644 --- a/android/abi_gki_aarch64_imx +++ b/android/abi_gki_aarch64_imx @@ -822,6 +822,7 @@ flush_delayed_work flush_work __flush_workqueue + __folio_put fortify_panic fput free_candev @@ -969,7 +970,9 @@ i2c_smbus_read_i2c_block_data i2c_smbus_write_byte i2c_smbus_write_byte_data + __i2c_smbus_xfer i2c_smbus_xfer + __i2c_transfer i2c_transfer i2c_transfer_buffer_flags i2c_unregister_device @@ -1143,6 +1146,7 @@ kstrtoull kthread_bind kthread_create_on_node + kthread_freezable_should_stop kthread_park kthread_parkme kthread_should_park @@ -1324,6 +1328,9 @@ nsecs_to_jiffies ns_to_timespec64 __num_online_cpus + nvmem_cell_get + nvmem_cell_put + nvmem_cell_read nvmem_cell_read_u32 nvmem_cell_read_u64 nvmem_device_read @@ -1377,6 +1384,7 @@ of_gen_pool_get of_get_child_by_name of_get_compatible_child + of_get_cpu_node of_get_display_timing of_get_i2c_adapter_by_node of_get_mac_address @@ -1442,6 +1450,8 @@ of_usb_update_otg_caps oops_in_progress open_candev + page_pinner_inited + __page_pinner_put_page page_pool_alloc_pages page_pool_create 
page_pool_destroy @@ -1586,6 +1596,7 @@ platform_irqchip_probe platform_irq_count platform_msi_create_irq_domain + pm_genpd_add_subdomain pm_genpd_init pm_genpd_remove pm_genpd_remove_device @@ -1597,6 +1608,7 @@ pm_runtime_forbid pm_runtime_force_resume pm_runtime_force_suspend + pm_runtime_get_if_active __pm_runtime_idle pm_runtime_no_callbacks __pm_runtime_resume @@ -1796,10 +1808,14 @@ rtc_time64_to_tm rtc_tm_to_time64 rtc_update_irq + rt_mutex_lock + rt_mutex_trylock + rt_mutex_unlock rtnl_is_locked rtnl_lock rtnl_unlock sched_clock + sched_setattr_nocheck sched_set_fifo_low schedule schedule_hrtimeout @@ -2248,6 +2264,7 @@ __v4l2_device_register_subdev_nodes v4l2_device_unregister v4l2_device_unregister_subdev + v4l2_enum_dv_timings_cap v4l2_event_dequeue v4l2_event_pending v4l2_event_queue @@ -2298,6 +2315,7 @@ v4l2_m2m_unregister_media_controller v4l2_m2m_update_start_streaming_state v4l2_m2m_update_stop_streaming_state + v4l2_match_dv_timings v4l2_s_parm_cap v4l2_src_change_event_subscribe v4l2_subdev_call_wrappers @@ -2414,6 +2432,7 @@ xdp_do_redirect xdp_master_redirect xdp_return_frame + xdp_return_frame_rx_napi xdp_rxq_info_is_reg __xdp_rxq_info_reg xdp_rxq_info_reg_mem_model From 29e2f3e3d1968adeff803689cb21434b9f12e3be Mon Sep 17 00:00:00 2001 From: Giuliano Procida Date: Wed, 2 Aug 2023 15:48:56 +0100 Subject: [PATCH 089/163] ANDROID: ABI: Update STG ABI to format version 2 If you have trouble reading this new file format, please refresh your prebuilt version of STG with repo sync. 
Bug: 294213765 Change-Id: I4d7ee716231956c5f4da1343cc0db5170aaaa3b1 Signed-off-by: Giuliano Procida --- android/abi_gki_aarch64.stg | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index ab918ec5c587..85fe49d8c144 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -1,10 +1,12 @@ -version: 0x00000001 +version: 0x00000002 root_id: 0x84ea5130 -void { +special { id: 0x48b5725f + kind: VOID } -variadic { +special { id: 0xa52a0930 + kind: VARIADIC } pointer_reference { id: 0x0006db1d From c3d26e2b5aae1e83eeb294a487aadad08453ef43 Mon Sep 17 00:00:00 2001 From: Peifeng Li Date: Tue, 13 Sep 2022 19:07:41 +0800 Subject: [PATCH 090/163] ANDROID: vendor_hooks: Add hooks for lookaround Add hooks for support lookaround in memory reclamation. - android_vh_test_clear_look_around_ref - android_vh_check_folio_look_around_ref - android_vh_look_around_migrate_folio - android_vh_look_around Bug: 292051411 Signed-off-by: Peifeng Li Change-Id: I9a606ae71d2f1303df3b02403b30bc8fdc9d06dd (cherry picked from commit f50f24e781738c8e5aa9f285d8726202f33107d6) [huzhanyuan: changed page to folio where appropriate] --- drivers/android/vendor_hooks.c | 4 ++++ include/trace/hooks/mm.h | 11 +++++++++++ include/trace/hooks/vmscan.h | 3 +++ mm/migrate.c | 6 ++++++ mm/page_alloc.c | 2 ++ mm/rmap.c | 1 + mm/vmscan.c | 5 +++++ 7 files changed, 32 insertions(+) diff --git a/drivers/android/vendor_hooks.c b/drivers/android/vendor_hooks.c index 35baee2a710d..0b9d1866e38e 100644 --- a/drivers/android/vendor_hooks.c +++ b/drivers/android/vendor_hooks.c @@ -315,3 +315,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rmqueue_smallest_bypass); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_one_page_bypass); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_regmap_update); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_enable_thermal_genl_check); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_folio_look_around_ref); 
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_look_around); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_look_around_migrate_folio); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_test_clear_look_around_ref); diff --git a/include/trace/hooks/mm.h b/include/trace/hooks/mm.h index cf27ab461c4e..e49cc31ea70d 100644 --- a/include/trace/hooks/mm.h +++ b/include/trace/hooks/mm.h @@ -11,6 +11,7 @@ struct shmem_inode_info; struct folio; +struct page_vma_mapped_walk; DECLARE_RESTRICTED_HOOK(android_rvh_shmem_get_folio, TP_PROTO(struct shmem_inode_info *info, struct folio **folio), @@ -129,6 +130,16 @@ DECLARE_HOOK(android_vh_free_one_page_bypass, TP_PROTO(struct page *page, struct zone *zone, int order, int migratetype, int fpi_flags, bool *bypass), TP_ARGS(page, zone, order, migratetype, fpi_flags, bypass)); +DECLARE_HOOK(android_vh_test_clear_look_around_ref, + TP_PROTO(struct page *page), + TP_ARGS(page)); +DECLARE_HOOK(android_vh_look_around_migrate_folio, + TP_PROTO(struct folio *old_folio, struct folio *new_folio), + TP_ARGS(old_folio, new_folio)); +DECLARE_HOOK(android_vh_look_around, + TP_PROTO(struct page_vma_mapped_walk *pvmw, struct folio *folio, + struct vm_area_struct *vma, int *referenced), + TP_ARGS(pvmw, folio, vma, referenced)); #endif /* _TRACE_HOOK_MM_H */ diff --git a/include/trace/hooks/vmscan.h b/include/trace/hooks/vmscan.h index 0896b1134de5..a52ab44d135f 100644 --- a/include/trace/hooks/vmscan.h +++ b/include/trace/hooks/vmscan.h @@ -36,6 +36,9 @@ DECLARE_HOOK(android_vh_should_continue_reclaim, DECLARE_HOOK(android_vh_file_is_tiny_bypass, TP_PROTO(bool file_is_tiny, bool *bypass), TP_ARGS(file_is_tiny, bypass)); +DECLARE_HOOK(android_vh_check_folio_look_around_ref, + TP_PROTO(struct folio *folio, int *skip), + TP_ARGS(folio, skip)); #endif /* _TRACE_HOOK_VMSCAN_H */ /* This part must be outside protection */ #include diff --git a/mm/migrate.c b/mm/migrate.c index 5c61c3d5b646..ef490976c98e 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -56,6 +56,10 @@ #include 
+#undef CREATE_TRACE_POINTS +#include +#include + #include "internal.h" int isolate_movable_page(struct page *page, isolate_mode_t mode) @@ -554,6 +558,8 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio) if (folio_test_mappedtodisk(folio)) folio_set_mappedtodisk(newfolio); + trace_android_vh_look_around_migrate_folio(folio, newfolio); + /* Move dirty on pages not done by folio_migrate_mapping() */ if (folio_test_dirty(folio)) folio_set_dirty(newfolio); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index c6358579527e..d5ca46562e88 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -77,6 +77,7 @@ #include #include #include +#include #include #include @@ -2600,6 +2601,7 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags set_page_pfmemalloc(page); else clear_page_pfmemalloc(page); + trace_android_vh_test_clear_look_around_ref(page); } /* diff --git a/mm/rmap.c b/mm/rmap.c index 9cf4f09cd71d..d1603eb79818 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -826,6 +826,7 @@ static bool folio_referenced_one(struct folio *folio, } if (pvmw.pte) { + trace_android_vh_look_around(&pvmw, folio, vma, &referenced); if (lru_gen_enabled() && pte_young(*pvmw.pte)) { lru_gen_look_around(&pvmw); referenced++; diff --git a/mm/vmscan.c b/mm/vmscan.c index cd4323f336a6..c466a31736cb 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1468,6 +1468,11 @@ static enum folio_references folio_check_references(struct folio *folio, { int referenced_ptes, referenced_folio; unsigned long vm_flags; + int ret = 0; + + trace_android_vh_check_folio_look_around_ref(folio, &ret); + if (ret) + return ret; referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup, &vm_flags); From b283f9b41fc5f3032916637442e3e818ca42fcbc Mon Sep 17 00:00:00 2001 From: huzhanyuan Date: Fri, 28 Jul 2023 20:44:15 +0800 Subject: [PATCH 091/163] ANDROID: oplus: Update the ABI xml and symbol list INFO: ABI DIFFERENCES HAVE BEEN DETECTED! 
INFO: 4 function symbol(s) added 'int __traceiter_android_vh_check_folio_look_around_ref(void*, struct folio*,int*)' 'int __traceiter_android_vh_look_around(void*, struct page_vma_mapped_walk*,struct folio*, struct vm_area_struct*, int*)' 'int __traceiter_android_vh_look_around_migrate_folio(void*, struct folio*, struct folio*)' 'int __traceiter_android_vh_test_clear_look_around_ref(void*, struct page*)' 4 variable symbol(s) added 'struct tracepoint __tracepoint_android_vh_check_folio_look_around_ref' 'struct tracepoint __tracepoint_android_vh_look_around' 'struct tracepoint __tracepoint_android_vh_look_around_migrate_folio' 'struct tracepoint __tracepoint_android_vh_test_clear_look_around_ref' Bug: 292051411 Change-Id: I25fff4eefc6773d3e1130bd0ff3f3cc21d6c0964 signed-off-by: Zhanyuan Hu --- android/abi_gki_aarch64.stg | 172 ++++++++++++++++++++++++++++++++++ android/abi_gki_aarch64_oplus | 8 ++ 2 files changed, 180 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 85fe49d8c144..df6a890d2ac2 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -8483,6 +8483,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x79e2d3b1 } +pointer_reference { + id: 0x14f37d47 + kind: POINTER + pointee_type_id: 0x798d1382 +} pointer_reference { id: 0x14fb0ab0 kind: POINTER @@ -42793,6 +42798,12 @@ member { type_id: 0x33756485 offset: 512 } +member { + id: 0x30e26822 + name: "address" + type_id: 0x33756485 + offset: 256 +} member { id: 0x30e26c68 name: "address" @@ -131796,6 +131807,12 @@ member { type_id: 0xfc0e1dbd offset: 768 } +member { + id: 0xe3b1b19e + name: "nr_pages" + type_id: 0x33756485 + offset: 64 +} member { id: 0xe3b1b2b8 name: "nr_pages" @@ -142461,6 +142478,11 @@ member { type_id: 0x03913382 offset: 52352 } +member { + id: 0x64bb7964 + name: "pfn" + type_id: 0x33756485 +} member { id: 0xeb463452 name: "pfn_base" @@ -144349,6 +144371,12 @@ member { type_id: 0x21082bfc offset: 384 } +member { + id: 
0x56639dd1 + name: "pmd" + type_id: 0x21082bfc + offset: 320 +} member { id: 0x569cedab name: "pmd" @@ -150421,11 +150449,23 @@ member { type_id: 0x32bee099 offset: 704 } +member { + id: 0xa17fe2dc + name: "pte" + type_id: 0x32bee099 + offset: 384 +} member { id: 0xa18e706b name: "pte" type_id: 0xc32dc55c } +member { + id: 0xce4422cd + name: "ptl" + type_id: 0x3654c061 + offset: 448 +} member { id: 0xce442d14 name: "ptl" @@ -196497,6 +196537,12 @@ member { type_id: 0x04fd6761 offset: 1280 } +member { + id: 0x239192da + name: "vma" + type_id: 0x0a134144 + offset: 192 +} member { id: 0x23919a13 name: "vma" @@ -236307,6 +236353,23 @@ struct_union { member_id: 0xb59c78b7 } } +struct_union { + id: 0x798d1382 + kind: STRUCT + name: "page_vma_mapped_walk" + definition { + bytesize: 72 + member_id: 0x64bb7964 + member_id: 0xe3b1b19e + member_id: 0xadfdef1c + member_id: 0x239192da + member_id: 0x30e26822 + member_id: 0x56639dd1 + member_id: 0xa17fe2dc + member_id: 0xce4422cd + member_id: 0x2d2d0a20 + } +} struct_union { id: 0x3844dda9 kind: STRUCT @@ -305815,6 +305878,13 @@ function { parameter_id: 0x1b8590a8 parameter_id: 0x107606b0 } +function { + id: 0x9b222516 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x2170d06d + parameter_id: 0x2170d06d +} function { id: 0x9b2239e7 return_type_id: 0x6720d32f @@ -305961,6 +306031,13 @@ function { parameter_id: 0x21069feb parameter_id: 0x1a6ea392 } +function { + id: 0x9b2eaf21 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x2170d06d + parameter_id: 0x13580d6c +} function { id: 0x9b2eba1d return_type_id: 0x6720d32f @@ -307749,6 +307826,12 @@ function { parameter_id: 0x11cfee5a parameter_id: 0x11cfee5a } +function { + id: 0x9bb5b719 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x06835e9c +} function { id: 0x9bb5c5c3 return_type_id: 0x6720d32f @@ -309018,6 +309101,15 @@ function { parameter_id: 0x1b2ca025 parameter_id: 0x2e2c982d } +function { + id: 
0x9bf40739 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x14f37d47 + parameter_id: 0x2170d06d + parameter_id: 0x0a134144 + parameter_id: 0x13580d6c +} function { id: 0x9bf6c118 return_type_id: 0x6720d32f @@ -323956,6 +324048,15 @@ elf_symbol { type_id: 0x9b4b913b full_name: "__traceiter_android_vh_check_file_open" } +elf_symbol { + id: 0x6aac0cf8 + name: "__traceiter_android_vh_check_folio_look_around_ref" + is_defined: true + symbol_type: FUNCTION + crc: 0xa2856bd1 + type_id: 0x9b2eaf21 + full_name: "__traceiter_android_vh_check_folio_look_around_ref" +} elf_symbol { id: 0x96d1c9c4 name: "__traceiter_android_vh_check_hibernation_swap" @@ -324460,6 +324561,24 @@ elf_symbol { type_id: 0x9a36ff29 full_name: "__traceiter_android_vh_kswapd_per_node" } +elf_symbol { + id: 0xe19d2bf8 + name: "__traceiter_android_vh_look_around" + is_defined: true + symbol_type: FUNCTION + crc: 0x4d18aae7 + type_id: 0x9bf40739 + full_name: "__traceiter_android_vh_look_around" +} +elf_symbol { + id: 0x993f42ff + name: "__traceiter_android_vh_look_around_migrate_folio" + is_defined: true + symbol_type: FUNCTION + crc: 0xbed1988a + type_id: 0x9b222516 + full_name: "__traceiter_android_vh_look_around_migrate_folio" +} elf_symbol { id: 0xfb6a92a8 name: "__traceiter_android_vh_madvise_cold_pageout_skip" @@ -325153,6 +325272,15 @@ elf_symbol { type_id: 0x9be67f35 full_name: "__traceiter_android_vh_task_blocks_on_rtmutex" } +elf_symbol { + id: 0x48f0cf25 + name: "__traceiter_android_vh_test_clear_look_around_ref" + is_defined: true + symbol_type: FUNCTION + crc: 0x6a7e50c3 + type_id: 0x9bb5b719 + full_name: "__traceiter_android_vh_test_clear_look_around_ref" +} elf_symbol { id: 0x6befbf23 name: "__traceiter_android_vh_thermal_power_cap" @@ -327160,6 +327288,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_vh_check_file_open" } +elf_symbol { + id: 0xca5cbc9a + name: "__tracepoint_android_vh_check_folio_look_around_ref" + is_defined: true + 
symbol_type: OBJECT + crc: 0xdaaccf03 + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_check_folio_look_around_ref" +} elf_symbol { id: 0xaa072f92 name: "__tracepoint_android_vh_check_hibernation_swap" @@ -327664,6 +327801,24 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_vh_kswapd_per_node" } +elf_symbol { + id: 0xda2d53f2 + name: "__tracepoint_android_vh_look_around" + is_defined: true + symbol_type: OBJECT + crc: 0x738994e9 + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_look_around" +} +elf_symbol { + id: 0x50a5a949 + name: "__tracepoint_android_vh_look_around_migrate_folio" + is_defined: true + symbol_type: OBJECT + crc: 0x8b32227d + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_look_around_migrate_folio" +} elf_symbol { id: 0xcb34ca12 name: "__tracepoint_android_vh_madvise_cold_pageout_skip" @@ -328357,6 +328512,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_vh_task_blocks_on_rtmutex" } +elf_symbol { + id: 0x4ef2c337 + name: "__tracepoint_android_vh_test_clear_look_around_ref" + is_defined: true + symbol_type: OBJECT + crc: 0x4ffca4ae + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_test_clear_look_around_ref" +} elf_symbol { id: 0x6f25dd05 name: "__tracepoint_android_vh_thermal_power_cap" @@ -380959,6 +381123,7 @@ interface { symbol_id: 0x33c527ab symbol_id: 0x5012fcd8 symbol_id: 0x67bab494 + symbol_id: 0x6aac0cf8 symbol_id: 0x96d1c9c4 symbol_id: 0x42428033 symbol_id: 0x005c7625 @@ -381015,6 +381180,8 @@ interface { symbol_id: 0x4dca46cc symbol_id: 0xf83fbd26 symbol_id: 0x18fde973 + symbol_id: 0xe19d2bf8 + symbol_id: 0x993f42ff symbol_id: 0xfb6a92a8 symbol_id: 0xa94ef105 symbol_id: 0x0e1f9e23 @@ -381092,6 +381259,7 @@ interface { symbol_id: 0x2ecf85e9 symbol_id: 0x34a01a22 symbol_id: 0xdd9dd67b + symbol_id: 0x48f0cf25 symbol_id: 0x6befbf23 symbol_id: 0x226cc38b symbol_id: 0xeecc1529 @@ -381315,6 +381483,7 @@ interface { symbol_id: 0x6f146fe1 symbol_id: 0x678bb5ba 
symbol_id: 0xf1ec5ef2 + symbol_id: 0xca5cbc9a symbol_id: 0xaa072f92 symbol_id: 0x9620eac1 symbol_id: 0x5cc4ca5b @@ -381371,6 +381540,8 @@ interface { symbol_id: 0x62c13726 symbol_id: 0xafbca760 symbol_id: 0x586a06d1 + symbol_id: 0xda2d53f2 + symbol_id: 0x50a5a949 symbol_id: 0xcb34ca12 symbol_id: 0x2f768c2b symbol_id: 0xc34a5545 @@ -381448,6 +381619,7 @@ interface { symbol_id: 0xefb9e5a3 symbol_id: 0x3fe0157c symbol_id: 0xe5bf742d + symbol_id: 0x4ef2c337 symbol_id: 0x6f25dd05 symbol_id: 0xa5c71571 symbol_id: 0xfa3284c7 diff --git a/android/abi_gki_aarch64_oplus b/android/abi_gki_aarch64_oplus index ea61781c1543..afe0f5c8aa05 100644 --- a/android/abi_gki_aarch64_oplus +++ b/android/abi_gki_aarch64_oplus @@ -117,8 +117,11 @@ __traceiter_android_vh_binder_thread_release __traceiter_android_vh_binder_wait_for_work __traceiter_android_vh_cgroup_set_task + __traceiter_android_vh_check_folio_look_around_ref __traceiter_android_vh_dup_task_struct __traceiter_android_vh_exit_signal + __traceiter_android_vh_look_around + __traceiter_android_vh_look_around_migrate_folio __traceiter_android_vh_mem_cgroup_id_remove __traceiter_android_vh_mem_cgroup_css_offline __traceiter_android_vh_mem_cgroup_css_online @@ -154,6 +157,7 @@ __traceiter_sched_stat_wait __traceiter_sched_waking __traceiter_task_rename + __traceiter_android_vh_test_clear_look_around_ref __tracepoint_android_rvh_post_init_entity_util_avg __tracepoint_android_rvh_rtmutex_force_update __tracepoint_android_vh_account_process_tick_gran @@ -175,6 +179,7 @@ __tracepoint_android_vh_binder_thread_release __tracepoint_android_vh_binder_wait_for_work __tracepoint_android_vh_cgroup_set_task + __tracepoint_android_vh_check_folio_look_around_ref __tracepoint_android_vh_do_futex __tracepoint_android_vh_dup_task_struct __tracepoint_android_vh_exit_signal @@ -190,6 +195,8 @@ __tracepoint_android_vh_futex_wake_traverse_plist __tracepoint_android_vh_futex_wake_up_q_finish __tracepoint_android_vh_irqtime_account_process_tick + 
__tracepoint_android_vh_look_around + __tracepoint_android_vh_look_around_migrate_folio __tracepoint_android_vh_mutex_can_spin_on_owner __tracepoint_android_vh_mutex_opt_spin_finish __tracepoint_android_vh_mutex_opt_spin_start @@ -210,6 +217,7 @@ __tracepoint_android_vh_shrink_node_memcgs __tracepoint_android_vh_sync_txn_recvd __tracepoint_android_vh_task_blocks_on_rtmutex + __tracepoint_android_vh_test_clear_look_around_ref __tracepoint_block_bio_queue __tracepoint_block_getrq __tracepoint_block_rq_complete From 03812b904e8d763f26573753e5582c7fb847ebd4 Mon Sep 17 00:00:00 2001 From: Jaewon Kim Date: Wed, 2 Aug 2023 18:25:21 +0900 Subject: [PATCH 092/163] ANDROID: ABI: update symbol list for galaxy INFO: 1 function symbol(s) added 'int cleancache_register_ops(const struct cleancache_ops*) Bug: 294177078 Change-Id: Ic22ddae4e92896ed28bc876d98969c6c3e94cb9d Signed-off-by: Jaewon Kim --- android/abi_gki_aarch64.stg | 200 +++++++++++++++++++++++++++++++++ android/abi_gki_aarch64_galaxy | 1 + 2 files changed, 201 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index df6a890d2ac2..66f8158b4269 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -2158,6 +2158,21 @@ pointer_reference { kind: POINTER pointee_type_id: 0x0a52df14 } +pointer_reference { + id: 0x080d391b + kind: POINTER + pointee_type_id: 0x0a7402f1 +} +pointer_reference { + id: 0x080d3f98 + kind: POINTER + pointee_type_id: 0x0a7418fc +} +pointer_reference { + id: 0x080e08ce + kind: POINTER + pointee_type_id: 0x0a78c5a5 +} pointer_reference { id: 0x080fbe64 kind: POINTER @@ -2333,6 +2348,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x0942f1ca } +pointer_reference { + id: 0x08c420f1 + kind: POINTER + pointee_type_id: 0x09506558 +} pointer_reference { id: 0x08e43718 kind: POINTER @@ -11483,6 +11503,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xa0815516 } +pointer_reference { + id: 0x22b357e9 + kind: POINTER + pointee_type_id: 
0xa08db938 +} pointer_reference { id: 0x22b3ece7 kind: POINTER @@ -13143,6 +13168,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x86a9103f } +pointer_reference { + id: 0x2b4b15b4 + kind: POINTER + pointee_type_id: 0x876cb04d +} pointer_reference { id: 0x2b584612 kind: POINTER @@ -15563,6 +15593,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x99c11430 } +pointer_reference { + id: 0x2ce2190b + kind: POINTER + pointee_type_id: 0x99c882b3 +} pointer_reference { id: 0x2ce315c4 kind: POINTER @@ -22563,6 +22598,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xe2a728bc } +pointer_reference { + id: 0x323d798e + kind: POINTER + pointee_type_id: 0xe2b500a6 +} pointer_reference { id: 0x3240bbe7 kind: POINTER @@ -29223,6 +29263,11 @@ typedef { name: "hfn_t" referred_type_id: 0x92233392 } +typedef { + id: 0xee72cbfc + name: "ino_t" + referred_type_id: 0x21d43a7b +} typedef { id: 0x7e8f5c14 name: "int32" @@ -32338,6 +32383,11 @@ qualified { qualifier: CONST qualified_type_id: 0xfa2455af } +qualified { + id: 0xe2b500a6 + qualifier: CONST + qualified_type_id: 0xfa93b413 +} qualified { id: 0xe2cebd77 qualifier: CONST @@ -85010,6 +85060,11 @@ member { type_id: 0x318f8bcb offset: 256 } +member { + id: 0x4e824c05 + name: "fh" + type_id: 0x982afc69 +} member { id: 0x2db4ae9c name: "fh_list" @@ -93177,6 +93232,12 @@ member { type_id: 0x0cbb9c80 offset: 320 } +member { + id: 0xcf06c106 + name: "get_page" + type_id: 0x2b4b15b4 + offset: 128 +} member { id: 0xacf61961 name: "get_params" @@ -103474,6 +103535,11 @@ member { type_id: 0x295c7202 offset: 128 } +member { + id: 0xedc364a3 + name: "init_fs" + type_id: 0x22b357e9 +} member { id: 0x35f4d7c6 name: "init_fs_context" @@ -103587,6 +103653,12 @@ member { type_id: 0x3292450a offset: 64 } +member { + id: 0xcc9b87bc + name: "init_shared_fs" + type_id: 0x2ce2190b + offset: 64 +} member { id: 0x7ab1d042 name: "init_speed" @@ -103915,6 +103987,11 @@ member { type_id: 0xd5df6730 offset: 256 } +member { + id: 
0x0ca7cb72 + name: "ino" + type_id: 0xee72cbfc +} member { id: 0x0cdb9a21 name: "ino" @@ -105094,6 +105171,18 @@ member { type_id: 0x0c5a56fb offset: 512 } +member { + id: 0x6a15223b + name: "invalidate_fs" + type_id: 0x08c420f1 + offset: 384 +} +member { + id: 0x6f43a794 + name: "invalidate_inode" + type_id: 0x080e08ce + offset: 320 +} member { id: 0x17dabd89 name: "invalidate_lock" @@ -105106,6 +105195,12 @@ member { type_id: 0x475137a2 offset: 576 } +member { + id: 0x107f64d3 + name: "invalidate_page" + type_id: 0x080d3f98 + offset: 256 +} member { id: 0xa4356576 name: "invalidate_seq" @@ -108938,6 +109033,11 @@ member { type_id: 0x92233392 offset: 640 } +member { + id: 0x20c5e9f7 + name: "key" + type_id: 0x93e3596e +} member { id: 0x20c661f5 name: "key" @@ -150888,6 +150988,12 @@ member { type_id: 0x2c0bb831 offset: 64 } +member { + id: 0x68c6881b + name: "put_page" + type_id: 0x080d391b + offset: 192 +} member { id: 0xee3c87e4 name: "put_port" @@ -189965,6 +190071,11 @@ member { type_id: 0x5574fba9 offset: 128 } +member { + id: 0xec2a9cea + name: "u" + type_id: 0x55c087bf +} member { id: 0xec2ac37d name: "u" @@ -205792,6 +205903,16 @@ struct_union { member_id: 0xa0d5322b } } +struct_union { + id: 0x55c087bf + kind: UNION + definition { + bytesize: 24 + member_id: 0x0ca7cb72 + member_id: 0x4e824c05 + member_id: 0x20c5e9f7 + } +} struct_union { id: 0x56037e9c kind: UNION @@ -213144,6 +213265,30 @@ struct_union { member_id: 0xd0a3be49 } } +struct_union { + id: 0xca283f54 + kind: STRUCT + name: "cleancache_filekey" + definition { + bytesize: 24 + member_id: 0xec2a9cea + } +} +struct_union { + id: 0xfa93b413 + kind: STRUCT + name: "cleancache_ops" + definition { + bytesize: 56 + member_id: 0xedc364a3 + member_id: 0xcc9b87bc + member_id: 0xcf06c106 + member_id: 0x68c6881b + member_id: 0x107f64d3 + member_id: 0x6f43a794 + member_id: 0x6a15223b + } +} struct_union { id: 0xdd7b47eb kind: STRUCT @@ -277979,6 +278124,27 @@ function { parameter_id: 0x091f4a0b 
parameter_id: 0x3286774f } +function { + id: 0x0a7402f1 + return_type_id: 0x48b5725f + parameter_id: 0x6720d32f + parameter_id: 0xca283f54 + parameter_id: 0x33756485 + parameter_id: 0x06835e9c +} +function { + id: 0x0a7418fc + return_type_id: 0x48b5725f + parameter_id: 0x6720d32f + parameter_id: 0xca283f54 + parameter_id: 0x33756485 +} +function { + id: 0x0a78c5a5 + return_type_id: 0x48b5725f + parameter_id: 0x6720d32f + parameter_id: 0xca283f54 +} function { id: 0x0a9e8df2 return_type_id: 0x079ff791 @@ -293898,6 +294064,14 @@ function { parameter_id: 0x21003da7 parameter_id: 0x21530c77 } +function { + id: 0x876cb04d + return_type_id: 0x6720d32f + parameter_id: 0x6720d32f + parameter_id: 0xca283f54 + parameter_id: 0x33756485 + parameter_id: 0x06835e9c +} function { id: 0x87739e97 return_type_id: 0x3a583251 @@ -295465,6 +295639,11 @@ function { parameter_id: 0xf435685e parameter_id: 0x0ab1f084 } +function { + id: 0x910fbd4c + return_type_id: 0x6720d32f + parameter_id: 0x323d798e +} function { id: 0x91117703 return_type_id: 0x6720d32f @@ -304094,6 +304273,12 @@ function { return_type_id: 0x6720d32f parameter_id: 0x111ee6f8 } +function { + id: 0x99c882b3 + return_type_id: 0x6720d32f + parameter_id: 0x1e62d0f5 + parameter_id: 0xf435685e +} function { id: 0x99c885a2 return_type_id: 0x6720d32f @@ -315002,6 +315187,11 @@ function { parameter_id: 0x4585663f parameter_id: 0x33756485 } +function { + id: 0xa08db938 + return_type_id: 0x6720d32f + parameter_id: 0xf435685e +} function { id: 0xa08f5503 return_type_id: 0xfc0e1dbd @@ -333278,6 +333468,15 @@ elf_symbol { type_id: 0x1a0b4b72 full_name: "class_unregister" } +elf_symbol { + id: 0xd156aa2c + name: "cleancache_register_ops" + is_defined: true + symbol_type: FUNCTION + crc: 0x5fa588cd + type_id: 0x910fbd4c + full_name: "cleancache_register_ops" +} elf_symbol { id: 0x00d9abe7 name: "cleanup_srcu_struct" @@ -382149,6 +382348,7 @@ interface { symbol_id: 0xb29100f2 symbol_id: 0xef9eb644 symbol_id: 0xf91cb171 + symbol_id: 
0xd156aa2c symbol_id: 0x00d9abe7 symbol_id: 0xb63845e5 symbol_id: 0x5332f89b diff --git a/android/abi_gki_aarch64_galaxy b/android/abi_gki_aarch64_galaxy index ebe4fa10620a..9bb57a86455d 100644 --- a/android/abi_gki_aarch64_galaxy +++ b/android/abi_gki_aarch64_galaxy @@ -35,6 +35,7 @@ class_create_file_ns class_find_device class_remove_file_ns + cleancache_register_ops __const_udelay copy_from_kernel_nofault cpu_hwcaps From a7adb988970e13c42f3c7ca4fe157c35e8e885fe Mon Sep 17 00:00:00 2001 From: Kalesh Singh Date: Tue, 1 Aug 2023 19:56:02 -0700 Subject: [PATCH 093/163] FROMGIT: Multi-gen LRU: Fix per-zone reclaim MGLRU has a LRU list for each zone for each type (anon/file) in each generation: long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; The min_seq (oldest generation) can progress independently for each type but the max_seq (youngest generation) is shared for both anon and file. This is to maintain a common frame of reference. In order for eviction to advance the min_seq of a type, all the per-zone lists in the oldest generation of that type must be empty. The eviction logic only considers pages from eligible zones for eviction or promotion. scan_folios() { ... for (zone = sc->reclaim_idx; zone >= 0; zone--) { ... sort_folio(); // Promote ... isolate_folio(); // Evict } ... } Consider the system has the movable zone configured and default 4 generations. The current state of the system is as shown below (only illustrating one type for simplicity): Type: ANON Zone DMA32 Normal Movable Device Gen 0 0 0 4GB 0 Gen 1 0 1GB 1MB 0 Gen 2 1MB 4GB 1MB 0 Gen 3 1MB 1MB 1MB 0 Now consider there is a GFP_KERNEL allocation request (eligible zone index <= Normal), evict_folios() will return without doing any work since there are no pages to scan in the eligible zones of the oldest generation. Reclaim won't make progress until triggered from a ZONE_MOVABLE allocation request; which may not happen soon if there is a lot of free memory in the movable zone. 
This can lead to OOM kills, although there is 1GB pages in the Normal zone of Gen 1 that we have not yet tried to reclaim. This issue is not seen in the conventional active/inactive LRU since there are no per-zone lists. If there are no (not enough) folios to scan in the eligible zones, move folios from ineligible zone (zone_index > reclaim_index) to the next generation. This allows for the progression of min_seq and reclaiming from the next generation (Gen 1). Qualcomm, Mediatek and raspberrypi [1] discovered this issue independently. [1] https://github.com/raspberrypi/linux/issues/5395 Link: https://lkml.kernel.org/r/20230802025606.346758-1-kaleshsingh@google.com Fixes: ac35a4902374 ("mm: multi-gen LRU: minimal implementation") Change-Id: I5bbf44bd7ffe42f4347df4be59a75c1603c9b947 Signed-off-by: Kalesh Singh Reported-by: Charan Teja Kalla Reported-by: Lecopzer Chen Tested-by: AngeloGioacchino Del Regno [mediatek] Tested-by: Charan Teja Kalla Cc: Yu Zhao Cc: Barry Song Cc: Brian Geffon Cc: Jan Alexander Steffens (heftig) Cc: Matthias Brugger Cc: Oleksandr Natalenko Cc: Qi Zheng Cc: Steven Barrett Cc: Suleiman Souhlal Cc: Suren Baghdasaryan Cc: Aneesh Kumar K V Cc: Signed-off-by: Andrew Morton (cherry picked from commit 1462260adc41c5974362cb54ff577c2a15b8c7b2 https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 288383787 Bug: 291719697 Signed-off-by: Kalesh Singh --- mm/vmscan.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index c466a31736cb..68e4c3193f7a 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4794,7 +4794,8 @@ static int lru_gen_memcg_seg(struct lruvec *lruvec) * the eviction ******************************************************************************/ -static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx) +static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_control *sc, + int tier_idx) { bool success; int gen 
= folio_lru_gen(folio); @@ -4844,6 +4845,13 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx) return true; } + /* ineligible */ + if (zone > sc->reclaim_idx) { + gen = folio_inc_gen(lruvec, folio, false); + list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); + return true; + } + /* waiting for writeback */ if (folio_test_locked(folio) || folio_test_writeback(folio) || (type == LRU_GEN_FILE && folio_test_dirty(folio))) { @@ -4892,7 +4900,8 @@ static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct sca static int scan_folios(struct lruvec *lruvec, struct scan_control *sc, int type, int tier, struct list_head *list) { - int gen, zone; + int i; + int gen; enum vm_event_item item; int sorted = 0; int scanned = 0; @@ -4908,9 +4917,10 @@ static int scan_folios(struct lruvec *lruvec, struct scan_control *sc, gen = lru_gen_from_seq(lrugen->min_seq[type]); - for (zone = sc->reclaim_idx; zone >= 0; zone--) { + for (i = MAX_NR_ZONES; i > 0; i--) { LIST_HEAD(moved); int skipped = 0; + int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES; struct list_head *head = &lrugen->folios[gen][type][zone]; while (!list_empty(head)) { @@ -4924,7 +4934,7 @@ static int scan_folios(struct lruvec *lruvec, struct scan_control *sc, scanned += delta; - if (sort_folio(lruvec, folio, tier)) + if (sort_folio(lruvec, folio, sc, tier)) sorted += delta; else if (isolate_folio(lruvec, folio, sc)) { list_add(&folio->lru, list); From addf1a9a65a9eb4db8de8d2459e6070d4641c030 Mon Sep 17 00:00:00 2001 From: Kalesh Singh Date: Tue, 1 Aug 2023 19:56:03 -0700 Subject: [PATCH 094/163] FROMGIT: Multi-gen LRU: Avoid race in inc_min_seq() inc_max_seq() will try to inc_min_seq() if nr_gens == MAX_NR_GENS. This is because the generations are reused (the last oldest now empty generation will become the next youngest generation). 
inc_min_seq() is retried until successful, dropping the lru_lock and yielding the CPU on each failure, and retaking the lock before trying again: while (!inc_min_seq(lruvec, type, can_swap)) { spin_unlock_irq(&lruvec->lru_lock); cond_resched(); spin_lock_irq(&lruvec->lru_lock); } However, the initial condition that required incrementing the min_seq (nr_gens == MAX_NR_GENS) is not retested. This can change by another call to inc_max_seq() from run_aging() with force_scan=true from the debugfs interface. Since the eviction stalls when the nr_gens == MIN_NR_GENS, avoid unnecessarily incrementing the min_seq by rechecking the number of generations before each attempt. This issue was uncovered in previous discussion on the list by Yu Zhao and Aneesh Kumar [1]. [1] https://lore.kernel.org/linux-mm/CAOUHufbO7CaVm=xjEb1avDhHVvnC8pJmGyKcFf2iY_dpf+zR3w@mail.gmail.com/ Link: https://lkml.kernel.org/r/20230802025606.346758-2-kaleshsingh@google.com Fixes: d6c3af7d8a2b ("mm: multi-gen LRU: debugfs interface") Change-Id: I89e84ef2927eb1b0091f1be28bd03eb04dee4c57 Signed-off-by: Kalesh Singh Tested-by: AngeloGioacchino Del Regno [mediatek] Tested-by: Charan Teja Kalla Cc: Yu Zhao Cc: Aneesh Kumar K V Cc: Barry Song Cc: Brian Geffon Cc: Jan Alexander Steffens (heftig) Cc: Lecopzer Chen Cc: Matthias Brugger Cc: Oleksandr Natalenko Cc: Qi Zheng Cc: Steven Barrett Cc: Suleiman Souhlal Cc: Suren Baghdasaryan Cc: Signed-off-by: Andrew Morton (cherry picked from commit 250dbd10306126b06415afda8adfc27b2b780428 https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 288383787 Bug: 291719697 Signed-off-by: Kalesh Singh --- mm/vmscan.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index 68e4c3193f7a..b2573a5ee2f7 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4348,7 +4348,7 @@ static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan) int prev, next; int type, zone; struct lru_gen_folio 
*lrugen = &lruvec->lrugen; - +restart: spin_lock_irq(&lruvec->lru_lock); VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); @@ -4359,11 +4359,12 @@ static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan) VM_WARN_ON_ONCE(!force_scan && (type == LRU_GEN_FILE || can_swap)); - while (!inc_min_seq(lruvec, type, can_swap)) { - spin_unlock_irq(&lruvec->lru_lock); - cond_resched(); - spin_lock_irq(&lruvec->lru_lock); - } + if (inc_min_seq(lruvec, type, can_swap)) + continue; + + spin_unlock_irq(&lruvec->lru_lock); + cond_resched(); + goto restart; } /* From 5e1d25ac2ab670561949d82de7b5027e5a9676d5 Mon Sep 17 00:00:00 2001 From: Kalesh Singh Date: Tue, 1 Aug 2023 19:56:04 -0700 Subject: [PATCH 095/163] FROMGIT: BACKPORT: Multi-gen LRU: Fix can_swap in lru_gen_look_around() walk->can_swap might be invalid since it's not guaranteed to be initialized for the particular lruvec. Instead deduce it from the folio type (anon/file). Link: https://lkml.kernel.org/r/20230802025606.346758-3-kaleshsingh@google.com Fixes: 018ee47f1489 ("mm: multi-gen LRU: exploit locality in rmap") Change-Id: I1ae78011d4972d87bac9f2db8c56352cdb7a9be6 Signed-off-by: Kalesh Singh Tested-by: AngeloGioacchino Del Regno [mediatek] Tested-by: Charan Teja Kalla Cc: Yu Zhao Cc: Aneesh Kumar K V Cc: Barry Song Cc: Brian Geffon Cc: Jan Alexander Steffens (heftig) Cc: Lecopzer Chen Cc: Matthias Brugger Cc: Oleksandr Natalenko Cc: Qi Zheng Cc: Steven Barrett Cc: Suleiman Souhlal Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton (cherry picked from commit fdf19e8c8f1cdcee4eccf4c98a875f44f39d8b9d https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 288383787 Bug: 291719697 [ Kalesh Singh - Fix trivial conflict in lru_gen_look_around() ] Signed-off-by: Kalesh Singh --- mm/vmscan.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index b2573a5ee2f7..b15bec001fa1 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4565,6 +4565,7 @@ void 
lru_gen_look_around(struct page_vma_mapped_walk *pvmw) pte_t *pte = pvmw->pte; unsigned long addr = pvmw->address; struct folio *folio = pfn_folio(pvmw->pfn); + bool can_swap = !folio_is_file_lru(folio); struct mem_cgroup *memcg = folio_memcg(folio); struct pglist_data *pgdat = folio_pgdat(folio); struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); @@ -4612,7 +4613,7 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) if (!pte_young(pte[i])) continue; - folio = get_pfn_folio(pfn, memcg, pgdat, !walk || walk->can_swap); + folio = get_pfn_folio(pfn, memcg, pgdat, can_swap); if (!folio) continue; From dbb09068c1df2a75dd712fc0663434d088f8b6d1 Mon Sep 17 00:00:00 2001 From: Jiewen Wang Date: Wed, 2 Aug 2023 19:40:04 +0800 Subject: [PATCH 096/163] ANDROID: vendor_hooks: Add tune scan type hook in get_scan_count() Add hook in get_scan_count() for oem to wield customized reclamation strategy Bug: 294180281 Change-Id: Ic54d35128e458661fc2b641809f5371b1d9a488e Signed-off-by: Jiewen Wang --- drivers/android/vendor_hooks.c | 1 + include/trace/hooks/vmscan.h | 4 ++++ mm/vmscan.c | 1 + 3 files changed, 6 insertions(+) diff --git a/drivers/android/vendor_hooks.c b/drivers/android/vendor_hooks.c index 0b9d1866e38e..229f0e712f93 100644 --- a/drivers/android/vendor_hooks.c +++ b/drivers/android/vendor_hooks.c @@ -319,3 +319,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_folio_look_around_ref); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_look_around); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_look_around_migrate_folio); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_test_clear_look_around_ref); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_tune_scan_type); diff --git a/include/trace/hooks/vmscan.h b/include/trace/hooks/vmscan.h index a52ab44d135f..d66ab9279266 100644 --- a/include/trace/hooks/vmscan.h +++ b/include/trace/hooks/vmscan.h @@ -39,6 +39,10 @@ DECLARE_HOOK(android_vh_file_is_tiny_bypass, DECLARE_HOOK(android_vh_check_folio_look_around_ref, TP_PROTO(struct folio *folio, 
int *skip), TP_ARGS(folio, skip)); +enum scan_balance; +DECLARE_HOOK(android_vh_tune_scan_type, + TP_PROTO(enum scan_balance *scan_type), + TP_ARGS(scan_type)); #endif /* _TRACE_HOOK_VMSCAN_H */ /* This part must be outside protection */ #include diff --git a/mm/vmscan.c b/mm/vmscan.c index b15bec001fa1..73e96cd78b21 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3020,6 +3020,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, fraction[1] = fp; denominator = ap + fp; out: + trace_android_vh_tune_scan_type(&scan_balance); for_each_evictable_lru(lru) { int file = is_file_lru(lru); unsigned long lruvec_size; From 3926cc6ef8cc05e8e85613b0a26acb4e37753442 Mon Sep 17 00:00:00 2001 From: Jiewen Wang Date: Wed, 2 Aug 2023 20:15:25 +0800 Subject: [PATCH 097/163] ANDROID: GKI: Add symbols to symbol list for vivo INFO: 1 function symbol(s) added 'int __traceiter_android_vh_tune_scan_type(void*, enum scan_balance*)' 1 variable symbol(s) added 'struct tracepoint __tracepoint_android_vh_tune_scan_type' Bug: 294180281 Change-Id: I171099cdbe68c04885e286554f56290356d543d2 Signed-off-by: Jiewen Wang --- android/abi_gki_aarch64.stg | 53 ++++++++++++++++++++++++++++++++++++ android/abi_gki_aarch64_vivo | 2 ++ 2 files changed, 55 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 66f8158b4269..b14165dbe8c8 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -2743,6 +2743,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x0028f2f5 } +pointer_reference { + id: 0x0a9e3ca3 + kind: POINTER + pointee_type_id: 0x00381413 +} pointer_reference { id: 0x0aa1f0ee kind: POINTER @@ -273756,6 +273761,28 @@ enumeration { } } } +enumeration { + id: 0x00381413 + name: "scan_balance" + definition { + underlying_type_id: 0x4585663f + enumerator { + name: "SCAN_EQUAL" + } + enumerator { + name: "SCAN_FRACT" + value: 1 + } + enumerator { + name: "SCAN_ANON" + value: 2 + } + enumerator { + name: "SCAN_FILE" 
+ value: 3 + } + } +} enumeration { id: 0xbcb85241 name: "scsi_cmnd_submitter" @@ -307421,6 +307448,12 @@ function { return_type_id: 0x6720d32f parameter_id: 0x18150d9f } +function { + id: 0x9b85c291 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x0a9e3ca3 +} function { id: 0x9b85c36d return_type_id: 0x6720d32f @@ -325534,6 +325567,15 @@ elf_symbol { type_id: 0x9b2837bd full_name: "__traceiter_android_vh_try_to_unmap_one" } +elf_symbol { + id: 0x39155e73 + name: "__traceiter_android_vh_tune_scan_type" + is_defined: true + symbol_type: FUNCTION + crc: 0x24602ed2 + type_id: 0x9b85c291 + full_name: "__traceiter_android_vh_tune_scan_type" +} elf_symbol { id: 0x8a773cc3 name: "__traceiter_android_vh_typec_store_partner_src_caps" @@ -328774,6 +328816,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_vh_try_to_unmap_one" } +elf_symbol { + id: 0x49b955bd + name: "__tracepoint_android_vh_tune_scan_type" + is_defined: true + symbol_type: OBJECT + crc: 0x45da6384 + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_tune_scan_type" +} elf_symbol { id: 0x18e67da1 name: "__tracepoint_android_vh_typec_store_partner_src_caps" @@ -381466,6 +381517,7 @@ interface { symbol_id: 0x2bc25325 symbol_id: 0x0119fc41 symbol_id: 0xd9f43028 + symbol_id: 0x39155e73 symbol_id: 0x8a773cc3 symbol_id: 0x9545623c symbol_id: 0x558490b1 @@ -381826,6 +381878,7 @@ interface { symbol_id: 0xd9d2bcff symbol_id: 0x09ba106b symbol_id: 0xf9580976 + symbol_id: 0x49b955bd symbol_id: 0x18e67da1 symbol_id: 0x75a2f39e symbol_id: 0x7b5c377f diff --git a/android/abi_gki_aarch64_vivo b/android/abi_gki_aarch64_vivo index 69d634eb4fa5..b17c82fe8684 100644 --- a/android/abi_gki_aarch64_vivo +++ b/android/abi_gki_aarch64_vivo @@ -419,6 +419,7 @@ __traceiter_android_vh_try_to_freeze_todo __traceiter_android_vh_try_to_freeze_todo_unfrozen __traceiter_android_vh_try_to_unmap_one + __traceiter_android_vh_tune_scan_type __traceiter_android_vh_ufs_check_int_errors 
__traceiter_android_vh_ufs_clock_scaling __traceiter_android_vh_ufs_compl_command @@ -588,6 +589,7 @@ __tracepoint_android_vh_try_to_unmap_one __tracepoint_android_vh_try_to_freeze_todo __tracepoint_android_vh_try_to_freeze_todo_unfrozen + __tracepoint_android_vh_tune_scan_type __tracepoint_android_vh_ufs_check_int_errors __tracepoint_android_vh_ufs_clock_scaling __tracepoint_android_vh_ufs_compl_command From 960d9828eee1f1e74682e84cbab856bdb0c9d126 Mon Sep 17 00:00:00 2001 From: Junki Min Date: Fri, 4 Aug 2023 15:01:02 +0900 Subject: [PATCH 098/163] ANDROID: ABI: Update symbol for Exynos SoC Update symbols for Exynos WLBT driver. 1 function symbol(s) added 'unsigned long __find_nth_bit(const unsigned long*, unsigned long, unsigned long)' Bug: 294470344 Change-Id: I9f8d9d20f643b34bbc475dde468dbaa11f56e667 Signed-off-by: Junki Min --- android/abi_gki_aarch64.stg | 10 ++++++++++ android/abi_gki_aarch64_exynos | 17 +++++++++++++---- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index b14165dbe8c8..337a5867ef08 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -321120,6 +321120,15 @@ elf_symbol { type_id: 0x20cd94dc full_name: "__fdget" } +elf_symbol { + id: 0xaf8ee687 + name: "__find_nth_bit" + is_defined: true + symbol_type: FUNCTION + crc: 0x3eccbe2c + type_id: 0x3ec500b9 + full_name: "__find_nth_bit" +} elf_symbol { id: 0x746a66fc name: "__flush_workqueue" @@ -381023,6 +381032,7 @@ interface { symbol_id: 0x80f1cf36 symbol_id: 0x3e32c80e symbol_id: 0x5298aa39 + symbol_id: 0xaf8ee687 symbol_id: 0x746a66fc symbol_id: 0x47a334c4 symbol_id: 0xebf4b11f diff --git a/android/abi_gki_aarch64_exynos b/android/abi_gki_aarch64_exynos index e30927aa26b1..d8c9ffac57b6 100644 --- a/android/abi_gki_aarch64_exynos +++ b/android/abi_gki_aarch64_exynos @@ -42,6 +42,7 @@ blocking_notifier_chain_register blocking_notifier_chain_unregister bpf_trace_run1 + bpf_trace_run10 
bpf_trace_run2 bpf_trace_run3 bpf_trace_run4 @@ -325,6 +326,7 @@ fd_install fget _find_first_bit + _find_first_zero_bit _find_last_bit _find_next_and_bit _find_next_bit @@ -701,6 +703,7 @@ ___ratelimit raw_notifier_call_chain raw_notifier_chain_register + raw_notifier_chain_unregister _raw_read_lock _raw_read_unlock _raw_spin_lock @@ -1025,7 +1028,6 @@ ww_mutex_unlock # required by cfg80211.ko - bpf_trace_run10 csum_partial debugfs_rename __dev_change_net_namespace @@ -1227,8 +1229,10 @@ match_string memory_read_from_buffer migrate_swap + perf_event_create_kernel_counter + perf_event_enable + perf_event_read_local pick_highest_pushable_task - raw_notifier_chain_unregister raw_spin_rq_lock_nested raw_spin_rq_unlock _raw_write_trylock @@ -1272,6 +1276,7 @@ __traceiter_android_vh_binder_restore_priority __traceiter_android_vh_binder_set_priority __traceiter_android_vh_binder_wakeup_ilocked + __traceiter_android_vh_jiffies_update __traceiter_android_vh_scheduler_tick __traceiter_android_vh_syscall_prctl_finished __traceiter_binder_transaction_received @@ -1302,6 +1307,7 @@ __tracepoint_android_vh_binder_restore_priority __tracepoint_android_vh_binder_set_priority __tracepoint_android_vh_binder_wakeup_ilocked + __tracepoint_android_vh_jiffies_update __tracepoint_android_vh_scheduler_tick __tracepoint_android_vh_syscall_prctl_finished __tracepoint_binder_transaction_received @@ -2048,6 +2054,9 @@ # required by scsc_wlan.ko arp_tbl + __cpuhp_remove_state + __cpuhp_state_add_instance + __cpuhp_state_remove_instance dev_addr_mod dev_alloc_name __dev_queue_xmit @@ -2056,6 +2065,7 @@ dql_reset dst_release ether_setup + __find_nth_bit for_each_kernel_tracepoint in4_pton in6_pton @@ -2200,7 +2210,6 @@ drm_syncobj_get_handle drm_syncobj_replace_fence __fdget - _find_first_zero_bit __folio_put get_random_u32 __get_task_comm @@ -2261,7 +2270,6 @@ __traceiter_gpu_mem_total __tracepoint_android_vh_meminfo_proc_show __tracepoint_gpu_mem_total - ttm_bo_eviction_valuable 
ttm_bo_init_reserved ttm_bo_kmap ttm_bo_kunmap @@ -2576,5 +2584,6 @@ __skb_get_hash __skb_gso_segment tasklet_unlock_wait + ttm_bo_eviction_valuable ufshcd_mcq_poll_cqe_nolock unregister_netdevice_many From 8e86825eecfaaa582ab51a0924b469d2d2adc743 Mon Sep 17 00:00:00 2001 From: Elliot Berman Date: Fri, 4 Aug 2023 10:05:12 -0700 Subject: [PATCH 099/163] ANDROID: uid_sys_stats: Use a single work for deferred updates uid_sys_stats tries to acquire a lock when any task exits to do some bookkeeping in common data structure. If the lock is contended, it allocates and schedules a work to do the work later to avoid task exit latency. In a stress test which creates many tasks exiting, the workqueue can be overwhelmed by the number of works being scheduled and allocates more worker threads to handle queue. The growth of the number of threads is effectively unbounded and can exhaust the process table. This causes denial of service to userspace trying to fork(). Instead of allocating a new work each, create a linked list of the update stats deferred work and have a single work to drain the linked list. The linked list is implemented using an atomic_long_t. 
Bug: 294468796 Fixes: 5586278c0fe6 ("ANDROID: uid_sys_stats: defer process_notifier work if uid_lock is contended") Change-Id: I15f20f4f69ea66a452bdf815c4ef3a0da3edfd36 Signed-off-by: Elliot Berman --- drivers/misc/uid_sys_stats.c | 50 +++++++++++++++++++++--------------- 1 file changed, 29 insertions(+), 21 deletions(-) diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c index 36a34b1461cf..f5eaa63035ca 100644 --- a/drivers/misc/uid_sys_stats.c +++ b/drivers/misc/uid_sys_stats.c @@ -629,7 +629,6 @@ static const struct proc_ops uid_procstat_fops = { }; struct update_stats_work { - struct work_struct work; uid_t uid; #ifdef CONFIG_UID_SYS_STATS_DEBUG struct task_struct *task; @@ -637,38 +636,46 @@ struct update_stats_work { struct task_io_accounting ioac; u64 utime; u64 stime; + struct update_stats_work *next; }; +static atomic_long_t work_usw; + static void update_stats_workfn(struct work_struct *work) { - struct update_stats_work *usw = - container_of(work, struct update_stats_work, work); + struct update_stats_work *usw; struct uid_entry *uid_entry; struct task_entry *task_entry __maybe_unused; rt_mutex_lock(&uid_lock); - uid_entry = find_uid_entry(usw->uid); - if (!uid_entry) - goto exit; + while ((usw = (struct update_stats_work *)atomic_long_read(&work_usw))) { + if (atomic_long_cmpxchg(&work_usw, (long)usw, (long)(usw->next)) != (long)usw) + continue; - uid_entry->utime += usw->utime; - uid_entry->stime += usw->stime; + uid_entry = find_uid_entry(usw->uid); + if (!uid_entry) + goto next; + + uid_entry->utime += usw->utime; + uid_entry->stime += usw->stime; #ifdef CONFIG_UID_SYS_STATS_DEBUG - task_entry = find_task_entry(uid_entry, usw->task); - if (!task_entry) - goto exit; - add_uid_tasks_io_stats(task_entry, &usw->ioac, - UID_STATE_DEAD_TASKS); + task_entry = find_task_entry(uid_entry, usw->task); + if (!task_entry) + goto next; + add_uid_tasks_io_stats(task_entry, &usw->ioac, + UID_STATE_DEAD_TASKS); #endif - 
__add_uid_io_stats(uid_entry, &usw->ioac, UID_STATE_DEAD_TASKS); -exit: + __add_uid_io_stats(uid_entry, &usw->ioac, UID_STATE_DEAD_TASKS); +next: +#ifdef CONFIG_UID_SYS_STATS_DEBUG + put_task_struct(usw->task); +#endif + kfree(usw); + } rt_mutex_unlock(&uid_lock); -#ifdef CONFIG_UID_SYS_STATS_DEBUG - put_task_struct(usw->task); -#endif - kfree(usw); } +static DECLARE_WORK(update_stats_work, update_stats_workfn); static int process_notifier(struct notifier_block *self, unsigned long cmd, void *v) @@ -687,7 +694,6 @@ static int process_notifier(struct notifier_block *self, usw = kmalloc(sizeof(struct update_stats_work), GFP_KERNEL); if (usw) { - INIT_WORK(&usw->work, update_stats_workfn); usw->uid = uid; #ifdef CONFIG_UID_SYS_STATS_DEBUG usw->task = get_task_struct(task); @@ -698,7 +704,9 @@ static int process_notifier(struct notifier_block *self, */ usw->ioac = task->ioac; task_cputime_adjusted(task, &usw->utime, &usw->stime); - schedule_work(&usw->work); + usw->next = (struct update_stats_work *)atomic_long_xchg(&work_usw, + (long)usw); + schedule_work(&update_stats_work); } return NOTIFY_OK; } From 2f76bb83b14d8ad3462bb73a304a887a1418020a Mon Sep 17 00:00:00 2001 From: Author Name Date: Wed, 2 Aug 2023 20:02:21 +0800 Subject: [PATCH 100/163] ANDROID: GKI: update symbol list file for xiaomi INFO: ABI DIFFERENCES HAVE BEEN DETECTED! 
INFO: 8 function symbol(s) added 'int sock_wake_async(struct socket_wq *wq, int how, int band)' 'void bpf_map_put(struct bpf_map *map)' 'void bpf_map_inc(struct bpf_map *map)' 'int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id)' 'void napi_busy_loop(unsigned int napi_id,bool (*loop_end)(void *, unsigned long),void *loop_end_arg, bool prefer_busy_poll, u16 budget)' 'bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)' 'void page_pool_put_page_bulk(struct page_pool *pool, void **data, int count)' 'struct sk_buff *build_skb_around(struct sk_buff *skb,void *data, unsigned int frag_size)' INFO: 2 variable symbol(s) added 'DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info), 'DEFINE_PER_CPU(struct mem_cgroup *, int_active_memcg)' Bug: 294257769 Change-Id: I98da395227810eecb1fd978dedd20fba445757d0 Signed-off-by: dongziqi --- android/abi_gki_aarch64.stg | 250 +++++++++++++++++++++++++++++++++ android/abi_gki_aarch64_xiaomi | 12 ++ 2 files changed, 262 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 337a5867ef08..0eb6bf6028d8 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -23683,6 +23683,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xf8453f2e } +pointer_reference { + id: 0x34818c51 + kind: POINTER + pointee_type_id: 0xf846d7da +} pointer_reference { id: 0x3486508f kind: POINTER @@ -38711,6 +38716,11 @@ member { type_id: 0x69fa9768 offset: 256 } +member { + id: 0x39d6d358 + type_id: 0x6875099c + offset: 32 +} member { id: 0x39e9b87f type_id: 0x6888b6e5 @@ -106474,6 +106484,11 @@ member { type_id: 0x34544a3f offset: 1152 } +member { + id: 0xfa946861 + name: "ipv4_nh" + type_id: 0xc9082b19 +} member { id: 0x3b9e102b name: "ipv6" @@ -106520,6 +106535,11 @@ member { type_id: 0x2d271bc7 offset: 1344 } +member { + id: 0xab39140b + name: "ipv6_nh" + type_id: 0x6d25e07f +} member { id: 0xdf622691 name: "ipv6_route_input" @@ -108815,6 +108835,12 @@ member { type_id: 0x340dea21 
offset: 8384 } +member { + id: 0x632a4ba2 + name: "kern_flags" + type_id: 0xc9082b19 + offset: 256 +} member { id: 0x3afd0925 name: "kern_hyp_va" @@ -116809,6 +116835,12 @@ member { name: "map" type_id: 0x04b193cc } +member { + id: 0x8df2c9e6 + name: "map" + type_id: 0x04b193cc + offset: 128 +} member { id: 0x8df87907 name: "map" @@ -117025,6 +117057,12 @@ member { name: "map_id" type_id: 0xe62ebf07 } +member { + id: 0x86e8bd6c + name: "map_id" + type_id: 0xc9082b19 + offset: 192 +} member { id: 0xa32be5db name: "map_ifindex" @@ -117292,6 +117330,12 @@ member { type_id: 0x6e73208e offset: 192 } +member { + id: 0x2a093f90 + name: "map_type" + type_id: 0x6e73208e + offset: 224 +} member { id: 0x2a81612b name: "map_type" @@ -129546,6 +129590,12 @@ member { name: "nh" type_id: 0x1f9da9a8 } +member { + id: 0x713b460f + name: "nh" + type_id: 0x1e9c55da + offset: 288 +} member { id: 0xdb1c1848 name: "nh_all" @@ -129574,6 +129624,11 @@ member { name: "nh_entry" type_id: 0x17b2105b } +member { + id: 0xd51ec347 + name: "nh_family" + type_id: 0xc9082b19 +} member { id: 0x9fb787a3 name: "nh_flags" @@ -183637,6 +183692,18 @@ member { type_id: 0x49b889e7 offset: 12704 } +member { + id: 0x07611cab + name: "tgt_index" + type_id: 0xc9082b19 + offset: 32 +} +member { + id: 0xc9d6516c + name: "tgt_value" + type_id: 0x18bd6530 + offset: 64 +} member { id: 0x141d2755 name: "thaw" @@ -206907,6 +206974,15 @@ struct_union { member_id: 0x12f2249e } } +struct_union { + id: 0x6875099c + kind: UNION + definition { + bytesize: 16 + member_id: 0xfa946861 + member_id: 0xab39140b + } +} struct_union { id: 0x6888b6e5 kind: UNION @@ -211185,6 +211261,16 @@ struct_union { member_id: 0x2c23d29d } } +struct_union { + id: 0x1e9c55da + kind: STRUCT + name: "bpf_nh_params" + definition { + bytesize: 20 + member_id: 0xd51ec347 + member_id: 0x39d6d358 + } +} struct_union { id: 0xb52b1ec7 kind: STRUCT @@ -211401,6 +211487,22 @@ struct_union { member_id: 0x0082372e } } +struct_union { + id: 0x212d37a1 + 
kind: STRUCT + name: "bpf_redirect_info" + definition { + bytesize: 56 + member_id: 0x2da18c75 + member_id: 0x07611cab + member_id: 0xc9d6516c + member_id: 0x8df2c9e6 + member_id: 0x86e8bd6c + member_id: 0x2a093f90 + member_id: 0x632a4ba2 + member_id: 0x713b460f + } +} struct_union { id: 0x3a354a67 kind: STRUCT @@ -277795,6 +277897,15 @@ function { id: 0x012b29b0 return_type_id: 0x914dbfdc } +function { + id: 0x012cb0da + return_type_id: 0x48b5725f + parameter_id: 0x4585663f + parameter_id: 0x34818c51 + parameter_id: 0x18bd6530 + parameter_id: 0x6d7f5ff6 + parameter_id: 0x914dbfdc +} function { id: 0x013800d7 return_type_id: 0x48b5725f @@ -281671,6 +281782,13 @@ function { parameter_id: 0x12bb35ff parameter_id: 0x1582ab06 } +function { + id: 0x1462512b + return_type_id: 0x48b5725f + parameter_id: 0x1344d43c + parameter_id: 0x0cbf60eb + parameter_id: 0x6720d32f +} function { id: 0x14667c52 return_type_id: 0x48b5725f @@ -292771,6 +292889,13 @@ function { parameter_id: 0x2584a3b9 parameter_id: 0x6d7f5ff6 } +function { + id: 0x69be77aa + return_type_id: 0x054f691a + parameter_id: 0x054f691a + parameter_id: 0x18bd6530 + parameter_id: 0x4585663f +} function { id: 0x69cde367 return_type_id: 0x054f691a @@ -306650,6 +306775,13 @@ function { parameter_id: 0x1bf16028 parameter_id: 0x4585663f } +function { + id: 0x9b4f443a + return_type_id: 0x6720d32f + parameter_id: 0x1d2bb1b6 + parameter_id: 0x6720d32f + parameter_id: 0x6720d32f +} function { id: 0x9b4f857e return_type_id: 0x6720d32f @@ -313541,6 +313673,12 @@ function { return_type_id: 0x6720d32f parameter_id: 0x0c56e1ee } +function { + id: 0x9e960f96 + return_type_id: 0x6720d32f + parameter_id: 0x054f691a + parameter_id: 0x914dbfdc +} function { id: 0x9e9a6f39 return_type_id: 0x6720d32f @@ -319177,6 +319315,12 @@ function { return_type_id: 0x6d7f5ff6 parameter_id: 0x1b8590a8 } +function { + id: 0xf846d7da + return_type_id: 0x6d7f5ff6 + parameter_id: 0x18bd6530 + parameter_id: 0x33756485 +} function { id: 0xf857e1a7 
return_type_id: 0x6d7f5ff6 @@ -319641,6 +319785,12 @@ function { parameter_id: 0x0258f96e parameter_id: 0xe5b69de1 } +function { + id: 0xfdb2dd0c + return_type_id: 0x6d7f5ff6 + parameter_id: 0x0258f96e + parameter_id: 0xe02e14d6 +} function { id: 0xfdd651d2 return_type_id: 0x6d7f5ff6 @@ -320526,6 +320676,15 @@ elf_symbol { type_id: 0x9115faa6 full_name: "__dev_change_net_namespace" } +elf_symbol { + id: 0x8f70cdad + name: "__dev_direct_xmit" + is_defined: true + symbol_type: FUNCTION + crc: 0x1f8aaf23 + type_id: 0x9e960f96 + full_name: "__dev_direct_xmit" +} elf_symbol { id: 0x2d91cc56 name: "__dev_get_by_index" @@ -332207,6 +332366,24 @@ elf_symbol { type_id: 0xc0405795 full_name: "bpf_dispatcher_xdp_func" } +elf_symbol { + id: 0xd1f054c0 + name: "bpf_map_inc" + is_defined: true + symbol_type: FUNCTION + crc: 0xb21c378c + type_id: 0x11b43560 + full_name: "bpf_map_inc" +} +elf_symbol { + id: 0x2e575a11 + name: "bpf_map_put" + is_defined: true + symbol_type: FUNCTION + crc: 0xc1113f69 + type_id: 0x11b43560 + full_name: "bpf_map_put" +} elf_symbol { id: 0xf737e767 name: "bpf_master_redirect_enabled_key" @@ -332243,6 +332420,15 @@ elf_symbol { type_id: 0x1de47b51 full_name: "bpf_prog_sub" } +elf_symbol { + id: 0x56b332ad + name: "bpf_redirect_info" + is_defined: true + symbol_type: OBJECT + crc: 0x33823370 + type_id: 0x212d37a1 + full_name: "bpf_redirect_info" +} elf_symbol { id: 0x6a712ee1 name: "bpf_stats_enabled_key" @@ -332711,6 +332897,15 @@ elf_symbol { type_id: 0x6fa775f5 full_name: "build_skb" } +elf_symbol { + id: 0x521ad765 + name: "build_skb_around" + is_defined: true + symbol_type: FUNCTION + crc: 0xcdb7b9ec + type_id: 0x69be77aa + full_name: "build_skb_around" +} elf_symbol { id: 0x0f9d7b5e name: "bus_find_device" @@ -340108,6 +340303,15 @@ elf_symbol { type_id: 0x9d038726 full_name: "dma_mmap_pages" } +elf_symbol { + id: 0x843aec6c + name: "dma_need_sync" + is_defined: true + symbol_type: FUNCTION + crc: 0xcec723d1 + type_id: 0xfdb2dd0c + full_name: 
"dma_need_sync" +} elf_symbol { id: 0xeb70c9c5 name: "dma_pool_alloc" @@ -350344,6 +350548,15 @@ elf_symbol { type_id: 0x93bee8a1 full_name: "insert_resource" } +elf_symbol { + id: 0xcec28ada + name: "int_active_memcg" + is_defined: true + symbol_type: OBJECT + crc: 0x6f91b30c + type_id: 0x1d5bae2a + full_name: "int_active_memcg" +} elf_symbol { id: 0xbd83f7de name: "int_pow" @@ -355777,6 +355990,15 @@ elf_symbol { type_id: 0x6fa775f5 full_name: "napi_build_skb" } +elf_symbol { + id: 0x760780be + name: "napi_busy_loop" + is_defined: true + symbol_type: FUNCTION + crc: 0x29604158 + type_id: 0x012cb0da + full_name: "napi_busy_loop" +} elf_symbol { id: 0xc258a893 name: "napi_complete_done" @@ -358552,6 +358774,15 @@ elf_symbol { type_id: 0x1443bd42 full_name: "page_pool_put_defragged_page" } +elf_symbol { + id: 0x7490fd3a + name: "page_pool_put_page_bulk" + is_defined: true + symbol_type: FUNCTION + crc: 0x898d9639 + type_id: 0x1462512b + full_name: "page_pool_put_page_bulk" +} elf_symbol { id: 0xfdf9f7ee name: "page_pool_release_page" @@ -370613,6 +370844,15 @@ elf_symbol { type_id: 0x09506558 full_name: "sock_unregister" } +elf_symbol { + id: 0xeab36c96 + name: "sock_wake_async" + is_defined: true + symbol_type: FUNCTION + crc: 0xc356c393 + type_id: 0x9b4f443a + full_name: "sock_wake_async" +} elf_symbol { id: 0xba26b6a3 name: "sock_wfree" @@ -380966,6 +381206,7 @@ interface { symbol_id: 0x857a0e42 symbol_id: 0xe889d441 symbol_id: 0x087c7a06 + symbol_id: 0x8f70cdad symbol_id: 0x2d91cc56 symbol_id: 0xdc24797a symbol_id: 0xf2144a48 @@ -382264,10 +382505,13 @@ interface { symbol_id: 0xd4a0f991 symbol_id: 0x864cb6b1 symbol_id: 0x55c7afec + symbol_id: 0xd1f054c0 + symbol_id: 0x2e575a11 symbol_id: 0xf737e767 symbol_id: 0x6ad917a1 symbol_id: 0xdeb1861d symbol_id: 0x7661d150 + symbol_id: 0x56b332ad symbol_id: 0x6a712ee1 symbol_id: 0xe594a242 symbol_id: 0x3afeb397 @@ -382320,6 +382564,7 @@ interface { symbol_id: 0xdded4dcd symbol_id: 0x9dea2dda symbol_id: 0x8ea00f54 + 
symbol_id: 0x521ad765 symbol_id: 0x0f9d7b5e symbol_id: 0x0be8da0e symbol_id: 0x921644f5 @@ -383141,6 +383386,7 @@ interface { symbol_id: 0xb989e3a2 symbol_id: 0xd140139f symbol_id: 0x009463a5 + symbol_id: 0x843aec6c symbol_id: 0xeb70c9c5 symbol_id: 0xafa3c498 symbol_id: 0x083d52da @@ -384278,6 +384524,7 @@ interface { symbol_id: 0xc73383c1 symbol_id: 0x0ee6aecf symbol_id: 0x3c5daf32 + symbol_id: 0xcec28ada symbol_id: 0xbd83f7de symbol_id: 0xb6637ce6 symbol_id: 0x5ce8149d @@ -384882,6 +385129,7 @@ interface { symbol_id: 0x55bed293 symbol_id: 0xc1add767 symbol_id: 0x9a02788c + symbol_id: 0x760780be symbol_id: 0xc258a893 symbol_id: 0x2ce3e2c6 symbol_id: 0x04096200 @@ -385190,6 +385438,7 @@ interface { symbol_id: 0xdc14ff55 symbol_id: 0x2616989d symbol_id: 0xc922a019 + symbol_id: 0x7490fd3a symbol_id: 0xfdf9f7ee symbol_id: 0x3c537500 symbol_id: 0x1b814fa6 @@ -386530,6 +386779,7 @@ interface { symbol_id: 0xeffe0f16 symbol_id: 0x7b771682 symbol_id: 0x39362be4 + symbol_id: 0xeab36c96 symbol_id: 0xba26b6a3 symbol_id: 0xfdaa1c0d symbol_id: 0xe86ce747 diff --git a/android/abi_gki_aarch64_xiaomi b/android/abi_gki_aarch64_xiaomi index f89ba44c4afe..21ad5310ac70 100644 --- a/android/abi_gki_aarch64_xiaomi +++ b/android/abi_gki_aarch64_xiaomi @@ -316,3 +316,15 @@ # required by SAGT module __traceiter_android_rvh_before_do_sched_yield __tracepoint_android_rvh_before_do_sched_yield + +#required by minetwork.ko + sock_wake_async + bpf_map_put + bpf_map_inc + __dev_direct_xmit + napi_busy_loop + int_active_memcg + bpf_redirect_info + dma_need_sync + page_pool_put_page_bulk + build_skb_around From b0c06048a8ff95e4599e761714866deb6f3f9e0f Mon Sep 17 00:00:00 2001 From: Andrew Yang Date: Fri, 30 Jun 2023 17:22:02 +0800 Subject: [PATCH 101/163] FROMGIT: fs: drop_caches: draining pages before dropping caches We expect a file page access after dropping caches should be a major fault, but sometimes it's still a minor fault. 
That's because a file page can't be dropped if it's in a per-cpu pagevec. Draining all pages from per-cpu pagevec to lru list before trying to drop caches. Link: https://lkml.kernel.org/r/20230630092203.16080-1-andrew.yang@mediatek.com Change-Id: I9b03c53e39b87134d5ddd0c40ac9b36cf4d190cd Signed-off-by: Andrew Yang Cc: Al Viro Cc: AngeloGioacchino Del Regno Cc: Christian Brauner Cc: Matthias Brugger Signed-off-by: Andrew Morton Bug: 285794522 (cherry picked from commit a481c6fdf3e4fdf31bda91098dfbf46098037e76 https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) --- fs/drop_caches.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/drop_caches.c b/fs/drop_caches.c index e619c31b6bd9..b9575957a7c2 100644 --- a/fs/drop_caches.c +++ b/fs/drop_caches.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "internal.h" /* A global variable is a bit ugly, but it keeps the code simple */ @@ -59,6 +60,7 @@ int drop_caches_sysctl_handler(struct ctl_table *table, int write, static int stfu; if (sysctl_drop_caches & 1) { + lru_add_drain_all(); iterate_supers(drop_pagecache_sb, NULL); count_vm_event(DROP_PAGECACHE); } From 64787ee45196879413bb5987b69b4ea6e59c5da8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Draszik?= Date: Thu, 10 Aug 2023 12:16:22 +0100 Subject: [PATCH 102/163] ANDROID: GKI: update pixel symbol list for xhci MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pixel is using these symbols in its USB driver implementation. 
3 function symbol(s) added 'int xhci_address_device(struct usb_hcd*, struct usb_device*)' 'int xhci_bus_resume(struct usb_hcd*)' 'int xhci_bus_suspend(struct usb_hcd*)' Bug: 277396090 Bug: 287008367 Change-Id: Id89097ab094e0582560383793c91278c88cb078f Signed-off-by: André Draszik --- android/abi_gki_aarch64_pixel | 3 +++ 1 file changed, 3 insertions(+) diff --git a/android/abi_gki_aarch64_pixel b/android/abi_gki_aarch64_pixel index 4ae71b6faf29..4fe7be5b8bbe 100644 --- a/android/abi_gki_aarch64_pixel +++ b/android/abi_gki_aarch64_pixel @@ -2290,6 +2290,9 @@ __xfrm_state_destroy xfrm_state_lookup_byspi xfrm_stateonly_find + xhci_address_device + xhci_bus_resume + xhci_bus_suspend xhci_gen_setup xhci_init_driver xhci_resume From c2cbb3cc246828bc2a4465110966e4ff2cf2fef3 Mon Sep 17 00:00:00 2001 From: Howard Yen Date: Fri, 23 Jul 2021 18:56:32 +0800 Subject: [PATCH 103/163] ANDROID: usb: host: fix slab-out-of-bounds in xhci_vendor_get_ops slab-out-of-bounds happens if the xhci platform drivers don't define the extra_priv_size in their xhci_driver_overrides structure. Move xhci_vendor_ops structure to xhci main structure to avoid extra_priv_size affacts xhci_vendor_get_ops which causes the slab-out-of-bounds error. 
Fixes: 90ab8e7f988d ("ANDROID: usb: host: add xhci hooks for USB offload") Bug: 293869685 Bug: 194461020 Test: build and boot pass Change-Id: Id17fdfbfd3e8edcc89a05c9c2f553ffab494215e Signed-off-by: Howard Yen Signed-off-by: Greg Kroah-Hartman (cherry picked from commit 34f6c9c3088b13884567429e3c2ceb08d2235b5b) (cherry picked from commit 00666b8e3e6ed6ba82fd23d8c83390c30f426469) --- drivers/usb/host/xhci-plat.c | 8 +++----- drivers/usb/host/xhci-plat.h | 1 - drivers/usb/host/xhci.c | 3 +-- drivers/usb/host/xhci.h | 4 +++- 4 files changed, 7 insertions(+), 9 deletions(-) diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 42909318609c..3829e1be0383 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -188,11 +188,10 @@ EXPORT_SYMBOL_GPL(xhci_plat_register_vendor_ops); static int xhci_vendor_init(struct xhci_hcd *xhci) { - struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); - struct xhci_plat_priv *priv = xhci_to_priv(xhci); + struct xhci_vendor_ops *ops = NULL; if (xhci_plat_vendor_overwrite.vendor_ops) - ops = priv->vendor_ops = xhci_plat_vendor_overwrite.vendor_ops; + ops = xhci->vendor_ops = xhci_plat_vendor_overwrite.vendor_ops; if (ops && ops->vendor_init) return ops->vendor_init(xhci); @@ -202,12 +201,11 @@ static int xhci_vendor_init(struct xhci_hcd *xhci) static void xhci_vendor_cleanup(struct xhci_hcd *xhci) { struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); - struct xhci_plat_priv *priv = xhci_to_priv(xhci); if (ops && ops->vendor_cleanup) ops->vendor_cleanup(xhci); - priv->vendor_ops = NULL; + xhci->vendor_ops = NULL; } static int xhci_plat_probe(struct platform_device *pdev) diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h index 5b096f72636f..e726a572321d 100644 --- a/drivers/usb/host/xhci-plat.h +++ b/drivers/usb/host/xhci-plat.h @@ -13,7 +13,6 @@ struct xhci_plat_priv { const char *firmware_name; unsigned long long quirks; - struct xhci_vendor_ops *vendor_ops; 
struct xhci_vendor_data *vendor_data; int (*plat_setup)(struct usb_hcd *); void (*plat_start)(struct usb_hcd *); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 9304e0d42982..be041bd65b19 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -25,7 +25,6 @@ #include "xhci-trace.h" #include "xhci-debugfs.h" #include "xhci-dbgcap.h" -#include "xhci-plat.h" #define DRIVER_AUTHOR "Sarah Sharp" #define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" @@ -4517,7 +4516,7 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, struct xhci_vendor_ops *xhci_vendor_get_ops(struct xhci_hcd *xhci) { - return xhci_to_priv(xhci)->vendor_ops; + return xhci->vendor_ops; } EXPORT_SYMBOL_GPL(xhci_vendor_get_ops); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 5d1c57bfd0af..dd634668f5d4 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1941,7 +1941,9 @@ struct xhci_hcd { void *dbc; - ANDROID_KABI_RESERVE(1); + /* Used for bug 194461020 */ + ANDROID_KABI_USE(1, struct xhci_vendor_ops *vendor_ops); + ANDROID_KABI_RESERVE(2); ANDROID_KABI_RESERVE(3); ANDROID_KABI_RESERVE(4); From b520b909137c569fdf9829ce583b25e328ea01db Mon Sep 17 00:00:00 2001 From: Ramji Jiyani Date: Wed, 9 Aug 2023 18:12:40 +0000 Subject: [PATCH 104/163] ANDROID: ABI: Update to fix slab-out-of-bounds in xhci_vendor_get_ops type 'struct xhci_hcd' changed member 'union { struct xhci_vendor_ops* vendor_ops; struct { u64 android_kabi_reserved1; }; union { }; }' was added member 'u64 android_kabi_reserved1' was removed Bug: 293869685 Test: TH Change-Id: I1fa551fc1b9263302d38f4e2989eed9f5f0d816a Signed-off-by: Ramji Jiyani --- android/abi_gki_aarch64.stg | 44 +++++++++++++++++++++++++++++++------ 1 file changed, 37 insertions(+), 7 deletions(-) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 0eb6bf6028d8..180374c4cc50 100644 --- a/android/abi_gki_aarch64.stg +++ 
b/android/abi_gki_aarch64.stg @@ -37663,6 +37663,10 @@ member { id: 0x26e6f511 type_id: 0x14b5a454 } +member { + id: 0x27000c61 + type_id: 0x132e4197 +} member { id: 0x27031642 type_id: 0x13222919 @@ -38053,6 +38057,11 @@ member { id: 0x3190e1a6 type_id: 0x496df688 } +member { + id: 0x31a5da6f + type_id: 0x49b90a29 + offset: 59264 +} member { id: 0x31aa1057 type_id: 0x49861740 @@ -38515,6 +38524,10 @@ member { type_id: 0x56faddc1 offset: 840 } +member { + id: 0x36752b74 + type_id: 0x56faddc1 +} member { id: 0x36961302 type_id: 0x557609c1 @@ -44612,12 +44625,6 @@ member { type_id: 0x92233392 offset: 2496 } -member { - id: 0x2d0811d3 - name: "android_kabi_reserved1" - type_id: 0x92233392 - offset: 59264 -} member { id: 0x2d0811f9 name: "android_kabi_reserved1" @@ -195159,6 +195166,11 @@ member { type_id: 0x3e10b518 offset: 6336 } +member { + id: 0x7d2869d8 + name: "vendor_ops" + type_id: 0x2e19617e +} member { id: 0xfd44cde8 name: "vendor_oui" @@ -202677,6 +202689,14 @@ struct_union { member_id: 0x3c63eaff } } +struct_union { + id: 0x132e4197 + kind: STRUCT + definition { + bytesize: 8 + member_id: 0x2d081532 + } +} struct_union { id: 0x132f610b kind: STRUCT @@ -205377,6 +205397,16 @@ struct_union { member_id: 0x25ab4212 } } +struct_union { + id: 0x49b90a29 + kind: UNION + definition { + bytesize: 8 + member_id: 0x7d2869d8 + member_id: 0x27000c61 + member_id: 0x36752b74 + } +} struct_union { id: 0x49df90a7 kind: UNION @@ -260756,7 +260786,7 @@ struct_union { member_id: 0x06879837 member_id: 0xf762467b member_id: 0x26b32295 - member_id: 0x2d0811d3 + member_id: 0x31a5da6f member_id: 0x63760917 member_id: 0xac894244 member_id: 0xe0f63158 From a5a662187fe247e255ebdabbd2d545c70c0aa80b Mon Sep 17 00:00:00 2001 From: Jaewon Kim Date: Thu, 10 Aug 2023 16:04:05 +0900 Subject: [PATCH 105/163] ANDROID: gfp: add __GFP_CMA in gfpflag_names The __GFP_CMA was added but not added to the gfpflag_names. Let me add it to show on %pGg printk. 
Bug: 295271520 Signed-off-by: Jaewon Kim Change-Id: I155fdcc0e2c18db390b5166ba8d2b93c793caae6 --- include/trace/events/mmflags.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index e87cb2b80ed3..3e06b3fe81a6 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -51,7 +51,8 @@ gfpflag_string(__GFP_RECLAIM), \ gfpflag_string(__GFP_DIRECT_RECLAIM), \ gfpflag_string(__GFP_KSWAPD_RECLAIM), \ - gfpflag_string(__GFP_ZEROTAGS) + gfpflag_string(__GFP_ZEROTAGS), \ + gfpflag_string(__GFP_CMA) #ifdef CONFIG_KASAN_HW_TAGS #define __def_gfpflag_names_kasan , \ From 3fc69d3f70e14f8e31f4c7dad08ed4246368d2e2 Mon Sep 17 00:00:00 2001 From: Woogeun Lee Date: Wed, 2 Aug 2023 10:40:35 +0900 Subject: [PATCH 106/163] ANDROID: ABI: add allowed list for galaxy 19 function symbol(s) added 'int __fsnotify_parent(struct dentry*, __u32, const void*, int)' 'int __traceiter_android_vh_wq_lockup_pool(void*, int, unsigned long)' 'int cleancache_register_ops(const struct cleancache_ops*)' 'int fsnotify(__u32, const void*, int, struct inode*, const struct qstr*, struct inode*, u32)' 'void kernel_neon_begin()' 'void kernel_neon_end()' 'int kstrtos16(const char*, unsigned int, s16*)' 'int regulator_get_current_limit(struct regulator*)' 'int smpboot_register_percpu_thread(struct smp_hotplug_thread*)' 'void smpboot_unregister_percpu_thread(struct smp_hotplug_thread*)' 'int snd_soc_add_card_controls(struct snd_soc_card*, const struct snd_kcontrol_new*, int)' 'unsigned int stack_trace_save_regs(struct pt_regs*, unsigned long*, unsigned int, unsigned int)' 'int tcp_register_congestion_control(struct tcp_congestion_ops*)' 'void tcp_reno_cong_avoid(struct sock*, u32, u32)' 'u32 tcp_reno_ssthresh(struct sock*)' 'u32 tcp_reno_undo_cwnd(struct sock*)' 'u32 tcp_slow_start(struct tcp_sock*, u32)' 'void tcp_unregister_congestion_control(struct tcp_congestion_ops*)' 'int usb_set_configuration(struct 
usb_device*, int)' 1 variable symbol(s) added 'struct tracepoint __tracepoint_android_vh_wq_lockup_pool' Bug: 294125592 Change-Id: I6c2f2fb274dbe45263e39e43b4b8bc3766ef2bab Signed-off-by: Woogeun Lee --- android/abi_gki_aarch64.stg | 4107 ++++++++++++++++++++++++++++++++ android/abi_gki_aarch64_galaxy | 19 + 2 files changed, 4126 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 180374c4cc50..56e8d3b856c6 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -203,6 +203,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x29600806 } +pointer_reference { + id: 0x00cc5f1f + kind: POINTER + pointee_type_id: 0x29719ae3 +} pointer_reference { id: 0x00d1ba62 kind: POINTER @@ -248,6 +253,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x29b83e1a } +pointer_reference { + id: 0x00fff809 + kind: POINTER + pointee_type_id: 0x29bf06ba +} pointer_reference { id: 0x010934b0 kind: POINTER @@ -438,6 +448,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x2cf89e50 } +pointer_reference { + id: 0x01ae5751 + kind: POINTER + pointee_type_id: 0x2cf9bbda +} pointer_reference { id: 0x01b63d17 kind: POINTER @@ -1258,6 +1273,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x38da44ec } +pointer_reference { + id: 0x04a9576c + kind: POINTER + pointee_type_id: 0x38e5bb2f +} pointer_reference { id: 0x04ac88c5 kind: POINTER @@ -2693,6 +2713,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x03cc00a2 } +pointer_reference { + id: 0x0a67a4e3 + kind: POINTER + pointee_type_id: 0x03de7511 +} pointer_reference { id: 0x0a6c2073 kind: POINTER @@ -2723,6 +2748,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x00447895 } +pointer_reference { + id: 0x0a8304ff + kind: POINTER + pointee_type_id: 0x004cf563 +} pointer_reference { id: 0x0a85fcb6 kind: POINTER @@ -3068,6 +3098,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x04a91d68 } +pointer_reference { + id: 0x0bbb7f1b + kind: POINTER + 
pointee_type_id: 0x04ad1af1 +} pointer_reference { id: 0x0bbc5d54 kind: POINTER @@ -3078,6 +3113,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x04b89667 } +pointer_reference { + id: 0x0bbfad26 + kind: POINTER + pointee_type_id: 0x04be5205 +} pointer_reference { id: 0x0bc3e3e1 kind: POINTER @@ -5133,6 +5173,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x1d5cf24d } +pointer_reference { + id: 0x0dc966bc + kind: POINTER + pointee_type_id: 0x1d657c6d +} pointer_reference { id: 0x0dc9e98b kind: POINTER @@ -6918,6 +6963,16 @@ pointer_reference { kind: POINTER pointee_type_id: 0x1705e250 } +pointer_reference { + id: 0x0f56bda3 + kind: POINTER + pointee_type_id: 0x171a1012 +} +pointer_reference { + id: 0x0f596314 + kind: POINTER + pointee_type_id: 0x17256acc +} pointer_reference { id: 0x0f5a291c kind: POINTER @@ -6943,6 +6998,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x172e6ba7 } +pointer_reference { + id: 0x0f5c0d73 + kind: POINTER + pointee_type_id: 0x1730d353 +} pointer_reference { id: 0x0f5e0dda kind: POINTER @@ -6998,6 +7058,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x17d51999 } +pointer_reference { + id: 0x0f67218c + kind: POINTER + pointee_type_id: 0x17dc60ac +} pointer_reference { id: 0x0f677ef9 kind: POINTER @@ -8018,6 +8083,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x6df67d28 } +pointer_reference { + id: 0x120033c5 + kind: POINTER + pointee_type_id: 0x6240298a +} pointer_reference { id: 0x1200e2aa kind: POINTER @@ -8128,6 +8198,16 @@ pointer_reference { kind: POINTER pointee_type_id: 0x6101d583 } +pointer_reference { + id: 0x12d9a766 + kind: POINTER + pointee_type_id: 0x61267b05 +} +pointer_reference { + id: 0x12e0cbae + kind: POINTER + pointee_type_id: 0x61c3c826 +} pointer_reference { id: 0x12e24ee1 kind: POINTER @@ -8363,6 +8443,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x65b569e7 } +pointer_reference { + id: 0x13fe8737 + kind: POINTER + pointee_type_id: 0x65bafa41 +} 
pointer_reference { id: 0x14191d75 kind: POINTER @@ -9968,6 +10053,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x45a8a3c4 } +pointer_reference { + id: 0x1bfb2a60 + kind: POINTER + pointee_type_id: 0x45ac4f1e +} pointer_reference { id: 0x1c126813 kind: POINTER @@ -13538,6 +13628,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9ac13218 } +pointer_reference { + id: 0x2c209d56 + kind: POINTER + pointee_type_id: 0x9ac293c4 +} pointer_reference { id: 0x2c210d23 kind: POINTER @@ -15518,6 +15613,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x9921a72c } +pointer_reference { + id: 0x2cd9dff0 + kind: POINTER + pointee_type_id: 0x9927995d +} pointer_reference { id: 0x2cda29dd kind: POINTER @@ -19088,6 +19188,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x938c1916 } +pointer_reference { + id: 0x2e735a06 + kind: POINTER + pointee_type_id: 0x938d8e84 +} pointer_reference { id: 0x2e73c057 kind: POINTER @@ -22953,6 +23058,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xe644a39b } +pointer_reference { + id: 0x33086ec3 + kind: POINTER + pointee_type_id: 0xe6615d93 +} pointer_reference { id: 0x330db442 kind: POINTER @@ -23148,6 +23258,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xe7c3994d } +pointer_reference { + id: 0x33642722 + kind: POINTER + pointee_type_id: 0xe7d07a15 +} pointer_reference { id: 0x33657329 kind: POINTER @@ -23243,6 +23358,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xe42d7586 } +pointer_reference { + id: 0x339b8baf + kind: POINTER + pointee_type_id: 0xe42ec820 +} pointer_reference { id: 0x339ebb61 kind: POINTER @@ -24418,6 +24538,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xf25d597f } +pointer_reference { + id: 0x3609467c + kind: POINTER + pointee_type_id: 0xf265ff6f +} pointer_reference { id: 0x360c66c5 kind: POINTER @@ -25113,6 +25238,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xf4616560 } +pointer_reference { + id: 0x378dbf55 + kind: POINTER + 
pointee_type_id: 0xf4761bc8 +} pointer_reference { id: 0x37944814 kind: POINTER @@ -25353,6 +25483,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xca62a8b3 } +pointer_reference { + id: 0x38090ad2 + kind: POINTER + pointee_type_id: 0xca64cdd4 +} pointer_reference { id: 0x380a78f0 kind: POINTER @@ -25428,6 +25563,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xcaaeb726 } +pointer_reference { + id: 0x383bad14 + kind: POINTER + pointee_type_id: 0xcaae52cc +} pointer_reference { id: 0x383bcea3 kind: POINTER @@ -25483,6 +25623,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xcb720070 } +pointer_reference { + id: 0x384d1346 + kind: POINTER + pointee_type_id: 0xcb74ab84 +} pointer_reference { id: 0x384ff235 kind: POINTER @@ -26958,6 +27103,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xda60e9a6 } +pointer_reference { + id: 0x3c087d20 + kind: POINTER + pointee_type_id: 0xda61121d +} pointer_reference { id: 0x3c0933ca kind: POINTER @@ -27483,6 +27633,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xdfba2774 } +pointer_reference { + id: 0x3d801074 + kind: POINTER + pointee_type_id: 0xdc40a74d +} pointer_reference { id: 0x3d828cd9 kind: POINTER @@ -27823,6 +27978,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xd2e0ae36 } +pointer_reference { + id: 0x3e292ad1 + kind: POINTER + pointee_type_id: 0xd2e44dd9 +} pointer_reference { id: 0x3e29be7b kind: POINTER @@ -28353,6 +28513,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0xd41e888f } +pointer_reference { + id: 0x3f985801 + kind: POINTER + pointee_type_id: 0xd421869b +} pointer_reference { id: 0x3f9adc09 kind: POINTER @@ -31133,6 +31298,11 @@ qualified { qualifier: CONST qualified_type_id: 0x5d8fb74a } +qualified { + id: 0xcb74ab84 + qualifier: CONST + qualified_type_id: 0x5d95189a +} qualified { id: 0xcb987dd0 qualifier: CONST @@ -33263,6 +33433,11 @@ qualified { qualifier: CONST qualified_type_id: 0xa0bcedf5 } +qualified { + id: 0xf4761bc8 + qualifier: 
CONST + qualified_type_id: 0xa19fd9aa +} qualified { id: 0xf4a306a1 qualifier: CONST @@ -34577,6 +34752,11 @@ array { number_of_elements: 64 element_type_id: 0x295c7202 } +array { + id: 0x3a8e7b26 + number_of_elements: 3 + element_type_id: 0xa12e384a +} array { id: 0x3b24b1ed number_of_elements: 20 @@ -35202,6 +35382,11 @@ array { number_of_elements: 7 element_type_id: 0x295c7202 } +array { + id: 0x62a5e001 + number_of_elements: 13 + element_type_id: 0x92233392 +} array { id: 0x62e6d4e3 number_of_elements: 34 @@ -35502,6 +35687,11 @@ array { number_of_elements: 256 element_type_id: 0x2eab5b8a } +array { + id: 0x729479be + number_of_elements: 2 + element_type_id: 0x78f4e574 +} array { id: 0x7349866a number_of_elements: 49 @@ -35962,6 +36152,11 @@ array { number_of_elements: 4 element_type_id: 0x0ab21434 } +array { + id: 0x8fb3c5ee + number_of_elements: 1 + element_type_id: 0xe9e88d93 +} array { id: 0x8fc9368a number_of_elements: 1 @@ -36402,6 +36597,11 @@ array { number_of_elements: 4 element_type_id: 0xe62ebf07 } +array { + id: 0xb637307e + number_of_elements: 4 + element_type_id: 0xe9e88d93 +} array { id: 0xb6bc1f4d number_of_elements: 17 @@ -37013,6 +37213,10 @@ array { id: 0xdf3f459c element_type_id: 0x6c1b2db0 } +array { + id: 0xdf70f6ef + element_type_id: 0x6d25e07f +} array { id: 0xdfed0371 element_type_id: 0x6f523604 @@ -37410,6 +37614,10 @@ array { number_of_elements: 44 element_type_id: 0x5d8155a5 } +array { + id: 0xfca4258b + element_type_id: 0xe276adef +} array { id: 0xfd17183f element_type_id: 0xe4ba5b3d @@ -38092,6 +38300,11 @@ member { type_id: 0x48ff9190 offset: 192 } +member { + id: 0x31fb35e2 + type_id: 0x48c28092 + offset: 128 +} member { id: 0x321ecf49 type_id: 0x47554d36 @@ -39759,6 +39972,12 @@ member { type_id: 0x1162409a offset: 16 } +member { + id: 0xf4a9b31e + name: "__data" + type_id: 0xd359db99 + offset: 128 +} member { id: 0xf4b20642 name: "__data" @@ -39943,6 +40162,12 @@ member { type_id: 0x3a8cf6d8 offset: 64 } +member { + id: 
0x86165645 + name: "__pad2" + type_id: 0x5d8155a5 + offset: 120 +} member { id: 0x86715b21 name: "__pad2" @@ -40304,6 +40529,20 @@ member { type_id: 0x4585663f offset: 384 } +member { + id: 0x370a9ced + name: "__unused_1" + type_id: 0xe8034002 + offset: 448 + bitsize: 7 +} +member { + id: 0x8c5039ca + name: "__unused_2" + type_id: 0xe8034002 + offset: 465 + bitsize: 6 +} member { id: 0x206928e2 name: "__use" @@ -41583,6 +41822,11 @@ member { type_id: 0x6d7f5ff6 offset: 688 } +member { + id: 0x7980c676 + name: "acl_addr" + type_id: 0x6d25e07f +} member { id: 0xf17c8f4c name: "acl_cnt" @@ -41595,6 +41839,12 @@ member { type_id: 0x020f69fc offset: 128 } +member { + id: 0xa102d1d4 + name: "acl_ifindex" + type_id: 0x6720d32f + offset: 128 +} member { id: 0xdc1b3211 name: "acl_last_tx" @@ -41607,6 +41857,12 @@ member { type_id: 0x4585663f offset: 13152 } +member { + id: 0x23f246a8 + name: "acl_next" + type_id: 0x01ae5751 + offset: 192 +} member { id: 0xe08e16a4 name: "acl_num" @@ -42649,6 +42905,12 @@ member { type_id: 0xe02e14d6 offset: 256 } +member { + id: 0x24cfa4f0 + name: "addr" + type_id: 0xe276adef + offset: 32 +} member { id: 0x24d1edc6 name: "addr" @@ -42694,6 +42956,12 @@ member { type_id: 0x2d8abcdd offset: 256 } +member { + id: 0x09692cc4 + name: "addr2sockaddr" + type_id: 0x0f56bda3 + offset: 576 +} member { id: 0x29cfb070 name: "addr2str" @@ -43188,6 +43456,13 @@ member { type_id: 0x901eaf6a offset: 39112 } +member { + id: 0xec6d552f + name: "advanced" + type_id: 0x295c7202 + offset: 174 + bitsize: 1 +} member { id: 0x686a7031 name: "advertised" @@ -43253,6 +43528,12 @@ member { type_id: 0x6d7f5ff6 offset: 50368 } +member { + id: 0x1d3891d6 + name: "advmss" + type_id: 0x914dbfdc + offset: 13696 +} member { id: 0x1ffcf224 name: "ae" @@ -43638,6 +43919,11 @@ member { name: "all" type_id: 0x7584e7da } +member { + id: 0xee87ba00 + name: "all" + type_id: 0xe8034002 +} member { id: 0xeebc7141 name: "all" @@ -44936,6 +45222,12 @@ member { type_id: 0x92233392 
offset: 4096 } +member { + id: 0x2d08191c + name: "android_kabi_reserved1" + type_id: 0x92233392 + offset: 8576 +} member { id: 0x2d08193b name: "android_kabi_reserved1" @@ -45062,6 +45354,12 @@ member { type_id: 0x92233392 offset: 1152 } +member { + id: 0x2d081c07 + name: "android_kabi_reserved1" + type_id: 0x92233392 + offset: 11392 +} member { id: 0x2d081c0d name: "android_kabi_reserved1" @@ -45206,6 +45504,12 @@ member { type_id: 0x92233392 offset: 1856 } +member { + id: 0x2d081e39 + name: "android_kabi_reserved1" + type_id: 0x92233392 + offset: 18880 +} member { id: 0x2d081eb7 name: "android_kabi_reserved1" @@ -45751,6 +46055,12 @@ member { type_id: 0x92233392 offset: 2176 } +member { + id: 0x63760c6b + name: "android_kabi_reserved2" + type_id: 0x92233392 + offset: 8640 +} member { id: 0x63760c9c name: "android_kabi_reserved2" @@ -47172,6 +47482,12 @@ member { name: "apoll_events" type_id: 0xb94e10c7 } +member { + id: 0x484997ff + name: "app_limited" + type_id: 0xc9082b19 + offset: 15328 +} member { id: 0x6e3edc08 name: "appearance" @@ -48113,6 +48429,12 @@ member { type_id: 0x399c459b offset: 512 } +member { + id: 0x168605e8 + name: "ato" + type_id: 0xe62ebf07 + offset: 32 +} member { id: 0x176dead7 name: "atomic" @@ -49233,6 +49555,20 @@ member { offset: 256 bitsize: 1 } +member { + id: 0x6854e737 + name: "autoflowlabel" + type_id: 0xe8034002 + offset: 571 + bitsize: 1 +} +member { + id: 0xe803bc2e + name: "autoflowlabel_set" + type_id: 0xe8034002 + offset: 572 + bitsize: 1 +} member { id: 0xf1e86eba name: "automatic_shrinking" @@ -50572,6 +50908,11 @@ member { type_id: 0xe9735eb8 offset: 832 } +member { + id: 0x853440fd + name: "base" + type_id: 0xeeae7608 +} member { id: 0x853678f4 name: "base" @@ -51878,6 +52219,13 @@ member { type_id: 0x2c16e301 offset: 1088 } +member { + id: 0x87d4aa19 + name: "bind_address_no_port" + type_id: 0xb3e7bac9 + offset: 7112 + bitsize: 1 +} member { id: 0x23af1384 name: "bind_bucket_cachep" @@ -52214,6 +52562,11 @@ member { 
type_id: 0x7a6db264 offset: 216 } +member { + id: 0xd74e0402 + name: "bits" + type_id: 0x1b949b56 +} member { id: 0xd78825be name: "bits" @@ -53239,6 +53592,13 @@ member { type_id: 0x6d7f5ff6 offset: 12928 } +member { + id: 0xf5c438ad + name: "bpf_chg_cc_inprogress" + type_id: 0x295c7202 + offset: 18216 + bitsize: 1 +} member { id: 0xa9117c13 name: "bpf_cookie" @@ -53303,6 +53663,12 @@ member { type_id: 0xc9082b19 offset: 33920 } +member { + id: 0xa6861f94 + name: "bpf_sock_ops_cb_flags" + type_id: 0x295c7202 + offset: 18208 +} member { id: 0xede394b6 name: "bpf_storage" @@ -55553,6 +55919,12 @@ member { type_id: 0xf435685e offset: 320 } +member { + id: 0x3ed6f46c + name: "bytes_acked" + type_id: 0x92233392 + offset: 12736 +} member { id: 0x4d36c98b name: "bytes_ext_ops" @@ -55588,12 +55960,30 @@ member { type_id: 0xf435685e offset: 128 } +member { + id: 0xda945faa + name: "bytes_received" + type_id: 0x92233392 + offset: 12352 +} +member { + id: 0xd77f2af8 + name: "bytes_retrans" + type_id: 0x92233392 + offset: 17984 +} member { id: 0xf2787e23 name: "bytes_rx" type_id: 0x3a3eb2f9 offset: 512 } +member { + id: 0x840adfe0 + name: "bytes_sent" + type_id: 0x92233392 + offset: 12672 +} member { id: 0x8cf55cde name: "bytes_transferred" @@ -59234,6 +59624,25 @@ member { type_id: 0xe62ebf07 offset: 160 } +member { + id: 0x6c3076ab + name: "chrono_start" + type_id: 0xc9082b19 + offset: 13728 +} +member { + id: 0x4b0fd146 + name: "chrono_stat" + type_id: 0x2087fff2 + offset: 13760 +} +member { + id: 0x5514b7d1 + name: "chrono_type" + type_id: 0x295c7202 + offset: 13856 + bitsize: 2 +} member { id: 0x0c0ce1e9 name: "chunk_mask" @@ -59510,6 +59919,12 @@ member { type_id: 0x3104c07e offset: 1280 } +member { + id: 0x1dfe157e + name: "cipso" + type_id: 0x5d8155a5 + offset: 112 +} member { id: 0xcf891492 name: "cis" @@ -59759,6 +60174,12 @@ member { type_id: 0x6720d32f offset: 9088 } +member { + id: 0x200938c6 + name: "cleanup" + type_id: 0x0a8304ff + offset: 448 +} member { id: 
0x200c257e name: "cleanup" @@ -60533,6 +60954,12 @@ member { name: "clockid" type_id: 0x4478ba6b } +member { + id: 0xf6c2da43 + name: "clone" + type_id: 0x0dc966bc + offset: 448 +} member { id: 0xcf60e1c3 name: "cloned" @@ -61230,6 +61657,12 @@ member { name: "cmp" type_id: 0x36fc2198 } +member { + id: 0x9e4ebac2 + name: "cmsg_flags" + type_id: 0xe8034002 + offset: 6960 +} member { id: 0x141e1b04 name: "cnf" @@ -62676,6 +63109,24 @@ member { type_id: 0x6d7f5ff6 offset: 672 } +member { + id: 0x4aed4e02 + name: "compressed_ack" + type_id: 0x295c7202 + offset: 13712 +} +member { + id: 0x036494f3 + name: "compressed_ack_rcv_nxt" + type_id: 0xc9082b19 + offset: 12992 +} +member { + id: 0xe98a4697 + name: "compressed_ack_timer" + type_id: 0xcd7704bf + offset: 16320 +} member { id: 0xfb567eb3 name: "compressed_header_size" @@ -63153,6 +63604,12 @@ member { type_id: 0x7bfd6d29 offset: 3648 } +member { + id: 0x495c1c6d + name: "conn_request" + type_id: 0x2c210d23 + offset: 256 +} member { id: 0x220e0197 name: "conn_state" @@ -63912,6 +64369,12 @@ member { type_id: 0x1e4bcad9 offset: 320 } +member { + id: 0x3c401b61 + name: "convert_csum" + type_id: 0xb3e7bac9 + offset: 7128 +} member { id: 0x3f2fb089 name: "convert_ctx_access" @@ -63941,6 +64404,11 @@ member { type_id: 0x0fec2355 offset: 64 } +member { + id: 0x5846e373 + name: "cookie" + type_id: 0x0cb73b55 +} member { id: 0x5852e92d name: "cookie" @@ -64057,6 +64525,18 @@ member { type_id: 0xedf277ba offset: 192 } +member { + id: 0x5414dc08 + name: "copied" + type_id: 0x6720d32f + offset: 320 +} +member { + id: 0x1ec2d3d6 + name: "copied_seq" + type_id: 0xc9082b19 + offset: 12512 +} member { id: 0x58138131 name: "copied_timestamp" @@ -64284,12 +64764,24 @@ member { type_id: 0xedf277ba offset: 1472 } +member { + id: 0xa608f9d7 + name: "cork" + type_id: 0x60790295 + offset: 7296 +} member { id: 0xa64d7141 name: "cork" type_id: 0x25fbbcbe offset: 256 } +member { + id: 0xa677c461 + name: "cork" + type_id: 0x1f434195 + offset: 
1088 +} member { id: 0x7105015a name: "cork_bytes" @@ -65650,6 +66142,12 @@ member { type_id: 0x70e54b38 offset: 64 } +member { + id: 0x0f89928e + name: "create" + type_id: 0x0aee7ba0 + offset: 320 +} member { id: 0x0fae5380 name: "create" @@ -66643,6 +67141,12 @@ member { type_id: 0x0d68946d offset: 64 } +member { + id: 0xec94e17c + name: "ctx" + type_id: 0x0d12f45b + offset: 256 +} member { id: 0xeca4247d name: "ctx" @@ -67405,6 +67909,12 @@ member { type_id: 0x0f9357d8 offset: 192 } +member { + id: 0xd7cfbd25 + name: "cwnd_usage_seq" + type_id: 0xc9082b19 + offset: 14592 +} member { id: 0x1d7a6d67 name: "cwr" @@ -67874,6 +68384,12 @@ member { type_id: 0xe276adef offset: 32 } +member { + id: 0xca75de5a + name: "daddr_cache" + type_id: 0x3b461cc8 + offset: 320 +} member { id: 0xf25f00aa name: "dai_elems" @@ -68560,6 +69076,12 @@ member { type_id: 0x29de0844 offset: 64 } +member { + id: 0xffbef712 + name: "data" + type_id: 0x2cdbb77a + offset: 192 +} member { id: 0xffc40278 name: "data" @@ -68908,6 +69430,18 @@ member { type_id: 0x4585663f offset: 800 } +member { + id: 0x7ecbe350 + name: "data_segs_in" + type_id: 0xc9082b19 + offset: 12448 +} +member { + id: 0xdb2f5379 + name: "data_segs_out" + type_id: 0xc9082b19 + offset: 12640 +} member { id: 0x9e8ccb80 name: "data_shift" @@ -69590,6 +70124,12 @@ member { type_id: 0x74d29cf1 offset: 288 } +member { + id: 0xbc5ebfd1 + name: "deathrow" + type_id: 0x5e8dc7f4 + offset: 448 +} member { id: 0x6b1d26fa name: "debounce_period_us" @@ -70301,6 +70841,13 @@ member { type_id: 0x6d7f5ff6 offset: 4096 } +member { + id: 0x35577470 + name: "defer_connect" + type_id: 0xb3e7bac9 + offset: 7114 + bitsize: 1 +} member { id: 0x81689872 name: "defer_count" @@ -70745,12 +71292,24 @@ member { type_id: 0x6d7f5ff6 offset: 480 } +member { + id: 0x986caa86 + name: "delivered" + type_id: 0xc9082b19 + offset: 15232 +} member { id: 0x9871b666 name: "delivered" type_id: 0xd41e888f offset: 128 } +member { + id: 0xe06780dd + name: "delivered_ce" 
+ type_id: 0xc9082b19 + offset: 15264 +} member { id: 0xe07a91f0 name: "delivered_ce" @@ -70763,6 +71322,12 @@ member { type_id: 0xd3c80119 offset: 3008 } +member { + id: 0xc6e15ebb + name: "delivered_mstamp" + type_id: 0x92233392 + offset: 15424 +} member { id: 0xb362d7b2 name: "dellink" @@ -76192,6 +76757,13 @@ member { type_id: 0x03913382 offset: 6336 } +member { + id: 0x4b741bd1 + name: "dontfrag" + type_id: 0xe8034002 + offset: 570 + bitsize: 1 +} member { id: 0xbd60b76c name: "doorbell" @@ -77365,6 +77937,26 @@ member { name: "dsa" type_id: 0x0a5c3627 } +member { + id: 0x09aae020 + name: "dsack" + type_id: 0x914dbfdc + offset: 130 + bitsize: 1 +} +member { + id: 0x7efdf5c5 + name: "dsack_dups" + type_id: 0xc9082b19 + offset: 12800 +} +member { + id: 0x28f28f83 + name: "dsack_seen" + type_id: 0x295c7202 + offset: 173 + bitsize: 1 +} member { id: 0x3c3a7c4f name: "dsc" @@ -77567,6 +78159,12 @@ member { type_id: 0x1259e377 offset: 64 } +member { + id: 0xbbe3f808 + name: "dst" + type_id: 0x1259e377 + offset: 192 +} member { id: 0xbbf78102 name: "dst" @@ -77579,12 +78177,24 @@ member { type_id: 0xe8034002 offset: 48 } +member { + id: 0xebf05092 + name: "dst0opt" + type_id: 0x33642722 + offset: 192 +} member { id: 0x5493d7f6 name: "dst1" type_id: 0xe8034002 offset: 80 } +member { + id: 0xfb3c6654 + name: "dst1opt" + type_id: 0x33642722 + offset: 320 +} member { id: 0x59edf322 name: "dst_addr" @@ -77621,6 +78231,12 @@ member { type_id: 0x78f4e574 offset: 64 } +member { + id: 0x3e931a99 + name: "dst_cookie" + type_id: 0xe62ebf07 + offset: 640 +} member { id: 0x24a0666b name: "dst_csets" @@ -77763,6 +78379,13 @@ member { type_id: 0xe8034002 offset: 144 } +member { + id: 0xcc7711fa + name: "dstopts" + type_id: 0xe8034002 + offset: 8 + bitsize: 1 +} member { id: 0x58e808e0 name: "dt" @@ -78081,6 +78704,13 @@ member { type_id: 0x2f2aa245 offset: 64 } +member { + id: 0x330d862e + name: "dup_ack_counter" + type_id: 0x295c7202 + offset: 13720 + bitsize: 2 +} member { id: 
0x74874671 name: "dup_xol_addr" @@ -78140,6 +78770,12 @@ member { type_id: 0x2a0a605f offset: 832 } +member { + id: 0xac4c8092 + name: "duplicate_sack" + type_id: 0x8fb3c5ee + offset: 17152 +} member { id: 0xe631c0e0 name: "duplicated" @@ -78664,6 +79300,12 @@ member { type_id: 0x4585663f offset: 1568 } +member { + id: 0x3dfa7679 + name: "ecn_flags" + type_id: 0x295c7202 + offset: 14640 +} member { id: 0xd4971fcb name: "ed" @@ -79751,6 +80393,13 @@ member { name: "enabled" type_id: 0x74d29cf1 } +member { + id: 0x7a56a000 + name: "enabled" + type_id: 0xc9082b19 + offset: 95 + bitsize: 1 +} member { id: 0x7a56a64a name: "enabled" @@ -80161,6 +80810,18 @@ member { type_id: 0xd0ccb483 offset: 64 } +member { + id: 0xd5a56009 + name: "end_seq" + type_id: 0xc9082b19 + offset: 32 +} +member { + id: 0xd5a564b1 + name: "end_seq" + type_id: 0xc9082b19 + offset: 96 +} member { id: 0x357800a2 name: "end_station" @@ -82543,6 +83204,12 @@ member { type_id: 0x3fcbf304 offset: 1024 } +member { + id: 0x9633fcdf + name: "exp" + type_id: 0x6d7f5ff6 + offset: 136 +} member { id: 0x964efc36 name: "exp" @@ -82722,6 +83389,12 @@ member { type_id: 0x33756485 offset: 256 } +member { + id: 0x91036748 + name: "expires" + type_id: 0x33756485 + offset: 704 +} member { id: 0x91036cb7 name: "expires" @@ -83588,6 +84261,11 @@ member { type_id: 0x6720d32f offset: 672 } +member { + id: 0xe37696ac + name: "faddr" + type_id: 0xe276adef +} member { id: 0x0bad70a4 name: "fade_length" @@ -83704,6 +84382,12 @@ member { name: "family" type_id: 0xc93e017b } +member { + id: 0x9e9c9ad1 + name: "family" + type_id: 0xc93e017b + offset: 112 +} member { id: 0x9ebdabf6 name: "family" @@ -83750,6 +84434,18 @@ member { name: "fast_io" type_id: 0x6d7f5ff6 } +member { + id: 0xcee88548 + name: "fast_ipv6_only" + type_id: 0x6d7f5ff6 + offset: 336 +} +member { + id: 0xfd693ce8 + name: "fast_rcv_saddr" + type_id: 0xe276adef + offset: 288 +} member { id: 0xd0fd5f6f name: "fast_reconnect" @@ -83762,6 +84458,12 @@ member { 
type_id: 0xc93e017b offset: 704 } +member { + id: 0x247ebe29 + name: "fast_sk_family" + type_id: 0xc93e017b + offset: 320 +} member { id: 0xe96aa3ec name: "fast_start_pfn" @@ -83786,6 +84488,12 @@ member { type_id: 0x6d7f5ff6 offset: 4992 } +member { + id: 0x6398a0a9 + name: "fast_v6_rcv_saddr" + type_id: 0x6d25e07f + offset: 160 +} member { id: 0xbe23cb67 name: "fastchannel_db_ring" @@ -83798,6 +84506,63 @@ member { type_id: 0x0d53cd40 offset: 192 } +member { + id: 0xa42f5b5e + name: "fastopen_client_fail" + type_id: 0x295c7202 + offset: 13862 + bitsize: 2 +} +member { + id: 0x5d29f448 + name: "fastopen_connect" + type_id: 0x295c7202 + offset: 13859 + bitsize: 1 +} +member { + id: 0x72c8b468 + name: "fastopen_no_cookie" + type_id: 0x295c7202 + offset: 13860 + bitsize: 1 +} +member { + id: 0x349100e3 + name: "fastopen_req" + type_id: 0x04a9576c + offset: 18688 +} +member { + id: 0x7a4a99f1 + name: "fastopen_rsk" + type_id: 0x27847a9a + offset: 18752 +} +member { + id: 0x5aaa76bb + name: "fastopenq" + type_id: 0xce180920 + offset: 320 +} +member { + id: 0xa6e81f22 + name: "fastreuse" + type_id: 0x5ab350f8 + offset: 112 +} +member { + id: 0x244da0b8 + name: "fastreuseport" + type_id: 0x5ab350f8 + offset: 120 +} +member { + id: 0x73b4d4d5 + name: "fastuid" + type_id: 0xe90b32b7 + offset: 128 +} member { id: 0xa3521312 name: "fasync" @@ -86627,6 +87392,12 @@ member { type_id: 0x3846864c offset: 6720 } +member { + id: 0xc8ee40be + name: "first_tx_mstamp" + type_id: 0x92233392 + offset: 15360 +} member { id: 0x7fb051f9 name: "firstuse" @@ -86713,6 +87484,18 @@ member { type_id: 0x0ece1cac offset: 832 } +member { + id: 0x75129e6a + name: "fl" + type_id: 0x0bbb7f1b + offset: 64 +} +member { + id: 0x75577b27 + name: "fl" + type_id: 0x4e5da42b + offset: 448 +} member { id: 0x0476553f name: "fl_blocked_member" @@ -86795,6 +87578,12 @@ member { type_id: 0x3d3ea9eb offset: 1408 } +member { + id: 0x1f4e61c9 + name: "fl_net" + type_id: 0x0ca27481 + offset: 768 +} member { id: 
0xda5f8429 name: "fl_ops" @@ -88413,6 +89202,12 @@ member { type_id: 0xb3e7bac9 offset: 416 } +member { + id: 0x2ac20e0b + name: "flow_label" + type_id: 0xe276adef + offset: 384 +} member { id: 0xe809ed63 name: "flow_limit" @@ -89582,6 +90377,12 @@ member { type_id: 0x06835e9c offset: 1792 } +member { + id: 0x3c1c76af + name: "frag_size" + type_id: 0xe62ebf07 + offset: 416 +} member { id: 0x3c33517f name: "frag_size" @@ -89676,6 +90477,12 @@ member { type_id: 0x34544a3f offset: 192 } +member { + id: 0xe25ba6f2 + name: "fragsize" + type_id: 0x4585663f + offset: 128 +} member { id: 0x16107977 name: "frame" @@ -90394,6 +91201,13 @@ member { type_id: 0x038cb58c offset: 64 } +member { + id: 0x0ccb8adb + name: "freebind" + type_id: 0xb3e7bac9 + offset: 7106 + bitsize: 1 +} member { id: 0xedafcd8e name: "freehigh" @@ -90835,6 +91649,13 @@ member { type_id: 0x0fd41724 offset: 384 } +member { + id: 0xdaf769b4 + name: "frto" + type_id: 0x295c7202 + offset: 13871 + bitsize: 1 +} member { id: 0x9442b15a name: "fs" @@ -92915,12 +93736,24 @@ member { type_id: 0x2db9b3cf offset: 128 } +member { + id: 0x1d9b9b9d + name: "get_info" + type_id: 0x2e735a06 + offset: 320 +} member { id: 0x1d9baad0 name: "get_info" type_id: 0x2e43a9f5 offset: 640 } +member { + id: 0xbaa156f0 + name: "get_info_size" + type_id: 0x2cd9dff0 + offset: 384 +} member { id: 0x8e932e8e name: "get_ino_and_lblk_bits" @@ -93937,6 +94770,12 @@ member { type_id: 0x2c451522 offset: 640 } +member { + id: 0xd9ad3ae7 + name: "getsockopt" + type_id: 0x2c451522 + offset: 512 +} member { id: 0xd9adeb4d name: "getsockopt" @@ -94922,6 +95761,18 @@ member { type_id: 0x2584a3b9 offset: 2240 } +member { + id: 0x085eba8a + name: "gso_segs" + type_id: 0x914dbfdc + offset: 12304 +} +member { + id: 0xeaa4b2c9 + name: "gso_size" + type_id: 0xe8034002 + offset: 304 +} member { id: 0xcf0db7be name: "gso_skb" @@ -96511,6 +97362,19 @@ member { type_id: 0x295c7202 offset: 480 } +member { + id: 0x3cd68edd + name: "hdrincl" + type_id: 
0xb3e7bac9 + offset: 7107 + bitsize: 1 +} +member { + id: 0xd608b573 + name: "hdrlen" + type_id: 0xb3e7bac9 + offset: 8 +} member { id: 0xd6dc741c name: "hdrlen" @@ -97211,6 +98075,12 @@ member { type_id: 0x33756485 offset: 64 } +member { + id: 0x39a33a5e + name: "hi" + type_id: 0xe8034002 + offset: 16 +} member { id: 0xf5cc8294 name: "hibern8_exit_cnt" @@ -97343,6 +98213,12 @@ member { type_id: 0x5395485e offset: 3712 } +member { + id: 0x60f3e5b5 + name: "high_seq" + type_id: 0xc9082b19 + offset: 17856 +} member { id: 0x15795ada name: "high_speed" @@ -97379,6 +98255,12 @@ member { type_id: 0x3b04bc55 offset: 12992 } +member { + id: 0x6da3106a + name: "highest_sack" + type_id: 0x054f691a + offset: 17728 +} member { id: 0x2ca966e1 name: "highest_zoneidx" @@ -97698,12 +98580,38 @@ member { type_id: 0xe8a3834b offset: 1152 } +member { + id: 0x46cd5abc + name: "hop_limit" + type_id: 0x007e8ce4 + offset: 455 + bitsize: 9 +} member { id: 0x9f4d6a46 name: "hop_limit" type_id: 0x0faae5b1 offset: 32 } +member { + id: 0x9f6b950b + name: "hop_limit" + type_id: 0x295c7202 + offset: 64 +} +member { + id: 0x111085fe + name: "hopopt" + type_id: 0x33642722 + offset: 128 +} +member { + id: 0xede7771d + name: "hopopts" + type_id: 0xe8034002 + offset: 6 + bitsize: 1 +} member { id: 0xe90d2c02 name: "hops" @@ -99832,6 +100740,11 @@ member { offset: 3664 bitsize: 1 } +member { + id: 0xe50fbded + name: "ib_net" + type_id: 0xb335d16f +} member { id: 0xe8f0d6c1 name: "ib_window_map" @@ -100053,6 +100966,201 @@ member { type_id: 0x80c20070 offset: 192 } +member { + id: 0x2a778fa0 + name: "icsk_accept_queue" + type_id: 0xf05a506e + offset: 8704 +} +member { + id: 0xc36d8395 + name: "icsk_ack" + type_id: 0x260445a5 + offset: 11008 +} +member { + id: 0xeb6a5fa1 + name: "icsk_af_ops" + type_id: 0x384d1346 + offset: 10624 +} +member { + id: 0xb27d04f5 + name: "icsk_backoff" + type_id: 0xb3e7bac9 + offset: 10968 +} +member { + id: 0xf1a367c5 + name: "icsk_bind2_hash" + type_id: 0x0a67a4e3 + 
offset: 9408 +} +member { + id: 0xa53e6076 + name: "icsk_bind_hash" + type_id: 0x33086ec3 + offset: 9344 +} +member { + id: 0x8e756dfb + name: "icsk_ca_dst_locked" + type_id: 0xb3e7bac9 + offset: 10951 + bitsize: 1 +} +member { + id: 0x9b3f0555 + name: "icsk_ca_initialized" + type_id: 0xb3e7bac9 + offset: 10949 + bitsize: 1 +} +member { + id: 0x2b917283 + name: "icsk_ca_ops" + type_id: 0x30ba0612 + offset: 10560 +} +member { + id: 0xa86967ec + name: "icsk_ca_priv" + type_id: 0x62a5e001 + offset: 11456 +} +member { + id: 0x5c570222 + name: "icsk_ca_setsockopt" + type_id: 0xb3e7bac9 + offset: 10950 + bitsize: 1 +} +member { + id: 0x8adfff2f + name: "icsk_ca_state" + type_id: 0xb3e7bac9 + offset: 10944 + bitsize: 5 +} +member { + id: 0x411180b7 + name: "icsk_clean_acked" + type_id: 0x0fab66ce + offset: 10816 +} +member { + id: 0xcfd1f1ed + name: "icsk_delack_max" + type_id: 0xe62ebf07 + offset: 10496 +} +member { + id: 0xbf3d6991 + name: "icsk_delack_timer" + type_id: 0xd298e888 + offset: 9984 +} +member { + id: 0xd7cc2503 + name: "icsk_ext_hdr_len" + type_id: 0xe8034002 + offset: 10992 +} +member { + id: 0x9bd0d4a0 + name: "icsk_inet" + type_id: 0xd2a4529d +} +member { + id: 0xc75c6970 + name: "icsk_mtup" + type_id: 0x249bcdf3 + offset: 11200 +} +member { + id: 0xfd5174f1 + name: "icsk_pending" + type_id: 0xb3e7bac9 + offset: 10960 +} +member { + id: 0x9796567d + name: "icsk_pmtu_cookie" + type_id: 0xe62ebf07 + offset: 10528 +} +member { + id: 0x2d061585 + name: "icsk_probes_out" + type_id: 0xb3e7bac9 + offset: 10984 +} +member { + id: 0xf4f5c9e8 + name: "icsk_probes_tstamp" + type_id: 0xc9082b19 + offset: 11328 +} +member { + id: 0x3adf52b3 + name: "icsk_retransmit_timer" + type_id: 0xd298e888 + offset: 9536 +} +member { + id: 0x6bd77d33 + name: "icsk_retransmits" + type_id: 0xb3e7bac9 + offset: 10952 +} +member { + id: 0xaa84a388 + name: "icsk_rto" + type_id: 0xe62ebf07 + offset: 10432 +} +member { + id: 0x7c9184fd + name: "icsk_rto_min" + type_id: 0xe62ebf07 + 
offset: 10464 +} +member { + id: 0x2446b1e6 + name: "icsk_syn_retries" + type_id: 0xb3e7bac9 + offset: 10976 +} +member { + id: 0x5392fe03 + name: "icsk_sync_mss" + type_id: 0x383bad14 + offset: 10880 +} +member { + id: 0xe54062f3 + name: "icsk_timeout" + type_id: 0x33756485 + offset: 9472 +} +member { + id: 0x69ab5d87 + name: "icsk_ulp_data" + type_id: 0x18bd6530 + offset: 10752 +} +member { + id: 0x50d22351 + name: "icsk_ulp_ops" + type_id: 0x378dbf55 + offset: 10688 +} +member { + id: 0x973a1269 + name: "icsk_user_timeout" + type_id: 0xc9082b19 + offset: 11360 +} member { id: 0xa75f3275 name: "icv_len" @@ -102871,6 +103979,35 @@ member { type_id: 0x1d33485a offset: 4992 } +member { + id: 0x07676cab + name: "inet_conn" + type_id: 0x82dbb487 +} +member { + id: 0x78639f81 + name: "inet_id" + type_id: 0xe8034002 + offset: 7056 +} +member { + id: 0x38f59a08 + name: "inet_opt" + type_id: 0x38090ad2 + offset: 6976 +} +member { + id: 0x8be10b49 + name: "inet_saddr" + type_id: 0xe276adef + offset: 6912 +} +member { + id: 0x674b03be + name: "inet_sport" + type_id: 0x7584e7da + offset: 7040 +} member { id: 0x3e2089f0 name: "inexact_bins" @@ -103336,6 +104473,12 @@ member { type_id: 0x2c24424a offset: 384 } +member { + id: 0x1ac6e8a5 + name: "init" + type_id: 0x2c24424a + offset: 128 +} member { id: 0x1ac72504 name: "init" @@ -106416,6 +107559,17 @@ member { type_id: 0x23317b59 offset: 256 } +member { + id: 0xce9a5c98 + name: "ipi6_addr" + type_id: 0x6d25e07f +} +member { + id: 0x8e98680e + name: "ipi6_ifindex" + type_id: 0x6720d32f + offset: 128 +} member { id: 0xe69d21c0 name: "ipi_list" @@ -106524,6 +107678,12 @@ member { name: "ipv6" type_id: 0x09547003 } +member { + id: 0xe2141007 + name: "ipv6_ac_list" + type_id: 0x01ae5751 + offset: 768 +} member { id: 0x12832f3f name: "ipv6_dev_find" @@ -106536,12 +107696,24 @@ member { type_id: 0x221aed72 offset: 128 } +member { + id: 0x1e958d31 + name: "ipv6_fl_list" + type_id: 0x3f985801 + offset: 832 +} member { id: 0xcf78c1e2 
name: "ipv6_fragment" type_id: 0x2d271bc7 offset: 1344 } +member { + id: 0xfce07b0f + name: "ipv6_mc_list" + type_id: 0x12e0cbae + offset: 704 +} member { id: 0xab39140b name: "ipv6_nh" @@ -107410,6 +108582,13 @@ member { offset: 9248 bitsize: 1 } +member { + id: 0xb9615f80 + name: "is_changed" + type_id: 0x5d8155a5 + offset: 98 + bitsize: 1 +} member { id: 0xcf3f1b2e name: "is_child_subreaper" @@ -107441,6 +108620,13 @@ member { type_id: 0x6d7f5ff6 offset: 128 } +member { + id: 0xb9645bc8 + name: "is_cwnd_limited" + type_id: 0x295c7202 + offset: 13887 + bitsize: 1 +} member { id: 0x454d50bd name: "is_dead" @@ -107584,6 +108770,13 @@ member { type_id: 0x6d7f5ff6 offset: 64 } +member { + id: 0x1facb5a4 + name: "is_icsk" + type_id: 0xb3e7bac9 + offset: 7105 + bitsize: 1 +} member { id: 0xbbefba04 name: "is_initialized" @@ -107881,6 +109074,13 @@ member { type_id: 0x6d7f5ff6 offset: 968 } +member { + id: 0xb9f127f6 + name: "is_sack_reneg" + type_id: 0x295c7202 + offset: 13861 + bitsize: 1 +} member { id: 0x7855a324 name: "is_second_field" @@ -107928,6 +109128,13 @@ member { type_id: 0x295c7202 offset: 464 } +member { + id: 0x1e6a6587 + name: "is_strictroute" + type_id: 0x5d8155a5 + offset: 96 + bitsize: 1 +} member { id: 0x3fe7857f name: "is_string" @@ -108830,6 +110037,24 @@ member { type_id: 0x0f054e36 offset: 704 } +member { + id: 0xb4c1a226 + name: "keepalive_intvl" + type_id: 0x4585663f + offset: 18144 +} +member { + id: 0xeb2f3b5f + name: "keepalive_probes" + type_id: 0x295c7202 + offset: 14648 +} +member { + id: 0x9e7c1e77 + name: "keepalive_time" + type_id: 0x4585663f + offset: 18112 +} member { id: 0x3f65622e name: "keepout" @@ -110052,6 +111277,12 @@ member { type_id: 0x295c7202 offset: 328 } +member { + id: 0xe6f3661d + name: "l3mdev" + type_id: 0x6720d32f + offset: 64 +} member { id: 0xa2b82643 name: "l3num" @@ -110118,6 +111349,12 @@ member { type_id: 0x3e10b518 offset: 8768 } +member { + id: 0x05cb6176 + name: "label" + type_id: 0xe276adef + offset: 64 
+} member { id: 0x05cb61ce name: "label" @@ -110470,6 +111707,12 @@ member { type_id: 0x92233392 offset: 2624 } +member { + id: 0x8762b004 + name: "last_delivered" + type_id: 0xc9082b19 + offset: 128 +} member { id: 0x0c7f9894 name: "last_delta" @@ -110725,6 +111968,12 @@ member { type_id: 0x33756485 offset: 128 } +member { + id: 0x41099e1a + name: "last_oow_ack_time" + type_id: 0xc9082b19 + offset: 12960 +} member { id: 0x7c2533b6 name: "last_overrun" @@ -110875,6 +112124,12 @@ member { type_id: 0x3ef55b88 offset: 64 } +member { + id: 0x4c233807 + name: "last_seg_size" + type_id: 0xe8034002 + offset: 160 +} member { id: 0xadd33b5a name: "last_seq" @@ -111071,6 +112326,12 @@ member { type_id: 0xedf277ba offset: 64 } +member { + id: 0xccd48510 + name: "lastuse" + type_id: 0x33756485 + offset: 640 +} member { id: 0xccd48a1b name: "lastuse" @@ -111899,6 +113160,12 @@ member { type_id: 0x0baa70a7 offset: 192 } +member { + id: 0xb82c6bed + name: "len" + type_id: 0x29b77961 + offset: 128 +} member { id: 0xb82c8b76 name: "len" @@ -112204,6 +113471,12 @@ member { type_id: 0x6720d32f offset: 64 } +member { + id: 0xb50a4f6b + name: "length" + type_id: 0x6720d32f + offset: 160 +} member { id: 0xb518ea7e name: "length" @@ -112829,6 +114102,18 @@ member { type_id: 0xc9082b19 offset: 8192 } +member { + id: 0xe93fdbc8 + name: "linger" + type_id: 0x33756485 + offset: 320 +} +member { + id: 0x0ab38392 + name: "linger2" + type_id: 0x6720d32f + offset: 18176 +} member { id: 0x55610d26 name: "link" @@ -113940,6 +115225,11 @@ member { offset: 34 bitsize: 1 } +member { + id: 0x9f6503ee + name: "lo" + type_id: 0xe8034002 +} member { id: 0x9fbe75ca name: "lo" @@ -114145,6 +115435,12 @@ member { type_id: 0x2d2736e0 offset: 768 } +member { + id: 0x5b66efeb + name: "local_port_range" + type_id: 0x3fbb6cee + offset: 8512 +} member { id: 0x11369f07 name: "local_sdu_itime" @@ -115530,6 +116826,18 @@ member { type_id: 0x299c4193 offset: 576 } +member { + id: 0x14a57188 + name: "lost" + type_id: 
0xc9082b19 + offset: 15296 +} +member { + id: 0x36a89416 + name: "lost_cnt_hint" + type_id: 0x6720d32f + offset: 17792 +} member { id: 0xc431d6f2 name: "lost_events" @@ -115558,12 +116866,24 @@ member { name: "lost_msgs" type_id: 0x6f3d464c } +member { + id: 0x2d8724a1 + name: "lost_out" + type_id: 0xc9082b19 + offset: 15680 +} member { id: 0xa0d3e60e name: "lost_samples" type_id: 0x1f4573ef offset: 7808 } +member { + id: 0x86cd98a0 + name: "lost_skb_hint" + type_id: 0x054f691a + offset: 16896 +} member { id: 0xe4294f6c name: "low" @@ -115836,6 +117156,12 @@ member { type_id: 0x1dd474ea offset: 832 } +member { + id: 0x9d3fc94e + name: "lrcvtime" + type_id: 0xe62ebf07 + offset: 128 +} member { id: 0x5434db0f name: "lru" @@ -115953,6 +117279,12 @@ member { type_id: 0x4585663f offset: 1312 } +member { + id: 0xcaaa728e + name: "lsndtime" + type_id: 0xc9082b19 + offset: 12928 +} member { id: 0xabed406f name: "lsr_save_mask" @@ -117538,6 +118870,12 @@ member { name: "mark" type_id: 0xc9082b19 } +member { + id: 0x8196ad18 + name: "mark" + type_id: 0xc9082b19 + offset: 384 +} member { id: 0x81b98a1e name: "mark" @@ -119350,6 +120688,12 @@ member { type_id: 0x4585663f offset: 96 } +member { + id: 0x1d340096 + name: "max_packets_out" + type_id: 0xc9082b19 + offset: 14560 +} member { id: 0xc09a441d name: "max_page" @@ -119468,6 +120812,12 @@ member { type_id: 0x4585663f offset: 224 } +member { + id: 0x6bf829f7 + name: "max_qlen" + type_id: 0x6720d32f + offset: 192 +} member { id: 0xfbb3d064 name: "max_queue" @@ -120193,6 +121543,12 @@ member { type_id: 0xc9082b19 offset: 2048 } +member { + id: 0xb0b5b654 + name: "max_window" + type_id: 0xc9082b19 + offset: 13376 +} member { id: 0x02a4136b name: "max_workers" @@ -120565,6 +121921,26 @@ member { type_id: 0x4f5972f9 offset: 5056 } +member { + id: 0x5a22c9ad + name: "mc_addr" + type_id: 0xe276adef + offset: 7200 +} +member { + id: 0x1b81e20c + name: "mc_all" + type_id: 0xe8034002 + offset: 573 + bitsize: 1 +} +member { + id: 
0x1bda0ea3 + name: "mc_all" + type_id: 0xb3e7bac9 + offset: 7110 + bitsize: 1 +} member { id: 0xedd041f2 name: "mc_autojoin_sk" @@ -120631,12 +122007,24 @@ member { type_id: 0xf1159c31 offset: 1728 } +member { + id: 0x02fcf451 + name: "mc_index" + type_id: 0x6720d32f + offset: 7168 +} member { id: 0x9c4c6ca6 name: "mc_list" type_id: 0x2d5e3e76 offset: 192 } +member { + id: 0x9c6a8ab2 + name: "mc_list" + type_id: 0x0bbfad26 + offset: 7232 +} member { id: 0x9c6ad1bd name: "mc_list" @@ -120649,6 +122037,20 @@ member { type_id: 0xa7c362b0 offset: 6528 } +member { + id: 0xa582fe9b + name: "mc_loop" + type_id: 0xe8034002 + offset: 464 + bitsize: 1 +} +member { + id: 0xa5d9163f + name: "mc_loop" + type_id: 0xb3e7bac9 + offset: 7108 + bitsize: 1 +} member { id: 0x99f606f1 name: "mc_maxdelay" @@ -120733,6 +122135,12 @@ member { type_id: 0xf313e71a offset: 352 } +member { + id: 0x7008fda6 + name: "mc_ttl" + type_id: 0xb3e7bac9 + offset: 7088 +} member { id: 0xd4582df4 name: "mc_v1_seen" @@ -120810,6 +122218,19 @@ member { type_id: 0xf1159c31 offset: 576 } +member { + id: 0x34d8f2b6 + name: "mcast_hops" + type_id: 0x007e8ce4 + offset: 471 + bitsize: 9 +} +member { + id: 0xe14bd1fe + name: "mcast_oif" + type_id: 0x6720d32f + offset: 512 +} member { id: 0x2cf10ddb name: "mcgrp_offset" @@ -120946,6 +122367,18 @@ member { type_id: 0x16239d0c offset: 64 } +member { + id: 0xf098d631 + name: "mdev_max_us" + type_id: 0xc9082b19 + offset: 14208 +} +member { + id: 0x00d04522 + name: "mdev_us" + type_id: 0xc9082b19 + offset: 14176 +} member { id: 0xc96faa23 name: "mdio" @@ -122421,6 +123854,12 @@ member { type_id: 0x295c7202 offset: 16 } +member { + id: 0x78ae54cb + name: "min_hopcount" + type_id: 0xb3e7bac9 + offset: 576 +} member { id: 0x402764fb name: "min_hw_heartbeat_ms" @@ -122605,6 +124044,12 @@ member { type_id: 0x1a3a7059 offset: 384 } +member { + id: 0xa28c3827 + name: "min_ttl" + type_id: 0xb3e7bac9 + offset: 7080 +} member { id: 0x47fc27ee name: "min_tx_rate" @@ -125501,6 
+126946,23 @@ member { type_id: 0x914dbfdc offset: 1152 } +member { + id: 0x8571ffda + name: "mss_cache" + type_id: 0xc9082b19 + offset: 13408 +} +member { + id: 0x5ddbff68 + name: "mss_clamp" + type_id: 0x914dbfdc + offset: 176 +} +member { + id: 0xf3d1e1b3 + name: "mstamp" + type_id: 0x92233392 +} member { id: 0xeb62513d name: "mt" @@ -125584,6 +127046,24 @@ member { type_id: 0x0faae5b1 offset: 64 } +member { + id: 0x3641013e + name: "mtu_info" + type_id: 0xc9082b19 + offset: 18624 +} +member { + id: 0x541b592a + name: "mtu_probe" + type_id: 0x0904167f + offset: 18560 +} +member { + id: 0x2e3a46f9 + name: "mtu_reduced" + type_id: 0x0f626ee5 + offset: 640 +} member { id: 0x3a75162c name: "mtx" @@ -125626,6 +127106,12 @@ member { type_id: 0x6720d32f offset: 64 } +member { + id: 0xe7efca93 + name: "multi" + type_id: 0xf86b845e + offset: 64 +} member { id: 0xfcd3aab9 name: "multi_block" @@ -126535,6 +128021,12 @@ member { type_id: 0x42201dce offset: 128 } +member { + id: 0x0d994758 + name: "name" + type_id: 0x42201dce + offset: 512 +} member { id: 0x0d9948a1 name: "name" @@ -128450,6 +129942,18 @@ member { type_id: 0x92233392 offset: 30720 } +member { + id: 0x171ea46a + name: "net_frag_header_len" + type_id: 0x914dbfdc + offset: 400 +} +member { + id: 0x729f250d + name: "net_header_len" + type_id: 0x914dbfdc + offset: 384 +} member { id: 0x61d0c33e name: "net_id" @@ -128849,6 +130353,11 @@ member { name: "next" type_id: 0x3846864c } +member { + id: 0x11cc9887 + name: "next" + type_id: 0x3f985801 +} member { id: 0x11ccea6a name: "next" @@ -128950,6 +130459,12 @@ member { name: "next" type_id: 0x2d5e3e76 } +member { + id: 0x11e1e8dd + name: "next" + type_id: 0x12e0cbae + offset: 192 +} member { id: 0x11e57880 name: "next" @@ -129092,6 +130607,11 @@ member { type_id: 0x0b561305 offset: 64 } +member { + id: 0x11f8bba0 + name: "next" + type_id: 0x0bbb7f1b +} member { id: 0x11f8e110 name: "next" @@ -129325,6 +130845,11 @@ member { name: "next_rcu" type_id: 0x2d5e3e76 } 
+member { + id: 0x11613e69 + name: "next_rcu" + type_id: 0x0bbfad26 +} member { id: 0x4074d0ef name: "next_request" @@ -129425,6 +130950,11 @@ member { name: "nextevt" type_id: 0x92233392 } +member { + id: 0xe85e1d32 + name: "nexthdr" + type_id: 0xb3e7bac9 +} member { id: 0xe8c4a21b name: "nexthdr" @@ -129443,6 +130973,12 @@ member { type_id: 0x81cadb9e offset: 4608 } +member { + id: 0xe2d3a144 + name: "nexthop" + type_id: 0xe276adef + offset: 32 +} member { id: 0xd02f52bb name: "nf" @@ -130398,6 +131934,18 @@ member { name: "node" type_id: 0x6c32e522 } +member { + id: 0x0f3ca80d + name: "node" + type_id: 0x49a73111 + offset: 384 +} +member { + id: 0x0f3ca934 + name: "node" + type_id: 0x49a73111 + offset: 256 +} member { id: 0x0f3ca99f name: "node" @@ -130695,6 +132243,13 @@ member { name: "node_zones" type_id: 0x8cbe1332 } +member { + id: 0xb932820e + name: "nodefrag" + type_id: 0xb3e7bac9 + offset: 7111 + bitsize: 1 +} member { id: 0xd988ce91 name: "nodeinfo" @@ -130908,6 +132463,13 @@ member { offset: 86 bitsize: 1 } +member { + id: 0xa073f76a + name: "nonagle" + type_id: 0x295c7202 + offset: 13864 + bitsize: 4 +} member { id: 0x5cf54d0d name: "nonatomic" @@ -131310,6 +132872,12 @@ member { type_id: 0x04b89667 offset: 9088 } +member { + id: 0xfdfb52fb + name: "notsent_lowat" + type_id: 0xc9082b19 + offset: 15616 +} member { id: 0xf8489542 name: "now_frame" @@ -134133,6 +135701,12 @@ member { type_id: 0x4585663f offset: 6272 } +member { + id: 0x7cf21fa8 + name: "num_sacks" + type_id: 0x295c7202 + offset: 152 +} member { id: 0xbcb6b731 name: "num_sample_rates" @@ -134931,6 +136505,13 @@ member { type_id: 0x0e156b94 offset: 736 } +member { + id: 0x4f7607b0 + name: "odstopts" + type_id: 0xe8034002 + offset: 9 + bitsize: 1 +} member { id: 0xc03481fc name: "oemid" @@ -135722,6 +137303,13 @@ member { type_id: 0x087aa58e offset: 5632 } +member { + id: 0xba0eaaec + name: "ohopopts" + type_id: 0xe8034002 + offset: 7 + bitsize: 1 +} member { id: 0xa5064bbb name: "oifindex" 
@@ -136106,6 +137694,12 @@ member { type_id: 0xb0312d5a offset: 7776 } +member { + id: 0x78ec3163 + name: "ooo_last_skb" + type_id: 0x054f691a + offset: 17088 +} member { id: 0xdfd3d3d5 name: "ooo_okay" @@ -137352,17 +138946,46 @@ member { type_id: 0xa7c362b0 offset: 640 } +member { + id: 0xf64ae666 + name: "opt" + type_id: 0x00cc5f1f + offset: 64 +} member { id: 0xf6548174 name: "opt" type_id: 0x1eafac69 } +member { + id: 0xf6635680 + name: "opt" + type_id: 0x29719ae3 + offset: 128 +} member { id: 0xf6637b68 name: "opt" type_id: 0x295c7202 offset: 128 } +member { + id: 0xf676260e + name: "opt" + type_id: 0x3c087d20 + offset: 256 +} +member { + id: 0xf67626a5 + name: "opt" + type_id: 0x3c087d20 +} +member { + id: 0xf6762c03 + name: "opt" + type_id: 0x3c087d20 + offset: 896 +} member { id: 0xf6da3e67 name: "opt" @@ -137381,12 +139004,24 @@ member { type_id: 0x6d7f5ff6 offset: 392 } +member { + id: 0x5e1efe31 + name: "opt_flen" + type_id: 0xe8034002 + offset: 64 +} member { id: 0xb8d181f5 name: "opt_mapping_size" type_id: 0x2f5fcbf3 offset: 1472 } +member { + id: 0x00c73957 + name: "opt_nflen" + type_id: 0xe8034002 + offset: 80 +} member { id: 0xb435ce80 name: "opt_sectors" @@ -137449,6 +139084,12 @@ member { type_id: 0x295c7202 offset: 576 } +member { + id: 0x3cedbd03 + name: "optlen" + type_id: 0x5d8155a5 + offset: 64 +} member { id: 0x39c46a92 name: "opts" @@ -137756,6 +139397,13 @@ member { type_id: 0x54bb1a65 offset: 96 } +member { + id: 0x1e410881 + name: "osrcrt" + type_id: 0xe8034002 + offset: 1 + bitsize: 1 +} member { id: 0x9dbc540c name: "otg" @@ -137902,6 +139550,12 @@ member { type_id: 0x295c7202 offset: 1088 } +member { + id: 0x0b88a7b8 + name: "out_of_order_queue" + type_id: 0xeb923a9b + offset: 17024 +} member { id: 0x21d1ea91 name: "out_q_ctx" @@ -138575,6 +140229,12 @@ member { name: "owner" type_id: 0x30f06408 } +member { + id: 0x4ad6a726 + name: "owner" + type_id: 0x4bdbd862 + offset: 576 +} member { id: 0x4aeb46f7 name: "owner" @@ -138629,6 
+140289,18 @@ member { type_id: 0xc6cbbd05 offset: 640 } +member { + id: 0xb8ff9609 + name: "owners" + type_id: 0x5e8dc7f4 + offset: 384 +} +member { + id: 0xb8ff9c83 + name: "owners" + type_id: 0x5e8dc7f4 + offset: 512 +} member { id: 0xa6673206 name: "p" @@ -138940,6 +140612,12 @@ member { name: "p_vp9_frame" type_id: 0x1f6d4933 } +member { + id: 0xb18c6cab + name: "pacing_timer" + type_id: 0xcd7704bf + offset: 15744 +} member { id: 0xa9322d92 name: "pack_id" @@ -139016,6 +140694,12 @@ member { type_id: 0xe62ebf07 offset: 64 } +member { + id: 0xfdbc865b + name: "packets_out" + type_id: 0xc9082b19 + offset: 14496 +} member { id: 0x716716c4 name: "packing" @@ -139221,6 +140905,13 @@ member { type_id: 0x3e3c1b86 offset: 8808 } +member { + id: 0xb13666fe + name: "padding" + type_id: 0xe8034002 + offset: 566 + bitsize: 1 +} member { id: 0x1366b916 name: "padding0" @@ -140375,6 +142066,12 @@ member { type_id: 0xe62ebf07 offset: 224 } +member { + id: 0xc70bed2a + name: "park" + type_id: 0x0aee7ba0 + offset: 512 +} member { id: 0x53dc4767 name: "park_pending" @@ -141822,6 +143519,11 @@ member { type_id: 0x9ec07527 offset: 17856 } +member { + id: 0xf97ca310 + name: "pending" + type_id: 0xb3e7bac9 +} member { id: 0xf98ac68d name: "pending" @@ -143464,6 +145166,11 @@ member { name: "pid" type_id: 0xe62ebf07 } +member { + id: 0x749120ed + name: "pid" + type_id: 0x18456730 +} member { id: 0x7491240c name: "pid" @@ -143787,6 +145494,12 @@ member { type_id: 0x290604c6 offset: 512 } +member { + id: 0xe47590e7 + name: "pinet6" + type_id: 0x13fe8737 + offset: 6848 +} member { id: 0x1762f1c8 name: "ping" @@ -143799,6 +145512,12 @@ member { type_id: 0x5d6e0aa2 offset: 4800 } +member { + id: 0x517ebeed + name: "pingpong" + type_id: 0xb3e7bac9 + offset: 16 +} member { id: 0xc9b75710 name: "pinned" @@ -144170,6 +145889,12 @@ member { type_id: 0xb3e7bac9 bitsize: 3 } +member { + id: 0xe8802f7b + name: "pktoptions" + type_id: 0x054f691a + offset: 960 +} member { id: 0x0a3be422 name: 
"pkts_acked" @@ -144611,6 +146336,19 @@ member { type_id: 0xf313e71a offset: 2368 } +member { + id: 0x6e852b14 + name: "pmtudisc" + type_id: 0xe8034002 + offset: 563 + bitsize: 3 +} +member { + id: 0x79ed9153 + name: "pmtudisc" + type_id: 0xb3e7bac9 + offset: 7096 +} member { id: 0x933349d1 name: "pmu" @@ -145519,6 +147257,12 @@ member { type_id: 0xfe384bcc offset: 2432 } +member { + id: 0x48322e2f + name: "port" + type_id: 0xc93e017b + offset: 96 +} member { id: 0x4848f266 name: "port" @@ -147040,6 +148784,12 @@ member { type_id: 0x39c49895 offset: 64 } +member { + id: 0xfcce6b61 + name: "pred_flags" + type_id: 0xe276adef + offset: 12320 +} member { id: 0xd9582306 name: "pred_probs" @@ -147999,6 +149749,12 @@ member { type_id: 0xc9082b19 offset: 128 } +member { + id: 0xa9ca3fe9 + name: "prior_cwnd" + type_id: 0xc9082b19 + offset: 15136 +} member { id: 0x8be7c123 name: "prior_delivered" @@ -148022,6 +149778,12 @@ member { name: "prior_mstamp" type_id: 0x92233392 } +member { + id: 0x17b8fb75 + name: "prior_ssthresh" + type_id: 0xc9082b19 + offset: 17824 +} member { id: 0x10027c29 name: "priority" @@ -148087,6 +149849,12 @@ member { type_id: 0x4585663f offset: 512 } +member { + id: 0x10dc10b1 + name: "priority" + type_id: 0x384f7d7c + offset: 288 +} member { id: 0x10ebf2d4 name: "priority" @@ -149063,6 +150831,30 @@ member { type_id: 0xc9082b19 offset: 2208 } +member { + id: 0x934c941a + name: "probe_seq_end" + type_id: 0xc9082b19 + offset: 32 +} +member { + id: 0x45320605 + name: "probe_seq_start" + type_id: 0xc9082b19 +} +member { + id: 0x745418c4 + name: "probe_size" + type_id: 0xc9082b19 + offset: 64 + bitsize: 31 +} +member { + id: 0x60865bc2 + name: "probe_timestamp" + type_id: 0xc9082b19 + offset: 96 +} member { id: 0xf69ec1af name: "probe_type" @@ -150374,6 +152166,18 @@ member { type_id: 0xd298e888 offset: 4480 } +member { + id: 0x3ce5a47e + name: "prr_delivered" + type_id: 0xc9082b19 + offset: 15168 +} +member { + id: 0xe9e7b435 + name: "prr_out" + type_id: 
0xc9082b19 + offset: 15200 +} member { id: 0x282acc7e name: "prrr" @@ -150977,6 +152781,12 @@ member { type_id: 0xae1656c9 offset: 13184 } +member { + id: 0xa598ac7c + name: "pushed_seq" + type_id: 0xc9082b19 + offset: 15648 +} member { id: 0x4ac85a2c name: "put" @@ -151451,6 +153261,18 @@ member { type_id: 0x4585663f offset: 192 } +member { + id: 0x9be65a12 + name: "qlen" + type_id: 0x74d29cf1 + offset: 96 +} +member { + id: 0x9bf5ac3e + name: "qlen" + type_id: 0x6720d32f + offset: 160 +} member { id: 0xba491c1c name: "qlen_notify" @@ -151974,6 +153796,11 @@ member { name: "queue_sz" type_id: 0xf435685e } +member { + id: 0x52c10ec0 + name: "queue_xmit" + type_id: 0x2c209d56 +} member { id: 0xaa971b00 name: "queuecommand" @@ -152094,6 +153921,12 @@ member { type_id: 0x1457cd38 offset: 5568 } +member { + id: 0xdcf7e916 + name: "quick" + type_id: 0xb3e7bac9 + offset: 8 +} member { id: 0x625ab578 name: "quick_threads" @@ -152398,6 +154231,12 @@ member { type_id: 0x33756485 offset: 384 } +member { + id: 0x2c2c5bfb + name: "rack" + type_id: 0xd4132c46 + offset: 13504 +} member { id: 0x7a85796a name: "radar_detect_regions" @@ -152774,6 +154613,13 @@ member { type_id: 0x33756485 offset: 192 } +member { + id: 0xd6330125 + name: "rate_app_limited" + type_id: 0x295c7202 + offset: 13858 + bitsize: 1 +} member { id: 0x94569300 name: "rate_bytes_ps" @@ -152798,6 +154644,12 @@ member { type_id: 0xe62ebf07 offset: 2144 } +member { + id: 0x18e683dd + name: "rate_delivered" + type_id: 0xc9082b19 + offset: 15488 +} member { id: 0x0f313303 name: "rate_den" @@ -152822,6 +154674,12 @@ member { type_id: 0x295c7202 offset: 104 } +member { + id: 0x47a61a29 + name: "rate_interval_us" + type_id: 0xc9082b19 + offset: 15520 +} member { id: 0x7f7f584f name: "rate_leaf_parent_set" @@ -153975,6 +155833,12 @@ member { type_id: 0x33756485 offset: 192 } +member { + id: 0x608ed11c + name: "rcv_flowinfo" + type_id: 0xe276adef + offset: 608 +} member { id: 0xeae5b95c name: "rcv_interval_us" @@ 
-153987,6 +155851,24 @@ member { type_id: 0x18d413f1 offset: 1280 } +member { + id: 0xa514bd9b + name: "rcv_mss" + type_id: 0xe8034002 + offset: 176 +} +member { + id: 0x722b8728 + name: "rcv_nxt" + type_id: 0xc9082b19 + offset: 12480 +} +member { + id: 0x3a366be0 + name: "rcv_ooopack" + type_id: 0xc9082b19 + offset: 18240 +} member { id: 0xd699ecd4 name: "rcv_probes_mcast" @@ -153999,6 +155881,53 @@ member { type_id: 0x33756485 offset: 448 } +member { + id: 0x035a9f8f + name: "rcv_rtt_est" + type_id: 0x13c65297 + offset: 18304 +} +member { + id: 0xcebd1125 + name: "rcv_rtt_last_tsecr" + type_id: 0xc9082b19 + offset: 18272 +} +member { + id: 0x0249b0df + name: "rcv_saddr" + type_id: 0xe276adef +} +member { + id: 0x8a514678 + name: "rcv_ssthresh" + type_id: 0xc9082b19 + offset: 13472 +} +member { + id: 0x1ad125b2 + name: "rcv_tos" + type_id: 0xb3e7bac9 + offset: 7120 +} +member { + id: 0xa56c13a3 + name: "rcv_tsecr" + type_id: 0xc9082b19 + offset: 96 +} +member { + id: 0x9a3cd5ff + name: "rcv_tstamp" + type_id: 0xc9082b19 + offset: 12896 +} +member { + id: 0x908732db + name: "rcv_tsval" + type_id: 0xc9082b19 + offset: 64 +} member { id: 0x1f3accfe name: "rcv_unacked" @@ -154011,12 +155940,37 @@ member { type_id: 0x914dbfdc offset: 7856 } +member { + id: 0x073e90bb + name: "rcv_wnd" + type_id: 0xc9082b19 + offset: 15552 +} +member { + id: 0x64ea7467 + name: "rcv_wscale" + type_id: 0x914dbfdc + offset: 140 + bitsize: 4 +} +member { + id: 0x93db6466 + name: "rcv_wup" + type_id: 0xc9082b19 + offset: 12544 +} member { id: 0x75627b5b name: "rcvlists_lock" type_id: 0xf313e71a offset: 704 } +member { + id: 0x38961e9d + name: "rcvq_space" + type_id: 0x0e7661fa + offset: 18432 +} member { id: 0x6bca0440 name: "rd" @@ -154980,6 +156934,12 @@ member { type_id: 0x449a775b offset: 640 } +member { + id: 0x198e2327 + name: "rebuild_header" + type_id: 0x2c24424a + offset: 128 +} member { id: 0x659c25bc name: "recalc_accuracy" @@ -155208,6 +157168,12 @@ member { type_id: 0x054f691a 
offset: 26176 } +member { + id: 0x2ae3d7d3 + name: "recv_sack_cache" + type_id: 0xb637307e + offset: 17472 +} member { id: 0xc16c3147 name: "recv_seq" @@ -155228,6 +157194,41 @@ member { type_id: 0x0fc38cea offset: 2432 } +member { + id: 0xd1ac6cca + name: "recverr" + type_id: 0xb3e7bac9 + offset: 7104 + bitsize: 1 +} +member { + id: 0xd1f78df1 + name: "recverr" + type_id: 0xe8034002 + offset: 560 + bitsize: 1 +} +member { + id: 0x7e1e1e3e + name: "recverr_rfc4884" + type_id: 0xe8034002 + offset: 574 + bitsize: 1 +} +member { + id: 0x7e45f9f6 + name: "recverr_rfc4884" + type_id: 0xb3e7bac9 + offset: 7113 + bitsize: 1 +} +member { + id: 0xf5e277a0 + name: "recvfragsize" + type_id: 0xe8034002 + offset: 14 + bitsize: 1 +} member { id: 0x732441ac name: "recvmsg" @@ -155240,6 +157241,13 @@ member { type_id: 0x2c07bcbe offset: 896 } +member { + id: 0xd4dd684d + name: "recvmsg_inq" + type_id: 0x295c7202 + offset: 13869 + bitsize: 1 +} member { id: 0xfd896136 name: "recycle_ibi_slot" @@ -157092,6 +159100,12 @@ member { name: "release" type_id: 0x0f852e4a } +member { + id: 0xae97905e + name: "release" + type_id: 0x0f626ee5 + offset: 256 +} member { id: 0xae979233 name: "release" @@ -157813,12 +159827,31 @@ member { type_id: 0xe8034002 offset: 992 } +member { + id: 0x599289d3 + name: "reo_wnd_persist" + type_id: 0x295c7202 + offset: 168 + bitsize: 5 +} +member { + id: 0xc2607053 + name: "reo_wnd_steps" + type_id: 0x295c7202 + offset: 160 +} member { id: 0x5f8678ed name: "reoffload" type_id: 0x2d08fece offset: 832 } +member { + id: 0xbffa6140 + name: "reord_seen" + type_id: 0xc9082b19 + offset: 14688 +} member { id: 0x2e9f9cf8 name: "reorder_q" @@ -157843,12 +159876,31 @@ member { type_id: 0x6720d32f offset: 1536 } +member { + id: 0x6e16f71f + name: "reordering" + type_id: 0xc9082b19 + offset: 14656 +} member { id: 0x52e0ed5f name: "rep" type_id: 0x75617428 offset: 2624 } +member { + id: 0xf06d788d + name: "repair" + type_id: 0x295c7202 + offset: 13870 + bitsize: 1 +} +member 
{ + id: 0xd0cd0f99 + name: "repair_queue" + type_id: 0x295c7202 + offset: 13872 +} member { id: 0x345e6415 name: "repeat" @@ -157867,6 +159919,13 @@ member { type_id: 0x4585663f offset: 2112 } +member { + id: 0x7560b519 + name: "repflow" + type_id: 0xe8034002 + offset: 562 + bitsize: 1 +} member { id: 0x4b7a046e name: "repl_mode" @@ -160812,6 +162871,18 @@ member { type_id: 0xa9daefe1 offset: 7104 } +member { + id: 0x0b8a1650 + name: "retrans_out" + type_id: 0xc9082b19 + offset: 14528 +} +member { + id: 0x92525caa + name: "retrans_stamp" + type_id: 0xc9082b19 + offset: 17888 +} member { id: 0x2f1b8a8e name: "retrans_timeout" @@ -160824,6 +162895,12 @@ member { type_id: 0xf1159c31 offset: 3264 } +member { + id: 0x05aa0dfa + name: "retransmit_skb_hint" + type_id: 0x054f691a + offset: 16960 +} member { id: 0x4f0d9bec name: "retries" @@ -160865,6 +162942,12 @@ member { name: "retry" type_id: 0xe8d14bc6 } +member { + id: 0x7fdde492 + name: "retry" + type_id: 0xb3e7bac9 + offset: 24 +} member { id: 0x0a6b3115 name: "retry_count" @@ -161887,6 +163970,12 @@ member { type_id: 0x0b131b04 offset: 704 } +member { + id: 0xe1754ad1 + name: "router_alert" + type_id: 0x5d8155a5 + offset: 104 +} member { id: 0x1061afd7 name: "routing" @@ -162123,6 +164212,19 @@ member { type_id: 0x0d30b9c3 offset: 1152 } +member { + id: 0xe752e605 + name: "rr" + type_id: 0x5d8155a5 + offset: 80 +} +member { + id: 0xe5e354be + name: "rr_needaddr" + type_id: 0x5d8155a5 + offset: 99 + bitsize: 1 +} member { id: 0xf254d9f5 name: "rr_nr_running" @@ -162219,6 +164321,40 @@ member { type_id: 0xd298e888 offset: 1216 } +member { + id: 0x5367b67f + name: "rskq_accept_head" + type_id: 0x27847a9a + offset: 192 +} +member { + id: 0x07b1da25 + name: "rskq_accept_tail" + type_id: 0x27847a9a + offset: 256 +} +member { + id: 0x6f5c1006 + name: "rskq_defer_accept" + type_id: 0x295c7202 + offset: 32 +} +member { + id: 0xf1af1f8d + name: "rskq_lock" + type_id: 0xf313e71a +} +member { + id: 0x4fed2df8 + name: 
"rskq_rst_head" + type_id: 0x27847a9a +} +member { + id: 0xe2520be5 + name: "rskq_rst_tail" + type_id: 0x27847a9a + offset: 64 +} member { id: 0x94b16558 name: "rslot_limit" @@ -162700,6 +164836,13 @@ member { type_id: 0xc93e017b offset: 16 } +member { + id: 0xe5f931e3 + name: "rtalert_isolate" + type_id: 0xe8034002 + offset: 575 + bitsize: 1 +} member { id: 0xe06b5dfe name: "rtc" @@ -162837,18 +164980,47 @@ member { type_id: 0xc9082b19 offset: 1632 } +member { + id: 0xbb70420d + name: "rtt_min" + type_id: 0x98a86aa3 + offset: 14304 +} +member { + id: 0x50d5f815 + name: "rtt_seq" + type_id: 0xc9082b19 + offset: 14272 +} member { id: 0x2e4fe3d2 name: "rtt_us" type_id: 0xd41e888f offset: 32 } +member { + id: 0x2e52f84a + name: "rtt_us" + type_id: 0xc9082b19 +} +member { + id: 0x2e52fcab + name: "rtt_us" + type_id: 0xc9082b19 + offset: 64 +} member { id: 0x2e67f9bb name: "rtt_us" type_id: 0xfc0e1dbd offset: 320 } +member { + id: 0x861205e4 + name: "rttvar_us" + type_id: 0xc9082b19 + offset: 14240 +} member { id: 0xe53f0bca name: "rtx_syn_ack" @@ -163768,6 +165940,12 @@ member { type_id: 0xb02b353a offset: 384 } +member { + id: 0xedd761bd + name: "rx_opt" + type_id: 0xfb9626d5 + offset: 14752 +} member { id: 0x92162365 name: "rx_otherhost_dropped" @@ -164094,6 +166272,67 @@ member { type_id: 0xe8034002 offset: 16 } +member { + id: 0xf63d13ec + name: "rxflow" + type_id: 0xe8034002 + offset: 10 + bitsize: 1 +} +member { + id: 0x27542d26 + name: "rxhlim" + type_id: 0xe8034002 + offset: 4 + bitsize: 1 +} +member { + id: 0x0ab41655 + name: "rxinfo" + type_id: 0xe8034002 + offset: 2 + bitsize: 1 +} +member { + id: 0xba676c19 + name: "rxohlim" + type_id: 0xe8034002 + offset: 5 + bitsize: 1 +} +member { + id: 0xdb4e2124 + name: "rxoinfo" + type_id: 0xe8034002 + offset: 3 + bitsize: 1 +} +member { + id: 0xca5da826 + name: "rxopt" + type_id: 0x6312fd2f + offset: 544 +} +member { + id: 0xb7565f87 + name: "rxorigdstaddr" + type_id: 0xe8034002 + offset: 13 + bitsize: 1 +} +member { 
+ id: 0x1f5f2358 + name: "rxpmtu" + type_id: 0xe8034002 + offset: 12 + bitsize: 1 +} +member { + id: 0x80588b5f + name: "rxpmtu" + type_id: 0x054f691a + offset: 1024 +} member { id: 0x94aefae8 name: "rxq" @@ -164118,6 +166357,13 @@ member { type_id: 0xe62ebf07 offset: 416 } +member { + id: 0xd94d5094 + name: "rxtclass" + type_id: 0xe8034002 + offset: 11 + bitsize: 1 +} member { id: 0x2aa685e6 name: "rxtstamp" @@ -164133,6 +166379,11 @@ member { name: "s" type_id: 0x36592664 } +member { + id: 0x67c839a9 + name: "s" + type_id: 0x3a8e7b26 +} member { id: 0x67cca386 name: "s" @@ -164916,6 +167167,19 @@ member { type_id: 0xd3c80119 offset: 3072 } +member { + id: 0xa9edea21 + name: "sack_ok" + type_id: 0x914dbfdc + offset: 132 + bitsize: 3 +} +member { + id: 0xbd3e4a51 + name: "sacked_out" + type_id: 0xc9082b19 + offset: 15712 +} member { id: 0xf8038083 name: "saddr" @@ -165214,6 +167478,13 @@ member { type_id: 0x2d5e4b40 offset: 512 } +member { + id: 0x1d3cf444 + name: "save_syn" + type_id: 0x295c7202 + offset: 13880 + bitsize: 2 +} member { id: 0x10476fa4 name: "saved" @@ -165344,6 +167615,12 @@ member { type_id: 0x10c6675a offset: 1792 } +member { + id: 0x72f4ca16 + name: "saved_syn" + type_id: 0x10c6675a + offset: 18816 +} member { id: 0x424e9efd name: "saved_tmo" @@ -165368,6 +167645,20 @@ member { type_id: 0x0f626ee5 offset: 1408 } +member { + id: 0x037e8aa7 + name: "saw_tstamp" + type_id: 0x914dbfdc + offset: 128 + bitsize: 1 +} +member { + id: 0x9eb3a9b9 + name: "saw_unknown" + type_id: 0x295c7202 + offset: 144 + bitsize: 1 +} member { id: 0x597655fc name: "sb" @@ -166700,12 +168991,23 @@ member { type_id: 0x4585663f offset: 32 } +member { + id: 0xf32eb68b + name: "search_high" + type_id: 0x6720d32f +} member { id: 0x39bd6d5d name: "search_list" type_id: 0xd3c80119 offset: 448 } +member { + id: 0xf0e605cb + name: "search_low" + type_id: 0x6720d32f + offset: 32 +} member { id: 0xbf942607 name: "search_order" @@ -167147,6 +169449,24 @@ member { name: "segment_ptr" 
type_id: 0x78f4e574 } +member { + id: 0x498b56ac + name: "segments_left" + type_id: 0xb3e7bac9 + offset: 24 +} +member { + id: 0x43536458 + name: "segs_in" + type_id: 0xc9082b19 + offset: 12416 +} +member { + id: 0xef3672e0 + name: "segs_out" + type_id: 0xc9082b19 + offset: 12608 +} member { id: 0x339d15e8 name: "sel" @@ -167202,6 +169522,12 @@ member { type_id: 0x2c40c92f offset: 640 } +member { + id: 0x3a30a25b + name: "selective_acks" + type_id: 0xb637307e + offset: 17216 +} member { id: 0x104f57f6 name: "selector" @@ -167314,6 +169640,12 @@ member { type_id: 0x0d9c47fd offset: 1408 } +member { + id: 0x5937e22e + name: "selfparking" + type_id: 0x6d7f5ff6 + offset: 640 +} member { id: 0x561fd50f name: "selftest_check" @@ -167374,6 +169706,12 @@ member { type_id: 0x2dd0b9c3 offset: 448 } +member { + id: 0x6d7c55af + name: "send_check" + type_id: 0x0f67218c + offset: 64 +} member { id: 0xbd9e514e name: "send_data" @@ -167756,6 +170094,12 @@ member { type_id: 0xc9082b19 offset: 256 } +member { + id: 0xfad50a5a + name: "seq" + type_id: 0xc9082b19 + offset: 32 +} member { id: 0xfad50a81 name: "seq" @@ -169794,6 +172138,12 @@ member { type_id: 0x2c97b369 offset: 896 } +member { + id: 0x852593c2 + name: "setsockopt" + type_id: 0x2c45f2d3 + offset: 448 +} member { id: 0x85259dc7 name: "setsockopt" @@ -169842,6 +172192,12 @@ member { type_id: 0x6720d32f offset: 3072 } +member { + id: 0x84c3a238 + name: "setup" + type_id: 0x0aee7ba0 + offset: 384 +} member { id: 0x84c4d659 name: "setup" @@ -170075,12 +172431,30 @@ member { type_id: 0x295c7202 offset: 240 } +member { + id: 0x25d5628c + name: "sflist" + type_id: 0x339b8baf + offset: 256 +} +member { + id: 0x25db7175 + name: "sflist" + type_id: 0x3d801074 + offset: 192 +} member { id: 0x00c081a9 name: "sfmode" type_id: 0x4585663f offset: 96 } +member { + id: 0x00c085ca + name: "sfmode" + type_id: 0x4585663f + offset: 160 +} member { id: 0x4c6b273e name: "sfnum" @@ -170377,6 +172751,12 @@ member { type_id: 0x4585663f offset: 
96 } +member { + id: 0xeb4f31b9 + name: "share" + type_id: 0x295c7202 + offset: 512 +} member { id: 0xeb4f3b1e name: "share" @@ -171760,6 +174140,12 @@ member { name: "size" type_id: 0xf435685e } +member { + id: 0xd98a29b1 + name: "size" + type_id: 0xf435685e + offset: 256 +} member { id: 0xd98a2d0c name: "size" @@ -172426,6 +174812,12 @@ member { type_id: 0x6720d32f offset: 1152 } +member { + id: 0x68c86260 + name: "sk_rx_dst_set" + type_id: 0x0f5c0d73 + offset: 192 +} member { id: 0x30149a66 name: "sk_security" @@ -173049,6 +175441,29 @@ member { offset: 11 bitsize: 2 } +member { + id: 0x7f9c0ca0 + name: "sl_addr" + type_id: 0xdf70f6ef + offset: 192 +} +member { + id: 0x7fbfd873 + name: "sl_addr" + type_id: 0xfca4258b + offset: 192 +} +member { + id: 0xeea06d44 + name: "sl_count" + type_id: 0x4585663f + offset: 32 +} +member { + id: 0x047148a0 + name: "sl_max" + type_id: 0x4585663f +} member { id: 0xe0c5ed6b name: "slab" @@ -173690,6 +176105,13 @@ member { name: "smc_hash" type_id: 0x3c4ed50c } +member { + id: 0x7d4497ce + name: "smc_ok" + type_id: 0x914dbfdc + offset: 135 + bitsize: 1 +} member { id: 0x0697a2b0 name: "smem_len" @@ -173798,12 +176220,48 @@ member { type_id: 0x33f8b54b offset: 576 } +member { + id: 0x3b23e448 + name: "snd_cwnd" + type_id: 0xc9082b19 + offset: 14976 +} +member { + id: 0x0ed29348 + name: "snd_cwnd_clamp" + type_id: 0xc9082b19 + offset: 15040 +} +member { + id: 0xdef99a11 + name: "snd_cwnd_cnt" + type_id: 0xc9082b19 + offset: 15008 +} +member { + id: 0xe301bb69 + name: "snd_cwnd_stamp" + type_id: 0xc9082b19 + offset: 15104 +} +member { + id: 0x9e51f1aa + name: "snd_cwnd_used" + type_id: 0xc9082b19 + offset: 15072 +} member { id: 0xc2ae7a6f name: "snd_interval_us" type_id: 0xc9082b19 offset: 256 } +member { + id: 0x24e972c0 + name: "snd_nxt" + type_id: 0xc9082b19 + offset: 12576 +} member { id: 0x05fda169 name: "snd_portid" @@ -173815,12 +176273,55 @@ member { name: "snd_seq" type_id: 0xc9082b19 } +member { + id: 0x9c800493 + name: 
"snd_sml" + type_id: 0xc9082b19 + offset: 12864 +} +member { + id: 0xcf588653 + name: "snd_ssthresh" + type_id: 0xc9082b19 + offset: 14944 +} +member { + id: 0x8248546f + name: "snd_una" + type_id: 0xc9082b19 + offset: 12832 +} +member { + id: 0x611d6152 + name: "snd_up" + type_id: 0xc9082b19 + offset: 14720 +} member { id: 0xac63a41d name: "snd_win" type_id: 0x914dbfdc offset: 7808 } +member { + id: 0x1f8008f5 + name: "snd_wl1" + type_id: 0xc9082b19 + offset: 13312 +} +member { + id: 0xb8d73d0d + name: "snd_wnd" + type_id: 0xc9082b19 + offset: 13344 +} +member { + id: 0xa597551f + name: "snd_wscale" + type_id: 0x914dbfdc + offset: 136 + bitsize: 4 +} member { id: 0xb368ac36 name: "sndbit" @@ -173839,6 +176340,13 @@ member { type_id: 0x1a3a7059 offset: 576 } +member { + id: 0xc40b184c + name: "sndflow" + type_id: 0xe8034002 + offset: 561 + bitsize: 1 +} member { id: 0x8f0cab7f name: "sniff_max_interval" @@ -173928,6 +176436,12 @@ member { type_id: 0xf435685e offset: 5888 } +member { + id: 0x094f9496 + name: "sockaddr_len" + type_id: 0x914dbfdc + offset: 416 +} member { id: 0x4f73c01d name: "socket_pressure" @@ -174281,6 +176795,11 @@ member { type_id: 0xe02e14d6 offset: 64 } +member { + id: 0x589235ff + name: "space" + type_id: 0xc9082b19 +} member { id: 0x4b5ea168 name: "space_available" @@ -175187,6 +177706,25 @@ member { type_id: 0x1c3dbe5a offset: 416 } +member { + id: 0xaab1780b + name: "srcprefs" + type_id: 0xe8034002 + offset: 567 + bitsize: 3 +} +member { + id: 0x0ffd3550 + name: "srcrt" + type_id: 0xe8034002 + bitsize: 1 +} +member { + id: 0xcd7a45a0 + name: "srcrt" + type_id: 0x12d9a766 + offset: 256 +} member { id: 0xcd809b0d name: "srcrt" @@ -175437,6 +177975,25 @@ member { type_id: 0x6d7f5ff6 offset: 24 } +member { + id: 0x3a20fe11 + name: "srr" + type_id: 0x5d8155a5 + offset: 72 +} +member { + id: 0x3feffe90 + name: "srr_is_hit" + type_id: 0x5d8155a5 + offset: 97 + bitsize: 1 +} +member { + id: 0x28d4f0ae + name: "srtt_us" + type_id: 0xc9082b19 + 
offset: 14144 +} member { id: 0x8c8649b8 name: "ss" @@ -176243,6 +178800,11 @@ member { type_id: 0x865acc96 offset: 11904 } +member { + id: 0x3d5d011f + name: "start_seq" + type_id: 0xc9082b19 +} member { id: 0x00cda1f9 name: "start_signal_voltage_switch" @@ -177929,6 +180491,12 @@ member { type_id: 0x33756485 offset: 1472 } +member { + id: 0x39865348 + name: "sticky_pktinfo" + type_id: 0xe2574ab7 + offset: 128 +} member { id: 0x5922f81b name: "stime" @@ -178209,6 +180777,11 @@ member { type_id: 0x0d918eed offset: 192 } +member { + id: 0xacd44104 + name: "store" + type_id: 0x0dd653d2 +} member { id: 0xacd4aa03 name: "store" @@ -180428,6 +183001,47 @@ member { type_id: 0x0ddb38e3 offset: 448 } +member { + id: 0x590d4988 + name: "syn_data" + type_id: 0x295c7202 + offset: 13882 + bitsize: 1 +} +member { + id: 0x7d5a2639 + name: "syn_data_acked" + type_id: 0x295c7202 + offset: 13886 + bitsize: 1 +} +member { + id: 0x5e175424 + name: "syn_fastopen" + type_id: 0x295c7202 + offset: 13883 + bitsize: 1 +} +member { + id: 0x1273b273 + name: "syn_fastopen_ch" + type_id: 0x295c7202 + offset: 13885 + bitsize: 1 +} +member { + id: 0xe4473696 + name: "syn_fastopen_exp" + type_id: 0x295c7202 + offset: 13884 + bitsize: 1 +} +member { + id: 0x472be94a + name: "syn_recv_sock" + type_id: 0x120033c5 + offset: 320 +} member { id: 0x501d2a59 name: "sync" @@ -180737,6 +183351,12 @@ member { type_id: 0x104ad696 offset: 256 } +member { + id: 0x77b35ac5 + name: "synflood_warned" + type_id: 0xc9082b19 + offset: 64 +} member { id: 0x8e7c5fc5 name: "synq_overflow_ts" @@ -181726,6 +184346,11 @@ member { name: "t" type_id: 0x9c649622 } +member { + id: 0xab76db82 + name: "t" + type_id: 0xc9082b19 +} member { id: 0x87bba0ec name: "t1" @@ -182958,12 +185583,24 @@ member { type_id: 0x914dbfdc offset: 64 } +member { + id: 0x51753049 + name: "tclass" + type_id: 0xb3e7bac9 + offset: 584 +} member { id: 0x51753743 name: "tclass" type_id: 0xb3e7bac9 offset: 288 } +member { + id: 0x51ef8423 + name: 
"tclass" + type_id: 0x295c7202 + offset: 72 +} member { id: 0xbb99034f name: "tclassid" @@ -183063,6 +185700,12 @@ member { type_id: 0xc9082b19 offset: 4544 } +member { + id: 0x5e1e9277 + name: "tcp_clock_cache" + type_id: 0x92233392 + offset: 14016 +} member { id: 0x77c51ee3 name: "tcp_congestion_control" @@ -183092,6 +185735,12 @@ member { type_id: 0xc9082b19 offset: 64 } +member { + id: 0xc07533a2 + name: "tcp_header_len" + type_id: 0x914dbfdc + offset: 12288 +} member { id: 0xc0a9db63 name: "tcp_ignore_invalid_rst" @@ -183120,6 +185769,12 @@ member { type_id: 0x295c7202 offset: 464 } +member { + id: 0x0c6e4e2a + name: "tcp_mstamp" + type_id: 0x92233392 + offset: 14080 +} member { id: 0x92478c56 name: "tcp_rtx_queue" @@ -183148,6 +185803,18 @@ member { name: "tcp_tsorted_anchor" type_id: 0xd3c80119 } +member { + id: 0x5005e20e + name: "tcp_tx_delay" + type_id: 0xc9082b19 + offset: 13920 +} +member { + id: 0x2c472957 + name: "tcp_wstamp_ns" + type_id: 0x92233392 + offset: 13952 +} member { id: 0xee1e03ec name: "tcpc" @@ -183747,6 +186414,13 @@ member { type_id: 0xd3c80119 offset: 8960 } +member { + id: 0xe07791d5 + name: "thin_lto" + type_id: 0x295c7202 + offset: 13868 + bitsize: 1 +} member { id: 0xca538c03 name: "think_time" @@ -183842,6 +186516,12 @@ member { type_id: 0x965abf66 offset: 29312 } +member { + id: 0xa81d1890 + name: "thread_comm" + type_id: 0x3e10b518 + offset: 704 +} member { id: 0x8b87e4f0 name: "thread_flags" @@ -183854,6 +186534,12 @@ member { type_id: 0xd92b1d75 offset: 256 } +member { + id: 0x3e886d7b + name: "thread_fn" + type_id: 0x0aee7ba0 + offset: 256 +} member { id: 0xb282a928 name: "thread_group" @@ -183912,6 +186598,12 @@ member { type_id: 0x18456730 offset: 13504 } +member { + id: 0x4118a1ce + name: "thread_should_run" + type_id: 0x29a8570f + offset: 192 +} member { id: 0x90132b6a name: "thread_sibling" @@ -184218,6 +186910,12 @@ member { type_id: 0x92233392 offset: 192 } +member { + id: 0x74712a8a + name: "time" + type_id: 
0x92233392 + offset: 64 +} member { id: 0x74712c56 name: "time" @@ -184551,6 +187249,12 @@ member { type_id: 0x33756485 offset: 256 } +member { + id: 0x54f0f552 + name: "timeout" + type_id: 0x33756485 + offset: 64 +} member { id: 0x54f0f7ca name: "timeout" @@ -184603,6 +187307,12 @@ member { name: "timeout_ns" type_id: 0x4585663f } +member { + id: 0x203d8bb9 + name: "timeout_rehash" + type_id: 0x914dbfdc + offset: 18224 +} member { id: 0x39fee070 name: "timeout_set" @@ -185070,6 +187780,19 @@ member { type_id: 0x295c7202 offset: 144 } +member { + id: 0x81d25425 + name: "tlp_high_seq" + type_id: 0xc9082b19 + offset: 13888 +} +member { + id: 0xdab01e8a + name: "tlp_retrans" + type_id: 0x295c7202 + offset: 13722 + bitsize: 1 +} member { id: 0x7404e298 name: "tlv" @@ -185552,6 +188275,12 @@ member { type_id: 0x22198273 offset: 64 } +member { + id: 0xf113a55a + name: "tos" + type_id: 0x007e8ce4 + offset: 272 +} member { id: 0xf13a894d name: "tos" @@ -185576,6 +188305,12 @@ member { type_id: 0xb3e7bac9 offset: 24 } +member { + id: 0xf1a03e73 + name: "tos" + type_id: 0xb3e7bac9 + offset: 7072 +} member { id: 0xd7e12520 name: "tot_busy_t" @@ -185594,6 +188329,12 @@ member { type_id: 0x7584e7da offset: 16 } +member { + id: 0x3eb7848c + name: "tot_len" + type_id: 0x6720d32f + offset: 32 +} member { id: 0x408f26e7 name: "tot_write_bandwidth" @@ -185722,6 +188463,12 @@ member { type_id: 0x4585663f offset: 2112 } +member { + id: 0x1ab0aff5 + name: "total_retrans" + type_id: 0xc9082b19 + offset: 18048 +} member { id: 0x80674f84 name: "total_rx_match_ratio" @@ -186296,6 +189043,12 @@ member { name: "transmit_time" type_id: 0x92233392 } +member { + id: 0x1b258c6d + name: "transmit_time" + type_id: 0x92233392 + offset: 320 +} member { id: 0x45be24a0 name: "transmitting" @@ -186314,6 +189067,13 @@ member { type_id: 0xd529883e offset: 544 } +member { + id: 0x73793851 + name: "transparent" + type_id: 0xb3e7bac9 + offset: 7109 + bitsize: 1 +} member { id: 0x738f5598 name: "transparent" 
@@ -187019,6 +189779,12 @@ member { type_id: 0x11c404ba offset: 960 } +member { + id: 0x95c9b8f6 + name: "ts" + type_id: 0x5d8155a5 + offset: 88 +} member { id: 0x846f423a name: "ts_info" @@ -187031,6 +189797,20 @@ member { type_id: 0x92233392 offset: 192 } +member { + id: 0x5ffbc843 + name: "ts_needaddr" + type_id: 0x5d8155a5 + offset: 101 + bitsize: 1 +} +member { + id: 0xb15e52c6 + name: "ts_needtime" + type_id: 0x5d8155a5 + offset: 100 + bitsize: 1 +} member { id: 0x6f338061 name: "ts_real" @@ -187042,6 +189822,17 @@ member { type_id: 0xc9082b19 offset: 1184 } +member { + id: 0x851fefad + name: "ts_recent" + type_id: 0xc9082b19 + offset: 32 +} +member { + id: 0x8131134d + name: "ts_recent_stamp" + type_id: 0x6720d32f +} member { id: 0x0ac38887 name: "tseg1_max" @@ -187137,6 +189928,24 @@ member { type_id: 0x4585663f offset: 18208 } +member { + id: 0xaf1c2858 + name: "tsoffset" + type_id: 0xc9082b19 + offset: 13024 +} +member { + id: 0x3fe882fb + name: "tsorted_sent_queue" + type_id: 0xd3c80119 + offset: 13184 +} +member { + id: 0xb4b04624 + name: "tsq_node" + type_id: 0xd3c80119 + offset: 13056 +} member { id: 0x7f1030b7 name: "tstamp" @@ -187172,6 +189981,13 @@ member { type_id: 0x6720d32f offset: 1472 } +member { + id: 0xcd6e1313 + name: "tstamp_ok" + type_id: 0x914dbfdc + offset: 129 + bitsize: 1 +} member { id: 0x724f5b37 name: "tstamp_type" @@ -187289,6 +190105,12 @@ member { type_id: 0x295c7202 offset: 48 } +member { + id: 0xf4da47b1 + name: "ttl" + type_id: 0xb3e7bac9 + offset: 264 +} member { id: 0xf4da4d28 name: "ttl" @@ -188186,6 +191008,12 @@ member { type_id: 0x10617c73 offset: 704 } +member { + id: 0x34444209 + name: "tx_flags" + type_id: 0x295c7202 + offset: 256 +} member { id: 0x13091341 name: "tx_frames" @@ -189637,6 +192465,12 @@ member { type_id: 0xb3e7bac9 offset: 128 } +member { + id: 0x5ce53c7a + name: "type" + type_id: 0xb3e7bac9 + offset: 16 +} member { id: 0x5ce53fde name: "type" @@ -190345,6 +193179,12 @@ member { type_id: 0xa7c362b0 
offset: 64 } +member { + id: 0x50ba6c8c + name: "uarg" + type_id: 0x35dbe029 + offset: 384 +} member { id: 0x6a0a74ef name: "uart_port" @@ -190390,12 +193230,30 @@ member { name: "uc" type_id: 0x6dad7303 } +member { + id: 0x1d8e0899 + name: "uc_index" + type_id: 0x6720d32f + offset: 7136 +} member { id: 0x70a44081 name: "uc_promisc" type_id: 0x6d7f5ff6 offset: 5696 } +member { + id: 0x8e099fc0 + name: "uc_ttl" + type_id: 0x007e8ce4 + offset: 6944 +} +member { + id: 0x95dcef8c + name: "ucast_oif" + type_id: 0x6720d32f + offset: 480 +} member { id: 0xd2f70d3b name: "ucd_prdt_dma_addr" @@ -191244,6 +194102,18 @@ member { type_id: 0x1a3a7059 offset: 512 } +member { + id: 0x44e15491 + name: "undo_marker" + type_id: 0xc9082b19 + offset: 17920 +} +member { + id: 0x7392ff81 + name: "undo_retrans" + type_id: 0x6720d32f + offset: 17952 +} member { id: 0x0645ccd5 name: "unfreeze_fs" @@ -191556,6 +194426,12 @@ member { type_id: 0x4585663f offset: 2848 } +member { + id: 0x6fd6cd07 + name: "unpark" + type_id: 0x0aee7ba0 + offset: 576 +} member { id: 0x343d462b name: "unpin" @@ -191806,6 +194682,20 @@ member { type_id: 0x33756485 offset: 768 } +member { + id: 0xc2fb89b4 + name: "unused" + type_id: 0x295c7202 + offset: 145 + bitsize: 7 +} +member { + id: 0xd0fa2acf + name: "unused" + type_id: 0x295c7202 + offset: 13723 + bitsize: 5 +} member { id: 0xd4ec9f32 name: "unused" @@ -191891,6 +194781,12 @@ member { type_id: 0x0d88e184 offset: 128 } +member { + id: 0xd6aa460e + name: "update" + type_id: 0x0f596314 + offset: 192 +} member { id: 0xd6ab4b69 name: "update" @@ -192276,12 +195172,24 @@ member { offset: 109 bitsize: 1 } +member { + id: 0x6e240c8e + name: "urg_data" + type_id: 0x914dbfdc + offset: 14624 +} member { id: 0xcd5c6021 name: "urg_ptr" type_id: 0x7584e7da offset: 144 } +member { + id: 0xf6fff0ee + name: "urg_seq" + type_id: 0xc9082b19 + offset: 18080 +} member { id: 0x3bf0a986 name: "urgent_bkops_lvl" @@ -193371,6 +196279,12 @@ member { type_id: 0x724805ea offset: 9856 
} +member { + id: 0xc4835a3d + name: "user_mss" + type_id: 0x914dbfdc + offset: 160 +} member { id: 0xf44982ea name: "user_name" @@ -193605,6 +196519,12 @@ member { type_id: 0x6720d32f offset: 960 } +member { + id: 0x85d92902 + name: "users" + type_id: 0x74d29cf1 + offset: 96 +} member { id: 0x85e87d65 name: "users" @@ -193950,6 +196870,12 @@ member { name: "v" type_id: 0xe62ebf07 } +member { + id: 0x28f6afd5 + name: "v" + type_id: 0xc9082b19 + offset: 32 +} member { id: 0x543f6773 name: "v1" @@ -193994,6 +196920,11 @@ member { name: "v6" type_id: 0x7c866968 } +member { + id: 0x78e33cfc + name: "v6_rcv_saddr" + type_id: 0x6d25e07f +} member { id: 0x80b1cd58 name: "v_1p2" @@ -194105,6 +197036,11 @@ member { name: "val" type_id: 0x75617428 } +member { + id: 0x4f647aa3 + name: "val" + type_id: 0x729479be +} member { id: 0x4f67c989 name: "val" @@ -198705,6 +201641,12 @@ member { type_id: 0x4585663f offset: 64 } +member { + id: 0x98ad753d + name: "window_clamp" + type_id: 0xc9082b19 + offset: 13440 +} member { id: 0x1afab587 name: "window_len" @@ -200038,6 +202980,12 @@ member { type_id: 0x2f582494 offset: 1536 } +member { + id: 0xd5cb059f + name: "write_seq" + type_id: 0xc9082b19 + offset: 15584 +} member { id: 0x0a06727e name: "write_stamp" @@ -200311,6 +203259,13 @@ member { type_id: 0xc93e017b offset: 48 } +member { + id: 0xf0ca4be8 + name: "wscale_ok" + type_id: 0x914dbfdc + offset: 131 + bitsize: 1 +} member { id: 0x8986dade name: "wspecversion" @@ -201263,6 +204218,12 @@ member { type_id: 0xe62ebf07 offset: 160 } +member { + id: 0x2baf73ff + name: "young" + type_id: 0x74d29cf1 + offset: 128 +} member { id: 0x05f8b7b6 name: "ypanstep" @@ -201877,6 +204838,15 @@ struct_union { member_id: 0xd052cad3 } } +struct_union { + id: 0x0904167f + kind: STRUCT + definition { + bytesize: 8 + member_id: 0x45320605 + member_id: 0x934c941a + } +} struct_union { id: 0x0922f100 kind: STRUCT @@ -202331,6 +205301,16 @@ struct_union { member_id: 0x96fedfda } } +struct_union { + id: 
0x0e7661fa + kind: STRUCT + definition { + bytesize: 16 + member_id: 0x589235ff + member_id: 0xfad50a5a + member_id: 0x74712a8a + } +} struct_union { id: 0x0eac683f kind: STRUCT @@ -202781,6 +205761,16 @@ struct_union { member_id: 0xc46d15c4 } } +struct_union { + id: 0x13c65297 + kind: STRUCT + definition { + bytesize: 16 + member_id: 0x2e52f84a + member_id: 0xfad50a5a + member_id: 0x74712a8a + } +} struct_union { id: 0x14096380 kind: STRUCT @@ -203247,6 +206237,28 @@ struct_union { member_id: 0xf5eb6b20 } } +struct_union { + id: 0x1b949b56 + kind: STRUCT + definition { + bytesize: 2 + member_id: 0x0ffd3550 + member_id: 0x1e410881 + member_id: 0x0ab41655 + member_id: 0xdb4e2124 + member_id: 0x27542d26 + member_id: 0xba676c19 + member_id: 0xede7771d + member_id: 0xba0eaaec + member_id: 0xcc7711fa + member_id: 0x4f7607b0 + member_id: 0xf63d13ec + member_id: 0xd94d5094 + member_id: 0x1f5f2358 + member_id: 0xb7565f87 + member_id: 0xf5e277a0 + } +} struct_union { id: 0x1b966255 kind: STRUCT @@ -203654,6 +206666,18 @@ struct_union { member_id: 0x41322da8 } } +struct_union { + id: 0x249bcdf3 + kind: STRUCT + definition { + bytesize: 16 + member_id: 0xf32eb68b + member_id: 0xf0e605cb + member_id: 0x745418c4 + member_id: 0x7a56a000 + member_id: 0x60865bc2 + } +} struct_union { id: 0x2548e662 kind: STRUCT @@ -203663,6 +206687,22 @@ struct_union { member_id: 0x0193fe4b } } +struct_union { + id: 0x260445a5 + kind: STRUCT + definition { + bytesize: 24 + member_id: 0xf97ca310 + member_id: 0xdcf7e916 + member_id: 0x517ebeed + member_id: 0x7fdde492 + member_id: 0x168605e8 + member_id: 0x54f0f552 + member_id: 0x9d3fc94e + member_id: 0x4c233807 + member_id: 0xa514bd9b + } +} struct_union { id: 0x2612bf2a kind: STRUCT @@ -204935,6 +207975,15 @@ struct_union { member_id: 0x83ba53fe } } +struct_union { + id: 0x3fbb6cee + kind: STRUCT + definition { + bytesize: 4 + member_id: 0x9f6503ee + member_id: 0x39a33a5e + } +} struct_union { id: 0x3fbd06d5 kind: STRUCT @@ -205331,6 +208380,15 @@ 
struct_union { member_id: 0x5f7a4496 } } +struct_union { + id: 0x48c28092 + kind: UNION + definition { + bytesize: 16 + member_id: 0x78e33cfc + member_id: 0x0249b0df + } +} struct_union { id: 0x48fb1ff5 kind: UNION @@ -205513,6 +208571,15 @@ struct_union { member_id: 0x89460966 } } +struct_union { + id: 0x4bdbd862 + kind: UNION + definition { + bytesize: 8 + member_id: 0x749120ed + member_id: 0x1536626c + } +} struct_union { id: 0x4c6fa86e kind: UNION @@ -206822,6 +209889,15 @@ struct_union { member_id: 0x5e3692cb } } +struct_union { + id: 0x6312fd2f + kind: UNION + definition { + bytesize: 2 + member_id: 0xd74e0402 + member_id: 0xee87ba00 + } +} struct_union { id: 0x6366329e kind: UNION @@ -221024,6 +224100,20 @@ struct_union { member_id: 0xa6673206 } } +struct_union { + id: 0xce180920 + kind: STRUCT + name: "fastopen_queue" + definition { + bytesize: 40 + member_id: 0x4fed2df8 + member_id: 0xe2520be5 + member_id: 0x2d1fe547 + member_id: 0x9bf5ac3e + member_id: 0x6bf829f7 + member_id: 0xec94e17c + } +} struct_union { id: 0xbdd1c219 kind: STRUCT @@ -226922,6 +230012,16 @@ struct_union { member_id: 0xb4bb2348 } } +struct_union { + id: 0xe2574ab7 + kind: STRUCT + name: "in6_pktinfo" + definition { + bytesize: 20 + member_id: 0xce9a5c98 + member_id: 0x8e98680e + } +} struct_union { id: 0xa54936d5 kind: STRUCT @@ -226988,6 +230088,17 @@ struct_union { member_id: 0xb121c544 } } +struct_union { + id: 0x1f434195 + kind: STRUCT + name: "inet6_cork" + definition { + bytesize: 16 + member_id: 0xf67626a5 + member_id: 0x9f6b950b + member_id: 0x51ef8423 + } +} struct_union { id: 0xc4c18dc3 kind: STRUCT @@ -227055,6 +230166,42 @@ struct_union { member_id: 0xc5c8b651 } } +struct_union { + id: 0x03de7511 + kind: STRUCT + name: "inet_bind2_bucket" + definition { + bytesize: 64 + member_id: 0xe50fbded + member_id: 0xe6f3661d + member_id: 0x48322e2f + member_id: 0x9e9c9ad1 + member_id: 0x31fb35e2 + member_id: 0x0f3ca934 + member_id: 0xb8ff9609 + member_id: 0xbc5ebfd1 + } +} 
+struct_union { + id: 0xe6615d93 + kind: STRUCT + name: "inet_bind_bucket" + definition { + bytesize: 72 + member_id: 0xe50fbded + member_id: 0xe6f3661d + member_id: 0x48322e2f + member_id: 0xa6e81f22 + member_id: 0x244da0b8 + member_id: 0x73b4d4d5 + member_id: 0x6398a0a9 + member_id: 0xfd693ce8 + member_id: 0x247ebe29 + member_id: 0xcee88548 + member_id: 0x0f3ca80d + member_id: 0xb8ff9c83 + } +} struct_union { id: 0x89a37894 kind: STRUCT @@ -227065,6 +230212,100 @@ struct_union { member_id: 0x152992ef } } +struct_union { + id: 0x82dbb487 + kind: STRUCT + name: "inet_connection_sock" + definition { + bytesize: 1536 + member_id: 0x9bd0d4a0 + member_id: 0x2a778fa0 + member_id: 0xa53e6076 + member_id: 0xf1a367c5 + member_id: 0xe54062f3 + member_id: 0x3adf52b3 + member_id: 0xbf3d6991 + member_id: 0xaa84a388 + member_id: 0x7c9184fd + member_id: 0xcfd1f1ed + member_id: 0x9796567d + member_id: 0x2b917283 + member_id: 0xeb6a5fa1 + member_id: 0x50d22351 + member_id: 0x69ab5d87 + member_id: 0x411180b7 + member_id: 0x5392fe03 + member_id: 0x8adfff2f + member_id: 0x9b3f0555 + member_id: 0x5c570222 + member_id: 0x8e756dfb + member_id: 0x6bd77d33 + member_id: 0xfd5174f1 + member_id: 0xb27d04f5 + member_id: 0x2446b1e6 + member_id: 0x2d061585 + member_id: 0xd7cc2503 + member_id: 0xc36d8395 + member_id: 0xc75c6970 + member_id: 0xf4f5c9e8 + member_id: 0x973a1269 + member_id: 0x2d081c07 + member_id: 0xa86967ec + } +} +struct_union { + id: 0x5d95189a + kind: STRUCT + name: "inet_connection_sock_af_ops" + definition { + bytesize: 96 + member_id: 0x52c10ec0 + member_id: 0x6d7c55af + member_id: 0x198e2327 + member_id: 0x68c86260 + member_id: 0x495c1c6d + member_id: 0x472be94a + member_id: 0x729f250d + member_id: 0x171ea46a + member_id: 0x094f9496 + member_id: 0x852593c2 + member_id: 0xd9ad3ae7 + member_id: 0x09692cc4 + member_id: 0x2e3a46f9 + member_id: 0x2d08170f + } +} +struct_union { + id: 0xeeae7608 + kind: STRUCT + name: "inet_cork" + definition { + bytesize: 56 + member_id: 
0x2d2d0138 + member_id: 0x24cfa4f0 + member_id: 0xf64ae666 + member_id: 0xe25ba6f2 + member_id: 0xb50a4f6b + member_id: 0xbbe3f808 + member_id: 0x34444209 + member_id: 0xf4da47b1 + member_id: 0xf113a55a + member_id: 0x10dc10b1 + member_id: 0xeaa4b2c9 + member_id: 0x1b258c6d + member_id: 0x8196ad18 + } +} +struct_union { + id: 0x60790295 + kind: STRUCT + name: "inet_cork_full" + definition { + bytesize: 152 + member_id: 0x853440fd + member_id: 0x75577b27 + } +} struct_union { id: 0x28cc893a kind: STRUCT @@ -227156,6 +230397,47 @@ struct_union { member_id: 0x9d345b6b } } +struct_union { + id: 0xd2a4529d + kind: STRUCT + name: "inet_sock" + definition { + bytesize: 1088 + member_id: 0x82ce9da8 + member_id: 0xe47590e7 + member_id: 0x8be10b49 + member_id: 0x8e099fc0 + member_id: 0x9e4ebac2 + member_id: 0x38f59a08 + member_id: 0x674b03be + member_id: 0x78639f81 + member_id: 0xf1a03e73 + member_id: 0xa28c3827 + member_id: 0x7008fda6 + member_id: 0x79ed9153 + member_id: 0xd1ac6cca + member_id: 0x1facb5a4 + member_id: 0x0ccb8adb + member_id: 0x3cd68edd + member_id: 0xa5d9163f + member_id: 0x73793851 + member_id: 0x1bda0ea3 + member_id: 0xb932820e + member_id: 0x87d4aa19 + member_id: 0x7e45f9f6 + member_id: 0x35577470 + member_id: 0x1ad125b2 + member_id: 0x3c401b61 + member_id: 0x1d8e0899 + member_id: 0x02fcf451 + member_id: 0x5a22c9ad + member_id: 0x9c6a8ab2 + member_id: 0xa608f9d7 + member_id: 0x5b66efeb + member_id: 0x2d08191c + member_id: 0x63760c6b + } +} struct_union { id: 0x53a9065e kind: STRUCT @@ -228583,6 +231865,26 @@ struct_union { member_id: 0x962b59ac } } +struct_union { + id: 0x04ad1af1 + kind: STRUCT + name: "ip6_flowlabel" + definition { + bytesize: 104 + member_id: 0x11f8bba0 + member_id: 0x05cb6176 + member_id: 0x85d92902 + member_id: 0xbb9c8500 + member_id: 0xf676260e + member_id: 0xe93fdbc8 + member_id: 0x95dac8e5 + member_id: 0xeb4f31b9 + member_id: 0x4ad6a726 + member_id: 0xccd48510 + member_id: 0x91036748 + member_id: 0x1f4e61c9 + } +} struct_union { 
id: 0xdcb15adf kind: STRUCT @@ -228598,6 +231900,18 @@ struct_union { member_id: 0x95dac8e5 } } +struct_union { + id: 0xe42ec820 + kind: STRUCT + name: "ip6_sf_socklist" + definition { + bytesize: 24 + member_id: 0x047148a0 + member_id: 0xeea06d44 + member_id: 0x95dacd96 + member_id: 0x7f9c0ca0 + } +} struct_union { id: 0x5096f267 kind: STRUCT @@ -228691,6 +232005,19 @@ struct_union { member_id: 0x95dac7ab } } +struct_union { + id: 0x04be5205 + kind: STRUCT + name: "ip_mc_socklist" + definition { + bytesize: 48 + member_id: 0x11613e69 + member_id: 0xe7efca93 + member_id: 0x00c085ca + member_id: 0x25db7175 + member_id: 0x95dac9dc + } +} struct_union { id: 0xf86b845e kind: STRUCT @@ -228702,6 +232029,40 @@ struct_union { member_id: 0xf77583a5 } } +struct_union { + id: 0x29719ae3 + kind: STRUCT + name: "ip_options" + definition { + bytesize: 16 + member_id: 0xe37696ac + member_id: 0xe2d3a144 + member_id: 0x3cedbd03 + member_id: 0x3a20fe11 + member_id: 0xe752e605 + member_id: 0x95c9b8f6 + member_id: 0x1e6a6587 + member_id: 0x3feffe90 + member_id: 0xb9615f80 + member_id: 0xe5e354be + member_id: 0xb15e52c6 + member_id: 0x5ffbc843 + member_id: 0xe1754ad1 + member_id: 0x1dfe157e + member_id: 0x86165645 + member_id: 0xf4a9b31e + } +} +struct_union { + id: 0xca64cdd4 + kind: STRUCT + name: "ip_options_rcu" + definition { + bytesize: 32 + member_id: 0x95dac977 + member_id: 0xf6635680 + } +} struct_union { id: 0xe6a8bc0b kind: STRUCT @@ -228728,6 +232089,18 @@ struct_union { member_id: 0x494491e1 } } +struct_union { + id: 0xdc40a74d + kind: STRUCT + name: "ip_sf_socklist" + definition { + bytesize: 24 + member_id: 0x047148a0 + member_id: 0xeea06d44 + member_id: 0x95dacd96 + member_id: 0x7fbfd873 + } +} struct_union { id: 0x19e78670 kind: STRUCT @@ -228863,6 +232236,17 @@ struct_union { member_id: 0x724ae662 } } +struct_union { + id: 0x2cf9bbda + kind: STRUCT + name: "ipv6_ac_socklist" + definition { + bytesize: 32 + member_id: 0x7980c676 + member_id: 0xa102d1d4 + member_id: 
0x23f246a8 + } +} struct_union { id: 0x66c6fbfa kind: STRUCT @@ -228946,6 +232330,97 @@ struct_union { member_id: 0xda3c6150 } } +struct_union { + id: 0xd421869b + kind: STRUCT + name: "ipv6_fl_socklist" + definition { + bytesize: 32 + member_id: 0x11cc9887 + member_id: 0x75129e6a + member_id: 0x95dac0b5 + } +} +struct_union { + id: 0x61c3c826 + kind: STRUCT + name: "ipv6_mc_socklist" + definition { + bytesize: 56 + member_id: 0x2440fa86 + member_id: 0x90568e8d + member_id: 0x00c085ca + member_id: 0x11e1e8dd + member_id: 0x25d5628c + member_id: 0x95daceb0 + } +} +struct_union { + id: 0xe7d07a15 + kind: STRUCT + name: "ipv6_opt_hdr" + definition { + bytesize: 2 + member_id: 0xe85e1d32 + member_id: 0xd608b573 + } +} +struct_union { + id: 0x65bafa41 + kind: STRUCT + name: "ipv6_pinfo" + definition { + bytesize: 152 + member_id: 0xf85f9f77 + member_id: 0x39865348 + member_id: 0xca75de5a + member_id: 0x2ac20e0b + member_id: 0x3c1c76af + member_id: 0x370a9ced + member_id: 0x46cd5abc + member_id: 0xa582fe9b + member_id: 0x8c5039ca + member_id: 0x34d8f2b6 + member_id: 0x95dcef8c + member_id: 0xe14bd1fe + member_id: 0xca5da826 + member_id: 0xd1f78df1 + member_id: 0xc40b184c + member_id: 0x7560b519 + member_id: 0x6e852b14 + member_id: 0xb13666fe + member_id: 0xaab1780b + member_id: 0x4b741bd1 + member_id: 0x6854e737 + member_id: 0xe803bc2e + member_id: 0x1b81e20c + member_id: 0x7e1e1e3e + member_id: 0xe5f931e3 + member_id: 0x78ae54cb + member_id: 0x51753049 + member_id: 0x608ed11c + member_id: 0x3e931a99 + member_id: 0xfce07b0f + member_id: 0xe2141007 + member_id: 0x1e958d31 + member_id: 0xf6762c03 + member_id: 0xe8802f7b + member_id: 0x80588b5f + member_id: 0xa677c461 + } +} +struct_union { + id: 0x61267b05 + kind: STRUCT + name: "ipv6_rt_hdr" + definition { + bytesize: 4 + member_id: 0xe85e1d32 + member_id: 0xd608b573 + member_id: 0x5ce53c7a + member_id: 0x498b56ac + } +} struct_union { id: 0x8b8aa3c2 kind: STRUCT @@ -228987,6 +232462,23 @@ struct_union { member_id: 
0x12832f3f } } +struct_union { + id: 0xda61121d + kind: STRUCT + name: "ipv6_txoptions" + definition { + bytesize: 64 + member_id: 0xb7dcf8ac + member_id: 0x3eb7848c + member_id: 0x5e1efe31 + member_id: 0x00c73957 + member_id: 0x111085fe + member_id: 0xebf05092 + member_id: 0xcd7a45a0 + member_id: 0xfb3c6654 + member_id: 0x95dac8e5 + } +} struct_union { id: 0x97012809 kind: STRUCT @@ -232562,6 +236054,25 @@ struct_union { member_id: 0x73407d9e } } +struct_union { + id: 0x98a86aa3 + kind: STRUCT + name: "minmax" + definition { + bytesize: 24 + member_id: 0x67c839a9 + } +} +struct_union { + id: 0xa12e384a + kind: STRUCT + name: "minmax_sample" + definition { + bytesize: 8 + member_id: 0xab76db82 + member_id: 0x28f6afd5 + } +} struct_union { id: 0x0fc94b61 kind: STRUCT @@ -241995,6 +245506,22 @@ struct_union { member_id: 0xd072e4b9 } } +struct_union { + id: 0xf05a506e + kind: STRUCT + name: "request_sock_queue" + definition { + bytesize: 80 + member_id: 0xf1af1f8d + member_id: 0x6f5c1006 + member_id: 0x77b35ac5 + member_id: 0x9be65a12 + member_id: 0x2baf73ff + member_id: 0x5367b67f + member_id: 0x07b1da25 + member_id: 0x5aaa76bb + } +} struct_union { id: 0x85641a49 kind: STRUCT @@ -245831,6 +249358,25 @@ struct_union { kind: STRUCT name: "smc_hashinfo" } +struct_union { + id: 0xf265ff6f + kind: STRUCT + name: "smp_hotplug_thread" + definition { + bytesize: 96 + member_id: 0xacd44104 + member_id: 0x7c00ebb3 + member_id: 0x4118a1ce + member_id: 0x3e886d7b + member_id: 0x0f89928e + member_id: 0x84c3a238 + member_id: 0x200938c6 + member_id: 0xc70bed2a + member_id: 0x6fd6cd07 + member_id: 0x5937e22e + member_id: 0xa81d1890 + } +} struct_union { id: 0x6c8e86b4 kind: STRUCT @@ -250212,6 +253758,30 @@ struct_union { member_id: 0x95daceb0 } } +struct_union { + id: 0x0cb73b55 + kind: STRUCT + name: "tcp_fastopen_cookie" + definition { + bytesize: 24 + member_id: 0x4f647aa3 + member_id: 0xb82c6bed + member_id: 0x9633fcdf + } +} +struct_union { + id: 0x38e5bb2f + kind: STRUCT + 
name: "tcp_fastopen_request" + definition { + bytesize: 56 + member_id: 0x5846e373 + member_id: 0xffbef712 + member_id: 0xd98a29b1 + member_id: 0x5414dc08 + member_id: 0x50ba6c8c + } +} struct_union { id: 0x85ab6757 kind: STRUCT @@ -250221,6 +253791,220 @@ struct_union { member_id: 0x0d3e3662 } } +struct_union { + id: 0xfb9626d5 + kind: STRUCT + name: "tcp_options_received" + definition { + bytesize: 24 + member_id: 0x8131134d + member_id: 0x851fefad + member_id: 0x908732db + member_id: 0xa56c13a3 + member_id: 0x037e8aa7 + member_id: 0xcd6e1313 + member_id: 0x09aae020 + member_id: 0xf0ca4be8 + member_id: 0xa9edea21 + member_id: 0x7d4497ce + member_id: 0xa597551f + member_id: 0x64ea7467 + member_id: 0x9eb3a9b9 + member_id: 0xc2fb89b4 + member_id: 0x7cf21fa8 + member_id: 0xc4835a3d + member_id: 0x5ddbff68 + } +} +struct_union { + id: 0xd4132c46 + kind: STRUCT + name: "tcp_rack" + definition { + bytesize: 24 + member_id: 0xf3d1e1b3 + member_id: 0x2e52fcab + member_id: 0xd5a564b1 + member_id: 0x8762b004 + member_id: 0xc2607053 + member_id: 0x599289d3 + member_id: 0x28f28f83 + member_id: 0xec6d552f + } +} +struct_union { + id: 0xe9e88d93 + kind: STRUCT + name: "tcp_sack_block" + definition { + bytesize: 8 + member_id: 0x3d5d011f + member_id: 0xd5a56009 + } +} +struct_union { + id: 0x45ac4f1e + kind: STRUCT + name: "tcp_sock" + definition { + bytesize: 2368 + member_id: 0x07676cab + member_id: 0xc07533a2 + member_id: 0x085eba8a + member_id: 0xfcce6b61 + member_id: 0xda945faa + member_id: 0x43536458 + member_id: 0x7ecbe350 + member_id: 0x722b8728 + member_id: 0x1ec2d3d6 + member_id: 0x93db6466 + member_id: 0x24e972c0 + member_id: 0xef3672e0 + member_id: 0xdb2f5379 + member_id: 0x840adfe0 + member_id: 0x3ed6f46c + member_id: 0x7efdf5c5 + member_id: 0x8248546f + member_id: 0x9c800493 + member_id: 0x9a3cd5ff + member_id: 0xcaaa728e + member_id: 0x41099e1a + member_id: 0x036494f3 + member_id: 0xaf1c2858 + member_id: 0xb4b04624 + member_id: 0x3fe882fb + member_id: 0x1f8008f5 + 
member_id: 0xb8d73d0d + member_id: 0xb0b5b654 + member_id: 0x8571ffda + member_id: 0x98ad753d + member_id: 0x8a514678 + member_id: 0x2c2c5bfb + member_id: 0x1d3891d6 + member_id: 0x4aed4e02 + member_id: 0x330d862e + member_id: 0xdab01e8a + member_id: 0xd0fa2acf + member_id: 0x6c3076ab + member_id: 0x4b0fd146 + member_id: 0x5514b7d1 + member_id: 0xd6330125 + member_id: 0x5d29f448 + member_id: 0x72c8b468 + member_id: 0xb9f127f6 + member_id: 0xa42f5b5e + member_id: 0xa073f76a + member_id: 0xe07791d5 + member_id: 0xd4dd684d + member_id: 0xf06d788d + member_id: 0xdaf769b4 + member_id: 0xd0cd0f99 + member_id: 0x1d3cf444 + member_id: 0x590d4988 + member_id: 0x5e175424 + member_id: 0xe4473696 + member_id: 0x1273b273 + member_id: 0x7d5a2639 + member_id: 0xb9645bc8 + member_id: 0x81d25425 + member_id: 0x5005e20e + member_id: 0x2c472957 + member_id: 0x5e1e9277 + member_id: 0x0c6e4e2a + member_id: 0x28d4f0ae + member_id: 0x00d04522 + member_id: 0xf098d631 + member_id: 0x861205e4 + member_id: 0x50d5f815 + member_id: 0xbb70420d + member_id: 0xfdbc865b + member_id: 0x0b8a1650 + member_id: 0x1d340096 + member_id: 0xd7cfbd25 + member_id: 0x6e240c8e + member_id: 0x3dfa7679 + member_id: 0xeb2f3b5f + member_id: 0x6e16f71f + member_id: 0xbffa6140 + member_id: 0x611d6152 + member_id: 0xedd761bd + member_id: 0xcf588653 + member_id: 0x3b23e448 + member_id: 0xdef99a11 + member_id: 0x0ed29348 + member_id: 0x9e51f1aa + member_id: 0xe301bb69 + member_id: 0xa9ca3fe9 + member_id: 0x3ce5a47e + member_id: 0xe9e7b435 + member_id: 0x986caa86 + member_id: 0xe06780dd + member_id: 0x14a57188 + member_id: 0x484997ff + member_id: 0xc8ee40be + member_id: 0xc6e15ebb + member_id: 0x18e683dd + member_id: 0x47a61a29 + member_id: 0x073e90bb + member_id: 0xd5cb059f + member_id: 0xfdfb52fb + member_id: 0xa598ac7c + member_id: 0x2d8724a1 + member_id: 0xbd3e4a51 + member_id: 0xb18c6cab + member_id: 0xe98a4697 + member_id: 0x86cd98a0 + member_id: 0x05aa0dfa + member_id: 0x0b88a7b8 + member_id: 0x78ec3163 + 
member_id: 0xac4c8092 + member_id: 0x3a30a25b + member_id: 0x2ae3d7d3 + member_id: 0x6da3106a + member_id: 0x36a89416 + member_id: 0x17b8fb75 + member_id: 0x60f3e5b5 + member_id: 0x92525caa + member_id: 0x44e15491 + member_id: 0x7392ff81 + member_id: 0xd77f2af8 + member_id: 0x1ab0aff5 + member_id: 0xf6fff0ee + member_id: 0x9e7c1e77 + member_id: 0xb4c1a226 + member_id: 0x0ab38392 + member_id: 0xa6861f94 + member_id: 0xf5c438ad + member_id: 0x203d8bb9 + member_id: 0x3a366be0 + member_id: 0xcebd1125 + member_id: 0x035a9f8f + member_id: 0x38961e9d + member_id: 0x541b592a + member_id: 0x3641013e + member_id: 0x349100e3 + member_id: 0x7a4a99f1 + member_id: 0x72f4ca16 + member_id: 0x2d081e39 + } +} +struct_union { + id: 0xa19fd9aa + kind: STRUCT + name: "tcp_ulp_ops" + definition { + bytesize: 88 + member_id: 0x7c00ef52 + member_id: 0x1ac6e8a5 + member_id: 0xd6aa460e + member_id: 0xae97905e + member_id: 0x1d9b9b9d + member_id: 0xbaa156f0 + member_id: 0xf6c2da43 + member_id: 0x0d994758 + member_id: 0x4a965429 + } +} struct_union { id: 0xcfff757d kind: STRUCT @@ -277884,6 +281668,12 @@ enumeration { } } } +function { + id: 0x004cf563 + return_type_id: 0x48b5725f + parameter_id: 0x4585663f + parameter_id: 0x6d7f5ff6 +} function { id: 0x00a508d8 return_type_id: 0x100a15ee @@ -283603,12 +287393,25 @@ function { parameter_id: 0x1d19a9d5 parameter_id: 0x310ec01d } +function { + id: 0x171a1012 + return_type_id: 0x48b5725f + parameter_id: 0x1d44326e + parameter_id: 0x34d3469d +} function { id: 0x171c8621 return_type_id: 0xd5cc9c9a parameter_id: 0x09626b7f parameter_id: 0x6d7f5ff6 } +function { + id: 0x17256acc + return_type_id: 0x48b5725f + parameter_id: 0x1d44326e + parameter_id: 0x3bfbd7cf + parameter_id: 0x0f626ee5 +} function { id: 0x172842ec return_type_id: 0x48b5725f @@ -283660,6 +287463,12 @@ function { parameter_id: 0x6720d32f parameter_id: 0x18bd6530 } +function { + id: 0x1730d353 + return_type_id: 0x48b5725f + parameter_id: 0x1d44326e + parameter_id: 0x3e6396e0 +} 
function { id: 0x1731208f return_type_id: 0x48b5725f @@ -286925,6 +290734,11 @@ function { parameter_id: 0x36194830 parameter_id: 0x026525e9 } +function { + id: 0x1d1a000c + return_type_id: 0x48b5725f + parameter_id: 0x3609467c +} function { id: 0x1d1cf212 return_type_id: 0x48b5725f @@ -287117,6 +290931,13 @@ function { parameter_id: 0x347303b4 parameter_id: 0x3e10b518 } +function { + id: 0x1d657c6d + return_type_id: 0x48b5725f + parameter_id: 0x36d15200 + parameter_id: 0x1d44326e + parameter_id: 0xf1a6dfed +} function { id: 0x1d6aeee7 return_type_id: 0x48b5725f @@ -288435,6 +292256,11 @@ function { parameter_id: 0x0cbf60eb parameter_id: 0x095a3c37 } +function { + id: 0x1f121b27 + return_type_id: 0x48b5725f + parameter_id: 0x3e292ad1 +} function { id: 0x1f15c803 return_type_id: 0x48b5725f @@ -290719,6 +294545,12 @@ function { parameter_id: 0x0258f96e parameter_id: 0xf435685e } +function { + id: 0x4022c156 + return_type_id: 0xc9082b19 + parameter_id: 0x1bfb2a60 + parameter_id: 0xc9082b19 +} function { id: 0x40326129 return_type_id: 0x31675062 @@ -292681,6 +296513,16 @@ function { return_type_id: 0x249959de parameter_id: 0x347303b4 } +function { + id: 0x6240298a + return_type_id: 0x1d44326e + parameter_id: 0x3861403d + parameter_id: 0x054f691a + parameter_id: 0x27847a9a + parameter_id: 0x1259e377 + parameter_id: 0x27847a9a + parameter_id: 0x11cfee5a +} function { id: 0x624b0f22 return_type_id: 0x0537fb37 @@ -294742,6 +298584,11 @@ function { parameter_id: 0x3e10b518 parameter_id: 0x18bd6530 } +function { + id: 0x9002b2b0 + return_type_id: 0x6720d32f + parameter_id: 0x3609467c +} function { id: 0x9006b123 return_type_id: 0x6720d32f @@ -297515,6 +301362,11 @@ function { parameter_id: 0x0490bb4a parameter_id: 0xc9082b19 } +function { + id: 0x920aa99b + return_type_id: 0x6720d32f + parameter_id: 0x3e292ad1 +} function { id: 0x920d4b76 return_type_id: 0x6720d32f @@ -298788,6 +302640,13 @@ function { parameter_id: 0x064d6086 parameter_id: 0x6720d32f } +function { + id: 
0x9312e40f + return_type_id: 0x6720d32f + parameter_id: 0x3e10b518 + parameter_id: 0x4585663f + parameter_id: 0x00fff809 +} function { id: 0x9312e488 return_type_id: 0x6720d32f @@ -299276,6 +303135,12 @@ function { parameter_id: 0x3b04bead parameter_id: 0x33756485 } +function { + id: 0x938d8e84 + return_type_id: 0x6720d32f + parameter_id: 0x3861403d + parameter_id: 0x054f691a +} function { id: 0x938ec5ae return_type_id: 0x6720d32f @@ -303758,6 +307623,11 @@ function { return_type_id: 0x6720d32f parameter_id: 0x1285100d } +function { + id: 0x9927995d + return_type_id: 0xf435685e + parameter_id: 0x3861403d +} function { id: 0x9927b218 return_type_id: 0x6720d32f @@ -305132,6 +309002,13 @@ function { parameter_id: 0x342a8622 parameter_id: 0x6720d32f } +function { + id: 0x9a3fe476 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x6720d32f + parameter_id: 0x33756485 +} function { id: 0x9a403d53 return_type_id: 0x6720d32f @@ -305442,6 +309319,14 @@ function { return_type_id: 0x6720d32f parameter_id: 0x1c50c4ad } +function { + id: 0x9a96d2a1 + return_type_id: 0x6720d32f + parameter_id: 0x120540d1 + parameter_id: 0xe62ebf07 + parameter_id: 0x391f15ea + parameter_id: 0x6720d32f +} function { id: 0x9a9d1e6d return_type_id: 0xf435685e @@ -306535,6 +310420,13 @@ function { parameter_id: 0x914dbfdc parameter_id: 0x07dcdbe1 } +function { + id: 0x9b3e4950 + return_type_id: 0x6720d32f + parameter_id: 0x191de370 + parameter_id: 0x3806a5e7 + parameter_id: 0x6720d32f +} function { id: 0x9b3e6548 return_type_id: 0x6720d32f @@ -315576,6 +319468,17 @@ function { parameter_id: 0x0ae4a2c7 parameter_id: 0x3e10b518 } +function { + id: 0xa4f69947 + return_type_id: 0x6720d32f + parameter_id: 0xe62ebf07 + parameter_id: 0x391f15ea + parameter_id: 0x6720d32f + parameter_id: 0x1b8590a8 + parameter_id: 0x3e001c39 + parameter_id: 0x1b8590a8 + parameter_id: 0xc9082b19 +} function { id: 0xa52e0ac1 return_type_id: 0x3dcee85d @@ -317215,6 +321118,12 @@ function { return_type_id: 
0x4585663f parameter_id: 0x12e75923 } +function { + id: 0xcaae52cc + return_type_id: 0x4585663f + parameter_id: 0x1d44326e + parameter_id: 0xc9082b19 +} function { id: 0xcabd891e return_type_id: 0x4585663f @@ -317438,6 +321347,14 @@ function { return_type_id: 0x4585663f parameter_id: 0x01241c02 } +function { + id: 0xce9ab7ea + return_type_id: 0x4585663f + parameter_id: 0x01222f7d + parameter_id: 0x064d6086 + parameter_id: 0x4585663f + parameter_id: 0x4585663f +} function { id: 0xceb0f977 return_type_id: 0x4585663f @@ -321354,6 +325271,15 @@ elf_symbol { type_id: 0x102e93ac full_name: "__free_pages" } +elf_symbol { + id: 0xe458ae39 + name: "__fsnotify_parent" + is_defined: true + symbol_type: FUNCTION + crc: 0x2aa72d57 + type_id: 0x9a96d2a1 + full_name: "__fsnotify_parent" +} elf_symbol { id: 0x129eb7ed name: "__genphy_config_aneg" @@ -325936,6 +329862,15 @@ elf_symbol { type_id: 0x9a32c459 full_name: "__traceiter_android_vh_watchdog_timer_softlockup" } +elf_symbol { + id: 0xae5e5469 + name: "__traceiter_android_vh_wq_lockup_pool" + is_defined: true + symbol_type: FUNCTION + crc: 0x62c05483 + type_id: 0x9a3fe476 + full_name: "__traceiter_android_vh_wq_lockup_pool" +} elf_symbol { id: 0x6911084f name: "__traceiter_binder_transaction_received" @@ -329185,6 +333120,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_vh_watchdog_timer_softlockup" } +elf_symbol { + id: 0xa13f65ff + name: "__tracepoint_android_vh_wq_lockup_pool" + is_defined: true + symbol_type: OBJECT + crc: 0x4fd35a54 + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_wq_lockup_pool" +} elf_symbol { id: 0xf57e8f65 name: "__tracepoint_binder_transaction_received" @@ -346093,6 +350037,15 @@ elf_symbol { type_id: 0x1d030c3c full_name: "fsg_config_from_params" } +elf_symbol { + id: 0xe7fde0db + name: "fsnotify" + is_defined: true + symbol_type: FUNCTION + crc: 0x382d8219 + type_id: 0xa4f69947 + full_name: "fsnotify" +} elf_symbol { id: 0xee139066 name: "fsync_bdev" @@ -352208,6 
+356161,24 @@ elf_symbol { type_id: 0x98046a12 full_name: "kernel_listen" } +elf_symbol { + id: 0x52f17879 + name: "kernel_neon_begin" + is_defined: true + symbol_type: FUNCTION + crc: 0x8fd180e7 + type_id: 0x10985193 + full_name: "kernel_neon_begin" +} +elf_symbol { + id: 0xa84cb89b + name: "kernel_neon_end" + is_defined: true + symbol_type: FUNCTION + crc: 0xa8a8110c + type_id: 0x10985193 + full_name: "kernel_neon_end" +} elf_symbol { id: 0xff4a14be name: "kernel_param_lock" @@ -352901,6 +356872,15 @@ elf_symbol { type_id: 0x9313eb8f full_name: "kstrtoll" } +elf_symbol { + id: 0xf44e0659 + name: "kstrtos16" + is_defined: true + symbol_type: FUNCTION + crc: 0xe0419ac4 + type_id: 0x9312e40f + full_name: "kstrtos16" +} elf_symbol { id: 0x9c1bb84e name: "kstrtos8" @@ -364528,6 +368508,15 @@ elf_symbol { type_id: 0x8556217d full_name: "regulator_get" } +elf_symbol { + id: 0x90e2334a + name: "regulator_get_current_limit" + is_defined: true + symbol_type: FUNCTION + crc: 0xf9e34a3c + type_id: 0x9deb64a3 + full_name: "regulator_get_current_limit" +} elf_symbol { id: 0xfd198070 name: "regulator_get_current_limit_regmap" @@ -368714,6 +372703,24 @@ elf_symbol { type_id: 0x8c57cf58 full_name: "smp_call_on_cpu" } +elf_symbol { + id: 0xd7393cd7 + name: "smpboot_register_percpu_thread" + is_defined: true + symbol_type: FUNCTION + crc: 0xed22557a + type_id: 0x9002b2b0 + full_name: "smpboot_register_percpu_thread" +} +elf_symbol { + id: 0xbf61e6bf + name: "smpboot_unregister_percpu_thread" + is_defined: true + symbol_type: FUNCTION + crc: 0x62650cbc + type_id: 0x1d1a000c + full_name: "smpboot_unregister_percpu_thread" +} elf_symbol { id: 0x675bcd74 name: "snapshot_get_image_size" @@ -369326,6 +373333,15 @@ elf_symbol { type_id: 0xb3725ca4 full_name: "snd_sgbuf_get_addr" } +elf_symbol { + id: 0x31ef5894 + name: "snd_soc_add_card_controls" + is_defined: true + symbol_type: FUNCTION + crc: 0x3e965e80 + type_id: 0x9b3e4950 + full_name: "snd_soc_add_card_controls" +} elf_symbol { id: 
0x0af8ff72 name: "snd_soc_add_component_controls" @@ -371441,6 +375457,15 @@ elf_symbol { type_id: 0xce4f521b full_name: "stack_trace_save" } +elf_symbol { + id: 0x9ae5b413 + name: "stack_trace_save_regs" + is_defined: true + symbol_type: FUNCTION + crc: 0x2731e15c + type_id: 0xce9ab7ea + full_name: "stack_trace_save_regs" +} elf_symbol { id: 0xe69a5898 name: "stack_trace_save_tsk" @@ -372488,6 +376513,60 @@ elf_symbol { type_id: 0x5760fe36 full_name: "tcp_hashinfo" } +elf_symbol { + id: 0x5f96413a + name: "tcp_register_congestion_control" + is_defined: true + symbol_type: FUNCTION + crc: 0x6a326cc1 + type_id: 0x920aa99b + full_name: "tcp_register_congestion_control" +} +elf_symbol { + id: 0x34a062e9 + name: "tcp_reno_cong_avoid" + is_defined: true + symbol_type: FUNCTION + crc: 0x49080385 + type_id: 0x14df3fae + full_name: "tcp_reno_cong_avoid" +} +elf_symbol { + id: 0x97edc1c5 + name: "tcp_reno_ssthresh" + is_defined: true + symbol_type: FUNCTION + crc: 0xfc040e49 + type_id: 0x42a927f9 + full_name: "tcp_reno_ssthresh" +} +elf_symbol { + id: 0xdb8e8104 + name: "tcp_reno_undo_cwnd" + is_defined: true + symbol_type: FUNCTION + crc: 0x4dece522 + type_id: 0x42a927f9 + full_name: "tcp_reno_undo_cwnd" +} +elf_symbol { + id: 0xe6a46ba8 + name: "tcp_slow_start" + is_defined: true + symbol_type: FUNCTION + crc: 0x1cf8d7ac + type_id: 0x4022c156 + full_name: "tcp_slow_start" +} +elf_symbol { + id: 0xefab10bf + name: "tcp_unregister_congestion_control" + is_defined: true + symbol_type: FUNCTION + crc: 0xc80e5686 + type_id: 0x1f121b27 + full_name: "tcp_unregister_congestion_control" +} elf_symbol { id: 0x857e5dc9 name: "tcpci_get_tcpm_port" @@ -377231,6 +381310,15 @@ elf_symbol { type_id: 0x96011ad3 full_name: "usb_serial_suspend" } +elf_symbol { + id: 0xedbb00b2 + name: "usb_set_configuration" + is_defined: true + symbol_type: FUNCTION + crc: 0x58ece092 + type_id: 0x9f435990 + full_name: "usb_set_configuration" +} elf_symbol { id: 0x34af8a35 name: "usb_set_device_state" @@ 
-381308,6 +385396,7 @@ interface { symbol_id: 0x47a334c4 symbol_id: 0xebf4b11f symbol_id: 0x5b1ea047 + symbol_id: 0xe458ae39 symbol_id: 0x129eb7ed symbol_id: 0x63083569 symbol_id: 0x46f82598 @@ -381817,6 +385906,7 @@ interface { symbol_id: 0xcb4d15f3 symbol_id: 0x6a8145ff symbol_id: 0xb0bf7fd6 + symbol_id: 0xae5e5469 symbol_id: 0x6911084f symbol_id: 0x8068eeb3 symbol_id: 0xfdf83a19 @@ -382178,6 +386268,7 @@ interface { symbol_id: 0x743ea36d symbol_id: 0xc09d36c9 symbol_id: 0x42dbeb24 + symbol_id: 0xa13f65ff symbol_id: 0xf57e8f65 symbol_id: 0x18343f2d symbol_id: 0xef8c9bf3 @@ -384056,6 +388147,7 @@ interface { symbol_id: 0x07715f1a symbol_id: 0x2c0dfd47 symbol_id: 0xb3bb06f4 + symbol_id: 0xe7fde0db symbol_id: 0xee139066 symbol_id: 0x613adcb1 symbol_id: 0x370e6f08 @@ -384735,6 +388827,8 @@ interface { symbol_id: 0x1b2f5f11 symbol_id: 0x6746106c symbol_id: 0xc5f22a59 + symbol_id: 0x52f17879 + symbol_id: 0xa84cb89b symbol_id: 0xff4a14be symbol_id: 0x4e60d546 symbol_id: 0x0e6a648c @@ -384812,6 +388906,7 @@ interface { symbol_id: 0x46063085 symbol_id: 0x8ab0e185 symbol_id: 0xbeb6fe53 + symbol_id: 0xf44e0659 symbol_id: 0x9c1bb84e symbol_id: 0xa656e08e symbol_id: 0x4c5f49dc @@ -386104,6 +390199,7 @@ interface { symbol_id: 0xc91eefc6 symbol_id: 0x7511baca symbol_id: 0x29553efc + symbol_id: 0x90e2334a symbol_id: 0xfd198070 symbol_id: 0x4a781b7f symbol_id: 0xc92def38 @@ -386569,6 +390665,8 @@ interface { symbol_id: 0x4c674d1e symbol_id: 0x1f9b728b symbol_id: 0x99a17453 + symbol_id: 0xd7393cd7 + symbol_id: 0xbf61e6bf symbol_id: 0x675bcd74 symbol_id: 0x35280a80 symbol_id: 0x03fd01bd @@ -386637,6 +390735,7 @@ interface { symbol_id: 0xb2f7eb17 symbol_id: 0x8eb5b50d symbol_id: 0x1f5649eb + symbol_id: 0x31ef5894 symbol_id: 0x0af8ff72 symbol_id: 0xe3ec4d34 symbol_id: 0xf23ecd34 @@ -386872,6 +390971,7 @@ interface { symbol_id: 0xf4594c32 symbol_id: 0xa33694f6 symbol_id: 0x7e8e12de + symbol_id: 0x9ae5b413 symbol_id: 0xe69a5898 symbol_id: 0xbd07e567 symbol_id: 0xf304733e @@ -386989,6 
+391089,12 @@ interface { symbol_id: 0xd6adc7e6 symbol_id: 0xcc386657 symbol_id: 0xeb0e3e9b + symbol_id: 0x5f96413a + symbol_id: 0x34a062e9 + symbol_id: 0x97edc1c5 + symbol_id: 0xdb8e8104 + symbol_id: 0xe6a46ba8 + symbol_id: 0xefab10bf symbol_id: 0x857e5dc9 symbol_id: 0x85459353 symbol_id: 0x1ecfd7d9 @@ -387516,6 +391622,7 @@ interface { symbol_id: 0xbc49d007 symbol_id: 0xa20893c4 symbol_id: 0xf13a39cb + symbol_id: 0xedbb00b2 symbol_id: 0x34af8a35 symbol_id: 0x3cc50b4b symbol_id: 0x85997d50 diff --git a/android/abi_gki_aarch64_galaxy b/android/abi_gki_aarch64_galaxy index 9bb57a86455d..d14cade9b61a 100644 --- a/android/abi_gki_aarch64_galaxy +++ b/android/abi_gki_aarch64_galaxy @@ -100,6 +100,8 @@ __free_pages free_pages free_pages_exact + fsnotify + __fsnotify_parent generic_file_read_iter generic_mii_ioctl generic_perform_write @@ -150,6 +152,8 @@ kasan_flag_enabled kasprintf kernel_cpustat + kernel_neon_begin + kernel_neon_end kernfs_find_and_get_ns kfree __kfree_skb @@ -165,6 +169,7 @@ kobject_put kstrdup kstrtoint + kstrtos16 kstrtouint kstrtoull kthread_create_on_node @@ -258,6 +263,7 @@ register_reboot_notifier register_restart_handler register_syscore_ops + regulator_get_current_limit remove_cpu rtc_class_open rtc_read_time @@ -278,6 +284,9 @@ single_open single_release skb_copy_ubufs + smpboot_register_percpu_thread + smpboot_unregister_percpu_thread + snd_soc_add_card_controls snd_soc_find_dai snd_soc_info_volsw_sx snd_soc_put_volsw_sx @@ -286,6 +295,7 @@ sprintf sscanf __stack_chk_fail + stack_trace_save_regs stpcpy strcmp strim @@ -307,6 +317,12 @@ system_long_wq system_unbound_wq sys_tz + tcp_register_congestion_control + tcp_reno_cong_avoid + tcp_reno_ssthresh + tcp_reno_undo_cwnd + tcp_slow_start + tcp_unregister_congestion_control time64_to_tm __traceiter_android_rvh_arm64_serror_panic __traceiter_android_rvh_die_kernel_fault @@ -340,6 +356,7 @@ __traceiter_android_vh_try_to_freeze_todo __traceiter_android_vh_try_to_freeze_todo_unfrozen 
__traceiter_android_vh_watchdog_timer_softlockup + __traceiter_android_vh_wq_lockup_pool __traceiter_block_rq_insert __traceiter_console __traceiter_hrtimer_expire_entry @@ -381,6 +398,7 @@ __tracepoint_android_vh_try_to_freeze_todo __tracepoint_android_vh_try_to_freeze_todo_unfrozen __tracepoint_android_vh_watchdog_timer_softlockup + __tracepoint_android_vh_wq_lockup_pool __tracepoint_block_rq_insert __tracepoint_console __tracepoint_hrtimer_expire_entry @@ -400,6 +418,7 @@ up_write usb_alloc_dev usb_gstrings_attach + usb_set_configuration usbnet_get_endpoints usbnet_link_change usb_set_device_state From fadc35923d8b9f0198b410e9b763eb839082bf64 Mon Sep 17 00:00:00 2001 From: xieliujie Date: Fri, 4 Aug 2023 14:35:39 +0800 Subject: [PATCH 107/163] ANDROID: vendor_hook: fix the error record position of mutex Make sure vendorhook trace_android_vh_record_mutex_lock_starttime woking both in fastpath unlock and slowpath unlock. Fixes: 57750518de5b ("ANDROID: vendor_hook: Avoid clearing protect-flag before waking waiters") Bug: 286024926 Change-Id: Ib91c1b88d27aaa4ef872d44102969ffc3c9adb58 Signed-off-by: xieliujie --- kernel/locking/mutex.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index 4ff70da18e3c..525648da693f 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c @@ -556,8 +556,10 @@ static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigne void __sched mutex_unlock(struct mutex *lock) { #ifndef CONFIG_DEBUG_LOCK_ALLOC - if (__mutex_unlock_fast(lock)) + if (__mutex_unlock_fast(lock)) { + trace_android_vh_record_mutex_lock_starttime(current, 0); return; + } #endif __mutex_unlock_slowpath(lock, _RET_IP_); trace_android_vh_record_mutex_lock_starttime(current, 0); From 4dc009c3a8aa8ffc815af31a396a9a50156828e9 Mon Sep 17 00:00:00 2001 From: Zhanyuan Hu Date: Fri, 11 Aug 2023 09:58:42 +0800 Subject: [PATCH 108/163] ANDROID: GKI: Update symbols to symbol list Update 
symbols to symbol list externed by oppo memory group. ABI DIFFERENCES HAVE BEEN DETECTED! 1 variable symbol(s) added 'unsigned long zero_pfn' Bug: 292051411 Change-Id: I913c01c7671729bf33b78a218c61cfb94628fb0e Signed-off-by: huzhanyuan --- android/abi_gki_aarch64.stg | 10 ++++++++++ android/abi_gki_aarch64_oplus | 1 + 2 files changed, 11 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 56e8d3b856c6..0a050f0bf426 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -385072,6 +385072,15 @@ elf_symbol { type_id: 0x12dd8909 full_name: "zap_vma_ptes" } +elf_symbol { + id: 0xe8e0ea6a + name: "zero_pfn" + is_defined: true + symbol_type: OBJECT + crc: 0x85efc7e0 + type_id: 0x33756485 + full_name: "zero_pfn" +} elf_symbol { id: 0xa5d58813 name: "zlib_deflate" @@ -392040,6 +392049,7 @@ interface { symbol_id: 0xb73c4905 symbol_id: 0xbb650596 symbol_id: 0x0fc8c78d + symbol_id: 0xe8e0ea6a symbol_id: 0xa5d58813 symbol_id: 0xde6620d5 symbol_id: 0xdf18b1a9 diff --git a/android/abi_gki_aarch64_oplus b/android/abi_gki_aarch64_oplus index afe0f5c8aa05..ee42a50dcbdb 100644 --- a/android/abi_gki_aarch64_oplus +++ b/android/abi_gki_aarch64_oplus @@ -241,3 +241,4 @@ wait_for_completion_killable_timeout wakeup_source_remove wq_worker_comm + zero_pfn From d7dacaa439c7cd0f3e2130e90ac8f2b91370d3da Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Sat, 29 Jul 2023 10:59:38 -0400 Subject: [PATCH 109/163] UPSTREAM: USB: Gadget: core: Help prevent panic during UVC unconfigure Avichal Rakesh reported a kernel panic that occurred when the UVC gadget driver was removed from a gadget's configuration. The panic involves a somewhat complicated interaction between the kernel driver and a userspace component (as described in the Link tag below), but the analysis did make one thing clear: The Gadget core should accomodate gadget drivers calling usb_gadget_deactivate() as part of their unbind procedure. Currently this doesn't work. 
gadget_unbind_driver() calls driver->unbind() while holding the udc->connect_lock mutex, and usb_gadget_deactivate() attempts to acquire that mutex, which will result in a deadlock. The simple fix is for gadget_unbind_driver() to release the mutex when invoking the ->unbind() callback. There is no particular reason for it to be holding the mutex at that time, and the mutex isn't held while the ->bind() callback is invoked. So we'll drop the mutex before performing the unbind callback and reacquire it afterward. We'll also add a couple of comments to usb_gadget_activate() and usb_gadget_deactivate(). Because they run in process context they must not be called from a gadget driver's ->disconnect() callback, which (according to the kerneldoc for struct usb_gadget_driver in include/linux/usb/gadget.h) may run in interrupt context. This may help prevent similar bugs from arising in the future. Reported-and-tested-by: Avichal Rakesh Signed-off-by: Alan Stern Fixes: 286d9975a838 ("usb: gadget: udc: core: Prevent soft_connect_store() race") Link: https://lore.kernel.org/linux-usb/4d7aa3f4-22d9-9f5a-3d70-1bd7148ff4ba@google.com/ Cc: Badhri Jagan Sridharan Cc: Link: https://lore.kernel.org/r/48b2f1f1-0639-46bf-bbfc-98cb05a24914@rowland.harvard.edu Signed-off-by: Greg Kroah-Hartman Bug: 291976100 Change-Id: Icff01d8e88f041af4bda8726242de9cd518a247a (cherry picked from commit 65dadb2beeb7360232b09ebc4585b54475dfee06) Signed-off-by: Avichal Rakesh --- drivers/usb/gadget/udc/core.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 9568b13c05f4..93695ce5fef0 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -795,6 +795,9 @@ EXPORT_SYMBOL_GPL(usb_gadget_disconnect); * usb_gadget_activate() is called. For example, user mode components may * need to be activated before the system can talk to hosts. 
* + * This routine may sleep; it must not be called in interrupt context + * (such as from within a gadget driver's disconnect() callback). + * * Returns zero on success, else negative errno. */ int usb_gadget_deactivate(struct usb_gadget *gadget) @@ -833,6 +836,8 @@ EXPORT_SYMBOL_GPL(usb_gadget_deactivate); * This routine activates gadget which was previously deactivated with * usb_gadget_deactivate() call. It calls usb_gadget_connect() if needed. * + * This routine may sleep; it must not be called in interrupt context. + * * Returns zero on success, else negative errno. */ int usb_gadget_activate(struct usb_gadget *gadget) @@ -1625,7 +1630,11 @@ static void gadget_unbind_driver(struct device *dev) usb_gadget_disable_async_callbacks(udc); if (gadget->irq) synchronize_irq(gadget->irq); + mutex_unlock(&udc->connect_lock); + udc->driver->unbind(gadget); + + mutex_lock(&udc->connect_lock); usb_gadget_udc_stop_locked(udc); mutex_unlock(&udc->connect_lock); From 37f450940760d2318d829c611662e3e4ff35af7b Mon Sep 17 00:00:00 2001 From: Yifan Hong Date: Tue, 8 Aug 2023 13:50:56 -0700 Subject: [PATCH 110/163] ANDROID: Add checkpatch target. Running the following will run scripts/checkpatch.pl on a patch of HEAD tools/bazel run //common:checkpatch or a given Git SHA1: tools/bazel run //common:checkpatch -- --git_sha1 ... For additional flags, see tools/bazel run //common:checkpatch -- --help For details, see build/kernel/kleaf/docs/checkpatch.md in your source tree. 
Test: TH Bug: 259995152 Change-Id: Iaad8fd69508cf9be11340166aafbb84930d4805c Signed-off-by: Yifan Hong (cherry picked from commit 7dbf26568fcccde88470e7a25c07f0c7229e85f1) --- BUILD.bazel | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/BUILD.bazel b/BUILD.bazel index 2a95ec15d4d2..a9f5348d052e 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -6,6 +6,7 @@ load("//build/bazel_common_rules/dist:dist.bzl", "copy_to_dist_dir") load("//build/kernel/kleaf:common_kernels.bzl", "define_common_kernels") load( "//build/kernel/kleaf:kernel.bzl", + "checkpatch", "ddk_headers", "kernel_abi", "kernel_build", @@ -40,6 +41,11 @@ _GKI_X86_64_MAKE_GOALS = [ "modules", ] +checkpatch( + name = "checkpatch", + checkpatch_pl = "scripts/checkpatch.pl", +) + write_file( name = "gki_system_dlkm_modules", out = "android/gki_system_dlkm_modules", From 76881029499ada5d48d2658739f1bcf9a4156772 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 19 Jul 2023 21:08:21 +0200 Subject: [PATCH 111/163] UPSTREAM: netfilter: nft_set_pipapo: fix improper element removal [ Upstream commit 87b5a5c209405cb6b57424cdfa226a6dbd349232 ] end key should be equal to start unless NFT_SET_EXT_KEY_END is present. Its possible to add elements that only have a start key ("{ 1.0.0.0 . 2.0.0.0 }") without an internval end. Insertion treats this via: if (nft_set_ext_exists(ext, NFT_SET_EXT_KEY_END)) end = (const u8 *)nft_set_ext_key_end(ext)->data; else end = start; but removal side always uses nft_set_ext_key_end(). This is wrong and leads to garbage remaining in the set after removal next lookup/insert attempt will give: BUG: KASAN: slab-use-after-free in pipapo_get+0x8eb/0xb90 Read of size 1 at addr ffff888100d50586 by task nft-pipapo_uaf_/1399 Call Trace: kasan_report+0x105/0x140 pipapo_get+0x8eb/0xb90 nft_pipapo_insert+0x1dc/0x1710 nf_tables_newsetelem+0x31f5/0x4e00 .. 
Bug: 293587745 Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges") Reported-by: lonial con Reviewed-by: Stefano Brivio Signed-off-by: Florian Westphal Signed-off-by: Sasha Levin (cherry picked from commit 90c3955beb858bb52a9e5c4380ed0e520e3730d1) Signed-off-by: Lee Jones Change-Id: I51a423aaa2c31c4df89776505b602aa2c1523b82 --- net/netfilter/nft_set_pipapo.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index 06d46d182634..4b79df6ecf6c 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -1908,7 +1908,11 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set, int i, start, rules_fx; match_start = data; - match_end = (const u8 *)nft_set_ext_key_end(&e->ext)->data; + + if (nft_set_ext_exists(&e->ext, NFT_SET_EXT_KEY_END)) + match_end = (const u8 *)nft_set_ext_key_end(&e->ext)->data; + else + match_end = data; start = first_rule; rules_fx = rules_f0; From 9db1437238460a879df963dc357c0663ae50f1fe Mon Sep 17 00:00:00 2001 From: Pedro Tammela Date: Sat, 22 Apr 2023 12:56:11 -0300 Subject: [PATCH 112/163] UPSTREAM: net/sched: sch_qfq: refactor parsing of netlink parameters [ Upstream commit 25369891fcef373540f8b4e0b3bccf77a04490d5 ] Two parameters can be transformed into netlink policies and validated while parsing the netlink message. Bug: 292249631 Reviewed-by: Simon Horman Acked-by: Jamal Hadi Salim Signed-off-by: Pedro Tammela Signed-off-by: David S. 
Miller Stable-dep-of: 3e337087c3b5 ("net/sched: sch_qfq: account for stab overhead in qfq_enqueue") Signed-off-by: Sasha Levin (cherry picked from commit 4b33836824052c91cfa812f1222cdfa0ff8daa41) Signed-off-by: Lee Jones Change-Id: Ifce65b6b0ce2f7dee2040a4c91fd90ea7b2e8f3c --- net/sched/sch_qfq.c | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 02098a02943e..2f3629c85158 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -113,6 +113,7 @@ #define QFQ_MTU_SHIFT 16 /* to support TSO/GSO */ #define QFQ_MIN_LMAX 512 /* see qfq_slot_insert */ +#define QFQ_MAX_LMAX (1UL << QFQ_MTU_SHIFT) #define QFQ_MAX_AGG_CLASSES 8 /* max num classes per aggregate allowed */ @@ -214,9 +215,14 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid) return container_of(clc, struct qfq_class, common); } +static struct netlink_range_validation lmax_range = { + .min = QFQ_MIN_LMAX, + .max = QFQ_MAX_LMAX, +}; + static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = { - [TCA_QFQ_WEIGHT] = { .type = NLA_U32 }, - [TCA_QFQ_LMAX] = { .type = NLA_U32 }, + [TCA_QFQ_WEIGHT] = NLA_POLICY_RANGE(NLA_U32, 1, QFQ_MAX_WEIGHT), + [TCA_QFQ_LMAX] = NLA_POLICY_FULL_RANGE(NLA_U32, &lmax_range), }; /* @@ -408,17 +414,13 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, } err = nla_parse_nested_deprecated(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS], - qfq_policy, NULL); + qfq_policy, extack); if (err < 0) return err; - if (tb[TCA_QFQ_WEIGHT]) { + if (tb[TCA_QFQ_WEIGHT]) weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]); - if (!weight || weight > (1UL << QFQ_MAX_WSHIFT)) { - pr_notice("qfq: invalid weight %u\n", weight); - return -EINVAL; - } - } else + else weight = 1; if (tb[TCA_QFQ_LMAX]) @@ -426,11 +428,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, else lmax = psched_mtu(qdisc_dev(sch)); - if (lmax < QFQ_MIN_LMAX || lmax > (1UL << 
QFQ_MTU_SHIFT)) { - pr_notice("qfq: invalid max length %u\n", lmax); - return -EINVAL; - } - inv_w = ONE_FP / weight; weight = ONE_FP / inv_w; From 110a26edd10082bc251341a3192244496175932c Mon Sep 17 00:00:00 2001 From: Pedro Tammela Date: Tue, 11 Jul 2023 18:01:02 -0300 Subject: [PATCH 113/163] UPSTREAM: net/sched: sch_qfq: account for stab overhead in qfq_enqueue [ Upstream commit 3e337087c3b5805fe0b8a46ba622a962880b5d64 ] Lion says: ------- In the QFQ scheduler a similar issue to CVE-2023-31436 persists. Consider the following code in net/sched/sch_qfq.c: static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free) { unsigned int len = qdisc_pkt_len(skb), gso_segs; // ... if (unlikely(cl->agg->lmax < len)) { pr_debug("qfq: increasing maxpkt from %u to %u for class %u", cl->agg->lmax, len, cl->common.classid); err = qfq_change_agg(sch, cl, cl->agg->class_weight, len); if (err) { cl->qstats.drops++; return qdisc_drop(skb, sch, to_free); } // ... } Similarly to CVE-2023-31436, "lmax" is increased without any bounds checks according to the packet length "len". Usually this would not impose a problem because packet sizes are naturally limited. This is however not the actual packet length, rather the "qdisc_pkt_len(skb)" which might apply size transformations according to "struct qdisc_size_table" as created by "qdisc_get_stab()" in net/sched/sch_api.c if the TCA_STAB option was set when modifying the qdisc. A user may choose virtually any size using such a table. As a result the same issue as in CVE-2023-31436 can occur, allowing heap out-of-bounds read / writes in the kmalloc-8192 cache. 
------- We can create the issue with the following commands: tc qdisc add dev $DEV root handle 1: stab mtu 2048 tsize 512 mpu 0 \ overhead 999999999 linklayer ethernet qfq tc class add dev $DEV parent 1: classid 1:1 htb rate 6mbit burst 15k tc filter add dev $DEV parent 1: matchall classid 1:1 ping -I $DEV 1.1.1.2 This is caused by incorrectly assuming that qdisc_pkt_len() returns a length within the QFQ_MIN_LMAX < len < QFQ_MAX_LMAX. Bug: 292249631 Fixes: 462dbc9101ac ("pkt_sched: QFQ Plus: fair-queueing service at DRR cost") Reported-by: Lion Reviewed-by: Eric Dumazet Signed-off-by: Jamal Hadi Salim Signed-off-by: Pedro Tammela Reviewed-by: Simon Horman Signed-off-by: Paolo Abeni Signed-off-by: Sasha Levin (cherry picked from commit 70feebdbfad85772ab3ef152812729cab5c6c426) Signed-off-by: Lee Jones Change-Id: I69bec7b092e980fe8e0946c26ed9b5ac7c57bf3d --- net/sched/sch_qfq.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 2f3629c85158..d5610e145da2 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -381,8 +381,13 @@ static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight, u32 lmax) { struct qfq_sched *q = qdisc_priv(sch); - struct qfq_aggregate *new_agg = qfq_find_agg(q, lmax, weight); + struct qfq_aggregate *new_agg; + /* 'lmax' can range from [QFQ_MIN_LMAX, pktlen + stab overhead] */ + if (lmax > QFQ_MAX_LMAX) + return -EINVAL; + + new_agg = qfq_find_agg(q, lmax, weight); if (new_agg == NULL) { /* create new aggregate */ new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC); if (new_agg == NULL) From fda157ce154fb2e8ae6a9190969ad22236716f8c Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Thu, 20 Jul 2023 09:17:21 +0200 Subject: [PATCH 114/163] UPSTREAM: netfilter: nf_tables: skip bound chain on rule flush [ Upstream commit 6eaf41e87a223ae6f8e7a28d6e78384ad7e407f8 ] Skip bound chain when flushing table rules, the rule that owns this chain releases these 
objects. Otherwise, the following warning is triggered: WARNING: CPU: 2 PID: 1217 at net/netfilter/nf_tables_api.c:2013 nf_tables_chain_destroy+0x1f7/0x210 [nf_tables] CPU: 2 PID: 1217 Comm: chain-flush Not tainted 6.1.39 #1 RIP: 0010:nf_tables_chain_destroy+0x1f7/0x210 [nf_tables] Bug: 294357305 Fixes: d0e2c7de92c7 ("netfilter: nf_tables: add NFT_CHAIN_BINDING") Reported-by: Kevin Rich Signed-off-by: Pablo Neira Ayuso Signed-off-by: Florian Westphal Signed-off-by: Sasha Levin (cherry picked from commit e18922ce3e3169eb97838d1dcba2d679bcca446c) Signed-off-by: Lee Jones Change-Id: I48f43d0ce3410efec2513479a1f4c7708a097b01 --- net/netfilter/nf_tables_api.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index c8786b24ab42..ea2216826e11 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3800,6 +3800,8 @@ static int nf_tables_delrule(struct sk_buff *skb, const struct nfnl_info *info, list_for_each_entry(chain, &table->chains, list) { if (!nft_is_active_next(net, chain)) continue; + if (nft_chain_is_bound(chain)) + continue; ctx.chain = chain; err = nft_delrule_by_chain(&ctx); From e61d76121fff421b48ad4c018f06d5b97b04c14f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 13 Jun 2023 10:09:20 +0200 Subject: [PATCH 115/163] UPSTREAM: dma-buf: keep the signaling time of merged fences v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some Android CTS is testing if the signaling time keeps consistent during merges. v2: use the current time if the fence is still in the signaling path and the timestamp not yet available. 
v3: improve comment, fix one more case to use the correct timestamp Bug: 286438670 Signed-off-by: Christian König Reviewed-by: Luben Tuikov Link: https://patchwork.freedesktop.org/patch/msgid/20230630120041.109216-1-christian.koenig@amd.com (cherry picked from commit f781f661e8c99b0cb34129f2e374234d61864e77) Change-Id: I5cd3178213fc28ac67146f58fddf83f7d482fd76 Signed-off-by: Jindong Yue --- drivers/dma-buf/dma-fence-unwrap.c | 26 ++++++++++++++++++++++---- drivers/dma-buf/dma-fence.c | 5 +++-- drivers/gpu/drm/drm_syncobj.c | 2 +- include/linux/dma-fence.h | 2 +- 4 files changed, 27 insertions(+), 8 deletions(-) diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c index 7002bca792ff..c625bb2b5d56 100644 --- a/drivers/dma-buf/dma-fence-unwrap.c +++ b/drivers/dma-buf/dma-fence-unwrap.c @@ -66,18 +66,36 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences, { struct dma_fence_array *result; struct dma_fence *tmp, **array; + ktime_t timestamp; unsigned int i; size_t count; count = 0; + timestamp = ns_to_ktime(0); for (i = 0; i < num_fences; ++i) { - dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) - if (!dma_fence_is_signaled(tmp)) + dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) { + if (!dma_fence_is_signaled(tmp)) { ++count; + } else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, + &tmp->flags)) { + if (ktime_after(tmp->timestamp, timestamp)) + timestamp = tmp->timestamp; + } else { + /* + * Use the current time if the fence is + * currently signaling. + */ + timestamp = ktime_get(); + } + } } + /* + * If we couldn't find a pending fence just return a private signaled + * fence with the timestamp of the last signaled one. 
+ */ if (count == 0) - return dma_fence_get_stub(); + return dma_fence_allocate_private_stub(timestamp); array = kmalloc_array(count, sizeof(*array), GFP_KERNEL); if (!array) @@ -138,7 +156,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences, } while (tmp); if (count == 0) { - tmp = dma_fence_get_stub(); + tmp = dma_fence_allocate_private_stub(ktime_get()); goto return_tmp; } diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 0de0482cd36e..3855dc747fe5 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -150,10 +150,11 @@ EXPORT_SYMBOL(dma_fence_get_stub); /** * dma_fence_allocate_private_stub - return a private, signaled fence + * @timestamp: timestamp when the fence was signaled * * Return a newly allocated and signaled stub fence. */ -struct dma_fence *dma_fence_allocate_private_stub(void) +struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp) { struct dma_fence *fence; @@ -169,7 +170,7 @@ struct dma_fence *dma_fence_allocate_private_stub(void) set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags); - dma_fence_signal(fence); + dma_fence_signal_timestamp(fence, timestamp); return fence; } diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 0c2be8360525..04589a35eb09 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -353,7 +353,7 @@ EXPORT_SYMBOL(drm_syncobj_replace_fence); */ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj) { - struct dma_fence *fence = dma_fence_allocate_private_stub(); + struct dma_fence *fence = dma_fence_allocate_private_stub(ktime_get()); if (IS_ERR(fence)) return PTR_ERR(fence); diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 775cdc0b4f24..be572c3a4dcd 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -584,7 +584,7 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr) } struct dma_fence 
*dma_fence_get_stub(void); -struct dma_fence *dma_fence_allocate_private_stub(void); +struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp); u64 dma_fence_context_alloc(unsigned num); extern const struct dma_fence_ops dma_fence_array_ops; From 7666325265b26b83d0bcbe7ba97d738527bc39b4 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 6 Jul 2023 15:37:51 +0300 Subject: [PATCH 116/163] UPSTREAM: dma-buf: fix an error pointer vs NULL bug MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Smatch detected potential error pointer dereference. drivers/gpu/drm/drm_syncobj.c:888 drm_syncobj_transfer_to_timeline() error: 'fence' dereferencing possible ERR_PTR() The error pointer comes from dma_fence_allocate_private_stub(). One caller expected error pointers and one expected NULL pointers. Change it to return NULL and update the caller which expected error pointers, drm_syncobj_assign_null_handle(), to check for NULL instead. Bug: 286438670 Fixes: f781f661e8c9 ("dma-buf: keep the signaling time of merged fences v3") Signed-off-by: Dan Carpenter Reviewed-by: Christian König Reviewed-by: Sumit Semwal Signed-off-by: Sumit Semwal Link: https://patchwork.freedesktop.org/patch/msgid/b09f1996-3838-4fa2-9193-832b68262e43@moroto.mountain (cherry picked from commit 00ae1491f970acc454be0df63f50942d94825860) Change-Id: I9fe1e61543e84a0f22d8ec26e01d94b809620744 Signed-off-by: Jindong Yue --- drivers/dma-buf/dma-fence.c | 2 +- drivers/gpu/drm/drm_syncobj.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 3855dc747fe5..eef4786aaf86 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -160,7 +160,7 @@ struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp) fence = kzalloc(sizeof(*fence), GFP_KERNEL); if (fence == NULL) - return ERR_PTR(-ENOMEM); + return NULL; dma_fence_init(fence, &dma_fence_stub_ops, diff 
--git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 04589a35eb09..e592c5da70ce 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -355,8 +355,8 @@ static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj) { struct dma_fence *fence = dma_fence_allocate_private_stub(ktime_get()); - if (IS_ERR(fence)) - return PTR_ERR(fence); + if (!fence) + return -ENOMEM; drm_syncobj_replace_fence(syncobj, fence); dma_fence_put(fence); From 7ae1e02abbfb50f70a4eb3f8b094e8b77dbbe4d0 Mon Sep 17 00:00:00 2001 From: Zhaoyang Huang Date: Wed, 31 May 2023 10:51:01 +0800 Subject: [PATCH 117/163] UPSTREAM: mm: skip CMA pages when they are not available This patch fixes unproductive reclaiming of CMA pages by skipping them when they are not available for current context. It arises from the below OOM issue, which was caused by a large proportion of MIGRATE_CMA pages among free pages. [ 36.172486] [03-19 10:05:52.172] ActivityManager: page allocation failure: order:0, mode:0xc00(GFP_NOIO), nodemask=(null),cpuset=foreground,mems_allowed=0 [ 36.189447] [03-19 10:05:52.189] DMA32: 0*4kB 447*8kB (C) 217*16kB (C) 124*32kB (C) 136*64kB (C) 70*128kB (C) 22*256kB (C) 3*512kB (C) 0*1024kB 0*2048kB 0*4096kB = 35848kB [ 36.193125] [03-19 10:05:52.193] Normal: 231*4kB (UMEH) 49*8kB (MEH) 14*16kB (H) 13*32kB (H) 8*64kB (H) 2*128kB (H) 0*256kB 1*512kB (H) 0*1024kB 0*2048kB 0*4096kB = 3236kB ... [ 36.234447] [03-19 10:05:52.234] SLUB: Unable to allocate memory on node -1, gfp=0xa20(GFP_ATOMIC) [ 36.234455] [03-19 10:05:52.234] cache: ext4_io_end, object size: 64, buffer size: 64, default order: 0, min order: 0 [ 36.234459] [03-19 10:05:52.234] node 0: slabs: 53,objs: 3392, free: 0 This change further decreases the chance for wrong OOMs in the presence of a lot of CMA memory. 
[david@redhat.com: changelog addition] Link: https://lkml.kernel.org/r/1685501461-19290-1-git-send-email-zhaoyang.huang@unisoc.com Change-Id: I84f1145c38b5ff7b825f2122b33bc55997931bd7 Signed-off-by: Zhaoyang Huang Acked-by: David Hildenbrand Cc: ke.wang Cc: Matthew Wilcox Cc: Minchan Kim Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton (cherry picked from commit 5da226dbfce3a2f44978c2c7cf88166e69a6788b) Bug: 288383787 Bug: 291719697 Signed-off-by: Kalesh Singh --- mm/vmscan.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index 73e96cd78b21..5d2260bbd04f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2181,6 +2181,25 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec, } +#ifdef CONFIG_CMA +/* + * It is waste of effort to scan and reclaim CMA pages if it is not available + * for current allocation context. Kswapd can not be enrolled as it can not + * distinguish this scenario by using sc->gfp_mask = GFP_KERNEL + */ +static bool skip_cma(struct folio *folio, struct scan_control *sc) +{ + return !current_is_kswapd() && + gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE && + get_pageblock_migratetype(&folio->page) == MIGRATE_CMA; +} +#else +static bool skip_cma(struct folio *folio, struct scan_control *sc) +{ + return false; +} +#endif + /* * Isolating page from the lruvec to fill in @dst list by nr_to_scan times. 
* @@ -2227,7 +2246,8 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan, nr_pages = folio_nr_pages(folio); total_scan += nr_pages; - if (folio_zonenum(folio) > sc->reclaim_idx) { + if (folio_zonenum(folio) > sc->reclaim_idx || + skip_cma(folio, sc)) { nr_skipped[folio_zonenum(folio)] += nr_pages; move_to = &folios_skipped; goto move; From f86c79eb86ad278c48ac976d9d7d08fd36115dd7 Mon Sep 17 00:00:00 2001 From: Charan Teja Kalla Date: Wed, 9 Aug 2023 13:35:44 +0530 Subject: [PATCH 118/163] FROMGIT: Multi-gen LRU: skip CMA pages when they are not eligible This patch is based on the commit 5da226dbfce3("mm: skip CMA pages when they are not available") which skips cma pages reclaim when they are not eligible for the current allocation context. In mglru, such pages are added to the tail of the immediate generation to maintain better LRU order, which is unlike the case of conventional LRU where such pages are directly added to the head of the LRU list(akin to adding to head of the youngest generation in mglru). No observable issue without this patch on MGLRU, but logically it make sense to skip the CMA page reclaim when those pages can't be satisfied for the current allocation context. 
Link: https://lkml.kernel.org/r/1691568344-13475-1-git-send-email-quic_charante@quicinc.com Change-Id: I586415b3e3a92da23f3e79b9d63802a2ced03432 Signed-off-by: Charan Teja Kalla Reviewed-by: Kalesh Singh Cc: David Hildenbrand Cc: Suren Baghdasaryan Cc: Yu Zhao Cc: Zhaoyang Huang Signed-off-by: Andrew Morton (cherry picked from commit 75d52d9304ef5b268eb798b0c679815290a0fc83 https://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 288383787 Bug: 291719697 Signed-off-by: Kalesh Singh --- mm/vmscan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index 5d2260bbd04f..85846ee87ca7 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4869,7 +4869,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c } /* ineligible */ - if (zone > sc->reclaim_idx) { + if (zone > sc->reclaim_idx || skip_cma(folio, sc)) { gen = folio_inc_gen(lruvec, folio, false); list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); return true; From 683966ac69a05c07817ed4a8f4c4c44601adc133 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Thu, 6 Jul 2023 14:51:35 -0400 Subject: [PATCH 119/163] UPSTREAM: mm/mmap: Fix extra maple tree write based on commit 0503ea8f5ba73eb3ab13a81c1eefbaf51405385a upstream. This was inadvertently fixed during the removal of __vma_adjust(). When __vma_adjust() is adjusting next with a negative value (pushing vma->vm_end lower), there would be two writes to the maple tree. The first write is unnecessary and uses all allocated nodes in the maple state. The second write is necessary but will need to allocate nodes since the first write has used the allocated nodes. This may be a problem as it may not be safe to allocate at this time, such as a low memory situation. Fix the issue by avoiding the first write and only write the adjusted "next" VMA. 
Reported-by: John Hsu Link: https://lore.kernel.org/lkml/9cb8c599b1d7f9c1c300d1a334d5eb70ec4d7357.camel@mediatek.com/ Cc: stable@vger.kernel.org Cc: linux-mm@kvack.org Signed-off-by: Liam R. Howlett Signed-off-by: Greg Kroah-Hartman (cherry picked from commit a02c6dc0eff249970a74c4111985b6c1fabe9851 https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git linux-6.1.y) Bug: 295269894 Change-Id: I1a4bdc080d4ee92dbe06dc788961532d0c85fd7c Signed-off-by: Suren Baghdasaryan --- mm/mmap.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm/mmap.c b/mm/mmap.c index 9a61b1ce8b76..2a8e7396413a 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -801,7 +801,8 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, } if (end != vma->vm_end) { if (vma->vm_end > end) { - if (adjust_next >= 0 && !insert) { + if ((vma->vm_end + adjust_next != end) && + (!insert || (insert->vm_start != end))) { vma_mas_szero(&mas, end, vma->vm_end); mas_reset(&mas); VM_WARN_ON(insert && From 7fa88611307e911f9fe45ef28d4471210b4d8de5 Mon Sep 17 00:00:00 2001 From: Chaoyuan Peng Date: Tue, 18 Jul 2023 04:39:43 +0000 Subject: [PATCH 120/163] UPSTREAM: tty: n_gsm: fix UAF in gsm_cleanup_mux commit 9b9c8195f3f0d74a826077fc1c01b9ee74907239 upstream. In gsm_cleanup_mux() the 'gsm->dlci' pointer was not cleaned properly, leaving it a dangling pointer after gsm_dlci_release. This leads to use-after-free where 'gsm->dlci[0]' are freed and accessed by the subsequent gsm_cleanup_mux(). 
Such is the case in the following call trace: __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0x1e3/0x2cb lib/dump_stack.c:106 print_address_description+0x63/0x3b0 mm/kasan/report.c:248 __kasan_report mm/kasan/report.c:434 [inline] kasan_report+0x16b/0x1c0 mm/kasan/report.c:451 gsm_cleanup_mux+0x76a/0x850 drivers/tty/n_gsm.c:2397 gsm_config drivers/tty/n_gsm.c:2653 [inline] gsmld_ioctl+0xaae/0x15b0 drivers/tty/n_gsm.c:2986 tty_ioctl+0x8ff/0xc50 drivers/tty/tty_io.c:2816 vfs_ioctl fs/ioctl.c:51 [inline] __do_sys_ioctl fs/ioctl.c:874 [inline] __se_sys_ioctl+0xf1/0x160 fs/ioctl.c:860 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x61/0xcb Allocated by task 3501: kasan_save_stack mm/kasan/common.c:38 [inline] kasan_set_track mm/kasan/common.c:46 [inline] set_alloc_info mm/kasan/common.c:434 [inline] ____kasan_kmalloc+0xba/0xf0 mm/kasan/common.c:513 kasan_kmalloc include/linux/kasan.h:264 [inline] kmem_cache_alloc_trace+0x143/0x290 mm/slub.c:3247 kmalloc include/linux/slab.h:591 [inline] kzalloc include/linux/slab.h:721 [inline] gsm_dlci_alloc+0x53/0x3a0 drivers/tty/n_gsm.c:1932 gsm_activate_mux+0x1c/0x330 drivers/tty/n_gsm.c:2438 gsm_config drivers/tty/n_gsm.c:2677 [inline] gsmld_ioctl+0xd46/0x15b0 drivers/tty/n_gsm.c:2986 tty_ioctl+0x8ff/0xc50 drivers/tty/tty_io.c:2816 vfs_ioctl fs/ioctl.c:51 [inline] __do_sys_ioctl fs/ioctl.c:874 [inline] __se_sys_ioctl+0xf1/0x160 fs/ioctl.c:860 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x61/0xcb Freed by task 3501: kasan_save_stack mm/kasan/common.c:38 [inline] kasan_set_track+0x4b/0x80 mm/kasan/common.c:46 kasan_set_free_info+0x1f/0x40 mm/kasan/generic.c:360 ____kasan_slab_free+0xd8/0x120 mm/kasan/common.c:366 kasan_slab_free include/linux/kasan.h:230 [inline] slab_free_hook mm/slub.c:1705 [inline] slab_free_freelist_hook+0xdd/0x160 mm/slub.c:1731 
slab_free mm/slub.c:3499 [inline] kfree+0xf1/0x270 mm/slub.c:4559 dlci_put drivers/tty/n_gsm.c:1988 [inline] gsm_dlci_release drivers/tty/n_gsm.c:2021 [inline] gsm_cleanup_mux+0x574/0x850 drivers/tty/n_gsm.c:2415 gsm_config drivers/tty/n_gsm.c:2653 [inline] gsmld_ioctl+0xaae/0x15b0 drivers/tty/n_gsm.c:2986 tty_ioctl+0x8ff/0xc50 drivers/tty/tty_io.c:2816 vfs_ioctl fs/ioctl.c:51 [inline] __do_sys_ioctl fs/ioctl.c:874 [inline] __se_sys_ioctl+0xf1/0x160 fs/ioctl.c:860 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x61/0xcb Bug: 291178675 Fixes: aa371e96f05d ("tty: n_gsm: fix restart handling via CLD command") Signed-off-by: Chaoyuan Peng Cc: stable Signed-off-by: Greg Kroah-Hartman (cherry picked from commit 9615ca54bc138e35353a001e8b5d4824dce72188) Signed-off-by: Lee Jones Change-Id: I947cad0e8080378b40d4098add48992ade5fe638 --- drivers/tty/n_gsm.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index b6e0cc4571ea..59a559366b61 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -2508,8 +2508,10 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc) gsm->has_devices = false; } for (i = NUM_DLCI - 1; i >= 0; i--) - if (gsm->dlci[i]) + if (gsm->dlci[i]) { gsm_dlci_release(gsm->dlci[i]); + gsm->dlci[i] = NULL; + } mutex_unlock(&gsm->mutex); /* Now wipe the queues */ tty_ldisc_flush(gsm->tty); From 2d3351bd5ef5731cb6e916f833625d7323a10206 Mon Sep 17 00:00:00 2001 From: Xu Yang Date: Wed, 9 Aug 2023 10:44:31 +0800 Subject: [PATCH 121/163] FROMGIT: BACKPORT: usb: ehci: add workaround for chipidea PORTSC.PEC bug Some NXP processor using chipidea IP has a bug when frame babble is detected. As per 4.15.1.1.1 Serial Bus Babble: A babble condition also exists if IN transaction is in progress at High-speed SOF2 point. This is called frame babble. 
The host controller must disable the port to which the frame babble is detected. The USB controller has disabled the port (PE cleared) and has asserted USBERRINT when frame babble is detected, but PEC is not asserted. Therefore, the SW isn't aware that port has been disabled. Then the SW keeps sending packets to this port, but all of the transfers will fail. This workaround will firstly assert PCD by SW when USBERRINT is detected and then judge whether port change has really occurred or not by polling roothub status. Because the PEC doesn't get asserted in our case, this patch will also assert it by SW when specific conditions are satisfied. Bug: 295046582 Signed-off-by: Xu Yang Acked-by: Peter Chen Link: https://lore.kernel.org/r/20230809024432.535160-1-xu.yang_2@nxp.com Signed-off-by: Greg Kroah-Hartman (cherry picked from commit dda4b60ed70bd670eefda081f70c0cb20bbeb1fa https://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git usb-next) [JD: replaced has_ci_pec_bug with existing has_fsl_port_bug to avoid abi breakage] Change-Id: I7d36cf656efda2dd46c0ddcca252b3de6ea434ee Signed-off-by: Jindong Yue --- drivers/usb/host/ehci-hcd.c | 8 ++++++-- drivers/usb/host/ehci-hub.c | 10 +++++++++- drivers/usb/host/ehci.h | 9 +++++++++ 3 files changed, 24 insertions(+), 3 deletions(-) diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index a1930db0da1c..68674b19f15d 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -755,10 +755,14 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) /* normal [4.15.1.2] or error [4.15.1.1] completion */ if (likely ((status & (STS_INT|STS_ERR)) != 0)) { - if (likely ((status & STS_ERR) == 0)) + if (likely ((status & STS_ERR) == 0)) { INCR(ehci->stats.normal); - else + } else { + /* Force to check port status */ + if (ehci->has_fsl_port_bug) + status |= STS_PCD; INCR(ehci->stats.error); + } bh = 1; } diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 
efe30e3be22f..1aee392e8492 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -674,7 +674,8 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf) if ((temp & mask) != 0 || test_bit(i, &ehci->port_c_suspend) || (ehci->reset_done[i] && time_after_eq( - jiffies, ehci->reset_done[i]))) { + jiffies, ehci->reset_done[i])) + || ehci_has_ci_pec_bug(ehci, temp)) { if (i < 7) buf [0] |= 1 << (i + 1); else @@ -875,6 +876,13 @@ int ehci_hub_control( if (temp & PORT_PEC) status |= USB_PORT_STAT_C_ENABLE << 16; + if (ehci_has_ci_pec_bug(ehci, temp)) { + status |= USB_PORT_STAT_C_ENABLE << 16; + ehci_info(ehci, + "PE is cleared by HW port:%d PORTSC:%08x\n", + wIndex + 1, temp); + } + if ((temp & PORT_OCC) && (!ignore_oc && !ehci->spurious_oc)){ status |= USB_PORT_STAT_C_OVERCURRENT << 16; diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h index ad3f13a3eaf1..4ee0d34323cf 100644 --- a/drivers/usb/host/ehci.h +++ b/drivers/usb/host/ehci.h @@ -707,6 +707,15 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc) */ #define ehci_has_fsl_susp_errata(e) ((e)->has_fsl_susp_errata) +/* + * Some Freescale/NXP processors using ChipIdea IP have a bug in which + * disabling the port (PE is cleared) does not cause PEC to be asserted + * when frame babble is detected. + */ +#define ehci_has_ci_pec_bug(e, portsc) \ + ((e)->has_fsl_port_bug && ((e)->command & CMD_PSE) \ + && !(portsc & PORT_PEC) && !(portsc & PORT_PE)) + /* * While most USB host controllers implement their registers in * little-endian format, a minority (celleb companion chip) implement From 3378cbd2649f3c104e9339e4f28c64c3c8c724bb Mon Sep 17 00:00:00 2001 From: Xu Yang Date: Wed, 9 Aug 2023 14:53:27 +0800 Subject: [PATCH 122/163] FROMGIT: usb: host: ehci-sched: try to turn on io watchdog as long as periodic_count > 0 If initially isoc_count = 0, periodic_count > 0 and the io watchdog is not started (e.g. 
just timed out), then the io watchdog may not run after submitting isoc urbs and enable_periodic(). The isoc urbs may not complete forever if the controller had already stopped periodic schedule. This will try to call turn_on_io_watchdog() for each enable_periodic() to ensure the io watchdog functions properly. Bug: 295046582 Signed-off-by: Xu Yang Reviewed-by: Alan Stern Link: https://lore.kernel.org/r/20230809065327.952368-1-xu.yang_2@nxp.com Signed-off-by: Greg Kroah-Hartman (cherry picked from commit c272dabf2d43c3523af1a40be3127e7a1f84540a https://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git usb-next) Change-Id: I0f10ec8bcf0e14269b2a9693617dd83327c26a20 Signed-off-by: Jindong Yue --- drivers/usb/host/ehci-sched.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index bd542b6fc46b..7e834587e7de 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -490,13 +490,14 @@ static int tt_no_collision( static void enable_periodic(struct ehci_hcd *ehci) { if (ehci->periodic_count++) - return; + goto out; /* Stop waiting to turn off the periodic schedule */ ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC); /* Don't start the schedule until PSS is 0 */ ehci_poll_PSS(ehci); +out: turn_on_io_watchdog(ehci); } From 0ee0062c9486e1cff101fb2ee169bb0a31384208 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Sat, 22 Jul 2023 00:51:07 +0200 Subject: [PATCH 123/163] UPSTREAM: mm: fix memory ordering for mm_lock_seq and vm_lock_seq mm->mm_lock_seq effectively functions as a read/write lock; therefore it must be used with acquire/release semantics. A specific example is the interaction between userfaultfd_register() and lock_vma_under_rcu(). 
userfaultfd_register() does the following from the point where it changes a VMA's flags to the point where concurrent readers are permitted again (in a simple scenario where only a single private VMA is accessed and no merging/splitting is involved): userfaultfd_register userfaultfd_set_vm_flags vm_flags_reset vma_start_write down_write(&vma->vm_lock->lock) vma->vm_lock_seq = mm_lock_seq [marks VMA as busy] up_write(&vma->vm_lock->lock) vm_flags_init [sets VM_UFFD_* in __vm_flags] vma->vm_userfaultfd_ctx.ctx = ctx mmap_write_unlock vma_end_write_all WRITE_ONCE(mm->mm_lock_seq, mm->mm_lock_seq + 1) [unlocks VMA] There are no memory barriers in between the __vm_flags update and the mm->mm_lock_seq update that unlocks the VMA, so the unlock can be reordered to above the `vm_flags_init()` call, which means from the perspective of a concurrent reader, a VMA can be marked as a userfaultfd VMA while it is not VMA-locked. That's bad, we definitely need a store-release for the unlock operation. The non-atomic write to vma->vm_lock_seq in vma_start_write() is mostly fine because all accesses to vma->vm_lock_seq that matter are always protected by the VMA lock. There is a racy read in vma_start_read() though that can tolerate false-positives, so we should be using WRITE_ONCE() to keep things tidy and data-race-free (including for KCSAN). 
On the other side, lock_vma_under_rcu() works as follows in the relevant region for locking and userfaultfd check: lock_vma_under_rcu vma_start_read vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq) [early bailout] down_read_trylock(&vma->vm_lock->lock) vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq) [main check] userfaultfd_armed checks vma->vm_flags & __VM_UFFD_FLAGS Here, the interesting aspect is how far down the mm->mm_lock_seq read can be reordered - if this read is reordered down below the vma->vm_flags access, this could cause lock_vma_under_rcu() to partly operate on information that was read while the VMA was supposed to be locked. To prevent this kind of downwards bleeding of the mm->mm_lock_seq read, we need to read it with a load-acquire. Some of the comment wording is based on suggestions by Suren. BACKPORT WARNING: One of the functions changed by this patch (which I've written against Linus' tree) is vma_try_start_write(), but this function no longer exists in mm/mm-everything. I don't know whether the merged version of this patch will be ordered before or after the patch that removes vma_try_start_write(). If you're backporting this patch to a tree with vma_try_start_write(), make sure this patch changes that function. 
Link: https://lkml.kernel.org/r/20230721225107.942336-1-jannh@google.com Fixes: 5e31275cc997 ("mm: add per-VMA lock and helper functions to control it") Signed-off-by: Jann Horn Reviewed-by: Suren Baghdasaryan Cc: Signed-off-by: Andrew Morton (cherry picked from commit b1f02b95758d05b799731d939e76a0bd6da312db) Bug: 293665307 Change-Id: Ifbf30a8ee7211f9c7fe26b923ca33ffde68b6a7b Signed-off-by: Suren Baghdasaryan --- include/linux/mm.h | 29 +++++++++++++++++++++++------ include/linux/mm_types.h | 28 ++++++++++++++++++++++++++++ include/linux/mmap_lock.h | 10 ++++++++-- 3 files changed, 59 insertions(+), 8 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 62ce759bdcff..82b05bb18af3 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -635,8 +635,14 @@ struct vm_operations_struct { */ static inline bool vma_start_read(struct vm_area_struct *vma) { - /* Check before locking. A race might cause false locked result. */ - if (vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq)) + /* + * Check before locking. A race might cause false locked result. + * We can use READ_ONCE() for the mm_lock_seq here, and don't need + * ACQUIRE semantics, because this is just a lockless check whose result + * we don't rely on for anything - the mm_lock_seq read against which we + * need ordering is below. + */ + if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq)) return false; if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0)) @@ -647,8 +653,13 @@ static inline bool vma_start_read(struct vm_area_struct *vma) * False unlocked result is impossible because we modify and check * vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq * modification invalidates all existing locks. + * + * We must use ACQUIRE semantics for the mm_lock_seq so that if we are + * racing with vma_end_write_all(), we only start reading from the VMA + * after it has been unlocked. + * This pairs with RELEASE semantics in vma_end_write_all(). 
*/ - if (unlikely(vma->vm_lock_seq == READ_ONCE(vma->vm_mm->mm_lock_seq))) { + if (unlikely(vma->vm_lock_seq == smp_load_acquire(&vma->vm_mm->mm_lock_seq))) { up_read(&vma->vm_lock->lock); return false; } @@ -670,7 +681,7 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq) * current task is holding mmap_write_lock, both vma->vm_lock_seq and * mm->mm_lock_seq can't be concurrently modified. */ - *mm_lock_seq = READ_ONCE(vma->vm_mm->mm_lock_seq); + *mm_lock_seq = vma->vm_mm->mm_lock_seq; return (vma->vm_lock_seq == *mm_lock_seq); } @@ -682,7 +693,13 @@ static inline void vma_start_write(struct vm_area_struct *vma) return; down_write(&vma->vm_lock->lock); - vma->vm_lock_seq = mm_lock_seq; + /* + * We should use WRITE_ONCE() here because we can have concurrent reads + * from the early lockless pessimistic check in vma_start_read(). + * We don't really care about the correctness of that early check, but + * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy. 
+ */ + WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq); up_write(&vma->vm_lock->lock); } @@ -696,7 +713,7 @@ static inline bool vma_try_start_write(struct vm_area_struct *vma) if (!down_write_trylock(&vma->vm_lock->lock)) return false; - vma->vm_lock_seq = mm_lock_seq; + WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq); up_write(&vma->vm_lock->lock); return true; } diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 00f5715a28c7..4e9453ab5985 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -478,6 +478,20 @@ struct vm_area_struct { }; #ifdef CONFIG_PER_VMA_LOCK + /* + * Can only be written (using WRITE_ONCE()) while holding both: + * - mmap_lock (in write mode) + * - vm_lock->lock (in write mode) + * Can be read reliably while holding one of: + * - mmap_lock (in read or write mode) + * - vm_lock->lock (in read or write mode) + * Can be read unreliably (using READ_ONCE()) for pessimistic bailout + * while holding nothing (except RCU to keep the VMA struct allocated). + * + * This sequence counter is explicitly allowed to overflow; sequence + * counter reuse can only lead to occasional unnecessary use of the + * slowpath. + */ int vm_lock_seq; struct vma_lock *vm_lock; @@ -618,6 +632,20 @@ struct mm_struct { * by mmlist_lock */ #ifdef CONFIG_PER_VMA_LOCK + /* + * This field has lock-like semantics, meaning it is sometimes + * accessed with ACQUIRE/RELEASE semantics. + * Roughly speaking, incrementing the sequence number is + * equivalent to releasing locks on VMAs; reading the sequence + * number can be part of taking a read lock on a VMA. + * + * Can be modified under write mmap_lock using RELEASE + * semantics. + * Can be read with no other protection when holding write + * mmap_lock. + * Can be read with ACQUIRE semantics if not holding write + * mmap_lock. 
+ */ int mm_lock_seq; #endif diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index aab8f1b28d26..e05e167dbd16 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -76,8 +76,14 @@ static inline void mmap_assert_write_locked(struct mm_struct *mm) static inline void vma_end_write_all(struct mm_struct *mm) { mmap_assert_write_locked(mm); - /* No races during update due to exclusive mmap_lock being held */ - WRITE_ONCE(mm->mm_lock_seq, mm->mm_lock_seq + 1); + /* + * Nobody can concurrently modify mm->mm_lock_seq due to exclusive + * mmap_lock being held. + * We need RELEASE semantics here to ensure that preceding stores into + * the VMA take effect before we unlock it with this store. + * Pairs with ACQUIRE semantics in vma_start_read(). + */ + smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1); } #else static inline void vma_end_write_all(struct mm_struct *mm) {} From b6093c47fe2ac9ec4dbbed37d98db4a04f7244f7 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Fri, 21 Jul 2023 05:46:43 +0200 Subject: [PATCH 124/163] BACKPORT: mm: lock VMA in dup_anon_vma() before setting ->anon_vma When VMAs are merged, dup_anon_vma() is called with `dst` pointing to the VMA that is being expanded to cover the area previously occupied by another VMA. This currently happens while `dst` is not write-locked. This means that, in the `src->anon_vma && !dst->anon_vma` case, as soon as the assignment `dst->anon_vma = src->anon_vma` has happened, concurrent page faults can happen on `dst` under the per-VMA lock. This is already icky in itself, since such page faults can now install pages into `dst` that are attached to an `anon_vma` that is not yet tied back to the `anon_vma` with an `anon_vma_chain`. 
But if `anon_vma_clone()` fails due to an out-of-memory error, things get much worse: `anon_vma_clone()` then reverts `dst->anon_vma` back to NULL, and `dst` remains completely unconnected to the `anon_vma`, even though we can have pages in the area covered by `dst` that point to the `anon_vma`. This means the `anon_vma` of such pages can be freed while the pages are still mapped into userspace, which leads to UAF when a helper like folio_lock_anon_vma_read() tries to look up the anon_vma of such a page. This theoretically is a security bug, but I believe it is really hard to actually trigger as an unprivileged user because it requires that you can make an order-0 GFP_KERNEL allocation fail, and the page allocator tries pretty hard to prevent that. I think doing the vma_start_write() call inside dup_anon_vma() is the most straightforward fix for now. For a kernel-assisted reproducer, see the notes section of the patch mail. Link: https://lkml.kernel.org/r/20230721034643.616851-1-jannh@google.com Fixes: 5e31275cc997 ("mm: add per-VMA lock and helper functions to control it") Signed-off-by: Jann Horn Reviewed-by: Suren Baghdasaryan Cc: Signed-off-by: Andrew Morton (cherry picked from commit d8ab9f7b644a2c9b64de405c1953c905ff219dc9) [surenb: since dup_anon_vma() is missing, add vma_start_write() directly before anon_vma is assigned] Bug: 293665307 Change-Id: I1b44e6278e464157e666cc5dbdb0fcc29bcf665e Signed-off-by: Suren Baghdasaryan --- mm/mmap.c | 1 + 1 file changed, 1 insertion(+) diff --git a/mm/mmap.c b/mm/mmap.c index 2a8e7396413a..5d7129b3f03f 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -537,6 +537,7 @@ inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma, int error; anon_vma = next->anon_vma; + vma_start_write(vma); vma->anon_vma = anon_vma; error = anon_vma_clone(vma, next); if (error) From 3c187b4a1250b4a98e2e4a7aa8e84c79be8e53aa Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 4 Aug 2023 08:27:19 -0700 Subject: [PATCH 125/163] 
BACKPORT: FROMGIT: mm: enable page walking API to lock vmas during the walk walk_page_range() and friends often operate under write-locked mmap_lock. With introduction of vma locks, the vmas have to be locked as well during such walks to prevent concurrent page faults in these areas. Add an additional member to mm_walk_ops to indicate locking requirements for the walk. The change ensures that page walks which prevent concurrent page faults by write-locking mmap_lock, operate correctly after introduction of per-vma locks. With per-vma locks page faults can be handled under vma lock without taking mmap_lock at all, so write locking mmap_lock would not stop them. The change ensures vmas are properly locked during such walks. A sample issue this solves is do_mbind() performing queue_pages_range() to queue pages for migration. Without this change a concurrent page can be faulted into the area and be left out of migration. Link: https://lkml.kernel.org/r/20230804152724.3090321-2-surenb@google.com Signed-off-by: Suren Baghdasaryan Suggested-by: Linus Torvalds Suggested-by: Jann Horn Cc: David Hildenbrand Cc: Davidlohr Bueso Cc: Hugh Dickins Cc: Johannes Weiner Cc: Laurent Dufour Cc: Liam Howlett Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Michel Lespinasse Cc: Peter Xu Cc: Vlastimil Babka Cc: Signed-off-by: Andrew Morton (cherry picked from commit 2ebc368f59eedcef0de7c832fe1d62935cd3a7ff https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) [surenb: changed locking in break_ksm since it's done differently, skipped the change in the missing __ksm_del_vma(), skipped the change in the missing walk_page_range_vma(), removed unused local variables] Bug: 293665307 Change-Id: Iede9eaa950ea59a268a2e74a8d3022162f0bbd80 Signed-off-by: Suren Baghdasaryan --- arch/powerpc/mm/book3s64/subpage_prot.c | 1 + arch/riscv/mm/pageattr.c | 1 + arch/s390/mm/gmap.c | 5 ++++ fs/proc/task_mmu.c | 5 ++++ include/linux/pagewalk.h | 11 +++++++++ mm/damon/vaddr.c | 2 ++ 
mm/hmm.c | 1 + mm/ksm.c | 17 ++++++++----- mm/madvise.c | 3 +++ mm/memcontrol.c | 2 ++ mm/memory-failure.c | 1 + mm/mempolicy.c | 24 ++++++++++-------- mm/migrate_device.c | 1 + mm/mincore.c | 1 + mm/mlock.c | 1 + mm/mprotect.c | 1 + mm/pagewalk.c | 33 +++++++++++++++++++++++-- mm/vmscan.c | 1 + 18 files changed, 93 insertions(+), 18 deletions(-) diff --git a/arch/powerpc/mm/book3s64/subpage_prot.c b/arch/powerpc/mm/book3s64/subpage_prot.c index b75a9fb99599..b0eea434ef08 100644 --- a/arch/powerpc/mm/book3s64/subpage_prot.c +++ b/arch/powerpc/mm/book3s64/subpage_prot.c @@ -143,6 +143,7 @@ static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr, static const struct mm_walk_ops subpage_walk_ops = { .pmd_entry = subpage_walk_pmd_entry, + .walk_lock = PGWALK_WRLOCK_VERIFY, }; static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr, diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c index 86c56616e5de..0f33fc40c911 100644 --- a/arch/riscv/mm/pageattr.c +++ b/arch/riscv/mm/pageattr.c @@ -102,6 +102,7 @@ static const struct mm_walk_ops pageattr_ops = { .pmd_entry = pageattr_pmd_entry, .pte_entry = pageattr_pte_entry, .pte_hole = pageattr_pte_hole, + .walk_lock = PGWALK_RDLOCK, }; static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index e4567b5fcdda..d3f4b2c0dd9a 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -2510,6 +2510,7 @@ static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr, static const struct mm_walk_ops thp_split_walk_ops = { .pmd_entry = thp_split_walk_pmd_entry, + .walk_lock = PGWALK_WRLOCK_VERIFY, }; static inline void thp_split_mm(struct mm_struct *mm) @@ -2554,6 +2555,7 @@ static int __zap_zero_pages(pmd_t *pmd, unsigned long start, static const struct mm_walk_ops zap_zero_walk_ops = { .pmd_entry = __zap_zero_pages, + .walk_lock = PGWALK_WRLOCK, }; /* @@ -2655,6 +2657,7 @@ static const struct mm_walk_ops 
enable_skey_walk_ops = { .hugetlb_entry = __s390_enable_skey_hugetlb, .pte_entry = __s390_enable_skey_pte, .pmd_entry = __s390_enable_skey_pmd, + .walk_lock = PGWALK_WRLOCK, }; int s390_enable_skey(void) @@ -2692,6 +2695,7 @@ static int __s390_reset_cmma(pte_t *pte, unsigned long addr, static const struct mm_walk_ops reset_cmma_walk_ops = { .pte_entry = __s390_reset_cmma, + .walk_lock = PGWALK_WRLOCK, }; void s390_reset_cmma(struct mm_struct *mm) @@ -2728,6 +2732,7 @@ static int s390_gather_pages(pte_t *ptep, unsigned long addr, static const struct mm_walk_ops gather_pages_ops = { .pte_entry = s390_gather_pages, + .walk_lock = PGWALK_RDLOCK, }; /* diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index a23541614a0d..fe244a271620 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -758,12 +758,14 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask, static const struct mm_walk_ops smaps_walk_ops = { .pmd_entry = smaps_pte_range, .hugetlb_entry = smaps_hugetlb_range, + .walk_lock = PGWALK_RDLOCK, }; static const struct mm_walk_ops smaps_shmem_walk_ops = { .pmd_entry = smaps_pte_range, .hugetlb_entry = smaps_hugetlb_range, .pte_hole = smaps_pte_hole, + .walk_lock = PGWALK_RDLOCK, }; /* @@ -1247,6 +1249,7 @@ static int clear_refs_test_walk(unsigned long start, unsigned long end, static const struct mm_walk_ops clear_refs_walk_ops = { .pmd_entry = clear_refs_pte_range, .test_walk = clear_refs_test_walk, + .walk_lock = PGWALK_WRLOCK, }; static ssize_t clear_refs_write(struct file *file, const char __user *buf, @@ -1623,6 +1626,7 @@ static const struct mm_walk_ops pagemap_ops = { .pmd_entry = pagemap_pmd_range, .pte_hole = pagemap_pte_hole, .hugetlb_entry = pagemap_hugetlb_range, + .walk_lock = PGWALK_RDLOCK, }; /* @@ -1929,6 +1933,7 @@ static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, static const struct mm_walk_ops show_numa_ops = { .hugetlb_entry = gather_hugetlb_stats, .pmd_entry = gather_pte_stats, + .walk_lock = PGWALK_RDLOCK, 
}; /* diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h index f3fafb731ffd..d29ada832fa1 100644 --- a/include/linux/pagewalk.h +++ b/include/linux/pagewalk.h @@ -6,6 +6,16 @@ struct mm_walk; +/* Locking requirement during a page walk. */ +enum page_walk_lock { + /* mmap_lock should be locked for read to stabilize the vma tree */ + PGWALK_RDLOCK = 0, + /* vma will be write-locked during the walk */ + PGWALK_WRLOCK = 1, + /* vma is expected to be already write-locked during the walk */ + PGWALK_WRLOCK_VERIFY = 2, +}; + /** * struct mm_walk_ops - callbacks for walk_page_range * @pgd_entry: if set, called for each non-empty PGD (top-level) entry @@ -55,6 +65,7 @@ struct mm_walk_ops { int (*pre_vma)(unsigned long start, unsigned long end, struct mm_walk *walk); void (*post_vma)(struct mm_walk *walk); + enum page_walk_lock walk_lock; }; /* diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 15f03df66db6..3036ebfcdd83 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -384,6 +384,7 @@ static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask, static const struct mm_walk_ops damon_mkold_ops = { .pmd_entry = damon_mkold_pmd_entry, .hugetlb_entry = damon_mkold_hugetlb_entry, + .walk_lock = PGWALK_RDLOCK, }; static void damon_va_mkold(struct mm_struct *mm, unsigned long addr) @@ -521,6 +522,7 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask, static const struct mm_walk_ops damon_young_ops = { .pmd_entry = damon_young_pmd_entry, .hugetlb_entry = damon_young_hugetlb_entry, + .walk_lock = PGWALK_RDLOCK, }; static bool damon_va_young(struct mm_struct *mm, unsigned long addr, diff --git a/mm/hmm.c b/mm/hmm.c index 3850fb625dda..b69d1591e392 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -548,6 +548,7 @@ static const struct mm_walk_ops hmm_walk_ops = { .pte_hole = hmm_vma_walk_hole, .hugetlb_entry = hmm_vma_walk_hugetlb_entry, .test_walk = hmm_vma_walk_test, + .walk_lock = PGWALK_RDLOCK, }; /** diff --git a/mm/ksm.c b/mm/ksm.c index 
cb272b6fde59..c5a2f83f62f1 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -434,13 +434,18 @@ static inline bool ksm_test_exit(struct mm_struct *mm) * of the process that owns 'vma'. We also do not want to enforce * protection keys here anyway. */ -static int break_ksm(struct vm_area_struct *vma, unsigned long addr) +static int break_ksm(struct vm_area_struct *vma, unsigned long addr, bool lock_vma) { struct page *page; vm_fault_t ret = 0; do { cond_resched(); + if (lock_vma) + vma_start_write(vma); + else + mmap_assert_locked(vma->vm_mm); + page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE); if (IS_ERR_OR_NULL(page)) @@ -511,7 +516,7 @@ static void break_cow(struct ksm_rmap_item *rmap_item) mmap_read_lock(mm); vma = find_mergeable_vma(mm, addr); if (vma) - break_ksm(vma, addr); + break_ksm(vma, addr, false); mmap_read_unlock(mm); } @@ -814,7 +819,7 @@ static void remove_trailing_rmap_items(struct ksm_rmap_item **rmap_list) * in cmp_and_merge_page on one of the rmap_items we would be removing. 
*/ static int unmerge_ksm_pages(struct vm_area_struct *vma, - unsigned long start, unsigned long end) + unsigned long start, unsigned long end, bool lock_vma) { unsigned long addr; int err = 0; @@ -825,7 +830,7 @@ static int unmerge_ksm_pages(struct vm_area_struct *vma, if (signal_pending(current)) err = -ERESTARTSYS; else - err = break_ksm(vma, addr); + err = break_ksm(vma, addr, lock_vma); } return err; } @@ -972,7 +977,7 @@ static int unmerge_and_remove_all_rmap_items(void) if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) continue; err = unmerge_ksm_pages(vma, - vma->vm_start, vma->vm_end); + vma->vm_start, vma->vm_end, false); if (err) goto error; } @@ -2487,7 +2492,7 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start, return 0; /* just ignore the advice */ if (vma->anon_vma) { - err = unmerge_ksm_pages(vma, start, end); + err = unmerge_ksm_pages(vma, start, end, true); if (err) return err; } diff --git a/mm/madvise.c b/mm/madvise.c index 42c5a65e1c2d..105c17011a2c 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -234,6 +234,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, static const struct mm_walk_ops swapin_walk_ops = { .pmd_entry = swapin_walk_pmd_entry, + .walk_lock = PGWALK_RDLOCK, }; static void force_shm_swapin_readahead(struct vm_area_struct *vma, @@ -541,6 +542,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, static const struct mm_walk_ops cold_walk_ops = { .pmd_entry = madvise_cold_or_pageout_pte_range, + .walk_lock = PGWALK_RDLOCK, }; static void madvise_cold_page_range(struct mmu_gather *tlb, @@ -763,6 +765,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, static const struct mm_walk_ops madvise_free_walk_ops = { .pmd_entry = madvise_free_pte_range, + .walk_lock = PGWALK_RDLOCK, }; static int madvise_free_single_vma(struct vm_area_struct *vma, diff --git a/mm/memcontrol.c b/mm/memcontrol.c index aa09cf4e8bb5..5879b5d6b483 100644 --- a/mm/memcontrol.c +++ 
b/mm/memcontrol.c @@ -5966,6 +5966,7 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, static const struct mm_walk_ops precharge_walk_ops = { .pmd_entry = mem_cgroup_count_precharge_pte_range, + .walk_lock = PGWALK_RDLOCK, }; static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm) @@ -6242,6 +6243,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, static const struct mm_walk_ops charge_walk_ops = { .pmd_entry = mem_cgroup_move_charge_pte_range, + .walk_lock = PGWALK_RDLOCK, }; static void mem_cgroup_move_charge(void) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 4457f9423e2c..3badad5ab293 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -722,6 +722,7 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask, static const struct mm_walk_ops hwp_walk_ops = { .pmd_entry = hwpoison_pte_range, .hugetlb_entry = hwpoison_hugetlb_range, + .walk_lock = PGWALK_RDLOCK, }; /* diff --git a/mm/mempolicy.c b/mm/mempolicy.c index cd2fc238c24d..7deb394c669a 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -709,6 +709,14 @@ static const struct mm_walk_ops queue_pages_walk_ops = { .hugetlb_entry = queue_pages_hugetlb, .pmd_entry = queue_pages_pte_range, .test_walk = queue_pages_test_walk, + .walk_lock = PGWALK_RDLOCK, +}; + +static const struct mm_walk_ops queue_pages_lock_vma_walk_ops = { + .hugetlb_entry = queue_pages_hugetlb, + .pmd_entry = queue_pages_pte_range, + .test_walk = queue_pages_test_walk, + .walk_lock = PGWALK_WRLOCK, }; /* @@ -729,7 +737,7 @@ static const struct mm_walk_ops queue_pages_walk_ops = { static int queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, nodemask_t *nodes, unsigned long flags, - struct list_head *pagelist) + struct list_head *pagelist, bool lock_vma) { int err; struct queue_pages qp = { @@ -740,8 +748,10 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, .end = end, .first = NULL, }; + const struct 
mm_walk_ops *ops = lock_vma ? + &queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops; - err = walk_page_range(mm, start, end, &queue_pages_walk_ops, &qp); + err = walk_page_range(mm, start, end, ops, &qp); if (!qp.first) /* whole range in hole */ @@ -1086,7 +1096,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest, vma = find_vma(mm, 0); VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))); queue_pages_range(mm, vma->vm_start, mm->task_size, &nmask, - flags | MPOL_MF_DISCONTIG_OK, &pagelist); + flags | MPOL_MF_DISCONTIG_OK, &pagelist, false); if (!list_empty(&pagelist)) { err = migrate_pages(&pagelist, alloc_migration_target, NULL, @@ -1263,8 +1273,6 @@ static long do_mbind(unsigned long start, unsigned long len, nodemask_t *nmask, unsigned long flags) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; - struct vma_iterator vmi; struct mempolicy *new; unsigned long end; int err; @@ -1330,12 +1338,8 @@ static long do_mbind(unsigned long start, unsigned long len, * Lock the VMAs before scanning for pages to migrate, to ensure we don't * miss a concurrently inserted page. 
*/ - vma_iter_init(&vmi, mm, start); - for_each_vma_range(vmi, vma, end) - vma_start_write(vma); - ret = queue_pages_range(mm, start, end, nmask, - flags | MPOL_MF_INVERT, &pagelist); + flags | MPOL_MF_INVERT, &pagelist, true); if (ret < 0) { err = ret; diff --git a/mm/migrate_device.c b/mm/migrate_device.c index 721b2365dbca..4b1491d5710a 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -286,6 +286,7 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp, static const struct mm_walk_ops migrate_vma_walk_ops = { .pmd_entry = migrate_vma_collect_pmd, .pte_hole = migrate_vma_collect_hole, + .walk_lock = PGWALK_RDLOCK, }; /* diff --git a/mm/mincore.c b/mm/mincore.c index 1eb6aac88d84..dd8f8837f7d5 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -177,6 +177,7 @@ static const struct mm_walk_ops mincore_walk_ops = { .pmd_entry = mincore_pte_range, .pte_hole = mincore_unmapped_range, .hugetlb_entry = mincore_hugetlb, + .walk_lock = PGWALK_RDLOCK, }; /* diff --git a/mm/mlock.c b/mm/mlock.c index 9738d49bbc24..827df077a5eb 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -365,6 +365,7 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma, { static const struct mm_walk_ops mlock_walk_ops = { .pmd_entry = mlock_pte_range, + .walk_lock = PGWALK_WRLOCK_VERIFY, }; /* diff --git a/mm/mprotect.c b/mm/mprotect.c index 1b9198d38cdf..3a04999e8353 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -542,6 +542,7 @@ static const struct mm_walk_ops prot_none_walk_ops = { .pte_entry = prot_none_pte_entry, .hugetlb_entry = prot_none_hugetlb_entry, .test_walk = prot_none_test, + .walk_lock = PGWALK_WRLOCK, }; int diff --git a/mm/pagewalk.c b/mm/pagewalk.c index 2ff3a5bebceb..d92c5b567837 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -384,6 +384,33 @@ static int __walk_page_range(unsigned long start, unsigned long end, return err; } +static inline void process_mm_walk_lock(struct mm_struct *mm, + enum page_walk_lock walk_lock) +{ + if (walk_lock == PGWALK_RDLOCK) + 
mmap_assert_locked(mm); + else + mmap_assert_write_locked(mm); +} + +static inline void process_vma_walk_lock(struct vm_area_struct *vma, + enum page_walk_lock walk_lock) +{ +#ifdef CONFIG_PER_VMA_LOCK + switch (walk_lock) { + case PGWALK_WRLOCK: + vma_start_write(vma); + break; + case PGWALK_WRLOCK_VERIFY: + vma_assert_write_locked(vma); + break; + case PGWALK_RDLOCK: + /* PGWALK_RDLOCK is handled by process_mm_walk_lock */ + break; + } +#endif +} + /** * walk_page_range - walk page table with caller specific callbacks * @mm: mm_struct representing the target process of page table walk @@ -443,7 +470,7 @@ int walk_page_range(struct mm_struct *mm, unsigned long start, if (!walk.mm) return -EINVAL; - mmap_assert_locked(walk.mm); + process_mm_walk_lock(walk.mm, ops->walk_lock); vma = find_vma(walk.mm, start); do { @@ -458,6 +485,7 @@ int walk_page_range(struct mm_struct *mm, unsigned long start, if (ops->pte_hole) err = ops->pte_hole(start, next, -1, &walk); } else { /* inside vma */ + process_vma_walk_lock(vma, ops->walk_lock); walk.vma = vma; next = min(end, vma->vm_end); vma = find_vma(mm, vma->vm_end); @@ -531,7 +559,8 @@ int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops, if (!walk.mm) return -EINVAL; - mmap_assert_locked(walk.mm); + process_mm_walk_lock(walk.mm, ops->walk_lock); + process_vma_walk_lock(vma, ops->walk_lock); err = walk_page_test(vma->vm_start, vma->vm_end, &walk); if (err > 0) diff --git a/mm/vmscan.c b/mm/vmscan.c index 85846ee87ca7..b12f027ff098 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4214,6 +4214,7 @@ static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_ static const struct mm_walk_ops mm_walk_ops = { .test_walk = should_skip_vma, .p4d_entry = walk_pud_range, + .walk_lock = PGWALK_RDLOCK, }; int err; From 365af746f5ad32f183a9959c9ae2b2338717d3da Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Wed, 24 May 2023 00:59:42 +0800 Subject: [PATCH 126/163] BACKPORT: riscv: mm: try VMA 
lock-based page fault handling first Attempt VMA lock-based page fault handling first, and fall back to the existing mmap_lock-based handling if that fails. A simple running the ebizzy benchmark on Lichee Pi 4A shows that PER_VMA_LOCK can improve the ebizzy benchmark by about 32.68%. In theory, the more CPUs, the bigger improvement, but I don't have any HW platform which has more than 4 CPUs. This is the riscv variant of "x86/mm: try VMA lock-based page fault handling first". Signed-off-by: Jisheng Zhang Reviewed-by: Guo Ren Reviewed-by: Suren Baghdasaryan Link: https://lore.kernel.org/r/20230523165942.2630-1-jszhang@kernel.org Signed-off-by: Palmer Dabbelt (cherry picked from commit 648321fa0d970c04b4327ac1a053abf43d285931) Bug: 293665307 Change-Id: I59b63add96645d2483f87c2b680d4a7afa86f7b6 Signed-off-by: Suren Baghdasaryan --- arch/riscv/Kconfig | 1 + arch/riscv/mm/fault.c | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 45d52d465e1d..ac586089767d 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -39,6 +39,7 @@ config RISCV select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU select ARCH_SUPPORTS_HUGETLBFS if MMU select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU + select ARCH_SUPPORTS_PER_VMA_LOCK if MMU select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 274bc6dd839f..209cb580a3b0 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -289,6 +289,36 @@ asmlinkage void do_page_fault(struct pt_regs *regs) flags |= FAULT_FLAG_WRITE; else if (cause == EXC_INST_PAGE_FAULT) flags |= FAULT_FLAG_INSTRUCTION; +#ifdef CONFIG_PER_VMA_LOCK + if (!(flags & FAULT_FLAG_USER)) + goto lock_mmap; + + vma = lock_vma_under_rcu(mm, addr); + if (!vma) + goto lock_mmap; + + if (unlikely(access_error(cause, vma))) { + vma_end_read(vma); + goto lock_mmap; + } + + fault = 
handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs); + vma_end_read(vma); + + if (!(fault & VM_FAULT_RETRY)) { + count_vm_vma_lock_event(VMA_LOCK_SUCCESS); + goto done; + } + count_vm_vma_lock_event(VMA_LOCK_RETRY); + + if (fault_signal_pending(fault, regs)) { + if (!user_mode(regs)) + no_context(regs, addr); + return; + } +lock_mmap: +#endif /* CONFIG_PER_VMA_LOCK */ + retry: vma = lock_mm_and_find_vma(mm, addr, regs); if (unlikely(!vma)) { @@ -341,6 +371,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs) mmap_read_unlock(mm); +#ifdef CONFIG_PER_VMA_LOCK +done: +#endif if (unlikely(fault & VM_FAULT_ERROR)) { tsk->thread.bad_cause = cause; mm_fault_error(regs, addr, fault); From abb0f2767ee843b5d4cf5787d76988a6877bbc6e Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Thu, 20 Jul 2023 21:34:36 +0200 Subject: [PATCH 127/163] FROMGIT: mm: don't drop VMA locks in mm_drop_all_locks() Despite its name, mm_drop_all_locks() does not drop _all_ locks; the mmap lock is held write-locked by the caller, and the caller is responsible for dropping the mmap lock at a later point (which will also release the VMA locks). Calling vma_end_write_all() here is dangerous because the caller might have write-locked a VMA with the expectation that it will stay write-locked until the mmap_lock is released, as usual. This _almost_ becomes a problem in the following scenario: An anonymous VMA A and an SGX VMA B are mapped adjacent to each other. Userspace calls munmap() on a range starting at the start address of A and ending in the middle of B. 
Hypothetical call graph with additional notes in brackets: do_vmi_align_munmap [begin first for_each_vma_range loop] vma_start_write [on VMA A] vma_mark_detached [on VMA A] __split_vma [on VMA B] sgx_vma_open [== new->vm_ops->open] sgx_encl_mm_add __mmu_notifier_register [luckily THIS CAN'T ACTUALLY HAPPEN] mm_take_all_locks mm_drop_all_locks vma_end_write_all [drops VMA lock taken on VMA A before] vma_start_write [on VMA B] vma_mark_detached [on VMA B] [end first for_each_vma_range loop] vma_iter_clear_gfp [removes VMAs from maple tree] mmap_write_downgrade unmap_region mmap_read_unlock In this hypothetical scenario, while do_vmi_align_munmap() thinks it still holds a VMA write lock on VMA A, the VMA write lock has actually been invalidated inside __split_vma(). The call from sgx_encl_mm_add() to __mmu_notifier_register() can't actually happen here, as far as I understand, because we are duplicating an existing SGX VMA, but sgx_encl_mm_add() only calls __mmu_notifier_register() for the first SGX VMA created in a given process. So this could only happen in fork(), not on munmap(). But in my view it is just pure luck that this can't happen. Also, we wouldn't actually have any bad consequences from this in do_vmi_align_munmap(), because by the time the bug drops the lock on VMA A, we've already marked VMA A as detached, which makes it completely ineligible for any VMA-locked page faults. But again, that's just pure luck. So remove the vma_end_write_all(), so that VMA write locks are only ever released on mmap_write_unlock() or mmap_write_downgrade(). Also add comments to document the locking rules established by this patch. 
Link: https://lkml.kernel.org/r/20230720193436.454247-1-jannh@google.com Fixes: eeff9a5d47f8 ("mm/mmap: prevent pagefault handler from racing with mmu_notifier registration") Signed-off-by: Jann Horn Reviewed-by: Suren Baghdasaryan Signed-off-by: Andrew Morton (cherry picked from commit 28ed252b44fb2f1efaef1287eea267d54e79f7d5 https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 293665307 Change-Id: Ic0b28229d175e3125de1ef274282fbf43b556db7 Signed-off-by: Suren Baghdasaryan --- include/linux/mm.h | 5 +++++ include/linux/mmap_lock.h | 8 ++++++++ mm/mmap.c | 7 ++++++- 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 82b05bb18af3..4eeb8b98a2e8 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -685,6 +685,11 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq) return (vma->vm_lock_seq == *mm_lock_seq); } +/* + * Begin writing to a VMA. + * Exclude concurrent readers under the per-VMA lock until the currently + * write-locked mmap_lock is dropped or downgraded. + */ static inline void vma_start_write(struct vm_area_struct *vma) { int mm_lock_seq; diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index e05e167dbd16..4745ea859398 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -73,6 +73,14 @@ static inline void mmap_assert_write_locked(struct mm_struct *mm) } #ifdef CONFIG_PER_VMA_LOCK +/* + * Drop all currently-held per-VMA locks. + * This is called from the mmap_lock implementation directly before releasing + * a write-locked mmap_lock (or downgrading it to read-locked). + * This should normally NOT be called manually from other places. + * If you want to call this manually anyway, keep in mind that this will release + * *all* VMA write locks, including ones from further up the stack. 
+ */ static inline void vma_end_write_all(struct mm_struct *mm) { mmap_assert_write_locked(mm); diff --git a/mm/mmap.c b/mm/mmap.c index 5d7129b3f03f..e7bbe2cf8f03 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -3768,6 +3768,12 @@ int mm_take_all_locks(struct mm_struct *mm) mutex_lock(&mm_all_locks_mutex); + /* + * vma_start_write() does not have a complement in mm_drop_all_locks() + * because vma_start_write() is always asymmetrical; it marks a VMA as + * being written to until mmap_write_unlock() or mmap_write_downgrade() + * is reached. + */ mas_for_each(&mas, vma, ULONG_MAX) { if (signal_pending(current)) goto out_unlock; @@ -3864,7 +3870,6 @@ void mm_drop_all_locks(struct mm_struct *mm) if (vma->vm_file && vma->vm_file->f_mapping) vm_unlock_mapping(vma->vm_file->f_mapping); } - vma_end_write_all(mm); mutex_unlock(&mm_all_locks_mutex); } From 5f0ca924aa0fb574f671ca45ad801a0f510f7d04 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 4 Aug 2023 08:27:20 -0700 Subject: [PATCH 128/163] FROMGIT: mm: for !CONFIG_PER_VMA_LOCK equate write lock assertion for vma and mmap When CONFIG_PER_VMA_LOCK=n, vma_assert_write_locked() should be equivalent to mmap_assert_write_locked(). Link: https://lkml.kernel.org/r/20230804152724.3090321-3-surenb@google.com Suggested-by: Jann Horn Signed-off-by: Suren Baghdasaryan Reviewed-by: Liam R. 
Howlett Cc: Linus Torvalds Signed-off-by: Andrew Morton (cherry picked from commit f0cdd55d6dd8c7a1b333049e4f83eb25fef312ad https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 293665307 Change-Id: Ie20ff6c35fb2f561f2061c5d0135238b7b03afa5 Signed-off-by: Suren Baghdasaryan --- include/linux/mm.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 4eeb8b98a2e8..54e1b27a65cb 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -750,7 +750,8 @@ static inline void vma_end_read(struct vm_area_struct *vma) {} static inline void vma_start_write(struct vm_area_struct *vma) {} static inline bool vma_try_start_write(struct vm_area_struct *vma) { return true; } -static inline void vma_assert_write_locked(struct vm_area_struct *vma) {} +static inline void vma_assert_write_locked(struct vm_area_struct *vma) + { mmap_assert_write_locked(vma->vm_mm); } static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached) {} From ad18923856548e80f7264f37a7aaffbe1a6de793 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 4 Aug 2023 08:27:21 -0700 Subject: [PATCH 129/163] FROMGIT: mm: replace mmap with vma write lock assertions when operating on a vma Vma write lock assertion always includes mmap write lock assertion and additional vma lock checks when per-VMA locks are enabled. Replace weaker mmap_assert_write_locked() assertions with stronger vma_assert_write_locked() ones when we are operating on a vma which is expected to be locked. Link: https://lkml.kernel.org/r/20230804152724.3090321-4-surenb@google.com Suggested-by: Jann Horn Signed-off-by: Suren Baghdasaryan Reviewed-by: Liam R. 
Howlett Cc: Linus Torvalds Signed-off-by: Andrew Morton (cherry picked from commit 928a31b91cf64aa99a8999dcd66bec0ad02f64ef https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 293665307 Change-Id: I861db0510612f571f2ca44e0a9d7e01274d4eb36 Signed-off-by: Suren Baghdasaryan --- mm/hugetlb.c | 2 +- mm/memory.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 34d7816b13bc..fe2fcc57328f 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -4968,7 +4968,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, src_vma->vm_start, src_vma->vm_end); mmu_notifier_invalidate_range_start(&range); - mmap_assert_write_locked(src); + vma_assert_write_locked(src_vma); raw_write_seqcount_begin(&src->write_protect_seq); } else { /* diff --git a/mm/memory.c b/mm/memory.c index e9b7cd28ae02..a9ffa95e2386 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1319,7 +1319,7 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) * Use the raw variant of the seqcount_t write API to avoid * lockdep complaining about preemptibility. */ - mmap_assert_write_locked(src_mm); + vma_assert_write_locked(src_vma); raw_write_seqcount_begin(&src_mm->write_protect_seq); } From a8a479ed9629c63cf4d1dbf31e4d068e9db6629f Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 4 Aug 2023 08:27:22 -0700 Subject: [PATCH 130/163] FROMGIT: mm: lock vma explicitly before doing vm_flags_reset and vm_flags_reset_once Implicit vma locking inside vm_flags_reset() and vm_flags_reset_once() is not obvious and makes it hard to understand where vma locking is happening. Also in some cases (like in dup_userfaultfd()) vma should be locked earlier than vma_flags modification. To make locking more visible, change these functions to assert that the vma write lock is taken and explicitly lock the vma beforehand. Fix userfaultfd functions which should lock the vma earlier. 
Link: https://lkml.kernel.org/r/20230804152724.3090321-5-surenb@google.com Suggested-by: Linus Torvalds Signed-off-by: Suren Baghdasaryan Cc: Jann Horn Cc: Liam R. Howlett Signed-off-by: Andrew Morton (cherry picked from commit f26ee2701ab3ecd771084b44f262bd010accab72 https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 293665307 Change-Id: I62f0f25c883588c3ba7a322b3a4929df01413591 Signed-off-by: Suren Baghdasaryan --- arch/powerpc/kvm/book3s_hv_uvmem.c | 1 + fs/userfaultfd.c | 6 ++++++ include/linux/mm.h | 10 +++++++--- mm/madvise.c | 5 ++--- mm/mlock.c | 3 ++- mm/mprotect.c | 1 + 6 files changed, 19 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index 303869f4855f..6f121a034cef 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -410,6 +410,7 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm, ret = H_STATE; break; } + vma_start_write(vma); /* Copy vm_flags to avoid partial modifications in ksm_madvise */ vm_flags = vma->vm_flags; ret = ksm_madvise(vma, vma->vm_start, vma->vm_end, diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 65bf92615772..b739aa508877 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -632,6 +632,7 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, mmap_write_lock(mm); for_each_vma(vmi, vma) { if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) { + vma_start_write(vma); vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS); @@ -667,6 +668,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs) octx = vma->vm_userfaultfd_ctx.ctx; if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) { + vma_start_write(vma); vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS); return 0; @@ -748,6 +750,7 @@ void mremap_userfaultfd_prep(struct 
vm_area_struct *vma, atomic_inc(&ctx->mmap_changing); } else { /* Drop uffd context if remap feature not enabled */ + vma_start_write(vma); vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS); } @@ -906,6 +909,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file) prev = vma; } + vma_start_write(vma); userfaultfd_set_vm_flags(vma, new_flags); vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; } @@ -1474,6 +1478,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, * the next vma was merged into the current one and * the current one has not been updated yet. */ + vma_start_write(vma); userfaultfd_set_vm_flags(vma, new_flags); vma->vm_userfaultfd_ctx.ctx = ctx; @@ -1662,6 +1667,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, * the next vma was merged into the current one and * the current one has not been updated yet. */ + vma_start_write(vma); userfaultfd_set_vm_flags(vma, new_flags); vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; diff --git a/include/linux/mm.h b/include/linux/mm.h index 54e1b27a65cb..9acb8a04116d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -779,18 +779,22 @@ static inline void vm_flags_init(struct vm_area_struct *vma, ACCESS_PRIVATE(vma, __vm_flags) = flags; } -/* Use when VMA is part of the VMA tree and modifications need coordination */ +/* + * Use when VMA is part of the VMA tree and modifications need coordination + * Note: vm_flags_reset and vm_flags_reset_once do not lock the vma and + * it should be locked explicitly beforehand. 
+ */ static inline void vm_flags_reset(struct vm_area_struct *vma, vm_flags_t flags) { - vma_start_write(vma); + vma_assert_write_locked(vma); vm_flags_init(vma, flags); } static inline void vm_flags_reset_once(struct vm_area_struct *vma, vm_flags_t flags) { - vma_start_write(vma); + vma_assert_write_locked(vma); WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags); } diff --git a/mm/madvise.c b/mm/madvise.c index 105c17011a2c..21b5de11c329 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -180,9 +180,8 @@ static int madvise_update_vma(struct vm_area_struct *vma, } success: - /* - * vm_flags is protected by the mmap_lock held in write mode. - */ + /* vm_flags is protected by the mmap_lock held in write mode. */ + vma_start_write(vma); vm_flags_reset(vma, new_flags); if (!vma->vm_file) { error = replace_anon_vma_name(vma, anon_name); diff --git a/mm/mlock.c b/mm/mlock.c index 827df077a5eb..580f1d39f454 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -381,6 +381,7 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma, */ if (newflags & VM_LOCKED) newflags |= VM_IO; + vma_start_write(vma); vm_flags_reset_once(vma, newflags); lru_add_drain(); @@ -454,9 +455,9 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, * It's okay if try_to_unmap_one unmaps a page just after we * set VM_LOCKED, populate_vma_page_range will bring it back. */ - if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) { /* No work to do, and mlocking twice would be wrong */ + vma_start_write(vma); vm_flags_reset(vma, newflags); } else { mlock_vma_pages_range(vma, start, end, newflags); diff --git a/mm/mprotect.c b/mm/mprotect.c index 3a04999e8353..45a87f0ce625 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -631,6 +631,7 @@ mprotect_fixup(struct mmu_gather *tlb, struct vm_area_struct *vma, * vm_flags and vm_page_prot are protected by the mmap_lock * held in write mode. 
*/ + vma_start_write(vma); vm_flags_reset(vma, newflags); /* * We want to check manually if we can change individual PTEs writable From 0f0b09c02c2adb521a9cd4a0dc023ced3d16e465 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 4 Aug 2023 08:27:23 -0700 Subject: [PATCH 131/163] BACKPORT: FROMGIT: mm: always lock new vma before inserting into vma tree While it's not strictly necessary to lock a newly created vma before adding it into the vma tree (as long as no further changes are performed to it), it seems like a good policy to lock it and prevent accidental changes after it becomes visible to the page faults. Lock the vma before adding it into the vma tree. Link: https://lkml.kernel.org/r/20230804152724.3090321-6-surenb@google.com Suggested-by: Jann Horn Signed-off-by: Suren Baghdasaryan Reviewed-by: Liam R. Howlett Cc: Linus Torvalds Signed-off-by: Andrew Morton (cherry picked from commit c3249c06c48dda30f93e62b57773d5ed409d4f77 https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) [surenb: resolved conflicts due to changes in vma_merge() and __vma_adjust()] Bug: 293665307 Change-Id: I4ee0d2abcc8a3f45545f470f1bf7f0be728d6f44 Signed-off-by: Suren Baghdasaryan --- mm/mmap.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index e7bbe2cf8f03..bfe139bf94af 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -484,6 +484,8 @@ static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma) if (mas_preallocate(&mas, vma, GFP_KERNEL)) return -ENOMEM; + vma_start_write(vma); + if (vma->vm_file) { mapping = vma->vm_file->f_mapping; i_mmap_lock_write(mapping); @@ -744,8 +746,10 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, if (adjust_next < 0) mas_set_range(&mas, next->vm_start + adjust_next, next->vm_end - 1); - else if (insert) + else if (insert) { + vma_start_write(insert); mas_set_range(&mas, insert->vm_start, insert->vm_end - 1); + } if (mas_preallocate(&mas, vma, 
GFP_KERNEL)) @@ -843,6 +847,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, * (it may either follow vma or precede it). */ mas_reset(&mas); + vma_start_write(insert); vma_mas_store(insert, &mas); mm->map_count++; } @@ -3204,6 +3209,7 @@ static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma, vma->vm_pgoff = addr >> PAGE_SHIFT; vm_flags_init(vma, flags); vma->vm_page_prot = vm_get_page_prot(flags); + vma_start_write(vma); mas_set_range(mas, vma->vm_start, addr + len - 1); if (mas_store_gfp(mas, vma, GFP_KERNEL)) goto mas_store_fail; @@ -3450,7 +3456,6 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, get_file(new_vma->vm_file); if (new_vma->vm_ops && new_vma->vm_ops->open) new_vma->vm_ops->open(new_vma); - vma_start_write(new_vma); if (vma_link(mm, new_vma)) goto out_vma_link; *need_rmap_locks = false; From 939d4b1ccc068d96bb8d10ea705f9bd464e53807 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 4 Aug 2023 08:27:24 -0700 Subject: [PATCH 132/163] BACKPORT: FROMGIT: mm: move vma locking out of vma_prepare and dup_anon_vma vma_prepare() is currently the central place where vmas are being locked before vma_complete() applies changes to them. While this is convenient, it also obscures vma locking and makes it harder to follow the locking rules. Move vma locking out of vma_prepare() and take vma locks explicitly at the locations where vmas are being modified. Move vma locking and replace it with an assertion inside dup_anon_vma() to further clarify the locking pattern inside vma_merge(). Link: https://lkml.kernel.org/r/20230804152724.3090321-7-surenb@google.com Suggested-by: Linus Torvalds Suggested-by: Liam R. 
Howlett Signed-off-by: Suren Baghdasaryan Cc: Jann Horn Signed-off-by: Andrew Morton (cherry picked from commit b1985ca5e7e6464d205a98a78cca229224346c21 https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) [surenb: skip changes in vma_prepare() which does not exist, skip changes in vma_merge() since required locks are already in __vma_adjust(), skip change in dup_anon_vma() since required locks are already in place, skip unnecessary lock in do_brk_flags()] Bug: 293665307 Change-Id: I99261aa1db3bec73795e63c333768bc68da8045c Signed-off-by: Suren Baghdasaryan --- mm/mmap.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index bfe139bf94af..bd2140cfcf36 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -531,6 +531,7 @@ inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma, struct file *file = vma->vm_file; bool remove_next = false; + vma_start_write(vma); if (next && (vma != next) && (end == next->vm_end)) { remove_next = true; /* Lock the VMA before removing it */ @@ -539,7 +540,6 @@ inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma, int error; anon_vma = next->anon_vma; - vma_start_write(vma); vma->anon_vma = anon_vma; error = anon_vma_clone(vma, next); if (error) @@ -556,7 +556,6 @@ inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma, if (mas_preallocate(mas, vma, GFP_KERNEL)) goto nomem; - vma_start_write(vma); vma_adjust_trans_huge(vma, start, end, 0); if (file) { @@ -2433,6 +2432,9 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, if (new->vm_ops && new->vm_ops->open) new->vm_ops->open(new); + vma_start_write(vma); + vma_start_write(new); + if (new_below) err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + ((addr - new->vm_start) >> PAGE_SHIFT), new); From 693d905ec0dab0a4a2ed0646c8c0ad2c769096b6 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 30 Jun 2023 14:19:54 -0700 Subject: [PATCH 133/163] BACKPORT: FROMGIT: 
mm: drop per-VMA lock when returning VM_FAULT_RETRY or VM_FAULT_COMPLETED handle_mm_fault returning VM_FAULT_RETRY or VM_FAULT_COMPLETED means mmap_lock has been released. However with per-VMA locks behavior is different and the caller should still release it. To make the rules consistent for the caller, drop the per-VMA lock when returning VM_FAULT_RETRY or VM_FAULT_COMPLETED. Currently the only path returning VM_FAULT_RETRY under per-VMA locks is do_swap_page and no path returns VM_FAULT_COMPLETED for now. Link: https://lkml.kernel.org/r/20230630211957.1341547-4-surenb@google.com Signed-off-by: Suren Baghdasaryan Acked-by: Peter Xu Cc: Alistair Popple Cc: Al Viro Cc: Christian Brauner Cc: Christoph Hellwig Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Jan Kara Cc: Johannes Weiner Cc: Josef Bacik Cc: Laurent Dufour Cc: Liam R. Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Michal Hocko Cc: Michel Lespinasse Cc: Minchan Kim Cc: Pavel Tatashin Cc: Punit Agrawal Cc: Vlastimil Babka Cc: Yu Zhao Signed-off-by: Andrew Morton (cherry picked from commit 5197d920745dd42eae023986dbf053107ac238db https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) [surenb: add the code from missing sanitize_fault_flags directly into handle_mm_fault, add the fix for riscv] Bug: 161210518 Change-Id: Iefd4e49bda940c457a70ecf40d074ad532959759 Signed-off-by: Suren Baghdasaryan --- arch/arm64/mm/fault.c | 3 ++- arch/powerpc/mm/fault.c | 3 ++- arch/riscv/mm/fault.c | 3 ++- arch/s390/mm/fault.c | 3 ++- arch/x86/mm/fault.c | 3 ++- mm/memory.c | 12 ++++++++++++ 6 files changed, 22 insertions(+), 5 deletions(-) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index e34b46785150..b6ac6caeb662 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -612,7 +612,8 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, goto lock_mmap; } fault = handle_mm_fault(vma, addr, 
mm_flags | FAULT_FLAG_VMA_LOCK, regs); - vma_end_read(vma); + if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED))) + vma_end_read(vma); if (!(fault & VM_FAULT_RETRY)) { count_vm_vma_lock_event(VMA_LOCK_SUCCESS); diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 5bfdf6ecfa96..82954d0e6906 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -489,7 +489,8 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address, } fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs); - vma_end_read(vma); + if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED))) + vma_end_read(vma); if (!(fault & VM_FAULT_RETRY)) { count_vm_vma_lock_event(VMA_LOCK_SUCCESS); diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 209cb580a3b0..88b70983bfb7 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -303,7 +303,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs) } fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs); - vma_end_read(vma); + if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED))) + vma_end_read(vma); if (!(fault & VM_FAULT_RETRY)) { count_vm_vma_lock_event(VMA_LOCK_SUCCESS); diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 98a0091bb097..4e1f2790e6ff 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -414,7 +414,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access) goto lock_mmap; } fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs); - vma_end_read(vma); + if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED))) + vma_end_read(vma); if (!(fault & VM_FAULT_RETRY)) { count_vm_vma_lock_event(VMA_LOCK_SUCCESS); goto out; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 8a74089d9f2e..72044a9342d0 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1362,7 +1362,8 @@ void do_user_addr_fault(struct pt_regs *regs, goto lock_mmap; } fault = handle_mm_fault(vma, address, flags | 
FAULT_FLAG_VMA_LOCK, regs); - vma_end_read(vma); + if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED))) + vma_end_read(vma); if (!(fault & VM_FAULT_RETRY)) { count_vm_vma_lock_event(VMA_LOCK_SUCCESS); diff --git a/mm/memory.c b/mm/memory.c index a9ffa95e2386..d1df4eac9d13 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3771,6 +3771,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) if (vmf->flags & FAULT_FLAG_VMA_LOCK) { ret = VM_FAULT_RETRY; + vma_end_read(vma); goto out; } @@ -5243,6 +5244,17 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, __set_current_state(TASK_RUNNING); +#ifdef CONFIG_PER_VMA_LOCK + /* + * Per-VMA locks can't be used with FAULT_FLAG_RETRY_NOWAIT because of + * the assumption that lock is dropped on VM_FAULT_RETRY. + */ + if (WARN_ON_ONCE((flags & + (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT)) == + (FAULT_FLAG_VMA_LOCK | FAULT_FLAG_RETRY_NOWAIT))) + return VM_FAULT_SIGSEGV; +#endif + /* do counter updates before entering really critical section. */ check_sync_rss_stat(current); From f8a65b694b03f93375690519370bdc43b0364529 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 30 Jun 2023 14:19:55 -0700 Subject: [PATCH 134/163] FROMGIT: mm: change folio_lock_or_retry to use vm_fault directly Change folio_lock_or_retry to accept vm_fault struct and return the vm_fault_t directly. Link: https://lkml.kernel.org/r/20230630211957.1341547-5-surenb@google.com Signed-off-by: Suren Baghdasaryan Suggested-by: Matthew Wilcox Acked-by: Peter Xu Cc: Alistair Popple Cc: Al Viro Cc: Christian Brauner Cc: Christoph Hellwig Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Jan Kara Cc: Johannes Weiner Cc: Josef Bacik Cc: Laurent Dufour Cc: Liam R. 
Howlett Cc: Lorenzo Stoakes Cc: Michal Hocko Cc: Michel Lespinasse Cc: Minchan Kim Cc: Pavel Tatashin Cc: Punit Agrawal Cc: Vlastimil Babka Cc: Yu Zhao Signed-off-by: Andrew Morton (cherry picked from commit af27bb856a0a29a0673aabe163e4774df67a8bcd https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 161210518 Change-Id: I9d203e801f0d5517fba8430f9ab82d4063b517f3 Signed-off-by: Suren Baghdasaryan --- include/linux/pagemap.h | 11 ++++++----- mm/filemap.c | 22 ++++++++++++---------- mm/memory.c | 14 ++++++-------- 3 files changed, 24 insertions(+), 23 deletions(-) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index df232c0003de..17a18aed632b 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -882,8 +882,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page, void __folio_lock(struct folio *folio); int __folio_lock_killable(struct folio *folio); -bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm, - unsigned int flags); +vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf); void unlock_page(struct page *page); void folio_unlock(struct folio *folio); @@ -997,11 +996,13 @@ static inline int lock_page_killable(struct page *page) * Return value and mmap_lock implications depend on flags; see * __folio_lock_or_retry(). 
*/ -static inline bool folio_lock_or_retry(struct folio *folio, - struct mm_struct *mm, unsigned int flags) +static inline vm_fault_t folio_lock_or_retry(struct folio *folio, + struct vm_fault *vmf) { might_sleep(); - return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags); + if (!folio_trylock(folio)) + return __folio_lock_or_retry(folio, vmf); + return 0; } /* diff --git a/mm/filemap.c b/mm/filemap.c index 695d92428173..ab91e60d2d29 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1707,32 +1707,34 @@ static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait) /* * Return values: - * true - folio is locked; mmap_lock is still held. - * false - folio is not locked. + * 0 - folio is locked. + * non-zero - folio is not locked. * mmap_lock has been released (mmap_read_unlock(), unless flags had both * FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in * which case mmap_lock is still held. * - * If neither ALLOW_RETRY nor KILLABLE are set, will always return true + * If neither ALLOW_RETRY nor KILLABLE are set, will always return 0 * with the folio locked and the mmap_lock unperturbed. */ -bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm, - unsigned int flags) +vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf) { + struct mm_struct *mm = vmf->vma->vm_mm; + unsigned int flags = vmf->flags; + if (fault_flag_allow_retry_first(flags)) { /* * CAUTION! In this case, mmap_lock is not released - * even though return 0. + * even though return VM_FAULT_RETRY. 
*/ if (flags & FAULT_FLAG_RETRY_NOWAIT) - return false; + return VM_FAULT_RETRY; mmap_read_unlock(mm); if (flags & FAULT_FLAG_KILLABLE) folio_wait_locked_killable(folio); else folio_wait_locked(folio); - return false; + return VM_FAULT_RETRY; } if (flags & FAULT_FLAG_KILLABLE) { bool ret; @@ -1740,13 +1742,13 @@ bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm, ret = __folio_lock_killable(folio); if (ret) { mmap_read_unlock(mm); - return false; + return VM_FAULT_RETRY; } } else { __folio_lock(folio); } - return true; + return 0; } /** diff --git a/mm/memory.c b/mm/memory.c index d1df4eac9d13..53b1759524a7 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3638,6 +3638,7 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) struct folio *folio = page_folio(vmf->page); struct vm_area_struct *vma = vmf->vma; struct mmu_notifier_range range; + vm_fault_t ret; /* * We need a reference to lock the folio because we don't hold @@ -3650,9 +3651,10 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) if (!folio_try_get(folio)) return 0; - if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags)) { + ret = folio_lock_or_retry(folio, vmf); + if (ret) { folio_put(folio); - return VM_FAULT_RETRY; + return ret; } mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma, vma->vm_mm, vmf->address & PAGE_MASK, @@ -3762,7 +3764,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) bool exclusive = false; swp_entry_t entry; pte_t pte; - int locked; vm_fault_t ret = 0; void *shadow = NULL; @@ -3887,12 +3888,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) goto out_release; } - locked = folio_lock_or_retry(folio, vma->vm_mm, vmf->flags); - - if (!locked) { - ret |= VM_FAULT_RETRY; + ret |= folio_lock_or_retry(folio, vmf); + if (ret & VM_FAULT_RETRY) goto out_release; - } if (swapcache) { /* From e704d0e4f9eefecab3253831d9e12494a7e7a88a Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 30 Jun 2023 14:19:56 -0700 
Subject: [PATCH 135/163] FROMGIT: mm: handle swap page faults under per-VMA lock When page fault is handled under per-VMA lock protection, all swap page faults are retried with mmap_lock because folio_lock_or_retry has to drop and reacquire mmap_lock if folio could not be immediately locked. Follow the same pattern as mmap_lock to drop per-VMA lock when waiting for folio and retrying once folio is available. With this obstacle removed, enable do_swap_page to operate under per-VMA lock protection. Drivers implementing ops->migrate_to_ram might still rely on mmap_lock, therefore we have to fall back to mmap_lock in that particular case. Note that the only time do_swap_page calls synchronous swap_readpage is when SWP_SYNCHRONOUS_IO is set, which is only set for QUEUE_FLAG_SYNCHRONOUS devices: brd, zram and nvdimms (both btt and pmem). Therefore we don't sleep in this path, and there's no need to drop the mmap or per-VMA lock. Link: https://lkml.kernel.org/r/20230630211957.1341547-6-surenb@google.com Signed-off-by: Suren Baghdasaryan Tested-by: Alistair Popple Reviewed-by: Alistair Popple Acked-by: Peter Xu Cc: Al Viro Cc: Christian Brauner Cc: Christoph Hellwig Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Jan Kara Cc: Johannes Weiner Cc: Josef Bacik Cc: Laurent Dufour Cc: Liam R. 
Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Michal Hocko Cc: Michel Lespinasse Cc: Minchan Kim Cc: Pavel Tatashin Cc: Punit Agrawal Cc: Vlastimil Babka Cc: Yu Zhao Signed-off-by: Andrew Morton (cherry picked from commit cc989adb5544594d8c12893eda3c6df8682de11b https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 161210518 Change-Id: I5d80f435b2dbdc3f3d02be056e893f6fedbc7a98 Signed-off-by: Suren Baghdasaryan --- include/linux/mm.h | 13 +++++++++++++ mm/filemap.c | 17 ++++++++--------- mm/memory.c | 16 ++++++++++------ 3 files changed, 31 insertions(+), 15 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 9acb8a04116d..4e52968266e3 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -738,6 +738,14 @@ static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached) vma->detached = detached; } +static inline void release_fault_lock(struct vm_fault *vmf) +{ + if (vmf->flags & FAULT_FLAG_VMA_LOCK) + vma_end_read(vmf->vma); + else + mmap_read_unlock(vmf->vma->vm_mm); +} + struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, unsigned long address); @@ -755,6 +763,11 @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma) static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached) {} +static inline void release_fault_lock(struct vm_fault *vmf) +{ + mmap_read_unlock(vmf->vma->vm_mm); +} + #endif /* CONFIG_PER_VMA_LOCK */ /* diff --git a/mm/filemap.c b/mm/filemap.c index ab91e60d2d29..65bc874b7996 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1709,27 +1709,26 @@ static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait) * Return values: * 0 - folio is locked. * non-zero - folio is not locked. - * mmap_lock has been released (mmap_read_unlock(), unless flags had both - * FAULT_FLAG_ALLOW_RETRY and FAULT_FLAG_RETRY_NOWAIT set, in - * which case mmap_lock is still held. 
+ * mmap_lock or per-VMA lock has been released (mmap_read_unlock() or + * vma_end_read()), unless flags had both FAULT_FLAG_ALLOW_RETRY and + * FAULT_FLAG_RETRY_NOWAIT set, in which case the lock is still held. * * If neither ALLOW_RETRY nor KILLABLE are set, will always return 0 - * with the folio locked and the mmap_lock unperturbed. + * with the folio locked and the mmap_lock/per-VMA lock is left unperturbed. */ vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf) { - struct mm_struct *mm = vmf->vma->vm_mm; unsigned int flags = vmf->flags; if (fault_flag_allow_retry_first(flags)) { /* - * CAUTION! In this case, mmap_lock is not released - * even though return VM_FAULT_RETRY. + * CAUTION! In this case, mmap_lock/per-VMA lock is not + * released even though returning VM_FAULT_RETRY. */ if (flags & FAULT_FLAG_RETRY_NOWAIT) return VM_FAULT_RETRY; - mmap_read_unlock(mm); + release_fault_lock(vmf); if (flags & FAULT_FLAG_KILLABLE) folio_wait_locked_killable(folio); else @@ -1741,7 +1740,7 @@ vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf) ret = __folio_lock_killable(folio); if (ret) { - mmap_read_unlock(mm); + release_fault_lock(vmf); return VM_FAULT_RETRY; } } else { diff --git a/mm/memory.c b/mm/memory.c index 53b1759524a7..e8841ce67244 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3770,12 +3770,6 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) if (!pte_unmap_same(vmf)) goto out; - if (vmf->flags & FAULT_FLAG_VMA_LOCK) { - ret = VM_FAULT_RETRY; - vma_end_read(vma); - goto out; - } - entry = pte_to_swp_entry(vmf->orig_pte); if (unlikely(non_swap_entry(entry))) { if (is_migration_entry(entry)) { @@ -3785,6 +3779,16 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) vmf->page = pfn_swap_entry_to_page(entry); ret = remove_device_exclusive_entry(vmf); } else if (is_device_private_entry(entry)) { + if (vmf->flags & FAULT_FLAG_VMA_LOCK) { + /* + * migrate_to_ram is not yet ready to operate + * under VMA lock. 
+ */ + vma_end_read(vma); + ret = VM_FAULT_RETRY; + goto out; + } + vmf->page = pfn_swap_entry_to_page(entry); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); From 250f19771f4cb35cd59481af24464f79a40752c0 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 30 Jun 2023 14:19:57 -0700 Subject: [PATCH 136/163] FROMGIT: mm: handle userfaults under VMA lock Enable handle_userfault to operate under VMA lock by releasing VMA lock instead of mmap_lock and retrying. Note that FAULT_FLAG_RETRY_NOWAIT should never be used when handling faults under per-VMA lock protection because that would break the assumption that lock is dropped on retry. Link: https://lkml.kernel.org/r/20230630211957.1341547-7-surenb@google.com Signed-off-by: Suren Baghdasaryan Acked-by: Peter Xu Cc: Alistair Popple Cc: Al Viro Cc: Christian Brauner Cc: Christoph Hellwig Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Jan Kara Cc: Johannes Weiner Cc: Josef Bacik Cc: Laurent Dufour Cc: Liam R. Howlett Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Michal Hocko Cc: Michel Lespinasse Cc: Minchan Kim Cc: Pavel Tatashin Cc: Punit Agrawal Cc: Vlastimil Babka Cc: Yu Zhao Signed-off-by: Andrew Morton (cherry picked from commit c3c986f59c814edecc096a049d67e5791083388b https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 161210518 Change-Id: I9df667dae39024e5473252d7347ec7929f7f999e Signed-off-by: Suren Baghdasaryan --- fs/userfaultfd.c | 34 ++++++++++++++-------------------- include/linux/mm.h | 24 ++++++++++++++++++++++++ mm/memory.c | 7 ------- 3 files changed, 38 insertions(+), 27 deletions(-) diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index b739aa508877..bec3e2341421 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -247,18 +247,17 @@ static inline struct uffd_msg userfault_msg(unsigned long address, * hugepmd ranges. 
*/ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, - struct vm_area_struct *vma, - unsigned long address, - unsigned long flags, - unsigned long reason) + struct vm_fault *vmf, + unsigned long reason) { + struct vm_area_struct *vma = vmf->vma; struct mm_struct *mm = ctx->mm; pte_t *ptep, pte; bool ret = true; - mmap_assert_locked(mm); + assert_fault_locked(vmf); - ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma)); + ptep = huge_pte_offset(mm, vmf->address, vma_mmu_pagesize(vma)); if (!ptep) goto out; @@ -280,10 +279,8 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, } #else static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, - struct vm_area_struct *vma, - unsigned long address, - unsigned long flags, - unsigned long reason) + struct vm_fault *vmf, + unsigned long reason) { return false; /* should never get here */ } @@ -297,11 +294,11 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, * threads. */ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx, - unsigned long address, - unsigned long flags, + struct vm_fault *vmf, unsigned long reason) { struct mm_struct *mm = ctx->mm; + unsigned long address = vmf->address; pgd_t *pgd; p4d_t *p4d; pud_t *pud; @@ -309,7 +306,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx, pte_t *pte; bool ret = true; - mmap_assert_locked(mm); + assert_fault_locked(vmf); pgd = pgd_offset(mm, address); if (!pgd_present(*pgd)) @@ -416,7 +413,7 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) * Coredumping runs without mmap_lock so we can only check that * the mmap_lock is held, if PF_DUMPCORE was not set. 
*/ - mmap_assert_locked(mm); + assert_fault_locked(vmf); ctx = vmf->vma->vm_userfaultfd_ctx.ctx; if (!ctx) @@ -523,13 +520,10 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) spin_unlock_irq(&ctx->fault_pending_wqh.lock); if (!is_vm_hugetlb_page(vmf->vma)) - must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags, - reason); + must_wait = userfaultfd_must_wait(ctx, vmf, reason); else - must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma, - vmf->address, - vmf->flags, reason); - mmap_read_unlock(mm); + must_wait = userfaultfd_huge_must_wait(ctx, vmf, reason); + release_fault_lock(vmf); if (likely(must_wait && !READ_ONCE(ctx->released))) { wake_up_poll(&ctx->fd_wqh, EPOLLIN); diff --git a/include/linux/mm.h b/include/linux/mm.h index 4e52968266e3..64eb00e3c58b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -723,6 +723,17 @@ static inline bool vma_try_start_write(struct vm_area_struct *vma) return true; } +static inline void vma_assert_locked(struct vm_area_struct *vma) +{ + int mm_lock_seq; + + if (__is_vma_write_locked(vma, &mm_lock_seq)) + return; + + lockdep_assert_held(&vma->vm_lock->lock); + VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_lock->lock), vma); +} + static inline void vma_assert_write_locked(struct vm_area_struct *vma) { int mm_lock_seq; @@ -746,6 +757,14 @@ static inline void release_fault_lock(struct vm_fault *vmf) mmap_read_unlock(vmf->vma->vm_mm); } +static inline void assert_fault_locked(struct vm_fault *vmf) +{ + if (vmf->flags & FAULT_FLAG_VMA_LOCK) + vma_assert_locked(vmf->vma); + else + mmap_assert_locked(vmf->vma->vm_mm); +} + struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, unsigned long address); @@ -768,6 +787,11 @@ static inline void release_fault_lock(struct vm_fault *vmf) mmap_read_unlock(vmf->vma->vm_mm); } +static inline void assert_fault_locked(struct vm_fault *vmf) +{ + mmap_assert_locked(vmf->vma->vm_mm); +} + #endif /* CONFIG_PER_VMA_LOCK */ /* diff --git a/mm/memory.c 
b/mm/memory.c index e8841ce67244..d8a225dca633 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5454,13 +5454,6 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, if (unlikely(!vma->anon_vma)) goto inval_end_read; - /* - * Due to the possibility of userfault handler dropping mmap_lock, avoid - * it for now and fall back to page fault handling under mmap_lock. - */ - if (userfaultfd_armed(vma)) - goto inval_end_read; - /* Check since vm_start/vm_end might change before we lock the VMA */ if (unlikely(address < vma->vm_start || address >= vma->vm_end)) goto inval_end_read; From f4b32b7f15433d69095bff1310ea7ce701bbee86 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Wed, 12 Jul 2023 12:56:52 -0700 Subject: [PATCH 137/163] FROMGIT: mm: fix a lockdep issue in vma_assert_write_locked __is_vma_write_locked() can be used only when mmap_lock is write-locked to guarantee vm_lock_seq and mm_lock_seq stability during the check. Therefore it asserts this condition before further checks. Because of that it can't be used unless the user expects the mmap_lock to be write-locked. vma_assert_locked() can't assume this before ensuring that VMA is not read-locked. Change the order of the checks in vma_assert_locked() to check if the VMA is read-locked first and only then assert if it's not write-locked. Link: https://lkml.kernel.org/r/20230712195652.969194-1-surenb@google.com Fixes: 50b88b63e3e4 ("mm: handle userfaults under VMA lock") Signed-off-by: Suren Baghdasaryan Reported-by: Liam R. Howlett Closes: https://lore.kernel.org/all/20230712022620.3yytbdh24b7i4zrn@revolver/ Reported-by: syzbot+339b02f826caafd5f7a8@syzkaller.appspotmail.com Closes: https://lore.kernel.org/all/0000000000002db68f05ffb791bc@google.com/ Cc: Christian Brauner Cc: Laurent Dufour Cc: Matthew Wilcox (Oracle) Cc: Michel Lespinasse Cc: Paul E. 
McKenney Cc: Vlastimil Babka Signed-off-by: Andrew Morton (cherry picked from commit 781537884e9905f2df812c8b754165dd606ae300 https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 161210518 Change-Id: Ida7831576918000bb73850e639aae0d82f0c9fca Signed-off-by: Suren Baghdasaryan --- include/linux/mm.h | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 64eb00e3c58b..b2b3990aa1f6 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -673,6 +673,7 @@ static inline void vma_end_read(struct vm_area_struct *vma) rcu_read_unlock(); } +/* WARNING! Can only be used if mmap_lock is expected to be write-locked */ static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq) { mmap_assert_write_locked(vma->vm_mm); @@ -723,17 +724,6 @@ static inline bool vma_try_start_write(struct vm_area_struct *vma) return true; } -static inline void vma_assert_locked(struct vm_area_struct *vma) -{ - int mm_lock_seq; - - if (__is_vma_write_locked(vma, &mm_lock_seq)) - return; - - lockdep_assert_held(&vma->vm_lock->lock); - VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_lock->lock), vma); -} - static inline void vma_assert_write_locked(struct vm_area_struct *vma) { int mm_lock_seq; @@ -741,6 +731,12 @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma) VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma); } +static inline void vma_assert_locked(struct vm_area_struct *vma) +{ + if (!rwsem_is_locked(&vma->vm_lock->lock)) + vma_assert_write_locked(vma); +} + static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached) { /* When detaching vma should be write-locked */ From 4cb518a06f90657200a98930aa55f40300ad1e20 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 24 Jul 2023 19:54:01 +0100 Subject: [PATCH 138/163] FROMGIT: mm: remove CONFIG_PER_VMA_LOCK ifdefs Patch series "Handle most file-backed faults under the 
VMA lock", v3. This patchset adds the ability to handle page faults on parts of files which are already in the page cache without taking the mmap lock. This patch (of 10): Provide lock_vma_under_rcu() when CONFIG_PER_VMA_LOCK is not defined to eliminate ifdefs in the users. Link: https://lkml.kernel.org/r/20230724185410.1124082-1-willy@infradead.org Link: https://lkml.kernel.org/r/20230724185410.1124082-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Suren Baghdasaryan Cc: Punit Agrawal Cc: Arjun Roy Cc: Eric Dumazet Signed-off-by: Andrew Morton (cherry picked from commit a457f3e92ccba03be36e8c04c77992d87004806c https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 293665307 Change-Id: I8cb7d9536b8c54925d04945566b75d4ea2ff042c Signed-off-by: Suren Baghdasaryan --- arch/arm64/mm/fault.c | 2 -- arch/powerpc/mm/fault.c | 4 ---- arch/riscv/mm/fault.c | 4 ---- arch/s390/mm/fault.c | 2 -- arch/x86/mm/fault.c | 4 ---- include/linux/mm.h | 6 ++++++ 6 files changed, 6 insertions(+), 16 deletions(-) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index b6ac6caeb662..e5e07212126d 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -599,7 +599,6 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); -#ifdef CONFIG_PER_VMA_LOCK if (!(mm_flags & FAULT_FLAG_USER)) goto lock_mmap; @@ -628,7 +627,6 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr, return 0; } lock_mmap: -#endif /* CONFIG_PER_VMA_LOCK */ retry: vma = lock_mm_and_find_vma(mm, addr, regs); diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 82954d0e6906..b1723094d464 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -469,7 +469,6 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address, if (is_exec) flags |= FAULT_FLAG_INSTRUCTION; -#ifdef CONFIG_PER_VMA_LOCK if (!(flags & 
FAULT_FLAG_USER)) goto lock_mmap; @@ -502,7 +501,6 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address, return user_mode(regs) ? 0 : SIGBUS; lock_mmap: -#endif /* CONFIG_PER_VMA_LOCK */ /* When running in the kernel we expect faults to occur only to * addresses in user space. All other faults represent errors in the @@ -552,9 +550,7 @@ static int ___do_page_fault(struct pt_regs *regs, unsigned long address, mmap_read_unlock(current->mm); -#ifdef CONFIG_PER_VMA_LOCK done: -#endif if (unlikely(fault & VM_FAULT_ERROR)) return mm_fault_error(regs, address, fault); diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 88b70983bfb7..34a44febae86 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -289,7 +289,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs) flags |= FAULT_FLAG_WRITE; else if (cause == EXC_INST_PAGE_FAULT) flags |= FAULT_FLAG_INSTRUCTION; -#ifdef CONFIG_PER_VMA_LOCK if (!(flags & FAULT_FLAG_USER)) goto lock_mmap; @@ -318,7 +317,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs) return; } lock_mmap: -#endif /* CONFIG_PER_VMA_LOCK */ retry: vma = lock_mm_and_find_vma(mm, addr, regs); @@ -372,9 +370,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs) mmap_read_unlock(mm); -#ifdef CONFIG_PER_VMA_LOCK done: -#endif if (unlikely(fault & VM_FAULT_ERROR)) { tsk->thread.bad_cause = cause; mm_fault_error(regs, addr, fault); diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 4e1f2790e6ff..0843adb266d1 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -403,7 +403,6 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access) access = VM_WRITE; if (access == VM_WRITE) flags |= FAULT_FLAG_WRITE; -#ifdef CONFIG_PER_VMA_LOCK if (!(flags & FAULT_FLAG_USER)) goto lock_mmap; vma = lock_vma_under_rcu(mm, address); @@ -427,7 +426,6 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access) goto out; } lock_mmap: -#endif /* CONFIG_PER_VMA_LOCK */ 
mmap_read_lock(mm); gmap = NULL; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 72044a9342d0..97599581ec6b 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1349,7 +1349,6 @@ void do_user_addr_fault(struct pt_regs *regs, } #endif -#ifdef CONFIG_PER_VMA_LOCK if (!(flags & FAULT_FLAG_USER)) goto lock_mmap; @@ -1380,7 +1379,6 @@ void do_user_addr_fault(struct pt_regs *regs, return; } lock_mmap: -#endif /* CONFIG_PER_VMA_LOCK */ retry: vma = lock_mm_and_find_vma(mm, address, regs); @@ -1440,9 +1438,7 @@ void do_user_addr_fault(struct pt_regs *regs, } mmap_read_unlock(mm); -#ifdef CONFIG_PER_VMA_LOCK done: -#endif if (likely(!(fault & VM_FAULT_ERROR))) return; diff --git a/include/linux/mm.h b/include/linux/mm.h index b2b3990aa1f6..8c1bfae77799 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -788,6 +788,12 @@ static inline void assert_fault_locked(struct vm_fault *vmf) mmap_assert_locked(vmf->vma->vm_mm); } +static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, + unsigned long address) +{ + return NULL; +} + #endif /* CONFIG_PER_VMA_LOCK */ /* From e26044769f1dcb61ad152a63c7f07ffdd685f7f4 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 24 Jul 2023 19:54:02 +0100 Subject: [PATCH 139/163] BACKPORT: FROMGIT: mm: allow per-VMA locks on file-backed VMAs Remove the TCP layering violation by allowing per-VMA locks on all VMAs. The fault path will immediately fail in handle_mm_fault(). There may be a small performance reduction from this patch as a little unnecessary work will be done on each page fault. See later patches for the improvement. 
Link: https://lkml.kernel.org/r/20230724185410.1124082-3-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Suren Baghdasaryan Cc: Arjun Roy Cc: Eric Dumazet Cc: Punit Agrawal Signed-off-by: Andrew Morton (cherry picked from commit 698dcd77360a3ce15dfc6fe55f9b5572ad4c4291 https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) [surenb: skip tcp-related changes] Bug: 293665307 Change-Id: I73d9d1e4f96419d4723a920fc5960e806749c368 Signed-off-by: Suren Baghdasaryan --- mm/memory.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index d8a225dca633..52327d11b3ff 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5267,6 +5267,11 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, goto out; } + if ((flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vma)) { + vma_end_read(vma); + return VM_FAULT_RETRY; + } + /* * Enable the memcg OOM handling for faults triggered in user * space. Kernel faults are handled more gracefully. @@ -5438,10 +5443,6 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, if (!vma) goto inval; - /* Only anonymous vmas are supported for now */ - if (!vma_is_anonymous(vma)) - goto inval; - if (!vma_start_read(vma)) goto inval; From 66cbbe6b3144b6877a76bbcf464544209338fbfc Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 24 Jul 2023 19:54:03 +0100 Subject: [PATCH 140/163] FROMGIT: mm: move FAULT_FLAG_VMA_LOCK check from handle_mm_fault() Handle a little more of the page fault path outside the mmap sem. The hugetlb path doesn't need to check whether the VMA is anonymous; the VM_HUGETLB flag is only set on hugetlbfs VMAs. There should be no performance change from the previous commit; this is simply a step to ease bisection of any problems. 
Link: https://lkml.kernel.org/r/20230724185410.1124082-4-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Suren Baghdasaryan Cc: Arjun Roy Cc: Eric Dumazet Cc: Punit Agrawal Signed-off-by: Andrew Morton (cherry picked from commit 51db5e8974cafee10b2252efa78f89af7d60cd11 https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 293665307 Change-Id: I300c7105fa3530e8eb05862cb3f66b7adac99420 Signed-off-by: Suren Baghdasaryan --- mm/hugetlb.c | 6 ++++++ mm/memory.c | 18 +++++++++--------- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index fe2fcc57328f..4737d9fc505d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -6005,6 +6005,12 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, int need_wait_lock = 0; unsigned long haddr = address & huge_page_mask(h); + /* TODO: Handle faults under the VMA lock */ + if (flags & FAULT_FLAG_VMA_LOCK) { + vma_end_read(vma); + return VM_FAULT_RETRY; + } + ptep = huge_pte_offset(mm, haddr, huge_page_size(h)); if (ptep) { /* diff --git a/mm/memory.c b/mm/memory.c index 52327d11b3ff..69ede01a372d 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5042,10 +5042,10 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) } /* - * By the time we get here, we already hold the mm semaphore - * - * The mmap_lock may have been released depending on flags and our - * return value. See filemap_fault() and __folio_lock_or_retry(). + * On entry, we hold either the VMA lock or the mmap_lock + * (FAULT_FLAG_VMA_LOCK tells you which). If VM_FAULT_RETRY is set in + * the result, the mmap_lock is not held on exit. See filemap_fault() + * and __folio_lock_or_retry(). 
*/ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, unsigned int flags) @@ -5064,6 +5064,11 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, p4d_t *p4d; vm_fault_t ret; + if ((flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vma)) { + vma_end_read(vma); + return VM_FAULT_RETRY; + } + pgd = pgd_offset(mm, address); p4d = p4d_alloc(mm, pgd, address); if (!p4d) @@ -5267,11 +5272,6 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, goto out; } - if ((flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vma)) { - vma_end_read(vma); - return VM_FAULT_RETRY; - } - /* * Enable the memcg OOM handling for faults triggered in user * space. Kernel faults are handled more gracefully. From 8594d6a30f8ed439d0cd60f11ccbe34363bb6f78 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 24 Jul 2023 19:54:04 +0100 Subject: [PATCH 141/163] BACKPORT: FROMGIT: mm: handle PUD faults under the VMA lock Postpone checking the VMA_LOCK flag until we've attempted to handle faults on PUDs. There's a mild upside to this patch in that we'll allocate the page tables while under the VMA lock rather than the mmap lock, reducing the hold time on the mmap lock, since the retry will find the page tables already populated. The real purpose here is to make a commit that shows we don't call ->huge_fault under the VMA lock. We do now handle setting the accessed bit on a PUD fault under the VMA lock, but that doesn't seem likely to be a measurable difference. 
Link: https://lkml.kernel.org/r/20230724185410.1124082-5-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Cc: Arjun Roy Cc: Eric Dumazet Cc: Punit Agrawal Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton (cherry picked from commit 3c04dd18ba57c6753a7ddc6e6c902550a7ac54d9 https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) [surenb: resolved merge conflicts in wp_huge_pud()] Bug: 293665307 Change-Id: Ife20ed7de6444c0e424e12f9fdcdc8f8ecaed2aa Signed-off-by: Suren Baghdasaryan --- mm/memory.c | 37 +++++++++++++++++++++++++------------ 1 file changed, 25 insertions(+), 12 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 69ede01a372d..de54cc5d3c75 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4896,11 +4896,17 @@ static vm_fault_t create_huge_pud(struct vm_fault *vmf) { #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) + struct vm_area_struct *vma = vmf->vma; /* No support for anonymous transparent PUD pages yet */ - if (vma_is_anonymous(vmf->vma)) + if (vma_is_anonymous(vma)) return VM_FAULT_FALLBACK; - if (vmf->vma->vm_ops->huge_fault) - return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); + if (vma->vm_ops->huge_fault) { + if (vmf->flags & FAULT_FLAG_VMA_LOCK) { + vma_end_read(vma); + return VM_FAULT_RETRY; + } + return vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); + } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ return VM_FAULT_FALLBACK; } @@ -4909,18 +4915,25 @@ static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) { #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) + struct vm_area_struct *vma = vmf->vma; + vm_fault_t ret; + /* No support for anonymous transparent PUD pages yet */ - if (vma_is_anonymous(vmf->vma)) + if (vma_is_anonymous(vma)) goto split; - if (vmf->vma->vm_ops->huge_fault) { - vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); + if (vma->vm_ops->huge_fault) { + if (vmf->flags & 
FAULT_FLAG_VMA_LOCK) { + vma_end_read(vma); + return VM_FAULT_RETRY; + } + ret = vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); if (!(ret & VM_FAULT_FALLBACK)) return ret; } split: /* COW or write-notify not handled on PUD level: split pud.*/ - __split_huge_pud(vmf->vma, vmf->pud, vmf->address); + __split_huge_pud(vma, vmf->pud, vmf->address); #endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ return VM_FAULT_FALLBACK; } @@ -5064,11 +5077,6 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, p4d_t *p4d; vm_fault_t ret; - if ((flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vma)) { - vma_end_read(vma); - return VM_FAULT_RETRY; - } - pgd = pgd_offset(mm, address); p4d = p4d_alloc(mm, pgd, address); if (!p4d) @@ -5112,6 +5120,11 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, if (pud_trans_unstable(vmf.pud)) goto retry_pud; + if ((flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vma)) { + vma_end_read(vma); + return VM_FAULT_RETRY; + } + if (pmd_none(*vmf.pmd) && hugepage_vma_check(vma, vm_flags, false, true, true)) { ret = create_huge_pmd(&vmf); From dd621869c1e65e93edd43388d10bf6c5fade3079 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 24 Jul 2023 19:54:05 +0100 Subject: [PATCH 142/163] BACKPORT: FROMGIT: mm: handle some PMD faults under the VMA lock Push the VMA_LOCK check down from __handle_mm_fault() to handle_pte_fault(). Once again, we refuse to call ->huge_fault() with the VMA lock held, but we will wait for a PMD migration entry with the VMA lock held, handle NUMA migration and set the accessed bit. We were already doing this for anonymous VMAs, so it should be safe. 
Link: https://lkml.kernel.org/r/20230724185410.1124082-6-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Cc: Arjun Roy Cc: Eric Dumazet Cc: Punit Agrawal Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton (cherry picked from commit b7b8f56db92f56ce812e305f84aef0404287b534 https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) [surenb: resolved merge conflicts in create_huge_pmd() and wp_huge_pmd()] Bug: 293665307 Change-Id: I3ec9042b2e39a5caf6b6f3a478bf9ba337012aa4 Signed-off-by: Suren Baghdasaryan --- mm/memory.c | 38 +++++++++++++++++++++++++------------- 1 file changed, 25 insertions(+), 13 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index de54cc5d3c75..2f1b1ad8c5e4 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4861,33 +4861,45 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf) { - if (vma_is_anonymous(vmf->vma)) + struct vm_area_struct *vma = vmf->vma; + if (vma_is_anonymous(vma)) return do_huge_pmd_anonymous_page(vmf); - if (vmf->vma->vm_ops->huge_fault) - return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); + if (vma->vm_ops->huge_fault) { + if (vmf->flags & FAULT_FLAG_VMA_LOCK) { + vma_end_read(vma); + return VM_FAULT_RETRY; + } + return vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); + } return VM_FAULT_FALLBACK; } /* `inline' is required to avoid gcc 4.1.2 build error */ static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf) { + struct vm_area_struct *vma = vmf->vma; const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE; + vm_fault_t ret; - if (vma_is_anonymous(vmf->vma)) { + if (vma_is_anonymous(vma)) { if (likely(!unshare) && - userfaultfd_huge_pmd_wp(vmf->vma, vmf->orig_pmd)) + userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd)) return handle_userfault(vmf, VM_UFFD_WP); return do_huge_pmd_wp_page(vmf); } - if (vmf->vma->vm_ops->huge_fault) { - vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); + if (vma->vm_ops->huge_fault) { + if 
(vmf->flags & FAULT_FLAG_VMA_LOCK) { + vma_end_read(vma); + return VM_FAULT_RETRY; + } + ret = vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD); if (!(ret & VM_FAULT_FALLBACK)) return ret; } /* COW or write-notify handled on pte level: split pmd. */ - __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL); + __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL); return VM_FAULT_FALLBACK; } @@ -4957,6 +4969,11 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) { pte_t entry; + if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vmf->vma)) { + vma_end_read(vmf->vma); + return VM_FAULT_RETRY; + } + if (unlikely(pmd_none(*vmf->pmd))) { /* * Leave __pte_alloc() until later: because vm_ops->fault may @@ -5120,11 +5137,6 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma, if (pud_trans_unstable(vmf.pud)) goto retry_pud; - if ((flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vma)) { - vma_end_read(vma); - return VM_FAULT_RETRY; - } - if (pmd_none(*vmf.pmd) && hugepage_vma_check(vma, vm_flags, false, true, true)) { ret = create_huge_pmd(&vmf); From fa9a8adff04d1c7d3cf030f0f67d7f5bb2116939 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 24 Jul 2023 19:54:06 +0100 Subject: [PATCH 143/163] FROMGIT: mm: move FAULT_FLAG_VMA_LOCK check down in handle_pte_fault() Call do_pte_missing() under the VMA lock ... then immediately retry in do_fault(). 
Link: https://lkml.kernel.org/r/20230724185410.1124082-7-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Suren Baghdasaryan Cc: Arjun Roy Cc: Eric Dumazet Cc: Punit Agrawal Signed-off-by: Andrew Morton (cherry picked from commit 4c753b25481499cd1cb6a8ddba18bc5585f34296 https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 293665307 Change-Id: I8c8f2feaade7c40daf37b63e43111d22ec147e5f Signed-off-by: Suren Baghdasaryan --- mm/memory.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 2f1b1ad8c5e4..e3a683bd9e70 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4695,6 +4695,11 @@ static vm_fault_t do_fault(struct vm_fault *vmf) struct mm_struct *vm_mm = vma->vm_mm; vm_fault_t ret; + if (vmf->flags & FAULT_FLAG_VMA_LOCK){ + vma_end_read(vma); + return VM_FAULT_RETRY; + } + /* * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */ @@ -4969,11 +4974,6 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) { pte_t entry; - if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vmf->vma)) { - vma_end_read(vmf->vma); - return VM_FAULT_RETRY; - } - if (unlikely(pmd_none(*vmf->pmd))) { /* * Leave __pte_alloc() until later: because vm_ops->fault may @@ -5030,6 +5030,12 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) return do_fault(vmf); } + if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vmf->vma)) { + pte_unmap(vmf->pte); + vma_end_read(vmf->vma); + return VM_FAULT_RETRY; + } + if (!pte_present(vmf->orig_pte)) return do_swap_page(vmf); From 072c35fb69e238b377bddea576a405ab8ecddf9a Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 24 Jul 2023 19:54:07 +0100 Subject: [PATCH 144/163] FROMGIT: mm: move FAULT_FLAG_VMA_LOCK check down from do_fault() Perform the check at the start of do_read_fault(), do_cow_fault() and do_shared_fault() instead. Should be no performance change from the last commit. 
Link: https://lkml.kernel.org/r/20230724185410.1124082-8-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Suren Baghdasaryan Cc: Arjun Roy Cc: Eric Dumazet Cc: Punit Agrawal Signed-off-by: Andrew Morton (cherry picked from commit 4e105ec567c874c166a8e5a9b2dd849c8ec2055e https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 293665307 Change-Id: I37be370a0378afd094d880bb8e538e4e7874499e Signed-off-by: Suren Baghdasaryan --- mm/memory.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index e3a683bd9e70..de01fb70f5f0 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4585,6 +4585,11 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf) { vm_fault_t ret = 0; + if (vmf->flags & FAULT_FLAG_VMA_LOCK) { + vma_end_read(vmf->vma); + return VM_FAULT_RETRY; + } + /* * Let's call ->map_pages() first and use ->fault() as fallback * if page by the offset is not ready to be mapped (cold cache or @@ -4612,6 +4617,11 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf) struct vm_area_struct *vma = vmf->vma; vm_fault_t ret; + if (vmf->flags & FAULT_FLAG_VMA_LOCK) { + vma_end_read(vma); + return VM_FAULT_RETRY; + } + if (unlikely(anon_vma_prepare(vma))) return VM_FAULT_OOM; @@ -4651,6 +4661,11 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf) struct vm_area_struct *vma = vmf->vma; vm_fault_t ret, tmp; + if (vmf->flags & FAULT_FLAG_VMA_LOCK) { + vma_end_read(vma); + return VM_FAULT_RETRY; + } + ret = __do_fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) return ret; @@ -4695,11 +4710,6 @@ static vm_fault_t do_fault(struct vm_fault *vmf) struct mm_struct *vm_mm = vma->vm_mm; vm_fault_t ret; - if (vmf->flags & FAULT_FLAG_VMA_LOCK){ - vma_end_read(vma); - return VM_FAULT_RETRY; - } - /* * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */ From ffcebdef16579f8957e116726ab5425bb22fbede Mon Sep 17 00:00:00 2001 From: 
"Matthew Wilcox (Oracle)" Date: Mon, 24 Jul 2023 19:54:08 +0100 Subject: [PATCH 145/163] FROMGIT: mm: run the fault-around code under the VMA lock The map_pages fs method should be safe to run under the VMA lock instead of the mmap lock. This should have a measurable reduction in contention on the mmap lock. Link: https://lkml.kernel.org/r/20230724185410.1124082-9-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Suren Baghdasaryan Cc: Arjun Roy Cc: Eric Dumazet Cc: Punit Agrawal Signed-off-by: Andrew Morton (cherry picked from commit 7456c15600264d635293c91df1e0c0b5a1e73578 https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 293665307 Change-Id: Iaa1b0c2deeade361b34118f41b5deb591268a269 Signed-off-by: Suren Baghdasaryan --- mm/memory.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index de01fb70f5f0..64383bda8359 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4585,11 +4585,6 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf) { vm_fault_t ret = 0; - if (vmf->flags & FAULT_FLAG_VMA_LOCK) { - vma_end_read(vmf->vma); - return VM_FAULT_RETRY; - } - /* * Let's call ->map_pages() first and use ->fault() as fallback * if page by the offset is not ready to be mapped (cold cache or @@ -4601,6 +4596,11 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf) return ret; } + if (vmf->flags & FAULT_FLAG_VMA_LOCK) { + vma_end_read(vmf->vma); + return VM_FAULT_RETRY; + } + ret = __do_fault(vmf); if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY))) return ret; From 83ab9863246c33c4e4a6c70bcf3e1abb4612625b Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 24 Jul 2023 19:54:09 +0100 Subject: [PATCH 146/163] FROMGIT: mm: handle swap and NUMA PTE faults under the VMA lock Move the FAULT_FLAG_VMA_LOCK check down in handle_pte_fault(). This is probably not a huge win in its own right, but is a nicely separable bit from the next patch. 
Link: https://lkml.kernel.org/r/20230724185410.1124082-10-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Cc: Arjun Roy Cc: Eric Dumazet Cc: Punit Agrawal Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton (cherry picked from commit 51c4fdc72be2287960ab5c1f5beae84f3039fd01 https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) Bug: 293665307 Change-Id: I6cf9cb1d40c23287ce179a8c435427c3d88d2528 Signed-off-by: Suren Baghdasaryan --- mm/memory.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 64383bda8359..7586f5f0f414 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5040,18 +5040,18 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) return do_fault(vmf); } - if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vmf->vma)) { - pte_unmap(vmf->pte); - vma_end_read(vmf->vma); - return VM_FAULT_RETRY; - } - if (!pte_present(vmf->orig_pte)) return do_swap_page(vmf); if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) return do_numa_page(vmf); + if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vmf->vma)) { + pte_unmap(vmf->pte); + vma_end_read(vmf->vma); + return VM_FAULT_RETRY; + } + vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); spin_lock(vmf->ptl); entry = vmf->orig_pte; From 9e066d4b35fe9eb9cf007702bf6c3e50623026fd Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sat, 12 Aug 2023 01:20:33 +0100 Subject: [PATCH 147/163] FROMLIST: mm: Allow fault_dirty_shared_page() to be called under the VMA lock By making maybe_unlock_mmap_for_io() handle the VMA lock correctly, we make fault_dirty_shared_page() safe to be called without the mmap lock held. 
Signed-off-by: Matthew Wilcox (Oracle) Reported-by: David Hildenbrand Tested-by: Suren Baghdasaryan Link: https://lore.kernel.org/all/20230812002033.1002367-1-willy@infradead.org/ Bug: 293665307 Change-Id: Ifed050cc4d194c538765ab403de09199b94c7b1b Signed-off-by: Suren Baghdasaryan --- mm/internal.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/internal.h b/mm/internal.h index fe0925d7cbb5..0997c0c82c02 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -619,7 +619,7 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf, if (fault_flag_allow_retry_first(flags) && !(flags & FAULT_FLAG_RETRY_NOWAIT)) { fpin = get_file(vmf->vma->vm_file); - mmap_read_unlock(vmf->vma->vm_mm); + release_fault_lock(vmf); } return fpin; } From 3ebafb7b468ba6595d9f1a2b899c8555374779aa Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 24 Jul 2023 19:54:10 +0100 Subject: [PATCH 148/163] BACKPORT: FROMGIT: mm: handle faults that merely update the accessed bit under the VMA lock Move FAULT_FLAG_VMA_LOCK check out of handle_pte_fault(). This should have a significant performance improvement for mmaped files. Write faults (on read-only shared pages) still take the mmap lock as we do not want to audit all the implementations of ->pfn_mkwrite() and ->page_mkwrite(). However write-faults on private mappings are handled under the VMA lock. 
Link: https://lkml.kernel.org/r/20230724185410.1124082-11-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Cc: Arjun Roy Cc: Eric Dumazet Cc: Punit Agrawal Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton (cherry picked from commit 88e2667632d43928d3ed50d0163ecd73aaa2d455 https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable) [surenb: replaced folio_put() with put_page() in wp_page_shared()] Bug: 293665307 Change-Id: I27ac40bb0f7347083f641e0cfc8ab33e182c4c5b Signed-off-by: Suren Baghdasaryan --- mm/memory.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 7586f5f0f414..0f780a8dfaef 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3314,6 +3314,11 @@ static vm_fault_t wp_pfn_shared(struct vm_fault *vmf) vm_fault_t ret; pte_unmap_unlock(vmf->pte, vmf->ptl); + if (vmf->flags & FAULT_FLAG_VMA_LOCK) { + vma_end_read(vmf->vma); + return VM_FAULT_RETRY; + } + vmf->flags |= FAULT_FLAG_MKWRITE; ret = vma->vm_ops->pfn_mkwrite(vmf); if (ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)) @@ -3336,6 +3341,12 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf) vm_fault_t tmp; pte_unmap_unlock(vmf->pte, vmf->ptl); + if (vmf->flags & FAULT_FLAG_VMA_LOCK) { + put_page(vmf->page); + vma_end_read(vmf->vma); + return VM_FAULT_RETRY; + } + tmp = do_page_mkwrite(vmf); if (unlikely(!tmp || (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { @@ -5046,12 +5057,6 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) return do_numa_page(vmf); - if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vmf->vma)) { - pte_unmap(vmf->pte); - vma_end_read(vmf->vma); - return VM_FAULT_RETRY; - } - vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); spin_lock(vmf->ptl); entry = vmf->orig_pte; From 64b479e43bf315acb0c0c9da707f09f87ea0de6a Mon Sep 17 00:00:00 2001 From: Zhenhua Huang Date: Thu, 17 Aug 2023 23:18:05 +0800 Subject: [PATCH 
149/163] ANDROID: consolidate.fragment: Enable slub debug in consolidate-fragment Enable slub_debug in consolidate build. These are helpful in debugging slub related issues. Bug: 296476380 Change-Id: I7a9d39764601e10e0093cb3b0138ff0bfdd30a41 Signed-off-by: Zhenhua Huang --- arch/arm64/configs/consolidate.fragment | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/configs/consolidate.fragment b/arch/arm64/configs/consolidate.fragment index 8747c79cef12..ff0f1385054b 100644 --- a/arch/arm64/configs/consolidate.fragment +++ b/arch/arm64/configs/consolidate.fragment @@ -3,7 +3,7 @@ # CONFIG_BITFIELD_KUNIT is not set # CONFIG_BITS_TEST is not set CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y -CONFIG_CMDLINE="console=ttyMSM0,115200n8 kasan.stacktrace=off stack_depot_disable=off page_owner=on no_hash_pointers panic_on_taint=0x20 page_pinner=on" +CONFIG_CMDLINE="console=ttyMSM0,115200n8 kasan.stacktrace=off stack_depot_disable=off page_owner=on no_hash_pointers panic_on_taint=0x20 page_pinner=on slub_debug=FZP,zs_handle,zspage;FZPU" CONFIG_DEBUG_ATOMIC_SLEEP=y CONFIG_DEBUG_IRQFLAGS=y CONFIG_DEBUG_KMEMLEAK=y From 39bfcdd03539458949eab7b8feb566369d1416e7 Mon Sep 17 00:00:00 2001 From: Nhat Pham Date: Mon, 28 Nov 2022 11:16:12 -0800 Subject: [PATCH 150/163] UPSTREAM: zsmalloc: consolidate zs_pool's migrate_lock and size_class's locks Currently, zsmalloc has a hierarchy of locks, which includes a pool-level migrate_lock, and a lock for each size class. We have to obtain both locks in the hotpath in most cases anyway, except for zs_malloc. This exception will no longer exist when we introduce a LRU into the zs_pool for the new writeback functionality - we will need to obtain a pool-level lock to synchronize LRU handling even in zs_malloc. In preparation for zsmalloc writeback, consolidate these locks into a single pool-level lock, which drastically reduces the complexity of synchronization in zsmalloc. 
We have also benchmarked the lock consolidation to see the performance effect of this change on zram. First, we ran a synthetic FS workload on a server machine with 36 cores (same machine for all runs), using fs_mark -d ../zram1mnt -s 100000 -n 2500 -t 32 -k before and after for btrfs and ext4 on zram (FS usage is 80%). Here is the result (unit is file/second): With lock consolidation (btrfs): Average: 13520.2, Median: 13531.0, Stddev: 137.5961482019028 Without lock consolidation (btrfs): Average: 13487.2, Median: 13575.0, Stddev: 309.08283679298665 With lock consolidation (ext4): Average: 16824.4, Median: 16839.0, Stddev: 89.97388510006668 Without lock consolidation (ext4) Average: 16958.0, Median: 16986.0, Stddev: 194.7370021336469 As you can see, we observe a 0.3% regression for btrfs, and a 0.9% regression for ext4. This is a small, barely measurable difference in my opinion. For a more realistic scenario, we also tried building the kernel on zram. Here is the time it takes (in seconds): With lock consolidation (btrfs): real Average: 319.6, Median: 320.0, Stddev: 0.8944271909999159 user Average: 6894.2, Median: 6895.0, Stddev: 25.528415540334656 sys Average: 521.4, Median: 522.0, Stddev: 1.51657508881031 Without lock consolidation (btrfs): real Average: 319.8, Median: 320.0, Stddev: 0.8366600265340756 user Average: 6896.6, Median: 6899.0, Stddev: 16.04057355583023 sys Average: 520.6, Median: 521.0, Stddev: 1.140175425099138 With lock consolidation (ext4): real Average: 320.0, Median: 319.0, Stddev: 1.4142135623730951 user Average: 6896.8, Median: 6878.0, Stddev: 28.621670111997307 sys Average: 521.2, Median: 521.0, Stddev: 1.7888543819998317 Without lock consolidation (ext4) real Average: 319.6, Median: 319.0, Stddev: 0.8944271909999159 user Average: 6886.2, Median: 6887.0, Stddev: 16.93221781102523 sys Average: 520.4, Median: 520.0, Stddev: 1.140175425099138 The difference is entirely within the noise of a typical run on zram. 
This hardly justifies the complexity of maintaining both the pool lock and the class lock. In fact, for writeback, we would need to introduce yet another lock to prevent data races on the pool's LRU, further complicating the lock handling logic. IMHO, it is just better to collapse all of these into a single pool-level lock. Link: https://lkml.kernel.org/r/20221128191616.1261026-4-nphamcs@gmail.com Change-Id: Ib0eb09d7a69190fc4ffea8f819423c7f66d83379 Signed-off-by: Nhat Pham Suggested-by: Johannes Weiner Acked-by: Minchan Kim Acked-by: Johannes Weiner Reviewed-by: Sergey Senozhatsky Cc: Dan Streetman Cc: Nitin Gupta Cc: Seth Jennings Cc: Vitaly Wool Signed-off-by: Andrew Morton (cherry picked from commit c0547d0b6a4b637db05406b90ba82e1b2e71de56) Bug: 297093100 Bug: 298150234 Signed-off-by: Kalesh Singh --- mm/zsmalloc.c | 87 ++++++++++++++++++++++----------------------------- 1 file changed, 37 insertions(+), 50 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 649376f17bd9..33d8357fdbf2 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -33,8 +33,7 @@ /* * lock ordering: * page_lock - * pool->migrate_lock - * class->lock + * pool->lock * zspage->lock */ @@ -192,7 +191,6 @@ static const int fullness_threshold_frac = 4; static size_t huge_class_size; struct size_class { - spinlock_t lock; struct list_head fullness_list[NR_ZS_FULLNESS]; /* * Size of objects stored in this class. 
Must be multiple @@ -247,8 +245,7 @@ struct zs_pool { #ifdef CONFIG_COMPACTION struct work_struct free_work; #endif - /* protect page/zspage migration */ - rwlock_t migrate_lock; + spinlock_t lock; }; struct zspage { @@ -355,7 +352,7 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage) kmem_cache_free(pool->zspage_cachep, zspage); } -/* class->lock(which owns the handle) synchronizes races */ +/* pool->lock(which owns the handle) synchronizes races */ static void record_obj(unsigned long handle, unsigned long obj) { *(unsigned long *)handle = obj; @@ -452,7 +449,7 @@ static __maybe_unused int is_first_page(struct page *page) return PagePrivate(page); } -/* Protected by class->lock */ +/* Protected by pool->lock */ static inline int get_zspage_inuse(struct zspage *zspage) { return zspage->inuse; @@ -597,13 +594,13 @@ static int zs_stats_size_show(struct seq_file *s, void *v) if (class->index != i) continue; - spin_lock(&class->lock); + spin_lock(&pool->lock); class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL); class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY); obj_allocated = zs_stat_get(class, OBJ_ALLOCATED); obj_used = zs_stat_get(class, OBJ_USED); freeable = zs_can_compact(class); - spin_unlock(&class->lock); + spin_unlock(&pool->lock); objs_per_zspage = class->objs_per_zspage; pages_used = obj_allocated / objs_per_zspage * @@ -916,7 +913,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class, get_zspage_mapping(zspage, &class_idx, &fg); - assert_spin_locked(&class->lock); + assert_spin_locked(&pool->lock); VM_BUG_ON(get_zspage_inuse(zspage)); VM_BUG_ON(fg != ZS_EMPTY); @@ -1247,19 +1244,19 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, BUG_ON(in_interrupt()); /* It guarantees it can get zspage from handle safely */ - read_lock(&pool->migrate_lock); + spin_lock(&pool->lock); obj = handle_to_obj(handle); obj_to_location(obj, &page, &obj_idx); zspage = get_zspage(page); /* - * 
migration cannot move any zpages in this zspage. Here, class->lock + * migration cannot move any zpages in this zspage. Here, pool->lock * is too heavy since callers would take some time until they calls * zs_unmap_object API so delegate the locking from class to zspage * which is smaller granularity. */ migrate_read_lock(zspage); - read_unlock(&pool->migrate_lock); + spin_unlock(&pool->lock); class = zspage_class(pool, zspage); off = (class->size * obj_idx) & ~PAGE_MASK; @@ -1412,8 +1409,8 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) size += ZS_HANDLE_SIZE; class = pool->size_class[get_size_class_index(size)]; - /* class->lock effectively protects the zpage migration */ - spin_lock(&class->lock); + /* pool->lock effectively protects the zpage migration */ + spin_lock(&pool->lock); zspage = find_get_zspage(class); if (likely(zspage)) { obj = obj_malloc(pool, zspage, handle); @@ -1421,12 +1418,12 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) fix_fullness_group(class, zspage); record_obj(handle, obj); class_stat_inc(class, OBJ_USED, 1); - spin_unlock(&class->lock); + spin_unlock(&pool->lock); return handle; } - spin_unlock(&class->lock); + spin_unlock(&pool->lock); zspage = alloc_zspage(pool, class, gfp); if (!zspage) { @@ -1434,7 +1431,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) return (unsigned long)ERR_PTR(-ENOMEM); } - spin_lock(&class->lock); + spin_lock(&pool->lock); obj = obj_malloc(pool, zspage, handle); newfg = get_fullness_group(class, zspage); insert_zspage(class, zspage, newfg); @@ -1447,7 +1444,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) /* We completely set up zspage so mark them as movable */ SetZsPageMovable(pool, zspage); - spin_unlock(&class->lock); + spin_unlock(&pool->lock); return handle; } @@ -1491,16 +1488,14 @@ void zs_free(struct zs_pool *pool, unsigned long handle) return; /* - * The pool->migrate_lock protects the race with 
zpage's migration + * The pool->lock protects the race with zpage's migration * so it's safe to get the page from handle. */ - read_lock(&pool->migrate_lock); + spin_lock(&pool->lock); obj = handle_to_obj(handle); obj_to_page(obj, &f_page); zspage = get_zspage(f_page); class = zspage_class(pool, zspage); - spin_lock(&class->lock); - read_unlock(&pool->migrate_lock); obj_free(class->size, obj); class_stat_dec(class, OBJ_USED, 1); @@ -1510,7 +1505,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle) free_zspage(pool, class, zspage); out: - spin_unlock(&class->lock); + spin_unlock(&pool->lock); cache_free_handle(pool, handle); } EXPORT_SYMBOL_GPL(zs_free); @@ -1867,16 +1862,12 @@ static int zs_page_migrate(struct page *newpage, struct page *page, pool = zspage->pool; /* - * The pool migrate_lock protects the race between zpage migration + * The pool's lock protects the race between zpage migration * and zs_free. */ - write_lock(&pool->migrate_lock); + spin_lock(&pool->lock); class = zspage_class(pool, zspage); - /* - * the class lock protects zpage alloc/free in the zspage. - */ - spin_lock(&class->lock); /* the migrate_write_lock protects zpage access via zs_map_object */ migrate_write_lock(zspage); @@ -1906,10 +1897,9 @@ static int zs_page_migrate(struct page *newpage, struct page *page, replace_sub_page(class, zspage, newpage, page); /* * Since we complete the data copy and set up new zspage structure, - * it's okay to release migration_lock. + * it's okay to release the pool's lock. 
*/ - write_unlock(&pool->migrate_lock); - spin_unlock(&class->lock); + spin_unlock(&pool->lock); dec_zspage_isolation(zspage); migrate_write_unlock(zspage); @@ -1964,9 +1954,9 @@ static void async_free_zspage(struct work_struct *work) if (class->index != i) continue; - spin_lock(&class->lock); + spin_lock(&pool->lock); list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages); - spin_unlock(&class->lock); + spin_unlock(&pool->lock); } list_for_each_entry_safe(zspage, tmp, &free_pages, list) { @@ -1976,9 +1966,9 @@ static void async_free_zspage(struct work_struct *work) get_zspage_mapping(zspage, &class_idx, &fullness); VM_BUG_ON(fullness != ZS_EMPTY); class = pool->size_class[class_idx]; - spin_lock(&class->lock); + spin_lock(&pool->lock); __free_zspage(pool, class, zspage); - spin_unlock(&class->lock); + spin_unlock(&pool->lock); } }; @@ -2039,10 +2029,11 @@ static unsigned long __zs_compact(struct zs_pool *pool, struct zspage *dst_zspage = NULL; unsigned long pages_freed = 0; - /* protect the race between zpage migration and zs_free */ - write_lock(&pool->migrate_lock); - /* protect zpage allocation/free */ - spin_lock(&class->lock); + /* + * protect the race between zpage migration and zs_free + * as well as zpage allocation/free + */ + spin_lock(&pool->lock); while ((src_zspage = isolate_zspage(class, true))) { /* protect someone accessing the zspage(i.e., zs_map_object) */ migrate_write_lock(src_zspage); @@ -2067,7 +2058,7 @@ static unsigned long __zs_compact(struct zs_pool *pool, putback_zspage(class, dst_zspage); migrate_write_unlock(dst_zspage); dst_zspage = NULL; - if (rwlock_is_contended(&pool->migrate_lock)) + if (spin_is_contended(&pool->lock)) break; } @@ -2084,11 +2075,9 @@ static unsigned long __zs_compact(struct zs_pool *pool, pages_freed += class->pages_per_zspage; } else migrate_write_unlock(src_zspage); - spin_unlock(&class->lock); - write_unlock(&pool->migrate_lock); + spin_unlock(&pool->lock); cond_resched(); - 
write_lock(&pool->migrate_lock); - spin_lock(&class->lock); + spin_lock(&pool->lock); } if (src_zspage) { @@ -2096,8 +2085,7 @@ static unsigned long __zs_compact(struct zs_pool *pool, migrate_write_unlock(src_zspage); } - spin_unlock(&class->lock); - write_unlock(&pool->migrate_lock); + spin_unlock(&pool->lock); return pages_freed; } @@ -2200,7 +2188,7 @@ struct zs_pool *zs_create_pool(const char *name) return NULL; init_deferred_free(pool); - rwlock_init(&pool->migrate_lock); + spin_lock_init(&pool->lock); pool->name = kstrdup(name, GFP_KERNEL); if (!pool->name) @@ -2271,7 +2259,6 @@ struct zs_pool *zs_create_pool(const char *name) class->index = i; class->pages_per_zspage = pages_per_zspage; class->objs_per_zspage = objs_per_zspage; - spin_lock_init(&class->lock); pool->size_class[i] = class; for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS; fullness++) From 8722a68dd20776bdb6423fac0044aabeb77b1072 Mon Sep 17 00:00:00 2001 From: Andrew Yang Date: Fri, 21 Jul 2023 14:37:01 +0800 Subject: [PATCH 151/163] BACKPORT: zsmalloc: fix races between modifications of fullness and isolated We encountered many kernel exceptions of VM_BUG_ON(zspage->isolated == 0) in dec_zspage_isolation() and BUG_ON(!pages[1]) in zs_unmap_object() lately. This issue only occurs when migration and reclamation occur at the same time. With our memory stress test, we can reproduce this issue several times a day. We have no idea why no one else encountered this issue. BTW, we switched to the new kernel version with this defect a few months ago. Since fullness and isolated share the same unsigned int, modifications of them should be protected by the same lock. 
[andrew.yang@mediatek.com: move comment] Link: https://lkml.kernel.org/r/20230727062910.6337-1-andrew.yang@mediatek.com Link: https://lkml.kernel.org/r/20230721063705.11455-1-andrew.yang@mediatek.com Fixes: c4549b871102 ("zsmalloc: remove zspage isolation for migration") Change-Id: I4aeda0715d65f828bb88ad6fbf36b9927c7a5c4b Signed-off-by: Andrew Yang Reviewed-by: Sergey Senozhatsky Cc: AngeloGioacchino Del Regno Cc: Matthias Brugger Cc: Minchan Kim Cc: Sebastian Andrzej Siewior Cc: Signed-off-by: Andrew Morton (cherry picked from commit 4b5d1e47b69426c0f7491d97d73ad0152d02d437) Bug: 297093100 Bug: 298150234 [ Kalesh Singh - Fix trivial conflicts in zs_page_putback()] Signed-off-by: Kalesh Singh --- mm/zsmalloc.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 33d8357fdbf2..9fb906d56e0b 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1816,6 +1816,7 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage, static bool zs_page_isolate(struct page *page, isolate_mode_t mode) { + struct zs_pool *pool; struct zspage *zspage; /* @@ -1826,9 +1827,10 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode) VM_BUG_ON_PAGE(PageIsolated(page), page); zspage = get_zspage(page); - migrate_write_lock(zspage); + pool = zspage->pool; + spin_lock(&pool->lock); inc_zspage_isolation(zspage); - migrate_write_unlock(zspage); + spin_unlock(&pool->lock); return true; } @@ -1895,12 +1897,12 @@ static int zs_page_migrate(struct page *newpage, struct page *page, kunmap_atomic(s_addr); replace_sub_page(class, zspage, newpage, page); + dec_zspage_isolation(zspage); /* * Since we complete the data copy and set up new zspage structure, * it's okay to release the pool's lock. 
*/ spin_unlock(&pool->lock); - dec_zspage_isolation(zspage); migrate_write_unlock(zspage); get_page(newpage); @@ -1918,14 +1920,16 @@ static int zs_page_migrate(struct page *newpage, struct page *page, static void zs_page_putback(struct page *page) { struct zspage *zspage; + struct zs_pool *pool; VM_BUG_ON_PAGE(!PageMovable(page), page); VM_BUG_ON_PAGE(!PageIsolated(page), page); zspage = get_zspage(page); - migrate_write_lock(zspage); + pool = zspage->pool; + spin_lock(&pool->lock); dec_zspage_isolation(zspage); - migrate_write_unlock(zspage); + spin_unlock(&pool->lock); } static const struct movable_operations zsmalloc_mops = { From 4b2386aaae72ce45a6c1c64955a9c4e20460abd2 Mon Sep 17 00:00:00 2001 From: Kalesh Singh Date: Fri, 25 Aug 2023 09:58:36 -0700 Subject: [PATCH 152/163] ANDROID: GKI: Update ABI for zsmalloc fixes zs_pool->lock was added upstream as a replacement for the size_class locks. The tooling over-cautiously reports this as a ABI breakage but both of these structs (zs_pool and size_class) are internal to zsmalloc.c. Update the ABI to allow these changes. 
Bug: 297093100 Bug: 298150234 Change-Id: Ib9fc5a036f75d89fb6bee4c146034f6c81759e04 Signed-off-by: Kalesh Singh --- android/abi_gki_aarch64.stg | 68 ++++++++++++++++++------------------- 1 file changed, 33 insertions(+), 35 deletions(-) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 0a050f0bf426..92a4330310cc 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -91948,10 +91948,9 @@ member { offset: 712 } member { - id: 0xf667d80f + id: 0xf667dcee name: "fullness_list" type_id: 0xb8bf135c - offset: 64 } member { id: 0xfeb50ea0 @@ -103779,12 +103778,6 @@ member { type_id: 0x4585663f offset: 320 } -member { - id: 0xad7c8a98 - name: "index" - type_id: 0x4585663f - offset: 672 -} member { id: 0xad7c8ba4 name: "index" @@ -103797,6 +103790,12 @@ member { type_id: 0x4585663f offset: 480 } +member { + id: 0xad7c8d2b + name: "index" + type_id: 0x4585663f + offset: 608 +} member { id: 0xad7c8d72 name: "index" @@ -115578,6 +115577,12 @@ member { type_id: 0xf313e71a offset: 768 } +member { + id: 0x2d1fe43b + name: "lock" + type_id: 0xf313e71a + offset: 17536 +} member { id: 0x2d1fe44c name: "lock" @@ -123438,12 +123443,6 @@ member { type_id: 0x2c8b0a9f offset: 768 } -member { - id: 0xdb33fcdf - name: "migrate_lock" - type_id: 0xf4933b90 - offset: 17536 -} member { id: 0x8edaa968 name: "migrate_page" @@ -136400,10 +136399,10 @@ member { type_id: 0xad7c0a89 } member { - id: 0x7a226550 + id: 0x7a226b7d name: "objs_per_zspage" type_id: 0x6720d32f - offset: 608 + offset: 544 } member { id: 0x33953b25 @@ -141342,10 +141341,10 @@ member { bitsize: 1 } member { - id: 0x338646f2 + id: 0x338649f9 name: "pages_per_zspage" type_id: 0x6720d32f - offset: 640 + offset: 576 } member { id: 0xf9521fd2 @@ -173982,18 +173981,18 @@ member { type_id: 0x6720d32f offset: 96 } -member { - id: 0xd91935d3 - name: "size" - type_id: 0x6720d32f - offset: 576 -} member { id: 0xd9193607 name: "size" type_id: 0x6720d32f offset: 896 } +member { + id: 
0xd91937b9 + name: "size" + type_id: 0x6720d32f + offset: 512 +} member { id: 0xd9193b66 name: "size" @@ -179931,10 +179930,10 @@ member { offset: 896 } member { - id: 0xb91e0d04 + id: 0xb91e0940 name: "stats" type_id: 0x6b61371d - offset: 704 + offset: 640 } member { id: 0xb920e0d3 @@ -249070,14 +249069,13 @@ struct_union { kind: STRUCT name: "size_class" definition { - bytesize: 136 - member_id: 0x2d1fec85 - member_id: 0xf667d80f - member_id: 0xd91935d3 - member_id: 0x7a226550 - member_id: 0x338646f2 - member_id: 0xad7c8a98 - member_id: 0xb91e0d04 + bytesize: 128 + member_id: 0xf667dcee + member_id: 0xd91937b9 + member_id: 0x7a226b7d + member_id: 0x338649f9 + member_id: 0xad7c8d2b + member_id: 0xb91e0940 } } struct_union { @@ -265101,7 +265099,7 @@ struct_union { member_id: 0xb9089225 member_id: 0x868caa9e member_id: 0x8a67a9e5 - member_id: 0xdb33fcdf + member_id: 0x2d1fe43b } } struct_union { From 733464d354f8fc3518bcb022594694d9a7a5a354 Mon Sep 17 00:00:00 2001 From: sunshijie Date: Mon, 21 Aug 2023 20:16:48 +0800 Subject: [PATCH 153/163] UPSTREAM: erofs: initialize packed inode after root inode is assigned As commit 8f7acdae2cd4 ("staging: erofs: kill all failure handling in fill_super()"), move the initialization of packed inode after root inode is assigned, so that the iput() in .put_super() is adequate as the failure handling. Otherwise, iput() is also needed in .kill_sb(), in case of the mounting fails halfway. 
Signed-off-by: Jingbo Xu Reviewed-by: Yue Hu Fixes: b15b2e307c3a ("erofs: support on-disk compressed fragments data") Reviewed-by: Gao Xiang Acked-by: Chao Yu Link: https://lore.kernel.org/r/20230407141710.113882-3-jefflexu@linux.alibaba.com Signed-off-by: Gao Xiang (cherry picked from commit cb9bce79514392a9a216ff67148e05e2d72c28bd https://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git dev) Bug: 296824280 Bug: 298150234 Change-Id: I3cec91605b42c588e2c8f69629f0bdcc20078de2 Signed-off-by: sunshijie Signed-off-by: sunshijie (cherry picked from commit 9089c10d9c5aed96b32144b06884904a85a35fbd) --- fs/erofs/internal.h | 1 + fs/erofs/super.c | 22 +++++++++++----------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index e51f27b6bde1..4868000806d8 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -154,6 +154,7 @@ struct erofs_sb_info { /* what we really care is nid, rather than ino.. */ erofs_nid_t root_nid; + erofs_nid_t packed_nid; /* used for statfs, f_files - f_favail */ u64 inos; diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 626a615dafc2..bd8bf8fc2f5d 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -381,17 +381,7 @@ static int erofs_read_superblock(struct super_block *sb) #endif sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact)); sbi->root_nid = le16_to_cpu(dsb->root_nid); -#ifdef CONFIG_EROFS_FS_ZIP - sbi->packed_inode = NULL; - if (erofs_sb_has_fragments(sbi) && dsb->packed_nid) { - sbi->packed_inode = - erofs_iget(sb, le64_to_cpu(dsb->packed_nid)); - if (IS_ERR(sbi->packed_inode)) { - ret = PTR_ERR(sbi->packed_inode); - goto out; - } - } -#endif + sbi->packed_nid = le64_to_cpu(dsb->packed_nid); sbi->inos = le64_to_cpu(dsb->inos); sbi->build_time = le64_to_cpu(dsb->build_time); @@ -800,6 +790,16 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) erofs_shrinker_register(sb); /* sb->s_umount is already locked, SB_ACTIVE and SB_BORN 
are not set */ +#ifdef CONFIG_EROFS_FS_ZIP + if (erofs_sb_has_fragments(sbi) && sbi->packed_nid) { + sbi->packed_inode = erofs_iget(sb, sbi->packed_nid); + if (IS_ERR(sbi->packed_inode)) { + err = PTR_ERR(sbi->packed_inode); + sbi->packed_inode = NULL; + return err; + } + } +#endif err = erofs_init_managed_cache(sb); if (err) return err; From 3a48a9a0b7be6548c135d6cc413446650dd17177 Mon Sep 17 00:00:00 2001 From: sunshijie Date: Mon, 21 Aug 2023 20:32:51 +0800 Subject: [PATCH 154/163] UPSTREAM: erofs: stop parsing non-compact HEAD index if clusterofs is invalid Syzbot generated a crafted image [1] with a non-compact HEAD index of clusterofs 33024 while valid numbers should be 0 ~ lclustersize-1, which causes the following unexpected behavior as below: BUG: unable to handle page fault for address: fffff52101a3fff9 #PF: supervisor read access in kernel mode #PF: error_code(0x0000) - not-present page PGD 23ffed067 P4D 23ffed067 PUD 0 Oops: 0000 [#1] PREEMPT SMP KASAN CPU: 1 PID: 4398 Comm: kworker/u5:1 Not tainted 6.3.0-rc6-syzkaller-g09a9639e56c0 #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 03/30/2023 Workqueue: erofs_worker z_erofs_decompressqueue_work RIP: 0010:z_erofs_decompress_queue+0xb7e/0x2b40 ... Call Trace: z_erofs_decompressqueue_work+0x99/0xe0 process_one_work+0x8f6/0x1170 worker_thread+0xa63/0x1210 kthread+0x270/0x300 ret_from_fork+0x1f/0x30 Note that normal images or images using compact indexes are not impacted. Let's fix this now. 
[1] https://lore.kernel.org/r/000000000000ec75b005ee97fbaa@google.com Reported-and-tested-by: syzbot+aafb3f37cfeb6534c4ac@syzkaller.appspotmail.com Fixes: 02827e1796b3 ("staging: erofs: add erofs_map_blocks_iter") Fixes: 152a333a5895 ("staging: erofs: add compacted compression indexes support") Signed-off-by: Gao Xiang Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20230410173714.104604-1-hsiangkao@linux.alibaba.com (cherry picked from commit cc4efd3dd2ac9f89143e5d881609747ecff04164 https://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git dev) Bug: 296824280 Bug: 298150234 Change-Id: I8e4d7d3f30d70f8c4ab42b33f215af1292c57fcf Signed-off-by: sunshijie Signed-off-by: sunshijie (cherry picked from commit 6ec6eee87e03e09ad850b003c50f15d9910da6a8) --- fs/erofs/zmap.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 39cc014dba40..bb91cc649972 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -211,6 +211,10 @@ static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m, if (advise & Z_EROFS_VLE_DI_PARTIAL_REF) m->partialref = true; m->clusterofs = le16_to_cpu(di->di_clusterofs); + if (m->clusterofs >= 1 << vi->z_logical_clusterbits) { + DBG_BUGON(1); + return -EFSCORRUPTED; + } m->pblk = le32_to_cpu(di->di_u.blkaddr); break; default: From 6a068e79e0522518fd013d093a76d79551404607 Mon Sep 17 00:00:00 2001 From: sunshijie Date: Mon, 21 Aug 2023 20:37:38 +0800 Subject: [PATCH 155/163] UPSTREAM: erofs: fix potential overflow calculating xattr_isize Given on-disk i_xattr_icount is 16 bits and xattr_isize is calculated from i_xattr_icount multiplying 4, xattr_isize has a theoretical maximum of 256K (64K * 4). Thus declare xattr_isize as unsigned int to avoid the potential overflow. 
Fixes: bfb8674dc044 ("staging: erofs: add erofs in-memory stuffs") Signed-off-by: Jingbo Xu Reviewed-by: Gao Xiang Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20230414061810.6479-1-jefflexu@linux.alibaba.com Signed-off-by: Gao Xiang (cherry picked from commit 1b3567a1969b26f709d82a874498c0754ea841c3 https://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git dev) Bug: 296824280 Bug: 298150234 Change-Id: I43d88c7ebc3b320e226ab4d7bc6717432ef5ad82 Signed-off-by: sunshijie Signed-off-by: sunshijie (cherry picked from commit 7521b904dce7df25e44fde5d679169fda99402a8) --- fs/erofs/internal.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 4868000806d8..340bd56a5755 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -311,7 +311,7 @@ struct erofs_inode { unsigned char datalayout; unsigned char inode_isize; - unsigned short xattr_isize; + unsigned int xattr_isize; unsigned int xattr_shared_count; unsigned int *xattr_shared_xattrs; From 454b13b72e8fbc4933a5ff8ec958e4bf9203a907 Mon Sep 17 00:00:00 2001 From: sunshijie Date: Mon, 21 Aug 2023 20:51:35 +0800 Subject: [PATCH 156/163] UPSTREAM: erofs: kill hooked chains to avoid loops on deduplicated compressed images After heavily stressing EROFS with several images which include a hand-crafted image of repeated patterns for more than 46 days, I found two chains could be linked with each other almost simultaneously and form a loop so that the entire loop won't be submitted. As a consequence, the corresponding file pages will remain locked forever. It can be _only_ observed on data-deduplicated compressed images. For example, consider two chains with five pclusters in total: Chain 1: 2->3->4->5 -- The tail pcluster is 5; Chain 2: 5->1->2 -- The tail pcluster is 2. Chain 2 could link to Chain 1 with pcluster 5; and Chain 1 could link to Chain 2 at the same time with pcluster 2. 
Since hooked chains are all linked locklessly now, I have no idea how to simply avoid the race. Instead, let's avoid hooked chains completely until I could work out a proper way to fix this and end users finally tell us that it's needed to add it back. Actually, this optimization can be found with multi-threaded workloads (especially even more often on deduplicated compressed images), yet I'm not sure about the overall system impacts of not having this compared with implementation complexity. Fixes: 267f2492c8f7 ("erofs: introduce multi-reference pclusters (fully-referenced)") Signed-off-by: Gao Xiang Reviewed-by: Yue Hu Link: https://lore.kernel.org/r/20230526201459.128169-4-hsiangkao@linux.alibaba.com Signed-off-by: Gao Xiang (cherry picked from commit 967c28b23f6c89bb8eef6a046ea88afe0d7c1029 https://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git dev) Bug: 296824280 Bug: 298150234 Change-Id: I33607c174bfeb54119c6de271b44c9fe2a7399e6 Signed-off-by: sunshijie Signed-off-by: sunshijie (cherry picked from commit f11ccb03a03e6c164b0300540d7abe0fb1c4096f) --- fs/erofs/zdata.c | 67 ++++++++---------------------------------------- fs/erofs/zdata.h | 5 +--- 2 files changed, 11 insertions(+), 61 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 3d1b88efb075..9076f3324cb1 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -355,20 +355,6 @@ int __init z_erofs_init_zip_subsystem(void) enum z_erofs_pclustermode { Z_EROFS_PCLUSTER_INFLIGHT, - /* - * The current pclusters was the tail of an exist chain, in addition - * that the previous processed chained pclusters are all decided to - * be hooked up to it. 
- * A new chain will be created for the remaining pclusters which are - * not processed yet, so different from Z_EROFS_PCLUSTER_FOLLOWED, - * the next pcluster cannot reuse the whole page safely for inplace I/O - * in the following scenario: - * ________________________________________________________________ - * | tail (partial) page | head (partial) page | - * | (belongs to the next pcl) | (belongs to the current pcl) | - * |_______PCLUSTER_FOLLOWED______|________PCLUSTER_HOOKED__________| - */ - Z_EROFS_PCLUSTER_HOOKED, /* * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it * could be dispatched into bypass queue later due to uptodated managed @@ -386,8 +372,8 @@ enum z_erofs_pclustermode { * ________________________________________________________________ * | tail (partial) page | head (partial) page | * | (of the current cl) | (of the previous collection) | - * | PCLUSTER_FOLLOWED or | | - * |_____PCLUSTER_HOOKED__|___________PCLUSTER_FOLLOWED____________| + * | | | + * |__PCLUSTER_FOLLOWED___|___________PCLUSTER_FOLLOWED____________| * * [ (*) the above page can be used as inplace I/O. ] */ @@ -400,7 +386,7 @@ struct z_erofs_decompress_frontend { struct z_erofs_bvec_iter biter; struct page *candidate_bvpage; - struct z_erofs_pcluster *pcl, *tailpcl; + struct z_erofs_pcluster *pcl; z_erofs_next_pcluster_t owned_head; enum z_erofs_pclustermode mode; @@ -589,19 +575,7 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f) return; } - /* - * type 2, link to the end of an existing open chain, be careful - * that its submission is controlled by the original attached chain. 
- */ - if (*owned_head != &pcl->next && pcl != f->tailpcl && - cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, - *owned_head) == Z_EROFS_PCLUSTER_TAIL) { - *owned_head = Z_EROFS_PCLUSTER_TAIL; - f->mode = Z_EROFS_PCLUSTER_HOOKED; - f->tailpcl = NULL; - return; - } - /* type 3, it belongs to a chain, but it isn't the end of the chain */ + /* type 2, it belongs to an ongoing chain */ f->mode = Z_EROFS_PCLUSTER_INFLIGHT; } @@ -662,9 +636,6 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) goto err_out; } } - /* used to check tail merging loop due to corrupted images */ - if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL) - fe->tailpcl = pcl; fe->owned_head = &pcl->next; fe->pcl = pcl; return 0; @@ -685,7 +656,6 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe) /* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */ DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL); - DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); if (!(map->m_flags & EROFS_MAP_META)) { grp = erofs_find_workgroup(fe->inode->i_sb, @@ -704,10 +674,6 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe) if (ret == -EEXIST) { mutex_lock(&fe->pcl->lock); - /* used to check tail merging loop due to corrupted images */ - if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL) - fe->tailpcl = fe->pcl; - z_erofs_try_to_claim_pcluster(fe); } else if (ret) { return ret; @@ -887,8 +853,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, * those chains are handled asynchronously thus the page cannot be used * for inplace I/O or bvpage (should be processed in a strict order.) 
*/ - tight &= (fe->mode >= Z_EROFS_PCLUSTER_HOOKED && - fe->mode != Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE); + tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE); cur = end - min_t(unsigned int, offset + end - map->m_la, end); if (!(map->m_flags & EROFS_MAP_MAPPED)) { @@ -1270,11 +1235,7 @@ static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io, LIST_HEAD_INIT(be.decompressed_secondary_bvecs), }; z_erofs_next_pcluster_t owned = io->head; - - while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) { - /* impossible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */ - DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL); - /* impossible that 'owned' equals Z_EROFS_PCLUSTER_NIL */ + while (owned != Z_EROFS_PCLUSTER_TAIL) { DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL); be.pcl = container_of(owned, struct z_erofs_pcluster, next); @@ -1291,7 +1252,7 @@ static void z_erofs_decompressqueue_work(struct work_struct *work) container_of(work, struct z_erofs_decompressqueue, u.work); struct page *pagepool = NULL; - DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED); + DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL); z_erofs_decompress_queue(bgq, &pagepool); erofs_release_pages(&pagepool); kvfree(bgq); @@ -1483,7 +1444,7 @@ jobqueue_init(struct super_block *sb, q->eio = false; } q->sb = sb; - q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED; + q->head = Z_EROFS_PCLUSTER_TAIL; return q; } @@ -1515,11 +1476,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT]; z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS]; - DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); - if (owned_head == Z_EROFS_PCLUSTER_TAIL) - owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED; - - WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED); + WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL); WRITE_ONCE(*submit_qtail, owned_head); WRITE_ONCE(*bypass_qtail, &pcl->next); @@ -1586,15 +1543,11 @@ static void z_erofs_submit_queue(struct 
z_erofs_decompress_frontend *f, unsigned int i = 0; bool bypass = true; - /* no possible 'owned_head' equals the following */ - DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED); DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL); pcl = container_of(owned_head, struct z_erofs_pcluster, next); + owned_head = READ_ONCE(pcl->next); - /* close the main owned chain at first */ - owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, - Z_EROFS_PCLUSTER_TAIL_CLOSED); if (z_erofs_is_inline_pcluster(pcl)) { move_to_bypass_jobqueue(pcl, qtail, owned_head); continue; diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h index 4588a47c867e..9fd6abe967a9 100644 --- a/fs/erofs/zdata.h +++ b/fs/erofs/zdata.h @@ -94,11 +94,8 @@ struct z_erofs_pcluster { /* let's avoid the valid 32-bit kernel addresses */ -/* the chained workgroup has't submitted io (still open) */ +/* the end of a chain of pclusters */ #define Z_EROFS_PCLUSTER_TAIL ((void *)0x5F0ECAFE) -/* the chained workgroup has already submitted io */ -#define Z_EROFS_PCLUSTER_TAIL_CLOSED ((void *)0x5F0EDEAD) - #define Z_EROFS_PCLUSTER_NIL (NULL) struct z_erofs_decompressqueue { From e4a984b57293d74ebc1f978e754d89d1bfd5313f Mon Sep 17 00:00:00 2001 From: sunshijie Date: Mon, 21 Aug 2023 20:55:20 +0800 Subject: [PATCH 157/163] UPSTREAM: erofs: fix compact 4B support for 16k block size In compact 4B, two adjacent lclusters are packed together as a unit to form on-disk indexes for effective random access, as below: (amortized = 4, vcnt = 2) _____________________________________________ |___@_____ encoded bits __________|_ blkaddr _| 0 . amortized * vcnt = 8 . . . . amortized * vcnt - 4 = 4 . . .____________________________. |_type (2 bits)_|_clusterofs_| Therefore, encoded bits for each pack are 32 bits (4 bytes). IOWs, since each lcluster can get 16 bits for its type and clusterofs, the maximum supported lclustersize for compact 4B format is 16k (14 bits). 
Fix this to enable compact 4B format for 16k lclusters (blocks), which is tested on an arm64 server with 16k page size. Fixes: 152a333a5895 ("staging: erofs: add compacted compression indexes support") Signed-off-by: Gao Xiang Link: https://lore.kernel.org/r/20230601112341.56960-1-hsiangkao@linux.alibaba.com Signed-off-by: Gao Xiang (cherry picked from commit 001b8ccd0650727e54ec16ef72bf1b8eeab7168e https://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git dev) Bug: 296824280 Bug: 298150234 Change-Id: I97918294a1d00a65223e741c3d153f375ab50507 Signed-off-by: sunshijie Signed-off-by: sunshijie (cherry picked from commit cc6111a28721e80f741d0ed05c1d6badddb1bcee) --- fs/erofs/zmap.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index bb91cc649972..3adab0d9cbe0 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -273,7 +273,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, u8 *in, type; bool big_pcluster; - if (1 << amortizedshift == 4) + if (1 << amortizedshift == 4 && lclusterbits <= 14) vcnt = 2; else if (1 << amortizedshift == 2 && lclusterbits == 12) vcnt = 16; @@ -375,7 +375,6 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m, { struct inode *const inode = m->inode; struct erofs_inode *const vi = EROFS_I(inode); - const unsigned int lclusterbits = vi->z_logical_clusterbits; const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) + vi->inode_isize + vi->xattr_isize, 8) + sizeof(struct z_erofs_map_header); @@ -384,9 +383,6 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m, unsigned int amortizedshift; erofs_off_t pos; - if (lclusterbits != 12) - return -EOPNOTSUPP; - if (lcn >= totalidx) return -EINVAL; From c99b4f29fb0235a0be1c494732ebf4480ee0a7d5 Mon Sep 17 00:00:00 2001 From: sunshijie Date: Mon, 21 Aug 2023 20:57:44 +0800 Subject: [PATCH 158/163] UPSTREAM: erofs: Fix detection of atomic context Current check for 
atomic context is not sufficient as z_erofs_decompressqueue_endio can be called under rcu lock from blk_mq_flush_plug_list(). See the stacktrace [1] In such case we should hand off the decompression work for async processing rather than trying to do sync decompression in current context. Patch fixes the detection by checking for rcu_read_lock_any_held() and while at it use more appropriate !in_task() check than in_atomic(). Background: Historically erofs would always schedule a kworker for decompression which would incur the scheduling cost regardless of the context. But z_erofs_decompressqueue_endio() may not always be in atomic context and we could actually benefit from doing the decompression in z_erofs_decompressqueue_endio() if we are in thread context, for example when running with dm-verity. This optimization was later added in patch [2] which has shown improvement in performance benchmarks. ============================================== [1] Problem stacktrace [name:core&]BUG: sleeping function called from invalid context at kernel/locking/mutex.c:291 [name:core&]in_atomic(): 0, irqs_disabled(): 0, non_block: 0, pid: 1615, name: CpuMonitorServi [name:core&]preempt_count: 0, expected: 0 [name:core&]RCU nest depth: 1, expected: 0 CPU: 7 PID: 1615 Comm: CpuMonitorServi Tainted: G S W OE 6.1.25-android14-5-maybe-dirty-mainline #1 Hardware name: MT6897 (DT) Call trace: dump_backtrace+0x108/0x15c show_stack+0x20/0x30 dump_stack_lvl+0x6c/0x8c dump_stack+0x20/0x48 __might_resched+0x1fc/0x308 __might_sleep+0x50/0x88 mutex_lock+0x2c/0x110 z_erofs_decompress_queue+0x11c/0xc10 z_erofs_decompress_kickoff+0x110/0x1a4 z_erofs_decompressqueue_endio+0x154/0x180 bio_endio+0x1b0/0x1d8 __dm_io_complete+0x22c/0x280 clone_endio+0xe4/0x280 bio_endio+0x1b0/0x1d8 blk_update_request+0x138/0x3a4 blk_mq_plug_issue_direct+0xd4/0x19c blk_mq_flush_plug_list+0x2b0/0x354 __blk_flush_plug+0x110/0x160 blk_finish_plug+0x30/0x4c read_pages+0x2fc/0x370 page_cache_ra_unbounded+0xa4/0x23c 
page_cache_ra_order+0x290/0x320 do_sync_mmap_readahead+0x108/0x2c0 filemap_fault+0x19c/0x52c __do_fault+0xc4/0x114 handle_mm_fault+0x5b4/0x1168 do_page_fault+0x338/0x4b4 do_translation_fault+0x40/0x60 do_mem_abort+0x60/0xc8 el0_da+0x4c/0xe0 el0t_64_sync_handler+0xd4/0xfc el0t_64_sync+0x1a0/0x1a4 [2] Link: https://lore.kernel.org/all/20210317035448.13921-1-huangjianan@oppo.com/ Reported-by: Will Shiu Suggested-by: Gao Xiang Signed-off-by: Sandeep Dhavale Reviewed-by: Gao Xiang Reviewed-by: Alexandre Mergnat Link: https://lore.kernel.org/r/20230621220848.3379029-1-dhavale@google.com Signed-off-by: Gao Xiang (cherry picked from commit 12d0a24afd9ea58e581ea64d64e066f2027b28d9 https://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git dev) Bug: 296824280 Bug: 298150234 Change-Id: I652b189e316b26ca56e1d7b6f1e4c52ae20bb3b7 Signed-off-by: sunshijie Signed-off-by: sunshijie (cherry picked from commit 2f805fb91250695fe5a0475cd6dcdc3337114fb6) --- fs/erofs/zdata.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 9076f3324cb1..e9a07ac7bb26 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1280,7 +1280,7 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, if (atomic_add_return(bios, &io->pending_bios)) return; /* Use (kthread_)work and sync decompression for atomic contexts only */ - if (in_atomic() || irqs_disabled()) { + if (!in_task() || irqs_disabled() || rcu_read_lock_any_held()) { #ifdef CONFIG_EROFS_FS_PCPU_KTHREAD struct kthread_worker *worker; From 8fdad5df397ecb1f035f7b2f279f6448855d8f60 Mon Sep 17 00:00:00 2001 From: sunshijie Date: Mon, 21 Aug 2023 21:00:15 +0800 Subject: [PATCH 159/163] UPSTREAM: erofs: avoid useless loops in z_erofs_pcluster_readmore() when reading beyond EOF z_erofs_pcluster_readmore() may take a long time to loop when the page offset is large enough, which is unnecessary and should be prevented. 
For example, when the following case is encountered, it will loop 4691368 times, taking about 27 seconds: - offset = 19217289215 - inode_size = 1442672 Signed-off-by: Chunhai Guo Fixes: 386292919c25 ("erofs: introduce readmore decompression strategy") Reviewed-by: Gao Xiang Reviewed-by: Yue Hu Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20230710042531.28761-1-guochunhai@vivo.com Signed-off-by: Gao Xiang (cherry picked from commit 936aa701d82d397c2d1afcd18ce2c739471d978d https://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git dev) Bug: 296824280 Bug: 298150234 Change-Id: I279b0fadcfa8c0ff0d638a86c7bb2c6b4d07f194 Signed-off-by: sunshijie Signed-off-by: sunshijie (cherry picked from commit 8497f46a873656ba8c6f79895d70107e2d6ac9d0) --- fs/erofs/zdata.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index e9a07ac7bb26..01c1ab655277 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1691,7 +1691,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, } cur = map->m_la + map->m_llen - 1; - while (cur >= end) { + while ((cur >= end) && (cur < i_size_read(inode))) { pgoff_t index = cur >> PAGE_SHIFT; struct page *page; From 3fafd915fc395d93f23235ced0b362d8e25a3e00 Mon Sep 17 00:00:00 2001 From: sunshijie Date: Mon, 21 Aug 2023 21:02:05 +0800 Subject: [PATCH 160/163] UPSTREAM: erofs: avoid infinite loop in z_erofs_do_read_page() when reading beyond EOF z_erofs_do_read_page() may loop infinitely due to the inappropriate truncation in the below statement. Since the offset is 64 bits and min_t() truncates the result to 32 bits. The solution is to replace unsigned int with a 64-bit type, such as erofs_off_t. 
cur = end - min_t(unsigned int, offset + end - map->m_la, end); - For example: - offset = 0x400160000 - end = 0x370 - map->m_la = 0x160370 - offset + end - map->m_la = 0x400000000 - offset + end - map->m_la = 0x00000000 (truncated as unsigned int) - Expected result: - cur = 0 - Actual result: - cur = 0x370 Signed-off-by: Chunhai Guo Fixes: 3883a79abd02 ("staging: erofs: introduce VLE decompression support") Reviewed-by: Gao Xiang Reviewed-by: Chao Yu Link: https://lore.kernel.org/r/20230710093410.44071-1-guochunhai@vivo.com Signed-off-by: Gao Xiang (cherry picked from commit 8191213a5835b0317c5e4d0d337ae1ae00c75253 https://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git dev) Bug: 296824280 Bug: 298150234 Change-Id: I152508ba4c0eb83aeae5d753e22b0ca8d3ada56d Signed-off-by: sunshijie Signed-off-by: sunshijie (cherry picked from commit ffaab71302ca81ac4addbc63f5a81be37988595a) --- fs/erofs/zdata.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 01c1ab655277..a08299d80ca1 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -855,7 +855,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, */ tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE); - cur = end - min_t(unsigned int, offset + end - map->m_la, end); + cur = end - min_t(erofs_off_t, offset + end - map->m_la, end); if (!(map->m_flags & EROFS_MAP_MAPPED)) { zero_user_segment(page, cur, end); goto next_part; From 270b36a3a18a0bfb5271be22a3370d9ddf371903 Mon Sep 17 00:00:00 2001 From: zhengjiacheng Date: Fri, 18 Aug 2023 14:29:46 +0800 Subject: [PATCH 161/163] ANDROID: GKI: update symbol list file for xiaomi INFO: ABI DIFFERENCES HAVE BEEN DETECTED! 
INFO: 2 function symbol(s) added 'void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)' 'int pci_disable_pcie_error_reporting(struct pci_dev *dev)' Bug: 296484741 Bug: 298146754 Bug: 298150234 Change-Id: I8e4b871947929f3ba55f1cad5a922b9c377044d1 Signed-off-by: zhengjiacheng (cherry picked from commit c2611a04b92f0e6a38f718c50605300a325b7c7b) --- android/abi_gki_aarch64.stg | 26 ++++++++++++++++++++++++++ android/abi_gki_aarch64_xiaomi | 4 ++++ 2 files changed, 30 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 92a4330310cc..a725534e2b17 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -295321,6 +295321,12 @@ function { id: 0x534abe77 return_type_id: 0x295c7202 } +function { + id: 0x5373932e + return_type_id: 0x18bd6530 + parameter_id: 0x11e6864c + parameter_id: 0x6720d32f +} function { id: 0x537f4e77 return_type_id: 0x18bd6530 @@ -363268,6 +363274,15 @@ elf_symbol { type_id: 0x14e1f000 full_name: "pci_disable_msix" } +elf_symbol { + id: 0xc3f9d559 + name: "pci_disable_pcie_error_reporting" + is_defined: true + symbol_type: FUNCTION + crc: 0xb4006e89 + type_id: 0x99f942bc + full_name: "pci_disable_pcie_error_reporting" +} elf_symbol { id: 0xb9d0b3a8 name: "pci_disable_sriov" @@ -363493,6 +363508,15 @@ elf_symbol { type_id: 0x537f83a2 full_name: "pci_iomap_range" } +elf_symbol { + id: 0x1c994923 + name: "pci_ioremap_bar" + is_defined: true + symbol_type: FUNCTION + crc: 0xccd07b76 + type_id: 0x5373932e + full_name: "pci_ioremap_bar" +} elf_symbol { id: 0xec861eec name: "pci_iounmap" @@ -389624,6 +389648,7 @@ interface { symbol_id: 0x7cd4622d symbol_id: 0x5dfeb70f symbol_id: 0x01a00013 + symbol_id: 0xc3f9d559 symbol_id: 0xb9d0b3a8 symbol_id: 0x96b46225 symbol_id: 0x35a3d772 @@ -389649,6 +389674,7 @@ interface { symbol_id: 0xbe6406c3 symbol_id: 0x9c6c58ea symbol_id: 0x2fefe933 + symbol_id: 0x1c994923 symbol_id: 0xec861eec symbol_id: 0x5810bdc3 symbol_id: 0x3cbd6214 diff --git 
a/android/abi_gki_aarch64_xiaomi b/android/abi_gki_aarch64_xiaomi index 21ad5310ac70..5a0852cf19f6 100644 --- a/android/abi_gki_aarch64_xiaomi +++ b/android/abi_gki_aarch64_xiaomi @@ -328,3 +328,7 @@ dma_need_sync page_pool_put_page_bulk build_skb_around + +#required by xm_ispv4_pcie.ko + pci_ioremap_bar + pci_disable_pcie_error_reporting From eb739ed4cf12407543b02fb484eda2a546fcecff Mon Sep 17 00:00:00 2001 From: Heikki Krogerus Date: Tue, 6 Jun 2023 14:58:02 +0300 Subject: [PATCH 162/163] UPSTREAM: usb: typec: ucsi: Fix command cancellation The Cancel command was passed to the write callback as the offset instead of as the actual command which caused NULL pointer dereference. Reported-by: Stephan Bolten Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217517 Fixes: 094902bc6a3c ("usb: typec: ucsi: Always cancel the command if PPM reports BUSY condition") Cc: stable@vger.kernel.org Signed-off-by: Heikki Krogerus Message-ID: <20230606115802.79339-1-heikki.krogerus@linux.intel.com> Signed-off-by: Greg Kroah-Hartman Bug: 298914157 Bug: 298597334 Change-Id: I7f23e49c58b566f462ba34f76966db662308a5bc (cherry picked from commit c4a8bfabefed706bb9150867db528ceefd5cb5fe) Signed-off-by: Udipto Goswami (cherry picked from commit b15c3a3df0a82212ed2d1f2088e2246ec2b6d965) --- drivers/usb/typec/ucsi/ucsi.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index 086b50968983..47a2c73df342 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -132,10 +132,8 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd) if (ret) return ret; - if (cci & UCSI_CCI_BUSY) { - ucsi->ops->async_write(ucsi, UCSI_CANCEL, NULL, 0); - return -EBUSY; - } + if (cmd != UCSI_CANCEL && cci & UCSI_CCI_BUSY) + return ucsi_exec_command(ucsi, UCSI_CANCEL); if (!(cci & UCSI_CCI_COMMAND_COMPLETE)) return -EIO; @@ -149,6 +147,11 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd) 
return ucsi_read_error(ucsi); } + if (cmd == UCSI_CANCEL && cci & UCSI_CCI_CANCEL_COMPLETE) { + ret = ucsi_acknowledge_command(ucsi); + return ret ? ret : -EBUSY; + } + return UCSI_CCI_LENGTH(cci); } From f580df859bb06948e26f249d348a74348c237271 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Sat, 26 Aug 2023 01:32:59 +0000 Subject: [PATCH 163/163] ANDROID: uid_sys_stats: Use llist for deferred work A use-after-free bug was found in the previous custom lock-free list implementation for the deferred work, so switch functionality to llist implementation. While the previous approach atomically handled the list head, it did not assure the new node's next pointer was assigned before the head was pointed to the node, allowing the consumer to traverse to an invalid next pointer. Additionally, in switching to llists, this patch pulls the entire list off the list head once and processes it separately, reducing the number of atomic operations compared with the custom lists's implementation which pulled one node at a time atomically from the list head. 
BUG: KASAN: use-after-free in process_notifier+0x270/0x2dc Write of size 8 at addr d4ffff89545c3c58 by task Blocking Thread/3431 Pointer tag: [d4], memory tag: [fe] call trace: dump_backtrace+0xf8/0x118 show_stack+0x18/0x24 dump_stack_lvl+0x60/0x78 print_report+0x178/0x470 kasan_report+0x8c/0xbc kasan_tag_mismatch+0x28/0x3c __hwasan_tag_mismatch+0x30/0x60 process_notifier+0x270/0x2dc notifier_call_chain+0xb4/0x108 blocking_notifier_call_chain+0x54/0x80 profile_task_exit+0x20/0x2c do_exit+0xec/0x1114 __arm64_sys_exit_group+0x0/0x24 get_signal+0x93c/0xa78 do_notify_resume+0x158/0x3fc el0_svc+0x54/0x78 el0t_64_sync_handler+0x44/0xe4 el0t_64_sync+0x190/0x194 Bug: 294468796 Bug: 295787403 Bug: 299197985 Fixes: 8e86825eecfa ("ANDROID: uid_sys_stats: Use a single work for deferred updates") Change-Id: Id377348c239ec720a5237726bc3632544d737e3b Signed-off-by: John Stultz [nkapron: Squashed with other changes and rewrote the commit message] Signed-off-by: Neill Kapron (cherry picked from commit 87647c0c54bbfe865691d8b58988a3ce941b905e) --- drivers/misc/uid_sys_stats.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c index f5eaa63035ca..842597bbeb89 100644 --- a/drivers/misc/uid_sys_stats.c +++ b/drivers/misc/uid_sys_stats.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -636,22 +637,22 @@ struct update_stats_work { struct task_io_accounting ioac; u64 utime; u64 stime; - struct update_stats_work *next; + struct llist_node node; }; -static atomic_long_t work_usw; +static LLIST_HEAD(work_usw); static void update_stats_workfn(struct work_struct *work) { - struct update_stats_work *usw; + struct update_stats_work *usw, *t; struct uid_entry *uid_entry; struct task_entry *task_entry __maybe_unused; + struct llist_node *node; rt_mutex_lock(&uid_lock); - while ((usw = (struct update_stats_work *)atomic_long_read(&work_usw))) { - if 
(atomic_long_cmpxchg(&work_usw, (long)usw, (long)(usw->next)) != (long)usw) - continue; + node = llist_del_all(&work_usw); + llist_for_each_entry_safe(usw, t, node, node) { uid_entry = find_uid_entry(usw->uid); if (!uid_entry) goto next; @@ -664,7 +665,7 @@ static void update_stats_workfn(struct work_struct *work) if (!task_entry) goto next; add_uid_tasks_io_stats(task_entry, &usw->ioac, - UID_STATE_DEAD_TASKS); + UID_STATE_DEAD_TASKS); #endif __add_uid_io_stats(uid_entry, &usw->ioac, UID_STATE_DEAD_TASKS); next: @@ -704,8 +705,7 @@ static int process_notifier(struct notifier_block *self, */ usw->ioac = task->ioac; task_cputime_adjusted(task, &usw->utime, &usw->stime); - usw->next = (struct update_stats_work *)atomic_long_xchg(&work_usw, - (long)usw); + llist_add(&usw->node, &work_usw); schedule_work(&update_stats_work); } return NOTIFY_OK;