d1e5e6408b
The more sockets we have in the hash table, the longer we spend looking up the socket. When a number of small workloads run on the same host, they penalise each other and cause performance degradation. The root cause might be a single workload that consumes much more resources than the others. It often happens on a cloud service where different workloads share the same computing resource.

On an EC2 c5.24xlarge instance (196 GiB memory and 524288 (1Mi / 2) ehash entries), after running iperf3 in different netns, creating 24Mi sockets without data transfer in the root netns causes about 10% performance regression for the iperf3's connection.

  thash_entries   sockets   length   Gbps
         524288         1        1   50.7
         524288      24Mi       48   45.1

The regression is basically related to the length of the list in each hash bucket. To see how performance drops as the length grows, I set thash_entries to 131072 (1Mi / 8) for testing, and here's the result.

  thash_entries   sockets   length   Gbps
         131072         1        1   50.7
                      1Mi        8   49.9
                      2Mi       16   48.9
                      4Mi       32   47.3
                      8Mi       64   44.6
                     16Mi      128   40.6
                     24Mi      192   36.3
                     32Mi      256   32.5
                     40Mi      320   27.0
                     48Mi      384   25.0

To resolve the socket lookup degradation, we introduce an optional per-netns hash table for TCP. It covers only ehash; we still share the global bhash, bhash2 and lhash2. With a smaller ehash, we can look up non-listener sockets faster and isolate such noisy neighbours. In addition, we can reduce lock contention.

We can control the ehash size by a new sysctl knob. However, depending on workloads, it will require very sensitive tuning, so we disable the feature by default (net.ipv4.tcp_child_ehash_entries == 0). Also, we fall back to using the global ehash in case we fail to allocate enough memory for a new ehash. The maximum size is 16Mi, which is large enough that even with 48Mi sockets the average list length is 3, and the regression would be less than 1%.

We can check the current ehash size by another read-only sysctl knob, net.ipv4.tcp_ehash_entries. A negative value means the netns shares the global ehash (because the per-netns ehash is disabled or its allocation failed).

  # dmesg | cut -d ' ' -f 5- | grep "established hash"
  TCP established hash table entries: 524288 (order: 10, 4194304 bytes, vmalloc hugepage)

  # sysctl net.ipv4.tcp_ehash_entries
  net.ipv4.tcp_ehash_entries = 524288  # can be changed by thash_entries

  # sysctl net.ipv4.tcp_child_ehash_entries
  net.ipv4.tcp_child_ehash_entries = 0  # disabled by default

  # ip netns add test1
  # ip netns exec test1 sysctl net.ipv4.tcp_ehash_entries
  net.ipv4.tcp_ehash_entries = -524288  # share the global ehash

  # sysctl -w net.ipv4.tcp_child_ehash_entries=100
  net.ipv4.tcp_child_ehash_entries = 100

  # ip netns add test2
  # ip netns exec test2 sysctl net.ipv4.tcp_ehash_entries
  net.ipv4.tcp_ehash_entries = 128  # own a per-netns ehash with 2^n buckets

When two or more processes in the same netns create a per-netns ehash concurrently with different sizes, we need to guarantee the size in one of the following ways (a userspace sketch of the first follows this list):

  1) Share the global ehash and create per-netns ehash

  First, unshare() with tcp_child_ehash_entries==0. It creates dedicated netns sysctl knobs where we can safely change tcp_child_ehash_entries and clone()/unshare() to create a per-netns ehash.

  2) Control write on sysctl by BPF

  We can use BPF_PROG_TYPE_CGROUP_SYSCTL to allow/deny read/write on sysctl knobs.
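Here is a minimal userspace sketch of approach 1). It is not part of the patch: it assumes CAP_SYS_ADMIN and a kernel with this series applied, and error handling is trimmed for brevity.

#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <string.h>
#include <unistd.h>

#define EHASH_KNOB "/proc/sys/net/ipv4/tcp_child_ehash_entries"

int main(void)
{
	const char *entries = "1024";
	int fd;

	/* tcp_child_ehash_entries is still 0 here, so the new netns
	 * shares the global ehash but owns dedicated sysctl knobs.
	 */
	if (unshare(CLONE_NEWNET))
		return 1;

	/* Now the knob can be changed without racing with other
	 * processes; only netns created from here see the new value.
	 */
	fd = open(EHASH_KNOB, O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, entries, strlen(entries)) < 0)
		return 1;
	close(fd);

	/* This netns allocates its own ehash with 1024 (2^10) buckets. */
	if (unshare(CLONE_NEWNET))
		return 1;

	return 0;
}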
Note that the global ehash allocated at boot time is spread over available NUMA nodes, but inet_pernet_hashinfo_alloc() will allocate pages for each per-netns ehash depending on the current process's NUMA policy. By default, the allocation is done in the local node only, so the per-netns hash table could fully reside on a random node. Thus, depending on the NUMA policy the netns is created with and the CPU the current thread is running on, we could see some performance differences for highly optimised networking applications.

Note also that the default values of two sysctl knobs depend on the ehash size and should be tuned carefully (a short sketch of these formulas appears below):

  tcp_max_tw_buckets  : tcp_child_ehash_entries / 2
  tcp_max_syn_backlog : max(128, tcp_child_ehash_entries / 128)

As a bonus, we can dismantle netns faster. Currently, while destroying netns, we call inet_twsk_purge(), which walks through the global ehash. It can be potentially big because it can have many sockets other than TIME_WAIT in all netns. Splitting ehash changes that situation, so inet_twsk_purge() only has to clean up TIME_WAIT sockets in each netns. With regard to this, we do not free the per-netns ehash in inet_twsk_kill() to avoid a UAF while iterating the per-netns ehash in inet_twsk_purge(). Instead, we do it in tcp_sk_exit_batch() after calling tcp_twsk_purge() to keep it protocol-family-independent.

In the future, we could optimise ehash lookup/iteration further by removing the netns comparison for the per-netns ehash.
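To make the two defaults above concrete, here is a hypothetical helper pair; the names are illustrative only, not the kernel's actual code.

#include <stdio.h>

/* Hypothetical helpers mirroring the default formulas quoted above. */
static unsigned int tcp_default_max_tw_buckets(unsigned int ehash_entries)
{
	return ehash_entries / 2;
}

static unsigned int tcp_default_max_syn_backlog(unsigned int ehash_entries)
{
	unsigned int backlog = ehash_entries / 128;

	return backlog < 128 ? 128 : backlog;	/* max(128, entries / 128) */
}

int main(void)
{
	/* The 128-bucket per-netns ehash from the session above. */
	unsigned int ehash_entries = 128;

	printf("tcp_max_tw_buckets  = %u\n",
	       tcp_default_max_tw_buckets(ehash_entries));	/* 64 */
	printf("tcp_max_syn_backlog = %u\n",
	       tcp_default_max_syn_backlog(ehash_entries));	/* 128 */
	return 0;
}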
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * ipv4 in net namespaces
 */

#ifndef __NETNS_IPV4_H__
#define __NETNS_IPV4_H__

#include <linux/uidgid.h>
#include <net/inet_frag.h>
#include <linux/rcupdate.h>
#include <linux/seqlock.h>
#include <linux/siphash.h>

struct ctl_table_header;
struct ipv4_devconf;
struct fib_rules_ops;
struct hlist_head;
struct fib_table;
struct sock;

struct local_ports {
	seqlock_t lock;
	int range[2];
	bool warned;
};

struct ping_group_range {
	seqlock_t lock;
	kgid_t range[2];
};

struct inet_hashinfo;

struct inet_timewait_death_row {
	refcount_t tw_refcount;

	/* Padding to avoid false sharing, tw_refcount can be often written */
	struct inet_hashinfo *hashinfo ____cacheline_aligned_in_smp;
	int sysctl_max_tw_buckets;
};

struct tcp_fastopen_context;

struct netns_ipv4 {
	struct inet_timewait_death_row tcp_death_row;

#ifdef CONFIG_SYSCTL
	struct ctl_table_header *forw_hdr;
	struct ctl_table_header *frags_hdr;
	struct ctl_table_header *ipv4_hdr;
	struct ctl_table_header *route_hdr;
	struct ctl_table_header *xfrm4_hdr;
#endif
	struct ipv4_devconf *devconf_all;
	struct ipv4_devconf *devconf_dflt;
	struct ip_ra_chain __rcu *ra_chain;
	struct mutex ra_mutex;
#ifdef CONFIG_IP_MULTIPLE_TABLES
	struct fib_rules_ops *rules_ops;
	struct fib_table __rcu *fib_main;
	struct fib_table __rcu *fib_default;
	unsigned int fib_rules_require_fldissect;
	bool fib_has_custom_rules;
#endif
	bool fib_has_custom_local_routes;
	bool fib_offload_disabled;
#ifdef CONFIG_IP_ROUTE_CLASSID
	atomic_t fib_num_tclassid_users;
#endif
	struct hlist_head *fib_table_hash;
	struct sock *fibnl;

	struct sock *mc_autojoin_sk;

	struct inet_peer_base *peers;
	struct fqdir *fqdir;

	u8 sysctl_icmp_echo_ignore_all;
	u8 sysctl_icmp_echo_enable_probe;
	u8 sysctl_icmp_echo_ignore_broadcasts;
	u8 sysctl_icmp_ignore_bogus_error_responses;
	u8 sysctl_icmp_errors_use_inbound_ifaddr;
	int sysctl_icmp_ratelimit;
	int sysctl_icmp_ratemask;

	u32 ip_rt_min_pmtu;
	int ip_rt_mtu_expires;
	int ip_rt_min_advmss;

	struct local_ports ip_local_ports;

	u8 sysctl_tcp_ecn;
	u8 sysctl_tcp_ecn_fallback;

	u8 sysctl_ip_default_ttl;
	u8 sysctl_ip_no_pmtu_disc;
	u8 sysctl_ip_fwd_use_pmtu;
	u8 sysctl_ip_fwd_update_priority;
	u8 sysctl_ip_nonlocal_bind;
	u8 sysctl_ip_autobind_reuse;
	/* Shall we try to damage output packets if routing dev changes? */
	u8 sysctl_ip_dynaddr;
	u8 sysctl_ip_early_demux;
#ifdef CONFIG_NET_L3_MASTER_DEV
	u8 sysctl_raw_l3mdev_accept;
#endif
	u8 sysctl_tcp_early_demux;
	u8 sysctl_udp_early_demux;

	u8 sysctl_nexthop_compat_mode;

	u8 sysctl_fwmark_reflect;
	u8 sysctl_tcp_fwmark_accept;
#ifdef CONFIG_NET_L3_MASTER_DEV
	u8 sysctl_tcp_l3mdev_accept;
#endif
	u8 sysctl_tcp_mtu_probing;
	int sysctl_tcp_mtu_probe_floor;
	int sysctl_tcp_base_mss;
	int sysctl_tcp_min_snd_mss;
	int sysctl_tcp_probe_threshold;
	u32 sysctl_tcp_probe_interval;

	int sysctl_tcp_keepalive_time;
	int sysctl_tcp_keepalive_intvl;
	u8 sysctl_tcp_keepalive_probes;

	u8 sysctl_tcp_syn_retries;
	u8 sysctl_tcp_synack_retries;
	u8 sysctl_tcp_syncookies;
	u8 sysctl_tcp_migrate_req;
	u8 sysctl_tcp_comp_sack_nr;
	int sysctl_tcp_reordering;
	u8 sysctl_tcp_retries1;
	u8 sysctl_tcp_retries2;
	u8 sysctl_tcp_orphan_retries;
	u8 sysctl_tcp_tw_reuse;
	int sysctl_tcp_fin_timeout;
	unsigned int sysctl_tcp_notsent_lowat;
	u8 sysctl_tcp_sack;
	u8 sysctl_tcp_window_scaling;
	u8 sysctl_tcp_timestamps;
	u8 sysctl_tcp_early_retrans;
	u8 sysctl_tcp_recovery;
	u8 sysctl_tcp_thin_linear_timeouts;
	u8 sysctl_tcp_slow_start_after_idle;
	u8 sysctl_tcp_retrans_collapse;
	u8 sysctl_tcp_stdurg;
	u8 sysctl_tcp_rfc1337;
	u8 sysctl_tcp_abort_on_overflow;
	u8 sysctl_tcp_fack; /* obsolete */
	int sysctl_tcp_max_reordering;
	int sysctl_tcp_adv_win_scale;
	u8 sysctl_tcp_dsack;
	u8 sysctl_tcp_app_win;
	u8 sysctl_tcp_frto;
	u8 sysctl_tcp_nometrics_save;
	u8 sysctl_tcp_no_ssthresh_metrics_save;
	u8 sysctl_tcp_moderate_rcvbuf;
	u8 sysctl_tcp_tso_win_divisor;
	u8 sysctl_tcp_workaround_signed_windows;
	int sysctl_tcp_limit_output_bytes;
	int sysctl_tcp_challenge_ack_limit;
	int sysctl_tcp_min_rtt_wlen;
	u8 sysctl_tcp_min_tso_segs;
	u8 sysctl_tcp_tso_rtt_log;
	u8 sysctl_tcp_autocorking;
	u8 sysctl_tcp_reflect_tos;
	int sysctl_tcp_invalid_ratelimit;
	int sysctl_tcp_pacing_ss_ratio;
	int sysctl_tcp_pacing_ca_ratio;
	int sysctl_tcp_wmem[3];
	int sysctl_tcp_rmem[3];
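	/* Per-netns ehash size requested for child netns via
	 * net.ipv4.tcp_child_ehash_entries; 0 (the default) means children
	 * share the global ehash. Rounded up to 2^n buckets, capped at 16Mi.
	 */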
	unsigned int sysctl_tcp_child_ehash_entries;
	unsigned long sysctl_tcp_comp_sack_delay_ns;
	unsigned long sysctl_tcp_comp_sack_slack_ns;
	int sysctl_max_syn_backlog;
	int sysctl_tcp_fastopen;
	const struct tcp_congestion_ops __rcu *tcp_congestion_control;
	struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
	unsigned int sysctl_tcp_fastopen_blackhole_timeout;
	atomic_t tfo_active_disable_times;
	unsigned long tfo_active_disable_stamp;
	u32 tcp_challenge_timestamp;
	u32 tcp_challenge_count;

	int sysctl_udp_wmem_min;
	int sysctl_udp_rmem_min;

	u8 sysctl_fib_notify_on_flag_change;

#ifdef CONFIG_NET_L3_MASTER_DEV
	u8 sysctl_udp_l3mdev_accept;
#endif

	u8 sysctl_igmp_llm_reports;
	int sysctl_igmp_max_memberships;
	int sysctl_igmp_max_msf;
	int sysctl_igmp_qrv;

	struct ping_group_range ping_group_range;

	atomic_t dev_addr_genid;

#ifdef CONFIG_SYSCTL
	unsigned long *sysctl_local_reserved_ports;
	int sysctl_ip_prot_sock;
#endif

#ifdef CONFIG_IP_MROUTE
#ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
	struct mr_table *mrt;
#else
	struct list_head mr_tables;
	struct fib_rules_ops *mr_rules_ops;
#endif
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	u32 sysctl_fib_multipath_hash_fields;
	u8 sysctl_fib_multipath_use_neigh;
	u8 sysctl_fib_multipath_hash_policy;
#endif

	struct fib_notifier_ops *notifier_ops;
	unsigned int fib_seq; /* protected by rtnl_mutex */

	struct fib_notifier_ops *ipmr_notifier_ops;
	unsigned int ipmr_seq; /* protected by rtnl_mutex */

	atomic_t rt_genid;
	siphash_key_t ip_id_key;
};
#endif