Merge keystone/android12-5.10-keystone-qcom-release.101+ (a0a7006) into msm-5.10

* refs/heads/tmp-a0a7006:
  ANDROID: Fix the drain_all_pages default condition broken by a hook
  UPSTREAM: Revert "xfrm: xfrm_state_mtu should return at least 1280 for ipv6"
  UPSTREAM: xfrm: fix MTU regression
  FROMGIT: net: fix wrong network header length

Change-Id: Ia72d25a98cd228e76a9f466d777d2caafaf82951
Signed-off-by: Sivasri Kumar, Vanka <quic_svanka@quicinc.com>
commit 53f97d77bb
@@ -1 +1 @@
-0fb7dc78c77bb4b783715bed1f0b1c793ab63ad6
+f4c0e37dbcde2d3bf3e078bc0ab092d67f7b2598
@@ -1546,7 +1546,6 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
 int xfrm_init_replay(struct xfrm_state *x);
-u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu);
 u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
 int xfrm_init_state(struct xfrm_state *x);
@@ -8772,7 +8772,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,

         trace_android_vh_cma_drain_all_pages_bypass(migratetype,
                                 &skip_drain_all_pages);
-        if (skip_drain_all_pages)
+        if (!skip_drain_all_pages)
                 drain_all_pages(cc.zone);

         /*
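The hook above fills skip_drain_all_pages, which starts out false, so with no vendor hook registered the drain must still run; the fix tests the negated flag instead of the flag itself. A minimal standalone sketch of that contract (the stub hook body, main() and the print are illustrative; only the flag logic mirrors the hunk):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the vendor hook: by default it leaves the flag untouched. */
static void trace_android_vh_cma_drain_all_pages_bypass(int migratetype,
                                                        bool *skip_drain_all_pages)
{
        (void)migratetype;
        (void)skip_drain_all_pages;
        /* a vendor module may set *skip_drain_all_pages = true here */
}

/* Stand-in for drain_all_pages(cc.zone). */
static void drain_all_pages_stub(void)
{
        puts("draining per-cpu page lists");
}

int main(void)
{
        bool skip_drain_all_pages = false;      /* default: do not bypass */
        int migratetype = 0;

        trace_android_vh_cma_drain_all_pages_bypass(migratetype,
                                                    &skip_drain_all_pages);
        if (!skip_drain_all_pages)              /* the corrected condition */
                drain_all_pages_stub();
        return 0;
}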
@@ -3669,7 +3669,7 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
         unsigned int delta_len = 0;
         struct sk_buff *tail = NULL;
         struct sk_buff *nskb, *tmp;
-        int err;
+        int len_diff, err;

         skb_push(skb, -skb_network_offset(skb) + offset);

@@ -3709,9 +3709,11 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
                 skb_push(nskb, -skb_network_offset(nskb) + offset);

                 skb_release_head_state(nskb);
+                len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb);
                 __copy_skb_header(nskb, skb);

                 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
+                nskb->transport_header += len_diff;
                 skb_copy_from_linear_data_offset(skb, -tnl_hlen,
                                                  nskb->data - tnl_hlen,
                                                  offset + tnl_hlen);
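__copy_skb_header() copies the head skb's header offsets into each segment, so when the head skb and a fragment-list skb carry network headers of different lengths, the copied transport_header no longer points at the segment's own transport header; the two added lines record the difference first and re-apply it afterwards. A toy userspace model of that arithmetic (the struct and the 40/20-byte figures are invented for illustration, not the kernel's sk_buff layout):

#include <stdio.h>

struct toy_skb {
        int network_header;     /* byte offset of the network header */
        int transport_header;   /* byte offset of the transport header */
};

static int network_header_len(const struct toy_skb *s)
{
        return s->transport_header - s->network_header;
}

int main(void)
{
        /* head skb: 40-byte IPv6 header; list skb: 20-byte IPv4 header */
        struct toy_skb skb  = { .network_header = 0, .transport_header = 40 };
        struct toy_skb nskb = { .network_header = 0, .transport_header = 20 };

        /* as in the hunk: remember the difference before the header copy ... */
        int len_diff = network_header_len(&nskb) - network_header_len(&skb);

        /* ... the header copy overwrites nskb's transport_header with skb's ... */
        nskb.transport_header = skb.transport_header;

        /* ... then re-apply the difference so nskb points at its own L4 header */
        nskb.transport_header += len_diff;

        printf("nskb transport_header = %d (expected 20)\n", nskb.transport_header);
        return 0;
}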
@@ -673,7 +673,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
                 struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
                 u32 padto;

-                padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
+                padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
                 if (skb->len < padto)
                         esp.tfclen = padto - skb->len;
         }
@@ -708,7 +708,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
                 struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
                 u32 padto;

-                padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
+                padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
                 if (skb->len < padto)
                         esp.tfclen = padto - skb->len;
         }
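Both ESP hunks only swap the helper back to xfrm_state_mtu(); the traffic-flow-confidentiality padding logic is unchanged: pad the payload up to min(x->tfcpad, state MTU) and no further. A standalone sketch of that computation with invented numbers (state_mtu_stub() merely stands in for xfrm_state_mtu(x, dst->child_mtu_cached)):

#include <stdio.h>

/* Illustrative stand-in for the state's payload MTU. */
static unsigned int state_mtu_stub(void)
{
        return 1438;
}

static unsigned int min_u32(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned int tfcpad = 1200;     /* x->tfcpad: requested TFC pad target */
        unsigned int skb_len = 900;     /* current payload length */
        unsigned int tfclen = 0;

        /* as in the hunks: pad up to min(tfcpad, state MTU), never beyond it */
        unsigned int padto = min_u32(tfcpad, state_mtu_stub());
        if (skb_len < padto)
                tfclen = padto - skb_len;

        printf("padto = %u, tfclen = %u\n", padto, tfclen);    /* 1200, 300 */
        return 0;
}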
@@ -1432,8 +1432,6 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
                 if (np->frag_size)
                         mtu = np->frag_size;
         }
-        if (mtu < IPV6_MIN_MTU)
-                return -EINVAL;
         cork->base.fragsize = mtu;
         cork->base.gso_size = ipc6->gso_size;
         cork->base.tx_flags = 0;
@@ -1495,8 +1493,6 @@ static int __ip6_append_data(struct sock *sk,

         fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
                         (opt ? opt->opt_nflen : 0);
-        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
-                     sizeof(struct frag_hdr);

         headersize = sizeof(struct ipv6hdr) +
                      (opt ? opt->opt_flen + opt->opt_nflen : 0) +
@@ -1504,6 +1500,13 @@ static int __ip6_append_data(struct sock *sk,
                       sizeof(struct frag_hdr) : 0) +
                      rt->rt6i_nfheader_len;

+        if (mtu < fragheaderlen ||
+            ((mtu - fragheaderlen) & ~7) + fragheaderlen < sizeof(struct frag_hdr))
+                goto emsgsize;
+
+        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
+                     sizeof(struct frag_hdr);
+
         /* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
          * the first fragment
          */
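The maxfraglen computation now sits behind a guard, so an MTU too small to carry even a fragment header bails out to emsgsize instead of letting the unsigned arithmetic wrap. A small standalone model of the check and the formula, with example values (a bare IPv6 header, no extension headers, gives fragheaderlen = 40; the function name and numbers are illustrative only):

#include <stdio.h>

#define FRAG_HDR_LEN 8u        /* sizeof(struct frag_hdr) */

/* Returns the usable maxfraglen, or -1 where the kernel would goto emsgsize. */
static long ip6_maxfraglen(unsigned int mtu, unsigned int fragheaderlen)
{
        if (mtu < fragheaderlen ||
            ((mtu - fragheaderlen) & ~7u) + fragheaderlen < FRAG_HDR_LEN)
                return -1;      /* EMSGSIZE */

        return (long)(((mtu - fragheaderlen) & ~7u) + fragheaderlen
                      - FRAG_HDR_LEN);
}

int main(void)
{
        /* normal case: (1280 - 40) & ~7 = 1240, + 40 - 8 = 1272 */
        printf("mtu 1280 -> maxfraglen %ld\n", ip6_maxfraglen(1280, 40));
        /* an undersized mtu used to wrap the unsigned math; now rejected */
        printf("mtu 36   -> maxfraglen %ld\n", ip6_maxfraglen(36, 40));
        return 0;
}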
@@ -2519,7 +2519,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
 }
 EXPORT_SYMBOL(xfrm_state_delete_tunnel);

-u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu)
+u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
 {
         const struct xfrm_type *type = READ_ONCE(x->type);
         struct crypto_aead *aead;
@@ -2550,17 +2550,7 @@ u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu)
         return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
                  net_adj) & ~(blksize - 1)) + net_adj - 2;
 }
-EXPORT_SYMBOL_GPL(__xfrm_state_mtu);
+EXPORT_SYMBOL_GPL(xfrm_state_mtu);

-u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
-{
-        mtu = __xfrm_state_mtu(x, mtu);
-
-        if (x->props.family == AF_INET6 && mtu < IPV6_MIN_MTU)
-                return IPV6_MIN_MTU;
-
-        return mtu;
-}
-
 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload)
 {
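The revert folds the helper back into a single xfrm_state_mtu() and drops the wrapper that clamped the result to IPV6_MIN_MTU (1280) for IPv6 states; undersized MTUs are instead caught later in the output path by the __ip6_append_data() guard above. A sketch of the payload-MTU formula that remains at the end of the function, using typical-looking but invented ESP sizes (header_len, authsize, blksize and net_adj are not read from a real xfrm_state):

#include <stdio.h>

/*
 * Sketch of:
 *   ((mtu - header_len - authsize - net_adj) & ~(blksize - 1)) + net_adj - 2
 */
static unsigned int esp_payload_mtu(unsigned int mtu, unsigned int header_len,
                                    unsigned int authsize, unsigned int blksize,
                                    unsigned int net_adj)
{
        return ((mtu - header_len - authsize - net_adj) & ~(blksize - 1))
               + net_adj - 2;
}

int main(void)
{
        /* e.g. ESP with SPI+seq+IV = 24 bytes, 16-byte ICV, AES block = 16 */
        unsigned int mtu = 1500, header_len = 24, authsize = 16, blksize = 16;
        unsigned int net_adj = 0;

        printf("payload mtu = %u\n",
               esp_payload_mtu(mtu, header_len, authsize, blksize, net_adj));
        /* 1454 with these numbers */

        /*
         * The removed wrapper additionally did, for IPv6 states:
         *     if (result < 1280) result = 1280;   // IPV6_MIN_MTU clamp
         * After this series that clamp is gone again.
         */
        return 0;
}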