Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (45 commits)
  [IPV4]: Restore multipath routing after rt_next changes.
  [XFRM] IPV6: Fix outbound RO transformation which is broken by IPsec tunnel patch.
  [NET]: Reorder fields of struct dst_entry
  [DECNET]: Convert decnet route to use the new dst_entry 'next' pointer
  [IPV6]: Convert ipv6 route to use the new dst_entry 'next' pointer
  [IPV4]: Convert ipv4 route to use the new dst_entry 'next' pointer
  [NET]: Introduce union in struct dst_entry to hold 'next' pointer
  [DECNET]: fix misannotation of linkinfo_dn
  [DECNET]: FRA_{DST,SRC} are le16 for decnet
  [UDP]: UDP can use sk_hash to speedup lookups
  [NET]: Fix whitespace errors.
  [NET] XFRM: Fix whitespace errors.
  [NET] X25: Fix whitespace errors.
  [NET] WANROUTER: Fix whitespace errors.
  [NET] UNIX: Fix whitespace errors.
  [NET] TIPC: Fix whitespace errors.
  [NET] SUNRPC: Fix whitespace errors.
  [NET] SCTP: Fix whitespace errors.
  [NET] SCHED: Fix whitespace errors.
  [NET] RXRPC: Fix whitespace errors.
  ...
commit cb18eccff4
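Most of these commits share one mechanical theme that dominates the diff below: the hash-chain pointer that each protocol used to keep in its own wrapper union (rt_next, rt6_next, dn_next) moves into struct dst_entry itself, held in an anonymous union so generic code and each protocol can view the same field with their own types. A minimal sketch of the resulting shape (toy layout; field order and unrelated members elided, not the kernel's exact definitions):

struct rtable;
struct rt6_info;
struct dn_route;

struct dst_entry {
        /* ... refcount, device, metrics, ops elided ... */
        union {
                struct dst_entry *next;     /* generic view of the chain */
                struct rtable    *rt_next;  /* IPv4 route cache view     */
                struct rt6_info  *rt6_next; /* IPv6 FIB view             */
                struct dn_route  *dn_next;  /* DECnet route cache view   */
        };
};

struct rtable {
        union {
                struct dst_entry dst;       /* chain pointer now lives here */
        } u;
        /* ... IPv4-specific fields ... */
};

Every call site therefore changes from rt->u.rt_next to rt->u.dst.rt_next (and likewise for rt6_next and dn_next), which accounts for most of the churn in the hunks that follow.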
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
@@ -68,9 +68,10 @@ extern void dn_rt_cache_flush(int delay);
 struct dn_route {
         union {
                 struct dst_entry dst;
-                struct dn_route *rt_next;
         } u;
 
+        struct flowi fl;
+
         __le16 rt_saddr;
         __le16 rt_daddr;
         __le16 rt_gateway;
@@ -80,8 +81,6 @@ struct dn_route {
 
         unsigned rt_flags;
         unsigned rt_type;
-
-        struct flowi fl;
 };
 
 extern void dn_route_init(void);
diff --git a/include/net/dst.h b/include/net/dst.h
@@ -37,9 +37,7 @@ struct sk_buff;
 
 struct dst_entry
 {
-        struct dst_entry        *next;
-        atomic_t                __refcnt;       /* client references */
-        int                     __use;
+        struct rcu_head         rcu_head;
         struct dst_entry        *child;
         struct net_device       *dev;
         short                   error;
@@ -50,7 +48,6 @@ struct dst_entry
 #define DST_NOPOLICY            4
 #define DST_NOHASH              8
 #define DST_BALANCED            0x10
-        unsigned long           lastuse;
         unsigned long           expires;
 
         unsigned short          header_len;     /* more space at head required */
@@ -75,8 +72,16 @@ struct dst_entry
 #endif
 
         struct dst_ops          *ops;
-        struct rcu_head         rcu_head;
 
+        unsigned long           lastuse;
+        atomic_t                __refcnt;       /* client references */
+        int                     __use;
+        union {
+                struct dst_entry *next;
+                struct rtable    *rt_next;
+                struct rt6_info  *rt6_next;
+                struct dn_route  *dn_next;
+        };
         char                    info[0];
 };
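Because the four union members share one word of storage, generic code such as the dst garbage collector can keep walking chains through ->next while IPv4, IPv6 and DECnet read and write their own typed alias. A userspace toy showing the aliasing; this is an illustration only (assumes C11 anonymous unions, and the names mirror but do not reproduce the kernel structs):

#include <stdio.h>

struct rtable;

struct dst_entry {
        union {
                struct dst_entry *next;    /* generic alias */
                struct rtable    *rt_next; /* typed alias   */
        };
};

struct rtable {
        union { struct dst_entry dst; } u; /* dst is the first member */
};

int main(void)
{
        struct rtable a, b;

        a.u.dst.rt_next = &b;   /* store through the IPv4 view...          */
        /* ...and observe it through the generic view: same storage, and
         * &b equals &b.u.dst because dst sits at offset zero. */
        printf("%d\n", a.u.dst.next == &b.u.dst);       /* prints 1 */
        return 0;
}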
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
@@ -83,7 +83,6 @@ struct rt6_info
 {
         union {
                 struct dst_entry        dst;
-                struct rt6_info         *next;
         } u;
 
         struct inet6_dev                *rt6i_idev;
diff --git a/include/net/route.h b/include/net/route.h
@@ -53,9 +53,11 @@ struct rtable
         union
         {
                 struct dst_entry        dst;
-                struct rtable           *rt_next;
         } u;
 
+        /* Cache lookup keys */
+        struct flowi            fl;
+
         struct in_device        *idev;
 
         unsigned                rt_flags;
@@ -69,9 +71,6 @@ struct rtable
         /* Info on neighbour */
         __be32                  rt_gateway;
 
-        /* Cache lookup keys */
-        struct flowi            fl;
-
         /* Miscellaneous cached information */
         __be32                  rt_spec_dst; /* RFC1122 specific destination */
         struct inet_peer        *peer; /* long-living peer info */
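All three wrappers keep the embedded dst as their first member, and this merge leans on that: dn_run_flush() below frees entries via dst_free((struct dst_entry *)rt), which is only sound while the cast lands exactly on the embedded dst_entry. A toy compile-and-run check of that invariant (the kernel enforces it by convention rather than by an assertion; the placeholder fields are made up):

#include <assert.h>
#include <stddef.h>

struct dst_entry { int placeholder; };

struct rtable {
        union { struct dst_entry dst; } u;
        unsigned rt_flags;
};

int main(void)
{
        /* If the embedded dst ever moved from offset 0, every
         * (struct dst_entry *)rt cast in the route code would break. */
        assert(offsetof(struct rtable, u.dst) == 0);
        return 0;
}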
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
@@ -167,11 +167,11 @@ static void dn_dst_check_expire(unsigned long dummy)
                 while((rt=*rtp) != NULL) {
                         if (atomic_read(&rt->u.dst.__refcnt) ||
                                         (now - rt->u.dst.lastuse) < expire) {
-                                rtp = &rt->u.rt_next;
+                                rtp = &rt->u.dst.dn_next;
                                 continue;
                         }
-                        *rtp = rt->u.rt_next;
-                        rt->u.rt_next = NULL;
+                        *rtp = rt->u.dst.dn_next;
+                        rt->u.dst.dn_next = NULL;
                         dnrt_free(rt);
                 }
                 spin_unlock(&dn_rt_hash_table[i].lock);
@@ -198,11 +198,11 @@ static int dn_dst_gc(void)
                 while((rt=*rtp) != NULL) {
                         if (atomic_read(&rt->u.dst.__refcnt) ||
                                         (now - rt->u.dst.lastuse) < expire) {
-                                rtp = &rt->u.rt_next;
+                                rtp = &rt->u.dst.dn_next;
                                 continue;
                         }
-                        *rtp = rt->u.rt_next;
-                        rt->u.rt_next = NULL;
+                        *rtp = rt->u.dst.dn_next;
+                        rt->u.dst.dn_next = NULL;
                         dnrt_drop(rt);
                         break;
                 }
@@ -286,8 +286,8 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
         while((rth = *rthp) != NULL) {
                 if (compare_keys(&rth->fl, &rt->fl)) {
                         /* Put it first */
-                        *rthp = rth->u.rt_next;
-                        rcu_assign_pointer(rth->u.rt_next,
+                        *rthp = rth->u.dst.dn_next;
+                        rcu_assign_pointer(rth->u.dst.dn_next,
                                            dn_rt_hash_table[hash].chain);
                         rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);
 
@@ -300,10 +300,10 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
                         *rp = rth;
                         return 0;
                 }
-                rthp = &rth->u.rt_next;
+                rthp = &rth->u.dst.dn_next;
         }
 
-        rcu_assign_pointer(rt->u.rt_next, dn_rt_hash_table[hash].chain);
+        rcu_assign_pointer(rt->u.dst.dn_next, dn_rt_hash_table[hash].chain);
         rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);
 
         dst_hold(&rt->u.dst);
@@ -326,8 +326,8 @@ void dn_run_flush(unsigned long dummy)
                         goto nothing_to_declare;
 
                 for(; rt; rt=next) {
-                        next = rt->u.rt_next;
-                        rt->u.rt_next = NULL;
+                        next = rt->u.dst.dn_next;
+                        rt->u.dst.dn_next = NULL;
                         dst_free((struct dst_entry *)rt);
                 }
 
@@ -1169,7 +1169,7 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
         if (!(flags & MSG_TRYHARD)) {
                 rcu_read_lock_bh();
                 for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt;
-                    rt = rcu_dereference(rt->u.rt_next)) {
+                    rt = rcu_dereference(rt->u.dst.dn_next)) {
                         if ((flp->fld_dst == rt->fl.fld_dst) &&
                             (flp->fld_src == rt->fl.fld_src) &&
                             (flp->mark == rt->fl.mark) &&
@@ -1443,7 +1443,7 @@ int dn_route_input(struct sk_buff *skb)
 
         rcu_read_lock();
         for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
-            rt = rcu_dereference(rt->u.rt_next)) {
+            rt = rcu_dereference(rt->u.dst.dn_next)) {
                 if ((rt->fl.fld_src == cb->src) &&
                     (rt->fl.fld_dst == cb->dst) &&
                     (rt->fl.oif == 0) &&
@@ -1627,7 +1627,7 @@ int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
                 rcu_read_lock_bh();
                 for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0;
                     rt;
-                    rt = rcu_dereference(rt->u.rt_next), idx++) {
+                    rt = rcu_dereference(rt->u.dst.dn_next), idx++) {
                         if (idx < s_idx)
                                 continue;
                         skb->dst = dst_clone(&rt->u.dst);
@@ -1673,7 +1673,7 @@ static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_rou
 {
         struct dn_rt_cache_iter_state *s = rcu_dereference(seq->private);
 
-        rt = rt->u.rt_next;
+        rt = rt->u.dst.dn_next;
         while(!rt) {
                 rcu_read_unlock_bh();
                 if (--s->bucket < 0)
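Every DECnet hunk above uses the same idiom: walk the hash bucket with a pointer-to-pointer cursor so an entry can be unlinked without keeping a separate prev pointer. A distilled, standalone version of the dn_dst_check_expire() loop (toy types; an expired flag stands in for the refcount and lastuse tests, and freeing is elided):

struct node {
        struct node *next;
        int expired;
};

static void prune(struct node **head)
{
        struct node **np = head;
        struct node *n;

        while ((n = *np) != NULL) {
                if (!n->expired) {
                        np = &n->next;  /* keep entry: advance the cursor */
                        continue;
                }
                *np = n->next;          /* unlink in place */
                n->next = NULL;
                /* the kernel hands n to dnrt_free()/dnrt_drop() here */
        }
}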
diff --git a/net/ipv4/multipath_drr.c b/net/ipv4/multipath_drr.c
@@ -143,7 +143,7 @@ static void drr_select_route(const struct flowi *flp,
         result = NULL;
         cur_min = NULL;
         for (nh = rcu_dereference(first); nh;
-             nh = rcu_dereference(nh->u.rt_next)) {
+             nh = rcu_dereference(nh->u.dst.rt_next)) {
                 if ((nh->u.dst.flags & DST_BALANCED) != 0 &&
                     multipath_comparekeys(&nh->fl, flp)) {
                         int nh_ifidx = nh->u.dst.dev->ifindex;
diff --git a/net/ipv4/multipath_random.c b/net/ipv4/multipath_random.c
@@ -74,7 +74,7 @@ static void random_select_route(const struct flowi *flp,
 
         /* count all candidate */
         for (rt = rcu_dereference(first); rt;
-             rt = rcu_dereference(rt->u.rt_next)) {
+             rt = rcu_dereference(rt->u.dst.rt_next)) {
                 if ((rt->u.dst.flags & DST_BALANCED) != 0 &&
                     multipath_comparekeys(&rt->fl, flp))
                         ++candidate_count;
@@ -90,7 +90,7 @@ static void random_select_route(const struct flowi *flp,
         /* find chosen candidate and adjust GC data for all candidates
          * to ensure they stay in cache
          */
-        for (rt = first; rt; rt = rt->u.rt_next) {
+        for (rt = first; rt; rt = rt->u.dst.rt_next) {
                 if ((rt->u.dst.flags & DST_BALANCED) != 0 &&
                     multipath_comparekeys(&rt->fl, flp)) {
                         rt->u.dst.lastuse = jiffies;
diff --git a/net/ipv4/multipath_rr.c b/net/ipv4/multipath_rr.c
@@ -58,7 +58,7 @@ static void rr_select_route(const struct flowi *flp,
          */
         result = NULL;
         for (nh = rcu_dereference(first); nh;
-             nh = rcu_dereference(nh->u.rt_next)) {
+             nh = rcu_dereference(nh->u.dst.rt_next)) {
                 if ((nh->u.dst.flags & DST_BALANCED) != 0 &&
                     multipath_comparekeys(&nh->fl, flp)) {
                         nh->u.dst.lastuse = jiffies;
diff --git a/net/ipv4/multipath_wrandom.c b/net/ipv4/multipath_wrandom.c
@@ -167,7 +167,7 @@ static void wrandom_select_route(const struct flowi *flp,
 
         /* collect all candidates and identify their weights */
         for (rt = rcu_dereference(first); rt;
-             rt = rcu_dereference(rt->u.rt_next)) {
+             rt = rcu_dereference(rt->u.dst.rt_next)) {
                 if ((rt->u.dst.flags & DST_BALANCED) != 0 &&
                     multipath_comparekeys(&rt->fl, flp)) {
                         struct multipath_candidate* mpc =
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
@@ -289,7 +289,7 @@ static struct rtable *rt_cache_get_next(struct seq_file *seq, struct rtable *r)
 {
         struct rt_cache_iter_state *st = rcu_dereference(seq->private);
 
-        r = r->u.rt_next;
+        r = r->u.dst.rt_next;
         while (!r) {
                 rcu_read_unlock_bh();
                 if (--st->bucket < 0)
@@ -512,7 +512,7 @@ static __inline__ int rt_fast_clean(struct rtable *rth)
         /* Kill broadcast/multicast entries very aggresively, if they
            collide in hash table with more useful entries */
         return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
-                rth->fl.iif && rth->u.rt_next;
+                rth->fl.iif && rth->u.dst.rt_next;
 }
 
 static __inline__ int rt_valuable(struct rtable *rth)
@@ -595,10 +595,10 @@ static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
                 if (((*rthp)->u.dst.flags & DST_BALANCED) != 0 &&
                     compare_keys(&(*rthp)->fl, &expentry->fl)) {
                         if (*rthp == expentry) {
-                                *rthp = rth->u.rt_next;
+                                *rthp = rth->u.dst.rt_next;
                                 continue;
                         } else {
-                                *rthp = rth->u.rt_next;
+                                *rthp = rth->u.dst.rt_next;
                                 rt_free(rth);
                                 if (removed_count)
                                         ++(*removed_count);
@@ -606,9 +606,9 @@ static struct rtable **rt_remove_balanced_route(struct rtable **chain_head,
                 } else {
                         if (!((*rthp)->u.dst.flags & DST_BALANCED) &&
                             passedexpired && !nextstep)
-                                nextstep = &rth->u.rt_next;
+                                nextstep = &rth->u.dst.rt_next;
 
-                        rthp = &rth->u.rt_next;
+                        rthp = &rth->u.dst.rt_next;
                 }
         }
 
@@ -649,12 +649,12 @@ static void rt_check_expire(unsigned long dummy)
                                 /* Entry is expired even if it is in use */
                                 if (time_before_eq(now, rth->u.dst.expires)) {
                                         tmo >>= 1;
-                                        rthp = &rth->u.rt_next;
+                                        rthp = &rth->u.dst.rt_next;
                                         continue;
                                 }
                         } else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
                                 tmo >>= 1;
-                                rthp = &rth->u.rt_next;
+                                rthp = &rth->u.dst.rt_next;
                                 continue;
                         }
 
@@ -668,11 +668,11 @@ static void rt_check_expire(unsigned long dummy)
                                 if (!rthp)
                                         break;
                         } else {
-                                *rthp = rth->u.rt_next;
+                                *rthp = rth->u.dst.rt_next;
                                 rt_free(rth);
                         }
 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
-                        *rthp = rth->u.rt_next;
+                        *rthp = rth->u.dst.rt_next;
                         rt_free(rth);
 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
                 }
@@ -706,7 +706,7 @@ static void rt_run_flush(unsigned long dummy)
                 spin_unlock_bh(rt_hash_lock_addr(i));
 
                 for (; rth; rth = next) {
-                        next = rth->u.rt_next;
+                        next = rth->u.dst.rt_next;
                         rt_free(rth);
                 }
         }
@@ -840,7 +840,7 @@ static int rt_garbage_collect(void)
                         while ((rth = *rthp) != NULL) {
                                 if (!rt_may_expire(rth, tmo, expire)) {
                                         tmo >>= 1;
-                                        rthp = &rth->u.rt_next;
+                                        rthp = &rth->u.dst.rt_next;
                                         continue;
                                 }
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
@@ -858,12 +858,12 @@ static int rt_garbage_collect(void)
                                         if (!rthp)
                                                 break;
                                 } else {
-                                        *rthp = rth->u.rt_next;
+                                        *rthp = rth->u.dst.rt_next;
                                         rt_free(rth);
                                         goal--;
                                 }
 #else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
-                                *rthp = rth->u.rt_next;
+                                *rthp = rth->u.dst.rt_next;
                                 rt_free(rth);
                                 goal--;
 #endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
@@ -947,13 +947,13 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
                 if (compare_keys(&rth->fl, &rt->fl)) {
 #endif
                         /* Put it first */
-                        *rthp = rth->u.rt_next;
+                        *rthp = rth->u.dst.rt_next;
                         /*
                          * Since lookup is lockfree, the deletion
                          * must be visible to another weakly ordered CPU before
                          * the insertion at the start of the hash chain.
                          */
-                        rcu_assign_pointer(rth->u.rt_next,
+                        rcu_assign_pointer(rth->u.dst.rt_next,
                                            rt_hash_table[hash].chain);
                         /*
                          * Since lookup is lockfree, the update writes
@@ -983,7 +983,7 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
 
                 chain_length++;
 
-                rthp = &rth->u.rt_next;
+                rthp = &rth->u.dst.rt_next;
         }
 
         if (cand) {
@@ -994,7 +994,7 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
                  * only 2 entries per bucket. We will see.
                  */
                 if (chain_length > ip_rt_gc_elasticity) {
-                        *candp = cand->u.rt_next;
+                        *candp = cand->u.dst.rt_next;
                         rt_free(cand);
                 }
         }
@@ -1034,13 +1034,13 @@ static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
                 }
         }
 
-        rt->u.rt_next = rt_hash_table[hash].chain;
+        rt->u.dst.rt_next = rt_hash_table[hash].chain;
 #if RT_CACHE_DEBUG >= 2
-        if (rt->u.rt_next) {
+        if (rt->u.dst.rt_next) {
                 struct rtable *trt;
                 printk(KERN_DEBUG "rt_cache @%02x: %u.%u.%u.%u", hash,
                        NIPQUAD(rt->rt_dst));
-                for (trt = rt->u.rt_next; trt; trt = trt->u.rt_next)
+                for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
                         printk(" . %u.%u.%u.%u", NIPQUAD(trt->rt_dst));
                 printk("\n");
         }
@@ -1117,9 +1117,9 @@ static void rt_del(unsigned hash, struct rtable *rt)
         spin_lock_bh(rt_hash_lock_addr(hash));
         ip_rt_put(rt);
         for (rthp = &rt_hash_table[hash].chain; *rthp;
-             rthp = &(*rthp)->u.rt_next)
+             rthp = &(*rthp)->u.dst.rt_next)
                 if (*rthp == rt) {
-                        *rthp = rt->u.rt_next;
+                        *rthp = rt->u.dst.rt_next;
                         rt_free(rt);
                         break;
                 }
@@ -1167,7 +1167,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                                     rth->fl.fl4_src != skeys[i] ||
                                     rth->fl.oif != ikeys[k] ||
                                     rth->fl.iif != 0) {
-                                        rthp = &rth->u.rt_next;
+                                        rthp = &rth->u.dst.rt_next;
                                         continue;
                                 }
 
@@ -1416,7 +1416,7 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
 
                 rcu_read_lock();
                 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-                     rth = rcu_dereference(rth->u.rt_next)) {
+                     rth = rcu_dereference(rth->u.dst.rt_next)) {
                         if (rth->fl.fl4_dst == daddr &&
                             rth->fl.fl4_src == skeys[i] &&
                             rth->rt_dst == daddr &&
@@ -2099,7 +2099,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 
         rcu_read_lock();
         for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-             rth = rcu_dereference(rth->u.rt_next)) {
+             rth = rcu_dereference(rth->u.dst.rt_next)) {
                 if (rth->fl.fl4_dst == daddr &&
                     rth->fl.fl4_src == saddr &&
                     rth->fl.iif == iif &&
@@ -2563,7 +2563,7 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
 
         rcu_read_lock_bh();
         for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-             rth = rcu_dereference(rth->u.rt_next)) {
+             rth = rcu_dereference(rth->u.dst.rt_next)) {
                 if (rth->fl.fl4_dst == flp->fl4_dst &&
                     rth->fl.fl4_src == flp->fl4_src &&
                     rth->fl.iif == 0 &&
@@ -2825,7 +2825,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
                         s_idx = 0;
                 rcu_read_lock_bh();
                 for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
-                     rt = rcu_dereference(rt->u.rt_next), idx++) {
+                     rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
                         if (idx < s_idx)
                                 continue;
                         skb->dst = dst_clone(&rt->u.dst);
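The rt_intern_hash() hunks preserve an ordering requirement that their comments spell out: readers traverse the bucket locklessly under RCU, so when a matching entry is moved to the front, its unlink must be visible to other CPUs before the re-insertion at the head. A condensed sketch of that step using the new field names (kernel context assumed; rcu_assign_pointer supplies the write barrier):

/* rthp points at the link that currently leads to rth; chain is the
 * bucket head pointer.  The caller holds the bucket spinlock. */
static void move_to_front(struct rtable **rthp, struct rtable **chain)
{
        struct rtable *rth = *rthp;

        *rthp = rth->u.dst.rt_next;                     /* 1: unlink          */
        rcu_assign_pointer(rth->u.dst.rt_next, *chain); /* 2: aim at old head */
        rcu_assign_pointer(*chain, rth);                /* 3: publish as head */
}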
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
@@ -120,7 +120,7 @@ static inline int __udp_lib_lport_inuse(__u16 num, struct hlist_head udptable[])
         struct hlist_node *node;
 
         sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)])
-                if (inet_sk(sk)->num == num)
+                if (sk->sk_hash == num)
                         return 1;
         return 0;
 }
@@ -191,7 +191,7 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum,
                 head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];
 
                 sk_for_each(sk2, node, head)
-                        if (inet_sk(sk2)->num == snum &&
+                        if (sk2->sk_hash == snum &&
                             sk2 != sk &&
                             (!sk2->sk_reuse || !sk->sk_reuse) &&
                             (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
@@ -200,6 +200,7 @@ int __udp_lib_get_port(struct sock *sk, unsigned short snum,
                         goto fail;
         }
         inet_sk(sk)->num = snum;
+        sk->sk_hash = snum;
         if (sk_unhashed(sk)) {
                 head = &udptable[snum & (UDP_HTABLE_SIZE - 1)];
                 sk_add_node(sk, head);
@@ -247,7 +248,7 @@ static struct sock *__udp4_lib_lookup(__be32 saddr, __be16 sport,
         sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
                 struct inet_sock *inet = inet_sk(sk);
 
-                if (inet->num == hnum && !ipv6_only_sock(sk)) {
+                if (sk->sk_hash == hnum && !ipv6_only_sock(sk)) {
                         int score = (sk->sk_family == PF_INET ? 1 : 0);
                         if (inet->rcv_saddr) {
                                 if (inet->rcv_saddr != daddr)
@@ -296,7 +297,7 @@ static inline struct sock *udp_v4_mcast_next(struct sock *sk,
         sk_for_each_from(s, node) {
                 struct inet_sock *inet = inet_sk(s);
 
-                if (inet->num != hnum ||
+                if (s->sk_hash != hnum ||
                     (inet->daddr && inet->daddr != rmt_addr) ||
                     (inet->dport != rmt_port && inet->dport) ||
                     (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
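These UDP hunks are the '[UDP]: UDP can use sk_hash to speedup lookups' commit: the bound port is mirrored into sk->sk_hash when it is assigned, so the hot lookup loops compare a field of struct sock itself instead of chasing inet_sk(sk)->num through another structure. The two sides of the contract, condensed from the hunks above (fragment, not standalone code):

/* write side: keep both fields coherent when a port is assigned */
inet_sk(sk)->num = snum;
sk->sk_hash = snum;

/* read side: the fast-path comparison touches only struct sock */
sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)])
        if (sk->sk_hash == hnum && !ipv6_only_sock(sk)) {
                /* ... score the candidate as before ... */
        }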
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
@@ -297,7 +297,7 @@ static int fib6_dump_node(struct fib6_walker_t *w)
         int res;
         struct rt6_info *rt;
 
-        for (rt = w->leaf; rt; rt = rt->u.next) {
+        for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
                 res = rt6_dump_route(rt, w->args);
                 if (res < 0) {
                         /* Frame is full, suspend walking */
@@ -623,11 +623,11 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
                     fn->leaf == &ip6_null_entry &&
                     !(rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ){
                         fn->leaf = rt;
-                        rt->u.next = NULL;
+                        rt->u.dst.rt6_next = NULL;
                         goto out;
                 }
 
-        for (iter = fn->leaf; iter; iter=iter->u.next) {
+        for (iter = fn->leaf; iter; iter=iter->u.dst.rt6_next) {
                 /*
                  *      Search for duplicates
                  */
@@ -655,7 +655,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
                 if (iter->rt6i_metric > rt->rt6i_metric)
                         break;
 
-                ins = &iter->u.next;
+                ins = &iter->u.dst.rt6_next;
         }
 
         /*
@@ -663,7 +663,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
          */
 
 out:
-        rt->u.next = iter;
+        rt->u.dst.rt6_next = iter;
         *ins = rt;
         rt->rt6i_node = fn;
         atomic_inc(&rt->rt6i_ref);
@@ -1104,7 +1104,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
         RT6_TRACE("fib6_del_route\n");
 
         /* Unlink it */
-        *rtp = rt->u.next;
+        *rtp = rt->u.dst.rt6_next;
         rt->rt6i_node = NULL;
         rt6_stats.fib_rt_entries--;
         rt6_stats.fib_discarded_routes++;
@@ -1114,14 +1114,14 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
         FOR_WALKERS(w) {
                 if (w->state == FWS_C && w->leaf == rt) {
                         RT6_TRACE("walker %p adjusted by delroute\n", w);
-                        w->leaf = rt->u.next;
+                        w->leaf = rt->u.dst.rt6_next;
                         if (w->leaf == NULL)
                                 w->state = FWS_U;
                 }
         }
         read_unlock(&fib6_walker_lock);
 
-        rt->u.next = NULL;
+        rt->u.dst.rt6_next = NULL;
 
         if (fn->leaf == NULL && fn->fn_flags&RTN_TL_ROOT)
                 fn->leaf = &ip6_null_entry;
@@ -1189,7 +1189,7 @@ int fib6_del(struct rt6_info *rt, struct nl_info *info)
          *      Walk the leaf entries looking for ourself
          */
 
-        for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.next) {
+        for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->u.dst.rt6_next) {
                 if (*rtp == rt) {
                         fib6_del_route(fn, rtp, info);
                         return 0;
@@ -1316,7 +1316,7 @@ static int fib6_clean_node(struct fib6_walker_t *w)
         struct rt6_info *rt;
         struct fib6_cleaner_t *c = (struct fib6_cleaner_t*)w;
 
-        for (rt = w->leaf; rt; rt = rt->u.next) {
+        for (rt = w->leaf; rt; rt = rt->u.dst.rt6_next) {
                 res = c->func(rt, c->arg);
                 if (res < 0) {
                         w->leaf = rt;
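fib6_add_rt2node() above keeps each fib6 node's leaf list sorted by metric, threading an ins pointer-to-pointer through the walk so the new route splices in before the first entry with a larger metric. The same logic isolated with toy types (simplified; the real function also handles duplicates and flag checks):

struct rt {
        struct rt *next;        /* stands in for u.dst.rt6_next */
        int metric;
};

static void insert_sorted(struct rt **leaf, struct rt *new_rt)
{
        struct rt **ins = leaf;
        struct rt *iter;

        for (iter = *leaf; iter; iter = iter->next) {
                if (iter->metric > new_rt->metric)
                        break;          /* insert before iter */
                ins = &iter->next;      /* ins = &iter->u.dst.rt6_next */
        }

        new_rt->next = iter;            /* rt->u.dst.rt6_next = iter */
        *ins = new_rt;                  /* *ins = rt */
}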
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
@@ -243,7 +243,7 @@ static __inline__ struct rt6_info *rt6_device_match(struct rt6_info *rt,
         struct rt6_info *sprt;
 
         if (oif) {
-                for (sprt = rt; sprt; sprt = sprt->u.next) {
+                for (sprt = rt; sprt; sprt = sprt->u.dst.rt6_next) {
                         struct net_device *dev = sprt->rt6i_dev;
                         if (dev->ifindex == oif)
                                 return sprt;
@@ -376,7 +376,7 @@ static struct rt6_info *rt6_select(struct rt6_info **head, int oif,
 
         for (rt = rt0, metric = rt0->rt6i_metric;
              rt && rt->rt6i_metric == metric && (!last || rt != rt0);
-             rt = rt->u.next) {
+             rt = rt->u.dst.rt6_next) {
                 int m;
 
                 if (rt6_check_expired(rt))
@@ -404,9 +404,9 @@ static struct rt6_info *rt6_select(struct rt6_info **head, int oif,
                 /* no entries matched; do round-robin */
                 static DEFINE_SPINLOCK(lock);
                 spin_lock(&lock);
-                *head = rt0->u.next;
-                rt0->u.next = last->u.next;
-                last->u.next = rt0;
+                *head = rt0->u.dst.rt6_next;
+                rt0->u.dst.rt6_next = last->u.dst.rt6_next;
+                last->u.dst.rt6_next = rt0;
                 spin_unlock(&lock);
         }
 
@@ -1278,7 +1278,7 @@ static int ip6_route_del(struct fib6_config *cfg)
                          &cfg->fc_src, cfg->fc_src_len);
 
         if (fn) {
-                for (rt = fn->leaf; rt; rt = rt->u.next) {
+                for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
                         if (cfg->fc_ifindex &&
                             (rt->rt6i_dev == NULL ||
                              rt->rt6i_dev->ifindex != cfg->fc_ifindex))
@@ -1329,7 +1329,7 @@ static struct rt6_info *__ip6_route_redirect(struct fib6_table *table,
         read_lock_bh(&table->tb6_lock);
         fn = fib6_lookup(&table->tb6_root, &fl->fl6_dst, &fl->fl6_src);
 restart:
-        for (rt = fn->leaf; rt; rt = rt->u.next) {
+        for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
                 /*
                  * Current route is on-link; redirect is always invalid.
                  *
@@ -1590,7 +1590,7 @@ static struct rt6_info *rt6_get_route_info(struct in6_addr *prefix, int prefixle
         if (!fn)
                 goto out;
 
-        for (rt = fn->leaf; rt; rt = rt->u.next) {
+        for (rt = fn->leaf; rt; rt = rt->u.dst.rt6_next) {
                 if (rt->rt6i_dev->ifindex != ifindex)
                         continue;
                 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
@@ -1641,7 +1641,7 @@ struct rt6_info *rt6_get_dflt_router(struct in6_addr *addr, struct net_device *d
                 return NULL;
 
         write_lock_bh(&table->tb6_lock);
-        for (rt = table->tb6_root.leaf; rt; rt=rt->u.next) {
+        for (rt = table->tb6_root.leaf; rt; rt=rt->u.dst.rt6_next) {
                 if (dev == rt->rt6i_dev &&
                     ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
                     ipv6_addr_equal(&rt->rt6i_gateway, addr))
@@ -1684,7 +1684,7 @@ void rt6_purge_dflt_routers(void)
 
 restart:
         read_lock_bh(&table->tb6_lock);
-        for (rt = table->tb6_root.leaf; rt; rt = rt->u.next) {
+        for (rt = table->tb6_root.leaf; rt; rt = rt->u.dst.rt6_next) {
                 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
                         dst_hold(&rt->u.dst);
                         read_unlock_bh(&table->tb6_lock);
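The three assignments guarded by the spinlock in rt6_select() are a round-robin rotation: when no route in the equal-metric run from rt0 to last matched, rt0 is moved to just after last so the next lookup starts from a different candidate. Isolated with toy types:

struct r6 { struct r6 *next; };        /* next stands in for u.dst.rt6_next */

static void rotate(struct r6 **head, struct r6 *rt0, struct r6 *last)
{
        *head = rt0->next;      /* second entry becomes the new head */
        rt0->next = last->next; /* old head reattaches after last    */
        last->next = rt0;
}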
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
@@ -71,7 +71,7 @@ static struct sock *__udp6_lib_lookup(struct in6_addr *saddr, __be16 sport,
         sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
                 struct inet_sock *inet = inet_sk(sk);
 
-                if (inet->num == hnum && sk->sk_family == PF_INET6) {
+                if (sk->sk_hash == hnum && sk->sk_family == PF_INET6) {
                         struct ipv6_pinfo *np = inet6_sk(sk);
                         int score = 0;
                         if (inet->dport) {
@@ -309,7 +309,7 @@ static struct sock *udp_v6_mcast_next(struct sock *sk,
         sk_for_each_from(s, node) {
                 struct inet_sock *inet = inet_sk(s);
 
-                if (inet->num == num && s->sk_family == PF_INET6) {
+                if (s->sk_hash == num && s->sk_family == PF_INET6) {
                         struct ipv6_pinfo *np = inet6_sk(s);
                         if (inet->dport) {
                                 if (inet->dport != rmt_port)
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
@@ -178,7 +178,8 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
                 __xfrm6_bundle_len_inc(&header_len, &nfheader_len, xfrm[i]);
                 trailer_len += xfrm[i]->props.trailer_len;
 
-                if (xfrm[i]->props.mode == XFRM_MODE_TUNNEL) {
+                if (xfrm[i]->props.mode == XFRM_MODE_TUNNEL ||
+                    xfrm[i]->props.mode == XFRM_MODE_ROUTEOPTIMIZATION) {
                         unsigned short encap_family = xfrm[i]->props.family;
                         switch(encap_family) {
                         case AF_INET:
@@ -186,8 +187,9 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
                                 fl_tunnel.fl4_src = xfrm[i]->props.saddr.a4;
                                 break;
                         case AF_INET6:
-                                ipv6_addr_copy(&fl_tunnel.fl6_dst, (struct in6_addr*)&xfrm[i]->id.daddr.a6);
-                                ipv6_addr_copy(&fl_tunnel.fl6_src, (struct in6_addr*)&xfrm[i]->props.saddr.a6);
+                                ipv6_addr_copy(&fl_tunnel.fl6_dst, __xfrm6_bundle_addr_remote(xfrm[i], &fl->fl6_dst));
+
+                                ipv6_addr_copy(&fl_tunnel.fl6_src, __xfrm6_bundle_addr_local(xfrm[i], &fl->fl6_src));
                                 break;
                         default:
                                 BUG_ON(1);