author     David S. Miller <davem@davemloft.net>  2008-04-14 02:30:23 -0700
committer  David S. Miller <davem@davemloft.net>  2008-04-14 02:30:23 -0700
commit     df39e8ba56a788733d369068c7319e04b1da3cd5 (patch)
tree       1e9be853bdb455e341cdbf957656f342cfa2eb9e /net/ipv4
parent     f5572855ec492334d8c3ec0e0e86c31865d5cf07 (diff)
parent     159d83363b629c91d020734207c1bc788b96af5a (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	drivers/net/ehea/ehea_main.c
	drivers/net/wireless/iwlwifi/Kconfig
	drivers/net/wireless/rt2x00/rt61pci.c
	net/ipv4/inet_timewait_sock.c
	net/ipv6/raw.c
	net/mac80211/ieee80211_sta.c
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/icmp.c                     | 24
-rw-r--r--  net/ipv4/ip_sockglue.c              |  2
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c  |  4
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c    |  2
-rw-r--r--  net/ipv4/tcp_input.c                | 65
-rw-r--r--  net/ipv4/tcp_output.c               |  3
6 files changed, 70 insertions(+), 30 deletions(-)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index efc7cbe759c..3e14d9cd29b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -578,7 +578,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
}
if (xfrm_decode_session_reverse(skb_in, &fl, AF_INET))
- goto ende;
+ goto relookup_failed;
if (inet_addr_type(net, fl.fl4_src) == RTN_LOCAL)
err = __ip_route_output_key(net, &rt2, &fl);
@@ -588,7 +588,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
fl2.fl4_dst = fl.fl4_src;
if (ip_route_output_key(net, &rt2, &fl2))
- goto ende;
+ goto relookup_failed;
/* Ugh! */
odst = skb_in->dst;
@@ -601,21 +601,23 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
}
if (err)
- goto ende;
+ goto relookup_failed;
err = xfrm_lookup((struct dst_entry **)&rt2, &fl, NULL,
XFRM_LOOKUP_ICMP);
- if (err == -ENOENT) {
+ switch (err) {
+ case 0:
+ dst_release(&rt->u.dst);
+ rt = rt2;
+ break;
+ case -EPERM:
+ goto ende;
+ default:
+relookup_failed:
if (!rt)
goto out_unlock;
- goto route_done;
+ break;
}
-
- dst_release(&rt->u.dst);
- rt = rt2;
-
- if (err)
- goto out_unlock;
}
route_done:
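
The reworked error handling above routes all relookup failures through a single relookup_failed label inside the switch, adopts the new route only on success, and gives up outright on -EPERM. A minimal standalone sketch of that dispatch pattern, with invented names (pick_route, struct route) rather than the kernel's:

/*
 * Standalone sketch of the dispatch pattern used in the hunk above:
 * adopt the relookup result on success, give up entirely on -EPERM,
 * and fall back to the original route (if any) on every other error.
 * pick_route() and struct route are invented for illustration.
 */
#include <errno.h>
#include <stddef.h>

struct route;                           /* opaque stand-in for struct rtable */

static struct route *pick_route(struct route *orig, struct route *relookup,
                                int err, int *give_up)
{
        *give_up = 0;
        switch (err) {
        case 0:                         /* relookup succeeded: prefer it */
                return relookup;
        case -EPERM:                    /* policy forbids sending at all */
                *give_up = 1;
                return NULL;
        default:                        /* -ENOENT etc.: keep the original */
                if (!orig)
                        *give_up = 1;   /* nothing to fall back to */
                return orig;
        }
}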
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index d6e76f5229c..d8adfd4972e 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1133,7 +1133,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
}
release_sock(sk);
- if (len < sizeof(int) && len > 0 && val>=0 && val<255) {
+ if (len < sizeof(int) && len > 0 && val>=0 && val<=255) {
unsigned char ucval = (unsigned char)val;
len = 1;
if (put_user(len, optlen))
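
The widened comparison above lets a value of exactly 255 still be returned as a single byte when the caller passed a buffer shorter than an int. A rough userspace sketch of that copy-out decision; copy_opt_val() and its signature are invented for illustration:

/*
 * Rough sketch of the copy-out decision the fix adjusts: a value in
 * 0..255 inclusive may be returned as a single byte when the caller's
 * buffer is shorter than an int.  copy_opt_val() is invented here.
 */
#include <string.h>

static int copy_opt_val(void *ubuf, int ulen, int val)
{
        if (ulen >= (int)sizeof(int)) {
                memcpy(ubuf, &val, sizeof(int));
                return sizeof(int);
        }
        if (ulen > 0 && val >= 0 && val <= 255) {       /* 255 now included */
                unsigned char ucval = (unsigned char)val;

                memcpy(ubuf, &ucval, 1);
                return 1;
        }
        return -1;              /* would be -EFAULT/-EINVAL in real code */
}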
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 965b08a7d73..380d8daac72 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -82,8 +82,8 @@ clusterip_config_put(struct clusterip_config *c)
static inline void
clusterip_config_entry_put(struct clusterip_config *c)
{
+ write_lock_bh(&clusterip_lock);
if (atomic_dec_and_test(&c->entries)) {
- write_lock_bh(&clusterip_lock);
list_del(&c->list);
write_unlock_bh(&clusterip_lock);
@@ -96,7 +96,9 @@ clusterip_config_entry_put(struct clusterip_config *c)
#ifdef CONFIG_PROC_FS
remove_proc_entry(c->pde->name, c->pde->parent);
#endif
+ return;
}
+ write_unlock_bh(&clusterip_lock);
}
static struct clusterip_config *
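
Taking clusterip_lock before the refcount decrement means dropping the last reference and unlinking the entry form one critical section, so a concurrent lookup cannot take a new reference on a config that is about to disappear. A userspace sketch of the same ordering, with a pthread mutex standing in for write_lock_bh() and an invented entry type:

/*
 * Userspace sketch of the ordering the hunk enforces: the refcount drop
 * and the unlink happen under the same lock, so a concurrent lookup
 * cannot take a new reference between "count hit zero" and "removed
 * from the list".  A pthread mutex stands in for write_lock_bh(); the
 * entry type and list are invented.
 */
#include <pthread.h>

struct entry {
        int refcnt;
        struct entry *next;             /* toy singly linked list */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *list_head;

static void entry_put(struct entry *e)
{
        pthread_mutex_lock(&list_lock);         /* lock BEFORE the decrement */
        if (--e->refcnt == 0) {
                struct entry **pp = &list_head;

                while (*pp && *pp != e)         /* unlink while still locked */
                        pp = &(*pp)->next;
                if (*pp)
                        *pp = e->next;
                pthread_mutex_unlock(&list_lock);
                /* teardown/free would happen here, outside the lock */
                return;
        }
        pthread_mutex_unlock(&list_lock);
}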
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 0d5fa3a54d0..36b4e3bb056 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -629,6 +629,8 @@ static int __init nf_nat_init(void)
size_t i;
int ret;
+ need_ipv4_conntrack();
+
ret = nf_ct_extend_register(&nat_extend);
if (ret < 0) {
printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 74361195604..bd0ee8ca8b2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1625,13 +1625,11 @@ out:
return flag;
}
-/* If we receive more dupacks than we expected counting segments
- * in assumption of absent reordering, interpret this as reordering.
- * The only another reason could be bug in receiver TCP.
+/* Limits sacked_out so that sum with lost_out isn't ever larger than
+ * packets_out. Returns zero if sacked_out adjustement wasn't necessary.
*/
-static void tcp_check_reno_reordering(struct sock *sk, const int addend)
+int tcp_limit_reno_sacked(struct tcp_sock *tp)
{
- struct tcp_sock *tp = tcp_sk(sk);
u32 holes;
holes = max(tp->lost_out, 1U);
@@ -1639,8 +1637,20 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
if ((tp->sacked_out + holes) > tp->packets_out) {
tp->sacked_out = tp->packets_out - holes;
- tcp_update_reordering(sk, tp->packets_out + addend, 0);
+ return 1;
}
+ return 0;
+}
+
+/* If we receive more dupacks than we expected counting segments
+ * in assumption of absent reordering, interpret this as reordering.
+ * The only another reason could be bug in receiver TCP.
+ */
+static void tcp_check_reno_reordering(struct sock *sk, const int addend)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ if (tcp_limit_reno_sacked(tp))
+ tcp_update_reordering(sk, tp->packets_out + addend, 0);
}
/* Emulate SACKs for SACKless connection: account for a new dupack. */
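
The new tcp_limit_reno_sacked() factors the clamp out of tcp_check_reno_reordering() so that other code paths (tcp_simple_retransmit() below) can reuse it. A bare-counter sketch of the invariant it enforces, sacked_out plus lost_out never exceeding packets_out; limit_reno_sacked() here is an illustrative mirror, not the kernel symbol:

/*
 * Bare-counter mirror of the invariant the new helper maintains: the
 * emulated SACK count plus the hole count (at least one) may never
 * exceed the packets in flight.  limit_reno_sacked() is illustrative,
 * not the kernel function itself.
 */
static int limit_reno_sacked(unsigned int *sacked_out,
                             unsigned int lost_out,
                             unsigned int packets_out)
{
        unsigned int holes = lost_out ? lost_out : 1;   /* max(lost_out, 1) */

        if (holes > packets_out)
                holes = packets_out;
        if (*sacked_out + holes > packets_out) {
                *sacked_out = packets_out - holes;
                return 1;                       /* an adjustment was needed */
        }
        return 0;
}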
@@ -1681,11 +1691,16 @@ static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
int tcp_use_frto(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
struct sk_buff *skb;
if (!sysctl_tcp_frto)
return 0;
+ /* MTU probe and F-RTO won't really play nicely along currently */
+ if (icsk->icsk_mtup.probe_size)
+ return 0;
+
if (IsSackFrto())
return 1;
@@ -2134,11 +2149,13 @@ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
/* Mark head of queue up as lost. With RFC3517 SACK, the packets is
* is against sacked "cnt", otherwise it's against facked "cnt"
*/
-static void tcp_mark_head_lost(struct sock *sk, int packets, int fast_rexmit)
+static void tcp_mark_head_lost(struct sock *sk, int packets)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
- int cnt;
+ int cnt, oldcnt;
+ int err;
+ unsigned int mss;
BUG_TRAP(packets <= tp->packets_out);
if (tp->lost_skb_hint) {
@@ -2157,13 +2174,25 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int fast_rexmit)
tp->lost_skb_hint = skb;
tp->lost_cnt_hint = cnt;
+ if (after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
+ break;
+
+ oldcnt = cnt;
if (tcp_is_fack(tp) || tcp_is_reno(tp) ||
(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
cnt += tcp_skb_pcount(skb);
- if (((!fast_rexmit || (tp->lost_out > 0)) && (cnt > packets)) ||
- after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
- break;
+ if (cnt > packets) {
+ if (tcp_is_sack(tp) || (oldcnt >= packets))
+ break;
+
+ mss = skb_shinfo(skb)->gso_size;
+ err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, mss);
+ if (err < 0)
+ break;
+ cnt = packets;
+ }
+
if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) {
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb);
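
With GSO a single skb can represent several MSS-sized segments, so the hunk above fragments the skb that straddles the target count instead of over- or under-marking it. A small sketch of the split arithmetic, using bare integers; split_offset() is invented:

/*
 * Sketch of the split arithmetic introduced above: when the running
 * per-segment count would pass the target inside one super-segment,
 * fragment it so that exactly (target - oldcnt) MSS-sized pieces stay
 * in the marked part.  E.g. oldcnt = 3, target = 5, mss = 1448 splits
 * at 2896 bytes and marks two more segments.  split_offset() is
 * invented; the hunk passes the same value straight to tcp_fragment().
 */
static unsigned int split_offset(unsigned int oldcnt,   /* segments counted before this skb */
                                 unsigned int target,   /* "packets" to mark lost */
                                 unsigned int mss)      /* gso_size of the skb */
{
        /* the caller has already checked oldcnt < target */
        return (target - oldcnt) * mss;
}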
@@ -2180,17 +2209,17 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
struct tcp_sock *tp = tcp_sk(sk);
if (tcp_is_reno(tp)) {
- tcp_mark_head_lost(sk, 1, fast_rexmit);
+ tcp_mark_head_lost(sk, 1);
} else if (tcp_is_fack(tp)) {
int lost = tp->fackets_out - tp->reordering;
if (lost <= 0)
lost = 1;
- tcp_mark_head_lost(sk, lost, fast_rexmit);
+ tcp_mark_head_lost(sk, lost);
} else {
int sacked_upto = tp->sacked_out - tp->reordering;
- if (sacked_upto < 0)
- sacked_upto = 0;
- tcp_mark_head_lost(sk, sacked_upto, fast_rexmit);
+ if (sacked_upto < fast_rexmit)
+ sacked_upto = fast_rexmit;
+ tcp_mark_head_lost(sk, sacked_upto);
}
/* New heuristics: it is possible only after we switched
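
In the non-FACK, non-Reno branch the floor moves from 0 to fast_rexmit, which compensates for the dropped fast_rexmit parameter of tcp_mark_head_lost(): a fast retransmit still marks at least one head segment as lost. A bare-integer sketch of that branch; head_segments_to_mark() is invented:

/*
 * Bare-integer sketch of the non-FACK, non-Reno branch after the change:
 * the count of head segments to mark lost is now floored at fast_rexmit
 * (in practice 0 or 1) rather than at zero, so a fast retransmit always
 * marks at least one segment.  head_segments_to_mark() is invented.
 */
static int head_segments_to_mark(int sacked_out, int reordering, int fast_rexmit)
{
        int sacked_upto = sacked_out - reordering;

        if (sacked_upto < fast_rexmit)          /* the floor used to be 0 */
                sacked_upto = fast_rexmit;
        return sacked_upto;
}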
@@ -2524,7 +2553,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
before(tp->snd_una, tp->high_seq) &&
icsk->icsk_ca_state != TCP_CA_Open &&
tp->fackets_out > tp->reordering) {
- tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0);
+ tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering);
NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
}
@@ -2586,6 +2615,8 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
case TCP_CA_Loss:
if (flag & FLAG_DATA_ACKED)
icsk->icsk_retransmits = 0;
+ if (tcp_is_reno(tp) && flag & FLAG_SND_UNA_ADVANCED)
+ tcp_reset_reno_sack(tp);
if (!tcp_try_undo_loss(sk)) {
tcp_moderate_cwnd(tp);
tcp_xmit_retransmit_queue(sk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 76b3653e9b4..90270cbdf42 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1808,6 +1808,9 @@ void tcp_simple_retransmit(struct sock *sk)
if (!lost)
return;
+ if (tcp_is_reno(tp))
+ tcp_limit_reno_sacked(tp);
+
tcp_verify_left_out(tp);
/* Don't muck with the congestion window here.
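
For a (New)Reno flow sacked_out is only the dupack-derived estimate, so once tcp_simple_retransmit() has just inflated lost_out the sum of the two can exceed packets_out and trip the tcp_verify_left_out() accounting check that follows. In terms of the bare-counter sketch shown earlier, this call amounts to running limit_reno_sacked(&sacked_out, lost_out, packets_out) once more after lost_out has grown.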