Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c                       |  2
-rw-r--r--  net/core/request_sock.c              | 35
-rw-r--r--  net/ipv4/route.c                     |  3
-rw-r--r--  net/ipv4/tcp_input.c                 | 16
-rw-r--r--  net/mac80211/ieee80211_i.h           |  2
-rw-r--r--  net/mac80211/ieee80211_sta.c         |  8
-rw-r--r--  net/netfilter/nf_conntrack_extend.c  |  2
-rw-r--r--  net/netfilter/nf_sockopt.c           |  6
-rw-r--r--  net/sched/sch_generic.c              |  5
-rw-r--r--  net/sunrpc/xprtrdma/transport.c      |  2
10 files changed, 70 insertions(+), 11 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index dd40b35bb00..86d62611f2f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1171,6 +1171,8 @@ rollback:
nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
}
}
+
+ raw_notifier_chain_unregister(&netdev_chain, nb);
goto unlock;
}
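
This hunk completes the rollback path of register_netdevice_notifier(): when the new notifier rejects one of the existing devices, the loop above replays NETDEV_UNREGISTER for every device the notifier had already been told about, but the notifier itself was left on netdev_chain and kept receiving events. The added raw_notifier_chain_unregister() drops it before the unlock. A minimal user-space sketch of the same discipline, with hypothetical names (not the kernel API): undo the side effects first, then undo the registration itself.

#include <stddef.h>

typedef int (*notifier_fn)(int event, int dev);

static notifier_fn chain[8];
static size_t chain_len;

enum { EV_UNREGISTER, EV_REGISTER };

static int register_notifier(notifier_fn fn, int ndevs)
{
	int dev;

	chain[chain_len++] = fn;	/* join the chain first */
	for (dev = 0; dev < ndevs; dev++)
		if (fn(EV_REGISTER, dev))
			break;
	if (dev == ndevs)
		return 0;
	while (--dev >= 0)		/* replay UNREGISTER for devices already announced */
		fn(EV_UNREGISTER, dev);
	chain[--chain_len] = NULL;	/* the fix: take the notifier off the chain too */
	return -1;
}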
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 5f0818d815e..45aed75cb57 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -71,6 +71,41 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
EXPORT_SYMBOL(reqsk_queue_alloc);
+void __reqsk_queue_destroy(struct request_sock_queue *queue)
+{
+ struct listen_sock *lopt;
+ size_t lopt_size;
+
+ /*
+ * this is an error recovery path only
+ * no locking needed and the lopt is not NULL
+ */
+
+ lopt = queue->listen_opt;
+ lopt_size = sizeof(struct listen_sock) +
+ lopt->nr_table_entries * sizeof(struct request_sock *);
+
+ if (lopt_size > PAGE_SIZE)
+ vfree(lopt);
+ else
+ kfree(lopt);
+}
+
+EXPORT_SYMBOL(__reqsk_queue_destroy);
+
+static inline struct listen_sock *reqsk_queue_yank_listen_sk(
+ struct request_sock_queue *queue)
+{
+ struct listen_sock *lopt;
+
+ write_lock_bh(&queue->syn_wait_lock);
+ lopt = queue->listen_opt;
+ queue->listen_opt = NULL;
+ write_unlock_bh(&queue->syn_wait_lock);
+
+ return lopt;
+}
+
void reqsk_queue_destroy(struct request_sock_queue *queue)
{
/* make all the listen_opt local to us */
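
The two new helpers split listen-queue teardown in two. __reqsk_queue_destroy() is for the error path of a failed listen() setup: nothing else can see the queue yet, so listen_opt is freed directly, without taking syn_wait_lock. reqsk_queue_destroy() keeps using the locked variant, now factored out as reqsk_queue_yank_listen_sk(), which detaches listen_opt under the lock before freeing. The vfree()/kfree() choice has to mirror the allocation side, where tables bigger than a page come from vmalloc(). A user-space sketch of that pairing rule, with hypothetical names (PAGE_BYTES stands in for PAGE_SIZE):

#define _DEFAULT_SOURCE
#include <stdlib.h>
#include <sys/mman.h>

#define PAGE_BYTES 4096UL	/* assumption: 4 KiB pages */

static void *table_alloc(size_t size)
{
	if (size > PAGE_BYTES) {	/* large: page-backed mapping, like vmalloc() */
		void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		return p == MAP_FAILED ? NULL : p;
	}
	return malloc(size);		/* small: heap, like kmalloc() */
}

static void table_free(void *p, size_t size)
{
	if (size > PAGE_BYTES)		/* recompute the same size, same threshold */
		munmap(p, size);
	else
		free(p);
}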
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 45651834e1e..1bff9ed349f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -578,6 +578,9 @@ static void rt_check_expire(struct work_struct *work)
i = (i + 1) & rt_hash_mask;
rthp = &rt_hash_table[i].chain;
+ if (need_resched())
+ cond_resched();
+
if (*rthp == NULL)
continue;
spin_lock_bh(rt_hash_lock_addr(i));
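
rt_check_expire() walks the entire route hash table from a work item; on a large table that is a long stretch of kernel time with no scheduling point. The added check yields between buckets, before the per-bucket spinlock is taken. cond_resched() already tests need_resched() internally, so the explicit guard only saves a function call on the common no-reschedule path.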
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 20c9440ab85..0f0c1c9829a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1269,6 +1269,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
return 0;
+ if (!tp->packets_out)
+ goto out;
+
/* SACK fastpath:
* if the only SACK change is the increase of the end_seq of
* the first block then only apply that SACK block
@@ -1515,6 +1518,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
(!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
tcp_update_reordering(sk, tp->fackets_out - reord, 0);
+out:
+
#if FASTRETRANS_DEBUG > 0
BUG_TRAP((int)tp->sacked_out >= 0);
BUG_TRAP((int)tp->lost_out >= 0);
@@ -1669,6 +1674,9 @@ void tcp_enter_frto(struct sock *sk)
}
tcp_verify_left_out(tp);
+ /* Too bad if TCP was application limited */
+ tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
+
/* Earlier loss recovery underway (see RFC4138; Appendix B).
* The last condition is necessary at least in tp->frto_counter case.
*/
@@ -1701,6 +1709,8 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
tcp_for_write_queue(skb, sk) {
if (skb == tcp_send_head(sk))
break;
+
+ TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
/*
* Count the retransmission made on RTO correctly (only when
* waiting for the first ACK and did not get it)...
@@ -1714,7 +1724,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
} else {
if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
tp->undo_marker = 0;
- TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
+ TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
}
/* Don't lost mark skbs that were fwd transmitted after RTO */
@@ -3103,11 +3113,11 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
/* See if we can take anything off of the retransmit queue. */
flag |= tcp_clean_rtx_queue(sk, &seq_rtt, prior_fackets);
+ if (tp->frto_counter)
+ frto_cwnd = tcp_process_frto(sk, flag);
/* Guarantee sacktag reordering detection against wrap-arounds */
if (before(tp->frto_highmark, tp->snd_una))
tp->frto_highmark = 0;
- if (tp->frto_counter)
- frto_cwnd = tcp_process_frto(sk, flag);
if (tcp_ack_is_dubious(sk, flag)) {
/* Advance CWND, if state allows this. */
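
Four related F-RTO/SACK fixes in this file. First, tcp_sacktag_write_queue() now jumps to the new out: label when packets_out is zero: with nothing in flight there is nothing to tag, and the per-skb walk below would churn through stale state for no benefit. Second, tcp_enter_frto() clamps snd_cwnd to tcp_packets_in_flight() + 1, so an application-limited sender (the "too bad" comment) cannot carry an inflated cwnd into F-RTO. Third, tcp_enter_frto_loss() clears TCPCB_LOST on every skb up front rather than only in the else branch; since lost_out is rebuilt in this function, leftover LOST bits on first-branch skbs would leave the flags out of sync with the counters, while the else branch still clears TCPCB_SACKED_RETRANS as before. Fourth, in tcp_ack() the tcp_process_frto() call moves above the wrap-around guard; the intent appears to be that F-RTO processing should still see the frto_highmark it set at RTO time, with the guard zeroing it afterwards only to keep later sacktag reordering detection safe.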
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index b4e32ab3664..72e1c93dd87 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -242,6 +242,8 @@ struct ieee80211_if_sta {
u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
u8 ssid[IEEE80211_MAX_SSID_LEN];
size_t ssid_len;
+ u8 scan_ssid[IEEE80211_MAX_SSID_LEN];
+ size_t scan_ssid_len;
u16 aid;
u16 ap_capab, capab;
u8 *extra_ie; /* to be added to the end of AssocReq */
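
The new scan_ssid/scan_ssid_len fields give the STA somewhere to remember the SSID of a scan request that had to be deferred; ssid/ssid_len already hold the association SSID, so reusing them would let a pending scan and the current association clobber each other.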
diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c
index 2079e988fc5..015b3f879aa 100644
--- a/net/mac80211/ieee80211_sta.c
+++ b/net/mac80211/ieee80211_sta.c
@@ -2002,7 +2002,10 @@ void ieee80211_sta_work(struct work_struct *work)
if (ifsta->state != IEEE80211_AUTHENTICATE &&
ifsta->state != IEEE80211_ASSOCIATE &&
test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request)) {
- ieee80211_sta_start_scan(dev, NULL, 0);
+ if (ifsta->scan_ssid_len)
+ ieee80211_sta_start_scan(dev, ifsta->scan_ssid, ifsta->scan_ssid_len);
+ else
+ ieee80211_sta_start_scan(dev, NULL, 0);
return;
}
@@ -2872,6 +2875,9 @@ int ieee80211_sta_req_scan(struct net_device *dev, u8 *ssid, size_t ssid_len)
return -EBUSY;
}
+ ifsta->scan_ssid_len = ssid_len;
+ if (ssid_len)
+ memcpy(ifsta->scan_ssid, ssid, ssid_len);
set_bit(IEEE80211_STA_REQ_SCAN, &ifsta->request);
queue_work(local->hw.workqueue, &ifsta->work);
return 0;
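
Previously a scan requested while the interface was authenticating or associating was deferred via IEEE80211_STA_REQ_SCAN, but when the work handler finally ran it always issued a broadcast scan (NULL, 0), losing the SSID the caller asked for. ieee80211_sta_req_scan() now stashes the SSID in the new scan_ssid fields, and the work handler replays it whenever one was given.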
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index a1a65a1313b..cf6ba6659a8 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -109,7 +109,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
rcu_read_lock();
t = rcu_dereference(nf_ct_ext_types[i]);
if (t && t->move)
- t->move(ct, ct->ext + ct->ext->offset[id]);
+ t->move(ct, ct->ext + ct->ext->offset[i]);
rcu_read_unlock();
}
kfree(ct->ext);
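
A one-character loop-index fix in __nf_ct_ext_add(): when the extension area is reallocated, each already-present extension i must have its move() hook pointed at that extension's own offset into the area, offset[i]. The old code passed offset[id], the offset of the extension currently being added, so every moved extension was handed the wrong address.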
diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
index 87bc1443c52..3dd4b3c76d8 100644
--- a/net/netfilter/nf_sockopt.c
+++ b/net/netfilter/nf_sockopt.c
@@ -143,12 +143,12 @@ static int compat_nf_sockopt(struct sock *sk, int pf, int val,
if (ops->compat_get)
ret = ops->compat_get(sk, val, opt, len);
else
- ret = ops->get(sk, val, ops, len);
+ ret = ops->get(sk, val, opt, len);
} else {
if (ops->compat_set)
- ret = ops->compat_set(sk, val, ops, *len);
+ ret = ops->compat_set(sk, val, opt, *len);
else
- ret = ops->set(sk, val, ops, *len);
+ ret = ops->set(sk, val, opt, *len);
}
module_put(ops->owner);
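
Three copy-and-paste slips in one function: in compat_nf_sockopt(), the plain get() fallback, the compat_set() call, and the plain set() fallback were all passed the ops table itself where the user buffer opt belongs. The fix forwards opt throughout. The compiler stays silent on such a slip because the buffer parameter is a void pointer, which accepts any pointer type. A minimal sketch of the corrected dispatch shape, with hypothetical names and signatures:

struct sockopt_ops {
	int (*get)(int val, void *buf, int *len);
	int (*set)(int val, void *buf, int len);
	int (*compat_get)(int val, void *buf, int *len);
	int (*compat_set)(int val, void *buf, int len);
};

static int compat_sockopt(struct sockopt_ops *ops, int is_get, int val,
			  void *buf, int *len)
{
	if (is_get)
		return ops->compat_get ? ops->compat_get(val, buf, len)
				       : ops->get(val, buf, len);	/* buf, not ops */
	return ops->compat_set ? ops->compat_set(val, buf, *len)
			       : ops->set(val, buf, *len);
}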
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index fa1a6f45dc4..e595e6570ce 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -134,7 +134,7 @@ static inline int qdisc_restart(struct net_device *dev)
{
struct Qdisc *q = dev->qdisc;
struct sk_buff *skb;
- int ret;
+ int ret = NETDEV_TX_BUSY;
/* Dequeue packet */
if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))
@@ -145,7 +145,8 @@ static inline int qdisc_restart(struct net_device *dev)
spin_unlock(&dev->queue_lock);
HARD_TX_LOCK(dev, smp_processor_id());
- ret = dev_hard_start_xmit(skb, dev);
+ if (!netif_subqueue_stopped(dev, skb))
+ ret = dev_hard_start_xmit(skb, dev);
HARD_TX_UNLOCK(dev);
spin_lock(&dev->queue_lock);
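
With multiqueue devices, qdisc_restart() must not hand the skb to dev_hard_start_xmit() while the target subqueue is stopped. Since the transmit call can now be skipped, ret gets a NETDEV_TX_BUSY default, so the skipped case flows into the existing requeue handling below instead of acting on an uninitialized status.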
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index dc55cc974c9..1afeb3eb8e4 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -320,9 +320,9 @@ xprt_setup_rdma(struct xprt_create *args)
xprt->slot = kcalloc(xprt->max_reqs,
sizeof(struct rpc_rqst), GFP_KERNEL);
if (xprt->slot == NULL) {
- kfree(xprt);
dprintk("RPC: %s: couldn't allocate %d slots\n",
__func__, xprt->max_reqs);
+ kfree(xprt);
return ERR_PTR(-ENOMEM);
}
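
A use-after-free in the error path: the dprintk() dereferences xprt->max_reqs, but xprt had already been kfree()d one line earlier. Moving the kfree() after the message restores the "last use, then free" ordering. The same shape in a standalone user-space sketch (hypothetical names):

#include <stdio.h>
#include <stdlib.h>

struct xprt { int max_reqs; void *slot; };

static struct xprt *xprt_setup(int max_reqs)
{
	struct xprt *x = calloc(1, sizeof(*x));

	if (!x)
		return NULL;
	x->max_reqs = max_reqs;
	x->slot = calloc(max_reqs, 64);	/* 64: stand-in slot size */
	if (!x->slot) {
		fprintf(stderr, "couldn't allocate %d slots\n", x->max_reqs);
		free(x);		/* only after the last dereference of x */
		return NULL;
	}
	return x;
}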