Diffstat (limited to 'net')
-rw-r--r--  net/8021q/vlan.c | 6
-rw-r--r--  net/8021q/vlan.h | 1
-rw-r--r--  net/8021q/vlan_dev.c | 26
-rw-r--r--  net/bridge/br_netfilter.c | 3
-rw-r--r--  net/bridge/netfilter/ebt_arp.c | 2
-rw-r--r--  net/core/dev.c | 9
-rw-r--r--  net/core/dev_mcast.c | 16
-rw-r--r--  net/core/dst.c | 1
-rw-r--r--  net/core/fib_rules.c | 22
-rw-r--r--  net/core/neighbour.c | 2
-rw-r--r--  net/core/net_namespace.c | 46
-rw-r--r--  net/core/sock.c | 73
-rw-r--r--  net/dccp/ipv4.c | 3
-rw-r--r--  net/dccp/ipv6.c | 3
-rw-r--r--  net/dccp/proto.c | 9
-rw-r--r--  net/decnet/dn_dev.c | 2
-rw-r--r--  net/decnet/dn_route.c | 16
-rw-r--r--  net/decnet/dn_rules.c | 13
-rw-r--r--  net/ieee80211/ieee80211_crypt_tkip.c | 1
-rw-r--r--  net/ieee80211/ieee80211_crypt_wep.c | 1
-rw-r--r--  net/ieee80211/softmac/ieee80211softmac_wx.c | 2
-rw-r--r--  net/ipv4/fib_frontend.c | 15
-rw-r--r--  net/ipv4/fib_rules.c | 51
-rw-r--r--  net/ipv4/inet_diag.c | 9
-rw-r--r--  net/ipv4/inet_hashtables.c | 7
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 13
-rw-r--r--  net/ipv4/inetpeer.c | 42
-rw-r--r--  net/ipv4/ip_output.c | 28
-rw-r--r--  net/ipv4/ip_sockglue.c | 39
-rw-r--r--  net/ipv4/ipcomp.c | 3
-rw-r--r--  net/ipv4/ipvs/ip_vs_conn.c | 18
-rw-r--r--  net/ipv4/ipvs/ip_vs_core.c | 20
-rw-r--r--  net/ipv4/ipvs/ip_vs_ctl.c | 25
-rw-r--r--  net/ipv4/ipvs/ip_vs_sync.c | 24
-rw-r--r--  net/ipv4/netfilter/Makefile | 20
-rw-r--r--  net/ipv4/netfilter/ip_queue.c | 37
-rw-r--r--  net/ipv4/netfilter/nf_nat_amanda.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_ftp.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_h323.c | 18
-rw-r--r--  net/ipv4/netfilter/nf_nat_irc.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_pptp.c | 8
-rw-r--r--  net/ipv4/netfilter/nf_nat_sip.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_tftp.c | 2
-rw-r--r--  net/ipv4/proc.c | 19
-rw-r--r--  net/ipv4/raw.c | 3
-rw-r--r--  net/ipv4/route.c | 20
-rw-r--r--  net/ipv4/tcp.c | 4
-rw-r--r--  net/ipv4/tcp_input.c | 115
-rw-r--r--  net/ipv4/tcp_ipv4.c | 14
-rw-r--r--  net/ipv4/tunnel4.c | 24
-rw-r--r--  net/ipv4/udp.c | 3
-rw-r--r--  net/ipv4/udplite.c | 3
-rw-r--r--  net/ipv6/fib6_rules.c | 37
-rw-r--r--  net/ipv6/inet6_hashtables.c | 19
-rw-r--r--  net/ipv6/ip6_output.c | 36
-rw-r--r--  net/ipv6/ipcomp6.c | 3
-rw-r--r--  net/ipv6/ndisc.c | 1
-rw-r--r--  net/ipv6/netfilter/Makefile | 28
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c | 37
-rw-r--r--  net/ipv6/proc.c | 19
-rw-r--r--  net/ipv6/raw.c | 3
-rw-r--r--  net/ipv6/route.c | 97
-rw-r--r--  net/ipv6/tcp_ipv6.c | 3
-rw-r--r--  net/ipv6/udp.c | 8
-rw-r--r--  net/ipv6/udplite.c | 3
-rw-r--r--  net/ipx/af_ipx.c | 22
-rw-r--r--  net/mac80211/Kconfig | 12
-rw-r--r--  net/mac80211/Makefile | 3
-rw-r--r--  net/mac80211/ieee80211.c | 16
-rw-r--r--  net/mac80211/ieee80211_common.h | 91
-rw-r--r--  net/mac80211/ieee80211_i.h | 2
-rw-r--r--  net/mac80211/ieee80211_ioctl.c | 21
-rw-r--r--  net/mac80211/ieee80211_rate.c | 24
-rw-r--r--  net/mac80211/ieee80211_rate.h | 3
-rw-r--r--  net/mac80211/ieee80211_sta.c | 18
-rw-r--r--  net/mac80211/rc80211_simple.c | 25
-rw-r--r--  net/mac80211/rx.c | 2
-rw-r--r--  net/mac80211/wep.c | 2
-rw-r--r--  net/mac80211/wpa.c | 18
-rw-r--r--  net/netfilter/Makefile | 14
-rw-r--r--  net/netfilter/nf_sockopt.c | 117
-rw-r--r--  net/netfilter/xt_connlimit.c | 5
-rw-r--r--  net/netfilter/xt_time.c | 5
-rw-r--r--  net/netfilter/xt_u32.c | 5
-rw-r--r--  net/netlink/af_netlink.c | 12
-rw-r--r--  net/packet/af_packet.c | 31
-rw-r--r--  net/rfkill/rfkill.c | 37
-rw-r--r--  net/rxrpc/ar-local.c | 4
-rw-r--r--  net/sched/cls_u32.c | 14
-rw-r--r--  net/sched/sch_generic.c | 5
-rw-r--r--  net/sched/sch_teql.c | 3
-rw-r--r--  net/sctp/associola.c | 10
-rw-r--r--  net/sctp/bind_addr.c | 13
-rw-r--r--  net/sctp/endpointola.c | 35
-rw-r--r--  net/sctp/input.c | 43
-rw-r--r--  net/sctp/inqueue.c | 4
-rw-r--r--  net/sctp/outqueue.c | 41
-rw-r--r--  net/sctp/proc.c | 6
-rw-r--r--  net/sctp/protocol.c | 7
-rw-r--r--  net/sctp/sm_make_chunk.c | 170
-rw-r--r--  net/sctp/sm_sideeffect.c | 10
-rw-r--r--  net/sctp/sm_statefuns.c | 12
-rw-r--r--  net/sctp/socket.c | 22
-rw-r--r--  net/sctp/sysctl.c | 9
-rw-r--r--  net/sctp/transport.c | 5
-rw-r--r--  net/sctp/ulpqueue.c | 2
-rw-r--r--  net/socket.c | 6
-rw-r--r--  net/sunrpc/xprtrdma/transport.c | 2
-rw-r--r--  net/unix/af_unix.c | 9
-rw-r--r--  net/unix/garbage.c | 26
111 files changed, 1086 insertions(+), 1004 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 3fe4fc86055..6567213959c 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -376,6 +376,7 @@ void vlan_setup(struct net_device *new_dev)
new_dev->init = vlan_dev_init;
new_dev->open = vlan_dev_open;
new_dev->stop = vlan_dev_stop;
+ new_dev->set_mac_address = vlan_set_mac_address;
new_dev->set_multicast_list = vlan_dev_set_multicast_list;
new_dev->change_rx_flags = vlan_change_rx_flags;
new_dev->destructor = free_netdev;
@@ -636,6 +637,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
if (!vlandev)
continue;
+ flgs = vlandev->flags;
+ if (!(flgs & IFF_UP))
+ continue;
+
vlan_sync_address(dev, vlandev);
}
break;
@@ -747,6 +752,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
vlan_dev_set_ingress_priority(dev,
args.u.skb_priority,
args.vlan_qos);
+ err = 0;
break;
case SET_VLAN_EGRESS_PRIORITY_CMD:
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index cf4a80d06b3..2cd1393073e 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -60,6 +60,7 @@ int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb, struct net_device *dev
int vlan_dev_change_mtu(struct net_device *dev, int new_mtu);
int vlan_dev_open(struct net_device* dev);
int vlan_dev_stop(struct net_device* dev);
+int vlan_set_mac_address(struct net_device *dev, void *p);
int vlan_dev_ioctl(struct net_device* dev, struct ifreq *ifr, int cmd);
void vlan_dev_set_ingress_priority(const struct net_device *dev,
u32 skb_prio, short vlan_prio);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 1a1740aa9a8..7a36878241d 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -665,6 +665,32 @@ int vlan_dev_stop(struct net_device *dev)
return 0;
}
+int vlan_set_mac_address(struct net_device *dev, void *p)
+{
+ struct net_device *real_dev = VLAN_DEV_INFO(dev)->real_dev;
+ struct sockaddr *addr = p;
+ int err;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (!(dev->flags & IFF_UP))
+ goto out;
+
+ if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
+ err = dev_unicast_add(real_dev, addr->sa_data, ETH_ALEN);
+ if (err < 0)
+ return err;
+ }
+
+ if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
+ dev_unicast_delete(real_dev, dev->dev_addr, ETH_ALEN);
+
+out:
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ return 0;
+}
+
int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct net_device *real_dev = VLAN_DEV_INFO(dev)->real_dev;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index da22f900e89..c1757c79dfb 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -766,6 +766,9 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
if (!nf_bridge)
return NF_ACCEPT;
+ if (!(nf_bridge->mask & (BRNF_BRIDGED | BRNF_BRIDGED_DNAT)))
+ return NF_ACCEPT;
+
if (!realoutdev)
return NF_DROP;
diff --git a/net/bridge/netfilter/ebt_arp.c b/net/bridge/netfilter/ebt_arp.c
index 1a46952a56d..18141392a9b 100644
--- a/net/bridge/netfilter/ebt_arp.c
+++ b/net/bridge/netfilter/ebt_arp.c
@@ -34,7 +34,7 @@ static int ebt_filter_arp(const struct sk_buff *skb, const struct net_device *in
ah->ar_pro, EBT_ARP_PTYPE))
return EBT_NOMATCH;
- if (info->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP)) {
+ if (info->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP | EBT_ARP_GRAT)) {
__be32 saddr, daddr, *sap, *dap;
if (ah->ar_pln != sizeof(__be32) || ah->ar_pro != htons(ETH_P_IP))
diff --git a/net/core/dev.c b/net/core/dev.c
index be6cedab5aa..86d62611f2f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1171,6 +1171,8 @@ rollback:
nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
}
}
+
+ raw_notifier_chain_unregister(&netdev_chain, nb);
goto unlock;
}
@@ -2688,7 +2690,7 @@ static void __net_exit dev_proc_net_exit(struct net *net)
proc_net_remove(net, "dev");
}
-static struct pernet_operations dev_proc_ops = {
+static struct pernet_operations __net_initdata dev_proc_ops = {
.init = dev_proc_net_init,
.exit = dev_proc_net_exit,
};
@@ -4330,7 +4332,6 @@ static struct hlist_head *netdev_create_hash(void)
static int __net_init netdev_init(struct net *net)
{
INIT_LIST_HEAD(&net->dev_base_head);
- rwlock_init(&dev_base_lock);
net->dev_name_head = netdev_create_hash();
if (net->dev_name_head == NULL)
@@ -4354,7 +4355,7 @@ static void __net_exit netdev_exit(struct net *net)
kfree(net->dev_index_head);
}
-static struct pernet_operations netdev_net_ops = {
+static struct pernet_operations __net_initdata netdev_net_ops = {
.init = netdev_init,
.exit = netdev_exit,
};
@@ -4385,7 +4386,7 @@ static void __net_exit default_device_exit(struct net *net)
rtnl_unlock();
}
-static struct pernet_operations default_device_ops = {
+static struct pernet_operations __net_initdata default_device_ops = {
.exit = default_device_exit,
};
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index ae354057d84..69fff16ece1 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -168,13 +168,13 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
da = from->mc_list;
while (da != NULL) {
next = da->next;
- if (!da->da_synced)
- continue;
- __dev_addr_delete(&to->mc_list, &to->mc_count,
- da->da_addr, da->da_addrlen, 0);
- da->da_synced = 0;
- __dev_addr_delete(&from->mc_list, &from->mc_count,
- da->da_addr, da->da_addrlen, 0);
+ if (da->da_synced) {
+ __dev_addr_delete(&to->mc_list, &to->mc_count,
+ da->da_addr, da->da_addrlen, 0);
+ da->da_synced = 0;
+ __dev_addr_delete(&from->mc_list, &from->mc_count,
+ da->da_addr, da->da_addrlen, 0);
+ }
da = next;
}
__dev_set_rx_mode(to);
@@ -285,7 +285,7 @@ static void __net_exit dev_mc_net_exit(struct net *net)
proc_net_remove(net, "dev_mcast");
}
-static struct pernet_operations dev_mc_net_ops = {
+static struct pernet_operations __net_initdata dev_mc_net_ops = {
.init = dev_mc_net_init,
.exit = dev_mc_net_exit,
};
diff --git a/net/core/dst.c b/net/core/dst.c
index 16958e64e57..03daead3592 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -18,7 +18,6 @@
#include <linux/types.h>
#include <net/net_namespace.h>
-#include <net/net_namespace.h>
#include <net/dst.h>
/*
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 13de6f53f09..848132b6cb7 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -18,6 +18,28 @@
static LIST_HEAD(rules_ops);
static DEFINE_SPINLOCK(rules_mod_lock);
+int fib_default_rule_add(struct fib_rules_ops *ops,
+ u32 pref, u32 table, u32 flags)
+{
+ struct fib_rule *r;
+
+ r = kzalloc(ops->rule_size, GFP_KERNEL);
+ if (r == NULL)
+ return -ENOMEM;
+
+ atomic_set(&r->refcnt, 1);
+ r->action = FR_ACT_TO_TBL;
+ r->pref = pref;
+ r->table = table;
+ r->flags = flags;
+
+ /* The lock is not required here, the list is unreachable
+ * at the moment this function is called */
+ list_add_tail(&r->list, &ops->rules_list);
+ return 0;
+}
+EXPORT_SYMBOL(fib_default_rule_add);
+
static void notify_rule_change(int event, struct fib_rule *rule,
struct fib_rules_ops *ops, struct nlmsghdr *nlh,
u32 pid);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 05979e35696..29b8ee4e35d 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1435,6 +1435,8 @@ int neigh_table_clear(struct neigh_table *tbl)
kfree(tbl->phash_buckets);
tbl->phash_buckets = NULL;
+ remove_proc_entry(tbl->id, init_net.proc_net_stat);
+
free_percpu(tbl->stats);
tbl->stats = NULL;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index e9f0964ce70..383252b5041 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -64,6 +64,20 @@ static struct net *net_alloc(void)
return kmem_cache_zalloc(net_cachep, GFP_KERNEL);
}
+static void net_free(struct net *net)
+{
+ if (!net)
+ return;
+
+ if (unlikely(atomic_read(&net->use_count) != 0)) {
+ printk(KERN_EMERG "network namespace not free! Usage: %d\n",
+ atomic_read(&net->use_count));
+ return;
+ }
+
+ kmem_cache_free(net_cachep, net);
+}
+
struct net *copy_net_ns(unsigned long flags, struct net *old_net)
{
struct net *new_net = NULL;
@@ -100,20 +114,6 @@ out:
return new_net;
}
-static void net_free(struct net *net)
-{
- if (!net)
- return;
-
- if (unlikely(atomic_read(&net->use_count) != 0)) {
- printk(KERN_EMERG "network namespace not free! Usage: %d\n",
- atomic_read(&net->use_count));
- return;
- }
-
- kmem_cache_free(net_cachep, net);
-}
-
static void cleanup_net(struct work_struct *work)
{
struct pernet_operations *ops;
@@ -188,6 +188,7 @@ static int __init net_ns_init(void)
pure_initcall(net_ns_init);
+#ifdef CONFIG_NET_NS
static int register_pernet_operations(struct list_head *list,
struct pernet_operations *ops)
{
@@ -228,6 +229,23 @@ static void unregister_pernet_operations(struct pernet_operations *ops)
ops->exit(net);
}
+#else
+
+static int register_pernet_operations(struct list_head *list,
+ struct pernet_operations *ops)
+{
+ if (ops->init == NULL)
+ return 0;
+ return ops->init(&init_net);
+}
+
+static void unregister_pernet_operations(struct pernet_operations *ops)
+{
+ if (ops->exit)
+ ops->exit(&init_net);
+}
+#endif
+
/**
* register_pernet_subsys - register a network namespace subsystem
* @ops: pernet operations structure for the subsystem
diff --git a/net/core/sock.c b/net/core/sock.c
index 12ad2067a98..c519b439b8b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1801,11 +1801,65 @@ EXPORT_SYMBOL(sk_common_release);
static DEFINE_RWLOCK(proto_list_lock);
static LIST_HEAD(proto_list);
+#ifdef CONFIG_SMP
+/*
+ * Define default functions to keep track of inuse sockets per protocol
+ * Note that often used protocols use dedicated functions to get a speed increase.
+ * (see DEFINE_PROTO_INUSE/REF_PROTO_INUSE)
+ */
+static void inuse_add(struct proto *prot, int inc)
+{
+ per_cpu_ptr(prot->inuse_ptr, smp_processor_id())[0] += inc;
+}
+
+static int inuse_get(const struct proto *prot)
+{
+ int res = 0, cpu;
+ for_each_possible_cpu(cpu)
+ res += per_cpu_ptr(prot->inuse_ptr, cpu)[0];
+ return res;
+}
+
+static int inuse_init(struct proto *prot)
+{
+ if (!prot->inuse_getval || !prot->inuse_add) {
+ prot->inuse_ptr = alloc_percpu(int);
+ if (prot->inuse_ptr == NULL)
+ return -ENOBUFS;
+
+ prot->inuse_getval = inuse_get;
+ prot->inuse_add = inuse_add;
+ }
+ return 0;
+}
+
+static void inuse_fini(struct proto *prot)
+{
+ if (prot->inuse_ptr != NULL) {
+ free_percpu(prot->inuse_ptr);
+ prot->inuse_ptr = NULL;
+ prot->inuse_getval = NULL;
+ prot->inuse_add = NULL;
+ }
+}
+#else
+static inline int inuse_init(struct proto *prot)
+{
+ return 0;
+}
+
+static inline void inuse_fini(struct proto *prot)
+{
+}
+#endif
+
int proto_register(struct proto *prot, int alloc_slab)
{
char *request_sock_slab_name = NULL;
char *timewait_sock_slab_name;
- int rc = -ENOBUFS;
+
+ if (inuse_init(prot))
+ goto out;
if (alloc_slab) {
prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
@@ -1814,7 +1868,7 @@ int proto_register(struct proto *prot, int alloc_slab)
if (prot->slab == NULL) {
printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
prot->name);
- goto out;
+ goto out_free_inuse;
}
if (prot->rsk_prot != NULL) {
@@ -1858,9 +1912,8 @@ int proto_register(struct proto *prot, int alloc_slab)
write_lock(&proto_list_lock);
list_add(&prot->node, &proto_list);
write_unlock(&proto_list_lock);
- rc = 0;
-out:
- return rc;
+ return 0;
+
out_free_timewait_sock_slab_name:
kfree(timewait_sock_slab_name);
out_free_request_sock_slab:
@@ -1873,7 +1926,10 @@ out_free_request_sock_slab_name:
out_free_sock_slab:
kmem_cache_destroy(prot->slab);
prot->slab = NULL;
- goto out;
+out_free_inuse:
+ inuse_fini(prot);
+out:
+ return -ENOBUFS;
}
EXPORT_SYMBOL(proto_register);
@@ -1884,6 +1940,7 @@ void proto_unregister(struct proto *prot)
list_del(&prot->node);
write_unlock(&proto_list_lock);
+ inuse_fini(prot);
if (prot->slab != NULL) {
kmem_cache_destroy(prot->slab);
prot->slab = NULL;
@@ -2040,7 +2097,3 @@ EXPORT_SYMBOL(sock_wmalloc);
EXPORT_SYMBOL(sock_i_uid);
EXPORT_SYMBOL(sock_i_ino);
EXPORT_SYMBOL(sysctl_optmem_max);
-#ifdef CONFIG_SYSCTL
-EXPORT_SYMBOL(sysctl_rmem_max);
-EXPORT_SYMBOL(sysctl_wmem_max);
-#endif
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 01a6a808bdb..db17b83e8d3 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -922,6 +922,8 @@ static struct timewait_sock_ops dccp_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct inet_timewait_sock),
};
+DEFINE_PROTO_INUSE(dccp_v4)
+
static struct proto dccp_v4_prot = {
.name = "DCCP",
.owner = THIS_MODULE,
@@ -950,6 +952,7 @@ static struct proto dccp_v4_prot = {
.compat_setsockopt = compat_dccp_setsockopt,
.compat_getsockopt = compat_dccp_getsockopt,
#endif
+ REF_PROTO_INUSE(dccp_v4)
};
static struct net_protocol dccp_v4_protocol = {
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 62428ff137d..87c98fb86fa 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1107,6 +1107,8 @@ static struct timewait_sock_ops dccp6_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct dccp6_timewait_sock),
};
+DEFINE_PROTO_INUSE(dccp_v6)
+
static struct proto dccp_v6_prot = {
.name = "DCCPv6",
.owner = THIS_MODULE,
@@ -1135,6 +1137,7 @@ static struct proto dccp_v6_prot = {
.compat_setsockopt = compat_dccp_setsockopt,
.compat_getsockopt = compat_dccp_getsockopt,
#endif
+ REF_PROTO_INUSE(dccp_v6)
};
static struct inet6_protocol dccp_v6_protocol = {
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index d8497392803..7a3bea9c28c 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1072,11 +1072,13 @@ static int __init dccp_init(void)
}
for (i = 0; i < dccp_hashinfo.ehash_size; i++) {
- rwlock_init(&dccp_hashinfo.ehash[i].lock);
INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].twchain);
}
+ if (inet_ehash_locks_alloc(&dccp_hashinfo))
+ goto out_free_dccp_ehash;
+
bhash_order = ehash_order;
do {
@@ -1091,7 +1093,7 @@ static int __init dccp_init(void)
if (!dccp_hashinfo.bhash) {
DCCP_CRIT("Failed to allocate DCCP bind hash table");
- goto out_free_dccp_ehash;
+ goto out_free_dccp_locks;
}
for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
@@ -1121,6 +1123,8 @@ out_free_dccp_mib:
out_free_dccp_bhash:
free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
dccp_hashinfo.bhash = NULL;
+out_free_dccp_locks:
+ inet_ehash_locks_free(&dccp_hashinfo);
out_free_dccp_ehash:
free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
dccp_hashinfo.ehash = NULL;
@@ -1139,6 +1143,7 @@ static void __exit dccp_fini(void)
free_pages((unsigned long)dccp_hashinfo.ehash,
get_order(dccp_hashinfo.ehash_size *
sizeof(struct inet_ehash_bucket)));
+ inet_ehash_locks_free(&dccp_hashinfo);
kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
dccp_ackvec_exit();
dccp_sysctl_exit();
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 26130afd802..66e266fb590 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1439,7 +1439,7 @@ static const struct file_operations dn_dev_seq_fops = {
#endif /* CONFIG_PROC_FS */
-static int __initdata addr[2];
+static int addr[2];
module_param_array(addr, int, NULL, 0444);
MODULE_PARM_DESC(addr, "The DECnet address of this machine: area,node");
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 97eee5e8fbb..66663e5d7ac 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -293,9 +293,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
dn_rt_hash_table[hash].chain);
rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);
- rth->u.dst.__use++;
- dst_hold(&rth->u.dst);
- rth->u.dst.lastuse = now;
+ dst_use(&rth->u.dst, now);
spin_unlock_bh(&dn_rt_hash_table[hash].lock);
dnrt_drop(rt);
@@ -308,9 +306,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route *
rcu_assign_pointer(rt->u.dst.dn_next, dn_rt_hash_table[hash].chain);
rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);
- dst_hold(&rt->u.dst);
- rt->u.dst.__use++;
- rt->u.dst.lastuse = now;
+ dst_use(&rt->u.dst, now);
spin_unlock_bh(&dn_rt_hash_table[hash].lock);
*rp = rt;
return 0;
@@ -1182,9 +1178,7 @@ static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *fl
(flp->mark == rt->fl.mark) &&
(rt->fl.iif == 0) &&
(rt->fl.oif == flp->oif)) {
- rt->u.dst.lastuse = jiffies;
- dst_hold(&rt->u.dst);
- rt->u.dst.__use++;
+ dst_use(&rt->u.dst, jiffies);
rcu_read_unlock_bh();
*pprt = &rt->u.dst;
return 0;
@@ -1456,9 +1450,7 @@ int dn_route_input(struct sk_buff *skb)
(rt->fl.oif == 0) &&
(rt->fl.mark == skb->mark) &&
(rt->fl.iif == cb->iif)) {
- rt->u.dst.lastuse = jiffies;
- dst_hold(&rt->u.dst);
- rt->u.dst.__use++;
+ dst_use(&rt->u.dst, jiffies);
rcu_read_unlock();
skb->dst = (struct dst_entry *)rt;
return 0;
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index ddd3f04f091..ffebea04cc9 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -48,15 +48,6 @@ struct dn_fib_rule
u8 flags;
};
-static struct dn_fib_rule default_rule = {
- .common = {
- .refcnt = ATOMIC_INIT(2),
- .pref = 0x7fff,
- .table = RT_TABLE_MAIN,
- .action = FR_ACT_TO_TBL,
- },
-};
-
int dn_fib_lookup(struct flowi *flp, struct dn_fib_res *res)
{
@@ -262,8 +253,8 @@ static struct fib_rules_ops dn_fib_rules_ops = {
void __init dn_fib_rules_init(void)
{
- list_add_tail(&default_rule.common.list,
- &dn_fib_rules_ops.rules_list);
+ BUG_ON(fib_default_rule_add(&dn_fib_rules_ops, 0x7fff,
+ RT_TABLE_MAIN, 0));
fib_rules_register(&dn_fib_rules_ops);
}
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c
index 4cce3534e40..58b22619ab1 100644
--- a/net/ieee80211/ieee80211_crypt_tkip.c
+++ b/net/ieee80211/ieee80211_crypt_tkip.c
@@ -25,7 +25,6 @@
#include <net/ieee80211.h>
#include <linux/crypto.h>
-#include <linux/scatterlist.h>
#include <linux/crc32.h>
MODULE_AUTHOR("Jouni Malinen");
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c
index 866fc04c44f..3fa30c40779 100644
--- a/net/ieee80211/ieee80211_crypt_wep.c
+++ b/net/ieee80211/ieee80211_crypt_wep.c
@@ -22,7 +22,6 @@
#include <net/ieee80211.h>
#include <linux/crypto.h>
-#include <linux/scatterlist.h>
#include <linux/crc32.h>
MODULE_AUTHOR("Jouni Malinen");
diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c
index ac36767b56e..e01b59aedc5 100644
--- a/net/ieee80211/softmac/ieee80211softmac_wx.c
+++ b/net/ieee80211/softmac/ieee80211softmac_wx.c
@@ -470,7 +470,7 @@ ieee80211softmac_wx_set_mlme(struct net_device *dev,
{
struct ieee80211softmac_device *mac = ieee80211_priv(dev);
struct iw_mlme *mlme = (struct iw_mlme *)extra;
- u16 reason = cpu_to_le16(mlme->reason_code);
+ u16 reason = mlme->reason_code;
struct ieee80211softmac_network *net;
int err = -EINVAL;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 60123905dbb..732d8f088b1 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -59,6 +59,13 @@ struct fib_table *ip_fib_main_table;
#define FIB_TABLE_HASHSZ 1
static struct hlist_head fib_table_hash[FIB_TABLE_HASHSZ];
+static void __init fib4_rules_init(void)
+{
+ ip_fib_local_table = fib_hash_init(RT_TABLE_LOCAL);
+ hlist_add_head_rcu(&ip_fib_local_table->tb_hlist, &fib_table_hash[0]);
+ ip_fib_main_table = fib_hash_init(RT_TABLE_MAIN);
+ hlist_add_head_rcu(&ip_fib_main_table->tb_hlist, &fib_table_hash[0]);
+}
#else
#define FIB_TABLE_HASHSZ 256
@@ -905,14 +912,8 @@ void __init ip_fib_init(void)
for (i = 0; i < FIB_TABLE_HASHSZ; i++)
INIT_HLIST_HEAD(&fib_table_hash[i]);
-#ifndef CONFIG_IP_MULTIPLE_TABLES
- ip_fib_local_table = fib_hash_init(RT_TABLE_LOCAL);
- hlist_add_head_rcu(&ip_fib_local_table->tb_hlist, &fib_table_hash[0]);
- ip_fib_main_table = fib_hash_init(RT_TABLE_MAIN);
- hlist_add_head_rcu(&ip_fib_main_table->tb_hlist, &fib_table_hash[0]);
-#else
+
fib4_rules_init();
-#endif
register_netdevice_notifier(&fib_netdev_notifier);
register_inetaddr_notifier(&fib_inetaddr_notifier);
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index f16839c6a72..a0ada3a8d8d 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -49,33 +49,6 @@ struct fib4_rule
#endif
};
-static struct fib4_rule default_rule = {
- .common = {
- .refcnt = ATOMIC_INIT(2),
- .pref = 0x7FFF,
- .table = RT_TABLE_DEFAULT,
- .action = FR_ACT_TO_TBL,
- },
-};
-
-static struct fib4_rule main_rule = {
- .common = {
- .refcnt = ATOMIC_INIT(2),
- .pref = 0x7FFE,
- .table = RT_TABLE_MAIN,
- .action = FR_ACT_TO_TBL,
- },
-};
-
-static struct fib4_rule local_rule = {
- .common = {
- .refcnt = ATOMIC_INIT(2),
- .table = RT_TABLE_LOCAL,
- .action = FR_ACT_TO_TBL,
- .flags = FIB_RULE_PERMANENT,
- },
-};
-
#ifdef CONFIG_NET_CLS_ROUTE
u32 fib_rules_tclass(struct fib_result *res)
{
@@ -319,11 +292,27 @@ static struct fib_rules_ops fib4_rules_ops = {
.owner = THIS_MODULE,
};
-void __init fib4_rules_init(void)
+static int __init fib_default_rules_init(void)
{
- list_add_tail(&local_rule.common.list, &fib4_rules_ops.rules_list);
- list_add_tail(&main_rule.common.list, &fib4_rules_ops.rules_list);
- list_add_tail(&default_rule.common.list, &fib4_rules_ops.rules_list);
+ int err;
+
+ err = fib_default_rule_add(&fib4_rules_ops, 0,
+ RT_TABLE_LOCAL, FIB_RULE_PERMANENT);
+ if (err < 0)
+ return err;
+ err = fib_default_rule_add(&fib4_rules_ops, 0x7FFE,
+ RT_TABLE_MAIN, 0);
+ if (err < 0)
+ return err;
+ err = fib_default_rule_add(&fib4_rules_ops, 0x7FFF,
+ RT_TABLE_DEFAULT, 0);
+ if (err < 0)
+ return err;
+ return 0;
+}
+void __init fib4_rules_init(void)
+{
+ BUG_ON(fib_default_rules_init());
fib_rules_register(&fib4_rules_ops);
}
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index dc429b6b0ba..b0170732b5e 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -747,13 +747,14 @@ skip_listen_ht:
for (i = s_i; i < hashinfo->ehash_size; i++) {
struct inet_ehash_bucket *head = &hashinfo->ehash[i];
+ rwlock_t *lock = inet_ehash_lockp(hashinfo, i);
struct sock *sk;
struct hlist_node *node;
if (i > s_i)
s_num = 0;
- read_lock_bh(&head->lock);
+ read_lock_bh(lock);
num = 0;
sk_for_each(sk, node, &head->chain) {
struct inet_sock *inet = inet_sk(sk);
@@ -769,7 +770,7 @@ skip_listen_ht:
r->id.idiag_dport)
goto next_normal;
if (inet_csk_diag_dump(sk, skb, cb) < 0) {
- read_unlock_bh(&head->lock);
+ read_unlock_bh(lock);
goto done;
}
next_normal:
@@ -791,14 +792,14 @@ next_normal:
r->id.idiag_dport)
goto next_dying;
if (inet_twsk_diag_dump(tw, skb, cb) < 0) {
- read_unlock_bh(&head->lock);
+ read_unlock_bh(lock);
goto done;
}
next_dying:
++num;
}
}
- read_unlock_bh(&head->lock);
+ read_unlock_bh(lock);
}
done:
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 16eecc7046a..67704da04fc 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -204,12 +204,13 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
const __portpair ports = INET_COMBINED_PORTS(inet->dport, lport);
unsigned int hash = inet_ehashfn(daddr, lport, saddr, inet->dport);
struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
+ rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
struct sock *sk2;
const struct hlist_node *node;
struct inet_timewait_sock *tw;
prefetch(head->chain.first);
- write_lock(&head->lock);
+ write_lock(lock);
/* Check TIME-WAIT sockets first. */
sk_for_each(sk2, node, &head->twchain) {
@@ -239,7 +240,7 @@ unique:
BUG_TRAP(sk_unhashed(sk));
__sk_add_node(sk, &head->chain);
sock_prot_inc_use(sk->sk_prot);
- write_unlock(&head->lock);
+ write_unlock(lock);
if (twp) {
*twp = tw;
@@ -255,7 +256,7 @@ unique:
return 0;
not_unique:
- write_unlock(&head->lock);
+ write_unlock(lock);
return -EADDRNOTAVAIL;
}
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 4e189e28f30..a60b99e0ebd 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -20,16 +20,16 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
struct inet_bind_hashbucket *bhead;
struct inet_bind_bucket *tb;
/* Unlink from established hashes. */
- struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, tw->tw_hash);
+ rwlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
- write_lock(&ehead->lock);
+ write_lock(lock);
if (hlist_unhashed(&tw->tw_node)) {
- write_unlock(&ehead->lock);
+ write_unlock(lock);
return;
}
__hlist_del(&tw->tw_node);
sk_node_init(&tw->tw_node);
- write_unlock(&ehead->lock);
+ write_unlock(lock);
/* Disassociate with bind bucket. */
bhead = &hashinfo->bhash[inet_bhashfn(tw->tw_num, hashinfo->bhash_size)];
@@ -59,6 +59,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
const struct inet_sock *inet = inet_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
+ rwlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
struct inet_bind_hashbucket *bhead;
/* Step 1: Put TW into bind hash. Original socket stays there too.
Note, that any socket with inet->num != 0 MUST be bound in
@@ -71,7 +72,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
spin_unlock(&bhead->lock);
- write_lock(&ehead->lock);
+ write_lock(lock);
/* Step 2: Remove SK from established hash. */
if (__sk_del_node_init(sk))
@@ -81,7 +82,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
inet_twsk_add_node(tw, &ehead->twchain);
atomic_inc(&tw->tw_refcnt);
- write_unlock(&ehead->lock);
+ write_unlock(lock);
}
EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 771031dfbd0..af995198f64 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -61,7 +61,7 @@
* 4. Global variable peer_total is modified under the pool lock.
* 5. struct inet_peer fields modification:
* avl_left, avl_right, avl_parent, avl_height: pool lock
- * unused_next, unused_prevp: unused node list lock
+ * unused: unused node list lock
* refcnt: atomically against modifications on other CPU;
* usually under some other lock to prevent node disappearing
* dtime: unused node list lock
@@ -94,8 +94,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min
int inet_peer_gc_mintime __read_mostly = 10 * HZ;
int inet_peer_gc_maxtime __read_mostly = 120 * HZ;
-static struct inet_peer *inet_peer_unused_head;
-static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
+static LIST_HEAD(unused_peers);
static DEFINE_SPINLOCK(inet_peer_unused_lock);
static void peer_check_expire(unsigned long dummy);
@@ -138,15 +137,7 @@ void __init inet_initpeers(void)
static void unlink_from_unused(struct inet_peer *p)
{
spin_lock_bh(&inet_peer_unused_lock);
- if (p->unused_prevp != NULL) {
- /* On unused list. */
- *p->unused_prevp = p->unused_next;
- if (p->unused_next != NULL)
- p->unused_next->unused_prevp = p->unused_prevp;
- else
- inet_peer_unused_tailp = p->unused_prevp;
- p->unused_prevp = NULL; /* mark it as removed */
- }
+ list_del_init(&p->unused);
spin_unlock_bh(&inet_peer_unused_lock);
}
@@ -337,24 +328,24 @@ static void unlink_from_pool(struct inet_peer *p)
/* May be called with local BH enabled. */
static int cleanup_once(unsigned long ttl)
{
- struct inet_peer *p;
+ struct inet_peer *p = NULL;
/* Remove the first entry from the list of unused nodes. */
spin_lock_bh(&inet_peer_unused_lock);
- p = inet_peer_unused_head;
- if (p != NULL) {
- __u32 delta = (__u32)jiffies - p->dtime;
+ if (!list_empty(&unused_peers)) {
+ __u32 delta;
+
+ p = list_first_entry(&unused_peers, struct inet_peer, unused);
+ delta = (__u32)jiffies - p->dtime;
+
if (delta < ttl) {
/* Do not prune fresh entries. */
spin_unlock_bh(&inet_peer_unused_lock);
return -1;
}
- inet_peer_unused_head = p->unused_next;
- if (p->unused_next != NULL)
- p->unused_next->unused_prevp = p->unused_prevp;
- else
- inet_peer_unused_tailp = p->unused_prevp;
- p->unused_prevp = NULL; /* mark as not on the list */
+
+ list_del_init(&p->unused);
+
/* Grab an extra reference to prevent node disappearing
* before unlink_from_pool() call. */
atomic_inc(&p->refcnt);
@@ -412,7 +403,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
/* Link the node. */
link_to_pool(n);
- n->unused_prevp = NULL; /* not on the list */
+ INIT_LIST_HEAD(&n->unused);
peer_total++;
write_unlock_bh(&peer_pool_lock);
@@ -467,10 +458,7 @@ void inet_putpeer(struct inet_peer *p)
{
spin_lock_bh(&inet_peer_unused_lock);
if (atomic_dec_and_test(&p->refcnt)) {
- p->unused_prevp = inet_peer_unused_tailp;
- p->unused_next = NULL;
- *inet_peer_unused_tailp = p;
- inet_peer_unused_tailp = &p->unused_next;
+ list_add_tail(&p->unused, &unused_peers);
p->dtime = (__u32)jiffies;
}
spin_unlock_bh(&inet_peer_unused_lock);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index e5f7dc2de30..fd99fbd685e 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1183,6 +1183,17 @@ error:
return err;
}
+static void ip_cork_release(struct inet_sock *inet)
+{
+ inet->cork.flags &= ~IPCORK_OPT;
+ kfree(inet->cork.opt);
+ inet->cork.opt = NULL;
+ if (inet->cork.rt) {
+ ip_rt_put(inet->cork.rt);
+ inet->cork.rt = NULL;
+ }
+}
+
/*
* Combined all pending IP fragments on the socket as one IP datagram
* and push them out.
@@ -1276,13 +1287,7 @@ int ip_push_pending_frames(struct sock *sk)
}
out:
- inet->cork.flags &= ~IPCORK_OPT;
- kfree(inet->cork.opt);
- inet->cork.opt = NULL;
- if (inet->cork.rt) {
- ip_rt_put(inet->cork.rt);
- inet->cork.rt = NULL;
- }
+ ip_cork_release(inet);
return err;
error:
@@ -1295,19 +1300,12 @@ error:
*/
void ip_flush_pending_frames(struct sock *sk)
{
- struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
kfree_skb(skb);
- inet->cork.flags &= ~IPCORK_OPT;
- kfree(inet->cork.opt);
- inet->cork.opt = NULL;
- if (inet->cork.rt) {
- ip_rt_put(inet->cork.rt);
- inet->cork.rt = NULL;
- }
+ ip_cork_release(inet_sk(sk));
}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index f51f20e487c..82817e55436 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -437,10 +437,8 @@ static int do_ip_setsockopt(struct sock *sk, int level,
/* If optlen==0, it is equivalent to val == 0 */
-#ifdef CONFIG_IP_MROUTE
- if (optname >= MRT_BASE && optname <= (MRT_BASE + 10))
+ if (ip_mroute_opt(optname))
return ip_mroute_setsockopt(sk,optname,optval,optlen);
-#endif
err = 0;
lock_sock(sk);
@@ -909,11 +907,9 @@ int ip_setsockopt(struct sock *sk, int level,
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except default case */
if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
- optname != IP_IPSEC_POLICY && optname != IP_XFRM_POLICY
-#ifdef CONFIG_IP_MROUTE
- && (optname < MRT_BASE || optname > (MRT_BASE + 10))
-#endif
- ) {
+ optname != IP_IPSEC_POLICY &&
+ optname != IP_XFRM_POLICY &&
+ !ip_mroute_opt(optname)) {
lock_sock(sk);
err = nf_setsockopt(sk, PF_INET, optname, optval, optlen);
release_sock(sk);
@@ -935,11 +931,9 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname,
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except default case */
if (err == -ENOPROTOOPT && optname != IP_HDRINCL &&
- optname != IP_IPSEC_POLICY && optname != IP_XFRM_POLICY
-#ifdef CONFIG_IP_MROUTE
- && (optname < MRT_BASE || optname > (MRT_BASE + 10))
-#endif
- ) {
+ optname != IP_IPSEC_POLICY &&
+ optname != IP_XFRM_POLICY &&
+ !ip_mroute_opt(optname)) {
lock_sock(sk);
err = compat_nf_setsockopt(sk, PF_INET, optname,
optval, optlen);
@@ -967,11 +961,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
if (level != SOL_IP)
return -EOPNOTSUPP;
-#ifdef CONFIG_IP_MROUTE
- if (optname >= MRT_BASE && optname <= MRT_BASE+10) {
+ if (ip_mroute_opt(optname))
return ip_mroute_getsockopt(sk,optname,optval,optlen);
- }
-#endif
if (get_user(len,optlen))
return -EFAULT;
@@ -1171,11 +1162,8 @@ int ip_getsockopt(struct sock *sk, int level,
err = do_ip_getsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except default case */
- if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS
-#ifdef CONFIG_IP_MROUTE
- && (optname < MRT_BASE || optname > MRT_BASE+10)
-#endif
- ) {
+ if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
+ !ip_mroute_opt(optname)) {
int len;
if (get_user(len,optlen))
@@ -1200,11 +1188,8 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
int err = do_ip_getsockopt(sk, level, optname, optval, optlen);
#ifdef CONFIG_NETFILTER
/* we need to exclude all possible ENOPROTOOPTs except default case */
- if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS
-#ifdef CONFIG_IP_MROUTE
- && (optname < MRT_BASE || optname > MRT_BASE+10)
-#endif
- ) {
+ if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
+ !ip_mroute_opt(optname)) {
int len;
if (get_user(len, optlen))
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index ca1b5fdb8d3..2c44a94c213 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <asm/semaphore.h>
#include <linux/crypto.h>
+#include <linux/err.h>
#include <linux/pfkeyv2.h>
#include <linux/percpu.h>
#include <linux/smp.h>
@@ -344,7 +345,7 @@ static struct crypto_comp **ipcomp_alloc_tfms(const char *alg_name)
for_each_possible_cpu(cpu) {
struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
CRYPTO_ALG_ASYNC);
- if (!tfm)
+ if (IS_ERR(tfm))
goto error;
*per_cpu_ptr(tfms, cpu) = tfm;
}
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index 4b702f708d3..0a9f3c37e18 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -426,6 +426,24 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
/*
+ * Check if there is a destination for the connection, if so
+ * bind the connection to the destination.
+ */
+struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
+{
+ struct ip_vs_dest *dest;
+
+ if ((cp) && (!cp->dest)) {
+ dest = ip_vs_find_dest(cp->daddr, cp->dport,
+ cp->vaddr, cp->vport, cp->protocol);
+ ip_vs_bind_dest(cp, dest);
+ return dest;
+ } else
+ return NULL;
+}
+
+
+/*
* Unbind a connection entry with its VS destination
* Called by the ip_vs_conn_expire function.
*/
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index c6ed7654e83..20c884a5772 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -979,15 +979,23 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
ret = NF_ACCEPT;
}
- /* increase its packet counter and check if it is needed
- to be synchronized */
+ /* Increase its packet counter and check if it is needed
+ * to be synchronized
+ *
+ * Sync connection if it is about to close to
+ * encourage the standby servers to update the connections timeout
+ */
atomic_inc(&cp->in_pkts);
if ((ip_vs_sync_state & IP_VS_STATE_MASTER) &&
- (cp->protocol != IPPROTO_TCP ||
- cp->state == IP_VS_TCP_S_ESTABLISHED) &&
- (atomic_read(&cp->in_pkts) % sysctl_ip_vs_sync_threshold[1]
- == sysctl_ip_vs_sync_threshold[0]))
+ (((cp->protocol != IPPROTO_TCP ||
+ cp->state == IP_VS_TCP_S_ESTABLISHED) &&
+ (atomic_read(&cp->in_pkts) % sysctl_ip_vs_sync_threshold[1]
+ == sysctl_ip_vs_sync_threshold[0])) ||
+ ((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
+ ((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
+ (cp->state == IP_VS_TCP_S_CLOSE)))))
ip_vs_sync_conn(cp);
+ cp->old_state = cp->state;
ip_vs_conn_put(cp);
return ret;
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 7345fc252a2..b64cf45a9ea 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -579,6 +579,31 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, __be32 daddr, __be16 dport)
return NULL;
}
+/*
+ * Find destination by {daddr,dport,vaddr,protocol}
+ * Created to be used in ip_vs_process_message() in
+ * the backup synchronization daemon. It finds the
+ * destination to be bound to the received connection
+ * on the backup.
+ *
+ * ip_vs_lookup_real_service() looked promising, but
+ * seems not to work as expected.
+ */
+struct ip_vs_dest *ip_vs_find_dest(__be32 daddr, __be16 dport,
+ __be32 vaddr, __be16 vport, __u16 protocol)
+{
+ struct ip_vs_dest *dest;
+ struct ip_vs_service *svc;
+
+ svc = ip_vs_service_get(0, protocol, vaddr, vport);
+ if (!svc)
+ return NULL;
+ dest = ip_vs_lookup_dest(svc, daddr, dport);
+ if (dest)
+ atomic_inc(&dest->refcnt);
+ ip_vs_service_put(svc);
+ return dest;
+}
/*
* Lookup dest by {svc,addr,port} in the destination trash.
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 0d4d9721cbd..bd930efc18d 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -284,6 +284,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
struct ip_vs_sync_conn_options *opt;
struct ip_vs_conn *cp;
struct ip_vs_protocol *pp;
+ struct ip_vs_dest *dest;
char *p;
int i;
@@ -317,20 +318,34 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
s->caddr, s->cport,
s->vaddr, s->vport);
if (!cp) {
+ /*
+ * Find the appropriate destination for the connection.
+ * If it is not found the connection will remain unbound
+ * but still handled.
+ */
+ dest = ip_vs_find_dest(s->daddr, s->dport,
+ s->vaddr, s->vport,
+ s->protocol);
cp = ip_vs_conn_new(s->protocol,
s->caddr, s->cport,
s->vaddr, s->vport,
s->daddr, s->dport,
- flags, NULL);
+ flags, dest);
+ if (dest)
+ atomic_dec(&dest->refcnt);
if (!cp) {
IP_VS_ERR("ip_vs_conn_new failed\n");
return;
}
cp->state = ntohs(s->state);
} else if (!cp->dest) {
- /* it is an entry created by the synchronization */
- cp->state = ntohs(s->state);
- cp->flags = flags | IP_VS_CONN_F_HASHED;
+ dest = ip_vs_try_bind_dest(cp);
+ if (!dest) {
+ /* it is an unbound entry created by
+ * synchronization */
+ cp->flags = flags | IP_VS_CONN_F_HASHED;
+ } else
+ atomic_dec(&dest->refcnt);
} /* Note that we don't touch its state and flags
if it is a normal entry. */
@@ -342,6 +357,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
p += SIMPLE_CONN_SIZE;
atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
+ cp->state = ntohs(s->state);
pp = ip_vs_proto_get(s->protocol);
cp->timeout = pp->timeout_table[cp->state];
ip_vs_conn_put(cp);
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 409d273f6f8..7456833d6ad 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -41,27 +41,27 @@ obj-$(CONFIG_NF_NAT) += iptable_nat.o
obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
# matches
+obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
+obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
+obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o
-obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o
obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_recent.o
-obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
-obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
+obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o
obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
-obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
# targets
-obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
-obj-$(CONFIG_IP_NF_TARGET_TOS) += ipt_TOS.o
+obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
+obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
-obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
+obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
+obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
obj-$(CONFIG_IP_NF_TARGET_SAME) += ipt_SAME.o
-obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
-obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
-obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
+obj-$(CONFIG_IP_NF_TARGET_TOS) += ipt_TOS.o
obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
+obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
# generic ARP tables
obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 10a2ce09fd8..14d64a383db 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -22,6 +22,7 @@
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
@@ -607,15 +608,11 @@ static ctl_table ipq_root_table[] = {
{ .ctl_name = 0 }
};
-#ifdef CONFIG_PROC_FS
-static int
-ipq_get_info(char *buffer, char **start, off_t offset, int length)
+static int ip_queue_show(struct seq_file *m, void *v)
{
- int len;
-
read_lock_bh(&queue_lock);
- len = sprintf(buffer,
+ seq_printf(m,
"Peer PID : %d\n"
"Copy mode : %hu\n"
"Copy range : %u\n"
@@ -632,16 +629,21 @@ ipq_get_info(char *buffer, char **start, off_t offset, int length)
queue_user_dropped);
read_unlock_bh(&queue_lock);
+ return 0;
+}
- *start = buffer + offset;
- len -= offset;
- if (len > length)
- len = length;
- else if (len < 0)
- len = 0;
- return len;
+static int ip_queue_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ip_queue_show, NULL);
}
-#endif /* CONFIG_PROC_FS */
+
+static const struct file_operations ip_queue_proc_fops = {
+ .open = ip_queue_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
static struct nf_queue_handler nfqh = {
.name = "ip_queue",
@@ -661,10 +663,11 @@ static int __init ip_queue_init(void)
goto cleanup_netlink_notifier;
}
- proc = proc_net_create(&init_net, IPQ_PROC_FS_NAME, 0, ipq_get_info);
- if (proc)
+ proc = create_proc_entry(IPQ_PROC_FS_NAME, 0, init_net.proc_net);
+ if (proc) {
proc->owner = THIS_MODULE;
- else {
+ proc->proc_fops = &ip_queue_proc_fops;
+ } else {
printk(KERN_ERR "ip_queue: failed to create proc entry\n");
goto cleanup_ipqnl;
}
diff --git a/net/ipv4/netfilter/nf_nat_amanda.c b/net/ipv4/netfilter/nf_nat_amanda.c
index 35a5aa69cd9..c31b8766825 100644
--- a/net/ipv4/netfilter/nf_nat_amanda.c
+++ b/net/ipv4/netfilter/nf_nat_amanda.c
@@ -69,7 +69,7 @@ static void __exit nf_nat_amanda_fini(void)
static int __init nf_nat_amanda_init(void)
{
- BUG_ON(rcu_dereference(nf_nat_amanda_hook));
+ BUG_ON(nf_nat_amanda_hook != NULL);
rcu_assign_pointer(nf_nat_amanda_hook, help);
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 56e93f692e8..70e7997ea28 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -681,7 +681,7 @@ static int clean_nat(struct nf_conn *i, void *data)
if (!nat)
return 0;
- memset(nat, 0, sizeof(nat));
+ memset(nat, 0, sizeof(*nat));
i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | IPS_SEQ_ADJUST);
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_ftp.c b/net/ipv4/netfilter/nf_nat_ftp.c
index e1a16d3ea4c..a1d5d58a58b 100644
--- a/net/ipv4/netfilter/nf_nat_ftp.c
+++ b/net/ipv4/netfilter/nf_nat_ftp.c
@@ -147,7 +147,7 @@ static void __exit nf_nat_ftp_fini(void)
static int __init nf_nat_ftp_init(void)
{
- BUG_ON(rcu_dereference(nf_nat_ftp_hook));
+ BUG_ON(nf_nat_ftp_hook != NULL);
rcu_assign_pointer(nf_nat_ftp_hook, nf_nat_ftp);
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index a868c8c4132..93e18ef114f 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -544,15 +544,15 @@ static int nat_callforwarding(struct sk_buff *skb, struct nf_conn *ct,
/****************************************************************************/
static int __init init(void)
{
- BUG_ON(rcu_dereference(set_h245_addr_hook) != NULL);
- BUG_ON(rcu_dereference(set_h225_addr_hook) != NULL);
- BUG_ON(rcu_dereference(set_sig_addr_hook) != NULL);
- BUG_ON(rcu_dereference(set_ras_addr_hook) != NULL);
- BUG_ON(rcu_dereference(nat_rtp_rtcp_hook) != NULL);
- BUG_ON(rcu_dereference(nat_t120_hook) != NULL);
- BUG_ON(rcu_dereference(nat_h245_hook) != NULL);
- BUG_ON(rcu_dereference(nat_callforwarding_hook) != NULL);
- BUG_ON(rcu_dereference(nat_q931_hook) != NULL);
+ BUG_ON(set_h245_addr_hook != NULL);
+ BUG_ON(set_h225_addr_hook != NULL);
+ BUG_ON(set_sig_addr_hook != NULL);
+ BUG_ON(set_ras_addr_hook != NULL);
+ BUG_ON(nat_rtp_rtcp_hook != NULL);
+ BUG_ON(nat_t120_hook != NULL);
+ BUG_ON(nat_h245_hook != NULL);
+ BUG_ON(nat_callforwarding_hook != NULL);
+ BUG_ON(nat_q931_hook != NULL);
rcu_assign_pointer(set_h245_addr_hook, set_h245_addr);
rcu_assign_pointer(set_h225_addr_hook, set_h225_addr);
diff --git a/net/ipv4/netfilter/nf_nat_irc.c b/net/ipv4/netfilter/nf_nat_irc.c
index 766e2c16c6b..fe6f9cef6c8 100644
--- a/net/ipv4/netfilter/nf_nat_irc.c
+++ b/net/ipv4/netfilter/nf_nat_irc.c
@@ -74,7 +74,7 @@ static void __exit nf_nat_irc_fini(void)
static int __init nf_nat_irc_init(void)
{
- BUG_ON(rcu_dereference(nf_nat_irc_hook));
+ BUG_ON(nf_nat_irc_hook != NULL);
rcu_assign_pointer(nf_nat_irc_hook, help);
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index e1385a09907..6817e7995f3 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -281,16 +281,16 @@ static int __init nf_nat_helper_pptp_init(void)
{
nf_nat_need_gre();
- BUG_ON(rcu_dereference(nf_nat_pptp_hook_outbound));
+ BUG_ON(nf_nat_pptp_hook_outbound != NULL);
rcu_assign_pointer(nf_nat_pptp_hook_outbound, pptp_outbound_pkt);
- BUG_ON(rcu_dereference(nf_nat_pptp_hook_inbound));
+ BUG_ON(nf_nat_pptp_hook_inbound != NULL);
rcu_assign_pointer(nf_nat_pptp_hook_inbound, pptp_inbound_pkt);
- BUG_ON(rcu_dereference(nf_nat_pptp_hook_exp_gre));
+ BUG_ON(nf_nat_pptp_hook_exp_gre != NULL);
rcu_assign_pointer(nf_nat_pptp_hook_exp_gre, pptp_exp_gre);
- BUG_ON(rcu_dereference(nf_nat_pptp_hook_expectfn));
+ BUG_ON(nf_nat_pptp_hook_expectfn != NULL);
rcu_assign_pointer(nf_nat_pptp_hook_expectfn, pptp_nat_expected);
return 0;
}
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index ce9edbcc01e..3ca98971a1e 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -293,8 +293,8 @@ static void __exit nf_nat_sip_fini(void)
static int __init nf_nat_sip_init(void)
{
- BUG_ON(rcu_dereference(nf_nat_sip_hook));
- BUG_ON(rcu_dereference(nf_nat_sdp_hook));
+ BUG_ON(nf_nat_sip_hook != NULL);
+ BUG_ON(nf_nat_sdp_hook != NULL);
rcu_assign_pointer(nf_nat_sip_hook, ip_nat_sip);
rcu_assign_pointer(nf_nat_sdp_hook, ip_nat_sdp);
return 0;
diff --git a/net/ipv4/netfilter/nf_nat_tftp.c b/net/ipv4/netfilter/nf_nat_tftp.c
index 0ecec701cb4..1360a94766d 100644
--- a/net/ipv4/netfilter/nf_nat_tftp.c
+++ b/net/ipv4/netfilter/nf_nat_tftp.c
@@ -43,7 +43,7 @@ static void __exit nf_nat_tftp_fini(void)
static int __init nf_nat_tftp_init(void)
{
- BUG_ON(rcu_dereference(nf_nat_tftp_hook));
+ BUG_ON(nf_nat_tftp_hook != NULL);
rcu_assign_pointer(nf_nat_tftp_hook, help);
return 0;
}
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index ffdccc0972e..ce34b281803 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -46,17 +46,6 @@
#include <net/sock.h>
#include <net/raw.h>
-static int fold_prot_inuse(struct proto *proto)
-{
- int res = 0;
- int cpu;
-
- for_each_possible_cpu(cpu)
- res += proto->stats[cpu].inuse;
-
- return res;
-}
-
/*
* Report socket allocation statistics [mea@utu.fi]
*/
@@ -64,12 +53,12 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
{
socket_seq_show(seq);
seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n",
- fold_prot_inuse(&tcp_prot), atomic_read(&tcp_orphan_count),
+ sock_prot_inuse(&tcp_prot), atomic_read(&tcp_orphan_count),
tcp_death_row.tw_count, atomic_read(&tcp_sockets_allocated),
atomic_read(&tcp_memory_allocated));
- seq_printf(seq, "UDP: inuse %d\n", fold_prot_inuse(&udp_prot));
- seq_printf(seq, "UDPLITE: inuse %d\n", fold_prot_inuse(&udplite_prot));
- seq_printf(seq, "RAW: inuse %d\n", fold_prot_inuse(&raw_prot));
+ seq_printf(seq, "UDP: inuse %d\n", sock_prot_inuse(&udp_prot));
+ seq_printf(seq, "UDPLITE: inuse %d\n", sock_prot_inuse(&udplite_prot));
+ seq_printf(seq, "RAW: inuse %d\n", sock_prot_inuse(&raw_prot));
seq_printf(seq, "FRAG: inuse %d memory %d\n",
ip_frag_nqueues(), ip_frag_mem());
return 0;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 3916faca3af..66b42f547bf 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -760,6 +760,8 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
}
}
+DEFINE_PROTO_INUSE(raw)
+
struct proto raw_prot = {
.name = "RAW",
.owner = THIS_MODULE,
@@ -781,6 +783,7 @@ struct proto raw_prot = {
.compat_setsockopt = compat_raw_setsockopt,
.compat_getsockopt = compat_raw_getsockopt,
#endif
+ REF_PROTO_INUSE(raw)
};
#ifdef CONFIG_PROC_FS
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 21b12de9e65..1bff9ed349f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -578,6 +578,9 @@ static void rt_check_expire(struct work_struct *work)
i = (i + 1) & rt_hash_mask;
rthp = &rt_hash_table[i].chain;
+ if (need_resched())
+ cond_resched();
+
if (*rthp == NULL)
continue;
spin_lock_bh(rt_hash_lock_addr(i));
@@ -851,9 +854,7 @@ restart:
*/
rcu_assign_pointer(rt_hash_table[hash].chain, rth);
- rth->u.dst.__use++;
- dst_hold(&rth->u.dst);
- rth->u.dst.lastuse = now;
+ dst_use(&rth->u.dst, now);
spin_unlock_bh(rt_hash_lock_addr(hash));
rt_drop(rt);
@@ -1813,11 +1814,6 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
goto martian_destination;
err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
- if (err == -ENOBUFS)
- goto e_nobufs;
- if (err == -EINVAL)
- goto e_inval;
-
done:
in_dev_put(in_dev);
if (free_res)
@@ -1935,9 +1931,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
rth->fl.oif == 0 &&
rth->fl.mark == skb->mark &&
rth->fl.fl4_tos == tos) {
- rth->u.dst.lastuse = jiffies;
- dst_hold(&rth->u.dst);
- rth->u.dst.__use++;
+ dst_use(&rth->u.dst, jiffies);
RT_CACHE_STAT_INC(in_hit);
rcu_read_unlock();
skb->dst = (struct dst_entry*)rth;
@@ -2331,9 +2325,7 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
rth->fl.mark == flp->mark &&
!((rth->fl.fl4_tos ^ flp->fl4_tos) &
(IPTOS_RT_MASK | RTO_ONLINK))) {
- rth->u.dst.lastuse = jiffies;
- dst_hold(&rth->u.dst);
- rth->u.dst.__use++;
+ dst_use(&rth->u.dst, jiffies);
RT_CACHE_STAT_INC(out_hit);
rcu_read_unlock_bh();
*rp = rth;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c64072bb504..8e65182f7af 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2456,11 +2456,11 @@ void __init tcp_init(void)
thash_entries ? 0 : 512 * 1024);
tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
- rwlock_init(&tcp_hashinfo.ehash[i].lock);
INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
}
-
+ if (inet_ehash_locks_alloc(&tcp_hashinfo))
+ panic("TCP: failed to alloc ehash_locks");
tcp_hashinfo.bhash =
alloc_large_system_hash("TCP bind",
sizeof(struct inet_bind_hashbucket),
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ca9590f4f52..0f0c1c9829a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1269,6 +1269,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
return 0;
+ if (!tp->packets_out)
+ goto out;
+
/* SACK fastpath:
* if the only SACK change is the increase of the end_seq of
* the first block then only apply that SACK block
@@ -1400,11 +1403,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
/* DSACK info lost if out-of-mem, try SACK still */
if (in_sack <= 0)
in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq);
- if (in_sack < 0)
+ if (unlikely(in_sack < 0))
break;
- fack_count += tcp_skb_pcount(skb);
-
sacked = TCP_SKB_CB(skb)->sacked;
/* Account D-SACK for retransmitted packet. */
@@ -1419,19 +1420,17 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
if ((dup_sack && in_sack) &&
(sacked&TCPCB_SACKED_ACKED))
reord = min(fack_count, reord);
- } else {
- /* If it was in a hole, we detected reordering. */
- if (fack_count < prior_fackets &&
- !(sacked&TCPCB_SACKED_ACKED))
- reord = min(fack_count, reord);
}
/* Nothing to do; acked frame is about to be dropped. */
+ fack_count += tcp_skb_pcount(skb);
continue;
}
- if (!in_sack)
+ if (!in_sack) {
+ fack_count += tcp_skb_pcount(skb);
continue;
+ }
if (!(sacked&TCPCB_SACKED_ACKED)) {
if (sacked & TCPCB_SACKED_RETRANS) {
@@ -1448,12 +1447,17 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
tp->retransmit_skb_hint = NULL;
}
} else {
- /* New sack for not retransmitted frame,
- * which was in hole. It is reordering.
- */
- if (!(sacked & TCPCB_RETRANS) &&
- fack_count < prior_fackets)
- reord = min(fack_count, reord);
+ if (!(sacked & TCPCB_RETRANS)) {
+ /* New sack for not retransmitted frame,
+ * which was in hole. It is reordering.
+ */
+ if (fack_count < prior_fackets)
+ reord = min(fack_count, reord);
+
+ /* SACK enhanced F-RTO (RFC4138; Appendix B) */
+ if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
+ flag |= FLAG_ONLY_ORIG_SACKED;
+ }
if (sacked & TCPCB_LOST) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
@@ -1462,24 +1466,13 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
/* clear lost hint */
tp->retransmit_skb_hint = NULL;
}
- /* SACK enhanced F-RTO detection.
- * Set flag if and only if non-rexmitted
- * segments below frto_highmark are
- * SACKed (RFC4138; Appendix B).
- * Clearing correct due to in-order walk
- */
- if (after(end_seq, tp->frto_highmark)) {
- flag &= ~FLAG_ONLY_ORIG_SACKED;
- } else {
- if (!(sacked & TCPCB_RETRANS))
- flag |= FLAG_ONLY_ORIG_SACKED;
- }
}
TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED;
flag |= FLAG_DATA_SACKED;
tp->sacked_out += tcp_skb_pcount(skb);
+ fack_count += tcp_skb_pcount(skb);
if (fack_count > tp->fackets_out)
tp->fackets_out = fack_count;
@@ -1490,6 +1483,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
} else {
if (dup_sack && (sacked&TCPCB_RETRANS))
reord = min(fack_count, reord);
+
+ fack_count += tcp_skb_pcount(skb);
}
/* D-SACK. We can detect redundant retransmission
@@ -1504,6 +1499,12 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
tp->retransmit_skb_hint = NULL;
}
}
+
+ /* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct
+ * due to in-order walk
+ */
+ if (after(end_seq, tp->frto_highmark))
+ flag &= ~FLAG_ONLY_ORIG_SACKED;
}
if (tp->retrans_out &&
@@ -1515,7 +1516,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
if ((reord < tp->fackets_out) && icsk->icsk_ca_state != TCP_CA_Loss &&
(!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
- tcp_update_reordering(sk, ((tp->fackets_out + 1) - reord), 0);
+ tcp_update_reordering(sk, tp->fackets_out - reord, 0);
+
+out:
#if FASTRETRANS_DEBUG > 0
BUG_TRAP((int)tp->sacked_out >= 0);
@@ -1671,6 +1674,9 @@ void tcp_enter_frto(struct sock *sk)
}
tcp_verify_left_out(tp);
+ /* Too bad if TCP was application limited */
+ tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
+
/* Earlier loss recovery underway (see RFC4138; Appendix B).
* The last condition is necessary at least in tp->frto_counter case.
*/
@@ -1703,6 +1709,8 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
tcp_for_write_queue(skb, sk) {
if (skb == tcp_send_head(sk))
break;
+
+ TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
/*
* Count the retransmission made on RTO correctly (only when
* waiting for the first ACK and did not get it)...
@@ -1716,7 +1724,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
} else {
if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
tp->undo_marker = 0;
- TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
+ TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
}
/* Don't lost mark skbs that were fwd transmitted after RTO */
@@ -2630,7 +2638,8 @@ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
* is before the ack sequence we can discard it as it's confirmed to have
* arrived at the other end.
*/
-static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
+static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p,
+ int prior_fackets)
{
struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2639,6 +2648,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
int fully_acked = 1;
int flag = 0;
int prior_packets = tp->packets_out;
+ u32 cnt = 0;
+ u32 reord = tp->packets_out;
s32 seq_rtt = -1;
ktime_t last_ackt = net_invalid_timestamp();
@@ -2679,10 +2690,14 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
if ((flag & FLAG_DATA_ACKED) ||
(packets_acked > 1))
flag |= FLAG_NONHEAD_RETRANS_ACKED;
- } else if (seq_rtt < 0) {
- seq_rtt = now - scb->when;
- if (fully_acked)
- last_ackt = skb->tstamp;
+ } else {
+ if (seq_rtt < 0) {
+ seq_rtt = now - scb->when;
+ if (fully_acked)
+ last_ackt = skb->tstamp;
+ }
+ if (!(sacked & TCPCB_SACKED_ACKED))
+ reord = min(cnt, reord);
}
if (sacked & TCPCB_SACKED_ACKED)
@@ -2693,12 +2708,16 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
if ((sacked & TCPCB_URG) && tp->urg_mode &&
!before(end_seq, tp->snd_up))
tp->urg_mode = 0;
- } else if (seq_rtt < 0) {
- seq_rtt = now - scb->when;
- if (fully_acked)
- last_ackt = skb->tstamp;
+ } else {
+ if (seq_rtt < 0) {
+ seq_rtt = now - scb->when;
+ if (fully_acked)
+ last_ackt = skb->tstamp;
+ }
+ reord = min(cnt, reord);
}
tp->packets_out -= packets_acked;
+ cnt += packets_acked;
/* Initial outgoing SYN's get put onto the write_queue
* just like anything else we transmit. It is not
@@ -2730,13 +2749,18 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p)
tcp_ack_update_rtt(sk, flag, seq_rtt);
tcp_rearm_rto(sk);
+ if (tcp_is_reno(tp)) {
+ tcp_remove_reno_sacks(sk, pkts_acked);
+ } else {
+ /* Non-retransmitted hole got filled? That's reordering */
+ if (reord < prior_fackets)
+ tcp_update_reordering(sk, tp->fackets_out - reord, 0);
+ }
+
tp->fackets_out -= min(pkts_acked, tp->fackets_out);
/* hint's skb might be NULL but we don't need to care */
tp->fastpath_cnt_hint -= min_t(u32, pkts_acked,
tp->fastpath_cnt_hint);
- if (tcp_is_reno(tp))
- tcp_remove_reno_sacks(sk, pkts_acked);
-
if (ca_ops->pkts_acked) {
s32 rtt_us = -1;
@@ -3019,6 +3043,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
u32 ack_seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq;
u32 prior_in_flight;
+ u32 prior_fackets;
s32 seq_rtt;
int prior_packets;
int frto_cwnd = 0;
@@ -3043,6 +3068,8 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
tp->bytes_acked += min(ack - prior_snd_una, tp->mss_cache);
}
+ prior_fackets = tp->fackets_out;
+
if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
/* Window is constant, pure forward advance.
* No more checks are required.
@@ -3084,13 +3111,13 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
prior_in_flight = tcp_packets_in_flight(tp);
/* See if we can take anything off of the retransmit queue. */
- flag |= tcp_clean_rtx_queue(sk, &seq_rtt);
+ flag |= tcp_clean_rtx_queue(sk, &seq_rtt, prior_fackets);
+ if (tp->frto_counter)
+ frto_cwnd = tcp_process_frto(sk, flag);
/* Guarantee sacktag reordering detection against wrap-arounds */
if (before(tp->frto_highmark, tp->snd_una))
tp->frto_highmark = 0;
- if (tp->frto_counter)
- frto_cwnd = tcp_process_frto(sk, flag);
if (tcp_ack_is_dubious(sk, flag)) {
/* Advance CWND, if state allows this. */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index d438dfb0c8f..e566f3c6767 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2049,8 +2049,9 @@ static void *established_get_first(struct seq_file *seq)
struct sock *sk;
struct hlist_node *node;
struct inet_timewait_sock *tw;
+ rwlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
- read_lock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
+ read_lock_bh(lock);
sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
if (sk->sk_family != st->family) {
continue;
@@ -2067,7 +2068,7 @@ static void *established_get_first(struct seq_file *seq)
rc = tw;
goto out;
}
- read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
+ read_unlock_bh(lock);
st->state = TCP_SEQ_STATE_ESTABLISHED;
}
out:
@@ -2094,11 +2095,11 @@ get_tw:
cur = tw;
goto out;
}
- read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
+ read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
st->state = TCP_SEQ_STATE_ESTABLISHED;
if (++st->bucket < tcp_hashinfo.ehash_size) {
- read_lock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
+ read_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
} else {
cur = NULL;
@@ -2206,7 +2207,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
case TCP_SEQ_STATE_TIME_WAIT:
case TCP_SEQ_STATE_ESTABLISHED:
if (v)
- read_unlock_bh(&tcp_hashinfo.ehash[st->bucket].lock);
+ read_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
break;
}
}
@@ -2417,6 +2418,8 @@ void tcp4_proc_exit(void)
}
#endif /* CONFIG_PROC_FS */
+DEFINE_PROTO_INUSE(tcp)
+
struct proto tcp_prot = {
.name = "TCP",
.owner = THIS_MODULE,
@@ -2451,6 +2454,7 @@ struct proto tcp_prot = {
.compat_setsockopt = compat_tcp_setsockopt,
.compat_getsockopt = compat_tcp_getsockopt,
#endif
+ REF_PROTO_INUSE(tcp)
};
void __init tcp_v4_init(struct net_proto_family *ops)
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index a794a8ca8b4..978b3fd61e6 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -17,6 +17,11 @@ static struct xfrm_tunnel *tunnel4_handlers;
static struct xfrm_tunnel *tunnel64_handlers;
static DEFINE_MUTEX(tunnel4_mutex);
+static inline struct xfrm_tunnel **fam_handlers(unsigned short family)
+{
+ return (family == AF_INET) ? &tunnel4_handlers : &tunnel64_handlers;
+}
+
int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family)
{
struct xfrm_tunnel **pprev;
@@ -25,8 +30,7 @@ int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family)
mutex_lock(&tunnel4_mutex);
- for (pprev = (family == AF_INET) ? &tunnel4_handlers : &tunnel64_handlers;
- *pprev; pprev = &(*pprev)->next) {
+ for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) {
if ((*pprev)->priority > priority)
break;
if ((*pprev)->priority == priority)
@@ -53,8 +57,7 @@ int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
mutex_lock(&tunnel4_mutex);
- for (pprev = (family == AF_INET) ? &tunnel4_handlers : &tunnel64_handlers;
- *pprev; pprev = &(*pprev)->next) {
+ for (pprev = fam_handlers(family); *pprev; pprev = &(*pprev)->next) {
if (*pprev == handler) {
*pprev = handler->next;
ret = 0;
@@ -118,6 +121,17 @@ static void tunnel4_err(struct sk_buff *skb, u32 info)
break;
}
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static void tunnel64_err(struct sk_buff *skb, u32 info)
+{
+ struct xfrm_tunnel *handler;
+
+ for (handler = tunnel64_handlers; handler; handler = handler->next)
+ if (!handler->err_handler(skb, info))
+ break;
+}
+#endif
+
static struct net_protocol tunnel4_protocol = {
.handler = tunnel4_rcv,
.err_handler = tunnel4_err,
@@ -127,7 +141,7 @@ static struct net_protocol tunnel4_protocol = {
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static struct net_protocol tunnel64_protocol = {
.handler = tunnel64_rcv,
- .err_handler = tunnel4_err,
+ .err_handler = tunnel64_err,
.no_policy = 1,
};
#endif
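
The new tunnel64_err() exists because handlers registered for AF_INET6 sit on the tunnel64_handlers list (see fam_handlers() above), so ICMP errors for IPv6-over-IPv4 tunnels previously went to the tunnel4 handlers via tunnel4_err() and never reached them. A hedged usage sketch — the handler names and bodies below are hypothetical; only the registration call and the members it relies on come from this file:

/* Hypothetical IPv6-over-IPv4 handler; shows which list it joins. */
static int example_ip6ip_rcv(struct sk_buff *skb)
{
	return 0;
}

static int example_ip6ip_err(struct sk_buff *skb, u32 info)
{
	return -ENOENT;		/* reachable only through tunnel64_err() */
}

static struct xfrm_tunnel example_ip6ip_handler = {
	.handler	= example_ip6ip_rcv,
	.err_handler	= example_ip6ip_err,
	.priority	= 1,
};

static int __init example_init(void)
{
	/* AF_INET6 selects &tunnel64_handlers in fam_handlers() */
	return xfrm4_tunnel_register(&example_ip6ip_handler, AF_INET6);
}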
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4bc25b46f33..03c400ca14c 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1430,6 +1430,8 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
}
+DEFINE_PROTO_INUSE(udp)
+
struct proto udp_prot = {
.name = "UDP",
.owner = THIS_MODULE,
@@ -1452,6 +1454,7 @@ struct proto udp_prot = {
.compat_setsockopt = compat_udp_setsockopt,
.compat_getsockopt = compat_udp_getsockopt,
#endif
+ REF_PROTO_INUSE(udp)
};
/* ------------------------------------------------------------------------ */
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index 94977205abb..f5baeb3e8b8 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -44,6 +44,8 @@ static struct net_protocol udplite_protocol = {
.no_policy = 1,
};
+DEFINE_PROTO_INUSE(udplite)
+
struct proto udplite_prot = {
.name = "UDP-Lite",
.owner = THIS_MODULE,
@@ -67,6 +69,7 @@ struct proto udplite_prot = {
.compat_setsockopt = compat_udp_setsockopt,
.compat_getsockopt = compat_udp_getsockopt,
#endif
+ REF_PROTO_INUSE(udplite)
};
static struct inet_protosw udplite4_protosw = {
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 706622af206..428c6b0e26d 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -31,25 +31,6 @@ struct fib6_rule
static struct fib_rules_ops fib6_rules_ops;
-static struct fib6_rule main_rule = {
- .common = {
- .refcnt = ATOMIC_INIT(2),
- .pref = 0x7FFE,
- .action = FR_ACT_TO_TBL,
- .table = RT6_TABLE_MAIN,
- },
-};
-
-static struct fib6_rule local_rule = {
- .common = {
- .refcnt = ATOMIC_INIT(2),
- .pref = 0,
- .action = FR_ACT_TO_TBL,
- .table = RT6_TABLE_LOCAL,
- .flags = FIB_RULE_PERMANENT,
- },
-};
-
struct dst_entry *fib6_rule_lookup(struct flowi *fl, int flags,
pol_lookup_t lookup)
{
@@ -270,11 +251,23 @@ static struct fib_rules_ops fib6_rules_ops = {
.owner = THIS_MODULE,
};
-void __init fib6_rules_init(void)
+static int __init fib6_default_rules_init(void)
{
- list_add_tail(&local_rule.common.list, &fib6_rules_ops.rules_list);
- list_add_tail(&main_rule.common.list, &fib6_rules_ops.rules_list);
+ int err;
+
+ err = fib_default_rule_add(&fib6_rules_ops, 0,
+ RT6_TABLE_LOCAL, FIB_RULE_PERMANENT);
+ if (err < 0)
+ return err;
+ err = fib_default_rule_add(&fib6_rules_ops, 0x7FFE, RT6_TABLE_MAIN, 0);
+ if (err < 0)
+ return err;
+ return 0;
+}
+void __init fib6_rules_init(void)
+{
+ BUG_ON(fib6_default_rules_init());
fib_rules_register(&fib6_rules_ops);
}
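
The two static rule definitions are replaced by runtime calls to fib_default_rule_add(), a helper added to net/core/fib_rules.c in this same series (see the diffstat). A hedged sketch of what that helper plausibly does, roughly mirroring the fields the deleted initializers set — treat the exact body as an assumption:

/* Plausible shape of the shared helper; not quoted from this patch. */
int fib_default_rule_add(struct fib_rules_ops *ops,
			 u32 pref, u32 table, u32 flags)
{
	struct fib_rule *r;

	r = kzalloc(ops->rule_size, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	atomic_set(&r->refcnt, 1);
	r->action = FR_ACT_TO_TBL;
	r->pref = pref;
	r->table = table;
	r->flags = flags;

	/* No locking needed: the ops are not registered yet. */
	list_add_tail(&r->list, &ops->rules_list);
	return 0;
}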
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index d6f1026f194..adc73adadfa 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -37,9 +37,8 @@ void __inet6_hash(struct inet_hashinfo *hashinfo,
} else {
unsigned int hash;
sk->sk_hash = hash = inet6_sk_ehashfn(sk);
- hash &= (hashinfo->ehash_size - 1);
- list = &hashinfo->ehash[hash].chain;
- lock = &hashinfo->ehash[hash].lock;
+ list = &inet_ehash_bucket(hashinfo, hash)->chain;
+ lock = inet_ehash_lockp(hashinfo, hash);
write_lock(lock);
}
@@ -70,9 +69,10 @@ struct sock *__inet6_lookup_established(struct inet_hashinfo *hashinfo,
*/
unsigned int hash = inet6_ehashfn(daddr, hnum, saddr, sport);
struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);
+ rwlock_t *lock = inet_ehash_lockp(hashinfo, hash);
prefetch(head->chain.first);
- read_lock(&head->lock);
+ read_lock(lock);
sk_for_each(sk, node, &head->chain) {
/* For IPV6 do the cheaper port and family tests first. */
if (INET6_MATCH(sk, hash, saddr, daddr, ports, dif))
@@ -92,12 +92,12 @@ struct sock *__inet6_lookup_established(struct inet_hashinfo *hashinfo,
goto hit;
}
}
- read_unlock(&head->lock);
+ read_unlock(lock);
return NULL;
hit:
sock_hold(sk);
- read_unlock(&head->lock);
+ read_unlock(lock);
return sk;
}
EXPORT_SYMBOL(__inet6_lookup_established);
@@ -175,12 +175,13 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
const unsigned int hash = inet6_ehashfn(daddr, lport, saddr,
inet->dport);
struct inet_ehash_bucket *head = inet_ehash_bucket(hinfo, hash);
+ rwlock_t *lock = inet_ehash_lockp(hinfo, hash);
struct sock *sk2;
const struct hlist_node *node;
struct inet_timewait_sock *tw;
prefetch(head->chain.first);
- write_lock(&head->lock);
+ write_lock(lock);
/* Check TIME-WAIT sockets first. */
sk_for_each(sk2, node, &head->twchain) {
@@ -216,7 +217,7 @@ unique:
__sk_add_node(sk, &head->chain);
sk->sk_hash = hash;
sock_prot_inc_use(sk->sk_prot);
- write_unlock(&head->lock);
+ write_unlock(lock);
if (twp != NULL) {
*twp = tw;
@@ -231,7 +232,7 @@ unique:
return 0;
not_unique:
- write_unlock(&head->lock);
+ write_unlock(lock);
return -EADDRNOTAVAIL;
}
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 653fc0a8235..86e1835ce4e 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1339,6 +1339,19 @@ error:
return err;
}
+static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
+{
+ inet->cork.flags &= ~IPCORK_OPT;
+ kfree(np->cork.opt);
+ np->cork.opt = NULL;
+ if (np->cork.rt) {
+ dst_release(&np->cork.rt->u.dst);
+ np->cork.rt = NULL;
+ inet->cork.flags &= ~IPCORK_ALLFRAG;
+ }
+ memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
+}
+
int ip6_push_pending_frames(struct sock *sk)
{
struct sk_buff *skb, *tmp_skb;
@@ -1415,15 +1428,7 @@ int ip6_push_pending_frames(struct sock *sk)
}
out:
- inet->cork.flags &= ~IPCORK_OPT;
- kfree(np->cork.opt);
- np->cork.opt = NULL;
- if (np->cork.rt) {
- dst_release(&np->cork.rt->u.dst);
- np->cork.rt = NULL;
- inet->cork.flags &= ~IPCORK_ALLFRAG;
- }
- memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
+ ip6_cork_release(inet, np);
return err;
error:
goto out;
@@ -1431,8 +1436,6 @@ error:
void ip6_flush_pending_frames(struct sock *sk)
{
- struct inet_sock *inet = inet_sk(sk);
- struct ipv6_pinfo *np = inet6_sk(sk);
struct sk_buff *skb;
while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
@@ -1442,14 +1445,5 @@ void ip6_flush_pending_frames(struct sock *sk)
kfree_skb(skb);
}
- inet->cork.flags &= ~IPCORK_OPT;
-
- kfree(np->cork.opt);
- np->cork.opt = NULL;
- if (np->cork.rt) {
- dst_release(&np->cork.rt->u.dst);
- np->cork.rt = NULL;
- inet->cork.flags &= ~IPCORK_ALLFRAG;
- }
- memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
+ ip6_cork_release(inet_sk(sk), inet6_sk(sk));
}
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 85eb4798d8d..0cd4056f912 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -36,6 +36,7 @@
#include <net/ipcomp.h>
#include <asm/semaphore.h>
#include <linux/crypto.h>
+#include <linux/err.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/percpu.h>
@@ -358,7 +359,7 @@ static struct crypto_comp **ipcomp6_alloc_tfms(const char *alg_name)
for_each_possible_cpu(cpu) {
struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
CRYPTO_ALG_ASYNC);
- if (!tfm)
+ if (IS_ERR(tfm))
goto error;
*per_cpu_ptr(tfms, cpu) = tfm;
}
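
The !tfm test was wrong because crypto_alloc_comp() reports failure through the error-pointer convention rather than by returning NULL. A small hedged illustration of that convention — the allocation call matches the one above, the wrapper itself is made up for the example:

#include <linux/err.h>
#include <linux/crypto.h>

/* Hypothetical wrapper, for illustration only. */
static struct crypto_comp *example_alloc(const char *alg_name)
{
	struct crypto_comp *tfm = crypto_alloc_comp(alg_name, 0,
						    CRYPTO_ALG_ASYNC);

	if (IS_ERR(tfm))	/* failure is encoded in the pointer value ... */
		return NULL;	/* ... so a plain NULL check never triggers    */
	return tfm;
}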
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 36f7dbfb6db..67997a74ddc 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1037,6 +1037,7 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
ndmsg = nlmsg_data(nlh);
ndmsg->nduseropt_family = AF_INET6;
+ ndmsg->nduseropt_ifindex = ra->dev->ifindex;
ndmsg->nduseropt_icmp_type = icmp6h->icmp6_type;
ndmsg->nduseropt_icmp_code = icmp6h->icmp6_code;
ndmsg->nduseropt_opts_len = opt->nd_opt_len << 3;
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 4513eab7739..e789ec44d23 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -4,25 +4,29 @@
# Link order matters here.
obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o
-obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
-obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o
-obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o
-obj-$(CONFIG_IP6_NF_MATCH_FRAG) += ip6t_frag.o
-obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
-obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
-obj-$(CONFIG_IP6_NF_MATCH_OWNER) += ip6t_owner.o
obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
-obj-$(CONFIG_IP6_NF_TARGET_HL) += ip6t_HL.o
obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
-obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
-obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o
-obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
-obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o
# objects for l3 independent conntrack
nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o
# l3 independent conntrack
obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o
+
+# matches
+obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
+obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
+obj-$(CONFIG_IP6_NF_MATCH_FRAG) += ip6t_frag.o
+obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o
+obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o
+obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o
+obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o
+obj-$(CONFIG_IP6_NF_MATCH_OWNER) += ip6t_owner.o
+obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
+
+# targets
+obj-$(CONFIG_IP6_NF_TARGET_HL) += ip6t_HL.o
+obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
+obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 6413a30d9f6..e273605eef8 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -23,6 +23,7 @@
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -596,15 +597,11 @@ static ctl_table ipq_root_table[] = {
{ .ctl_name = 0 }
};
-#ifdef CONFIG_PROC_FS
-static int
-ipq_get_info(char *buffer, char **start, off_t offset, int length)
+static int ip6_queue_show(struct seq_file *m, void *v)
{
- int len;
-
read_lock_bh(&queue_lock);
- len = sprintf(buffer,
+ seq_printf(m,
"Peer PID : %d\n"
"Copy mode : %hu\n"
"Copy range : %u\n"
@@ -621,16 +618,21 @@ ipq_get_info(char *buffer, char **start, off_t offset, int length)
queue_user_dropped);
read_unlock_bh(&queue_lock);
+ return 0;
+}
- *start = buffer + offset;
- len -= offset;
- if (len > length)
- len = length;
- else if (len < 0)
- len = 0;
- return len;
+static int ip6_queue_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ip6_queue_show, NULL);
}
-#endif /* CONFIG_PROC_FS */
+
+static const struct file_operations ip6_queue_proc_fops = {
+ .open = ip6_queue_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
static struct nf_queue_handler nfqh = {
.name = "ip6_queue",
@@ -650,10 +652,11 @@ static int __init ip6_queue_init(void)
goto cleanup_netlink_notifier;
}
- proc = proc_net_create(&init_net, IPQ_PROC_FS_NAME, 0, ipq_get_info);
- if (proc)
+ proc = create_proc_entry(IPQ_PROC_FS_NAME, 0, init_net.proc_net);
+ if (proc) {
proc->owner = THIS_MODULE;
- else {
+ proc->proc_fops = &ip6_queue_proc_fops;
+ } else {
printk(KERN_ERR "ip6_queue: failed to create proc entry\n");
goto cleanup_ipqnl;
}
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index be526ad9254..8631ed7fe8a 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -32,27 +32,16 @@
static struct proc_dir_entry *proc_net_devsnmp6;
-static int fold_prot_inuse(struct proto *proto)
-{
- int res = 0;
- int cpu;
-
- for_each_possible_cpu(cpu)
- res += proto->stats[cpu].inuse;
-
- return res;
-}
-
static int sockstat6_seq_show(struct seq_file *seq, void *v)
{
seq_printf(seq, "TCP6: inuse %d\n",
- fold_prot_inuse(&tcpv6_prot));
+ sock_prot_inuse(&tcpv6_prot));
seq_printf(seq, "UDP6: inuse %d\n",
- fold_prot_inuse(&udpv6_prot));
+ sock_prot_inuse(&udpv6_prot));
seq_printf(seq, "UDPLITE6: inuse %d\n",
- fold_prot_inuse(&udplitev6_prot));
+ sock_prot_inuse(&udplitev6_prot));
seq_printf(seq, "RAW6: inuse %d\n",
- fold_prot_inuse(&rawv6_prot));
+ sock_prot_inuse(&rawv6_prot));
seq_printf(seq, "FRAG6: inuse %d memory %d\n",
ip6_frag_nqueues(), ip6_frag_mem());
return 0;
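
fold_prot_inuse() goes away in favour of a common sock_prot_inuse(), whose definition lives elsewhere in this series rather than in this hunk. One plausible shape for it — essentially the removed loop centralised; this is an assumption, since the per-CPU storage may also have changed along with the new *_PROTO_INUSE macros:

/* Assumed sketch: the same per-CPU sum, now in one place. */
int sock_prot_inuse(struct proto *proto)
{
	int cpu, res = 0;

	for_each_possible_cpu(cpu)
		res += proto->stats[cpu].inuse;

	return res;
}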
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index ca24ef19cd8..807260d0358 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1144,6 +1144,8 @@ static int rawv6_init_sk(struct sock *sk)
return(0);
}
+DEFINE_PROTO_INUSE(rawv6)
+
struct proto rawv6_prot = {
.name = "RAWv6",
.owner = THIS_MODULE,
@@ -1166,6 +1168,7 @@ struct proto rawv6_prot = {
.compat_setsockopt = compat_rawv6_setsockopt,
.compat_getsockopt = compat_rawv6_getsockopt,
#endif
+ REF_PROTO_INUSE(rawv6)
};
#ifdef CONFIG_PROC_FS
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 95f8e4a62f6..6ecb5e6fae2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -38,12 +38,8 @@
#include <linux/in6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
-
-#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#endif
-
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ipv6.h>
@@ -548,12 +544,8 @@ restart:
rt = rt6_device_match(rt, fl->oif, flags);
BACKTRACK(&fl->fl6_src);
out:
- dst_hold(&rt->u.dst);
+ dst_use(&rt->u.dst, jiffies);
read_unlock_bh(&table->tb6_lock);
-
- rt->u.dst.lastuse = jiffies;
- rt->u.dst.__use++;
-
return rt;
}
@@ -2288,71 +2280,50 @@ struct rt6_proc_arg
static int rt6_info_route(struct rt6_info *rt, void *p_arg)
{
- struct rt6_proc_arg *arg = (struct rt6_proc_arg *) p_arg;
-
- if (arg->skip < arg->offset / RT6_INFO_LEN) {
- arg->skip++;
- return 0;
- }
-
- if (arg->len >= arg->length)
- return 0;
+ struct seq_file *m = p_arg;
- arg->len += sprintf(arg->buffer + arg->len,
- NIP6_SEQFMT " %02x ",
- NIP6(rt->rt6i_dst.addr),
- rt->rt6i_dst.plen);
+ seq_printf(m, NIP6_SEQFMT " %02x ", NIP6(rt->rt6i_dst.addr),
+ rt->rt6i_dst.plen);
#ifdef CONFIG_IPV6_SUBTREES
- arg->len += sprintf(arg->buffer + arg->len,
- NIP6_SEQFMT " %02x ",
- NIP6(rt->rt6i_src.addr),
- rt->rt6i_src.plen);
+ seq_printf(m, NIP6_SEQFMT " %02x ", NIP6(rt->rt6i_src.addr),
+ rt->rt6i_src.plen);
#else
- arg->len += sprintf(arg->buffer + arg->len,
- "00000000000000000000000000000000 00 ");
+ seq_puts(m, "00000000000000000000000000000000 00 ");
#endif
if (rt->rt6i_nexthop) {
- arg->len += sprintf(arg->buffer + arg->len,
- NIP6_SEQFMT,
- NIP6(*((struct in6_addr *)rt->rt6i_nexthop->primary_key)));
+ seq_printf(m, NIP6_SEQFMT,
+ NIP6(*((struct in6_addr *)rt->rt6i_nexthop->primary_key)));
} else {
- arg->len += sprintf(arg->buffer + arg->len,
- "00000000000000000000000000000000");
+ seq_puts(m, "00000000000000000000000000000000");
}
- arg->len += sprintf(arg->buffer + arg->len,
- " %08x %08x %08x %08x %8s\n",
- rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt),
- rt->u.dst.__use, rt->rt6i_flags,
- rt->rt6i_dev ? rt->rt6i_dev->name : "");
+ seq_printf(m, " %08x %08x %08x %08x %8s\n",
+ rt->rt6i_metric, atomic_read(&rt->u.dst.__refcnt),
+ rt->u.dst.__use, rt->rt6i_flags,
+ rt->rt6i_dev ? rt->rt6i_dev->name : "");
return 0;
}
-static int rt6_proc_info(char *buffer, char **start, off_t offset, int length)
+static int ipv6_route_show(struct seq_file *m, void *v)
{
- struct rt6_proc_arg arg = {
- .buffer = buffer,
- .offset = offset,
- .length = length,
- };
-
- fib6_clean_all(rt6_info_route, 0, &arg);
-
- *start = buffer;
- if (offset)
- *start += offset % RT6_INFO_LEN;
-
- arg.len -= offset % RT6_INFO_LEN;
-
- if (arg.len > length)
- arg.len = length;
- if (arg.len < 0)
- arg.len = 0;
+ fib6_clean_all(rt6_info_route, 0, m);
+ return 0;
+}
- return arg.len;
+static int ipv6_route_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ipv6_route_show, NULL);
}
+static const struct file_operations ipv6_route_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = ipv6_route_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
@@ -2489,22 +2460,14 @@ ctl_table ipv6_route_table[] = {
void __init ip6_route_init(void)
{
-#ifdef CONFIG_PROC_FS
- struct proc_dir_entry *p;
-#endif
ip6_dst_ops.kmem_cachep =
kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops.kmem_cachep;
fib6_init();
-#ifdef CONFIG_PROC_FS
- p = proc_net_create(&init_net, "ipv6_route", 0, rt6_proc_info);
- if (p)
- p->owner = THIS_MODULE;
-
+ proc_net_fops_create(&init_net, "ipv6_route", 0, &ipv6_route_proc_fops);
proc_net_fops_create(&init_net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
-#endif
#ifdef CONFIG_XFRM
xfrm6_init();
#endif
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 06be2a1f273..3aad861975a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -2107,6 +2107,8 @@ void tcp6_proc_exit(void)
}
#endif
+DEFINE_PROTO_INUSE(tcpv6)
+
struct proto tcpv6_prot = {
.name = "TCPv6",
.owner = THIS_MODULE,
@@ -2141,6 +2143,7 @@ struct proto tcpv6_prot = {
.compat_setsockopt = compat_tcp_setsockopt,
.compat_getsockopt = compat_tcp_getsockopt,
#endif
+ REF_PROTO_INUSE(tcpv6)
};
static struct inet6_protocol tcpv6_protocol = {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index caebad6ee51..ee1cc3f8599 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -205,12 +205,11 @@ out:
return err;
csum_copy_err:
+ UDP6_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite);
skb_kill_datagram(sk, skb, flags);
- if (flags & MSG_DONTWAIT) {
- UDP6_INC_STATS_USER(UDP_MIB_INERRORS, is_udplite);
+ if (flags & MSG_DONTWAIT)
return -EAGAIN;
- }
goto try_again;
}
@@ -971,6 +970,8 @@ void udp6_proc_exit(void) {
/* ------------------------------------------------------------------------ */
+DEFINE_PROTO_INUSE(udpv6)
+
struct proto udpv6_prot = {
.name = "UDPv6",
.owner = THIS_MODULE,
@@ -992,6 +993,7 @@ struct proto udpv6_prot = {
.compat_setsockopt = compat_udpv6_setsockopt,
.compat_getsockopt = compat_udpv6_getsockopt,
#endif
+ REF_PROTO_INUSE(udpv6)
};
static struct inet_protosw udpv6_protosw = {
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 766566f7de4..5a0379f7141 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -40,6 +40,8 @@ static int udplite_v6_get_port(struct sock *sk, unsigned short snum)
return udplite_get_port(sk, snum, ipv6_rcv_saddr_equal);
}
+DEFINE_PROTO_INUSE(udplitev6)
+
struct proto udplitev6_prot = {
.name = "UDPLITEv6",
.owner = THIS_MODULE,
@@ -62,6 +64,7 @@ struct proto udplitev6_prot = {
.compat_setsockopt = compat_udpv6_setsockopt,
.compat_getsockopt = compat_udpv6_getsockopt,
#endif
+ REF_PROTO_INUSE(udplitev6)
};
static struct inet_protosw udplite6_protosw = {
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index a195a66e0cc..c76a9523091 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -92,11 +92,6 @@ extern int ipxrtr_route_skb(struct sk_buff *skb);
extern struct ipx_route *ipxrtr_lookup(__be32 net);
extern int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
-#undef IPX_REFCNT_DEBUG
-#ifdef IPX_REFCNT_DEBUG
-atomic_t ipx_sock_nr;
-#endif
-
struct ipx_interface *ipx_interfaces_head(void)
{
struct ipx_interface *rc = NULL;
@@ -151,14 +146,7 @@ static void ipx_destroy_socket(struct sock *sk)
{
ipx_remove_socket(sk);
skb_queue_purge(&sk->sk_receive_queue);
-#ifdef IPX_REFCNT_DEBUG
- atomic_dec(&ipx_sock_nr);
- printk(KERN_DEBUG "IPX socket %p released, %d are still alive\n", sk,
- atomic_read(&ipx_sock_nr));
- if (atomic_read(&sk->sk_refcnt) != 1)
- printk(KERN_DEBUG "Destruction sock ipx %p delayed, cnt=%d\n",
- sk, atomic_read(&sk->sk_refcnt));
-#endif
+ sk_refcnt_debug_dec(sk);
sock_put(sk);
}
@@ -1384,11 +1372,8 @@ static int ipx_create(struct net *net, struct socket *sock, int protocol)
sk = sk_alloc(net, PF_IPX, GFP_KERNEL, &ipx_proto);
if (!sk)
goto out;
-#ifdef IPX_REFCNT_DEBUG
- atomic_inc(&ipx_sock_nr);
- printk(KERN_DEBUG "IPX socket %p created, now we have %d alive\n", sk,
- atomic_read(&ipx_sock_nr));
-#endif
+
+ sk_refcnt_debug_inc(sk);
sock_init_data(sock, sk);
sk->sk_no_check = 1; /* Checksum off by default */
sock->ops = &ipx_dgram_ops;
@@ -1409,6 +1394,7 @@ static int ipx_release(struct socket *sock)
sock_set_flag(sk, SOCK_DEAD);
sock->sk = NULL;
+ sk_refcnt_debug_release(sk);
ipx_destroy_socket(sk);
out:
return 0;
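
The hand-rolled ipx_sock_nr accounting moves to the generic sk_refcnt_debug_inc/dec/release() helpers, which compile away unless socket refcount debugging is enabled. A hedged sketch of the enabled variants, inferred from how they are used here and from the messages the removed code printed (the real definitions live in include/net/sock.h):

/* Sketch of the debug-enabled helpers; no-ops when debugging is off. */
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);	/* one more live socket */
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)	/* destruction will be delayed */
		printk(KERN_DEBUG "Destruction of %p delayed, refcnt=%d\n",
		       sk, atomic_read(&sk->sk_refcnt));
}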
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 6fffb3845ab..ce176e691af 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -13,6 +13,18 @@ config MAC80211
This option enables the hardware independent IEEE 802.11
networking stack.
+config MAC80211_RCSIMPLE
+ bool "'simple' rate control algorithm" if EMBEDDED
+ default y
+ depends on MAC80211
+ help
+ This option allows you to turn off the 'simple' rate
+ control algorithm in mac80211. If you do turn it off,
+ you absolutely need another rate control algorithm.
+
+ Say Y unless you know you will have another algorithm
+ available.
+
config MAC80211_LEDS
bool "Enable LED triggers"
depends on MAC80211 && LEDS_TRIGGERS
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 219cd9f9341..1e6237b3484 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -1,8 +1,9 @@
-obj-$(CONFIG_MAC80211) += mac80211.o rc80211_simple.o
+obj-$(CONFIG_MAC80211) += mac80211.o
mac80211-objs-$(CONFIG_MAC80211_LEDS) += ieee80211_led.o
mac80211-objs-$(CONFIG_MAC80211_DEBUGFS) += debugfs.o debugfs_sta.o debugfs_netdev.o debugfs_key.o
mac80211-objs-$(CONFIG_NET_SCHED) += wme.o
+mac80211-objs-$(CONFIG_MAC80211_RCSIMPLE) += rc80211_simple.o
mac80211-objs := \
ieee80211.o \
diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c
index f484ca7ade9..e0ee65a969b 100644
--- a/net/mac80211/ieee80211.c
+++ b/net/mac80211/ieee80211.c
@@ -1072,7 +1072,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
ieee80211_debugfs_add_netdev(IEEE80211_DEV_TO_SUB_IF(local->mdev));
ieee80211_if_set_type(local->mdev, IEEE80211_IF_TYPE_AP);
- result = ieee80211_init_rate_ctrl_alg(local, NULL);
+ result = ieee80211_init_rate_ctrl_alg(local,
+ hw->rate_control_algorithm);
if (result < 0) {
printk(KERN_DEBUG "%s: Failed to initialize rate control "
"algorithm\n", wiphy_name(local->hw.wiphy));
@@ -1233,8 +1234,17 @@ static int __init ieee80211_init(void)
BUILD_BUG_ON(sizeof(struct ieee80211_tx_packet_data) > sizeof(skb->cb));
+#ifdef CONFIG_MAC80211_RCSIMPLE
+ ret = ieee80211_rate_control_register(&mac80211_rcsimple);
+ if (ret)
+ return ret;
+#endif
+
ret = ieee80211_wme_register();
if (ret) {
+#ifdef CONFIG_MAC80211_RCSIMPLE
+ ieee80211_rate_control_unregister(&mac80211_rcsimple);
+#endif
printk(KERN_DEBUG "ieee80211_init: failed to "
"initialize WME (err=%d)\n", ret);
return ret;
@@ -1248,6 +1258,10 @@ static int __init ieee80211_init(void)
static void __exit ieee80211_exit(void)
{
+#ifdef CONFIG_MAC80211_RCSIMPLE
+ ieee80211_rate_control_unregister(&mac80211_rcsimple);
+#endif
+
ieee80211_wme_unregister();
ieee80211_debugfs_netdev_exit();
}
diff --git a/net/mac80211/ieee80211_common.h b/net/mac80211/ieee80211_common.h
deleted file mode 100644
index c15295d43d8..00000000000
--- a/net/mac80211/ieee80211_common.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * IEEE 802.11 driver (80211.o) -- hostapd interface
- * Copyright 2002-2004, Instant802 Networks, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef IEEE80211_COMMON_H
-#define IEEE80211_COMMON_H
-
-#include <linux/types.h>
-
-/*
- * This is common header information with user space. It is used on all
- * frames sent to wlan#ap interface.
- */
-
-#define IEEE80211_FI_VERSION 0x80211001
-
-struct ieee80211_frame_info {
- __be32 version;
- __be32 length;
- __be64 mactime;
- __be64 hosttime;
- __be32 phytype;
- __be32 channel;
- __be32 datarate;
- __be32 antenna;
- __be32 priority;
- __be32 ssi_type;
- __be32 ssi_signal;
- __be32 ssi_noise;
- __be32 preamble;
- __be32 encoding;
-
- /* Note: this structure is otherwise identical to capture format used
- * in linux-wlan-ng, but this additional field is used to provide meta
- * data about the frame to hostapd. This was the easiest method for
- * providing this information, but this might change in the future. */
- __be32 msg_type;
-} __attribute__ ((packed));
-
-
-enum ieee80211_msg_type {
- ieee80211_msg_normal = 0,
- ieee80211_msg_tx_callback_ack = 1,
- ieee80211_msg_tx_callback_fail = 2,
- /* hole at 3, was ieee80211_msg_passive_scan but unused */
- /* hole at 4, was ieee80211_msg_wep_frame_unknown_key but now unused */
- ieee80211_msg_michael_mic_failure = 5,
- /* hole at 6, was monitor but never sent to userspace */
- ieee80211_msg_sta_not_assoc = 7,
- /* 8 was ieee80211_msg_set_aid_for_sta */
- /* 9 was ieee80211_msg_key_threshold_notification */
- /* 11 was ieee80211_msg_radar */
-};
-
-struct ieee80211_msg_key_notification {
- int tx_rx_count;
- char ifname[IFNAMSIZ];
- u8 addr[ETH_ALEN]; /* ff:ff:ff:ff:ff:ff for broadcast keys */
-};
-
-
-enum ieee80211_phytype {
- ieee80211_phytype_fhss_dot11_97 = 1,
- ieee80211_phytype_dsss_dot11_97 = 2,
- ieee80211_phytype_irbaseband = 3,
- ieee80211_phytype_dsss_dot11_b = 4,
- ieee80211_phytype_pbcc_dot11_b = 5,
- ieee80211_phytype_ofdm_dot11_g = 6,
- ieee80211_phytype_pbcc_dot11_g = 7,
- ieee80211_phytype_ofdm_dot11_a = 8,
-};
-
-enum ieee80211_ssi_type {
- ieee80211_ssi_none = 0,
- ieee80211_ssi_norm = 1, /* normalized, 0-1000 */
- ieee80211_ssi_dbm = 2,
- ieee80211_ssi_raw = 3, /* raw SSI */
-};
-
-struct ieee80211_radar_info {
- int channel;
- int radar;
- int radar_type;
-};
-
-#endif /* IEEE80211_COMMON_H */
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index d575ccd67e9..72e1c93dd87 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -230,6 +230,7 @@ struct ieee80211_if_vlan {
#define IEEE80211_STA_AUTO_SSID_SEL BIT(10)
#define IEEE80211_STA_AUTO_BSSID_SEL BIT(11)
#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12)
+#define IEEE80211_STA_PRIVACY_INVOKED BIT(13)
struct ieee80211_if_sta {
enum {
IEEE80211_DISABLED, IEEE80211_AUTHENTICATE,
@@ -261,7 +262,6 @@ struct ieee80211_if_sta {
unsigned long request;
struct sk_buff_head skb_queue;
- int key_management_enabled;
unsigned long last_probe;
#define IEEE80211_AUTH_ALG_OPEN BIT(0)
diff --git a/net/mac80211/ieee80211_ioctl.c b/net/mac80211/ieee80211_ioctl.c
index 6caa3ec2cff..7027eed4d4a 100644
--- a/net/mac80211/ieee80211_ioctl.c
+++ b/net/mac80211/ieee80211_ioctl.c
@@ -917,7 +917,6 @@ static int ieee80211_ioctl_siwauth(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *data, char *extra)
{
- struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
int ret = 0;
@@ -927,18 +926,21 @@ static int ieee80211_ioctl_siwauth(struct net_device *dev,
case IW_AUTH_CIPHER_GROUP:
case IW_AUTH_WPA_ENABLED:
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
- break;
case IW_AUTH_KEY_MGMT:
+ break;
+ case IW_AUTH_PRIVACY_INVOKED:
if (sdata->type != IEEE80211_IF_TYPE_STA)
ret = -EINVAL;
else {
+ sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
/*
- * Key management was set by wpa_supplicant,
- * we only need this to associate to a network
- * that has privacy enabled regardless of not
- * having a key.
+ * Privacy invoked by wpa_supplicant, store the
+ * value and allow associating to a protected
+ * network without having a key up front.
*/
- sdata->u.sta.key_management_enabled = !!data->value;
+ if (data->value)
+ sdata->u.sta.flags |=
+ IEEE80211_STA_PRIVACY_INVOKED;
}
break;
case IW_AUTH_80211_AUTH_ALG:
@@ -948,11 +950,6 @@ static int ieee80211_ioctl_siwauth(struct net_device *dev,
else
ret = -EOPNOTSUPP;
break;
- case IW_AUTH_PRIVACY_INVOKED:
- if (local->ops->set_privacy_invoked)
- ret = local->ops->set_privacy_invoked(
- local_to_hw(local), data->value);
- break;
default:
ret = -EOPNOTSUPP;
break;
diff --git a/net/mac80211/ieee80211_rate.c b/net/mac80211/ieee80211_rate.c
index 93abb8fff14..7254bd60983 100644
--- a/net/mac80211/ieee80211_rate.c
+++ b/net/mac80211/ieee80211_rate.c
@@ -25,13 +25,25 @@ int ieee80211_rate_control_register(struct rate_control_ops *ops)
{
struct rate_control_alg *alg;
+ if (!ops->name)
+ return -EINVAL;
+
+ mutex_lock(&rate_ctrl_mutex);
+ list_for_each_entry(alg, &rate_ctrl_algs, list) {
+ if (!strcmp(alg->ops->name, ops->name)) {
+ /* don't register an algorithm twice */
+ WARN_ON(1);
+ mutex_unlock(&rate_ctrl_mutex);
+ return -EALREADY;
+ }
+ }
+
alg = kzalloc(sizeof(*alg), GFP_KERNEL);
if (alg == NULL) {
+ mutex_unlock(&rate_ctrl_mutex);
return -ENOMEM;
}
alg->ops = ops;
- mutex_lock(&rate_ctrl_mutex);
list_add_tail(&alg->list, &rate_ctrl_algs);
mutex_unlock(&rate_ctrl_mutex);
@@ -61,9 +73,12 @@ ieee80211_try_rate_control_ops_get(const char *name)
struct rate_control_alg *alg;
struct rate_control_ops *ops = NULL;
+ if (!name)
+ return NULL;
+
mutex_lock(&rate_ctrl_mutex);
list_for_each_entry(alg, &rate_ctrl_algs, list) {
- if (!name || !strcmp(alg->ops->name, name))
+ if (!strcmp(alg->ops->name, name))
if (try_module_get(alg->ops->module)) {
ops = alg->ops;
break;
@@ -80,9 +95,12 @@ ieee80211_rate_control_ops_get(const char *name)
{
struct rate_control_ops *ops;
+ if (!name)
+ name = "simple";
+
ops = ieee80211_try_rate_control_ops_get(name);
if (!ops) {
- request_module("rc80211_%s", name ? name : "default");
+ request_module("rc80211_%s", name);
ops = ieee80211_try_rate_control_ops_get(name);
}
return ops;
diff --git a/net/mac80211/ieee80211_rate.h b/net/mac80211/ieee80211_rate.h
index 7cd1ebab4f8..23688139ffb 100644
--- a/net/mac80211/ieee80211_rate.h
+++ b/net/mac80211/ieee80211_rate.h
@@ -65,6 +65,9 @@ struct rate_control_ref {
struct kref kref;
};
+/* default 'simple' algorithm */
+extern struct rate_control_ops mac80211_rcsimple;
+
int ieee80211_rate_control_register(struct rate_control_ops *ops);
void ieee80211_rate_control_unregister(struct rate_control_ops *ops);
diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c
index fc6a3ff3d90..015b3f879aa 100644
--- a/net/mac80211/ieee80211_sta.c
+++ b/net/mac80211/ieee80211_sta.c
@@ -704,10 +704,11 @@ static int ieee80211_privacy_mismatch(struct net_device *dev,
{
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
struct ieee80211_sta_bss *bss;
- int res = 0;
+ int bss_privacy;
+ int wep_privacy;
+ int privacy_invoked;
- if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL) ||
- ifsta->key_management_enabled)
+ if (!ifsta || (ifsta->flags & IEEE80211_STA_MIXED_CELL))
return 0;
bss = ieee80211_rx_bss_get(dev, ifsta->bssid, local->hw.conf.channel,
@@ -715,13 +716,16 @@ static int ieee80211_privacy_mismatch(struct net_device *dev,
if (!bss)
return 0;
- if (ieee80211_sta_wep_configured(dev) !=
- !!(bss->capability & WLAN_CAPABILITY_PRIVACY))
- res = 1;
+ bss_privacy = !!(bss->capability & WLAN_CAPABILITY_PRIVACY);
+ wep_privacy = !!ieee80211_sta_wep_configured(dev);
+ privacy_invoked = !!(ifsta->flags & IEEE80211_STA_PRIVACY_INVOKED);
ieee80211_rx_bss_put(dev, bss);
- return res;
+ if ((bss_privacy == wep_privacy) || (bss_privacy == privacy_invoked))
+ return 0;
+
+ return 1;
}
diff --git a/net/mac80211/rc80211_simple.c b/net/mac80211/rc80211_simple.c
index 314b8de8886..da72737364e 100644
--- a/net/mac80211/rc80211_simple.c
+++ b/net/mac80211/rc80211_simple.c
@@ -7,7 +7,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/types.h>
@@ -29,8 +28,6 @@
#define RATE_CONTROL_INTERVAL (HZ / 20)
#define RATE_CONTROL_MIN_TX 10
-MODULE_ALIAS("rc80211_default");
-
static void rate_control_rate_inc(struct ieee80211_local *local,
struct sta_info *sta)
{
@@ -394,8 +391,7 @@ static void rate_control_simple_remove_sta_debugfs(void *priv, void *priv_sta)
}
#endif
-static struct rate_control_ops rate_control_simple = {
- .module = THIS_MODULE,
+struct rate_control_ops mac80211_rcsimple = {
.name = "simple",
.tx_status = rate_control_simple_tx_status,
.get_rate = rate_control_simple_get_rate,
@@ -410,22 +406,3 @@ static struct rate_control_ops rate_control_simple = {
.remove_sta_debugfs = rate_control_simple_remove_sta_debugfs,
#endif
};
-
-
-static int __init rate_control_simple_init(void)
-{
- return ieee80211_rate_control_register(&rate_control_simple);
-}
-
-
-static void __exit rate_control_simple_exit(void)
-{
- ieee80211_rate_control_unregister(&rate_control_simple);
-}
-
-
-subsys_initcall(rate_control_simple_init);
-module_exit(rate_control_simple_exit);
-
-MODULE_DESCRIPTION("Simple rate control algorithm for ieee80211");
-MODULE_LICENSE("GPL");
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index ece77766ea2..428a9fcf57d 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -509,9 +509,11 @@ ieee80211_rx_h_decrypt(struct ieee80211_txrx_data *rx)
rx->key->tx_rx_count++;
/* TODO: add threshold stuff again */
} else {
+#ifdef CONFIG_MAC80211_DEBUG
if (net_ratelimit())
printk(KERN_DEBUG "%s: RX protected frame,"
" but have no key\n", rx->dev->name);
+#endif /* CONFIG_MAC80211_DEBUG */
return TXRX_DROP;
}
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index a84a23310ff..9bf0e1cc530 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -314,9 +314,11 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_txrx_data *rx)
if (!(rx->u.rx.status->flag & RX_FLAG_DECRYPTED)) {
if (ieee80211_wep_decrypt(rx->local, rx->skb, rx->key)) {
+#ifdef CONFIG_MAC80211_DEBUG
if (net_ratelimit())
printk(KERN_DEBUG "%s: RX WEP frame, decrypt "
"failed\n", rx->dev->name);
+#endif /* CONFIG_MAC80211_DEBUG */
return TXRX_DROP;
}
} else if (!(rx->u.rx.status->flag & RX_FLAG_IV_STRIPPED)) {
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 6695efba57e..20cec1cb956 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -323,9 +323,12 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_txrx_data *rx)
&rx->u.rx.tkip_iv32,
&rx->u.rx.tkip_iv16);
if (res != TKIP_DECRYPT_OK || wpa_test) {
- printk(KERN_DEBUG "%s: TKIP decrypt failed for RX frame from "
- "%s (res=%d)\n",
- rx->dev->name, print_mac(mac, rx->sta->addr), res);
+#ifdef CONFIG_MAC80211_DEBUG
+ if (net_ratelimit())
+ printk(KERN_DEBUG "%s: TKIP decrypt failed for RX "
+ "frame from %s (res=%d)\n", rx->dev->name,
+ print_mac(mac, rx->sta->addr), res);
+#endif /* CONFIG_MAC80211_DEBUG */
return TXRX_DROP;
}
@@ -594,9 +597,12 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_txrx_data *rx)
skb->data + hdrlen + CCMP_HDR_LEN, data_len,
skb->data + skb->len - CCMP_MIC_LEN,
skb->data + hdrlen + CCMP_HDR_LEN)) {
- printk(KERN_DEBUG "%s: CCMP decrypt failed for RX "
- "frame from %s\n", rx->dev->name,
- print_mac(mac, rx->sta->addr));
+#ifdef CONFIG_MAC80211_DEBUG
+ if (net_ratelimit())
+ printk(KERN_DEBUG "%s: CCMP decrypt failed "
+ "for RX frame from %s\n", rx->dev->name,
+ print_mac(mac, rx->sta->addr));
+#endif /* CONFIG_MAC80211_DEBUG */
return TXRX_DROP;
}
}
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 93c58f97383..ad0e36ebea3 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -40,15 +40,15 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
# targets
obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o
-obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
-obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o
-obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_TRACE) += xt_TRACE.o
# matches
obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o
@@ -59,22 +59,22 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o
obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o
obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o
obj-$(CONFIG_NETFILTER_XT_MATCH_MARK) += xt_mark.o
obj-$(CONFIG_NETFILTER_XT_MATCH_MULTIPORT) += xt_multiport.o
-obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o
obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
obj-$(CONFIG_NETFILTER_XT_MATCH_SCTP) += xt_sctp.o
obj-$(CONFIG_NETFILTER_XT_MATCH_STATE) += xt_state.o
obj-$(CONFIG_NETFILTER_XT_MATCH_STATISTIC) += xt_statistic.o
obj-$(CONFIG_NETFILTER_XT_MATCH_STRING) += xt_string.o
-obj-$(CONFIG_NETFILTER_XT_MATCH_TIME) += xt_time.o
obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o
-obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_TIME) += xt_time.o
obj-$(CONFIG_NETFILTER_XT_MATCH_U32) += xt_u32.o
-obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
index aa2831587b8..87bc1443c52 100644
--- a/net/netfilter/nf_sockopt.c
+++ b/net/netfilter/nf_sockopt.c
@@ -23,14 +23,13 @@ static inline int overlap(int min1, int max1, int min2, int max2)
/* Functions to register sockopt ranges (exclusive). */
int nf_register_sockopt(struct nf_sockopt_ops *reg)
{
- struct list_head *i;
+ struct nf_sockopt_ops *ops;
int ret = 0;
if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
return -EINTR;
- list_for_each(i, &nf_sockopts) {
- struct nf_sockopt_ops *ops = (struct nf_sockopt_ops *)i;
+ list_for_each_entry(ops, &nf_sockopts, list) {
if (ops->pf == reg->pf
&& (overlap(ops->set_optmin, ops->set_optmax,
reg->set_optmin, reg->set_optmax)
@@ -61,48 +60,57 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
}
EXPORT_SYMBOL(nf_unregister_sockopt);
-/* Call get/setsockopt() */
-static int nf_sockopt(struct sock *sk, int pf, int val,
- char __user *opt, int *len, int get)
+static struct nf_sockopt_ops *nf_sockopt_find(struct sock *sk, int pf,
+ int val, int get)
{
- struct list_head *i;
struct nf_sockopt_ops *ops;
- int ret;
if (sk->sk_net != &init_net)
- return -ENOPROTOOPT;
+ return ERR_PTR(-ENOPROTOOPT);
if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
- return -EINTR;
+ return ERR_PTR(-EINTR);
- list_for_each(i, &nf_sockopts) {
- ops = (struct nf_sockopt_ops *)i;
+ list_for_each_entry(ops, &nf_sockopts, list) {
if (ops->pf == pf) {
if (!try_module_get(ops->owner))
goto out_nosup;
+
if (get) {
- if (val >= ops->get_optmin
- && val < ops->get_optmax) {
- mutex_unlock(&nf_sockopt_mutex);
- ret = ops->get(sk, val, opt, len);
+ if (val >= ops->get_optmin &&
+ val < ops->get_optmax)
goto out;
- }
} else {
- if (val >= ops->set_optmin
- && val < ops->set_optmax) {
- mutex_unlock(&nf_sockopt_mutex);
- ret = ops->set(sk, val, opt, *len);
+ if (val >= ops->set_optmin &&
+ val < ops->set_optmax)
goto out;
- }
}
module_put(ops->owner);
}
}
- out_nosup:
+out_nosup:
+ ops = ERR_PTR(-ENOPROTOOPT);
+out:
mutex_unlock(&nf_sockopt_mutex);
- return -ENOPROTOOPT;
+ return ops;
+}
+
+/* Call get/setsockopt() */
+static int nf_sockopt(struct sock *sk, int pf, int val,
+ char __user *opt, int *len, int get)
+{
+ struct nf_sockopt_ops *ops;
+ int ret;
+
+ ops = nf_sockopt_find(sk, pf, val, get);
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
+
+ if (get)
+ ret = ops->get(sk, val, opt, len);
+ else
+ ret = ops->set(sk, val, opt, *len);
- out:
module_put(ops->owner);
return ret;
}
@@ -124,56 +132,25 @@ EXPORT_SYMBOL(nf_getsockopt);
static int compat_nf_sockopt(struct sock *sk, int pf, int val,
char __user *opt, int *len, int get)
{
- struct list_head *i;
struct nf_sockopt_ops *ops;
int ret;
- if (sk->sk_net != &init_net)
- return -ENOPROTOOPT;
-
-
- if (mutex_lock_interruptible(&nf_sockopt_mutex) != 0)
- return -EINTR;
-
- list_for_each(i, &nf_sockopts) {
- ops = (struct nf_sockopt_ops *)i;
- if (ops->pf == pf) {
- if (!try_module_get(ops->owner))
- goto out_nosup;
-
- if (get) {
- if (val >= ops->get_optmin
- && val < ops->get_optmax) {
- mutex_unlock(&nf_sockopt_mutex);
- if (ops->compat_get)
- ret = ops->compat_get(sk,
- val, opt, len);
- else
- ret = ops->get(sk,
- val, opt, len);
- goto out;
- }
- } else {
- if (val >= ops->set_optmin
- && val < ops->set_optmax) {
- mutex_unlock(&nf_sockopt_mutex);
- if (ops->compat_set)
- ret = ops->compat_set(sk,
- val, opt, *len);
- else
- ret = ops->set(sk,
- val, opt, *len);
- goto out;
- }
- }
- module_put(ops->owner);
- }
+ ops = nf_sockopt_find(sk, pf, val, get);
+ if (IS_ERR(ops))
+ return PTR_ERR(ops);
+
+ if (get) {
+ if (ops->compat_get)
+ ret = ops->compat_get(sk, val, opt, len);
+ else
+ ret = ops->get(sk, val, opt, len);
+ } else {
+ if (ops->compat_set)
+ ret = ops->compat_set(sk, val, opt, *len);
+ else
+ ret = ops->set(sk, val, opt, *len);
}
- out_nosup:
- mutex_unlock(&nf_sockopt_mutex);
- return -ENOPROTOOPT;
- out:
module_put(ops->owner);
return ret;
}
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index 06cff1d1369..d7becf08a93 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -4,7 +4,8 @@
* (c) 2000 Gerd Knorr <kraxel@bytesex.org>
* Nov 2002: Martin Bene <martin.bene@icomedias.com>:
* only ignore TIME_WAIT or gone connections
- * Copyright © Jan Engelhardt <jengelh@gmx.de>, 2007
+ * (C) CC Computer Consultants GmbH, 2007
+ * Contact: <jengelh@computergmbh.de>
*
* based on ...
*
@@ -306,7 +307,7 @@ static void __exit xt_connlimit_exit(void)
module_init(xt_connlimit_init);
module_exit(xt_connlimit_exit);
-MODULE_AUTHOR("Jan Engelhardt <jengelh@gmx.de>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
MODULE_DESCRIPTION("netfilter xt_connlimit match module");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_connlimit");
diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c
index ef48bbd9357..f9c55dcd894 100644
--- a/net/netfilter/xt_time.c
+++ b/net/netfilter/xt_time.c
@@ -1,6 +1,7 @@
/*
* xt_time
- * Copyright © Jan Engelhardt <jengelh@computergmbh.de>, 2007
+ * Copyright © CC Computer Consultants GmbH, 2007
+ * Contact: <jengelh@computergmbh.de>
*
* based on ipt_time by Fabrice MARIE <fabrice@netfilter.org>
* This is a module which is used for time matching
@@ -169,7 +170,7 @@ static bool xt_time_match(const struct sk_buff *skb,
if (skb->tstamp.tv64 == 0)
__net_timestamp((struct sk_buff *)skb);
- stamp = skb->tstamp.tv64;
+ stamp = ktime_to_ns(skb->tstamp);
do_div(stamp, NSEC_PER_SEC);
if (info->flags & XT_TIME_LOCAL_TZ)
diff --git a/net/netfilter/xt_u32.c b/net/netfilter/xt_u32.c
index bec427915b3..af75b8c3f20 100644
--- a/net/netfilter/xt_u32.c
+++ b/net/netfilter/xt_u32.c
@@ -2,7 +2,8 @@
* xt_u32 - kernel module to match u32 packet content
*
* Original author: Don Cohen <don@isis.cs3-inc.com>
- * © Jan Engelhardt <jengelh@gmx.de>, 2007
+ * (C) CC Computer Consultants GmbH, 2007
+ * Contact: <jengelh@computergmbh.de>
*/
#include <linux/module.h>
@@ -129,7 +130,7 @@ static void __exit xt_u32_exit(void)
module_init(xt_u32_init);
module_exit(xt_u32_exit);
-MODULE_AUTHOR("Jan Engelhardt <jengelh@gmx.de>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
MODULE_DESCRIPTION("netfilter u32 match module");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_u32");
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 26017125557..de3988ba1f4 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -752,7 +752,7 @@ struct sock *netlink_getsockbyfilp(struct file *filp)
* 1: repeat lookup - reference dropped while waiting for socket memory.
*/
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
- long timeo, struct sock *ssk)
+ long *timeo, struct sock *ssk)
{
struct netlink_sock *nlk;
@@ -761,7 +761,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
test_bit(0, &nlk->state)) {
DECLARE_WAITQUEUE(wait, current);
- if (!timeo) {
+ if (!*timeo) {
if (!ssk || netlink_is_kernel(ssk))
netlink_overrun(sk);
sock_put(sk);
@@ -775,7 +775,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
test_bit(0, &nlk->state)) &&
!sock_flag(sk, SOCK_DEAD))
- timeo = schedule_timeout(timeo);
+ *timeo = schedule_timeout(*timeo);
__set_current_state(TASK_RUNNING);
remove_wait_queue(&nlk->wait, &wait);
@@ -783,7 +783,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
if (signal_pending(current)) {
kfree_skb(skb);
- return sock_intr_errno(timeo);
+ return sock_intr_errno(*timeo);
}
return 1;
}
@@ -877,7 +877,7 @@ retry:
if (netlink_is_kernel(sk))
return netlink_unicast_kernel(sk, skb);
- err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
+ err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk);
if (err == 1)
goto retry;
if (err)
@@ -1888,7 +1888,7 @@ static void __net_exit netlink_net_exit(struct net *net)
#endif
}
-static struct pernet_operations netlink_net_ops = {
+static struct pernet_operations __net_initdata netlink_net_ops = {
.init = netlink_net_init,
.exit = netlink_net_exit,
};
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4cb2dfba099..8a7807dbba0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -139,9 +139,6 @@ dev->hard_header == NULL (ll header is added by device, we cannot control it)
static HLIST_HEAD(packet_sklist);
static DEFINE_RWLOCK(packet_sklist_lock);
-static atomic_t packet_socks_nr;
-
-
/* Private packet socket structures. */
struct packet_mclist
@@ -236,10 +233,7 @@ static void packet_sock_destruct(struct sock *sk)
return;
}
- atomic_dec(&packet_socks_nr);
-#ifdef PACKET_REFCNT_DEBUG
- printk(KERN_DEBUG "PACKET socket %p is free, %d are alive\n", sk, atomic_read(&packet_socks_nr));
-#endif
+ sk_refcnt_debug_dec(sk);
}
@@ -515,7 +509,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
sll->sll_hatype = dev->type;
sll->sll_protocol = skb->protocol;
sll->sll_pkttype = skb->pkt_type;
- if (unlikely(po->origdev) && skb->pkt_type == PACKET_HOST)
+ if (unlikely(po->origdev))
sll->sll_ifindex = orig_dev->ifindex;
else
sll->sll_ifindex = dev->ifindex;
@@ -661,7 +655,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
sll->sll_hatype = dev->type;
sll->sll_protocol = skb->protocol;
sll->sll_pkttype = skb->pkt_type;
- if (unlikely(po->origdev) && skb->pkt_type == PACKET_HOST)
+ if (unlikely(po->origdev))
sll->sll_ifindex = orig_dev->ifindex;
else
sll->sll_ifindex = dev->ifindex;
@@ -849,6 +843,7 @@ static int packet_release(struct socket *sock)
/* Purge queues */
skb_queue_purge(&sk->sk_receive_queue);
+ sk_refcnt_debug_release(sk);
sock_put(sk);
return 0;
@@ -886,20 +881,14 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
if (protocol == 0)
goto out_unlock;
- if (dev) {
- if (dev->flags&IFF_UP) {
- dev_add_pack(&po->prot_hook);
- sock_hold(sk);
- po->running = 1;
- } else {
- sk->sk_err = ENETDOWN;
- if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_error_report(sk);
- }
- } else {
+ if (!dev || (dev->flags & IFF_UP)) {
dev_add_pack(&po->prot_hook);
sock_hold(sk);
po->running = 1;
+ } else {
+ sk->sk_err = ENETDOWN;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_error_report(sk);
}
out_unlock:
@@ -1010,7 +999,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol)
po->num = proto;
sk->sk_destruct = packet_sock_destruct;
- atomic_inc(&packet_socks_nr);
+ sk_refcnt_debug_inc(sk);
/*
* Attach a protocol block
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index 51d151c0e96..73d60a30712 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -27,6 +27,10 @@
#include <linux/mutex.h>
#include <linux/rfkill.h>
+/* Get declaration of rfkill_switch_all() to shut up sparse. */
+#include "rfkill-input.h"
+
+
MODULE_AUTHOR("Ivo van Doorn <IvDoorn@gmail.com>");
MODULE_VERSION("1.0");
MODULE_DESCRIPTION("RF switch support");
@@ -276,21 +280,17 @@ static struct class rfkill_class = {
static int rfkill_add_switch(struct rfkill *rfkill)
{
- int retval;
-
- retval = mutex_lock_interruptible(&rfkill_mutex);
- if (retval)
- return retval;
+ int error;
- retval = rfkill_toggle_radio(rfkill, rfkill_states[rfkill->type]);
- if (retval)
- goto out;
+ mutex_lock(&rfkill_mutex);
- list_add_tail(&rfkill->node, &rfkill_list);
+ error = rfkill_toggle_radio(rfkill, rfkill_states[rfkill->type]);
+ if (!error)
+ list_add_tail(&rfkill->node, &rfkill_list);
- out:
mutex_unlock(&rfkill_mutex);
- return retval;
+
+ return error;
}
static void rfkill_remove_switch(struct rfkill *rfkill)
@@ -387,20 +387,23 @@ int rfkill_register(struct rfkill *rfkill)
if (!rfkill->toggle_radio)
return -EINVAL;
+ if (rfkill->type >= RFKILL_TYPE_MAX)
+ return -EINVAL;
+
+ snprintf(dev->bus_id, sizeof(dev->bus_id),
+ "rfkill%ld", (long)atomic_inc_return(&rfkill_no) - 1);
+
+ rfkill_led_trigger_register(rfkill);
error = rfkill_add_switch(rfkill);
if (error)
return error;
- snprintf(dev->bus_id, sizeof(dev->bus_id),
- "rfkill%ld", (long)atomic_inc_return(&rfkill_no) - 1);
-
error = device_add(dev);
if (error) {
rfkill_remove_switch(rfkill);
return error;
}
- rfkill_led_trigger_register(rfkill);
return 0;
}
@@ -416,9 +419,9 @@ EXPORT_SYMBOL(rfkill_register);
*/
void rfkill_unregister(struct rfkill *rfkill)
{
- rfkill_led_trigger_unregister(rfkill);
device_del(&rfkill->dev);
rfkill_remove_switch(rfkill);
+ rfkill_led_trigger_unregister(rfkill);
put_device(&rfkill->dev);
}
EXPORT_SYMBOL(rfkill_unregister);
@@ -448,5 +451,5 @@ static void __exit rfkill_exit(void)
class_unregister(&rfkill_class);
}
-module_init(rfkill_init);
+subsys_initcall(rfkill_init);
module_exit(rfkill_exit);
diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
index fe03f71f17d..f3a2bd747a8 100644
--- a/net/rxrpc/ar-local.c
+++ b/net/rxrpc/ar-local.c
@@ -114,7 +114,7 @@ static int rxrpc_create_local(struct rxrpc_local *local)
return 0;
error:
- local->socket->ops->shutdown(local->socket, 2);
+ kernel_sock_shutdown(local->socket, SHUT_RDWR);
local->socket->sk->sk_user_data = NULL;
sock_release(local->socket);
local->socket = NULL;
@@ -267,7 +267,7 @@ static void rxrpc_destroy_local(struct work_struct *work)
/* finish cleaning up the local descriptor */
rxrpc_purge_queue(&local->accept_queue);
rxrpc_purge_queue(&local->reject_queue);
- local->socket->ops->shutdown(local->socket, 2);
+ kernel_sock_shutdown(local->socket, SHUT_RDWR);
sock_release(local->socket);
up_read(&rxrpc_local_sem);
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 9e98c6e567d..c3900820916 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -91,7 +91,7 @@ static struct tc_u_common *u32_list;
static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift)
{
- unsigned h = (key & sel->hmask)>>fshift;
+ unsigned h = ntohl(key & sel->hmask)>>fshift;
return h;
}
@@ -613,17 +613,7 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
n->ht_up = ht;
n->handle = handle;
-{
- u8 i = 0;
- u32 mask = s->hmask;
- if (mask) {
- while (!(mask & 1)) {
- i++;
- mask>>=1;
- }
- }
- n->fshift = i;
-}
+ n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
#ifdef CONFIG_CLS_U32_MARK
if (tb[TCA_U32_MARK-1]) {
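The removed open-coded loop and the new ffs()-based expression compute the same shift: the position of the lowest set bit in the host-order mask, or zero for an empty mask. A small userspace sketch of that equivalence, with an assumed mask value:

#include <arpa/inet.h>	/* ntohl, htonl */
#include <stdio.h>
#include <strings.h>	/* ffs */

/* Shift derived from a network-order hash mask, as in the patched u32_change(). */
static unsigned int fshift_from_hmask(unsigned int hmask_be)
{
	unsigned int mask = ntohl(hmask_be);

	return mask ? (unsigned int)ffs(mask) - 1 : 0;
}

int main(void)
{
	unsigned int hmask_be = htonl(0x0000ff00);	/* example mask */

	printf("fshift = %u\n", fshift_from_hmask(hmask_be));	/* prints 8 */
	return 0;
}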
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index fa1a6f45dc4..e595e6570ce 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -134,7 +134,7 @@ static inline int qdisc_restart(struct net_device *dev)
{
struct Qdisc *q = dev->qdisc;
struct sk_buff *skb;
- int ret;
+ int ret = NETDEV_TX_BUSY;
/* Dequeue packet */
if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))
@@ -145,7 +145,8 @@ static inline int qdisc_restart(struct net_device *dev)
spin_unlock(&dev->queue_lock);
HARD_TX_LOCK(dev, smp_processor_id());
- ret = dev_hard_start_xmit(skb, dev);
+ if (!netif_subqueue_stopped(dev, skb))
+ ret = dev_hard_start_xmit(skb, dev);
HARD_TX_UNLOCK(dev);
spin_lock(&dev->queue_lock);
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 421281d9dd1..c0ed06d4a50 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -252,6 +252,9 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
static inline int teql_resolve(struct sk_buff *skb,
struct sk_buff *skb_res, struct net_device *dev)
{
+ if (dev->qdisc == &noop_qdisc)
+ return -ENODEV;
+
if (dev->header_ops == NULL ||
skb->dst == NULL ||
skb->dst->neighbour == NULL)
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 03158e3665d..013e3d3ab0f 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -262,10 +262,14 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
*/
asoc->peer.sack_needed = 1;
- /* Assume that the peer recongizes ASCONF until reported otherwise
- * via an ERROR chunk.
+ /* Assume that the peer will tell us if he recognizes ASCONF
+ * as part of INIT exchange.
+ * The sctp_addip_noauth option is there for backward compatibility
+ * and reverts to the old behavior.
*/
- asoc->peer.asconf_capable = 1;
+ asoc->peer.asconf_capable = 0;
+ if (sctp_addip_noauth)
+ asoc->peer.asconf_capable = 1;
/* Create an input queue. */
sctp_inq_init(&asoc->base.inqueue);
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index dfffa94fb9f..cae95af9a8c 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -180,9 +180,7 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new,
/* Delete an address from the bind address list in the SCTP_bind_addr
* structure.
*/
-int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr,
- void fastcall (*rcu_call)(struct rcu_head *head,
- void (*func)(struct rcu_head *head)))
+int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr)
{
struct sctp_sockaddr_entry *addr, *temp;
@@ -198,15 +196,10 @@ int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr,
}
}
- /* Call the rcu callback provided in the args. This function is
- * called by both BH packet processing and user side socket option
- * processing, but it works on different lists in those 2 contexts.
- * Each context provides it's own callback, whether call_rcu_bh()
- * or call_rcu(), to make sure that we wait for an appropriate time.
- */
if (addr && !addr->valid) {
- rcu_call(&addr->rcu, sctp_local_addr_free);
+ call_rcu(&addr->rcu, sctp_local_addr_free);
SCTP_DBG_OBJCNT_DEC(addr);
+ return 0;
}
return -EINVAL;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 2d2d81ef4a6..de6f505d6ff 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -328,24 +328,35 @@ static struct sctp_association *__sctp_endpoint_lookup_assoc(
const union sctp_addr *paddr,
struct sctp_transport **transport)
{
+ struct sctp_association *asoc = NULL;
+ struct sctp_transport *t = NULL;
+ struct sctp_hashbucket *head;
+ struct sctp_ep_common *epb;
+ struct hlist_node *node;
+ int hash;
int rport;
- struct sctp_association *asoc;
- struct list_head *pos;
+ *transport = NULL;
rport = ntohs(paddr->v4.sin_port);
- list_for_each(pos, &ep->asocs) {
- asoc = list_entry(pos, struct sctp_association, asocs);
- if (rport == asoc->peer.port) {
- *transport = sctp_assoc_lookup_paddr(asoc, paddr);
-
- if (*transport)
- return asoc;
+ hash = sctp_assoc_hashfn(ep->base.bind_addr.port, rport);
+ head = &sctp_assoc_hashtable[hash];
+ read_lock(&head->lock);
+ sctp_for_each_hentry(epb, node, &head->chain) {
+ asoc = sctp_assoc(epb);
+ if (asoc->ep != ep || rport != asoc->peer.port)
+ goto next;
+
+ t = sctp_assoc_lookup_paddr(asoc, paddr);
+ if (t) {
+ *transport = t;
+ break;
}
+next:
+ asoc = NULL;
}
-
- *transport = NULL;
- return NULL;
+ read_unlock(&head->lock);
+ return asoc;
}
/* Lookup association on an endpoint based on a peer address. BH-safe. */
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 86503e7fa21..91ae463b079 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -656,7 +656,6 @@ discard:
/* Insert endpoint into the hash table. */
static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
{
- struct sctp_ep_common **epp;
struct sctp_ep_common *epb;
struct sctp_hashbucket *head;
@@ -666,12 +665,7 @@ static void __sctp_hash_endpoint(struct sctp_endpoint *ep)
head = &sctp_ep_hashtable[epb->hashent];
sctp_write_lock(&head->lock);
- epp = &head->chain;
- epb->next = *epp;
- if (epb->next)
- (*epp)->pprev = &epb->next;
- *epp = epb;
- epb->pprev = epp;
+ hlist_add_head(&epb->node, &head->chain);
sctp_write_unlock(&head->lock);
}
@@ -691,19 +685,15 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
epb = &ep->base;
+ if (hlist_unhashed(&epb->node))
+ return;
+
epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
head = &sctp_ep_hashtable[epb->hashent];
sctp_write_lock(&head->lock);
-
- if (epb->pprev) {
- if (epb->next)
- epb->next->pprev = epb->pprev;
- *epb->pprev = epb->next;
- epb->pprev = NULL;
- }
-
+ __hlist_del(&epb->node);
sctp_write_unlock(&head->lock);
}
@@ -721,12 +711,13 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l
struct sctp_hashbucket *head;
struct sctp_ep_common *epb;
struct sctp_endpoint *ep;
+ struct hlist_node *node;
int hash;
hash = sctp_ep_hashfn(ntohs(laddr->v4.sin_port));
head = &sctp_ep_hashtable[hash];
read_lock(&head->lock);
- for (epb = head->chain; epb; epb = epb->next) {
+ sctp_for_each_hentry(epb, node, &head->chain) {
ep = sctp_ep(epb);
if (sctp_endpoint_is_match(ep, laddr))
goto hit;
@@ -744,7 +735,6 @@ hit:
/* Insert association into the hash table. */
static void __sctp_hash_established(struct sctp_association *asoc)
{
- struct sctp_ep_common **epp;
struct sctp_ep_common *epb;
struct sctp_hashbucket *head;
@@ -756,12 +746,7 @@ static void __sctp_hash_established(struct sctp_association *asoc)
head = &sctp_assoc_hashtable[epb->hashent];
sctp_write_lock(&head->lock);
- epp = &head->chain;
- epb->next = *epp;
- if (epb->next)
- (*epp)->pprev = &epb->next;
- *epp = epb;
- epb->pprev = epp;
+ hlist_add_head(&epb->node, &head->chain);
sctp_write_unlock(&head->lock);
}
@@ -790,14 +775,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
head = &sctp_assoc_hashtable[epb->hashent];
sctp_write_lock(&head->lock);
-
- if (epb->pprev) {
- if (epb->next)
- epb->next->pprev = epb->pprev;
- *epb->pprev = epb->next;
- epb->pprev = NULL;
- }
-
+ __hlist_del(&epb->node);
sctp_write_unlock(&head->lock);
}
@@ -822,6 +800,7 @@ static struct sctp_association *__sctp_lookup_association(
struct sctp_ep_common *epb;
struct sctp_association *asoc;
struct sctp_transport *transport;
+ struct hlist_node *node;
int hash;
/* Optimize here for direct hit, only listening connections can
@@ -830,7 +809,7 @@ static struct sctp_association *__sctp_lookup_association(
hash = sctp_assoc_hashfn(ntohs(local->v4.sin_port), ntohs(peer->v4.sin_port));
head = &sctp_assoc_hashtable[hash];
read_lock(&head->lock);
- for (epb = head->chain; epb; epb = epb->next) {
+ sctp_for_each_hentry(epb, node, &head->chain) {
asoc = sctp_assoc(epb);
transport = sctp_assoc_is_match(asoc, local, peer);
if (transport)
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index f10fe7fbf24..cf4b7eb023b 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -90,6 +90,10 @@ void sctp_inq_free(struct sctp_inq *queue)
void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
{
/* Directly call the packet handling routine. */
+ if (chunk->rcvr->dead) {
+ sctp_chunk_free(chunk);
+ return;
+ }
/* We are now calling this either from the soft interrupt
* or from the backlog processing.
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 28f4fe77cee..fa76f235169 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -382,7 +382,7 @@ static void sctp_insert_list(struct list_head *head, struct list_head *new)
/* Mark all the eligible packets on a transport for retransmission. */
void sctp_retransmit_mark(struct sctp_outq *q,
struct sctp_transport *transport,
- __u8 fast_retransmit)
+ __u8 reason)
{
struct list_head *lchunk, *ltemp;
struct sctp_chunk *chunk;
@@ -412,20 +412,20 @@ void sctp_retransmit_mark(struct sctp_outq *q,
continue;
}
- /* If we are doing retransmission due to a fast retransmit,
- * only the chunk's that are marked for fast retransmit
- * should be added to the retransmit queue. If we are doing
- * retransmission due to a timeout or pmtu discovery, only the
- * chunks that are not yet acked should be added to the
- * retransmit queue.
+ /* If we are doing retransmission due to a timeout or pmtu
+ * discovery, only the chunks that are not yet acked should
+ * be added to the retransmit queue.
*/
- if ((fast_retransmit && (chunk->fast_retransmit > 0)) ||
- (!fast_retransmit && !chunk->tsn_gap_acked)) {
+ if ((reason == SCTP_RTXR_FAST_RTX &&
+ (chunk->fast_retransmit > 0)) ||
+ (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) {
/* If this chunk was sent less then 1 rto ago, do not
* retransmit this chunk, but give the peer time
- * to acknowlege it.
+ * to acknowledge it. Do this only when
+ * retransmitting due to T3 timeout.
*/
- if ((jiffies - chunk->sent_at) < transport->rto)
+ if (reason == SCTP_RTXR_T3_RTX &&
+ (jiffies - chunk->sent_at) < transport->last_rto)
continue;
/* RFC 2960 6.2.1 Processing a Received SACK
@@ -467,10 +467,10 @@ void sctp_retransmit_mark(struct sctp_outq *q,
}
}
- SCTP_DEBUG_PRINTK("%s: transport: %p, fast_retransmit: %d, "
+ SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, "
"cwnd: %d, ssthresh: %d, flight_size: %d, "
"pba: %d\n", __FUNCTION__,
- transport, fast_retransmit,
+ transport, reason,
transport->cwnd, transport->ssthresh,
transport->flight_size,
transport->partial_bytes_acked);
@@ -484,7 +484,6 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
sctp_retransmit_reason_t reason)
{
int error = 0;
- __u8 fast_retransmit = 0;
switch(reason) {
case SCTP_RTXR_T3_RTX:
@@ -499,16 +498,18 @@ void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
case SCTP_RTXR_FAST_RTX:
SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
- fast_retransmit = 1;
break;
case SCTP_RTXR_PMTUD:
SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
break;
+ case SCTP_RTXR_T1_RTX:
+ SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS);
+ break;
default:
BUG();
}
- sctp_retransmit_mark(q, transport, fast_retransmit);
+ sctp_retransmit_mark(q, transport, reason);
/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
* the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
@@ -641,7 +642,8 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
/* If we are here due to a retransmit timeout or a fast
* retransmit and if there are any chunks left in the retransmit
- * queue that could not fit in the PMTU sized packet, they need * to be marked as ineligible for a subsequent fast retransmit.
+ * queue that could not fit in the PMTU sized packet, they need
+ * to be marked as ineligible for a subsequent fast retransmit.
*/
if (rtx_timeout && !lchunk) {
list_for_each(lchunk1, lqueue) {
@@ -660,10 +662,9 @@ static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
int sctp_outq_uncork(struct sctp_outq *q)
{
int error = 0;
- if (q->cork) {
+ if (q->cork)
q->cork = 0;
- error = sctp_outq_flush(q, 0);
- }
+ error = sctp_outq_flush(q, 0);
return error;
}
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index e4cd841a22e..24997320407 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -225,6 +225,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
struct sctp_ep_common *epb;
struct sctp_endpoint *ep;
struct sock *sk;
+ struct hlist_node *node;
int hash = *(loff_t *)v;
if (hash >= sctp_ep_hashsize)
@@ -233,7 +234,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
head = &sctp_ep_hashtable[hash];
sctp_local_bh_disable();
read_lock(&head->lock);
- for (epb = head->chain; epb; epb = epb->next) {
+ sctp_for_each_hentry(epb, node, &head->chain) {
ep = sctp_ep(epb);
sk = epb->sk;
seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
@@ -328,6 +329,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
struct sctp_ep_common *epb;
struct sctp_association *assoc;
struct sock *sk;
+ struct hlist_node *node;
int hash = *(loff_t *)v;
if (hash >= sctp_assoc_hashsize)
@@ -336,7 +338,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
head = &sctp_assoc_hashtable[hash];
sctp_local_bh_disable();
read_lock(&head->lock);
- for (epb = head->chain; epb; epb = epb->next) {
+ sctp_for_each_hentry(epb, node, &head->chain) {
assoc = sctp_assoc(epb);
sk = epb->sk;
seq_printf(seq,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 40c1a47d1b8..d50f610d1b0 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1137,7 +1137,7 @@ SCTP_STATIC __init int sctp_init(void)
}
for (i = 0; i < sctp_assoc_hashsize; i++) {
rwlock_init(&sctp_assoc_hashtable[i].lock);
- sctp_assoc_hashtable[i].chain = NULL;
+ INIT_HLIST_HEAD(&sctp_assoc_hashtable[i].chain);
}
/* Allocate and initialize the endpoint hash table. */
@@ -1151,7 +1151,7 @@ SCTP_STATIC __init int sctp_init(void)
}
for (i = 0; i < sctp_ep_hashsize; i++) {
rwlock_init(&sctp_ep_hashtable[i].lock);
- sctp_ep_hashtable[i].chain = NULL;
+ INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
}
/* Allocate and initialize the SCTP port hash table. */
@@ -1170,7 +1170,7 @@ SCTP_STATIC __init int sctp_init(void)
}
for (i = 0; i < sctp_port_hashsize; i++) {
spin_lock_init(&sctp_port_hashtable[i].lock);
- sctp_port_hashtable[i].chain = NULL;
+ INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
}
printk(KERN_INFO "SCTP: Hash tables configured "
@@ -1179,6 +1179,7 @@ SCTP_STATIC __init int sctp_init(void)
/* Disable ADDIP by default. */
sctp_addip_enable = 0;
+ sctp_addip_noauth = 0;
/* Enable PR-SCTP by default. */
sctp_prsctp_enable = 1;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index c377e4e8f65..5a9783c38de 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1788,9 +1788,14 @@ static int sctp_process_inv_paramlength(const struct sctp_association *asoc,
sizeof(sctp_paramhdr_t);
+ /* This is a fatal error. Any accumulated non-fatal errors are
+ * not reported.
+ */
+ if (*errp)
+ sctp_chunk_free(*errp);
+
/* Create an error chunk and fill it in with our payload. */
- if (!*errp)
- *errp = sctp_make_op_error_space(asoc, chunk, payload_len);
+ *errp = sctp_make_op_error_space(asoc, chunk, payload_len);
if (*errp) {
sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION,
@@ -1813,9 +1818,15 @@ static int sctp_process_hn_param(const struct sctp_association *asoc,
{
__u16 len = ntohs(param.p->length);
- /* Make an ERROR chunk. */
- if (!*errp)
- *errp = sctp_make_op_error_space(asoc, chunk, len);
+ /* Processing of the HOST_NAME parameter will generate an
+ * ABORT. If we've accumulated any non-fatal errors, they
+ * would be unrecognized parameters and we should not include
+ * them in the ABORT.
+ */
+ if (*errp)
+ sctp_chunk_free(*errp);
+
+ *errp = sctp_make_op_error_space(asoc, chunk, len);
if (*errp) {
sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, len);
@@ -1847,7 +1858,7 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
break;
case SCTP_CID_ASCONF:
case SCTP_CID_ASCONF_ACK:
- asoc->peer.addip_capable = 1;
+ asoc->peer.asconf_capable = 1;
break;
default:
break;
@@ -1862,56 +1873,40 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
* taken if the processing endpoint does not recognize the
* Parameter Type.
*
- * 00 - Stop processing this SCTP chunk and discard it,
- * do not process any further chunks within it.
+ * 00 - Stop processing this parameter; do not process any further
+ * parameters within this chunk
*
- * 01 - Stop processing this SCTP chunk and discard it,
- * do not process any further chunks within it, and report
- * the unrecognized parameter in an 'Unrecognized
- * Parameter Type' (in either an ERROR or in the INIT ACK).
+ * 01 - Stop processing this parameter, do not process any further
+ * parameters within this chunk, and report the unrecognized
+ * parameter in an 'Unrecognized Parameter' ERROR chunk.
*
* 10 - Skip this parameter and continue processing.
*
* 11 - Skip this parameter and continue processing but
* report the unrecognized parameter in an
- * 'Unrecognized Parameter Type' (in either an ERROR or in
- * the INIT ACK).
+ * 'Unrecognized Parameter' ERROR chunk.
*
* Return value:
- * 0 - discard the chunk
- * 1 - continue with the chunk
+ * SCTP_IERROR_NO_ERROR - continue with the chunk
+ * SCTP_IERROR_ERROR - stop and report an error.
+ * SCTP_IERROR_NOMEM - out of memory.
*/
-static int sctp_process_unk_param(const struct sctp_association *asoc,
- union sctp_params param,
- struct sctp_chunk *chunk,
- struct sctp_chunk **errp)
+static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_chunk *chunk,
+ struct sctp_chunk **errp)
{
- int retval = 1;
+ int retval = SCTP_IERROR_NO_ERROR;
switch (param.p->type & SCTP_PARAM_ACTION_MASK) {
case SCTP_PARAM_ACTION_DISCARD:
- retval = 0;
- break;
- case SCTP_PARAM_ACTION_DISCARD_ERR:
- retval = 0;
- /* Make an ERROR chunk, preparing enough room for
- * returning multiple unknown parameters.
- */
- if (NULL == *errp)
- *errp = sctp_make_op_error_space(asoc, chunk,
- ntohs(chunk->chunk_hdr->length));
-
- if (*errp) {
- sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM,
- WORD_ROUND(ntohs(param.p->length)));
- sctp_addto_chunk(*errp,
- WORD_ROUND(ntohs(param.p->length)),
- param.v);
- }
-
+ retval = SCTP_IERROR_ERROR;
break;
case SCTP_PARAM_ACTION_SKIP:
break;
+ case SCTP_PARAM_ACTION_DISCARD_ERR:
+ retval = SCTP_IERROR_ERROR;
+ /* Fall through */
case SCTP_PARAM_ACTION_SKIP_ERR:
/* Make an ERROR chunk, preparing enough room for
* returning multiple unknown parameters.
@@ -1932,9 +1927,8 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
* to the peer and the association won't be
* established.
*/
- retval = 0;
+ retval = SCTP_IERROR_NOMEM;
}
-
break;
default:
break;
@@ -1943,18 +1937,20 @@ static int sctp_process_unk_param(const struct sctp_association *asoc,
return retval;
}
-/* Find unrecognized parameters in the chunk.
+/* Verify variable length parameters
* Return values:
- * 0 - discard the chunk
- * 1 - continue with the chunk
+ * SCTP_IERROR_ABORT - trigger an ABORT
+ * SCTP_IERROR_NOMEM - out of memory (abort)
+ * SCTP_IERROR_ERROR - stop processing, trigger an ERROR
+ * SCTP_IERROR_NO_ERROR - continue with the chunk
*/
-static int sctp_verify_param(const struct sctp_association *asoc,
- union sctp_params param,
- sctp_cid_t cid,
- struct sctp_chunk *chunk,
- struct sctp_chunk **err_chunk)
+static sctp_ierror_t sctp_verify_param(const struct sctp_association *asoc,
+ union sctp_params param,
+ sctp_cid_t cid,
+ struct sctp_chunk *chunk,
+ struct sctp_chunk **err_chunk)
{
- int retval = 1;
+ int retval = SCTP_IERROR_NO_ERROR;
/* FIXME - This routine is not looking at each parameter per the
* chunk type, i.e., unrecognized parameters should be further
@@ -1976,7 +1972,9 @@ static int sctp_verify_param(const struct sctp_association *asoc,
case SCTP_PARAM_HOST_NAME_ADDRESS:
/* Tell the peer, we won't support this param. */
- return sctp_process_hn_param(asoc, param, chunk, err_chunk);
+ sctp_process_hn_param(asoc, param, chunk, err_chunk);
+ retval = SCTP_IERROR_ABORT;
+ break;
case SCTP_PARAM_FWD_TSN_SUPPORT:
if (sctp_prsctp_enable)
@@ -1993,9 +1991,11 @@ static int sctp_verify_param(const struct sctp_association *asoc,
* cause 'Protocol Violation'.
*/
if (SCTP_AUTH_RANDOM_LENGTH !=
- ntohs(param.p->length) - sizeof(sctp_paramhdr_t))
- return sctp_process_inv_paramlength(asoc, param.p,
+ ntohs(param.p->length) - sizeof(sctp_paramhdr_t)) {
+ sctp_process_inv_paramlength(asoc, param.p,
chunk, err_chunk);
+ retval = SCTP_IERROR_ABORT;
+ }
break;
case SCTP_PARAM_CHUNKS:
@@ -2007,9 +2007,11 @@ static int sctp_verify_param(const struct sctp_association *asoc,
* INIT-ACK chunk if the sender wants to receive authenticated
* chunks. Its maximum length is 260 bytes.
*/
- if (260 < ntohs(param.p->length))
- return sctp_process_inv_paramlength(asoc, param.p,
- chunk, err_chunk);
+ if (260 < ntohs(param.p->length)) {
+ sctp_process_inv_paramlength(asoc, param.p,
+ chunk, err_chunk);
+ retval = SCTP_IERROR_ABORT;
+ }
break;
case SCTP_PARAM_HMAC_ALGO:
@@ -2020,8 +2022,7 @@ fallthrough:
default:
SCTP_DEBUG_PRINTK("Unrecognized param: %d for chunk %d.\n",
ntohs(param.p->type), cid);
- return sctp_process_unk_param(asoc, param, chunk, err_chunk);
-
+ retval = sctp_process_unk_param(asoc, param, chunk, err_chunk);
break;
}
return retval;
@@ -2036,6 +2037,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
{
union sctp_params param;
int has_cookie = 0;
+ int result;
/* Verify stream values are non-zero. */
if ((0 == peer_init->init_hdr.num_outbound_streams) ||
@@ -2043,8 +2045,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
(0 == peer_init->init_hdr.init_tag) ||
(SCTP_DEFAULT_MINWINDOW > ntohl(peer_init->init_hdr.a_rwnd))) {
- sctp_process_inv_mandatory(asoc, chunk, errp);
- return 0;
+ return sctp_process_inv_mandatory(asoc, chunk, errp);
}
/* Check for missing mandatory parameters. */
@@ -2062,29 +2063,29 @@ int sctp_verify_init(const struct sctp_association *asoc,
* VIOLATION error. We build the ERROR chunk here and let the normal
* error handling code build and send the packet.
*/
- if (param.v != (void*)chunk->chunk_end) {
- sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
- return 0;
- }
+ if (param.v != (void*)chunk->chunk_end)
+ return sctp_process_inv_paramlength(asoc, param.p, chunk, errp);
/* The only missing mandatory param possible today is
* the state cookie for an INIT-ACK chunk.
*/
- if ((SCTP_CID_INIT_ACK == cid) && !has_cookie) {
- sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE,
- chunk, errp);
- return 0;
- }
-
- /* Find unrecognized parameters. */
+ if ((SCTP_CID_INIT_ACK == cid) && !has_cookie)
+ return sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE,
+ chunk, errp);
+ /* Verify all the variable length parameters */
sctp_walk_params(param, peer_init, init_hdr.params) {
- if (!sctp_verify_param(asoc, param, cid, chunk, errp)) {
- if (SCTP_PARAM_HOST_NAME_ADDRESS == param.p->type)
+ result = sctp_verify_param(asoc, param, cid, chunk, errp);
+ switch (result) {
+ case SCTP_IERROR_ABORT:
+ case SCTP_IERROR_NOMEM:
return 0;
- else
+ case SCTP_IERROR_ERROR:
return 1;
+ case SCTP_IERROR_NO_ERROR:
+ default:
+ break;
}
} /* for (loop through all parameters) */
@@ -2137,11 +2138,14 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
/* If the peer claims support for ADD-IP without support
* for AUTH, disable support for ADD-IP.
+ * Do this only if backward compatible mode is turned off.
*/
- if (asoc->peer.addip_capable && !asoc->peer.auth_capable) {
+ if (!sctp_addip_noauth &&
+ (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) {
asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP |
SCTP_PARAM_DEL_IP |
SCTP_PARAM_SET_PRIMARY);
+ asoc->peer.asconf_capable = 0;
}
/* Walk list of transports, removing transports in the UNKNOWN state. */
@@ -2848,10 +2852,11 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
__be16 err_code;
int length = 0;
- int chunk_len = asconf->skb->len;
+ int chunk_len;
__u32 serial;
int all_param_pass = 1;
+ chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
hdr = (sctp_addiphdr_t *)asconf->skb->data;
serial = ntohl(hdr->serial);
@@ -2952,13 +2957,17 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
/* This is always done in BH context with a socket lock
* held, so the list can not change.
*/
+ local_bh_disable();
list_for_each_entry(saddr, &bp->address_list, list) {
if (sctp_cmp_addr_exact(&saddr->a, &addr))
saddr->use_as_src = 1;
}
+ local_bh_enable();
break;
case SCTP_PARAM_DEL_IP:
- retval = sctp_del_bind_addr(bp, &addr, call_rcu_bh);
+ local_bh_disable();
+ retval = sctp_del_bind_addr(bp, &addr);
+ local_bh_enable();
list_for_each(pos, &asoc->peer.transport_addr_list) {
transport = list_entry(pos, struct sctp_transport,
transports);
@@ -2990,7 +2999,7 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
sctp_addip_param_t *asconf_ack_param;
sctp_errhdr_t *err_param;
int length;
- int asconf_ack_len = asconf_ack->skb->len;
+ int asconf_ack_len;
__be16 err_code;
if (no_err)
@@ -2998,6 +3007,9 @@ static __be16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack,
else
err_code = SCTP_ERROR_REQ_REFUSED;
+ asconf_ack_len = ntohs(asconf_ack->chunk_hdr->length) -
+ sizeof(sctp_chunkhdr_t);
+
/* Skip the addiphdr from the asconf_ack chunk and store a pointer to
* the first asconf_ack parameter.
*/
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index bbdc938da86..78d1a8a49bd 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -453,6 +453,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
* maximum value discussed in rule C7 above (RTO.max) may be
* used to provide an upper bound to this doubling operation.
*/
+ transport->last_rto = transport->rto;
transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
}
@@ -1267,6 +1268,12 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
sctp_ootb_pkt_free(packet);
break;
+ case SCTP_CMD_T1_RETRAN:
+ /* Mark a transport for retransmission. */
+ sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
+ SCTP_RTXR_T1_RTX);
+ break;
+
case SCTP_CMD_RETRAN:
/* Mark a transport for retransmission. */
sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
@@ -1393,7 +1400,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
list_for_each(pos, &asoc->peer.transport_addr_list) {
t = list_entry(pos, struct sctp_transport,
transports);
- sctp_retransmit_mark(&asoc->outqueue, t, 0);
+ sctp_retransmit_mark(&asoc->outqueue, t,
+ SCTP_RTXR_T1_RTX);
}
sctp_add_cmd_sf(commands,
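In the strike handler above, last_rto records the value in effect before the exponential backoff, so later retransmission marking can compare a chunk's age against the timer that actually expired. A minimal sketch of that backoff step, with field names echoing struct sctp_transport and assumed millisecond values:

#include <stdio.h>

struct transport {
	unsigned long rto;	/* current retransmission timeout */
	unsigned long last_rto;	/* value in effect before the last backoff */
	unsigned long rto_max;	/* upper bound, per association */
};

/* Backoff as in sctp_do_8_2_transport_strike(): remember the expired value,
 * then double the RTO, capped at rto_max. */
static void transport_strike(struct transport *t)
{
	t->last_rto = t->rto;
	t->rto = (2 * t->rto < t->rto_max) ? 2 * t->rto : t->rto_max;
}

int main(void)
{
	struct transport t = { .rto = 3000, .last_rto = 3000, .rto_max = 60000 };
	int i;

	for (i = 0; i < 6; i++) {
		transport_strike(&t);
		printf("strike %d: rto=%lu last_rto=%lu\n", i + 1, t.rto, t.last_rto);
	}
	return 0;
}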
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index f01b408508f..5ebbe808d80 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2305,7 +2305,7 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep,
/* If we've sent any data bundled with COOKIE-ECHO we will need to
* resend
*/
- sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN,
+ sctp_add_cmd_sf(commands, SCTP_CMD_T1_RETRAN,
SCTP_TRANSPORT(asoc->peer.primary_path));
/* Cast away the const modifier, as we want to just
@@ -4064,11 +4064,6 @@ static sctp_disposition_t sctp_sf_abort_violation(
struct sctp_chunk *chunk = arg;
struct sctp_chunk *abort = NULL;
- /* Make the abort chunk. */
- abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
- if (!abort)
- goto nomem;
-
/* SCTP-AUTH, Section 6.3:
* It should be noted that if the receiver wants to tear
* down an association in an authenticated way only, the
@@ -4083,6 +4078,11 @@ static sctp_disposition_t sctp_sf_abort_violation(
if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
goto discard;
+ /* Make the abort chunk. */
+ abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
+ if (!abort)
+ goto nomem;
+
if (asoc) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index bd6f42a15a4..ff8bc95670e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -660,7 +660,7 @@ static int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt)
* socket routing and failover schemes. Refer to comments in
* sctp_do_bind(). -daisy
*/
- retval = sctp_del_bind_addr(bp, sa_addr, call_rcu);
+ retval = sctp_del_bind_addr(bp, sa_addr);
addr_buf += af->sockaddr_len;
err_bindx_rem:
@@ -5307,6 +5307,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
{
struct sctp_bind_hashbucket *head; /* hash list */
struct sctp_bind_bucket *pp; /* hash list port iterator */
+ struct hlist_node *node;
unsigned short snum;
int ret;
@@ -5331,7 +5332,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
index = sctp_phashfn(rover);
head = &sctp_port_hashtable[index];
sctp_spin_lock(&head->lock);
- for (pp = head->chain; pp; pp = pp->next)
+ sctp_for_each_hentry(pp, node, &head->chain)
if (pp->port == rover)
goto next;
break;
@@ -5358,7 +5359,7 @@ static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
*/
head = &sctp_port_hashtable[sctp_phashfn(snum)];
sctp_spin_lock(&head->lock);
- for (pp = head->chain; pp; pp = pp->next) {
+ sctp_for_each_hentry(pp, node, &head->chain) {
if (pp->port == snum)
goto pp_found;
}
@@ -5702,10 +5703,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
pp->port = snum;
pp->fastreuse = 0;
INIT_HLIST_HEAD(&pp->owner);
- if ((pp->next = head->chain) != NULL)
- pp->next->pprev = &pp->next;
- head->chain = pp;
- pp->pprev = &head->chain;
+ hlist_add_head(&pp->node, &head->chain);
}
return pp;
}
@@ -5714,9 +5712,7 @@ static struct sctp_bind_bucket *sctp_bucket_create(
static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
{
if (pp && hlist_empty(&pp->owner)) {
- if (pp->next)
- pp->next->pprev = pp->pprev;
- *(pp->pprev) = pp->next;
+ __hlist_del(&pp->node);
kmem_cache_free(sctp_bucket_cachep, pp);
SCTP_DBG_OBJCNT_DEC(bind_bucket);
}
@@ -6455,6 +6451,8 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
}
+DEFINE_PROTO_INUSE(sctp)
+
/* This proto struct describes the ULP interface for SCTP. */
struct proto sctp_prot = {
.name = "SCTP",
@@ -6483,9 +6481,12 @@ struct proto sctp_prot = {
.memory_pressure = &sctp_memory_pressure,
.enter_memory_pressure = sctp_enter_memory_pressure,
.memory_allocated = &sctp_memory_allocated,
+ REF_PROTO_INUSE(sctp)
};
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+DEFINE_PROTO_INUSE(sctpv6)
+
struct proto sctpv6_prot = {
.name = "SCTPv6",
.owner = THIS_MODULE,
@@ -6513,5 +6514,6 @@ struct proto sctpv6_prot = {
.memory_pressure = &sctp_memory_pressure,
.enter_memory_pressure = sctp_enter_memory_pressure,
.memory_allocated = &sctp_memory_allocated,
+ REF_PROTO_INUSE(sctpv6)
};
#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 0669778e433..da4f15734fb 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -263,6 +263,15 @@ static ctl_table sctp_table[] = {
.proc_handler = &proc_dointvec,
.strategy = &sysctl_intvec
},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "addip_noauth_enable",
+ .data = &sctp_addip_noauth,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec
+ },
{ .ctl_name = 0 }
};
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 5f467c914f8..d55ce83a020 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -74,8 +74,8 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
* given destination transport address, set RTO to the protocol
* parameter 'RTO.Initial'.
*/
+ peer->last_rto = peer->rto = msecs_to_jiffies(sctp_rto_initial);
peer->rtt = 0;
- peer->rto = msecs_to_jiffies(sctp_rto_initial);
peer->rttvar = 0;
peer->srtt = 0;
peer->rto_pending = 0;
@@ -385,6 +385,7 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
tp->rto = tp->asoc->rto_max;
tp->rtt = rtt;
+ tp->last_rto = tp->rto;
/* Reset rto_pending so that a new RTT measurement is started when a
* new data chunk is sent.
@@ -578,7 +579,7 @@ void sctp_transport_reset(struct sctp_transport *t)
*/
t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
t->ssthresh = asoc->peer.i.a_rwnd;
- t->rto = asoc->rto_initial;
+ t->last_rto = t->rto = asoc->rto_initial;
t->rtt = 0;
t->srtt = 0;
t->rttvar = 0;
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 4be92d0a2ca..4908041ffb3 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -862,7 +862,7 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
continue;
/* see if this ssn has been marked by skipping */
- if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
+ if (!SSN_lte(cssn, sctp_ssn_peek(in, csid)))
break;
__skb_unlink(pos, &ulpq->lobby);
diff --git a/net/socket.c b/net/socket.c
index 5d879fd3d01..74784dfe8e5 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2319,6 +2319,11 @@ int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
return err;
}
+int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
+{
+ return sock->ops->shutdown(sock, how);
+}
+
/* ABI emulation layers need these two */
EXPORT_SYMBOL(move_addr_to_kernel);
EXPORT_SYMBOL(move_addr_to_user);
@@ -2345,3 +2350,4 @@ EXPORT_SYMBOL(kernel_getsockopt);
EXPORT_SYMBOL(kernel_setsockopt);
EXPORT_SYMBOL(kernel_sendpage);
EXPORT_SYMBOL(kernel_sock_ioctl);
+EXPORT_SYMBOL(kernel_sock_shutdown);
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index dc55cc974c9..1afeb3eb8e4 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -320,9 +320,9 @@ xprt_setup_rdma(struct xprt_create *args)
xprt->slot = kcalloc(xprt->max_reqs,
sizeof(struct rpc_rqst), GFP_KERNEL);
if (xprt->slot == NULL) {
- kfree(xprt);
dprintk("RPC: %s: couldn't allocate %d slots\n",
__func__, xprt->max_reqs);
+ kfree(xprt);
return ERR_PTR(-ENOMEM);
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 515e7a692f9..e835da8fc09 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -457,7 +457,7 @@ static int unix_release_sock (struct sock *sk, int embrion)
* What the above comment does talk about? --ANK(980817)
*/
- if (atomic_read(&unix_tot_inflight))
+ if (unix_tot_inflight)
unix_gc(); /* Garbage collect fds */
return 0;
@@ -599,15 +599,14 @@ static struct sock * unix_create1(struct net *net, struct socket *sock)
struct sock *sk = NULL;
struct unix_sock *u;
- if (atomic_read(&unix_nr_socks) >= 2*get_max_files())
+ atomic_inc(&unix_nr_socks);
+ if (atomic_read(&unix_nr_socks) > 2 * get_max_files())
goto out;
sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
if (!sk)
goto out;
- atomic_inc(&unix_nr_socks);
-
sock_init_data(sock,sk);
lockdep_set_class(&sk->sk_receive_queue.lock,
&af_unix_sk_receive_queue_lock_key);
@@ -625,6 +624,8 @@ static struct sock * unix_create1(struct net *net, struct socket *sock)
init_waitqueue_head(&u->peer_wait);
unix_insert_socket(unix_sockets_unbound, sk);
out:
+ if (sk == NULL)
+ atomic_dec(&unix_nr_socks);
return sk;
}
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 406b6433e46..ebdff3d877a 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -92,7 +92,7 @@ static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
-atomic_t unix_tot_inflight = ATOMIC_INIT(0);
+unsigned int unix_tot_inflight;
static struct sock *unix_get_socket(struct file *filp)
@@ -133,7 +133,7 @@ void unix_inflight(struct file *fp)
} else {
BUG_ON(list_empty(&u->link));
}
- atomic_inc(&unix_tot_inflight);
+ unix_tot_inflight++;
spin_unlock(&unix_gc_lock);
}
}
@@ -147,7 +147,7 @@ void unix_notinflight(struct file *fp)
BUG_ON(list_empty(&u->link));
if (atomic_dec_and_test(&u->inflight))
list_del_init(&u->link);
- atomic_dec(&unix_tot_inflight);
+ unix_tot_inflight--;
spin_unlock(&unix_gc_lock);
}
}
@@ -161,7 +161,7 @@ static inline struct sk_buff *sock_queue_head(struct sock *sk)
for (skb = sock_queue_head(sk)->next, next = skb->next; \
skb != sock_queue_head(sk); skb = next, next = skb->next)
-static void scan_inflight(struct sock *x, void (*func)(struct sock *),
+static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
{
struct sk_buff *skb;
@@ -185,9 +185,9 @@ static void scan_inflight(struct sock *x, void (*func)(struct sock *),
* if it indeed does so
*/
struct sock *sk = unix_get_socket(*fp++);
- if(sk) {
+ if (sk) {
hit = true;
- func(sk);
+ func(unix_sk(sk));
}
}
if (hit && hitlist != NULL) {
@@ -199,7 +199,7 @@ static void scan_inflight(struct sock *x, void (*func)(struct sock *),
spin_unlock(&x->sk_receive_queue.lock);
}
-static void scan_children(struct sock *x, void (*func)(struct sock *),
+static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
{
if (x->sk_state != TCP_LISTEN)
@@ -235,20 +235,18 @@ static void scan_children(struct sock *x, void (*func)(struct sock *),
}
}
-static void dec_inflight(struct sock *sk)
+static void dec_inflight(struct unix_sock *usk)
{
- atomic_dec(&unix_sk(sk)->inflight);
+ atomic_dec(&usk->inflight);
}
-static void inc_inflight(struct sock *sk)
+static void inc_inflight(struct unix_sock *usk)
{
- atomic_inc(&unix_sk(sk)->inflight);
+ atomic_inc(&usk->inflight);
}
-static void inc_inflight_move_tail(struct sock *sk)
+static void inc_inflight_move_tail(struct unix_sock *u)
{
- struct unix_sock *u = unix_sk(sk);
-
atomic_inc(&u->inflight);
/*
* If this is still a candidate, move it to the end of the