Diffstat (limited to 'net/netfilter'):

 net/netfilter/nf_conntrack_core.c | 25
 net/netfilter/nf_log.c            | 16
 net/netfilter/xt_NFQUEUE.c        |  8
 net/netfilter/xt_cluster.c        |  8
 net/netfilter/xt_quota.c          |  1
 net/netfilter/xt_rateest.c        |  2

 6 files changed, 41 insertions(+), 19 deletions(-)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 5f72b94b491..7508f11c5b3 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -335,7 +335,8 @@ begin:
 	h = __nf_conntrack_find(net, tuple);
 	if (h) {
 		ct = nf_ct_tuplehash_to_ctrack(h);
-		if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+		if (unlikely(nf_ct_is_dying(ct) ||
+			     !atomic_inc_not_zero(&ct->ct_general.use)))
 			h = NULL;
 		else {
 			if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple))) {
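
Note: the lookup above runs locklessly under RCU, so an entry found in the hash may already be on its way out; the patch refuses to take a reference once the conntrack is marked dying, and the early_drop() hunk below adds the same test. A minimal userspace sketch of the pattern, using C11 atomics in place of the kernel's atomic_inc_not_zero() and status bits (all names here are illustrative, not kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct obj {
		atomic_uint refcount;   /* 0 means the object is being freed */
		atomic_bool dying;      /* set before teardown starts */
	};

	/* Take a reference only if the object is neither dying nor already
	 * at refcount 0: the moral equivalent of the nf_ct_is_dying() +
	 * atomic_inc_not_zero() pair in the patch. */
	static bool obj_get(struct obj *o)
	{
		unsigned int old;

		if (atomic_load(&o->dying))
			return false;           /* being torn down: don't resurrect */

		old = atomic_load(&o->refcount);
		while (old != 0) {
			if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
				return true;    /* reference taken */
		}
		return false;                   /* raced with the last put */
	}

	int main(void)
	{
		struct obj o = { .refcount = 1, .dying = false };

		printf("get: %d\n", obj_get(&o));  /* 1: reference taken */
		atomic_store(&o.dying, true);
		printf("get: %d\n", obj_get(&o));  /* 0: refused, object dying */
		return 0;
	}
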
@@ -425,7 +426,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	/* Remove from unconfirmed list */
 	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
 
-	__nf_conntrack_hash_insert(ct, hash, repl_hash);
 	/* Timer relative to confirmation time, not original
 	   setting time, otherwise we'd get timer wrap in
 	   weird delay cases. */
@@ -433,8 +433,16 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	add_timer(&ct->timeout);
 	atomic_inc(&ct->ct_general.use);
 	set_bit(IPS_CONFIRMED_BIT, &ct->status);
+
+	/* Since the lookup is lockless, hash insertion must be done after
+	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
+	 * guarantee that no other CPU can find the conntrack before the above
+	 * stores are visible.
+	 */
+	__nf_conntrack_hash_insert(ct, hash, repl_hash);
 	NF_CT_STAT_INC(net, insert);
 	spin_unlock_bh(&nf_conntrack_lock);
+
 	help = nfct_help(ct);
 	if (help && help->helper)
 		nf_conntrack_event_cache(IPCT_HELPER, ct);
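
The comment added above codifies the ordering rule this commit enforces: the conntrack must be fully initialized (timer armed, CONFIRMED bit set, reference taken) before the hash insert publishes it, because lockless readers can find it the moment it is linked in. Below is a sketch of the same publish-after-init discipline, with C11 release/acquire atomics standing in for the kernel's RCU publication primitives (rcu_assign_pointer()/rcu_dereference()); every name here is illustrative:

	#include <stdatomic.h>

	struct conn {
		int confirmed;           /* plain fields, written before publish */
		long timeout;
	};

	static _Atomic(struct conn *) hash_slot;  /* one-slot "hash table" */

	/* Writer: initialize everything, then publish with a release store so
	 * a reader that sees the pointer also sees the initialized fields. */
	static void publish(struct conn *c)
	{
		c->confirmed = 1;
		c->timeout = 30;
		atomic_store_explicit(&hash_slot, c, memory_order_release);
	}

	/* Reader: the acquire load pairs with the release store above. */
	static struct conn *lookup(void)
	{
		return atomic_load_explicit(&hash_slot, memory_order_acquire);
	}

	int main(void)
	{
		static struct conn c;

		publish(&c);
		return lookup()->confirmed ? 0 : 1;
	}
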
@@ -503,7 +511,8 @@ static noinline int early_drop(struct net *net, unsigned int hash)
 			cnt++;
 		}
 
-		if (ct && unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
+		if (ct && unlikely(nf_ct_is_dying(ct) ||
+			     !atomic_inc_not_zero(&ct->ct_general.use)))
 			ct = NULL;
 
 		if (ct || cnt >= NF_CT_EVICTION_RANGE)
 			break;
@@ -1267,13 +1276,19 @@ err_cache:
 	return ret;
 }
 
+/*
+ * We need to use special "null" values, not used in hash table
+ */
+#define UNCONFIRMED_NULLS_VAL	((1<<30)+0)
+#define DYING_NULLS_VAL		((1<<30)+1)
+
 static int nf_conntrack_init_net(struct net *net)
 {
 	int ret;
 
 	atomic_set(&net->ct.count, 0);
-	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, 0);
-	INIT_HLIST_NULLS_HEAD(&net->ct.dying, 0);
+	INIT_HLIST_NULLS_HEAD(&net->ct.unconfirmed, UNCONFIRMED_NULLS_VAL);
+	INIT_HLIST_NULLS_HEAD(&net->ct.dying, DYING_NULLS_VAL);
 	net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
 	if (!net->ct.stat) {
 		ret = -ENOMEM;
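
An hlist_nulls chain ends not in NULL but in an odd sentinel value that records which list the walk finished on; a lockless reader that lands on an unexpected sentinel knows the object was moved between lists mid-walk and restarts. Bit 30 keeps the two new sentinels clear of the values used for the hash buckets. A userspace mirror of the encoding (following include/linux/list_nulls.h; the macro names below are local stand-ins):

	#include <stdio.h>

	/* Low bit set marks a "nulls" end pointer; the remaining bits carry
	 * the list identifier (mirrors is_a_nulls()/get_nulls_value()). */
	#define NULLS_MARKER(v)   ((void *)(1UL | (((unsigned long)(v)) << 1)))
	#define IS_A_NULLS(p)     ((unsigned long)(p) & 1UL)
	#define NULLS_VALUE(p)    ((unsigned long)(p) >> 1)

	#define UNCONFIRMED_NULLS_VAL ((1 << 30) + 0)
	#define DYING_NULLS_VAL       ((1 << 30) + 1)

	int main(void)
	{
		void *end = NULLS_MARKER(DYING_NULLS_VAL);

		if (IS_A_NULLS(end))
			printf("walk ended on list %lu\n", NULLS_VALUE(end));
		/* A reader expecting UNCONFIRMED_NULLS_VAL would see a
		 * mismatch here and restart its lockless lookup. */
		return 0;
	}
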
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 2fefe147750..4e620305f28 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -47,7 +47,6 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger)
 	mutex_lock(&nf_log_mutex);
 
 	if (pf == NFPROTO_UNSPEC) {
-		int i;
 		for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
 			list_add_tail(&(logger->list[i]), &(nf_loggers_l[i]));
 	} else {
@@ -216,7 +215,7 @@ static const struct file_operations nflog_file_ops = {
 #endif /* PROC_FS */
 
 #ifdef CONFIG_SYSCTL
-struct ctl_path nf_log_sysctl_path[] = {
+static struct ctl_path nf_log_sysctl_path[] = {
 	{ .procname = "net", .ctl_name = CTL_NET, },
 	{ .procname = "netfilter", .ctl_name = NET_NETFILTER, },
 	{ .procname = "nf_log", .ctl_name = CTL_UNNUMBERED, },
@@ -228,19 +227,26 @@ static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
 static struct ctl_table_header *nf_log_dir_header;
 
 static int nf_log_proc_dostring(ctl_table *table, int write, struct file *filp,
-				void *buffer, size_t *lenp, loff_t *ppos)
+				void __user *buffer, size_t *lenp, loff_t *ppos)
 {
 	const struct nf_logger *logger;
+	char buf[NFLOGGER_NAME_LEN];
+	size_t size = *lenp;
 	int r = 0;
 	int tindex = (unsigned long)table->extra1;
 
 	if (write) {
-		if (!strcmp(buffer, "NONE")) {
+		if (size > sizeof(buf))
+			size = sizeof(buf);
+		if (copy_from_user(buf, buffer, size))
+			return -EFAULT;
+
+		if (!strcmp(buf, "NONE")) {
 			nf_log_unbind_pf(tindex);
 			return 0;
 		}
 		mutex_lock(&nf_log_mutex);
-		logger = __find_logger(tindex, buffer);
+		logger = __find_logger(tindex, buf);
 		if (logger == NULL) {
 			mutex_unlock(&nf_log_mutex);
 			return -ENOENT;
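
The bug fixed above is that `buffer` is a userspace pointer, yet the old handler passed it straight to strcmp() and __find_logger(), dereferencing user memory directly in kernel context. The fix bounces the data through a stack buffer via copy_from_user() first. A userspace sketch of the same defensive pattern; the explicit NUL termination before any string handling is something the sketch adds for safety, with memcpy standing in for copy_from_user and all names illustrative:

	#include <stdio.h>
	#include <string.h>

	#define LOGGER_NAME_LEN 64  /* illustrative, like NFLOGGER_NAME_LEN */

	/* Copy an untrusted, possibly unterminated buffer into local storage
	 * before doing any string handling on it. */
	static int set_logger(const char *user_buf, size_t len)
	{
		char buf[LOGGER_NAME_LEN];
		size_t n = len < sizeof(buf) - 1 ? len : sizeof(buf) - 1;

		memcpy(buf, user_buf, n);   /* kernel code: copy_from_user() */
		buf[n] = '\0';              /* guarantee termination for strcmp() */

		if (!strcmp(buf, "NONE")) {
			printf("unbinding logger\n");
			return 0;
		}
		printf("binding logger '%s'\n", buf);
		return 0;
	}

	int main(void)
	{
		char raw[] = { 'N', 'O', 'N', 'E' };  /* no trailing NUL */

		return set_logger(raw, sizeof(raw));
	}
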
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 498b45101df..f28f6a5fc02 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -40,12 +40,12 @@ nfqueue_tg(struct sk_buff *skb, const struct xt_target_param *par)
 static u32 hash_v4(const struct sk_buff *skb)
 {
 	const struct iphdr *iph = ip_hdr(skb);
-	u32 ipaddr;
+	__be32 ipaddr;
 
 	/* packets in either direction go into same queue */
 	ipaddr = iph->saddr ^ iph->daddr;
 
-	return jhash_2words(ipaddr, iph->protocol, jhash_initval);
+	return jhash_2words((__force u32)ipaddr, iph->protocol, jhash_initval);
 }
static unsigned int
@@ -63,14 +63,14 @@ nfqueue_tg4_v1(struct sk_buff *skb, const struct xt_target_param *par)
 static u32 hash_v6(const struct sk_buff *skb)
 {
 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
-	u32 addr[4];
+	__be32 addr[4];
 
 	addr[0] = ip6h->saddr.s6_addr32[0] ^ ip6h->daddr.s6_addr32[0];
 	addr[1] = ip6h->saddr.s6_addr32[1] ^ ip6h->daddr.s6_addr32[1];
 	addr[2] = ip6h->saddr.s6_addr32[2] ^ ip6h->daddr.s6_addr32[2];
 	addr[3] = ip6h->saddr.s6_addr32[3] ^ ip6h->daddr.s6_addr32[3];
 
-	return jhash2(addr, ARRAY_SIZE(addr), jhash_initval);
+	return jhash2((__force u32 *)addr, ARRAY_SIZE(addr), jhash_initval);
 }
static unsigned int
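
__be32 is the same machine word as u32; the annotation exists only so sparse (`make C=1`) can flag byte-order mixups as type errors. XORing two big-endian words is endianness-neutral, so handing the result to jhash merely needs a __force cast to mark the reinterpretation as deliberate. Roughly how the annotation machinery is wired up, simplified from the kernel's types.h/compiler.h (the toy hash is illustrative):

	/* Under sparse (__CHECKER__), __bitwise makes __be32 a distinct,
	 * non-arithmetic type; under gcc the annotations vanish. */
	#ifdef __CHECKER__
	# define __bitwise __attribute__((bitwise))
	# define __force   __attribute__((force))
	#else
	# define __bitwise
	# define __force
	#endif

	typedef unsigned int u32;
	typedef u32 __bitwise __be32;

	static u32 hash_word(u32 w) { return w * 2654435761u; }  /* toy hash */

	u32 hash_flow(__be32 saddr, __be32 daddr)
	{
		__be32 x = saddr ^ daddr;         /* same-type XOR: fine for sparse */

		/* return hash_word(x);   <-- sparse: __be32 used as plain u32 */
		return hash_word((__force u32)x); /* deliberate reinterpretation */
	}
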
diff --git a/net/netfilter/xt_cluster.c b/net/netfilter/xt_cluster.c
index 69a639f3540..225ee3ecd69 100644
--- a/net/netfilter/xt_cluster.c
+++ b/net/netfilter/xt_cluster.c
@@ -15,14 +15,14 @@
 #include <net/netfilter/nf_conntrack.h>
 #include <linux/netfilter/xt_cluster.h>
 
-static inline u_int32_t nf_ct_orig_ipv4_src(const struct nf_conn *ct)
+static inline u32 nf_ct_orig_ipv4_src(const struct nf_conn *ct)
 {
-	return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
+	return (__force u32)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
 }
 
-static inline const void *nf_ct_orig_ipv6_src(const struct nf_conn *ct)
+static inline const u32 *nf_ct_orig_ipv6_src(const struct nf_conn *ct)
 {
-	return ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6;
+	return (__force u32 *)ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6;
 }
static inline u_int32_t
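
The xt_cluster hunk is the same sparse cleanup in accessor form: the tuple addresses are __be32 on the wire, so the IPv4 helper now hands back a plain u32 via __force for hashing, and the IPv6 helper returns const u32 * rather than const void *, which the jhash2() call site can then consume without further casting.
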
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c
index 01dd07b764e..98fc190e8f0 100644
--- a/net/netfilter/xt_quota.c
+++ b/net/netfilter/xt_quota.c
@@ -54,6 +54,7 @@ static bool quota_mt_check(const struct xt_mtchk_param *par)
 	if (q->master == NULL)
 		return -ENOMEM;
 
+	q->master->quota = q->quota;
 	return true;
 }
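
kmalloc() does not zero or otherwise initialize its allocation, so before this one-liner the shared quota counter started at whatever garbage the allocator returned rather than at the user-configured limit. The shape of the bug and the fix in plain C (the struct and function names below are illustrative):

	#include <stdint.h>
	#include <stdlib.h>
	#include <stdio.h>

	struct quota_state { uint64_t remaining; };  /* illustrative */

	static struct quota_state *quota_init(uint64_t configured_quota)
	{
		struct quota_state *s = malloc(sizeof(*s));

		if (s == NULL)
			return NULL;
		/* Without this line the counter starts at garbage, which is
		 * exactly what the one-line kernel fix addresses. */
		s->remaining = configured_quota;
		return s;
	}

	int main(void)
	{
		struct quota_state *s = quota_init(1000);

		if (s) {
			printf("remaining: %llu\n", (unsigned long long)s->remaining);
			free(s);
		}
		return 0;
	}
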
diff --git a/net/netfilter/xt_rateest.c b/net/netfilter/xt_rateest.c
index 220a1d588ee..4fc6a917f6d 100644
--- a/net/netfilter/xt_rateest.c
+++ b/net/netfilter/xt_rateest.c
@@ -66,7 +66,7 @@ xt_rateest_mt(const struct sk_buff *skb, const struct xt_match_param *par)
 		if (info->flags & XT_RATEEST_MATCH_BPS)
 			ret &= bps1 == bps2;
 		if (info->flags & XT_RATEEST_MATCH_PPS)
-			ret &= pps2 == pps2;
+			ret &= pps1 == pps2;
 		break;
 	}
}
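
The deleted line compared pps2 with itself, which is always true, so XT_RATEEST_MATCH_PPS never actually constrained the match; comparing pps1 against pps2 makes the flag behave like its XT_RATEEST_MATCH_BPS counterpart above.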