Diffstat (limited to 'net/ipv4/route.c')
-rw-r--r--  net/ipv4/route.c  231
1 file changed, 176 insertions(+), 55 deletions(-)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 2ea6dcc3e2c..97f71153584 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -129,6 +129,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
static int ip_rt_secret_interval __read_mostly = 10 * 60 * HZ;
+static int rt_chain_length_max __read_mostly = 20;
static void rt_worker_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(expires_work, rt_worker_func);
@@ -145,6 +146,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);
+static void rt_emergency_hash_rebuild(struct net *net);
static struct dst_ops ipv4_dst_ops = {
@@ -158,7 +160,6 @@ static struct dst_ops ipv4_dst_ops = {
.link_failure = ipv4_link_failure,
.update_pmtu = ip_rt_update_pmtu,
.local_out = __ip_local_out,
- .entry_size = sizeof(struct rtable),
.entries = ATOMIC_INIT(0),
};
@@ -201,6 +202,7 @@ const __u8 ip_tos2prio[16] = {
struct rt_hash_bucket {
struct rtable *chain;
};
+
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
defined(CONFIG_PROVE_LOCKING)
/*
@@ -427,7 +429,7 @@ static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
if (*pos == 0)
return SEQ_START_TOKEN;
- for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+ for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu+1;
@@ -440,7 +442,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
int cpu;
- for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+ for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu+1;
@@ -674,6 +676,20 @@ static inline u32 rt_score(struct rtable *rt)
return score;
}
+static inline bool rt_caching(const struct net *net)
+{
+ return net->ipv4.current_rt_cache_rebuild_count <=
+ net->ipv4.sysctl_rt_cache_rebuild_count;
+}
+
+static inline bool compare_hash_inputs(const struct flowi *fl1,
+ const struct flowi *fl2)
+{
+ return (__force u32)(((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
+ (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) |
+ (fl1->iif ^ fl2->iif)) == 0);
+}
+
static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
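For illustration, a minimal user-space sketch of the XOR/OR comparison idiom used by compare_hash_inputs() above, which matches only the fields that feed rt_hash() (destination, source, input interface), so entries differing only in attributes such as TOS compare equal here. The types below are hypothetical stand-ins, not the kernel's struct flowi:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for the routing hash inputs (daddr, saddr, iif). */
struct hash_inputs {
	uint32_t daddr;
	uint32_t saddr;
	uint32_t iif;
};

/* XOR each pair of fields (zero iff equal), OR the results together,
 * and test the combined word against zero: a single comparison covering
 * all three fields, the same shape as compare_hash_inputs(). */
static inline bool hash_inputs_equal(const struct hash_inputs *a,
				     const struct hash_inputs *b)
{
	return ((a->daddr ^ b->daddr) |
		(a->saddr ^ b->saddr) |
		(a->iif   ^ b->iif)) == 0;
}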
@@ -753,11 +769,24 @@ static void rt_do_flush(int process_context)
}
}
+/*
+ * While freeing expired entries, we compute average chain length
+ * and standard deviation, using fixed-point arithmetic.
+ * This gives an estimate for rt_chain_length_max:
+ * rt_chain_length_max = max(elasticity, AVG + 4*SD)
+ * We use 3 bits for the fractional part, and 29 (or 61) for the magnitude.
+ */
+
+#define FRACT_BITS 3
+#define ONE (1UL << FRACT_BITS)
+
static void rt_check_expire(void)
{
static unsigned int rover;
unsigned int i = rover, goal;
struct rtable *rth, **rthp;
+ unsigned long length = 0, samples = 0;
+ unsigned long sum = 0, sum2 = 0;
u64 mult;
mult = ((u64)ip_rt_gc_interval) << rt_hash_log;
@@ -766,6 +795,7 @@ static void rt_check_expire(void)
goal = (unsigned int)mult;
if (goal > rt_hash_mask)
goal = rt_hash_mask + 1;
+ length = 0;
for (; goal > 0; goal--) {
unsigned long tmo = ip_rt_gc_timeout;
@@ -775,6 +805,8 @@ static void rt_check_expire(void)
if (need_resched())
cond_resched();
+ samples++;
+
if (*rthp == NULL)
continue;
spin_lock_bh(rt_hash_lock_addr(i));
@@ -789,11 +821,29 @@ static void rt_check_expire(void)
if (time_before_eq(jiffies, rth->u.dst.expires)) {
tmo >>= 1;
rthp = &rth->u.dst.rt_next;
+ /*
+ * Only bump our length if the hash
+ * inputs on entries n and n+1 differ;
+ * entries on a chain with equal hash
+ * inputs are counted only once, so
+ * that entries for different QoS
+ * levels and other non-hash-input
+ * attributes don't unfairly skew
+ * the length computation.
+ */
+ if ((*rthp == NULL) ||
+ !compare_hash_inputs(&(*rthp)->fl,
+ &rth->fl))
+ length += ONE;
continue;
}
} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
tmo >>= 1;
rthp = &rth->u.dst.rt_next;
+ if ((*rthp == NULL) ||
+ !compare_hash_inputs(&(*rthp)->fl,
+ &rth->fl))
+ length += ONE;
continue;
}
@@ -802,6 +852,15 @@ static void rt_check_expire(void)
rt_free(rth);
}
spin_unlock_bh(rt_hash_lock_addr(i));
+ sum += length;
+ sum2 += length*length;
+ }
+ if (samples) {
+ unsigned long avg = sum / samples;
+ unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
+ rt_chain_length_max = max_t(unsigned long,
+ ip_rt_gc_elasticity,
+ (avg + 4*sd) >> FRACT_BITS);
}
rover = i;
}
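The fixed-point bookkeeping above (FRACT_BITS/ONE) keeps 3 fractional bits while accumulating chain lengths, then derives rt_chain_length_max = max(elasticity, avg + 4*sd). A standalone sketch of the same arithmetic, with int_sqrt() replaced by a naive integer square root and hypothetical sample data, for illustration only:

#include <stdio.h>

#define FRACT_BITS 3
#define ONE (1UL << FRACT_BITS)

/* Naive integer square root, standing in for the kernel's int_sqrt(). */
static unsigned long isqrt(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

int main(void)
{
	/* Pretend per-bucket chain lengths sampled by rt_check_expire(). */
	unsigned long lengths[] = { 2, 3, 5, 8, 4, 3 };
	unsigned long samples = sizeof(lengths) / sizeof(lengths[0]);
	unsigned long sum = 0, sum2 = 0, elasticity = 8;
	unsigned long i, avg, sd, limit;

	for (i = 0; i < samples; i++) {
		unsigned long len = lengths[i] * ONE;  /* scale to fixed point */

		sum  += len;
		sum2 += len * len;
	}

	avg = sum / samples;                    /* still has 3 fractional bits */
	sd  = isqrt(sum2 / samples - avg * avg);
	limit = (avg + 4 * sd) >> FRACT_BITS;   /* back to whole entries */
	if (limit < elasticity)
		limit = elasticity;             /* max(elasticity, avg + 4*sd) */

	printf("chain_length_max = %lu\n", limit);
	return 0;
}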
@@ -851,6 +910,26 @@ static void rt_secret_rebuild(unsigned long __net)
mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
}
+static void rt_secret_rebuild_oneshot(struct net *net)
+{
+ del_timer_sync(&net->ipv4.rt_secret_timer);
+ rt_cache_invalidate(net);
+ if (ip_rt_secret_interval) {
+ net->ipv4.rt_secret_timer.expires += ip_rt_secret_interval;
+ add_timer(&net->ipv4.rt_secret_timer);
+ }
+}
+
+static void rt_emergency_hash_rebuild(struct net *net)
+{
+ if (net_ratelimit()) {
+ printk(KERN_WARNING "Route hash chain too long!\n");
+ printk(KERN_WARNING "Adjust your secret_interval!\n");
+ }
+
+ rt_secret_rebuild_oneshot(net);
+}
+
/*
Short description of GC goals.
@@ -989,6 +1068,7 @@ out: return 0;
static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
{
struct rtable *rth, **rthp;
+ struct rtable *rthi;
unsigned long now;
struct rtable *cand, **candp;
u32 min_score;
@@ -1002,7 +1082,13 @@ restart:
candp = NULL;
now = jiffies;
+ if (!rt_caching(dev_net(rt->u.dst.dev))) {
+ rt_drop(rt);
+ return 0;
+ }
+
rthp = &rt_hash_table[hash].chain;
+ rthi = NULL;
spin_lock_bh(rt_hash_lock_addr(hash));
while ((rth = *rthp) != NULL) {
@@ -1048,6 +1134,17 @@ restart:
chain_length++;
rthp = &rth->u.dst.rt_next;
+
+ /*
+ * Check whether the next entry in the chain
+ * contains the same hash input values as rt. If it does,
+ * this is where we will insert into the list, instead of
+ * at the head. This groups together entries that differ only
+ * by aspects not relevant to the hash function, which we
+ * then use to adjust our chain length accounting.
+ */
+ if (*rthp && compare_hash_inputs(&(*rthp)->fl, &rt->fl))
+ rthi = rth;
}
if (cand) {
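The rthi bookkeeping above remembers the last node whose successor shares the new route's hash inputs, so the new entry is later spliced in next to that group rather than at the chain head; combined with the counting rule in rt_check_expire(), a group of same-input entries then contributes one unit to the chain length. A generic singly-linked-list sketch of that insertion policy, using a hypothetical node type rather than struct rtable:

#include <stddef.h>

struct node {
	int key;                /* stands in for the hash inputs */
	struct node *next;
};

/* Walk the chain, remember the last node whose successor carries the same
 * key as the new node, and splice the new node in at that point; otherwise
 * fall back to inserting at the head, as rt_intern_hash() does via rthi. */
static void insert_grouped(struct node **head, struct node *n)
{
	struct node *after = NULL;
	struct node *p;

	for (p = *head; p; p = p->next) {
		if (p->next && p->next->key == n->key)
			after = p;
	}

	if (after) {
		n->next = after->next;  /* lands adjacent to the equal-key group */
		after->next = n;
	} else {
		n->next = *head;        /* default: head insertion */
		*head = n;
	}
}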
@@ -1061,6 +1158,16 @@ restart:
*candp = cand->u.dst.rt_next;
rt_free(cand);
}
+ } else {
+ if (chain_length > rt_chain_length_max) {
+ struct net *net = dev_net(rt->u.dst.dev);
+ int num = ++net->ipv4.current_rt_cache_rebuild_count;
+ if (!rt_caching(dev_net(rt->u.dst.dev))) {
+ printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
+ rt->u.dst.dev->name, num);
+ }
+ rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev));
+ }
}
/* Try to bind route to arp only if it is output
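The else-branch above implements the escalation policy: every over-long chain bumps the per-namespace rebuild counter and triggers an emergency rebuild, and once that counter exceeds sysctl_rt_cache_rebuild_count, rt_caching() turns false and new routes bypass the cache entirely. A compressed stand-alone sketch of that policy, with hypothetical types and stubs in place of the kernel's struct net:

#include <stdbool.h>
#include <stdio.h>

struct fake_net {
	int rebuild_count;   /* mirrors current_rt_cache_rebuild_count */
	int rebuild_limit;   /* mirrors sysctl_rt_cache_rebuild_count  */
};

static bool caching_enabled(const struct fake_net *net)
{
	return net->rebuild_count <= net->rebuild_limit;
}

static void emergency_rebuild(struct fake_net *net)
{
	/* In the kernel this rotates the hash secret and flushes the cache. */
	printf("emergency rebuild #%d\n", net->rebuild_count);
}

static void on_long_chain(struct fake_net *net)
{
	int num = ++net->rebuild_count;

	if (!caching_enabled(net))
		printf("rebuild count %d exceeds the limit, caching disabled\n",
		       num);

	emergency_rebuild(net);
}

int main(void)
{
	struct fake_net net = { .rebuild_count = 0, .rebuild_limit = 2 };
	int i;

	for (i = 0; i < 4; i++)
		on_long_chain(&net);    /* third and fourth calls report caching disabled */
	return 0;
}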
@@ -1098,14 +1205,17 @@ restart:
}
}
- rt->u.dst.rt_next = rt_hash_table[hash].chain;
+ if (rthi)
+ rt->u.dst.rt_next = rthi->u.dst.rt_next;
+ else
+ rt->u.dst.rt_next = rt_hash_table[hash].chain;
+
#if RT_CACHE_DEBUG >= 2
if (rt->u.dst.rt_next) {
struct rtable *trt;
- printk(KERN_DEBUG "rt_cache @%02x: " NIPQUAD_FMT, hash,
- NIPQUAD(rt->rt_dst));
+ printk(KERN_DEBUG "rt_cache @%02x: %pI4", hash, &rt->rt_dst);
for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
- printk(" . " NIPQUAD_FMT, NIPQUAD(trt->rt_dst));
+ printk(" . %pI4", &trt->rt_dst);
printk("\n");
}
#endif
@@ -1114,7 +1224,11 @@ restart:
* previous writes to rt are committed to memory
* before making rt visible to other CPUS.
*/
- rcu_assign_pointer(rt_hash_table[hash].chain, rt);
+ if (rthi)
+ rcu_assign_pointer(rthi->u.dst.rt_next, rt);
+ else
+ rcu_assign_pointer(rt_hash_table[hash].chain, rt);
+
spin_unlock_bh(rt_hash_lock_addr(hash));
*rp = rt;
return 0;
@@ -1217,6 +1331,9 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
|| ipv4_is_zeronet(new_gw))
goto reject_redirect;
+ if (!rt_caching(net))
+ goto reject_redirect;
+
if (!IN_DEV_SHARED_MEDIA(in_dev)) {
if (!inet_addr_onlink(in_dev, new_gw, old_gw))
goto reject_redirect;
@@ -1267,7 +1384,6 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
/* Copy all the information. */
*rt = *rth;
- INIT_RCU_HEAD(&rt->u.dst.rcu_head);
rt->u.dst.__use = 1;
atomic_set(&rt->u.dst.__refcnt, 1);
rt->u.dst.child = NULL;
@@ -1280,7 +1396,9 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
rt->u.dst.path = &rt->u.dst;
rt->u.dst.neighbour = NULL;
rt->u.dst.hh = NULL;
+#ifdef CONFIG_XFRM
rt->u.dst.xfrm = NULL;
+#endif
rt->rt_genid = rt_genid(net);
rt->rt_flags |= RTCF_REDIRECTED;
@@ -1324,11 +1442,10 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
- printk(KERN_INFO "Redirect from " NIPQUAD_FMT " on %s about "
- NIPQUAD_FMT " ignored.\n"
- " Advised path = " NIPQUAD_FMT " -> " NIPQUAD_FMT "\n",
- NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
- NIPQUAD(saddr), NIPQUAD(daddr));
+ printk(KERN_INFO "Redirect from %pI4 on %s about %pI4 ignored.\n"
+ " Advised path = %pI4 -> %pI4\n",
+ &old_gw, dev->name, &new_gw,
+ &saddr, &daddr);
#endif
in_dev_put(in_dev);
}
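The NIPQUAD_FMT/NIPQUAD() pairs throughout this file are converted to the %pI4 printk extension, which takes a pointer to an IPv4 address in network byte order and prints it in dotted-quad form. A rough user-space approximation of what %pI4 produces, for illustration only (ordinary printf has no such specifier):

#include <stdint.h>
#include <stdio.h>

/* Print an IPv4 address stored in network byte order, the way the kernel's
 * %pI4 specifier does. */
static void print_ipv4(const char *prefix, const void *addr)
{
	const uint8_t *b = addr;

	printf("%s%u.%u.%u.%u\n", prefix, b[0], b[1], b[2], b[3]);
}

int main(void)
{
	uint8_t gw[4] = { 192, 0, 2, 1 };   /* 192.0.2.1 in network byte order */

	print_ipv4("Advised path via ", gw);
	return 0;
}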
@@ -1348,9 +1465,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
rt->fl.oif,
rt_genid(dev_net(dst->dev)));
#if RT_CACHE_DEBUG >= 1
- printk(KERN_DEBUG "ipv4_negative_advice: redirect to "
- NIPQUAD_FMT "/%02x dropped\n",
- NIPQUAD(rt->rt_dst), rt->fl.fl4_tos);
+ printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
+ &rt->rt_dst, rt->fl.fl4_tos);
#endif
rt_del(hash, rt);
ret = NULL;
@@ -1414,10 +1530,9 @@ void ip_rt_send_redirect(struct sk_buff *skb)
if (IN_DEV_LOG_MARTIANS(in_dev) &&
rt->u.dst.rate_tokens == ip_rt_redirect_number &&
net_ratelimit())
- printk(KERN_WARNING "host " NIPQUAD_FMT "/if%d ignores "
- "redirects for " NIPQUAD_FMT " to " NIPQUAD_FMT ".\n",
- NIPQUAD(rt->rt_src), rt->rt_iif,
- NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway));
+ printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
+ &rt->rt_src, rt->rt_iif,
+ &rt->rt_dst, &rt->rt_gateway);
#endif
}
out:
@@ -1610,8 +1725,8 @@ static void ipv4_link_failure(struct sk_buff *skb)
static int ip_rt_bug(struct sk_buff *skb)
{
- printk(KERN_DEBUG "ip_rt_bug: " NIPQUAD_FMT " -> " NIPQUAD_FMT ", %s\n",
- NIPQUAD(ip_hdr(skb)->saddr), NIPQUAD(ip_hdr(skb)->daddr),
+ printk(KERN_DEBUG "ip_rt_bug: %pI4 -> %pI4, %s\n",
+ &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
skb->dev ? skb->dev->name : "?");
kfree_skb(skb);
return 0;
@@ -1788,9 +1903,8 @@ static void ip_handle_martian_source(struct net_device *dev,
* RFC1812 recommendation, if source is martian,
* the only hint is MAC header.
*/
- printk(KERN_WARNING "martian source " NIPQUAD_FMT " from "
- NIPQUAD_FMT", on dev %s\n",
- NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
+ printk(KERN_WARNING "martian source %pI4 from %pI4, on dev %s\n",
+ &daddr, &saddr, dev->name);
if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
int i;
const unsigned char *p = skb_mac_header(skb);
@@ -2099,9 +2213,8 @@ martian_destination:
RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
- printk(KERN_WARNING "martian destination " NIPQUAD_FMT " from "
- NIPQUAD_FMT ", dev %s\n",
- NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
+ printk(KERN_WARNING "martian destination %pI4 from %pI4, dev %s\n",
+ &daddr, &saddr, dev->name);
#endif
e_hostunreach:
@@ -2130,6 +2243,10 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
struct net *net;
net = dev_net(dev);
+
+ if (!rt_caching(net))
+ goto skip_cache;
+
tos &= IPTOS_RT_MASK;
hash = rt_hash(daddr, saddr, iif, rt_genid(net));
@@ -2154,6 +2271,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
}
rcu_read_unlock();
+skip_cache:
/* Multicast recognition logic is moved from route cache to here.
The problem was that too many Ethernet cards have broken/missing
hardware multicast filters :-( As result the host on multicasting
@@ -2539,6 +2657,9 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
unsigned hash;
struct rtable *rth;
+ if (!rt_caching(net))
+ goto slow_output;
+
hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));
rcu_read_lock_bh();
@@ -2563,6 +2684,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
}
rcu_read_unlock_bh();
+slow_output:
return ip_route_output_slow(net, rp, flp);
}
@@ -2578,7 +2700,6 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
.destroy = ipv4_dst_destroy,
.check = ipv4_dst_check,
.update_pmtu = ipv4_rt_blackhole_update_pmtu,
- .entry_size = sizeof(struct rtable),
.entries = ATOMIC_INIT(0),
};
@@ -2640,7 +2761,7 @@ int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
flp->fl4_src = (*rp)->rt_src;
if (!flp->fl4_dst)
flp->fl4_dst = (*rp)->rt_dst;
- err = __xfrm_lookup((struct dst_entry **)rp, flp, sk,
+ err = __xfrm_lookup(net, (struct dst_entry **)rp, flp, sk,
flags ? XFRM_LOOKUP_WAIT : 0);
if (err == -EREMOTE)
err = ipv4_dst_blackhole(net, rp, flp);
@@ -2995,7 +3116,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ipv4_dst_ops.gc_thresh,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_MAX_SIZE,
@@ -3003,7 +3124,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_max_size,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
/* Deprecated. Use gc_min_interval_ms */
@@ -3013,8 +3134,8 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_gc_min_interval,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
},
{
.ctl_name = NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS,
@@ -3022,8 +3143,8 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_gc_min_interval,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_ms_jiffies,
- .strategy = &sysctl_ms_jiffies,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ .strategy = sysctl_ms_jiffies,
},
{
.ctl_name = NET_IPV4_ROUTE_GC_TIMEOUT,
@@ -3031,8 +3152,8 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_gc_timeout,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
},
{
.ctl_name = NET_IPV4_ROUTE_GC_INTERVAL,
@@ -3040,8 +3161,8 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_gc_interval,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
},
{
.ctl_name = NET_IPV4_ROUTE_REDIRECT_LOAD,
@@ -3049,7 +3170,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_redirect_load,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_REDIRECT_NUMBER,
@@ -3057,7 +3178,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_redirect_number,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_REDIRECT_SILENCE,
@@ -3065,7 +3186,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_redirect_silence,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_ERROR_COST,
@@ -3073,7 +3194,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_error_cost,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_ERROR_BURST,
@@ -3081,7 +3202,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_error_burst,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_GC_ELASTICITY,
@@ -3089,7 +3210,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_gc_elasticity,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_MTU_EXPIRES,
@@ -3097,8 +3218,8 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_mtu_expires,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
},
{
.ctl_name = NET_IPV4_ROUTE_MIN_PMTU,
@@ -3106,7 +3227,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_min_pmtu,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_MIN_ADVMSS,
@@ -3114,7 +3235,7 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_min_advmss,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
.ctl_name = NET_IPV4_ROUTE_SECRET_INTERVAL,
@@ -3122,8 +3243,8 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_secret_interval,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &ipv4_sysctl_rt_secret_interval,
- .strategy = &ipv4_sysctl_rt_secret_interval_strategy,
+ .proc_handler = ipv4_sysctl_rt_secret_interval,
+ .strategy = ipv4_sysctl_rt_secret_interval_strategy,
},
{ .ctl_name = 0 }
};
@@ -3151,8 +3272,8 @@ static struct ctl_table ipv4_route_flush_table[] = {
.procname = "flush",
.maxlen = sizeof(int),
.mode = 0200,
- .proc_handler = &ipv4_sysctl_rtcache_flush,
- .strategy = &ipv4_sysctl_rtcache_flush_strategy,
+ .proc_handler = ipv4_sysctl_rtcache_flush,
+ .strategy = ipv4_sysctl_rtcache_flush_strategy,
},
{ .ctl_name = 0 },
};