author      Jeff Garzik <jgarzik@pretzel.yyz.us>    2005-06-26 18:06:06 -0400
committer   Jeff Garzik <jgarzik@pobox.com>         2005-06-26 18:06:06 -0400
commit      aef7b83c92dd0b7e994805440655d1d64147287b (patch)
tree        981f373358c1988e061625e8f272013065cb086f /net/core
parent      b1fc5505e0dbcc3fd7c75bfe6bee39ec50080963 (diff)
parent      8678887e7fb43cd6c9be6c9807b05e77848e0920 (diff)
Merge /spare/repo/linux-2.6/
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/Makefile           |   3
-rw-r--r--  net/core/dev.c              | 138
-rw-r--r--  net/core/ethtool.c          |  22
-rw-r--r--  net/core/neighbour.c        | 336
-rw-r--r--  net/core/net-sysfs.c        |  20
-rw-r--r--  net/core/netfilter.c        | 138
-rw-r--r--  net/core/netpoll.c          |  80
-rw-r--r--  net/core/request_sock.c     |  64
-rw-r--r--  net/core/rtnetlink.c        |  33
-rw-r--r--  net/core/skbuff.c           | 163
-rw-r--r--  net/core/sock.c             |  35
-rw-r--r--  net/core/sysctl_net_core.c  |  61
-rw-r--r--  net/core/wireless.c         |  74
13 files changed, 802 insertions, 365 deletions
diff --git a/net/core/Makefile b/net/core/Makefile
index 81f03243fe2..5e0c56b7f60 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -2,7 +2,8 @@
 # Makefile for the Linux networking core.
 #
 
-obj-y := sock.o skbuff.o iovec.o datagram.o stream.o scm.o gen_stats.o gen_estimator.o
+obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
+        gen_stats.o gen_estimator.o
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
diff --git a/net/core/dev.c b/net/core/dev.c
index d4d9e2680ad..7016e0c36b3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -115,18 +115,6 @@
 #endif  /* CONFIG_NET_RADIO */
 #include <asm/current.h>
 
-/* This define, if set, will randomly drop a packet when congestion
- * is more than moderate.  It helps fairness in the multi-interface
- * case when one of them is a hog, but it kills performance for the
- * single interface case so it is off now by default.
- */
-#undef RAND_LIE
-
-/* Setting this will sample the queue lengths and thus congestion
- * via a timer instead of as each packet is received.
- */
-#undef OFFLINE_SAMPLE
-
 /*
  *  The list of packet types we will receive (as opposed to discard)
  *  and the routines to invoke.
@@ -159,11 +147,6 @@ static DEFINE_SPINLOCK(ptype_lock);
 static struct list_head ptype_base[16]; /* 16 way hashed list */
 static struct list_head ptype_all;      /* Taps */
 
-#ifdef OFFLINE_SAMPLE
-static void sample_queue(unsigned long dummy);
-static struct timer_list samp_timer = TIMER_INITIALIZER(sample_queue, 0, 0);
-#endif
-
 /*
  * The @dev_base list is protected by @dev_base_lock and the rtln
  * semaphore.
@@ -215,7 +198,7 @@ static struct notifier_block *netdev_chain;
  *  Device drivers call our routines to queue packets here. We empty the
  *  queue in the local softnet handler.
  */
-DEFINE_PER_CPU(struct softnet_data, softnet_data) = { 0, };
+DEFINE_PER_CPU(struct softnet_data, softnet_data) = { NULL };
 
 #ifdef CONFIG_SYSFS
 extern int netdev_sysfs_init(void);
@@ -761,6 +744,18 @@ int dev_change_name(struct net_device *dev, char *newname)
 }
 
 /**
+ *  netdev_features_change - device changes features
+ *  @dev: device to cause notification
+ *
+ *  Called to indicate a device has changed features.
+ */
+void netdev_features_change(struct net_device *dev)
+{
+    notifier_call_chain(&netdev_chain, NETDEV_FEAT_CHANGE, dev);
+}
+EXPORT_SYMBOL(netdev_features_change);
+
+/**
  *  netdev_state_change - device changes state
  *  @dev: device to cause notification
  *
@@ -1351,71 +1346,13 @@ out:
             Receiver routines
   =======================================================================*/
 
-int netdev_max_backlog = 300;
+int netdev_max_backlog = 1000;
+int netdev_budget = 300;
 int weight_p = 64;            /* old backlog weight */
-/* These numbers are selected based on intuition and some
- * experimentatiom, if you have more scientific way of doing this
- * please go ahead and fix things.
- */
-int no_cong_thresh = 10;
-int no_cong = 20;
-int lo_cong = 100;
-int mod_cong = 290;
 
 DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
 
-static void get_sample_stats(int cpu)
-{
-#ifdef RAND_LIE
-    unsigned long rd;
-    int rq;
-#endif
-    struct softnet_data *sd = &per_cpu(softnet_data, cpu);
-    int blog = sd->input_pkt_queue.qlen;
-    int avg_blog = sd->avg_blog;
-
-    avg_blog = (avg_blog >> 1) + (blog >> 1);
-
-    if (avg_blog > mod_cong) {
-        /* Above moderate congestion levels. */
-        sd->cng_level = NET_RX_CN_HIGH;
-#ifdef RAND_LIE
-        rd = net_random();
-        rq = rd % netdev_max_backlog;
-        if (rq < avg_blog) /* unlucky bastard */
-            sd->cng_level = NET_RX_DROP;
-#endif
-    } else if (avg_blog > lo_cong) {
-        sd->cng_level = NET_RX_CN_MOD;
-#ifdef RAND_LIE
-        rd = net_random();
-        rq = rd % netdev_max_backlog;
-        if (rq < avg_blog) /* unlucky bastard */
-            sd->cng_level = NET_RX_CN_HIGH;
-#endif
-    } else if (avg_blog > no_cong)
-        sd->cng_level = NET_RX_CN_LOW;
-    else  /* no congestion */
-        sd->cng_level = NET_RX_SUCCESS;
-
-    sd->avg_blog = avg_blog;
-}
-
-#ifdef OFFLINE_SAMPLE
-static void sample_queue(unsigned long dummy)
-{
-/* 10 ms 0r 1ms -- i don't care -- JHS */
-    int next_tick = 1;
-    int cpu = smp_processor_id();
-
-    get_sample_stats(cpu);
-    next_tick += jiffies;
-    mod_timer(&samp_timer, next_tick);
-}
-#endif
-
-
 /**
  *  netif_rx    -   post buffer to the network code
  *  @skb: buffer to post
@@ -1436,7 +1373,6 @@ static void sample_queue(unsigned long dummy)
 
 int netif_rx(struct sk_buff *skb)
 {
-    int this_cpu;
     struct softnet_data *queue;
    unsigned long flags;
 
@@ -1452,38 +1388,22 @@ int netif_rx(struct sk_buff *skb)
     * short when CPU is congested, but is still operating.
     */
    local_irq_save(flags);
-    this_cpu = smp_processor_id();
    queue = &__get_cpu_var(softnet_data);
 
    __get_cpu_var(netdev_rx_stat).total++;
    if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
        if (queue->input_pkt_queue.qlen) {
-            if (queue->throttle)
-                goto drop;
-
 enqueue:
            dev_hold(skb->dev);
            __skb_queue_tail(&queue->input_pkt_queue, skb);
-#ifndef OFFLINE_SAMPLE
-            get_sample_stats(this_cpu);
-#endif
            local_irq_restore(flags);
-            return queue->cng_level;
+            return NET_RX_SUCCESS;
        }
 
-        if (queue->throttle)
-            queue->throttle = 0;
-
        netif_rx_schedule(&queue->backlog_dev);
        goto enqueue;
    }
 
-    if (!queue->throttle) {
-        queue->throttle = 1;
-        __get_cpu_var(netdev_rx_stat).throttled++;
-    }
-
-drop:
    __get_cpu_var(netdev_rx_stat).dropped++;
    local_irq_restore(flags);
 
@@ -1732,6 +1652,7 @@ static int process_backlog(struct net_device *backlog_dev, int *budget)
    struct softnet_data *queue = &__get_cpu_var(softnet_data);
    unsigned long start_time = jiffies;
 
+    backlog_dev->weight = weight_p;
    for (;;) {
        struct sk_buff *skb;
        struct net_device *dev;
@@ -1767,8 +1688,6 @@ job_done:
    smp_mb__before_clear_bit();
    netif_poll_enable(backlog_dev);
 
-    if (queue->throttle)
-        queue->throttle = 0;
    local_irq_enable();
    return 0;
 }
@@ -1777,8 +1696,7 @@ static void net_rx_action(struct softirq_action *h)
 {
    struct softnet_data *queue = &__get_cpu_var(softnet_data);
    unsigned long start_time = jiffies;
-    int budget = netdev_max_backlog;
-
+    int budget = netdev_budget;
 
    local_irq_disable();
 
@@ -2042,15 +1960,9 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
    struct netif_rx_stats *s = v;
 
    seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
-           s->total, s->dropped, s->time_squeeze, s->throttled,
-           s->fastroute_hit, s->fastroute_success, s->fastroute_defer,
-           s->fastroute_deferred_out,
-#if 0
-           s->fastroute_latency_reduction
-#else
-           s->cpu_collision
-#endif
-           );
+           s->total, s->dropped, s->time_squeeze, 0,
+           0, 0, 0, 0, /* was fastroute */
+           s->cpu_collision );
    return 0;
 }
 
@@ -3292,9 +3204,6 @@ static int __init net_dev_init(void)
 
        queue = &per_cpu(softnet_data, i);
        skb_queue_head_init(&queue->input_pkt_queue);
-        queue->throttle = 0;
-        queue->cng_level = 0;
-        queue->avg_blog = 10; /* arbitrary non-zero */
        queue->completion_queue = NULL;
        INIT_LIST_HEAD(&queue->poll_list);
        set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
@@ -3303,11 +3212,6 @@ static int __init net_dev_init(void)
        atomic_set(&queue->backlog_dev.refcnt, 1);
    }
 
-#ifdef OFFLINE_SAMPLE
-    samp_timer.expires = jiffies + (10 * HZ);
-    add_timer(&samp_timer);
-#endif
-
    dev_boot_phase = 0;
 
    open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
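For orientation, a minimal sketch of the caller side affected by the netif_rx() change above. mydev_rx() and its surroundings are illustrative, not part of this merge, and assume the 2.6.12-era non-NAPI driver API:

/* Hypothetical non-NAPI RX path; after this patch netif_rx() returns only
 * NET_RX_SUCCESS or NET_RX_DROP, not the old NET_RX_CN_* congestion grades. */
static void mydev_rx(struct net_device *dev, const u8 *data, unsigned int len)
{
    struct sk_buff *skb = dev_alloc_skb(len + 2);

    if (!skb)
        return;                 /* drop on allocation failure */

    skb_reserve(skb, 2);        /* align the IP header */
    memcpy(skb_put(skb, len), data, len);
    skb->protocol = eth_type_trans(skb, dev);

    if (netif_rx(skb) == NET_RX_DROP) {
        /* backlog was full; the core has already freed the skb,
         * so the driver only accounts the loss. */
    }
}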
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index f05fde97c43..a3eeb88e1c8 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -29,7 +29,7 @@ u32 ethtool_op_get_link(struct net_device *dev)
 
 u32 ethtool_op_get_tx_csum(struct net_device *dev)
 {
-    return (dev->features & NETIF_F_IP_CSUM) != 0;
+    return (dev->features & (NETIF_F_IP_CSUM | NETIF_F_HW_CSUM)) != 0;
 }
 
 int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
@@ -42,6 +42,15 @@ int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
    return 0;
 }
 
+int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
+{
+    if (data)
+        dev->features |= NETIF_F_HW_CSUM;
+    else
+        dev->features &= ~NETIF_F_HW_CSUM;
+
+    return 0;
+}
+
 u32 ethtool_op_get_sg(struct net_device *dev)
 {
    return (dev->features & NETIF_F_SG) != 0;
@@ -347,7 +356,7 @@ static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
 {
    struct ethtool_coalesce coalesce;
 
-    if (!dev->ethtool_ops->get_coalesce)
+    if (!dev->ethtool_ops->set_coalesce)
        return -EOPNOTSUPP;
 
    if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
@@ -682,6 +691,7 @@ int dev_ethtool(struct ifreq *ifr)
    void __user *useraddr = ifr->ifr_data;
    u32 ethcmd;
    int rc;
+    unsigned long old_features;
 
    /*
     * XXX: This can be pushed down into the ethtool_* handlers that
@@ -703,6 +713,8 @@ int dev_ethtool(struct ifreq *ifr)
        if ((rc = dev->ethtool_ops->begin(dev)) < 0)
            return rc;
 
+    old_features = dev->features;
+
    switch (ethcmd) {
    case ETHTOOL_GSET:
        rc = ethtool_get_settings(dev, useraddr);
@@ -712,7 +724,6 @@ int dev_ethtool(struct ifreq *ifr)
        break;
    case ETHTOOL_GDRVINFO:
        rc = ethtool_get_drvinfo(dev, useraddr);
-
        break;
    case ETHTOOL_GREGS:
        rc = ethtool_get_regs(dev, useraddr);
@@ -801,6 +812,10 @@ int dev_ethtool(struct ifreq *ifr)
 
    if(dev->ethtool_ops->complete)
        dev->ethtool_ops->complete(dev);
+
+    if (old_features != dev->features)
+        netdev_features_change(dev);
+
    return rc;
 
  ioctl:
@@ -817,3 +832,4 @@ EXPORT_SYMBOL(ethtool_op_get_tx_csum);
 EXPORT_SYMBOL(ethtool_op_set_sg);
 EXPORT_SYMBOL(ethtool_op_set_tso);
 EXPORT_SYMBOL(ethtool_op_set_tx_csum);
+EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum);
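The new ethtool_op_set_tx_hw_csum() helper is for drivers whose hardware checksums arbitrary protocols (NETIF_F_HW_CSUM) rather than just IP. A hedged sketch of how such a driver might wire it into its ethtool_ops; the mydrv names are hypothetical:

/* Hypothetical driver glue; dev_ethtool() above snapshots dev->features and
 * fires netdev_features_change() if a handler such as this toggles a bit. */
static struct ethtool_ops mydrv_ethtool_ops = {
    .get_tx_csum = ethtool_op_get_tx_csum,    /* now also reports HW_CSUM */
    .set_tx_csum = ethtool_op_set_tx_hw_csum, /* helper added by this patch */
    .get_sg      = ethtool_op_get_sg,
    .set_sg      = ethtool_op_set_sg,
};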
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 43bdc521e20..851eb927ed9 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -32,6 +32,7 @@
 #include <net/sock.h>
 #include <linux/rtnetlink.h>
 #include <linux/random.h>
+#include <linux/string.h>
 
 #define NEIGH_DEBUG 1
 
@@ -1276,9 +1277,14 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
        INIT_RCU_HEAD(&p->rcu_head);
        p->reachable_time =
                neigh_rand_reach_time(p->base_reachable_time);
-        if (dev && dev->neigh_setup && dev->neigh_setup(dev, p)) {
-            kfree(p);
-            return NULL;
+        if (dev) {
+            if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
+                kfree(p);
+                return NULL;
+            }
+
+            dev_hold(dev);
+            p->dev = dev;
        }
        p->sysctl_table = NULL;
        write_lock_bh(&tbl->lock);
@@ -1309,6 +1315,8 @@ void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
            *p = parms->next;
            parms->dead = 1;
            write_unlock_bh(&tbl->lock);
+            if (parms->dev)
+                dev_put(parms->dev);
            call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
            return;
        }
@@ -1546,20 +1554,323 @@ out:
    return err;
 }
 
+static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
+{
+    struct rtattr *nest = NULL;
+
+    nest = RTA_NEST(skb, NDTA_PARMS);
+
+    if (parms->dev)
+        RTA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
+
+    RTA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
+    RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
+    RTA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
+    RTA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
+    RTA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
+    RTA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
+    RTA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
+    RTA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
+              parms->base_reachable_time);
+    RTA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
+    RTA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
+    RTA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
+    RTA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
+    RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
+    RTA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
+
+    return RTA_NEST_END(skb, nest);
+
+rtattr_failure:
+    return RTA_NEST_CANCEL(skb, nest);
+}
+
+static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
+                  struct netlink_callback *cb)
+{
+    struct nlmsghdr *nlh;
+    struct ndtmsg *ndtmsg;
+
+    nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
+                   NLM_F_MULTI);
+
+    ndtmsg = NLMSG_DATA(nlh);
+
+    read_lock_bh(&tbl->lock);
+    ndtmsg->ndtm_family = tbl->family;
+
+    RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
+    RTA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
+    RTA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
+    RTA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
+    RTA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
+
+    {
+        unsigned long now = jiffies;
+        unsigned int flush_delta = now - tbl->last_flush;
+        unsigned int rand_delta = now - tbl->last_rand;
+
+        struct ndt_config ndc = {
+            .ndtc_key_len       = tbl->key_len,
+            .ndtc_entry_size    = tbl->entry_size,
+            .ndtc_entries       = atomic_read(&tbl->entries),
+            .ndtc_last_flush    = jiffies_to_msecs(flush_delta),
+            .ndtc_last_rand     = jiffies_to_msecs(rand_delta),
+            .ndtc_hash_rnd      = tbl->hash_rnd,
+            .ndtc_hash_mask     = tbl->hash_mask,
+            .ndtc_hash_chain_gc = tbl->hash_chain_gc,
+            .ndtc_proxy_qlen    = tbl->proxy_queue.qlen,
+        };
+
+        RTA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
+    }
+
+    {
+        int cpu;
+        struct ndt_stats ndst;
+
+        memset(&ndst, 0, sizeof(ndst));
+
+        for (cpu = 0; cpu < NR_CPUS; cpu++) {
+            struct neigh_statistics *st;
+
+            if (!cpu_possible(cpu))
+                continue;
+
+            st = per_cpu_ptr(tbl->stats, cpu);
+            ndst.ndts_allocs        += st->allocs;
+            ndst.ndts_destroys      += st->destroys;
+            ndst.ndts_hash_grows        += st->hash_grows;
+            ndst.ndts_res_failed        += st->res_failed;
+            ndst.ndts_lookups       += st->lookups;
+            ndst.ndts_hits          += st->hits;
+            ndst.ndts_rcv_probes_mcast  += st->rcv_probes_mcast;
+            ndst.ndts_rcv_probes_ucast  += st->rcv_probes_ucast;
+            ndst.ndts_periodic_gc_runs  += st->periodic_gc_runs;
+            ndst.ndts_forced_gc_runs    += st->forced_gc_runs;
+        }
+
+        RTA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
+    }
+
+    BUG_ON(tbl->parms.dev);
+    if (neightbl_fill_parms(skb, &tbl->parms) < 0)
+        goto rtattr_failure;
+
+    read_unlock_bh(&tbl->lock);
+    return NLMSG_END(skb, nlh);
+
+rtattr_failure:
+    read_unlock_bh(&tbl->lock);
+    return NLMSG_CANCEL(skb, nlh);
+
+nlmsg_failure:
+    return -1;
+}
+
+static int neightbl_fill_param_info(struct neigh_table *tbl,
+                    struct neigh_parms *parms,
+                    struct sk_buff *skb,
+                    struct netlink_callback *cb)
+{
+    struct ndtmsg *ndtmsg;
+    struct nlmsghdr *nlh;
+
+    nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWNEIGHTBL, sizeof(struct ndtmsg),
+                   NLM_F_MULTI);
+
+    ndtmsg = NLMSG_DATA(nlh);
+
+    read_lock_bh(&tbl->lock);
+    ndtmsg->ndtm_family = tbl->family;
+    RTA_PUT_STRING(skb, NDTA_NAME, tbl->id);
+
+    if (neightbl_fill_parms(skb, parms) < 0)
+        goto rtattr_failure;
+
+    read_unlock_bh(&tbl->lock);
+    return NLMSG_END(skb, nlh);
+
+rtattr_failure:
+    read_unlock_bh(&tbl->lock);
+    return NLMSG_CANCEL(skb, nlh);
+
+nlmsg_failure:
+    return -1;
+}
+
+static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
+                              int ifindex)
+{
+    struct neigh_parms *p;
+
+    for (p = &tbl->parms; p; p = p->next)
+        if ((p->dev && p->dev->ifindex == ifindex) ||
+            (!p->dev && !ifindex))
+            return p;
+
+    return NULL;
+}
+
+int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+    struct neigh_table *tbl;
+    struct ndtmsg *ndtmsg = NLMSG_DATA(nlh);
+    struct rtattr **tb = arg;
+    int err = -EINVAL;
+
+    if (!tb[NDTA_NAME - 1] || !RTA_PAYLOAD(tb[NDTA_NAME - 1]))
+        return -EINVAL;
+
+    read_lock(&neigh_tbl_lock);
+    for (tbl = neigh_tables; tbl; tbl = tbl->next) {
+        if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
+            continue;
+
+        if (!rtattr_strcmp(tb[NDTA_NAME - 1], tbl->id))
+            break;
+    }
+
+    if (tbl == NULL) {
+        err = -ENOENT;
+        goto errout;
+    }
+
+    /*
+     * We acquire tbl->lock to be nice to the periodic timers and
+     * make sure they always see a consistent set of values.
+     */
+    write_lock_bh(&tbl->lock);
+
+    if (tb[NDTA_THRESH1 - 1])
+        tbl->gc_thresh1 = RTA_GET_U32(tb[NDTA_THRESH1 - 1]);
+
+    if (tb[NDTA_THRESH2 - 1])
+        tbl->gc_thresh2 = RTA_GET_U32(tb[NDTA_THRESH2 - 1]);
+
+    if (tb[NDTA_THRESH3 - 1])
+        tbl->gc_thresh3 = RTA_GET_U32(tb[NDTA_THRESH3 - 1]);
+
+    if (tb[NDTA_GC_INTERVAL - 1])
+        tbl->gc_interval = RTA_GET_MSECS(tb[NDTA_GC_INTERVAL - 1]);
+
+    if (tb[NDTA_PARMS - 1]) {
+        struct rtattr *tbp[NDTPA_MAX];
+        struct neigh_parms *p;
+        u32 ifindex = 0;
+
+        if (rtattr_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS - 1]) < 0)
+            goto rtattr_failure;
+
+        if (tbp[NDTPA_IFINDEX - 1])
+            ifindex = RTA_GET_U32(tbp[NDTPA_IFINDEX - 1]);
+
+        p = lookup_neigh_params(tbl, ifindex);
+        if (p == NULL) {
+            err = -ENOENT;
+            goto rtattr_failure;
+        }
+
+        if (tbp[NDTPA_QUEUE_LEN - 1])
+            p->queue_len = RTA_GET_U32(tbp[NDTPA_QUEUE_LEN - 1]);
+
+        if (tbp[NDTPA_PROXY_QLEN - 1])
+            p->proxy_qlen = RTA_GET_U32(tbp[NDTPA_PROXY_QLEN - 1]);
+
+        if (tbp[NDTPA_APP_PROBES - 1])
+            p->app_probes = RTA_GET_U32(tbp[NDTPA_APP_PROBES - 1]);
+
+        if (tbp[NDTPA_UCAST_PROBES - 1])
+            p->ucast_probes =
+                RTA_GET_U32(tbp[NDTPA_UCAST_PROBES - 1]);
+
+        if (tbp[NDTPA_MCAST_PROBES - 1])
+            p->mcast_probes =
+                RTA_GET_U32(tbp[NDTPA_MCAST_PROBES - 1]);
+
+        if (tbp[NDTPA_BASE_REACHABLE_TIME - 1])
+            p->base_reachable_time =
+                RTA_GET_MSECS(tbp[NDTPA_BASE_REACHABLE_TIME - 1]);
+
+        if (tbp[NDTPA_GC_STALETIME - 1])
+            p->gc_staletime =
+                RTA_GET_MSECS(tbp[NDTPA_GC_STALETIME - 1]);
+
+        if (tbp[NDTPA_DELAY_PROBE_TIME - 1])
+            p->delay_probe_time =
+                RTA_GET_MSECS(tbp[NDTPA_DELAY_PROBE_TIME - 1]);
+
+        if (tbp[NDTPA_RETRANS_TIME - 1])
+            p->retrans_time =
+                RTA_GET_MSECS(tbp[NDTPA_RETRANS_TIME - 1]);
+
+        if (tbp[NDTPA_ANYCAST_DELAY - 1])
+            p->anycast_delay =
+                RTA_GET_MSECS(tbp[NDTPA_ANYCAST_DELAY - 1]);
+
+        if (tbp[NDTPA_PROXY_DELAY - 1])
+            p->proxy_delay =
+                RTA_GET_MSECS(tbp[NDTPA_PROXY_DELAY - 1]);
+
+        if (tbp[NDTPA_LOCKTIME - 1])
+            p->locktime = RTA_GET_MSECS(tbp[NDTPA_LOCKTIME - 1]);
+    }
+
+    err = 0;
+
+rtattr_failure:
+    write_unlock_bh(&tbl->lock);
+errout:
+    read_unlock(&neigh_tbl_lock);
+    return err;
+}
+
+int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
+{
+    int idx, family;
+    int s_idx = cb->args[0];
+    struct neigh_table *tbl;
+
+    family = ((struct rtgenmsg *)NLMSG_DATA(cb->nlh))->rtgen_family;
+
+    read_lock(&neigh_tbl_lock);
+    for (tbl = neigh_tables, idx = 0; tbl; tbl = tbl->next) {
+        struct neigh_parms *p;
+
+        if (idx < s_idx || (family && tbl->family != family))
+            continue;
+
+        if (neightbl_fill_info(tbl, skb, cb) <= 0)
+            break;
+
+        for (++idx, p = tbl->parms.next; p; p = p->next, idx++) {
+            if (idx < s_idx)
+                continue;
+
+            if (neightbl_fill_param_info(tbl, p, skb, cb) <= 0)
+                goto out;
+        }
+
+    }
+out:
+    read_unlock(&neigh_tbl_lock);
+    cb->args[0] = idx;
+
+    return skb->len;
+}
 
 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *n,
-               u32 pid, u32 seq, int event)
+               u32 pid, u32 seq, int event, unsigned int flags)
 {
    unsigned long now = jiffies;
    unsigned char *b = skb->tail;
    struct nda_cacheinfo ci;
    int locked = 0;
    u32 probes;
-    struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq, event,
-                     sizeof(struct ndmsg));
+    struct nlmsghdr *nlh = NLMSG_NEW(skb, pid, seq, event,
+                     sizeof(struct ndmsg), flags);
    struct ndmsg *ndm = NLMSG_DATA(nlh);
 
-    nlh->nlmsg_flags = pid ? NLM_F_MULTI : 0;
    ndm->ndm_family  = n->ops->family;
    ndm->ndm_flags   = n->flags;
    ndm->ndm_type    = n->type;
@@ -1609,7 +1920,8 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
                continue;
            if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
                        cb->nlh->nlmsg_seq,
-                        RTM_NEWNEIGH) <= 0) {
+                        RTM_NEWNEIGH,
+                        NLM_F_MULTI) <= 0) {
                read_unlock_bh(&tbl->lock);
                rc = -1;
                goto out;
@@ -2018,7 +2330,7 @@ void neigh_app_ns(struct neighbour *n)
    if (!skb)
        return;
 
-    if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH) < 0) {
+    if (neigh_fill_info(skb, n, 0, 0, RTM_GETNEIGH, 0) < 0) {
        kfree_skb(skb);
        return;
    }
@@ -2037,7 +2349,7 @@ static void neigh_app_notify(struct neighbour *n)
    if (!skb)
        return;
 
-    if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH) < 0) {
+    if (neigh_fill_info(skb, n, 0, 0, RTM_NEWNEIGH, 0) < 0) {
        kfree_skb(skb);
        return;
    }
@@ -2281,7 +2593,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
        t->neigh_vars[17].extra1 = dev;
    }
 
-    dev_name = net_sysctl_strdup(dev_name_source);
+    dev_name = kstrdup(dev_name_source, GFP_KERNEL);
    if (!dev_name) {
        err = -ENOBUFS;
        goto free;
@@ -2352,6 +2664,8 @@ EXPORT_SYMBOL(neigh_update);
 EXPORT_SYMBOL(neigh_update_hhs);
 EXPORT_SYMBOL(pneigh_enqueue);
 EXPORT_SYMBOL(pneigh_lookup);
+EXPORT_SYMBOL(neightbl_dump_info);
+EXPORT_SYMBOL(neightbl_set);
 
 #ifdef CONFIG_ARPD
 EXPORT_SYMBOL(neigh_app_ns);
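The neighbour.c additions expose neighbour-table configuration over rtnetlink via the new RTM_GETNEIGHTBL/RTM_SETNEIGHTBL messages. A user-space sketch (not from this patch) of requesting a table dump; the function name is hypothetical:

/* Ask the kernel to dump neighbour tables. Replies come back as
 * NLM_F_MULTI RTM_NEWNEIGHTBL messages carrying NDTA_* attributes
 * (NDTA_NAME, thresholds, the NDTA_PARMS nest, NDTA_CONFIG, NDTA_STATS). */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int request_neightbl_dump(void)
{
    struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
    struct {
        struct nlmsghdr nlh;
        struct rtgenmsg g;
    } req;
    int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

    if (fd < 0)
        return -1;

    memset(&req, 0, sizeof(req));
    req.nlh.nlmsg_len   = NLMSG_LENGTH(sizeof(req.g));
    req.nlh.nlmsg_type  = RTM_GETNEIGHTBL;      /* added by this series */
    req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
    req.g.rtgen_family  = AF_UNSPEC;            /* all address families */

    if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
           (struct sockaddr *)&sa, sizeof(sa)) < 0) {
        close(fd);
        return -1;
    }
    return fd;      /* caller recv()s the multipart reply */
}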
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 060f703659e..e2137f3e489 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -21,6 +21,7 @@
 #define to_net_dev(class) container_of(class, struct net_device, class_dev)
 
 static const char fmt_hex[] = "%#x\n";
+static const char fmt_long_hex[] = "%#lx\n";
 static const char fmt_dec[] = "%d\n";
 static const char fmt_ulong[] = "%lu\n";
 
@@ -91,7 +92,7 @@ static CLASS_DEVICE_ATTR(field, S_IRUGO, show_##field, NULL)        \
 NETDEVICE_ATTR(addr_len, fmt_dec);
 NETDEVICE_ATTR(iflink, fmt_dec);
 NETDEVICE_ATTR(ifindex, fmt_dec);
-NETDEVICE_ATTR(features, fmt_hex);
+NETDEVICE_ATTR(features, fmt_long_hex);
 NETDEVICE_ATTR(type, fmt_dec);
 
 /* use same locking rules as GIFHWADDR ioctl's */
@@ -184,6 +185,22 @@ static ssize_t store_tx_queue_len(struct class_device *dev, const char *buf, siz
 static CLASS_DEVICE_ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
             store_tx_queue_len);
 
+NETDEVICE_SHOW(weight, fmt_dec);
+
+static int change_weight(struct net_device *net, unsigned long new_weight)
+{
+    net->weight = new_weight;
+    return 0;
+}
+
+static ssize_t store_weight(struct class_device *dev, const char *buf, size_t len)
+{
+    return netdev_store(dev, buf, len, change_weight);
+}
+
+static CLASS_DEVICE_ATTR(weight, S_IRUGO | S_IWUSR, show_weight,
+             store_weight);
+
 static struct class_device_attribute *net_class_attributes[] = {
    &class_device_attr_ifindex,
@@ -193,6 +210,7 @@ static struct class_device_attribute *net_class_attributes[] = {
    &class_device_attr_features,
    &class_device_attr_mtu,
    &class_device_attr_flags,
+    &class_device_attr_weight,
    &class_device_attr_type,
    &class_device_attr_address,
    &class_device_attr_broadcast,
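The new sysfs attribute makes the per-device NAPI weight tunable at runtime. An illustrative user-space snippet (the interface name and weight value are placeholders):

/* Writing /sys/class/net/<dev>/weight adjusts how many packets one
 * poll round for that device may consume. */
#include <stdio.h>

int set_dev_weight(const char *ifname, int weight)
{
    char path[128];
    FILE *f;

    snprintf(path, sizeof(path), "/sys/class/net/%s/weight", ifname);
    f = fopen(path, "w");
    if (!f)
        return -1;
    fprintf(f, "%d\n", weight);
    return fclose(f);
}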
diff --git a/net/core/netfilter.c b/net/core/netfilter.c
index 22a8f127c4a..076c156d5ed 100644
--- a/net/core/netfilter.c
+++ b/net/core/netfilter.c
@@ -141,136 +141,6 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
    up(&nf_sockopt_mutex);
 }
 
-#ifdef CONFIG_NETFILTER_DEBUG
-#include <net/ip.h>
-#include <net/tcp.h>
-#include <linux/netfilter_ipv4.h>
-
-static void debug_print_hooks_ip(unsigned int nf_debug)
-{
-    if (nf_debug & (1 << NF_IP_PRE_ROUTING)) {
-        printk("PRE_ROUTING ");
-        nf_debug ^= (1 << NF_IP_PRE_ROUTING);
-    }
-    if (nf_debug & (1 << NF_IP_LOCAL_IN)) {
-        printk("LOCAL_IN ");
-        nf_debug ^= (1 << NF_IP_LOCAL_IN);
-    }
-    if (nf_debug & (1 << NF_IP_FORWARD)) {
-        printk("FORWARD ");
-        nf_debug ^= (1 << NF_IP_FORWARD);
-    }
-    if (nf_debug & (1 << NF_IP_LOCAL_OUT)) {
-        printk("LOCAL_OUT ");
-        nf_debug ^= (1 << NF_IP_LOCAL_OUT);
-    }
-    if (nf_debug & (1 << NF_IP_POST_ROUTING)) {
-        printk("POST_ROUTING ");
-        nf_debug ^= (1 << NF_IP_POST_ROUTING);
-    }
-    if (nf_debug)
-        printk("Crap bits: 0x%04X", nf_debug);
-    printk("\n");
-}
-
-static void nf_dump_skb(int pf, struct sk_buff *skb)
-{
-    printk("skb: pf=%i %s dev=%s len=%u\n",
-           pf,
-           skb->sk ? "(owned)" : "(unowned)",
-           skb->dev ? skb->dev->name : "(no dev)",
-           skb->len);
-    switch (pf) {
-    case PF_INET: {
-        const struct iphdr *ip = skb->nh.iph;
-        __u32 *opt = (__u32 *) (ip + 1);
-        int opti;
-        __u16 src_port = 0, dst_port = 0;
-
-        if (ip->protocol == IPPROTO_TCP
-            || ip->protocol == IPPROTO_UDP) {
-            struct tcphdr *tcp=(struct tcphdr *)((__u32 *)ip+ip->ihl);
-            src_port = ntohs(tcp->source);
-            dst_port = ntohs(tcp->dest);
-        }
-
-        printk("PROTO=%d %u.%u.%u.%u:%hu %u.%u.%u.%u:%hu"
-               " L=%hu S=0x%2.2hX I=%hu F=0x%4.4hX T=%hu",
-               ip->protocol, NIPQUAD(ip->saddr),
-               src_port, NIPQUAD(ip->daddr),
-               dst_port,
-               ntohs(ip->tot_len), ip->tos, ntohs(ip->id),
-               ntohs(ip->frag_off), ip->ttl);
-
-        for (opti = 0; opti < (ip->ihl - sizeof(struct iphdr) / 4); opti++)
-            printk(" O=0x%8.8X", *opt++);
-        printk("\n");
-    }
-    }
-}
-
-void nf_debug_ip_local_deliver(struct sk_buff *skb)
-{
-    /* If it's a loopback packet, it must have come through
-     * NF_IP_LOCAL_OUT, NF_IP_RAW_INPUT, NF_IP_PRE_ROUTING and
-     * NF_IP_LOCAL_IN.  Otherwise, must have gone through
-     * NF_IP_RAW_INPUT and NF_IP_PRE_ROUTING.  */
-    if (!skb->dev) {
-        printk("ip_local_deliver: skb->dev is NULL.\n");
-    } else {
-        if (skb->nf_debug != ((1<<NF_IP_PRE_ROUTING)
-                      | (1<<NF_IP_LOCAL_IN))) {
-            printk("ip_local_deliver: bad skb: ");
-            debug_print_hooks_ip(skb->nf_debug);
-            nf_dump_skb(PF_INET, skb);
-        }
-    }
-}
-
-void nf_debug_ip_loopback_xmit(struct sk_buff *newskb)
-{
-    if (newskb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
-                 | (1 << NF_IP_POST_ROUTING))) {
-        printk("ip_dev_loopback_xmit: bad owned skb = %p: ",
-               newskb);
-        debug_print_hooks_ip(newskb->nf_debug);
-        nf_dump_skb(PF_INET, newskb);
-    }
-}
-
-void nf_debug_ip_finish_output2(struct sk_buff *skb)
-{
-    /* If it's owned, it must have gone through the
-     * NF_IP_LOCAL_OUT and NF_IP_POST_ROUTING.
-     * Otherwise, must have gone through
-     * NF_IP_PRE_ROUTING, NF_IP_FORWARD and NF_IP_POST_ROUTING.
-     */
-    if (skb->sk) {
-        if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
-                      | (1 << NF_IP_POST_ROUTING))) {
-            printk("ip_finish_output: bad owned skb = %p: ", skb);
-            debug_print_hooks_ip(skb->nf_debug);
-            nf_dump_skb(PF_INET, skb);
-        }
-    } else {
-        if (skb->nf_debug != ((1 << NF_IP_PRE_ROUTING)
-                      | (1 << NF_IP_FORWARD)
-                      | (1 << NF_IP_POST_ROUTING))) {
-            /* Fragments, entunnelled packets, TCP RSTs
-               generated by ipt_REJECT will have no
-               owners, but still may be local */
-            if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
-                          | (1 << NF_IP_POST_ROUTING))){
-                printk("ip_finish_output:"
-                       " bad unowned skb = %p: ",skb);
-                debug_print_hooks_ip(skb->nf_debug);
-                nf_dump_skb(PF_INET, skb);
-            }
-        }
-    }
-}
-#endif /*CONFIG_NETFILTER_DEBUG*/
-
 /* Call get/setsockopt() */
 static int nf_sockopt(struct sock *sk, int pf, int val,
              char __user *opt, int *len, int get)
@@ -488,14 +358,6 @@ int nf_hook_slow(int pf, unsigned int hook, struct sk_buff **pskb,
    /* We may already have this, but read-locks nest anyway */
    rcu_read_lock();
 
-#ifdef CONFIG_NETFILTER_DEBUG
-    if (unlikely((*pskb)->nf_debug & (1 << hook))) {
-        printk("nf_hook: hook %i already set.\n", hook);
-        nf_dump_skb(pf, *pskb);
-    }
-    (*pskb)->nf_debug |= (1 << hook);
-#endif
-
    elem = &nf_hooks[pf][hook];
next_hook:
    verdict = nf_iterate(&nf_hooks[pf][hook], pskb, hook, indev,
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a119696d552..c327c9edadc 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -130,19 +130,20 @@ static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
  */
 static void poll_napi(struct netpoll *np)
 {
+    struct netpoll_info *npinfo = np->dev->npinfo;
    int budget = 16;
 
    if (test_bit(__LINK_STATE_RX_SCHED, &np->dev->state) &&
-        np->poll_owner != smp_processor_id() &&
-        spin_trylock(&np->poll_lock)) {
-        np->rx_flags |= NETPOLL_RX_DROP;
+        npinfo->poll_owner != smp_processor_id() &&
+        spin_trylock(&npinfo->poll_lock)) {
+        npinfo->rx_flags |= NETPOLL_RX_DROP;
        atomic_inc(&trapped);
 
        np->dev->poll(np->dev, &budget);
 
        atomic_dec(&trapped);
-        np->rx_flags &= ~NETPOLL_RX_DROP;
-        spin_unlock(&np->poll_lock);
+        npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+        spin_unlock(&npinfo->poll_lock);
    }
 }
 
@@ -245,6 +246,7 @@ repeat:
 static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
    int status;
+    struct netpoll_info *npinfo;
 
 repeat:
    if(!np || !np->dev || !netif_running(np->dev)) {
@@ -253,8 +255,9 @@ repeat:
    }
 
    /* avoid recursion */
-    if(np->poll_owner == smp_processor_id() ||
-       np->dev->xmit_lock_owner == smp_processor_id()) {
+    npinfo = np->dev->npinfo;
+    if (npinfo->poll_owner == smp_processor_id() ||
+        np->dev->xmit_lock_owner == smp_processor_id()) {
        if (np->drop)
            np->drop(skb);
        else
@@ -341,14 +344,22 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 
 static void arp_reply(struct sk_buff *skb)
 {
+    struct netpoll_info *npinfo = skb->dev->npinfo;
    struct arphdr *arp;
    unsigned char *arp_ptr;
    int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;
    u32 sip, tip;
+    unsigned long flags;
    struct sk_buff *send_skb;
-    struct netpoll *np = skb->dev->np;
+    struct netpoll *np = NULL;
+
+    spin_lock_irqsave(&npinfo->rx_lock, flags);
+    if (npinfo->rx_np && npinfo->rx_np->dev == skb->dev)
+        np = npinfo->rx_np;
+    spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 
-    if (!np) return;
+    if (!np)
+        return;
 
    /* No arp on this interface */
    if (skb->dev->flags & IFF_NOARP)
@@ -429,9 +440,9 @@ int __netpoll_rx(struct sk_buff *skb)
    int proto, len, ulen;
    struct iphdr *iph;
    struct udphdr *uh;
-    struct netpoll *np = skb->dev->np;
+    struct netpoll *np = skb->dev->npinfo->rx_np;
 
-    if (!np->rx_hook)
+    if (!np)
        goto out;
    if (skb->dev->type != ARPHRD_ETHER)
        goto out;
@@ -611,9 +622,8 @@ int netpoll_setup(struct netpoll *np)
 {
    struct net_device *ndev = NULL;
    struct in_device *in_dev;
-
-    np->poll_lock = SPIN_LOCK_UNLOCKED;
-    np->poll_owner = -1;
+    struct netpoll_info *npinfo;
+    unsigned long flags;
 
    if (np->dev_name)
        ndev = dev_get_by_name(np->dev_name);
@@ -624,7 +634,17 @@ int netpoll_setup(struct netpoll *np)
    }
 
    np->dev = ndev;
-    ndev->np = np;
+    if (!ndev->npinfo) {
+        npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
+        if (!npinfo)
+            goto release;
+
+        npinfo->rx_np = NULL;
+        npinfo->poll_lock = SPIN_LOCK_UNLOCKED;
+        npinfo->poll_owner = -1;
+        npinfo->rx_lock = SPIN_LOCK_UNLOCKED;
+    } else
+        npinfo = ndev->npinfo;
 
    if (!ndev->poll_controller) {
        printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
@@ -692,13 +712,20 @@ int netpoll_setup(struct netpoll *np)
               np->name, HIPQUAD(np->local_ip));
    }
 
-    if(np->rx_hook)
-        np->rx_flags = NETPOLL_RX_ENABLED;
+    if (np->rx_hook) {
+        spin_lock_irqsave(&npinfo->rx_lock, flags);
+        npinfo->rx_flags |= NETPOLL_RX_ENABLED;
+        npinfo->rx_np = np;
+        spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+    }
+
+    /* last thing to do is link it to the net device structure */
+    ndev->npinfo = npinfo;
 
    return 0;
 
  release:
-    ndev->np = NULL;
+    if (!ndev->npinfo)
+        kfree(npinfo);
    np->dev = NULL;
    dev_put(ndev);
    return -1;
@@ -706,9 +733,20 @@ int netpoll_setup(struct netpoll *np)
 
 void netpoll_cleanup(struct netpoll *np)
 {
-    if (np->dev)
-        np->dev->np = NULL;
-    dev_put(np->dev);
+    struct netpoll_info *npinfo;
+    unsigned long flags;
+
+    if (np->dev) {
+        npinfo = np->dev->npinfo;
+        if (npinfo && npinfo->rx_np == np) {
+            spin_lock_irqsave(&npinfo->rx_lock, flags);
+            npinfo->rx_np = NULL;
+            npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
+            spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+        }
+        dev_put(np->dev);
+    }
+
+    np->dev = NULL;
 }
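The netpoll rework moves the shared per-device state (poll_lock, poll_owner, rx_np, rx_lock) out of struct netpoll into a new struct netpoll_info hung off the net_device, so multiple clients can share one NIC. A hedged sketch of a netconsole-style client under the 2.6.12-era field layout; names and addresses are placeholders, and IP addresses would normally come from netpoll option parsing:

static struct netpoll my_np = {
    .name        = "mynetpoll",
    .dev_name    = "eth0",          /* NIC to bind to */
    .local_port  = 6665,
    .remote_port = 6666,
    .remote_mac  = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
    /* .local_ip / .remote_ip left as placeholders here */
};

static int __init my_init(void)
{
    /* netpoll_setup() now allocates or reuses ndev->npinfo */
    if (netpoll_setup(&my_np))
        return -EINVAL;

    netpoll_send_udp(&my_np, "hello\n", 6);
    return 0;
}

static void __exit my_exit(void)
{
    netpoll_cleanup(&my_np);    /* detaches rx_np under rx_lock */
}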
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
new file mode 100644
index 00000000000..bb55675f068
--- /dev/null
+++ b/net/core/request_sock.c
@@ -0,0 +1,64 @@
+/*
+ * NET		Generic infrastructure for Network protocols.
+ *
+ * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * 		From code originally in include/net/tcp.h
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <net/request_sock.h>
+
+/*
+ * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
+ * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
+ * It would be better to replace it with a global counter for all sockets
+ * but then some measure against one socket starving all other sockets
+ * would be needed.
+ *
+ * It was 128 by default. Experiments with real servers show that
+ * it is absolutely not enough even at 100conn/sec. 256 cures most
+ * of problems. This value is adjusted to 128 for very small machines
+ * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
+ * Further increasing requires to change hash table size.
+ */
+int sysctl_max_syn_backlog = 256;
+EXPORT_SYMBOL(sysctl_max_syn_backlog);
+
+int reqsk_queue_alloc(struct request_sock_queue *queue,
+              const int nr_table_entries)
+{
+    const int lopt_size = sizeof(struct listen_sock) +
+                  nr_table_entries * sizeof(struct request_sock *);
+    struct listen_sock *lopt = kmalloc(lopt_size, GFP_KERNEL);
+
+    if (lopt == NULL)
+        return -ENOMEM;
+
+    memset(lopt, 0, lopt_size);
+
+    for (lopt->max_qlen_log = 6;
+         (1 << lopt->max_qlen_log) < sysctl_max_syn_backlog;
+         lopt->max_qlen_log++);
+
+    get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
+    rwlock_init(&queue->syn_wait_lock);
+    queue->rskq_accept_head = queue->rskq_accept_head = NULL;
+
+    write_lock_bh(&queue->syn_wait_lock);
+    queue->listen_opt = lopt;
+    write_unlock_bh(&queue->syn_wait_lock);
+
+    return 0;
+}
+
+EXPORT_SYMBOL(reqsk_queue_alloc);
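The max_qlen_log loop in reqsk_queue_alloc() picks the smallest power-of-two exponent, with a floor of 6, whose value covers sysctl_max_syn_backlog. A small stand-alone demonstration of that arithmetic (ordinary user-space C, not kernel code):

/* max_qlen_log is the smallest log >= 6 with (1 << log) >= backlog. */
#include <stdio.h>

int main(void)
{
    int backlogs[] = { 64, 256, 1024 };
    int i;

    for (i = 0; i < 3; i++) {
        int log;

        for (log = 6; (1 << log) < backlogs[i]; log++)
            ;
        /* prints: 64 -> 6, 256 -> 8, 1024 -> 10 */
        printf("max_syn_backlog=%d -> max_qlen_log=%d (max qlen %d)\n",
               backlogs[i], log, 1 << log);
    }
    return 0;
}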
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 00caf4b318b..e013d836a7a 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -100,6 +100,7 @@ static const int rtm_min[RTM_NR_FAMILIES] =
    [RTM_FAM(RTM_NEWPREFIX)]    = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
    [RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
    [RTM_FAM(RTM_GETANYCAST)]   = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
+    [RTM_FAM(RTM_NEWNEIGHTBL)]  = NLMSG_LENGTH(sizeof(struct ndtmsg)),
 };
 
 static const int rta_max[RTM_NR_FAMILIES] =
@@ -113,6 +114,7 @@ static const int rta_max[RTM_NR_FAMILIES] =
    [RTM_FAM(RTM_NEWTCLASS)]    = TCA_MAX,
    [RTM_FAM(RTM_NEWTFILTER)]   = TCA_MAX,
    [RTM_FAM(RTM_NEWACTION)]    = TCAA_MAX,
+    [RTM_FAM(RTM_NEWNEIGHTBL)]  = NDTA_MAX,
 };
 
 void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
@@ -176,14 +178,14 @@ rtattr_failure:
 
 
 static int rtnetlink_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
-                 int type, u32 pid, u32 seq, u32 change)
+                 int type, u32 pid, u32 seq, u32 change,
+                 unsigned int flags)
 {
    struct ifinfomsg *r;
    struct nlmsghdr  *nlh;
    unsigned char    *b = skb->tail;
 
-    nlh = NLMSG_PUT(skb, pid, seq, type, sizeof(*r));
-    if (pid) nlh->nlmsg_flags |= NLM_F_MULTI;
+    nlh = NLMSG_NEW(skb, pid, seq, type, sizeof(*r), flags);
    r = NLMSG_DATA(nlh);
    r->ifi_family = AF_UNSPEC;
    r->ifi_type = dev->type;
@@ -273,7 +275,10 @@ static int rtnetlink_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *c
    for (dev=dev_base, idx=0; dev; dev = dev->next, idx++) {
        if (idx < s_idx)
            continue;
-        if (rtnetlink_fill_ifinfo(skb, dev, RTM_NEWLINK, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, 0) <= 0)
+        if (rtnetlink_fill_ifinfo(skb, dev, RTM_NEWLINK,
+                      NETLINK_CB(cb->skb).pid,
+                      cb->nlh->nlmsg_seq, 0,
+                      NLM_F_MULTI) <= 0)
            break;
    }
    read_unlock(&dev_base_lock);
@@ -447,7 +452,7 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
    if (!skb)
        return;
 
-    if (rtnetlink_fill_ifinfo(skb, dev, type, 0, 0, change) < 0) {
+    if (rtnetlink_fill_ifinfo(skb, dev, type, current->pid, 0, change, 0) < 0) {
        kfree_skb(skb);
        return;
    }
@@ -649,14 +654,16 @@ static void rtnetlink_rcv(struct sock *sk, int len)
 
 static struct rtnetlink_link link_rtnetlink_table[RTM_NR_MSGTYPES] =
 {
-    [RTM_GETLINK  - RTM_BASE] = { .dumpit = rtnetlink_dump_ifinfo },
-    [RTM_SETLINK  - RTM_BASE] = { .doit   = do_setlink            },
-    [RTM_GETADDR  - RTM_BASE] = { .dumpit = rtnetlink_dump_all    },
-    [RTM_GETROUTE - RTM_BASE] = { .dumpit = rtnetlink_dump_all    },
-    [RTM_NEWNEIGH - RTM_BASE] = { .doit   = neigh_add             },
-    [RTM_DELNEIGH - RTM_BASE] = { .doit   = neigh_delete          },
-    [RTM_GETNEIGH - RTM_BASE] = { .dumpit = neigh_dump_info       },
-    [RTM_GETRULE  - RTM_BASE] = { .dumpit = rtnetlink_dump_all    },
+    [RTM_GETLINK     - RTM_BASE] = { .dumpit = rtnetlink_dump_ifinfo },
+    [RTM_SETLINK     - RTM_BASE] = { .doit   = do_setlink            },
+    [RTM_GETADDR     - RTM_BASE] = { .dumpit = rtnetlink_dump_all    },
+    [RTM_GETROUTE    - RTM_BASE] = { .dumpit = rtnetlink_dump_all    },
+    [RTM_NEWNEIGH    - RTM_BASE] = { .doit   = neigh_add             },
+    [RTM_DELNEIGH    - RTM_BASE] = { .doit   = neigh_delete          },
+    [RTM_GETNEIGH    - RTM_BASE] = { .dumpit = neigh_dump_info       },
+    [RTM_GETRULE     - RTM_BASE] = { .dumpit = rtnetlink_dump_all    },
+    [RTM_GETNEIGHTBL - RTM_BASE] = { .dumpit = neightbl_dump_info    },
+    [RTM_SETNEIGHTBL - RTM_BASE] = { .doit   = neightbl_set          },
 };
 
 static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f65b3de590a..bb73b2190ec 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -365,9 +365,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
    C(nfct);
    nf_conntrack_get(skb->nfct);
    C(nfctinfo);
-#ifdef CONFIG_NETFILTER_DEBUG
-    C(nf_debug);
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
    C(nf_bridge);
    nf_bridge_get(skb->nf_bridge);
@@ -432,9 +429,6 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
    new->nfct   = old->nfct;
    nf_conntrack_get(old->nfct);
    new->nfctinfo   = old->nfctinfo;
-#ifdef CONFIG_NETFILTER_DEBUG
-    new->nf_debug   = old->nf_debug;
-#endif
 #ifdef CONFIG_BRIDGE_NETFILTER
    new->nf_bridge  = old->nf_bridge;
    nf_bridge_get(old->nf_bridge);
@@ -1506,6 +1500,159 @@ void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
        skb_split_no_header(skb, skb1, len, pos);
 }
 
+/**
+ * skb_prepare_seq_read - Prepare a sequential read of skb data
+ * @skb: the buffer to read
+ * @from: lower offset of data to be read
+ * @to: upper offset of data to be read
+ * @st: state variable
+ *
+ * Initializes the specified state variable. Must be called before
+ * invoking skb_seq_read() for the first time.
+ */
+void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
+              unsigned int to, struct skb_seq_state *st)
+{
+    st->lower_offset = from;
+    st->upper_offset = to;
+    st->root_skb = st->cur_skb = skb;
+    st->frag_idx = st->stepped_offset = 0;
+    st->frag_data = NULL;
+}
+
+/**
+ * skb_seq_read - Sequentially read skb data
+ * @consumed: number of bytes consumed by the caller so far
+ * @data: destination pointer for data to be returned
+ * @st: state variable
+ *
+ * Reads a block of skb data at &consumed relative to the
+ * lower offset specified to skb_prepare_seq_read(). Assigns
+ * the head of the data block to &data and returns the length
+ * of the block or 0 if the end of the skb data or the upper
+ * offset has been reached.
+ *
+ * The caller is not required to consume all of the data
+ * returned, i.e. &consumed is typically set to the number
+ * of bytes already consumed and the next call to
+ * skb_seq_read() will return the remaining part of the block.
+ *
+ * Note: The size of each block of data returned can be arbitrary,
+ *       this limitation is the cost for zerocopy sequential
+ *       reads of potentially non linear data.
+ *
+ * Note: Fragment lists within fragments are not implemented
+ *       at the moment, state->root_skb could be replaced with
+ *       a stack for this purpose.
+ */
+unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
+              struct skb_seq_state *st)
+{
+    unsigned int block_limit, abs_offset = consumed + st->lower_offset;
+    skb_frag_t *frag;
+
+    if (unlikely(abs_offset >= st->upper_offset))
+        return 0;
+
+next_skb:
+    block_limit = skb_headlen(st->cur_skb);
+
+    if (abs_offset < block_limit) {
+        *data = st->cur_skb->data + abs_offset;
+        return block_limit - abs_offset;
+    }
+
+    if (st->frag_idx == 0 && !st->frag_data)
+        st->stepped_offset += skb_headlen(st->cur_skb);
+
+    while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
+        frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
+        block_limit = frag->size + st->stepped_offset;
+
+        if (abs_offset < block_limit) {
+            if (!st->frag_data)
+                st->frag_data = kmap_skb_frag(frag);
+
+            *data = (u8 *) st->frag_data + frag->page_offset +
+                (abs_offset - st->stepped_offset);
+
+            return block_limit - abs_offset;
+        }
+
+        if (st->frag_data) {
+            kunmap_skb_frag(st->frag_data);
+            st->frag_data = NULL;
+        }
+
+        st->frag_idx++;
+        st->stepped_offset += frag->size;
+    }
+
+    if (st->cur_skb->next) {
+        st->cur_skb = st->cur_skb->next;
+        st->frag_idx = 0;
+        goto next_skb;
+    } else if (st->root_skb == st->cur_skb &&
+           skb_shinfo(st->root_skb)->frag_list) {
+        st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
+        goto next_skb;
+    }
+
+    return 0;
+}
+
+/**
+ * skb_abort_seq_read - Abort a sequential read of skb data
+ * @st: state variable
+ *
+ * Must be called if skb_seq_read() was not called until it
+ * returned 0.
+ */
+void skb_abort_seq_read(struct skb_seq_state *st)
+{
+    if (st->frag_data)
+        kunmap_skb_frag(st->frag_data);
+}
+
+#define TS_SKB_CB(state)    ((struct skb_seq_state *) &((state)->cb))
+
+static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
+                      struct ts_config *conf,
+                      struct ts_state *state)
+{
+    return skb_seq_read(offset, text, TS_SKB_CB(state));
+}
+
+static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
+{
+    skb_abort_seq_read(TS_SKB_CB(state));
+}
+
+/**
+ * skb_find_text - Find a text pattern in skb data
+ * @skb: the buffer to look in
+ * @from: search offset
+ * @to: search limit
+ * @config: textsearch configuration
+ * @state: uninitialized textsearch state variable
+ *
+ * Finds a pattern in the skb data according to the specified
+ * textsearch configuration. Use textsearch_next() to retrieve
+ * subsequent occurrences of the pattern. Returns the offset
+ * to the first occurrence or UINT_MAX if no match was found.
+ */
+unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
+               unsigned int to, struct ts_config *config,
+               struct ts_state *state)
+{
+    config->get_next_block = skb_ts_get_next_block;
+    config->finish = skb_ts_finish;
+
+    skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
+
+    return textsearch_find(config, state);
+}
+
 void __init skb_init(void)
 {
    skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
@@ -1544,3 +1691,7 @@ EXPORT_SYMBOL(skb_queue_tail);
 EXPORT_SYMBOL(skb_unlink);
 EXPORT_SYMBOL(skb_append);
 EXPORT_SYMBOL(skb_split);
+EXPORT_SYMBOL(skb_prepare_seq_read);
+EXPORT_SYMBOL(skb_seq_read);
+EXPORT_SYMBOL(skb_abort_seq_read);
+EXPORT_SYMBOL(skb_find_text);
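A sketch of how a consumer might use the new zero-copy search API added above; the skb_contains() wrapper is hypothetical, and the ts_config setup follows the lib/textsearch conventions this series pairs with:

/* Hypothetical caller, e.g. a string-match classifier. */
#include <linux/skbuff.h>
#include <linux/textsearch.h>

static int skb_contains(struct sk_buff *skb, const char *pattern)
{
    struct ts_config *conf;
    struct ts_state state;
    unsigned int pos;

    conf = textsearch_prepare("kmp", pattern, strlen(pattern),
                  GFP_KERNEL, TS_AUTOLOAD);
    if (IS_ERR(conf))
        return 0;

    /* skb_find_text() walks linear data, paged frags and the
     * frag_list via skb_seq_read() without linearizing the skb. */
    pos = skb_find_text(skb, 0, skb->len, conf, &state);

    textsearch_destroy(conf);
    return pos != UINT_MAX;
}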
diff --git a/net/core/sock.c b/net/core/sock.c
index 96e00b08698..a6ec3ada7f9 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -118,6 +118,7 @@
 #include <linux/netdevice.h>
 #include <net/protocol.h>
 #include <linux/skbuff.h>
+#include <net/request_sock.h>
 #include <net/sock.h>
 #include <net/xfrm.h>
 #include <linux/ipsec.h>
@@ -1363,6 +1364,7 @@ static LIST_HEAD(proto_list);
 
 int proto_register(struct proto *prot, int alloc_slab)
 {
+    char *request_sock_slab_name;
    int rc = -ENOBUFS;
 
    if (alloc_slab) {
@@ -1374,6 +1376,25 @@ int proto_register(struct proto *prot, int alloc_slab)
                   prot->name);
            goto out;
        }
+
+        if (prot->rsk_prot != NULL) {
+            static const char mask[] = "request_sock_%s";
+
+            request_sock_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+            if (request_sock_slab_name == NULL)
+                goto out_free_sock_slab;
+
+            sprintf(request_sock_slab_name, mask, prot->name);
+            prot->rsk_prot->slab = kmem_cache_create(request_sock_slab_name,
+                                 prot->rsk_prot->obj_size, 0,
+                                 SLAB_HWCACHE_ALIGN, NULL, NULL);
+
+            if (prot->rsk_prot->slab == NULL) {
+                printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
+                       prot->name);
+                goto out_free_request_sock_slab_name;
+            }
+        }
    }
 
    write_lock(&proto_list_lock);
@@ -1382,6 +1403,12 @@ int proto_register(struct proto *prot, int alloc_slab)
    rc = 0;
 out:
    return rc;
+out_free_request_sock_slab_name:
+    kfree(request_sock_slab_name);
+out_free_sock_slab:
+    kmem_cache_destroy(prot->slab);
+    prot->slab = NULL;
+    goto out;
 }
 
 EXPORT_SYMBOL(proto_register);
@@ -1395,6 +1422,14 @@ void proto_unregister(struct proto *prot)
        prot->slab = NULL;
    }
 
+    if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
+        const char *name = kmem_cache_name(prot->rsk_prot->slab);
+
+        kmem_cache_destroy(prot->rsk_prot->slab);
+        kfree(name);
+        prot->rsk_prot->slab = NULL;
+    }
+
    list_del(&prot->node);
    write_unlock(&proto_list_lock);
 }
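A hedged sketch of how a protocol opts in to the new request_sock slab, modeled on what TCP does elsewhere in this series; the my_* names are illustrative:

static struct request_sock_ops my_request_sock_ops = {
    .family   = PF_INET,
    .obj_size = sizeof(struct request_sock), /* usually a larger subclass */
    /* .rtx_syn_ack, .send_ack, ... per-protocol callbacks */
};

static struct proto my_proto = {
    .name     = "MYPROTO",
    .obj_size = sizeof(struct sock),
    .rsk_prot = &my_request_sock_ops,   /* proto_register() now creates a
                                         * "request_sock_MYPROTO" slab */
};

static int __init my_proto_init(void)
{
    return proto_register(&my_proto, 1 /* alloc_slab */);
}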
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index c8be646cb19..8f817ad9f54 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -13,12 +13,8 @@
 #ifdef CONFIG_SYSCTL
 
 extern int netdev_max_backlog;
+extern int netdev_budget;
 extern int weight_p;
-extern int no_cong_thresh;
-extern int no_cong;
-extern int lo_cong;
-extern int mod_cong;
-extern int netdev_fastroute;
 extern int net_msg_cost;
 extern int net_msg_burst;
 
@@ -35,19 +31,6 @@ extern int sysctl_somaxconn;
 extern char sysctl_divert_version[];
 #endif /* CONFIG_NET_DIVERT */
 
-/*
- * This strdup() is used for creating copies of network
- * device names to be handed over to sysctl.
- */
-
-char *net_sysctl_strdup(const char *s)
-{
-    char *rv = kmalloc(strlen(s)+1, GFP_KERNEL);
-    if (rv)
-        strcpy(rv, s);
-    return rv;
-}
-
 ctl_table core_table[] = {
 #ifdef CONFIG_NET
    {
@@ -99,38 +82,6 @@ ctl_table core_table[] = {
        .proc_handler   = &proc_dointvec
    },
    {
-        .ctl_name   = NET_CORE_NO_CONG_THRESH,
-        .procname   = "no_cong_thresh",
-        .data       = &no_cong_thresh,
-        .maxlen     = sizeof(int),
-        .mode       = 0644,
-        .proc_handler   = &proc_dointvec
-    },
-    {
-        .ctl_name   = NET_CORE_NO_CONG,
-        .procname   = "no_cong",
-        .data       = &no_cong,
-        .maxlen     = sizeof(int),
-        .mode       = 0644,
-        .proc_handler   = &proc_dointvec
-    },
-    {
-        .ctl_name   = NET_CORE_LO_CONG,
-        .procname   = "lo_cong",
-        .data       = &lo_cong,
-        .maxlen     = sizeof(int),
-        .mode       = 0644,
-        .proc_handler   = &proc_dointvec
-    },
-    {
-        .ctl_name   = NET_CORE_MOD_CONG,
-        .procname   = "mod_cong",
-        .data       = &mod_cong,
-        .maxlen     = sizeof(int),
-        .mode       = 0644,
-        .proc_handler   = &proc_dointvec
-    },
-    {
        .ctl_name   = NET_CORE_MSG_COST,
        .procname   = "message_cost",
        .data       = &net_msg_cost,
@@ -174,9 +125,15 @@ ctl_table core_table[] = {
        .mode       = 0644,
        .proc_handler   = &proc_dointvec
    },
+    {
+        .ctl_name   = NET_CORE_BUDGET,
+        .procname   = "netdev_budget",
+        .data       = &netdev_budget,
+        .maxlen     = sizeof(int),
+        .mode       = 0644,
+        .proc_handler   = &proc_dointvec
+    },
    { .ctl_name = 0 }
 };
 
-EXPORT_SYMBOL(net_sysctl_strdup);
-
 #endif
diff --git a/net/core/wireless.c b/net/core/wireless.c
index 750cc5daeb0..b2fe378dfbf 100644
--- a/net/core/wireless.c
+++ b/net/core/wireless.c
@@ -2,7 +2,7 @@
  * This file implement the Wireless Extensions APIs.
  *
  * Authors :   Jean Tourrilhes - HPL - <jt@hpl.hp.com>
- * Copyright (c) 1997-2004 Jean Tourrilhes, All Rights Reserved.
+ * Copyright (c) 1997-2005 Jean Tourrilhes, All Rights Reserved.
  *
  * (As all part of the Linux kernel, this file is GPL)
  */
@@ -187,6 +187,12 @@ static const struct iw_ioctl_description standard_ioctl[] = {
        .header_type    = IW_HEADER_TYPE_ADDR,
        .flags      = IW_DESCR_FLAG_DUMP,
    },
+    [SIOCSIWMLME    - SIOCIWFIRST] = {
+        .header_type    = IW_HEADER_TYPE_POINT,
+        .token_size = 1,
+        .min_tokens = sizeof(struct iw_mlme),
+        .max_tokens = sizeof(struct iw_mlme),
+    },
    [SIOCGIWAPLIST  - SIOCIWFIRST] = {
        .header_type    = IW_HEADER_TYPE_POINT,
        .token_size = sizeof(struct sockaddr) +
@@ -195,7 +201,10 @@ static const struct iw_ioctl_description standard_ioctl[] = {
        .flags      = IW_DESCR_FLAG_NOMAX,
    },
    [SIOCSIWSCAN    - SIOCIWFIRST] = {
-        .header_type    = IW_HEADER_TYPE_PARAM,
+        .header_type    = IW_HEADER_TYPE_POINT,
+        .token_size = 1,
+        .min_tokens = 0,
+        .max_tokens = sizeof(struct iw_scan_req),
    },
    [SIOCGIWSCAN    - SIOCIWFIRST] = {
        .header_type    = IW_HEADER_TYPE_POINT,
@@ -273,6 +282,42 @@ static const struct iw_ioctl_description standard_ioctl[] = {
    [SIOCGIWPOWER   - SIOCIWFIRST] = {
        .header_type    = IW_HEADER_TYPE_PARAM,
    },
+    [SIOCSIWGENIE   - SIOCIWFIRST] = {
+        .header_type    = IW_HEADER_TYPE_POINT,
+        .token_size = 1,
+        .max_tokens = IW_GENERIC_IE_MAX,
+    },
+    [SIOCGIWGENIE   - SIOCIWFIRST] = {
+        .header_type    = IW_HEADER_TYPE_POINT,
+        .token_size = 1,
+        .max_tokens = IW_GENERIC_IE_MAX,
+    },
+    [SIOCSIWAUTH    - SIOCIWFIRST] = {
+        .header_type    = IW_HEADER_TYPE_PARAM,
+    },
+    [SIOCGIWAUTH    - SIOCIWFIRST] = {
+        .header_type    = IW_HEADER_TYPE_PARAM,
+    },
+    [SIOCSIWENCODEEXT - SIOCIWFIRST] = {
+        .header_type    = IW_HEADER_TYPE_POINT,
+        .token_size = 1,
+        .min_tokens = sizeof(struct iw_encode_ext),
+        .max_tokens = sizeof(struct iw_encode_ext) +
+                  IW_ENCODING_TOKEN_MAX,
+    },
+    [SIOCGIWENCODEEXT - SIOCIWFIRST] = {
+        .header_type    = IW_HEADER_TYPE_POINT,
+        .token_size = 1,
+        .min_tokens = sizeof(struct iw_encode_ext),
+        .max_tokens = sizeof(struct iw_encode_ext) +
+                  IW_ENCODING_TOKEN_MAX,
+    },
+    [SIOCSIWPMKSA - SIOCIWFIRST] = {
+        .header_type    = IW_HEADER_TYPE_POINT,
+        .token_size = 1,
+        .min_tokens = sizeof(struct iw_pmksa),
+        .max_tokens = sizeof(struct iw_pmksa),
+    },
 };
 static const int standard_ioctl_num = (sizeof(standard_ioctl) /
                       sizeof(struct iw_ioctl_description));
@@ -299,6 +344,31 @@ static const struct iw_ioctl_description standard_event[] = {
    [IWEVEXPIRED    - IWEVFIRST] = {
        .header_type    = IW_HEADER_TYPE_ADDR,
    },
+    [IWEVGENIE  - IWEVFIRST] = {
+        .header_type    = IW_HEADER_TYPE_POINT,
+        .token_size = 1,
+        .max_tokens = IW_GENERIC_IE_MAX,
+    },
+    [IWEVMICHAELMICFAILURE  - IWEVFIRST] = {
+        .header_type    = IW_HEADER_TYPE_POINT,
+        .token_size = 1,
+        .max_tokens = sizeof(struct iw_michaelmicfailure),
+    },
+    [IWEVASSOCREQIE - IWEVFIRST] = {
+        .header_type    = IW_HEADER_TYPE_POINT,
+        .token_size = 1,
+        .max_tokens = IW_GENERIC_IE_MAX,
+    },
+    [IWEVASSOCRESPIE    - IWEVFIRST] = {
+        .header_type    = IW_HEADER_TYPE_POINT,
+        .token_size = 1,
+        .max_tokens = IW_GENERIC_IE_MAX,
+    },
+    [IWEVPMKIDCAND  - IWEVFIRST] = {
+        .header_type    = IW_HEADER_TYPE_POINT,
+        .token_size = 1,
+        .max_tokens = sizeof(struct iw_pmkid_cand),
+    },
 };
 static const int standard_event_num = (sizeof(standard_event) /
                        sizeof(struct iw_ioctl_description));