Diffstat (limited to 'net/core')
-rw-r--r--  net/core/Makefile             1
-rw-r--r--  net/core/datagram.c          89
-rw-r--r--  net/core/dev.c              199
-rw-r--r--  net/core/dev_mcast.c          2
-rw-r--r--  net/core/dst.c                1
-rw-r--r--  net/core/neighbour.c         21
-rw-r--r--  net/core/net-sysfs.c         36
-rw-r--r--  net/core/net_namespace.c      1
-rw-r--r--  net/core/pktgen.c            37
-rw-r--r--  net/core/rtnetlink.c         15
-rw-r--r--  net/core/skb_dma_map.c       66
-rw-r--r--  net/core/skbuff.c            77
-rw-r--r--  net/core/sock.c              13
-rw-r--r--  net/core/stream.c             2
14 files changed, 465 insertions, 95 deletions
diff --git a/net/core/Makefile b/net/core/Makefile
index b1332f6d004..26a37cb3192 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -6,6 +6,7 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
gen_stats.o gen_estimator.o net_namespace.o
obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
+obj-$(CONFIG_HAS_DMA) += skb_dma_map.o
obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \
neighbour.o rtnetlink.o utils.o link_watch.o filter.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index dd61dcad601..ee631843c2f 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -9,7 +9,7 @@
* identical recvmsg() code. So we share it here. The poll was
* shared before but buried in udp.c so I moved it.
*
- * Authors: Alan Cox <alan@redhat.com>. (datagram_poll() from old
+ * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
* udp.c code)
*
* Fixes:
@@ -339,6 +339,93 @@ fault:
return -EFAULT;
}
+/**
+ * skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
+ * @skb: buffer to copy
+ * @offset: offset in the buffer to start copying to
+ * @from: io vector to copy from
+ * @len: amount of data to copy to buffer from iovec
+ *
+ * Returns 0 or -EFAULT.
+ * Note: the iovec is modified during the copy.
+ */
+int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
+ struct iovec *from, int len)
+{
+ int start = skb_headlen(skb);
+ int i, copy = start - offset;
+
+ /* Copy header. */
+ if (copy > 0) {
+ if (copy > len)
+ copy = len;
+ if (memcpy_fromiovec(skb->data + offset, from, copy))
+ goto fault;
+ if ((len -= copy) == 0)
+ return 0;
+ offset += copy;
+ }
+
+ /* Copy paged appendix. Hmm... why does this look so complicated? */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ int end;
+
+ WARN_ON(start > offset + len);
+
+ end = start + skb_shinfo(skb)->frags[i].size;
+ if ((copy = end - offset) > 0) {
+ int err;
+ u8 *vaddr;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ struct page *page = frag->page;
+
+ if (copy > len)
+ copy = len;
+ vaddr = kmap(page);
+ err = memcpy_fromiovec(vaddr + frag->page_offset +
+ offset - start, from, copy);
+ kunmap(page);
+ if (err)
+ goto fault;
+
+ if (!(len -= copy))
+ return 0;
+ offset += copy;
+ }
+ start = end;
+ }
+
+ if (skb_shinfo(skb)->frag_list) {
+ struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+ for (; list; list = list->next) {
+ int end;
+
+ WARN_ON(start > offset + len);
+
+ end = start + list->len;
+ if ((copy = end - offset) > 0) {
+ if (copy > len)
+ copy = len;
+ if (skb_copy_datagram_from_iovec(list,
+ offset - start,
+ from, copy))
+ goto fault;
+ if ((len -= copy) == 0)
+ return 0;
+ offset += copy;
+ }
+ start = end;
+ }
+ }
+ if (!len)
+ return 0;
+
+fault:
+ return -EFAULT;
+}
+EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
+
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
u8 __user *to, int len,
__wsum *csump)
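The skb_copy_datagram_from_iovec() helper added above fills an skb from a user iovec, walking the linear area, the paged fragments and the frag_list just like its *_to_iovec counterpart. A minimal sketch of how a protocol's sendmsg() path might use it follows; example_build_skb(), the zero header reservation and the use of sock_alloc_send_skb() are illustrative assumptions, not part of this patch.

#include <linux/skbuff.h>
#include <linux/socket.h>
#include <net/sock.h>

/* Allocate a linear skb and copy the caller's iovec into it. */
static struct sk_buff *example_build_skb(struct sock *sk, struct msghdr *msg,
					 size_t len, int *err)
{
	struct sk_buff *skb;

	skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT, err);
	if (!skb)
		return NULL;

	/* A real protocol would skb_reserve() header room first. */
	skb_put(skb, len);
	if (skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, len)) {
		kfree_skb(skb);
		*err = -EFAULT;
		return NULL;
	}
	return skb;	/* caller adds headers and transmits */
}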
diff --git a/net/core/dev.c b/net/core/dev.c
index 600bb23c4c2..1408a083fe4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -122,6 +122,7 @@
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
+#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/jhash.h>
@@ -890,7 +891,7 @@ int dev_alloc_name(struct net_device *dev, const char *name)
* Change name of a device, can pass format strings "eth%d".
* for wildcarding.
*/
-int dev_change_name(struct net_device *dev, char *newname)
+int dev_change_name(struct net_device *dev, const char *newname)
{
char oldname[IFNAMSIZ];
int err = 0;
@@ -916,7 +917,6 @@ int dev_change_name(struct net_device *dev, char *newname)
err = dev_alloc_name(dev, newname);
if (err < 0)
return err;
- strcpy(newname, dev->name);
}
else if (__dev_get_by_name(net, newname))
return -EEXIST;
@@ -954,6 +954,38 @@ rollback:
}
/**
+ * dev_set_alias - change ifalias of a device
+ * @dev: device
+ * @alias: name up to IFALIASZ
+ * @len: limit of bytes to copy from @alias
+ *
+ * Set ifalias for a device.
+ */
+int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
+{
+ ASSERT_RTNL();
+
+ if (len >= IFALIASZ)
+ return -EINVAL;
+
+ if (!len) {
+ if (dev->ifalias) {
+ kfree(dev->ifalias);
+ dev->ifalias = NULL;
+ }
+ return 0;
+ }
+
+ dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
+ if (!dev->ifalias)
+ return -ENOMEM;
+
+ strlcpy(dev->ifalias, alias, len+1);
+ return len;
+}
+
+
+/**
* netdev_features_change - device changes features
* @dev: device to cause notification
*
@@ -1339,19 +1371,23 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
}
-void __netif_schedule(struct Qdisc *q)
+static inline void __netif_reschedule(struct Qdisc *q)
{
- if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
- struct softnet_data *sd;
- unsigned long flags;
+ struct softnet_data *sd;
+ unsigned long flags;
- local_irq_save(flags);
- sd = &__get_cpu_var(softnet_data);
- q->next_sched = sd->output_queue;
- sd->output_queue = q;
- raise_softirq_irqoff(NET_TX_SOFTIRQ);
- local_irq_restore(flags);
- }
+ local_irq_save(flags);
+ sd = &__get_cpu_var(softnet_data);
+ q->next_sched = sd->output_queue;
+ sd->output_queue = q;
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
+}
+
+void __netif_schedule(struct Qdisc *q)
+{
+ if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
+ __netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
@@ -1663,7 +1699,7 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
{
u32 addr1, addr2, ports;
u32 hash, ihl;
- u8 ip_proto;
+ u8 ip_proto = 0;
if (unlikely(!simple_tx_hashrnd_initialized)) {
get_random_bytes(&simple_tx_hashrnd, 4);
@@ -1671,13 +1707,14 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
}
switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
- ip_proto = ip_hdr(skb)->protocol;
+ case htons(ETH_P_IP):
+ if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
+ ip_proto = ip_hdr(skb)->protocol;
addr1 = ip_hdr(skb)->saddr;
addr2 = ip_hdr(skb)->daddr;
ihl = ip_hdr(skb)->ihl;
break;
- case __constant_htons(ETH_P_IPV6):
+ case htons(ETH_P_IPV6):
ip_proto = ipv6_hdr(skb)->nexthdr;
addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
@@ -1800,9 +1837,13 @@ gso:
spin_lock(root_lock);
- rc = qdisc_enqueue_root(skb, q);
- qdisc_run(q);
-
+ if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+ kfree_skb(skb);
+ rc = NET_XMIT_DROP;
+ } else {
+ rc = qdisc_enqueue_root(skb, q);
+ qdisc_run(q);
+ }
spin_unlock(root_lock);
goto out;
@@ -1974,15 +2015,22 @@ static void net_tx_action(struct softirq_action *h)
head = head->next_sched;
- smp_mb__before_clear_bit();
- clear_bit(__QDISC_STATE_SCHED, &q->state);
-
root_lock = qdisc_lock(q);
if (spin_trylock(root_lock)) {
+ smp_mb__before_clear_bit();
+ clear_bit(__QDISC_STATE_SCHED,
+ &q->state);
qdisc_run(q);
spin_unlock(root_lock);
} else {
- __netif_schedule(q);
+ if (!test_bit(__QDISC_STATE_DEACTIVATED,
+ &q->state)) {
+ __netif_reschedule(q);
+ } else {
+ smp_mb__before_clear_bit();
+ clear_bit(__QDISC_STATE_SCHED,
+ &q->state);
+ }
}
}
}
@@ -2084,7 +2132,8 @@ static int ing_filter(struct sk_buff *skb)
q = rxq->qdisc;
if (q != &noop_qdisc) {
spin_lock(qdisc_lock(q));
- result = qdisc_enqueue_root(skb, q);
+ if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+ result = qdisc_enqueue_root(skb, q);
spin_unlock(qdisc_lock(q));
}
@@ -2900,6 +2949,12 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
return 0;
}
+static void dev_change_rx_flags(struct net_device *dev, int flags)
+{
+ if (dev->flags & IFF_UP && dev->change_rx_flags)
+ dev->change_rx_flags(dev, flags);
+}
+
static int __dev_set_promiscuity(struct net_device *dev, int inc)
{
unsigned short old_flags = dev->flags;
@@ -2937,8 +2992,7 @@ static int __dev_set_promiscuity(struct net_device *dev, int inc)
current->uid, current->gid,
audit_get_sessionid(current));
- if (dev->change_rx_flags)
- dev->change_rx_flags(dev, IFF_PROMISC);
+ dev_change_rx_flags(dev, IFF_PROMISC);
}
return 0;
}
@@ -3004,8 +3058,7 @@ int dev_set_allmulti(struct net_device *dev, int inc)
}
}
if (dev->flags ^ old_flags) {
- if (dev->change_rx_flags)
- dev->change_rx_flags(dev, IFF_ALLMULTI);
+ dev_change_rx_flags(dev, IFF_ALLMULTI);
dev_set_rx_mode(dev);
}
return 0;
@@ -3284,6 +3337,12 @@ static void dev_addr_discard(struct net_device *dev)
netif_addr_unlock_bh(dev);
}
+/**
+ * dev_get_flags - get flags reported to userspace
+ * @dev: device
+ *
+ * Get the combination of flag bits exported through APIs to userspace.
+ */
unsigned dev_get_flags(const struct net_device *dev)
{
unsigned flags;
@@ -3308,6 +3367,14 @@ unsigned dev_get_flags(const struct net_device *dev)
return flags;
}
+/**
+ * dev_change_flags - change device settings
+ * @dev: device
+ * @flags: device state flags
+ *
+ * Change settings on device based state flags. The flags are
+ * in the userspace exported format.
+ */
int dev_change_flags(struct net_device *dev, unsigned flags)
{
int ret, changes;
@@ -3329,8 +3396,8 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
* Load in the correct multicast list now the flags have changed.
*/
- if (dev->change_rx_flags && (old_flags ^ flags) & IFF_MULTICAST)
- dev->change_rx_flags(dev, IFF_MULTICAST);
+ if ((old_flags ^ flags) & IFF_MULTICAST)
+ dev_change_rx_flags(dev, IFF_MULTICAST);
dev_set_rx_mode(dev);
@@ -3377,6 +3444,13 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
return ret;
}
+/**
+ * dev_set_mtu - Change maximum transfer unit
+ * @dev: device
+ * @new_mtu: new transfer unit
+ *
+ * Change the maximum transfer size of the network device.
+ */
int dev_set_mtu(struct net_device *dev, int new_mtu)
{
int err;
@@ -3401,6 +3475,13 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
return err;
}
+/**
+ * dev_set_mac_address - Change Media Access Control Address
+ * @dev: device
+ * @sa: new address
+ *
+ * Change the hardware (MAC) address of the device
+ */
int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
{
int err;
@@ -3790,14 +3871,11 @@ static int dev_new_index(struct net *net)
}
/* Delayed registration/unregisteration */
-static DEFINE_SPINLOCK(net_todo_list_lock);
static LIST_HEAD(net_todo_list);
static void net_set_todo(struct net_device *dev)
{
- spin_lock(&net_todo_list_lock);
list_add_tail(&dev->todo_list, &net_todo_list);
- spin_unlock(&net_todo_list_lock);
}
static void rollback_registered(struct net_device *dev)
@@ -4124,33 +4202,24 @@ static void netdev_wait_allrefs(struct net_device *dev)
* free_netdev(y1);
* free_netdev(y2);
*
- * We are invoked by rtnl_unlock() after it drops the semaphore.
+ * We are invoked by rtnl_unlock().
* This allows us to deal with problems:
* 1) We can delete sysfs objects which invoke hotplug
* without deadlocking with linkwatch via keventd.
* 2) Since we run with the RTNL semaphore not held, we can sleep
* safely in order to wait for the netdev refcnt to drop to zero.
+ *
+ * We must not return until all unregister events added during
+ * the interval the lock was held have been completed.
*/
-static DEFINE_MUTEX(net_todo_run_mutex);
void netdev_run_todo(void)
{
struct list_head list;
- /* Need to guard against multiple cpu's getting out of order. */
- mutex_lock(&net_todo_run_mutex);
-
- /* Not safe to do outside the semaphore. We must not return
- * until all unregister events invoked by the local processor
- * have been completed (either by this todo run, or one on
- * another cpu).
- */
- if (list_empty(&net_todo_list))
- goto out;
-
/* Snapshot list, allow later requests */
- spin_lock(&net_todo_list_lock);
list_replace_init(&net_todo_list, &list);
- spin_unlock(&net_todo_list_lock);
+
+ __rtnl_unlock();
while (!list_empty(&list)) {
struct net_device *dev
@@ -4182,9 +4251,6 @@ void netdev_run_todo(void)
/* Free network device */
kobject_put(&dev->dev.kobj);
}
-
-out:
- mutex_unlock(&net_todo_run_mutex);
}
static struct net_device_stats *internal_stats(struct net_device *dev)
@@ -4304,7 +4370,12 @@ void free_netdev(struct net_device *dev)
put_device(&dev->dev);
}
-/* Synchronize with packet receive processing. */
+/**
+ * synchronize_net - Synchronize with packet receive processing
+ *
+ * Wait for packets currently being received to be done.
+ * Does not block later packets from starting.
+ */
void synchronize_net(void)
{
might_sleep();
@@ -4606,7 +4677,7 @@ netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
}
/**
- * netdev_dma_regiser - register the networking subsystem as a DMA client
+ * netdev_dma_register - register the networking subsystem as a DMA client
*/
static int __init netdev_dma_register(void)
{
@@ -4652,6 +4723,12 @@ int netdev_compute_features(unsigned long all, unsigned long one)
one |= NETIF_F_GSO_SOFTWARE;
one |= NETIF_F_GSO;
+ /*
+ * If even one device supports a GSO protocol with software fallback,
+ * enable it for all.
+ */
+ all |= one & NETIF_F_GSO_SOFTWARE;
+
/* If even one device supports robust GSO, enable it for all. */
if (one & NETIF_F_GSO_ROBUST)
all |= NETIF_F_GSO_ROBUST;
@@ -4701,10 +4778,18 @@ err_name:
return -ENOMEM;
}
-char *netdev_drivername(struct net_device *dev, char *buffer, int len)
+/**
+ * netdev_drivername - network driver for the device
+ * @dev: network device
+ * @buffer: buffer for resulting name
+ * @len: size of buffer
+ *
+ * Determine network driver for device.
+ */
+char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
{
- struct device_driver *driver;
- struct device *parent;
+ const struct device_driver *driver;
+ const struct device *parent;
if (len <= 0 || !buffer)
return buffer;
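dev_set_alias() introduced above must be called with the RTNL lock held, as its ASSERT_RTNL() shows, and it returns the stored length on success. A minimal in-kernel sketch of a caller (example_set_alias() and the use of init_net are illustrative, not from this patch):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <net/net_namespace.h>

/* Attach a human-readable description to a device by name. */
static int example_set_alias(const char *ifname, const char *alias)
{
	struct net_device *dev;
	int err = -ENODEV;

	rtnl_lock();
	dev = __dev_get_by_name(&init_net, ifname);
	if (dev)
		err = dev_set_alias(dev, alias, strlen(alias));
	rtnl_unlock();

	return err < 0 ? err : 0;
}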
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 5402b3b38e0..9e2fa39f22a 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -6,7 +6,7 @@
* Richard Underwood <richard@wuzz.demon.co.uk>
*
* Stir fried together from the IP multicast and CAP patches above
- * Alan Cox <Alan.Cox@linux.org>
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* Fixes:
* Alan Cox : Update the device on a real delete
diff --git a/net/core/dst.c b/net/core/dst.c
index fe03266130b..09c1530f468 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -203,6 +203,7 @@ void __dst_free(struct dst_entry * dst)
if (dst_garbage.timer_inc > DST_GC_INC) {
dst_garbage.timer_inc = DST_GC_INC;
dst_garbage.timer_expires = DST_GC_MIN;
+ cancel_delayed_work(&dst_gc_work);
schedule_delayed_work(&dst_gc_work, dst_garbage.timer_expires);
}
spin_unlock_bh(&dst_garbage.lock);
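The dst.c hunk adds cancel_delayed_work() so the garbage-collection timer can be shortened while a run is already pending: schedule_delayed_work() is a no-op when the work is still queued, so without the cancel the old, longer delay would stand. A generic sketch of that pattern (example_work and the HZ/10 delay are invented for illustration):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void example_work_fn(struct work_struct *work)
{
	/* periodic cleanup would go here */
}

static DECLARE_DELAYED_WORK(example_work, example_work_fn);

/* Re-arm the pending work with a shorter delay. */
static void example_expedite(void)
{
	cancel_delayed_work(&example_work);
	schedule_delayed_work(&example_work, HZ / 10);
}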
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 9d92e41826e..1dc728b3858 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -927,8 +927,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
if (skb_queue_len(&neigh->arp_queue) >=
neigh->parms->queue_len) {
struct sk_buff *buff;
- buff = neigh->arp_queue.next;
- __skb_unlink(buff, &neigh->arp_queue);
+ buff = __skb_dequeue(&neigh->arp_queue);
kfree_skb(buff);
NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
}
@@ -1259,24 +1258,20 @@ static void neigh_proxy_process(unsigned long arg)
struct neigh_table *tbl = (struct neigh_table *)arg;
long sched_next = 0;
unsigned long now = jiffies;
- struct sk_buff *skb;
+ struct sk_buff *skb, *n;
spin_lock(&tbl->proxy_queue.lock);
- skb = tbl->proxy_queue.next;
-
- while (skb != (struct sk_buff *)&tbl->proxy_queue) {
- struct sk_buff *back = skb;
- long tdif = NEIGH_CB(back)->sched_next - now;
+ skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
+ long tdif = NEIGH_CB(skb)->sched_next - now;
- skb = skb->next;
if (tdif <= 0) {
- struct net_device *dev = back->dev;
- __skb_unlink(back, &tbl->proxy_queue);
+ struct net_device *dev = skb->dev;
+ __skb_unlink(skb, &tbl->proxy_queue);
if (tbl->proxy_redo && netif_running(dev))
- tbl->proxy_redo(back);
+ tbl->proxy_redo(skb);
else
- kfree_skb(back);
+ kfree_skb(skb);
dev_put(dev);
} else if (!sched_next || tdif < sched_next)
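The neigh_proxy_process() rewrite above switches to skb_queue_walk_safe(), which keeps a pointer to the next element so the current skb can be unlinked and freed inside the loop. The same idiom in isolation, with a hypothetical cb layout and helper (not from this patch):

#include <linux/skbuff.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>

struct example_cb {
	unsigned long sched_next;	/* stashed in skb->cb[] */
};

/* Drop every queued skb whose deadline has passed. */
static void example_purge_expired(struct sk_buff_head *queue)
{
	struct sk_buff *skb, *n;
	unsigned long now = jiffies;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
	skb_queue_walk_safe(queue, skb, n) {
		struct example_cb *cb = (struct example_cb *)skb->cb;

		if (time_before_eq(cb->sched_next, now)) {
			__skb_unlink(skb, queue);
			kfree_skb(skb);
		}
	}
	spin_unlock_irqrestore(&queue->lock, flags);
}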
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index c1f4e0d428c..92d6b946731 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -209,9 +209,44 @@ static ssize_t store_tx_queue_len(struct device *dev,
return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
+static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ size_t count = len;
+ ssize_t ret;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* ignore trailing newline */
+ if (len > 0 && buf[len - 1] == '\n')
+ --count;
+
+ rtnl_lock();
+ ret = dev_set_alias(netdev, buf, count);
+ rtnl_unlock();
+
+ return ret < 0 ? ret : len;
+}
+
+static ssize_t show_ifalias(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct net_device *netdev = to_net_dev(dev);
+ ssize_t ret = 0;
+
+ rtnl_lock();
+ if (netdev->ifalias)
+ ret = sprintf(buf, "%s\n", netdev->ifalias);
+ rtnl_unlock();
+ return ret;
+}
+
static struct device_attribute net_class_attributes[] = {
__ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
__ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
+ __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
__ATTR(iflink, S_IRUGO, show_iflink, NULL),
__ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
__ATTR(features, S_IRUGO, show_features, NULL),
@@ -418,6 +453,7 @@ static void netdev_release(struct device *d)
BUG_ON(dev->reg_state != NETREG_RELEASED);
+ kfree(dev->ifalias);
kfree((char *)dev - dev->padded);
}
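With the ifalias attribute registered above, the description becomes readable and writable through sysfs. A small userspace sketch (the device name eth0 and the alias text are placeholders; the write requires CAP_NET_ADMIN, matching the check in store_ifalias()):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *alias = "uplink to core switch\n";
	int fd = open("/sys/class/net/eth0/ifalias", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, alias, strlen(alias)) < 0)
		perror("write");	/* EPERM without CAP_NET_ADMIN */
	close(fd);
	return 0;
}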
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 7c52fe277b6..b0dc818a91d 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -18,6 +18,7 @@ static struct list_head *first_device = &pernet_list;
static DEFINE_MUTEX(net_mutex);
LIST_HEAD(net_namespace_list);
+EXPORT_SYMBOL_GPL(net_namespace_list);
struct net init_net;
EXPORT_SYMBOL(init_net);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 52623645390..99f656d35b4 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1961,6 +1961,8 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
*/
static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
{
+ int ntxq;
+
if (!pkt_dev->odev) {
printk(KERN_ERR "pktgen: ERROR: pkt_dev->odev == NULL in "
"setup_inject.\n");
@@ -1969,6 +1971,33 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
return;
}
+ /* make sure that we don't pick a non-existing transmit queue */
+ ntxq = pkt_dev->odev->real_num_tx_queues;
+ if (ntxq <= num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) {
+ printk(KERN_WARNING "pktgen: WARNING: QUEUE_MAP_CPU "
+ "disabled because CPU count (%d) exceeds number ",
+ num_online_cpus());
+ printk(KERN_WARNING "pktgen: WARNING: of tx queues "
+ "(%d) on %s \n", ntxq, pkt_dev->odev->name);
+ pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
+ }
+ if (ntxq <= pkt_dev->queue_map_min) {
+ printk(KERN_WARNING "pktgen: WARNING: Requested "
+ "queue_map_min (%d) exceeds number of tx\n",
+ pkt_dev->queue_map_min);
+ printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+ "%s, resetting\n", ntxq, pkt_dev->odev->name);
+ pkt_dev->queue_map_min = ntxq - 1;
+ }
+ if (ntxq <= pkt_dev->queue_map_max) {
+ printk(KERN_WARNING "pktgen: WARNING: Requested "
+ "queue_map_max (%d) exceeds number of tx\n",
+ pkt_dev->queue_map_max);
+ printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+ "%s, resetting\n", ntxq, pkt_dev->odev->name);
+ pkt_dev->queue_map_max = ntxq - 1;
+ }
+
/* Default to the interface's mac if not explicitly set. */
if (is_zero_ether_addr(pkt_dev->src_mac))
@@ -2445,7 +2474,7 @@ static inline int process_ipsec(struct pktgen_dev *pkt_dev,
if (ret < 0) {
printk(KERN_ERR "Error expanding "
"ipsec packet %d\n",ret);
- return 0;
+ goto err;
}
}
@@ -2455,8 +2484,7 @@ static inline int process_ipsec(struct pktgen_dev *pkt_dev,
if (ret) {
printk(KERN_ERR "Error creating ipsec "
"packet %d\n",ret);
- kfree_skb(skb);
- return 0;
+ goto err;
}
/* restore ll */
eth = (__u8 *) skb_push(skb, ETH_HLEN);
@@ -2465,6 +2493,9 @@ static inline int process_ipsec(struct pktgen_dev *pkt_dev,
}
}
return 1;
+err:
+ kfree_skb(skb);
+ return 0;
}
#endif
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 71edb8b3634..3630131fa1f 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -73,7 +73,7 @@ void __rtnl_unlock(void)
void rtnl_unlock(void)
{
- mutex_unlock(&rtnl_mutex);
+ /* This fellow will unlock it for us. */
netdev_run_todo();
}
@@ -586,6 +586,7 @@ static inline size_t if_nlmsg_size(const struct net_device *dev)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
+ + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
+ nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
+ nla_total_size(sizeof(struct rtnl_link_ifmap))
+ nla_total_size(sizeof(struct rtnl_link_stats))
@@ -640,6 +641,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
if (txq->qdisc_sleeping)
NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id);
+ if (dev->ifalias)
+ NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias);
+
if (1) {
struct rtnl_link_ifmap map = {
.mem_start = dev->mem_start,
@@ -713,6 +717,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_LINKMODE] = { .type = NLA_U8 },
[IFLA_LINKINFO] = { .type = NLA_NESTED },
[IFLA_NET_NS_PID] = { .type = NLA_U32 },
+ [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
};
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -853,6 +858,14 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
modified = 1;
}
+ if (tb[IFLA_IFALIAS]) {
+ err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
+ nla_len(tb[IFLA_IFALIAS]));
+ if (err < 0)
+ goto errout;
+ modified = 1;
+ }
+
if (tb[IFLA_BROADCAST]) {
nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
send_addr_notify = 1;
diff --git a/net/core/skb_dma_map.c b/net/core/skb_dma_map.c
new file mode 100644
index 00000000000..86234923a3b
--- /dev/null
+++ b/net/core/skb_dma_map.c
@@ -0,0 +1,66 @@
+/* skb_dma_map.c: DMA mapping helpers for socket buffers.
+ *
+ * Copyright (C) David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/skbuff.h>
+
+int skb_dma_map(struct device *dev, struct sk_buff *skb,
+ enum dma_data_direction dir)
+{
+ struct skb_shared_info *sp = skb_shinfo(skb);
+ dma_addr_t map;
+ int i;
+
+ map = dma_map_single(dev, skb->data,
+ skb_headlen(skb), dir);
+ if (dma_mapping_error(dev, map))
+ goto out_err;
+
+ sp->dma_maps[0] = map;
+ for (i = 0; i < sp->nr_frags; i++) {
+ skb_frag_t *fp = &sp->frags[i];
+
+ map = dma_map_page(dev, fp->page, fp->page_offset,
+ fp->size, dir);
+ if (dma_mapping_error(dev, map))
+ goto unwind;
+ sp->dma_maps[i + 1] = map;
+ }
+ sp->num_dma_maps = i + 1;
+
+ return 0;
+
+unwind:
+ while (--i >= 0) {
+ skb_frag_t *fp = &sp->frags[i];
+
+ dma_unmap_page(dev, sp->dma_maps[i + 1],
+ fp->size, dir);
+ }
+ dma_unmap_single(dev, sp->dma_maps[0],
+ skb_headlen(skb), dir);
+out_err:
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(skb_dma_map);
+
+void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
+ enum dma_data_direction dir)
+{
+ struct skb_shared_info *sp = skb_shinfo(skb);
+ int i;
+
+ dma_unmap_single(dev, sp->dma_maps[0],
+ skb_headlen(skb), dir);
+ for (i = 0; i < sp->nr_frags; i++) {
+ skb_frag_t *fp = &sp->frags[i];
+
+ dma_unmap_page(dev, sp->dma_maps[i + 1],
+ fp->size, dir);
+ }
+}
+EXPORT_SYMBOL(skb_dma_unmap);
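skb_dma_map() maps the linear area into dma_maps[0] and each paged fragment into dma_maps[1..], unwinding everything if any mapping fails. A driver-side sketch of the intended use on the transmit path (the descriptor programming and the example_* names are hypothetical):

#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Map an skb before posting it to the hardware TX ring. */
static int example_xmit_map(struct device *dev, struct sk_buff *skb)
{
	struct skb_shared_info *sp;
	int i;

	if (skb_dma_map(dev, skb, DMA_TO_DEVICE))
		return -ENOMEM;		/* head or a fragment failed to map */

	sp = skb_shinfo(skb);
	/* A real driver writes each address/length pair into its TX
	 * descriptors; here we only log them. */
	for (i = 0; i < sp->nr_frags + 1; i++)
		pr_debug("desc %d -> %#llx\n", i,
			 (unsigned long long)sp->dma_maps[i]);
	return 0;
}

/* On TX completion, undo the mapping before freeing the skb. */
static void example_tx_complete(struct device *dev, struct sk_buff *skb)
{
	skb_dma_unmap(dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);
}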
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 84640172d65..4e22e3a3535 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1,7 +1,7 @@
/*
* Routines having to do with the 'struct sk_buff' memory handlers.
*
- * Authors: Alan Cox <iiitac@pyr.swan.ac.uk>
+ * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>
* Florian La Roche <rzsfl@rz.uni-sb.de>
*
* Fixes:
@@ -263,6 +263,26 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
return skb;
}
+struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
+{
+ int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
+ struct page *page;
+
+ page = alloc_pages_node(node, gfp_mask, 0);
+ return page;
+}
+EXPORT_SYMBOL(__netdev_alloc_page);
+
+void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
+ int size)
+{
+ skb_fill_page_desc(skb, i, page, off, size);
+ skb->len += size;
+ skb->data_len += size;
+ skb->truesize += size;
+}
+EXPORT_SYMBOL(skb_add_rx_frag);
+
/**
* dev_alloc_skb - allocate an skbuff for receiving
* @length: length to allocate
@@ -363,8 +383,7 @@ static void kfree_skbmem(struct sk_buff *skb)
}
}
-/* Free everything but the sk_buff shell. */
-static void skb_release_all(struct sk_buff *skb)
+static void skb_release_head_state(struct sk_buff *skb)
{
dst_release(skb->dst);
#ifdef CONFIG_XFRM
@@ -388,6 +407,12 @@ static void skb_release_all(struct sk_buff *skb)
skb->tc_verd = 0;
#endif
#endif
+}
+
+/* Free everything but the sk_buff shell. */
+static void skb_release_all(struct sk_buff *skb)
+{
+ skb_release_head_state(skb);
skb_release_data(skb);
}
@@ -424,6 +449,38 @@ void kfree_skb(struct sk_buff *skb)
__kfree_skb(skb);
}
+int skb_recycle_check(struct sk_buff *skb, int skb_size)
+{
+ struct skb_shared_info *shinfo;
+
+ if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
+ return 0;
+
+ skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
+ if (skb_end_pointer(skb) - skb->head < skb_size)
+ return 0;
+
+ if (skb_shared(skb) || skb_cloned(skb))
+ return 0;
+
+ skb_release_head_state(skb);
+ shinfo = skb_shinfo(skb);
+ atomic_set(&shinfo->dataref, 1);
+ shinfo->nr_frags = 0;
+ shinfo->gso_size = 0;
+ shinfo->gso_segs = 0;
+ shinfo->gso_type = 0;
+ shinfo->ip6_frag_id = 0;
+ shinfo->frag_list = NULL;
+
+ memset(skb, 0, offsetof(struct sk_buff, tail));
+ skb_reset_tail_pointer(skb);
+ skb->data = skb->head + NET_SKB_PAD;
+
+ return 1;
+}
+EXPORT_SYMBOL(skb_recycle_check);
+
static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
new->tstamp = old->tstamp;
@@ -701,6 +758,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
#endif
long off;
+ BUG_ON(nhead < 0);
+
if (skb_shared(skb))
BUG();
@@ -2256,14 +2315,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
segs = nskb;
tail = nskb;
- nskb->dev = skb->dev;
- skb_copy_queue_mapping(nskb, skb);
- nskb->priority = skb->priority;
- nskb->protocol = skb->protocol;
- nskb->vlan_tci = skb->vlan_tci;
- nskb->dst = dst_clone(skb->dst);
- memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
- nskb->pkt_type = skb->pkt_type;
+ __copy_skb_header(nskb, skb);
nskb->mac_len = skb->mac_len;
skb_reserve(nskb, headroom);
@@ -2274,6 +2326,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
skb_copy_from_linear_data(skb, skb_put(nskb, doffset),
doffset);
if (!sg) {
+ nskb->ip_summed = CHECKSUM_NONE;
nskb->csum = skb_copy_and_csum_bits(skb, offset,
skb_put(nskb, len),
len, 0);
@@ -2283,8 +2336,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
frag = skb_shinfo(nskb)->frags;
k = 0;
- nskb->ip_summed = CHECKSUM_PARTIAL;
- nskb->csum = skb->csum;
skb_copy_from_linear_data_offset(skb, offset,
skb_put(nskb, hsize), hsize);
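Among the skbuff.c additions, skb_recycle_check() lets a driver reset a just-completed TX skb and keep it for RX refill instead of freeing and reallocating it. A sketch of that pattern, assuming a hypothetical per-ring recycle queue and a 1536-byte RX buffer (locking between the TX-clean and RX-refill paths is omitted):

#include <linux/skbuff.h>
#include <linux/netdevice.h>

#define EXAMPLE_RX_BUF_SIZE	1536	/* illustrative RX buffer size */

/* TX clean-up: recycle the skb when possible, otherwise free it. */
static void example_tx_clean(struct sk_buff_head *recycle_q, struct sk_buff *skb)
{
	if (skb_recycle_check(skb, EXAMPLE_RX_BUF_SIZE))
		__skb_queue_head(recycle_q, skb);
	else
		dev_kfree_skb(skb);
}

/* RX refill: prefer a recycled skb over a fresh allocation. */
static struct sk_buff *example_rx_refill(struct sk_buff_head *recycle_q,
					 struct net_device *dev)
{
	struct sk_buff *skb = __skb_dequeue(recycle_q);

	if (!skb)
		skb = netdev_alloc_skb(dev, EXAMPLE_RX_BUF_SIZE);
	return skb;
}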
diff --git a/net/core/sock.c b/net/core/sock.c
index 91f8bbc9352..5e2a3132a8c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -154,7 +154,8 @@ static const char *af_family_key_strings[AF_MAX+1] = {
"sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
"sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
"sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
- "sk_lock-AF_RXRPC" , "sk_lock-AF_MAX"
+ "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
+ "sk_lock-AF_MAX"
};
static const char *af_family_slock_key_strings[AF_MAX+1] = {
"slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
@@ -168,7 +169,8 @@ static const char *af_family_slock_key_strings[AF_MAX+1] = {
"slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
"slock-27" , "slock-28" , "slock-AF_CAN" ,
"slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
- "slock-AF_RXRPC" , "slock-AF_MAX"
+ "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
+ "slock-AF_MAX"
};
static const char *af_family_clock_key_strings[AF_MAX+1] = {
"clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
@@ -182,7 +184,8 @@ static const char *af_family_clock_key_strings[AF_MAX+1] = {
"clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
"clock-27" , "clock-28" , "clock-AF_CAN" ,
"clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
- "clock-AF_RXRPC" , "clock-AF_MAX"
+ "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
+ "clock-AF_MAX"
};
#endif
@@ -324,7 +327,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
*/
mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
- rc = sk->sk_backlog_rcv(sk, skb);
+ rc = sk_backlog_rcv(sk, skb);
mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
} else
@@ -1371,7 +1374,7 @@ static void __release_sock(struct sock *sk)
struct sk_buff *next = skb->next;
skb->next = NULL;
- sk->sk_backlog_rcv(sk, skb);
+ sk_backlog_rcv(sk, skb);
/*
* We are in process context here with softirqs
diff --git a/net/core/stream.c b/net/core/stream.c
index a6b3437ff08..8727cead64a 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -9,7 +9,7 @@
*
* Authors: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
* (from old tcp.c code)
- * Alan Cox <alan@redhat.com> (Borrowed comments 8-))
+ * Alan Cox <alan@lxorguk.ukuu.org.uk> (Borrowed comments 8-))
*/
#include <linux/module.h>