Diffstat (limited to 'net/core')
-rw-r--r--  net/core/datagram.c  | 14
-rw-r--r--  net/core/dev.c       | 36
-rw-r--r--  net/core/ethtool.c   |  3
-rw-r--r--  net/core/netpoll.c   | 31
-rw-r--r--  net/core/pktgen.c    |  1
-rw-r--r--  net/core/skbuff.c    | 29
-rw-r--r--  net/core/sock.c      |  8
7 files changed, 71 insertions(+), 51 deletions(-)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index d0de644b378..b01a76abe1d 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -64,13 +64,25 @@ static inline int connection_based(struct sock *sk)
return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}
+static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync,
+ void *key)
+{
+ unsigned long bits = (unsigned long)key;
+
+ /*
+ * Avoid a wakeup if event not interesting for us
+ */
+ if (bits && !(bits & (POLLIN | POLLERR)))
+ return 0;
+ return autoremove_wake_function(wait, mode, sync, key);
+}
/*
* Wait for a packet..
*/
static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
{
int error;
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, receiver_wake_function);
prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
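
Note: this hunk only pays off together with the sock.c hunks at the end of this diff. The default socket callbacks there now pass the poll event mask as the wakeup key, and receiver_wake_function() uses that key to skip wakeups for events the sleeper does not care about. A minimal sketch of the pairing, kernel context assumed, using only names that appear in this diff:

    /* Sleeper side: register a wait entry whose wake callback filters
     * on the key before falling back to autoremove_wake_function(). */
    DEFINE_WAIT_FUNC(wait, receiver_wake_function);
    prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

    /* Waker side: supply the event mask as the key. An unkeyed
     * wake_up() passes key == NULL, which the filter treats as
     * "wake unconditionally", so old callers keep working. */
    wake_up_interruptible_sync_poll(sk->sk_sleep,
                                    POLLIN | POLLRDNORM | POLLRDBAND);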
diff --git a/net/core/dev.c b/net/core/dev.c
index 52fea5b28ca..e2e9e4af3ac 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1336,7 +1336,12 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
struct packet_type *ptype;
+#ifdef CONFIG_NET_CLS_ACT
+ if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
+ net_timestamp(skb);
+#else
net_timestamp(skb);
+#endif
rcu_read_lock();
list_for_each_entry_rcu(ptype, &ptype_all, list) {
@@ -1430,7 +1435,7 @@ void netif_device_detach(struct net_device *dev)
{
if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
netif_running(dev)) {
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
}
}
EXPORT_SYMBOL(netif_device_detach);
@@ -1445,7 +1450,7 @@ void netif_device_attach(struct net_device *dev)
{
if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
netif_running(dev)) {
- netif_wake_queue(dev);
+ netif_tx_wake_all_queues(dev);
__netdev_watchdog_up(dev);
}
}
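
Note: netif_stop_queue()/netif_wake_queue() only touch TX queue 0, so on a multiqueue device detach would leave queues 1..n running. The replacements walk every TX queue. A hedged sketch of what netif_tx_stop_all_queues() amounts to, modeled on the helpers of this era (see include/linux/netdevice.h for the authoritative version):

    static inline void tx_stop_all_queues_sketch(struct net_device *dev)
    {
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
            struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

            netif_tx_stop_queue(txq);   /* sets the per-queue XOFF state */
        }
    }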
@@ -1730,11 +1735,12 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
{
u32 hash;
- if (skb_rx_queue_recorded(skb)) {
- hash = skb_get_rx_queue(skb);
- } else if (skb->sk && skb->sk->sk_hash) {
+ if (skb_rx_queue_recorded(skb))
+ return skb_get_rx_queue(skb) % dev->real_num_tx_queues;
+
+ if (skb->sk && skb->sk->sk_hash)
hash = skb->sk->sk_hash;
- } else
+ else
hash = skb->protocol;
hash = jhash_1word(hash, skb_tx_hashrnd);
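
Note: taking the recorded RX queue modulo dev->real_num_tx_queues keeps the returned index valid even when the device exposes fewer TX queues than RX queues. Either way, a 32-bit hash has to be folded down to a queue index; a self-contained user-space sketch of the two usual foldings (illustrative names, not kernel API):

    #include <stdint.h>
    #include <stdio.h>

    /* Modulo fold: what the recorded-RX-queue path above uses. */
    static uint16_t fold_mod(uint32_t hash, uint16_t nqueues)
    {
        return hash % nqueues;
    }

    /* Multiply-shift fold: maps the hash proportionally onto
     * [0, nqueues) without a division. */
    static uint16_t fold_scale(uint32_t hash, uint16_t nqueues)
    {
        return (uint16_t)(((uint64_t)hash * nqueues) >> 32);
    }

    int main(void)
    {
        printf("%u %u\n", fold_mod(0xdeadbeefu, 8),
               fold_scale(0xdeadbeefu, 8));
        return 0;
    }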
@@ -2328,8 +2334,10 @@ static int napi_gro_complete(struct sk_buff *skb)
struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
int err = -ENOENT;
- if (NAPI_GRO_CB(skb)->count == 1)
+ if (NAPI_GRO_CB(skb)->count == 1) {
+ skb_shinfo(skb)->gso_size = 0;
goto out;
+ }
rcu_read_lock();
list_for_each_entry_rcu(ptype, head, list) {
@@ -2348,7 +2356,6 @@ static int napi_gro_complete(struct sk_buff *skb)
}
out:
- skb_shinfo(skb)->gso_size = 0;
return netif_receive_skb(skb);
}
@@ -2472,8 +2479,9 @@ static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
return GRO_NORMAL;
for (p = napi->gro_list; p; p = p->next) {
- NAPI_GRO_CB(p)->same_flow = !compare_ether_header(
- skb_mac_header(p), skb_gro_mac_header(skb));
+ NAPI_GRO_CB(p)->same_flow = (p->dev == skb->dev)
+ && !compare_ether_header(skb_mac_header(p),
+ skb_gro_mac_header(skb));
NAPI_GRO_CB(p)->flush = 0;
}
@@ -2538,9 +2546,9 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
}
BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
- frag = &info->frags[info->nr_frags - 1];
+ frag = info->frags;
- for (i = skb_shinfo(skb)->nr_frags; i < info->nr_frags; i++) {
+ for (i = 0; i < info->nr_frags; i++) {
skb_fill_page_desc(skb, i, frag->page, frag->page_offset,
frag->size);
frag++;
@@ -4398,7 +4406,7 @@ int register_netdevice(struct net_device *dev)
dev->iflink = -1;
#ifdef CONFIG_COMPAT_NET_DEV_OPS
- /* Netdevice_ops API compatiability support.
+ /* Netdevice_ops API compatibility support.
* This is temporary until all network devices are converted.
*/
if (dev->netdev_ops) {
@@ -4409,7 +4417,7 @@ int register_netdevice(struct net_device *dev)
dev->name, netdev_drivername(dev, drivername, 64));
/* This works only because net_device_ops and the
- compatiablity structure are the same. */
+ compatibility structure are the same. */
dev->netdev_ops = (void *) &(dev->init);
}
#endif
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 244ca56dffa..d9d5160610d 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -261,8 +261,7 @@ static int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
ret = 0;
err_out:
- if (rule_buf)
- kfree(rule_buf);
+ kfree(rule_buf);
return ret;
}
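
Note: kfree(NULL) is defined to be a no-op, exactly like free(NULL) in ISO C, so the NULL check before it is dead weight and dropping it is a routine kernel cleanup:

    char *rule_buf = NULL;   /* the allocation may have been skipped */
    kfree(rule_buf);         /* safe: kfree() ignores a NULL pointer */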
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 755414cd49d..b5873bdff61 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -345,8 +345,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
udph->dest = htons(np->remote_port);
udph->len = htons(udp_len);
udph->check = 0;
- udph->check = csum_tcpudp_magic(htonl(np->local_ip),
- htonl(np->remote_ip),
+ udph->check = csum_tcpudp_magic(np->local_ip,
+ np->remote_ip,
udp_len, IPPROTO_UDP,
csum_partial(udph, udp_len, 0));
if (udph->check == 0)
@@ -365,8 +365,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
iph->ttl = 64;
iph->protocol = IPPROTO_UDP;
iph->check = 0;
- put_unaligned(htonl(np->local_ip), &(iph->saddr));
- put_unaligned(htonl(np->remote_ip), &(iph->daddr));
+ put_unaligned(np->local_ip, &(iph->saddr));
+ put_unaligned(np->remote_ip, &(iph->daddr));
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
@@ -424,7 +424,7 @@ static void arp_reply(struct sk_buff *skb)
memcpy(&tip, arp_ptr, 4);
/* Should we ignore arp? */
- if (tip != htonl(np->local_ip) ||
+ if (tip != np->local_ip ||
ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
return;
@@ -533,9 +533,9 @@ int __netpoll_rx(struct sk_buff *skb)
goto out;
if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
goto out;
- if (np->local_ip && np->local_ip != ntohl(iph->daddr))
+ if (np->local_ip && np->local_ip != iph->daddr)
goto out;
- if (np->remote_ip && np->remote_ip != ntohl(iph->saddr))
+ if (np->remote_ip && np->remote_ip != iph->saddr)
goto out;
if (np->local_port && np->local_port != ntohs(uh->dest))
goto out;
@@ -560,14 +560,14 @@ void netpoll_print_options(struct netpoll *np)
{
printk(KERN_INFO "%s: local port %d\n",
np->name, np->local_port);
- printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
- np->name, HIPQUAD(np->local_ip));
+ printk(KERN_INFO "%s: local IP %pI4\n",
+ np->name, &np->local_ip);
printk(KERN_INFO "%s: interface %s\n",
np->name, np->dev_name);
printk(KERN_INFO "%s: remote port %d\n",
np->name, np->remote_port);
- printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n",
- np->name, HIPQUAD(np->remote_ip));
+ printk(KERN_INFO "%s: remote IP %pI4\n",
+ np->name, &np->remote_ip);
printk(KERN_INFO "%s: remote ethernet address %pM\n",
np->name, np->remote_mac);
}
@@ -589,7 +589,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
if ((delim = strchr(cur, '/')) == NULL)
goto parse_failed;
*delim = 0;
- np->local_ip = ntohl(in_aton(cur));
+ np->local_ip = in_aton(cur);
cur = delim;
}
cur++;
@@ -618,7 +618,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
if ((delim = strchr(cur, '/')) == NULL)
goto parse_failed;
*delim = 0;
- np->remote_ip = ntohl(in_aton(cur));
+ np->remote_ip = in_aton(cur);
cur = delim + 1;
if (*cur != 0) {
@@ -759,10 +759,9 @@ int netpoll_setup(struct netpoll *np)
goto release;
}
- np->local_ip = ntohl(in_dev->ifa_list->ifa_local);
+ np->local_ip = in_dev->ifa_list->ifa_local;
rcu_read_unlock();
- printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n",
- np->name, HIPQUAD(np->local_ip));
+ printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
}
if (np->rx_hook) {
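
Note: all the netpoll.c hunks are one change: np->local_ip and np->remote_ip are now kept in network byte order end to end, so the htonl()/ntohl() conversions at every use site disappear, and %pI4 (which takes a pointer to a network-order address) replaces the HIPQUAD macro. in_aton() and ifa_local already produce network order. A self-contained user-space analogue of the same convention:

    #include <arpa/inet.h>
    #include <stdio.h>

    int main(void)
    {
        /* inet_addr(), like the kernel's in_aton(), yields network
         * byte order; keeping the address in that form end to end
         * means no htonl()/ntohl() at the point of use. */
        struct in_addr a;

        a.s_addr = inet_addr("10.0.0.1");   /* network order */
        printf("%s\n", inet_ntoa(a));       /* prints 10.0.0.1 */
        return 0;
    }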
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 32d419f5ac9..3779c1438c1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3806,7 +3806,6 @@ static int __init pg_init(void)
pg_proc_dir = proc_mkdir(PG_PROC_DIR, init_net.proc_net);
if (!pg_proc_dir)
return -ENODEV;
- pg_proc_dir->owner = THIS_MODULE;
pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops);
if (pe == NULL) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 6acbf9e79eb..f091a5a845c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1365,9 +1365,8 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
static inline struct page *linear_to_page(struct page *page, unsigned int *len,
unsigned int *offset,
- struct sk_buff *skb)
+ struct sk_buff *skb, struct sock *sk)
{
- struct sock *sk = skb->sk;
struct page *p = sk->sk_sndmsg_page;
unsigned int off;
@@ -1405,13 +1404,14 @@ new_page:
*/
static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
unsigned int *len, unsigned int offset,
- struct sk_buff *skb, int linear)
+ struct sk_buff *skb, int linear,
+ struct sock *sk)
{
if (unlikely(spd->nr_pages == PIPE_BUFFERS))
return 1;
if (linear) {
- page = linear_to_page(page, len, &offset, skb);
+ page = linear_to_page(page, len, &offset, skb, sk);
if (!page)
return 1;
} else
@@ -1442,7 +1442,8 @@ static inline void __segment_seek(struct page **page, unsigned int *poff,
static inline int __splice_segment(struct page *page, unsigned int poff,
unsigned int plen, unsigned int *off,
unsigned int *len, struct sk_buff *skb,
- struct splice_pipe_desc *spd, int linear)
+ struct splice_pipe_desc *spd, int linear,
+ struct sock *sk)
{
if (!*len)
return 1;
@@ -1465,7 +1466,7 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
/* the linear region may spread across several pages */
flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
- if (spd_fill_page(spd, page, &flen, poff, skb, linear))
+ if (spd_fill_page(spd, page, &flen, poff, skb, linear, sk))
return 1;
__segment_seek(&page, &poff, &plen, flen);
@@ -1481,8 +1482,8 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
* pipe is full or if we already spliced the requested length.
*/
static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
- unsigned int *len,
- struct splice_pipe_desc *spd)
+ unsigned int *len, struct splice_pipe_desc *spd,
+ struct sock *sk)
{
int seg;
@@ -1492,7 +1493,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
if (__splice_segment(virt_to_page(skb->data),
(unsigned long) skb->data & (PAGE_SIZE - 1),
skb_headlen(skb),
- offset, len, skb, spd, 1))
+ offset, len, skb, spd, 1, sk))
return 1;
/*
@@ -1502,7 +1503,7 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
if (__splice_segment(f->page, f->page_offset, f->size,
- offset, len, skb, spd, 0))
+ offset, len, skb, spd, 0, sk))
return 1;
}
@@ -1528,12 +1529,13 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
.ops = &sock_pipe_buf_ops,
.spd_release = sock_spd_release,
};
+ struct sock *sk = skb->sk;
/*
* __skb_splice_bits() only fails if the output has no room left,
* so no point in going over the frag_list for the error case.
*/
- if (__skb_splice_bits(skb, &offset, &tlen, &spd))
+ if (__skb_splice_bits(skb, &offset, &tlen, &spd, sk))
goto done;
else if (!tlen)
goto done;
@@ -1545,14 +1547,13 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
struct sk_buff *list = skb_shinfo(skb)->frag_list;
for (; list && tlen; list = list->next) {
- if (__skb_splice_bits(list, &offset, &tlen, &spd))
+ if (__skb_splice_bits(list, &offset, &tlen, &spd, sk))
break;
}
}
done:
if (spd.nr_pages) {
- struct sock *sk = skb->sk;
int ret;
/*
@@ -2579,7 +2580,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
skb_network_header_len(skb));
skb_copy_from_linear_data(skb, nskb->data, doffset);
- if (pos >= offset + len)
+ if (fskb != skb_shinfo(skb)->frag_list)
continue;
if (!sg) {
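
Note: the splice hunks thread an explicit struct sock * through the whole call chain instead of reading skb->sk inside linear_to_page(): skbs hanging off the head skb's frag_list do not necessarily carry a valid skb->sk, so the socket is resolved once at the top in skb_splice_bits() and passed down. The shape of the fix as a self-contained sketch with illustrative types (not kernel API):

    #include <stdio.h>

    struct ctx  { int id; };
    struct node { struct node *next; struct ctx *owner; /* may be NULL */ };

    /* Helpers take the context explicitly rather than via n->owner. */
    static int use(struct node *n, struct ctx *c)
    {
        (void)n;
        return c->id;
    }

    /* Resolve the context once at the head of the chain, then thread
     * it down -- chained nodes may have no owner of their own. */
    static int walk(struct node *head)
    {
        struct ctx *c = head->owner;
        struct node *n;
        int sum = 0;

        for (n = head; n; n = n->next)
            sum += use(n, c);
        return sum;
    }

    int main(void)
    {
        struct ctx c = { 7 };
        struct node b = { NULL, NULL };   /* like a frag_list skb: no owner */
        struct node a = { &b, &c };

        printf("%d\n", walk(&a));         /* prints 14 */
        return 0;
    }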
diff --git a/net/core/sock.c b/net/core/sock.c
index 0620046e4eb..7dbf3ffb35c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1677,7 +1677,7 @@ static void sock_def_error_report(struct sock *sk)
{
read_lock(&sk->sk_callback_lock);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible(sk->sk_sleep);
+ wake_up_interruptible_poll(sk->sk_sleep, POLLERR);
sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
read_unlock(&sk->sk_callback_lock);
}
@@ -1686,7 +1686,8 @@ static void sock_def_readable(struct sock *sk, int len)
{
read_lock(&sk->sk_callback_lock);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_sync(sk->sk_sleep);
+ wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
+ POLLRDNORM | POLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
read_unlock(&sk->sk_callback_lock);
}
@@ -1700,7 +1701,8 @@ static void sock_def_write_space(struct sock *sk)
*/
if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_sync(sk->sk_sleep);
+ wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+ POLLWRNORM | POLLWRBAND);
/* Should agree with poll, otherwise some programs break */
if (sock_writeable(sk))
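
Note: these sock.c hunks are the waker half of the datagram.c change: each default callback now advertises which poll events it represents, so key-aware sleepers such as receiver_wake_function() can ignore irrelevant wakeups, while unkeyed sleepers behave exactly as before. The guard above sock_def_write_space()'s wakeup is the usual half-buffer heuristic: (sk_wmem_alloc << 1) <= sk_sndbuf is equivalent to sk_wmem_alloc <= sk_sndbuf / 2, so with a 64 KB send buffer a blocked writer is only woken once in-flight write memory drains to 32 KB or less, which is the same threshold poll() uses to report the socket writable (hence the "Should agree with poll" comment).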