Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c     5
-rw-r--r--  net/core/skbuff.c  4
-rw-r--r--  net/core/sock.c    4
3 files changed, 6 insertions(+), 7 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 95161054c4d..fcdf03cf3b3 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2143,7 +2143,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
*
* The entry's receive function will be scheduled to run
*/
-void fastcall __napi_schedule(struct napi_struct *n)
+void __napi_schedule(struct napi_struct *n)
{
unsigned long flags;
@@ -3038,8 +3038,7 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from)
EXPORT_SYMBOL(dev_unicast_sync);
/**
- * dev_unicast_unsync - Remove synchronized addresses from the destination
- * device
+ * dev_unicast_unsync - Remove synchronized addresses from the destination device
* @to: destination device
* @from: source device
*
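For context, __napi_schedule() (now minus the no-op fastcall annotation) is the low-level half of NAPI scheduling: a driver first claims the right to schedule with napi_schedule_prep(), then queues its napi_struct so its ->poll() routine runs in softirq context. A minimal sketch of the usual interrupt-handler pattern; the my_* names are hypothetical, and only the NAPI calls are the real kernel API:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical driver private data; only the NAPI calls below are real API. */
struct my_dev_priv {
	struct napi_struct napi;
};

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_dev_priv *priv = dev_id;

	/* A real driver would also mask its rx interrupts here first. */
	if (napi_schedule_prep(&priv->napi))	/* atomically claim the schedule */
		__napi_schedule(&priv->napi);	/* queue ->poll() to run in softirq */

	return IRQ_HANDLED;
}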
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 40dddcc6dc3..0d0fd28a904 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1907,11 +1907,11 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
* of bytes already consumed and the next call to
* skb_seq_read() will return the remaining part of the block.
*
- * Note: The size of each block of data returned can be arbitrary,
+ * Note 1: The size of each block of data returned can be arbitrary,
* this limitation is the cost for zerocopy sequential
* reads of potentially non-linear data.
*
- * Note: Fragment lists within fragments are not implemented
+ * Note 2: Fragment lists within fragments are not implemented
* at the moment, state->root_skb could be replaced with
* a stack for this purpose.
*/
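The two notes above describe the zerocopy sequential-read API as a whole: skb_prepare_seq_read() sets up the state, each skb_seq_read() call hands back one contiguous block (of arbitrary size, per Note 1), and skb_abort_seq_read() cleans up if the caller stops early. A minimal sketch of the pattern; sum_skb_range() is a hypothetical helper, while the seq-read calls are the real API from net/core/skbuff.c:

#include <linux/skbuff.h>

/* Hypothetical helper; the seq-read calls are the real API. */
static u32 sum_skb_range(struct sk_buff *skb, unsigned int from,
			 unsigned int to)
{
	struct skb_seq_state st;
	const u8 *data;
	unsigned int consumed = 0, len, i;
	u32 sum = 0;

	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		/* 'data' points at 'len' contiguous bytes; the block
		 * size is arbitrary, per Note 1 above. */
		for (i = 0; i < len; i++)
			sum += data[i];
		consumed += len;
	}
	/* skb_abort_seq_read(&st) would be needed only if we stopped
	 * before skb_seq_read() returned 0. */
	return sum;
}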
diff --git a/net/core/sock.c b/net/core/sock.c
index 433715fb141..09cb3a74de7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1731,7 +1731,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
atomic_set(&sk->sk_drops, 0);
}
-void fastcall lock_sock_nested(struct sock *sk, int subclass)
+void lock_sock_nested(struct sock *sk, int subclass)
{
might_sleep();
spin_lock_bh(&sk->sk_lock.slock);
@@ -1748,7 +1748,7 @@ void fastcall lock_sock_nested(struct sock *sk, int subclass)
EXPORT_SYMBOL(lock_sock_nested);
-void fastcall release_sock(struct sock *sk)
+void release_sock(struct sock *sk)
{
/*
* The sk_lock has mutex_unlock() semantics: