Diffstat (limited to 'include/net/sock.h')
-rw-r--r--	include/net/sock.h	71
1 file changed, 71 insertions(+), 0 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 07133c5e986..2c0da9239b9 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -54,6 +54,7 @@
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
+#include <linux/poll.h>
 
 #include <asm/atomic.h>
 #include <net/dst.h>
@@ -1241,6 +1242,74 @@ static inline int sk_has_allocations(const struct sock *sk)
 	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
 }
+/**
+ * sk_has_sleeper - check if there are any waiting processes
+ * @sk: socket
+ *
+ * Returns true if the socket has waiting processes.
+ *
+ * The purpose of sk_has_sleeper and sock_poll_wait is to wrap the memory
+ * barrier calls. They were added because of a race found in the tcp code.
+ *
+ * Consider the following tcp code paths:
+ *
+ * CPU1                    CPU2
+ *
+ * sys_select              receive packet
+ * ...                     ...
+ * __add_wait_queue        update tp->rcv_nxt
+ * ...                     ...
+ * tp->rcv_nxt check       sock_def_readable
+ * ...                     {
+ * schedule                ...
+ *                         if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ *                                 wake_up_interruptible(sk->sk_sleep)
+ *                         ...
+ *                         }
+ *
+ * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
+ * in its cache, and so does the tp->rcv_nxt update on CPU2's side. CPU1
+ * could then end up calling schedule and sleeping forever if there is no
+ * more data on the socket.
+ *
+ * sk_has_sleeper is always called right after a call to read_lock, so we
+ * can use the smp_mb__after_lock barrier.
+ */
+static inline int sk_has_sleeper(struct sock *sk)
+{
+	/*
+	 * We need to be sure we are in sync with the
+	 * add_wait_queue modifications to the wait queue.
+	 *
+	 * This memory barrier is paired in the sock_poll_wait.
+	 */
+	smp_mb__after_lock();
+	return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
+}
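
This view is limited to include/net/sock.h, so the definition of
smp_mb__after_lock() used above is filtered out. A sketch of the likely
definitions from the rest of the commit (quoted from memory, so treat the
exact files and bodies as assumptions):

/* Generic fallback (assumed to live in include/linux/spinlock.h): a
 * plain lock acquisition does not imply a full memory barrier, so
 * default to a full smp_mb(). */
#ifndef smp_mb__after_lock
#define smp_mb__after_lock()	smp_mb()
#endif

/* x86 override (assumed to live in arch/x86/include/asm/spinlock.h):
 * read_lock() is already a full barrier there, so the extra fence can
 * be compiled away. */
#define smp_mb__after_lock()	do { } while (0)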
+
+/**
+ * sock_poll_wait - place a memory barrier behind the poll_wait call.
+ * @filp: file
+ * @wait_address: socket wait queue
+ * @p: poll_table
+ *
+ * See the comments in the sk_has_sleeper function.
+ */
+static inline void sock_poll_wait(struct file *filp,
+		wait_queue_head_t *wait_address, poll_table *p)
+{
+	if (p && wait_address) {
+		poll_wait(filp, wait_address, p);
+		/*
+		 * We need to be sure we are in sync with the
+		 * socket flags modification.
+		 *
+		 * This memory barrier is paired in the sk_has_sleeper.
+		 */
+		smp_mb();
+	}
+}
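
How the two halves pair up in practice: the same commit also converts the
wakeup callbacks and the protocol poll handlers, but those files are
filtered out of this view. A hedged sketch, modeled loosely on
sock_def_readable() and tcp_poll() (the bodies below are assumed for
illustration, not taken from this diff):

/* Wakeup side (sketch): read_lock() followed by sk_has_sleeper(), whose
 * smp_mb__after_lock() orders the data update (e.g. tp->rcv_nxt)
 * against the waitqueue_active() check. */
static void sock_def_readable(struct sock *sk, int len)
{
	read_lock(&sk->sk_callback_lock);
	if (sk_has_sleeper(sk))
		wake_up_interruptible(sk->sk_sleep);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	read_unlock(&sk->sk_callback_lock);
}

/* Poll side (sketch): sock_poll_wait() replaces a bare poll_wait(), so
 * the smp_mb() orders __add_wait_queue() against the subsequent reads
 * of socket state. Whichever CPU runs second now sees the other CPU's
 * write, which closes the lost-wakeup window described above. */
static unsigned int tcp_poll(struct file *file, struct socket *sock,
			     poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	sock_poll_wait(file, sk->sk_sleep, wait);
	/* ... check tp->rcv_nxt etc. and fill in mask ... */
	return mask;
}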
+
 /*
  * Queue a received datagram if it will fit. Stream and sequenced
  * protocols can't normally use this as they need to fit buffers in
@@ -1252,6 +1321,7 @@ static inline int sk_has_allocations(const struct sock *sk)
 static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
 {
+	skb_orphan(skb);
 	skb->sk = sk;
 	skb->destructor = sock_wfree;
 	/*
@@ -1264,6 +1334,7 @@ static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
 static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 {
+	skb_orphan(skb);
 	skb->sk = sk;
 	skb->destructor = sock_rfree;
 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
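
The last two hunks make skb_set_owner_w() and skb_set_owner_r() detach an
skb from any previous owner before charging it to the new socket;
otherwise the old destructor would be silently overwritten and the old
socket's memory accounting never uncharged. For reference, skb_orphan()
of that era looked roughly like this (quoted from memory from
include/linux/skbuff.h, so treat it as a sketch):

/* Run the previous owner's destructor, which uncharges the skb from
 * that socket, then clear the ownership fields so the assignments in
 * skb_set_owner_w()/skb_set_owner_r() start from a clean state. */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk = NULL;
}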