Diffstat (limited to 'net/sctp/socket.c')
-rw-r--r--	net/sctp/socket.c	31
1 file changed, 13 insertions(+), 18 deletions(-)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b529af5e6f2..9df888e932c 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -156,10 +156,6 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
sizeof(struct sk_buff) +
sizeof(struct sctp_chunk);
- sk->sk_wmem_queued += SCTP_DATA_SNDSIZE(chunk) +
- sizeof(struct sk_buff) +
- sizeof(struct sctp_chunk);
-
atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
}
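The two context lines above are the tail of the per-association charge (asoc->sndbuf_used), which survives; only the duplicate socket-wide sk_wmem_queued charge is dropped. Read together with the matching sctp_wfree() hunk further down, the function reduces to roughly the following sketch (reconstructed from the visible context, so the surrounding lines are assumptions, not part of this diff):

    static inline void sctp_set_owner_w(struct sctp_chunk *chunk)
    {
            struct sctp_association *asoc = chunk->asoc;
            struct sock *sk = asoc->base.sk;

            /* The sndbuf space is tracked per association. */
            asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk) +
                                 sizeof(struct sk_buff) +
                                 sizeof(struct sctp_chunk);

            /* Only the chunk overhead is charged to the socket itself;
             * skb_set_owner_w() is assumed to have charged skb->truesize
             * to sk_wmem_alloc already. */
            atomic_add(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
    }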
@@ -1932,7 +1928,6 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
if (copy_from_user(&sp->autoclose, optval, optlen))
return -EFAULT;
- sp->ep->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
return 0;
}
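With that line gone, the setsockopt path only records the value (in seconds) in the sctp_sock; the conversion to a jiffies timeout presumably happens when each association is initialized rather than once at the endpoint. A hedged sketch of the resulting handler (the optlen check and the association-side line are reconstructions, not shown in this diff):

    static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
                                         int optlen)
    {
            struct sctp_sock *sp = sctp_sk(sk);

            if (optlen != sizeof(int))
                    return -EINVAL;
            /* Store seconds only; no endpoint timeout is updated here. */
            if (copy_from_user(&sp->autoclose, optval, optlen))
                    return -EFAULT;
            return 0;
    }

    /* Assumed association-side counterpart, elsewhere in the tree:
     *      asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
     */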
@@ -3426,7 +3421,7 @@ static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_add
}
static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port,
- void * __user *to, size_t space_left)
+ void __user **to, size_t space_left)
{
struct list_head *pos;
struct sctp_sockaddr_entry *addr;
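The signature change is a sparse annotation fix: __user qualifies the thing pointed to, so a kernel pointer to a user-space pointer is written void __user **, whereas void * __user * wrongly marks the outer pointer as living in user space. A standalone illustration (the helper and its names are hypothetical):

    #include <asm/uaccess.h>    /* copy_to_user(); era-appropriate header */

    /* Hypothetical helper: copy 'len' bytes to the user buffer that '*to'
     * points at, then advance the cursor. '*to' is kernel memory holding
     * a __user address, hence the void __user ** spelling. */
    static int put_and_advance(void __user **to, const void *src, size_t len)
    {
            if (copy_to_user(*to, src, len))
                    return -EFAULT;
            *to += len;  /* void-pointer arithmetic: a GCC extension the kernel uses */
            return 0;
    }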
@@ -4427,7 +4422,7 @@ cleanup:
* tcp_poll(). Note that, based on these implementations, we don't
* lock the socket in this function, even though it seems that,
* ideally, locking or some other mechanisms can be used to ensure
- * the integrity of the counters (sndbuf and wmem_queued) used
+ * the integrity of the counters (sndbuf and wmem_alloc) used
* in this place. We assume that we don't need locks either until proven
* otherwise.
*
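The reworded comment reflects the counter swap below: sk_wmem_alloc is an atomic_t, so reading it from poll without the socket lock can return a stale value but never a torn one, which is the property this lockless design leans on. A reduced sketch of the kind of check sctp_poll() performs (simplified; the error and readable masks are omitted):

    static unsigned int sctp_poll(struct file *file, struct socket *sock,
                                  poll_table *wait)
    {
            struct sock *sk = sock->sk;
            unsigned int mask = 0;

            poll_wait(file, sk->sk_sleep, wait);

            /* Lockless probe: sctp_writeable() atomically reads
             * sk_wmem_alloc, so no torn reads; staleness is acceptable. */
            if (sctp_writeable(sk))
                    mask |= POLLOUT | POLLWRNORM;

            return mask;
    }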
@@ -4744,11 +4739,6 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
struct sk_buff *skb;
long timeo;
- /* Caller is allowed not to check sk->sk_err before calling. */
- error = sock_error(sk);
- if (error)
- goto no_packet;
-
timeo = sock_rcvtimeo(sk, noblock);
SCTP_DEBUG_PRINTK("Timeout: timeo: %ld, MAX: %ld.\n",
@@ -4775,6 +4765,11 @@ static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
if (skb)
return skb;
+ /* Caller is allowed not to check sk->sk_err before calling. */
+ error = sock_error(sk);
+ if (error)
+ goto no_packet;
+
if (sk->sk_shutdown & RCV_SHUTDOWN)
break;
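Moving the sock_error() check below the dequeue attempt changes the draining behaviour: data that was queued before an error or shutdown was recorded is now delivered to the caller instead of being masked by the early error return. The loop shape after both hunks, as a reduced sketch (MSG_PEEK handling and debug output omitted; sctp_wait_for_packet() is the wait helper this file already uses):

    static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
                                                  int noblock, int *err)
    {
            long timeo = sock_rcvtimeo(sk, noblock);
            struct sk_buff *skb;
            int error;

            do {
                    /* Try the queue first, so packets that arrived ahead
                     * of a pending error are not lost. */
                    skb = skb_dequeue(&sk->sk_receive_queue);
                    if (skb)
                            return skb;

                    /* Caller is allowed not to check sk->sk_err first. */
                    error = sock_error(sk);
                    if (error)
                            goto no_packet;

                    if (sk->sk_shutdown & RCV_SHUTDOWN)
                            break;

                    error = -EAGAIN;
                    if (!timeo)
                            goto no_packet;
            } while (sctp_wait_for_packet(sk, &error, &timeo) == 0);

            return NULL;

    no_packet:
            *err = error;
            return NULL;
    }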
@@ -4834,10 +4829,6 @@ static void sctp_wfree(struct sk_buff *skb)
sizeof(struct sk_buff) +
sizeof(struct sctp_chunk);
- sk->sk_wmem_queued -= SCTP_DATA_SNDSIZE(chunk) +
- sizeof(struct sk_buff) +
- sizeof(struct sctp_chunk);
-
atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
sock_wfree(skb);
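This is the release side of the sctp_set_owner_w() hunk above: the per-association sndbuf_used charge is undone, the chunk overhead comes off sk_wmem_alloc, and sock_wfree() drops the skb->truesize charge. Reconstructed in full (the chunk-recovery lines are assumptions from the surrounding code, not shown in this diff):

    static void sctp_wfree(struct sk_buff *skb)
    {
            /* Assumed: the chunk pointer was stashed in skb->cb by
             * sctp_set_owner_w(). */
            struct sctp_chunk *chunk = *((struct sctp_chunk **)(skb->cb));
            struct sctp_association *asoc = chunk->asoc;
            struct sock *sk = asoc->base.sk;

            asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk) +
                                 sizeof(struct sk_buff) +
                                 sizeof(struct sctp_chunk);

            atomic_sub(sizeof(struct sctp_chunk), &sk->sk_wmem_alloc);
            sock_wfree(skb);  /* uncharges skb->truesize from sk_wmem_alloc */
    }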
@@ -4921,7 +4912,7 @@ void sctp_write_space(struct sock *sk)
/* Is there any sndbuf space available on the socket?
*
- * Note that wmem_queued is the sum of the send buffers on all of the
+ * Note that sk_wmem_alloc is the sum of the send buffers on all of the
* associations on the same socket. For a UDP-style socket with
* multiple associations, it is possible for it to be "unwriteable"
* prematurely. I assume that this is acceptable because
@@ -4934,7 +4925,7 @@ static int sctp_writeable(struct sock *sk)
{
int amt = 0;
- amt = sk->sk_sndbuf - sk->sk_wmem_queued;
+ amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
if (amt < 0)
amt = 0;
return amt;
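Because the two reads race (sk_wmem_alloc can momentarily exceed sk_sndbuf), the clamp to zero keeps the returned headroom non-negative rather than wrapping. A hedged usage sketch of blocking on that headroom, in the style of this file's wait helpers (the helper name and its simplifications, e.g. no signal or timeout handling, are mine):

    /* Hypothetical: sleep until at least 'len' bytes of sndbuf headroom
     * exist. Real code would also honour signals and sk_err. */
    static int wait_for_wspace(struct sock *sk, size_t len)
    {
            DEFINE_WAIT(wait);

            for (;;) {
                    prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
                    if ((size_t)sctp_writeable(sk) >= len)
                            break;
                    release_sock(sk);
                    schedule();     /* woken via sctp_write_space() */
                    lock_sock(sk);
            }
            finish_wait(sk->sk_sleep, &wait);
            return 0;
    }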
@@ -5115,8 +5106,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
+ sock_rfree(skb);
__skb_unlink(skb, &oldsk->sk_receive_queue);
__skb_queue_tail(&newsk->sk_receive_queue, skb);
+ skb_set_owner_r(skb, newsk);
}
}
@@ -5144,8 +5137,10 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
+ sock_rfree(skb);
__skb_unlink(skb, &oldsp->pd_lobby);
__skb_queue_tail(queue, skb);
+ skb_set_owner_r(skb, newsk);
}
}
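In both migration loops the new calls are what keep receive-buffer accounting balanced: __skb_unlink()/__skb_queue_tail() alone would leave the old socket permanently charged for the skb's truesize and the new socket never charged, eventually wedging one and over-admitting the other. The idiom in isolation (hedged sketch; the wrapper function is hypothetical):

    /* Move one queued skb between sockets without skewing sk_rmem_alloc. */
    static void migrate_rx_skb(struct sk_buff *skb, struct sk_buff_head *from,
                               struct sk_buff_head *to, struct sock *newsk)
    {
            sock_rfree(skb);             /* uncharges skb->sk (the old socket) */
            __skb_unlink(skb, from);
            __skb_queue_tail(to, skb);
            skb_set_owner_r(skb, newsk); /* charges the new socket */
    }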