author    merge <null@invalid>                      2009-01-22 13:55:32 +0000
committer Andy Green <agreen@octopus.localdomain>  2009-01-22 13:55:32 +0000
commit    aa6f5ffbdba45aa8e19e5048648fc6c7b25376d3 (patch)
tree      fbb786d0ac6f8a774fd834e9ce951197e60fbffa /net/can
parent    f2d78193eae5dccd3d588d2c8ea0866efc368332 (diff)
MERGE-via-pending-tracking-hist-MERGE-via-stable-tracking-MERGE-via-mokopatches-tracking-fix-stray-endmenu-patch-1232632040-1232632141
pending-tracking-hist top was MERGE-via-stable-tracking-MERGE-via-mokopatches-tracking-fix-stray-endmenu-patch-1232632040-1232632141 / fdf777a63bcb59e0dfd78bfe2c6242e01f6d4eb9 ...

parent commitmessage:
From: merge <null@invalid>
MERGE-via-stable-tracking-hist-MERGE-via-mokopatches-tracking-fix-stray-endmenu-patch-1232632040

stable-tracking-hist top was MERGE-via-mokopatches-tracking-fix-stray-endmenu-patch-1232632040 / 90463bfd2d5a3c8b52f6e6d71024a00e052b0ced ...

parent commitmessage:
From: merge <null@invalid>
MERGE-via-mokopatches-tracking-hist-fix-stray-endmenu-patch

mokopatches-tracking-hist top was fix-stray-endmenu-patch / 3630e0be570de8057e7f8d2fe501ed353cdf34e6 ...

parent commitmessage:
From: Andy Green <andy@openmoko.com>
fix-stray-endmenu.patch

Signed-off-by: Andy Green <andy@openmoko.com>
Diffstat (limited to 'net/can')
-rw-r--r--  net/can/af_can.c   83
-rw-r--r--  net/can/bcm.c     208
-rw-r--r--  net/can/raw.c      37
3 files changed, 206 insertions(+), 122 deletions(-)
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 7d4d2b3c137..fa417ca6cbe 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -319,23 +319,52 @@ static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
return n ? d : NULL;
}
+/**
+ * find_rcv_list - determine optimal filterlist inside device filter struct
+ * @can_id: pointer to CAN identifier of a given can_filter
+ * @mask: pointer to CAN mask of a given can_filter
+ * @d: pointer to the device filter struct
+ *
+ * Description:
+ * Returns the optimal filterlist to reduce the filter handling in the
+ * receive path. This function is called by service functions that need
+ * to register or unregister a can_filter in the filter lists.
+ *
+ * In general, a filter matches when
+ *
+ * <received_can_id> & mask == can_id & mask
+ *
+ * so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describes
+ * a relevant bit for the filter.
+ *
+ * The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
+ * filter for error frames (CAN_ERR_FLAG bit set in mask). For error frames
+ * there is a special filterlist and a special rx path filter handling.
+ *
+ * Return:
+ * Pointer to optimal filterlist for the given can_id/mask pair.
+ * Consistency-checked mask.
+ * Reduced can_id to have a preprocessed filter compare value.
+ */
static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
struct dev_rcv_lists *d)
{
canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */
- /* filter error frames */
+ /* filter for error frames in extra filterlist */
if (*mask & CAN_ERR_FLAG) {
- /* clear CAN_ERR_FLAG in list entry */
+ /* clear CAN_ERR_FLAG in filter entry */
*mask &= CAN_ERR_MASK;
return &d->rx[RX_ERR];
}
- /* ensure valid values in can_mask */
- if (*mask & CAN_EFF_FLAG)
- *mask &= (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG);
- else
- *mask &= (CAN_SFF_MASK | CAN_RTR_FLAG);
+ /* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */
+
+#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)
+
+ /* ensure valid values in can_mask for 'SFF only' frame filtering */
+ if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
+ *mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);
/* reduce condition testing at receive time */
*can_id &= *mask;
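For reference, the matching rule documented above reduces to a single predicate. A minimal sketch (filter_match is a hypothetical helper, not part of this patch):

static inline int filter_match(canid_t rx_can_id, canid_t can_id,
			       canid_t mask)
{
	/* every bit set in mask must agree between rx_can_id and can_id */
	return (rx_can_id & mask) == (can_id & mask);
}

Because find_rcv_list() pre-reduces *can_id &= *mask, the receive path can compare (rx_can_id & mask) against the stored can_id directly.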
@@ -348,15 +377,19 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
if (!(*mask))
return &d->rx[RX_ALL];
- /* use extra filterset for the subscription of exactly *ONE* can_id */
- if (*can_id & CAN_EFF_FLAG) {
- if (*mask == (CAN_EFF_MASK | CAN_EFF_FLAG)) {
- /* RFC: a use-case for hash-tables in the future? */
- return &d->rx[RX_EFF];
+ /* extra filterlists for the subscription of a single non-RTR can_id */
+ if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS)
+ && !(*can_id & CAN_RTR_FLAG)) {
+
+ if (*can_id & CAN_EFF_FLAG) {
+ if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) {
+ /* RFC: a future use-case for hash-tables? */
+ return &d->rx[RX_EFF];
+ }
+ } else {
+ if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
+ return &d->rx_sff[*can_id];
}
- } else {
- if (*mask == CAN_SFF_MASK)
- return &d->rx_sff[*can_id];
}
/* default: filter via can_id/can_mask */
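Two hypothetical filter pairs illustrate the new list selection (values invented for illustration):

struct can_filter f1 = {
	.can_id   = 0x123,
	.can_mask = CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG,
};
/* -> d->rx_sff[0x123]: exactly one SFF id, non-RTR data frames only */

struct can_filter f2 = {
	.can_id   = 0x123,
	.can_mask = CAN_SFF_MASK,
};
/* -> d->rx[RX_FIL]: the mask leaves CAN_EFF_FLAG/CAN_RTR_FLAG open, so
 * EFF or RTR frames could still match and the single-id list is out */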
@@ -381,6 +414,12 @@ static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
* The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
* filter for error frames (CAN_ERR_FLAG bit set in mask).
*
+ * The provided pointer to the sk_buff is guaranteed to be valid as long as
+ * the callback function is running. The callback function must *not* free
+ * the given sk_buff while processing its task. When the given sk_buff is
+ * needed after the end of the callback function it must be cloned inside
+ * the callback function with skb_clone().
+ *
* Return:
* 0 on success
* -ENOMEM on missing cache mem to create subscription entry
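Given this contract, a receive callback that needs the frame beyond its own runtime must take a private copy; raw_rcv() below applies exactly this pattern. A minimal sketch with hypothetical names (my_rx_callback, my_enqueue):

static void my_rx_callback(struct sk_buff *skb, void *data)
{
	struct sk_buff *clone;

	/* skb stays valid only for the duration of this call and
	 * must not be freed here */
	clone = skb_clone(skb, GFP_ATOMIC);
	if (!clone)
		return;

	/* the clone is ours: hand it off for later processing */
	my_enqueue(clone);
}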
@@ -536,13 +575,8 @@ EXPORT_SYMBOL(can_rx_unregister);
static inline void deliver(struct sk_buff *skb, struct receiver *r)
{
- struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
-
- if (clone) {
- clone->sk = skb->sk;
- r->func(clone, r->data);
- r->matches++;
- }
+ r->func(skb, r->data);
+ r->matches++;
}
static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
@@ -589,7 +623,10 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
}
}
- /* check CAN_ID specific entries */
+ /* check filterlists for single non-RTR can_ids */
+ if (can_id & CAN_RTR_FLAG)
+ return matches;
+
if (can_id & CAN_EFF_FLAG) {
hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) {
if (r->can_id == can_id) {
diff --git a/net/can/bcm.c b/net/can/bcm.c
index d0dd382001e..b7c7d465113 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -64,12 +64,13 @@
#define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */
/* get best masking value for can_rx_register() for a given single can_id */
-#define REGMASK(id) ((id & CAN_RTR_FLAG) | ((id & CAN_EFF_FLAG) ? \
- (CAN_EFF_MASK | CAN_EFF_FLAG) : CAN_SFF_MASK))
+#define REGMASK(id) ((id & CAN_EFF_FLAG) ? \
+ (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
+ (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
-#define CAN_BCM_VERSION "20080415"
+#define CAN_BCM_VERSION CAN_VERSION
static __initdata const char banner[] = KERN_INFO
- "can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n";
+ "can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n";
MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
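The reworked REGMASK now also pins the EFF and RTR flag bits, so the registrations match the single-id receive lists introduced in af_can.c above. Hypothetical expansions for illustration:

REGMASK(0x123)
	== (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)	/* SFF id */

REGMASK(0x12345678 | CAN_EFF_FLAG)
	== (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)	/* EFF id */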
@@ -89,6 +90,7 @@ struct bcm_op {
unsigned long frames_abs, frames_filtered;
struct timeval ival1, ival2;
struct hrtimer timer, thrtimer;
+ struct tasklet_struct tsklet, thrtsklet;
ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
int rx_ifindex;
int count;
@@ -340,19 +342,15 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
}
}
-/*
- * bcm_tx_timeout_handler - performes cyclic CAN frame transmissions
- */
-static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
+static void bcm_tx_timeout_tsklet(unsigned long data)
{
- struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
- enum hrtimer_restart ret = HRTIMER_NORESTART;
+ struct bcm_op *op = (struct bcm_op *)data;
+ struct bcm_msg_head msg_head;
if (op->kt_ival1.tv64 && (op->count > 0)) {
op->count--;
if (!op->count && (op->flags & TX_COUNTEVT)) {
- struct bcm_msg_head msg_head;
/* create notification to user */
msg_head.opcode = TX_EXPIRED;
@@ -371,20 +369,32 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
/* send (next) frame */
bcm_can_tx(op);
- hrtimer_forward(hrtimer, ktime_get(), op->kt_ival1);
- ret = HRTIMER_RESTART;
+ hrtimer_start(&op->timer,
+ ktime_add(ktime_get(), op->kt_ival1),
+ HRTIMER_MODE_ABS);
} else {
if (op->kt_ival2.tv64) {
/* send (next) frame */
bcm_can_tx(op);
- hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
- ret = HRTIMER_RESTART;
+ hrtimer_start(&op->timer,
+ ktime_add(ktime_get(), op->kt_ival2),
+ HRTIMER_MODE_ABS);
}
}
+}
- return ret;
+/*
+ * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
+ */
+static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
+{
+ struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
+
+ tasklet_schedule(&op->tsklet);
+
+ return HRTIMER_NORESTART;
}
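The conversion follows a common kernel pattern: hrtimer callbacks run in hard-irq context, so work that ends up in the socket receive path (bcm_send_to_user()) is deferred to a tasklet running in softirq context. A minimal sketch of the pattern, all names hypothetical:

static void my_tsklet_fn(unsigned long data)
{
	struct my_op *op = (struct my_op *)data;

	/* softirq context: safe place for the heavier work */
	do_work(op);
}

static enum hrtimer_restart my_timeout_handler(struct hrtimer *t)
{
	struct my_op *op = container_of(t, struct my_op, timer);

	/* defer out of hard-irq context */
	tasklet_schedule(&op->tsklet);

	/* rearming, if needed, happens from the tasklet */
	return HRTIMER_NORESTART;
}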
/*
@@ -401,6 +411,9 @@ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
if (op->frames_filtered > ULONG_MAX/100)
op->frames_filtered = op->frames_abs = 0;
+ /* this element is not throttled anymore */
+ data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV);
+
head.opcode = RX_CHANGED;
head.flags = op->flags;
head.count = op->count;
@@ -419,37 +432,32 @@ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
*/
static void bcm_rx_update_and_send(struct bcm_op *op,
struct can_frame *lastdata,
- struct can_frame *rxdata)
+ const struct can_frame *rxdata)
{
memcpy(lastdata, rxdata, CFSIZ);
- /* mark as used */
- lastdata->can_dlc |= RX_RECV;
+ /* mark as used and throttled by default */
+ lastdata->can_dlc |= (RX_RECV|RX_THR);
- /* throtteling mode inactive OR data update already on the run ? */
- if (!op->kt_ival2.tv64 || hrtimer_callback_running(&op->thrtimer)) {
+ /* throttling mode inactive? */
+ if (!op->kt_ival2.tv64) {
/* send RX_CHANGED to the user immediately */
- bcm_rx_changed(op, rxdata);
+ bcm_rx_changed(op, lastdata);
return;
}
- if (hrtimer_active(&op->thrtimer)) {
- /* mark as 'throttled' */
- lastdata->can_dlc |= RX_THR;
+ /* with active throttling timer we are just done here */
+ if (hrtimer_active(&op->thrtimer))
return;
- }
- if (!op->kt_lastmsg.tv64) {
- /* send first RX_CHANGED to the user immediately */
- bcm_rx_changed(op, rxdata);
- op->kt_lastmsg = ktime_get();
- return;
- }
+ /* first reception with throttling mode enabled */
+ if (!op->kt_lastmsg.tv64)
+ goto rx_changed_settime;
+ /* got a second frame inside a potential throttle period? */
if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
ktime_to_us(op->kt_ival2)) {
- /* mark as 'throttled' and start timer */
- lastdata->can_dlc |= RX_THR;
+ /* do not send the saved data - only start throttle timer */
hrtimer_start(&op->thrtimer,
ktime_add(op->kt_lastmsg, op->kt_ival2),
HRTIMER_MODE_ABS);
@@ -457,7 +465,8 @@ static void bcm_rx_update_and_send(struct bcm_op *op,
}
/* the gap was big enough, so throttling was not needed here */
- bcm_rx_changed(op, rxdata);
+rx_changed_settime:
+ bcm_rx_changed(op, lastdata);
op->kt_lastmsg = ktime_get();
}
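The throttle decision itself reduces to a window test against the last delivery to the user. A simplified sketch of the condition used above (should_throttle is hypothetical):

static inline int should_throttle(struct bcm_op *op)
{
	/* second frame inside the kt_ival2 window after the
	 * last message sent to the user? */
	return ktime_us_delta(ktime_get(), op->kt_lastmsg) <
	       ktime_to_us(op->kt_ival2);
}

If it fires, the data stays marked RX_THR in last_frames[] and thrtimer is armed for kt_lastmsg + kt_ival2; otherwise the frame goes out immediately and kt_lastmsg is refreshed.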
@@ -466,7 +475,7 @@ static void bcm_rx_update_and_send(struct bcm_op *op,
* received data stored in op->last_frames[]
*/
static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
- struct can_frame *rxdata)
+ const struct can_frame *rxdata)
{
/*
* no one uses the MSBs of can_dlc for comparison,
@@ -510,14 +519,12 @@ static void bcm_rx_starttimer(struct bcm_op *op)
hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
}
-/*
- * bcm_rx_timeout_handler - when the (cyclic) CAN frame receiption timed out
- */
-static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
+static void bcm_rx_timeout_tsklet(unsigned long data)
{
- struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
+ struct bcm_op *op = (struct bcm_op *)data;
struct bcm_msg_head msg_head;
+ /* create notification to user */
msg_head.opcode = RX_TIMEOUT;
msg_head.flags = op->flags;
msg_head.count = op->count;
@@ -527,6 +534,17 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
msg_head.nframes = 0;
bcm_send_to_user(op, &msg_head, NULL, 0);
+}
+
+/*
+ * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
+ */
+static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
+{
+ struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
+
+ /* schedule before NET_RX_SOFTIRQ */
+ tasklet_hi_schedule(&op->tsklet);
/* no restart of the timer is done here! */
@@ -540,9 +558,25 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
}
/*
+ * bcm_rx_do_flush - helper for bcm_rx_thr_flush
+ */
+static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index)
+{
+ if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) {
+ if (update)
+ bcm_rx_changed(op, &op->last_frames[index]);
+ return 1;
+ }
+ return 0;
+}
+
+/*
* bcm_rx_thr_flush - Check for throttled data and send it to the userspace
+ *
+ * update == 0 : just check if throttled data is available (any irq context)
+ * update == 1 : check and send throttled data to userspace (soft_irq context)
*/
-static int bcm_rx_thr_flush(struct bcm_op *op)
+static int bcm_rx_thr_flush(struct bcm_op *op, int update)
{
int updated = 0;
@@ -550,27 +584,25 @@ static int bcm_rx_thr_flush(struct bcm_op *op)
int i;
/* for MUX filter we start at index 1 */
- for (i = 1; i < op->nframes; i++) {
- if ((op->last_frames) &&
- (op->last_frames[i].can_dlc & RX_THR)) {
- op->last_frames[i].can_dlc &= ~RX_THR;
- bcm_rx_changed(op, &op->last_frames[i]);
- updated++;
- }
- }
+ for (i = 1; i < op->nframes; i++)
+ updated += bcm_rx_do_flush(op, update, i);
} else {
/* for RX_FILTER_ID and simple filter */
- if (op->last_frames && (op->last_frames[0].can_dlc & RX_THR)) {
- op->last_frames[0].can_dlc &= ~RX_THR;
- bcm_rx_changed(op, &op->last_frames[0]);
- updated++;
- }
+ updated += bcm_rx_do_flush(op, update, 0);
}
return updated;
}
+static void bcm_rx_thr_tsklet(unsigned long data)
+{
+ struct bcm_op *op = (struct bcm_op *)data;
+
+ /* push the changed data to userspace */
+ bcm_rx_thr_flush(op, 1);
+}
+
/*
* bcm_rx_thr_handler - the time for blocked content updates is over now:
* Check for throttled data and send it to the userspace
@@ -579,7 +611,9 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
{
struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
- if (bcm_rx_thr_flush(op)) {
+ tasklet_schedule(&op->thrtsklet);
+
+ if (bcm_rx_thr_flush(op, 0)) {
hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
return HRTIMER_RESTART;
} else {
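Note the resulting division of labour: the hrtimer callback, still in hard-irq context, only peeks (update == 0) to decide whether to rearm, while the scheduled tasklet delivers the data (update == 1). The RX_THR mark is cleared in bcm_rx_changed() via the can_dlc masking added above, so a flush with update == 0 leaves the throttle state untouched.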
@@ -595,29 +629,21 @@ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
static void bcm_rx_handler(struct sk_buff *skb, void *data)
{
struct bcm_op *op = (struct bcm_op *)data;
- struct can_frame rxframe;
+ const struct can_frame *rxframe = (struct can_frame *)skb->data;
int i;
/* disable timeout */
hrtimer_cancel(&op->timer);
- if (skb->len == sizeof(rxframe)) {
- memcpy(&rxframe, skb->data, sizeof(rxframe));
- /* save rx timestamp */
- op->rx_stamp = skb->tstamp;
- /* save originator for recvfrom() */
- op->rx_ifindex = skb->dev->ifindex;
- /* update statistics */
- op->frames_abs++;
- kfree_skb(skb);
-
- } else {
- kfree_skb(skb);
+ if (op->can_id != rxframe->can_id)
return;
- }
- if (op->can_id != rxframe.can_id)
- return;
+ /* save rx timestamp */
+ op->rx_stamp = skb->tstamp;
+ /* save originator for recvfrom() */
+ op->rx_ifindex = skb->dev->ifindex;
+ /* update statistics */
+ op->frames_abs++;
if (op->flags & RX_RTR_FRAME) {
/* send reply for RTR-request (placed in op->frames[0]) */
@@ -627,16 +653,14 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
if (op->flags & RX_FILTER_ID) {
/* the easiest case */
- bcm_rx_update_and_send(op, &op->last_frames[0], &rxframe);
- bcm_rx_starttimer(op);
- return;
+ bcm_rx_update_and_send(op, &op->last_frames[0], rxframe);
+ goto rx_starttimer;
}
if (op->nframes == 1) {
/* simple compare with index 0 */
- bcm_rx_cmp_to_index(op, 0, &rxframe);
- bcm_rx_starttimer(op);
- return;
+ bcm_rx_cmp_to_index(op, 0, rxframe);
+ goto rx_starttimer;
}
if (op->nframes > 1) {
@@ -648,15 +672,17 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data)
*/
for (i = 1; i < op->nframes; i++) {
- if ((GET_U64(&op->frames[0]) & GET_U64(&rxframe)) ==
+ if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) ==
(GET_U64(&op->frames[0]) &
GET_U64(&op->frames[i]))) {
- bcm_rx_cmp_to_index(op, i, &rxframe);
+ bcm_rx_cmp_to_index(op, i, rxframe);
break;
}
}
- bcm_rx_starttimer(op);
}
+
+rx_starttimer:
+ bcm_rx_starttimer(op);
}
/*
@@ -680,6 +706,12 @@ static void bcm_remove_op(struct bcm_op *op)
hrtimer_cancel(&op->timer);
hrtimer_cancel(&op->thrtimer);
+ if (op->tsklet.func)
+ tasklet_kill(&op->tsklet);
+
+ if (op->thrtsklet.func)
+ tasklet_kill(&op->thrtsklet);
+
if ((op->frames) && (op->frames != &op->sframe))
kfree(op->frames);
@@ -890,6 +922,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
op->timer.function = bcm_tx_timeout_handler;
+ /* initialize tasklet for tx countevent notification */
+ tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet,
+ (unsigned long) op);
+
/* currently unused in tx_ops */
hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -1053,9 +1089,17 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
op->timer.function = bcm_rx_timeout_handler;
+ /* initialize tasklet for rx timeout notification */
+ tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet,
+ (unsigned long) op);
+
hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
op->thrtimer.function = bcm_rx_thr_handler;
+ /* initialize tasklet for rx throttle handling */
+ tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet,
+ (unsigned long) op);
+
/* add this bcm_op to the list of the rx_ops */
list_add(&op->list, &bo->rx_ops);
@@ -1101,7 +1145,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
*/
op->kt_lastmsg = ktime_set(0, 0);
hrtimer_cancel(&op->thrtimer);
- bcm_rx_thr_flush(op);
+ bcm_rx_thr_flush(op, 1);
}
if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
diff --git a/net/can/raw.c b/net/can/raw.c
index 6e0663faaf9..0703cba4bf9 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -99,13 +99,14 @@ static void raw_rcv(struct sk_buff *skb, void *data)
struct raw_sock *ro = raw_sk(sk);
struct sockaddr_can *addr;
- if (!ro->recv_own_msgs) {
- /* check the received tx sock reference */
- if (skb->sk == sk) {
- kfree_skb(skb);
- return;
- }
- }
+ /* check the received tx sock reference */
+ if (!ro->recv_own_msgs && skb->sk == sk)
+ return;
+
+ /* clone the given skb to be able to enqueue it into the rcv queue */
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (!skb)
+ return;
/*
* Put the datagram to the queue so that raw_recvmsg() can
@@ -641,17 +642,12 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT,
&err);
- if (!skb) {
- dev_put(dev);
- return err;
- }
+ if (!skb)
+ goto put_dev;
err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
- if (err < 0) {
- kfree_skb(skb);
- dev_put(dev);
- return err;
- }
+ if (err < 0)
+ goto free_skb;
skb->dev = dev;
skb->sk = sk;
@@ -660,9 +656,16 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
dev_put(dev);
if (err)
- return err;
+ goto send_failed;
return size;
+
+free_skb:
+ kfree_skb(skb);
+put_dev:
+ dev_put(dev);
+send_failed:
+ return err;
}
static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,