From 52bad64d95bd89e08c49ec5a071fa6dcbe5a1a9c Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 22 Nov 2006 14:54:01 +0000 Subject: WorkStruct: Separate delayable and non-delayable events. Separate delayable work items from non-delayable work items by splitting them into a separate structure (delayed_work), which incorporates a work_struct and the timer_list removed from work_struct. The work_struct struct is huge, and this limits its usefulness. On a 64-bit architecture it's nearly 100 bytes in size. This reduces that by half for the non-delayable type of event. Signed-Off-By: David Howells --- net/core/link_watch.c | 9 ++++----- net/sunrpc/cache.c | 4 ++-- net/sunrpc/rpc_pipe.c | 3 ++- net/sunrpc/xprtsock.c | 6 +++--- 4 files changed, 11 insertions(+), 11 deletions(-) (limited to 'net') diff --git a/net/core/link_watch.c b/net/core/link_watch.c index 4b36114744c..f2ed09e25df 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -35,7 +35,7 @@ static unsigned long linkwatch_flags; static unsigned long linkwatch_nextevent; static void linkwatch_event(void *dummy); -static DECLARE_WORK(linkwatch_work, linkwatch_event, NULL); +static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event, NULL); static LIST_HEAD(lweventlist); static DEFINE_SPINLOCK(lweventlist_lock); @@ -171,10 +171,9 @@ void linkwatch_fire_event(struct net_device *dev) unsigned long delay = linkwatch_nextevent - jiffies; /* If we wrap around we'll delay it by at most HZ. */ - if (!delay || delay > HZ) - schedule_work(&linkwatch_work); - else - schedule_delayed_work(&linkwatch_work, delay); + if (delay > HZ) + delay = 0; + schedule_delayed_work(&linkwatch_work, delay); } } } diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 00cb388ece0..d5725cb1491 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -285,7 +285,7 @@ static struct file_operations content_file_operations; static struct file_operations cache_flush_operations; static void do_cache_clean(void *data); -static DECLARE_WORK(cache_cleaner, do_cache_clean, NULL); +static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean, NULL); void cache_register(struct cache_detail *cd) { @@ -337,7 +337,7 @@ void cache_register(struct cache_detail *cd) spin_unlock(&cache_list_lock); /* start the cleaning process */ - schedule_work(&cache_cleaner); + schedule_delayed_work(&cache_cleaner, 0); } int cache_unregister(struct cache_detail *cd) diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 9a0b41a97f9..97be3f7fed4 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -837,7 +837,8 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) INIT_LIST_HEAD(&rpci->pipe); rpci->pipelen = 0; init_waitqueue_head(&rpci->waitq); - INIT_WORK(&rpci->queue_timeout, rpc_timeout_upcall_queue, rpci); + INIT_DELAYED_WORK(&rpci->queue_timeout, + rpc_timeout_upcall_queue, rpci); rpci->ops = NULL; } } diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 757fc91ef25..3c7532cd009 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1262,7 +1262,7 @@ static void xs_connect(struct rpc_task *task) xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; } else { dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); - schedule_work(&xprt->connect_worker); + schedule_delayed_work(&xprt->connect_worker, 0); /* flush_scheduled_work can sleep... */ if (!RPC_IS_ASYNC(task)) @@ -1375,7 +1375,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) /* XXX: header size can vary due to auth type, IPv6, etc.
*/ xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); - INIT_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt); + INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt); xprt->bind_timeout = XS_BIND_TO; xprt->connect_timeout = XS_UDP_CONN_TO; xprt->reestablish_timeout = XS_UDP_REEST_TO; @@ -1420,7 +1420,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; - INIT_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt); + INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt); xprt->bind_timeout = XS_BIND_TO; xprt->connect_timeout = XS_TCP_CONN_TO; xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; -- cgit v1.2.3 From 65f27f38446e1976cc98fd3004b110fedcddd189 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 22 Nov 2006 14:55:48 +0000 Subject: WorkStruct: Pass the work_struct pointer instead of context data Pass the work_struct pointer to the work function rather than context data. The work function can use container_of() to work out the data. For the cases where the container of the work_struct may go away the moment the pending bit is cleared, it is made possible to defer the release of the structure by deferring the clearing of the pending bit. To make this work, an extra flag is introduced into the management side of the work_struct. This governs auto-release of the structure upon execution. Ordinarily, the work queue executor would release the work_struct for further scheduling or deallocation by clearing the pending bit prior to jumping to the work function. This means that, unless the driver makes some guarantee itself that the work_struct won't go away, the work function may not access anything else in the work_struct or its container lest they be deallocated. This is a problem if the auxiliary data is taken away (as done by the last patch). However, if the pending bit is *not* cleared before jumping to the work function, then the work function *may* access the work_struct and its container with no problems. But then the work function must itself release the work_struct by calling work_release(). In most cases, automatic release is fine, so this is the default. Special initiators exist for the non-auto-release case (ending in _NAR).
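
To make the conversion pattern of these two patches concrete, here is a minimal sketch. The my_ctx container and its functions are hypothetical, invented purely for illustration; only struct delayed_work, container_of(), the two-argument initialisers and the schedule_*() calls come from the series itself:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    /* A hypothetical container embedding both kinds of work item. */
    struct my_ctx {
        int value;
        struct work_struct work;    /* non-delayable: no timer, roughly half the size */
        struct delayed_work dwork;  /* delayable: a work_struct plus the timer_list */
    };

    /*
     * Work functions now take the work_struct pointer (previously a
     * void *data context) and recover their container with container_of().
     */
    static void my_func(struct work_struct *work)
    {
        struct my_ctx *ctx = container_of(work, struct my_ctx, work);

        printk(KERN_INFO "work ran, value=%d\n", ctx->value);
    }

    /* For a delayed_work item, go via the embedded work member. */
    static void my_delayed_func(struct work_struct *work)
    {
        struct my_ctx *ctx = container_of(work, struct my_ctx, dwork.work);

        printk(KERN_INFO "delayed work ran, value=%d\n", ctx->value);
    }

    static void my_setup(struct my_ctx *ctx)
    {
        /* The initialisers no longer take a context argument. */
        INIT_WORK(&ctx->work, my_func);
        INIT_DELAYED_WORK(&ctx->dwork, my_delayed_func);

        schedule_work(&ctx->work);
        schedule_delayed_work(&ctx->dwork, 5 * HZ);
    }
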
Signed-Off-By: David Howells --- net/core/link_watch.c | 6 +++--- net/ipv4/inet_timewait_sock.c | 5 +++-- net/ipv4/tcp_minisocks.c | 3 +-- net/sunrpc/cache.c | 6 +++--- net/sunrpc/rpc_pipe.c | 7 ++++--- net/sunrpc/sched.c | 8 ++++---- net/sunrpc/xprt.c | 7 ++++--- net/sunrpc/xprtsock.c | 18 ++++++++++-------- 8 files changed, 32 insertions(+), 28 deletions(-) (limited to 'net') diff --git a/net/core/link_watch.c b/net/core/link_watch.c index f2ed09e25df..549a2ce951b 100644 --- a/net/core/link_watch.c +++ b/net/core/link_watch.c @@ -34,8 +34,8 @@ enum lw_bits { static unsigned long linkwatch_flags; static unsigned long linkwatch_nextevent; -static void linkwatch_event(void *dummy); -static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event, NULL); +static void linkwatch_event(struct work_struct *dummy); +static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event); static LIST_HEAD(lweventlist); static DEFINE_SPINLOCK(lweventlist_lock); @@ -127,7 +127,7 @@ void linkwatch_run_queue(void) } -static void linkwatch_event(void *dummy) +static void linkwatch_event(struct work_struct *dummy) { /* Limit the number of linkwatch events to one * per second so that a runaway driver does not diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index cdd805344c6..8c74f9168b7 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -197,9 +197,10 @@ EXPORT_SYMBOL_GPL(inet_twdr_hangman); extern void twkill_slots_invalid(void); -void inet_twdr_twkill_work(void *data) +void inet_twdr_twkill_work(struct work_struct *work) { - struct inet_timewait_death_row *twdr = data; + struct inet_timewait_death_row *twdr = + container_of(work, struct inet_timewait_death_row, twkill_work); int i; if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8)) diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 0163d982690..af7b2c986b1 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -45,8 +45,7 @@ struct inet_timewait_death_row tcp_death_row = { .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, (unsigned long)&tcp_death_row), .twkill_work = __WORK_INITIALIZER(tcp_death_row.twkill_work, - inet_twdr_twkill_work, - &tcp_death_row), + inet_twdr_twkill_work), /* Short-time timewait calendar */ .twcal_hand = -1, diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index d5725cb1491..d96fd466a9a 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -284,8 +284,8 @@ static struct file_operations cache_file_operations; static struct file_operations content_file_operations; static struct file_operations cache_flush_operations; -static void do_cache_clean(void *data); -static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean, NULL); +static void do_cache_clean(struct work_struct *work); +static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean); void cache_register(struct cache_detail *cd) { @@ -461,7 +461,7 @@ static int cache_clean(void) /* * We want to regularly clean the cache, so we need to schedule some work ... 
*/ -static void do_cache_clean(void *data) +static void do_cache_clean(struct work_struct *work) { int delay = 5; if (cache_clean() == -1) diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 97be3f7fed4..49dba5febbb 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -54,10 +54,11 @@ static void rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, } static void -rpc_timeout_upcall_queue(void *data) +rpc_timeout_upcall_queue(struct work_struct *work) { LIST_HEAD(free_list); - struct rpc_inode *rpci = (struct rpc_inode *)data; + struct rpc_inode *rpci = + container_of(work, struct rpc_inode, queue_timeout.work); struct inode *inode = &rpci->vfs_inode; void (*destroy_msg)(struct rpc_pipe_msg *); @@ -838,7 +839,7 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags) rpci->pipelen = 0; init_waitqueue_head(&rpci->waitq); INIT_DELAYED_WORK(&rpci->queue_timeout, - rpc_timeout_upcall_queue, rpci); + rpc_timeout_upcall_queue); rpci->ops = NULL; } } diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index a1ab4eed41f..eff44bcdc95 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -41,7 +41,7 @@ static mempool_t *rpc_buffer_mempool __read_mostly; static void __rpc_default_timer(struct rpc_task *task); static void rpciod_killall(void); -static void rpc_async_schedule(void *); +static void rpc_async_schedule(struct work_struct *); /* * RPC tasks sit here while waiting for conditions to improve. @@ -305,7 +305,7 @@ static void rpc_make_runnable(struct rpc_task *task) if (RPC_IS_ASYNC(task)) { int status; - INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task); + INIT_WORK(&task->u.tk_work, rpc_async_schedule); status = queue_work(task->tk_workqueue, &task->u.tk_work); if (status < 0) { printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status); @@ -695,9 +695,9 @@ rpc_execute(struct rpc_task *task) return __rpc_execute(task); } -static void rpc_async_schedule(void *arg) +static void rpc_async_schedule(struct work_struct *work) { - __rpc_execute((struct rpc_task *)arg); + __rpc_execute(container_of(work, struct rpc_task, u.tk_work)); } /** diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 80857470dc1..4f9a5d9791f 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -479,9 +479,10 @@ int xprt_adjust_timeout(struct rpc_rqst *req) return status; } -static void xprt_autoclose(void *args) +static void xprt_autoclose(struct work_struct *work) { - struct rpc_xprt *xprt = (struct rpc_xprt *)args; + struct rpc_xprt *xprt = + container_of(work, struct rpc_xprt, task_cleanup); xprt_disconnect(xprt); xprt->ops->close(xprt); @@ -932,7 +933,7 @@ struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t si INIT_LIST_HEAD(&xprt->free); INIT_LIST_HEAD(&xprt->recv); - INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt); + INIT_WORK(&xprt->task_cleanup, xprt_autoclose); init_timer(&xprt->timer); xprt->timer.function = xprt_init_autodisconnect; xprt->timer.data = (unsigned long) xprt; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 3c7532cd009..cfe3c15be94 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -1060,13 +1060,14 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) /** * xs_udp_connect_worker - set up a UDP socket - * @args: RPC transport to connect + * @work: RPC transport to connect * * Invoked by a work queue tasklet. 
*/ -static void xs_udp_connect_worker(void *args) +static void xs_udp_connect_worker(struct work_struct *work) { - struct rpc_xprt *xprt = (struct rpc_xprt *) args; + struct rpc_xprt *xprt = + container_of(work, struct rpc_xprt, connect_worker.work); struct socket *sock = xprt->sock; int err, status = -EIO; @@ -1144,13 +1145,14 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) /** * xs_tcp_connect_worker - connect a TCP socket to a remote endpoint - * @args: RPC transport to connect + * @work: RPC transport to connect * * Invoked by a work queue tasklet. */ -static void xs_tcp_connect_worker(void *args) +static void xs_tcp_connect_worker(struct work_struct *work) { - struct rpc_xprt *xprt = (struct rpc_xprt *)args; + struct rpc_xprt *xprt = + container_of(work, struct rpc_xprt, connect_worker.work); struct socket *sock = xprt->sock; int err, status = -EIO; @@ -1375,7 +1377,7 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) /* XXX: header size can vary due to auth type, IPv6, etc. */ xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); - INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker, xprt); + INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker); xprt->bind_timeout = XS_BIND_TO; xprt->connect_timeout = XS_UDP_CONN_TO; xprt->reestablish_timeout = XS_UDP_REEST_TO; @@ -1420,7 +1422,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; - INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker, xprt); + INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker); xprt->bind_timeout = XS_BIND_TO; xprt->connect_timeout = XS_TCP_CONN_TO; xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; -- cgit v1.2.3 From c4028958b6ecad064b1a6303a6a5906d4fe48d73 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 22 Nov 2006 14:57:56 +0000 Subject: WorkStruct: make allyesconfig Fix up for make allyesconfig. 
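
One conversion below is worth calling out: net/bridge/br_if.c turns the port carrier check into a non-auto-release (_NAR) delayed work item, so the handler must release the item itself once it has finished touching the container. A minimal sketch of that pattern follows, again with hypothetical names; INIT_DELAYED_WORK_NAR(), container_of() and work_release() are the interfaces this series provides:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    /* Hypothetical container whose lifetime is tied to the work item. */
    struct my_port {
        int id;
        struct delayed_work check;  /* initialised with the _NAR variant */
    };

    static void my_check(struct work_struct *work)
    {
        struct my_port *p = container_of(work, struct my_port, check.work);
        int id = p->id;

        /*
         * With a _NAR item the pending bit is still set on entry, so the
         * item cannot be rescheduled (and, by the driver's convention,
         * the container not freed) until we release it explicitly.
         */
        work_release(work);

        /* From here on, rely only on what was copied out above. */
        printk(KERN_INFO "checked port %d\n", id);
    }

    static void my_port_init(struct my_port *p)
    {
        INIT_DELAYED_WORK_NAR(&p->check, my_check);
        schedule_delayed_work(&p->check, HZ);
    }
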
Signed-Off-By: David Howells --- net/atm/lec.c | 9 +++++---- net/atm/lec.h | 2 +- net/bluetooth/hci_sysfs.c | 12 ++++++------ net/bridge/br_if.c | 10 +++++++--- net/bridge/br_private.h | 2 +- net/core/netpoll.c | 4 ++-- net/dccp/minisocks.c | 3 +-- net/ieee80211/softmac/ieee80211softmac_assoc.c | 18 +++++++++++------- net/ieee80211/softmac/ieee80211softmac_auth.c | 23 +++++++++++++---------- net/ieee80211/softmac/ieee80211softmac_event.c | 12 +++++++----- net/ieee80211/softmac/ieee80211softmac_module.c | 4 ++-- net/ieee80211/softmac/ieee80211softmac_priv.h | 13 +++++++------ net/ieee80211/softmac/ieee80211softmac_scan.c | 13 ++++++++----- net/ieee80211/softmac/ieee80211softmac_wx.c | 6 +++--- net/ipv4/ipvs/ip_vs_ctl.c | 6 +++--- net/irda/ircomm/ircomm_tty.c | 11 ++++++----- net/sctp/associola.c | 11 ++++++----- net/sctp/endpointola.c | 10 ++++++---- net/sctp/inqueue.c | 9 ++++----- net/xfrm/xfrm_policy.c | 8 ++++---- net/xfrm/xfrm_state.c | 8 ++++---- 21 files changed, 107 insertions(+), 87 deletions(-) (limited to 'net') diff --git a/net/atm/lec.c b/net/atm/lec.c index 66c57c1091a..e801fff69dc 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -1458,7 +1458,7 @@ static void lane2_associate_ind(struct net_device *dev, u8 *mac_addr, #define LEC_ARP_REFRESH_INTERVAL (3*HZ) -static void lec_arp_check_expire(void *data); +static void lec_arp_check_expire(struct work_struct *work); static void lec_arp_expire_arp(unsigned long data); /* @@ -1481,7 +1481,7 @@ static void lec_arp_init(struct lec_priv *priv) INIT_HLIST_HEAD(&priv->lec_no_forward); INIT_HLIST_HEAD(&priv->mcast_fwds); spin_lock_init(&priv->lec_arp_lock); - INIT_WORK(&priv->lec_arp_work, lec_arp_check_expire, priv); + INIT_DELAYED_WORK(&priv->lec_arp_work, lec_arp_check_expire); schedule_delayed_work(&priv->lec_arp_work, LEC_ARP_REFRESH_INTERVAL); } @@ -1879,10 +1879,11 @@ static void lec_arp_expire_vcc(unsigned long data) * to ESI_FORWARD_DIRECT. This causes the flush period to end * regardless of the progress of the flush protocol. 
*/ -static void lec_arp_check_expire(void *data) +static void lec_arp_check_expire(struct work_struct *work) { unsigned long flags; - struct lec_priv *priv = data; + struct lec_priv *priv = + container_of(work, struct lec_priv, lec_arp_work.work); struct hlist_node *node, *next; struct lec_arp_table *entry; unsigned long now; diff --git a/net/atm/lec.h b/net/atm/lec.h index 877f5093969..984e8e6e083 100644 --- a/net/atm/lec.h +++ b/net/atm/lec.h @@ -92,7 +92,7 @@ struct lec_priv { spinlock_t lec_arp_lock; struct atm_vcc *mcast_vcc; /* Default Multicast Send VCC */ struct atm_vcc *lecd; - struct work_struct lec_arp_work; /* C10 */ + struct delayed_work lec_arp_work; /* C10 */ unsigned int maximum_unknown_frame_count; /* * Within the period of time defined by this variable, the client will send diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index 3eeeb7a86e7..d4c935692cc 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -237,9 +237,9 @@ static void bt_release(struct device *dev) kfree(data); } -static void add_conn(void *data) +static void add_conn(struct work_struct *work) { - struct hci_conn *conn = data; + struct hci_conn *conn = container_of(work, struct hci_conn, work); int i; if (device_register(&conn->dev) < 0) { @@ -272,14 +272,14 @@ void hci_conn_add_sysfs(struct hci_conn *conn) dev_set_drvdata(&conn->dev, conn); - INIT_WORK(&conn->work, add_conn, (void *) conn); + INIT_WORK(&conn->work, add_conn); schedule_work(&conn->work); } -static void del_conn(void *data) +static void del_conn(struct work_struct *work) { - struct hci_conn *conn = data; + struct hci_conn *conn = container_of(work, struct hci_conn, work); device_del(&conn->dev); } @@ -287,7 +287,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn) { BT_DBG("conn %p", conn); - INIT_WORK(&conn->work, del_conn, (void *) conn); + INIT_WORK(&conn->work, del_conn); schedule_work(&conn->work); } diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index f753c40c11d..55bb2634c08 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -77,12 +77,16 @@ static int port_cost(struct net_device *dev) * Called from work queue to allow for calling functions that * might sleep (such as speed check), and to debounce. 
*/ -static void port_carrier_check(void *arg) +static void port_carrier_check(struct work_struct *work) { - struct net_device *dev = arg; struct net_bridge_port *p; + struct net_device *dev; struct net_bridge *br; + dev = container_of(work, struct net_bridge_port, + carrier_check.work)->dev; + work_release(work); + rtnl_lock(); p = dev->br_port; if (!p) @@ -276,7 +280,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br, p->port_no = index; br_init_port(p); p->state = BR_STATE_DISABLED; - INIT_WORK(&p->carrier_check, port_carrier_check, dev); + INIT_DELAYED_WORK_NAR(&p->carrier_check, port_carrier_check); br_stp_port_timer_init(p); kobject_init(&p->kobj); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 74258d86f25..3a534e94c7f 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -82,7 +82,7 @@ struct net_bridge_port struct timer_list hold_timer; struct timer_list message_age_timer; struct kobject kobj; - struct work_struct carrier_check; + struct delayed_work carrier_check; struct rcu_head rcu; }; diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 6589adb14cb..63f24c914dd 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -56,7 +56,7 @@ static atomic_t trapped; static void zap_completion_queue(void); static void arp_reply(struct sk_buff *skb); -static void queue_process(void *p) +static void queue_process(struct work_struct *work) { unsigned long flags; struct sk_buff *skb; @@ -77,7 +77,7 @@ static void queue_process(void *p) } } -static DECLARE_WORK(send_queue, queue_process, NULL); +static DECLARE_WORK(send_queue, queue_process); void netpoll_queue(struct sk_buff *skb) { diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 9045438d6b3..36db5be2a9e 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -31,8 +31,7 @@ struct inet_timewait_death_row dccp_death_row = { .tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0, (unsigned long)&dccp_death_row), .twkill_work = __WORK_INITIALIZER(dccp_death_row.twkill_work, - inet_twdr_twkill_work, - &dccp_death_row), + inet_twdr_twkill_work), /* Short-time timewait calendar */ .twcal_hand = -1, diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c index cf51c87a971..08386c10295 100644 --- a/net/ieee80211/softmac/ieee80211softmac_assoc.c +++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c @@ -58,9 +58,11 @@ ieee80211softmac_assoc(struct ieee80211softmac_device *mac, struct ieee80211soft } void -ieee80211softmac_assoc_timeout(void *d) +ieee80211softmac_assoc_timeout(struct work_struct *work) { - struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d; + struct ieee80211softmac_device *mac = + container_of(work, struct ieee80211softmac_device, + associnfo.timeout.work); struct ieee80211softmac_network *n; mutex_lock(&mac->associnfo.mutex); @@ -186,9 +188,11 @@ ieee80211softmac_assoc_notify_auth(struct net_device *dev, int event_type, void /* This function is called to handle userspace requests (asynchronously) */ void -ieee80211softmac_assoc_work(void *d) +ieee80211softmac_assoc_work(struct work_struct *work) { - struct ieee80211softmac_device *mac = (struct ieee80211softmac_device *)d; + struct ieee80211softmac_device *mac = + container_of(work, struct ieee80211softmac_device, + associnfo.work.work); struct ieee80211softmac_network *found = NULL; struct ieee80211_network *net = NULL, *best = NULL; int bssvalid; @@ -412,7 +416,7 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev, 
network->authenticated = 0; /* we don't want to do this more than once ... */ network->auth_desynced_once = 1; - schedule_work(&mac->associnfo.work); + schedule_delayed_work(&mac->associnfo.work, 0); break; } default: @@ -446,7 +450,7 @@ ieee80211softmac_handle_disassoc(struct net_device * dev, ieee80211softmac_disassoc(mac); /* try to reassociate */ - schedule_work(&mac->associnfo.work); + schedule_delayed_work(&mac->associnfo.work, 0); return 0; } @@ -466,7 +470,7 @@ ieee80211softmac_handle_reassoc_req(struct net_device * dev, dprintkl(KERN_INFO PFX "reassoc request from unknown network\n"); return 0; } - schedule_work(&mac->associnfo.work); + schedule_delayed_work(&mac->associnfo.work, 0); return 0; } diff --git a/net/ieee80211/softmac/ieee80211softmac_auth.c b/net/ieee80211/softmac/ieee80211softmac_auth.c index 4cef39e171d..2ae1833b657 100644 --- a/net/ieee80211/softmac/ieee80211softmac_auth.c +++ b/net/ieee80211/softmac/ieee80211softmac_auth.c @@ -26,7 +26,7 @@ #include "ieee80211softmac_priv.h" -static void ieee80211softmac_auth_queue(void *data); +static void ieee80211softmac_auth_queue(struct work_struct *work); /* Queues an auth request to the desired AP */ int @@ -54,14 +54,14 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac, auth->mac = mac; auth->retry = IEEE80211SOFTMAC_AUTH_RETRY_LIMIT; auth->state = IEEE80211SOFTMAC_AUTH_OPEN_REQUEST; - INIT_WORK(&auth->work, &ieee80211softmac_auth_queue, (void *)auth); + INIT_DELAYED_WORK(&auth->work, ieee80211softmac_auth_queue); /* Lock (for list) */ spin_lock_irqsave(&mac->lock, flags); /* add to list */ list_add_tail(&auth->list, &mac->auth_queue); - schedule_work(&auth->work); + schedule_delayed_work(&auth->work, 0); spin_unlock_irqrestore(&mac->lock, flags); return 0; @@ -70,14 +70,15 @@ ieee80211softmac_auth_req(struct ieee80211softmac_device *mac, /* Sends an auth request to the desired AP and handles timeouts */ static void -ieee80211softmac_auth_queue(void *data) +ieee80211softmac_auth_queue(struct work_struct *work) { struct ieee80211softmac_device *mac; struct ieee80211softmac_auth_queue_item *auth; struct ieee80211softmac_network *net; unsigned long flags; - auth = (struct ieee80211softmac_auth_queue_item *)data; + auth = container_of(work, struct ieee80211softmac_auth_queue_item, + work.work); net = auth->net; mac = auth->mac; @@ -118,9 +119,11 @@ ieee80211softmac_auth_queue(void *data) /* Sends a response to an auth challenge (for shared key auth). */ static void -ieee80211softmac_auth_challenge_response(void *_aq) +ieee80211softmac_auth_challenge_response(struct work_struct *work) { - struct ieee80211softmac_auth_queue_item *aq = _aq; + struct ieee80211softmac_auth_queue_item *aq = + container_of(work, struct ieee80211softmac_auth_queue_item, + work.work); /* Send our response */ ieee80211softmac_send_mgt_frame(aq->mac, aq->net, IEEE80211_STYPE_AUTH, aq->state); @@ -228,8 +231,8 @@ ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *auth) * we have obviously already sent the initial auth * request. 
*/ cancel_delayed_work(&aq->work); - INIT_WORK(&aq->work, &ieee80211softmac_auth_challenge_response, (void *)aq); - schedule_work(&aq->work); + INIT_DELAYED_WORK(&aq->work, &ieee80211softmac_auth_challenge_response); + schedule_delayed_work(&aq->work, 0); spin_unlock_irqrestore(&mac->lock, flags); return 0; case IEEE80211SOFTMAC_AUTH_SHARED_PASS: @@ -392,6 +395,6 @@ ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *de ieee80211softmac_deauth_from_net(mac, net); /* let's try to re-associate */ - schedule_work(&mac->associnfo.work); + schedule_delayed_work(&mac->associnfo.work, 0); return 0; } diff --git a/net/ieee80211/softmac/ieee80211softmac_event.c b/net/ieee80211/softmac/ieee80211softmac_event.c index f34fa2ef666..b9015656cfb 100644 --- a/net/ieee80211/softmac/ieee80211softmac_event.c +++ b/net/ieee80211/softmac/ieee80211softmac_event.c @@ -73,10 +73,12 @@ static char *event_descriptions[IEEE80211SOFTMAC_EVENT_LAST+1] = { static void -ieee80211softmac_notify_callback(void *d) +ieee80211softmac_notify_callback(struct work_struct *work) { - struct ieee80211softmac_event event = *(struct ieee80211softmac_event*) d; - kfree(d); + struct ieee80211softmac_event *pevent = + container_of(work, struct ieee80211softmac_event, work.work); + struct ieee80211softmac_event event = *pevent; + kfree(pevent); event.fun(event.mac->dev, event.event_type, event.context); } @@ -99,7 +101,7 @@ ieee80211softmac_notify_internal(struct ieee80211softmac_device *mac, return -ENOMEM; eventptr->event_type = event; - INIT_WORK(&eventptr->work, ieee80211softmac_notify_callback, eventptr); + INIT_DELAYED_WORK(&eventptr->work, ieee80211softmac_notify_callback); eventptr->fun = fun; eventptr->context = context; eventptr->mac = mac; @@ -170,7 +172,7 @@ ieee80211softmac_call_events_locked(struct ieee80211softmac_device *mac, int eve /* User may have subscribed to ANY event, so * we tell them which event triggered it. 
*/ eventptr->event_type = event; - schedule_work(&eventptr->work); + schedule_delayed_work(&eventptr->work, 0); } } } diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c index 33aff4f4a47..256207b71dc 100644 --- a/net/ieee80211/softmac/ieee80211softmac_module.c +++ b/net/ieee80211/softmac/ieee80211softmac_module.c @@ -58,8 +58,8 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv) INIT_LIST_HEAD(&softmac->events); mutex_init(&softmac->associnfo.mutex); - INIT_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work, softmac); - INIT_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout, softmac); + INIT_DELAYED_WORK(&softmac->associnfo.work, ieee80211softmac_assoc_work); + INIT_DELAYED_WORK(&softmac->associnfo.timeout, ieee80211softmac_assoc_timeout); softmac->start_scan = ieee80211softmac_start_scan_implementation; softmac->wait_for_scan = ieee80211softmac_wait_for_scan_implementation; softmac->stop_scan = ieee80211softmac_stop_scan_implementation; diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h index 0642e090b8a..c0dbe070e54 100644 --- a/net/ieee80211/softmac/ieee80211softmac_priv.h +++ b/net/ieee80211/softmac/ieee80211softmac_priv.h @@ -78,7 +78,7 @@ /* private definitions and prototypes */ /*** prototypes from _scan.c */ -void ieee80211softmac_scan(void *sm); +void ieee80211softmac_scan(struct work_struct *work); /* for internal use if scanning is needed */ int ieee80211softmac_start_scan(struct ieee80211softmac_device *mac); void ieee80211softmac_stop_scan(struct ieee80211softmac_device *mac); @@ -149,7 +149,7 @@ int ieee80211softmac_auth_resp(struct net_device *dev, struct ieee80211_auth *au int ieee80211softmac_deauth_resp(struct net_device *dev, struct ieee80211_deauth *deauth); /*** prototypes from _assoc.c */ -void ieee80211softmac_assoc_work(void *d); +void ieee80211softmac_assoc_work(struct work_struct *work); int ieee80211softmac_handle_assoc_response(struct net_device * dev, struct ieee80211_assoc_response * resp, struct ieee80211_network * network); @@ -157,7 +157,7 @@ int ieee80211softmac_handle_disassoc(struct net_device * dev, struct ieee80211_disassoc * disassoc); int ieee80211softmac_handle_reassoc_req(struct net_device * dev, struct ieee80211_reassoc_request * reassoc); -void ieee80211softmac_assoc_timeout(void *d); +void ieee80211softmac_assoc_timeout(struct work_struct *work); void ieee80211softmac_send_disassoc_req(struct ieee80211softmac_device *mac, u16 reason); void ieee80211softmac_disassoc(struct ieee80211softmac_device *mac); @@ -207,7 +207,7 @@ struct ieee80211softmac_auth_queue_item { struct ieee80211softmac_device *mac; /* SoftMAC device */ u8 retry; /* Retry limit */ u8 state; /* Auth State */ - struct work_struct work; /* Work queue */ + struct delayed_work work; /* Work queue */ }; /* scanning information */ @@ -219,7 +219,8 @@ struct ieee80211softmac_scaninfo { stop:1; u8 skip_flags; struct completion finished; - struct work_struct softmac_scan; + struct delayed_work softmac_scan; + struct ieee80211softmac_device *mac; }; /* private event struct */ @@ -227,7 +228,7 @@ struct ieee80211softmac_event { struct list_head list; int event_type; void *event_context; - struct work_struct work; + struct delayed_work work; notify_function_ptr fun; void *context; struct ieee80211softmac_device *mac; diff --git a/net/ieee80211/softmac/ieee80211softmac_scan.c b/net/ieee80211/softmac/ieee80211softmac_scan.c index 
d31cf77498c..a8326076581 100644 --- a/net/ieee80211/softmac/ieee80211softmac_scan.c +++ b/net/ieee80211/softmac/ieee80211softmac_scan.c @@ -91,12 +91,14 @@ ieee80211softmac_wait_for_scan(struct ieee80211softmac_device *sm) /* internal scanning implementation follows */ -void ieee80211softmac_scan(void *d) +void ieee80211softmac_scan(struct work_struct *work) { int invalid_channel; u8 current_channel_idx; - struct ieee80211softmac_device *sm = (struct ieee80211softmac_device *)d; - struct ieee80211softmac_scaninfo *si = sm->scaninfo; + struct ieee80211softmac_scaninfo *si = + container_of(work, struct ieee80211softmac_scaninfo, + softmac_scan.work); + struct ieee80211softmac_device *sm = si->mac; unsigned long flags; while (!(si->stop) && (si->current_channel_idx < si->number_channels)) { @@ -146,7 +148,8 @@ static inline struct ieee80211softmac_scaninfo *allocate_scaninfo(struct ieee802 struct ieee80211softmac_scaninfo *info = kmalloc(sizeof(struct ieee80211softmac_scaninfo), GFP_ATOMIC); if (unlikely(!info)) return NULL; - INIT_WORK(&info->softmac_scan, ieee80211softmac_scan, mac); + INIT_DELAYED_WORK(&info->softmac_scan, ieee80211softmac_scan); + info->mac = mac; init_completion(&info->finished); return info; } @@ -189,7 +192,7 @@ int ieee80211softmac_start_scan_implementation(struct net_device *dev) sm->scaninfo->started = 1; sm->scaninfo->stop = 0; INIT_COMPLETION(sm->scaninfo->finished); - schedule_work(&sm->scaninfo->softmac_scan); + schedule_delayed_work(&sm->scaninfo->softmac_scan, 0); spin_unlock_irqrestore(&sm->lock, flags); return 0; } diff --git a/net/ieee80211/softmac/ieee80211softmac_wx.c b/net/ieee80211/softmac/ieee80211softmac_wx.c index 23068a830f7..2ffaebd21c5 100644 --- a/net/ieee80211/softmac/ieee80211softmac_wx.c +++ b/net/ieee80211/softmac/ieee80211softmac_wx.c @@ -122,7 +122,7 @@ ieee80211softmac_wx_set_essid(struct net_device *net_dev, sm->associnfo.associating = 1; /* queue lower level code to do work (if necessary) */ - schedule_work(&sm->associnfo.work); + schedule_delayed_work(&sm->associnfo.work, 0); out: mutex_unlock(&sm->associnfo.mutex); @@ -356,7 +356,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev, /* force reassociation */ mac->associnfo.bssvalid = 0; if (mac->associnfo.associated) - schedule_work(&mac->associnfo.work); + schedule_delayed_work(&mac->associnfo.work, 0); } else if (is_zero_ether_addr(data->ap_addr.sa_data)) { /* the bssid we have is no longer fixed */ mac->associnfo.bssfixed = 0; @@ -373,7 +373,7 @@ ieee80211softmac_wx_set_wap(struct net_device *net_dev, /* tell the other code that this bssid should be used no matter what */ mac->associnfo.bssfixed = 1; /* queue associate if new bssid or (old one again and not associated) */ - schedule_work(&mac->associnfo.work); + schedule_delayed_work(&mac->associnfo.work, 0); } out: diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c index f261616e460..9b933381ebb 100644 --- a/net/ipv4/ipvs/ip_vs_ctl.c +++ b/net/ipv4/ipvs/ip_vs_ctl.c @@ -221,10 +221,10 @@ static void update_defense_level(void) * Timer for checking the defense */ #define DEFENSE_TIMER_PERIOD 1*HZ -static void defense_work_handler(void *data); -static DECLARE_WORK(defense_work, defense_work_handler, NULL); +static void defense_work_handler(struct work_struct *work); +static DECLARE_DELAYED_WORK(defense_work, defense_work_handler); -static void defense_work_handler(void *data) +static void defense_work_handler(struct work_struct *work) { update_defense_level(); if (atomic_read(&ip_vs_dropentry)) diff --git 
a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index d50a02030ad..262bda808d9 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c @@ -61,7 +61,7 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty); static void ircomm_tty_send_xchar(struct tty_struct *tty, char ch); static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout); static void ircomm_tty_hangup(struct tty_struct *tty); -static void ircomm_tty_do_softint(void *private_); +static void ircomm_tty_do_softint(struct work_struct *work); static void ircomm_tty_shutdown(struct ircomm_tty_cb *self); static void ircomm_tty_stop(struct tty_struct *tty); @@ -389,7 +389,7 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp) self->flow = FLOW_STOP; self->line = line; - INIT_WORK(&self->tqueue, ircomm_tty_do_softint, self); + INIT_WORK(&self->tqueue, ircomm_tty_do_softint); self->max_header_size = IRCOMM_TTY_HDR_UNINITIALISED; self->max_data_size = IRCOMM_TTY_DATA_UNINITIALISED; self->close_delay = 5*HZ/10; @@ -594,15 +594,16 @@ static void ircomm_tty_flush_buffer(struct tty_struct *tty) } /* - * Function ircomm_tty_do_softint (private_) + * Function ircomm_tty_do_softint (work) * * We use this routine to give the write wakeup to the user at at a * safe time (as fast as possible after write have completed). This * can be compared to the Tx interrupt. */ -static void ircomm_tty_do_softint(void *private_) +static void ircomm_tty_do_softint(struct work_struct *work) { - struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) private_; + struct ircomm_tty_cb *self = + container_of(work, struct ircomm_tty_cb, tqueue); struct tty_struct *tty; unsigned long flags; struct sk_buff *skb, *ctrl_skb; diff --git a/net/sctp/associola.c b/net/sctp/associola.c index ed0445fe85e..88124696ba6 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -61,7 +61,7 @@ #include /* Forward declarations for internal functions. */ -static void sctp_assoc_bh_rcv(struct sctp_association *asoc); +static void sctp_assoc_bh_rcv(struct work_struct *work); /* 1st Level Abstractions. */ @@ -269,9 +269,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a /* Create an input queue. */ sctp_inq_init(&asoc->base.inqueue); - sctp_inq_set_th_handler(&asoc->base.inqueue, - (void (*)(void *))sctp_assoc_bh_rcv, - asoc); + sctp_inq_set_th_handler(&asoc->base.inqueue, sctp_assoc_bh_rcv); /* Create an output queue. */ sctp_outq_init(asoc, &asoc->outqueue); @@ -944,8 +942,11 @@ out: } /* Do delayed input processing. This is scheduled by sctp_rcv(). */ -static void sctp_assoc_bh_rcv(struct sctp_association *asoc) +static void sctp_assoc_bh_rcv(struct work_struct *work) { + struct sctp_association *asoc = + container_of(work, struct sctp_association, + base.inqueue.immediate); struct sctp_endpoint *ep; struct sctp_chunk *chunk; struct sock *sk; diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index 9b6b394b66f..a2b55372151 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -61,7 +61,7 @@ #include /* Forward declarations for internal helpers. */ -static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep); +static void sctp_endpoint_bh_rcv(struct work_struct *work); /* * Initialize the base fields of the endpoint structure. 
@@ -85,8 +85,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, sctp_inq_init(&ep->base.inqueue); /* Set its top-half handler */ - sctp_inq_set_th_handler(&ep->base.inqueue, - (void (*)(void *))sctp_endpoint_bh_rcv, ep); + sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv); /* Initialize the bind addr area */ sctp_bind_addr_init(&ep->base.bind_addr, 0); @@ -311,8 +310,11 @@ int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep, /* Do delayed input processing. This is scheduled by sctp_rcv(). * This may be called on BH or task time. */ -static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep) +static void sctp_endpoint_bh_rcv(struct work_struct *work) { + struct sctp_endpoint *ep = + container_of(work, struct sctp_endpoint, + base.inqueue.immediate); struct sctp_association *asoc; struct sock *sk; struct sctp_transport *transport; diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c index cf6deed7e84..71b07466e88 100644 --- a/net/sctp/inqueue.c +++ b/net/sctp/inqueue.c @@ -54,7 +54,7 @@ void sctp_inq_init(struct sctp_inq *queue) queue->in_progress = NULL; /* Create a task for delivering data. */ - INIT_WORK(&queue->immediate, NULL, NULL); + INIT_WORK(&queue->immediate, NULL); queue->malloced = 0; } @@ -97,7 +97,7 @@ void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk) * on the BH related data structures. */ list_add_tail(&chunk->list, &q->in_chunk_list); - q->immediate.func(q->immediate.data); + q->immediate.func(&q->immediate); } /* Extract a chunk from an SCTP inqueue. @@ -205,9 +205,8 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) * The intent is that this routine will pull stuff out of the * inqueue and process it. */ -void sctp_inq_set_th_handler(struct sctp_inq *q, - void (*callback)(void *), void *arg) +void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback) { - INIT_WORK(&q->immediate, callback, arg); + INIT_WORK(&q->immediate, callback); } diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 7736b23c3f0..ba924d40df7 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -358,7 +358,7 @@ static void xfrm_policy_gc_kill(struct xfrm_policy *policy) xfrm_pol_put(policy); } -static void xfrm_policy_gc_task(void *data) +static void xfrm_policy_gc_task(struct work_struct *work) { struct xfrm_policy *policy; struct hlist_node *entry, *tmp; @@ -546,7 +546,7 @@ static inline int xfrm_byidx_should_resize(int total) static DEFINE_MUTEX(hash_resize_mutex); -static void xfrm_hash_resize(void *__unused) +static void xfrm_hash_resize(struct work_struct *__unused) { int dir, total; @@ -563,7 +563,7 @@ static void xfrm_hash_resize(void *__unused) mutex_unlock(&hash_resize_mutex); } -static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL); +static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize); /* Generate new index... KAME seems to generate them ordered by cost * of an absolute inpredictability of ordering of rules. This will not pass. 
*/ @@ -2080,7 +2080,7 @@ static void __init xfrm_policy_init(void) panic("XFRM: failed to allocate bydst hash\n"); } - INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL); + INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task); register_netdevice_notifier(&xfrm_dev_notifier); } diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 899de9ed22a..40c52717984 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -115,7 +115,7 @@ static unsigned long xfrm_hash_new_size(void) static DEFINE_MUTEX(hash_resize_mutex); -static void xfrm_hash_resize(void *__unused) +static void xfrm_hash_resize(struct work_struct *__unused) { struct hlist_head *ndst, *nsrc, *nspi, *odst, *osrc, *ospi; unsigned long nsize, osize; @@ -168,7 +168,7 @@ out_unlock: mutex_unlock(&hash_resize_mutex); } -static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize, NULL); +static DECLARE_WORK(xfrm_hash_work, xfrm_hash_resize); DECLARE_WAIT_QUEUE_HEAD(km_waitq); EXPORT_SYMBOL(km_waitq); @@ -207,7 +207,7 @@ static void xfrm_state_gc_destroy(struct xfrm_state *x) kfree(x); } -static void xfrm_state_gc_task(void *data) +static void xfrm_state_gc_task(struct work_struct *data) { struct xfrm_state *x; struct hlist_node *entry, *tmp; @@ -1568,6 +1568,6 @@ void __init xfrm_state_init(void) panic("XFRM: Cannot allocate bydst/bysrc/byspi hashes."); xfrm_state_hmask = ((sz / sizeof(struct hlist_head)) - 1); - INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task, NULL); + INIT_WORK(&xfrm_state_gc_work, xfrm_state_gc_task); } -- cgit v1.2.3