Diffstat (limited to 'net')
87 files changed, 516 insertions, 433 deletions
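The bulk of the procfs churn in this diff applies one conversion: the two-step create_proc_entry() call followed by a manual proc_fops assignment becomes a single proc_create() call. The sketch below shows that pattern in isolation; it is not code from any one file in this series, and the names "example", parent_dir and example_fops are hypothetical placeholders.

#include <linux/proc_fs.h>

/* Placeholders for illustration only; not identifiers from this series. */
static const struct file_operations example_fops;
static struct proc_dir_entry *parent_dir;

static int example_proc_init(void)
{
        struct proc_dir_entry *p;

        /* Old two-step pattern being removed throughout this series:
         * the entry is visible in /proc before its fops are attached. */
        p = create_proc_entry("example", S_IRUGO, parent_dir);
        if (!p)
                return -ENOMEM;
        p->proc_fops = &example_fops;

        /* New pattern: proc_create() attaches the file operations at
         * creation time, so the entry is never live without them. */
        p = proc_create("example", S_IRUGO, parent_dir, &example_fops);
        if (!p)
                return -ENOMEM;

        return 0;
}

The single-call form also keeps the error handling flat, which is why most of the hunks below shrink to a bare NULL check.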
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index fc60c6d096b..77f04e49a1a 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -366,8 +366,7 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct net_device_stats *stats = &dev->stats; struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); - DECLARE_MAC_BUF(mac); - DECLARE_MAC_BUF(mac2); + /* Handle non-VLAN frames if they are sent to us, for example by DHCP. * * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING @@ -405,8 +404,11 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) pr_debug("%s: about to send skb: %p to dev: %s\n", __FUNCTION__, skb, skb->dev->name); - pr_debug(" %s %s %4hx %4hx %4hx\n", - print_mac(mac, veth->h_dest), print_mac(mac2, veth->h_source), + pr_debug(" " MAC_FMT " " MAC_FMT " %4hx %4hx %4hx\n", + veth->h_dest[0], veth->h_dest[1], veth->h_dest[2], + veth->h_dest[3], veth->h_dest[4], veth->h_dest[5], + veth->h_source[0], veth->h_source[1], veth->h_source[2], + veth->h_source[3], veth->h_source[4], veth->h_source[5], veth->h_vlan_proto, veth->h_vlan_TCI, veth->h_vlan_encapsulated_proto); diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c index a0ec4792559..146cfb0e988 100644 --- a/net/8021q/vlanproc.c +++ b/net/8021q/vlanproc.c @@ -161,11 +161,10 @@ int __init vlan_proc_init(void) if (!proc_vlan_dir) goto err; - proc_vlan_conf = create_proc_entry(name_conf, S_IFREG|S_IRUSR|S_IWUSR, - proc_vlan_dir); + proc_vlan_conf = proc_create(name_conf, S_IFREG|S_IRUSR|S_IWUSR, + proc_vlan_dir, &vlan_fops); if (!proc_vlan_conf) goto err; - proc_vlan_conf->proc_fops = &vlan_fops; return 0; err: @@ -182,13 +181,11 @@ int vlan_proc_add_dev(struct net_device *vlandev) { struct vlan_dev_info *dev_info = vlan_dev_info(vlandev); - dev_info->dent = create_proc_entry(vlandev->name, - S_IFREG|S_IRUSR|S_IWUSR, - proc_vlan_dir); + dev_info->dent = proc_create(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR, + proc_vlan_dir, &vlandev_fops); if (!dev_info->dent) return -ENOBUFS; - dev_info->dent->proc_fops = &vlandev_fops; dev_info->dent->data = vlandev; return 0; } diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 9e3d81cb9f0..de7a9f532ed 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -110,7 +110,7 @@ static struct p9_req_t *p9_lookup_tag(struct virtio_chan *c, u16 tag) } for (count = old_max; count < c->max_tag; count++) { c->reqs[count].status = REQ_STATUS_IDLE; - c->reqs[count].wq = kmalloc(sizeof(wait_queue_t), + c->reqs[count].wq = kmalloc(sizeof(wait_queue_head_t), GFP_ATOMIC); if (!c->reqs[count].wq) { printk(KERN_ERR "Couldn't grow tag array\n"); diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c index 8e8dcfd532d..162199a2d74 100644 --- a/net/appletalk/atalk_proc.c +++ b/net/appletalk/atalk_proc.c @@ -283,25 +283,24 @@ int __init atalk_proc_init(void) goto out; atalk_proc_dir->owner = THIS_MODULE; - p = create_proc_entry("interface", S_IRUGO, atalk_proc_dir); + p = proc_create("interface", S_IRUGO, atalk_proc_dir, + &atalk_seq_interface_fops); if (!p) goto out_interface; - p->proc_fops = &atalk_seq_interface_fops; - p = create_proc_entry("route", S_IRUGO, atalk_proc_dir); + p = proc_create("route", S_IRUGO, atalk_proc_dir, + &atalk_seq_route_fops); if (!p) goto out_route; - p->proc_fops = &atalk_seq_route_fops; - p = create_proc_entry("socket", S_IRUGO, atalk_proc_dir); + p = proc_create("socket", S_IRUGO, atalk_proc_dir, + &atalk_seq_socket_fops); if (!p) goto out_socket; 
- p->proc_fops = &atalk_seq_socket_fops; - p = create_proc_entry("arp", S_IRUGO, atalk_proc_dir); + p = proc_create("arp", S_IRUGO, atalk_proc_dir, &atalk_seq_arp_fops); if (!p) goto out_arp; - p->proc_fops = &atalk_seq_arp_fops; rc = 0; out: diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 574d9a96417..1b228065e74 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -742,9 +742,9 @@ static int __init br2684_init(void) { #ifdef CONFIG_PROC_FS struct proc_dir_entry *p; - if ((p = create_proc_entry("br2684", 0, atm_proc_root)) == NULL) + p = proc_create("br2684", 0, atm_proc_root, &br2684_proc_ops); + if (p == NULL) return -ENOMEM; - p->proc_fops = &br2684_proc_ops; #endif register_atm_ioctl(&br2684_ioctl_ops); return 0; diff --git a/net/atm/clip.c b/net/atm/clip.c index 86b885ec1cb..d30167c0b48 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -962,9 +962,7 @@ static int __init atm_clip_init(void) { struct proc_dir_entry *p; - p = create_proc_entry("arp", S_IRUGO, atm_proc_root); - if (p) - p->proc_fops = &arp_seq_fops; + p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops); } #endif diff --git a/net/atm/lec.c b/net/atm/lec.c index 1a8c4c6c0cd..0e450d12f03 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -1249,9 +1249,7 @@ static int __init lane_module_init(void) #ifdef CONFIG_PROC_FS struct proc_dir_entry *p; - p = create_proc_entry("lec", S_IRUGO, atm_proc_root); - if (p) - p->proc_fops = &lec_seq_fops; + p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops); #endif register_atm_ioctl(&lane_ioctl_ops); diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c index 91f3ffc90db..4990541ef5d 100644 --- a/net/atm/mpoa_proc.c +++ b/net/atm/mpoa_proc.c @@ -276,12 +276,11 @@ int mpc_proc_init(void) { struct proc_dir_entry *p; - p = create_proc_entry(STAT_FILE_NAME, 0, atm_proc_root); + p = proc_create(STAT_FILE_NAME, 0, atm_proc_root, &mpc_file_operations); if (!p) { printk(KERN_ERR "Unable to initialize /proc/atm/%s\n", STAT_FILE_NAME); return -ENOMEM; } - p->proc_fops = &mpc_file_operations; p->owner = THIS_MODULE; return 0; } diff --git a/net/atm/proc.c b/net/atm/proc.c index 49125110bb8..e9693aed7ef 100644 --- a/net/atm/proc.c +++ b/net/atm/proc.c @@ -435,11 +435,11 @@ int atm_proc_dev_register(struct atm_dev *dev) goto err_out; sprintf(dev->proc_name,"%s:%d",dev->type, dev->number); - dev->proc_entry = create_proc_entry(dev->proc_name, 0, atm_proc_root); + dev->proc_entry = proc_create(dev->proc_name, 0, atm_proc_root, + &proc_atm_dev_ops); if (!dev->proc_entry) goto err_free_name; dev->proc_entry->data = dev; - dev->proc_entry->proc_fops = &proc_atm_dev_ops; dev->proc_entry->owner = THIS_MODULE; return 0; err_free_name: @@ -492,10 +492,10 @@ int __init atm_proc_init(void) for (e = atm_proc_ents; e->name; e++) { struct proc_dir_entry *dirent; - dirent = create_proc_entry(e->name, S_IRUGO, atm_proc_root); + dirent = proc_create(e->name, S_IRUGO, + atm_proc_root, e->proc_fops); if (!dirent) goto err_out_remove; - dirent->proc_fops = e->proc_fops; dirent->owner = THIS_MODULE; e->dirent = dirent; } diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h index a2992280c3d..e69244dd8de 100644 --- a/net/bluetooth/bnep/bnep.h +++ b/net/bluetooth/bnep/bnep.h @@ -174,7 +174,7 @@ struct bnep_session { void bnep_net_setup(struct net_device *dev); int bnep_sock_init(void); -int bnep_sock_cleanup(void); +void bnep_sock_cleanup(void); static inline int bnep_mc_hash(__u8 *addr) { diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c index 
81065e548a1..201e5b1ce47 100644 --- a/net/bluetooth/bnep/sock.c +++ b/net/bluetooth/bnep/sock.c @@ -257,12 +257,10 @@ error: return err; } -int __exit bnep_sock_cleanup(void) +void __exit bnep_sock_cleanup(void) { if (bt_sock_unregister(BTPROTO_BNEP) < 0) BT_ERR("Can't unregister BNEP socket"); proto_unregister(&bnep_proto); - - return 0; } diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 930b58e7149..aec6929f5c1 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -902,8 +902,6 @@ int hci_unregister_dev(struct hci_dev *hdev) BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type); - hci_unregister_sysfs(hdev); - write_lock_bh(&hci_dev_list_lock); list_del(&hdev->list); write_unlock_bh(&hci_dev_list_lock); @@ -915,6 +913,8 @@ int hci_unregister_dev(struct hci_dev *hdev) hci_notify(hdev, HCI_DEV_UNREG); + hci_unregister_sysfs(hdev); + __hci_dev_put(hdev); return 0; diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 14991323c27..b5d4019d357 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -734,7 +734,7 @@ error: return err; } -int __exit hci_sock_cleanup(void) +void __exit hci_sock_cleanup(void) { if (bt_sock_unregister(BTPROTO_HCI) < 0) BT_ERR("HCI socket unregistration failed"); @@ -742,6 +742,4 @@ int __exit hci_sock_cleanup(void) hci_unregister_notifier(&hci_sock_nblock); proto_unregister(&hci_sk_proto); - - return 0; } diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index a8811c0a0ce..34f8bf98bc0 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -417,6 +417,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) l2cap_sock_kill(sk); } + if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) + del_timer_sync(&conn->info_timer); + hcon->l2cap_data = NULL; kfree(conn); } diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c index e700cbf634c..ca64c1cc1b4 100644 --- a/net/bridge/netfilter/ebt_dnat.c +++ b/net/bridge/netfilter/ebt_dnat.c @@ -20,8 +20,8 @@ static int ebt_target_dnat(struct sk_buff *skb, unsigned int hooknr, { const struct ebt_nat_info *info = data; - if (skb_make_writable(skb, 0)) - return NF_DROP; + if (!skb_make_writable(skb, 0)) + return EBT_DROP; memcpy(eth_hdr(skb)->h_dest, info->mac, ETH_ALEN); return info->target; diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c index bfdf2fb60b1..b8afe850cf1 100644 --- a/net/bridge/netfilter/ebt_redirect.c +++ b/net/bridge/netfilter/ebt_redirect.c @@ -21,8 +21,8 @@ static int ebt_target_redirect(struct sk_buff *skb, unsigned int hooknr, { const struct ebt_redirect_info *info = data; - if (skb_make_writable(skb, 0)) - return NF_DROP; + if (!skb_make_writable(skb, 0)) + return EBT_DROP; if (hooknr != NF_BR_BROUTING) memcpy(eth_hdr(skb)->h_dest, diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c index e252dabbb14..5425333dda0 100644 --- a/net/bridge/netfilter/ebt_snat.c +++ b/net/bridge/netfilter/ebt_snat.c @@ -22,8 +22,8 @@ static int ebt_target_snat(struct sk_buff *skb, unsigned int hooknr, { const struct ebt_nat_info *info = data; - if (skb_make_writable(skb, 0)) - return NF_DROP; + if (!skb_make_writable(skb, 0)) + return EBT_DROP; memcpy(eth_hdr(skb)->h_source, info->mac, ETH_ALEN); if (!(info->target & NAT_ARP_BIT) && diff --git a/net/core/dev.c b/net/core/dev.c index 908f07c3bd7..fcdf03cf3b3 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2900,7 +2900,7 @@ int __dev_addr_add(struct dev_addr_list **list, int *count, } 
} - da = kmalloc(sizeof(*da), GFP_ATOMIC); + da = kzalloc(sizeof(*da), GFP_ATOMIC); if (da == NULL) return -ENOMEM; memcpy(da->da_addr, addr, alen); diff --git a/net/core/neighbour.c b/net/core/neighbour.c index a16cf1ec5e5..d9a02b2cc28 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -358,11 +358,12 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey, { struct neighbour *n; int key_len = tbl->key_len; - u32 hash_val = tbl->hash(pkey, dev); + u32 hash_val; NEIGH_CACHE_STAT_INC(tbl, lookups); read_lock_bh(&tbl->lock); + hash_val = tbl->hash(pkey, dev); for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) { if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) { neigh_hold(n); @@ -379,11 +380,12 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net, { struct neighbour *n; int key_len = tbl->key_len; - u32 hash_val = tbl->hash(pkey, NULL); + u32 hash_val; NEIGH_CACHE_STAT_INC(tbl, lookups); read_lock_bh(&tbl->lock); + hash_val = tbl->hash(pkey, NULL); for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) { if (!memcmp(n->primary_key, pkey, key_len) && (net == n->dev->nd_net)) { @@ -507,6 +509,7 @@ struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl, if (tbl->pconstructor && tbl->pconstructor(n)) { if (dev) dev_put(dev); + release_net(net); kfree(n); n = NULL; goto out; @@ -836,7 +839,7 @@ static void neigh_timer_handler(unsigned long arg) struct sk_buff *skb = skb_peek(&neigh->arp_queue); /* keep skb alive even if arp_queue overflows */ if (skb) - skb_get(skb); + skb = skb_copy(skb, GFP_ATOMIC); write_unlock(&neigh->lock); neigh->ops->solicit(neigh, skb); atomic_inc(&neigh->probes); @@ -1386,10 +1389,10 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl) panic("cannot create neighbour cache statistics"); #ifdef CONFIG_PROC_FS - tbl->pde = create_proc_entry(tbl->id, 0, init_net.proc_net_stat); + tbl->pde = proc_create(tbl->id, 0, init_net.proc_net_stat, + &neigh_stat_seq_fops); if (!tbl->pde) panic("cannot create neighbour proc dir entry"); - tbl->pde->proc_fops = &neigh_stat_seq_fops; tbl->pde->data = tbl; #endif diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 6faa128a4c8..4b7e756181c 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -39,6 +39,8 @@ static struct sk_buff_head skb_pool; static atomic_t trapped; #define USEC_PER_POLL 50 +#define NETPOLL_RX_ENABLED 1 +#define NETPOLL_RX_DROP 2 #define MAX_SKB_SIZE \ (MAX_UDP_CHUNK + sizeof(struct udphdr) + \ @@ -126,11 +128,13 @@ static int poll_one_napi(struct netpoll_info *npinfo, if (!test_bit(NAPI_STATE_SCHED, &napi->state)) return budget; + npinfo->rx_flags |= NETPOLL_RX_DROP; atomic_inc(&trapped); work = napi->poll(napi, budget); atomic_dec(&trapped); + npinfo->rx_flags &= ~NETPOLL_RX_DROP; return budget - work; } @@ -472,7 +476,7 @@ int __netpoll_rx(struct sk_buff *skb) if (skb->dev->type != ARPHRD_ETHER) goto out; - /* if receive ARP during middle of NAPI poll, then queue */ + /* check if netpoll clients need ARP */ if (skb->protocol == htons(ETH_P_ARP) && atomic_read(&trapped)) { skb_queue_tail(&npi->arp_tx, skb); @@ -534,9 +538,6 @@ int __netpoll_rx(struct sk_buff *skb) return 1; out: - /* If packet received while already in poll then just - * silently drop. 
- */ if (atomic_read(&trapped)) { kfree_skb(skb); return 1; @@ -675,6 +676,7 @@ int netpoll_setup(struct netpoll *np) goto release; } + npinfo->rx_flags = 0; npinfo->rx_np = NULL; spin_lock_init(&npinfo->rx_lock); @@ -756,6 +758,7 @@ int netpoll_setup(struct netpoll *np) if (np->rx_hook) { spin_lock_irqsave(&npinfo->rx_lock, flags); + npinfo->rx_flags |= NETPOLL_RX_ENABLED; npinfo->rx_np = np; spin_unlock_irqrestore(&npinfo->rx_lock, flags); } @@ -797,6 +800,7 @@ void netpoll_cleanup(struct netpoll *np) if (npinfo->rx_np == np) { spin_lock_irqsave(&npinfo->rx_lock, flags); npinfo->rx_np = NULL; + npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; spin_unlock_irqrestore(&npinfo->rx_lock, flags); } diff --git a/net/core/pktgen.c b/net/core/pktgen.c index bfcdfaebca5..20e63b302ba 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -3570,14 +3570,14 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname) if (err) goto out1; - pkt_dev->entry = create_proc_entry(ifname, 0600, pg_proc_dir); + pkt_dev->entry = proc_create(ifname, 0600, + pg_proc_dir, &pktgen_if_fops); if (!pkt_dev->entry) { printk(KERN_ERR "pktgen: cannot create %s/%s procfs entry.\n", PG_PROC_DIR, ifname); err = -EINVAL; goto out2; } - pkt_dev->entry->proc_fops = &pktgen_if_fops; pkt_dev->entry->data = pkt_dev; #ifdef CONFIG_XFRM pkt_dev->ipsmode = XFRM_MODE_TRANSPORT; @@ -3628,7 +3628,7 @@ static int __init pktgen_create_thread(int cpu) kthread_bind(p, cpu); t->tsk = p; - pe = create_proc_entry(t->tsk->comm, 0600, pg_proc_dir); + pe = proc_create(t->tsk->comm, 0600, pg_proc_dir, &pktgen_thread_fops); if (!pe) { printk(KERN_ERR "pktgen: cannot create %s/%s procfs entry.\n", PG_PROC_DIR, t->tsk->comm); @@ -3638,7 +3638,6 @@ static int __init pktgen_create_thread(int cpu) return -EINVAL; } - pe->proc_fops = &pktgen_thread_fops; pe->data = t; wake_up_process(p); @@ -3709,7 +3708,7 @@ static int __init pg_init(void) return -ENODEV; pg_proc_dir->owner = THIS_MODULE; - pe = create_proc_entry(PGCTRL, 0600, pg_proc_dir); + pe = proc_create(PGCTRL, 0600, pg_proc_dir, &pktgen_fops); if (pe == NULL) { printk(KERN_ERR "pktgen: ERROR: cannot create %s " "procfs entry.\n", PGCTRL); @@ -3717,7 +3716,6 @@ static int __init pg_init(void) return -EINVAL; } - pe->proc_fops = &pktgen_fops; pe->data = NULL; /* Register us to receive netdevice events */ diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 61ac8d06292..2bd9c5f7627 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -689,10 +689,12 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = { [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN }, [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) }, [IFLA_MTU] = { .type = NLA_U32 }, + [IFLA_LINK] = { .type = NLA_U32 }, [IFLA_TXQLEN] = { .type = NLA_U32 }, [IFLA_WEIGHT] = { .type = NLA_U32 }, [IFLA_OPERSTATE] = { .type = NLA_U8 }, [IFLA_LINKMODE] = { .type = NLA_U8 }, + [IFLA_LINKINFO] = { .type = NLA_NESTED }, [IFLA_NET_NS_PID] = { .type = NLA_U32 }, }; @@ -720,6 +722,21 @@ static struct net *get_net_ns_by_pid(pid_t pid) return net; } +static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) +{ + if (dev) { + if (tb[IFLA_ADDRESS] && + nla_len(tb[IFLA_ADDRESS]) < dev->addr_len) + return -EINVAL; + + if (tb[IFLA_BROADCAST] && + nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) + return -EINVAL; + } + + return 0; +} + static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, struct nlattr **tb, char *ifname, int modified) { @@ -892,12 +909,7 @@ static int rtnl_setlink(struct 
sk_buff *skb, struct nlmsghdr *nlh, void *arg) goto errout; } - if (tb[IFLA_ADDRESS] && - nla_len(tb[IFLA_ADDRESS]) < dev->addr_len) - goto errout_dev; - - if (tb[IFLA_BROADCAST] && - nla_len(tb[IFLA_BROADCAST]) < dev->addr_len) + if ((err = validate_linkmsg(dev, tb)) < 0) goto errout_dev; err = do_setlink(dev, ifm, tb, ifname, 0); @@ -1018,6 +1030,9 @@ replay: else dev = NULL; + if ((err = validate_linkmsg(dev, tb)) < 0) + return err; + if (tb[IFLA_LINKINFO]) { err = nla_parse_nested(linkinfo, IFLA_INFO_MAX, tb[IFLA_LINKINFO], ifla_info_policy); diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 19880b086e7..9c7e5ffb223 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -343,7 +343,7 @@ config INET_ESP tristate "IP: ESP transformation" select XFRM select CRYPTO - select CRYPTO_AEAD + select CRYPTO_AUTHENC select CRYPTO_HMAC select CRYPTO_MD5 select CRYPTO_CBC diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index f282b26f63e..87490f7bb0f 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -752,6 +752,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg) inet_del_ifa(in_dev, ifap, 0); ifa->ifa_broadcast = 0; ifa->ifa_anycast = 0; + ifa->ifa_scope = 0; } ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr; diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c index 76b9c684ccc..8d58d85dfac 100644 --- a/net/ipv4/fib_hash.c +++ b/net/ipv4/fib_hash.c @@ -372,7 +372,8 @@ static struct fib_node *fib_find_node(struct fn_zone *fz, __be32 key) static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg) { struct fn_hash *table = (struct fn_hash *) tb->tb_data; - struct fib_node *new_f, *f; + struct fib_node *new_f = NULL; + struct fib_node *f; struct fib_alias *fa, *new_fa; struct fn_zone *fz; struct fib_info *fi; @@ -496,7 +497,6 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg) err = -ENOBUFS; - new_f = NULL; if (!f) { new_f = kmem_cache_zalloc(fn_hash_kmem, GFP_KERNEL); if (new_f == NULL) @@ -512,7 +512,7 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg) if (new_fa->fa_info != NULL) { new_fa = kmem_cache_alloc(fn_alias_kmem, GFP_KERNEL); if (new_fa == NULL) - goto out_free_new_f; + goto out; } new_fa->fa_info = fi; new_fa->fa_tos = tos; @@ -540,9 +540,9 @@ static int fn_hash_insert(struct fib_table *tb, struct fib_config *cfg) &cfg->fc_nlinfo, 0); return 0; -out_free_new_f: - kmem_cache_free(fn_hash_kmem, new_f); out: + if (new_f) + kmem_cache_free(fn_hash_kmem, new_f); fib_release_info(fi); return err; } diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 63f69171935..e7821ba7a9a 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -259,35 +259,31 @@ static struct ip_tunnel * ipgre_tunnel_locate(struct ip_tunnel_parm *parms, int if (parms->name[0]) strlcpy(name, parms->name, IFNAMSIZ); - else { - int i; - for (i=1; i<100; i++) { - sprintf(name, "gre%d", i); - if (__dev_get_by_name(&init_net, name) == NULL) - break; - } - if (i==100) - goto failed; - } + else + sprintf(name, "gre%%d"); dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup); if (!dev) return NULL; + if (strchr(name, '%')) { + if (dev_alloc_name(dev, name) < 0) + goto failed_free; + } + dev->init = ipgre_tunnel_init; nt = netdev_priv(dev); nt->parms = *parms; - if (register_netdevice(dev) < 0) { - free_netdev(dev); - goto failed; - } + if (register_netdevice(dev) < 0) + goto failed_free; dev_hold(dev); ipgre_tunnel_link(nt); return nt; -failed: +failed_free: + free_netdev(dev); return NULL; } diff --git a/net/ipv4/ipcomp.c 
b/net/ipv4/ipcomp.c index ae1f45fc23b..58b60b2fb01 100644 --- a/net/ipv4/ipcomp.c +++ b/net/ipv4/ipcomp.c @@ -108,8 +108,11 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb) const int cpu = get_cpu(); u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu); struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu); - int err = crypto_comp_compress(tfm, start, plen, scratch, &dlen); + int err; + local_bh_disable(); + err = crypto_comp_compress(tfm, start, plen, scratch, &dlen); + local_bh_enable(); if (err) goto out; diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 10013ccee8d..5dd938579ee 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -753,9 +753,9 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d printk("Unknown ARP type 0x%04x for device %s\n", dev->type, dev->name); b->htype = dev->type; /* can cause undefined behavior */ } + + /* server_ip and your_ip address are both already zero per RFC2131 */ b->hlen = dev->addr_len; - b->your_ip = NONE; - b->server_ip = NONE; memcpy(b->hw_addr, dev->dev_addr, dev->addr_len); b->secs = htons(jiffies_diff / HZ); b->xid = d->xid; diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index da281581692..dbaed69de06 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -221,35 +221,31 @@ static struct ip_tunnel * ipip_tunnel_locate(struct ip_tunnel_parm *parms, int c if (parms->name[0]) strlcpy(name, parms->name, IFNAMSIZ); - else { - int i; - for (i=1; i<100; i++) { - sprintf(name, "tunl%d", i); - if (__dev_get_by_name(&init_net, name) == NULL) - break; - } - if (i==100) - goto failed; - } + else + sprintf(name, "tunl%%d"); dev = alloc_netdev(sizeof(*t), name, ipip_tunnel_setup); if (dev == NULL) return NULL; + if (strchr(name, '%')) { + if (dev_alloc_name(dev, name) < 0) + goto failed_free; + } + nt = netdev_priv(dev); dev->init = ipip_tunnel_init; nt->parms = *parms; - if (register_netdevice(dev) < 0) { - free_netdev(dev); - goto failed; - } + if (register_netdevice(dev) < 0) + goto failed_free; dev_hold(dev); ipip_tunnel_link(nt); return nt; -failed: +failed_free: + free_netdev(dev); return NULL; } diff --git a/net/ipv4/netfilter/arpt_mangle.c b/net/ipv4/netfilter/arpt_mangle.c index 45fa4e20094..3f4222b0a80 100644 --- a/net/ipv4/netfilter/arpt_mangle.c +++ b/net/ipv4/netfilter/arpt_mangle.c @@ -19,7 +19,7 @@ target(struct sk_buff *skb, unsigned char *arpptr; int pln, hln; - if (skb_make_writable(skb, skb->len)) + if (!skb_make_writable(skb, skb->len)) return NF_DROP; arp = arp_hdr(skb); diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c index 6bda1102851..fe05da41d6b 100644 --- a/net/ipv4/netfilter/ip_queue.c +++ b/net/ipv4/netfilter/ip_queue.c @@ -283,8 +283,8 @@ static int ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e) { int diff; - int err; struct iphdr *user_iph = (struct iphdr *)v->payload; + struct sk_buff *nskb; if (v->data_len < sizeof(*user_iph)) return 0; @@ -296,14 +296,16 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e) if (v->data_len > 0xFFFF) return -EINVAL; if (diff > skb_tailroom(e->skb)) { - err = pskb_expand_head(e->skb, 0, + nskb = skb_copy_expand(e->skb, 0, diff - skb_tailroom(e->skb), GFP_ATOMIC); - if (err) { + if (!nskb) { printk(KERN_WARNING "ip_queue: error " - "in mangle, dropping packet: %d\n", -err); - return err; + "in mangle, dropping packet\n"); + return -ENOMEM; } + kfree_skb(e->skb); + e->skb = nskb; } skb_put(e->skb, diff); } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 
525787b52b7..7b5e8e1d94b 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -542,12 +542,11 @@ static __init int ip_rt_proc_init(struct net *net) if (!pde) goto err1; - pde = create_proc_entry("rt_cache", S_IRUGO, net->proc_net_stat); + pde = proc_create("rt_cache", S_IRUGO, + net->proc_net_stat, &rt_cpu_seq_fops); if (!pde) goto err2; - pde->proc_fops = &rt_cpu_seq_fops; - #ifdef CONFIG_NET_CLS_ROUTE pde = create_proc_read_entry("rt_acct", 0, net->proc_net, ip_rt_acct_read, NULL); diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c index 5212ed9b0c9..7eb7636db0d 100644 --- a/net/ipv4/tcp_bic.c +++ b/net/ipv4/tcp_bic.c @@ -1,12 +1,13 @@ /* * Binary Increase Congestion control for TCP - * + * Home page: + * http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC * This is from the implementation of BICTCP in * Lison-Xu, Kahaled Harfoush, and Injong Rhee. * "Binary Increase Congestion Control for Fast, Long Distance * Networks" in InfoComm 2004 * Available from: - * http://www.csc.ncsu.edu/faculty/rhee/export/bitcp.pdf + * http://netsrv.csc.ncsu.edu/export/bitcp.pdf * * Unless BIC is enabled and congestion window is large * this behaves the same as the original Reno. diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 19c449f6267..7facdb0f696 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1367,7 +1367,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, * a normal way */ static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, - u32 skip_to_seq) + u32 skip_to_seq, int *fack_count) { tcp_for_write_queue_from(skb, sk) { if (skb == tcp_send_head(sk)) @@ -1375,6 +1375,8 @@ static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, if (!before(TCP_SKB_CB(skb)->end_seq, skip_to_seq)) break; + + *fack_count += tcp_skb_pcount(skb); } return skb; } @@ -1390,7 +1392,7 @@ static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb, return skb; if (before(next_dup->start_seq, skip_to_seq)) { - skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq); + skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq, fack_count); tcp_sacktag_walk(skb, sk, NULL, next_dup->start_seq, next_dup->end_seq, 1, fack_count, reord, flag); @@ -1537,7 +1539,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, /* Head todo? */ if (before(start_seq, cache->start_seq)) { - skb = tcp_sacktag_skip(skb, sk, start_seq); + skb = tcp_sacktag_skip(skb, sk, start_seq, + &fack_count); skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq, cache->start_seq, @@ -1565,7 +1568,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, goto walk; } - skb = tcp_sacktag_skip(skb, sk, cache->end_seq); + skb = tcp_sacktag_skip(skb, sk, cache->end_seq, + &fack_count); /* Check overlap against next cached too (past this one already) */ cache++; continue; @@ -1577,7 +1581,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, break; fack_count = tp->fackets_out; } - skb = tcp_sacktag_skip(skb, sk, start_seq); + skb = tcp_sacktag_skip(skb, sk, start_seq, &fack_count); walk: skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq, end_seq, diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index ed750f9ceb0..01578f544ad 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1035,6 +1035,13 @@ static void tcp_cwnd_validate(struct sock *sk) * introducing MSS oddities to segment boundaries. 
In rare cases where * mss_now != mss_cache, we will request caller to create a small skb * per input skb which could be mostly avoided here (if desired). + * + * We explicitly want to create a request for splitting write queue tail + * to a small skb for Nagle purposes while avoiding unnecessary modulos, + * thus all the complexity (cwnd_len is always MSS multiple which we + * return whenever allowed by the other factors). Basically we need the + * modulo only when the receiver window alone is the limiting factor or + * when we would be allowed to send the split-due-to-Nagle skb fully. */ static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb, unsigned int mss_now, unsigned int cwnd) @@ -1048,10 +1055,11 @@ static unsigned int tcp_mss_split_point(struct sock *sk, struct sk_buff *skb, if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk))) return cwnd_len; - if (skb == tcp_write_queue_tail(sk) && cwnd_len <= skb->len) + needed = min(skb->len, window); + + if (skb == tcp_write_queue_tail(sk) && cwnd_len <= needed) return cwnd_len; - needed = min(skb->len, window); return needed - needed % mss_now; } diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 3ffb0323668..58219dfffef 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -85,7 +85,7 @@ config INET6_ESP depends on IPV6 select XFRM select CRYPTO - select CRYPTO_AEAD + select CRYPTO_AUTHENC select CRYPTO_HMAC select CRYPTO_MD5 select CRYPTO_CBC diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index e40213db9e4..101e0e70ba2 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1557,6 +1557,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev, .fc_expires = expires, .fc_dst_len = plen, .fc_flags = RTF_UP | flags, + .fc_nlinfo.nl_net = &init_net, }; ipv6_addr_copy(&cfg.fc_dst, pfx); @@ -1583,6 +1584,7 @@ static void addrconf_add_mroute(struct net_device *dev) .fc_ifindex = dev->ifindex, .fc_dst_len = 8, .fc_flags = RTF_UP, + .fc_nlinfo.nl_net = &init_net, }; ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0); @@ -1599,6 +1601,7 @@ static void sit_route_add(struct net_device *dev) .fc_ifindex = dev->ifindex, .fc_dst_len = 96, .fc_flags = RTF_UP | RTF_NONEXTHOP, + .fc_nlinfo.nl_net = &init_net, }; /* prefix length - 96 bits "::d.d.d.d" */ diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index cd940647bd1..78f43888092 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -229,33 +229,33 @@ static struct ip6_tnl *ip6_tnl_create(struct ip6_tnl_parm *p) char name[IFNAMSIZ]; int err; - if (p->name[0]) { + if (p->name[0]) strlcpy(name, p->name, IFNAMSIZ); - } else { - int i; - for (i = 1; i < IP6_TNL_MAX; i++) { - sprintf(name, "ip6tnl%d", i); - if (__dev_get_by_name(&init_net, name) == NULL) - break; - } - if (i == IP6_TNL_MAX) - goto failed; - } + else + sprintf(name, "ip6tnl%%d"); + dev = alloc_netdev(sizeof (*t), name, ip6_tnl_dev_setup); if (dev == NULL) goto failed; + if (strchr(name, '%')) { + if (dev_alloc_name(dev, name) < 0) + goto failed_free; + } + t = netdev_priv(dev); dev->init = ip6_tnl_dev_init; t->parms = *p; - if ((err = register_netdevice(dev)) < 0) { - free_netdev(dev); - goto failed; - } + if ((err = register_netdevice(dev)) < 0) + goto failed_free; + dev_hold(dev); ip6_tnl_link(t); return t; + +failed_free: + free_netdev(dev); failed: return NULL; } diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c index b90039593a7..e3dcfa2f436 100644 --- a/net/ipv6/ipcomp6.c +++ b/net/ipv6/ipcomp6.c @@ -146,7 +146,9 @@ static int 
ipcomp6_output(struct xfrm_state *x, struct sk_buff *skb) scratch = *per_cpu_ptr(ipcomp6_scratches, cpu); tfm = *per_cpu_ptr(ipcd->tfms, cpu); + local_bh_disable(); err = crypto_comp_compress(tfm, start, plen, scratch, &dlen); + local_bh_enable(); if (err || (dlen + sizeof(*ipch)) >= plen) { put_cpu(); goto out_ok; diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index e869916b05f..cc2f9afcf80 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c @@ -285,8 +285,8 @@ static int ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e) { int diff; - int err; struct ipv6hdr *user_iph = (struct ipv6hdr *)v->payload; + struct sk_buff *nskb; if (v->data_len < sizeof(*user_iph)) return 0; @@ -298,14 +298,16 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e) if (v->data_len > 0xFFFF) return -EINVAL; if (diff > skb_tailroom(e->skb)) { - err = pskb_expand_head(e->skb, 0, + nskb = skb_copy_expand(e->skb, 0, diff - skb_tailroom(e->skb), GFP_ATOMIC); - if (err) { + if (!nskb) { printk(KERN_WARNING "ip6_queue: OOM " "in mangle, dropping packet\n"); - return err; + return -ENOMEM; } + kfree_skb(e->skb); + e->skb = nskb; } skb_put(e->skb, diff); } diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index 35e502a7249..199ef379e50 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -217,12 +217,12 @@ int snmp6_register_dev(struct inet6_dev *idev) if (!proc_net_devsnmp6) return -ENOENT; - p = create_proc_entry(idev->dev->name, S_IRUGO, proc_net_devsnmp6); + p = proc_create(idev->dev->name, S_IRUGO, + proc_net_devsnmp6, &snmp6_seq_fops); if (!p) return -ENOMEM; p->data = idev; - p->proc_fops = &snmp6_seq_fops; idev->stats.proc_dir_entry = p; return 0; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 6e7b56ef444..e8b241cb60b 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1719,6 +1719,8 @@ static void rtmsg_to_fib6_config(struct in6_rtmsg *rtmsg, cfg->fc_src_len = rtmsg->rtmsg_src_len; cfg->fc_flags = rtmsg->rtmsg_flags; + cfg->fc_nlinfo.nl_net = &init_net; + ipv6_addr_copy(&cfg->fc_dst, &rtmsg->rtmsg_dst); ipv6_addr_copy(&cfg->fc_src, &rtmsg->rtmsg_src); ipv6_addr_copy(&cfg->fc_gateway, &rtmsg->rtmsg_gateway); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index e77239d02bf..1656c003b98 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -164,21 +164,18 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int if (parms->name[0]) strlcpy(name, parms->name, IFNAMSIZ); - else { - int i; - for (i=1; i<100; i++) { - sprintf(name, "sit%d", i); - if (__dev_get_by_name(&init_net, name) == NULL) - break; - } - if (i==100) - goto failed; - } + else + sprintf(name, "sit%%d"); dev = alloc_netdev(sizeof(*t), name, ipip6_tunnel_setup); if (dev == NULL) return NULL; + if (strchr(name, '%')) { + if (dev_alloc_name(dev, name) < 0) + goto failed_free; + } + nt = netdev_priv(dev); dev->init = ipip6_tunnel_init; nt->parms = *parms; @@ -186,16 +183,16 @@ static struct ip_tunnel * ipip6_tunnel_locate(struct ip_tunnel_parm *parms, int if (parms->i_flags & SIT_ISATAP) dev->priv_flags |= IFF_ISATAP; - if (register_netdevice(dev) < 0) { - free_netdev(dev); - goto failed; - } + if (register_netdevice(dev) < 0) + goto failed_free; dev_hold(dev); ipip6_tunnel_link(nt); return nt; +failed_free: + free_netdev(dev); failed: return NULL; } diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index 408691b777c..d6d3e68086f 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c @@ -102,9 +102,6 @@ 
static int ipv6_sysctl_net_init(struct net *net) net->ipv6.sysctl.table = register_net_sysctl_table(net, net_ipv6_ctl_path, ipv6_table); if (!net->ipv6.sysctl.table) - return -ENOMEM; - - if (!net->ipv6.sysctl.table) goto out_ipv6_icmp_table; err = 0; diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c index d483a00dc42..5ed97ad0e2e 100644 --- a/net/ipx/ipx_proc.c +++ b/net/ipx/ipx_proc.c @@ -358,22 +358,19 @@ int __init ipx_proc_init(void) if (!ipx_proc_dir) goto out; - p = create_proc_entry("interface", S_IRUGO, ipx_proc_dir); + p = proc_create("interface", S_IRUGO, + ipx_proc_dir, &ipx_seq_interface_fops); if (!p) goto out_interface; - p->proc_fops = &ipx_seq_interface_fops; - p = create_proc_entry("route", S_IRUGO, ipx_proc_dir); + p = proc_create("route", S_IRUGO, ipx_proc_dir, &ipx_seq_route_fops); if (!p) goto out_route; - p->proc_fops = &ipx_seq_route_fops; - p = create_proc_entry("socket", S_IRUGO, ipx_proc_dir); + p = proc_create("socket", S_IRUGO, ipx_proc_dir, &ipx_seq_socket_fops); if (!p) goto out_socket; - p->proc_fops = &ipx_seq_socket_fops; - rc = 0; out: return rc; diff --git a/net/irda/ircomm/ircomm_core.c b/net/irda/ircomm/ircomm_core.c index b825399fc16..6eef1f2a755 100644 --- a/net/irda/ircomm/ircomm_core.c +++ b/net/irda/ircomm/ircomm_core.c @@ -76,9 +76,11 @@ static int __init ircomm_init(void) #ifdef CONFIG_PROC_FS { struct proc_dir_entry *ent; - ent = create_proc_entry("ircomm", 0, proc_irda); - if (ent) - ent->proc_fops = &ircomm_proc_fops; + ent = proc_create("ircomm", 0, proc_irda, &ircomm_proc_fops); + if (!ent) { + printk(KERN_ERR "ircomm_init: can't create /proc entry!\n"); + return -ENODEV; + } } #endif /* CONFIG_PROC_FS */ diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c index a4b56e25a91..1eb4bbcb1c9 100644 --- a/net/irda/irlan/irlan_common.c +++ b/net/irda/irlan/irlan_common.c @@ -128,13 +128,11 @@ static int __init irlan_init(void) #ifdef CONFIG_PROC_FS { struct proc_dir_entry *proc; - proc = create_proc_entry("irlan", 0, proc_irda); + proc = proc_create("irlan", 0, proc_irda, &irlan_fops); if (!proc) { printk(KERN_ERR "irlan_init: can't create /proc entry!\n"); return -ENODEV; } - - proc->proc_fops = &irlan_fops; } #endif /* CONFIG_PROC_FS */ diff --git a/net/irda/irproc.c b/net/irda/irproc.c index cae24fbda96..88e80a31273 100644 --- a/net/irda/irproc.c +++ b/net/irda/irproc.c @@ -72,11 +72,9 @@ void __init irda_proc_register(void) return; proc_irda->owner = THIS_MODULE; - for (i=0; i<ARRAY_SIZE(irda_dirs); i++) { - d = create_proc_entry(irda_dirs[i].name, 0, proc_irda); - if (d) - d->proc_fops = irda_dirs[i].fops; - } + for (i = 0; i < ARRAY_SIZE(irda_dirs); i++) + d = proc_create(irda_dirs[i].name, 0, proc_irda, + irda_dirs[i].fops); } /* diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 2753b0c448f..d764f4c1b7e 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -621,7 +621,6 @@ static int iucv_sever_pathid(u16 pathid, u8 userdata[16]) return iucv_call_b2f0(IUCV_SEVER, parm); } -#ifdef CONFIG_SMP /** * __iucv_cleanup_queue * @dummy: unused dummy argument @@ -632,7 +631,6 @@ static int iucv_sever_pathid(u16 pathid, u8 userdata[16]) static void __iucv_cleanup_queue(void *dummy) { } -#endif /** * iucv_cleanup_queue diff --git a/net/key/af_key.c b/net/key/af_key.c index 1c853927810..8b5f486ac80 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3807,17 +3807,16 @@ static int pfkey_init_proc(void) { struct proc_dir_entry *e; - e = create_proc_entry("pfkey", 0, init_net.proc_net); + e = proc_net_fops_create(&init_net, 
"pfkey", 0, &pfkey_proc_ops); if (e == NULL) return -ENOMEM; - e->proc_fops = &pfkey_proc_ops; return 0; } static void pfkey_exit_proc(void) { - remove_proc_entry("net/pfkey", NULL); + proc_net_remove(&init_net, "pfkey"); } #else static inline int pfkey_init_proc(void) diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c index cb34bc0518e..48212c0a961 100644 --- a/net/llc/llc_proc.c +++ b/net/llc/llc_proc.c @@ -239,18 +239,14 @@ int __init llc_proc_init(void) goto out; llc_proc_dir->owner = THIS_MODULE; - p = create_proc_entry("socket", S_IRUGO, llc_proc_dir); + p = proc_create("socket", S_IRUGO, llc_proc_dir, &llc_seq_socket_fops); if (!p) goto out_socket; - p->proc_fops = &llc_seq_socket_fops; - - p = create_proc_entry("core", S_IRUGO, llc_proc_dir); + p = proc_create("core", S_IRUGO, llc_proc_dir, &llc_seq_core_fops); if (!p) goto out_core; - p->proc_fops = &llc_seq_core_fops; - rc = 0; out: return rc; diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c index 2019b4f0528..9aeed532022 100644 --- a/net/mac80211/ieee80211_sta.c +++ b/net/mac80211/ieee80211_sta.c @@ -1116,9 +1116,10 @@ static void ieee80211_sta_process_addba_request(struct net_device *dev, /* prepare reordering buffer */ tid_agg_rx->reorder_buf = kmalloc(buf_size * sizeof(struct sk_buf *), GFP_ATOMIC); - if ((!tid_agg_rx->reorder_buf) && net_ratelimit()) { - printk(KERN_ERR "can not allocate reordering buffer " - "to tid %d\n", tid); + if (!tid_agg_rx->reorder_buf) { + if (net_ratelimit()) + printk(KERN_ERR "can not allocate reordering buffer " + "to tid %d\n", tid); goto end; } memset(tid_agg_rx->reorder_buf, 0, diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c index c339571632b..3b77410588e 100644 --- a/net/mac80211/rc80211_pid_algo.c +++ b/net/mac80211/rc80211_pid_algo.c @@ -2,7 +2,7 @@ * Copyright 2002-2005, Instant802 Networks, Inc. * Copyright 2005, Devicescape Software, Inc. * Copyright 2007, Mattias Nissler <mattias.nissler@gmx.de> - * Copyright 2007, Stefano Brivio <stefano.brivio@polimi.it> + * Copyright 2007-2008, Stefano Brivio <stefano.brivio@polimi.it> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -63,72 +63,66 @@ * RC_PID_ARITH_SHIFT. */ - -/* Shift the adjustment so that we won't switch to a lower rate if it exhibited - * a worse failed frames behaviour and we'll choose the highest rate whose - * failed frames behaviour is not worse than the one of the original rate - * target. While at it, check that the adjustment is within the ranges. Then, - * provide the new rate index. */ -static int rate_control_pid_shift_adjust(struct rc_pid_rateinfo *r, - int adj, int cur, int l) -{ - int i, j, k, tmp; - - j = r[cur].rev_index; - i = j + adj; - - if (i < 0) - return r[0].index; - if (i >= l - 1) - return r[l - 1].index; - - tmp = i; - - if (adj < 0) { - for (k = j; k >= i; k--) - if (r[k].diff <= r[j].diff) - tmp = k; - } else { - for (k = i + 1; k + i < l; k++) - if (r[k].diff <= r[i].diff) - tmp = k; - } - - return r[tmp].index; -} - +/* Adjust the rate while ensuring that we won't switch to a lower rate if it + * exhibited a worse failed frames behaviour and we'll choose the highest rate + * whose failed frames behaviour is not worse than the one of the original rate + * target. While at it, check that the new rate is valid. 
*/ static void rate_control_pid_adjust_rate(struct ieee80211_local *local, struct sta_info *sta, int adj, struct rc_pid_rateinfo *rinfo) { struct ieee80211_sub_if_data *sdata; struct ieee80211_hw_mode *mode; - int newidx; - int maxrate; - int back = (adj > 0) ? 1 : -1; + int cur_sorted, new_sorted, probe, tmp, n_bitrates; + int cur = sta->txrate; sdata = IEEE80211_DEV_TO_SUB_IF(sta->dev); mode = local->oper_hw_mode; - maxrate = sdata->bss ? sdata->bss->max_ratectrl_rateidx : -1; + n_bitrates = mode->num_rates; - newidx = rate_control_pid_shift_adjust(rinfo, adj, sta->txrate, - mode->num_rates); + /* Map passed arguments to sorted values. */ + cur_sorted = rinfo[cur].rev_index; + new_sorted = cur_sorted + adj; - while (newidx != sta->txrate) { - if (rate_supported(sta, mode, newidx) && - (maxrate < 0 || newidx <= maxrate)) { - sta->txrate = newidx; - break; - } + /* Check limits. */ + if (new_sorted < 0) + new_sorted = rinfo[0].rev_index; + else if (new_sorted >= n_bitrates) + new_sorted = rinfo[n_bitrates - 1].rev_index; - newidx += back; + tmp = new_sorted; + + if (adj < 0) { + /* Ensure that the rate decrease isn't disadvantageous. */ + for (probe = cur_sorted; probe >= new_sorted; probe--) + if (rinfo[probe].diff <= rinfo[cur_sorted].diff && + rate_supported(sta, mode, rinfo[probe].index)) + tmp = probe; + } else { + /* Look for rate increase with zero (or below) cost. */ + for (probe = new_sorted + 1; probe < n_bitrates; probe++) + if (rinfo[probe].diff <= rinfo[new_sorted].diff && + rate_supported(sta, mode, rinfo[probe].index)) + tmp = probe; } + /* Fit the rate found to the nearest supported rate. */ + do { + if (rate_supported(sta, mode, rinfo[tmp].index)) { + sta->txrate = rinfo[tmp].index; + break; + } + if (adj < 0) + tmp--; + else + tmp++; + } while (tmp < n_bitrates && tmp >= 0); + #ifdef CONFIG_MAC80211_DEBUGFS rate_control_pid_event_rate_change( &((struct rc_pid_sta_info *)sta->rate_ctrl_priv)->events, - newidx, mode->rates[newidx].rate); + cur, mode->rates[cur].rate); #endif } diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 327e847d270..b77eb56a87e 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -256,13 +256,19 @@ __nf_conntrack_find(const struct nf_conntrack_tuple *tuple) struct hlist_node *n; unsigned int hash = hash_conntrack(tuple); + /* Disable BHs the entire time since we normally need to disable them + * at least once for the stats anyway. + */ + local_bh_disable(); hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) { if (nf_ct_tuple_equal(tuple, &h->tuple)) { NF_CT_STAT_INC(found); + local_bh_enable(); return h; } NF_CT_STAT_INC(searched); } + local_bh_enable(); return NULL; } @@ -400,17 +406,20 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, struct hlist_node *n; unsigned int hash = hash_conntrack(tuple); - rcu_read_lock(); + /* Disable BHs the entire time since we need to disable them at + * least once for the stats anyway. 
+ */ + rcu_read_lock_bh(); hlist_for_each_entry_rcu(h, n, &nf_conntrack_hash[hash], hnode) { if (nf_ct_tuplehash_to_ctrack(h) != ignored_conntrack && nf_ct_tuple_equal(tuple, &h->tuple)) { NF_CT_STAT_INC(found); - rcu_read_unlock(); + rcu_read_unlock_bh(); return 1; } NF_CT_STAT_INC(searched); } - rcu_read_unlock(); + rcu_read_unlock_bh(); return 0; } diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index e06bf0028bb..684ec9c1ad3 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -381,7 +381,7 @@ int nf_ct_expect_related(struct nf_conntrack_expect *expect) if (nf_ct_expect_count >= nf_ct_expect_max) { if (net_ratelimit()) printk(KERN_WARNING - "nf_conntrack: expectation table full"); + "nf_conntrack: expectation table full\n"); ret = -EMFILE; goto out; } diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c index 8b9be1e978c..2bd9963b5b3 100644 --- a/net/netfilter/nf_conntrack_extend.c +++ b/net/netfilter/nf_conntrack_extend.c @@ -19,14 +19,6 @@ static struct nf_ct_ext_type *nf_ct_ext_types[NF_CT_EXT_NUM]; static DEFINE_MUTEX(nf_ct_ext_type_mutex); -/* Horrible trick to figure out smallest amount worth kmallocing. */ -#define CACHE(x) (x) + 0 * -enum { - NF_CT_EXT_MIN_SIZE = -#include <linux/kmalloc_sizes.h> - 1 }; -#undef CACHE - void __nf_ct_ext_destroy(struct nf_conn *ct) { unsigned int i; @@ -53,7 +45,7 @@ EXPORT_SYMBOL(__nf_ct_ext_destroy); static void * nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp) { - unsigned int off, len, real_len; + unsigned int off, len; struct nf_ct_ext_type *t; rcu_read_lock(); @@ -61,16 +53,14 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp) BUG_ON(t == NULL); off = ALIGN(sizeof(struct nf_ct_ext), t->align); len = off + t->len; - real_len = t->alloc_size; rcu_read_unlock(); - *ext = kzalloc(real_len, gfp); + *ext = kzalloc(t->alloc_size, gfp); if (!*ext) return NULL; (*ext)->offset[id] = off; (*ext)->len = len; - (*ext)->real_len = real_len; return (void *)(*ext) + off; } @@ -95,7 +85,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp) newlen = newoff + t->len; rcu_read_unlock(); - if (newlen >= ct->ext->real_len) { + if (newlen >= ksize(ct->ext)) { new = kmalloc(newlen, gfp); if (!new) return NULL; @@ -114,7 +104,6 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp) rcu_read_unlock(); } kfree(ct->ext); - new->real_len = newlen; ct->ext = new; } @@ -156,8 +145,6 @@ static void update_alloc_size(struct nf_ct_ext_type *type) t1->alloc_size = ALIGN(t1->alloc_size, t2->align) + t2->len; } - if (t1->alloc_size < NF_CT_EXT_MIN_SIZE) - t1->alloc_size = NF_CT_EXT_MIN_SIZE; } } diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index bfc2928c191..ddc80ea114c 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c @@ -51,7 +51,7 @@ int nf_unregister_queue_handler(int pf, const struct nf_queue_handler *qh) return -EINVAL; mutex_lock(&queue_handler_mutex); - if (queue_handler[pf] != qh) { + if (queue_handler[pf] && queue_handler[pf] != qh) { mutex_unlock(&queue_handler_mutex); return -EINVAL; } diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 7efa40d4739..bf3f19b21fe 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c @@ -556,7 +556,7 @@ nfulnl_log_packet(unsigned int pf, /* FIXME: do we want to make the size calculation conditional based on * what is actually 
present? way more branches and checks, but more * memory efficient... */ - size = NLMSG_ALIGN(sizeof(struct nfgenmsg)) + size = NLMSG_SPACE(sizeof(struct nfgenmsg)) + nla_total_size(sizeof(struct nfulnl_msg_packet_hdr)) + nla_total_size(sizeof(u_int32_t)) /* ifindex */ + nla_total_size(sizeof(u_int32_t)) /* ifindex */ @@ -702,20 +702,30 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb, struct nfgenmsg *nfmsg = NLMSG_DATA(nlh); u_int16_t group_num = ntohs(nfmsg->res_id); struct nfulnl_instance *inst; + struct nfulnl_msg_config_cmd *cmd = NULL; int ret = 0; + if (nfula[NFULA_CFG_CMD]) { + u_int8_t pf = nfmsg->nfgen_family; + cmd = nla_data(nfula[NFULA_CFG_CMD]); + + /* Commands without queue context */ + switch (cmd->command) { + case NFULNL_CFG_CMD_PF_BIND: + return nf_log_register(pf, &nfulnl_logger); + case NFULNL_CFG_CMD_PF_UNBIND: + nf_log_unregister_pf(pf); + return 0; + } + } + inst = instance_lookup_get(group_num); if (inst && inst->peer_pid != NETLINK_CB(skb).pid) { ret = -EPERM; goto out_put; } - if (nfula[NFULA_CFG_CMD]) { - u_int8_t pf = nfmsg->nfgen_family; - struct nfulnl_msg_config_cmd *cmd; - - cmd = nla_data(nfula[NFULA_CFG_CMD]); - + if (cmd != NULL) { switch (cmd->command) { case NFULNL_CFG_CMD_BIND: if (inst) { @@ -738,14 +748,6 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb, instance_destroy(inst); goto out; - case NFULNL_CFG_CMD_PF_BIND: - ret = nf_log_register(pf, &nfulnl_logger); - break; - case NFULNL_CFG_CMD_PF_UNBIND: - /* This is a bug and a feature. We cannot unregister - * other handlers, like nfnetlink_inst can */ - nf_log_unregister_pf(pf); - break; default: ret = -ENOTSUPP; break; diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index a48b20fe9cd..012cb691082 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c @@ -224,7 +224,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, struct net_device *indev; struct net_device *outdev; - size = NLMSG_ALIGN(sizeof(struct nfgenmsg)) + size = NLMSG_SPACE(sizeof(struct nfgenmsg)) + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr)) + nla_total_size(sizeof(u_int32_t)) /* ifindex */ + nla_total_size(sizeof(u_int32_t)) /* ifindex */ @@ -443,8 +443,8 @@ err_out: static int nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e) { + struct sk_buff *nskb; int diff; - int err; diff = data_len - e->skb->len; if (diff < 0) { @@ -454,14 +454,16 @@ nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e) if (data_len > 0xFFFF) return -EINVAL; if (diff > skb_tailroom(e->skb)) { - err = pskb_expand_head(e->skb, 0, + nskb = skb_copy_expand(e->skb, 0, diff - skb_tailroom(e->skb), GFP_ATOMIC); - if (err) { + if (!nskb) { printk(KERN_WARNING "nf_queue: OOM " "in mangle, dropping packet\n"); - return err; + return -ENOMEM; } + kfree_skb(e->skb); + e->skb = nskb; } skb_put(e->skb, diff); } @@ -701,19 +703,12 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb, /* Commands without queue context - might sleep */ switch (cmd->command) { case NFQNL_CFG_CMD_PF_BIND: - ret = nf_register_queue_handler(ntohs(cmd->pf), - &nfqh); - break; + return nf_register_queue_handler(ntohs(cmd->pf), + &nfqh); case NFQNL_CFG_CMD_PF_UNBIND: - ret = nf_unregister_queue_handler(ntohs(cmd->pf), - &nfqh); - break; - default: - break; + return nf_unregister_queue_handler(ntohs(cmd->pf), + &nfqh); } - - if (ret < 0) - return ret; } rcu_read_lock(); diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c index 
85330856a29..0c50b289405 100644 --- a/net/netfilter/xt_conntrack.c +++ b/net/netfilter/xt_conntrack.c @@ -122,7 +122,7 @@ conntrack_addrcmp(const union nf_inet_addr *kaddr, const union nf_inet_addr *umask, unsigned int l3proto) { if (l3proto == AF_INET) - return (kaddr->ip & umask->ip) == uaddr->ip; + return ((kaddr->ip ^ uaddr->ip) & umask->ip) == 0; else if (l3proto == AF_INET6) return ipv6_masked_addr_cmp(&kaddr->in6, &umask->in6, &uaddr->in6) == 0; @@ -231,7 +231,7 @@ conntrack_mt(const struct sk_buff *skb, const struct net_device *in, if (test_bit(IPS_DST_NAT_BIT, &ct->status)) statebit |= XT_CONNTRACK_STATE_DNAT; } - if ((info->state_mask & statebit) ^ + if (!!(info->state_mask & statebit) ^ !(info->invert_flags & XT_CONNTRACK_STATE)) return false; } diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 744c7f2ab0b..5418ce59ac3 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -774,9 +774,6 @@ hashlimit_mt_check(const char *tablename, const void *inf, return false; } mutex_unlock(&hlimit_mutex); - - /* Ugly hack: For SMP, we only want to use one set */ - info->master = info; return true; } diff --git a/net/netfilter/xt_iprange.c b/net/netfilter/xt_iprange.c index 4f984dc6031..500528d60cd 100644 --- a/net/netfilter/xt_iprange.c +++ b/net/netfilter/xt_iprange.c @@ -102,7 +102,7 @@ iprange_ipv6_sub(const struct in6_addr *a, const struct in6_addr *b) int r; for (i = 0; i < 4; ++i) { - r = (__force u32)a->s6_addr32[i] - (__force u32)b->s6_addr32[i]; + r = ntohl(a->s6_addr32[i]) - ntohl(b->s6_addr32[i]); if (r != 0) return r; } diff --git a/net/netfilter/xt_time.c b/net/netfilter/xt_time.c index e9a8794bc3a..9fa2e082470 100644 --- a/net/netfilter/xt_time.c +++ b/net/netfilter/xt_time.c @@ -95,8 +95,11 @@ static inline void localtime_2(struct xtm *r, time_t time) */ r->dse = time / 86400; - /* 1970-01-01 (w=0) was a Thursday (4). */ - r->weekday = (4 + r->dse) % 7; + /* + * 1970-01-01 (w=0) was a Thursday (4). + * -1 and +1 map Sunday properly onto 7. 
+ */ + r->weekday = (4 + r->dse - 1) % 7 + 1; } static void localtime_3(struct xtm *r, time_t time) diff --git a/net/netfilter/xt_u32.c b/net/netfilter/xt_u32.c index 9b8ed390a8e..627e0f336d5 100644 --- a/net/netfilter/xt_u32.c +++ b/net/netfilter/xt_u32.c @@ -26,7 +26,6 @@ static bool u32_match_it(const struct xt_u32 *data, u_int32_t pos; u_int32_t val; u_int32_t at; - int ret; /* * Small example: "0 >> 28 == 4 && 8 & 0xFF0000 >> 16 = 6, 17" @@ -40,8 +39,8 @@ static bool u32_match_it(const struct xt_u32 *data, if (skb->len < 4 || pos > skb->len - 4) return false; - ret = skb_copy_bits(skb, pos, &n, sizeof(n)); - BUG_ON(ret < 0); + if (skb_copy_bits(skb, pos, &n, sizeof(n)) < 0) + BUG(); val = ntohl(n); nnums = ct->nnums; @@ -67,9 +66,9 @@ static bool u32_match_it(const struct xt_u32 *data, pos > skb->len - at - 4) return false; - ret = skb_copy_bits(skb, at + pos, &n, - sizeof(n)); - BUG_ON(ret < 0); + if (skb_copy_bits(skb, at + pos, &n, + sizeof(n)) < 0) + BUG(); val = ntohl(n); break; } diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c index 1a47f5d1be1..140a0a8c6b0 100644 --- a/net/rfkill/rfkill.c +++ b/net/rfkill/rfkill.c @@ -232,7 +232,7 @@ static int rfkill_suspend(struct device *dev, pm_message_t state) struct rfkill *rfkill = to_rfkill(dev); if (dev->power.power_state.event != state.event) { - if (state.event == PM_EVENT_SUSPEND) { + if (state.event & PM_EVENT_SLEEP) { mutex_lock(&rfkill->mutex); if (rfkill->state == RFKILL_STATE_ON) diff --git a/net/rxrpc/ar-recvmsg.c b/net/rxrpc/ar-recvmsg.c index f19121d4795..a39bf97f883 100644 --- a/net/rxrpc/ar-recvmsg.c +++ b/net/rxrpc/ar-recvmsg.c @@ -143,7 +143,8 @@ int rxrpc_recvmsg(struct kiocb *iocb, struct socket *sock, /* copy the peer address and timestamp */ if (!continue_call) { if (msg->msg_name && msg->msg_namelen > 0) - memcpy(&msg->msg_name, &call->conn->trans->peer->srx, + memcpy(msg->msg_name, + &call->conn->trans->peer->srx, sizeof(call->conn->trans->peer->srx)); sock_recv_timestamp(msg, &rx->sk, skb); } diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 8bb79f28177..675a5c3e68a 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -838,11 +838,11 @@ int sctp_auth_set_key(struct sctp_endpoint *ep, } /* Create a new key data based on the info passed in */ - key = sctp_auth_create_key(auth_key->sca_keylen, GFP_KERNEL); + key = sctp_auth_create_key(auth_key->sca_keylength, GFP_KERNEL); if (!key) goto nomem; - memcpy(key->data, &auth_key->sca_key[0], auth_key->sca_keylen); + memcpy(key->data, &auth_key->sca_key[0], auth_key->sca_keylength); /* If we are replacing, remove the old keys data from the * key id. If we are adding new key id, add it to the diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index a27511ebc4c..ceefda025e2 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c @@ -209,6 +209,7 @@ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr) { struct sctp_sockaddr_entry *addr, *temp; + int found = 0; /* We hold the socket lock when calling this function, * and that acts as a writer synchronizing lock. @@ -216,13 +217,14 @@ int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr) list_for_each_entry_safe(addr, temp, &bp->address_list, list) { if (sctp_cmp_addr_exact(&addr->a, del_addr)) { /* Found the exact match. 
*/ + found = 1; addr->valid = 0; list_del_rcu(&addr->list); break; } } - if (addr && !addr->valid) { + if (found) { call_rcu(&addr->rcu, sctp_local_addr_free); SCTP_DBG_OBJCNT_DEC(addr); return 0; diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 4d7ec961ae1..9aa0733aee8 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -89,6 +89,7 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev, struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; struct sctp_sockaddr_entry *addr = NULL; struct sctp_sockaddr_entry *temp; + int found = 0; switch (ev) { case NETDEV_UP: @@ -111,13 +112,14 @@ static int sctp_inet6addr_event(struct notifier_block *this, unsigned long ev, &sctp_local_addr_list, list) { if (ipv6_addr_equal(&addr->a.v6.sin6_addr, &ifa->addr)) { + found = 1; addr->valid = 0; list_del_rcu(&addr->list); break; } } spin_unlock_bh(&sctp_local_addr_lock); - if (addr && !addr->valid) + if (found) call_rcu(&addr->rcu, sctp_local_addr_free); break; } @@ -966,7 +968,7 @@ static struct inet6_protocol sctpv6_protocol = { .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL, }; -static struct sctp_af sctp_ipv6_specific = { +static struct sctp_af sctp_af_inet6 = { .sa_family = AF_INET6, .sctp_xmit = sctp_v6_xmit, .setsockopt = ipv6_setsockopt, @@ -998,7 +1000,7 @@ static struct sctp_af sctp_ipv6_specific = { #endif }; -static struct sctp_pf sctp_pf_inet6_specific = { +static struct sctp_pf sctp_pf_inet6 = { .event_msgname = sctp_inet6_event_msgname, .skb_msgname = sctp_inet6_skb_msgname, .af_supported = sctp_inet6_af_supported, @@ -1008,7 +1010,7 @@ static struct sctp_pf sctp_pf_inet6_specific = { .supported_addrs = sctp_inet6_supported_addrs, .create_accept_sk = sctp_v6_create_accept_sk, .addr_v4map = sctp_v6_addr_v4map, - .af = &sctp_ipv6_specific, + .af = &sctp_af_inet6, }; /* Initialize IPv6 support and register with socket layer. */ @@ -1017,10 +1019,10 @@ int sctp_v6_init(void) int rc; /* Register the SCTP specific PF_INET6 functions. */ - sctp_register_pf(&sctp_pf_inet6_specific, PF_INET6); + sctp_register_pf(&sctp_pf_inet6, PF_INET6); /* Register the SCTP specific AF_INET6 functions. */ - sctp_register_af(&sctp_ipv6_specific); + sctp_register_af(&sctp_af_inet6); rc = proto_register(&sctpv6_prot, 1); if (rc) @@ -1051,7 +1053,7 @@ void sctp_v6_exit(void) inet6_unregister_protosw(&sctpv6_seqpacket_protosw); inet6_unregister_protosw(&sctpv6_stream_protosw); proto_unregister(&sctpv6_prot); - list_del(&sctp_ipv6_specific.list); + list_del(&sctp_af_inet6.list); } /* Unregister with inet6 layer. */ diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c index 14e294e3762..cfeb07ea1b0 100644 --- a/net/sctp/objcnt.c +++ b/net/sctp/objcnt.c @@ -132,12 +132,11 @@ void sctp_dbg_objcnt_init(void) { struct proc_dir_entry *ent; - ent = create_proc_entry("sctp_dbg_objcnt", 0, proc_net_sctp); + ent = proc_create("sctp_dbg_objcnt", 0, + proc_net_sctp, &sctp_objcnt_ops); if (!ent) printk(KERN_WARNING "sctp_dbg_objcnt: Unable to create /proc entry.\n"); - else - ent->proc_fops = &sctp_objcnt_ops; } /* Cleanup the objcount entry in the proc filesystem. 
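The create_proc_entry()/proc_fops conversion repeated throughout this patch (sctp objcnt and proc, sunrpc, wanrouter, x25) closes the window in which a /proc entry is visible before its file operations have been set. Roughly, the before/after shape of the registration code looks like the sketch below; the entry name, parent directory, and fops are placeholders, not anything from the patch:

	#include <linux/module.h>
	#include <linux/proc_fs.h>

	/* Placeholders for illustration only. */
	extern struct proc_dir_entry *example_dir;
	extern const struct file_operations example_fops;

	static int __init example_proc_init(void)
	{
		struct proc_dir_entry *p;

		/* Old style: the entry briefly exists with no ->proc_fops.
		 *
		 *	p = create_proc_entry("example", S_IRUGO, example_dir);
		 *	if (!p)
		 *		return -ENOMEM;
		 *	p->proc_fops = &example_fops;
		 */

		/* New style: the file operations are supplied at creation time. */
		p = proc_create("example", S_IRUGO, example_dir, &example_fops);
		if (!p)
			return -ENOMEM;
		return 0;
	}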
*/ diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 69bb5a63fd8..973f1dbc2ec 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -108,12 +108,10 @@ int __init sctp_snmp_proc_init(void) { struct proc_dir_entry *p; - p = create_proc_entry("snmp", S_IRUGO, proc_net_sctp); + p = proc_create("snmp", S_IRUGO, proc_net_sctp, &sctp_snmp_seq_fops); if (!p) return -ENOMEM; - p->proc_fops = &sctp_snmp_seq_fops; - return 0; } @@ -258,12 +256,10 @@ int __init sctp_eps_proc_init(void) { struct proc_dir_entry *p; - p = create_proc_entry("eps", S_IRUGO, proc_net_sctp); + p = proc_create("eps", S_IRUGO, proc_net_sctp, &sctp_eps_seq_fops); if (!p) return -ENOMEM; - p->proc_fops = &sctp_eps_seq_fops; - return 0; } @@ -369,12 +365,11 @@ int __init sctp_assocs_proc_init(void) { struct proc_dir_entry *p; - p = create_proc_entry("assocs", S_IRUGO, proc_net_sctp); + p = proc_create("assocs", S_IRUGO, proc_net_sctp, + &sctp_assocs_seq_fops); if (!p) return -ENOMEM; - p->proc_fops = &sctp_assocs_seq_fops; - return 0; } diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 22a16571499..ad0a4069b95 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -628,6 +628,7 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev, struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; struct sctp_sockaddr_entry *addr = NULL; struct sctp_sockaddr_entry *temp; + int found = 0; switch (ev) { case NETDEV_UP: @@ -647,13 +648,14 @@ static int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev, list_for_each_entry_safe(addr, temp, &sctp_local_addr_list, list) { if (addr->a.v4.sin_addr.s_addr == ifa->ifa_local) { + found = 1; addr->valid = 0; list_del_rcu(&addr->list); break; } } spin_unlock_bh(&sctp_local_addr_lock); - if (addr && !addr->valid) + if (found) call_rcu(&addr->rcu, sctp_local_addr_free); break; } @@ -832,7 +834,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, return ip_queue_xmit(skb, ipfragok); } -static struct sctp_af sctp_ipv4_specific; +static struct sctp_af sctp_af_inet; static struct sctp_pf sctp_pf_inet = { .event_msgname = sctp_inet_event_msgname, @@ -844,7 +846,7 @@ static struct sctp_pf sctp_pf_inet = { .supported_addrs = sctp_inet_supported_addrs, .create_accept_sk = sctp_v4_create_accept_sk, .addr_v4map = sctp_v4_addr_v4map, - .af = &sctp_ipv4_specific, + .af = &sctp_af_inet }; /* Notifier for inetaddr addition/deletion events. */ @@ -906,7 +908,7 @@ static struct net_protocol sctp_protocol = { }; /* IPv4 address related functions. 
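The three SCTP address-list walkers (sctp_del_bind_addr, sctp_inet6addr_event, sctp_inetaddr_event) all get the same fix: after list_for_each_entry_safe() the cursor is never NULL — on a miss it points at the container of the list head — so the old addr && !addr->valid test could act on garbage. An explicit found flag records whether the loop actually removed an entry. A minimal sketch of the pattern with placeholder types (not the patch's code):

	#include <linux/kernel.h>
	#include <linux/errno.h>
	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct addr_entry {
		struct list_head list;
		int key;
		struct rcu_head rcu;
	};

	static LIST_HEAD(addr_list);

	static void addr_entry_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct addr_entry, rcu));
	}

	static int addr_entry_del(int key)
	{
		struct addr_entry *e, *tmp;
		int found = 0;

		list_for_each_entry_safe(e, tmp, &addr_list, list) {
			if (e->key == key) {
				found = 1;	/* the loop really matched something */
				list_del_rcu(&e->list);
				break;
			}
		}

		/* 'e' is never NULL here, so only 'found' is a safe indicator. */
		if (found) {
			call_rcu(&e->rcu, addr_entry_free_rcu);
			return 0;
		}
		return -ENOENT;
	}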
*/ -static struct sctp_af sctp_ipv4_specific = { +static struct sctp_af sctp_af_inet = { .sa_family = AF_INET, .sctp_xmit = sctp_v4_xmit, .setsockopt = ip_setsockopt, @@ -1192,7 +1194,7 @@ SCTP_STATIC __init int sctp_init(void) sctp_sysctl_register(); INIT_LIST_HEAD(&sctp_address_families); - sctp_register_af(&sctp_ipv4_specific); + sctp_register_af(&sctp_af_inet); status = proto_register(&sctp_prot, 1); if (status) @@ -1249,7 +1251,7 @@ err_v6_init: proto_unregister(&sctp_prot); err_proto_register: sctp_sysctl_unregister(); - list_del(&sctp_ipv4_specific.list); + list_del(&sctp_af_inet.list); free_pages((unsigned long)sctp_port_hashtable, get_order(sctp_port_hashsize * sizeof(struct sctp_bind_hashbucket))); @@ -1299,7 +1301,7 @@ SCTP_STATIC __exit void sctp_exit(void) inet_unregister_protosw(&sctp_seqpacket_protosw); sctp_sysctl_unregister(); - list_del(&sctp_ipv4_specific.list); + list_del(&sctp_af_inet.list); free_pages((unsigned long)sctp_assoc_hashtable, get_order(sctp_assoc_hashsize * diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index e45be4e3f80..578630e8e00 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -2375,6 +2375,14 @@ static int sctp_process_param(struct sctp_association *asoc, asoc->peer.ipv4_address = 0; asoc->peer.ipv6_address = 0; + /* Assume that peer supports the address family + * by which it sends a packet. + */ + if (peer_addr->sa.sa_family == AF_INET6) + asoc->peer.ipv6_address = 1; + else if (peer_addr->sa.sa_family == AF_INET) + asoc->peer.ipv4_address = 1; + /* Cycle through address types; avoid divide by 0. */ sat = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); if (sat) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index d47d5787e2e..d994d822900 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1964,7 +1964,7 @@ static int sctp_setsockopt_disable_fragments(struct sock *sk, static int sctp_setsockopt_events(struct sock *sk, char __user *optval, int optlen) { - if (optlen != sizeof(struct sctp_event_subscribe)) + if (optlen > sizeof(struct sctp_event_subscribe)) return -EINVAL; if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) return -EFAULT; @@ -2933,17 +2933,39 @@ static int sctp_setsockopt_maxburst(struct sock *sk, char __user *optval, int optlen) { + struct sctp_assoc_value params; + struct sctp_sock *sp; + struct sctp_association *asoc; int val; + int assoc_id = 0; - if (optlen != sizeof(int)) + if (optlen < sizeof(int)) return -EINVAL; - if (get_user(val, (int __user *)optval)) - return -EFAULT; - if (val < 0) + if (optlen == sizeof(int)) { + printk(KERN_WARNING + "SCTP: Use of int in max_burst socket option deprecated\n"); + printk(KERN_WARNING + "SCTP: Use struct sctp_assoc_value instead\n"); + if (copy_from_user(&val, optval, optlen)) + return -EFAULT; + } else if (optlen == sizeof(struct sctp_assoc_value)) { + if (copy_from_user(¶ms, optval, optlen)) + return -EFAULT; + val = params.assoc_value; + assoc_id = params.assoc_id; + } else return -EINVAL; - sctp_sk(sk)->max_burst = val; + sp = sctp_sk(sk); + + if (assoc_id != 0) { + asoc = sctp_id2assoc(sk, assoc_id); + if (!asoc) + return -EINVAL; + asoc->max_burst = val; + } else + sp->max_burst = val; return 0; } @@ -5005,20 +5027,45 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len, char __user *optval, int __user *optlen) { - int val; + struct sctp_assoc_value params; + struct sctp_sock *sp; + struct sctp_association *asoc; if (len < sizeof(int)) return -EINVAL; - len = sizeof(int); + if (len == sizeof(int)) { + 
printk(KERN_WARNING + "SCTP: Use of int in max_burst socket option deprecated\n"); + printk(KERN_WARNING + "SCTP: Use struct sctp_assoc_value instead\n"); + params.assoc_id = 0; + } else if (len == sizeof (struct sctp_assoc_value)) { + if (copy_from_user(¶ms, optval, len)) + return -EFAULT; + } else + return -EINVAL; - val = sctp_sk(sk)->max_burst; - if (put_user(len, optlen)) - return -EFAULT; - if (copy_to_user(optval, &val, len)) - return -EFAULT; + sp = sctp_sk(sk); + + if (params.assoc_id != 0) { + asoc = sctp_id2assoc(sk, params.assoc_id); + if (!asoc) + return -EINVAL; + params.assoc_value = asoc->max_burst; + } else + params.assoc_value = sp->max_burst; + + if (len == sizeof(int)) { + if (copy_to_user(optval, ¶ms.assoc_value, len)) + return -EFAULT; + } else { + if (copy_to_user(optval, ¶ms, len)) + return -EFAULT; + } + + return 0; - return -ENOTSUPP; } static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, @@ -5070,6 +5117,7 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, struct sctp_authchunks val; struct sctp_association *asoc; struct sctp_chunks_param *ch; + u32 num_chunks; char __user *to; if (len <= sizeof(struct sctp_authchunks)) @@ -5086,12 +5134,15 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len, ch = asoc->peer.peer_chunks; /* See if the user provided enough room for all the data */ - if (len < ntohs(ch->param_hdr.length)) + num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); + if (len < num_chunks) return -EINVAL; - len = ntohs(ch->param_hdr.length); + len = num_chunks; if (put_user(len, optlen)) return -EFAULT; + if (put_user(num_chunks, &p->gauth_number_of_chunks)) + return -EFAULT; if (copy_to_user(to, ch->chunks, len)) return -EFAULT; @@ -5105,6 +5156,7 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, struct sctp_authchunks val; struct sctp_association *asoc; struct sctp_chunks_param *ch; + u32 num_chunks; char __user *to; if (len <= sizeof(struct sctp_authchunks)) @@ -5123,12 +5175,15 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len, else ch = sctp_sk(sk)->ep->auth_chunk_list; - if (len < ntohs(ch->param_hdr.length)) + num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t); + if (len < num_chunks) return -EINVAL; - len = ntohs(ch->param_hdr.length); + len = num_chunks; if (put_user(len, optlen)) return -EFAULT; + if (put_user(num_chunks, &p->gauth_number_of_chunks)) + return -EFAULT; if (copy_to_user(to, ch->chunks, len)) return -EFAULT; @@ -6488,6 +6543,7 @@ struct proto sctp_prot = { .memory_pressure = &sctp_memory_pressure, .enter_memory_pressure = sctp_enter_memory_pressure, .memory_allocated = &sctp_memory_allocated, + .sockets_allocated = &sctp_sockets_allocated, REF_PROTO_INUSE(sctp) }; @@ -6521,6 +6577,7 @@ struct proto sctpv6_prot = { .memory_pressure = &sctp_memory_pressure, .enter_memory_pressure = sctp_enter_memory_pressure, .memory_allocated = &sctp_memory_allocated, + .sockets_allocated = &sctp_sockets_allocated, REF_PROTO_INUSE(sctpv6) }; #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index e27b11f18b7..b43f1f110f8 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c @@ -206,7 +206,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_assoc_change( * This field is the total length of the notification data, including * the notification header. 
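The SCTP max_burst set/getsockopt changes above accept both the deprecated plain int and the newer struct sctp_assoc_value, which also lets the option target a specific association. A user-space sketch of the preferred form, assuming lksctp-tools headers that define SCTP_MAX_BURST and struct sctp_assoc_value:

	#include <stdint.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/sctp.h>

	int set_max_burst(int fd, sctp_assoc_t assoc_id, uint32_t burst)
	{
		struct sctp_assoc_value av;

		memset(&av, 0, sizeof(av));
		av.assoc_id = assoc_id;		/* 0 applies to the socket default */
		av.assoc_value = burst;

		/* A bare int is still accepted, but the kernel now logs a
		 * deprecation warning for it. */
		return setsockopt(fd, IPPROTO_SCTP, SCTP_MAX_BURST,
				  &av, sizeof(av));
	}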
*/ - sac->sac_length = sizeof(struct sctp_assoc_change); + sac->sac_length = skb->len; /* Socket Extensions for SCTP * 5.3.1.1 SCTP_ASSOC_CHANGE diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 636c8e04e0b..b5f2786251b 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -316,31 +316,29 @@ static int create_cache_proc_entries(struct cache_detail *cd) cd->proc_ent->owner = cd->owner; cd->channel_ent = cd->content_ent = NULL; - p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR, cd->proc_ent); + p = proc_create("flush", S_IFREG|S_IRUSR|S_IWUSR, + cd->proc_ent, &cache_flush_operations); cd->flush_ent = p; if (p == NULL) goto out_nomem; - p->proc_fops = &cache_flush_operations; p->owner = cd->owner; p->data = cd; if (cd->cache_request || cd->cache_parse) { - p = create_proc_entry("channel", S_IFREG|S_IRUSR|S_IWUSR, - cd->proc_ent); + p = proc_create("channel", S_IFREG|S_IRUSR|S_IWUSR, + cd->proc_ent, &cache_file_operations); cd->channel_ent = p; if (p == NULL) goto out_nomem; - p->proc_fops = &cache_file_operations; p->owner = cd->owner; p->data = cd; } if (cd->cache_show) { - p = create_proc_entry("content", S_IFREG|S_IRUSR|S_IWUSR, - cd->proc_ent); + p = proc_create("content", S_IFREG|S_IRUSR|S_IWUSR, + cd->proc_ent, &content_file_operations); cd->content_ent = p; if (p == NULL) goto out_nomem; - p->proc_fops = &content_file_operations; p->owner = cd->owner; p->data = cd; } diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c index 5a16875f5ac..c6061a4346c 100644 --- a/net/sunrpc/stats.c +++ b/net/sunrpc/stats.c @@ -229,9 +229,8 @@ do_register(const char *name, void *data, const struct file_operations *fops) rpc_proc_init(); dprintk("RPC: registering /proc/net/rpc/%s\n", name); - ent = create_proc_entry(name, 0, proc_net_rpc); + ent = proc_create(name, 0, proc_net_rpc, fops); if (ent) { - ent->proc_fops = fops; ent->data = data; } return ent; diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 1d3e5fcc2cc..c475977de05 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -175,7 +175,7 @@ static int svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr) size_t base = xdr->page_base; unsigned int pglen = xdr->page_len; unsigned int flags = MSG_MORE; - char buf[RPC_MAX_ADDRBUFLEN]; + RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); slen = xdr->len; @@ -716,7 +716,7 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt) struct socket *newsock; struct svc_sock *newsvsk; int err, slen; - char buf[RPC_MAX_ADDRBUFLEN]; + RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); dprintk("svc: tcp_accept %p sock %p\n", svsk, sock); if (!sock) @@ -1206,10 +1206,10 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv, struct socket *sock; int error; int type; - char buf[RPC_MAX_ADDRBUFLEN]; struct sockaddr_storage addr; struct sockaddr *newsin = (struct sockaddr *)&addr; int newlen; + RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]); dprintk("svc: svc_create_socket(%s, %d, %s)\n", serv->sv_program->pg_name, protocol, diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 0598b229c11..981f190c1b3 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c @@ -156,7 +156,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp, struct svc_rdma_op_ctxt *ctxt; int ret = 0; - BUG_ON(sge_count >= 32); + BUG_ON(sge_count > RPCSVC_MAXPAGES); dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, " "write_len=%d, xdr_sge=%p, sge_count=%d\n", rmr, (unsigned long long)to, 
xdr_off, diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index f09444c451b..16fd3f6718f 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -54,7 +54,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, int flags); static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt); static void svc_rdma_release_rqst(struct svc_rqst *); -static void rdma_destroy_xprt(struct svcxprt_rdma *xprt); static void dto_tasklet_func(unsigned long data); static void svc_rdma_detach(struct svc_xprt *xprt); static void svc_rdma_free(struct svc_xprt *xprt); @@ -247,6 +246,7 @@ static void dto_tasklet_func(unsigned long data) sq_cq_reap(xprt); } + svc_xprt_put(&xprt->sc_xprt); spin_lock_irqsave(&dto_lock, flags); } spin_unlock_irqrestore(&dto_lock, flags); @@ -275,8 +275,10 @@ static void rq_comp_handler(struct ib_cq *cq, void *cq_context) * add it */ spin_lock_irqsave(&dto_lock, flags); - if (list_empty(&xprt->sc_dto_q)) + if (list_empty(&xprt->sc_dto_q)) { + svc_xprt_get(&xprt->sc_xprt); list_add_tail(&xprt->sc_dto_q, &dto_xprt_q); + } spin_unlock_irqrestore(&dto_lock, flags); /* Tasklet does all the work to avoid irqsave locks. */ @@ -386,8 +388,10 @@ static void sq_comp_handler(struct ib_cq *cq, void *cq_context) * add it */ spin_lock_irqsave(&dto_lock, flags); - if (list_empty(&xprt->sc_dto_q)) + if (list_empty(&xprt->sc_dto_q)) { + svc_xprt_get(&xprt->sc_xprt); list_add_tail(&xprt->sc_dto_q, &dto_xprt_q); + } spin_unlock_irqrestore(&dto_lock, flags); /* Tasklet does all the work to avoid irqsave locks. */ @@ -611,6 +615,7 @@ static int rdma_cma_handler(struct rdma_cm_id *cma_id, switch (event->event) { case RDMA_CM_EVENT_ESTABLISHED: /* Accept complete */ + svc_xprt_get(xprt); dprintk("svcrdma: Connection completed on DTO xprt=%p, " "cm_id=%p\n", xprt, cma_id); clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags); @@ -661,15 +666,15 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP); if (IS_ERR(listen_id)) { - rdma_destroy_xprt(cma_xprt); + svc_xprt_put(&cma_xprt->sc_xprt); dprintk("svcrdma: rdma_create_id failed = %ld\n", PTR_ERR(listen_id)); return (void *)listen_id; } ret = rdma_bind_addr(listen_id, sa); if (ret) { - rdma_destroy_xprt(cma_xprt); rdma_destroy_id(listen_id); + svc_xprt_put(&cma_xprt->sc_xprt); dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret); return ERR_PTR(ret); } @@ -678,8 +683,9 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG); if (ret) { rdma_destroy_id(listen_id); - rdma_destroy_xprt(cma_xprt); + svc_xprt_put(&cma_xprt->sc_xprt); dprintk("svcrdma: rdma_listen failed = %d\n", ret); + return ERR_PTR(ret); } /* @@ -820,6 +826,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) newxprt->sc_sq_depth = qp_attr.cap.max_send_wr; newxprt->sc_max_requests = qp_attr.cap.max_recv_wr; } + svc_xprt_get(&newxprt->sc_xprt); newxprt->sc_qp = newxprt->sc_cm_id->qp; /* Register all of physical memory */ @@ -891,8 +898,15 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) errout: dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret); + /* Take a reference in case the DTO handler runs */ + svc_xprt_get(&newxprt->sc_xprt); + if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp)) { + ib_destroy_qp(newxprt->sc_qp); + svc_xprt_put(&newxprt->sc_xprt); + } rdma_destroy_id(newxprt->sc_cm_id); - 
rdma_destroy_xprt(newxprt); + /* This call to put will destroy the transport */ + svc_xprt_put(&newxprt->sc_xprt); return NULL; } @@ -919,54 +933,60 @@ static void svc_rdma_release_rqst(struct svc_rqst *rqstp) rqstp->rq_xprt_ctxt = NULL; } -/* Disable data ready events for this connection */ +/* + * When connected, an svc_xprt has at least three references: + * + * - A reference held by the QP. We still hold that here because this + * code deletes the QP and puts the reference. + * + * - A reference held by the cm_id between the ESTABLISHED and + * DISCONNECTED events. If the remote peer disconnected first, this + * reference could be gone. + * + * - A reference held by the svc_recv code that called this function + * as part of close processing. + * + * At a minimum two references should still be held. + */ static void svc_rdma_detach(struct svc_xprt *xprt) { struct svcxprt_rdma *rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt); - unsigned long flags; - dprintk("svc: svc_rdma_detach(%p)\n", xprt); - /* - * Shutdown the connection. This will ensure we don't get any - * more events from the provider. - */ + + /* Disconnect and flush posted WQE */ rdma_disconnect(rdma->sc_cm_id); - rdma_destroy_id(rdma->sc_cm_id); - /* We may already be on the DTO list */ - spin_lock_irqsave(&dto_lock, flags); - if (!list_empty(&rdma->sc_dto_q)) - list_del_init(&rdma->sc_dto_q); - spin_unlock_irqrestore(&dto_lock, flags); + /* Destroy the QP if present (not a listener) */ + if (rdma->sc_qp && !IS_ERR(rdma->sc_qp)) { + ib_destroy_qp(rdma->sc_qp); + svc_xprt_put(xprt); + } + + /* Destroy the CM ID */ + rdma_destroy_id(rdma->sc_cm_id); } static void svc_rdma_free(struct svc_xprt *xprt) { struct svcxprt_rdma *rdma = (struct svcxprt_rdma *)xprt; dprintk("svcrdma: svc_rdma_free(%p)\n", rdma); - rdma_destroy_xprt(rdma); - kfree(rdma); -} - -static void rdma_destroy_xprt(struct svcxprt_rdma *xprt) -{ - if (xprt->sc_qp && !IS_ERR(xprt->sc_qp)) - ib_destroy_qp(xprt->sc_qp); - - if (xprt->sc_sq_cq && !IS_ERR(xprt->sc_sq_cq)) - ib_destroy_cq(xprt->sc_sq_cq); + /* We should only be called from kref_put */ + BUG_ON(atomic_read(&xprt->xpt_ref.refcount) != 0); + if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq)) + ib_destroy_cq(rdma->sc_sq_cq); - if (xprt->sc_rq_cq && !IS_ERR(xprt->sc_rq_cq)) - ib_destroy_cq(xprt->sc_rq_cq); + if (rdma->sc_rq_cq && !IS_ERR(rdma->sc_rq_cq)) + ib_destroy_cq(rdma->sc_rq_cq); - if (xprt->sc_phys_mr && !IS_ERR(xprt->sc_phys_mr)) - ib_dereg_mr(xprt->sc_phys_mr); + if (rdma->sc_phys_mr && !IS_ERR(rdma->sc_phys_mr)) + ib_dereg_mr(rdma->sc_phys_mr); - if (xprt->sc_pd && !IS_ERR(xprt->sc_pd)) - ib_dealloc_pd(xprt->sc_pd); + if (rdma->sc_pd && !IS_ERR(rdma->sc_pd)) + ib_dealloc_pd(rdma->sc_pd); - destroy_context_cache(xprt->sc_ctxt_head); + destroy_context_cache(rdma->sc_ctxt_head); + kfree(rdma); } static int svc_rdma_has_wspace(struct svc_xprt *xprt) diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 02c522c17de..a564c1a39ec 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -614,7 +614,11 @@ xprt_rdma_free(void *buffer) return; req = container_of(buffer, struct rpcrdma_req, rl_xdr_buf[0]); - r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf); + if (req->rl_iov.length == 0) { /* see allocate above */ + r_xprt = container_of(((struct rpcrdma_req *) req->rl_buffer)->rl_buffer, + struct rpcrdma_xprt, rx_buf); + } else + r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf); rep = req->rl_reply; 
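The svcxprt_rdma changes drop rdma_destroy_xprt() and instead pin the transport with svc_xprt_get() whenever a completion handler puts it on the shared DTO queue, with the tasklet dropping that reference once it has reaped the queues; without it, a transport torn down by close processing could still be linked on dto_xprt_q when the tasklet ran. The sketch below shows only that queue/refcount discipline in isolation, using kref as a stand-in for the svc_xprt reference; the structure and function names are illustrative, not the patch's:

	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct xprt_stub {
		struct kref ref;
		struct list_head dto_q;
	};

	static LIST_HEAD(dto_xprt_q);
	static DEFINE_SPINLOCK(dto_lock);

	static void xprt_stub_release(struct kref *ref)
	{
		kfree(container_of(ref, struct xprt_stub, ref));
	}

	/* Interrupt context: queue for deferred work, pinning the object first. */
	static void completion_handler(struct xprt_stub *x)
	{
		unsigned long flags;

		spin_lock_irqsave(&dto_lock, flags);
		if (list_empty(&x->dto_q)) {
			kref_get(&x->ref);	/* reference travels with the queue entry */
			list_add_tail(&x->dto_q, &dto_xprt_q);
		}
		spin_unlock_irqrestore(&dto_lock, flags);
	}

	/* Deferred context: process entries, then drop the queue's reference. */
	static void dto_tasklet(void)
	{
		struct xprt_stub *x;
		unsigned long flags;

		spin_lock_irqsave(&dto_lock, flags);
		while (!list_empty(&dto_xprt_q)) {
			x = list_entry(dto_xprt_q.next, struct xprt_stub, dto_q);
			list_del_init(&x->dto_q);
			spin_unlock_irqrestore(&dto_lock, flags);

			/* ... reap completions for x here ... */

			kref_put(&x->ref, xprt_stub_release); /* may free x */
			spin_lock_irqsave(&dto_lock, flags);
		}
		spin_unlock_irqrestore(&dto_lock, flags);
	}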
dprintk("RPC: %s: called on 0x%p%s\n", diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c index 95b373913aa..4bb3404f610 100644 --- a/net/tipc/cluster.c +++ b/net/tipc/cluster.c @@ -142,7 +142,7 @@ void tipc_cltr_attach_node(struct cluster *c_ptr, struct node *n_ptr) max_n_num = tipc_highest_allowed_slave; assert(n_num > 0); assert(n_num <= max_n_num); - assert(c_ptr->nodes[n_num] == 0); + assert(c_ptr->nodes[n_num] == NULL); c_ptr->nodes[n_num] = n_ptr; if (n_num > c_ptr->highest_node) c_ptr->highest_node = n_num; diff --git a/net/tipc/link.c b/net/tipc/link.c index 1b17fecee74..cefa99824c5 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -3251,7 +3251,7 @@ static void link_print(struct link *l_ptr, struct print_buf *buf, if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) - msg_seqno(buf_msg(l_ptr->first_out))) != (l_ptr->out_queue_size - 1)) - || (l_ptr->last_out->next != 0)) { + || (l_ptr->last_out->next != NULL)) { tipc_printf(buf, "\nSend queue inconsistency\n"); tipc_printf(buf, "first_out= %x ", l_ptr->first_out); tipc_printf(buf, "next_out= %x ", l_ptr->next_out); diff --git a/net/tipc/ref.c b/net/tipc/ref.c index 6704a58c785..c38744c96ed 100644 --- a/net/tipc/ref.c +++ b/net/tipc/ref.c @@ -148,7 +148,7 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock) reference = (next_plus_upper & ~index_mask) + index; entry->data.reference = reference; entry->object = object; - if (lock != 0) + if (lock != NULL) *lock = &entry->lock; spin_unlock_bh(&entry->lock); } diff --git a/net/tipc/zone.c b/net/tipc/zone.c index 114e173f11a..3506f856344 100644 --- a/net/tipc/zone.c +++ b/net/tipc/zone.c @@ -82,7 +82,7 @@ void tipc_zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr) assert(c_ptr->addr); assert(c_num <= tipc_max_clusters); - assert(z_ptr->clusters[c_num] == 0); + assert(z_ptr->clusters[c_num] == NULL); z_ptr->clusters[c_num] = c_ptr; } diff --git a/net/wanrouter/wanproc.c b/net/wanrouter/wanproc.c index f2e54c3f064..5bebe40bf4e 100644 --- a/net/wanrouter/wanproc.c +++ b/net/wanrouter/wanproc.c @@ -292,14 +292,12 @@ int __init wanrouter_proc_init(void) if (!proc_router) goto fail; - p = create_proc_entry("config", S_IRUGO, proc_router); + p = proc_create("config", S_IRUGO, proc_router, &config_fops); if (!p) goto fail_config; - p->proc_fops = &config_fops; - p = create_proc_entry("status", S_IRUGO, proc_router); + p = proc_create("status", S_IRUGO, proc_router, &status_fops); if (!p) goto fail_stat; - p->proc_fops = &status_fops; return 0; fail_stat: remove_proc_entry("config", proc_router); @@ -329,10 +327,10 @@ int wanrouter_proc_add(struct wan_device* wandev) if (wandev->magic != ROUTER_MAGIC) return -EINVAL; - wandev->dent = create_proc_entry(wandev->name, S_IRUGO, proc_router); + wandev->dent = proc_create(wandev->name, S_IRUGO, + proc_router, &wandev_fops); if (!wandev->dent) return -ENOMEM; - wandev->dent->proc_fops = &wandev_fops; wandev->dent->data = wandev; return 0; } diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c index 3f52b09bed0..1afa44d25be 100644 --- a/net/x25/x25_proc.c +++ b/net/x25/x25_proc.c @@ -312,20 +312,18 @@ int __init x25_proc_init(void) if (!x25_proc_dir) goto out; - p = create_proc_entry("route", S_IRUGO, x25_proc_dir); + p = proc_create("route", S_IRUGO, x25_proc_dir, &x25_seq_route_fops); if (!p) goto out_route; - p->proc_fops = &x25_seq_route_fops; - p = create_proc_entry("socket", S_IRUGO, x25_proc_dir); + p = proc_create("socket", S_IRUGO, x25_proc_dir, &x25_seq_socket_fops); if (!p) goto out_socket; - p->proc_fops = 
&x25_seq_socket_fops; - p = create_proc_entry("forward", S_IRUGO, x25_proc_dir); + p = proc_create("forward", S_IRUGO, x25_proc_dir, + &x25_seq_forward_fops); if (!p) goto out_forward; - p->proc_fops = &x25_seq_forward_fops; rc = 0; out: