Diffstat (limited to 'net')
-rw-r--r--net/8021q/vlan.c27
-rw-r--r--net/8021q/vlan_dev.c107
-rw-r--r--net/8021q/vlan_netlink.c20
-rw-r--r--net/8021q/vlanproc.c2
-rw-r--r--net/9p/client.c21
-rw-r--r--net/9p/error.c2
-rw-r--r--net/9p/trans_fd.c8
-rw-r--r--net/9p/trans_rdma.c9
-rw-r--r--net/9p/trans_virtio.c11
-rw-r--r--net/Kconfig20
-rw-r--r--net/Makefile1
-rw-r--r--net/appletalk/aarp.c16
-rw-r--r--net/appletalk/ddp.c71
-rw-r--r--net/atm/br2684.c44
-rw-r--r--net/atm/clip.c17
-rw-r--r--net/atm/lec.c23
-rw-r--r--net/atm/mpc.c8
-rw-r--r--net/atm/proc.c9
-rw-r--r--net/bluetooth/Kconfig1
-rw-r--r--net/bluetooth/af_bluetooth.c4
-rw-r--r--net/bluetooth/bnep/core.c5
-rw-r--r--net/bluetooth/bnep/netdev.c9
-rw-r--r--net/bluetooth/hci_conn.c17
-rw-r--r--net/bluetooth/hci_core.c2
-rw-r--r--net/bluetooth/hci_event.c2
-rw-r--r--net/bluetooth/hci_sysfs.c4
-rw-r--r--net/bluetooth/hidp/core.c67
-rw-r--r--net/bluetooth/hidp/hidp.h2
-rw-r--r--net/bluetooth/l2cap.c1357
-rw-r--r--net/bluetooth/rfcomm/core.c69
-rw-r--r--net/bluetooth/sco.c49
-rw-r--r--net/bridge/br_device.c4
-rw-r--r--net/bridge/br_forward.c3
-rw-r--r--net/bridge/br_if.c7
-rw-r--r--net/bridge/br_netfilter.c98
-rw-r--r--net/bridge/br_private.h6
-rw-r--r--net/bridge/br_stp.c2
-rw-r--r--net/bridge/br_sysfs_if.c17
-rw-r--r--net/bridge/netfilter/ebt_log.c29
-rw-r--r--net/bridge/netfilter/ebt_ulog.c2
-rw-r--r--net/bridge/netfilter/ebtable_broute.c2
-rw-r--r--net/bridge/netfilter/ebtable_filter.c8
-rw-r--r--net/bridge/netfilter/ebtable_nat.c6
-rw-r--r--net/bridge/netfilter/ebtables.c13
-rw-r--r--net/can/af_can.c24
-rw-r--r--net/can/bcm.c85
-rw-r--r--net/can/proc.c281
-rw-r--r--net/can/raw.c1
-rw-r--r--net/compat.c17
-rw-r--r--net/core/datagram.c3
-rw-r--r--net/core/dev.c662
-rw-r--r--net/core/drop_monitor.c14
-rw-r--r--net/core/ethtool.c32
-rw-r--r--net/core/gen_estimator.c12
-rw-r--r--net/core/gen_stats.c11
-rw-r--r--net/core/neighbour.c95
-rw-r--r--net/core/net-sysfs.c4
-rw-r--r--net/core/net_namespace.c35
-rw-r--r--net/core/netpoll.c11
-rw-r--r--net/core/pktgen.c700
-rw-r--r--net/core/rtnetlink.c37
-rw-r--r--net/core/skbuff.c3
-rw-r--r--net/core/sock.c22
-rw-r--r--net/dcb/dcbnl.c130
-rw-r--r--net/dccp/ccids/Kconfig6
-rw-r--r--net/dccp/ccids/ccid2.c2
-rw-r--r--net/dccp/ccids/ccid2.h8
-rw-r--r--net/dccp/ccids/ccid3.c9
-rw-r--r--net/dccp/ccids/ccid3.h50
-rw-r--r--net/dccp/ccids/lib/loss_interval.c7
-rw-r--r--net/dccp/ccids/lib/loss_interval.h2
-rw-r--r--net/dccp/ccids/lib/packet_history.c4
-rw-r--r--net/dccp/ccids/lib/packet_history.h1
-rw-r--r--net/dccp/ccids/lib/tfrc.h4
-rw-r--r--net/dccp/ccids/lib/tfrc_equation.c26
-rw-r--r--net/dccp/feat.c7
-rw-r--r--net/dccp/ipv4.c4
-rw-r--r--net/dccp/ipv6.c12
-rw-r--r--net/dccp/proto.c11
-rw-r--r--net/decnet/dn_neigh.c6
-rw-r--r--net/decnet/dn_route.c4
-rw-r--r--net/dsa/dsa_priv.h6
-rw-r--r--net/dsa/tag_dsa.c2
-rw-r--r--net/dsa/tag_edsa.c2
-rw-r--r--net/dsa/tag_trailer.c2
-rw-r--r--net/econet/af_econet.c5
-rw-r--r--net/ieee802154/Makefile2
-rw-r--r--net/ieee802154/af_ieee802154.c10
-rw-r--r--net/ieee802154/dgram.c88
-rw-r--r--net/ieee802154/netlink.c176
-rw-r--r--net/ieee802154/nl_policy.c4
-rw-r--r--net/ieee802154/raw.c19
-rw-r--r--net/ieee802154/wpan-class.c159
-rw-r--r--net/ipv4/af_inet.c142
-rw-r--r--net/ipv4/ah4.c2
-rw-r--r--net/ipv4/arp.c8
-rw-r--r--net/ipv4/devinet.c6
-rw-r--r--net/ipv4/esp4.c2
-rw-r--r--net/ipv4/fib_trie.c101
-rw-r--r--net/ipv4/icmp.c2
-rw-r--r--net/ipv4/igmp.c22
-rw-r--r--net/ipv4/inet_timewait_sock.c2
-rw-r--r--net/ipv4/ip_gre.c12
-rw-r--r--net/ipv4/ip_input.c2
-rw-r--r--net/ipv4/ip_output.c4
-rw-r--r--net/ipv4/ipcomp.c2
-rw-r--r--net/ipv4/ipip.c8
-rw-r--r--net/ipv4/ipmr.c10
-rw-r--r--net/ipv4/netfilter/arp_tables.c47
-rw-r--r--net/ipv4/netfilter/arptable_filter.c4
-rw-r--r--net/ipv4/netfilter/ip_tables.c51
-rw-r--r--net/ipv4/netfilter/iptable_filter.c10
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c16
-rw-r--r--net/ipv4/netfilter/iptable_raw.c10
-rw-r--r--net/ipv4/netfilter/iptable_security.c12
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c22
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c8
-rw-r--r--net/ipv4/netfilter/nf_nat_rule.c6
-rw-r--r--net/ipv4/netfilter/nf_nat_standalone.c8
-rw-r--r--net/ipv4/protocol.c25
-rw-r--r--net/ipv4/raw.c9
-rw-r--r--net/ipv4/route.c24
-rw-r--r--net/ipv4/syncookies.c5
-rw-r--r--net/ipv4/tcp.c22
-rw-r--r--net/ipv4/tcp_cong.c4
-rw-r--r--net/ipv4/tcp_input.c7
-rw-r--r--net/ipv4/tcp_ipv4.c70
-rw-r--r--net/ipv4/tcp_minisocks.c29
-rw-r--r--net/ipv4/tcp_output.c63
-rw-r--r--net/ipv4/tcp_timer.c16
-rw-r--r--net/ipv4/tunnel4.c4
-rw-r--r--net/ipv4/udp.c156
-rw-r--r--net/ipv4/udplite.c2
-rw-r--r--net/ipv4/xfrm4_policy.c37
-rw-r--r--net/ipv6/addrconf.c35
-rw-r--r--net/ipv6/af_inet6.c34
-rw-r--r--net/ipv6/ah6.c2
-rw-r--r--net/ipv6/esp6.c2
-rw-r--r--net/ipv6/exthdrs.c6
-rw-r--r--net/ipv6/icmp.c21
-rw-r--r--net/ipv6/ip6_fib.c16
-rw-r--r--net/ipv6/ip6_input.c8
-rw-r--r--net/ipv6/ip6_output.c44
-rw-r--r--net/ipv6/ip6_tunnel.c6
-rw-r--r--net/ipv6/ip6mr.c17
-rw-r--r--net/ipv6/ipcomp6.c2
-rw-r--r--net/ipv6/ipv6_sockglue.c5
-rw-r--r--net/ipv6/mcast.c20
-rw-r--r--net/ipv6/ndisc.c24
-rw-r--r--net/ipv6/netfilter/ip6_tables.c48
-rw-r--r--net/ipv6/netfilter/ip6t_eui64.c9
-rw-r--r--net/ipv6/netfilter/ip6table_filter.c10
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c16
-rw-r--r--net/ipv6/netfilter/ip6table_raw.c10
-rw-r--r--net/ipv6/netfilter/ip6table_security.c12
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c18
-rw-r--r--net/ipv6/proc.c4
-rw-r--r--net/ipv6/protocol.c21
-rw-r--r--net/ipv6/raw.c9
-rw-r--r--net/ipv6/reassembly.c2
-rw-r--r--net/ipv6/route.c36
-rw-r--r--net/ipv6/sit.c11
-rw-r--r--net/ipv6/syncookies.c5
-rw-r--r--net/ipv6/sysctl_net_ipv6.c4
-rw-r--r--net/ipv6/tcp_ipv6.c28
-rw-r--r--net/ipv6/tunnel6.c4
-rw-r--r--net/ipv6/udp.c160
-rw-r--r--net/ipv6/udplite.c2
-rw-r--r--net/ipv6/xfrm6_policy.c38
-rw-r--r--net/irda/af_irda.c1
-rw-r--r--net/irda/ircomm/ircomm_event.c4
-rw-r--r--net/irda/ircomm/ircomm_tty_attach.c4
-rw-r--r--net/irda/iriap.c4
-rw-r--r--net/irda/irlan/irlan_common.c4
-rw-r--r--net/irda/irlan/irlan_eth.c10
-rw-r--r--net/irda/irlap.c2
-rw-r--r--net/irda/irlap_event.c4
-rw-r--r--net/irda/irlmp_event.c6
-rw-r--r--net/irda/irnet/irnet_ppp.h2
-rw-r--r--net/irda/irnetlink.c2
-rw-r--r--net/irda/irproc.c14
-rw-r--r--net/iucv/af_iucv.c33
-rw-r--r--net/iucv/iucv.c38
-rw-r--r--net/key/af_key.c4
-rw-r--r--net/lapb/lapb_iface.c2
-rw-r--r--net/llc/af_llc.c1
-rw-r--r--net/llc/llc_proc.c2
-rw-r--r--net/mac80211/Kconfig42
-rw-r--r--net/mac80211/Makefile4
-rw-r--r--net/mac80211/agg-tx.c11
-rw-r--r--net/mac80211/cfg.c234
-rw-r--r--net/mac80211/debugfs.c2
-rw-r--r--net/mac80211/debugfs_netdev.c52
-rw-r--r--net/mac80211/debugfs_sta.c98
-rw-r--r--net/mac80211/driver-ops.h119
-rw-r--r--net/mac80211/driver-trace.c9
-rw-r--r--net/mac80211/driver-trace.h672
-rw-r--r--net/mac80211/event.c23
-rw-r--r--net/mac80211/ibss.c23
-rw-r--r--net/mac80211/ieee80211_i.h260
-rw-r--r--net/mac80211/iface.c170
-rw-r--r--net/mac80211/key.c28
-rw-r--r--net/mac80211/main.c200
-rw-r--r--net/mac80211/mesh.c207
-rw-r--r--net/mac80211/mesh.h35
-rw-r--r--net/mac80211/mesh_hwmp.c47
-rw-r--r--net/mac80211/mesh_pathtbl.c177
-rw-r--r--net/mac80211/mesh_plink.c2
-rw-r--r--net/mac80211/mlme.c1988
-rw-r--r--net/mac80211/pm.c19
-rw-r--r--net/mac80211/rate.c31
-rw-r--r--net/mac80211/rc80211_minstrel.c25
-rw-r--r--net/mac80211/rc80211_minstrel.h1
-rw-r--r--net/mac80211/rc80211_minstrel_debugfs.c4
-rw-r--r--net/mac80211/rc80211_pid_algo.c28
-rw-r--r--net/mac80211/rc80211_pid_debugfs.c2
-rw-r--r--net/mac80211/rx.c259
-rw-r--r--net/mac80211/scan.c375
-rw-r--r--net/mac80211/sta_info.c2
-rw-r--r--net/mac80211/sta_info.h30
-rw-r--r--net/mac80211/tx.c580
-rw-r--r--net/mac80211/util.c130
-rw-r--r--net/mac80211/wep.c52
-rw-r--r--net/mac80211/wep.h7
-rw-r--r--net/mac80211/wext.c633
-rw-r--r--net/mac80211/wme.c6
-rw-r--r--net/mac80211/wme.h3
-rw-r--r--net/mac80211/wpa.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_app.c19
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c17
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c56
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c67
-rw-r--r--net/netfilter/ipvs/ip_vs_dh.c7
-rw-r--r--net/netfilter/ipvs/ip_vs_est.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c7
-rw-r--r--net/netfilter/ipvs/ip_vs_lblc.c9
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c17
-rw-r--r--net/netfilter/ipvs/ip_vs_lc.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_nq.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_proto.c12
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_ah_esp.c7
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_tcp.c7
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_udp.c7
-rw-r--r--net/netfilter/ipvs/ip_vs_rr.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_sched.c41
-rw-r--r--net/netfilter/ipvs/ip_vs_sed.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_sh.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c53
-rw-r--r--net/netfilter/ipvs/ip_vs_wlc.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_wrr.c14
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c43
-rw-r--r--net/netfilter/nf_conntrack_core.c12
-rw-r--r--net/netfilter/nf_conntrack_netlink.c54
-rw-r--r--net/netfilter/nfnetlink.c2
-rw-r--r--net/netfilter/nfnetlink_log.c6
-rw-r--r--net/netfilter/nfnetlink_queue.c9
-rw-r--r--net/netfilter/x_tables.c9
-rw-r--r--net/netfilter/xt_CONNMARK.c134
-rw-r--r--net/netfilter/xt_DSCP.c46
-rw-r--r--net/netfilter/xt_MARK.c163
-rw-r--r--net/netfilter/xt_RATEEST.c2
-rw-r--r--net/netfilter/xt_connmark.c101
-rw-r--r--net/netfilter/xt_conntrack.c155
-rw-r--r--net/netfilter/xt_dscp.c17
-rw-r--r--net/netfilter/xt_hashlimit.c8
-rw-r--r--net/netfilter/xt_iprange.c45
-rw-r--r--net/netfilter/xt_mark.c86
-rw-r--r--net/netfilter/xt_osf.c6
-rw-r--r--net/netfilter/xt_owner.c130
-rw-r--r--net/netfilter/xt_quota.c2
-rw-r--r--net/netlabel/netlabel_kapi.c6
-rw-r--r--net/netlink/af_netlink.c127
-rw-r--r--net/netlink/genetlink.c191
-rw-r--r--net/netrom/af_netrom.c1
-rw-r--r--net/netrom/nr_dev.c6
-rw-r--r--net/netrom/nr_route.c21
-rw-r--r--net/packet/af_packet.c246
-rw-r--r--net/phonet/datagram.c5
-rw-r--r--net/phonet/pep-gprs.c6
-rw-r--r--net/phonet/pep.c7
-rw-r--r--net/phonet/pn_dev.c43
-rw-r--r--net/phonet/pn_netlink.c2
-rw-r--r--net/phonet/socket.c99
-rw-r--r--net/rds/Kconfig26
-rw-r--r--net/rds/Makefile11
-rw-r--r--net/rds/af_rds.c10
-rw-r--r--net/rds/bind.c3
-rw-r--r--net/rds/cong.c1
-rw-r--r--net/rds/connection.c54
-rw-r--r--net/rds/ib.c7
-rw-r--r--net/rds/ib.h18
-rw-r--r--net/rds/ib_cm.c62
-rw-r--r--net/rds/ib_rdma.c12
-rw-r--r--net/rds/ib_recv.c53
-rw-r--r--net/rds/ib_stats.c4
-rw-r--r--net/rds/ib_sysctl.c12
-rw-r--r--net/rds/info.c3
-rw-r--r--net/rds/iw.c16
-rw-r--r--net/rds/iw.h1
-rw-r--r--net/rds/iw_rdma.c28
-rw-r--r--net/rds/iw_send.c2
-rw-r--r--net/rds/iw_stats.c4
-rw-r--r--net/rds/message.c6
-rw-r--r--net/rds/page.c3
-rw-r--r--net/rds/rdma_transport.c16
-rw-r--r--net/rds/rds.h9
-rw-r--r--net/rds/recv.c28
-rw-r--r--net/rds/send.c3
-rw-r--r--net/rds/stats.c6
-rw-r--r--net/rds/tcp.c320
-rw-r--r--net/rds/tcp.h93
-rw-r--r--net/rds/tcp_connect.c153
-rw-r--r--net/rds/tcp_listen.c199
-rw-r--r--net/rds/tcp_recv.c356
-rw-r--r--net/rds/tcp_send.c263
-rw-r--r--net/rds/tcp_stats.c74
-rw-r--r--net/rds/threads.c2
-rw-r--r--net/rds/transport.c31
-rw-r--r--net/rfkill/core.c14
-rw-r--r--net/rose/af_rose.c5
-rw-r--r--net/rose/rose_dev.c4
-rw-r--r--net/rxrpc/ar-ack.c8
-rw-r--r--net/rxrpc/ar-call.c2
-rw-r--r--net/rxrpc/ar-internal.h32
-rw-r--r--net/rxrpc/ar-key.c914
-rw-r--r--net/rxrpc/ar-security.c8
-rw-r--r--net/rxrpc/rxkad.c47
-rw-r--r--net/sched/Makefile2
-rw-r--r--net/sched/act_api.c2
-rw-r--r--net/sched/cls_api.c12
-rw-r--r--net/sched/sch_api.c152
-rw-r--r--net/sched/sch_atm.c2
-rw-r--r--net/sched/sch_cbq.c40
-rw-r--r--net/sched/sch_drr.c6
-rw-r--r--net/sched/sch_generic.c223
-rw-r--r--net/sched/sch_hfsc.c8
-rw-r--r--net/sched/sch_htb.c37
-rw-r--r--net/sched/sch_ingress.c14
-rw-r--r--net/sched/sch_mq.c241
-rw-r--r--net/sched/sch_multiq.c34
-rw-r--r--net/sched/sch_prio.c33
-rw-r--r--net/sched/sch_red.c21
-rw-r--r--net/sched/sch_sfq.c7
-rw-r--r--net/sched/sch_tbf.c22
-rw-r--r--net/sched/sch_teql.c10
-rw-r--r--net/sctp/associola.c93
-rw-r--r--net/sctp/bind_addr.c21
-rw-r--r--net/sctp/chunk.c62
-rw-r--r--net/sctp/debug.c14
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/output.c188
-rw-r--r--net/sctp/outqueue.c47
-rw-r--r--net/sctp/proc.c4
-rw-r--r--net/sctp/protocol.c20
-rw-r--r--net/sctp/sm_make_chunk.c25
-rw-r--r--net/sctp/sm_sideeffect.c56
-rw-r--r--net/sctp/sm_statefuns.c68
-rw-r--r--net/sctp/socket.c38
-rw-r--r--net/sctp/sysctl.c12
-rw-r--r--net/sctp/transport.c3
-rw-r--r--net/socket.c3
-rw-r--r--net/sunrpc/clnt.c1
-rw-r--r--net/sunrpc/rpc_pipe.c2
-rw-r--r--net/tipc/bearer.c2
-rw-r--r--net/tipc/netlink.c2
-rw-r--r--net/tipc/socket.c6
-rw-r--r--net/unix/af_unix.c5
-rw-r--r--net/wireless/Kconfig75
-rw-r--r--net/wireless/Makefile5
-rw-r--r--net/wireless/chan.c89
-rw-r--r--net/wireless/core.c499
-rw-r--r--net/wireless/core.h249
-rw-r--r--net/wireless/debugfs.c14
-rw-r--r--net/wireless/debugfs.h8
-rw-r--r--net/wireless/ibss.c294
-rw-r--r--net/wireless/mlme.c627
-rw-r--r--net/wireless/nl80211.c1724
-rw-r--r--net/wireless/nl80211.h71
-rw-r--r--net/wireless/reg.c169
-rw-r--r--net/wireless/reg.h15
-rw-r--r--net/wireless/scan.c164
-rw-r--r--net/wireless/sme.c938
-rw-r--r--net/wireless/util.c190
-rw-r--r--net/wireless/wext-compat.c882
-rw-r--r--net/wireless/wext-compat.h49
-rw-r--r--net/wireless/wext-sme.c404
-rw-r--r--net/wireless/wext.c257
-rw-r--r--net/xfrm/xfrm_hash.h2
-rw-r--r--net/xfrm/xfrm_proc.c2
389 files changed, 18279 insertions, 8502 deletions
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index fe649081fbd..8836575f9d7 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -225,12 +225,6 @@ int vlan_check_real_dev(struct net_device *real_dev, u16 vlan_id)
return -EOPNOTSUPP;
}
- /* The real device must be up and operating in order to
- * assosciate a VLAN device with it.
- */
- if (!(real_dev->flags & IFF_UP))
- return -ENETDOWN;
-
if (__find_vlan_dev(real_dev, vlan_id) != NULL)
return -EEXIST;
@@ -336,12 +330,13 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
}
- new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name,
- vlan_setup);
+ new_dev = alloc_netdev_mq(sizeof(struct vlan_dev_info), name,
+ vlan_setup, real_dev->num_tx_queues);
if (new_dev == NULL)
return -ENOBUFS;
+ new_dev->real_num_tx_queues = real_dev->real_num_tx_queues;
dev_net_set(new_dev, net);
/* need 4 bytes for extra VLAN header info,
* hope the underlying device can handle it.
@@ -397,6 +392,9 @@ static void vlan_transfer_features(struct net_device *dev,
vlandev->features &= ~dev->vlan_features;
vlandev->features |= dev->features & dev->vlan_features;
vlandev->gso_max_size = dev->gso_max_size;
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+ vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
+#endif
if (old_features != vlandev->features)
netdev_features_change(vlandev);
@@ -468,6 +466,19 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
}
break;
+ case NETDEV_CHANGEMTU:
+ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
+ vlandev = vlan_group_get_device(grp, i);
+ if (!vlandev)
+ continue;
+
+ if (vlandev->mtu <= dev->mtu)
+ continue;
+
+ dev_set_mtu(vlandev, dev->mtu);
+ }
+ break;
+
case NETDEV_FEAT_CHANGE:
/* Propagate device features to underlying device */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 96bad8f233e..4198ec5c8ab 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -288,10 +288,14 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
return rc;
}
-static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
- struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+ int i = skb_get_queue_mapping(skb);
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
+ unsigned int len;
+ int ret;
/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
*
@@ -317,29 +321,43 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
vlan_dev_info(dev)->cnt_inc_headroom_on_tx++;
}
- txq->tx_packets++;
- txq->tx_bytes += skb->len;
skb->dev = vlan_dev_info(dev)->real_dev;
- dev_queue_xmit(skb);
+ len = skb->len;
+ ret = dev_queue_xmit(skb);
+
+ if (likely(ret == NET_XMIT_SUCCESS)) {
+ txq->tx_packets++;
+ txq->tx_bytes += len;
+ } else
+ txq->tx_dropped++;
+
return NETDEV_TX_OK;
}
-static int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
- struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+ int i = skb_get_queue_mapping(skb);
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
u16 vlan_tci;
+ unsigned int len;
+ int ret;
vlan_tci = vlan_dev_info(dev)->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
- txq->tx_packets++;
- txq->tx_bytes += skb->len;
-
skb->dev = vlan_dev_info(dev)->real_dev;
- dev_queue_xmit(skb);
+ len = skb->len;
+ ret = dev_queue_xmit(skb);
+
+ if (likely(ret == NET_XMIT_SUCCESS)) {
+ txq->tx_packets++;
+ txq->tx_bytes += len;
+ } else
+ txq->tx_dropped++;
+
return NETDEV_TX_OK;
}
@@ -561,6 +579,55 @@ static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa)
return err;
}
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+static int vlan_dev_fcoe_ddp_setup(struct net_device *dev, u16 xid,
+ struct scatterlist *sgl, unsigned int sgc)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int rc = 0;
+
+ if (ops->ndo_fcoe_ddp_setup)
+ rc = ops->ndo_fcoe_ddp_setup(real_dev, xid, sgl, sgc);
+
+ return rc;
+}
+
+static int vlan_dev_fcoe_ddp_done(struct net_device *dev, u16 xid)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int len = 0;
+
+ if (ops->ndo_fcoe_ddp_done)
+ len = ops->ndo_fcoe_ddp_done(real_dev, xid);
+
+ return len;
+}
+
+static int vlan_dev_fcoe_enable(struct net_device *dev)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int rc = -EINVAL;
+
+ if (ops->ndo_fcoe_enable)
+ rc = ops->ndo_fcoe_enable(real_dev);
+ return rc;
+}
+
+static int vlan_dev_fcoe_disable(struct net_device *dev)
+{
+ struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
+ const struct net_device_ops *ops = real_dev->netdev_ops;
+ int rc = -EINVAL;
+
+ if (ops->ndo_fcoe_disable)
+ rc = ops->ndo_fcoe_disable(real_dev);
+ return rc;
+}
+#endif
+
static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
{
struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
@@ -635,6 +702,10 @@ static int vlan_dev_init(struct net_device *dev)
if (is_zero_ether_addr(dev->broadcast))
memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+ dev->fcoe_ddp_xid = real_dev->fcoe_ddp_xid;
+#endif
+
if (real_dev->features & NETIF_F_HW_VLAN_TX) {
dev->header_ops = real_dev->header_ops;
dev->hard_header_len = real_dev->hard_header_len;
@@ -715,6 +786,12 @@ static const struct net_device_ops vlan_netdev_ops = {
.ndo_change_rx_flags = vlan_dev_change_rx_flags,
.ndo_do_ioctl = vlan_dev_ioctl,
.ndo_neigh_setup = vlan_dev_neigh_setup,
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+ .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
+ .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
+ .ndo_fcoe_enable = vlan_dev_fcoe_enable,
+ .ndo_fcoe_disable = vlan_dev_fcoe_disable,
+#endif
};
static const struct net_device_ops vlan_netdev_accel_ops = {
@@ -731,6 +808,12 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
.ndo_change_rx_flags = vlan_dev_change_rx_flags,
.ndo_do_ioctl = vlan_dev_ioctl,
.ndo_neigh_setup = vlan_dev_neigh_setup,
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+ .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
+ .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
+ .ndo_fcoe_enable = vlan_dev_fcoe_enable,
+ .ndo_fcoe_disable = vlan_dev_fcoe_disable,
+#endif
};
void vlan_setup(struct net_device *dev)
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index e9c91dcecc9..343146e1bce 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -100,6 +100,25 @@ static int vlan_changelink(struct net_device *dev,
return 0;
}
+static int vlan_get_tx_queues(struct net *net,
+ struct nlattr *tb[],
+ unsigned int *num_tx_queues,
+ unsigned int *real_num_tx_queues)
+{
+ struct net_device *real_dev;
+
+ if (!tb[IFLA_LINK])
+ return -EINVAL;
+
+ real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
+ if (!real_dev)
+ return -ENODEV;
+
+ *num_tx_queues = real_dev->num_tx_queues;
+ *real_num_tx_queues = real_dev->real_num_tx_queues;
+ return 0;
+}
+
static int vlan_newlink(struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
@@ -216,6 +235,7 @@ struct rtnl_link_ops vlan_link_ops __read_mostly = {
.maxtype = IFLA_VLAN_MAX,
.policy = vlan_policy,
.priv_size = sizeof(struct vlan_dev_info),
+ .get_tx_queues = vlan_get_tx_queues,
.setup = vlan_setup,
.validate = vlan_validate,
.newlink = vlan_newlink,
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index b55a091a33d..6262c335f3c 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -107,7 +107,7 @@ static const struct file_operations vlandev_fops = {
*/
/* Strings */
-static const char *vlan_name_type_str[VLAN_NAME_TYPE_HIGHEST] = {
+static const char *const vlan_name_type_str[VLAN_NAME_TYPE_HIGHEST] = {
[VLAN_NAME_TYPE_RAW_PLUS_VID] = "VLAN_NAME_TYPE_RAW_PLUS_VID",
[VLAN_NAME_TYPE_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_PLUS_VID_NO_PAD",
[VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD] = "VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD",
diff --git a/net/9p/client.c b/net/9p/client.c
index 787ccddb85e..5bf5f227dbe 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -60,9 +60,9 @@ static struct p9_req_t *
p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...);
/**
- * v9fs_parse_options - parse mount options into session structure
- * @options: options string passed from mount
- * @v9ses: existing v9fs session information
+ * parse_options - parse mount options into client structure
+ * @opts: options string passed from mount
+ * @clnt: existing v9fs client information
*
* Return 0 upon success, -ERRNO upon failure
*/
@@ -232,7 +232,7 @@ EXPORT_SYMBOL(p9_tag_lookup);
/**
* p9_tag_init - setup tags structure and contents
- * @tags: tags structure from the client struct
+ * @c: v9fs client struct
*
* This initializes the tags structure for each client instance.
*
@@ -258,7 +258,7 @@ error:
/**
* p9_tag_cleanup - cleans up tags structure and reclaims resources
- * @tags: tags structure from the client struct
+ * @c: v9fs client struct
*
* This frees resources associated with the tags structure
*
@@ -411,14 +411,9 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
if (c->dotu)
err = -ecode;
- if (!err) {
+ if (!err || !IS_ERR_VALUE(err))
err = p9_errstr2errno(ename, strlen(ename));
- /* string match failed */
- if (!err)
- err = -ESERVERFAULT;
- }
-
P9_DPRINTK(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", -ecode, ename);
kfree(ename);
@@ -430,8 +425,8 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
/**
* p9_client_flush - flush (cancel) a request
- * c: client state
- * req: request to cancel
+ * @c: client state
+ * @oldreq: request to cancel
*
* This sents a flush for a particular requests and links
* the flush request to the original request. The current
diff --git a/net/9p/error.c b/net/9p/error.c
index fdebe431406..52518512a93 100644
--- a/net/9p/error.c
+++ b/net/9p/error.c
@@ -239,7 +239,7 @@ int p9_errstr2errno(char *errstr, int len)
errstr[len] = 0;
printk(KERN_ERR "%s: server reported unknown error %s\n",
__func__, errstr);
- errno = 1;
+ errno = ESERVERFAULT;
}
return -errno;
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index 8c2588e4edc..8d934dd7fd5 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -119,8 +119,8 @@ struct p9_poll_wait {
* @wpos: write position for current frame
* @wsize: amount of data to write for current frame
* @wbuf: current write buffer
+ * @poll_pending_link: pending links to be polled per conn
* @poll_wait: array of wait_q's for various worker threads
- * @poll_waddr: ????
* @pt: poll state
* @rq: current read work
* @wq: current write work
@@ -700,9 +700,9 @@ static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
}
/**
- * parse_options - parse mount options into session structure
- * @options: options string passed from mount
- * @opts: transport-specific structure to parse options into
+ * parse_opts - parse mount options into p9_fd_opts structure
+ * @params: options string passed from mount
+ * @opts: fd transport-specific structure to parse options into
*
* Returns 0 upon success, -ERRNO upon failure
*/
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index ac4990041eb..65cb29db03f 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -67,14 +67,15 @@
* @pd: Protection Domain pointer
* @qp: Queue Pair pointer
* @cq: Completion Queue pointer
+ * @dm_mr: DMA Memory Region pointer
* @lkey: The local access only memory region key
* @timeout: Number of uSecs to wait for connection management events
* @sq_depth: The depth of the Send Queue
* @sq_sem: Semaphore for the SQ
* @rq_depth: The depth of the Receive Queue.
+ * @rq_count: Count of requests in the Receive Queue.
* @addr: The remote peer's address
* @req_lock: Protects the active request list
- * @send_wait: Wait list when the SQ fills up
* @cm_done: Completion event for connection management tracking
*/
struct p9_trans_rdma {
@@ -154,9 +155,9 @@ static match_table_t tokens = {
};
/**
- * parse_options - parse mount options into session structure
- * @options: options string passed from mount
- * @opts: transport-specific structure to parse options into
+ * parse_opts - parse mount options into rdma options structure
+ * @params: options string passed from mount
+ * @opts: rdma transport-specific structure to parse options into
*
* Returns 0 upon success, -ERRNO upon failure
*/
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index a49484e67e1..9bf0b737aa5 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -57,11 +57,9 @@ static int chan_index;
* @initialized: whether the channel is initialized
* @inuse: whether the channel is in use
* @lock: protects multiple elements within this structure
+ * @client: client instance
* @vdev: virtio dev associated with this channel
* @vq: virtio queue associated with this channel
- * @tagpool: accounting for tag ids (and request slots)
- * @reqs: array of request slots
- * @max_tag: current number of request_slots allocated
* @sg: scatter gather list which is used to pack a request (protected?)
*
* We keep all per-channel information in a structure.
@@ -92,7 +90,7 @@ static unsigned int rest_of_page(void *data)
/**
* p9_virtio_close - reclaim resources of a channel
- * @trans: transport state
+ * @client: client instance
*
* This reclaims a channel by freeing its resources and
* reseting its inuse flag.
@@ -181,9 +179,8 @@ static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req)
/**
* p9_virtio_request - issue a request
- * @t: transport state
- * @tc: &p9_fcall request to transmit
- * @rc: &p9_fcall to put reponse into
+ * @client: client instance issuing the request
+ * @req: request to be issued
*
*/
diff --git a/net/Kconfig b/net/Kconfig
index 7051b971067..041c35edb76 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -23,6 +23,26 @@ menuconfig NET
if NET
+config WANT_COMPAT_NETLINK_MESSAGES
+ bool
+ help
+ This option can be selected by other options that need compat
+ netlink messages.
+
+config COMPAT_NETLINK_MESSAGES
+ def_bool y
+ depends on COMPAT
+ depends on WIRELESS_EXT || WANT_COMPAT_NETLINK_MESSAGES
+ help
+ This option makes it possible to send different netlink messages
+ to tasks depending on whether the task is a compat task or not. To
+ achieve this, you need to set skb_shinfo(skb)->frag_list to the
+ compat skb before sending the skb, the netlink code will sort out
+ which message to actually pass to the task.
+
+ Newly written code should NEVER need this option but do
+ compat-independent messages instead!
+
menu "Networking options"
source "net/packet/Kconfig"
diff --git a/net/Makefile b/net/Makefile
index ba324aefda7..1542e7268a7 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -24,7 +24,6 @@ obj-y += ipv6/
endif
obj-$(CONFIG_PACKET) += packet/
obj-$(CONFIG_NET_KEY) += key/
-obj-$(CONFIG_NET_SCHED) += sched/
obj-$(CONFIG_BRIDGE) += bridge/
obj-$(CONFIG_NET_DSA) += dsa/
obj-$(CONFIG_IPX) += ipx/
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index 89f99d3beb6..9d4adfd2275 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -599,7 +599,7 @@ int aarp_send_ddp(struct net_device *dev, struct sk_buff *skb,
/* Non ELAP we cannot do. */
if (dev->type != ARPHRD_ETHER)
- return -1;
+ goto free_it;
skb->dev = dev;
skb->protocol = htons(ETH_P_ATALK);
@@ -634,7 +634,7 @@ int aarp_send_ddp(struct net_device *dev, struct sk_buff *skb,
if (!a) {
/* Whoops slipped... good job it's an unreliable protocol 8) */
write_unlock_bh(&aarp_lock);
- return -1;
+ goto free_it;
}
/* Set up the queue */
@@ -663,15 +663,21 @@ out_unlock:
write_unlock_bh(&aarp_lock);
/* Tell the ddp layer we have taken over for this frame. */
- return 0;
+ goto sent;
sendit:
if (skb->sk)
skb->priority = skb->sk->sk_priority;
- dev_queue_xmit(skb);
+ if (dev_queue_xmit(skb))
+ goto drop;
sent:
- return 1;
+ return NET_XMIT_SUCCESS;
+free_it:
+ kfree_skb(skb);
+drop:
+ return NET_XMIT_DROP;
}
+EXPORT_SYMBOL(aarp_send_ddp);
/*
* An entry in the aarp unresolved queue has become resolved. Send
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index bfbe13786bb..b1a4290996b 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1238,6 +1238,7 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
return -ENOBUFS;
*uaddr_len = sizeof(struct sockaddr_at);
+ memset(&sat.sat_zero, 0, sizeof(sat.sat_zero));
if (peer) {
if (sk->sk_state != TCP_ESTABLISHED)
@@ -1269,8 +1270,10 @@ static int handle_ip_over_ddp(struct sk_buff *skb)
struct net_device_stats *stats;
/* This needs to be able to handle ipddp"N" devices */
- if (!dev)
- return -ENODEV;
+ if (!dev) {
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
skb->protocol = htons(ETH_P_IP);
skb_pull(skb, 13);
@@ -1280,8 +1283,7 @@ static int handle_ip_over_ddp(struct sk_buff *skb)
stats = netdev_priv(dev);
stats->rx_packets++;
stats->rx_bytes += skb->len + 13;
- netif_rx(skb); /* Send the SKB up to a higher place. */
- return 0;
+ return netif_rx(skb); /* Send the SKB up to a higher place. */
}
#else
/* make it easy for gcc to optimize this test out, i.e. kill the code */
@@ -1289,9 +1291,8 @@ static int handle_ip_over_ddp(struct sk_buff *skb)
#define handle_ip_over_ddp(skb) 0
#endif
-static void atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
- struct ddpehdr *ddp, __u16 len_hops,
- int origlen)
+static int atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
+ struct ddpehdr *ddp, __u16 len_hops, int origlen)
{
struct atalk_route *rt;
struct atalk_addr ta;
@@ -1358,8 +1359,6 @@ static void atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
/* 22 bytes - 12 ether, 2 len, 3 802.2 5 snap */
struct sk_buff *nskb = skb_realloc_headroom(skb, 32);
kfree_skb(skb);
- if (!nskb)
- goto out;
skb = nskb;
} else
skb = skb_unshare(skb, GFP_ATOMIC);
@@ -1368,12 +1367,16 @@ static void atalk_route_packet(struct sk_buff *skb, struct net_device *dev,
* If the buffer didn't vanish into the lack of space bitbucket we can
* send it.
*/
- if (skb && aarp_send_ddp(rt->dev, skb, &ta, NULL) == -1)
- goto free_it;
-out:
- return;
+ if (skb == NULL)
+ goto drop;
+
+ if (aarp_send_ddp(rt->dev, skb, &ta, NULL) == NET_XMIT_DROP)
+ return NET_RX_DROP;
+ return NET_RX_SUCCESS;
free_it:
kfree_skb(skb);
+drop:
+ return NET_RX_DROP;
}
/**
@@ -1399,7 +1402,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
__u16 len_hops;
if (!net_eq(dev_net(dev), &init_net))
- goto freeit;
+ goto drop;
/* Don't mangle buffer if shared */
if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
@@ -1407,7 +1410,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
/* Size check and make sure header is contiguous */
if (!pskb_may_pull(skb, sizeof(*ddp)))
- goto freeit;
+ goto drop;
ddp = ddp_hdr(skb);
@@ -1425,7 +1428,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
if (skb->len < sizeof(*ddp) || skb->len < (len_hops & 1023)) {
pr_debug("AppleTalk: dropping corrupted frame (deh_len=%u, "
"skb->len=%u)\n", len_hops & 1023, skb->len);
- goto freeit;
+ goto drop;
}
/*
@@ -1435,7 +1438,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
if (ddp->deh_sum &&
atalk_checksum(skb, len_hops & 1023) != ddp->deh_sum)
/* Not a valid AppleTalk frame - dustbin time */
- goto freeit;
+ goto drop;
/* Check the packet is aimed at us */
if (!ddp->deh_dnet) /* Net 0 is 'this network' */
@@ -1447,8 +1450,7 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
/* Not ours, so we route the packet via the correct
* AppleTalk iface
*/
- atalk_route_packet(skb, dev, ddp, len_hops, origlen);
- goto out;
+ return atalk_route_packet(skb, dev, ddp, len_hops, origlen);
}
/* if IP over DDP is not selected this code will be optimized out */
@@ -1464,18 +1466,21 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
sock = atalk_search_socket(&tosat, atif);
if (!sock) /* But not one of our sockets */
- goto freeit;
+ goto drop;
/* Queue packet (standard) */
skb->sk = sock;
if (sock_queue_rcv_skb(sock, skb) < 0)
- goto freeit;
-out:
- return 0;
-freeit:
+ goto drop;
+
+ return NET_RX_SUCCESS;
+
+drop:
kfree_skb(skb);
- goto out;
+out:
+ return NET_RX_DROP;
+
}
/*
@@ -1651,10 +1656,10 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
if (skb2) {
loopback = 1;
SOCK_DEBUG(sk, "SK %p: send out(copy).\n", sk);
- if (aarp_send_ddp(dev, skb2,
- &usat->sat_addr, NULL) == -1)
- kfree_skb(skb2);
- /* else queued/sent above in the aarp queue */
+ /*
+ * If it fails it is queued/sent above in the aarp queue
+ */
+ aarp_send_ddp(dev, skb2, &usat->sat_addr, NULL);
}
}
@@ -1684,9 +1689,10 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
usat = &gsat;
}
- if (aarp_send_ddp(dev, skb, &usat->sat_addr, NULL) == -1)
- kfree_skb(skb);
- /* else queued/sent above in the aarp queue */
+ /*
+ * If it fails it is queued/sent above in the aarp queue
+ */
+ aarp_send_ddp(dev, skb, &usat->sat_addr, NULL);
}
SOCK_DEBUG(sk, "SK %p: Done write (%Zd).\n", sk, len);
@@ -1864,7 +1870,6 @@ static struct packet_type ppptalk_packet_type __read_mostly = {
static unsigned char ddp_snap_id[] = { 0x08, 0x00, 0x07, 0x80, 0x9B };
/* Export symbols for use by drivers when AppleTalk is a module */
-EXPORT_SYMBOL(aarp_send_ddp);
EXPORT_SYMBOL(atrtr_get_dev);
EXPORT_SYMBOL(atalk_find_dev_addr);
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 2912665fc58..26a646d4eb3 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -69,7 +69,7 @@ struct br2684_vcc {
struct net_device *device;
/* keep old push, pop functions for chaining */
void (*old_push) (struct atm_vcc * vcc, struct sk_buff * skb);
- /* void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb); */
+ void (*old_pop)(struct atm_vcc *vcc, struct sk_buff *skb);
enum br2684_encaps encaps;
struct list_head brvccs;
#ifdef CONFIG_ATM_BR2684_IPFILTER
@@ -142,6 +142,22 @@ static struct net_device *br2684_find_dev(const struct br2684_if_spec *s)
return NULL;
}
+/* chained vcc->pop function. Check if we should wake the netif_queue */
+static void br2684_pop(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ struct br2684_vcc *brvcc = BR2684_VCC(vcc);
+ struct net_device *net_dev = skb->dev;
+
+ pr_debug("br2684_pop(vcc %p ; net_dev %p )\n", vcc, net_dev);
+ brvcc->old_pop(vcc, skb);
+
+ if (!net_dev)
+ return;
+
+ if (atm_may_send(vcc, 0))
+ netif_wake_queue(net_dev);
+
+}
/*
* Send a packet out a particular vcc. Not to useful right now, but paves
* the way for multiple vcc's per itf. Returns true if we can send,
@@ -200,20 +216,19 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
- if (!atm_may_send(atmvcc, skb->truesize)) {
- /*
- * We free this here for now, because we cannot know in a higher
- * layer whether the skb pointer it supplied wasn't freed yet.
- * Now, it always is.
- */
- dev_kfree_skb(skb);
- return 0;
- }
atomic_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
ATM_SKB(skb)->atm_options = atmvcc->atm_options;
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
atmvcc->send(atmvcc, skb);
+
+ if (!atm_may_send(atmvcc, 0)) {
+ netif_stop_queue(brvcc->device);
+ /*check for race with br2684_pop*/
+ if (atm_may_send(atmvcc, 0))
+ netif_start_queue(brvcc->device);
+ }
+
return 1;
}
@@ -223,7 +238,8 @@ static inline struct br2684_vcc *pick_outgoing_vcc(const struct sk_buff *skb,
return list_empty(&brdev->brvccs) ? NULL : list_entry_brvcc(brdev->brvccs.next); /* 1 vcc/dev right now */
}
-static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t br2684_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct br2684_dev *brdev = BRPRIV(dev);
struct br2684_vcc *brvcc;
@@ -238,7 +254,7 @@ static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* netif_stop_queue(dev); */
dev_kfree_skb(skb);
read_unlock(&devs_lock);
- return 0;
+ return NETDEV_TX_OK;
}
if (!br2684_xmit_vcc(skb, dev, brvcc)) {
/*
@@ -252,7 +268,7 @@ static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_fifo_errors++;
}
read_unlock(&devs_lock);
- return 0;
+ return NETDEV_TX_OK;
}
/*
@@ -503,8 +519,10 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg)
atmvcc->user_back = brvcc;
brvcc->encaps = (enum br2684_encaps)be.encaps;
brvcc->old_push = atmvcc->push;
+ brvcc->old_pop = atmvcc->pop;
barrier();
atmvcc->push = br2684_push;
+ atmvcc->pop = br2684_pop;
__skb_queue_head_init(&queue);
rq = &sk_atm(atmvcc)->sk_receive_queue;
diff --git a/net/atm/clip.c b/net/atm/clip.c
index e65a3b1477f..64629c35434 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -267,7 +267,7 @@ static void clip_neigh_error(struct neighbour *neigh, struct sk_buff *skb)
kfree_skb(skb);
}
-static struct neigh_ops clip_neigh_ops = {
+static const struct neigh_ops clip_neigh_ops = {
.family = AF_INET,
.solicit = clip_neigh_solicit,
.error_report = clip_neigh_error,
@@ -360,7 +360,8 @@ static int clip_encap(struct atm_vcc *vcc, int mode)
return 0;
}
-static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct clip_priv *clip_priv = PRIV(dev);
struct atmarp_entry *entry;
@@ -373,7 +374,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
printk(KERN_ERR "clip_start_xmit: skb_dst(skb) == NULL\n");
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
- return 0;
+ return NETDEV_TX_OK;
}
if (!skb_dst(skb)->neighbour) {
#if 0
@@ -387,7 +388,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR !\n");
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
- return 0;
+ return NETDEV_TX_OK;
}
entry = NEIGH2ENTRY(skb_dst(skb)->neighbour);
if (!entry->vccs) {
@@ -402,7 +403,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
}
- return 0;
+ return NETDEV_TX_OK;
}
pr_debug("neigh %p, vccs %p\n", entry, entry->vccs);
ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
@@ -421,14 +422,14 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
if (old) {
printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n");
- return 0;
+ return NETDEV_TX_OK;
}
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
vcc->send(vcc, skb);
if (atm_may_send(vcc, 0)) {
entry->vccs->xoff = 0;
- return 0;
+ return NETDEV_TX_OK;
}
spin_lock_irqsave(&clip_priv->xoff_lock, flags);
netif_stop_queue(dev); /* XOFF -> throttle immediately */
@@ -440,7 +441,7 @@ static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
of the brief netif_stop_queue. If this isn't true or if it
changes, use netif_wake_queue instead. */
spin_unlock_irqrestore(&clip_priv->xoff_lock, flags);
- return 0;
+ return NETDEV_TX_OK;
}
static int clip_mkip(struct atm_vcc *vcc, int timeout)
diff --git a/net/atm/lec.c b/net/atm/lec.c
index ff2e594dca9..b2d64456032 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -59,7 +59,8 @@ static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };
*/
static int lec_open(struct net_device *dev);
-static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
static int lec_close(struct net_device *dev);
static void lec_init(struct net_device *dev);
static struct lec_arp_table *lec_arp_find(struct lec_priv *priv,
@@ -247,7 +248,8 @@ static void lec_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
-static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t lec_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct sk_buff *skb2;
struct lec_priv *priv = netdev_priv(dev);
@@ -289,7 +291,7 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
kfree_skb(skb);
if (skb2 == NULL)
- return 0;
+ return NETDEV_TX_OK;
skb = skb2;
}
skb_push(skb, 2);
@@ -307,7 +309,7 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb2 = skb_realloc_headroom(skb, LEC_HEADER_LEN);
kfree_skb(skb);
if (skb2 == NULL)
- return 0;
+ return NETDEV_TX_OK;
skb = skb2;
}
#endif
@@ -345,7 +347,7 @@ static int lec_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
if (skb2 == NULL) {
dev->stats.tx_dropped++;
- return 0;
+ return NETDEV_TX_OK;
}
skb = skb2;
}
@@ -416,7 +418,7 @@ out:
if (entry)
lec_arp_put(entry);
dev->trans_start = jiffies;
- return 0;
+ return NETDEV_TX_OK;
}
/* The inverse routine to net_open(). */
@@ -935,9 +937,9 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
}
#ifdef CONFIG_PROC_FS
-static char *lec_arp_get_status_string(unsigned char status)
+static const char *lec_arp_get_status_string(unsigned char status)
{
- static char *lec_arp_status_string[] = {
+ static const char *const lec_arp_status_string[] = {
"ESI_UNKNOWN ",
"ESI_ARP_PENDING ",
"ESI_VC_PENDING ",
@@ -1121,7 +1123,8 @@ static void *lec_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static int lec_seq_show(struct seq_file *seq, void *v)
{
- static char lec_banner[] = "Itf MAC ATM destination"
+ static const char lec_banner[] =
+ "Itf MAC ATM destination"
" Status Flags "
"VPI/VCI Recv VPI/VCI\n";
@@ -1505,7 +1508,7 @@ lec_arp_remove(struct lec_priv *priv, struct lec_arp_table *to_remove)
}
#if DEBUG_ARP_TABLE
-static char *get_status_string(unsigned char st)
+static const char *get_status_string(unsigned char st)
{
switch (st) {
case ESI_UNKNOWN:
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index e5bf11453a1..38a6cb0863f 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -73,7 +73,8 @@ static void mpoad_close(struct atm_vcc *vcc);
static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb);
static void mpc_push(struct atm_vcc *vcc, struct sk_buff *skb);
-static int mpc_send_packet(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
+ struct net_device *dev);
static int mpoa_event_listener(struct notifier_block *mpoa_notifier, unsigned long event, void *dev);
static void mpc_timer_refresh(void);
static void mpc_cache_check( unsigned long checking_time );
@@ -528,7 +529,8 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
/*
* Probably needs some error checks and locking, not sure...
*/
-static int mpc_send_packet(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mpc_send_packet(struct sk_buff *skb,
+ struct net_device *dev)
{
struct mpoa_client *mpc;
struct ethhdr *eth;
@@ -554,7 +556,7 @@ static int mpc_send_packet(struct sk_buff *skb, struct net_device *dev)
while (i < mpc->number_of_mps_macs) {
if (!compare_ether_addr(eth->h_dest, (mpc->mps_macs + i*ETH_ALEN)))
if ( send_via_shortcut(skb, mpc) == 0 ) /* try shortcut */
- return 0; /* success! */
+ return NETDEV_TX_OK; /* success! */
i++;
}
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 38de5ff61ec..ab8419a324b 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -151,8 +151,9 @@ static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc)
{
- static const char *class_name[] = { "off","UBR","CBR","VBR","ABR" };
- static const char *aal_name[] = {
+ static const char *const class_name[] =
+ {"off","UBR","CBR","VBR","ABR"};
+ static const char *const aal_name[] = {
"---", "1", "2", "3/4", /* 0- 3 */
"???", "5", "???", "???", /* 4- 7 */
"???", "???", "???", "???", /* 8-11 */
@@ -178,7 +179,7 @@ static void pvc_info(struct seq_file *seq, struct atm_vcc *vcc)
static const char *vcc_state(struct atm_vcc *vcc)
{
- static const char *map[] = { ATM_VS2TXT_MAP };
+ static const char *const map[] = { ATM_VS2TXT_MAP };
return map[ATM_VF2VS(vcc->flags)];
}
@@ -335,7 +336,7 @@ static const struct file_operations vcc_seq_fops = {
static int svc_seq_show(struct seq_file *seq, void *v)
{
- static char atm_svc_banner[] =
+ static const char atm_svc_banner[] =
"Itf VPI VCI State Remote\n";
if (v == SEQ_START_TOKEN)
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index 59fdb1d2e8e..ed371684c13 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -34,6 +34,7 @@ menuconfig BT
config BT_L2CAP
tristate "L2CAP protocol support"
depends on BT
+ select CRC16
help
L2CAP (Logical Link Control and Adaptation Protocol) provides
connection oriented and connection-less data transport. L2CAP
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 0250e060015..8cfb5a84984 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -49,7 +49,7 @@ static struct net_proto_family *bt_proto[BT_MAX_PROTO];
static DEFINE_RWLOCK(bt_proto_lock);
static struct lock_class_key bt_lock_key[BT_MAX_PROTO];
-static const char *bt_key_strings[BT_MAX_PROTO] = {
+static const char *const bt_key_strings[BT_MAX_PROTO] = {
"sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP",
"sk_lock-AF_BLUETOOTH-BTPROTO_HCI",
"sk_lock-AF_BLUETOOTH-BTPROTO_SCO",
@@ -61,7 +61,7 @@ static const char *bt_key_strings[BT_MAX_PROTO] = {
};
static struct lock_class_key bt_slock_key[BT_MAX_PROTO];
-static const char *bt_slock_key_strings[BT_MAX_PROTO] = {
+static const char *const bt_slock_key_strings[BT_MAX_PROTO] = {
"slock-AF_BLUETOOTH-BTPROTO_L2CAP",
"slock-AF_BLUETOOTH-BTPROTO_HCI",
"slock-AF_BLUETOOTH-BTPROTO_SCO",
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 52a6ce0d772..cafe9f54d84 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -533,6 +533,10 @@ static struct device *bnep_get_device(struct bnep_session *session)
return conn ? &conn->dev : NULL;
}
+static struct device_type bnep_type = {
+ .name = "bluetooth",
+};
+
int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
{
struct net_device *dev;
@@ -586,6 +590,7 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
#endif
SET_NETDEV_DEV(dev, bnep_get_device(s));
+ SET_NETDEV_DEVTYPE(dev, &bnep_type);
err = register_netdev(dev);
if (err) {
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index d7a0e9722de..26fb831ef7e 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -165,7 +165,8 @@ static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session
}
#endif
-static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t bnep_net_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct bnep_session *s = netdev_priv(dev);
struct sock *sk = s->sock->sk;
@@ -175,14 +176,14 @@ static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev)
#ifdef CONFIG_BT_BNEP_MC_FILTER
if (bnep_net_mc_filter(skb, s)) {
kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
#endif
#ifdef CONFIG_BT_BNEP_PROTO_FILTER
if (bnep_net_proto_filter(skb, s)) {
kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
#endif
@@ -203,7 +204,7 @@ static int bnep_net_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
}
- return 0;
+ return NETDEV_TX_OK;
}
static const struct net_device_ops bnep_netdev_ops = {
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index fa47d5d84f5..a9750984f77 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -246,6 +246,8 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
if (hdev->notify)
hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);
+ atomic_set(&conn->devref, 0);
+
hci_conn_init_sysfs(conn);
tasklet_enable(&hdev->tx_task);
@@ -288,7 +290,7 @@ int hci_conn_del(struct hci_conn *conn)
skb_queue_purge(&conn->data_q);
- hci_conn_del_sysfs(conn);
+ hci_conn_put_device(conn);
hci_dev_put(hdev);
@@ -583,6 +585,19 @@ void hci_conn_check_pending(struct hci_dev *hdev)
hci_dev_unlock(hdev);
}
+void hci_conn_hold_device(struct hci_conn *conn)
+{
+ atomic_inc(&conn->devref);
+}
+EXPORT_SYMBOL(hci_conn_hold_device);
+
+void hci_conn_put_device(struct hci_conn *conn)
+{
+ if (atomic_dec_and_test(&conn->devref))
+ hci_conn_del_sysfs(conn);
+}
+EXPORT_SYMBOL(hci_conn_put_device);
+
int hci_get_conn_list(void __user *arg)
{
struct hci_conn_list_req req, *cl;
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 406ad07cdea..e1da8f68759 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -911,7 +911,7 @@ int hci_register_dev(struct hci_dev *hdev)
hdev->reassembly[i] = NULL;
init_waitqueue_head(&hdev->req_wait_q);
- init_MUTEX(&hdev->req_lock);
+ mutex_init(&hdev->req_lock);
inquiry_cache_init(hdev);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 184ba0a88ec..e99fe385fba 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -887,6 +887,7 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
} else
conn->state = BT_CONNECTED;
+ hci_conn_hold_device(conn);
hci_conn_add_sysfs(conn);
if (test_bit(HCI_AUTH, &hdev->flags))
@@ -1693,6 +1694,7 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
conn->handle = __le16_to_cpu(ev->handle);
conn->state = BT_CONNECTED;
+ hci_conn_hold_device(conn);
hci_conn_add_sysfs(conn);
break;
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 95f7a7a544b..7f939ce2980 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -68,7 +68,7 @@ static struct attribute_group bt_link_group = {
.attrs = bt_link_attrs,
};
-static struct attribute_group *bt_link_groups[] = {
+static const struct attribute_group *bt_link_groups[] = {
&bt_link_group,
NULL
};
@@ -392,7 +392,7 @@ static struct attribute_group bt_host_group = {
.attrs = bt_host_attrs,
};
-static struct attribute_group *bt_host_groups[] = {
+static const struct attribute_group *bt_host_groups[] = {
&bt_host_group,
NULL
};
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index b18676870d5..49d8495d69b 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -40,6 +40,7 @@
#include <linux/input.h>
#include <linux/hid.h>
+#include <linux/hidraw.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -92,10 +93,14 @@ static void __hidp_link_session(struct hidp_session *session)
{
__module_get(THIS_MODULE);
list_add(&session->list, &hidp_session_list);
+
+ hci_conn_hold_device(session->conn);
}
static void __hidp_unlink_session(struct hidp_session *session)
{
+ hci_conn_put_device(session->conn);
+
list_del(&session->list);
module_put(THIS_MODULE);
}
@@ -374,6 +379,7 @@ static void hidp_process_hid_control(struct hidp_session *session,
/* Kill session thread */
atomic_inc(&session->terminate);
+ hidp_schedule(session);
}
}
@@ -571,9 +577,8 @@ static int hidp_session(void *arg)
}
if (session->hid) {
- if (session->hid->claimed & HID_CLAIMED_INPUT)
- hidinput_disconnect(session->hid);
hid_destroy_device(session->hid);
+ session->hid = NULL;
}
/* Wakeup user-space polling for socket errors */
@@ -601,25 +606,27 @@ static struct device *hidp_get_device(struct hidp_session *session)
{
bdaddr_t *src = &bt_sk(session->ctrl_sock->sk)->src;
bdaddr_t *dst = &bt_sk(session->ctrl_sock->sk)->dst;
+ struct device *device = NULL;
struct hci_dev *hdev;
- struct hci_conn *conn;
hdev = hci_get_route(dst, src);
if (!hdev)
return NULL;
- conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
+ session->conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
+ if (session->conn)
+ device = &session->conn->dev;
hci_dev_put(hdev);
- return conn ? &conn->dev : NULL;
+ return device;
}
static int hidp_setup_input(struct hidp_session *session,
struct hidp_connadd_req *req)
{
struct input_dev *input;
- int i;
+ int err, i;
input = input_allocate_device();
if (!input)
@@ -666,7 +673,13 @@ static int hidp_setup_input(struct hidp_session *session,
input->event = hidp_input_event;
- return input_register_device(input);
+ err = input_register_device(input);
+ if (err < 0) {
+ hci_conn_put_device(session->conn);
+ return err;
+ }
+
+ return 0;
}
static int hidp_open(struct hid_device *hid)
@@ -729,8 +742,6 @@ static void hidp_stop(struct hid_device *hid)
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
- if (hid->claimed & HID_CLAIMED_INPUT)
- hidinput_disconnect(hid);
hid->claimed = 0;
}
@@ -748,13 +759,11 @@ static int hidp_setup_hid(struct hidp_session *session,
{
struct hid_device *hid;
bdaddr_t src, dst;
- int ret;
+ int err;
hid = hid_allocate_device();
- if (IS_ERR(hid)) {
- ret = PTR_ERR(session->hid);
- goto err;
- }
+ if (IS_ERR(hid))
+ return PTR_ERR(session->hid);
session->hid = hid;
session->req = req;
@@ -776,16 +785,17 @@ static int hidp_setup_hid(struct hidp_session *session,
hid->dev.parent = hidp_get_device(session);
hid->ll_driver = &hidp_hid_driver;
- ret = hid_add_device(hid);
- if (ret)
- goto err_hid;
+ err = hid_add_device(hid);
+ if (err < 0)
+ goto failed;
return 0;
-err_hid:
+
+failed:
hid_destroy_device(hid);
session->hid = NULL;
-err:
- return ret;
+
+ return err;
}
int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock)
@@ -835,13 +845,13 @@ int hidp_add_connection(struct hidp_connadd_req *req, struct socket *ctrl_sock,
if (req->rd_size > 0) {
err = hidp_setup_hid(session, req);
if (err && err != -ENODEV)
- goto err_skb;
+ goto purge;
}
if (!session->hid) {
err = hidp_setup_input(session, req);
if (err < 0)
- goto err_skb;
+ goto purge;
}
__hidp_link_session(session);
@@ -869,13 +879,20 @@ unlink:
__hidp_unlink_session(session);
- if (session->input)
+ if (session->input) {
input_unregister_device(session->input);
- if (session->hid)
+ session->input = NULL;
+ }
+
+ if (session->hid) {
hid_destroy_device(session->hid);
-err_skb:
+ session->hid = NULL;
+ }
+
+purge:
skb_queue_purge(&session->ctrl_transmit);
skb_queue_purge(&session->intr_transmit);
+
failed:
up_write(&hidp_session_sem);
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index e503c89057a..faf3d74c358 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -126,6 +126,8 @@ int hidp_get_conninfo(struct hidp_conninfo *ci);
struct hidp_session {
struct list_head list;
+ struct hci_conn *conn;
+
struct socket *ctrl_sock;
struct socket *intr_sock;
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index bd0a4c1bced..b0301256464 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -41,6 +41,7 @@
#include <linux/list.h>
#include <linux/device.h>
#include <linux/uaccess.h>
+#include <linux/crc16.h>
#include <net/sock.h>
#include <asm/system.h>
@@ -50,7 +51,9 @@
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
-#define VERSION "2.13"
+#define VERSION "2.14"
+
+static int enable_ertm = 0;
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { 0x02, };
@@ -331,6 +334,48 @@ static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16
return hci_send_acl(conn->hcon, skb, 0);
}
+static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
+{
+ struct sk_buff *skb;
+ struct l2cap_hdr *lh;
+ struct l2cap_conn *conn = pi->conn;
+ int count, hlen = L2CAP_HDR_SIZE + 2;
+
+ if (pi->fcs == L2CAP_FCS_CRC16)
+ hlen += 2;
+
+ BT_DBG("pi %p, control 0x%2.2x", pi, control);
+
+ count = min_t(unsigned int, conn->mtu, hlen);
+ control |= L2CAP_CTRL_FRAME_TYPE;
+
+ skb = bt_skb_alloc(count, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
+ lh->cid = cpu_to_le16(pi->dcid);
+ put_unaligned_le16(control, skb_put(skb, 2));
+
+ if (pi->fcs == L2CAP_FCS_CRC16) {
+ u16 fcs = crc16(0, (u8 *)lh, count - 2);
+ put_unaligned_le16(fcs, skb_put(skb, 2));
+ }
+
+ return hci_send_acl(pi->conn->hcon, skb, 0);
+}
+
+static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
+{
+ if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
+ control |= L2CAP_SUPER_RCV_NOT_READY;
+ else
+ control |= L2CAP_SUPER_RCV_READY;
+
+ return l2cap_send_sframe(pi, control);
+}
+
static void l2cap_do_start(struct sock *sk)
{
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
@@ -364,6 +409,16 @@ static void l2cap_do_start(struct sock *sk)
}
}
+static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
+{
+ struct l2cap_disconn_req req;
+
+ req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
+ l2cap_send_cmd(conn, l2cap_get_ident(conn),
+ L2CAP_DISCONN_REQ, sizeof(req), &req);
+}
+
/* ---- L2CAP connections ---- */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
@@ -648,15 +703,10 @@ static void __l2cap_sock_close(struct sock *sk, int reason)
case BT_CONFIG:
if (sk->sk_type == SOCK_SEQPACKET) {
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- struct l2cap_disconn_req req;
sk->sk_state = BT_DISCONN;
l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
-
- req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
- req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
- l2cap_send_cmd(conn, l2cap_get_ident(conn),
- L2CAP_DISCONN_REQ, sizeof(req), &req);
+ l2cap_send_disconn_req(conn, sk);
} else
l2cap_chan_del(sk, reason);
break;
@@ -715,12 +765,16 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent)
pi->imtu = l2cap_pi(parent)->imtu;
pi->omtu = l2cap_pi(parent)->omtu;
+ pi->mode = l2cap_pi(parent)->mode;
+ pi->fcs = l2cap_pi(parent)->fcs;
pi->sec_level = l2cap_pi(parent)->sec_level;
pi->role_switch = l2cap_pi(parent)->role_switch;
pi->force_reliable = l2cap_pi(parent)->force_reliable;
} else {
pi->imtu = L2CAP_DEFAULT_MTU;
pi->omtu = 0;
+ pi->mode = L2CAP_MODE_BASIC;
+ pi->fcs = L2CAP_FCS_CRC16;
pi->sec_level = BT_SECURITY_LOW;
pi->role_switch = 0;
pi->force_reliable = 0;
@@ -956,6 +1010,19 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
goto done;
}
+ switch (l2cap_pi(sk)->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ if (enable_ertm)
+ break;
+ /* fall through */
+ default:
+ err = -ENOTSUPP;
+ goto done;
+ }
+
switch (sk->sk_state) {
case BT_CONNECT:
case BT_CONNECT2:
@@ -1007,6 +1074,19 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
goto done;
}
+ switch (l2cap_pi(sk)->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ if (enable_ertm)
+ break;
+ /* fall through */
+ default:
+ err = -ENOTSUPP;
+ goto done;
+ }
+
if (!l2cap_pi(sk)->psm) {
bdaddr_t *src = &bt_sk(sk)->src;
u16 psm;
@@ -1117,39 +1197,219 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l
return 0;
}
-static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
+static void l2cap_monitor_timeout(unsigned long arg)
{
- struct l2cap_conn *conn = l2cap_pi(sk)->conn;
- struct sk_buff *skb, **frag;
- int err, hlen, count, sent = 0;
- struct l2cap_hdr *lh;
+ struct sock *sk = (void *) arg;
+ u16 control;
- BT_DBG("sk %p len %d", sk, len);
+ bh_lock_sock(sk);
+ if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
+ l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
+ bh_unlock_sock(sk);
+ return;
+ }
- /* First fragment (with L2CAP header) */
- if (sk->sk_type == SOCK_DGRAM)
- hlen = L2CAP_HDR_SIZE + 2;
- else
- hlen = L2CAP_HDR_SIZE;
+ l2cap_pi(sk)->retry_count++;
+ __mod_monitor_timer();
- count = min_t(unsigned int, (conn->mtu - hlen), len);
+ control = L2CAP_CTRL_POLL;
+ l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
+ bh_unlock_sock(sk);
+}
- skb = bt_skb_send_alloc(sk, hlen + count,
- msg->msg_flags & MSG_DONTWAIT, &err);
- if (!skb)
- return err;
+static void l2cap_retrans_timeout(unsigned long arg)
+{
+ struct sock *sk = (void *) arg;
+ u16 control;
- /* Create L2CAP header */
- lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
- lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
- lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
+ bh_lock_sock(sk);
+ l2cap_pi(sk)->retry_count = 1;
+ __mod_monitor_timer();
+
+ l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
+
+ control = L2CAP_CTRL_POLL;
+ l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
+ bh_unlock_sock(sk);
+}
+
+static void l2cap_drop_acked_frames(struct sock *sk)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_peek(TX_QUEUE(sk)))) {
+ if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
+ break;
+
+ skb = skb_dequeue(TX_QUEUE(sk));
+ kfree_skb(skb);
+
+ l2cap_pi(sk)->unacked_frames--;
+ }
+
+ if (!l2cap_pi(sk)->unacked_frames)
+ del_timer(&l2cap_pi(sk)->retrans_timer);
- if (sk->sk_type == SOCK_DGRAM)
- put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
+ return;
+}
+
+static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ int err;
+
+ BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
+
+ err = hci_send_acl(pi->conn->hcon, skb, 0);
+ if (err < 0)
+ kfree_skb(skb);
+
+ return err;
+}
+
+static int l2cap_streaming_send(struct sock *sk)
+{
+ struct sk_buff *skb, *tx_skb;
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ u16 control, fcs;
+ int err;
+
+ while ((skb = sk->sk_send_head)) {
+ tx_skb = skb_clone(skb, GFP_ATOMIC);
+
+ control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
+ control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
+ put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+
+ if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
+ fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
+ put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
+ }
+
+ err = l2cap_do_send(sk, tx_skb);
+ if (err < 0) {
+ l2cap_send_disconn_req(pi->conn, sk);
+ return err;
+ }
+
+ pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
+
+ if (skb_queue_is_last(TX_QUEUE(sk), skb))
+ sk->sk_send_head = NULL;
+ else
+ sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
+
+ skb = skb_dequeue(TX_QUEUE(sk));
+ kfree_skb(skb);
+ }
+ return 0;
+}
+
+static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct sk_buff *skb, *tx_skb;
+ u16 control, fcs;
+ int err;
+
+ skb = skb_peek(TX_QUEUE(sk));
+ do {
+ if (bt_cb(skb)->tx_seq != tx_seq) {
+ if (skb_queue_is_last(TX_QUEUE(sk), skb))
+ break;
+ skb = skb_queue_next(TX_QUEUE(sk), skb);
+ continue;
+ }
+
+ if (pi->remote_max_tx &&
+ bt_cb(skb)->retries == pi->remote_max_tx) {
+ l2cap_send_disconn_req(pi->conn, sk);
+ break;
+ }
+
+ tx_skb = skb_clone(skb, GFP_ATOMIC);
+ bt_cb(skb)->retries++;
+ control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
+ control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
+ | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
+ put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+
+ if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
+ fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
+ put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
+ }
+
+ err = l2cap_do_send(sk, tx_skb);
+ if (err < 0) {
+ l2cap_send_disconn_req(pi->conn, sk);
+ return err;
+ }
+ break;
+ } while (1);
+ return 0;
+}
+
+static int l2cap_ertm_send(struct sock *sk)
+{
+ struct sk_buff *skb, *tx_skb;
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ u16 control, fcs;
+ int err;
+
+ if (pi->conn_state & L2CAP_CONN_WAIT_F)
+ return 0;
+
+ while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))
+ && !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
+ tx_skb = skb_clone(skb, GFP_ATOMIC);
+
+ if (pi->remote_max_tx &&
+ bt_cb(skb)->retries == pi->remote_max_tx) {
+ l2cap_send_disconn_req(pi->conn, sk);
+ break;
+ }
+
+ bt_cb(skb)->retries++;
+
+ control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
+ control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
+ | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
+ put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
+
+ if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
+ fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
+ put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
+ }
+
+ err = l2cap_do_send(sk, tx_skb);
+ if (err < 0) {
+ l2cap_send_disconn_req(pi->conn, sk);
+ return err;
+ }
+ __mod_retrans_timer();
+
+ bt_cb(skb)->tx_seq = pi->next_tx_seq;
+ pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
+
+ pi->unacked_frames++;
+
+ if (skb_queue_is_last(TX_QUEUE(sk), skb))
+ sk->sk_send_head = NULL;
+ else
+ sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
+ }
+
+ return 0;
+}
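l2cap_tx_window_full() is not part of this hunk, but the loop above depends on the modulo-64 window bookkeeping it performs over next_tx_seq, expected_ack_seq and remote_tx_win. A minimal standalone sketch of that arithmetic, purely for illustration:

#include <stdio.h>

/* Sequence numbers are 6 bits wide (0..63), so the number of I-frames in
 * flight is the modulo-64 distance between the next sequence number to
 * send and the oldest one still waiting for an acknowledgement. */
static unsigned int frames_in_flight(unsigned int next_tx_seq,
                                     unsigned int expected_ack_seq)
{
        return (next_tx_seq - expected_ack_seq + 64) % 64;
}

static int tx_window_full(unsigned int next_tx_seq,
                          unsigned int expected_ack_seq,
                          unsigned int remote_tx_win)
{
        return frames_in_flight(next_tx_seq, expected_ack_seq) >= remote_tx_win;
}

int main(void)
{
        /* sender wrapped to seq 3, peer acknowledged up to seq 62 and
         * advertised a 32-frame window: 5 frames in flight, window open */
        printf("in flight: %u, full: %d\n",
               frames_in_flight(3, 62), tx_window_full(3, 62, 32));
        return 0;
}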
+
+static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
+{
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+ struct sk_buff **frag;
+ int err, sent = 0;
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
- err = -EFAULT;
- goto fail;
+ return -EFAULT;
}
sent += count;
@@ -1162,33 +1422,173 @@ static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
if (!*frag)
- goto fail;
-
- if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
- err = -EFAULT;
- goto fail;
- }
+ return -EFAULT;
+ if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
+ return -EFAULT;
sent += count;
len -= count;
frag = &(*frag)->next;
}
- err = hci_send_acl(conn->hcon, skb, 0);
- if (err < 0)
- goto fail;
return sent;
+}
-fail:
- kfree_skb(skb);
- return err;
+static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
+{
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+ struct sk_buff *skb;
+ int err, count, hlen = L2CAP_HDR_SIZE + 2;
+ struct l2cap_hdr *lh;
+
+ BT_DBG("sk %p len %d", sk, (int)len);
+
+ count = min_t(unsigned int, (conn->mtu - hlen), len);
+ skb = bt_skb_send_alloc(sk, count + hlen,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ /* Create L2CAP header */
+ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
+ put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
+
+ err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
+ if (unlikely(err < 0)) {
+ kfree_skb(skb);
+ return ERR_PTR(err);
+ }
+ return skb;
+}
+
+static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
+{
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+ struct sk_buff *skb;
+ int err, count, hlen = L2CAP_HDR_SIZE;
+ struct l2cap_hdr *lh;
+
+ BT_DBG("sk %p len %d", sk, (int)len);
+
+ count = min_t(unsigned int, (conn->mtu - hlen), len);
+ skb = bt_skb_send_alloc(sk, count + hlen,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ /* Create L2CAP header */
+ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
+
+ err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
+ if (unlikely(err < 0)) {
+ kfree_skb(skb);
+ return ERR_PTR(err);
+ }
+ return skb;
+}
+
+static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
+{
+ struct l2cap_conn *conn = l2cap_pi(sk)->conn;
+ struct sk_buff *skb;
+ int err, count, hlen = L2CAP_HDR_SIZE + 2;
+ struct l2cap_hdr *lh;
+
+ BT_DBG("sk %p len %d", sk, (int)len);
+
+ if (sdulen)
+ hlen += 2;
+
+ if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
+ hlen += 2;
+
+ count = min_t(unsigned int, (conn->mtu - hlen), len);
+ skb = bt_skb_send_alloc(sk, count + hlen,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ /* Create L2CAP header */
+ lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
+ lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
+ lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
+ put_unaligned_le16(control, skb_put(skb, 2));
+ if (sdulen)
+ put_unaligned_le16(sdulen, skb_put(skb, 2));
+
+ err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
+ if (unlikely(err < 0)) {
+ kfree_skb(skb);
+ return ERR_PTR(err);
+ }
+
+ if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
+ put_unaligned_le16(0, skb_put(skb, 2));
+
+ bt_cb(skb)->retries = 0;
+ return skb;
+}
+
+static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct sk_buff *skb;
+ struct sk_buff_head sar_queue;
+ u16 control;
+ size_t size = 0;
+
+ __skb_queue_head_init(&sar_queue);
+ control = L2CAP_SDU_START;
+ skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ __skb_queue_tail(&sar_queue, skb);
+ len -= pi->max_pdu_size;
+ size += pi->max_pdu_size;
+ control = 0;
+
+ while (len > 0) {
+ size_t buflen;
+
+ if (len > pi->max_pdu_size) {
+ control |= L2CAP_SDU_CONTINUE;
+ buflen = pi->max_pdu_size;
+ } else {
+ control |= L2CAP_SDU_END;
+ buflen = len;
+ }
+
+ skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
+ if (IS_ERR(skb)) {
+ skb_queue_purge(&sar_queue);
+ return PTR_ERR(skb);
+ }
+
+ __skb_queue_tail(&sar_queue, skb);
+ len -= buflen;
+ size += buflen;
+ control = 0;
+ }
+ skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
+ if (sk->sk_send_head == NULL)
+ sk->sk_send_head = sar_queue.next;
+
+ return size;
}
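To make the segmentation above concrete: the start PDU carries a 2-byte SDU length field plus the first max_pdu_size bytes, each continuation PDU carries max_pdu_size bytes, and whatever remains goes into the end PDU. A small standalone sketch of that split (672 is only an example PDU size, not a value taken from this patch):

#include <stdio.h>
#include <stddef.h>

/* Print how an SDU of sdu_len bytes is cut into PDUs of at most
 * max_pdu_size bytes, mirroring the loop in l2cap_sar_segment_sdu().
 * Callers only segment when sdu_len > max_pdu_size. */
static void plan_segments(size_t sdu_len, size_t max_pdu_size)
{
        printf("START    %zu bytes (plus 2-byte SDU length field)\n",
               max_pdu_size);
        sdu_len -= max_pdu_size;

        while (sdu_len > 0) {
                size_t chunk = sdu_len > max_pdu_size ? max_pdu_size : sdu_len;

                printf("%s %zu bytes\n",
                       sdu_len > max_pdu_size ? "CONTINUE" : "END     ", chunk);
                sdu_len -= chunk;
        }
}

int main(void)
{
        plan_segments(1500, 672);       /* START 672, CONTINUE 672, END 156 */
        return 0;
}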
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
- int err = 0;
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct sk_buff *skb;
+ u16 control;
+ int err;
BT_DBG("sock %p, sk %p", sock, sk);
@@ -1200,16 +1600,73 @@ static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct ms
return -EOPNOTSUPP;
/* Check outgoing MTU */
- if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
+ if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC
+ && len > pi->omtu)
return -EINVAL;
lock_sock(sk);
- if (sk->sk_state == BT_CONNECTED)
- err = l2cap_do_send(sk, msg, len);
- else
+ if (sk->sk_state != BT_CONNECTED) {
err = -ENOTCONN;
+ goto done;
+ }
+
+ /* Connectionless channel */
+ if (sk->sk_type == SOCK_DGRAM) {
+ skb = l2cap_create_connless_pdu(sk, msg, len);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ goto done;
+ }
+
+ err = l2cap_do_send(sk, skb);
+ goto done;
+ }
+
+ switch (pi->mode) {
+ case L2CAP_MODE_BASIC:
+ /* Create a basic PDU */
+ skb = l2cap_create_basic_pdu(sk, msg, len);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ goto done;
+ }
+
+ err = l2cap_do_send(sk, skb);
+ if (!err)
+ err = len;
+ break;
+
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ /* Entire SDU fits into one PDU */
+ if (len <= pi->max_pdu_size) {
+ control = L2CAP_SDU_UNSEGMENTED;
+ skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ goto done;
+ }
+ __skb_queue_tail(TX_QUEUE(sk), skb);
+ if (sk->sk_send_head == NULL)
+ sk->sk_send_head = skb;
+ } else {
+ /* Segment SDU into multiple PDUs */
+ err = l2cap_sar_segment_sdu(sk, msg, len);
+ if (err < 0)
+ goto done;
+ }
+
+ if (pi->mode == L2CAP_MODE_STREAMING)
+ err = l2cap_streaming_send(sk);
+ else
+ err = l2cap_ertm_send(sk);
+
+ if (!err)
+ err = len;
+ break;
+
+ default:
+ BT_DBG("bad state %1.1x", pi->mode);
+ err = -EINVAL;
+ }
+done:
release_sock(sk);
return err;
}
@@ -1257,7 +1714,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
opts.imtu = l2cap_pi(sk)->imtu;
opts.omtu = l2cap_pi(sk)->omtu;
opts.flush_to = l2cap_pi(sk)->flush_to;
- opts.mode = L2CAP_MODE_BASIC;
+ opts.mode = l2cap_pi(sk)->mode;
+ opts.fcs = l2cap_pi(sk)->fcs;
len = min_t(unsigned int, sizeof(opts), optlen);
if (copy_from_user((char *) &opts, optval, len)) {
@@ -1265,8 +1723,10 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us
break;
}
- l2cap_pi(sk)->imtu = opts.imtu;
- l2cap_pi(sk)->omtu = opts.omtu;
+ l2cap_pi(sk)->imtu = opts.imtu;
+ l2cap_pi(sk)->omtu = opts.omtu;
+ l2cap_pi(sk)->mode = opts.mode;
+ l2cap_pi(sk)->fcs = opts.fcs;
break;
case L2CAP_LM:
@@ -1379,7 +1839,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
opts.imtu = l2cap_pi(sk)->imtu;
opts.omtu = l2cap_pi(sk)->omtu;
opts.flush_to = l2cap_pi(sk)->flush_to;
- opts.mode = L2CAP_MODE_BASIC;
+ opts.mode = l2cap_pi(sk)->mode;
+ opts.fcs = l2cap_pi(sk)->fcs;
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
@@ -1708,16 +2169,108 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
*ptr += L2CAP_CONF_OPT_SIZE + len;
}
+static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
+{
+ u32 local_feat_mask = l2cap_feat_mask;
+ if (enable_ertm)
+ local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
+
+ switch (mode) {
+ case L2CAP_MODE_ERTM:
+ return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
+ case L2CAP_MODE_STREAMING:
+ return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
+ default:
+ return 0x00;
+ }
+}
+
+static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
+{
+ switch (mode) {
+ case L2CAP_MODE_STREAMING:
+ case L2CAP_MODE_ERTM:
+ if (l2cap_mode_supported(mode, remote_feat_mask))
+ return mode;
+ /* fall through */
+ default:
+ return L2CAP_MODE_BASIC;
+ }
+}
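l2cap_select_mode() only keeps ERTM or streaming when both sides advertise the corresponding feature bit, and falls back to basic mode otherwise. A standalone sketch of that decision; both the feature bits and the mode codes below are illustrative assumptions, the real values live in include/net/bluetooth/l2cap.h:

#include <stdio.h>

/* Illustrative values only, not taken from the kernel header. */
#define FEAT_ERTM       0x08
#define FEAT_STREAMING  0x10

#define MODE_BASIC      0x00
#define MODE_ERTM       0x03
#define MODE_STREAMING  0x04

static unsigned char select_mode(unsigned char wanted,
                                 unsigned int local_feat,
                                 unsigned int remote_feat)
{
        unsigned int both = local_feat & remote_feat;

        if (wanted == MODE_ERTM && (both & FEAT_ERTM))
                return MODE_ERTM;
        if (wanted == MODE_STREAMING && (both & FEAT_STREAMING))
                return MODE_STREAMING;
        return MODE_BASIC;      /* fall back, as l2cap_select_mode() does */
}

int main(void)
{
        /* we want streaming, the remote only advertises ERTM: basic mode */
        printf("selected mode 0x%02x\n",
               select_mode(MODE_STREAMING, FEAT_ERTM | FEAT_STREAMING, FEAT_ERTM));
        return 0;
}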
+
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_req *req = data;
+ struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM };
void *ptr = req->data;
BT_DBG("sk %p", sk);
- if (pi->imtu != L2CAP_DEFAULT_MTU)
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
+ if (pi->num_conf_req || pi->num_conf_rsp)
+ goto done;
+
+ switch (pi->mode) {
+ case L2CAP_MODE_STREAMING:
+ case L2CAP_MODE_ERTM:
+ pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
+ if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
+ l2cap_send_disconn_req(pi->conn, sk);
+ break;
+ default:
+ pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
+ break;
+ }
+
+done:
+ switch (pi->mode) {
+ case L2CAP_MODE_BASIC:
+ if (pi->imtu != L2CAP_DEFAULT_MTU)
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
+ break;
+
+ case L2CAP_MODE_ERTM:
+ rfc.mode = L2CAP_MODE_ERTM;
+ rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
+ rfc.max_transmit = L2CAP_DEFAULT_MAX_TX;
+ rfc.retrans_timeout = 0;
+ rfc.monitor_timeout = 0;
+ rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+ sizeof(rfc), (unsigned long) &rfc);
+
+ if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
+ break;
+
+ if (pi->fcs == L2CAP_FCS_NONE ||
+ pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
+ pi->fcs = L2CAP_FCS_NONE;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
+ }
+ break;
+
+ case L2CAP_MODE_STREAMING:
+ rfc.mode = L2CAP_MODE_STREAMING;
+ rfc.txwin_size = 0;
+ rfc.max_transmit = 0;
+ rfc.retrans_timeout = 0;
+ rfc.monitor_timeout = 0;
+ rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+ sizeof(rfc), (unsigned long) &rfc);
+
+ if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
+ break;
+
+ if (pi->fcs == L2CAP_FCS_NONE ||
+ pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
+ pi->fcs = L2CAP_FCS_NONE;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
+ }
+ break;
+ }
/* FIXME: Need actual value of the flush timeout */
//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
@@ -1767,6 +2320,12 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
memcpy(&rfc, (void *) val, olen);
break;
+ case L2CAP_CONF_FCS:
+ if (val == L2CAP_FCS_NONE)
+ pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
+
+ break;
+
default:
if (hint)
break;
@@ -1777,30 +2336,83 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
}
}
+ if (pi->num_conf_rsp || pi->num_conf_req)
+ goto done;
+
+ switch (pi->mode) {
+ case L2CAP_MODE_STREAMING:
+ case L2CAP_MODE_ERTM:
+ pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
+ if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
+ return -ECONNREFUSED;
+ break;
+ default:
+ pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
+ break;
+ }
+
+done:
+ if (pi->mode != rfc.mode) {
+ result = L2CAP_CONF_UNACCEPT;
+ rfc.mode = pi->mode;
+
+ if (pi->num_conf_rsp == 1)
+ return -ECONNREFUSED;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+ sizeof(rfc), (unsigned long) &rfc);
+ }
+
if (result == L2CAP_CONF_SUCCESS) {
/* Configure output options and let the other side know
* which ones we don't like. */
- if (rfc.mode == L2CAP_MODE_BASIC) {
- if (mtu < pi->omtu)
- result = L2CAP_CONF_UNACCEPT;
- else {
- pi->omtu = mtu;
- pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
- }
+ if (mtu < L2CAP_DEFAULT_MIN_MTU)
+ result = L2CAP_CONF_UNACCEPT;
+ else {
+ pi->omtu = mtu;
+ pi->conf_state |= L2CAP_CONF_MTU_DONE;
+ }
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
- } else {
+ switch (rfc.mode) {
+ case L2CAP_MODE_BASIC:
+ pi->fcs = L2CAP_FCS_NONE;
+ pi->conf_state |= L2CAP_CONF_MODE_DONE;
+ break;
+
+ case L2CAP_MODE_ERTM:
+ pi->remote_tx_win = rfc.txwin_size;
+ pi->remote_max_tx = rfc.max_transmit;
+ pi->max_pdu_size = rfc.max_pdu_size;
+
+ rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
+ rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
+
+ pi->conf_state |= L2CAP_CONF_MODE_DONE;
+ break;
+
+ case L2CAP_MODE_STREAMING:
+ pi->remote_tx_win = rfc.txwin_size;
+ pi->max_pdu_size = rfc.max_pdu_size;
+
+ pi->conf_state |= L2CAP_CONF_MODE_DONE;
+ break;
+
+ default:
result = L2CAP_CONF_UNACCEPT;
memset(&rfc, 0, sizeof(rfc));
- rfc.mode = L2CAP_MODE_BASIC;
-
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
+ rfc.mode = pi->mode;
}
- }
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+ sizeof(rfc), (unsigned long) &rfc);
+
+ if (result == L2CAP_CONF_SUCCESS)
+ pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
+ }
rsp->scid = cpu_to_le16(pi->dcid);
rsp->result = cpu_to_le16(result);
rsp->flags = cpu_to_le16(0x0000);
@@ -1808,6 +2420,73 @@ static int l2cap_parse_conf_req(struct sock *sk, void *data)
return ptr - data;
}
+static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct l2cap_conf_req *req = data;
+ void *ptr = req->data;
+ int type, olen;
+ unsigned long val;
+ struct l2cap_conf_rfc rfc;
+
+ BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
+
+ while (len >= L2CAP_CONF_OPT_SIZE) {
+ len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+
+ switch (type) {
+ case L2CAP_CONF_MTU:
+ if (val < L2CAP_DEFAULT_MIN_MTU) {
+ *result = L2CAP_CONF_UNACCEPT;
+ pi->omtu = L2CAP_DEFAULT_MIN_MTU;
+ } else
+ pi->omtu = val;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
+ break;
+
+ case L2CAP_CONF_FLUSH_TO:
+ pi->flush_to = val;
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
+ 2, pi->flush_to);
+ break;
+
+ case L2CAP_CONF_RFC:
+ if (olen == sizeof(rfc))
+ memcpy(&rfc, (void *)val, olen);
+
+ if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
+ rfc.mode != pi->mode)
+ return -ECONNREFUSED;
+
+ pi->mode = rfc.mode;
+ pi->fcs = 0;
+
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+ sizeof(rfc), (unsigned long) &rfc);
+ break;
+ }
+ }
+
+ if (*result == L2CAP_CONF_SUCCESS) {
+ switch (rfc.mode) {
+ case L2CAP_MODE_ERTM:
+ pi->remote_tx_win = rfc.txwin_size;
+ pi->retrans_timeout = rfc.retrans_timeout;
+ pi->monitor_timeout = rfc.monitor_timeout;
+ pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
+ break;
+ case L2CAP_MODE_STREAMING:
+ pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
+ break;
+ }
+ }
+
+ req->dcid = cpu_to_le16(pi->dcid);
+ req->flags = cpu_to_le16(0x0000);
+
+ return ptr - data;
+}
+
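l2cap_get_conf_opt() and l2cap_add_conf_opt() walk configuration options as type/length/value records, two header bytes (L2CAP_CONF_OPT_SIZE) followed by the value. A minimal sketch of such a walk; the 0x01 type code for MTU and the high-bit hint convention are assumptions about the option encoding, not values shown in this hunk:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define CONF_OPT_HDR 2          /* 1-byte type + 1-byte length */

/* Walk a buffer of configuration options and print what is found.
 * Options whose type has the high bit set are treated as hints that
 * may be skipped, matching the "if (hint)" handling above. */
static void walk_conf_opts(const uint8_t *buf, size_t len)
{
        while (len >= CONF_OPT_HDR) {
                uint8_t type = buf[0];
                uint8_t olen = buf[1];

                if (len < (size_t)CONF_OPT_HDR + olen)
                        break;                  /* truncated option */

                printf("type 0x%02x (hint=%d), %u value bytes\n",
                       type & 0x7f, !!(type & 0x80), olen);

                buf += CONF_OPT_HDR + olen;
                len -= CONF_OPT_HDR + olen;
        }
}

int main(void)
{
        /* an MTU option (assumed type 0x01, value 672 little-endian)
         * followed by a made-up one-byte hint option */
        const uint8_t opts[] = { 0x01, 0x02, 0xa0, 0x02, 0x84, 0x01, 0x00 };

        walk_conf_opts(opts, sizeof(opts));
        return 0;
}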
static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
{
struct l2cap_conf_rsp *rsp = data;
@@ -1994,6 +2673,7 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(sk, req), req);
+ l2cap_pi(sk)->num_conf_req++;
break;
case L2CAP_CR_PEND:
@@ -2052,10 +2732,13 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
/* Complete config. */
len = l2cap_parse_conf_req(sk, rsp);
- if (len < 0)
+ if (len < 0) {
+ l2cap_send_disconn_req(conn, sk);
goto unlock;
+ }
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
+ l2cap_pi(sk)->num_conf_rsp++;
/* Reset config buffer. */
l2cap_pi(sk)->conf_len = 0;
@@ -2064,7 +2747,22 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
goto unlock;
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
+ if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
+ || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
+ l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
+
sk->sk_state = BT_CONNECTED;
+ l2cap_pi(sk)->next_tx_seq = 0;
+ l2cap_pi(sk)->expected_ack_seq = 0;
+ l2cap_pi(sk)->unacked_frames = 0;
+
+ setup_timer(&l2cap_pi(sk)->retrans_timer,
+ l2cap_retrans_timeout, (unsigned long) sk);
+ setup_timer(&l2cap_pi(sk)->monitor_timer,
+ l2cap_monitor_timeout, (unsigned long) sk);
+
+ __skb_queue_head_init(TX_QUEUE(sk));
+ __skb_queue_head_init(SREJ_QUEUE(sk));
l2cap_chan_ready(sk);
goto unlock;
}
@@ -2073,6 +2771,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr
u8 buf[64];
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(sk, buf), buf);
+ l2cap_pi(sk)->num_conf_req++;
}
unlock:
@@ -2102,29 +2801,32 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
break;
case L2CAP_CONF_UNACCEPT:
- if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
- char req[128];
- /* It does not make sense to adjust L2CAP parameters
- * that are currently defined in the spec. We simply
- * resend config request that we sent earlier. It is
- * stupid, but it helps qualification testing which
- * expects at least some response from us. */
- l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(sk, req), req);
- goto done;
+ if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
+ int len = cmd->len - sizeof(*rsp);
+ char req[64];
+
+ /* throw out any old stored conf requests */
+ result = L2CAP_CONF_SUCCESS;
+ len = l2cap_parse_conf_rsp(sk, rsp->data,
+ len, req, &result);
+ if (len < 0) {
+ l2cap_send_disconn_req(conn, sk);
+ goto done;
+ }
+
+ l2cap_send_cmd(conn, l2cap_get_ident(conn),
+ L2CAP_CONF_REQ, len, req);
+ l2cap_pi(sk)->num_conf_req++;
+ if (result != L2CAP_CONF_SUCCESS)
+ goto done;
+ break;
}
default:
sk->sk_state = BT_DISCONN;
sk->sk_err = ECONNRESET;
l2cap_sock_set_timer(sk, HZ * 5);
- {
- struct l2cap_disconn_req req;
- req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
- req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
- l2cap_send_cmd(conn, l2cap_get_ident(conn),
- L2CAP_DISCONN_REQ, sizeof(req), &req);
- }
+ l2cap_send_disconn_req(conn, sk);
goto done;
}
@@ -2134,7 +2836,16 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr
l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
+ if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
+ || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
+ l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
+
sk->sk_state = BT_CONNECTED;
+ l2cap_pi(sk)->expected_tx_seq = 0;
+ l2cap_pi(sk)->buffer_seq = 0;
+ l2cap_pi(sk)->num_to_ack = 0;
+ __skb_queue_head_init(TX_QUEUE(sk));
+ __skb_queue_head_init(SREJ_QUEUE(sk));
l2cap_chan_ready(sk);
}
@@ -2165,6 +2876,11 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd
sk->sk_shutdown = SHUTDOWN_MASK;
+ skb_queue_purge(TX_QUEUE(sk));
+ skb_queue_purge(SREJ_QUEUE(sk));
+ del_timer(&l2cap_pi(sk)->retrans_timer);
+ del_timer(&l2cap_pi(sk)->monitor_timer);
+
l2cap_chan_del(sk, ECONNRESET);
bh_unlock_sock(sk);
@@ -2187,6 +2903,11 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd
if (!sk)
return 0;
+ skb_queue_purge(TX_QUEUE(sk));
+ skb_queue_purge(SREJ_QUEUE(sk));
+ del_timer(&l2cap_pi(sk)->retrans_timer);
+ del_timer(&l2cap_pi(sk)->monitor_timer);
+
l2cap_chan_del(sk, 0);
bh_unlock_sock(sk);
@@ -2205,10 +2926,14 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm
if (type == L2CAP_IT_FEAT_MASK) {
u8 buf[8];
+ u32 feat_mask = l2cap_feat_mask;
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
- put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
+ if (enable_ertm)
+ feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
+ | L2CAP_FEAT_FCS;
+ put_unaligned_le32(feat_mask, rsp->data);
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(buf), buf);
} else if (type == L2CAP_IT_FIXED_CHAN) {
@@ -2359,9 +3084,374 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *sk
kfree_skb(skb);
}
+static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
+{
+ u16 our_fcs, rcv_fcs;
+ int hdr_size = L2CAP_HDR_SIZE + 2;
+
+ if (pi->fcs == L2CAP_FCS_CRC16) {
+ skb_trim(skb, skb->len - 2);
+ rcv_fcs = get_unaligned_le16(skb->data + skb->len);
+ our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
+
+ if (our_fcs != rcv_fcs)
+ return -EINVAL;
+ }
+ return 0;
+}
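l2cap_check_fcs() recomputes the checksum over the basic L2CAP header, the control field and the payload, and compares it with the two trailing FCS bytes. The crc16() helper comes from <linux/crc16.h>, included at the top of this patch; the sketch below assumes it is the reflected 0x8005 polynomial (0xA001) with a zero seed, which is worth verifying against lib/crc16.c before relying on it:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Bitwise CRC-16, reflected 0x8005 polynomial, zero seed (assumed to
 * match the kernel's crc16()). */
static uint16_t crc16_sketch(uint16_t crc, const uint8_t *buf, size_t len)
{
        while (len--) {
                crc ^= *buf++;
                for (int i = 0; i < 8; i++)
                        crc = (crc & 1) ? (crc >> 1) ^ 0xA001 : crc >> 1;
        }
        return crc;
}

int main(void)
{
        /* FCS over an imaginary 4-byte L2CAP header plus 2-byte control field */
        const uint8_t frame[] = { 0x02, 0x00, 0x40, 0x00, 0x01, 0x00 };

        printf("fcs = 0x%04x\n", crc16_sketch(0, frame, sizeof(frame)));
        return 0;
}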
+
+static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
+{
+ struct sk_buff *next_skb;
+
+ bt_cb(skb)->tx_seq = tx_seq;
+ bt_cb(skb)->sar = sar;
+
+ next_skb = skb_peek(SREJ_QUEUE(sk));
+ if (!next_skb) {
+ __skb_queue_tail(SREJ_QUEUE(sk), skb);
+ return;
+ }
+
+ do {
+ if (bt_cb(next_skb)->tx_seq > tx_seq) {
+ __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
+ return;
+ }
+
+ if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
+ break;
+
+ } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
+
+ __skb_queue_tail(SREJ_QUEUE(sk), skb);
+}
+
+static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct sk_buff *_skb;
+ int err = -EINVAL;
+
+ switch (control & L2CAP_CTRL_SAR) {
+ case L2CAP_SDU_UNSEGMENTED:
+ if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
+ kfree_skb(pi->sdu);
+ break;
+ }
+
+ err = sock_queue_rcv_skb(sk, skb);
+ if (!err)
+ return 0;
+
+ break;
+
+ case L2CAP_SDU_START:
+ if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
+ kfree_skb(pi->sdu);
+ break;
+ }
+
+ pi->sdu_len = get_unaligned_le16(skb->data);
+ skb_pull(skb, 2);
+
+ pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
+ if (!pi->sdu) {
+ err = -ENOMEM;
+ break;
+ }
+
+ memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+
+ pi->conn_state |= L2CAP_CONN_SAR_SDU;
+ pi->partial_sdu_len = skb->len;
+ err = 0;
+ break;
+
+ case L2CAP_SDU_CONTINUE:
+ if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
+ break;
+
+ memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+
+ pi->partial_sdu_len += skb->len;
+ if (pi->partial_sdu_len > pi->sdu_len)
+ kfree_skb(pi->sdu);
+ else
+ err = 0;
+
+ break;
+
+ case L2CAP_SDU_END:
+ if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
+ break;
+
+ memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
+
+ pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
+ pi->partial_sdu_len += skb->len;
+
+ if (pi->partial_sdu_len == pi->sdu_len) {
+ _skb = skb_clone(pi->sdu, GFP_ATOMIC);
+ err = sock_queue_rcv_skb(sk, _skb);
+ if (err < 0)
+ kfree_skb(_skb);
+ }
+ kfree_skb(pi->sdu);
+ err = 0;
+
+ break;
+ }
+
+ kfree_skb(skb);
+ return err;
+}
+
+static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
+{
+ struct sk_buff *skb;
+ u16 control = 0;
+
+ while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
+ if (bt_cb(skb)->tx_seq != tx_seq)
+ break;
+
+ skb = skb_dequeue(SREJ_QUEUE(sk));
+ control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
+ l2cap_sar_reassembly_sdu(sk, skb, control);
+ l2cap_pi(sk)->buffer_seq_srej =
+ (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
+ tx_seq++;
+ }
+}
+
+static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct srej_list *l, *tmp;
+ u16 control;
+
+ list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
+ if (l->tx_seq == tx_seq) {
+ list_del(&l->list);
+ kfree(l);
+ return;
+ }
+ control = L2CAP_SUPER_SELECT_REJECT;
+ control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ l2cap_send_sframe(pi, control);
+ list_del(&l->list);
+ list_add_tail(&l->list, SREJ_LIST(sk));
+ }
+}
+
+static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ struct srej_list *new;
+ u16 control;
+
+ while (tx_seq != pi->expected_tx_seq) {
+ control = L2CAP_SUPER_SELECT_REJECT;
+ control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
+ control |= L2CAP_CTRL_POLL;
+ pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
+ }
+ l2cap_send_sframe(pi, control);
+
+ new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
+ new->tx_seq = pi->expected_tx_seq++;
+ list_add_tail(&new->list, SREJ_LIST(sk));
+ }
+ pi->expected_tx_seq++;
+}
+
+static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ u8 tx_seq = __get_txseq(rx_control);
+ u16 tx_control = 0;
+ u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
+ int err = 0;
+
+ BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
+
+ if (tx_seq == pi->expected_tx_seq)
+ goto expected;
+
+ if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
+ struct srej_list *first;
+
+ first = list_first_entry(SREJ_LIST(sk),
+ struct srej_list, list);
+ if (tx_seq == first->tx_seq) {
+ l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
+ l2cap_check_srej_gap(sk, tx_seq);
+
+ list_del(&first->list);
+ kfree(first);
+
+ if (list_empty(SREJ_LIST(sk))) {
+ pi->buffer_seq = pi->buffer_seq_srej;
+ pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
+ }
+ } else {
+ struct srej_list *l;
+ l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
+
+ list_for_each_entry(l, SREJ_LIST(sk), list) {
+ if (l->tx_seq == tx_seq) {
+ l2cap_resend_srejframe(sk, tx_seq);
+ return 0;
+ }
+ }
+ l2cap_send_srejframe(sk, tx_seq);
+ }
+ } else {
+ pi->conn_state |= L2CAP_CONN_SREJ_SENT;
+
+ INIT_LIST_HEAD(SREJ_LIST(sk));
+ pi->buffer_seq_srej = pi->buffer_seq;
+
+ __skb_queue_head_init(SREJ_QUEUE(sk));
+ l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
+
+ pi->conn_state |= L2CAP_CONN_SEND_PBIT;
+
+ l2cap_send_srejframe(sk, tx_seq);
+ }
+ return 0;
+
+expected:
+ pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
+
+ if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
+ l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
+ return 0;
+ }
+
+ pi->buffer_seq = (pi->buffer_seq + 1) % 64;
+
+ err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
+ if (err < 0)
+ return err;
+
+ pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
+ if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
+ tx_control |= L2CAP_SUPER_RCV_READY;
+ tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
+ l2cap_send_sframe(pi, tx_control);
+ }
+ return 0;
+}
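When a frame arrives out of order, l2cap_send_srejframe() above emits one SREJ per sequence number between the expected one and the one that actually showed up. A small sketch of that gap walk in the modulo-64 sequence space:

#include <stdio.h>

/* List the sequence numbers that get selectively rejected when tx_seq
 * arrives while expected_tx_seq was still the next one we wanted. */
static void srej_gap(unsigned int expected_tx_seq, unsigned int tx_seq)
{
        while (expected_tx_seq != tx_seq) {
                printf("SREJ for seq %u\n", expected_tx_seq);
                expected_tx_seq = (expected_tx_seq + 1) % 64;
        }
}

int main(void)
{
        srej_gap(62, 1);        /* frames 62, 63 and 0 went missing */
        return 0;
}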
+
+static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
+{
+ struct l2cap_pinfo *pi = l2cap_pi(sk);
+ u8 tx_seq = __get_reqseq(rx_control);
+
+ BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
+
+ switch (rx_control & L2CAP_CTRL_SUPERVISE) {
+ case L2CAP_SUPER_RCV_READY:
+ if (rx_control & L2CAP_CTRL_POLL) {
+ u16 control = L2CAP_CTRL_FINAL;
+ control |= L2CAP_SUPER_RCV_READY |
+ (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
+ l2cap_send_sframe(l2cap_pi(sk), control);
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+ } else if (rx_control & L2CAP_CTRL_FINAL) {
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ pi->expected_ack_seq = tx_seq;
+ l2cap_drop_acked_frames(sk);
+
+ if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
+ break;
+
+ pi->conn_state &= ~L2CAP_CONN_WAIT_F;
+ del_timer(&pi->monitor_timer);
+
+ if (pi->unacked_frames > 0)
+ __mod_retrans_timer();
+ } else {
+ pi->expected_ack_seq = tx_seq;
+ l2cap_drop_acked_frames(sk);
+
+ if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
+ && (pi->unacked_frames > 0))
+ __mod_retrans_timer();
+
+ l2cap_ertm_send(sk);
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+ }
+ break;
+
+ case L2CAP_SUPER_REJECT:
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+ pi->expected_ack_seq = __get_reqseq(rx_control);
+ l2cap_drop_acked_frames(sk);
+
+ sk->sk_send_head = TX_QUEUE(sk)->next;
+ pi->next_tx_seq = pi->expected_ack_seq;
+
+ l2cap_ertm_send(sk);
+
+ break;
+
+ case L2CAP_SUPER_SELECT_REJECT:
+ pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
+
+ if (rx_control & L2CAP_CTRL_POLL) {
+ l2cap_retransmit_frame(sk, tx_seq);
+ pi->expected_ack_seq = tx_seq;
+ l2cap_drop_acked_frames(sk);
+ l2cap_ertm_send(sk);
+ if (pi->conn_state & L2CAP_CONN_WAIT_F) {
+ pi->srej_save_reqseq = tx_seq;
+ pi->conn_state |= L2CAP_CONN_SREJ_ACT;
+ }
+ } else if (rx_control & L2CAP_CTRL_FINAL) {
+ if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
+ pi->srej_save_reqseq == tx_seq)
+ pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
+ else
+ l2cap_retransmit_frame(sk, tx_seq);
+ } else {
+ l2cap_retransmit_frame(sk, tx_seq);
+ if (pi->conn_state & L2CAP_CONN_WAIT_F) {
+ pi->srej_save_reqseq = tx_seq;
+ pi->conn_state |= L2CAP_CONN_SREJ_ACT;
+ }
+ }
+ break;
+
+ case L2CAP_SUPER_RCV_NOT_READY:
+ pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
+ pi->expected_ack_seq = tx_seq;
+ l2cap_drop_acked_frames(sk);
+
+ del_timer(&l2cap_pi(sk)->retrans_timer);
+ if (rx_control & L2CAP_CTRL_POLL) {
+ u16 control = L2CAP_CTRL_FINAL;
+ l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
+ }
+ break;
+ }
+
+ return 0;
+}
+
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
struct sock *sk;
+ struct l2cap_pinfo *pi;
+ u16 control, len;
+ u8 tx_seq;
+ int err;
sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
if (!sk) {
@@ -2369,22 +3459,91 @@ static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk
goto drop;
}
+ pi = l2cap_pi(sk);
+
BT_DBG("sk %p, len %d", sk, skb->len);
if (sk->sk_state != BT_CONNECTED)
goto drop;
- if (l2cap_pi(sk)->imtu < skb->len)
- goto drop;
+ switch (pi->mode) {
+ case L2CAP_MODE_BASIC:
+ /* If the socket receive buffer overflows we drop data here,
+ * which is *bad* because L2CAP has to be reliable.
+ * But we don't have any other choice: L2CAP doesn't
+ * provide a flow control mechanism. */
- /* If socket recv buffers overflows we drop data here
- * which is *bad* because L2CAP has to be reliable.
- * But we don't have any other choice. L2CAP doesn't
- * provide flow control mechanism. */
+ if (pi->imtu < skb->len)
+ goto drop;
+
+ if (!sock_queue_rcv_skb(sk, skb))
+ goto done;
+ break;
+
+ case L2CAP_MODE_ERTM:
+ control = get_unaligned_le16(skb->data);
+ skb_pull(skb, 2);
+ len = skb->len;
+
+ if (__is_sar_start(control))
+ len -= 2;
+
+ if (pi->fcs == L2CAP_FCS_CRC16)
+ len -= 2;
+
+ /*
+ * We can just drop the corrupted I-frame here.
+ * The receive side will notice the gap, start the proper
+ * recovery procedure and ask for retransmission.
+ */
+ if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
+ goto drop;
+
+ if (l2cap_check_fcs(pi, skb))
+ goto drop;
+
+ if (__is_iframe(control))
+ err = l2cap_data_channel_iframe(sk, control, skb);
+ else
+ err = l2cap_data_channel_sframe(sk, control, skb);
+
+ if (!err)
+ goto done;
+ break;
+
+ case L2CAP_MODE_STREAMING:
+ control = get_unaligned_le16(skb->data);
+ skb_pull(skb, 2);
+ len = skb->len;
+
+ if (__is_sar_start(control))
+ len -= 2;
+
+ if (pi->fcs == L2CAP_FCS_CRC16)
+ len -= 2;
+
+ if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
+ goto drop;
+
+ if (l2cap_check_fcs(pi, skb))
+ goto drop;
+
+ tx_seq = __get_txseq(control);
+
+ if (pi->expected_tx_seq == tx_seq)
+ pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
+ else
+ pi->expected_tx_seq = (tx_seq + 1) % 64;
+
+ err = l2cap_sar_reassembly_sdu(sk, skb, control);
- if (!sock_queue_rcv_skb(sk, skb))
goto done;
+ default:
+ BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
+ break;
+ }
+
drop:
kfree_skb(skb);
@@ -2433,6 +3592,11 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
cid = __le16_to_cpu(lh->cid);
len = __le16_to_cpu(lh->len);
+ if (len != skb->len) {
+ kfree_skb(skb);
+ return;
+ }
+
BT_DBG("len %d, cid 0x%4.4x", len, cid);
switch (cid) {
@@ -2441,7 +3605,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
break;
case L2CAP_CID_CONN_LESS:
- psm = get_unaligned((__le16 *) skb->data);
+ psm = get_unaligned_le16(skb->data);
skb_pull(skb, 2);
l2cap_conless_channel(conn, psm, skb);
break;
@@ -2828,6 +3992,9 @@ EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);
+module_param(enable_ertm, bool, 0644);
+MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
+
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
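With this patch, ERTM and streaming mode stay disabled unless the module is loaded with enable_ertm=1 (for example via modprobe l2cap enable_ertm=1); a socket then opts in through the mode field of its L2CAP options before connect() or listen(), which otherwise fail with -ENOTSUPP as shown above. A rough userspace sketch, assuming the BlueZ <bluetooth/bluetooth.h> and <bluetooth/l2cap.h> headers provide SOL_L2CAP, L2CAP_OPTIONS and L2CAP_MODE_ERTM:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/l2cap.h>

int main(void)
{
        struct l2cap_options opts;
        socklen_t optlen = sizeof(opts);
        int sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);

        if (sk < 0)
                return 1;

        /* read the defaults, then ask for enhanced retransmission mode */
        if (getsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS, &opts, &optlen) < 0)
                return 1;

        opts.mode = L2CAP_MODE_ERTM;
        if (setsockopt(sk, SOL_L2CAP, L2CAP_OPTIONS, &opts, sizeof(opts)) < 0)
                perror("setsockopt");

        /* connect() or listen() would follow here and return -ENOTSUPP
         * unless the module was loaded with enable_ertm=1 */
        close(sk);
        return 0;
}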
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 94b3388c188..25692bc0a34 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -244,6 +244,33 @@ static inline int rfcomm_check_security(struct rfcomm_dlc *d)
auth_type);
}
+static void rfcomm_session_timeout(unsigned long arg)
+{
+ struct rfcomm_session *s = (void *) arg;
+
+ BT_DBG("session %p state %ld", s, s->state);
+
+ set_bit(RFCOMM_TIMED_OUT, &s->flags);
+ rfcomm_session_put(s);
+ rfcomm_schedule(RFCOMM_SCHED_TIMEO);
+}
+
+static void rfcomm_session_set_timer(struct rfcomm_session *s, long timeout)
+{
+ BT_DBG("session %p state %ld timeout %ld", s, s->state, timeout);
+
+ if (!mod_timer(&s->timer, jiffies + timeout))
+ rfcomm_session_hold(s);
+}
+
+static void rfcomm_session_clear_timer(struct rfcomm_session *s)
+{
+ BT_DBG("session %p state %ld", s, s->state);
+
+ if (timer_pending(&s->timer) && del_timer(&s->timer))
+ rfcomm_session_put(s);
+}
+
/* ---- RFCOMM DLCs ---- */
static void rfcomm_dlc_timeout(unsigned long arg)
{
@@ -320,6 +347,7 @@ static void rfcomm_dlc_link(struct rfcomm_session *s, struct rfcomm_dlc *d)
rfcomm_session_hold(s);
+ rfcomm_session_clear_timer(s);
rfcomm_dlc_hold(d);
list_add(&d->list, &s->dlcs);
d->session = s;
@@ -335,6 +363,9 @@ static void rfcomm_dlc_unlink(struct rfcomm_dlc *d)
d->session = NULL;
rfcomm_dlc_put(d);
+ if (list_empty(&s->dlcs))
+ rfcomm_session_set_timer(s, RFCOMM_IDLE_TIMEOUT);
+
rfcomm_session_put(s);
}
@@ -567,6 +598,8 @@ static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state)
BT_DBG("session %p sock %p", s, sock);
+ setup_timer(&s->timer, rfcomm_session_timeout, (unsigned long) s);
+
INIT_LIST_HEAD(&s->dlcs);
s->state = state;
s->sock = sock;
@@ -598,6 +631,7 @@ static void rfcomm_session_del(struct rfcomm_session *s)
if (state == BT_CONNECTED)
rfcomm_send_disc(s, 0);
+ rfcomm_session_clear_timer(s);
sock_release(s->sock);
kfree(s);
@@ -639,6 +673,7 @@ static void rfcomm_session_close(struct rfcomm_session *s, int err)
__rfcomm_dlc_close(d, err);
}
+ rfcomm_session_clear_timer(s);
rfcomm_session_put(s);
}
@@ -1879,6 +1914,12 @@ static inline void rfcomm_process_sessions(void)
struct rfcomm_session *s;
s = list_entry(p, struct rfcomm_session, list);
+ if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) {
+ s->state = BT_DISCONN;
+ rfcomm_send_disc(s, 0);
+ continue;
+ }
+
if (s->state == BT_LISTEN) {
rfcomm_accept_connection(s);
continue;
@@ -2080,7 +2121,7 @@ static CLASS_ATTR(rfcomm_dlc, S_IRUGO, rfcomm_dlc_sysfs_show, NULL);
/* ---- Initialization ---- */
static int __init rfcomm_init(void)
{
- int ret;
+ int err;
l2cap_load();
@@ -2088,33 +2129,35 @@ static int __init rfcomm_init(void)
rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd");
if (IS_ERR(rfcomm_thread)) {
- ret = PTR_ERR(rfcomm_thread);
- goto out_thread;
+ err = PTR_ERR(rfcomm_thread);
+ goto unregister;
}
if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0)
BT_ERR("Failed to create RFCOMM info file");
- ret = rfcomm_init_ttys();
- if (ret)
- goto out_tty;
+ err = rfcomm_init_ttys();
+ if (err < 0)
+ goto stop;
- ret = rfcomm_init_sockets();
- if (ret)
- goto out_sock;
+ err = rfcomm_init_sockets();
+ if (err < 0)
+ goto cleanup;
BT_INFO("RFCOMM ver %s", VERSION);
return 0;
-out_sock:
+cleanup:
rfcomm_cleanup_ttys();
-out_tty:
+
+stop:
kthread_stop(rfcomm_thread);
-out_thread:
+
+unregister:
hci_unregister_cb(&rfcomm_cb);
- return ret;
+ return err;
}
static void __exit rfcomm_exit(void)
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 51ae0c3e470..13c27f17192 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -359,20 +359,9 @@ static void sco_sock_kill(struct sock *sk)
sock_put(sk);
}
-/* Close socket.
- * Must be called on unlocked socket.
- */
-static void sco_sock_close(struct sock *sk)
+static void __sco_sock_close(struct sock *sk)
{
- struct sco_conn *conn;
-
- sco_sock_clear_timer(sk);
-
- lock_sock(sk);
-
- conn = sco_pi(sk)->conn;
-
- BT_DBG("sk %p state %d conn %p socket %p", sk, sk->sk_state, conn, sk->sk_socket);
+ BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
switch (sk->sk_state) {
case BT_LISTEN:
@@ -390,9 +379,15 @@ static void sco_sock_close(struct sock *sk)
sock_set_flag(sk, SOCK_ZAPPED);
break;
}
+}
+/* Must be called on unlocked socket. */
+static void sco_sock_close(struct sock *sk)
+{
+ sco_sock_clear_timer(sk);
+ lock_sock(sk);
+ __sco_sock_close(sk);
release_sock(sk);
-
sco_sock_kill(sk);
}
@@ -748,6 +743,30 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char
return err;
}
+static int sco_sock_shutdown(struct socket *sock, int how)
+{
+ struct sock *sk = sock->sk;
+ int err = 0;
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
+ if (!sk)
+ return 0;
+
+ lock_sock(sk);
+ if (!sk->sk_shutdown) {
+ sk->sk_shutdown = SHUTDOWN_MASK;
+ sco_sock_clear_timer(sk);
+ __sco_sock_close(sk);
+
+ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
+ err = bt_sock_wait_state(sk, BT_CLOSED,
+ sk->sk_lingertime);
+ }
+ release_sock(sk);
+ return err;
+}
+
static int sco_sock_release(struct socket *sock)
{
struct sock *sk = sock->sk;
@@ -969,7 +988,7 @@ static const struct proto_ops sco_sock_ops = {
.ioctl = bt_sock_ioctl,
.mmap = sock_no_mmap,
.socketpair = sock_no_socketpair,
- .shutdown = sock_no_shutdown,
+ .shutdown = sco_sock_shutdown,
.setsockopt = sco_sock_setsockopt,
.getsockopt = sco_sock_getsockopt
};
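Wiring sco_sock_shutdown() into sco_sock_ops means a plain shutdown() now closes the SCO link, and with SO_LINGER set it waits for the socket to reach BT_CLOSED up to the linger time. A short userspace sketch of that pattern; the connect() step to an actual headset is left out:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>

int main(void)
{
        struct linger lg = { .l_onoff = 1, .l_linger = 5 };     /* wait up to 5s */
        int sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_SCO);

        if (sk < 0)
                return 1;

        /* ... connect() to the remote SCO endpoint would go here ... */

        setsockopt(sk, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
        if (shutdown(sk, SHUT_RDWR) < 0)
                perror("shutdown");     /* ENOTCONN here, nothing was connected */

        close(sk);
        return 0;
}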
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 18538d7460d..07a07770c8b 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -20,7 +20,7 @@
#include "br_private.h"
/* net device transmit always called with no BH (preempt_disabled) */
-int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
const unsigned char *dest = skb->data;
@@ -39,7 +39,7 @@ int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
else
br_flood_deliver(br, skb);
- return 0;
+ return NETDEV_TX_OK;
}
static int br_dev_open(struct net_device *dev)
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index d2c27c808d3..bc1704ac6cd 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -22,7 +22,8 @@
static inline int should_deliver(const struct net_bridge_port *p,
const struct sk_buff *skb)
{
- return (skb->dev != p->dev && p->state == BR_STATE_FORWARDING);
+ return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
+ p->state == BR_STATE_FORWARDING);
}
static inline unsigned packet_length(const struct sk_buff *skb)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index eb404dc3ed6..142ebac1417 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -256,6 +256,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
p->path_cost = port_cost(dev);
p->priority = 0x8000 >> BR_PORT_BITS;
p->port_no = index;
+ p->flags = 0;
br_init_port(p);
p->state = BR_STATE_DISABLED;
br_stp_port_timer_init(p);
@@ -263,6 +264,10 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
return p;
}
+static struct device_type br_type = {
+ .name = "bridge",
+};
+
int br_add_bridge(struct net *net, const char *name)
{
struct net_device *dev;
@@ -279,6 +284,8 @@ int br_add_bridge(struct net *net, const char *name)
goto out_free;
}
+ SET_NETDEV_DEVTYPE(dev, &br_type);
+
ret = register_netdevice(dev);
if (ret)
goto out_free;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index d22f611e400..907a82e9023 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -359,7 +359,7 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
},
.proto = 0,
};
- struct in_device *in_dev = in_dev_get(dev);
+ struct in_device *in_dev = __in_dev_get_rcu(dev);
/* If err equals -EHOSTUNREACH the error is due to a
* martian destination or due to the fact that
@@ -905,46 +905,62 @@ static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb,
* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
* ip_refrag() can return NF_STOLEN. */
static struct nf_hook_ops br_nf_ops[] __read_mostly = {
- { .hook = br_nf_pre_routing,
- .owner = THIS_MODULE,
- .pf = PF_BRIDGE,
- .hooknum = NF_BR_PRE_ROUTING,
- .priority = NF_BR_PRI_BRNF, },
- { .hook = br_nf_local_in,
- .owner = THIS_MODULE,
- .pf = PF_BRIDGE,
- .hooknum = NF_BR_LOCAL_IN,
- .priority = NF_BR_PRI_BRNF, },
- { .hook = br_nf_forward_ip,
- .owner = THIS_MODULE,
- .pf = PF_BRIDGE,
- .hooknum = NF_BR_FORWARD,
- .priority = NF_BR_PRI_BRNF - 1, },
- { .hook = br_nf_forward_arp,
- .owner = THIS_MODULE,
- .pf = PF_BRIDGE,
- .hooknum = NF_BR_FORWARD,
- .priority = NF_BR_PRI_BRNF, },
- { .hook = br_nf_local_out,
- .owner = THIS_MODULE,
- .pf = PF_BRIDGE,
- .hooknum = NF_BR_LOCAL_OUT,
- .priority = NF_BR_PRI_FIRST, },
- { .hook = br_nf_post_routing,
- .owner = THIS_MODULE,
- .pf = PF_BRIDGE,
- .hooknum = NF_BR_POST_ROUTING,
- .priority = NF_BR_PRI_LAST, },
- { .hook = ip_sabotage_in,
- .owner = THIS_MODULE,
- .pf = PF_INET,
- .hooknum = NF_INET_PRE_ROUTING,
- .priority = NF_IP_PRI_FIRST, },
- { .hook = ip_sabotage_in,
- .owner = THIS_MODULE,
- .pf = PF_INET6,
- .hooknum = NF_INET_PRE_ROUTING,
- .priority = NF_IP6_PRI_FIRST, },
+ {
+ .hook = br_nf_pre_routing,
+ .owner = THIS_MODULE,
+ .pf = PF_BRIDGE,
+ .hooknum = NF_BR_PRE_ROUTING,
+ .priority = NF_BR_PRI_BRNF,
+ },
+ {
+ .hook = br_nf_local_in,
+ .owner = THIS_MODULE,
+ .pf = PF_BRIDGE,
+ .hooknum = NF_BR_LOCAL_IN,
+ .priority = NF_BR_PRI_BRNF,
+ },
+ {
+ .hook = br_nf_forward_ip,
+ .owner = THIS_MODULE,
+ .pf = PF_BRIDGE,
+ .hooknum = NF_BR_FORWARD,
+ .priority = NF_BR_PRI_BRNF - 1,
+ },
+ {
+ .hook = br_nf_forward_arp,
+ .owner = THIS_MODULE,
+ .pf = PF_BRIDGE,
+ .hooknum = NF_BR_FORWARD,
+ .priority = NF_BR_PRI_BRNF,
+ },
+ {
+ .hook = br_nf_local_out,
+ .owner = THIS_MODULE,
+ .pf = PF_BRIDGE,
+ .hooknum = NF_BR_LOCAL_OUT,
+ .priority = NF_BR_PRI_FIRST,
+ },
+ {
+ .hook = br_nf_post_routing,
+ .owner = THIS_MODULE,
+ .pf = PF_BRIDGE,
+ .hooknum = NF_BR_POST_ROUTING,
+ .priority = NF_BR_PRI_LAST,
+ },
+ {
+ .hook = ip_sabotage_in,
+ .owner = THIS_MODULE,
+ .pf = PF_INET,
+ .hooknum = NF_INET_PRE_ROUTING,
+ .priority = NF_IP_PRI_FIRST,
+ },
+ {
+ .hook = ip_sabotage_in,
+ .owner = THIS_MODULE,
+ .pf = PF_INET6,
+ .hooknum = NF_INET_PRE_ROUTING,
+ .priority = NF_IP6_PRI_FIRST,
+ },
};
#ifdef CONFIG_SYSCTL
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index d5b5537272b..2114e45682e 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -81,6 +81,9 @@ struct net_bridge_port
struct timer_list message_age_timer;
struct kobject kobj;
struct rcu_head rcu;
+
+ unsigned long flags;
+#define BR_HAIRPIN_MODE 0x00000001
};
struct net_bridge
@@ -140,7 +143,8 @@ static inline int br_is_root_bridge(const struct net_bridge *br)
/* br_device.c */
extern void br_dev_setup(struct net_device *dev);
-extern int br_dev_xmit(struct sk_buff *skb, struct net_device *dev);
+extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
+ struct net_device *dev);
/* br_fdb.c */
extern int br_fdb_init(void);
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 0660515f399..fd3f8d6c099 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -21,7 +21,7 @@
*/
#define MESSAGE_AGE_INCR ((HZ < 256) ? 1 : (HZ/256))
-static const char *br_port_state_names[] = {
+static const char *const br_port_state_names[] = {
[BR_STATE_DISABLED] = "disabled",
[BR_STATE_LISTENING] = "listening",
[BR_STATE_LEARNING] = "learning",
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 4a3cdf8f381..820643a3ba9 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -143,6 +143,22 @@ static ssize_t store_flush(struct net_bridge_port *p, unsigned long v)
}
static BRPORT_ATTR(flush, S_IWUSR, NULL, store_flush);
+static ssize_t show_hairpin_mode(struct net_bridge_port *p, char *buf)
+{
+ int hairpin_mode = (p->flags & BR_HAIRPIN_MODE) ? 1 : 0;
+ return sprintf(buf, "%d\n", hairpin_mode);
+}
+static ssize_t store_hairpin_mode(struct net_bridge_port *p, unsigned long v)
+{
+ if (v)
+ p->flags |= BR_HAIRPIN_MODE;
+ else
+ p->flags &= ~BR_HAIRPIN_MODE;
+ return 0;
+}
+static BRPORT_ATTR(hairpin_mode, S_IRUGO | S_IWUSR,
+ show_hairpin_mode, store_hairpin_mode);
+
static struct brport_attribute *brport_attrs[] = {
&brport_attr_path_cost,
&brport_attr_priority,
@@ -159,6 +175,7 @@ static struct brport_attribute *brport_attrs[] = {
&brport_attr_forward_delay_timer,
&brport_attr_hold_timer,
&brport_attr_flush,
+ &brport_attr_hairpin_mode,
NULL
};
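The new hairpin_mode attribute exposes BR_HAIRPIN_MODE per bridge port, and together with the should_deliver() change earlier in this patch it allows a frame to be forwarded back out the port it arrived on. A sketch of toggling it from userspace; the sysfs path is an assumption based on where the other brport attributes appear:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

/* Enable or disable hairpin mode on one bridge port. */
static int set_hairpin(const char *port, int on)
{
        char path[128];
        int fd, ok;

        snprintf(path, sizeof(path),
                 "/sys/class/net/%s/brport/hairpin_mode", port);

        fd = open(path, O_WRONLY);
        if (fd < 0)
                return -1;

        ok = write(fd, on ? "1" : "0", 1) == 1;
        close(fd);
        return ok ? 0 : -1;
}

int main(void)
{
        if (set_hairpin("eth0", 1) < 0)
                perror("hairpin_mode");
        return 0;
}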
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index a94f3cc377c..e4ea3fdd1d4 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -50,14 +50,6 @@ struct arppayload
unsigned char ip_dst[4];
};
-static void print_MAC(const unsigned char *p)
-{
- int i;
-
- for (i = 0; i < ETH_ALEN; i++, p++)
- printk("%02x%c", *p, i == ETH_ALEN - 1 ? ' ':':');
-}
-
static void
print_ports(const struct sk_buff *skb, uint8_t protocol, int offset)
{
@@ -88,14 +80,11 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum,
unsigned int bitmask;
spin_lock_bh(&ebt_log_lock);
- printk("<%c>%s IN=%s OUT=%s MAC source = ", '0' + loginfo->u.log.level,
- prefix, in ? in->name : "", out ? out->name : "");
-
- print_MAC(eth_hdr(skb)->h_source);
- printk("MAC dest = ");
- print_MAC(eth_hdr(skb)->h_dest);
-
- printk("proto = 0x%04x", ntohs(eth_hdr(skb)->h_proto));
+ printk("<%c>%s IN=%s OUT=%s MAC source = %pM MAC dest = %pM proto = 0x%04x",
+ '0' + loginfo->u.log.level, prefix,
+ in ? in->name : "", out ? out->name : "",
+ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+ ntohs(eth_hdr(skb)->h_proto));
if (loginfo->type == NF_LOG_TYPE_LOG)
bitmask = loginfo->u.log.logflags;
@@ -171,12 +160,8 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum,
printk(" INCOMPLETE ARP payload");
goto out;
}
- printk(" ARP MAC SRC=");
- print_MAC(ap->mac_src);
- printk(" ARP IP SRC=%pI4", ap->ip_src);
- printk(" ARP MAC DST=");
- print_MAC(ap->mac_dst);
- printk(" ARP IP DST=%pI4", ap->ip_dst);
+ printk(" ARP MAC SRC=%pM ARP IP SRC=%pI4 ARP MAC DST=%pM ARP IP DST=%pI4",
+ ap->mac_src, ap->ip_src, ap->mac_dst, ap->ip_dst);
}
}
out:
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 133eeae45a4..ce50688a643 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -266,7 +266,7 @@ static bool ebt_ulog_tg_check(const struct xt_tgchk_param *par)
if (uloginfo->qthreshold > EBT_ULOG_MAX_QLEN)
uloginfo->qthreshold = EBT_ULOG_MAX_QLEN;
- return 0;
+ return true;
}
static struct xt_target ebt_ulog_tg_reg __read_mostly = {
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index c751111440f..d32ab13e728 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -41,7 +41,7 @@ static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
return 0;
}
-static struct ebt_table broute_table =
+static const struct ebt_table broute_table =
{
.name = "broute",
.table = &initial_table,
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index a5eea72938a..60b1a6ca718 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -50,7 +50,7 @@ static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
return 0;
}
-static struct ebt_table frame_filter =
+static const struct ebt_table frame_filter =
{
.name = "filter",
.table = &initial_table,
@@ -77,21 +77,21 @@ static struct nf_hook_ops ebt_ops_filter[] __read_mostly = {
{
.hook = ebt_in_hook,
.owner = THIS_MODULE,
- .pf = PF_BRIDGE,
+ .pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_LOCAL_IN,
.priority = NF_BR_PRI_FILTER_BRIDGED,
},
{
.hook = ebt_in_hook,
.owner = THIS_MODULE,
- .pf = PF_BRIDGE,
+ .pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_FORWARD,
.priority = NF_BR_PRI_FILTER_BRIDGED,
},
{
.hook = ebt_out_hook,
.owner = THIS_MODULE,
- .pf = PF_BRIDGE,
+ .pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_LOCAL_OUT,
.priority = NF_BR_PRI_FILTER_OTHER,
},
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 6024c551f9a..4a98804203b 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -77,21 +77,21 @@ static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
{
.hook = ebt_nat_out,
.owner = THIS_MODULE,
- .pf = PF_BRIDGE,
+ .pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_LOCAL_OUT,
.priority = NF_BR_PRI_NAT_DST_OTHER,
},
{
.hook = ebt_nat_out,
.owner = THIS_MODULE,
- .pf = PF_BRIDGE,
+ .pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_POST_ROUTING,
.priority = NF_BR_PRI_NAT_SRC,
},
{
.hook = ebt_nat_in,
.owner = THIS_MODULE,
- .pf = PF_BRIDGE,
+ .pf = NFPROTO_BRIDGE,
.hooknum = NF_BR_PRE_ROUTING,
.priority = NF_BR_PRI_NAT_DST_BRIDGED,
},
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 37928d5f284..bd1c65425d4 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1103,23 +1103,24 @@ free_newinfo:
return ret;
}
-struct ebt_table *ebt_register_table(struct net *net, struct ebt_table *table)
+struct ebt_table *
+ebt_register_table(struct net *net, const struct ebt_table *input_table)
{
struct ebt_table_info *newinfo;
- struct ebt_table *t;
+ struct ebt_table *t, *table;
struct ebt_replace_kernel *repl;
int ret, i, countersize;
void *p;
- if (!table || !(repl = table->table) || !repl->entries ||
- repl->entries_size == 0 ||
- repl->counters || table->private) {
+ if (input_table == NULL || (repl = input_table->table) == NULL ||
+ repl->entries == 0 || repl->entries_size == 0 ||
+ repl->counters != NULL || input_table->private != NULL) {
BUGPRINT("Bad table data for ebt_register_table!!!\n");
return ERR_PTR(-EINVAL);
}
/* Don't add one table to multiple lists. */
- table = kmemdup(table, sizeof(struct ebt_table), GFP_KERNEL);
+ table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
if (!table) {
ret = -ENOMEM;
goto out;
diff --git a/net/can/af_can.c b/net/can/af_can.c
index e733725b11d..60683211567 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -199,6 +199,8 @@ static int can_create(struct net *net, struct socket *sock, int protocol)
* @skb: pointer to socket buffer with CAN frame in data section
* @loop: loopback for listeners on local CAN sockets (recommended default!)
*
+ * Due to the loopback this routine must not be called from hardirq context.
+ *
* Return:
* 0 on success
* -ENETDOWN when the selected interface is down
@@ -278,7 +280,7 @@ int can_send(struct sk_buff *skb, int loop)
}
if (newskb)
- netif_rx(newskb);
+ netif_rx_ni(newskb);
/* update statistics */
can_stats.tx_frames++;
@@ -651,12 +653,16 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
struct can_frame *cf = (struct can_frame *)skb->data;
int matches;
- if (dev->type != ARPHRD_CAN || !net_eq(dev_net(dev), &init_net)) {
- kfree_skb(skb);
- return 0;
- }
+ if (!net_eq(dev_net(dev), &init_net))
+ goto drop;
- BUG_ON(skb->len != sizeof(struct can_frame) || cf->can_dlc > 8);
+ if (WARN_ONCE(dev->type != ARPHRD_CAN ||
+ skb->len != sizeof(struct can_frame) ||
+ cf->can_dlc > 8,
+ "PF_CAN: dropped non conform skbuf: "
+ "dev type %d, len %d, can_dlc %d\n",
+ dev->type, skb->len, cf->can_dlc))
+ goto drop;
/* update statistics */
can_stats.rx_frames++;
@@ -682,7 +688,11 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
can_stats.matches_delta++;
}
- return 0;
+ return NET_RX_SUCCESS;
+
+drop:
+ kfree_skb(skb);
+ return NET_RX_DROP;
}
/*
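
Because can_send() now loops frames back to local listeners through netif_rx_ni(), callers must be in process context. A heavily simplified in-kernel sketch of sending one frame with loopback enabled; the interface name "can0" is an assumption and error handling is trimmed:

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/can.h>
#include <linux/can/core.h>

static int demo_send_one_frame(void)
{
	struct can_frame cf = { .can_id = 0x123, .can_dlc = 1, .data = { 0x42 } };
	struct net_device *dev = dev_get_by_name(&init_net, "can0");
	struct sk_buff *skb;
	int err;

	if (!dev)
		return -ENODEV;

	skb = alloc_skb(sizeof(cf), GFP_KERNEL);	/* process context only */
	if (!skb) {
		dev_put(dev);
		return -ENOMEM;
	}
	memcpy(skb_put(skb, sizeof(cf)), &cf, sizeof(cf));
	skb->dev = dev;

	err = can_send(skb, 1);		/* loop=1: local sockets see the frame too */
	dev_put(dev);
	return err;
}
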
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 72720c71035..597da4f8f88 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -46,6 +46,7 @@
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/net.h>
#include <linux/netdevice.h>
@@ -146,23 +147,18 @@ static char *bcm_proc_getifname(int ifindex)
return "???";
}
-static int bcm_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int bcm_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
- struct sock *sk = (struct sock *)data;
+ struct sock *sk = (struct sock *)m->private;
struct bcm_sock *bo = bcm_sk(sk);
struct bcm_op *op;
- len += snprintf(page + len, PAGE_SIZE - len, ">>> socket %p",
- sk->sk_socket);
- len += snprintf(page + len, PAGE_SIZE - len, " / sk %p", sk);
- len += snprintf(page + len, PAGE_SIZE - len, " / bo %p", bo);
- len += snprintf(page + len, PAGE_SIZE - len, " / dropped %lu",
- bo->dropped_usr_msgs);
- len += snprintf(page + len, PAGE_SIZE - len, " / bound %s",
- bcm_proc_getifname(bo->ifindex));
- len += snprintf(page + len, PAGE_SIZE - len, " <<<\n");
+ seq_printf(m, ">>> socket %p", sk->sk_socket);
+ seq_printf(m, " / sk %p", sk);
+ seq_printf(m, " / bo %p", bo);
+ seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
+ seq_printf(m, " / bound %s", bcm_proc_getifname(bo->ifindex));
+ seq_printf(m, " <<<\n");
list_for_each_entry(op, &bo->rx_ops, list) {
@@ -172,71 +168,62 @@ static int bcm_read_proc(char *page, char **start, off_t off,
if (!op->frames_abs)
continue;
- len += snprintf(page + len, PAGE_SIZE - len,
- "rx_op: %03X %-5s ",
+ seq_printf(m, "rx_op: %03X %-5s ",
op->can_id, bcm_proc_getifname(op->ifindex));
- len += snprintf(page + len, PAGE_SIZE - len, "[%d]%c ",
- op->nframes,
+ seq_printf(m, "[%d]%c ", op->nframes,
(op->flags & RX_CHECK_DLC)?'d':' ');
if (op->kt_ival1.tv64)
- len += snprintf(page + len, PAGE_SIZE - len,
- "timeo=%lld ",
+ seq_printf(m, "timeo=%lld ",
(long long)
ktime_to_us(op->kt_ival1));
if (op->kt_ival2.tv64)
- len += snprintf(page + len, PAGE_SIZE - len,
- "thr=%lld ",
+ seq_printf(m, "thr=%lld ",
(long long)
ktime_to_us(op->kt_ival2));
- len += snprintf(page + len, PAGE_SIZE - len,
- "# recv %ld (%ld) => reduction: ",
+ seq_printf(m, "# recv %ld (%ld) => reduction: ",
op->frames_filtered, op->frames_abs);
reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
- len += snprintf(page + len, PAGE_SIZE - len, "%s%ld%%\n",
+ seq_printf(m, "%s%ld%%\n",
(reduction == 100)?"near ":"", reduction);
-
- if (len > PAGE_SIZE - 200) {
- /* mark output cut off */
- len += snprintf(page + len, PAGE_SIZE - len, "(..)\n");
- break;
- }
}
list_for_each_entry(op, &bo->tx_ops, list) {
- len += snprintf(page + len, PAGE_SIZE - len,
- "tx_op: %03X %s [%d] ",
+ seq_printf(m, "tx_op: %03X %s [%d] ",
op->can_id, bcm_proc_getifname(op->ifindex),
op->nframes);
if (op->kt_ival1.tv64)
- len += snprintf(page + len, PAGE_SIZE - len, "t1=%lld ",
+ seq_printf(m, "t1=%lld ",
(long long) ktime_to_us(op->kt_ival1));
if (op->kt_ival2.tv64)
- len += snprintf(page + len, PAGE_SIZE - len, "t2=%lld ",
+ seq_printf(m, "t2=%lld ",
(long long) ktime_to_us(op->kt_ival2));
- len += snprintf(page + len, PAGE_SIZE - len, "# sent %ld\n",
- op->frames_abs);
-
- if (len > PAGE_SIZE - 100) {
- /* mark output cut off */
- len += snprintf(page + len, PAGE_SIZE - len, "(..)\n");
- break;
- }
+ seq_printf(m, "# sent %ld\n", op->frames_abs);
}
+ seq_putc(m, '\n');
+ return 0;
+}
- len += snprintf(page + len, PAGE_SIZE - len, "\n");
-
- *eof = 1;
- return len;
+static int bcm_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, bcm_proc_show, PDE(inode)->data);
}
+static const struct file_operations bcm_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = bcm_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/*
* bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
* of the given bcm tx op
@@ -1515,9 +1502,9 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
if (proc_dir) {
/* unique socket address as filename */
sprintf(bo->procname, "%p", sock);
- bo->bcm_proc_read = create_proc_read_entry(bo->procname, 0644,
- proc_dir,
- bcm_read_proc, sk);
+ bo->bcm_proc_read = proc_create_data(bo->procname, 0644,
+ proc_dir,
+ &bcm_proc_fops, sk);
}
return 0;
diff --git a/net/can/proc.c b/net/can/proc.c
index 1463653dbe3..9b9ad29be56 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -196,8 +196,8 @@ void can_stat_update(unsigned long data)
*
*/
-static int can_print_rcvlist(char *page, int len, struct hlist_head *rx_list,
- struct net_device *dev)
+static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list,
+ struct net_device *dev)
{
struct receiver *r;
struct hlist_node *n;
@@ -208,199 +208,188 @@ static int can_print_rcvlist(char *page, int len, struct hlist_head *rx_list,
" %-5s %08X %08x %08x %08x %8ld %s\n" :
" %-5s %03X %08x %08lx %08lx %8ld %s\n";
- len += snprintf(page + len, PAGE_SIZE - len, fmt,
- DNAME(dev), r->can_id, r->mask,
+ seq_printf(m, fmt, DNAME(dev), r->can_id, r->mask,
(unsigned long)r->func, (unsigned long)r->data,
r->matches, r->ident);
-
- /* does a typical line fit into the current buffer? */
-
- /* 100 Bytes before end of buffer */
- if (len > PAGE_SIZE - 100) {
- /* mark output cut off */
- len += snprintf(page + len, PAGE_SIZE - len,
- " (..)\n");
- break;
- }
}
rcu_read_unlock();
-
- return len;
}
-static int can_print_recv_banner(char *page, int len)
+static void can_print_recv_banner(struct seq_file *m)
{
/*
* can1. 00000000 00000000 00000000
* ....... 0 tp20
*/
- len += snprintf(page + len, PAGE_SIZE - len,
- " device can_id can_mask function"
+ seq_puts(m, " device can_id can_mask function"
" userdata matches ident\n");
-
- return len;
}
-static int can_proc_read_stats(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int can_stats_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
+ seq_putc(m, '\n');
+ seq_printf(m, " %8ld transmitted frames (TXF)\n", can_stats.tx_frames);
+ seq_printf(m, " %8ld received frames (RXF)\n", can_stats.rx_frames);
+ seq_printf(m, " %8ld matched frames (RXMF)\n", can_stats.matches);
- len += snprintf(page + len, PAGE_SIZE - len, "\n");
- len += snprintf(page + len, PAGE_SIZE - len,
- " %8ld transmitted frames (TXF)\n",
- can_stats.tx_frames);
- len += snprintf(page + len, PAGE_SIZE - len,
- " %8ld received frames (RXF)\n", can_stats.rx_frames);
- len += snprintf(page + len, PAGE_SIZE - len,
- " %8ld matched frames (RXMF)\n", can_stats.matches);
-
- len += snprintf(page + len, PAGE_SIZE - len, "\n");
+ seq_putc(m, '\n');
if (can_stattimer.function == can_stat_update) {
- len += snprintf(page + len, PAGE_SIZE - len,
- " %8ld %% total match ratio (RXMR)\n",
+ seq_printf(m, " %8ld %% total match ratio (RXMR)\n",
can_stats.total_rx_match_ratio);
- len += snprintf(page + len, PAGE_SIZE - len,
- " %8ld frames/s total tx rate (TXR)\n",
+ seq_printf(m, " %8ld frames/s total tx rate (TXR)\n",
can_stats.total_tx_rate);
- len += snprintf(page + len, PAGE_SIZE - len,
- " %8ld frames/s total rx rate (RXR)\n",
+ seq_printf(m, " %8ld frames/s total rx rate (RXR)\n",
can_stats.total_rx_rate);
- len += snprintf(page + len, PAGE_SIZE - len, "\n");
+ seq_putc(m, '\n');
- len += snprintf(page + len, PAGE_SIZE - len,
- " %8ld %% current match ratio (CRXMR)\n",
+ seq_printf(m, " %8ld %% current match ratio (CRXMR)\n",
can_stats.current_rx_match_ratio);
- len += snprintf(page + len, PAGE_SIZE - len,
- " %8ld frames/s current tx rate (CTXR)\n",
+ seq_printf(m, " %8ld frames/s current tx rate (CTXR)\n",
can_stats.current_tx_rate);
- len += snprintf(page + len, PAGE_SIZE - len,
- " %8ld frames/s current rx rate (CRXR)\n",
+ seq_printf(m, " %8ld frames/s current rx rate (CRXR)\n",
can_stats.current_rx_rate);
- len += snprintf(page + len, PAGE_SIZE - len, "\n");
+ seq_putc(m, '\n');
- len += snprintf(page + len, PAGE_SIZE - len,
- " %8ld %% max match ratio (MRXMR)\n",
+ seq_printf(m, " %8ld %% max match ratio (MRXMR)\n",
can_stats.max_rx_match_ratio);
- len += snprintf(page + len, PAGE_SIZE - len,
- " %8ld frames/s max tx rate (MTXR)\n",
+ seq_printf(m, " %8ld frames/s max tx rate (MTXR)\n",
can_stats.max_tx_rate);
- len += snprintf(page + len, PAGE_SIZE - len,
- " %8ld frames/s max rx rate (MRXR)\n",
+ seq_printf(m, " %8ld frames/s max rx rate (MRXR)\n",
can_stats.max_rx_rate);
- len += snprintf(page + len, PAGE_SIZE - len, "\n");
+ seq_putc(m, '\n');
}
- len += snprintf(page + len, PAGE_SIZE - len,
- " %8ld current receive list entries (CRCV)\n",
+ seq_printf(m, " %8ld current receive list entries (CRCV)\n",
can_pstats.rcv_entries);
- len += snprintf(page + len, PAGE_SIZE - len,
- " %8ld maximum receive list entries (MRCV)\n",
+ seq_printf(m, " %8ld maximum receive list entries (MRCV)\n",
can_pstats.rcv_entries_max);
if (can_pstats.stats_reset)
- len += snprintf(page + len, PAGE_SIZE - len,
- "\n %8ld statistic resets (STR)\n",
+ seq_printf(m, "\n %8ld statistic resets (STR)\n",
can_pstats.stats_reset);
if (can_pstats.user_reset)
- len += snprintf(page + len, PAGE_SIZE - len,
- " %8ld user statistic resets (USTR)\n",
+ seq_printf(m, " %8ld user statistic resets (USTR)\n",
can_pstats.user_reset);
- len += snprintf(page + len, PAGE_SIZE - len, "\n");
-
- *eof = 1;
- return len;
+ seq_putc(m, '\n');
+ return 0;
}
-static int can_proc_read_reset_stats(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int can_stats_proc_open(struct inode *inode, struct file *file)
{
- int len = 0;
+ return single_open(file, can_stats_proc_show, NULL);
+}
+
+static const struct file_operations can_stats_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = can_stats_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+static int can_reset_stats_proc_show(struct seq_file *m, void *v)
+{
user_reset = 1;
if (can_stattimer.function == can_stat_update) {
- len += snprintf(page + len, PAGE_SIZE - len,
- "Scheduled statistic reset #%ld.\n",
+ seq_printf(m, "Scheduled statistic reset #%ld.\n",
can_pstats.stats_reset + 1);
} else {
if (can_stats.jiffies_init != jiffies)
can_init_stats();
- len += snprintf(page + len, PAGE_SIZE - len,
- "Performed statistic reset #%ld.\n",
+ seq_printf(m, "Performed statistic reset #%ld.\n",
can_pstats.stats_reset);
}
+ return 0;
+}
- *eof = 1;
- return len;
+static int can_reset_stats_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, can_reset_stats_proc_show, NULL);
}
-static int can_proc_read_version(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static const struct file_operations can_reset_stats_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = can_reset_stats_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int can_version_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
+ seq_printf(m, "%s\n", CAN_VERSION_STRING);
+ return 0;
+}
- len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
- CAN_VERSION_STRING);
- *eof = 1;
- return len;
+static int can_version_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, can_version_proc_show, NULL);
}
-static int can_proc_read_rcvlist(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static const struct file_operations can_version_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = can_version_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int can_rcvlist_proc_show(struct seq_file *m, void *v)
{
/* double cast to prevent GCC warning */
- int idx = (int)(long)data;
- int len = 0;
+ int idx = (int)(long)m->private;
struct dev_rcv_lists *d;
struct hlist_node *n;
- len += snprintf(page + len, PAGE_SIZE - len,
- "\nreceive list '%s':\n", rx_list_name[idx]);
+ seq_printf(m, "\nreceive list '%s':\n", rx_list_name[idx]);
rcu_read_lock();
hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
if (!hlist_empty(&d->rx[idx])) {
- len = can_print_recv_banner(page, len);
- len = can_print_rcvlist(page, len, &d->rx[idx], d->dev);
+ can_print_recv_banner(m);
+ can_print_rcvlist(m, &d->rx[idx], d->dev);
} else
- len += snprintf(page + len, PAGE_SIZE - len,
- " (%s: no entry)\n", DNAME(d->dev));
-
- /* exit on end of buffer? */
- if (len > PAGE_SIZE - 100)
- break;
+ seq_printf(m, " (%s: no entry)\n", DNAME(d->dev));
}
rcu_read_unlock();
- len += snprintf(page + len, PAGE_SIZE - len, "\n");
+ seq_putc(m, '\n');
+ return 0;
+}
- *eof = 1;
- return len;
+static int can_rcvlist_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, can_rcvlist_proc_show, PDE(inode)->data);
}
-static int can_proc_read_rcvlist_sff(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static const struct file_operations can_rcvlist_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = can_rcvlist_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
struct dev_rcv_lists *d;
struct hlist_node *n;
/* RX_SFF */
- len += snprintf(page + len, PAGE_SIZE - len,
- "\nreceive list 'rx_sff':\n");
+ seq_puts(m, "\nreceive list 'rx_sff':\n");
rcu_read_lock();
hlist_for_each_entry_rcu(d, n, &can_rx_dev_list, list) {
@@ -413,46 +402,38 @@ static int can_proc_read_rcvlist_sff(char *page, char **start, off_t off,
}
if (!all_empty) {
- len = can_print_recv_banner(page, len);
+ can_print_recv_banner(m);
for (i = 0; i < 0x800; i++) {
- if (!hlist_empty(&d->rx_sff[i]) &&
- len < PAGE_SIZE - 100)
- len = can_print_rcvlist(page, len,
- &d->rx_sff[i],
- d->dev);
+ if (!hlist_empty(&d->rx_sff[i]))
+ can_print_rcvlist(m, &d->rx_sff[i],
+ d->dev);
}
} else
- len += snprintf(page + len, PAGE_SIZE - len,
- " (%s: no entry)\n", DNAME(d->dev));
-
- /* exit on end of buffer? */
- if (len > PAGE_SIZE - 100)
- break;
+ seq_printf(m, " (%s: no entry)\n", DNAME(d->dev));
}
rcu_read_unlock();
- len += snprintf(page + len, PAGE_SIZE - len, "\n");
+ seq_putc(m, '\n');
+ return 0;
+}
- *eof = 1;
- return len;
+static int can_rcvlist_sff_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, can_rcvlist_sff_proc_show, NULL);
}
+static const struct file_operations can_rcvlist_sff_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = can_rcvlist_sff_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/*
* proc utility functions
*/
-static struct proc_dir_entry *can_create_proc_readentry(const char *name,
- mode_t mode,
- read_proc_t *read_proc,
- void *data)
-{
- if (can_dir)
- return create_proc_read_entry(name, mode, can_dir, read_proc,
- data);
- else
- return NULL;
-}
-
static void can_remove_proc_readentry(const char *name)
{
if (can_dir)
@@ -474,24 +455,24 @@ void can_init_proc(void)
}
/* own procfs entries from the AF_CAN core */
- pde_version = can_create_proc_readentry(CAN_PROC_VERSION, 0644,
- can_proc_read_version, NULL);
- pde_stats = can_create_proc_readentry(CAN_PROC_STATS, 0644,
- can_proc_read_stats, NULL);
- pde_reset_stats = can_create_proc_readentry(CAN_PROC_RESET_STATS, 0644,
- can_proc_read_reset_stats, NULL);
- pde_rcvlist_err = can_create_proc_readentry(CAN_PROC_RCVLIST_ERR, 0644,
- can_proc_read_rcvlist, (void *)RX_ERR);
- pde_rcvlist_all = can_create_proc_readentry(CAN_PROC_RCVLIST_ALL, 0644,
- can_proc_read_rcvlist, (void *)RX_ALL);
- pde_rcvlist_fil = can_create_proc_readentry(CAN_PROC_RCVLIST_FIL, 0644,
- can_proc_read_rcvlist, (void *)RX_FIL);
- pde_rcvlist_inv = can_create_proc_readentry(CAN_PROC_RCVLIST_INV, 0644,
- can_proc_read_rcvlist, (void *)RX_INV);
- pde_rcvlist_eff = can_create_proc_readentry(CAN_PROC_RCVLIST_EFF, 0644,
- can_proc_read_rcvlist, (void *)RX_EFF);
- pde_rcvlist_sff = can_create_proc_readentry(CAN_PROC_RCVLIST_SFF, 0644,
- can_proc_read_rcvlist_sff, NULL);
+ pde_version = proc_create(CAN_PROC_VERSION, 0644, can_dir,
+ &can_version_proc_fops);
+ pde_stats = proc_create(CAN_PROC_STATS, 0644, can_dir,
+ &can_stats_proc_fops);
+ pde_reset_stats = proc_create(CAN_PROC_RESET_STATS, 0644, can_dir,
+ &can_reset_stats_proc_fops);
+ pde_rcvlist_err = proc_create_data(CAN_PROC_RCVLIST_ERR, 0644, can_dir,
+ &can_rcvlist_proc_fops, (void *)RX_ERR);
+ pde_rcvlist_all = proc_create_data(CAN_PROC_RCVLIST_ALL, 0644, can_dir,
+ &can_rcvlist_proc_fops, (void *)RX_ALL);
+ pde_rcvlist_fil = proc_create_data(CAN_PROC_RCVLIST_FIL, 0644, can_dir,
+ &can_rcvlist_proc_fops, (void *)RX_FIL);
+ pde_rcvlist_inv = proc_create_data(CAN_PROC_RCVLIST_INV, 0644, can_dir,
+ &can_rcvlist_proc_fops, (void *)RX_INV);
+ pde_rcvlist_eff = proc_create_data(CAN_PROC_RCVLIST_EFF, 0644, can_dir,
+ &can_rcvlist_proc_fops, (void *)RX_EFF);
+ pde_rcvlist_sff = proc_create(CAN_PROC_RCVLIST_SFF, 0644, can_dir,
+ &can_rcvlist_sff_proc_fops);
}
/*
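
All of the former read_proc handlers above now follow the same single_open()/seq_file pattern. A self-contained, hypothetical module sketch of that pattern (the /proc entry name "seqfile_demo" and its output are made up):

#include <linux/module.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_proc_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello from seq_file\n");
	return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_proc_show, NULL);
}

static const struct file_operations demo_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_init(void)
{
	return proc_create("seqfile_demo", 0444, NULL, &demo_proc_fops) ?
		0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("seqfile_demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
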
diff --git a/net/can/raw.c b/net/can/raw.c
index f4cc44548bd..db3152df7d2 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -401,6 +401,7 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
if (peer)
return -EOPNOTSUPP;
+ memset(addr, 0, sizeof(*addr));
addr->can_family = AF_CAN;
addr->can_ifindex = ro->ifindex;
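
The added memset() closes a kernel-to-user infoleak: padding in the sockaddr_can returned by getsockname() was previously left uninitialized. A minimal userspace sketch that exercises the raw-socket send and getname paths; the interface name "can0" and CAN ID 0x123 are assumptions:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

#ifndef PF_CAN
#define PF_CAN 29	/* older libc headers may not define it yet */
#endif
#ifndef AF_CAN
#define AF_CAN PF_CAN
#endif

int main(void)
{
	struct sockaddr_can addr;
	struct can_frame frame;
	struct ifreq ifr;
	socklen_t len = sizeof(addr);
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	if (s < 0) {
		perror("socket");
		return 1;
	}

	strncpy(ifr.ifr_name, "can0", IFNAMSIZ);
	if (ioctl(s, SIOCGIFINDEX, &ifr) < 0) {
		perror("SIOCGIFINDEX");
		return 1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.can_family = AF_CAN;
	addr.can_ifindex = ifr.ifr_ifindex;
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		return 1;
	}

	memset(&frame, 0, sizeof(frame));
	frame.can_id  = 0x123;
	frame.can_dlc = 2;
	frame.data[0] = 0xde;
	frame.data[1] = 0xad;
	write(s, &frame, sizeof(frame));

	/* with the fix, unused parts of addr come back zeroed */
	getsockname(s, (struct sockaddr *)&addr, &len);
	printf("bound to ifindex %d\n", addr.can_ifindex);

	close(s);
	return 0;
}
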
diff --git a/net/compat.c b/net/compat.c
index 8d739053afe..12728b17a22 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -743,6 +743,18 @@ asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, uns
return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
}
+asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned flags)
+{
+ return sys_recv(fd, buf, len, flags | MSG_CMSG_COMPAT);
+}
+
+asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
+ unsigned flags, struct sockaddr __user *addr,
+ int __user *addrlen)
+{
+ return sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT, addr, addrlen);
+}
+
asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
{
int ret;
@@ -788,10 +800,11 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
ret = sys_sendto(a0, compat_ptr(a1), a[2], a[3], compat_ptr(a[4]), a[5]);
break;
case SYS_RECV:
- ret = sys_recv(a0, compat_ptr(a1), a[2], a[3]);
+ ret = compat_sys_recv(a0, compat_ptr(a1), a[2], a[3]);
break;
case SYS_RECVFROM:
- ret = sys_recvfrom(a0, compat_ptr(a1), a[2], a[3], compat_ptr(a[4]), compat_ptr(a[5]));
+ ret = compat_sys_recvfrom(a0, compat_ptr(a1), a[2], a[3],
+ compat_ptr(a[4]), compat_ptr(a[5]));
break;
case SYS_SHUTDOWN:
ret = sys_shutdown(a0,a1);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b0fe69211ee..1c6cf3a1a4f 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -55,6 +55,7 @@
#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
+#include <trace/events/skb.h>
/*
* Is a socket 'connection oriented' ?
@@ -284,6 +285,8 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
int i, copy = start - offset;
struct sk_buff *frag_iter;
+ trace_skb_copy_datagram_iovec(skb, len);
+
/* Copy header. */
if (copy > 0) {
if (copy > len)
diff --git a/net/core/dev.c b/net/core/dev.c
index 6a94475aee8..560c8c9c03a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -191,7 +191,6 @@ static struct list_head ptype_all __read_mostly; /* Taps */
* semaphore held.
*/
DEFINE_RWLOCK(dev_base_lock);
-
EXPORT_SYMBOL(dev_base_lock);
#define NETDEV_HASHBITS 8
@@ -248,6 +247,7 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
*/
DEFINE_PER_CPU(struct softnet_data, softnet_data);
+EXPORT_PER_CPU_SYMBOL(softnet_data);
#ifdef CONFIG_LOCKDEP
/*
@@ -269,10 +269,10 @@ static const unsigned short netdev_lock_type[] =
ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
- ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154, ARPHRD_IEEE802154_PHY,
+ ARPHRD_PHONET_PIPE, ARPHRD_IEEE802154,
ARPHRD_VOID, ARPHRD_NONE};
-static const char *netdev_lock_name[] =
+static const char *const netdev_lock_name[] =
{"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
"_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
"_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
@@ -287,7 +287,7 @@ static const char *netdev_lock_name[] =
"_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
"_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
"_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
- "_xmit_PHONET_PIPE", "_xmit_IEEE802154", "_xmit_IEEE802154_PHY",
+ "_xmit_PHONET_PIPE", "_xmit_IEEE802154",
"_xmit_VOID", "_xmit_NONE"};
static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
@@ -381,6 +381,7 @@ void dev_add_pack(struct packet_type *pt)
}
spin_unlock_bh(&ptype_lock);
}
+EXPORT_SYMBOL(dev_add_pack);
/**
* __dev_remove_pack - remove packet handler
@@ -418,6 +419,8 @@ void __dev_remove_pack(struct packet_type *pt)
out:
spin_unlock_bh(&ptype_lock);
}
+EXPORT_SYMBOL(__dev_remove_pack);
+
/**
* dev_remove_pack - remove packet handler
* @pt: packet type declaration
@@ -436,6 +439,7 @@ void dev_remove_pack(struct packet_type *pt)
synchronize_net();
}
+EXPORT_SYMBOL(dev_remove_pack);
/******************************************************************************
@@ -499,6 +503,7 @@ int netdev_boot_setup_check(struct net_device *dev)
}
return 0;
}
+EXPORT_SYMBOL(netdev_boot_setup_check);
/**
@@ -591,6 +596,7 @@ struct net_device *__dev_get_by_name(struct net *net, const char *name)
}
return NULL;
}
+EXPORT_SYMBOL(__dev_get_by_name);
/**
* dev_get_by_name - find a device by its name
@@ -615,6 +621,7 @@ struct net_device *dev_get_by_name(struct net *net, const char *name)
read_unlock(&dev_base_lock);
return dev;
}
+EXPORT_SYMBOL(dev_get_by_name);
/**
* __dev_get_by_index - find a device by its ifindex
@@ -640,6 +647,7 @@ struct net_device *__dev_get_by_index(struct net *net, int ifindex)
}
return NULL;
}
+EXPORT_SYMBOL(__dev_get_by_index);
/**
@@ -664,6 +672,7 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex)
read_unlock(&dev_base_lock);
return dev;
}
+EXPORT_SYMBOL(dev_get_by_index);
/**
* dev_getbyhwaddr - find a device by its hardware address
@@ -693,7 +702,6 @@ struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *h
return NULL;
}
-
EXPORT_SYMBOL(dev_getbyhwaddr);
struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
@@ -707,7 +715,6 @@ struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
return NULL;
}
-
EXPORT_SYMBOL(__dev_getfirstbyhwtype);
struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
@@ -721,7 +728,6 @@ struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
rtnl_unlock();
return dev;
}
-
EXPORT_SYMBOL(dev_getfirstbyhwtype);
/**
@@ -736,7 +742,8 @@ EXPORT_SYMBOL(dev_getfirstbyhwtype);
* dev_put to indicate they have finished with it.
*/
-struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
+struct net_device *dev_get_by_flags(struct net *net, unsigned short if_flags,
+ unsigned short mask)
{
struct net_device *dev, *ret;
@@ -752,6 +759,7 @@ struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, u
read_unlock(&dev_base_lock);
return ret;
}
+EXPORT_SYMBOL(dev_get_by_flags);
/**
* dev_valid_name - check if name is okay for network device
@@ -777,6 +785,7 @@ int dev_valid_name(const char *name)
}
return 1;
}
+EXPORT_SYMBOL(dev_valid_name);
/**
* __dev_alloc_name - allocate a name for a device
@@ -870,6 +879,7 @@ int dev_alloc_name(struct net_device *dev, const char *name)
strlcpy(dev->name, buf, IFNAMSIZ);
return ret;
}
+EXPORT_SYMBOL(dev_alloc_name);
/**
@@ -906,8 +916,7 @@ int dev_change_name(struct net_device *dev, const char *newname)
err = dev_alloc_name(dev, newname);
if (err < 0)
return err;
- }
- else if (__dev_get_by_name(net, newname))
+ } else if (__dev_get_by_name(net, newname))
return -EEXIST;
else
strlcpy(dev->name, newname, IFNAMSIZ);
@@ -970,7 +979,7 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
return 0;
}
- dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
+ dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
if (!dev->ifalias)
return -ENOMEM;
@@ -1006,10 +1015,11 @@ void netdev_state_change(struct net_device *dev)
rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
}
}
+EXPORT_SYMBOL(netdev_state_change);
-void netdev_bonding_change(struct net_device *dev)
+void netdev_bonding_change(struct net_device *dev, unsigned long event)
{
- call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
+ call_netdevice_notifiers(event, dev);
}
EXPORT_SYMBOL(netdev_bonding_change);
@@ -1031,9 +1041,10 @@ void dev_load(struct net *net, const char *name)
dev = __dev_get_by_name(net, name);
read_unlock(&dev_base_lock);
- if (!dev && capable(CAP_SYS_MODULE))
+ if (!dev && capable(CAP_NET_ADMIN))
request_module("%s", name);
}
+EXPORT_SYMBOL(dev_load);
/**
* dev_open - prepare an interface for use.
@@ -1118,6 +1129,7 @@ int dev_open(struct net_device *dev)
return ret;
}
+EXPORT_SYMBOL(dev_open);
/**
* dev_close - shutdown an interface.
@@ -1184,6 +1196,7 @@ int dev_close(struct net_device *dev)
return 0;
}
+EXPORT_SYMBOL(dev_close);
/**
@@ -1279,6 +1292,7 @@ rollback:
raw_notifier_chain_unregister(&netdev_chain, nb);
goto unlock;
}
+EXPORT_SYMBOL(register_netdevice_notifier);
/**
* unregister_netdevice_notifier - unregister a network notifier block
@@ -1299,6 +1313,7 @@ int unregister_netdevice_notifier(struct notifier_block *nb)
rtnl_unlock();
return err;
}
+EXPORT_SYMBOL(unregister_netdevice_notifier);
/**
* call_netdevice_notifiers - call all network notifier blocks
@@ -1321,11 +1336,13 @@ void net_enable_timestamp(void)
{
atomic_inc(&netstamp_needed);
}
+EXPORT_SYMBOL(net_enable_timestamp);
void net_disable_timestamp(void)
{
atomic_dec(&netstamp_needed);
}
+EXPORT_SYMBOL(net_disable_timestamp);
static inline void net_timestamp(struct sk_buff *skb)
{
@@ -1359,7 +1376,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
if ((ptype->dev == dev || !ptype->dev) &&
(ptype->af_packet_priv == NULL ||
(struct sock *)ptype->af_packet_priv != skb->sk)) {
- struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
+ struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2)
break;
@@ -1527,6 +1544,7 @@ out_set_summed:
out:
return ret;
}
+EXPORT_SYMBOL(skb_checksum_help);
/**
* skb_gso_segment - Perform segmentation on skb.
@@ -1589,7 +1607,6 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
return segs;
}
-
EXPORT_SYMBOL(skb_gso_segment);
/* Take action when hardware reception checksum errors are detected. */
@@ -1704,7 +1721,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
skb_dst_drop(skb);
rc = ops->ndo_start_xmit(skb, dev);
- if (rc == 0)
+ if (rc == NETDEV_TX_OK)
txq_trans_update(txq);
/*
* TODO: if skb_orphan() was called by
@@ -1730,7 +1747,7 @@ gso:
skb->next = nskb->next;
nskb->next = NULL;
rc = ops->ndo_start_xmit(nskb, dev);
- if (unlikely(rc)) {
+ if (unlikely(rc != NETDEV_TX_OK)) {
nskb->next = skb->next;
skb->next = nskb;
return rc;
@@ -1744,7 +1761,7 @@ gso:
out_kfree_skb:
kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
static u32 skb_tx_hashrnd;
@@ -1755,7 +1772,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
if (skb_rx_queue_recorded(skb)) {
hash = skb_get_rx_queue(skb);
- while (unlikely (hash >= dev->real_num_tx_queues))
+ while (unlikely(hash >= dev->real_num_tx_queues))
hash -= dev->real_num_tx_queues;
return hash;
}
@@ -1786,6 +1803,40 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
return netdev_get_tx_queue(dev, queue_index);
}
+static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
+ struct net_device *dev,
+ struct netdev_queue *txq)
+{
+ spinlock_t *root_lock = qdisc_lock(q);
+ int rc;
+
+ spin_lock(root_lock);
+ if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+ kfree_skb(skb);
+ rc = NET_XMIT_DROP;
+ } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
+ !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
+ /*
+ * This is a work-conserving queue; there are no old skbs
+ * waiting to be sent out; and the qdisc is not running -
+ * xmit the skb directly.
+ */
+ __qdisc_update_bstats(q, skb->len);
+ if (sch_direct_xmit(skb, q, dev, txq, root_lock))
+ __qdisc_run(q);
+ else
+ clear_bit(__QDISC_STATE_RUNNING, &q->state);
+
+ rc = NET_XMIT_SUCCESS;
+ } else {
+ rc = qdisc_enqueue_root(skb, q);
+ qdisc_run(q);
+ }
+ spin_unlock(root_lock);
+
+ return rc;
+}
+
/**
* dev_queue_xmit - transmit a buffer
* @skb: buffer to transmit
@@ -1856,22 +1907,10 @@ gso:
q = rcu_dereference(txq->qdisc);
#ifdef CONFIG_NET_CLS_ACT
- skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
+ skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
#endif
if (q->enqueue) {
- spinlock_t *root_lock = qdisc_lock(q);
-
- spin_lock(root_lock);
-
- if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
- kfree_skb(skb);
- rc = NET_XMIT_DROP;
- } else {
- rc = qdisc_enqueue_root(skb, q);
- qdisc_run(q);
- }
- spin_unlock(root_lock);
-
+ rc = __dev_xmit_skb(skb, q, dev, txq);
goto out;
}
@@ -1895,7 +1934,7 @@ gso:
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_tx_queue_stopped(txq)) {
- rc = 0;
+ rc = NET_XMIT_SUCCESS;
if (!dev_hard_start_xmit(skb, dev, txq)) {
HARD_TX_UNLOCK(dev, txq);
goto out;
@@ -1924,6 +1963,7 @@ out:
rcu_read_unlock_bh();
return rc;
}
+EXPORT_SYMBOL(dev_queue_xmit);
/*=======================================================================
@@ -1990,6 +2030,7 @@ enqueue:
kfree_skb(skb);
return NET_RX_DROP;
}
+EXPORT_SYMBOL(netif_rx);
int netif_rx_ni(struct sk_buff *skb)
{
@@ -2003,7 +2044,6 @@ int netif_rx_ni(struct sk_buff *skb)
return err;
}
-
EXPORT_SYMBOL(netif_rx_ni);
static void net_tx_action(struct softirq_action *h)
@@ -2076,7 +2116,7 @@ static inline int deliver_skb(struct sk_buff *skb,
/* This hook is defined here for ATM LANE */
int (*br_fdb_test_addr_hook)(struct net_device *dev,
unsigned char *addr) __read_mostly;
-EXPORT_SYMBOL(br_fdb_test_addr_hook);
+EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
#endif
/*
@@ -2085,7 +2125,7 @@ EXPORT_SYMBOL(br_fdb_test_addr_hook);
*/
struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
struct sk_buff *skb) __read_mostly;
-EXPORT_SYMBOL(br_handle_frame_hook);
+EXPORT_SYMBOL_GPL(br_handle_frame_hook);
static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
struct packet_type **pt_prev, int *ret,
@@ -2336,6 +2376,7 @@ out:
rcu_read_unlock();
return ret;
}
+EXPORT_SYMBOL(netif_receive_skb);
/* Network device is going away, flush any packets still pending */
static void flush_backlog(void *arg)
@@ -2852,7 +2893,7 @@ softnet_break:
goto out;
}
-static gifconf_func_t * gifconf_list [NPROTO];
+static gifconf_func_t *gifconf_list[NPROTO];
/**
* register_gifconf - register a SIOCGIF handler
@@ -2863,13 +2904,14 @@ static gifconf_func_t * gifconf_list [NPROTO];
* that is passed must not be freed or reused until it has been replaced
* by another handler.
*/
-int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
+int register_gifconf(unsigned int family, gifconf_func_t *gifconf)
{
if (family >= NPROTO)
return -EINVAL;
gifconf_list[family] = gifconf;
return 0;
}
+EXPORT_SYMBOL(register_gifconf);
/*
@@ -3080,7 +3122,7 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
s->total, s->dropped, s->time_squeeze, 0,
0, 0, 0, 0, /* was fastroute */
- s->cpu_collision );
+ s->cpu_collision);
return 0;
}
@@ -3316,6 +3358,7 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
return 0;
}
+EXPORT_SYMBOL(netdev_set_master);
static void dev_change_rx_flags(struct net_device *dev, int flags)
{
@@ -3394,6 +3437,7 @@ int dev_set_promiscuity(struct net_device *dev, int inc)
dev_set_rx_mode(dev);
return err;
}
+EXPORT_SYMBOL(dev_set_promiscuity);
/**
* dev_set_allmulti - update allmulti count on a device
@@ -3437,6 +3481,7 @@ int dev_set_allmulti(struct net_device *dev, int inc)
}
return 0;
}
+EXPORT_SYMBOL(dev_set_allmulti);
/*
* Upload unicast and multicast address lists to device and
@@ -3927,6 +3972,7 @@ int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
}
return err;
}
+EXPORT_SYMBOL_GPL(__dev_addr_sync);
void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
struct dev_addr_list **from, int *from_count)
@@ -3946,6 +3992,7 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
da = next;
}
}
+EXPORT_SYMBOL_GPL(__dev_addr_unsync);
/**
* dev_unicast_sync - Synchronize device's unicast list to another device
@@ -4064,6 +4111,7 @@ unsigned dev_get_flags(const struct net_device *dev)
return flags;
}
+EXPORT_SYMBOL(dev_get_flags);
/**
* dev_change_flags - change device settings
@@ -4114,12 +4162,13 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
}
if (dev->flags & IFF_UP &&
- ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
+ ((old_flags ^ dev->flags) & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
IFF_VOLATILE)))
call_netdevice_notifiers(NETDEV_CHANGE, dev);
if ((flags ^ dev->gflags) & IFF_PROMISC) {
- int inc = (flags & IFF_PROMISC) ? +1 : -1;
+ int inc = (flags & IFF_PROMISC) ? 1 : -1;
+
dev->gflags ^= IFF_PROMISC;
dev_set_promiscuity(dev, inc);
}
@@ -4129,7 +4178,8 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
IFF_ALLMULTI is requested not asking us and not reporting.
*/
if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
- int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
+ int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
+
dev->gflags ^= IFF_ALLMULTI;
dev_set_allmulti(dev, inc);
}
@@ -4141,6 +4191,7 @@ int dev_change_flags(struct net_device *dev, unsigned flags)
return ret;
}
+EXPORT_SYMBOL(dev_change_flags);
/**
* dev_set_mtu - Change maximum transfer unit
@@ -4174,6 +4225,7 @@ int dev_set_mtu(struct net_device *dev, int new_mtu)
call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
return err;
}
+EXPORT_SYMBOL(dev_set_mtu);
/**
* dev_set_mac_address - Change Media Access Control Address
@@ -4198,6 +4250,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
return err;
}
+EXPORT_SYMBOL(dev_set_mac_address);
/*
* Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
@@ -4211,56 +4264,56 @@ static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cm
return -ENODEV;
switch (cmd) {
- case SIOCGIFFLAGS: /* Get interface flags */
- ifr->ifr_flags = (short) dev_get_flags(dev);
- return 0;
+ case SIOCGIFFLAGS: /* Get interface flags */
+ ifr->ifr_flags = (short) dev_get_flags(dev);
+ return 0;
- case SIOCGIFMETRIC: /* Get the metric on the interface
- (currently unused) */
- ifr->ifr_metric = 0;
- return 0;
+ case SIOCGIFMETRIC: /* Get the metric on the interface
+ (currently unused) */
+ ifr->ifr_metric = 0;
+ return 0;
- case SIOCGIFMTU: /* Get the MTU of a device */
- ifr->ifr_mtu = dev->mtu;
- return 0;
+ case SIOCGIFMTU: /* Get the MTU of a device */
+ ifr->ifr_mtu = dev->mtu;
+ return 0;
- case SIOCGIFHWADDR:
- if (!dev->addr_len)
- memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
- else
- memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
- min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
- ifr->ifr_hwaddr.sa_family = dev->type;
- return 0;
+ case SIOCGIFHWADDR:
+ if (!dev->addr_len)
+ memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
+ else
+ memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
+ min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
+ ifr->ifr_hwaddr.sa_family = dev->type;
+ return 0;
- case SIOCGIFSLAVE:
- err = -EINVAL;
- break;
+ case SIOCGIFSLAVE:
+ err = -EINVAL;
+ break;
- case SIOCGIFMAP:
- ifr->ifr_map.mem_start = dev->mem_start;
- ifr->ifr_map.mem_end = dev->mem_end;
- ifr->ifr_map.base_addr = dev->base_addr;
- ifr->ifr_map.irq = dev->irq;
- ifr->ifr_map.dma = dev->dma;
- ifr->ifr_map.port = dev->if_port;
- return 0;
+ case SIOCGIFMAP:
+ ifr->ifr_map.mem_start = dev->mem_start;
+ ifr->ifr_map.mem_end = dev->mem_end;
+ ifr->ifr_map.base_addr = dev->base_addr;
+ ifr->ifr_map.irq = dev->irq;
+ ifr->ifr_map.dma = dev->dma;
+ ifr->ifr_map.port = dev->if_port;
+ return 0;
- case SIOCGIFINDEX:
- ifr->ifr_ifindex = dev->ifindex;
- return 0;
+ case SIOCGIFINDEX:
+ ifr->ifr_ifindex = dev->ifindex;
+ return 0;
- case SIOCGIFTXQLEN:
- ifr->ifr_qlen = dev->tx_queue_len;
- return 0;
+ case SIOCGIFTXQLEN:
+ ifr->ifr_qlen = dev->tx_queue_len;
+ return 0;
- default:
- /* dev_ioctl() should ensure this case
- * is never reached
- */
- WARN_ON(1);
- err = -EINVAL;
- break;
+ default:
+ /* dev_ioctl() should ensure this case
+ * is never reached
+ */
+ WARN_ON(1);
+ err = -EINVAL;
+ break;
}
return err;
@@ -4281,92 +4334,91 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
ops = dev->netdev_ops;
switch (cmd) {
- case SIOCSIFFLAGS: /* Set interface flags */
- return dev_change_flags(dev, ifr->ifr_flags);
-
- case SIOCSIFMETRIC: /* Set the metric on the interface
- (currently unused) */
- return -EOPNOTSUPP;
+ case SIOCSIFFLAGS: /* Set interface flags */
+ return dev_change_flags(dev, ifr->ifr_flags);
- case SIOCSIFMTU: /* Set the MTU of a device */
- return dev_set_mtu(dev, ifr->ifr_mtu);
+ case SIOCSIFMETRIC: /* Set the metric on the interface
+ (currently unused) */
+ return -EOPNOTSUPP;
- case SIOCSIFHWADDR:
- return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
+ case SIOCSIFMTU: /* Set the MTU of a device */
+ return dev_set_mtu(dev, ifr->ifr_mtu);
- case SIOCSIFHWBROADCAST:
- if (ifr->ifr_hwaddr.sa_family != dev->type)
- return -EINVAL;
- memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
- min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
- call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
- return 0;
+ case SIOCSIFHWADDR:
+ return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
- case SIOCSIFMAP:
- if (ops->ndo_set_config) {
- if (!netif_device_present(dev))
- return -ENODEV;
- return ops->ndo_set_config(dev, &ifr->ifr_map);
- }
- return -EOPNOTSUPP;
-
- case SIOCADDMULTI:
- if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
- ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
- return -EINVAL;
- if (!netif_device_present(dev))
- return -ENODEV;
- return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
- dev->addr_len, 1);
+ case SIOCSIFHWBROADCAST:
+ if (ifr->ifr_hwaddr.sa_family != dev->type)
+ return -EINVAL;
+ memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
+ min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return 0;
- case SIOCDELMULTI:
- if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
- ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
- return -EINVAL;
+ case SIOCSIFMAP:
+ if (ops->ndo_set_config) {
if (!netif_device_present(dev))
return -ENODEV;
- return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
- dev->addr_len, 1);
+ return ops->ndo_set_config(dev, &ifr->ifr_map);
+ }
+ return -EOPNOTSUPP;
- case SIOCSIFTXQLEN:
- if (ifr->ifr_qlen < 0)
- return -EINVAL;
- dev->tx_queue_len = ifr->ifr_qlen;
- return 0;
+ case SIOCADDMULTI:
+ if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
+ ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
+ return -EINVAL;
+ if (!netif_device_present(dev))
+ return -ENODEV;
+ return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
+ dev->addr_len, 1);
+
+ case SIOCDELMULTI:
+ if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
+ ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
+ return -EINVAL;
+ if (!netif_device_present(dev))
+ return -ENODEV;
+ return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
+ dev->addr_len, 1);
- case SIOCSIFNAME:
- ifr->ifr_newname[IFNAMSIZ-1] = '\0';
- return dev_change_name(dev, ifr->ifr_newname);
+ case SIOCSIFTXQLEN:
+ if (ifr->ifr_qlen < 0)
+ return -EINVAL;
+ dev->tx_queue_len = ifr->ifr_qlen;
+ return 0;
- /*
- * Unknown or private ioctl
- */
+ case SIOCSIFNAME:
+ ifr->ifr_newname[IFNAMSIZ-1] = '\0';
+ return dev_change_name(dev, ifr->ifr_newname);
- default:
- if ((cmd >= SIOCDEVPRIVATE &&
- cmd <= SIOCDEVPRIVATE + 15) ||
- cmd == SIOCBONDENSLAVE ||
- cmd == SIOCBONDRELEASE ||
- cmd == SIOCBONDSETHWADDR ||
- cmd == SIOCBONDSLAVEINFOQUERY ||
- cmd == SIOCBONDINFOQUERY ||
- cmd == SIOCBONDCHANGEACTIVE ||
- cmd == SIOCGMIIPHY ||
- cmd == SIOCGMIIREG ||
- cmd == SIOCSMIIREG ||
- cmd == SIOCBRADDIF ||
- cmd == SIOCBRDELIF ||
- cmd == SIOCSHWTSTAMP ||
- cmd == SIOCWANDEV) {
- err = -EOPNOTSUPP;
- if (ops->ndo_do_ioctl) {
- if (netif_device_present(dev))
- err = ops->ndo_do_ioctl(dev, ifr, cmd);
- else
- err = -ENODEV;
- }
- } else
- err = -EINVAL;
+ /*
+ * Unknown or private ioctl
+ */
+ default:
+ if ((cmd >= SIOCDEVPRIVATE &&
+ cmd <= SIOCDEVPRIVATE + 15) ||
+ cmd == SIOCBONDENSLAVE ||
+ cmd == SIOCBONDRELEASE ||
+ cmd == SIOCBONDSETHWADDR ||
+ cmd == SIOCBONDSLAVEINFOQUERY ||
+ cmd == SIOCBONDINFOQUERY ||
+ cmd == SIOCBONDCHANGEACTIVE ||
+ cmd == SIOCGMIIPHY ||
+ cmd == SIOCGMIIREG ||
+ cmd == SIOCSMIIREG ||
+ cmd == SIOCBRADDIF ||
+ cmd == SIOCBRDELIF ||
+ cmd == SIOCSHWTSTAMP ||
+ cmd == SIOCWANDEV) {
+ err = -EOPNOTSUPP;
+ if (ops->ndo_do_ioctl) {
+ if (netif_device_present(dev))
+ err = ops->ndo_do_ioctl(dev, ifr, cmd);
+ else
+ err = -ENODEV;
+ }
+ } else
+ err = -EINVAL;
}
return err;
@@ -4423,135 +4475,135 @@ int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
*/
switch (cmd) {
- /*
- * These ioctl calls:
- * - can be done by all.
- * - atomic and do not require locking.
- * - return a value
- */
- case SIOCGIFFLAGS:
- case SIOCGIFMETRIC:
- case SIOCGIFMTU:
- case SIOCGIFHWADDR:
- case SIOCGIFSLAVE:
- case SIOCGIFMAP:
- case SIOCGIFINDEX:
- case SIOCGIFTXQLEN:
- dev_load(net, ifr.ifr_name);
- read_lock(&dev_base_lock);
- ret = dev_ifsioc_locked(net, &ifr, cmd);
- read_unlock(&dev_base_lock);
- if (!ret) {
- if (colon)
- *colon = ':';
- if (copy_to_user(arg, &ifr,
- sizeof(struct ifreq)))
- ret = -EFAULT;
- }
- return ret;
+ /*
+ * These ioctl calls:
+ * - can be done by all.
+ * - atomic and do not require locking.
+ * - return a value
+ */
+ case SIOCGIFFLAGS:
+ case SIOCGIFMETRIC:
+ case SIOCGIFMTU:
+ case SIOCGIFHWADDR:
+ case SIOCGIFSLAVE:
+ case SIOCGIFMAP:
+ case SIOCGIFINDEX:
+ case SIOCGIFTXQLEN:
+ dev_load(net, ifr.ifr_name);
+ read_lock(&dev_base_lock);
+ ret = dev_ifsioc_locked(net, &ifr, cmd);
+ read_unlock(&dev_base_lock);
+ if (!ret) {
+ if (colon)
+ *colon = ':';
+ if (copy_to_user(arg, &ifr,
+ sizeof(struct ifreq)))
+ ret = -EFAULT;
+ }
+ return ret;
- case SIOCETHTOOL:
- dev_load(net, ifr.ifr_name);
- rtnl_lock();
- ret = dev_ethtool(net, &ifr);
- rtnl_unlock();
- if (!ret) {
- if (colon)
- *colon = ':';
- if (copy_to_user(arg, &ifr,
- sizeof(struct ifreq)))
- ret = -EFAULT;
- }
- return ret;
+ case SIOCETHTOOL:
+ dev_load(net, ifr.ifr_name);
+ rtnl_lock();
+ ret = dev_ethtool(net, &ifr);
+ rtnl_unlock();
+ if (!ret) {
+ if (colon)
+ *colon = ':';
+ if (copy_to_user(arg, &ifr,
+ sizeof(struct ifreq)))
+ ret = -EFAULT;
+ }
+ return ret;
- /*
- * These ioctl calls:
- * - require superuser power.
- * - require strict serialization.
- * - return a value
- */
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSIFNAME:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- dev_load(net, ifr.ifr_name);
- rtnl_lock();
- ret = dev_ifsioc(net, &ifr, cmd);
- rtnl_unlock();
- if (!ret) {
- if (colon)
- *colon = ':';
- if (copy_to_user(arg, &ifr,
- sizeof(struct ifreq)))
- ret = -EFAULT;
- }
- return ret;
+ /*
+ * These ioctl calls:
+ * - require superuser power.
+ * - require strict serialization.
+ * - return a value
+ */
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSIFNAME:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ dev_load(net, ifr.ifr_name);
+ rtnl_lock();
+ ret = dev_ifsioc(net, &ifr, cmd);
+ rtnl_unlock();
+ if (!ret) {
+ if (colon)
+ *colon = ':';
+ if (copy_to_user(arg, &ifr,
+ sizeof(struct ifreq)))
+ ret = -EFAULT;
+ }
+ return ret;
- /*
- * These ioctl calls:
- * - require superuser power.
- * - require strict serialization.
- * - do not return a value
- */
- case SIOCSIFFLAGS:
- case SIOCSIFMETRIC:
- case SIOCSIFMTU:
- case SIOCSIFMAP:
- case SIOCSIFHWADDR:
- case SIOCSIFSLAVE:
- case SIOCADDMULTI:
- case SIOCDELMULTI:
- case SIOCSIFHWBROADCAST:
- case SIOCSIFTXQLEN:
- case SIOCSMIIREG:
- case SIOCBONDENSLAVE:
- case SIOCBONDRELEASE:
- case SIOCBONDSETHWADDR:
- case SIOCBONDCHANGEACTIVE:
- case SIOCBRADDIF:
- case SIOCBRDELIF:
- case SIOCSHWTSTAMP:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- /* fall through */
- case SIOCBONDSLAVEINFOQUERY:
- case SIOCBONDINFOQUERY:
+ /*
+ * These ioctl calls:
+ * - require superuser power.
+ * - require strict serialization.
+ * - do not return a value
+ */
+ case SIOCSIFFLAGS:
+ case SIOCSIFMETRIC:
+ case SIOCSIFMTU:
+ case SIOCSIFMAP:
+ case SIOCSIFHWADDR:
+ case SIOCSIFSLAVE:
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ case SIOCSIFHWBROADCAST:
+ case SIOCSIFTXQLEN:
+ case SIOCSMIIREG:
+ case SIOCBONDENSLAVE:
+ case SIOCBONDRELEASE:
+ case SIOCBONDSETHWADDR:
+ case SIOCBONDCHANGEACTIVE:
+ case SIOCBRADDIF:
+ case SIOCBRDELIF:
+ case SIOCSHWTSTAMP:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ /* fall through */
+ case SIOCBONDSLAVEINFOQUERY:
+ case SIOCBONDINFOQUERY:
+ dev_load(net, ifr.ifr_name);
+ rtnl_lock();
+ ret = dev_ifsioc(net, &ifr, cmd);
+ rtnl_unlock();
+ return ret;
+
+ case SIOCGIFMEM:
+ /* Get the per device memory space. We can add this but
+ * currently do not support it */
+ case SIOCSIFMEM:
+ /* Set the per device memory buffer space.
+ * Not applicable in our case */
+ case SIOCSIFLINK:
+ return -EINVAL;
+
+ /*
+ * Unknown or private ioctl.
+ */
+ default:
+ if (cmd == SIOCWANDEV ||
+ (cmd >= SIOCDEVPRIVATE &&
+ cmd <= SIOCDEVPRIVATE + 15)) {
dev_load(net, ifr.ifr_name);
rtnl_lock();
ret = dev_ifsioc(net, &ifr, cmd);
rtnl_unlock();
+ if (!ret && copy_to_user(arg, &ifr,
+ sizeof(struct ifreq)))
+ ret = -EFAULT;
return ret;
-
- case SIOCGIFMEM:
- /* Get the per device memory space. We can add this but
- * currently do not support it */
- case SIOCSIFMEM:
- /* Set the per device memory buffer space.
- * Not applicable in our case */
- case SIOCSIFLINK:
- return -EINVAL;
-
- /*
- * Unknown or private ioctl.
- */
- default:
- if (cmd == SIOCWANDEV ||
- (cmd >= SIOCDEVPRIVATE &&
- cmd <= SIOCDEVPRIVATE + 15)) {
- dev_load(net, ifr.ifr_name);
- rtnl_lock();
- ret = dev_ifsioc(net, &ifr, cmd);
- rtnl_unlock();
- if (!ret && copy_to_user(arg, &ifr,
- sizeof(struct ifreq)))
- ret = -EFAULT;
- return ret;
- }
- /* Take care of Wireless Extensions */
- if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
- return wext_handle_ioctl(net, &ifr, cmd, arg);
- return -EINVAL;
+ }
+ /* Take care of Wireless Extensions */
+ if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
+ return wext_handle_ioctl(net, &ifr, cmd, arg);
+ return -EINVAL;
}
}
@@ -4816,6 +4868,7 @@ err_uninit:
dev->netdev_ops->ndo_uninit(dev);
goto out;
}
+EXPORT_SYMBOL(register_netdevice);
/**
* init_dummy_netdev - init a dummy network device for NAPI
@@ -5168,6 +5221,7 @@ void free_netdev(struct net_device *dev)
/* will free via device release */
put_device(&dev->dev);
}
+EXPORT_SYMBOL(free_netdev);
/**
* synchronize_net - Synchronize with packet receive processing
@@ -5180,6 +5234,7 @@ void synchronize_net(void)
might_sleep();
synchronize_rcu();
}
+EXPORT_SYMBOL(synchronize_net);
/**
* unregister_netdevice - remove device from the kernel
@@ -5200,6 +5255,7 @@ void unregister_netdevice(struct net_device *dev)
/* Finish processing unregister after unlock */
net_set_todo(dev);
}
+EXPORT_SYMBOL(unregister_netdevice);
/**
* unregister_netdev - remove device from the kernel
@@ -5218,7 +5274,6 @@ void unregister_netdev(struct net_device *dev)
unregister_netdevice(dev);
rtnl_unlock();
}
-
EXPORT_SYMBOL(unregister_netdev);
/**
@@ -5347,6 +5402,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
out:
return err;
}
+EXPORT_SYMBOL_GPL(dev_change_net_namespace);
static int dev_cpu_callback(struct notifier_block *nfb,
unsigned long action,
@@ -5407,7 +5463,7 @@ unsigned long netdev_increment_features(unsigned long all, unsigned long one,
unsigned long mask)
{
/* If device needs checksumming, downgrade to it. */
- if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
+ if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
else if (mask & NETIF_F_ALL_CSUM) {
/* If one device supports v4/v6 checksumming, set for all. */
@@ -5633,41 +5689,3 @@ static int __init initialize_hashrnd(void)
late_initcall_sync(initialize_hashrnd);
-EXPORT_SYMBOL(__dev_get_by_index);
-EXPORT_SYMBOL(__dev_get_by_name);
-EXPORT_SYMBOL(__dev_remove_pack);
-EXPORT_SYMBOL(dev_valid_name);
-EXPORT_SYMBOL(dev_add_pack);
-EXPORT_SYMBOL(dev_alloc_name);
-EXPORT_SYMBOL(dev_close);
-EXPORT_SYMBOL(dev_get_by_flags);
-EXPORT_SYMBOL(dev_get_by_index);
-EXPORT_SYMBOL(dev_get_by_name);
-EXPORT_SYMBOL(dev_open);
-EXPORT_SYMBOL(dev_queue_xmit);
-EXPORT_SYMBOL(dev_remove_pack);
-EXPORT_SYMBOL(dev_set_allmulti);
-EXPORT_SYMBOL(dev_set_promiscuity);
-EXPORT_SYMBOL(dev_change_flags);
-EXPORT_SYMBOL(dev_set_mtu);
-EXPORT_SYMBOL(dev_set_mac_address);
-EXPORT_SYMBOL(free_netdev);
-EXPORT_SYMBOL(netdev_boot_setup_check);
-EXPORT_SYMBOL(netdev_set_master);
-EXPORT_SYMBOL(netdev_state_change);
-EXPORT_SYMBOL(netif_receive_skb);
-EXPORT_SYMBOL(netif_rx);
-EXPORT_SYMBOL(register_gifconf);
-EXPORT_SYMBOL(register_netdevice);
-EXPORT_SYMBOL(register_netdevice_notifier);
-EXPORT_SYMBOL(skb_checksum_help);
-EXPORT_SYMBOL(synchronize_net);
-EXPORT_SYMBOL(unregister_netdevice);
-EXPORT_SYMBOL(unregister_netdevice_notifier);
-EXPORT_SYMBOL(net_enable_timestamp);
-EXPORT_SYMBOL(net_disable_timestamp);
-EXPORT_SYMBOL(dev_get_flags);
-
-EXPORT_SYMBOL(dev_load);
-
-EXPORT_PER_CPU_SYMBOL(softnet_data);
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 9d66fa953ab..0a113f26bc9 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -52,6 +52,7 @@ struct per_cpu_dm_data {
struct dm_hw_stat_delta {
struct net_device *dev;
+ unsigned long last_rx;
struct list_head list;
struct rcu_head rcu;
unsigned long last_drop_val;
@@ -180,17 +181,25 @@ static void trace_napi_poll_hit(struct napi_struct *napi)
struct dm_hw_stat_delta *new_stat;
/*
- * Ratelimit our check time to dm_hw_check_delta jiffies
+ * Don't check napi structures with no associated device
*/
- if (!time_after(jiffies, napi->dev->last_rx + dm_hw_check_delta))
+ if (!napi->dev)
return;
rcu_read_lock();
list_for_each_entry_rcu(new_stat, &hw_stats_list, list) {
+ /*
+ * only add a note to our monitor buffer if:
+ * 1) this is the dev we received on
+ * 2) it's after the last_rx delta
+ * 3) our rx_dropped count has gone up
+ */
if ((new_stat->dev == napi->dev) &&
+ (time_after(jiffies, new_stat->last_rx + dm_hw_check_delta)) &&
(napi->dev->stats.rx_dropped != new_stat->last_drop_val)) {
trace_drop_common(NULL, NULL);
new_stat->last_drop_val = napi->dev->stats.rx_dropped;
+ new_stat->last_rx = jiffies;
break;
}
}
@@ -286,6 +295,7 @@ static int dropmon_net_event(struct notifier_block *ev_block,
goto out;
new_stat->dev = dev;
+ new_stat->last_rx = jiffies;
INIT_RCU_HEAD(&new_stat->rcu);
spin_lock(&trace_state_lock);
list_add_rcu(&new_stat->list, &hw_stats_list);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index d9d5160610d..4c12ddb5f5e 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -30,10 +30,17 @@ u32 ethtool_op_get_link(struct net_device *dev)
return netif_carrier_ok(dev) ? 1 : 0;
}
+u32 ethtool_op_get_rx_csum(struct net_device *dev)
+{
+ return (dev->features & NETIF_F_ALL_CSUM) != 0;
+}
+EXPORT_SYMBOL(ethtool_op_get_rx_csum);
+
u32 ethtool_op_get_tx_csum(struct net_device *dev)
{
return (dev->features & NETIF_F_ALL_CSUM) != 0;
}
+EXPORT_SYMBOL(ethtool_op_get_tx_csum);
int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
{
@@ -891,6 +898,19 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
return actor(dev, edata.data);
}
+static int ethtool_flash_device(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_flash efl;
+
+ if (copy_from_user(&efl, useraddr, sizeof(efl)))
+ return -EFAULT;
+
+ if (!dev->ethtool_ops->flash_device)
+ return -EOPNOTSUPP;
+
+ return dev->ethtool_ops->flash_device(dev, &efl);
+}
+
/* The main entry point in this file. Called from net/core/dev.c */
int dev_ethtool(struct net *net, struct ifreq *ifr)
@@ -1004,7 +1024,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
break;
case ETHTOOL_GRXCSUM:
rc = ethtool_get_value(dev, useraddr, ethcmd,
- dev->ethtool_ops->get_rx_csum);
+ (dev->ethtool_ops->get_rx_csum ?
+ dev->ethtool_ops->get_rx_csum :
+ ethtool_op_get_rx_csum));
break;
case ETHTOOL_SRXCSUM:
rc = ethtool_set_rx_csum(dev, useraddr);
@@ -1068,7 +1090,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
break;
case ETHTOOL_GFLAGS:
rc = ethtool_get_value(dev, useraddr, ethcmd,
- dev->ethtool_ops->get_flags);
+ (dev->ethtool_ops->get_flags ?
+ dev->ethtool_ops->get_flags :
+ ethtool_op_get_flags));
break;
case ETHTOOL_SFLAGS:
rc = ethtool_set_value(dev, useraddr,
@@ -1100,6 +1124,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
case ETHTOOL_SGRO:
rc = ethtool_set_gro(dev, useraddr);
break;
+ case ETHTOOL_FLASHDEV:
+ rc = ethtool_flash_device(dev, useraddr);
+ break;
default:
rc = -EOPNOTSUPP;
}
@@ -1116,7 +1143,6 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
EXPORT_SYMBOL(ethtool_op_get_link);
EXPORT_SYMBOL(ethtool_op_get_sg);
EXPORT_SYMBOL(ethtool_op_get_tso);
-EXPORT_SYMBOL(ethtool_op_get_tx_csum);
EXPORT_SYMBOL(ethtool_op_set_sg);
EXPORT_SYMBOL(ethtool_op_set_tso);
EXPORT_SYMBOL(ethtool_op_set_tx_csum);
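Two independent additions here: a generic ethtool_op_get_rx_csum() for drivers whose RX checksum state simply mirrors dev->features, which dev_ethtool() now also uses as the ETHTOOL_GRXCSUM fallback (ETHTOOL_GFLAGS gets a similar fallback to ethtool_op_get_flags), plus a new ETHTOOL_FLASHDEV command that hands a struct ethtool_flash to the driver's flash_device hook. A hedged sketch of how a driver might wire both up; the driver names and the firmware-writing helper are hypothetical, and the exact struct ethtool_flash field usage is assumed:

/* Hypothetical low-level helper doing the actual flash write. */
static int mydrv_write_firmware(struct net_device *dev,
                                const char *file, u32 region);

static int mydrv_flash_device(struct net_device *dev, struct ethtool_flash *efl)
{
        /* efl is copied in from userspace by ethtool_flash_device() above;
         * it is assumed here to carry the firmware file name in efl->data
         * and the target flash region in efl->region.
         */
        return mydrv_write_firmware(dev, efl->data, efl->region);
}

static const struct ethtool_ops mydrv_ethtool_ops = {
        .get_link     = ethtool_op_get_link,
        .get_rx_csum  = ethtool_op_get_rx_csum,  /* new generic helper */
        .flash_device = mydrv_flash_device,      /* new ETHTOOL_FLASHDEV hook */
};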
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 78e5bfc454a..493775f4f2f 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -81,7 +81,7 @@
struct gen_estimator
{
struct list_head list;
- struct gnet_stats_basic *bstats;
+ struct gnet_stats_basic_packed *bstats;
struct gnet_stats_rate_est *rate_est;
spinlock_t *stats_lock;
int ewma_log;
@@ -165,7 +165,7 @@ static void gen_add_node(struct gen_estimator *est)
}
static
-struct gen_estimator *gen_find_node(const struct gnet_stats_basic *bstats,
+struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats,
const struct gnet_stats_rate_est *rate_est)
{
struct rb_node *p = est_root.rb_node;
@@ -202,7 +202,7 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic *bstats,
*
* NOTE: Called under rtnl_mutex
*/
-int gen_new_estimator(struct gnet_stats_basic *bstats,
+int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est *rate_est,
spinlock_t *stats_lock,
struct nlattr *opt)
@@ -262,7 +262,7 @@ static void __gen_kill_estimator(struct rcu_head *head)
*
* NOTE: Called under rtnl_mutex
*/
-void gen_kill_estimator(struct gnet_stats_basic *bstats,
+void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est *rate_est)
{
struct gen_estimator *e;
@@ -292,7 +292,7 @@ EXPORT_SYMBOL(gen_kill_estimator);
*
* Returns 0 on success or a negative error code.
*/
-int gen_replace_estimator(struct gnet_stats_basic *bstats,
+int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est *rate_est,
spinlock_t *stats_lock, struct nlattr *opt)
{
@@ -308,7 +308,7 @@ EXPORT_SYMBOL(gen_replace_estimator);
*
* Returns true if estimator is active, and false if not.
*/
-bool gen_estimator_active(const struct gnet_stats_basic *bstats,
+bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
const struct gnet_stats_rate_est *rate_est)
{
ASSERT_RTNL();
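Every user of the estimator API now has to keep its basic byte/packet counters in a struct gnet_stats_basic_packed rather than struct gnet_stats_basic. A minimal sketch of the calling convention after this change (the qdisc-private structure is illustrative):

#include <linux/spinlock.h>
#include <net/gen_stats.h>
#include <net/netlink.h>

struct my_sched_data {
        struct gnet_stats_basic_packed bstats;   /* bytes/packets, packed layout */
        struct gnet_stats_rate_est     rate_est; /* filled in by the estimator */
};

/* Attach a rate estimator: 'opt' is the TCA_RATE attribute from userspace,
 * 'lock' protects bstats while the estimator samples it.
 */
static int my_sched_add_estimator(struct my_sched_data *q,
                                  spinlock_t *lock, struct nlattr *opt)
{
        return gen_new_estimator(&q->bstats, &q->rate_est, lock, opt);
}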
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index c3d0ffeac24..8569310268a 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -106,16 +106,21 @@ gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
* if the room in the socket buffer was not sufficient.
*/
int
-gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic *b)
+gnet_stats_copy_basic(struct gnet_dump *d, struct gnet_stats_basic_packed *b)
{
if (d->compat_tc_stats) {
d->tc_stats.bytes = b->bytes;
d->tc_stats.packets = b->packets;
}
- if (d->tail)
- return gnet_stats_copy(d, TCA_STATS_BASIC, b, sizeof(*b));
+ if (d->tail) {
+ struct gnet_stats_basic sb;
+ memset(&sb, 0, sizeof(sb));
+ sb.bytes = b->bytes;
+ sb.packets = b->packets;
+ return gnet_stats_copy(d, TCA_STATS_BASIC, &sb, sizeof(sb));
+ }
return 0;
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 163b4f5b036..e587e681969 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -692,75 +692,74 @@ static void neigh_connect(struct neighbour *neigh)
hh->hh_output = neigh->ops->hh_output;
}
-static void neigh_periodic_timer(unsigned long arg)
+static void neigh_periodic_work(struct work_struct *work)
{
- struct neigh_table *tbl = (struct neigh_table *)arg;
+ struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
struct neighbour *n, **np;
- unsigned long expire, now = jiffies;
+ unsigned int i;
NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
- write_lock(&tbl->lock);
+ write_lock_bh(&tbl->lock);
/*
* periodically recompute ReachableTime from random function
*/
- if (time_after(now, tbl->last_rand + 300 * HZ)) {
+ if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
struct neigh_parms *p;
- tbl->last_rand = now;
+ tbl->last_rand = jiffies;
for (p = &tbl->parms; p; p = p->next)
p->reachable_time =
neigh_rand_reach_time(p->base_reachable_time);
}
- np = &tbl->hash_buckets[tbl->hash_chain_gc];
- tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
+ for (i = 0 ; i <= tbl->hash_mask; i++) {
+ np = &tbl->hash_buckets[i];
- while ((n = *np) != NULL) {
- unsigned int state;
+ while ((n = *np) != NULL) {
+ unsigned int state;
- write_lock(&n->lock);
+ write_lock(&n->lock);
- state = n->nud_state;
- if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
- write_unlock(&n->lock);
- goto next_elt;
- }
+ state = n->nud_state;
+ if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
+ write_unlock(&n->lock);
+ goto next_elt;
+ }
- if (time_before(n->used, n->confirmed))
- n->used = n->confirmed;
+ if (time_before(n->used, n->confirmed))
+ n->used = n->confirmed;
- if (atomic_read(&n->refcnt) == 1 &&
- (state == NUD_FAILED ||
- time_after(now, n->used + n->parms->gc_staletime))) {
- *np = n->next;
- n->dead = 1;
+ if (atomic_read(&n->refcnt) == 1 &&
+ (state == NUD_FAILED ||
+ time_after(jiffies, n->used + n->parms->gc_staletime))) {
+ *np = n->next;
+ n->dead = 1;
+ write_unlock(&n->lock);
+ neigh_cleanup_and_release(n);
+ continue;
+ }
write_unlock(&n->lock);
- neigh_cleanup_and_release(n);
- continue;
- }
- write_unlock(&n->lock);
next_elt:
- np = &n->next;
+ np = &n->next;
+ }
+ /*
+ * It's fine to release lock here, even if hash table
+ * grows while we are preempted.
+ */
+ write_unlock_bh(&tbl->lock);
+ cond_resched();
+ write_lock_bh(&tbl->lock);
}
-
/* Cycle through all hash buckets every base_reachable_time/2 ticks.
* ARP entry timeouts range from 1/2 base_reachable_time to 3/2
* base_reachable_time.
*/
- expire = tbl->parms.base_reachable_time >> 1;
- expire /= (tbl->hash_mask + 1);
- if (!expire)
- expire = 1;
-
- if (expire>HZ)
- mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
- else
- mod_timer(&tbl->gc_timer, now + expire);
-
- write_unlock(&tbl->lock);
+ schedule_delayed_work(&tbl->gc_work,
+ tbl->parms.base_reachable_time >> 1);
+ write_unlock_bh(&tbl->lock);
}
static __inline__ int neigh_max_probes(struct neighbour *n)
@@ -1316,7 +1315,7 @@ void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
}
EXPORT_SYMBOL(pneigh_enqueue);
-static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
+static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
struct net *net, int ifindex)
{
struct neigh_parms *p;
@@ -1337,7 +1336,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
struct net *net = dev_net(dev);
const struct net_device_ops *ops = dev->netdev_ops;
- ref = lookup_neigh_params(tbl, net, 0);
+ ref = lookup_neigh_parms(tbl, net, 0);
if (!ref)
return NULL;
@@ -1442,10 +1441,8 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
rwlock_init(&tbl->lock);
- setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl);
- tbl->gc_timer.expires = now + 1;
- add_timer(&tbl->gc_timer);
-
+ INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, neigh_periodic_work);
+ schedule_delayed_work(&tbl->gc_work, tbl->parms.reachable_time);
setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
skb_queue_head_init_class(&tbl->proxy_queue,
&neigh_table_proxy_queue_class);
@@ -1482,7 +1479,8 @@ int neigh_table_clear(struct neigh_table *tbl)
struct neigh_table **tp;
/* It is not clean... Fix it to unload IPv6 module safely */
- del_timer_sync(&tbl->gc_timer);
+ cancel_delayed_work(&tbl->gc_work);
+ flush_scheduled_work();
del_timer_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue);
neigh_ifdown(tbl, NULL);
@@ -1752,7 +1750,6 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
.ndtc_last_rand = jiffies_to_msecs(rand_delta),
.ndtc_hash_rnd = tbl->hash_rnd,
.ndtc_hash_mask = tbl->hash_mask,
- .ndtc_hash_chain_gc = tbl->hash_chain_gc,
.ndtc_proxy_qlen = tbl->proxy_queue.qlen,
};
@@ -1906,7 +1903,7 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
if (tbp[NDTPA_IFINDEX])
ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
- p = lookup_neigh_params(tbl, net, ifindex);
+ p = lookup_neigh_parms(tbl, net, ifindex);
if (p == NULL) {
err = -ENOENT;
goto errout_tbl_lock;
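The mechanical core of the neighbour change: the periodic GC moves from a self-rearming timer to a deferrable delayed work item, so one pass can walk the whole hash table and cond_resched() between buckets instead of doing one bucket per timer tick. The conversion pattern, reduced to its essentials (the structure is trimmed down from the hunk):

#include <linux/workqueue.h>

struct gc_table {
        struct delayed_work gc_work;
        unsigned long       base_reachable_time;
};

static void gc_work_fn(struct work_struct *work)
{
        struct gc_table *tbl = container_of(work, struct gc_table, gc_work.work);

        /* ... scan all hash buckets, cond_resched() between them ... */

        /* re-arm: replaces the old mod_timer() self-rearming */
        schedule_delayed_work(&tbl->gc_work, tbl->base_reachable_time >> 1);
}

static void gc_init(struct gc_table *tbl)
{
        INIT_DELAYED_WORK_DEFERRABLE(&tbl->gc_work, gc_work_fn);
        schedule_delayed_work(&tbl->gc_work, 1);
}

static void gc_teardown(struct gc_table *tbl)
{
        cancel_delayed_work(&tbl->gc_work);     /* stop future runs */
        flush_scheduled_work();                 /* wait for a running one */
}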
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 3994680c08b..7d4c57523b0 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -141,7 +141,7 @@ static ssize_t show_dormant(struct device *dev,
return -EINVAL;
}
-static const char *operstates[] = {
+static const char *const operstates[] = {
"unknown",
"notpresent", /* currently unused */
"down",
@@ -493,7 +493,7 @@ void netdev_unregister_kobject(struct net_device * net)
int netdev_register_kobject(struct net_device *net)
{
struct device *dev = &(net->dev);
- struct attribute_group **groups = net->sysfs_groups;
+ const struct attribute_group **groups = net->sysfs_groups;
dev->class = &net_class;
dev->platform_data = net;
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 197283072cc..1c1af2756f3 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -6,6 +6,8 @@
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/idr.h>
+#include <linux/rculist.h>
+#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
@@ -127,7 +129,7 @@ static struct net *net_create(void)
rv = setup_net(net);
if (rv == 0) {
rtnl_lock();
- list_add_tail(&net->list, &net_namespace_list);
+ list_add_tail_rcu(&net->list, &net_namespace_list);
rtnl_unlock();
}
mutex_unlock(&net_mutex);
@@ -156,9 +158,16 @@ static void cleanup_net(struct work_struct *work)
/* Don't let anyone else find us. */
rtnl_lock();
- list_del(&net->list);
+ list_del_rcu(&net->list);
rtnl_unlock();
+ /*
+ * Another CPU might be rcu-iterating the list, wait for it.
+ * This needs to be before calling the exit() notifiers, so
+ * the rcu_barrier() below isn't sufficient alone.
+ */
+ synchronize_rcu();
+
/* Run all of the network namespace exit methods */
list_for_each_entry_reverse(ops, &pernet_list, list) {
if (ops->exit)
@@ -193,6 +202,26 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
}
#endif
+struct net *get_net_ns_by_pid(pid_t pid)
+{
+ struct task_struct *tsk;
+ struct net *net;
+
+ /* Lookup the network namespace */
+ net = ERR_PTR(-ESRCH);
+ rcu_read_lock();
+ tsk = find_task_by_vpid(pid);
+ if (tsk) {
+ struct nsproxy *nsproxy;
+ nsproxy = task_nsproxy(tsk);
+ if (nsproxy)
+ net = get_net(nsproxy->net_ns);
+ }
+ rcu_read_unlock();
+ return net;
+}
+EXPORT_SYMBOL_GPL(get_net_ns_by_pid);
+
static int __init net_ns_init(void)
{
struct net_generic *ng;
@@ -219,7 +248,7 @@ static int __init net_ns_init(void)
panic("Could not setup the initial network namespace");
rtnl_lock();
- list_add_tail(&init_net.list, &net_namespace_list);
+ list_add_tail_rcu(&init_net.list, &net_namespace_list);
rtnl_unlock();
mutex_unlock(&net_mutex);
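get_net_ns_by_pid() moves here from rtnetlink.c (see the matching removal further down in this diff) and is now exported, so other GPL modules can resolve a process's network namespace. A caller sketch; the reference taken via get_net() must be dropped with put_net():

#include <linux/err.h>
#include <net/net_namespace.h>

static int do_something_in_netns_of(pid_t pid)
{
        struct net *net = get_net_ns_by_pid(pid);

        if (IS_ERR(net))
                return PTR_ERR(net);    /* -ESRCH if no such task */

        /* ... create devices, sockets, etc. inside 'net' ... */

        put_net(net);                   /* balance the get_net() above */
        return 0;
}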
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index df30feb2fc7..0b4d0d35ef4 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -9,6 +9,7 @@
* Copyright (C) 2002 Red Hat, Inc.
*/
+#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
@@ -50,6 +51,9 @@ static atomic_t trapped;
static void zap_completion_queue(void);
static void arp_reply(struct sk_buff *skb);
+static unsigned int carrier_timeout = 4;
+module_param(carrier_timeout, uint, 0644);
+
static void queue_process(struct work_struct *work)
{
struct netpoll_info *npinfo =
@@ -319,6 +323,11 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
udelay(USEC_PER_POLL);
}
+
+ WARN_ONCE(!irqs_disabled(),
+ "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
+ dev->name, ops->ndo_start_xmit);
+
local_irq_restore(flags);
}
@@ -732,7 +741,7 @@ int netpoll_setup(struct netpoll *np)
}
atleast = jiffies + HZ/10;
- atmost = jiffies + 4*HZ;
+ atmost = jiffies + carrier_timeout * HZ;
while (!netif_carrier_ok(ndev)) {
if (time_after(jiffies, atmost)) {
printk(KERN_NOTICE
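Since carrier_timeout is registered with module_param(..., uint, 0644), the old hard-coded four-second carrier wait becomes tunable: it should be settable at boot (e.g. netpoll.carrier_timeout=10 on the kernel command line when netpoll is built in) and adjustable at runtime through /sys/module/netpoll/parameters/carrier_timeout, subject to how netpoll is configured in a given build.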
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 19b8c20e98a..0bcecbf0658 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -131,6 +131,7 @@
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
+#include <linux/hrtimer.h>
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/timer.h>
@@ -162,14 +163,13 @@
#include <asm/byteorder.h>
#include <linux/rcupdate.h>
#include <linux/bitops.h>
-#include <asm/io.h>
+#include <linux/io.h>
+#include <linux/timex.h>
+#include <linux/uaccess.h>
#include <asm/dma.h>
-#include <asm/uaccess.h>
#include <asm/div64.h> /* do_div */
-#include <asm/timex.h>
-
-#define VERSION "pktgen v2.70: Packet Generator for packet performance testing.\n"
+#define VERSION "2.72"
#define IP_NAME_SZ 32
#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
#define MPLS_STACK_BOTTOM htonl(0x00000100)
@@ -206,7 +206,7 @@
#define PKTGEN_MAGIC 0xbe9be955
#define PG_PROC_DIR "pktgen"
#define PGCTRL "pgctrl"
-static struct proc_dir_entry *pg_proc_dir = NULL;
+static struct proc_dir_entry *pg_proc_dir;
#define MAX_CFLOWS 65536
@@ -231,9 +231,9 @@ struct pktgen_dev {
*/
struct proc_dir_entry *entry; /* proc file */
struct pktgen_thread *pg_thread;/* the owner */
- struct list_head list; /* Used for chaining in the thread's run-queue */
+ struct list_head list; /* chaining in the thread's run-queue */
- int running; /* if this changes to false, the test will stop */
+ int running; /* if false, the test will stop */
/* If min != max, then we will either do a linear iteration, or
* we will do a random selection from within the range.
@@ -246,33 +246,37 @@ struct pktgen_dev {
int max_pkt_size; /* = ETH_ZLEN; */
int pkt_overhead; /* overhead for MPLS, VLANs, IPSEC etc */
int nfrags;
- __u32 delay_us; /* Default delay */
- __u32 delay_ns;
+ u64 delay; /* nano-seconds */
+
__u64 count; /* Default No packets to send */
__u64 sofar; /* How many pkts we've sent so far */
__u64 tx_bytes; /* How many bytes we've transmitted */
- __u64 errors; /* Errors when trying to transmit, pkts will be re-sent */
+ __u64 errors; /* Errors when trying to transmit,
+ pkts will be re-sent */
/* runtime counters relating to clone_skb */
- __u64 next_tx_us; /* timestamp of when to tx next */
- __u32 next_tx_ns;
__u64 allocated_skbs;
__u32 clone_count;
int last_ok; /* Was last skb sent?
- * Or a failed transmit of some sort? This will keep
- * sequence numbers in order, for example.
+ * Or a failed transmit of some sort?
+ * This will keep sequence numbers in order
*/
- __u64 started_at; /* micro-seconds */
- __u64 stopped_at; /* micro-seconds */
- __u64 idle_acc; /* micro-seconds */
+ ktime_t next_tx;
+ ktime_t started_at;
+ ktime_t stopped_at;
+ u64 idle_acc; /* nano-seconds */
+
__u32 seq_num;
- int clone_skb; /* Use multiple SKBs during packet gen. If this number
- * is greater than 1, then that many copies of the same
- * packet will be sent before a new packet is allocated.
- * For instance, if you want to send 1024 identical packets
- * before creating a new packet, set clone_skb to 1024.
+ int clone_skb; /*
+ * Use multiple SKBs during packet gen.
+ * If this number is greater than 1, then
+ * that many copies of the same packet will be
+ * sent before a new packet is allocated.
+ * If you want to send 1024 identical packets
+ * before creating a new packet,
+ * set clone_skb to 1024.
*/
char dst_min[IP_NAME_SZ]; /* IP, ie 1.2.3.4 */
@@ -304,8 +308,10 @@ struct pktgen_dev {
__u16 udp_dst_max; /* exclusive, dest UDP port */
/* DSCP + ECN */
- __u8 tos; /* six most significant bits of (former) IPv4 TOS are for dscp codepoint */
- __u8 traffic_class; /* ditto for the (former) Traffic Class in IPv6 (see RFC 3260, sec. 4) */
+ __u8 tos; /* six MSB of (former) IPv4 TOS
+ are for dscp codepoint */
+ __u8 traffic_class; /* ditto for the (former) Traffic Class in IPv6
+ (see RFC 3260, sec. 4) */
/* MPLS */
unsigned nr_labels; /* Depth of stack, 0 = no MPLS */
@@ -346,15 +352,17 @@ struct pktgen_dev {
*/
__u16 pad; /* pad out the hh struct to an even 16 bytes */
- struct sk_buff *skb; /* skb we are to transmit next, mainly used for when we
+ struct sk_buff *skb; /* skb we are to transmit next, used for when we
* are transmitting the same one multiple times
*/
- struct net_device *odev; /* The out-going device. Note that the device should
- * have it's pg_info pointer pointing back to this
- * device. This will be set when the user specifies
- * the out-going device name (not when the inject is
- * started as it used to do.)
- */
+ struct net_device *odev; /* The out-going device.
+ * Note that the device should have its
+ * pg_info pointer pointing back to this
+ * device.
+ * Set when the user specifies the out-going
+ * device name (not when the inject is
+ * started as it used to do.)
+ */
struct flow_state *flows;
unsigned cflows; /* Concurrent flows (config) */
unsigned lflow; /* Flow length (config) */
@@ -379,13 +387,14 @@ struct pktgen_hdr {
};
struct pktgen_thread {
- spinlock_t if_lock;
+ spinlock_t if_lock; /* for list of devices */
struct list_head if_list; /* All device here */
struct list_head th_list;
struct task_struct *tsk;
char result[512];
- /* Field for thread to receive "posted" events terminate, stop ifs etc. */
+ /* Field for thread to receive "posted" events: terminate,
+ stop ifs etc. */
u32 control;
int cpu;
@@ -397,24 +406,22 @@ struct pktgen_thread {
#define REMOVE 1
#define FIND 0
-/** Convert to micro-seconds */
-static inline __u64 tv_to_us(const struct timeval *tv)
+static inline ktime_t ktime_now(void)
{
- __u64 us = tv->tv_usec;
- us += (__u64) tv->tv_sec * (__u64) 1000000;
- return us;
+ struct timespec ts;
+ ktime_get_ts(&ts);
+
+ return timespec_to_ktime(ts);
}
-static __u64 getCurUs(void)
+/* This works even if 32 bit because of careful byte order choice */
+static inline int ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
{
- struct timeval tv;
- do_gettimeofday(&tv);
- return tv_to_us(&tv);
+ return cmp1.tv64 < cmp2.tv64;
}
-/* old include end */
-
-static char version[] __initdata = VERSION;
+static const char version[] =
+ "pktgen " VERSION ": Packet Generator for packet performance testing.\n";
static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
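From here on the file works in ktime_t instead of the old microsecond pair; ktime_now() and ktime_lt() above are the only local helpers it needs. A small sketch of the measurement idiom that replaces the old getCurUs() arithmetic (ktime_now() is the wrapper defined in the hunk above):

#include <linux/ktime.h>

/* Measure how long fn() takes, in nanoseconds. */
static u64 time_call_ns(void (*fn)(void))
{
        ktime_t start = ktime_now();

        fn();

        return ktime_to_ns(ktime_sub(ktime_now(), start));
}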
@@ -424,7 +431,7 @@ static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
static void pktgen_run_all_threads(void);
static void pktgen_reset_all_threads(void);
static void pktgen_stop_all_threads_ifs(void);
-static int pktgen_stop_device(struct pktgen_dev *pkt_dev);
+
static void pktgen_stop(struct pktgen_thread *t);
static void pktgen_clear_counters(struct pktgen_dev *pkt_dev);
@@ -432,10 +439,10 @@ static unsigned int scan_ip6(const char *s, char ip[16]);
static unsigned int fmt_ip6(char *s, const char ip[16]);
/* Module parameters, defaults. */
-static int pg_count_d = 1000; /* 1000 pkts by default */
-static int pg_delay_d;
-static int pg_clone_skb_d;
-static int debug;
+static int pg_count_d __read_mostly = 1000;
+static int pg_delay_d __read_mostly;
+static int pg_clone_skb_d __read_mostly;
+static int debug __read_mostly;
static DEFINE_MUTEX(pktgen_thread_lock);
static LIST_HEAD(pktgen_threads);
@@ -451,12 +458,12 @@ static struct notifier_block pktgen_notifier_block = {
static int pgctrl_show(struct seq_file *seq, void *v)
{
- seq_puts(seq, VERSION);
+ seq_puts(seq, version);
return 0;
}
-static ssize_t pgctrl_write(struct file *file, const char __user * buf,
- size_t count, loff_t * ppos)
+static ssize_t pgctrl_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
{
int err = 0;
char data[128];
@@ -509,10 +516,9 @@ static const struct file_operations pktgen_fops = {
static int pktgen_if_show(struct seq_file *seq, void *v)
{
- struct pktgen_dev *pkt_dev = seq->private;
- __u64 sa;
- __u64 stopped;
- __u64 now = getCurUs();
+ const struct pktgen_dev *pkt_dev = seq->private;
+ ktime_t stopped;
+ u64 idle;
seq_printf(seq,
"Params: count %llu min_pkt_size: %u max_pkt_size: %u\n",
@@ -520,9 +526,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
pkt_dev->max_pkt_size);
seq_printf(seq,
- " frags: %d delay: %u clone_skb: %d ifname: %s\n",
- pkt_dev->nfrags,
- 1000 * pkt_dev->delay_us + pkt_dev->delay_ns,
+ " frags: %d delay: %llu clone_skb: %d ifname: %s\n",
+ pkt_dev->nfrags, (unsigned long long) pkt_dev->delay,
pkt_dev->clone_skb, pkt_dev->odev->name);
seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows,
@@ -549,11 +554,14 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
" daddr: %s min_daddr: %s max_daddr: %s\n", b1,
b2, b3);
- } else
+ } else {
+ seq_printf(seq,
+ " dst_min: %s dst_max: %s\n",
+ pkt_dev->dst_min, pkt_dev->dst_max);
seq_printf(seq,
- " dst_min: %s dst_max: %s\n src_min: %s src_max: %s\n",
- pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min,
- pkt_dev->src_max);
+ " src_min: %s src_max: %s\n",
+ pkt_dev->src_min, pkt_dev->src_max);
+ }
seq_puts(seq, " src_mac: ");
@@ -565,7 +573,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
seq_printf(seq, "%pM\n", pkt_dev->dst_mac);
seq_printf(seq,
- " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n",
+ " udp_src_min: %d udp_src_max: %d"
+ " udp_dst_min: %d udp_dst_max: %d\n",
pkt_dev->udp_src_min, pkt_dev->udp_src_max,
pkt_dev->udp_dst_min, pkt_dev->udp_dst_max);
@@ -581,23 +590,21 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
i == pkt_dev->nr_labels-1 ? "\n" : ", ");
}
- if (pkt_dev->vlan_id != 0xffff) {
+ if (pkt_dev->vlan_id != 0xffff)
seq_printf(seq, " vlan_id: %u vlan_p: %u vlan_cfi: %u\n",
- pkt_dev->vlan_id, pkt_dev->vlan_p, pkt_dev->vlan_cfi);
- }
+ pkt_dev->vlan_id, pkt_dev->vlan_p,
+ pkt_dev->vlan_cfi);
- if (pkt_dev->svlan_id != 0xffff) {
+ if (pkt_dev->svlan_id != 0xffff)
seq_printf(seq, " svlan_id: %u vlan_p: %u vlan_cfi: %u\n",
- pkt_dev->svlan_id, pkt_dev->svlan_p, pkt_dev->svlan_cfi);
- }
+ pkt_dev->svlan_id, pkt_dev->svlan_p,
+ pkt_dev->svlan_cfi);
- if (pkt_dev->tos) {
+ if (pkt_dev->tos)
seq_printf(seq, " tos: 0x%02x\n", pkt_dev->tos);
- }
- if (pkt_dev->traffic_class) {
+ if (pkt_dev->traffic_class)
seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class);
- }
seq_printf(seq, " Flags: ");
@@ -654,17 +661,21 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
seq_puts(seq, "\n");
- sa = pkt_dev->started_at;
- stopped = pkt_dev->stopped_at;
- if (pkt_dev->running)
- stopped = now; /* not really stopped, more like last-running-at */
+ /* not really stopped, more like last-running-at */
+ stopped = pkt_dev->running ? ktime_now() : pkt_dev->stopped_at;
+ idle = pkt_dev->idle_acc;
+ do_div(idle, NSEC_PER_USEC);
seq_printf(seq,
- "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n",
+ "Current:\n pkts-sofar: %llu errors: %llu\n",
(unsigned long long)pkt_dev->sofar,
- (unsigned long long)pkt_dev->errors, (unsigned long long)sa,
- (unsigned long long)stopped,
- (unsigned long long)pkt_dev->idle_acc);
+ (unsigned long long)pkt_dev->errors);
+
+ seq_printf(seq,
+ " started: %lluus stopped: %lluus idle: %lluus\n",
+ (unsigned long long) ktime_to_us(pkt_dev->started_at),
+ (unsigned long long) ktime_to_us(stopped),
+ (unsigned long long) idle);
seq_printf(seq,
" seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n",
@@ -696,7 +707,8 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
}
-static int hex32_arg(const char __user *user_buffer, unsigned long maxlen, __u32 *num)
+static int hex32_arg(const char __user *user_buffer, unsigned long maxlen,
+ __u32 *num)
{
int i = 0;
*num = 0;
@@ -846,9 +858,9 @@ static ssize_t pktgen_if_write(struct file *file,
/* Read variable name */
len = strn_len(&user_buffer[i], sizeof(name) - 1);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
memset(name, 0, sizeof(name));
if (copy_from_user(name, &user_buffer[i], len))
return -EFAULT;
@@ -872,9 +884,9 @@ static ssize_t pktgen_if_write(struct file *file,
if (!strcmp(name, "min_pkt_size")) {
len = num_arg(&user_buffer[i], 10, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
if (value < 14 + 20 + 8)
value = 14 + 20 + 8;
@@ -889,9 +901,9 @@ static ssize_t pktgen_if_write(struct file *file,
if (!strcmp(name, "max_pkt_size")) {
len = num_arg(&user_buffer[i], 10, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
if (value < 14 + 20 + 8)
value = 14 + 20 + 8;
@@ -908,9 +920,9 @@ static ssize_t pktgen_if_write(struct file *file,
if (!strcmp(name, "pkt_size")) {
len = num_arg(&user_buffer[i], 10, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
if (value < 14 + 20 + 8)
value = 14 + 20 + 8;
@@ -925,9 +937,9 @@ static ssize_t pktgen_if_write(struct file *file,
if (!strcmp(name, "debug")) {
len = num_arg(&user_buffer[i], 10, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
debug = value;
sprintf(pg_result, "OK: debug=%u", debug);
@@ -936,9 +948,9 @@ static ssize_t pktgen_if_write(struct file *file,
if (!strcmp(name, "frags")) {
len = num_arg(&user_buffer[i], 10, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
pkt_dev->nfrags = value;
sprintf(pg_result, "OK: frags=%u", pkt_dev->nfrags);
@@ -946,26 +958,24 @@ static ssize_t pktgen_if_write(struct file *file,
}
if (!strcmp(name, "delay")) {
len = num_arg(&user_buffer[i], 10, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
- if (value == 0x7FFFFFFF) {
- pkt_dev->delay_us = 0x7FFFFFFF;
- pkt_dev->delay_ns = 0;
- } else {
- pkt_dev->delay_us = value / 1000;
- pkt_dev->delay_ns = value % 1000;
- }
- sprintf(pg_result, "OK: delay=%u",
- 1000 * pkt_dev->delay_us + pkt_dev->delay_ns);
+ if (value == 0x7FFFFFFF)
+ pkt_dev->delay = ULLONG_MAX;
+ else
+ pkt_dev->delay = (u64)value * NSEC_PER_USEC;
+
+ sprintf(pg_result, "OK: delay=%llu",
+ (unsigned long long) pkt_dev->delay);
return count;
}
if (!strcmp(name, "udp_src_min")) {
len = num_arg(&user_buffer[i], 10, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
if (value != pkt_dev->udp_src_min) {
pkt_dev->udp_src_min = value;
@@ -976,9 +986,9 @@ static ssize_t pktgen_if_write(struct file *file,
}
if (!strcmp(name, "udp_dst_min")) {
len = num_arg(&user_buffer[i], 10, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
if (value != pkt_dev->udp_dst_min) {
pkt_dev->udp_dst_min = value;
@@ -989,9 +999,9 @@ static ssize_t pktgen_if_write(struct file *file,
}
if (!strcmp(name, "udp_src_max")) {
len = num_arg(&user_buffer[i], 10, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
if (value != pkt_dev->udp_src_max) {
pkt_dev->udp_src_max = value;
@@ -1002,9 +1012,9 @@ static ssize_t pktgen_if_write(struct file *file,
}
if (!strcmp(name, "udp_dst_max")) {
len = num_arg(&user_buffer[i], 10, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
if (value != pkt_dev->udp_dst_max) {
pkt_dev->udp_dst_max = value;
@@ -1015,9 +1025,9 @@ static ssize_t pktgen_if_write(struct file *file,
}
if (!strcmp(name, "clone_skb")) {
len = num_arg(&user_buffer[i], 10, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
pkt_dev->clone_skb = value;
@@ -1026,9 +1036,9 @@ static ssize_t pktgen_if_write(struct file *file,
}
if (!strcmp(name, "count")) {
len = num_arg(&user_buffer[i], 10, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
pkt_dev->count = value;
sprintf(pg_result, "OK: count=%llu",
@@ -1037,9 +1047,9 @@ static ssize_t pktgen_if_write(struct file *file,
}
if (!strcmp(name, "src_mac_count")) {
len = num_arg(&user_buffer[i], 10, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
if (pkt_dev->src_mac_count != value) {
pkt_dev->src_mac_count = value;
@@ -1051,9 +1061,9 @@ static ssize_t pktgen_if_write(struct file *file,
}
if (!strcmp(name, "dst_mac_count")) {
len = num_arg(&user_buffer[i], 10, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
if (pkt_dev->dst_mac_count != value) {
pkt_dev->dst_mac_count = value;
@@ -1067,9 +1077,9 @@ static ssize_t pktgen_if_write(struct file *file,
char f[32];
memset(f, 0, 32);
len = strn_len(&user_buffer[i], sizeof(f) - 1);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
if (copy_from_user(f, &user_buffer[i], len))
return -EFAULT;
i += len;
@@ -1168,9 +1178,8 @@ static ssize_t pktgen_if_write(struct file *file,
}
if (!strcmp(name, "dst_min") || !strcmp(name, "dst")) {
len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_min) - 1);
- if (len < 0) {
+ if (len < 0)
return len;
- }
if (copy_from_user(buf, &user_buffer[i], len))
return -EFAULT;
@@ -1190,9 +1199,9 @@ static ssize_t pktgen_if_write(struct file *file,
}
if (!strcmp(name, "dst_max")) {
len = strn_len(&user_buffer[i], sizeof(pkt_dev->dst_max) - 1);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
if (copy_from_user(buf, &user_buffer[i], len))
return -EFAULT;
@@ -1303,9 +1312,9 @@ static ssize_t pktgen_if_write(struct file *file,
}
if (!strcmp(name, "src_min")) {
len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_min) - 1);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
if (copy_from_user(buf, &user_buffer[i], len))
return -EFAULT;
buf[len] = 0;
@@ -1324,9 +1333,9 @@ static ssize_t pktgen_if_write(struct file *file,
}
if (!strcmp(name, "src_max")) {
len = strn_len(&user_buffer[i], sizeof(pkt_dev->src_max) - 1);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
if (copy_from_user(buf, &user_buffer[i], len))
return -EFAULT;
buf[len] = 0;
@@ -1350,9 +1359,9 @@ static ssize_t pktgen_if_write(struct file *file,
memcpy(old_dmac, pkt_dev->dst_mac, ETH_ALEN);
len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
memset(valstr, 0, sizeof(valstr));
if (copy_from_user(valstr, &user_buffer[i], len))
return -EFAULT;
@@ -1392,9 +1401,9 @@ static ssize_t pktgen_if_write(struct file *file,
memcpy(old_smac, pkt_dev->src_mac, ETH_ALEN);
len = strn_len(&user_buffer[i], sizeof(valstr) - 1);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
memset(valstr, 0, sizeof(valstr));
if (copy_from_user(valstr, &user_buffer[i], len))
return -EFAULT;
@@ -1435,9 +1444,9 @@ static ssize_t pktgen_if_write(struct file *file,
if (!strcmp(name, "flows")) {
len = num_arg(&user_buffer[i], 10, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
if (value > MAX_CFLOWS)
value = MAX_CFLOWS;
@@ -1449,9 +1458,9 @@ static ssize_t pktgen_if_write(struct file *file,
if (!strcmp(name, "flowlen")) {
len = num_arg(&user_buffer[i], 10, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
pkt_dev->lflow = value;
sprintf(pg_result, "OK: flowlen=%u", pkt_dev->lflow);
@@ -1460,9 +1469,9 @@ static ssize_t pktgen_if_write(struct file *file,
if (!strcmp(name, "queue_map_min")) {
len = num_arg(&user_buffer[i], 5, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
pkt_dev->queue_map_min = value;
sprintf(pg_result, "OK: queue_map_min=%u", pkt_dev->queue_map_min);
@@ -1471,9 +1480,9 @@ static ssize_t pktgen_if_write(struct file *file,
if (!strcmp(name, "queue_map_max")) {
len = num_arg(&user_buffer[i], 5, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
pkt_dev->queue_map_max = value;
sprintf(pg_result, "OK: queue_map_max=%u", pkt_dev->queue_map_max);
@@ -1505,9 +1514,9 @@ static ssize_t pktgen_if_write(struct file *file,
if (!strcmp(name, "vlan_id")) {
len = num_arg(&user_buffer[i], 4, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
if (value <= 4095) {
pkt_dev->vlan_id = value; /* turn on VLAN */
@@ -1532,9 +1541,9 @@ static ssize_t pktgen_if_write(struct file *file,
if (!strcmp(name, "vlan_p")) {
len = num_arg(&user_buffer[i], 1, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
if ((value <= 7) && (pkt_dev->vlan_id != 0xffff)) {
pkt_dev->vlan_p = value;
@@ -1547,9 +1556,9 @@ static ssize_t pktgen_if_write(struct file *file,
if (!strcmp(name, "vlan_cfi")) {
len = num_arg(&user_buffer[i], 1, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
if ((value <= 1) && (pkt_dev->vlan_id != 0xffff)) {
pkt_dev->vlan_cfi = value;
@@ -1562,9 +1571,9 @@ static ssize_t pktgen_if_write(struct file *file,
if (!strcmp(name, "svlan_id")) {
len = num_arg(&user_buffer[i], 4, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
if ((value <= 4095) && ((pkt_dev->vlan_id != 0xffff))) {
pkt_dev->svlan_id = value; /* turn on SVLAN */
@@ -1589,9 +1598,9 @@ static ssize_t pktgen_if_write(struct file *file,
if (!strcmp(name, "svlan_p")) {
len = num_arg(&user_buffer[i], 1, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
if ((value <= 7) && (pkt_dev->svlan_id != 0xffff)) {
pkt_dev->svlan_p = value;
@@ -1604,9 +1613,9 @@ static ssize_t pktgen_if_write(struct file *file,
if (!strcmp(name, "svlan_cfi")) {
len = num_arg(&user_buffer[i], 1, &value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
if ((value <= 1) && (pkt_dev->svlan_id != 0xffff)) {
pkt_dev->svlan_cfi = value;
@@ -1620,9 +1629,9 @@ static ssize_t pktgen_if_write(struct file *file,
if (!strcmp(name, "tos")) {
__u32 tmp_value = 0;
len = hex32_arg(&user_buffer[i], 2, &tmp_value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
if (len == 2) {
pkt_dev->tos = tmp_value;
@@ -1636,9 +1645,9 @@ static ssize_t pktgen_if_write(struct file *file,
if (!strcmp(name, "traffic_class")) {
__u32 tmp_value = 0;
len = hex32_arg(&user_buffer[i], 2, &tmp_value);
- if (len < 0) {
+ if (len < 0)
return len;
- }
+
i += len;
if (len == 2) {
pkt_dev->traffic_class = tmp_value;
@@ -1670,7 +1679,7 @@ static const struct file_operations pktgen_if_fops = {
static int pktgen_thread_show(struct seq_file *seq, void *v)
{
struct pktgen_thread *t = seq->private;
- struct pktgen_dev *pkt_dev;
+ const struct pktgen_dev *pkt_dev;
BUG_ON(!t);
@@ -1873,8 +1882,10 @@ static void pktgen_change_name(struct net_device *dev)
remove_proc_entry(pkt_dev->entry->name, pg_proc_dir);
- pkt_dev->entry = create_proc_entry(dev->name, 0600,
- pg_proc_dir);
+ pkt_dev->entry = proc_create_data(dev->name, 0600,
+ pg_proc_dir,
+ &pktgen_if_fops,
+ pkt_dev);
if (!pkt_dev->entry)
printk(KERN_ERR "pktgen: can't move proc "
" entry for '%s'\n", dev->name);
@@ -1908,13 +1919,14 @@ static int pktgen_device_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
-static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev, const char *ifname)
+static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev,
+ const char *ifname)
{
char b[IFNAMSIZ+5];
int i = 0;
- for(i=0; ifname[i] != '@'; i++) {
- if(i == IFNAMSIZ)
+ for (i = 0; ifname[i] != '@'; i++) {
+ if (i == IFNAMSIZ)
break;
b[i] = ifname[i];
@@ -1981,7 +1993,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
printk(KERN_WARNING "pktgen: WARNING: Requested "
"queue_map_min (zero-based) (%d) exceeds valid range "
"[0 - %d] for (%d) queues on %s, resetting\n",
- pkt_dev->queue_map_min, (ntxq ?: 1)- 1, ntxq,
+ pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
pkt_dev->odev->name);
pkt_dev->queue_map_min = ntxq - 1;
}
@@ -1989,7 +2001,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
printk(KERN_WARNING "pktgen: WARNING: Requested "
"queue_map_max (zero-based) (%d) exceeds valid range "
"[0 - %d] for (%d) queues on %s, resetting\n",
- pkt_dev->queue_map_max, (ntxq ?: 1)- 1, ntxq,
+ pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
pkt_dev->odev->name);
pkt_dev->queue_map_max = ntxq - 1;
}
@@ -2030,7 +2042,8 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
*/
rcu_read_lock();
- if ((idev = __in6_dev_get(pkt_dev->odev)) != NULL) {
+ idev = __in6_dev_get(pkt_dev->odev);
+ if (idev) {
struct inet6_ifaddr *ifp;
read_lock_bh(&idev->lock);
@@ -2089,27 +2102,40 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
pkt_dev->nflows = 0;
}
-static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us)
+
+static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
{
- __u64 start;
- __u64 now;
+ ktime_t start;
+ s32 remaining;
+ struct hrtimer_sleeper t;
+
+ hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ hrtimer_set_expires(&t.timer, spin_until);
+
+ remaining = ktime_to_us(hrtimer_expires_remaining(&t.timer));
+ if (remaining <= 0)
+ return;
- start = now = getCurUs();
- while (now < spin_until_us) {
- /* TODO: optimize sleeping behavior */
- if (spin_until_us - now > jiffies_to_usecs(1) + 1)
- schedule_timeout_interruptible(1);
- else if (spin_until_us - now > 100) {
- if (!pkt_dev->running)
- return;
- if (need_resched())
+ start = ktime_now();
+ if (remaining < 100)
+ udelay(remaining); /* really small just spin */
+ else {
+ /* see do_nanosleep */
+ hrtimer_init_sleeper(&t, current);
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+ hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
+ if (!hrtimer_active(&t.timer))
+ t.task = NULL;
+
+ if (likely(t.task))
schedule();
- }
- now = getCurUs();
+ hrtimer_cancel(&t.timer);
+ } while (t.task && pkt_dev->running && !signal_pending(current));
+ __set_current_state(TASK_RUNNING);
}
-
- pkt_dev->idle_acc += now - start;
+ pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), start));
}
static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
@@ -2120,13 +2146,9 @@ static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
}
-static inline int f_seen(struct pktgen_dev *pkt_dev, int flow)
+static inline int f_seen(const struct pktgen_dev *pkt_dev, int flow)
{
-
- if (pkt_dev->flows[flow].flags & F_INIT)
- return 1;
- else
- return 0;
+ return !!(pkt_dev->flows[flow].flags & F_INIT);
}
static inline int f_pick(struct pktgen_dev *pkt_dev)
@@ -2174,7 +2196,7 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
if (x) {
pkt_dev->flows[flow].x = x;
set_pkt_overhead(pkt_dev);
- pkt_dev->pkt_overhead+=x->props.header_len;
+ pkt_dev->pkt_overhead += x->props.header_len;
}
}
@@ -2313,18 +2335,18 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
if (!(pkt_dev->flags & F_IPV6)) {
- if ((imn = ntohl(pkt_dev->saddr_min)) < (imx =
- ntohl(pkt_dev->
- saddr_max))) {
+ imn = ntohl(pkt_dev->saddr_min);
+ imx = ntohl(pkt_dev->saddr_max);
+ if (imn < imx) {
__u32 t;
if (pkt_dev->flags & F_IPSRC_RND)
t = random32() % (imx - imn) + imn;
else {
t = ntohl(pkt_dev->cur_saddr);
t++;
- if (t > imx) {
+ if (t > imx)
t = imn;
- }
+
}
pkt_dev->cur_saddr = htonl(t);
}
@@ -2435,14 +2457,14 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev)
if (err)
goto error;
- x->curlft.bytes +=skb->len;
+ x->curlft.bytes += skb->len;
x->curlft.packets++;
error:
spin_unlock(&x->lock);
return err;
}
-static inline void free_SAs(struct pktgen_dev *pkt_dev)
+static void free_SAs(struct pktgen_dev *pkt_dev)
{
if (pkt_dev->cflows) {
/* let go of the SAs if we have them */
@@ -2457,7 +2479,7 @@ static inline void free_SAs(struct pktgen_dev *pkt_dev)
}
}
-static inline int process_ipsec(struct pktgen_dev *pkt_dev,
+static int process_ipsec(struct pktgen_dev *pkt_dev,
struct sk_buff *skb, __be16 protocol)
{
if (pkt_dev->flags & F_IPSEC_ON) {
@@ -2467,11 +2489,11 @@ static inline int process_ipsec(struct pktgen_dev *pkt_dev,
int ret;
__u8 *eth;
nhead = x->props.header_len - skb_headroom(skb);
- if (nhead >0) {
+ if (nhead > 0) {
ret = pskb_expand_head(skb, nhead, 0, GFP_ATOMIC);
if (ret < 0) {
printk(KERN_ERR "Error expanding "
- "ipsec packet %d\n",ret);
+ "ipsec packet %d\n", ret);
goto err;
}
}
@@ -2481,13 +2503,13 @@ static inline int process_ipsec(struct pktgen_dev *pkt_dev,
ret = pktgen_output_ipsec(skb, pkt_dev);
if (ret) {
printk(KERN_ERR "Error creating ipsec "
- "packet %d\n",ret);
+ "packet %d\n", ret);
goto err;
}
/* restore ll */
eth = (__u8 *) skb_push(skb, ETH_HLEN);
memcpy(eth, pkt_dev->hh, 12);
- *(u16 *) & eth[12] = protocol;
+ *(u16 *) &eth[12] = protocol;
}
}
return 1;
@@ -2500,9 +2522,9 @@ err:
static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev)
{
unsigned i;
- for (i = 0; i < pkt_dev->nr_labels; i++) {
+ for (i = 0; i < pkt_dev->nr_labels; i++)
*mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM;
- }
+
mpls--;
*mpls |= MPLS_STACK_BOTTOM;
}
@@ -2543,8 +2565,9 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
mod_cur_headers(pkt_dev);
datalen = (odev->hard_header_len + 16) & ~0xf;
- skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + datalen +
- pkt_dev->pkt_overhead, GFP_ATOMIC);
+ skb = __netdev_alloc_skb(odev,
+ pkt_dev->cur_pkt_size + 64
+ + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT);
if (!skb) {
sprintf(pkt_dev->result, "No memory");
return NULL;
@@ -2668,8 +2691,9 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
}
}
- /* Stamp the time, and sequence number, convert them to network byte order */
-
+ /* Stamp the time, and sequence number,
+ * convert them to network byte order
+ */
if (pgh) {
struct timeval timestamp;
@@ -2882,8 +2906,9 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
queue_map = pkt_dev->cur_queue_map;
mod_cur_headers(pkt_dev);
- skb = alloc_skb(pkt_dev->cur_pkt_size + 64 + 16 +
- pkt_dev->pkt_overhead, GFP_ATOMIC);
+ skb = __netdev_alloc_skb(odev,
+ pkt_dev->cur_pkt_size + 64
+ + 16 + pkt_dev->pkt_overhead, GFP_NOWAIT);
if (!skb) {
sprintf(pkt_dev->result, "No memory");
return NULL;
@@ -2922,7 +2947,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
udph = udp_hdr(skb);
memcpy(eth, pkt_dev->hh, 12);
- *(__be16 *) & eth[12] = protocol;
+ *(__be16 *) &eth[12] = protocol;
/* Eth + IPh + UDPh + mpls */
datalen = pkt_dev->cur_pkt_size - 14 -
@@ -3016,8 +3041,10 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
}
}
- /* Stamp the time, and sequence number, convert them to network byte order */
- /* should we update cloned packets too ? */
+ /* Stamp the time, and sequence number,
+ * convert them to network byte order
+ * should we update cloned packets too ?
+ */
if (pgh) {
struct timeval timestamp;
@@ -3033,8 +3060,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
return skb;
}
-static inline struct sk_buff *fill_packet(struct net_device *odev,
- struct pktgen_dev *pkt_dev)
+static struct sk_buff *fill_packet(struct net_device *odev,
+ struct pktgen_dev *pkt_dev)
{
if (pkt_dev->flags & F_IPV6)
return fill_packet_ipv6(odev, pkt_dev);
@@ -3072,9 +3099,9 @@ static void pktgen_run(struct pktgen_thread *t)
pktgen_clear_counters(pkt_dev);
pkt_dev->running = 1; /* Cranke yeself! */
pkt_dev->skb = NULL;
- pkt_dev->started_at = getCurUs();
- pkt_dev->next_tx_us = getCurUs(); /* Transmit immediately */
- pkt_dev->next_tx_ns = 0;
+ pkt_dev->started_at =
+ pkt_dev->next_tx = ktime_now();
+
set_pkt_overhead(pkt_dev);
strcpy(pkt_dev->result, "Starting");
@@ -3101,17 +3128,14 @@ static void pktgen_stop_all_threads_ifs(void)
mutex_unlock(&pktgen_thread_lock);
}
-static int thread_is_running(struct pktgen_thread *t)
+static int thread_is_running(const struct pktgen_thread *t)
{
- struct pktgen_dev *pkt_dev;
- int res = 0;
+ const struct pktgen_dev *pkt_dev;
list_for_each_entry(pkt_dev, &t->if_list, list)
- if (pkt_dev->running) {
- res = 1;
- break;
- }
- return res;
+ if (pkt_dev->running)
+ return 1;
+ return 0;
}
static int pktgen_wait_thread_run(struct pktgen_thread *t)
@@ -3168,7 +3192,8 @@ static void pktgen_run_all_threads(void)
mutex_unlock(&pktgen_thread_lock);
- schedule_timeout_interruptible(msecs_to_jiffies(125)); /* Propagate thread->control */
+ /* Propagate thread->control */
+ schedule_timeout_interruptible(msecs_to_jiffies(125));
pktgen_wait_all_threads_run();
}
@@ -3186,35 +3211,29 @@ static void pktgen_reset_all_threads(void)
mutex_unlock(&pktgen_thread_lock);
- schedule_timeout_interruptible(msecs_to_jiffies(125)); /* Propagate thread->control */
+ /* Propagate thread->control */
+ schedule_timeout_interruptible(msecs_to_jiffies(125));
pktgen_wait_all_threads_run();
}
static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
{
- __u64 total_us, bps, mbps, pps, idle;
+ __u64 bps, mbps, pps;
char *p = pkt_dev->result;
-
- total_us = pkt_dev->stopped_at - pkt_dev->started_at;
-
- idle = pkt_dev->idle_acc;
-
- p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n",
- (unsigned long long)total_us,
- (unsigned long long)(total_us - idle),
- (unsigned long long)idle,
+ ktime_t elapsed = ktime_sub(pkt_dev->stopped_at,
+ pkt_dev->started_at);
+ ktime_t idle = ns_to_ktime(pkt_dev->idle_acc);
+
+ p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n",
+ (unsigned long long)ktime_to_us(elapsed),
+ (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)),
+ (unsigned long long)ktime_to_us(idle),
(unsigned long long)pkt_dev->sofar,
pkt_dev->cur_pkt_size, nr_frags);
- pps = pkt_dev->sofar * USEC_PER_SEC;
-
- while ((total_us >> 32) != 0) {
- pps >>= 1;
- total_us >>= 1;
- }
-
- do_div(pps, total_us);
+ pps = div64_u64(pkt_dev->sofar * NSEC_PER_SEC,
+ ktime_to_ns(elapsed));
bps = pps * 8 * pkt_dev->cur_pkt_size;
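Worked example of the new rate computation: with pkt_dev->sofar = 1,000,000 packets and an elapsed time of 2 s (2,000,000,000 ns), pps = div64_u64(1,000,000 * NSEC_PER_SEC, 2,000,000,000) = 500,000; at 64 bytes per packet, bps = 500,000 * 8 * 64 = 256,000,000, i.e. 256 Mbit/s. Using div64_u64() also removes the old loop that had to shift pps and total_us down until the divisor fit in 32 bits.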
@@ -3228,7 +3247,6 @@ static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
}
/* Set stopped-at timer, remove from running list, do counters & statistics */
-
static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
{
int nr_frags = pkt_dev->skb ? skb_shinfo(pkt_dev->skb)->nr_frags : -1;
@@ -3239,7 +3257,9 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
return -EINVAL;
}
- pkt_dev->stopped_at = getCurUs();
+ kfree_skb(pkt_dev->skb);
+ pkt_dev->skb = NULL;
+ pkt_dev->stopped_at = ktime_now();
pkt_dev->running = 0;
show_results(pkt_dev, nr_frags);
@@ -3258,7 +3278,7 @@ static struct pktgen_dev *next_to_run(struct pktgen_thread *t)
continue;
if (best == NULL)
best = pkt_dev;
- else if (pkt_dev->next_tx_us < best->next_tx_us)
+ else if (ktime_lt(pkt_dev->next_tx, best->next_tx))
best = pkt_dev;
}
if_unlock(t);
@@ -3275,9 +3295,6 @@ static void pktgen_stop(struct pktgen_thread *t)
list_for_each_entry(pkt_dev, &t->if_list, list) {
pktgen_stop_device(pkt_dev);
- kfree_skb(pkt_dev->skb);
-
- pkt_dev->skb = NULL;
}
if_unlock(t);
@@ -3348,30 +3365,37 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
mutex_unlock(&pktgen_thread_lock);
}
-static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
+static void idle(struct pktgen_dev *pkt_dev)
+{
+ ktime_t idle_start = ktime_now();
+
+ if (need_resched())
+ schedule();
+ else
+ cpu_relax();
+
+ pkt_dev->idle_acc += ktime_to_ns(ktime_sub(ktime_now(), idle_start));
+}
+
+
+static void pktgen_xmit(struct pktgen_dev *pkt_dev)
{
struct net_device *odev = pkt_dev->odev;
- int (*xmit)(struct sk_buff *, struct net_device *)
+ netdev_tx_t (*xmit)(struct sk_buff *, struct net_device *)
= odev->netdev_ops->ndo_start_xmit;
struct netdev_queue *txq;
- __u64 idle_start = 0;
u16 queue_map;
int ret;
- if (pkt_dev->delay_us || pkt_dev->delay_ns) {
- u64 now;
-
- now = getCurUs();
- if (now < pkt_dev->next_tx_us)
- spin(pkt_dev, pkt_dev->next_tx_us);
+ if (pkt_dev->delay) {
+ spin(pkt_dev, pkt_dev->next_tx);
/* This is max DELAY, this has special meaning of
* "never transmit"
*/
- if (pkt_dev->delay_us == 0x7FFFFFFF) {
- pkt_dev->next_tx_us = getCurUs() + pkt_dev->delay_us;
- pkt_dev->next_tx_ns = pkt_dev->delay_ns;
- goto out;
+ if (pkt_dev->delay == ULLONG_MAX) {
+ pkt_dev->next_tx = ktime_add_ns(ktime_now(), ULONG_MAX);
+ return;
}
}
@@ -3383,47 +3407,32 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
}
txq = netdev_get_tx_queue(odev, queue_map);
- if (netif_tx_queue_stopped(txq) ||
- netif_tx_queue_frozen(txq) ||
- need_resched()) {
- idle_start = getCurUs();
-
- if (!netif_running(odev)) {
+ /* Did we saturate the queue already? */
+ if (netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)) {
+ /* If device is down, then all queues are permanently frozen */
+ if (netif_running(odev))
+ idle(pkt_dev);
+ else
pktgen_stop_device(pkt_dev);
- kfree_skb(pkt_dev->skb);
- pkt_dev->skb = NULL;
- goto out;
- }
- if (need_resched())
- schedule();
-
- pkt_dev->idle_acc += getCurUs() - idle_start;
-
- if (netif_tx_queue_stopped(txq) ||
- netif_tx_queue_frozen(txq)) {
- pkt_dev->next_tx_us = getCurUs(); /* TODO */
- pkt_dev->next_tx_ns = 0;
- goto out; /* Try the next interface */
- }
+ return;
}
- if (pkt_dev->last_ok || !pkt_dev->skb) {
- if ((++pkt_dev->clone_count >= pkt_dev->clone_skb)
- || (!pkt_dev->skb)) {
- /* build a new pkt */
- kfree_skb(pkt_dev->skb);
+ if (!pkt_dev->skb || (pkt_dev->last_ok &&
+ ++pkt_dev->clone_count >= pkt_dev->clone_skb)) {
+ /* build a new pkt */
+ kfree_skb(pkt_dev->skb);
- pkt_dev->skb = fill_packet(odev, pkt_dev);
- if (pkt_dev->skb == NULL) {
- printk(KERN_ERR "pktgen: ERROR: couldn't "
- "allocate skb in fill_packet.\n");
- schedule();
- pkt_dev->clone_count--; /* back out increment, OOM */
- goto out;
- }
- pkt_dev->allocated_skbs++;
- pkt_dev->clone_count = 0; /* reset counter */
+ pkt_dev->skb = fill_packet(odev, pkt_dev);
+ if (pkt_dev->skb == NULL) {
+ printk(KERN_ERR "pktgen: ERROR: couldn't "
+ "allocate skb in fill_packet.\n");
+ schedule();
+ pkt_dev->clone_count--; /* back out increment, OOM */
+ return;
}
+
+ pkt_dev->allocated_skbs++;
+ pkt_dev->clone_count = 0; /* reset counter */
}
/* fill_packet() might have changed the queue */
@@ -3431,73 +3440,53 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
txq = netdev_get_tx_queue(odev, queue_map);
__netif_tx_lock_bh(txq);
- if (!netif_tx_queue_stopped(txq) &&
- !netif_tx_queue_frozen(txq)) {
-
+ if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq)))
+ pkt_dev->last_ok = 0;
+ else {
atomic_inc(&(pkt_dev->skb->users));
- retry_now:
+
+ retry_now:
ret = (*xmit)(pkt_dev->skb, odev);
- if (likely(ret == NETDEV_TX_OK)) {
+ switch (ret) {
+ case NETDEV_TX_OK:
txq_trans_update(txq);
pkt_dev->last_ok = 1;
pkt_dev->sofar++;
pkt_dev->seq_num++;
pkt_dev->tx_bytes += pkt_dev->cur_pkt_size;
-
- } else if (ret == NETDEV_TX_LOCKED
- && (odev->features & NETIF_F_LLTX)) {
+ break;
+ case NETDEV_TX_LOCKED:
cpu_relax();
goto retry_now;
- } else { /* Retry it next time */
-
- atomic_dec(&(pkt_dev->skb->users));
-
- if (debug && net_ratelimit())
- printk(KERN_INFO "pktgen: Hard xmit error\n");
-
+ default: /* Drivers are not supposed to return other values! */
+ if (net_ratelimit())
+ pr_info("pktgen: %s xmit error: %d\n",
+ odev->name, ret);
pkt_dev->errors++;
+ /* fallthru */
+ case NETDEV_TX_BUSY:
+ /* Retry it next time */
+ atomic_dec(&(pkt_dev->skb->users));
pkt_dev->last_ok = 0;
}
- pkt_dev->next_tx_us = getCurUs();
- pkt_dev->next_tx_ns = 0;
-
- pkt_dev->next_tx_us += pkt_dev->delay_us;
- pkt_dev->next_tx_ns += pkt_dev->delay_ns;
-
- if (pkt_dev->next_tx_ns > 1000) {
- pkt_dev->next_tx_us++;
- pkt_dev->next_tx_ns -= 1000;
- }
+ if (pkt_dev->delay)
+ pkt_dev->next_tx = ktime_add_ns(ktime_now(),
+ pkt_dev->delay);
}
-
- else { /* Retry it next time */
- pkt_dev->last_ok = 0;
- pkt_dev->next_tx_us = getCurUs(); /* TODO */
- pkt_dev->next_tx_ns = 0;
- }
-
__netif_tx_unlock_bh(txq);
/* If pkt_dev->count is zero, then run forever */
if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
- if (atomic_read(&(pkt_dev->skb->users)) != 1) {
- idle_start = getCurUs();
- while (atomic_read(&(pkt_dev->skb->users)) != 1) {
- if (signal_pending(current)) {
- break;
- }
- schedule();
- }
- pkt_dev->idle_acc += getCurUs() - idle_start;
+ while (atomic_read(&(pkt_dev->skb->users)) != 1) {
+ if (signal_pending(current))
+ break;
+ idle(pkt_dev);
}
/* Done with this */
pktgen_stop_device(pkt_dev);
- kfree_skb(pkt_dev->skb);
- pkt_dev->skb = NULL;
}
-out:;
}
/*
@@ -3516,7 +3505,8 @@ static int pktgen_thread_worker(void *arg)
init_waitqueue_head(&t->queue);
complete(&t->start_done);
- pr_debug("pktgen: starting pktgen/%d: pid=%d\n", cpu, task_pid_nr(current));
+ pr_debug("pktgen: starting pktgen/%d: pid=%d\n",
+ cpu, task_pid_nr(current));
set_current_state(TASK_INTERRUPTIBLE);
@@ -3651,8 +3641,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
pkt_dev->max_pkt_size = ETH_ZLEN;
pkt_dev->nfrags = 0;
pkt_dev->clone_skb = pg_clone_skb_d;
- pkt_dev->delay_us = pg_delay_d / 1000;
- pkt_dev->delay_ns = pg_delay_d % 1000;
+ pkt_dev->delay = pg_delay_d;
pkt_dev->count = pg_count_d;
pkt_dev->sofar = 0;
pkt_dev->udp_src_min = 9; /* sink port */
@@ -3864,10 +3853,15 @@ static void __exit pg_cleanup(void)
module_init(pg_init);
module_exit(pg_cleanup);
-MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se");
+MODULE_AUTHOR("Robert Olsson <robert.olsson@its.uu.se>");
MODULE_DESCRIPTION("Packet Generator tool");
MODULE_LICENSE("GPL");
+MODULE_VERSION(VERSION);
module_param(pg_count_d, int, 0);
+MODULE_PARM_DESC(pg_count_d, "Default number of packets to inject");
module_param(pg_delay_d, int, 0);
+MODULE_PARM_DESC(pg_delay_d, "Default delay between packets (nanoseconds)");
module_param(pg_clone_skb_d, int, 0);
+MODULE_PARM_DESC(pg_clone_skb_d, "Default number of copies of the same packet");
module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Enable debugging of pktgen module");
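The pktgen_xmit() rework earlier in this file is the behavioural heart of the patch: rather than treating anything other than NETDEV_TX_OK as one generic failure, the ndo_start_xmit() return value is now dispatched case by case. A condensed view of that dispatch (the counters and skb reference bookkeeping of the real code are elided):

#include <linux/netdevice.h>

/* Classify a driver's transmit return value as pktgen_xmit() now does. */
static const char *classify_xmit_ret(netdev_tx_t ret)
{
        switch (ret) {
        case NETDEV_TX_OK:
                return "sent";          /* account bytes, bump seq_num */
        case NETDEV_TX_LOCKED:
                return "locked";        /* LLTX collision: retry immediately */
        case NETDEV_TX_BUSY:
                return "busy";          /* ring full: keep the skb, retry later */
        default:
                return "bogus";         /* log (rate-limited), count an error,
                                           then treat like busy */
        }
}

Separately, the new MODULE_PARM_DESC() lines mean that modinfo pktgen now documents pg_count_d, pg_delay_d, pg_clone_skb_d and debug alongside the module version.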
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d78030f88bd..eb42873f2a3 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -35,7 +35,6 @@
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
-#include <linux/nsproxy.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -52,6 +51,7 @@
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
+#include <net/net_namespace.h>
struct rtnl_link
{
@@ -606,7 +606,6 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
int type, u32 pid, u32 seq, u32 change,
unsigned int flags)
{
- struct netdev_queue *txq;
struct ifinfomsg *ifm;
struct nlmsghdr *nlh;
const struct net_device_stats *stats;
@@ -637,9 +636,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
if (dev->master)
NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex);
- txq = netdev_get_tx_queue(dev, 0);
- if (txq->qdisc_sleeping)
- NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id);
+ if (dev->qdisc)
+ NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc->ops->id);
if (dev->ifalias)
NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias);
@@ -725,25 +723,6 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
[IFLA_INFO_DATA] = { .type = NLA_NESTED },
};
-static struct net *get_net_ns_by_pid(pid_t pid)
-{
- struct task_struct *tsk;
- struct net *net;
-
- /* Lookup the network namespace */
- net = ERR_PTR(-ESRCH);
- rcu_read_lock();
- tsk = find_task_by_vpid(pid);
- if (tsk) {
- struct nsproxy *nsproxy;
- nsproxy = task_nsproxy(tsk);
- if (nsproxy)
- net = get_net(nsproxy->net_ns);
- }
- rcu_read_unlock();
- return net;
-}
-
static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
{
if (dev) {
@@ -993,12 +972,20 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname,
{
int err;
struct net_device *dev;
+ unsigned int num_queues = 1;
+ unsigned int real_num_queues = 1;
+ if (ops->get_tx_queues) {
+ err = ops->get_tx_queues(net, tb, &num_queues, &real_num_queues);
+ if (err)
+ goto err;
+ }
err = -ENOMEM;
- dev = alloc_netdev(ops->priv_size, ifname, ops->setup);
+ dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues);
if (!dev)
goto err;
+ dev->real_num_tx_queues = real_num_queues;
if (strchr(dev->name, '%')) {
err = dev_alloc_name(dev, dev->name);
if (err < 0)
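[Editor's note, not part of the patch] rtnl_create_link() now lets a link type size its TX queues before allocation via an optional get_tx_queues callback and alloc_netdev_mq(). A sketch of such a callback, with the prototype inferred from the call site above; a real link type would derive the counts from its own netlink attributes rather than hard-coding them:

	/* illustrative rtnl_link_ops callback matching the call in
	 * rtnl_create_link(); counts here are fixed for the sake of example */
	static int example_get_tx_queues(struct net *net, struct nlattr *tb[],
					 unsigned int *num_queues,
					 unsigned int *real_num_queues)
	{
		*num_queues = 8;	/* passed to alloc_netdev_mq() */
		*real_num_queues = 8;	/* exposed via dev->real_num_tx_queues */
		return 0;
	}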
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9e0597d189b..80a96166df3 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -559,9 +559,6 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
#endif
#endif
new->vlan_tci = old->vlan_tci;
-#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
- new->do_not_encrypt = old->do_not_encrypt;
-#endif
skb_copy_secmark(new, old);
}
diff --git a/net/core/sock.c b/net/core/sock.c
index bbb25be7ddf..524712a7b15 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -142,7 +142,7 @@ static struct lock_class_key af_family_slock_keys[AF_MAX];
* strings build-time, so that runtime initialization of socket
* locks is fast):
*/
-static const char *af_family_key_strings[AF_MAX+1] = {
+static const char *const af_family_key_strings[AF_MAX+1] = {
"sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
"sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
"sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
@@ -158,7 +158,7 @@ static const char *af_family_key_strings[AF_MAX+1] = {
"sk_lock-AF_IEEE802154",
"sk_lock-AF_MAX"
};
-static const char *af_family_slock_key_strings[AF_MAX+1] = {
+static const char *const af_family_slock_key_strings[AF_MAX+1] = {
"slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
"slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
"slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
@@ -174,7 +174,7 @@ static const char *af_family_slock_key_strings[AF_MAX+1] = {
"slock-AF_IEEE802154",
"slock-AF_MAX"
};
-static const char *af_family_clock_key_strings[AF_MAX+1] = {
+static const char *const af_family_clock_key_strings[AF_MAX+1] = {
"clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
"clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
"clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
@@ -482,6 +482,8 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
sk->sk_reuse = valbool;
break;
case SO_TYPE:
+ case SO_PROTOCOL:
+ case SO_DOMAIN:
case SO_ERROR:
ret = -ENOPROTOOPT;
break;
@@ -764,6 +766,14 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
v.val = sk->sk_type;
break;
+ case SO_PROTOCOL:
+ v.val = sk->sk_protocol;
+ break;
+
+ case SO_DOMAIN:
+ v.val = sk->sk_family;
+ break;
+
case SO_ERROR:
v.val = -sock_error(sk);
if (v.val == 0)
@@ -1025,6 +1035,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
sk->sk_prot = sk->sk_prot_creator = prot;
sock_lock_init(sk);
sock_net_set(sk, get_net(net));
+ atomic_set(&sk->sk_wmem_alloc, 1);
}
return sk;
@@ -1195,12 +1206,12 @@ EXPORT_SYMBOL_GPL(sk_setup_caps);
void __init sk_init(void)
{
- if (num_physpages <= 4096) {
+ if (totalram_pages <= 4096) {
sysctl_wmem_max = 32767;
sysctl_rmem_max = 32767;
sysctl_wmem_default = 32767;
sysctl_rmem_default = 32767;
- } else if (num_physpages >= 131072) {
+ } else if (totalram_pages >= 131072) {
sysctl_wmem_max = 131071;
sysctl_rmem_max = 131071;
}
@@ -1872,7 +1883,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
*/
smp_wmb();
atomic_set(&sk->sk_refcnt, 1);
- atomic_set(&sk->sk_wmem_alloc, 1);
atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);
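[Editor's note, not part of the patch] The sock.c change above makes SO_PROTOCOL and SO_DOMAIN readable via getsockopt() while rejecting attempts to set them, alongside SO_TYPE and SO_ERROR. A small userspace sketch, assuming a libc whose <sys/socket.h> already exposes the two new constants:

	#include <stdio.h>
	#include <sys/socket.h>
	#include <netinet/in.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
		int domain = 0, protocol = 0;
		socklen_t len = sizeof(domain);

		if (fd < 0)
			return 1;

		getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &domain, &len);
		len = sizeof(protocol);
		getsockopt(fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &len);

		/* for this socket: domain == AF_INET, protocol == IPPROTO_UDP */
		printf("domain=%d protocol=%d\n", domain, protocol);
		return 0;
	}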
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 8379496de82..e0879bfb7dd 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -64,6 +64,7 @@ static struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
[DCB_ATTR_CAP] = {.type = NLA_NESTED},
[DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
[DCB_ATTR_BCN] = {.type = NLA_NESTED},
+ [DCB_ATTR_APP] = {.type = NLA_NESTED},
};
/* DCB priority flow control to User Priority nested attributes */
@@ -158,6 +159,13 @@ static struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
[DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
};
+/* DCB APP nested attributes. */
+static struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
+ [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
+ [DCB_APP_ATTR_ID] = {.type = NLA_U16},
+ [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
+};
+
/* standard netlink reply call */
static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
u32 seq, u16 flags)
@@ -536,6 +544,120 @@ static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb,
return ret;
}
+static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
+ u32 pid, u32 seq, u16 flags)
+{
+ struct sk_buff *dcbnl_skb;
+ struct nlmsghdr *nlh;
+ struct dcbmsg *dcb;
+ struct nlattr *app_nest;
+ struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
+ u16 id;
+ u8 up, idtype;
+ int ret = -EINVAL;
+
+ if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->getapp)
+ goto out;
+
+ ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
+ dcbnl_app_nest);
+ if (ret)
+ goto out;
+
+ ret = -EINVAL;
+ /* all must be non-null */
+ if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
+ (!app_tb[DCB_APP_ATTR_ID]))
+ goto out;
+
+ /* either by eth type or by socket number */
+ idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
+ if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
+ (idtype != DCB_APP_IDTYPE_PORTNUM))
+ goto out;
+
+ id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
+ up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
+
+ /* send this back */
+ dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!dcbnl_skb)
+ goto out;
+
+ nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
+ dcb = NLMSG_DATA(nlh);
+ dcb->dcb_family = AF_UNSPEC;
+ dcb->cmd = DCB_CMD_GAPP;
+
+ app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
+ ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
+ if (ret)
+ goto out_cancel;
+
+ ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id);
+ if (ret)
+ goto out_cancel;
+
+ ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up);
+ if (ret)
+ goto out_cancel;
+
+ nla_nest_end(dcbnl_skb, app_nest);
+ nlmsg_end(dcbnl_skb, nlh);
+
+ ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
+ if (ret)
+ goto nlmsg_failure;
+
+ goto out;
+
+out_cancel:
+ nla_nest_cancel(dcbnl_skb, app_nest);
+nlmsg_failure:
+ kfree_skb(dcbnl_skb);
+out:
+ return ret;
+}
+
+static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
+ u32 pid, u32 seq, u16 flags)
+{
+ int ret = -EINVAL;
+ u16 id;
+ u8 up, idtype;
+ struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
+
+ if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->setapp)
+ goto out;
+
+ ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
+ dcbnl_app_nest);
+ if (ret)
+ goto out;
+
+ ret = -EINVAL;
+ /* all must be non-null */
+ if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
+ (!app_tb[DCB_APP_ATTR_ID]) ||
+ (!app_tb[DCB_APP_ATTR_PRIORITY]))
+ goto out;
+
+ /* either by eth type or by socket number */
+ idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
+ if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
+ (idtype != DCB_APP_IDTYPE_PORTNUM))
+ goto out;
+
+ id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
+ up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
+
+ ret = dcbnl_reply(netdev->dcbnl_ops->setapp(netdev, idtype, id, up),
+ RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
+ pid, seq, flags);
+out:
+ return ret;
+}
+
static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
u32 pid, u32 seq, u16 flags, int dir)
{
@@ -1093,6 +1215,14 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
nlh->nlmsg_flags);
goto out;
+ case DCB_CMD_GAPP:
+ ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq,
+ nlh->nlmsg_flags);
+ goto out;
+ case DCB_CMD_SAPP:
+ ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq,
+ nlh->nlmsg_flags);
+ goto out;
default:
goto errout;
}
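[Editor's note, not part of the patch] dcbnl_getapp()/dcbnl_setapp() above only parse and echo the nested DCB_APP_ATTR_* attributes; the actual priority mapping is delegated to the driver's dcbnl ops. A sketch of the two driver-side callbacks whose prototypes follow the call sites above; the single-entry "table" is hypothetical, and <linux/dcbnl.h> is assumed to provide the DCB_APP_IDTYPE_* constants:

	#include <linux/netdevice.h>
	#include <linux/dcbnl.h>

	static u8 example_priority;	/* last priority programmed (illustrative) */

	static u8 example_getapp(struct net_device *netdev, u8 idtype, u16 id)
	{
		if (idtype == DCB_APP_IDTYPE_ETHTYPE)
			return example_priority;
		return 0;		/* PORTNUM lookups not implemented in this sketch */
	}

	static u8 example_setapp(struct net_device *netdev, u8 idtype, u16 id, u8 up)
	{
		if (idtype != DCB_APP_IDTYPE_ETHTYPE)
			return 1;	/* non-zero status is reported back via dcbnl_reply() */
		example_priority = up;	/* a real driver would program hardware here */
		return 0;
	}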
diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig
index 4b5db44970a..8408398cd44 100644
--- a/net/dccp/ccids/Kconfig
+++ b/net/dccp/ccids/Kconfig
@@ -66,9 +66,9 @@ config IP_DCCP_CCID3_RTO
A value of 0 disables this feature by enforcing the value specified
in RFC 3448. The following values have been suggested as bounds for
experimental use:
- * 16-20ms to match the typical multimedia inter-frame interval
- * 100ms as a reasonable compromise [default]
- * 1000ms corresponds to the lower TCP RTO bound (RFC 2988, 2.4)
+ * 16-20ms to match the typical multimedia inter-frame interval
+ * 100ms as a reasonable compromise [default]
+ * 1000ms corresponds to the lower TCP RTO bound (RFC 2988, 2.4)
The default of 100ms is a compromise between a large value for
efficient DCCP implementations, and a small value to avoid disrupting
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index d235294ace2..e8cf99e880b 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -1,6 +1,4 @@
/*
- * net/dccp/ccids/ccid2.c
- *
* Copyright (c) 2005, 2006 Andrea Bittau <a.bittau@cs.ucl.ac.uk>
*
* Changes to meet Linux coding standards, and DCCP infrastructure fixes.
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index 2c94ca02901..326ac90fb90 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -1,6 +1,4 @@
/*
- * net/dccp/ccids/ccid2.h
- *
* Copyright (c) 2005 Andrea Bittau <a.bittau@cs.ucl.ac.uk>
*
* This program is free software; you can redistribute it and/or modify
@@ -40,14 +38,14 @@ struct ccid2_seq {
#define CCID2_SEQBUF_LEN 1024
#define CCID2_SEQBUF_MAX 128
-/** struct ccid2_hc_tx_sock - CCID2 TX half connection
- *
+/**
+ * struct ccid2_hc_tx_sock - CCID2 TX half connection
* @ccid2hctx_{cwnd,ssthresh,pipe}: as per RFC 4341, section 5
* @ccid2hctx_packets_acked - Ack counter for deriving cwnd growth (RFC 3465)
* @ccid2hctx_lastrtt -time RTT was last measured
* @ccid2hctx_rpseq - last consecutive seqno
* @ccid2hctx_rpdupack - dupacks since rpseq
-*/
+ */
struct ccid2_hc_tx_sock {
u32 ccid2hctx_cwnd;
u32 ccid2hctx_ssthresh;
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index a27b7f4c19c..34dcc798c45 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -1,6 +1,4 @@
/*
- * net/dccp/ccids/ccid3.c
- *
* Copyright (c) 2007 The University of Aberdeen, Scotland, UK
* Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
* Copyright (c) 2005-7 Ian McDonald <ian.mcdonald@jandi.co.nz>
@@ -52,7 +50,7 @@ static int ccid3_debug;
#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state)
{
- static char *ccid3_state_names[] = {
+ static const char *const ccid3_state_names[] = {
[TFRC_SSTATE_NO_SENT] = "NO_SENT",
[TFRC_SSTATE_NO_FBACK] = "NO_FBACK",
[TFRC_SSTATE_FBACK] = "FBACK",
@@ -646,7 +644,7 @@ enum ccid3_fback_type {
#ifdef CONFIG_IP_DCCP_CCID3_DEBUG
static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state)
{
- static char *ccid3_rx_state_names[] = {
+ static const char *const ccid3_rx_state_names[] = {
[TFRC_RSTATE_NO_DATA] = "NO_DATA",
[TFRC_RSTATE_DATA] = "DATA",
[TFRC_RSTATE_TERM] = "TERM",
@@ -750,7 +748,8 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
return 0;
}
-/** ccid3_first_li - Implements [RFC 3448, 6.3.1]
+/**
+ * ccid3_first_li - Implements [RFC 5348, 6.3.1]
*
* Determine the length of the first loss interval via inverse lookup.
* Assume that X_recv can be computed by the throughput equation
diff --git a/net/dccp/ccids/ccid3.h b/net/dccp/ccids/ccid3.h
index 49ca32bd7e7..e5a24414384 100644
--- a/net/dccp/ccids/ccid3.h
+++ b/net/dccp/ccids/ccid3.h
@@ -1,6 +1,4 @@
/*
- * net/dccp/ccids/ccid3.h
- *
* Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
* Copyright (c) 2007 The University of Aberdeen, Scotland, UK
*
@@ -75,8 +73,8 @@ enum ccid3_hc_tx_states {
TFRC_SSTATE_TERM,
};
-/** struct ccid3_hc_tx_sock - CCID3 sender half-connection socket
- *
+/**
+ * struct ccid3_hc_tx_sock - CCID3 sender half-connection socket
* @ccid3hctx_x - Current sending rate in 64 * bytes per second
* @ccid3hctx_x_recv - Receive rate in 64 * bytes per second
* @ccid3hctx_x_calc - Calculated rate in bytes per second
@@ -119,9 +117,9 @@ struct ccid3_hc_tx_sock {
static inline struct ccid3_hc_tx_sock *ccid3_hc_tx_sk(const struct sock *sk)
{
- struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid);
- BUG_ON(hctx == NULL);
- return hctx;
+ struct ccid3_hc_tx_sock *hctx = ccid_priv(dccp_sk(sk)->dccps_hc_tx_ccid);
+ BUG_ON(hctx == NULL);
+ return hctx;
}
/* TFRC receiver states */
@@ -131,22 +129,22 @@ enum ccid3_hc_rx_states {
TFRC_RSTATE_TERM = 127,
};
-/** struct ccid3_hc_rx_sock - CCID3 receiver half-connection socket
- *
- * @ccid3hcrx_x_recv - Receiver estimate of send rate (RFC 3448 4.3)
- * @ccid3hcrx_rtt - Receiver estimate of rtt (non-standard)
- * @ccid3hcrx_p - Current loss event rate (RFC 3448 5.4)
- * @ccid3hcrx_last_counter - Tracks window counter (RFC 4342, 8.1)
- * @ccid3hcrx_state - Receiver state, one of %ccid3_hc_rx_states
- * @ccid3hcrx_bytes_recv - Total sum of DCCP payload bytes
- * @ccid3hcrx_x_recv - Receiver estimate of send rate (RFC 3448, sec. 4.3)
- * @ccid3hcrx_rtt - Receiver estimate of RTT
- * @ccid3hcrx_tstamp_last_feedback - Time at which last feedback was sent
- * @ccid3hcrx_tstamp_last_ack - Time at which last feedback was sent
- * @ccid3hcrx_hist - Packet history (loss detection + RTT sampling)
- * @ccid3hcrx_li_hist - Loss Interval database
- * @ccid3hcrx_s - Received packet size in bytes
- * @ccid3hcrx_pinv - Inverse of Loss Event Rate (RFC 4342, sec. 8.5)
+/**
+ * struct ccid3_hc_rx_sock - CCID3 receiver half-connection socket
+ * @ccid3hcrx_x_recv - Receiver estimate of send rate (RFC 3448 4.3)
+ * @ccid3hcrx_rtt - Receiver estimate of rtt (non-standard)
+ * @ccid3hcrx_p - Current loss event rate (RFC 3448 5.4)
+ * @ccid3hcrx_last_counter - Tracks window counter (RFC 4342, 8.1)
+ * @ccid3hcrx_state - Receiver state, one of %ccid3_hc_rx_states
+ * @ccid3hcrx_bytes_recv - Total sum of DCCP payload bytes
+ * @ccid3hcrx_x_recv - Receiver estimate of send rate (RFC 3448, sec. 4.3)
+ * @ccid3hcrx_rtt - Receiver estimate of RTT
+ * @ccid3hcrx_tstamp_last_feedback - Time at which last feedback was sent
+ * @ccid3hcrx_tstamp_last_ack - Time at which last feedback was sent
+ * @ccid3hcrx_hist - Packet history (loss detection + RTT sampling)
+ * @ccid3hcrx_li_hist - Loss Interval database
+ * @ccid3hcrx_s - Received packet size in bytes
+ * @ccid3hcrx_pinv - Inverse of Loss Event Rate (RFC 4342, sec. 8.5)
*/
struct ccid3_hc_rx_sock {
u8 ccid3hcrx_last_counter:4;
@@ -163,9 +161,9 @@ struct ccid3_hc_rx_sock {
static inline struct ccid3_hc_rx_sock *ccid3_hc_rx_sk(const struct sock *sk)
{
- struct ccid3_hc_rx_sock *hcrx = ccid_priv(dccp_sk(sk)->dccps_hc_rx_ccid);
- BUG_ON(hcrx == NULL);
- return hcrx;
+ struct ccid3_hc_rx_sock *hcrx = ccid_priv(dccp_sk(sk)->dccps_hc_rx_ccid);
+ BUG_ON(hcrx == NULL);
+ return hcrx;
}
#endif /* _DCCP_CCID3_H_ */
diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c
index 4d1e4012726..8fc3cbf7907 100644
--- a/net/dccp/ccids/lib/loss_interval.c
+++ b/net/dccp/ccids/lib/loss_interval.c
@@ -1,6 +1,4 @@
/*
- * net/dccp/ccids/lib/loss_interval.c
- *
* Copyright (c) 2007 The University of Aberdeen, Scotland, UK
* Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
* Copyright (c) 2005-7 Ian McDonald <ian.mcdonald@jandi.co.nz>
@@ -21,7 +19,7 @@ static const int tfrc_lh_weights[NINTERVAL] = { 10, 10, 10, 10, 8, 6, 4, 2 };
/* implements LIFO semantics on the array */
static inline u8 LIH_INDEX(const u8 ctr)
{
- return (LIH_SIZE - 1 - (ctr % LIH_SIZE));
+ return LIH_SIZE - 1 - (ctr % LIH_SIZE);
}
/* the `counter' index always points at the next entry to be populated */
@@ -129,7 +127,8 @@ static inline u8 tfrc_lh_is_new_loss(struct tfrc_loss_interval *cur,
(cur->li_is_closed || SUB16(new_loss->tfrchrx_ccval, cur->li_ccval) > 4);
}
-/** tfrc_lh_interval_add - Insert new record into the Loss Interval database
+/**
+ * tfrc_lh_interval_add - Insert new record into the Loss Interval database
* @lh: Loss Interval database
* @rh: Receive history containing a fresh loss event
* @calc_first_li: Caller-dependent routine to compute length of first interval
diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h
index 246018a3b26..d1d2f5383b7 100644
--- a/net/dccp/ccids/lib/loss_interval.h
+++ b/net/dccp/ccids/lib/loss_interval.h
@@ -1,8 +1,6 @@
#ifndef _DCCP_LI_HIST_
#define _DCCP_LI_HIST_
/*
- * net/dccp/ccids/lib/loss_interval.h
- *
* Copyright (c) 2007 The University of Aberdeen, Scotland, UK
* Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
* Copyright (c) 2005-7 Ian McDonald <ian.mcdonald@jandi.co.nz>
diff --git a/net/dccp/ccids/lib/packet_history.c b/net/dccp/ccids/lib/packet_history.c
index b7785b3581e..3a4f414e94a 100644
--- a/net/dccp/ccids/lib/packet_history.c
+++ b/net/dccp/ccids/lib/packet_history.c
@@ -1,6 +1,4 @@
/*
- * net/dccp/packet_history.c
- *
* Copyright (c) 2007 The University of Aberdeen, Scotland, UK
* Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
*
@@ -128,7 +126,7 @@ u32 tfrc_tx_hist_rtt(struct tfrc_tx_hist_entry *head, const u64 seqno,
/*
- * Receiver History Routines
+ * Receiver History Routines
*/
static struct kmem_cache *tfrc_rx_hist_slab;
diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h
index 461cc91cce8..7df6c529999 100644
--- a/net/dccp/ccids/lib/packet_history.h
+++ b/net/dccp/ccids/lib/packet_history.h
@@ -70,7 +70,6 @@ struct tfrc_rx_hist_entry {
/**
* tfrc_rx_hist - RX history structure for TFRC-based protocols
- *
* @ring: Packet history for RTT sampling and loss detection
* @loss_count: Number of entries in circular history
* @loss_start: Movable index (for loss detection)
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index e9720b14327..01bb48e96c2 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -1,8 +1,6 @@
#ifndef _TFRC_H_
#define _TFRC_H_
/*
- * net/dccp/ccids/lib/tfrc.h
- *
* Copyright (c) 2007 The University of Aberdeen, Scotland, UK
* Copyright (c) 2005-6 The University of Waikato, Hamilton, New Zealand.
* Copyright (c) 2005-6 Ian McDonald <ian.mcdonald@jandi.co.nz>
@@ -32,7 +30,7 @@ extern int tfrc_debug;
/* integer-arithmetic divisions of type (a * 1000000)/b */
static inline u64 scaled_div(u64 a, u64 b)
{
- BUG_ON(b==0);
+ BUG_ON(b == 0);
return div64_u64(a * 1000000, b);
}
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index c5d3a9e5a5a..22ca1cf0eb5 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -1,6 +1,4 @@
/*
- * net/dccp/ccids/lib/tfrc_equation.c
- *
* Copyright (c) 2005 The University of Waikato, Hamilton, New Zealand.
* Copyright (c) 2005 Ian McDonald <ian.mcdonald@jandi.co.nz>
* Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
@@ -79,10 +77,10 @@
}
With the given configuration, we have, with M = TFRC_CALC_X_ARRSIZE-1,
- lookup[0][0] = g(1000000/(M+1)) = 1000000 * f(0.2%)
- lookup[M][0] = g(1000000) = 1000000 * f(100%)
- lookup[0][1] = g(TFRC_SMALLEST_P) = 1000000 * f(0.01%)
- lookup[M][1] = g(TFRC_CALC_X_SPLIT) = 1000000 * f(5%)
+ lookup[0][0] = g(1000000/(M+1)) = 1000000 * f(0.2%)
+ lookup[M][0] = g(1000000) = 1000000 * f(100%)
+ lookup[0][1] = g(TFRC_SMALLEST_P) = 1000000 * f(0.01%)
+ lookup[M][1] = g(TFRC_CALC_X_SPLIT) = 1000000 * f(5%)
In summary, the two columns represent f(p) for the following ranges:
* The first column is for 0.002 <= p <= 1.0
@@ -610,11 +608,10 @@ static inline u32 tfrc_binsearch(u32 fval, u8 small)
/**
* tfrc_calc_x - Calculate the send rate as per section 3.1 of RFC3448
- *
- * @s: packet size in bytes
- * @R: RTT scaled by 1000000 (i.e., microseconds)
- * @p: loss ratio estimate scaled by 1000000
- * Returns X_calc in bytes per second (not scaled).
+ * @s: packet size in bytes
+ * @R: RTT scaled by 1000000 (i.e., microseconds)
+ * @p: loss ratio estimate scaled by 1000000
+ * Returns X_calc in bytes per second (not scaled).
*/
u32 tfrc_calc_x(u16 s, u32 R, u32 p)
{
@@ -630,17 +627,17 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p)
return ~0U;
}
- if (p <= TFRC_CALC_X_SPLIT) { /* 0.0000 < p <= 0.05 */
+ if (p <= TFRC_CALC_X_SPLIT) { /* 0.0000 < p <= 0.05 */
if (p < TFRC_SMALLEST_P) { /* 0.0000 < p < 0.0001 */
DCCP_WARN("Value of p (%d) below resolution. "
"Substituting %d\n", p, TFRC_SMALLEST_P);
index = 0;
- } else /* 0.0001 <= p <= 0.05 */
+ } else /* 0.0001 <= p <= 0.05 */
index = p/TFRC_SMALLEST_P - 1;
f = tfrc_calc_x_lookup[index][1];
- } else { /* 0.05 < p <= 1.00 */
+ } else { /* 0.05 < p <= 1.00 */
index = p/(1000000/TFRC_CALC_X_ARRSIZE) - 1;
f = tfrc_calc_x_lookup[index][0];
@@ -661,7 +658,6 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p)
/**
* tfrc_calc_x_reverse_lookup - try to find p given f(p)
- *
* @fvalue: function value to match, scaled by 1000000
* Returns closest match for p, also scaled by 1000000
*/
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index b04160a2eea..972b8dc918d 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -213,7 +213,7 @@ static int dccp_feat_default_value(u8 feat_num)
*/
static const char *dccp_feat_fname(const u8 feat)
{
- static const char *feature_names[] = {
+ static const char *const feature_names[] = {
[DCCPF_RESERVED] = "Reserved",
[DCCPF_CCID] = "CCID",
[DCCPF_SHORT_SEQNOS] = "Allow Short Seqnos",
@@ -236,8 +236,9 @@ static const char *dccp_feat_fname(const u8 feat)
return feature_names[feat];
}
-static const char *dccp_feat_sname[] = { "DEFAULT", "INITIALISING", "CHANGING",
- "UNSTABLE", "STABLE" };
+static const char *const dccp_feat_sname[] = {
+ "DEFAULT", "INITIALISING", "CHANGING", "UNSTABLE", "STABLE",
+};
#ifdef CONFIG_IP_DCCP_DEBUG
static const char *dccp_feat_oname(const u8 opt)
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index a0a36c9e6cc..7302e1498d4 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -880,7 +880,7 @@ discard_and_relse:
goto discard_it;
}
-static struct inet_connection_sock_af_ops dccp_ipv4_af_ops = {
+static const struct inet_connection_sock_af_ops dccp_ipv4_af_ops = {
.queue_xmit = ip_queue_xmit,
.send_check = dccp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
@@ -948,7 +948,7 @@ static struct proto dccp_v4_prot = {
#endif
};
-static struct net_protocol dccp_v4_protocol = {
+static const struct net_protocol dccp_v4_protocol = {
.handler = dccp_v4_rcv,
.err_handler = dccp_v4_err,
.no_policy = 1,
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 3e70faab298..e48ca5d4565 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -35,8 +35,8 @@
/* The per-net dccp.v6_ctl_sk is used for sending RSTs and ACKs */
-static struct inet_connection_sock_af_ops dccp_ipv6_mapped;
-static struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
+static const struct inet_connection_sock_af_ops dccp_ipv6_mapped;
+static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops;
static void dccp_v6_hash(struct sock *sk)
{
@@ -1055,7 +1055,7 @@ failure:
return err;
}
-static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
+static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
.queue_xmit = inet6_csk_xmit,
.send_check = dccp_v6_send_check,
.rebuild_header = inet6_sk_rebuild_header,
@@ -1076,7 +1076,7 @@ static struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
/*
* DCCP over IPv4 via INET6 API
*/
-static struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
+static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
.queue_xmit = ip_queue_xmit,
.send_check = dccp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
@@ -1152,13 +1152,13 @@ static struct proto dccp_v6_prot = {
#endif
};
-static struct inet6_protocol dccp_v6_protocol = {
+static const struct inet6_protocol dccp_v6_protocol = {
.handler = dccp_v6_rcv,
.err_handler = dccp_v6_err,
.flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};
-static struct proto_ops inet6_dccp_ops = {
+static const struct proto_ops inet6_dccp_ops = {
.family = PF_INET6,
.owner = THIS_MODULE,
.release = inet6_release,
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 3281013ce03..bc4467082a0 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -124,7 +124,7 @@ EXPORT_SYMBOL_GPL(dccp_done);
const char *dccp_packet_name(const int type)
{
- static const char *dccp_packet_names[] = {
+ static const char *const dccp_packet_names[] = {
[DCCP_PKT_REQUEST] = "REQUEST",
[DCCP_PKT_RESPONSE] = "RESPONSE",
[DCCP_PKT_DATA] = "DATA",
@@ -147,7 +147,7 @@ EXPORT_SYMBOL_GPL(dccp_packet_name);
const char *dccp_state_name(const int state)
{
- static char *dccp_state_names[] = {
+ static const char *const dccp_state_names[] = {
[DCCP_OPEN] = "OPEN",
[DCCP_REQUESTING] = "REQUESTING",
[DCCP_PARTOPEN] = "PARTOPEN",
@@ -1049,10 +1049,10 @@ static int __init dccp_init(void)
*
* The methodology is similar to that of the buffer cache.
*/
- if (num_physpages >= (128 * 1024))
- goal = num_physpages >> (21 - PAGE_SHIFT);
+ if (totalram_pages >= (128 * 1024))
+ goal = totalram_pages >> (21 - PAGE_SHIFT);
else
- goal = num_physpages >> (23 - PAGE_SHIFT);
+ goal = totalram_pages >> (23 - PAGE_SHIFT);
if (thash_entries)
goal = (thash_entries *
@@ -1159,6 +1159,7 @@ static void __exit dccp_fini(void)
kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
dccp_ackvec_exit();
dccp_sysctl_exit();
+ percpu_counter_destroy(&dccp_orphan_count);
}
module_init(dccp_init);
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index 923786bd6d0..794b5bf95af 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -59,7 +59,7 @@ static int dn_phase3_output(struct sk_buff *);
/*
* For talking to broadcast devices: Ethernet & PPP
*/
-static struct neigh_ops dn_long_ops = {
+static const struct neigh_ops dn_long_ops = {
.family = AF_DECnet,
.error_report = dn_long_error_report,
.output = dn_long_output,
@@ -71,7 +71,7 @@ static struct neigh_ops dn_long_ops = {
/*
* For talking to pointopoint and multidrop devices: DDCMP and X.25
*/
-static struct neigh_ops dn_short_ops = {
+static const struct neigh_ops dn_short_ops = {
.family = AF_DECnet,
.error_report = dn_short_error_report,
.output = dn_short_output,
@@ -83,7 +83,7 @@ static struct neigh_ops dn_short_ops = {
/*
* For talking to DECnet phase III nodes
*/
-static struct neigh_ops dn_phase3_ops = {
+static const struct neigh_ops dn_phase3_ops = {
.family = AF_DECnet,
.error_report = dn_short_error_report, /* Can use short version here */
.output = dn_phase3_output,
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 1d6ca8a98dc..57662cabaf9 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -774,7 +774,7 @@ static int dn_rt_bug(struct sk_buff *skb)
kfree_skb(skb);
- return NET_RX_BAD;
+ return NET_RX_DROP;
}
static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
@@ -1750,7 +1750,7 @@ void __init dn_route_init(void)
dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
add_timer(&dn_route_timer);
- goal = num_physpages >> (26 - PAGE_SHIFT);
+ goal = totalram_pages >> (26 - PAGE_SHIFT);
for(order = 0; (1UL << order) < goal; order++)
/* NOTHING */;
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 41055f33d28..4b0ea054044 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -169,13 +169,13 @@ struct net_device *dsa_slave_create(struct dsa_switch *ds,
int port, char *name);
/* tag_dsa.c */
-int dsa_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev);
/* tag_edsa.c */
-int edsa_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev);
/* tag_trailer.c */
-int trailer_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev);
#endif
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index 8fa25bafe6c..cdf2d28a029 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -15,7 +15,7 @@
#define DSA_HLEN 4
-int dsa_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t dsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
u8 *dsa_header;
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 815607bd286..8f53948cff4 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -16,7 +16,7 @@
#define DSA_HLEN 4
#define EDSA_HLEN 8
-int edsa_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t edsa_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
u8 *edsa_header;
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index 1c3e30c38b8..a85c829853c 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -13,7 +13,7 @@
#include <linux/netdevice.h>
#include "dsa_priv.h"
-int trailer_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t trailer_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
struct sk_buff *nskb;
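[Editor's note, not part of the patch] All three DSA taggers now return netdev_tx_t instead of int, matching the ndo_start_xmit prototype. A minimal sketch of the pattern, with the tag-insertion step elided; only standard netdevice/skbuff APIs are used:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	/* illustrative only: shape of a netdev_tx_t-returning xmit handler */
	static netdev_tx_t example_tag_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		if (skb_cow_head(skb, 4) < 0) {
			kfree_skb(skb);
			return NETDEV_TX_OK;	/* skb consumed; nothing for the core to requeue */
		}
		/* ... insert the switch tag and queue skb on the parent port ... */
		return NETDEV_TX_OK;
	}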
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 2e1f836d424..0e0254fd767 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -520,6 +520,7 @@ static int econet_getname(struct socket *sock, struct sockaddr *uaddr,
if (peer)
return -EOPNOTSUPP;
+ memset(sec, 0, sizeof(*sec));
mutex_lock(&econet_mutex);
sk = sock->sk;
@@ -1072,7 +1073,7 @@ static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
skb->protocol = htons(ETH_P_IP);
skb_pull(skb, sizeof(struct ec_framehdr));
netif_rx(skb);
- return 0;
+ return NET_RX_SUCCESS;
}
sk = ec_listening_socket(hdr->port, hdr->src_stn, hdr->src_net);
@@ -1083,7 +1084,7 @@ static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
hdr->port))
goto drop;
- return 0;
+ return NET_RX_SUCCESS;
drop:
kfree_skb(skb);
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index f99338a2610..4068a9f5113 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_IEEE802154) += nl802154.o af_802154.o
+obj-$(CONFIG_IEEE802154) += nl802154.o af_802154.o wpan-class.o
nl802154-y := netlink.o nl_policy.o
af_802154-y := af_ieee802154.o raw.o dgram.o
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index 3bb6bdb1dac..cd949d5e451 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -34,8 +34,8 @@
#include <net/tcp_states.h>
#include <net/route.h>
-#include <net/ieee802154/af_ieee802154.h>
-#include <net/ieee802154/netdevice.h>
+#include <net/af_ieee802154.h>
+#include <net/ieee802154_netdev.h>
#include "af802154.h"
@@ -136,7 +136,7 @@ static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
unsigned int cmd)
{
struct ifreq ifr;
- int ret = -EINVAL;
+ int ret = -ENOIOCTLCMD;
struct net_device *dev;
if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
@@ -146,8 +146,8 @@ static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
dev_load(sock_net(sk), ifr.ifr_name);
dev = dev_get_by_name(sock_net(sk), ifr.ifr_name);
- if (dev->type == ARPHRD_IEEE802154 ||
- dev->type == ARPHRD_IEEE802154_PHY)
+
+ if (dev->type == ARPHRD_IEEE802154 && dev->netdev_ops->ndo_do_ioctl)
ret = dev->netdev_ops->ndo_do_ioctl(dev, &ifr, cmd);
if (!ret && copy_to_user(arg, &ifr, sizeof(struct ifreq)))
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 14d39840dd6..51593a48f2d 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -26,9 +26,9 @@
#include <linux/if_arp.h>
#include <linux/list.h>
#include <net/sock.h>
-#include <net/ieee802154/af_ieee802154.h>
-#include <net/ieee802154/mac_def.h>
-#include <net/ieee802154/netdevice.h>
+#include <net/af_ieee802154.h>
+#include <net/ieee802154.h>
+#include <net/ieee802154_netdev.h>
#include <asm/ioctls.h>
@@ -40,9 +40,11 @@ static DEFINE_RWLOCK(dgram_lock);
struct dgram_sock {
struct sock sk;
- int bound;
struct ieee802154_addr src_addr;
struct ieee802154_addr dst_addr;
+
+ unsigned bound:1;
+ unsigned want_ack:1;
};
static inline struct dgram_sock *dgram_sk(const struct sock *sk)
@@ -50,7 +52,6 @@ static inline struct dgram_sock *dgram_sk(const struct sock *sk)
return container_of(sk, struct dgram_sock, sk);
}
-
static void dgram_hash(struct sock *sk)
{
write_lock_bh(&dgram_lock);
@@ -73,6 +74,7 @@ static int dgram_init(struct sock *sk)
ro->dst_addr.addr_type = IEEE802154_ADDR_LONG;
ro->dst_addr.pan_id = 0xffff;
+ ro->want_ack = 1;
memset(&ro->dst_addr.hwaddr, 0xff, sizeof(ro->dst_addr.hwaddr));
return 0;
}
@@ -86,18 +88,18 @@ static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
{
struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
struct dgram_sock *ro = dgram_sk(sk);
- int err = 0;
+ int err = -EINVAL;
struct net_device *dev;
+ lock_sock(sk);
+
ro->bound = 0;
if (len < sizeof(*addr))
- return -EINVAL;
+ goto out;
if (addr->family != AF_IEEE802154)
- return -EINVAL;
-
- lock_sock(sk);
+ goto out;
dev = ieee802154_get_dev(sock_net(sk), &addr->addr);
if (!dev) {
@@ -113,6 +115,7 @@ static int dgram_bind(struct sock *sk, struct sockaddr *uaddr, int len)
memcpy(&ro->src_addr, &addr->addr, sizeof(struct ieee802154_addr));
ro->bound = 1;
+ err = 0;
out_put:
dev_put(dev);
out:
@@ -235,7 +238,10 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
skb_reset_network_header(skb);
- mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA | MAC_CB_FLAG_ACKREQ;
+ mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
+ if (ro->want_ack)
+ mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
+
mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &ro->dst_addr,
ro->bound ? &ro->src_addr : NULL, size);
@@ -377,6 +383,64 @@ int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
return ret;
}
+static int dgram_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct dgram_sock *ro = dgram_sk(sk);
+
+ int val, len;
+
+ if (level != SOL_IEEE802154)
+ return -EOPNOTSUPP;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ len = min_t(unsigned int, len, sizeof(int));
+
+ switch (optname) {
+ case WPAN_WANTACK:
+ val = ro->want_ack;
+ break;
+ default:
+ return -ENOPROTOOPT;
+ }
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+ return 0;
+}
+
+static int dgram_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int optlen)
+{
+ struct dgram_sock *ro = dgram_sk(sk);
+ int val;
+ int err = 0;
+
+ if (optlen < sizeof(int))
+ return -EINVAL;
+
+ if (get_user(val, (int __user *)optval))
+ return -EFAULT;
+
+ lock_sock(sk);
+
+ switch (optname) {
+ case WPAN_WANTACK:
+ ro->want_ack = !!val;
+ break;
+ default:
+ err = -ENOPROTOOPT;
+ break;
+ }
+
+ release_sock(sk);
+ return err;
+}
+
struct proto ieee802154_dgram_prot = {
.name = "IEEE-802.15.4-MAC",
.owner = THIS_MODULE,
@@ -391,5 +455,7 @@ struct proto ieee802154_dgram_prot = {
.connect = dgram_connect,
.disconnect = dgram_disconnect,
.ioctl = dgram_ioctl,
+ .getsockopt = dgram_getsockopt,
+ .setsockopt = dgram_setsockopt,
};
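[Editor's note, not part of the patch] Datagram sockets gain a per-socket want_ack flag (enabled by default in dgram_init) that is exposed through WPAN_WANTACK at the new SOL_IEEE802154 level. A userspace sketch; no uapi header was exported at this point, so the include below is assumed to be a local copy of the kernel's af_ieee802154.h:

	#include <sys/socket.h>
	#include "af_ieee802154.h"	/* assumed local copy: SOL_IEEE802154, WPAN_WANTACK */

	/* turn off the MAC-level ACK request on outgoing data frames */
	static int wpan_disable_ack(int fd)
	{
		int off = 0;

		return setsockopt(fd, SOL_IEEE802154, WPAN_WANTACK, &off, sizeof(off));
	}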
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index 27eda9fdf3c..ca767bde17a 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -19,6 +19,7 @@
* Written by:
* Sergey Lapin <slapin@ossfans.org>
* Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Maxim Osipov <maxim.osipov@siemens.com>
*/
#include <linux/kernel.h>
@@ -26,12 +27,15 @@
#include <linux/netdevice.h>
#include <net/netlink.h>
#include <net/genetlink.h>
+#include <net/sock.h>
#include <linux/nl802154.h>
-#include <net/ieee802154/af_ieee802154.h>
-#include <net/ieee802154/nl802154.h>
-#include <net/ieee802154/netdevice.h>
+#include <net/af_ieee802154.h>
+#include <net/nl802154.h>
+#include <net/ieee802154.h>
+#include <net/ieee802154_netdev.h>
static unsigned int ieee802154_seq_num;
+static DEFINE_SPINLOCK(ieee802154_seq_lock);
static struct genl_family ieee802154_coordinator_family = {
.id = GENL_ID_GENERATE,
@@ -54,12 +58,15 @@ static struct sk_buff *ieee802154_nl_create(int flags, u8 req)
{
void *hdr;
struct sk_buff *msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ unsigned long f;
if (!msg)
return NULL;
+ spin_lock_irqsave(&ieee802154_seq_lock, f);
hdr = genlmsg_put(msg, 0, ieee802154_seq_num++,
&ieee802154_coordinator_family, flags, req);
+ spin_unlock_irqrestore(&ieee802154_seq_lock, f);
if (!hdr) {
nlmsg_free(msg);
return NULL;
@@ -73,7 +80,7 @@ static int ieee802154_nl_finish(struct sk_buff *msg)
/* XXX: nlh is right at the start of msg */
void *hdr = genlmsg_data(NLMSG_DATA(msg->data));
- if (!genlmsg_end(msg, hdr))
+ if (genlmsg_end(msg, hdr) < 0)
goto out;
return genlmsg_multicast(msg, 0, ieee802154_coord_mcgrp.id,
@@ -229,7 +236,7 @@ nla_put_failure:
EXPORT_SYMBOL(ieee802154_nl_beacon_indic);
int ieee802154_nl_scan_confirm(struct net_device *dev,
- u8 status, u8 scan_type, u32 unscanned,
+ u8 status, u8 scan_type, u32 unscanned, u8 page,
u8 *edl/* , struct list_head *pan_desc_list */)
{
struct sk_buff *msg;
@@ -248,6 +255,7 @@ int ieee802154_nl_scan_confirm(struct net_device *dev,
NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type);
NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned);
+ NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, page);
if (edl)
NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl);
@@ -260,6 +268,60 @@ nla_put_failure:
}
EXPORT_SYMBOL(ieee802154_nl_scan_confirm);
+int ieee802154_nl_start_confirm(struct net_device *dev, u8 status)
+{
+ struct sk_buff *msg;
+
+ pr_debug("%s\n", __func__);
+
+ msg = ieee802154_nl_create(0, IEEE802154_START_CONF);
+ if (!msg)
+ return -ENOBUFS;
+
+ NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
+ NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
+ NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+ dev->dev_addr);
+
+ NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status);
+
+ return ieee802154_nl_finish(msg);
+
+nla_put_failure:
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+EXPORT_SYMBOL(ieee802154_nl_start_confirm);
+
+static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid,
+ u32 seq, int flags, struct net_device *dev)
+{
+ void *hdr;
+
+ pr_debug("%s\n", __func__);
+
+ hdr = genlmsg_put(msg, 0, seq, &ieee802154_coordinator_family, flags,
+ IEEE802154_LIST_IFACE);
+ if (!hdr)
+ goto out;
+
+ NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name);
+ NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex);
+
+ NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
+ dev->dev_addr);
+ NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR,
+ ieee802154_mlme_ops(dev)->get_short_addr(dev));
+ NLA_PUT_U16(msg, IEEE802154_ATTR_PAN_ID,
+ ieee802154_mlme_ops(dev)->get_pan_id(dev));
+ return genlmsg_end(msg, hdr);
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+out:
+ return -EMSGSIZE;
+}
+
/* Requests from userspace */
static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
{
@@ -272,7 +334,7 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
dev = dev_get_by_name(&init_net, name);
} else if (info->attrs[IEEE802154_ATTR_DEV_INDEX])
dev = dev_get_by_index(&init_net,
- nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX]));
+ nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX]));
else
return NULL;
@@ -292,6 +354,7 @@ static int ieee802154_associate_req(struct sk_buff *skb,
{
struct net_device *dev;
struct ieee802154_addr addr;
+ u8 page;
int ret = -EINVAL;
if (!info->attrs[IEEE802154_ATTR_CHANNEL] ||
@@ -317,8 +380,14 @@ static int ieee802154_associate_req(struct sk_buff *skb,
}
addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]);
+ if (info->attrs[IEEE802154_ATTR_PAGE])
+ page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
+ else
+ page = 0;
+
ret = ieee802154_mlme_ops(dev)->assoc_req(dev, &addr,
nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]),
+ page,
nla_get_u8(info->attrs[IEEE802154_ATTR_CAPABILITY]));
dev_put(dev);
@@ -401,6 +470,7 @@ static int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
struct ieee802154_addr addr;
u8 channel, bcn_ord, sf_ord;
+ u8 page;
int pan_coord, blx, coord_realign;
int ret;
@@ -431,7 +501,19 @@ static int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info)
blx = nla_get_u8(info->attrs[IEEE802154_ATTR_BAT_EXT]);
coord_realign = nla_get_u8(info->attrs[IEEE802154_ATTR_COORD_REALIGN]);
- ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel,
+ if (info->attrs[IEEE802154_ATTR_PAGE])
+ page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
+ else
+ page = 0;
+
+
+ if (addr.short_addr == IEEE802154_ADDR_BROADCAST) {
+ ieee802154_nl_start_confirm(dev, IEEE802154_NO_SHORT_ADDRESS);
+ dev_put(dev);
+ return -EINVAL;
+ }
+
+ ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page,
bcn_ord, sf_ord, pan_coord, blx, coord_realign);
dev_put(dev);
@@ -445,6 +527,7 @@ static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
u8 type;
u32 channels;
u8 duration;
+ u8 page;
if (!info->attrs[IEEE802154_ATTR_SCAN_TYPE] ||
!info->attrs[IEEE802154_ATTR_CHANNELS] ||
@@ -459,13 +542,80 @@ static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
channels = nla_get_u32(info->attrs[IEEE802154_ATTR_CHANNELS]);
duration = nla_get_u8(info->attrs[IEEE802154_ATTR_DURATION]);
- ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels,
+ if (info->attrs[IEEE802154_ATTR_PAGE])
+ page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]);
+ else
+ page = 0;
+
+
+ ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels, page,
duration);
dev_put(dev);
return ret;
}
+static int ieee802154_list_iface(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ /* Request for interface name, index, type, IEEE address,
+ PAN Id, short address */
+ struct sk_buff *msg;
+ struct net_device *dev = NULL;
+ int rc = -ENOBUFS;
+
+ pr_debug("%s\n", __func__);
+
+ dev = ieee802154_nl_get_dev(info);
+ if (!dev)
+ return -ENODEV;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg)
+ goto out_dev;
+
+ rc = ieee802154_nl_fill_iface(msg, info->snd_pid, info->snd_seq,
+ 0, dev);
+ if (rc < 0)
+ goto out_free;
+
+ dev_put(dev);
+
+ return genlmsg_unicast(&init_net, msg, info->snd_pid);
+out_free:
+ nlmsg_free(msg);
+out_dev:
+ dev_put(dev);
+ return rc;
+
+}
+
+static int ieee802154_dump_iface(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ struct net_device *dev;
+ int idx;
+ int s_idx = cb->args[0];
+
+ pr_debug("%s\n", __func__);
+
+ idx = 0;
+ for_each_netdev(net, dev) {
+ if (idx < s_idx || (dev->type != ARPHRD_IEEE802154))
+ goto cont;
+
+ if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI, dev) < 0)
+ break;
+cont:
+ idx++;
+ }
+ cb->args[0] = idx;
+
+ return skb->len;
+}
+
#define IEEE802154_OP(_cmd, _func) \
{ \
.cmd = _cmd, \
@@ -475,12 +625,22 @@ static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
.flags = GENL_ADMIN_PERM, \
}
+#define IEEE802154_DUMP(_cmd, _func, _dump) \
+ { \
+ .cmd = _cmd, \
+ .policy = ieee802154_policy, \
+ .doit = _func, \
+ .dumpit = _dump, \
+ }
+
static struct genl_ops ieee802154_coordinator_ops[] = {
IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req),
IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp),
IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req),
IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req),
IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req),
+ IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface,
+ ieee802154_dump_iface),
};
static int __init ieee802154_nl_init(void)
diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c
index c7d71d1adca..2363ebee02e 100644
--- a/net/ieee802154/nl_policy.c
+++ b/net/ieee802154/nl_policy.c
@@ -24,7 +24,7 @@
#define NLA_HW_ADDR NLA_U64
-struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
+const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
[IEEE802154_ATTR_DEV_NAME] = { .type = NLA_STRING, },
[IEEE802154_ATTR_DEV_INDEX] = { .type = NLA_U32, },
@@ -33,6 +33,7 @@ struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
[IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, },
[IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, },
[IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_PAGE] = { .type = NLA_U8, },
[IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, },
[IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, },
[IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, },
@@ -50,3 +51,4 @@ struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
[IEEE802154_ATTR_DURATION] = { .type = NLA_U8, },
[IEEE802154_ATTR_ED_LIST] = { .len = 27 },
};
+
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index fca44d59f97..13198859982 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -26,7 +26,7 @@
#include <linux/if_arp.h>
#include <linux/list.h>
#include <net/sock.h>
-#include <net/ieee802154/af_ieee802154.h>
+#include <net/af_ieee802154.h>
#include "af802154.h"
@@ -74,8 +74,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int len)
goto out;
}
- if (dev->type != ARPHRD_IEEE802154_PHY &&
- dev->type != ARPHRD_IEEE802154) {
+ if (dev->type != ARPHRD_IEEE802154) {
err = -ENODEV;
goto out_put;
}
@@ -238,6 +237,18 @@ void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
read_unlock(&raw_lock);
}
+static int raw_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ return -EOPNOTSUPP;
+}
+
+static int raw_setsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int optlen)
+{
+ return -EOPNOTSUPP;
+}
+
struct proto ieee802154_raw_prot = {
.name = "IEEE-802.15.4-RAW",
.owner = THIS_MODULE,
@@ -250,5 +261,7 @@ struct proto ieee802154_raw_prot = {
.unhash = raw_unhash,
.connect = raw_connect,
.disconnect = raw_disconnect,
+ .getsockopt = raw_getsockopt,
+ .setsockopt = raw_setsockopt,
};
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c
new file mode 100644
index 00000000000..f306604da67
--- /dev/null
+++ b/net/ieee802154/wpan-class.c
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2007, 2008, 2009 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+
+#include <net/wpan-phy.h>
+
+#define MASTER_SHOW_COMPLEX(name, format_string, args...) \
+static ssize_t name ## _show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev); \
+ int ret; \
+ \
+ mutex_lock(&phy->pib_lock); \
+ ret = sprintf(buf, format_string "\n", args); \
+ mutex_unlock(&phy->pib_lock); \
+ return ret; \
+}
+
+#define MASTER_SHOW(field, format_string) \
+ MASTER_SHOW_COMPLEX(field, format_string, phy->field)
+
+MASTER_SHOW(current_channel, "%d");
+MASTER_SHOW(current_page, "%d");
+MASTER_SHOW(channels_supported, "%#x");
+MASTER_SHOW_COMPLEX(transmit_power, "%d +- %d dB",
+ ((signed char) (phy->transmit_power << 2)) >> 2,
+ (phy->transmit_power >> 6) ? (phy->transmit_power >> 6) * 3 : 1);
+MASTER_SHOW(cca_mode, "%d");
+
+static struct device_attribute pmib_attrs[] = {
+ __ATTR_RO(current_channel),
+ __ATTR_RO(current_page),
+ __ATTR_RO(channels_supported),
+ __ATTR_RO(transmit_power),
+ __ATTR_RO(cca_mode),
+ {},
+};
+
+static void wpan_phy_release(struct device *d)
+{
+ struct wpan_phy *phy = container_of(d, struct wpan_phy, dev);
+ kfree(phy);
+}
+
+static struct class wpan_phy_class = {
+ .name = "ieee802154",
+ .dev_release = wpan_phy_release,
+ .dev_attrs = pmib_attrs,
+};
+
+static DEFINE_MUTEX(wpan_phy_mutex);
+static int wpan_phy_idx;
+
+static int wpan_phy_match(struct device *dev, void *data)
+{
+ return !strcmp(dev_name(dev), (const char *)data);
+}
+
+struct wpan_phy *wpan_phy_find(const char *str)
+{
+ struct device *dev;
+
+ if (WARN_ON(!str))
+ return NULL;
+
+ dev = class_find_device(&wpan_phy_class, NULL,
+ (void *)str, wpan_phy_match);
+ if (!dev)
+ return NULL;
+
+ return container_of(dev, struct wpan_phy, dev);
+}
+EXPORT_SYMBOL(wpan_phy_find);
+
+static int wpan_phy_idx_valid(int idx)
+{
+ return idx >= 0;
+}
+
+struct wpan_phy *wpan_phy_alloc(size_t priv_size)
+{
+ struct wpan_phy *phy = kzalloc(sizeof(*phy) + priv_size,
+ GFP_KERNEL);
+
+ mutex_lock(&wpan_phy_mutex);
+ phy->idx = wpan_phy_idx++;
+ if (unlikely(!wpan_phy_idx_valid(phy->idx))) {
+ wpan_phy_idx--;
+ mutex_unlock(&wpan_phy_mutex);
+ kfree(phy);
+ return NULL;
+ }
+ mutex_unlock(&wpan_phy_mutex);
+
+ mutex_init(&phy->pib_lock);
+
+ device_initialize(&phy->dev);
+ dev_set_name(&phy->dev, "wpan-phy%d", phy->idx);
+
+ phy->dev.class = &wpan_phy_class;
+
+ return phy;
+}
+EXPORT_SYMBOL(wpan_phy_alloc);
+
+int wpan_phy_register(struct device *parent, struct wpan_phy *phy)
+{
+ phy->dev.parent = parent;
+
+ return device_add(&phy->dev);
+}
+EXPORT_SYMBOL(wpan_phy_register);
+
+void wpan_phy_unregister(struct wpan_phy *phy)
+{
+ device_del(&phy->dev);
+}
+EXPORT_SYMBOL(wpan_phy_unregister);
+
+void wpan_phy_free(struct wpan_phy *phy)
+{
+ put_device(&phy->dev);
+}
+EXPORT_SYMBOL(wpan_phy_free);
+
+static int __init wpan_phy_class_init(void)
+{
+ return class_register(&wpan_phy_class);
+}
+subsys_initcall(wpan_phy_class_init);
+
+static void __exit wpan_phy_class_exit(void)
+{
+ class_unregister(&wpan_phy_class);
+}
+module_exit(wpan_phy_class_exit);
+
+MODULE_DESCRIPTION("IEEE 802.15.4 device class");
+MODULE_LICENSE("GPL v2");
+
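[Editor's note, not part of the patch] The new wpan-class.c gives radio drivers a sysfs-backed phy object. A sketch of the expected driver-side lifecycle using only the functions exported above; the private struct and field values are illustrative:

	#include <net/wpan-phy.h>

	struct fake_radio {
		int irq;			/* hypothetical driver state */
	};

	static struct wpan_phy *fake_radio_probe(struct device *parent)
	{
		struct wpan_phy *phy = wpan_phy_alloc(sizeof(struct fake_radio));

		if (!phy)
			return NULL;

		phy->current_channel = 11;	/* fields surfaced by the sysfs macros above */
		phy->current_page = 0;

		if (wpan_phy_register(parent, phy)) {
			wpan_phy_free(phy);
			return NULL;
		}
		return phy;
	}

	static void fake_radio_remove(struct wpan_phy *phy)
	{
		wpan_phy_unregister(phy);
		wpan_phy_free(phy);
	}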
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 566ea6c4321..58c4b0f7c4a 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -124,7 +124,6 @@ static struct list_head inetsw[SOCK_MAX];
static DEFINE_SPINLOCK(inetsw_lock);
struct ipv4_config ipv4_config;
-
EXPORT_SYMBOL(ipv4_config);
/* New destruction routine */
@@ -139,12 +138,12 @@ void inet_sock_destruct(struct sock *sk)
sk_mem_reclaim(sk);
if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
- printk("Attempt to release TCP socket in state %d %p\n",
+ pr_err("Attempt to release TCP socket in state %d %p\n",
sk->sk_state, sk);
return;
}
if (!sock_flag(sk, SOCK_DEAD)) {
- printk("Attempt to release alive inet socket %p\n", sk);
+ pr_err("Attempt to release alive inet socket %p\n", sk);
return;
}
@@ -157,6 +156,7 @@ void inet_sock_destruct(struct sock *sk)
dst_release(sk->sk_dst_cache);
sk_refcnt_debug_dec(sk);
}
+EXPORT_SYMBOL(inet_sock_destruct);
/*
* The routines beyond this point handle the behaviour of an AF_INET
@@ -219,6 +219,7 @@ out:
release_sock(sk);
return err;
}
+EXPORT_SYMBOL(inet_listen);
u32 inet_ehash_secret __read_mostly;
EXPORT_SYMBOL(inet_ehash_secret);
@@ -243,7 +244,7 @@ EXPORT_SYMBOL(build_ehash_secret);
static inline int inet_netns_ok(struct net *net, int protocol)
{
int hash;
- struct net_protocol *ipprot;
+ const struct net_protocol *ipprot;
if (net_eq(net, &init_net))
return 1;
@@ -435,9 +436,11 @@ int inet_release(struct socket *sock)
}
return 0;
}
+EXPORT_SYMBOL(inet_release);
/* It is off by default, see below. */
int sysctl_ip_nonlocal_bind __read_mostly;
+EXPORT_SYMBOL(sysctl_ip_nonlocal_bind);
int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
@@ -519,6 +522,7 @@ out_release_sock:
out:
return err;
}
+EXPORT_SYMBOL(inet_bind);
int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr,
int addr_len, int flags)
@@ -532,6 +536,7 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr,
return -EAGAIN;
return sk->sk_prot->connect(sk, (struct sockaddr *)uaddr, addr_len);
}
+EXPORT_SYMBOL(inet_dgram_connect);
static long inet_wait_for_connect(struct sock *sk, long timeo)
{
@@ -641,6 +646,7 @@ sock_error:
sock->state = SS_DISCONNECTING;
goto out;
}
+EXPORT_SYMBOL(inet_stream_connect);
/*
* Accept a pending connection. The TCP layer now gives BSD semantics.
@@ -668,6 +674,7 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
do_err:
return err;
}
+EXPORT_SYMBOL(inet_accept);
/*
@@ -699,6 +706,7 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr,
*uaddr_len = sizeof(*sin);
return 0;
}
+EXPORT_SYMBOL(inet_getname);
int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
size_t size)
@@ -711,9 +719,11 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
return sk->sk_prot->sendmsg(iocb, sk, msg, size);
}
+EXPORT_SYMBOL(inet_sendmsg);
-static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
+static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
+ size_t size, int flags)
{
struct sock *sk = sock->sk;
@@ -780,6 +790,7 @@ int inet_shutdown(struct socket *sock, int how)
release_sock(sk);
return err;
}
+EXPORT_SYMBOL(inet_shutdown);
/*
* ioctl() calls you can issue on an INET socket. Most of these are
@@ -798,44 +809,45 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
struct net *net = sock_net(sk);
switch (cmd) {
- case SIOCGSTAMP:
- err = sock_get_timestamp(sk, (struct timeval __user *)arg);
- break;
- case SIOCGSTAMPNS:
- err = sock_get_timestampns(sk, (struct timespec __user *)arg);
- break;
- case SIOCADDRT:
- case SIOCDELRT:
- case SIOCRTMSG:
- err = ip_rt_ioctl(net, cmd, (void __user *)arg);
- break;
- case SIOCDARP:
- case SIOCGARP:
- case SIOCSARP:
- err = arp_ioctl(net, cmd, (void __user *)arg);
- break;
- case SIOCGIFADDR:
- case SIOCSIFADDR:
- case SIOCGIFBRDADDR:
- case SIOCSIFBRDADDR:
- case SIOCGIFNETMASK:
- case SIOCSIFNETMASK:
- case SIOCGIFDSTADDR:
- case SIOCSIFDSTADDR:
- case SIOCSIFPFLAGS:
- case SIOCGIFPFLAGS:
- case SIOCSIFFLAGS:
- err = devinet_ioctl(net, cmd, (void __user *)arg);
- break;
- default:
- if (sk->sk_prot->ioctl)
- err = sk->sk_prot->ioctl(sk, cmd, arg);
- else
- err = -ENOIOCTLCMD;
- break;
+ case SIOCGSTAMP:
+ err = sock_get_timestamp(sk, (struct timeval __user *)arg);
+ break;
+ case SIOCGSTAMPNS:
+ err = sock_get_timestampns(sk, (struct timespec __user *)arg);
+ break;
+ case SIOCADDRT:
+ case SIOCDELRT:
+ case SIOCRTMSG:
+ err = ip_rt_ioctl(net, cmd, (void __user *)arg);
+ break;
+ case SIOCDARP:
+ case SIOCGARP:
+ case SIOCSARP:
+ err = arp_ioctl(net, cmd, (void __user *)arg);
+ break;
+ case SIOCGIFADDR:
+ case SIOCSIFADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCGIFNETMASK:
+ case SIOCSIFNETMASK:
+ case SIOCGIFDSTADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCSIFPFLAGS:
+ case SIOCGIFPFLAGS:
+ case SIOCSIFFLAGS:
+ err = devinet_ioctl(net, cmd, (void __user *)arg);
+ break;
+ default:
+ if (sk->sk_prot->ioctl)
+ err = sk->sk_prot->ioctl(sk, cmd, arg);
+ else
+ err = -ENOIOCTLCMD;
+ break;
}
return err;
}
+EXPORT_SYMBOL(inet_ioctl);
const struct proto_ops inet_stream_ops = {
.family = PF_INET,
@@ -862,6 +874,7 @@ const struct proto_ops inet_stream_ops = {
.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
+EXPORT_SYMBOL(inet_stream_ops);
const struct proto_ops inet_dgram_ops = {
.family = PF_INET,
@@ -887,6 +900,7 @@ const struct proto_ops inet_dgram_ops = {
.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
+EXPORT_SYMBOL(inet_dgram_ops);
/*
* For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
@@ -1016,6 +1030,7 @@ out_illegal:
p->type);
goto out;
}
+EXPORT_SYMBOL(inet_register_protosw);
void inet_unregister_protosw(struct inet_protosw *p)
{
@@ -1031,6 +1046,7 @@ void inet_unregister_protosw(struct inet_protosw *p)
synchronize_net();
}
}
+EXPORT_SYMBOL(inet_unregister_protosw);
/*
* Shall we try to damage output packets if routing dev changes?
@@ -1141,13 +1157,12 @@ int inet_sk_rebuild_header(struct sock *sk)
return err;
}
-
EXPORT_SYMBOL(inet_sk_rebuild_header);
static int inet_gso_send_check(struct sk_buff *skb)
{
struct iphdr *iph;
- struct net_protocol *ops;
+ const struct net_protocol *ops;
int proto;
int ihl;
int err = -EINVAL;
@@ -1183,10 +1198,11 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct iphdr *iph;
- struct net_protocol *ops;
+ const struct net_protocol *ops;
int proto;
int ihl;
int id;
+ unsigned int offset = 0;
if (!(features & NETIF_F_V4_CSUM))
features &= ~NETIF_F_SG;
@@ -1229,7 +1245,14 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
skb = segs;
do {
iph = ip_hdr(skb);
- iph->id = htons(id++);
+ if (proto == IPPROTO_UDP) {
+ iph->id = htons(id);
+ iph->frag_off = htons(offset >> 3);
+ if (skb->next != NULL)
+ iph->frag_off |= htons(IP_MF);
+ offset += (skb->len - skb->mac_len - iph->ihl * 4);
+ } else
+ iph->id = htons(id++);
iph->tot_len = htons(skb->len - skb->mac_len);
iph->check = 0;
iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl);
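The loop above fixes up the IPv4 header of each UFO-generated segment: the IP ID stays constant across fragments, the fragment offset is the accumulated payload offset in 8-byte units, and every fragment except the last carries IP_MF. A minimal standalone sketch of that arithmetic (user-space C, not kernel code; the payload and per-fragment sizes are invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define IP_MF 0x2000 /* "more fragments" flag, host byte order */

int main(void)
{
	unsigned int payload  = 3000;	/* hypothetical UDP payload length   */
	unsigned int frag_len = 1480;	/* payload bytes carried per segment */
	unsigned int offset   = 0;

	while (payload > 0) {
		unsigned int chunk = payload > frag_len ? frag_len : payload;
		uint16_t frag_off = (uint16_t)(offset >> 3);	/* 8-byte units */

		if (payload > chunk)		/* more fragments follow */
			frag_off |= IP_MF;

		printf("fragment: offset=%u bytes, frag_off field=0x%04x, len=%u\n",
		       offset, (unsigned)frag_off, chunk);

		offset  += chunk;
		payload -= chunk;
	}
	return 0;
}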
@@ -1242,7 +1265,7 @@ out:
static struct sk_buff **inet_gro_receive(struct sk_buff **head,
struct sk_buff *skb)
{
- struct net_protocol *ops;
+ const struct net_protocol *ops;
struct sk_buff **pp = NULL;
struct sk_buff *p;
struct iphdr *iph;
@@ -1319,7 +1342,7 @@ out:
static int inet_gro_complete(struct sk_buff *skb)
{
- struct net_protocol *ops;
+ const struct net_protocol *ops;
struct iphdr *iph = ip_hdr(skb);
int proto = iph->protocol & (MAX_INET_PROTOS - 1);
int err = -ENOSYS;
@@ -1361,7 +1384,6 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
}
return rc;
}
-
EXPORT_SYMBOL_GPL(inet_ctl_sock_create);
unsigned long snmp_fold_field(void *mib[], int offt)
@@ -1405,13 +1427,13 @@ void snmp_mib_free(void *ptr[2])
EXPORT_SYMBOL_GPL(snmp_mib_free);
#ifdef CONFIG_IP_MULTICAST
-static struct net_protocol igmp_protocol = {
+static const struct net_protocol igmp_protocol = {
.handler = igmp_rcv,
.netns_ok = 1,
};
#endif
-static struct net_protocol tcp_protocol = {
+static const struct net_protocol tcp_protocol = {
.handler = tcp_v4_rcv,
.err_handler = tcp_v4_err,
.gso_send_check = tcp_v4_gso_send_check,
@@ -1422,14 +1444,16 @@ static struct net_protocol tcp_protocol = {
.netns_ok = 1,
};
-static struct net_protocol udp_protocol = {
+static const struct net_protocol udp_protocol = {
.handler = udp_rcv,
.err_handler = udp_err,
+ .gso_send_check = udp4_ufo_send_check,
+ .gso_segment = udp4_ufo_fragment,
.no_policy = 1,
.netns_ok = 1,
};
-static struct net_protocol icmp_protocol = {
+static const struct net_protocol icmp_protocol = {
.handler = icmp_rcv,
.no_policy = 1,
.netns_ok = 1,
@@ -1666,19 +1690,3 @@ static int __init ipv4_proc_init(void)
MODULE_ALIAS_NETPROTO(PF_INET);
-EXPORT_SYMBOL(inet_accept);
-EXPORT_SYMBOL(inet_bind);
-EXPORT_SYMBOL(inet_dgram_connect);
-EXPORT_SYMBOL(inet_dgram_ops);
-EXPORT_SYMBOL(inet_getname);
-EXPORT_SYMBOL(inet_ioctl);
-EXPORT_SYMBOL(inet_listen);
-EXPORT_SYMBOL(inet_register_protosw);
-EXPORT_SYMBOL(inet_release);
-EXPORT_SYMBOL(inet_sendmsg);
-EXPORT_SYMBOL(inet_shutdown);
-EXPORT_SYMBOL(inet_sock_destruct);
-EXPORT_SYMBOL(inet_stream_connect);
-EXPORT_SYMBOL(inet_stream_ops);
-EXPORT_SYMBOL(inet_unregister_protosw);
-EXPORT_SYMBOL(sysctl_ip_nonlocal_bind);
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index e878e494296..5c662703eb1 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -311,7 +311,7 @@ static const struct xfrm_type ah_type =
.output = ah_output
};
-static struct net_protocol ah4_protocol = {
+static const struct net_protocol ah4_protocol = {
.handler = xfrm4_rcv,
.err_handler = ah4_err,
.no_policy = 1,
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 090e9991ac2..4e80f336c0c 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -130,7 +130,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb);
static void arp_error_report(struct neighbour *neigh, struct sk_buff *skb);
static void parp_redo(struct sk_buff *skb);
-static struct neigh_ops arp_generic_ops = {
+static const struct neigh_ops arp_generic_ops = {
.family = AF_INET,
.solicit = arp_solicit,
.error_report = arp_error_report,
@@ -140,7 +140,7 @@ static struct neigh_ops arp_generic_ops = {
.queue_xmit = dev_queue_xmit,
};
-static struct neigh_ops arp_hh_ops = {
+static const struct neigh_ops arp_hh_ops = {
.family = AF_INET,
.solicit = arp_solicit,
.error_report = arp_error_report,
@@ -150,7 +150,7 @@ static struct neigh_ops arp_hh_ops = {
.queue_xmit = dev_queue_xmit,
};
-static struct neigh_ops arp_direct_ops = {
+static const struct neigh_ops arp_direct_ops = {
.family = AF_INET,
.output = dev_queue_xmit,
.connected_output = dev_queue_xmit,
@@ -158,7 +158,7 @@ static struct neigh_ops arp_direct_ops = {
.queue_xmit = dev_queue_xmit,
};
-struct neigh_ops arp_broken_ops = {
+const struct neigh_ops arp_broken_ops = {
.family = AF_INET,
.solicit = arp_solicit,
.error_report = arp_error_report,
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 3863c3a4223..07336c6201f 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1087,6 +1087,12 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
case NETDEV_DOWN:
ip_mc_down(in_dev);
break;
+ case NETDEV_BONDING_OLDTYPE:
+ ip_mc_unmap(in_dev);
+ break;
+ case NETDEV_BONDING_NEWTYPE:
+ ip_mc_remap(in_dev);
+ break;
case NETDEV_CHANGEMTU:
if (inetdev_valid_mtu(dev->mtu))
break;
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 18bb383ea39..12f7287e902 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -615,7 +615,7 @@ static const struct xfrm_type esp_type =
.output = esp_output
};
-static struct net_protocol esp4_protocol = {
+static const struct net_protocol esp4_protocol = {
.handler = xfrm4_rcv,
.err_handler = esp4_err,
.no_policy = 1,
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 63c2fa7b68c..291bdf50a21 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -48,7 +48,7 @@
* Patrick McHardy <kaber@trash.net>
*/
-#define VERSION "0.408"
+#define VERSION "0.409"
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -164,6 +164,14 @@ static struct tnode *inflate(struct trie *t, struct tnode *tn);
static struct tnode *halve(struct trie *t, struct tnode *tn);
/* tnodes to free after resize(); protected by RTNL */
static struct tnode *tnode_free_head;
+static size_t tnode_free_size;
+
+/*
+ * Call synchronize_rcu() after call_rcu() once this many pages are pending;
+ * this is especially useful before resizing the root node on PREEMPT_NONE
+ * configs. The value was obtained experimentally to avoid visible slowdown.
+ */
+static const int sync_pages = 128;
static struct kmem_cache *fn_alias_kmem __read_mostly;
static struct kmem_cache *trie_leaf_kmem __read_mostly;
@@ -317,8 +325,7 @@ static inline void check_tnode(const struct tnode *tn)
static const int halve_threshold = 25;
static const int inflate_threshold = 50;
static const int halve_threshold_root = 15;
-static const int inflate_threshold_root = 25;
-
+static const int inflate_threshold_root = 30;
static void __alias_free_mem(struct rcu_head *head)
{
@@ -393,6 +400,8 @@ static void tnode_free_safe(struct tnode *tn)
BUG_ON(IS_LEAF(tn));
tn->tnode_free = tnode_free_head;
tnode_free_head = tn;
+ tnode_free_size += sizeof(struct tnode) +
+ (sizeof(struct node *) << tn->bits);
}
static void tnode_free_flush(void)
@@ -404,6 +413,11 @@ static void tnode_free_flush(void)
tn->tnode_free = NULL;
tnode_free(tn);
}
+
+ if (tnode_free_size >= PAGE_SIZE * sync_pages) {
+ tnode_free_size = 0;
+ synchronize_rcu();
+ }
}
static struct leaf *leaf_new(void)
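tnode_free_flush() above bounds how much RCU-deferred trie memory can pile up by forcing a grace period once roughly sync_pages worth of pages are pending. A minimal user-space sketch of that accounting, with no-op stand-ins for the RCU primitives (defer_free() and wait_for_grace_period() are hypothetical helpers, not kernel APIs):

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096
static const int sync_pages = 128;	/* same experimental value as the patch */

static size_t pending_bytes;

static void defer_free(size_t size)	/* stand-in for tnode_free_safe() */
{
	pending_bytes += size;
}

static void wait_for_grace_period(void)	/* stand-in for synchronize_rcu() */
{
	printf("grace period forced with %zu bytes pending\n", pending_bytes);
}

static void flush_deferred(void)
{
	/* ... actually free the deferred nodes here ... */
	if (pending_bytes >= (size_t)PAGE_SIZE * sync_pages) {
		wait_for_grace_period();
		pending_bytes = 0;
	}
}

int main(void)
{
	for (int i = 0; i < 1024; i++)
		defer_free(1024);	/* pretend each tnode is 1 KiB */
	flush_deferred();		/* 1 MiB pending >= 128 pages, so it waits */
	return 0;
}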
@@ -499,14 +513,14 @@ static void tnode_put_child_reorg(struct tnode *tn, int i, struct node *n,
rcu_assign_pointer(tn->child[i], n);
}
+#define MAX_WORK 10
static struct node *resize(struct trie *t, struct tnode *tn)
{
int i;
- int err = 0;
struct tnode *old_tn;
int inflate_threshold_use;
int halve_threshold_use;
- int max_resize;
+ int max_work;
if (!tn)
return NULL;
@@ -521,18 +535,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
}
/* One child */
if (tn->empty_children == tnode_child_length(tn) - 1)
- for (i = 0; i < tnode_child_length(tn); i++) {
- struct node *n;
-
- n = tn->child[i];
- if (!n)
- continue;
-
- /* compress one level */
- node_set_parent(n, NULL);
- tnode_free_safe(tn);
- return n;
- }
+ goto one_child;
/*
* Double as long as the resulting node has a number of
* nonempty nodes that are above the threshold.
@@ -601,14 +604,17 @@ static struct node *resize(struct trie *t, struct tnode *tn)
/* Keep root node larger */
- if (!tn->parent)
+ if (!node_parent((struct node*) tn)) {
inflate_threshold_use = inflate_threshold_root;
- else
+ halve_threshold_use = halve_threshold_root;
+ }
+ else {
inflate_threshold_use = inflate_threshold;
+ halve_threshold_use = halve_threshold;
+ }
- err = 0;
- max_resize = 10;
- while ((tn->full_children > 0 && max_resize-- &&
+ max_work = MAX_WORK;
+ while ((tn->full_children > 0 && max_work-- &&
50 * (tn->full_children + tnode_child_length(tn)
- tn->empty_children)
>= inflate_threshold_use * tnode_child_length(tn))) {
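The condition in the loop above is a percentage test: counting full children twice, the node keeps being inflated while the occupied fraction of its child slots stays at or above inflate_threshold percent (and the MAX_WORK budget lasts). A small standalone sketch of the same comparison with invented numbers:

#include <stdio.h>

static int should_inflate(int children, int full, int empty, int threshold)
{
	/* 50 * (full + children - empty) >= threshold * children
	 * is the comparison the kernel loop makes. */
	return full > 0 &&
	       50 * (full + children - empty) >= threshold * children;
}

int main(void)
{
	int children = 16;	/* child slots in the tnode */

	printf("8 full, 2 empty:  %s\n",
	       should_inflate(children, 8, 2, 50) ? "inflate" : "keep");
	printf("1 full, 12 empty: %s\n",
	       should_inflate(children, 1, 12, 50) ? "inflate" : "keep");
	return 0;
}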
@@ -625,35 +631,19 @@ static struct node *resize(struct trie *t, struct tnode *tn)
}
}
- if (max_resize < 0) {
- if (!tn->parent)
- pr_warning("Fix inflate_threshold_root."
- " Now=%d size=%d bits\n",
- inflate_threshold_root, tn->bits);
- else
- pr_warning("Fix inflate_threshold."
- " Now=%d size=%d bits\n",
- inflate_threshold, tn->bits);
- }
-
check_tnode(tn);
+ /* Return if at least one inflate has been run */
+ if (max_work != MAX_WORK)
+ return (struct node *) tn;
+
/*
* Halve as long as the number of empty children in this
* node is above threshold.
*/
-
- /* Keep root node larger */
-
- if (!tn->parent)
- halve_threshold_use = halve_threshold_root;
- else
- halve_threshold_use = halve_threshold;
-
- err = 0;
- max_resize = 10;
- while (tn->bits > 1 && max_resize-- &&
+ max_work = MAX_WORK;
+ while (tn->bits > 1 && max_work-- &&
100 * (tnode_child_length(tn) - tn->empty_children) <
halve_threshold_use * tnode_child_length(tn)) {
@@ -668,19 +658,10 @@ static struct node *resize(struct trie *t, struct tnode *tn)
}
}
- if (max_resize < 0) {
- if (!tn->parent)
- pr_warning("Fix halve_threshold_root."
- " Now=%d size=%d bits\n",
- halve_threshold_root, tn->bits);
- else
- pr_warning("Fix halve_threshold."
- " Now=%d size=%d bits\n",
- halve_threshold, tn->bits);
- }
/* Only one child remains */
- if (tn->empty_children == tnode_child_length(tn) - 1)
+ if (tn->empty_children == tnode_child_length(tn) - 1) {
+one_child:
for (i = 0; i < tnode_child_length(tn); i++) {
struct node *n;
@@ -694,7 +675,7 @@ static struct node *resize(struct trie *t, struct tnode *tn)
tnode_free_safe(tn);
return n;
}
-
+ }
return (struct node *) tn;
}
@@ -1435,7 +1416,7 @@ static int fn_trie_lookup(struct fib_table *tb, const struct flowi *flp,
cindex = tkey_extract_bits(mask_pfx(key, current_prefix_length),
pos, bits);
- n = tnode_get_child(pn, cindex);
+ n = tnode_get_child_rcu(pn, cindex);
if (n == NULL) {
#ifdef CONFIG_IP_FIB_TRIE_STATS
@@ -1570,7 +1551,7 @@ backtrace:
if (chopped_off <= pn->bits) {
cindex &= ~(1 << (chopped_off-1));
} else {
- struct tnode *parent = node_parent((struct node *) pn);
+ struct tnode *parent = node_parent_rcu((struct node *) pn);
if (!parent)
goto failed;
@@ -1783,7 +1764,7 @@ static struct leaf *trie_firstleaf(struct trie *t)
static struct leaf *trie_nextleaf(struct leaf *l)
{
struct node *c = (struct node *) l;
- struct tnode *p = node_parent(c);
+ struct tnode *p = node_parent_rcu(c);
if (!p)
return NULL; /* trie with just one leaf */
@@ -2391,7 +2372,7 @@ static inline const char *rtn_scope(char *buf, size_t len, enum rt_scope_t s)
}
}
-static const char *rtn_type_names[__RTN_MAX] = {
+static const char *const rtn_type_names[__RTN_MAX] = {
[RTN_UNSPEC] = "UNSPEC",
[RTN_UNICAST] = "UNICAST",
[RTN_LOCAL] = "LOCAL",
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 97c410e8438..5bc13fe816d 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -655,7 +655,7 @@ static void icmp_unreach(struct sk_buff *skb)
struct iphdr *iph;
struct icmphdr *icmph;
int hash, protocol;
- struct net_protocol *ipprot;
+ const struct net_protocol *ipprot;
u32 info = 0;
struct net *net;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 01b4284ed69..d41e5de79a8 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1298,6 +1298,28 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
}
}
+/* Device changing type */
+
+void ip_mc_unmap(struct in_device *in_dev)
+{
+ struct ip_mc_list *i;
+
+ ASSERT_RTNL();
+
+ for (i = in_dev->mc_list; i; i = i->next)
+ igmp_group_dropped(i);
+}
+
+void ip_mc_remap(struct in_device *in_dev)
+{
+ struct ip_mc_list *i;
+
+ ASSERT_RTNL();
+
+ for (i = in_dev->mc_list; i; i = i->next)
+ igmp_group_added(i);
+}
+
/* Device going down */
void ip_mc_down(struct in_device *in_dev)
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 61283f92882..13f0781f35c 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -218,8 +218,8 @@ void inet_twdr_hangman(unsigned long data)
/* We purged the entire slot, anything left? */
if (twdr->tw_count)
need_timer = 1;
+ twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
}
- twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
if (need_timer)
mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index cb4a0f4bd5e..d9645c94a06 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -662,7 +662,7 @@ drop_nolock:
return(0);
}
-static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct net_device_stats *stats = &tunnel->dev->stats;
@@ -821,7 +821,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
stats->tx_dropped++;
dev_kfree_skb(skb);
tunnel->recursion--;
- return 0;
+ return NETDEV_TX_OK;
}
if (skb->sk)
skb_set_owner_w(new_skb, skb->sk);
@@ -889,7 +889,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
IPTUNNEL_XMIT();
tunnel->recursion--;
- return 0;
+ return NETDEV_TX_OK;
tx_error_icmp:
dst_link_failure(skb);
@@ -898,7 +898,7 @@ tx_error:
stats->tx_errors++;
dev_kfree_skb(skb);
tunnel->recursion--;
- return 0;
+ return NETDEV_TX_OK;
}
static int ipgre_tunnel_bind_dev(struct net_device *dev)
@@ -951,7 +951,7 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
addend += 4;
}
dev->needed_headroom = addend + hlen;
- mtu -= dev->hard_header_len - addend;
+ mtu -= dev->hard_header_len + addend;
if (mtu < 68)
mtu = 68;
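The corrected line subtracts both the underlying device's link-layer header and the tunnel encapsulation overhead ("addend") from the device MTU; the old expression effectively added addend back. A standalone sketch of the arithmetic, assuming typical Ethernet and GRE-with-key sizes purely for illustration:

#include <stdio.h>

int main(void)
{
	int dev_mtu         = 1500;	/* underlying device MTU           */
	int hard_header_len = 14;	/* Ethernet header                 */
	int addend          = 24;	/* outer IP header + GRE header    */

	int mtu = dev_mtu - (hard_header_len + addend);
	if (mtu < 68)			/* IPv4 minimum MTU floor          */
		mtu = 68;

	printf("tunnel mtu = %d\n", mtu);
	return 0;
}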
@@ -1288,7 +1288,7 @@ static void ipgre_fb_tunnel_init(struct net_device *dev)
}
-static struct net_protocol ipgre_protocol = {
+static const struct net_protocol ipgre_protocol = {
.handler = ipgre_rcv,
.err_handler = ipgre_err,
.netns_ok = 1,
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index db46b4b5b2b..6c98b43badf 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -202,7 +202,7 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
{
int protocol = ip_hdr(skb)->protocol;
int hash, raw;
- struct net_protocol *ipprot;
+ const struct net_protocol *ipprot;
resubmit:
raw = raw_local_deliver(skb, protocol);
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 7d082105472..9fe5d7b8158 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -813,6 +813,8 @@ int ip_append_data(struct sock *sk,
inet->cork.addr = ipc->addr;
}
rt = *rtp;
+ if (unlikely(!rt))
+ return -EFAULT;
/*
* We steal reference to this route, caller should not release it
*/
@@ -1302,7 +1304,7 @@ int ip_push_pending_frames(struct sock *sk)
err = ip_local_out(skb);
if (err) {
if (err > 0)
- err = inet->recverr ? net_xmit_errno(err) : 0;
+ err = net_xmit_errno(err);
if (err)
goto error;
}
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 3262ce06294..38fbf04150a 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -146,7 +146,7 @@ static const struct xfrm_type ipcomp_type = {
.output = ipcomp_output
};
-static struct net_protocol ipcomp4_protocol = {
+static const struct net_protocol ipcomp4_protocol = {
.handler = xfrm4_rcv,
.err_handler = ipcomp4_err,
.no_policy = 1,
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 93e2b787da2..62548cb0923 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -387,7 +387,7 @@ static int ipip_rcv(struct sk_buff *skb)
* and that skb is filled properly by that function.
*/
-static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct net_device_stats *stats = &tunnel->dev->stats;
@@ -486,7 +486,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
stats->tx_dropped++;
dev_kfree_skb(skb);
tunnel->recursion--;
- return 0;
+ return NETDEV_TX_OK;
}
if (skb->sk)
skb_set_owner_w(new_skb, skb->sk);
@@ -524,7 +524,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
IPTUNNEL_XMIT();
tunnel->recursion--;
- return 0;
+ return NETDEV_TX_OK;
tx_error_icmp:
dst_link_failure(skb);
@@ -532,7 +532,7 @@ tx_error:
stats->tx_errors++;
dev_kfree_skb(skb);
tunnel->recursion--;
- return 0;
+ return NETDEV_TX_OK;
}
static void ipip_tunnel_bind_dev(struct net_device *dev)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 9a8da5ed92b..c43ec2d51ce 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -99,10 +99,6 @@ static int ipmr_cache_report(struct net *net,
struct sk_buff *pkt, vifi_t vifi, int assert);
static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);
-#ifdef CONFIG_IP_PIMSM_V2
-static struct net_protocol pim_protocol;
-#endif
-
static struct timer_list ipmr_expire_timer;
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
@@ -201,7 +197,7 @@ failure:
#ifdef CONFIG_IP_PIMSM
-static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct net *net = dev_net(dev);
@@ -212,7 +208,7 @@ static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
IGMPMSG_WHOLEPKT);
read_unlock(&mrt_lock);
kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
static const struct net_device_ops reg_vif_netdev_ops = {
@@ -1945,7 +1941,7 @@ static const struct file_operations ipmr_mfc_fops = {
#endif
#ifdef CONFIG_IP_PIMSM_V2
-static struct net_protocol pim_protocol = {
+static const struct net_protocol pim_protocol = {
.handler = pim_rcv,
.netns_ok = 1,
};
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 7505dff4ffd..27774c99d88 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -8,7 +8,7 @@
* Copyright (C) 2002 David S. Miller (davem@redhat.com)
*
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
@@ -341,15 +341,11 @@ unsigned int arpt_do_table(struct sk_buff *skb,
}
/* All zeroes == unconditional rule. */
-static inline int unconditional(const struct arpt_arp *arp)
+static inline bool unconditional(const struct arpt_arp *arp)
{
- unsigned int i;
+ static const struct arpt_arp uncond;
- for (i = 0; i < sizeof(*arp)/sizeof(__u32); i++)
- if (((__u32 *)arp)[i])
- return 0;
-
- return 1;
+ return memcmp(arp, &uncond, sizeof(uncond)) == 0;
}
/* Figures out from what hook each rule can be called: returns 0 if
@@ -537,12 +533,28 @@ out:
return ret;
}
+static bool check_underflow(struct arpt_entry *e)
+{
+ const struct arpt_entry_target *t;
+ unsigned int verdict;
+
+ if (!unconditional(&e->arp))
+ return false;
+ t = arpt_get_target(e);
+ if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
+ return false;
+ verdict = ((struct arpt_standard_target *)t)->verdict;
+ verdict = -verdict - 1;
+ return verdict == NF_DROP || verdict == NF_ACCEPT;
+}
+
static inline int check_entry_size_and_hooks(struct arpt_entry *e,
struct xt_table_info *newinfo,
unsigned char *base,
unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
+ unsigned int valid_hooks,
unsigned int *i)
{
unsigned int h;
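check_underflow() above relies on the STANDARD target's verdict encoding: a netfilter verdict v is stored as -v - 1, so decoding applies the same transform again, and only NF_ACCEPT or NF_DROP are allowed at an underflow entry. A standalone sketch of that round trip (the NF_* values match <linux/netfilter.h>; everything else is illustrative):

#include <stdio.h>

#define NF_DROP   0
#define NF_ACCEPT 1

static int encode(unsigned int nf_verdict)	/* as the STANDARD target stores it */
{
	return -(int)nf_verdict - 1;
}

static unsigned int decode(int stored)		/* as check_underflow() recovers it */
{
	return (unsigned int)(-stored - 1);
}

int main(void)
{
	int drop   = encode(NF_DROP);	/* stored as -1 */
	int accept = encode(NF_ACCEPT);	/* stored as -2 */

	printf("DROP   stored as %d, decodes to %u\n", drop,   decode(drop));
	printf("ACCEPT stored as %d, decodes to %u\n", accept, decode(accept));
	return 0;
}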
@@ -562,15 +574,21 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
/* Check hooks & underflows */
for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
+ if (!(valid_hooks & (1 << h)))
+ continue;
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
- if ((unsigned char *)e - base == underflows[h])
+ if ((unsigned char *)e - base == underflows[h]) {
+ if (!check_underflow(e)) {
+ pr_err("Underflows must be unconditional and "
+ "use the STANDARD target with "
+ "ACCEPT/DROP\n");
+ return -EINVAL;
+ }
newinfo->underflow[h] = underflows[h];
+ }
}
- /* FIXME: underflows must be unconditional, standard verdicts
- < 0 (not ARPT_RETURN). --RR */
-
/* Clear counters and comefrom */
e->counters = ((struct xt_counters) { 0, 0 });
e->comefrom = 0;
@@ -630,7 +648,7 @@ static int translate_table(const char *name,
newinfo,
entry0,
entry0 + size,
- hook_entries, underflows, &i);
+ hook_entries, underflows, valid_hooks, &i);
duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
if (ret != 0)
return ret;
@@ -1760,7 +1778,8 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
return ret;
}
-struct xt_table *arpt_register_table(struct net *net, struct xt_table *table,
+struct xt_table *arpt_register_table(struct net *net,
+ const struct xt_table *table,
const struct arpt_replace *repl)
{
int ret;
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index 6ecfdae7c58..97337601827 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -15,7 +15,7 @@ MODULE_DESCRIPTION("arptables filter table");
#define FILTER_VALID_HOOKS ((1 << NF_ARP_IN) | (1 << NF_ARP_OUT) | \
(1 << NF_ARP_FORWARD))
-static struct
+static const struct
{
struct arpt_replace repl;
struct arpt_standard entries[3];
@@ -45,7 +45,7 @@ static struct
.term = ARPT_ERROR_INIT,
};
-static struct xt_table packet_filter = {
+static const struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.me = THIS_MODULE,
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index fdefae6b5df..cde755d5eea 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -8,6 +8,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
@@ -190,16 +191,11 @@ get_entry(void *base, unsigned int offset)
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
-static inline int
-unconditional(const struct ipt_ip *ip)
+static inline bool unconditional(const struct ipt_ip *ip)
{
- unsigned int i;
-
- for (i = 0; i < sizeof(*ip)/sizeof(__u32); i++)
- if (((__u32 *)ip)[i])
- return 0;
+ static const struct ipt_ip uncond;
- return 1;
+ return memcmp(ip, &uncond, sizeof(uncond)) == 0;
#undef FWINV
}
@@ -315,7 +311,6 @@ ipt_do_table(struct sk_buff *skb,
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
const struct iphdr *ip;
- u_int16_t datalen;
bool hotdrop = false;
/* Initializing verdict to NF_DROP keeps gcc happy. */
unsigned int verdict = NF_DROP;
@@ -328,7 +323,6 @@ ipt_do_table(struct sk_buff *skb,
/* Initialization */
ip = ip_hdr(skb);
- datalen = skb->len - ip->ihl * 4;
indev = in ? in->name : nulldevname;
outdev = out ? out->name : nulldevname;
/* We handle fragments by dealing with the first fragment as
@@ -427,8 +421,6 @@ ipt_do_table(struct sk_buff *skb,
#endif
/* Target might have changed stuff. */
ip = ip_hdr(skb);
- datalen = skb->len - ip->ihl * 4;
-
if (verdict == IPT_CONTINUE)
e = ipt_next_entry(e);
else
@@ -716,6 +708,21 @@ find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
return ret;
}
+static bool check_underflow(struct ipt_entry *e)
+{
+ const struct ipt_entry_target *t;
+ unsigned int verdict;
+
+ if (!unconditional(&e->ip))
+ return false;
+ t = ipt_get_target(e);
+ if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
+ return false;
+ verdict = ((struct ipt_standard_target *)t)->verdict;
+ verdict = -verdict - 1;
+ return verdict == NF_DROP || verdict == NF_ACCEPT;
+}
+
static int
check_entry_size_and_hooks(struct ipt_entry *e,
struct xt_table_info *newinfo,
@@ -723,6 +730,7 @@ check_entry_size_and_hooks(struct ipt_entry *e,
unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
+ unsigned int valid_hooks,
unsigned int *i)
{
unsigned int h;
@@ -742,15 +750,21 @@ check_entry_size_and_hooks(struct ipt_entry *e,
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
+ if (!(valid_hooks & (1 << h)))
+ continue;
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
- if ((unsigned char *)e - base == underflows[h])
+ if ((unsigned char *)e - base == underflows[h]) {
+ if (!check_underflow(e)) {
+ pr_err("Underflows must be unconditional and "
+ "use the STANDARD target with "
+ "ACCEPT/DROP\n");
+ return -EINVAL;
+ }
newinfo->underflow[h] = underflows[h];
+ }
}
- /* FIXME: underflows must be unconditional, standard verdicts
- < 0 (not IPT_RETURN). --RR */
-
/* Clear counters and comefrom */
e->counters = ((struct xt_counters) { 0, 0 });
e->comefrom = 0;
@@ -813,7 +827,7 @@ translate_table(const char *name,
newinfo,
entry0,
entry0 + size,
- hook_entries, underflows, &i);
+ hook_entries, underflows, valid_hooks, &i);
if (ret != 0)
return ret;
@@ -2051,7 +2065,8 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
return ret;
}
-struct xt_table *ipt_register_table(struct net *net, struct xt_table *table,
+struct xt_table *ipt_register_table(struct net *net,
+ const struct xt_table *table,
const struct ipt_replace *repl)
{
int ret;
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index c30a969724f..df566cbd68e 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -53,11 +53,11 @@ static struct
.term = IPT_ERROR_INIT, /* ERROR */
};
-static struct xt_table packet_filter = {
+static const struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.me = THIS_MODULE,
- .af = AF_INET,
+ .af = NFPROTO_IPV4,
};
/* The work comes in here from netfilter.c. */
@@ -102,21 +102,21 @@ static struct nf_hook_ops ipt_ops[] __read_mostly = {
{
.hook = ipt_local_in_hook,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_FILTER,
},
{
.hook = ipt_hook,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_FORWARD,
.priority = NF_IP_PRI_FILTER,
},
{
.hook = ipt_local_out_hook,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_FILTER,
},
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index 4087614d951..036047f9b0f 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -28,7 +28,7 @@ MODULE_DESCRIPTION("iptables mangle table");
(1 << NF_INET_POST_ROUTING))
/* Ouch - five different hooks? Maybe this should be a config option..... -- BC */
-static struct
+static const struct
{
struct ipt_replace repl;
struct ipt_standard entries[5];
@@ -64,11 +64,11 @@ static struct
.term = IPT_ERROR_INIT, /* ERROR */
};
-static struct xt_table packet_mangler = {
+static const struct xt_table packet_mangler = {
.name = "mangle",
.valid_hooks = MANGLE_VALID_HOOKS,
.me = THIS_MODULE,
- .af = AF_INET,
+ .af = NFPROTO_IPV4,
};
/* The work comes in here from netfilter.c. */
@@ -162,35 +162,35 @@ static struct nf_hook_ops ipt_ops[] __read_mostly = {
{
.hook = ipt_pre_routing_hook,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_MANGLE,
},
{
.hook = ipt_local_in_hook,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_MANGLE,
},
{
.hook = ipt_forward_hook,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_FORWARD,
.priority = NF_IP_PRI_MANGLE,
},
{
.hook = ipt_local_hook,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_MANGLE,
},
{
.hook = ipt_post_routing_hook,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_MANGLE,
},
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index e5356da1fb5..993edc23be0 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -9,7 +9,7 @@
#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))
-static struct
+static const struct
{
struct ipt_replace repl;
struct ipt_standard entries[2];
@@ -36,11 +36,11 @@ static struct
.term = IPT_ERROR_INIT, /* ERROR */
};
-static struct xt_table packet_raw = {
+static const struct xt_table packet_raw = {
.name = "raw",
.valid_hooks = RAW_VALID_HOOKS,
.me = THIS_MODULE,
- .af = AF_INET,
+ .af = NFPROTO_IPV4,
};
/* The work comes in here from netfilter.c. */
@@ -74,14 +74,14 @@ ipt_local_hook(unsigned int hook,
static struct nf_hook_ops ipt_ops[] __read_mostly = {
{
.hook = ipt_hook,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_RAW,
.owner = THIS_MODULE,
},
{
.hook = ipt_local_hook,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_RAW,
.owner = THIS_MODULE,
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index 29ab630f240..99eb76c65d2 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -27,7 +27,7 @@ MODULE_DESCRIPTION("iptables security table, for MAC rules");
(1 << NF_INET_FORWARD) | \
(1 << NF_INET_LOCAL_OUT)
-static struct
+static const struct
{
struct ipt_replace repl;
struct ipt_standard entries[3];
@@ -57,11 +57,11 @@ static struct
.term = IPT_ERROR_INIT, /* ERROR */
};
-static struct xt_table security_table = {
+static const struct xt_table security_table = {
.name = "security",
.valid_hooks = SECURITY_VALID_HOOKS,
.me = THIS_MODULE,
- .af = AF_INET,
+ .af = NFPROTO_IPV4,
};
static unsigned int
@@ -105,21 +105,21 @@ static struct nf_hook_ops ipt_ops[] __read_mostly = {
{
.hook = ipt_local_in_hook,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_SECURITY,
},
{
.hook = ipt_forward_hook,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_FORWARD,
.priority = NF_IP_PRI_SECURITY,
},
{
.hook = ipt_local_out_hook,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_SECURITY,
},
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 7d2ead7228a..aa95bb82ee6 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -26,6 +26,7 @@
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
+#include <net/netfilter/nf_log.h>
int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
struct nf_conn *ct,
@@ -113,8 +114,11 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
ret = helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb),
ct, ctinfo);
- if (ret != NF_ACCEPT)
+ if (ret != NF_ACCEPT) {
+ nf_log_packet(NFPROTO_IPV4, hooknum, skb, in, out, NULL,
+ "nf_ct_%s: dropping packet", helper->name);
return ret;
+ }
if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
typeof(nf_nat_seq_adjust_hook) seq_adjust;
@@ -158,28 +162,28 @@ static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = {
{
.hook = ipv4_conntrack_in,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_CONNTRACK,
},
{
.hook = ipv4_conntrack_local,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_CONNTRACK,
},
{
.hook = ipv4_confirm,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM,
},
{
.hook = ipv4_confirm,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_CONNTRACK_CONFIRM,
},
@@ -256,11 +260,11 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
tuple.dst.u3.ip = inet->daddr;
tuple.dst.u.tcp.port = inet->dport;
tuple.src.l3num = PF_INET;
- tuple.dst.protonum = IPPROTO_TCP;
+ tuple.dst.protonum = sk->sk_protocol;
- /* We only do TCP at the moment: is there a better way? */
- if (strcmp(sk->sk_prot->name, "TCP")) {
- pr_debug("SO_ORIGINAL_DST: Not a TCP socket\n");
+ /* We only do TCP and SCTP at the moment: is there a better way? */
+ if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP) {
+ pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n");
return -ENOPROTOOPT;
}
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 3229e0a81ba..68afc6ecd34 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -212,7 +212,7 @@ find_best_ips_proto(struct nf_conntrack_tuple *tuple,
maxip = ntohl(range->max_ip);
j = jhash_2words((__force u32)tuple->src.u3.ip,
range->flags & IP_NAT_RANGE_PERSISTENT ?
- (__force u32)tuple->dst.u3.ip : 0, 0);
+ 0 : (__force u32)tuple->dst.u3.ip, 0);
j = ((u64)j * (maxip - minip + 1)) >> 32;
*var_ipp = htonl(minip + j);
}
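The hunk above maps a 32-bit jhash value onto the configured NAT address range with a multiply-and-shift instead of a modulo, which distributes the hash evenly over min..max. A standalone sketch of that mapping (the addresses and the hash-spreading constant are made up for the example):

#include <stdint.h>
#include <stdio.h>

static uint32_t map_to_range(uint32_t h, uint32_t min, uint32_t max)
{
	uint64_t span = (uint64_t)max - min + 1;
	/* (h * span) >> 32 lands in [0, span - 1] without a division */
	return min + (uint32_t)(((uint64_t)h * span) >> 32);
}

int main(void)
{
	uint32_t min = 0xC0A80001;	/* 192.168.0.1, hypothetical NAT range */
	uint32_t max = 0xC0A800FE;	/* 192.168.0.254 */

	for (uint32_t i = 0; i < 5; i++) {
		uint32_t ip = map_to_range(i * 0x9E3779B9u, min, max);
		printf("hash %u -> %u.%u.%u.%u\n", (unsigned)i,
		       (unsigned)(ip >> 24), (unsigned)((ip >> 16) & 0xff),
		       (unsigned)((ip >> 8) & 0xff), (unsigned)(ip & 0xff));
	}
	return 0;
}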
@@ -620,7 +620,7 @@ static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
};
static int
-nfnetlink_parse_nat(struct nlattr *nat,
+nfnetlink_parse_nat(const struct nlattr *nat,
const struct nf_conn *ct, struct nf_nat_range *range)
{
struct nlattr *tb[CTA_NAT_MAX+1];
@@ -656,7 +656,7 @@ nfnetlink_parse_nat(struct nlattr *nat,
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
enum nf_nat_manip_type manip,
- struct nlattr *attr)
+ const struct nlattr *attr)
{
struct nf_nat_range range;
@@ -671,7 +671,7 @@ nfnetlink_parse_nat_setup(struct nf_conn *ct,
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
enum nf_nat_manip_type manip,
- struct nlattr *attr)
+ const struct nlattr *attr)
{
return -EOPNOTSUPP;
}
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 6348a793936..9e81e0dfb4e 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -28,7 +28,7 @@
(1 << NF_INET_POST_ROUTING) | \
(1 << NF_INET_LOCAL_OUT))
-static struct
+static const struct
{
struct ipt_replace repl;
struct ipt_standard entries[3];
@@ -58,11 +58,11 @@ static struct
.term = IPT_ERROR_INIT, /* ERROR */
};
-static struct xt_table nat_table = {
+static const struct xt_table nat_table = {
.name = "nat",
.valid_hooks = NAT_VALID_HOOKS,
.me = THIS_MODULE,
- .af = AF_INET,
+ .af = NFPROTO_IPV4,
};
/* Source NAT */
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index 5567bd0d075..5f41d017ddd 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -251,7 +251,7 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
{
.hook = nf_nat_in,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP_PRI_NAT_DST,
},
@@ -259,7 +259,7 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
{
.hook = nf_nat_out,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_NAT_SRC,
},
@@ -267,7 +267,7 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
{
.hook = nf_nat_local_fn,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_NAT_DST,
},
@@ -275,7 +275,7 @@ static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
{
.hook = nf_nat_fn,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP_PRI_NAT_SRC,
},
diff --git a/net/ipv4/protocol.c b/net/ipv4/protocol.c
index ea50da0649f..542f22fc98b 100644
--- a/net/ipv4/protocol.c
+++ b/net/ipv4/protocol.c
@@ -22,35 +22,20 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
+#include <linux/cache.h>
#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/socket.h>
-#include <linux/in.h>
-#include <linux/inet.h>
#include <linux/netdevice.h>
-#include <linux/timer.h>
-#include <net/ip.h>
+#include <linux/spinlock.h>
#include <net/protocol.h>
-#include <linux/skbuff.h>
-#include <net/sock.h>
-#include <net/icmp.h>
-#include <net/udp.h>
-#include <net/ipip.h>
-#include <linux/igmp.h>
-struct net_protocol *inet_protos[MAX_INET_PROTOS] ____cacheline_aligned_in_smp;
+const struct net_protocol *inet_protos[MAX_INET_PROTOS] ____cacheline_aligned_in_smp;
static DEFINE_SPINLOCK(inet_proto_lock);
/*
* Add a protocol handler to the hash tables
*/
-int inet_add_protocol(struct net_protocol *prot, unsigned char protocol)
+int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol)
{
int hash, ret;
@@ -72,7 +57,7 @@ int inet_add_protocol(struct net_protocol *prot, unsigned char protocol)
* Remove a protocol from the hash tables.
*/
-int inet_del_protocol(struct net_protocol *prot, unsigned char protocol)
+int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol)
{
int hash, ret;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 2979f14bb18..ebb1e5848bc 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -375,7 +375,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
err = NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
dst_output);
if (err > 0)
- err = inet->recverr ? net_xmit_errno(err) : 0;
+ err = net_xmit_errno(err);
if (err)
goto error;
out:
@@ -386,6 +386,8 @@ error_fault:
kfree_skb(skb);
error:
IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
+ if (err == -ENOBUFS && !inet->recverr)
+ err = 0;
return err;
}
@@ -576,8 +578,11 @@ back_from_confirm:
&ipc, &rt, msg->msg_flags);
if (err)
ip_flush_pending_frames(sk);
- else if (!(msg->msg_flags & MSG_MORE))
+ else if (!(msg->msg_flags & MSG_MORE)) {
err = ip_push_pending_frames(sk);
+ if (err == -ENOBUFS && !inet->recverr)
+ err = 0;
+ }
release_sock(sk);
}
done:
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 278f46f5011..df934731453 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1514,13 +1514,17 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
void ip_rt_send_redirect(struct sk_buff *skb)
{
struct rtable *rt = skb_rtable(skb);
- struct in_device *in_dev = in_dev_get(rt->u.dst.dev);
+ struct in_device *in_dev;
+ int log_martians;
- if (!in_dev)
+ rcu_read_lock();
+ in_dev = __in_dev_get_rcu(rt->u.dst.dev);
+ if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
+ rcu_read_unlock();
return;
-
- if (!IN_DEV_TX_REDIRECTS(in_dev))
- goto out;
+ }
+ log_martians = IN_DEV_LOG_MARTIANS(in_dev);
+ rcu_read_unlock();
/* No redirected packets during ip_rt_redirect_silence;
* reset the algorithm.
@@ -1533,7 +1537,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
*/
if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
rt->u.dst.rate_last = jiffies;
- goto out;
+ return;
}
/* Check for load limit; set rate_last to the latest sent
@@ -1547,7 +1551,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
rt->u.dst.rate_last = jiffies;
++rt->u.dst.rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
- if (IN_DEV_LOG_MARTIANS(in_dev) &&
+ if (log_martians &&
rt->u.dst.rate_tokens == ip_rt_redirect_number &&
net_ratelimit())
printk(KERN_WARNING "host %pI4/if%d ignores redirects for %pI4 to %pI4.\n",
@@ -1555,8 +1559,6 @@ void ip_rt_send_redirect(struct sk_buff *skb)
&rt->rt_dst, &rt->rt_gateway);
#endif
}
-out:
- in_dev_put(in_dev);
}
static int ip_error(struct sk_buff *skb)
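ip_rt_send_redirect() now does a pure RCU read-side lookup: it finds the in_device under rcu_read_lock(), copies the one flag it needs later (log_martians), and unlocks before the rate-limited work, so no reference count is taken or dropped. A minimal user-space sketch of that copy-then-unlock pattern; the lock functions and lookup_in_dev() are no-op stand-ins, not real APIs:

#include <stdbool.h>
#include <stdio.h>

static void rcu_read_lock(void)   { }	/* stand-in, does nothing here */
static void rcu_read_unlock(void) { }	/* stand-in, does nothing here */

struct in_device_stub { bool tx_redirects; bool log_martians; };

static struct in_device_stub *lookup_in_dev(void)
{
	static struct in_device_stub dev = { true, true };
	return &dev;			/* would be __in_dev_get_rcu() */
}

int main(void)
{
	bool log_martians;

	rcu_read_lock();
	struct in_device_stub *in_dev = lookup_in_dev();
	if (!in_dev || !in_dev->tx_redirects) {
		rcu_read_unlock();
		return 0;		/* nothing to do, no refcount held */
	}
	log_martians = in_dev->log_martians;	/* copy before unlocking */
	rcu_read_unlock();

	if (log_martians)
		printf("would log the ignored redirect here\n");
	return 0;
}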
@@ -3412,7 +3414,7 @@ int __init ip_rt_init(void)
alloc_large_system_hash("IP route cache",
sizeof(struct rt_hash_bucket),
rhash_entries,
- (num_physpages >= 128 * 1024) ?
+ (totalram_pages >= 128 * 1024) ?
15 : 17,
0,
&rt_hash_log,
@@ -3442,7 +3444,7 @@ int __init ip_rt_init(void)
printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
xfrm_init();
- xfrm4_init();
+ xfrm4_init(ip_rt_max_size);
#endif
rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index cd2b97f1b6e..a6e0e077ac3 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -37,12 +37,13 @@ __initcall(init_syncookies);
#define COOKIEBITS 24 /* Upper bits store count */
#define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1)
-static DEFINE_PER_CPU(__u32, cookie_scratch)[16 + 5 + SHA_WORKSPACE_WORDS];
+static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
+ ipv4_cookie_scratch);
static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
u32 count, int c)
{
- __u32 *tmp = __get_cpu_var(cookie_scratch);
+ __u32 *tmp = __get_cpu_var(ipv4_cookie_scratch);
memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
tmp[0] = (__force u32)saddr;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 91145244ea6..21387ebabf0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1839,7 +1839,7 @@ void tcp_close(struct sock *sk, long timeout)
/* Unread data was tossed, zap the connection. */
NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
tcp_set_state(sk, TCP_CLOSE);
- tcp_send_active_reset(sk, GFP_KERNEL);
+ tcp_send_active_reset(sk, sk->sk_allocation);
} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
/* Check zero linger _after_ checking for unread data. */
sk->sk_prot->disconnect(sk, 0);
@@ -2012,7 +2012,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->snd_cwnd = 2;
icsk->icsk_probes_out = 0;
tp->packets_out = 0;
- tp->snd_ssthresh = 0x7fffffff;
+ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tp->snd_cwnd_cnt = 0;
tp->bytes_acked = 0;
tcp_set_ca_state(sk, TCP_CA_Open);
@@ -2336,13 +2336,13 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
val = !!(tp->nonagle&TCP_NAGLE_CORK);
break;
case TCP_KEEPIDLE:
- val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
+ val = keepalive_time_when(tp) / HZ;
break;
case TCP_KEEPINTVL:
- val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
+ val = keepalive_intvl_when(tp) / HZ;
break;
case TCP_KEEPCNT:
- val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
+ val = keepalive_probes(tp);
break;
case TCP_SYNCNT:
val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
@@ -2658,7 +2658,7 @@ void tcp_free_md5sig_pool(void)
EXPORT_SYMBOL(tcp_free_md5sig_pool);
-static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
+static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(struct sock *sk)
{
int cpu;
struct tcp_md5sig_pool **pool;
@@ -2671,7 +2671,7 @@ static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
struct tcp_md5sig_pool *p;
struct crypto_hash *hash;
- p = kzalloc(sizeof(*p), GFP_KERNEL);
+ p = kzalloc(sizeof(*p), sk->sk_allocation);
if (!p)
goto out_free;
*per_cpu_ptr(pool, cpu) = p;
@@ -2688,7 +2688,7 @@ out_free:
return NULL;
}
-struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
+struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(struct sock *sk)
{
struct tcp_md5sig_pool **pool;
int alloc = 0;
@@ -2709,7 +2709,7 @@ retry:
if (alloc) {
/* we cannot hold spinlock here because this may sleep. */
- struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
+ struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool(sk);
spin_lock_bh(&tcp_md5sig_pool_lock);
if (!p) {
tcp_md5sig_users--;
@@ -2862,7 +2862,7 @@ void __init tcp_init(void)
alloc_large_system_hash("TCP established",
sizeof(struct inet_ehash_bucket),
thash_entries,
- (num_physpages >= 128 * 1024) ?
+ (totalram_pages >= 128 * 1024) ?
13 : 15,
0,
&tcp_hashinfo.ehash_size,
@@ -2879,7 +2879,7 @@ void __init tcp_init(void)
alloc_large_system_hash("TCP bind",
sizeof(struct inet_bind_hashbucket),
tcp_hashinfo.ehash_size,
- (num_physpages >= 128 * 1024) ?
+ (totalram_pages >= 128 * 1024) ?
13 : 15,
0,
&tcp_hashinfo.bhash_size,
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index e92beb9e55e..6428b342b16 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -116,7 +116,7 @@ int tcp_set_default_congestion_control(const char *name)
spin_lock(&tcp_cong_list_lock);
ca = tcp_ca_find(name);
#ifdef CONFIG_MODULES
- if (!ca && capable(CAP_SYS_MODULE)) {
+ if (!ca && capable(CAP_NET_ADMIN)) {
spin_unlock(&tcp_cong_list_lock);
request_module("tcp_%s", name);
@@ -246,7 +246,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
#ifdef CONFIG_MODULES
/* not found attempt to autoload module */
- if (!ca && capable(CAP_SYS_MODULE)) {
+ if (!ca && capable(CAP_NET_ADMIN)) {
rcu_read_unlock();
request_module("tcp_%s", name);
rcu_read_lock();
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2bdb0da237e..d86784be7ab 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -685,7 +685,7 @@ static inline void tcp_set_rto(struct sock *sk)
* is invisible. Actually, Linux-2.4 also generates erratic
* ACKs in some circumstances.
*/
- inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;
+ inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);
/* 2. Fixups made earlier cannot be right.
* If we do not estimate RTO correctly without them,
@@ -696,8 +696,7 @@ static inline void tcp_set_rto(struct sock *sk)
/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
* guarantees that rto is higher.
*/
- if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
- inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
+ tcp_bound_rto(sk);
}
/* Save metrics learned by this TCP session.
@@ -762,7 +761,7 @@ void tcp_update_metrics(struct sock *sk)
set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
}
- if (tp->snd_ssthresh >= 0xFFFF) {
+ if (tcp_in_initial_slowstart(tp)) {
/* Slow start still did not finish. */
if (dst_metric(dst, RTAX_SSTHRESH) &&
!dst_metric_locked(dst, RTAX_SSTHRESH) &&
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6d88219c5e2..7cda24b53f6 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -328,26 +328,29 @@ static void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, u32 mtu)
*
*/
-void tcp_v4_err(struct sk_buff *skb, u32 info)
+void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
- struct iphdr *iph = (struct iphdr *)skb->data;
- struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+ struct iphdr *iph = (struct iphdr *)icmp_skb->data;
+ struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
+ struct inet_connection_sock *icsk;
struct tcp_sock *tp;
struct inet_sock *inet;
- const int type = icmp_hdr(skb)->type;
- const int code = icmp_hdr(skb)->code;
+ const int type = icmp_hdr(icmp_skb)->type;
+ const int code = icmp_hdr(icmp_skb)->code;
struct sock *sk;
+ struct sk_buff *skb;
__u32 seq;
+ __u32 remaining;
int err;
- struct net *net = dev_net(skb->dev);
+ struct net *net = dev_net(icmp_skb->dev);
- if (skb->len < (iph->ihl << 2) + 8) {
+ if (icmp_skb->len < (iph->ihl << 2) + 8) {
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
return;
}
sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
- iph->saddr, th->source, inet_iif(skb));
+ iph->saddr, th->source, inet_iif(icmp_skb));
if (!sk) {
ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
return;
@@ -367,6 +370,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
if (sk->sk_state == TCP_CLOSE)
goto out;
+ icsk = inet_csk(sk);
tp = tcp_sk(sk);
seq = ntohl(th->seq);
if (sk->sk_state != TCP_LISTEN &&
@@ -393,6 +397,39 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
}
err = icmp_err_convert[code].errno;
+ /* check whether icmp_skb allows reverting the RTO backoff
+ * (see draft-zimmermann-tcp-lcd) */
+ if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
+ break;
+ if (seq != tp->snd_una || !icsk->icsk_retransmits ||
+ !icsk->icsk_backoff)
+ break;
+
+ icsk->icsk_backoff--;
+ inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
+ icsk->icsk_backoff;
+ tcp_bound_rto(sk);
+
+ skb = tcp_write_queue_head(sk);
+ BUG_ON(!skb);
+
+ remaining = icsk->icsk_rto - min(icsk->icsk_rto,
+ tcp_time_stamp - TCP_SKB_CB(skb)->when);
+
+ if (remaining) {
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+ remaining, TCP_RTO_MAX);
+ } else if (sock_owned_by_user(sk)) {
+ /* RTO revert clocked out retransmission,
+ * but socket is locked. Will defer. */
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
+ HZ/20, TCP_RTO_MAX);
+ } else {
+ /* RTO revert clocked out retransmission.
+ * Will retransmit now */
+ tcp_retransmit_timer(sk);
+ }
+
break;
case ICMP_TIME_EXCEEDED:
err = EHOSTUNREACH;
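The new ICMP handling above implements the backoff revert from draft-zimmermann-tcp-lcd: one exponential backoff step is undone, the RTO is recomputed, and the retransmit timer is rearmed with whatever portion of the reduced RTO has not yet elapsed. A standalone sketch of that arithmetic with invented values:

#include <stdio.h>

#define HZ 1000u	/* assumed tick rate, for illustration only */

int main(void)
{
	unsigned int base_rto = HZ / 5;	/* 200 ms RTO before any backoff     */
	unsigned int backoff  = 3;	/* three timeouts already doubled it */
	unsigned int elapsed  = HZ / 2;	/* time since the head skb was sent  */

	backoff--;			/* ICMP says the path works again    */
	unsigned int rto = base_rto << backoff;	/* 800 ms */

	unsigned int remaining = rto > elapsed ? rto - elapsed : 0;

	if (remaining)
		printf("rearm retransmit timer for %u ticks\n", remaining);
	else
		printf("reduced RTO already expired, retransmit now\n");
	return 0;
}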
@@ -849,7 +886,7 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
}
sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
}
- if (tcp_alloc_md5sig_pool() == NULL) {
+ if (tcp_alloc_md5sig_pool(sk) == NULL) {
kfree(newkey);
return -ENOMEM;
}
@@ -970,8 +1007,9 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
if (!tcp_sk(sk)->md5sig_info) {
struct tcp_sock *tp = tcp_sk(sk);
- struct tcp_md5sig_info *p = kzalloc(sizeof(*p), GFP_KERNEL);
+ struct tcp_md5sig_info *p;
+ p = kzalloc(sizeof(*p), sk->sk_allocation);
if (!p)
return -EINVAL;
@@ -979,7 +1017,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
}
- newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
+ newkey = kmemdup(cmd.tcpm_key, cmd.tcpm_keylen, sk->sk_allocation);
if (!newkey)
return -ENOMEM;
return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
@@ -1158,7 +1196,7 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = {
};
#ifdef CONFIG_TCP_MD5SIG
-static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
.md5_lookup = tcp_v4_reqsk_md5_lookup,
.calc_md5_hash = tcp_v4_md5_hash_skb,
};
@@ -1717,7 +1755,7 @@ int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
return 0;
}
-struct inet_connection_sock_af_ops ipv4_specific = {
+const struct inet_connection_sock_af_ops ipv4_specific = {
.queue_xmit = ip_queue_xmit,
.send_check = tcp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
@@ -1737,7 +1775,7 @@ struct inet_connection_sock_af_ops ipv4_specific = {
};
#ifdef CONFIG_TCP_MD5SIG
-static struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
+static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
.md5_lookup = tcp_v4_md5_lookup,
.calc_md5_hash = tcp_v4_md5_hash_skb,
.md5_add = tcp_v4_md5_add_func,
@@ -1770,7 +1808,7 @@ static int tcp_v4_init_sock(struct sock *sk)
/* See draft-stevens-tcpca-spec-01 for discussion of the
* initialization of these values.
*/
- tp->snd_ssthresh = 0x7fffffff; /* Infinity */
+ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tp->snd_cwnd_clamp = ~0;
tp->mss_cache = 536;
@@ -2246,7 +2284,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
jiffies_to_clock_t(icsk->icsk_ack.ato),
(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
tp->snd_cwnd,
- tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh,
+ tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
len);
}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index f8d67ccc64f..624c3c9b3c2 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -322,7 +322,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
if (key != NULL) {
memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
tcptw->tw_md5_keylen = key->keylen;
- if (tcp_alloc_md5sig_pool() == NULL)
+ if (tcp_alloc_md5sig_pool(sk) == NULL)
BUG();
}
} while (0);
@@ -363,7 +363,7 @@ void tcp_twsk_destructor(struct sock *sk)
#ifdef CONFIG_TCP_MD5SIG
struct tcp_timewait_sock *twsk = tcp_twsk(sk);
if (twsk->tw_md5_keylen)
- tcp_put_md5sig_pool();
+ tcp_free_md5sig_pool();
#endif
}
@@ -410,7 +410,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->retrans_out = 0;
newtp->sacked_out = 0;
newtp->fackets_out = 0;
- newtp->snd_ssthresh = 0x7fffffff;
+ newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
/* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed-ACK and congestion control
@@ -657,29 +657,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
if (child == NULL)
goto listen_overflow;
-#ifdef CONFIG_TCP_MD5SIG
- else {
- /* Copy over the MD5 key from the original socket */
- struct tcp_md5sig_key *key;
- struct tcp_sock *tp = tcp_sk(sk);
- key = tp->af_specific->md5_lookup(sk, child);
- if (key != NULL) {
- /*
- * We're using one, so create a matching key on the
- * newsk structure. If we fail to get memory then we
- * end up not copying the key across. Shucks.
- */
- char *newkey = kmemdup(key->key, key->keylen,
- GFP_ATOMIC);
- if (newkey) {
- if (!tcp_alloc_md5sig_pool())
- BUG();
- tp->af_specific->md5_add(child, child, newkey,
- key->keylen);
- }
- }
- }
-#endif
inet_csk_reqsk_queue_unlink(sk, req, prev);
inet_csk_reqsk_queue_removed(sk, req);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index bd62712848f..5200aab0ca9 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -59,6 +59,7 @@ int sysctl_tcp_base_mss __read_mostly = 512;
/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
+/* Account for new data that has been sent to the network. */
static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -142,6 +143,7 @@ static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
tp->snd_cwnd_used = 0;
}
+/* Congestion state accounting after a packet has been sent. */
static void tcp_event_data_sent(struct tcp_sock *tp,
struct sk_buff *skb, struct sock *sk)
{
@@ -161,6 +163,7 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
icsk->icsk_ack.pingpong = 1;
}
+/* Account for an ACK we sent. */
static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
tcp_dec_quickack_mode(sk, pkts);
@@ -276,6 +279,7 @@ static u16 tcp_select_window(struct sock *sk)
return new_win;
}
+/* Packet ECN state for a SYN-ACK */
static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
{
TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_CWR;
@@ -283,6 +287,7 @@ static inline void TCP_ECN_send_synack(struct tcp_sock *tp, struct sk_buff *skb)
TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
}
+/* Packet ECN state for a SYN. */
static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -301,6 +306,9 @@ TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
th->ece = 1;
}
+/* Set up ECN state for a packet on an ESTABLISHED socket that is about to
+ * be sent.
+ */
static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
int tcp_header_len)
{
@@ -362,7 +370,9 @@ struct tcp_out_options {
__u32 tsval, tsecr; /* need to include OPTION_TS */
};
-/* Beware: Something in the Internet is very sensitive to the ordering of
+/* Write previously computed TCP options to the packet.
+ *
+ * Beware: Something in the Internet is very sensitive to the ordering of
 * TCP options, we learned this the hard way, so be careful here.
* Luckily we can at least blame others for their non-compliance but from
 * interoperability perspective it seems that we're somewhat stuck with
@@ -445,6 +455,9 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
}
}
+/* Compute TCP options for SYN packets. This is not the final
+ * network wire format yet.
+ */
static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
struct tcp_out_options *opts,
struct tcp_md5sig_key **md5) {
@@ -493,6 +506,7 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
return size;
}
+/* Set up TCP options for SYN-ACKs. */
static unsigned tcp_synack_options(struct sock *sk,
struct request_sock *req,
unsigned mss, struct sk_buff *skb,
@@ -541,6 +555,9 @@ static unsigned tcp_synack_options(struct sock *sk,
return size;
}
+/* Compute TCP options for ESTABLISHED sockets. This is not the
+ * final wire format yet.
+ */
static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
struct tcp_out_options *opts,
struct tcp_md5sig_key **md5) {
@@ -705,7 +722,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
return net_xmit_eval(err);
}
-/* This routine just queue's the buffer
+/* This routine just queues the buffer for sending.
*
* NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
* otherwise socket can stall.
@@ -722,6 +739,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
sk_mem_charge(sk, skb->truesize);
}
+/* Initialize TSO segments for a packet. */
static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb,
unsigned int mss_now)
{
@@ -909,6 +927,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
skb->len = skb->data_len;
}
+/* Remove acked data from a packet in the transmit queue. */
int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
@@ -937,7 +956,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
return 0;
}
-/* Not accounting for SACKs here. */
+/* Calculate MSS. Not accounting for SACKs here. */
int tcp_mtu_to_mss(struct sock *sk, int pmtu)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -981,6 +1000,7 @@ int tcp_mss_to_mtu(struct sock *sk, int mss)
return mtu;
}
+/* MTU probing init per socket */
void tcp_mtup_init(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -1143,7 +1163,8 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp,
return 0;
}
-/* This must be invoked the first time we consider transmitting
+/* Initialize TSO state of a skb.
+ * This must be invoked the first time we consider transmitting
* SKB onto the wire.
*/
static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
@@ -1158,6 +1179,7 @@ static int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb,
return tso_segs;
}
+/* Minshall's variant of the Nagle send check. */
static inline int tcp_minshall_check(const struct tcp_sock *tp)
{
return after(tp->snd_sml, tp->snd_una) &&
@@ -1242,6 +1264,7 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
return cwnd_quota;
}
+/* Test if sending is allowed right now. */
int tcp_may_send_now(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -1378,6 +1401,10 @@ send_now:
}
/* Create a new MTU probe if we are ready.
+ * MTU probing regularly attempts to increase the path MTU by
+ * deliberately sending larger packets. This discovers routing
+ * changes that result in larger path MTUs.
+ *
* Returns 0 if we should wait to probe (no cwnd available),
* 1 if a probe was sent,
* -1 otherwise
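
The probing above is entirely kernel-internal; as a rough user-space illustration (not part of this patch), the path MTU the kernel currently assumes for a connected socket can be read back with the standard IP_MTU socket option, assuming a Linux host whose headers expose it via <netinet/in.h>.

/* Rough user-space sketch, not part of this patch. */
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

static void print_path_mtu(int connected_fd)
{
	int mtu = 0;
	socklen_t len = sizeof(mtu);

	/* IP_MTU is only meaningful on a connected socket. */
	if (getsockopt(connected_fd, IPPROTO_IP, IP_MTU, &mtu, &len) == 0)
		printf("current path MTU: %d\n", mtu);
	else
		perror("getsockopt(IP_MTU)");
}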
@@ -1790,6 +1817,7 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
sk_wmem_free_skb(sk, next_skb);
}
+/* Check if coalescing SKBs is legal. */
static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb)
{
if (tcp_skb_pcount(skb) > 1)
@@ -1808,6 +1836,9 @@ static int tcp_can_collapse(struct sock *sk, struct sk_buff *skb)
return 1;
}
+/* Collapse packets in the retransmit queue to create fewer
+ * packets on the wire. This is only done on retransmission.
+ */
static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
int space)
{
@@ -1957,6 +1988,9 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
return err;
}
+/* Check whether forward retransmits are possible in the current
+ * window/congestion state.
+ */
static int tcp_can_forward_retransmit(struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2101,7 +2135,8 @@ void tcp_send_fin(struct sock *sk)
} else {
/* Socket is locked, keep trying until memory is available. */
for (;;) {
- skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL);
+ skb = alloc_skb_fclone(MAX_TCP_HEADER,
+ sk->sk_allocation);
if (skb)
break;
yield();
@@ -2145,7 +2180,8 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTRSTS);
}
-/* WARNING: This routine must only be called when we have already sent
+/* Send a crossed SYN-ACK during socket establishment.
+ * WARNING: This routine must only be called when we have already sent
* a SYN packet that crossed the incoming SYN that caused this routine
* to get called. If this assumption fails then the initial rcv_wnd
* and rcv_wscale values will not be correct.
@@ -2180,9 +2216,7 @@ int tcp_send_synack(struct sock *sk)
return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}
-/*
- * Prepare a SYN-ACK.
- */
+/* Prepare a SYN-ACK. */
struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
struct request_sock *req)
{
@@ -2269,9 +2303,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
return skb;
}
-/*
- * Do all connect socket setups that can be done AF independent.
- */
+/* Do all connect socket setups that can be done AF independent. */
static void tcp_connect_init(struct sock *sk)
{
struct dst_entry *dst = __sk_dst_get(sk);
@@ -2330,9 +2362,7 @@ static void tcp_connect_init(struct sock *sk)
tcp_clear_retrans(tp);
}
-/*
- * Build a SYN and send it off.
- */
+/* Build a SYN and send it off. */
int tcp_connect(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -2359,7 +2389,7 @@ int tcp_connect(struct sock *sk)
sk->sk_wmem_queued += buff->truesize;
sk_mem_charge(sk, buff->truesize);
tp->packets_out += tcp_skb_pcount(buff);
- tcp_transmit_skb(sk, buff, 1, GFP_KERNEL);
+ tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
/* We change tp->snd_nxt after the tcp_transmit_skb() call
* in order to make this packet get counted in tcpOutSegs.
@@ -2493,6 +2523,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
}
+/* Initiate keepalive or window probe from timer. */
int tcp_write_wakeup(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index b144a26359b..cdb2ca7684d 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -137,13 +137,14 @@ static int tcp_write_timeout(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
int retry_until;
+ bool do_reset;
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
if (icsk->icsk_retransmits)
dst_negative_advice(&sk->sk_dst_cache);
retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
} else {
- if (icsk->icsk_retransmits >= sysctl_tcp_retries1) {
+ if (retransmits_timed_out(sk, sysctl_tcp_retries1)) {
/* Black hole detection */
tcp_mtu_probing(icsk, sk);
@@ -155,13 +156,15 @@ static int tcp_write_timeout(struct sock *sk)
const int alive = (icsk->icsk_rto < TCP_RTO_MAX);
retry_until = tcp_orphan_retries(sk, alive);
+ do_reset = alive ||
+ !retransmits_timed_out(sk, retry_until);
- if (tcp_out_of_resources(sk, alive || icsk->icsk_retransmits < retry_until))
+ if (tcp_out_of_resources(sk, do_reset))
return 1;
}
}
- if (icsk->icsk_retransmits >= retry_until) {
+ if (retransmits_timed_out(sk, retry_until)) {
/* Has it gone just too far? */
tcp_write_err(sk);
return 1;
@@ -279,7 +282,7 @@ static void tcp_probe_timer(struct sock *sk)
* The TCP retransmit timer.
*/
-static void tcp_retransmit_timer(struct sock *sk)
+void tcp_retransmit_timer(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -385,7 +388,7 @@ static void tcp_retransmit_timer(struct sock *sk)
out_reset_timer:
icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
- if (icsk->icsk_retransmits > sysctl_tcp_retries1)
+ if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1))
__sk_dst_reset(sk);
out:;
@@ -499,8 +502,7 @@ static void tcp_keepalive_timer (unsigned long data)
elapsed = tcp_time_stamp - tp->rcv_tstamp;
if (elapsed >= keepalive_time_when(tp)) {
- if ((!tp->keepalive_probes && icsk->icsk_probes_out >= sysctl_tcp_keepalive_probes) ||
- (tp->keepalive_probes && icsk->icsk_probes_out >= tp->keepalive_probes)) {
+ if (icsk->icsk_probes_out >= keepalive_probes(tp)) {
tcp_send_active_reset(sk, GFP_ATOMIC);
tcp_write_err(sk);
goto out;
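
As a rough user-space illustration (not part of this patch), the per-socket values that keepalive_probes() and friends prefer over the global sysctls are the ones applications set with the standard TCP keepalive socket options; the numbers below are illustrative only.

/* Rough user-space sketch, not part of this patch. */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int enable_keepalive(int fd)
{
	int on = 1, idle = 60, intvl = 10, cnt = 5;

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)))
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)))
		return -1;
	if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)))
		return -1;
	/* After cnt unanswered probes the keepalive timer path above
	 * resets the connection. */
	return setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
}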
diff --git a/net/ipv4/tunnel4.c b/net/ipv4/tunnel4.c
index cb1f0e83830..3959e0ca456 100644
--- a/net/ipv4/tunnel4.c
+++ b/net/ipv4/tunnel4.c
@@ -132,7 +132,7 @@ static void tunnel64_err(struct sk_buff *skb, u32 info)
}
#endif
-static struct net_protocol tunnel4_protocol = {
+static const struct net_protocol tunnel4_protocol = {
.handler = tunnel4_rcv,
.err_handler = tunnel4_err,
.no_policy = 1,
@@ -140,7 +140,7 @@ static struct net_protocol tunnel4_protocol = {
};
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static struct net_protocol tunnel64_protocol = {
+static const struct net_protocol tunnel64_protocol = {
.handler = tunnel64_rcv,
.err_handler = tunnel64_err,
.no_policy = 1,
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 80e3812837a..ebaaa7f973d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -110,11 +110,12 @@ struct udp_table udp_table;
EXPORT_SYMBOL(udp_table);
int sysctl_udp_mem[3] __read_mostly;
-int sysctl_udp_rmem_min __read_mostly;
-int sysctl_udp_wmem_min __read_mostly;
-
EXPORT_SYMBOL(sysctl_udp_mem);
+
+int sysctl_udp_rmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_rmem_min);
+
+int sysctl_udp_wmem_min __read_mostly;
EXPORT_SYMBOL(sysctl_udp_wmem_min);
atomic_t udp_memory_allocated;
@@ -158,7 +159,7 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
*/
int udp_lib_get_port(struct sock *sk, unsigned short snum,
int (*saddr_comp)(const struct sock *sk1,
- const struct sock *sk2 ) )
+ const struct sock *sk2))
{
struct udp_hslot *hslot;
struct udp_table *udptable = sk->sk_prot->h.udp_table;
@@ -221,14 +222,15 @@ fail_unlock:
fail:
return error;
}
+EXPORT_SYMBOL(udp_lib_get_port);
static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
{
struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
- return ( !ipv6_only_sock(sk2) &&
- (!inet1->rcv_saddr || !inet2->rcv_saddr ||
- inet1->rcv_saddr == inet2->rcv_saddr ));
+ return (!ipv6_only_sock(sk2) &&
+ (!inet1->rcv_saddr || !inet2->rcv_saddr ||
+ inet1->rcv_saddr == inet2->rcv_saddr));
}
int udp_v4_get_port(struct sock *sk, unsigned short snum)
@@ -383,8 +385,8 @@ found:
void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
{
struct inet_sock *inet;
- struct iphdr *iph = (struct iphdr*)skb->data;
- struct udphdr *uh = (struct udphdr*)(skb->data+(iph->ihl<<2));
+ struct iphdr *iph = (struct iphdr *)skb->data;
+ struct udphdr *uh = (struct udphdr *)(skb->data+(iph->ihl<<2));
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
struct sock *sk;
@@ -439,7 +441,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
if (!harderr || sk->sk_state != TCP_ESTABLISHED)
goto out;
} else {
- ip_icmp_error(sk, skb, err, uh->dest, info, (u8*)(uh+1));
+ ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
}
sk->sk_err = err;
sk->sk_error_report(sk);
@@ -474,7 +476,7 @@ EXPORT_SYMBOL(udp_flush_pending_frames);
* (checksum field must be zeroed out)
*/
static void udp4_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
- __be32 src, __be32 dst, int len )
+ __be32 src, __be32 dst, int len)
{
unsigned int offset;
struct udphdr *uh = udp_hdr(skb);
@@ -545,7 +547,7 @@ static int udp_push_pending_frames(struct sock *sk)
} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
- udp4_hwcsum_outgoing(sk, skb, fl->fl4_src,fl->fl4_dst, up->len);
+ udp4_hwcsum_outgoing(sk, skb, fl->fl4_src, fl->fl4_dst, up->len);
goto send;
} else /* `normal' UDP */
@@ -553,18 +555,24 @@ static int udp_push_pending_frames(struct sock *sk)
/* add protocol-dependent pseudo-header */
uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, up->len,
- sk->sk_protocol, csum );
+ sk->sk_protocol, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
send:
err = ip_push_pending_frames(sk);
+ if (err) {
+ if (err == -ENOBUFS && !inet->recverr) {
+ UDP_INC_STATS_USER(sock_net(sk),
+ UDP_MIB_SNDBUFERRORS, is_udplite);
+ err = 0;
+ }
+ } else
+ UDP_INC_STATS_USER(sock_net(sk),
+ UDP_MIB_OUTDATAGRAMS, is_udplite);
out:
up->len = 0;
up->pending = 0;
- if (!err)
- UDP_INC_STATS_USER(sock_net(sk),
- UDP_MIB_OUTDATAGRAMS, is_udplite);
return err;
}
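
As a rough user-space illustration (not part of this patch), an application that wants to see the -ENOBUFS case handled above, rather than have it absorbed as an SNMP counter increment, enables extended error reporting on the socket; IP_RECVERR is the standard option documented in ip(7).

/* Rough user-space sketch, not part of this patch. */
#include <netinet/in.h>
#include <sys/socket.h>

static int enable_udp_error_reporting(int udp_fd)
{
	int on = 1;

	/* With IP_RECVERR set, send errors are reported to the caller
	 * (and queued on the error queue) instead of being suppressed. */
	return setsockopt(udp_fd, IPPROTO_IP, IP_RECVERR, &on, sizeof(on));
}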
@@ -592,7 +600,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
* Check the flags.
*/
- if (msg->msg_flags&MSG_OOB) /* Mirror BSD error message compatibility */
+ if (msg->msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
return -EOPNOTSUPP;
ipc.opt = NULL;
@@ -619,7 +627,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
* Get and verify the address.
*/
if (msg->msg_name) {
- struct sockaddr_in * usin = (struct sockaddr_in*)msg->msg_name;
+ struct sockaddr_in * usin = (struct sockaddr_in *)msg->msg_name;
if (msg->msg_namelen < sizeof(*usin))
return -EINVAL;
if (usin->sin_family != AF_INET) {
@@ -684,7 +692,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
}
if (connected)
- rt = (struct rtable*)sk_dst_check(sk, 0);
+ rt = (struct rtable *)sk_dst_check(sk, 0);
if (rt == NULL) {
struct flowi fl = { .oif = ipc.oif,
@@ -782,6 +790,7 @@ do_confirm:
err = 0;
goto out;
}
+EXPORT_SYMBOL(udp_sendmsg);
int udp_sendpage(struct sock *sk, struct page *page, int offset,
size_t size, int flags)
@@ -871,6 +880,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
return 0;
}
+EXPORT_SYMBOL(udp_ioctl);
/*
* This should be easy, if there is something there we
@@ -892,7 +902,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
* Check any passed addresses
*/
if (addr_len)
- *addr_len=sizeof(*sin);
+ *addr_len = sizeof(*sin);
if (flags & MSG_ERRQUEUE)
return ip_recv_error(sk, msg, len);
@@ -923,9 +933,11 @@ try_again:
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
- msg->msg_iov, copied );
+ msg->msg_iov, copied);
else {
- err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
+ err = skb_copy_and_csum_datagram_iovec(skb,
+ sizeof(struct udphdr),
+ msg->msg_iov);
if (err == -EINVAL)
goto csum_copy_err;
@@ -941,8 +953,7 @@ try_again:
sock_recv_timestamp(msg, sk, skb);
/* Copy the address. */
- if (sin)
- {
+ if (sin) {
sin->sin_family = AF_INET;
sin->sin_port = udp_hdr(skb)->source;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
@@ -995,6 +1006,7 @@ int udp_disconnect(struct sock *sk, int flags)
sk_dst_reset(sk);
return 0;
}
+EXPORT_SYMBOL(udp_disconnect);
void udp_lib_unhash(struct sock *sk)
{
@@ -1044,7 +1056,7 @@ drop:
* Note that in the success and error cases, the skb is assumed to
* have either been requeued or freed.
*/
-int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
+int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct udp_sock *up = udp_sk(sk);
int rc;
@@ -1214,7 +1226,7 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
if (uh->check == 0) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if (skb->ip_summed == CHECKSUM_COMPLETE) {
- if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
+ if (!csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
proto, skb->csum))
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
@@ -1355,7 +1367,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
int err = 0;
int is_udplite = IS_UDPLITE(sk);
- if (optlen<sizeof(int))
+ if (optlen < sizeof(int))
return -EINVAL;
if (get_user(val, (int __user *)optval))
@@ -1426,6 +1438,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
return err;
}
+EXPORT_SYMBOL(udp_lib_setsockopt);
int udp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, int optlen)
@@ -1453,7 +1466,7 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
struct udp_sock *up = udp_sk(sk);
int val, len;
- if (get_user(len,optlen))
+ if (get_user(len, optlen))
return -EFAULT;
len = min_t(unsigned int, len, sizeof(int));
@@ -1486,10 +1499,11 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
if (put_user(len, optlen))
return -EFAULT;
- if (copy_to_user(optval, &val,len))
+ if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
+EXPORT_SYMBOL(udp_lib_getsockopt);
int udp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
@@ -1528,9 +1542,9 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
int is_lite = IS_UDPLITE(sk);
/* Check for false positives due to checksum errors */
- if ( (mask & POLLRDNORM) &&
- !(file->f_flags & O_NONBLOCK) &&
- !(sk->sk_shutdown & RCV_SHUTDOWN)){
+ if ((mask & POLLRDNORM) &&
+ !(file->f_flags & O_NONBLOCK) &&
+ !(sk->sk_shutdown & RCV_SHUTDOWN)) {
struct sk_buff_head *rcvq = &sk->sk_receive_queue;
struct sk_buff *skb;
@@ -1552,6 +1566,7 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
return mask;
}
+EXPORT_SYMBOL(udp_poll);
struct proto udp_prot = {
.name = "UDP",
@@ -1582,6 +1597,7 @@ struct proto udp_prot = {
.compat_getsockopt = compat_udp_getsockopt,
#endif
};
+EXPORT_SYMBOL(udp_prot);
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
@@ -1703,11 +1719,13 @@ int udp_proc_register(struct net *net, struct udp_seq_afinfo *afinfo)
rc = -ENOMEM;
return rc;
}
+EXPORT_SYMBOL(udp_proc_register);
void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
{
proc_net_remove(net, afinfo->name);
}
+EXPORT_SYMBOL(udp_proc_unregister);
/* ------------------------------------------------------------------------ */
static void udp4_format_sock(struct sock *sp, struct seq_file *f,
@@ -1741,7 +1759,7 @@ int udp4_seq_show(struct seq_file *seq, void *v)
int len;
udp4_format_sock(v, seq, state->bucket, &len);
- seq_printf(seq, "%*s\n", 127 - len ,"");
+ seq_printf(seq, "%*s\n", 127 - len, "");
}
return 0;
}
@@ -1816,16 +1834,64 @@ void __init udp_init(void)
sysctl_udp_wmem_min = SK_MEM_QUANTUM;
}
-EXPORT_SYMBOL(udp_disconnect);
-EXPORT_SYMBOL(udp_ioctl);
-EXPORT_SYMBOL(udp_prot);
-EXPORT_SYMBOL(udp_sendmsg);
-EXPORT_SYMBOL(udp_lib_getsockopt);
-EXPORT_SYMBOL(udp_lib_setsockopt);
-EXPORT_SYMBOL(udp_poll);
-EXPORT_SYMBOL(udp_lib_get_port);
+int udp4_ufo_send_check(struct sk_buff *skb)
+{
+ const struct iphdr *iph;
+ struct udphdr *uh;
+
+ if (!pskb_may_pull(skb, sizeof(*uh)))
+ return -EINVAL;
+
+ iph = ip_hdr(skb);
+ uh = udp_hdr(skb);
+
+ uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
+ IPPROTO_UDP, 0);
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ return 0;
+}
+
+struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, int features)
+{
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ unsigned int mss;
+ int offset;
+ __wsum csum;
+
+ mss = skb_shinfo(skb)->gso_size;
+ if (unlikely(skb->len <= mss))
+ goto out;
+
+ if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
+ /* Packet is from an untrusted source, reset gso_segs. */
+ int type = skb_shinfo(skb)->gso_type;
+
+ if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) ||
+ !(type & (SKB_GSO_UDP))))
+ goto out;
+
+ skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
+
+ segs = NULL;
+ goto out;
+ }
+
+ /* Do software UFO. Complete and fill in the UDP checksum as HW cannot
+ * checksum UDP packets sent as multiple IP fragments.
+ */
+ offset = skb->csum_start - skb_headroom(skb);
+ csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ offset += skb->csum_offset;
+ *(__sum16 *)(skb->data + offset) = csum_fold(csum);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Fragment the skb. IP headers of the fragments are updated in
+ * inet_gso_segment()
+ */
+ segs = skb_segment(skb, features);
+out:
+ return segs;
+}
-#ifdef CONFIG_PROC_FS
-EXPORT_SYMBOL(udp_proc_register);
-EXPORT_SYMBOL(udp_proc_unregister);
-#endif
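
For reference, the software checksum the UFO path completes above is the standard Internet checksum of RFC 1071 (over the pseudo-header set up in udp4_ufo_send_check() plus the UDP payload). An illustrative standalone version, not part of this patch:

/* Illustrative only: one's-complement Internet checksum with the final
 * fold, the same arithmetic csum_fold() performs in the kernel. */
#include <stddef.h>
#include <stdint.h>

static uint16_t inet_checksum(const void *data, size_t len)
{
	const uint8_t *p = data;
	uint32_t sum = 0;

	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte */
		sum += (uint32_t)p[0] << 8;

	while (sum >> 16)		/* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);

	return (uint16_t)~sum;
}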
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c
index c784891cb7e..95248d7f75e 100644
--- a/net/ipv4/udplite.c
+++ b/net/ipv4/udplite.c
@@ -25,7 +25,7 @@ static void udplite_err(struct sk_buff *skb, u32 info)
__udp4_lib_err(skb, info, &udplite_table);
}
-static struct net_protocol udplite_protocol = {
+static const struct net_protocol udplite_protocol = {
.handler = udplite_rcv,
.err_handler = udplite_err,
.no_policy = 1,
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 0071ee6f441..74fb2eb833e 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -264,6 +264,22 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
.fill_dst = xfrm4_fill_dst,
};
+#ifdef CONFIG_SYSCTL
+static struct ctl_table xfrm4_policy_table[] = {
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "xfrm4_gc_thresh",
+ .data = &xfrm4_dst_ops.gc_thresh,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ { }
+};
+
+static struct ctl_table_header *sysctl_hdr;
+#endif
+
static void __init xfrm4_policy_init(void)
{
xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
@@ -271,12 +287,31 @@ static void __init xfrm4_policy_init(void)
static void __exit xfrm4_policy_fini(void)
{
+#ifdef CONFIG_SYSCTL
+ if (sysctl_hdr)
+ unregister_net_sysctl_table(sysctl_hdr);
+#endif
xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo);
}
-void __init xfrm4_init(void)
+void __init xfrm4_init(int rt_max_size)
{
xfrm4_state_init();
xfrm4_policy_init();
+ /*
+ * Select a default value for the gc_thresh based on the main route
+ * table hash size. The worst-case scenario is IPsec operating in
+ * transport mode, where we create a dst_entry per socket. The xfrm
+ * gc algorithm starts trying to remove entries at gc_thresh and
+ * prevents new allocations at 2*gc_thresh, so set the initial xfrm
+ * gc_thresh value to rt_max_size/2. That lets us store one IPsec
+ * connection per route table entry, and start cleaning when we are
+ * 1/2 full.
+ */
+ xfrm4_dst_ops.gc_thresh = rt_max_size/2;
+#ifdef CONFIG_SYSCTL
+ sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path,
+ xfrm4_policy_table);
+#endif
}
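
As a rough sketch (not part of this patch), the new tunable can be read from user space; the /proc path below is an assumption based on the net_ipv4_ctl_path registration above and may differ on a given kernel.

/* Rough user-space sketch, not part of this patch. */
#include <stdio.h>

static long read_xfrm4_gc_thresh(void)
{
	long val = -1;
	FILE *f = fopen("/proc/sys/net/ipv4/xfrm4_gc_thresh", "r");

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}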
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 43b3c9f89c1..55f486d89c8 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -137,6 +137,8 @@ static DEFINE_SPINLOCK(addrconf_verify_lock);
static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
+static void addrconf_bonding_change(struct net_device *dev,
+ unsigned long event);
static int addrconf_ifdown(struct net_device *dev, int how);
static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags);
@@ -1371,12 +1373,14 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
/* Gets referenced address, destroys ifaddr */
-static void addrconf_dad_stop(struct inet6_ifaddr *ifp)
+static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
{
if (ifp->flags&IFA_F_PERMANENT) {
spin_lock_bh(&ifp->lock);
addrconf_del_timer(ifp);
ifp->flags |= IFA_F_TENTATIVE;
+ if (dad_failed)
+ ifp->flags |= IFA_F_DADFAILED;
spin_unlock_bh(&ifp->lock);
in6_ifa_put(ifp);
#ifdef CONFIG_IPV6_PRIVACY
@@ -1403,8 +1407,8 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
struct inet6_dev *idev = ifp->idev;
if (net_ratelimit())
- printk(KERN_INFO "%s: IPv6 duplicate address detected!\n",
- ifp->idev->dev->name);
+ printk(KERN_INFO "%s: IPv6 duplicate address %pI6c detected!\n",
+ ifp->idev->dev->name, &ifp->addr);
if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6) {
struct in6_addr addr;
@@ -1422,7 +1426,7 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
}
}
- addrconf_dad_stop(ifp);
+ addrconf_dad_stop(ifp, 1);
}
/* Join to solicited addr multicast group. */
@@ -2580,6 +2584,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
return notifier_from_errno(err);
}
break;
+ case NETDEV_BONDING_OLDTYPE:
+ case NETDEV_BONDING_NEWTYPE:
+ addrconf_bonding_change(dev, event);
+ break;
}
return NOTIFY_OK;
@@ -2593,6 +2601,19 @@ static struct notifier_block ipv6_dev_notf = {
.priority = 0
};
+static void addrconf_bonding_change(struct net_device *dev, unsigned long event)
+{
+ struct inet6_dev *idev;
+ ASSERT_RTNL();
+
+ idev = __in6_dev_get(dev);
+
+ if (event == NETDEV_BONDING_NEWTYPE)
+ ipv6_mc_remap(idev);
+ else if (event == NETDEV_BONDING_OLDTYPE)
+ ipv6_mc_unmap(idev);
+}
+
static int addrconf_ifdown(struct net_device *dev, int how)
{
struct inet6_dev *idev;
@@ -2778,7 +2799,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
idev->cnf.accept_dad < 1 ||
!(ifp->flags&IFA_F_TENTATIVE) ||
ifp->flags & IFA_F_NODAD) {
- ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC);
+ ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
spin_unlock_bh(&ifp->lock);
read_unlock_bh(&idev->lock);
@@ -2795,7 +2816,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
* - otherwise, kill it.
*/
in6_ifa_hold(ifp);
- addrconf_dad_stop(ifp);
+ addrconf_dad_stop(ifp, 0);
return;
}
@@ -2829,7 +2850,7 @@ static void addrconf_dad_timer(unsigned long data)
* DAD was successful
*/
- ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC);
+ ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
spin_unlock_bh(&ifp->lock);
read_unlock_bh(&idev->lock);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index caa0278d30a..e127a32f954 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -306,8 +306,10 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
v4addr != htonl(INADDR_ANY) &&
chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST &&
- chk_addr_ret != RTN_BROADCAST)
+ chk_addr_ret != RTN_BROADCAST) {
+ err = -EADDRNOTAVAIL;
goto out;
+ }
} else {
if (addr_type != IPV6_ADDR_ANY) {
struct net_device *dev = NULL;
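
As a rough user-space illustration (not part of this patch), with the fix above a bind() of an AF_INET6 socket to a v4-mapped address the host does not own now surfaces as EADDRNOTAVAIL; the address used below is from TEST-NET-1 and is illustrative only.

/* Rough user-space sketch, not part of this patch. */
#include <arpa/inet.h>
#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

static void try_v4mapped_bind(int fd6)
{
	struct sockaddr_in6 sa;

	memset(&sa, 0, sizeof(sa));
	sa.sin6_family = AF_INET6;
	sa.sin6_port = htons(12345);
	/* ::ffff:192.0.2.1 -- a v4-mapped address we do not own. */
	inet_pton(AF_INET6, "::ffff:192.0.2.1", &sa.sin6_addr);

	if (bind(fd6, (struct sockaddr *)&sa, sizeof(sa)) < 0 &&
	    errno == EADDRNOTAVAIL)
		printf("bind rejected as expected: %s\n", strerror(errno));
}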
@@ -708,7 +710,7 @@ EXPORT_SYMBOL_GPL(ipv6_opt_accepted);
static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
{
- struct inet6_protocol *ops = NULL;
+ const struct inet6_protocol *ops = NULL;
for (;;) {
struct ipv6_opt_hdr *opth;
@@ -743,7 +745,7 @@ static int ipv6_gso_pull_exthdrs(struct sk_buff *skb, int proto)
static int ipv6_gso_send_check(struct sk_buff *skb)
{
struct ipv6hdr *ipv6h;
- struct inet6_protocol *ops;
+ const struct inet6_protocol *ops;
int err = -EINVAL;
if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
@@ -771,7 +773,12 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
struct ipv6hdr *ipv6h;
- struct inet6_protocol *ops;
+ const struct inet6_protocol *ops;
+ int proto;
+ struct frag_hdr *fptr;
+ unsigned int unfrag_ip6hlen;
+ u8 *prevhdr;
+ int offset = 0;
if (!(features & NETIF_F_V6_CSUM))
features &= ~NETIF_F_SG;
@@ -791,10 +798,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
__skb_pull(skb, sizeof(*ipv6h));
segs = ERR_PTR(-EPROTONOSUPPORT);
+ proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
rcu_read_lock();
- ops = rcu_dereference(inet6_protos[
- ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr)]);
-
+ ops = rcu_dereference(inet6_protos[proto]);
if (likely(ops && ops->gso_segment)) {
skb_reset_transport_header(skb);
segs = ops->gso_segment(skb, features);
@@ -808,6 +814,16 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features)
ipv6h = ipv6_hdr(skb);
ipv6h->payload_len = htons(skb->len - skb->mac_len -
sizeof(*ipv6h));
+ if (proto == IPPROTO_UDP) {
+ unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
+ fptr = (struct frag_hdr *)(skb_network_header(skb) +
+ unfrag_ip6hlen);
+ fptr->frag_off = htons(offset);
+ if (skb->next != NULL)
+ fptr->frag_off |= htons(IP6_MF);
+ offset += (ntohs(ipv6h->payload_len) -
+ sizeof(struct frag_hdr));
+ }
}
out:
@@ -824,7 +840,7 @@ struct ipv6_gro_cb {
static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
struct sk_buff *skb)
{
- struct inet6_protocol *ops;
+ const struct inet6_protocol *ops;
struct sk_buff **pp = NULL;
struct sk_buff *p;
struct ipv6hdr *iph;
@@ -910,7 +926,7 @@ out:
static int ipv6_gro_complete(struct sk_buff *skb)
{
- struct inet6_protocol *ops;
+ const struct inet6_protocol *ops;
struct ipv6hdr *iph = ipv6_hdr(skb);
int err = -ENOSYS;
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 86f42a288c4..c1589e2f1dc 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -527,7 +527,7 @@ static const struct xfrm_type ah6_type =
.hdr_offset = xfrm6_find_1stfragopt,
};
-static struct inet6_protocol ah6_protocol = {
+static const struct inet6_protocol ah6_protocol = {
.handler = xfrm6_rcv,
.err_handler = ah6_err,
.flags = INET6_PROTO_NOPOLICY,
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 678bb95b152..af597c73ebe 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -558,7 +558,7 @@ static const struct xfrm_type esp6_type =
.hdr_offset = xfrm6_find_1stfragopt,
};
-static struct inet6_protocol esp6_protocol = {
+static const struct inet6_protocol esp6_protocol = {
.handler = xfrm6_rcv,
.err_handler = esp6_err,
.flags = INET6_PROTO_NOPOLICY,
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 4aae658e550..df159fffe4b 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -500,17 +500,17 @@ unknown_rh:
return -1;
}
-static struct inet6_protocol rthdr_protocol = {
+static const struct inet6_protocol rthdr_protocol = {
.handler = ipv6_rthdr_rcv,
.flags = INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR,
};
-static struct inet6_protocol destopt_protocol = {
+static const struct inet6_protocol destopt_protocol = {
.handler = ipv6_destopt_rcv,
.flags = INET6_PROTO_NOPOLICY | INET6_PROTO_GSO_EXTHDR,
};
-static struct inet6_protocol nodata_protocol = {
+static const struct inet6_protocol nodata_protocol = {
.handler = dst_discard,
.flags = INET6_PROTO_NOPOLICY,
};
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index eab62a7a8f0..f23ebbec063 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -86,7 +86,7 @@ static inline struct sock *icmpv6_sk(struct net *net)
static int icmpv6_rcv(struct sk_buff *skb);
-static struct inet6_protocol icmpv6_protocol = {
+static const struct inet6_protocol icmpv6_protocol = {
.handler = icmpv6_rcv,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
@@ -323,7 +323,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
int iif = 0;
int addr_type = 0;
int len;
- int hlimit, tclass;
+ int hlimit;
int err = 0;
if ((u8 *)hdr < skb->head ||
@@ -469,10 +469,6 @@ route_done:
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
- tclass = np->tclass;
- if (tclass < 0)
- tclass = 0;
-
msg.skb = skb;
msg.offset = skb_network_offset(skb);
msg.type = type;
@@ -488,8 +484,8 @@ route_done:
err = ip6_append_data(sk, icmpv6_getfrag, &msg,
len + sizeof(struct icmp6hdr),
- sizeof(struct icmp6hdr),
- hlimit, tclass, NULL, &fl, (struct rt6_info*)dst,
+ sizeof(struct icmp6hdr), hlimit,
+ np->tclass, NULL, &fl, (struct rt6_info*)dst,
MSG_DONTWAIT);
if (err) {
ip6_flush_pending_frames(sk);
@@ -522,7 +518,6 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
struct dst_entry *dst;
int err = 0;
int hlimit;
- int tclass;
saddr = &ipv6_hdr(skb)->daddr;
@@ -562,10 +557,6 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
- tclass = np->tclass;
- if (tclass < 0)
- tclass = 0;
-
idev = in6_dev_get(skb->dev);
msg.skb = skb;
@@ -573,7 +564,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
msg.type = ICMPV6_ECHO_REPLY;
err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
- sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl,
+ sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl,
(struct rt6_info*)dst, MSG_DONTWAIT);
if (err) {
@@ -592,7 +583,7 @@ out:
static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
{
- struct inet6_protocol *ipprot;
+ const struct inet6_protocol *ipprot;
int inner_offset;
int hash;
u8 nexthdr;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 52ee1dced2f..0e93ca56eb6 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -164,12 +164,6 @@ static __inline__ void rt6_release(struct rt6_info *rt)
dst_free(&rt->u.dst);
}
-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
-#define FIB_TABLE_HASHSZ 256
-#else
-#define FIB_TABLE_HASHSZ 1
-#endif
-
static void fib6_link_table(struct net *net, struct fib6_table *tb)
{
unsigned int h;
@@ -180,7 +174,7 @@ static void fib6_link_table(struct net *net, struct fib6_table *tb)
*/
rwlock_init(&tb->tb6_lock);
- h = tb->tb6_id & (FIB_TABLE_HASHSZ - 1);
+ h = tb->tb6_id & (FIB6_TABLE_HASHSZ - 1);
/*
* No protection necessary, this is the only list mutatation
@@ -231,7 +225,7 @@ struct fib6_table *fib6_get_table(struct net *net, u32 id)
if (id == 0)
id = RT6_TABLE_MAIN;
- h = id & (FIB_TABLE_HASHSZ - 1);
+ h = id & (FIB6_TABLE_HASHSZ - 1);
rcu_read_lock();
head = &net->ipv6.fib_table_hash[h];
hlist_for_each_entry_rcu(tb, node, head, tb6_hlist) {
@@ -382,7 +376,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
arg.net = net;
w->args = &arg;
- for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
+ for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) {
e = 0;
head = &net->ipv6.fib_table_hash[h];
hlist_for_each_entry(tb, node, head, tb6_hlist) {
@@ -1368,7 +1362,7 @@ void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *arg),
unsigned int h;
rcu_read_lock();
- for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
+ for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
head = &net->ipv6.fib_table_hash[h];
hlist_for_each_entry_rcu(table, node, head, tb6_hlist) {
write_lock_bh(&table->tb6_lock);
@@ -1483,7 +1477,7 @@ static int fib6_net_init(struct net *net)
if (!net->ipv6.rt6_stats)
goto out_timer;
- net->ipv6.fib_table_hash = kcalloc(FIB_TABLE_HASHSZ,
+ net->ipv6.fib_table_hash = kcalloc(FIB6_TABLE_HASHSZ,
sizeof(*net->ipv6.fib_table_hash),
GFP_KERNEL);
if (!net->ipv6.fib_table_hash)
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 6d6a4277c67..237e2dba6e9 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -63,7 +63,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
if (skb->pkt_type == PACKET_OTHERHOST) {
kfree_skb(skb);
- return 0;
+ return NET_RX_DROP;
}
rcu_read_lock();
@@ -133,7 +133,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
if (ipv6_parse_hopopts(skb) < 0) {
IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
rcu_read_unlock();
- return 0;
+ return NET_RX_DROP;
}
}
@@ -149,7 +149,7 @@ err:
drop:
rcu_read_unlock();
kfree_skb(skb);
- return 0;
+ return NET_RX_DROP;
}
/*
@@ -159,7 +159,7 @@ drop:
static int ip6_input_finish(struct sk_buff *skb)
{
- struct inet6_protocol *ipprot;
+ const struct inet6_protocol *ipprot;
unsigned int nhoff;
int nexthdr, raw;
u8 hash;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 87f8419a68f..cd48801a8d6 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -57,18 +57,6 @@
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));
-static __inline__ void ipv6_select_ident(struct sk_buff *skb, struct frag_hdr *fhdr)
-{
- static u32 ipv6_fragmentation_id = 1;
- static DEFINE_SPINLOCK(ip6_id_lock);
-
- spin_lock_bh(&ip6_id_lock);
- fhdr->identification = htonl(ipv6_fragmentation_id);
- if (++ipv6_fragmentation_id == 0)
- ipv6_fragmentation_id = 1;
- spin_unlock_bh(&ip6_id_lock);
-}
-
int __ip6_local_out(struct sk_buff *skb)
{
int len;
@@ -206,7 +194,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
struct ipv6hdr *hdr;
u8 proto = fl->proto;
int seg_len = skb->len;
- int hlimit, tclass;
+ int hlimit = -1;
+ int tclass = 0;
u32 mtu;
if (opt) {
@@ -249,19 +238,13 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
/*
* Fill in the IPv6 header
*/
-
- hlimit = -1;
- if (np)
+ if (np) {
+ tclass = np->tclass;
hlimit = np->hop_limit;
+ }
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
- tclass = -1;
- if (np)
- tclass = np->tclass;
- if (tclass < 0)
- tclass = 0;
-
*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;
hdr->payload_len = htons(seg_len);
@@ -706,7 +689,7 @@ static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
skb_reset_network_header(skb);
memcpy(skb_network_header(skb), tmp_hdr, hlen);
- ipv6_select_ident(skb, fh);
+ ipv6_select_ident(fh);
fh->nexthdr = nexthdr;
fh->reserved = 0;
fh->frag_off = htons(IP6_MF);
@@ -844,7 +827,7 @@ slow_path:
fh->nexthdr = nexthdr;
fh->reserved = 0;
if (!frag_id) {
- ipv6_select_ident(skb, fh);
+ ipv6_select_ident(fh);
frag_id = fh->identification;
} else
fh->identification = frag_id;
@@ -1087,11 +1070,13 @@ static inline int ip6_ufo_append_data(struct sock *sk,
if (!err) {
struct frag_hdr fhdr;
- /* specify the length of each IP datagram fragment*/
- skb_shinfo(skb)->gso_size = mtu - fragheaderlen -
- sizeof(struct frag_hdr);
+ /* Specify the length of each IPv6 datagram fragment.
+ * It has to be a multiple of 8.
+ */
+ skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
+ sizeof(struct frag_hdr)) & ~7;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
- ipv6_select_ident(skb, &fhdr);
+ ipv6_select_ident(&fhdr);
skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
__skb_queue_tail(&sk->sk_write_queue, skb);
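
The multiple-of-8 requirement above comes from the IPv6 fragment header, whose offsets are expressed in 8-octet units, so every non-final fragment must carry a payload length that is a multiple of 8. A tiny illustration (not part of this patch, values made up):

/* Illustrative only: "& ~7" rounds the per-fragment payload down to a
 * multiple of 8 bytes. */
#include <stdio.h>

int main(void)
{
	unsigned int payload_room = 1452;	/* mtu - headers, hypothetical */

	printf("gso_size = %u\n", payload_room & ~7u);	/* prints 1448 */
	return 0;
}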
@@ -1526,7 +1511,7 @@ int ip6_push_pending_frames(struct sock *sk)
err = ip6_local_out(skb);
if (err) {
if (err > 0)
- err = np->recverr ? net_xmit_errno(err) : 0;
+ err = net_xmit_errno(err);
if (err)
goto error;
}
@@ -1535,6 +1520,7 @@ out:
ip6_cork_release(inet, np);
return err;
error:
+ IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
goto out;
}
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 51f410e7775..7d25bbe3211 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1036,7 +1036,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
-static int
+static netdev_tx_t
ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
@@ -1063,14 +1063,14 @@ ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
goto tx_err;
t->recursion--;
- return 0;
+ return NETDEV_TX_OK;
tx_err:
stats->tx_errors++;
stats->tx_dropped++;
kfree_skb(skb);
t->recursion--;
- return 0;
+ return NETDEV_TX_OK;
}
static void ip6_tnl_set_cap(struct ip6_tnl *t)
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index c769f155c69..3907510c2ce 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -83,10 +83,6 @@ static int ip6mr_cache_report(struct net *net, struct sk_buff *pkt,
static int ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm);
static void mroute_clean_tables(struct net *net);
-#ifdef CONFIG_IPV6_PIMSM_V2
-static struct inet6_protocol pim6_protocol;
-#endif
-
static struct timer_list ipmr_expire_timer;
@@ -204,7 +200,7 @@ static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
return 0;
}
-static struct seq_operations ip6mr_vif_seq_ops = {
+static const struct seq_operations ip6mr_vif_seq_ops = {
.start = ip6mr_vif_seq_start,
.next = ip6mr_vif_seq_next,
.stop = ip6mr_vif_seq_stop,
@@ -217,7 +213,7 @@ static int ip6mr_vif_open(struct inode *inode, struct file *file)
sizeof(struct ipmr_vif_iter));
}
-static struct file_operations ip6mr_vif_fops = {
+static const struct file_operations ip6mr_vif_fops = {
.owner = THIS_MODULE,
.open = ip6mr_vif_open,
.read = seq_read,
@@ -341,7 +337,7 @@ static int ipmr_mfc_open(struct inode *inode, struct file *file)
sizeof(struct ipmr_mfc_iter));
}
-static struct file_operations ip6mr_mfc_fops = {
+static const struct file_operations ip6mr_mfc_fops = {
.owner = THIS_MODULE,
.open = ipmr_mfc_open,
.read = seq_read,
@@ -410,13 +406,14 @@ static int pim6_rcv(struct sk_buff *skb)
return 0;
}
-static struct inet6_protocol pim6_protocol = {
+static const struct inet6_protocol pim6_protocol = {
.handler = pim6_rcv,
};
/* Service routines creating virtual interfaces: PIMREG */
-static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct net *net = dev_net(dev);
@@ -427,7 +424,7 @@ static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
MRT6MSG_WHOLEPKT);
read_unlock(&mrt_lock);
kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
static const struct net_device_ops reg_vif_netdev_ops = {
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 79c172f1ff0..2f2a5ca2c87 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -178,7 +178,7 @@ static const struct xfrm_type ipcomp6_type =
.hdr_offset = xfrm6_find_1stfragopt,
};
-static struct inet6_protocol ipcomp6_protocol =
+static const struct inet6_protocol ipcomp6_protocol =
{
.handler = xfrm6_rcv,
.err_handler = ipcomp6_err,
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index a7fdf9a27f1..f5e0682b402 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -315,6 +315,9 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
goto e_inval;
if (val < -1 || val > 0xff)
goto e_inval;
+ /* RFC 3542, 6.5: default traffic class of 0x0 */
+ if (val == -1)
+ val = 0;
np->tclass = val;
retv = 0;
break;
@@ -1037,8 +1040,6 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
case IPV6_TCLASS:
val = np->tclass;
- if (val < 0)
- val = 0;
break;
case IPV6_RECVTCLASS:
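
As a rough user-space illustration (not part of this patch), applications set the per-socket traffic class with the standard IPV6_TCLASS option; per RFC 3542 a value of -1 means "use the default", and with the change above the kernel stores that default (0) at set time.

/* Rough user-space sketch, not part of this patch. */
#include <netinet/in.h>
#include <sys/socket.h>

static int set_traffic_class(int fd6, int tclass /* 0..255, or -1 for default */)
{
	return setsockopt(fd6, IPPROTO_IPV6, IPV6_TCLASS,
			  &tclass, sizeof(tclass));
}

static int get_traffic_class(int fd6)
{
	int tclass = 0;
	socklen_t len = sizeof(tclass);

	if (getsockopt(fd6, IPPROTO_IPV6, IPV6_TCLASS, &tclass, &len) < 0)
		return -1;
	return tclass;
}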
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 4b264ed40a8..f9fcf690bd5 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -2107,7 +2107,6 @@ static int ip6_mc_add_src(struct inet6_dev *idev, struct in6_addr *pmca,
for (j=0; j<i; j++)
(void) ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
- struct inet6_dev *idev = pmc->idev;
struct ip6_sf_list *psf;
/* filter mode change */
@@ -2250,6 +2249,25 @@ static void igmp6_timer_handler(unsigned long data)
ma_put(ma);
}
+/* Device changing type */
+
+void ipv6_mc_unmap(struct inet6_dev *idev)
+{
+ struct ifmcaddr6 *i;
+
+ /* Install multicast list, except for all-nodes (already installed) */
+
+ read_lock_bh(&idev->lock);
+ for (i = idev->mc_list; i; i = i->next)
+ igmp6_group_dropped(i);
+ read_unlock_bh(&idev->lock);
+}
+
+void ipv6_mc_remap(struct inet6_dev *idev)
+{
+ ipv6_mc_up(idev);
+}
+
/* Device going down */
void ipv6_mc_down(struct inet6_dev *idev)
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 9eb68e92cc1..7015478797f 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -98,7 +98,7 @@ static int pndisc_constructor(struct pneigh_entry *n);
static void pndisc_destructor(struct pneigh_entry *n);
static void pndisc_redo(struct sk_buff *skb);
-static struct neigh_ops ndisc_generic_ops = {
+static const struct neigh_ops ndisc_generic_ops = {
.family = AF_INET6,
.solicit = ndisc_solicit,
.error_report = ndisc_error_report,
@@ -108,7 +108,7 @@ static struct neigh_ops ndisc_generic_ops = {
.queue_xmit = dev_queue_xmit,
};
-static struct neigh_ops ndisc_hh_ops = {
+static const struct neigh_ops ndisc_hh_ops = {
.family = AF_INET6,
.solicit = ndisc_solicit,
.error_report = ndisc_error_report,
@@ -119,7 +119,7 @@ static struct neigh_ops ndisc_hh_ops = {
};
-static struct neigh_ops ndisc_direct_ops = {
+static const struct neigh_ops ndisc_direct_ops = {
.family = AF_INET6,
.output = dev_queue_xmit,
.connected_output = dev_queue_xmit,
@@ -955,8 +955,8 @@ static void ndisc_recv_na(struct sk_buff *skb)
*/
if (skb->pkt_type != PACKET_LOOPBACK)
ND_PRINTK1(KERN_WARNING
- "ICMPv6 NA: someone advertises our address on %s!\n",
- ifp->idev->dev->name);
+ "ICMPv6 NA: someone advertises our address %pI6 on %s!\n",
+ &ifp->addr, ifp->idev->dev->name);
in6_ifa_put(ifp);
return;
}
@@ -1151,10 +1151,6 @@ static void ndisc_router_discovery(struct sk_buff *skb)
skb->dev->name);
return;
}
- if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_ra) {
- in6_dev_put(in6_dev);
- return;
- }
if (!ndisc_parse_options(opt, optlen, &ndopts)) {
in6_dev_put(in6_dev);
@@ -1163,6 +1159,10 @@ static void ndisc_router_discovery(struct sk_buff *skb)
return;
}
+ /* skip route and link configuration on routers */
+ if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_ra)
+ goto skip_linkparms;
+
#ifdef CONFIG_IPV6_NDISC_NODETYPE
/* skip link-specific parameters from interior routers */
if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT)
@@ -1283,9 +1283,7 @@ skip_defrtr:
}
}
-#ifdef CONFIG_IPV6_NDISC_NODETYPE
skip_linkparms:
-#endif
/*
* Process options.
@@ -1312,6 +1310,10 @@ skip_linkparms:
NEIGH_UPDATE_F_ISROUTER);
}
+ /* skip route and link configuration on routers */
+ if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_ra)
+ goto out;
+
#ifdef CONFIG_IPV6_ROUTE_INFO
if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) {
struct nd_opt_hdr *p;
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index ced1f2c0cb6..cc9f8ef303f 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -8,7 +8,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/in.h>
#include <linux/skbuff.h>
@@ -222,16 +222,11 @@ get_entry(void *base, unsigned int offset)
/* All zeroes == unconditional rule. */
/* Mildly perf critical (only if packet tracing is on) */
-static inline int
-unconditional(const struct ip6t_ip6 *ipv6)
+static inline bool unconditional(const struct ip6t_ip6 *ipv6)
{
- unsigned int i;
-
- for (i = 0; i < sizeof(*ipv6); i++)
- if (((char *)ipv6)[i])
- break;
+ static const struct ip6t_ip6 uncond;
- return (i == sizeof(*ipv6));
+ return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
}
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
@@ -745,6 +740,21 @@ find_check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
return ret;
}
+static bool check_underflow(struct ip6t_entry *e)
+{
+ const struct ip6t_entry_target *t;
+ unsigned int verdict;
+
+ if (!unconditional(&e->ipv6))
+ return false;
+ t = ip6t_get_target(e);
+ if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
+ return false;
+ verdict = ((struct ip6t_standard_target *)t)->verdict;
+ verdict = -verdict - 1;
+ return verdict == NF_DROP || verdict == NF_ACCEPT;
+}
+
static int
check_entry_size_and_hooks(struct ip6t_entry *e,
struct xt_table_info *newinfo,
@@ -752,6 +762,7 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
unsigned char *limit,
const unsigned int *hook_entries,
const unsigned int *underflows,
+ unsigned int valid_hooks,
unsigned int *i)
{
unsigned int h;
@@ -771,15 +782,21 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
+ if (!(valid_hooks & (1 << h)))
+ continue;
if ((unsigned char *)e - base == hook_entries[h])
newinfo->hook_entry[h] = hook_entries[h];
- if ((unsigned char *)e - base == underflows[h])
+ if ((unsigned char *)e - base == underflows[h]) {
+ if (!check_underflow(e)) {
+ pr_err("Underflows must be unconditional and "
+ "use the STANDARD target with "
+ "ACCEPT/DROP\n");
+ return -EINVAL;
+ }
newinfo->underflow[h] = underflows[h];
+ }
}
- /* FIXME: underflows must be unconditional, standard verdicts
- < 0 (not IP6T_RETURN). --RR */
-
/* Clear counters and comefrom */
e->counters = ((struct xt_counters) { 0, 0 });
e->comefrom = 0;
@@ -842,7 +859,7 @@ translate_table(const char *name,
newinfo,
entry0,
entry0 + size,
- hook_entries, underflows, &i);
+ hook_entries, underflows, valid_hooks, &i);
if (ret != 0)
return ret;
@@ -2083,7 +2100,8 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
return ret;
}
-struct xt_table *ip6t_register_table(struct net *net, struct xt_table *table,
+struct xt_table *ip6t_register_table(struct net *net,
+ const struct xt_table *table,
const struct ip6t_replace *repl)
{
int ret;
diff --git a/net/ipv6/netfilter/ip6t_eui64.c b/net/ipv6/netfilter/ip6t_eui64.c
index db610bacbcc..ca287f6d2bc 100644
--- a/net/ipv6/netfilter/ip6t_eui64.c
+++ b/net/ipv6/netfilter/ip6t_eui64.c
@@ -23,7 +23,6 @@ static bool
eui64_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
{
unsigned char eui64[8];
- int i = 0;
if (!(skb_mac_header(skb) >= skb->head &&
skb_mac_header(skb) + ETH_HLEN <= skb->data) &&
@@ -42,12 +41,8 @@ eui64_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
eui64[4] = 0xfe;
eui64[0] ^= 0x02;
- i = 0;
- while (ipv6_hdr(skb)->saddr.s6_addr[8 + i] == eui64[i]
- && i < 8)
- i++;
-
- if (i == 8)
+ if (!memcmp(ipv6_hdr(skb)->saddr.s6_addr + 8, eui64,
+ sizeof(eui64)))
return true;
}
}
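
For reference, the eui64[] buffer the match compares against bytes 8..15 of the source address is the modified EUI-64 interface identifier derived from the 48-bit MAC; an illustrative standalone derivation, not part of this patch:

/* Illustrative only: MAC -> modified EUI-64 interface identifier. */
#include <stdint.h>
#include <string.h>

static void mac_to_eui64(const uint8_t mac[6], uint8_t eui64[8])
{
	memcpy(eui64, mac, 3);		/* OUI */
	eui64[3] = 0xff;		/* insert ff:fe in the middle */
	eui64[4] = 0xfe;
	memcpy(eui64 + 5, mac + 3, 3);	/* NIC-specific part */
	eui64[0] ^= 0x02;		/* flip the universal/local bit */
}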
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index ef5a0a32bf8..6f4383ad86f 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -51,11 +51,11 @@ static struct
.term = IP6T_ERROR_INIT, /* ERROR */
};
-static struct xt_table packet_filter = {
+static const struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.me = THIS_MODULE,
- .af = AF_INET6,
+ .af = NFPROTO_IPV6,
};
/* The work comes in here from netfilter.c. */
@@ -95,21 +95,21 @@ static struct nf_hook_ops ip6t_ops[] __read_mostly = {
{
.hook = ip6t_in_hook,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_FILTER,
},
{
.hook = ip6t_in_hook,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_FORWARD,
.priority = NF_IP6_PRI_FILTER,
},
{
.hook = ip6t_local_out_hook,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_FILTER,
},
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index ab0d398a2ba..0ad91433ed6 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -21,7 +21,7 @@ MODULE_DESCRIPTION("ip6tables mangle table");
(1 << NF_INET_LOCAL_OUT) | \
(1 << NF_INET_POST_ROUTING))
-static struct
+static const struct
{
struct ip6t_replace repl;
struct ip6t_standard entries[5];
@@ -57,11 +57,11 @@ static struct
.term = IP6T_ERROR_INIT, /* ERROR */
};
-static struct xt_table packet_mangler = {
+static const struct xt_table packet_mangler = {
.name = "mangle",
.valid_hooks = MANGLE_VALID_HOOKS,
.me = THIS_MODULE,
- .af = AF_INET6,
+ .af = NFPROTO_IPV6,
};
/* The work comes in here from netfilter.c. */
@@ -136,35 +136,35 @@ static struct nf_hook_ops ip6t_ops[] __read_mostly = {
{
.hook = ip6t_in_hook,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP6_PRI_MANGLE,
},
{
.hook = ip6t_in_hook,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_MANGLE,
},
{
.hook = ip6t_in_hook,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_FORWARD,
.priority = NF_IP6_PRI_MANGLE,
},
{
.hook = ip6t_local_out_hook,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_MANGLE,
},
{
.hook = ip6t_post_routing_hook,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP6_PRI_MANGLE,
},
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index 4b792b6ca32..ed1a1180f3b 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -8,7 +8,7 @@
#define RAW_VALID_HOOKS ((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_OUT))
-static struct
+static const struct
{
struct ip6t_replace repl;
struct ip6t_standard entries[2];
@@ -35,11 +35,11 @@ static struct
.term = IP6T_ERROR_INIT, /* ERROR */
};
-static struct xt_table packet_raw = {
+static const struct xt_table packet_raw = {
.name = "raw",
.valid_hooks = RAW_VALID_HOOKS,
.me = THIS_MODULE,
- .af = AF_INET6,
+ .af = NFPROTO_IPV6,
};
/* The work comes in here from netfilter.c. */
@@ -68,14 +68,14 @@ ip6t_local_out_hook(unsigned int hook,
static struct nf_hook_ops ip6t_ops[] __read_mostly = {
{
.hook = ip6t_pre_routing_hook,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP6_PRI_FIRST,
.owner = THIS_MODULE,
},
{
.hook = ip6t_local_out_hook,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_FIRST,
.owner = THIS_MODULE,
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
index 0ea37ff15d5..41b444c6093 100644
--- a/net/ipv6/netfilter/ip6table_security.c
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -26,7 +26,7 @@ MODULE_DESCRIPTION("ip6tables security table, for MAC rules");
(1 << NF_INET_FORWARD) | \
(1 << NF_INET_LOCAL_OUT)
-static struct
+static const struct
{
struct ip6t_replace repl;
struct ip6t_standard entries[3];
@@ -56,11 +56,11 @@ static struct
.term = IP6T_ERROR_INIT, /* ERROR */
};
-static struct xt_table security_table = {
+static const struct xt_table security_table = {
.name = "security",
.valid_hooks = SECURITY_VALID_HOOKS,
.me = THIS_MODULE,
- .af = AF_INET6,
+ .af = NFPROTO_IPV6,
};
static unsigned int
@@ -101,21 +101,21 @@ static struct nf_hook_ops ip6t_ops[] __read_mostly = {
{
.hook = ip6t_local_in_hook,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_SECURITY,
},
{
.hook = ip6t_forward_hook,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_FORWARD,
.priority = NF_IP6_PRI_SECURITY,
},
{
.hook = ip6t_local_out_hook,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_SECURITY,
},
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 2a15c2d66c6..5f2ec208a8c 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -27,6 +27,7 @@
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
+#include <net/netfilter/nf_log.h>
static bool ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
struct nf_conntrack_tuple *tuple)
@@ -176,8 +177,11 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
}
ret = helper->help(skb, protoff, ct, ctinfo);
- if (ret != NF_ACCEPT)
+ if (ret != NF_ACCEPT) {
+ nf_log_packet(NFPROTO_IPV6, hooknum, skb, in, out, NULL,
+ "nf_ct_%s: dropping packet", helper->name);
return ret;
+ }
out:
/* We've seen it coming out the other side: confirm it */
return nf_conntrack_confirm(skb);
@@ -265,42 +269,42 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
{
.hook = ipv6_defrag,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP6_PRI_CONNTRACK_DEFRAG,
},
{
.hook = ipv6_conntrack_in,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_PRE_ROUTING,
.priority = NF_IP6_PRI_CONNTRACK,
},
{
.hook = ipv6_conntrack_local,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_CONNTRACK,
},
{
.hook = ipv6_defrag,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP6_PRI_CONNTRACK_DEFRAG,
},
{
.hook = ipv6_confirm,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP6_PRI_LAST,
},
{
.hook = ipv6_confirm,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = NF_IP6_PRI_LAST-1,
},
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 590ddefb7ff..c9605c3ad91 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -101,7 +101,7 @@ static struct snmp_mib snmp6_icmp6_list[] = {
};
/* RFC 4293 v6 ICMPMsgStatsTable; named items for RFC 2466 compatibility */
-static char *icmp6type2name[256] = {
+static const char *const icmp6type2name[256] = {
[ICMPV6_DEST_UNREACH] = "DestUnreachs",
[ICMPV6_PKT_TOOBIG] = "PktTooBigs",
[ICMPV6_TIME_EXCEED] = "TimeExcds",
@@ -144,7 +144,7 @@ static void snmp6_seq_show_icmpv6msg(struct seq_file *seq, void **mib)
/* print by name -- deprecated items */
for (i = 0; i < ICMP6MSG_MIB_MAX; i++) {
int icmptype;
- char *p;
+ const char *p;
icmptype = i & 0xff;
p = icmp6type2name[icmptype];
diff --git a/net/ipv6/protocol.c b/net/ipv6/protocol.c
index 9ab78915991..1fa3468f0f3 100644
--- a/net/ipv6/protocol.c
+++ b/net/ipv6/protocol.c
@@ -20,27 +20,16 @@
* - Removed unused variable 'inet6_protocol_base'
* - Modified inet6_del_protocol() to correctly maintain copy bit.
*/
-
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/net.h>
-#include <linux/in6.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-
-#include <net/sock.h>
-#include <net/snmp.h>
-
-#include <net/ipv6.h>
+#include <linux/spinlock.h>
#include <net/protocol.h>
-struct inet6_protocol *inet6_protos[MAX_INET_PROTOS];
+const struct inet6_protocol *inet6_protos[MAX_INET_PROTOS];
static DEFINE_SPINLOCK(inet6_proto_lock);
-int inet6_add_protocol(struct inet6_protocol *prot, unsigned char protocol)
+int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol)
{
int ret, hash = protocol & (MAX_INET_PROTOS - 1);
@@ -64,7 +53,7 @@ EXPORT_SYMBOL(inet6_add_protocol);
* Remove a protocol from the hash tables.
*/
-int inet6_del_protocol(struct inet6_protocol *prot, unsigned char protocol)
+int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol)
{
int ret, hash = protocol & (MAX_INET_PROTOS - 1);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index d6c3c1c34b2..7d675b8d82d 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -642,7 +642,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
err = NF_HOOK(PF_INET6, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
dst_output);
if (err > 0)
- err = np->recverr ? net_xmit_errno(err) : 0;
+ err = net_xmit_errno(err);
if (err)
goto error;
out:
@@ -653,6 +653,8 @@ error_fault:
kfree_skb(skb);
error:
IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
+ if (err == -ENOBUFS && !np->recverr)
+ err = 0;
return err;
}
@@ -877,11 +879,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
hlimit = ip6_dst_hoplimit(dst);
}
- if (tclass < 0) {
+ if (tclass < 0)
tclass = np->tclass;
- if (tclass < 0)
- tclass = 0;
- }
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
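
Note: the rawv6_send_hdrinc() hunk above defers the IPV6_RECVERR check to the error path -- net_xmit_errno() is now applied unconditionally, and -ENOBUFS is only suppressed afterwards when the socket did not ask for extended error reporting. A minimal userspace sketch of that filtering step; the helper name filter_send_error is made up for illustration:

    #include <errno.h>
    #include <stdio.h>

    /* Mirror of the error post-processing added to the error path above:
     * -ENOBUFS is swallowed unless the socket enabled IPV6_RECVERR. */
    static int filter_send_error(int err, int recverr)
    {
        if (err == -ENOBUFS && !recverr)
            return 0;
        return err;
    }

    int main(void)
    {
        printf("ENOBUFS, recverr off: %d\n", filter_send_error(-ENOBUFS, 0));
        printf("ENOBUFS, recverr on:  %d\n", filter_send_error(-ENOBUFS, 1));
        printf("EFAULT,  recverr off: %d\n", filter_send_error(-EFAULT, 0));
        return 0;
    }
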
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 2642a41a853..da5bd0ed83d 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -627,7 +627,7 @@ fail_hdr:
return -1;
}
-static struct inet6_protocol frag_protocol =
+static const struct inet6_protocol frag_protocol =
{
.handler = ipv6_frag_rcv,
.flags = INET6_PROTO_NOPOLICY,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 1473ee0a1f5..77aecbe8ff6 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -481,7 +481,7 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
pref = rinfo->route_pref;
if (pref == ICMPV6_ROUTER_PREF_INVALID)
- pref = ICMPV6_ROUTER_PREF_MEDIUM;
+ return -EINVAL;
lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
@@ -665,7 +665,7 @@ static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, struct in6_addr *dad
net->ipv6.sysctl.ip6_rt_gc_elasticity = 1;
net->ipv6.sysctl.ip6_rt_gc_min_interval = 0;
- ip6_dst_gc(net->ipv6.ip6_dst_ops);
+ ip6_dst_gc(&net->ipv6.ip6_dst_ops);
net->ipv6.sysctl.ip6_rt_gc_elasticity =
saved_rt_elasticity;
@@ -970,7 +970,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
if (unlikely(idev == NULL))
return NULL;
- rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
+ rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
if (unlikely(rt == NULL)) {
in6_dev_put(idev);
goto out;
@@ -1060,7 +1060,7 @@ static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
static int ip6_dst_gc(struct dst_ops *ops)
{
unsigned long now = jiffies;
- struct net *net = ops->dst_net;
+ struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
@@ -1154,7 +1154,7 @@ int ip6_route_add(struct fib6_config *cfg)
goto out;
}
- rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
+ rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
if (rt == NULL) {
err = -ENOMEM;
@@ -1643,7 +1643,7 @@ out:
static struct rt6_info * ip6_rt_copy(struct rt6_info *ort)
{
struct net *net = dev_net(ort->rt6i_dev);
- struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
+ struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
if (rt) {
rt->u.dst.input = ort->u.dst.input;
@@ -1923,7 +1923,7 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
int anycast)
{
struct net *net = dev_net(idev->dev);
- struct rt6_info *rt = ip6_dst_alloc(net->ipv6.ip6_dst_ops);
+ struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
struct neighbour *neigh;
if (rt == NULL)
@@ -2501,7 +2501,7 @@ static int rt6_stats_seq_show(struct seq_file *seq, void *v)
net->ipv6.rt6_stats->fib_rt_alloc,
net->ipv6.rt6_stats->fib_rt_entries,
net->ipv6.rt6_stats->fib_rt_cache,
- atomic_read(&net->ipv6.ip6_dst_ops->entries),
+ atomic_read(&net->ipv6.ip6_dst_ops.entries),
net->ipv6.rt6_stats->fib_discarded_routes);
return 0;
@@ -2637,7 +2637,7 @@ struct ctl_table *ipv6_route_sysctl_init(struct net *net)
if (table) {
table[0].data = &net->ipv6.sysctl.flush_delay;
- table[1].data = &net->ipv6.ip6_dst_ops->gc_thresh;
+ table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
@@ -2655,12 +2655,8 @@ static int ip6_route_net_init(struct net *net)
{
int ret = -ENOMEM;
- net->ipv6.ip6_dst_ops = kmemdup(&ip6_dst_ops_template,
- sizeof(*net->ipv6.ip6_dst_ops),
- GFP_KERNEL);
- if (!net->ipv6.ip6_dst_ops)
- goto out;
- net->ipv6.ip6_dst_ops->dst_net = hold_net(net);
+ memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
+ sizeof(net->ipv6.ip6_dst_ops));
net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
sizeof(*net->ipv6.ip6_null_entry),
@@ -2669,7 +2665,7 @@ static int ip6_route_net_init(struct net *net)
goto out_ip6_dst_ops;
net->ipv6.ip6_null_entry->u.dst.path =
(struct dst_entry *)net->ipv6.ip6_null_entry;
- net->ipv6.ip6_null_entry->u.dst.ops = net->ipv6.ip6_dst_ops;
+ net->ipv6.ip6_null_entry->u.dst.ops = &net->ipv6.ip6_dst_ops;
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
@@ -2679,7 +2675,7 @@ static int ip6_route_net_init(struct net *net)
goto out_ip6_null_entry;
net->ipv6.ip6_prohibit_entry->u.dst.path =
(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
- net->ipv6.ip6_prohibit_entry->u.dst.ops = net->ipv6.ip6_dst_ops;
+ net->ipv6.ip6_prohibit_entry->u.dst.ops = &net->ipv6.ip6_dst_ops;
net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
sizeof(*net->ipv6.ip6_blk_hole_entry),
@@ -2688,7 +2684,7 @@ static int ip6_route_net_init(struct net *net)
goto out_ip6_prohibit_entry;
net->ipv6.ip6_blk_hole_entry->u.dst.path =
(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
- net->ipv6.ip6_blk_hole_entry->u.dst.ops = net->ipv6.ip6_dst_ops;
+ net->ipv6.ip6_blk_hole_entry->u.dst.ops = &net->ipv6.ip6_dst_ops;
#endif
net->ipv6.sysctl.flush_delay = 0;
@@ -2717,8 +2713,6 @@ out_ip6_null_entry:
kfree(net->ipv6.ip6_null_entry);
#endif
out_ip6_dst_ops:
- release_net(net->ipv6.ip6_dst_ops->dst_net);
- kfree(net->ipv6.ip6_dst_ops);
goto out;
}
@@ -2733,8 +2727,6 @@ static void ip6_route_net_exit(struct net *net)
kfree(net->ipv6.ip6_prohibit_entry);
kfree(net->ipv6.ip6_blk_hole_entry);
#endif
- release_net(net->ipv6.ip6_dst_ops->dst_net);
- kfree(net->ipv6.ip6_dst_ops);
}
static struct pernet_operations ip6_route_net_ops = {
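
Note: since ip6_dst_ops is now embedded directly in struct net instead of being allocated separately, ip6_dst_gc() recovers the owning namespace with container_of() rather than through the removed dst_net back-pointer. A self-contained sketch of that idiom, using stripped-down stand-ins for the kernel structures:

    #include <stddef.h>
    #include <stdio.h>

    /* Miniature stand-ins for the kernel structures, for illustration only. */
    struct dst_ops { int gc_thresh; };
    struct netns_ipv6 { struct dst_ops ip6_dst_ops; };
    struct net { struct netns_ipv6 ipv6; };

    /* container_of(): recover the enclosing structure from a pointer to
     * one of its members, via the member's offset. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static int ip6_dst_gc_demo(struct dst_ops *ops)
    {
        /* Same pattern as the patched ip6_dst_gc(): the dst_ops is embedded
         * in struct net, so the net can be computed back from it. */
        struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
        return net->ipv6.ip6_dst_ops.gc_thresh;
    }

    int main(void)
    {
        struct net net = { .ipv6 = { .ip6_dst_ops = { .gc_thresh = 1024 } } };
        printf("gc_thresh via container_of: %d\n",
               ip6_dst_gc_demo(&net.ipv6.ip6_dst_ops));
        return 0;
    }
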
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 98b7327d094..0ae4f644818 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -609,7 +609,8 @@ static inline __be32 try_6to4(struct in6_addr *v6dst)
* and that skb is filled properly by that function.
*/
-static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct net_device_stats *stats = &tunnel->dev->stats;
@@ -753,7 +754,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
stats->tx_dropped++;
dev_kfree_skb(skb);
tunnel->recursion--;
- return 0;
+ return NETDEV_TX_OK;
}
if (skb->sk)
skb_set_owner_w(new_skb, skb->sk);
@@ -778,7 +779,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
iph->version = 4;
iph->ihl = sizeof(struct iphdr)>>2;
if (mtu > IPV6_MIN_MTU)
- iph->frag_off = htons(IP_DF);
+ iph->frag_off = tiph->frag_off;
else
iph->frag_off = 0;
@@ -794,7 +795,7 @@ static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
IPTUNNEL_XMIT();
tunnel->recursion--;
- return 0;
+ return NETDEV_TX_OK;
tx_error_icmp:
dst_link_failure(skb);
@@ -802,7 +803,7 @@ tx_error:
stats->tx_errors++;
dev_kfree_skb(skb);
tunnel->recursion--;
- return 0;
+ return NETDEV_TX_OK;
}
static void ipip6_tunnel_bind_dev(struct net_device *dev)
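
Note: ipip6_tunnel_xmit() is converted to the netdev_tx_t return type, so every exit path returns NETDEV_TX_OK instead of a bare 0. A compile-and-run sketch of what that contract looks like, with stub types standing in for the kernel's sk_buff, net_device and netdev_tx_t (the stub NETDEV_TX_BUSY value is illustrative only):

    #include <stdio.h>

    /* Stub types standing in for the kernel definitions. */
    typedef enum { NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 0x10 /* illustrative */ } netdev_tx_t;
    struct sk_buff { int len; };
    struct net_device { const char *name; };

    /* Shape of an ndo_start_xmit handler after the conversion: it reports
     * one of the NETDEV_TX_* codes, never a raw 0 or errno. */
    static netdev_tx_t demo_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
    {
        printf("%s: transmitted %d bytes\n", dev->name, skb->len);
        return NETDEV_TX_OK;
    }

    int main(void)
    {
        struct sk_buff skb = { .len = 64 };
        struct net_device dev = { .name = "sit-demo0" };
        return demo_tunnel_xmit(&skb, &dev) == NETDEV_TX_OK ? 0 : 1;
    }
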
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 8c2513982b6..6b6ae913b5d 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -74,12 +74,13 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
return child;
}
-static DEFINE_PER_CPU(__u32, cookie_scratch)[16 + 5 + SHA_WORKSPACE_WORDS];
+static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS],
+ ipv6_cookie_scratch);
static u32 cookie_hash(struct in6_addr *saddr, struct in6_addr *daddr,
__be16 sport, __be16 dport, u32 count, int c)
{
- __u32 *tmp = __get_cpu_var(cookie_scratch);
+ __u32 *tmp = __get_cpu_var(ipv6_cookie_scratch);
/*
* we have 320 bits of information to hash, copy in the remaining
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index a031034720b..0dc6a4e5ed4 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -40,7 +40,7 @@ static ctl_table ipv6_table_template[] = {
{ .ctl_name = 0 }
};
-static ctl_table ipv6_table[] = {
+static ctl_table ipv6_rotable[] = {
{
.ctl_name = NET_IPV6_MLD_MAX_MSF,
.procname = "mld_max_msf",
@@ -130,7 +130,7 @@ int ipv6_sysctl_register(void)
{
int err = -ENOMEM;
- ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_table);
+ ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_rotable);
if (ip6_header == NULL)
goto out;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d849dd53b78..21d100b68b1 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -75,11 +75,11 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
-static struct inet_connection_sock_af_ops ipv6_mapped;
-static struct inet_connection_sock_af_ops ipv6_specific;
+static const struct inet_connection_sock_af_ops ipv6_mapped;
+static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
-static struct tcp_sock_af_ops tcp_sock_ipv6_specific;
-static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
+static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
+static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
struct in6_addr *addr)
@@ -591,7 +591,7 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer,
}
sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
}
- if (tcp_alloc_md5sig_pool() == NULL) {
+ if (tcp_alloc_md5sig_pool(sk) == NULL) {
kfree(newkey);
return -ENOMEM;
}
@@ -894,7 +894,7 @@ struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
};
#ifdef CONFIG_TCP_MD5SIG
-static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
+static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
.md5_lookup = tcp_v6_reqsk_md5_lookup,
.calc_md5_hash = tcp_v6_md5_hash_skb,
};
@@ -1003,6 +1003,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
t1 = (struct tcphdr *) skb_push(buff, tot_len);
+ skb_reset_transport_header(skb);
/* Swap the send and the receive. */
memset(t1, 0, sizeof(*t1));
@@ -1760,7 +1761,7 @@ static int tcp_v6_remember_stamp(struct sock *sk)
return 0;
}
-static struct inet_connection_sock_af_ops ipv6_specific = {
+static const struct inet_connection_sock_af_ops ipv6_specific = {
.queue_xmit = inet6_csk_xmit,
.send_check = tcp_v6_send_check,
.rebuild_header = inet6_sk_rebuild_header,
@@ -1780,7 +1781,7 @@ static struct inet_connection_sock_af_ops ipv6_specific = {
};
#ifdef CONFIG_TCP_MD5SIG
-static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
+static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
.md5_lookup = tcp_v6_md5_lookup,
.calc_md5_hash = tcp_v6_md5_hash_skb,
.md5_add = tcp_v6_md5_add_func,
@@ -1792,7 +1793,7 @@ static struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
* TCP over IPv4 via INET6 API
*/
-static struct inet_connection_sock_af_ops ipv6_mapped = {
+static const struct inet_connection_sock_af_ops ipv6_mapped = {
.queue_xmit = ip_queue_xmit,
.send_check = tcp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
@@ -1812,7 +1813,7 @@ static struct inet_connection_sock_af_ops ipv6_mapped = {
};
#ifdef CONFIG_TCP_MD5SIG
-static struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
+static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
.md5_lookup = tcp_v4_md5_lookup,
.calc_md5_hash = tcp_v4_md5_hash_skb,
.md5_add = tcp_v6_md5_add_func,
@@ -1845,7 +1846,7 @@ static int tcp_v6_init_sock(struct sock *sk)
/* See draft-stevens-tcpca-spec-01 for discussion of the
* initialization of these values.
*/
- tp->snd_ssthresh = 0x7fffffff;
+ tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tp->snd_cwnd_clamp = ~0;
tp->mss_cache = 536;
@@ -1968,7 +1969,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
jiffies_to_clock_t(icsk->icsk_rto),
jiffies_to_clock_t(icsk->icsk_ack.ato),
(icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
- tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
+ tp->snd_cwnd,
+ tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh
);
}
@@ -2092,7 +2094,7 @@ struct proto tcpv6_prot = {
#endif
};
-static struct inet6_protocol tcpv6_protocol = {
+static const struct inet6_protocol tcpv6_protocol = {
.handler = tcp_v6_rcv,
.err_handler = tcp_v6_err,
.gso_send_check = tcp_v6_gso_send_check,
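
Note: tcp_v6_init_sock() now seeds snd_ssthresh with TCP_INFINITE_SSTHRESH, and the /proc output tests tcp_in_initial_slowstart() instead of the old "ssthresh >= 0xFFFF" heuristic. A runnable sketch of that predicate; the macro value is assumed from the 0x7fffffff literal it replaces above, and tcp_sock_demo is a made-up stand-in for struct tcp_sock:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed to mirror the kernel's TCP_INFINITE_SSTHRESH, i.e. the
     * literal 0x7fffffff the patch replaces in tcp_v6_init_sock(). */
    #define TCP_INFINITE_SSTHRESH 0x7fffffff

    struct tcp_sock_demo { uint32_t snd_ssthresh; };

    /* The socket is still in its initial slow start while ssthresh has
     * never been lowered from the "infinite" starting value. */
    static int tcp_in_initial_slowstart(const struct tcp_sock_demo *tp)
    {
        return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
    }

    int main(void)
    {
        struct tcp_sock_demo fresh = { .snd_ssthresh = TCP_INFINITE_SSTHRESH };
        struct tcp_sock_demo after_loss = { .snd_ssthresh = 42 };

        printf("fresh socket:     %d\n", tcp_in_initial_slowstart(&fresh));
        printf("after first loss: %d\n", tcp_in_initial_slowstart(&after_loss));
        return 0;
    }
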
diff --git a/net/ipv6/tunnel6.c b/net/ipv6/tunnel6.c
index 633ad789eff..51e2832d13a 100644
--- a/net/ipv6/tunnel6.c
+++ b/net/ipv6/tunnel6.c
@@ -133,13 +133,13 @@ static void tunnel6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
break;
}
-static struct inet6_protocol tunnel6_protocol = {
+static const struct inet6_protocol tunnel6_protocol = {
.handler = tunnel6_rcv,
.err_handler = tunnel6_err,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
-static struct inet6_protocol tunnel46_protocol = {
+static const struct inet6_protocol tunnel46_protocol = {
.handler = tunnel46_rcv,
.err_handler = tunnel6_err,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 33b59bd92c4..b265b7047d3 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -638,6 +638,47 @@ static void udp_v6_flush_pending_frames(struct sock *sk)
}
}
+/**
+ * udp6_hwcsum_outgoing - handle outgoing HW checksumming
+ * @sk: socket we are sending on
+ * @skb: sk_buff containing the filled-in UDP header
+ * (checksum field must be zeroed out)
+ * @saddr: source address for the pseudo-header
+ * @daddr: destination address for the pseudo-header
+ * @len: length covered by the UDP checksum
+ */
+static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
+ const struct in6_addr *saddr,
+ const struct in6_addr *daddr, int len)
+{
+ unsigned int offset;
+ struct udphdr *uh = udp_hdr(skb);
+ __wsum csum = 0;
+
+ if (skb_queue_len(&sk->sk_write_queue) == 1) {
+ /* Only one fragment on the socket. */
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+ uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
+ } else {
+ /*
+ * HW checksum won't work, as there are two or more
+ * fragments on the socket, so the checksums of all
+ * sk_buffs have to be combined in software.
+ */
+ offset = skb_transport_offset(skb);
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb_queue_walk(&sk->sk_write_queue, skb) {
+ csum = csum_add(csum, skb->csum);
+ }
+
+ uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
+ csum);
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+ }
+}
+
/*
* Sending
*/
@@ -668,7 +709,11 @@ static int udp_v6_push_pending_frames(struct sock *sk)
if (is_udplite)
csum = udplite_csum_outgoing(sk, skb);
- else
+ else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
+ udp6_hwcsum_outgoing(sk, skb, &fl->fl6_src, &fl->fl6_dst,
+ up->len);
+ goto send;
+ } else
csum = udp_csum_outgoing(sk, skb);
/* add protocol-dependent pseudo-header */
@@ -677,13 +722,20 @@ static int udp_v6_push_pending_frames(struct sock *sk)
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
+send:
err = ip6_push_pending_frames(sk);
+ if (err) {
+ if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
+ UDP6_INC_STATS_USER(sock_net(sk),
+ UDP_MIB_SNDBUFERRORS, is_udplite);
+ err = 0;
+ }
+ } else
+ UDP6_INC_STATS_USER(sock_net(sk),
+ UDP_MIB_OUTDATAGRAMS, is_udplite);
out:
up->len = 0;
up->pending = 0;
- if (!err)
- UDP6_INC_STATS_USER(sock_net(sk),
- UDP_MIB_OUTDATAGRAMS, is_udplite);
return err;
}
@@ -900,11 +952,8 @@ do_udp_sendmsg:
hlimit = ip6_dst_hoplimit(dst);
}
- if (tclass < 0) {
+ if (tclass < 0)
tclass = np->tclass;
- if (tclass < 0)
- tclass = 0;
- }
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
@@ -1032,9 +1081,102 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
}
#endif
-static struct inet6_protocol udpv6_protocol = {
+static int udp6_ufo_send_check(struct sk_buff *skb)
+{
+ struct ipv6hdr *ipv6h;
+ struct udphdr *uh;
+
+ if (!pskb_may_pull(skb, sizeof(*uh)))
+ return -EINVAL;
+
+ ipv6h = ipv6_hdr(skb);
+ uh = udp_hdr(skb);
+
+ uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
+ IPPROTO_UDP, 0);
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ return 0;
+}
+
+static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, int features)
+{
+ struct sk_buff *segs = ERR_PTR(-EINVAL);
+ unsigned int mss;
+ unsigned int unfrag_ip6hlen, unfrag_len;
+ struct frag_hdr *fptr;
+ u8 *mac_start, *prevhdr;
+ u8 nexthdr;
+ u8 frag_hdr_sz = sizeof(struct frag_hdr);
+ int offset;
+ __wsum csum;
+
+ mss = skb_shinfo(skb)->gso_size;
+ if (unlikely(skb->len <= mss))
+ goto out;
+
+ if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
+ /* Packet is from an untrusted source, reset gso_segs. */
+ int type = skb_shinfo(skb)->gso_type;
+
+ if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY) ||
+ !(type & (SKB_GSO_UDP))))
+ goto out;
+
+ skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
+
+ segs = NULL;
+ goto out;
+ }
+
+ /* Do software UFO. Complete and fill in the UDP checksum, as hardware
+ * cannot checksum UDP packets sent as multiple IP fragments.
+ */
+ offset = skb->csum_start - skb_headroom(skb);
+ csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ offset += skb->csum_offset;
+ *(__sum16 *)(skb->data + offset) = csum_fold(csum);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Check if there is enough headroom to insert fragment header. */
+ if ((skb_headroom(skb) < frag_hdr_sz) &&
+ pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
+ goto out;
+
+ /* Find the unfragmentable header and shift it left by frag_hdr_sz
+ * bytes to insert fragment header.
+ */
+ unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
+ nexthdr = *prevhdr;
+ *prevhdr = NEXTHDR_FRAGMENT;
+ unfrag_len = skb_network_header(skb) - skb_mac_header(skb) +
+ unfrag_ip6hlen;
+ mac_start = skb_mac_header(skb);
+ memmove(mac_start-frag_hdr_sz, mac_start, unfrag_len);
+
+ skb->mac_header -= frag_hdr_sz;
+ skb->network_header -= frag_hdr_sz;
+
+ fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
+ fptr->nexthdr = nexthdr;
+ fptr->reserved = 0;
+ ipv6_select_ident(fptr);
+
+ /* Fragment the skb. The IPv6 header and the remaining fields of the
+ * fragment header are updated in ipv6_gso_segment().
+ */
+ segs = skb_segment(skb, features);
+
+out:
+ return segs;
+}
+
+static const struct inet6_protocol udpv6_protocol = {
.handler = udpv6_rcv,
.err_handler = udpv6_err,
+ .gso_send_check = udp6_ufo_send_check,
+ .gso_segment = udp6_ufo_fragment,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
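
Note: both udp6_hwcsum_outgoing() and udp6_ufo_fragment() above fall back to software checksumming and replace a computed checksum of zero with all-ones (CSUM_MANGLED_0), because a zero UDP checksum field means "no checksum". A small userspace sketch of the one's-complement fold and that substitution, over toy data:

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 32-bit one's-complement accumulator into a 16-bit checksum,
     * roughly what the kernel's csum_fold() does. */
    static uint16_t csum_fold(uint32_t sum)
    {
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    int main(void)
    {
        /* Toy 16-bit words standing in for pseudo-header + UDP payload. */
        uint16_t words[] = { 0x4500, 0x0011, 0x1a2b, 0x3c4d };
        uint32_t sum = 0;
        uint16_t check;
        unsigned int i;

        for (i = 0; i < sizeof(words) / sizeof(words[0]); i++)
            sum += words[i];

        check = csum_fold(sum);
        /* UDP reserves 0 for "no checksum", so a computed 0 is transmitted
         * as 0xffff -- the same substitution as CSUM_MANGLED_0 above. */
        if (check == 0)
            check = 0xffff;
        printf("udp checksum: 0x%04x\n", check);
        return 0;
    }
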
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 4818c48688f..d737a27ee01 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -25,7 +25,7 @@ static void udplitev6_err(struct sk_buff *skb,
__udp6_lib_err(skb, opt, type, code, offset, info, &udplite_table);
}
-static struct inet6_protocol udplitev6_protocol = {
+static const struct inet6_protocol udplitev6_protocol = {
.handler = udplitev6_rcv,
.err_handler = udplitev6_err,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 3a3c677bc0f..8ec3d45cd1d 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -306,9 +306,26 @@ static void xfrm6_policy_fini(void)
xfrm_policy_unregister_afinfo(&xfrm6_policy_afinfo);
}
+#ifdef CONFIG_SYSCTL
+static struct ctl_table xfrm6_policy_table[] = {
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "xfrm6_gc_thresh",
+ .data = &xfrm6_dst_ops.gc_thresh,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ { }
+};
+
+static struct ctl_table_header *sysctl_hdr;
+#endif
+
int __init xfrm6_init(void)
{
int ret;
+ unsigned int gc_thresh;
ret = xfrm6_policy_init();
if (ret)
@@ -317,6 +334,23 @@ int __init xfrm6_init(void)
ret = xfrm6_state_init();
if (ret)
goto out_policy;
+ /*
+ * We need a good default value for the xfrm6 gc threshold.
+ * In ipv4 we set it to the route hash table size * 8, which
+ * is half the size of the maximum route cache for ipv4. It
+ * would be good to do the same thing for v6, except the table
+ * is constructed differently here: each table in a net namespace
+ * can have FIB6_TABLE_HASHSZ entries, so let's use the same
+ * computation we used for ipv4. Also, let's keep the initial
+ * gc_thresh at a minimum of 1024, since the ipv6 route cache
+ * defaults to that as a minimum as well.
+ */
+ gc_thresh = FIB6_TABLE_HASHSZ * 8;
+ xfrm6_dst_ops.gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh;
+#ifdef CONFIG_SYSCTL
+ sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path,
+ xfrm6_policy_table);
+#endif
out:
return ret;
out_policy:
@@ -326,6 +360,10 @@ out_policy:
void xfrm6_fini(void)
{
+#ifdef CONFIG_SYSCTL
+ if (sysctl_hdr)
+ unregister_net_sysctl_table(sysctl_hdr);
+#endif
//xfrm6_input_fini();
xfrm6_policy_fini();
xfrm6_state_fini();
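
Note: the xfrm6_init() hunk above derives the default xfrm6 gc threshold from the per-namespace FIB table size and clamps it to a floor of 1024. A runnable illustration of that arithmetic; the FIB6_TABLE_HASHSZ value used here is an assumption for the demo, since the real value depends on the kernel configuration:

    #include <stdio.h>

    /* Assumed table size for illustration only; the kernel's
     * FIB6_TABLE_HASHSZ depends on CONFIG_IPV6_MULTIPLE_TABLES. */
    #define FIB6_TABLE_HASHSZ 256

    int main(void)
    {
        unsigned int gc_thresh = FIB6_TABLE_HASHSZ * 8;

        /* Same clamp as xfrm6_init(): never go below 1024, the minimum
         * the IPv6 route cache uses as well. */
        gc_thresh = (gc_thresh < 1024) ? 1024 : gc_thresh;
        printf("xfrm6 gc_thresh default: %u\n", gc_thresh);
        return 0;
    }
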
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 80cf29aae09..50b43c57d5d 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -715,6 +715,7 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
struct sock *sk = sock->sk;
struct irda_sock *self = irda_sk(sk);
+ memset(&saddr, 0, sizeof(saddr));
if (peer) {
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
diff --git a/net/irda/ircomm/ircomm_event.c b/net/irda/ircomm/ircomm_event.c
index c35b3ef5c2f..d78554fedba 100644
--- a/net/irda/ircomm/ircomm_event.c
+++ b/net/irda/ircomm/ircomm_event.c
@@ -49,7 +49,7 @@ static int ircomm_state_waitr(struct ircomm_cb *self, IRCOMM_EVENT event,
static int ircomm_state_conn(struct ircomm_cb *self, IRCOMM_EVENT event,
struct sk_buff *skb, struct ircomm_info *info);
-char *ircomm_state[] = {
+const char *const ircomm_state[] = {
"IRCOMM_IDLE",
"IRCOMM_WAITI",
"IRCOMM_WAITR",
@@ -57,7 +57,7 @@ char *ircomm_state[] = {
};
#ifdef CONFIG_IRDA_DEBUG
-static char *ircomm_event[] = {
+static const char *const ircomm_event[] = {
"IRCOMM_CONNECT_REQUEST",
"IRCOMM_CONNECT_RESPONSE",
"IRCOMM_TTP_CONNECT_INDICATION",
diff --git a/net/irda/ircomm/ircomm_tty_attach.c b/net/irda/ircomm/ircomm_tty_attach.c
index 9032a1d1190..eafc010907c 100644
--- a/net/irda/ircomm/ircomm_tty_attach.c
+++ b/net/irda/ircomm/ircomm_tty_attach.c
@@ -80,7 +80,7 @@ static int ircomm_tty_state_ready(struct ircomm_tty_cb *self,
struct sk_buff *skb,
struct ircomm_tty_info *info);
-char *ircomm_tty_state[] = {
+const char *const ircomm_tty_state[] = {
"IRCOMM_TTY_IDLE",
"IRCOMM_TTY_SEARCH",
"IRCOMM_TTY_QUERY_PARAMETERS",
@@ -91,7 +91,7 @@ char *ircomm_tty_state[] = {
};
#ifdef CONFIG_IRDA_DEBUG
-static char *ircomm_tty_event[] = {
+static const char *const ircomm_tty_event[] = {
"IRCOMM_TTY_ATTACH_CABLE",
"IRCOMM_TTY_DETACH_CABLE",
"IRCOMM_TTY_DATA_REQUEST",
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index 4a105dc32dc..294e34d3517 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -44,7 +44,7 @@
#ifdef CONFIG_IRDA_DEBUG
/* FIXME: This one should go in irlmp.c */
-static const char *ias_charset_types[] = {
+static const char *const ias_charset_types[] = {
"CS_ASCII",
"CS_ISO_8859_1",
"CS_ISO_8859_2",
@@ -966,7 +966,7 @@ static void iriap_watchdog_timer_expired(void *data)
#ifdef CONFIG_PROC_FS
-static const char *ias_value_types[] = {
+static const char *const ias_value_types[] = {
"IAS_MISSING",
"IAS_INTEGER",
"IAS_OCT_SEQ",
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 774d73a7685..62116829b81 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -69,14 +69,14 @@ static int eth; /* Use "eth" or "irlan" name for devices */
static int access = ACCESS_PEER; /* PEER, DIRECT or HOSTED */
#ifdef CONFIG_PROC_FS
-static const char *irlan_access[] = {
+static const char *const irlan_access[] = {
"UNKNOWN",
"DIRECT",
"PEER",
"HOSTED"
};
-static const char *irlan_media[] = {
+static const char *const irlan_media[] = {
"UNKNOWN",
"802.3",
"802.5"
diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c
index 724bcf951b8..7b6b631f647 100644
--- a/net/irda/irlan/irlan_eth.c
+++ b/net/irda/irlan/irlan_eth.c
@@ -41,7 +41,8 @@
static int irlan_eth_open(struct net_device *dev);
static int irlan_eth_close(struct net_device *dev);
-static int irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
+ struct net_device *dev);
static void irlan_eth_set_multicast_list( struct net_device *dev);
static struct net_device_stats *irlan_eth_get_stats(struct net_device *dev);
@@ -162,7 +163,8 @@ static int irlan_eth_close(struct net_device *dev)
* Transmits ethernet frames over IrDA link.
*
*/
-static int irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct irlan_cb *self = netdev_priv(dev);
int ret;
@@ -177,7 +179,7 @@ static int irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev)
/* Did the realloc succeed? */
if (new_skb == NULL)
- return 0;
+ return NETDEV_TX_OK;
/* Use the new skb instead */
skb = new_skb;
@@ -209,7 +211,7 @@ static int irlan_eth_xmit(struct sk_buff *skb, struct net_device *dev)
self->stats.tx_bytes += skb->len;
}
- return 0;
+ return NETDEV_TX_OK;
}
/*
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index e4965b764b9..356e65b1dc4 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -63,7 +63,7 @@ static void irlap_init_qos_capabilities(struct irlap_cb *self,
struct qos_info *qos_user);
#ifdef CONFIG_IRDA_DEBUG
-static char *lap_reasons[] = {
+static const char *const lap_reasons[] = {
"ERROR, NOT USED",
"LAP_DISC_INDICATION",
"LAP_NO_RESPONSE",
diff --git a/net/irda/irlap_event.c b/net/irda/irlap_event.c
index 16c4ef0f5c1..c5c51959e3c 100644
--- a/net/irda/irlap_event.c
+++ b/net/irda/irlap_event.c
@@ -78,7 +78,7 @@ static int irlap_state_reset_check(struct irlap_cb *, IRLAP_EVENT event,
struct sk_buff *, struct irlap_info *);
#ifdef CONFIG_IRDA_DEBUG
-static const char *irlap_event[] = {
+static const char *const irlap_event[] = {
"DISCOVERY_REQUEST",
"CONNECT_REQUEST",
"CONNECT_RESPONSE",
@@ -120,7 +120,7 @@ static const char *irlap_event[] = {
};
#endif /* CONFIG_IRDA_DEBUG */
-const char *irlap_state[] = {
+const char *const irlap_state[] = {
"LAP_NDM",
"LAP_QUERY",
"LAP_REPLY",
diff --git a/net/irda/irlmp_event.c b/net/irda/irlmp_event.c
index 78cce0cb073..c1fb5db8104 100644
--- a/net/irda/irlmp_event.c
+++ b/net/irda/irlmp_event.c
@@ -33,13 +33,13 @@
#include <net/irda/irlmp_frame.h>
#include <net/irda/irlmp_event.h>
-const char *irlmp_state[] = {
+const char *const irlmp_state[] = {
"LAP_STANDBY",
"LAP_U_CONNECT",
"LAP_ACTIVE",
};
-const char *irlsap_state[] = {
+const char *const irlsap_state[] = {
"LSAP_DISCONNECTED",
"LSAP_CONNECT",
"LSAP_CONNECT_PEND",
@@ -49,7 +49,7 @@ const char *irlsap_state[] = {
};
#ifdef CONFIG_IRDA_DEBUG
-static const char *irlmp_event[] = {
+static const char *const irlmp_event[] = {
"LM_CONNECT_REQUEST",
"LM_CONNECT_CONFIRM",
"LM_CONNECT_RESPONSE",
diff --git a/net/irda/irnet/irnet_ppp.h b/net/irda/irnet/irnet_ppp.h
index d9f8bd4ebd0..b5df2418f90 100644
--- a/net/irda/irnet/irnet_ppp.h
+++ b/net/irda/irnet/irnet_ppp.h
@@ -95,7 +95,7 @@ static int
/**************************** VARIABLES ****************************/
/* Filesystem callbacks (to call us) */
-static struct file_operations irnet_device_fops =
+static const struct file_operations irnet_device_fops =
{
.owner = THIS_MODULE,
.read = dev_irnet_read,
diff --git a/net/irda/irnetlink.c b/net/irda/irnetlink.c
index 8dd7ed7e7c1..476b307bd80 100644
--- a/net/irda/irnetlink.c
+++ b/net/irda/irnetlink.c
@@ -115,7 +115,7 @@ static int irda_nl_get_mode(struct sk_buff *skb, struct genl_info *info)
genlmsg_end(msg, hdr);
- return genlmsg_unicast(msg, info->snd_pid);
+ return genlmsg_reply(msg, info);
err_out:
nlmsg_free(msg);
diff --git a/net/irda/irproc.c b/net/irda/irproc.c
index 8ff1861649e..318766e5dbd 100644
--- a/net/irda/irproc.c
+++ b/net/irda/irproc.c
@@ -34,21 +34,21 @@
#include <net/irda/irlap.h>
#include <net/irda/irlmp.h>
-extern struct file_operations discovery_seq_fops;
-extern struct file_operations irlap_seq_fops;
-extern struct file_operations irlmp_seq_fops;
-extern struct file_operations irttp_seq_fops;
-extern struct file_operations irias_seq_fops;
+extern const struct file_operations discovery_seq_fops;
+extern const struct file_operations irlap_seq_fops;
+extern const struct file_operations irlmp_seq_fops;
+extern const struct file_operations irttp_seq_fops;
+extern const struct file_operations irias_seq_fops;
struct irda_entry {
const char *name;
- struct file_operations *fops;
+ const struct file_operations *fops;
};
struct proc_dir_entry *proc_irda;
EXPORT_SYMBOL(proc_irda);
-static struct irda_entry irda_dirs[] = {
+static const struct irda_entry irda_dirs[] = {
{"discovery", &discovery_seq_fops},
{"irttp", &irttp_seq_fops},
{"irlmp", &irlmp_seq_fops},
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 49c15b48408..d985d163dcf 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -34,7 +34,7 @@
static char iucv_userid[80];
-static struct proto_ops iucv_sock_ops;
+static const struct proto_ops iucv_sock_ops;
static struct proto iucv_proto = {
.name = "AF_IUCV",
@@ -59,8 +59,8 @@ do { \
DEFINE_WAIT(__wait); \
long __timeo = timeo; \
ret = 0; \
+ prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \
while (!(condition)) { \
- prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \
if (!__timeo) { \
ret = -EAGAIN; \
break; \
@@ -361,10 +361,9 @@ static void iucv_sock_cleanup_listen(struct sock *parent)
}
parent->sk_state = IUCV_CLOSED;
- sock_set_flag(parent, SOCK_ZAPPED);
}
-/* Kill socket */
+/* Kill socket (only if zapped and orphaned) */
static void iucv_sock_kill(struct sock *sk)
{
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
@@ -426,17 +425,18 @@ static void iucv_sock_close(struct sock *sk)
skb_queue_purge(&iucv->send_skb_q);
skb_queue_purge(&iucv->backlog_skb_q);
-
- sock_set_flag(sk, SOCK_ZAPPED);
break;
default:
sock_set_flag(sk, SOCK_ZAPPED);
+ /* nothing to do here */
break;
}
+ /* mark socket for deletion by iucv_sock_kill() */
+ sock_set_flag(sk, SOCK_ZAPPED);
+
release_sock(sk);
- iucv_sock_kill(sk);
}
static void iucv_sock_init(struct sock *sk, struct sock *parent)
@@ -569,6 +569,7 @@ struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
if (sk->sk_state == IUCV_CONNECTED ||
sk->sk_state == IUCV_SEVERED ||
+ sk->sk_state == IUCV_DISCONN || /* due to PM restore */
!newsock) {
iucv_accept_unlink(sk);
if (newsock)
@@ -1035,6 +1036,10 @@ out:
return err;
}
+/* iucv_fragment_skb() - Fragment a single IUCV message into multiple skb's
+ *
+ * Locking: must be called with message_q.lock held
+ */
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
{
int dataleft, size, copied = 0;
@@ -1069,6 +1074,10 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)
return 0;
}
+/* iucv_process_message() - Receive a single outstanding IUCV message
+ *
+ * Locking: must be called with message_q.lock held
+ */
static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
struct iucv_path *path,
struct iucv_message *msg)
@@ -1119,6 +1128,10 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
}
+/* iucv_process_message_q() - Process outstanding IUCV messages
+ *
+ * Locking: must be called with message_q.lock held
+ */
static void iucv_process_message_q(struct sock *sk)
{
struct iucv_sock *iucv = iucv_sk(sk);
@@ -1209,6 +1222,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
kfree_skb(skb);
/* Queue backlog skbs */
+ spin_lock_bh(&iucv->message_q.lock);
rskb = skb_dequeue(&iucv->backlog_skb_q);
while (rskb) {
if (sock_queue_rcv_skb(sk, rskb)) {
@@ -1220,11 +1234,10 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
}
}
if (skb_queue_empty(&iucv->backlog_skb_q)) {
- spin_lock_bh(&iucv->message_q.lock);
if (!list_empty(&iucv->message_q.list))
iucv_process_message_q(sk);
- spin_unlock_bh(&iucv->message_q.lock);
}
+ spin_unlock_bh(&iucv->message_q.lock);
}
done:
@@ -1682,7 +1695,7 @@ static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16])
bh_unlock_sock(sk);
}
-static struct proto_ops iucv_sock_ops = {
+static const struct proto_ops iucv_sock_ops = {
.family = PF_IUCV,
.owner = THIS_MODULE,
.release = iucv_sock_release,
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index c833481d32e..3973d0e61e5 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -79,6 +79,14 @@ static int iucv_bus_match(struct device *dev, struct device_driver *drv)
return 0;
}
+enum iucv_pm_states {
+ IUCV_PM_INITIAL = 0,
+ IUCV_PM_FREEZING = 1,
+ IUCV_PM_THAWING = 2,
+ IUCV_PM_RESTORING = 3,
+};
+static enum iucv_pm_states iucv_pm_state;
+
static int iucv_pm_prepare(struct device *);
static void iucv_pm_complete(struct device *);
static int iucv_pm_freeze(struct device *);
@@ -354,7 +362,7 @@ static int iucv_query_maxconn(void)
" srl %0,28\n"
: "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
if (ccode == 0)
- iucv_max_pathid = reg0;
+ iucv_max_pathid = reg1;
kfree(param);
return ccode ? -EPERM : 0;
}
@@ -856,7 +864,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
int rc;
local_bh_disable();
- if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+ if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -905,7 +913,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
spin_lock_bh(&iucv_table_lock);
iucv_cleanup_queue();
- if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+ if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -965,7 +973,7 @@ int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
int rc;
local_bh_disable();
- if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+ if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -997,7 +1005,7 @@ int iucv_path_resume(struct iucv_path *path, u8 userdata[16])
int rc;
local_bh_disable();
- if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+ if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1026,7 +1034,7 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
int rc;
preempt_disable();
- if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+ if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1060,7 +1068,7 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
int rc;
local_bh_disable();
- if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+ if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1152,7 +1160,7 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
if (msg->flags & IUCV_IPRMDATA)
return iucv_message_receive_iprmdata(path, msg, flags,
buffer, size, residual);
- if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+ if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1225,7 +1233,7 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
int rc;
local_bh_disable();
- if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+ if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1264,7 +1272,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
int rc;
local_bh_disable();
- if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+ if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1314,7 +1322,7 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
union iucv_param *parm;
int rc;
- if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+ if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1401,7 +1409,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
int rc;
local_bh_disable();
- if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) {
+ if (cpus_empty(iucv_buffer_cpumask)) {
rc = -EIO;
goto out;
}
@@ -1875,6 +1883,7 @@ static int iucv_pm_freeze(struct device *dev)
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "iucv_pm_freeze\n");
#endif
+ iucv_pm_state = IUCV_PM_FREEZING;
for_each_cpu_mask_nr(cpu, iucv_irq_cpumask)
smp_call_function_single(cpu, iucv_block_cpu_almost, NULL, 1);
if (dev->driver && dev->driver->pm && dev->driver->pm->freeze)
@@ -1899,6 +1908,7 @@ static int iucv_pm_thaw(struct device *dev)
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "iucv_pm_thaw\n");
#endif
+ iucv_pm_state = IUCV_PM_THAWING;
if (!iucv_path_table) {
rc = iucv_enable();
if (rc)
@@ -1933,6 +1943,10 @@ static int iucv_pm_restore(struct device *dev)
#ifdef CONFIG_PM_DEBUG
printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table);
#endif
+ if ((iucv_pm_state != IUCV_PM_RESTORING) && iucv_path_table)
+ pr_warning("Suspending Linux did not completely close all IUCV "
+ "connections\n");
+ iucv_pm_state = IUCV_PM_RESTORING;
if (cpus_empty(iucv_irq_cpumask)) {
rc = iucv_query_maxconn();
rc = iucv_enable();
diff --git a/net/key/af_key.c b/net/key/af_key.c
index dba9abd27f9..4e98193dfa0 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3705,7 +3705,7 @@ static void pfkey_seq_stop(struct seq_file *f, void *v)
read_unlock(&pfkey_table_lock);
}
-static struct seq_operations pfkey_seq_ops = {
+static const struct seq_operations pfkey_seq_ops = {
.start = pfkey_seq_start,
.next = pfkey_seq_next,
.stop = pfkey_seq_stop,
@@ -3718,7 +3718,7 @@ static int pfkey_seq_open(struct inode *inode, struct file *file)
sizeof(struct seq_net_private));
}
-static struct file_operations pfkey_proc_ops = {
+static const struct file_operations pfkey_proc_ops = {
.open = pfkey_seq_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
index 2ba1bc4f3c3..bda96d18fd9 100644
--- a/net/lapb/lapb_iface.c
+++ b/net/lapb/lapb_iface.c
@@ -407,7 +407,7 @@ int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
return lapb->callbacks.data_indication(lapb->dev, skb);
kfree_skb(skb);
- return NET_RX_CN_HIGH; /* For now; must be != NET_RX_DROP */
+ return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
}
int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 9208cf5f2bd..c45eee1c0e8 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -914,6 +914,7 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
struct llc_sock *llc = llc_sk(sk);
int rc = 0;
+ memset(&sllc, 0, sizeof(sllc));
lock_sock(sk);
if (sock_flag(sk, SOCK_ZAPPED))
goto out;
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index f97be471fe2..be47ac427f6 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -143,7 +143,7 @@ out:
return 0;
}
-static char *llc_conn_state_names[] = {
+static const char *const llc_conn_state_names[] = {
[LLC_CONN_STATE_ADM] = "adm",
[LLC_CONN_STATE_SETUP] = "setup",
[LLC_CONN_STATE_NORMAL] = "normal",
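
Note: many of the surrounding hunks (ircomm, irlap, irlmp, llc, proc) change string tables from "char *name[]" to "const char *const name[]", which makes the pointer array itself read-only rather than only the string literals it points to. A minimal sketch of the resulting declaration and how it is indexed:

    #include <stdio.h>

    /* Both the pointers and the pointed-to strings are immutable, so the
     * whole table can live in read-only data. */
    static const char *const conn_state_names_demo[] = {
        "adm",
        "setup",
        "normal",
    };

    int main(void)
    {
        unsigned int i;

        for (i = 0; i < sizeof(conn_state_names_demo) / sizeof(conn_state_names_demo[0]); i++)
            printf("%u: %s\n", i, conn_state_names_demo[i]);
        return 0;
    }
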
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 7836ee92898..4d5543af312 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -6,7 +6,6 @@ config MAC80211
select CRYPTO_ARC4
select CRYPTO_AES
select CRC32
- select WIRELESS_EXT
---help---
This option enables the hardware independent IEEE 802.11
networking stack.
@@ -14,24 +13,7 @@ config MAC80211
comment "CFG80211 needs to be enabled for MAC80211"
depends on CFG80211=n
-config MAC80211_DEFAULT_PS
- bool "enable powersave by default"
- depends on MAC80211
- default y
- help
- This option enables powersave mode by default.
-
- If this causes your applications to misbehave you should fix your
- applications instead -- they need to register their network
- latency requirement, see Documentation/power/pm_qos_interface.txt.
-
-config MAC80211_DEFAULT_PS_VALUE
- int
- default 1 if MAC80211_DEFAULT_PS
- default 0
-
-menu "Rate control algorithm selection"
- depends on MAC80211 != n
+if MAC80211 != n
config MAC80211_RC_PID
bool "PID controller based rate control algorithm" if EMBEDDED
@@ -78,17 +60,17 @@ config MAC80211_RC_DEFAULT
default "pid" if MAC80211_RC_DEFAULT_PID
default ""
-endmenu
+endif
config MAC80211_MESH
bool "Enable mac80211 mesh networking (pre-802.11s) support"
depends on MAC80211 && EXPERIMENTAL
- depends on BROKEN
---help---
This option enables support of Draft 802.11s mesh networking.
- The implementation is based on Draft 1.08 of the Mesh Networking
- amendment. For more information visit http://o11s.org/.
-
+ The implementation is based on Draft 2.08 of the Mesh Networking
+ amendment. However, no compliance with that draft is claimed or even
+ possible, as drafts leave a number of identifiers to be defined after
+ ratification. For more information visit http://o11s.org/.
config MAC80211_LEDS
bool "Enable LED triggers"
@@ -222,3 +204,15 @@ config MAC80211_DEBUG_COUNTERS
and show them in debugfs.
If unsure, say N.
+
+config MAC80211_DRIVER_API_TRACER
+ bool "Driver API tracer"
+ depends on MAC80211_DEBUG_MENU
+ depends on EVENT_TRACING
+ help
+ Say Y here to make mac80211 register with the ftrace
+ framework for the driver API -- you can then see which
+ driver methods it is calling by looking at the trace.
+
+ If unsure, say N.
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 0e3ab88bb70..9f3cf712932 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -3,7 +3,6 @@ obj-$(CONFIG_MAC80211) += mac80211.o
# mac80211 objects
mac80211-y := \
main.o \
- wext.o \
sta_info.o \
wep.o \
wpa.o \
@@ -41,6 +40,9 @@ mac80211-$(CONFIG_MAC80211_MESH) += \
mac80211-$(CONFIG_PM) += pm.o
+mac80211-$(CONFIG_MAC80211_DRIVER_API_TRACER) += driver-trace.o
+CFLAGS_driver-trace.o := -I$(src)
+
# objects for PID algorithm
rc80211_pid-y := rc80211_pid_algo.o
rc80211_pid-$(CONFIG_MAC80211_DEBUGFS) += rc80211_pid_debugfs.o
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 9e5762ad307..bd765f30dba 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -381,11 +381,16 @@ static void ieee80211_agg_splice_packets(struct ieee80211_local *local,
&local->hw, queue,
IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+ if (!(sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK))
+ return;
+
+ if (WARN(!sta->ampdu_mlme.tid_tx[tid],
+ "TID %d gone but expected when splicing aggregates from"
+ "the pending queue\n", tid))
+ return;
+
if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) {
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
- /* mark queue as pending, it is stopped already */
- __set_bit(IEEE80211_QUEUE_STOP_REASON_PENDING,
- &local->queue_stop_reasons[queue]);
/* copy over remaining packets */
skb_queue_splice_tail_init(
&sta->ampdu_mlme.tid_tx[tid]->pending,
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 3f47276caeb..5608f6c6841 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -57,36 +57,21 @@ static int ieee80211_add_iface(struct wiphy *wiphy, char *name,
return 0;
}
-static int ieee80211_del_iface(struct wiphy *wiphy, int ifindex)
+static int ieee80211_del_iface(struct wiphy *wiphy, struct net_device *dev)
{
- struct net_device *dev;
- struct ieee80211_sub_if_data *sdata;
-
- /* we're under RTNL */
- dev = __dev_get_by_index(&init_net, ifindex);
- if (!dev)
- return -ENODEV;
-
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
- ieee80211_if_remove(sdata);
+ ieee80211_if_remove(IEEE80211_DEV_TO_SUB_IF(dev));
return 0;
}
-static int ieee80211_change_iface(struct wiphy *wiphy, int ifindex,
+static int ieee80211_change_iface(struct wiphy *wiphy,
+ struct net_device *dev,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
- struct net_device *dev;
struct ieee80211_sub_if_data *sdata;
int ret;
- /* we're under RTNL */
- dev = __dev_get_by_index(&init_net, ifindex);
- if (!dev)
- return -ENODEV;
-
if (!nl80211_type_check(type))
return -EINVAL;
@@ -338,6 +323,8 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
+ sinfo->generation = sdata->local->sta_generation;
+
sinfo->filled = STATION_INFO_INACTIVE_TIME |
STATION_INFO_RX_BYTES |
STATION_INFO_TX_BYTES |
@@ -924,6 +911,8 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop,
else
memset(next_hop, 0, ETH_ALEN);
+ pinfo->generation = mesh_paths_generation;
+
pinfo->filled = MPATH_INFO_FRAME_QLEN |
MPATH_INFO_DSN |
MPATH_INFO_METRIC |
@@ -1177,123 +1166,29 @@ static int ieee80211_scan(struct wiphy *wiphy,
static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_auth_request *req)
{
- struct ieee80211_sub_if_data *sdata;
-
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
- switch (req->auth_type) {
- case NL80211_AUTHTYPE_OPEN_SYSTEM:
- sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_OPEN;
- break;
- case NL80211_AUTHTYPE_SHARED_KEY:
- sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_SHARED_KEY;
- break;
- case NL80211_AUTHTYPE_FT:
- sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_FT;
- break;
- case NL80211_AUTHTYPE_NETWORK_EAP:
- sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_LEAP;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- memcpy(sdata->u.mgd.bssid, req->peer_addr, ETH_ALEN);
- sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
- sdata->u.mgd.flags |= IEEE80211_STA_BSSID_SET;
-
- /* TODO: req->chan */
- sdata->u.mgd.flags |= IEEE80211_STA_AUTO_CHANNEL_SEL;
-
- if (req->ssid) {
- sdata->u.mgd.flags |= IEEE80211_STA_SSID_SET;
- memcpy(sdata->u.mgd.ssid, req->ssid, req->ssid_len);
- sdata->u.mgd.ssid_len = req->ssid_len;
- sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL;
- }
-
- kfree(sdata->u.mgd.sme_auth_ie);
- sdata->u.mgd.sme_auth_ie = NULL;
- sdata->u.mgd.sme_auth_ie_len = 0;
- if (req->ie) {
- sdata->u.mgd.sme_auth_ie = kmalloc(req->ie_len, GFP_KERNEL);
- if (sdata->u.mgd.sme_auth_ie == NULL)
- return -ENOMEM;
- memcpy(sdata->u.mgd.sme_auth_ie, req->ie, req->ie_len);
- sdata->u.mgd.sme_auth_ie_len = req->ie_len;
- }
-
- sdata->u.mgd.flags |= IEEE80211_STA_EXT_SME;
- sdata->u.mgd.state = IEEE80211_STA_MLME_DIRECT_PROBE;
- ieee80211_sta_req_auth(sdata);
- return 0;
+ return ieee80211_mgd_auth(IEEE80211_DEV_TO_SUB_IF(dev), req);
}
static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_assoc_request *req)
{
- struct ieee80211_sub_if_data *sdata;
- int ret;
-
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
- if (memcmp(sdata->u.mgd.bssid, req->peer_addr, ETH_ALEN) != 0 ||
- !(sdata->u.mgd.flags & IEEE80211_STA_AUTHENTICATED))
- return -ENOLINK; /* not authenticated */
-
- sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
- sdata->u.mgd.flags |= IEEE80211_STA_BSSID_SET;
-
- /* TODO: req->chan */
- sdata->u.mgd.flags |= IEEE80211_STA_AUTO_CHANNEL_SEL;
-
- if (req->ssid) {
- sdata->u.mgd.flags |= IEEE80211_STA_SSID_SET;
- memcpy(sdata->u.mgd.ssid, req->ssid, req->ssid_len);
- sdata->u.mgd.ssid_len = req->ssid_len;
- sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL;
- } else
- sdata->u.mgd.flags |= IEEE80211_STA_AUTO_SSID_SEL;
-
- ret = ieee80211_sta_set_extra_ie(sdata, req->ie, req->ie_len);
- if (ret && ret != -EALREADY)
- return ret;
-
- if (req->use_mfp) {
- sdata->u.mgd.mfp = IEEE80211_MFP_REQUIRED;
- sdata->u.mgd.flags |= IEEE80211_STA_MFP_ENABLED;
- } else {
- sdata->u.mgd.mfp = IEEE80211_MFP_DISABLED;
- sdata->u.mgd.flags &= ~IEEE80211_STA_MFP_ENABLED;
- }
-
- if (req->control_port)
- sdata->u.mgd.flags |= IEEE80211_STA_CONTROL_PORT;
- else
- sdata->u.mgd.flags &= ~IEEE80211_STA_CONTROL_PORT;
-
- sdata->u.mgd.flags |= IEEE80211_STA_EXT_SME;
- sdata->u.mgd.state = IEEE80211_STA_MLME_ASSOCIATE;
- ieee80211_sta_req_auth(sdata);
- return 0;
+ return ieee80211_mgd_assoc(IEEE80211_DEV_TO_SUB_IF(dev), req);
}
static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_deauth_request *req)
+ struct cfg80211_deauth_request *req,
+ void *cookie)
{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
- /* TODO: req->ie, req->peer_addr */
- return ieee80211_sta_deauthenticate(sdata, req->reason_code);
+ return ieee80211_mgd_deauth(IEEE80211_DEV_TO_SUB_IF(dev),
+ req, cookie);
}
static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev,
- struct cfg80211_disassoc_request *req)
+ struct cfg80211_disassoc_request *req,
+ void *cookie)
{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
- /* TODO: req->ie, req->peer_addr */
- return ieee80211_sta_disassociate(sdata, req->reason_code);
+ return ieee80211_mgd_disassoc(IEEE80211_DEV_TO_SUB_IF(dev),
+ req, cookie);
}
static int ieee80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
@@ -1374,6 +1269,16 @@ static int ieee80211_get_tx_power(struct wiphy *wiphy, int *dbm)
return 0;
}
+static int ieee80211_set_wds_peer(struct wiphy *wiphy, struct net_device *dev,
+ u8 *addr)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ memcpy(&sdata->u.wds.remote_addr, addr, ETH_ALEN);
+
+ return 0;
+}
+
static void ieee80211_rfkill_poll(struct wiphy *wiphy)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
@@ -1381,6 +1286,85 @@ static void ieee80211_rfkill_poll(struct wiphy *wiphy)
drv_rfkill_poll(local);
}
+#ifdef CONFIG_NL80211_TESTMODE
+static int ieee80211_testmode_cmd(struct wiphy *wiphy, void *data, int len)
+{
+ struct ieee80211_local *local = wiphy_priv(wiphy);
+
+ if (!local->ops->testmode_cmd)
+ return -EOPNOTSUPP;
+
+ return local->ops->testmode_cmd(&local->hw, data, len);
+}
+#endif
+
+static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ bool enabled, int timeout)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+ struct ieee80211_conf *conf = &local->hw.conf;
+
+ if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
+ return -EOPNOTSUPP;
+
+ if (enabled == sdata->u.mgd.powersave &&
+ timeout == conf->dynamic_ps_timeout)
+ return 0;
+
+ sdata->u.mgd.powersave = enabled;
+ conf->dynamic_ps_timeout = timeout;
+
+ if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
+
+ ieee80211_recalc_ps(local, -1);
+
+ return 0;
+}
+
+static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
+ struct net_device *dev,
+ const u8 *addr,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+ int i, err = -EINVAL;
+ u32 target_rate;
+ struct ieee80211_supported_band *sband;
+
+ sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+
+ /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates
+ * target_rate = X, rate->fixed = 1 means only rate X
+ * target_rate = X, rate->fixed = 0 means all rates <= X */
+ sdata->max_ratectrl_rateidx = -1;
+ sdata->force_unicast_rateidx = -1;
+
+ if (mask->fixed)
+ target_rate = mask->fixed / 100;
+ else if (mask->maxrate)
+ target_rate = mask->maxrate / 100;
+ else
+ return 0;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ struct ieee80211_rate *brate = &sband->bitrates[i];
+ int this_rate = brate->bitrate;
+
+ if (target_rate == this_rate) {
+ sdata->max_ratectrl_rateidx = i;
+ if (mask->fixed)
+ sdata->force_unicast_rateidx = i;
+ err = 0;
+ break;
+ }
+ }
+
+ return err;
+}
+
struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -1422,5 +1406,9 @@ struct cfg80211_ops mac80211_config_ops = {
.set_wiphy_params = ieee80211_set_wiphy_params,
.set_tx_power = ieee80211_set_tx_power,
.get_tx_power = ieee80211_get_tx_power,
+ .set_wds_peer = ieee80211_set_wds_peer,
.rfkill_poll = ieee80211_rfkill_poll,
+ CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd)
+ .set_power_mgmt = ieee80211_set_power_mgmt,
+ .set_bitrate_mask = ieee80211_set_bitrate_mask,
};
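
Note: ieee80211_set_bitrate_mask() above matches the requested rate against the band's bitrate table after dividing by 100. The sketch below assumes mask->fixed is given in kbit/s, which is what the division against a table in 100 kbit/s units (like ieee80211_rate.bitrate) implies; the table itself is the 802.11b/g rate set, for illustration only:

    #include <stdio.h>

    /* 802.11b/g rates in units of 100 kbit/s, mirroring ieee80211_rate.bitrate. */
    static const int bitrates[] = { 10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540 };

    int main(void)
    {
        /* Hypothetical fixed-rate request of 5.5 Mbit/s (5500 kbit/s),
         * converted the same way as "mask->fixed / 100" above. */
        int target_rate = 5500 / 100;
        int rate_idx = -1;
        unsigned int i;

        for (i = 0; i < sizeof(bitrates) / sizeof(bitrates[0]); i++) {
            if (bitrates[i] == target_rate) {
                rate_idx = (int)i;
                break;
            }
        }
        printf("fixed 5.5 Mbit/s maps to rate index %d\n", rate_idx);
        return 0;
    }
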
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 6c439cd5cce..96991b68f04 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -175,7 +175,7 @@ static ssize_t queues_read(struct file *file, char __user *user_buf,
for (q = 0; q < local->hw.queues; q++)
res += sprintf(buf + res, "%02d: %#.8lx/%d\n", q,
local->queue_stop_reasons[q],
- __netif_subqueue_stopped(local->mdev, q));
+ skb_queue_len(&local->pending[q]));
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
return simple_read_from_buffer(user_buf, count, ppos, buf, res);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index e3420329f4e..61234e79022 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -95,33 +95,9 @@ IEEE80211_IF_FILE(force_unicast_rateidx, force_unicast_rateidx, DEC);
IEEE80211_IF_FILE(max_ratectrl_rateidx, max_ratectrl_rateidx, DEC);
/* STA attributes */
-IEEE80211_IF_FILE(state, u.mgd.state, DEC);
IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
-IEEE80211_IF_FILE(prev_bssid, u.mgd.prev_bssid, MAC);
-IEEE80211_IF_FILE(ssid_len, u.mgd.ssid_len, SIZE);
IEEE80211_IF_FILE(aid, u.mgd.aid, DEC);
-IEEE80211_IF_FILE(ap_capab, u.mgd.ap_capab, HEX);
IEEE80211_IF_FILE(capab, u.mgd.capab, HEX);
-IEEE80211_IF_FILE(extra_ie_len, u.mgd.extra_ie_len, SIZE);
-IEEE80211_IF_FILE(auth_tries, u.mgd.auth_tries, DEC);
-IEEE80211_IF_FILE(assoc_tries, u.mgd.assoc_tries, DEC);
-IEEE80211_IF_FILE(auth_algs, u.mgd.auth_algs, HEX);
-IEEE80211_IF_FILE(auth_alg, u.mgd.auth_alg, DEC);
-IEEE80211_IF_FILE(auth_transaction, u.mgd.auth_transaction, DEC);
-
-static ssize_t ieee80211_if_fmt_flags(
- const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
-{
- return scnprintf(buf, buflen, "%s%s%s%s%s%s%s\n",
- sdata->u.mgd.flags & IEEE80211_STA_SSID_SET ? "SSID\n" : "",
- sdata->u.mgd.flags & IEEE80211_STA_BSSID_SET ? "BSSID\n" : "",
- sdata->u.mgd.flags & IEEE80211_STA_PREV_BSSID_SET ? "prev BSSID\n" : "",
- sdata->u.mgd.flags & IEEE80211_STA_AUTHENTICATED ? "AUTH\n" : "",
- sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED ? "ASSOC\n" : "",
- sdata->u.mgd.flags & IEEE80211_STA_PROBEREQ_POLL ? "PROBEREQ POLL\n" : "",
- sdata->vif.bss_conf.use_cts_prot ? "CTS prot\n" : "");
-}
-__IEEE80211_IF_FILE(flags);
/* AP attributes */
IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
@@ -140,6 +116,8 @@ IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
#ifdef CONFIG_MAC80211_MESH
/* Mesh stats attributes */
+IEEE80211_IF_FILE(fwded_mcast, u.mesh.mshstats.fwded_mcast, DEC);
+IEEE80211_IF_FILE(fwded_unicast, u.mesh.mshstats.fwded_unicast, DEC);
IEEE80211_IF_FILE(fwded_frames, u.mesh.mshstats.fwded_frames, DEC);
IEEE80211_IF_FILE(dropped_frames_ttl, u.mesh.mshstats.dropped_frames_ttl, DEC);
IEEE80211_IF_FILE(dropped_frames_no_route,
@@ -184,20 +162,9 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD(force_unicast_rateidx, sta);
DEBUGFS_ADD(max_ratectrl_rateidx, sta);
- DEBUGFS_ADD(state, sta);
DEBUGFS_ADD(bssid, sta);
- DEBUGFS_ADD(prev_bssid, sta);
- DEBUGFS_ADD(ssid_len, sta);
DEBUGFS_ADD(aid, sta);
- DEBUGFS_ADD(ap_capab, sta);
DEBUGFS_ADD(capab, sta);
- DEBUGFS_ADD(extra_ie_len, sta);
- DEBUGFS_ADD(auth_tries, sta);
- DEBUGFS_ADD(assoc_tries, sta);
- DEBUGFS_ADD(auth_algs, sta);
- DEBUGFS_ADD(auth_alg, sta);
- DEBUGFS_ADD(auth_transaction, sta);
- DEBUGFS_ADD(flags, sta);
}
static void add_ap_files(struct ieee80211_sub_if_data *sdata)
@@ -240,6 +207,8 @@ static void add_mesh_stats(struct ieee80211_sub_if_data *sdata)
{
sdata->mesh_stats_dir = debugfs_create_dir("mesh_stats",
sdata->debugfsdir);
+ MESHSTATS_ADD(fwded_mcast);
+ MESHSTATS_ADD(fwded_unicast);
MESHSTATS_ADD(fwded_frames);
MESHSTATS_ADD(dropped_frames_ttl);
MESHSTATS_ADD(dropped_frames_no_route);
@@ -317,20 +286,9 @@ static void del_sta_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_DEL(force_unicast_rateidx, sta);
DEBUGFS_DEL(max_ratectrl_rateidx, sta);
- DEBUGFS_DEL(state, sta);
DEBUGFS_DEL(bssid, sta);
- DEBUGFS_DEL(prev_bssid, sta);
- DEBUGFS_DEL(ssid_len, sta);
DEBUGFS_DEL(aid, sta);
- DEBUGFS_DEL(ap_capab, sta);
DEBUGFS_DEL(capab, sta);
- DEBUGFS_DEL(extra_ie_len, sta);
- DEBUGFS_DEL(auth_tries, sta);
- DEBUGFS_DEL(assoc_tries, sta);
- DEBUGFS_DEL(auth_algs, sta);
- DEBUGFS_DEL(auth_alg, sta);
- DEBUGFS_DEL(auth_transaction, sta);
- DEBUGFS_DEL(flags, sta);
}
static void del_ap_files(struct ieee80211_sub_if_data *sdata)
@@ -373,6 +331,8 @@ static void del_monitor_files(struct ieee80211_sub_if_data *sdata)
static void del_mesh_stats(struct ieee80211_sub_if_data *sdata)
{
+ MESHSTATS_DEL(fwded_mcast);
+ MESHSTATS_DEL(fwded_unicast);
MESHSTATS_DEL(fwded_frames);
MESHSTATS_DEL(dropped_frames_ttl);
MESHSTATS_DEL(dropped_frames_no_route);
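The new fwded_mcast/fwded_unicast entries reuse the file-generating macros already in debugfs_netdev.c. As a rough, hypothetical stand-in for what one DEC-formatted mesh-stats attribute amounts to (names and helpers below are illustrative, not the actual macro expansion):

static int mesh_stats_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;	/* the sdata pointer */
	return 0;
}

static ssize_t fwded_mcast_read(struct file *file, char __user *userbuf,
				size_t count, loff_t *ppos)
{
	struct ieee80211_sub_if_data *sdata = file->private_data;
	char buf[32];
	int len = scnprintf(buf, sizeof(buf), "%u\n",
			    sdata->u.mesh.mshstats.fwded_mcast);

	return simple_read_from_buffer(userbuf, count, ppos, buf, len);
}

static const struct file_operations fwded_mcast_ops = {
	.read = fwded_mcast_read,
	.open = mesh_stats_open,
};

/* registered roughly as:
 * debugfs_create_file("fwded_mcast", 0400, sdata->mesh_stats_dir,
 *                     sdata, &fwded_mcast_ops);
 */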
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 90230c718b5..33a2e892115 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -120,45 +120,38 @@ STA_OPS(last_seq_ctrl);
static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- char buf[768], *p = buf;
+ char buf[30 + STA_TID_NUM * 70], *p = buf;
int i;
struct sta_info *sta = file->private_data;
- p += scnprintf(p, sizeof(buf)+buf-p, "Agg state for STA is:\n");
- p += scnprintf(p, sizeof(buf)+buf-p, " STA next dialog_token is %d \n "
- "TIDs info is: \n TID :",
- (sta->ampdu_mlme.dialog_token_allocator + 1));
- for (i = 0; i < STA_TID_NUM; i++)
- p += scnprintf(p, sizeof(buf)+buf-p, "%5d", i);
-
- p += scnprintf(p, sizeof(buf)+buf-p, "\n RX :");
- for (i = 0; i < STA_TID_NUM; i++)
- p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
- sta->ampdu_mlme.tid_state_rx[i]);
-
- p += scnprintf(p, sizeof(buf)+buf-p, "\n DTKN:");
- for (i = 0; i < STA_TID_NUM; i++)
- p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
- sta->ampdu_mlme.tid_state_rx[i] ?
- sta->ampdu_mlme.tid_rx[i]->dialog_token : 0);
-
- p += scnprintf(p, sizeof(buf)+buf-p, "\n TX :");
- for (i = 0; i < STA_TID_NUM; i++)
- p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
- sta->ampdu_mlme.tid_state_tx[i]);
-
- p += scnprintf(p, sizeof(buf)+buf-p, "\n DTKN:");
- for (i = 0; i < STA_TID_NUM; i++)
- p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
- sta->ampdu_mlme.tid_state_tx[i] ?
- sta->ampdu_mlme.tid_tx[i]->dialog_token : 0);
-
- p += scnprintf(p, sizeof(buf)+buf-p, "\n SSN :");
- for (i = 0; i < STA_TID_NUM; i++)
- p += scnprintf(p, sizeof(buf)+buf-p, "%5d",
- sta->ampdu_mlme.tid_state_tx[i] ?
- sta->ampdu_mlme.tid_tx[i]->ssn : 0);
- p += scnprintf(p, sizeof(buf)+buf-p, "\n");
+ spin_lock_bh(&sta->lock);
+ p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
+ sta->ampdu_mlme.dialog_token_allocator + 1);
+ for (i = 0; i < STA_TID_NUM; i++) {
+ p += scnprintf(p, sizeof(buf)+buf-p, "TID %02d:", i);
+ p += scnprintf(p, sizeof(buf)+buf-p, " RX=%x",
+ sta->ampdu_mlme.tid_state_rx[i]);
+ p += scnprintf(p, sizeof(buf)+buf-p, "/DTKN=%#.2x",
+ sta->ampdu_mlme.tid_state_rx[i] ?
+ sta->ampdu_mlme.tid_rx[i]->dialog_token : 0);
+ p += scnprintf(p, sizeof(buf)+buf-p, "/SSN=%#.3x",
+ sta->ampdu_mlme.tid_state_rx[i] ?
+ sta->ampdu_mlme.tid_rx[i]->ssn : 0);
+
+ p += scnprintf(p, sizeof(buf)+buf-p, " TX=%x",
+ sta->ampdu_mlme.tid_state_tx[i]);
+ p += scnprintf(p, sizeof(buf)+buf-p, "/DTKN=%#.2x",
+ sta->ampdu_mlme.tid_state_tx[i] ?
+ sta->ampdu_mlme.tid_tx[i]->dialog_token : 0);
+ p += scnprintf(p, sizeof(buf)+buf-p, "/SSN=%#.3x",
+ sta->ampdu_mlme.tid_state_tx[i] ?
+ sta->ampdu_mlme.tid_tx[i]->ssn : 0);
+ p += scnprintf(p, sizeof(buf)+buf-p, "/pending=%03d",
+ sta->ampdu_mlme.tid_state_tx[i] ?
+ skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0);
+ p += scnprintf(p, sizeof(buf)+buf-p, "\n");
+ }
+ spin_unlock_bh(&sta->lock);
return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
}
@@ -203,6 +196,22 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
DEBUGFS_ADD(inactive_ms);
DEBUGFS_ADD(last_seq_ctrl);
DEBUGFS_ADD(agg_status);
+ DEBUGFS_ADD(dev);
+ DEBUGFS_ADD(rx_packets);
+ DEBUGFS_ADD(tx_packets);
+ DEBUGFS_ADD(rx_bytes);
+ DEBUGFS_ADD(tx_bytes);
+ DEBUGFS_ADD(rx_duplicates);
+ DEBUGFS_ADD(rx_fragments);
+ DEBUGFS_ADD(rx_dropped);
+ DEBUGFS_ADD(tx_fragments);
+ DEBUGFS_ADD(tx_filtered);
+ DEBUGFS_ADD(tx_retry_failed);
+ DEBUGFS_ADD(tx_retry_count);
+ DEBUGFS_ADD(last_signal);
+ DEBUGFS_ADD(last_qual);
+ DEBUGFS_ADD(last_noise);
+ DEBUGFS_ADD(wep_weak_iv_count);
}
void ieee80211_sta_debugfs_remove(struct sta_info *sta)
@@ -212,6 +221,23 @@ void ieee80211_sta_debugfs_remove(struct sta_info *sta)
DEBUGFS_DEL(inactive_ms);
DEBUGFS_DEL(last_seq_ctrl);
DEBUGFS_DEL(agg_status);
+ DEBUGFS_DEL(aid);
+ DEBUGFS_DEL(dev);
+ DEBUGFS_DEL(rx_packets);
+ DEBUGFS_DEL(tx_packets);
+ DEBUGFS_DEL(rx_bytes);
+ DEBUGFS_DEL(tx_bytes);
+ DEBUGFS_DEL(rx_duplicates);
+ DEBUGFS_DEL(rx_fragments);
+ DEBUGFS_DEL(rx_dropped);
+ DEBUGFS_DEL(tx_fragments);
+ DEBUGFS_DEL(tx_filtered);
+ DEBUGFS_DEL(tx_retry_failed);
+ DEBUGFS_DEL(tx_retry_count);
+ DEBUGFS_DEL(last_signal);
+ DEBUGFS_DEL(last_qual);
+ DEBUGFS_DEL(last_noise);
+ DEBUGFS_DEL(wep_weak_iv_count);
debugfs_remove(sta->debugfs.dir);
sta->debugfs.dir = NULL;
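The rewritten sta_agg_status_read() above follows the usual debugfs read idiom: build the whole report into a stack buffer with scnprintf(), advancing a cursor that can never overrun because scnprintf() returns only what actually fit, then hand the result to simple_read_from_buffer(), which handles partial reads and file offsets. A stripped-down sketch of that idiom (buffer contents are made up):

static ssize_t example_read(struct file *file, char __user *userbuf,
			    size_t count, loff_t *ppos)
{
	char buf[128], *p = buf;
	int i;

	/* scnprintf() never writes past the remaining space and returns
	 * the number of characters actually stored. */
	for (i = 0; i < 4; i++)
		p += scnprintf(p, sizeof(buf) + buf - p, "item %d\n", i);

	return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
}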
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index b13446afd48..020a94a3110 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -3,6 +3,7 @@
#include <net/mac80211.h>
#include "ieee80211_i.h"
+#include "driver-trace.h"
static inline int drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
{
@@ -11,29 +12,49 @@ static inline int drv_tx(struct ieee80211_local *local, struct sk_buff *skb)
static inline int drv_start(struct ieee80211_local *local)
{
- return local->ops->start(&local->hw);
+ int ret;
+
+ local->started = true;
+ smp_mb();
+ ret = local->ops->start(&local->hw);
+ trace_drv_start(local, ret);
+ return ret;
}
static inline void drv_stop(struct ieee80211_local *local)
{
local->ops->stop(&local->hw);
+ trace_drv_stop(local);
+
+ /* sync away all work on the tasklet before clearing started */
+ tasklet_disable(&local->tasklet);
+ tasklet_enable(&local->tasklet);
+
+ barrier();
+
+ local->started = false;
}
static inline int drv_add_interface(struct ieee80211_local *local,
struct ieee80211_if_init_conf *conf)
{
- return local->ops->add_interface(&local->hw, conf);
+ int ret = local->ops->add_interface(&local->hw, conf);
+ trace_drv_add_interface(local, conf->mac_addr, conf->vif, ret);
+ return ret;
}
static inline void drv_remove_interface(struct ieee80211_local *local,
struct ieee80211_if_init_conf *conf)
{
local->ops->remove_interface(&local->hw, conf);
+ trace_drv_remove_interface(local, conf->mac_addr, conf->vif);
}
static inline int drv_config(struct ieee80211_local *local, u32 changed)
{
- return local->ops->config(&local->hw, changed);
+ int ret = local->ops->config(&local->hw, changed);
+ trace_drv_config(local, changed, ret);
+ return ret;
}
static inline void drv_bss_info_changed(struct ieee80211_local *local,
@@ -43,24 +64,45 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
{
if (local->ops->bss_info_changed)
local->ops->bss_info_changed(&local->hw, vif, info, changed);
+ trace_drv_bss_info_changed(local, vif, info, changed);
+}
+
+static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
+ int mc_count,
+ struct dev_addr_list *mc_list)
+{
+ u64 ret = 0;
+
+ if (local->ops->prepare_multicast)
+ ret = local->ops->prepare_multicast(&local->hw, mc_count,
+ mc_list);
+
+ trace_drv_prepare_multicast(local, mc_count, ret);
+
+ return ret;
}
static inline void drv_configure_filter(struct ieee80211_local *local,
unsigned int changed_flags,
unsigned int *total_flags,
- int mc_count,
- struct dev_addr_list *mc_list)
+ u64 multicast)
{
+ might_sleep();
+
local->ops->configure_filter(&local->hw, changed_flags, total_flags,
- mc_count, mc_list);
+ multicast);
+ trace_drv_configure_filter(local, changed_flags, total_flags,
+ multicast);
}
static inline int drv_set_tim(struct ieee80211_local *local,
struct ieee80211_sta *sta, bool set)
{
+ int ret = 0;
if (local->ops->set_tim)
- return local->ops->set_tim(&local->hw, sta, set);
- return 0;
+ ret = local->ops->set_tim(&local->hw, sta, set);
+ trace_drv_set_tim(local, sta, set, ret);
+ return ret;
}
static inline int drv_set_key(struct ieee80211_local *local,
@@ -68,7 +110,9 @@ static inline int drv_set_key(struct ieee80211_local *local,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
- return local->ops->set_key(&local->hw, cmd, vif, sta, key);
+ int ret = local->ops->set_key(&local->hw, cmd, vif, sta, key);
+ trace_drv_set_key(local, cmd, vif, sta, key, ret);
+ return ret;
}
static inline void drv_update_tkip_key(struct ieee80211_local *local,
@@ -79,32 +123,41 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local,
if (local->ops->update_tkip_key)
local->ops->update_tkip_key(&local->hw, conf, address,
iv32, phase1key);
+ trace_drv_update_tkip_key(local, conf, address, iv32);
}
static inline int drv_hw_scan(struct ieee80211_local *local,
struct cfg80211_scan_request *req)
{
- return local->ops->hw_scan(&local->hw, req);
+ int ret = local->ops->hw_scan(&local->hw, req);
+ trace_drv_hw_scan(local, req, ret);
+ return ret;
}
static inline void drv_sw_scan_start(struct ieee80211_local *local)
{
if (local->ops->sw_scan_start)
local->ops->sw_scan_start(&local->hw);
+ trace_drv_sw_scan_start(local);
}
static inline void drv_sw_scan_complete(struct ieee80211_local *local)
{
if (local->ops->sw_scan_complete)
local->ops->sw_scan_complete(&local->hw);
+ trace_drv_sw_scan_complete(local);
}
static inline int drv_get_stats(struct ieee80211_local *local,
struct ieee80211_low_level_stats *stats)
{
- if (!local->ops->get_stats)
- return -EOPNOTSUPP;
- return local->ops->get_stats(&local->hw, stats);
+ int ret = -EOPNOTSUPP;
+
+ if (local->ops->get_stats)
+ ret = local->ops->get_stats(&local->hw, stats);
+ trace_drv_get_stats(local, stats, ret);
+
+ return ret;
}
static inline void drv_get_tkip_seq(struct ieee80211_local *local,
@@ -112,14 +165,17 @@ static inline void drv_get_tkip_seq(struct ieee80211_local *local,
{
if (local->ops->get_tkip_seq)
local->ops->get_tkip_seq(&local->hw, hw_key_idx, iv32, iv16);
+ trace_drv_get_tkip_seq(local, hw_key_idx, iv32, iv16);
}
static inline int drv_set_rts_threshold(struct ieee80211_local *local,
u32 value)
{
+ int ret = 0;
if (local->ops->set_rts_threshold)
- return local->ops->set_rts_threshold(&local->hw, value);
- return 0;
+ ret = local->ops->set_rts_threshold(&local->hw, value);
+ trace_drv_set_rts_threshold(local, value, ret);
+ return ret;
}
static inline void drv_sta_notify(struct ieee80211_local *local,
@@ -129,46 +185,57 @@ static inline void drv_sta_notify(struct ieee80211_local *local,
{
if (local->ops->sta_notify)
local->ops->sta_notify(&local->hw, vif, cmd, sta);
+ trace_drv_sta_notify(local, vif, cmd, sta);
}
static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
+ int ret = -EOPNOTSUPP;
if (local->ops->conf_tx)
- return local->ops->conf_tx(&local->hw, queue, params);
- return -EOPNOTSUPP;
+ ret = local->ops->conf_tx(&local->hw, queue, params);
+ trace_drv_conf_tx(local, queue, params, ret);
+ return ret;
}
static inline int drv_get_tx_stats(struct ieee80211_local *local,
struct ieee80211_tx_queue_stats *stats)
{
- return local->ops->get_tx_stats(&local->hw, stats);
+ int ret = local->ops->get_tx_stats(&local->hw, stats);
+ trace_drv_get_tx_stats(local, stats, ret);
+ return ret;
}
static inline u64 drv_get_tsf(struct ieee80211_local *local)
{
+ u64 ret = -1ULL;
if (local->ops->get_tsf)
- return local->ops->get_tsf(&local->hw);
- return -1ULL;
+ ret = local->ops->get_tsf(&local->hw);
+ trace_drv_get_tsf(local, ret);
+ return ret;
}
static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf)
{
if (local->ops->set_tsf)
local->ops->set_tsf(&local->hw, tsf);
+ trace_drv_set_tsf(local, tsf);
}
static inline void drv_reset_tsf(struct ieee80211_local *local)
{
if (local->ops->reset_tsf)
local->ops->reset_tsf(&local->hw);
+ trace_drv_reset_tsf(local);
}
static inline int drv_tx_last_beacon(struct ieee80211_local *local)
{
+ int ret = 1;
if (local->ops->tx_last_beacon)
- return local->ops->tx_last_beacon(&local->hw);
- return 1;
+ ret = local->ops->tx_last_beacon(&local->hw);
+ trace_drv_tx_last_beacon(local, ret);
+ return ret;
}
static inline int drv_ampdu_action(struct ieee80211_local *local,
@@ -176,10 +243,12 @@ static inline int drv_ampdu_action(struct ieee80211_local *local,
struct ieee80211_sta *sta, u16 tid,
u16 *ssn)
{
+ int ret = -EOPNOTSUPP;
if (local->ops->ampdu_action)
- return local->ops->ampdu_action(&local->hw, action,
- sta, tid, ssn);
- return -EOPNOTSUPP;
+ ret = local->ops->ampdu_action(&local->hw, action,
+ sta, tid, ssn);
+ trace_drv_ampdu_action(local, action, sta, tid, ssn, ret);
+ return ret;
}
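Every wrapper in driver-ops.h now has the same shape: pick a default result, call the driver op only if the driver implements it, fire the matching tracepoint with the arguments and the result, and return. Condensed into a sketch for a hypothetical optional op (drv_example, ops->example and trace_drv_example are made-up names used only to show the pattern):

static inline int drv_example(struct ieee80211_local *local, u32 arg)
{
	int ret = -EOPNOTSUPP;			/* default when the op is absent */

	if (local->ops->example)		/* hypothetical optional op */
		ret = local->ops->example(&local->hw, arg);
	trace_drv_example(local, arg, ret);	/* hypothetical event */
	return ret;
}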
diff --git a/net/mac80211/driver-trace.c b/net/mac80211/driver-trace.c
new file mode 100644
index 00000000000..8ed8711b1a6
--- /dev/null
+++ b/net/mac80211/driver-trace.c
@@ -0,0 +1,9 @@
+/* bug in tracepoint.h, it should include this */
+#include <linux/module.h>
+
+/* sparse isn't too happy with all macros... */
+#ifndef __CHECKER__
+#include "driver-ops.h"
+#define CREATE_TRACE_POINTS
+#include "driver-trace.h"
+#endif
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
new file mode 100644
index 00000000000..37b9051afcf
--- /dev/null
+++ b/net/mac80211/driver-trace.h
@@ -0,0 +1,672 @@
+#if !defined(__MAC80211_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define __MAC80211_DRIVER_TRACE
+
+#include <linux/tracepoint.h>
+#include <net/mac80211.h>
+#include "ieee80211_i.h"
+
+#if !defined(CONFIG_MAC80211_DRIVER_API_TRACER) || defined(__CHECKER__)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mac80211
+
+#define MAXNAME 32
+#define LOCAL_ENTRY __array(char, wiphy_name, MAXNAME)
+#define LOCAL_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(local->hw.wiphy), MAXNAME)
+#define LOCAL_PR_FMT "%s"
+#define LOCAL_PR_ARG __entry->wiphy_name
+
+#define STA_ENTRY __array(char, sta_addr, ETH_ALEN)
+#define STA_ASSIGN (sta ? memcpy(__entry->sta_addr, sta->addr, ETH_ALEN) : memset(__entry->sta_addr, 0, ETH_ALEN))
+#define STA_PR_FMT " sta:%pM"
+#define STA_PR_ARG __entry->sta_addr
+
+#define VIF_ENTRY __field(enum nl80211_iftype, vif_type) __field(void *, vif)
+#define VIF_ASSIGN __entry->vif_type = vif ? vif->type : 0; __entry->vif = vif
+#define VIF_PR_FMT " vif:%p(%d)"
+#define VIF_PR_ARG __entry->vif, __entry->vif_type
+
+TRACE_EVENT(drv_start,
+ TP_PROTO(struct ieee80211_local *local, int ret),
+
+ TP_ARGS(local, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->ret = ret;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT, LOCAL_PR_ARG
+ )
+);
+
+TRACE_EVENT(drv_stop,
+ TP_PROTO(struct ieee80211_local *local),
+
+ TP_ARGS(local),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT, LOCAL_PR_ARG
+ )
+);
+
+TRACE_EVENT(drv_add_interface,
+ TP_PROTO(struct ieee80211_local *local,
+ const u8 *addr,
+ struct ieee80211_vif *vif,
+ int ret),
+
+ TP_ARGS(local, addr, vif, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __array(char, addr, 6)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ memcpy(__entry->addr, addr, 6);
+ __entry->ret = ret;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT " addr:%pM ret:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr, __entry->ret
+ )
+);
+
+TRACE_EVENT(drv_remove_interface,
+ TP_PROTO(struct ieee80211_local *local,
+ const u8 *addr, struct ieee80211_vif *vif),
+
+ TP_ARGS(local, addr, vif),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __array(char, addr, 6)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ memcpy(__entry->addr, addr, 6);
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT " addr:%pM",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr
+ )
+);
+
+TRACE_EVENT(drv_config,
+ TP_PROTO(struct ieee80211_local *local,
+ u32 changed,
+ int ret),
+
+ TP_ARGS(local, changed, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u32, changed)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->changed = changed;
+ __entry->ret = ret;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " ch:%#x ret:%d",
+ LOCAL_PR_ARG, __entry->changed, __entry->ret
+ )
+);
+
+TRACE_EVENT(drv_bss_info_changed,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed),
+
+ TP_ARGS(local, vif, info, changed),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ __field(bool, assoc)
+ __field(u16, aid)
+ __field(bool, cts)
+ __field(bool, shortpre)
+ __field(bool, shortslot)
+ __field(u8, dtimper)
+ __field(u16, bcnint)
+ __field(u16, assoc_cap)
+ __field(u64, timestamp)
+ __field(u32, basic_rates)
+ __field(u32, changed)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ __entry->changed = changed;
+ __entry->aid = info->aid;
+ __entry->assoc = info->assoc;
+ __entry->shortpre = info->use_short_preamble;
+ __entry->cts = info->use_cts_prot;
+ __entry->shortslot = info->use_short_slot;
+ __entry->dtimper = info->dtim_period;
+ __entry->bcnint = info->beacon_int;
+ __entry->assoc_cap = info->assoc_capability;
+ __entry->timestamp = info->timestamp;
+ __entry->basic_rates = info->basic_rates;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT " changed:%#x",
+ LOCAL_PR_ARG, VIF_PR_ARG, __entry->changed
+ )
+);
+
+TRACE_EVENT(drv_prepare_multicast,
+ TP_PROTO(struct ieee80211_local *local, int mc_count, u64 ret),
+
+ TP_ARGS(local, mc_count, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(int, mc_count)
+ __field(u64, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->mc_count = mc_count;
+ __entry->ret = ret;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " prepare mc (%d): %llx",
+ LOCAL_PR_ARG, __entry->mc_count,
+ (unsigned long long) __entry->ret
+ )
+);
+
+TRACE_EVENT(drv_configure_filter,
+ TP_PROTO(struct ieee80211_local *local,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast),
+
+ TP_ARGS(local, changed_flags, total_flags, multicast),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(unsigned int, changed)
+ __field(unsigned int, total)
+ __field(u64, multicast)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->changed = changed_flags;
+ __entry->total = *total_flags;
+ __entry->multicast = multicast;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " changed:%#x total:%#x",
+ LOCAL_PR_ARG, __entry->changed, __entry->total
+ )
+);
+
+TRACE_EVENT(drv_set_tim,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sta *sta, bool set, int ret),
+
+ TP_ARGS(local, sta, set, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ STA_ENTRY
+ __field(bool, set)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ STA_ASSIGN;
+ __entry->set = set;
+ __entry->ret = ret;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT STA_PR_FMT " set:%d ret:%d",
+ LOCAL_PR_ARG, STA_PR_ARG, __entry->set, __entry->ret
+ )
+);
+
+TRACE_EVENT(drv_set_key,
+ TP_PROTO(struct ieee80211_local *local,
+ enum set_key_cmd cmd, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key, int ret),
+
+ TP_ARGS(local, cmd, vif, sta, key, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ STA_ENTRY
+ __field(enum ieee80211_key_alg, alg)
+ __field(u8, hw_key_idx)
+ __field(u8, flags)
+ __field(s8, keyidx)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ STA_ASSIGN;
+ __entry->alg = key->alg;
+ __entry->flags = key->flags;
+ __entry->keyidx = key->keyidx;
+ __entry->hw_key_idx = key->hw_key_idx;
+ __entry->ret = ret;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret
+ )
+);
+
+TRACE_EVENT(drv_update_tkip_key,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_key_conf *conf,
+ const u8 *address, u32 iv32),
+
+ TP_ARGS(local, conf, address, iv32),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __array(u8, addr, 6)
+ __field(u32, iv32)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ memcpy(__entry->addr, address, 6);
+ __entry->iv32 = iv32;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " addr:%pM iv32:%#x",
+ LOCAL_PR_ARG, __entry->addr, __entry->iv32
+ )
+);
+
+TRACE_EVENT(drv_hw_scan,
+ TP_PROTO(struct ieee80211_local *local,
+ struct cfg80211_scan_request *req, int ret),
+
+ TP_ARGS(local, req, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->ret = ret;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " ret:%d",
+ LOCAL_PR_ARG, __entry->ret
+ )
+);
+
+TRACE_EVENT(drv_sw_scan_start,
+ TP_PROTO(struct ieee80211_local *local),
+
+ TP_ARGS(local),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT, LOCAL_PR_ARG
+ )
+);
+
+TRACE_EVENT(drv_sw_scan_complete,
+ TP_PROTO(struct ieee80211_local *local),
+
+ TP_ARGS(local),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT, LOCAL_PR_ARG
+ )
+);
+
+TRACE_EVENT(drv_get_stats,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_low_level_stats *stats,
+ int ret),
+
+ TP_ARGS(local, stats, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(int, ret)
+ __field(unsigned int, ackfail)
+ __field(unsigned int, rtsfail)
+ __field(unsigned int, fcserr)
+ __field(unsigned int, rtssucc)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->ret = ret;
+ __entry->ackfail = stats->dot11ACKFailureCount;
+ __entry->rtsfail = stats->dot11RTSFailureCount;
+ __entry->fcserr = stats->dot11FCSErrorCount;
+ __entry->rtssucc = stats->dot11RTSSuccessCount;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " ret:%d",
+ LOCAL_PR_ARG, __entry->ret
+ )
+);
+
+TRACE_EVENT(drv_get_tkip_seq,
+ TP_PROTO(struct ieee80211_local *local,
+ u8 hw_key_idx, u32 *iv32, u16 *iv16),
+
+ TP_ARGS(local, hw_key_idx, iv32, iv16),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u8, hw_key_idx)
+ __field(u32, iv32)
+ __field(u16, iv16)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->hw_key_idx = hw_key_idx;
+ __entry->iv32 = *iv32;
+ __entry->iv16 = *iv16;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT, LOCAL_PR_ARG
+ )
+);
+
+TRACE_EVENT(drv_set_rts_threshold,
+ TP_PROTO(struct ieee80211_local *local, u32 value, int ret),
+
+ TP_ARGS(local, value, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u32, value)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->ret = ret;
+ __entry->value = value;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " value:%d ret:%d",
+ LOCAL_PR_ARG, __entry->value, __entry->ret
+ )
+);
+
+TRACE_EVENT(drv_sta_notify,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta),
+
+ TP_ARGS(local, vif, cmd, sta),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ STA_ENTRY
+ __field(u32, cmd)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ STA_ASSIGN;
+ __entry->cmd = cmd;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " cmd:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->cmd
+ )
+);
+
+TRACE_EVENT(drv_conf_tx,
+ TP_PROTO(struct ieee80211_local *local, u16 queue,
+ const struct ieee80211_tx_queue_params *params,
+ int ret),
+
+ TP_ARGS(local, queue, params, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u16, queue)
+ __field(u16, txop)
+ __field(u16, cw_min)
+ __field(u16, cw_max)
+ __field(u8, aifs)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->queue = queue;
+ __entry->ret = ret;
+ __entry->txop = params->txop;
+ __entry->cw_max = params->cw_max;
+ __entry->cw_min = params->cw_min;
+ __entry->aifs = params->aifs;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " queue:%d ret:%d",
+ LOCAL_PR_ARG, __entry->queue, __entry->ret
+ )
+);
+
+TRACE_EVENT(drv_get_tx_stats,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_tx_queue_stats *stats,
+ int ret),
+
+ TP_ARGS(local, stats, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->ret = ret;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " ret:%d",
+ LOCAL_PR_ARG, __entry->ret
+ )
+);
+
+TRACE_EVENT(drv_get_tsf,
+ TP_PROTO(struct ieee80211_local *local, u64 ret),
+
+ TP_ARGS(local, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u64, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->ret = ret;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " ret:%llu",
+ LOCAL_PR_ARG, (unsigned long long)__entry->ret
+ )
+);
+
+TRACE_EVENT(drv_set_tsf,
+ TP_PROTO(struct ieee80211_local *local, u64 tsf),
+
+ TP_ARGS(local, tsf),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(u64, tsf)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->tsf = tsf;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " tsf:%llu",
+ LOCAL_PR_ARG, (unsigned long long)__entry->tsf
+ )
+);
+
+TRACE_EVENT(drv_reset_tsf,
+ TP_PROTO(struct ieee80211_local *local),
+
+ TP_ARGS(local),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT, LOCAL_PR_ARG
+ )
+);
+
+TRACE_EVENT(drv_tx_last_beacon,
+ TP_PROTO(struct ieee80211_local *local, int ret),
+
+ TP_ARGS(local, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ __entry->ret = ret;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT " ret:%d",
+ LOCAL_PR_ARG, __entry->ret
+ )
+);
+
+TRACE_EVENT(drv_ampdu_action,
+ TP_PROTO(struct ieee80211_local *local,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid,
+ u16 *ssn, int ret),
+
+ TP_ARGS(local, action, sta, tid, ssn, ret),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ STA_ENTRY
+ __field(u32, action)
+ __field(u16, tid)
+ __field(u16, ssn)
+ __field(int, ret)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ STA_ASSIGN;
+ __entry->ret = ret;
+ __entry->action = action;
+ __entry->tid = tid;
+ __entry->ssn = *ssn;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT STA_PR_FMT " action:%d tid:%d ret:%d",
+ LOCAL_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret
+ )
+);
+#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE driver-trace
+#include <trace/define_trace.h>
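Each event above is declared once with TRACE_EVENT() and invoked from its drv_*() wrapper as trace_<name>(); when CONFIG_MAC80211_DRIVER_API_TRACER is off, the override at the top of this header turns every declaration into an empty static inline, so the calls compile away. Adding another event follows the same conventions, for example (drv_example is again a made-up name):

TRACE_EVENT(drv_example,
	TP_PROTO(struct ieee80211_local *local, u32 arg, int ret),

	TP_ARGS(local, arg, ret),

	TP_STRUCT__entry(
		LOCAL_ENTRY
		__field(u32, arg)
		__field(int, ret)
	),

	TP_fast_assign(
		LOCAL_ASSIGN;
		__entry->arg = arg;
		__entry->ret = ret;
	),

	TP_printk(
		LOCAL_PR_FMT " arg:%#x ret:%d",
		LOCAL_PR_ARG, __entry->arg, __entry->ret
	)
);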
diff --git a/net/mac80211/event.c b/net/mac80211/event.c
index f288d01a634..01ae759518f 100644
--- a/net/mac80211/event.c
+++ b/net/mac80211/event.c
@@ -7,8 +7,7 @@
*
* mac80211 - events
*/
-
-#include <net/iw_handler.h>
+#include <net/cfg80211.h>
#include "ieee80211_i.h"
/*
@@ -17,26 +16,12 @@
* driver or is still in the frame), it should provide that information.
*/
void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
- struct ieee80211_hdr *hdr, const u8 *tsc)
+ struct ieee80211_hdr *hdr, const u8 *tsc,
+ gfp_t gfp)
{
- union iwreq_data wrqu;
- char *buf = kmalloc(128, GFP_ATOMIC);
-
- if (buf) {
- /* TODO: needed parameters: count, key type, TSC */
- sprintf(buf, "MLME-MICHAELMICFAILURE.indication("
- "keyid=%d %scast addr=%pM)",
- keyidx, hdr->addr1[0] & 0x01 ? "broad" : "uni",
- hdr->addr2);
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = strlen(buf);
- wireless_send_event(sdata->dev, IWEVCUSTOM, &wrqu, buf);
- kfree(buf);
- }
-
cfg80211_michael_mic_failure(sdata->dev, hdr->addr2,
(hdr->addr1[0] & 0x01) ?
NL80211_KEYTYPE_GROUP :
NL80211_KEYTYPE_PAIRWISE,
- keyidx, tsc);
+ keyidx, tsc, gfp);
}
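With the wireless-extensions event gone, the MIC failure report goes straight to cfg80211, and the new gfp argument lets each call site state its allocation context. A hedged sketch of a caller (the variable names are illustrative, not lifted from the RX path):

/* Illustrative call site; GFP_ATOMIC because MIC failures are
 * normally reported from the softirq RX path. */
mac80211_ev_michael_mic_failure(sdata, keyidx,
				(struct ieee80211_hdr *)skb->data,
				NULL, GFP_ATOMIC);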
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 0b30277eb36..920ec8792f4 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -57,7 +57,7 @@ static void ieee80211_rx_mgmt_auth_ibss(struct ieee80211_sub_if_data *sdata,
*/
if (auth_alg == WLAN_AUTH_OPEN && auth_transaction == 1)
ieee80211_send_auth(sdata, 2, WLAN_AUTH_OPEN, NULL, 0,
- sdata->u.ibss.bssid, 0);
+ sdata->u.ibss.bssid, NULL, 0, 0);
}
static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
@@ -494,7 +494,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
capability = WLAN_CAPABILITY_IBSS;
- if (sdata->default_key)
+ if (ifibss->privacy)
capability |= WLAN_CAPABILITY_PRIVACY;
else
sdata->drop_unencrypted = 0;
@@ -524,9 +524,8 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
return;
capability = WLAN_CAPABILITY_IBSS;
- if (sdata->default_key)
+ if (ifibss->privacy)
capability |= WLAN_CAPABILITY_PRIVACY;
-
if (ifibss->fixed_bssid)
bssid = ifibss->bssid;
if (ifibss->fixed_channel)
@@ -705,7 +704,7 @@ static void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt;
u16 fc;
- rx_status = (struct ieee80211_rx_status *) skb->cb;
+ rx_status = IEEE80211_SKB_RXCB(skb);
mgmt = (struct ieee80211_mgmt *) skb->data;
fc = le16_to_cpu(mgmt->frame_control);
@@ -743,7 +742,7 @@ static void ieee80211_ibss_work(struct work_struct *work)
if (!netif_running(sdata->dev))
return;
- if (local->sw_scanning || local->hw_scanning)
+ if (local->scanning)
return;
if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_ADHOC))
@@ -782,7 +781,7 @@ static void ieee80211_ibss_timer(unsigned long data)
}
set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request);
- queue_work(local->hw.workqueue, &ifibss->work);
+ ieee80211_queue_work(&local->hw, &ifibss->work);
}
#ifdef CONFIG_PM
@@ -836,8 +835,7 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
}
ieee80211_rx_result
-ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
- struct ieee80211_rx_status *rx_status)
+ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_mgmt *mgmt;
@@ -852,11 +850,10 @@ ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
switch (fc & IEEE80211_FCTL_STYPE) {
case IEEE80211_STYPE_PROBE_RESP:
case IEEE80211_STYPE_BEACON:
- memcpy(skb->cb, rx_status, sizeof(*rx_status));
case IEEE80211_STYPE_PROBE_REQ:
case IEEE80211_STYPE_AUTH:
skb_queue_tail(&sdata->u.ibss.skb_queue, skb);
- queue_work(local->hw.workqueue, &sdata->u.ibss.work);
+ ieee80211_queue_work(&local->hw, &sdata->u.ibss.work);
return RX_QUEUED;
}
@@ -874,6 +871,8 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
} else
sdata->u.ibss.fixed_bssid = false;
+ sdata->u.ibss.privacy = params->privacy;
+
sdata->vif.bss_conf.beacon_int = params->beacon_interval;
sdata->u.ibss.channel = params->channel;
@@ -913,7 +912,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
ieee80211_recalc_idle(sdata->local);
set_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request);
- queue_work(sdata->local->hw.workqueue, &sdata->u.ibss.work);
+ ieee80211_queue_work(&sdata->local->hw, &sdata->u.ibss.work);
return 0;
}
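Two substitutions recur in this hunk: the RX status is read back out of the skb control buffer through IEEE80211_SKB_RXCB() instead of being copied around explicitly, and deferred work is queued through ieee80211_queue_work() so mac80211 can run it on its own workqueue. The accessor is assumed to be the usual cb-backed helper from include/net/mac80211.h, roughly:

static inline struct ieee80211_rx_status *IEEE80211_SKB_RXCB(struct sk_buff *skb)
{
	return (struct ieee80211_rx_status *)skb->cb;
}

/* typical use when processing a queued management frame: */
rx_status = IEEE80211_SKB_RXCB(skb);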
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 68eb5052179..588005c84a6 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -24,7 +24,6 @@
#include <linux/spinlock.h>
#include <linux/etherdevice.h>
#include <net/cfg80211.h>
-#include <net/iw_handler.h>
#include <net/mac80211.h>
#include "key.h"
#include "sta_info.h"
@@ -213,7 +212,9 @@ struct ieee80211_if_vlan {
};
struct mesh_stats {
- __u32 fwded_frames; /* Mesh forwarded frames */
+ __u32 fwded_mcast; /* Mesh forwarded multicast frames */
+ __u32 fwded_unicast; /* Mesh forwarded unicast frames */
+ __u32 fwded_frames; /* Mesh total forwarded frames */
__u32 dropped_frames_ttl; /* Not transmitted since mesh_ttl == 0*/
__u32 dropped_frames_no_route; /* Not transmitted, no route found */
atomic_t estab_plinks;
@@ -227,86 +228,81 @@ struct mesh_preq_queue {
u8 flags;
};
+enum ieee80211_mgd_state {
+ IEEE80211_MGD_STATE_IDLE,
+ IEEE80211_MGD_STATE_PROBE,
+ IEEE80211_MGD_STATE_AUTH,
+ IEEE80211_MGD_STATE_ASSOC,
+};
+
+struct ieee80211_mgd_work {
+ struct list_head list;
+ struct ieee80211_bss *bss;
+ int ie_len;
+ u8 prev_bssid[ETH_ALEN];
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ u8 ssid_len;
+ unsigned long timeout;
+ enum ieee80211_mgd_state state;
+ u16 auth_alg, auth_transaction;
+
+ int tries;
+
+ u8 key[WLAN_KEY_LEN_WEP104];
+ u8 key_len, key_idx;
+
+ /* must be last */
+ u8 ie[0]; /* for auth or assoc frame, not probe */
+};
+
/* flags used in struct ieee80211_if_managed.flags */
-#define IEEE80211_STA_SSID_SET BIT(0)
-#define IEEE80211_STA_BSSID_SET BIT(1)
-#define IEEE80211_STA_PREV_BSSID_SET BIT(2)
-#define IEEE80211_STA_AUTHENTICATED BIT(3)
-#define IEEE80211_STA_ASSOCIATED BIT(4)
-#define IEEE80211_STA_PROBEREQ_POLL BIT(5)
-#define IEEE80211_STA_CREATE_IBSS BIT(6)
-#define IEEE80211_STA_CONTROL_PORT BIT(7)
-#define IEEE80211_STA_WMM_ENABLED BIT(8)
-/* hole at 9, please re-use */
-#define IEEE80211_STA_AUTO_SSID_SEL BIT(10)
-#define IEEE80211_STA_AUTO_BSSID_SEL BIT(11)
-#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12)
-#define IEEE80211_STA_PRIVACY_INVOKED BIT(13)
-#define IEEE80211_STA_TKIP_WEP_USED BIT(14)
-#define IEEE80211_STA_CSA_RECEIVED BIT(15)
-#define IEEE80211_STA_MFP_ENABLED BIT(16)
-#define IEEE80211_STA_EXT_SME BIT(17)
-/* flags for MLME request */
-#define IEEE80211_STA_REQ_SCAN 0
-#define IEEE80211_STA_REQ_AUTH 1
-#define IEEE80211_STA_REQ_RUN 2
+enum ieee80211_sta_flags {
+ IEEE80211_STA_BEACON_POLL = BIT(0),
+ IEEE80211_STA_CONNECTION_POLL = BIT(1),
+ IEEE80211_STA_CONTROL_PORT = BIT(2),
+ IEEE80211_STA_WMM_ENABLED = BIT(3),
+ IEEE80211_STA_DISABLE_11N = BIT(4),
+ IEEE80211_STA_CSA_RECEIVED = BIT(5),
+ IEEE80211_STA_MFP_ENABLED = BIT(6),
+};
-/* bitfield of allowed auth algs */
-#define IEEE80211_AUTH_ALG_OPEN BIT(0)
-#define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1)
-#define IEEE80211_AUTH_ALG_LEAP BIT(2)
-#define IEEE80211_AUTH_ALG_FT BIT(3)
+/* flags for MLME request */
+enum ieee80211_sta_request {
+ IEEE80211_STA_REQ_SCAN,
+};
struct ieee80211_if_managed {
struct timer_list timer;
+ struct timer_list conn_mon_timer;
+ struct timer_list bcn_mon_timer;
struct timer_list chswitch_timer;
struct work_struct work;
+ struct work_struct monitor_work;
struct work_struct chswitch_work;
struct work_struct beacon_loss_work;
- u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
+ unsigned long probe_timeout;
+ int probe_send_count;
- u8 ssid[IEEE80211_MAX_SSID_LEN];
- size_t ssid_len;
+ struct mutex mtx;
+ struct ieee80211_bss *associated;
+ struct ieee80211_mgd_work *old_associate_work;
+ struct list_head work_list;
- enum {
- IEEE80211_STA_MLME_DISABLED,
- IEEE80211_STA_MLME_DIRECT_PROBE,
- IEEE80211_STA_MLME_AUTHENTICATE,
- IEEE80211_STA_MLME_ASSOCIATE,
- IEEE80211_STA_MLME_ASSOCIATED,
- } state;
+ u8 bssid[ETH_ALEN];
u16 aid;
- u16 ap_capab, capab;
- u8 *extra_ie; /* to be added to the end of AssocReq */
- size_t extra_ie_len;
-
- /* The last AssocReq/Resp IEs */
- u8 *assocreq_ies, *assocresp_ies;
- size_t assocreq_ies_len, assocresp_ies_len;
+ u16 capab;
struct sk_buff_head skb_queue;
- int assoc_scan_tries; /* number of scans done pre-association */
- int direct_probe_tries; /* retries for direct probes */
- int auth_tries; /* retries for auth req */
- int assoc_tries; /* retries for assoc req */
-
unsigned long timers_running; /* used for quiesce/restart */
bool powersave; /* powersave requested for this iface */
unsigned long request;
- unsigned long last_probe;
- unsigned long last_beacon;
-
unsigned int flags;
- unsigned int auth_algs; /* bitfield of allowed auth algs */
- int auth_alg; /* currently used IEEE 802.11 authentication algorithm */
- int auth_transaction;
-
u32 beacon_crc;
enum {
@@ -316,10 +312,6 @@ struct ieee80211_if_managed {
} mfp; /* management frame protection */
int wmm_last_param_set;
-
- /* Extra IE data for management frames */
- u8 *sme_auth_ie;
- size_t sme_auth_ie_len;
};
enum ieee80211_ibss_request {
@@ -339,6 +331,7 @@ struct ieee80211_if_ibss {
bool fixed_bssid;
bool fixed_channel;
+ bool privacy;
u8 bssid[ETH_ALEN];
u8 ssid[IEEE80211_MAX_SSID_LEN];
@@ -364,7 +357,7 @@ struct ieee80211_if_mesh {
unsigned long timers_running;
- bool housekeeping;
+ unsigned long wrkq_flags;
u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN];
size_t mesh_id_len;
@@ -374,6 +367,10 @@ struct ieee80211_if_mesh {
u8 mesh_pm_id[4];
/* Congestion Control Mode Identifier */
u8 mesh_cc_id[4];
+ /* Synchronization Protocol Identifier */
+ u8 mesh_sp_id[4];
+ /* Authentication Protocol Identifier */
+ u8 mesh_auth_id[4];
/* Local mesh Destination Sequence Number */
u32 dsn;
/* Last used PREQ ID */
@@ -478,20 +475,9 @@ struct ieee80211_sub_if_data {
union {
struct {
struct dentry *drop_unencrypted;
- struct dentry *state;
struct dentry *bssid;
- struct dentry *prev_bssid;
- struct dentry *ssid_len;
struct dentry *aid;
- struct dentry *ap_capab;
struct dentry *capab;
- struct dentry *extra_ie_len;
- struct dentry *auth_tries;
- struct dentry *assoc_tries;
- struct dentry *auth_algs;
- struct dentry *auth_alg;
- struct dentry *auth_transaction;
- struct dentry *flags;
struct dentry *force_unicast_rateidx;
struct dentry *max_ratectrl_rateidx;
} sta;
@@ -526,6 +512,8 @@ struct ieee80211_sub_if_data {
#ifdef CONFIG_MAC80211_MESH
struct dentry *mesh_stats_dir;
struct {
+ struct dentry *fwded_mcast;
+ struct dentry *fwded_unicast;
struct dentry *fwded_frames;
struct dentry *dropped_frames_ttl;
struct dentry *dropped_frames_no_route;
@@ -588,12 +576,44 @@ enum queue_stop_reason {
IEEE80211_QUEUE_STOP_REASON_CSA,
IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
IEEE80211_QUEUE_STOP_REASON_SUSPEND,
- IEEE80211_QUEUE_STOP_REASON_PENDING,
IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
};
-struct ieee80211_master_priv {
- struct ieee80211_local *local;
+/**
+ * mac80211 scan flags - currently active scan mode
+ *
+ * @SCAN_SW_SCANNING: We're currently in the process of scanning but may as
+ * well be on the operating channel
+ * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to
+ * determine if we are on the operating channel or not
+ * @SCAN_OFF_CHANNEL: We're off our operating channel for scanning,
+ * gets only set in conjunction with SCAN_SW_SCANNING
+ */
+enum {
+ SCAN_SW_SCANNING,
+ SCAN_HW_SCANNING,
+ SCAN_OFF_CHANNEL,
+};
+
+/**
+ * enum mac80211_scan_state - scan state machine states
+ *
+ * @SCAN_DECISION: Main entry point to the scan state machine, this state
+ * determines if we should keep on scanning or switch back to the
+ * operating channel
+ * @SCAN_SET_CHANNEL: Set the next channel to be scanned
+ * @SCAN_SEND_PROBE: Send probe requests and wait for probe responses
+ * @SCAN_LEAVE_OPER_CHANNEL: Leave the operating channel, notify the AP
+ * about us leaving the channel and stop all associated STA interfaces
+ * @SCAN_ENTER_OPER_CHANNEL: Enter the operating channel again, notify the
+ * AP about us being back and restart all associated STA interfaces
+ */
+enum mac80211_scan_state {
+ SCAN_DECISION,
+ SCAN_SET_CHANNEL,
+ SCAN_SEND_PROBE,
+ SCAN_LEAVE_OPER_CHANNEL,
+ SCAN_ENTER_OPER_CHANNEL,
};
struct ieee80211_local {
@@ -604,17 +624,33 @@ struct ieee80211_local {
const struct ieee80211_ops *ops;
+ /*
+ * private workqueue to mac80211. mac80211 makes this accessible
+ * via ieee80211_queue_work()
+ */
+ struct workqueue_struct *workqueue;
+
unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES];
/* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
spinlock_t queue_stop_reason_lock;
- struct net_device *mdev; /* wmaster# - "master" 802.11 device */
int open_count;
int monitors, cooked_mntrs;
/* number of interfaces with corresponding FIF_ flags */
- int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss;
+ int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
unsigned int filter_flags; /* FIF_* */
struct iw_statistics wstats;
+
+ /* protects the aggregated multicast list and filter calls */
+ spinlock_t filter_lock;
+
+ /* used for uploading changed mc list */
+ struct work_struct reconfig_filter;
+
+ /* aggregated multicast list */
+ struct dev_addr_list *mc_list;
+ int mc_count;
+
bool tim_in_locked_section; /* see ieee80211_beacon_get() */
/*
@@ -631,6 +667,9 @@ struct ieee80211_local {
*/
bool quiescing;
+ /* device is started */
+ bool started;
+
int tx_headroom; /* required headroom for hardware/radiotap */
/* Tasklet and skb queue to process calls from IRQ mode. All frames
@@ -653,6 +692,7 @@ struct ieee80211_local {
struct list_head sta_list;
struct sta_info *sta_hash[STA_HASH_SIZE];
struct timer_list sta_cleanup;
+ int sta_generation;
struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
struct tasklet_struct tx_pending_tasklet;
@@ -687,9 +727,9 @@ struct ieee80211_local {
/* Scanning and BSS list */
struct mutex scan_mtx;
- bool sw_scanning, hw_scanning;
+ unsigned long scanning;
struct cfg80211_ssid scan_ssid;
- struct cfg80211_scan_request int_scan_req;
+ struct cfg80211_scan_request *int_scan_req;
struct cfg80211_scan_request *scan_req;
struct ieee80211_channel *scan_channel;
const u8 *orig_ies;
@@ -697,7 +737,7 @@ struct ieee80211_local {
int scan_channel_idx;
int scan_ies_len;
- enum { SCAN_SET_CHANNEL, SCAN_SEND_PROBE } scan_state;
+ enum mac80211_scan_state next_scan_state;
struct delayed_work scan_work;
struct ieee80211_sub_if_data *scan_sdata;
enum nl80211_channel_type oper_channel_type;
@@ -834,10 +874,6 @@ struct ieee80211_local {
static inline struct ieee80211_sub_if_data *
IEEE80211_DEV_TO_SUB_IF(struct net_device *dev)
{
- struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
-
- BUG_ON(!local || local->mdev == dev);
-
return netdev_priv(dev);
}
@@ -937,21 +973,20 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
void ieee80211_configure_filter(struct ieee80211_local *local);
u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata);
-/* wireless extensions */
-extern const struct iw_handler_def ieee80211_iw_handler_def;
-
/* STA code */
void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata);
+int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_auth_request *req);
+int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_assoc_request *req);
+int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_deauth_request *req,
+ void *cookie);
+int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_disassoc_request *req,
+ void *cookie);
ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb,
- struct ieee80211_rx_status *rx_status);
-int ieee80211_sta_commit(struct ieee80211_sub_if_data *sdata);
-int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len);
-int ieee80211_sta_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len);
-int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid);
-void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata);
-int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason);
-int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason);
+ struct sk_buff *skb);
void ieee80211_send_pspoll(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency);
@@ -967,8 +1002,7 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata);
ieee80211_rx_result
-ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
- struct ieee80211_rx_status *rx_status);
+ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
u8 *bssid, u8 *addr, u32 supp_rates);
int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
@@ -983,16 +1017,9 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
const u8 *ssid, u8 ssid_len);
int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req);
-int ieee80211_scan_results(struct ieee80211_local *local,
- struct iw_request_info *info,
- char *buf, size_t len);
void ieee80211_scan_cancel(struct ieee80211_local *local);
ieee80211_rx_result
-ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb,
- struct ieee80211_rx_status *rx_status);
-int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata,
- const char *ie, size_t len);
+ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local);
struct ieee80211_bss *
@@ -1008,8 +1035,6 @@ ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
u8 *ssid, u8 ssid_len);
void ieee80211_rx_bss_put(struct ieee80211_local *local,
struct ieee80211_bss *bss);
-void ieee80211_rx_bss_remove(struct ieee80211_sub_if_data *sdata, u8 *bssid,
- int freq, u8 *ssid, u8 ssid_len);
/* interface handling */
int ieee80211_if_add(struct ieee80211_local *local, const char *name,
@@ -1025,9 +1050,10 @@ void ieee80211_recalc_idle(struct ieee80211_local *local);
/* tx handling */
void ieee80211_clear_tx_pending(struct ieee80211_local *local);
void ieee80211_tx_pending(unsigned long data);
-int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev);
-int ieee80211_monitor_start_xmit(struct sk_buff *skb, struct net_device *dev);
-int ieee80211_subif_start_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
/* HT */
void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
@@ -1065,6 +1091,7 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
/* Suspend/resume and hw reconfiguration */
int ieee80211_reconfig(struct ieee80211_local *local);
+void ieee80211_stop_device(struct ieee80211_local *local);
#ifdef CONFIG_PM
int __ieee80211_suspend(struct ieee80211_hw *hw);
@@ -1092,7 +1119,8 @@ u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
int rate, int erp, int short_preamble);
void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
- struct ieee80211_hdr *hdr, const u8 *tsc);
+ struct ieee80211_hdr *hdr, const u8 *tsc,
+ gfp_t gfp);
void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata);
void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
int encrypt);
@@ -1129,8 +1157,8 @@ int ieee80211_add_pending_skbs(struct ieee80211_local *local,
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
u16 transaction, u16 auth_alg,
- u8 *extra, size_t extra_len,
- const u8 *bssid, int encrypt);
+ u8 *extra, size_t extra_len, const u8 *bssid,
+ const u8 *key, u8 key_len, u8 key_idx);
int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
const u8 *ie, size_t ie_len);
void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
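The old sw_scanning/hw_scanning booleans collapse into a single unsigned long scanning bitmask keyed by the SCAN_* flags documented above, and the two-state scan enum grows into the mac80211_scan_state machine. A hedged sketch of how the bitmask is meant to be queried (the exact call sites live in scan.c and elsewhere):

if (test_bit(SCAN_SW_SCANNING, &local->scanning) ||
    test_bit(SCAN_HW_SCANNING, &local->scanning))
	return;		/* a scan is in progress, defer this work */

/* SCAN_OFF_CHANNEL is only ever set together with SCAN_SW_SCANNING. */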
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index b7c8a448429..b8295cbd7e8 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -190,10 +190,6 @@ static int ieee80211_open(struct net_device *dev)
ETH_ALEN);
}
- if (compare_ether_addr(null_addr, local->mdev->dev_addr) == 0)
- memcpy(local->mdev->dev_addr, local->hw.wiphy->perm_addr,
- ETH_ALEN);
-
/*
* Validate the MAC address for this device.
*/
@@ -224,18 +220,15 @@ static int ieee80211_open(struct net_device *dev)
local->fif_fcsfail++;
if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
local->fif_plcpfail++;
- if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
+ if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL) {
local->fif_control++;
+ local->fif_pspoll++;
+ }
if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
local->fif_other_bss++;
- netif_addr_lock_bh(local->mdev);
ieee80211_configure_filter(local);
- netif_addr_unlock_bh(local->mdev);
break;
- case NL80211_IFTYPE_STATION:
- sdata->u.mgd.flags &= ~IEEE80211_STA_PREV_BSSID_SET;
- /* fall through */
default:
conf.vif = &sdata->vif;
conf.type = sdata->vif.type;
@@ -246,12 +239,15 @@ static int ieee80211_open(struct net_device *dev)
if (ieee80211_vif_is_mesh(&sdata->vif)) {
local->fif_other_bss++;
- netif_addr_lock_bh(local->mdev);
ieee80211_configure_filter(local);
- netif_addr_unlock_bh(local->mdev);
ieee80211_start_mesh(sdata);
+ } else if (sdata->vif.type == NL80211_IFTYPE_AP) {
+ local->fif_pspoll++;
+
+ ieee80211_configure_filter(local);
}
+
changed |= ieee80211_reset_erp_info(sdata);
ieee80211_bss_info_change_notify(sdata, changed);
ieee80211_enable_keys(sdata);
@@ -281,15 +277,6 @@ static int ieee80211_open(struct net_device *dev)
}
}
- if (local->open_count == 0) {
- res = dev_open(local->mdev);
- WARN_ON(res);
- if (res)
- goto err_del_interface;
- tasklet_enable(&local->tx_pending_tasklet);
- tasklet_enable(&local->tasklet);
- }
-
/*
* set_multicast_list will be invoked by the networking core
* which will check whether any increments here were done in
@@ -323,7 +310,7 @@ static int ieee80211_open(struct net_device *dev)
* to fix this.
*/
if (sdata->vif.type == NL80211_IFTYPE_STATION)
- queue_work(local->hw.workqueue, &sdata->u.mgd.work);
+ ieee80211_queue_work(&local->hw, &sdata->u.mgd.work);
netif_tx_start_all_queues(dev);
@@ -346,7 +333,10 @@ static int ieee80211_stop(struct net_device *dev)
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_init_conf conf;
struct sta_info *sta;
+ unsigned long flags;
+ struct sk_buff *skb, *tmp;
u32 hw_reconf_flags = 0;
+ int i;
/*
* Stop TX on this interface first.
@@ -366,18 +356,6 @@ static int ieee80211_stop(struct net_device *dev)
rcu_read_unlock();
/*
- * Announce that we are leaving the network, in case we are a
- * station interface type. This must be done before removing
- * all stations associated with sta_info_flush, otherwise STA
- * information will be gone and no announce being done.
- */
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- if (sdata->u.mgd.state != IEEE80211_STA_MLME_DISABLED)
- ieee80211_sta_deauthenticate(sdata,
- WLAN_REASON_DEAUTH_LEAVING);
- }
-
- /*
* Remove all stations associated with this interface.
*
* This must be done before calling ops->remove_interface()
@@ -408,13 +386,24 @@ static int ieee80211_stop(struct net_device *dev)
if (sdata->flags & IEEE80211_SDATA_PROMISC)
atomic_dec(&local->iff_promiscs);
- dev_mc_unsync(local->mdev, dev);
+ if (sdata->vif.type == NL80211_IFTYPE_AP)
+ local->fif_pspoll--;
+
+ netif_addr_lock_bh(dev);
+ spin_lock_bh(&local->filter_lock);
+ __dev_addr_unsync(&local->mc_list, &local->mc_count,
+ &dev->mc_list, &dev->mc_count);
+ spin_unlock_bh(&local->filter_lock);
+ netif_addr_unlock_bh(dev);
+
+ ieee80211_configure_filter(local);
+
del_timer_sync(&local->dynamic_ps_timer);
cancel_work_sync(&local->dynamic_ps_enable_work);
/* APs need special treatment */
if (sdata->vif.type == NL80211_IFTYPE_AP) {
- struct ieee80211_sub_if_data *vlan, *tmp;
+ struct ieee80211_sub_if_data *vlan, *tmpsdata;
struct beacon_data *old_beacon = sdata->u.ap.beacon;
/* remove beacon */
@@ -423,7 +412,7 @@ static int ieee80211_stop(struct net_device *dev)
kfree(old_beacon);
/* down all dependent devices, that is VLANs */
- list_for_each_entry_safe(vlan, tmp, &sdata->u.ap.vlans,
+ list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
u.vlan.list)
dev_close(vlan->dev);
WARN_ON(!list_empty(&sdata->u.ap.vlans));
@@ -452,29 +441,30 @@ static int ieee80211_stop(struct net_device *dev)
local->fif_fcsfail--;
if (sdata->u.mntr_flags & MONITOR_FLAG_PLCPFAIL)
local->fif_plcpfail--;
- if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL)
+ if (sdata->u.mntr_flags & MONITOR_FLAG_CONTROL) {
+ local->fif_pspoll--;
local->fif_control--;
+ }
if (sdata->u.mntr_flags & MONITOR_FLAG_OTHER_BSS)
local->fif_other_bss--;
- netif_addr_lock_bh(local->mdev);
ieee80211_configure_filter(local);
- netif_addr_unlock_bh(local->mdev);
break;
case NL80211_IFTYPE_STATION:
- memset(sdata->u.mgd.bssid, 0, ETH_ALEN);
del_timer_sync(&sdata->u.mgd.chswitch_timer);
del_timer_sync(&sdata->u.mgd.timer);
+ del_timer_sync(&sdata->u.mgd.conn_mon_timer);
+ del_timer_sync(&sdata->u.mgd.bcn_mon_timer);
/*
- * If the timer fired while we waited for it, it will have
- * requeued the work. Now the work will be running again
+ * If any of the timers fired while we waited for it, it will
+ * have queued its work. Now the work will be running again
* but will not rearm the timer again because it checks
* whether the interface is running, which, at this point,
* it no longer is.
*/
cancel_work_sync(&sdata->u.mgd.work);
cancel_work_sync(&sdata->u.mgd.chswitch_work);
-
+ cancel_work_sync(&sdata->u.mgd.monitor_work);
cancel_work_sync(&sdata->u.mgd.beacon_loss_work);
/*
@@ -485,12 +475,6 @@ static int ieee80211_stop(struct net_device *dev)
*/
synchronize_rcu();
skb_queue_purge(&sdata->u.mgd.skb_queue);
-
- sdata->u.mgd.flags &= ~(IEEE80211_STA_PRIVACY_INVOKED |
- IEEE80211_STA_TKIP_WEP_USED);
- kfree(sdata->u.mgd.extra_ie);
- sdata->u.mgd.extra_ie = NULL;
- sdata->u.mgd.extra_ie_len = 0;
/* fall through */
case NL80211_IFTYPE_ADHOC:
if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
@@ -507,37 +491,23 @@ static int ieee80211_stop(struct net_device *dev)
local->fif_other_bss--;
atomic_dec(&local->iff_allmultis);
- netif_addr_lock_bh(local->mdev);
ieee80211_configure_filter(local);
- netif_addr_unlock_bh(local->mdev);
ieee80211_stop_mesh(sdata);
}
/* fall through */
default:
- if (local->scan_sdata == sdata) {
- if (!local->ops->hw_scan)
- cancel_delayed_work_sync(&local->scan_work);
- /*
- * The software scan can no longer run now, so we can
- * clear out the scan_sdata reference. However, the
- * hardware scan may still be running. The complete
- * function must be prepared to handle a NULL value.
- */
- local->scan_sdata = NULL;
- /*
- * The memory barrier guarantees that another CPU
- * that is hardware-scanning will now see the fact
- * that this interface is gone.
- */
- smp_mb();
- /*
- * If software scanning, complete the scan but since
- * the scan_sdata is NULL already don't send out a
- * scan event to userspace -- the scan is incomplete.
- */
- if (local->sw_scanning)
- ieee80211_scan_completed(&local->hw, true);
+ if (local->scan_sdata == sdata)
+ ieee80211_scan_cancel(local);
+
+ /*
+ * Disable beaconing for AP and mesh, IBSS can't
+ * still be joined to a network at this point.
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_AP ||
+ sdata->vif.type == NL80211_IFTYPE_MESH_POINT) {
+ ieee80211_bss_info_change_notify(sdata,
+ BSS_CHANGED_BEACON_ENABLED);
}
conf.vif = &sdata->vif;
@@ -555,17 +525,8 @@ static int ieee80211_stop(struct net_device *dev)
ieee80211_recalc_ps(local, -1);
if (local->open_count == 0) {
- if (netif_running(local->mdev))
- dev_close(local->mdev);
-
- drv_stop(local);
-
- ieee80211_led_radio(local, false);
-
- flush_workqueue(local->hw.workqueue);
-
- tasklet_disable(&local->tx_pending_tasklet);
- tasklet_disable(&local->tasklet);
+ ieee80211_clear_tx_pending(local);
+ ieee80211_stop_device(local);
/* no reconfiguring after stop! */
hw_reconf_flags = 0;
@@ -575,6 +536,18 @@ static int ieee80211_stop(struct net_device *dev)
if (hw_reconf_flags)
ieee80211_hw_config(local, hw_reconf_flags);
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
+ skb_queue_walk_safe(&local->pending[i], skb, tmp) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ if (info->control.vif == &sdata->vif) {
+ __skb_unlink(skb, &local->pending[i]);
+ dev_kfree_skb_irq(skb);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+
return 0;
}
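The new block at the end of ieee80211_stop() selectively drops pending TX frames that belong to the interface going down; the _safe queue walk allows unlinking the current entry, and dev_kfree_skb_irq() is used because the queue_stop_reason lock is held with interrupts disabled. The idiom in isolation (queue and predicate names below are illustrative):

struct sk_buff *skb, *tmp;

skb_queue_walk_safe(&pending_q, skb, tmp) {	/* pending_q: struct sk_buff_head */
	if (belongs_to_dying_vif(skb)) {	/* illustrative predicate */
		__skb_unlink(skb, &pending_q);
		dev_kfree_skb_irq(skb);		/* safe with IRQs disabled */
	}
}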
@@ -604,8 +577,11 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
atomic_dec(&local->iff_promiscs);
sdata->flags ^= IEEE80211_SDATA_PROMISC;
}
-
- dev_mc_sync(local->mdev, dev);
+ spin_lock_bh(&local->filter_lock);
+ __dev_addr_sync(&local->mc_list, &local->mc_count,
+ &dev->mc_list, &dev->mc_count);
+ spin_unlock_bh(&local->filter_lock);
+ ieee80211_queue_work(&local->hw, &local->reconfig_filter);
}
/*
@@ -652,11 +628,6 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
kfree_skb(sdata->u.ibss.presp);
break;
case NL80211_IFTYPE_STATION:
- kfree(sdata->u.mgd.extra_ie);
- kfree(sdata->u.mgd.assocreq_ies);
- kfree(sdata->u.mgd.assocresp_ies);
- kfree(sdata->u.mgd.sme_auth_ie);
- break;
case NL80211_IFTYPE_WDS:
case NL80211_IFTYPE_AP_VLAN:
case NL80211_IFTYPE_MONITOR:
@@ -695,7 +666,6 @@ static void ieee80211_if_setup(struct net_device *dev)
{
ether_setup(dev);
dev->netdev_ops = &ieee80211_dataif_ops;
- dev->wireless_handlers = &ieee80211_iw_handler_def;
dev->destructor = free_netdev;
}
@@ -784,6 +754,10 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
return 0;
}
+static struct device_type wiphy_type = {
+ .name = "wlan",
+};
+
int ieee80211_if_add(struct ieee80211_local *local, const char *name,
struct net_device **new_dev, enum nl80211_iftype type,
struct vif_params *params)
@@ -798,6 +772,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
name, ieee80211_if_setup);
if (!ndev)
return -ENOMEM;
+ dev_net_set(ndev, wiphy_net(local->hw.wiphy));
ndev->needed_headroom = local->tx_headroom +
4*6 /* four MAC addresses */
@@ -814,7 +789,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
memcpy(ndev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
- ndev->features |= NETIF_F_NETNS_LOCAL;
+ SET_NETDEV_DEVTYPE(ndev, &wiphy_type);
/* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */
sdata = netdev_priv(ndev);
@@ -931,7 +906,7 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
struct ieee80211_sub_if_data *sdata;
int count = 0;
- if (local->hw_scanning || local->sw_scanning)
+ if (local->scanning)
return ieee80211_idle_off(local, "scanning");
list_for_each_entry(sdata, &local->interfaces, list) {
@@ -939,7 +914,8 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
continue;
/* do not count disabled managed interfaces */
if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- sdata->u.mgd.state == IEEE80211_STA_MLME_DISABLED)
+ !sdata->u.mgd.associated &&
+ list_empty(&sdata->u.mgd.work_list))
continue;
/* do not count unused IBSS interfaces */
if (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index ce267565e18..659a42d529e 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -67,6 +67,8 @@ static DECLARE_WORK(todo_work, key_todo);
*
* @key: key to add to do item for
* @flag: todo flag(s)
+ *
+ * Must be called with IRQs or softirqs disabled.
*/
static void add_todo(struct ieee80211_key *key, u32 flag)
{
@@ -140,9 +142,9 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
ret = drv_set_key(key->local, SET_KEY, &sdata->vif, sta, &key->conf);
if (!ret) {
- spin_lock(&todo_lock);
+ spin_lock_bh(&todo_lock);
key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
- spin_unlock(&todo_lock);
+ spin_unlock_bh(&todo_lock);
}
if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP)
@@ -164,12 +166,12 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
if (!key || !key->local->ops->set_key)
return;
- spin_lock(&todo_lock);
+ spin_lock_bh(&todo_lock);
if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) {
- spin_unlock(&todo_lock);
+ spin_unlock_bh(&todo_lock);
return;
}
- spin_unlock(&todo_lock);
+ spin_unlock_bh(&todo_lock);
sta = get_sta_for_key(key);
sdata = key->sdata;
@@ -188,9 +190,9 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
wiphy_name(key->local->hw.wiphy),
key->conf.keyidx, sta ? sta->addr : bcast_addr, ret);
- spin_lock(&todo_lock);
+ spin_lock_bh(&todo_lock);
key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
- spin_unlock(&todo_lock);
+ spin_unlock_bh(&todo_lock);
}
static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata,
@@ -437,14 +439,14 @@ void ieee80211_key_link(struct ieee80211_key *key,
__ieee80211_key_replace(sdata, sta, old_key, key);
- spin_unlock_irqrestore(&sdata->local->key_lock, flags);
-
/* free old key later */
add_todo(old_key, KEY_FLAG_TODO_DELETE);
add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS);
if (netif_running(sdata->dev))
add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD);
+
+ spin_unlock_irqrestore(&sdata->local->key_lock, flags);
}
static void __ieee80211_key_free(struct ieee80211_key *key)
@@ -547,7 +549,7 @@ static void __ieee80211_key_todo(void)
*/
synchronize_rcu();
- spin_lock(&todo_lock);
+ spin_lock_bh(&todo_lock);
while (!list_empty(&todo_list)) {
key = list_first_entry(&todo_list, struct ieee80211_key, todo);
list_del_init(&key->todo);
@@ -558,7 +560,7 @@ static void __ieee80211_key_todo(void)
KEY_FLAG_TODO_HWACCEL_REMOVE |
KEY_FLAG_TODO_DELETE);
key->flags &= ~todoflags;
- spin_unlock(&todo_lock);
+ spin_unlock_bh(&todo_lock);
work_done = false;
@@ -591,9 +593,9 @@ static void __ieee80211_key_todo(void)
WARN_ON(!work_done);
- spin_lock(&todo_lock);
+ spin_lock_bh(&todo_lock);
}
- spin_unlock(&todo_lock);
+ spin_unlock_bh(&todo_lock);
}
void ieee80211_key_todo(void)
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 092a017b237..797f53942e5 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -50,9 +50,9 @@ struct ieee80211_tx_status_rtap_hdr {
} __attribute__ ((packed));
-/* must be called under mdev tx lock */
void ieee80211_configure_filter(struct ieee80211_local *local)
{
+ u64 mc;
unsigned int changed_flags;
unsigned int new_flags = 0;
@@ -62,7 +62,7 @@ void ieee80211_configure_filter(struct ieee80211_local *local)
if (atomic_read(&local->iff_allmultis))
new_flags |= FIF_ALLMULTI;
- if (local->monitors)
+ if (local->monitors || local->scanning)
new_flags |= FIF_BCN_PRBRESP_PROMISC;
if (local->fif_fcsfail)
@@ -77,77 +77,29 @@ void ieee80211_configure_filter(struct ieee80211_local *local)
if (local->fif_other_bss)
new_flags |= FIF_OTHER_BSS;
+ if (local->fif_pspoll)
+ new_flags |= FIF_PSPOLL;
+
+ spin_lock_bh(&local->filter_lock);
changed_flags = local->filter_flags ^ new_flags;
+ mc = drv_prepare_multicast(local, local->mc_count, local->mc_list);
+ spin_unlock_bh(&local->filter_lock);
+
/* be a bit nasty */
new_flags |= (1<<31);
- drv_configure_filter(local, changed_flags, &new_flags,
- local->mdev->mc_count,
- local->mdev->mc_list);
+ drv_configure_filter(local, changed_flags, &new_flags, mc);
WARN_ON(new_flags & (1<<31));
local->filter_flags = new_flags & ~(1<<31);
}
-/* master interface */
-
-static int header_parse_80211(const struct sk_buff *skb, unsigned char *haddr)
-{
- memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */
- return ETH_ALEN;
-}
-
-static const struct header_ops ieee80211_header_ops = {
- .create = eth_header,
- .parse = header_parse_80211,
- .rebuild = eth_rebuild_header,
- .cache = eth_header_cache,
- .cache_update = eth_header_cache_update,
-};
-
-static int ieee80211_master_open(struct net_device *dev)
+static void ieee80211_reconfig_filter(struct work_struct *work)
{
- struct ieee80211_master_priv *mpriv = netdev_priv(dev);
- struct ieee80211_local *local = mpriv->local;
- struct ieee80211_sub_if_data *sdata;
- int res = -EOPNOTSUPP;
-
- /* we hold the RTNL here so can safely walk the list */
- list_for_each_entry(sdata, &local->interfaces, list) {
- if (netif_running(sdata->dev)) {
- res = 0;
- break;
- }
- }
-
- if (res)
- return res;
-
- netif_tx_start_all_queues(local->mdev);
-
- return 0;
-}
-
-static int ieee80211_master_stop(struct net_device *dev)
-{
- struct ieee80211_master_priv *mpriv = netdev_priv(dev);
- struct ieee80211_local *local = mpriv->local;
- struct ieee80211_sub_if_data *sdata;
-
- /* we hold the RTNL here so can safely walk the list */
- list_for_each_entry(sdata, &local->interfaces, list)
- if (netif_running(sdata->dev))
- dev_close(sdata->dev);
-
- return 0;
-}
-
-static void ieee80211_master_set_multicast_list(struct net_device *dev)
-{
- struct ieee80211_master_priv *mpriv = netdev_priv(dev);
- struct ieee80211_local *local = mpriv->local;
+ struct ieee80211_local *local =
+ container_of(work, struct ieee80211_local, reconfig_filter);
ieee80211_configure_filter(local);
}
@@ -259,7 +211,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
}
if (changed & BSS_CHANGED_BEACON_ENABLED) {
- if (local->sw_scanning) {
+ if (local->quiescing || !netif_running(sdata->dev) ||
+ test_bit(SCAN_SW_SCANNING, &local->scanning)) {
sdata->vif.bss_conf.enable_beacon = false;
} else {
/*
@@ -288,9 +241,6 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
drv_bss_info_changed(local, &sdata->vif,
&sdata->vif.bss_conf, changed);
-
- /* DEPRECATED */
- local->hw.conf.beacon_int = sdata->vif.bss_conf.beacon_int;
}
u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
@@ -310,7 +260,6 @@ void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int tmp;
- skb->dev = local->mdev;
skb->pkt_type = IEEE80211_TX_STATUS_MSG;
skb_queue_tail(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS ?
&local->skb_queue : &local->skb_queue_unreliable, skb);
@@ -330,19 +279,16 @@ static void ieee80211_tasklet_handler(unsigned long data)
{
struct ieee80211_local *local = (struct ieee80211_local *) data;
struct sk_buff *skb;
- struct ieee80211_rx_status rx_status;
struct ieee80211_ra_tid *ra_tid;
while ((skb = skb_dequeue(&local->skb_queue)) ||
(skb = skb_dequeue(&local->skb_queue_unreliable))) {
switch (skb->pkt_type) {
case IEEE80211_RX_MSG:
- /* status is in skb->cb */
- memcpy(&rx_status, skb->cb, sizeof(rx_status));
/* Clear skb->pkt_type in order to not confuse kernel
* netstack. */
skb->pkt_type = 0;
- __ieee80211_rx(local_to_hw(local), skb, &rx_status);
+ ieee80211_rx(local_to_hw(local), skb);
break;
case IEEE80211_TX_STATUS_MSG:
skb->pkt_type = 0;
@@ -375,6 +321,31 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ /*
+ * XXX: This is temporary!
+ *
+ * The problem here is that when we get here, the driver will
+ * quite likely have pretty much overwritten info->control by
+ * using info->driver_data or info->rate_driver_data. Thus,
+ * when passing out the frame to the driver again, we would be
+ * passing completely bogus data since the driver would then
+ * expect a properly filled info->control. In mac80211 itself
+ * the same problem occurs, since we need info->control.vif
+ * internally.
+ *
+ * To fix this, we should send the frame through TX processing
+ * again. However, it's not that simple, since the frame will
+ * have been software-encrypted (if applicable) already, and
+ * encrypting it again doesn't do much good. So to properly do
+ * that, we not only have to skip the actual 'raw' encryption
+ * (key selection etc. still has to be done!) but also the
+ * sequence number assignment since that impacts the crypto
+ * encapsulation, of course.
+ *
+ * Hence, for now, fix the bug by just dropping the frame.
+ */
+ goto drop;
+
sta->tx_filtered_count++;
/*
@@ -428,6 +399,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
return;
}
+ drop:
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (net_ratelimit())
printk(KERN_DEBUG "%s: dropped TX filtered frame, "
@@ -510,6 +482,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
}
rate_control_tx_status(local, sband, sta, skb);
+ if (ieee80211_vif_is_mesh(&sta->sdata->vif))
+ ieee80211s_update_metric(local, sta, skb);
}
rcu_read_unlock();
@@ -685,6 +659,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
if (!wiphy)
return NULL;
+ wiphy->netnsok = true;
wiphy->privid = mac80211_wiphy_privid;
/* Yes, putting cfg80211_bss into ieee80211_bss is a hack */
@@ -711,7 +686,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
local->hw.max_rates = 1;
local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
- local->hw.conf.radio_enabled = true;
local->user_power_level = -1;
INIT_LIST_HEAD(&local->interfaces);
@@ -719,13 +693,15 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
mutex_init(&local->scan_mtx);
spin_lock_init(&local->key_lock);
-
+ spin_lock_init(&local->filter_lock);
spin_lock_init(&local->queue_stop_reason_lock);
INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
INIT_WORK(&local->restart_work, ieee80211_restart_work);
+ INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
+
INIT_WORK(&local->dynamic_ps_enable_work,
ieee80211_dynamic_ps_enable_work);
INIT_WORK(&local->dynamic_ps_disable_work,
@@ -739,12 +715,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
skb_queue_head_init(&local->pending[i]);
tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
(unsigned long)local);
- tasklet_disable(&local->tx_pending_tasklet);
tasklet_init(&local->tasklet,
ieee80211_tasklet_handler,
(unsigned long) local);
- tasklet_disable(&local->tasklet);
skb_queue_head_init(&local->skb_queue);
skb_queue_head_init(&local->skb_queue_unreliable);
@@ -755,30 +729,11 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
}
EXPORT_SYMBOL(ieee80211_alloc_hw);
-static const struct net_device_ops ieee80211_master_ops = {
- .ndo_start_xmit = ieee80211_master_start_xmit,
- .ndo_open = ieee80211_master_open,
- .ndo_stop = ieee80211_master_stop,
- .ndo_set_multicast_list = ieee80211_master_set_multicast_list,
- .ndo_select_queue = ieee80211_select_queue,
-};
-
-static void ieee80211_master_setup(struct net_device *mdev)
-{
- mdev->type = ARPHRD_IEEE80211;
- mdev->netdev_ops = &ieee80211_master_ops;
- mdev->header_ops = &ieee80211_header_ops;
- mdev->tx_queue_len = 1000;
- mdev->addr_len = ETH_ALEN;
-}
-
int ieee80211_register_hw(struct ieee80211_hw *hw)
{
struct ieee80211_local *local = hw_to_local(hw);
int result;
enum ieee80211_band band;
- struct net_device *mdev;
- struct ieee80211_master_priv *mpriv;
int channels, i, j, max_bitrates;
bool supp_ht;
static const u32 cipher_suites[] = {
@@ -818,9 +773,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
supp_ht = supp_ht || sband->ht_cap.ht_supported;
}
- local->int_scan_req.n_channels = channels;
- local->int_scan_req.channels = kzalloc(sizeof(void *) * channels, GFP_KERNEL);
- if (!local->int_scan_req.channels)
+ local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) +
+ sizeof(void *) * channels, GFP_KERNEL);
+ if (!local->int_scan_req)
return -ENOMEM;
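
A minimal userspace sketch (not part of the patch) of the single-allocation idiom the new int_scan_req code uses: one allocation covers the request structure plus a trailing array of channel pointers. The struct and field names below are illustrative, not cfg80211's actual definitions.

/* Standalone model of the allocation above: header plus trailing array. */
#include <stdio.h>
#include <stdlib.h>

struct fake_channel { int center_freq; };

struct fake_scan_request {
	int n_channels;
	struct fake_channel *channels[];	/* trailing array of pointers */
};

int main(void)
{
	int channels = 14;
	struct fake_scan_request *req;

	/* one calloc() stands in for the single kzalloc() in the patch */
	req = calloc(1, sizeof(*req) + sizeof(void *) * channels);
	if (!req)
		return 1;
	req->n_channels = channels;
	printf("one allocation of %zu bytes holds the header plus %d pointers\n",
	       sizeof(*req) + sizeof(void *) * channels, channels);
	free(req);
	return 0;
}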
/* if low-level driver supports AP, we also support VLAN */
@@ -877,19 +832,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (hw->queues > IEEE80211_MAX_QUEUES)
hw->queues = IEEE80211_MAX_QUEUES;
- mdev = alloc_netdev_mq(sizeof(struct ieee80211_master_priv),
- "wmaster%d", ieee80211_master_setup,
- hw->queues);
- if (!mdev)
- goto fail_mdev_alloc;
-
- mpriv = netdev_priv(mdev);
- mpriv->local = local;
- local->mdev = mdev;
-
- local->hw.workqueue =
+ local->workqueue =
create_singlethread_workqueue(wiphy_name(local->hw.wiphy));
- if (!local->hw.workqueue) {
+ if (!local->workqueue) {
result = -ENOMEM;
goto fail_workqueue;
}
@@ -921,17 +866,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
}
rtnl_lock();
- result = dev_alloc_name(local->mdev, local->mdev->name);
- if (result < 0)
- goto fail_dev;
-
- memcpy(local->mdev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
- SET_NETDEV_DEV(local->mdev, wiphy_dev(local->hw.wiphy));
- local->mdev->features |= NETIF_F_NETNS_LOCAL;
-
- result = register_netdevice(local->mdev);
- if (result < 0)
- goto fail_dev;
result = ieee80211_init_rate_ctrl_alg(local,
hw->rate_control_algorithm);
@@ -956,13 +890,13 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
/* alloc internal scan request */
i = 0;
- local->int_scan_req.ssids = &local->scan_ssid;
- local->int_scan_req.n_ssids = 1;
+ local->int_scan_req->ssids = &local->scan_ssid;
+ local->int_scan_req->n_ssids = 1;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
if (!hw->wiphy->bands[band])
continue;
for (j = 0; j < hw->wiphy->bands[band]->n_channels; j++) {
- local->int_scan_req.channels[i] =
+ local->int_scan_req->channels[i] =
&hw->wiphy->bands[band]->channels[j];
i++;
}
@@ -984,23 +918,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
ieee80211_led_exit(local);
ieee80211_remove_interfaces(local);
fail_rate:
- unregister_netdevice(local->mdev);
- local->mdev = NULL;
- fail_dev:
rtnl_unlock();
ieee80211_wep_free(local);
fail_wep:
sta_info_stop(local);
fail_sta_info:
debugfs_hw_del(local);
- destroy_workqueue(local->hw.workqueue);
+ destroy_workqueue(local->workqueue);
fail_workqueue:
- if (local->mdev)
- free_netdev(local->mdev);
- fail_mdev_alloc:
wiphy_unregister(local->hw.wiphy);
fail_wiphy_register:
- kfree(local->int_scan_req.channels);
+ kfree(local->int_scan_req);
return result;
}
EXPORT_SYMBOL(ieee80211_register_hw);
@@ -1022,15 +950,12 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
* because the driver cannot be handing us frames any
* more and the tasklet is killed.
*/
-
- /* First, we remove all virtual interfaces. */
ieee80211_remove_interfaces(local);
- /* then, finally, remove the master interface */
- unregister_netdevice(local->mdev);
-
rtnl_unlock();
+ cancel_work_sync(&local->reconfig_filter);
+
ieee80211_clear_tx_pending(local);
sta_info_stop(local);
rate_control_deinitialize(local);
@@ -1043,12 +968,11 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
skb_queue_purge(&local->skb_queue);
skb_queue_purge(&local->skb_queue_unreliable);
- destroy_workqueue(local->hw.workqueue);
+ destroy_workqueue(local->workqueue);
wiphy_unregister(local->hw.wiphy);
ieee80211_wep_free(local);
ieee80211_led_exit(local);
- free_netdev(local->mdev);
- kfree(local->int_scan_req.channels);
+ kfree(local->int_scan_req);
}
EXPORT_SYMBOL(ieee80211_unregister_hw);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 11cf45bce38..f7364e56f1e 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -18,8 +18,11 @@
#define PP_OFFSET 1 /* Path Selection Protocol */
#define PM_OFFSET 5 /* Path Selection Metric */
#define CC_OFFSET 9 /* Congestion Control Mode */
-#define CAPAB_OFFSET 17
-#define ACCEPT_PLINKS 0x80
+#define SP_OFFSET 13 /* Synchronization Protocol */
+#define AUTH_OFFSET 17 /* Authentication Protocol */
+#define CAPAB_OFFSET 22
+#define CAPAB_ACCEPT_PLINKS 0x80
+#define CAPAB_FORWARDING 0x10
#define TMR_RUNNING_HK 0
#define TMR_RUNNING_MP 1
@@ -47,14 +50,14 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data)
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- ifmsh->housekeeping = true;
+ ifmsh->wrkq_flags |= MESH_WORK_HOUSEKEEPING;
if (local->quiescing) {
set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
return;
}
- queue_work(local->hw.workqueue, &ifmsh->work);
+ ieee80211_queue_work(&local->hw, &ifmsh->work);
}
/**
@@ -84,7 +87,9 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat
memcmp(ifmsh->mesh_id, ie->mesh_id, ie->mesh_id_len) == 0 &&
memcmp(ifmsh->mesh_pp_id, ie->mesh_config + PP_OFFSET, 4) == 0 &&
memcmp(ifmsh->mesh_pm_id, ie->mesh_config + PM_OFFSET, 4) == 0 &&
- memcmp(ifmsh->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0)
+ memcmp(ifmsh->mesh_cc_id, ie->mesh_config + CC_OFFSET, 4) == 0 &&
+ memcmp(ifmsh->mesh_sp_id, ie->mesh_config + SP_OFFSET, 4) == 0 &&
+ memcmp(ifmsh->mesh_auth_id, ie->mesh_config + AUTH_OFFSET, 4) == 0)
return true;
return false;
@@ -97,7 +102,7 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat
*/
bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie)
{
- return (*(ie->mesh_config + CAPAB_OFFSET) & ACCEPT_PLINKS) != 0;
+ return (*(ie->mesh_config + CAPAB_OFFSET) & CAPAB_ACCEPT_PLINKS) != 0;
}
/**
@@ -123,11 +128,18 @@ void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
void mesh_ids_set_default(struct ieee80211_if_mesh *sta)
{
- u8 def_id[4] = {0x00, 0x0F, 0xAC, 0xff};
-
- memcpy(sta->mesh_pp_id, def_id, 4);
- memcpy(sta->mesh_pm_id, def_id, 4);
- memcpy(sta->mesh_cc_id, def_id, 4);
+ u8 oui[3] = {0x00, 0x0F, 0xAC};
+
+ memcpy(sta->mesh_pp_id, oui, sizeof(oui));
+ memcpy(sta->mesh_pm_id, oui, sizeof(oui));
+ memcpy(sta->mesh_cc_id, oui, sizeof(oui));
+ memcpy(sta->mesh_sp_id, oui, sizeof(oui));
+ memcpy(sta->mesh_auth_id, oui, sizeof(oui));
+ sta->mesh_pp_id[sizeof(oui)] = 0;
+ sta->mesh_pm_id[sizeof(oui)] = 0;
+ sta->mesh_cc_id[sizeof(oui)] = 0xff;
+ sta->mesh_sp_id[sizeof(oui)] = 0xff;
+ sta->mesh_auth_id[sizeof(oui)] = 0x0;
}
int mesh_rmc_init(struct ieee80211_sub_if_data *sdata)
@@ -245,7 +257,7 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
if (sdata->u.mesh.mesh_id_len)
memcpy(pos, sdata->u.mesh.mesh_id, sdata->u.mesh.mesh_id_len);
- pos = skb_put(skb, 21);
+ pos = skb_put(skb, 2 + IEEE80211_MESH_CONFIG_LEN);
*pos++ = WLAN_EID_MESH_CONFIG;
*pos++ = IEEE80211_MESH_CONFIG_LEN;
/* Version */
@@ -263,15 +275,22 @@ void mesh_mgmt_ies_add(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
memcpy(pos, sdata->u.mesh.mesh_cc_id, 4);
pos += 4;
- /* Channel precedence:
- * Not running simple channel unification protocol
- */
- memset(pos, 0x00, 4);
+ /* Synchronization protocol identifier */
+ memcpy(pos, sdata->u.mesh.mesh_sp_id, 4);
pos += 4;
+ /* Authentication Protocol identifier */
+ memcpy(pos, sdata->u.mesh.mesh_auth_id, 4);
+ pos += 4;
+
+ /* Mesh Formation Info */
+ memset(pos, 0x00, 1);
+ pos += 1;
+
/* Mesh capability */
sdata->u.mesh.accepting_plinks = mesh_plink_availables(sdata);
- *pos++ = sdata->u.mesh.accepting_plinks ? ACCEPT_PLINKS : 0x00;
+ *pos = CAPAB_FORWARDING;
+ *pos++ |= sdata->u.mesh.accepting_plinks ? CAPAB_ACCEPT_PLINKS : 0x00;
*pos++ = 0x00;
return;
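
For orientation, a small standalone program (not part of the patch) that prints the Mesh Configuration element body layout implied by the *_OFFSET defines and the builder above. The 24-byte total is an inference from the writes in mesh_mgmt_ies_add() and is assumed to match IEEE80211_MESH_CONFIG_LEN.

/* Standalone sketch: field offsets of the Mesh Configuration element body. */
#include <stdio.h>

int main(void)
{
	int off = 0;

	printf("version            @ %2d (1 byte)\n", off); off += 1;
	printf("path selection id  @ %2d (4 bytes)\n", off); off += 4;	/* PP_OFFSET = 1 */
	printf("path metric id     @ %2d (4 bytes)\n", off); off += 4;	/* PM_OFFSET = 5 */
	printf("congestion ctrl id @ %2d (4 bytes)\n", off); off += 4;	/* CC_OFFSET = 9 */
	printf("synchronization id @ %2d (4 bytes)\n", off); off += 4;	/* SP_OFFSET = 13 */
	printf("authentication id  @ %2d (4 bytes)\n", off); off += 4;	/* AUTH_OFFSET = 17 */
	printf("formation info     @ %2d (1 byte)\n", off); off += 1;	/* 21 */
	printf("capability         @ %2d (2 bytes)\n", off); off += 2;	/* CAPAB_OFFSET = 22 */
	printf("total body length  = %d bytes (inferred)\n", off);
	return 0;
}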
@@ -320,30 +339,6 @@ struct mesh_table *mesh_table_alloc(int size_order)
return newtbl;
}
-static void __mesh_table_free(struct mesh_table *tbl)
-{
- kfree(tbl->hash_buckets);
- kfree(tbl->hashwlock);
- kfree(tbl);
-}
-
-void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
-{
- struct hlist_head *mesh_hash;
- struct hlist_node *p, *q;
- int i;
-
- mesh_hash = tbl->hash_buckets;
- for (i = 0; i <= tbl->hash_mask; i++) {
- spin_lock(&tbl->hashwlock[i]);
- hlist_for_each_safe(p, q, &mesh_hash[i]) {
- tbl->free_node(p, free_leafs);
- atomic_dec(&tbl->entries);
- }
- spin_unlock(&tbl->hashwlock[i]);
- }
- __mesh_table_free(tbl);
-}
static void ieee80211_mesh_path_timer(unsigned long data)
{
@@ -357,63 +352,79 @@ static void ieee80211_mesh_path_timer(unsigned long data)
return;
}
- queue_work(local->hw.workqueue, &ifmsh->work);
+ ieee80211_queue_work(&local->hw, &ifmsh->work);
}
-struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
-{
- struct mesh_table *newtbl;
- struct hlist_head *oldhash;
- struct hlist_node *p, *q;
- int i;
-
- if (atomic_read(&tbl->entries)
- < tbl->mean_chain_len * (tbl->hash_mask + 1))
- goto endgrow;
-
- newtbl = mesh_table_alloc(tbl->size_order + 1);
- if (!newtbl)
- goto endgrow;
-
- newtbl->free_node = tbl->free_node;
- newtbl->mean_chain_len = tbl->mean_chain_len;
- newtbl->copy_node = tbl->copy_node;
- atomic_set(&newtbl->entries, atomic_read(&tbl->entries));
-
- oldhash = tbl->hash_buckets;
- for (i = 0; i <= tbl->hash_mask; i++)
- hlist_for_each(p, &oldhash[i])
- if (tbl->copy_node(p, newtbl) < 0)
- goto errcopy;
-
- return newtbl;
-
-errcopy:
- for (i = 0; i <= newtbl->hash_mask; i++) {
- hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
- tbl->free_node(p, 0);
+/**
+ * ieee80211_fill_mesh_addresses - fill addresses of a locally originated mesh frame
+ * @hdr: 802.11 frame header
+ * @fc: frame control field
+ * @meshda: destination address in the mesh
+ * @meshsa: source address in the mesh. Same as TA, as the frame is
+ * locally originated.
+ *
+ * Return the length of the 802.11 header (does not include the mesh control header)
+ */
+int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
+				   char *meshda, char *meshsa)
+{
+ if (is_multicast_ether_addr(meshda)) {
+ *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
+ /* DA TA SA */
+ memcpy(hdr->addr1, meshda, ETH_ALEN);
+ memcpy(hdr->addr2, meshsa, ETH_ALEN);
+ memcpy(hdr->addr3, meshsa, ETH_ALEN);
+ return 24;
+ } else {
+ *fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS |
+ IEEE80211_FCTL_TODS);
+ /* RA TA DA SA */
+ memset(hdr->addr1, 0, ETH_ALEN); /* RA is resolved later */
+ memcpy(hdr->addr2, meshsa, ETH_ALEN);
+ memcpy(hdr->addr3, meshda, ETH_ALEN);
+ memcpy(hdr->addr4, meshsa, ETH_ALEN);
+ return 30;
}
- __mesh_table_free(newtbl);
-endgrow:
- return NULL;
}
/**
* ieee80211_new_mesh_header - create a new mesh header
* @meshhdr: uninitialized mesh header
* @sdata: mesh interface to be used
+ * @addr4: addr4 of the mesh frame (1st in ae header)
+ * may be NULL
+ * @addr5: addr5 of the mesh frame (1st or 2nd in ae header)
+ * may be NULL unless addr6 is present
+ * @addr6: addr6 of the mesh frame (2nd or 3rd in ae header)
+ * may be NULL unless addr5 is present
*
* Return the header length.
*/
int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
- struct ieee80211_sub_if_data *sdata)
+ struct ieee80211_sub_if_data *sdata, char *addr4,
+ char *addr5, char *addr6)
{
- meshhdr->flags = 0;
+ int aelen = 0;
+ memset(meshhdr, 0, sizeof(*meshhdr));
meshhdr->ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &meshhdr->seqnum);
sdata->u.mesh.mesh_seqnum++;
-
- return 6;
+ if (addr4) {
+ meshhdr->flags |= MESH_FLAGS_AE_A4;
+ aelen += ETH_ALEN;
+ memcpy(meshhdr->eaddr1, addr4, ETH_ALEN);
+ }
+ if (addr5 && addr6) {
+ meshhdr->flags |= MESH_FLAGS_AE_A5_A6;
+ aelen += 2 * ETH_ALEN;
+ if (!addr4) {
+ memcpy(meshhdr->eaddr1, addr5, ETH_ALEN);
+ memcpy(meshhdr->eaddr2, addr6, ETH_ALEN);
+ } else {
+ memcpy(meshhdr->eaddr2, addr5, ETH_ALEN);
+ memcpy(meshhdr->eaddr3, addr6, ETH_ALEN);
+ }
+ }
+ return 6 + aelen;
}
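
A quick standalone tally (not part of the patch) of the header sizes these two helpers return; the values simply restate the 24/30-byte results of ieee80211_fill_mesh_addresses() and the 6 + aelen result of ieee80211_new_mesh_header(). Which combinations the proxying code actually uses is outside this patch.

/* Standalone arithmetic: 802.11 header and mesh control header lengths. */
#include <stdio.h>

int main(void)
{
	printf("802.11 header: %d bytes (mesh multicast, 3 addresses)\n", 24);
	printf("802.11 header: %d bytes (mesh unicast, 4 addresses)\n", 30);
	printf("mesh control header: %d bytes (no address extension)\n", 6);
	printf("mesh control header: %d bytes (addr4 present)\n", 6 + 6);
	printf("mesh control header: %d bytes (addr5 and addr6 present)\n", 6 + 12);
	return 0;
}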
static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
@@ -433,7 +444,6 @@ static void ieee80211_mesh_housekeeping(struct ieee80211_sub_if_data *sdata,
if (free_plinks != sdata->u.mesh.accepting_plinks)
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
- ifmsh->housekeeping = false;
mod_timer(&ifmsh->housekeeping_timer,
round_jiffies(jiffies + IEEE80211_MESH_HOUSEKEEPING_INTERVAL));
}
@@ -470,10 +480,12 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct ieee80211_local *local = sdata->local;
- ifmsh->housekeeping = true;
- queue_work(local->hw.workqueue, &ifmsh->work);
+ ifmsh->wrkq_flags |= MESH_WORK_HOUSEKEEPING;
+ ieee80211_queue_work(&local->hw, &ifmsh->work);
+ sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
- BSS_CHANGED_BEACON_ENABLED);
+ BSS_CHANGED_BEACON_ENABLED |
+ BSS_CHANGED_BEACON_INT);
}
void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
@@ -568,7 +580,7 @@ static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
ifmsh = &sdata->u.mesh;
- rx_status = (struct ieee80211_rx_status *) skb->cb;
+ rx_status = IEEE80211_SKB_RXCB(skb);
mgmt = (struct ieee80211_mgmt *) skb->data;
stype = le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE;
@@ -597,7 +609,7 @@ static void ieee80211_mesh_work(struct work_struct *work)
if (!netif_running(sdata->dev))
return;
- if (local->sw_scanning || local->hw_scanning)
+ if (local->scanning)
return;
while ((skb = skb_dequeue(&ifmsh->skb_queue)))
@@ -608,7 +620,13 @@ static void ieee80211_mesh_work(struct work_struct *work)
ifmsh->last_preq + msecs_to_jiffies(ifmsh->mshcfg.dot11MeshHWMPpreqMinInterval)))
mesh_path_start_discovery(sdata);
- if (ifmsh->housekeeping)
+ if (test_and_clear_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags))
+ mesh_mpath_table_grow();
+
+ if (test_and_clear_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags))
+ mesh_mpp_table_grow();
+
+ if (test_and_clear_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags))
ieee80211_mesh_housekeeping(sdata, ifmsh);
}
@@ -619,7 +637,7 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list)
if (ieee80211_vif_is_mesh(&sdata->vif))
- queue_work(local->hw.workqueue, &sdata->u.mesh.work);
+ ieee80211_queue_work(&local->hw, &sdata->u.mesh.work);
rcu_read_unlock();
}
@@ -671,8 +689,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
}
ieee80211_rx_result
-ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
- struct ieee80211_rx_status *rx_status)
+ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
@@ -686,12 +703,14 @@ ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
fc = le16_to_cpu(mgmt->frame_control);
switch (fc & IEEE80211_FCTL_STYPE) {
+ case IEEE80211_STYPE_ACTION:
+ if (skb->len < IEEE80211_MIN_ACTION_SIZE)
+ return RX_DROP_MONITOR;
+ /* fall through */
case IEEE80211_STYPE_PROBE_RESP:
case IEEE80211_STYPE_BEACON:
- case IEEE80211_STYPE_ACTION:
- memcpy(skb->cb, rx_status, sizeof(*rx_status));
skb_queue_tail(&ifmsh->skb_queue, skb);
- queue_work(local->hw.workqueue, &ifmsh->work);
+ ieee80211_queue_work(&local->hw, &ifmsh->work);
return RX_QUEUED;
}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index c7d72819cdd..dd1c19319f0 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -44,6 +44,23 @@ enum mesh_path_flags {
};
/**
+ * enum mesh_deferred_task_flags - mac80211 mesh deferred tasks
+ *
+ * @MESH_WORK_HOUSEKEEPING: run the periodic mesh housekeeping tasks
+ * @MESH_WORK_GROW_MPATH_TABLE: the mesh path table is full and needs
+ * to grow.
+ * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to
+ * grow.
+ */
+enum mesh_deferred_task_flags {
+ MESH_WORK_HOUSEKEEPING,
+ MESH_WORK_GROW_MPATH_TABLE,
+ MESH_WORK_GROW_MPP_TABLE,
+};
+
+/**
* struct mesh_path - mac80211 mesh path structure
*
* @dst: mesh path destination mac address
@@ -61,7 +78,7 @@ enum mesh_path_flags {
* retry
* @discovery_retries: number of discovery retries
* @flags: mesh path flags, as specified on &enum mesh_path_flags
- * @state_lock: mesh pat state lock
+ * @state_lock: mesh path state lock
*
*
* The combination of dst and sdata is unique in the mesh path table. Since the
@@ -174,6 +191,7 @@ struct mesh_rmc {
*/
#define MESH_PATH_REFRESH_TIME 1000
#define MESH_MIN_DISCOVERY_TIMEOUT (2 * MESH_DIAM_TRAVERSAL_TIME)
+#define MESH_DEFAULT_BEACON_INTERVAL 1000 /* in 1024 us units */
#define MESH_MAX_PREQ_RETRIES 4
#define MESH_PATH_EXPIRE (600 * HZ)
@@ -193,8 +211,11 @@ struct mesh_rmc {
/* Public interfaces */
/* Various */
+int ieee80211_fill_mesh_addresses(struct ieee80211_hdr *hdr, __le16 *fc,
+ char *da, char *sa);
int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
- struct ieee80211_sub_if_data *sdata);
+ struct ieee80211_sub_if_data *sdata, char *addr4,
+ char *addr5, char *addr6);
int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr,
struct ieee80211_sub_if_data *sdata);
bool mesh_matches_local(struct ieee802_11_elems *ie,
@@ -205,11 +226,12 @@ void mesh_mgmt_ies_add(struct sk_buff *skb,
void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
void ieee80211s_init(void);
+void ieee80211s_update_metric(struct ieee80211_local *local,
+ struct sta_info *stainfo, struct sk_buff *skb);
void ieee80211s_stop(void);
void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
ieee80211_rx_result
-ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
- struct ieee80211_rx_status *rx_status);
+ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
@@ -247,7 +269,8 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
/* Mesh tables */
struct mesh_table *mesh_table_alloc(int size_order);
void mesh_table_free(struct mesh_table *tbl, bool free_leafs);
-struct mesh_table *mesh_table_grow(struct mesh_table *tbl);
+void mesh_mpath_table_grow(void);
+void mesh_mpp_table_grow(void);
u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
struct mesh_table *tbl);
/* Mesh paths */
@@ -266,6 +289,8 @@ void mesh_path_discard_frame(struct sk_buff *skb,
void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
void mesh_path_restart(struct ieee80211_sub_if_data *sdata);
+extern int mesh_paths_generation;
+
#ifdef CONFIG_MAC80211_MESH
extern int mesh_allocated;
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index f49ef288e2e..e12a786e26b 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -201,6 +201,24 @@ int mesh_path_error_tx(u8 *dst, __le32 dst_dsn, u8 *ra,
return 0;
}
+void ieee80211s_update_metric(struct ieee80211_local *local,
+ struct sta_info *stainfo, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ int failed;
+
+ if (!ieee80211_is_data(hdr->frame_control))
+ return;
+
+ failed = !(txinfo->flags & IEEE80211_TX_STAT_ACK);
+
+ /* moving average, scaled to 100 */
+ stainfo->fail_avg = ((80 * stainfo->fail_avg + 5) / 100 + 20 * failed);
+ if (stainfo->fail_avg > 95)
+ mesh_plink_broken(stainfo);
+}
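
A standalone sketch (not mac80211 code) of how the fail_avg update above behaves: a moving average scaled to 0..100, which a run of failed frames pushes past the 95 threshold that triggers mesh_plink_broken(), and which a run of ACKed frames decays again. The iteration counts are illustrative only.

/* Standalone model of the scaled moving average used for fail_avg. */
#include <stdio.h>

int main(void)
{
	int fail_avg = 0, i;

	/* every frame fails: fail_avg creeps towards 100 */
	for (i = 1; i <= 30; i++) {
		fail_avg = (80 * fail_avg + 5) / 100 + 20 * 1;
		if (fail_avg > 95) {
			printf("threshold crossed after %d failures (fail_avg=%d)\n",
			       i, fail_avg);
			break;
		}
	}

	/* a run of ACKed frames decays the average again */
	for (i = 1; i <= 5; i++)
		fail_avg = (80 * fail_avg + 5) / 100 + 20 * 0;
	printf("after 5 ACKed frames: fail_avg=%d\n", fail_avg);
	return 0;
}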
+
static u32 airtime_link_metric_get(struct ieee80211_local *local,
struct sta_info *sta)
{
@@ -397,7 +415,8 @@ static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
- u8 *preq_elem, u32 metric) {
+ u8 *preq_elem, u32 metric)
+{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_path *mpath;
u8 *dst_addr, *orig_addr;
@@ -430,7 +449,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
if ((!(mpath->flags & MESH_PATH_DSN_VALID)) ||
DSN_LT(mpath->dsn, dst_dsn)) {
mpath->dsn = dst_dsn;
- mpath->flags &= MESH_PATH_DSN_VALID;
+ mpath->flags |= MESH_PATH_DSN_VALID;
} else if ((!(dst_flags & MP_F_DO)) &&
(mpath->flags & MESH_PATH_ACTIVE)) {
reply = true;
@@ -478,6 +497,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata,
hopcount, ttl, cpu_to_le32(lifetime),
cpu_to_le32(metric), cpu_to_le32(preq_id),
sdata);
+ ifmsh->mshstats.fwded_mcast++;
ifmsh->mshstats.fwded_frames++;
}
}
@@ -536,6 +556,8 @@ static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
cpu_to_le32(lifetime), cpu_to_le32(metric),
0, sdata);
rcu_read_unlock();
+
+ sdata->u.mesh.mshstats.fwded_unicast++;
sdata->u.mesh.mshstats.fwded_frames++;
return;
@@ -660,14 +682,14 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
spin_unlock(&ifmsh->mesh_preq_queue_lock);
if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata)))
- queue_work(sdata->local->hw.workqueue, &ifmsh->work);
+ ieee80211_queue_work(&sdata->local->hw, &ifmsh->work);
else if (time_before(jiffies, ifmsh->last_preq)) {
/* avoid long wait if did not send preqs for a long time
* and jiffies wrapped around
*/
ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1;
- queue_work(sdata->local->hw.workqueue, &ifmsh->work);
+ ieee80211_queue_work(&sdata->local->hw, &ifmsh->work);
} else
mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq +
min_preq_int_jiff(sdata));
@@ -686,11 +708,11 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
u8 ttl, dst_flags;
u32 lifetime;
- spin_lock(&ifmsh->mesh_preq_queue_lock);
+ spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
if (!ifmsh->preq_queue_len ||
time_before(jiffies, ifmsh->last_preq +
min_preq_int_jiff(sdata))) {
- spin_unlock(&ifmsh->mesh_preq_queue_lock);
+ spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
return;
}
@@ -698,7 +720,7 @@ void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
struct mesh_preq_queue, list);
list_del(&preq_node->list);
--ifmsh->preq_queue_len;
- spin_unlock(&ifmsh->mesh_preq_queue_lock);
+ spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
rcu_read_lock();
mpath = mesh_path_lookup(preq_node->dst, sdata);
@@ -784,7 +806,6 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
mesh_path_add(dst_addr, sdata);
mpath = mesh_path_lookup(dst_addr, sdata);
if (!mpath) {
- dev_kfree_skb(skb);
sdata->u.mesh.mshstats.dropped_frames_no_route++;
err = -ENOSPC;
goto endlookup;
@@ -792,7 +813,7 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
}
if (mpath->flags & MESH_PATH_ACTIVE) {
- if (time_after(jiffies, mpath->exp_time -
+ if (time_after(jiffies, mpath->exp_time +
msecs_to_jiffies(sdata->u.mesh.mshcfg.path_refresh_time))
&& !memcmp(sdata->dev->dev_addr, hdr->addr4,
ETH_ALEN)
@@ -804,17 +825,17 @@ int mesh_nexthop_lookup(struct sk_buff *skb,
memcpy(hdr->addr1, mpath->next_hop->sta.addr,
ETH_ALEN);
} else {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (!(mpath->flags & MESH_PATH_RESOLVING)) {
/* Start discovery only if it is not running yet */
mesh_queue_preq(mpath, PREQ_Q_F_START);
}
if (skb_queue_len(&mpath->frame_queue) >=
- MESH_FRAME_QUEUE_LEN) {
- skb_to_free = mpath->frame_queue.next;
- skb_unlink(skb_to_free, &mpath->frame_queue);
- }
+ MESH_FRAME_QUEUE_LEN)
+ skb_to_free = skb_dequeue(&mpath->frame_queue);
+ info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
skb_queue_tail(&mpath->frame_queue, skb);
if (skb_to_free)
mesh_path_discard_frame(skb_to_free, sdata);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 479597e8858..751c4d0e2b3 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -38,6 +38,71 @@ struct mpath_node {
static struct mesh_table *mesh_paths;
static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */
+int mesh_paths_generation;
+static void __mesh_table_free(struct mesh_table *tbl)
+{
+ kfree(tbl->hash_buckets);
+ kfree(tbl->hashwlock);
+ kfree(tbl);
+}
+
+void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
+{
+ struct hlist_head *mesh_hash;
+ struct hlist_node *p, *q;
+ int i;
+
+ mesh_hash = tbl->hash_buckets;
+ for (i = 0; i <= tbl->hash_mask; i++) {
+ spin_lock(&tbl->hashwlock[i]);
+ hlist_for_each_safe(p, q, &mesh_hash[i]) {
+ tbl->free_node(p, free_leafs);
+ atomic_dec(&tbl->entries);
+ }
+ spin_unlock(&tbl->hashwlock[i]);
+ }
+ __mesh_table_free(tbl);
+}
+
+static struct mesh_table *mesh_table_grow(struct mesh_table *tbl)
+{
+ struct mesh_table *newtbl;
+ struct hlist_head *oldhash;
+ struct hlist_node *p, *q;
+ int i;
+
+ if (atomic_read(&tbl->entries)
+ < tbl->mean_chain_len * (tbl->hash_mask + 1))
+ goto endgrow;
+
+ newtbl = mesh_table_alloc(tbl->size_order + 1);
+ if (!newtbl)
+ goto endgrow;
+
+ newtbl->free_node = tbl->free_node;
+ newtbl->mean_chain_len = tbl->mean_chain_len;
+ newtbl->copy_node = tbl->copy_node;
+ atomic_set(&newtbl->entries, atomic_read(&tbl->entries));
+
+ oldhash = tbl->hash_buckets;
+ for (i = 0; i <= tbl->hash_mask; i++)
+ hlist_for_each(p, &oldhash[i])
+ if (tbl->copy_node(p, newtbl) < 0)
+ goto errcopy;
+
+ return newtbl;
+
+errcopy:
+ for (i = 0; i <= newtbl->hash_mask; i++) {
+ hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
+ tbl->free_node(p, 0);
+ }
+ __mesh_table_free(newtbl);
+endgrow:
+ return NULL;
+}
+
+
/* This lock will have the grow table function as writer and add / delete nodes
* as readers. When reading the table (i.e. doing lookups) we are well protected
* by RCU
@@ -55,7 +120,25 @@ static DEFINE_RWLOCK(pathtbl_resize_lock);
*/
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff_head tmpq;
+ unsigned long flags;
+
rcu_assign_pointer(mpath->next_hop, sta);
+
+ __skb_queue_head_init(&tmpq);
+
+ spin_lock_irqsave(&mpath->frame_queue.lock, flags);
+
+ while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
+ hdr = (struct ieee80211_hdr *) skb->data;
+ memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
+ __skb_queue_tail(&tmpq, skb);
+ }
+
+ skb_queue_splice(&tmpq, &mpath->frame_queue);
+ spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
@@ -167,6 +250,8 @@ struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data
*/
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ struct ieee80211_local *local = sdata->local;
struct mesh_path *mpath, *new_mpath;
struct mpath_node *node, *new_node;
struct hlist_head *bucket;
@@ -175,8 +260,6 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
int err = 0;
u32 hash_idx;
- might_sleep();
-
if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
/* never add ourselves as neighbours */
return -ENOTSUPP;
@@ -188,11 +271,11 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
return -ENOSPC;
err = -ENOMEM;
- new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
+ new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
if (!new_mpath)
goto err_path_alloc;
- new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
+ new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
if (!new_node)
goto err_node_alloc;
@@ -225,23 +308,13 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
grow = 1;
+ mesh_paths_generation++;
+
spin_unlock(&mesh_paths->hashwlock[hash_idx]);
read_unlock(&pathtbl_resize_lock);
if (grow) {
- struct mesh_table *oldtbl, *newtbl;
-
- write_lock(&pathtbl_resize_lock);
- oldtbl = mesh_paths;
- newtbl = mesh_table_grow(mesh_paths);
- if (!newtbl) {
- write_unlock(&pathtbl_resize_lock);
- return 0;
- }
- rcu_assign_pointer(mesh_paths, newtbl);
- write_unlock(&pathtbl_resize_lock);
-
- synchronize_rcu();
- mesh_table_free(oldtbl, false);
+ set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
+ ieee80211_queue_work(&local->hw, &ifmsh->work);
}
return 0;
@@ -256,9 +329,46 @@ err_path_alloc:
return err;
}
+void mesh_mpath_table_grow(void)
+{
+ struct mesh_table *oldtbl, *newtbl;
+
+ write_lock(&pathtbl_resize_lock);
+ oldtbl = mesh_paths;
+ newtbl = mesh_table_grow(mesh_paths);
+ if (!newtbl) {
+ write_unlock(&pathtbl_resize_lock);
+ return;
+ }
+ rcu_assign_pointer(mesh_paths, newtbl);
+ write_unlock(&pathtbl_resize_lock);
+
+ synchronize_rcu();
+ mesh_table_free(oldtbl, false);
+}
+
+void mesh_mpp_table_grow(void)
+{
+ struct mesh_table *oldtbl, *newtbl;
+
+ write_lock(&pathtbl_resize_lock);
+ oldtbl = mpp_paths;
+ newtbl = mesh_table_grow(mpp_paths);
+ if (!newtbl) {
+ write_unlock(&pathtbl_resize_lock);
+ return;
+ }
+ rcu_assign_pointer(mpp_paths, newtbl);
+ write_unlock(&pathtbl_resize_lock);
+
+ synchronize_rcu();
+ mesh_table_free(oldtbl, false);
+}
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+ struct ieee80211_local *local = sdata->local;
struct mesh_path *mpath, *new_mpath;
struct mpath_node *node, *new_node;
struct hlist_head *bucket;
@@ -267,8 +377,6 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
int err = 0;
u32 hash_idx;
- might_sleep();
-
if (memcmp(dst, sdata->dev->dev_addr, ETH_ALEN) == 0)
/* never add ourselves as neighbours */
return -ENOTSUPP;
@@ -277,11 +385,11 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
return -ENOTSUPP;
err = -ENOMEM;
- new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
+ new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
if (!new_mpath)
goto err_path_alloc;
- new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
+ new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
if (!new_node)
goto err_node_alloc;
@@ -315,20 +423,8 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
spin_unlock(&mpp_paths->hashwlock[hash_idx]);
read_unlock(&pathtbl_resize_lock);
if (grow) {
- struct mesh_table *oldtbl, *newtbl;
-
- write_lock(&pathtbl_resize_lock);
- oldtbl = mpp_paths;
- newtbl = mesh_table_grow(mpp_paths);
- if (!newtbl) {
- write_unlock(&pathtbl_resize_lock);
- return 0;
- }
- rcu_assign_pointer(mpp_paths, newtbl);
- write_unlock(&pathtbl_resize_lock);
-
- synchronize_rcu();
- mesh_table_free(oldtbl, false);
+ set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
+ ieee80211_queue_work(&local->hw, &ifmsh->work);
}
return 0;
@@ -466,6 +562,7 @@ int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
err = -ENXIO;
enddel:
+ mesh_paths_generation++;
spin_unlock(&mesh_paths->hashwlock[hash_idx]);
read_unlock(&pathtbl_resize_lock);
return err;
@@ -481,11 +578,9 @@ enddel:
*/
void mesh_path_tx_pending(struct mesh_path *mpath)
{
- struct sk_buff *skb;
-
- while ((skb = skb_dequeue(&mpath->frame_queue)) &&
- (mpath->flags & MESH_PATH_ACTIVE))
- dev_queue_xmit(skb);
+ if (mpath->flags & MESH_PATH_ACTIVE)
+ ieee80211_add_pending_skbs(mpath->sdata->local,
+ &mpath->frame_queue);
}
/**
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index cb14253587f..ffcbad75e09 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -409,7 +409,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
baselen = (u8 *) mgmt->u.action.u.plink_action.variable - (u8 *) mgmt;
if (mgmt->u.action.u.plink_action.action_code == PLINK_CONFIRM) {
baseaddr += 4;
- baselen -= 4;
+ baselen += 4;
}
ieee802_11_parse_elems(baseaddr, len - baselen, &elems);
if (!elems.peer_link) {
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 07e7e41816b..97a278a2f48 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -27,43 +27,99 @@
#include "rate.h"
#include "led.h"
-#define IEEE80211_ASSOC_SCANS_MAX_TRIES 2
#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
#define IEEE80211_AUTH_MAX_TRIES 3
#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
#define IEEE80211_ASSOC_MAX_TRIES 3
-#define IEEE80211_MONITORING_INTERVAL (2 * HZ)
-#define IEEE80211_PROBE_WAIT (HZ / 5)
-#define IEEE80211_PROBE_IDLE_TIME (60 * HZ)
-#define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ)
+#define IEEE80211_MAX_PROBE_TRIES 5
+
+/*
+ * beacon loss detection timeout
+ * XXX: should depend on beacon interval
+ */
+#define IEEE80211_BEACON_LOSS_TIME (2 * HZ)
+/*
+ * Time the connection can be idle before we probe
+ * it to see if we can still talk to the AP.
+ */
+#define IEEE80211_CONNECTION_IDLE_TIME (30 * HZ)
+/*
+ * Time we wait for a probe response after sending
+ * a probe request because of beacon loss or for
+ * checking the connection still works.
+ */
+#define IEEE80211_PROBE_WAIT (HZ / 2)
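
Rough numbers for the timeouts above, as a standalone calculation; HZ=1000 and a 100 TU (102.4 ms) beacon interval are assumptions for illustration, not taken from the patch.

/* Standalone arithmetic for the beacon loss / probe timeouts. */
#include <stdio.h>

int main(void)
{
	const int hz = 1000;			/* assumed tick rate */
	const double beacon_int_ms = 102.4;	/* assumed 100 TU beacon interval */
	const int beacon_loss_ms = 2 * hz;	/* IEEE80211_BEACON_LOSS_TIME */
	const int probe_wait_ms = hz / 2;	/* IEEE80211_PROBE_WAIT */
	const int idle_ms = 30 * hz;		/* IEEE80211_CONNECTION_IDLE_TIME */

	printf("beacon loss declared after ~%.0f missed beacons\n",
	       beacon_loss_ms / beacon_int_ms);
	printf("probe response wait: %d ms\n", probe_wait_ms);
	printf("connection probed after %d ms of idle\n", idle_ms);
	return 0;
}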
#define TMR_RUNNING_TIMER 0
#define TMR_RUNNING_CHANSW 1
+/*
+ * All cfg80211 functions have to be called outside a locked
+ * section so that they can acquire a lock themselves... This
+ * is much simpler than queuing up things in cfg80211, but we
+ * do need some indirection for that here.
+ */
+enum rx_mgmt_action {
+ /* no action required */
+ RX_MGMT_NONE,
+
+ /* caller must call cfg80211_send_rx_auth() */
+ RX_MGMT_CFG80211_AUTH,
+
+ /* caller must call cfg80211_send_rx_assoc() */
+ RX_MGMT_CFG80211_ASSOC,
+
+ /* caller must call cfg80211_send_deauth() */
+ RX_MGMT_CFG80211_DEAUTH,
+
+ /* caller must call cfg80211_send_disassoc() */
+ RX_MGMT_CFG80211_DISASSOC,
+
+ /* caller must call cfg80211_auth_timeout() & free work */
+ RX_MGMT_CFG80211_AUTH_TO,
+
+ /* caller must call cfg80211_assoc_timeout() & free work */
+ RX_MGMT_CFG80211_ASSOC_TO,
+};
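
A standalone userspace model (not mac80211 code) of the indirection this enum enables: classify the frame while the management mutex is held, then make the cfg80211 notification only after dropping it. Everything below apart from the enum values is hypothetical.

/* Standalone model: decide under the lock, notify after unlocking. */
#include <pthread.h>
#include <stdio.h>

enum rx_mgmt_action { RX_MGMT_NONE, RX_MGMT_CFG80211_AUTH, RX_MGMT_CFG80211_DEAUTH };

static pthread_mutex_t mgd_mtx = PTHREAD_MUTEX_INITIALIZER;

/* stands in for the locked ieee80211_rx_mgmt_*() handlers */
static enum rx_mgmt_action classify_frame(int subtype)
{
	return subtype == 0 ? RX_MGMT_CFG80211_AUTH : RX_MGMT_CFG80211_DEAUTH;
}

/* stands in for the cfg80211_send_*() calls, which may take their own locks */
static void notify_userspace(enum rx_mgmt_action act)
{
	printf("notify cfg80211, action %d (no mgd mutex held)\n", act);
}

int main(void)
{
	enum rx_mgmt_action act;

	pthread_mutex_lock(&mgd_mtx);
	act = classify_frame(0);	/* decide while locked */
	pthread_mutex_unlock(&mgd_mtx);

	if (act != RX_MGMT_NONE)
		notify_userspace(act);	/* act only after unlocking */
	return 0;
}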
+
/* utils */
-static int ecw2cw(int ecw)
+static inline void ASSERT_MGD_MTX(struct ieee80211_if_managed *ifmgd)
{
- return (1 << ecw) - 1;
+ WARN_ON(!mutex_is_locked(&ifmgd->mtx));
}
-static u8 *ieee80211_bss_get_ie(struct ieee80211_bss *bss, u8 ie)
+/*
+ * We can have multiple work items (and connection probing)
+ * scheduling this timer, but we need to take care to only
+ * reschedule it when it should fire _earlier_ than it was
+ * asked for before, or if it's not pending right now. This
+ * function ensures that. Note that after the first timeout
+ * has fired, this function must be run again for every
+ * subsequent timeout -- the work that runs from this timer
+ * takes care of that.
+ */
+static void run_again(struct ieee80211_if_managed *ifmgd,
+ unsigned long timeout)
{
- u8 *end, *pos;
+ ASSERT_MGD_MTX(ifmgd);
- pos = bss->cbss.information_elements;
- if (pos == NULL)
- return NULL;
- end = pos + bss->cbss.len_information_elements;
+ if (!timer_pending(&ifmgd->timer) ||
+ time_before(timeout, ifmgd->timer.expires))
+ mod_timer(&ifmgd->timer, timeout);
+}
- while (pos + 1 < end) {
- if (pos + 2 + pos[1] > end)
- break;
- if (pos[0] == ie)
- return pos;
- pos += 2 + pos[1];
- }
+static void mod_beacon_timer(struct ieee80211_sub_if_data *sdata)
+{
+ if (sdata->local->hw.flags & IEEE80211_HW_BEACON_FILTER)
+ return;
+
+ mod_timer(&sdata->u.mgd.bcn_mon_timer,
+ round_jiffies_up(jiffies + IEEE80211_BEACON_LOSS_TIME));
+}
- return NULL;
+static int ecw2cw(int ecw)
+{
+ return (1 << ecw) - 1;
}
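
A standalone check (not part of the patch) of ecw2cw(): the WMM parameter element carries contention windows as exponents, so CW = 2^ECW - 1.

/* Standalone check of the ECW-to-CW conversion. */
#include <stdio.h>

static int ecw2cw(int ecw)
{
	return (1 << ecw) - 1;
}

int main(void)
{
	printf("ECW 4  -> CW %d\n", ecw2cw(4));	/* 15 */
	printf("ECW 10 -> CW %d\n", ecw2cw(10));	/* 1023 */
	return 0;
}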
static int ieee80211_compatible_rates(struct ieee80211_bss *bss,
@@ -94,11 +150,10 @@ static int ieee80211_compatible_rates(struct ieee80211_bss *bss,
*/
static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
struct ieee80211_ht_info *hti,
- u16 ap_ht_cap_flags)
+ const u8 *bssid, u16 ap_ht_cap_flags)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct sta_info *sta;
u32 changed = 0;
u16 ht_opmode;
@@ -147,12 +202,10 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
ieee80211_hw_config(local, 0);
rcu_read_lock();
-
- sta = sta_info_get(local, ifmgd->bssid);
+ sta = sta_info_get(local, bssid);
if (sta)
rate_control_rate_update(local, sband, sta,
IEEE80211_RC_HT_CHANGED);
-
rcu_read_unlock();
}
@@ -175,23 +228,24 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
/* frame sending functions */
-static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
+static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgd_work *wk)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- u8 *pos, *ies, *ht_ie;
+ u8 *pos;
+ const u8 *ies, *ht_ie;
int i, len, count, rates_len, supp_rates_len;
u16 capab;
- struct ieee80211_bss *bss;
int wmm = 0;
struct ieee80211_supported_band *sband;
u32 rates = 0;
skb = dev_alloc_skb(local->hw.extra_tx_headroom +
- sizeof(*mgmt) + 200 + ifmgd->extra_ie_len +
- ifmgd->ssid_len);
+ sizeof(*mgmt) + 200 + wk->ie_len +
+ wk->ssid_len);
if (!skb) {
printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
"frame\n", sdata->dev->name);
@@ -210,45 +264,35 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
capab |= WLAN_CAPABILITY_SHORT_PREAMBLE;
}
- bss = ieee80211_rx_bss_get(local, ifmgd->bssid,
- local->hw.conf.channel->center_freq,
- ifmgd->ssid, ifmgd->ssid_len);
- if (bss) {
- if (bss->cbss.capability & WLAN_CAPABILITY_PRIVACY)
- capab |= WLAN_CAPABILITY_PRIVACY;
- if (bss->wmm_used)
- wmm = 1;
+ if (wk->bss->cbss.capability & WLAN_CAPABILITY_PRIVACY)
+ capab |= WLAN_CAPABILITY_PRIVACY;
+ if (wk->bss->wmm_used)
+ wmm = 1;
- /* get all rates supported by the device and the AP as
- * some APs don't like getting a superset of their rates
- * in the association request (e.g. D-Link DAP 1353 in
- * b-only mode) */
- rates_len = ieee80211_compatible_rates(bss, sband, &rates);
+ /* get all rates supported by the device and the AP as
+ * some APs don't like getting a superset of their rates
+ * in the association request (e.g. D-Link DAP 1353 in
+ * b-only mode) */
+ rates_len = ieee80211_compatible_rates(wk->bss, sband, &rates);
- if ((bss->cbss.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
- (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
- capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
-
- ieee80211_rx_bss_put(local, bss);
- } else {
- rates = ~0;
- rates_len = sband->n_bitrates;
- }
+ if ((wk->bss->cbss.capability & WLAN_CAPABILITY_SPECTRUM_MGMT) &&
+ (local->hw.flags & IEEE80211_HW_SPECTRUM_MGMT))
+ capab |= WLAN_CAPABILITY_SPECTRUM_MGMT;
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
memset(mgmt, 0, 24);
- memcpy(mgmt->da, ifmgd->bssid, ETH_ALEN);
+ memcpy(mgmt->da, wk->bss->cbss.bssid, ETH_ALEN);
memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
- memcpy(mgmt->bssid, ifmgd->bssid, ETH_ALEN);
+ memcpy(mgmt->bssid, wk->bss->cbss.bssid, ETH_ALEN);
- if (ifmgd->flags & IEEE80211_STA_PREV_BSSID_SET) {
+ if (!is_zero_ether_addr(wk->prev_bssid)) {
skb_put(skb, 10);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_REASSOC_REQ);
mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
mgmt->u.reassoc_req.listen_interval =
cpu_to_le16(local->hw.conf.listen_interval);
- memcpy(mgmt->u.reassoc_req.current_ap, ifmgd->prev_bssid,
+ memcpy(mgmt->u.reassoc_req.current_ap, wk->prev_bssid,
ETH_ALEN);
} else {
skb_put(skb, 4);
@@ -260,10 +304,10 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
}
/* SSID */
- ies = pos = skb_put(skb, 2 + ifmgd->ssid_len);
+ ies = pos = skb_put(skb, 2 + wk->ssid_len);
*pos++ = WLAN_EID_SSID;
- *pos++ = ifmgd->ssid_len;
- memcpy(pos, ifmgd->ssid, ifmgd->ssid_len);
+ *pos++ = wk->ssid_len;
+ memcpy(pos, wk->ssid, wk->ssid_len);
/* add all rates which were marked to be used above */
supp_rates_len = rates_len;
@@ -318,9 +362,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
}
}
- if (ifmgd->extra_ie) {
- pos = skb_put(skb, ifmgd->extra_ie_len);
- memcpy(pos, ifmgd->extra_ie, ifmgd->extra_ie_len);
+ if (wk->ie_len && wk->ie) {
+ pos = skb_put(skb, wk->ie_len);
+ memcpy(pos, wk->ie, wk->ie_len);
}
if (wmm && (ifmgd->flags & IEEE80211_STA_WMM_ENABLED)) {
@@ -345,9 +389,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
*/
if (wmm && (ifmgd->flags & IEEE80211_STA_WMM_ENABLED) &&
sband->ht_cap.ht_supported &&
- (ht_ie = ieee80211_bss_get_ie(bss, WLAN_EID_HT_INFORMATION)) &&
+ (ht_ie = ieee80211_bss_get_ie(&wk->bss->cbss, WLAN_EID_HT_INFORMATION)) &&
ht_ie[1] >= sizeof(struct ieee80211_ht_info) &&
- (!(ifmgd->flags & IEEE80211_STA_TKIP_WEP_USED))) {
+ (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))) {
struct ieee80211_ht_info *ht_info =
(struct ieee80211_ht_info *)(ht_ie + 2);
u16 cap = sband->ht_cap.cap;
@@ -382,18 +426,13 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
}
- kfree(ifmgd->assocreq_ies);
- ifmgd->assocreq_ies_len = (skb->data + skb->len) - ies;
- ifmgd->assocreq_ies = kmalloc(ifmgd->assocreq_ies_len, GFP_KERNEL);
- if (ifmgd->assocreq_ies)
- memcpy(ifmgd->assocreq_ies, ies, ifmgd->assocreq_ies_len);
-
ieee80211_tx_skb(sdata, skb, 0);
}
static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
- u16 stype, u16 reason)
+ const u8 *bssid, u16 stype, u16 reason,
+ void *cookie)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -410,18 +449,18 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24);
memset(mgmt, 0, 24);
- memcpy(mgmt->da, ifmgd->bssid, ETH_ALEN);
+ memcpy(mgmt->da, bssid, ETH_ALEN);
memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
- memcpy(mgmt->bssid, ifmgd->bssid, ETH_ALEN);
+ memcpy(mgmt->bssid, bssid, ETH_ALEN);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
skb_put(skb, 2);
/* u.deauth.reason_code == u.disassoc.reason_code */
mgmt->u.deauth.reason_code = cpu_to_le16(reason);
if (stype == IEEE80211_STYPE_DEAUTH)
- cfg80211_send_deauth(sdata->dev, (u8 *) mgmt, skb->len);
+ cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len, cookie);
else
- cfg80211_send_disassoc(sdata->dev, (u8 *) mgmt, skb->len);
+ cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len, cookie);
ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED);
}
@@ -494,28 +533,26 @@ static void ieee80211_chswitch_work(struct work_struct *work)
{
struct ieee80211_sub_if_data *sdata =
container_of(work, struct ieee80211_sub_if_data, u.mgd.chswitch_work);
- struct ieee80211_bss *bss;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
if (!netif_running(sdata->dev))
return;
- bss = ieee80211_rx_bss_get(sdata->local, ifmgd->bssid,
- sdata->local->hw.conf.channel->center_freq,
- ifmgd->ssid, ifmgd->ssid_len);
- if (!bss)
- goto exit;
+ mutex_lock(&ifmgd->mtx);
+ if (!ifmgd->associated)
+ goto out;
sdata->local->oper_channel = sdata->local->csa_channel;
+ ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL);
+
/* XXX: shouldn't really modify cfg80211-owned data! */
- if (!ieee80211_hw_config(sdata->local, IEEE80211_CONF_CHANGE_CHANNEL))
- bss->cbss.channel = sdata->local->oper_channel;
+ ifmgd->associated->cbss.channel = sdata->local->oper_channel;
- ieee80211_rx_bss_put(sdata->local, bss);
-exit:
- ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
ieee80211_wake_queues_by_reason(&sdata->local->hw,
IEEE80211_QUEUE_STOP_REASON_CSA);
+ out:
+ ifmgd->flags &= ~IEEE80211_STA_CSA_RECEIVED;
+ mutex_unlock(&ifmgd->mtx);
}
static void ieee80211_chswitch_timer(unsigned long data)
@@ -529,7 +566,7 @@ static void ieee80211_chswitch_timer(unsigned long data)
return;
}
- queue_work(sdata->local->hw.workqueue, &ifmgd->chswitch_work);
+ ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
}
void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
@@ -540,10 +577,12 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
int new_freq = ieee80211_channel_to_frequency(sw_elem->new_ch_num);
- if (ifmgd->state != IEEE80211_STA_MLME_ASSOCIATED)
+ ASSERT_MGD_MTX(ifmgd);
+
+ if (!ifmgd->associated)
return;
- if (sdata->local->sw_scanning || sdata->local->hw_scanning)
+ if (sdata->local->scanning)
return;
/* Disregard subsequent beacons if we are already running a timer
@@ -559,7 +598,7 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
sdata->local->csa_channel = new_ch;
if (sw_elem->count <= 1) {
- queue_work(sdata->local->hw.workqueue, &ifmgd->chswitch_work);
+ ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
} else {
ieee80211_stop_queues_by_reason(&sdata->local->hw,
IEEE80211_QUEUE_STOP_REASON_CSA);
@@ -601,7 +640,7 @@ static void ieee80211_enable_ps(struct ieee80211_local *local,
* If we are scanning right now then the parameters will
* take effect when scan finishes.
*/
- if (local->hw_scanning || local->sw_scanning)
+ if (local->scanning)
return;
if (conf->dynamic_ps_timeout > 0 &&
@@ -651,8 +690,9 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency)
}
if (count == 1 && found->u.mgd.powersave &&
- (found->u.mgd.flags & IEEE80211_STA_ASSOCIATED) &&
- !(found->u.mgd.flags & IEEE80211_STA_PROBEREQ_POLL)) {
+ found->u.mgd.associated && list_empty(&found->u.mgd.work_list) &&
+ !(found->u.mgd.flags & (IEEE80211_STA_BEACON_POLL |
+ IEEE80211_STA_CONNECTION_POLL))) {
s32 beaconint_us;
if (latency < 0)
@@ -724,7 +764,7 @@ void ieee80211_dynamic_ps_timer(unsigned long data)
if (local->quiescing || local->suspended)
return;
- queue_work(local->hw.workqueue, &local->dynamic_ps_enable_work);
+ ieee80211_queue_work(&local->hw, &local->dynamic_ps_enable_work);
}
/* MLME */
@@ -806,9 +846,6 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
u16 capab, bool erp_valid, u8 erp)
{
struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-#endif
u32 changed = 0;
bool use_protection;
bool use_short_preamble;
@@ -825,42 +862,16 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
use_short_slot = !!(capab & WLAN_CAPABILITY_SHORT_SLOT_TIME);
if (use_protection != bss_conf->use_cts_prot) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: CTS protection %s (BSSID=%pM)\n",
- sdata->dev->name,
- use_protection ? "enabled" : "disabled",
- ifmgd->bssid);
- }
-#endif
bss_conf->use_cts_prot = use_protection;
changed |= BSS_CHANGED_ERP_CTS_PROT;
}
if (use_short_preamble != bss_conf->use_short_preamble) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: switched to %s barker preamble"
- " (BSSID=%pM)\n",
- sdata->dev->name,
- use_short_preamble ? "short" : "long",
- ifmgd->bssid);
- }
-#endif
bss_conf->use_short_preamble = use_short_preamble;
changed |= BSS_CHANGED_ERP_PREAMBLE;
}
if (use_short_slot != bss_conf->use_short_slot) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: switched to %s slot time"
- " (BSSID=%pM)\n",
- sdata->dev->name,
- use_short_slot ? "short" : "long",
- ifmgd->bssid);
- }
-#endif
bss_conf->use_short_slot = use_short_slot;
changed |= BSS_CHANGED_ERP_SLOT;
}
@@ -868,105 +879,31 @@ static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
return changed;
}
-static void ieee80211_sta_send_apinfo(struct ieee80211_sub_if_data *sdata)
-{
- union iwreq_data wrqu;
-
- memset(&wrqu, 0, sizeof(wrqu));
- if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED)
- memcpy(wrqu.ap_addr.sa_data, sdata->u.mgd.bssid, ETH_ALEN);
- wrqu.ap_addr.sa_family = ARPHRD_ETHER;
- wireless_send_event(sdata->dev, SIOCGIWAP, &wrqu, NULL);
-}
-
-static void ieee80211_sta_send_associnfo(struct ieee80211_sub_if_data *sdata)
-{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- char *buf;
- size_t len;
- int i;
- union iwreq_data wrqu;
-
- if (!ifmgd->assocreq_ies && !ifmgd->assocresp_ies)
- return;
-
- buf = kmalloc(50 + 2 * (ifmgd->assocreq_ies_len +
- ifmgd->assocresp_ies_len), GFP_KERNEL);
- if (!buf)
- return;
-
- len = sprintf(buf, "ASSOCINFO(");
- if (ifmgd->assocreq_ies) {
- len += sprintf(buf + len, "ReqIEs=");
- for (i = 0; i < ifmgd->assocreq_ies_len; i++) {
- len += sprintf(buf + len, "%02x",
- ifmgd->assocreq_ies[i]);
- }
- }
- if (ifmgd->assocresp_ies) {
- if (ifmgd->assocreq_ies)
- len += sprintf(buf + len, " ");
- len += sprintf(buf + len, "RespIEs=");
- for (i = 0; i < ifmgd->assocresp_ies_len; i++) {
- len += sprintf(buf + len, "%02x",
- ifmgd->assocresp_ies[i]);
- }
- }
- len += sprintf(buf + len, ")");
-
- if (len > IW_CUSTOM_MAX) {
- len = sprintf(buf, "ASSOCRESPIE=");
- for (i = 0; i < ifmgd->assocresp_ies_len; i++) {
- len += sprintf(buf + len, "%02x",
- ifmgd->assocresp_ies[i]);
- }
- }
-
- if (len <= IW_CUSTOM_MAX) {
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = len;
- wireless_send_event(sdata->dev, IWEVCUSTOM, &wrqu, buf);
- }
-
- kfree(buf);
-}
-
-
static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgd_work *wk,
u32 bss_info_changed)
{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
- struct ieee80211_conf *conf = &local_to_hw(local)->conf;
-
- struct ieee80211_bss *bss;
+ struct ieee80211_bss *bss = wk->bss;
bss_info_changed |= BSS_CHANGED_ASSOC;
- ifmgd->flags |= IEEE80211_STA_ASSOCIATED;
+ /* set timing information */
+ sdata->vif.bss_conf.beacon_int = bss->cbss.beacon_interval;
+ sdata->vif.bss_conf.timestamp = bss->cbss.tsf;
+ sdata->vif.bss_conf.dtim_period = bss->dtim_period;
- bss = ieee80211_rx_bss_get(local, ifmgd->bssid,
- conf->channel->center_freq,
- ifmgd->ssid, ifmgd->ssid_len);
- if (bss) {
- /* set timing information */
- sdata->vif.bss_conf.beacon_int = bss->cbss.beacon_interval;
- sdata->vif.bss_conf.timestamp = bss->cbss.tsf;
- sdata->vif.bss_conf.dtim_period = bss->dtim_period;
+ bss_info_changed |= BSS_CHANGED_BEACON_INT;
+ bss_info_changed |= ieee80211_handle_bss_capability(sdata,
+ bss->cbss.capability, bss->has_erp_value, bss->erp_value);
- bss_info_changed |= BSS_CHANGED_BEACON_INT;
- bss_info_changed |= ieee80211_handle_bss_capability(sdata,
- bss->cbss.capability, bss->has_erp_value, bss->erp_value);
-
- cfg80211_hold_bss(&bss->cbss);
-
- ieee80211_rx_bss_put(local, bss);
- }
+ sdata->u.mgd.associated = bss;
+ sdata->u.mgd.old_associate_work = wk;
+ memcpy(sdata->u.mgd.bssid, bss->cbss.bssid, ETH_ALEN);
- ifmgd->flags |= IEEE80211_STA_PREV_BSSID_SET;
- memcpy(ifmgd->prev_bssid, sdata->u.mgd.bssid, ETH_ALEN);
- ieee80211_sta_send_associnfo(sdata);
+ /* just to be sure */
+ sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
+ IEEE80211_STA_BEACON_POLL);
- ifmgd->last_probe = jiffies;
ieee80211_led_assoc(local, 1);
sdata->vif.bss_conf.assoc = 1;
@@ -982,176 +919,157 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
ieee80211_bss_info_change_notify(sdata, bss_info_changed);
- /* will be same as sdata */
- if (local->ps_sdata) {
- mutex_lock(&local->iflist_mtx);
- ieee80211_recalc_ps(local, -1);
- mutex_unlock(&local->iflist_mtx);
- }
+ mutex_lock(&local->iflist_mtx);
+ ieee80211_recalc_ps(local, -1);
+ mutex_unlock(&local->iflist_mtx);
netif_tx_start_all_queues(sdata->dev);
netif_carrier_on(sdata->dev);
-
- ieee80211_sta_send_apinfo(sdata);
}
-static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata)
+static enum rx_mgmt_action __must_check
+ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgd_work *wk)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
- ifmgd->direct_probe_tries++;
- if (ifmgd->direct_probe_tries > IEEE80211_AUTH_MAX_TRIES) {
+ wk->tries++;
+ if (wk->tries > IEEE80211_AUTH_MAX_TRIES) {
printk(KERN_DEBUG "%s: direct probe to AP %pM timed out\n",
- sdata->dev->name, ifmgd->bssid);
- ifmgd->state = IEEE80211_STA_MLME_DISABLED;
- ieee80211_recalc_idle(local);
- cfg80211_send_auth_timeout(sdata->dev, ifmgd->bssid);
+ sdata->dev->name, wk->bss->cbss.bssid);
/*
* Most likely AP is not in the range so remove the
- * bss information associated to the AP
+ * bss struct for that AP.
*/
- ieee80211_rx_bss_remove(sdata, ifmgd->bssid,
- sdata->local->hw.conf.channel->center_freq,
- ifmgd->ssid, ifmgd->ssid_len);
+ cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
/*
* We might have a pending scan which had no chance to run yet
- * due to state == IEEE80211_STA_MLME_DIRECT_PROBE.
- * Hence, queue the STAs work again
+ * due to work needing to be done. Hence, queue the STA's work
+ * again for that.
*/
- queue_work(local->hw.workqueue, &ifmgd->work);
- return;
+ ieee80211_queue_work(&local->hw, &ifmgd->work);
+ return RX_MGMT_CFG80211_AUTH_TO;
}
- printk(KERN_DEBUG "%s: direct probe to AP %pM try %d\n",
- sdata->dev->name, ifmgd->bssid,
- ifmgd->direct_probe_tries);
+ printk(KERN_DEBUG "%s: direct probe to AP %pM (try %d)\n",
+ sdata->dev->name, wk->bss->cbss.bssid,
+ wk->tries);
- ifmgd->state = IEEE80211_STA_MLME_DIRECT_PROBE;
-
- /* Direct probe is sent to broadcast address as some APs
+ /*
+ * Direct probe is sent to broadcast address as some APs
* will not answer to direct packet in unassociated state.
*/
- ieee80211_send_probe_req(sdata, NULL,
- ifmgd->ssid, ifmgd->ssid_len, NULL, 0);
+ ieee80211_send_probe_req(sdata, NULL, wk->ssid, wk->ssid_len, NULL, 0);
+
+ wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
+ run_again(ifmgd, wk->timeout);
- mod_timer(&ifmgd->timer, jiffies + IEEE80211_AUTH_TIMEOUT);
+ return RX_MGMT_NONE;
}
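
The probe, authenticate and associate steps now share the same bounded-retry shape: count attempts in wk->tries, give up after a fixed maximum and report a timeout, otherwise send the frame and re-arm the deadline. A minimal stand-alone sketch of that shape, with invented names and constants, might look like:

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_TRIES 3

	struct work_item {
		int tries;
		long timeout;	/* absolute deadline, e.g. in jiffies */
	};

	enum step_result { STEP_PENDING, STEP_TIMED_OUT };

	static enum step_result run_step(struct work_item *wk, long now)
	{
		wk->tries++;
		if (wk->tries > MAX_TRIES)
			return STEP_TIMED_OUT;	/* caller reports the timeout */

		/* transmit the frame here, then re-arm the deadline */
		wk->timeout = now + 10;
		return STEP_PENDING;
	}

	int main(void)
	{
		struct work_item wk = { 0, 0 };
		long now = 0;

		while (run_step(&wk, now) == STEP_PENDING)
			now = wk.timeout;	/* pretend the deadline expired */

		printf("gave up after %d tries\n", wk.tries - 1);
		return 0;
	}
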
-static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata)
+static enum rx_mgmt_action __must_check
+ieee80211_authenticate(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgd_work *wk)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
- u8 *ies;
- size_t ies_len;
- ifmgd->auth_tries++;
- if (ifmgd->auth_tries > IEEE80211_AUTH_MAX_TRIES) {
+ wk->tries++;
+ if (wk->tries > IEEE80211_AUTH_MAX_TRIES) {
printk(KERN_DEBUG "%s: authentication with AP %pM"
" timed out\n",
- sdata->dev->name, ifmgd->bssid);
- ifmgd->state = IEEE80211_STA_MLME_DISABLED;
- ieee80211_recalc_idle(local);
- cfg80211_send_auth_timeout(sdata->dev, ifmgd->bssid);
- ieee80211_rx_bss_remove(sdata, ifmgd->bssid,
- sdata->local->hw.conf.channel->center_freq,
- ifmgd->ssid, ifmgd->ssid_len);
+ sdata->dev->name, wk->bss->cbss.bssid);
+
+ /*
+ * Most likely AP is not in the range so remove the
+ * bss struct for that AP.
+ */
+ cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
/*
* We might have a pending scan which had no chance to run yet
- * due to state == IEEE80211_STA_MLME_AUTHENTICATE.
- * Hence, queue the STAs work again
+ * due to work needing to be done. Hence, queue the STA's work
+ * again for that.
*/
- queue_work(local->hw.workqueue, &ifmgd->work);
- return;
+ ieee80211_queue_work(&local->hw, &ifmgd->work);
+ return RX_MGMT_CFG80211_AUTH_TO;
}
- ifmgd->state = IEEE80211_STA_MLME_AUTHENTICATE;
- printk(KERN_DEBUG "%s: authenticate with AP %pM\n",
- sdata->dev->name, ifmgd->bssid);
+ printk(KERN_DEBUG "%s: authenticate with AP %pM (try %d)\n",
+ sdata->dev->name, wk->bss->cbss.bssid, wk->tries);
- if (ifmgd->flags & IEEE80211_STA_EXT_SME) {
- ies = ifmgd->sme_auth_ie;
- ies_len = ifmgd->sme_auth_ie_len;
- } else {
- ies = NULL;
- ies_len = 0;
- }
- ieee80211_send_auth(sdata, 1, ifmgd->auth_alg, ies, ies_len,
- ifmgd->bssid, 0);
- ifmgd->auth_transaction = 2;
+ ieee80211_send_auth(sdata, 1, wk->auth_alg, wk->ie, wk->ie_len,
+ wk->bss->cbss.bssid, NULL, 0, 0);
+ wk->auth_transaction = 2;
+
+ wk->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
+ run_again(ifmgd, wk->timeout);
- mod_timer(&ifmgd->timer, jiffies + IEEE80211_AUTH_TIMEOUT);
+ return RX_MGMT_NONE;
}
-/*
- * The disassoc 'reason' argument can be either our own reason
- * if self disconnected or a reason code from the AP.
- */
static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
- bool deauth, bool self_disconnected,
- u16 reason)
+ bool deauth)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
- struct ieee80211_conf *conf = &local_to_hw(local)->conf;
- struct ieee80211_bss *bss;
struct sta_info *sta;
u32 changed = 0, config_changed = 0;
+ u8 bssid[ETH_ALEN];
+
+ ASSERT_MGD_MTX(ifmgd);
+
+ if (WARN_ON(!ifmgd->associated))
+ return;
+
+ memcpy(bssid, ifmgd->associated->cbss.bssid, ETH_ALEN);
+
+ ifmgd->associated = NULL;
+ memset(ifmgd->bssid, 0, ETH_ALEN);
if (deauth) {
- ifmgd->direct_probe_tries = 0;
- ifmgd->auth_tries = 0;
+ kfree(ifmgd->old_associate_work);
+ ifmgd->old_associate_work = NULL;
+ } else {
+ struct ieee80211_mgd_work *wk = ifmgd->old_associate_work;
+
+ wk->state = IEEE80211_MGD_STATE_IDLE;
+ list_add(&wk->list, &ifmgd->work_list);
}
- ifmgd->assoc_scan_tries = 0;
- ifmgd->assoc_tries = 0;
+
+ /*
+ * we need to commit the associated = NULL change because the
+ * scan code uses that to determine whether this iface should
+ * go to/wake up from powersave or not -- and could otherwise
+ * wake the queues erroneously.
+ */
+ smp_mb();
+
+ /*
+ * Thus, we can only afterwards stop the queues -- to account
+ * for the case where another CPU is finishing a scan at this
+ * time -- we don't want the scan code to enable queues.
+ */
netif_tx_stop_all_queues(sdata->dev);
netif_carrier_off(sdata->dev);
rcu_read_lock();
- sta = sta_info_get(local, ifmgd->bssid);
+ sta = sta_info_get(local, bssid);
if (sta)
ieee80211_sta_tear_down_BA_sessions(sta);
rcu_read_unlock();
- bss = ieee80211_rx_bss_get(local, ifmgd->bssid,
- conf->channel->center_freq,
- ifmgd->ssid, ifmgd->ssid_len);
-
- if (bss) {
- cfg80211_unhold_bss(&bss->cbss);
- ieee80211_rx_bss_put(local, bss);
- }
-
- if (self_disconnected) {
- if (deauth)
- ieee80211_send_deauth_disassoc(sdata,
- IEEE80211_STYPE_DEAUTH, reason);
- else
- ieee80211_send_deauth_disassoc(sdata,
- IEEE80211_STYPE_DISASSOC, reason);
- }
-
- ifmgd->flags &= ~IEEE80211_STA_ASSOCIATED;
changed |= ieee80211_reset_erp_info(sdata);
ieee80211_led_assoc(local, 0);
changed |= BSS_CHANGED_ASSOC;
sdata->vif.bss_conf.assoc = false;
- ieee80211_sta_send_apinfo(sdata);
-
- if (self_disconnected || reason == WLAN_REASON_DISASSOC_STA_HAS_LEFT) {
- ifmgd->state = IEEE80211_STA_MLME_DISABLED;
- ieee80211_rx_bss_remove(sdata, ifmgd->bssid,
- sdata->local->hw.conf.channel->center_freq,
- ifmgd->ssid, ifmgd->ssid_len);
- }
-
ieee80211_set_wmm_default(sdata);
ieee80211_recalc_idle(local);
@@ -1180,7 +1098,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
rcu_read_lock();
- sta = sta_info_get(local, ifmgd->bssid);
+ sta = sta_info_get(local, bssid);
if (!sta) {
rcu_read_unlock();
return;
@@ -1193,83 +1111,42 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sta_info_destroy(sta);
}
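
Side note on the smp_mb() comment above: the point is simply to publish the cleared association state before the queues are stopped, so a scan finishing on another CPU does not re-enable queues based on stale state. The fragment below is only a rough, single-threaded user-space sketch of that ordering using C11 atomics; none of the names are mac80211 symbols.

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static _Atomic bool associated = true;
	static _Atomic bool queues_stopped;

	static void set_disassociated(void)
	{
		atomic_store_explicit(&associated, false, memory_order_relaxed);

		/* full barrier: the store above is visible before we touch queues */
		atomic_thread_fence(memory_order_seq_cst);

		atomic_store_explicit(&queues_stopped, true, memory_order_relaxed);
	}

	static void scan_finished(void)
	{
		/* the scan code only wakes queues for interfaces still associated */
		if (atomic_load_explicit(&associated, memory_order_seq_cst))
			atomic_store_explicit(&queues_stopped, false,
					      memory_order_relaxed);
	}

	int main(void)
	{
		set_disassociated();
		scan_finished();
		printf("queues stopped: %d\n",
		       atomic_load_explicit(&queues_stopped, memory_order_relaxed));
		return 0;
	}
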
-static int ieee80211_sta_wep_configured(struct ieee80211_sub_if_data *sdata)
-{
- if (!sdata || !sdata->default_key ||
- sdata->default_key->conf.alg != ALG_WEP)
- return 0;
- return 1;
-}
-
-static int ieee80211_privacy_mismatch(struct ieee80211_sub_if_data *sdata)
-{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_bss *bss;
- int bss_privacy;
- int wep_privacy;
- int privacy_invoked;
-
- if (!ifmgd || (ifmgd->flags & IEEE80211_STA_EXT_SME))
- return 0;
-
- bss = ieee80211_rx_bss_get(local, ifmgd->bssid,
- local->hw.conf.channel->center_freq,
- ifmgd->ssid, ifmgd->ssid_len);
- if (!bss)
- return 0;
-
- bss_privacy = !!(bss->cbss.capability & WLAN_CAPABILITY_PRIVACY);
- wep_privacy = !!ieee80211_sta_wep_configured(sdata);
- privacy_invoked = !!(ifmgd->flags & IEEE80211_STA_PRIVACY_INVOKED);
-
- ieee80211_rx_bss_put(local, bss);
-
- if ((bss_privacy == wep_privacy) || (bss_privacy == privacy_invoked))
- return 0;
-
- return 1;
-}
-
-static void ieee80211_associate(struct ieee80211_sub_if_data *sdata)
+static enum rx_mgmt_action __must_check
+ieee80211_associate(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgd_work *wk)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
- ifmgd->assoc_tries++;
- if (ifmgd->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) {
+ wk->tries++;
+ if (wk->tries > IEEE80211_ASSOC_MAX_TRIES) {
printk(KERN_DEBUG "%s: association with AP %pM"
" timed out\n",
- sdata->dev->name, ifmgd->bssid);
- ifmgd->state = IEEE80211_STA_MLME_DISABLED;
- ieee80211_recalc_idle(local);
- cfg80211_send_assoc_timeout(sdata->dev, ifmgd->bssid);
- ieee80211_rx_bss_remove(sdata, ifmgd->bssid,
- sdata->local->hw.conf.channel->center_freq,
- ifmgd->ssid, ifmgd->ssid_len);
+ sdata->dev->name, wk->bss->cbss.bssid);
+
+ /*
+ * Most likely AP is not in the range so remove the
+ * bss struct for that AP.
+ */
+ cfg80211_unlink_bss(local->hw.wiphy, &wk->bss->cbss);
+
/*
* We might have a pending scan which had no chance to run yet
- * due to state == IEEE80211_STA_MLME_ASSOCIATE.
- * Hence, queue the STAs work again
+ * due to work needing to be done. Hence, queue the STA's work
+ * again for that.
*/
- queue_work(local->hw.workqueue, &ifmgd->work);
- return;
+ ieee80211_queue_work(&local->hw, &ifmgd->work);
+ return RX_MGMT_CFG80211_ASSOC_TO;
}
- ifmgd->state = IEEE80211_STA_MLME_ASSOCIATE;
- printk(KERN_DEBUG "%s: associate with AP %pM\n",
- sdata->dev->name, ifmgd->bssid);
- if (ieee80211_privacy_mismatch(sdata)) {
- printk(KERN_DEBUG "%s: mismatch in privacy configuration and "
- "mixed-cell disabled - abort association\n", sdata->dev->name);
- ifmgd->state = IEEE80211_STA_MLME_DISABLED;
- ieee80211_recalc_idle(local);
- return;
- }
+ printk(KERN_DEBUG "%s: associate with AP %pM (try %d)\n",
+ sdata->dev->name, wk->bss->cbss.bssid, wk->tries);
+ ieee80211_send_assoc(sdata, wk);
- ieee80211_send_assoc(sdata);
+ wk->timeout = jiffies + IEEE80211_ASSOC_TIMEOUT;
+ run_again(ifmgd, wk->timeout);
- mod_timer(&ifmgd->timer, jiffies + IEEE80211_ASSOC_TIMEOUT);
+ return RX_MGMT_NONE;
}
void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
@@ -1280,160 +1157,113 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
* from AP because we know that the connection is working both ways
* at that time. But multicast frames (and hence also beacons) must
* be ignored here, because we need to trigger the timer during
- * data idle periods for sending the periodical probe request to
- * the AP.
+ * data idle periods for sending the periodic probe request to the
+ * AP we're connected to.
*/
- if (!is_multicast_ether_addr(hdr->addr1))
- mod_timer(&sdata->u.mgd.timer,
- jiffies + IEEE80211_MONITORING_INTERVAL);
-}
-
-void ieee80211_beacon_loss_work(struct work_struct *work)
-{
- struct ieee80211_sub_if_data *sdata =
- container_of(work, struct ieee80211_sub_if_data,
- u.mgd.beacon_loss_work);
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-
- /*
- * The driver has already reported this event and we have
- * already sent a probe request. Maybe the AP died and the
- * driver keeps reporting until we disassociate... We have
- * to ignore that because otherwise we would continually
- * reset the timer and never check whether we received a
- * probe response!
- */
- if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL)
+ if (is_multicast_ether_addr(hdr->addr1))
return;
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM "
- "- sending probe request\n", sdata->dev->name,
- sdata->u.mgd.bssid);
- }
-#endif
-
- ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
-
- mutex_lock(&sdata->local->iflist_mtx);
- ieee80211_recalc_ps(sdata->local, -1);
- mutex_unlock(&sdata->local->iflist_mtx);
-
- ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
- ifmgd->ssid_len, NULL, 0);
-
- mod_timer(&ifmgd->timer, jiffies + IEEE80211_PROBE_WAIT);
+ mod_timer(&sdata->u.mgd.conn_mon_timer,
+ round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME));
}
-void ieee80211_beacon_loss(struct ieee80211_vif *vif)
+static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ const u8 *ssid;
+
+ ssid = ieee80211_bss_get_ie(&ifmgd->associated->cbss, WLAN_EID_SSID);
+ ieee80211_send_probe_req(sdata, ifmgd->associated->cbss.bssid,
+ ssid + 2, ssid[1], NULL, 0);
- queue_work(sdata->local->hw.workqueue,
- &sdata->u.mgd.beacon_loss_work);
+ ifmgd->probe_send_count++;
+ ifmgd->probe_timeout = jiffies + IEEE80211_PROBE_WAIT;
+ run_again(ifmgd, ifmgd->probe_timeout);
}
-EXPORT_SYMBOL(ieee80211_beacon_loss);
-static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
+static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
+ bool beacon)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_local *local = sdata->local;
- struct sta_info *sta;
- unsigned long last_rx;
- bool disassoc = false;
+ bool already = false;
- /* TODO: start monitoring current AP signal quality and number of
- * missed beacons. Scan other channels every now and then and search
- * for better APs. */
- /* TODO: remove expired BSSes */
+ if (!netif_running(sdata->dev))
+ return;
- ifmgd->state = IEEE80211_STA_MLME_ASSOCIATED;
+ if (sdata->local->scanning)
+ return;
- rcu_read_lock();
+ mutex_lock(&ifmgd->mtx);
- sta = sta_info_get(local, ifmgd->bssid);
- if (!sta) {
- printk(KERN_DEBUG "%s: No STA entry for own AP %pM\n",
- sdata->dev->name, ifmgd->bssid);
- disassoc = true;
- rcu_read_unlock();
+ if (!ifmgd->associated)
goto out;
- }
- last_rx = sta->last_rx;
- rcu_read_unlock();
-
- if ((ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) &&
- time_after(jiffies, last_rx + IEEE80211_PROBE_WAIT)) {
- printk(KERN_DEBUG "%s: no probe response from AP %pM "
- "- disassociating\n",
- sdata->dev->name, ifmgd->bssid);
- disassoc = true;
- ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
- goto out;
- }
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ if (beacon && net_ratelimit())
+ printk(KERN_DEBUG "%s: detected beacon loss from AP "
+ "- sending probe request\n", sdata->dev->name);
+#endif
/*
- * Beacon filtering is only enabled with power save and then the
- * stack should not check for beacon loss.
+ * The driver/our work has already reported this event or the
+ * connection monitoring has kicked in and we have already sent
+ * a probe request. Or maybe the AP died and the driver keeps
+ * reporting until we disassociate...
+ *
+ * In either case we have to ignore the current call to this
+ * function (except for setting the correct probe reason bit)
+ * because otherwise we would reset the timer every time and
+ * never check whether we received a probe response!
*/
- if (!((local->hw.flags & IEEE80211_HW_BEACON_FILTER) &&
- (local->hw.conf.flags & IEEE80211_CONF_PS)) &&
- time_after(jiffies,
- ifmgd->last_beacon + IEEE80211_MONITORING_INTERVAL)) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: beacon loss from AP %pM "
- "- sending probe request\n",
- sdata->dev->name, ifmgd->bssid);
- }
-#endif
- ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
- mutex_lock(&local->iflist_mtx);
- ieee80211_recalc_ps(local, -1);
- mutex_unlock(&local->iflist_mtx);
- ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
- ifmgd->ssid_len, NULL, 0);
- mod_timer(&ifmgd->timer, jiffies + IEEE80211_PROBE_WAIT);
+ if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
+ IEEE80211_STA_CONNECTION_POLL))
+ already = true;
+
+ if (beacon)
+ ifmgd->flags |= IEEE80211_STA_BEACON_POLL;
+ else
+ ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL;
+
+ if (already)
goto out;
- }
- if (time_after(jiffies, last_rx + IEEE80211_PROBE_IDLE_TIME)) {
- ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
- mutex_lock(&local->iflist_mtx);
- ieee80211_recalc_ps(local, -1);
- mutex_unlock(&local->iflist_mtx);
- ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
- ifmgd->ssid_len, NULL, 0);
- }
+ mutex_lock(&sdata->local->iflist_mtx);
+ ieee80211_recalc_ps(sdata->local, -1);
+ mutex_unlock(&sdata->local->iflist_mtx);
+ ifmgd->probe_send_count = 0;
+ ieee80211_mgd_probe_ap_send(sdata);
out:
- if (!disassoc)
- mod_timer(&ifmgd->timer,
- jiffies + IEEE80211_MONITORING_INTERVAL);
- else
- ieee80211_set_disassoc(sdata, true, true,
- WLAN_REASON_PREV_AUTH_NOT_VALID);
+ mutex_unlock(&ifmgd->mtx);
}
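
The "already" check above is what keeps the beacon-loss path and the connection monitor from sending duplicate probes: both record their reason bit, but only the first caller transmits and arms the probe deadline. A small illustrative sketch of that dedup logic (plain C, made-up names, not mac80211 code):

	#include <stdbool.h>
	#include <stdio.h>

	#define POLL_BEACON	0x1
	#define POLL_CONNECTION	0x2

	static unsigned int poll_flags;
	static int probes_sent;

	static void probe_ap(bool beacon_loss)
	{
		bool already = poll_flags & (POLL_BEACON | POLL_CONNECTION);

		poll_flags |= beacon_loss ? POLL_BEACON : POLL_CONNECTION;

		if (already)
			return;		/* a poll is already in flight */

		probes_sent++;		/* send probe request, arm probe_timeout */
	}

	int main(void)
	{
		probe_ap(true);		/* driver reports beacon loss          */
		probe_ap(false);	/* idle connection monitor fires too   */
		printf("probes sent: %d (flags 0x%x)\n", probes_sent, poll_flags);
		return 0;
	}
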
+void ieee80211_beacon_loss_work(struct work_struct *work)
+{
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data,
+ u.mgd.beacon_loss_work);
+
+ ieee80211_mgd_probe_ap(sdata, true);
+}
-static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata)
+void ieee80211_beacon_loss(struct ieee80211_vif *vif)
{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_loss_work);
+}
+EXPORT_SYMBOL(ieee80211_beacon_loss);
+
+static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgd_work *wk)
+{
+ wk->state = IEEE80211_MGD_STATE_IDLE;
printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name);
- ifmgd->flags |= IEEE80211_STA_AUTHENTICATED;
- if (ifmgd->flags & IEEE80211_STA_EXT_SME) {
- /* Wait for SME to request association */
- ifmgd->state = IEEE80211_STA_MLME_DISABLED;
- ieee80211_recalc_idle(sdata->local);
- } else
- ieee80211_associate(sdata);
}
static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgd_work *wk,
struct ieee80211_mgmt *mgmt,
size_t len)
{
@@ -1444,161 +1274,133 @@ static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
ieee802_11_parse_elems(pos, len - (pos - (u8 *) mgmt), &elems);
if (!elems.challenge)
return;
- ieee80211_send_auth(sdata, 3, sdata->u.mgd.auth_alg,
+ ieee80211_send_auth(sdata, 3, wk->auth_alg,
elems.challenge - 2, elems.challenge_len + 2,
- sdata->u.mgd.bssid, 1);
- sdata->u.mgd.auth_transaction = 4;
+ wk->bss->cbss.bssid,
+ wk->key, wk->key_len, wk->key_idx);
+ wk->auth_transaction = 4;
}
-static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgmt *mgmt,
- size_t len)
+static enum rx_mgmt_action __must_check
+ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgd_work *wk,
+ struct ieee80211_mgmt *mgmt, size_t len)
{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u16 auth_alg, auth_transaction, status_code;
- if (ifmgd->state != IEEE80211_STA_MLME_AUTHENTICATE)
- return;
+ if (wk->state != IEEE80211_MGD_STATE_AUTH)
+ return RX_MGMT_NONE;
if (len < 24 + 6)
- return;
+ return RX_MGMT_NONE;
- if (memcmp(ifmgd->bssid, mgmt->sa, ETH_ALEN) != 0)
- return;
+ if (memcmp(wk->bss->cbss.bssid, mgmt->sa, ETH_ALEN) != 0)
+ return RX_MGMT_NONE;
- if (memcmp(ifmgd->bssid, mgmt->bssid, ETH_ALEN) != 0)
- return;
+ if (memcmp(wk->bss->cbss.bssid, mgmt->bssid, ETH_ALEN) != 0)
+ return RX_MGMT_NONE;
auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
status_code = le16_to_cpu(mgmt->u.auth.status_code);
- if (auth_alg != ifmgd->auth_alg ||
- auth_transaction != ifmgd->auth_transaction)
- return;
+ if (auth_alg != wk->auth_alg ||
+ auth_transaction != wk->auth_transaction)
+ return RX_MGMT_NONE;
if (status_code != WLAN_STATUS_SUCCESS) {
- if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) {
- u8 algs[3];
- const int num_algs = ARRAY_SIZE(algs);
- int i, pos;
- algs[0] = algs[1] = algs[2] = 0xff;
- if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_OPEN)
- algs[0] = WLAN_AUTH_OPEN;
- if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_SHARED_KEY)
- algs[1] = WLAN_AUTH_SHARED_KEY;
- if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_LEAP)
- algs[2] = WLAN_AUTH_LEAP;
- if (ifmgd->auth_alg == WLAN_AUTH_OPEN)
- pos = 0;
- else if (ifmgd->auth_alg == WLAN_AUTH_SHARED_KEY)
- pos = 1;
- else
- pos = 2;
- for (i = 0; i < num_algs; i++) {
- pos++;
- if (pos >= num_algs)
- pos = 0;
- if (algs[pos] == ifmgd->auth_alg ||
- algs[pos] == 0xff)
- continue;
- if (algs[pos] == WLAN_AUTH_SHARED_KEY &&
- !ieee80211_sta_wep_configured(sdata))
- continue;
- ifmgd->auth_alg = algs[pos];
- break;
- }
- }
- return;
+ list_del(&wk->list);
+ kfree(wk);
+ return RX_MGMT_CFG80211_AUTH;
}
- switch (ifmgd->auth_alg) {
+ switch (wk->auth_alg) {
case WLAN_AUTH_OPEN:
case WLAN_AUTH_LEAP:
case WLAN_AUTH_FT:
- ieee80211_auth_completed(sdata);
- cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, len);
- break;
+ ieee80211_auth_completed(sdata, wk);
+ return RX_MGMT_CFG80211_AUTH;
case WLAN_AUTH_SHARED_KEY:
- if (ifmgd->auth_transaction == 4) {
- ieee80211_auth_completed(sdata);
- cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, len);
+ if (wk->auth_transaction == 4) {
+ ieee80211_auth_completed(sdata, wk);
+ return RX_MGMT_CFG80211_AUTH;
} else
- ieee80211_auth_challenge(sdata, mgmt, len);
+ ieee80211_auth_challenge(sdata, wk, mgmt, len);
break;
}
+
+ return RX_MGMT_NONE;
}
-static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgmt *mgmt,
- size_t len)
+static enum rx_mgmt_action __must_check
+ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgd_work *wk,
+ struct ieee80211_mgmt *mgmt, size_t len)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ const u8 *bssid = NULL;
u16 reason_code;
if (len < 24 + 2)
- return;
+ return RX_MGMT_NONE;
- if (memcmp(ifmgd->bssid, mgmt->sa, ETH_ALEN))
- return;
+ ASSERT_MGD_MTX(ifmgd);
+
+ if (wk)
+ bssid = wk->bss->cbss.bssid;
+ else
+ bssid = ifmgd->associated->cbss.bssid;
reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
- if (ifmgd->flags & IEEE80211_STA_AUTHENTICATED)
- printk(KERN_DEBUG "%s: deauthenticated (Reason: %u)\n",
- sdata->dev->name, reason_code);
+ printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n",
+ sdata->dev->name, bssid, reason_code);
- if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) &&
- (ifmgd->state == IEEE80211_STA_MLME_AUTHENTICATE ||
- ifmgd->state == IEEE80211_STA_MLME_ASSOCIATE ||
- ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED)) {
- ifmgd->state = IEEE80211_STA_MLME_DIRECT_PROBE;
- mod_timer(&ifmgd->timer, jiffies +
- IEEE80211_RETRY_AUTH_INTERVAL);
+ if (!wk) {
+ ieee80211_set_disassoc(sdata, true);
+ } else {
+ list_del(&wk->list);
+ kfree(wk);
}
- ieee80211_set_disassoc(sdata, true, false, 0);
- ifmgd->flags &= ~IEEE80211_STA_AUTHENTICATED;
- cfg80211_send_deauth(sdata->dev, (u8 *) mgmt, len);
+ return RX_MGMT_CFG80211_DEAUTH;
}
-static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgmt *mgmt,
- size_t len)
+static enum rx_mgmt_action __must_check
+ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgmt *mgmt, size_t len)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u16 reason_code;
if (len < 24 + 2)
- return;
+ return RX_MGMT_NONE;
- if (memcmp(ifmgd->bssid, mgmt->sa, ETH_ALEN))
- return;
+ ASSERT_MGD_MTX(ifmgd);
- reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
+ if (WARN_ON(!ifmgd->associated))
+ return RX_MGMT_NONE;
- if (ifmgd->flags & IEEE80211_STA_ASSOCIATED)
- printk(KERN_DEBUG "%s: disassociated (Reason: %u)\n",
- sdata->dev->name, reason_code);
+ if (WARN_ON(memcmp(ifmgd->associated->cbss.bssid, mgmt->sa, ETH_ALEN)))
+ return RX_MGMT_NONE;
- if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) &&
- ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED) {
- ifmgd->state = IEEE80211_STA_MLME_ASSOCIATE;
- mod_timer(&ifmgd->timer, jiffies +
- IEEE80211_RETRY_AUTH_INTERVAL);
- }
+ reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
- ieee80211_set_disassoc(sdata, false, false, reason_code);
- cfg80211_send_disassoc(sdata->dev, (u8 *) mgmt, len);
+ printk(KERN_DEBUG "%s: disassociated (Reason: %u)\n",
+ sdata->dev->name, reason_code);
+
+ ieee80211_set_disassoc(sdata, false);
+ return RX_MGMT_CFG80211_DISASSOC;
}
-static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgmt *mgmt,
- size_t len,
- int reassoc)
+static enum rx_mgmt_action __must_check
+ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_mgd_work *wk,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ bool reassoc)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
@@ -1614,17 +1416,16 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
bool have_higher_than_11mbit = false, newsta = false;
u16 ap_ht_cap_flags;
- /* AssocResp and ReassocResp have identical structure, so process both
- * of them in this function. */
-
- if (ifmgd->state != IEEE80211_STA_MLME_ASSOCIATE)
- return;
+ /*
+ * AssocResp and ReassocResp have identical structure, so process both
+ * of them in this function.
+ */
if (len < 24 + 6)
- return;
+ return RX_MGMT_NONE;
- if (memcmp(ifmgd->bssid, mgmt->sa, ETH_ALEN) != 0)
- return;
+ if (memcmp(wk->bss->cbss.bssid, mgmt->sa, ETH_ALEN) != 0)
+ return RX_MGMT_NONE;
capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
@@ -1647,26 +1448,18 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
printk(KERN_DEBUG "%s: AP rejected association temporarily; "
"comeback duration %u TU (%u ms)\n",
sdata->dev->name, tu, ms);
+ wk->timeout = jiffies + msecs_to_jiffies(ms);
if (ms > IEEE80211_ASSOC_TIMEOUT)
- mod_timer(&ifmgd->timer,
- jiffies + msecs_to_jiffies(ms));
- return;
+ run_again(ifmgd, jiffies + msecs_to_jiffies(ms));
+ return RX_MGMT_NONE;
}
if (status_code != WLAN_STATUS_SUCCESS) {
printk(KERN_DEBUG "%s: AP denied association (code=%d)\n",
sdata->dev->name, status_code);
- /* if this was a reassociation, ensure we try a "full"
- * association next time. This works around some broken APs
- * which do not correctly reject reassociation requests. */
- ifmgd->flags &= ~IEEE80211_STA_PREV_BSSID_SET;
- cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, len);
- if (ifmgd->flags & IEEE80211_STA_EXT_SME) {
- /* Wait for SME to decide what to do next */
- ifmgd->state = IEEE80211_STA_MLME_DISABLED;
- ieee80211_recalc_idle(local);
- }
- return;
+ list_del(&wk->list);
+ kfree(wk);
+ return RX_MGMT_CFG80211_ASSOC;
}
if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14)))
@@ -1677,51 +1470,35 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
if (!elems.supp_rates) {
printk(KERN_DEBUG "%s: no SuppRates element in AssocResp\n",
sdata->dev->name);
- return;
+ return RX_MGMT_NONE;
}
printk(KERN_DEBUG "%s: associated\n", sdata->dev->name);
ifmgd->aid = aid;
- ifmgd->ap_capab = capab_info;
-
- kfree(ifmgd->assocresp_ies);
- ifmgd->assocresp_ies_len = len - (pos - (u8 *) mgmt);
- ifmgd->assocresp_ies = kmalloc(ifmgd->assocresp_ies_len, GFP_KERNEL);
- if (ifmgd->assocresp_ies)
- memcpy(ifmgd->assocresp_ies, pos, ifmgd->assocresp_ies_len);
rcu_read_lock();
/* Add STA entry for the AP */
- sta = sta_info_get(local, ifmgd->bssid);
+ sta = sta_info_get(local, wk->bss->cbss.bssid);
if (!sta) {
newsta = true;
- sta = sta_info_alloc(sdata, ifmgd->bssid, GFP_ATOMIC);
+ rcu_read_unlock();
+
+ sta = sta_info_alloc(sdata, wk->bss->cbss.bssid, GFP_KERNEL);
if (!sta) {
printk(KERN_DEBUG "%s: failed to alloc STA entry for"
" the AP\n", sdata->dev->name);
- rcu_read_unlock();
- return;
+ return RX_MGMT_NONE;
}
- /* update new sta with its last rx activity */
- sta->last_rx = jiffies;
- }
+ set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC |
+ WLAN_STA_ASSOC_AP);
+ if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
+ set_sta_flags(sta, WLAN_STA_AUTHORIZED);
- /*
- * FIXME: Do we really need to update the sta_info's information here?
- * We already know about the AP (we found it in our list) so it
- * should already be filled with the right info, no?
- * As is stands, all this is racy because typically we assume
- * the information that is filled in here (except flags) doesn't
- * change while a STA structure is alive. As such, it should move
- * to between the sta_info_alloc() and sta_info_insert() above.
- */
-
- set_sta_flags(sta, WLAN_STA_AUTH | WLAN_STA_ASSOC | WLAN_STA_ASSOC_AP);
- if (!(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
- set_sta_flags(sta, WLAN_STA_AUTHORIZED);
+ rcu_read_lock();
+ }
rates = 0;
basic_rates = 0;
@@ -1771,8 +1548,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
else
sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
- /* If TKIP/WEP is used, no need to parse AP's HT capabilities */
- if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_TKIP_WEP_USED))
+ if (elems.ht_cap_elem && !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
ieee80211_ht_cap_ie_to_sta_ht_cap(sband,
elems.ht_cap_elem, &sta->sta.ht_cap);
@@ -1792,7 +1568,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
printk(KERN_DEBUG "%s: failed to insert STA entry for"
" the AP (error %d)\n", sdata->dev->name, err);
rcu_read_unlock();
- return;
+ return RX_MGMT_NONE;
}
}
@@ -1806,24 +1582,29 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
if (elems.ht_info_elem && elems.wmm_param &&
(ifmgd->flags & IEEE80211_STA_WMM_ENABLED) &&
- !(ifmgd->flags & IEEE80211_STA_TKIP_WEP_USED))
+ !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
+ wk->bss->cbss.bssid,
ap_ht_cap_flags);
+ /* delete work item -- must be before set_associated for PS */
+ list_del(&wk->list);
+
/* set AID and assoc capability,
* ieee80211_set_associated() will tell the driver */
bss_conf->aid = aid;
bss_conf->assoc_capability = capab_info;
- ieee80211_set_associated(sdata, changed);
+ /* this will take ownership of wk */
+ ieee80211_set_associated(sdata, wk, changed);
/*
- * initialise the time of last beacon to be the association time,
- * otherwise beacon loss check will trigger immediately
+ * Start timer to probe the connection to the AP now.
+ * Also start the timer that will detect beacon loss.
*/
- ifmgd->last_beacon = jiffies;
+ ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
+ mod_beacon_timer(sdata);
- ieee80211_associated(sdata);
- cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, len);
+ return RX_MGMT_CFG80211_ASSOC;
}
@@ -1851,23 +1632,25 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
bss = ieee80211_bss_info_update(local, rx_status, mgmt, len, elems,
channel, beacon);
- if (!bss)
+ if (bss)
+ ieee80211_rx_bss_put(local, bss);
+
+ if (!sdata->u.mgd.associated)
return;
if (elems->ch_switch_elem && (elems->ch_switch_elem_len == 3) &&
- (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN) == 0)) {
+ (memcmp(mgmt->bssid, sdata->u.mgd.associated->cbss.bssid,
+ ETH_ALEN) == 0)) {
struct ieee80211_channel_sw_ie *sw_elem =
(struct ieee80211_channel_sw_ie *)elems->ch_switch_elem;
ieee80211_sta_process_chanswitch(sdata, sw_elem, bss);
}
-
- ieee80211_rx_bss_put(local, bss);
}
static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgmt *mgmt,
- size_t len,
+ struct ieee80211_mgd_work *wk,
+ struct ieee80211_mgmt *mgmt, size_t len,
struct ieee80211_rx_status *rx_status)
{
struct ieee80211_if_managed *ifmgd;
@@ -1876,6 +1659,8 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
ifmgd = &sdata->u.mgd;
+ ASSERT_MGD_MTX(ifmgd);
+
if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN))
return; /* ignore ProbeResp to foreign address */
@@ -1889,17 +1674,32 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, false);
/* direct probe may be part of the association flow */
- if (ifmgd->state == IEEE80211_STA_MLME_DIRECT_PROBE) {
+ if (wk && wk->state == IEEE80211_MGD_STATE_PROBE) {
printk(KERN_DEBUG "%s direct probe responded\n",
sdata->dev->name);
- ieee80211_authenticate(sdata);
+ wk->tries = 0;
+ wk->state = IEEE80211_MGD_STATE_AUTH;
+ WARN_ON(ieee80211_authenticate(sdata, wk) != RX_MGMT_NONE);
}
- if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) {
- ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
+ if (ifmgd->associated &&
+ memcmp(mgmt->bssid, ifmgd->associated->cbss.bssid, ETH_ALEN) == 0 &&
+ ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
+ IEEE80211_STA_CONNECTION_POLL)) {
+ ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
+ IEEE80211_STA_BEACON_POLL);
mutex_lock(&sdata->local->iflist_mtx);
ieee80211_recalc_ps(sdata->local, -1);
mutex_unlock(&sdata->local->iflist_mtx);
+ /*
+ * We've received a probe response, but are not sure whether
+ * we have or will be receiving any beacons or data, so let's
+ * schedule the timers again, just in case.
+ */
+ mod_beacon_timer(sdata);
+ mod_timer(&ifmgd->conn_mon_timer,
+ round_jiffies_up(jiffies +
+ IEEE80211_CONNECTION_IDLE_TIME));
}
}
@@ -1937,6 +1737,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
bool erp_valid, directed_tim = false;
u8 erp_value = 0;
u32 ncrc;
+ u8 *bssid;
+
+ ASSERT_MGD_MTX(ifmgd);
/* Process beacon from the current BSS */
baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
@@ -1946,23 +1749,41 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (rx_status->freq != local->hw.conf.channel->center_freq)
return;
- if (!(ifmgd->flags & IEEE80211_STA_ASSOCIATED) ||
- memcmp(ifmgd->bssid, mgmt->bssid, ETH_ALEN) != 0)
+ /*
+ * We might have received a number of frames, among them a
+ * disassoc frame and a beacon...
+ */
+ if (!ifmgd->associated)
+ return;
+
+ bssid = ifmgd->associated->cbss.bssid;
+
+ /*
+ * And in theory even frames from a different AP we were just
+ * associated to a split-second ago!
+ */
+ if (memcmp(bssid, mgmt->bssid, ETH_ALEN) != 0)
return;
- if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) {
+ if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (net_ratelimit()) {
printk(KERN_DEBUG "%s: cancelling probereq poll due "
"to a received beacon\n", sdata->dev->name);
}
#endif
- ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
+ ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
mutex_lock(&local->iflist_mtx);
ieee80211_recalc_ps(local, -1);
mutex_unlock(&local->iflist_mtx);
}
+ /*
+ * Push the beacon loss detection into the future since
+ * we are processing a beacon from the AP just now.
+ */
+ mod_beacon_timer(sdata);
+
ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4);
ncrc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable,
len - baselen, &elems,
@@ -2019,15 +1840,15 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param &&
- !(ifmgd->flags & IEEE80211_STA_TKIP_WEP_USED)) {
+ !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) {
struct sta_info *sta;
struct ieee80211_supported_band *sband;
u16 ap_ht_cap_flags;
rcu_read_lock();
- sta = sta_info_get(local, ifmgd->bssid);
- if (!sta) {
+ sta = sta_info_get(local, bssid);
+ if (WARN_ON(!sta)) {
rcu_read_unlock();
return;
}
@@ -2042,15 +1863,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
rcu_read_unlock();
changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
- ap_ht_cap_flags);
+ bssid, ap_ht_cap_flags);
}
+ /* Note: country IE parsing is done for us by cfg80211 */
if (elems.country_elem) {
- /* Note we are only reviewing this on beacons
- * for the BSSID we are associated to */
- regulatory_hint_11d(local->hw.wiphy,
- elems.country_elem, elems.country_elem_len);
-
/* TODO: IBSS also needs this */
if (elems.pwr_constr_elem)
ieee80211_handle_pwr_constr(sdata,
@@ -2063,8 +1880,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
}
ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
- struct sk_buff *skb,
- struct ieee80211_rx_status *rx_status)
+ struct sk_buff *skb)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_mgmt *mgmt;
@@ -2080,14 +1896,14 @@ ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
case IEEE80211_STYPE_PROBE_REQ:
case IEEE80211_STYPE_PROBE_RESP:
case IEEE80211_STYPE_BEACON:
- memcpy(skb->cb, rx_status, sizeof(*rx_status));
case IEEE80211_STYPE_AUTH:
case IEEE80211_STYPE_ASSOC_RESP:
case IEEE80211_STYPE_REASSOC_RESP:
case IEEE80211_STYPE_DEAUTH:
case IEEE80211_STYPE_DISASSOC:
+ case IEEE80211_STYPE_ACTION:
skb_queue_tail(&sdata->u.mgd.skb_queue, skb);
- queue_work(local->hw.workqueue, &sdata->u.mgd.work);
+ ieee80211_queue_work(&local->hw, &sdata->u.mgd.work);
return RX_QUEUED;
}
@@ -2097,40 +1913,119 @@ ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata,
static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_rx_status *rx_status;
struct ieee80211_mgmt *mgmt;
+ struct ieee80211_mgd_work *wk;
+ enum rx_mgmt_action rma = RX_MGMT_NONE;
u16 fc;
rx_status = (struct ieee80211_rx_status *) skb->cb;
mgmt = (struct ieee80211_mgmt *) skb->data;
fc = le16_to_cpu(mgmt->frame_control);
- switch (fc & IEEE80211_FCTL_STYPE) {
- case IEEE80211_STYPE_PROBE_RESP:
- ieee80211_rx_mgmt_probe_resp(sdata, mgmt, skb->len,
- rx_status);
- break;
- case IEEE80211_STYPE_BEACON:
- ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len,
- rx_status);
- break;
- case IEEE80211_STYPE_AUTH:
- ieee80211_rx_mgmt_auth(sdata, mgmt, skb->len);
+ mutex_lock(&ifmgd->mtx);
+
+ if (ifmgd->associated &&
+ memcmp(ifmgd->associated->cbss.bssid, mgmt->bssid,
+ ETH_ALEN) == 0) {
+ switch (fc & IEEE80211_FCTL_STYPE) {
+ case IEEE80211_STYPE_BEACON:
+ ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len,
+ rx_status);
+ break;
+ case IEEE80211_STYPE_PROBE_RESP:
+ ieee80211_rx_mgmt_probe_resp(sdata, NULL, mgmt,
+ skb->len, rx_status);
+ break;
+ case IEEE80211_STYPE_DEAUTH:
+ rma = ieee80211_rx_mgmt_deauth(sdata, NULL,
+ mgmt, skb->len);
+ break;
+ case IEEE80211_STYPE_DISASSOC:
+ rma = ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
+ break;
+ case IEEE80211_STYPE_ACTION:
+ /* XXX: differentiate, can only happen for CSA now! */
+ ieee80211_sta_process_chanswitch(sdata,
+ &mgmt->u.action.u.chan_switch.sw_elem,
+ ifmgd->associated);
+ break;
+ }
+ mutex_unlock(&ifmgd->mtx);
+
+ switch (rma) {
+ case RX_MGMT_NONE:
+ /* no action */
+ break;
+ case RX_MGMT_CFG80211_DEAUTH:
+ cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len,
+ NULL);
+ break;
+ case RX_MGMT_CFG80211_DISASSOC:
+ cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len,
+ NULL);
+ break;
+ default:
+ WARN(1, "unexpected: %d", rma);
+ }
+ goto out;
+ }
+
+ list_for_each_entry(wk, &ifmgd->work_list, list) {
+ if (memcmp(wk->bss->cbss.bssid, mgmt->bssid, ETH_ALEN) != 0)
+ continue;
+
+ switch (fc & IEEE80211_FCTL_STYPE) {
+ case IEEE80211_STYPE_PROBE_RESP:
+ ieee80211_rx_mgmt_probe_resp(sdata, wk, mgmt, skb->len,
+ rx_status);
+ break;
+ case IEEE80211_STYPE_AUTH:
+ rma = ieee80211_rx_mgmt_auth(sdata, wk, mgmt, skb->len);
+ break;
+ case IEEE80211_STYPE_ASSOC_RESP:
+ rma = ieee80211_rx_mgmt_assoc_resp(sdata, wk, mgmt,
+ skb->len, false);
+ break;
+ case IEEE80211_STYPE_REASSOC_RESP:
+ rma = ieee80211_rx_mgmt_assoc_resp(sdata, wk, mgmt,
+ skb->len, true);
+ break;
+ case IEEE80211_STYPE_DEAUTH:
+ rma = ieee80211_rx_mgmt_deauth(sdata, wk, mgmt,
+ skb->len);
+ break;
+ }
+ /*
+ * We've processed this frame for that work, so it can't
+ * belong to another work struct.
+ * NB: this is also required for correctness because the
+ * called functions can free 'wk', and for 'rma'!
+ */
break;
- case IEEE80211_STYPE_ASSOC_RESP:
- ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len, 0);
+ }
+
+ mutex_unlock(&ifmgd->mtx);
+
+ switch (rma) {
+ case RX_MGMT_NONE:
+ /* no action */
break;
- case IEEE80211_STYPE_REASSOC_RESP:
- ieee80211_rx_mgmt_assoc_resp(sdata, mgmt, skb->len, 1);
+ case RX_MGMT_CFG80211_AUTH:
+ cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, skb->len);
break;
- case IEEE80211_STYPE_DEAUTH:
- ieee80211_rx_mgmt_deauth(sdata, mgmt, skb->len);
+ case RX_MGMT_CFG80211_ASSOC:
+ cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, skb->len);
break;
- case IEEE80211_STYPE_DISASSOC:
- ieee80211_rx_mgmt_disassoc(sdata, mgmt, skb->len);
+ case RX_MGMT_CFG80211_DEAUTH:
+ cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len, NULL);
break;
+ default:
+ WARN(1, "unexpected: %d", rma);
}
+ out:
kfree_skb(skb);
}
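
The rx_mgmt_action plumbing exists so the frame handlers can run under ifmgd->mtx while the cfg80211 notifications are issued only after the mutex is dropped. Below is a compressed user-space sketch of that pattern, with a pthread mutex standing in for the MLME mutex; all names are illustrative, not mac80211 or cfg80211 APIs.

	#include <pthread.h>
	#include <stdio.h>

	enum action { ACT_NONE, ACT_REPORT_DEAUTH };

	static pthread_mutex_t mlme_mtx = PTHREAD_MUTEX_INITIALIZER;

	static enum action handle_deauth_locked(void)
	{
		/* update mutex-protected state here ... */
		return ACT_REPORT_DEAUTH;
	}

	static void report_deauth(void)
	{
		/* may call back into code that takes other locks */
		printf("deauth reported to userspace\n");
	}

	static void rx_queued_mgmt(void)
	{
		enum action act;

		pthread_mutex_lock(&mlme_mtx);
		act = handle_deauth_locked();
		pthread_mutex_unlock(&mlme_mtx);

		if (act == ACT_REPORT_DEAUTH)
			report_deauth();
	}

	int main(void)
	{
		rx_queued_mgmt();
		return 0;
	}
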
@@ -2146,215 +2041,216 @@ static void ieee80211_sta_timer(unsigned long data)
return;
}
- set_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request);
- queue_work(local->hw.workqueue, &ifmgd->work);
+ ieee80211_queue_work(&local->hw, &ifmgd->work);
}
-static void ieee80211_sta_reset_auth(struct ieee80211_sub_if_data *sdata)
+static void ieee80211_sta_work(struct work_struct *work)
{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data, u.mgd.work);
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_if_managed *ifmgd;
+ struct sk_buff *skb;
+ struct ieee80211_mgd_work *wk, *tmp;
+ LIST_HEAD(free_work);
+ enum rx_mgmt_action rma;
+ bool anybusy = false;
- /* Reset own TSF to allow time synchronization work. */
- drv_reset_tsf(local);
+ if (!netif_running(sdata->dev))
+ return;
- ifmgd->wmm_last_param_set = -1; /* allow any WMM update */
+ if (local->scanning)
+ return;
+ if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
+ return;
- if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_OPEN)
- ifmgd->auth_alg = WLAN_AUTH_OPEN;
- else if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_SHARED_KEY)
- ifmgd->auth_alg = WLAN_AUTH_SHARED_KEY;
- else if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_LEAP)
- ifmgd->auth_alg = WLAN_AUTH_LEAP;
- else if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_FT)
- ifmgd->auth_alg = WLAN_AUTH_FT;
- else
- ifmgd->auth_alg = WLAN_AUTH_OPEN;
- ifmgd->auth_transaction = -1;
- ifmgd->flags &= ~IEEE80211_STA_ASSOCIATED;
- ifmgd->assoc_scan_tries = 0;
- ifmgd->direct_probe_tries = 0;
- ifmgd->auth_tries = 0;
- ifmgd->assoc_tries = 0;
- netif_tx_stop_all_queues(sdata->dev);
- netif_carrier_off(sdata->dev);
-}
+ /*
+ * ieee80211_queue_work() should have picked up most cases,
+ * here we'll pick the rest.
+ */
+ if (WARN(local->suspended, "STA MLME work scheduled while "
+ "going to suspend\n"))
+ return;
-static int ieee80211_sta_config_auth(struct ieee80211_sub_if_data *sdata)
-{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_bss *bss;
- u8 *bssid = ifmgd->bssid, *ssid = ifmgd->ssid;
- u8 ssid_len = ifmgd->ssid_len;
- u16 capa_mask = WLAN_CAPABILITY_ESS;
- u16 capa_val = WLAN_CAPABILITY_ESS;
- struct ieee80211_channel *chan = local->oper_channel;
+ ifmgd = &sdata->u.mgd;
- if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) &&
- ifmgd->flags & (IEEE80211_STA_AUTO_SSID_SEL |
- IEEE80211_STA_AUTO_BSSID_SEL |
- IEEE80211_STA_AUTO_CHANNEL_SEL)) {
- capa_mask |= WLAN_CAPABILITY_PRIVACY;
- if (sdata->default_key)
- capa_val |= WLAN_CAPABILITY_PRIVACY;
- }
+ /* first process frames to avoid timing out while a frame is pending */
+ while ((skb = skb_dequeue(&ifmgd->skb_queue)))
+ ieee80211_sta_rx_queued_mgmt(sdata, skb);
+
+ /* then process the rest of the work */
+ mutex_lock(&ifmgd->mtx);
- if (ifmgd->flags & IEEE80211_STA_AUTO_CHANNEL_SEL)
- chan = NULL;
+ if (ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
+ IEEE80211_STA_CONNECTION_POLL) &&
+ ifmgd->associated) {
+ u8 bssid[ETH_ALEN];
- if (ifmgd->flags & IEEE80211_STA_AUTO_BSSID_SEL)
- bssid = NULL;
+ memcpy(bssid, ifmgd->associated->cbss.bssid, ETH_ALEN);
+ if (time_is_after_jiffies(ifmgd->probe_timeout))
+ run_again(ifmgd, ifmgd->probe_timeout);
- if (ifmgd->flags & IEEE80211_STA_AUTO_SSID_SEL) {
- ssid = NULL;
- ssid_len = 0;
+ else if (ifmgd->probe_send_count < IEEE80211_MAX_PROBE_TRIES) {
+#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
+ printk(KERN_DEBUG "No probe response from AP %pM"
+ " after %dms, try %d\n", bssid,
+ (1000 * IEEE80211_PROBE_WAIT)/HZ,
+ ifmgd->probe_send_count);
+#endif
+ ieee80211_mgd_probe_ap_send(sdata);
+ } else {
+ /*
+ * We actually lost the connection ... or did we?
+ * Let's make sure!
+ */
+ ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
+ IEEE80211_STA_BEACON_POLL);
+ printk(KERN_DEBUG "No probe response from AP %pM"
+ " after %dms, disconnecting.\n",
+ bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
+ ieee80211_set_disassoc(sdata, true);
+ mutex_unlock(&ifmgd->mtx);
+ /*
+ * must be outside lock due to cfg80211,
+ * but that's not a problem.
+ */
+ ieee80211_send_deauth_disassoc(sdata, bssid,
+ IEEE80211_STYPE_DEAUTH,
+ WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
+ NULL);
+ mutex_lock(&ifmgd->mtx);
+ }
}
- bss = (void *)cfg80211_get_bss(local->hw.wiphy, chan,
- bssid, ssid, ssid_len,
- capa_mask, capa_val);
- if (bss) {
- local->oper_channel = bss->cbss.channel;
- local->oper_channel_type = NL80211_CHAN_NO_HT;
- ieee80211_hw_config(local, 0);
+ ieee80211_recalc_idle(local);
- if (!(ifmgd->flags & IEEE80211_STA_SSID_SET))
- ieee80211_sta_set_ssid(sdata, bss->ssid,
- bss->ssid_len);
- ieee80211_sta_set_bssid(sdata, bss->cbss.bssid);
- ieee80211_sta_def_wmm_params(sdata, bss->supp_rates_len,
- bss->supp_rates);
- if (sdata->u.mgd.mfp == IEEE80211_MFP_REQUIRED)
- sdata->u.mgd.flags |= IEEE80211_STA_MFP_ENABLED;
- else
- sdata->u.mgd.flags &= ~IEEE80211_STA_MFP_ENABLED;
-
- /* Send out direct probe if no probe resp was received or
- * the one we have is outdated
- */
- if (!bss->last_probe_resp ||
- time_after(jiffies, bss->last_probe_resp
- + IEEE80211_SCAN_RESULT_EXPIRE))
- ifmgd->state = IEEE80211_STA_MLME_DIRECT_PROBE;
- else
- ifmgd->state = IEEE80211_STA_MLME_AUTHENTICATE;
+ list_for_each_entry_safe(wk, tmp, &ifmgd->work_list, list) {
+ if (time_is_after_jiffies(wk->timeout)) {
+ /*
+ * This work item isn't supposed to be worked on
+ * right now, but take care to adjust the timer
+ * properly.
+ */
+ run_again(ifmgd, wk->timeout);
+ continue;
+ }
- ieee80211_rx_bss_put(local, bss);
- ieee80211_sta_reset_auth(sdata);
- return 0;
- } else {
- if (ifmgd->assoc_scan_tries < IEEE80211_ASSOC_SCANS_MAX_TRIES) {
+ switch (wk->state) {
+ default:
+ WARN_ON(1);
+ /* fall through */
+ case IEEE80211_MGD_STATE_IDLE:
+ /* nothing */
+ rma = RX_MGMT_NONE;
+ break;
+ case IEEE80211_MGD_STATE_PROBE:
+ rma = ieee80211_direct_probe(sdata, wk);
+ break;
+ case IEEE80211_MGD_STATE_AUTH:
+ rma = ieee80211_authenticate(sdata, wk);
+ break;
+ case IEEE80211_MGD_STATE_ASSOC:
+ rma = ieee80211_associate(sdata, wk);
+ break;
+ }
+
+ switch (rma) {
+ case RX_MGMT_NONE:
+ /* no action required */
+ break;
+ case RX_MGMT_CFG80211_AUTH_TO:
+ case RX_MGMT_CFG80211_ASSOC_TO:
+ list_del(&wk->list);
+ list_add(&wk->list, &free_work);
+ wk->tries = rma; /* small abuse but only local */
+ break;
+ default:
+ WARN(1, "unexpected: %d", rma);
+ }
+ }
- ifmgd->assoc_scan_tries++;
+ list_for_each_entry(wk, &ifmgd->work_list, list) {
+ if (wk->state != IEEE80211_MGD_STATE_IDLE) {
+ anybusy = true;
+ break;
+ }
+ }
+ if (!anybusy &&
+ test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request))
+ ieee80211_queue_delayed_work(&local->hw,
+ &local->scan_work,
+ round_jiffies_relative(0));
- ieee80211_request_internal_scan(sdata, ifmgd->ssid,
- ssid_len);
+ mutex_unlock(&ifmgd->mtx);
- ifmgd->state = IEEE80211_STA_MLME_AUTHENTICATE;
- set_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request);
- } else {
- ifmgd->assoc_scan_tries = 0;
- ifmgd->state = IEEE80211_STA_MLME_DISABLED;
- ieee80211_recalc_idle(local);
+ list_for_each_entry_safe(wk, tmp, &free_work, list) {
+ switch (wk->tries) {
+ case RX_MGMT_CFG80211_AUTH_TO:
+ cfg80211_send_auth_timeout(sdata->dev,
+ wk->bss->cbss.bssid);
+ break;
+ case RX_MGMT_CFG80211_ASSOC_TO:
+ cfg80211_send_assoc_timeout(sdata->dev,
+ wk->bss->cbss.bssid);
+ break;
+ default:
+ WARN(1, "unexpected: %d", wk->tries);
}
+
+ list_del(&wk->list);
+ kfree(wk);
}
- return -1;
-}
+ ieee80211_recalc_idle(local);
+}
-static void ieee80211_sta_work(struct work_struct *work)
+static void ieee80211_sta_bcn_mon_timer(unsigned long data)
{
struct ieee80211_sub_if_data *sdata =
- container_of(work, struct ieee80211_sub_if_data, u.mgd.work);
+ (struct ieee80211_sub_if_data *) data;
struct ieee80211_local *local = sdata->local;
- struct ieee80211_if_managed *ifmgd;
- struct sk_buff *skb;
-
- if (!netif_running(sdata->dev))
- return;
- if (local->sw_scanning || local->hw_scanning)
+ if (local->quiescing)
return;
- if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
- return;
-
- /*
- * Nothing should have been stuffed into the workqueue during
- * the suspend->resume cycle. If this WARN is seen then there
- * is a bug with either the driver suspend or something in
- * mac80211 stuffing into the workqueue which we haven't yet
- * cleared during mac80211's suspend cycle.
- */
- if (WARN_ON(local->suspended))
- return;
-
- ifmgd = &sdata->u.mgd;
-
- while ((skb = skb_dequeue(&ifmgd->skb_queue)))
- ieee80211_sta_rx_queued_mgmt(sdata, skb);
+ ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_loss_work);
+}
- if (ifmgd->state != IEEE80211_STA_MLME_DIRECT_PROBE &&
- ifmgd->state != IEEE80211_STA_MLME_AUTHENTICATE &&
- ifmgd->state != IEEE80211_STA_MLME_ASSOCIATE &&
- test_and_clear_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request)) {
- queue_delayed_work(local->hw.workqueue, &local->scan_work,
- round_jiffies_relative(0));
- return;
- }
+static void ieee80211_sta_conn_mon_timer(unsigned long data)
+{
+ struct ieee80211_sub_if_data *sdata =
+ (struct ieee80211_sub_if_data *) data;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_local *local = sdata->local;
- if (test_and_clear_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request)) {
- if (ieee80211_sta_config_auth(sdata))
- return;
- clear_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request);
- } else if (!test_and_clear_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request))
+ if (local->quiescing)
return;
- ieee80211_recalc_idle(local);
-
- switch (ifmgd->state) {
- case IEEE80211_STA_MLME_DISABLED:
- break;
- case IEEE80211_STA_MLME_DIRECT_PROBE:
- ieee80211_direct_probe(sdata);
- break;
- case IEEE80211_STA_MLME_AUTHENTICATE:
- ieee80211_authenticate(sdata);
- break;
- case IEEE80211_STA_MLME_ASSOCIATE:
- ieee80211_associate(sdata);
- break;
- case IEEE80211_STA_MLME_ASSOCIATED:
- ieee80211_associated(sdata);
- break;
- default:
- WARN_ON(1);
- break;
- }
+ ieee80211_queue_work(&local->hw, &ifmgd->monitor_work);
+}
- if (ieee80211_privacy_mismatch(sdata)) {
- printk(KERN_DEBUG "%s: privacy configuration mismatch and "
- "mixed-cell disabled - disassociate\n", sdata->dev->name);
+static void ieee80211_sta_monitor_work(struct work_struct *work)
+{
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data,
+ u.mgd.monitor_work);
- ieee80211_set_disassoc(sdata, false, true,
- WLAN_REASON_UNSPECIFIED);
- }
+ ieee80211_mgd_probe_ap(sdata, false);
}
static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
{
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- /*
- * Need to update last_beacon to avoid beacon loss
- * test to trigger.
- */
- sdata->u.mgd.last_beacon = jiffies;
-
-
- queue_work(sdata->local->hw.workqueue,
+ sdata->u.mgd.flags &= ~(IEEE80211_STA_BEACON_POLL |
+ IEEE80211_STA_CONNECTION_POLL);
+
+ /* let's probe the connection once */
+ ieee80211_queue_work(&sdata->local->hw,
+ &sdata->u.mgd.monitor_work);
+ /* and do all the other regular work too */
+ ieee80211_queue_work(&sdata->local->hw,
&sdata->u.mgd.work);
}
}
@@ -2378,6 +2274,11 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
cancel_work_sync(&ifmgd->chswitch_work);
if (del_timer_sync(&ifmgd->chswitch_timer))
set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
+
+ cancel_work_sync(&ifmgd->monitor_work);
+ /* these will just be re-established on connection */
+ del_timer_sync(&ifmgd->conn_mon_timer);
+ del_timer_sync(&ifmgd->bcn_mon_timer);
}
void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
@@ -2395,210 +2296,277 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd;
- u32 hw_flags;
ifmgd = &sdata->u.mgd;
INIT_WORK(&ifmgd->work, ieee80211_sta_work);
+ INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work);
INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work);
INIT_WORK(&ifmgd->beacon_loss_work, ieee80211_beacon_loss_work);
setup_timer(&ifmgd->timer, ieee80211_sta_timer,
(unsigned long) sdata);
+ setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer,
+ (unsigned long) sdata);
+ setup_timer(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer,
+ (unsigned long) sdata);
setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer,
(unsigned long) sdata);
skb_queue_head_init(&ifmgd->skb_queue);
+ INIT_LIST_HEAD(&ifmgd->work_list);
+
ifmgd->capab = WLAN_CAPABILITY_ESS;
- ifmgd->auth_algs = IEEE80211_AUTH_ALG_OPEN |
- IEEE80211_AUTH_ALG_SHARED_KEY;
- ifmgd->flags |= IEEE80211_STA_CREATE_IBSS |
- IEEE80211_STA_AUTO_BSSID_SEL |
- IEEE80211_STA_AUTO_CHANNEL_SEL;
+ ifmgd->flags = 0;
if (sdata->local->hw.queues >= 4)
ifmgd->flags |= IEEE80211_STA_WMM_ENABLED;
- hw_flags = sdata->local->hw.flags;
-
- if (hw_flags & IEEE80211_HW_SUPPORTS_PS) {
- ifmgd->powersave = CONFIG_MAC80211_DEFAULT_PS_VALUE;
- sdata->local->hw.conf.dynamic_ps_timeout = 500;
- }
+ mutex_init(&ifmgd->mtx);
}
-/* configuration hooks */
-void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata)
+/* scan finished notification */
+void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- struct ieee80211_local *local = sdata->local;
-
- if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
- return;
-
- if ((ifmgd->flags & (IEEE80211_STA_BSSID_SET |
- IEEE80211_STA_AUTO_BSSID_SEL)) &&
- (ifmgd->flags & (IEEE80211_STA_SSID_SET |
- IEEE80211_STA_AUTO_SSID_SEL))) {
-
- if (ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED)
- ieee80211_set_disassoc(sdata, true, true,
- WLAN_REASON_DEAUTH_LEAVING);
-
- if (ifmgd->ssid_len == 0) {
- /*
- * Only allow association to be started if a valid SSID
- * is configured.
- */
- return;
- }
+ struct ieee80211_sub_if_data *sdata = local->scan_sdata;
- if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) ||
- ifmgd->state != IEEE80211_STA_MLME_ASSOCIATE)
- set_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request);
- else if (ifmgd->flags & IEEE80211_STA_EXT_SME)
- set_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request);
- queue_work(local->hw.workqueue, &ifmgd->work);
- }
+ /* Restart STA timers */
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdata, &local->interfaces, list)
+ ieee80211_restart_sta_timer(sdata);
+ rcu_read_unlock();
}
-int ieee80211_sta_commit(struct ieee80211_sub_if_data *sdata)
+int ieee80211_max_network_latency(struct notifier_block *nb,
+ unsigned long data, void *dummy)
{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ s32 latency_usec = (s32) data;
+ struct ieee80211_local *local =
+ container_of(nb, struct ieee80211_local,
+ network_latency_notifier);
- if (ifmgd->ssid_len)
- ifmgd->flags |= IEEE80211_STA_SSID_SET;
- else
- ifmgd->flags &= ~IEEE80211_STA_SSID_SET;
+ mutex_lock(&local->iflist_mtx);
+ ieee80211_recalc_ps(local, latency_usec);
+ mutex_unlock(&local->iflist_mtx);
return 0;
}
-int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len)
+/* config hooks */
+int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_auth_request *req)
{
- struct ieee80211_if_managed *ifmgd;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ const u8 *ssid;
+ struct ieee80211_mgd_work *wk;
+ u16 auth_alg;
- if (len > IEEE80211_MAX_SSID_LEN)
- return -EINVAL;
+ switch (req->auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ auth_alg = WLAN_AUTH_OPEN;
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ auth_alg = WLAN_AUTH_SHARED_KEY;
+ break;
+ case NL80211_AUTHTYPE_FT:
+ auth_alg = WLAN_AUTH_FT;
+ break;
+ case NL80211_AUTHTYPE_NETWORK_EAP:
+ auth_alg = WLAN_AUTH_LEAP;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
- ifmgd = &sdata->u.mgd;
+ wk = kzalloc(sizeof(*wk) + req->ie_len, GFP_KERNEL);
+ if (!wk)
+ return -ENOMEM;
- if (ifmgd->ssid_len != len || memcmp(ifmgd->ssid, ssid, len) != 0) {
- if (ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED)
- ieee80211_set_disassoc(sdata, true, true,
- WLAN_REASON_DEAUTH_LEAVING);
+ wk->bss = (void *)req->bss;
- /*
- * Do not use reassociation if SSID is changed (different ESS).
- */
- ifmgd->flags &= ~IEEE80211_STA_PREV_BSSID_SET;
- memset(ifmgd->ssid, 0, sizeof(ifmgd->ssid));
- memcpy(ifmgd->ssid, ssid, len);
- ifmgd->ssid_len = len;
+ if (req->ie && req->ie_len) {
+ memcpy(wk->ie, req->ie, req->ie_len);
+ wk->ie_len = req->ie_len;
}
- return ieee80211_sta_commit(sdata);
-}
+ if (req->key && req->key_len) {
+ wk->key_len = req->key_len;
+ wk->key_idx = req->key_idx;
+ memcpy(wk->key, req->key, req->key_len);
+ }
-int ieee80211_sta_get_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t *len)
-{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- memcpy(ssid, ifmgd->ssid, ifmgd->ssid_len);
- *len = ifmgd->ssid_len;
+ ssid = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
+ memcpy(wk->ssid, ssid + 2, ssid[1]);
+ wk->ssid_len = ssid[1];
+
+ wk->state = IEEE80211_MGD_STATE_PROBE;
+ wk->auth_alg = auth_alg;
+ wk->timeout = jiffies; /* run right away */
+
+ /*
+ * XXX: if still associated need to tell AP that we're going
+ * to sleep and then change channel etc.
+ */
+ sdata->local->oper_channel = req->bss->channel;
+ ieee80211_hw_config(sdata->local, 0);
+
+ mutex_lock(&ifmgd->mtx);
+ list_add(&wk->list, &sdata->u.mgd.work_list);
+ mutex_unlock(&ifmgd->mtx);
+
+ ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.work);
return 0;
}
-int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid)
+int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_assoc_request *req)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_mgd_work *wk, *found = NULL;
+ int i, err;
- if (compare_ether_addr(bssid, ifmgd->bssid) != 0 &&
- ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED)
- ieee80211_set_disassoc(sdata, true, true,
- WLAN_REASON_DEAUTH_LEAVING);
+ mutex_lock(&ifmgd->mtx);
- if (is_valid_ether_addr(bssid)) {
- memcpy(ifmgd->bssid, bssid, ETH_ALEN);
- ifmgd->flags |= IEEE80211_STA_BSSID_SET;
- } else {
- memset(ifmgd->bssid, 0, ETH_ALEN);
- ifmgd->flags &= ~IEEE80211_STA_BSSID_SET;
+ list_for_each_entry(wk, &ifmgd->work_list, list) {
+ if (&wk->bss->cbss == req->bss &&
+ wk->state == IEEE80211_MGD_STATE_IDLE) {
+ found = wk;
+ break;
+ }
}
- return ieee80211_sta_commit(sdata);
-}
+ if (!found) {
+ err = -ENOLINK;
+ goto out;
+ }
-int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata,
- const char *ie, size_t len)
-{
- struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ list_del(&found->list);
- if (len == 0 && ifmgd->extra_ie_len == 0)
- return -EALREADY;
+ wk = krealloc(found, sizeof(*wk) + req->ie_len, GFP_KERNEL);
+ if (!wk) {
+ list_add(&found->list, &ifmgd->work_list);
+ err = -ENOMEM;
+ goto out;
+ }
- if (len == ifmgd->extra_ie_len && ifmgd->extra_ie &&
- memcmp(ifmgd->extra_ie, ie, len) == 0)
- return -EALREADY;
+ list_add(&wk->list, &ifmgd->work_list);
- kfree(ifmgd->extra_ie);
- if (len == 0) {
- ifmgd->extra_ie = NULL;
- ifmgd->extra_ie_len = 0;
- return 0;
- }
- ifmgd->extra_ie = kmalloc(len, GFP_KERNEL);
- if (!ifmgd->extra_ie) {
- ifmgd->extra_ie_len = 0;
- return -ENOMEM;
+ ifmgd->flags &= ~IEEE80211_STA_DISABLE_11N;
+
+ for (i = 0; i < req->crypto.n_ciphers_pairwise; i++)
+ if (req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP40 ||
+ req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP ||
+ req->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_WEP104)
+ ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
+
+ sdata->local->oper_channel = req->bss->channel;
+ ieee80211_hw_config(sdata->local, 0);
+
+ if (req->ie && req->ie_len) {
+ memcpy(wk->ie, req->ie, req->ie_len);
+ wk->ie_len = req->ie_len;
+ } else
+ wk->ie_len = 0;
+
+ if (req->prev_bssid)
+ memcpy(wk->prev_bssid, req->prev_bssid, ETH_ALEN);
+
+ wk->state = IEEE80211_MGD_STATE_ASSOC;
+ wk->tries = 0;
+ wk->timeout = jiffies; /* run right away */
+
+ if (req->use_mfp) {
+ ifmgd->mfp = IEEE80211_MFP_REQUIRED;
+ ifmgd->flags |= IEEE80211_STA_MFP_ENABLED;
+ } else {
+ ifmgd->mfp = IEEE80211_MFP_DISABLED;
+ ifmgd->flags &= ~IEEE80211_STA_MFP_ENABLED;
}
- memcpy(ifmgd->extra_ie, ie, len);
- ifmgd->extra_ie_len = len;
- return 0;
-}
-int ieee80211_sta_deauthenticate(struct ieee80211_sub_if_data *sdata, u16 reason)
-{
- printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n",
- sdata->dev->name, reason);
+ if (req->crypto.control_port)
+ ifmgd->flags |= IEEE80211_STA_CONTROL_PORT;
+ else
+ ifmgd->flags &= ~IEEE80211_STA_CONTROL_PORT;
- ieee80211_set_disassoc(sdata, true, true, reason);
- return 0;
+ ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.work);
+
+ err = 0;
+
+ out:
+ mutex_unlock(&ifmgd->mtx);
+ return err;
}
-int ieee80211_sta_disassociate(struct ieee80211_sub_if_data *sdata, u16 reason)
+int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_deauth_request *req,
+ void *cookie)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_mgd_work *wk;
+ const u8 *bssid = NULL;
- printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n",
- sdata->dev->name, reason);
+ printk(KERN_DEBUG "%s: deauthenticating by local choice (reason=%d)\n",
+ sdata->dev->name, req->reason_code);
+
+ mutex_lock(&ifmgd->mtx);
+
+ if (ifmgd->associated && &ifmgd->associated->cbss == req->bss) {
+ bssid = req->bss->bssid;
+ ieee80211_set_disassoc(sdata, true);
+ } else list_for_each_entry(wk, &ifmgd->work_list, list) {
+ if (&wk->bss->cbss == req->bss) {
+ bssid = req->bss->bssid;
+ list_del(&wk->list);
+ kfree(wk);
+ break;
+ }
+ }
- if (!(ifmgd->flags & IEEE80211_STA_ASSOCIATED))
+ /*
+ * cfg80211 should catch this ... but it's racy since
+ * we can receive a deauth frame, process it, hand it
+ * to cfg80211 while that's in a locked section already
+ * trying to tell us that the user wants to disconnect.
+ */
+ if (!bssid) {
+ mutex_unlock(&ifmgd->mtx);
return -ENOLINK;
+ }
+
+ mutex_unlock(&ifmgd->mtx);
+
+ ieee80211_send_deauth_disassoc(sdata, bssid,
+ IEEE80211_STYPE_DEAUTH, req->reason_code,
+ cookie);
- ieee80211_set_disassoc(sdata, false, true, reason);
return 0;
}
-/* scan finished notification */
-void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
+int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_disassoc_request *req,
+ void *cookie)
{
- struct ieee80211_sub_if_data *sdata = local->scan_sdata;
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- /* Restart STA timers */
- rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces, list)
- ieee80211_restart_sta_timer(sdata);
- rcu_read_unlock();
-}
+ printk(KERN_DEBUG "%s: disassociating by local choice (reason=%d)\n",
+ sdata->dev->name, req->reason_code);
-int ieee80211_max_network_latency(struct notifier_block *nb,
- unsigned long data, void *dummy)
-{
- s32 latency_usec = (s32) data;
- struct ieee80211_local *local =
- container_of(nb, struct ieee80211_local,
- network_latency_notifier);
+ mutex_lock(&ifmgd->mtx);
- mutex_lock(&local->iflist_mtx);
- ieee80211_recalc_ps(local, latency_usec);
- mutex_unlock(&local->iflist_mtx);
+ /*
+ * cfg80211 should catch this ... but it's racy since
+ * we can receive a disassoc frame, process it, hand it
+ * to cfg80211 while that's in a locked section already
+ * trying to tell us that the user wants to disconnect.
+ */
+ if (&ifmgd->associated->cbss != req->bss) {
+ mutex_unlock(&ifmgd->mtx);
+ return -ENOLINK;
+ }
+
+ ieee80211_set_disassoc(sdata, false);
+
+ mutex_unlock(&ifmgd->mtx);
+ ieee80211_send_deauth_disassoc(sdata, req->bss->bssid,
+ IEEE80211_STYPE_DISASSOC, req->reason_code,
+ cookie);
return 0;
}
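The mlme.c rework above replaces the single ifmgd->state machine with a per-BSS work list: each authentication/association attempt becomes a struct ieee80211_mgd_work entry on ifmgd->work_list, guarded by ifmgd->mtx, and the interface work function dispatches on wk->state (PROBE -> AUTH -> ASSOC) as seen near the top of this hunk. A minimal sketch of how a cfg80211 request turns into such a work item, condensed from ieee80211_mgd_auth() above (simplified, not the complete code):

        struct ieee80211_mgd_work *wk;

        wk = kzalloc(sizeof(*wk) + req->ie_len, GFP_KERNEL);
        if (!wk)
                return -ENOMEM;

        wk->bss = (void *)req->bss;
        wk->state = IEEE80211_MGD_STATE_PROBE;  /* later advances to AUTH, then ASSOC */
        wk->timeout = jiffies;                  /* run right away */

        mutex_lock(&ifmgd->mtx);
        list_add(&wk->list, &ifmgd->work_list);
        mutex_unlock(&ifmgd->mtx);

        ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.work);

Auth/assoc timeouts are moved to a local free_work list and reported to cfg80211 only after ifmgd->mtx has been dropped, as the work-function hunk above shows.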
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 5e3d476972f..e535f1c988f 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -26,7 +26,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
/* make quiescing visible to timers everywhere */
mb();
- flush_workqueue(local->hw.workqueue);
+ flush_workqueue(local->workqueue);
/* Don't try to run timers while suspended. */
del_timer_sync(&local->sta_cleanup);
@@ -96,6 +96,10 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
if (!netif_running(sdata->dev))
continue;
+ /* disable beaconing */
+ ieee80211_bss_info_change_notify(sdata,
+ BSS_CHANGED_BEACON_ENABLED);
+
conf.vif = &sdata->vif;
conf.type = sdata->vif.type;
conf.mac_addr = sdata->dev->dev_addr;
@@ -103,17 +107,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
}
/* stop hardware - this must stop RX */
- if (local->open_count) {
- ieee80211_led_radio(local, false);
- drv_stop(local);
- }
-
- /*
- * flush again, in case driver queued work -- it
- * shouldn't be doing (or cancel everything in the
- * stop callback) that but better safe than sorry.
- */
- flush_workqueue(local->hw.workqueue);
+ if (local->open_count)
+ ieee80211_stop_device(local);
local->suspended = true;
/* need suspended to be visible before quiescing is false */
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 4641f00a1e5..b33efc4fc26 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -198,6 +198,35 @@ static void rate_control_release(struct kref *kref)
kfree(ctrl_ref);
}
+static bool rc_no_data_or_no_ack(struct ieee80211_tx_rate_control *txrc)
+{
+ struct sk_buff *skb = txrc->skb;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ __le16 fc;
+
+ fc = hdr->frame_control;
+
+ return ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !ieee80211_is_data(fc));
+}
+
+bool rate_control_send_low(struct ieee80211_sta *sta,
+ void *priv_sta,
+ struct ieee80211_tx_rate_control *txrc)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
+
+ if (!sta || !priv_sta || rc_no_data_or_no_ack(txrc)) {
+ info->control.rates[0].idx = rate_lowest_index(txrc->sband, sta);
+ info->control.rates[0].count =
+ (info->flags & IEEE80211_TX_CTL_NO_ACK) ?
+ 1 : txrc->hw->max_rate_tries;
+ return true;
+ }
+ return false;
+}
+EXPORT_SYMBOL(rate_control_send_low);
+
void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta,
struct ieee80211_tx_rate_control *txrc)
@@ -258,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
struct rate_control_ref *ref, *old;
ASSERT_RTNL();
- if (local->open_count || netif_running(local->mdev))
+ if (local->open_count)
return -EBUSY;
ref = rate_control_alloc(name, local);
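The rate_control_send_low() helper exported above centralizes the "no station info, management frame, or NO_ACK frame => transmit at the lowest rate" handling that each rate-control algorithm previously open-coded. An algorithm's get_rate hook is expected to call it first and return early when it has already filled in the rate, which is exactly what the minstrel and PID hunks below do. A sketch of that calling convention (example_get_rate is illustrative, not an existing function):

        static void example_get_rate(void *priv, struct ieee80211_sta *sta,
                                     void *priv_sta,
                                     struct ieee80211_tx_rate_control *txrc)
        {
                /* lowest-rate fallback for missing station info, non-data or NO_ACK frames */
                if (rate_control_send_low(sta, priv_sta, txrc))
                        return;

                /* ... normal per-station rate selection for data frames ... */
        }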
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 37771abd8f5..6e5d68b4e42 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -70,20 +70,6 @@ rix_to_ndx(struct minstrel_sta_info *mi, int rix)
return i;
}
-static inline bool
-use_low_rate(struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- u16 fc;
-
- fc = le16_to_cpu(hdr->frame_control);
-
- return ((info->flags & IEEE80211_TX_CTL_NO_ACK) ||
- (fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA);
-}
-
-
static void
minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
{
@@ -232,7 +218,6 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
void *priv_sta, struct ieee80211_tx_rate_control *txrc)
{
struct sk_buff *skb = txrc->skb;
- struct ieee80211_supported_band *sband = txrc->sband;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct minstrel_sta_info *mi = priv_sta;
struct minstrel_priv *mp = priv;
@@ -245,14 +230,8 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
int mrr_ndx[3];
int sample_rate;
- if (!sta || !mi || use_low_rate(skb)) {
- ar[0].idx = rate_lowest_index(sband, sta);
- if (info->flags & IEEE80211_TX_CTL_NO_ACK)
- ar[0].count = 1;
- else
- ar[0].count = mp->max_retry;
+ if (rate_control_send_low(sta, priv_sta, txrc))
return;
- }
mrr = mp->has_mrr && !txrc->rts && !txrc->bss_conf->use_cts_prot;
@@ -439,7 +418,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
/* contention window */
tx_time_single += t_slot + min(cw, mp->cw_max);
- cw = (cw + 1) << 1;
+ cw = (cw << 1) | 1;
tx_time += tx_time_single;
tx_time_cts += tx_time_single + mi->sp_ack_dur;
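The one-character contention-window fix in minstrel_rate_init() above deserves a note: EDCA contention windows take the form 2^n - 1 (15, 31, 63, ...). The old update (cw + 1) << 1 leaves that form after a single step (15 -> 32 -> 66 -> ...), while the new (cw << 1) | 1 preserves it (15 -> 31 -> 63 -> 127), so the per-retry backoff estimate tracks the values actually used for backoff. A tiny standalone comparison (plain C, not kernel code):

        #include <stdio.h>

        int main(void)
        {
                unsigned int old_cw = 15, new_cw = 15;
                int retry;

                for (retry = 1; retry <= 4; retry++) {
                        old_cw = (old_cw + 1) << 1;     /* 32, 66, 134, 270 */
                        new_cw = (new_cw << 1) | 1;     /* 31, 63, 127, 255 */
                        printf("retry %d: old=%u new=%u\n", retry, old_cw, new_cw);
                }
                return 0;
        }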
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index 869fe0ef951..38bf4168fc3 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -33,7 +33,6 @@ struct minstrel_rate {
/* per-rate throughput */
u32 cur_tp;
- u32 throughput;
u64 succ_hist;
u64 att_hist;
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index 98f48070805..a715d9454f6 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -83,7 +83,7 @@ minstrel_stats_open(struct inode *inode, struct file *file)
p += sprintf(p, "%3u%s", mr->bitrate / 2,
(mr->bitrate & 1 ? ".5" : " "));
- tp = ((mr->cur_tp * 96) / 18000) >> 10;
+ tp = mr->cur_tp / ((18000 << 10) / 96);
prob = mr->cur_prob / 18;
eprob = mr->probability / 18;
@@ -139,7 +139,7 @@ minstrel_stats_release(struct inode *inode, struct file *file)
return 0;
}
-static struct file_operations minstrel_stat_fops = {
+static const struct file_operations minstrel_stat_fops = {
.owner = THIS_MODULE,
.open = minstrel_stats_open,
.read = minstrel_stats_read,
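For the throughput display tweak in minstrel_stats_open() above: ((cur_tp * 96) / 18000) >> 10 and cur_tp / ((18000 << 10) / 96) both reduce to roughly cur_tp / 192000 (the divisor (18000 << 10) / 96 is exactly 192000), so the printed value is unchanged apart from rounding. The rewritten form never multiplies cur_tp by 96 first, which presumably avoids overflowing a 32-bit intermediate once cur_tp exceeds about 44.7 million; for example, cur_tp = 9600000 yields tp = 50 with either expression.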
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index a0bef767ceb..699d3ed869c 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -169,19 +169,9 @@ static void rate_control_pid_sample(struct rc_pid_info *pinfo,
* still a good measurement and copy it. */
if (unlikely(spinfo->tx_num_xmit == 0))
pf = spinfo->last_pf;
- else {
- /* XXX: BAD HACK!!! */
- struct sta_info *si = container_of(sta, struct sta_info, sta);
-
+ else
pf = spinfo->tx_num_failed * 100 / spinfo->tx_num_xmit;
- if (ieee80211_vif_is_mesh(&si->sdata->vif) && pf == 100)
- mesh_plink_broken(si);
- pf <<= RC_PID_ARITH_SHIFT;
- si->fail_avg = ((pf + (spinfo->last_pf << 3)) / 9)
- >> RC_PID_ARITH_SHIFT;
- }
-
spinfo->tx_num_xmit = 0;
spinfo->tx_num_failed = 0;
@@ -276,11 +266,9 @@ rate_control_pid_get_rate(void *priv, struct ieee80211_sta *sta,
{
struct sk_buff *skb = txrc->skb;
struct ieee80211_supported_band *sband = txrc->sband;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct rc_pid_sta_info *spinfo = priv_sta;
int rateidx;
- u16 fc;
if (txrc->rts)
info->control.rates[0].count =
@@ -290,16 +278,8 @@ rate_control_pid_get_rate(void *priv, struct ieee80211_sta *sta,
txrc->hw->conf.short_frame_max_tx_count;
/* Send management frames and NO_ACK data using lowest rate. */
- fc = le16_to_cpu(hdr->frame_control);
- if (!sta || !spinfo ||
- (fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA ||
- info->flags & IEEE80211_TX_CTL_NO_ACK) {
- info->control.rates[0].idx = rate_lowest_index(sband, sta);
- if (info->flags & IEEE80211_TX_CTL_NO_ACK)
- info->control.rates[0].count = 1;
-
+ if (rate_control_send_low(sta, priv_sta, txrc))
return;
- }
rateidx = spinfo->txrate_idx;
@@ -321,7 +301,6 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
struct rc_pid_sta_info *spinfo = priv_sta;
struct rc_pid_info *pinfo = priv;
struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
- struct sta_info *si;
int i, j, tmp;
bool s;
@@ -358,9 +337,6 @@ rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
}
spinfo->txrate_idx = rate_lowest_index(sband, sta);
- /* HACK */
- si = container_of(sta, struct sta_info, sta);
- si->fail_avg = 0;
}
static void *rate_control_pid_alloc(struct ieee80211_hw *hw,
diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
index a08a9b53034..a59043fbb0f 100644
--- a/net/mac80211/rc80211_pid_debugfs.c
+++ b/net/mac80211/rc80211_pid_debugfs.c
@@ -198,7 +198,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
#undef RC_PID_PRINT_BUF_SIZE
-static struct file_operations rc_pid_fop_events = {
+static const struct file_operations rc_pid_fop_events = {
.owner = THIS_MODULE,
.read = rate_control_pid_events_read,
.poll = rate_control_pid_events_poll,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 0936fc24942..c01588f9d45 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -30,7 +30,6 @@
static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
struct tid_ampdu_rx *tid_agg_rx,
struct sk_buff *skb,
- struct ieee80211_rx_status *status,
u16 mpdu_seq_num,
int bar_req);
/*
@@ -59,11 +58,11 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
return skb;
}
-static inline int should_drop_frame(struct ieee80211_rx_status *status,
- struct sk_buff *skb,
+static inline int should_drop_frame(struct sk_buff *skb,
int present_fcs_len,
int radiotap_len)
{
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
@@ -111,10 +110,10 @@ ieee80211_rx_radiotap_len(struct ieee80211_local *local,
static void
ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
struct sk_buff *skb,
- struct ieee80211_rx_status *status,
struct ieee80211_rate *rate,
int rtap_len)
{
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_radiotap_header *rthdr;
unsigned char *pos;
@@ -220,9 +219,9 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
*/
static struct sk_buff *
ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
- struct ieee80211_rx_status *status,
struct ieee80211_rate *rate)
{
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
struct ieee80211_sub_if_data *sdata;
int needed_headroom = 0;
struct sk_buff *skb, *skb2;
@@ -248,8 +247,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
present_fcs_len = FCS_LEN;
if (!local->monitors) {
- if (should_drop_frame(status, origskb, present_fcs_len,
- rtap_len)) {
+ if (should_drop_frame(origskb, present_fcs_len, rtap_len)) {
dev_kfree_skb(origskb);
return NULL;
}
@@ -257,7 +255,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
return remove_monitor_info(local, origskb, rtap_len);
}
- if (should_drop_frame(status, origskb, present_fcs_len, rtap_len)) {
+ if (should_drop_frame(origskb, present_fcs_len, rtap_len)) {
/* only need to expand headroom if necessary */
skb = origskb;
origskb = NULL;
@@ -289,7 +287,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
/* if necessary, prepend radiotap information */
if (!(status->flag & RX_FLAG_RADIOTAP))
- ieee80211_add_rx_radiotap_header(local, skb, status, rate,
+ ieee80211_add_rx_radiotap_header(local, skb, rate,
needed_headroom);
skb_reset_mac_header(skb);
@@ -420,13 +418,13 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
struct ieee80211_local *local = rx->local;
struct sk_buff *skb = rx->skb;
- if (unlikely(local->hw_scanning))
- return ieee80211_scan_rx(rx->sdata, skb, rx->status);
+ if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning)))
+ return ieee80211_scan_rx(rx->sdata, skb);
- if (unlikely(local->sw_scanning)) {
+ if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning) &&
+ (rx->flags & IEEE80211_RX_IN_SCAN))) {
/* drop all the other packets during a software scan anyway */
- if (ieee80211_scan_rx(rx->sdata, skb, rx->status)
- != RX_QUEUED)
+ if (ieee80211_scan_rx(rx->sdata, skb) != RX_QUEUED)
dev_kfree_skb(skb);
return RX_QUEUED;
}
@@ -491,12 +489,21 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ char *dev_addr = rx->dev->dev_addr;
if (ieee80211_is_data(hdr->frame_control)) {
- if (!ieee80211_has_a4(hdr->frame_control))
- return RX_DROP_MONITOR;
- if (memcmp(hdr->addr4, rx->dev->dev_addr, ETH_ALEN) == 0)
- return RX_DROP_MONITOR;
+ if (is_multicast_ether_addr(hdr->addr1)) {
+ if (ieee80211_has_tods(hdr->frame_control) ||
+ !ieee80211_has_fromds(hdr->frame_control))
+ return RX_DROP_MONITOR;
+ if (memcmp(hdr->addr3, dev_addr, ETH_ALEN) == 0)
+ return RX_DROP_MONITOR;
+ } else {
+ if (!ieee80211_has_a4(hdr->frame_control))
+ return RX_DROP_MONITOR;
+ if (memcmp(hdr->addr4, dev_addr, ETH_ALEN) == 0)
+ return RX_DROP_MONITOR;
+ }
}
/* If there is not an established peer link and this is not a peer link
@@ -529,7 +536,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
if (ieee80211_is_data(hdr->frame_control) &&
is_multicast_ether_addr(hdr->addr1) &&
- mesh_rmc_check(hdr->addr4, msh_h_get(hdr, hdrlen), rx->sdata))
+ mesh_rmc_check(hdr->addr3, msh_h_get(hdr, hdrlen), rx->sdata))
return RX_DROP_MONITOR;
#undef msh_h_get
@@ -785,7 +792,7 @@ static void ap_sta_ps_start(struct sta_info *sta)
struct ieee80211_local *local = sdata->local;
atomic_inc(&sdata->bss->num_sta_ps);
- set_and_clear_sta_flags(sta, WLAN_STA_PS, WLAN_STA_PSPOLL);
+ set_sta_flags(sta, WLAN_STA_PS);
drv_sta_notify(local, &sdata->vif, STA_NOTIFY_SLEEP, &sta->sta);
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "%s: STA %pM aid %d enters power save mode\n",
@@ -801,7 +808,7 @@ static int ap_sta_ps_end(struct sta_info *sta)
atomic_dec(&sdata->bss->num_sta_ps);
- clear_sta_flags(sta, WLAN_STA_PS | WLAN_STA_PSPOLL);
+ clear_sta_flags(sta, WLAN_STA_PS);
drv_sta_notify(local, &sdata->vif, STA_NOTIFY_AWAKE, &sta->sta);
if (!skb_queue_empty(&sta->ps_tx_buf))
@@ -836,28 +843,22 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
if (!sta)
return RX_CONTINUE;
- /* Update last_rx only for IBSS packets which are for the current
- * BSSID to avoid keeping the current IBSS network alive in cases where
- * other STAs are using different BSSID. */
+ /*
+ * Update last_rx only for IBSS packets which are for the current
+ * BSSID to avoid keeping the current IBSS network alive in cases
+ * where other STAs start using a different BSSID.
+ */
if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
NL80211_IFTYPE_ADHOC);
if (compare_ether_addr(bssid, rx->sdata->u.ibss.bssid) == 0)
sta->last_rx = jiffies;
- } else
- if (!is_multicast_ether_addr(hdr->addr1) ||
- rx->sdata->vif.type == NL80211_IFTYPE_STATION) {
- /* Update last_rx only for unicast frames in order to prevent
- * the Probe Request frames (the only broadcast frames from a
- * STA in infrastructure mode) from keeping a connection alive.
+ } else if (!is_multicast_ether_addr(hdr->addr1)) {
+ /*
* Mesh beacons will update last_rx if they are found to
* match the current local configuration when processed.
*/
- if (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
- ieee80211_is_beacon(hdr->frame_control)) {
- rx->sdata->u.mgd.last_beacon = jiffies;
- } else
- sta->last_rx = jiffies;
+ sta->last_rx = jiffies;
}
if (!(rx->flags & IEEE80211_RX_RA_MATCH))
@@ -1125,14 +1126,15 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
skb_queue_empty(&rx->sta->ps_tx_buf);
if (skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr =
(struct ieee80211_hdr *) skb->data;
/*
- * Tell TX path to send one frame even though the STA may
+ * Tell TX path to send this frame even though the STA may
* still remain in PS mode after this frame exchange.
*/
- set_sta_flags(rx->sta, WLAN_STA_PSPOLL);
+ info->flags |= IEEE80211_TX_CTL_PSPOLL_RESPONSE;
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "STA %pM aid %d: PS Poll (entries after %d)\n",
@@ -1147,7 +1149,7 @@ ieee80211_rx_h_ps_poll(struct ieee80211_rx_data *rx)
else
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- dev_queue_xmit(skb);
+ ieee80211_add_pending_skb(rx->local, skb);
if (no_pending_pkts)
sta_info_clear_tim_bit(rx->sta);
@@ -1487,10 +1489,13 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
struct ieee80211s_hdr *mesh_hdr;
unsigned int hdrlen;
struct sk_buff *skb = rx->skb, *fwd_skb;
+ struct ieee80211_local *local = rx->local;
+ struct ieee80211_sub_if_data *sdata;
hdr = (struct ieee80211_hdr *) skb->data;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
+ sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
if (!ieee80211_is_data(hdr->frame_control))
return RX_CONTINUE;
@@ -1499,11 +1504,10 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
/* illegal frame */
return RX_DROP_MONITOR;
- if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6){
- struct ieee80211_sub_if_data *sdata;
+ if (!is_multicast_ether_addr(hdr->addr1) &&
+ (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6)) {
struct mesh_path *mppath;
- sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
rcu_read_lock();
mppath = mpp_path_lookup(mesh_hdr->eaddr2, sdata);
if (!mppath) {
@@ -1518,7 +1522,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
rcu_read_unlock();
}
- if (compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0)
+ /* Frame has reached destination. Don't forward */
+ if (!is_multicast_ether_addr(hdr->addr1) &&
+ compare_ether_addr(rx->dev->dev_addr, hdr->addr3) == 0)
return RX_CONTINUE;
mesh_hdr->ttl--;
@@ -1529,6 +1535,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
dropped_frames_ttl);
else {
struct ieee80211_hdr *fwd_hdr;
+ struct ieee80211_tx_info *info;
+
fwd_skb = skb_copy(skb, GFP_ATOMIC);
if (!fwd_skb && net_ratelimit())
@@ -1536,19 +1544,40 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
rx->dev->name);
fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
- /*
- * Save TA to addr1 to send TA a path error if a
- * suitable next hop is not found
- */
- memcpy(fwd_hdr->addr1, fwd_hdr->addr2, ETH_ALEN);
memcpy(fwd_hdr->addr2, rx->dev->dev_addr, ETH_ALEN);
- fwd_skb->dev = rx->local->mdev;
- fwd_skb->iif = rx->dev->ifindex;
- dev_queue_xmit(fwd_skb);
+ info = IEEE80211_SKB_CB(fwd_skb);
+ memset(info, 0, sizeof(*info));
+ info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ info->control.vif = &rx->sdata->vif;
+ ieee80211_select_queue(local, fwd_skb);
+ if (is_multicast_ether_addr(fwd_hdr->addr1))
+ IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
+ fwded_mcast);
+ else {
+ int err;
+ /*
+ * Save TA to addr1 to send TA a path error if a
+ * suitable next hop is not found
+ */
+ memcpy(fwd_hdr->addr1, fwd_hdr->addr2,
+ ETH_ALEN);
+ err = mesh_nexthop_lookup(fwd_skb, sdata);
+ /* Failed to immediately resolve next hop:
+ * fwded frame was dropped or will be added
+ * later to the pending skb queue. */
+ if (err)
+ return RX_DROP_MONITOR;
+
+ IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
+ fwded_unicast);
+ }
+ IEEE80211_IFSTA_MESH_CTR_INC(&sdata->u.mesh,
+ fwded_frames);
+ ieee80211_add_pending_skb(local, fwd_skb);
}
}
- if (is_multicast_ether_addr(hdr->addr3) ||
+ if (is_multicast_ether_addr(hdr->addr1) ||
rx->dev->flags & IFF_PROMISC)
return RX_CONTINUE;
else
@@ -1620,7 +1649,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
/* manage reordering buffer according to requested */
/* sequence number */
rcu_read_lock();
- ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, NULL,
+ ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL,
start_seq_num, 1);
rcu_read_unlock();
return RX_DROP_UNUSABLE;
@@ -1644,12 +1673,7 @@ static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
if (compare_ether_addr(mgmt->sa, sdata->u.mgd.bssid) != 0 ||
compare_ether_addr(mgmt->bssid, sdata->u.mgd.bssid) != 0) {
- /* Not from the current AP. */
- return;
- }
-
- if (sdata->u.mgd.state == IEEE80211_STA_MLME_ASSOCIATE) {
- /* Association in progress; ignore SA Query */
+ /* Not from the current AP or not associated yet. */
return;
}
@@ -1686,7 +1710,6 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
struct ieee80211_local *local = rx->local;
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(rx->dev);
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
- struct ieee80211_bss *bss;
int len = rx->skb->len;
if (!ieee80211_is_action(mgmt->frame_control))
@@ -1764,17 +1787,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN))
return RX_DROP_MONITOR;
- bss = ieee80211_rx_bss_get(local, sdata->u.mgd.bssid,
- local->hw.conf.channel->center_freq,
- sdata->u.mgd.ssid,
- sdata->u.mgd.ssid_len);
- if (!bss)
- return RX_DROP_MONITOR;
-
- ieee80211_sta_process_chanswitch(sdata,
- &mgmt->u.action.u.chan_switch.sw_elem, bss);
- ieee80211_rx_bss_put(local, bss);
- break;
+ return ieee80211_sta_rx_mgmt(sdata, rx->skb);
}
break;
case WLAN_CATEGORY_SA_QUERY:
@@ -1817,19 +1830,18 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
return RX_DROP_MONITOR;
if (ieee80211_vif_is_mesh(&sdata->vif))
- return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status);
+ return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
- return ieee80211_ibss_rx_mgmt(sdata, rx->skb, rx->status);
+ return ieee80211_ibss_rx_mgmt(sdata, rx->skb);
if (sdata->vif.type == NL80211_IFTYPE_STATION)
- return ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status);
+ return ieee80211_sta_rx_mgmt(sdata, rx->skb);
return RX_DROP_MONITOR;
}
-static void ieee80211_rx_michael_mic_report(struct net_device *dev,
- struct ieee80211_hdr *hdr,
+static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr,
struct ieee80211_rx_data *rx)
{
int keyidx;
@@ -1866,7 +1878,8 @@ static void ieee80211_rx_michael_mic_report(struct net_device *dev,
!ieee80211_is_auth(hdr->frame_control))
goto ignore;
- mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL);
+ mac80211_ev_michael_mic_failure(rx->sdata, keyidx, hdr, NULL,
+ GFP_ATOMIC);
ignore:
dev_kfree_skb(rx->skb);
rx->skb = NULL;
@@ -2028,13 +2041,8 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
case NL80211_IFTYPE_STATION:
if (!bssid)
return 0;
- if (!ieee80211_bssid_match(bssid, sdata->u.mgd.bssid)) {
- if (!(rx->flags & IEEE80211_RX_IN_SCAN))
- return 0;
- rx->flags &= ~IEEE80211_RX_RA_MATCH;
- } else if (!multicast &&
- compare_ether_addr(sdata->dev->dev_addr,
- hdr->addr1) != 0) {
+ if (!multicast &&
+ compare_ether_addr(sdata->dev->dev_addr, hdr->addr1) != 0) {
if (!(sdata->dev->flags & IFF_PROMISC))
return 0;
rx->flags &= ~IEEE80211_RX_RA_MATCH;
@@ -2114,9 +2122,9 @@ static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
*/
static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
struct sk_buff *skb,
- struct ieee80211_rx_status *status,
struct ieee80211_rate *rate)
{
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
struct ieee80211_hdr *hdr;
@@ -2143,11 +2151,12 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
}
if ((status->flag & RX_FLAG_MMIC_ERROR)) {
- ieee80211_rx_michael_mic_report(local->mdev, hdr, &rx);
+ ieee80211_rx_michael_mic_report(hdr, &rx);
return;
}
- if (unlikely(local->sw_scanning || local->hw_scanning))
+ if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
+ test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
rx.flags |= IEEE80211_RX_IN_SCAN;
ieee80211_parse_qos(&rx);
@@ -2227,20 +2236,21 @@ static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
{
struct ieee80211_supported_band *sband;
struct ieee80211_rate *rate;
- struct ieee80211_rx_status status;
+ struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
+ struct ieee80211_rx_status *status;
- if (!tid_agg_rx->reorder_buf[index])
+ if (!skb)
goto no_frame;
+ status = IEEE80211_SKB_RXCB(skb);
+
/* release the reordered frames to stack */
- memcpy(&status, tid_agg_rx->reorder_buf[index]->cb, sizeof(status));
- sband = hw->wiphy->bands[status.band];
- if (status.flag & RX_FLAG_HT)
+ sband = hw->wiphy->bands[status->band];
+ if (status->flag & RX_FLAG_HT)
rate = sband->bitrates; /* TODO: HT rates */
else
- rate = &sband->bitrates[status.rate_idx];
- __ieee80211_rx_handle_packet(hw, tid_agg_rx->reorder_buf[index],
- &status, rate);
+ rate = &sband->bitrates[status->rate_idx];
+ __ieee80211_rx_handle_packet(hw, skb, rate);
tid_agg_rx->stored_mpdu_num--;
tid_agg_rx->reorder_buf[index] = NULL;
@@ -2265,7 +2275,6 @@ no_frame:
static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
struct tid_ampdu_rx *tid_agg_rx,
struct sk_buff *skb,
- struct ieee80211_rx_status *rxstatus,
u16 mpdu_seq_num,
int bar_req)
{
@@ -2324,8 +2333,6 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
/* put the frame in the reordering buffer */
tid_agg_rx->reorder_buf[index] = skb;
tid_agg_rx->reorder_time[index] = jiffies;
- memcpy(tid_agg_rx->reorder_buf[index]->cb, rxstatus,
- sizeof(*rxstatus));
tid_agg_rx->stored_mpdu_num++;
/* release the buffer until next missing frame */
index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
@@ -2374,8 +2381,7 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
}
static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
- struct sk_buff *skb,
- struct ieee80211_rx_status *status)
+ struct sk_buff *skb)
{
struct ieee80211_hw *hw = &local->hw;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -2424,7 +2430,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
/* according to mpdu sequence number deal with reordering buffer */
mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
- ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, status,
+ ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb,
mpdu_seq_num, 0);
end_reorder:
return ret;
@@ -2434,24 +2440,20 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
* This is the receive path handler. It is called by a low level driver when an
* 802.11 MPDU is received from the hardware.
*/
-void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct ieee80211_rx_status *status)
+void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_rate *rate = NULL;
struct ieee80211_supported_band *sband;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- if (status->band < 0 ||
- status->band >= IEEE80211_NUM_BANDS) {
- WARN_ON(1);
- return;
- }
+ if (WARN_ON(status->band < 0 ||
+ status->band >= IEEE80211_NUM_BANDS))
+ goto drop;
sband = local->hw.wiphy->bands[status->band];
- if (!sband) {
- WARN_ON(1);
- return;
- }
+ if (WARN_ON(!sband))
+ goto drop;
/*
* If we're suspending, it is possible although not too likely
@@ -2460,16 +2462,21 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
* that might, for example, cause stations to be added or other
* driver callbacks be invoked.
*/
- if (unlikely(local->quiescing || local->suspended)) {
- kfree_skb(skb);
- return;
- }
+ if (unlikely(local->quiescing || local->suspended))
+ goto drop;
+
+ /*
+ * The same happens when we're not even started,
+ * but that's worth a warning.
+ */
+ if (WARN_ON(!local->started))
+ goto drop;
if (status->flag & RX_FLAG_HT) {
/* rate_idx is MCS index */
if (WARN_ON(status->rate_idx < 0 ||
status->rate_idx >= 76))
- return;
+ goto drop;
/* HT rates are not in the table - use the highest legacy rate
* for now since other parts of mac80211 may not yet be fully
* MCS aware. */
@@ -2477,7 +2484,7 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
} else {
if (WARN_ON(status->rate_idx < 0 ||
status->rate_idx >= sband->n_bitrates))
- return;
+ goto drop;
rate = &sband->bitrates[status->rate_idx];
}
@@ -2494,7 +2501,7 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
* if it was previously present.
* Also, frames with less than 16 bytes are dropped.
*/
- skb = ieee80211_rx_monitor(local, skb, status, rate);
+ skb = ieee80211_rx_monitor(local, skb, rate);
if (!skb) {
rcu_read_unlock();
return;
@@ -2512,25 +2519,25 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
* frames from other than operational channel), but that should not
* happen in normal networks.
*/
- if (!ieee80211_rx_reorder_ampdu(local, skb, status))
- __ieee80211_rx_handle_packet(hw, skb, status, rate);
+ if (!ieee80211_rx_reorder_ampdu(local, skb))
+ __ieee80211_rx_handle_packet(hw, skb, rate);
rcu_read_unlock();
+
+ return;
+ drop:
+ kfree_skb(skb);
}
-EXPORT_SYMBOL(__ieee80211_rx);
+EXPORT_SYMBOL(ieee80211_rx);
/* This is a version of the rx handler that can be called from hard irq
* context. Post the skb on the queue and schedule the tasklet */
-void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb,
- struct ieee80211_rx_status *status)
+void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct ieee80211_local *local = hw_to_local(hw);
BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
- skb->dev = local->mdev;
- /* copy status into skb->cb for use by tasklet */
- memcpy(skb->cb, status, sizeof(*status));
skb->pkt_type = IEEE80211_RX_MSG;
skb_queue_tail(&local->skb_queue, skb);
tasklet_schedule(&local->tasklet);
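With the rx.c conversion above, the receive status no longer travels as a separate function argument: drivers place it in skb->cb through IEEE80211_SKB_RXCB() before handing the frame to mac80211, and __ieee80211_rx(hw, skb, status) becomes ieee80211_rx(hw, skb) (ieee80211_rx_irqsafe() likewise drops its status parameter). A hedged sketch of a driver RX path after this change; the field values are placeholders:

        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

        memset(status, 0, sizeof(*status));
        status->band = IEEE80211_BAND_2GHZ;     /* placeholder values */
        status->freq = 2412;
        status->rate_idx = 0;
        status->signal = -60;

        ieee80211_rx(hw, skb);                  /* or ieee80211_rx_irqsafe(hw, skb) */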
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 2a8d09ad17f..039901109fa 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -18,7 +18,6 @@
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <net/mac80211.h>
-#include <net/iw_handler.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
@@ -26,7 +25,7 @@
#define IEEE80211_PROBE_DELAY (HZ / 33)
#define IEEE80211_CHANNEL_TIME (HZ / 33)
-#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 5)
+#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 8)
struct ieee80211_bss *
ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
@@ -121,23 +120,10 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
return bss;
}
-void ieee80211_rx_bss_remove(struct ieee80211_sub_if_data *sdata, u8 *bssid,
- int freq, u8 *ssid, u8 ssid_len)
-{
- struct ieee80211_bss *bss;
- struct ieee80211_local *local = sdata->local;
-
- bss = ieee80211_rx_bss_get(local, bssid, freq, ssid, ssid_len);
- if (bss) {
- cfg80211_unlink_bss(local->hw.wiphy, (void *)bss);
- ieee80211_rx_bss_put(local, bss);
- }
-}
-
ieee80211_rx_result
-ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
- struct ieee80211_rx_status *rx_status)
+ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
{
+ struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_mgmt *mgmt;
struct ieee80211_bss *bss;
u8 *elements;
@@ -278,7 +264,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
mutex_lock(&local->scan_mtx);
- if (WARN_ON(!local->hw_scanning && !local->sw_scanning)) {
+ if (WARN_ON(!local->scanning)) {
mutex_unlock(&local->scan_mtx);
return;
}
@@ -288,16 +274,16 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
return;
}
- if (local->hw_scanning)
+ if (test_bit(SCAN_HW_SCANNING, &local->scanning))
ieee80211_restore_scan_ies(local);
- if (local->scan_req != &local->int_scan_req)
+ if (local->scan_req != local->int_scan_req)
cfg80211_scan_done(local->scan_req, aborted);
local->scan_req = NULL;
+ local->scan_sdata = NULL;
- was_hw_scan = local->hw_scanning;
- local->hw_scanning = false;
- local->sw_scanning = false;
+ was_hw_scan = test_bit(SCAN_HW_SCANNING, &local->scanning);
+ local->scanning = 0;
local->scan_channel = NULL;
/* we only have to protect scan_req and hw/sw scan */
@@ -307,16 +293,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
if (was_hw_scan)
goto done;
- netif_tx_lock_bh(local->mdev);
- netif_addr_lock(local->mdev);
- local->filter_flags &= ~FIF_BCN_PRBRESP_PROMISC;
- drv_configure_filter(local, FIF_BCN_PRBRESP_PROMISC,
- &local->filter_flags,
- local->mdev->mc_count,
- local->mdev->mc_list);
-
- netif_addr_unlock(local->mdev);
- netif_tx_unlock_bh(local->mdev);
+ ieee80211_configure_filter(local);
drv_sw_scan_complete(local);
@@ -327,7 +304,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
/* Tell AP we're back */
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) {
+ if (sdata->u.mgd.associated) {
ieee80211_scan_ps_disable(sdata);
netif_tx_wake_all_queues(sdata->dev);
}
@@ -382,30 +359,24 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
ieee80211_bss_info_change_notify(
sdata, BSS_CHANGED_BEACON_ENABLED);
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) {
- netif_tx_stop_all_queues(sdata->dev);
- ieee80211_scan_ps_enable(sdata);
- }
- } else
+ /*
+ * only handle non-STA interfaces here, STA interfaces
+ * are handled in the scan state machine
+ */
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
netif_tx_stop_all_queues(sdata->dev);
}
mutex_unlock(&local->iflist_mtx);
- local->scan_state = SCAN_SET_CHANNEL;
+ local->next_scan_state = SCAN_DECISION;
local->scan_channel_idx = 0;
- netif_addr_lock_bh(local->mdev);
- local->filter_flags |= FIF_BCN_PRBRESP_PROMISC;
- drv_configure_filter(local, FIF_BCN_PRBRESP_PROMISC,
- &local->filter_flags,
- local->mdev->mc_count,
- local->mdev->mc_list);
- netif_addr_unlock_bh(local->mdev);
+ ieee80211_configure_filter(local);
/* TODO: start scan as soon as all nullfunc frames are ACKed */
- queue_delayed_work(local->hw.workqueue, &local->scan_work,
- IEEE80211_CHANNEL_TIME);
+ ieee80211_queue_delayed_work(&local->hw,
+ &local->scan_work,
+ IEEE80211_CHANNEL_TIME);
return 0;
}
@@ -441,20 +412,18 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
local->scan_req = req;
local->scan_sdata = sdata;
- if (req != &local->int_scan_req &&
+ if (req != local->int_scan_req &&
sdata->vif.type == NL80211_IFTYPE_STATION &&
- (ifmgd->state == IEEE80211_STA_MLME_DIRECT_PROBE ||
- ifmgd->state == IEEE80211_STA_MLME_AUTHENTICATE ||
- ifmgd->state == IEEE80211_STA_MLME_ASSOCIATE)) {
- /* actually wait for the assoc to finish/time out */
+ !list_empty(&ifmgd->work_list)) {
+ /* actually wait for the work it's doing to finish/time out */
set_bit(IEEE80211_STA_REQ_SCAN, &ifmgd->request);
return 0;
}
if (local->ops->hw_scan)
- local->hw_scanning = true;
+ __set_bit(SCAN_HW_SCANNING, &local->scanning);
else
- local->sw_scanning = true;
+ __set_bit(SCAN_SW_SCANNING, &local->scanning);
/*
* Kicking off the scan need not be protected,
* only the scan variable stuff, since now
@@ -477,11 +446,9 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
mutex_lock(&local->scan_mtx);
if (rc) {
- if (local->ops->hw_scan) {
- local->hw_scanning = false;
+ if (local->ops->hw_scan)
ieee80211_restore_scan_ies(local);
- } else
- local->sw_scanning = false;
+ local->scanning = 0;
ieee80211_recalc_idle(local);
@@ -492,13 +459,195 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
return rc;
}
+static int ieee80211_scan_state_decision(struct ieee80211_local *local,
+ unsigned long *next_delay)
+{
+ bool associated = false;
+ struct ieee80211_sub_if_data *sdata;
+
+ /* if no more bands/channels left, complete scan and advance to the idle state */
+ if (local->scan_channel_idx >= local->scan_req->n_channels) {
+ ieee80211_scan_completed(&local->hw, false);
+ return 1;
+ }
+
+ /* check if at least one STA interface is associated */
+ mutex_lock(&local->iflist_mtx);
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!netif_running(sdata->dev))
+ continue;
+
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ if (sdata->u.mgd.associated) {
+ associated = true;
+ break;
+ }
+ }
+ }
+ mutex_unlock(&local->iflist_mtx);
+
+ if (local->scan_channel) {
+ /*
+ * we're currently scanning a different channel, let's
+ * switch back to the operating channel now if at least
+ * one interface is associated. Otherwise just scan the
+ * next channel
+ */
+ if (associated)
+ local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
+ else
+ local->next_scan_state = SCAN_SET_CHANNEL;
+ } else {
+ /*
+ * we're on the operating channel currently, let's
+ * leave that channel now to scan another one
+ */
+ local->next_scan_state = SCAN_LEAVE_OPER_CHANNEL;
+ }
+
+ *next_delay = 0;
+ return 0;
+}
+
+static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *local,
+ unsigned long *next_delay)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ /*
+ * notify the AP about us leaving the channel and stop all STA interfaces
+ */
+ mutex_lock(&local->iflist_mtx);
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!netif_running(sdata->dev))
+ continue;
+
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ netif_tx_stop_all_queues(sdata->dev);
+ if (sdata->u.mgd.associated)
+ ieee80211_scan_ps_enable(sdata);
+ }
+ }
+ mutex_unlock(&local->iflist_mtx);
+
+ __set_bit(SCAN_OFF_CHANNEL, &local->scanning);
+
+ /* advance to the next channel to be scanned */
+ *next_delay = HZ / 10;
+ local->next_scan_state = SCAN_SET_CHANNEL;
+}
+
+static void ieee80211_scan_state_enter_oper_channel(struct ieee80211_local *local,
+ unsigned long *next_delay)
+{
+ struct ieee80211_sub_if_data *sdata = local->scan_sdata;
+
+ /* switch back to the operating channel */
+ local->scan_channel = NULL;
+ ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
+
+ /*
+ * notify the AP about us being back and restart all STA interfaces
+ */
+ mutex_lock(&local->iflist_mtx);
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!netif_running(sdata->dev))
+ continue;
+
+ /* Tell AP we're back */
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ if (sdata->u.mgd.associated)
+ ieee80211_scan_ps_disable(sdata);
+ netif_tx_wake_all_queues(sdata->dev);
+ }
+ }
+ mutex_unlock(&local->iflist_mtx);
+
+ __clear_bit(SCAN_OFF_CHANNEL, &local->scanning);
+
+ *next_delay = HZ / 5;
+ local->next_scan_state = SCAN_DECISION;
+}
+
+static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
+ unsigned long *next_delay)
+{
+ int skip;
+ struct ieee80211_channel *chan;
+ struct ieee80211_sub_if_data *sdata = local->scan_sdata;
+
+ skip = 0;
+ chan = local->scan_req->channels[local->scan_channel_idx];
+
+ if (chan->flags & IEEE80211_CHAN_DISABLED ||
+ (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
+ chan->flags & IEEE80211_CHAN_NO_IBSS))
+ skip = 1;
+
+ if (!skip) {
+ local->scan_channel = chan;
+ if (ieee80211_hw_config(local,
+ IEEE80211_CONF_CHANGE_CHANNEL))
+ skip = 1;
+ }
+
+ /* advance state machine to next channel/band */
+ local->scan_channel_idx++;
+
+ if (skip) {
+ /* if we skip this channel return to the decision state */
+ local->next_scan_state = SCAN_DECISION;
+ return;
+ }
+
+ /*
+ * Probe delay is used to update the NAV, cf. 11.1.3.2.2
+ * (which unfortunately doesn't say _why_ step a) is done,
+ * but it waits for the probe delay or until a frame is
+ * received - and the received frame would update the NAV).
+ * For now, we do not support waiting until a frame is
+ * received.
+ *
+ * In any case, it is not necessary for a passive scan.
+ */
+ if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN ||
+ !local->scan_req->n_ssids) {
+ *next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
+ local->next_scan_state = SCAN_DECISION;
+ return;
+ }
+
+ /* active scan, send probes */
+ *next_delay = IEEE80211_PROBE_DELAY;
+ local->next_scan_state = SCAN_SEND_PROBE;
+}
+
+static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
+ unsigned long *next_delay)
+{
+ int i;
+ struct ieee80211_sub_if_data *sdata = local->scan_sdata;
+
+ for (i = 0; i < local->scan_req->n_ssids; i++)
+ ieee80211_send_probe_req(
+ sdata, NULL,
+ local->scan_req->ssids[i].ssid,
+ local->scan_req->ssids[i].ssid_len,
+ local->scan_req->ie, local->scan_req->ie_len);
+
+ /*
+ * After sending probe requests, wait for probe responses
+ * on the channel.
+ */
+ *next_delay = IEEE80211_CHANNEL_TIME;
+ local->next_scan_state = SCAN_DECISION;
+}
+
void ieee80211_scan_work(struct work_struct *work)
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, scan_work.work);
struct ieee80211_sub_if_data *sdata = local->scan_sdata;
- struct ieee80211_channel *chan;
- int skip, i;
unsigned long next_delay = 0;
mutex_lock(&local->scan_mtx);
@@ -507,11 +656,12 @@ void ieee80211_scan_work(struct work_struct *work)
return;
}
- if (local->scan_req && !(local->sw_scanning || local->hw_scanning)) {
+ if (local->scan_req && !local->scanning) {
struct cfg80211_scan_request *req = local->scan_req;
int rc;
local->scan_req = NULL;
+ local->scan_sdata = NULL;
rc = __ieee80211_start_scan(sdata, req);
mutex_unlock(&local->scan_mtx);
@@ -531,72 +681,32 @@ void ieee80211_scan_work(struct work_struct *work)
return;
}
- switch (local->scan_state) {
- case SCAN_SET_CHANNEL:
- /* if no more bands/channels left, complete scan */
- if (local->scan_channel_idx >= local->scan_req->n_channels) {
- ieee80211_scan_completed(&local->hw, false);
- return;
- }
- skip = 0;
- chan = local->scan_req->channels[local->scan_channel_idx];
-
- if (chan->flags & IEEE80211_CHAN_DISABLED ||
- (sdata->vif.type == NL80211_IFTYPE_ADHOC &&
- chan->flags & IEEE80211_CHAN_NO_IBSS))
- skip = 1;
-
- if (!skip) {
- local->scan_channel = chan;
- if (ieee80211_hw_config(local,
- IEEE80211_CONF_CHANGE_CHANNEL))
- skip = 1;
- }
-
- /* advance state machine to next channel/band */
- local->scan_channel_idx++;
-
- if (skip)
+	/*
+	 * as long as no delay is required, advance immediately
+	 * without scheduling new work
+	 */
+ do {
+ switch (local->next_scan_state) {
+ case SCAN_DECISION:
+ if (ieee80211_scan_state_decision(local, &next_delay))
+ return;
break;
-
- /*
- * Probe delay is used to update the NAV, cf. 11.1.3.2.2
- * (which unfortunately doesn't say _why_ step a) is done,
- * but it waits for the probe delay or until a frame is
- * received - and the received frame would update the NAV).
- * For now, we do not support waiting until a frame is
- * received.
- *
- * In any case, it is not necessary for a passive scan.
- */
- if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN ||
- !local->scan_req->n_ssids) {
- next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
+ case SCAN_SET_CHANNEL:
+ ieee80211_scan_state_set_channel(local, &next_delay);
+ break;
+ case SCAN_SEND_PROBE:
+ ieee80211_scan_state_send_probe(local, &next_delay);
+ break;
+ case SCAN_LEAVE_OPER_CHANNEL:
+ ieee80211_scan_state_leave_oper_channel(local, &next_delay);
+ break;
+ case SCAN_ENTER_OPER_CHANNEL:
+ ieee80211_scan_state_enter_oper_channel(local, &next_delay);
break;
}
+ } while (next_delay == 0);
- next_delay = IEEE80211_PROBE_DELAY;
- local->scan_state = SCAN_SEND_PROBE;
- break;
- case SCAN_SEND_PROBE:
- for (i = 0; i < local->scan_req->n_ssids; i++)
- ieee80211_send_probe_req(
- sdata, NULL,
- local->scan_req->ssids[i].ssid,
- local->scan_req->ssids[i].ssid_len,
- local->scan_req->ie, local->scan_req->ie_len);
-
- /*
- * After sending probe requests, wait for probe responses
- * on the channel.
- */
- next_delay = IEEE80211_CHANNEL_TIME;
- local->scan_state = SCAN_SET_CHANNEL;
- break;
- }
-
- queue_delayed_work(local->hw.workqueue, &local->scan_work,
- next_delay);
+ ieee80211_queue_delayed_work(&local->hw, &local->scan_work, next_delay);
}
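The loop above keeps dispatching scan states for as long as next_delay stays zero and only reschedules the delayed work once a state asks for a real wait. A minimal userspace sketch of that dispatch pattern, assuming made-up state names, channel counts and delay values rather than the mac80211 symbols:

#include <stdio.h>

enum scan_state { SCAN_DECISION, SCAN_SET_CHANNEL, SCAN_SEND_PROBE, SCAN_DONE };

int main(void)
{
	enum scan_state state = SCAN_DECISION;
	unsigned long next_delay = 0;
	int channels_left = 2;		/* pretend scan request */

	do {
		switch (state) {
		case SCAN_DECISION:
			if (!channels_left--) {
				state = SCAN_DONE;
				next_delay = 1;	/* leave the loop */
				break;
			}
			state = SCAN_SET_CHANNEL;
			break;
		case SCAN_SET_CHANNEL:
			/* tuning is treated as immediate; probing is not */
			state = SCAN_SEND_PROBE;
			break;
		case SCAN_SEND_PROBE:
			printf("probe sent, waiting for responses\n");
			next_delay = 30;	/* stand-in for IEEE80211_CHANNEL_TIME */
			state = SCAN_DECISION;
			break;
		case SCAN_DONE:
			break;
		}
	} while (next_delay == 0);

	printf("reschedule in %lu ticks, next state %d\n", next_delay, state);
	return 0;
}

States that finish instantly (the decision and the channel switch here) fall straight through to the next one, which is why the real work function can batch them without bouncing through the workqueue.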
int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
@@ -623,10 +733,10 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
if (local->scan_req)
goto unlock;
- memcpy(local->int_scan_req.ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN);
- local->int_scan_req.ssids[0].ssid_len = ssid_len;
+ memcpy(local->int_scan_req->ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN);
+ local->int_scan_req->ssids[0].ssid_len = ssid_len;
- ret = __ieee80211_start_scan(sdata, &sdata->local->int_scan_req);
+ ret = __ieee80211_start_scan(sdata, sdata->local->int_scan_req);
unlock:
mutex_unlock(&local->scan_mtx);
return ret;
@@ -634,7 +744,7 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
void ieee80211_scan_cancel(struct ieee80211_local *local)
{
- bool swscan;
+ bool abortscan;
cancel_delayed_work_sync(&local->scan_work);
@@ -643,9 +753,10 @@ void ieee80211_scan_cancel(struct ieee80211_local *local)
* queued -- mostly at suspend under RTNL.
*/
mutex_lock(&local->scan_mtx);
- swscan = local->sw_scanning;
+ abortscan = test_bit(SCAN_SW_SCANNING, &local->scanning) ||
+ (!local->scanning && local->scan_req);
mutex_unlock(&local->scan_mtx);
- if (swscan)
+ if (abortscan)
ieee80211_scan_completed(&local->hw, true);
}
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index a360bceeba5..eec001491e6 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -349,6 +349,7 @@ int sta_info_insert(struct sta_info *sta)
goto out_free;
}
list_add(&sta->list, &local->sta_list);
+ local->sta_generation++;
local->num_sta++;
sta_info_hash_add(local, sta);
@@ -485,6 +486,7 @@ static void __sta_info_unlink(struct sta_info **sta)
}
local->num_sta--;
+ local->sta_generation++;
if (local->ops->sta_notify) {
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 49a1a1f7651..ccc3adf962c 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -30,7 +30,6 @@
* @WLAN_STA_ASSOC_AP: We're associated to that station, it is an AP.
* @WLAN_STA_WME: Station is a QoS-STA.
* @WLAN_STA_WDS: Station is one of our WDS peers.
- * @WLAN_STA_PSPOLL: Station has just PS-polled us.
* @WLAN_STA_CLEAR_PS_FILT: Clear PS filter in hardware (using the
* IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next
* frame to this station is transmitted.
@@ -47,7 +46,6 @@ enum ieee80211_sta_info_flags {
WLAN_STA_ASSOC_AP = 1<<5,
WLAN_STA_WME = 1<<6,
WLAN_STA_WDS = 1<<7,
- WLAN_STA_PSPOLL = 1<<8,
WLAN_STA_CLEAR_PS_FILT = 1<<9,
WLAN_STA_MFP = 1<<10,
WLAN_STA_SUSPEND = 1<<11
@@ -308,6 +306,23 @@ struct sta_info {
struct dentry *inactive_ms;
struct dentry *last_seq_ctrl;
struct dentry *agg_status;
+ struct dentry *aid;
+ struct dentry *dev;
+ struct dentry *rx_packets;
+ struct dentry *tx_packets;
+ struct dentry *rx_bytes;
+ struct dentry *tx_bytes;
+ struct dentry *rx_duplicates;
+ struct dentry *rx_fragments;
+ struct dentry *rx_dropped;
+ struct dentry *tx_fragments;
+ struct dentry *tx_filtered;
+ struct dentry *tx_retry_failed;
+ struct dentry *tx_retry_count;
+ struct dentry *last_signal;
+ struct dentry *last_qual;
+ struct dentry *last_noise;
+ struct dentry *wep_weak_iv_count;
bool add_has_run;
} debugfs;
#endif
@@ -342,17 +357,6 @@ static inline void clear_sta_flags(struct sta_info *sta, const u32 flags)
spin_unlock_irqrestore(&sta->flaglock, irqfl);
}
-static inline void set_and_clear_sta_flags(struct sta_info *sta,
- const u32 set, const u32 clear)
-{
- unsigned long irqfl;
-
- spin_lock_irqsave(&sta->flaglock, irqfl);
- sta->flags |= set;
- sta->flags &= ~clear;
- spin_unlock_irqrestore(&sta->flaglock, irqfl);
-}
-
static inline u32 test_sta_flags(struct sta_info *sta, const u32 flags)
{
u32 ret;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 3a8922cd103..5143d203256 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -192,7 +192,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED))
return TX_CONTINUE;
- if (unlikely(tx->local->sw_scanning) &&
+ if (unlikely(test_bit(SCAN_OFF_CHANNEL, &tx->local->scanning)) &&
!ieee80211_is_probe_req(hdr->frame_control) &&
!ieee80211_is_nullfunc(hdr->frame_control))
/*
@@ -317,30 +317,30 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
if (!atomic_read(&tx->sdata->bss->num_sta_ps))
return TX_CONTINUE;
+ /* buffered in hardware */
+ if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)) {
+ info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
+
+ return TX_CONTINUE;
+ }
+
/* buffered in mac80211 */
- if (tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING) {
- if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
- purge_old_ps_buffers(tx->local);
- if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >=
- AP_MAX_BC_BUFFER) {
+ if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
+ purge_old_ps_buffers(tx->local);
+
+ if (skb_queue_len(&tx->sdata->bss->ps_bc_buf) >= AP_MAX_BC_BUFFER) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
- if (net_ratelimit()) {
- printk(KERN_DEBUG "%s: BC TX buffer full - "
- "dropping the oldest frame\n",
- tx->dev->name);
- }
+ if (net_ratelimit())
+ printk(KERN_DEBUG "%s: BC TX buffer full - dropping the oldest frame\n",
+ tx->dev->name);
#endif
- dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
- } else
- tx->local->total_ps_buffered++;
- skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb);
- return TX_QUEUED;
- }
+ dev_kfree_skb(skb_dequeue(&tx->sdata->bss->ps_bc_buf));
+ } else
+ tx->local->total_ps_buffered++;
- /* buffered in hardware */
- info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
+ skb_queue_tail(&tx->sdata->bss->ps_bc_buf, tx->skb);
- return TX_CONTINUE;
+ return TX_QUEUED;
}
static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
@@ -373,7 +373,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
staflags = get_sta_flags(sta);
if (unlikely((staflags & WLAN_STA_PS) &&
- !(staflags & WLAN_STA_PSPOLL))) {
+ !(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE))) {
#ifdef CONFIG_MAC80211_VERBOSE_PS_DEBUG
printk(KERN_DEBUG "STA %pM aid %d: PS buffer (entries "
"before %d)\n",
@@ -400,6 +400,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
sta_info_set_tim_bit(sta);
info->control.jiffies = jiffies;
+ info->control.vif = &tx->sdata->vif;
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
skb_queue_tail(&sta->ps_tx_buf, tx->skb);
return TX_QUEUED;
@@ -411,24 +412,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
sta->sta.addr);
}
#endif /* CONFIG_MAC80211_VERBOSE_PS_DEBUG */
- if (test_and_clear_sta_flags(sta, WLAN_STA_PSPOLL)) {
- /*
- * The sleeping station with pending data is now snoozing.
- * It queried us for its buffered frames and will go back
- * to deep sleep once it got everything.
- *
- * inform the driver, in case the hardware does powersave
- * frame filtering and keeps a station blacklist on its own
- * (e.g: p54), so that frames can be delivered unimpeded.
- *
- * Note: It should be safe to disable the filter now.
- * As, it is really unlikely that we still have any pending
- * frame for this station in the hw's buffers/fifos left,
- * that is not rejected with a unsuccessful tx_status yet.
- */
- info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
- }
return TX_CONTINUE;
}
@@ -451,7 +435,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
- if (unlikely(tx->skb->do_not_encrypt))
+ if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT))
tx->key = NULL;
else if (tx->sta && (key = rcu_dereference(tx->sta->key)))
tx->key = key;
@@ -497,7 +481,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
}
if (!tx->key || !(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE))
- tx->skb->do_not_encrypt = 1;
+ info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
return TX_CONTINUE;
}
@@ -512,6 +496,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
int i, len;
bool inval = false, rts = false, short_preamble = false;
struct ieee80211_tx_rate_control txrc;
+ u32 sta_flags;
memset(&txrc, 0, sizeof(txrc));
@@ -544,7 +529,26 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
(tx->sta && test_sta_flags(tx->sta, WLAN_STA_SHORT_PREAMBLE))))
txrc.short_preamble = short_preamble = true;
+ sta_flags = tx->sta ? get_sta_flags(tx->sta) : 0;
+
+	/*
+	 * Let's not bother with rate control if we're associated and
+	 * cannot talk to the sta. This should not happen.
+	 */
+ if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) &&
+ (sta_flags & WLAN_STA_ASSOC) &&
+ !rate_usable_index_exists(sband, &tx->sta->sta),
+ "%s: Dropped data frame as no usable bitrate found while "
+ "scanning and associated. Target station: "
+ "%pM on %d GHz band\n",
+ tx->dev->name, hdr->addr1,
+ tx->channel->band ? 5 : 2))
+ return TX_DROP;
+ /*
+ * If we're associated with the sta at this point we know we can at
+ * least send the frame at the lowest bit rate.
+ */
rate_control_get_rate(tx->sdata, tx->sta, &txrc);
if (unlikely(info->control.rates[0].idx < 0))
@@ -676,7 +680,7 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
* number, if we have no matching interface then we
* neither assign one ourselves nor ask the driver to.
*/
- if (unlikely(!info->control.vif))
+ if (unlikely(info->control.vif->type == NL80211_IFTYPE_MONITOR))
return TX_CONTINUE;
if (unlikely(ieee80211_is_ctl(hdr->frame_control)))
@@ -696,7 +700,6 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
/* for pure STA mode without beacons, we can do it */
hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number);
tx->sdata->sequence_number += 0x10;
- tx->sdata->sequence_number &= IEEE80211_SCTL_SEQ;
return TX_CONTINUE;
}
@@ -754,9 +757,7 @@ static int ieee80211_fragment(struct ieee80211_local *local,
memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
skb_copy_queue_mapping(tmp, skb);
tmp->priority = skb->priority;
- tmp->do_not_encrypt = skb->do_not_encrypt;
tmp->dev = skb->dev;
- tmp->iif = skb->iif;
/* copy header and data */
memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen);
@@ -784,7 +785,7 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
/*
* Warn when submitting a fragmented A-MPDU frame and drop it.
- * This scenario is handled in __ieee80211_tx_prepare but extra
+ * This scenario is handled in ieee80211_tx_prepare but extra
* caution taken here as fragmented ampdu may cause Tx stop.
*/
if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
@@ -842,6 +843,23 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
}
static ieee80211_tx_result debug_noinline
+ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
+{
+ struct sk_buff *skb = tx->skb;
+
+ if (!tx->sta)
+ return TX_CONTINUE;
+
+ tx->sta->tx_packets++;
+ do {
+ tx->sta->tx_fragments++;
+ tx->sta->tx_bytes += skb->len;
+ } while ((skb = skb->next));
+
+ return TX_CONTINUE;
+}
+
+static ieee80211_tx_result debug_noinline
ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
{
if (!tx->key)
@@ -885,23 +903,6 @@ ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
return TX_CONTINUE;
}
-static ieee80211_tx_result debug_noinline
-ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
-{
- struct sk_buff *skb = tx->skb;
-
- if (!tx->sta)
- return TX_CONTINUE;
-
- tx->sta->tx_packets++;
- do {
- tx->sta->tx_fragments++;
- tx->sta->tx_bytes += skb->len;
- } while ((skb = skb->next));
-
- return TX_CONTINUE;
-}
-
/* actual transmit path */
/*
@@ -923,11 +924,12 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
struct ieee80211_radiotap_header *rthdr =
(struct ieee80211_radiotap_header *) skb->data;
struct ieee80211_supported_band *sband;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len);
sband = tx->local->hw.wiphy->bands[tx->channel->band];
- skb->do_not_encrypt = 1;
+ info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
tx->flags &= ~IEEE80211_TX_FRAGMENTED;
/*
@@ -965,7 +967,7 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
skb_trim(skb, skb->len - FCS_LEN);
}
if (*iterator.this_arg & IEEE80211_RADIOTAP_F_WEP)
- tx->skb->do_not_encrypt = 0;
+ info->flags &= ~IEEE80211_TX_INTFL_DONT_ENCRYPT;
if (*iterator.this_arg & IEEE80211_RADIOTAP_F_FRAG)
tx->flags |= IEEE80211_TX_FRAGMENTED;
break;
@@ -998,13 +1000,12 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx,
* initialises @tx
*/
static ieee80211_tx_result
-__ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
- struct sk_buff *skb,
- struct net_device *dev)
+ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_tx_data *tx,
+ struct sk_buff *skb)
{
- struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+ struct ieee80211_local *local = sdata->local;
struct ieee80211_hdr *hdr;
- struct ieee80211_sub_if_data *sdata;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int hdrlen, tid;
u8 *qc, *state;
@@ -1012,9 +1013,9 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
memset(tx, 0, sizeof(*tx));
tx->skb = skb;
- tx->dev = dev; /* use original interface */
+ tx->dev = sdata->dev; /* use original interface */
tx->local = local;
- tx->sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ tx->sdata = sdata;
tx->channel = local->hw.conf.channel;
/*
* Set this flag (used below to indicate "automatic fragmentation"),
@@ -1023,7 +1024,6 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
tx->flags |= IEEE80211_TX_FRAGMENTED;
/* process and remove the injection radiotap header */
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
if (unlikely(info->flags & IEEE80211_TX_CTL_INJECTED)) {
if (!__ieee80211_parse_tx_radiotap(tx, skb))
return TX_DROP;
@@ -1075,6 +1075,7 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
} else if (*state != HT_AGG_STATE_IDLE) {
/* in progress */
queued = true;
+ info->control.vif = &sdata->vif;
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
__skb_queue_tail(&tid_tx->pending, skb);
}
@@ -1119,50 +1120,29 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
return TX_CONTINUE;
}
-/*
- * NB: @tx is uninitialised when passed in here
- */
-static int ieee80211_tx_prepare(struct ieee80211_local *local,
- struct ieee80211_tx_data *tx,
- struct sk_buff *skb)
-{
- struct net_device *dev;
-
- dev = dev_get_by_index(&init_net, skb->iif);
- if (unlikely(dev && !is_ieee80211_device(local, dev))) {
- dev_put(dev);
- dev = NULL;
- }
- if (unlikely(!dev))
- return -ENODEV;
- /*
- * initialises tx with control
- *
- * return value is safe to ignore here because this function
- * can only be invoked for multicast frames
- *
- * XXX: clean up
- */
- __ieee80211_tx_prepare(tx, skb, dev);
- dev_put(dev);
- return 0;
-}
-
static int __ieee80211_tx(struct ieee80211_local *local,
struct sk_buff **skbp,
- struct sta_info *sta)
+ struct sta_info *sta,
+ bool txpending)
{
struct sk_buff *skb = *skbp, *next;
struct ieee80211_tx_info *info;
+ struct ieee80211_sub_if_data *sdata;
+ unsigned long flags;
int ret, len;
bool fragm = false;
- local->mdev->trans_start = jiffies;
-
while (skb) {
- if (ieee80211_queue_stopped(&local->hw,
- skb_get_queue_mapping(skb)))
- return IEEE80211_TX_PENDING;
+ int q = skb_get_queue_mapping(skb);
+
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ ret = IEEE80211_TX_OK;
+ if (local->queue_stop_reasons[q] ||
+ (!txpending && !skb_queue_empty(&local->pending[q])))
+ ret = IEEE80211_TX_PENDING;
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ if (ret != IEEE80211_TX_OK)
+ return ret;
info = IEEE80211_SKB_CB(skb);
@@ -1172,13 +1152,35 @@ static int __ieee80211_tx(struct ieee80211_local *local,
next = skb->next;
len = skb->len;
+
+ if (next)
+ info->flags |= IEEE80211_TX_CTL_MORE_FRAMES;
+
+ sdata = vif_to_sdata(info->control.vif);
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_MONITOR:
+ info->control.vif = NULL;
+ break;
+ case NL80211_IFTYPE_AP_VLAN:
+ info->control.vif = &container_of(sdata->bss,
+ struct ieee80211_sub_if_data, u.ap)->vif;
+ break;
+ default:
+ /* keep */
+ break;
+ }
+
ret = drv_tx(local, skb);
if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) {
dev_kfree_skb(skb);
ret = NETDEV_TX_OK;
}
- if (ret != NETDEV_TX_OK)
+ if (ret != NETDEV_TX_OK) {
+ info->control.vif = &sdata->vif;
return IEEE80211_TX_AGAIN;
+ }
+
*skbp = skb = next;
ieee80211_led_tx(local, 1);
fragm = true;
@@ -1210,9 +1212,9 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
CALL_TXH(ieee80211_tx_h_sequence)
CALL_TXH(ieee80211_tx_h_fragment)
/* handlers after fragment must be aware of tx info fragmentation! */
+ CALL_TXH(ieee80211_tx_h_stats)
CALL_TXH(ieee80211_tx_h_encrypt)
CALL_TXH(ieee80211_tx_h_calculate_duration)
- CALL_TXH(ieee80211_tx_h_stats)
#undef CALL_TXH
txh_done:
@@ -1234,10 +1236,10 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
return 0;
}
-static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
- bool txpending)
+static void ieee80211_tx(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, bool txpending)
{
- struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
+ struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_data tx;
ieee80211_tx_result res_prepare;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -1248,8 +1250,6 @@ static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
queue = skb_get_queue_mapping(skb);
- WARN_ON(!txpending && !skb_queue_empty(&local->pending[queue]));
-
if (unlikely(skb->len < 10)) {
dev_kfree_skb(skb);
return;
@@ -1258,7 +1258,7 @@ static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
rcu_read_lock();
/* initialises tx */
- res_prepare = __ieee80211_tx_prepare(&tx, skb, dev);
+ res_prepare = ieee80211_tx_prepare(sdata, &tx, skb);
if (unlikely(res_prepare == TX_DROP)) {
dev_kfree_skb(skb);
@@ -1277,7 +1277,7 @@ static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
retries = 0;
retry:
- ret = __ieee80211_tx(local, &tx.skb, tx.sta);
+ ret = __ieee80211_tx(local, &tx.skb, tx.sta, txpending);
switch (ret) {
case IEEE80211_TX_OK:
break;
@@ -1295,34 +1295,35 @@ static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
- if (__netif_subqueue_stopped(local->mdev, queue)) {
+ if (local->queue_stop_reasons[queue] ||
+ !skb_queue_empty(&local->pending[queue])) {
+			/*
+			 * if the queue is stopped or already has pending
+			 * frames, queue up frames for later transmission
+			 * from the tasklet
+			 */
do {
next = skb->next;
skb->next = NULL;
if (unlikely(txpending))
- skb_queue_head(&local->pending[queue],
- skb);
+ __skb_queue_head(&local->pending[queue],
+ skb);
else
- skb_queue_tail(&local->pending[queue],
- skb);
+ __skb_queue_tail(&local->pending[queue],
+ skb);
} while ((skb = next));
- /*
- * Make sure nobody will enable the queue on us
- * (without going through the tasklet) nor disable the
- * netdev queue underneath the pending handling code.
- */
- __set_bit(IEEE80211_QUEUE_STOP_REASON_PENDING,
- &local->queue_stop_reasons[queue]);
-
spin_unlock_irqrestore(&local->queue_stop_reason_lock,
flags);
} else {
+ /*
+ * otherwise retry, but this is a race condition or
+ * a driver bug (which we warn about if it persists)
+ */
spin_unlock_irqrestore(&local->queue_stop_reason_lock,
flags);
retries++;
- if (WARN(retries > 10, "tx refused but queue active"))
+ if (WARN(retries > 10, "tx refused but queue active\n"))
goto drop;
goto retry;
}
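The retry path above bounds how often a refused frame is pushed back at the driver before warning and dropping it. A hedged userspace sketch of that bounded-retry shape, with a fabricated fake_drv_tx() standing in for drv_tx():

#include <stdio.h>

/* fabricated stand-in for drv_tx(): refuses the first few attempts */
static int fake_drv_tx(int attempt)
{
	return attempt < 3 ? -1 : 0;
}

int main(void)
{
	int retries = 0;

retry:
	if (fake_drv_tx(retries) != 0) {
		retries++;
		if (retries > 10) {
			fprintf(stderr, "WARN: tx refused but queue active\n");
			return 1;	/* drop the frame */
		}
		goto retry;
	}

	printf("frame accepted after %d retries\n", retries);
	return 0;
}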
@@ -1383,44 +1384,25 @@ static int ieee80211_skb_resize(struct ieee80211_local *local,
return 0;
}
-int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
{
- struct ieee80211_master_priv *mpriv = netdev_priv(dev);
- struct ieee80211_local *local = mpriv->local;
+ struct ieee80211_local *local = sdata->local;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- struct net_device *odev = NULL;
- struct ieee80211_sub_if_data *osdata;
+ struct ieee80211_sub_if_data *tmp_sdata;
int headroom;
bool may_encrypt;
- enum {
- NOT_MONITOR,
- FOUND_SDATA,
- UNKNOWN_ADDRESS,
- } monitor_iface = NOT_MONITOR;
-
- if (skb->iif)
- odev = dev_get_by_index(&init_net, skb->iif);
- if (unlikely(odev && !is_ieee80211_device(local, odev))) {
- dev_put(odev);
- odev = NULL;
- }
- if (unlikely(!odev)) {
-#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
- printk(KERN_DEBUG "%s: Discarded packet with nonexistent "
- "originating device\n", dev->name);
-#endif
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
+
+ dev_hold(sdata->dev);
if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
local->hw.conf.dynamic_ps_timeout > 0 &&
- !local->sw_scanning && !local->hw_scanning && local->ps_sdata) {
+ !(local->scanning) && local->ps_sdata) {
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
ieee80211_stop_queues_by_reason(&local->hw,
IEEE80211_QUEUE_STOP_REASON_PS);
- queue_work(local->hw.workqueue,
+ ieee80211_queue_work(&local->hw,
&local->dynamic_ps_disable_work);
}
@@ -1428,31 +1410,13 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
}
- memset(info, 0, sizeof(*info));
-
info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
- osdata = IEEE80211_DEV_TO_SUB_IF(odev);
-
- if (ieee80211_vif_is_mesh(&osdata->vif) &&
- ieee80211_is_data(hdr->frame_control)) {
- if (is_multicast_ether_addr(hdr->addr3))
- memcpy(hdr->addr1, hdr->addr3, ETH_ALEN);
- else
- if (mesh_nexthop_lookup(skb, osdata)) {
- dev_put(odev);
- return NETDEV_TX_OK;
- }
- if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0)
- IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh,
- fwded_frames);
- } else if (unlikely(osdata->vif.type == NL80211_IFTYPE_MONITOR)) {
- struct ieee80211_sub_if_data *sdata;
+ if (unlikely(sdata->vif.type == NL80211_IFTYPE_MONITOR)) {
int hdrlen;
u16 len_rthdr;
info->flags |= IEEE80211_TX_CTL_INJECTED;
- monitor_iface = UNKNOWN_ADDRESS;
len_rthdr = ieee80211_get_radiotap_len(skb->data);
hdr = (struct ieee80211_hdr *)(skb->data + len_rthdr);
@@ -1471,20 +1435,17 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
rcu_read_lock();
- list_for_each_entry_rcu(sdata, &local->interfaces,
+ list_for_each_entry_rcu(tmp_sdata, &local->interfaces,
list) {
- if (!netif_running(sdata->dev))
+ if (!netif_running(tmp_sdata->dev))
continue;
- if (sdata->vif.type != NL80211_IFTYPE_AP)
+ if (tmp_sdata->vif.type != NL80211_IFTYPE_AP)
continue;
- if (compare_ether_addr(sdata->dev->dev_addr,
+ if (compare_ether_addr(tmp_sdata->dev->dev_addr,
hdr->addr2)) {
- dev_hold(sdata->dev);
- dev_put(odev);
- osdata = sdata;
- odev = osdata->dev;
- skb->iif = sdata->dev->ifindex;
- monitor_iface = FOUND_SDATA;
+ dev_hold(tmp_sdata->dev);
+ dev_put(sdata->dev);
+ sdata = tmp_sdata;
break;
}
}
@@ -1492,40 +1453,44 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
- may_encrypt = !skb->do_not_encrypt;
+ may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT);
- headroom = osdata->local->tx_headroom;
+ headroom = local->tx_headroom;
if (may_encrypt)
headroom += IEEE80211_ENCRYPT_HEADROOM;
headroom -= skb_headroom(skb);
headroom = max_t(int, 0, headroom);
- if (ieee80211_skb_resize(osdata->local, skb, headroom, may_encrypt)) {
+ if (ieee80211_skb_resize(local, skb, headroom, may_encrypt)) {
dev_kfree_skb(skb);
- dev_put(odev);
- return NETDEV_TX_OK;
+ dev_put(sdata->dev);
+ return;
}
- if (osdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- osdata = container_of(osdata->bss,
- struct ieee80211_sub_if_data,
- u.ap);
- if (likely(monitor_iface != UNKNOWN_ADDRESS))
- info->control.vif = &osdata->vif;
+ info->control.vif = &sdata->vif;
- ieee80211_tx(odev, skb, false);
- dev_put(odev);
+ if (ieee80211_vif_is_mesh(&sdata->vif) &&
+ ieee80211_is_data(hdr->frame_control) &&
+ !is_multicast_ether_addr(hdr->addr1))
+ if (mesh_nexthop_lookup(skb, sdata)) {
+ /* skb queued: don't free */
+ dev_put(sdata->dev);
+ return;
+ }
- return NETDEV_TX_OK;
+ ieee80211_select_queue(local, skb);
+ ieee80211_tx(sdata, skb, false);
+ dev_put(sdata->dev);
}
-int ieee80211_monitor_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
struct ieee80211_channel *chan = local->hw.conf.channel;
struct ieee80211_radiotap_header *prthdr =
(struct ieee80211_radiotap_header *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
u16 len_rthdr;
/*
@@ -1563,15 +1528,6 @@ int ieee80211_monitor_start_xmit(struct sk_buff *skb,
if (unlikely(skb->len < len_rthdr))
goto fail; /* skb too short for claimed rt header extent */
- skb->dev = local->mdev;
-
- /* needed because we set skb device to master */
- skb->iif = dev->ifindex;
-
- /* sometimes we do encrypt injected frames, will be fixed
- * up in radiotap parser if not wanted */
- skb->do_not_encrypt = 0;
-
/*
* fix up the pointers accounting for the radiotap
* header still being in there. We are being given
@@ -1586,8 +1542,10 @@ int ieee80211_monitor_start_xmit(struct sk_buff *skb,
skb_set_network_header(skb, len_rthdr);
skb_set_transport_header(skb, len_rthdr);
- /* pass the radiotap header up to the next stage intact */
- dev_queue_xmit(skb);
+ memset(info, 0, sizeof(*info));
+
+ /* pass the radiotap header up to xmit */
+ ieee80211_xmit(IEEE80211_DEV_TO_SUB_IF(dev), skb);
return NETDEV_TX_OK;
fail:
@@ -1610,11 +1568,12 @@ fail:
* encapsulated packet will then be passed to master interface, wlan#.11, for
* transmission (through low-level driver).
*/
-int ieee80211_subif_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int ret = NETDEV_TX_BUSY, head_need;
u16 ethertype, hdrlen, meshhdrlen = 0;
__le16 fc;
@@ -1627,7 +1586,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
u32 sta_flags = 0;
if (unlikely(skb->len < ETH_HLEN)) {
- ret = 0;
+ ret = NETDEV_TX_OK;
goto fail;
}
@@ -1660,52 +1619,58 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
break;
#ifdef CONFIG_MAC80211_MESH
case NL80211_IFTYPE_MESH_POINT:
- fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS);
if (!sdata->u.mesh.mshcfg.dot11MeshTTL) {
/* Do not send frames with mesh_ttl == 0 */
sdata->u.mesh.mshstats.dropped_frames_ttl++;
- ret = 0;
+ ret = NETDEV_TX_OK;
goto fail;
}
- memset(&mesh_hdr, 0, sizeof(mesh_hdr));
if (compare_ether_addr(dev->dev_addr,
skb->data + ETH_ALEN) == 0) {
- /* RA TA DA SA */
- memset(hdr.addr1, 0, ETH_ALEN);
- memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
- memcpy(hdr.addr3, skb->data, ETH_ALEN);
- memcpy(hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
- meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, sdata);
+ hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
+ skb->data, skb->data + ETH_ALEN);
+ meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
+ sdata, NULL, NULL, NULL);
} else {
/* packet from other interface */
struct mesh_path *mppath;
+ int is_mesh_mcast = 1;
+ char *mesh_da;
- memset(hdr.addr1, 0, ETH_ALEN);
- memcpy(hdr.addr2, dev->dev_addr, ETH_ALEN);
- memcpy(hdr.addr4, dev->dev_addr, ETH_ALEN);
-
+ rcu_read_lock();
if (is_multicast_ether_addr(skb->data))
- memcpy(hdr.addr3, skb->data, ETH_ALEN);
+ /* DA TA mSA AE:SA */
+ mesh_da = skb->data;
else {
- rcu_read_lock();
mppath = mpp_path_lookup(skb->data, sdata);
- if (mppath)
- memcpy(hdr.addr3, mppath->mpp, ETH_ALEN);
- else
- memset(hdr.addr3, 0xff, ETH_ALEN);
- rcu_read_unlock();
+ if (mppath) {
+ /* RA TA mDA mSA AE:DA SA */
+ mesh_da = mppath->mpp;
+ is_mesh_mcast = 0;
+ } else
+ /* DA TA mSA AE:SA */
+ mesh_da = dev->broadcast;
}
+ hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
+ mesh_da, dev->dev_addr);
+ rcu_read_unlock();
+ if (is_mesh_mcast)
+ meshhdrlen =
+ ieee80211_new_mesh_header(&mesh_hdr,
+ sdata,
+ skb->data + ETH_ALEN,
+ NULL,
+ NULL);
+ else
+ meshhdrlen =
+ ieee80211_new_mesh_header(&mesh_hdr,
+ sdata,
+ NULL,
+ skb->data,
+ skb->data + ETH_ALEN);
- mesh_hdr.flags |= MESH_FLAGS_AE_A5_A6;
- mesh_hdr.ttl = sdata->u.mesh.mshcfg.dot11MeshTTL;
- put_unaligned(cpu_to_le32(sdata->u.mesh.mesh_seqnum), &mesh_hdr.seqnum);
- memcpy(mesh_hdr.eaddr1, skb->data, ETH_ALEN);
- memcpy(mesh_hdr.eaddr2, skb->data + ETH_ALEN, ETH_ALEN);
- sdata->u.mesh.mesh_seqnum++;
- meshhdrlen = 18;
}
- hdrlen = 30;
break;
#endif
case NL80211_IFTYPE_STATION:
@@ -1724,7 +1689,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
hdrlen = 24;
break;
default:
- ret = 0;
+ ret = NETDEV_TX_OK;
goto fail;
}
@@ -1766,7 +1731,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
- ret = 0;
+ ret = NETDEV_TX_OK;
goto fail;
}
@@ -1842,9 +1807,6 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
nh_pos += hdrlen;
h_pos += hdrlen;
- skb->iif = dev->ifindex;
-
- skb->dev = local->mdev;
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
@@ -1855,13 +1817,15 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
skb_set_network_header(skb, nh_pos);
skb_set_transport_header(skb, h_pos);
+ memset(info, 0, sizeof(*info));
+
dev->trans_start = jiffies;
- dev_queue_xmit(skb);
+ ieee80211_xmit(sdata, skb);
- return 0;
+ return NETDEV_TX_OK;
fail:
- if (!ret)
+ if (ret == NETDEV_TX_OK)
dev_kfree_skb(skb);
return ret;
@@ -1887,101 +1851,74 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata;
struct sta_info *sta;
struct ieee80211_hdr *hdr;
- struct net_device *dev;
int ret;
bool result = true;
- /* does interface still exist? */
- dev = dev_get_by_index(&init_net, skb->iif);
- if (!dev) {
- dev_kfree_skb(skb);
- return true;
- }
-
- /* validate info->control.vif against skb->iif */
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
- sdata = container_of(sdata->bss,
- struct ieee80211_sub_if_data,
- u.ap);
-
- if (unlikely(info->control.vif && info->control.vif != &sdata->vif)) {
- dev_kfree_skb(skb);
- result = true;
- goto out;
- }
+ sdata = vif_to_sdata(info->control.vif);
if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
- ieee80211_tx(dev, skb, true);
+ ieee80211_tx(sdata, skb, true);
} else {
hdr = (struct ieee80211_hdr *)skb->data;
sta = sta_info_get(local, hdr->addr1);
- ret = __ieee80211_tx(local, &skb, sta);
+ ret = __ieee80211_tx(local, &skb, sta, true);
if (ret != IEEE80211_TX_OK)
result = false;
}
- out:
- dev_put(dev);
-
return result;
}
/*
- * Transmit all pending packets. Called from tasklet, locks master device
- * TX lock so that no new packets can come in.
+ * Transmit all pending packets. Called from tasklet.
*/
void ieee80211_tx_pending(unsigned long data)
{
struct ieee80211_local *local = (struct ieee80211_local *)data;
- struct net_device *dev = local->mdev;
unsigned long flags;
int i;
- bool next;
+ bool txok;
rcu_read_lock();
- netif_tx_lock_bh(dev);
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
for (i = 0; i < local->hw.queues; i++) {
/*
* If queue is stopped by something other than due to pending
* frames, or we have no pending frames, proceed to next queue.
*/
- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
- next = false;
- if (local->queue_stop_reasons[i] !=
- BIT(IEEE80211_QUEUE_STOP_REASON_PENDING) ||
+ if (local->queue_stop_reasons[i] ||
skb_queue_empty(&local->pending[i]))
- next = true;
- spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-
- if (next)
continue;
- /*
- * start the queue now to allow processing our packets,
- * we're under the tx lock here anyway so nothing will
- * happen as a result of this
- */
- netif_start_subqueue(local->mdev, i);
-
while (!skb_queue_empty(&local->pending[i])) {
- struct sk_buff *skb = skb_dequeue(&local->pending[i]);
+ struct sk_buff *skb = __skb_dequeue(&local->pending[i]);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sub_if_data *sdata;
- if (!ieee80211_tx_pending_skb(local, skb)) {
- skb_queue_head(&local->pending[i], skb);
- break;
+ if (WARN_ON(!info->control.vif)) {
+ kfree_skb(skb);
+ continue;
}
- }
- /* Start regular packet processing again. */
- if (skb_queue_empty(&local->pending[i]))
- ieee80211_wake_queue_by_reason(&local->hw, i,
- IEEE80211_QUEUE_STOP_REASON_PENDING);
+ sdata = vif_to_sdata(info->control.vif);
+ dev_hold(sdata->dev);
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock,
+ flags);
+
+ txok = ieee80211_tx_pending_skb(local, skb);
+ dev_put(sdata->dev);
+ if (!txok)
+ __skb_queue_head(&local->pending[i], skb);
+ spin_lock_irqsave(&local->queue_stop_reason_lock,
+ flags);
+ if (!txok)
+ break;
+ }
}
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
- netif_tx_unlock_bh(dev);
rcu_read_unlock();
}
@@ -2156,8 +2093,7 @@ struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
info = IEEE80211_SKB_CB(skb);
- skb->do_not_encrypt = 1;
-
+ info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
info->band = band;
/*
* XXX: For now, always use the lowest rate
@@ -2228,9 +2164,6 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
sdata = vif_to_sdata(vif);
bss = &sdata->u.ap;
- if (!bss)
- return NULL;
-
rcu_read_lock();
beacon = rcu_dereference(bss->beacon);
@@ -2256,7 +2189,7 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
cpu_to_le16(IEEE80211_FCTL_MOREDATA);
}
- if (!ieee80211_tx_prepare(local, &tx, skb))
+ if (!ieee80211_tx_prepare(sdata, &tx, skb))
break;
dev_kfree_skb_any(skb);
}
@@ -2276,3 +2209,24 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,
return skb;
}
EXPORT_SYMBOL(ieee80211_get_buffered_bc);
+
+void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
+ int encrypt)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ skb_set_mac_header(skb, 0);
+ skb_set_network_header(skb, 0);
+ skb_set_transport_header(skb, 0);
+
+ if (!encrypt)
+ info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+
+	/*
+	 * The other path calling ieee80211_xmit is from the tasklet,
+	 * and while we can handle concurrent transmissions, the locking
+	 * requirement is that we do not enter the TX path with BHs
+	 * enabled.
+	 */
+ local_bh_disable();
+ ieee80211_xmit(sdata, skb);
+ local_bh_enable();
+}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 915e7776931..dd656432136 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -31,6 +31,7 @@
#include "mesh.h"
#include "wme.h"
#include "led.h"
+#include "wep.h"
/* privid for wiphys to determine whether they belong to us or not */
void *mac80211_wiphy_privid = &mac80211_wiphy_privid;
@@ -274,16 +275,12 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
__clear_bit(reason, &local->queue_stop_reasons[queue]);
- if (!skb_queue_empty(&local->pending[queue]) &&
- local->queue_stop_reasons[queue] ==
- BIT(IEEE80211_QUEUE_STOP_REASON_PENDING))
- tasklet_schedule(&local->tx_pending_tasklet);
-
if (local->queue_stop_reasons[queue] != 0)
/* someone still has this queue stopped */
return;
- netif_wake_subqueue(local->mdev, queue);
+ if (!skb_queue_empty(&local->pending[queue]))
+ tasklet_schedule(&local->tx_pending_tasklet);
}
void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -312,14 +309,6 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
if (WARN_ON(queue >= hw->queues))
return;
- /*
- * Only stop if it was previously running, this is necessary
- * for correct pending packets handling because there we may
- * start (but not wake) the queue and rely on that.
- */
- if (!local->queue_stop_reasons[queue])
- netif_stop_subqueue(local->mdev, queue);
-
__set_bit(reason, &local->queue_stop_reasons[queue]);
}
@@ -347,11 +336,16 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
struct ieee80211_hw *hw = &local->hw;
unsigned long flags;
int queue = skb_get_queue_mapping(skb);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (WARN_ON(!info->control.vif)) {
+		kfree_skb(skb);
+ return;
+ }
spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
__ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
- __ieee80211_stop_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_PENDING);
- skb_queue_tail(&local->pending[queue], skb);
+ __skb_queue_tail(&local->pending[queue], skb);
__ieee80211_wake_queue(hw, queue, IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
@@ -370,18 +364,21 @@ int ieee80211_add_pending_skbs(struct ieee80211_local *local,
IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
while ((skb = skb_dequeue(skbs))) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (WARN_ON(!info->control.vif)) {
+			kfree_skb(skb);
+ continue;
+ }
+
ret++;
queue = skb_get_queue_mapping(skb);
- skb_queue_tail(&local->pending[queue], skb);
+ __skb_queue_tail(&local->pending[queue], skb);
}
- for (i = 0; i < hw->queues; i++) {
- if (ret)
- __ieee80211_stop_queue(hw, i,
- IEEE80211_QUEUE_STOP_REASON_PENDING);
+ for (i = 0; i < hw->queues; i++)
__ieee80211_wake_queue(hw, i,
IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
- }
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
return ret;
@@ -412,11 +409,16 @@ EXPORT_SYMBOL(ieee80211_stop_queues);
int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue)
{
struct ieee80211_local *local = hw_to_local(hw);
+ unsigned long flags;
+ int ret;
if (WARN_ON(queue >= hw->queues))
return true;
- return __netif_subqueue_stopped(local->mdev, queue);
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ ret = !!local->queue_stop_reasons[queue];
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ return ret;
}
EXPORT_SYMBOL(ieee80211_queue_stopped);
@@ -509,6 +511,46 @@ void ieee80211_iterate_active_interfaces_atomic(
}
EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
+/*
+ * Nothing should be queued onto the workqueue during the
+ * suspend->resume cycle. If this WARN is seen then there is a bug
+ * either in the driver's suspend path or in mac80211 itself, which
+ * is still queueing work that we have not yet cleared during
+ * mac80211's suspend cycle.
+ */
+static bool ieee80211_can_queue_work(struct ieee80211_local *local)
+{
+ if (WARN(local->suspended, "queueing ieee80211 work while "
+ "going to suspend\n"))
+ return false;
+
+ return true;
+}
+
+void ieee80211_queue_work(struct ieee80211_hw *hw, struct work_struct *work)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ if (!ieee80211_can_queue_work(local))
+ return;
+
+ queue_work(local->workqueue, work);
+}
+EXPORT_SYMBOL(ieee80211_queue_work);
+
+void ieee80211_queue_delayed_work(struct ieee80211_hw *hw,
+ struct delayed_work *dwork,
+ unsigned long delay)
+{
+ struct ieee80211_local *local = hw_to_local(hw);
+
+ if (!ieee80211_can_queue_work(local))
+ return;
+
+ queue_delayed_work(local->workqueue, dwork, delay);
+}
+EXPORT_SYMBOL(ieee80211_queue_delayed_work);
+
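Both helpers above funnel work submission through a single guard so that nothing can slip onto the mac80211 workqueue while the device is suspended. A small userspace sketch of that guarded-submission pattern, assuming a fabricated fake_local structure and queue_guarded() helper rather than the real queue_work() plumbing:

#include <stdbool.h>
#include <stdio.h>

struct fake_local {
	bool suspended;		/* mirrors local->suspended */
};

static bool can_queue_work(struct fake_local *local)
{
	if (local->suspended) {
		fprintf(stderr, "WARN: queueing work while going to suspend\n");
		return false;
	}
	return true;
}

static void queue_guarded(struct fake_local *local, void (*fn)(void))
{
	if (!can_queue_work(local))
		return;
	fn();	/* the real code calls queue_work(local->workqueue, ...) here */
}

static void scan_work(void)
{
	printf("scan work ran\n");
}

int main(void)
{
	struct fake_local local = { .suspended = false };

	queue_guarded(&local, scan_work);	/* runs */
	local.suspended = true;
	queue_guarded(&local, scan_work);	/* warns and is dropped */
	return 0;
}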
void ieee802_11_parse_elems(u8 *start, size_t len,
struct ieee802_11_elems *elems)
{
@@ -760,20 +802,6 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
ieee80211_set_wmm_default(sdata);
}
-void ieee80211_tx_skb(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb,
- int encrypt)
-{
- skb->dev = sdata->local->mdev;
- skb_set_mac_header(skb, 0);
- skb_set_network_header(skb, 0);
- skb_set_transport_header(skb, 0);
-
- skb->iif = sdata->dev->ifindex;
- skb->do_not_encrypt = !encrypt;
-
- dev_queue_xmit(skb);
-}
-
u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
enum ieee80211_band band)
{
@@ -804,12 +832,13 @@ u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
u16 transaction, u16 auth_alg,
- u8 *extra, size_t extra_len,
- const u8 *bssid, int encrypt)
+ u8 *extra, size_t extra_len, const u8 *bssid,
+ const u8 *key, u8 key_len, u8 key_idx)
{
struct ieee80211_local *local = sdata->local;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
+ int err;
skb = dev_alloc_skb(local->hw.extra_tx_headroom +
sizeof(*mgmt) + 6 + extra_len);
@@ -824,8 +853,6 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
memset(mgmt, 0, 24 + 6);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_AUTH);
- if (encrypt)
- mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
memcpy(mgmt->da, bssid, ETH_ALEN);
memcpy(mgmt->sa, sdata->dev->dev_addr, ETH_ALEN);
memcpy(mgmt->bssid, bssid, ETH_ALEN);
@@ -835,7 +862,13 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
if (extra)
memcpy(skb_put(skb, extra_len), extra, extra_len);
- ieee80211_tx_skb(sdata, skb, encrypt);
+ if (auth_alg == WLAN_AUTH_SHARED_KEY && transaction == 3) {
+ mgmt->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ err = ieee80211_wep_encrypt(local, skb, key, key_len, key_idx);
+ WARN_ON(err);
+ }
+
+ ieee80211_tx_skb(sdata, skb, 0);
}
int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
@@ -974,6 +1007,16 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
return supp_rates;
}
+void ieee80211_stop_device(struct ieee80211_local *local)
+{
+ ieee80211_led_radio(local, false);
+
+ cancel_work_sync(&local->reconfig_filter);
+ drv_stop(local);
+
+ flush_workqueue(local->workqueue);
+}
+
int ieee80211_reconfig(struct ieee80211_local *local)
{
struct ieee80211_hw *hw = &local->hw;
@@ -1043,9 +1086,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
/* reconfigure hardware */
ieee80211_hw_config(local, ~0);
- netif_addr_lock_bh(local->mdev);
ieee80211_configure_filter(local);
- netif_addr_unlock_bh(local->mdev);
/* Finally also reconfigure all the BSS information */
list_for_each_entry(sdata, &local->interfaces, list) {
@@ -1121,3 +1162,4 @@ int ieee80211_reconfig(struct ieee80211_local *local)
#endif
return 0;
}
+
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index ef73105b306..8a980f13694 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -67,10 +67,10 @@ static inline bool ieee80211_wep_weak_iv(u32 iv, int keylen)
static void ieee80211_wep_get_iv(struct ieee80211_local *local,
- struct ieee80211_key *key, u8 *iv)
+ int keylen, int keyidx, u8 *iv)
{
local->wep_iv++;
- if (ieee80211_wep_weak_iv(local->wep_iv, key->conf.keylen))
+ if (ieee80211_wep_weak_iv(local->wep_iv, keylen))
local->wep_iv += 0x0100;
if (!iv)
@@ -79,13 +79,13 @@ static void ieee80211_wep_get_iv(struct ieee80211_local *local,
*iv++ = (local->wep_iv >> 16) & 0xff;
*iv++ = (local->wep_iv >> 8) & 0xff;
*iv++ = local->wep_iv & 0xff;
- *iv++ = key->conf.keyidx << 6;
+ *iv++ = keyidx << 6;
}
static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
struct sk_buff *skb,
- struct ieee80211_key *key)
+ int keylen, int keyidx)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
unsigned int hdrlen;
@@ -100,7 +100,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
hdrlen = ieee80211_hdrlen(hdr->frame_control);
newhdr = skb_push(skb, WEP_IV_LEN);
memmove(newhdr, newhdr + WEP_IV_LEN, hdrlen);
- ieee80211_wep_get_iv(local, key, newhdr + hdrlen);
+ ieee80211_wep_get_iv(local, keylen, keyidx, newhdr + hdrlen);
return newhdr + hdrlen;
}
@@ -144,26 +144,17 @@ void ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key,
*
* WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
*/
-int ieee80211_wep_encrypt(struct ieee80211_local *local, struct sk_buff *skb,
- struct ieee80211_key *key)
+int ieee80211_wep_encrypt(struct ieee80211_local *local,
+ struct sk_buff *skb,
+ const u8 *key, int keylen, int keyidx)
{
- u32 klen;
- u8 *rc4key, *iv;
+ u8 *iv;
size_t len;
+ u8 rc4key[3 + WLAN_KEY_LEN_WEP104];
- if (!key || key->conf.alg != ALG_WEP)
- return -1;
-
- klen = 3 + key->conf.keylen;
- rc4key = kmalloc(klen, GFP_ATOMIC);
- if (!rc4key)
- return -1;
-
- iv = ieee80211_wep_add_iv(local, skb, key);
- if (!iv) {
- kfree(rc4key);
+ iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx);
+ if (!iv)
return -1;
- }
len = skb->len - (iv + WEP_IV_LEN - skb->data);
@@ -171,16 +162,14 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local, struct sk_buff *skb,
memcpy(rc4key, iv, 3);
/* Copy rest of the WEP key (the secret part) */
- memcpy(rc4key + 3, key->conf.key, key->conf.keylen);
+ memcpy(rc4key + 3, key, keylen);
/* Add room for ICV */
skb_put(skb, WEP_ICV_LEN);
- ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, klen,
+ ieee80211_wep_encrypt_data(local->wep_tx_tfm, rc4key, keylen + 3,
iv + WEP_IV_LEN, len);
- kfree(rc4key);
-
return 0;
}
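The rewritten encrypt path builds the RC4 seed on the stack as the first three IV bytes followed by the WEP secret, instead of kmalloc'ing it per frame. A standalone sketch of that seed layout, using illustrative byte values and a WEP-104 key length (the key-index byte of the on-air IV field is not part of the seed):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WEP104_KEY_LEN 13	/* 104-bit secret */

int main(void)
{
	uint8_t iv[3] = { 0x01, 0x02, 0x03 };	/* first 3 bytes of the IV field */
	uint8_t key[WEP104_KEY_LEN] = { 0 };	/* WEP secret (all zero here) */
	uint8_t rc4seed[3 + WEP104_KEY_LEN];

	/* seed = IV || secret, exactly the buffer handed to RC4 */
	memcpy(rc4seed, iv, sizeof(iv));
	memcpy(rc4seed + sizeof(iv), key, sizeof(key));

	printf("RC4 seed is %zu bytes\n", sizeof(rc4seed));
	return 0;
}

Keeping the seed on the stack (at most 3 + WLAN_KEY_LEN_WEP104 bytes) is what lets the new code drop the GFP_ATOMIC allocation and its failure path.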
@@ -216,8 +205,9 @@ int ieee80211_wep_decrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key,
* failure. If frame is OK, IV and ICV will be removed, i.e., decrypted payload
* is moved to the beginning of the skb and skb length will be reduced.
*/
-int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb,
- struct ieee80211_key *key)
+static int ieee80211_wep_decrypt(struct ieee80211_local *local,
+ struct sk_buff *skb,
+ struct ieee80211_key *key)
{
u32 klen;
u8 *rc4key;
@@ -314,12 +304,16 @@ static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (!(tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) {
- if (ieee80211_wep_encrypt(tx->local, skb, tx->key))
+ if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key,
+ tx->key->conf.keylen,
+ tx->key->conf.keyidx))
return -1;
} else {
info->control.hw_key = &tx->key->conf;
if (tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) {
- if (!ieee80211_wep_add_iv(tx->local, skb, tx->key))
+ if (!ieee80211_wep_add_iv(tx->local, skb,
+ tx->key->conf.keylen,
+ tx->key->conf.keyidx))
return -1;
}
}
diff --git a/net/mac80211/wep.h b/net/mac80211/wep.h
index d3f0db48314..fe29d7e5759 100644
--- a/net/mac80211/wep.h
+++ b/net/mac80211/wep.h
@@ -20,12 +20,11 @@ int ieee80211_wep_init(struct ieee80211_local *local);
void ieee80211_wep_free(struct ieee80211_local *local);
void ieee80211_wep_encrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key,
size_t klen, u8 *data, size_t data_len);
+int ieee80211_wep_encrypt(struct ieee80211_local *local,
+ struct sk_buff *skb,
+ const u8 *key, int keylen, int keyidx);
int ieee80211_wep_decrypt_data(struct crypto_blkcipher *tfm, u8 *rc4key,
size_t klen, u8 *data, size_t data_len);
-int ieee80211_wep_encrypt(struct ieee80211_local *local, struct sk_buff *skb,
- struct ieee80211_key *key);
-int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb,
- struct ieee80211_key *key);
bool ieee80211_wep_is_weak_iv(struct sk_buff *skb, struct ieee80211_key *key);
ieee80211_rx_result
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
deleted file mode 100644
index 1da81f45674..00000000000
--- a/net/mac80211/wext.c
+++ /dev/null
@@ -1,633 +0,0 @@
-/*
- * Copyright 2002-2005, Instant802 Networks, Inc.
- * Copyright 2005-2006, Devicescape Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-#include <linux/wireless.h>
-#include <net/iw_handler.h>
-#include <asm/uaccess.h>
-
-#include <net/mac80211.h>
-#include "ieee80211_i.h"
-#include "led.h"
-#include "rate.h"
-#include "wpa.h"
-#include "aes_ccm.h"
-
-
-static int ieee80211_ioctl_siwgenie(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *extra)
-{
- struct ieee80211_sub_if_data *sdata;
-
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length);
- if (ret && ret != -EALREADY)
- return ret;
- sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
- sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME;
- sdata->u.mgd.flags &= ~IEEE80211_STA_CONTROL_PORT;
- if (ret != -EALREADY)
- ieee80211_sta_req_auth(sdata);
- return 0;
- }
-
- return -EOPNOTSUPP;
-}
-
-static int ieee80211_ioctl_siwfreq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *freq, char *extra)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_channel *chan;
-
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
- return cfg80211_ibss_wext_siwfreq(dev, info, freq, extra);
- else if (sdata->vif.type == NL80211_IFTYPE_STATION)
- sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_CHANNEL_SEL;
-
- /* freq->e == 0: freq->m = channel; otherwise freq = m * 10^e */
- if (freq->e == 0) {
- if (freq->m < 0) {
- if (sdata->vif.type == NL80211_IFTYPE_STATION)
- sdata->u.mgd.flags |=
- IEEE80211_STA_AUTO_CHANNEL_SEL;
- return 0;
- } else
- chan = ieee80211_get_channel(local->hw.wiphy,
- ieee80211_channel_to_frequency(freq->m));
- } else {
- int i, div = 1000000;
- for (i = 0; i < freq->e; i++)
- div /= 10;
- if (div <= 0)
- return -EINVAL;
- chan = ieee80211_get_channel(local->hw.wiphy, freq->m / div);
- }
-
- if (!chan)
- return -EINVAL;
-
- if (chan->flags & IEEE80211_CHAN_DISABLED)
- return -EINVAL;
-
- /*
- * no change except maybe auto -> fixed, ignore the HT
- * setting so you can fix a channel you're on already
- */
- if (local->oper_channel == chan)
- return 0;
-
- if (sdata->vif.type == NL80211_IFTYPE_STATION)
- ieee80211_sta_req_auth(sdata);
-
- local->oper_channel = chan;
- local->oper_channel_type = NL80211_CHAN_NO_HT;
- ieee80211_hw_config(local, 0);
-
- return 0;
-}
-
-
-static int ieee80211_ioctl_giwfreq(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_freq *freq, char *extra)
-{
- struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
- return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra);
-
- freq->m = local->oper_channel->center_freq;
- freq->e = 6;
-
- return 0;
-}
-
-
-static int ieee80211_ioctl_siwessid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *ssid)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- size_t len = data->length;
- int ret;
-
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
- return cfg80211_ibss_wext_siwessid(dev, info, data, ssid);
-
- /* iwconfig uses nul termination in SSID.. */
- if (len > 0 && ssid[len - 1] == '\0')
- len--;
-
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- if (data->flags)
- sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL;
- else
- sdata->u.mgd.flags |= IEEE80211_STA_AUTO_SSID_SEL;
-
- ret = ieee80211_sta_set_ssid(sdata, ssid, len);
- if (ret)
- return ret;
-
- sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME;
- sdata->u.mgd.flags &= ~IEEE80211_STA_CONTROL_PORT;
- ieee80211_sta_req_auth(sdata);
- return 0;
- }
-
- return -EOPNOTSUPP;
-}
-
-
-static int ieee80211_ioctl_giwessid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *ssid)
-{
- size_t len;
- struct ieee80211_sub_if_data *sdata;
-
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
- return cfg80211_ibss_wext_giwessid(dev, info, data, ssid);
-
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- int res = ieee80211_sta_get_ssid(sdata, ssid, &len);
- if (res == 0) {
- data->length = len;
- data->flags = 1;
- } else
- data->flags = 0;
- return res;
- }
-
- return -EOPNOTSUPP;
-}
-
-
-static int ieee80211_ioctl_siwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *ap_addr, char *extra)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
- return cfg80211_ibss_wext_siwap(dev, info, ap_addr, extra);
-
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- int ret;
-
- if (is_zero_ether_addr((u8 *) &ap_addr->sa_data))
- sdata->u.mgd.flags |= IEEE80211_STA_AUTO_BSSID_SEL |
- IEEE80211_STA_AUTO_CHANNEL_SEL;
- else if (is_broadcast_ether_addr((u8 *) &ap_addr->sa_data))
- sdata->u.mgd.flags |= IEEE80211_STA_AUTO_BSSID_SEL;
- else
- sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
- ret = ieee80211_sta_set_bssid(sdata, (u8 *) &ap_addr->sa_data);
- if (ret)
- return ret;
- sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME;
- sdata->u.mgd.flags &= ~IEEE80211_STA_CONTROL_PORT;
- ieee80211_sta_req_auth(sdata);
- return 0;
- } else if (sdata->vif.type == NL80211_IFTYPE_WDS) {
- /*
- * If it is necessary to update the WDS peer address
- * while the interface is running, then we need to do
- * more work here, namely if it is running we need to
- * add a new and remove the old STA entry, this is
- * normally handled by _open() and _stop().
- */
- if (netif_running(dev))
- return -EBUSY;
-
- memcpy(&sdata->u.wds.remote_addr, (u8 *) &ap_addr->sa_data,
- ETH_ALEN);
-
- return 0;
- }
-
- return -EOPNOTSUPP;
-}
-
-
-static int ieee80211_ioctl_giwap(struct net_device *dev,
- struct iw_request_info *info,
- struct sockaddr *ap_addr, char *extra)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
- return cfg80211_ibss_wext_giwap(dev, info, ap_addr, extra);
-
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- if (sdata->u.mgd.state == IEEE80211_STA_MLME_ASSOCIATED) {
- ap_addr->sa_family = ARPHRD_ETHER;
- memcpy(&ap_addr->sa_data, sdata->u.mgd.bssid, ETH_ALEN);
- } else
- memset(&ap_addr->sa_data, 0, ETH_ALEN);
- return 0;
- } else if (sdata->vif.type == NL80211_IFTYPE_WDS) {
- ap_addr->sa_family = ARPHRD_ETHER;
- memcpy(&ap_addr->sa_data, sdata->u.wds.remote_addr, ETH_ALEN);
- return 0;
- }
-
- return -EOPNOTSUPP;
-}
-
-
-static int ieee80211_ioctl_siwrate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rate, char *extra)
-{
- struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
- int i, err = -EINVAL;
- u32 target_rate = rate->value / 100000;
- struct ieee80211_sub_if_data *sdata;
- struct ieee80211_supported_band *sband;
-
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-
- /* target_rate = -1, rate->fixed = 0 means auto only, so use all rates
- * target_rate = X, rate->fixed = 1 means only rate X
- * target_rate = X, rate->fixed = 0 means all rates <= X */
- sdata->max_ratectrl_rateidx = -1;
- sdata->force_unicast_rateidx = -1;
- if (rate->value < 0)
- return 0;
-
- for (i=0; i< sband->n_bitrates; i++) {
- struct ieee80211_rate *brate = &sband->bitrates[i];
- int this_rate = brate->bitrate;
-
- if (target_rate == this_rate) {
- sdata->max_ratectrl_rateidx = i;
- if (rate->fixed)
- sdata->force_unicast_rateidx = i;
- err = 0;
- break;
- }
- }
- return err;
-}
-
-static int ieee80211_ioctl_giwrate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *rate, char *extra)
-{
- struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
- struct sta_info *sta;
- struct ieee80211_sub_if_data *sdata;
- struct ieee80211_supported_band *sband;
-
- sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
- if (sdata->vif.type != NL80211_IFTYPE_STATION)
- return -EOPNOTSUPP;
-
- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-
- rcu_read_lock();
-
- sta = sta_info_get(local, sdata->u.mgd.bssid);
-
- if (sta && !(sta->last_tx_rate.flags & IEEE80211_TX_RC_MCS))
- rate->value = sband->bitrates[sta->last_tx_rate.idx].bitrate;
- else
- rate->value = 0;
-
- rcu_read_unlock();
-
- if (!sta)
- return -ENODEV;
-
- rate->value *= 100000;
-
- return 0;
-}
-
-static int ieee80211_ioctl_siwpower(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
- struct ieee80211_conf *conf = &local->hw.conf;
- int timeout = 0;
- bool ps;
-
- if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
- return -EOPNOTSUPP;
-
- if (sdata->vif.type != NL80211_IFTYPE_STATION)
- return -EINVAL;
-
- if (wrq->disabled) {
- ps = false;
- timeout = 0;
- goto set;
- }
-
- switch (wrq->flags & IW_POWER_MODE) {
- case IW_POWER_ON: /* If not specified */
- case IW_POWER_MODE: /* If set all mask */
- case IW_POWER_ALL_R: /* If explicitely state all */
- ps = true;
- break;
- default: /* Otherwise we ignore */
- return -EINVAL;
- }
-
- if (wrq->flags & ~(IW_POWER_MODE | IW_POWER_TIMEOUT))
- return -EINVAL;
-
- if (wrq->flags & IW_POWER_TIMEOUT)
- timeout = wrq->value / 1000;
-
- set:
- if (ps == sdata->u.mgd.powersave && timeout == conf->dynamic_ps_timeout)
- return 0;
-
- sdata->u.mgd.powersave = ps;
- conf->dynamic_ps_timeout = timeout;
-
- if (local->hw.flags & IEEE80211_HW_SUPPORTS_DYNAMIC_PS)
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
-
- ieee80211_recalc_ps(local, -1);
-
- return 0;
-}
-
-static int ieee80211_ioctl_giwpower(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu,
- char *extra)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
-
- wrqu->power.disabled = !sdata->u.mgd.powersave;
-
- return 0;
-}
-
-static int ieee80211_ioctl_siwauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *data, char *extra)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- int ret = 0;
-
- switch (data->flags & IW_AUTH_INDEX) {
- case IW_AUTH_WPA_VERSION:
- case IW_AUTH_CIPHER_GROUP:
- case IW_AUTH_WPA_ENABLED:
- case IW_AUTH_RX_UNENCRYPTED_EAPOL:
- case IW_AUTH_KEY_MGMT:
- case IW_AUTH_CIPHER_GROUP_MGMT:
- break;
- case IW_AUTH_CIPHER_PAIRWISE:
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- if (data->value & (IW_AUTH_CIPHER_WEP40 |
- IW_AUTH_CIPHER_WEP104 | IW_AUTH_CIPHER_TKIP))
- sdata->u.mgd.flags |=
- IEEE80211_STA_TKIP_WEP_USED;
- else
- sdata->u.mgd.flags &=
- ~IEEE80211_STA_TKIP_WEP_USED;
- }
- break;
- case IW_AUTH_DROP_UNENCRYPTED:
- sdata->drop_unencrypted = !!data->value;
- break;
- case IW_AUTH_PRIVACY_INVOKED:
- if (sdata->vif.type != NL80211_IFTYPE_STATION)
- ret = -EINVAL;
- else {
- sdata->u.mgd.flags &= ~IEEE80211_STA_PRIVACY_INVOKED;
- /*
- * Privacy invoked by wpa_supplicant, store the
- * value and allow associating to a protected
- * network without having a key up front.
- */
- if (data->value)
- sdata->u.mgd.flags |=
- IEEE80211_STA_PRIVACY_INVOKED;
- }
- break;
- case IW_AUTH_80211_AUTH_ALG:
- if (sdata->vif.type == NL80211_IFTYPE_STATION)
- sdata->u.mgd.auth_algs = data->value;
- else
- ret = -EOPNOTSUPP;
- break;
- case IW_AUTH_MFP:
- if (!(sdata->local->hw.flags & IEEE80211_HW_MFP_CAPABLE)) {
- ret = -EOPNOTSUPP;
- break;
- }
- if (sdata->vif.type == NL80211_IFTYPE_STATION) {
- switch (data->value) {
- case IW_AUTH_MFP_DISABLED:
- sdata->u.mgd.mfp = IEEE80211_MFP_DISABLED;
- break;
- case IW_AUTH_MFP_OPTIONAL:
- sdata->u.mgd.mfp = IEEE80211_MFP_OPTIONAL;
- break;
- case IW_AUTH_MFP_REQUIRED:
- sdata->u.mgd.mfp = IEEE80211_MFP_REQUIRED;
- break;
- default:
- ret = -EINVAL;
- }
- } else
- ret = -EOPNOTSUPP;
- break;
- default:
- ret = -EOPNOTSUPP;
- break;
- }
- return ret;
-}
-
-/* Get wireless statistics. Called by /proc/net/wireless and by SIOCGIWSTATS */
-static struct iw_statistics *ieee80211_get_wireless_stats(struct net_device *dev)
-{
- struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
- struct iw_statistics *wstats = &local->wstats;
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- struct sta_info *sta = NULL;
-
- rcu_read_lock();
-
- if (sdata->vif.type == NL80211_IFTYPE_STATION)
- sta = sta_info_get(local, sdata->u.mgd.bssid);
-
- if (!sta) {
- wstats->discard.fragment = 0;
- wstats->discard.misc = 0;
- wstats->qual.qual = 0;
- wstats->qual.level = 0;
- wstats->qual.noise = 0;
- wstats->qual.updated = IW_QUAL_ALL_INVALID;
- } else {
- wstats->qual.updated = 0;
- /*
- * mirror what cfg80211 does for iwrange/scan results,
- * otherwise userspace gets confused.
- */
- if (local->hw.flags & (IEEE80211_HW_SIGNAL_UNSPEC |
- IEEE80211_HW_SIGNAL_DBM)) {
- wstats->qual.updated |= IW_QUAL_LEVEL_UPDATED;
- wstats->qual.updated |= IW_QUAL_QUAL_UPDATED;
- } else {
- wstats->qual.updated |= IW_QUAL_LEVEL_INVALID;
- wstats->qual.updated |= IW_QUAL_QUAL_INVALID;
- }
-
- if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) {
- wstats->qual.level = sta->last_signal;
- wstats->qual.qual = sta->last_signal;
- } else if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
- int sig = sta->last_signal;
-
- wstats->qual.updated |= IW_QUAL_DBM;
- wstats->qual.level = sig;
- if (sig < -110)
- sig = -110;
- else if (sig > -40)
- sig = -40;
- wstats->qual.qual = sig + 110;
- }
-
- if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
- /*
- * This assumes that if driver reports noise, it also
- * reports signal in dBm.
- */
- wstats->qual.noise = sta->last_noise;
- wstats->qual.updated |= IW_QUAL_NOISE_UPDATED;
- } else {
- wstats->qual.updated |= IW_QUAL_NOISE_INVALID;
- }
- }
-
- rcu_read_unlock();
-
- return wstats;
-}
-
-static int ieee80211_ioctl_giwauth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *data, char *extra)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- int ret = 0;
-
- switch (data->flags & IW_AUTH_INDEX) {
- case IW_AUTH_80211_AUTH_ALG:
- if (sdata->vif.type == NL80211_IFTYPE_STATION)
- data->value = sdata->u.mgd.auth_algs;
- else
- ret = -EOPNOTSUPP;
- break;
- default:
- ret = -EOPNOTSUPP;
- break;
- }
- return ret;
-}
-
-
-/* Structures to export the Wireless Handlers */
-
-static const iw_handler ieee80211_handler[] =
-{
- (iw_handler) NULL, /* SIOCSIWCOMMIT */
- (iw_handler) cfg80211_wext_giwname, /* SIOCGIWNAME */
- (iw_handler) NULL, /* SIOCSIWNWID */
- (iw_handler) NULL, /* SIOCGIWNWID */
- (iw_handler) ieee80211_ioctl_siwfreq, /* SIOCSIWFREQ */
- (iw_handler) ieee80211_ioctl_giwfreq, /* SIOCGIWFREQ */
- (iw_handler) cfg80211_wext_siwmode, /* SIOCSIWMODE */
- (iw_handler) cfg80211_wext_giwmode, /* SIOCGIWMODE */
- (iw_handler) NULL, /* SIOCSIWSENS */
- (iw_handler) NULL, /* SIOCGIWSENS */
- (iw_handler) NULL /* not used */, /* SIOCSIWRANGE */
- (iw_handler) cfg80211_wext_giwrange, /* SIOCGIWRANGE */
- (iw_handler) NULL /* not used */, /* SIOCSIWPRIV */
- (iw_handler) NULL /* kernel code */, /* SIOCGIWPRIV */
- (iw_handler) NULL /* not used */, /* SIOCSIWSTATS */
- (iw_handler) NULL /* kernel code */, /* SIOCGIWSTATS */
- (iw_handler) NULL, /* SIOCSIWSPY */
- (iw_handler) NULL, /* SIOCGIWSPY */
- (iw_handler) NULL, /* SIOCSIWTHRSPY */
- (iw_handler) NULL, /* SIOCGIWTHRSPY */
- (iw_handler) ieee80211_ioctl_siwap, /* SIOCSIWAP */
- (iw_handler) ieee80211_ioctl_giwap, /* SIOCGIWAP */
- (iw_handler) cfg80211_wext_siwmlme, /* SIOCSIWMLME */
- (iw_handler) NULL, /* SIOCGIWAPLIST */
- (iw_handler) cfg80211_wext_siwscan, /* SIOCSIWSCAN */
- (iw_handler) cfg80211_wext_giwscan, /* SIOCGIWSCAN */
- (iw_handler) ieee80211_ioctl_siwessid, /* SIOCSIWESSID */
- (iw_handler) ieee80211_ioctl_giwessid, /* SIOCGIWESSID */
- (iw_handler) NULL, /* SIOCSIWNICKN */
- (iw_handler) NULL, /* SIOCGIWNICKN */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) ieee80211_ioctl_siwrate, /* SIOCSIWRATE */
- (iw_handler) ieee80211_ioctl_giwrate, /* SIOCGIWRATE */
- (iw_handler) cfg80211_wext_siwrts, /* SIOCSIWRTS */
- (iw_handler) cfg80211_wext_giwrts, /* SIOCGIWRTS */
- (iw_handler) cfg80211_wext_siwfrag, /* SIOCSIWFRAG */
- (iw_handler) cfg80211_wext_giwfrag, /* SIOCGIWFRAG */
- (iw_handler) cfg80211_wext_siwtxpower, /* SIOCSIWTXPOW */
- (iw_handler) cfg80211_wext_giwtxpower, /* SIOCGIWTXPOW */
- (iw_handler) cfg80211_wext_siwretry, /* SIOCSIWRETRY */
- (iw_handler) cfg80211_wext_giwretry, /* SIOCGIWRETRY */
- (iw_handler) cfg80211_wext_siwencode, /* SIOCSIWENCODE */
- (iw_handler) cfg80211_wext_giwencode, /* SIOCGIWENCODE */
- (iw_handler) ieee80211_ioctl_siwpower, /* SIOCSIWPOWER */
- (iw_handler) ieee80211_ioctl_giwpower, /* SIOCGIWPOWER */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) ieee80211_ioctl_siwgenie, /* SIOCSIWGENIE */
- (iw_handler) NULL, /* SIOCGIWGENIE */
- (iw_handler) ieee80211_ioctl_siwauth, /* SIOCSIWAUTH */
- (iw_handler) ieee80211_ioctl_giwauth, /* SIOCGIWAUTH */
- (iw_handler) cfg80211_wext_siwencodeext, /* SIOCSIWENCODEEXT */
- (iw_handler) NULL, /* SIOCGIWENCODEEXT */
- (iw_handler) NULL, /* SIOCSIWPMKSA */
- (iw_handler) NULL, /* -- hole -- */
-};
-
-const struct iw_handler_def ieee80211_iw_handler_def =
-{
- .num_standard = ARRAY_SIZE(ieee80211_handler),
- .standard = (iw_handler *) ieee80211_handler,
- .get_wireless_stats = ieee80211_get_wireless_stats,
-};
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 116a923b14d..b19b7696f3a 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -85,10 +85,8 @@ static u16 classify80211(struct ieee80211_local *local, struct sk_buff *skb)
return ieee802_1d_to_ac[skb->priority];
}
-u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
+void ieee80211_select_queue(struct ieee80211_local *local, struct sk_buff *skb)
{
- struct ieee80211_master_priv *mpriv = netdev_priv(dev);
- struct ieee80211_local *local = mpriv->local;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
u16 queue;
u8 tid;
@@ -113,5 +111,5 @@ u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
*p = 0;
}
- return queue;
+ skb_set_queue_mapping(skb, queue);
}
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index 7520d2e014d..d4fd87ca511 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -20,6 +20,7 @@
extern const int ieee802_1d_to_ac[8];
-u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb);
+void ieee80211_select_queue(struct ieee80211_local *local,
+ struct sk_buff *skb);
#endif /* _WME_H */
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index dcfae8884b8..70778694877 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -122,7 +122,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx)
return RX_DROP_UNUSABLE;
mac80211_ev_michael_mic_failure(rx->sdata, rx->key->conf.keyidx,
- (void *) skb->data, NULL);
+ (void *) skb->data, NULL,
+ GFP_ATOMIC);
return RX_DROP_UNUSABLE;
}
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 201b8ea3020..3c7e42735b6 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -18,6 +18,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
@@ -262,12 +265,12 @@ static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
if (vseq->delta || vseq->previous_delta) {
if(after(seq, vseq->init_seq)) {
th->seq = htonl(seq + vseq->delta);
- IP_VS_DBG(9, "vs_fix_seq(): added delta (%d) to seq\n",
- vseq->delta);
+ IP_VS_DBG(9, "%s(): added delta (%d) to seq\n",
+ __func__, vseq->delta);
} else {
th->seq = htonl(seq + vseq->previous_delta);
- IP_VS_DBG(9, "vs_fix_seq(): added previous_delta "
- "(%d) to seq\n", vseq->previous_delta);
+ IP_VS_DBG(9, "%s(): added previous_delta (%d) to seq\n",
+ __func__, vseq->previous_delta);
}
}
}
@@ -291,14 +294,14 @@ vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
to receive next, so compare it with init_seq+delta */
if(after(ack_seq, vseq->init_seq+vseq->delta)) {
th->ack_seq = htonl(ack_seq - vseq->delta);
- IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted delta "
- "(%d) from ack_seq\n", vseq->delta);
+ IP_VS_DBG(9, "%s(): subtracted delta "
+ "(%d) from ack_seq\n", __func__, vseq->delta);
} else {
th->ack_seq = htonl(ack_seq - vseq->previous_delta);
- IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted "
+ IP_VS_DBG(9, "%s(): subtracted "
"previous_delta (%d) from ack_seq\n",
- vseq->previous_delta);
+ __func__, vseq->previous_delta);
}
}
}
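
Editor's note: the recurring change in the IPVS files from here on is the addition of KMSG_COMPONENT and pr_fmt() before the first #include, so that every pr_err()/pr_info() in the file is automatically prefixed with "IPVS: ". A minimal userspace sketch of the same macro mechanism follows — the real pr_err/pr_info definitions live in the kernel's printk.h; fprintf/printf merely stand in here.

/* Userspace sketch of the KMSG_COMPONENT / pr_fmt() prefixing idiom. */
#include <stdio.h>

#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

/* Simplified stand-ins for the kernel's pr_err()/pr_info(): they paste
 * pr_fmt() around the caller's format string at preprocessing time. */
#define pr_err(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_info("ipvs loaded.\n");          /* prints: IPVS: ipvs loaded. */
	pr_err("can't register hooks.\n");  /* prints: IPVS: can't register hooks. */
	return 0;
}
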
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 77bfdfeb966..27c30cf933d 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -22,6 +22,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/net.h>
@@ -150,8 +153,8 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
atomic_inc(&cp->refcnt);
ret = 1;
} else {
- IP_VS_ERR("ip_vs_conn_hash(): request for already hashed, "
- "called from %p\n", __builtin_return_address(0));
+ pr_err("%s(): request for already hashed, called from %pF\n",
+ __func__, __builtin_return_address(0));
ret = 0;
}
@@ -689,7 +692,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC);
if (cp == NULL) {
- IP_VS_ERR_RL("ip_vs_conn_new: no memory available.\n");
+ IP_VS_ERR_RL("%s(): no memory\n", __func__);
return NULL;
}
@@ -1073,10 +1076,10 @@ int __init ip_vs_conn_init(void)
return -ENOMEM;
}
- IP_VS_INFO("Connection hash table configured "
- "(size=%d, memory=%ldKbytes)\n",
- IP_VS_CONN_TAB_SIZE,
- (long)(IP_VS_CONN_TAB_SIZE*sizeof(struct list_head))/1024);
+ pr_info("Connection hash table configured "
+ "(size=%d, memory=%ldKbytes)\n",
+ IP_VS_CONN_TAB_SIZE,
+ (long)(IP_VS_CONN_TAB_SIZE*sizeof(struct list_head))/1024);
IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n",
sizeof(struct ip_vs_conn));
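
Editor's note: the second pattern, visible in ip_vs_conn_hash() above, replaces hard-coded function names in log strings with __func__ and prints the caller via %pF. A small sketch of the __func__ half, assuming nothing kernel-specific — %pF is a kernel printk extension, so plain %p is used instead, and report_double_hash() is an illustrative name, not an IPVS function.

#include <stdio.h>

static void report_double_hash(void)
{
	/* Before: "ip_vs_conn_hash(): request for already hashed ..."
	 * After: the compiler supplies the enclosing function's name,
	 * so the message cannot go stale when code is renamed or copied. */
	fprintf(stderr, "%s(): request for already hashed, called from %p\n",
		__func__, __builtin_return_address(0));
}

int main(void)
{
	report_double_hash();
	return 0;
}
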
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 8dddb17a947..b95699f0054 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -24,6 +24,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ip.h>
@@ -388,9 +391,9 @@ ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
*/
if (!svc->fwmark && pptr[1] != svc->port) {
if (!svc->port)
- IP_VS_ERR("Schedule: port zero only supported "
- "in persistent services, "
- "check your ipvs configuration\n");
+ pr_err("Schedule: port zero only supported "
+ "in persistent services, "
+ "check your ipvs configuration\n");
return NULL;
}
@@ -462,7 +465,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
ip_vs_service_put(svc);
/* create a new connection entry */
- IP_VS_DBG(6, "ip_vs_leave: create a cache_bypass entry\n");
+ IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
cp = ip_vs_conn_new(svc->af, iph.protocol,
&iph.saddr, pptr[0],
&iph.daddr, pptr[1],
@@ -664,8 +667,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
unsigned int verdict = NF_DROP;
if (IP_VS_FWD_METHOD(cp) != 0) {
- IP_VS_ERR("shouldn't reach here, because the box is on the "
- "half connection in the tun/dr module.\n");
+ pr_err("shouldn't reach here, because the box is on the "
+ "half connection in the tun/dr module.\n");
}
/* Ensure the checksum is correct */
@@ -1256,7 +1259,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
struct ip_vs_iphdr iph;
struct ip_vs_protocol *pp;
struct ip_vs_conn *cp;
- int ret, restart, af;
+ int ret, restart, af, pkts;
af = (skb->protocol == htons(ETH_P_IP)) ? AF_INET : AF_INET6;
@@ -1274,13 +1277,24 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
return NF_ACCEPT;
}
- if (unlikely(iph.protocol == IPPROTO_ICMP)) {
- int related, verdict = ip_vs_in_icmp(skb, &related, hooknum);
+#ifdef CONFIG_IP_VS_IPV6
+ if (af == AF_INET6) {
+ if (unlikely(iph.protocol == IPPROTO_ICMPV6)) {
+ int related, verdict = ip_vs_in_icmp_v6(skb, &related, hooknum);
- if (related)
- return verdict;
- ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
- }
+ if (related)
+ return verdict;
+ ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
+ }
+ } else
+#endif
+ if (unlikely(iph.protocol == IPPROTO_ICMP)) {
+ int related, verdict = ip_vs_in_icmp(skb, &related, hooknum);
+
+ if (related)
+ return verdict;
+ ip_vs_fill_iphdr(af, skb_network_header(skb), &iph);
+ }
/* Protocol supported? */
pp = ip_vs_proto_get(iph.protocol);
@@ -1343,12 +1357,12 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
* Sync connection if it is about to close to
* encorage the standby servers to update the connections timeout
*/
- atomic_inc(&cp->in_pkts);
+ pkts = atomic_add_return(1, &cp->in_pkts);
if (af == AF_INET &&
(ip_vs_sync_state & IP_VS_STATE_MASTER) &&
(((cp->protocol != IPPROTO_TCP ||
cp->state == IP_VS_TCP_S_ESTABLISHED) &&
- (atomic_read(&cp->in_pkts) % sysctl_ip_vs_sync_threshold[1]
+ (pkts % sysctl_ip_vs_sync_threshold[1]
== sysctl_ip_vs_sync_threshold[0])) ||
((cp->protocol == IPPROTO_TCP) && (cp->old_state != cp->state) &&
((cp->state == IP_VS_TCP_S_FIN_WAIT) ||
@@ -1487,7 +1501,7 @@ static int __init ip_vs_init(void)
ret = ip_vs_control_init();
if (ret < 0) {
- IP_VS_ERR("can't setup control.\n");
+ pr_err("can't setup control.\n");
goto cleanup_estimator;
}
@@ -1495,23 +1509,23 @@ static int __init ip_vs_init(void)
ret = ip_vs_app_init();
if (ret < 0) {
- IP_VS_ERR("can't setup application helper.\n");
+ pr_err("can't setup application helper.\n");
goto cleanup_protocol;
}
ret = ip_vs_conn_init();
if (ret < 0) {
- IP_VS_ERR("can't setup connection table.\n");
+ pr_err("can't setup connection table.\n");
goto cleanup_app;
}
ret = nf_register_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
if (ret < 0) {
- IP_VS_ERR("can't register hooks.\n");
+ pr_err("can't register hooks.\n");
goto cleanup_conn;
}
- IP_VS_INFO("ipvs loaded.\n");
+ pr_info("ipvs loaded.\n");
return ret;
cleanup_conn:
@@ -1534,7 +1548,7 @@ static void __exit ip_vs_cleanup(void)
ip_vs_protocol_cleanup();
ip_vs_control_cleanup();
ip_vs_estimator_cleanup();
- IP_VS_INFO("ipvs unloaded.\n");
+ pr_info("ipvs unloaded.\n");
}
module_init(ip_vs_init);
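
Editor's note: besides the logging conversion, ip_vs_in() above now captures the incremented packet count with atomic_add_return() instead of atomic_inc() followed by a separate atomic_read(), so the sync-threshold test uses the value this very increment produced. A sketch of the idea with C11 atomics standing in for the kernel's atomic_t; atomic_fetch_add returns the old value, so +1 mirrors atomic_add_return(), and sync_period/sync_refresh are illustrative stand-ins for sysctl_ip_vs_sync_threshold[].

#include <stdatomic.h>
#include <stdio.h>

static atomic_int in_pkts;

static int count_packet(void)
{
	/* One atomic step: no other CPU can change the counter between
	 * the increment and the value we base the decision on. */
	return atomic_fetch_add(&in_pkts, 1) + 1;
}

int main(void)
{
	int sync_period = 50, sync_refresh = 0;  /* stand-in thresholds */
	int pkts = count_packet();

	if (pkts % sync_period == sync_refresh)
		printf("would sync connection state at packet %d\n", pkts);
	return 0;
}
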
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 7c1333c67ff..fba2892b99e 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -18,6 +18,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -340,8 +343,8 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc)
unsigned hash;
if (svc->flags & IP_VS_SVC_F_HASHED) {
- IP_VS_ERR("ip_vs_svc_hash(): request for already hashed, "
- "called from %p\n", __builtin_return_address(0));
+ pr_err("%s(): request for already hashed, called from %pF\n",
+ __func__, __builtin_return_address(0));
return 0;
}
@@ -374,8 +377,8 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc)
static int ip_vs_svc_unhash(struct ip_vs_service *svc)
{
if (!(svc->flags & IP_VS_SVC_F_HASHED)) {
- IP_VS_ERR("ip_vs_svc_unhash(): request for unhash flagged, "
- "called from %p\n", __builtin_return_address(0));
+ pr_err("%s(): request for unhash flagged, called from %pF\n",
+ __func__, __builtin_return_address(0));
return 0;
}
@@ -841,7 +844,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
dest = kzalloc(sizeof(struct ip_vs_dest), GFP_ATOMIC);
if (dest == NULL) {
- IP_VS_ERR("ip_vs_new_dest: kmalloc failed.\n");
+ pr_err("%s(): no memory.\n", __func__);
return -ENOMEM;
}
@@ -885,13 +888,13 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
EnterFunction(2);
if (udest->weight < 0) {
- IP_VS_ERR("ip_vs_add_dest(): server weight less than zero\n");
+ pr_err("%s(): server weight less than zero\n", __func__);
return -ERANGE;
}
if (udest->l_threshold > udest->u_threshold) {
- IP_VS_ERR("ip_vs_add_dest(): lower threshold is higher than "
- "upper threshold\n");
+ pr_err("%s(): lower threshold is higher than upper threshold\n",
+ __func__);
return -ERANGE;
}
@@ -903,7 +906,7 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
dest = ip_vs_lookup_dest(svc, &daddr, dport);
if (dest != NULL) {
- IP_VS_DBG(1, "ip_vs_add_dest(): dest already exists\n");
+ IP_VS_DBG(1, "%s(): dest already exists\n", __func__);
return -EEXIST;
}
@@ -997,13 +1000,13 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
EnterFunction(2);
if (udest->weight < 0) {
- IP_VS_ERR("ip_vs_edit_dest(): server weight less than zero\n");
+ pr_err("%s(): server weight less than zero\n", __func__);
return -ERANGE;
}
if (udest->l_threshold > udest->u_threshold) {
- IP_VS_ERR("ip_vs_edit_dest(): lower threshold is higher than "
- "upper threshold\n");
+ pr_err("%s(): lower threshold is higher than upper threshold\n",
+ __func__);
return -ERANGE;
}
@@ -1015,7 +1018,7 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
dest = ip_vs_lookup_dest(svc, &daddr, dport);
if (dest == NULL) {
- IP_VS_DBG(1, "ip_vs_edit_dest(): dest doesn't exist\n");
+ IP_VS_DBG(1, "%s(): dest doesn't exist\n", __func__);
return -ENOENT;
}
@@ -1115,7 +1118,7 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
dest = ip_vs_lookup_dest(svc, &udest->addr, dport);
if (dest == NULL) {
- IP_VS_DBG(1, "ip_vs_del_dest(): destination not found!\n");
+ IP_VS_DBG(1, "%s(): destination not found!\n", __func__);
return -ENOENT;
}
@@ -1161,8 +1164,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
/* Lookup the scheduler by 'u->sched_name' */
sched = ip_vs_scheduler_get(u->sched_name);
if (sched == NULL) {
- IP_VS_INFO("Scheduler module ip_vs_%s not found\n",
- u->sched_name);
+ pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name);
ret = -ENOENT;
goto out_mod_dec;
}
@@ -1176,7 +1178,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
svc = kzalloc(sizeof(struct ip_vs_service), GFP_ATOMIC);
if (svc == NULL) {
- IP_VS_DBG(1, "ip_vs_add_service: kmalloc failed.\n");
+ IP_VS_DBG(1, "%s(): no memory\n", __func__);
ret = -ENOMEM;
goto out_err;
}
@@ -1259,8 +1261,7 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
*/
sched = ip_vs_scheduler_get(u->sched_name);
if (sched == NULL) {
- IP_VS_INFO("Scheduler module ip_vs_%s not found\n",
- u->sched_name);
+ pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name);
return -ENOENT;
}
old_sched = sched;
@@ -2077,8 +2078,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
return -EPERM;
if (len != set_arglen[SET_CMDID(cmd)]) {
- IP_VS_ERR("set_ctl: len %u != %u\n",
- len, set_arglen[SET_CMDID(cmd)]);
+ pr_err("set_ctl: len %u != %u\n",
+ len, set_arglen[SET_CMDID(cmd)]);
return -EINVAL;
}
@@ -2129,9 +2130,9 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
/* Check for valid protocol: TCP or UDP, even for fwmark!=0 */
if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP) {
- IP_VS_ERR("set_ctl: invalid protocol: %d %pI4:%d %s\n",
- usvc.protocol, &usvc.addr.ip,
- ntohs(usvc.port), usvc.sched_name);
+ pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n",
+ usvc.protocol, &usvc.addr.ip,
+ ntohs(usvc.port), usvc.sched_name);
ret = -EFAULT;
goto out_unlock;
}
@@ -2356,8 +2357,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
return -EPERM;
if (*len < get_arglen[GET_CMDID(cmd)]) {
- IP_VS_ERR("get_ctl: len %u < %u\n",
- *len, get_arglen[GET_CMDID(cmd)]);
+ pr_err("get_ctl: len %u < %u\n",
+ *len, get_arglen[GET_CMDID(cmd)]);
return -EINVAL;
}
@@ -2402,7 +2403,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
size = sizeof(*get) +
sizeof(struct ip_vs_service_entry) * get->num_services;
if (*len != size) {
- IP_VS_ERR("length: %u != %u\n", *len, size);
+ pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
@@ -2442,7 +2443,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
size = sizeof(*get) +
sizeof(struct ip_vs_dest_entry) * get->num_dests;
if (*len != size) {
- IP_VS_ERR("length: %u != %u\n", *len, size);
+ pr_err("length: %u != %u\n", *len, size);
ret = -EINVAL;
goto out;
}
@@ -3170,7 +3171,7 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
else if (cmd == IPVS_CMD_GET_CONFIG)
reply_cmd = IPVS_CMD_SET_CONFIG;
else {
- IP_VS_ERR("unknown Generic Netlink command\n");
+ pr_err("unknown Generic Netlink command\n");
return -EINVAL;
}
@@ -3231,11 +3232,11 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
}
genlmsg_end(msg, reply);
- ret = genlmsg_unicast(msg, info->snd_pid);
+ ret = genlmsg_reply(msg, info);
goto out;
nla_put_failure:
- IP_VS_ERR("not enough space in Netlink message\n");
+ pr_err("not enough space in Netlink message\n");
ret = -EMSGSIZE;
out_err:
@@ -3366,13 +3367,13 @@ int __init ip_vs_control_init(void)
ret = nf_register_sockopt(&ip_vs_sockopts);
if (ret) {
- IP_VS_ERR("cannot register sockopt.\n");
+ pr_err("cannot register sockopt.\n");
return ret;
}
ret = ip_vs_genl_register();
if (ret) {
- IP_VS_ERR("cannot register Generic Netlink interface.\n");
+ pr_err("cannot register Generic Netlink interface.\n");
nf_unregister_sockopt(&ip_vs_sockopts);
return ret;
}
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c
index a9dac74bb13..fe3e18834b9 100644
--- a/net/netfilter/ipvs/ip_vs_dh.c
+++ b/net/netfilter/ipvs/ip_vs_dh.c
@@ -35,6 +35,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -147,7 +150,7 @@ static int ip_vs_dh_init_svc(struct ip_vs_service *svc)
tbl = kmalloc(sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE,
GFP_ATOMIC);
if (tbl == NULL) {
- IP_VS_ERR("ip_vs_dh_init_svc(): no memory\n");
+ pr_err("%s(): no memory\n", __func__);
return -ENOMEM;
}
svc->sched_data = tbl;
@@ -214,7 +217,7 @@ ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
- IP_VS_DBG(6, "ip_vs_dh_schedule(): Scheduling...\n");
+ IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
tbl = (struct ip_vs_dh_bucket *)svc->sched_data;
dest = ip_vs_dh_get(svc->af, tbl, &iph.daddr);
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index 2eb2860dabb..702b53ca937 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -11,6 +11,10 @@
* Changes:
*
*/
+
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 428edbf481c..33e2c799cba 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -22,6 +22,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
@@ -382,8 +385,8 @@ static int __init ip_vs_ftp_init(void)
ret = register_ip_vs_app_inc(app, app->protocol, ports[i]);
if (ret)
break;
- IP_VS_INFO("%s: loaded support on port[%d] = %d\n",
- app->name, i, ports[i]);
+ pr_info("%s: loaded support on port[%d] = %d\n",
+ app->name, i, ports[i]);
}
if (ret)
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 3eb5e2660c4..c1757f3620c 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -39,6 +39,9 @@
* me to write this module.
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -199,7 +202,7 @@ ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
if (!en) {
en = kmalloc(sizeof(*en), GFP_ATOMIC);
if (!en) {
- IP_VS_ERR("ip_vs_lblc_new(): no memory\n");
+ pr_err("%s(): no memory\n", __func__);
return NULL;
}
@@ -332,7 +335,7 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
*/
tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
if (tbl == NULL) {
- IP_VS_ERR("ip_vs_lblc_init_svc(): no memory\n");
+ pr_err("%s(): no memory\n", __func__);
return -ENOMEM;
}
svc->sched_data = tbl;
@@ -477,7 +480,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
- IP_VS_DBG(6, "ip_vs_lblc_schedule(): Scheduling...\n");
+ IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
/* First look in our cache */
read_lock(&svc->sched_lock);
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index c04ce56c7f0..715b57f9540 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -37,6 +37,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -108,7 +111,7 @@ ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
e = kmalloc(sizeof(*e), GFP_ATOMIC);
if (e == NULL) {
- IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n");
+ pr_err("%s(): no memory\n", __func__);
return NULL;
}
@@ -202,8 +205,9 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
}
}
- IP_VS_DBG_BUF(6, "ip_vs_dest_set_min: server %s:%d "
+ IP_VS_DBG_BUF(6, "%s(): server %s:%d "
"activeconns %d refcnt %d weight %d overhead %d\n",
+ __func__,
IP_VS_DBG_ADDR(least->af, &least->addr),
ntohs(least->port),
atomic_read(&least->activeconns),
@@ -249,8 +253,9 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
}
}
- IP_VS_DBG_BUF(6, "ip_vs_dest_set_max: server %s:%d "
+ IP_VS_DBG_BUF(6, "%s(): server %s:%d "
"activeconns %d refcnt %d weight %d overhead %d\n",
+ __func__,
IP_VS_DBG_ADDR(most->af, &most->addr), ntohs(most->port),
atomic_read(&most->activeconns),
atomic_read(&most->refcnt),
@@ -374,7 +379,7 @@ ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
if (!en) {
en = kmalloc(sizeof(*en), GFP_ATOMIC);
if (!en) {
- IP_VS_ERR("ip_vs_lblcr_new(): no memory\n");
+ pr_err("%s(): no memory\n", __func__);
return NULL;
}
@@ -508,7 +513,7 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
*/
tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
if (tbl == NULL) {
- IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n");
+ pr_err("%s(): no memory\n", __func__);
return -ENOMEM;
}
svc->sched_data = tbl;
@@ -654,7 +659,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);
- IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n");
+ IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
/* First look in our cache */
read_lock(&svc->sched_lock);
diff --git a/net/netfilter/ipvs/ip_vs_lc.c b/net/netfilter/ipvs/ip_vs_lc.c
index d0dadc8a65f..4f69db1fac5 100644
--- a/net/netfilter/ipvs/ip_vs_lc.c
+++ b/net/netfilter/ipvs/ip_vs_lc.c
@@ -14,6 +14,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
@@ -44,7 +47,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
struct ip_vs_dest *dest, *least = NULL;
unsigned int loh = 0, doh;
- IP_VS_DBG(6, "ip_vs_lc_schedule(): Scheduling...\n");
+ IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
/*
* Simply select the server with the least number of
diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c
index 694952db502..c413e183082 100644
--- a/net/netfilter/ipvs/ip_vs_nq.c
+++ b/net/netfilter/ipvs/ip_vs_nq.c
@@ -31,6 +31,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
@@ -57,7 +60,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
struct ip_vs_dest *dest, *least = NULL;
unsigned int loh = 0, doh;
- IP_VS_DBG(6, "ip_vs_nq_schedule(): Scheduling...\n");
+ IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
/*
* We calculate the load of each dest server as follows:
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index a01520e3d6b..3e767167454 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -13,6 +13,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
@@ -124,7 +127,8 @@ ip_vs_create_timeout_table(int *table, int size)
* Set timeout value for state specified by name
*/
int
-ip_vs_set_state_timeout(int *table, int num, char **names, char *name, int to)
+ip_vs_set_state_timeout(int *table, int num, const char *const *names,
+ const char *name, int to)
{
int i;
@@ -181,7 +185,7 @@ ip_vs_tcpudp_debug_packet_v4(struct ip_vs_protocol *pp,
&ih->daddr, ntohs(pptr[1]));
}
- printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
+ pr_debug("%s: %s\n", msg, buf);
}
#ifdef CONFIG_IP_VS_IPV6
@@ -215,7 +219,7 @@ ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp,
&ih->daddr, ntohs(pptr[1]));
}
- printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
+ pr_debug("%s: %s\n", msg, buf);
}
#endif
@@ -259,7 +263,7 @@ int __init ip_vs_protocol_init(void)
#ifdef CONFIG_IP_VS_PROTO_ESP
REGISTER_PROTOCOL(&ip_vs_protocol_esp);
#endif
- IP_VS_INFO("Registered protocols (%s)\n", &protocols[2]);
+ pr_info("Registered protocols (%s)\n", &protocols[2]);
return 0;
}
diff --git a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
index 79f56c1e7c1..c30b43c36cd 100644
--- a/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_ah_esp.c
@@ -10,6 +10,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/module.h>
@@ -138,7 +141,7 @@ ah_esp_debug_packet_v4(struct ip_vs_protocol *pp, const struct sk_buff *skb,
sprintf(buf, "%s %pI4->%pI4",
pp->name, &ih->saddr, &ih->daddr);
- printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
+ pr_debug("%s: %s\n", msg, buf);
}
#ifdef CONFIG_IP_VS_IPV6
@@ -156,7 +159,7 @@ ah_esp_debug_packet_v6(struct ip_vs_protocol *pp, const struct sk_buff *skb,
sprintf(buf, "%s %pI6->%pI6",
pp->name, &ih->saddr, &ih->daddr);
- printk(KERN_DEBUG "IPVS: %s: %s\n", msg, buf);
+ pr_debug("%s: %s\n", msg, buf);
}
#endif
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index 8cba4180285..91d28e07374 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -13,6 +13,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/kernel.h>
#include <linux/ip.h>
#include <linux/tcp.h> /* for tcphdr */
@@ -374,7 +377,7 @@ static int tcp_timeouts[IP_VS_TCP_S_LAST+1] = {
[IP_VS_TCP_S_LAST] = 2*HZ,
};
-static char * tcp_state_name_table[IP_VS_TCP_S_LAST+1] = {
+static const char *const tcp_state_name_table[IP_VS_TCP_S_LAST+1] = {
[IP_VS_TCP_S_NONE] = "NONE",
[IP_VS_TCP_S_ESTABLISHED] = "ESTABLISHED",
[IP_VS_TCP_S_SYN_SENT] = "SYN_SENT",
@@ -661,7 +664,7 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
break;
spin_unlock(&tcp_app_lock);
- IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
+ IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
"%s:%u to app %s on port %u\n",
__func__,
IP_VS_DBG_ADDR(cp->af, &cp->caddr),
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index d2930a71084..e7a6885e016 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -13,6 +13,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
@@ -442,7 +445,7 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp)
break;
spin_unlock(&udp_app_lock);
- IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
+ IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
"%s:%u to app %s on port %u\n",
__func__,
IP_VS_DBG_ADDR(cp->af, &cp->caddr),
@@ -469,7 +472,7 @@ static int udp_timeouts[IP_VS_UDP_S_LAST+1] = {
[IP_VS_UDP_S_LAST] = 2*HZ,
};
-static char * udp_state_name_table[IP_VS_UDP_S_LAST+1] = {
+static const char *const udp_state_name_table[IP_VS_UDP_S_LAST+1] = {
[IP_VS_UDP_S_NORMAL] = "UDP",
[IP_VS_UDP_S_LAST] = "BUG!",
};
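
Editor's note: the tcp_state_name_table/udp_state_name_table changes above, together with the new ip_vs_set_state_timeout() prototype in ip_vs_proto.c, are a const-correctness cleanup — both the array of pointers and the strings it references become read-only. A compact sketch of the resulting types; state_name() is an illustrative helper, not an IPVS function.

#include <stdio.h>

static const char *const udp_state_name_table[] = {
	[0] = "UDP",
	[1] = "BUG!",
};

/* "const char *const *" matches what the updated helpers accept,
 * so the read-only tables can be passed without casts. */
static const char *state_name(const char *const *names, int n, int state)
{
	return (state >= 0 && state < n) ? names[state] : "?";
}

int main(void)
{
	printf("%s\n", state_name(udp_state_name_table, 2, 0));
	return 0;
}
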
diff --git a/net/netfilter/ipvs/ip_vs_rr.c b/net/netfilter/ipvs/ip_vs_rr.c
index 2d16ab7f8c1..e210f37d8ea 100644
--- a/net/netfilter/ipvs/ip_vs_rr.c
+++ b/net/netfilter/ipvs/ip_vs_rr.c
@@ -19,6 +19,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
@@ -48,7 +51,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
struct list_head *p, *q;
struct ip_vs_dest *dest;
- IP_VS_DBG(6, "ip_vs_rr_schedule(): Scheduling...\n");
+ IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
write_lock(&svc->sched_lock);
p = (struct list_head *)svc->sched_data;
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index a46ad9e3501..bbc1ac79595 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -17,6 +17,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -44,11 +47,11 @@ int ip_vs_bind_scheduler(struct ip_vs_service *svc,
int ret;
if (svc == NULL) {
- IP_VS_ERR("ip_vs_bind_scheduler(): svc arg NULL\n");
+ pr_err("%s(): svc arg NULL\n", __func__);
return -EINVAL;
}
if (scheduler == NULL) {
- IP_VS_ERR("ip_vs_bind_scheduler(): scheduler arg NULL\n");
+ pr_err("%s(): scheduler arg NULL\n", __func__);
return -EINVAL;
}
@@ -57,7 +60,7 @@ int ip_vs_bind_scheduler(struct ip_vs_service *svc,
if (scheduler->init_service) {
ret = scheduler->init_service(svc);
if (ret) {
- IP_VS_ERR("ip_vs_bind_scheduler(): init error\n");
+ pr_err("%s(): init error\n", __func__);
return ret;
}
}
@@ -74,19 +77,19 @@ int ip_vs_unbind_scheduler(struct ip_vs_service *svc)
struct ip_vs_scheduler *sched;
if (svc == NULL) {
- IP_VS_ERR("ip_vs_unbind_scheduler(): svc arg NULL\n");
+ pr_err("%s(): svc arg NULL\n", __func__);
return -EINVAL;
}
sched = svc->scheduler;
if (sched == NULL) {
- IP_VS_ERR("ip_vs_unbind_scheduler(): svc isn't bound\n");
+ pr_err("%s(): svc isn't bound\n", __func__);
return -EINVAL;
}
if (sched->done_service) {
if (sched->done_service(svc) != 0) {
- IP_VS_ERR("ip_vs_unbind_scheduler(): done error\n");
+ pr_err("%s(): done error\n", __func__);
return -EINVAL;
}
}
@@ -103,8 +106,7 @@ static struct ip_vs_scheduler *ip_vs_sched_getbyname(const char *sched_name)
{
struct ip_vs_scheduler *sched;
- IP_VS_DBG(2, "ip_vs_sched_getbyname(): sched_name \"%s\"\n",
- sched_name);
+ IP_VS_DBG(2, "%s(): sched_name \"%s\"\n", __func__, sched_name);
read_lock_bh(&__ip_vs_sched_lock);
@@ -170,12 +172,12 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
struct ip_vs_scheduler *sched;
if (!scheduler) {
- IP_VS_ERR("register_ip_vs_scheduler(): NULL arg\n");
+ pr_err("%s(): NULL arg\n", __func__);
return -EINVAL;
}
if (!scheduler->name) {
- IP_VS_ERR("register_ip_vs_scheduler(): NULL scheduler_name\n");
+ pr_err("%s(): NULL scheduler_name\n", __func__);
return -EINVAL;
}
@@ -187,8 +189,8 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
if (!list_empty(&scheduler->n_list)) {
write_unlock_bh(&__ip_vs_sched_lock);
ip_vs_use_count_dec();
- IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler "
- "already linked\n", scheduler->name);
+ pr_err("%s(): [%s] scheduler already linked\n",
+ __func__, scheduler->name);
return -EINVAL;
}
@@ -200,9 +202,8 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
if (strcmp(scheduler->name, sched->name) == 0) {
write_unlock_bh(&__ip_vs_sched_lock);
ip_vs_use_count_dec();
- IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler "
- "already existed in the system\n",
- scheduler->name);
+ pr_err("%s(): [%s] scheduler already existed "
+ "in the system\n", __func__, scheduler->name);
return -EINVAL;
}
}
@@ -212,7 +213,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
list_add(&scheduler->n_list, &ip_vs_schedulers);
write_unlock_bh(&__ip_vs_sched_lock);
- IP_VS_INFO("[%s] scheduler registered.\n", scheduler->name);
+ pr_info("[%s] scheduler registered.\n", scheduler->name);
return 0;
}
@@ -224,15 +225,15 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
{
if (!scheduler) {
- IP_VS_ERR( "unregister_ip_vs_scheduler(): NULL arg\n");
+ pr_err("%s(): NULL arg\n", __func__);
return -EINVAL;
}
write_lock_bh(&__ip_vs_sched_lock);
if (list_empty(&scheduler->n_list)) {
write_unlock_bh(&__ip_vs_sched_lock);
- IP_VS_ERR("unregister_ip_vs_scheduler(): [%s] scheduler "
- "is not in the list. failed\n", scheduler->name);
+ pr_err("%s(): [%s] scheduler is not in the list. failed\n",
+ __func__, scheduler->name);
return -EINVAL;
}
@@ -245,7 +246,7 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
/* decrease the module use count */
ip_vs_use_count_dec();
- IP_VS_INFO("[%s] scheduler unregistered.\n", scheduler->name);
+ pr_info("[%s] scheduler unregistered.\n", scheduler->name);
return 0;
}
diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c
index 20e4657d2f3..1ab75a9dc40 100644
--- a/net/netfilter/ipvs/ip_vs_sed.c
+++ b/net/netfilter/ipvs/ip_vs_sed.c
@@ -35,6 +35,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
@@ -61,7 +64,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
struct ip_vs_dest *dest, *least;
unsigned int loh, doh;
- IP_VS_DBG(6, "ip_vs_sed_schedule(): Scheduling...\n");
+ IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
/*
* We calculate the load of each dest server as follows:
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 75709ebeb63..8e6cfd36e6f 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -32,6 +32,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -144,7 +147,7 @@ static int ip_vs_sh_init_svc(struct ip_vs_service *svc)
tbl = kmalloc(sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE,
GFP_ATOMIC);
if (tbl == NULL) {
- IP_VS_ERR("ip_vs_sh_init_svc(): no memory\n");
+ pr_err("%s(): no memory\n", __func__);
return -ENOMEM;
}
svc->sched_data = tbl;
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 5c48378a852..e177f0dc208 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -17,6 +17,9 @@
* Justin Ossevoort : Fix endian problem on sync message size.
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/inetdevice.h>
@@ -243,7 +246,7 @@ void ip_vs_sync_conn(struct ip_vs_conn *cp)
if (!curr_sb) {
if (!(curr_sb=ip_vs_sync_buff_create())) {
spin_unlock(&curr_sb_lock);
- IP_VS_ERR("ip_vs_sync_buff_create failed.\n");
+ pr_err("ip_vs_sync_buff_create failed.\n");
return;
}
}
@@ -409,7 +412,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
if (dest)
atomic_dec(&dest->refcnt);
if (!cp) {
- IP_VS_ERR("ip_vs_conn_new failed\n");
+ pr_err("ip_vs_conn_new failed\n");
return;
}
} else if (!cp->dest) {
@@ -577,8 +580,8 @@ static int bind_mcastif_addr(struct socket *sock, char *ifname)
addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
if (!addr)
- IP_VS_ERR("You probably need to specify IP address on "
- "multicast interface.\n");
+ pr_err("You probably need to specify IP address on "
+ "multicast interface.\n");
IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
ifname, &addr);
@@ -602,13 +605,13 @@ static struct socket * make_send_sock(void)
/* First create a socket */
result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
if (result < 0) {
- IP_VS_ERR("Error during creation of socket; terminating\n");
+ pr_err("Error during creation of socket; terminating\n");
return ERR_PTR(result);
}
result = set_mcast_if(sock->sk, ip_vs_master_mcast_ifn);
if (result < 0) {
- IP_VS_ERR("Error setting outbound mcast interface\n");
+ pr_err("Error setting outbound mcast interface\n");
goto error;
}
@@ -617,14 +620,14 @@ static struct socket * make_send_sock(void)
result = bind_mcastif_addr(sock, ip_vs_master_mcast_ifn);
if (result < 0) {
- IP_VS_ERR("Error binding address of the mcast interface\n");
+ pr_err("Error binding address of the mcast interface\n");
goto error;
}
result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr,
sizeof(struct sockaddr), 0);
if (result < 0) {
- IP_VS_ERR("Error connecting to the multicast addr\n");
+ pr_err("Error connecting to the multicast addr\n");
goto error;
}
@@ -647,7 +650,7 @@ static struct socket * make_receive_sock(void)
/* First create a socket */
result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
if (result < 0) {
- IP_VS_ERR("Error during creation of socket; terminating\n");
+ pr_err("Error during creation of socket; terminating\n");
return ERR_PTR(result);
}
@@ -657,7 +660,7 @@ static struct socket * make_receive_sock(void)
result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr,
sizeof(struct sockaddr));
if (result < 0) {
- IP_VS_ERR("Error binding to the multicast addr\n");
+ pr_err("Error binding to the multicast addr\n");
goto error;
}
@@ -666,7 +669,7 @@ static struct socket * make_receive_sock(void)
(struct in_addr *) &mcast_addr.sin_addr,
ip_vs_backup_mcast_ifn);
if (result < 0) {
- IP_VS_ERR("Error joining to the multicast group\n");
+ pr_err("Error joining to the multicast group\n");
goto error;
}
@@ -706,7 +709,7 @@ ip_vs_send_sync_msg(struct socket *sock, struct ip_vs_sync_mesg *msg)
msg->size = htons(msg->size);
if (ip_vs_send_async(sock, (char *)msg, msize) != msize)
- IP_VS_ERR("ip_vs_send_async error\n");
+ pr_err("ip_vs_send_async error\n");
}
static int
@@ -737,9 +740,9 @@ static int sync_thread_master(void *data)
struct ip_vs_sync_thread_data *tinfo = data;
struct ip_vs_sync_buff *sb;
- IP_VS_INFO("sync thread started: state = MASTER, mcast_ifn = %s, "
- "syncid = %d\n",
- ip_vs_master_mcast_ifn, ip_vs_master_syncid);
+ pr_info("sync thread started: state = MASTER, mcast_ifn = %s, "
+ "syncid = %d\n",
+ ip_vs_master_mcast_ifn, ip_vs_master_syncid);
while (!kthread_should_stop()) {
while ((sb = sb_dequeue())) {
@@ -780,9 +783,9 @@ static int sync_thread_backup(void *data)
struct ip_vs_sync_thread_data *tinfo = data;
int len;
- IP_VS_INFO("sync thread started: state = BACKUP, mcast_ifn = %s, "
- "syncid = %d\n",
- ip_vs_backup_mcast_ifn, ip_vs_backup_syncid);
+ pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, "
+ "syncid = %d\n",
+ ip_vs_backup_mcast_ifn, ip_vs_backup_syncid);
while (!kthread_should_stop()) {
wait_event_interruptible(*tinfo->sock->sk->sk_sleep,
@@ -794,7 +797,7 @@ static int sync_thread_backup(void *data)
len = ip_vs_receive(tinfo->sock, tinfo->buf,
sync_recv_mesg_maxlen);
if (len <= 0) {
- IP_VS_ERR("receiving message error\n");
+ pr_err("receiving message error\n");
break;
}
@@ -824,7 +827,7 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
int (*threadfn)(void *data);
int result = -ENOMEM;
- IP_VS_DBG(7, "%s: pid %d\n", __func__, task_pid_nr(current));
+ IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
sizeof(struct ip_vs_sync_conn));
@@ -901,14 +904,14 @@ out:
int stop_sync_thread(int state)
{
- IP_VS_DBG(7, "%s: pid %d\n", __func__, task_pid_nr(current));
+ IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current));
if (state == IP_VS_STATE_MASTER) {
if (!sync_master_thread)
return -ESRCH;
- IP_VS_INFO("stopping master sync thread %d ...\n",
- task_pid_nr(sync_master_thread));
+ pr_info("stopping master sync thread %d ...\n",
+ task_pid_nr(sync_master_thread));
/*
* The lock synchronizes with sb_queue_tail(), so that we don't
@@ -925,8 +928,8 @@ int stop_sync_thread(int state)
if (!sync_backup_thread)
return -ESRCH;
- IP_VS_INFO("stopping backup sync thread %d ...\n",
- task_pid_nr(sync_backup_thread));
+ pr_info("stopping backup sync thread %d ...\n",
+ task_pid_nr(sync_backup_thread));
ip_vs_sync_state &= ~IP_VS_STATE_BACKUP;
kthread_stop(sync_backup_thread);
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c
index 8e942565b47..bbddfdb10db 100644
--- a/net/netfilter/ipvs/ip_vs_wlc.c
+++ b/net/netfilter/ipvs/ip_vs_wlc.c
@@ -19,6 +19,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index f7d74ef1ecf..6182e8ea0be 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -18,6 +18,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/net.h>
@@ -74,11 +77,12 @@ static int ip_vs_wrr_gcd_weight(struct ip_vs_service *svc)
static int ip_vs_wrr_max_weight(struct ip_vs_service *svc)
{
struct ip_vs_dest *dest;
- int weight = 0;
+ int new_weight, weight = 0;
list_for_each_entry(dest, &svc->destinations, n_list) {
- if (atomic_read(&dest->weight) > weight)
- weight = atomic_read(&dest->weight);
+ new_weight = atomic_read(&dest->weight);
+ if (new_weight > weight)
+ weight = new_weight;
}
return weight;
@@ -94,7 +98,7 @@ static int ip_vs_wrr_init_svc(struct ip_vs_service *svc)
*/
mark = kmalloc(sizeof(struct ip_vs_wrr_mark), GFP_ATOMIC);
if (mark == NULL) {
- IP_VS_ERR("ip_vs_wrr_init_svc(): no memory\n");
+ pr_err("%s(): no memory\n", __func__);
return -ENOMEM;
}
mark->cl = &svc->destinations;
@@ -141,7 +145,7 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
struct ip_vs_wrr_mark *mark = svc->sched_data;
struct list_head *p;
- IP_VS_DBG(6, "ip_vs_wrr_schedule(): Scheduling...\n");
+ IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
/*
* This loop will always terminate, because mark->cw in (0, max_weight]
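
Editor's note: ip_vs_wrr_max_weight() above now reads each destination's atomic weight once into a local instead of twice per iteration, so the comparison and the assignment see the same value and one atomic read per pass is saved. A sketch of the same loop shape with C11 atomics standing in for atomic_t; struct dest and max_weight() are illustrative, not the IPVS types.

#include <stdatomic.h>
#include <stdio.h>

struct dest { atomic_int weight; };

static int max_weight(struct dest *dests, int n)
{
	int i, new_weight, weight = 0;

	for (i = 0; i < n; i++) {
		new_weight = atomic_load(&dests[i].weight); /* single read */
		if (new_weight > weight)
			weight = new_weight;
	}
	return weight;
}

int main(void)
{
	struct dest d[3];

	atomic_store(&d[0].weight, 100);
	atomic_store(&d[1].weight, 50);
	atomic_store(&d[2].weight, 200);
	printf("max weight: %d\n", max_weight(d, 3));
	return 0;
}
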
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 5874657af7f..30b3189bd29 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -13,6 +13,9 @@
*
*/
+#define KMSG_COMPONENT "IPVS"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/kernel.h>
#include <linux/tcp.h> /* for tcphdr */
#include <net/ip.h>
@@ -235,8 +238,8 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
EnterFunction(10);
if (ip_route_output_key(&init_net, &rt, &fl)) {
- IP_VS_DBG_RL("ip_vs_bypass_xmit(): ip_route_output error, dest: %pI4\n",
- &iph->daddr);
+ IP_VS_DBG_RL("%s(): ip_route_output error, dest: %pI4\n",
+ __func__, &iph->daddr);
goto tx_error_icmp;
}
@@ -245,7 +248,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
ip_rt_put(rt);
icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
- IP_VS_DBG_RL("ip_vs_bypass_xmit(): frag needed\n");
+ IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
}
@@ -299,8 +302,8 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
if (!rt) {
- IP_VS_DBG_RL("ip_vs_bypass_xmit_v6(): ip6_route_output error, dest: %pI6\n",
- &iph->daddr);
+ IP_VS_DBG_RL("%s(): ip6_route_output error, dest: %pI6\n",
+ __func__, &iph->daddr);
goto tx_error_icmp;
}
@@ -309,7 +312,7 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
if (skb->len > mtu) {
dst_release(&rt->u.dst);
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
- IP_VS_DBG_RL("ip_vs_bypass_xmit_v6(): frag needed\n");
+ IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
}
@@ -536,9 +539,9 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
EnterFunction(10);
if (skb->protocol != htons(ETH_P_IP)) {
- IP_VS_DBG_RL("ip_vs_tunnel_xmit(): protocol error, "
+ IP_VS_DBG_RL("%s(): protocol error, "
"ETH_P_IP: %d, skb protocol: %d\n",
- htons(ETH_P_IP), skb->protocol);
+ __func__, htons(ETH_P_IP), skb->protocol);
goto tx_error;
}
@@ -550,7 +553,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
if (mtu < 68) {
ip_rt_put(rt);
- IP_VS_DBG_RL("ip_vs_tunnel_xmit(): mtu less than 68\n");
+ IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
goto tx_error;
}
if (skb_dst(skb))
@@ -562,7 +565,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
&& mtu < ntohs(old_iph->tot_len)) {
icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
ip_rt_put(rt);
- IP_VS_DBG_RL("ip_vs_tunnel_xmit(): frag needed\n");
+ IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
}
@@ -578,7 +581,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
if (!new_skb) {
ip_rt_put(rt);
kfree_skb(skb);
- IP_VS_ERR_RL("ip_vs_tunnel_xmit(): no memory\n");
+ IP_VS_ERR_RL("%s(): no memory\n", __func__);
return NF_STOLEN;
}
kfree_skb(skb);
@@ -646,9 +649,9 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
EnterFunction(10);
if (skb->protocol != htons(ETH_P_IPV6)) {
- IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): protocol error, "
+ IP_VS_DBG_RL("%s(): protocol error, "
"ETH_P_IPV6: %d, skb protocol: %d\n",
- htons(ETH_P_IPV6), skb->protocol);
+ __func__, htons(ETH_P_IPV6), skb->protocol);
goto tx_error;
}
@@ -662,7 +665,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
/* TODO IPv6: do we need this check in IPv6? */
if (mtu < 1280) {
dst_release(&rt->u.dst);
- IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): mtu less than 1280\n");
+ IP_VS_DBG_RL("%s(): mtu less than 1280\n", __func__);
goto tx_error;
}
if (skb_dst(skb))
@@ -671,7 +674,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr)) {
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
dst_release(&rt->u.dst);
- IP_VS_DBG_RL("ip_vs_tunnel_xmit_v6(): frag needed\n");
+ IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
}
@@ -687,7 +690,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
if (!new_skb) {
dst_release(&rt->u.dst);
kfree_skb(skb);
- IP_VS_ERR_RL("ip_vs_tunnel_xmit_v6(): no memory\n");
+ IP_VS_ERR_RL("%s(): no memory\n", __func__);
return NF_STOLEN;
}
kfree_skb(skb);
@@ -760,7 +763,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
ip_rt_put(rt);
- IP_VS_DBG_RL("ip_vs_dr_xmit(): frag needed\n");
+ IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
}
@@ -813,7 +816,7 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
if (skb->len > mtu) {
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
dst_release(&rt->u.dst);
- IP_VS_DBG_RL("ip_vs_dr_xmit_v6(): frag needed\n");
+ IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
}
@@ -888,7 +891,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF))) {
ip_rt_put(rt);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
- IP_VS_DBG_RL("ip_vs_in_icmp(): frag needed\n");
+ IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
}
@@ -963,7 +966,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
if (skb->len > mtu) {
dst_release(&rt->u.dst);
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev);
- IP_VS_DBG_RL("ip_vs_in_icmp(): frag needed\n");
+ IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
}
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index b5869b9574b..7c9ec3dee96 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -47,7 +47,7 @@
int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
enum nf_nat_manip_type manip,
- struct nlattr *attr) __read_mostly;
+ const struct nlattr *attr) __read_mostly;
EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);
DEFINE_SPINLOCK(nf_conntrack_lock);
@@ -1089,14 +1089,14 @@ void nf_conntrack_flush_report(struct net *net, u32 pid, int report)
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
-static void nf_ct_release_dying_list(void)
+static void nf_ct_release_dying_list(struct net *net)
{
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
struct hlist_nulls_node *n;
spin_lock_bh(&nf_conntrack_lock);
- hlist_nulls_for_each_entry(h, n, &init_net.ct.dying, hnnode) {
+ hlist_nulls_for_each_entry(h, n, &net->ct.dying, hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
/* never fails to remove them, no listeners at this point */
nf_ct_kill(ct);
@@ -1115,7 +1115,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
{
i_see_dead_people:
nf_ct_iterate_cleanup(net, kill_all, NULL);
- nf_ct_release_dying_list();
+ nf_ct_release_dying_list(net);
if (atomic_read(&net->ct.count) != 0) {
schedule();
goto i_see_dead_people;
@@ -1245,9 +1245,9 @@ static int nf_conntrack_init_init_net(void)
* machine has 512 buckets. >= 1GB machines have 16384 buckets. */
if (!nf_conntrack_htable_size) {
nf_conntrack_htable_size
- = (((num_physpages << PAGE_SHIFT) / 16384)
+ = (((totalram_pages << PAGE_SHIFT) / 16384)
/ sizeof(struct hlist_head));
- if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
+ if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
nf_conntrack_htable_size = 16384;
if (nf_conntrack_htable_size < 32)
nf_conntrack_htable_size = 32;
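nf_conntrack_init_init_net() now sizes the default hash table from totalram_pages, the count of pages actually managed by the kernel, instead of num_physpages, which reflects the highest physical page frame and can overcount on systems with memory holes or large reserved regions; the same substitution appears further down in xt_hashlimit and af_netlink. A standalone sketch of the sizing heuristic, with the page count passed as a parameter since totalram_pages is a kernel symbol, and sizeof(void *) standing in for sizeof(struct hlist_head):

#include <stdio.h>

/* Assumed x86-style page size; in the kernel these come from the arch. */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* One bucket per 16 KiB of RAM, capped at 16384 buckets above 1 GiB,
 * floored at 32 buckets -- the heuristic in the hunk above. */
static unsigned long conntrack_htable_size(unsigned long ram_pages)
{
	unsigned long size = ((ram_pages << PAGE_SHIFT) / 16384)
			     / sizeof(void *);	/* hlist_head is one pointer */

	if (ram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
		size = 16384;
	if (size < 32)
		size = 32;
	return size;
}

int main(void)
{
	/* e.g. 512 buckets for 64 MiB on a 64-bit build, 16384 above 1 GiB */
	printf("64 MiB -> %lu buckets\n",
	       conntrack_htable_size(64UL << 20 >> PAGE_SHIFT));
	printf("2 GiB  -> %lu buckets\n",
	       conntrack_htable_size(2UL << 30 >> PAGE_SHIFT));
	return 0;
}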
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 49479d19457..59d8064eb52 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -704,7 +704,8 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
}
static int
-ctnetlink_parse_tuple(struct nlattr *cda[], struct nf_conntrack_tuple *tuple,
+ctnetlink_parse_tuple(const struct nlattr * const cda[],
+ struct nf_conntrack_tuple *tuple,
enum ctattr_tuple type, u_int8_t l3num)
{
struct nlattr *tb[CTA_TUPLE_MAX+1];
@@ -740,7 +741,7 @@ ctnetlink_parse_tuple(struct nlattr *cda[], struct nf_conntrack_tuple *tuple,
}
static inline int
-ctnetlink_parse_help(struct nlattr *attr, char **helper_name)
+ctnetlink_parse_help(const struct nlattr *attr, char **helper_name)
{
struct nlattr *tb[CTA_HELP_MAX+1];
@@ -764,7 +765,8 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
static int
ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
- struct nlmsghdr *nlh, struct nlattr *cda[])
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
{
struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_tuple tuple;
@@ -823,7 +825,8 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
static int
ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
- struct nlmsghdr *nlh, struct nlattr *cda[])
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
{
struct nf_conntrack_tuple_hash *h;
struct nf_conntrack_tuple tuple;
@@ -884,7 +887,7 @@ out:
static int
ctnetlink_parse_nat_setup(struct nf_conn *ct,
enum nf_nat_manip_type manip,
- struct nlattr *attr)
+ const struct nlattr *attr)
{
typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup;
@@ -914,7 +917,7 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
#endif
static int
-ctnetlink_change_status(struct nf_conn *ct, struct nlattr *cda[])
+ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
{
unsigned long d;
unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
@@ -940,7 +943,7 @@ ctnetlink_change_status(struct nf_conn *ct, struct nlattr *cda[])
}
static int
-ctnetlink_change_nat(struct nf_conn *ct, struct nlattr *cda[])
+ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[])
{
#ifdef CONFIG_NF_NAT_NEEDED
int ret;
@@ -966,7 +969,7 @@ ctnetlink_change_nat(struct nf_conn *ct, struct nlattr *cda[])
}
static inline int
-ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
+ctnetlink_change_helper(struct nf_conn *ct, const struct nlattr * const cda[])
{
struct nf_conntrack_helper *helper;
struct nf_conn_help *help = nfct_help(ct);
@@ -1028,7 +1031,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
}
static inline int
-ctnetlink_change_timeout(struct nf_conn *ct, struct nlattr *cda[])
+ctnetlink_change_timeout(struct nf_conn *ct, const struct nlattr * const cda[])
{
u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
@@ -1042,9 +1045,10 @@ ctnetlink_change_timeout(struct nf_conn *ct, struct nlattr *cda[])
}
static inline int
-ctnetlink_change_protoinfo(struct nf_conn *ct, struct nlattr *cda[])
+ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[])
{
- struct nlattr *tb[CTA_PROTOINFO_MAX+1], *attr = cda[CTA_PROTOINFO];
+ const struct nlattr *attr = cda[CTA_PROTOINFO];
+ struct nlattr *tb[CTA_PROTOINFO_MAX+1];
struct nf_conntrack_l4proto *l4proto;
int err = 0;
@@ -1061,7 +1065,7 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, struct nlattr *cda[])
#ifdef CONFIG_NF_NAT_NEEDED
static inline int
-change_nat_seq_adj(struct nf_nat_seq *natseq, struct nlattr *attr)
+change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr)
{
struct nlattr *cda[CTA_NAT_SEQ_MAX+1];
@@ -1089,7 +1093,8 @@ change_nat_seq_adj(struct nf_nat_seq *natseq, struct nlattr *attr)
}
static int
-ctnetlink_change_nat_seq_adj(struct nf_conn *ct, struct nlattr *cda[])
+ctnetlink_change_nat_seq_adj(struct nf_conn *ct,
+ const struct nlattr * const cda[])
{
int ret = 0;
struct nf_conn_nat *nat = nfct_nat(ct);
@@ -1120,7 +1125,8 @@ ctnetlink_change_nat_seq_adj(struct nf_conn *ct, struct nlattr *cda[])
#endif
static int
-ctnetlink_change_conntrack(struct nf_conn *ct, struct nlattr *cda[])
+ctnetlink_change_conntrack(struct nf_conn *ct,
+ const struct nlattr * const cda[])
{
int err;
@@ -1169,7 +1175,7 @@ ctnetlink_change_conntrack(struct nf_conn *ct, struct nlattr *cda[])
}
static struct nf_conn *
-ctnetlink_create_conntrack(struct nlattr *cda[],
+ctnetlink_create_conntrack(const struct nlattr * const cda[],
struct nf_conntrack_tuple *otuple,
struct nf_conntrack_tuple *rtuple,
u8 u3)
@@ -1304,7 +1310,8 @@ err1:
static int
ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
- struct nlmsghdr *nlh, struct nlattr *cda[])
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
{
struct nf_conntrack_tuple otuple, rtuple;
struct nf_conntrack_tuple_hash *h = NULL;
@@ -1629,7 +1636,8 @@ static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
static int
ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
- struct nlmsghdr *nlh, struct nlattr *cda[])
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
{
struct nf_conntrack_tuple tuple;
struct nf_conntrack_expect *exp;
@@ -1689,7 +1697,8 @@ out:
static int
ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
- struct nlmsghdr *nlh, struct nlattr *cda[])
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
{
struct nf_conntrack_expect *exp;
struct nf_conntrack_tuple tuple;
@@ -1767,13 +1776,15 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
return 0;
}
static int
-ctnetlink_change_expect(struct nf_conntrack_expect *x, struct nlattr *cda[])
+ctnetlink_change_expect(struct nf_conntrack_expect *x,
+ const struct nlattr * const cda[])
{
return -EOPNOTSUPP;
}
static int
-ctnetlink_create_expect(struct nlattr *cda[], u_int8_t u3, u32 pid, int report)
+ctnetlink_create_expect(const struct nlattr * const cda[], u_int8_t u3,
+ u32 pid, int report)
{
struct nf_conntrack_tuple tuple, mask, master_tuple;
struct nf_conntrack_tuple_hash *h = NULL;
@@ -1831,7 +1842,8 @@ out:
static int
ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
- struct nlmsghdr *nlh, struct nlattr *cda[])
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
{
struct nf_conntrack_tuple tuple;
struct nf_conntrack_expect *exp;
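The ctnetlink handlers only read the parsed attribute table, so the hunks above constify it at both levels: the attributes themselves (const struct nlattr) and the table of pointers (* const cda[]). Callers that still build a writable table have to cast when invoking such a handler, which is what the one-line change in nfnetlink.c below does. A small sketch of the two const levels, using a hypothetical nlattr_stub in place of struct nlattr:

#include <stdio.h>

struct nlattr_stub {		/* stand-in for struct nlattr */
	unsigned short nla_len;
	unsigned short nla_type;
};

/* Both the attributes and the table of pointers are read-only here: the
 * handler may inspect cda[i] but can neither modify an attribute nor
 * repoint an entry of the array. */
static void dump_attrs(const struct nlattr_stub *const cda[], int max)
{
	int i;

	for (i = 0; i <= max; i++)
		if (cda[i])
			printf("attr %d: type=%u len=%u\n", i,
			       cda[i]->nla_type, cda[i]->nla_len);
}

int main(void)
{
	struct nlattr_stub a = { .nla_len = 8, .nla_type = 1 };
	struct nlattr_stub *tb[2] = { NULL, &a };

	/* a writable table needs an explicit cast, mirroring the
	 * (const struct nlattr **) cast added in nfnetlink.c below */
	dump_attrs((const struct nlattr_stub *const *)tb, 1);
	return 0;
}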
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 92761a98837..eedc0c1ac7a 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -170,7 +170,7 @@ replay:
if (err < 0)
return err;
- err = nc->call(nfnl, skb, nlh, cda);
+ err = nc->call(nfnl, skb, nlh, (const struct nlattr **)cda);
if (err == -EAGAIN)
goto replay;
return err;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 66a6dd5c519..f900dc3194a 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -694,7 +694,8 @@ static struct notifier_block nfulnl_rtnl_notifier = {
static int
nfulnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
- struct nlmsghdr *nlh, struct nlattr *nfqa[])
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nfqa[])
{
return -ENOTSUPP;
}
@@ -716,7 +717,8 @@ static const struct nla_policy nfula_cfg_policy[NFULA_CFG_MAX+1] = {
static int
nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
- struct nlmsghdr *nlh, struct nlattr *nfula[])
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nfula[])
{
struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
u_int16_t group_num = ntohs(nfmsg->res_id);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 71daa0934b6..7a9dec9fb82 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -608,7 +608,8 @@ static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
- struct nlmsghdr *nlh, struct nlattr *nfqa[])
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nfqa[])
{
struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
u_int16_t queue_num = ntohs(nfmsg->res_id);
@@ -670,7 +671,8 @@ err_out_unlock:
static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
- struct nlmsghdr *nlh, struct nlattr *nfqa[])
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nfqa[])
{
return -ENOTSUPP;
}
@@ -687,7 +689,8 @@ static const struct nf_queue_handler nfqh = {
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
- struct nlmsghdr *nlh, struct nlattr *nfqa[])
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const nfqa[])
{
struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
u_int16_t queue_num = ntohs(nfmsg->res_id);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 025d1a0af78..f01955cce31 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -617,7 +617,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
int cpu;
/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
- if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > num_physpages)
+ if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
return NULL;
newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
@@ -736,16 +736,17 @@ xt_replace_table(struct xt_table *table,
}
EXPORT_SYMBOL_GPL(xt_replace_table);
-struct xt_table *xt_register_table(struct net *net, struct xt_table *table,
+struct xt_table *xt_register_table(struct net *net,
+ const struct xt_table *input_table,
struct xt_table_info *bootstrap,
struct xt_table_info *newinfo)
{
int ret;
struct xt_table_info *private;
- struct xt_table *t;
+ struct xt_table *t, *table;
/* Don't add one object to multiple lists. */
- table = kmemdup(table, sizeof(struct xt_table), GFP_KERNEL);
+ table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
if (!table) {
ret = -ENOMEM;
goto out;
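Besides the num_physpages -> totalram_pages substitution in the vmalloc sanity check, xt_register_table() now takes the caller's table as a const template and registers a kmemdup() copy under a local variable, so a statically defined struct xt_table can never end up linked into more than one list and the caller's object is never written to. A simplified userspace analogue of this register-a-private-copy pattern, with malloc/memcpy standing in for kmemdup():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct table_tmpl {			/* stand-in for struct xt_table */
	char name[16];
	struct table_tmpl *next;	/* list linkage lives only in the copy */
};

static struct table_tmpl *registered;

/* Register a private copy of the caller's read-only template, the pattern
 * xt_register_table() switches to above. */
static struct table_tmpl *register_table(const struct table_tmpl *tmpl)
{
	struct table_tmpl *t = malloc(sizeof(*t));

	if (!t)
		return NULL;
	memcpy(t, tmpl, sizeof(*t));
	t->next = registered;
	registered = t;
	return t;
}

int main(void)
{
	static const struct table_tmpl filter = { .name = "filter" };
	struct table_tmpl *t;

	register_table(&filter);
	register_table(&filter);	/* safe: each call gets its own node */
	for (t = registered; t; t = t->next)
		printf("registered %s\n", t->name);
	return 0;
}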
diff --git a/net/netfilter/xt_CONNMARK.c b/net/netfilter/xt_CONNMARK.c
index d6e5ab46327..593457068ae 100644
--- a/net/netfilter/xt_CONNMARK.c
+++ b/net/netfilter/xt_CONNMARK.c
@@ -36,45 +36,6 @@ MODULE_ALIAS("ip6t_CONNMARK");
#include <net/netfilter/nf_conntrack_ecache.h>
static unsigned int
-connmark_tg_v0(struct sk_buff *skb, const struct xt_target_param *par)
-{
- const struct xt_connmark_target_info *markinfo = par->targinfo;
- struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
- u_int32_t diff;
- u_int32_t mark;
- u_int32_t newmark;
-
- ct = nf_ct_get(skb, &ctinfo);
- if (ct) {
- switch(markinfo->mode) {
- case XT_CONNMARK_SET:
- newmark = (ct->mark & ~markinfo->mask) | markinfo->mark;
- if (newmark != ct->mark) {
- ct->mark = newmark;
- nf_conntrack_event_cache(IPCT_MARK, ct);
- }
- break;
- case XT_CONNMARK_SAVE:
- newmark = (ct->mark & ~markinfo->mask) |
- (skb->mark & markinfo->mask);
- if (ct->mark != newmark) {
- ct->mark = newmark;
- nf_conntrack_event_cache(IPCT_MARK, ct);
- }
- break;
- case XT_CONNMARK_RESTORE:
- mark = skb->mark;
- diff = (ct->mark ^ mark) & markinfo->mask;
- skb->mark = mark ^ diff;
- break;
- }
- }
-
- return XT_CONTINUE;
-}
-
-static unsigned int
connmark_tg(struct sk_buff *skb, const struct xt_target_param *par)
{
const struct xt_connmark_tginfo1 *info = par->targinfo;
@@ -112,30 +73,6 @@ connmark_tg(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool connmark_tg_check_v0(const struct xt_tgchk_param *par)
-{
- const struct xt_connmark_target_info *matchinfo = par->targinfo;
-
- if (matchinfo->mode == XT_CONNMARK_RESTORE) {
- if (strcmp(par->table, "mangle") != 0) {
- printk(KERN_WARNING "CONNMARK: restore can only be "
- "called from \"mangle\" table, not \"%s\"\n",
- par->table);
- return false;
- }
- }
- if (matchinfo->mark > 0xffffffff || matchinfo->mask > 0xffffffff) {
- printk(KERN_WARNING "CONNMARK: Only supports 32bit mark\n");
- return false;
- }
- if (nf_ct_l3proto_try_module_get(par->family) < 0) {
- printk(KERN_WARNING "can't load conntrack support for "
- "proto=%u\n", par->family);
- return false;
- }
- return true;
-}
-
static bool connmark_tg_check(const struct xt_tgchk_param *par)
{
if (nf_ct_l3proto_try_module_get(par->family) < 0) {
@@ -151,74 +88,25 @@ static void connmark_tg_destroy(const struct xt_tgdtor_param *par)
nf_ct_l3proto_module_put(par->family);
}
-#ifdef CONFIG_COMPAT
-struct compat_xt_connmark_target_info {
- compat_ulong_t mark, mask;
- u_int8_t mode;
- u_int8_t __pad1;
- u_int16_t __pad2;
-};
-
-static void connmark_tg_compat_from_user_v0(void *dst, void *src)
-{
- const struct compat_xt_connmark_target_info *cm = src;
- struct xt_connmark_target_info m = {
- .mark = cm->mark,
- .mask = cm->mask,
- .mode = cm->mode,
- };
- memcpy(dst, &m, sizeof(m));
-}
-
-static int connmark_tg_compat_to_user_v0(void __user *dst, void *src)
-{
- const struct xt_connmark_target_info *m = src;
- struct compat_xt_connmark_target_info cm = {
- .mark = m->mark,
- .mask = m->mask,
- .mode = m->mode,
- };
- return copy_to_user(dst, &cm, sizeof(cm)) ? -EFAULT : 0;
-}
-#endif /* CONFIG_COMPAT */
-
-static struct xt_target connmark_tg_reg[] __read_mostly = {
- {
- .name = "CONNMARK",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .checkentry = connmark_tg_check_v0,
- .destroy = connmark_tg_destroy,
- .target = connmark_tg_v0,
- .targetsize = sizeof(struct xt_connmark_target_info),
-#ifdef CONFIG_COMPAT
- .compatsize = sizeof(struct compat_xt_connmark_target_info),
- .compat_from_user = connmark_tg_compat_from_user_v0,
- .compat_to_user = connmark_tg_compat_to_user_v0,
-#endif
- .me = THIS_MODULE
- },
- {
- .name = "CONNMARK",
- .revision = 1,
- .family = NFPROTO_UNSPEC,
- .checkentry = connmark_tg_check,
- .target = connmark_tg,
- .targetsize = sizeof(struct xt_connmark_tginfo1),
- .destroy = connmark_tg_destroy,
- .me = THIS_MODULE,
- },
+static struct xt_target connmark_tg_reg __read_mostly = {
+ .name = "CONNMARK",
+ .revision = 1,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = connmark_tg_check,
+ .target = connmark_tg,
+ .targetsize = sizeof(struct xt_connmark_tginfo1),
+ .destroy = connmark_tg_destroy,
+ .me = THIS_MODULE,
};
static int __init connmark_tg_init(void)
{
- return xt_register_targets(connmark_tg_reg,
- ARRAY_SIZE(connmark_tg_reg));
+ return xt_register_target(&connmark_tg_reg);
}
static void __exit connmark_tg_exit(void)
{
- xt_unregister_targets(connmark_tg_reg, ARRAY_SIZE(connmark_tg_reg));
+ xt_unregister_target(&connmark_tg_reg);
}
module_init(connmark_tg_init);
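The CONNMARK target loses its revision 0 along with the CONFIG_COMPAT conversion helpers, which were only needed because the v0 structure stored marks in unsigned long (whose width differs between 32-bit userspace and a 64-bit kernel); revision 1 uses fixed-width fields, so registration shrinks to a single xt_register_target() call. The removed modes reduce to three mask operations, shown here as a runnable sketch on plain 32-bit values:

#include <stdio.h>

static unsigned int connmark_set(unsigned int ctmark, unsigned int mark,
				 unsigned int mask)
{
	return (ctmark & ~mask) | mark;			/* XT_CONNMARK_SET */
}

static unsigned int connmark_save(unsigned int ctmark, unsigned int skbmark,
				  unsigned int mask)
{
	return (ctmark & ~mask) | (skbmark & mask);	/* XT_CONNMARK_SAVE */
}

static unsigned int connmark_restore(unsigned int ctmark, unsigned int skbmark,
				     unsigned int mask)
{
	/* mark ^ ((ct->mark ^ mark) & mask), as in the removed code */
	return skbmark ^ ((ctmark ^ skbmark) & mask);	/* XT_CONNMARK_RESTORE */
}

int main(void)
{
	unsigned int ct = 0xdeadbeef, skb = 0x00000000, mask = 0x0000ffff;

	printf("set:     %#x\n", connmark_set(ct, 0x1234, mask));  /* 0xdead1234 */
	printf("save:    %#x\n", connmark_save(ct, skb, mask));    /* 0xdead0000 */
	printf("restore: %#x\n", connmark_restore(ct, skb, mask)); /* 0xbeef */
	return 0;
}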
diff --git a/net/netfilter/xt_DSCP.c b/net/netfilter/xt_DSCP.c
index 6a347e768f8..74ce8926005 100644
--- a/net/netfilter/xt_DSCP.c
+++ b/net/netfilter/xt_DSCP.c
@@ -18,7 +18,6 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_DSCP.h>
-#include <linux/netfilter_ipv4/ipt_TOS.h>
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("Xtables: DSCP/TOS field modification");
@@ -73,41 +72,6 @@ static bool dscp_tg_check(const struct xt_tgchk_param *par)
}
static unsigned int
-tos_tg_v0(struct sk_buff *skb, const struct xt_target_param *par)
-{
- const struct ipt_tos_target_info *info = par->targinfo;
- struct iphdr *iph = ip_hdr(skb);
- u_int8_t oldtos;
-
- if ((iph->tos & IPTOS_TOS_MASK) != info->tos) {
- if (!skb_make_writable(skb, sizeof(struct iphdr)))
- return NF_DROP;
-
- iph = ip_hdr(skb);
- oldtos = iph->tos;
- iph->tos = (iph->tos & IPTOS_PREC_MASK) | info->tos;
- csum_replace2(&iph->check, htons(oldtos), htons(iph->tos));
- }
-
- return XT_CONTINUE;
-}
-
-static bool tos_tg_check_v0(const struct xt_tgchk_param *par)
-{
- const struct ipt_tos_target_info *info = par->targinfo;
- const uint8_t tos = info->tos;
-
- if (tos != IPTOS_LOWDELAY && tos != IPTOS_THROUGHPUT &&
- tos != IPTOS_RELIABILITY && tos != IPTOS_MINCOST &&
- tos != IPTOS_NORMALSVC) {
- printk(KERN_WARNING "TOS: bad tos value %#x\n", tos);
- return false;
- }
-
- return true;
-}
-
-static unsigned int
tos_tg(struct sk_buff *skb, const struct xt_target_param *par)
{
const struct xt_tos_target_info *info = par->targinfo;
@@ -168,16 +132,6 @@ static struct xt_target dscp_tg_reg[] __read_mostly = {
},
{
.name = "TOS",
- .revision = 0,
- .family = NFPROTO_IPV4,
- .table = "mangle",
- .target = tos_tg_v0,
- .targetsize = sizeof(struct ipt_tos_target_info),
- .checkentry = tos_tg_check_v0,
- .me = THIS_MODULE,
- },
- {
- .name = "TOS",
.revision = 1,
.family = NFPROTO_IPV4,
.table = "mangle",
diff --git a/net/netfilter/xt_MARK.c b/net/netfilter/xt_MARK.c
index 67574bcfb8a..225f8d11e17 100644
--- a/net/netfilter/xt_MARK.c
+++ b/net/netfilter/xt_MARK.c
@@ -25,39 +25,6 @@ MODULE_ALIAS("ipt_MARK");
MODULE_ALIAS("ip6t_MARK");
static unsigned int
-mark_tg_v0(struct sk_buff *skb, const struct xt_target_param *par)
-{
- const struct xt_mark_target_info *markinfo = par->targinfo;
-
- skb->mark = markinfo->mark;
- return XT_CONTINUE;
-}
-
-static unsigned int
-mark_tg_v1(struct sk_buff *skb, const struct xt_target_param *par)
-{
- const struct xt_mark_target_info_v1 *markinfo = par->targinfo;
- int mark = 0;
-
- switch (markinfo->mode) {
- case XT_MARK_SET:
- mark = markinfo->mark;
- break;
-
- case XT_MARK_AND:
- mark = skb->mark & markinfo->mark;
- break;
-
- case XT_MARK_OR:
- mark = skb->mark | markinfo->mark;
- break;
- }
-
- skb->mark = mark;
- return XT_CONTINUE;
-}
-
-static unsigned int
mark_tg(struct sk_buff *skb, const struct xt_target_param *par)
{
const struct xt_mark_tginfo2 *info = par->targinfo;
@@ -66,135 +33,23 @@ mark_tg(struct sk_buff *skb, const struct xt_target_param *par)
return XT_CONTINUE;
}
-static bool mark_tg_check_v0(const struct xt_tgchk_param *par)
-{
- const struct xt_mark_target_info *markinfo = par->targinfo;
-
- if (markinfo->mark > 0xffffffff) {
- printk(KERN_WARNING "MARK: Only supports 32bit wide mark\n");
- return false;
- }
- return true;
-}
-
-static bool mark_tg_check_v1(const struct xt_tgchk_param *par)
-{
- const struct xt_mark_target_info_v1 *markinfo = par->targinfo;
-
- if (markinfo->mode != XT_MARK_SET
- && markinfo->mode != XT_MARK_AND
- && markinfo->mode != XT_MARK_OR) {
- printk(KERN_WARNING "MARK: unknown mode %u\n",
- markinfo->mode);
- return false;
- }
- if (markinfo->mark > 0xffffffff) {
- printk(KERN_WARNING "MARK: Only supports 32bit wide mark\n");
- return false;
- }
- return true;
-}
-
-#ifdef CONFIG_COMPAT
-struct compat_xt_mark_target_info {
- compat_ulong_t mark;
-};
-
-static void mark_tg_compat_from_user_v0(void *dst, void *src)
-{
- const struct compat_xt_mark_target_info *cm = src;
- struct xt_mark_target_info m = {
- .mark = cm->mark,
- };
- memcpy(dst, &m, sizeof(m));
-}
-
-static int mark_tg_compat_to_user_v0(void __user *dst, void *src)
-{
- const struct xt_mark_target_info *m = src;
- struct compat_xt_mark_target_info cm = {
- .mark = m->mark,
- };
- return copy_to_user(dst, &cm, sizeof(cm)) ? -EFAULT : 0;
-}
-
-struct compat_xt_mark_target_info_v1 {
- compat_ulong_t mark;
- u_int8_t mode;
- u_int8_t __pad1;
- u_int16_t __pad2;
-};
-
-static void mark_tg_compat_from_user_v1(void *dst, void *src)
-{
- const struct compat_xt_mark_target_info_v1 *cm = src;
- struct xt_mark_target_info_v1 m = {
- .mark = cm->mark,
- .mode = cm->mode,
- };
- memcpy(dst, &m, sizeof(m));
-}
-
-static int mark_tg_compat_to_user_v1(void __user *dst, void *src)
-{
- const struct xt_mark_target_info_v1 *m = src;
- struct compat_xt_mark_target_info_v1 cm = {
- .mark = m->mark,
- .mode = m->mode,
- };
- return copy_to_user(dst, &cm, sizeof(cm)) ? -EFAULT : 0;
-}
-#endif /* CONFIG_COMPAT */
-
-static struct xt_target mark_tg_reg[] __read_mostly = {
- {
- .name = "MARK",
- .family = NFPROTO_UNSPEC,
- .revision = 0,
- .checkentry = mark_tg_check_v0,
- .target = mark_tg_v0,
- .targetsize = sizeof(struct xt_mark_target_info),
-#ifdef CONFIG_COMPAT
- .compatsize = sizeof(struct compat_xt_mark_target_info),
- .compat_from_user = mark_tg_compat_from_user_v0,
- .compat_to_user = mark_tg_compat_to_user_v0,
-#endif
- .table = "mangle",
- .me = THIS_MODULE,
- },
- {
- .name = "MARK",
- .family = NFPROTO_UNSPEC,
- .revision = 1,
- .checkentry = mark_tg_check_v1,
- .target = mark_tg_v1,
- .targetsize = sizeof(struct xt_mark_target_info_v1),
-#ifdef CONFIG_COMPAT
- .compatsize = sizeof(struct compat_xt_mark_target_info_v1),
- .compat_from_user = mark_tg_compat_from_user_v1,
- .compat_to_user = mark_tg_compat_to_user_v1,
-#endif
- .table = "mangle",
- .me = THIS_MODULE,
- },
- {
- .name = "MARK",
- .revision = 2,
- .family = NFPROTO_UNSPEC,
- .target = mark_tg,
- .targetsize = sizeof(struct xt_mark_tginfo2),
- .me = THIS_MODULE,
- },
+static struct xt_target mark_tg_reg __read_mostly = {
+ .name = "MARK",
+ .revision = 2,
+ .family = NFPROTO_UNSPEC,
+ .target = mark_tg,
+ .targetsize = sizeof(struct xt_mark_tginfo2),
+ .me = THIS_MODULE,
};
static int __init mark_tg_init(void)
{
- return xt_register_targets(mark_tg_reg, ARRAY_SIZE(mark_tg_reg));
+ return xt_register_target(&mark_tg_reg);
}
static void __exit mark_tg_exit(void)
{
- xt_unregister_targets(mark_tg_reg, ARRAY_SIZE(mark_tg_reg));
+ xt_unregister_target(&mark_tg_reg);
}
module_init(mark_tg_init);
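MARK drops revisions 0 and 1 (plain set, and the set/and/or modes) together with their compat handlers, keeping only revision 2, whose xt_mark_tginfo2 carries a mark value plus a mask. Assuming the surviving target applies the usual rule skb->mark = (skb->mark & ~mask) ^ value (its body is not part of the hunks above, so this is an assumption), every removed mode is a special case of that one operation, which the following sketch demonstrates:

#include <stdio.h>

/* Assumed form of the revision 2 target: one mask/xor rule. */
static unsigned int mark_v2(unsigned int skbmark, unsigned int value,
			    unsigned int mask)
{
	return (skbmark & ~mask) ^ value;
}

int main(void)
{
	unsigned int skb = 0x0000f00f, m = 0x000000ff;

	printf("SET m: %#x\n", mark_v2(skb, m, 0xffffffff)); /* == m       */
	printf("AND m: %#x\n", mark_v2(skb, 0, ~m));          /* == skb & m */
	printf("OR  m: %#x\n", mark_v2(skb, m, m));           /* == skb | m */
	printf("XOR m: %#x\n", mark_v2(skb, m, 0));           /* == skb ^ m */
	return 0;
}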
diff --git a/net/netfilter/xt_RATEEST.c b/net/netfilter/xt_RATEEST.c
index 43f5676b1af..d80b8192e0d 100644
--- a/net/netfilter/xt_RATEEST.c
+++ b/net/netfilter/xt_RATEEST.c
@@ -74,7 +74,7 @@ static unsigned int
xt_rateest_tg(struct sk_buff *skb, const struct xt_target_param *par)
{
const struct xt_rateest_target_info *info = par->targinfo;
- struct gnet_stats_basic *stats = &info->est->bstats;
+ struct gnet_stats_basic_packed *stats = &info->est->bstats;
spin_lock_bh(&info->est->lock);
stats->bytes += skb->len;
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
index 86cacab7a4a..122aa8b0147 100644
--- a/net/netfilter/xt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -47,36 +47,6 @@ connmark_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return ((ct->mark & info->mask) == info->mark) ^ info->invert;
}
-static bool
-connmark_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
-{
- const struct xt_connmark_info *info = par->matchinfo;
- const struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
-
- ct = nf_ct_get(skb, &ctinfo);
- if (!ct)
- return false;
-
- return ((ct->mark & info->mask) == info->mark) ^ info->invert;
-}
-
-static bool connmark_mt_check_v0(const struct xt_mtchk_param *par)
-{
- const struct xt_connmark_info *cm = par->matchinfo;
-
- if (cm->mark > 0xffffffff || cm->mask > 0xffffffff) {
- printk(KERN_WARNING "connmark: only support 32bit mark\n");
- return false;
- }
- if (nf_ct_l3proto_try_module_get(par->family) < 0) {
- printk(KERN_WARNING "can't load conntrack support for "
- "proto=%u\n", par->family);
- return false;
- }
- return true;
-}
-
static bool connmark_mt_check(const struct xt_mtchk_param *par)
{
if (nf_ct_l3proto_try_module_get(par->family) < 0) {
@@ -92,74 +62,25 @@ static void connmark_mt_destroy(const struct xt_mtdtor_param *par)
nf_ct_l3proto_module_put(par->family);
}
-#ifdef CONFIG_COMPAT
-struct compat_xt_connmark_info {
- compat_ulong_t mark, mask;
- u_int8_t invert;
- u_int8_t __pad1;
- u_int16_t __pad2;
-};
-
-static void connmark_mt_compat_from_user_v0(void *dst, void *src)
-{
- const struct compat_xt_connmark_info *cm = src;
- struct xt_connmark_info m = {
- .mark = cm->mark,
- .mask = cm->mask,
- .invert = cm->invert,
- };
- memcpy(dst, &m, sizeof(m));
-}
-
-static int connmark_mt_compat_to_user_v0(void __user *dst, void *src)
-{
- const struct xt_connmark_info *m = src;
- struct compat_xt_connmark_info cm = {
- .mark = m->mark,
- .mask = m->mask,
- .invert = m->invert,
- };
- return copy_to_user(dst, &cm, sizeof(cm)) ? -EFAULT : 0;
-}
-#endif /* CONFIG_COMPAT */
-
-static struct xt_match connmark_mt_reg[] __read_mostly = {
- {
- .name = "connmark",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .checkentry = connmark_mt_check_v0,
- .match = connmark_mt_v0,
- .destroy = connmark_mt_destroy,
- .matchsize = sizeof(struct xt_connmark_info),
-#ifdef CONFIG_COMPAT
- .compatsize = sizeof(struct compat_xt_connmark_info),
- .compat_from_user = connmark_mt_compat_from_user_v0,
- .compat_to_user = connmark_mt_compat_to_user_v0,
-#endif
- .me = THIS_MODULE
- },
- {
- .name = "connmark",
- .revision = 1,
- .family = NFPROTO_UNSPEC,
- .checkentry = connmark_mt_check,
- .match = connmark_mt,
- .matchsize = sizeof(struct xt_connmark_mtinfo1),
- .destroy = connmark_mt_destroy,
- .me = THIS_MODULE,
- },
+static struct xt_match connmark_mt_reg __read_mostly = {
+ .name = "connmark",
+ .revision = 1,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = connmark_mt_check,
+ .match = connmark_mt,
+ .matchsize = sizeof(struct xt_connmark_mtinfo1),
+ .destroy = connmark_mt_destroy,
+ .me = THIS_MODULE,
};
static int __init connmark_mt_init(void)
{
- return xt_register_matches(connmark_mt_reg,
- ARRAY_SIZE(connmark_mt_reg));
+ return xt_register_match(&connmark_mt_reg);
}
static void __exit connmark_mt_exit(void)
{
- xt_unregister_matches(connmark_mt_reg, ARRAY_SIZE(connmark_mt_reg));
+ xt_unregister_match(&connmark_mt_reg);
}
module_init(connmark_mt_init);
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
index fc581800698..6dc4652f2fe 100644
--- a/net/netfilter/xt_conntrack.c
+++ b/net/netfilter/xt_conntrack.c
@@ -19,101 +19,12 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
-MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: connection tracking state match");
MODULE_ALIAS("ipt_conntrack");
MODULE_ALIAS("ip6t_conntrack");
static bool
-conntrack_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
-{
- const struct xt_conntrack_info *sinfo = par->matchinfo;
- const struct nf_conn *ct;
- enum ip_conntrack_info ctinfo;
- unsigned int statebit;
-
- ct = nf_ct_get(skb, &ctinfo);
-
-#define FWINV(bool, invflg) ((bool) ^ !!(sinfo->invflags & (invflg)))
-
- if (ct == &nf_conntrack_untracked)
- statebit = XT_CONNTRACK_STATE_UNTRACKED;
- else if (ct)
- statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
- else
- statebit = XT_CONNTRACK_STATE_INVALID;
-
- if (sinfo->flags & XT_CONNTRACK_STATE) {
- if (ct) {
- if (test_bit(IPS_SRC_NAT_BIT, &ct->status))
- statebit |= XT_CONNTRACK_STATE_SNAT;
- if (test_bit(IPS_DST_NAT_BIT, &ct->status))
- statebit |= XT_CONNTRACK_STATE_DNAT;
- }
- if (FWINV((statebit & sinfo->statemask) == 0,
- XT_CONNTRACK_STATE))
- return false;
- }
-
- if (ct == NULL) {
- if (sinfo->flags & ~XT_CONNTRACK_STATE)
- return false;
- return true;
- }
-
- if (sinfo->flags & XT_CONNTRACK_PROTO &&
- FWINV(nf_ct_protonum(ct) !=
- sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum,
- XT_CONNTRACK_PROTO))
- return false;
-
- if (sinfo->flags & XT_CONNTRACK_ORIGSRC &&
- FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip &
- sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) !=
- sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip,
- XT_CONNTRACK_ORIGSRC))
- return false;
-
- if (sinfo->flags & XT_CONNTRACK_ORIGDST &&
- FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip &
- sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) !=
- sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip,
- XT_CONNTRACK_ORIGDST))
- return false;
-
- if (sinfo->flags & XT_CONNTRACK_REPLSRC &&
- FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip &
- sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) !=
- sinfo->tuple[IP_CT_DIR_REPLY].src.ip,
- XT_CONNTRACK_REPLSRC))
- return false;
-
- if (sinfo->flags & XT_CONNTRACK_REPLDST &&
- FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip &
- sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) !=
- sinfo->tuple[IP_CT_DIR_REPLY].dst.ip,
- XT_CONNTRACK_REPLDST))
- return false;
-
- if (sinfo->flags & XT_CONNTRACK_STATUS &&
- FWINV((ct->status & sinfo->statusmask) == 0,
- XT_CONNTRACK_STATUS))
- return false;
-
- if(sinfo->flags & XT_CONNTRACK_EXPIRES) {
- unsigned long expires = timer_pending(&ct->timeout) ?
- (ct->timeout.expires - jiffies)/HZ : 0;
-
- if (FWINV(!(expires >= sinfo->expires_min &&
- expires <= sinfo->expires_max),
- XT_CONNTRACK_EXPIRES))
- return false;
- }
- return true;
-#undef FWINV
-}
-
-static bool
conntrack_addrcmp(const union nf_inet_addr *kaddr,
const union nf_inet_addr *uaddr,
const union nf_inet_addr *umask, unsigned int l3proto)
@@ -337,73 +248,9 @@ static void conntrack_mt_destroy_v1(const struct xt_mtdtor_param *par)
conntrack_mt_destroy(par);
}
-#ifdef CONFIG_COMPAT
-struct compat_xt_conntrack_info
-{
- compat_uint_t statemask;
- compat_uint_t statusmask;
- struct ip_conntrack_old_tuple tuple[IP_CT_DIR_MAX];
- struct in_addr sipmsk[IP_CT_DIR_MAX];
- struct in_addr dipmsk[IP_CT_DIR_MAX];
- compat_ulong_t expires_min;
- compat_ulong_t expires_max;
- u_int8_t flags;
- u_int8_t invflags;
-};
-
-static void conntrack_mt_compat_from_user_v0(void *dst, void *src)
-{
- const struct compat_xt_conntrack_info *cm = src;
- struct xt_conntrack_info m = {
- .statemask = cm->statemask,
- .statusmask = cm->statusmask,
- .expires_min = cm->expires_min,
- .expires_max = cm->expires_max,
- .flags = cm->flags,
- .invflags = cm->invflags,
- };
- memcpy(m.tuple, cm->tuple, sizeof(m.tuple));
- memcpy(m.sipmsk, cm->sipmsk, sizeof(m.sipmsk));
- memcpy(m.dipmsk, cm->dipmsk, sizeof(m.dipmsk));
- memcpy(dst, &m, sizeof(m));
-}
-
-static int conntrack_mt_compat_to_user_v0(void __user *dst, void *src)
-{
- const struct xt_conntrack_info *m = src;
- struct compat_xt_conntrack_info cm = {
- .statemask = m->statemask,
- .statusmask = m->statusmask,
- .expires_min = m->expires_min,
- .expires_max = m->expires_max,
- .flags = m->flags,
- .invflags = m->invflags,
- };
- memcpy(cm.tuple, m->tuple, sizeof(cm.tuple));
- memcpy(cm.sipmsk, m->sipmsk, sizeof(cm.sipmsk));
- memcpy(cm.dipmsk, m->dipmsk, sizeof(cm.dipmsk));
- return copy_to_user(dst, &cm, sizeof(cm)) ? -EFAULT : 0;
-}
-#endif
-
static struct xt_match conntrack_mt_reg[] __read_mostly = {
{
.name = "conntrack",
- .revision = 0,
- .family = NFPROTO_IPV4,
- .match = conntrack_mt_v0,
- .checkentry = conntrack_mt_check,
- .destroy = conntrack_mt_destroy,
- .matchsize = sizeof(struct xt_conntrack_info),
- .me = THIS_MODULE,
-#ifdef CONFIG_COMPAT
- .compatsize = sizeof(struct compat_xt_conntrack_info),
- .compat_from_user = conntrack_mt_compat_from_user_v0,
- .compat_to_user = conntrack_mt_compat_to_user_v0,
-#endif
- },
- {
- .name = "conntrack",
.revision = 1,
.family = NFPROTO_UNSPEC,
.matchsize = sizeof(struct xt_conntrack_mtinfo1),
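The removed conntrack revision 0 match is built around the FWINV macro, the classic netfilter idiom for optional negation: each test result is XORed with !!(invflags & flag), so a set invert bit flips the outcome and a clear one leaves it alone. A runnable illustration of the idiom in isolation:

#include <stdio.h>

#define FLAG_STATE 0x01

/* The FWINV pattern from the removed conntrack_mt_v0(); !! collapses the
 * masked flag word to 0 or 1 so the XOR acts as a conditional NOT. */
static int fwinv(int cond, unsigned int invflags, unsigned int flag)
{
	return cond ^ !!(invflags & flag);
}

int main(void)
{
	printf("hit, not inverted : %d\n", fwinv(1, 0, FLAG_STATE));
	printf("hit, inverted     : %d\n", fwinv(1, FLAG_STATE, FLAG_STATE));
	printf("miss, inverted    : %d\n", fwinv(0, FLAG_STATE, FLAG_STATE));
	return 0;
}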
diff --git a/net/netfilter/xt_dscp.c b/net/netfilter/xt_dscp.c
index c3f8085460d..0280d3a8c16 100644
--- a/net/netfilter/xt_dscp.c
+++ b/net/netfilter/xt_dscp.c
@@ -15,7 +15,6 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_dscp.h>
-#include <linux/netfilter_ipv4/ipt_tos.h>
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("Xtables: DSCP/TOS field match");
@@ -55,14 +54,6 @@ static bool dscp_mt_check(const struct xt_mtchk_param *par)
return true;
}
-static bool
-tos_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
-{
- const struct ipt_tos_info *info = par->matchinfo;
-
- return (ip_hdr(skb)->tos == info->tos) ^ info->invert;
-}
-
static bool tos_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
const struct xt_tos_match_info *info = par->matchinfo;
@@ -94,14 +85,6 @@ static struct xt_match dscp_mt_reg[] __read_mostly = {
},
{
.name = "tos",
- .revision = 0,
- .family = NFPROTO_IPV4,
- .match = tos_mt_v0,
- .matchsize = sizeof(struct ipt_tos_info),
- .me = THIS_MODULE,
- },
- {
- .name = "tos",
.revision = 1,
.family = NFPROTO_IPV4,
.match = tos_mt,
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 219dcdbe388..dd16e404424 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -194,9 +194,9 @@ static int htable_create_v0(struct xt_hashlimit_info *minfo, u_int8_t family)
if (minfo->cfg.size)
size = minfo->cfg.size;
else {
- size = ((num_physpages << PAGE_SHIFT) / 16384) /
+ size = ((totalram_pages << PAGE_SHIFT) / 16384) /
sizeof(struct list_head);
- if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
+ if (totalram_pages > (1024 * 1024 * 1024 / PAGE_SIZE))
size = 8192;
if (size < 16)
size = 16;
@@ -266,9 +266,9 @@ static int htable_create(struct xt_hashlimit_mtinfo1 *minfo, u_int8_t family)
if (minfo->cfg.size) {
size = minfo->cfg.size;
} else {
- size = (num_physpages << PAGE_SHIFT) / 16384 /
+ size = (totalram_pages << PAGE_SHIFT) / 16384 /
sizeof(struct list_head);
- if (num_physpages > 1024 * 1024 * 1024 / PAGE_SIZE)
+ if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
size = 8192;
if (size < 16)
size = 16;
diff --git a/net/netfilter/xt_iprange.c b/net/netfilter/xt_iprange.c
index 501f9b62318..ffc96387d55 100644
--- a/net/netfilter/xt_iprange.c
+++ b/net/netfilter/xt_iprange.c
@@ -14,40 +14,6 @@
#include <linux/ipv6.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_iprange.h>
-#include <linux/netfilter_ipv4/ipt_iprange.h>
-
-static bool
-iprange_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
-{
- const struct ipt_iprange_info *info = par->matchinfo;
- const struct iphdr *iph = ip_hdr(skb);
-
- if (info->flags & IPRANGE_SRC) {
- if ((ntohl(iph->saddr) < ntohl(info->src.min_ip)
- || ntohl(iph->saddr) > ntohl(info->src.max_ip))
- ^ !!(info->flags & IPRANGE_SRC_INV)) {
- pr_debug("src IP %pI4 NOT in range %s%pI4-%pI4\n",
- &iph->saddr,
- info->flags & IPRANGE_SRC_INV ? "(INV) " : "",
- &info->src.min_ip,
- &info->src.max_ip);
- return false;
- }
- }
- if (info->flags & IPRANGE_DST) {
- if ((ntohl(iph->daddr) < ntohl(info->dst.min_ip)
- || ntohl(iph->daddr) > ntohl(info->dst.max_ip))
- ^ !!(info->flags & IPRANGE_DST_INV)) {
- pr_debug("dst IP %pI4 NOT in range %s%pI4-%pI4\n",
- &iph->daddr,
- info->flags & IPRANGE_DST_INV ? "(INV) " : "",
- &info->dst.min_ip,
- &info->dst.max_ip);
- return false;
- }
- }
- return true;
-}
static bool
iprange_mt4(const struct sk_buff *skb, const struct xt_match_param *par)
@@ -127,14 +93,6 @@ iprange_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
static struct xt_match iprange_mt_reg[] __read_mostly = {
{
.name = "iprange",
- .revision = 0,
- .family = NFPROTO_IPV4,
- .match = iprange_mt_v0,
- .matchsize = sizeof(struct ipt_iprange_info),
- .me = THIS_MODULE,
- },
- {
- .name = "iprange",
.revision = 1,
.family = NFPROTO_IPV4,
.match = iprange_mt4,
@@ -164,7 +122,8 @@ static void __exit iprange_mt_exit(void)
module_init(iprange_mt_init);
module_exit(iprange_mt_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>, Jan Engelhardt <jengelh@computergmbh.de>");
+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: arbitrary IPv4 range matching");
MODULE_ALIAS("ipt_iprange");
MODULE_ALIAS("ip6t_iprange");
diff --git a/net/netfilter/xt_mark.c b/net/netfilter/xt_mark.c
index 10b9e34bbc5..1db07d8125f 100644
--- a/net/netfilter/xt_mark.c
+++ b/net/netfilter/xt_mark.c
@@ -3,7 +3,7 @@
*
* (C) 1999-2001 Marc Boucher <marc@mbsi.ca>
* Copyright © CC Computer Consultants GmbH, 2007 - 2008
- * Jan Engelhardt <jengelh@computergmbh.de>
+ * Jan Engelhardt <jengelh@medozas.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -23,14 +23,6 @@ MODULE_ALIAS("ipt_mark");
MODULE_ALIAS("ip6t_mark");
static bool
-mark_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
-{
- const struct xt_mark_info *info = par->matchinfo;
-
- return ((skb->mark & info->mask) == info->mark) ^ info->invert;
-}
-
-static bool
mark_mt(const struct sk_buff *skb, const struct xt_match_param *par)
{
const struct xt_mark_mtinfo1 *info = par->matchinfo;
@@ -38,81 +30,23 @@ mark_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return ((skb->mark & info->mask) == info->mark) ^ info->invert;
}
-static bool mark_mt_check_v0(const struct xt_mtchk_param *par)
-{
- const struct xt_mark_info *minfo = par->matchinfo;
-
- if (minfo->mark > 0xffffffff || minfo->mask > 0xffffffff) {
- printk(KERN_WARNING "mark: only supports 32bit mark\n");
- return false;
- }
- return true;
-}
-
-#ifdef CONFIG_COMPAT
-struct compat_xt_mark_info {
- compat_ulong_t mark, mask;
- u_int8_t invert;
- u_int8_t __pad1;
- u_int16_t __pad2;
-};
-
-static void mark_mt_compat_from_user_v0(void *dst, void *src)
-{
- const struct compat_xt_mark_info *cm = src;
- struct xt_mark_info m = {
- .mark = cm->mark,
- .mask = cm->mask,
- .invert = cm->invert,
- };
- memcpy(dst, &m, sizeof(m));
-}
-
-static int mark_mt_compat_to_user_v0(void __user *dst, void *src)
-{
- const struct xt_mark_info *m = src;
- struct compat_xt_mark_info cm = {
- .mark = m->mark,
- .mask = m->mask,
- .invert = m->invert,
- };
- return copy_to_user(dst, &cm, sizeof(cm)) ? -EFAULT : 0;
-}
-#endif /* CONFIG_COMPAT */
-
-static struct xt_match mark_mt_reg[] __read_mostly = {
- {
- .name = "mark",
- .revision = 0,
- .family = NFPROTO_UNSPEC,
- .checkentry = mark_mt_check_v0,
- .match = mark_mt_v0,
- .matchsize = sizeof(struct xt_mark_info),
-#ifdef CONFIG_COMPAT
- .compatsize = sizeof(struct compat_xt_mark_info),
- .compat_from_user = mark_mt_compat_from_user_v0,
- .compat_to_user = mark_mt_compat_to_user_v0,
-#endif
- .me = THIS_MODULE,
- },
- {
- .name = "mark",
- .revision = 1,
- .family = NFPROTO_UNSPEC,
- .match = mark_mt,
- .matchsize = sizeof(struct xt_mark_mtinfo1),
- .me = THIS_MODULE,
- },
+static struct xt_match mark_mt_reg __read_mostly = {
+ .name = "mark",
+ .revision = 1,
+ .family = NFPROTO_UNSPEC,
+ .match = mark_mt,
+ .matchsize = sizeof(struct xt_mark_mtinfo1),
+ .me = THIS_MODULE,
};
static int __init mark_mt_init(void)
{
- return xt_register_matches(mark_mt_reg, ARRAY_SIZE(mark_mt_reg));
+ return xt_register_match(&mark_mt_reg);
}
static void __exit mark_mt_exit(void)
{
- xt_unregister_matches(mark_mt_reg, ARRAY_SIZE(mark_mt_reg));
+ xt_unregister_match(&mark_mt_reg);
}
module_init(mark_mt_init);
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index 0f482e2440b..63e19050465 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -70,7 +70,8 @@ static void xt_osf_finger_free_rcu(struct rcu_head *rcu_head)
}
static int xt_osf_add_callback(struct sock *ctnl, struct sk_buff *skb,
- struct nlmsghdr *nlh, struct nlattr *osf_attrs[])
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const osf_attrs[])
{
struct xt_osf_user_finger *f;
struct xt_osf_finger *kf = NULL, *sf;
@@ -112,7 +113,8 @@ static int xt_osf_add_callback(struct sock *ctnl, struct sk_buff *skb,
}
static int xt_osf_remove_callback(struct sock *ctnl, struct sk_buff *skb,
- struct nlmsghdr *nlh, struct nlattr *osf_attrs[])
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const osf_attrs[])
{
struct xt_osf_user_finger *f;
struct xt_osf_finger *sf;
diff --git a/net/netfilter/xt_owner.c b/net/netfilter/xt_owner.c
index 22b2a5e881e..d24c76dffee 100644
--- a/net/netfilter/xt_owner.c
+++ b/net/netfilter/xt_owner.c
@@ -5,7 +5,6 @@
* (C) 2000 Marc Boucher <marc@mbsi.ca>
*
* Copyright © CC Computer Consultants GmbH, 2007 - 2008
- * <jengelh@computergmbh.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -17,60 +16,6 @@
#include <net/sock.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_owner.h>
-#include <linux/netfilter_ipv4/ipt_owner.h>
-#include <linux/netfilter_ipv6/ip6t_owner.h>
-
-static bool
-owner_mt_v0(const struct sk_buff *skb, const struct xt_match_param *par)
-{
- const struct ipt_owner_info *info = par->matchinfo;
- const struct file *filp;
-
- if (skb->sk == NULL || skb->sk->sk_socket == NULL)
- return false;
-
- filp = skb->sk->sk_socket->file;
- if (filp == NULL)
- return false;
-
- if (info->match & IPT_OWNER_UID)
- if ((filp->f_cred->fsuid != info->uid) ^
- !!(info->invert & IPT_OWNER_UID))
- return false;
-
- if (info->match & IPT_OWNER_GID)
- if ((filp->f_cred->fsgid != info->gid) ^
- !!(info->invert & IPT_OWNER_GID))
- return false;
-
- return true;
-}
-
-static bool
-owner_mt6_v0(const struct sk_buff *skb, const struct xt_match_param *par)
-{
- const struct ip6t_owner_info *info = par->matchinfo;
- const struct file *filp;
-
- if (skb->sk == NULL || skb->sk->sk_socket == NULL)
- return false;
-
- filp = skb->sk->sk_socket->file;
- if (filp == NULL)
- return false;
-
- if (info->match & IP6T_OWNER_UID)
- if ((filp->f_cred->fsuid != info->uid) ^
- !!(info->invert & IP6T_OWNER_UID))
- return false;
-
- if (info->match & IP6T_OWNER_GID)
- if ((filp->f_cred->fsgid != info->gid) ^
- !!(info->invert & IP6T_OWNER_GID))
- return false;
-
- return true;
-}
static bool
owner_mt(const struct sk_buff *skb, const struct xt_match_param *par)
@@ -107,81 +52,30 @@ owner_mt(const struct sk_buff *skb, const struct xt_match_param *par)
return true;
}
-static bool owner_mt_check_v0(const struct xt_mtchk_param *par)
-{
- const struct ipt_owner_info *info = par->matchinfo;
-
- if (info->match & (IPT_OWNER_PID | IPT_OWNER_SID | IPT_OWNER_COMM)) {
- printk(KERN_WARNING KBUILD_MODNAME
- ": PID, SID and command matching is not "
- "supported anymore\n");
- return false;
- }
-
- return true;
-}
-
-static bool owner_mt6_check_v0(const struct xt_mtchk_param *par)
-{
- const struct ip6t_owner_info *info = par->matchinfo;
-
- if (info->match & (IP6T_OWNER_PID | IP6T_OWNER_SID)) {
- printk(KERN_WARNING KBUILD_MODNAME
- ": PID and SID matching is not supported anymore\n");
- return false;
- }
-
- return true;
-}
-
-static struct xt_match owner_mt_reg[] __read_mostly = {
- {
- .name = "owner",
- .revision = 0,
- .family = NFPROTO_IPV4,
- .match = owner_mt_v0,
- .matchsize = sizeof(struct ipt_owner_info),
- .checkentry = owner_mt_check_v0,
- .hooks = (1 << NF_INET_LOCAL_OUT) |
- (1 << NF_INET_POST_ROUTING),
- .me = THIS_MODULE,
- },
- {
- .name = "owner",
- .revision = 0,
- .family = NFPROTO_IPV6,
- .match = owner_mt6_v0,
- .matchsize = sizeof(struct ip6t_owner_info),
- .checkentry = owner_mt6_check_v0,
- .hooks = (1 << NF_INET_LOCAL_OUT) |
- (1 << NF_INET_POST_ROUTING),
- .me = THIS_MODULE,
- },
- {
- .name = "owner",
- .revision = 1,
- .family = NFPROTO_UNSPEC,
- .match = owner_mt,
- .matchsize = sizeof(struct xt_owner_match_info),
- .hooks = (1 << NF_INET_LOCAL_OUT) |
- (1 << NF_INET_POST_ROUTING),
- .me = THIS_MODULE,
- },
+static struct xt_match owner_mt_reg __read_mostly = {
+ .name = "owner",
+ .revision = 1,
+ .family = NFPROTO_UNSPEC,
+ .match = owner_mt,
+ .matchsize = sizeof(struct xt_owner_match_info),
+ .hooks = (1 << NF_INET_LOCAL_OUT) |
+ (1 << NF_INET_POST_ROUTING),
+ .me = THIS_MODULE,
};
static int __init owner_mt_init(void)
{
- return xt_register_matches(owner_mt_reg, ARRAY_SIZE(owner_mt_reg));
+ return xt_register_match(&owner_mt_reg);
}
static void __exit owner_mt_exit(void)
{
- xt_unregister_matches(owner_mt_reg, ARRAY_SIZE(owner_mt_reg));
+ xt_unregister_match(&owner_mt_reg);
}
module_init(owner_mt_init);
module_exit(owner_mt_exit);
-MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: socket owner matching");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_owner");
diff --git a/net/netfilter/xt_quota.c b/net/netfilter/xt_quota.c
index 98fc190e8f0..390b7d09fe5 100644
--- a/net/netfilter/xt_quota.c
+++ b/net/netfilter/xt_quota.c
@@ -52,7 +52,7 @@ static bool quota_mt_check(const struct xt_mtchk_param *par)
q->master = kmalloc(sizeof(*q->master), GFP_KERNEL);
if (q->master == NULL)
- return -ENOMEM;
+ return false;
q->master->quota = q->quota;
return true;
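The xt_quota change is a genuine bug fix rather than a cleanup: quota_mt_check() is declared bool, so the old "return -ENOMEM" was converted to true and a failed kmalloc() looked like a successful check. A minimal demonstration of why that conversion hides the failure:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* In a bool-returning checkentry hook, any nonzero value becomes true. */
static bool broken_check(void *mem)
{
	if (mem == NULL)
		return -ENOMEM;	/* bug: reported as success */
	return true;
}

static bool fixed_check(void *mem)
{
	if (mem == NULL)
		return false;	/* correct: report the failure */
	return true;
}

int main(void)
{
	printf("broken: %d  fixed: %d\n", broken_check(NULL), fixed_check(NULL));
	return 0;
}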
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 16e6c4378ff..6ce00205f34 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -185,8 +185,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
return 0;
cfg_unlbl_map_add_failure:
- if (entry != NULL)
- kfree(entry->domain);
+ kfree(entry->domain);
kfree(entry);
kfree(addrmap);
kfree(map4);
@@ -385,8 +384,7 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
cfg_cipsov4_map_add_failure:
cipso_v4_doi_putdef(doi_def);
- if (entry != NULL)
- kfree(entry->domain);
+ kfree(entry->domain);
kfree(entry);
kfree(addrmap);
kfree(addrinfo);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 2936fa3b6dc..55180b99562 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -83,6 +83,11 @@ struct netlink_sock {
struct module *module;
};
+struct listeners_rcu_head {
+ struct rcu_head rcu_head;
+ void *ptr;
+};
+
#define NETLINK_KERNEL_SOCKET 0x1
#define NETLINK_RECV_PKTINFO 0x2
#define NETLINK_BROADCAST_SEND_ERROR 0x4
@@ -172,9 +177,11 @@ static void netlink_sock_destruct(struct sock *sk)
* this, _but_ remember, it adds useless work on UP machines.
*/
-static void netlink_table_grab(void)
+void netlink_table_grab(void)
__acquires(nl_table_lock)
{
+ might_sleep();
+
write_lock_irq(&nl_table_lock);
if (atomic_read(&nl_table_users)) {
@@ -195,7 +202,7 @@ static void netlink_table_grab(void)
}
}
-static void netlink_table_ungrab(void)
+void netlink_table_ungrab(void)
__releases(nl_table_lock)
{
write_unlock_irq(&nl_table_lock);
@@ -1356,7 +1363,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
struct netlink_sock *nlk = nlk_sk(sk);
int noblock = flags&MSG_DONTWAIT;
size_t copied;
- struct sk_buff *skb;
+ struct sk_buff *skb, *frag __maybe_unused = NULL;
int err;
if (flags&MSG_OOB)
@@ -1368,6 +1375,35 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
if (skb == NULL)
goto out;
+#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
+ if (unlikely(skb_shinfo(skb)->frag_list)) {
+ bool need_compat = !!(flags & MSG_CMSG_COMPAT);
+
+ /*
+ * If this skb has a frag_list, then here that means that
+ * we will have to use the frag_list skb for compat tasks
+ * and the regular skb for non-compat tasks.
+ *
+ * The skb might (and likely will) be cloned, so we can't
+ * just reset frag_list and go on with things -- we need to
+ * keep that. For the compat case that's easy -- simply get
+ * a reference to the compat skb and free the regular one
+ * including the frag. For the non-compat case, we need to
+ * avoid sending the frag to the user -- so assign NULL but
+ * restore it below before freeing the skb.
+ */
+ if (need_compat) {
+ struct sk_buff *compskb = skb_shinfo(skb)->frag_list;
+ skb_get(compskb);
+ kfree_skb(skb);
+ skb = compskb;
+ } else {
+ frag = skb_shinfo(skb)->frag_list;
+ skb_shinfo(skb)->frag_list = NULL;
+ }
+ }
+#endif
+
msg->msg_namelen = 0;
copied = skb->len;
@@ -1398,6 +1434,11 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
siocb->scm->creds = *NETLINK_CREDS(skb);
if (flags & MSG_TRUNC)
copied = skb->len;
+
+#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
+ skb_shinfo(skb)->frag_list = frag;
+#endif
+
skb_free_datagram(sk, skb);
if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
@@ -1453,7 +1494,8 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
if (groups < 32)
groups = 32;
- listeners = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
+ listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
+ GFP_KERNEL);
if (!listeners)
goto out_sock_release;
@@ -1501,6 +1543,49 @@ netlink_kernel_release(struct sock *sk)
EXPORT_SYMBOL(netlink_kernel_release);
+static void netlink_free_old_listeners(struct rcu_head *rcu_head)
+{
+ struct listeners_rcu_head *lrh;
+
+ lrh = container_of(rcu_head, struct listeners_rcu_head, rcu_head);
+ kfree(lrh->ptr);
+}
+
+int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
+{
+ unsigned long *listeners, *old = NULL;
+ struct listeners_rcu_head *old_rcu_head;
+ struct netlink_table *tbl = &nl_table[sk->sk_protocol];
+
+ if (groups < 32)
+ groups = 32;
+
+ if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
+ listeners = kzalloc(NLGRPSZ(groups) +
+ sizeof(struct listeners_rcu_head),
+ GFP_ATOMIC);
+ if (!listeners)
+ return -ENOMEM;
+ old = tbl->listeners;
+ memcpy(listeners, old, NLGRPSZ(tbl->groups));
+ rcu_assign_pointer(tbl->listeners, listeners);
+ /*
+ * Free the old memory after an RCU grace period so we
+ * don't leak it. We use call_rcu() here in order to be
+ * able to call this function from atomic contexts. The
+ * allocation of this memory will have reserved enough
+ * space for struct listeners_rcu_head at the end.
+ */
+ old_rcu_head = (void *)(tbl->listeners +
+ NLGRPLONGS(tbl->groups));
+ old_rcu_head->ptr = old;
+ call_rcu(&old_rcu_head->rcu_head, netlink_free_old_listeners);
+ }
+ tbl->groups = groups;
+
+ return 0;
+}
+
/**
* netlink_change_ngroups - change number of multicast groups
*
@@ -1515,33 +1600,14 @@ EXPORT_SYMBOL(netlink_kernel_release);
*/
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
- unsigned long *listeners, *old = NULL;
- struct netlink_table *tbl = &nl_table[sk->sk_protocol];
- int err = 0;
-
- if (groups < 32)
- groups = 32;
+ int err;
netlink_table_grab();
- if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
- listeners = kzalloc(NLGRPSZ(groups), GFP_ATOMIC);
- if (!listeners) {
- err = -ENOMEM;
- goto out_ungrab;
- }
- old = tbl->listeners;
- memcpy(listeners, old, NLGRPSZ(tbl->groups));
- rcu_assign_pointer(tbl->listeners, listeners);
- }
- tbl->groups = groups;
-
- out_ungrab:
+ err = __netlink_change_ngroups(sk, groups);
netlink_table_ungrab();
- synchronize_rcu();
- kfree(old);
+
return err;
}
-EXPORT_SYMBOL(netlink_change_ngroups);
/**
* netlink_clear_multicast_users - kick off multicast listeners
@@ -1564,7 +1630,6 @@ void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
netlink_table_ungrab();
}
-EXPORT_SYMBOL(netlink_clear_multicast_users);
void netlink_set_nonroot(int protocol, unsigned int flags)
{
@@ -1647,7 +1712,7 @@ errout:
}
int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
- struct nlmsghdr *nlh,
+ const struct nlmsghdr *nlh,
int (*dump)(struct sk_buff *skb,
struct netlink_callback *),
int (*done)(struct netlink_callback *))
@@ -2026,10 +2091,10 @@ static int __init netlink_proto_init(void)
if (!nl_table)
goto panic;
- if (num_physpages >= (128 * 1024))
- limit = num_physpages >> (21 - PAGE_SHIFT);
+ if (totalram_pages >= (128 * 1024))
+ limit = totalram_pages >> (21 - PAGE_SHIFT);
else
- limit = num_physpages >> (23 - PAGE_SHIFT);
+ limit = totalram_pages >> (23 - PAGE_SHIFT);
order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
limit = (1UL << order) / sizeof(struct hlist_head);
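The af_netlink changes let the listeners bitmap be resized from atomic context: netlink_table_grab()/ungrab() are made non-static, and the resize logic moves into __netlink_change_ngroups(), which genetlink below calls while walking network namespaces under rcu_read_lock(). Because it cannot sleep there, the old bitmap is no longer freed with synchronize_rcu() plus kfree(); instead every listeners allocation reserves room for a struct listeners_rcu_head at its tail and the old memory is handed to call_rcu(). A runnable sketch of just the layout arithmetic; NLGRPSZ/NLGRPLONGS are re-derived here from the usual bits-rounded-up-to-longs definition and are an assumption:

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * 8)
#define NLGRPSZ(x)	(((x) + BITS_PER_LONG - 1) / BITS_PER_LONG * \
			 sizeof(unsigned long))
#define NLGRPLONGS(x)	(NLGRPSZ(x) / sizeof(unsigned long))

struct listeners_trailer {	/* stand-in for struct listeners_rcu_head */
	void *ptr;
};

int main(void)
{
	unsigned int groups = 32;
	/* over-allocate the bitmap by one trailer, as the kzalloc() calls
	 * above now do */
	unsigned long *listeners = calloc(1, NLGRPSZ(groups) +
					  sizeof(struct listeners_trailer));

	if (!listeners)
		return 1;
	/* the trailer sits right after the last bitmap word, so it can be
	 * recovered from the bitmap pointer alone when the old bitmap is
	 * queued for deferred freeing */
	struct listeners_trailer *tr =
		(struct listeners_trailer *)(listeners + NLGRPLONGS(groups));

	printf("bitmap bytes: %zu, trailer offset: %td\n",
	       NLGRPSZ(groups), (char *)tr - (char *)listeners);
	free(listeners);
	return 0;
}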
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index eed4c6a8afc..566941e0336 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -18,8 +18,6 @@
#include <net/sock.h>
#include <net/genetlink.h>
-struct sock *genl_sock = NULL;
-
static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
static inline void genl_lock(void)
@@ -138,7 +136,7 @@ int genl_register_mc_group(struct genl_family *family,
{
int id;
unsigned long *new_groups;
- int err;
+ int err = 0;
BUG_ON(grp->name[0] == '\0');
@@ -175,10 +173,34 @@ int genl_register_mc_group(struct genl_family *family,
mc_groups_longs++;
}
- err = netlink_change_ngroups(genl_sock,
- mc_groups_longs * BITS_PER_LONG);
- if (err)
- goto out;
+ if (family->netnsok) {
+ struct net *net;
+
+ netlink_table_grab();
+ rcu_read_lock();
+ for_each_net_rcu(net) {
+ err = __netlink_change_ngroups(net->genl_sock,
+ mc_groups_longs * BITS_PER_LONG);
+ if (err) {
+ /*
+ * No need to roll back, can only fail if
+ * memory allocation fails and then the
+ * number of _possible_ groups has been
+ * increased on some sockets which is ok.
+ */
+ rcu_read_unlock();
+ netlink_table_ungrab();
+ goto out;
+ }
+ }
+ rcu_read_unlock();
+ netlink_table_ungrab();
+ } else {
+ err = netlink_change_ngroups(init_net.genl_sock,
+ mc_groups_longs * BITS_PER_LONG);
+ if (err)
+ goto out;
+ }
grp->id = id;
set_bit(id, mc_groups);
@@ -195,8 +217,14 @@ EXPORT_SYMBOL(genl_register_mc_group);
static void __genl_unregister_mc_group(struct genl_family *family,
struct genl_multicast_group *grp)
{
+ struct net *net;
BUG_ON(grp->family != family);
- netlink_clear_multicast_users(genl_sock, grp->id);
+
+ rcu_read_lock();
+ for_each_net_rcu(net)
+ netlink_clear_multicast_users(net->genl_sock, grp->id);
+ rcu_read_unlock();
+
clear_bit(grp->id, mc_groups);
list_del(&grp->list);
genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, grp);
@@ -467,6 +495,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct genl_ops *ops;
struct genl_family *family;
+ struct net *net = sock_net(skb->sk);
struct genl_info info;
struct genlmsghdr *hdr = nlmsg_data(nlh);
int hdrlen, err;
@@ -475,6 +504,10 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
if (family == NULL)
return -ENOENT;
+ /* this family doesn't exist in this netns */
+ if (!family->netnsok && !net_eq(net, &init_net))
+ return -ENOENT;
+
hdrlen = GENL_HDRLEN + family->hdrsize;
if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
return -EINVAL;
@@ -492,7 +525,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
return -EOPNOTSUPP;
genl_unlock();
- err = netlink_dump_start(genl_sock, skb, nlh,
+ err = netlink_dump_start(net->genl_sock, skb, nlh,
ops->dumpit, ops->done);
genl_lock();
return err;
@@ -514,6 +547,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
info.genlhdr = nlmsg_data(nlh);
info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
info.attrs = family->attrbuf;
+ genl_info_net_set(&info, net);
return ops->doit(skb, &info);
}
@@ -534,6 +568,7 @@ static struct genl_family genl_ctrl = {
.name = "nlctrl",
.version = 0x2,
.maxattr = CTRL_ATTR_MAX,
+ .netnsok = true,
};
static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
@@ -650,6 +685,7 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
int i, n = 0;
struct genl_family *rt;
+ struct net *net = sock_net(skb->sk);
int chains_to_skip = cb->args[0];
int fams_to_skip = cb->args[1];
@@ -658,6 +694,8 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
continue;
n = 0;
list_for_each_entry(rt, genl_family_chain(i), family_list) {
+ if (!rt->netnsok && !net_eq(net, &init_net))
+ continue;
if (++n < fams_to_skip)
continue;
if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).pid,
@@ -729,6 +767,7 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
res = genl_family_find_byid(id);
+ err = -ENOENT;
}
if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
@@ -736,49 +775,61 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
res = genl_family_find_byname(name);
+ err = -ENOENT;
}
- if (res == NULL) {
- err = -ENOENT;
- goto errout;
+ if (res == NULL)
+ return err;
+
+ if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
+ /* family doesn't exist here */
+ return -ENOENT;
}
msg = ctrl_build_family_msg(res, info->snd_pid, info->snd_seq,
CTRL_CMD_NEWFAMILY);
- if (IS_ERR(msg)) {
- err = PTR_ERR(msg);
- goto errout;
- }
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- err = genlmsg_reply(msg, info);
-errout:
- return err;
+ return genlmsg_reply(msg, info);
}
static int genl_ctrl_event(int event, void *data)
{
struct sk_buff *msg;
+ struct genl_family *family;
+ struct genl_multicast_group *grp;
- if (genl_sock == NULL)
+ /* genl is still initialising */
+ if (!init_net.genl_sock)
return 0;
switch (event) {
case CTRL_CMD_NEWFAMILY:
case CTRL_CMD_DELFAMILY:
- msg = ctrl_build_family_msg(data, 0, 0, event);
- if (IS_ERR(msg))
- return PTR_ERR(msg);
-
- genlmsg_multicast(msg, 0, GENL_ID_CTRL, GFP_KERNEL);
+ family = data;
+ msg = ctrl_build_family_msg(family, 0, 0, event);
break;
case CTRL_CMD_NEWMCAST_GRP:
case CTRL_CMD_DELMCAST_GRP:
+ grp = data;
+ family = grp->family;
msg = ctrl_build_mcgrp_msg(data, 0, 0, event);
- if (IS_ERR(msg))
- return PTR_ERR(msg);
-
- genlmsg_multicast(msg, 0, GENL_ID_CTRL, GFP_KERNEL);
break;
+ default:
+ return -EINVAL;
+ }
+
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ if (!family->netnsok) {
+ genlmsg_multicast_netns(&init_net, msg, 0,
+ GENL_ID_CTRL, GFP_KERNEL);
+ } else {
+ rcu_read_lock();
+ genlmsg_multicast_allns(msg, 0, GENL_ID_CTRL, GFP_ATOMIC);
+ rcu_read_unlock();
}
return 0;
@@ -795,6 +846,33 @@ static struct genl_multicast_group notify_grp = {
.name = "notify",
};
+static int __net_init genl_pernet_init(struct net *net)
+{
+ /* we'll bump the group number right afterwards */
+ net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, 0,
+ genl_rcv, &genl_mutex,
+ THIS_MODULE);
+
+ if (!net->genl_sock && net_eq(net, &init_net))
+ panic("GENL: Cannot initialize generic netlink\n");
+
+ if (!net->genl_sock)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __net_exit genl_pernet_exit(struct net *net)
+{
+ netlink_kernel_release(net->genl_sock);
+ net->genl_sock = NULL;
+}
+
+static struct pernet_operations genl_pernet_ops = {
+ .init = genl_pernet_init,
+ .exit = genl_pernet_exit,
+};
+
static int __init genl_init(void)
{
int i, err;
@@ -804,36 +882,67 @@ static int __init genl_init(void)
err = genl_register_family(&genl_ctrl);
if (err < 0)
- goto errout;
+ goto problem;
err = genl_register_ops(&genl_ctrl, &genl_ctrl_ops);
if (err < 0)
- goto errout_register;
+ goto problem;
netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV);
- /* we'll bump the group number right afterwards */
- genl_sock = netlink_kernel_create(&init_net, NETLINK_GENERIC, 0,
- genl_rcv, &genl_mutex, THIS_MODULE);
- if (genl_sock == NULL)
- panic("GENL: Cannot initialize generic netlink\n");
+ err = register_pernet_subsys(&genl_pernet_ops);
+ if (err)
+ goto problem;
err = genl_register_mc_group(&genl_ctrl, &notify_grp);
if (err < 0)
- goto errout_register;
+ goto problem;
return 0;
-errout_register:
- genl_unregister_family(&genl_ctrl);
-errout:
+problem:
panic("GENL: Cannot register controller: %d\n", err);
}
subsys_initcall(genl_init);
-EXPORT_SYMBOL(genl_sock);
EXPORT_SYMBOL(genl_register_ops);
EXPORT_SYMBOL(genl_unregister_ops);
EXPORT_SYMBOL(genl_register_family);
EXPORT_SYMBOL(genl_unregister_family);
+
+static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group,
+ gfp_t flags)
+{
+ struct sk_buff *tmp;
+ struct net *net, *prev = NULL;
+ int err;
+
+ for_each_net_rcu(net) {
+ if (prev) {
+ tmp = skb_clone(skb, flags);
+ if (!tmp) {
+ err = -ENOMEM;
+ goto error;
+ }
+ err = nlmsg_multicast(prev->genl_sock, tmp,
+ pid, group, flags);
+ if (err)
+ goto error;
+ }
+
+ prev = net;
+ }
+
+ return nlmsg_multicast(prev->genl_sock, skb, pid, group, flags);
+ error:
+ kfree_skb(skb);
+ return err;
+}
+
+int genlmsg_multicast_allns(struct sk_buff *skb, u32 pid, unsigned int group,
+ gfp_t flags)
+{
+ return genlmsg_mcast(skb, pid, group, flags);
+}
+EXPORT_SYMBOL(genlmsg_multicast_allns);
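
genlmsg_mcast() above clones the skb for every namespace except the last, so the original buffer is consumed exactly once and freed on error. A userspace analogue of that clone-for-all-but-the-last delivery, with strdup()/free() standing in for skb_clone()/kfree_skb() and deliver() standing in for nlmsg_multicast() (all names here are illustrative):

#define _POSIX_C_SOURCE 200809L
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int deliver(int dst, char *buf)
{
	printf("dst %d: %s\n", dst, buf);
	free(buf);			/* the receiver consumes the buffer */
	return 0;
}

static int broadcast(char *buf, const int *dsts, int n)
{
	int i, err;

	if (n < 1) {
		free(buf);
		return -EINVAL;
	}
	for (i = 0; i < n - 1; i++) {
		char *dup = strdup(buf);	/* skb_clone() stand-in */

		if (!dup) {
			err = -ENOMEM;
			goto error;
		}
		err = deliver(dsts[i], dup);
		if (err)
			goto error;
	}
	/* the last destination consumes the original: no extra copy */
	return deliver(dsts[n - 1], buf);
error:
	free(buf);
	return err;
}

int main(void)
{
	const int dsts[] = { 1, 2, 3 };
	char *msg = strdup("hello");

	return msg ? (broadcast(msg, dsts, 3) ? 1 : 0) : 1;
}
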
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index ce51ce012cd..ce1a34b99c2 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -847,6 +847,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
sax->fsa_ax25.sax25_family = AF_NETROM;
sax->fsa_ax25.sax25_ndigis = 1;
sax->fsa_ax25.sax25_call = nr->user_addr;
+ memset(sax->fsa_digipeater, 0, sizeof(sax->fsa_digipeater));
sax->fsa_digipeater[0] = nr->dest_addr;
*uaddr_len = sizeof(struct full_sockaddr_ax25);
} else {
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 351372463fe..7aa11b01b2e 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -169,7 +169,7 @@ static int nr_close(struct net_device *dev)
return 0;
}
-static int nr_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t nr_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct net_device_stats *stats = &dev->stats;
unsigned int len = skb->len;
@@ -177,13 +177,13 @@ static int nr_xmit(struct sk_buff *skb, struct net_device *dev)
if (!nr_route_frame(skb, NULL)) {
kfree_skb(skb);
stats->tx_errors++;
- return 0;
+ return NETDEV_TX_OK;
}
stats->tx_packets++;
stats->tx_bytes += len;
- return 0;
+ return NETDEV_TX_OK;
}
static const struct header_ops nr_header_ops = {
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index e943c16552a..4eb1ac9a767 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -630,23 +630,23 @@ out:
return dev;
}
-static ax25_digi *nr_call_to_digi(int ndigis, ax25_address *digipeaters)
+static ax25_digi *nr_call_to_digi(ax25_digi *digi, int ndigis,
+ ax25_address *digipeaters)
{
- static ax25_digi ax25_digi;
int i;
if (ndigis == 0)
return NULL;
for (i = 0; i < ndigis; i++) {
- ax25_digi.calls[i] = digipeaters[i];
- ax25_digi.repeated[i] = 0;
+ digi->calls[i] = digipeaters[i];
+ digi->repeated[i] = 0;
}
- ax25_digi.ndigi = ndigis;
- ax25_digi.lastrepeat = -1;
+ digi->ndigi = ndigis;
+ digi->lastrepeat = -1;
- return &ax25_digi;
+ return digi;
}
/*
@@ -656,6 +656,7 @@ int nr_rt_ioctl(unsigned int cmd, void __user *arg)
{
struct nr_route_struct nr_route;
struct net_device *dev;
+ ax25_digi digi;
int ret;
switch (cmd) {
@@ -673,13 +674,15 @@ int nr_rt_ioctl(unsigned int cmd, void __user *arg)
ret = nr_add_node(&nr_route.callsign,
nr_route.mnemonic,
&nr_route.neighbour,
- nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters),
+ nr_call_to_digi(&digi, nr_route.ndigis,
+ nr_route.digipeaters),
dev, nr_route.quality,
nr_route.obs_count);
break;
case NETROM_NEIGH:
ret = nr_add_neigh(&nr_route.callsign,
- nr_call_to_digi(nr_route.ndigis, nr_route.digipeaters),
+ nr_call_to_digi(&digi, nr_route.ndigis,
+ nr_route.digipeaters),
dev, nr_route.quality);
break;
default:
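
The nr_call_to_digi() change above drops a function-local static ax25_digi in favour of caller-provided storage, since a static buffer is shared by every caller of the ioctl path. A small standalone sketch of why the static variant aliases results and how the caller-owned version avoids it (types and call signs are made up for the example):

#include <stdio.h>
#include <string.h>

struct digi { char calls[16]; };

static struct digi *to_digi_static(const char *call)
{
	static struct digi d;			/* shared by all callers */

	snprintf(d.calls, sizeof(d.calls), "%s", call);
	return &d;
}

static struct digi *to_digi(struct digi *d, const char *call)
{
	snprintf(d->calls, sizeof(d->calls), "%s", call);
	return d;				/* caller owns the storage */
}

int main(void)
{
	struct digi *a = to_digi_static("K1ABC");
	struct digi *b = to_digi_static("W2XYZ");
	struct digi da, db;

	/* a and b alias the same static object, so the first result is gone */
	printf("static: a=%s b=%s (a==b: %d)\n", a->calls, b->calls, a == b);

	printf("caller-owned: a=%s b=%s\n",
	       to_digi(&da, "K1ABC")->calls, to_digi(&db, "W2XYZ")->calls);
	return 0;
}
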
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ebe5718baa3..d3d52c66cdc 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -137,8 +137,7 @@ dev->hard_header == NULL (ll header is added by device, we cannot control it)
/* Private packet socket structures. */
-struct packet_mclist
-{
+struct packet_mclist {
struct packet_mclist *next;
int ifindex;
int count;
@@ -149,8 +148,7 @@ struct packet_mclist
/* identical to struct packet_mreq except it has
* a longer address field.
*/
-struct packet_mreq_max
-{
+struct packet_mreq_max {
int mr_ifindex;
unsigned short mr_type;
unsigned short mr_alen;
@@ -162,7 +160,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
int closing, int tx_ring);
struct packet_ring_buffer {
- char * *pg_vec;
+ char **pg_vec;
unsigned int head;
unsigned int frames_per_block;
unsigned int frame_size;
@@ -239,7 +237,7 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status)
flush_dcache_page(virt_to_page(&h.h2->tp_status));
break;
default:
- printk(KERN_ERR "TPACKET version not supported\n");
+ pr_err("TPACKET version not supported\n");
BUG();
}
@@ -265,7 +263,7 @@ static int __packet_get_status(struct packet_sock *po, void *frame)
flush_dcache_page(virt_to_page(&h.h2->tp_status));
return h.h2->tp_status;
default:
- printk(KERN_ERR "TPACKET version not supported\n");
+ pr_err("TPACKET version not supported\n");
BUG();
return 0;
}
@@ -327,7 +325,7 @@ static void packet_sock_destruct(struct sock *sk)
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
if (!sock_flag(sk, SOCK_DEAD)) {
- printk("Attempt to release alive packet socket: %p\n", sk);
+ pr_err("Attempt to release alive packet socket: %p\n", sk);
return;
}
@@ -339,7 +337,8 @@ static const struct proto_ops packet_ops;
static const struct proto_ops packet_ops_spkt;
-static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
+static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
{
struct sock *sk;
struct sockaddr_pkt *spkt;
@@ -368,7 +367,8 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct
if (dev_net(dev) != sock_net(sk))
goto out;
- if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (skb == NULL)
goto oom;
/* drop any routing info */
@@ -394,7 +394,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct
* to prevent sockets using all the memory up.
*/
- if (sock_queue_rcv_skb(sk,skb) == 0)
+ if (sock_queue_rcv_skb(sk, skb) == 0)
return 0;
out:
@@ -413,25 +413,23 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
- struct sockaddr_pkt *saddr=(struct sockaddr_pkt *)msg->msg_name;
+ struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
struct sk_buff *skb;
struct net_device *dev;
- __be16 proto=0;
+ __be16 proto = 0;
int err;
/*
* Get and verify the address.
*/
- if (saddr)
- {
+ if (saddr) {
if (msg->msg_namelen < sizeof(struct sockaddr))
- return(-EINVAL);
- if (msg->msg_namelen==sizeof(struct sockaddr_pkt))
- proto=saddr->spkt_protocol;
- }
- else
- return(-ENOTCONN); /* SOCK_PACKET must be sent giving an address */
+ return -EINVAL;
+ if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
+ proto = saddr->spkt_protocol;
+ } else
+ return -ENOTCONN; /* SOCK_PACKET must be sent giving an address */
/*
* Find the device first to size check it
@@ -448,8 +446,8 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
goto out_unlock;
/*
- * You may not queue a frame bigger than the mtu. This is the lowest level
- * raw protocol and you must do your own fragmentation at this level.
+ * You may not queue a frame bigger than the mtu. This is the lowest level
+ * raw protocol and you must do your own fragmentation at this level.
*/
err = -EMSGSIZE;
@@ -460,9 +458,9 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
skb = sock_wmalloc(sk, len + LL_RESERVED_SPACE(dev), 0, GFP_KERNEL);
/*
- * If the write buffer is full, then tough. At this level the user gets to
- * deal with the problem - do your own algorithmic backoffs. That's far
- * more flexible.
+ * If the write buffer is full, then tough. At this level the user
+ * gets to deal with the problem - do your own algorithmic backoffs.
+ * That's far more flexible.
*/
if (skb == NULL)
@@ -488,7 +486,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
}
/* Returns -EFAULT on error */
- err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
+ err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
skb->protocol = proto;
skb->dev = dev;
skb->priority = sk->sk_priority;
@@ -501,7 +499,7 @@ static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
dev_queue_xmit(skb);
dev_put(dev);
- return(len);
+ return len;
out_free:
kfree_skb(skb);
@@ -537,12 +535,13 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
we will not harm anyone.
*/
-static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
+static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
{
struct sock *sk;
struct sockaddr_ll *sll;
struct packet_sock *po;
- u8 * skb_head = skb->data;
+ u8 *skb_head = skb->data;
int skb_len = skb->len;
unsigned int snaplen, res;
@@ -648,7 +647,8 @@ drop:
}
#ifdef CONFIG_PACKET_MMAP
-static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
+static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev)
{
struct sock *sk;
struct packet_sock *po;
@@ -658,7 +658,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
struct tpacket2_hdr *h2;
void *raw;
} h;
- u8 * skb_head = skb->data;
+ u8 *skb_head = skb->data;
int skb_len = skb->len;
unsigned int snaplen, res;
unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
@@ -821,7 +821,7 @@ ring_is_full:
static void tpacket_destruct_skb(struct sk_buff *skb)
{
struct packet_sock *po = pkt_sk(skb->sk);
- void * ph;
+ void *ph;
BUG_ON(skb == NULL);
@@ -836,9 +836,9 @@ static void tpacket_destruct_skb(struct sk_buff *skb)
sock_wfree(skb);
}
-static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff * skb,
- void * frame, struct net_device *dev, int size_max,
- __be16 proto, unsigned char * addr)
+static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
+ void *frame, struct net_device *dev, int size_max,
+ __be16 proto, unsigned char *addr)
{
union {
struct tpacket_hdr *h1;
@@ -867,8 +867,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff * skb,
break;
}
if (unlikely(tp_len > size_max)) {
- printk(KERN_ERR "packet size is too long (%d > %d)\n",
- tp_len, size_max);
+ pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
return -EMSGSIZE;
}
@@ -883,12 +882,11 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff * skb,
NULL, tp_len);
if (unlikely(err < 0))
return -EINVAL;
- } else if (dev->hard_header_len ) {
+ } else if (dev->hard_header_len) {
/* net device doesn't like empty head */
if (unlikely(tp_len <= dev->hard_header_len)) {
- printk(KERN_ERR "packet size is too short "
- "(%d < %d)\n", tp_len,
- dev->hard_header_len);
+ pr_err("packet size is too short (%d < %d)\n",
+ tp_len, dev->hard_header_len);
return -EINVAL;
}
@@ -917,9 +915,8 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff * skb,
nr_frags = skb_shinfo(skb)->nr_frags;
if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
- printk(KERN_ERR "Packet exceed the number "
- "of skb frags(%lu)\n",
- MAX_SKB_FRAGS);
+ pr_err("Packet exceed the number of skb frags(%lu)\n",
+ MAX_SKB_FRAGS);
return -EFAULT;
}
@@ -944,8 +941,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
struct net_device *dev;
__be16 proto;
int ifindex, err, reserve = 0;
- void * ph;
- struct sockaddr_ll *saddr=(struct sockaddr_ll *)msg->msg_name;
+ void *ph;
+ struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
int tp_len, size_max;
unsigned char *addr;
int len_sum = 0;
@@ -1038,8 +1035,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
goto out_xmit;
packet_increment_head(&po->tx_ring);
len_sum += tp_len;
- }
- while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT))
+ } while (likely((ph != NULL) || ((!(msg->msg_flags & MSG_DONTWAIT))
&& (atomic_read(&po->tx_ring.pending))))
);
@@ -1064,7 +1060,7 @@ static int packet_snd(struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
- struct sockaddr_ll *saddr=(struct sockaddr_ll *)msg->msg_name;
+ struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
struct sk_buff *skb;
struct net_device *dev;
__be16 proto;
@@ -1110,7 +1106,7 @@ static int packet_snd(struct socket *sock,
skb = sock_alloc_send_skb(sk, len + LL_ALLOCATED_SPACE(dev),
msg->msg_flags & MSG_DONTWAIT, &err);
- if (skb==NULL)
+ if (skb == NULL)
goto out_unlock;
skb_reserve(skb, LL_RESERVED_SPACE(dev));
@@ -1122,7 +1118,7 @@ static int packet_snd(struct socket *sock,
goto out_free;
/* Returns -EFAULT on error */
- err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
+ err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
if (err)
goto out_free;
@@ -1140,7 +1136,7 @@ static int packet_snd(struct socket *sock,
dev_put(dev);
- return(len);
+ return len;
out_free:
kfree_skb(skb);
@@ -1283,9 +1279,10 @@ out_unlock:
* Bind a packet socket to a device
*/
-static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len)
+static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len)
{
- struct sock *sk=sock->sk;
+ struct sock *sk = sock->sk;
char name[15];
struct net_device *dev;
int err = -ENODEV;
@@ -1296,7 +1293,7 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int add
if (addr_len != sizeof(struct sockaddr))
return -EINVAL;
- strlcpy(name,uaddr->sa_data,sizeof(name));
+ strlcpy(name, uaddr->sa_data, sizeof(name));
dev = dev_get_by_name(sock_net(sk), name);
if (dev) {
@@ -1308,8 +1305,8 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int add
static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
- struct sockaddr_ll *sll = (struct sockaddr_ll*)uaddr;
- struct sock *sk=sock->sk;
+ struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
+ struct sock *sk = sock->sk;
struct net_device *dev = NULL;
int err;
@@ -1404,7 +1401,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol)
sk_add_node(sk, &net->packet.sklist);
sock_prot_inuse_add(net, &packet_proto, 1);
write_unlock_bh(&net->packet.sklist_lock);
- return(0);
+ return 0;
out:
return err;
}
@@ -1441,7 +1438,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
* but then it will block.
*/
- skb=skb_recv_datagram(sk,flags,flags&MSG_DONTWAIT,&err);
+ skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
/*
* An error occurred so return it. Because skb_recv_datagram()
@@ -1469,10 +1466,9 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
*/
copied = skb->len;
- if (copied > len)
- {
- copied=len;
- msg->msg_flags|=MSG_TRUNC;
+ if (copied > len) {
+ copied = len;
+ msg->msg_flags |= MSG_TRUNC;
}
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
@@ -1539,7 +1535,7 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
struct net_device *dev;
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
- struct sockaddr_ll *sll = (struct sockaddr_ll*)uaddr;
+ struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
if (peer)
return -EOPNOTSUPP;
@@ -1584,14 +1580,15 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
else
return dev_unicast_delete(dev, i->addr);
break;
- default:;
+ default:
+ break;
}
return 0;
}
static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
- for ( ; i; i=i->next) {
+ for ( ; i; i = i->next) {
if (i->ifindex == dev->ifindex)
packet_dev_mc(dev, i, what);
}
@@ -1693,7 +1690,8 @@ static void packet_flush_mclist(struct sock *sk)
struct net_device *dev;
po->mclist = ml->next;
- if ((dev = dev_get_by_index(sock_net(sk), ml->ifindex)) != NULL) {
+ dev = dev_get_by_index(sock_net(sk), ml->ifindex);
+ if (dev != NULL) {
packet_dev_mc(dev, ml, -1);
dev_put(dev);
}
@@ -1723,7 +1721,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
return -EINVAL;
if (len > sizeof(mreq))
len = sizeof(mreq);
- if (copy_from_user(&mreq,optval,len))
+ if (copy_from_user(&mreq, optval, len))
return -EFAULT;
if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
return -EINVAL;
@@ -1740,9 +1738,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
{
struct tpacket_req req;
- if (optlen<sizeof(req))
+ if (optlen < sizeof(req))
return -EINVAL;
- if (copy_from_user(&req,optval,sizeof(req)))
+ if (copy_from_user(&req, optval, sizeof(req)))
return -EFAULT;
return packet_set_ring(sk, &req, 0, optname == PACKET_TX_RING);
}
@@ -1750,9 +1748,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
{
int val;
- if (optlen!=sizeof(val))
+ if (optlen != sizeof(val))
return -EINVAL;
- if (copy_from_user(&val,optval,sizeof(val)))
+ if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
pkt_sk(sk)->copy_thresh = val;
@@ -1985,51 +1983,51 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
struct sock *sk = sock->sk;
switch (cmd) {
- case SIOCOUTQ:
- {
- int amount = sk_wmem_alloc_get(sk);
+ case SIOCOUTQ:
+ {
+ int amount = sk_wmem_alloc_get(sk);
- return put_user(amount, (int __user *)arg);
- }
- case SIOCINQ:
- {
- struct sk_buff *skb;
- int amount = 0;
-
- spin_lock_bh(&sk->sk_receive_queue.lock);
- skb = skb_peek(&sk->sk_receive_queue);
- if (skb)
- amount = skb->len;
- spin_unlock_bh(&sk->sk_receive_queue.lock);
- return put_user(amount, (int __user *)arg);
- }
- case SIOCGSTAMP:
- return sock_get_timestamp(sk, (struct timeval __user *)arg);
- case SIOCGSTAMPNS:
- return sock_get_timestampns(sk, (struct timespec __user *)arg);
+ return put_user(amount, (int __user *)arg);
+ }
+ case SIOCINQ:
+ {
+ struct sk_buff *skb;
+ int amount = 0;
+
+ spin_lock_bh(&sk->sk_receive_queue.lock);
+ skb = skb_peek(&sk->sk_receive_queue);
+ if (skb)
+ amount = skb->len;
+ spin_unlock_bh(&sk->sk_receive_queue.lock);
+ return put_user(amount, (int __user *)arg);
+ }
+ case SIOCGSTAMP:
+ return sock_get_timestamp(sk, (struct timeval __user *)arg);
+ case SIOCGSTAMPNS:
+ return sock_get_timestampns(sk, (struct timespec __user *)arg);
#ifdef CONFIG_INET
- case SIOCADDRT:
- case SIOCDELRT:
- case SIOCDARP:
- case SIOCGARP:
- case SIOCSARP:
- case SIOCGIFADDR:
- case SIOCSIFADDR:
- case SIOCGIFBRDADDR:
- case SIOCSIFBRDADDR:
- case SIOCGIFNETMASK:
- case SIOCSIFNETMASK:
- case SIOCGIFDSTADDR:
- case SIOCSIFDSTADDR:
- case SIOCSIFFLAGS:
- if (!net_eq(sock_net(sk), &init_net))
- return -ENOIOCTLCMD;
- return inet_dgram_ops.ioctl(sock, cmd, arg);
+ case SIOCADDRT:
+ case SIOCDELRT:
+ case SIOCDARP:
+ case SIOCGARP:
+ case SIOCSARP:
+ case SIOCGIFADDR:
+ case SIOCSIFADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCGIFNETMASK:
+ case SIOCSIFNETMASK:
+ case SIOCGIFDSTADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCSIFFLAGS:
+ if (!net_eq(sock_net(sk), &init_net))
+ return -ENOIOCTLCMD;
+ return inet_dgram_ops.ioctl(sock, cmd, arg);
#endif
- default:
- return -ENOIOCTLCMD;
+ default:
+ return -ENOIOCTLCMD;
}
return 0;
}
@@ -2039,7 +2037,7 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
#define packet_poll datagram_poll
#else
-static unsigned int packet_poll(struct file * file, struct socket *sock,
+static unsigned int packet_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
@@ -2069,7 +2067,7 @@ static unsigned int packet_poll(struct file * file, struct socket *sock,
static void packet_mm_open(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
- struct socket * sock = file->private_data;
+ struct socket *sock = file->private_data;
struct sock *sk = sock->sk;
if (sk)
@@ -2079,7 +2077,7 @@ static void packet_mm_open(struct vm_area_struct *vma)
static void packet_mm_close(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
- struct socket * sock = file->private_data;
+ struct socket *sock = file->private_data;
struct sock *sk = sock->sk;
if (sk)
@@ -2087,8 +2085,8 @@ static void packet_mm_close(struct vm_area_struct *vma)
}
static struct vm_operations_struct packet_mmap_ops = {
- .open = packet_mm_open,
- .close =packet_mm_close,
+ .open = packet_mm_open,
+ .close = packet_mm_close,
};
static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
@@ -2239,8 +2237,8 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
skb_queue_purge(rb_queue);
#undef XC
if (atomic_read(&po->mapped))
- printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n",
- atomic_read(&po->mapped));
+ pr_err("packet_mmap: vma is busy: %d\n",
+ atomic_read(&po->mapped));
}
mutex_unlock(&po->pg_vec_lock);
@@ -2303,7 +2301,7 @@ static int packet_mmap(struct file *file, struct socket *sock,
int pg_num;
for (pg_num = 0; pg_num < rb->pg_vec_pages;
- pg_num++,page++) {
+ pg_num++, page++) {
err = vm_insert_page(vma, start, page);
if (unlikely(err))
goto out;
@@ -2372,7 +2370,7 @@ static struct net_proto_family packet_family_ops = {
};
static struct notifier_block packet_netdev_notifier = {
- .notifier_call =packet_notifier,
+ .notifier_call = packet_notifier,
};
#ifdef CONFIG_PROC_FS
@@ -2402,7 +2400,7 @@ static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
++*pos;
return (v == SEQ_START_TOKEN)
? sk_head(&net->packet.sklist)
- : sk_next((struct sock*)v) ;
+ : sk_next((struct sock *)v);
}
static void packet_seq_stop(struct seq_file *seq, void *v)
@@ -2430,7 +2428,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
po->running,
atomic_read(&s->sk_rmem_alloc),
sock_i_uid(s),
- sock_i_ino(s) );
+ sock_i_ino(s));
}
return 0;
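
The reworked packet_recvmsg() above keeps the classic datagram rule: copy at most the caller's length and raise MSG_TRUNC when the packet was bigger. A userspace sketch of observing that flag, using an AF_UNIX datagram socketpair so it runs unprivileged (packet sockets behave the same way but need CAP_NET_RAW):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

int main(void)
{
	int sv[2];
	char small[4];
	struct iovec iov = { .iov_base = small, .iov_len = sizeof(small) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	ssize_t n;

	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, sv))
		return 1;

	send(sv[0], "0123456789", 10, 0);	/* bigger than the buffer */

	n = recvmsg(sv[1], &msg, 0);
	printf("copied %zd bytes, truncated: %s\n",
	       n, (msg.msg_flags & MSG_TRUNC) ? "yes" : "no");
	return 0;
}
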
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
index e087862ed7e..ef5c75c372e 100644
--- a/net/phonet/datagram.c
+++ b/net/phonet/datagram.c
@@ -159,8 +159,11 @@ out_nofree:
static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
int err = sock_queue_rcv_skb(sk, skb);
- if (err < 0)
+ if (err < 0) {
kfree_skb(skb);
+ if (err == -ENOMEM)
+ atomic_inc(&sk->sk_drops);
+ }
return err ? NET_RX_DROP : NET_RX_SUCCESS;
}
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
index 480839dfc56..d183509d3fa 100644
--- a/net/phonet/pep-gprs.c
+++ b/net/phonet/pep-gprs.c
@@ -183,7 +183,7 @@ static int gprs_close(struct net_device *dev)
return 0;
}
-static int gprs_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t gprs_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct gprs_dev *gp = netdev_priv(dev);
struct sock *sk = gp->sk;
@@ -195,7 +195,7 @@ static int gprs_xmit(struct sk_buff *skb, struct net_device *dev)
break;
default:
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
skb_orphan(skb);
@@ -215,7 +215,7 @@ static int gprs_xmit(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
if (pep_writeable(sk))
netif_wake_queue(dev);
- return 0;
+ return NETDEV_TX_OK;
}
static int gprs_set_mtu(struct net_device *dev, int new_mtu)
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index eef833ea6d7..b8252d289cd 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -346,8 +346,10 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
break;
case PNS_PEP_CTRL_REQ:
- if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX)
+ if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
+ atomic_inc(&sk->sk_drops);
break;
+ }
__skb_pull(skb, 4);
queue = &pn->ctrlreq_queue;
goto queue;
@@ -358,10 +360,13 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
err = sock_queue_rcv_skb(sk, skb);
if (!err)
return 0;
+ if (err == -ENOMEM)
+ atomic_inc(&sk->sk_drops);
break;
}
if (pn->rx_credits == 0) {
+ atomic_inc(&sk->sk_drops);
err = -ENOBUFS;
break;
}
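
The Phonet receive paths above now bump sk->sk_drops whenever an skb is discarded: the control queue is over PNPIPE_CTRLREQ_MAX, sock_queue_rcv_skb() returns -ENOMEM, or the peer has no rx credits. A minimal C11 sketch of that bookkeeping for a bounded queue (the limit and names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define QUEUE_MAX 2

static atomic_long drops;
static int queue_len;

static int enqueue(int item)
{
	if (queue_len >= QUEUE_MAX) {
		atomic_fetch_add(&drops, 1);	/* sk_drops analogue */
		return -1;			/* -ENOBUFS analogue */
	}
	queue_len++;				/* pretend we queued it */
	(void)item;
	return 0;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		enqueue(i);
	printf("queued %d, dropped %ld\n", queue_len, atomic_load(&drops));
	return 0;
}
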
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index b0d6ddd82a9..5f42f30dd16 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -27,6 +27,8 @@
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/phonet.h>
+#include <linux/proc_fs.h>
+#include <linux/if_arp.h>
#include <net/sock.h>
#include <net/netns/generic.h>
#include <net/phonet/pn_dev.h>
@@ -96,7 +98,7 @@ struct net_device *phonet_device_get(struct net *net)
{
struct phonet_device_list *pndevs = phonet_device_list(net);
struct phonet_device *pnd;
- struct net_device *dev;
+ struct net_device *dev = NULL;
spin_lock_bh(&pndevs->lock);
list_for_each_entry(pnd, &pndevs->list, list) {
@@ -194,14 +196,44 @@ found:
return err;
}
+/* automatically configure a Phonet device, if supported */
+static int phonet_device_autoconf(struct net_device *dev)
+{
+ struct if_phonet_req req;
+ int ret;
+
+ if (!dev->netdev_ops->ndo_do_ioctl)
+ return -EOPNOTSUPP;
+
+ ret = dev->netdev_ops->ndo_do_ioctl(dev, (struct ifreq *)&req,
+ SIOCPNGAUTOCONF);
+ if (ret < 0)
+ return ret;
+
+ ASSERT_RTNL();
+ ret = phonet_address_add(dev, req.ifr_phonet_autoconf.device);
+ if (ret)
+ return ret;
+ phonet_address_notify(RTM_NEWADDR, dev,
+ req.ifr_phonet_autoconf.device);
+ return 0;
+}
+
/* notify Phonet of device events */
static int phonet_device_notify(struct notifier_block *me, unsigned long what,
void *arg)
{
struct net_device *dev = arg;
- if (what == NETDEV_UNREGISTER)
+ switch (what) {
+ case NETDEV_REGISTER:
+ if (dev->type == ARPHRD_PHONET)
+ phonet_device_autoconf(dev);
+ break;
+ case NETDEV_UNREGISTER:
phonet_device_destroy(dev);
+ break;
+ }
return 0;
}
@@ -218,6 +250,11 @@ static int phonet_init_net(struct net *net)
if (!pnn)
return -ENOMEM;
+ if (!proc_net_fops_create(net, "phonet", 0, &pn_sock_seq_fops)) {
+ kfree(pnn);
+ return -ENOMEM;
+ }
+
INIT_LIST_HEAD(&pnn->pndevs.list);
spin_lock_init(&pnn->pndevs.lock);
net_assign_generic(net, phonet_net_id, pnn);
@@ -233,6 +270,8 @@ static void phonet_exit_net(struct net *net)
for_each_netdev(net, dev)
phonet_device_destroy(dev);
rtnl_unlock();
+
+ proc_net_remove(net, "phonet");
kfree(pnn);
}
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index f8b4cee434c..d21fd357661 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -147,7 +147,7 @@ static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
if (fill_addr(skb, pnd->netdev, addr << 2,
NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, RTM_NEWADDR))
+ cb->nlh->nlmsg_seq, RTM_NEWADDR) < 0)
goto out;
}
}
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index ada2a35bf7a..7a4ee397d2f 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -412,3 +412,102 @@ found:
return 0;
}
EXPORT_SYMBOL(pn_sock_get_port);
+
+#ifdef CONFIG_PROC_FS
+static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
+{
+ struct net *net = seq_file_net(seq);
+ struct hlist_node *node;
+ struct sock *sknode;
+
+ sk_for_each(sknode, node, &pnsocks.hlist) {
+ if (!net_eq(net, sock_net(sknode)))
+ continue;
+ if (!pos)
+ return sknode;
+ pos--;
+ }
+ return NULL;
+}
+
+static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
+{
+ struct net *net = seq_file_net(seq);
+
+ do
+ sk = sk_next(sk);
+ while (sk && !net_eq(net, sock_net(sk)));
+
+ return sk;
+}
+
+static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(pnsocks.lock)
+{
+ spin_lock_bh(&pnsocks.lock);
+ return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct sock *sk;
+
+ if (v == SEQ_START_TOKEN)
+ sk = pn_sock_get_idx(seq, 0);
+ else
+ sk = pn_sock_get_next(seq, v);
+ (*pos)++;
+ return sk;
+}
+
+static void pn_sock_seq_stop(struct seq_file *seq, void *v)
+ __releases(pnsocks.lock)
+{
+ spin_unlock_bh(&pnsocks.lock);
+}
+
+static int pn_sock_seq_show(struct seq_file *seq, void *v)
+{
+ int len;
+
+ if (v == SEQ_START_TOKEN)
+ seq_printf(seq, "%s%n", "pt loc rem rs st tx_queue rx_queue "
+ " uid inode ref pointer drops", &len);
+ else {
+ struct sock *sk = v;
+ struct pn_sock *pn = pn_sk(sk);
+
+ seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
+ "%d %p %d%n",
+ sk->sk_protocol, pn->sobject, 0, pn->resource,
+ sk->sk_state,
+ sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
+ sock_i_uid(sk), sock_i_ino(sk),
+ atomic_read(&sk->sk_refcnt), sk,
+ atomic_read(&sk->sk_drops), &len);
+ }
+ seq_printf(seq, "%*s\n", 127 - len, "");
+ return 0;
+}
+
+static const struct seq_operations pn_sock_seq_ops = {
+ .start = pn_sock_seq_start,
+ .next = pn_sock_seq_next,
+ .stop = pn_sock_seq_stop,
+ .show = pn_sock_seq_show,
+};
+
+static int pn_sock_open(struct inode *inode, struct file *file)
+{
+ return seq_open_net(inode, file, &pn_sock_seq_ops,
+ sizeof(struct seq_net_private));
+}
+
+const struct file_operations pn_sock_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = pn_sock_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net,
+};
+#endif
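
With the seq_file hooks above, each namespace gets a /proc/net/phonet table whose rows are padded to a fixed 128-byte record (127 characters plus the newline). A trivial userspace dump of it, assuming CONFIG_PROC_FS and a loaded Phonet stack:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/phonet", "r");

	if (!f) {
		perror("/proc/net/phonet");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
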
diff --git a/net/rds/Kconfig b/net/rds/Kconfig
index 796773b5df9..ec753b3ae72 100644
--- a/net/rds/Kconfig
+++ b/net/rds/Kconfig
@@ -1,14 +1,28 @@
config RDS
- tristate "Reliable Datagram Sockets (RDS) (EXPERIMENTAL)"
- depends on INET && INFINIBAND_IPOIB && EXPERIMENTAL
- depends on INFINIBAND && INFINIBAND_ADDR_TRANS
+ tristate "The RDS Protocol (EXPERIMENTAL)"
+ depends on INET && EXPERIMENTAL
---help---
- RDS provides reliable, sequenced delivery of datagrams
- over Infiniband.
+ The RDS (Reliable Datagram Sockets) protocol provides reliable,
+ sequenced delivery of datagrams over Infiniband, iWARP,
+ or TCP.
+
+config RDS_RDMA
+ tristate "RDS over Infiniband and iWARP"
+ depends on RDS && INFINIBAND && INFINIBAND_ADDR_TRANS
+ ---help---
+ Allow RDS to use Infiniband and iWARP as a transport.
+ This transport supports RDMA operations.
+
+config RDS_TCP
+ tristate "RDS over TCP"
+ depends on RDS
+ ---help---
+ Allow RDS to use TCP as a transport.
+ This transport does not support RDMA operations.
config RDS_DEBUG
- bool "Debugging messages"
+ bool "RDS debugging messages"
depends on RDS
default n
diff --git a/net/rds/Makefile b/net/rds/Makefile
index 51f27585fa0..b46eca10968 100644
--- a/net/rds/Makefile
+++ b/net/rds/Makefile
@@ -1,13 +1,20 @@
obj-$(CONFIG_RDS) += rds.o
rds-y := af_rds.o bind.o cong.o connection.o info.o message.o \
recv.o send.o stats.o sysctl.o threads.o transport.o \
- loop.o page.o rdma.o \
- rdma_transport.o \
+ loop.o page.o rdma.o
+
+obj-$(CONFIG_RDS_RDMA) += rds_rdma.o
+rds_rdma-objs := rdma_transport.o \
ib.o ib_cm.o ib_recv.o ib_ring.o ib_send.o ib_stats.o \
ib_sysctl.o ib_rdma.o \
iw.o iw_cm.o iw_recv.o iw_ring.o iw_send.o iw_stats.o \
iw_sysctl.o iw_rdma.o
+
+obj-$(CONFIG_RDS_TCP) += rds_tcp.o
+rds_tcp-objs := tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \
+ tcp_send.o tcp_stats.o
+
ifeq ($(CONFIG_RDS_DEBUG), y)
EXTRA_CFLAGS += -DDEBUG
endif
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index b11e7e52786..6b58aeff4c7 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -39,7 +39,6 @@
#include "rds.h"
#include "rdma.h"
-#include "rdma_transport.h"
/* this is just used for stats gathering :/ */
static DEFINE_SPINLOCK(rds_sock_lock);
@@ -360,7 +359,7 @@ static struct proto rds_proto = {
.obj_size = sizeof(struct rds_sock),
};
-static struct proto_ops rds_proto_ops = {
+static const struct proto_ops rds_proto_ops = {
.family = AF_RDS,
.owner = THIS_MODULE,
.release = rds_release,
@@ -509,7 +508,6 @@ out:
static void __exit rds_exit(void)
{
- rds_rdma_exit();
sock_unregister(rds_family_ops.family);
proto_unregister(&rds_proto);
rds_conn_exit();
@@ -549,14 +547,8 @@ static int __init rds_init(void)
rds_info_register_func(RDS_INFO_SOCKETS, rds_sock_info);
rds_info_register_func(RDS_INFO_RECV_MESSAGES, rds_sock_inc_info);
- /* ib/iwarp transports currently compiled-in */
- ret = rds_rdma_init();
- if (ret)
- goto out_sock;
goto out;
-out_sock:
- sock_unregister(rds_family_ops.family);
out_proto:
proto_unregister(&rds_proto);
out_stats:
diff --git a/net/rds/bind.c b/net/rds/bind.c
index c17cc39160c..5d95fc007f1 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -187,6 +187,9 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (trans == NULL) {
ret = -EADDRNOTAVAIL;
rds_remove_bound(rs);
+ if (printk_ratelimit())
+ printk(KERN_INFO "RDS: rds_bind() could not find a transport, "
+ "load rds_tcp or rds_rdma?\n");
goto out;
}
diff --git a/net/rds/cong.c b/net/rds/cong.c
index 710e4599d76..dd2711df640 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -254,6 +254,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
read_unlock_irqrestore(&rds_cong_monitor_lock, flags);
}
}
+EXPORT_SYMBOL_GPL(rds_cong_map_updated);
int rds_cong_updated_since(unsigned long *recent)
{
diff --git a/net/rds/connection.c b/net/rds/connection.c
index d14445c4830..cc8b568c0c8 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -126,7 +126,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp,
int is_outgoing)
{
- struct rds_connection *conn, *tmp, *parent = NULL;
+ struct rds_connection *conn, *parent = NULL;
struct hlist_head *head = rds_conn_bucket(laddr, faddr);
unsigned long flags;
int ret;
@@ -155,7 +155,6 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
}
INIT_HLIST_NODE(&conn->c_hash_node);
- conn->c_version = RDS_PROTOCOL_3_0;
conn->c_laddr = laddr;
conn->c_faddr = faddr;
spin_lock_init(&conn->c_lock);
@@ -211,26 +210,40 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
trans->t_name ? trans->t_name : "[unknown]",
is_outgoing ? "(outgoing)" : "");
+ /*
+ * Since we ran without holding the conn lock, someone could
+ * have created the same conn (either normal or passive) in the
+ * interim. We check while holding the lock. If we won, we complete
+ * init and return our conn. If we lost, we rollback and return the
+ * other one.
+ */
spin_lock_irqsave(&rds_conn_lock, flags);
- if (parent == NULL) {
- tmp = rds_conn_lookup(head, laddr, faddr, trans);
- if (tmp == NULL)
- hlist_add_head(&conn->c_hash_node, head);
- } else {
- tmp = parent->c_passive;
- if (!tmp)
+ if (parent) {
+ /* Creating passive conn */
+ if (parent->c_passive) {
+ trans->conn_free(conn->c_transport_data);
+ kmem_cache_free(rds_conn_slab, conn);
+ conn = parent->c_passive;
+ } else {
parent->c_passive = conn;
- }
-
- if (tmp) {
- trans->conn_free(conn->c_transport_data);
- kmem_cache_free(rds_conn_slab, conn);
- conn = tmp;
+ rds_cong_add_conn(conn);
+ rds_conn_count++;
+ }
} else {
- rds_cong_add_conn(conn);
- rds_conn_count++;
+ /* Creating normal conn */
+ struct rds_connection *found;
+
+ found = rds_conn_lookup(head, laddr, faddr, trans);
+ if (found) {
+ trans->conn_free(conn->c_transport_data);
+ kmem_cache_free(rds_conn_slab, conn);
+ conn = found;
+ } else {
+ hlist_add_head(&conn->c_hash_node, head);
+ rds_cong_add_conn(conn);
+ rds_conn_count++;
+ }
}
-
spin_unlock_irqrestore(&rds_conn_lock, flags);
out:
@@ -242,12 +255,14 @@ struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
{
return __rds_conn_create(laddr, faddr, trans, gfp, 0);
}
+EXPORT_SYMBOL_GPL(rds_conn_create);
struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
struct rds_transport *trans, gfp_t gfp)
{
return __rds_conn_create(laddr, faddr, trans, gfp, 1);
}
+EXPORT_SYMBOL_GPL(rds_conn_create_outgoing);
void rds_conn_destroy(struct rds_connection *conn)
{
@@ -290,6 +305,7 @@ void rds_conn_destroy(struct rds_connection *conn)
rds_conn_count--;
}
+EXPORT_SYMBOL_GPL(rds_conn_destroy);
static void rds_conn_message_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
@@ -393,6 +409,7 @@ void rds_for_each_conn_info(struct socket *sock, unsigned int len,
spin_unlock_irqrestore(&rds_conn_lock, flags);
}
+EXPORT_SYMBOL_GPL(rds_for_each_conn_info);
static int rds_conn_info_visitor(struct rds_connection *conn,
void *buffer)
@@ -468,6 +485,7 @@ void rds_conn_drop(struct rds_connection *conn)
atomic_set(&conn->c_state, RDS_CONN_ERROR);
queue_work(rds_wq, &conn->c_down_w);
}
+EXPORT_SYMBOL_GPL(rds_conn_drop);
/*
* An error occurred on the connection
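
The comment added in __rds_conn_create() above describes the usual optimistic-allocation pattern: build the object without holding the lock, re-check under the lock, and have the loser free its copy and use the winner's. A pthreads sketch of the same lookup-or-insert shape (the hash list and types are illustrative, not the RDS ones; build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct conn { int laddr, faddr; struct conn *next; };

static struct conn *table;
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static struct conn *lookup(int laddr, int faddr)
{
	struct conn *c;

	for (c = table; c; c = c->next)
		if (c->laddr == laddr && c->faddr == faddr)
			return c;
	return NULL;
}

static struct conn *conn_create(int laddr, int faddr)
{
	struct conn *c, *found;

	c = malloc(sizeof(*c));		/* slow work done without the lock */
	if (!c)
		return NULL;
	c->laddr = laddr;
	c->faddr = faddr;

	pthread_mutex_lock(&table_lock);
	found = lookup(laddr, faddr);
	if (found) {			/* lost the race: roll back */
		free(c);
		c = found;
	} else {			/* won: publish our object */
		c->next = table;
		table = c;
	}
	pthread_mutex_unlock(&table_lock);
	return c;
}

int main(void)
{
	printf("same object: %d\n", conn_create(1, 2) == conn_create(1, 2));
	return 0;
}
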
diff --git a/net/rds/ib.c b/net/rds/ib.c
index b9bcd32431e..536ebe5d3f6 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -43,11 +43,14 @@
unsigned int fmr_pool_size = RDS_FMR_POOL_SIZE;
unsigned int fmr_message_size = RDS_FMR_SIZE + 1; /* +1 allows for unaligned MRs */
+unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;
module_param(fmr_pool_size, int, 0444);
MODULE_PARM_DESC(fmr_pool_size, " Max number of fmr per HCA");
module_param(fmr_message_size, int, 0444);
MODULE_PARM_DESC(fmr_message_size, " Max size of a RDMA transfer");
+module_param(rds_ib_retry_count, int, 0444);
+MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");
struct list_head rds_ib_devices;
@@ -82,9 +85,6 @@ void rds_ib_add_one(struct ib_device *device)
rds_ibdev->max_wrs = dev_attr->max_qp_wr;
rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE);
- rds_ibdev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
- rds_ibdev->fmr_page_size = 1 << rds_ibdev->fmr_page_shift;
- rds_ibdev->fmr_page_mask = ~((u64) rds_ibdev->fmr_page_size - 1);
rds_ibdev->fmr_max_remaps = dev_attr->max_map_per_fmr?: 32;
rds_ibdev->max_fmrs = dev_attr->max_fmr ?
min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) :
@@ -282,6 +282,7 @@ struct rds_transport rds_ib_transport = {
.flush_mrs = rds_ib_flush_mrs,
.t_owner = THIS_MODULE,
.t_name = "infiniband",
+ .t_type = RDS_TRANS_IB
};
int __init rds_ib_init(void)
diff --git a/net/rds/ib.h b/net/rds/ib.h
index 455ae73047f..1378b854cac 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -15,6 +15,8 @@
#define RDS_IB_DEFAULT_RECV_WR 1024
#define RDS_IB_DEFAULT_SEND_WR 256
+#define RDS_IB_DEFAULT_RETRY_COUNT 2
+
#define RDS_IB_SUPPORTED_PROTOCOLS 0x00000003 /* minor versions supported */
extern struct list_head rds_ib_devices;
@@ -157,9 +159,6 @@ struct rds_ib_device {
struct ib_pd *pd;
struct ib_mr *mr;
struct rds_ib_mr_pool *mr_pool;
- int fmr_page_shift;
- int fmr_page_size;
- u64 fmr_page_mask;
unsigned int fmr_max_remaps;
unsigned int max_fmrs;
int max_sge;
@@ -247,6 +246,7 @@ extern struct ib_client rds_ib_client;
extern unsigned int fmr_pool_size;
extern unsigned int fmr_message_size;
+extern unsigned int rds_ib_retry_count;
extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;
@@ -355,17 +355,25 @@ extern ctl_table rds_ib_sysctl_table[];
/*
* Helper functions for getting/setting the header and data SGEs in
* RDS packets (not RDMA)
+ *
+ * From version 3.1 onwards, header is in front of data in the sge.
*/
static inline struct ib_sge *
rds_ib_header_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
{
- return &sge[0];
+ if (ic->conn->c_version > RDS_PROTOCOL_3_0)
+ return &sge[0];
+ else
+ return &sge[1];
}
static inline struct ib_sge *
rds_ib_data_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
{
- return &sge[1];
+ if (ic->conn->c_version > RDS_PROTOCOL_3_0)
+ return &sge[1];
+ else
+ return &sge[0];
}
#endif
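
The two helpers above select which scatter/gather slot holds the header: RDS 3.1 and later place the header first, 3.0 places it after the data. A tiny sketch of that version-dependent selection; the RDS_PROTOCOL() encoding shown is only an assumption for the example:

#include <stdio.h>

#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | (min))	/* assumed encoding */
#define RDS_PROTOCOL_3_0	RDS_PROTOCOL(3, 0)

static int header_slot(unsigned int version)
{
	return version > RDS_PROTOCOL_3_0 ? 0 : 1;
}

static int data_slot(unsigned int version)
{
	return version > RDS_PROTOCOL_3_0 ? 1 : 0;
}

int main(void)
{
	printf("3.0: header sge %d, data sge %d\n",
	       header_slot(RDS_PROTOCOL_3_0), data_slot(RDS_PROTOCOL_3_0));
	printf("3.1: header sge %d, data sge %d\n",
	       header_slot(RDS_PROTOCOL(3, 1)), data_slot(RDS_PROTOCOL(3, 1)));
	return 0;
}
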
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index f8e40e1a603..c2d372f13db 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -98,21 +98,34 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
struct ib_qp_attr qp_attr;
int err;
- if (event->param.conn.private_data_len) {
+ if (event->param.conn.private_data_len >= sizeof(*dp)) {
dp = event->param.conn.private_data;
- rds_ib_set_protocol(conn,
+ /* make sure it isn't empty data */
+ if (dp->dp_protocol_major) {
+ rds_ib_set_protocol(conn,
RDS_PROTOCOL(dp->dp_protocol_major,
- dp->dp_protocol_minor));
- rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
+ dp->dp_protocol_minor));
+ rds_ib_set_flow_control(conn, be32_to_cpu(dp->dp_credit));
+ }
}
printk(KERN_NOTICE "RDS/IB: connected to %pI4 version %u.%u%s\n",
- &conn->c_laddr,
+ &conn->c_faddr,
RDS_PROTOCOL_MAJOR(conn->c_version),
RDS_PROTOCOL_MINOR(conn->c_version),
ic->i_flowctl ? ", flow control" : "");
+ /*
+ * Init rings and fill recv. this needs to wait until protocol negotiation
+ * is complete, since ring layout is different from 3.0 to 3.1.
+ */
+ rds_ib_send_init_ring(ic);
+ rds_ib_recv_init_ring(ic);
+ /* Post receive buffers - as a side effect, this will update
+ * the posted credit count. */
+ rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1);
+
/* Tune RNR behavior */
rds_ib_tune_rnr(ic, &qp_attr);
@@ -145,7 +158,7 @@ static void rds_ib_cm_fill_conn_param(struct rds_connection *conn,
/* XXX tune these? */
conn_param->responder_resources = 1;
conn_param->initiator_depth = 1;
- conn_param->retry_count = 7;
+ conn_param->retry_count = min_t(unsigned int, rds_ib_retry_count, 7);
conn_param->rnr_retry_count = 7;
if (dp) {
@@ -190,9 +203,9 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
break;
default:
- printk(KERN_WARNING "RDS/ib: unhandled QP event %u "
- "on connection to %pI4\n", event->event,
- &conn->c_faddr);
+ rds_ib_conn_error(conn, "RDS/IB: Fatal QP Event %u "
+ "- connection %pI4->%pI4, reconnecting\n",
+ event->event, &conn->c_laddr, &conn->c_faddr);
break;
}
}
@@ -321,7 +334,7 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
rdsdebug("send allocation failed\n");
goto out;
}
- rds_ib_send_init_ring(ic);
+ memset(ic->i_sends, 0, ic->i_send_ring.w_nr * sizeof(struct rds_ib_send_work));
ic->i_recvs = vmalloc(ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work));
if (ic->i_recvs == NULL) {
@@ -329,14 +342,10 @@ static int rds_ib_setup_qp(struct rds_connection *conn)
rdsdebug("recv allocation failed\n");
goto out;
}
+ memset(ic->i_recvs, 0, ic->i_recv_ring.w_nr * sizeof(struct rds_ib_recv_work));
- rds_ib_recv_init_ring(ic);
rds_ib_recv_init_ack(ic);
- /* Post receive buffers - as a side effect, this will update
- * the posted credit count. */
- rds_ib_recv_refill(conn, GFP_KERNEL, GFP_HIGHUSER, 1);
-
rdsdebug("conn %p pd %p mr %p cq %p %p\n", conn, ic->i_pd, ic->i_mr,
ic->i_send_cq, ic->i_recv_cq);
@@ -344,19 +353,32 @@ out:
return ret;
}
-static u32 rds_ib_protocol_compatible(const struct rds_ib_connect_private *dp)
+static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event)
{
+ const struct rds_ib_connect_private *dp = event->param.conn.private_data;
u16 common;
u32 version = 0;
- /* rdma_cm private data is odd - when there is any private data in the
+ /*
+ * rdma_cm private data is odd - when there is any private data in the
* request, we will be given a pretty large buffer without telling us the
* original size. The only way to tell the difference is by looking at
* the contents, which are initialized to zero.
* If the protocol version fields aren't set, this is a connection attempt
* from an older version. This could be 3.0 or 2.0 - we can't tell.
- * We really should have changed this for OFED 1.3 :-( */
- if (dp->dp_protocol_major == 0)
+ * We really should have changed this for OFED 1.3 :-(
+ */
+
+ /* Be paranoid. RDS always has privdata */
+ if (!event->param.conn.private_data_len) {
+ printk(KERN_NOTICE "RDS incoming connection has no private data, "
+ "rejecting\n");
+ return 0;
+ }
+
+ /* Even if len is crap *now* I still want to check it. -ASG */
+ if (event->param.conn.private_data_len < sizeof (*dp)
+ || dp->dp_protocol_major == 0)
return RDS_PROTOCOL_3_0;
common = be16_to_cpu(dp->dp_protocol_minor_mask) & RDS_IB_SUPPORTED_PROTOCOLS;
@@ -388,7 +410,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
int err, destroy = 1;
/* Check whether the remote protocol version matches ours. */
- version = rds_ib_protocol_compatible(dp);
+ version = rds_ib_protocol_compatible(event);
if (!version)
goto out;
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 81033af9302..ef3ab5b7283 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -211,7 +211,7 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
pool->fmr_attr.max_pages = fmr_message_size;
pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
- pool->fmr_attr.page_shift = rds_ibdev->fmr_page_shift;
+ pool->fmr_attr.page_shift = PAGE_SHIFT;
pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;
/* We never allow more than max_items MRs to be allocated.
@@ -349,13 +349,13 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
- if (dma_addr & ~rds_ibdev->fmr_page_mask) {
+ if (dma_addr & ~PAGE_MASK) {
if (i > 0)
return -EINVAL;
else
++page_cnt;
}
- if ((dma_addr + dma_len) & ~rds_ibdev->fmr_page_mask) {
+ if ((dma_addr + dma_len) & ~PAGE_MASK) {
if (i < sg_dma_len - 1)
return -EINVAL;
else
@@ -365,7 +365,7 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
len += dma_len;
}
- page_cnt += len >> rds_ibdev->fmr_page_shift;
+ page_cnt += len >> PAGE_SHIFT;
if (page_cnt > fmr_message_size)
return -EINVAL;
@@ -378,9 +378,9 @@ static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibm
unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);
- for (j = 0; j < dma_len; j += rds_ibdev->fmr_page_size)
+ for (j = 0; j < dma_len; j += PAGE_SIZE)
dma_pages[page_cnt++] =
- (dma_addr & rds_ibdev->fmr_page_mask) + j;
+ (dma_addr & PAGE_MASK) + j;
}
ret = ib_map_phys_fmr(ibmr->fmr,
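
The FMR mapping above now works in CPU page units: offsets are tested with ~PAGE_MASK, addresses are rounded down with PAGE_MASK, and one DMA page is emitted per PAGE_SIZE step. A standalone sketch of that alignment arithmetic for one [addr, addr + len) region, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long long addr = 0x12345678ULL;	/* not page aligned */
	unsigned long long len = 10000;
	unsigned long long start = addr & PAGE_MASK;			/* round down */
	unsigned long long end = (addr + len + ~PAGE_MASK) & PAGE_MASK;	/* round up */
	unsigned long long pages = (end - start) >> PAGE_SHIFT;
	unsigned long long p;

	printf("offset into first page: %llu\n", addr & ~PAGE_MASK);
	printf("pages spanned: %llu\n", pages);
	for (p = start; p < end; p += PAGE_SIZE)
		printf("  dma page %#llx\n", p);
	return 0;
}
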
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 5709bad2832..cd7a6cfcab0 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -555,6 +555,47 @@ u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
return rds_ib_get_ack(ic);
}
+static struct rds_header *rds_ib_get_header(struct rds_connection *conn,
+ struct rds_ib_recv_work *recv,
+ u32 data_len)
+{
+ struct rds_ib_connection *ic = conn->c_transport_data;
+ void *hdr_buff = &ic->i_recv_hdrs[recv - ic->i_recvs];
+ void *addr;
+ u32 misplaced_hdr_bytes;
+
+ /*
+ * Support header at the front (RDS 3.1+) as well as header-at-end.
+ *
+ * Cases:
+ * 1) header all in header buff (great!)
+ * 2) header all in data page (copy all to header buff)
+ * 3) header split across hdr buf + data page
+ * (move bit in hdr buff to end before copying other bit from data page)
+ */
+ if (conn->c_version > RDS_PROTOCOL_3_0 || data_len == RDS_FRAG_SIZE)
+ return hdr_buff;
+
+ if (data_len <= (RDS_FRAG_SIZE - sizeof(struct rds_header))) {
+ addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
+ memcpy(hdr_buff,
+ addr + recv->r_frag->f_offset + data_len,
+ sizeof(struct rds_header));
+ kunmap_atomic(addr, KM_SOFTIRQ0);
+ return hdr_buff;
+ }
+
+ misplaced_hdr_bytes = (sizeof(struct rds_header) - (RDS_FRAG_SIZE - data_len));
+
+ memmove(hdr_buff + misplaced_hdr_bytes, hdr_buff, misplaced_hdr_bytes);
+
+ addr = kmap_atomic(recv->r_frag->f_page, KM_SOFTIRQ0);
+ memcpy(hdr_buff, addr + recv->r_frag->f_offset + data_len,
+ sizeof(struct rds_header) - misplaced_hdr_bytes);
+ kunmap_atomic(addr, KM_SOFTIRQ0);
+ return hdr_buff;
+}
+
/*
* It's kind of lame that we're copying from the posted receive pages into
* long-lived bitmaps. We could have posted the bitmaps and rdma written into
@@ -645,7 +686,7 @@ struct rds_ib_ack_state {
};
static void rds_ib_process_recv(struct rds_connection *conn,
- struct rds_ib_recv_work *recv, u32 byte_len,
+ struct rds_ib_recv_work *recv, u32 data_len,
struct rds_ib_ack_state *state)
{
struct rds_ib_connection *ic = conn->c_transport_data;
@@ -655,9 +696,9 @@ static void rds_ib_process_recv(struct rds_connection *conn,
/* XXX shut down the connection if port 0,0 are seen? */
rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
- byte_len);
+ data_len);
- if (byte_len < sizeof(struct rds_header)) {
+ if (data_len < sizeof(struct rds_header)) {
rds_ib_conn_error(conn, "incoming message "
"from %pI4 didn't inclue a "
"header, disconnecting and "
@@ -665,9 +706,9 @@ static void rds_ib_process_recv(struct rds_connection *conn,
&conn->c_faddr);
return;
}
- byte_len -= sizeof(struct rds_header);
+ data_len -= sizeof(struct rds_header);
- ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];
+ ihdr = rds_ib_get_header(conn, recv, data_len);
/* Validate the checksum. */
if (!rds_message_verify_checksum(ihdr)) {
@@ -687,7 +728,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
if (ihdr->h_credit)
rds_ib_send_add_credits(conn, ihdr->h_credit);
- if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && byte_len == 0) {
+ if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
/* This is an ACK-only packet. The fact that it gets
* special treatment here is that historically, ACKs
* were rather special beasts.
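
rds_ib_get_header() above copes with a 3.0 peer that puts the header at the tail of the fragment: the header may sit entirely in the header buffer, entirely in the data page, or be split across both. A sketch of that case selection, assuming a 4096-byte fragment and a 48-byte header (the real values come from RDS_FRAG_SIZE and sizeof(struct rds_header)):

#include <stdio.h>

#define FRAG_SIZE	4096u
#define HDR_SIZE	48u

static const char *header_case(unsigned int data_len)
{
	if (data_len == FRAG_SIZE)
		return "all in header buffer";
	if (data_len <= FRAG_SIZE - HDR_SIZE)
		return "all in data page";
	return "split across data page and header buffer";
}

int main(void)
{
	unsigned int lens[] = { 4096, 1024, 4070 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
		unsigned int misplaced = 0;

		/* mirrors misplaced_hdr_bytes in the split case above */
		if (lens[i] > FRAG_SIZE - HDR_SIZE && lens[i] < FRAG_SIZE)
			misplaced = HDR_SIZE - (FRAG_SIZE - lens[i]);
		printf("data_len %4u: %s (misplaced header bytes: %u)\n",
		       lens[i], header_case(lens[i]), misplaced);
	}
	return 0;
}
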
diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c
index 02e3e3d50d4..d2c904dd6fb 100644
--- a/net/rds/ib_stats.c
+++ b/net/rds/ib_stats.c
@@ -37,9 +37,9 @@
#include "rds.h"
#include "ib.h"
-DEFINE_PER_CPU(struct rds_ib_statistics, rds_ib_stats) ____cacheline_aligned;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
-static char *rds_ib_stat_names[] = {
+static const char *const rds_ib_stat_names[] = {
"ib_connect_raced",
"ib_listen_closed_stale",
"ib_tx_cq_call",
diff --git a/net/rds/ib_sysctl.c b/net/rds/ib_sysctl.c
index d87830db93a..84b5ffcb280 100644
--- a/net/rds/ib_sysctl.c
+++ b/net/rds/ib_sysctl.c
@@ -53,7 +53,17 @@ unsigned long rds_ib_sysctl_max_unsig_bytes = (16 << 20);
static unsigned long rds_ib_sysctl_max_unsig_bytes_min = 1;
static unsigned long rds_ib_sysctl_max_unsig_bytes_max = ~0UL;
-unsigned int rds_ib_sysctl_flow_control = 1;
+/*
+ * This sysctl does nothing.
+ *
+ * Backwards compatibility with RDS 3.0 wire protocol
+ * disables initial FC credit exchange.
+ * If it's ever possible to drop 3.0 support,
+ * setting this to 1 and moving init/refill of send/recv
+ * rings from ib_cm_connect_complete() back into ib_setup_qp()
+ * will cause credits to be added before protocol negotiation.
+ */
+unsigned int rds_ib_sysctl_flow_control = 0;
ctl_table rds_ib_sysctl_table[] = {
{
diff --git a/net/rds/info.c b/net/rds/info.c
index 62aeef37aef..814a91a6f4a 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -79,6 +79,7 @@ void rds_info_register_func(int optname, rds_info_func func)
rds_info_funcs[offset] = func;
spin_unlock(&rds_info_lock);
}
+EXPORT_SYMBOL_GPL(rds_info_register_func);
void rds_info_deregister_func(int optname, rds_info_func func)
{
@@ -91,6 +92,7 @@ void rds_info_deregister_func(int optname, rds_info_func func)
rds_info_funcs[offset] = NULL;
spin_unlock(&rds_info_lock);
}
+EXPORT_SYMBOL_GPL(rds_info_deregister_func);
/*
* Typically we hold an atomic kmap across multiple rds_info_copy() calls
@@ -137,6 +139,7 @@ void rds_info_copy(struct rds_info_iterator *iter, void *data,
}
}
}
+EXPORT_SYMBOL_GPL(rds_info_copy);
/*
* @optval points to the userspace buffer that the information snapshot
diff --git a/net/rds/iw.c b/net/rds/iw.c
index d16e1cbc8e8..db224f7c293 100644
--- a/net/rds/iw.c
+++ b/net/rds/iw.c
@@ -83,23 +83,16 @@ void rds_iw_add_one(struct ib_device *device)
rds_iwdev->max_wrs = dev_attr->max_qp_wr;
rds_iwdev->max_sge = min(dev_attr->max_sge, RDS_IW_MAX_SGE);
- rds_iwdev->page_shift = max(PAGE_SHIFT, ffs(dev_attr->page_size_cap) - 1);
-
rds_iwdev->dev = device;
rds_iwdev->pd = ib_alloc_pd(device);
if (IS_ERR(rds_iwdev->pd))
goto free_dev;
if (!rds_iwdev->dma_local_lkey) {
- if (device->node_type != RDMA_NODE_RNIC) {
- rds_iwdev->mr = ib_get_dma_mr(rds_iwdev->pd,
- IB_ACCESS_LOCAL_WRITE);
- } else {
- rds_iwdev->mr = ib_get_dma_mr(rds_iwdev->pd,
- IB_ACCESS_REMOTE_READ |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_LOCAL_WRITE);
- }
+ rds_iwdev->mr = ib_get_dma_mr(rds_iwdev->pd,
+ IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(rds_iwdev->mr))
goto err_pd;
} else
@@ -291,6 +284,7 @@ struct rds_transport rds_iw_transport = {
.flush_mrs = rds_iw_flush_mrs,
.t_owner = THIS_MODULE,
.t_name = "iwarp",
+ .t_type = RDS_TRANS_IWARP,
.t_prefer_loopback = 1,
};
diff --git a/net/rds/iw.h b/net/rds/iw.h
index 0715dde323e..dd72b62bd50 100644
--- a/net/rds/iw.h
+++ b/net/rds/iw.h
@@ -181,7 +181,6 @@ struct rds_iw_device {
struct ib_pd *pd;
struct ib_mr *mr;
struct rds_iw_mr_pool *mr_pool;
- int page_shift;
int max_sge;
unsigned int max_wrs;
unsigned int dma_local_lkey:1;
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c
index dcdb37da80f..de4a1b16bf7 100644
--- a/net/rds/iw_rdma.c
+++ b/net/rds/iw_rdma.c
@@ -263,18 +263,12 @@ static void rds_iw_set_scatterlist(struct rds_iw_scatterlist *sg,
}
static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
- struct rds_iw_scatterlist *sg,
- unsigned int dma_page_shift)
+ struct rds_iw_scatterlist *sg)
{
struct ib_device *dev = rds_iwdev->dev;
u64 *dma_pages = NULL;
- u64 dma_mask;
- unsigned int dma_page_size;
int i, j, ret;
- dma_page_size = 1 << dma_page_shift;
- dma_mask = dma_page_size - 1;
-
WARN_ON(sg->dma_len);
sg->dma_len = ib_dma_map_sg(dev, sg->list, sg->len, DMA_BIDIRECTIONAL);
@@ -295,18 +289,18 @@ static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
sg->bytes += dma_len;
end_addr = dma_addr + dma_len;
- if (dma_addr & dma_mask) {
+ if (dma_addr & PAGE_MASK) {
if (i > 0)
goto out_unmap;
- dma_addr &= ~dma_mask;
+ dma_addr &= ~PAGE_MASK;
}
- if (end_addr & dma_mask) {
+ if (end_addr & PAGE_MASK) {
if (i < sg->dma_len - 1)
goto out_unmap;
- end_addr = (end_addr + dma_mask) & ~dma_mask;
+ end_addr = (end_addr + PAGE_MASK) & ~PAGE_MASK;
}
- sg->dma_npages += (end_addr - dma_addr) >> dma_page_shift;
+ sg->dma_npages += (end_addr - dma_addr) >> PAGE_SHIFT;
}
/* Now gather the dma addrs into one list */
@@ -325,8 +319,8 @@ static u64 *rds_iw_map_scatterlist(struct rds_iw_device *rds_iwdev,
u64 end_addr;
end_addr = dma_addr + dma_len;
- dma_addr &= ~dma_mask;
- for (; dma_addr < end_addr; dma_addr += dma_page_size)
+ dma_addr &= ~PAGE_MASK;
+ for (; dma_addr < end_addr; dma_addr += PAGE_SIZE)
dma_pages[j++] = dma_addr;
BUG_ON(j > sg->dma_npages);
}
@@ -727,7 +721,7 @@ static int rds_iw_rdma_build_fastreg(struct rds_iw_mapping *mapping)
f_wr.wr.fast_reg.rkey = mapping->m_rkey;
f_wr.wr.fast_reg.page_list = ibmr->page_list;
f_wr.wr.fast_reg.page_list_len = mapping->m_sg.dma_len;
- f_wr.wr.fast_reg.page_shift = ibmr->device->page_shift;
+ f_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
f_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_WRITE;
@@ -780,9 +774,7 @@ static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool,
rds_iw_set_scatterlist(&mapping->m_sg, sg, sg_len);
- dma_pages = rds_iw_map_scatterlist(rds_iwdev,
- &mapping->m_sg,
- rds_iwdev->page_shift);
+ dma_pages = rds_iw_map_scatterlist(rds_iwdev, &mapping->m_sg);
if (IS_ERR(dma_pages)) {
ret = PTR_ERR(dma_pages);
dma_pages = NULL;
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 44a6a0551f2..1f5abe3cf2b 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -779,7 +779,7 @@ static void rds_iw_build_send_fastreg(struct rds_iw_device *rds_iwdev, struct rd
send->s_wr.wr.fast_reg.rkey = send->s_mr->rkey;
send->s_wr.wr.fast_reg.page_list = send->s_page_list;
send->s_wr.wr.fast_reg.page_list_len = nent;
- send->s_wr.wr.fast_reg.page_shift = rds_iwdev->page_shift;
+ send->s_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
send->s_wr.wr.fast_reg.access_flags = IB_ACCESS_REMOTE_WRITE;
send->s_wr.wr.fast_reg.iova_start = sg_addr;
diff --git a/net/rds/iw_stats.c b/net/rds/iw_stats.c
index ccc7e8f0bf0..5fe67f6a1d8 100644
--- a/net/rds/iw_stats.c
+++ b/net/rds/iw_stats.c
@@ -37,9 +37,9 @@
#include "rds.h"
#include "iw.h"
-DEFINE_PER_CPU(struct rds_iw_statistics, rds_iw_stats) ____cacheline_aligned;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_iw_statistics, rds_iw_stats);
-static char *rds_iw_stat_names[] = {
+static const char *const rds_iw_stat_names[] = {
"iw_connect_raced",
"iw_listen_closed_stale",
"iw_tx_cq_call",
diff --git a/net/rds/message.c b/net/rds/message.c
index 5a15dc8d0cd..ca50a8ec974 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -50,6 +50,7 @@ void rds_message_addref(struct rds_message *rm)
rdsdebug("addref rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
atomic_inc(&rm->m_refcount);
}
+EXPORT_SYMBOL_GPL(rds_message_addref);
/*
* This relies on dma_map_sg() not touching sg[].page during merging.
@@ -92,6 +93,7 @@ void rds_message_put(struct rds_message *rm)
kfree(rm);
}
}
+EXPORT_SYMBOL_GPL(rds_message_put);
void rds_message_inc_free(struct rds_incoming *inc)
{
@@ -108,6 +110,7 @@ void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
hdr->h_sequence = cpu_to_be64(seq);
hdr->h_exthdr[0] = RDS_EXTHDR_NONE;
}
+EXPORT_SYMBOL_GPL(rds_message_populate_header);
int rds_message_add_extension(struct rds_header *hdr,
unsigned int type, const void *data, unsigned int len)
@@ -133,6 +136,7 @@ int rds_message_add_extension(struct rds_header *hdr,
dst[len] = RDS_EXTHDR_NONE;
return 1;
}
+EXPORT_SYMBOL_GPL(rds_message_add_extension);
/*
* If a message has extension headers, retrieve them here.
@@ -208,6 +212,7 @@ int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 o
ext_hdr.h_rdma_offset = cpu_to_be32(offset);
return rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST, &ext_hdr, sizeof(ext_hdr));
}
+EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension);
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp)
{
@@ -399,4 +404,5 @@ void rds_message_unmapped(struct rds_message *rm)
if (waitqueue_active(&rds_message_flush_waitq))
wake_up(&rds_message_flush_waitq);
}
+EXPORT_SYMBOL_GPL(rds_message_unmapped);
diff --git a/net/rds/page.c b/net/rds/page.c
index c460743a89a..36790122dfd 100644
--- a/net/rds/page.c
+++ b/net/rds/page.c
@@ -39,7 +39,7 @@ struct rds_page_remainder {
unsigned long r_offset;
};
-DEFINE_PER_CPU(struct rds_page_remainder, rds_page_remainders) ____cacheline_aligned;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_page_remainder, rds_page_remainders);
/*
* returns 0 on success or -errno on failure.
@@ -81,6 +81,7 @@ int rds_page_copy_user(struct page *page, unsigned long offset,
return 0;
}
+EXPORT_SYMBOL_GPL(rds_page_copy_user);
/*
* Message allocation uses this to build up regions of a message.
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 7d0f901c93d..9ece910ea39 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -101,7 +101,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
break;
case RDMA_CM_EVENT_DISCONNECTED:
- printk(KERN_WARNING "RDS/IW: DISCONNECT event - dropping connection "
+ printk(KERN_WARNING "RDS/RDMA: DISCONNECT event - dropping connection "
"%pI4->%pI4\n", &conn->c_laddr,
&conn->c_faddr);
rds_conn_drop(conn);
@@ -132,12 +132,12 @@ static int __init rds_rdma_listen_init(void)
cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP);
if (IS_ERR(cm_id)) {
ret = PTR_ERR(cm_id);
- printk(KERN_ERR "RDS/IW: failed to setup listener, "
+ printk(KERN_ERR "RDS/RDMA: failed to setup listener, "
"rdma_create_id() returned %d\n", ret);
goto out;
}
- sin.sin_family = PF_INET,
+ sin.sin_family = AF_INET,
sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY);
sin.sin_port = (__force u16)htons(RDS_PORT);
@@ -147,14 +147,14 @@ static int __init rds_rdma_listen_init(void)
*/
ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
if (ret) {
- printk(KERN_ERR "RDS/IW: failed to setup listener, "
+ printk(KERN_ERR "RDS/RDMA: failed to setup listener, "
"rdma_bind_addr() returned %d\n", ret);
goto out;
}
ret = rdma_listen(cm_id, 128);
if (ret) {
- printk(KERN_ERR "RDS/IW: failed to setup listener, "
+ printk(KERN_ERR "RDS/RDMA: failed to setup listener, "
"rdma_listen() returned %d\n", ret);
goto out;
}
@@ -203,6 +203,7 @@ err_iw_init:
out:
return ret;
}
+module_init(rds_rdma_init);
void rds_rdma_exit(void)
{
@@ -211,4 +212,9 @@ void rds_rdma_exit(void)
rds_ib_exit();
rds_iw_exit();
}
+module_exit(rds_rdma_exit);
+
+MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
+MODULE_DESCRIPTION("RDS: IB/iWARP transport");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/net/rds/rds.h b/net/rds/rds.h
index dbe11123678..85d6f897ecc 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -311,11 +311,17 @@ struct rds_notifier {
* flag and header.
*/
+#define RDS_TRANS_IB 0
+#define RDS_TRANS_IWARP 1
+#define RDS_TRANS_TCP 2
+#define RDS_TRANS_COUNT 3
+
struct rds_transport {
char t_name[TRANSNAMSIZ];
struct list_head t_item;
struct module *t_owner;
unsigned int t_prefer_loopback:1;
+ unsigned int t_type;
int (*laddr_check)(__be32 addr);
int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
@@ -652,7 +658,8 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
int __init rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
- uint64_t *values, char **names, size_t nr);
+ uint64_t *values, const char *const *names,
+ size_t nr);
/* sysctl.c */
int __init rds_sysctl_init(void);
diff --git a/net/rds/recv.c b/net/rds/recv.c
index f2118c51cfa..fdff33c7b43 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -46,12 +46,14 @@ void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
inc->i_saddr = saddr;
inc->i_rdma_cookie = 0;
}
+EXPORT_SYMBOL_GPL(rds_inc_init);
void rds_inc_addref(struct rds_incoming *inc)
{
rdsdebug("addref inc %p ref %d\n", inc, atomic_read(&inc->i_refcount));
atomic_inc(&inc->i_refcount);
}
+EXPORT_SYMBOL_GPL(rds_inc_addref);
void rds_inc_put(struct rds_incoming *inc)
{
@@ -62,6 +64,7 @@ void rds_inc_put(struct rds_incoming *inc)
inc->i_conn->c_trans->inc_free(inc);
}
}
+EXPORT_SYMBOL_GPL(rds_inc_put);
static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk,
struct rds_cong_map *map,
@@ -237,6 +240,7 @@ out:
if (rs)
rds_sock_put(rs);
}
+EXPORT_SYMBOL_GPL(rds_recv_incoming);
/*
* be very careful here. This is being called as the condition in
@@ -409,18 +413,18 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
if (msg_flags & MSG_OOB)
goto out;
- /* If there are pending notifications, do those - and nothing else */
- if (!list_empty(&rs->rs_notify_queue)) {
- ret = rds_notify_queue_get(rs, msg);
- goto out;
- }
+ while (1) {
+ /* If there are pending notifications, do those - and nothing else */
+ if (!list_empty(&rs->rs_notify_queue)) {
+ ret = rds_notify_queue_get(rs, msg);
+ break;
+ }
- if (rs->rs_cong_notify) {
- ret = rds_notify_cong(rs, msg);
- goto out;
- }
+ if (rs->rs_cong_notify) {
+ ret = rds_notify_cong(rs, msg);
+ break;
+ }
- while (1) {
if (!rds_next_incoming(rs, &inc)) {
if (nonblock) {
ret = -EAGAIN;
@@ -428,7 +432,9 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
}
timeo = wait_event_interruptible_timeout(*sk->sk_sleep,
- rds_next_incoming(rs, &inc),
+ (!list_empty(&rs->rs_notify_queue)
+ || rs->rs_cong_notify
+ || rds_next_incoming(rs, &inc)),
timeo);
rdsdebug("recvmsg woke inc %p timeo %ld\n", inc,
timeo);
diff --git a/net/rds/send.c b/net/rds/send.c
index a4a7f428cd7..28c88ff3d03 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -439,6 +439,7 @@ void rds_rdma_send_complete(struct rds_message *rm, int status)
sock_put(rds_rs_to_sk(rs));
}
}
+EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
/*
* This is the same as rds_rdma_send_complete except we
@@ -494,6 +495,7 @@ out:
return found;
}
+EXPORT_SYMBOL_GPL(rds_send_get_message);
/*
* This removes messages from the socket's list if they're on it. The list
@@ -610,6 +612,7 @@ void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
/* now remove the messages from the sock list as needed */
rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
+EXPORT_SYMBOL_GPL(rds_send_drop_acked);
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
diff --git a/net/rds/stats.c b/net/rds/stats.c
index 637146893cf..7598eb07cfb 100644
--- a/net/rds/stats.c
+++ b/net/rds/stats.c
@@ -37,10 +37,11 @@
#include "rds.h"
DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
+EXPORT_PER_CPU_SYMBOL_GPL(rds_stats);
/* :.,$s/unsigned long\>.*\<s_\(.*\);/"\1",/g */
-static char *rds_stat_names[] = {
+static const char *const rds_stat_names[] = {
"conn_reset",
"recv_drop_bad_checksum",
"recv_drop_old_seq",
@@ -77,7 +78,7 @@ static char *rds_stat_names[] = {
};
void rds_stats_info_copy(struct rds_info_iterator *iter,
- uint64_t *values, char **names, size_t nr)
+ uint64_t *values, const char *const *names, size_t nr)
{
struct rds_info_counter ctr;
size_t i;
@@ -90,6 +91,7 @@ void rds_stats_info_copy(struct rds_info_iterator *iter,
rds_info_copy(iter, &ctr, sizeof(ctr));
}
}
+EXPORT_SYMBOL_GPL(rds_stats_info_copy);
/*
* This gives global counters across all the transports. The strings
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
new file mode 100644
index 00000000000..b5198aee45d
--- /dev/null
+++ b/net/rds/tcp.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2006 Oracle. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/in.h>
+#include <net/tcp.h>
+
+#include "rds.h"
+#include "tcp.h"
+
+/* only for info exporting */
+static DEFINE_SPINLOCK(rds_tcp_tc_list_lock);
+static LIST_HEAD(rds_tcp_tc_list);
+unsigned int rds_tcp_tc_count;
+
+/* Track rds_tcp_connection structs so they can be cleaned up */
+static DEFINE_SPINLOCK(rds_tcp_conn_lock);
+static LIST_HEAD(rds_tcp_conn_list);
+
+static struct kmem_cache *rds_tcp_conn_slab;
+
+#define RDS_TCP_DEFAULT_BUFSIZE (128 * 1024)
+
+/* doing it this way avoids calling tcp_sk() */
+void rds_tcp_nonagle(struct socket *sock)
+{
+ mm_segment_t oldfs = get_fs();
+ int val = 1;
+
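+ /*
+ * setsockopt() expects a user pointer; temporarily widening the
+ * address limit to KERNEL_DS lets us hand it &val from kernel space.
+ */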
+ set_fs(KERNEL_DS);
+ sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
+ sizeof(val));
+ set_fs(oldfs);
+}
+
+void rds_tcp_tune(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ rds_tcp_nonagle(sock);
+
+ /*
+ * We're trying to saturate gigabit with the default,
+ * see svc_sock_setbufsize().
+ */
+ lock_sock(sk);
+ sk->sk_sndbuf = RDS_TCP_DEFAULT_BUFSIZE;
+ sk->sk_rcvbuf = RDS_TCP_DEFAULT_BUFSIZE;
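+ /* lock the sizes so TCP's buffer autotuning won't override them */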
+ sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
+ release_sock(sk);
+}
+
+u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)
+{
+ return tcp_sk(tc->t_sock->sk)->snd_nxt;
+}
+
+u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
+{
+ return tcp_sk(tc->t_sock->sk)->snd_una;
+}
+
+void rds_tcp_restore_callbacks(struct socket *sock,
+ struct rds_tcp_connection *tc)
+{
+ rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc);
+ write_lock_bh(&sock->sk->sk_callback_lock);
+
+ /* done under the callback_lock to serialize with write_space */
+ spin_lock(&rds_tcp_tc_list_lock);
+ list_del_init(&tc->t_list_item);
+ rds_tcp_tc_count--;
+ spin_unlock(&rds_tcp_tc_list_lock);
+
+ tc->t_sock = NULL;
+
+ sock->sk->sk_write_space = tc->t_orig_write_space;
+ sock->sk->sk_data_ready = tc->t_orig_data_ready;
+ sock->sk->sk_state_change = tc->t_orig_state_change;
+ sock->sk->sk_user_data = NULL;
+
+ write_unlock_bh(&sock->sk->sk_callback_lock);
+}
+
+/*
+ * This is the only path that sets tc->t_sock. Send and receive trust that
+ * it is set. The RDS_CONN_CONNECTED bit protects those paths from being
+ * called while it isn't set.
+ */
+void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
+{
+ struct rds_tcp_connection *tc = conn->c_transport_data;
+
+ rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc);
+ write_lock_bh(&sock->sk->sk_callback_lock);
+
+ /* done under the callback_lock to serialize with write_space */
+ spin_lock(&rds_tcp_tc_list_lock);
+ list_add_tail(&tc->t_list_item, &rds_tcp_tc_list);
+ rds_tcp_tc_count++;
+ spin_unlock(&rds_tcp_tc_list_lock);
+
+ /* accepted sockets need our listen data ready undone */
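+ /*
+ * (the child inherited the listener's callbacks, including the original
+ * data_ready that rds_tcp_listen_init() stashed in sk_user_data, so put
+ * that back before saving and replacing this socket's callbacks below)
+ */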
+ if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready)
+ sock->sk->sk_data_ready = sock->sk->sk_user_data;
+
+ tc->t_sock = sock;
+ tc->conn = conn;
+ tc->t_orig_data_ready = sock->sk->sk_data_ready;
+ tc->t_orig_write_space = sock->sk->sk_write_space;
+ tc->t_orig_state_change = sock->sk->sk_state_change;
+
+ sock->sk->sk_user_data = conn;
+ sock->sk->sk_data_ready = rds_tcp_data_ready;
+ sock->sk->sk_write_space = rds_tcp_write_space;
+ sock->sk->sk_state_change = rds_tcp_state_change;
+
+ write_unlock_bh(&sock->sk->sk_callback_lock);
+}
+
+static void rds_tcp_tc_info(struct socket *sock, unsigned int len,
+ struct rds_info_iterator *iter,
+ struct rds_info_lengths *lens)
+{
+ struct rds_info_tcp_socket tsinfo;
+ struct rds_tcp_connection *tc;
+ unsigned long flags;
+ struct sockaddr_in sin;
+ int sinlen;
+
+ spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);
+
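+ /*
+ * If there isn't room for every entry, skip the copy but still fill
+ * in lens below so the caller can retry with a bigger buffer.
+ */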
+ if (len / sizeof(tsinfo) < rds_tcp_tc_count)
+ goto out;
+
+ list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {
+
+ sock->ops->getname(sock, (struct sockaddr *)&sin, &sinlen, 0);
+ tsinfo.local_addr = sin.sin_addr.s_addr;
+ tsinfo.local_port = sin.sin_port;
+ sock->ops->getname(sock, (struct sockaddr *)&sin, &sinlen, 1);
+ tsinfo.peer_addr = sin.sin_addr.s_addr;
+ tsinfo.peer_port = sin.sin_port;
+
+ tsinfo.hdr_rem = tc->t_tinc_hdr_rem;
+ tsinfo.data_rem = tc->t_tinc_data_rem;
+ tsinfo.last_sent_nxt = tc->t_last_sent_nxt;
+ tsinfo.last_expected_una = tc->t_last_expected_una;
+ tsinfo.last_seen_una = tc->t_last_seen_una;
+
+ rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
+ }
+
+out:
+ lens->nr = rds_tcp_tc_count;
+ lens->each = sizeof(tsinfo);
+
+ spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
+}
+
+static int rds_tcp_laddr_check(__be32 addr)
+{
+ if (inet_addr_type(&init_net, addr) == RTN_LOCAL)
+ return 0;
+ return -EADDRNOTAVAIL;
+}
+
+static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
+{
+ struct rds_tcp_connection *tc;
+
+ tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
+ if (tc == NULL)
+ return -ENOMEM;
+
+ tc->t_sock = NULL;
+ tc->t_tinc = NULL;
+ tc->t_tinc_hdr_rem = sizeof(struct rds_header);
+ tc->t_tinc_data_rem = 0;
+
+ conn->c_transport_data = tc;
+
+ spin_lock_irq(&rds_tcp_conn_lock);
+ list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
+ spin_unlock_irq(&rds_tcp_conn_lock);
+
+ rdsdebug("alloced tc %p\n", conn->c_transport_data);
+ return 0;
+}
+
+static void rds_tcp_conn_free(void *arg)
+{
+ struct rds_tcp_connection *tc = arg;
+ rdsdebug("freeing tc %p\n", tc);
+ kmem_cache_free(rds_tcp_conn_slab, tc);
+}
+
+static void rds_tcp_destroy_conns(void)
+{
+ struct rds_tcp_connection *tc, *_tc;
+ LIST_HEAD(tmp_list);
+
+ /* avoid calling conn_destroy with irqs off */
+ spin_lock_irq(&rds_tcp_conn_lock);
+ list_splice(&rds_tcp_conn_list, &tmp_list);
+ INIT_LIST_HEAD(&rds_tcp_conn_list);
+ spin_unlock_irq(&rds_tcp_conn_lock);
+
+ list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
+ if (tc->conn->c_passive)
+ rds_conn_destroy(tc->conn->c_passive);
+ rds_conn_destroy(tc->conn);
+ }
+}
+
+void rds_tcp_exit(void)
+{
+ rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
+ rds_tcp_listen_stop();
+ rds_tcp_destroy_conns();
+ rds_trans_unregister(&rds_tcp_transport);
+ rds_tcp_recv_exit();
+ kmem_cache_destroy(rds_tcp_conn_slab);
+}
+module_exit(rds_tcp_exit);
+
+struct rds_transport rds_tcp_transport = {
+ .laddr_check = rds_tcp_laddr_check,
+ .xmit_prepare = rds_tcp_xmit_prepare,
+ .xmit_complete = rds_tcp_xmit_complete,
+ .xmit_cong_map = rds_tcp_xmit_cong_map,
+ .xmit = rds_tcp_xmit,
+ .recv = rds_tcp_recv,
+ .conn_alloc = rds_tcp_conn_alloc,
+ .conn_free = rds_tcp_conn_free,
+ .conn_connect = rds_tcp_conn_connect,
+ .conn_shutdown = rds_tcp_conn_shutdown,
+ .inc_copy_to_user = rds_tcp_inc_copy_to_user,
+ .inc_purge = rds_tcp_inc_purge,
+ .inc_free = rds_tcp_inc_free,
+ .stats_info_copy = rds_tcp_stats_info_copy,
+ .exit = rds_tcp_exit,
+ .t_owner = THIS_MODULE,
+ .t_name = "tcp",
+ .t_type = RDS_TRANS_TCP,
+ .t_prefer_loopback = 1,
+};
+
+int __init rds_tcp_init(void)
+{
+ int ret;
+
+ rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
+ sizeof(struct rds_tcp_connection),
+ 0, 0, NULL);
+ if (rds_tcp_conn_slab == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = rds_tcp_recv_init();
+ if (ret)
+ goto out_slab;
+
+ ret = rds_trans_register(&rds_tcp_transport);
+ if (ret)
+ goto out_recv;
+
+ ret = rds_tcp_listen_init();
+ if (ret)
+ goto out_register;
+
+ rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
+
+ goto out;
+
+out_register:
+ rds_trans_unregister(&rds_tcp_transport);
+out_recv:
+ rds_tcp_recv_exit();
+out_slab:
+ kmem_cache_destroy(rds_tcp_conn_slab);
+out:
+ return ret;
+}
+module_init(rds_tcp_init);
+
+MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
+MODULE_DESCRIPTION("RDS: TCP transport");
+MODULE_LICENSE("Dual BSD/GPL");
+
diff --git a/net/rds/tcp.h b/net/rds/tcp.h
new file mode 100644
index 00000000000..844fa6b9cf5
--- /dev/null
+++ b/net/rds/tcp.h
@@ -0,0 +1,93 @@
+#ifndef _RDS_TCP_H
+#define _RDS_TCP_H
+
+#define RDS_TCP_PORT 16385
+
+struct rds_tcp_incoming {
+ struct rds_incoming ti_inc;
+ struct sk_buff_head ti_skb_list;
+};
+
+struct rds_tcp_connection {
+
+ struct list_head t_tcp_node;
+ struct rds_connection *conn;
+ struct socket *t_sock;
+ void *t_orig_write_space;
+ void *t_orig_data_ready;
+ void *t_orig_state_change;
+
+ struct rds_tcp_incoming *t_tinc;
+ size_t t_tinc_hdr_rem;
+ size_t t_tinc_data_rem;
+
+ /* XXX error report? */
+ struct work_struct t_conn_w;
+ struct work_struct t_send_w;
+ struct work_struct t_down_w;
+ struct work_struct t_recv_w;
+
+ /* for info exporting only */
+ struct list_head t_list_item;
+ u32 t_last_sent_nxt;
+ u32 t_last_expected_una;
+ u32 t_last_seen_una;
+};
+
+struct rds_tcp_statistics {
+ uint64_t s_tcp_data_ready_calls;
+ uint64_t s_tcp_write_space_calls;
+ uint64_t s_tcp_sndbuf_full;
+ uint64_t s_tcp_connect_raced;
+ uint64_t s_tcp_listen_closed_stale;
+};
+
+/* tcp.c */
+int __init rds_tcp_init(void);
+void rds_tcp_exit(void);
+void rds_tcp_tune(struct socket *sock);
+void rds_tcp_nonagle(struct socket *sock);
+void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn);
+void rds_tcp_restore_callbacks(struct socket *sock,
+ struct rds_tcp_connection *tc);
+u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
+u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
+u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
+extern struct rds_transport rds_tcp_transport;
+
+/* tcp_connect.c */
+int rds_tcp_conn_connect(struct rds_connection *conn);
+void rds_tcp_conn_shutdown(struct rds_connection *conn);
+void rds_tcp_state_change(struct sock *sk);
+
+/* tcp_listen.c */
+int __init rds_tcp_listen_init(void);
+void rds_tcp_listen_stop(void);
+void rds_tcp_listen_data_ready(struct sock *sk, int bytes);
+
+/* tcp_recv.c */
+int __init rds_tcp_recv_init(void);
+void rds_tcp_recv_exit(void);
+void rds_tcp_data_ready(struct sock *sk, int bytes);
+int rds_tcp_recv(struct rds_connection *conn);
+void rds_tcp_inc_purge(struct rds_incoming *inc);
+void rds_tcp_inc_free(struct rds_incoming *inc);
+int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
+ size_t size);
+
+/* tcp_send.c */
+void rds_tcp_xmit_prepare(struct rds_connection *conn);
+void rds_tcp_xmit_complete(struct rds_connection *conn);
+int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
+ unsigned int hdr_off, unsigned int sg, unsigned int off);
+void rds_tcp_write_space(struct sock *sk);
+int rds_tcp_xmit_cong_map(struct rds_connection *conn,
+ struct rds_cong_map *map, unsigned long offset);
+
+/* tcp_stats.c */
+DECLARE_PER_CPU(struct rds_tcp_statistics, rds_tcp_stats);
+#define rds_tcp_stats_inc(member) rds_stats_inc_which(rds_tcp_stats, member)
+unsigned int rds_tcp_stats_info_copy(struct rds_info_iterator *iter,
+ unsigned int avail);
+
+#endif
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c
new file mode 100644
index 00000000000..211522f9a9a
--- /dev/null
+++ b/net/rds/tcp_connect.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2006 Oracle. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/in.h>
+#include <net/tcp.h>
+
+#include "rds.h"
+#include "tcp.h"
+
+void rds_tcp_state_change(struct sock *sk)
+{
+ void (*state_change)(struct sock *sk);
+ struct rds_connection *conn;
+ struct rds_tcp_connection *tc;
+
+ read_lock(&sk->sk_callback_lock);
+ conn = sk->sk_user_data;
+ if (conn == NULL) {
+ state_change = sk->sk_state_change;
+ goto out;
+ }
+ tc = conn->c_transport_data;
+ state_change = tc->t_orig_state_change;
+
+ rdsdebug("sock %p state_change to %d\n", tc->t_sock, sk->sk_state);
+
+ switch (sk->sk_state) {
+ /* ignore connecting sockets as they make progress */
+ case TCP_SYN_SENT:
+ case TCP_SYN_RECV:
+ break;
+ case TCP_ESTABLISHED:
+ rds_connect_complete(conn);
+ break;
+ case TCP_CLOSE:
+ rds_conn_drop(conn);
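+ /* fall through */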
+ default:
+ break;
+ }
+out:
+ read_unlock(&sk->sk_callback_lock);
+ state_change(sk);
+}
+
+int rds_tcp_conn_connect(struct rds_connection *conn)
+{
+ struct socket *sock = NULL;
+ struct sockaddr_in src, dest;
+ int ret;
+
+ ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+ if (ret < 0)
+ goto out;
+
+ rds_tcp_tune(sock);
+
+ src.sin_family = AF_INET;
+ src.sin_addr.s_addr = (__force u32)conn->c_laddr;
+ src.sin_port = (__force u16)htons(0);
+
+ ret = sock->ops->bind(sock, (struct sockaddr *)&src, sizeof(src));
+ if (ret) {
+ rdsdebug("bind failed with %d at address %u.%u.%u.%u\n",
+ ret, NIPQUAD(conn->c_laddr));
+ goto out;
+ }
+
+ dest.sin_family = AF_INET;
+ dest.sin_addr.s_addr = (__force u32)conn->c_faddr;
+ dest.sin_port = (__force u16)htons(RDS_TCP_PORT);
+
+ /*
+ * once we call connect() we can start getting callbacks and they
+ * own the socket
+ */
+ rds_tcp_set_callbacks(sock, conn);
+ ret = sock->ops->connect(sock, (struct sockaddr *)&dest, sizeof(dest),
+ O_NONBLOCK);
+ sock = NULL;
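+ /*
+ * The callbacks own the socket now; clearing sock keeps the error
+ * path below from releasing it.
+ */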
+
+ rdsdebug("connect to address %u.%u.%u.%u returned %d\n",
+ NIPQUAD(conn->c_faddr), ret);
+ if (ret == -EINPROGRESS)
+ ret = 0;
+
+out:
+ if (sock)
+ sock_release(sock);
+ return ret;
+}
+
+/*
+ * Before killing the tcp socket this needs to serialize with callbacks. The
+ * caller has already grabbed the sending sem so we're serialized with other
+ * senders.
+ *
+ * TCP calls the callbacks with the sock lock so we hold it while we reset the
+ * callbacks to those set by TCP. Our callbacks won't execute again once we
+ * hold the sock lock.
+ */
+void rds_tcp_conn_shutdown(struct rds_connection *conn)
+{
+ struct rds_tcp_connection *tc = conn->c_transport_data;
+ struct socket *sock = tc->t_sock;
+
+ rdsdebug("shutting down conn %p tc %p sock %p\n", conn, tc, sock);
+
+ if (sock) {
+ sock->ops->shutdown(sock, RCV_SHUTDOWN | SEND_SHUTDOWN);
+ lock_sock(sock->sk);
+ rds_tcp_restore_callbacks(sock, tc); /* tc->t_sock = NULL */
+
+ release_sock(sock->sk);
+ sock_release(sock);
+ }
+
+ if (tc->t_tinc) {
+ rds_inc_put(&tc->t_tinc->ti_inc);
+ tc->t_tinc = NULL;
+ }
+ tc->t_tinc_hdr_rem = sizeof(struct rds_header);
+ tc->t_tinc_data_rem = 0;
+}
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
new file mode 100644
index 00000000000..24b743eb0b1
--- /dev/null
+++ b/net/rds/tcp_listen.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2006 Oracle. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/in.h>
+#include <net/tcp.h>
+
+#include "rds.h"
+#include "tcp.h"
+
+/*
+ * cheesy, but simple..
+ */
+static void rds_tcp_accept_worker(struct work_struct *work);
+static DECLARE_WORK(rds_tcp_listen_work, rds_tcp_accept_worker);
+static struct socket *rds_tcp_listen_sock;
+
+static int rds_tcp_accept_one(struct socket *sock)
+{
+ struct socket *new_sock = NULL;
+ struct rds_connection *conn;
+ int ret;
+ struct inet_sock *inet;
+
+ ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
+ sock->sk->sk_protocol, &new_sock);
+ if (ret)
+ goto out;
+
+ new_sock->type = sock->type;
+ new_sock->ops = sock->ops;
+ ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
+ if (ret < 0)
+ goto out;
+
+ rds_tcp_tune(new_sock);
+
+ inet = inet_sk(new_sock->sk);
+
+ rdsdebug("accepted tcp %u.%u.%u.%u:%u -> %u.%u.%u.%u:%u\n",
+ NIPQUAD(inet->saddr), ntohs(inet->sport),
+ NIPQUAD(inet->daddr), ntohs(inet->dport));
+
+ conn = rds_conn_create(inet->saddr, inet->daddr, &rds_tcp_transport,
+ GFP_KERNEL);
+ if (IS_ERR(conn)) {
+ ret = PTR_ERR(conn);
+ goto out;
+ }
+
+ /*
+ * see the comment above rds_queue_delayed_reconnect()
+ */
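+ /*
+ * If the connection isn't DOWN then either it is already established
+ * (a stale listen) or our own connect attempt raced with the peer's;
+ * either way, count it, drop the connection and discard this socket.
+ */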
+ if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
+ if (rds_conn_state(conn) == RDS_CONN_UP)
+ rds_tcp_stats_inc(s_tcp_listen_closed_stale);
+ else
+ rds_tcp_stats_inc(s_tcp_connect_raced);
+ rds_conn_drop(conn);
+ ret = 0;
+ goto out;
+ }
+
+ rds_tcp_set_callbacks(new_sock, conn);
+ rds_connect_complete(conn);
+ new_sock = NULL;
+ ret = 0;
+
+out:
+ if (new_sock)
+ sock_release(new_sock);
+ return ret;
+}
+
+static void rds_tcp_accept_worker(struct work_struct *work)
+{
+ while (rds_tcp_accept_one(rds_tcp_listen_sock) == 0)
+ cond_resched();
+}
+
+void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
+{
+ void (*ready)(struct sock *sk, int bytes);
+
+ rdsdebug("listen data ready sk %p\n", sk);
+
+ read_lock(&sk->sk_callback_lock);
+ ready = sk->sk_user_data;
+ if (ready == NULL) { /* check for teardown race */
+ ready = sk->sk_data_ready;
+ goto out;
+ }
+
+ /*
+ * ->sk_data_ready is also called for a newly established child socket
+ * before it has been accepted and the accepter has set up their
+ * data_ready.. we only want to queue listen work for our listening
+ * socket
+ */
+ if (sk->sk_state == TCP_LISTEN)
+ queue_work(rds_wq, &rds_tcp_listen_work);
+
+out:
+ read_unlock(&sk->sk_callback_lock);
+ ready(sk, bytes);
+}
+
+int __init rds_tcp_listen_init(void)
+{
+ struct sockaddr_in sin;
+ struct socket *sock = NULL;
+ int ret;
+
+ ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+ if (ret < 0)
+ goto out;
+
+ sock->sk->sk_reuse = 1;
+ rds_tcp_nonagle(sock);
+
+ write_lock_bh(&sock->sk->sk_callback_lock);
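+ /*
+ * Stash TCP's data_ready in sk_user_data so rds_tcp_listen_data_ready()
+ * can chain to it and use it to spot a teardown race.
+ */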
+ sock->sk->sk_user_data = sock->sk->sk_data_ready;
+ sock->sk->sk_data_ready = rds_tcp_listen_data_ready;
+ write_unlock_bh(&sock->sk->sk_callback_lock);
+
+ sin.sin_family = AF_INET;
+ sin.sin_addr.s_addr = (__force u32)htonl(INADDR_ANY);
+ sin.sin_port = (__force u16)htons(RDS_TCP_PORT);
+
+ ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
+ if (ret < 0)
+ goto out;
+
+ ret = sock->ops->listen(sock, 64);
+ if (ret < 0)
+ goto out;
+
+ rds_tcp_listen_sock = sock;
+ sock = NULL;
+out:
+ if (sock)
+ sock_release(sock);
+ return ret;
+}
+
+void rds_tcp_listen_stop(void)
+{
+ struct socket *sock = rds_tcp_listen_sock;
+ struct sock *sk;
+
+ if (sock == NULL)
+ return;
+
+ sk = sock->sk;
+
+ /* serialize with and prevent further callbacks */
+ lock_sock(sk);
+ write_lock_bh(&sk->sk_callback_lock);
+ if (sk->sk_user_data) {
+ sk->sk_data_ready = sk->sk_user_data;
+ sk->sk_user_data = NULL;
+ }
+ write_unlock_bh(&sk->sk_callback_lock);
+ release_sock(sk);
+
+ /* wait for accepts to stop and close the socket */
+ flush_workqueue(rds_wq);
+ sock_release(sock);
+ rds_tcp_listen_sock = NULL;
+}
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
new file mode 100644
index 00000000000..c00dafffbb5
--- /dev/null
+++ b/net/rds/tcp_recv.c
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2006 Oracle. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/kernel.h>
+#include <net/tcp.h>
+
+#include "rds.h"
+#include "tcp.h"
+
+static struct kmem_cache *rds_tcp_incoming_slab;
+
+void rds_tcp_inc_purge(struct rds_incoming *inc)
+{
+ struct rds_tcp_incoming *tinc;
+ tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
+ rdsdebug("purging tinc %p inc %p\n", tinc, inc);
+ skb_queue_purge(&tinc->ti_skb_list);
+}
+
+void rds_tcp_inc_free(struct rds_incoming *inc)
+{
+ struct rds_tcp_incoming *tinc;
+ tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
+ rds_tcp_inc_purge(inc);
+ rdsdebug("freeing tinc %p inc %p\n", tinc, inc);
+ kmem_cache_free(rds_tcp_incoming_slab, tinc);
+}
+
+/*
+ * this is pretty lame, but, whatever.
+ */
+int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
+ size_t size)
+{
+ struct rds_tcp_incoming *tinc;
+ struct iovec *iov, tmp;
+ struct sk_buff *skb;
+ unsigned long to_copy, skb_off;
+ int ret = 0;
+
+ if (size == 0)
+ goto out;
+
+ tinc = container_of(inc, struct rds_tcp_incoming, ti_inc);
+ iov = first_iov;
+ tmp = *iov;
+
+ skb_queue_walk(&tinc->ti_skb_list, skb) {
+ skb_off = 0;
+ while (skb_off < skb->len) {
+ while (tmp.iov_len == 0) {
+ iov++;
+ tmp = *iov;
+ }
+
+ to_copy = min(tmp.iov_len, size);
+ to_copy = min(to_copy, skb->len - skb_off);
+
+ rdsdebug("ret %d size %zu skb %p skb_off %lu "
+ "skblen %d iov_base %p iov_len %zu cpy %lu\n",
+ ret, size, skb, skb_off, skb->len,
+ tmp.iov_base, tmp.iov_len, to_copy);
+
+ /* modifies tmp as it copies */
+ if (skb_copy_datagram_iovec(skb, skb_off, &tmp,
+ to_copy)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ size -= to_copy;
+ ret += to_copy;
+ skb_off += to_copy;
+ if (size == 0)
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
+/*
+ * We have a series of skbs that have fragmented pieces of the congestion
+ * bitmap. They must add up to the exact size of the congestion bitmap. We
+ * use the skb helpers to copy those into the pages that make up the in-memory
+ * congestion bitmap for the remote address of this connection. We then tell
+ * the congestion core that the bitmap has been changed so that it can wake up
+ * sleepers.
+ *
+ * This is racing with sending paths which are using test_bit to see if the
+ * bitmap indicates that their recipient is congested.
+ */
+
+static void rds_tcp_cong_recv(struct rds_connection *conn,
+ struct rds_tcp_incoming *tinc)
+{
+ struct sk_buff *skb;
+ unsigned int to_copy, skb_off;
+ unsigned int map_off;
+ unsigned int map_page;
+ struct rds_cong_map *map;
+ int ret;
+
+ /* catch completely corrupt packets */
+ if (be32_to_cpu(tinc->ti_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
+ return;
+
+ map_page = 0;
+ map_off = 0;
+ map = conn->c_fcong;
+
+ skb_queue_walk(&tinc->ti_skb_list, skb) {
+ skb_off = 0;
+ while (skb_off < skb->len) {
+ to_copy = min_t(unsigned int, PAGE_SIZE - map_off,
+ skb->len - skb_off);
+
+ BUG_ON(map_page >= RDS_CONG_MAP_PAGES);
+
+ /* only returns 0 or -error */
+ ret = skb_copy_bits(skb, skb_off,
+ (void *)map->m_page_addrs[map_page] + map_off,
+ to_copy);
+ BUG_ON(ret != 0);
+
+ skb_off += to_copy;
+ map_off += to_copy;
+ if (map_off == PAGE_SIZE) {
+ map_off = 0;
+ map_page++;
+ }
+ }
+ }
+
+ rds_cong_map_updated(map, ~(u64) 0);
+}
+
+struct rds_tcp_desc_arg {
+ struct rds_connection *conn;
+ gfp_t gfp;
+ enum km_type km;
+};
+
+static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
+ unsigned int offset, size_t len)
+{
+ struct rds_tcp_desc_arg *arg = desc->arg.data;
+ struct rds_connection *conn = arg->conn;
+ struct rds_tcp_connection *tc = conn->c_transport_data;
+ struct rds_tcp_incoming *tinc = tc->t_tinc;
+ struct sk_buff *clone;
+ size_t left = len, to_copy;
+
+ rdsdebug("tcp data tc %p skb %p offset %u len %zu\n", tc, skb, offset,
+ len);
+
+ /*
+ * tcp_read_sock() interprets partial progress as an indication to stop
+ * processing.
+ */
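+ /*
+ * Reassemble RDS messages from the byte stream: first consume the
+ * rds_header into ti_inc.i_hdr (t_tinc_hdr_rem tracks what's left of
+ * it), then queue skb clones covering the payload until
+ * t_tinc_data_rem hits zero, at which point the message is handed up.
+ */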
+ while (left) {
+ if (tinc == NULL) {
+ tinc = kmem_cache_alloc(rds_tcp_incoming_slab,
+ arg->gfp);
+ if (tinc == NULL) {
+ desc->error = -ENOMEM;
+ goto out;
+ }
+ tc->t_tinc = tinc;
+ rdsdebug("alloced tinc %p\n", tinc);
+ rds_inc_init(&tinc->ti_inc, conn, conn->c_faddr);
+ /*
+ * XXX * we might be able to use the __ variants when
+ * we've already serialized at a higher level.
+ */
+ skb_queue_head_init(&tinc->ti_skb_list);
+ }
+
+ if (left && tc->t_tinc_hdr_rem) {
+ to_copy = min(tc->t_tinc_hdr_rem, left);
+ rdsdebug("copying %zu header from skb %p\n", to_copy,
+ skb);
+ skb_copy_bits(skb, offset,
+ (char *)&tinc->ti_inc.i_hdr +
+ sizeof(struct rds_header) -
+ tc->t_tinc_hdr_rem,
+ to_copy);
+ tc->t_tinc_hdr_rem -= to_copy;
+ left -= to_copy;
+ offset += to_copy;
+
+ if (tc->t_tinc_hdr_rem == 0) {
+ /* could be 0 for a 0 len message */
+ tc->t_tinc_data_rem =
+ be32_to_cpu(tinc->ti_inc.i_hdr.h_len);
+ }
+ }
+
+ if (left && tc->t_tinc_data_rem) {
+ clone = skb_clone(skb, arg->gfp);
+ if (clone == NULL) {
+ desc->error = -ENOMEM;
+ goto out;
+ }
+
+ to_copy = min(tc->t_tinc_data_rem, left);
+ pskb_pull(clone, offset);
+ pskb_trim(clone, to_copy);
+ skb_queue_tail(&tinc->ti_skb_list, clone);
+
+ rdsdebug("skb %p data %p len %d off %u to_copy %zu -> "
+ "clone %p data %p len %d\n",
+ skb, skb->data, skb->len, offset, to_copy,
+ clone, clone->data, clone->len);
+
+ tc->t_tinc_data_rem -= to_copy;
+ left -= to_copy;
+ offset += to_copy;
+ }
+
+ if (tc->t_tinc_hdr_rem == 0 && tc->t_tinc_data_rem == 0) {
+ if (tinc->ti_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
+ rds_tcp_cong_recv(conn, tinc);
+ else
+ rds_recv_incoming(conn, conn->c_faddr,
+ conn->c_laddr, &tinc->ti_inc,
+ arg->gfp, arg->km);
+
+ tc->t_tinc_hdr_rem = sizeof(struct rds_header);
+ tc->t_tinc_data_rem = 0;
+ tc->t_tinc = NULL;
+ rds_inc_put(&tinc->ti_inc);
+ tinc = NULL;
+ }
+ }
+out:
+ rdsdebug("returning len %zu left %zu skb len %d rx queue depth %d\n",
+ len, left, skb->len,
+ skb_queue_len(&tc->t_sock->sk->sk_receive_queue));
+ return len - left;
+}
+
+/* the caller has to hold the sock lock */
+int rds_tcp_read_sock(struct rds_connection *conn, gfp_t gfp, enum km_type km)
+{
+ struct rds_tcp_connection *tc = conn->c_transport_data;
+ struct socket *sock = tc->t_sock;
+ read_descriptor_t desc;
+ struct rds_tcp_desc_arg arg;
+
+ /* It's like glib in the kernel! */
+ arg.conn = conn;
+ arg.gfp = gfp;
+ arg.km = km;
+ desc.arg.data = &arg;
+ desc.error = 0;
+ desc.count = 1; /* give more than one skb per call */
+
+ tcp_read_sock(sock->sk, &desc, rds_tcp_data_recv);
+ rdsdebug("tcp_read_sock for tc %p gfp 0x%x returned %d\n", tc, gfp,
+ desc.error);
+
+ return desc.error;
+}
+
+/*
+ * We hold the sock lock to serialize our rds_tcp_recv->tcp_read_sock from
+ * data_ready.
+ *
+ * if we fail to allocate we're in trouble.. blindly wait some time before
+ * trying again to see if the VM can free up something for us.
+ */
+int rds_tcp_recv(struct rds_connection *conn)
+{
+ struct rds_tcp_connection *tc = conn->c_transport_data;
+ struct socket *sock = tc->t_sock;
+ int ret = 0;
+
+ rdsdebug("recv worker conn %p tc %p sock %p\n", conn, tc, sock);
+
+ lock_sock(sock->sk);
+ ret = rds_tcp_read_sock(conn, GFP_KERNEL, KM_USER0);
+ release_sock(sock->sk);
+
+ return ret;
+}
+
+void rds_tcp_data_ready(struct sock *sk, int bytes)
+{
+ void (*ready)(struct sock *sk, int bytes);
+ struct rds_connection *conn;
+ struct rds_tcp_connection *tc;
+
+ rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
+
+ read_lock(&sk->sk_callback_lock);
+ conn = sk->sk_user_data;
+ if (conn == NULL) { /* check for teardown race */
+ ready = sk->sk_data_ready;
+ goto out;
+ }
+
+ tc = conn->c_transport_data;
+ ready = tc->t_orig_data_ready;
+ rds_tcp_stats_inc(s_tcp_data_ready_calls);
+
+ if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM)
+ queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
+out:
+ read_unlock(&sk->sk_callback_lock);
+ ready(sk, bytes);
+}
+
+int __init rds_tcp_recv_init(void)
+{
+ rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming",
+ sizeof(struct rds_tcp_incoming),
+ 0, 0, NULL);
+ if (rds_tcp_incoming_slab == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+void rds_tcp_recv_exit(void)
+{
+ kmem_cache_destroy(rds_tcp_incoming_slab);
+}
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
new file mode 100644
index 00000000000..ab545e0cd5d
--- /dev/null
+++ b/net/rds/tcp_send.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2006 Oracle. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/in.h>
+#include <net/tcp.h>
+
+#include "rds.h"
+#include "tcp.h"
+
+static void rds_tcp_cork(struct socket *sock, int val)
+{
+ mm_segment_t oldfs;
+
+ oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
+ sizeof(val));
+ set_fs(oldfs);
+}
+
+void rds_tcp_xmit_prepare(struct rds_connection *conn)
+{
+ struct rds_tcp_connection *tc = conn->c_transport_data;
+
+ rds_tcp_cork(tc->t_sock, 1);
+}
+
+void rds_tcp_xmit_complete(struct rds_connection *conn)
+{
+ struct rds_tcp_connection *tc = conn->c_transport_data;
+
+ rds_tcp_cork(tc->t_sock, 0);
+}
+
+/* the core send_sem serializes this with other xmit and shutdown */
+int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len)
+{
+ struct kvec vec = {
+ .iov_base = data,
+ .iov_len = len,
+ };
+ struct msghdr msg = {
+ .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
+ };
+
+ return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len);
+}
+
+/* the core send_sem serializes this with other xmit and shutdown */
+int rds_tcp_xmit_cong_map(struct rds_connection *conn,
+ struct rds_cong_map *map, unsigned long offset)
+{
+ static struct rds_header rds_tcp_map_header = {
+ .h_flags = RDS_FLAG_CONG_BITMAP,
+ };
+ struct rds_tcp_connection *tc = conn->c_transport_data;
+ unsigned long i;
+ int ret;
+ int copied = 0;
+
+ /* Some compilers complain that cpu_to_be32(constant) isn't a constant, so fill in h_len at run time. */
+ rds_tcp_map_header.h_len = cpu_to_be32(RDS_CONG_MAP_BYTES);
+
+ if (offset < sizeof(struct rds_header)) {
+ ret = rds_tcp_sendmsg(tc->t_sock,
+ (void *)&rds_tcp_map_header + offset,
+ sizeof(struct rds_header) - offset);
+ if (ret <= 0)
+ return ret;
+ offset += ret;
+ copied = ret;
+ if (offset < sizeof(struct rds_header))
+ return ret;
+ }
+
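+ /*
+ * The remaining offset is into the congestion map itself: split it
+ * into a page index and an offset within that page so a partial send
+ * can resume where it left off.
+ */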
+ offset -= sizeof(struct rds_header);
+ i = offset / PAGE_SIZE;
+ offset = offset % PAGE_SIZE;
+ BUG_ON(i >= RDS_CONG_MAP_PAGES);
+
+ do {
+ ret = tc->t_sock->ops->sendpage(tc->t_sock,
+ virt_to_page(map->m_page_addrs[i]),
+ offset, PAGE_SIZE - offset,
+ MSG_DONTWAIT);
+ if (ret <= 0)
+ break;
+ copied += ret;
+ offset += ret;
+ if (offset == PAGE_SIZE) {
+ offset = 0;
+ i++;
+ }
+ } while (i < RDS_CONG_MAP_PAGES);
+
+ return copied ? copied : ret;
+}
+
+/* the core send_sem serializes this with other xmit and shutdown */
+int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
+ unsigned int hdr_off, unsigned int sg, unsigned int off)
+{
+ struct rds_tcp_connection *tc = conn->c_transport_data;
+ int done = 0;
+ int ret = 0;
+
+ if (hdr_off == 0) {
+ /*
+ * m_ack_seq is set to the sequence number of the last byte of
+ * header and data. see rds_tcp_is_acked().
+ */
+ tc->t_last_sent_nxt = rds_tcp_snd_nxt(tc);
+ rm->m_ack_seq = tc->t_last_sent_nxt +
+ sizeof(struct rds_header) +
+ be32_to_cpu(rm->m_inc.i_hdr.h_len) - 1;
+ smp_mb__before_clear_bit();
+ set_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags);
+ tc->t_last_expected_una = rm->m_ack_seq + 1;
+
+ rdsdebug("rm %p tcp nxt %u ack_seq %llu\n",
+ rm, rds_tcp_snd_nxt(tc),
+ (unsigned long long)rm->m_ack_seq);
+ }
+
+ if (hdr_off < sizeof(struct rds_header)) {
+ /* see rds_tcp_write_space() */
+ set_bit(SOCK_NOSPACE, &tc->t_sock->sk->sk_socket->flags);
+
+ ret = rds_tcp_sendmsg(tc->t_sock,
+ (void *)&rm->m_inc.i_hdr + hdr_off,
+ sizeof(rm->m_inc.i_hdr) - hdr_off);
+ if (ret < 0)
+ goto out;
+ done += ret;
+ if (hdr_off + done != sizeof(struct rds_header))
+ goto out;
+ }
+
+ while (sg < rm->m_nents) {
+ ret = tc->t_sock->ops->sendpage(tc->t_sock,
+ sg_page(&rm->m_sg[sg]),
+ rm->m_sg[sg].offset + off,
+ rm->m_sg[sg].length - off,
+ MSG_DONTWAIT|MSG_NOSIGNAL);
+ rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->m_sg[sg]),
+ rm->m_sg[sg].offset + off, rm->m_sg[sg].length - off,
+ ret);
+ if (ret <= 0)
+ break;
+
+ off += ret;
+ done += ret;
+ if (off == rm->m_sg[sg].length) {
+ off = 0;
+ sg++;
+ }
+ }
+
+out:
+ if (ret <= 0) {
+ /* write_space will hit after EAGAIN, all else fatal */
+ if (ret == -EAGAIN) {
+ rds_tcp_stats_inc(s_tcp_sndbuf_full);
+ ret = 0;
+ } else {
+ printk(KERN_WARNING "RDS/tcp: send to %u.%u.%u.%u "
+ "returned %d, disconnecting and reconnecting\n",
+ NIPQUAD(conn->c_faddr), ret);
+ rds_conn_drop(conn);
+ }
+ }
+ if (done == 0)
+ done = ret;
+ return done;
+}
+
+/*
+ * rm->m_ack_seq is set to the tcp sequence number that corresponds to the
+ * last byte of the message, including the header. This means that the
+ * entire message has been received if rm->m_ack_seq is "before" the next
+ * unacked byte of the TCP sequence space. We have to do very careful
+ * wrapping 32bit comparisons here.
+ */
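+/*
+ * For example, with m_ack_seq == 0xfffffff0 and ack (snd_una) == 0x00000010
+ * after a wrap, (u32)(0xfffffff0 - 0x00000010) == 0xffffffe0, which is
+ * negative as an __s32, so the message is correctly counted as acked.
+ */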
+static int rds_tcp_is_acked(struct rds_message *rm, uint64_t ack)
+{
+ if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
+ return 0;
+ return (__s32)((u32)rm->m_ack_seq - (u32)ack) < 0;
+}
+
+void rds_tcp_write_space(struct sock *sk)
+{
+ void (*write_space)(struct sock *sk);
+ struct rds_connection *conn;
+ struct rds_tcp_connection *tc;
+
+ read_lock(&sk->sk_callback_lock);
+ conn = sk->sk_user_data;
+ if (conn == NULL) {
+ write_space = sk->sk_write_space;
+ goto out;
+ }
+
+ tc = conn->c_transport_data;
+ rdsdebug("write_space for tc %p\n", tc);
+ write_space = tc->t_orig_write_space;
+ rds_tcp_stats_inc(s_tcp_write_space_calls);
+
+ rdsdebug("tcp una %u\n", rds_tcp_snd_una(tc));
+ tc->t_last_seen_una = rds_tcp_snd_una(tc);
+ rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);
+
+ queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+out:
+ read_unlock(&sk->sk_callback_lock);
+
+ /*
+ * write_space is only called when data leaves tcp's send queue if
+ * SOCK_NOSPACE is set. We set SOCK_NOSPACE every time we put
+ * data in tcp's send queue because we use write_space to parse the
+ * sequence numbers and notice that rds messages have been fully
+ * received.
+ *
+ * tcp's write_space clears SOCK_NOSPACE if the send queue has more
+ * than a certain amount of space. So we need to set it again *after*
+ * we call tcp's write_space or else we might only get called on the
+ * first of a series of incoming tcp acks.
+ */
+ write_space(sk);
+
+ if (sk->sk_socket)
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+}
diff --git a/net/rds/tcp_stats.c b/net/rds/tcp_stats.c
new file mode 100644
index 00000000000..d5898d03cd6
--- /dev/null
+++ b/net/rds/tcp_stats.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2006 Oracle. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+#include <linux/percpu.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+
+#include "rds.h"
+#include "tcp.h"
+
+DEFINE_PER_CPU(struct rds_tcp_statistics, rds_tcp_stats)
+ ____cacheline_aligned;
+
+static const char *const rds_tcp_stat_names[] = {
+ "tcp_data_ready_calls",
+ "tcp_write_space_calls",
+ "tcp_sndbuf_full",
+ "tcp_connect_raced",
+ "tcp_listen_closed_stale",
+};
+
+unsigned int rds_tcp_stats_info_copy(struct rds_info_iterator *iter,
+ unsigned int avail)
+{
+ struct rds_tcp_statistics stats = {0, };
+ uint64_t *src;
+ uint64_t *sum;
+ size_t i;
+ int cpu;
+
+ if (avail < ARRAY_SIZE(rds_tcp_stat_names))
+ goto out;
+
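+ /*
+ * rds_tcp_statistics is a flat block of u64 counters, one per name
+ * above, so sum each slot across the online CPUs.
+ */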
+ for_each_online_cpu(cpu) {
+ src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu));
+ sum = (uint64_t *)&stats;
+ for (i = 0; i < sizeof(stats) / sizeof(uint64_t); i++)
+ *(sum++) += *(src++);
+ }
+
+ rds_stats_info_copy(iter, (uint64_t *)&stats, rds_tcp_stat_names,
+ ARRAY_SIZE(rds_tcp_stat_names));
+out:
+ return ARRAY_SIZE(rds_tcp_stat_names);
+}
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 828a1bf9ea9..dd7e0cad1e7 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -68,6 +68,7 @@
* (TCP, IB/RDMA) to provide the necessary synchronisation.
*/
struct workqueue_struct *rds_wq;
+EXPORT_SYMBOL_GPL(rds_wq);
void rds_connect_complete(struct rds_connection *conn)
{
@@ -89,6 +90,7 @@ void rds_connect_complete(struct rds_connection *conn)
queue_delayed_work(rds_wq, &conn->c_send_w, 0);
queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
}
+EXPORT_SYMBOL_GPL(rds_connect_complete);
/*
* This random exponential backoff is relied on to eventually resolve racing
diff --git a/net/rds/transport.c b/net/rds/transport.c
index 767da61ad2f..7e106790135 100644
--- a/net/rds/transport.c
+++ b/net/rds/transport.c
@@ -37,7 +37,7 @@
#include "rds.h"
#include "loop.h"
-static LIST_HEAD(rds_transports);
+static struct rds_transport *transports[RDS_TRANS_COUNT];
static DECLARE_RWSEM(rds_trans_sem);
int rds_trans_register(struct rds_transport *trans)
@@ -46,36 +46,44 @@ int rds_trans_register(struct rds_transport *trans)
down_write(&rds_trans_sem);
- list_add_tail(&trans->t_item, &rds_transports);
- printk(KERN_INFO "Registered RDS/%s transport\n", trans->t_name);
+ if (transports[trans->t_type]) {
+ printk(KERN_ERR "RDS Transport type %d already registered\n",
+ trans->t_type);
+ } else {
+ transports[trans->t_type] = trans;
+ printk(KERN_INFO "Registered RDS/%s transport\n", trans->t_name);
+ }
up_write(&rds_trans_sem);
return 0;
}
+EXPORT_SYMBOL_GPL(rds_trans_register);
void rds_trans_unregister(struct rds_transport *trans)
{
down_write(&rds_trans_sem);
- list_del_init(&trans->t_item);
+ transports[trans->t_type] = NULL;
printk(KERN_INFO "Unregistered RDS/%s transport\n", trans->t_name);
up_write(&rds_trans_sem);
}
+EXPORT_SYMBOL_GPL(rds_trans_unregister);
struct rds_transport *rds_trans_get_preferred(__be32 addr)
{
- struct rds_transport *trans;
struct rds_transport *ret = NULL;
+ int i;
if (IN_LOOPBACK(ntohl(addr)))
return &rds_loop_transport;
down_read(&rds_trans_sem);
- list_for_each_entry(trans, &rds_transports, t_item) {
- if (trans->laddr_check(addr) == 0) {
- ret = trans;
+ for (i = 0; i < RDS_TRANS_COUNT; i++) {
+ if (transports[i] && (transports[i]->laddr_check(addr) == 0)) {
+ ret = transports[i];
break;
}
}
@@ -97,12 +105,15 @@ unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
struct rds_transport *trans;
unsigned int total = 0;
unsigned int part;
+ int i;
rds_info_iter_unmap(iter);
down_read(&rds_trans_sem);
- list_for_each_entry(trans, &rds_transports, t_item) {
- if (trans->stats_info_copy == NULL)
+ for (i = 0; i < RDS_TRANS_COUNT; i++) {
+ trans = transports[i];
+ if (!trans || !trans->stats_info_copy)
continue;
part = trans->stats_info_copy(iter, avail);
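
The switch from a linked list to a fixed array indexed by transport type gives constant-time lookup and a natural duplicate-registration check, with the rw-semaphore still serialising writers against readers. A rough userspace analogue of that pattern, using a pthread rwlock in place of rds_trans_sem (all names and types here are illustrative):

#include <pthread.h>
#include <stdio.h>

enum trans_type { TRANS_IB, TRANS_TCP, TRANS_COUNT };

struct transport {
	enum trans_type type;
	const char *name;
};

/* One fixed slot per transport type, like transports[RDS_TRANS_COUNT]. */
static struct transport *registry[TRANS_COUNT];
static pthread_rwlock_t registry_lock = PTHREAD_RWLOCK_INITIALIZER;

static int trans_register(struct transport *t)
{
	int ret = 0;

	pthread_rwlock_wrlock(&registry_lock);
	if (registry[t->type]) {
		fprintf(stderr, "type %d already registered\n", t->type);
		ret = -1;
	} else {
		registry[t->type] = t;
	}
	pthread_rwlock_unlock(&registry_lock);
	return ret;
}

static struct transport *trans_lookup(enum trans_type type)
{
	struct transport *t;

	pthread_rwlock_rdlock(&registry_lock);
	t = registry[type];
	pthread_rwlock_unlock(&registry_lock);
	return t;
}

int main(void)
{
	struct transport tcp = { TRANS_TCP, "tcp" };

	trans_register(&tcp);
	printf("lookup: %s\n", trans_lookup(TRANS_TCP)->name);
	return 0;
}
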
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 2fc4a1724eb..dbeaf298382 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -589,11 +589,13 @@ static const char *rfkill_get_type_str(enum rfkill_type type)
return "wimax";
case RFKILL_TYPE_WWAN:
return "wwan";
+ case RFKILL_TYPE_GPS:
+ return "gps";
default:
BUG();
}
- BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_WWAN + 1);
+ BUILD_BUG_ON(NUM_RFKILL_TYPES != RFKILL_TYPE_GPS + 1);
}
static ssize_t rfkill_type_show(struct device *dev,
@@ -1091,10 +1093,16 @@ static ssize_t rfkill_fop_write(struct file *file, const char __user *buf,
struct rfkill_event ev;
/* we don't need the 'hard' variable but accept it */
- if (count < sizeof(ev) - 1)
+ if (count < RFKILL_EVENT_SIZE_V1 - 1)
return -EINVAL;
- if (copy_from_user(&ev, buf, sizeof(ev) - 1))
+ /*
+ * Copy as much data as we can accept into our 'ev' buffer,
+ * but tell userspace how much we've copied so it can determine
+ * our API version even in a write() call, if it cares.
+ */
+ count = min(count, sizeof(ev));
+ if (copy_from_user(&ev, buf, count))
return -EFAULT;
if (ev.op != RFKILL_OP_CHANGE && ev.op != RFKILL_OP_CHANGE_ALL)
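
The rewritten rfkill_fop_write accepts any event at least as large as the original v1 layout and copies at most sizeof(struct rfkill_event), so both older and newer userspace binaries keep working. A minimal userspace sketch of writing such an event (assuming the struct rfkill_event layout and constants exported by <linux/rfkill.h>; error handling kept short):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/rfkill.h>

int main(void)
{
	struct rfkill_event ev;
	int fd;

	fd = open("/dev/rfkill", O_WRONLY);
	if (fd < 0) {
		perror("open /dev/rfkill");
		return 1;
	}

	/* Soft-block every switch: op CHANGE_ALL with type ALL. */
	memset(&ev, 0, sizeof(ev));
	ev.op = RFKILL_OP_CHANGE_ALL;
	ev.type = RFKILL_TYPE_ALL;
	ev.soft = 1;

	/* The kernel accepts any size >= the v1 event and copies at
	 * most sizeof(ev) of what we hand it. */
	if (write(fd, &ev, sizeof(ev)) < 0)
		perror("write");

	close(fd);
	return 0;
}
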
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index f0a76f6bca7..1e166c9685a 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -63,7 +63,7 @@ int sysctl_rose_window_size = ROSE_DEFAULT_WINDOW_SIZE;
static HLIST_HEAD(rose_list);
static DEFINE_SPINLOCK(rose_list_lock);
-static struct proto_ops rose_proto_ops;
+static const struct proto_ops rose_proto_ops;
ax25_address rose_callsign;
@@ -954,6 +954,7 @@ static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
struct rose_sock *rose = rose_sk(sk);
int n;
+ memset(srose, 0, sizeof(*srose));
if (peer != 0) {
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
@@ -1514,7 +1515,7 @@ static struct net_proto_family rose_family_ops = {
.owner = THIS_MODULE,
};
-static struct proto_ops rose_proto_ops = {
+static const struct proto_ops rose_proto_ops = {
.family = PF_ROSE,
.owner = THIS_MODULE,
.release = rose_release,
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 389d6e0d774..424b893d145 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -131,7 +131,7 @@ static int rose_close(struct net_device *dev)
return 0;
}
-static int rose_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t rose_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct net_device_stats *stats = &dev->stats;
@@ -141,7 +141,7 @@ static int rose_xmit(struct sk_buff *skb, struct net_device *dev)
}
dev_kfree_skb(skb);
stats->tx_errors++;
- return 0;
+ return NETDEV_TX_OK;
}
static const struct header_ops rose_header_ops = {
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index 3ac1672e107..b4a22097703 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -20,7 +20,7 @@
static unsigned rxrpc_ack_defer = 1;
-static const char *rxrpc_acks[] = {
+static const char *const rxrpc_acks[] = {
"---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL",
"-?-"
};
@@ -40,7 +40,7 @@ static const s8 rxrpc_ack_priority[] = {
/*
* propose an ACK be sent
*/
-void __rxrpc_propose_ACK(struct rxrpc_call *call, uint8_t ack_reason,
+void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
__be32 serial, bool immediate)
{
unsigned long expiry;
@@ -120,7 +120,7 @@ cancel_timer:
/*
* propose an ACK be sent, locking the call structure
*/
-void rxrpc_propose_ACK(struct rxrpc_call *call, uint8_t ack_reason,
+void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
__be32 serial, bool immediate)
{
s8 prior = rxrpc_ack_priority[ack_reason];
@@ -520,7 +520,7 @@ static void rxrpc_zap_tx_window(struct rxrpc_call *call)
struct rxrpc_skb_priv *sp;
struct sk_buff *skb;
unsigned long _skb, *acks_window;
- uint8_t winsz = call->acks_winsz;
+ u8 winsz = call->acks_winsz;
int tail;
acks_window = call->acks_window;
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
index d9231245a79..bc0019f704f 100644
--- a/net/rxrpc/ar-call.c
+++ b/net/rxrpc/ar-call.c
@@ -96,7 +96,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
}
/*
- * allocate a new client call and attempt to to get a connection slot for it
+ * allocate a new client call and attempt to get a connection slot for it
*/
static struct rxrpc_call *rxrpc_alloc_client_call(
struct rxrpc_sock *rx,
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 3e7318c1343..7043b294bb6 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -229,7 +229,7 @@ struct rxrpc_conn_bundle {
int debug_id; /* debug ID for printks */
unsigned short num_conns; /* number of connections in this bundle */
__be16 service_id; /* service ID */
- uint8_t security_ix; /* security type */
+ u8 security_ix; /* security type */
};
/*
@@ -370,10 +370,10 @@ struct rxrpc_call {
u8 channel; /* connection channel occupied by this call */
/* transmission-phase ACK management */
- uint8_t acks_head; /* offset into window of first entry */
- uint8_t acks_tail; /* offset into window of last entry */
- uint8_t acks_winsz; /* size of un-ACK'd window */
- uint8_t acks_unacked; /* lowest unacked packet in last ACK received */
+ u8 acks_head; /* offset into window of first entry */
+ u8 acks_tail; /* offset into window of last entry */
+ u8 acks_winsz; /* size of un-ACK'd window */
+ u8 acks_unacked; /* lowest unacked packet in last ACK received */
int acks_latest; /* serial number of latest ACK received */
rxrpc_seq_t acks_hard; /* highest definitively ACK'd msg seq */
unsigned long *acks_window; /* sent packet window
@@ -388,7 +388,7 @@ struct rxrpc_call {
rxrpc_seq_t rx_first_oos; /* first packet in rx_oos_queue (or 0) */
rxrpc_seq_t ackr_win_top; /* top of ACK window (rx_data_eaten is bottom) */
rxrpc_seq_net_t ackr_prev_seq; /* previous sequence number received */
- uint8_t ackr_reason; /* reason to ACK */
+ u8 ackr_reason; /* reason to ACK */
__be32 ackr_serial; /* serial of packet being ACK'd */
atomic_t ackr_not_idle; /* number of packets in Rx queue */
@@ -402,22 +402,6 @@ struct rxrpc_call {
};
/*
- * RxRPC key for Kerberos (type-2 security)
- */
-struct rxkad_key {
- u16 security_index; /* RxRPC header security index */
- u16 ticket_len; /* length of ticket[] */
- u32 expiry; /* time at which expires */
- u32 kvno; /* key version number */
- u8 session_key[8]; /* DES session key */
- u8 ticket[0]; /* the encrypted ticket */
-};
-
-struct rxrpc_key_payload {
- struct rxkad_key k;
-};
-
-/*
* locally abort an RxRPC call
*/
static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
@@ -450,8 +434,8 @@ extern int rxrpc_reject_call(struct rxrpc_sock *);
/*
* ar-ack.c
*/
-extern void __rxrpc_propose_ACK(struct rxrpc_call *, uint8_t, __be32, bool);
-extern void rxrpc_propose_ACK(struct rxrpc_call *, uint8_t, __be32, bool);
+extern void __rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
+extern void rxrpc_propose_ACK(struct rxrpc_call *, u8, __be32, bool);
extern void rxrpc_process_call(struct work_struct *);
/*
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index ad8c7a782da..74697b20049 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -17,6 +17,7 @@
#include <linux/skbuff.h>
#include <linux/key-type.h>
#include <linux/crypto.h>
+#include <linux/ctype.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <keys/rxrpc-type.h>
@@ -28,6 +29,7 @@ static int rxrpc_instantiate_s(struct key *, const void *, size_t);
static void rxrpc_destroy(struct key *);
static void rxrpc_destroy_s(struct key *);
static void rxrpc_describe(const struct key *, struct seq_file *);
+static long rxrpc_read(const struct key *, char __user *, size_t);
/*
* rxrpc defined keys take an arbitrary string as the description and an
@@ -39,6 +41,7 @@ struct key_type key_type_rxrpc = {
.match = user_match,
.destroy = rxrpc_destroy,
.describe = rxrpc_describe,
+ .read = rxrpc_read,
};
EXPORT_SYMBOL(key_type_rxrpc);
@@ -55,6 +58,595 @@ struct key_type key_type_rxrpc_s = {
};
/*
+ * parse an RxKAD type XDR format token
+ * - the caller guarantees we have at least 4 words
+ */
+static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
+ unsigned toklen)
+{
+ struct rxrpc_key_token *token, **pptoken;
+ size_t plen;
+ u32 tktlen;
+ int ret;
+
+ _enter(",{%x,%x,%x,%x},%u",
+ ntohl(xdr[0]), ntohl(xdr[1]), ntohl(xdr[2]), ntohl(xdr[3]),
+ toklen);
+
+ if (toklen <= 8 * 4)
+ return -EKEYREJECTED;
+ tktlen = ntohl(xdr[7]);
+ _debug("tktlen: %x", tktlen);
+ if (tktlen > AFSTOKEN_RK_TIX_MAX)
+ return -EKEYREJECTED;
+ if (8 * 4 + tktlen != toklen)
+ return -EKEYREJECTED;
+
+ plen = sizeof(*token) + sizeof(*token->kad) + tktlen;
+ ret = key_payload_reserve(key, key->datalen + plen);
+ if (ret < 0)
+ return ret;
+
+ plen -= sizeof(*token);
+ token = kmalloc(sizeof(*token), GFP_KERNEL);
+ if (!token)
+ return -ENOMEM;
+
+ token->kad = kmalloc(plen, GFP_KERNEL);
+ if (!token->kad) {
+ kfree(token);
+ return -ENOMEM;
+ }
+
+ token->security_index = RXRPC_SECURITY_RXKAD;
+ token->kad->ticket_len = tktlen;
+ token->kad->vice_id = ntohl(xdr[0]);
+ token->kad->kvno = ntohl(xdr[1]);
+ token->kad->start = ntohl(xdr[4]);
+ token->kad->expiry = ntohl(xdr[5]);
+ token->kad->primary_flag = ntohl(xdr[6]);
+ memcpy(&token->kad->session_key, &xdr[2], 8);
+ memcpy(&token->kad->ticket, &xdr[8], tktlen);
+
+ _debug("SCIX: %u", token->security_index);
+ _debug("TLEN: %u", token->kad->ticket_len);
+ _debug("EXPY: %x", token->kad->expiry);
+ _debug("KVNO: %u", token->kad->kvno);
+ _debug("PRIM: %u", token->kad->primary_flag);
+ _debug("SKEY: %02x%02x%02x%02x%02x%02x%02x%02x",
+ token->kad->session_key[0], token->kad->session_key[1],
+ token->kad->session_key[2], token->kad->session_key[3],
+ token->kad->session_key[4], token->kad->session_key[5],
+ token->kad->session_key[6], token->kad->session_key[7]);
+ if (token->kad->ticket_len >= 8)
+ _debug("TCKT: %02x%02x%02x%02x%02x%02x%02x%02x",
+ token->kad->ticket[0], token->kad->ticket[1],
+ token->kad->ticket[2], token->kad->ticket[3],
+ token->kad->ticket[4], token->kad->ticket[5],
+ token->kad->ticket[6], token->kad->ticket[7]);
+
+ /* count the number of tokens attached */
+ key->type_data.x[0]++;
+
+ /* attach the data */
+ for (pptoken = (struct rxrpc_key_token **)&key->payload.data;
+ *pptoken;
+ pptoken = &(*pptoken)->next)
+ continue;
+ *pptoken = token;
+ if (token->kad->expiry < key->expiry)
+ key->expiry = token->kad->expiry;
+
+ _leave(" = 0");
+ return 0;
+}
+
+static void rxrpc_free_krb5_principal(struct krb5_principal *princ)
+{
+ int loop;
+
+ if (princ->name_parts) {
+ for (loop = princ->n_name_parts - 1; loop >= 0; loop--)
+ kfree(princ->name_parts[loop]);
+ kfree(princ->name_parts);
+ }
+ kfree(princ->realm);
+}
+
+static void rxrpc_free_krb5_tagged(struct krb5_tagged_data *td)
+{
+ kfree(td->data);
+}
+
+/*
+ * free up an RxK5 token
+ */
+static void rxrpc_rxk5_free(struct rxk5_key *rxk5)
+{
+ int loop;
+
+ rxrpc_free_krb5_principal(&rxk5->client);
+ rxrpc_free_krb5_principal(&rxk5->server);
+ rxrpc_free_krb5_tagged(&rxk5->session);
+
+ if (rxk5->addresses) {
+ for (loop = rxk5->n_addresses - 1; loop >= 0; loop--)
+ rxrpc_free_krb5_tagged(&rxk5->addresses[loop]);
+ kfree(rxk5->addresses);
+ }
+ if (rxk5->authdata) {
+ for (loop = rxk5->n_authdata - 1; loop >= 0; loop--)
+ rxrpc_free_krb5_tagged(&rxk5->authdata[loop]);
+ kfree(rxk5->authdata);
+ }
+
+ kfree(rxk5->ticket);
+ kfree(rxk5->ticket2);
+ kfree(rxk5);
+}
+
+/*
+ * extract a krb5 principal
+ */
+static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
+ const __be32 **_xdr,
+ unsigned *_toklen)
+{
+ const __be32 *xdr = *_xdr;
+ unsigned toklen = *_toklen, n_parts, loop, tmp;
+
+ /* there must be at least one name, and at least #names+1 length
+ * words */
+ if (toklen <= 12)
+ return -EINVAL;
+
+ _enter(",{%x,%x,%x},%u",
+ ntohl(xdr[0]), ntohl(xdr[1]), ntohl(xdr[2]), toklen);
+
+ n_parts = ntohl(*xdr++);
+ toklen -= 4;
+ if (n_parts <= 0 || n_parts > AFSTOKEN_K5_COMPONENTS_MAX)
+ return -EINVAL;
+ princ->n_name_parts = n_parts;
+
+ if (toklen <= (n_parts + 1) * 4)
+ return -EINVAL;
+
+ princ->name_parts = kcalloc(n_parts, sizeof(char *), GFP_KERNEL);
+ if (!princ->name_parts)
+ return -ENOMEM;
+
+ for (loop = 0; loop < n_parts; loop++) {
+ if (toklen < 4)
+ return -EINVAL;
+ tmp = ntohl(*xdr++);
+ toklen -= 4;
+ if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
+ return -EINVAL;
+ if (tmp > toklen)
+ return -EINVAL;
+ princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
+ if (!princ->name_parts[loop])
+ return -ENOMEM;
+ memcpy(princ->name_parts[loop], xdr, tmp);
+ princ->name_parts[loop][tmp] = 0;
+ tmp = (tmp + 3) & ~3;
+ toklen -= tmp;
+ xdr += tmp >> 2;
+ }
+
+ if (toklen < 4)
+ return -EINVAL;
+ tmp = ntohl(*xdr++);
+ toklen -= 4;
+ if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
+ return -EINVAL;
+ if (tmp > toklen)
+ return -EINVAL;
+ princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
+ if (!princ->realm)
+ return -ENOMEM;
+ memcpy(princ->realm, xdr, tmp);
+ princ->realm[tmp] = 0;
+ tmp = (tmp + 3) & ~3;
+ toklen -= tmp;
+ xdr += tmp >> 2;
+
+ _debug("%s/...@%s", princ->name_parts[0], princ->realm);
+
+ *_xdr = xdr;
+ *_toklen = toklen;
+ _leave(" = 0 [toklen=%u]", toklen);
+ return 0;
+}
+
+/*
+ * extract a piece of krb5 tagged data
+ */
+static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
+ size_t max_data_size,
+ const __be32 **_xdr,
+ unsigned *_toklen)
+{
+ const __be32 *xdr = *_xdr;
+ unsigned toklen = *_toklen, len;
+
+ /* there must be at least one tag and one length word */
+ if (toklen <= 8)
+ return -EINVAL;
+
+ _enter(",%zu,{%x,%x},%u",
+ max_data_size, ntohl(xdr[0]), ntohl(xdr[1]), toklen);
+
+ td->tag = ntohl(*xdr++);
+ len = ntohl(*xdr++);
+ toklen -= 8;
+ if (len > max_data_size)
+ return -EINVAL;
+ td->data_len = len;
+
+ if (len > 0) {
+ td->data = kmalloc(len, GFP_KERNEL);
+ if (!td->data)
+ return -ENOMEM;
+ memcpy(td->data, xdr, len);
+ len = (len + 3) & ~3;
+ toklen -= len;
+ xdr += len >> 2;
+ }
+
+ _debug("tag %x len %x", td->tag, td->data_len);
+
+ *_xdr = xdr;
+ *_toklen = toklen;
+ _leave(" = 0 [toklen=%u]", toklen);
+ return 0;
+}
+
+/*
+ * extract an array of tagged data
+ */
+static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td,
+ u8 *_n_elem,
+ u8 max_n_elem,
+ size_t max_elem_size,
+ const __be32 **_xdr,
+ unsigned *_toklen)
+{
+ struct krb5_tagged_data *td;
+ const __be32 *xdr = *_xdr;
+ unsigned toklen = *_toklen, n_elem, loop;
+ int ret;
+
+ /* there must be at least one count */
+ if (toklen < 4)
+ return -EINVAL;
+
+ _enter(",,%u,%zu,{%x},%u",
+ max_n_elem, max_elem_size, ntohl(xdr[0]), toklen);
+
+ n_elem = ntohl(*xdr++);
+ toklen -= 4;
+ if (n_elem < 0 || n_elem > max_n_elem)
+ return -EINVAL;
+ *_n_elem = n_elem;
+ if (n_elem > 0) {
+ if (toklen <= (n_elem + 1) * 4)
+ return -EINVAL;
+
+ _debug("n_elem %d", n_elem);
+
+ td = kcalloc(n_elem, sizeof(struct krb5_tagged_data),
+ GFP_KERNEL);
+ if (!td)
+ return -ENOMEM;
+ *_td = td;
+
+ for (loop = 0; loop < n_elem; loop++) {
+ ret = rxrpc_krb5_decode_tagged_data(&td[loop],
+ max_elem_size,
+ &xdr, &toklen);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ *_xdr = xdr;
+ *_toklen = toklen;
+ _leave(" = 0 [toklen=%u]", toklen);
+ return 0;
+}
+
+/*
+ * extract a krb5 ticket
+ */
+static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
+ const __be32 **_xdr, unsigned *_toklen)
+{
+ const __be32 *xdr = *_xdr;
+ unsigned toklen = *_toklen, len;
+
+ /* there must be at least one length word */
+ if (toklen <= 4)
+ return -EINVAL;
+
+ _enter(",{%x},%u", ntohl(xdr[0]), toklen);
+
+ len = ntohl(*xdr++);
+ toklen -= 4;
+ if (len > AFSTOKEN_K5_TIX_MAX)
+ return -EINVAL;
+ *_tktlen = len;
+
+ _debug("ticket len %u", len);
+
+ if (len > 0) {
+ *_ticket = kmalloc(len, GFP_KERNEL);
+ if (!*_ticket)
+ return -ENOMEM;
+ memcpy(*_ticket, xdr, len);
+ len = (len + 3) & ~3;
+ toklen -= len;
+ xdr += len >> 2;
+ }
+
+ *_xdr = xdr;
+ *_toklen = toklen;
+ _leave(" = 0 [toklen=%u]", toklen);
+ return 0;
+}
+
+/*
+ * parse an RxK5 type XDR format token
+ * - the caller guarantees we have at least 4 words
+ */
+static int rxrpc_instantiate_xdr_rxk5(struct key *key, const __be32 *xdr,
+ unsigned toklen)
+{
+ struct rxrpc_key_token *token, **pptoken;
+ struct rxk5_key *rxk5;
+ const __be32 *end_xdr = xdr + (toklen >> 2);
+ int ret;
+
+ _enter(",{%x,%x,%x,%x},%u",
+ ntohl(xdr[0]), ntohl(xdr[1]), ntohl(xdr[2]), ntohl(xdr[3]),
+ toklen);
+
+ /* reserve some payload space for this subkey - the length of the token
+ * is a reasonable approximation */
+ ret = key_payload_reserve(key, key->datalen + toklen);
+ if (ret < 0)
+ return ret;
+
+ token = kzalloc(sizeof(*token), GFP_KERNEL);
+ if (!token)
+ return -ENOMEM;
+
+ rxk5 = kzalloc(sizeof(*rxk5), GFP_KERNEL);
+ if (!rxk5) {
+ kfree(token);
+ return -ENOMEM;
+ }
+
+ token->security_index = RXRPC_SECURITY_RXK5;
+ token->k5 = rxk5;
+
+ /* extract the principals */
+ ret = rxrpc_krb5_decode_principal(&rxk5->client, &xdr, &toklen);
+ if (ret < 0)
+ goto error;
+ ret = rxrpc_krb5_decode_principal(&rxk5->server, &xdr, &toklen);
+ if (ret < 0)
+ goto error;
+
+ /* extract the session key and the encoding type (the tag field ->
+ * ENCTYPE_xxx) */
+ ret = rxrpc_krb5_decode_tagged_data(&rxk5->session, AFSTOKEN_DATA_MAX,
+ &xdr, &toklen);
+ if (ret < 0)
+ goto error;
+
+ if (toklen < 4 * 8 + 2 * 4)
+ goto inval;
+ rxk5->authtime = be64_to_cpup((const __be64 *) xdr);
+ xdr += 2;
+ rxk5->starttime = be64_to_cpup((const __be64 *) xdr);
+ xdr += 2;
+ rxk5->endtime = be64_to_cpup((const __be64 *) xdr);
+ xdr += 2;
+ rxk5->renew_till = be64_to_cpup((const __be64 *) xdr);
+ xdr += 2;
+ rxk5->is_skey = ntohl(*xdr++);
+ rxk5->flags = ntohl(*xdr++);
+ toklen -= 4 * 8 + 2 * 4;
+
+ _debug("times: a=%llx s=%llx e=%llx rt=%llx",
+ rxk5->authtime, rxk5->starttime, rxk5->endtime,
+ rxk5->renew_till);
+ _debug("is_skey=%x flags=%x", rxk5->is_skey, rxk5->flags);
+
+ /* extract the permitted client addresses */
+ ret = rxrpc_krb5_decode_tagged_array(&rxk5->addresses,
+ &rxk5->n_addresses,
+ AFSTOKEN_K5_ADDRESSES_MAX,
+ AFSTOKEN_DATA_MAX,
+ &xdr, &toklen);
+ if (ret < 0)
+ goto error;
+
+ ASSERTCMP((end_xdr - xdr) << 2, ==, toklen);
+
+ /* extract the tickets */
+ ret = rxrpc_krb5_decode_ticket(&rxk5->ticket, &rxk5->ticket_len,
+ &xdr, &toklen);
+ if (ret < 0)
+ goto error;
+ ret = rxrpc_krb5_decode_ticket(&rxk5->ticket2, &rxk5->ticket2_len,
+ &xdr, &toklen);
+ if (ret < 0)
+ goto error;
+
+ ASSERTCMP((end_xdr - xdr) << 2, ==, toklen);
+
+ /* extract the typed auth data */
+ ret = rxrpc_krb5_decode_tagged_array(&rxk5->authdata,
+ &rxk5->n_authdata,
+ AFSTOKEN_K5_AUTHDATA_MAX,
+ AFSTOKEN_BDATALN_MAX,
+ &xdr, &toklen);
+ if (ret < 0)
+ goto error;
+
+ ASSERTCMP((end_xdr - xdr) << 2, ==, toklen);
+
+ if (toklen != 0)
+ goto inval;
+
+ /* attach the payload to the key */
+ for (pptoken = (struct rxrpc_key_token **)&key->payload.data;
+ *pptoken;
+ pptoken = &(*pptoken)->next)
+ continue;
+ *pptoken = token;
+ if (token->kad->expiry < key->expiry)
+ key->expiry = token->kad->expiry;
+
+ _leave(" = 0");
+ return 0;
+
+inval:
+ ret = -EINVAL;
+error:
+ rxrpc_rxk5_free(rxk5);
+ kfree(token);
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * attempt to parse the data as the XDR format
+ * - the caller guarantees we have more than 7 words
+ */
+static int rxrpc_instantiate_xdr(struct key *key, const void *data, size_t datalen)
+{
+ const __be32 *xdr = data, *token;
+ const char *cp;
+ unsigned len, tmp, loop, ntoken, toklen, sec_ix;
+ int ret;
+
+ _enter(",{%x,%x,%x,%x},%zu",
+ ntohl(xdr[0]), ntohl(xdr[1]), ntohl(xdr[2]), ntohl(xdr[3]),
+ datalen);
+
+ if (datalen > AFSTOKEN_LENGTH_MAX)
+ goto not_xdr;
+
+ /* XDR is an array of __be32's */
+ if (datalen & 3)
+ goto not_xdr;
+
+ /* the flags should be 0 (the setpag bit must be handled by
+ * userspace) */
+ if (ntohl(*xdr++) != 0)
+ goto not_xdr;
+ datalen -= 4;
+
+ /* check the cell name */
+ len = ntohl(*xdr++);
+ if (len < 1 || len > AFSTOKEN_CELL_MAX)
+ goto not_xdr;
+ datalen -= 4;
+ tmp = (len + 3) & ~3;
+ if (tmp > datalen)
+ goto not_xdr;
+
+ cp = (const char *) xdr;
+ for (loop = 0; loop < len; loop++)
+ if (!isprint(cp[loop]))
+ goto not_xdr;
+ if (len < tmp)
+ for (; loop < tmp; loop++)
+ if (cp[loop])
+ goto not_xdr;
+ _debug("cellname: [%u/%u] '%*.*s'",
+ len, tmp, len, len, (const char *) xdr);
+ datalen -= tmp;
+ xdr += tmp >> 2;
+
+ /* get the token count */
+ if (datalen < 12)
+ goto not_xdr;
+ ntoken = ntohl(*xdr++);
+ datalen -= 4;
+ _debug("ntoken: %x", ntoken);
+ if (ntoken < 1 || ntoken > AFSTOKEN_MAX)
+ goto not_xdr;
+
+ /* check each token wrapper */
+ token = xdr;
+ loop = ntoken;
+ do {
+ if (datalen < 8)
+ goto not_xdr;
+ toklen = ntohl(*xdr++);
+ sec_ix = ntohl(*xdr);
+ datalen -= 4;
+ _debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
+ if (toklen < 20 || toklen > datalen)
+ goto not_xdr;
+ datalen -= (toklen + 3) & ~3;
+ xdr += (toklen + 3) >> 2;
+
+ } while (--loop > 0);
+
+ _debug("remainder: %zu", datalen);
+ if (datalen != 0)
+ goto not_xdr;
+
+ /* okay: we're going to assume it's valid XDR format
+ * - we ignore the cellname, relying on the key to be correctly named
+ */
+ do {
+ xdr = token;
+ toklen = ntohl(*xdr++);
+ token = xdr + ((toklen + 3) >> 2);
+ sec_ix = ntohl(*xdr++);
+ toklen -= 4;
+
+ _debug("TOKEN type=%u [%p-%p]", sec_ix, xdr, token);
+
+ switch (sec_ix) {
+ case RXRPC_SECURITY_RXKAD:
+ ret = rxrpc_instantiate_xdr_rxkad(key, xdr, toklen);
+ if (ret != 0)
+ goto error;
+ break;
+
+ case RXRPC_SECURITY_RXK5:
+ ret = rxrpc_instantiate_xdr_rxk5(key, xdr, toklen);
+ if (ret != 0)
+ goto error;
+ break;
+
+ default:
+ ret = -EPROTONOSUPPORT;
+ goto error;
+ }
+
+ } while (--ntoken > 0);
+
+ _leave(" = 0");
+ return 0;
+
+not_xdr:
+ _leave(" = -EPROTO");
+ return -EPROTO;
+error:
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
* instantiate an rxrpc defined key
* data should be of the form:
* OFFSET LEN CONTENT
@@ -70,8 +662,8 @@ struct key_type key_type_rxrpc_s = {
*/
static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
{
- const struct rxkad_key *tsec;
- struct rxrpc_key_payload *upayload;
+ const struct rxrpc_key_data_v1 *v1;
+ struct rxrpc_key_token *token, **pp;
size_t plen;
u32 kver;
int ret;
@@ -82,6 +674,13 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
if (!data && datalen == 0)
return 0;
+ /* determine if the XDR payload format is being used */
+ if (datalen > 7 * 4) {
+ ret = rxrpc_instantiate_xdr(key, data, datalen);
+ if (ret != -EPROTO)
+ return ret;
+ }
+
/* get the key interface version number */
ret = -EINVAL;
if (datalen <= 4 || !data)
@@ -98,53 +697,67 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
/* deal with a version 1 key */
ret = -EINVAL;
- if (datalen < sizeof(*tsec))
+ if (datalen < sizeof(*v1))
goto error;
- tsec = data;
- if (datalen != sizeof(*tsec) + tsec->ticket_len)
+ v1 = data;
+ if (datalen != sizeof(*v1) + v1->ticket_length)
goto error;
- _debug("SCIX: %u", tsec->security_index);
- _debug("TLEN: %u", tsec->ticket_len);
- _debug("EXPY: %x", tsec->expiry);
- _debug("KVNO: %u", tsec->kvno);
+ _debug("SCIX: %u", v1->security_index);
+ _debug("TLEN: %u", v1->ticket_length);
+ _debug("EXPY: %x", v1->expiry);
+ _debug("KVNO: %u", v1->kvno);
_debug("SKEY: %02x%02x%02x%02x%02x%02x%02x%02x",
- tsec->session_key[0], tsec->session_key[1],
- tsec->session_key[2], tsec->session_key[3],
- tsec->session_key[4], tsec->session_key[5],
- tsec->session_key[6], tsec->session_key[7]);
- if (tsec->ticket_len >= 8)
+ v1->session_key[0], v1->session_key[1],
+ v1->session_key[2], v1->session_key[3],
+ v1->session_key[4], v1->session_key[5],
+ v1->session_key[6], v1->session_key[7]);
+ if (v1->ticket_length >= 8)
_debug("TCKT: %02x%02x%02x%02x%02x%02x%02x%02x",
- tsec->ticket[0], tsec->ticket[1],
- tsec->ticket[2], tsec->ticket[3],
- tsec->ticket[4], tsec->ticket[5],
- tsec->ticket[6], tsec->ticket[7]);
+ v1->ticket[0], v1->ticket[1],
+ v1->ticket[2], v1->ticket[3],
+ v1->ticket[4], v1->ticket[5],
+ v1->ticket[6], v1->ticket[7]);
ret = -EPROTONOSUPPORT;
- if (tsec->security_index != 2)
+ if (v1->security_index != RXRPC_SECURITY_RXKAD)
goto error;
- key->type_data.x[0] = tsec->security_index;
-
- plen = sizeof(*upayload) + tsec->ticket_len;
- ret = key_payload_reserve(key, plen);
+ plen = sizeof(*token->kad) + v1->ticket_length;
+ ret = key_payload_reserve(key, plen + sizeof(*token));
if (ret < 0)
goto error;
ret = -ENOMEM;
- upayload = kmalloc(plen, GFP_KERNEL);
- if (!upayload)
+ token = kmalloc(sizeof(*token), GFP_KERNEL);
+ if (!token)
goto error;
+ token->kad = kmalloc(plen, GFP_KERNEL);
+ if (!token->kad)
+ goto error_free;
+
+ token->security_index = RXRPC_SECURITY_RXKAD;
+ token->kad->ticket_len = v1->ticket_length;
+ token->kad->expiry = v1->expiry;
+ token->kad->kvno = v1->kvno;
+ memcpy(&token->kad->session_key, &v1->session_key, 8);
+ memcpy(&token->kad->ticket, v1->ticket, v1->ticket_length);
/* attach the data */
- memcpy(&upayload->k, tsec, sizeof(*tsec));
- memcpy(&upayload->k.ticket, (void *)tsec + sizeof(*tsec),
- tsec->ticket_len);
- key->payload.data = upayload;
- key->expiry = tsec->expiry;
+ key->type_data.x[0]++;
+
+ pp = (struct rxrpc_key_token **)&key->payload.data;
+ while (*pp)
+ pp = &(*pp)->next;
+ *pp = token;
+ if (token->kad->expiry < key->expiry)
+ key->expiry = token->kad->expiry;
+ token = NULL;
ret = 0;
+error_free:
+ kfree(token);
error:
return ret;
}
@@ -184,7 +797,26 @@ static int rxrpc_instantiate_s(struct key *key, const void *data,
*/
static void rxrpc_destroy(struct key *key)
{
- kfree(key->payload.data);
+ struct rxrpc_key_token *token;
+
+ while ((token = key->payload.data)) {
+ key->payload.data = token->next;
+ switch (token->security_index) {
+ case RXRPC_SECURITY_RXKAD:
+ kfree(token->kad);
+ break;
+ case RXRPC_SECURITY_RXK5:
+ if (token->k5)
+ rxrpc_rxk5_free(token->k5);
+ break;
+ default:
+ printk(KERN_ERR "Unknown token type %x on rxrpc key\n",
+ token->security_index);
+ BUG();
+ }
+
+ kfree(token);
+ }
}
/*
@@ -293,7 +925,7 @@ int rxrpc_get_server_data_key(struct rxrpc_connection *conn,
struct {
u32 kver;
- struct rxkad_key tsec;
+ struct rxrpc_key_data_v1 v1;
} data;
_enter("");
@@ -308,13 +940,12 @@ int rxrpc_get_server_data_key(struct rxrpc_connection *conn,
_debug("key %d", key_serial(key));
data.kver = 1;
- data.tsec.security_index = 2;
- data.tsec.ticket_len = 0;
- data.tsec.expiry = expiry;
- data.tsec.kvno = 0;
+ data.v1.security_index = RXRPC_SECURITY_RXKAD;
+ data.v1.ticket_length = 0;
+ data.v1.expiry = expiry;
+ data.v1.kvno = 0;
- memcpy(&data.tsec.session_key, session_key,
- sizeof(data.tsec.session_key));
+ memcpy(&data.v1.session_key, session_key, sizeof(data.v1.session_key));
ret = key_instantiate_and_link(key, &data, sizeof(data), NULL, NULL);
if (ret < 0)
@@ -360,3 +991,210 @@ struct key *rxrpc_get_null_key(const char *keyname)
return key;
}
EXPORT_SYMBOL(rxrpc_get_null_key);
+
+/*
+ * read the contents of an rxrpc key
+ * - this returns the result in XDR form
+ */
+static long rxrpc_read(const struct key *key,
+ char __user *buffer, size_t buflen)
+{
+ const struct rxrpc_key_token *token;
+ const struct krb5_principal *princ;
+ size_t size;
+ __be32 __user *xdr, *oldxdr;
+ u32 cnlen, toksize, ntoks, tok, zero;
+ u16 toksizes[AFSTOKEN_MAX];
+ int loop;
+
+ _enter("");
+
+ /* we don't know what form we should return non-AFS keys in */
+ if (memcmp(key->description, "afs@", 4) != 0)
+ return -EOPNOTSUPP;
+ cnlen = strlen(key->description + 4);
+
+#define RND(X) (((X) + 3) & ~3)
+
+ /* AFS keys we return in XDR form, so we need to work out the size of
+ * the XDR */
+ size = 2 * 4; /* flags, cellname len */
+ size += RND(cnlen); /* cellname */
+ size += 1 * 4; /* token count */
+
+ ntoks = 0;
+ for (token = key->payload.data; token; token = token->next) {
+ toksize = 4; /* sec index */
+
+ switch (token->security_index) {
+ case RXRPC_SECURITY_RXKAD:
+ toksize += 8 * 4; /* viceid, kvno, key*2, begin,
+ * end, primary, tktlen */
+ toksize += RND(token->kad->ticket_len);
+ break;
+
+ case RXRPC_SECURITY_RXK5:
+ princ = &token->k5->client;
+ toksize += 4 + princ->n_name_parts * 4;
+ for (loop = 0; loop < princ->n_name_parts; loop++)
+ toksize += RND(strlen(princ->name_parts[loop]));
+ toksize += 4 + RND(strlen(princ->realm));
+
+ princ = &token->k5->server;
+ toksize += 4 + princ->n_name_parts * 4;
+ for (loop = 0; loop < princ->n_name_parts; loop++)
+ toksize += RND(strlen(princ->name_parts[loop]));
+ toksize += 4 + RND(strlen(princ->realm));
+
+ toksize += 8 + RND(token->k5->session.data_len);
+
+ toksize += 4 * 8 + 2 * 4;
+
+ toksize += 4 + token->k5->n_addresses * 8;
+ for (loop = 0; loop < token->k5->n_addresses; loop++)
+ toksize += RND(token->k5->addresses[loop].data_len);
+
+ toksize += 4 + RND(token->k5->ticket_len);
+ toksize += 4 + RND(token->k5->ticket2_len);
+
+ toksize += 4 + token->k5->n_authdata * 8;
+ for (loop = 0; loop < token->k5->n_authdata; loop++)
+ toksize += RND(token->k5->authdata[loop].data_len);
+ break;
+
+ default: /* we have a ticket we can't encode */
+ BUG();
+ continue;
+ }
+
+ _debug("token[%u]: toksize=%u", ntoks, toksize);
+ ASSERTCMP(toksize, <=, AFSTOKEN_LENGTH_MAX);
+
+ toksizes[ntoks++] = toksize;
+ size += toksize + 4; /* each token has a length word */
+ }
+
+#undef RND
+
+ if (!buffer || buflen < size)
+ return size;
+
+ xdr = (__be32 __user *) buffer;
+ zero = 0;
+#define ENCODE(x) \
+ do { \
+ __be32 y = htonl(x); \
+ if (put_user(y, xdr++) < 0) \
+ goto fault; \
+ } while(0)
+#define ENCODE_DATA(l, s) \
+ do { \
+ u32 _l = (l); \
+ ENCODE(l); \
+ if (copy_to_user(xdr, (s), _l) != 0) \
+ goto fault; \
+ if (_l & 3 && \
+ copy_to_user((u8 *)xdr + _l, &zero, 4 - (_l & 3)) != 0) \
+ goto fault; \
+ xdr += (_l + 3) >> 2; \
+ } while(0)
+#define ENCODE64(x) \
+ do { \
+ __be64 y = cpu_to_be64(x); \
+ if (copy_to_user(xdr, &y, 8) != 0) \
+ goto fault; \
+ xdr += 8 >> 2; \
+ } while(0)
+#define ENCODE_STR(s) \
+ do { \
+ const char *_s = (s); \
+ ENCODE_DATA(strlen(_s), _s); \
+ } while(0)
+
+ ENCODE(0); /* flags */
+ ENCODE_DATA(cnlen, key->description + 4); /* cellname */
+ ENCODE(ntoks);
+
+ tok = 0;
+ for (token = key->payload.data; token; token = token->next) {
+ toksize = toksizes[tok++];
+ ENCODE(toksize);
+ oldxdr = xdr;
+ ENCODE(token->security_index);
+
+ switch (token->security_index) {
+ case RXRPC_SECURITY_RXKAD:
+ ENCODE(token->kad->vice_id);
+ ENCODE(token->kad->kvno);
+ ENCODE_DATA(8, token->kad->session_key);
+ ENCODE(token->kad->start);
+ ENCODE(token->kad->expiry);
+ ENCODE(token->kad->primary_flag);
+ ENCODE_DATA(token->kad->ticket_len, token->kad->ticket);
+ break;
+
+ case RXRPC_SECURITY_RXK5:
+ princ = &token->k5->client;
+ ENCODE(princ->n_name_parts);
+ for (loop = 0; loop < princ->n_name_parts; loop++)
+ ENCODE_STR(princ->name_parts[loop]);
+ ENCODE_STR(princ->realm);
+
+ princ = &token->k5->server;
+ ENCODE(princ->n_name_parts);
+ for (loop = 0; loop < princ->n_name_parts; loop++)
+ ENCODE_STR(princ->name_parts[loop]);
+ ENCODE_STR(princ->realm);
+
+ ENCODE(token->k5->session.tag);
+ ENCODE_DATA(token->k5->session.data_len,
+ token->k5->session.data);
+
+ ENCODE64(token->k5->authtime);
+ ENCODE64(token->k5->starttime);
+ ENCODE64(token->k5->endtime);
+ ENCODE64(token->k5->renew_till);
+ ENCODE(token->k5->is_skey);
+ ENCODE(token->k5->flags);
+
+ ENCODE(token->k5->n_addresses);
+ for (loop = 0; loop < token->k5->n_addresses; loop++) {
+ ENCODE(token->k5->addresses[loop].tag);
+ ENCODE_DATA(token->k5->addresses[loop].data_len,
+ token->k5->addresses[loop].data);
+ }
+
+ ENCODE_DATA(token->k5->ticket_len, token->k5->ticket);
+ ENCODE_DATA(token->k5->ticket2_len, token->k5->ticket2);
+
+ ENCODE(token->k5->n_authdata);
+ for (loop = 0; loop < token->k5->n_authdata; loop++) {
+ ENCODE(token->k5->authdata[loop].tag);
+ ENCODE_DATA(token->k5->authdata[loop].data_len,
+ token->k5->authdata[loop].data);
+ }
+ break;
+
+ default:
+ BUG();
+ break;
+ }
+
+ ASSERTCMP((unsigned long)xdr - (unsigned long)oldxdr, ==,
+ toksize);
+ }
+
+#undef ENCODE_STR
+#undef ENCODE_DATA
+#undef ENCODE64
+#undef ENCODE
+
+ ASSERTCMP(tok, ==, ntoks);
+ ASSERTCMP((char __user *) xdr - buffer, ==, size);
+ _leave(" = %zu", size);
+ return size;
+
+fault:
+ _leave(" = -EFAULT");
+ return -EFAULT;
+}
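
Both the XDR decoders above and the ENCODE_DATA macro in rxrpc_read lean on the same convention: each variable-length field is a 32-bit length word followed by the data padded with zero bytes to a 4-byte boundary, which is what the RND() macro computes. A small standalone check of that rounding arithmetic:

#include <assert.h>
#include <stdio.h>

/* Round a length up to the next 4-byte (XDR word) boundary, exactly as
 * the RND() macro in ar-key.c does. */
static unsigned int rnd4(unsigned int len)
{
	return (len + 3) & ~3u;
}

int main(void)
{
	/* A field of length len consumes 4 bytes (the length word) plus
	 * rnd4(len) bytes on the wire; the tail of the last word is
	 * zero padding. */
	unsigned int lens[] = { 0, 1, 3, 4, 5, 8, 13 };

	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len %2u -> padded %2u, wire bytes %2u\n",
		       lens[i], rnd4(lens[i]), 4 + rnd4(lens[i]));

	assert(rnd4(5) == 8 && rnd4(8) == 8);
	return 0;
}
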
diff --git a/net/rxrpc/ar-security.c b/net/rxrpc/ar-security.c
index dc62920ee19..49b3cc31ee1 100644
--- a/net/rxrpc/ar-security.c
+++ b/net/rxrpc/ar-security.c
@@ -16,6 +16,7 @@
#include <linux/crypto.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
+#include <keys/rxrpc-type.h>
#include "ar-internal.h"
static LIST_HEAD(rxrpc_security_methods);
@@ -122,6 +123,7 @@ EXPORT_SYMBOL_GPL(rxrpc_unregister_security);
*/
int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
{
+ struct rxrpc_key_token *token;
struct rxrpc_security *sec;
struct key *key = conn->key;
int ret;
@@ -135,7 +137,11 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
if (ret < 0)
return ret;
- sec = rxrpc_security_lookup(key->type_data.x[0]);
+ if (!key->payload.data)
+ return -EKEYREJECTED;
+ token = key->payload.data;
+
+ sec = rxrpc_security_lookup(token->security_index);
if (!sec)
return -EKEYREJECTED;
conn->security = sec;
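
With this change the connection's security class is taken from the first token on the key's payload list rather than from key->type_data. The tokens themselves are appended in ar-key.c by walking a pointer-to-pointer until the NULL tail; a compact standalone sketch of that append-and-read pattern (struct and field names here are illustrative):

#include <stdio.h>

/* Illustrative stand-in for the rxrpc key token list. */
struct token {
	int security_index;
	struct token *next;
};

/* Append by walking a pointer-to-pointer to the terminating NULL, as
 * rxrpc_instantiate*() does with key->payload.data. */
static void append_token(struct token **head, struct token *tok)
{
	struct token **pp;

	for (pp = head; *pp; pp = &(*pp)->next)
		;
	tok->next = NULL;
	*pp = tok;
}

int main(void)
{
	struct token *head = NULL;
	struct token a = { 2, NULL }, b = { 5, NULL };

	append_token(&head, &a);
	append_token(&head, &b);

	/* ar-security.c only needs the first token's security index. */
	printf("first token security index: %d\n", head->security_index);
	return 0;
}
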
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index ef8f91030a1..713ac593e2e 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -18,6 +18,7 @@
#include <linux/ctype.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
+#include <keys/rxrpc-type.h>
#define rxrpc_debug rxkad_debug
#include "ar-internal.h"
@@ -42,7 +43,7 @@ struct rxkad_level2_hdr {
__be32 checksum; /* decrypted data checksum */
};
-MODULE_DESCRIPTION("RxRPC network protocol type-2 security (Kerberos)");
+MODULE_DESCRIPTION("RxRPC network protocol type-2 security (Kerberos 4)");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
@@ -59,14 +60,14 @@ static DEFINE_MUTEX(rxkad_ci_mutex);
*/
static int rxkad_init_connection_security(struct rxrpc_connection *conn)
{
- struct rxrpc_key_payload *payload;
struct crypto_blkcipher *ci;
+ struct rxrpc_key_token *token;
int ret;
_enter("{%d},{%x}", conn->debug_id, key_serial(conn->key));
- payload = conn->key->payload.data;
- conn->security_ix = payload->k.security_index;
+ token = conn->key->payload.data;
+ conn->security_ix = token->security_index;
ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(ci)) {
@@ -75,8 +76,8 @@ static int rxkad_init_connection_security(struct rxrpc_connection *conn)
goto error;
}
- if (crypto_blkcipher_setkey(ci, payload->k.session_key,
- sizeof(payload->k.session_key)) < 0)
+ if (crypto_blkcipher_setkey(ci, token->kad->session_key,
+ sizeof(token->kad->session_key)) < 0)
BUG();
switch (conn->security_level) {
@@ -110,7 +111,7 @@ error:
*/
static void rxkad_prime_packet_security(struct rxrpc_connection *conn)
{
- struct rxrpc_key_payload *payload;
+ struct rxrpc_key_token *token;
struct blkcipher_desc desc;
struct scatterlist sg[2];
struct rxrpc_crypt iv;
@@ -123,8 +124,8 @@ static void rxkad_prime_packet_security(struct rxrpc_connection *conn)
if (!conn->key)
return;
- payload = conn->key->payload.data;
- memcpy(&iv, payload->k.session_key, sizeof(iv));
+ token = conn->key->payload.data;
+ memcpy(&iv, token->kad->session_key, sizeof(iv));
desc.tfm = conn->cipher;
desc.info = iv.x;
@@ -197,7 +198,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
u32 data_size,
void *sechdr)
{
- const struct rxrpc_key_payload *payload;
+ const struct rxrpc_key_token *token;
struct rxkad_level2_hdr rxkhdr
__attribute__((aligned(8))); /* must be all on one page */
struct rxrpc_skb_priv *sp;
@@ -219,8 +220,8 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
rxkhdr.checksum = 0;
/* encrypt from the session key */
- payload = call->conn->key->payload.data;
- memcpy(&iv, payload->k.session_key, sizeof(iv));
+ token = call->conn->key->payload.data;
+ memcpy(&iv, token->kad->session_key, sizeof(iv));
desc.tfm = call->conn->cipher;
desc.info = iv.x;
desc.flags = 0;
@@ -400,7 +401,7 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
struct sk_buff *skb,
u32 *_abort_code)
{
- const struct rxrpc_key_payload *payload;
+ const struct rxrpc_key_token *token;
struct rxkad_level2_hdr sechdr;
struct rxrpc_skb_priv *sp;
struct blkcipher_desc desc;
@@ -431,8 +432,8 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
skb_to_sgvec(skb, sg, 0, skb->len);
/* decrypt from the session key */
- payload = call->conn->key->payload.data;
- memcpy(&iv, payload->k.session_key, sizeof(iv));
+ token = call->conn->key->payload.data;
+ memcpy(&iv, token->kad->session_key, sizeof(iv));
desc.tfm = call->conn->cipher;
desc.info = iv.x;
desc.flags = 0;
@@ -506,7 +507,7 @@ static int rxkad_verify_packet(const struct rxrpc_call *call,
if (!call->conn->cipher)
return 0;
- if (sp->hdr.securityIndex != 2) {
+ if (sp->hdr.securityIndex != RXRPC_SECURITY_RXKAD) {
*_abort_code = RXKADINCONSISTENCY;
_leave(" = -EPROTO [not rxkad]");
return -EPROTO;
@@ -737,7 +738,7 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
struct sk_buff *skb,
u32 *_abort_code)
{
- const struct rxrpc_key_payload *payload;
+ const struct rxrpc_key_token *token;
struct rxkad_challenge challenge;
struct rxkad_response resp
__attribute__((aligned(8))); /* must be aligned for crypto */
@@ -778,7 +779,7 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
if (conn->security_level < min_level)
goto protocol_error;
- payload = conn->key->payload.data;
+ token = conn->key->payload.data;
/* build the response packet */
memset(&resp, 0, sizeof(resp));
@@ -797,13 +798,13 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
(conn->channels[3] ? conn->channels[3]->call_id : 0);
resp.encrypted.inc_nonce = htonl(nonce + 1);
resp.encrypted.level = htonl(conn->security_level);
- resp.kvno = htonl(payload->k.kvno);
- resp.ticket_len = htonl(payload->k.ticket_len);
+ resp.kvno = htonl(token->kad->kvno);
+ resp.ticket_len = htonl(token->kad->ticket_len);
/* calculate the response checksum and then do the encryption */
rxkad_calc_response_checksum(&resp);
- rxkad_encrypt_response(conn, &resp, &payload->k);
- return rxkad_send_response(conn, &sp->hdr, &resp, &payload->k);
+ rxkad_encrypt_response(conn, &resp, token->kad);
+ return rxkad_send_response(conn, &sp->hdr, &resp, token->kad);
protocol_error:
*_abort_code = abort_code;
@@ -1122,7 +1123,7 @@ static void rxkad_clear(struct rxrpc_connection *conn)
static struct rxrpc_security rxkad = {
.owner = THIS_MODULE,
.name = "rxkad",
- .security_index = RXKAD_VERSION,
+ .security_index = RXRPC_SECURITY_RXKAD,
.init_connection_security = rxkad_init_connection_security,
.prime_packet_security = rxkad_prime_packet_security,
.secure_packet = rxkad_secure_packet,
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 54d950cd4b8..f14e71bfa58 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -2,7 +2,7 @@
# Makefile for the Linux Traffic Control Unit.
#
-obj-y := sch_generic.o
+obj-y := sch_generic.o sch_mq.o
obj-$(CONFIG_NET_SCHED) += sch_api.o sch_blackhole.o
obj-$(CONFIG_NET_CLS) += cls_api.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 9d03cc33b6c..2dfb3e7a040 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -1011,7 +1011,7 @@ replay:
}
static struct nlattr *
-find_dump_kind(struct nlmsghdr *n)
+find_dump_kind(const struct nlmsghdr *n)
{
struct nlattr *tb1, *tb2[TCA_ACT_MAX+1];
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 09cdcdfe7e9..6a536949cdc 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -168,8 +168,7 @@ replay:
/* Find qdisc */
if (!parent) {
- struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
- q = dev_queue->qdisc_sleeping;
+ q = dev->qdisc;
parent = q->handle;
} else {
q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent));
@@ -181,6 +180,9 @@ replay:
if ((cops = q->ops->cl_ops) == NULL)
return -EINVAL;
+ if (cops->tcf_chain == NULL)
+ return -EOPNOTSUPP;
+
/* Do we search for filter, attached to class? */
if (TC_H_MIN(parent)) {
cl = cops->get(q, parent);
@@ -405,7 +407,6 @@ static int tcf_node_dump(struct tcf_proto *tp, unsigned long n,
static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
- struct netdev_queue *dev_queue;
int t;
int s_t;
struct net_device *dev;
@@ -424,15 +425,16 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
return skb->len;
- dev_queue = netdev_get_tx_queue(dev, 0);
if (!tcm->tcm_parent)
- q = dev_queue->qdisc_sleeping;
+ q = dev->qdisc;
else
q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
if (!q)
goto out;
if ((cops = q->ops->cl_ops) == NULL)
goto errout;
+ if (cops->tcf_chain == NULL)
+ goto errout;
if (TC_H_MIN(tcm->tcm_parent)) {
cl = cops->get(q, tcm->tcm_parent);
if (cl == 0)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 24d17ce9c29..903e4188b6c 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -207,7 +207,7 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
static void qdisc_list_add(struct Qdisc *q)
{
if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
- list_add_tail(&q->list, &qdisc_root_sleeping(q)->list);
+ list_add_tail(&q->list, &qdisc_dev(q)->qdisc->list);
}
void qdisc_list_del(struct Qdisc *q)
@@ -219,17 +219,11 @@ EXPORT_SYMBOL(qdisc_list_del);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
- unsigned int i;
struct Qdisc *q;
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- struct Qdisc *txq_root = txq->qdisc_sleeping;
-
- q = qdisc_match_from_root(txq_root, handle);
- if (q)
- goto out;
- }
+ q = qdisc_match_from_root(dev->qdisc, handle);
+ if (q)
+ goto out;
q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
out:
@@ -616,32 +610,6 @@ static u32 qdisc_alloc_handle(struct net_device *dev)
return i>0 ? autohandle : 0;
}
-/* Attach toplevel qdisc to device queue. */
-
-static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
- struct Qdisc *qdisc)
-{
- struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
- spinlock_t *root_lock;
-
- root_lock = qdisc_lock(oqdisc);
- spin_lock_bh(root_lock);
-
- /* Prune old scheduler */
- if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
- qdisc_reset(oqdisc);
-
- /* ... and graft new one */
- if (qdisc == NULL)
- qdisc = &noop_qdisc;
- dev_queue->qdisc_sleeping = qdisc;
- rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
-
- spin_unlock_bh(root_lock);
-
- return oqdisc;
-}
-
void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
{
const struct Qdisc_class_ops *cops;
@@ -710,6 +678,11 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
if (dev->flags & IFF_UP)
dev_deactivate(dev);
+ if (new && new->ops->attach) {
+ new->ops->attach(new);
+ num_q = 0;
+ }
+
for (i = 0; i < num_q; i++) {
struct netdev_queue *dev_queue = &dev->rx_queue;
@@ -720,6 +693,16 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
if (new && i > 0)
atomic_inc(&new->refcnt);
+ if (!ingress)
+ qdisc_destroy(old);
+ }
+
+ if (!ingress) {
+ notify_and_destroy(skb, n, classid, dev->qdisc, new);
+ if (new && !new->ops->attach)
+ atomic_inc(&new->refcnt);
+ dev->qdisc = new ? : &noop_qdisc;
+ } else {
notify_and_destroy(skb, n, classid, old, new);
}
@@ -728,14 +711,14 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
} else {
const struct Qdisc_class_ops *cops = parent->ops->cl_ops;
- err = -EINVAL;
-
- if (cops) {
+ err = -EOPNOTSUPP;
+ if (cops && cops->graft) {
unsigned long cl = cops->get(parent, classid);
if (cl) {
err = cops->graft(parent, cl, new, &old);
cops->put(parent, cl);
- }
+ } else
+ err = -ENOENT;
}
if (!err)
notify_and_destroy(skb, n, classid, old, new);
@@ -755,7 +738,8 @@ static struct lock_class_key qdisc_rx_lock;
static struct Qdisc *
qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
- u32 parent, u32 handle, struct nlattr **tca, int *errp)
+ struct Qdisc *p, u32 parent, u32 handle,
+ struct nlattr **tca, int *errp)
{
int err;
struct nlattr *kind = tca[TCA_KIND];
@@ -825,31 +809,28 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
stab = qdisc_get_stab(tca[TCA_STAB]);
if (IS_ERR(stab)) {
err = PTR_ERR(stab);
- goto err_out3;
+ goto err_out4;
}
sch->stab = stab;
}
if (tca[TCA_RATE]) {
spinlock_t *root_lock;
+ err = -EOPNOTSUPP;
+ if (sch->flags & TCQ_F_MQROOT)
+ goto err_out4;
+
if ((sch->parent != TC_H_ROOT) &&
- !(sch->flags & TCQ_F_INGRESS))
+ !(sch->flags & TCQ_F_INGRESS) &&
+ (!p || !(p->flags & TCQ_F_MQROOT)))
root_lock = qdisc_root_sleeping_lock(sch);
else
root_lock = qdisc_lock(sch);
err = gen_new_estimator(&sch->bstats, &sch->rate_est,
root_lock, tca[TCA_RATE]);
- if (err) {
- /*
- * Any broken qdiscs that would require
- * a ops->reset() here? The qdisc was never
- * in action so it shouldn't be necessary.
- */
- if (ops->destroy)
- ops->destroy(sch);
- goto err_out3;
- }
+ if (err)
+ goto err_out4;
}
qdisc_list_add(sch);
@@ -857,7 +838,6 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
return sch;
}
err_out3:
- qdisc_put_stab(sch->stab);
dev_put(dev);
kfree((char *) sch - sch->padded);
err_out2:
@@ -865,6 +845,16 @@ err_out2:
err_out:
*errp = err;
return NULL;
+
+err_out4:
+ /*
+ * Any broken qdiscs that would require a ops->reset() here?
+ * The qdisc was never in action so it shouldn't be necessary.
+ */
+ qdisc_put_stab(sch->stab);
+ if (ops->destroy)
+ ops->destroy(sch);
+ goto err_out3;
}
static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
@@ -889,13 +879,16 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
qdisc_put_stab(sch->stab);
sch->stab = stab;
- if (tca[TCA_RATE])
+ if (tca[TCA_RATE]) {
/* NB: ignores errors from replace_estimator
because change can't be undone. */
+ if (sch->flags & TCQ_F_MQROOT)
+ goto out;
gen_replace_estimator(&sch->bstats, &sch->rate_est,
qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
-
+ }
+out:
return 0;
}
@@ -974,9 +967,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
q = dev->rx_queue.qdisc_sleeping;
}
} else {
- struct netdev_queue *dev_queue;
- dev_queue = netdev_get_tx_queue(dev, 0);
- q = dev_queue->qdisc_sleeping;
+ q = dev->qdisc;
}
if (!q)
return -ENOENT;
@@ -1044,9 +1035,7 @@ replay:
q = dev->rx_queue.qdisc_sleeping;
}
} else {
- struct netdev_queue *dev_queue;
- dev_queue = netdev_get_tx_queue(dev, 0);
- q = dev_queue->qdisc_sleeping;
+ q = dev->qdisc;
}
/* It may be default qdisc, ignore it */
@@ -1123,13 +1112,23 @@ create_n_graft:
if (!(n->nlmsg_flags&NLM_F_CREATE))
return -ENOENT;
if (clid == TC_H_INGRESS)
- q = qdisc_create(dev, &dev->rx_queue,
+ q = qdisc_create(dev, &dev->rx_queue, p,
tcm->tcm_parent, tcm->tcm_parent,
tca, &err);
- else
- q = qdisc_create(dev, netdev_get_tx_queue(dev, 0),
+ else {
+ struct netdev_queue *dev_queue;
+
+ if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
+ dev_queue = p->ops->cl_ops->select_queue(p, tcm);
+ else if (p)
+ dev_queue = p->dev_queue;
+ else
+ dev_queue = netdev_get_tx_queue(dev, 0);
+
+ q = qdisc_create(dev, dev_queue, p,
tcm->tcm_parent, tcm->tcm_handle,
tca, &err);
+ }
if (q == NULL) {
if (err == -EAGAIN)
goto replay;
@@ -1291,8 +1290,7 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
s_q_idx = 0;
q_idx = 0;
- dev_queue = netdev_get_tx_queue(dev, 0);
- if (tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb, &q_idx, s_q_idx) < 0)
+ if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
goto done;
dev_queue = &dev->rx_queue;
@@ -1323,7 +1321,6 @@ done:
static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
{
struct net *net = sock_net(skb->sk);
- struct netdev_queue *dev_queue;
struct tcmsg *tcm = NLMSG_DATA(n);
struct nlattr *tca[TCA_MAX + 1];
struct net_device *dev;
@@ -1361,7 +1358,6 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
/* Step 1. Determine qdisc handle X:0 */
- dev_queue = netdev_get_tx_queue(dev, 0);
if (pid != TC_H_ROOT) {
u32 qid1 = TC_H_MAJ(pid);
@@ -1372,7 +1368,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
} else if (qid1) {
qid = qid1;
} else if (qid == 0)
- qid = dev_queue->qdisc_sleeping->handle;
+ qid = dev->qdisc->handle;
/* Now qid is genuine qdisc handle consistent
both with parent and child.
@@ -1383,7 +1379,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
pid = TC_H_MAKE(qid, pid);
} else {
if (qid == 0)
- qid = dev_queue->qdisc_sleeping->handle;
+ qid = dev->qdisc->handle;
}
/* OK. Locate qdisc */
@@ -1417,7 +1413,9 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
goto out;
break;
case RTM_DELTCLASS:
- err = cops->delete(q, cl);
+ err = -EOPNOTSUPP;
+ if (cops->delete)
+ err = cops->delete(q, cl);
if (err == 0)
tclass_notify(skb, n, q, cl, RTM_DELTCLASS);
goto out;
@@ -1431,7 +1429,9 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
}
new_cl = cl;
- err = cops->change(q, clid, pid, tca, &new_cl);
+ err = -EOPNOTSUPP;
+ if (cops->change)
+ err = cops->change(q, clid, pid, tca, &new_cl);
if (err == 0)
tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS);
@@ -1456,6 +1456,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags);
tcm = NLMSG_DATA(nlh);
tcm->tcm_family = AF_UNSPEC;
+ tcm->tcm__pad1 = 0;
+ tcm->tcm__pad2 = 0;
tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
tcm->tcm_parent = q->handle;
tcm->tcm_handle = q->handle;
@@ -1584,8 +1586,7 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
s_t = cb->args[0];
t = 0;
- dev_queue = netdev_get_tx_queue(dev, 0);
- if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
+ if (tc_dump_tclass_root(dev->qdisc, skb, tcm, cb, &t, s_t) < 0)
goto done;
dev_queue = &dev->rx_queue;
@@ -1705,6 +1706,7 @@ static int __init pktsched_init(void)
{
register_qdisc(&pfifo_qdisc_ops);
register_qdisc(&bfifo_qdisc_ops);
+ register_qdisc(&mq_qdisc_ops);
proc_net_fops_create(&init_net, "psched", 0, &psched_fops);
rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 2a8b83af7c4..ab82f145f68 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -49,7 +49,7 @@ struct atm_flow_data {
struct socket *sock; /* for closing */
u32 classid; /* x:y type ID */
int ref; /* reference count */
- struct gnet_stats_basic bstats;
+ struct gnet_stats_basic_packed bstats;
struct gnet_stats_queue qstats;
struct atm_flow_data *next;
struct atm_flow_data *excess; /* flow for excess traffic;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 23a167670fd..5b132c47326 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -128,7 +128,7 @@ struct cbq_class
long avgidle;
long deficit; /* Saved deficit for WRR */
psched_time_t penalized;
- struct gnet_stats_basic bstats;
+ struct gnet_stats_basic_packed bstats;
struct gnet_stats_queue qstats;
struct gnet_stats_rate_est rate_est;
struct tc_cbq_xstats xstats;
@@ -1621,29 +1621,25 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
{
struct cbq_class *cl = (struct cbq_class*)arg;
- if (cl) {
- if (new == NULL) {
- new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
- &pfifo_qdisc_ops,
- cl->common.classid);
- if (new == NULL)
- return -ENOBUFS;
- } else {
+ if (new == NULL) {
+ new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+ &pfifo_qdisc_ops, cl->common.classid);
+ if (new == NULL)
+ return -ENOBUFS;
+ } else {
#ifdef CONFIG_NET_CLS_ACT
- if (cl->police == TC_POLICE_RECLASSIFY)
- new->reshape_fail = cbq_reshape_fail;
+ if (cl->police == TC_POLICE_RECLASSIFY)
+ new->reshape_fail = cbq_reshape_fail;
#endif
- }
- sch_tree_lock(sch);
- *old = cl->q;
- cl->q = new;
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- sch_tree_unlock(sch);
-
- return 0;
}
- return -ENOENT;
+ sch_tree_lock(sch);
+ *old = cl->q;
+ cl->q = new;
+ qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+ qdisc_reset(*old);
+ sch_tree_unlock(sch);
+
+ return 0;
}
static struct Qdisc *
@@ -1651,7 +1647,7 @@ cbq_leaf(struct Qdisc *sch, unsigned long arg)
{
struct cbq_class *cl = (struct cbq_class*)arg;
- return cl ? cl->q : NULL;
+ return cl->q;
}
static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 7597fe14686..5a888af7e5d 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -22,7 +22,7 @@ struct drr_class {
unsigned int refcnt;
unsigned int filter_cnt;
- struct gnet_stats_basic bstats;
+ struct gnet_stats_basic_packed bstats;
struct gnet_stats_queue qstats;
struct gnet_stats_rate_est rate_est;
struct list_head alist;
@@ -274,8 +274,10 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
struct tc_drr_stats xstats;
memset(&xstats, 0, sizeof(xstats));
- if (cl->qdisc->q.qlen)
+ if (cl->qdisc->q.qlen) {
xstats.deficit = cl->deficit;
+ cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;
+ }
if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 27d03816ec3..4ae6aa562f2 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -37,15 +37,11 @@
* - updates to tree and tree walking are only done under the rtnl mutex.
*/
-static inline int qdisc_qlen(struct Qdisc *q)
-{
- return q->q.qlen;
-}
-
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
q->gso_skb = skb;
q->qstats.requeues++;
+ q->q.qlen++; /* it's still part of the queue */
__netif_schedule(q);
return 0;
@@ -61,9 +57,11 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
/* check the reason of requeuing without tx lock first */
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
- if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+ if (!netif_tx_queue_stopped(txq) &&
+ !netif_tx_queue_frozen(txq)) {
q->gso_skb = NULL;
- else
+ q->q.qlen--;
+ } else
skb = NULL;
} else {
skb = q->dequeue(q);
@@ -103,44 +101,23 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
}
/*
- * NOTE: Called under qdisc_lock(q) with locally disabled BH.
- *
- * __QDISC_STATE_RUNNING guarantees only one CPU can process
- * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
- * this queue.
- *
- * netif_tx_lock serializes accesses to device driver.
- *
- * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
- * if one is grabbed, another must be free.
- *
- * Note, that this procedure can be called by a watchdog timer
+ * Transmit one skb, and handle the return status as required. Holding the
+ * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
+ * function.
*
* Returns to the caller:
* 0 - queue is empty or throttled.
* >0 - queue is not empty.
- *
*/
-static inline int qdisc_restart(struct Qdisc *q)
+int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+ struct net_device *dev, struct netdev_queue *txq,
+ spinlock_t *root_lock)
{
- struct netdev_queue *txq;
int ret = NETDEV_TX_BUSY;
- struct net_device *dev;
- spinlock_t *root_lock;
- struct sk_buff *skb;
-
- /* Dequeue packet */
- if (unlikely((skb = dequeue_skb(q)) == NULL))
- return 0;
-
- root_lock = qdisc_lock(q);
/* And release qdisc */
spin_unlock(root_lock);
- dev = qdisc_dev(q);
- txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (!netif_tx_queue_stopped(txq) &&
!netif_tx_queue_frozen(txq))
@@ -177,6 +154,44 @@ static inline int qdisc_restart(struct Qdisc *q)
return ret;
}
+/*
+ * NOTE: Called under qdisc_lock(q) with locally disabled BH.
+ *
+ * __QDISC_STATE_RUNNING guarantees only one CPU can process
+ * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
+ * this queue.
+ *
+ * netif_tx_lock serializes accesses to device driver.
+ *
+ * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
+ * if one is grabbed, another must be free.
+ *
+ * Note, that this procedure can be called by a watchdog timer
+ *
+ * Returns to the caller:
+ * 0 - queue is empty or throttled.
+ * >0 - queue is not empty.
+ *
+ */
+static inline int qdisc_restart(struct Qdisc *q)
+{
+ struct netdev_queue *txq;
+ struct net_device *dev;
+ spinlock_t *root_lock;
+ struct sk_buff *skb;
+
+ /* Dequeue packet */
+ skb = dequeue_skb(q);
+ if (unlikely(!skb))
+ return 0;
+
+ root_lock = qdisc_lock(q);
+ dev = qdisc_dev(q);
+ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+
+ return sch_direct_xmit(skb, q, dev, txq, root_lock);
+}
+
void __qdisc_run(struct Qdisc *q)
{
unsigned long start_time = jiffies;
@@ -391,18 +406,38 @@ static const u8 prio2band[TC_PRIO_MAX+1] =
#define PFIFO_FAST_BANDS 3
-static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
- struct Qdisc *qdisc)
+/*
+ * Private data for a pfifo_fast scheduler containing:
+ * - queues for the three bands
+ * - bitmap indicating which of the bands contain skbs
+ */
+struct pfifo_fast_priv {
+ u32 bitmap;
+ struct sk_buff_head q[PFIFO_FAST_BANDS];
+};
+
+/*
+ * Convert a bitmap to the first band number where an skb is queued, where:
+ * bitmap=0 means there are no skbs on any band.
+ * bitmap=1 means there is an skb on band 0.
+ * bitmap=7 means there are skbs on all 3 bands, etc.
+ */
+static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
+
+static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
+ int band)
{
- struct sk_buff_head *list = qdisc_priv(qdisc);
- return list + prio2band[skb->priority & TC_PRIO_MAX];
+ return priv->q + band;
}
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
{
- struct sk_buff_head *list = prio2list(skb, qdisc);
+ if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
+ int band = prio2band[skb->priority & TC_PRIO_MAX];
+ struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+ struct sk_buff_head *list = band2list(priv, band);
- if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
+ priv->bitmap |= (1 << band);
qdisc->q.qlen++;
return __qdisc_enqueue_tail(skb, qdisc, list);
}
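
The bitmap2band[] table above maps the band-occupancy bitmap to the highest-priority non-empty band, i.e. the index of the lowest set bit. A minimal user-space sketch (an illustration assumed by the editor, not part of the patch) that checks this equivalence against ffs():

/* Hypothetical user-space check that bitmap2band[] equals ffs()-1
 * for the three pfifo_fast bands; not kernel code. */
#include <assert.h>
#include <strings.h>	/* ffs() */

static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};

int main(void)
{
	for (int bitmap = 0; bitmap < 8; bitmap++) {
		int expect = bitmap ? ffs(bitmap) - 1 : -1;
		assert(bitmap2band[bitmap] == expect);
	}
	return 0;
}
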
@@ -412,14 +447,18 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
{
- int prio;
- struct sk_buff_head *list = qdisc_priv(qdisc);
+ struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+ int band = bitmap2band[priv->bitmap];
- for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
- if (!skb_queue_empty(list + prio)) {
- qdisc->q.qlen--;
- return __qdisc_dequeue_head(qdisc, list + prio);
- }
+ if (likely(band >= 0)) {
+ struct sk_buff_head *list = band2list(priv, band);
+ struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);
+
+ qdisc->q.qlen--;
+ if (skb_queue_empty(list))
+ priv->bitmap &= ~(1 << band);
+
+ return skb;
}
return NULL;
@@ -427,12 +466,13 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
{
- int prio;
- struct sk_buff_head *list = qdisc_priv(qdisc);
+ struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+ int band = bitmap2band[priv->bitmap];
- for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
- if (!skb_queue_empty(list + prio))
- return skb_peek(list + prio);
+ if (band >= 0) {
+ struct sk_buff_head *list = band2list(priv, band);
+
+ return skb_peek(list);
}
return NULL;
@@ -441,11 +481,12 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
static void pfifo_fast_reset(struct Qdisc* qdisc)
{
int prio;
- struct sk_buff_head *list = qdisc_priv(qdisc);
+ struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
- __qdisc_reset_queue(qdisc, list + prio);
+ __qdisc_reset_queue(qdisc, band2list(priv, prio));
+ priv->bitmap = 0;
qdisc->qstats.backlog = 0;
qdisc->q.qlen = 0;
}
@@ -465,17 +506,17 @@ nla_put_failure:
static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
int prio;
- struct sk_buff_head *list = qdisc_priv(qdisc);
+ struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
- skb_queue_head_init(list + prio);
+ skb_queue_head_init(band2list(priv, prio));
return 0;
}
-static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
+struct Qdisc_ops pfifo_fast_ops __read_mostly = {
.id = "pfifo_fast",
- .priv_size = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
+ .priv_size = sizeof(struct pfifo_fast_priv),
.enqueue = pfifo_fast_enqueue,
.dequeue = pfifo_fast_dequeue,
.peek = pfifo_fast_peek,
@@ -547,8 +588,11 @@ void qdisc_reset(struct Qdisc *qdisc)
if (ops->reset)
ops->reset(qdisc);
- kfree_skb(qdisc->gso_skb);
- qdisc->gso_skb = NULL;
+ if (qdisc->gso_skb) {
+ kfree_skb(qdisc->gso_skb);
+ qdisc->gso_skb = NULL;
+ qdisc->q.qlen = 0;
+ }
}
EXPORT_SYMBOL(qdisc_reset);
@@ -579,17 +623,29 @@ void qdisc_destroy(struct Qdisc *qdisc)
}
EXPORT_SYMBOL(qdisc_destroy);
-static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
+/* Attach toplevel qdisc to device queue. */
+struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+ struct Qdisc *qdisc)
{
- unsigned int i;
+ struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
+ spinlock_t *root_lock;
- for (i = 0; i < dev->num_tx_queues; i++) {
- struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+ root_lock = qdisc_lock(oqdisc);
+ spin_lock_bh(root_lock);
- if (txq->qdisc_sleeping != &noop_qdisc)
- return false;
- }
- return true;
+ /* Prune old scheduler */
+ if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
+ qdisc_reset(oqdisc);
+
+ /* ... and graft new one */
+ if (qdisc == NULL)
+ qdisc = &noop_qdisc;
+ dev_queue->qdisc_sleeping = qdisc;
+ rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
+
+ spin_unlock_bh(root_lock);
+
+ return oqdisc;
}
static void attach_one_default_qdisc(struct net_device *dev,
@@ -605,12 +661,35 @@ static void attach_one_default_qdisc(struct net_device *dev,
printk(KERN_INFO "%s: activation failed\n", dev->name);
return;
}
+
+ /* Can bypass the queue discipline for the default qdisc */
+ qdisc->flags |= TCQ_F_CAN_BYPASS;
} else {
qdisc = &noqueue_qdisc;
}
dev_queue->qdisc_sleeping = qdisc;
}
+static void attach_default_qdiscs(struct net_device *dev)
+{
+ struct netdev_queue *txq;
+ struct Qdisc *qdisc;
+
+ txq = netdev_get_tx_queue(dev, 0);
+
+ if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) {
+ netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
+ dev->qdisc = txq->qdisc_sleeping;
+ atomic_inc(&dev->qdisc->refcnt);
+ } else {
+ qdisc = qdisc_create_dflt(dev, txq, &mq_qdisc_ops, TC_H_ROOT);
+ if (qdisc) {
+ qdisc->ops->attach(qdisc);
+ dev->qdisc = qdisc;
+ }
+ }
+}
+
static void transition_one_qdisc(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_need_watchdog)
@@ -638,8 +717,8 @@ void dev_activate(struct net_device *dev)
virtual interfaces
*/
- if (dev_all_qdisc_sleeping_noop(dev))
- netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
+ if (dev->qdisc == &noop_qdisc)
+ attach_default_qdiscs(dev);
if (!netif_carrier_ok(dev))
/* Delay activation until next carrier-on event */
@@ -730,6 +809,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
void dev_init_scheduler(struct net_device *dev)
{
+ dev->qdisc = &noop_qdisc;
netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
@@ -755,5 +835,8 @@ void dev_shutdown(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+ qdisc_destroy(dev->qdisc);
+ dev->qdisc = &noop_qdisc;
+
WARN_ON(timer_pending(&dev->watchdog_timer));
}
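
Splitting the driver hand-off out of qdisc_restart() into the exported sch_direct_xmit() lets callers outside __qdisc_run() transmit a dequeued skb under the same locking rules, which is what the TCQ_F_CAN_BYPASS flag set above enables. A hedged sketch of such a caller follows; bypass_or_enqueue() is a hypothetical name, and the __QDISC_STATE_RUNNING ownership a real caller must also take and release is elided for brevity.

/* Hedged sketch (not part of this diff): a caller of the now-exported
 * sch_direct_xmit() outside __qdisc_run(). */
static int bypass_or_enqueue(struct sk_buff *skb, struct Qdisc *q,
			     struct net_device *dev,
			     struct netdev_queue *txq)
{
	spinlock_t *root_lock = qdisc_lock(q);
	int rc;

	spin_lock(root_lock);
	if ((q->flags & TCQ_F_CAN_BYPASS) && !q->q.qlen) {
		/* Queue empty: hand the skb straight to the driver.
		 * sch_direct_xmit() drops and re-takes root_lock around
		 * the HARD_TX_LOCK'ed driver call and returns >0 when
		 * the qdisc still has work queued. */
		if (sch_direct_xmit(skb, q, dev, txq, root_lock))
			__qdisc_run(q);
		rc = NET_XMIT_SUCCESS;
	} else {
		rc = qdisc_enqueue_root(skb, q);
		qdisc_run(q);
	}
	spin_unlock(root_lock);
	return rc;
}
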
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 362c2811b2d..2c5c76be18f 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -77,7 +77,7 @@
* The service curve parameters are converted to the internal
* representation. The slope values are scaled to avoid overflow.
* the inverse slope values as well as the y-projection of the 1st
- * segment are kept in order to to avoid 64-bit divide operations
+ * segment are kept in order to avoid 64-bit divide operations
* that are expensive on 32-bit architectures.
*/
@@ -116,7 +116,7 @@ struct hfsc_class
struct Qdisc_class_common cl_common;
unsigned int refcnt; /* usage count */
- struct gnet_stats_basic bstats;
+ struct gnet_stats_basic_packed bstats;
struct gnet_stats_queue qstats;
struct gnet_stats_rate_est rate_est;
unsigned int level; /* class level in hierarchy */
@@ -1203,8 +1203,6 @@ hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
{
struct hfsc_class *cl = (struct hfsc_class *)arg;
- if (cl == NULL)
- return -ENOENT;
if (cl->level > 0)
return -EINVAL;
if (new == NULL) {
@@ -1228,7 +1226,7 @@ hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
struct hfsc_class *cl = (struct hfsc_class *)arg;
- if (cl != NULL && cl->level == 0)
+ if (cl->level == 0)
return cl->qdisc;
return NULL;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 88cd0262662..85acab9dc6f 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -74,7 +74,7 @@ enum htb_cmode {
struct htb_class {
struct Qdisc_class_common common;
/* general class parameters */
- struct gnet_stats_basic bstats;
+ struct gnet_stats_basic_packed bstats;
struct gnet_stats_queue qstats;
struct gnet_stats_rate_est rate_est;
struct tc_htb_xstats xstats; /* our special stats */
@@ -1117,30 +1117,29 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
{
struct htb_class *cl = (struct htb_class *)arg;
- if (cl && !cl->level) {
- if (new == NULL &&
- (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
- &pfifo_qdisc_ops,
- cl->common.classid))
- == NULL)
- return -ENOBUFS;
- sch_tree_lock(sch);
- *old = cl->un.leaf.q;
- cl->un.leaf.q = new;
- if (*old != NULL) {
- qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
- qdisc_reset(*old);
- }
- sch_tree_unlock(sch);
- return 0;
+ if (cl->level)
+ return -EINVAL;
+ if (new == NULL &&
+ (new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
+ &pfifo_qdisc_ops,
+ cl->common.classid)) == NULL)
+ return -ENOBUFS;
+
+ sch_tree_lock(sch);
+ *old = cl->un.leaf.q;
+ cl->un.leaf.q = new;
+ if (*old != NULL) {
+ qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
+ qdisc_reset(*old);
}
- return -ENOENT;
+ sch_tree_unlock(sch);
+ return 0;
}
static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
struct htb_class *cl = (struct htb_class *)arg;
- return (cl && !cl->level) ? cl->un.leaf.q : NULL;
+ return !cl->level ? cl->un.leaf.q : NULL;
}
static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index 4a2b7737435..a9e646bdb60 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -22,12 +22,6 @@ struct ingress_qdisc_data {
/* ------------------------- Class/flow operations ------------------------- */
-static int ingress_graft(struct Qdisc *sch, unsigned long arg,
- struct Qdisc *new, struct Qdisc **old)
-{
- return -EOPNOTSUPP;
-}
-
static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
{
return NULL;
@@ -48,12 +42,6 @@ static void ingress_put(struct Qdisc *sch, unsigned long cl)
{
}
-static int ingress_change(struct Qdisc *sch, u32 classid, u32 parent,
- struct nlattr **tca, unsigned long *arg)
-{
- return 0;
-}
-
static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
return;
@@ -123,11 +111,9 @@ nla_put_failure:
}
static const struct Qdisc_class_ops ingress_class_ops = {
- .graft = ingress_graft,
.leaf = ingress_leaf,
.get = ingress_get,
.put = ingress_put,
- .change = ingress_change,
.walk = ingress_walk,
.tcf_chain = ingress_find_tcf,
.bind_tcf = ingress_bind_filter,
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
new file mode 100644
index 00000000000..d1dea3d5dc9
--- /dev/null
+++ b/net/sched/sch_mq.c
@@ -0,0 +1,241 @@
+/*
+ * net/sched/sch_mq.c Classful multiqueue dummy scheduler
+ *
+ * Copyright (c) 2009 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+
+struct mq_sched {
+ struct Qdisc **qdiscs;
+};
+
+static void mq_destroy(struct Qdisc *sch)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct mq_sched *priv = qdisc_priv(sch);
+ unsigned int ntx;
+
+ if (!priv->qdiscs)
+ return;
+ for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++)
+ qdisc_destroy(priv->qdiscs[ntx]);
+ kfree(priv->qdiscs);
+}
+
+static int mq_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct mq_sched *priv = qdisc_priv(sch);
+ struct netdev_queue *dev_queue;
+ struct Qdisc *qdisc;
+ unsigned int ntx;
+
+ if (sch->parent != TC_H_ROOT)
+ return -EOPNOTSUPP;
+
+ if (!netif_is_multiqueue(dev))
+ return -EOPNOTSUPP;
+
+ /* pre-allocate qdiscs, attachment can't fail */
+ priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
+ GFP_KERNEL);
+ if (priv->qdiscs == NULL)
+ return -ENOMEM;
+
+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+ dev_queue = netdev_get_tx_queue(dev, ntx);
+ qdisc = qdisc_create_dflt(dev, dev_queue, &pfifo_fast_ops,
+ TC_H_MAKE(TC_H_MAJ(sch->handle),
+ TC_H_MIN(ntx + 1)));
+ if (qdisc == NULL)
+ goto err;
+ qdisc->flags |= TCQ_F_CAN_BYPASS;
+ priv->qdiscs[ntx] = qdisc;
+ }
+
+ sch->flags |= TCQ_F_MQROOT;
+ return 0;
+
+err:
+ mq_destroy(sch);
+ return -ENOMEM;
+}
+
+static void mq_attach(struct Qdisc *sch)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct mq_sched *priv = qdisc_priv(sch);
+ struct Qdisc *qdisc;
+ unsigned int ntx;
+
+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+ qdisc = priv->qdiscs[ntx];
+ qdisc = dev_graft_qdisc(qdisc->dev_queue, qdisc);
+ if (qdisc)
+ qdisc_destroy(qdisc);
+ }
+ kfree(priv->qdiscs);
+ priv->qdiscs = NULL;
+}
+
+static int mq_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ struct Qdisc *qdisc;
+ unsigned int ntx;
+
+ sch->q.qlen = 0;
+ memset(&sch->bstats, 0, sizeof(sch->bstats));
+ memset(&sch->qstats, 0, sizeof(sch->qstats));
+
+ for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
+ qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
+ spin_lock_bh(qdisc_lock(qdisc));
+ sch->q.qlen += qdisc->q.qlen;
+ sch->bstats.bytes += qdisc->bstats.bytes;
+ sch->bstats.packets += qdisc->bstats.packets;
+ sch->qstats.qlen += qdisc->qstats.qlen;
+ sch->qstats.backlog += qdisc->qstats.backlog;
+ sch->qstats.drops += qdisc->qstats.drops;
+ sch->qstats.requeues += qdisc->qstats.requeues;
+ sch->qstats.overlimits += qdisc->qstats.overlimits;
+ spin_unlock_bh(qdisc_lock(qdisc));
+ }
+ return 0;
+}
+
+static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ unsigned long ntx = cl - 1;
+
+ if (ntx >= dev->num_tx_queues)
+ return NULL;
+ return netdev_get_tx_queue(dev, ntx);
+}
+
+static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
+ struct tcmsg *tcm)
+{
+ unsigned int ntx = TC_H_MIN(tcm->tcm_parent);
+ struct netdev_queue *dev_queue = mq_queue_get(sch, ntx);
+
+ if (!dev_queue) {
+ struct net_device *dev = qdisc_dev(sch);
+
+ return netdev_get_tx_queue(dev, 0);
+ }
+ return dev_queue;
+}
+
+static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
+ struct Qdisc **old)
+{
+ struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
+ struct net_device *dev = qdisc_dev(sch);
+
+ if (dev->flags & IFF_UP)
+ dev_deactivate(dev);
+
+ *old = dev_graft_qdisc(dev_queue, new);
+
+ if (dev->flags & IFF_UP)
+ dev_activate(dev);
+ return 0;
+}
+
+static struct Qdisc *mq_leaf(struct Qdisc *sch, unsigned long cl)
+{
+ struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
+
+ return dev_queue->qdisc_sleeping;
+}
+
+static unsigned long mq_get(struct Qdisc *sch, u32 classid)
+{
+ unsigned int ntx = TC_H_MIN(classid);
+
+ if (!mq_queue_get(sch, ntx))
+ return 0;
+ return ntx;
+}
+
+static void mq_put(struct Qdisc *sch, unsigned long cl)
+{
+ return;
+}
+
+static int mq_dump_class(struct Qdisc *sch, unsigned long cl,
+ struct sk_buff *skb, struct tcmsg *tcm)
+{
+ struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
+
+ tcm->tcm_parent = TC_H_ROOT;
+ tcm->tcm_handle |= TC_H_MIN(cl);
+ tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
+ return 0;
+}
+
+static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+ struct gnet_dump *d)
+{
+ struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
+
+ sch = dev_queue->qdisc_sleeping;
+ sch->qstats.qlen = sch->q.qlen;
+ if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
+ gnet_stats_copy_queue(d, &sch->qstats) < 0)
+ return -1;
+ return 0;
+}
+
+static void mq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+ struct net_device *dev = qdisc_dev(sch);
+ unsigned int ntx;
+
+ if (arg->stop)
+ return;
+
+ arg->count = arg->skip;
+ for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) {
+ if (arg->fn(sch, ntx + 1, arg) < 0) {
+ arg->stop = 1;
+ break;
+ }
+ arg->count++;
+ }
+}
+
+static const struct Qdisc_class_ops mq_class_ops = {
+ .select_queue = mq_select_queue,
+ .graft = mq_graft,
+ .leaf = mq_leaf,
+ .get = mq_get,
+ .put = mq_put,
+ .walk = mq_walk,
+ .dump = mq_dump_class,
+ .dump_stats = mq_dump_class_stats,
+};
+
+struct Qdisc_ops mq_qdisc_ops __read_mostly = {
+ .cl_ops = &mq_class_ops,
+ .id = "mq",
+ .priv_size = sizeof(struct mq_sched),
+ .init = mq_init,
+ .destroy = mq_destroy,
+ .attach = mq_attach,
+ .dump = mq_dump,
+ .owner = THIS_MODULE,
+};
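
The new mq root exposes one class per hardware transmit queue: class minor N covers TX queue N-1 (see mq_queue_get() above), and each child starts out as pfifo_fast with TCQ_F_CAN_BYPASS set until replaced via mq_graft(). A small sketch of that class-id mapping; mq_classid_for_txq() is a hypothetical helper for illustration, mirroring the TC_H_MAKE() call in mq_init().

/* Hypothetical helper illustrating the classid <-> TX-queue mapping
 * used by mq: class minor N covers netdev_get_tx_queue(dev, N - 1). */
static u32 mq_classid_for_txq(struct Qdisc *mq_root, unsigned int txq_index)
{
	return TC_H_MAKE(TC_H_MAJ(mq_root->handle), TC_H_MIN(txq_index + 1));
}
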
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 91273120304..7db2c88ce58 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -298,9 +298,6 @@ static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct multiq_sched_data *q = qdisc_priv(sch);
unsigned long band = arg - 1;
- if (band >= q->bands)
- return -EINVAL;
-
if (new == NULL)
new = &noop_qdisc;
@@ -320,9 +317,6 @@ multiq_leaf(struct Qdisc *sch, unsigned long arg)
struct multiq_sched_data *q = qdisc_priv(sch);
unsigned long band = arg - 1;
- if (band >= q->bands)
- return NULL;
-
return q->queues[band];
}
@@ -348,36 +342,13 @@ static void multiq_put(struct Qdisc *q, unsigned long cl)
return;
}
-static int multiq_change(struct Qdisc *sch, u32 handle, u32 parent,
- struct nlattr **tca, unsigned long *arg)
-{
- unsigned long cl = *arg;
- struct multiq_sched_data *q = qdisc_priv(sch);
-
- if (cl - 1 > q->bands)
- return -ENOENT;
- return 0;
-}
-
-static int multiq_delete(struct Qdisc *sch, unsigned long cl)
-{
- struct multiq_sched_data *q = qdisc_priv(sch);
- if (cl - 1 > q->bands)
- return -ENOENT;
- return 0;
-}
-
-
static int multiq_dump_class(struct Qdisc *sch, unsigned long cl,
struct sk_buff *skb, struct tcmsg *tcm)
{
struct multiq_sched_data *q = qdisc_priv(sch);
- if (cl - 1 > q->bands)
- return -ENOENT;
tcm->tcm_handle |= TC_H_MIN(cl);
- if (q->queues[cl-1])
- tcm->tcm_info = q->queues[cl-1]->handle;
+ tcm->tcm_info = q->queues[cl-1]->handle;
return 0;
}
@@ -388,6 +359,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct Qdisc *cl_q;
cl_q = q->queues[cl - 1];
+ cl_q->qstats.qlen = cl_q->q.qlen;
if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
return -1;
@@ -430,8 +402,6 @@ static const struct Qdisc_class_ops multiq_class_ops = {
.leaf = multiq_leaf,
.get = multiq_get,
.put = multiq_put,
- .change = multiq_change,
- .delete = multiq_delete,
.walk = multiq_walk,
.tcf_chain = multiq_find_tcf,
.bind_tcf = multiq_bind,
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 94cecef7014..93285cecb24 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -262,9 +262,6 @@ static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
struct prio_sched_data *q = qdisc_priv(sch);
unsigned long band = arg - 1;
- if (band >= q->bands)
- return -EINVAL;
-
if (new == NULL)
new = &noop_qdisc;
@@ -284,9 +281,6 @@ prio_leaf(struct Qdisc *sch, unsigned long arg)
struct prio_sched_data *q = qdisc_priv(sch);
unsigned long band = arg - 1;
- if (band >= q->bands)
- return NULL;
-
return q->queues[band];
}
@@ -311,35 +305,13 @@ static void prio_put(struct Qdisc *q, unsigned long cl)
return;
}
-static int prio_change(struct Qdisc *sch, u32 handle, u32 parent, struct nlattr **tca, unsigned long *arg)
-{
- unsigned long cl = *arg;
- struct prio_sched_data *q = qdisc_priv(sch);
-
- if (cl - 1 > q->bands)
- return -ENOENT;
- return 0;
-}
-
-static int prio_delete(struct Qdisc *sch, unsigned long cl)
-{
- struct prio_sched_data *q = qdisc_priv(sch);
- if (cl - 1 > q->bands)
- return -ENOENT;
- return 0;
-}
-
-
static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
struct tcmsg *tcm)
{
struct prio_sched_data *q = qdisc_priv(sch);
- if (cl - 1 > q->bands)
- return -ENOENT;
tcm->tcm_handle |= TC_H_MIN(cl);
- if (q->queues[cl-1])
- tcm->tcm_info = q->queues[cl-1]->handle;
+ tcm->tcm_info = q->queues[cl-1]->handle;
return 0;
}
@@ -350,6 +322,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
struct Qdisc *cl_q;
cl_q = q->queues[cl - 1];
+ cl_q->qstats.qlen = cl_q->q.qlen;
if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
return -1;
@@ -392,8 +365,6 @@ static const struct Qdisc_class_ops prio_class_ops = {
.leaf = prio_leaf,
.get = prio_get,
.put = prio_put,
- .change = prio_change,
- .delete = prio_delete,
.walk = prio_walk,
.tcf_chain = prio_find_tcf,
.bind_tcf = prio_bind,
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 2bdf241f631..072cdf442f8 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -268,8 +268,6 @@ static int red_dump_class(struct Qdisc *sch, unsigned long cl,
{
struct red_sched_data *q = qdisc_priv(sch);
- if (cl != 1)
- return -ENOENT;
tcm->tcm_handle |= TC_H_MIN(1);
tcm->tcm_info = q->qdisc->handle;
return 0;
@@ -308,17 +306,6 @@ static void red_put(struct Qdisc *sch, unsigned long arg)
return;
}
-static int red_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
- struct nlattr **tca, unsigned long *arg)
-{
- return -ENOSYS;
-}
-
-static int red_delete(struct Qdisc *sch, unsigned long cl)
-{
- return -ENOSYS;
-}
-
static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
if (!walker->stop) {
@@ -331,20 +318,12 @@ static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
}
}
-static struct tcf_proto **red_find_tcf(struct Qdisc *sch, unsigned long cl)
-{
- return NULL;
-}
-
static const struct Qdisc_class_ops red_class_ops = {
.graft = red_graft,
.leaf = red_leaf,
.get = red_get,
.put = red_put,
- .change = red_change_class,
- .delete = red_delete,
.walk = red_walk,
- .tcf_chain = red_find_tcf,
.dump = red_dump_class,
};
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 8706920a6d4..cb21380c060 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -496,12 +496,6 @@ nla_put_failure:
return -1;
}
-static int sfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
- struct nlattr **tca, unsigned long *arg)
-{
- return -EOPNOTSUPP;
-}
-
static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
return 0;
@@ -560,7 +554,6 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
static const struct Qdisc_class_ops sfq_class_ops = {
.get = sfq_get,
- .change = sfq_change_class,
.tcf_chain = sfq_find_tcf,
.dump = sfq_dump_class,
.dump_stats = sfq_dump_class_stats,
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index e22dfe85e43..8fb8107ab18 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -368,9 +368,6 @@ static int tbf_dump_class(struct Qdisc *sch, unsigned long cl,
{
struct tbf_sched_data *q = qdisc_priv(sch);
- if (cl != 1) /* only one class */
- return -ENOENT;
-
tcm->tcm_handle |= TC_H_MIN(1);
tcm->tcm_info = q->qdisc->handle;
@@ -410,17 +407,6 @@ static void tbf_put(struct Qdisc *sch, unsigned long arg)
{
}
-static int tbf_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
- struct nlattr **tca, unsigned long *arg)
-{
- return -ENOSYS;
-}
-
-static int tbf_delete(struct Qdisc *sch, unsigned long arg)
-{
- return -ENOSYS;
-}
-
static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
if (!walker->stop) {
@@ -433,21 +419,13 @@ static void tbf_walk(struct Qdisc *sch, struct qdisc_walker *walker)
}
}
-static struct tcf_proto **tbf_find_tcf(struct Qdisc *sch, unsigned long cl)
-{
- return NULL;
-}
-
static const struct Qdisc_class_ops tbf_class_ops =
{
.graft = tbf_graft,
.leaf = tbf_leaf,
.get = tbf_get,
.put = tbf_put,
- .change = tbf_change_class,
- .delete = tbf_delete,
.walk = tbf_walk,
- .tcf_chain = tbf_find_tcf,
.dump = tbf_dump_class,
};
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 9c002b6e053..5a002c24723 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -268,7 +268,7 @@ static inline int teql_resolve(struct sk_buff *skb,
return __teql_resolve(skb, skb_res, dev);
}
-static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct teql_master *master = netdev_priv(dev);
struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
@@ -307,14 +307,14 @@ restart:
if (!netif_tx_queue_stopped(slave_txq) &&
!netif_tx_queue_frozen(slave_txq) &&
- slave_ops->ndo_start_xmit(skb, slave) == 0) {
+ slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
txq_trans_update(slave_txq);
__netif_tx_unlock(slave_txq);
master->slaves = NEXT_SLAVE(q);
netif_wake_queue(dev);
txq->tx_packets++;
txq->tx_bytes += length;
- return 0;
+ return NETDEV_TX_OK;
}
__netif_tx_unlock(slave_txq);
}
@@ -323,7 +323,7 @@ restart:
break;
case 1:
master->slaves = NEXT_SLAVE(q);
- return 0;
+ return NETDEV_TX_OK;
default:
nores = 1;
break;
@@ -345,7 +345,7 @@ restart:
drop:
txq->tx_dropped++;
dev_kfree_skb(skb);
- return 0;
+ return NETDEV_TX_OK;
}
static int teql_master_open(struct net_device *dev)
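
The teql change above is part of converting ndo_start_xmit implementations to return netdev_tx_t values rather than bare integers. A hedged sketch of the resulting driver shape; foo_xmit(), struct foo_priv and hw_queue_skb() are hypothetical names.

/* Hedged sketch of an ndo_start_xmit using netdev_tx_t. */
static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (!hw_queue_skb(priv, skb))	/* hypothetical hardware hook */
		return NETDEV_TX_BUSY;	/* core retries the same skb */

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	return NETDEV_TX_OK;
}
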
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 525864bf4f0..8450960df24 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -112,6 +112,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000)
* 1000;
asoc->frag_point = 0;
+ asoc->user_frag = sp->user_frag;
/* Set the association max_retrans and RTO values from the
* socket values.
@@ -202,6 +203,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->a_rwnd = asoc->rwnd;
asoc->rwnd_over = 0;
+ asoc->rwnd_press = 0;
/* Use my own max window until I learn something better. */
asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
@@ -582,6 +584,33 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
asoc->addip_last_asconf->transport == peer)
asoc->addip_last_asconf->transport = NULL;
+ /* If we have something on the transmitted list, we have to
+ * save it off. The best place is the active path.
+ */
+ if (!list_empty(&peer->transmitted)) {
+ struct sctp_transport *active = asoc->peer.active_path;
+ struct sctp_chunk *ch;
+
+ /* Reset the transport of each chunk on this list */
+ list_for_each_entry(ch, &peer->transmitted,
+ transmitted_list) {
+ ch->transport = NULL;
+ ch->rtt_in_progress = 0;
+ }
+
+ list_splice_tail_init(&peer->transmitted,
+ &active->transmitted);
+
+ /* Start a T3 timer here in case it wasn't running so
+ * that these migrated packets have a chance to get
+ * retransmitted.
+ */
+ if (!timer_pending(&active->T3_rtx_timer))
+ if (!mod_timer(&active->T3_rtx_timer,
+ jiffies + active->rto))
+ sctp_transport_hold(active);
+ }
+
asoc->peer.transport_count--;
sctp_transport_free(peer);
@@ -651,13 +680,15 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
*/
peer->param_flags = asoc->param_flags;
+ sctp_transport_route(peer, NULL, sp);
+
/* Initialize the pmtu of the transport. */
- if (peer->param_flags & SPP_PMTUD_ENABLE)
- sctp_transport_pmtu(peer);
- else if (asoc->pathmtu)
- peer->pathmtu = asoc->pathmtu;
- else
- peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
+ if (peer->param_flags & SPP_PMTUD_DISABLE) {
+ if (asoc->pathmtu)
+ peer->pathmtu = asoc->pathmtu;
+ else
+ peer->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
+ }
/* If this is the first transport addr on this association,
* initialize the association PMTU to the peer's PMTU.
@@ -673,7 +704,7 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
"%d\n", asoc, asoc->pathmtu);
peer->pmtu_pending = 0;
- asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu);
+ asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
/* The asoc->peer.port might not be meaningful yet, but
* initialize the packet structure anyway.
@@ -810,11 +841,16 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
break;
case SCTP_TRANSPORT_DOWN:
- /* if the transort was never confirmed, do not transition it
- * to inactive state.
+ /* If the transport was never confirmed, do not transition it
+ * to inactive state. Also, release the cached route since
+ * there may be a better route next time.
*/
if (transport->state != SCTP_UNCONFIRMED)
transport->state = SCTP_INACTIVE;
+ else {
+ dst_release(transport->dst);
+ transport->dst = NULL;
+ }
spc_state = SCTP_ADDR_UNREACHABLE;
break;
@@ -1324,9 +1360,8 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc)
}
if (pmtu) {
- struct sctp_sock *sp = sctp_sk(asoc->base.sk);
asoc->pathmtu = pmtu;
- asoc->frag_point = sctp_frag_point(sp, pmtu);
+ asoc->frag_point = sctp_frag_point(asoc, pmtu);
}
SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n",
@@ -1369,6 +1404,17 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len)
asoc->rwnd += len;
}
+ /* If we had window pressure, start recovering it
+ * once our rwnd has reached the accumulated pressure
+ * threshold. The idea is to recover slowly, but up
+ * to the initial advertised window.
+ */
+ if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
+ int change = min(asoc->pathmtu, asoc->rwnd_press);
+ asoc->rwnd += change;
+ asoc->rwnd_press -= change;
+ }
+
SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) "
"- %u\n", __func__, asoc, len, asoc->rwnd,
asoc->rwnd_over, asoc->a_rwnd);
@@ -1401,17 +1447,38 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len)
/* Decrease asoc's rwnd by len. */
void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len)
{
+ int rx_count;
+ int over = 0;
+
SCTP_ASSERT(asoc->rwnd, "rwnd zero", return);
SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return);
+
+ if (asoc->ep->rcvbuf_policy)
+ rx_count = atomic_read(&asoc->rmem_alloc);
+ else
+ rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
+
+ /* If we've reached or overflowed our receive buffer, announce
+ * a 0 rwnd if rwnd would still be positive. Store the
+ * potential pressure overflow so that the window can be restored
+ * back to its original value.
+ */
+ if (rx_count >= asoc->base.sk->sk_rcvbuf)
+ over = 1;
+
if (asoc->rwnd >= len) {
asoc->rwnd -= len;
+ if (over) {
+ asoc->rwnd_press = asoc->rwnd;
+ asoc->rwnd = 0;
+ }
} else {
asoc->rwnd_over = len - asoc->rwnd;
asoc->rwnd = 0;
}
- SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u)\n",
+ SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u, %u)\n",
__func__, asoc, len, asoc->rwnd,
- asoc->rwnd_over);
+ asoc->rwnd_over, asoc->rwnd_press);
}
/* Build the bind address list for the association based on info from the
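
The rwnd_press logic above closes the window when the socket receive buffer is exhausted and then pays the pressed amount back, at most one path MTU per increase, once rwnd has climbed back over the stored pressure. A hedged user-space model of that bookkeeping (field names mirror the patch, the rwnd_over path is omitted, and this is not kernel code):

/* Hedged model of the new rwnd_press accounting. */
struct rwnd_state { unsigned int rwnd, rwnd_press, pathmtu; };

static void rwnd_decrease(struct rwnd_state *a, unsigned int len, int over)
{
	if (a->rwnd >= len) {
		a->rwnd -= len;
		if (over) {		/* receive buffer exhausted */
			a->rwnd_press = a->rwnd;
			a->rwnd = 0;	/* advertise a zero window */
		}
	}
}

static void rwnd_increase(struct rwnd_state *a, unsigned int len)
{
	a->rwnd += len;
	/* Recover the pressed window slowly, at most one MTU per call,
	 * once rwnd has reached the accumulated pressure threshold. */
	if (a->rwnd_press && a->rwnd >= a->rwnd_press) {
		unsigned int change = a->pathmtu < a->rwnd_press ?
				      a->pathmtu : a->rwnd_press;
		a->rwnd += change;
		a->rwnd_press -= change;
	}
}
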
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 6d5944a745d..13a6fba4107 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -510,9 +510,28 @@ int sctp_in_scope(const union sctp_addr *addr, sctp_scope_t scope)
* of requested destination address, sender and receiver
* SHOULD include all of its addresses with level greater
* than or equal to L.
+ *
+ * Address scoping can be selectively controlled via sysctl
+ * option
*/
- if (addr_scope <= scope)
+ switch (sctp_scope_policy) {
+ case SCTP_SCOPE_POLICY_DISABLE:
return 1;
+ case SCTP_SCOPE_POLICY_ENABLE:
+ if (addr_scope <= scope)
+ return 1;
+ break;
+ case SCTP_SCOPE_POLICY_PRIVATE:
+ if (addr_scope <= scope || SCTP_SCOPE_PRIVATE == addr_scope)
+ return 1;
+ break;
+ case SCTP_SCOPE_POLICY_LINK:
+ if (addr_scope <= scope || SCTP_SCOPE_LINK == addr_scope)
+ return 1;
+ break;
+ default:
+ break;
+ }
return 0;
}
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 1748ef90950..acf7c4d128f 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -59,6 +59,7 @@ static void sctp_datamsg_init(struct sctp_datamsg *msg)
msg->can_abandon = 0;
msg->expires_at = 0;
INIT_LIST_HEAD(&msg->chunks);
+ msg->msg_size = 0;
}
/* Allocate and initialize datamsg. */
@@ -73,6 +74,19 @@ SCTP_STATIC struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp)
return msg;
}
+void sctp_datamsg_free(struct sctp_datamsg *msg)
+{
+ struct sctp_chunk *chunk;
+
+ /* This doesn't have to be a _safe variant because
+ * sctp_chunk_free() only drops the refs.
+ */
+ list_for_each_entry(chunk, &msg->chunks, frag_list)
+ sctp_chunk_free(chunk);
+
+ sctp_datamsg_put(msg);
+}
+
/* Final destruction of datamsg memory. */
static void sctp_datamsg_destroy(struct sctp_datamsg *msg)
{
@@ -142,6 +156,7 @@ static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chu
{
sctp_datamsg_hold(msg);
chunk->msg = msg;
+ msg->msg_size += chunk->skb->len;
}
@@ -158,6 +173,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
{
int max, whole, i, offset, over, err;
int len, first_len;
+ int max_data;
struct sctp_chunk *chunk;
struct sctp_datamsg *msg;
struct list_head *pos, *temp;
@@ -179,8 +195,14 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
__func__, msg, msg->expires_at, jiffies);
}
- max = asoc->frag_point;
+ /* This is the biggest possible DATA chunk that can fit into
+ * the packet
+ */
+ max_data = asoc->pathmtu -
+ sctp_sk(asoc->base.sk)->pf->af->net_header_len -
+ sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk);
+ max = asoc->frag_point;
/* If the peer requested that we authenticate DATA chunks
* we need to account for bundling of the AUTH chunks along with
* DATA.
@@ -189,23 +211,41 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
struct sctp_hmac *hmac_desc = sctp_auth_asoc_get_hmac(asoc);
if (hmac_desc)
- max -= WORD_ROUND(sizeof(sctp_auth_chunk_t) +
+ max_data -= WORD_ROUND(sizeof(sctp_auth_chunk_t) +
hmac_desc->hmac_len);
}
+ /* Now, check if we need to reduce our max */
+ if (max > max_data)
+ max = max_data;
+
whole = 0;
first_len = max;
+ /* Check to see if we have a pending SACK and try to let it be bundled
+ * with this message. Do this if we don't have any data queued already.
+ * To check that, look at out_qlen and retransmit list.
+ * NOTE: we will not reduce to account for SACK, if the message would
+ * not have been fragmented.
+ */
+ if (timer_pending(&asoc->timers[SCTP_EVENT_TIMEOUT_SACK]) &&
+ asoc->outqueue.out_qlen == 0 &&
+ list_empty(&asoc->outqueue.retransmit) &&
+ msg_len > max)
+ max_data -= WORD_ROUND(sizeof(sctp_sack_chunk_t));
+
/* Encourage Cookie-ECHO bundling. */
- if (asoc->state < SCTP_STATE_COOKIE_ECHOED) {
- whole = msg_len / (max - SCTP_ARBITRARY_COOKIE_ECHO_LEN);
-
- /* Account for the DATA to be bundled with the COOKIE-ECHO. */
- if (whole) {
- first_len = max - SCTP_ARBITRARY_COOKIE_ECHO_LEN;
- msg_len -= first_len;
- whole = 1;
- }
+ if (asoc->state < SCTP_STATE_COOKIE_ECHOED)
+ max_data -= SCTP_ARBITRARY_COOKIE_ECHO_LEN;
+
+ /* Now that we adjusted completely, reset first_len */
+ if (first_len > max_data)
+ first_len = max_data;
+
+ /* Account for a different sized first fragment */
+ if (msg_len >= first_len) {
+ msg_len -= first_len;
+ whole = 1;
}
/* How many full sized? How many bytes leftover? */
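
As a hedged worked example of the max_data bound computed above, assuming IPv4 and a 1500-byte path MTU:

/* Hedged arithmetic example: largest DATA payload per packet before
 * the AUTH/SACK adjustments, assuming IPv4 and a 1500-byte path MTU. */
static int example_max_data(void)
{
	return 1500	/* asoc->pathmtu                   */
	     -   20	/* IPv4 network header             */
	     -   12	/* sizeof(struct sctphdr)          */
	     -   16;	/* sizeof(struct sctp_data_chunk)  */
	/* = 1452 bytes of user data in a full-sized fragment. */
}
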
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index 7ff548a30cf..bf24fa697de 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -52,7 +52,7 @@ int sctp_debug_flag = 1; /* Initially enable DEBUG */
#endif /* SCTP_DEBUG */
/* These are printable forms of Chunk ID's from section 3.1. */
-static const char *sctp_cid_tbl[SCTP_NUM_BASE_CHUNK_TYPES] = {
+static const char *const sctp_cid_tbl[SCTP_NUM_BASE_CHUNK_TYPES] = {
"DATA",
"INIT",
"INIT_ACK",
@@ -97,7 +97,7 @@ const char *sctp_cname(const sctp_subtype_t cid)
}
/* These are printable forms of the states. */
-const char *sctp_state_tbl[SCTP_STATE_NUM_STATES] = {
+const char *const sctp_state_tbl[SCTP_STATE_NUM_STATES] = {
"STATE_EMPTY",
"STATE_CLOSED",
"STATE_COOKIE_WAIT",
@@ -110,7 +110,7 @@ const char *sctp_state_tbl[SCTP_STATE_NUM_STATES] = {
};
/* Events that could change the state of an association. */
-const char *sctp_evttype_tbl[] = {
+const char *const sctp_evttype_tbl[] = {
"EVENT_T_unknown",
"EVENT_T_CHUNK",
"EVENT_T_TIMEOUT",
@@ -119,7 +119,7 @@ const char *sctp_evttype_tbl[] = {
};
/* Return value of a state function */
-const char *sctp_status_tbl[] = {
+const char *const sctp_status_tbl[] = {
"DISPOSITION_DISCARD",
"DISPOSITION_CONSUME",
"DISPOSITION_NOMEM",
@@ -132,7 +132,7 @@ const char *sctp_status_tbl[] = {
};
/* Printable forms of primitives */
-static const char *sctp_primitive_tbl[SCTP_NUM_PRIMITIVE_TYPES] = {
+static const char *const sctp_primitive_tbl[SCTP_NUM_PRIMITIVE_TYPES] = {
"PRIMITIVE_ASSOCIATE",
"PRIMITIVE_SHUTDOWN",
"PRIMITIVE_ABORT",
@@ -149,7 +149,7 @@ const char *sctp_pname(const sctp_subtype_t id)
return "unknown_primitive";
}
-static const char *sctp_other_tbl[] = {
+static const char *const sctp_other_tbl[] = {
"NO_PENDING_TSN",
"ICMP_PROTO_UNREACH",
};
@@ -162,7 +162,7 @@ const char *sctp_oname(const sctp_subtype_t id)
return "unknown 'other' event";
}
-static const char *sctp_timer_tbl[] = {
+static const char *const sctp_timer_tbl[] = {
"TIMEOUT_NONE",
"TIMEOUT_T1_COOKIE",
"TIMEOUT_T1_INIT",
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 6a4b1909414..bb280e60e00 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -949,7 +949,7 @@ static int sctp6_rcv(struct sk_buff *skb)
return sctp_rcv(skb) ? -1 : 0;
}
-static struct inet6_protocol sctpv6_protocol = {
+static const struct inet6_protocol sctpv6_protocol = {
.handler = sctp6_rcv,
.err_handler = sctp_v6_err,
.flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
diff --git a/net/sctp/output.c b/net/sctp/output.c
index b94c2119056..5cbda8f1ddf 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -61,8 +61,24 @@
#include <net/sctp/checksum.h>
/* Forward declarations for private helpers. */
-static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
+static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
struct sctp_chunk *chunk);
+static void sctp_packet_append_data(struct sctp_packet *packet,
+ struct sctp_chunk *chunk);
+static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
+ struct sctp_chunk *chunk,
+ u16 chunk_len);
+
+static void sctp_packet_reset(struct sctp_packet *packet)
+{
+ packet->size = packet->overhead;
+ packet->has_cookie_echo = 0;
+ packet->has_sack = 0;
+ packet->has_data = 0;
+ packet->has_auth = 0;
+ packet->ipfragok = 0;
+ packet->auth = NULL;
+}
/* Config a packet.
* This appears to be a followup set of initializations.
@@ -75,13 +91,8 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
packet, vtag);
+ sctp_packet_reset(packet);
packet->vtag = vtag;
- packet->has_cookie_echo = 0;
- packet->has_sack = 0;
- packet->has_auth = 0;
- packet->has_data = 0;
- packet->ipfragok = 0;
- packet->auth = NULL;
if (ecn_capable && sctp_packet_empty(packet)) {
chunk = sctp_get_ecne_prepend(packet->transport->asoc);
@@ -119,15 +130,9 @@ struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
}
overhead += sizeof(struct sctphdr);
packet->overhead = overhead;
- packet->size = overhead;
+ sctp_packet_reset(packet);
packet->vtag = 0;
- packet->has_cookie_echo = 0;
- packet->has_sack = 0;
- packet->has_auth = 0;
- packet->has_data = 0;
- packet->ipfragok = 0;
packet->malloced = 0;
- packet->auth = NULL;
return packet;
}
@@ -204,7 +209,7 @@ static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt,
/* See if this is an auth chunk we are bundling or if
* auth is already bundled.
*/
- if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->auth)
+ if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
return retval;
/* if the peer did not request this chunk to be authenticated,
@@ -234,18 +239,19 @@ static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
!pkt->has_cookie_echo) {
struct sctp_association *asoc;
+ struct timer_list *timer;
asoc = pkt->transport->asoc;
+ timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
- if (asoc->a_rwnd > asoc->rwnd) {
+ /* If the SACK timer is running, we have a pending SACK */
+ if (timer_pending(timer)) {
struct sctp_chunk *sack;
asoc->a_rwnd = asoc->rwnd;
sack = sctp_make_sack(asoc);
if (sack) {
- struct timer_list *timer;
retval = sctp_packet_append_chunk(pkt, sack);
asoc->peer.sack_needed = 0;
- timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
- if (timer_pending(timer) && del_timer(timer))
+ if (del_timer(timer))
sctp_association_put(asoc);
}
}
@@ -261,13 +267,20 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
{
sctp_xmit_t retval = SCTP_XMIT_OK;
__u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length));
- size_t psize;
- size_t pmtu;
- int too_big;
SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet,
chunk);
+ /* Data chunks are special. Before seeing what else we can
+ * bundle into this packet, check to see if we are allowed to
+ * send this DATA.
+ */
+ if (sctp_chunk_is_data(chunk)) {
+ retval = sctp_packet_can_append_data(packet, chunk);
+ if (retval != SCTP_XMIT_OK)
+ goto finish;
+ }
+
/* Try to bundle AUTH chunk */
retval = sctp_packet_bundle_auth(packet, chunk);
if (retval != SCTP_XMIT_OK)
@@ -278,51 +291,16 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
if (retval != SCTP_XMIT_OK)
goto finish;
- psize = packet->size;
- pmtu = ((packet->transport->asoc) ?
- (packet->transport->asoc->pathmtu) :
- (packet->transport->pathmtu));
-
- too_big = (psize + chunk_len > pmtu);
-
- /* Decide if we need to fragment or resubmit later. */
- if (too_big) {
- /* It's OK to fragmet at IP level if any one of the following
- * is true:
- * 1. The packet is empty (meaning this chunk is greater
- * the MTU)
- * 2. The chunk we are adding is a control chunk
- * 3. The packet doesn't have any data in it yet and data
- * requires authentication.
- */
- if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) ||
- (!packet->has_data && chunk->auth)) {
- /* We no longer do re-fragmentation.
- * Just fragment at the IP layer, if we
- * actually hit this condition
- */
- packet->ipfragok = 1;
- goto append;
-
- } else {
- retval = SCTP_XMIT_PMTU_FULL;
- goto finish;
- }
- }
-
-append:
- /* We believe that this chunk is OK to add to the packet (as
- * long as we have the cwnd for it).
- */
+ /* Check to see if this chunk will fit into the packet */
+ retval = sctp_packet_will_fit(packet, chunk, chunk_len);
+ if (retval != SCTP_XMIT_OK)
+ goto finish;
- /* DATA is a special case since we must examine both rwnd and cwnd
- * before we send DATA.
- */
+ /* We believe that this chunk is OK to add to the packet */
switch (chunk->chunk_hdr->type) {
case SCTP_CID_DATA:
- retval = sctp_packet_append_data(packet, chunk);
- if (SCTP_XMIT_OK != retval)
- goto finish;
+ /* Account for the data being in the packet */
+ sctp_packet_append_data(packet, chunk);
/* Disallow SACK bundling after DATA. */
packet->has_sack = 1;
/* Disallow AUTH bundling after DATA */
@@ -598,7 +576,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
(*tp->af_specific->sctp_xmit)(nskb, tp);
out:
- packet->size = packet->overhead;
+ sctp_packet_reset(packet);
return err;
no_route:
kfree_skb(nskb);
@@ -632,16 +610,15 @@ nomem:
* 2nd Level Abstractions
********************************************************************/
-/* This private function handles the specifics of appending DATA chunks. */
-static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
+/* This private function checks to see if a chunk can be added */
+static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
struct sctp_chunk *chunk)
{
sctp_xmit_t retval = SCTP_XMIT_OK;
- size_t datasize, rwnd, inflight;
+ size_t datasize, rwnd, inflight, flight_size;
struct sctp_transport *transport = packet->transport;
__u32 max_burst_bytes;
struct sctp_association *asoc = transport->asoc;
- struct sctp_sock *sp = sctp_sk(asoc->base.sk);
struct sctp_outq *q = &asoc->outqueue;
/* RFC 2960 6.1 Transmission of DATA Chunks
@@ -658,7 +635,8 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
*/
rwnd = asoc->peer.rwnd;
- inflight = asoc->outqueue.outstanding_bytes;
+ inflight = q->outstanding_bytes;
+ flight_size = transport->flight_size;
datasize = sctp_data_size(chunk);
@@ -681,8 +659,8 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
* cwnd = flightsize + Max.Burst * MTU
*/
max_burst_bytes = asoc->max_burst * asoc->pathmtu;
- if ((transport->flight_size + max_burst_bytes) < transport->cwnd) {
- transport->cwnd = transport->flight_size + max_burst_bytes;
+ if ((flight_size + max_burst_bytes) < transport->cwnd) {
+ transport->cwnd = flight_size + max_burst_bytes;
SCTP_DEBUG_PRINTK("%s: cwnd limited by max_burst: "
"transport: %p, cwnd: %d, "
"ssthresh: %d, flight_size: %d, "
@@ -707,7 +685,7 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
* ignore the value of cwnd and SHOULD NOT delay retransmission.
*/
if (chunk->fast_retransmit != SCTP_NEED_FRTX)
- if (transport->flight_size >= transport->cwnd) {
+ if (flight_size >= transport->cwnd) {
retval = SCTP_XMIT_RWND_FULL;
goto finish;
}
@@ -717,20 +695,36 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
* if any previously transmitted data on the connection remains
* unacknowledged.
*/
- if (!sp->nodelay && sctp_packet_empty(packet) &&
- q->outstanding_bytes && sctp_state(asoc, ESTABLISHED)) {
- unsigned len = datasize + q->out_qlen;
+ if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) &&
+ inflight && sctp_state(asoc, ESTABLISHED)) {
+ unsigned max = transport->pathmtu - packet->overhead;
+ unsigned len = chunk->skb->len + q->out_qlen;
/* Check whether this chunk and all the rest of pending
* data will fit or delay in hopes of bundling a full
* sized packet.
+ * Don't delay large message writes that may have been
+ * fragmented into small pieces.
*/
- if (len < asoc->frag_point) {
+ if ((len < max) && (chunk->msg->msg_size < max)) {
retval = SCTP_XMIT_NAGLE_DELAY;
goto finish;
}
}
+finish:
+ return retval;
+}
+
+/* This private function does the bookkeeping when adding a DATA chunk */
+static void sctp_packet_append_data(struct sctp_packet *packet,
+ struct sctp_chunk *chunk)
+{
+ struct sctp_transport *transport = packet->transport;
+ size_t datasize = sctp_data_size(chunk);
+ struct sctp_association *asoc = transport->asoc;
+ u32 rwnd = asoc->peer.rwnd;
+
/* Keep track of how many bytes are in flight over this transport. */
transport->flight_size += datasize;
@@ -753,7 +747,45 @@ static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet,
/* Has been accepted for transmission. */
if (!asoc->peer.prsctp_capable)
chunk->msg->can_abandon = 0;
+}
+
+static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
+ struct sctp_chunk *chunk,
+ u16 chunk_len)
+{
+ size_t psize;
+ size_t pmtu;
+ int too_big;
+ sctp_xmit_t retval = SCTP_XMIT_OK;
+
+ psize = packet->size;
+ pmtu = ((packet->transport->asoc) ?
+ (packet->transport->asoc->pathmtu) :
+ (packet->transport->pathmtu));
+
+ too_big = (psize + chunk_len > pmtu);
+
+ /* Decide if we need to fragment or resubmit later. */
+ if (too_big) {
+ /* It's OK to fragment at IP level if any one of the following
+ * is true:
+ * 1. The packet is empty (meaning this chunk is greater
+ * than the MTU)
+ * 2. The chunk we are adding is a control chunk
+ * 3. The packet doesn't have any data in it yet and data
+ * requires authentication.
+ */
+ if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) ||
+ (!packet->has_data && chunk->auth)) {
+ /* We no longer do re-fragmentation.
+ * Just fragment at the IP layer, if we
+ * actually hit this condition
+ */
+ packet->ipfragok = 1;
+ } else {
+ retval = SCTP_XMIT_PMTU_FULL;
+ }
+ }
-finish:
return retval;
}
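
The rewritten Nagle test above delays a DATA chunk only when the packet is still empty, data is already in flight, Nagle is enabled, and both the pending bytes and the originating message are smaller than one MTU worth of payload. A hedged sketch of that predicate, with illustrative parameter names that are not the kernel's:

/* Hedged sketch of the Nagle decision in sctp_packet_can_append_data(). */
static int should_nagle_delay(int nodelay, int packet_empty,
			      size_t inflight, int established,
			      unsigned int pending_len,	/* chunk + out_qlen */
			      unsigned int msg_size,	/* whole sctp_datamsg */
			      unsigned int max_payload)	/* pathmtu - overhead */
{
	if (nodelay || !packet_empty || !inflight || !established)
		return 0;
	/* Delay only small writes that could still grow into a full
	 * packet; never hold back a large message that was merely
	 * fragmented into small pieces. */
	return pending_len < max_payload && msg_size < max_payload;
}
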
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index d765fc53e74..c9f20e28521 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -406,8 +406,9 @@ void sctp_retransmit_mark(struct sctp_outq *q,
* not be retransmitted
*/
if (!chunk->tsn_gap_acked) {
- chunk->transport->flight_size -=
- sctp_data_size(chunk);
+ if (chunk->transport)
+ chunk->transport->flight_size -=
+ sctp_data_size(chunk);
q->outstanding_bytes -= sctp_data_size(chunk);
q->asoc->peer.rwnd += (sctp_data_size(chunk) +
sizeof(struct sk_buff));
@@ -443,7 +444,8 @@ void sctp_retransmit_mark(struct sctp_outq *q,
q->asoc->peer.rwnd += (sctp_data_size(chunk) +
sizeof(struct sk_buff));
q->outstanding_bytes -= sctp_data_size(chunk);
- transport->flight_size -= sctp_data_size(chunk);
+ if (chunk->transport)
+ transport->flight_size -= sctp_data_size(chunk);
/* sctpimpguide-05 Section 2.8.2
* M5) If a T3-rtx timer expires, the
@@ -1310,6 +1312,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
__u32 rtt;
__u8 restart_timer = 0;
int bytes_acked = 0;
+ int migrate_bytes = 0;
/* These state variables are for coherent debug output. --xguo */
@@ -1343,8 +1346,9 @@ static void sctp_check_transmitted(struct sctp_outq *q,
* considering it as 'outstanding'.
*/
if (!tchunk->tsn_gap_acked) {
- tchunk->transport->flight_size -=
- sctp_data_size(tchunk);
+ if (tchunk->transport)
+ tchunk->transport->flight_size -=
+ sctp_data_size(tchunk);
q->outstanding_bytes -= sctp_data_size(tchunk);
}
continue;
@@ -1378,6 +1382,20 @@ static void sctp_check_transmitted(struct sctp_outq *q,
rtt);
}
}
+
+ /* If the chunk hasn't been marked as ACKED,
+ * mark it and account bytes_acked if the
+ * chunk had a valid transport (it will not
+ * have a transport if ASCONF had deleted it
+ * while DATA was outstanding).
+ */
+ if (!tchunk->tsn_gap_acked) {
+ tchunk->tsn_gap_acked = 1;
+ bytes_acked += sctp_data_size(tchunk);
+ if (!tchunk->transport)
+ migrate_bytes += sctp_data_size(tchunk);
+ }
+
if (TSN_lte(tsn, sack_ctsn)) {
/* RFC 2960 6.3.2 Retransmission Timer Rules
*
@@ -1391,8 +1409,6 @@ static void sctp_check_transmitted(struct sctp_outq *q,
restart_timer = 1;
if (!tchunk->tsn_gap_acked) {
- tchunk->tsn_gap_acked = 1;
- bytes_acked += sctp_data_size(tchunk);
/*
* SFR-CACC algorithm:
* 2) If the SACK contains gap acks
@@ -1432,10 +1448,6 @@ static void sctp_check_transmitted(struct sctp_outq *q,
* older than that newly acknowledged DATA
* chunk, are qualified as 'Stray DATA chunks'.
*/
- if (!tchunk->tsn_gap_acked) {
- tchunk->tsn_gap_acked = 1;
- bytes_acked += sctp_data_size(tchunk);
- }
list_add_tail(lchunk, &tlist);
}
@@ -1491,7 +1503,8 @@ static void sctp_check_transmitted(struct sctp_outq *q,
tsn);
tchunk->tsn_gap_acked = 0;
- bytes_acked -= sctp_data_size(tchunk);
+ if (tchunk->transport)
+ bytes_acked -= sctp_data_size(tchunk);
/* RFC 2960 6.3.2 Retransmission Timer Rules
*
@@ -1561,6 +1574,14 @@ static void sctp_check_transmitted(struct sctp_outq *q,
#endif /* SCTP_DEBUG */
if (transport) {
if (bytes_acked) {
+ /* We may have counted DATA that was migrated
+ * to this transport due to DEL-IP operation.
+ * Subtract those bytes, since they were never
+ * sent on this transport and shouldn't be
+ * credited to this transport.
+ */
+ bytes_acked -= migrate_bytes;
+
/* 8.2. When an outstanding TSN is acknowledged,
* the endpoint shall clear the error counter of
* the destination transport address to which the
@@ -1589,7 +1610,7 @@ static void sctp_check_transmitted(struct sctp_outq *q,
transport->flight_size -= bytes_acked;
if (transport->flight_size == 0)
transport->partial_bytes_acked = 0;
- q->outstanding_bytes -= bytes_acked;
+ q->outstanding_bytes -= bytes_acked + migrate_bytes;
} else {
/* RFC 2960 6.1, sctpimpguide-06 2.15.2
* When a sender is doing zero window probing, it
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index f268910620b..d093cbfeaac 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -512,10 +512,8 @@ int __init sctp_remaddr_proc_init(void)
{
struct proc_dir_entry *p;
- p = create_proc_entry("remaddr", S_IRUGO, proc_net_sctp);
+ p = proc_create("remaddr", S_IRUGO, proc_net_sctp, &sctp_remaddr_seq_fops);
if (!p)
return -ENOMEM;
- p->proc_fops = &sctp_remaddr_seq_fops;
-
return 0;
}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 79cbd47f4df..612dc878e05 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -160,6 +160,7 @@ static void sctp_proc_exit(void)
remove_proc_entry("sctp", init_net.proc_net);
}
#endif
+ percpu_counter_destroy(&sctp_sockets_allocated);
}
/* Private helper to extract ipv4 address and stash them in
@@ -430,16 +431,14 @@ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
* of requested destination address, sender and receiver
* SHOULD include all of its addresses with level greater
* than or equal to L.
+ *
+ * IPv4 scoping can be controlled through sysctl option
+ * net.sctp.addr_scope_policy
*/
static sctp_scope_t sctp_v4_scope(union sctp_addr *addr)
{
sctp_scope_t retval;
- /* Should IPv4 scoping be a sysctl configurable option
- * so users can turn it off (default on) for certain
- * unconventional networking environments?
- */
-
/* Check for unusable SCTP addresses. */
if (IS_IPV4_UNUSABLE_ADDRESS(addr->v4.sin_addr.s_addr)) {
retval = SCTP_SCOPE_UNUSABLE;
@@ -925,7 +924,7 @@ static struct inet_protosw sctp_stream_protosw = {
};
/* Register with IP layer. */
-static struct net_protocol sctp_protocol = {
+static const struct net_protocol sctp_protocol = {
.handler = sctp_rcv,
.err_handler = sctp_v4_err,
.no_policy = 1,
@@ -1185,10 +1184,10 @@ SCTP_STATIC __init int sctp_init(void)
/* Size and allocate the association hash table.
* The methodology is similar to that of the tcp hash tables.
*/
- if (num_physpages >= (128 * 1024))
- goal = num_physpages >> (22 - PAGE_SHIFT);
+ if (totalram_pages >= (128 * 1024))
+ goal = totalram_pages >> (22 - PAGE_SHIFT);
else
- goal = num_physpages >> (24 - PAGE_SHIFT);
+ goal = totalram_pages >> (24 - PAGE_SHIFT);
for (order = 0; (1UL << order) < goal; order++)
;
@@ -1258,6 +1257,9 @@ SCTP_STATIC __init int sctp_init(void)
/* Disable AUTH by default. */
sctp_auth_enable = 0;
+ /* Set SCOPE policy to enabled */
+ sctp_scope_policy = SCTP_SCOPE_POLICY_ENABLE;
+
sctp_sysctl_register();
INIT_LIST_HEAD(&sctp_address_families);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 61cc6075b0d..9d881a61ac0 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2861,6 +2861,11 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
addr_param = (union sctp_addr_param *)
((void *)asconf_param + sizeof(sctp_addip_param_t));
+ if (asconf_param->param_hdr.type != SCTP_PARAM_ADD_IP &&
+ asconf_param->param_hdr.type != SCTP_PARAM_DEL_IP &&
+ asconf_param->param_hdr.type != SCTP_PARAM_SET_PRIMARY)
+ return SCTP_ERROR_UNKNOWN_PARAM;
+
switch (addr_param->v4.param_hdr.type) {
case SCTP_PARAM_IPV6_ADDRESS:
if (!asoc->peer.ipv6_address)
@@ -2958,9 +2963,6 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
sctp_assoc_set_primary(asoc, peer);
break;
- default:
- return SCTP_ERROR_UNKNOWN_PARAM;
- break;
}
return SCTP_ERROR_NO_ERROR;
@@ -3104,7 +3106,7 @@ done:
}
/* Process an ASCONF parameter that is successfully acked. */
-static int sctp_asconf_param_success(struct sctp_association *asoc,
+static void sctp_asconf_param_success(struct sctp_association *asoc,
sctp_addip_param_t *asconf_param)
{
struct sctp_af *af;
@@ -3113,7 +3115,6 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
union sctp_addr_param *addr_param;
struct sctp_transport *transport;
struct sctp_sockaddr_entry *saddr;
- int retval = 0;
addr_param = (union sctp_addr_param *)
((void *)asconf_param + sizeof(sctp_addip_param_t));
@@ -3133,10 +3134,18 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
saddr->state = SCTP_ADDR_SRC;
}
local_bh_enable();
+ list_for_each_entry(transport, &asoc->peer.transport_addr_list,
+ transports) {
+ if (transport->state == SCTP_ACTIVE)
+ continue;
+ dst_release(transport->dst);
+ sctp_transport_route(transport, NULL,
+ sctp_sk(asoc->base.sk));
+ }
break;
case SCTP_PARAM_DEL_IP:
local_bh_disable();
- retval = sctp_del_bind_addr(bp, &addr);
+ sctp_del_bind_addr(bp, &addr);
local_bh_enable();
list_for_each_entry(transport, &asoc->peer.transport_addr_list,
transports) {
@@ -3148,8 +3157,6 @@ static int sctp_asconf_param_success(struct sctp_association *asoc,
default:
break;
}
-
- return retval;
}
/* Get the corresponding ASCONF response error code from the ASCONF_ACK chunk
@@ -3266,7 +3273,7 @@ int sctp_process_asconf_ack(struct sctp_association *asoc,
switch (err_code) {
case SCTP_ERROR_NO_ERROR:
- retval = sctp_asconf_param_success(asoc, asconf_param);
+ sctp_asconf_param_success(asoc, asconf_param);
break;
case SCTP_ERROR_RSRC_LOW:
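
The sm_make_chunk.c hunk moves the unknown-parameter check ahead of the address handling, so an ASCONF parameter of an unexpected type is rejected before anything else is done with it. A minimal sketch of that validate-before-dispatch pattern, using illustrative enum values rather than the kernel's SCTP_PARAM_* constants:

#include <stdio.h>

enum param_type { PARAM_ADD_IP = 1, PARAM_DEL_IP, PARAM_SET_PRIMARY };

/* Reject anything we do not recognise before touching the payload. */
static int process_param(int type)
{
        if (type != PARAM_ADD_IP && type != PARAM_DEL_IP &&
            type != PARAM_SET_PRIMARY)
                return -1;      /* corresponds to the unknown-parameter error above */
        return 0;               /* safe to dispatch on type from here on */
}

int main(void)
{
        printf("known: %d, unknown: %d\n",
               process_param(PARAM_ADD_IP), process_param(42));
        return 0;
}
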
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 86426aac160..8674d491955 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -440,14 +440,26 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
/* The check for association's overall error counter exceeding the
* threshold is done in the state function.
*/
- /* When probing UNCONFIRMED addresses, the association overall
- * error count is NOT incremented
+ /* We are here due to a timer expiration. If the timer was
+ * not a HEARTBEAT, then normal error tracking is done.
+ * If the timer was a heartbeat, we only increment error counts
+ * when we already have an outstanding HEARTBEAT that has not
+ * been acknowledged.
+ * Additionally, some transport states inhibit error increments.
*/
- if (transport->state != SCTP_UNCONFIRMED)
+ if (!is_hb) {
asoc->overall_error_count++;
+ if (transport->state != SCTP_INACTIVE)
+ transport->error_count++;
+ } else if (transport->hb_sent) {
+ if (transport->state != SCTP_UNCONFIRMED)
+ asoc->overall_error_count++;
+ if (transport->state != SCTP_INACTIVE)
+ transport->error_count++;
+ }
if (transport->state != SCTP_INACTIVE &&
- (transport->error_count++ >= transport->pathmaxrxt)) {
+ (transport->error_count > transport->pathmaxrxt)) {
SCTP_DEBUG_PRINTK_IPADDR("transport_strike:association %p",
" transport IP: port:%d failed.\n",
asoc,
@@ -931,6 +943,27 @@ static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
}
+/* Send the whole message, chunk by chunk, to the outqueue.
+ * This way the whole message is queued up and bundling is
+ * encouraged for small fragments.
+ */
+static int sctp_cmd_send_msg(struct sctp_association *asoc,
+ struct sctp_datamsg *msg)
+{
+ struct sctp_chunk *chunk;
+ int error = 0;
+
+ list_for_each_entry(chunk, &msg->chunks, frag_list) {
+ error = sctp_outq_tail(&asoc->outqueue, chunk);
+ if (error)
+ break;
+ }
+
+ return error;
+}
+
+
+
/* These three macros allow us to pull the debugging code out of the
* main flow of sctp_do_sm() to keep attention focused on the real
* functionality there.
@@ -1500,7 +1533,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_PROCESS_CTSN:
/* Dummy up a SACK for processing. */
sackh.cum_tsn_ack = cmd->obj.be32;
- sackh.a_rwnd = 0;
+ sackh.a_rwnd = asoc->peer.rwnd +
+ asoc->outqueue.outstanding_bytes;
sackh.num_gap_ack_blocks = 0;
sackh.num_dup_tsns = 0;
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
@@ -1575,7 +1609,13 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
case SCTP_CMD_UPDATE_INITTAG:
asoc->peer.i.init_tag = cmd->obj.u32;
break;
-
+ case SCTP_CMD_SEND_MSG:
+ if (!asoc->outqueue.cork) {
+ sctp_outq_cork(&asoc->outqueue);
+ local_cork = 1;
+ }
+ error = sctp_cmd_send_msg(asoc, cmd->obj.msg);
+ break;
default:
printk(KERN_WARNING "Impossible command: %u, %p\n",
cmd->verb, cmd->obj.ptr);
@@ -1593,9 +1633,9 @@ out:
*/
if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
if (chunk->end_of_packet || chunk->singleton)
- sctp_outq_uncork(&asoc->outqueue);
+ error = sctp_outq_uncork(&asoc->outqueue);
} else if (local_cork)
- sctp_outq_uncork(&asoc->outqueue);
+ error = sctp_outq_uncork(&asoc->outqueue);
return error;
nomem:
error = -ENOMEM;
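
With the new SCTP_CMD_SEND_MSG handling, the state machine corks the association's outqueue, queues every chunk of the data message, and uncorks once at the end so small fragments can be bundled into fewer packets. A simplified, self-contained sketch of that cork-and-flush idea (not the kernel API):

#include <stdio.h>

struct outq {
        int corked;
        int pending;
};

static void outq_flush(struct outq *q)
{
        printf("flushing %d chunk(s) in one go\n", q->pending);
        q->pending = 0;
}

static void outq_tail(struct outq *q)
{
        q->pending++;
        if (!q->corked)         /* uncorked: send immediately */
                outq_flush(q);
}

static void outq_uncork(struct outq *q)
{
        q->corked = 0;
        if (q->pending)
                outq_flush(q);
}

int main(void)
{
        struct outq q = { .corked = 1, .pending = 0 };

        for (int i = 0; i < 3; i++)     /* three fragments of one message */
                outq_tail(&q);
        outq_uncork(&q);                /* everything leaves together */
        return 0;
}
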
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 7288192f7df..c8fae1983dd 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -334,6 +334,15 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
+ /* If the INIT is coming toward a closing socket, we'll send back
+ * an ABORT. Essentially, this catches the race of an INIT being
+ * backlogged to the socket at the same time as the user issues close().
+ * Since the socket and all its associations are going away, we
+ * can treat this as OOTB.
+ */
+ if (sctp_sstate(ep->base.sk, CLOSING))
+ return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands);
+
/* Verify the INIT chunk before processing it. */
err_chunk = NULL;
if (!sctp_verify_init(asoc, chunk->chunk_hdr->type,
@@ -962,7 +971,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep,
{
struct sctp_transport *transport = (struct sctp_transport *) arg;
- if (asoc->overall_error_count > asoc->max_retrans) {
+ if (asoc->overall_error_count >= asoc->max_retrans) {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
@@ -1106,7 +1115,8 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
/* Make sure that the HEARTBEAT-ACK chunk has a valid length. */
- if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t)))
+ if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t) +
+ sizeof(sctp_sender_hb_info_t)))
return sctp_sf_violation_chunklen(ep, asoc, type, arg,
commands);
@@ -2561,6 +2571,12 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep,
chunk->subh.shutdown_hdr = sdh;
ctsn = ntohl(sdh->cum_tsn_ack);
+ if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
+ SCTP_DEBUG_PRINTK("ctsn %x\n", ctsn);
+ SCTP_DEBUG_PRINTK("ctsn_ack_point %x\n", asoc->ctsn_ack_point);
+ return SCTP_DISPOSITION_DISCARD;
+ }
+
/* If Cumulative TSN Ack beyond the max tsn currently
* send, terminating the association and respond to the
* sender with an ABORT.
@@ -2624,6 +2640,7 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
{
struct sctp_chunk *chunk = arg;
sctp_shutdownhdr_t *sdh;
+ __u32 ctsn;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(ep, asoc, type, arg, commands);
@@ -2635,12 +2652,19 @@ sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(const struct sctp_endpoint *ep,
commands);
sdh = (sctp_shutdownhdr_t *)chunk->skb->data;
+ ctsn = ntohl(sdh->cum_tsn_ack);
+
+ if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
+ SCTP_DEBUG_PRINTK("ctsn %x\n", ctsn);
+ SCTP_DEBUG_PRINTK("ctsn_ack_point %x\n", asoc->ctsn_ack_point);
+ return SCTP_DISPOSITION_DISCARD;
+ }
/* If Cumulative TSN Ack beyond the max tsn currently
* send, terminating the association and respond to the
* sender with an ABORT.
*/
- if (!TSN_lt(ntohl(sdh->cum_tsn_ack), asoc->next_tsn))
+ if (!TSN_lt(ctsn, asoc->next_tsn))
return sctp_sf_violation_ctsn(ep, asoc, type, arg, commands);
/* verify, by checking the Cumulative TSN Ack field of the
@@ -2867,6 +2891,9 @@ sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep,
goto discard_force;
case SCTP_IERROR_NO_DATA:
goto consume;
+ case SCTP_IERROR_PROTO_VIOLATION:
+ return sctp_sf_abort_violation(ep, asoc, chunk, commands,
+ (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
default:
BUG();
}
@@ -2977,6 +3004,9 @@ sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep,
break;
case SCTP_IERROR_NO_DATA:
goto consume;
+ case SCTP_IERROR_PROTO_VIOLATION:
+ return sctp_sf_abort_violation(ep, asoc, chunk, commands,
+ (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
default:
BUG();
}
@@ -3519,6 +3549,12 @@ sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep,
asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial);
if (!asconf_ack)
return SCTP_DISPOSITION_DISCARD;
+
+ /* Reset the transport so that we select the correct one
+ * this time around. This is to make sure that we don't
+ * accidentally use a stale transport that's been removed.
+ */
+ asconf_ack->transport = NULL;
} else {
/* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since
* it must be either a stale packet or from an attacker.
@@ -4546,9 +4582,9 @@ sctp_disposition_t sctp_sf_do_prm_send(const struct sctp_endpoint *ep,
void *arg,
sctp_cmd_seq_t *commands)
{
- struct sctp_chunk *chunk = arg;
+ struct sctp_datamsg *msg = arg;
- sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk));
+ sctp_add_cmd_sf(commands, SCTP_CMD_SEND_MSG, SCTP_DATAMSG(msg));
return SCTP_DISPOSITION_CONSUME;
}
@@ -5847,6 +5883,9 @@ static int sctp_eat_data(const struct sctp_association *asoc,
__u32 tsn;
struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
struct sock *sk = asoc->base.sk;
+ u16 ssn;
+ u16 sid;
+ u8 ordered = 0;
data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
@@ -5986,8 +6025,10 @@ static int sctp_eat_data(const struct sctp_association *asoc,
*/
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS);
- else
+ else {
SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS);
+ ordered = 1;
+ }
/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
*
@@ -5997,7 +6038,8 @@ static int sctp_eat_data(const struct sctp_association *asoc,
* with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
* and discard the DATA chunk.
*/
- if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) {
+ sid = ntohs(data_hdr->stream);
+ if (sid >= asoc->c.sinit_max_instreams) {
/* Mark tsn as received even though we drop it */
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
@@ -6010,6 +6052,18 @@ static int sctp_eat_data(const struct sctp_association *asoc,
return SCTP_IERROR_BAD_STREAM;
}
+ /* Check to see if the SSN is possible for this TSN.
+ * The biggest gap we can record is 4K wide. Since SSNs wrap
+ * at an unsigned short, there is no way that an SSN can
+ * wrap and still be valid for this TSN. We can simply check if the current
+ * SSN is smaller than the next expected one. If it is, it wrapped
+ * and is invalid.
+ */
+ ssn = ntohs(data_hdr->ssn);
+ if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->ssnmap->in, sid))) {
+ return SCTP_IERROR_PROTO_VIOLATION;
+ }
+
/* Send the data up to the user. Note: Schedule the
* SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
* chunk needs the updated rwnd.
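
The new SSN sanity check relies on serial-number comparison (SSN_lt): an ordered chunk whose SSN compares "less than" the next expected SSN for its stream must have wrapped and is treated as a protocol violation. A standalone illustration of 16-bit serial comparison in the same spirit (not the kernel macro itself):

#include <stdint.h>
#include <stdio.h>

/* "a is before b" in 16-bit serial-number arithmetic. */
static int ssn_lt(uint16_t a, uint16_t b)
{
        return (int16_t)(a - b) < 0;
}

int main(void)
{
        printf("%d\n", ssn_lt(5, 10));          /* 1: genuinely older        */
        printf("%d\n", ssn_lt(65535, 3));       /* 1: 65535 precedes 3       */
        printf("%d\n", ssn_lt(10, 5));          /* 0: not older, so accepted */
        return 0;
}
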
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 971890dbfea..89af37a6c87 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1361,6 +1361,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout)
sctp_lock_sock(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
+ sk->sk_state = SCTP_SS_CLOSING;
ep = sctp_sk(sk)->ep;
@@ -1813,20 +1814,22 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
sctp_set_owner_w(chunk);
chunk->transport = chunk_tp;
-
- /* Send it to the lower layers. Note: all chunks
- * must either fail or succeed. The lower layer
- * works that way today. Keep it that way or this
- * breaks.
- */
- err = sctp_primitive_SEND(asoc, chunk);
- /* Did the lower layer accept the chunk? */
- if (err)
- sctp_chunk_free(chunk);
- SCTP_DEBUG_PRINTK("We sent primitively.\n");
}
- sctp_datamsg_put(datamsg);
+ /* Send it to the lower layers. Note: all chunks
+ * must either fail or succeed. The lower layer
+ * works that way today. Keep it that way or this
+ * breaks.
+ */
+ err = sctp_primitive_SEND(asoc, datamsg);
+ /* Did the lower layer accept the chunk? */
+ if (err)
+ sctp_datamsg_free(datamsg);
+ else
+ sctp_datamsg_put(datamsg);
+
+ SCTP_DEBUG_PRINTK("We sent primitively.\n");
+
if (err)
goto out_free;
else
@@ -2240,7 +2243,7 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
sctp_assoc_sync_pmtu(asoc);
} else if (asoc) {
asoc->pathmtu = params->spp_pathmtu;
- sctp_frag_point(sp, params->spp_pathmtu);
+ sctp_frag_point(asoc, params->spp_pathmtu);
} else {
sp->pathmtu = params->spp_pathmtu;
}
@@ -2877,15 +2880,10 @@ static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, int optl
val -= sizeof(struct sctphdr) +
sizeof(struct sctp_data_chunk);
}
-
- asoc->frag_point = val;
+ asoc->user_frag = val;
+ asoc->frag_point = sctp_frag_point(asoc, asoc->pathmtu);
} else {
sp->user_frag = val;
-
- /* Update the frag_point of the existing associations. */
- list_for_each_entry(asoc, &(sp->ep->asocs), asocs) {
- asoc->frag_point = sctp_frag_point(sp, asoc->pathmtu);
- }
}
return 0;
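
The sendmsg change hands the whole data message to sctp_primitive_SEND() and then drops the sender's reference: the message is freed outright on failure, but only "put" on success, because the queued chunks keep their own references. A much-simplified reference-count model of why a put is enough on the success path (names are illustrative, not the SCTP API):

#include <stdio.h>
#include <stdlib.h>

struct msg {
        int refs;
};

static struct msg *msg_new(void)
{
        struct msg *m = calloc(1, sizeof(*m));

        if (m)
                m->refs = 1;            /* creator's reference */
        return m;
}

static void msg_put(struct msg *m)
{
        if (--m->refs == 0) {
                printf("last reference dropped, freeing message\n");
                free(m);
        }
}

int main(void)
{
        struct msg *m = msg_new();

        if (!m)
                return 1;
        m->refs++;      /* the queue takes its own reference       */
        msg_put(m);     /* sender's put: the message lives on      */
        msg_put(m);     /* queue finishes with it: now it is freed */
        return 0;
}
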
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 63eabbc7129..ab7151da120 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -51,6 +51,7 @@ static int timer_max = 86400000; /* ms in one day */
static int int_max = INT_MAX;
static int sack_timer_min = 1;
static int sack_timer_max = 500;
+static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
extern int sysctl_sctp_mem[3];
extern int sysctl_sctp_rmem[3];
@@ -272,6 +273,17 @@ static ctl_table sctp_table[] = {
.proc_handler = proc_dointvec,
.strategy = sysctl_intvec
},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "addr_scope_policy",
+ .data = &sctp_scope_policy,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ .extra2 = &addr_scope_max,
+ },
{ .ctl_name = 0 }
};
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index e5dde45c79d..c256e483931 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -503,6 +503,9 @@ void sctp_transport_lower_cwnd(struct sctp_transport *transport,
transport->ssthresh = max(transport->cwnd/2,
4*transport->asoc->pathmtu);
transport->cwnd = transport->asoc->pathmtu;
+
+ /* T3-rtx also clears fast recovery on the transport */
+ transport->fast_recovery = 0;
break;
case SCTP_LOWER_CWND_FAST_RTX:
diff --git a/net/socket.c b/net/socket.c
index 6d471655904..0ad02ae61a9 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -285,7 +285,7 @@ static int init_inodecache(void)
return 0;
}
-static struct super_operations sockfs_ops = {
+static const struct super_operations sockfs_ops = {
.alloc_inode = sock_alloc_inode,
.destroy_inode =sock_destroy_inode,
.statfs = simple_statfs,
@@ -489,6 +489,7 @@ static struct socket *sock_alloc(void)
sock = SOCKET_I(inode);
+ kmemcheck_annotate_bitfield(sock, type);
inode->i_mode = S_IFSOCK | S_IRWXUGO;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 7389804e3bb..a417d5ab5dd 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -956,6 +956,7 @@ static inline void
rpc_task_force_reencode(struct rpc_task *task)
{
task->tk_rqstp->rq_snd_buf.len = 0;
+ task->tk_rqstp->rq_bytes_sent = 0;
}
static inline void
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 7f676bdf70d..858a443f418 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -930,7 +930,7 @@ void rpc_remove_cache_dir(struct dentry *dentry)
/*
* populate the filesystem
*/
-static struct super_operations s_ops = {
+static const struct super_operations s_ops = {
.alloc_inode = rpc_alloc_inode,
.destroy_inode = rpc_destroy_inode,
.statfs = simple_statfs,
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index a7a36779b9b..327011fcc40 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -119,7 +119,7 @@ int tipc_register_media(u32 media_type,
warn("Media <%s> rejected, no broadcast address\n", name);
goto exit;
}
- if ((bearer_priority < TIPC_MIN_LINK_PRI) &&
+ if ((bearer_priority < TIPC_MIN_LINK_PRI) ||
(bearer_priority > TIPC_MAX_LINK_PRI)) {
warn("Media <%s> rejected, illegal priority (%u)\n", name,
bearer_priority);
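
The one-character bearer.c fix matters because "priority < MIN && priority > MAX" can never be true, so out-of-range priorities were silently accepted. A tiny demonstration with illustrative bounds (not TIPC's actual constants):

#include <stdio.h>

#define PRI_MIN 1
#define PRI_MAX 3

static int rejected_broken(unsigned int p)
{
        return p < PRI_MIN && p > PRI_MAX;      /* never true */
}

static int rejected_fixed(unsigned int p)
{
        return p < PRI_MIN || p > PRI_MAX;      /* proper range check */
}

int main(void)
{
        printf("priority 9: broken check rejects=%d, fixed check rejects=%d\n",
               rejected_broken(9), rejected_fixed(9));
        return 0;
}
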
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 3c57005e44d..7bda8e3d139 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -62,7 +62,7 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
rep_nlh = nlmsg_hdr(rep_buf);
memcpy(rep_nlh, req_nlh, hdr_space);
rep_nlh->nlmsg_len = rep_buf->len;
- genlmsg_unicast(rep_buf, NETLINK_CB(skb).pid);
+ genlmsg_unicast(&init_net, rep_buf, NETLINK_CB(skb).pid);
}
return 0;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 1848693ebb8..e8254e809b7 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1748,6 +1748,12 @@ static int getsockopt(struct socket *sock,
value = jiffies_to_msecs(sk->sk_rcvtimeo);
/* no need to set "res", since already 0 at this point */
break;
+ case TIPC_NODE_RECVQ_DEPTH:
+ value = (u32)atomic_read(&tipc_queue_size);
+ break;
+ case TIPC_SOCK_RECVQ_DEPTH:
+ value = skb_queue_len(&sk->sk_receive_queue);
+ break;
default:
res = -EINVAL;
}
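
The tipc/socket.c hunk exposes the node and per-socket receive-queue depths through getsockopt(). A hypothetical userspace probe might look like the following; SOL_TIPC and TIPC_SOCK_RECVQ_DEPTH are assumed to be available via <linux/tipc.h> (with a guarded fallback for the level), so verify the constants against your headers.

#include <stdio.h>
#include <sys/socket.h>
#include <linux/tipc.h>

#ifndef SOL_TIPC
#define SOL_TIPC 271    /* assumed socket level; check your headers */
#endif

int main(void)
{
        int sd = socket(AF_TIPC, SOCK_RDM, 0);
        unsigned int depth = 0;
        socklen_t len = sizeof(depth);

        if (sd < 0) {
                perror("socket");
                return 1;
        }
        if (getsockopt(sd, SOL_TIPC, TIPC_SOCK_RECVQ_DEPTH, &depth, &len) < 0) {
                perror("TIPC_SOCK_RECVQ_DEPTH");
                return 1;
        }
        printf("receive queue depth: %u\n", depth);
        return 0;
}
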
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index fc3ebb90691..51ab497115e 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1501,6 +1501,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct sk_buff *skb;
int sent = 0;
struct scm_cookie tmp_scm;
+ bool fds_sent = false;
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
@@ -1562,12 +1563,14 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
size = min_t(int, size, skb_tailroom(skb));
memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
- if (siocb->scm->fp) {
+ /* Only send the fds in the first buffer */
+ if (siocb->scm->fp && !fds_sent) {
err = unix_attach_fds(siocb->scm, skb);
if (err) {
kfree_skb(skb);
goto out_err;
}
+ fds_sent = true;
}
err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
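
The af_unix.c fix attaches the passed file descriptors only to the first sk_buff of a long stream write, instead of duplicating them onto every segment. A minimal sketch of the "attach the control data once" pattern (plain stdout stands in for SCM_RIGHTS handling):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
        bool fds_sent = false;

        for (int seg = 0; seg < 4; seg++) {
                bool attach = !fds_sent;        /* only the first segment */

                if (attach)
                        fds_sent = true;
                printf("segment %d: descriptors %s\n",
                       seg, attach ? "attached" : "omitted");
        }
        return 0;
}
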
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 4428dd5e911..abf7ca3f9ff 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -1,6 +1,47 @@
config CFG80211
- tristate "Improved wireless configuration API"
+ tristate "cfg80211 - wireless configuration API"
depends on RFKILL || !RFKILL
+ ---help---
+ cfg80211 is the Linux wireless LAN (802.11) configuration API.
+ Enable this if you have a wireless device.
+
+ For more information refer to documentation on the wireless wiki:
+
+ http://wireless.kernel.org/en/developers/Documentation/cfg80211
+
+ When built as a module it will be called cfg80211.
+
+config NL80211_TESTMODE
+ bool "nl80211 testmode command"
+ depends on CFG80211
+ help
+ The nl80211 testmode command helps implement things like
+ factory calibration or validation tools for wireless chips.
+
+ Select this option ONLY for kernels that are specifically
+ built for such purposes.
+
+ Debugging tools that are supposed to end up in the hands of
+ users are better implemented with debugfs.
+
+ Say N.
+
+config CFG80211_DEVELOPER_WARNINGS
+ bool "enable developer warnings"
+ depends on CFG80211
+ default n
+ help
+ This option enables some additional warnings that help
+ cfg80211 developers and driver developers, but that can
+ trigger due to races with userspace.
+
+ For example, when a driver reports that it was disconnected
+ from the AP, but the user disconnects manually at the same
+ time, the warning might trigger spuriously due to races.
+
+ Say Y only if you are developing cfg80211 or a driver based
+ on it (or mac80211).
+
config CFG80211_REG_DEBUG
bool "cfg80211 regulatory debugging"
@@ -8,9 +49,29 @@ config CFG80211_REG_DEBUG
default n
---help---
You can enable this if you want to debug regulatory changes.
+ For more information on cfg80211 regulatory refer to the wireless
+ wiki:
+
+ http://wireless.kernel.org/en/developers/Regulatory
If unsure, say N.
+config CFG80211_DEFAULT_PS
+ bool "enable powersave by default"
+ depends on CFG80211
+ default y
+ help
+ This option enables powersave mode by default.
+
+ If this causes your applications to misbehave you should fix your
+ applications instead -- they need to register their network
+ latency requirement, see Documentation/power/pm_qos_interface.txt.
+
+config CFG80211_DEFAULT_PS_VALUE
+ int
+ default 1 if CFG80211_DEFAULT_PS
+ default 0
+
config CFG80211_DEBUGFS
bool "cfg80211 DebugFS entries"
depends on CFG80211 && DEBUG_FS
@@ -35,19 +96,13 @@ config WIRELESS_OLD_REGULATORY
config WIRELESS_EXT
bool "Wireless extensions"
- default n
+ default y
---help---
This option enables the legacy wireless extensions
(wireless network interface configuration via ioctls.)
- Wireless extensions will be replaced by cfg80211 and
- will be required only by legacy drivers that implement
- wireless extension handlers. This option does not
- affect the wireless-extension backward compatibility
- code in cfg80211.
-
- Say N (if you can) unless you know you need wireless
- extensions for external modules.
+ Say Y unless you've upgraded all your userspace to use
+ nl80211 instead of wireless extensions.
config WIRELESS_EXT_SYSFS
bool "Wireless extensions sysfs files"
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index f78c4832a9c..3ecaa917997 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -5,8 +5,9 @@ obj-$(CONFIG_LIB80211_CRYPT_WEP) += lib80211_crypt_wep.o
obj-$(CONFIG_LIB80211_CRYPT_CCMP) += lib80211_crypt_ccmp.o
obj-$(CONFIG_LIB80211_CRYPT_TKIP) += lib80211_crypt_tkip.o
-cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o mlme.o ibss.o
+cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
+cfg80211-y += mlme.o ibss.o sme.o chan.o
cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
-cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o
+cfg80211-$(CONFIG_WIRELESS_EXT) += wext-compat.o wext-sme.o
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
new file mode 100644
index 00000000000..a46ac6c9b36
--- /dev/null
+++ b/net/wireless/chan.c
@@ -0,0 +1,89 @@
+/*
+ * This file contains helper code to handle channel
+ * settings and keeping track of what is possible at
+ * any point in time.
+ *
+ * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
+ */
+
+#include <net/cfg80211.h>
+#include "core.h"
+
+struct ieee80211_channel *
+rdev_fixed_channel(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *for_wdev)
+{
+ struct wireless_dev *wdev;
+ struct ieee80211_channel *result = NULL;
+
+ WARN_ON(!mutex_is_locked(&rdev->devlist_mtx));
+
+ list_for_each_entry(wdev, &rdev->netdev_list, list) {
+ if (wdev == for_wdev)
+ continue;
+
+ /*
+ * Lock manually to tell lockdep about allowed
+ * nesting here if for_wdev->mtx is held already.
+ * This is ok as it's all under the rdev devlist
+ * mutex and as such can only be done once at any
+ * given time.
+ */
+ mutex_lock_nested(&wdev->mtx, SINGLE_DEPTH_NESTING);
+ if (wdev->current_bss)
+ result = wdev->current_bss->pub.channel;
+ wdev_unlock(wdev);
+
+ if (result)
+ break;
+ }
+
+ return result;
+}
+
+int rdev_set_freq(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *for_wdev,
+ int freq, enum nl80211_channel_type channel_type)
+{
+ struct ieee80211_channel *chan;
+ struct ieee80211_sta_ht_cap *ht_cap;
+ int result;
+
+ if (rdev_fixed_channel(rdev, for_wdev))
+ return -EBUSY;
+
+ if (!rdev->ops->set_channel)
+ return -EOPNOTSUPP;
+
+ chan = ieee80211_get_channel(&rdev->wiphy, freq);
+
+ /* Primary channel not allowed */
+ if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
+ return -EINVAL;
+
+ if (channel_type == NL80211_CHAN_HT40MINUS &&
+ chan->flags & IEEE80211_CHAN_NO_HT40MINUS)
+ return -EINVAL;
+ else if (channel_type == NL80211_CHAN_HT40PLUS &&
+ chan->flags & IEEE80211_CHAN_NO_HT40PLUS)
+ return -EINVAL;
+
+ ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap;
+
+ if (channel_type != NL80211_CHAN_NO_HT) {
+ if (!ht_cap->ht_supported)
+ return -EINVAL;
+
+ if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
+ ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)
+ return -EINVAL;
+ }
+
+ result = rdev->ops->set_channel(&rdev->wiphy, chan, channel_type);
+ if (result)
+ return result;
+
+ rdev->channel = chan;
+
+ return 0;
+}
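
rdev_set_freq() above refuses disabled channels and HT40+/HT40- placements that the channel flags forbid before it ever calls the driver. The same checks restated as a standalone helper, using illustrative flag bits rather than the IEEE80211_CHAN_* values:

#include <stdio.h>

enum chan_type { CHAN_NO_HT, CHAN_HT20, CHAN_HT40MINUS, CHAN_HT40PLUS };

#define FLAG_DISABLED           0x1
#define FLAG_NO_HT40MINUS       0x2
#define FLAG_NO_HT40PLUS        0x4

static int channel_usable(unsigned int flags, enum chan_type type)
{
        if (flags & FLAG_DISABLED)
                return 0;
        if (type == CHAN_HT40MINUS && (flags & FLAG_NO_HT40MINUS))
                return 0;
        if (type == CHAN_HT40PLUS && (flags & FLAG_NO_HT40PLUS))
                return 0;
        return 1;
}

int main(void)
{
        printf("%d\n", channel_usable(FLAG_NO_HT40PLUS, CHAN_HT40PLUS)); /* 0 */
        printf("%d\n", channel_usable(0, CHAN_HT40PLUS));                /* 1 */
        return 0;
}
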
diff --git a/net/wireless/core.c b/net/wireless/core.c
index d5850292b3d..45b2be3274d 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -12,6 +12,7 @@
#include <linux/debugfs.h>
#include <linux/notifier.h>
#include <linux/device.h>
+#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/genetlink.h>
#include <net/cfg80211.h>
@@ -19,6 +20,7 @@
#include "core.h"
#include "sysfs.h"
#include "debugfs.h"
+#include "wext-compat.h"
/* name for sysfs, %d is appended */
#define PHY_NAME "phy"
@@ -30,12 +32,11 @@ MODULE_DESCRIPTION("wireless configuration support");
/* RCU might be appropriate here since we usually
* only read the list, and that can happen quite
* often because we need to do it for each command */
-LIST_HEAD(cfg80211_drv_list);
+LIST_HEAD(cfg80211_rdev_list);
+int cfg80211_rdev_list_generation;
/*
- * This is used to protect the cfg80211_drv_list, cfg80211_regdomain,
- * country_ie_regdomain, the reg_beacon_list and the the last regulatory
- * request receipt (last_request).
+ * This is used to protect the cfg80211_rdev_list
*/
DEFINE_MUTEX(cfg80211_mutex);
@@ -43,18 +44,18 @@ DEFINE_MUTEX(cfg80211_mutex);
static struct dentry *ieee80211_debugfs_dir;
/* requires cfg80211_mutex to be held! */
-struct cfg80211_registered_device *cfg80211_drv_by_wiphy_idx(int wiphy_idx)
+struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx)
{
- struct cfg80211_registered_device *result = NULL, *drv;
+ struct cfg80211_registered_device *result = NULL, *rdev;
if (!wiphy_idx_valid(wiphy_idx))
return NULL;
assert_cfg80211_lock();
- list_for_each_entry(drv, &cfg80211_drv_list, list) {
- if (drv->wiphy_idx == wiphy_idx) {
- result = drv;
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ if (rdev->wiphy_idx == wiphy_idx) {
+ result = rdev;
break;
}
}
@@ -64,32 +65,32 @@ struct cfg80211_registered_device *cfg80211_drv_by_wiphy_idx(int wiphy_idx)
int get_wiphy_idx(struct wiphy *wiphy)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
if (!wiphy)
return WIPHY_IDX_STALE;
- drv = wiphy_to_dev(wiphy);
- return drv->wiphy_idx;
+ rdev = wiphy_to_dev(wiphy);
+ return rdev->wiphy_idx;
}
-/* requires cfg80211_drv_mutex to be held! */
+/* requires cfg80211_rdev_mutex to be held! */
struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
if (!wiphy_idx_valid(wiphy_idx))
return NULL;
assert_cfg80211_lock();
- drv = cfg80211_drv_by_wiphy_idx(wiphy_idx);
- if (!drv)
+ rdev = cfg80211_rdev_by_wiphy_idx(wiphy_idx);
+ if (!rdev)
return NULL;
- return &drv->wiphy;
+ return &rdev->wiphy;
}
/* requires cfg80211_mutex to be held! */
struct cfg80211_registered_device *
-__cfg80211_drv_from_info(struct genl_info *info)
+__cfg80211_rdev_from_info(struct genl_info *info)
{
int ifindex;
struct cfg80211_registered_device *bywiphyidx = NULL, *byifidx = NULL;
@@ -99,14 +100,14 @@ __cfg80211_drv_from_info(struct genl_info *info)
assert_cfg80211_lock();
if (info->attrs[NL80211_ATTR_WIPHY]) {
- bywiphyidx = cfg80211_drv_by_wiphy_idx(
+ bywiphyidx = cfg80211_rdev_by_wiphy_idx(
nla_get_u32(info->attrs[NL80211_ATTR_WIPHY]));
err = -ENODEV;
}
if (info->attrs[NL80211_ATTR_IFINDEX]) {
ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]);
- dev = dev_get_by_index(&init_net, ifindex);
+ dev = dev_get_by_index(genl_info_net(info), ifindex);
if (dev) {
if (dev->ieee80211_ptr)
byifidx =
@@ -134,54 +135,48 @@ __cfg80211_drv_from_info(struct genl_info *info)
struct cfg80211_registered_device *
cfg80211_get_dev_from_info(struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
mutex_lock(&cfg80211_mutex);
- drv = __cfg80211_drv_from_info(info);
+ rdev = __cfg80211_rdev_from_info(info);
/* if it is not an error we grab the lock on
* it to assure it won't be going away while
* we operate on it */
- if (!IS_ERR(drv))
- mutex_lock(&drv->mtx);
+ if (!IS_ERR(rdev))
+ mutex_lock(&rdev->mtx);
mutex_unlock(&cfg80211_mutex);
- return drv;
+ return rdev;
}
struct cfg80211_registered_device *
-cfg80211_get_dev_from_ifindex(int ifindex)
+cfg80211_get_dev_from_ifindex(struct net *net, int ifindex)
{
- struct cfg80211_registered_device *drv = ERR_PTR(-ENODEV);
+ struct cfg80211_registered_device *rdev = ERR_PTR(-ENODEV);
struct net_device *dev;
mutex_lock(&cfg80211_mutex);
- dev = dev_get_by_index(&init_net, ifindex);
+ dev = dev_get_by_index(net, ifindex);
if (!dev)
goto out;
if (dev->ieee80211_ptr) {
- drv = wiphy_to_dev(dev->ieee80211_ptr->wiphy);
- mutex_lock(&drv->mtx);
+ rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy);
+ mutex_lock(&rdev->mtx);
} else
- drv = ERR_PTR(-ENODEV);
+ rdev = ERR_PTR(-ENODEV);
dev_put(dev);
out:
mutex_unlock(&cfg80211_mutex);
- return drv;
-}
-
-void cfg80211_put_dev(struct cfg80211_registered_device *drv)
-{
- BUG_ON(IS_ERR(drv));
- mutex_unlock(&drv->mtx);
+ return rdev;
}
/* requires cfg80211_mutex to be held */
int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
char *newname)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev2;
int wiphy_idx, taken = -1, result, digits;
assert_cfg80211_lock();
@@ -207,8 +202,8 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
return 0;
/* Ensure another device does not already have this name. */
- list_for_each_entry(drv, &cfg80211_drv_list, list)
- if (strcmp(newname, dev_name(&drv->wiphy.dev)) == 0)
+ list_for_each_entry(rdev2, &cfg80211_rdev_list, list)
+ if (strcmp(newname, dev_name(&rdev2->wiphy.dev)) == 0)
return -EINVAL;
result = device_rename(&rdev->wiphy.dev, newname);
@@ -228,28 +223,64 @@ int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
return 0;
}
+int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
+ struct net *net)
+{
+ struct wireless_dev *wdev;
+ int err = 0;
+
+ if (!rdev->wiphy.netnsok)
+ return -EOPNOTSUPP;
+
+ list_for_each_entry(wdev, &rdev->netdev_list, list) {
+ wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
+ err = dev_change_net_namespace(wdev->netdev, net, "wlan%d");
+ if (err)
+ break;
+ wdev->netdev->features |= NETIF_F_NETNS_LOCAL;
+ }
+
+ if (err) {
+ /* failed -- clean up to old netns */
+ net = wiphy_net(&rdev->wiphy);
+
+ list_for_each_entry_continue_reverse(wdev, &rdev->netdev_list,
+ list) {
+ wdev->netdev->features &= ~NETIF_F_NETNS_LOCAL;
+ err = dev_change_net_namespace(wdev->netdev, net,
+ "wlan%d");
+ WARN_ON(err);
+ wdev->netdev->features |= NETIF_F_NETNS_LOCAL;
+ }
+ }
+
+ wiphy_net_set(&rdev->wiphy, net);
+
+ return err;
+}
+
static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)
{
- struct cfg80211_registered_device *drv = data;
+ struct cfg80211_registered_device *rdev = data;
- drv->ops->rfkill_poll(&drv->wiphy);
+ rdev->ops->rfkill_poll(&rdev->wiphy);
}
static int cfg80211_rfkill_set_block(void *data, bool blocked)
{
- struct cfg80211_registered_device *drv = data;
+ struct cfg80211_registered_device *rdev = data;
struct wireless_dev *wdev;
if (!blocked)
return 0;
rtnl_lock();
- mutex_lock(&drv->devlist_mtx);
+ mutex_lock(&rdev->devlist_mtx);
- list_for_each_entry(wdev, &drv->netdev_list, list)
+ list_for_each_entry(wdev, &rdev->netdev_list, list)
dev_close(wdev->netdev);
- mutex_unlock(&drv->devlist_mtx);
+ mutex_unlock(&rdev->devlist_mtx);
rtnl_unlock();
return 0;
@@ -257,10 +288,25 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
static void cfg80211_rfkill_sync_work(struct work_struct *work)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
- drv = container_of(work, struct cfg80211_registered_device, rfkill_sync);
- cfg80211_rfkill_set_block(drv, rfkill_blocked(drv->rfkill));
+ rdev = container_of(work, struct cfg80211_registered_device, rfkill_sync);
+ cfg80211_rfkill_set_block(rdev, rfkill_blocked(rdev->rfkill));
+}
+
+static void cfg80211_event_work(struct work_struct *work)
+{
+ struct cfg80211_registered_device *rdev;
+
+ rdev = container_of(work, struct cfg80211_registered_device,
+ event_work);
+
+ rtnl_lock();
+ cfg80211_lock_rdev(rdev);
+
+ cfg80211_process_rdev_events(rdev);
+ cfg80211_unlock_rdev(rdev);
+ rtnl_unlock();
}
/* exported functions */
@@ -269,76 +315,90 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)
{
static int wiphy_counter;
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
int alloc_size;
- WARN_ON(!ops->add_key && ops->del_key);
- WARN_ON(ops->add_key && !ops->del_key);
+ WARN_ON(ops->add_key && (!ops->del_key || !ops->set_default_key));
+ WARN_ON(ops->auth && (!ops->assoc || !ops->deauth || !ops->disassoc));
+ WARN_ON(ops->connect && !ops->disconnect);
+ WARN_ON(ops->join_ibss && !ops->leave_ibss);
+ WARN_ON(ops->add_virtual_intf && !ops->del_virtual_intf);
+ WARN_ON(ops->add_station && !ops->del_station);
+ WARN_ON(ops->add_mpath && !ops->del_mpath);
- alloc_size = sizeof(*drv) + sizeof_priv;
+ alloc_size = sizeof(*rdev) + sizeof_priv;
- drv = kzalloc(alloc_size, GFP_KERNEL);
- if (!drv)
+ rdev = kzalloc(alloc_size, GFP_KERNEL);
+ if (!rdev)
return NULL;
- drv->ops = ops;
+ rdev->ops = ops;
mutex_lock(&cfg80211_mutex);
- drv->wiphy_idx = wiphy_counter++;
+ rdev->wiphy_idx = wiphy_counter++;
- if (unlikely(!wiphy_idx_valid(drv->wiphy_idx))) {
+ if (unlikely(!wiphy_idx_valid(rdev->wiphy_idx))) {
wiphy_counter--;
mutex_unlock(&cfg80211_mutex);
/* ugh, wrapped! */
- kfree(drv);
+ kfree(rdev);
return NULL;
}
mutex_unlock(&cfg80211_mutex);
/* give it a proper name */
- dev_set_name(&drv->wiphy.dev, PHY_NAME "%d", drv->wiphy_idx);
+ dev_set_name(&rdev->wiphy.dev, PHY_NAME "%d", rdev->wiphy_idx);
- mutex_init(&drv->mtx);
- mutex_init(&drv->devlist_mtx);
- INIT_LIST_HEAD(&drv->netdev_list);
- spin_lock_init(&drv->bss_lock);
- INIT_LIST_HEAD(&drv->bss_list);
+ mutex_init(&rdev->mtx);
+ mutex_init(&rdev->devlist_mtx);
+ INIT_LIST_HEAD(&rdev->netdev_list);
+ spin_lock_init(&rdev->bss_lock);
+ INIT_LIST_HEAD(&rdev->bss_list);
+ INIT_WORK(&rdev->scan_done_wk, __cfg80211_scan_done);
- device_initialize(&drv->wiphy.dev);
- drv->wiphy.dev.class = &ieee80211_class;
- drv->wiphy.dev.platform_data = drv;
+ device_initialize(&rdev->wiphy.dev);
+ rdev->wiphy.dev.class = &ieee80211_class;
+ rdev->wiphy.dev.platform_data = rdev;
- drv->rfkill_ops.set_block = cfg80211_rfkill_set_block;
- drv->rfkill = rfkill_alloc(dev_name(&drv->wiphy.dev),
- &drv->wiphy.dev, RFKILL_TYPE_WLAN,
- &drv->rfkill_ops, drv);
+ rdev->wiphy.ps_default = CONFIG_CFG80211_DEFAULT_PS_VALUE;
- if (!drv->rfkill) {
- kfree(drv);
+ wiphy_net_set(&rdev->wiphy, &init_net);
+
+ rdev->rfkill_ops.set_block = cfg80211_rfkill_set_block;
+ rdev->rfkill = rfkill_alloc(dev_name(&rdev->wiphy.dev),
+ &rdev->wiphy.dev, RFKILL_TYPE_WLAN,
+ &rdev->rfkill_ops, rdev);
+
+ if (!rdev->rfkill) {
+ kfree(rdev);
return NULL;
}
- INIT_WORK(&drv->rfkill_sync, cfg80211_rfkill_sync_work);
+ INIT_WORK(&rdev->rfkill_sync, cfg80211_rfkill_sync_work);
+ INIT_WORK(&rdev->conn_work, cfg80211_conn_work);
+ INIT_WORK(&rdev->event_work, cfg80211_event_work);
+
+ init_waitqueue_head(&rdev->dev_wait);
/*
* Initialize wiphy parameters to IEEE 802.11 MIB default values.
* Fragmentation and RTS threshold are disabled by default with the
* special -1 value.
*/
- drv->wiphy.retry_short = 7;
- drv->wiphy.retry_long = 4;
- drv->wiphy.frag_threshold = (u32) -1;
- drv->wiphy.rts_threshold = (u32) -1;
+ rdev->wiphy.retry_short = 7;
+ rdev->wiphy.retry_long = 4;
+ rdev->wiphy.frag_threshold = (u32) -1;
+ rdev->wiphy.rts_threshold = (u32) -1;
- return &drv->wiphy;
+ return &rdev->wiphy;
}
EXPORT_SYMBOL(wiphy_new);
int wiphy_register(struct wiphy *wiphy)
{
- struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy);
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
int res;
enum ieee80211_band band;
struct ieee80211_supported_band *sband;
@@ -346,9 +406,6 @@ int wiphy_register(struct wiphy *wiphy)
int i;
u16 ifmodes = wiphy->interface_modes;
- if (WARN_ON(wiphy->max_scan_ssids < 1))
- return -EINVAL;
-
/* sanity check ifmodes */
WARN_ON(!ifmodes);
ifmodes &= ((1 << __NL80211_IFTYPE_AFTER_LAST) - 1) & ~1;
@@ -395,11 +452,11 @@ int wiphy_register(struct wiphy *wiphy)
/* check and set up bitrates */
ieee80211_set_bitrate_flags(wiphy);
- res = device_add(&drv->wiphy.dev);
+ res = device_add(&rdev->wiphy.dev);
if (res)
return res;
- res = rfkill_register(drv->rfkill);
+ res = rfkill_register(rdev->rfkill);
if (res)
goto out_rm_dev;
@@ -408,16 +465,17 @@ int wiphy_register(struct wiphy *wiphy)
/* set up regulatory info */
wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
- list_add(&drv->list, &cfg80211_drv_list);
+ list_add(&rdev->list, &cfg80211_rdev_list);
+ cfg80211_rdev_list_generation++;
mutex_unlock(&cfg80211_mutex);
/* add to debugfs */
- drv->wiphy.debugfsdir =
- debugfs_create_dir(wiphy_name(&drv->wiphy),
+ rdev->wiphy.debugfsdir =
+ debugfs_create_dir(wiphy_name(&rdev->wiphy),
ieee80211_debugfs_dir);
- if (IS_ERR(drv->wiphy.debugfsdir))
- drv->wiphy.debugfsdir = NULL;
+ if (IS_ERR(rdev->wiphy.debugfsdir))
+ rdev->wiphy.debugfsdir = NULL;
if (wiphy->custom_regulatory) {
struct regulatory_request request;
@@ -430,83 +488,101 @@ int wiphy_register(struct wiphy *wiphy)
nl80211_send_reg_change_event(&request);
}
- cfg80211_debugfs_drv_add(drv);
+ cfg80211_debugfs_rdev_add(rdev);
return 0;
out_rm_dev:
- device_del(&drv->wiphy.dev);
+ device_del(&rdev->wiphy.dev);
return res;
}
EXPORT_SYMBOL(wiphy_register);
void wiphy_rfkill_start_polling(struct wiphy *wiphy)
{
- struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy);
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- if (!drv->ops->rfkill_poll)
+ if (!rdev->ops->rfkill_poll)
return;
- drv->rfkill_ops.poll = cfg80211_rfkill_poll;
- rfkill_resume_polling(drv->rfkill);
+ rdev->rfkill_ops.poll = cfg80211_rfkill_poll;
+ rfkill_resume_polling(rdev->rfkill);
}
EXPORT_SYMBOL(wiphy_rfkill_start_polling);
void wiphy_rfkill_stop_polling(struct wiphy *wiphy)
{
- struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy);
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- rfkill_pause_polling(drv->rfkill);
+ rfkill_pause_polling(rdev->rfkill);
}
EXPORT_SYMBOL(wiphy_rfkill_stop_polling);
void wiphy_unregister(struct wiphy *wiphy)
{
- struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy);
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- rfkill_unregister(drv->rfkill);
+ rfkill_unregister(rdev->rfkill);
/* protect the device list */
mutex_lock(&cfg80211_mutex);
- BUG_ON(!list_empty(&drv->netdev_list));
+ wait_event(rdev->dev_wait, ({
+ int __count;
+ mutex_lock(&rdev->devlist_mtx);
+ __count = rdev->opencount;
+ mutex_unlock(&rdev->devlist_mtx);
+ __count == 0;}));
+
+ mutex_lock(&rdev->devlist_mtx);
+ BUG_ON(!list_empty(&rdev->netdev_list));
+ mutex_unlock(&rdev->devlist_mtx);
+
+ /*
+ * First remove the hardware from everywhere; this makes
+ * it impossible to find from userspace.
+ */
+ cfg80211_debugfs_rdev_del(rdev);
+ list_del(&rdev->list);
/*
- * Try to grab drv->mtx. If a command is still in progress,
+ * Try to grab rdev->mtx. If a command is still in progress,
* hopefully the driver will refuse it since it's tearing
* down the device already. We wait for this command to complete
* before unlinking the item from the list.
* Note: as codified by the BUG_ON above we cannot get here if
- * a virtual interface is still associated. Hence, we can only
- * get to lock contention here if userspace issues a command
- * that identified the hardware by wiphy index.
+ * a virtual interface is still present. Hence, we can only get
+ * to lock contention here if userspace issues a command that
+ * identified the hardware by wiphy index.
*/
- mutex_lock(&drv->mtx);
- /* unlock again before freeing */
- mutex_unlock(&drv->mtx);
-
- cfg80211_debugfs_drv_del(drv);
+ cfg80211_lock_rdev(rdev);
+ /* nothing */
+ cfg80211_unlock_rdev(rdev);
/* If this device got a regulatory hint tell core its
* free to listen now to a new shiny device regulatory hint */
reg_device_remove(wiphy);
- list_del(&drv->list);
- device_del(&drv->wiphy.dev);
- debugfs_remove(drv->wiphy.debugfsdir);
+ cfg80211_rdev_list_generation++;
+ device_del(&rdev->wiphy.dev);
+ debugfs_remove(rdev->wiphy.debugfsdir);
mutex_unlock(&cfg80211_mutex);
+
+ flush_work(&rdev->scan_done_wk);
+ cancel_work_sync(&rdev->conn_work);
+ flush_work(&rdev->event_work);
}
EXPORT_SYMBOL(wiphy_unregister);
-void cfg80211_dev_free(struct cfg80211_registered_device *drv)
+void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
{
struct cfg80211_internal_bss *scan, *tmp;
- rfkill_destroy(drv->rfkill);
- mutex_destroy(&drv->mtx);
- mutex_destroy(&drv->devlist_mtx);
- list_for_each_entry_safe(scan, tmp, &drv->bss_list, list)
+ rfkill_destroy(rdev->rfkill);
+ mutex_destroy(&rdev->mtx);
+ mutex_destroy(&rdev->devlist_mtx);
+ list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
cfg80211_put_bss(&scan->pub);
- kfree(drv);
+ kfree(rdev);
}
void wiphy_free(struct wiphy *wiphy)
@@ -517,68 +593,181 @@ EXPORT_SYMBOL(wiphy_free);
void wiphy_rfkill_set_hw_state(struct wiphy *wiphy, bool blocked)
{
- struct cfg80211_registered_device *drv = wiphy_to_dev(wiphy);
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- if (rfkill_set_hw_state(drv->rfkill, blocked))
- schedule_work(&drv->rfkill_sync);
+ if (rfkill_set_hw_state(rdev->rfkill, blocked))
+ schedule_work(&rdev->rfkill_sync);
}
EXPORT_SYMBOL(wiphy_rfkill_set_hw_state);
+static void wdev_cleanup_work(struct work_struct *work)
+{
+ struct wireless_dev *wdev;
+ struct cfg80211_registered_device *rdev;
+
+ wdev = container_of(work, struct wireless_dev, cleanup_work);
+ rdev = wiphy_to_dev(wdev->wiphy);
+
+ cfg80211_lock_rdev(rdev);
+
+ if (WARN_ON(rdev->scan_req && rdev->scan_req->dev == wdev->netdev)) {
+ rdev->scan_req->aborted = true;
+ ___cfg80211_scan_done(rdev, true);
+ }
+
+ cfg80211_unlock_rdev(rdev);
+
+ mutex_lock(&rdev->devlist_mtx);
+ rdev->opencount--;
+ mutex_unlock(&rdev->devlist_mtx);
+ wake_up(&rdev->dev_wait);
+
+ dev_put(wdev->netdev);
+}
+
static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
unsigned long state,
void *ndev)
{
struct net_device *dev = ndev;
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev;
- if (!dev->ieee80211_ptr)
+ if (!wdev)
return NOTIFY_DONE;
- rdev = wiphy_to_dev(dev->ieee80211_ptr->wiphy);
+ rdev = wiphy_to_dev(wdev->wiphy);
- WARN_ON(dev->ieee80211_ptr->iftype == NL80211_IFTYPE_UNSPECIFIED);
+ WARN_ON(wdev->iftype == NL80211_IFTYPE_UNSPECIFIED);
switch (state) {
case NETDEV_REGISTER:
+ /*
+ * NB: cannot take rdev->mtx here because this may be
+ * called within code protected by it when interfaces
+ * are added with nl80211.
+ */
+ mutex_init(&wdev->mtx);
+ INIT_WORK(&wdev->cleanup_work, wdev_cleanup_work);
+ INIT_LIST_HEAD(&wdev->event_list);
+ spin_lock_init(&wdev->event_lock);
mutex_lock(&rdev->devlist_mtx);
- list_add(&dev->ieee80211_ptr->list, &rdev->netdev_list);
+ list_add(&wdev->list, &rdev->netdev_list);
+ rdev->devlist_generation++;
+ /* can only change netns with wiphy */
+ dev->features |= NETIF_F_NETNS_LOCAL;
+
if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj,
"phy80211")) {
printk(KERN_ERR "wireless: failed to add phy80211 "
"symlink to netdev!\n");
}
- dev->ieee80211_ptr->netdev = dev;
+ wdev->netdev = dev;
+ wdev->sme_state = CFG80211_SME_IDLE;
+ mutex_unlock(&rdev->devlist_mtx);
#ifdef CONFIG_WIRELESS_EXT
- dev->ieee80211_ptr->wext.default_key = -1;
- dev->ieee80211_ptr->wext.default_mgmt_key = -1;
+ if (!dev->wireless_handlers)
+ dev->wireless_handlers = &cfg80211_wext_handler;
+ wdev->wext.default_key = -1;
+ wdev->wext.default_mgmt_key = -1;
+ wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
+ wdev->wext.ps = wdev->wiphy->ps_default;
+ wdev->wext.ps_timeout = 100;
+ if (rdev->ops->set_power_mgmt)
+ if (rdev->ops->set_power_mgmt(wdev->wiphy, dev,
+ wdev->wext.ps,
+ wdev->wext.ps_timeout)) {
+ /* assume this means it's off */
+ wdev->wext.ps = false;
+ }
#endif
- mutex_unlock(&rdev->devlist_mtx);
break;
case NETDEV_GOING_DOWN:
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC)
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ cfg80211_leave_ibss(rdev, dev, true);
break;
- if (!dev->ieee80211_ptr->ssid_len)
+ case NL80211_IFTYPE_STATION:
+ wdev_lock(wdev);
+#ifdef CONFIG_WIRELESS_EXT
+ kfree(wdev->wext.ie);
+ wdev->wext.ie = NULL;
+ wdev->wext.ie_len = 0;
+ wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
+#endif
+ __cfg80211_disconnect(rdev, dev,
+ WLAN_REASON_DEAUTH_LEAVING, true);
+ cfg80211_mlme_down(rdev, dev);
+ wdev_unlock(wdev);
+ break;
+ default:
break;
- cfg80211_leave_ibss(rdev, dev, true);
+ }
+ break;
+ case NETDEV_DOWN:
+ dev_hold(dev);
+ schedule_work(&wdev->cleanup_work);
break;
case NETDEV_UP:
+ /*
+ * If we have a really quick DOWN/UP succession we may
+ * have this work still pending ... cancel it and see
+ * if it was pending, in which case we need to account
+ * for some of the work it would have done.
+ */
+ if (cancel_work_sync(&wdev->cleanup_work)) {
+ mutex_lock(&rdev->devlist_mtx);
+ rdev->opencount--;
+ mutex_unlock(&rdev->devlist_mtx);
+ dev_put(dev);
+ }
#ifdef CONFIG_WIRELESS_EXT
- if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC)
+ cfg80211_lock_rdev(rdev);
+ mutex_lock(&rdev->devlist_mtx);
+ wdev_lock(wdev);
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ cfg80211_ibss_wext_join(rdev, wdev);
break;
- if (!dev->ieee80211_ptr->wext.ibss.ssid_len)
+ case NL80211_IFTYPE_STATION:
+ cfg80211_mgd_wext_connect(rdev, wdev);
break;
- cfg80211_join_ibss(rdev, dev, &dev->ieee80211_ptr->wext.ibss);
- break;
+ default:
+ break;
+ }
+ wdev_unlock(wdev);
+ rdev->opencount++;
+ mutex_unlock(&rdev->devlist_mtx);
+ cfg80211_unlock_rdev(rdev);
#endif
+ break;
case NETDEV_UNREGISTER:
+ /*
+ * NB: cannot take rdev->mtx here because this may be
+ * called within code protected by it when interfaces
+ * are removed with nl80211.
+ */
mutex_lock(&rdev->devlist_mtx);
- if (!list_empty(&dev->ieee80211_ptr->list)) {
+ /*
+ * It is possible to get NETDEV_UNREGISTER
+ * multiple times. To detect that, check
+ * that the interface is still on the list
+ * of registered interfaces, and only then
+ * remove and clean it up.
+ */
+ if (!list_empty(&wdev->list)) {
sysfs_remove_link(&dev->dev.kobj, "phy80211");
- list_del_init(&dev->ieee80211_ptr->list);
+ list_del_init(&wdev->list);
+ rdev->devlist_generation++;
+#ifdef CONFIG_WIRELESS_EXT
+ kfree(wdev->wext.keys);
+#endif
}
mutex_unlock(&rdev->devlist_mtx);
break;
case NETDEV_PRE_UP:
+ if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
+ return notifier_from_errno(-EOPNOTSUPP);
if (rfkill_blocked(rdev->rfkill))
return notifier_from_errno(-ERFKILL);
break;
@@ -591,10 +780,32 @@ static struct notifier_block cfg80211_netdev_notifier = {
.notifier_call = cfg80211_netdev_notifier_call,
};
-static int cfg80211_init(void)
+static void __net_exit cfg80211_pernet_exit(struct net *net)
+{
+ struct cfg80211_registered_device *rdev;
+
+ rtnl_lock();
+ mutex_lock(&cfg80211_mutex);
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ if (net_eq(wiphy_net(&rdev->wiphy), net))
+ WARN_ON(cfg80211_switch_netns(rdev, &init_net));
+ }
+ mutex_unlock(&cfg80211_mutex);
+ rtnl_unlock();
+}
+
+static struct pernet_operations cfg80211_pernet_ops = {
+ .exit = cfg80211_pernet_exit,
+};
+
+static int __init cfg80211_init(void)
{
int err;
+ err = register_pernet_device(&cfg80211_pernet_ops);
+ if (err)
+ goto out_fail_pernet;
+
err = wiphy_sysfs_init();
if (err)
goto out_fail_sysfs;
@@ -622,9 +833,10 @@ out_fail_nl80211:
out_fail_notifier:
wiphy_sysfs_exit();
out_fail_sysfs:
+ unregister_pernet_device(&cfg80211_pernet_ops);
+out_fail_pernet:
return err;
}
-
subsys_initcall(cfg80211_init);
static void cfg80211_exit(void)
@@ -634,5 +846,6 @@ static void cfg80211_exit(void)
unregister_netdevice_notifier(&cfg80211_netdev_notifier);
wiphy_sysfs_exit();
regulatory_exit();
+ unregister_pernet_device(&cfg80211_pernet_ops);
}
module_exit(cfg80211_exit);
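
cfg80211_switch_netns() above moves every netdev of a wiphy into the new namespace and, if one move fails, walks back over the entries it already changed and restores them. A generic restatement of that apply-then-rollback loop as a self-contained sketch:

#include <stdio.h>

static int apply(int i)
{
        return i == 3 ? -1 : 0;         /* pretend the fourth entry fails */
}

static void undo(int i)
{
        printf("rolling back entry %d\n", i);
}

int main(void)
{
        int i, err = 0;

        for (i = 0; i < 5; i++) {
                err = apply(i);
                if (err)
                        break;
        }
        if (err)
                for (i--; i >= 0; i--)  /* only the entries already changed */
                        undo(i);
        return err ? 1 : 0;
}
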
diff --git a/net/wireless/core.h b/net/wireless/core.h
index bfa340c7abb..2a33d8bc886 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -49,6 +49,9 @@ struct cfg80211_registered_device {
/* associate netdev list */
struct mutex devlist_mtx;
struct list_head netdev_list;
+ int devlist_generation;
+ int opencount; /* also protected by devlist_mtx */
+ wait_queue_head_t dev_wait;
/* BSSes/scanning */
spinlock_t bss_lock;
@@ -57,6 +60,17 @@ struct cfg80211_registered_device {
u32 bss_generation;
struct cfg80211_scan_request *scan_req; /* protected by RTNL */
unsigned long suspend_at;
+ struct work_struct scan_done_wk;
+
+#ifdef CONFIG_NL80211_TESTMODE
+ struct genl_info *testmode_info;
+#endif
+
+ struct work_struct conn_work;
+ struct work_struct event_work;
+
+ /* current channel */
+ struct ieee80211_channel *channel;
#ifdef CONFIG_CFG80211_DEBUGFS
/* Debugfs entries */
@@ -89,13 +103,14 @@ bool wiphy_idx_valid(int wiphy_idx)
}
extern struct mutex cfg80211_mutex;
-extern struct list_head cfg80211_drv_list;
+extern struct list_head cfg80211_rdev_list;
+extern int cfg80211_rdev_list_generation;
#define assert_cfg80211_lock() WARN_ON(!mutex_is_locked(&cfg80211_mutex))
/*
* You can use this to mark a wiphy_idx as not having an associated wiphy.
- * It guarantees cfg80211_drv_by_wiphy_idx(wiphy_idx) will return NULL
+ * It guarantees cfg80211_rdev_by_wiphy_idx(wiphy_idx) will return NULL
*/
#define WIPHY_IDX_STALE -1
@@ -104,17 +119,40 @@ struct cfg80211_internal_bss {
struct rb_node rbn;
unsigned long ts;
struct kref ref;
- bool hold, ies_allocated;
+ atomic_t hold;
+ bool ies_allocated;
/* must be last because of priv member */
struct cfg80211_bss pub;
};
-struct cfg80211_registered_device *cfg80211_drv_by_wiphy_idx(int wiphy_idx);
+static inline struct cfg80211_internal_bss *bss_from_pub(struct cfg80211_bss *pub)
+{
+ return container_of(pub, struct cfg80211_internal_bss, pub);
+}
+
+static inline void cfg80211_ref_bss(struct cfg80211_internal_bss *bss)
+{
+ kref_get(&bss->ref);
+}
+
+static inline void cfg80211_hold_bss(struct cfg80211_internal_bss *bss)
+{
+ atomic_inc(&bss->hold);
+}
+
+static inline void cfg80211_unhold_bss(struct cfg80211_internal_bss *bss)
+{
+ int r = atomic_dec_return(&bss->hold);
+ WARN_ON(r < 0);
+}
+
+
+struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx);
int get_wiphy_idx(struct wiphy *wiphy);
struct cfg80211_registered_device *
-__cfg80211_drv_from_info(struct genl_info *info);
+__cfg80211_rdev_from_info(struct genl_info *info);
/*
* This function returns a pointer to the driver
@@ -122,12 +160,12 @@ __cfg80211_drv_from_info(struct genl_info *info);
* If successful, it returns non-NULL and also locks
* the driver's mutex!
*
- * This means that you need to call cfg80211_put_dev()
+ * This means that you need to call cfg80211_unlock_rdev()
* before being allowed to acquire &cfg80211_mutex!
*
* This is necessary because we need to lock the global
* mutex to get an item off the list safely, and then
- * we lock the drv mutex so it doesn't go away under us.
+ * we lock the rdev mutex so it doesn't go away under us.
*
* We don't want to keep cfg80211_mutex locked
* for all the time in order to allow requests on
@@ -139,19 +177,93 @@ __cfg80211_drv_from_info(struct genl_info *info);
extern struct cfg80211_registered_device *
cfg80211_get_dev_from_info(struct genl_info *info);
-/* requires cfg80211_drv_mutex to be held! */
+/* requires cfg80211_rdev_mutex to be held! */
struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx);
/* identical to cfg80211_get_dev_from_info but only operate on ifindex */
extern struct cfg80211_registered_device *
-cfg80211_get_dev_from_ifindex(int ifindex);
+cfg80211_get_dev_from_ifindex(struct net *net, int ifindex);
+
+int cfg80211_switch_netns(struct cfg80211_registered_device *rdev,
+ struct net *net);
+
+static inline void cfg80211_lock_rdev(struct cfg80211_registered_device *rdev)
+{
+ mutex_lock(&rdev->mtx);
+}
+
+static inline void cfg80211_unlock_rdev(struct cfg80211_registered_device *rdev)
+{
+ BUG_ON(IS_ERR(rdev) || !rdev);
+ mutex_unlock(&rdev->mtx);
+}
+
+static inline void wdev_lock(struct wireless_dev *wdev)
+ __acquires(wdev)
+{
+ mutex_lock(&wdev->mtx);
+ __acquire(wdev->mtx);
+}
+
+static inline void wdev_unlock(struct wireless_dev *wdev)
+ __releases(wdev)
+{
+ __release(wdev->mtx);
+ mutex_unlock(&wdev->mtx);
+}
+
+#define ASSERT_RDEV_LOCK(rdev) WARN_ON(!mutex_is_locked(&(rdev)->mtx));
+#define ASSERT_WDEV_LOCK(wdev) WARN_ON(!mutex_is_locked(&(wdev)->mtx));
+
+enum cfg80211_event_type {
+ EVENT_CONNECT_RESULT,
+ EVENT_ROAMED,
+ EVENT_DISCONNECTED,
+ EVENT_IBSS_JOINED,
+};
+
+struct cfg80211_event {
+ struct list_head list;
+ enum cfg80211_event_type type;
+
+ union {
+ struct {
+ u8 bssid[ETH_ALEN];
+ const u8 *req_ie;
+ const u8 *resp_ie;
+ size_t req_ie_len;
+ size_t resp_ie_len;
+ u16 status;
+ } cr;
+ struct {
+ u8 bssid[ETH_ALEN];
+ const u8 *req_ie;
+ const u8 *resp_ie;
+ size_t req_ie_len;
+ size_t resp_ie_len;
+ } rm;
+ struct {
+ const u8 *ie;
+ size_t ie_len;
+ u16 reason;
+ } dc;
+ struct {
+ u8 bssid[ETH_ALEN];
+ } ij;
+ };
+};
+
+struct cfg80211_cached_keys {
+ struct key_params params[6];
+ u8 data[6][WLAN_MAX_KEY_LEN];
+ int def, defmgmt;
+};
-extern void cfg80211_put_dev(struct cfg80211_registered_device *drv);
/* free object */
-extern void cfg80211_dev_free(struct cfg80211_registered_device *drv);
+extern void cfg80211_dev_free(struct cfg80211_registered_device *rdev);
-extern int cfg80211_dev_rename(struct cfg80211_registered_device *drv,
+extern int cfg80211_dev_rename(struct cfg80211_registered_device *rdev,
char *newname);
void ieee80211_set_bitrate_flags(struct wiphy *wiphy);
@@ -163,15 +275,124 @@ void cfg80211_bss_age(struct cfg80211_registered_device *dev,
unsigned long age_secs);
/* IBSS */
+int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_ibss_params *params,
+ struct cfg80211_cached_keys *connkeys);
int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
struct net_device *dev,
- struct cfg80211_ibss_params *params);
+ struct cfg80211_ibss_params *params,
+ struct cfg80211_cached_keys *connkeys);
void cfg80211_clear_ibss(struct net_device *dev, bool nowext);
int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
struct net_device *dev, bool nowext);
+void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid);
+int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev);
+
+/* MLME */
+int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_auth_type auth_type,
+ const u8 *bssid,
+ const u8 *ssid, int ssid_len,
+ const u8 *ie, int ie_len,
+ const u8 *key, int key_len, int key_idx);
+int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, struct ieee80211_channel *chan,
+ enum nl80211_auth_type auth_type, const u8 *bssid,
+ const u8 *ssid, int ssid_len,
+ const u8 *ie, int ie_len,
+ const u8 *key, int key_len, int key_idx);
+int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct ieee80211_channel *chan,
+ const u8 *bssid, const u8 *prev_bssid,
+ const u8 *ssid, int ssid_len,
+ const u8 *ie, int ie_len, bool use_mfp,
+ struct cfg80211_crypto_settings *crypt);
+int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, struct ieee80211_channel *chan,
+ const u8 *bssid, const u8 *prev_bssid,
+ const u8 *ssid, int ssid_len,
+ const u8 *ie, int ie_len, bool use_mfp,
+ struct cfg80211_crypto_settings *crypt);
+int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, const u8 *bssid,
+ const u8 *ie, int ie_len, u16 reason);
+int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, const u8 *bssid,
+ const u8 *ie, int ie_len, u16 reason);
+int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, const u8 *bssid,
+ const u8 *ie, int ie_len, u16 reason);
+void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
+ struct net_device *dev);
+void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
+ const u8 *req_ie, size_t req_ie_len,
+ const u8 *resp_ie, size_t resp_ie_len,
+ u16 status, bool wextev,
+ struct cfg80211_bss *bss);
+
+/* SME */
+int __cfg80211_connect(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_connect_params *connect,
+ struct cfg80211_cached_keys *connkeys,
+ const u8 *prev_bssid);
+int cfg80211_connect(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_connect_params *connect,
+ struct cfg80211_cached_keys *connkeys);
+int __cfg80211_disconnect(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, u16 reason,
+ bool wextev);
+int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, u16 reason,
+ bool wextev);
+void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid,
+ const u8 *req_ie, size_t req_ie_len,
+ const u8 *resp_ie, size_t resp_ie_len);
+int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev);
+
+void cfg80211_conn_work(struct work_struct *work);
+bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev);
/* internal helpers */
-int cfg80211_validate_key_settings(struct key_params *params, int key_idx,
+int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
+ struct key_params *params, int key_idx,
const u8 *mac_addr);
+void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
+ size_t ie_len, u16 reason, bool from_ap);
+void cfg80211_sme_scan_done(struct net_device *dev);
+void cfg80211_sme_rx_auth(struct net_device *dev, const u8 *buf, size_t len);
+void cfg80211_sme_disassoc(struct net_device *dev, int idx);
+void __cfg80211_scan_done(struct work_struct *wk);
+void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak);
+void cfg80211_upload_connect_keys(struct wireless_dev *wdev);
+int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, enum nl80211_iftype ntype,
+ u32 *flags, struct vif_params *params);
+void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
+
+struct ieee80211_channel *
+rdev_fixed_channel(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *for_wdev);
+int rdev_set_freq(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *for_wdev,
+ int freq, enum nl80211_channel_type channel_type);
+
+#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
+#define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond)
+#else
+/*
+ * Trick to enable using it as a condition,
+ * and also not give a warning when it's
+ * not used that way.
+ */
+#define CFG80211_DEV_WARN_ON(cond) ({bool __r = (cond); __r; })
+#endif
#endif /* __NET_WIRELESS_CORE_H */
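
The CFG80211_DEV_WARN_ON() fallback added above leans on a GNU C statement expression so that, with the developer warnings compiled out, the macro still evaluates its argument exactly once and yields a boolean that can sit inside an if (). A minimal user-space sketch of the same trick follows; DEV_WARN_ON and bad_state are illustrative names, not identifiers from the patch.

#include <stdbool.h>
#include <stdio.h>

/*
 * With warnings enabled this would expand to WARN_ON(cond); the
 * fallback must still evaluate cond exactly once and hand back its
 * value so callers may write either `DEV_WARN_ON(x);` or
 * `if (DEV_WARN_ON(x)) ...` without extra compiler noise.
 */
#define DEV_WARN_ON(cond) ({ bool __r = (cond); __r; })

int main(void)
{
	int bad_state = 1;

	/* usable as a plain statement ... */
	DEV_WARN_ON(bad_state);

	/* ... and as a condition, exactly like WARN_ON() */
	if (DEV_WARN_ON(bad_state))
		printf("recovering from bad state\n");

	return 0;
}

Either spelling compiles cleanly, which is the whole point of picking a statement expression over a bare (cond) or an empty do/while body.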
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
index 679ddfcec1e..13d93d84f90 100644
--- a/net/wireless/debugfs.c
+++ b/net/wireless/debugfs.c
@@ -104,15 +104,15 @@ static const struct file_operations ht40allow_map_ops = {
};
#define DEBUGFS_ADD(name) \
- drv->debugfs.name = debugfs_create_file(#name, S_IRUGO, phyd, \
- &drv->wiphy, &name## _ops);
+ rdev->debugfs.name = debugfs_create_file(#name, S_IRUGO, phyd, \
+ &rdev->wiphy, &name## _ops);
#define DEBUGFS_DEL(name) \
- debugfs_remove(drv->debugfs.name); \
- drv->debugfs.name = NULL;
+ debugfs_remove(rdev->debugfs.name); \
+ rdev->debugfs.name = NULL;
-void cfg80211_debugfs_drv_add(struct cfg80211_registered_device *drv)
+void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev)
{
- struct dentry *phyd = drv->wiphy.debugfsdir;
+ struct dentry *phyd = rdev->wiphy.debugfsdir;
DEBUGFS_ADD(rts_threshold);
DEBUGFS_ADD(fragmentation_threshold);
@@ -121,7 +121,7 @@ void cfg80211_debugfs_drv_add(struct cfg80211_registered_device *drv)
DEBUGFS_ADD(ht40allow_map);
}
-void cfg80211_debugfs_drv_del(struct cfg80211_registered_device *drv)
+void cfg80211_debugfs_rdev_del(struct cfg80211_registered_device *rdev)
{
DEBUGFS_DEL(rts_threshold);
DEBUGFS_DEL(fragmentation_threshold);
diff --git a/net/wireless/debugfs.h b/net/wireless/debugfs.h
index c226983ae66..6419b6d6ce3 100644
--- a/net/wireless/debugfs.h
+++ b/net/wireless/debugfs.h
@@ -2,13 +2,13 @@
#define __CFG80211_DEBUGFS_H
#ifdef CONFIG_CFG80211_DEBUGFS
-void cfg80211_debugfs_drv_add(struct cfg80211_registered_device *drv);
-void cfg80211_debugfs_drv_del(struct cfg80211_registered_device *drv);
+void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev);
+void cfg80211_debugfs_rdev_del(struct cfg80211_registered_device *rdev);
#else
static inline
-void cfg80211_debugfs_drv_add(struct cfg80211_registered_device *drv) {}
+void cfg80211_debugfs_rdev_add(struct cfg80211_registered_device *rdev) {}
static inline
-void cfg80211_debugfs_drv_del(struct cfg80211_registered_device *drv) {}
+void cfg80211_debugfs_rdev_del(struct cfg80211_registered_device *rdev) {}
#endif
#endif /* __CFG80211_DEBUGFS_H */
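
The DEBUGFS_ADD()/DEBUGFS_DEL() macros touched above depend on preprocessor stringification (#name supplies the file name) and token pasting (name##_ops selects the matching per-attribute operations table). A small stand-alone sketch of that pattern, using made-up register_file()/struct ops stand-ins rather than the real debugfs API:

#include <stdio.h>

struct ops { const char *what; };

/* stand-in for debugfs_create_file(); names here are hypothetical */
static void register_file(const char *fname, const struct ops *ops)
{
	printf("create \"%s\" -> %s handler\n", fname, ops->what);
}

/* one ops table per attribute, mirroring rts_threshold_ops etc. */
static const struct ops rts_threshold_ops = { "rts" };
static const struct ops fragmentation_threshold_ops = { "frag" };

/* #name gives the file name, name##_ops picks the matching table */
#define DEBUGFS_ADD(name) \
	register_file(#name, &name##_ops)

int main(void)
{
	DEBUGFS_ADD(rts_threshold);
	DEBUGFS_ADD(fragmentation_threshold);
	return 0;
}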
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index a4a1c3498ff..c8833891197 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -7,10 +7,11 @@
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <net/cfg80211.h>
+#include "wext-compat.h"
#include "nl80211.h"
-void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
+void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_bss *bss;
@@ -21,10 +22,7 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
return;
- if (WARN_ON(!wdev->ssid_len))
- return;
-
- if (memcmp(bssid, wdev->bssid, ETH_ALEN) == 0)
+ if (!wdev->ssid_len)
return;
bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
@@ -36,39 +34,76 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
if (wdev->current_bss) {
cfg80211_unhold_bss(wdev->current_bss);
- cfg80211_put_bss(wdev->current_bss);
+ cfg80211_put_bss(&wdev->current_bss->pub);
}
- cfg80211_hold_bss(bss);
- wdev->current_bss = bss;
- memcpy(wdev->bssid, bssid, ETH_ALEN);
+ cfg80211_hold_bss(bss_from_pub(bss));
+ wdev->current_bss = bss_from_pub(bss);
+
+ cfg80211_upload_connect_keys(wdev);
- nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, gfp);
+ nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid,
+ GFP_KERNEL);
#ifdef CONFIG_WIRELESS_EXT
memset(&wrqu, 0, sizeof(wrqu));
memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
#endif
}
+
+void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct cfg80211_event *ev;
+ unsigned long flags;
+
+ CFG80211_DEV_WARN_ON(!wdev->ssid_len);
+
+ ev = kzalloc(sizeof(*ev), gfp);
+ if (!ev)
+ return;
+
+ ev->type = EVENT_IBSS_JOINED;
+ memcpy(ev->cr.bssid, bssid, ETH_ALEN);
+
+ spin_lock_irqsave(&wdev->event_lock, flags);
+ list_add_tail(&ev->list, &wdev->event_list);
+ spin_unlock_irqrestore(&wdev->event_lock, flags);
+ schedule_work(&rdev->event_work);
+}
EXPORT_SYMBOL(cfg80211_ibss_joined);
-int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
- struct net_device *dev,
- struct cfg80211_ibss_params *params)
+int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_ibss_params *params,
+ struct cfg80211_cached_keys *connkeys)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct ieee80211_channel *chan;
int err;
+ ASSERT_WDEV_LOCK(wdev);
+
+ chan = rdev_fixed_channel(rdev, wdev);
+ if (chan && chan != params->channel)
+ return -EBUSY;
+
if (wdev->ssid_len)
return -EALREADY;
+ if (WARN_ON(wdev->connect_keys))
+ kfree(wdev->connect_keys);
+ wdev->connect_keys = connkeys;
+
#ifdef CONFIG_WIRELESS_EXT
wdev->wext.ibss.channel = params->channel;
#endif
err = rdev->ops->join_ibss(&rdev->wiphy, dev, params);
-
- if (err)
+ if (err) {
+ wdev->connect_keys = NULL;
return err;
+ }
memcpy(wdev->ssid, params->ssid, params->ssid_len);
wdev->ssid_len = params->ssid_len;
@@ -76,45 +111,107 @@ int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
return 0;
}
-void cfg80211_clear_ibss(struct net_device *dev, bool nowext)
+int cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_ibss_params *params,
+ struct cfg80211_cached_keys *connkeys)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ mutex_lock(&rdev->devlist_mtx);
+ wdev_lock(wdev);
+ err = __cfg80211_join_ibss(rdev, dev, params, connkeys);
+ wdev_unlock(wdev);
+ mutex_unlock(&rdev->devlist_mtx);
+
+ return err;
+}
+
+static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ int i;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ kfree(wdev->connect_keys);
+ wdev->connect_keys = NULL;
+
+ /*
+ * Delete all the keys ... pairwise keys can't really
+ * exist any more anyway, but default keys might.
+ */
+ if (rdev->ops->del_key)
+ for (i = 0; i < 6; i++)
+ rdev->ops->del_key(wdev->wiphy, dev, i, NULL);
if (wdev->current_bss) {
cfg80211_unhold_bss(wdev->current_bss);
- cfg80211_put_bss(wdev->current_bss);
+ cfg80211_put_bss(&wdev->current_bss->pub);
}
wdev->current_bss = NULL;
wdev->ssid_len = 0;
- memset(wdev->bssid, 0, ETH_ALEN);
#ifdef CONFIG_WIRELESS_EXT
if (!nowext)
wdev->wext.ibss.ssid_len = 0;
#endif
}
-int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
- struct net_device *dev, bool nowext)
+void cfg80211_clear_ibss(struct net_device *dev, bool nowext)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ wdev_lock(wdev);
+ __cfg80211_clear_ibss(dev, nowext);
+ wdev_unlock(wdev);
+}
+
+static int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, bool nowext)
{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
int err;
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (!wdev->ssid_len)
+ return -ENOLINK;
+
err = rdev->ops->leave_ibss(&rdev->wiphy, dev);
if (err)
return err;
- cfg80211_clear_ibss(dev, nowext);
+ __cfg80211_clear_ibss(dev, nowext);
return 0;
}
+int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, bool nowext)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ wdev_lock(wdev);
+ err = __cfg80211_leave_ibss(rdev, dev, nowext);
+ wdev_unlock(wdev);
+
+ return err;
+}
+
#ifdef CONFIG_WIRELESS_EXT
-static int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev)
+int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev)
{
+ struct cfg80211_cached_keys *ck = NULL;
enum ieee80211_band band;
- int i;
+ int i, err;
+
+ ASSERT_WDEV_LOCK(wdev);
if (!wdev->wext.ibss.beacon_interval)
wdev->wext.ibss.beacon_interval = 100;
@@ -154,43 +251,66 @@ static int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
if (!netif_running(wdev->netdev))
return 0;
- return cfg80211_join_ibss(wiphy_to_dev(wdev->wiphy),
- wdev->netdev, &wdev->wext.ibss);
+ if (wdev->wext.keys)
+ wdev->wext.keys->def = wdev->wext.default_key;
+
+ wdev->wext.ibss.privacy = wdev->wext.default_key != -1;
+
+ if (wdev->wext.keys) {
+ ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL);
+ if (!ck)
+ return -ENOMEM;
+ for (i = 0; i < 6; i++)
+ ck->params[i].key = ck->data[i];
+ }
+ err = __cfg80211_join_ibss(rdev, wdev->netdev,
+ &wdev->wext.ibss, ck);
+ if (err)
+ kfree(ck);
+
+ return err;
}
int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
struct iw_request_info *info,
- struct iw_freq *freq, char *extra)
+ struct iw_freq *wextfreq, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct ieee80211_channel *chan;
- int err;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct ieee80211_channel *chan = NULL;
+ int err, freq;
/* call only for ibss! */
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
return -EINVAL;
- if (!wiphy_to_dev(wdev->wiphy)->ops->join_ibss)
+ if (!rdev->ops->join_ibss)
return -EOPNOTSUPP;
- chan = cfg80211_wext_freq(wdev->wiphy, freq);
- if (chan && IS_ERR(chan))
- return PTR_ERR(chan);
+ freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
+ if (freq < 0)
+ return freq;
- if (chan &&
- (chan->flags & IEEE80211_CHAN_NO_IBSS ||
- chan->flags & IEEE80211_CHAN_DISABLED))
- return -EINVAL;
+ if (freq) {
+ chan = ieee80211_get_channel(wdev->wiphy, freq);
+ if (!chan)
+ return -EINVAL;
+ if (chan->flags & IEEE80211_CHAN_NO_IBSS ||
+ chan->flags & IEEE80211_CHAN_DISABLED)
+ return -EINVAL;
+ }
if (wdev->wext.ibss.channel == chan)
return 0;
- if (wdev->ssid_len) {
- err = cfg80211_leave_ibss(wiphy_to_dev(wdev->wiphy),
- dev, true);
- if (err)
- return err;
- }
+ wdev_lock(wdev);
+ err = 0;
+ if (wdev->ssid_len)
+ err = __cfg80211_leave_ibss(rdev, dev, true);
+ wdev_unlock(wdev);
+
+ if (err)
+ return err;
if (chan) {
wdev->wext.ibss.channel = chan;
@@ -200,10 +320,14 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
wdev->wext.ibss.channel_fixed = false;
}
- return cfg80211_ibss_wext_join(wiphy_to_dev(wdev->wiphy), wdev);
+ mutex_lock(&rdev->devlist_mtx);
+ wdev_lock(wdev);
+ err = cfg80211_ibss_wext_join(rdev, wdev);
+ wdev_unlock(wdev);
+ mutex_unlock(&rdev->devlist_mtx);
+
+ return err;
}
-/* temporary symbol - mark GPL - in the future the handler won't be */
-EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_siwfreq);
int cfg80211_ibss_wext_giwfreq(struct net_device *dev,
struct iw_request_info *info,
@@ -216,10 +340,12 @@ int cfg80211_ibss_wext_giwfreq(struct net_device *dev,
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
return -EINVAL;
+ wdev_lock(wdev);
if (wdev->current_bss)
- chan = wdev->current_bss->channel;
+ chan = wdev->current_bss->pub.channel;
else if (wdev->wext.ibss.channel)
chan = wdev->wext.ibss.channel;
+ wdev_unlock(wdev);
if (chan) {
freq->m = chan->center_freq;
@@ -230,14 +356,13 @@ int cfg80211_ibss_wext_giwfreq(struct net_device *dev,
/* no channel if not joining */
return -EINVAL;
}
-/* temporary symbol - mark GPL - in the future the handler won't be */
-EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_giwfreq);
int cfg80211_ibss_wext_siwessid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *data, char *ssid)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
size_t len = data->length;
int err;
@@ -245,15 +370,17 @@ int cfg80211_ibss_wext_siwessid(struct net_device *dev,
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
return -EINVAL;
- if (!wiphy_to_dev(wdev->wiphy)->ops->join_ibss)
+ if (!rdev->ops->join_ibss)
return -EOPNOTSUPP;
- if (wdev->ssid_len) {
- err = cfg80211_leave_ibss(wiphy_to_dev(wdev->wiphy),
- dev, true);
- if (err)
- return err;
- }
+ wdev_lock(wdev);
+ err = 0;
+ if (wdev->ssid_len)
+ err = __cfg80211_leave_ibss(rdev, dev, true);
+ wdev_unlock(wdev);
+
+ if (err)
+ return err;
/* iwconfig uses nul termination in SSID.. */
if (len > 0 && ssid[len - 1] == '\0')
@@ -263,10 +390,14 @@ int cfg80211_ibss_wext_siwessid(struct net_device *dev,
memcpy(wdev->wext.ibss.ssid, ssid, len);
wdev->wext.ibss.ssid_len = len;
- return cfg80211_ibss_wext_join(wiphy_to_dev(wdev->wiphy), wdev);
+ mutex_lock(&rdev->devlist_mtx);
+ wdev_lock(wdev);
+ err = cfg80211_ibss_wext_join(rdev, wdev);
+ wdev_unlock(wdev);
+ mutex_unlock(&rdev->devlist_mtx);
+
+ return err;
}
-/* temporary symbol - mark GPL - in the future the handler won't be */
-EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_siwessid);
int cfg80211_ibss_wext_giwessid(struct net_device *dev,
struct iw_request_info *info,
@@ -280,6 +411,7 @@ int cfg80211_ibss_wext_giwessid(struct net_device *dev,
data->flags = 0;
+ wdev_lock(wdev);
if (wdev->ssid_len) {
data->flags = 1;
data->length = wdev->ssid_len;
@@ -289,17 +421,17 @@ int cfg80211_ibss_wext_giwessid(struct net_device *dev,
data->length = wdev->wext.ibss.ssid_len;
memcpy(ssid, wdev->wext.ibss.ssid, data->length);
}
+ wdev_unlock(wdev);
return 0;
}
-/* temporary symbol - mark GPL - in the future the handler won't be */
-EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_giwessid);
int cfg80211_ibss_wext_siwap(struct net_device *dev,
struct iw_request_info *info,
struct sockaddr *ap_addr, char *extra)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
u8 *bssid = ap_addr->sa_data;
int err;
@@ -307,7 +439,7 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_ADHOC))
return -EINVAL;
- if (!wiphy_to_dev(wdev->wiphy)->ops->join_ibss)
+ if (!rdev->ops->join_ibss)
return -EOPNOTSUPP;
if (ap_addr->sa_family != ARPHRD_ETHER)
@@ -326,12 +458,14 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
compare_ether_addr(bssid, wdev->wext.ibss.bssid) == 0)
return 0;
- if (wdev->ssid_len) {
- err = cfg80211_leave_ibss(wiphy_to_dev(wdev->wiphy),
- dev, true);
- if (err)
- return err;
- }
+ wdev_lock(wdev);
+ err = 0;
+ if (wdev->ssid_len)
+ err = __cfg80211_leave_ibss(rdev, dev, true);
+ wdev_unlock(wdev);
+
+ if (err)
+ return err;
if (bssid) {
memcpy(wdev->wext.bssid, bssid, ETH_ALEN);
@@ -339,10 +473,14 @@ int cfg80211_ibss_wext_siwap(struct net_device *dev,
} else
wdev->wext.ibss.bssid = NULL;
- return cfg80211_ibss_wext_join(wiphy_to_dev(wdev->wiphy), wdev);
+ mutex_lock(&rdev->devlist_mtx);
+ wdev_lock(wdev);
+ err = cfg80211_ibss_wext_join(rdev, wdev);
+ wdev_unlock(wdev);
+ mutex_unlock(&rdev->devlist_mtx);
+
+ return err;
}
-/* temporary symbol - mark GPL - in the future the handler won't be */
-EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_siwap);
int cfg80211_ibss_wext_giwap(struct net_device *dev,
struct iw_request_info *info,
@@ -356,14 +494,16 @@ int cfg80211_ibss_wext_giwap(struct net_device *dev,
ap_addr->sa_family = ARPHRD_ETHER;
- if (wdev->wext.ibss.bssid) {
+ wdev_lock(wdev);
+ if (wdev->current_bss)
+ memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN);
+ else if (wdev->wext.ibss.bssid)
memcpy(ap_addr->sa_data, wdev->wext.ibss.bssid, ETH_ALEN);
- return 0;
- }
+ else
+ memset(ap_addr->sa_data, 0, ETH_ALEN);
+
+ wdev_unlock(wdev);
- memcpy(ap_addr->sa_data, wdev->bssid, ETH_ALEN);
return 0;
}
-/* temporary symbol - mark GPL - in the future the handler won't be */
-EXPORT_SYMBOL_GPL(cfg80211_ibss_wext_giwap);
#endif
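
Throughout the ibss.c changes, each operation is split into a __cfg80211_* variant that asserts the wdev lock is already held and a plain cfg80211_* wrapper that brackets it with wdev_lock()/wdev_unlock(), so the wext handlers can compose several locked steps without re-acquiring the lock. A rough user-space sketch of that split, using a pthread mutex and illustrative names in place of the real wireless_dev locking:

#include <pthread.h>
#include <assert.h>
#include <stdio.h>

/* stand-ins for struct wireless_dev, wdev_lock(), ASSERT_WDEV_LOCK() */
struct wdev {
	pthread_mutex_t mtx;
	int locked;           /* for the assertion only */
	size_t ssid_len;
};

static void wdev_lock(struct wdev *w)   { pthread_mutex_lock(&w->mtx);  w->locked = 1; }
static void wdev_unlock(struct wdev *w) { w->locked = 0; pthread_mutex_unlock(&w->mtx); }
#define ASSERT_WDEV_LOCK(w) assert((w)->locked)

/* __variant: caller already holds the lock, so other locked paths
 * (the wext handlers in the patch) can call it directly */
static int __leave_ibss(struct wdev *w)
{
	ASSERT_WDEV_LOCK(w);
	if (!w->ssid_len)
		return -1;    /* -ENOLINK in the patch */
	w->ssid_len = 0;
	return 0;
}

/* plain variant: takes and drops the lock around the __variant */
static int leave_ibss(struct wdev *w)
{
	int err;

	wdev_lock(w);
	err = __leave_ibss(w);
	wdev_unlock(w);
	return err;
}

int main(void)
{
	static struct wdev w = { .mtx = PTHREAD_MUTEX_INITIALIZER, .ssid_len = 4 };

	printf("first leave: %d\n", leave_ibss(&w));   /* 0 */
	printf("second leave: %d\n", leave_ibss(&w));  /* -1, not joined */
	return 0;
}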
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 42184361a10..79d2eec54ce 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -8,75 +8,652 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/nl80211.h>
+#include <linux/wireless.h>
#include <net/cfg80211.h>
+#include <net/iw_handler.h>
#include "core.h"
#include "nl80211.h"
void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len)
{
- struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- nl80211_send_rx_auth(rdev, dev, buf, len);
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
+ u8 *bssid = mgmt->bssid;
+ int i;
+ u16 status = le16_to_cpu(mgmt->u.auth.status_code);
+ bool done = false;
+
+ wdev_lock(wdev);
+
+ for (i = 0; i < MAX_AUTH_BSSES; i++) {
+ if (wdev->authtry_bsses[i] &&
+ memcmp(wdev->authtry_bsses[i]->pub.bssid, bssid,
+ ETH_ALEN) == 0) {
+ if (status == WLAN_STATUS_SUCCESS) {
+ wdev->auth_bsses[i] = wdev->authtry_bsses[i];
+ } else {
+ cfg80211_unhold_bss(wdev->authtry_bsses[i]);
+ cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
+ }
+ wdev->authtry_bsses[i] = NULL;
+ done = true;
+ break;
+ }
+ }
+
+ WARN_ON(!done);
+
+ nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
+ cfg80211_sme_rx_auth(dev, buf, len);
+
+ wdev_unlock(wdev);
}
EXPORT_SYMBOL(cfg80211_send_rx_auth);
void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
{
- struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ u16 status_code;
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- nl80211_send_rx_assoc(rdev, dev, buf, len);
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
+ u8 *ie = mgmt->u.assoc_resp.variable;
+ int i, ieoffs = offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
+ struct cfg80211_internal_bss *bss = NULL;
+
+ wdev_lock(wdev);
+
+ status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
+
+ /*
+ * This is a bit of a hack, we don't notify userspace of
+ * a (re-)association reply if we tried to send a reassoc
+ * and got a reject -- we only try again with an assoc
+ * frame instead of reassoc.
+ */
+ if (status_code != WLAN_STATUS_SUCCESS && wdev->conn &&
+ cfg80211_sme_failed_reassoc(wdev))
+ goto out;
+
+ nl80211_send_rx_assoc(rdev, dev, buf, len, GFP_KERNEL);
+
+ if (status_code == WLAN_STATUS_SUCCESS) {
+ for (i = 0; i < MAX_AUTH_BSSES; i++) {
+ if (!wdev->auth_bsses[i])
+ continue;
+ if (memcmp(wdev->auth_bsses[i]->pub.bssid, mgmt->bssid,
+ ETH_ALEN) == 0) {
+ bss = wdev->auth_bsses[i];
+ wdev->auth_bsses[i] = NULL;
+ /* additional reference to drop hold */
+ cfg80211_ref_bss(bss);
+ break;
+ }
+ }
+
+ WARN_ON(!bss);
+ }
+
+ if (!wdev->conn && wdev->sme_state == CFG80211_SME_IDLE) {
+ /*
+ * This is for the userspace SME, the CONNECTING
+ * state will be changed to CONNECTED by
+ * __cfg80211_connect_result() below.
+ */
+ wdev->sme_state = CFG80211_SME_CONNECTING;
+ }
+
+ /* this consumes one bss reference (unless bss is NULL) */
+ __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs,
+ status_code,
+ status_code == WLAN_STATUS_SUCCESS,
+ bss ? &bss->pub : NULL);
+ /* drop hold now, and also reference acquired above */
+ if (bss) {
+ cfg80211_unhold_bss(bss);
+ cfg80211_put_bss(&bss->pub);
+ }
+
+ out:
+ wdev_unlock(wdev);
}
EXPORT_SYMBOL(cfg80211_send_rx_assoc);
-void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len)
+static void __cfg80211_send_deauth(struct net_device *dev,
+ const u8 *buf, size_t len)
{
- struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- nl80211_send_deauth(rdev, dev, buf, len);
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
+ const u8 *bssid = mgmt->bssid;
+ int i;
+ bool done = false;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ nl80211_send_deauth(rdev, dev, buf, len, GFP_KERNEL);
+
+ if (wdev->current_bss &&
+ memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
+ done = true;
+ cfg80211_unhold_bss(wdev->current_bss);
+ cfg80211_put_bss(&wdev->current_bss->pub);
+ wdev->current_bss = NULL;
+ } else for (i = 0; i < MAX_AUTH_BSSES; i++) {
+ if (wdev->auth_bsses[i] &&
+ memcmp(wdev->auth_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) {
+ cfg80211_unhold_bss(wdev->auth_bsses[i]);
+ cfg80211_put_bss(&wdev->auth_bsses[i]->pub);
+ wdev->auth_bsses[i] = NULL;
+ done = true;
+ break;
+ }
+ if (wdev->authtry_bsses[i] &&
+ memcmp(wdev->authtry_bsses[i]->pub.bssid, bssid, ETH_ALEN) == 0) {
+ cfg80211_unhold_bss(wdev->authtry_bsses[i]);
+ cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
+ wdev->authtry_bsses[i] = NULL;
+ done = true;
+ break;
+ }
+ }
+
+ WARN_ON(!done);
+
+ if (wdev->sme_state == CFG80211_SME_CONNECTED) {
+ u16 reason_code;
+ bool from_ap;
+
+ reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
+
+ from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0;
+ __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap);
+ } else if (wdev->sme_state == CFG80211_SME_CONNECTING) {
+ __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ false, NULL);
+ }
+}
+
+
+void cfg80211_send_deauth(struct net_device *dev, const u8 *buf, size_t len,
+ void *cookie)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ BUG_ON(cookie && wdev != cookie);
+
+ if (cookie) {
+ /* called within callback */
+ __cfg80211_send_deauth(dev, buf, len);
+ } else {
+ wdev_lock(wdev);
+ __cfg80211_send_deauth(dev, buf, len);
+ wdev_unlock(wdev);
+ }
}
EXPORT_SYMBOL(cfg80211_send_deauth);
-void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len)
+static void __cfg80211_send_disassoc(struct net_device *dev,
+ const u8 *buf, size_t len)
{
- struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- nl80211_send_disassoc(rdev, dev, buf, len);
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
+ const u8 *bssid = mgmt->bssid;
+ int i;
+ u16 reason_code;
+ bool from_ap;
+ bool done = false;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ nl80211_send_disassoc(rdev, dev, buf, len, GFP_KERNEL);
+
+ if (wdev->sme_state != CFG80211_SME_CONNECTED)
+ return;
+
+ if (wdev->current_bss &&
+ memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
+ for (i = 0; i < MAX_AUTH_BSSES; i++) {
+ if (wdev->authtry_bsses[i] || wdev->auth_bsses[i])
+ continue;
+ wdev->auth_bsses[i] = wdev->current_bss;
+ wdev->current_bss = NULL;
+ done = true;
+ cfg80211_sme_disassoc(dev, i);
+ break;
+ }
+ WARN_ON(!done);
+ } else
+ WARN_ON(1);
+
+
+ reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
+
+ from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0;
+ __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap);
}
-EXPORT_SYMBOL(cfg80211_send_disassoc);
-static void cfg80211_wext_disconnected(struct net_device *dev)
+void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len,
+ void *cookie)
{
-#ifdef CONFIG_WIRELESS_EXT
- union iwreq_data wrqu;
- memset(&wrqu, 0, sizeof(wrqu));
- wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
-#endif
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ BUG_ON(cookie && wdev != cookie);
+
+ if (cookie) {
+ /* called within callback */
+ __cfg80211_send_disassoc(dev, buf, len);
+ } else {
+ wdev_lock(wdev);
+ __cfg80211_send_disassoc(dev, buf, len);
+ wdev_unlock(wdev);
+ }
}
+EXPORT_SYMBOL(cfg80211_send_disassoc);
void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr)
{
- struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- nl80211_send_auth_timeout(rdev, dev, addr);
- cfg80211_wext_disconnected(dev);
+ int i;
+ bool done = false;
+
+ wdev_lock(wdev);
+
+ nl80211_send_auth_timeout(rdev, dev, addr, GFP_KERNEL);
+ if (wdev->sme_state == CFG80211_SME_CONNECTING)
+ __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ false, NULL);
+
+ for (i = 0; addr && i < MAX_AUTH_BSSES; i++) {
+ if (wdev->authtry_bsses[i] &&
+ memcmp(wdev->authtry_bsses[i]->pub.bssid,
+ addr, ETH_ALEN) == 0) {
+ cfg80211_unhold_bss(wdev->authtry_bsses[i]);
+ cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
+ wdev->authtry_bsses[i] = NULL;
+ done = true;
+ break;
+ }
+ }
+
+ WARN_ON(!done);
+
+ wdev_unlock(wdev);
}
EXPORT_SYMBOL(cfg80211_send_auth_timeout);
void cfg80211_send_assoc_timeout(struct net_device *dev, const u8 *addr)
{
- struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- nl80211_send_assoc_timeout(rdev, dev, addr);
- cfg80211_wext_disconnected(dev);
+ int i;
+ bool done = false;
+
+ wdev_lock(wdev);
+
+ nl80211_send_assoc_timeout(rdev, dev, addr, GFP_KERNEL);
+ if (wdev->sme_state == CFG80211_SME_CONNECTING)
+ __cfg80211_connect_result(dev, addr, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ false, NULL);
+
+ for (i = 0; addr && i < MAX_AUTH_BSSES; i++) {
+ if (wdev->auth_bsses[i] &&
+ memcmp(wdev->auth_bsses[i]->pub.bssid,
+ addr, ETH_ALEN) == 0) {
+ cfg80211_unhold_bss(wdev->auth_bsses[i]);
+ cfg80211_put_bss(&wdev->auth_bsses[i]->pub);
+ wdev->auth_bsses[i] = NULL;
+ done = true;
+ break;
+ }
+ }
+
+ WARN_ON(!done);
+
+ wdev_unlock(wdev);
}
EXPORT_SYMBOL(cfg80211_send_assoc_timeout);
void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
enum nl80211_key_type key_type, int key_id,
- const u8 *tsc)
+ const u8 *tsc, gfp_t gfp)
{
struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
- nl80211_michael_mic_failure(rdev, dev, addr, key_type, key_id, tsc);
+#ifdef CONFIG_WIRELESS_EXT
+ union iwreq_data wrqu;
+ char *buf = kmalloc(128, gfp);
+
+ if (buf) {
+ sprintf(buf, "MLME-MICHAELMICFAILURE.indication("
+ "keyid=%d %scast addr=%pM)", key_id,
+ key_type == NL80211_KEYTYPE_GROUP ? "broad" : "uni",
+ addr);
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.length = strlen(buf);
+ wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf);
+ kfree(buf);
+ }
+#endif
+
+ nl80211_michael_mic_failure(rdev, dev, addr, key_type, key_id, tsc, gfp);
}
EXPORT_SYMBOL(cfg80211_michael_mic_failure);
+
+/* some MLME handling for userspace SME */
+int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct ieee80211_channel *chan,
+ enum nl80211_auth_type auth_type,
+ const u8 *bssid,
+ const u8 *ssid, int ssid_len,
+ const u8 *ie, int ie_len,
+ const u8 *key, int key_len, int key_idx)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_auth_request req;
+ struct cfg80211_internal_bss *bss;
+ int i, err, slot = -1, nfree = 0;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (auth_type == NL80211_AUTHTYPE_SHARED_KEY)
+ if (!key || !key_len || key_idx < 0 || key_idx > 4)
+ return -EINVAL;
+
+ if (wdev->current_bss &&
+ memcmp(bssid, wdev->current_bss->pub.bssid, ETH_ALEN) == 0)
+ return -EALREADY;
+
+ for (i = 0; i < MAX_AUTH_BSSES; i++) {
+ if (wdev->authtry_bsses[i] &&
+ memcmp(bssid, wdev->authtry_bsses[i]->pub.bssid,
+ ETH_ALEN) == 0)
+ return -EALREADY;
+ if (wdev->auth_bsses[i] &&
+ memcmp(bssid, wdev->auth_bsses[i]->pub.bssid,
+ ETH_ALEN) == 0)
+ return -EALREADY;
+ }
+
+ memset(&req, 0, sizeof(req));
+
+ req.ie = ie;
+ req.ie_len = ie_len;
+ req.auth_type = auth_type;
+ req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
+ WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+ req.key = key;
+ req.key_len = key_len;
+ req.key_idx = key_idx;
+ if (!req.bss)
+ return -ENOENT;
+
+ bss = bss_from_pub(req.bss);
+
+ for (i = 0; i < MAX_AUTH_BSSES; i++) {
+ if (!wdev->auth_bsses[i] && !wdev->authtry_bsses[i]) {
+ slot = i;
+ nfree++;
+ }
+ }
+
+ /* we need one free slot for disassoc and one for this auth */
+ if (nfree < 2) {
+ err = -ENOSPC;
+ goto out;
+ }
+
+ wdev->authtry_bsses[slot] = bss;
+ cfg80211_hold_bss(bss);
+
+ err = rdev->ops->auth(&rdev->wiphy, dev, &req);
+ if (err) {
+ wdev->authtry_bsses[slot] = NULL;
+ cfg80211_unhold_bss(bss);
+ }
+
+ out:
+ if (err)
+ cfg80211_put_bss(req.bss);
+ return err;
+}
+
+int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, struct ieee80211_channel *chan,
+ enum nl80211_auth_type auth_type, const u8 *bssid,
+ const u8 *ssid, int ssid_len,
+ const u8 *ie, int ie_len,
+ const u8 *key, int key_len, int key_idx)
+{
+ int err;
+
+ wdev_lock(dev->ieee80211_ptr);
+ err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
+ ssid, ssid_len, ie, ie_len,
+ key, key_len, key_idx);
+ wdev_unlock(dev->ieee80211_ptr);
+
+ return err;
+}
+
+int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct ieee80211_channel *chan,
+ const u8 *bssid, const u8 *prev_bssid,
+ const u8 *ssid, int ssid_len,
+ const u8 *ie, int ie_len, bool use_mfp,
+ struct cfg80211_crypto_settings *crypt)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_assoc_request req;
+ struct cfg80211_internal_bss *bss;
+ int i, err, slot = -1;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ memset(&req, 0, sizeof(req));
+
+ if (wdev->current_bss)
+ return -EALREADY;
+
+ req.ie = ie;
+ req.ie_len = ie_len;
+ memcpy(&req.crypto, crypt, sizeof(req.crypto));
+ req.use_mfp = use_mfp;
+ req.prev_bssid = prev_bssid;
+ req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
+ WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+ if (!req.bss)
+ return -ENOENT;
+
+ bss = bss_from_pub(req.bss);
+
+ for (i = 0; i < MAX_AUTH_BSSES; i++) {
+ if (bss == wdev->auth_bsses[i]) {
+ slot = i;
+ break;
+ }
+ }
+
+ if (slot < 0) {
+ err = -ENOTCONN;
+ goto out;
+ }
+
+ err = rdev->ops->assoc(&rdev->wiphy, dev, &req);
+ out:
+ /* still a reference in wdev->auth_bsses[slot] */
+ cfg80211_put_bss(req.bss);
+ return err;
+}
+
+int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct ieee80211_channel *chan,
+ const u8 *bssid, const u8 *prev_bssid,
+ const u8 *ssid, int ssid_len,
+ const u8 *ie, int ie_len, bool use_mfp,
+ struct cfg80211_crypto_settings *crypt)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ wdev_lock(wdev);
+ err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
+ ssid, ssid_len, ie, ie_len, use_mfp, crypt);
+ wdev_unlock(wdev);
+
+ return err;
+}
+
+int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, const u8 *bssid,
+ const u8 *ie, int ie_len, u16 reason)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_deauth_request req;
+ int i;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ memset(&req, 0, sizeof(req));
+ req.reason_code = reason;
+ req.ie = ie;
+ req.ie_len = ie_len;
+ if (wdev->current_bss &&
+ memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) {
+ req.bss = &wdev->current_bss->pub;
+ } else for (i = 0; i < MAX_AUTH_BSSES; i++) {
+ if (wdev->auth_bsses[i] &&
+ memcmp(bssid, wdev->auth_bsses[i]->pub.bssid, ETH_ALEN) == 0) {
+ req.bss = &wdev->auth_bsses[i]->pub;
+ break;
+ }
+ if (wdev->authtry_bsses[i] &&
+ memcmp(bssid, wdev->authtry_bsses[i]->pub.bssid, ETH_ALEN) == 0) {
+ req.bss = &wdev->authtry_bsses[i]->pub;
+ break;
+ }
+ }
+
+ if (!req.bss)
+ return -ENOTCONN;
+
+ return rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev);
+}
+
+int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, const u8 *bssid,
+ const u8 *ie, int ie_len, u16 reason)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ wdev_lock(wdev);
+ err = __cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason);
+ wdev_unlock(wdev);
+
+ return err;
+}
+
+static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, const u8 *bssid,
+ const u8 *ie, int ie_len, u16 reason)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_disassoc_request req;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (wdev->sme_state != CFG80211_SME_CONNECTED)
+ return -ENOTCONN;
+
+ if (WARN_ON(!wdev->current_bss))
+ return -ENOTCONN;
+
+ memset(&req, 0, sizeof(req));
+ req.reason_code = reason;
+ req.ie = ie;
+ req.ie_len = ie_len;
+ if (memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0)
+ req.bss = &wdev->current_bss->pub;
+ else
+ return -ENOTCONN;
+
+ return rdev->ops->disassoc(&rdev->wiphy, dev, &req, wdev);
+}
+
+int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, const u8 *bssid,
+ const u8 *ie, int ie_len, u16 reason)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ wdev_lock(wdev);
+ err = __cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason);
+ wdev_unlock(wdev);
+
+ return err;
+}
+
+void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
+ struct net_device *dev)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_deauth_request req;
+ int i;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (!rdev->ops->deauth)
+ return;
+
+ memset(&req, 0, sizeof(req));
+ req.reason_code = WLAN_REASON_DEAUTH_LEAVING;
+ req.ie = NULL;
+ req.ie_len = 0;
+
+ if (wdev->current_bss) {
+ req.bss = &wdev->current_bss->pub;
+ rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev);
+ if (wdev->current_bss) {
+ cfg80211_unhold_bss(wdev->current_bss);
+ cfg80211_put_bss(&wdev->current_bss->pub);
+ wdev->current_bss = NULL;
+ }
+ }
+
+ for (i = 0; i < MAX_AUTH_BSSES; i++) {
+ if (wdev->auth_bsses[i]) {
+ req.bss = &wdev->auth_bsses[i]->pub;
+ rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev);
+ if (wdev->auth_bsses[i]) {
+ cfg80211_unhold_bss(wdev->auth_bsses[i]);
+ cfg80211_put_bss(&wdev->auth_bsses[i]->pub);
+ wdev->auth_bsses[i] = NULL;
+ }
+ }
+ if (wdev->authtry_bsses[i]) {
+ req.bss = &wdev->authtry_bsses[i]->pub;
+ rdev->ops->deauth(&rdev->wiphy, dev, &req, wdev);
+ if (wdev->authtry_bsses[i]) {
+ cfg80211_unhold_bss(wdev->authtry_bsses[i]);
+ cfg80211_put_bss(&wdev->authtry_bsses[i]->pub);
+ wdev->authtry_bsses[i] = NULL;
+ }
+ }
+ }
+}
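
__cfg80211_mlme_auth() above tracks in-flight authentications in the fixed authtry_bsses[]/auth_bsses[] slot arrays and refuses to start a new attempt unless two slots are free, keeping one spare for parking the current BSS on disassociation. A simplified sketch of that slot-reservation logic with illustrative types; the real MAX_AUTH_BSSES value and BSS bookkeeping live in cfg80211's internal headers:

#include <stdio.h>

#define MAX_AUTH_BSSES 4          /* illustrative value */

struct bss { char bssid[18]; };

static struct bss *authtry[MAX_AUTH_BSSES];
static struct bss *authed[MAX_AUTH_BSSES];

/*
 * Mirror of the slot search in __cfg80211_mlme_auth(): find a free
 * slot, but bail out unless at least two are free so one stays
 * available for moving the current BSS aside on disassoc.
 */
static int start_auth(struct bss *bss)
{
	int i, slot = -1, nfree = 0;

	for (i = 0; i < MAX_AUTH_BSSES; i++)
		if (!authtry[i] && !authed[i]) {
			slot = i;
			nfree++;
		}

	if (nfree < 2)
		return -1;        /* -ENOSPC in the patch */

	authtry[slot] = bss;
	return slot;
}

int main(void)
{
	static struct bss b[MAX_AUTH_BSSES];
	int i;

	for (i = 0; i < MAX_AUTH_BSSES; i++) {
		snprintf(b[i].bssid, sizeof(b[i].bssid), "02:00:00:00:00:%02x", i);
		printf("auth %s -> slot %d\n", b[i].bssid, start_auth(&b[i]));
	}
	return 0;
}

With four slots the fourth attempt is rejected even though one slot is still empty, which is the reservation the "one free slot for disassoc" comment in the patch is describing.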
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 634496b3ed7..eddab097435 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -14,8 +14,10 @@
#include <linux/rtnetlink.h>
#include <linux/netlink.h>
#include <linux/etherdevice.h>
+#include <net/net_namespace.h>
#include <net/genetlink.h>
#include <net/cfg80211.h>
+#include <net/sock.h>
#include "core.h"
#include "nl80211.h"
#include "reg.h"
@@ -27,27 +29,29 @@ static struct genl_family nl80211_fam = {
.hdrsize = 0, /* no private header */
.version = 1, /* no particular meaning now */
.maxattr = NL80211_ATTR_MAX,
+ .netnsok = true,
};
-/* internal helper: get drv and dev */
-static int get_drv_dev_by_info_ifindex(struct nlattr **attrs,
- struct cfg80211_registered_device **drv,
+/* internal helper: get rdev and dev */
+static int get_rdev_dev_by_info_ifindex(struct genl_info *info,
+ struct cfg80211_registered_device **rdev,
struct net_device **dev)
{
+ struct nlattr **attrs = info->attrs;
int ifindex;
if (!attrs[NL80211_ATTR_IFINDEX])
return -EINVAL;
ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]);
- *dev = dev_get_by_index(&init_net, ifindex);
+ *dev = dev_get_by_index(genl_info_net(info), ifindex);
if (!*dev)
return -ENODEV;
- *drv = cfg80211_get_dev_from_ifindex(ifindex);
- if (IS_ERR(*drv)) {
+ *rdev = cfg80211_get_dev_from_ifindex(genl_info_net(info), ifindex);
+ if (IS_ERR(*rdev)) {
dev_put(*dev);
- return PTR_ERR(*drv);
+ return PTR_ERR(*rdev);
}
return 0;
@@ -71,7 +75,9 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
[NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },
[NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN },
+ [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN },
+ [NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
[NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
.len = WLAN_MAX_KEY_LEN },
[NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 },
@@ -128,6 +134,21 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
.len = sizeof(struct nl80211_sta_flag_update),
},
[NL80211_ATTR_CONTROL_PORT] = { .type = NLA_FLAG },
+ [NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG },
+ [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
+ [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
+ [NL80211_ATTR_PID] = { .type = NLA_U32 },
+};
+
+/* policy for the attributes */
+static struct nla_policy
+nl80211_key_policy[NL80211_KEY_MAX + 1] __read_mostly = {
+ [NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN },
+ [NL80211_KEY_IDX] = { .type = NLA_U8 },
+ [NL80211_KEY_CIPHER] = { .type = NLA_U32 },
+ [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 },
+ [NL80211_KEY_DEFAULT] = { .type = NLA_FLAG },
+ [NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG },
};
/* IE validation */
@@ -194,6 +215,177 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
/* netlink command implementations */
+struct key_parse {
+ struct key_params p;
+ int idx;
+ bool def, defmgmt;
+};
+
+static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k)
+{
+ struct nlattr *tb[NL80211_KEY_MAX + 1];
+ int err = nla_parse_nested(tb, NL80211_KEY_MAX, key,
+ nl80211_key_policy);
+ if (err)
+ return err;
+
+ k->def = !!tb[NL80211_KEY_DEFAULT];
+ k->defmgmt = !!tb[NL80211_KEY_DEFAULT_MGMT];
+
+ if (tb[NL80211_KEY_IDX])
+ k->idx = nla_get_u8(tb[NL80211_KEY_IDX]);
+
+ if (tb[NL80211_KEY_DATA]) {
+ k->p.key = nla_data(tb[NL80211_KEY_DATA]);
+ k->p.key_len = nla_len(tb[NL80211_KEY_DATA]);
+ }
+
+ if (tb[NL80211_KEY_SEQ]) {
+ k->p.seq = nla_data(tb[NL80211_KEY_SEQ]);
+ k->p.seq_len = nla_len(tb[NL80211_KEY_SEQ]);
+ }
+
+ if (tb[NL80211_KEY_CIPHER])
+ k->p.cipher = nla_get_u32(tb[NL80211_KEY_CIPHER]);
+
+ return 0;
+}
+
+static int nl80211_parse_key_old(struct genl_info *info, struct key_parse *k)
+{
+ if (info->attrs[NL80211_ATTR_KEY_DATA]) {
+ k->p.key = nla_data(info->attrs[NL80211_ATTR_KEY_DATA]);
+ k->p.key_len = nla_len(info->attrs[NL80211_ATTR_KEY_DATA]);
+ }
+
+ if (info->attrs[NL80211_ATTR_KEY_SEQ]) {
+ k->p.seq = nla_data(info->attrs[NL80211_ATTR_KEY_SEQ]);
+ k->p.seq_len = nla_len(info->attrs[NL80211_ATTR_KEY_SEQ]);
+ }
+
+ if (info->attrs[NL80211_ATTR_KEY_IDX])
+ k->idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]);
+
+ if (info->attrs[NL80211_ATTR_KEY_CIPHER])
+ k->p.cipher = nla_get_u32(info->attrs[NL80211_ATTR_KEY_CIPHER]);
+
+ k->def = !!info->attrs[NL80211_ATTR_KEY_DEFAULT];
+ k->defmgmt = !!info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT];
+
+ return 0;
+}
+
+static int nl80211_parse_key(struct genl_info *info, struct key_parse *k)
+{
+ int err;
+
+ memset(k, 0, sizeof(*k));
+ k->idx = -1;
+
+ if (info->attrs[NL80211_ATTR_KEY])
+ err = nl80211_parse_key_new(info->attrs[NL80211_ATTR_KEY], k);
+ else
+ err = nl80211_parse_key_old(info, k);
+
+ if (err)
+ return err;
+
+ if (k->def && k->defmgmt)
+ return -EINVAL;
+
+ if (k->idx != -1) {
+ if (k->defmgmt) {
+ if (k->idx < 4 || k->idx > 5)
+ return -EINVAL;
+ } else if (k->def) {
+ if (k->idx < 0 || k->idx > 3)
+ return -EINVAL;
+ } else {
+ if (k->idx < 0 || k->idx > 5)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static struct cfg80211_cached_keys *
+nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
+ struct nlattr *keys)
+{
+ struct key_parse parse;
+ struct nlattr *key;
+ struct cfg80211_cached_keys *result;
+ int rem, err, def = 0;
+
+ result = kzalloc(sizeof(*result), GFP_KERNEL);
+ if (!result)
+ return ERR_PTR(-ENOMEM);
+
+ result->def = -1;
+ result->defmgmt = -1;
+
+ nla_for_each_nested(key, keys, rem) {
+ memset(&parse, 0, sizeof(parse));
+ parse.idx = -1;
+
+ err = nl80211_parse_key_new(key, &parse);
+ if (err)
+ goto error;
+ err = -EINVAL;
+ if (!parse.p.key)
+ goto error;
+ if (parse.idx < 0 || parse.idx > 4)
+ goto error;
+ if (parse.def) {
+ if (def)
+ goto error;
+ def = 1;
+ result->def = parse.idx;
+ } else if (parse.defmgmt)
+ goto error;
+ err = cfg80211_validate_key_settings(rdev, &parse.p,
+ parse.idx, NULL);
+ if (err)
+ goto error;
+ result->params[parse.idx].cipher = parse.p.cipher;
+ result->params[parse.idx].key_len = parse.p.key_len;
+ result->params[parse.idx].key = result->data[parse.idx];
+ memcpy(result->data[parse.idx], parse.p.key, parse.p.key_len);
+ }
+
+ return result;
+ error:
+ kfree(result);
+ return ERR_PTR(err);
+}
+
+static int nl80211_key_allowed(struct wireless_dev *wdev)
+{
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (!netif_running(wdev->netdev))
+ return -ENETDOWN;
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ if (!wdev->current_bss)
+ return -ENOLINK;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (wdev->sme_state != CFG80211_SME_CONNECTED)
+ return -ENOLINK;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
struct cfg80211_registered_device *dev)
{
@@ -216,6 +408,9 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx);
NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy));
+ NLA_PUT_U32(msg, NL80211_ATTR_GENERATION,
+ cfg80211_rdev_list_generation);
+
NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
dev->wiphy.retry_short);
NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
@@ -345,8 +540,23 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
CMD(deauth, DEAUTHENTICATE);
CMD(disassoc, DISASSOCIATE);
CMD(join_ibss, JOIN_IBSS);
+ if (dev->wiphy.netnsok) {
+ i++;
+ NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
+ }
#undef CMD
+
+ if (dev->ops->connect || dev->ops->auth) {
+ i++;
+ NLA_PUT_U32(msg, i, NL80211_CMD_CONNECT);
+ }
+
+ if (dev->ops->disconnect || dev->ops->deauth) {
+ i++;
+ NLA_PUT_U32(msg, i, NL80211_CMD_DISCONNECT);
+ }
+
nla_nest_end(msg, nl_cmds);
return genlmsg_end(msg, hdr);
@@ -363,7 +573,9 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
struct cfg80211_registered_device *dev;
mutex_lock(&cfg80211_mutex);
- list_for_each_entry(dev, &cfg80211_drv_list, list) {
+ list_for_each_entry(dev, &cfg80211_rdev_list, list) {
+ if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk)))
+ continue;
if (++idx <= start)
continue;
if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).pid,
@@ -396,14 +608,14 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
if (nl80211_send_wiphy(msg, info->snd_pid, info->snd_seq, 0, dev) < 0)
goto out_free;
- cfg80211_put_dev(dev);
+ cfg80211_unlock_rdev(dev);
- return genlmsg_unicast(msg, info->snd_pid);
+ return genlmsg_reply(msg, info);
out_free:
nlmsg_free(msg);
out_err:
- cfg80211_put_dev(dev);
+ cfg80211_unlock_rdev(dev);
return -ENOBUFS;
}
@@ -445,7 +657,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
mutex_lock(&cfg80211_mutex);
- rdev = __cfg80211_drv_from_info(info);
+ rdev = __cfg80211_rdev_from_info(info);
if (IS_ERR(rdev)) {
mutex_unlock(&cfg80211_mutex);
result = PTR_ERR(rdev);
@@ -492,15 +704,8 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
- struct ieee80211_channel *chan;
- struct ieee80211_sta_ht_cap *ht_cap;
u32 freq;
- if (!rdev->ops->set_channel) {
- result = -EOPNOTSUPP;
- goto bad_res;
- }
-
result = -EINVAL;
if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) {
@@ -514,38 +719,10 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
}
freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]);
- chan = ieee80211_get_channel(&rdev->wiphy, freq);
-
- /* Primary channel not allowed */
- if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
- goto bad_res;
-
- if (channel_type == NL80211_CHAN_HT40MINUS &&
- (chan->flags & IEEE80211_CHAN_NO_HT40MINUS))
- goto bad_res;
- else if (channel_type == NL80211_CHAN_HT40PLUS &&
- (chan->flags & IEEE80211_CHAN_NO_HT40PLUS))
- goto bad_res;
-
- /*
- * At this point we know if that if HT40 was requested
- * we are allowed to use it and the extension channel
- * exists.
- */
-
- ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap;
- /* no HT capabilities or intolerant */
- if (channel_type != NL80211_CHAN_NO_HT) {
- if (!ht_cap->ht_supported)
- goto bad_res;
- if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
- (ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT))
- goto bad_res;
- }
-
- result = rdev->ops->set_channel(&rdev->wiphy, chan,
- channel_type);
+ mutex_lock(&rdev->devlist_mtx);
+ result = rdev_set_freq(rdev, NULL, freq, channel_type);
+ mutex_unlock(&rdev->devlist_mtx);
if (result)
goto bad_res;
}
@@ -651,6 +828,11 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name);
NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype);
+
+ NLA_PUT_U32(msg, NL80211_ATTR_GENERATION,
+ rdev->devlist_generation ^
+ (cfg80211_rdev_list_generation << 2));
+
return genlmsg_end(msg, hdr);
nla_put_failure:
@@ -664,32 +846,34 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
int if_idx = 0;
int wp_start = cb->args[0];
int if_start = cb->args[1];
- struct cfg80211_registered_device *dev;
+ struct cfg80211_registered_device *rdev;
struct wireless_dev *wdev;
mutex_lock(&cfg80211_mutex);
- list_for_each_entry(dev, &cfg80211_drv_list, list) {
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
+ if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk)))
+ continue;
if (wp_idx < wp_start) {
wp_idx++;
continue;
}
if_idx = 0;
- mutex_lock(&dev->devlist_mtx);
- list_for_each_entry(wdev, &dev->netdev_list, list) {
+ mutex_lock(&rdev->devlist_mtx);
+ list_for_each_entry(wdev, &rdev->netdev_list, list) {
if (if_idx < if_start) {
if_idx++;
continue;
}
if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- dev, wdev->netdev) < 0) {
- mutex_unlock(&dev->devlist_mtx);
+ rdev, wdev->netdev) < 0) {
+ mutex_unlock(&rdev->devlist_mtx);
goto out;
}
if_idx++;
}
- mutex_unlock(&dev->devlist_mtx);
+ mutex_unlock(&rdev->devlist_mtx);
wp_idx++;
}
@@ -709,7 +893,7 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
struct net_device *netdev;
int err;
- err = get_drv_dev_by_info_ifindex(info->attrs, &dev, &netdev);
+ err = get_rdev_dev_by_info_ifindex(info, &dev, &netdev);
if (err)
return err;
@@ -722,15 +906,15 @@ static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info)
goto out_free;
dev_put(netdev);
- cfg80211_put_dev(dev);
+ cfg80211_unlock_rdev(dev);
- return genlmsg_unicast(msg, info->snd_pid);
+ return genlmsg_reply(msg, info);
out_free:
nlmsg_free(msg);
out_err:
dev_put(netdev);
- cfg80211_put_dev(dev);
+ cfg80211_unlock_rdev(dev);
return -ENOBUFS;
}
@@ -765,9 +949,9 @@ static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags)
static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
struct vif_params params;
- int err, ifindex;
+ int err;
enum nl80211_iftype otype, ntype;
struct net_device *dev;
u32 _flags, *flags = NULL;
@@ -777,13 +961,11 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto unlock_rtnl;
- ifindex = dev->ifindex;
otype = ntype = dev->ieee80211_ptr->iftype;
- dev_put(dev);
if (info->attrs[NL80211_ATTR_IFTYPE]) {
ntype = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
@@ -795,12 +977,6 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
}
}
- if (!drv->ops->change_virtual_intf ||
- !(drv->wiphy.interface_modes & (1 << ntype))) {
- err = -EOPNOTSUPP;
- goto unlock;
- }
-
if (info->attrs[NL80211_ATTR_MESH_ID]) {
if (ntype != NL80211_IFTYPE_MESH_POINT) {
err = -EINVAL;
@@ -826,21 +1002,13 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
}
if (change)
- err = drv->ops->change_virtual_intf(&drv->wiphy, ifindex,
- ntype, flags, &params);
+ err = cfg80211_change_iface(rdev, dev, ntype, flags, &params);
else
err = 0;
- dev = __dev_get_by_index(&init_net, ifindex);
- WARN_ON(!dev || (!err && dev->ieee80211_ptr->iftype != ntype));
-
- if (dev && !err && (ntype != otype)) {
- if (otype == NL80211_IFTYPE_ADHOC)
- cfg80211_clear_ibss(dev, false);
- }
-
unlock:
- cfg80211_put_dev(drv);
+ dev_put(dev);
+ cfg80211_unlock_rdev(rdev);
unlock_rtnl:
rtnl_unlock();
return err;
@@ -848,7 +1016,7 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
struct vif_params params;
int err;
enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED;
@@ -867,14 +1035,14 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
- drv = cfg80211_get_dev_from_info(info);
- if (IS_ERR(drv)) {
- err = PTR_ERR(drv);
+ rdev = cfg80211_get_dev_from_info(info);
+ if (IS_ERR(rdev)) {
+ err = PTR_ERR(rdev);
goto unlock_rtnl;
}
- if (!drv->ops->add_virtual_intf ||
- !(drv->wiphy.interface_modes & (1 << type))) {
+ if (!rdev->ops->add_virtual_intf ||
+ !(rdev->wiphy.interface_modes & (1 << type))) {
err = -EOPNOTSUPP;
goto unlock;
}
@@ -888,12 +1056,12 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ?
info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL,
&flags);
- err = drv->ops->add_virtual_intf(&drv->wiphy,
+ err = rdev->ops->add_virtual_intf(&rdev->wiphy,
nla_data(info->attrs[NL80211_ATTR_IFNAME]),
type, err ? NULL : &flags, &params);
unlock:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
unlock_rtnl:
rtnl_unlock();
return err;
@@ -901,27 +1069,26 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
- int ifindex, err;
+ struct cfg80211_registered_device *rdev;
+ int err;
struct net_device *dev;
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto unlock_rtnl;
- ifindex = dev->ifindex;
- dev_put(dev);
- if (!drv->ops->del_virtual_intf) {
+ if (!rdev->ops->del_virtual_intf) {
err = -EOPNOTSUPP;
goto out;
}
- err = drv->ops->del_virtual_intf(&drv->wiphy, ifindex);
+ err = rdev->ops->del_virtual_intf(&rdev->wiphy, dev);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
+ dev_put(dev);
unlock_rtnl:
rtnl_unlock();
return err;
@@ -930,10 +1097,12 @@ static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
struct get_key_cookie {
struct sk_buff *msg;
int error;
+ int idx;
};
static void get_key_callback(void *c, struct key_params *params)
{
+ struct nlattr *key;
struct get_key_cookie *cookie = c;
if (params->key)
@@ -948,6 +1117,26 @@ static void get_key_callback(void *c, struct key_params *params)
NLA_PUT_U32(cookie->msg, NL80211_ATTR_KEY_CIPHER,
params->cipher);
+ key = nla_nest_start(cookie->msg, NL80211_ATTR_KEY);
+ if (!key)
+ goto nla_put_failure;
+
+ if (params->key)
+ NLA_PUT(cookie->msg, NL80211_KEY_DATA,
+ params->key_len, params->key);
+
+ if (params->seq)
+ NLA_PUT(cookie->msg, NL80211_KEY_SEQ,
+ params->seq_len, params->seq);
+
+ if (params->cipher)
+ NLA_PUT_U32(cookie->msg, NL80211_KEY_CIPHER,
+ params->cipher);
+
+ NLA_PUT_U8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx);
+
+ nla_nest_end(cookie->msg, key);
+
return;
nla_put_failure:
cookie->error = 1;
@@ -955,7 +1144,7 @@ static void get_key_callback(void *c, struct key_params *params)
static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
int err;
struct net_device *dev;
u8 key_idx = 0;
@@ -977,11 +1166,11 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto unlock_rtnl;
- if (!drv->ops->get_key) {
+ if (!rdev->ops->get_key) {
err = -EOPNOTSUPP;
goto out;
}
@@ -1001,13 +1190,14 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
}
cookie.msg = msg;
+ cookie.idx = key_idx;
NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_idx);
if (mac_addr)
NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
- err = drv->ops->get_key(&drv->wiphy, dev, key_idx, mac_addr,
+ err = rdev->ops->get_key(&rdev->wiphy, dev, key_idx, mac_addr,
&cookie, get_key_callback);
if (err)
@@ -1017,7 +1207,7 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
goto nla_put_failure;
genlmsg_end(msg, hdr);
- err = genlmsg_unicast(msg, info->snd_pid);
+ err = genlmsg_reply(msg, info);
goto out;
nla_put_failure:
@@ -1025,7 +1215,7 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
free_msg:
nlmsg_free(msg);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
unlock_rtnl:
rtnl_unlock();
@@ -1035,57 +1225,57 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
+ struct key_parse key;
int err;
struct net_device *dev;
- u8 key_idx;
int (*func)(struct wiphy *wiphy, struct net_device *netdev,
u8 key_index);
- if (!info->attrs[NL80211_ATTR_KEY_IDX])
- return -EINVAL;
-
- key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]);
+ err = nl80211_parse_key(info, &key);
+ if (err)
+ return err;
- if (info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT]) {
- if (key_idx < 4 || key_idx > 5)
- return -EINVAL;
- } else if (key_idx > 3)
+ if (key.idx < 0)
return -EINVAL;
- /* currently only support setting default key */
- if (!info->attrs[NL80211_ATTR_KEY_DEFAULT] &&
- !info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT])
+ /* only support setting default key */
+ if (!key.def && !key.defmgmt)
return -EINVAL;
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto unlock_rtnl;
- if (info->attrs[NL80211_ATTR_KEY_DEFAULT])
- func = drv->ops->set_default_key;
+ if (key.def)
+ func = rdev->ops->set_default_key;
else
- func = drv->ops->set_default_mgmt_key;
+ func = rdev->ops->set_default_mgmt_key;
if (!func) {
err = -EOPNOTSUPP;
goto out;
}
- err = func(&drv->wiphy, dev, key_idx);
+ wdev_lock(dev->ieee80211_ptr);
+ err = nl80211_key_allowed(dev->ieee80211_ptr);
+ if (!err)
+ err = func(&rdev->wiphy, dev, key.idx);
+
#ifdef CONFIG_WIRELESS_EXT
if (!err) {
- if (func == drv->ops->set_default_key)
- dev->ieee80211_ptr->wext.default_key = key_idx;
+ if (func == rdev->ops->set_default_key)
+ dev->ieee80211_ptr->wext.default_key = key.idx;
else
- dev->ieee80211_ptr->wext.default_mgmt_key = key_idx;
+ dev->ieee80211_ptr->wext.default_mgmt_key = key.idx;
}
#endif
+ wdev_unlock(dev->ieee80211_ptr);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
unlock_rtnl:
@@ -1096,62 +1286,47 @@ static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info)
static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
- int err, i;
+ struct cfg80211_registered_device *rdev;
+ int err;
struct net_device *dev;
- struct key_params params;
- u8 key_idx = 0;
+ struct key_parse key;
u8 *mac_addr = NULL;
- memset(&params, 0, sizeof(params));
+ err = nl80211_parse_key(info, &key);
+ if (err)
+ return err;
- if (!info->attrs[NL80211_ATTR_KEY_CIPHER])
+ if (!key.p.key)
return -EINVAL;
- if (info->attrs[NL80211_ATTR_KEY_DATA]) {
- params.key = nla_data(info->attrs[NL80211_ATTR_KEY_DATA]);
- params.key_len = nla_len(info->attrs[NL80211_ATTR_KEY_DATA]);
- }
-
- if (info->attrs[NL80211_ATTR_KEY_SEQ]) {
- params.seq = nla_data(info->attrs[NL80211_ATTR_KEY_SEQ]);
- params.seq_len = nla_len(info->attrs[NL80211_ATTR_KEY_SEQ]);
- }
-
- if (info->attrs[NL80211_ATTR_KEY_IDX])
- key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]);
-
- params.cipher = nla_get_u32(info->attrs[NL80211_ATTR_KEY_CIPHER]);
-
if (info->attrs[NL80211_ATTR_MAC])
mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
- if (cfg80211_validate_key_settings(&params, key_idx, mac_addr))
- return -EINVAL;
-
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto unlock_rtnl;
- for (i = 0; i < drv->wiphy.n_cipher_suites; i++)
- if (params.cipher == drv->wiphy.cipher_suites[i])
- break;
- if (i == drv->wiphy.n_cipher_suites) {
- err = -EINVAL;
+ if (!rdev->ops->add_key) {
+ err = -EOPNOTSUPP;
goto out;
}
- if (!drv->ops->add_key) {
- err = -EOPNOTSUPP;
+ if (cfg80211_validate_key_settings(rdev, &key.p, key.idx, mac_addr)) {
+ err = -EINVAL;
goto out;
}
- err = drv->ops->add_key(&drv->wiphy, dev, key_idx, mac_addr, &params);
+ wdev_lock(dev->ieee80211_ptr);
+ err = nl80211_key_allowed(dev->ieee80211_ptr);
+ if (!err)
+ err = rdev->ops->add_key(&rdev->wiphy, dev, key.idx,
+ mac_addr, &key.p);
+ wdev_unlock(dev->ieee80211_ptr);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
unlock_rtnl:
rtnl_unlock();
@@ -1161,45 +1336,47 @@ static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
int err;
struct net_device *dev;
- u8 key_idx = 0;
u8 *mac_addr = NULL;
+ struct key_parse key;
- if (info->attrs[NL80211_ATTR_KEY_IDX])
- key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]);
-
- if (key_idx > 5)
- return -EINVAL;
+ err = nl80211_parse_key(info, &key);
+ if (err)
+ return err;
if (info->attrs[NL80211_ATTR_MAC])
mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto unlock_rtnl;
- if (!drv->ops->del_key) {
+ if (!rdev->ops->del_key) {
err = -EOPNOTSUPP;
goto out;
}
- err = drv->ops->del_key(&drv->wiphy, dev, key_idx, mac_addr);
+ wdev_lock(dev->ieee80211_ptr);
+ err = nl80211_key_allowed(dev->ieee80211_ptr);
+ if (!err)
+ err = rdev->ops->del_key(&rdev->wiphy, dev, key.idx, mac_addr);
#ifdef CONFIG_WIRELESS_EXT
if (!err) {
- if (key_idx == dev->ieee80211_ptr->wext.default_key)
+ if (key.idx == dev->ieee80211_ptr->wext.default_key)
dev->ieee80211_ptr->wext.default_key = -1;
- else if (key_idx == dev->ieee80211_ptr->wext.default_mgmt_key)
+ else if (key.idx == dev->ieee80211_ptr->wext.default_mgmt_key)
dev->ieee80211_ptr->wext.default_mgmt_key = -1;
}
#endif
+ wdev_unlock(dev->ieee80211_ptr);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
unlock_rtnl:
@@ -1212,7 +1389,7 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
{
int (*call)(struct wiphy *wiphy, struct net_device *dev,
struct beacon_parameters *info);
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
int err;
struct net_device *dev;
struct beacon_parameters params;
@@ -1223,7 +1400,7 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto unlock_rtnl;
@@ -1242,10 +1419,10 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- call = drv->ops->add_beacon;
+ call = rdev->ops->add_beacon;
break;
case NL80211_CMD_SET_BEACON:
- call = drv->ops->set_beacon;
+ call = rdev->ops->set_beacon;
break;
default:
WARN_ON(1);
@@ -1291,10 +1468,10 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- err = call(&drv->wiphy, dev, &params);
+ err = call(&rdev->wiphy, dev, &params);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
unlock_rtnl:
rtnl_unlock();
@@ -1304,17 +1481,17 @@ static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
int err;
struct net_device *dev;
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto unlock_rtnl;
- if (!drv->ops->del_beacon) {
+ if (!rdev->ops->del_beacon) {
err = -EOPNOTSUPP;
goto out;
}
@@ -1323,10 +1500,10 @@ static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info)
err = -EOPNOTSUPP;
goto out;
}
- err = drv->ops->del_beacon(&drv->wiphy, dev);
+ err = rdev->ops->del_beacon(&rdev->wiphy, dev);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
unlock_rtnl:
rtnl_unlock();
@@ -1433,6 +1610,8 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
+ NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, sinfo->generation);
+
sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO);
if (!sinfoattr)
goto nla_put_failure;
@@ -1520,13 +1699,13 @@ static int nl80211_dump_station(struct sk_buff *skb,
rtnl_lock();
- netdev = __dev_get_by_index(&init_net, ifidx);
+ netdev = __dev_get_by_index(sock_net(skb->sk), ifidx);
if (!netdev) {
err = -ENODEV;
goto out_rtnl;
}
- dev = cfg80211_get_dev_from_ifindex(ifidx);
+ dev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx);
if (IS_ERR(dev)) {
err = PTR_ERR(dev);
goto out_rtnl;
@@ -1560,7 +1739,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
cb->args[1] = sta_idx;
err = skb->len;
out_err:
- cfg80211_put_dev(dev);
+ cfg80211_unlock_rdev(dev);
out_rtnl:
rtnl_unlock();
@@ -1569,7 +1748,7 @@ static int nl80211_dump_station(struct sk_buff *skb,
static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
int err;
struct net_device *dev;
struct station_info sinfo;
@@ -1585,16 +1764,16 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto out_rtnl;
- if (!drv->ops->get_station) {
+ if (!rdev->ops->get_station) {
err = -EOPNOTSUPP;
goto out;
}
- err = drv->ops->get_station(&drv->wiphy, dev, mac_addr, &sinfo);
+ err = rdev->ops->get_station(&rdev->wiphy, dev, mac_addr, &sinfo);
if (err)
goto out;
@@ -1606,13 +1785,13 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
dev, mac_addr, &sinfo) < 0)
goto out_free;
- err = genlmsg_unicast(msg, info->snd_pid);
+ err = genlmsg_reply(msg, info);
goto out;
out_free:
nlmsg_free(msg);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
out_rtnl:
rtnl_unlock();
@@ -1623,14 +1802,16 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
/*
* Get vlan interface making sure it is on the right wiphy.
*/
-static int get_vlan(struct nlattr *vlanattr,
+static int get_vlan(struct genl_info *info,
struct cfg80211_registered_device *rdev,
struct net_device **vlan)
{
+ struct nlattr *vlanattr = info->attrs[NL80211_ATTR_STA_VLAN];
*vlan = NULL;
if (vlanattr) {
- *vlan = dev_get_by_index(&init_net, nla_get_u32(vlanattr));
+ *vlan = dev_get_by_index(genl_info_net(info),
+ nla_get_u32(vlanattr));
if (!*vlan)
return -ENODEV;
if (!(*vlan)->ieee80211_ptr)
@@ -1643,7 +1824,7 @@ static int get_vlan(struct nlattr *vlanattr,
static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
int err;
struct net_device *dev;
struct station_parameters params;
@@ -1685,11 +1866,11 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto out_rtnl;
- err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
+ err = get_vlan(info, rdev, &params.vlan);
if (err)
goto out;
@@ -1738,17 +1919,17 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
if (err)
goto out;
- if (!drv->ops->change_station) {
+ if (!rdev->ops->change_station) {
err = -EOPNOTSUPP;
goto out;
}
- err = drv->ops->change_station(&drv->wiphy, dev, mac_addr, &params);
+ err = rdev->ops->change_station(&rdev->wiphy, dev, mac_addr, &params);
out:
if (params.vlan)
dev_put(params.vlan);
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
out_rtnl:
rtnl_unlock();
@@ -1758,7 +1939,7 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
int err;
struct net_device *dev;
struct station_parameters params;
@@ -1798,11 +1979,11 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto out_rtnl;
- err = get_vlan(info->attrs[NL80211_ATTR_STA_VLAN], drv, &params.vlan);
+ err = get_vlan(info, rdev, &params.vlan);
if (err)
goto out;
@@ -1838,7 +2019,7 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
if (err)
goto out;
- if (!drv->ops->add_station) {
+ if (!rdev->ops->add_station) {
err = -EOPNOTSUPP;
goto out;
}
@@ -1848,12 +2029,12 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- err = drv->ops->add_station(&drv->wiphy, dev, mac_addr, &params);
+ err = rdev->ops->add_station(&rdev->wiphy, dev, mac_addr, &params);
out:
if (params.vlan)
dev_put(params.vlan);
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
out_rtnl:
rtnl_unlock();
@@ -1863,7 +2044,7 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
int err;
struct net_device *dev;
u8 *mac_addr = NULL;
@@ -1873,7 +2054,7 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto out_rtnl;
@@ -1884,15 +2065,15 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- if (!drv->ops->del_station) {
+ if (!rdev->ops->del_station) {
err = -EOPNOTSUPP;
goto out;
}
- err = drv->ops->del_station(&drv->wiphy, dev, mac_addr);
+ err = rdev->ops->del_station(&rdev->wiphy, dev, mac_addr);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
out_rtnl:
rtnl_unlock();
@@ -1916,6 +2097,8 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq,
NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, dst);
NLA_PUT(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop);
+ NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, pinfo->generation);
+
pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO);
if (!pinfoattr)
goto nla_put_failure;
@@ -1979,13 +2162,13 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
rtnl_lock();
- netdev = __dev_get_by_index(&init_net, ifidx);
+ netdev = __dev_get_by_index(sock_net(skb->sk), ifidx);
if (!netdev) {
err = -ENODEV;
goto out_rtnl;
}
- dev = cfg80211_get_dev_from_ifindex(ifidx);
+ dev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx);
if (IS_ERR(dev)) {
err = PTR_ERR(dev);
goto out_rtnl;
@@ -1998,7 +2181,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
if (netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
err = -EOPNOTSUPP;
- goto out;
+ goto out_err;
}
while (1) {
@@ -2023,7 +2206,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
cb->args[1] = path_idx;
err = skb->len;
out_err:
- cfg80211_put_dev(dev);
+ cfg80211_unlock_rdev(dev);
out_rtnl:
rtnl_unlock();
@@ -2032,7 +2215,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb,
static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
int err;
struct net_device *dev;
struct mpath_info pinfo;
@@ -2049,11 +2232,11 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto out_rtnl;
- if (!drv->ops->get_mpath) {
+ if (!rdev->ops->get_mpath) {
err = -EOPNOTSUPP;
goto out;
}
@@ -2063,7 +2246,7 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- err = drv->ops->get_mpath(&drv->wiphy, dev, dst, next_hop, &pinfo);
+ err = rdev->ops->get_mpath(&rdev->wiphy, dev, dst, next_hop, &pinfo);
if (err)
goto out;
@@ -2075,13 +2258,13 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info)
dev, dst, next_hop, &pinfo) < 0)
goto out_free;
- err = genlmsg_unicast(msg, info->snd_pid);
+ err = genlmsg_reply(msg, info);
goto out;
out_free:
nlmsg_free(msg);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
out_rtnl:
rtnl_unlock();
@@ -2091,7 +2274,7 @@ static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info)
static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
int err;
struct net_device *dev;
u8 *dst = NULL;
@@ -2108,11 +2291,11 @@ static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto out_rtnl;
- if (!drv->ops->change_mpath) {
+ if (!rdev->ops->change_mpath) {
err = -EOPNOTSUPP;
goto out;
}
@@ -2127,10 +2310,10 @@ static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- err = drv->ops->change_mpath(&drv->wiphy, dev, dst, next_hop);
+ err = rdev->ops->change_mpath(&rdev->wiphy, dev, dst, next_hop);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
out_rtnl:
rtnl_unlock();
@@ -2139,7 +2322,7 @@ static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info)
}
static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
int err;
struct net_device *dev;
u8 *dst = NULL;
@@ -2156,11 +2339,11 @@ static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto out_rtnl;
- if (!drv->ops->add_mpath) {
+ if (!rdev->ops->add_mpath) {
err = -EOPNOTSUPP;
goto out;
}
@@ -2175,10 +2358,10 @@ static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- err = drv->ops->add_mpath(&drv->wiphy, dev, dst, next_hop);
+ err = rdev->ops->add_mpath(&rdev->wiphy, dev, dst, next_hop);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
out_rtnl:
rtnl_unlock();
@@ -2188,7 +2371,7 @@ static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info)
static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
int err;
struct net_device *dev;
u8 *dst = NULL;
@@ -2198,19 +2381,19 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto out_rtnl;
- if (!drv->ops->del_mpath) {
+ if (!rdev->ops->del_mpath) {
err = -EOPNOTSUPP;
goto out;
}
- err = drv->ops->del_mpath(&drv->wiphy, dev, dst);
+ err = rdev->ops->del_mpath(&rdev->wiphy, dev, dst);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
out_rtnl:
rtnl_unlock();
@@ -2220,7 +2403,7 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
int err;
struct net_device *dev;
struct bss_parameters params;
@@ -2249,11 +2432,11 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto out_rtnl;
- if (!drv->ops->change_bss) {
+ if (!rdev->ops->change_bss) {
err = -EOPNOTSUPP;
goto out;
}
@@ -2263,10 +2446,10 @@ static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- err = drv->ops->change_bss(&drv->wiphy, dev, &params);
+ err = rdev->ops->change_bss(&rdev->wiphy, dev, &params);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
out_rtnl:
rtnl_unlock();
@@ -2357,7 +2540,7 @@ static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
static int nl80211_get_mesh_params(struct sk_buff *skb,
struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
struct mesh_config cur_params;
int err;
struct net_device *dev;
@@ -2368,17 +2551,17 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
rtnl_lock();
/* Look up our device */
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto out_rtnl;
- if (!drv->ops->get_mesh_params) {
+ if (!rdev->ops->get_mesh_params) {
err = -EOPNOTSUPP;
goto out;
}
/* Get the mesh params */
- err = drv->ops->get_mesh_params(&drv->wiphy, dev, &cur_params);
+ err = rdev->ops->get_mesh_params(&rdev->wiphy, dev, &cur_params);
if (err)
goto out;
@@ -2424,7 +2607,7 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
cur_params.dot11MeshHWMPnetDiameterTraversalTime);
nla_nest_end(msg, pinfoattr);
genlmsg_end(msg, hdr);
- err = genlmsg_unicast(msg, info->snd_pid);
+ err = genlmsg_reply(msg, info);
goto out;
nla_put_failure:
@@ -2432,7 +2615,7 @@ static int nl80211_get_mesh_params(struct sk_buff *skb,
err = -EMSGSIZE;
out:
/* Cleanup */
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
out_rtnl:
rtnl_unlock();
@@ -2470,7 +2653,7 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info)
{
int err;
u32 mask;
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
struct net_device *dev;
struct mesh_config cfg;
struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1];
@@ -2485,11 +2668,11 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto out_rtnl;
- if (!drv->ops->set_mesh_params) {
+ if (!rdev->ops->set_mesh_params) {
err = -EOPNOTSUPP;
goto out;
}
@@ -2534,11 +2717,11 @@ static int nl80211_set_mesh_params(struct sk_buff *skb, struct genl_info *info)
nla_get_u16);
/* Apply changes */
- err = drv->ops->set_mesh_params(&drv->wiphy, dev, &cfg, mask);
+ err = rdev->ops->set_mesh_params(&rdev->wiphy, dev, &cfg, mask);
out:
/* cleanup */
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
out_rtnl:
rtnl_unlock();
@@ -2612,7 +2795,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
nla_nest_end(msg, nl_reg_rules);
genlmsg_end(msg, hdr);
- err = genlmsg_unicast(msg, info->snd_pid);
+ err = genlmsg_reply(msg, info);
goto out;
nla_put_failure:
@@ -2698,16 +2881,41 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
return r;
}
+static int validate_scan_freqs(struct nlattr *freqs)
+{
+ struct nlattr *attr1, *attr2;
+ int n_channels = 0, tmp1, tmp2;
+
+ nla_for_each_nested(attr1, freqs, tmp1) {
+ n_channels++;
+ /*
+ * Some hardware has a limited channel list for
+ * scanning, and it is pretty much nonsensical
+ * to scan for a channel twice, so disallow that
+ * and don't require drivers to check that the
+ * channel list they get isn't longer than what
+ * they can scan, as long as they can scan all
+ * the channels they registered at once.
+ */
+ nla_for_each_nested(attr2, freqs, tmp2)
+ if (attr1 != attr2 &&
+ nla_get_u32(attr1) == nla_get_u32(attr2))
+ return 0;
+ }
+
+ return n_channels;
+}
+
static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
struct net_device *dev;
struct cfg80211_scan_request *request;
struct cfg80211_ssid *ssid;
struct ieee80211_channel *channel;
struct nlattr *attr;
struct wiphy *wiphy;
- int err, tmp, n_ssids = 0, n_channels = 0, i;
+ int err, tmp, n_ssids = 0, n_channels, i;
enum ieee80211_band band;
size_t ie_len;
@@ -2716,13 +2924,13 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto out_rtnl;
- wiphy = &drv->wiphy;
+ wiphy = &rdev->wiphy;
- if (!drv->ops->scan) {
+ if (!rdev->ops->scan) {
err = -EOPNOTSUPP;
goto out;
}
@@ -2732,19 +2940,21 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- if (drv->scan_req) {
+ if (rdev->scan_req) {
err = -EBUSY;
goto out;
}
if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
- nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_FREQUENCIES], tmp)
- n_channels++;
+ n_channels = validate_scan_freqs(
+ info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
if (!n_channels) {
err = -EINVAL;
goto out;
}
} else {
+ n_channels = 0;
+
for (band = 0; band < IEEE80211_NUM_BANDS; band++)
if (wiphy->bands[band])
n_channels += wiphy->bands[band]->n_channels;
@@ -2778,10 +2988,9 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- request->channels = (void *)((char *)request + sizeof(*request));
request->n_channels = n_channels;
if (n_ssids)
- request->ssids = (void *)(request->channels + n_channels);
+ request->ssids = (void *)&request->channels[n_channels];
request->n_ssids = n_ssids;
if (ie_len) {
if (request->ssids)
@@ -2836,19 +3045,24 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
request->ie_len);
}
- request->ifidx = dev->ifindex;
- request->wiphy = &drv->wiphy;
+ request->dev = dev;
+ request->wiphy = &rdev->wiphy;
+
+ rdev->scan_req = request;
+ err = rdev->ops->scan(&rdev->wiphy, dev, request);
- drv->scan_req = request;
- err = drv->ops->scan(&drv->wiphy, dev, request);
+ if (!err) {
+ nl80211_send_scan_start(rdev, dev);
+ dev_hold(dev);
+ }
out_free:
if (err) {
- drv->scan_req = NULL;
+ rdev->scan_req = NULL;
kfree(request);
}
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
out_rtnl:
rtnl_unlock();
@@ -2858,20 +3072,23 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
struct cfg80211_registered_device *rdev,
- struct net_device *dev,
- struct cfg80211_bss *res)
+ struct wireless_dev *wdev,
+ struct cfg80211_internal_bss *intbss)
{
+ struct cfg80211_bss *res = &intbss->pub;
void *hdr;
struct nlattr *bss;
+ int i;
+
+ ASSERT_WDEV_LOCK(wdev);
hdr = nl80211hdr_put(msg, pid, seq, flags,
NL80211_CMD_NEW_SCAN_RESULTS);
if (!hdr)
return -1;
- NLA_PUT_U32(msg, NL80211_ATTR_SCAN_GENERATION,
- rdev->bss_generation);
- NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
+ NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation);
+ NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex);
bss = nla_nest_start(msg, NL80211_ATTR_BSS);
if (!bss)
@@ -2900,6 +3117,28 @@ static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
break;
}
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ if (intbss == wdev->current_bss)
+ NLA_PUT_U32(msg, NL80211_BSS_STATUS,
+ NL80211_BSS_STATUS_ASSOCIATED);
+ else for (i = 0; i < MAX_AUTH_BSSES; i++) {
+ if (intbss != wdev->auth_bsses[i])
+ continue;
+ NLA_PUT_U32(msg, NL80211_BSS_STATUS,
+ NL80211_BSS_STATUS_AUTHENTICATED);
+ break;
+ }
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ if (intbss == wdev->current_bss)
+ NLA_PUT_U32(msg, NL80211_BSS_STATUS,
+ NL80211_BSS_STATUS_IBSS_JOINED);
+ break;
+ default:
+ break;
+ }
+
nla_nest_end(msg, bss);
return genlmsg_end(msg, hdr);
@@ -2912,9 +3151,10 @@ static int nl80211_send_bss(struct sk_buff *msg, u32 pid, u32 seq, int flags,
static int nl80211_dump_scan(struct sk_buff *skb,
struct netlink_callback *cb)
{
- struct cfg80211_registered_device *dev;
- struct net_device *netdev;
+ struct cfg80211_registered_device *rdev;
+ struct net_device *dev;
struct cfg80211_internal_bss *scan;
+ struct wireless_dev *wdev;
int ifidx = cb->args[0];
int start = cb->args[1], idx = 0;
int err;
@@ -2935,58 +3175,83 @@ static int nl80211_dump_scan(struct sk_buff *skb,
cb->args[0] = ifidx;
}
- netdev = dev_get_by_index(&init_net, ifidx);
- if (!netdev)
+ dev = dev_get_by_index(sock_net(skb->sk), ifidx);
+ if (!dev)
return -ENODEV;
- dev = cfg80211_get_dev_from_ifindex(ifidx);
- if (IS_ERR(dev)) {
- err = PTR_ERR(dev);
+ rdev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx);
+ if (IS_ERR(rdev)) {
+ err = PTR_ERR(rdev);
goto out_put_netdev;
}
- spin_lock_bh(&dev->bss_lock);
- cfg80211_bss_expire(dev);
+ wdev = dev->ieee80211_ptr;
+
+ wdev_lock(wdev);
+ spin_lock_bh(&rdev->bss_lock);
+ cfg80211_bss_expire(rdev);
- list_for_each_entry(scan, &dev->bss_list, list) {
+ list_for_each_entry(scan, &rdev->bss_list, list) {
if (++idx <= start)
continue;
if (nl80211_send_bss(skb,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- dev, netdev, &scan->pub) < 0) {
+ rdev, wdev, scan) < 0) {
idx--;
goto out;
}
}
out:
- spin_unlock_bh(&dev->bss_lock);
+ spin_unlock_bh(&rdev->bss_lock);
+ wdev_unlock(wdev);
cb->args[1] = idx;
err = skb->len;
- cfg80211_put_dev(dev);
+ cfg80211_unlock_rdev(rdev);
out_put_netdev:
- dev_put(netdev);
+ dev_put(dev);
return err;
}
static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type)
{
- return auth_type == NL80211_AUTHTYPE_OPEN_SYSTEM ||
- auth_type == NL80211_AUTHTYPE_SHARED_KEY ||
- auth_type == NL80211_AUTHTYPE_FT ||
- auth_type == NL80211_AUTHTYPE_NETWORK_EAP;
+ return auth_type <= NL80211_AUTHTYPE_MAX;
}
+static bool nl80211_valid_wpa_versions(u32 wpa_versions)
+{
+ return !(wpa_versions & ~(NL80211_WPA_VERSION_1 |
+ NL80211_WPA_VERSION_2));
+}
+
+static bool nl80211_valid_akm_suite(u32 akm)
+{
+ return akm == WLAN_AKM_SUITE_8021X ||
+ akm == WLAN_AKM_SUITE_PSK;
+}
+
+static bool nl80211_valid_cipher_suite(u32 cipher)
+{
+ return cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ cipher == WLAN_CIPHER_SUITE_WEP104 ||
+ cipher == WLAN_CIPHER_SUITE_TKIP ||
+ cipher == WLAN_CIPHER_SUITE_CCMP ||
+ cipher == WLAN_CIPHER_SUITE_AES_CMAC;
+}
+
+
static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
struct net_device *dev;
- struct cfg80211_auth_request req;
- struct wiphy *wiphy;
- int err;
+ struct ieee80211_channel *chan;
+ const u8 *bssid, *ssid, *ie = NULL;
+ int err, ssid_len, ie_len = 0;
+ enum nl80211_auth_type auth_type;
+ struct key_parse key;
if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
return -EINVAL;
@@ -2997,13 +3262,38 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
if (!info->attrs[NL80211_ATTR_AUTH_TYPE])
return -EINVAL;
+ if (!info->attrs[NL80211_ATTR_SSID])
+ return -EINVAL;
+
+ if (!info->attrs[NL80211_ATTR_WIPHY_FREQ])
+ return -EINVAL;
+
+ err = nl80211_parse_key(info, &key);
+ if (err)
+ return err;
+
+ if (key.idx >= 0) {
+ if (!key.p.key || !key.p.key_len)
+ return -EINVAL;
+ if ((key.p.cipher != WLAN_CIPHER_SUITE_WEP40 ||
+ key.p.key_len != WLAN_KEY_LEN_WEP40) &&
+ (key.p.cipher != WLAN_CIPHER_SUITE_WEP104 ||
+ key.p.key_len != WLAN_KEY_LEN_WEP104))
+ return -EINVAL;
+ if (key.idx > 4)
+ return -EINVAL;
+ } else {
+ key.p.key_len = 0;
+ key.p.key = NULL;
+ }
+
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto unlock_rtnl;
- if (!drv->ops->auth) {
+ if (!rdev->ops->auth) {
err = -EOPNOTSUPP;
goto out;
}
@@ -3018,69 +3308,130 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- wiphy = &drv->wiphy;
- memset(&req, 0, sizeof(req));
-
- req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
-
- if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
- req.chan = ieee80211_get_channel(
- wiphy,
- nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
- if (!req.chan) {
- err = -EINVAL;
- goto out;
- }
+ bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ chan = ieee80211_get_channel(&rdev->wiphy,
+ nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
+ if (!chan || (chan->flags & IEEE80211_CHAN_DISABLED)) {
+ err = -EINVAL;
+ goto out;
}
- if (info->attrs[NL80211_ATTR_SSID]) {
- req.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
- req.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
- }
+ ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
+ ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
if (info->attrs[NL80211_ATTR_IE]) {
- req.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
- req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+ ie = nla_data(info->attrs[NL80211_ATTR_IE]);
+ ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
}
- req.auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
- if (!nl80211_valid_auth_type(req.auth_type)) {
+ auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
+ if (!nl80211_valid_auth_type(auth_type)) {
err = -EINVAL;
goto out;
}
- err = drv->ops->auth(&drv->wiphy, dev, &req);
+ err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
+ ssid, ssid_len, ie, ie_len,
+ key.p.key, key.p.key_len, key.idx);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
unlock_rtnl:
rtnl_unlock();
return err;
}
+static int nl80211_crypto_settings(struct genl_info *info,
+ struct cfg80211_crypto_settings *settings,
+ int cipher_limit)
+{
+ memset(settings, 0, sizeof(*settings));
+
+ settings->control_port = info->attrs[NL80211_ATTR_CONTROL_PORT];
+
+ if (info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]) {
+ void *data;
+ int len, i;
+
+ data = nla_data(info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]);
+ len = nla_len(info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]);
+ settings->n_ciphers_pairwise = len / sizeof(u32);
+
+ if (len % sizeof(u32))
+ return -EINVAL;
+
+ if (settings->n_ciphers_pairwise > cipher_limit)
+ return -EINVAL;
+
+ memcpy(settings->ciphers_pairwise, data, len);
+
+ for (i = 0; i < settings->n_ciphers_pairwise; i++)
+ if (!nl80211_valid_cipher_suite(
+ settings->ciphers_pairwise[i]))
+ return -EINVAL;
+ }
+
+ if (info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]) {
+ settings->cipher_group =
+ nla_get_u32(info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]);
+ if (!nl80211_valid_cipher_suite(settings->cipher_group))
+ return -EINVAL;
+ }
+
+ if (info->attrs[NL80211_ATTR_WPA_VERSIONS]) {
+ settings->wpa_versions =
+ nla_get_u32(info->attrs[NL80211_ATTR_WPA_VERSIONS]);
+ if (!nl80211_valid_wpa_versions(settings->wpa_versions))
+ return -EINVAL;
+ }
+
+ if (info->attrs[NL80211_ATTR_AKM_SUITES]) {
+ void *data;
+ int len, i;
+
+ data = nla_data(info->attrs[NL80211_ATTR_AKM_SUITES]);
+ len = nla_len(info->attrs[NL80211_ATTR_AKM_SUITES]);
+ settings->n_akm_suites = len / sizeof(u32);
+
+ if (len % sizeof(u32))
+ return -EINVAL;
+
+ memcpy(settings->akm_suites, data, len);
+
+ for (i = 0; i < settings->n_ciphers_pairwise; i++)
+ if (!nl80211_valid_akm_suite(settings->akm_suites[i]))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
struct net_device *dev;
- struct cfg80211_assoc_request req;
- struct wiphy *wiphy;
- int err;
+ struct cfg80211_crypto_settings crypto;
+ struct ieee80211_channel *chan, *fixedchan;
+ const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL;
+ int err, ssid_len, ie_len = 0;
+ bool use_mfp = false;
if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
return -EINVAL;
if (!info->attrs[NL80211_ATTR_MAC] ||
- !info->attrs[NL80211_ATTR_SSID])
+ !info->attrs[NL80211_ATTR_SSID] ||
+ !info->attrs[NL80211_ATTR_WIPHY_FREQ])
return -EINVAL;
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto unlock_rtnl;
- if (!drv->ops->assoc) {
+ if (!rdev->ops->assoc) {
err = -EOPNOTSUPP;
goto out;
}
@@ -3095,46 +3446,54 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- wiphy = &drv->wiphy;
- memset(&req, 0, sizeof(req));
+ bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
- req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ chan = ieee80211_get_channel(&rdev->wiphy,
+ nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
+ if (!chan || (chan->flags & IEEE80211_CHAN_DISABLED)) {
+ err = -EINVAL;
+ goto out;
+ }
- if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
- req.chan = ieee80211_get_channel(
- wiphy,
- nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
- if (!req.chan) {
- err = -EINVAL;
- goto out;
- }
+ mutex_lock(&rdev->devlist_mtx);
+ fixedchan = rdev_fixed_channel(rdev, NULL);
+ if (fixedchan && chan != fixedchan) {
+ err = -EBUSY;
+ mutex_unlock(&rdev->devlist_mtx);
+ goto out;
}
+ mutex_unlock(&rdev->devlist_mtx);
- req.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
- req.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
+ ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
+ ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
if (info->attrs[NL80211_ATTR_IE]) {
- req.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
- req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+ ie = nla_data(info->attrs[NL80211_ATTR_IE]);
+ ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
}
if (info->attrs[NL80211_ATTR_USE_MFP]) {
- enum nl80211_mfp use_mfp =
+ enum nl80211_mfp mfp =
nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]);
- if (use_mfp == NL80211_MFP_REQUIRED)
- req.use_mfp = true;
- else if (use_mfp != NL80211_MFP_NO) {
+ if (mfp == NL80211_MFP_REQUIRED)
+ use_mfp = true;
+ else if (mfp != NL80211_MFP_NO) {
err = -EINVAL;
goto out;
}
}
- req.control_port = info->attrs[NL80211_ATTR_CONTROL_PORT];
+ if (info->attrs[NL80211_ATTR_PREV_BSSID])
+ prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
- err = drv->ops->assoc(&drv->wiphy, dev, &req);
+ err = nl80211_crypto_settings(info, &crypto, 1);
+ if (!err)
+ err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
+ ssid, ssid_len, ie, ie_len, use_mfp,
+ &crypto);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
unlock_rtnl:
rtnl_unlock();
@@ -3143,11 +3502,11 @@ unlock_rtnl:
static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
struct net_device *dev;
- struct cfg80211_deauth_request req;
- struct wiphy *wiphy;
- int err;
+ const u8 *ie = NULL, *bssid;
+ int err, ie_len = 0;
+ u16 reason_code;
if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
return -EINVAL;
@@ -3160,11 +3519,11 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto unlock_rtnl;
- if (!drv->ops->deauth) {
+ if (!rdev->ops->deauth) {
err = -EOPNOTSUPP;
goto out;
}
@@ -3179,27 +3538,24 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- wiphy = &drv->wiphy;
- memset(&req, 0, sizeof(req));
+ bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
- req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
-
- req.reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);
- if (req.reason_code == 0) {
+ reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);
+ if (reason_code == 0) {
/* Reason Code 0 is reserved */
err = -EINVAL;
goto out;
}
if (info->attrs[NL80211_ATTR_IE]) {
- req.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
- req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+ ie = nla_data(info->attrs[NL80211_ATTR_IE]);
+ ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
}
- err = drv->ops->deauth(&drv->wiphy, dev, &req);
+ err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
unlock_rtnl:
rtnl_unlock();
@@ -3208,11 +3564,11 @@ unlock_rtnl:
static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
struct net_device *dev;
- struct cfg80211_disassoc_request req;
- struct wiphy *wiphy;
- int err;
+ const u8 *ie = NULL, *bssid;
+ int err, ie_len = 0;
+ u16 reason_code;
if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
return -EINVAL;
@@ -3225,11 +3581,11 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto unlock_rtnl;
- if (!drv->ops->disassoc) {
+ if (!rdev->ops->disassoc) {
err = -EOPNOTSUPP;
goto out;
}
@@ -3244,27 +3600,24 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- wiphy = &drv->wiphy;
- memset(&req, 0, sizeof(req));
+ bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
- req.peer_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
-
- req.reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);
- if (req.reason_code == 0) {
+ reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);
+ if (reason_code == 0) {
/* Reason Code 0 is reserved */
err = -EINVAL;
goto out;
}
if (info->attrs[NL80211_ATTR_IE]) {
- req.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
- req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+ ie = nla_data(info->attrs[NL80211_ATTR_IE]);
+ ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
}
- err = drv->ops->disassoc(&drv->wiphy, dev, &req);
+ err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
unlock_rtnl:
rtnl_unlock();
@@ -3273,10 +3626,11 @@ unlock_rtnl:
static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
struct net_device *dev;
struct cfg80211_ibss_params ibss;
struct wiphy *wiphy;
+ struct cfg80211_cached_keys *connkeys = NULL;
int err;
memset(&ibss, 0, sizeof(ibss));
@@ -3300,11 +3654,11 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto unlock_rtnl;
- if (!drv->ops->join_ibss) {
+ if (!rdev->ops->join_ibss) {
err = -EOPNOTSUPP;
goto out;
}
@@ -3319,7 +3673,7 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- wiphy = &drv->wiphy;
+ wiphy = &rdev->wiphy;
if (info->attrs[NL80211_ATTR_MAC])
ibss.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
@@ -3341,30 +3695,43 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
}
ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED];
+ ibss.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];
+
+ if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) {
+ connkeys = nl80211_parse_connkeys(rdev,
+ info->attrs[NL80211_ATTR_KEYS]);
+ if (IS_ERR(connkeys)) {
+ err = PTR_ERR(connkeys);
+ connkeys = NULL;
+ goto out;
+ }
+ }
- err = cfg80211_join_ibss(drv, dev, &ibss);
+ err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
unlock_rtnl:
+ if (err)
+ kfree(connkeys);
rtnl_unlock();
return err;
}
static int nl80211_leave_ibss(struct sk_buff *skb, struct genl_info *info)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
struct net_device *dev;
int err;
rtnl_lock();
- err = get_drv_dev_by_info_ifindex(info->attrs, &drv, &dev);
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
if (err)
goto unlock_rtnl;
- if (!drv->ops->leave_ibss) {
+ if (!rdev->ops->leave_ibss) {
err = -EOPNOTSUPP;
goto out;
}
@@ -3379,12 +3746,309 @@ static int nl80211_leave_ibss(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- err = cfg80211_leave_ibss(drv, dev, false);
+ err = cfg80211_leave_ibss(rdev, dev, false);
+
+out:
+ cfg80211_unlock_rdev(rdev);
+ dev_put(dev);
+unlock_rtnl:
+ rtnl_unlock();
+ return err;
+}
+
+#ifdef CONFIG_NL80211_TESTMODE
+static struct genl_multicast_group nl80211_testmode_mcgrp = {
+ .name = "testmode",
+};
+
+static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev;
+ int err;
+
+ if (!info->attrs[NL80211_ATTR_TESTDATA])
+ return -EINVAL;
+
+ rtnl_lock();
+
+ rdev = cfg80211_get_dev_from_info(info);
+ if (IS_ERR(rdev)) {
+ err = PTR_ERR(rdev);
+ goto unlock_rtnl;
+ }
+
+ err = -EOPNOTSUPP;
+ if (rdev->ops->testmode_cmd) {
+ rdev->testmode_info = info;
+ err = rdev->ops->testmode_cmd(&rdev->wiphy,
+ nla_data(info->attrs[NL80211_ATTR_TESTDATA]),
+ nla_len(info->attrs[NL80211_ATTR_TESTDATA]));
+ rdev->testmode_info = NULL;
+ }
+
+ cfg80211_unlock_rdev(rdev);
+
+ unlock_rtnl:
+ rtnl_unlock();
+ return err;
+}
+
+static struct sk_buff *
+__cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev,
+ int approxlen, u32 pid, u32 seq, gfp_t gfp)
+{
+ struct sk_buff *skb;
+ void *hdr;
+ struct nlattr *data;
+
+ skb = nlmsg_new(approxlen + 100, gfp);
+ if (!skb)
+ return NULL;
+
+ hdr = nl80211hdr_put(skb, pid, seq, 0, NL80211_CMD_TESTMODE);
+ if (!hdr) {
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ NLA_PUT_U32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ data = nla_nest_start(skb, NL80211_ATTR_TESTDATA);
+
+ ((void **)skb->cb)[0] = rdev;
+ ((void **)skb->cb)[1] = hdr;
+ ((void **)skb->cb)[2] = data;
+
+ return skb;
+
+ nla_put_failure:
+ kfree_skb(skb);
+ return NULL;
+}
+
+struct sk_buff *cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy,
+ int approxlen)
+{
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ if (WARN_ON(!rdev->testmode_info))
+ return NULL;
+
+ return __cfg80211_testmode_alloc_skb(rdev, approxlen,
+ rdev->testmode_info->snd_pid,
+ rdev->testmode_info->snd_seq,
+ GFP_KERNEL);
+}
+EXPORT_SYMBOL(cfg80211_testmode_alloc_reply_skb);
+
+int cfg80211_testmode_reply(struct sk_buff *skb)
+{
+ struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
+ void *hdr = ((void **)skb->cb)[1];
+ struct nlattr *data = ((void **)skb->cb)[2];
+
+ if (WARN_ON(!rdev->testmode_info)) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ nla_nest_end(skb, data);
+ genlmsg_end(skb, hdr);
+ return genlmsg_reply(skb, rdev->testmode_info);
+}
+EXPORT_SYMBOL(cfg80211_testmode_reply);
+
+struct sk_buff *cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy,
+ int approxlen, gfp_t gfp)
+{
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ return __cfg80211_testmode_alloc_skb(rdev, approxlen, 0, 0, gfp);
+}
+EXPORT_SYMBOL(cfg80211_testmode_alloc_event_skb);
+
+void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
+{
+ void *hdr = ((void **)skb->cb)[1];
+ struct nlattr *data = ((void **)skb->cb)[2];
+
+ nla_nest_end(skb, data);
+ genlmsg_end(skb, hdr);
+ genlmsg_multicast(skb, 0, nl80211_testmode_mcgrp.id, gfp);
+}
+EXPORT_SYMBOL(cfg80211_testmode_event);
+#endif
+
+static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev;
+ struct net_device *dev;
+ struct cfg80211_connect_params connect;
+ struct wiphy *wiphy;
+ struct cfg80211_cached_keys *connkeys = NULL;
+ int err;
+
+ memset(&connect, 0, sizeof(connect));
+
+ if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
+ return -EINVAL;
+
+ if (!info->attrs[NL80211_ATTR_SSID] ||
+ !nla_len(info->attrs[NL80211_ATTR_SSID]))
+ return -EINVAL;
+
+ if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
+ connect.auth_type =
+ nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
+ if (!nl80211_valid_auth_type(connect.auth_type))
+ return -EINVAL;
+ } else
+ connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
+
+ connect.privacy = info->attrs[NL80211_ATTR_PRIVACY];
+
+ err = nl80211_crypto_settings(info, &connect.crypto,
+ NL80211_MAX_NR_CIPHER_SUITES);
+ if (err)
+ return err;
+ rtnl_lock();
+
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+ if (err)
+ goto unlock_rtnl;
+
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!netif_running(dev)) {
+ err = -ENETDOWN;
+ goto out;
+ }
+
+ wiphy = &rdev->wiphy;
+
+ if (info->attrs[NL80211_ATTR_MAC])
+ connect.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ connect.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
+ connect.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
+
+ if (info->attrs[NL80211_ATTR_IE]) {
+ connect.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
+ connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
+ }
+
+ if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
+ connect.channel =
+ ieee80211_get_channel(wiphy,
+ nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
+ if (!connect.channel ||
+ connect.channel->flags & IEEE80211_CHAN_DISABLED) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) {
+ connkeys = nl80211_parse_connkeys(rdev,
+ info->attrs[NL80211_ATTR_KEYS]);
+ if (IS_ERR(connkeys)) {
+ err = PTR_ERR(connkeys);
+ connkeys = NULL;
+ goto out;
+ }
+ }
+
+ err = cfg80211_connect(rdev, dev, &connect, connkeys);
out:
- cfg80211_put_dev(drv);
+ cfg80211_unlock_rdev(rdev);
dev_put(dev);
unlock_rtnl:
+ if (err)
+ kfree(connkeys);
+ rtnl_unlock();
+ return err;
+}
+
+static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev;
+ struct net_device *dev;
+ int err;
+ u16 reason;
+
+ if (!info->attrs[NL80211_ATTR_REASON_CODE])
+ reason = WLAN_REASON_DEAUTH_LEAVING;
+ else
+ reason = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);
+
+ if (reason == 0)
+ return -EINVAL;
+
+ rtnl_lock();
+
+ err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+ if (err)
+ goto unlock_rtnl;
+
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if (!netif_running(dev)) {
+ err = -ENETDOWN;
+ goto out;
+ }
+
+ err = cfg80211_disconnect(rdev, dev, reason, true);
+
+out:
+ cfg80211_unlock_rdev(rdev);
+ dev_put(dev);
+unlock_rtnl:
+ rtnl_unlock();
+ return err;
+}
+
+static int nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev;
+ struct net *net;
+ int err;
+ u32 pid;
+
+ if (!info->attrs[NL80211_ATTR_PID])
+ return -EINVAL;
+
+ pid = nla_get_u32(info->attrs[NL80211_ATTR_PID]);
+
+ rtnl_lock();
+
+ rdev = cfg80211_get_dev_from_info(info);
+ if (IS_ERR(rdev)) {
+ err = PTR_ERR(rdev);
+ goto out;
+ }
+
+ net = get_net_ns_by_pid(pid);
+ if (IS_ERR(net)) {
+ err = PTR_ERR(net);
+ goto out;
+ }
+
+ err = 0;
+
+ /* check if anything to do */
+ if (net_eq(wiphy_net(&rdev->wiphy), net))
+ goto out_put_net;
+
+ err = cfg80211_switch_netns(rdev, net);
+ out_put_net:
+ put_net(net);
+ out:
+ cfg80211_unlock_rdev(rdev);
rtnl_unlock();
return err;
}
@@ -3602,6 +4266,32 @@ static struct genl_ops nl80211_ops[] = {
.policy = nl80211_policy,
.flags = GENL_ADMIN_PERM,
},
+#ifdef CONFIG_NL80211_TESTMODE
+ {
+ .cmd = NL80211_CMD_TESTMODE,
+ .doit = nl80211_testmode_do,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+#endif
+ {
+ .cmd = NL80211_CMD_CONNECT,
+ .doit = nl80211_connect,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NL80211_CMD_DISCONNECT,
+ .doit = nl80211_disconnect,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = NL80211_CMD_SET_WIPHY_NETNS,
+ .doit = nl80211_wiphy_netns,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
};
static struct genl_multicast_group nl80211_mlme_mcgrp = {
.name = "mlme",
@@ -3633,7 +4323,8 @@ void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev)
return;
}
- genlmsg_multicast(msg, 0, nl80211_config_mcgrp.id, GFP_KERNEL);
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_config_mcgrp.id, GFP_KERNEL);
}
static int nl80211_add_scan_req(struct sk_buff *msg,
@@ -3643,6 +4334,8 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
struct nlattr *nest;
int i;
+ ASSERT_RDEV_LOCK(rdev);
+
if (WARN_ON(!req))
return 0;
@@ -3668,11 +4361,11 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
return -ENOBUFS;
}
-static int nl80211_send_scan_donemsg(struct sk_buff *msg,
- struct cfg80211_registered_device *rdev,
- struct net_device *netdev,
- u32 pid, u32 seq, int flags,
- u32 cmd)
+static int nl80211_send_scan_msg(struct sk_buff *msg,
+ struct cfg80211_registered_device *rdev,
+ struct net_device *netdev,
+ u32 pid, u32 seq, int flags,
+ u32 cmd)
{
void *hdr;
@@ -3693,6 +4386,25 @@ static int nl80211_send_scan_donemsg(struct sk_buff *msg,
return -EMSGSIZE;
}
+void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev)
+{
+ struct sk_buff *msg;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0,
+ NL80211_CMD_TRIGGER_SCAN) < 0) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_scan_mcgrp.id, GFP_KERNEL);
+}
+
void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
struct net_device *netdev)
{
@@ -3702,13 +4414,14 @@ void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
if (!msg)
return;
- if (nl80211_send_scan_donemsg(msg, rdev, netdev, 0, 0, 0,
- NL80211_CMD_NEW_SCAN_RESULTS) < 0) {
+ if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0,
+ NL80211_CMD_NEW_SCAN_RESULTS) < 0) {
nlmsg_free(msg);
return;
}
- genlmsg_multicast(msg, 0, nl80211_scan_mcgrp.id, GFP_KERNEL);
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_scan_mcgrp.id, GFP_KERNEL);
}
void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
@@ -3720,13 +4433,14 @@ void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
if (!msg)
return;
- if (nl80211_send_scan_donemsg(msg, rdev, netdev, 0, 0, 0,
- NL80211_CMD_SCAN_ABORTED) < 0) {
+ if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0,
+ NL80211_CMD_SCAN_ABORTED) < 0) {
nlmsg_free(msg);
return;
}
- genlmsg_multicast(msg, 0, nl80211_scan_mcgrp.id, GFP_KERNEL);
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_scan_mcgrp.id, GFP_KERNEL);
}
/*
@@ -3775,7 +4489,10 @@ void nl80211_send_reg_change_event(struct regulatory_request *request)
return;
}
- genlmsg_multicast(msg, 0, nl80211_regulatory_mcgrp.id, GFP_KERNEL);
+ rcu_read_lock();
+ genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id,
+ GFP_ATOMIC);
+ rcu_read_unlock();
return;
@@ -3787,12 +4504,12 @@ nla_put_failure:
static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
struct net_device *netdev,
const u8 *buf, size_t len,
- enum nl80211_commands cmd)
+ enum nl80211_commands cmd, gfp_t gfp)
{
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -3811,7 +4528,8 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
return;
}
- genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_ATOMIC);
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
return;
nla_put_failure:
@@ -3820,42 +4538,45 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
}
void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, const u8 *buf, size_t len)
+ struct net_device *netdev, const u8 *buf,
+ size_t len, gfp_t gfp)
{
nl80211_send_mlme_event(rdev, netdev, buf, len,
- NL80211_CMD_AUTHENTICATE);
+ NL80211_CMD_AUTHENTICATE, gfp);
}
void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *buf,
- size_t len)
+ size_t len, gfp_t gfp)
{
- nl80211_send_mlme_event(rdev, netdev, buf, len, NL80211_CMD_ASSOCIATE);
+ nl80211_send_mlme_event(rdev, netdev, buf, len,
+ NL80211_CMD_ASSOCIATE, gfp);
}
void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, const u8 *buf, size_t len)
+ struct net_device *netdev, const u8 *buf,
+ size_t len, gfp_t gfp)
{
nl80211_send_mlme_event(rdev, netdev, buf, len,
- NL80211_CMD_DEAUTHENTICATE);
+ NL80211_CMD_DEAUTHENTICATE, gfp);
}
void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *buf,
- size_t len)
+ size_t len, gfp_t gfp)
{
nl80211_send_mlme_event(rdev, netdev, buf, len,
- NL80211_CMD_DISASSOCIATE);
+ NL80211_CMD_DISASSOCIATE, gfp);
}
static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev,
struct net_device *netdev, int cmd,
- const u8 *addr)
+ const u8 *addr, gfp_t gfp)
{
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -3875,7 +4596,8 @@ static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev,
return;
}
- genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_ATOMIC);
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
return;
nla_put_failure:
@@ -3884,16 +4606,145 @@ static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev,
}
void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, const u8 *addr)
+ struct net_device *netdev, const u8 *addr,
+ gfp_t gfp)
{
nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_AUTHENTICATE,
- addr);
+ addr, gfp);
}
void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev,
- struct net_device *netdev, const u8 *addr)
+ struct net_device *netdev, const u8 *addr,
+ gfp_t gfp)
{
- nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_ASSOCIATE, addr);
+ nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_ASSOCIATE,
+ addr, gfp);
+}
+
+void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, const u8 *bssid,
+ const u8 *req_ie, size_t req_ie_len,
+ const u8 *resp_ie, size_t resp_ie_len,
+ u16 status, gfp_t gfp)
+{
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CONNECT);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+ if (bssid)
+ NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
+ NLA_PUT_U16(msg, NL80211_ATTR_STATUS_CODE, status);
+ if (req_ie)
+ NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie);
+ if (resp_ie)
+ NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
+
+ if (genlmsg_end(msg, hdr) < 0) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
+ return;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+
+}
+
+void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, const u8 *bssid,
+ const u8 *req_ie, size_t req_ie_len,
+ const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp)
+{
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ROAM);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+ NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
+ if (req_ie)
+ NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie);
+ if (resp_ie)
+ NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);
+
+ if (genlmsg_end(msg, hdr) < 0) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
+ return;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+
+}
+
+void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, u16 reason,
+ const u8 *ie, size_t ie_len, bool from_ap)
+{
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DISCONNECT);
+ if (!hdr) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+ NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+ if (from_ap && reason)
+ NLA_PUT_U16(msg, NL80211_ATTR_REASON_CODE, reason);
+ if (from_ap)
+ NLA_PUT_FLAG(msg, NL80211_ATTR_DISCONNECTED_BY_AP);
+ if (ie)
+ NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie);
+
+ if (genlmsg_end(msg, hdr) < 0) {
+ nlmsg_free(msg);
+ return;
+ }
+
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, GFP_KERNEL);
+ return;
+
+ nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ nlmsg_free(msg);
+
}
void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
@@ -3922,7 +4773,8 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
return;
}
- genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp);
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
return;
nla_put_failure:
@@ -3933,12 +4785,12 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *addr,
enum nl80211_key_type key_type, int key_id,
- const u8 *tsc)
+ const u8 *tsc, gfp_t gfp)
{
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
if (!msg)
return;
@@ -3962,7 +4814,8 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
return;
}
- genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_ATOMIC);
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
return;
nla_put_failure:
@@ -4015,7 +4868,10 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
return;
}
- genlmsg_multicast(msg, 0, nl80211_regulatory_mcgrp.id, GFP_ATOMIC);
+ rcu_read_lock();
+ genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id,
+ GFP_ATOMIC);
+ rcu_read_unlock();
return;
@@ -4051,6 +4907,12 @@ int nl80211_init(void)
if (err)
goto err_out;
+#ifdef CONFIG_NL80211_TESTMODE
+ err = genl_register_mc_group(&nl80211_fam, &nl80211_testmode_mcgrp);
+ if (err)
+ goto err_out;
+#endif
+
return 0;
err_out:
genl_unregister_family(&nl80211_fam);
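The hunks above repeatedly convert genlmsg_multicast() into genlmsg_multicast_netns() keyed on wiphy_net(&rdev->wiphy), and thread a caller-supplied gfp_t through the event helpers instead of hard-coding GFP_ATOMIC. A minimal sketch of the build-and-multicast shape those helpers converge on (this is illustrative only; nl80211hdr_put() and the mcgrp objects are the file-local symbols used above, the event chosen here is arbitrary):

	static void sketch_send_event(struct cfg80211_registered_device *rdev,
				      struct net_device *netdev, gfp_t gfp)
	{
		struct sk_buff *msg;
		void *hdr;

		/* allocation context is now the caller's choice */
		msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
		if (!msg)
			return;

		hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DISCONNECT);
		if (!hdr) {
			nlmsg_free(msg);
			return;
		}

		NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);

		if (genlmsg_end(msg, hdr) < 0) {
			nlmsg_free(msg);
			return;
		}

		/* deliver only into the namespace that owns this wiphy */
		genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
					nl80211_mlme_mcgrp.id, gfp);
		return;

	 nla_put_failure:
		genlmsg_cancel(msg, hdr);
		nlmsg_free(msg);
	}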
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 5c12ad13499..44cc2a76a1b 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -3,39 +3,54 @@
#include "core.h"
-extern int nl80211_init(void);
-extern void nl80211_exit(void);
-extern void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev);
-extern void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
- struct net_device *netdev);
-extern void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
- struct net_device *netdev);
-extern void nl80211_send_reg_change_event(struct regulatory_request *request);
-extern void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev,
- struct net_device *netdev,
- const u8 *buf, size_t len);
-extern void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
- struct net_device *netdev,
- const u8 *buf, size_t len);
-extern void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
+int nl80211_init(void);
+void nl80211_exit(void);
+void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev);
+void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev);
+void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev);
+void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev);
+void nl80211_send_reg_change_event(struct regulatory_request *request);
+void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev,
+ const u8 *buf, size_t len, gfp_t gfp);
+void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev,
+ const u8 *buf, size_t len, gfp_t gfp);
+void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev,
+ const u8 *buf, size_t len, gfp_t gfp);
+void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev,
+ const u8 *buf, size_t len, gfp_t gfp);
+void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev,
+ const u8 *addr, gfp_t gfp);
+void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev,
struct net_device *netdev,
- const u8 *buf, size_t len);
-extern void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
- struct net_device *netdev,
- const u8 *buf, size_t len);
-extern void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev,
- struct net_device *netdev,
- const u8 *addr);
-extern void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev,
- struct net_device *netdev,
- const u8 *addr);
-extern void
+ const u8 *addr, gfp_t gfp);
+void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, const u8 *bssid,
+ const u8 *req_ie, size_t req_ie_len,
+ const u8 *resp_ie, size_t resp_ie_len,
+ u16 status, gfp_t gfp);
+void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, const u8 *bssid,
+ const u8 *req_ie, size_t req_ie_len,
+ const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);
+void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
+ struct net_device *netdev, u16 reason,
+ const u8 *ie, size_t ie_len, bool from_ap);
+
+void
nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *addr,
enum nl80211_key_type key_type,
- int key_id, const u8 *tsc);
+ int key_id, const u8 *tsc, gfp_t gfp);
-extern void
+void
nl80211_send_beacon_hint_event(struct wiphy *wiphy,
struct ieee80211_channel *channel_before,
struct ieee80211_channel *channel_after);
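With the prototypes above gaining a gfp_t, the allocation context is decided at the call site rather than inside nl80211. A short, hedged usage sketch (the surrounding caller contexts are assumed, not part of this patch):

	/* from softirq/RX processing an event must not sleep ... */
	nl80211_send_rx_auth(rdev, netdev, buf, len, GFP_ATOMIC);

	/* ... while process-context callers can use GFP_KERNEL */
	nl80211_send_auth_timeout(rdev, netdev, addr, GFP_KERNEL);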
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 75a406d3361..f256dfffbf4 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -62,6 +62,16 @@ const struct ieee80211_regdomain *cfg80211_regdomain;
*/
static const struct ieee80211_regdomain *country_ie_regdomain;
+/*
+ * Protects static reg.c components:
+ * - cfg80211_world_regdom
+ * - cfg80211_regdom
+ * - country_ie_regdomain
+ * - last_request
+ */
+DEFINE_MUTEX(reg_mutex);
+#define assert_reg_lock() WARN_ON(!mutex_is_locked(&reg_mutex))
+
/* Used to queue up regulatory hints */
static LIST_HEAD(reg_requests_list);
static spinlock_t reg_requests_lock;
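reg_mutex protects only the static regulatory state listed in the comment above; the wiphy/rdev lists stay under cfg80211_mutex. The nesting the later hunks establish (e.g. in reg_process_hint() and set_regdom()) is cfg80211_mutex first, then reg_mutex, never the reverse. A rough sketch of that ordering:

	mutex_lock(&cfg80211_mutex);	/* wiphy / rdev lists */
	mutex_lock(&reg_mutex);		/* last_request, cfg80211_regdomain, ... */

	/* ... touch both the rdev list and regulatory state ... */

	mutex_unlock(&reg_mutex);
	mutex_unlock(&cfg80211_mutex);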
@@ -113,11 +123,7 @@ static const struct ieee80211_regdomain world_regdom = {
static const struct ieee80211_regdomain *cfg80211_world_regdom =
&world_regdom;
-#ifdef CONFIG_WIRELESS_OLD_REGULATORY
-static char *ieee80211_regdom = "US";
-#else
static char *ieee80211_regdom = "00";
-#endif
module_param(ieee80211_regdom, charp, 0444);
MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
@@ -1012,7 +1018,6 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
map_regdom_flags(reg_rule->flags) | bw_flags;
chan->max_antenna_gain = chan->orig_mag =
(int) MBI_TO_DBI(power_rule->max_antenna_gain);
- chan->max_bandwidth = KHZ_TO_MHZ(desired_bw_khz);
chan->max_power = chan->orig_mpwr =
(int) MBM_TO_DBM(power_rule->max_eirp);
return;
@@ -1021,7 +1026,6 @@ static void handle_channel(struct wiphy *wiphy, enum ieee80211_band band,
chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
chan->max_antenna_gain = min(chan->orig_mag,
(int) MBI_TO_DBI(power_rule->max_antenna_gain));
- chan->max_bandwidth = KHZ_TO_MHZ(desired_bw_khz);
if (chan->orig_mpwr)
chan->max_power = min(chan->orig_mpwr,
(int) MBM_TO_DBM(power_rule->max_eirp));
@@ -1061,10 +1065,10 @@ static bool ignore_reg_update(struct wiphy *wiphy,
static void update_all_wiphy_regulatory(enum nl80211_reg_initiator initiator)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
- list_for_each_entry(drv, &cfg80211_drv_list, list)
- wiphy_update_regulatory(&drv->wiphy, initiator);
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list)
+ wiphy_update_regulatory(&rdev->wiphy, initiator);
}
static void handle_reg_beacon(struct wiphy *wiphy,
@@ -1298,7 +1302,7 @@ static void handle_channel_custom(struct wiphy *wiphy,
struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
- assert_cfg80211_lock();
+ assert_reg_lock();
sband = wiphy->bands[band];
BUG_ON(chan_idx >= sband->n_channels);
@@ -1323,7 +1327,6 @@ static void handle_channel_custom(struct wiphy *wiphy,
chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
chan->max_antenna_gain = (int) MBI_TO_DBI(power_rule->max_antenna_gain);
- chan->max_bandwidth = KHZ_TO_MHZ(desired_bw_khz);
chan->max_power = (int) MBM_TO_DBM(power_rule->max_eirp);
}
@@ -1347,14 +1350,14 @@ void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
enum ieee80211_band band;
unsigned int bands_set = 0;
- mutex_lock(&cfg80211_mutex);
+ mutex_lock(&reg_mutex);
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
if (!wiphy->bands[band])
continue;
handle_band_custom(wiphy, band, regd);
bands_set++;
}
- mutex_unlock(&cfg80211_mutex);
+ mutex_unlock(&reg_mutex);
/*
* no point in calling this if it won't have any effect
@@ -1421,7 +1424,7 @@ static int ignore_request(struct wiphy *wiphy,
if (last_wiphy != wiphy) {
/*
* Two cards with two APs claiming different
- * different Country IE alpha2s. We could
+ * Country IE alpha2s. We could
* intersect them, but that seems unlikely
* to be correct. Reject second one for now.
*/
@@ -1500,7 +1503,7 @@ static int ignore_request(struct wiphy *wiphy,
* Returns zero if all went fine, %-EALREADY if a regulatory domain had
* already been set or other standard error codes.
*
- * Caller must hold &cfg80211_mutex
+ * Caller must hold &cfg80211_mutex and &reg_mutex
*/
static int __regulatory_hint(struct wiphy *wiphy,
struct regulatory_request *pending_request)
@@ -1575,6 +1578,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
BUG_ON(!reg_request->alpha2);
mutex_lock(&cfg80211_mutex);
+ mutex_lock(&reg_mutex);
if (wiphy_idx_valid(reg_request->wiphy_idx))
wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
@@ -1590,6 +1594,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
if (r == -EALREADY && wiphy && wiphy->strict_regulatory)
wiphy_update_regulatory(wiphy, reg_request->initiator);
out:
+ mutex_unlock(&reg_mutex);
mutex_unlock(&cfg80211_mutex);
}
@@ -1615,9 +1620,13 @@ static void reg_process_pending_hints(void)
/* Processes beacon hints -- this has nothing to do with country IEs */
static void reg_process_pending_beacon_hints(void)
{
- struct cfg80211_registered_device *drv;
+ struct cfg80211_registered_device *rdev;
struct reg_beacon *pending_beacon, *tmp;
+ /*
+ * No need to hold the reg_mutex here as we just touch wiphys
+ * and do not read or access regulatory variables.
+ */
mutex_lock(&cfg80211_mutex);
/* This goes through the _pending_ beacon list */
@@ -1634,8 +1643,8 @@ static void reg_process_pending_beacon_hints(void)
list_del_init(&pending_beacon->list);
/* Applies the beacon hint to current wiphys */
- list_for_each_entry(drv, &cfg80211_drv_list, list)
- wiphy_update_new_beacon(&drv->wiphy, pending_beacon);
+ list_for_each_entry(rdev, &cfg80211_rdev_list, list)
+ wiphy_update_new_beacon(&rdev->wiphy, pending_beacon);
/* Remembers the beacon hint for new wiphys or reg changes */
list_add_tail(&pending_beacon->list, &reg_beacon_list);
@@ -1739,12 +1748,13 @@ int regulatory_hint(struct wiphy *wiphy, const char *alpha2)
}
EXPORT_SYMBOL(regulatory_hint);
+/* Caller must hold reg_mutex */
static bool reg_same_country_ie_hint(struct wiphy *wiphy,
u32 country_ie_checksum)
{
struct wiphy *request_wiphy;
- assert_cfg80211_lock();
+ assert_reg_lock();
if (unlikely(last_request->initiator !=
NL80211_REGDOM_SET_BY_COUNTRY_IE))
@@ -1767,6 +1777,10 @@ static bool reg_same_country_ie_hint(struct wiphy *wiphy,
return false;
}
+/*
+ * We hold wdev_lock() here so we cannot hold cfg80211_mutex() and
+ * therefore cannot iterate over the rdev list here.
+ */
void regulatory_hint_11d(struct wiphy *wiphy,
u8 *country_ie,
u8 country_ie_len)
@@ -1777,12 +1791,10 @@ void regulatory_hint_11d(struct wiphy *wiphy,
enum environment_cap env = ENVIRON_ANY;
struct regulatory_request *request;
- mutex_lock(&cfg80211_mutex);
+ mutex_lock(&reg_mutex);
- if (unlikely(!last_request)) {
- mutex_unlock(&cfg80211_mutex);
- return;
- }
+ if (unlikely(!last_request))
+ goto out;
/* IE len must be evenly divisible by 2 */
if (country_ie_len & 0x01)
@@ -1808,54 +1820,14 @@ void regulatory_hint_11d(struct wiphy *wiphy,
env = ENVIRON_OUTDOOR;
/*
- * We will run this for *every* beacon processed for the BSSID, so
- * we optimize an early check to exit out early if we don't have to
- * do anything
+ * We will run this only upon a successful connection on cfg80211.
+ * We leave conflict resolution to the workqueue, where we can hold
+ * cfg80211_mutex.
*/
if (likely(last_request->initiator ==
NL80211_REGDOM_SET_BY_COUNTRY_IE &&
- wiphy_idx_valid(last_request->wiphy_idx))) {
- struct cfg80211_registered_device *drv_last_ie;
-
- drv_last_ie =
- cfg80211_drv_by_wiphy_idx(last_request->wiphy_idx);
-
- /*
- * Lets keep this simple -- we trust the first AP
- * after we intersect with CRDA
- */
- if (likely(&drv_last_ie->wiphy == wiphy)) {
- /*
- * Ignore IEs coming in on this wiphy with
- * the same alpha2 and environment cap
- */
- if (likely(alpha2_equal(drv_last_ie->country_ie_alpha2,
- alpha2) &&
- env == drv_last_ie->env)) {
- goto out;
- }
- /*
- * the wiphy moved on to another BSSID or the AP
- * was reconfigured. XXX: We need to deal with the
- * case where the user suspends and goes to goes
- * to another country, and then gets IEs from an
- * AP with different settings
- */
- goto out;
- } else {
- /*
- * Ignore IEs coming in on two separate wiphys with
- * the same alpha2 and environment cap
- */
- if (likely(alpha2_equal(drv_last_ie->country_ie_alpha2,
- alpha2) &&
- env == drv_last_ie->env)) {
- goto out;
- }
- /* We could potentially intersect though */
- goto out;
- }
- }
+ wiphy_idx_valid(last_request->wiphy_idx)))
+ goto out;
rd = country_ie_2_rd(country_ie, country_ie_len, &checksum);
if (!rd)
@@ -1890,7 +1862,7 @@ void regulatory_hint_11d(struct wiphy *wiphy,
request->country_ie_checksum = checksum;
request->country_ie_env = env;
- mutex_unlock(&cfg80211_mutex);
+ mutex_unlock(&reg_mutex);
queue_regulatory_request(request);
@@ -1899,9 +1871,8 @@ void regulatory_hint_11d(struct wiphy *wiphy,
free_rd_out:
kfree(rd);
out:
- mutex_unlock(&cfg80211_mutex);
+ mutex_unlock(&reg_mutex);
}
-EXPORT_SYMBOL(regulatory_hint_11d);
static bool freq_is_chan_12_13_14(u16 freq)
{
@@ -1996,14 +1967,14 @@ static void print_regdomain(const struct ieee80211_regdomain *rd)
if (last_request->initiator ==
NL80211_REGDOM_SET_BY_COUNTRY_IE) {
- struct cfg80211_registered_device *drv;
- drv = cfg80211_drv_by_wiphy_idx(
+ struct cfg80211_registered_device *rdev;
+ rdev = cfg80211_rdev_by_wiphy_idx(
last_request->wiphy_idx);
- if (drv) {
+ if (rdev) {
printk(KERN_INFO "cfg80211: Current regulatory "
"domain updated by AP to: %c%c\n",
- drv->country_ie_alpha2[0],
- drv->country_ie_alpha2[1]);
+ rdev->country_ie_alpha2[0],
+ rdev->country_ie_alpha2[1]);
} else
printk(KERN_INFO "cfg80211: Current regulatory "
"domain intersected: \n");
@@ -2064,7 +2035,7 @@ static inline void reg_country_ie_process_debug(
static int __set_regdom(const struct ieee80211_regdomain *rd)
{
const struct ieee80211_regdomain *intersected_rd = NULL;
- struct cfg80211_registered_device *drv = NULL;
+ struct cfg80211_registered_device *rdev = NULL;
struct wiphy *request_wiphy;
/* Some basic sanity checks first */
@@ -2203,11 +2174,11 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
if (!intersected_rd)
return -EINVAL;
- drv = wiphy_to_dev(request_wiphy);
+ rdev = wiphy_to_dev(request_wiphy);
- drv->country_ie_alpha2[0] = rd->alpha2[0];
- drv->country_ie_alpha2[1] = rd->alpha2[1];
- drv->env = last_request->country_ie_env;
+ rdev->country_ie_alpha2[0] = rd->alpha2[0];
+ rdev->country_ie_alpha2[1] = rd->alpha2[1];
+ rdev->env = last_request->country_ie_env;
BUG_ON(intersected_rd == rd);
@@ -2232,10 +2203,13 @@ int set_regdom(const struct ieee80211_regdomain *rd)
assert_cfg80211_lock();
+ mutex_lock(&reg_mutex);
+
/* Note that this doesn't update the wiphys, this is done below */
r = __set_regdom(rd);
if (r) {
kfree(rd);
+ mutex_unlock(&reg_mutex);
return r;
}
@@ -2250,6 +2224,8 @@ int set_regdom(const struct ieee80211_regdomain *rd)
nl80211_send_reg_change_event(last_request);
+ mutex_unlock(&reg_mutex);
+
return r;
}
@@ -2260,16 +2236,20 @@ void reg_device_remove(struct wiphy *wiphy)
assert_cfg80211_lock();
+ mutex_lock(&reg_mutex);
+
kfree(wiphy->regd);
if (last_request)
request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
if (!request_wiphy || request_wiphy != wiphy)
- return;
+ goto out;
last_request->wiphy_idx = WIPHY_IDX_STALE;
last_request->country_ie_env = ENVIRON_ANY;
+out:
+ mutex_unlock(&reg_mutex);
}
int regulatory_init(void)
@@ -2288,22 +2268,12 @@ int regulatory_init(void)
printk(KERN_INFO "cfg80211: Using static regulatory domain info\n");
print_regdomain_info(cfg80211_regdomain);
- /*
- * The old code still requests for a new regdomain and if
- * you have CRDA you get it updated, otherwise you get
- * stuck with the static values. Since "EU" is not a valid
- * ISO / IEC 3166 alpha2 code we can't expect userpace to
- * give us a regulatory domain for it. We need last_request
- * iniitalized though so lets just send a request which we
- * know will be ignored... this crap will be removed once
- * OLD_REG dies.
- */
- err = regulatory_hint_core(ieee80211_regdom);
#else
cfg80211_regdomain = cfg80211_world_regdom;
- err = regulatory_hint_core(ieee80211_regdom);
#endif
+ /* We always try to get an update for the static regdomain */
+ err = regulatory_hint_core(cfg80211_regdomain->alpha2);
if (err) {
if (err == -ENOMEM)
return err;
@@ -2322,6 +2292,13 @@ int regulatory_init(void)
#endif
}
+ /*
+ * Finally, if the user set the module parameter treat it
+ * as a user hint.
+ */
+ if (!is_world_regdom(ieee80211_regdom))
+ regulatory_hint_user(ieee80211_regdom);
+
return 0;
}
@@ -2333,6 +2310,7 @@ void regulatory_exit(void)
cancel_work_sync(&reg_work);
mutex_lock(&cfg80211_mutex);
+ mutex_lock(&reg_mutex);
reset_regdomains();
@@ -2371,5 +2349,6 @@ void regulatory_exit(void)
}
spin_unlock(&reg_requests_lock);
+ mutex_unlock(&reg_mutex);
mutex_unlock(&cfg80211_mutex);
}
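regulatory_init() now always requests an update for the static regdomain's own alpha2 and, separately, treats a non-world ieee80211_regdom module parameter as a user hint. A hedged sketch of the resulting boot-time flow, assuming the helpers named in the hunks above:

	/* sketch of the init-time hint flow after this patch */
	cfg80211_regdomain = cfg80211_world_regdom;

	/* ask CRDA to refresh whatever static domain we start with */
	err = regulatory_hint_core(cfg80211_regdomain->alpha2);
	if (err == -ENOMEM)
		return err;		/* other errors are non-fatal */

	/* "ieee80211_regdom=DE" on the command line acts as a user hint */
	if (!is_world_regdom(ieee80211_regdom))
		regulatory_hint_user(ieee80211_regdom);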
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 4e167a8e11b..3362c7c069b 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -37,4 +37,19 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
struct ieee80211_channel *beacon_chan,
gfp_t gfp);
+/**
+ * regulatory_hint_11d - hints a country IE as a regulatory domain
+ * @wiphy: the wireless device giving the hint (used only for reporting
+ * conflicts)
+ * @country_ie: pointer to the country IE
+ * @country_ie_len: length of the country IE
+ *
+ * We will intersect the rd with what CRDA tells us should apply
+ * for the alpha2 this country IE belongs to; this prevents APs from
+ * sending us incorrect or outdated information against a country.
+ */
+void regulatory_hint_11d(struct wiphy *wiphy,
+ u8 *country_ie,
+ u8 country_ie_len);
+
#endif /* __NET_WIRELESS_REG_H */
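regulatory_hint_11d() is now cfg80211-internal (its EXPORT_SYMBOL was dropped in reg.c above) and is fed from the connect path rather than from every processed beacon. A sketch of the intended call site, mirroring what the new sme.c does after a successful connection (wdev_lock() held, bss just connected):

	const u8 *country_ie = ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY);

	if (country_ie)
		/* country_ie[1] is the IE length, data starts at +2 */
		regulatory_hint_11d(wdev->wiphy,
				    (u8 *)country_ie + 2, country_ie[1]);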
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 7e595ce24ee..e5f92ee758f 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -14,29 +14,41 @@
#include <net/iw_handler.h>
#include "core.h"
#include "nl80211.h"
+#include "wext-compat.h"
-#define IEEE80211_SCAN_RESULT_EXPIRE (10 * HZ)
+#define IEEE80211_SCAN_RESULT_EXPIRE (15 * HZ)
-void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
+void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
{
+ struct cfg80211_scan_request *request;
struct net_device *dev;
#ifdef CONFIG_WIRELESS_EXT
union iwreq_data wrqu;
#endif
- dev = dev_get_by_index(&init_net, request->ifidx);
- if (!dev)
- goto out;
+ ASSERT_RDEV_LOCK(rdev);
- WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req);
+ request = rdev->scan_req;
+
+ if (!request)
+ return;
+
+ dev = request->dev;
- if (aborted)
- nl80211_send_scan_aborted(wiphy_to_dev(request->wiphy), dev);
+ /*
+ * This must be before sending the other events!
+ * Otherwise, wpa_supplicant gets completely confused with
+ * wext events.
+ */
+ cfg80211_sme_scan_done(dev);
+
+ if (request->aborted)
+ nl80211_send_scan_aborted(rdev, dev);
else
- nl80211_send_scan_done(wiphy_to_dev(request->wiphy), dev);
+ nl80211_send_scan_done(rdev, dev);
#ifdef CONFIG_WIRELESS_EXT
- if (!aborted) {
+ if (!request->aborted) {
memset(&wrqu, 0, sizeof(wrqu));
wireless_send_event(dev, SIOCGIWSCAN, &wrqu, NULL);
@@ -45,9 +57,38 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
dev_put(dev);
- out:
- wiphy_to_dev(request->wiphy)->scan_req = NULL;
- kfree(request);
+ rdev->scan_req = NULL;
+
+ /*
+ * OK. If this is invoked with "leak" then we can't
+ * free this ... but we've cleaned it up anyway. The
+ * driver failed to call the scan_done callback, so
+ * all bets are off, it might still be trying to use
+ * the scan request or not ... if it accesses the dev
+ * in there (it shouldn't anyway) then it may crash.
+ */
+ if (!leak)
+ kfree(request);
+}
+
+void __cfg80211_scan_done(struct work_struct *wk)
+{
+ struct cfg80211_registered_device *rdev;
+
+ rdev = container_of(wk, struct cfg80211_registered_device,
+ scan_done_wk);
+
+ cfg80211_lock_rdev(rdev);
+ ___cfg80211_scan_done(rdev, false);
+ cfg80211_unlock_rdev(rdev);
+}
+
+void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
+{
+ WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req);
+
+ request->aborted = aborted;
+ schedule_work(&wiphy_to_dev(request->wiphy)->scan_done_wk);
}
EXPORT_SYMBOL(cfg80211_scan_done);
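cfg80211_scan_done() no longer frees the request or sends netlink itself; it only records the aborted flag and kicks rdev->scan_done_wk, so drivers may call it from almost any context. A hedged driver-side sketch (struct my_priv and the surrounding driver are hypothetical):

	static void my_drv_scan_complete(struct my_priv *priv, bool aborted)
	{
		/* safe even from the tasklet that noticed the scan end:
		 * the locked cleanup now runs later from scan_done_wk
		 */
		cfg80211_scan_done(priv->scan_req, aborted);
		priv->scan_req = NULL;
	}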
@@ -62,6 +103,8 @@ static void bss_release(struct kref *ref)
if (bss->ies_allocated)
kfree(bss->pub.information_elements);
+ BUG_ON(atomic_read(&bss->hold));
+
kfree(bss);
}
@@ -84,8 +127,9 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
bool expired = false;
list_for_each_entry_safe(bss, tmp, &dev->bss_list, list) {
- if (bss->hold ||
- !time_after(jiffies, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE))
+ if (atomic_read(&bss->hold))
+ continue;
+ if (!time_after(jiffies, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE))
continue;
list_del(&bss->list);
rb_erase(&bss->rbn, &dev->bss_tree);
@@ -97,7 +141,7 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *dev)
dev->bss_generation++;
}
-static u8 *find_ie(u8 num, u8 *ies, size_t len)
+static u8 *find_ie(u8 num, u8 *ies, int len)
{
while (len > 2 && ies[0] != num) {
len -= ies[1] + 2;
@@ -539,6 +583,7 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
spin_lock_bh(&dev->bss_lock);
list_del(&bss->list);
+ dev->bss_generation++;
rb_erase(&bss->rbn, &dev->bss_tree);
spin_unlock_bh(&dev->bss_lock);
@@ -547,30 +592,6 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
}
EXPORT_SYMBOL(cfg80211_unlink_bss);
-void cfg80211_hold_bss(struct cfg80211_bss *pub)
-{
- struct cfg80211_internal_bss *bss;
-
- if (!pub)
- return;
-
- bss = container_of(pub, struct cfg80211_internal_bss, pub);
- bss->hold = true;
-}
-EXPORT_SYMBOL(cfg80211_hold_bss);
-
-void cfg80211_unhold_bss(struct cfg80211_bss *pub)
-{
- struct cfg80211_internal_bss *bss;
-
- if (!pub)
- return;
-
- bss = container_of(pub, struct cfg80211_internal_bss, pub);
- bss->hold = false;
-}
-EXPORT_SYMBOL(cfg80211_unhold_bss);
-
#ifdef CONFIG_WIRELESS_EXT
int cfg80211_wext_siwscan(struct net_device *dev,
struct iw_request_info *info,
@@ -586,7 +607,10 @@ int cfg80211_wext_siwscan(struct net_device *dev,
if (!netif_running(dev))
return -ENETDOWN;
- rdev = cfg80211_get_dev_from_ifindex(dev->ifindex);
+ if (wrqu->data.length == sizeof(struct iw_scan_req))
+ wreq = (struct iw_scan_req *)extra;
+
+ rdev = cfg80211_get_dev_from_ifindex(dev_net(dev), dev->ifindex);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
@@ -598,9 +622,14 @@ int cfg80211_wext_siwscan(struct net_device *dev,
wiphy = &rdev->wiphy;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++)
- if (wiphy->bands[band])
- n_channels += wiphy->bands[band]->n_channels;
+ /* Determine number of channels, needed to allocate creq */
+ if (wreq && wreq->num_channels)
+ n_channels = wreq->num_channels;
+ else {
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++)
+ if (wiphy->bands[band])
+ n_channels += wiphy->bands[band]->n_channels;
+ }
creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
n_channels * sizeof(void *),
@@ -611,28 +640,52 @@ int cfg80211_wext_siwscan(struct net_device *dev,
}
creq->wiphy = wiphy;
- creq->ifidx = dev->ifindex;
- creq->ssids = (void *)(creq + 1);
- creq->channels = (void *)(creq->ssids + 1);
+ creq->dev = dev;
+ /* SSIDs come after channels */
+ creq->ssids = (void *)&creq->channels[n_channels];
creq->n_channels = n_channels;
creq->n_ssids = 1;
- /* all channels */
+ /* translate "Scan on frequencies" request */
i = 0;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
int j;
if (!wiphy->bands[band])
continue;
for (j = 0; j < wiphy->bands[band]->n_channels; j++) {
+
+ /* If we have a wireless request structure and the
+ * wireless request specifies frequencies, then search
+ * for the matching hardware channel.
+ */
+ if (wreq && wreq->num_channels) {
+ int k;
+ int wiphy_freq = wiphy->bands[band]->channels[j].center_freq;
+ for (k = 0; k < wreq->num_channels; k++) {
+ int wext_freq = cfg80211_wext_freq(wiphy, &wreq->channel_list[k]);
+ if (wext_freq == wiphy_freq)
+ goto wext_freq_found;
+ }
+ goto wext_freq_not_found;
+ }
+
+ wext_freq_found:
creq->channels[i] = &wiphy->bands[band]->channels[j];
i++;
+ wext_freq_not_found: ;
}
}
+ /* No channels found? */
+ if (!i) {
+ err = -EINVAL;
+ goto out;
+ }
- /* translate scan request */
- if (wrqu->data.length == sizeof(struct iw_scan_req)) {
- wreq = (struct iw_scan_req *)extra;
+ /* Set real number of channels specified in creq->channels[] */
+ creq->n_channels = i;
+ /* translate "Scan for SSID" request */
+ if (wreq) {
if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
if (wreq->essid_len > IEEE80211_MAX_SSID_LEN)
return -EINVAL;
@@ -648,9 +701,12 @@ int cfg80211_wext_siwscan(struct net_device *dev,
if (err) {
rdev->scan_req = NULL;
kfree(creq);
+ } else {
+ nl80211_send_scan_start(rdev, dev);
+ dev_hold(dev);
}
out:
- cfg80211_put_dev(rdev);
+ cfg80211_unlock_rdev(rdev);
return err;
}
EXPORT_SYMBOL_GPL(cfg80211_wext_siwscan);
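The siwscan hunk above now honours a frequency list in struct iw_scan_req instead of always scanning every channel; the goto-based nested loop implements the per-channel match. A compact, equivalent sketch of that test in isolation (helper name is an assumption):

	/* does hardware channel "chan" appear in the wext request? */
	static bool chan_in_wext_req(struct wiphy *wiphy,
				     struct ieee80211_channel *chan,
				     struct iw_scan_req *wreq)
	{
		int k;

		if (!wreq || !wreq->num_channels)
			return true;	/* no list given: scan everything */

		for (k = 0; k < wreq->num_channels; k++)
			if (cfg80211_wext_freq(wiphy, &wreq->channel_list[k]) ==
			    chan->center_freq)
				return true;

		return false;
	}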
@@ -941,7 +997,7 @@ int cfg80211_wext_giwscan(struct net_device *dev,
if (!netif_running(dev))
return -ENETDOWN;
- rdev = cfg80211_get_dev_from_ifindex(dev->ifindex);
+ rdev = cfg80211_get_dev_from_ifindex(dev_net(dev), dev->ifindex);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
@@ -959,7 +1015,7 @@ int cfg80211_wext_giwscan(struct net_device *dev,
}
out:
- cfg80211_put_dev(rdev);
+ cfg80211_unlock_rdev(rdev);
return res;
}
EXPORT_SYMBOL_GPL(cfg80211_wext_giwscan);
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
new file mode 100644
index 00000000000..7fae7eee65d
--- /dev/null
+++ b/net/wireless/sme.c
@@ -0,0 +1,938 @@
+/*
+ * SME code for cfg80211's connect emulation.
+ *
+ * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/workqueue.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+#include "nl80211.h"
+#include "reg.h"
+
+struct cfg80211_conn {
+ struct cfg80211_connect_params params;
+ /* these are sub-states of the _CONNECTING sme_state */
+ enum {
+ CFG80211_CONN_IDLE,
+ CFG80211_CONN_SCANNING,
+ CFG80211_CONN_SCAN_AGAIN,
+ CFG80211_CONN_AUTHENTICATE_NEXT,
+ CFG80211_CONN_AUTHENTICATING,
+ CFG80211_CONN_ASSOCIATE_NEXT,
+ CFG80211_CONN_ASSOCIATING,
+ } state;
+ u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
+ u8 *ie;
+ size_t ie_len;
+ bool auto_auth, prev_bssid_valid;
+};
+
+
+static int cfg80211_conn_scan(struct wireless_dev *wdev)
+{
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct cfg80211_scan_request *request;
+ int n_channels, err;
+
+ ASSERT_RTNL();
+ ASSERT_RDEV_LOCK(rdev);
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (rdev->scan_req)
+ return -EBUSY;
+
+ if (wdev->conn->params.channel) {
+ n_channels = 1;
+ } else {
+ enum ieee80211_band band;
+ n_channels = 0;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!wdev->wiphy->bands[band])
+ continue;
+ n_channels += wdev->wiphy->bands[band]->n_channels;
+ }
+ }
+ request = kzalloc(sizeof(*request) + sizeof(request->ssids[0]) +
+ sizeof(request->channels[0]) * n_channels,
+ GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ if (wdev->conn->params.channel)
+ request->channels[0] = wdev->conn->params.channel;
+ else {
+ int i = 0, j;
+ enum ieee80211_band band;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!wdev->wiphy->bands[band])
+ continue;
+ for (j = 0; j < wdev->wiphy->bands[band]->n_channels;
+ i++, j++)
+ request->channels[i] =
+ &wdev->wiphy->bands[band]->channels[j];
+ }
+ }
+ request->n_channels = n_channels;
+ request->ssids = (void *)&request->channels[n_channels];
+ request->n_ssids = 1;
+
+ memcpy(request->ssids[0].ssid, wdev->conn->params.ssid,
+ wdev->conn->params.ssid_len);
+ request->ssids[0].ssid_len = wdev->conn->params.ssid_len;
+
+ request->dev = wdev->netdev;
+ request->wiphy = &rdev->wiphy;
+
+ rdev->scan_req = request;
+
+ err = rdev->ops->scan(wdev->wiphy, wdev->netdev, request);
+ if (!err) {
+ wdev->conn->state = CFG80211_CONN_SCANNING;
+ nl80211_send_scan_start(rdev, wdev->netdev);
+ dev_hold(wdev->netdev);
+ } else {
+ rdev->scan_req = NULL;
+ kfree(request);
+ }
+ return err;
+}
+
+static int cfg80211_conn_do_work(struct wireless_dev *wdev)
+{
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct cfg80211_connect_params *params;
+ const u8 *prev_bssid = NULL;
+ int err;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (!wdev->conn)
+ return 0;
+
+ params = &wdev->conn->params;
+
+ switch (wdev->conn->state) {
+ case CFG80211_CONN_SCAN_AGAIN:
+ return cfg80211_conn_scan(wdev);
+ case CFG80211_CONN_AUTHENTICATE_NEXT:
+ BUG_ON(!rdev->ops->auth);
+ wdev->conn->state = CFG80211_CONN_AUTHENTICATING;
+ return __cfg80211_mlme_auth(rdev, wdev->netdev,
+ params->channel, params->auth_type,
+ params->bssid,
+ params->ssid, params->ssid_len,
+ NULL, 0,
+ params->key, params->key_len,
+ params->key_idx);
+ case CFG80211_CONN_ASSOCIATE_NEXT:
+ BUG_ON(!rdev->ops->assoc);
+ wdev->conn->state = CFG80211_CONN_ASSOCIATING;
+ if (wdev->conn->prev_bssid_valid)
+ prev_bssid = wdev->conn->prev_bssid;
+ err = __cfg80211_mlme_assoc(rdev, wdev->netdev,
+ params->channel, params->bssid,
+ prev_bssid,
+ params->ssid, params->ssid_len,
+ params->ie, params->ie_len,
+ false, &params->crypto);
+ if (err)
+ __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
+ NULL, 0,
+ WLAN_REASON_DEAUTH_LEAVING);
+ return err;
+ default:
+ return 0;
+ }
+}
+
+void cfg80211_conn_work(struct work_struct *work)
+{
+ struct cfg80211_registered_device *rdev =
+ container_of(work, struct cfg80211_registered_device, conn_work);
+ struct wireless_dev *wdev;
+
+ rtnl_lock();
+ cfg80211_lock_rdev(rdev);
+ mutex_lock(&rdev->devlist_mtx);
+
+ list_for_each_entry(wdev, &rdev->netdev_list, list) {
+ wdev_lock(wdev);
+ if (!netif_running(wdev->netdev)) {
+ wdev_unlock(wdev);
+ continue;
+ }
+ if (wdev->sme_state != CFG80211_SME_CONNECTING) {
+ wdev_unlock(wdev);
+ continue;
+ }
+ if (cfg80211_conn_do_work(wdev))
+ __cfg80211_connect_result(
+ wdev->netdev,
+ wdev->conn->params.bssid,
+ NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ false, NULL);
+ wdev_unlock(wdev);
+ }
+
+ mutex_unlock(&rdev->devlist_mtx);
+ cfg80211_unlock_rdev(rdev);
+ rtnl_unlock();
+}
+
+static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
+{
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct cfg80211_bss *bss;
+ u16 capa = WLAN_CAPABILITY_ESS;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (wdev->conn->params.privacy)
+ capa |= WLAN_CAPABILITY_PRIVACY;
+
+ bss = cfg80211_get_bss(wdev->wiphy, NULL, wdev->conn->params.bssid,
+ wdev->conn->params.ssid,
+ wdev->conn->params.ssid_len,
+ WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_PRIVACY,
+ capa);
+ if (!bss)
+ return NULL;
+
+ memcpy(wdev->conn->bssid, bss->bssid, ETH_ALEN);
+ wdev->conn->params.bssid = wdev->conn->bssid;
+ wdev->conn->params.channel = bss->channel;
+ wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
+ schedule_work(&rdev->conn_work);
+
+ return bss;
+}
+
+static void __cfg80211_sme_scan_done(struct net_device *dev)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct cfg80211_bss *bss;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (wdev->sme_state != CFG80211_SME_CONNECTING)
+ return;
+
+ if (!wdev->conn)
+ return;
+
+ if (wdev->conn->state != CFG80211_CONN_SCANNING &&
+ wdev->conn->state != CFG80211_CONN_SCAN_AGAIN)
+ return;
+
+ bss = cfg80211_get_conn_bss(wdev);
+ if (bss) {
+ cfg80211_put_bss(bss);
+ } else {
+ /* not found */
+ if (wdev->conn->state == CFG80211_CONN_SCAN_AGAIN)
+ schedule_work(&rdev->conn_work);
+ else
+ __cfg80211_connect_result(
+ wdev->netdev,
+ wdev->conn->params.bssid,
+ NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ false, NULL);
+ }
+}
+
+void cfg80211_sme_scan_done(struct net_device *dev)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ mutex_lock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx);
+ wdev_lock(wdev);
+ __cfg80211_sme_scan_done(dev);
+ wdev_unlock(wdev);
+ mutex_unlock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx);
+}
+
+void cfg80211_sme_rx_auth(struct net_device *dev,
+ const u8 *buf, size_t len)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
+ u16 status_code = le16_to_cpu(mgmt->u.auth.status_code);
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ /* should only RX auth frames when connecting */
+ if (wdev->sme_state != CFG80211_SME_CONNECTING)
+ return;
+
+ if (WARN_ON(!wdev->conn))
+ return;
+
+ if (status_code == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG &&
+ wdev->conn->auto_auth &&
+ wdev->conn->params.auth_type != NL80211_AUTHTYPE_NETWORK_EAP) {
+ /* select automatically between only open, shared, leap */
+ switch (wdev->conn->params.auth_type) {
+ case NL80211_AUTHTYPE_OPEN_SYSTEM:
+ if (wdev->connect_keys)
+ wdev->conn->params.auth_type =
+ NL80211_AUTHTYPE_SHARED_KEY;
+ else
+ wdev->conn->params.auth_type =
+ NL80211_AUTHTYPE_NETWORK_EAP;
+ break;
+ case NL80211_AUTHTYPE_SHARED_KEY:
+ wdev->conn->params.auth_type =
+ NL80211_AUTHTYPE_NETWORK_EAP;
+ break;
+ default:
+ /* huh? */
+ wdev->conn->params.auth_type =
+ NL80211_AUTHTYPE_OPEN_SYSTEM;
+ break;
+ }
+ wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
+ schedule_work(&rdev->conn_work);
+ } else if (status_code != WLAN_STATUS_SUCCESS) {
+ __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0,
+ status_code, false, NULL);
+ } else if (wdev->sme_state == CFG80211_SME_CONNECTING &&
+ wdev->conn->state == CFG80211_CONN_AUTHENTICATING) {
+ wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT;
+ schedule_work(&rdev->conn_work);
+ }
+}
+
+bool cfg80211_sme_failed_reassoc(struct wireless_dev *wdev)
+{
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+ if (WARN_ON(!wdev->conn))
+ return false;
+
+ if (!wdev->conn->prev_bssid_valid)
+ return false;
+
+ /*
+ * Some stupid APs don't accept reassoc, so we
+ * need to fall back to trying regular assoc.
+ */
+ wdev->conn->prev_bssid_valid = false;
+ wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT;
+ schedule_work(&rdev->conn_work);
+
+ return true;
+}
+
+void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
+ const u8 *req_ie, size_t req_ie_len,
+ const u8 *resp_ie, size_t resp_ie_len,
+ u16 status, bool wextev,
+ struct cfg80211_bss *bss)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ u8 *country_ie;
+#ifdef CONFIG_WIRELESS_EXT
+ union iwreq_data wrqu;
+#endif
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
+ return;
+
+ if (wdev->sme_state != CFG80211_SME_CONNECTING)
+ return;
+
+ nl80211_send_connect_result(wiphy_to_dev(wdev->wiphy), dev,
+ bssid, req_ie, req_ie_len,
+ resp_ie, resp_ie_len,
+ status, GFP_KERNEL);
+
+#ifdef CONFIG_WIRELESS_EXT
+ if (wextev) {
+ if (req_ie && status == WLAN_STATUS_SUCCESS) {
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.length = req_ie_len;
+ wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, req_ie);
+ }
+
+ if (resp_ie && status == WLAN_STATUS_SUCCESS) {
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.length = resp_ie_len;
+ wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, resp_ie);
+ }
+
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ if (bssid && status == WLAN_STATUS_SUCCESS) {
+ memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
+ memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN);
+ wdev->wext.prev_bssid_valid = true;
+ }
+ wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
+ }
+#endif
+
+ if (wdev->current_bss) {
+ cfg80211_unhold_bss(wdev->current_bss);
+ cfg80211_put_bss(&wdev->current_bss->pub);
+ wdev->current_bss = NULL;
+ }
+
+ if (wdev->conn)
+ wdev->conn->state = CFG80211_CONN_IDLE;
+
+ if (status != WLAN_STATUS_SUCCESS) {
+ wdev->sme_state = CFG80211_SME_IDLE;
+ if (wdev->conn)
+ kfree(wdev->conn->ie);
+ kfree(wdev->conn);
+ wdev->conn = NULL;
+ kfree(wdev->connect_keys);
+ wdev->connect_keys = NULL;
+ wdev->ssid_len = 0;
+ return;
+ }
+
+ if (!bss)
+ bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
+ wdev->ssid, wdev->ssid_len,
+ WLAN_CAPABILITY_ESS,
+ WLAN_CAPABILITY_ESS);
+
+ if (WARN_ON(!bss))
+ return;
+
+ cfg80211_hold_bss(bss_from_pub(bss));
+ wdev->current_bss = bss_from_pub(bss);
+
+ wdev->sme_state = CFG80211_SME_CONNECTED;
+ cfg80211_upload_connect_keys(wdev);
+
+ country_ie = (u8 *) ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY);
+
+ if (!country_ie)
+ return;
+
+ /*
+ * ieee80211_bss_get_ie() ensures we can access:
+ * - country_ie + 2, the start of the country ie data, and
+ * - and country_ie[1] which is the IE length
+ */
+ regulatory_hint_11d(wdev->wiphy,
+ country_ie + 2,
+ country_ie[1]);
+}
+
+void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
+ const u8 *req_ie, size_t req_ie_len,
+ const u8 *resp_ie, size_t resp_ie_len,
+ u16 status, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct cfg80211_event *ev;
+ unsigned long flags;
+
+ CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING);
+
+ ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
+ if (!ev)
+ return;
+
+ ev->type = EVENT_CONNECT_RESULT;
+ if (bssid)
+ memcpy(ev->cr.bssid, bssid, ETH_ALEN);
+ ev->cr.req_ie = ((u8 *)ev) + sizeof(*ev);
+ ev->cr.req_ie_len = req_ie_len;
+ memcpy((void *)ev->cr.req_ie, req_ie, req_ie_len);
+ ev->cr.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len;
+ ev->cr.resp_ie_len = resp_ie_len;
+ memcpy((void *)ev->cr.resp_ie, resp_ie, resp_ie_len);
+ ev->cr.status = status;
+
+ spin_lock_irqsave(&wdev->event_lock, flags);
+ list_add_tail(&ev->list, &wdev->event_list);
+ spin_unlock_irqrestore(&wdev->event_lock, flags);
+ schedule_work(&rdev->event_work);
+}
+EXPORT_SYMBOL(cfg80211_connect_result);
+
+void __cfg80211_roamed(struct wireless_dev *wdev, const u8 *bssid,
+ const u8 *req_ie, size_t req_ie_len,
+ const u8 *resp_ie, size_t resp_ie_len)
+{
+ struct cfg80211_bss *bss;
+#ifdef CONFIG_WIRELESS_EXT
+ union iwreq_data wrqu;
+#endif
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
+ return;
+
+ if (wdev->sme_state != CFG80211_SME_CONNECTED)
+ return;
+
+ /* internal error -- how did we get to CONNECTED w/o BSS? */
+ if (WARN_ON(!wdev->current_bss)) {
+ return;
+ }
+
+ cfg80211_unhold_bss(wdev->current_bss);
+ cfg80211_put_bss(&wdev->current_bss->pub);
+ wdev->current_bss = NULL;
+
+ bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
+ wdev->ssid, wdev->ssid_len,
+ WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
+
+ if (WARN_ON(!bss))
+ return;
+
+ cfg80211_hold_bss(bss_from_pub(bss));
+ wdev->current_bss = bss_from_pub(bss);
+
+ nl80211_send_roamed(wiphy_to_dev(wdev->wiphy), wdev->netdev, bssid,
+ req_ie, req_ie_len, resp_ie, resp_ie_len,
+ GFP_KERNEL);
+
+#ifdef CONFIG_WIRELESS_EXT
+ if (req_ie) {
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.length = req_ie_len;
+ wireless_send_event(wdev->netdev, IWEVASSOCREQIE,
+ &wrqu, req_ie);
+ }
+
+ if (resp_ie) {
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.length = resp_ie_len;
+ wireless_send_event(wdev->netdev, IWEVASSOCRESPIE,
+ &wrqu, resp_ie);
+ }
+
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ memcpy(wrqu.ap_addr.sa_data, bssid, ETH_ALEN);
+ memcpy(wdev->wext.prev_bssid, bssid, ETH_ALEN);
+ wdev->wext.prev_bssid_valid = true;
+ wireless_send_event(wdev->netdev, SIOCGIWAP, &wrqu, NULL);
+#endif
+}
+
+void cfg80211_roamed(struct net_device *dev, const u8 *bssid,
+ const u8 *req_ie, size_t req_ie_len,
+ const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct cfg80211_event *ev;
+ unsigned long flags;
+
+ CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED);
+
+ ev = kzalloc(sizeof(*ev) + req_ie_len + resp_ie_len, gfp);
+ if (!ev)
+ return;
+
+ ev->type = EVENT_ROAMED;
+ memcpy(ev->rm.bssid, bssid, ETH_ALEN);
+ ev->rm.req_ie = ((u8 *)ev) + sizeof(*ev);
+ ev->rm.req_ie_len = req_ie_len;
+ memcpy((void *)ev->rm.req_ie, req_ie, req_ie_len);
+ ev->rm.resp_ie = ((u8 *)ev) + sizeof(*ev) + req_ie_len;
+ ev->rm.resp_ie_len = resp_ie_len;
+ memcpy((void *)ev->rm.resp_ie, resp_ie, resp_ie_len);
+
+ spin_lock_irqsave(&wdev->event_lock, flags);
+ list_add_tail(&ev->list, &wdev->event_list);
+ spin_unlock_irqrestore(&wdev->event_lock, flags);
+ schedule_work(&rdev->event_work);
+}
+EXPORT_SYMBOL(cfg80211_roamed);
+
+void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
+ size_t ie_len, u16 reason, bool from_ap)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ int i;
+#ifdef CONFIG_WIRELESS_EXT
+ union iwreq_data wrqu;
+#endif
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
+ return;
+
+ if (wdev->sme_state != CFG80211_SME_CONNECTED)
+ return;
+
+ if (wdev->current_bss) {
+ cfg80211_unhold_bss(wdev->current_bss);
+ cfg80211_put_bss(&wdev->current_bss->pub);
+ }
+
+ wdev->current_bss = NULL;
+ wdev->sme_state = CFG80211_SME_IDLE;
+ wdev->ssid_len = 0;
+
+ if (wdev->conn) {
+ const u8 *bssid;
+ int ret;
+
+ kfree(wdev->conn->ie);
+ wdev->conn->ie = NULL;
+ kfree(wdev->conn);
+ wdev->conn = NULL;
+
+ /*
+ * If this disconnect was due to a disassoc, we
+ * might still have an auth BSS around. For
+ * the userspace SME that's currently expected,
+ * but for the kernel SME (nl80211 CONNECT or
+ * wireless extensions) we want to clear up all
+ * state.
+ */
+ for (i = 0; i < MAX_AUTH_BSSES; i++) {
+ if (!wdev->auth_bsses[i])
+ continue;
+ bssid = wdev->auth_bsses[i]->pub.bssid;
+ ret = __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0,
+ WLAN_REASON_DEAUTH_LEAVING);
+ WARN(ret, "deauth failed: %d\n", ret);
+ }
+ }
+
+ nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap);
+
+ /*
+ * Delete all the keys ... pairwise keys can't really
+ * exist any more anyway, but default keys might.
+ */
+ if (rdev->ops->del_key)
+ for (i = 0; i < 6; i++)
+ rdev->ops->del_key(wdev->wiphy, dev, i, NULL);
+
+#ifdef CONFIG_WIRELESS_EXT
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
+#endif
+}
+
+void cfg80211_disconnected(struct net_device *dev, u16 reason,
+ u8 *ie, size_t ie_len, gfp_t gfp)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct cfg80211_event *ev;
+ unsigned long flags;
+
+ CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTED);
+
+ ev = kzalloc(sizeof(*ev) + ie_len, gfp);
+ if (!ev)
+ return;
+
+ ev->type = EVENT_DISCONNECTED;
+ ev->dc.ie = ((u8 *)ev) + sizeof(*ev);
+ ev->dc.ie_len = ie_len;
+ memcpy((void *)ev->dc.ie, ie, ie_len);
+ ev->dc.reason = reason;
+
+ spin_lock_irqsave(&wdev->event_lock, flags);
+ list_add_tail(&ev->list, &wdev->event_list);
+ spin_unlock_irqrestore(&wdev->event_lock, flags);
+ schedule_work(&rdev->event_work);
+}
+EXPORT_SYMBOL(cfg80211_disconnected);
+
+int __cfg80211_connect(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_connect_params *connect,
+ struct cfg80211_cached_keys *connkeys,
+ const u8 *prev_bssid)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct ieee80211_channel *chan;
+ struct cfg80211_bss *bss = NULL;
+ int err;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (wdev->sme_state != CFG80211_SME_IDLE)
+ return -EALREADY;
+
+ chan = rdev_fixed_channel(rdev, wdev);
+ if (chan && chan != connect->channel)
+ return -EBUSY;
+
+ if (WARN_ON(wdev->connect_keys)) {
+ kfree(wdev->connect_keys);
+ wdev->connect_keys = NULL;
+ }
+
+ if (connkeys && connkeys->def >= 0) {
+ int idx;
+ u32 cipher;
+
+ idx = connkeys->def;
+ cipher = connkeys->params[idx].cipher;
+ /* If given a WEP key we may need it for shared key auth */
+ if (cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ cipher == WLAN_CIPHER_SUITE_WEP104) {
+ connect->key_idx = idx;
+ connect->key = connkeys->params[idx].key;
+ connect->key_len = connkeys->params[idx].key_len;
+
+ /*
+ * If ciphers are not set (e.g. when going through
+ * iwconfig), we have to set them appropriately here.
+ */
+ if (connect->crypto.cipher_group == 0)
+ connect->crypto.cipher_group = cipher;
+
+ if (connect->crypto.n_ciphers_pairwise == 0) {
+ connect->crypto.n_ciphers_pairwise = 1;
+ connect->crypto.ciphers_pairwise[0] = cipher;
+ }
+ }
+ }
+
+ if (!rdev->ops->connect) {
+ if (!rdev->ops->auth || !rdev->ops->assoc)
+ return -EOPNOTSUPP;
+
+ if (WARN_ON(wdev->conn))
+ return -EINPROGRESS;
+
+ wdev->conn = kzalloc(sizeof(*wdev->conn), GFP_KERNEL);
+ if (!wdev->conn)
+ return -ENOMEM;
+
+ /*
+ * Copy all parameters, handling IEs, BSSID and SSID explicitly.
+ */
+ memcpy(&wdev->conn->params, connect, sizeof(*connect));
+ if (connect->bssid) {
+ wdev->conn->params.bssid = wdev->conn->bssid;
+ memcpy(wdev->conn->bssid, connect->bssid, ETH_ALEN);
+ }
+
+ if (connect->ie) {
+ wdev->conn->ie = kmemdup(connect->ie, connect->ie_len,
+ GFP_KERNEL);
+ wdev->conn->params.ie = wdev->conn->ie;
+ if (!wdev->conn->ie) {
+ kfree(wdev->conn);
+ wdev->conn = NULL;
+ return -ENOMEM;
+ }
+ }
+
+ if (connect->auth_type == NL80211_AUTHTYPE_AUTOMATIC) {
+ wdev->conn->auto_auth = true;
+ /* start with open system ... should mostly work */
+ wdev->conn->params.auth_type =
+ NL80211_AUTHTYPE_OPEN_SYSTEM;
+ } else {
+ wdev->conn->auto_auth = false;
+ }
+
+ memcpy(wdev->ssid, connect->ssid, connect->ssid_len);
+ wdev->ssid_len = connect->ssid_len;
+ wdev->conn->params.ssid = wdev->ssid;
+ wdev->conn->params.ssid_len = connect->ssid_len;
+
+ /* don't care about result -- but fill bssid & channel */
+ if (!wdev->conn->params.bssid || !wdev->conn->params.channel)
+ bss = cfg80211_get_conn_bss(wdev);
+
+ wdev->sme_state = CFG80211_SME_CONNECTING;
+ wdev->connect_keys = connkeys;
+
+ if (prev_bssid) {
+ memcpy(wdev->conn->prev_bssid, prev_bssid, ETH_ALEN);
+ wdev->conn->prev_bssid_valid = true;
+ }
+
+ /* we're good if we have a matching bss struct */
+ if (bss) {
+ wdev->conn->state = CFG80211_CONN_AUTHENTICATE_NEXT;
+ err = cfg80211_conn_do_work(wdev);
+ cfg80211_put_bss(bss);
+ } else {
+ /* otherwise we'll need to scan for the AP first */
+ err = cfg80211_conn_scan(wdev);
+ /*
+ * If we can't scan right now, then we need to scan again
+ * after the current scan finishes, since the parameters
+ * changed (unless we find a good AP anyway).
+ */
+ if (err == -EBUSY) {
+ err = 0;
+ wdev->conn->state = CFG80211_CONN_SCAN_AGAIN;
+ }
+ }
+ if (err) {
+ kfree(wdev->conn->ie);
+ kfree(wdev->conn);
+ wdev->conn = NULL;
+ wdev->sme_state = CFG80211_SME_IDLE;
+ wdev->connect_keys = NULL;
+ wdev->ssid_len = 0;
+ }
+
+ return err;
+ } else {
+ wdev->sme_state = CFG80211_SME_CONNECTING;
+ wdev->connect_keys = connkeys;
+ err = rdev->ops->connect(&rdev->wiphy, dev, connect);
+ if (err) {
+ wdev->connect_keys = NULL;
+ wdev->sme_state = CFG80211_SME_IDLE;
+ return err;
+ }
+
+ memcpy(wdev->ssid, connect->ssid, connect->ssid_len);
+ wdev->ssid_len = connect->ssid_len;
+
+ return 0;
+ }
+}
+
+int cfg80211_connect(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_connect_params *connect,
+ struct cfg80211_cached_keys *connkeys)
+{
+ int err;
+
+ mutex_lock(&rdev->devlist_mtx);
+ wdev_lock(dev->ieee80211_ptr);
+ err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL);
+ wdev_unlock(dev->ieee80211_ptr);
+ mutex_unlock(&rdev->devlist_mtx);
+
+ return err;
+}
+
+int __cfg80211_disconnect(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, u16 reason, bool wextev)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ int err;
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (wdev->sme_state == CFG80211_SME_IDLE)
+ return -EINVAL;
+
+ kfree(wdev->connect_keys);
+ wdev->connect_keys = NULL;
+
+ if (!rdev->ops->disconnect) {
+ if (!rdev->ops->deauth)
+ return -EOPNOTSUPP;
+
+ /* was it connected by userspace SME? */
+ if (!wdev->conn) {
+ cfg80211_mlme_down(rdev, dev);
+ return 0;
+ }
+
+ if (wdev->sme_state == CFG80211_SME_CONNECTING &&
+ (wdev->conn->state == CFG80211_CONN_SCANNING ||
+ wdev->conn->state == CFG80211_CONN_SCAN_AGAIN)) {
+ wdev->sme_state = CFG80211_SME_IDLE;
+ kfree(wdev->conn->ie);
+ kfree(wdev->conn);
+ wdev->conn = NULL;
+ wdev->ssid_len = 0;
+ return 0;
+ }
+
+ /* wdev->conn->params.bssid must be set if > SCANNING */
+ err = __cfg80211_mlme_deauth(rdev, dev,
+ wdev->conn->params.bssid,
+ NULL, 0, reason);
+ if (err)
+ return err;
+ } else {
+ err = rdev->ops->disconnect(&rdev->wiphy, dev, reason);
+ if (err)
+ return err;
+ }
+
+ if (wdev->sme_state == CFG80211_SME_CONNECTED)
+ __cfg80211_disconnected(dev, NULL, 0, 0, false);
+ else if (wdev->sme_state == CFG80211_SME_CONNECTING)
+ __cfg80211_connect_result(dev, NULL, NULL, 0, NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ wextev, NULL);
+
+ return 0;
+}
+
+int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ u16 reason, bool wextev)
+{
+ int err;
+
+ wdev_lock(dev->ieee80211_ptr);
+ err = __cfg80211_disconnect(rdev, dev, reason, wextev);
+ wdev_unlock(dev->ieee80211_ptr);
+
+ return err;
+}
+
+void cfg80211_sme_disassoc(struct net_device *dev, int idx)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ u8 bssid[ETH_ALEN];
+
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (!wdev->conn)
+ return;
+
+ if (wdev->conn->state == CFG80211_CONN_IDLE)
+ return;
+
+ /*
+ * Ok, so the association was made by this SME -- we don't
+ * want it any more so deauthenticate too.
+ */
+
+ if (!wdev->auth_bsses[idx])
+ return;
+
+ memcpy(bssid, wdev->auth_bsses[idx]->pub.bssid, ETH_ALEN);
+ if (__cfg80211_mlme_deauth(rdev, dev, bssid,
+ NULL, 0, WLAN_REASON_DEAUTH_LEAVING)) {
+ /* whatever -- assume gone anyway */
+ cfg80211_unhold_bss(wdev->auth_bsses[idx]);
+ cfg80211_put_bss(&wdev->auth_bsses[idx]->pub);
+ wdev->auth_bsses[idx] = NULL;
+ }
+}
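Fullmac drivers that implement .connect report completion through cfg80211_connect_result(), which only queues an EVENT_CONNECT_RESULT on the wdev and defers the real processing to rdev->event_work. A hedged driver-side sketch (the driver function and fw_status handling are assumptions, only the cfg80211 call matches this patch):

	static void my_drv_connect_done(struct net_device *dev,
					const u8 *bssid, u16 fw_status)
	{
		/* GFP_ATOMIC because firmware events often arrive in
		 * interrupt context; cfg80211 just queues the event here
		 */
		cfg80211_connect_result(dev, bssid,
					NULL, 0,	/* req IEs, if any */
					NULL, 0,	/* resp IEs, if any */
					fw_status ?
						WLAN_STATUS_UNSPECIFIED_FAILURE :
						WLAN_STATUS_SUCCESS,
					GFP_ATOMIC);
	}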
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 25550692dda..3fc2df86278 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -141,9 +141,12 @@ void ieee80211_set_bitrate_flags(struct wiphy *wiphy)
set_mandatory_flags_band(wiphy->bands[band], band);
}
-int cfg80211_validate_key_settings(struct key_params *params, int key_idx,
+int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
+ struct key_params *params, int key_idx,
const u8 *mac_addr)
{
+ int i;
+
if (key_idx > 5)
return -EINVAL;
@@ -197,6 +200,12 @@ int cfg80211_validate_key_settings(struct key_params *params, int key_idx,
}
}
+ for (i = 0; i < rdev->wiphy.n_cipher_suites; i++)
+ if (params->cipher == rdev->wiphy.cipher_suites[i])
+ break;
+ if (i == rdev->wiphy.n_cipher_suites)
+ return -EINVAL;
+
return 0;
}
@@ -265,11 +274,11 @@ static int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
switch (ae) {
case 0:
return 6;
- case 1:
+ case MESH_FLAGS_AE_A4:
return 12;
- case 2:
+ case MESH_FLAGS_AE_A5_A6:
return 18;
- case 3:
+ case (MESH_FLAGS_AE_A4 | MESH_FLAGS_AE_A5_A6):
return 24;
default:
return 6;
@@ -324,10 +333,18 @@ int ieee80211_data_to_8023(struct sk_buff *skb, u8 *addr,
}
break;
case cpu_to_le16(IEEE80211_FCTL_FROMDS):
- if (iftype != NL80211_IFTYPE_STATION ||
+ if ((iftype != NL80211_IFTYPE_STATION &&
+ iftype != NL80211_IFTYPE_MESH_POINT) ||
(is_multicast_ether_addr(dst) &&
!compare_ether_addr(src, addr)))
return -1;
+ if (iftype == NL80211_IFTYPE_MESH_POINT) {
+ struct ieee80211s_hdr *meshdr =
+ (struct ieee80211s_hdr *) (skb->data + hdrlen);
+ hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
+ if (meshdr->flags & MESH_FLAGS_AE_A4)
+ memcpy(src, meshdr->eaddr1, ETH_ALEN);
+ }
break;
case cpu_to_le16(0):
if (iftype != NL80211_IFTYPE_ADHOC)
@@ -502,3 +519,166 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb)
return dscp >> 5;
}
EXPORT_SYMBOL(cfg80211_classify8021d);
+
+const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie)
+{
+ u8 *end, *pos;
+
+ pos = bss->information_elements;
+ if (pos == NULL)
+ return NULL;
+ end = pos + bss->len_information_elements;
+
+ while (pos + 1 < end) {
+ if (pos + 2 + pos[1] > end)
+ break;
+ if (pos[0] == ie)
+ return pos;
+ pos += 2 + pos[1];
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(ieee80211_bss_get_ie);
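The helper above walks the BSS information elements as id/length/value triples, bounds-checking each element before looking at it. A minimal standalone sketch of the same walk over a caller-supplied buffer rather than a struct cfg80211_bss (names here are illustrative, not kernel API):

#include <stddef.h>
#include <stdint.h>

/* Return a pointer to the first element with the given id, or NULL. */
static const uint8_t *find_ie(const uint8_t *ies, size_t len, uint8_t id)
{
	const uint8_t *pos = ies, *end = ies + len;

	while (pos + 1 < end) {			/* need id and length bytes */
		if (pos + 2 + pos[1] > end)	/* element overruns the buffer */
			break;
		if (pos[0] == id)
			return pos;		/* points at id, length, data */
		pos += 2 + pos[1];
	}
	return NULL;
}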
+
+void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
+{
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct net_device *dev = wdev->netdev;
+ int i;
+
+ if (!wdev->connect_keys)
+ return;
+
+ for (i = 0; i < 6; i++) {
+ if (!wdev->connect_keys->params[i].cipher)
+ continue;
+ if (rdev->ops->add_key(wdev->wiphy, dev, i, NULL,
+ &wdev->connect_keys->params[i])) {
+ printk(KERN_ERR "%s: failed to set key %d\n",
+ dev->name, i);
+ continue;
+ }
+ if (wdev->connect_keys->def == i)
+ if (rdev->ops->set_default_key(wdev->wiphy, dev, i)) {
+ printk(KERN_ERR "%s: failed to set defkey %d\n",
+ dev->name, i);
+ continue;
+ }
+ if (wdev->connect_keys->defmgmt == i)
+ if (rdev->ops->set_default_mgmt_key(wdev->wiphy, dev, i))
+ printk(KERN_ERR "%s: failed to set mgtdef %d\n",
+ dev->name, i);
+ }
+
+ kfree(wdev->connect_keys);
+ wdev->connect_keys = NULL;
+}
+
+static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
+{
+ struct cfg80211_event *ev;
+ unsigned long flags;
+ const u8 *bssid = NULL;
+
+ spin_lock_irqsave(&wdev->event_lock, flags);
+ while (!list_empty(&wdev->event_list)) {
+ ev = list_first_entry(&wdev->event_list,
+ struct cfg80211_event, list);
+ list_del(&ev->list);
+ spin_unlock_irqrestore(&wdev->event_lock, flags);
+
+ wdev_lock(wdev);
+ switch (ev->type) {
+ case EVENT_CONNECT_RESULT:
+ if (!is_zero_ether_addr(ev->cr.bssid))
+ bssid = ev->cr.bssid;
+ __cfg80211_connect_result(
+ wdev->netdev, bssid,
+ ev->cr.req_ie, ev->cr.req_ie_len,
+ ev->cr.resp_ie, ev->cr.resp_ie_len,
+ ev->cr.status,
+ ev->cr.status == WLAN_STATUS_SUCCESS,
+ NULL);
+ break;
+ case EVENT_ROAMED:
+ __cfg80211_roamed(wdev, ev->rm.bssid,
+ ev->rm.req_ie, ev->rm.req_ie_len,
+ ev->rm.resp_ie, ev->rm.resp_ie_len);
+ break;
+ case EVENT_DISCONNECTED:
+ __cfg80211_disconnected(wdev->netdev,
+ ev->dc.ie, ev->dc.ie_len,
+ ev->dc.reason, true);
+ break;
+ case EVENT_IBSS_JOINED:
+ __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid);
+ break;
+ }
+ wdev_unlock(wdev);
+
+ kfree(ev);
+
+ spin_lock_irqsave(&wdev->event_lock, flags);
+ }
+ spin_unlock_irqrestore(&wdev->event_lock, flags);
+}
+
+void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev)
+{
+ struct wireless_dev *wdev;
+
+ ASSERT_RTNL();
+ ASSERT_RDEV_LOCK(rdev);
+
+ mutex_lock(&rdev->devlist_mtx);
+
+ list_for_each_entry(wdev, &rdev->netdev_list, list)
+ cfg80211_process_wdev_events(wdev);
+
+ mutex_unlock(&rdev->devlist_mtx);
+}
+
+int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, enum nl80211_iftype ntype,
+ u32 *flags, struct vif_params *params)
+{
+ int err;
+ enum nl80211_iftype otype = dev->ieee80211_ptr->iftype;
+
+ ASSERT_RDEV_LOCK(rdev);
+
+ /* don't support changing VLANs, you just re-create them */
+ if (otype == NL80211_IFTYPE_AP_VLAN)
+ return -EOPNOTSUPP;
+
+ if (!rdev->ops->change_virtual_intf ||
+ !(rdev->wiphy.interface_modes & (1 << ntype)))
+ return -EOPNOTSUPP;
+
+ if (ntype != otype) {
+ switch (otype) {
+ case NL80211_IFTYPE_ADHOC:
+ cfg80211_leave_ibss(rdev, dev, false);
+ break;
+ case NL80211_IFTYPE_STATION:
+ cfg80211_disconnect(rdev, dev,
+ WLAN_REASON_DEAUTH_LEAVING, true);
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ /* mesh should be handled? */
+ break;
+ default:
+ break;
+ }
+
+ cfg80211_process_rdev_events(rdev);
+ }
+
+ err = rdev->ops->change_virtual_intf(&rdev->wiphy, dev,
+ ntype, flags, params);
+
+ WARN_ON(!err && dev->ieee80211_ptr->iftype != ntype);
+
+ return err;
+}
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index d030c531567..561a45cf2a6 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -14,6 +14,7 @@
#include <linux/etherdevice.h>
#include <net/iw_handler.h>
#include <net/cfg80211.h>
+#include "wext-compat.h"
#include "core.h"
int cfg80211_wext_giwname(struct net_device *dev,
@@ -69,18 +70,8 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
enum nl80211_iftype type;
int ret;
- if (!wdev)
- return -EOPNOTSUPP;
-
rdev = wiphy_to_dev(wdev->wiphy);
- if (!rdev->ops->change_virtual_intf)
- return -EOPNOTSUPP;
-
- /* don't support changing VLANs, you just re-create them */
- if (wdev->iftype == NL80211_IFTYPE_AP_VLAN)
- return -EOPNOTSUPP;
-
switch (*mode) {
case IW_MODE_INFRA:
type = NL80211_IFTYPE_STATION;
@@ -103,9 +94,9 @@ int cfg80211_wext_siwmode(struct net_device *dev, struct iw_request_info *info,
memset(&vifparams, 0, sizeof(vifparams));
- ret = rdev->ops->change_virtual_intf(wdev->wiphy, dev->ifindex, type,
- NULL, &vifparams);
- WARN_ON(!ret && wdev->iftype != type);
+ cfg80211_lock_rdev(rdev);
+ ret = cfg80211_change_iface(rdev, dev, type, NULL, &vifparams);
+ cfg80211_unlock_rdev(rdev);
return ret;
}
@@ -154,7 +145,7 @@ int cfg80211_wext_giwrange(struct net_device *dev,
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct iw_range *range = (struct iw_range *) extra;
enum ieee80211_band band;
- int c = 0;
+ int i, c = 0;
if (!wdev)
return -EOPNOTSUPP;
@@ -173,9 +164,6 @@ int cfg80211_wext_giwrange(struct net_device *dev,
range->min_frag = 256;
range->max_frag = 2346;
- range->encoding_size[0] = 5;
- range->encoding_size[1] = 13;
- range->num_encoding_sizes = 2;
range->max_encoding_tokens = 4;
range->max_qual.updated = IW_QUAL_NOISE_INVALID;
@@ -204,11 +192,31 @@ int cfg80211_wext_giwrange(struct net_device *dev,
range->avg_qual.noise = range->max_qual.noise / 2;
range->avg_qual.updated = range->max_qual.updated;
- range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
- IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
+ for (i = 0; i < wdev->wiphy->n_cipher_suites; i++) {
+ switch (wdev->wiphy->cipher_suites[i]) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ range->enc_capa |= (IW_ENC_CAPA_CIPHER_TKIP |
+ IW_ENC_CAPA_WPA);
+ break;
+
+ case WLAN_CIPHER_SUITE_CCMP:
+ range->enc_capa |= (IW_ENC_CAPA_CIPHER_CCMP |
+ IW_ENC_CAPA_WPA2);
+ break;
+
+ case WLAN_CIPHER_SUITE_WEP40:
+ range->encoding_size[range->num_encoding_sizes++] =
+ WLAN_KEY_LEN_WEP40;
+ break;
+
+ case WLAN_CIPHER_SUITE_WEP104:
+ range->encoding_size[range->num_encoding_sizes++] =
+ WLAN_KEY_LEN_WEP104;
+ break;
+ }
+ }
for (band = 0; band < IEEE80211_NUM_BANDS; band ++) {
- int i;
struct ieee80211_supported_band *sband;
sband = wdev->wiphy->bands[band];
@@ -236,97 +244,40 @@ int cfg80211_wext_giwrange(struct net_device *dev,
IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);
- range->scan_capa |= IW_SCAN_CAPA_ESSID;
+ if (wdev->wiphy->max_scan_ssids > 0)
+ range->scan_capa |= IW_SCAN_CAPA_ESSID;
return 0;
}
EXPORT_SYMBOL_GPL(cfg80211_wext_giwrange);
-int cfg80211_wext_siwmlme(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *data, char *extra)
-{
- struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct iw_mlme *mlme = (struct iw_mlme *)extra;
- struct cfg80211_registered_device *rdev;
- union {
- struct cfg80211_disassoc_request disassoc;
- struct cfg80211_deauth_request deauth;
- } cmd;
-
- if (!wdev)
- return -EOPNOTSUPP;
-
- rdev = wiphy_to_dev(wdev->wiphy);
-
- if (wdev->iftype != NL80211_IFTYPE_STATION)
- return -EINVAL;
-
- if (mlme->addr.sa_family != ARPHRD_ETHER)
- return -EINVAL;
-
- memset(&cmd, 0, sizeof(cmd));
-
- switch (mlme->cmd) {
- case IW_MLME_DEAUTH:
- if (!rdev->ops->deauth)
- return -EOPNOTSUPP;
- cmd.deauth.peer_addr = mlme->addr.sa_data;
- cmd.deauth.reason_code = mlme->reason_code;
- return rdev->ops->deauth(wdev->wiphy, dev, &cmd.deauth);
- case IW_MLME_DISASSOC:
- if (!rdev->ops->disassoc)
- return -EOPNOTSUPP;
- cmd.disassoc.peer_addr = mlme->addr.sa_data;
- cmd.disassoc.reason_code = mlme->reason_code;
- return rdev->ops->disassoc(wdev->wiphy, dev, &cmd.disassoc);
- default:
- return -EOPNOTSUPP;
- }
-}
-EXPORT_SYMBOL_GPL(cfg80211_wext_siwmlme);
-
/**
* cfg80211_wext_freq - get wext frequency for non-"auto"
* @wiphy: the wiphy
* @freq: the wext freq encoding
*
- * Returns a channel, %NULL for auto, or an ERR_PTR for errors!
+ * Returns a frequency, or a negative error code, or 0 for auto.
*/
-struct ieee80211_channel *cfg80211_wext_freq(struct wiphy *wiphy,
- struct iw_freq *freq)
+int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq)
{
- struct ieee80211_channel *chan;
- int f;
-
/*
- * Parse frequency - return NULL for auto and
+ * Parse frequency - return 0 for auto and
* -EINVAL for impossible things.
*/
if (freq->e == 0) {
if (freq->m < 0)
- return NULL;
- f = ieee80211_channel_to_frequency(freq->m);
+ return 0;
+ return ieee80211_channel_to_frequency(freq->m);
} else {
int i, div = 1000000;
for (i = 0; i < freq->e; i++)
div /= 10;
if (div <= 0)
- return ERR_PTR(-EINVAL);
- f = freq->m / div;
+ return -EINVAL;
+ return freq->m / div;
}
-
- /*
- * Look up channel struct and return -EINVAL when
- * it cannot be found.
- */
- chan = ieee80211_get_channel(wiphy, f);
- if (!chan)
- return ERR_PTR(-EINVAL);
- return chan;
}
-EXPORT_SYMBOL_GPL(cfg80211_wext_freq);
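The rewritten cfg80211_wext_freq() now returns a frequency in MHz (0 for auto, negative on error) instead of a channel pointer. A hedged userspace-style sketch of the decoding it performs: struct iw_freq carries m * 10^e Hz, and with e == 0 a non-negative m is treated as a channel number; the channel mapping below mirrors the common cases of ieee80211_channel_to_frequency() and is only illustrative.

static int chan_to_mhz(int chan)
{
	if (chan == 14)
		return 2484;
	if (chan < 14)
		return 2407 + chan * 5;		/* 2.4 GHz: channel 6 -> 2437 MHz */
	return 5000 + chan * 5;			/* 5 GHz: channel 36 -> 5180 MHz */
}

static int wext_freq_to_mhz(int m, int e)
{
	int i, div = 1000000;

	if (e == 0)
		return m < 0 ? 0 : chan_to_mhz(m);	/* 0 means "auto" */

	for (i = 0; i < e; i++)
		div /= 10;
	if (div <= 0)
		return -1;			/* impossible encoding */
	return m / div;				/* m = 2437, e = 6 -> 2437 MHz */
}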
int cfg80211_wext_siwrts(struct net_device *dev,
struct iw_request_info *info,
@@ -479,15 +430,32 @@ int cfg80211_wext_giwretry(struct net_device *dev,
}
EXPORT_SYMBOL_GPL(cfg80211_wext_giwretry);
-static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
- struct net_device *dev, const u8 *addr,
- bool remove, bool tx_key, int idx,
- struct key_params *params)
+static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, const u8 *addr,
+ bool remove, bool tx_key, int idx,
+ struct key_params *params)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- int err;
+ int err, i;
+
+ if (!wdev->wext.keys) {
+ wdev->wext.keys = kzalloc(sizeof(*wdev->wext.keys),
+ GFP_KERNEL);
+ if (!wdev->wext.keys)
+ return -ENOMEM;
+ for (i = 0; i < 6; i++)
+ wdev->wext.keys->params[i].key =
+ wdev->wext.keys->data[i];
+ }
+
+ if (wdev->iftype != NL80211_IFTYPE_ADHOC &&
+ wdev->iftype != NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
+ if (!wdev->current_bss)
+ return -ENOLINK;
+
if (!rdev->ops->set_default_mgmt_key)
return -EOPNOTSUPP;
@@ -497,8 +465,14 @@ static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
return -EINVAL;
if (remove) {
- err = rdev->ops->del_key(&rdev->wiphy, dev, idx, addr);
+ err = 0;
+ if (wdev->current_bss)
+ err = rdev->ops->del_key(&rdev->wiphy, dev, idx, addr);
if (!err) {
+ if (!addr) {
+ wdev->wext.keys->params[idx].key_len = 0;
+ wdev->wext.keys->params[idx].cipher = 0;
+ }
if (idx == wdev->wext.default_key)
wdev->wext.default_key = -1;
else if (idx == wdev->wext.default_mgmt_key)
@@ -512,36 +486,65 @@ static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
return 0;
return err;
- } else {
- if (addr)
- tx_key = false;
+ }
- if (cfg80211_validate_key_settings(params, idx, addr))
- return -EINVAL;
+ if (addr)
+ tx_key = false;
+ if (cfg80211_validate_key_settings(rdev, params, idx, addr))
+ return -EINVAL;
+
+ err = 0;
+ if (wdev->current_bss)
err = rdev->ops->add_key(&rdev->wiphy, dev, idx, addr, params);
- if (err)
- return err;
+ if (err)
+ return err;
+
+ if (!addr) {
+ wdev->wext.keys->params[idx] = *params;
+ memcpy(wdev->wext.keys->data[idx],
+ params->key, params->key_len);
+ wdev->wext.keys->params[idx].key =
+ wdev->wext.keys->data[idx];
+ }
- if (tx_key || (!addr && wdev->wext.default_key == -1)) {
+ if ((params->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ params->cipher == WLAN_CIPHER_SUITE_WEP104) &&
+ (tx_key || (!addr && wdev->wext.default_key == -1))) {
+ if (wdev->current_bss)
err = rdev->ops->set_default_key(&rdev->wiphy,
dev, idx);
- if (!err)
- wdev->wext.default_key = idx;
- return err;
- }
+ if (!err)
+ wdev->wext.default_key = idx;
+ return err;
+ }
- if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC &&
- (tx_key || (!addr && wdev->wext.default_mgmt_key == -1))) {
+ if (params->cipher == WLAN_CIPHER_SUITE_AES_CMAC &&
+ (tx_key || (!addr && wdev->wext.default_mgmt_key == -1))) {
+ if (wdev->current_bss)
err = rdev->ops->set_default_mgmt_key(&rdev->wiphy,
dev, idx);
- if (!err)
- wdev->wext.default_mgmt_key = idx;
- return err;
- }
-
- return 0;
+ if (!err)
+ wdev->wext.default_mgmt_key = idx;
+ return err;
}
+
+ return 0;
+}
+
+static int cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
+ struct net_device *dev, const u8 *addr,
+ bool remove, bool tx_key, int idx,
+ struct key_params *params)
+{
+ int err;
+
+ wdev_lock(dev->ieee80211_ptr);
+ err = __cfg80211_set_encryption(rdev, dev, addr, remove,
+ tx_key, idx, params);
+ wdev_unlock(dev->ieee80211_ptr);
+
+ return err;
}
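The split above follows the usual locked/unlocked pairing: __cfg80211_set_encryption() assumes the caller already holds the wdev lock, while cfg80211_set_encryption() is the thin wrapper that takes and drops it. A sketch of the same pattern with hypothetical names and a pthread mutex standing in for wdev_lock():

#include <pthread.h>

struct obj {
	pthread_mutex_t lock;		/* e.g. PTHREAD_MUTEX_INITIALIZER */
	int value;
};

/* Caller must hold o->lock, mirroring the double-underscore helper. */
static int __do_work(struct obj *o, int v)
{
	o->value = v;
	return 0;
}

static int do_work(struct obj *o, int v)
{
	int err;

	pthread_mutex_lock(&o->lock);
	err = __do_work(o, v);
	pthread_mutex_unlock(&o->lock);
	return err;
}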
int cfg80211_wext_siwencode(struct net_device *dev,
@@ -554,6 +557,10 @@ int cfg80211_wext_siwencode(struct net_device *dev,
bool remove = false;
struct key_params params;
+ if (wdev->iftype != NL80211_IFTYPE_STATION &&
+ wdev->iftype != NL80211_IFTYPE_ADHOC)
+ return -EOPNOTSUPP;
+
/* no use -- only MFP (set_default_mgmt_key) is optional */
if (!rdev->ops->del_key ||
!rdev->ops->add_key ||
@@ -574,9 +581,14 @@ int cfg80211_wext_siwencode(struct net_device *dev,
remove = true;
else if (erq->length == 0) {
/* No key data - just set the default TX key index */
- err = rdev->ops->set_default_key(&rdev->wiphy, dev, idx);
+ err = 0;
+ wdev_lock(wdev);
+ if (wdev->current_bss)
+ err = rdev->ops->set_default_key(&rdev->wiphy,
+ dev, idx);
if (!err)
wdev->wext.default_key = idx;
+ wdev_unlock(wdev);
return err;
}
@@ -609,6 +621,10 @@ int cfg80211_wext_siwencodeext(struct net_device *dev,
struct key_params params;
u32 cipher;
+ if (wdev->iftype != NL80211_IFTYPE_STATION &&
+ wdev->iftype != NL80211_IFTYPE_ADHOC)
+ return -EOPNOTSUPP;
+
/* no use -- only MFP (set_default_mgmt_key) is optional */
if (!rdev->ops->del_key ||
!rdev->ops->add_key ||
@@ -682,37 +698,15 @@ int cfg80211_wext_siwencodeext(struct net_device *dev,
}
EXPORT_SYMBOL_GPL(cfg80211_wext_siwencodeext);
-struct giwencode_cookie {
- size_t buflen;
- char *keybuf;
-};
-
-static void giwencode_get_key_cb(void *cookie, struct key_params *params)
-{
- struct giwencode_cookie *data = cookie;
-
- if (!params->key) {
- data->buflen = 0;
- return;
- }
-
- data->buflen = min_t(size_t, data->buflen, params->key_len);
- memcpy(data->keybuf, params->key, data->buflen);
-}
-
int cfg80211_wext_giwencode(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *erq, char *keybuf)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
- int idx, err;
- struct giwencode_cookie data = {
- .keybuf = keybuf,
- .buflen = erq->length,
- };
+ int idx;
- if (!rdev->ops->get_key)
+ if (wdev->iftype != NL80211_IFTYPE_STATION &&
+ wdev->iftype != NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
idx = erq->flags & IW_ENCODE_INDEX;
@@ -727,24 +721,70 @@ int cfg80211_wext_giwencode(struct net_device *dev,
erq->flags = idx + 1;
- err = rdev->ops->get_key(&rdev->wiphy, dev, idx, NULL, &data,
- giwencode_get_key_cb);
- if (!err) {
- erq->length = data.buflen;
- erq->flags |= IW_ENCODE_ENABLED;
- return 0;
- }
-
- if (err == -ENOENT) {
+ if (!wdev->wext.keys || !wdev->wext.keys->params[idx].cipher) {
erq->flags |= IW_ENCODE_DISABLED;
erq->length = 0;
return 0;
}
- return err;
+ erq->length = min_t(size_t, erq->length,
+ wdev->wext.keys->params[idx].key_len);
+ memcpy(keybuf, wdev->wext.keys->params[idx].key, erq->length);
+ erq->flags |= IW_ENCODE_ENABLED;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(cfg80211_wext_giwencode);
+int cfg80211_wext_siwfreq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *wextfreq, char *extra)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ int freq, err;
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ return cfg80211_mgd_wext_siwfreq(dev, info, wextfreq, extra);
+ case NL80211_IFTYPE_ADHOC:
+ return cfg80211_ibss_wext_siwfreq(dev, info, wextfreq, extra);
+ default:
+ freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
+ if (freq < 0)
+ return freq;
+ if (freq == 0)
+ return -EINVAL;
+ mutex_lock(&rdev->devlist_mtx);
+ err = rdev_set_freq(rdev, NULL, freq, NL80211_CHAN_NO_HT);
+ mutex_unlock(&rdev->devlist_mtx);
+ return err;
+ }
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_siwfreq);
+
+int cfg80211_wext_giwfreq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *freq, char *extra)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_STATION:
+ return cfg80211_mgd_wext_giwfreq(dev, info, freq, extra);
+ case NL80211_IFTYPE_ADHOC:
+ return cfg80211_ibss_wext_giwfreq(dev, info, freq, extra);
+ default:
+ if (!rdev->channel)
+ return -EINVAL;
+ freq->m = rdev->channel->center_freq;
+ freq->e = 6;
+ return 0;
+ }
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_giwfreq);
+
int cfg80211_wext_siwtxpower(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *data, char *extra)
@@ -794,7 +834,7 @@ int cfg80211_wext_siwtxpower(struct net_device *dev,
return 0;
}
- return rdev->ops->set_tx_power(wdev->wiphy, type, dbm);;
+ return rdev->ops->set_tx_power(wdev->wiphy, type, dbm);
}
EXPORT_SYMBOL_GPL(cfg80211_wext_siwtxpower);
@@ -827,3 +867,547 @@ int cfg80211_wext_giwtxpower(struct net_device *dev,
return 0;
}
EXPORT_SYMBOL_GPL(cfg80211_wext_giwtxpower);
+
+static int cfg80211_set_auth_alg(struct wireless_dev *wdev,
+ s32 auth_alg)
+{
+ int nr_alg = 0;
+
+ if (!auth_alg)
+ return -EINVAL;
+
+ if (auth_alg & ~(IW_AUTH_ALG_OPEN_SYSTEM |
+ IW_AUTH_ALG_SHARED_KEY |
+ IW_AUTH_ALG_LEAP))
+ return -EINVAL;
+
+ if (auth_alg & IW_AUTH_ALG_OPEN_SYSTEM) {
+ nr_alg++;
+ wdev->wext.connect.auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM;
+ }
+
+ if (auth_alg & IW_AUTH_ALG_SHARED_KEY) {
+ nr_alg++;
+ wdev->wext.connect.auth_type = NL80211_AUTHTYPE_SHARED_KEY;
+ }
+
+ if (auth_alg & IW_AUTH_ALG_LEAP) {
+ nr_alg++;
+ wdev->wext.connect.auth_type = NL80211_AUTHTYPE_NETWORK_EAP;
+ }
+
+ if (nr_alg > 1)
+ wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
+
+ return 0;
+}
+
+static int cfg80211_set_wpa_version(struct wireless_dev *wdev, u32 wpa_versions)
+{
+ wdev->wext.connect.crypto.wpa_versions = 0;
+
+ if (wpa_versions & ~(IW_AUTH_WPA_VERSION_WPA |
+ IW_AUTH_WPA_VERSION_WPA2|
+ IW_AUTH_WPA_VERSION_DISABLED))
+ return -EINVAL;
+
+ if ((wpa_versions & IW_AUTH_WPA_VERSION_DISABLED) &&
+ (wpa_versions & (IW_AUTH_WPA_VERSION_WPA|
+ IW_AUTH_WPA_VERSION_WPA2)))
+ return -EINVAL;
+
+ if (wpa_versions & IW_AUTH_WPA_VERSION_DISABLED)
+ wdev->wext.connect.crypto.wpa_versions &=
+ ~(NL80211_WPA_VERSION_1|NL80211_WPA_VERSION_2);
+
+ if (wpa_versions & IW_AUTH_WPA_VERSION_WPA)
+ wdev->wext.connect.crypto.wpa_versions |=
+ NL80211_WPA_VERSION_1;
+
+ if (wpa_versions & IW_AUTH_WPA_VERSION_WPA2)
+ wdev->wext.connect.crypto.wpa_versions |=
+ NL80211_WPA_VERSION_2;
+
+ return 0;
+}
+
+static int cfg80211_set_cipher_group(struct wireless_dev *wdev, u32 cipher)
+{
+ wdev->wext.connect.crypto.cipher_group = 0;
+
+ if (cipher & IW_AUTH_CIPHER_WEP40)
+ wdev->wext.connect.crypto.cipher_group =
+ WLAN_CIPHER_SUITE_WEP40;
+ else if (cipher & IW_AUTH_CIPHER_WEP104)
+ wdev->wext.connect.crypto.cipher_group =
+ WLAN_CIPHER_SUITE_WEP104;
+ else if (cipher & IW_AUTH_CIPHER_TKIP)
+ wdev->wext.connect.crypto.cipher_group =
+ WLAN_CIPHER_SUITE_TKIP;
+ else if (cipher & IW_AUTH_CIPHER_CCMP)
+ wdev->wext.connect.crypto.cipher_group =
+ WLAN_CIPHER_SUITE_CCMP;
+ else if (cipher & IW_AUTH_CIPHER_AES_CMAC)
+ wdev->wext.connect.crypto.cipher_group =
+ WLAN_CIPHER_SUITE_AES_CMAC;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cfg80211_set_cipher_pairwise(struct wireless_dev *wdev, u32 cipher)
+{
+ int nr_ciphers = 0;
+ u32 *ciphers_pairwise = wdev->wext.connect.crypto.ciphers_pairwise;
+
+ if (cipher & IW_AUTH_CIPHER_WEP40) {
+ ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_WEP40;
+ nr_ciphers++;
+ }
+
+ if (cipher & IW_AUTH_CIPHER_WEP104) {
+ ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_WEP104;
+ nr_ciphers++;
+ }
+
+ if (cipher & IW_AUTH_CIPHER_TKIP) {
+ ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_TKIP;
+ nr_ciphers++;
+ }
+
+ if (cipher & IW_AUTH_CIPHER_CCMP) {
+ ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_CCMP;
+ nr_ciphers++;
+ }
+
+ if (cipher & IW_AUTH_CIPHER_AES_CMAC) {
+ ciphers_pairwise[nr_ciphers] = WLAN_CIPHER_SUITE_AES_CMAC;
+ nr_ciphers++;
+ }
+
+ BUILD_BUG_ON(NL80211_MAX_NR_CIPHER_SUITES < 5);
+
+ wdev->wext.connect.crypto.n_ciphers_pairwise = nr_ciphers;
+
+ return 0;
+}
+
+
+static int cfg80211_set_key_mgt(struct wireless_dev *wdev, u32 key_mgt)
+{
+ int nr_akm_suites = 0;
+
+ if (key_mgt & ~(IW_AUTH_KEY_MGMT_802_1X |
+ IW_AUTH_KEY_MGMT_PSK))
+ return -EINVAL;
+
+ if (key_mgt & IW_AUTH_KEY_MGMT_802_1X) {
+ wdev->wext.connect.crypto.akm_suites[nr_akm_suites] =
+ WLAN_AKM_SUITE_8021X;
+ nr_akm_suites++;
+ }
+
+ if (key_mgt & IW_AUTH_KEY_MGMT_PSK) {
+ wdev->wext.connect.crypto.akm_suites[nr_akm_suites] =
+ WLAN_AKM_SUITE_PSK;
+ nr_akm_suites++;
+ }
+
+ wdev->wext.connect.crypto.n_akm_suites = nr_akm_suites;
+
+ return 0;
+}
+
+int cfg80211_wext_siwauth(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *data, char *extra)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ if (wdev->iftype != NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+
+ switch (data->flags & IW_AUTH_INDEX) {
+ case IW_AUTH_PRIVACY_INVOKED:
+ wdev->wext.connect.privacy = data->value;
+ return 0;
+ case IW_AUTH_WPA_VERSION:
+ return cfg80211_set_wpa_version(wdev, data->value);
+ case IW_AUTH_CIPHER_GROUP:
+ return cfg80211_set_cipher_group(wdev, data->value);
+ case IW_AUTH_KEY_MGMT:
+ return cfg80211_set_key_mgt(wdev, data->value);
+ case IW_AUTH_CIPHER_PAIRWISE:
+ return cfg80211_set_cipher_pairwise(wdev, data->value);
+ case IW_AUTH_80211_AUTH_ALG:
+ return cfg80211_set_auth_alg(wdev, data->value);
+ case IW_AUTH_WPA_ENABLED:
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ case IW_AUTH_DROP_UNENCRYPTED:
+ case IW_AUTH_MFP:
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_siwauth);
+
+int cfg80211_wext_giwauth(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *data, char *extra)
+{
+ /* XXX: what do we need? */
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_giwauth);
+
+int cfg80211_wext_siwpower(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *wrq, char *extra)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ bool ps = wdev->wext.ps;
+ int timeout = wdev->wext.ps_timeout;
+ int err;
+
+ if (wdev->iftype != NL80211_IFTYPE_STATION)
+ return -EINVAL;
+
+ if (!rdev->ops->set_power_mgmt)
+ return -EOPNOTSUPP;
+
+ if (wrq->disabled) {
+ ps = false;
+ } else {
+ switch (wrq->flags & IW_POWER_MODE) {
+ case IW_POWER_ON: /* If not specified */
+ case IW_POWER_MODE: /* If set all mask */
+ case IW_POWER_ALL_R: /* If explicitly requesting all */
+ ps = true;
+ break;
+ default: /* Otherwise, reject the request */
+ return -EINVAL;
+ }
+
+ if (wrq->flags & ~(IW_POWER_MODE | IW_POWER_TIMEOUT))
+ return -EINVAL;
+
+ if (wrq->flags & IW_POWER_TIMEOUT)
+ timeout = wrq->value / 1000;
+ }
+
+ err = rdev->ops->set_power_mgmt(wdev->wiphy, dev, ps, timeout);
+ if (err)
+ return err;
+
+ wdev->wext.ps = ps;
+ wdev->wext.ps_timeout = timeout;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_siwpower);
+
+int cfg80211_wext_giwpower(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *wrq, char *extra)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ wrq->disabled = !wdev->wext.ps;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_giwpower);
+
+static int cfg80211_wds_wext_siwap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *addr, char *extra)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ int err;
+
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_WDS))
+ return -EINVAL;
+
+ if (addr->sa_family != ARPHRD_ETHER)
+ return -EINVAL;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ if (!rdev->ops->set_wds_peer)
+ return -EOPNOTSUPP;
+
+ err = rdev->ops->set_wds_peer(wdev->wiphy, dev, (u8 *) &addr->sa_data);
+ if (err)
+ return err;
+
+ memcpy(&wdev->wext.bssid, (u8 *) &addr->sa_data, ETH_ALEN);
+
+ return 0;
+}
+
+static int cfg80211_wds_wext_giwap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *addr, char *extra)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_WDS))
+ return -EINVAL;
+
+ addr->sa_family = ARPHRD_ETHER;
+ memcpy(&addr->sa_data, wdev->wext.bssid, ETH_ALEN);
+
+ return 0;
+}
+
+int cfg80211_wext_siwrate(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *rate, char *extra)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct cfg80211_bitrate_mask mask;
+
+ if (!rdev->ops->set_bitrate_mask)
+ return -EOPNOTSUPP;
+
+ mask.fixed = 0;
+ mask.maxrate = 0;
+
+ if (rate->value < 0) {
+ /* nothing */
+ } else if (rate->fixed) {
+ mask.fixed = rate->value / 1000; /* kbps */
+ } else {
+ mask.maxrate = rate->value / 1000; /* kbps */
+ }
+
+ return rdev->ops->set_bitrate_mask(wdev->wiphy, dev, NULL, &mask);
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_siwrate);
+
+int cfg80211_wext_giwrate(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_param *rate, char *extra)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ /* we are under RTNL - globally locked - so can use a static struct */
+ static struct station_info sinfo;
+ u8 addr[ETH_ALEN];
+ int err;
+
+ if (wdev->iftype != NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+
+ if (!rdev->ops->get_station)
+ return -EOPNOTSUPP;
+
+ err = 0;
+ wdev_lock(wdev);
+ if (wdev->current_bss)
+ memcpy(addr, wdev->current_bss->pub.bssid, ETH_ALEN);
+ else
+ err = -EOPNOTSUPP;
+ wdev_unlock(wdev);
+ if (err)
+ return err;
+
+ err = rdev->ops->get_station(&rdev->wiphy, dev, addr, &sinfo);
+ if (err)
+ return err;
+
+ if (!(sinfo.filled & STATION_INFO_TX_BITRATE))
+ return -EOPNOTSUPP;
+
+ rate->value = 0;
+
+ if (!(sinfo.txrate.flags & RATE_INFO_FLAGS_MCS))
+ rate->value = 100000 * sinfo.txrate.legacy;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_giwrate);
+
+/* Get wireless statistics. Called by /proc/net/wireless and by SIOCGIWSTATS */
+struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ /* we are under RTNL - globally locked - so can use static structs */
+ static struct iw_statistics wstats;
+ static struct station_info sinfo;
+ u8 bssid[ETH_ALEN];
+
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION)
+ return NULL;
+
+ if (!rdev->ops->get_station)
+ return NULL;
+
+ /* Grab BSSID of current BSS, if any */
+ wdev_lock(wdev);
+ if (!wdev->current_bss) {
+ wdev_unlock(wdev);
+ return NULL;
+ }
+ memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
+ wdev_unlock(wdev);
+
+ if (rdev->ops->get_station(&rdev->wiphy, dev, bssid, &sinfo))
+ return NULL;
+
+ memset(&wstats, 0, sizeof(wstats));
+
+ switch (rdev->wiphy.signal_type) {
+ case CFG80211_SIGNAL_TYPE_MBM:
+ if (sinfo.filled & STATION_INFO_SIGNAL) {
+ int sig = sinfo.signal;
+ wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED;
+ wstats.qual.updated |= IW_QUAL_QUAL_UPDATED;
+ wstats.qual.updated |= IW_QUAL_DBM;
+ wstats.qual.level = sig;
+ if (sig < -110)
+ sig = -110;
+ else if (sig > -40)
+ sig = -40;
+ wstats.qual.qual = sig + 110;
+ break;
+ }
+ case CFG80211_SIGNAL_TYPE_UNSPEC:
+ if (sinfo.filled & STATION_INFO_SIGNAL) {
+ wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED;
+ wstats.qual.updated |= IW_QUAL_QUAL_UPDATED;
+ wstats.qual.level = sinfo.signal;
+ wstats.qual.qual = sinfo.signal;
+ break;
+ }
+ default:
+ wstats.qual.updated |= IW_QUAL_LEVEL_INVALID;
+ wstats.qual.updated |= IW_QUAL_QUAL_INVALID;
+ }
+
+ wstats.qual.updated |= IW_QUAL_NOISE_INVALID;
+
+ return &wstats;
+}
+EXPORT_SYMBOL_GPL(cfg80211_wireless_stats);
+
+int cfg80211_wext_siwap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ return cfg80211_ibss_wext_siwap(dev, info, ap_addr, extra);
+ case NL80211_IFTYPE_STATION:
+ return cfg80211_mgd_wext_siwap(dev, info, ap_addr, extra);
+ case NL80211_IFTYPE_WDS:
+ return cfg80211_wds_wext_siwap(dev, info, ap_addr, extra);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_siwap);
+
+int cfg80211_wext_giwap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ return cfg80211_ibss_wext_giwap(dev, info, ap_addr, extra);
+ case NL80211_IFTYPE_STATION:
+ return cfg80211_mgd_wext_giwap(dev, info, ap_addr, extra);
+ case NL80211_IFTYPE_WDS:
+ return cfg80211_wds_wext_giwap(dev, info, ap_addr, extra);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_giwap);
+
+int cfg80211_wext_siwessid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *ssid)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ return cfg80211_ibss_wext_siwessid(dev, info, data, ssid);
+ case NL80211_IFTYPE_STATION:
+ return cfg80211_mgd_wext_siwessid(dev, info, data, ssid);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_siwessid);
+
+int cfg80211_wext_giwessid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *ssid)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ switch (wdev->iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ return cfg80211_ibss_wext_giwessid(dev, info, data, ssid);
+ case NL80211_IFTYPE_STATION:
+ return cfg80211_mgd_wext_giwessid(dev, info, data, ssid);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_giwessid);
+
+static const iw_handler cfg80211_handlers[] = {
+ [IW_IOCTL_IDX(SIOCGIWNAME)] = (iw_handler) cfg80211_wext_giwname,
+ [IW_IOCTL_IDX(SIOCSIWFREQ)] = (iw_handler) cfg80211_wext_siwfreq,
+ [IW_IOCTL_IDX(SIOCGIWFREQ)] = (iw_handler) cfg80211_wext_giwfreq,
+ [IW_IOCTL_IDX(SIOCSIWMODE)] = (iw_handler) cfg80211_wext_siwmode,
+ [IW_IOCTL_IDX(SIOCGIWMODE)] = (iw_handler) cfg80211_wext_giwmode,
+ [IW_IOCTL_IDX(SIOCGIWRANGE)] = (iw_handler) cfg80211_wext_giwrange,
+ [IW_IOCTL_IDX(SIOCSIWAP)] = (iw_handler) cfg80211_wext_siwap,
+ [IW_IOCTL_IDX(SIOCGIWAP)] = (iw_handler) cfg80211_wext_giwap,
+ [IW_IOCTL_IDX(SIOCSIWMLME)] = (iw_handler) cfg80211_wext_siwmlme,
+ [IW_IOCTL_IDX(SIOCSIWSCAN)] = (iw_handler) cfg80211_wext_siwscan,
+ [IW_IOCTL_IDX(SIOCGIWSCAN)] = (iw_handler) cfg80211_wext_giwscan,
+ [IW_IOCTL_IDX(SIOCSIWESSID)] = (iw_handler) cfg80211_wext_siwessid,
+ [IW_IOCTL_IDX(SIOCGIWESSID)] = (iw_handler) cfg80211_wext_giwessid,
+ [IW_IOCTL_IDX(SIOCSIWRATE)] = (iw_handler) cfg80211_wext_siwrate,
+ [IW_IOCTL_IDX(SIOCGIWRATE)] = (iw_handler) cfg80211_wext_giwrate,
+ [IW_IOCTL_IDX(SIOCSIWRTS)] = (iw_handler) cfg80211_wext_siwrts,
+ [IW_IOCTL_IDX(SIOCGIWRTS)] = (iw_handler) cfg80211_wext_giwrts,
+ [IW_IOCTL_IDX(SIOCSIWFRAG)] = (iw_handler) cfg80211_wext_siwfrag,
+ [IW_IOCTL_IDX(SIOCGIWFRAG)] = (iw_handler) cfg80211_wext_giwfrag,
+ [IW_IOCTL_IDX(SIOCSIWTXPOW)] = (iw_handler) cfg80211_wext_siwtxpower,
+ [IW_IOCTL_IDX(SIOCGIWTXPOW)] = (iw_handler) cfg80211_wext_giwtxpower,
+ [IW_IOCTL_IDX(SIOCSIWRETRY)] = (iw_handler) cfg80211_wext_siwretry,
+ [IW_IOCTL_IDX(SIOCGIWRETRY)] = (iw_handler) cfg80211_wext_giwretry,
+ [IW_IOCTL_IDX(SIOCSIWENCODE)] = (iw_handler) cfg80211_wext_siwencode,
+ [IW_IOCTL_IDX(SIOCGIWENCODE)] = (iw_handler) cfg80211_wext_giwencode,
+ [IW_IOCTL_IDX(SIOCSIWPOWER)] = (iw_handler) cfg80211_wext_siwpower,
+ [IW_IOCTL_IDX(SIOCGIWPOWER)] = (iw_handler) cfg80211_wext_giwpower,
+ [IW_IOCTL_IDX(SIOCSIWGENIE)] = (iw_handler) cfg80211_wext_siwgenie,
+ [IW_IOCTL_IDX(SIOCSIWAUTH)] = (iw_handler) cfg80211_wext_siwauth,
+ [IW_IOCTL_IDX(SIOCGIWAUTH)] = (iw_handler) cfg80211_wext_giwauth,
+ [IW_IOCTL_IDX(SIOCSIWENCODEEXT)]= (iw_handler) cfg80211_wext_siwencodeext,
+};
+
+const struct iw_handler_def cfg80211_wext_handler = {
+ .num_standard = ARRAY_SIZE(cfg80211_handlers),
+ .standard = cfg80211_handlers,
+ .get_wireless_stats = cfg80211_wireless_stats,
+};
diff --git a/net/wireless/wext-compat.h b/net/wireless/wext-compat.h
new file mode 100644
index 00000000000..20b3daef696
--- /dev/null
+++ b/net/wireless/wext-compat.h
@@ -0,0 +1,49 @@
+#ifndef __WEXT_COMPAT
+#define __WEXT_COMPAT
+
+#include <net/iw_handler.h>
+#include <linux/wireless.h>
+
+int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *freq, char *extra);
+int cfg80211_ibss_wext_giwfreq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *freq, char *extra);
+int cfg80211_ibss_wext_siwap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra);
+int cfg80211_ibss_wext_giwap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra);
+int cfg80211_ibss_wext_siwessid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *ssid);
+int cfg80211_ibss_wext_giwessid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *ssid);
+
+int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *freq, char *extra);
+int cfg80211_mgd_wext_giwfreq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *freq, char *extra);
+int cfg80211_mgd_wext_siwap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra);
+int cfg80211_mgd_wext_giwap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra);
+int cfg80211_mgd_wext_siwessid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *ssid);
+int cfg80211_mgd_wext_giwessid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *ssid);
+
+int cfg80211_wext_freq(struct wiphy *wiphy, struct iw_freq *freq);
+
+
+extern const struct iw_handler_def cfg80211_wext_handler;
+#endif /* __WEXT_COMPAT */
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
new file mode 100644
index 00000000000..d16cd9ea4d0
--- /dev/null
+++ b/net/wireless/wext-sme.c
@@ -0,0 +1,404 @@
+/*
+ * cfg80211 wext compat for managed mode.
+ *
+ * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (C) 2009 Intel Corporation. All rights reserved.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <net/cfg80211.h>
+#include "wext-compat.h"
+#include "nl80211.h"
+
+int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev)
+{
+ struct cfg80211_cached_keys *ck = NULL;
+ const u8 *prev_bssid = NULL;
+ int err, i;
+
+ ASSERT_RDEV_LOCK(rdev);
+ ASSERT_WDEV_LOCK(wdev);
+
+ if (!netif_running(wdev->netdev))
+ return 0;
+
+ wdev->wext.connect.ie = wdev->wext.ie;
+ wdev->wext.connect.ie_len = wdev->wext.ie_len;
+ wdev->wext.connect.privacy = wdev->wext.default_key != -1;
+
+ if (wdev->wext.keys) {
+ wdev->wext.keys->def = wdev->wext.default_key;
+ wdev->wext.keys->defmgmt = wdev->wext.default_mgmt_key;
+ }
+
+ if (!wdev->wext.connect.ssid_len)
+ return 0;
+
+ if (wdev->wext.keys) {
+ ck = kmemdup(wdev->wext.keys, sizeof(*ck), GFP_KERNEL);
+ if (!ck)
+ return -ENOMEM;
+ for (i = 0; i < 6; i++)
+ ck->params[i].key = ck->data[i];
+ }
+
+ if (wdev->wext.prev_bssid_valid)
+ prev_bssid = wdev->wext.prev_bssid;
+
+ err = __cfg80211_connect(rdev, wdev->netdev,
+ &wdev->wext.connect, ck, prev_bssid);
+ if (err)
+ kfree(ck);
+
+ return err;
+}
+
+int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *wextfreq, char *extra)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ struct ieee80211_channel *chan = NULL;
+ int err, freq;
+
+ /* call only for station! */
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
+ return -EINVAL;
+
+ freq = cfg80211_wext_freq(wdev->wiphy, wextfreq);
+ if (freq < 0)
+ return freq;
+
+ if (freq) {
+ chan = ieee80211_get_channel(wdev->wiphy, freq);
+ if (!chan)
+ return -EINVAL;
+ if (chan->flags & IEEE80211_CHAN_DISABLED)
+ return -EINVAL;
+ }
+
+ cfg80211_lock_rdev(rdev);
+ mutex_lock(&rdev->devlist_mtx);
+ wdev_lock(wdev);
+
+ if (wdev->sme_state != CFG80211_SME_IDLE) {
+ bool event = true;
+
+ if (wdev->wext.connect.channel == chan) {
+ err = 0;
+ goto out;
+ }
+
+ /* if SSID set, we'll try right again, avoid event */
+ if (wdev->wext.connect.ssid_len)
+ event = false;
+ err = __cfg80211_disconnect(rdev, dev,
+ WLAN_REASON_DEAUTH_LEAVING, event);
+ if (err)
+ goto out;
+ }
+
+
+ wdev->wext.connect.channel = chan;
+
+ /* SSID is not set, we just want to switch channel */
+ if (chan && !wdev->wext.connect.ssid_len) {
+ err = rdev_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
+ goto out;
+ }
+
+ err = cfg80211_mgd_wext_connect(rdev, wdev);
+ out:
+ wdev_unlock(wdev);
+ mutex_unlock(&rdev->devlist_mtx);
+ cfg80211_unlock_rdev(rdev);
+ return err;
+}
+
+int cfg80211_mgd_wext_giwfreq(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_freq *freq, char *extra)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct ieee80211_channel *chan = NULL;
+
+ /* call only for station! */
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
+ return -EINVAL;
+
+ wdev_lock(wdev);
+ if (wdev->current_bss)
+ chan = wdev->current_bss->pub.channel;
+ else if (wdev->wext.connect.channel)
+ chan = wdev->wext.connect.channel;
+ wdev_unlock(wdev);
+
+ if (chan) {
+ freq->m = chan->center_freq;
+ freq->e = 6;
+ return 0;
+ }
+
+ /* no channel if not joining */
+ return -EINVAL;
+}
+
+int cfg80211_mgd_wext_siwessid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *ssid)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ size_t len = data->length;
+ int err;
+
+ /* call only for station! */
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
+ return -EINVAL;
+
+ if (!data->flags)
+ len = 0;
+
+ /* iwconfig uses NUL termination in the SSID */
+ if (len > 0 && ssid[len - 1] == '\0')
+ len--;
+
+ cfg80211_lock_rdev(rdev);
+ mutex_lock(&rdev->devlist_mtx);
+ wdev_lock(wdev);
+
+ err = 0;
+
+ if (wdev->sme_state != CFG80211_SME_IDLE) {
+ bool event = true;
+
+ if (wdev->wext.connect.ssid && len &&
+ len == wdev->wext.connect.ssid_len &&
+ memcmp(wdev->wext.connect.ssid, ssid, len) == 0)
+ goto out;
+
+ /* if SSID set now, we'll try to connect, avoid event */
+ if (len)
+ event = false;
+ err = __cfg80211_disconnect(rdev, dev,
+ WLAN_REASON_DEAUTH_LEAVING, event);
+ if (err)
+ goto out;
+ }
+
+ wdev->wext.prev_bssid_valid = false;
+ wdev->wext.connect.ssid = wdev->wext.ssid;
+ memcpy(wdev->wext.ssid, ssid, len);
+ wdev->wext.connect.ssid_len = len;
+
+ wdev->wext.connect.crypto.control_port = false;
+
+ err = cfg80211_mgd_wext_connect(rdev, wdev);
+ out:
+ wdev_unlock(wdev);
+ mutex_unlock(&rdev->devlist_mtx);
+ cfg80211_unlock_rdev(rdev);
+ return err;
+}
+
+int cfg80211_mgd_wext_giwessid(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *ssid)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ /* call only for station! */
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
+ return -EINVAL;
+
+ data->flags = 0;
+
+ wdev_lock(wdev);
+ if (wdev->current_bss) {
+ const u8 *ie = ieee80211_bss_get_ie(&wdev->current_bss->pub,
+ WLAN_EID_SSID);
+ if (ie) {
+ data->flags = 1;
+ data->length = ie[1];
+ memcpy(ssid, ie + 2, data->length);
+ }
+ } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) {
+ data->flags = 1;
+ data->length = wdev->wext.connect.ssid_len;
+ memcpy(ssid, wdev->wext.connect.ssid, data->length);
+ } else
+ data->flags = 0;
+ wdev_unlock(wdev);
+
+ return 0;
+}
+
+int cfg80211_mgd_wext_siwap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ u8 *bssid = ap_addr->sa_data;
+ int err;
+
+ /* call only for station! */
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
+ return -EINVAL;
+
+ if (ap_addr->sa_family != ARPHRD_ETHER)
+ return -EINVAL;
+
+ /* automatic mode */
+ if (is_zero_ether_addr(bssid) || is_broadcast_ether_addr(bssid))
+ bssid = NULL;
+
+ cfg80211_lock_rdev(rdev);
+ mutex_lock(&rdev->devlist_mtx);
+ wdev_lock(wdev);
+
+ if (wdev->sme_state != CFG80211_SME_IDLE) {
+ err = 0;
+ /* both automatic */
+ if (!bssid && !wdev->wext.connect.bssid)
+ goto out;
+
+ /* fixed already - and no change */
+ if (wdev->wext.connect.bssid && bssid &&
+ compare_ether_addr(bssid, wdev->wext.connect.bssid) == 0)
+ goto out;
+
+ err = __cfg80211_disconnect(rdev, dev,
+ WLAN_REASON_DEAUTH_LEAVING, false);
+ if (err)
+ goto out;
+ }
+
+ if (bssid) {
+ memcpy(wdev->wext.bssid, bssid, ETH_ALEN);
+ wdev->wext.connect.bssid = wdev->wext.bssid;
+ } else
+ wdev->wext.connect.bssid = NULL;
+
+ err = cfg80211_mgd_wext_connect(rdev, wdev);
+ out:
+ wdev_unlock(wdev);
+ mutex_unlock(&rdev->devlist_mtx);
+ cfg80211_unlock_rdev(rdev);
+ return err;
+}
+
+int cfg80211_mgd_wext_giwap(struct net_device *dev,
+ struct iw_request_info *info,
+ struct sockaddr *ap_addr, char *extra)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+
+ /* call only for station! */
+ if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
+ return -EINVAL;
+
+ ap_addr->sa_family = ARPHRD_ETHER;
+
+ wdev_lock(wdev);
+ if (wdev->current_bss)
+ memcpy(ap_addr->sa_data, wdev->current_bss->pub.bssid, ETH_ALEN);
+ else if (wdev->wext.connect.bssid)
+ memcpy(ap_addr->sa_data, wdev->wext.connect.bssid, ETH_ALEN);
+ else
+ memset(ap_addr->sa_data, 0, ETH_ALEN);
+ wdev_unlock(wdev);
+
+ return 0;
+}
+
+int cfg80211_wext_siwgenie(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+ u8 *ie = extra;
+ int ie_len = data->length, err;
+
+ if (wdev->iftype != NL80211_IFTYPE_STATION)
+ return -EOPNOTSUPP;
+
+ if (!ie_len)
+ ie = NULL;
+
+ wdev_lock(wdev);
+
+ /* no change */
+ err = 0;
+ if (wdev->wext.ie_len == ie_len &&
+ memcmp(wdev->wext.ie, ie, ie_len) == 0)
+ goto out;
+
+ if (ie_len) {
+ ie = kmemdup(extra, ie_len, GFP_KERNEL);
+ if (!ie) {
+ err = -ENOMEM;
+ goto out;
+ }
+ } else
+ ie = NULL;
+
+ kfree(wdev->wext.ie);
+ wdev->wext.ie = ie;
+ wdev->wext.ie_len = ie_len;
+
+ if (wdev->sme_state != CFG80211_SME_IDLE) {
+ err = __cfg80211_disconnect(rdev, dev,
+ WLAN_REASON_DEAUTH_LEAVING, false);
+ if (err)
+ goto out;
+ }
+
+ /* userspace better not think we'll reconnect */
+ err = 0;
+ out:
+ wdev_unlock(wdev);
+ return err;
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_siwgenie);
+
+int cfg80211_wext_siwmlme(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *data, char *extra)
+{
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct iw_mlme *mlme = (struct iw_mlme *)extra;
+ struct cfg80211_registered_device *rdev;
+ int err;
+
+ if (!wdev)
+ return -EOPNOTSUPP;
+
+ rdev = wiphy_to_dev(wdev->wiphy);
+
+ if (wdev->iftype != NL80211_IFTYPE_STATION)
+ return -EINVAL;
+
+ if (mlme->addr.sa_family != ARPHRD_ETHER)
+ return -EINVAL;
+
+ wdev_lock(wdev);
+ switch (mlme->cmd) {
+ case IW_MLME_DEAUTH:
+ case IW_MLME_DISASSOC:
+ err = __cfg80211_disconnect(rdev, dev, mlme->reason_code,
+ true);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ wdev_unlock(wdev);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(cfg80211_wext_siwmlme);
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index 252c2010c2e..5b4a0cee441 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -417,6 +417,21 @@ static const int event_type_size[] = {
IW_EV_QUAL_LEN, /* IW_HEADER_TYPE_QUAL */
};
+#ifdef CONFIG_COMPAT
+static const int compat_event_type_size[] = {
+ IW_EV_COMPAT_LCP_LEN, /* IW_HEADER_TYPE_NULL */
+ 0,
+ IW_EV_COMPAT_CHAR_LEN, /* IW_HEADER_TYPE_CHAR */
+ 0,
+ IW_EV_COMPAT_UINT_LEN, /* IW_HEADER_TYPE_UINT */
+ IW_EV_COMPAT_FREQ_LEN, /* IW_HEADER_TYPE_FREQ */
+ IW_EV_COMPAT_ADDR_LEN, /* IW_HEADER_TYPE_ADDR */
+ 0,
+ IW_EV_COMPAT_POINT_LEN, /* Without variable payload */
+ IW_EV_COMPAT_PARAM_LEN, /* IW_HEADER_TYPE_PARAM */
+ IW_EV_COMPAT_QUAL_LEN, /* IW_HEADER_TYPE_QUAL */
+};
+#endif
/************************ COMMON SUBROUTINES ************************/
/*
@@ -610,6 +625,11 @@ static void wireless_seq_printf_stats(struct seq_file *seq,
{
/* Get stats from the driver */
struct iw_statistics *stats = get_wireless_stats(dev);
+ static struct iw_statistics nullstats = {};
+
+ /* show device if it's wireless regardless of current stats */
+ if (!stats && dev->wireless_handlers)
+ stats = &nullstats;
if (stats) {
seq_printf(seq, "%6s: %04x %3d%c %3d%c %3d%c %6d %6d %6d "
@@ -628,7 +648,9 @@ static void wireless_seq_printf_stats(struct seq_file *seq,
stats->discard.nwid, stats->discard.code,
stats->discard.fragment, stats->discard.retries,
stats->discard.misc, stats->miss.beacon);
- stats->qual.updated &= ~IW_QUAL_ALL_UPDATED;
+
+ if (stats != &nullstats)
+ stats->qual.updated &= ~IW_QUAL_ALL_UPDATED;
}
}
@@ -1250,65 +1272,57 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
}
#endif
-/************************* EVENT PROCESSING *************************/
-/*
- * Process events generated by the wireless layer or the driver.
- * Most often, the event will be propagated through rtnetlink
- */
+static int __net_init wext_pernet_init(struct net *net)
+{
+ skb_queue_head_init(&net->wext_nlevents);
+ return 0;
+}
-/* ---------------------------------------------------------------- */
-/*
- * Locking...
- * ----------
- *
- * Thanks to Herbert Xu <herbert@gondor.apana.org.au> for fixing
- * the locking issue in here and implementing this code !
- *
- * The issue : wireless_send_event() is often called in interrupt context,
- * while the Netlink layer can never be called in interrupt context.
- * The fully formed RtNetlink events are queued, and then a tasklet is run
- * to feed those to Netlink.
- * The skb_queue is interrupt safe, and its lock is not held while calling
- * Netlink, so there is no possibility of dealock.
- * Jean II
- */
+static void __net_exit wext_pernet_exit(struct net *net)
+{
+ skb_queue_purge(&net->wext_nlevents);
+}
-static struct sk_buff_head wireless_nlevent_queue;
+static struct pernet_operations wext_pernet_ops = {
+ .init = wext_pernet_init,
+ .exit = wext_pernet_exit,
+};
static int __init wireless_nlevent_init(void)
{
- skb_queue_head_init(&wireless_nlevent_queue);
- return 0;
+ return register_pernet_subsys(&wext_pernet_ops);
}
subsys_initcall(wireless_nlevent_init);
-static void wireless_nlevent_process(unsigned long data)
+/* Process events generated by the wireless layer or the driver. */
+static void wireless_nlevent_process(struct work_struct *work)
{
struct sk_buff *skb;
+ struct net *net;
- while ((skb = skb_dequeue(&wireless_nlevent_queue)))
- rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
+ rtnl_lock();
+
+ for_each_net(net) {
+ while ((skb = skb_dequeue(&net->wext_nlevents)))
+ rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
+ GFP_KERNEL);
+ }
+
+ rtnl_unlock();
}
-static DECLARE_TASKLET(wireless_nlevent_tasklet, wireless_nlevent_process, 0);
+static DECLARE_WORK(wireless_nlevent_work, wireless_nlevent_process);
-/* ---------------------------------------------------------------- */
-/*
- * Fill a rtnetlink message with our event data.
- * Note that we propage only the specified event and don't dump the
- * current wireless config. Dumping the wireless config is far too
- * expensive (for each parameter, the driver need to query the hardware).
- */
-static int rtnetlink_fill_iwinfo(struct sk_buff *skb, struct net_device *dev,
- int type, char *event, int event_len)
+static struct nlmsghdr *rtnetlink_ifinfo_prep(struct net_device *dev,
+ struct sk_buff *skb)
{
struct ifinfomsg *r;
struct nlmsghdr *nlh;
- nlh = nlmsg_put(skb, 0, 0, type, sizeof(*r), 0);
- if (nlh == NULL)
- return -EMSGSIZE;
+ nlh = nlmsg_put(skb, 0, 0, RTM_NEWLINK, sizeof(*r), 0);
+ if (!nlh)
+ return NULL;
r = nlmsg_data(nlh);
r->ifi_family = AF_UNSPEC;
@@ -1319,48 +1333,14 @@ static int rtnetlink_fill_iwinfo(struct sk_buff *skb, struct net_device *dev,
r->ifi_change = 0; /* Wireless changes don't affect those flags */
NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
- /* Add the wireless events in the netlink packet */
- NLA_PUT(skb, IFLA_WIRELESS, event_len, event);
- return nlmsg_end(skb, nlh);
-
-nla_put_failure:
+ return nlh;
+ nla_put_failure:
nlmsg_cancel(skb, nlh);
- return -EMSGSIZE;
+ return NULL;
}
-/* ---------------------------------------------------------------- */
-/*
- * Create and broadcast and send it on the standard rtnetlink socket
- * This is a pure clone rtmsg_ifinfo() in net/core/rtnetlink.c
- * Andrzej Krzysztofowicz mandated that I used a IFLA_XXX field
- * within a RTM_NEWLINK event.
- */
-static void rtmsg_iwinfo(struct net_device *dev, char *event, int event_len)
-{
- struct sk_buff *skb;
- int err;
-
- if (!net_eq(dev_net(dev), &init_net))
- return;
-
- skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
- if (!skb)
- return;
- err = rtnetlink_fill_iwinfo(skb, dev, RTM_NEWLINK, event, event_len);
- if (err < 0) {
- WARN_ON(err == -EMSGSIZE);
- kfree_skb(skb);
- return;
- }
-
- NETLINK_CB(skb).dst_group = RTNLGRP_LINK;
- skb_queue_tail(&wireless_nlevent_queue, skb);
- tasklet_schedule(&wireless_nlevent_tasklet);
-}
-
-/* ---------------------------------------------------------------- */
/*
* Main event dispatcher. Called from other parts and drivers.
* Send the event on the appropriate channels.
@@ -1369,7 +1349,7 @@ static void rtmsg_iwinfo(struct net_device *dev, char *event, int event_len)
void wireless_send_event(struct net_device * dev,
unsigned int cmd,
union iwreq_data * wrqu,
- char * extra)
+ const char * extra)
{
const struct iw_ioctl_description * descr = NULL;
int extra_len = 0;
@@ -1379,6 +1359,25 @@ void wireless_send_event(struct net_device * dev,
int wrqu_off = 0; /* Offset in wrqu */
/* Don't "optimise" the following variable, it will crash */
unsigned cmd_index; /* *MUST* be unsigned */
+ struct sk_buff *skb;
+ struct nlmsghdr *nlh;
+ struct nlattr *nla;
+#ifdef CONFIG_COMPAT
+ struct __compat_iw_event *compat_event;
+ struct compat_iw_point compat_wrqu;
+ struct sk_buff *compskb;
+#endif
+
+ /*
+ * Nothing in the kernel sends scan events with data, be safe.
+ * This is necessary because we cannot fix up scan event data
+ * for compat, due to being contained in 'extra', but normally
+ * applications are required to retrieve the scan data anyway
+ * and no data is included in the event, so this codifies that
+ * practice.
+ */
+ if (WARN_ON(cmd == SIOCGIWSCAN && extra))
+ extra = NULL;
/* Get the description of the Event */
if (cmd <= SIOCIWLAST) {
@@ -1426,25 +1425,107 @@ void wireless_send_event(struct net_device * dev,
hdr_len = event_type_size[descr->header_type];
event_len = hdr_len + extra_len;
- /* Create temporary buffer to hold the event */
- event = kmalloc(event_len, GFP_ATOMIC);
- if (event == NULL)
+ /*
+ * The problem for 64/32 bit.
+ *
+ * On 64-bit, a regular event is laid out as follows:
+ * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | event.len | event.cmd | p a d d i n g |
+ * | wrqu data ... (with the correct size) |
+ *
+ * This padding exists because we manipulate event->u,
+ * and 'event' is not packed.
+ *
+ * An iw_point event is laid out like this instead:
+ * | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
+ * | event.len | event.cmd | p a d d i n g |
+ * | iwpnt.len | iwpnt.flg | p a d d i n g |
+ * | extra data ...
+ *
+ * The second padding exists because struct iw_point is extended,
+ * but this depends on the platform...
+ *
+ * On 32-bit, all the padding shouldn't be there.
+ */
+
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ /* Send via the RtNetlink event channel */
+ nlh = rtnetlink_ifinfo_prep(dev, skb);
+ if (WARN_ON(!nlh)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ /* Add the wireless events in the netlink packet */
+ nla = nla_reserve(skb, IFLA_WIRELESS, event_len);
+ if (!nla) {
+ kfree_skb(skb);
return;
+ }
+ event = nla_data(nla);
- /* Fill event */
+ /* Fill event - first clear to avoid data leaking */
+ memset(event, 0, hdr_len);
event->len = event_len;
event->cmd = cmd;
memcpy(&event->u, ((char *) wrqu) + wrqu_off, hdr_len - IW_EV_LCP_LEN);
- if (extra)
+ if (extra_len)
memcpy(((char *) event) + hdr_len, extra, extra_len);
+ nlmsg_end(skb, nlh);
+#ifdef CONFIG_COMPAT
+ hdr_len = compat_event_type_size[descr->header_type];
+ event_len = hdr_len + extra_len;
+
+ compskb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!compskb) {
+ kfree_skb(skb);
+ return;
+ }
+
/* Send via the RtNetlink event channel */
- rtmsg_iwinfo(dev, (char *) event, event_len);
+ nlh = rtnetlink_ifinfo_prep(dev, compskb);
+ if (WARN_ON(!nlh)) {
+ kfree_skb(skb);
+ kfree_skb(compskb);
+ return;
+ }
- /* Cleanup */
- kfree(event);
+ /* Add the wireless events in the netlink packet */
+ nla = nla_reserve(compskb, IFLA_WIRELESS, event_len);
+ if (!nla) {
+ kfree_skb(skb);
+ kfree_skb(compskb);
+ return;
+ }
+ compat_event = nla_data(nla);
- return; /* Always success, I guess ;-) */
+ compat_event->len = event_len;
+ compat_event->cmd = cmd;
+ if (descr->header_type == IW_HEADER_TYPE_POINT) {
+ compat_wrqu.length = wrqu->data.length;
+ compat_wrqu.flags = wrqu->data.flags;
+ memcpy(&compat_event->pointer,
+ ((char *) &compat_wrqu) + IW_EV_COMPAT_POINT_OFF,
+ hdr_len - IW_EV_COMPAT_LCP_LEN);
+ if (extra_len)
+ memcpy(((char *) compat_event) + hdr_len,
+ extra, extra_len);
+ } else {
+ /* extra_len must be zero, so no if (extra) needed */
+ memcpy(&compat_event->pointer, wrqu,
+ hdr_len - IW_EV_COMPAT_LCP_LEN);
+ }
+
+ nlmsg_end(compskb, nlh);
+
+ skb_shinfo(skb)->frag_list = compskb;
+#endif
+ skb_queue_tail(&dev_net(dev)->wext_nlevents, skb);
+ schedule_work(&wireless_nlevent_work);
}
EXPORT_SYMBOL(wireless_send_event);
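The long comment inside wireless_send_event() describes why a 64-bit kernel and a 32-bit userland disagree about where the payload starts: the union following the 4-byte len/cmd header is pointer-aligned. A standalone illustration of that effect with a hypothetical struct (not the real iw_event definition):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct fake_event {
	uint16_t len;
	uint16_t cmd;
	union {
		uint32_t mode;
		struct { void *p; uint16_t length, flags; } point;
	} u;				/* alignment follows the pointer member */
};

int main(void)
{
	/* prints 4 on a 32-bit build, 8 on a typical LP64 build */
	printf("payload offset: %zu\n", offsetof(struct fake_event, u));
	return 0;
}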
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index d401dc8f05e..e5195c99f71 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -16,7 +16,7 @@ static inline unsigned int __xfrm6_addr_hash(xfrm_address_t *addr)
static inline unsigned int __xfrm4_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)
{
- return ntohl(daddr->a4 ^ saddr->a4);
+ return ntohl(daddr->a4 + saddr->a4);
}
static inline unsigned int __xfrm6_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)
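A small demonstration of how the two combiners differ on related addresses: XOR cancels any shared prefix (and collapses daddr == saddr pairs to zero), while addition keeps the high bits populated. Whether that is the motivation for this one-line change is an assumption; the patch carries no rationale.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint32_t a = inet_addr("10.0.0.1");	/* two hosts in the same /24 */
	uint32_t b = inet_addr("10.0.0.2");

	printf("xor: %08x\n", ntohl(a ^ b));	/* only the differing low bits survive */
	printf("add: %08x\n", ntohl(a + b));	/* high bits still carry information */
	return 0;
}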
diff --git a/net/xfrm/xfrm_proc.c b/net/xfrm/xfrm_proc.c
index a2adb51849a..fef8db553e8 100644
--- a/net/xfrm/xfrm_proc.c
+++ b/net/xfrm/xfrm_proc.c
@@ -60,7 +60,7 @@ static int xfrm_statistics_seq_open(struct inode *inode, struct file *file)
return single_open_net(inode, file, xfrm_statistics_seq_show);
}
-static struct file_operations xfrm_statistics_seq_fops = {
+static const struct file_operations xfrm_statistics_seq_fops = {
.owner = THIS_MODULE,
.open = xfrm_statistics_seq_open,
.read = seq_read,