-rw-r--r--  Documentation/crypto/api-intro.txt | 4
-rw-r--r--  Documentation/feature-removal-schedule.txt | 12
-rw-r--r--  MAINTAINERS | 4
-rw-r--r--  arch/avr32/boards/atstk1000/Makefile | 2
-rw-r--r--  arch/avr32/boards/atstk1000/atstk1002.c | 53
-rw-r--r--  arch/avr32/boards/atstk1000/spi.c | 27
-rw-r--r--  arch/avr32/kernel/cpu.c | 1
-rw-r--r--  arch/avr32/kernel/irq.c | 1
-rw-r--r--  arch/avr32/kernel/setup.c | 4
-rw-r--r--  arch/avr32/lib/libgcc.h | 33
-rw-r--r--  arch/avr32/lib/longlong.h | 98
-rw-r--r--  arch/avr32/mach-at32ap/Makefile | 2
-rw-r--r--  arch/avr32/mach-at32ap/at32ap7000.c | 60
-rw-r--r--  arch/avr32/mach-at32ap/extint.c | 36
-rw-r--r--  arch/avr32/mach-at32ap/pio.c | 255
-rw-r--r--  arch/avr32/mm/cache.c | 32
-rw-r--r--  arch/s390/defconfig | 3
-rw-r--r--  crypto/Kconfig | 31
-rw-r--r--  crypto/Makefile | 3
-rw-r--r--  crypto/algapi.c | 15
-rw-r--r--  crypto/api.c | 80
-rw-r--r--  crypto/blkcipher.c | 9
-rw-r--r--  crypto/camellia.c | 1801
-rw-r--r--  crypto/cbc.c | 9
-rw-r--r--  crypto/cipher.c | 447
-rw-r--r--  crypto/compress.c | 5
-rw-r--r--  crypto/digest.c | 24
-rw-r--r--  crypto/ecb.c | 9
-rw-r--r--  crypto/fcrypt.c | 423
-rw-r--r--  crypto/hash.c | 5
-rw-r--r--  crypto/hmac.c | 9
-rw-r--r--  crypto/internal.h | 27
-rw-r--r--  crypto/lrw.c | 11
-rw-r--r--  crypto/pcbc.c | 349
-rw-r--r--  crypto/tcrypt.c | 73
-rw-r--r--  crypto/tcrypt.h | 538
-rw-r--r--  crypto/xcbc.c | 60
-rw-r--r--  drivers/crypto/geode-aes.c | 2
-rw-r--r--  drivers/net/Kconfig | 11
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/atl1/Makefile | 2
-rw-r--r--  drivers/net/atl1/atl1.h | 283
-rw-r--r--  drivers/net/atl1/atl1_ethtool.c | 508
-rw-r--r--  drivers/net/atl1/atl1_hw.c | 718
-rw-r--r--  drivers/net/atl1/atl1_hw.h | 951
-rw-r--r--  drivers/net/atl1/atl1_main.c | 2468
-rw-r--r--  drivers/net/atl1/atl1_param.c | 206
-rw-r--r--  drivers/net/bonding/bond_alb.c | 4
-rw-r--r--  drivers/net/bonding/bond_main.c | 4
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 86
-rw-r--r--  drivers/net/slip.c | 5
-rw-r--r--  drivers/net/tg3.c | 2
-rw-r--r--  drivers/net/ucc_geth.c | 79
-rw-r--r--  drivers/net/ucc_geth_phy.c | 2
-rw-r--r--  drivers/s390/char/monreader.c | 218
-rw-r--r--  drivers/s390/char/vmlogrdr.c | 279
-rw-r--r--  drivers/s390/net/Kconfig | 7
-rw-r--r--  drivers/s390/net/Makefile | 1
-rw-r--r--  drivers/s390/net/iucv.c | 2540
-rw-r--r--  drivers/s390/net/iucv.h | 849
-rw-r--r--  drivers/s390/net/netiucv.c | 1314
-rw-r--r--  drivers/s390/net/smsgiucv.c | 147
-rw-r--r--  fs/ecryptfs/crypto.c | 4
-rw-r--r--  fs/ecryptfs/ecryptfs_kernel.h | 1
-rw-r--r--  include/asm-avr32/arch-at32ap/at32ap7000.h | 2
-rw-r--r--  include/asm-avr32/arch-at32ap/gpio.h | 27
-rw-r--r--  include/asm-avr32/arch-at32ap/irq.h | 14
-rw-r--r--  include/asm-avr32/arch-at32ap/portmux.h | 8
-rw-r--r--  include/asm-avr32/checksum.h | 2
-rw-r--r--  include/asm-avr32/dma-mapping.h | 8
-rw-r--r--  include/asm-avr32/gpio.h | 6
-rw-r--r--  include/asm-avr32/irq.h | 8
-rw-r--r--  include/asm-avr32/posix_types.h | 2
-rw-r--r--  include/asm-avr32/uaccess.h | 6
-rw-r--r--  include/crypto/algapi.h | 24
-rw-r--r--  include/linux/atmarp.h | 2
-rw-r--r--  include/linux/crypto.h | 148
-rw-r--r--  include/linux/if_packet.h | 10
-rw-r--r--  include/linux/net.h | 2
-rw-r--r--  include/linux/netdevice.h | 2
-rw-r--r--  include/linux/netfilter/Kbuild | 1
-rw-r--r--  include/linux/netfilter/nf_conntrack_sane.h | 21
-rw-r--r--  include/linux/netfilter/nf_conntrack_tcp.h | 4
-rw-r--r--  include/linux/netfilter/xt_TCPMSS.h | 10
-rw-r--r--  include/linux/netfilter_ipv4/ip_nat.h | 1
-rw-r--r--  include/linux/netfilter_ipv4/ip_tables.h | 22
-rw-r--r--  include/linux/netfilter_ipv4/ipt_TCPMSS.h | 7
-rw-r--r--  include/linux/netfilter_ipv6/ip6_tables.h | 35
-rw-r--r--  include/linux/netfilter_ipv6/ip6t_mh.h | 15
-rw-r--r--  include/linux/pagemap.h | 2
-rw-r--r--  include/linux/pci_ids.h | 2
-rw-r--r--  include/linux/pfkeyv2.h | 4
-rw-r--r--  include/linux/socket.h | 4
-rw-r--r--  include/linux/sysctl.h | 3
-rw-r--r--  include/linux/tcp.h | 2
-rw-r--r--  include/linux/wanrouter.h | 8
-rw-r--r--  include/linux/xfrm.h | 19
-rw-r--r--  include/net/inet_hashtables.h | 10
-rw-r--r--  include/net/iucv/af_iucv.h | 106
-rw-r--r--  include/net/iucv/iucv.h | 415
-rw-r--r--  include/net/netfilter/nf_conntrack.h | 2
-rw-r--r--  include/net/netfilter/nf_nat.h | 1
-rw-r--r--  include/net/route.h | 5
-rw-r--r--  include/net/tcp.h | 5
-rw-r--r--  include/net/x25.h | 18
-rw-r--r--  include/net/xfrm.h | 47
-rw-r--r--  mm/filemap.c | 20
-rw-r--r--  net/Kconfig | 1
-rw-r--r--  net/Makefile | 1
-rw-r--r--  net/atm/common.c | 3
-rw-r--r--  net/bridge/br_netfilter.c | 29
-rw-r--r--  net/bridge/br_netlink.c | 14
-rw-r--r--  net/bridge/netfilter/ebt_ip.c | 1
-rw-r--r--  net/bridge/netfilter/ebt_log.c | 1
-rw-r--r--  net/core/dev.c | 13
-rw-r--r--  net/core/dst.c | 9
-rw-r--r--  net/core/fib_rules.c | 14
-rw-r--r--  net/core/neighbour.c | 29
-rw-r--r--  net/core/rtnetlink.c | 23
-rw-r--r--  net/dccp/ccids/ccid3.c | 5
-rw-r--r--  net/dccp/ipv4.c | 2
-rw-r--r--  net/dccp/ipv6.c | 2
-rw-r--r--  net/dccp/proto.c | 4
-rw-r--r--  net/decnet/dn_dev.c | 14
-rw-r--r--  net/decnet/dn_table.c | 11
-rw-r--r--  net/ipv4/af_inet.c | 2
-rw-r--r--  net/ipv4/datagram.c | 2
-rw-r--r--  net/ipv4/devinet.c | 14
-rw-r--r--  net/ipv4/fib_semantics.c | 14
-rw-r--r--  net/ipv4/igmp.c | 2
-rw-r--r--  net/ipv4/inet_diag.c | 19
-rw-r--r--  net/ipv4/inet_hashtables.c | 2
-rw-r--r--  net/ipv4/inet_timewait_sock.c | 4
-rw-r--r--  net/ipv4/ip_gre.c | 3
-rw-r--r--  net/ipv4/ipip.c | 3
-rw-r--r--  net/ipv4/netfilter/Kconfig | 26
-rw-r--r--  net/ipv4/netfilter/Makefile | 1
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_proto_tcp.c | 40
-rw-r--r--  net/ipv4/netfilter/ip_nat_core.c | 12
-rw-r--r--  net/ipv4/netfilter/ip_nat_helper.c | 2
-rw-r--r--  net/ipv4/netfilter/ip_nat_proto_tcp.c | 5
-rw-r--r--  net/ipv4/netfilter/ip_nat_proto_udp.c | 5
-rw-r--r--  net/ipv4/netfilter/ip_nat_rule.c | 32
-rw-r--r--  net/ipv4/netfilter/ip_tables.c | 40
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c | 15
-rw-r--r--  net/ipv4/netfilter/ipt_ECN.c | 13
-rw-r--r--  net/ipv4/netfilter/ipt_LOG.c | 16
-rw-r--r--  net/ipv4/netfilter/ipt_MASQUERADE.c | 9
-rw-r--r--  net/ipv4/netfilter/ipt_NETMAP.c | 8
-rw-r--r--  net/ipv4/netfilter/ipt_REDIRECT.c | 8
-rw-r--r--  net/ipv4/netfilter/ipt_REJECT.c | 12
-rw-r--r--  net/ipv4/netfilter/ipt_SAME.c | 8
-rw-r--r--  net/ipv4/netfilter/ipt_TCPMSS.c | 207
-rw-r--r--  net/ipv4/netfilter/ipt_TOS.c | 11
-rw-r--r--  net/ipv4/netfilter/ipt_TTL.c | 11
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c | 18
-rw-r--r--  net/ipv4/netfilter/ipt_addrtype.c | 9
-rw-r--r--  net/ipv4/netfilter/ipt_ah.c | 10
-rw-r--r--  net/ipv4/netfilter/ipt_ecn.c | 10
-rw-r--r--  net/ipv4/netfilter/ipt_iprange.c | 10
-rw-r--r--  net/ipv4/netfilter/ipt_owner.c | 9
-rw-r--r--  net/ipv4/netfilter/ipt_recent.c | 12
-rw-r--r--  net/ipv4/netfilter/ipt_tos.c | 10
-rw-r--r--  net/ipv4/netfilter/ipt_ttl.c | 11
-rw-r--r--  net/ipv4/netfilter/iptable_filter.c | 2
-rw-r--r--  net/ipv4/netfilter/iptable_mangle.c | 2
-rw-r--r--  net/ipv4/netfilter/iptable_raw.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_core.c | 12
-rw-r--r--  net/ipv4/netfilter/nf_nat_helper.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_tcp.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_udp.c | 4
-rw-r--r--  net/ipv4/netfilter/nf_nat_rule.c | 8
-rw-r--r--  net/ipv4/netfilter/nf_nat_standalone.c | 6
-rw-r--r--  net/ipv4/raw.c | 2
-rw-r--r--  net/ipv4/route.c | 5
-rw-r--r--  net/ipv4/tcp.c | 7
-rw-r--r--  net/ipv4/tcp_input.c | 105
-rw-r--r--  net/ipv4/tcp_ipv4.c | 18
-rw-r--r--  net/ipv4/tcp_output.c | 3
-rw-r--r--  net/ipv4/udp.c | 2
-rw-r--r--  net/ipv4/xfrm4_mode_tunnel.c | 57
-rw-r--r--  net/ipv4/xfrm4_policy.c | 51
-rw-r--r--  net/ipv4/xfrm4_state.c | 1
-rw-r--r--  net/ipv6/addrconf.c | 70
-rw-r--r--  net/ipv6/datagram.c | 2
-rw-r--r--  net/ipv6/inet6_hashtables.c | 4
-rw-r--r--  net/ipv6/ip6_tunnel.c | 3
-rw-r--r--  net/ipv6/mcast.c | 2
-rw-r--r--  net/ipv6/mip6.c | 26
-rw-r--r--  net/ipv6/netfilter/Kconfig | 8
-rw-r--r--  net/ipv6/netfilter/Makefile | 1
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 12
-rw-r--r--  net/ipv6/netfilter/ip6t_HL.c | 17
-rw-r--r--  net/ipv6/netfilter/ip6t_LOG.c | 15
-rw-r--r--  net/ipv6/netfilter/ip6t_REJECT.c | 10
-rw-r--r--  net/ipv6/netfilter/ip6t_ah.c | 8
-rw-r--r--  net/ipv6/netfilter/ip6t_eui64.c | 8
-rw-r--r--  net/ipv6/netfilter/ip6t_frag.c | 8
-rw-r--r--  net/ipv6/netfilter/ip6t_hbh.c | 1
-rw-r--r--  net/ipv6/netfilter/ip6t_hl.c | 11
-rw-r--r--  net/ipv6/netfilter/ip6t_ipv6header.c | 8
-rw-r--r--  net/ipv6/netfilter/ip6t_mh.c | 108
-rw-r--r--  net/ipv6/netfilter/ip6t_owner.c | 8
-rw-r--r--  net/ipv6/netfilter/ip6t_rt.c | 8
-rw-r--r--  net/ipv6/netfilter/ip6table_filter.c | 21
-rw-r--r--  net/ipv6/netfilter/ip6table_mangle.c | 21
-rw-r--r--  net/ipv6/netfilter/ip6table_raw.c | 19
-rw-r--r--  net/ipv6/raw.c | 15
-rw-r--r--  net/ipv6/route.c | 33
-rw-r--r--  net/ipv6/sit.c | 3
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/ipv6/udp.c | 2
-rw-r--r--  net/ipv6/xfrm6_mode_tunnel.c | 42
-rw-r--r--  net/ipv6/xfrm6_policy.c | 46
-rw-r--r--  net/ipv6/xfrm6_state.c | 1
-rw-r--r--  net/ipx/af_ipx.c | 24
-rw-r--r--  net/irda/irias_object.c | 40
-rw-r--r--  net/irda/irlan/irlan_common.c | 23
-rw-r--r--  net/iucv/Kconfig | 15
-rw-r--r--  net/iucv/Makefile | 6
-rw-r--r--  net/iucv/af_iucv.c | 1077
-rw-r--r--  net/iucv/iucv.c | 1619
-rw-r--r--  net/key/af_key.c | 422
-rw-r--r--  net/netfilter/Kconfig | 39
-rw-r--r--  net/netfilter/Makefile | 2
-rw-r--r--  net/netfilter/nf_conntrack_proto_tcp.c | 40
-rw-r--r--  net/netfilter/nf_conntrack_sane.c | 242
-rw-r--r--  net/netfilter/xt_CLASSIFY.c | 4
-rw-r--r--  net/netfilter/xt_CONNMARK.c | 5
-rw-r--r--  net/netfilter/xt_CONNSECMARK.c | 6
-rw-r--r--  net/netfilter/xt_MARK.c | 8
-rw-r--r--  net/netfilter/xt_SECMARK.c | 4
-rw-r--r--  net/netfilter/xt_TCPMSS.c | 296
-rw-r--r--  net/netfilter/xt_hashlimit.c | 1
-rw-r--r--  net/packet/af_packet.c | 79
-rw-r--r--  net/sched/act_ipt.c | 2
-rw-r--r--  net/sched/sch_generic.c | 2
-rw-r--r--  net/sched/sch_prio.c | 15
-rw-r--r--  net/sched/sch_sfq.c | 2
-rw-r--r--  net/socket.c | 29
-rw-r--r--  net/wanrouter/wanmain.c | 17
-rw-r--r--  net/x25/Makefile | 2
-rw-r--r--  net/x25/af_x25.c | 32
-rw-r--r--  net/x25/sysctl_net_x25.c | 8
-rw-r--r--  net/x25/x25_dev.c | 13
-rw-r--r--  net/x25/x25_forward.c | 163
-rw-r--r--  net/x25/x25_proc.c | 98
-rw-r--r--  net/x25/x25_route.c | 3
-rw-r--r--  net/xfrm/Kconfig | 26
-rw-r--r--  net/xfrm/xfrm_algo.c | 17
-rw-r--r--  net/xfrm/xfrm_policy.c | 231
-rw-r--r--  net/xfrm/xfrm_state.c | 184
-rw-r--r--  net/xfrm/xfrm_user.c | 173
253 files changed, 16449 insertions, 6484 deletions
diff --git a/Documentation/crypto/api-intro.txt b/Documentation/crypto/api-intro.txt
index 5a03a2801d6..e41a79aa71c 100644
--- a/Documentation/crypto/api-intro.txt
+++ b/Documentation/crypto/api-intro.txt
@@ -193,6 +193,7 @@ Original developers of the crypto algorithms:
Kartikey Mahendra Bhatt (CAST6)
Jon Oberheide (ARC4)
Jouni Malinen (Michael MIC)
+ NTT(Nippon Telegraph and Telephone Corporation) (Camellia)
SHA1 algorithm contributors:
Jean-Francois Dive
@@ -246,6 +247,9 @@ Tiger algorithm contributors:
VIA PadLock contributors:
Michal Ludvig
+Camellia algorithm contributors:
+ NTT(Nippon Telegraph and Telephone Corporation) (Camellia)
+
Generic scatterwalk code by Adam J. Richter <adam@yggdrasil.com>
Please send any credits updates or corrections to:
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 2dc5e5da8f8..4a1d8979ed9 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -186,18 +186,6 @@ Who: Greg Kroah-Hartman <gregkh@suse.de>
---------------------------
-What: find_trylock_page
-When: January 2007
-Why: The interface no longer has any callers left in the kernel. It
- is an odd interface (compared with other find_*_page functions), in
- that it does not take a refcount to the page, only the page lock.
- It should be replaced with find_get_page or find_lock_page if possible.
- This feature removal can be reevaluated if users of the interface
- cannot cleanly use something else.
-Who: Nick Piggin <npiggin@suse.de>
-
----------------------------
-
What: Interrupt only SA_* flags
When: Januar 2007
Why: The interrupt related SA_* flags are replaced by IRQF_* to move them
diff --git a/MAINTAINERS b/MAINTAINERS
index 010658a8b5a..f2a79481f1c 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1114,7 +1114,7 @@ S: Supported
DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER
P: Tobias Ringstrom
M: tori@unhappy.mine.nu
-L: linux-kernel@vger.kernel.org
+L: netdev@vger.kernel.org
S: Maintained
DOCBOOK FOR DOCUMENTATION
@@ -2361,7 +2361,7 @@ S: Maintained
NETWORKING [WIRELESS]
P: John W. Linville
M: linville@tuxdriver.com
-L: netdev@vger.kernel.org
+L: linux-wireless@vger.kernel.org
T: git kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6.git
S: Maintained
diff --git a/arch/avr32/boards/atstk1000/Makefile b/arch/avr32/boards/atstk1000/Makefile
index df949948053..8e0992201bb 100644
--- a/arch/avr32/boards/atstk1000/Makefile
+++ b/arch/avr32/boards/atstk1000/Makefile
@@ -1,2 +1,2 @@
-obj-y += setup.o spi.o flash.o
+obj-y += setup.o flash.o
obj-$(CONFIG_BOARD_ATSTK1002) += atstk1002.o
diff --git a/arch/avr32/boards/atstk1000/atstk1002.c b/arch/avr32/boards/atstk1000/atstk1002.c
index 32b361f31c2..d47e39f0e97 100644
--- a/arch/avr32/boards/atstk1000/atstk1002.c
+++ b/arch/avr32/boards/atstk1000/atstk1002.c
@@ -8,17 +8,24 @@
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
+#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/spi/spi.h>
#include <asm/io.h>
#include <asm/setup.h>
+#include <asm/arch/at32ap7000.h>
#include <asm/arch/board.h>
#include <asm/arch/init.h>
+#include <asm/arch/portmux.h>
+
+
+#define SW2_DEFAULT /* MMCI and UART_A available */
struct eth_addr {
u8 addr[6];
@@ -29,6 +36,16 @@ static struct eth_addr __initdata hw_addr[2];
static struct eth_platform_data __initdata eth_data[2];
extern struct lcdc_platform_data atstk1000_fb0_data;
+static struct spi_board_info spi_board_info[] __initdata = {
+ {
+ .modalias = "ltv350qv",
+ .controller_data = (void *)GPIO_PIN_PA(4),
+ .max_speed_hz = 16000000,
+ .bus_num = 0,
+ .chip_select = 1,
+ },
+};
+
/*
* The next two functions should go away as the boot loader is
* supposed to initialize the macb address registers with a valid
@@ -86,23 +103,53 @@ static void __init set_hw_addr(struct platform_device *pdev)
void __init setup_board(void)
{
- at32_map_usart(1, 0); /* /dev/ttyS0 */
- at32_map_usart(2, 1); /* /dev/ttyS1 */
- at32_map_usart(3, 2); /* /dev/ttyS2 */
+#ifdef SW2_DEFAULT
+ at32_map_usart(1, 0); /* USART 1/A: /dev/ttyS0, DB9 */
+#else
+ at32_map_usart(0, 1); /* USART 0/B: /dev/ttyS1, IRDA */
+#endif
+ /* USART 2/unused: expansion connector */
+ at32_map_usart(3, 2); /* USART 3/C: /dev/ttyS2, DB9 */
at32_setup_serial_console(0);
}
static int __init atstk1002_init(void)
{
+ /*
+ * ATSTK1000 uses 32-bit SDRAM interface. Reserve the
+ * SDRAM-specific pins so that nobody messes with them.
+ */
+ at32_reserve_pin(GPIO_PIN_PE(0)); /* DATA[16] */
+ at32_reserve_pin(GPIO_PIN_PE(1)); /* DATA[17] */
+ at32_reserve_pin(GPIO_PIN_PE(2)); /* DATA[18] */
+ at32_reserve_pin(GPIO_PIN_PE(3)); /* DATA[19] */
+ at32_reserve_pin(GPIO_PIN_PE(4)); /* DATA[20] */
+ at32_reserve_pin(GPIO_PIN_PE(5)); /* DATA[21] */
+ at32_reserve_pin(GPIO_PIN_PE(6)); /* DATA[22] */
+ at32_reserve_pin(GPIO_PIN_PE(7)); /* DATA[23] */
+ at32_reserve_pin(GPIO_PIN_PE(8)); /* DATA[24] */
+ at32_reserve_pin(GPIO_PIN_PE(9)); /* DATA[25] */
+ at32_reserve_pin(GPIO_PIN_PE(10)); /* DATA[26] */
+ at32_reserve_pin(GPIO_PIN_PE(11)); /* DATA[27] */
+ at32_reserve_pin(GPIO_PIN_PE(12)); /* DATA[28] */
+ at32_reserve_pin(GPIO_PIN_PE(13)); /* DATA[29] */
+ at32_reserve_pin(GPIO_PIN_PE(14)); /* DATA[30] */
+ at32_reserve_pin(GPIO_PIN_PE(15)); /* DATA[31] */
+ at32_reserve_pin(GPIO_PIN_PE(26)); /* SDCS */
+
at32_add_system_devices();
+#ifdef SW2_DEFAULT
at32_add_device_usart(0);
+#else
at32_add_device_usart(1);
+#endif
at32_add_device_usart(2);
set_hw_addr(at32_add_device_eth(0, &eth_data[0]));
+ spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
at32_add_device_spi(0);
at32_add_device_lcdc(0, &atstk1000_fb0_data);
diff --git a/arch/avr32/boards/atstk1000/spi.c b/arch/avr32/boards/atstk1000/spi.c
deleted file mode 100644
index 567726c82c6..00000000000
--- a/arch/avr32/boards/atstk1000/spi.c
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * ATSTK1000 SPI devices
- *
- * Copyright (C) 2005 Atmel Norway
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/device.h>
-#include <linux/spi/spi.h>
-
-static struct spi_board_info spi_board_info[] __initdata = {
- {
- .modalias = "ltv350qv",
- .max_speed_hz = 16000000,
- .bus_num = 0,
- .chip_select = 1,
- },
-};
-
-static int board_init_spi(void)
-{
- spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
- return 0;
-}
-arch_initcall(board_init_spi);
diff --git a/arch/avr32/kernel/cpu.c b/arch/avr32/kernel/cpu.c
index 342452ba204..2e72fd2699d 100644
--- a/arch/avr32/kernel/cpu.c
+++ b/arch/avr32/kernel/cpu.c
@@ -9,6 +9,7 @@
#include <linux/sysdev.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
+#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/param.h>
#include <linux/errno.h>
diff --git a/arch/avr32/kernel/irq.c b/arch/avr32/kernel/irq.c
index 856f3548e66..fd311248c14 100644
--- a/arch/avr32/kernel/irq.c
+++ b/arch/avr32/kernel/irq.c
@@ -57,6 +57,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%3d: ", i);
for_each_online_cpu(cpu)
seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[i]);
+ seq_printf(p, " %8s", irq_desc[i].chip->name ? : "-");
seq_printf(p, " %s", action->name);
for (action = action->next; action; action = action->next)
seq_printf(p, ", %s", action->name);
diff --git a/arch/avr32/kernel/setup.c b/arch/avr32/kernel/setup.c
index a3421160100..c6734aefb55 100644
--- a/arch/avr32/kernel/setup.c
+++ b/arch/avr32/kernel/setup.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
+#include <linux/kernel.h>
#include <asm/sections.h>
#include <asm/processor.h>
@@ -174,8 +175,7 @@ static int __init parse_tag_mem_range(struct tag *tag,
* Copy the data so the bootmem init code doesn't need to care
* about it.
*/
- if (mem_range_next_free >=
- (sizeof(mem_range_cache) / sizeof(mem_range_cache[0])))
+ if (mem_range_next_free >= ARRAY_SIZE(mem_range_cache))
panic("Physical memory map too complex!\n");
new = &mem_range_cache[mem_range_next_free++];
diff --git a/arch/avr32/lib/libgcc.h b/arch/avr32/lib/libgcc.h
deleted file mode 100644
index 5a091b5e361..00000000000
--- a/arch/avr32/lib/libgcc.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Definitions for various functions 'borrowed' from gcc-3.4.3 */
-
-#define BITS_PER_UNIT 8
-
-typedef int QItype __attribute__ ((mode (QI)));
-typedef unsigned int UQItype __attribute__ ((mode (QI)));
-typedef int HItype __attribute__ ((mode (HI)));
-typedef unsigned int UHItype __attribute__ ((mode (HI)));
-typedef int SItype __attribute__ ((mode (SI)));
-typedef unsigned int USItype __attribute__ ((mode (SI)));
-typedef int DItype __attribute__ ((mode (DI)));
-typedef unsigned int UDItype __attribute__ ((mode (DI)));
-typedef float SFtype __attribute__ ((mode (SF)));
-typedef float DFtype __attribute__ ((mode (DF)));
-typedef int word_type __attribute__ ((mode (__word__)));
-
-#define W_TYPE_SIZE (4 * BITS_PER_UNIT)
-#define Wtype SItype
-#define UWtype USItype
-#define HWtype SItype
-#define UHWtype USItype
-#define DWtype DItype
-#define UDWtype UDItype
-#define __NW(a,b) __ ## a ## si ## b
-#define __NDW(a,b) __ ## a ## di ## b
-
-struct DWstruct {Wtype high, low;};
-
-typedef union
-{
- struct DWstruct s;
- DWtype ll;
-} DWunion;
diff --git a/arch/avr32/lib/longlong.h b/arch/avr32/lib/longlong.h
deleted file mode 100644
index cd5e369ac43..00000000000
--- a/arch/avr32/lib/longlong.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
- Copyright (C) 1991, 1992, 1994, 1995, 1996, 1997, 1998, 1999, 2000
- Free Software Foundation, Inc.
-
- This definition file is free software; you can redistribute it
- and/or modify it under the terms of the GNU General Public
- License as published by the Free Software Foundation; either
- version 2, or (at your option) any later version.
-
- This definition file is distributed in the hope that it will be
- useful, but WITHOUT ANY WARRANTY; without even the implied
- warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place - Suite 330,
- Boston, MA 02111-1307, USA. */
-
-/* Borrowed from gcc-3.4.3 */
-
-#define __BITS4 (W_TYPE_SIZE / 4)
-#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
-#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
-#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))
-
-#define count_leading_zeros(count, x) ((count) = __builtin_clz(x))
-
-#define __udiv_qrnnd_c(q, r, n1, n0, d) \
- do { \
- UWtype __d1, __d0, __q1, __q0; \
- UWtype __r1, __r0, __m; \
- __d1 = __ll_highpart (d); \
- __d0 = __ll_lowpart (d); \
- \
- __r1 = (n1) % __d1; \
- __q1 = (n1) / __d1; \
- __m = (UWtype) __q1 * __d0; \
- __r1 = __r1 * __ll_B | __ll_highpart (n0); \
- if (__r1 < __m) \
- { \
- __q1--, __r1 += (d); \
- if (__r1 >= (d)) /* i.e. we didn't get carry when adding to __r1 */\
- if (__r1 < __m) \
- __q1--, __r1 += (d); \
- } \
- __r1 -= __m; \
- \
- __r0 = __r1 % __d1; \
- __q0 = __r1 / __d1; \
- __m = (UWtype) __q0 * __d0; \
- __r0 = __r0 * __ll_B | __ll_lowpart (n0); \
- if (__r0 < __m) \
- { \
- __q0--, __r0 += (d); \
- if (__r0 >= (d)) \
- if (__r0 < __m) \
- __q0--, __r0 += (d); \
- } \
- __r0 -= __m; \
- \
- (q) = (UWtype) __q1 * __ll_B | __q0; \
- (r) = __r0; \
- } while (0)
-
-#define udiv_qrnnd __udiv_qrnnd_c
-
-#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
- do { \
- UWtype __x; \
- __x = (al) - (bl); \
- (sh) = (ah) - (bh) - (__x > (al)); \
- (sl) = __x; \
- } while (0)
-
-#define umul_ppmm(w1, w0, u, v) \
- do { \
- UWtype __x0, __x1, __x2, __x3; \
- UHWtype __ul, __vl, __uh, __vh; \
- \
- __ul = __ll_lowpart (u); \
- __uh = __ll_highpart (u); \
- __vl = __ll_lowpart (v); \
- __vh = __ll_highpart (v); \
- \
- __x0 = (UWtype) __ul * __vl; \
- __x1 = (UWtype) __ul * __vh; \
- __x2 = (UWtype) __uh * __vl; \
- __x3 = (UWtype) __uh * __vh; \
- \
- __x1 += __ll_highpart (__x0);/* this can't give carry */ \
- __x1 += __x2; /* but this indeed can */ \
- if (__x1 < __x2) /* did we get it? */ \
- __x3 += __ll_B; /* yes, add it in the proper pos. */ \
- \
- (w1) = __x3 + __ll_highpart (__x1); \
- (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
- } while (0)
diff --git a/arch/avr32/mach-at32ap/Makefile b/arch/avr32/mach-at32ap/Makefile
index f62eb691551..b21bea9af8b 100644
--- a/arch/avr32/mach-at32ap/Makefile
+++ b/arch/avr32/mach-at32ap/Makefile
@@ -1,2 +1,2 @@
-obj-y += at32ap.o clock.o pio.o intc.o extint.o hsmc.o
+obj-y += at32ap.o clock.o intc.o extint.o pio.o hsmc.o
obj-$(CONFIG_CPU_AT32AP7000) += at32ap7000.o
diff --git a/arch/avr32/mach-at32ap/at32ap7000.c b/arch/avr32/mach-at32ap/at32ap7000.c
index 48f4ef38c70..c1e477ec757 100644
--- a/arch/avr32/mach-at32ap/at32ap7000.c
+++ b/arch/avr32/mach-at32ap/at32ap7000.c
@@ -496,9 +496,16 @@ static struct resource pio3_resource[] = {
DEFINE_DEV(pio, 3);
DEV_CLK(mck, pio3, pba, 13);
+static struct resource pio4_resource[] = {
+ PBMEM(0xffe03800),
+ IRQ(17),
+};
+DEFINE_DEV(pio, 4);
+DEV_CLK(mck, pio4, pba, 14);
+
void __init at32_add_system_devices(void)
{
- system_manager.eim_first_irq = NR_INTERNAL_IRQS;
+ system_manager.eim_first_irq = EIM_IRQ_BASE;
platform_device_register(&at32_sm_device);
platform_device_register(&at32_intc0_device);
@@ -509,6 +516,7 @@ void __init at32_add_system_devices(void)
platform_device_register(&pio1_device);
platform_device_register(&pio2_device);
platform_device_register(&pio3_device);
+ platform_device_register(&pio4_device);
}
/* --------------------------------------------------------------------
@@ -521,7 +529,7 @@ static struct atmel_uart_data atmel_usart0_data = {
};
static struct resource atmel_usart0_resource[] = {
PBMEM(0xffe00c00),
- IRQ(7),
+ IRQ(6),
};
DEFINE_DEV_DATA(atmel_usart, 0);
DEV_CLK(usart, atmel_usart0, pba, 4);
@@ -583,7 +591,7 @@ static inline void configure_usart3_pins(void)
select_peripheral(PB(17), PERIPH_B, 0); /* TXD */
}
-static struct platform_device *at32_usarts[4];
+static struct platform_device *__initdata at32_usarts[4];
void __init at32_map_usart(unsigned int hw_id, unsigned int line)
{
@@ -728,12 +736,19 @@ at32_add_device_eth(unsigned int id, struct eth_platform_data *data)
/* --------------------------------------------------------------------
* SPI
* -------------------------------------------------------------------- */
-static struct resource spi0_resource[] = {
+static struct resource atmel_spi0_resource[] = {
PBMEM(0xffe00000),
IRQ(3),
};
-DEFINE_DEV(spi, 0);
-DEV_CLK(mck, spi0, pba, 0);
+DEFINE_DEV(atmel_spi, 0);
+DEV_CLK(spi_clk, atmel_spi0, pba, 0);
+
+static struct resource atmel_spi1_resource[] = {
+ PBMEM(0xffe00400),
+ IRQ(4),
+};
+DEFINE_DEV(atmel_spi, 1);
+DEV_CLK(spi_clk, atmel_spi1, pba, 1);
struct platform_device *__init at32_add_device_spi(unsigned int id)
{
@@ -741,13 +756,33 @@ struct platform_device *__init at32_add_device_spi(unsigned int id)
switch (id) {
case 0:
- pdev = &spi0_device;
+ pdev = &atmel_spi0_device;
select_peripheral(PA(0), PERIPH_A, 0); /* MISO */
select_peripheral(PA(1), PERIPH_A, 0); /* MOSI */
select_peripheral(PA(2), PERIPH_A, 0); /* SCK */
- select_peripheral(PA(3), PERIPH_A, 0); /* NPCS0 */
- select_peripheral(PA(4), PERIPH_A, 0); /* NPCS1 */
- select_peripheral(PA(5), PERIPH_A, 0); /* NPCS2 */
+
+ /* NPCS[2:0] */
+ at32_select_gpio(GPIO_PIN_PA(3),
+ AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
+ at32_select_gpio(GPIO_PIN_PA(4),
+ AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
+ at32_select_gpio(GPIO_PIN_PA(5),
+ AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
+ break;
+
+ case 1:
+ pdev = &atmel_spi1_device;
+ select_peripheral(PB(0), PERIPH_B, 0); /* MISO */
+ select_peripheral(PB(1), PERIPH_B, 0); /* MOSI */
+ select_peripheral(PB(5), PERIPH_B, 0); /* SCK */
+
+ /* NPCS[2:0] */
+ at32_select_gpio(GPIO_PIN_PB(2),
+ AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
+ at32_select_gpio(GPIO_PIN_PB(3),
+ AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
+ at32_select_gpio(GPIO_PIN_PB(4),
+ AT32_GPIOF_OUTPUT | AT32_GPIOF_HIGH);
break;
default:
@@ -860,6 +895,7 @@ struct clk *at32_clock_list[] = {
&pio1_mck,
&pio2_mck,
&pio3_mck,
+ &pio4_mck,
&atmel_usart0_usart,
&atmel_usart1_usart,
&atmel_usart2_usart,
@@ -868,7 +904,8 @@ struct clk *at32_clock_list[] = {
&macb0_pclk,
&macb1_hclk,
&macb1_pclk,
- &spi0_mck,
+ &atmel_spi0_spi_clk,
+ &atmel_spi1_spi_clk,
&lcdc0_hclk,
&lcdc0_pixclk,
};
@@ -880,6 +917,7 @@ void __init at32_portmux_init(void)
at32_init_pio(&pio1_device);
at32_init_pio(&pio2_device);
at32_init_pio(&pio3_device);
+ at32_init_pio(&pio4_device);
}
void __init at32_clock_init(void)
diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c
index b59272e81b9..4a60eccfebd 100644
--- a/arch/avr32/mach-at32ap/extint.c
+++ b/arch/avr32/mach-at32ap/extint.c
@@ -55,20 +55,11 @@ static int eim_set_irq_type(unsigned int irq, unsigned int flow_type)
unsigned long flags;
int ret = 0;
+ flow_type &= IRQ_TYPE_SENSE_MASK;
if (flow_type == IRQ_TYPE_NONE)
flow_type = IRQ_TYPE_LEVEL_LOW;
desc = &irq_desc[irq];
- desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
- desc->status |= flow_type & IRQ_TYPE_SENSE_MASK;
-
- if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) {
- desc->status |= IRQ_LEVEL;
- set_irq_handler(irq, handle_level_irq);
- } else {
- set_irq_handler(irq, handle_edge_irq);
- }
-
spin_lock_irqsave(&sm->lock, flags);
mode = sm_readl(sm, EIM_MODE);
@@ -97,9 +88,16 @@ static int eim_set_irq_type(unsigned int irq, unsigned int flow_type)
break;
}
- sm_writel(sm, EIM_MODE, mode);
- sm_writel(sm, EIM_EDGE, edge);
- sm_writel(sm, EIM_LEVEL, level);
+ if (ret == 0) {
+ sm_writel(sm, EIM_MODE, mode);
+ sm_writel(sm, EIM_EDGE, edge);
+ sm_writel(sm, EIM_LEVEL, level);
+
+ if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+ flow_type |= IRQ_LEVEL;
+ desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL);
+ desc->status |= flow_type;
+ }
spin_unlock_irqrestore(&sm->lock, flags);
@@ -122,8 +120,6 @@ static void demux_eim_irq(unsigned int irq, struct irq_desc *desc)
unsigned long status, pending;
unsigned int i, ext_irq;
- spin_lock(&sm->lock);
-
status = sm_readl(sm, EIM_ISR);
pending = status & sm_readl(sm, EIM_IMR);
@@ -133,10 +129,11 @@ static void demux_eim_irq(unsigned int irq, struct irq_desc *desc)
ext_irq = i + sm->eim_first_irq;
ext_desc = irq_desc + ext_irq;
- ext_desc->handle_irq(ext_irq, ext_desc);
+ if (ext_desc->status & IRQ_LEVEL)
+ handle_level_irq(ext_irq, ext_desc);
+ else
+ handle_edge_irq(ext_irq, ext_desc);
}
-
- spin_unlock(&sm->lock);
}
static int __init eim_init(void)
@@ -168,8 +165,9 @@ static int __init eim_init(void)
sm->eim_chip = &eim_chip;
for (i = 0; i < nr_irqs; i++) {
+ /* NOTE the handler we set here is ignored by the demux */
set_irq_chip_and_handler(sm->eim_first_irq + i, &eim_chip,
- handle_edge_irq);
+ handle_level_irq);
set_irq_chip_data(sm->eim_first_irq + i, sm);
}
diff --git a/arch/avr32/mach-at32ap/pio.c b/arch/avr32/mach-at32ap/pio.c
index f1280ed8ed6..9ba5654cde1 100644
--- a/arch/avr32/mach-at32ap/pio.c
+++ b/arch/avr32/mach-at32ap/pio.c
@@ -12,7 +12,9 @@
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
+#include <linux/irq.h>
+#include <asm/gpio.h>
#include <asm/io.h>
#include <asm/arch/portmux.h>
@@ -26,7 +28,8 @@ struct pio_device {
const struct platform_device *pdev;
struct clk *clk;
u32 pinmux_mask;
- char name[32];
+ u32 gpio_mask;
+ char name[8];
};
static struct pio_device pio_dev[MAX_NR_PIO_DEVICES];
@@ -76,6 +79,9 @@ void __init at32_select_periph(unsigned int pin, unsigned int periph,
if (!(flags & AT32_GPIOF_PULLUP))
pio_writel(pio, PUDR, mask);
+ /* gpio_request NOT allowed */
+ set_bit(pin_index, &pio->gpio_mask);
+
return;
fail:
@@ -99,19 +105,52 @@ void __init at32_select_gpio(unsigned int pin, unsigned long flags)
goto fail;
}
- pio_writel(pio, PUER, mask);
- if (flags & AT32_GPIOF_HIGH)
- pio_writel(pio, SODR, mask);
- else
- pio_writel(pio, CODR, mask);
- if (flags & AT32_GPIOF_OUTPUT)
+ if (flags & AT32_GPIOF_OUTPUT) {
+ if (flags & AT32_GPIOF_HIGH)
+ pio_writel(pio, SODR, mask);
+ else
+ pio_writel(pio, CODR, mask);
+ pio_writel(pio, PUDR, mask);
pio_writel(pio, OER, mask);
- else
+ } else {
+ if (flags & AT32_GPIOF_PULLUP)
+ pio_writel(pio, PUER, mask);
+ else
+ pio_writel(pio, PUDR, mask);
+ if (flags & AT32_GPIOF_DEGLITCH)
+ pio_writel(pio, IFER, mask);
+ else
+ pio_writel(pio, IFDR, mask);
pio_writel(pio, ODR, mask);
+ }
pio_writel(pio, PER, mask);
- if (!(flags & AT32_GPIOF_PULLUP))
- pio_writel(pio, PUDR, mask);
+
+ /* gpio_request now allowed */
+ clear_bit(pin_index, &pio->gpio_mask);
+
+ return;
+
+fail:
+ dump_stack();
+}
+
+/* Reserve a pin, preventing anyone else from changing its configuration. */
+void __init at32_reserve_pin(unsigned int pin)
+{
+ struct pio_device *pio;
+ unsigned int pin_index = pin & 0x1f;
+
+ pio = gpio_to_pio(pin);
+ if (unlikely(!pio)) {
+ printk("pio: invalid pin %u\n", pin);
+ goto fail;
+ }
+
+ if (unlikely(test_and_set_bit(pin_index, &pio->pinmux_mask))) {
+ printk("%s: pin %u is busy\n", pio->name, pin_index);
+ goto fail;
+ }
return;
@@ -119,20 +158,197 @@ fail:
dump_stack();
}
+/*--------------------------------------------------------------------------*/
+
+/* GPIO API */
+
+int gpio_request(unsigned int gpio, const char *label)
+{
+ struct pio_device *pio;
+ unsigned int pin;
+
+ pio = gpio_to_pio(gpio);
+ if (!pio)
+ return -ENODEV;
+
+ pin = gpio & 0x1f;
+ if (test_and_set_bit(pin, &pio->gpio_mask))
+ return -EBUSY;
+
+ return 0;
+}
+EXPORT_SYMBOL(gpio_request);
+
+void gpio_free(unsigned int gpio)
+{
+ struct pio_device *pio;
+ unsigned int pin;
+
+ pio = gpio_to_pio(gpio);
+ if (!pio) {
+ printk(KERN_ERR
+ "gpio: attempted to free invalid pin %u\n", gpio);
+ return;
+ }
+
+ pin = gpio & 0x1f;
+ if (!test_and_clear_bit(pin, &pio->gpio_mask))
+ printk(KERN_ERR "gpio: freeing free or non-gpio pin %s-%u\n",
+ pio->name, pin);
+}
+EXPORT_SYMBOL(gpio_free);
+
+int gpio_direction_input(unsigned int gpio)
+{
+ struct pio_device *pio;
+ unsigned int pin;
+
+ pio = gpio_to_pio(gpio);
+ if (!pio)
+ return -ENODEV;
+
+ pin = gpio & 0x1f;
+ pio_writel(pio, ODR, 1 << pin);
+
+ return 0;
+}
+EXPORT_SYMBOL(gpio_direction_input);
+
+int gpio_direction_output(unsigned int gpio)
+{
+ struct pio_device *pio;
+ unsigned int pin;
+
+ pio = gpio_to_pio(gpio);
+ if (!pio)
+ return -ENODEV;
+
+ pin = gpio & 0x1f;
+ pio_writel(pio, OER, 1 << pin);
+
+ return 0;
+}
+EXPORT_SYMBOL(gpio_direction_output);
+
+int gpio_get_value(unsigned int gpio)
+{
+ struct pio_device *pio = &pio_dev[gpio >> 5];
+
+ return (pio_readl(pio, PDSR) >> (gpio & 0x1f)) & 1;
+}
+EXPORT_SYMBOL(gpio_get_value);
+
+void gpio_set_value(unsigned int gpio, int value)
+{
+ struct pio_device *pio = &pio_dev[gpio >> 5];
+ u32 mask;
+
+ mask = 1 << (gpio & 0x1f);
+ if (value)
+ pio_writel(pio, SODR, mask);
+ else
+ pio_writel(pio, CODR, mask);
+}
+EXPORT_SYMBOL(gpio_set_value);
+
+/*--------------------------------------------------------------------------*/
+
+/* GPIO IRQ support */
+
+static void gpio_irq_mask(unsigned irq)
+{
+ unsigned gpio = irq_to_gpio(irq);
+ struct pio_device *pio = &pio_dev[gpio >> 5];
+
+ pio_writel(pio, IDR, 1 << (gpio & 0x1f));
+}
+
+static void gpio_irq_unmask(unsigned irq)
+{
+ unsigned gpio = irq_to_gpio(irq);
+ struct pio_device *pio = &pio_dev[gpio >> 5];
+
+ pio_writel(pio, IER, 1 << (gpio & 0x1f));
+}
+
+static int gpio_irq_type(unsigned irq, unsigned type)
+{
+ if (type != IRQ_TYPE_EDGE_BOTH && type != IRQ_TYPE_NONE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct irq_chip gpio_irqchip = {
+ .name = "gpio",
+ .mask = gpio_irq_mask,
+ .unmask = gpio_irq_unmask,
+ .set_type = gpio_irq_type,
+};
+
+static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+{
+ struct pio_device *pio = get_irq_chip_data(irq);
+ unsigned gpio_irq;
+
+ gpio_irq = (unsigned) get_irq_data(irq);
+ for (;;) {
+ u32 isr;
+ struct irq_desc *d;
+
+ /* ack pending GPIO interrupts */
+ isr = pio_readl(pio, ISR) & pio_readl(pio, IMR);
+ if (!isr)
+ break;
+ do {
+ int i;
+
+ i = ffs(isr) - 1;
+ isr &= ~(1 << i);
+
+ i += gpio_irq;
+ d = &irq_desc[i];
+
+ d->handle_irq(i, d);
+ } while (isr);
+ }
+}
+
+static void __init
+gpio_irq_setup(struct pio_device *pio, int irq, int gpio_irq)
+{
+ unsigned i;
+
+ set_irq_chip_data(irq, pio);
+ set_irq_data(irq, (void *) gpio_irq);
+
+ for (i = 0; i < 32; i++, gpio_irq++) {
+ set_irq_chip_data(gpio_irq, pio);
+ set_irq_chip_and_handler(gpio_irq, &gpio_irqchip,
+ handle_simple_irq);
+ }
+
+ set_irq_chained_handler(irq, gpio_irq_handler);
+}
+
+/*--------------------------------------------------------------------------*/
+
static int __init pio_probe(struct platform_device *pdev)
{
struct pio_device *pio = NULL;
+ int irq = platform_get_irq(pdev, 0);
+ int gpio_irq_base = GPIO_IRQ_BASE + pdev->id * 32;
BUG_ON(pdev->id >= MAX_NR_PIO_DEVICES);
pio = &pio_dev[pdev->id];
BUG_ON(!pio->regs);
- /* TODO: Interrupts */
+ gpio_irq_setup(pio, irq, gpio_irq_base);
platform_set_drvdata(pdev, pio);
- printk(KERN_INFO "%s: Atmel Port Multiplexer at 0x%p (irq %d)\n",
- pio->name, pio->regs, platform_get_irq(pdev, 0));
+ printk(KERN_DEBUG "%s: base 0x%p, irq %d chains %d..%d\n",
+ pio->name, pio->regs, irq, gpio_irq_base, gpio_irq_base + 31);
return 0;
}
@@ -148,7 +364,7 @@ static int __init pio_init(void)
{
return platform_driver_register(&pio_driver);
}
-subsys_initcall(pio_init);
+postcore_initcall(pio_init);
void __init at32_init_pio(struct platform_device *pdev)
{
@@ -184,6 +400,13 @@ void __init at32_init_pio(struct platform_device *pdev)
pio->pdev = pdev;
pio->regs = ioremap(regs->start, regs->end - regs->start + 1);
- pio_writel(pio, ODR, ~0UL);
- pio_writel(pio, PER, ~0UL);
+ /*
+ * request_gpio() is only valid for pins that have been
+ * explicitly configured as GPIO and not previously requested
+ */
+ pio->gpio_mask = ~0UL;
+
+ /* start with irqs disabled and acked */
+ pio_writel(pio, IDR, ~0UL);
+ (void) pio_readl(pio, ISR);
}
diff --git a/arch/avr32/mm/cache.c b/arch/avr32/mm/cache.c
index 450515b245a..fb13f72e9a0 100644
--- a/arch/avr32/mm/cache.c
+++ b/arch/avr32/mm/cache.c
@@ -22,18 +22,34 @@
void invalidate_dcache_region(void *start, size_t size)
{
- unsigned long v, begin, end, linesz;
+ unsigned long v, begin, end, linesz, mask;
+ int flush = 0;
linesz = boot_cpu_data.dcache.linesz;
+ mask = linesz - 1;
+
+ /* when first and/or last cachelines are shared, flush them
+ * instead of invalidating ... never discard valid data!
+ */
+ begin = (unsigned long)start;
+ end = begin + size - 1;
+
+ if (begin & mask) {
+ flush_dcache_line(start);
+ begin += linesz;
+ flush = 1;
+ }
+ if ((end & mask) != mask) {
+ flush_dcache_line((void *)end);
+ end -= linesz;
+ flush = 1;
+ }
- //printk("invalidate dcache: %p + %u\n", start, size);
-
- /* You asked for it, you got it */
- begin = (unsigned long)start & ~(linesz - 1);
- end = ((unsigned long)start + size + linesz - 1) & ~(linesz - 1);
-
- for (v = begin; v < end; v += linesz)
+ /* remaining cachelines only need invalidation */
+ for (v = begin; v <= end; v += linesz)
invalidate_dcache_line((void *)v);
+ if (flush)
+ flush_write_buffer();
}
void clean_dcache_region(void *start, size_t size)
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 7c621b8ef68..1406400bf3e 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -179,6 +179,8 @@ CONFIG_XFRM=y
# CONFIG_XFRM_USER is not set
# CONFIG_XFRM_SUB_POLICY is not set
CONFIG_NET_KEY=y
+CONFIG_IUCV=m
+CONFIG_AFIUCV=m
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
# CONFIG_IP_ADVANCED_ROUTER is not set
@@ -508,7 +510,6 @@ CONFIG_NET_ETHERNET=y
#
CONFIG_LCS=m
CONFIG_CTC=m
-CONFIG_IUCV=m
# CONFIG_NETIUCV is not set
# CONFIG_SMSGIUCV is not set
# CONFIG_CLAW is not set
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 918b4d845f9..086fcec4472 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -149,6 +149,15 @@ config CRYPTO_CBC
CBC: Cipher Block Chaining mode
This block cipher algorithm is required for IPSec.
+config CRYPTO_PCBC
+ tristate "PCBC support"
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_MANAGER
+ default m
+ help
+ PCBC: Propagating Cipher Block Chaining mode
+ This block cipher algorithm is required for RxRPC.
+
config CRYPTO_LRW
tristate "LRW support (EXPERIMENTAL)"
depends on EXPERIMENTAL
@@ -168,6 +177,13 @@ config CRYPTO_DES
help
DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3).
+config CRYPTO_FCRYPT
+ tristate "FCrypt cipher algorithm"
+ select CRYPTO_ALGAPI
+ select CRYPTO_BLKCIPHER
+ help
+ FCrypt algorithm used by RxRPC.
+
config CRYPTO_BLOWFISH
tristate "Blowfish cipher algorithm"
select CRYPTO_ALGAPI
@@ -409,6 +425,21 @@ config CRYPTO_CRC32C
See Castagnoli93. This implementation uses lib/libcrc32c.
Module will be crc32c.
+config CRYPTO_CAMELLIA
+ tristate "Camellia cipher algorithms"
+ depends on CRYPTO
+ select CRYPTO_ALGAPI
+ help
+ Camellia cipher algorithms module.
+
+ Camellia is a symmetric key block cipher developed jointly
+ at NTT and Mitsubishi Electric Corporation.
+
+ The Camellia specifies three key sizes: 128, 192 and 256 bits.
+
+ See also:
+ <https://info.isl.ntt.co.jp/crypt/eng/camellia/index_s.html>
+
config CRYPTO_TEST
tristate "Testing module"
depends on m
diff --git a/crypto/Makefile b/crypto/Makefile
index 60e3d24f61f..12f93f57817 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -27,13 +27,16 @@ obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o
obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o
obj-$(CONFIG_CRYPTO_ECB) += ecb.o
obj-$(CONFIG_CRYPTO_CBC) += cbc.o
+obj-$(CONFIG_CRYPTO_PCBC) += pcbc.o
obj-$(CONFIG_CRYPTO_LRW) += lrw.o
obj-$(CONFIG_CRYPTO_DES) += des.o
+obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish.o
obj-$(CONFIG_CRYPTO_TWOFISH) += twofish.o
obj-$(CONFIG_CRYPTO_TWOFISH_COMMON) += twofish_common.o
obj-$(CONFIG_CRYPTO_SERPENT) += serpent.o
obj-$(CONFIG_CRYPTO_AES) += aes.o
+obj-$(CONFIG_CRYPTO_CAMELLIA) += camellia.o
obj-$(CONFIG_CRYPTO_CAST5) += cast5.o
obj-$(CONFIG_CRYPTO_CAST6) += cast6.o
obj-$(CONFIG_CRYPTO_ARC4) += arc4.o
diff --git a/crypto/algapi.c b/crypto/algapi.c
index c91530021e9..f7d2185b2c8 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -377,7 +377,8 @@ void crypto_drop_spawn(struct crypto_spawn *spawn)
}
EXPORT_SYMBOL_GPL(crypto_drop_spawn);
-struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn)
+struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
+ u32 mask)
{
struct crypto_alg *alg;
struct crypto_alg *alg2;
@@ -396,10 +397,18 @@ struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn)
return ERR_PTR(-EAGAIN);
}
- tfm = __crypto_alloc_tfm(alg, 0);
+ tfm = ERR_PTR(-EINVAL);
+ if (unlikely((alg->cra_flags ^ type) & mask))
+ goto out_put_alg;
+
+ tfm = __crypto_alloc_tfm(alg, type, mask);
if (IS_ERR(tfm))
- crypto_mod_put(alg);
+ goto out_put_alg;
+
+ return tfm;
+out_put_alg:
+ crypto_mod_put(alg);
return tfm;
}
EXPORT_SYMBOL_GPL(crypto_spawn_tfm);
diff --git a/crypto/api.c b/crypto/api.c
index 8c446871cd5..55af8bb0f05 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -212,31 +212,12 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
}
EXPORT_SYMBOL_GPL(crypto_alg_mod_lookup);
-static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags)
+static int crypto_init_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
- tfm->crt_flags = flags & CRYPTO_TFM_REQ_MASK;
- flags &= ~CRYPTO_TFM_REQ_MASK;
-
- switch (crypto_tfm_alg_type(tfm)) {
- case CRYPTO_ALG_TYPE_CIPHER:
- return crypto_init_cipher_flags(tfm, flags);
-
- case CRYPTO_ALG_TYPE_DIGEST:
- return crypto_init_digest_flags(tfm, flags);
-
- case CRYPTO_ALG_TYPE_COMPRESS:
- return crypto_init_compress_flags(tfm, flags);
- }
-
- return 0;
-}
+ const struct crypto_type *type_obj = tfm->__crt_alg->cra_type;
-static int crypto_init_ops(struct crypto_tfm *tfm)
-{
- const struct crypto_type *type = tfm->__crt_alg->cra_type;
-
- if (type)
- return type->init(tfm);
+ if (type_obj)
+ return type_obj->init(tfm, type, mask);
switch (crypto_tfm_alg_type(tfm)) {
case CRYPTO_ALG_TYPE_CIPHER:
@@ -285,29 +266,29 @@ static void crypto_exit_ops(struct crypto_tfm *tfm)
}
}
-static unsigned int crypto_ctxsize(struct crypto_alg *alg, int flags)
+static unsigned int crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
{
- const struct crypto_type *type = alg->cra_type;
+ const struct crypto_type *type_obj = alg->cra_type;
unsigned int len;
len = alg->cra_alignmask & ~(crypto_tfm_ctx_alignment() - 1);
- if (type)
- return len + type->ctxsize(alg);
+ if (type_obj)
+ return len + type_obj->ctxsize(alg, type, mask);
switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
default:
BUG();
case CRYPTO_ALG_TYPE_CIPHER:
- len += crypto_cipher_ctxsize(alg, flags);
+ len += crypto_cipher_ctxsize(alg);
break;
case CRYPTO_ALG_TYPE_DIGEST:
- len += crypto_digest_ctxsize(alg, flags);
+ len += crypto_digest_ctxsize(alg);
break;
case CRYPTO_ALG_TYPE_COMPRESS:
- len += crypto_compress_ctxsize(alg, flags);
+ len += crypto_compress_ctxsize(alg);
break;
}
@@ -322,24 +303,21 @@ void crypto_shoot_alg(struct crypto_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_shoot_alg);
-struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 flags)
+struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
+ u32 mask)
{
struct crypto_tfm *tfm = NULL;
unsigned int tfm_size;
int err = -ENOMEM;
- tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, flags);
+ tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask);
tfm = kzalloc(tfm_size, GFP_KERNEL);
if (tfm == NULL)
goto out_err;
tfm->__crt_alg = alg;
- err = crypto_init_flags(tfm, flags);
- if (err)
- goto out_free_tfm;
-
- err = crypto_init_ops(tfm);
+ err = crypto_init_ops(tfm, type, mask);
if (err)
goto out_free_tfm;
@@ -362,31 +340,6 @@ out:
}
EXPORT_SYMBOL_GPL(__crypto_alloc_tfm);
-struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
-{
- struct crypto_tfm *tfm = NULL;
- int err;
-
- do {
- struct crypto_alg *alg;
-
- alg = crypto_alg_mod_lookup(name, 0, CRYPTO_ALG_ASYNC);
- err = PTR_ERR(alg);
- if (IS_ERR(alg))
- continue;
-
- tfm = __crypto_alloc_tfm(alg, flags);
- err = 0;
- if (IS_ERR(tfm)) {
- crypto_mod_put(alg);
- err = PTR_ERR(tfm);
- tfm = NULL;
- }
- } while (err == -EAGAIN && !signal_pending(current));
-
- return tfm;
-}
-
/*
* crypto_alloc_base - Locate algorithm and allocate transform
* @alg_name: Name of algorithm
@@ -420,7 +373,7 @@ struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
goto err;
}
- tfm = __crypto_alloc_tfm(alg, 0);
+ tfm = __crypto_alloc_tfm(alg, type, mask);
if (!IS_ERR(tfm))
return tfm;
@@ -466,7 +419,6 @@ void crypto_free_tfm(struct crypto_tfm *tfm)
kfree(tfm);
}
-EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
EXPORT_SYMBOL_GPL(crypto_free_tfm);
int crypto_has_alg(const char *name, u32 type, u32 mask)
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 6e93004f218..b5befe8c3a9 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -16,6 +16,7 @@
#include <linux/crypto.h>
#include <linux/errno.h>
+#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
@@ -313,6 +314,9 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
struct crypto_blkcipher *tfm = desc->tfm;
unsigned int alignmask = crypto_blkcipher_alignmask(tfm);
+ if (WARN_ON_ONCE(in_irq()))
+ return -EDEADLK;
+
walk->nbytes = walk->total;
if (unlikely(!walk->total))
return 0;
@@ -345,7 +349,8 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key,
return cipher->setkey(tfm, key, keylen);
}
-static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg)
+static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg, u32 type,
+ u32 mask)
{
struct blkcipher_alg *cipher = &alg->cra_blkcipher;
unsigned int len = alg->cra_ctxsize;
@@ -358,7 +363,7 @@ static unsigned int crypto_blkcipher_ctxsize(struct crypto_alg *alg)
return len;
}
-static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm)
+static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
diff --git a/crypto/camellia.c b/crypto/camellia.c
new file mode 100644
index 00000000000..6877ecfd90b
--- /dev/null
+++ b/crypto/camellia.c
@@ -0,0 +1,1801 @@
+/*
+ * Copyright (C) 2006
+ * NTT (Nippon Telegraph and Telephone Corporation).
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * Algorithm Specification
+ * http://info.isl.ntt.co.jp/crypt/eng/camellia/specifications.html
+ */
+
+/*
+ *
+ * NOTE --- NOTE --- NOTE --- NOTE
+ * This implementation assumes that all memory addresses passed
+ * as parameters are four-byte aligned.
+ *
+ */
+
+#include <linux/crypto.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+
+#define CAMELLIA_MIN_KEY_SIZE 16
+#define CAMELLIA_MAX_KEY_SIZE 32
+#define CAMELLIA_BLOCK_SIZE 16
+#define CAMELLIA_TABLE_BYTE_LEN 272
+#define CAMELLIA_TABLE_WORD_LEN (CAMELLIA_TABLE_BYTE_LEN / 4)
+
+typedef u32 KEY_TABLE_TYPE[CAMELLIA_TABLE_WORD_LEN];
+
+
+/* key constants */
+
+#define CAMELLIA_SIGMA1L (0xA09E667FL)
+#define CAMELLIA_SIGMA1R (0x3BCC908BL)
+#define CAMELLIA_SIGMA2L (0xB67AE858L)
+#define CAMELLIA_SIGMA2R (0x4CAA73B2L)
+#define CAMELLIA_SIGMA3L (0xC6EF372FL)
+#define CAMELLIA_SIGMA3R (0xE94F82BEL)
+#define CAMELLIA_SIGMA4L (0x54FF53A5L)
+#define CAMELLIA_SIGMA4R (0xF1D36F1CL)
+#define CAMELLIA_SIGMA5L (0x10E527FAL)
+#define CAMELLIA_SIGMA5R (0xDE682D1DL)
+#define CAMELLIA_SIGMA6L (0xB05688C2L)
+#define CAMELLIA_SIGMA6R (0xB3E6C1FDL)
+
+struct camellia_ctx {
+ int key_length;
+ KEY_TABLE_TYPE key_table;
+};
+
+
+/*
+ * macros
+ */
+
+
+# define GETU32(pt) (((u32)(pt)[0] << 24) \
+ ^ ((u32)(pt)[1] << 16) \
+ ^ ((u32)(pt)[2] << 8) \
+ ^ ((u32)(pt)[3]))
+
+#define COPY4WORD(dst, src) \
+ do { \
+ (dst)[0]=(src)[0]; \
+ (dst)[1]=(src)[1]; \
+ (dst)[2]=(src)[2]; \
+ (dst)[3]=(src)[3]; \
+ }while(0)
+
+#define SWAP4WORD(word) \
+ do { \
+ CAMELLIA_SWAP4((word)[0]); \
+ CAMELLIA_SWAP4((word)[1]); \
+ CAMELLIA_SWAP4((word)[2]); \
+ CAMELLIA_SWAP4((word)[3]); \
+ }while(0)
+
+#define XOR4WORD(a, b)/* a = a ^ b */ \
+ do { \
+ (a)[0]^=(b)[0]; \
+ (a)[1]^=(b)[1]; \
+ (a)[2]^=(b)[2]; \
+ (a)[3]^=(b)[3]; \
+ }while(0)
+
+#define XOR4WORD2(a, b, c)/* a = b ^ c */ \
+ do { \
+ (a)[0]=(b)[0]^(c)[0]; \
+ (a)[1]=(b)[1]^(c)[1]; \
+ (a)[2]=(b)[2]^(c)[2]; \
+ (a)[3]=(b)[3]^(c)[3]; \
+ }while(0)
+
+#define CAMELLIA_SUBKEY_L(INDEX) (subkey[(INDEX)*2])
+#define CAMELLIA_SUBKEY_R(INDEX) (subkey[(INDEX)*2 + 1])
+
+/* rotation right shift 1byte */
+#define CAMELLIA_RR8(x) (((x) >> 8) + ((x) << 24))
+/* rotation left shift 1bit */
+#define CAMELLIA_RL1(x) (((x) << 1) + ((x) >> 31))
+/* rotation left shift 1byte */
+#define CAMELLIA_RL8(x) (((x) << 8) + ((x) >> 24))
+
+#define CAMELLIA_ROLDQ(ll, lr, rl, rr, w0, w1, bits) \
+ do { \
+ w0 = ll; \
+ ll = (ll << bits) + (lr >> (32 - bits)); \
+ lr = (lr << bits) + (rl >> (32 - bits)); \
+ rl = (rl << bits) + (rr >> (32 - bits)); \
+ rr = (rr << bits) + (w0 >> (32 - bits)); \
+ } while(0)
+
+#define CAMELLIA_ROLDQo32(ll, lr, rl, rr, w0, w1, bits) \
+ do { \
+ w0 = ll; \
+ w1 = lr; \
+ ll = (lr << (bits - 32)) + (rl >> (64 - bits)); \
+ lr = (rl << (bits - 32)) + (rr >> (64 - bits)); \
+ rl = (rr << (bits - 32)) + (w0 >> (64 - bits)); \
+ rr = (w0 << (bits - 32)) + (w1 >> (64 - bits)); \
+ } while(0)
+
+#define CAMELLIA_SP1110(INDEX) (camellia_sp1110[(INDEX)])
+#define CAMELLIA_SP0222(INDEX) (camellia_sp0222[(INDEX)])
+#define CAMELLIA_SP3033(INDEX) (camellia_sp3033[(INDEX)])
+#define CAMELLIA_SP4404(INDEX) (camellia_sp4404[(INDEX)])
+
+#define CAMELLIA_F(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) \
+ do { \
+ il = xl ^ kl; \
+ ir = xr ^ kr; \
+ t0 = il >> 16; \
+ t1 = ir >> 16; \
+ yl = CAMELLIA_SP1110(ir & 0xff) \
+ ^ CAMELLIA_SP0222((t1 >> 8) & 0xff) \
+ ^ CAMELLIA_SP3033(t1 & 0xff) \
+ ^ CAMELLIA_SP4404((ir >> 8) & 0xff); \
+ yr = CAMELLIA_SP1110((t0 >> 8) & 0xff) \
+ ^ CAMELLIA_SP0222(t0 & 0xff) \
+ ^ CAMELLIA_SP3033((il >> 8) & 0xff) \
+ ^ CAMELLIA_SP4404(il & 0xff); \
+ yl ^= yr; \
+ yr = CAMELLIA_RR8(yr); \
+ yr ^= yl; \
+ } while(0)
+
+
+/*
+ * for speed up
+ *
+ */
+#define CAMELLIA_FLS(ll, lr, rl, rr, kll, klr, krl, krr, t0, t1, t2, t3) \
+ do { \
+ t0 = kll; \
+ t2 = krr; \
+ t0 &= ll; \
+ t2 |= rr; \
+ rl ^= t2; \
+ lr ^= CAMELLIA_RL1(t0); \
+ t3 = krl; \
+ t1 = klr; \
+ t3 &= rl; \
+ t1 |= lr; \
+ ll ^= t1; \
+ rr ^= CAMELLIA_RL1(t3); \
+ } while(0)
+
+#define CAMELLIA_ROUNDSM(xl, xr, kl, kr, yl, yr, il, ir, t0, t1) \
+ do { \
+ ir = CAMELLIA_SP1110(xr & 0xff); \
+ il = CAMELLIA_SP1110((xl>>24) & 0xff); \
+ ir ^= CAMELLIA_SP0222((xr>>24) & 0xff); \
+ il ^= CAMELLIA_SP0222((xl>>16) & 0xff); \
+ ir ^= CAMELLIA_SP3033((xr>>16) & 0xff); \
+ il ^= CAMELLIA_SP3033((xl>>8) & 0xff); \
+ ir ^= CAMELLIA_SP4404((xr>>8) & 0xff); \
+ il ^= CAMELLIA_SP4404(xl & 0xff); \
+ il ^= kl; \
+ ir ^= il ^ kr; \
+ yl ^= ir; \
+ yr ^= CAMELLIA_RR8(il) ^ ir; \
+ } while(0)
+
+/**
+ * Stuff related to the Camellia key schedule
+ */
+#define SUBL(x) subL[(x)]
+#define SUBR(x) subR[(x)]
+
+
+static const u32 camellia_sp1110[256] = {
+ 0x70707000,0x82828200,0x2c2c2c00,0xececec00,
+ 0xb3b3b300,0x27272700,0xc0c0c000,0xe5e5e500,
+ 0xe4e4e400,0x85858500,0x57575700,0x35353500,
+ 0xeaeaea00,0x0c0c0c00,0xaeaeae00,0x41414100,
+ 0x23232300,0xefefef00,0x6b6b6b00,0x93939300,
+ 0x45454500,0x19191900,0xa5a5a500,0x21212100,
+ 0xededed00,0x0e0e0e00,0x4f4f4f00,0x4e4e4e00,
+ 0x1d1d1d00,0x65656500,0x92929200,0xbdbdbd00,
+ 0x86868600,0xb8b8b800,0xafafaf00,0x8f8f8f00,
+ 0x7c7c7c00,0xebebeb00,0x1f1f1f00,0xcecece00,
+ 0x3e3e3e00,0x30303000,0xdcdcdc00,0x5f5f5f00,
+ 0x5e5e5e00,0xc5c5c500,0x0b0b0b00,0x1a1a1a00,
+ 0xa6a6a600,0xe1e1e100,0x39393900,0xcacaca00,
+ 0xd5d5d500,0x47474700,0x5d5d5d00,0x3d3d3d00,
+ 0xd9d9d900,0x01010100,0x5a5a5a00,0xd6d6d600,
+ 0x51515100,0x56565600,0x6c6c6c00,0x4d4d4d00,
+ 0x8b8b8b00,0x0d0d0d00,0x9a9a9a00,0x66666600,
+ 0xfbfbfb00,0xcccccc00,0xb0b0b000,0x2d2d2d00,
+ 0x74747400,0x12121200,0x2b2b2b00,0x20202000,
+ 0xf0f0f000,0xb1b1b100,0x84848400,0x99999900,
+ 0xdfdfdf00,0x4c4c4c00,0xcbcbcb00,0xc2c2c200,
+ 0x34343400,0x7e7e7e00,0x76767600,0x05050500,
+ 0x6d6d6d00,0xb7b7b700,0xa9a9a900,0x31313100,
+ 0xd1d1d100,0x17171700,0x04040400,0xd7d7d700,
+ 0x14141400,0x58585800,0x3a3a3a00,0x61616100,
+ 0xdedede00,0x1b1b1b00,0x11111100,0x1c1c1c00,
+ 0x32323200,0x0f0f0f00,0x9c9c9c00,0x16161600,
+ 0x53535300,0x18181800,0xf2f2f200,0x22222200,
+ 0xfefefe00,0x44444400,0xcfcfcf00,0xb2b2b200,
+ 0xc3c3c300,0xb5b5b500,0x7a7a7a00,0x91919100,
+ 0x24242400,0x08080800,0xe8e8e800,0xa8a8a800,
+ 0x60606000,0xfcfcfc00,0x69696900,0x50505000,
+ 0xaaaaaa00,0xd0d0d000,0xa0a0a000,0x7d7d7d00,
+ 0xa1a1a100,0x89898900,0x62626200,0x97979700,
+ 0x54545400,0x5b5b5b00,0x1e1e1e00,0x95959500,
+ 0xe0e0e000,0xffffff00,0x64646400,0xd2d2d200,
+ 0x10101000,0xc4c4c400,0x00000000,0x48484800,
+ 0xa3a3a300,0xf7f7f700,0x75757500,0xdbdbdb00,
+ 0x8a8a8a00,0x03030300,0xe6e6e600,0xdadada00,
+ 0x09090900,0x3f3f3f00,0xdddddd00,0x94949400,
+ 0x87878700,0x5c5c5c00,0x83838300,0x02020200,
+ 0xcdcdcd00,0x4a4a4a00,0x90909000,0x33333300,
+ 0x73737300,0x67676700,0xf6f6f600,0xf3f3f300,
+ 0x9d9d9d00,0x7f7f7f00,0xbfbfbf00,0xe2e2e200,
+ 0x52525200,0x9b9b9b00,0xd8d8d800,0x26262600,
+ 0xc8c8c800,0x37373700,0xc6c6c600,0x3b3b3b00,
+ 0x81818100,0x96969600,0x6f6f6f00,0x4b4b4b00,
+ 0x13131300,0xbebebe00,0x63636300,0x2e2e2e00,
+ 0xe9e9e900,0x79797900,0xa7a7a700,0x8c8c8c00,
+ 0x9f9f9f00,0x6e6e6e00,0xbcbcbc00,0x8e8e8e00,
+ 0x29292900,0xf5f5f500,0xf9f9f900,0xb6b6b600,
+ 0x2f2f2f00,0xfdfdfd00,0xb4b4b400,0x59595900,
+ 0x78787800,0x98989800,0x06060600,0x6a6a6a00,
+ 0xe7e7e700,0x46464600,0x71717100,0xbababa00,
+ 0xd4d4d400,0x25252500,0xababab00,0x42424200,
+ 0x88888800,0xa2a2a200,0x8d8d8d00,0xfafafa00,
+ 0x72727200,0x07070700,0xb9b9b900,0x55555500,
+ 0xf8f8f800,0xeeeeee00,0xacacac00,0x0a0a0a00,
+ 0x36363600,0x49494900,0x2a2a2a00,0x68686800,
+ 0x3c3c3c00,0x38383800,0xf1f1f100,0xa4a4a400,
+ 0x40404000,0x28282800,0xd3d3d300,0x7b7b7b00,
+ 0xbbbbbb00,0xc9c9c900,0x43434300,0xc1c1c100,
+ 0x15151500,0xe3e3e300,0xadadad00,0xf4f4f400,
+ 0x77777700,0xc7c7c700,0x80808000,0x9e9e9e00,
+};
+
+static const u32 camellia_sp0222[256] = {
+ 0x00e0e0e0,0x00050505,0x00585858,0x00d9d9d9,
+ 0x00676767,0x004e4e4e,0x00818181,0x00cbcbcb,
+ 0x00c9c9c9,0x000b0b0b,0x00aeaeae,0x006a6a6a,
+ 0x00d5d5d5,0x00181818,0x005d5d5d,0x00828282,
+ 0x00464646,0x00dfdfdf,0x00d6d6d6,0x00272727,
+ 0x008a8a8a,0x00323232,0x004b4b4b,0x00424242,
+ 0x00dbdbdb,0x001c1c1c,0x009e9e9e,0x009c9c9c,
+ 0x003a3a3a,0x00cacaca,0x00252525,0x007b7b7b,
+ 0x000d0d0d,0x00717171,0x005f5f5f,0x001f1f1f,
+ 0x00f8f8f8,0x00d7d7d7,0x003e3e3e,0x009d9d9d,
+ 0x007c7c7c,0x00606060,0x00b9b9b9,0x00bebebe,
+ 0x00bcbcbc,0x008b8b8b,0x00161616,0x00343434,
+ 0x004d4d4d,0x00c3c3c3,0x00727272,0x00959595,
+ 0x00ababab,0x008e8e8e,0x00bababa,0x007a7a7a,
+ 0x00b3b3b3,0x00020202,0x00b4b4b4,0x00adadad,
+ 0x00a2a2a2,0x00acacac,0x00d8d8d8,0x009a9a9a,
+ 0x00171717,0x001a1a1a,0x00353535,0x00cccccc,
+ 0x00f7f7f7,0x00999999,0x00616161,0x005a5a5a,
+ 0x00e8e8e8,0x00242424,0x00565656,0x00404040,
+ 0x00e1e1e1,0x00636363,0x00090909,0x00333333,
+ 0x00bfbfbf,0x00989898,0x00979797,0x00858585,
+ 0x00686868,0x00fcfcfc,0x00ececec,0x000a0a0a,
+ 0x00dadada,0x006f6f6f,0x00535353,0x00626262,
+ 0x00a3a3a3,0x002e2e2e,0x00080808,0x00afafaf,
+ 0x00282828,0x00b0b0b0,0x00747474,0x00c2c2c2,
+ 0x00bdbdbd,0x00363636,0x00222222,0x00383838,
+ 0x00646464,0x001e1e1e,0x00393939,0x002c2c2c,
+ 0x00a6a6a6,0x00303030,0x00e5e5e5,0x00444444,
+ 0x00fdfdfd,0x00888888,0x009f9f9f,0x00656565,
+ 0x00878787,0x006b6b6b,0x00f4f4f4,0x00232323,
+ 0x00484848,0x00101010,0x00d1d1d1,0x00515151,
+ 0x00c0c0c0,0x00f9f9f9,0x00d2d2d2,0x00a0a0a0,
+ 0x00555555,0x00a1a1a1,0x00414141,0x00fafafa,
+ 0x00434343,0x00131313,0x00c4c4c4,0x002f2f2f,
+ 0x00a8a8a8,0x00b6b6b6,0x003c3c3c,0x002b2b2b,
+ 0x00c1c1c1,0x00ffffff,0x00c8c8c8,0x00a5a5a5,
+ 0x00202020,0x00898989,0x00000000,0x00909090,
+ 0x00474747,0x00efefef,0x00eaeaea,0x00b7b7b7,
+ 0x00151515,0x00060606,0x00cdcdcd,0x00b5b5b5,
+ 0x00121212,0x007e7e7e,0x00bbbbbb,0x00292929,
+ 0x000f0f0f,0x00b8b8b8,0x00070707,0x00040404,
+ 0x009b9b9b,0x00949494,0x00212121,0x00666666,
+ 0x00e6e6e6,0x00cecece,0x00ededed,0x00e7e7e7,
+ 0x003b3b3b,0x00fefefe,0x007f7f7f,0x00c5c5c5,
+ 0x00a4a4a4,0x00373737,0x00b1b1b1,0x004c4c4c,
+ 0x00919191,0x006e6e6e,0x008d8d8d,0x00767676,
+ 0x00030303,0x002d2d2d,0x00dedede,0x00969696,
+ 0x00262626,0x007d7d7d,0x00c6c6c6,0x005c5c5c,
+ 0x00d3d3d3,0x00f2f2f2,0x004f4f4f,0x00191919,
+ 0x003f3f3f,0x00dcdcdc,0x00797979,0x001d1d1d,
+ 0x00525252,0x00ebebeb,0x00f3f3f3,0x006d6d6d,
+ 0x005e5e5e,0x00fbfbfb,0x00696969,0x00b2b2b2,
+ 0x00f0f0f0,0x00313131,0x000c0c0c,0x00d4d4d4,
+ 0x00cfcfcf,0x008c8c8c,0x00e2e2e2,0x00757575,
+ 0x00a9a9a9,0x004a4a4a,0x00575757,0x00848484,
+ 0x00111111,0x00454545,0x001b1b1b,0x00f5f5f5,
+ 0x00e4e4e4,0x000e0e0e,0x00737373,0x00aaaaaa,
+ 0x00f1f1f1,0x00dddddd,0x00595959,0x00141414,
+ 0x006c6c6c,0x00929292,0x00545454,0x00d0d0d0,
+ 0x00787878,0x00707070,0x00e3e3e3,0x00494949,
+ 0x00808080,0x00505050,0x00a7a7a7,0x00f6f6f6,
+ 0x00777777,0x00939393,0x00868686,0x00838383,
+ 0x002a2a2a,0x00c7c7c7,0x005b5b5b,0x00e9e9e9,
+ 0x00eeeeee,0x008f8f8f,0x00010101,0x003d3d3d,
+};
+
+static const u32 camellia_sp3033[256] = {
+ 0x38003838,0x41004141,0x16001616,0x76007676,
+ 0xd900d9d9,0x93009393,0x60006060,0xf200f2f2,
+ 0x72007272,0xc200c2c2,0xab00abab,0x9a009a9a,
+ 0x75007575,0x06000606,0x57005757,0xa000a0a0,
+ 0x91009191,0xf700f7f7,0xb500b5b5,0xc900c9c9,
+ 0xa200a2a2,0x8c008c8c,0xd200d2d2,0x90009090,
+ 0xf600f6f6,0x07000707,0xa700a7a7,0x27002727,
+ 0x8e008e8e,0xb200b2b2,0x49004949,0xde00dede,
+ 0x43004343,0x5c005c5c,0xd700d7d7,0xc700c7c7,
+ 0x3e003e3e,0xf500f5f5,0x8f008f8f,0x67006767,
+ 0x1f001f1f,0x18001818,0x6e006e6e,0xaf00afaf,
+ 0x2f002f2f,0xe200e2e2,0x85008585,0x0d000d0d,
+ 0x53005353,0xf000f0f0,0x9c009c9c,0x65006565,
+ 0xea00eaea,0xa300a3a3,0xae00aeae,0x9e009e9e,
+ 0xec00ecec,0x80008080,0x2d002d2d,0x6b006b6b,
+ 0xa800a8a8,0x2b002b2b,0x36003636,0xa600a6a6,
+ 0xc500c5c5,0x86008686,0x4d004d4d,0x33003333,
+ 0xfd00fdfd,0x66006666,0x58005858,0x96009696,
+ 0x3a003a3a,0x09000909,0x95009595,0x10001010,
+ 0x78007878,0xd800d8d8,0x42004242,0xcc00cccc,
+ 0xef00efef,0x26002626,0xe500e5e5,0x61006161,
+ 0x1a001a1a,0x3f003f3f,0x3b003b3b,0x82008282,
+ 0xb600b6b6,0xdb00dbdb,0xd400d4d4,0x98009898,
+ 0xe800e8e8,0x8b008b8b,0x02000202,0xeb00ebeb,
+ 0x0a000a0a,0x2c002c2c,0x1d001d1d,0xb000b0b0,
+ 0x6f006f6f,0x8d008d8d,0x88008888,0x0e000e0e,
+ 0x19001919,0x87008787,0x4e004e4e,0x0b000b0b,
+ 0xa900a9a9,0x0c000c0c,0x79007979,0x11001111,
+ 0x7f007f7f,0x22002222,0xe700e7e7,0x59005959,
+ 0xe100e1e1,0xda00dada,0x3d003d3d,0xc800c8c8,
+ 0x12001212,0x04000404,0x74007474,0x54005454,
+ 0x30003030,0x7e007e7e,0xb400b4b4,0x28002828,
+ 0x55005555,0x68006868,0x50005050,0xbe00bebe,
+ 0xd000d0d0,0xc400c4c4,0x31003131,0xcb00cbcb,
+ 0x2a002a2a,0xad00adad,0x0f000f0f,0xca00caca,
+ 0x70007070,0xff00ffff,0x32003232,0x69006969,
+ 0x08000808,0x62006262,0x00000000,0x24002424,
+ 0xd100d1d1,0xfb00fbfb,0xba00baba,0xed00eded,
+ 0x45004545,0x81008181,0x73007373,0x6d006d6d,
+ 0x84008484,0x9f009f9f,0xee00eeee,0x4a004a4a,
+ 0xc300c3c3,0x2e002e2e,0xc100c1c1,0x01000101,
+ 0xe600e6e6,0x25002525,0x48004848,0x99009999,
+ 0xb900b9b9,0xb300b3b3,0x7b007b7b,0xf900f9f9,
+ 0xce00cece,0xbf00bfbf,0xdf00dfdf,0x71007171,
+ 0x29002929,0xcd00cdcd,0x6c006c6c,0x13001313,
+ 0x64006464,0x9b009b9b,0x63006363,0x9d009d9d,
+ 0xc000c0c0,0x4b004b4b,0xb700b7b7,0xa500a5a5,
+ 0x89008989,0x5f005f5f,0xb100b1b1,0x17001717,
+ 0xf400f4f4,0xbc00bcbc,0xd300d3d3,0x46004646,
+ 0xcf00cfcf,0x37003737,0x5e005e5e,0x47004747,
+ 0x94009494,0xfa00fafa,0xfc00fcfc,0x5b005b5b,
+ 0x97009797,0xfe00fefe,0x5a005a5a,0xac00acac,
+ 0x3c003c3c,0x4c004c4c,0x03000303,0x35003535,
+ 0xf300f3f3,0x23002323,0xb800b8b8,0x5d005d5d,
+ 0x6a006a6a,0x92009292,0xd500d5d5,0x21002121,
+ 0x44004444,0x51005151,0xc600c6c6,0x7d007d7d,
+ 0x39003939,0x83008383,0xdc00dcdc,0xaa00aaaa,
+ 0x7c007c7c,0x77007777,0x56005656,0x05000505,
+ 0x1b001b1b,0xa400a4a4,0x15001515,0x34003434,
+ 0x1e001e1e,0x1c001c1c,0xf800f8f8,0x52005252,
+ 0x20002020,0x14001414,0xe900e9e9,0xbd00bdbd,
+ 0xdd00dddd,0xe400e4e4,0xa100a1a1,0xe000e0e0,
+ 0x8a008a8a,0xf100f1f1,0xd600d6d6,0x7a007a7a,
+ 0xbb00bbbb,0xe300e3e3,0x40004040,0x4f004f4f,
+};
+
+static const u32 camellia_sp4404[256] = {
+ 0x70700070,0x2c2c002c,0xb3b300b3,0xc0c000c0,
+ 0xe4e400e4,0x57570057,0xeaea00ea,0xaeae00ae,
+ 0x23230023,0x6b6b006b,0x45450045,0xa5a500a5,
+ 0xeded00ed,0x4f4f004f,0x1d1d001d,0x92920092,
+ 0x86860086,0xafaf00af,0x7c7c007c,0x1f1f001f,
+ 0x3e3e003e,0xdcdc00dc,0x5e5e005e,0x0b0b000b,
+ 0xa6a600a6,0x39390039,0xd5d500d5,0x5d5d005d,
+ 0xd9d900d9,0x5a5a005a,0x51510051,0x6c6c006c,
+ 0x8b8b008b,0x9a9a009a,0xfbfb00fb,0xb0b000b0,
+ 0x74740074,0x2b2b002b,0xf0f000f0,0x84840084,
+ 0xdfdf00df,0xcbcb00cb,0x34340034,0x76760076,
+ 0x6d6d006d,0xa9a900a9,0xd1d100d1,0x04040004,
+ 0x14140014,0x3a3a003a,0xdede00de,0x11110011,
+ 0x32320032,0x9c9c009c,0x53530053,0xf2f200f2,
+ 0xfefe00fe,0xcfcf00cf,0xc3c300c3,0x7a7a007a,
+ 0x24240024,0xe8e800e8,0x60600060,0x69690069,
+ 0xaaaa00aa,0xa0a000a0,0xa1a100a1,0x62620062,
+ 0x54540054,0x1e1e001e,0xe0e000e0,0x64640064,
+ 0x10100010,0x00000000,0xa3a300a3,0x75750075,
+ 0x8a8a008a,0xe6e600e6,0x09090009,0xdddd00dd,
+ 0x87870087,0x83830083,0xcdcd00cd,0x90900090,
+ 0x73730073,0xf6f600f6,0x9d9d009d,0xbfbf00bf,
+ 0x52520052,0xd8d800d8,0xc8c800c8,0xc6c600c6,
+ 0x81810081,0x6f6f006f,0x13130013,0x63630063,
+ 0xe9e900e9,0xa7a700a7,0x9f9f009f,0xbcbc00bc,
+ 0x29290029,0xf9f900f9,0x2f2f002f,0xb4b400b4,
+ 0x78780078,0x06060006,0xe7e700e7,0x71710071,
+ 0xd4d400d4,0xabab00ab,0x88880088,0x8d8d008d,
+ 0x72720072,0xb9b900b9,0xf8f800f8,0xacac00ac,
+ 0x36360036,0x2a2a002a,0x3c3c003c,0xf1f100f1,
+ 0x40400040,0xd3d300d3,0xbbbb00bb,0x43430043,
+ 0x15150015,0xadad00ad,0x77770077,0x80800080,
+ 0x82820082,0xecec00ec,0x27270027,0xe5e500e5,
+ 0x85850085,0x35350035,0x0c0c000c,0x41410041,
+ 0xefef00ef,0x93930093,0x19190019,0x21210021,
+ 0x0e0e000e,0x4e4e004e,0x65650065,0xbdbd00bd,
+ 0xb8b800b8,0x8f8f008f,0xebeb00eb,0xcece00ce,
+ 0x30300030,0x5f5f005f,0xc5c500c5,0x1a1a001a,
+ 0xe1e100e1,0xcaca00ca,0x47470047,0x3d3d003d,
+ 0x01010001,0xd6d600d6,0x56560056,0x4d4d004d,
+ 0x0d0d000d,0x66660066,0xcccc00cc,0x2d2d002d,
+ 0x12120012,0x20200020,0xb1b100b1,0x99990099,
+ 0x4c4c004c,0xc2c200c2,0x7e7e007e,0x05050005,
+ 0xb7b700b7,0x31310031,0x17170017,0xd7d700d7,
+ 0x58580058,0x61610061,0x1b1b001b,0x1c1c001c,
+ 0x0f0f000f,0x16160016,0x18180018,0x22220022,
+ 0x44440044,0xb2b200b2,0xb5b500b5,0x91910091,
+ 0x08080008,0xa8a800a8,0xfcfc00fc,0x50500050,
+ 0xd0d000d0,0x7d7d007d,0x89890089,0x97970097,
+ 0x5b5b005b,0x95950095,0xffff00ff,0xd2d200d2,
+ 0xc4c400c4,0x48480048,0xf7f700f7,0xdbdb00db,
+ 0x03030003,0xdada00da,0x3f3f003f,0x94940094,
+ 0x5c5c005c,0x02020002,0x4a4a004a,0x33330033,
+ 0x67670067,0xf3f300f3,0x7f7f007f,0xe2e200e2,
+ 0x9b9b009b,0x26260026,0x37370037,0x3b3b003b,
+ 0x96960096,0x4b4b004b,0xbebe00be,0x2e2e002e,
+ 0x79790079,0x8c8c008c,0x6e6e006e,0x8e8e008e,
+ 0xf5f500f5,0xb6b600b6,0xfdfd00fd,0x59590059,
+ 0x98980098,0x6a6a006a,0x46460046,0xbaba00ba,
+ 0x25250025,0x42420042,0xa2a200a2,0xfafa00fa,
+ 0x07070007,0x55550055,0xeeee00ee,0x0a0a000a,
+ 0x49490049,0x68680068,0x38380038,0xa4a400a4,
+ 0x28280028,0x7b7b007b,0xc9c900c9,0xc1c100c1,
+ 0xe3e300e3,0xf4f400f4,0xc7c700c7,0x9e9e009e,
+};
+
+
+
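+/**
+ * Key schedule for 128-bit keys: expands the key into 26 (left, right)
+ * 32-bit subkey pairs.  Rotations of KL and of the derived key KA
+ * (computed below with four F-function calls using the SIGMA constants)
+ * supply the round keys; kw2 and kw4 are then absorbed into the other
+ * subkeys, and the inverse of the last half of the P-function is
+ * pre-applied to the round subkeys so the round function can omit it.
+ */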
+static void camellia_setup128(const unsigned char *key, u32 *subkey)
+{
+ u32 kll, klr, krl, krr;
+ u32 il, ir, t0, t1, w0, w1;
+ u32 kw4l, kw4r, dw, tl, tr;
+ u32 subL[26];
+ u32 subR[26];
+
+ /**
+ * k == kll || klr || krl || krr (|| is concatenation)
+ */
+ kll = GETU32(key );
+ klr = GETU32(key + 4);
+ krl = GETU32(key + 8);
+ krr = GETU32(key + 12);
+ /**
+ * generate KL dependent subkeys
+ */
+ /* kw1 */
+ SUBL(0) = kll; SUBR(0) = klr;
+ /* kw2 */
+ SUBL(1) = krl; SUBR(1) = krr;
+ /* rotation left shift 15bit */
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ /* k3 */
+ SUBL(4) = kll; SUBR(4) = klr;
+ /* k4 */
+ SUBL(5) = krl; SUBR(5) = krr;
+ /* rotation left shift 15+30bit */
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 30);
+ /* k7 */
+ SUBL(10) = kll; SUBR(10) = klr;
+ /* k8 */
+ SUBL(11) = krl; SUBR(11) = krr;
+ /* rotation left shift 15+30+15bit */
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ /* k10 */
+ SUBL(13) = krl; SUBR(13) = krr;
+ /* rotation left shift 15+30+15+17 bit */
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
+ /* kl3 */
+ SUBL(16) = kll; SUBR(16) = klr;
+ /* kl4 */
+ SUBL(17) = krl; SUBR(17) = krr;
+ /* rotation left shift 15+30+15+17+17 bit */
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
+ /* k13 */
+ SUBL(18) = kll; SUBR(18) = klr;
+ /* k14 */
+ SUBL(19) = krl; SUBR(19) = krr;
+ /* rotation left shift 15+30+15+17+17+17 bit */
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
+ /* k17 */
+ SUBL(22) = kll; SUBR(22) = klr;
+ /* k18 */
+ SUBL(23) = krl; SUBR(23) = krr;
+
+ /* generate KA */
+ kll = SUBL(0); klr = SUBR(0);
+ krl = SUBL(1); krr = SUBR(1);
+ CAMELLIA_F(kll, klr,
+ CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R,
+ w0, w1, il, ir, t0, t1);
+ krl ^= w0; krr ^= w1;
+ CAMELLIA_F(krl, krr,
+ CAMELLIA_SIGMA2L, CAMELLIA_SIGMA2R,
+ kll, klr, il, ir, t0, t1);
+ /* current status == (kll, klr, w0, w1) */
+ CAMELLIA_F(kll, klr,
+ CAMELLIA_SIGMA3L, CAMELLIA_SIGMA3R,
+ krl, krr, il, ir, t0, t1);
+ krl ^= w0; krr ^= w1;
+ CAMELLIA_F(krl, krr,
+ CAMELLIA_SIGMA4L, CAMELLIA_SIGMA4R,
+ w0, w1, il, ir, t0, t1);
+ kll ^= w0; klr ^= w1;
+
+ /* generate KA dependent subkeys */
+ /* k1, k2 */
+ SUBL(2) = kll; SUBR(2) = klr;
+ SUBL(3) = krl; SUBR(3) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ /* k5,k6 */
+ SUBL(6) = kll; SUBR(6) = klr;
+ SUBL(7) = krl; SUBR(7) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ /* kl1, kl2 */
+ SUBL(8) = kll; SUBR(8) = klr;
+ SUBL(9) = krl; SUBR(9) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ /* k9 */
+ SUBL(12) = kll; SUBR(12) = klr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ /* k11, k12 */
+ SUBL(14) = kll; SUBR(14) = klr;
+ SUBL(15) = krl; SUBR(15) = krr;
+ CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 34);
+ /* k15, k16 */
+ SUBL(20) = kll; SUBR(20) = klr;
+ SUBL(21) = krl; SUBR(21) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
+ /* kw3, kw4 */
+ SUBL(24) = kll; SUBR(24) = klr;
+ SUBL(25) = krl; SUBR(25) = krr;
+
+
+ /* absorb kw2 to other subkeys */
+ /* round 2 */
+ SUBL(3) ^= SUBL(1); SUBR(3) ^= SUBR(1);
+ /* round 4 */
+ SUBL(5) ^= SUBL(1); SUBR(5) ^= SUBR(1);
+ /* round 6 */
+ SUBL(7) ^= SUBL(1); SUBR(7) ^= SUBR(1);
+ SUBL(1) ^= SUBR(1) & ~SUBR(9);
+ dw = SUBL(1) & SUBL(9),
+ SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl2) */
+ /* round 8 */
+ SUBL(11) ^= SUBL(1); SUBR(11) ^= SUBR(1);
+ /* round 10 */
+ SUBL(13) ^= SUBL(1); SUBR(13) ^= SUBR(1);
+ /* round 12 */
+ SUBL(15) ^= SUBL(1); SUBR(15) ^= SUBR(1);
+ SUBL(1) ^= SUBR(1) & ~SUBR(17);
+ dw = SUBL(1) & SUBL(17),
+ SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl4) */
+ /* round 14 */
+ SUBL(19) ^= SUBL(1); SUBR(19) ^= SUBR(1);
+ /* round 16 */
+ SUBL(21) ^= SUBL(1); SUBR(21) ^= SUBR(1);
+ /* round 18 */
+ SUBL(23) ^= SUBL(1); SUBR(23) ^= SUBR(1);
+ /* kw3 */
+ SUBL(24) ^= SUBL(1); SUBR(24) ^= SUBR(1);
+
+ /* absorb kw4 to other subkeys */
+ kw4l = SUBL(25); kw4r = SUBR(25);
+ /* round 17 */
+ SUBL(22) ^= kw4l; SUBR(22) ^= kw4r;
+ /* round 15 */
+ SUBL(20) ^= kw4l; SUBR(20) ^= kw4r;
+ /* round 13 */
+ SUBL(18) ^= kw4l; SUBR(18) ^= kw4r;
+ kw4l ^= kw4r & ~SUBR(16);
+ dw = kw4l & SUBL(16),
+ kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl3) */
+ /* round 11 */
+ SUBL(14) ^= kw4l; SUBR(14) ^= kw4r;
+ /* round 9 */
+ SUBL(12) ^= kw4l; SUBR(12) ^= kw4r;
+ /* round 7 */
+ SUBL(10) ^= kw4l; SUBR(10) ^= kw4r;
+ kw4l ^= kw4r & ~SUBR(8);
+ dw = kw4l & SUBL(8),
+ kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl1) */
+ /* round 5 */
+ SUBL(6) ^= kw4l; SUBR(6) ^= kw4r;
+ /* round 3 */
+ SUBL(4) ^= kw4l; SUBR(4) ^= kw4r;
+ /* round 1 */
+ SUBL(2) ^= kw4l; SUBR(2) ^= kw4r;
+ /* kw1 */
+ SUBL(0) ^= kw4l; SUBR(0) ^= kw4r;
+
+
+ /* key XOR is end of F-function */
+ CAMELLIA_SUBKEY_L(0) = SUBL(0) ^ SUBL(2);/* kw1 */
+ CAMELLIA_SUBKEY_R(0) = SUBR(0) ^ SUBR(2);
+ CAMELLIA_SUBKEY_L(2) = SUBL(3); /* round 1 */
+ CAMELLIA_SUBKEY_R(2) = SUBR(3);
+ CAMELLIA_SUBKEY_L(3) = SUBL(2) ^ SUBL(4); /* round 2 */
+ CAMELLIA_SUBKEY_R(3) = SUBR(2) ^ SUBR(4);
+ CAMELLIA_SUBKEY_L(4) = SUBL(3) ^ SUBL(5); /* round 3 */
+ CAMELLIA_SUBKEY_R(4) = SUBR(3) ^ SUBR(5);
+ CAMELLIA_SUBKEY_L(5) = SUBL(4) ^ SUBL(6); /* round 4 */
+ CAMELLIA_SUBKEY_R(5) = SUBR(4) ^ SUBR(6);
+ CAMELLIA_SUBKEY_L(6) = SUBL(5) ^ SUBL(7); /* round 5 */
+ CAMELLIA_SUBKEY_R(6) = SUBR(5) ^ SUBR(7);
+ tl = SUBL(10) ^ (SUBR(10) & ~SUBR(8));
+ dw = tl & SUBL(8), /* FL(kl1) */
+ tr = SUBR(10) ^ CAMELLIA_RL1(dw);
+ CAMELLIA_SUBKEY_L(7) = SUBL(6) ^ tl; /* round 6 */
+ CAMELLIA_SUBKEY_R(7) = SUBR(6) ^ tr;
+ CAMELLIA_SUBKEY_L(8) = SUBL(8); /* FL(kl1) */
+ CAMELLIA_SUBKEY_R(8) = SUBR(8);
+ CAMELLIA_SUBKEY_L(9) = SUBL(9); /* FLinv(kl2) */
+ CAMELLIA_SUBKEY_R(9) = SUBR(9);
+ tl = SUBL(7) ^ (SUBR(7) & ~SUBR(9));
+ dw = tl & SUBL(9), /* FLinv(kl2) */
+ tr = SUBR(7) ^ CAMELLIA_RL1(dw);
+ CAMELLIA_SUBKEY_L(10) = tl ^ SUBL(11); /* round 7 */
+ CAMELLIA_SUBKEY_R(10) = tr ^ SUBR(11);
+ CAMELLIA_SUBKEY_L(11) = SUBL(10) ^ SUBL(12); /* round 8 */
+ CAMELLIA_SUBKEY_R(11) = SUBR(10) ^ SUBR(12);
+ CAMELLIA_SUBKEY_L(12) = SUBL(11) ^ SUBL(13); /* round 9 */
+ CAMELLIA_SUBKEY_R(12) = SUBR(11) ^ SUBR(13);
+ CAMELLIA_SUBKEY_L(13) = SUBL(12) ^ SUBL(14); /* round 10 */
+ CAMELLIA_SUBKEY_R(13) = SUBR(12) ^ SUBR(14);
+ CAMELLIA_SUBKEY_L(14) = SUBL(13) ^ SUBL(15); /* round 11 */
+ CAMELLIA_SUBKEY_R(14) = SUBR(13) ^ SUBR(15);
+ tl = SUBL(18) ^ (SUBR(18) & ~SUBR(16));
+ dw = tl & SUBL(16), /* FL(kl3) */
+ tr = SUBR(18) ^ CAMELLIA_RL1(dw);
+ CAMELLIA_SUBKEY_L(15) = SUBL(14) ^ tl; /* round 12 */
+ CAMELLIA_SUBKEY_R(15) = SUBR(14) ^ tr;
+ CAMELLIA_SUBKEY_L(16) = SUBL(16); /* FL(kl3) */
+ CAMELLIA_SUBKEY_R(16) = SUBR(16);
+ CAMELLIA_SUBKEY_L(17) = SUBL(17); /* FLinv(kl4) */
+ CAMELLIA_SUBKEY_R(17) = SUBR(17);
+ tl = SUBL(15) ^ (SUBR(15) & ~SUBR(17));
+ dw = tl & SUBL(17), /* FLinv(kl4) */
+ tr = SUBR(15) ^ CAMELLIA_RL1(dw);
+ CAMELLIA_SUBKEY_L(18) = tl ^ SUBL(19); /* round 13 */
+ CAMELLIA_SUBKEY_R(18) = tr ^ SUBR(19);
+ CAMELLIA_SUBKEY_L(19) = SUBL(18) ^ SUBL(20); /* round 14 */
+ CAMELLIA_SUBKEY_R(19) = SUBR(18) ^ SUBR(20);
+ CAMELLIA_SUBKEY_L(20) = SUBL(19) ^ SUBL(21); /* round 15 */
+ CAMELLIA_SUBKEY_R(20) = SUBR(19) ^ SUBR(21);
+ CAMELLIA_SUBKEY_L(21) = SUBL(20) ^ SUBL(22); /* round 16 */
+ CAMELLIA_SUBKEY_R(21) = SUBR(20) ^ SUBR(22);
+ CAMELLIA_SUBKEY_L(22) = SUBL(21) ^ SUBL(23); /* round 17 */
+ CAMELLIA_SUBKEY_R(22) = SUBR(21) ^ SUBR(23);
+ CAMELLIA_SUBKEY_L(23) = SUBL(22); /* round 18 */
+ CAMELLIA_SUBKEY_R(23) = SUBR(22);
+ CAMELLIA_SUBKEY_L(24) = SUBL(24) ^ SUBL(23); /* kw3 */
+ CAMELLIA_SUBKEY_R(24) = SUBR(24) ^ SUBR(23);
+
+ /* apply the inverse of the last half of P-function */
+ dw = CAMELLIA_SUBKEY_L(2) ^ CAMELLIA_SUBKEY_R(2),
+ dw = CAMELLIA_RL8(dw);/* round 1 */
+ CAMELLIA_SUBKEY_R(2) = CAMELLIA_SUBKEY_L(2) ^ dw,
+ CAMELLIA_SUBKEY_L(2) = dw;
+ dw = CAMELLIA_SUBKEY_L(3) ^ CAMELLIA_SUBKEY_R(3),
+ dw = CAMELLIA_RL8(dw);/* round 2 */
+ CAMELLIA_SUBKEY_R(3) = CAMELLIA_SUBKEY_L(3) ^ dw,
+ CAMELLIA_SUBKEY_L(3) = dw;
+ dw = CAMELLIA_SUBKEY_L(4) ^ CAMELLIA_SUBKEY_R(4),
+ dw = CAMELLIA_RL8(dw);/* round 3 */
+ CAMELLIA_SUBKEY_R(4) = CAMELLIA_SUBKEY_L(4) ^ dw,
+ CAMELLIA_SUBKEY_L(4) = dw;
+ dw = CAMELLIA_SUBKEY_L(5) ^ CAMELLIA_SUBKEY_R(5),
+ dw = CAMELLIA_RL8(dw);/* round 4 */
+ CAMELLIA_SUBKEY_R(5) = CAMELLIA_SUBKEY_L(5) ^ dw,
+ CAMELLIA_SUBKEY_L(5) = dw;
+ dw = CAMELLIA_SUBKEY_L(6) ^ CAMELLIA_SUBKEY_R(6),
+ dw = CAMELLIA_RL8(dw);/* round 5 */
+ CAMELLIA_SUBKEY_R(6) = CAMELLIA_SUBKEY_L(6) ^ dw,
+ CAMELLIA_SUBKEY_L(6) = dw;
+ dw = CAMELLIA_SUBKEY_L(7) ^ CAMELLIA_SUBKEY_R(7),
+ dw = CAMELLIA_RL8(dw);/* round 6 */
+ CAMELLIA_SUBKEY_R(7) = CAMELLIA_SUBKEY_L(7) ^ dw,
+ CAMELLIA_SUBKEY_L(7) = dw;
+ dw = CAMELLIA_SUBKEY_L(10) ^ CAMELLIA_SUBKEY_R(10),
+ dw = CAMELLIA_RL8(dw);/* round 7 */
+ CAMELLIA_SUBKEY_R(10) = CAMELLIA_SUBKEY_L(10) ^ dw,
+ CAMELLIA_SUBKEY_L(10) = dw;
+ dw = CAMELLIA_SUBKEY_L(11) ^ CAMELLIA_SUBKEY_R(11),
+ dw = CAMELLIA_RL8(dw);/* round 8 */
+ CAMELLIA_SUBKEY_R(11) = CAMELLIA_SUBKEY_L(11) ^ dw,
+ CAMELLIA_SUBKEY_L(11) = dw;
+ dw = CAMELLIA_SUBKEY_L(12) ^ CAMELLIA_SUBKEY_R(12),
+ dw = CAMELLIA_RL8(dw);/* round 9 */
+ CAMELLIA_SUBKEY_R(12) = CAMELLIA_SUBKEY_L(12) ^ dw,
+ CAMELLIA_SUBKEY_L(12) = dw;
+ dw = CAMELLIA_SUBKEY_L(13) ^ CAMELLIA_SUBKEY_R(13),
+ dw = CAMELLIA_RL8(dw);/* round 10 */
+ CAMELLIA_SUBKEY_R(13) = CAMELLIA_SUBKEY_L(13) ^ dw,
+ CAMELLIA_SUBKEY_L(13) = dw;
+ dw = CAMELLIA_SUBKEY_L(14) ^ CAMELLIA_SUBKEY_R(14),
+ dw = CAMELLIA_RL8(dw);/* round 11 */
+ CAMELLIA_SUBKEY_R(14) = CAMELLIA_SUBKEY_L(14) ^ dw,
+ CAMELLIA_SUBKEY_L(14) = dw;
+ dw = CAMELLIA_SUBKEY_L(15) ^ CAMELLIA_SUBKEY_R(15),
+ dw = CAMELLIA_RL8(dw);/* round 12 */
+ CAMELLIA_SUBKEY_R(15) = CAMELLIA_SUBKEY_L(15) ^ dw,
+ CAMELLIA_SUBKEY_L(15) = dw;
+ dw = CAMELLIA_SUBKEY_L(18) ^ CAMELLIA_SUBKEY_R(18),
+ dw = CAMELLIA_RL8(dw);/* round 13 */
+ CAMELLIA_SUBKEY_R(18) = CAMELLIA_SUBKEY_L(18) ^ dw,
+ CAMELLIA_SUBKEY_L(18) = dw;
+ dw = CAMELLIA_SUBKEY_L(19) ^ CAMELLIA_SUBKEY_R(19),
+ dw = CAMELLIA_RL8(dw);/* round 14 */
+ CAMELLIA_SUBKEY_R(19) = CAMELLIA_SUBKEY_L(19) ^ dw,
+ CAMELLIA_SUBKEY_L(19) = dw;
+ dw = CAMELLIA_SUBKEY_L(20) ^ CAMELLIA_SUBKEY_R(20),
+ dw = CAMELLIA_RL8(dw);/* round 15 */
+ CAMELLIA_SUBKEY_R(20) = CAMELLIA_SUBKEY_L(20) ^ dw,
+ CAMELLIA_SUBKEY_L(20) = dw;
+ dw = CAMELLIA_SUBKEY_L(21) ^ CAMELLIA_SUBKEY_R(21),
+ dw = CAMELLIA_RL8(dw);/* round 16 */
+ CAMELLIA_SUBKEY_R(21) = CAMELLIA_SUBKEY_L(21) ^ dw,
+ CAMELLIA_SUBKEY_L(21) = dw;
+ dw = CAMELLIA_SUBKEY_L(22) ^ CAMELLIA_SUBKEY_R(22),
+ dw = CAMELLIA_RL8(dw);/* round 17 */
+ CAMELLIA_SUBKEY_R(22) = CAMELLIA_SUBKEY_L(22) ^ dw,
+ CAMELLIA_SUBKEY_L(22) = dw;
+ dw = CAMELLIA_SUBKEY_L(23) ^ CAMELLIA_SUBKEY_R(23),
+ dw = CAMELLIA_RL8(dw);/* round 18 */
+ CAMELLIA_SUBKEY_R(23) = CAMELLIA_SUBKEY_L(23) ^ dw,
+ CAMELLIA_SUBKEY_L(23) = dw;
+
+ return;
+}
+
+
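+/**
+ * Key schedule for 256-bit keys: expands the key into 34 (left, right)
+ * subkey pairs.  In addition to KL- and KA-derived material, a second
+ * derived key KB (computed below with SIGMA5/SIGMA6) supplies the extra
+ * subkeys needed for the 24-round variant.
+ */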
+static void camellia_setup256(const unsigned char *key, u32 *subkey)
+{
+ u32 kll,klr,krl,krr; /* left half of key */
+ u32 krll,krlr,krrl,krrr; /* right half of key */
+ u32 il, ir, t0, t1, w0, w1; /* temporary variables */
+ u32 kw4l, kw4r, dw, tl, tr;
+ u32 subL[34];
+ u32 subR[34];
+
+ /**
+ * key = (kll || klr || krl || krr || krll || krlr || krrl || krrr)
+ * (|| is concatenation)
+ */
+
+ kll = GETU32(key );
+ klr = GETU32(key + 4);
+ krl = GETU32(key + 8);
+ krr = GETU32(key + 12);
+ krll = GETU32(key + 16);
+ krlr = GETU32(key + 20);
+ krrl = GETU32(key + 24);
+ krrr = GETU32(key + 28);
+
+ /* generate KL dependent subkeys */
+ /* kw1 */
+ SUBL(0) = kll; SUBR(0) = klr;
+ /* kw2 */
+ SUBL(1) = krl; SUBR(1) = krr;
+ CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 45);
+ /* k9 */
+ SUBL(12) = kll; SUBR(12) = klr;
+ /* k10 */
+ SUBL(13) = krl; SUBR(13) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ /* kl3 */
+ SUBL(16) = kll; SUBR(16) = klr;
+ /* kl4 */
+ SUBL(17) = krl; SUBR(17) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 17);
+ /* k17 */
+ SUBL(22) = kll; SUBR(22) = klr;
+ /* k18 */
+ SUBL(23) = krl; SUBR(23) = krr;
+ CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 34);
+ /* k23 */
+ SUBL(30) = kll; SUBR(30) = klr;
+ /* k24 */
+ SUBL(31) = krl; SUBR(31) = krr;
+
+ /* generate KR dependent subkeys */
+ CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15);
+ /* k3 */
+ SUBL(4) = krll; SUBR(4) = krlr;
+ /* k4 */
+ SUBL(5) = krrl; SUBR(5) = krrr;
+ CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 15);
+ /* kl1 */
+ SUBL(8) = krll; SUBR(8) = krlr;
+ /* kl2 */
+ SUBL(9) = krrl; SUBR(9) = krrr;
+ CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
+ /* k13 */
+ SUBL(18) = krll; SUBR(18) = krlr;
+ /* k14 */
+ SUBL(19) = krrl; SUBR(19) = krrr;
+ CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34);
+ /* k19 */
+ SUBL(26) = krll; SUBR(26) = krlr;
+ /* k20 */
+ SUBL(27) = krrl; SUBR(27) = krrr;
+ CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 34);
+
+ /* generate KA */
+ kll = SUBL(0) ^ krll; klr = SUBR(0) ^ krlr;
+ krl = SUBL(1) ^ krrl; krr = SUBR(1) ^ krrr;
+ CAMELLIA_F(kll, klr,
+ CAMELLIA_SIGMA1L, CAMELLIA_SIGMA1R,
+ w0, w1, il, ir, t0, t1);
+ krl ^= w0; krr ^= w1;
+ CAMELLIA_F(krl, krr,
+ CAMELLIA_SIGMA2L, CAMELLIA_SIGMA2R,
+ kll, klr, il, ir, t0, t1);
+ kll ^= krll; klr ^= krlr;
+ CAMELLIA_F(kll, klr,
+ CAMELLIA_SIGMA3L, CAMELLIA_SIGMA3R,
+ krl, krr, il, ir, t0, t1);
+ krl ^= w0 ^ krrl; krr ^= w1 ^ krrr;
+ CAMELLIA_F(krl, krr,
+ CAMELLIA_SIGMA4L, CAMELLIA_SIGMA4R,
+ w0, w1, il, ir, t0, t1);
+ kll ^= w0; klr ^= w1;
+
+ /* generate KB */
+ krll ^= kll; krlr ^= klr;
+ krrl ^= krl; krrr ^= krr;
+ CAMELLIA_F(krll, krlr,
+ CAMELLIA_SIGMA5L, CAMELLIA_SIGMA5R,
+ w0, w1, il, ir, t0, t1);
+ krrl ^= w0; krrr ^= w1;
+ CAMELLIA_F(krrl, krrr,
+ CAMELLIA_SIGMA6L, CAMELLIA_SIGMA6R,
+ w0, w1, il, ir, t0, t1);
+ krll ^= w0; krlr ^= w1;
+
+ /* generate KA dependent subkeys */
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 15);
+ /* k5 */
+ SUBL(6) = kll; SUBR(6) = klr;
+ /* k6 */
+ SUBL(7) = krl; SUBR(7) = krr;
+ CAMELLIA_ROLDQ(kll, klr, krl, krr, w0, w1, 30);
+ /* k11 */
+ SUBL(14) = kll; SUBR(14) = klr;
+ /* k12 */
+ SUBL(15) = krl; SUBR(15) = krr;
+ /* rotation left shift 32bit */
+ /* kl5 */
+ SUBL(24) = klr; SUBR(24) = krl;
+ /* kl6 */
+ SUBL(25) = krr; SUBR(25) = kll;
+ /* rotation left shift 49 from k11,k12 -> k21,k22 */
+ CAMELLIA_ROLDQo32(kll, klr, krl, krr, w0, w1, 49);
+ /* k21 */
+ SUBL(28) = kll; SUBR(28) = klr;
+ /* k22 */
+ SUBL(29) = krl; SUBR(29) = krr;
+
+ /* generate KB dependent subkeys */
+ /* k1 */
+ SUBL(2) = krll; SUBR(2) = krlr;
+ /* k2 */
+ SUBL(3) = krrl; SUBR(3) = krrr;
+ CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
+ /* k7 */
+ SUBL(10) = krll; SUBR(10) = krlr;
+ /* k8 */
+ SUBL(11) = krrl; SUBR(11) = krrr;
+ CAMELLIA_ROLDQ(krll, krlr, krrl, krrr, w0, w1, 30);
+ /* k15 */
+ SUBL(20) = krll; SUBR(20) = krlr;
+ /* k16 */
+ SUBL(21) = krrl; SUBR(21) = krrr;
+ CAMELLIA_ROLDQo32(krll, krlr, krrl, krrr, w0, w1, 51);
+ /* kw3 */
+ SUBL(32) = krll; SUBR(32) = krlr;
+ /* kw4 */
+ SUBL(33) = krrl; SUBR(33) = krrr;
+
+ /* absorb kw2 to other subkeys */
+ /* round 2 */
+ SUBL(3) ^= SUBL(1); SUBR(3) ^= SUBR(1);
+ /* round 4 */
+ SUBL(5) ^= SUBL(1); SUBR(5) ^= SUBR(1);
+ /* round 6 */
+ SUBL(7) ^= SUBL(1); SUBR(7) ^= SUBR(1);
+ SUBL(1) ^= SUBR(1) & ~SUBR(9);
+ dw = SUBL(1) & SUBL(9),
+ SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl2) */
+ /* round 8 */
+ SUBL(11) ^= SUBL(1); SUBR(11) ^= SUBR(1);
+ /* round 10 */
+ SUBL(13) ^= SUBL(1); SUBR(13) ^= SUBR(1);
+ /* round 12 */
+ SUBL(15) ^= SUBL(1); SUBR(15) ^= SUBR(1);
+ SUBL(1) ^= SUBR(1) & ~SUBR(17);
+ dw = SUBL(1) & SUBL(17),
+ SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl4) */
+ /* round 14 */
+ SUBL(19) ^= SUBL(1); SUBR(19) ^= SUBR(1);
+ /* round 16 */
+ SUBL(21) ^= SUBL(1); SUBR(21) ^= SUBR(1);
+ /* round 18 */
+ SUBL(23) ^= SUBL(1); SUBR(23) ^= SUBR(1);
+ SUBL(1) ^= SUBR(1) & ~SUBR(25);
+ dw = SUBL(1) & SUBL(25),
+ SUBR(1) ^= CAMELLIA_RL1(dw); /* modified for FLinv(kl6) */
+ /* round 20 */
+ SUBL(27) ^= SUBL(1); SUBR(27) ^= SUBR(1);
+ /* round 22 */
+ SUBL(29) ^= SUBL(1); SUBR(29) ^= SUBR(1);
+ /* round 24 */
+ SUBL(31) ^= SUBL(1); SUBR(31) ^= SUBR(1);
+ /* kw3 */
+ SUBL(32) ^= SUBL(1); SUBR(32) ^= SUBR(1);
+
+
+ /* absorb kw4 to other subkeys */
+ kw4l = SUBL(33); kw4r = SUBR(33);
+ /* round 23 */
+ SUBL(30) ^= kw4l; SUBR(30) ^= kw4r;
+ /* round 21 */
+ SUBL(28) ^= kw4l; SUBR(28) ^= kw4r;
+ /* round 19 */
+ SUBL(26) ^= kw4l; SUBR(26) ^= kw4r;
+ kw4l ^= kw4r & ~SUBR(24);
+ dw = kw4l & SUBL(24),
+ kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl5) */
+ /* round 17 */
+ SUBL(22) ^= kw4l; SUBR(22) ^= kw4r;
+ /* round 15 */
+ SUBL(20) ^= kw4l; SUBR(20) ^= kw4r;
+ /* round 13 */
+ SUBL(18) ^= kw4l; SUBR(18) ^= kw4r;
+ kw4l ^= kw4r & ~SUBR(16);
+ dw = kw4l & SUBL(16),
+ kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl3) */
+ /* round 11 */
+ SUBL(14) ^= kw4l; SUBR(14) ^= kw4r;
+ /* round 9 */
+ SUBL(12) ^= kw4l; SUBR(12) ^= kw4r;
+ /* round 7 */
+ SUBL(10) ^= kw4l; SUBR(10) ^= kw4r;
+ kw4l ^= kw4r & ~SUBR(8);
+ dw = kw4l & SUBL(8),
+ kw4r ^= CAMELLIA_RL1(dw); /* modified for FL(kl1) */
+ /* round 5 */
+ SUBL(6) ^= kw4l; SUBR(6) ^= kw4r;
+ /* round 3 */
+ SUBL(4) ^= kw4l; SUBR(4) ^= kw4r;
+ /* round 1 */
+ SUBL(2) ^= kw4l; SUBR(2) ^= kw4r;
+ /* kw1 */
+ SUBL(0) ^= kw4l; SUBR(0) ^= kw4r;
+
+ /* key XOR is end of F-function */
+ CAMELLIA_SUBKEY_L(0) = SUBL(0) ^ SUBL(2);/* kw1 */
+ CAMELLIA_SUBKEY_R(0) = SUBR(0) ^ SUBR(2);
+ CAMELLIA_SUBKEY_L(2) = SUBL(3); /* round 1 */
+ CAMELLIA_SUBKEY_R(2) = SUBR(3);
+ CAMELLIA_SUBKEY_L(3) = SUBL(2) ^ SUBL(4); /* round 2 */
+ CAMELLIA_SUBKEY_R(3) = SUBR(2) ^ SUBR(4);
+ CAMELLIA_SUBKEY_L(4) = SUBL(3) ^ SUBL(5); /* round 3 */
+ CAMELLIA_SUBKEY_R(4) = SUBR(3) ^ SUBR(5);
+ CAMELLIA_SUBKEY_L(5) = SUBL(4) ^ SUBL(6); /* round 4 */
+ CAMELLIA_SUBKEY_R(5) = SUBR(4) ^ SUBR(6);
+ CAMELLIA_SUBKEY_L(6) = SUBL(5) ^ SUBL(7); /* round 5 */
+ CAMELLIA_SUBKEY_R(6) = SUBR(5) ^ SUBR(7);
+ tl = SUBL(10) ^ (SUBR(10) & ~SUBR(8));
+ dw = tl & SUBL(8), /* FL(kl1) */
+ tr = SUBR(10) ^ CAMELLIA_RL1(dw);
+ CAMELLIA_SUBKEY_L(7) = SUBL(6) ^ tl; /* round 6 */
+ CAMELLIA_SUBKEY_R(7) = SUBR(6) ^ tr;
+ CAMELLIA_SUBKEY_L(8) = SUBL(8); /* FL(kl1) */
+ CAMELLIA_SUBKEY_R(8) = SUBR(8);
+ CAMELLIA_SUBKEY_L(9) = SUBL(9); /* FLinv(kl2) */
+ CAMELLIA_SUBKEY_R(9) = SUBR(9);
+ tl = SUBL(7) ^ (SUBR(7) & ~SUBR(9));
+ dw = tl & SUBL(9), /* FLinv(kl2) */
+ tr = SUBR(7) ^ CAMELLIA_RL1(dw);
+ CAMELLIA_SUBKEY_L(10) = tl ^ SUBL(11); /* round 7 */
+ CAMELLIA_SUBKEY_R(10) = tr ^ SUBR(11);
+ CAMELLIA_SUBKEY_L(11) = SUBL(10) ^ SUBL(12); /* round 8 */
+ CAMELLIA_SUBKEY_R(11) = SUBR(10) ^ SUBR(12);
+ CAMELLIA_SUBKEY_L(12) = SUBL(11) ^ SUBL(13); /* round 9 */
+ CAMELLIA_SUBKEY_R(12) = SUBR(11) ^ SUBR(13);
+ CAMELLIA_SUBKEY_L(13) = SUBL(12) ^ SUBL(14); /* round 10 */
+ CAMELLIA_SUBKEY_R(13) = SUBR(12) ^ SUBR(14);
+ CAMELLIA_SUBKEY_L(14) = SUBL(13) ^ SUBL(15); /* round 11 */
+ CAMELLIA_SUBKEY_R(14) = SUBR(13) ^ SUBR(15);
+ tl = SUBL(18) ^ (SUBR(18) & ~SUBR(16));
+ dw = tl & SUBL(16), /* FL(kl3) */
+ tr = SUBR(18) ^ CAMELLIA_RL1(dw);
+ CAMELLIA_SUBKEY_L(15) = SUBL(14) ^ tl; /* round 12 */
+ CAMELLIA_SUBKEY_R(15) = SUBR(14) ^ tr;
+ CAMELLIA_SUBKEY_L(16) = SUBL(16); /* FL(kl3) */
+ CAMELLIA_SUBKEY_R(16) = SUBR(16);
+ CAMELLIA_SUBKEY_L(17) = SUBL(17); /* FLinv(kl4) */
+ CAMELLIA_SUBKEY_R(17) = SUBR(17);
+ tl = SUBL(15) ^ (SUBR(15) & ~SUBR(17));
+ dw = tl & SUBL(17), /* FLinv(kl4) */
+ tr = SUBR(15) ^ CAMELLIA_RL1(dw);
+ CAMELLIA_SUBKEY_L(18) = tl ^ SUBL(19); /* round 13 */
+ CAMELLIA_SUBKEY_R(18) = tr ^ SUBR(19);
+ CAMELLIA_SUBKEY_L(19) = SUBL(18) ^ SUBL(20); /* round 14 */
+ CAMELLIA_SUBKEY_R(19) = SUBR(18) ^ SUBR(20);
+ CAMELLIA_SUBKEY_L(20) = SUBL(19) ^ SUBL(21); /* round 15 */
+ CAMELLIA_SUBKEY_R(20) = SUBR(19) ^ SUBR(21);
+ CAMELLIA_SUBKEY_L(21) = SUBL(20) ^ SUBL(22); /* round 16 */
+ CAMELLIA_SUBKEY_R(21) = SUBR(20) ^ SUBR(22);
+ CAMELLIA_SUBKEY_L(22) = SUBL(21) ^ SUBL(23); /* round 17 */
+ CAMELLIA_SUBKEY_R(22) = SUBR(21) ^ SUBR(23);
+ tl = SUBL(26) ^ (SUBR(26) & ~SUBR(24));
+ dw = tl & SUBL(24), /* FL(kl5) */
+ tr = SUBR(26) ^ CAMELLIA_RL1(dw);
+ CAMELLIA_SUBKEY_L(23) = SUBL(22) ^ tl; /* round 18 */
+ CAMELLIA_SUBKEY_R(23) = SUBR(22) ^ tr;
+ CAMELLIA_SUBKEY_L(24) = SUBL(24); /* FL(kl5) */
+ CAMELLIA_SUBKEY_R(24) = SUBR(24);
+ CAMELLIA_SUBKEY_L(25) = SUBL(25); /* FLinv(kl6) */
+ CAMELLIA_SUBKEY_R(25) = SUBR(25);
+ tl = SUBL(23) ^ (SUBR(23) & ~SUBR(25));
+ dw = tl & SUBL(25), /* FLinv(kl6) */
+ tr = SUBR(23) ^ CAMELLIA_RL1(dw);
+ CAMELLIA_SUBKEY_L(26) = tl ^ SUBL(27); /* round 19 */
+ CAMELLIA_SUBKEY_R(26) = tr ^ SUBR(27);
+ CAMELLIA_SUBKEY_L(27) = SUBL(26) ^ SUBL(28); /* round 20 */
+ CAMELLIA_SUBKEY_R(27) = SUBR(26) ^ SUBR(28);
+ CAMELLIA_SUBKEY_L(28) = SUBL(27) ^ SUBL(29); /* round 21 */
+ CAMELLIA_SUBKEY_R(28) = SUBR(27) ^ SUBR(29);
+ CAMELLIA_SUBKEY_L(29) = SUBL(28) ^ SUBL(30); /* round 22 */
+ CAMELLIA_SUBKEY_R(29) = SUBR(28) ^ SUBR(30);
+ CAMELLIA_SUBKEY_L(30) = SUBL(29) ^ SUBL(31); /* round 23 */
+ CAMELLIA_SUBKEY_R(30) = SUBR(29) ^ SUBR(31);
+ CAMELLIA_SUBKEY_L(31) = SUBL(30); /* round 24 */
+ CAMELLIA_SUBKEY_R(31) = SUBR(30);
+ CAMELLIA_SUBKEY_L(32) = SUBL(32) ^ SUBL(31); /* kw3 */
+ CAMELLIA_SUBKEY_R(32) = SUBR(32) ^ SUBR(31);
+
+ /* apply the inverse of the last half of P-function */
+ dw = CAMELLIA_SUBKEY_L(2) ^ CAMELLIA_SUBKEY_R(2),
+ dw = CAMELLIA_RL8(dw);/* round 1 */
+ CAMELLIA_SUBKEY_R(2) = CAMELLIA_SUBKEY_L(2) ^ dw,
+ CAMELLIA_SUBKEY_L(2) = dw;
+ dw = CAMELLIA_SUBKEY_L(3) ^ CAMELLIA_SUBKEY_R(3),
+ dw = CAMELLIA_RL8(dw);/* round 2 */
+ CAMELLIA_SUBKEY_R(3) = CAMELLIA_SUBKEY_L(3) ^ dw,
+ CAMELLIA_SUBKEY_L(3) = dw;
+ dw = CAMELLIA_SUBKEY_L(4) ^ CAMELLIA_SUBKEY_R(4),
+ dw = CAMELLIA_RL8(dw);/* round 3 */
+ CAMELLIA_SUBKEY_R(4) = CAMELLIA_SUBKEY_L(4) ^ dw,
+ CAMELLIA_SUBKEY_L(4) = dw;
+ dw = CAMELLIA_SUBKEY_L(5) ^ CAMELLIA_SUBKEY_R(5),
+ dw = CAMELLIA_RL8(dw);/* round 4 */
+ CAMELLIA_SUBKEY_R(5) = CAMELLIA_SUBKEY_L(5) ^ dw,
+ CAMELLIA_SUBKEY_L(5) = dw;
+ dw = CAMELLIA_SUBKEY_L(6) ^ CAMELLIA_SUBKEY_R(6),
+ dw = CAMELLIA_RL8(dw);/* round 5 */
+ CAMELLIA_SUBKEY_R(6) = CAMELLIA_SUBKEY_L(6) ^ dw,
+ CAMELLIA_SUBKEY_L(6) = dw;
+ dw = CAMELLIA_SUBKEY_L(7) ^ CAMELLIA_SUBKEY_R(7),
+ dw = CAMELLIA_RL8(dw);/* round 6 */
+ CAMELLIA_SUBKEY_R(7) = CAMELLIA_SUBKEY_L(7) ^ dw,
+ CAMELLIA_SUBKEY_L(7) = dw;
+ dw = CAMELLIA_SUBKEY_L(10) ^ CAMELLIA_SUBKEY_R(10),
+ dw = CAMELLIA_RL8(dw);/* round 7 */
+ CAMELLIA_SUBKEY_R(10) = CAMELLIA_SUBKEY_L(10) ^ dw,
+ CAMELLIA_SUBKEY_L(10) = dw;
+ dw = CAMELLIA_SUBKEY_L(11) ^ CAMELLIA_SUBKEY_R(11),
+ dw = CAMELLIA_RL8(dw);/* round 8 */
+ CAMELLIA_SUBKEY_R(11) = CAMELLIA_SUBKEY_L(11) ^ dw,
+ CAMELLIA_SUBKEY_L(11) = dw;
+ dw = CAMELLIA_SUBKEY_L(12) ^ CAMELLIA_SUBKEY_R(12),
+ dw = CAMELLIA_RL8(dw);/* round 9 */
+ CAMELLIA_SUBKEY_R(12) = CAMELLIA_SUBKEY_L(12) ^ dw,
+ CAMELLIA_SUBKEY_L(12) = dw;
+ dw = CAMELLIA_SUBKEY_L(13) ^ CAMELLIA_SUBKEY_R(13),
+ dw = CAMELLIA_RL8(dw);/* round 10 */
+ CAMELLIA_SUBKEY_R(13) = CAMELLIA_SUBKEY_L(13) ^ dw,
+ CAMELLIA_SUBKEY_L(13) = dw;
+ dw = CAMELLIA_SUBKEY_L(14) ^ CAMELLIA_SUBKEY_R(14),
+ dw = CAMELLIA_RL8(dw);/* round 11 */
+ CAMELLIA_SUBKEY_R(14) = CAMELLIA_SUBKEY_L(14) ^ dw,
+ CAMELLIA_SUBKEY_L(14) = dw;
+ dw = CAMELLIA_SUBKEY_L(15) ^ CAMELLIA_SUBKEY_R(15),
+ dw = CAMELLIA_RL8(dw);/* round 12 */
+ CAMELLIA_SUBKEY_R(15) = CAMELLIA_SUBKEY_L(15) ^ dw,
+ CAMELLIA_SUBKEY_L(15) = dw;
+ dw = CAMELLIA_SUBKEY_L(18) ^ CAMELLIA_SUBKEY_R(18),
+ dw = CAMELLIA_RL8(dw);/* round 13 */
+ CAMELLIA_SUBKEY_R(18) = CAMELLIA_SUBKEY_L(18) ^ dw,
+ CAMELLIA_SUBKEY_L(18) = dw;
+ dw = CAMELLIA_SUBKEY_L(19) ^ CAMELLIA_SUBKEY_R(19),
+ dw = CAMELLIA_RL8(dw);/* round 14 */
+ CAMELLIA_SUBKEY_R(19) = CAMELLIA_SUBKEY_L(19) ^ dw,
+ CAMELLIA_SUBKEY_L(19) = dw;
+ dw = CAMELLIA_SUBKEY_L(20) ^ CAMELLIA_SUBKEY_R(20),
+ dw = CAMELLIA_RL8(dw);/* round 15 */
+ CAMELLIA_SUBKEY_R(20) = CAMELLIA_SUBKEY_L(20) ^ dw,
+ CAMELLIA_SUBKEY_L(20) = dw;
+ dw = CAMELLIA_SUBKEY_L(21) ^ CAMELLIA_SUBKEY_R(21),
+ dw = CAMELLIA_RL8(dw);/* round 16 */
+ CAMELLIA_SUBKEY_R(21) = CAMELLIA_SUBKEY_L(21) ^ dw,
+ CAMELLIA_SUBKEY_L(21) = dw;
+ dw = CAMELLIA_SUBKEY_L(22) ^ CAMELLIA_SUBKEY_R(22),
+ dw = CAMELLIA_RL8(dw);/* round 17 */
+ CAMELLIA_SUBKEY_R(22) = CAMELLIA_SUBKEY_L(22) ^ dw,
+ CAMELLIA_SUBKEY_L(22) = dw;
+ dw = CAMELLIA_SUBKEY_L(23) ^ CAMELLIA_SUBKEY_R(23),
+ dw = CAMELLIA_RL8(dw);/* round 18 */
+ CAMELLIA_SUBKEY_R(23) = CAMELLIA_SUBKEY_L(23) ^ dw,
+ CAMELLIA_SUBKEY_L(23) = dw;
+ dw = CAMELLIA_SUBKEY_L(26) ^ CAMELLIA_SUBKEY_R(26),
+ dw = CAMELLIA_RL8(dw);/* round 19 */
+ CAMELLIA_SUBKEY_R(26) = CAMELLIA_SUBKEY_L(26) ^ dw,
+ CAMELLIA_SUBKEY_L(26) = dw;
+ dw = CAMELLIA_SUBKEY_L(27) ^ CAMELLIA_SUBKEY_R(27),
+ dw = CAMELLIA_RL8(dw);/* round 20 */
+ CAMELLIA_SUBKEY_R(27) = CAMELLIA_SUBKEY_L(27) ^ dw,
+ CAMELLIA_SUBKEY_L(27) = dw;
+ dw = CAMELLIA_SUBKEY_L(28) ^ CAMELLIA_SUBKEY_R(28),
+ dw = CAMELLIA_RL8(dw);/* round 21 */
+ CAMELLIA_SUBKEY_R(28) = CAMELLIA_SUBKEY_L(28) ^ dw,
+ CAMELLIA_SUBKEY_L(28) = dw;
+ dw = CAMELLIA_SUBKEY_L(29) ^ CAMELLIA_SUBKEY_R(29),
+ dw = CAMELLIA_RL8(dw);/* round 22 */
+ CAMELLIA_SUBKEY_R(29) = CAMELLIA_SUBKEY_L(29) ^ dw,
+ CAMELLIA_SUBKEY_L(29) = dw;
+ dw = CAMELLIA_SUBKEY_L(30) ^ CAMELLIA_SUBKEY_R(30),
+ dw = CAMELLIA_RL8(dw);/* round 23 */
+ CAMELLIA_SUBKEY_R(30) = CAMELLIA_SUBKEY_L(30) ^ dw,
+ CAMELLIA_SUBKEY_L(30) = dw;
+ dw = CAMELLIA_SUBKEY_L(31) ^ CAMELLIA_SUBKEY_R(31),
+ dw = CAMELLIA_RL8(dw);/* round 24 */
+ CAMELLIA_SUBKEY_R(31) = CAMELLIA_SUBKEY_L(31) ^ dw,
+ CAMELLIA_SUBKEY_L(31) = dw;
+
+ return;
+}
+
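+/**
+ * 192-bit keys reuse the 256-bit schedule: the right half of KR is
+ * padded with the bitwise complement of its left half (krrl = ~krll,
+ * krrr = ~krlr) to form a full 256-bit key.
+ */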
+static void camellia_setup192(const unsigned char *key, u32 *subkey)
+{
+ unsigned char kk[32];
+ u32 krll, krlr, krrl, krrr;
+
+ memcpy(kk, key, 24);
+ memcpy((unsigned char *)&krll, key + 16, 4);
+ memcpy((unsigned char *)&krlr, key + 20, 4);
+ krrl = ~krll;
+ krrr = ~krlr;
+ memcpy(kk + 24, (unsigned char *)&krrl, 4);
+ memcpy(kk + 28, (unsigned char *)&krrr, 4);
+ camellia_setup256(kk, subkey);
+ return;
+}
+
+
+/**
+ * Encryption/decryption for 128-bit keys (18 rounds, with FL/FLinv
+ * layers after rounds 6 and 12)
+ */
+static void camellia_encrypt128(const u32 *subkey, __be32 *io_text)
+{
+ u32 il, ir, t0, t1; /* temporary variables */
+
+ u32 io[4];
+
+ io[0] = be32_to_cpu(io_text[0]);
+ io[1] = be32_to_cpu(io_text[1]);
+ io[2] = be32_to_cpu(io_text[2]);
+ io[3] = be32_to_cpu(io_text[3]);
+
+ /* pre-whitening; kw2 was absorbed into the subkeys at key setup */
+ io[0] ^= CAMELLIA_SUBKEY_L(0);
+ io[1] ^= CAMELLIA_SUBKEY_R(0);
+ /* main iteration */
+
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+ CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8),
+ CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+ CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16),
+ CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23),
+ io[0],io[1],il,ir,t0,t1);
+
+ /* post-whitening; kw4 was absorbed into the subkeys at key setup */
+ io[2] ^= CAMELLIA_SUBKEY_L(24);
+ io[3] ^= CAMELLIA_SUBKEY_R(24);
+
+ t0 = io[0];
+ t1 = io[1];
+ io[0] = io[2];
+ io[1] = io[3];
+ io[2] = t0;
+ io[3] = t1;
+
+ io_text[0] = cpu_to_be32(io[0]);
+ io_text[1] = cpu_to_be32(io[1]);
+ io_text[2] = cpu_to_be32(io[2]);
+ io_text[3] = cpu_to_be32(io[3]);
+
+ return;
+}
+
+static void camellia_decrypt128(const u32 *subkey, __be32 *io_text)
+{
+ u32 il, ir, t0, t1; /* temporary variables */
+
+ u32 io[4];
+
+ io[0] = be32_to_cpu(io_text[0]);
+ io[1] = be32_to_cpu(io_text[1]);
+ io[2] = be32_to_cpu(io_text[2]);
+ io[3] = be32_to_cpu(io_text[3]);
+
+ /* pre-whitening; kw2 was absorbed into the subkeys at key setup */
+ io[0] ^= CAMELLIA_SUBKEY_L(24);
+ io[1] ^= CAMELLIA_SUBKEY_R(24);
+
+ /* main iteration */
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+ CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17),
+ CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+ CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9),
+ CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2),
+ io[0],io[1],il,ir,t0,t1);
+
+ /* post-whitening; kw4 was absorbed into the subkeys at key setup */
+ io[2] ^= CAMELLIA_SUBKEY_L(0);
+ io[3] ^= CAMELLIA_SUBKEY_R(0);
+
+ t0 = io[0];
+ t1 = io[1];
+ io[0] = io[2];
+ io[1] = io[3];
+ io[2] = t0;
+ io[3] = t1;
+
+ io_text[0] = cpu_to_be32(io[0]);
+ io_text[1] = cpu_to_be32(io[1]);
+ io_text[2] = cpu_to_be32(io[2]);
+ io_text[3] = cpu_to_be32(io[3]);
+
+ return;
+}
+
+
+/**
+ * Encryption/decryption for 192- and 256-bit keys (24 rounds, with
+ * FL/FLinv layers after rounds 6, 12 and 18)
+ */
+static void camellia_encrypt256(const u32 *subkey, __be32 *io_text)
+{
+ u32 il, ir, t0, t1; /* temporary variables */
+
+ u32 io[4];
+
+ io[0] = be32_to_cpu(io_text[0]);
+ io[1] = be32_to_cpu(io_text[1]);
+ io[2] = be32_to_cpu(io_text[2]);
+ io[3] = be32_to_cpu(io_text[3]);
+
+ /* pre-whitening; kw2 was absorbed into the subkeys at key setup */
+ io[0] ^= CAMELLIA_SUBKEY_L(0);
+ io[1] ^= CAMELLIA_SUBKEY_R(0);
+
+ /* main iteration */
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+ CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8),
+ CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+ CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16),
+ CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+ CAMELLIA_SUBKEY_L(24),CAMELLIA_SUBKEY_R(24),
+ CAMELLIA_SUBKEY_L(25),CAMELLIA_SUBKEY_R(25),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(26),CAMELLIA_SUBKEY_R(26),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(27),CAMELLIA_SUBKEY_R(27),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(28),CAMELLIA_SUBKEY_R(28),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(29),CAMELLIA_SUBKEY_R(29),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(30),CAMELLIA_SUBKEY_R(30),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(31),CAMELLIA_SUBKEY_R(31),
+ io[0],io[1],il,ir,t0,t1);
+
+ /* post-whitening; kw4 was absorbed into the subkeys at key setup */
+ io[2] ^= CAMELLIA_SUBKEY_L(32);
+ io[3] ^= CAMELLIA_SUBKEY_R(32);
+
+ t0 = io[0];
+ t1 = io[1];
+ io[0] = io[2];
+ io[1] = io[3];
+ io[2] = t0;
+ io[3] = t1;
+
+ io_text[0] = cpu_to_be32(io[0]);
+ io_text[1] = cpu_to_be32(io[1]);
+ io_text[2] = cpu_to_be32(io[2]);
+ io_text[3] = cpu_to_be32(io[3]);
+
+ return;
+}
+
+
+static void camellia_decrypt256(const u32 *subkey, __be32 *io_text)
+{
+ u32 il, ir, t0, t1; /* temporary variables */
+
+ u32 io[4];
+
+ io[0] = be32_to_cpu(io_text[0]);
+ io[1] = be32_to_cpu(io_text[1]);
+ io[2] = be32_to_cpu(io_text[2]);
+ io[3] = be32_to_cpu(io_text[3]);
+
+ /* pre-whitening; kw2 was absorbed into the subkeys at key setup */
+ io[0] ^= CAMELLIA_SUBKEY_L(32);
+ io[1] ^= CAMELLIA_SUBKEY_R(32);
+
+ /* main iteration */
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(31),CAMELLIA_SUBKEY_R(31),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(30),CAMELLIA_SUBKEY_R(30),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(29),CAMELLIA_SUBKEY_R(29),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(28),CAMELLIA_SUBKEY_R(28),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(27),CAMELLIA_SUBKEY_R(27),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(26),CAMELLIA_SUBKEY_R(26),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+ CAMELLIA_SUBKEY_L(25),CAMELLIA_SUBKEY_R(25),
+ CAMELLIA_SUBKEY_L(24),CAMELLIA_SUBKEY_R(24),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(23),CAMELLIA_SUBKEY_R(23),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(22),CAMELLIA_SUBKEY_R(22),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(21),CAMELLIA_SUBKEY_R(21),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(20),CAMELLIA_SUBKEY_R(20),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(19),CAMELLIA_SUBKEY_R(19),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(18),CAMELLIA_SUBKEY_R(18),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+ CAMELLIA_SUBKEY_L(17),CAMELLIA_SUBKEY_R(17),
+ CAMELLIA_SUBKEY_L(16),CAMELLIA_SUBKEY_R(16),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(15),CAMELLIA_SUBKEY_R(15),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(14),CAMELLIA_SUBKEY_R(14),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(13),CAMELLIA_SUBKEY_R(13),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(12),CAMELLIA_SUBKEY_R(12),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(11),CAMELLIA_SUBKEY_R(11),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(10),CAMELLIA_SUBKEY_R(10),
+ io[0],io[1],il,ir,t0,t1);
+
+ CAMELLIA_FLS(io[0],io[1],io[2],io[3],
+ CAMELLIA_SUBKEY_L(9),CAMELLIA_SUBKEY_R(9),
+ CAMELLIA_SUBKEY_L(8),CAMELLIA_SUBKEY_R(8),
+ t0,t1,il,ir);
+
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(7),CAMELLIA_SUBKEY_R(7),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(6),CAMELLIA_SUBKEY_R(6),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(5),CAMELLIA_SUBKEY_R(5),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(4),CAMELLIA_SUBKEY_R(4),
+ io[0],io[1],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[0],io[1],
+ CAMELLIA_SUBKEY_L(3),CAMELLIA_SUBKEY_R(3),
+ io[2],io[3],il,ir,t0,t1);
+ CAMELLIA_ROUNDSM(io[2],io[3],
+ CAMELLIA_SUBKEY_L(2),CAMELLIA_SUBKEY_R(2),
+ io[0],io[1],il,ir,t0,t1);
+
+ /* post-whitening; kw4 was absorbed into the subkeys at key setup */
+ io[2] ^= CAMELLIA_SUBKEY_L(0);
+ io[3] ^= CAMELLIA_SUBKEY_R(0);
+
+ t0 = io[0];
+ t1 = io[1];
+ io[0] = io[2];
+ io[1] = io[3];
+ io[2] = t0;
+ io[3] = t1;
+
+ io_text[0] = cpu_to_be32(io[0]);
+ io_text[1] = cpu_to_be32(io[1]);
+ io_text[2] = cpu_to_be32(io[2]);
+ io_text[3] = cpu_to_be32(io[3]);
+
+ return;
+}
+
+
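+/**
+ * Accepts 128-, 192- or 256-bit keys; any other length is rejected
+ * with CRYPTO_TFM_RES_BAD_KEY_LEN.  The expanded subkeys are cached
+ * in the per-tfm context.
+ */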
+static int
+camellia_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+{
+ struct camellia_ctx *cctx = crypto_tfm_ctx(tfm);
+ const unsigned char *key = (const unsigned char *)in_key;
+ u32 *flags = &tfm->crt_flags;
+
+ if (key_len != 16 && key_len != 24 && key_len != 32) {
+ *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+ }
+
+ cctx->key_length = key_len;
+
+ switch (key_len) {
+ case 16:
+ camellia_setup128(key, cctx->key_table);
+ break;
+ case 24:
+ camellia_setup192(key, cctx->key_table);
+ break;
+ case 32:
+ camellia_setup256(key, cctx->key_table);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+
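+/**
+ * The block is copied into a local __be32 buffer so the word-oriented
+ * round functions can work in place; 192-bit keys share the 24-round
+ * (256-bit) code path.
+ */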
+static void camellia_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm);
+ const __be32 *src = (const __be32 *)in;
+ __be32 *dst = (__be32 *)out;
+
+ __be32 tmp[4];
+
+ memcpy(tmp, src, CAMELLIA_BLOCK_SIZE);
+
+ switch (cctx->key_length) {
+ case 16:
+ camellia_encrypt128(cctx->key_table, tmp);
+ break;
+ case 24:
+ /* fall through */
+ case 32:
+ camellia_encrypt256(cctx->key_table, tmp);
+ break;
+ default:
+ break;
+ }
+
+ memcpy(dst, tmp, CAMELLIA_BLOCK_SIZE);
+}
+
+
+static void camellia_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+{
+ const struct camellia_ctx *cctx = crypto_tfm_ctx(tfm);
+ const __be32 *src = (const __be32 *)in;
+ __be32 *dst = (__be32 *)out;
+
+ __be32 tmp[4];
+
+ memcpy(tmp, src, CAMELLIA_BLOCK_SIZE);
+
+ switch (cctx->key_length) {
+ case 16:
+ camellia_decrypt128(cctx->key_table, tmp);
+ break;
+ case 24:
+ /* fall through */
+ case 32:
+ camellia_decrypt256(cctx->key_table, tmp);
+ break;
+ default:
+ break;
+ }
+
+ memcpy(dst, tmp, CAMELLIA_BLOCK_SIZE);
+}
+
+
+static struct crypto_alg camellia_alg = {
+ .cra_name = "camellia",
+ .cra_driver_name = "camellia-generic",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = CAMELLIA_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct camellia_ctx),
+ .cra_alignmask = 3,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(camellia_alg.cra_list),
+ .cra_u = {
+ .cipher = {
+ .cia_min_keysize = CAMELLIA_MIN_KEY_SIZE,
+ .cia_max_keysize = CAMELLIA_MAX_KEY_SIZE,
+ .cia_setkey = camellia_set_key,
+ .cia_encrypt = camellia_encrypt,
+ .cia_decrypt = camellia_decrypt
+ }
+ }
+};
+
+static int __init camellia_init(void)
+{
+ return crypto_register_alg(&camellia_alg);
+}
+
+
+static void __exit camellia_fini(void)
+{
+ crypto_unregister_alg(&camellia_alg);
+}
+
+
+module_init(camellia_init);
+module_exit(camellia_fini);
+
+
+MODULE_DESCRIPTION("Camellia Cipher Algorithm");
+MODULE_LICENSE("GPL");
diff --git a/crypto/cbc.c b/crypto/cbc.c
index f5542b4db38..136fea7e700 100644
--- a/crypto/cbc.c
+++ b/crypto/cbc.c
@@ -243,6 +243,7 @@ static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_cipher *cipher;
switch (crypto_tfm_alg_blocksize(tfm)) {
case 8:
@@ -260,11 +261,11 @@ static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
ctx->xor = xor_quad;
}
- tfm = crypto_spawn_tfm(spawn);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
+ cipher = crypto_spawn_cipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
- ctx->child = crypto_cipher_cast(tfm);
+ ctx->child = cipher;
return 0;
}
diff --git a/crypto/cipher.c b/crypto/cipher.c
index 9e03701cfdc..333aab2f027 100644
--- a/crypto/cipher.c
+++ b/crypto/cipher.c
@@ -12,274 +12,13 @@
* any later version.
*
*/
-#include <linux/compiler.h>
+
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/errno.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
+#include <linux/scatterlist.h>
#include <linux/string.h>
-#include <asm/scatterlist.h>
#include "internal.h"
-#include "scatterwalk.h"
-
-struct cipher_alg_compat {
- unsigned int cia_min_keysize;
- unsigned int cia_max_keysize;
- int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
- void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
- void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
-
- unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc,
- u8 *dst, const u8 *src,
- unsigned int nbytes);
- unsigned int (*cia_decrypt_ecb)(const struct cipher_desc *desc,
- u8 *dst, const u8 *src,
- unsigned int nbytes);
- unsigned int (*cia_encrypt_cbc)(const struct cipher_desc *desc,
- u8 *dst, const u8 *src,
- unsigned int nbytes);
- unsigned int (*cia_decrypt_cbc)(const struct cipher_desc *desc,
- u8 *dst, const u8 *src,
- unsigned int nbytes);
-};
-
-static inline void xor_64(u8 *a, const u8 *b)
-{
- ((u32 *)a)[0] ^= ((u32 *)b)[0];
- ((u32 *)a)[1] ^= ((u32 *)b)[1];
-}
-
-static inline void xor_128(u8 *a, const u8 *b)
-{
- ((u32 *)a)[0] ^= ((u32 *)b)[0];
- ((u32 *)a)[1] ^= ((u32 *)b)[1];
- ((u32 *)a)[2] ^= ((u32 *)b)[2];
- ((u32 *)a)[3] ^= ((u32 *)b)[3];
-}
-
-static unsigned int crypt_slow(const struct cipher_desc *desc,
- struct scatter_walk *in,
- struct scatter_walk *out, unsigned int bsize)
-{
- unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
- u8 buffer[bsize * 2 + alignmask];
- u8 *src = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
- u8 *dst = src + bsize;
-
- scatterwalk_copychunks(src, in, bsize, 0);
- desc->prfn(desc, dst, src, bsize);
- scatterwalk_copychunks(dst, out, bsize, 1);
-
- return bsize;
-}
-
-static inline unsigned int crypt_fast(const struct cipher_desc *desc,
- struct scatter_walk *in,
- struct scatter_walk *out,
- unsigned int nbytes, u8 *tmp)
-{
- u8 *src, *dst;
- u8 *real_src, *real_dst;
-
- real_src = scatterwalk_map(in, 0);
- real_dst = scatterwalk_map(out, 1);
-
- src = real_src;
- dst = scatterwalk_samebuf(in, out) ? src : real_dst;
-
- if (tmp) {
- memcpy(tmp, src, nbytes);
- src = tmp;
- dst = tmp;
- }
-
- nbytes = desc->prfn(desc, dst, src, nbytes);
-
- if (tmp)
- memcpy(real_dst, tmp, nbytes);
-
- scatterwalk_unmap(real_src, 0);
- scatterwalk_unmap(real_dst, 1);
-
- scatterwalk_advance(in, nbytes);
- scatterwalk_advance(out, nbytes);
-
- return nbytes;
-}
-
-/*
- * Generic encrypt/decrypt wrapper for ciphers, handles operations across
- * multiple page boundaries by using temporary blocks. In user context,
- * the kernel is given a chance to schedule us once per page.
- */
-static int crypt(const struct cipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- struct scatter_walk walk_in, walk_out;
- struct crypto_tfm *tfm = desc->tfm;
- const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
- unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
- unsigned long buffer = 0;
-
- if (!nbytes)
- return 0;
-
- if (nbytes % bsize) {
- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
- return -EINVAL;
- }
-
- scatterwalk_start(&walk_in, src);
- scatterwalk_start(&walk_out, dst);
-
- for(;;) {
- unsigned int n = nbytes;
- u8 *tmp = NULL;
-
- if (!scatterwalk_aligned(&walk_in, alignmask) ||
- !scatterwalk_aligned(&walk_out, alignmask)) {
- if (!buffer) {
- buffer = __get_free_page(GFP_ATOMIC);
- if (!buffer)
- n = 0;
- }
- tmp = (u8 *)buffer;
- }
-
- n = scatterwalk_clamp(&walk_in, n);
- n = scatterwalk_clamp(&walk_out, n);
-
- if (likely(n >= bsize))
- n = crypt_fast(desc, &walk_in, &walk_out, n, tmp);
- else
- n = crypt_slow(desc, &walk_in, &walk_out, bsize);
-
- nbytes -= n;
-
- scatterwalk_done(&walk_in, 0, nbytes);
- scatterwalk_done(&walk_out, 1, nbytes);
-
- if (!nbytes)
- break;
-
- crypto_yield(tfm->crt_flags);
- }
-
- if (buffer)
- free_page(buffer);
-
- return 0;
-}
-
-static int crypt_iv_unaligned(struct cipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- struct crypto_tfm *tfm = desc->tfm;
- unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
- u8 *iv = desc->info;
-
- if (unlikely(((unsigned long)iv & alignmask))) {
- unsigned int ivsize = tfm->crt_cipher.cit_ivsize;
- u8 buffer[ivsize + alignmask];
- u8 *tmp = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
- int err;
-
- desc->info = memcpy(tmp, iv, ivsize);
- err = crypt(desc, dst, src, nbytes);
- memcpy(iv, tmp, ivsize);
-
- return err;
- }
-
- return crypt(desc, dst, src, nbytes);
-}
-
-static unsigned int cbc_process_encrypt(const struct cipher_desc *desc,
- u8 *dst, const u8 *src,
- unsigned int nbytes)
-{
- struct crypto_tfm *tfm = desc->tfm;
- void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
- int bsize = crypto_tfm_alg_blocksize(tfm);
-
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
- u8 *iv = desc->info;
- unsigned int done = 0;
-
- nbytes -= bsize;
-
- do {
- xor(iv, src);
- fn(tfm, dst, iv);
- memcpy(iv, dst, bsize);
-
- src += bsize;
- dst += bsize;
- } while ((done += bsize) <= nbytes);
-
- return done;
-}
-
-static unsigned int cbc_process_decrypt(const struct cipher_desc *desc,
- u8 *dst, const u8 *src,
- unsigned int nbytes)
-{
- struct crypto_tfm *tfm = desc->tfm;
- void (*xor)(u8 *, const u8 *) = tfm->crt_u.cipher.cit_xor_block;
- int bsize = crypto_tfm_alg_blocksize(tfm);
- unsigned long alignmask = crypto_tfm_alg_alignmask(desc->tfm);
-
- u8 stack[src == dst ? bsize + alignmask : 0];
- u8 *buf = (u8 *)ALIGN((unsigned long)stack, alignmask + 1);
- u8 **dst_p = src == dst ? &buf : &dst;
-
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
- u8 *iv = desc->info;
- unsigned int done = 0;
-
- nbytes -= bsize;
-
- do {
- u8 *tmp_dst = *dst_p;
-
- fn(tfm, tmp_dst, src);
- xor(tmp_dst, iv);
- memcpy(iv, src, bsize);
- if (tmp_dst != dst)
- memcpy(dst, tmp_dst, bsize);
-
- src += bsize;
- dst += bsize;
- } while ((done += bsize) <= nbytes);
-
- return done;
-}
-
-static unsigned int ecb_process(const struct cipher_desc *desc, u8 *dst,
- const u8 *src, unsigned int nbytes)
-{
- struct crypto_tfm *tfm = desc->tfm;
- int bsize = crypto_tfm_alg_blocksize(tfm);
- void (*fn)(struct crypto_tfm *, u8 *, const u8 *) = desc->crfn;
- unsigned int done = 0;
-
- nbytes -= bsize;
-
- do {
- fn(tfm, dst, src);
-
- src += bsize;
- dst += bsize;
- } while ((done += bsize) <= nbytes);
-
- return done;
-}
static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
{
@@ -293,122 +32,6 @@ static int setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
return cia->cia_setkey(tfm, key, keylen);
}
-static int ecb_encrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes)
-{
- struct cipher_desc desc;
- struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
-
- desc.tfm = tfm;
- desc.crfn = cipher->cia_encrypt;
- desc.prfn = cipher->cia_encrypt_ecb ?: ecb_process;
-
- return crypt(&desc, dst, src, nbytes);
-}
-
-static int ecb_decrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- struct cipher_desc desc;
- struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
-
- desc.tfm = tfm;
- desc.crfn = cipher->cia_decrypt;
- desc.prfn = cipher->cia_decrypt_ecb ?: ecb_process;
-
- return crypt(&desc, dst, src, nbytes);
-}
-
-static int cbc_encrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- struct cipher_desc desc;
- struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
-
- desc.tfm = tfm;
- desc.crfn = cipher->cia_encrypt;
- desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt;
- desc.info = tfm->crt_cipher.cit_iv;
-
- return crypt(&desc, dst, src, nbytes);
-}
-
-static int cbc_encrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv)
-{
- struct cipher_desc desc;
- struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
-
- desc.tfm = tfm;
- desc.crfn = cipher->cia_encrypt;
- desc.prfn = cipher->cia_encrypt_cbc ?: cbc_process_encrypt;
- desc.info = iv;
-
- return crypt_iv_unaligned(&desc, dst, src, nbytes);
-}
-
-static int cbc_decrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- struct cipher_desc desc;
- struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
-
- desc.tfm = tfm;
- desc.crfn = cipher->cia_decrypt;
- desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt;
- desc.info = tfm->crt_cipher.cit_iv;
-
- return crypt(&desc, dst, src, nbytes);
-}
-
-static int cbc_decrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv)
-{
- struct cipher_desc desc;
- struct cipher_alg_compat *cipher = (void *)&tfm->__crt_alg->cra_cipher;
-
- desc.tfm = tfm;
- desc.crfn = cipher->cia_decrypt;
- desc.prfn = cipher->cia_decrypt_cbc ?: cbc_process_decrypt;
- desc.info = iv;
-
- return crypt_iv_unaligned(&desc, dst, src, nbytes);
-}
-
-static int nocrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return -ENOSYS;
-}
-
-static int nocrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv)
-{
- return -ENOSYS;
-}
-
-int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags)
-{
- u32 mode = flags & CRYPTO_TFM_MODE_MASK;
- tfm->crt_cipher.cit_mode = mode ? mode : CRYPTO_TFM_MODE_ECB;
- return 0;
-}
-
static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *,
const u8 *),
struct crypto_tfm *tfm,
@@ -454,7 +77,6 @@ static void cipher_decrypt_unaligned(struct crypto_tfm *tfm,
int crypto_init_cipher_ops(struct crypto_tfm *tfm)
{
- int ret = 0;
struct cipher_tfm *ops = &tfm->crt_cipher;
struct cipher_alg *cipher = &tfm->__crt_alg->cra_cipher;
@@ -464,70 +86,7 @@ int crypto_init_cipher_ops(struct crypto_tfm *tfm)
ops->cit_decrypt_one = crypto_tfm_alg_alignmask(tfm) ?
cipher_decrypt_unaligned : cipher->cia_decrypt;
- switch (tfm->crt_cipher.cit_mode) {
- case CRYPTO_TFM_MODE_ECB:
- ops->cit_encrypt = ecb_encrypt;
- ops->cit_decrypt = ecb_decrypt;
- ops->cit_encrypt_iv = nocrypt_iv;
- ops->cit_decrypt_iv = nocrypt_iv;
- break;
-
- case CRYPTO_TFM_MODE_CBC:
- ops->cit_encrypt = cbc_encrypt;
- ops->cit_decrypt = cbc_decrypt;
- ops->cit_encrypt_iv = cbc_encrypt_iv;
- ops->cit_decrypt_iv = cbc_decrypt_iv;
- break;
-
- case CRYPTO_TFM_MODE_CFB:
- ops->cit_encrypt = nocrypt;
- ops->cit_decrypt = nocrypt;
- ops->cit_encrypt_iv = nocrypt_iv;
- ops->cit_decrypt_iv = nocrypt_iv;
- break;
-
- case CRYPTO_TFM_MODE_CTR:
- ops->cit_encrypt = nocrypt;
- ops->cit_decrypt = nocrypt;
- ops->cit_encrypt_iv = nocrypt_iv;
- ops->cit_decrypt_iv = nocrypt_iv;
- break;
-
- default:
- BUG();
- }
-
- if (ops->cit_mode == CRYPTO_TFM_MODE_CBC) {
- unsigned long align;
- unsigned long addr;
-
- switch (crypto_tfm_alg_blocksize(tfm)) {
- case 8:
- ops->cit_xor_block = xor_64;
- break;
-
- case 16:
- ops->cit_xor_block = xor_128;
- break;
-
- default:
- printk(KERN_WARNING "%s: block size %u not supported\n",
- crypto_tfm_alg_name(tfm),
- crypto_tfm_alg_blocksize(tfm));
- ret = -EINVAL;
- goto out;
- }
-
- ops->cit_ivsize = crypto_tfm_alg_blocksize(tfm);
- align = crypto_tfm_alg_alignmask(tfm) + 1;
- addr = (unsigned long)crypto_tfm_ctx(tfm);
- addr = ALIGN(addr, align);
- addr += ALIGN(tfm->__crt_alg->cra_ctxsize, align);
- ops->cit_iv = (void *)addr;
- }
-
-out:
- return ret;
+ return 0;
}
void crypto_exit_cipher_ops(struct crypto_tfm *tfm)
diff --git a/crypto/compress.c b/crypto/compress.c
index eca182aa338..0a6570048c1 100644
--- a/crypto/compress.c
+++ b/crypto/compress.c
@@ -34,11 +34,6 @@ static int crypto_decompress(struct crypto_tfm *tfm,
dlen);
}
-int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags)
-{
- return flags ? -EINVAL : 0;
-}
-
int crypto_init_compress_ops(struct crypto_tfm *tfm)
{
struct compress_tfm *ops = &tfm->crt_compress;
diff --git a/crypto/digest.c b/crypto/digest.c
index 8f4593268ce..1bf7414aeb9 100644
--- a/crypto/digest.c
+++ b/crypto/digest.c
@@ -14,7 +14,9 @@
#include <linux/mm.h>
#include <linux/errno.h>
+#include <linux/hardirq.h>
#include <linux/highmem.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
@@ -29,8 +31,8 @@ static int init(struct hash_desc *desc)
return 0;
}
-static int update(struct hash_desc *desc,
- struct scatterlist *sg, unsigned int nbytes)
+static int update2(struct hash_desc *desc,
+ struct scatterlist *sg, unsigned int nbytes)
{
struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);
@@ -81,6 +83,14 @@ static int update(struct hash_desc *desc,
return 0;
}
+static int update(struct hash_desc *desc,
+ struct scatterlist *sg, unsigned int nbytes)
+{
+ if (WARN_ON_ONCE(in_irq()))
+ return -EDEADLK;
+ return update2(desc, sg, nbytes);
+}
+
static int final(struct hash_desc *desc, u8 *out)
{
struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
@@ -118,16 +128,14 @@ static int setkey(struct crypto_hash *hash, const u8 *key, unsigned int keylen)
static int digest(struct hash_desc *desc,
struct scatterlist *sg, unsigned int nbytes, u8 *out)
{
+ if (WARN_ON_ONCE(in_irq()))
+ return -EDEADLK;
+
init(desc);
- update(desc, sg, nbytes);
+ update2(desc, sg, nbytes);
return final(desc, out);
}
-int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags)
-{
- return flags ? -EINVAL : 0;
-}
-
int crypto_init_digest_ops(struct crypto_tfm *tfm)
{
struct hash_tfm *ops = &tfm->crt_hash;
diff --git a/crypto/ecb.c b/crypto/ecb.c
index f239aa9c401..839a0aed8c2 100644
--- a/crypto/ecb.c
+++ b/crypto/ecb.c
@@ -99,12 +99,13 @@ static int crypto_ecb_init_tfm(struct crypto_tfm *tfm)
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct crypto_ecb_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_cipher *cipher;
- tfm = crypto_spawn_tfm(spawn);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
+ cipher = crypto_spawn_cipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
- ctx->child = crypto_cipher_cast(tfm);
+ ctx->child = cipher;
return 0;
}
diff --git a/crypto/fcrypt.c b/crypto/fcrypt.c
new file mode 100644
index 00000000000..9c2bb535b09
--- /dev/null
+++ b/crypto/fcrypt.c
@@ -0,0 +1,423 @@
+/* FCrypt encryption algorithm
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Based on code:
+ *
+ * Copyright (c) 1995 - 2000 Kungliga Tekniska Högskolan
+ * (Royal Institute of Technology, Stockholm, Sweden).
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Institute nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+
+#define ROUNDS 16
+
+struct fcrypt_ctx {
+ u32 sched[ROUNDS];
+};
+
+/*
+ * Rotate right a 56-bit number held as two 32-bit words (hi holds the upper
+ * 24 bits, lo the lower 32 bits)
+ */
+#define ror56(hi, lo, n) \
+do { \
+ u32 t = lo & ((1 << n) - 1); \
+ lo = (lo >> n) | ((hi & ((1 << n) - 1)) << (32 - n)); \
+ hi = (hi >> n) | (t << (24-n)); \
+} while(0)
+
+/* Rotate right a 56-bit number held in the low bits of a 64-bit word */
+#define ror56_64(k, n) \
+do { \
+ k = (k >> n) | ((k & ((1 << n) - 1)) << (56 - n)); \
+} while(0)
+
+/*
+ * S-boxes for the Feistel network, derived from
+ * /afs/transarc.com/public/afsps/afs.rel31b.export-src/rxkad/sboxes.h
+ */
+#undef Z
+#define Z(x) __constant_be32_to_cpu(x << 3)
+static const u32 sbox0[256] = {
+ Z(0xea), Z(0x7f), Z(0xb2), Z(0x64), Z(0x9d), Z(0xb0), Z(0xd9), Z(0x11),
+ Z(0xcd), Z(0x86), Z(0x86), Z(0x91), Z(0x0a), Z(0xb2), Z(0x93), Z(0x06),
+ Z(0x0e), Z(0x06), Z(0xd2), Z(0x65), Z(0x73), Z(0xc5), Z(0x28), Z(0x60),
+ Z(0xf2), Z(0x20), Z(0xb5), Z(0x38), Z(0x7e), Z(0xda), Z(0x9f), Z(0xe3),
+ Z(0xd2), Z(0xcf), Z(0xc4), Z(0x3c), Z(0x61), Z(0xff), Z(0x4a), Z(0x4a),
+ Z(0x35), Z(0xac), Z(0xaa), Z(0x5f), Z(0x2b), Z(0xbb), Z(0xbc), Z(0x53),
+ Z(0x4e), Z(0x9d), Z(0x78), Z(0xa3), Z(0xdc), Z(0x09), Z(0x32), Z(0x10),
+ Z(0xc6), Z(0x6f), Z(0x66), Z(0xd6), Z(0xab), Z(0xa9), Z(0xaf), Z(0xfd),
+ Z(0x3b), Z(0x95), Z(0xe8), Z(0x34), Z(0x9a), Z(0x81), Z(0x72), Z(0x80),
+ Z(0x9c), Z(0xf3), Z(0xec), Z(0xda), Z(0x9f), Z(0x26), Z(0x76), Z(0x15),
+ Z(0x3e), Z(0x55), Z(0x4d), Z(0xde), Z(0x84), Z(0xee), Z(0xad), Z(0xc7),
+ Z(0xf1), Z(0x6b), Z(0x3d), Z(0xd3), Z(0x04), Z(0x49), Z(0xaa), Z(0x24),
+ Z(0x0b), Z(0x8a), Z(0x83), Z(0xba), Z(0xfa), Z(0x85), Z(0xa0), Z(0xa8),
+ Z(0xb1), Z(0xd4), Z(0x01), Z(0xd8), Z(0x70), Z(0x64), Z(0xf0), Z(0x51),
+ Z(0xd2), Z(0xc3), Z(0xa7), Z(0x75), Z(0x8c), Z(0xa5), Z(0x64), Z(0xef),
+ Z(0x10), Z(0x4e), Z(0xb7), Z(0xc6), Z(0x61), Z(0x03), Z(0xeb), Z(0x44),
+ Z(0x3d), Z(0xe5), Z(0xb3), Z(0x5b), Z(0xae), Z(0xd5), Z(0xad), Z(0x1d),
+ Z(0xfa), Z(0x5a), Z(0x1e), Z(0x33), Z(0xab), Z(0x93), Z(0xa2), Z(0xb7),
+ Z(0xe7), Z(0xa8), Z(0x45), Z(0xa4), Z(0xcd), Z(0x29), Z(0x63), Z(0x44),
+ Z(0xb6), Z(0x69), Z(0x7e), Z(0x2e), Z(0x62), Z(0x03), Z(0xc8), Z(0xe0),
+ Z(0x17), Z(0xbb), Z(0xc7), Z(0xf3), Z(0x3f), Z(0x36), Z(0xba), Z(0x71),
+ Z(0x8e), Z(0x97), Z(0x65), Z(0x60), Z(0x69), Z(0xb6), Z(0xf6), Z(0xe6),
+ Z(0x6e), Z(0xe0), Z(0x81), Z(0x59), Z(0xe8), Z(0xaf), Z(0xdd), Z(0x95),
+ Z(0x22), Z(0x99), Z(0xfd), Z(0x63), Z(0x19), Z(0x74), Z(0x61), Z(0xb1),
+ Z(0xb6), Z(0x5b), Z(0xae), Z(0x54), Z(0xb3), Z(0x70), Z(0xff), Z(0xc6),
+ Z(0x3b), Z(0x3e), Z(0xc1), Z(0xd7), Z(0xe1), Z(0x0e), Z(0x76), Z(0xe5),
+ Z(0x36), Z(0x4f), Z(0x59), Z(0xc7), Z(0x08), Z(0x6e), Z(0x82), Z(0xa6),
+ Z(0x93), Z(0xc4), Z(0xaa), Z(0x26), Z(0x49), Z(0xe0), Z(0x21), Z(0x64),
+ Z(0x07), Z(0x9f), Z(0x64), Z(0x81), Z(0x9c), Z(0xbf), Z(0xf9), Z(0xd1),
+ Z(0x43), Z(0xf8), Z(0xb6), Z(0xb9), Z(0xf1), Z(0x24), Z(0x75), Z(0x03),
+ Z(0xe4), Z(0xb0), Z(0x99), Z(0x46), Z(0x3d), Z(0xf5), Z(0xd1), Z(0x39),
+ Z(0x72), Z(0x12), Z(0xf6), Z(0xba), Z(0x0c), Z(0x0d), Z(0x42), Z(0x2e)
+};
+
+#undef Z
+#define Z(x) __constant_be32_to_cpu((x << 27) | (x >> 5))
+static const u32 sbox1[256] = {
+ Z(0x77), Z(0x14), Z(0xa6), Z(0xfe), Z(0xb2), Z(0x5e), Z(0x8c), Z(0x3e),
+ Z(0x67), Z(0x6c), Z(0xa1), Z(0x0d), Z(0xc2), Z(0xa2), Z(0xc1), Z(0x85),
+ Z(0x6c), Z(0x7b), Z(0x67), Z(0xc6), Z(0x23), Z(0xe3), Z(0xf2), Z(0x89),
+ Z(0x50), Z(0x9c), Z(0x03), Z(0xb7), Z(0x73), Z(0xe6), Z(0xe1), Z(0x39),
+ Z(0x31), Z(0x2c), Z(0x27), Z(0x9f), Z(0xa5), Z(0x69), Z(0x44), Z(0xd6),
+ Z(0x23), Z(0x83), Z(0x98), Z(0x7d), Z(0x3c), Z(0xb4), Z(0x2d), Z(0x99),
+ Z(0x1c), Z(0x1f), Z(0x8c), Z(0x20), Z(0x03), Z(0x7c), Z(0x5f), Z(0xad),
+ Z(0xf4), Z(0xfa), Z(0x95), Z(0xca), Z(0x76), Z(0x44), Z(0xcd), Z(0xb6),
+ Z(0xb8), Z(0xa1), Z(0xa1), Z(0xbe), Z(0x9e), Z(0x54), Z(0x8f), Z(0x0b),
+ Z(0x16), Z(0x74), Z(0x31), Z(0x8a), Z(0x23), Z(0x17), Z(0x04), Z(0xfa),
+ Z(0x79), Z(0x84), Z(0xb1), Z(0xf5), Z(0x13), Z(0xab), Z(0xb5), Z(0x2e),
+ Z(0xaa), Z(0x0c), Z(0x60), Z(0x6b), Z(0x5b), Z(0xc4), Z(0x4b), Z(0xbc),
+ Z(0xe2), Z(0xaf), Z(0x45), Z(0x73), Z(0xfa), Z(0xc9), Z(0x49), Z(0xcd),
+ Z(0x00), Z(0x92), Z(0x7d), Z(0x97), Z(0x7a), Z(0x18), Z(0x60), Z(0x3d),
+ Z(0xcf), Z(0x5b), Z(0xde), Z(0xc6), Z(0xe2), Z(0xe6), Z(0xbb), Z(0x8b),
+ Z(0x06), Z(0xda), Z(0x08), Z(0x15), Z(0x1b), Z(0x88), Z(0x6a), Z(0x17),
+ Z(0x89), Z(0xd0), Z(0xa9), Z(0xc1), Z(0xc9), Z(0x70), Z(0x6b), Z(0xe5),
+ Z(0x43), Z(0xf4), Z(0x68), Z(0xc8), Z(0xd3), Z(0x84), Z(0x28), Z(0x0a),
+ Z(0x52), Z(0x66), Z(0xa3), Z(0xca), Z(0xf2), Z(0xe3), Z(0x7f), Z(0x7a),
+ Z(0x31), Z(0xf7), Z(0x88), Z(0x94), Z(0x5e), Z(0x9c), Z(0x63), Z(0xd5),
+ Z(0x24), Z(0x66), Z(0xfc), Z(0xb3), Z(0x57), Z(0x25), Z(0xbe), Z(0x89),
+ Z(0x44), Z(0xc4), Z(0xe0), Z(0x8f), Z(0x23), Z(0x3c), Z(0x12), Z(0x52),
+ Z(0xf5), Z(0x1e), Z(0xf4), Z(0xcb), Z(0x18), Z(0x33), Z(0x1f), Z(0xf8),
+ Z(0x69), Z(0x10), Z(0x9d), Z(0xd3), Z(0xf7), Z(0x28), Z(0xf8), Z(0x30),
+ Z(0x05), Z(0x5e), Z(0x32), Z(0xc0), Z(0xd5), Z(0x19), Z(0xbd), Z(0x45),
+ Z(0x8b), Z(0x5b), Z(0xfd), Z(0xbc), Z(0xe2), Z(0x5c), Z(0xa9), Z(0x96),
+ Z(0xef), Z(0x70), Z(0xcf), Z(0xc2), Z(0x2a), Z(0xb3), Z(0x61), Z(0xad),
+ Z(0x80), Z(0x48), Z(0x81), Z(0xb7), Z(0x1d), Z(0x43), Z(0xd9), Z(0xd7),
+ Z(0x45), Z(0xf0), Z(0xd8), Z(0x8a), Z(0x59), Z(0x7c), Z(0x57), Z(0xc1),
+ Z(0x79), Z(0xc7), Z(0x34), Z(0xd6), Z(0x43), Z(0xdf), Z(0xe4), Z(0x78),
+ Z(0x16), Z(0x06), Z(0xda), Z(0x92), Z(0x76), Z(0x51), Z(0xe1), Z(0xd4),
+ Z(0x70), Z(0x03), Z(0xe0), Z(0x2f), Z(0x96), Z(0x91), Z(0x82), Z(0x80)
+};
+
+#undef Z
+#define Z(x) __constant_be32_to_cpu(x << 11)
+static const u32 sbox2[256] = {
+ Z(0xf0), Z(0x37), Z(0x24), Z(0x53), Z(0x2a), Z(0x03), Z(0x83), Z(0x86),
+ Z(0xd1), Z(0xec), Z(0x50), Z(0xf0), Z(0x42), Z(0x78), Z(0x2f), Z(0x6d),
+ Z(0xbf), Z(0x80), Z(0x87), Z(0x27), Z(0x95), Z(0xe2), Z(0xc5), Z(0x5d),
+ Z(0xf9), Z(0x6f), Z(0xdb), Z(0xb4), Z(0x65), Z(0x6e), Z(0xe7), Z(0x24),
+ Z(0xc8), Z(0x1a), Z(0xbb), Z(0x49), Z(0xb5), Z(0x0a), Z(0x7d), Z(0xb9),
+ Z(0xe8), Z(0xdc), Z(0xb7), Z(0xd9), Z(0x45), Z(0x20), Z(0x1b), Z(0xce),
+ Z(0x59), Z(0x9d), Z(0x6b), Z(0xbd), Z(0x0e), Z(0x8f), Z(0xa3), Z(0xa9),
+ Z(0xbc), Z(0x74), Z(0xa6), Z(0xf6), Z(0x7f), Z(0x5f), Z(0xb1), Z(0x68),
+ Z(0x84), Z(0xbc), Z(0xa9), Z(0xfd), Z(0x55), Z(0x50), Z(0xe9), Z(0xb6),
+ Z(0x13), Z(0x5e), Z(0x07), Z(0xb8), Z(0x95), Z(0x02), Z(0xc0), Z(0xd0),
+ Z(0x6a), Z(0x1a), Z(0x85), Z(0xbd), Z(0xb6), Z(0xfd), Z(0xfe), Z(0x17),
+ Z(0x3f), Z(0x09), Z(0xa3), Z(0x8d), Z(0xfb), Z(0xed), Z(0xda), Z(0x1d),
+ Z(0x6d), Z(0x1c), Z(0x6c), Z(0x01), Z(0x5a), Z(0xe5), Z(0x71), Z(0x3e),
+ Z(0x8b), Z(0x6b), Z(0xbe), Z(0x29), Z(0xeb), Z(0x12), Z(0x19), Z(0x34),
+ Z(0xcd), Z(0xb3), Z(0xbd), Z(0x35), Z(0xea), Z(0x4b), Z(0xd5), Z(0xae),
+ Z(0x2a), Z(0x79), Z(0x5a), Z(0xa5), Z(0x32), Z(0x12), Z(0x7b), Z(0xdc),
+ Z(0x2c), Z(0xd0), Z(0x22), Z(0x4b), Z(0xb1), Z(0x85), Z(0x59), Z(0x80),
+ Z(0xc0), Z(0x30), Z(0x9f), Z(0x73), Z(0xd3), Z(0x14), Z(0x48), Z(0x40),
+ Z(0x07), Z(0x2d), Z(0x8f), Z(0x80), Z(0x0f), Z(0xce), Z(0x0b), Z(0x5e),
+ Z(0xb7), Z(0x5e), Z(0xac), Z(0x24), Z(0x94), Z(0x4a), Z(0x18), Z(0x15),
+ Z(0x05), Z(0xe8), Z(0x02), Z(0x77), Z(0xa9), Z(0xc7), Z(0x40), Z(0x45),
+ Z(0x89), Z(0xd1), Z(0xea), Z(0xde), Z(0x0c), Z(0x79), Z(0x2a), Z(0x99),
+ Z(0x6c), Z(0x3e), Z(0x95), Z(0xdd), Z(0x8c), Z(0x7d), Z(0xad), Z(0x6f),
+ Z(0xdc), Z(0xff), Z(0xfd), Z(0x62), Z(0x47), Z(0xb3), Z(0x21), Z(0x8a),
+ Z(0xec), Z(0x8e), Z(0x19), Z(0x18), Z(0xb4), Z(0x6e), Z(0x3d), Z(0xfd),
+ Z(0x74), Z(0x54), Z(0x1e), Z(0x04), Z(0x85), Z(0xd8), Z(0xbc), Z(0x1f),
+ Z(0x56), Z(0xe7), Z(0x3a), Z(0x56), Z(0x67), Z(0xd6), Z(0xc8), Z(0xa5),
+ Z(0xf3), Z(0x8e), Z(0xde), Z(0xae), Z(0x37), Z(0x49), Z(0xb7), Z(0xfa),
+ Z(0xc8), Z(0xf4), Z(0x1f), Z(0xe0), Z(0x2a), Z(0x9b), Z(0x15), Z(0xd1),
+ Z(0x34), Z(0x0e), Z(0xb5), Z(0xe0), Z(0x44), Z(0x78), Z(0x84), Z(0x59),
+ Z(0x56), Z(0x68), Z(0x77), Z(0xa5), Z(0x14), Z(0x06), Z(0xf5), Z(0x2f),
+ Z(0x8c), Z(0x8a), Z(0x73), Z(0x80), Z(0x76), Z(0xb4), Z(0x10), Z(0x86)
+};
+
+#undef Z
+#define Z(x) __constant_be32_to_cpu(x << 19)
+static const u32 sbox3[256] = {
+ Z(0xa9), Z(0x2a), Z(0x48), Z(0x51), Z(0x84), Z(0x7e), Z(0x49), Z(0xe2),
+ Z(0xb5), Z(0xb7), Z(0x42), Z(0x33), Z(0x7d), Z(0x5d), Z(0xa6), Z(0x12),
+ Z(0x44), Z(0x48), Z(0x6d), Z(0x28), Z(0xaa), Z(0x20), Z(0x6d), Z(0x57),
+ Z(0xd6), Z(0x6b), Z(0x5d), Z(0x72), Z(0xf0), Z(0x92), Z(0x5a), Z(0x1b),
+ Z(0x53), Z(0x80), Z(0x24), Z(0x70), Z(0x9a), Z(0xcc), Z(0xa7), Z(0x66),
+ Z(0xa1), Z(0x01), Z(0xa5), Z(0x41), Z(0x97), Z(0x41), Z(0x31), Z(0x82),
+ Z(0xf1), Z(0x14), Z(0xcf), Z(0x53), Z(0x0d), Z(0xa0), Z(0x10), Z(0xcc),
+ Z(0x2a), Z(0x7d), Z(0xd2), Z(0xbf), Z(0x4b), Z(0x1a), Z(0xdb), Z(0x16),
+ Z(0x47), Z(0xf6), Z(0x51), Z(0x36), Z(0xed), Z(0xf3), Z(0xb9), Z(0x1a),
+ Z(0xa7), Z(0xdf), Z(0x29), Z(0x43), Z(0x01), Z(0x54), Z(0x70), Z(0xa4),
+ Z(0xbf), Z(0xd4), Z(0x0b), Z(0x53), Z(0x44), Z(0x60), Z(0x9e), Z(0x23),
+ Z(0xa1), Z(0x18), Z(0x68), Z(0x4f), Z(0xf0), Z(0x2f), Z(0x82), Z(0xc2),
+ Z(0x2a), Z(0x41), Z(0xb2), Z(0x42), Z(0x0c), Z(0xed), Z(0x0c), Z(0x1d),
+ Z(0x13), Z(0x3a), Z(0x3c), Z(0x6e), Z(0x35), Z(0xdc), Z(0x60), Z(0x65),
+ Z(0x85), Z(0xe9), Z(0x64), Z(0x02), Z(0x9a), Z(0x3f), Z(0x9f), Z(0x87),
+ Z(0x96), Z(0xdf), Z(0xbe), Z(0xf2), Z(0xcb), Z(0xe5), Z(0x6c), Z(0xd4),
+ Z(0x5a), Z(0x83), Z(0xbf), Z(0x92), Z(0x1b), Z(0x94), Z(0x00), Z(0x42),
+ Z(0xcf), Z(0x4b), Z(0x00), Z(0x75), Z(0xba), Z(0x8f), Z(0x76), Z(0x5f),
+ Z(0x5d), Z(0x3a), Z(0x4d), Z(0x09), Z(0x12), Z(0x08), Z(0x38), Z(0x95),
+ Z(0x17), Z(0xe4), Z(0x01), Z(0x1d), Z(0x4c), Z(0xa9), Z(0xcc), Z(0x85),
+ Z(0x82), Z(0x4c), Z(0x9d), Z(0x2f), Z(0x3b), Z(0x66), Z(0xa1), Z(0x34),
+ Z(0x10), Z(0xcd), Z(0x59), Z(0x89), Z(0xa5), Z(0x31), Z(0xcf), Z(0x05),
+ Z(0xc8), Z(0x84), Z(0xfa), Z(0xc7), Z(0xba), Z(0x4e), Z(0x8b), Z(0x1a),
+ Z(0x19), Z(0xf1), Z(0xa1), Z(0x3b), Z(0x18), Z(0x12), Z(0x17), Z(0xb0),
+ Z(0x98), Z(0x8d), Z(0x0b), Z(0x23), Z(0xc3), Z(0x3a), Z(0x2d), Z(0x20),
+ Z(0xdf), Z(0x13), Z(0xa0), Z(0xa8), Z(0x4c), Z(0x0d), Z(0x6c), Z(0x2f),
+ Z(0x47), Z(0x13), Z(0x13), Z(0x52), Z(0x1f), Z(0x2d), Z(0xf5), Z(0x79),
+ Z(0x3d), Z(0xa2), Z(0x54), Z(0xbd), Z(0x69), Z(0xc8), Z(0x6b), Z(0xf3),
+ Z(0x05), Z(0x28), Z(0xf1), Z(0x16), Z(0x46), Z(0x40), Z(0xb0), Z(0x11),
+ Z(0xd3), Z(0xb7), Z(0x95), Z(0x49), Z(0xcf), Z(0xc3), Z(0x1d), Z(0x8f),
+ Z(0xd8), Z(0xe1), Z(0x73), Z(0xdb), Z(0xad), Z(0xc8), Z(0xc9), Z(0xa9),
+ Z(0xa1), Z(0xc2), Z(0xc5), Z(0xe3), Z(0xba), Z(0xfc), Z(0x0e), Z(0x25)
+};
+
+/*
+ * One round of the 16-round Feistel network: XOR the round's key schedule
+ * word into R, then XOR the four S-box lookups selected by its bytes into L.
+ */
+#define F_ENCRYPT(R, L, sched) \
+do { \
+ union lc4 { u32 l; u8 c[4]; } u; \
+ u.l = sched ^ R; \
+ L ^= sbox0[u.c[0]] ^ sbox1[u.c[1]] ^ sbox2[u.c[2]] ^ sbox3[u.c[3]]; \
+} while(0)
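+
+/*
+ * The Z() tables above already fold in each S-box's bit rotation and the
+ * conversion to CPU byte order, so one round is just four table lookups
+ * XORed into L.
+ */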
+
+/* Encrypt one 64-bit block */
+static void fcrypt_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ const struct fcrypt_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct {
+ u32 l, r;
+ } X;
+
+ memcpy(&X, src, sizeof(X));
+
+ F_ENCRYPT(X.r, X.l, ctx->sched[0x0]);
+ F_ENCRYPT(X.l, X.r, ctx->sched[0x1]);
+ F_ENCRYPT(X.r, X.l, ctx->sched[0x2]);
+ F_ENCRYPT(X.l, X.r, ctx->sched[0x3]);
+ F_ENCRYPT(X.r, X.l, ctx->sched[0x4]);
+ F_ENCRYPT(X.l, X.r, ctx->sched[0x5]);
+ F_ENCRYPT(X.r, X.l, ctx->sched[0x6]);
+ F_ENCRYPT(X.l, X.r, ctx->sched[0x7]);
+ F_ENCRYPT(X.r, X.l, ctx->sched[0x8]);
+ F_ENCRYPT(X.l, X.r, ctx->sched[0x9]);
+ F_ENCRYPT(X.r, X.l, ctx->sched[0xa]);
+ F_ENCRYPT(X.l, X.r, ctx->sched[0xb]);
+ F_ENCRYPT(X.r, X.l, ctx->sched[0xc]);
+ F_ENCRYPT(X.l, X.r, ctx->sched[0xd]);
+ F_ENCRYPT(X.r, X.l, ctx->sched[0xe]);
+ F_ENCRYPT(X.l, X.r, ctx->sched[0xf]);
+
+ memcpy(dst, &X, sizeof(X));
+}
+
+/* Decrypt one 64-bit block (apply the rounds in reverse order) */
+static void fcrypt_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+ const struct fcrypt_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct {
+ u32 l, r;
+ } X;
+
+ memcpy(&X, src, sizeof(X));
+
+ F_ENCRYPT(X.l, X.r, ctx->sched[0xf]);
+ F_ENCRYPT(X.r, X.l, ctx->sched[0xe]);
+ F_ENCRYPT(X.l, X.r, ctx->sched[0xd]);
+ F_ENCRYPT(X.r, X.l, ctx->sched[0xc]);
+ F_ENCRYPT(X.l, X.r, ctx->sched[0xb]);
+ F_ENCRYPT(X.r, X.l, ctx->sched[0xa]);
+ F_ENCRYPT(X.l, X.r, ctx->sched[0x9]);
+ F_ENCRYPT(X.r, X.l, ctx->sched[0x8]);
+ F_ENCRYPT(X.l, X.r, ctx->sched[0x7]);
+ F_ENCRYPT(X.r, X.l, ctx->sched[0x6]);
+ F_ENCRYPT(X.l, X.r, ctx->sched[0x5]);
+ F_ENCRYPT(X.r, X.l, ctx->sched[0x4]);
+ F_ENCRYPT(X.l, X.r, ctx->sched[0x3]);
+ F_ENCRYPT(X.r, X.l, ctx->sched[0x2]);
+ F_ENCRYPT(X.l, X.r, ctx->sched[0x1]);
+ F_ENCRYPT(X.r, X.l, ctx->sched[0x0]);
+
+ memcpy(dst, &X, sizeof(X));
+}
+
+/*
+ * Generate a key schedule from the key: the least significant bit in each key
+ * byte is parity and is ignored, leaving 56 significant bits to scatter over
+ * the 16 round schedules. For each schedule, take the low-order 32 bits of
+ * the 56-bit key as the schedule word, then rotate the key right by 11 bits.
+ */
+static int fcrypt_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen)
+{
+ struct fcrypt_ctx *ctx = crypto_tfm_ctx(tfm);
+
+#if BITS_PER_LONG == 64 /* The 64-bit version also works on 32-bit kernels
+			  * and seems to be faster, but the code is larger */
+
+ u64 k; /* k holds all 56 non-parity bits */
+
+ /* discard the parity bits */
+ k = (*key++) >> 1;
+ k <<= 7;
+ k |= (*key++) >> 1;
+ k <<= 7;
+ k |= (*key++) >> 1;
+ k <<= 7;
+ k |= (*key++) >> 1;
+ k <<= 7;
+ k |= (*key++) >> 1;
+ k <<= 7;
+ k |= (*key++) >> 1;
+ k <<= 7;
+ k |= (*key++) >> 1;
+ k <<= 7;
+ k |= (*key) >> 1;
+
+ /* Use lower 32 bits for schedule, rotate by 11 each round (16 times) */
+ ctx->sched[0x0] = be32_to_cpu(k); ror56_64(k, 11);
+ ctx->sched[0x1] = be32_to_cpu(k); ror56_64(k, 11);
+ ctx->sched[0x2] = be32_to_cpu(k); ror56_64(k, 11);
+ ctx->sched[0x3] = be32_to_cpu(k); ror56_64(k, 11);
+ ctx->sched[0x4] = be32_to_cpu(k); ror56_64(k, 11);
+ ctx->sched[0x5] = be32_to_cpu(k); ror56_64(k, 11);
+ ctx->sched[0x6] = be32_to_cpu(k); ror56_64(k, 11);
+ ctx->sched[0x7] = be32_to_cpu(k); ror56_64(k, 11);
+ ctx->sched[0x8] = be32_to_cpu(k); ror56_64(k, 11);
+ ctx->sched[0x9] = be32_to_cpu(k); ror56_64(k, 11);
+ ctx->sched[0xa] = be32_to_cpu(k); ror56_64(k, 11);
+ ctx->sched[0xb] = be32_to_cpu(k); ror56_64(k, 11);
+ ctx->sched[0xc] = be32_to_cpu(k); ror56_64(k, 11);
+ ctx->sched[0xd] = be32_to_cpu(k); ror56_64(k, 11);
+ ctx->sched[0xe] = be32_to_cpu(k); ror56_64(k, 11);
+ ctx->sched[0xf] = be32_to_cpu(k);
+
+ return 0;
+#else
+ u32 hi, lo; /* hi is upper 24 bits and lo lower 32, total 56 */
+
+ /* discard the parity bits */
+ lo = (*key++) >> 1;
+ lo <<= 7;
+ lo |= (*key++) >> 1;
+ lo <<= 7;
+ lo |= (*key++) >> 1;
+ lo <<= 7;
+ lo |= (*key++) >> 1;
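+	/* lo now holds 28 bits; move the top 24 into hi and keep the low 4,
+	 * so the remaining four key bytes fill lo up to 32 bits */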
+ hi = lo >> 4;
+ lo &= 0xf;
+ lo <<= 7;
+ lo |= (*key++) >> 1;
+ lo <<= 7;
+ lo |= (*key++) >> 1;
+ lo <<= 7;
+ lo |= (*key++) >> 1;
+ lo <<= 7;
+ lo |= (*key) >> 1;
+
+ /* Use lower 32 bits for schedule, rotate by 11 each round (16 times) */
+ ctx->sched[0x0] = be32_to_cpu(lo); ror56(hi, lo, 11);
+ ctx->sched[0x1] = be32_to_cpu(lo); ror56(hi, lo, 11);
+ ctx->sched[0x2] = be32_to_cpu(lo); ror56(hi, lo, 11);
+ ctx->sched[0x3] = be32_to_cpu(lo); ror56(hi, lo, 11);
+ ctx->sched[0x4] = be32_to_cpu(lo); ror56(hi, lo, 11);
+ ctx->sched[0x5] = be32_to_cpu(lo); ror56(hi, lo, 11);
+ ctx->sched[0x6] = be32_to_cpu(lo); ror56(hi, lo, 11);
+ ctx->sched[0x7] = be32_to_cpu(lo); ror56(hi, lo, 11);
+ ctx->sched[0x8] = be32_to_cpu(lo); ror56(hi, lo, 11);
+ ctx->sched[0x9] = be32_to_cpu(lo); ror56(hi, lo, 11);
+ ctx->sched[0xa] = be32_to_cpu(lo); ror56(hi, lo, 11);
+ ctx->sched[0xb] = be32_to_cpu(lo); ror56(hi, lo, 11);
+ ctx->sched[0xc] = be32_to_cpu(lo); ror56(hi, lo, 11);
+ ctx->sched[0xd] = be32_to_cpu(lo); ror56(hi, lo, 11);
+ ctx->sched[0xe] = be32_to_cpu(lo); ror56(hi, lo, 11);
+ ctx->sched[0xf] = be32_to_cpu(lo);
+ return 0;
+#endif
+}
+
+static struct crypto_alg fcrypt_alg = {
+ .cra_name = "fcrypt",
+ .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
+ .cra_blocksize = 8,
+ .cra_ctxsize = sizeof(struct fcrypt_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_alignmask = 3,
+ .cra_list = LIST_HEAD_INIT(fcrypt_alg.cra_list),
+ .cra_u = { .cipher = {
+ .cia_min_keysize = 8,
+ .cia_max_keysize = 8,
+ .cia_setkey = fcrypt_setkey,
+ .cia_encrypt = fcrypt_encrypt,
+ .cia_decrypt = fcrypt_decrypt } }
+};
+
+static int __init init(void)
+{
+ return crypto_register_alg(&fcrypt_alg);
+}
+
+static void __exit fini(void)
+{
+ crypto_unregister_alg(&fcrypt_alg);
+}
+
+module_init(init);
+module_exit(fini);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FCrypt Cipher Algorithm");
+MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
diff --git a/crypto/hash.c b/crypto/hash.c
index cdec23d885f..12c4514f347 100644
--- a/crypto/hash.c
+++ b/crypto/hash.c
@@ -16,12 +16,13 @@
#include "internal.h"
-static unsigned int crypto_hash_ctxsize(struct crypto_alg *alg)
+static unsigned int crypto_hash_ctxsize(struct crypto_alg *alg, u32 type,
+ u32 mask)
{
return alg->cra_ctxsize;
}
-static int crypto_init_hash_ops(struct crypto_tfm *tfm)
+static int crypto_init_hash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
{
struct hash_tfm *crt = &tfm->crt_hash;
struct hash_alg *alg = &tfm->__crt_alg->cra_hash;
diff --git a/crypto/hmac.c b/crypto/hmac.c
index b521bcd2b2c..44187c5ee59 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -172,15 +172,16 @@ static int hmac_digest(struct hash_desc *pdesc, struct scatterlist *sg,
static int hmac_init_tfm(struct crypto_tfm *tfm)
{
+ struct crypto_hash *hash;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct hmac_ctx *ctx = hmac_ctx(__crypto_hash_cast(tfm));
- tfm = crypto_spawn_tfm(spawn);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
+ hash = crypto_spawn_hash(spawn);
+ if (IS_ERR(hash))
+ return PTR_ERR(hash);
- ctx->child = crypto_hash_cast(tfm);
+ ctx->child = hash;
return 0;
}
diff --git a/crypto/internal.h b/crypto/internal.h
index 2da6ad4f359..60acad9788c 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -83,8 +83,7 @@ static inline void crypto_exit_proc(void)
{ }
#endif
-static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg,
- int flags)
+static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg)
{
unsigned int len = alg->cra_ctxsize;
@@ -96,23 +95,12 @@ static inline unsigned int crypto_digest_ctxsize(struct crypto_alg *alg,
return len;
}
-static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg,
- int flags)
+static inline unsigned int crypto_cipher_ctxsize(struct crypto_alg *alg)
{
- unsigned int len = alg->cra_ctxsize;
-
- switch (flags & CRYPTO_TFM_MODE_MASK) {
- case CRYPTO_TFM_MODE_CBC:
- len = ALIGN(len, (unsigned long)alg->cra_alignmask + 1);
- len += alg->cra_blocksize;
- break;
- }
-
- return len;
+ return alg->cra_ctxsize;
}
-static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg,
- int flags)
+static inline unsigned int crypto_compress_ctxsize(struct crypto_alg *alg)
{
return alg->cra_ctxsize;
}
@@ -121,10 +109,6 @@ struct crypto_alg *crypto_mod_get(struct crypto_alg *alg);
struct crypto_alg *__crypto_alg_lookup(const char *name, u32 type, u32 mask);
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask);
-int crypto_init_digest_flags(struct crypto_tfm *tfm, u32 flags);
-int crypto_init_cipher_flags(struct crypto_tfm *tfm, u32 flags);
-int crypto_init_compress_flags(struct crypto_tfm *tfm, u32 flags);
-
int crypto_init_digest_ops(struct crypto_tfm *tfm);
int crypto_init_cipher_ops(struct crypto_tfm *tfm);
int crypto_init_compress_ops(struct crypto_tfm *tfm);
@@ -136,7 +120,8 @@ void crypto_exit_compress_ops(struct crypto_tfm *tfm);
void crypto_larval_error(const char *name, u32 type, u32 mask);
void crypto_shoot_alg(struct crypto_alg *alg);
-struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 flags);
+struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
+ u32 mask);
int crypto_register_instance(struct crypto_template *tmpl,
struct crypto_instance *inst);
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 56642586d84..b4105080ac7 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -201,21 +201,22 @@ static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
static int init_tfm(struct crypto_tfm *tfm)
{
+ struct crypto_cipher *cipher;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct priv *ctx = crypto_tfm_ctx(tfm);
u32 *flags = &tfm->crt_flags;
- tfm = crypto_spawn_tfm(spawn);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
+ cipher = crypto_spawn_cipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
- if (crypto_tfm_alg_blocksize(tfm) != 16) {
+ if (crypto_cipher_blocksize(cipher) != 16) {
*flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
return -EINVAL;
}
- ctx->child = crypto_cipher_cast(tfm);
+ ctx->child = cipher;
return 0;
}
diff --git a/crypto/pcbc.c b/crypto/pcbc.c
new file mode 100644
index 00000000000..5174d7fdad6
--- /dev/null
+++ b/crypto/pcbc.c
@@ -0,0 +1,349 @@
+/*
+ * PCBC: Propagating Cipher Block Chaining mode
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Derived from cbc.c
+ * - Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
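+
+/*
+ * PCBC chains both the previous plaintext and the previous ciphertext into
+ * each block:
+ *
+ *	C[i] = E(P[i] ^ P[i-1] ^ C[i-1])
+ *	P[i] = D(C[i]) ^ P[i-1] ^ C[i-1]
+ *
+ * where P[0] ^ C[0] is taken to be the IV.
+ */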
+
+#include <crypto/algapi.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+struct crypto_pcbc_ctx {
+ struct crypto_cipher *child;
+ void (*xor)(u8 *dst, const u8 *src, unsigned int bs);
+};
+
+static int crypto_pcbc_setkey(struct crypto_tfm *parent, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(parent);
+ struct crypto_cipher *child = ctx->child;
+ int err;
+
+ crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_cipher_setkey(child, key, keylen);
+ crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
+ return err;
+}
+
+static int crypto_pcbc_encrypt_segment(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk,
+ struct crypto_cipher *tfm,
+ void (*xor)(u8 *, const u8 *,
+ unsigned int))
+{
+ void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
+ crypto_cipher_alg(tfm)->cia_encrypt;
+ int bsize = crypto_cipher_blocksize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ u8 *iv = walk->iv;
+
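+	/* Feed P[i] ^ (P[i-1] ^ C[i-1]) (the latter held in iv) to the cipher,
+	 * then leave C[i] ^ P[i] in iv for the next block. */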
+ do {
+ xor(iv, src, bsize);
+ fn(crypto_cipher_tfm(tfm), dst, iv);
+ memcpy(iv, dst, bsize);
+ xor(iv, src, bsize);
+
+ src += bsize;
+ dst += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+ return nbytes;
+}
+
+static int crypto_pcbc_encrypt_inplace(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk,
+ struct crypto_cipher *tfm,
+ void (*xor)(u8 *, const u8 *,
+ unsigned int))
+{
+ void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
+ crypto_cipher_alg(tfm)->cia_encrypt;
+ int bsize = crypto_cipher_blocksize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *iv = walk->iv;
+ u8 tmpbuf[bsize];
+
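+	/* src is overwritten with the ciphertext, so save each plaintext
+	 * block in tmpbuf first for the iv update. */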
+ do {
+ memcpy(tmpbuf, src, bsize);
+ xor(iv, tmpbuf, bsize);
+ fn(crypto_cipher_tfm(tfm), src, iv);
+ memcpy(iv, src, bsize);
+ xor(iv, tmpbuf, bsize);
+
+ src += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+ memcpy(walk->iv, iv, bsize);
+
+ return nbytes;
+}
+
+static int crypto_pcbc_encrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct blkcipher_walk walk;
+ struct crypto_blkcipher *tfm = desc->tfm;
+ struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+ struct crypto_cipher *child = ctx->child;
+ void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
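+	/* Walk the scatterlists chunk by chunk; use the in-place variant when
+	 * the source and destination buffers are the same. */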
+ while ((nbytes = walk.nbytes)) {
+ if (walk.src.virt.addr == walk.dst.virt.addr)
+ nbytes = crypto_pcbc_encrypt_inplace(desc, &walk, child,
+ xor);
+ else
+ nbytes = crypto_pcbc_encrypt_segment(desc, &walk, child,
+ xor);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+ return err;
+}
+
+static int crypto_pcbc_decrypt_segment(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk,
+ struct crypto_cipher *tfm,
+ void (*xor)(u8 *, const u8 *,
+ unsigned int))
+{
+ void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
+ crypto_cipher_alg(tfm)->cia_decrypt;
+ int bsize = crypto_cipher_blocksize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ u8 *iv = walk->iv;
+
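+	/* P[i] = D(C[i]) ^ P[i-1] ^ C[i-1]; iv then becomes C[i] ^ P[i]
+	 * for the next block. */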
+ do {
+ fn(crypto_cipher_tfm(tfm), dst, src);
+ xor(dst, iv, bsize);
+ memcpy(iv, src, bsize);
+ xor(iv, dst, bsize);
+
+ src += bsize;
+ dst += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+ memcpy(walk->iv, iv, bsize);
+
+ return nbytes;
+}
+
+static int crypto_pcbc_decrypt_inplace(struct blkcipher_desc *desc,
+ struct blkcipher_walk *walk,
+ struct crypto_cipher *tfm,
+ void (*xor)(u8 *, const u8 *,
+ unsigned int))
+{
+ void (*fn)(struct crypto_tfm *, u8 *, const u8 *) =
+ crypto_cipher_alg(tfm)->cia_decrypt;
+ int bsize = crypto_cipher_blocksize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *iv = walk->iv;
+ u8 tmpbuf[bsize];
+
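+	/* Save the ciphertext block before decrypting in place, so that iv
+	 * can still be updated to C[i] ^ P[i] afterwards. */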
+ do {
+ memcpy(tmpbuf, src, bsize);
+ fn(crypto_cipher_tfm(tfm), src, src);
+ xor(src, iv, bsize);
+ memcpy(iv, tmpbuf, bsize);
+ xor(iv, src, bsize);
+
+ src += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+ memcpy(walk->iv, iv, bsize);
+
+ return nbytes;
+}
+
+static int crypto_pcbc_decrypt(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ struct blkcipher_walk walk;
+ struct crypto_blkcipher *tfm = desc->tfm;
+ struct crypto_pcbc_ctx *ctx = crypto_blkcipher_ctx(tfm);
+ struct crypto_cipher *child = ctx->child;
+ void (*xor)(u8 *, const u8 *, unsigned int bs) = ctx->xor;
+ int err;
+
+ blkcipher_walk_init(&walk, dst, src, nbytes);
+ err = blkcipher_walk_virt(desc, &walk);
+
+ while ((nbytes = walk.nbytes)) {
+ if (walk.src.virt.addr == walk.dst.virt.addr)
+ nbytes = crypto_pcbc_decrypt_inplace(desc, &walk, child,
+ xor);
+ else
+ nbytes = crypto_pcbc_decrypt_segment(desc, &walk, child,
+ xor);
+ err = blkcipher_walk_done(desc, &walk, nbytes);
+ }
+
+ return err;
+}
+
+static void xor_byte(u8 *a, const u8 *b, unsigned int bs)
+{
+ do {
+ *a++ ^= *b++;
+ } while (--bs);
+}
+
+static void xor_quad(u8 *dst, const u8 *src, unsigned int bs)
+{
+ u32 *a = (u32 *)dst;
+ u32 *b = (u32 *)src;
+
+ do {
+ *a++ ^= *b++;
+ } while ((bs -= 4));
+}
+
+static void xor_64(u8 *a, const u8 *b, unsigned int bs)
+{
+ ((u32 *)a)[0] ^= ((u32 *)b)[0];
+ ((u32 *)a)[1] ^= ((u32 *)b)[1];
+}
+
+static void xor_128(u8 *a, const u8 *b, unsigned int bs)
+{
+ ((u32 *)a)[0] ^= ((u32 *)b)[0];
+ ((u32 *)a)[1] ^= ((u32 *)b)[1];
+ ((u32 *)a)[2] ^= ((u32 *)b)[2];
+ ((u32 *)a)[3] ^= ((u32 *)b)[3];
+}
+
+static int crypto_pcbc_init_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_instance *inst = (void *)tfm->__crt_alg;
+ struct crypto_spawn *spawn = crypto_instance_ctx(inst);
+ struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct crypto_cipher *cipher;
+
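+	/* Pick an XOR helper for the block size: unrolled for 8- and 16-byte
+	 * blocks, word-wise when the size is a multiple of four bytes and
+	 * byte-wise otherwise. */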
+ switch (crypto_tfm_alg_blocksize(tfm)) {
+ case 8:
+ ctx->xor = xor_64;
+ break;
+
+ case 16:
+ ctx->xor = xor_128;
+ break;
+
+ default:
+ if (crypto_tfm_alg_blocksize(tfm) % 4)
+ ctx->xor = xor_byte;
+ else
+ ctx->xor = xor_quad;
+ }
+
+ cipher = crypto_spawn_cipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ ctx->child = cipher;
+ return 0;
+}
+
+static void crypto_pcbc_exit_tfm(struct crypto_tfm *tfm)
+{
+ struct crypto_pcbc_ctx *ctx = crypto_tfm_ctx(tfm);
+ crypto_free_cipher(ctx->child);
+}
+
+static struct crypto_instance *crypto_pcbc_alloc(void *param, unsigned int len)
+{
+ struct crypto_instance *inst;
+ struct crypto_alg *alg;
+
+ alg = crypto_get_attr_alg(param, len, CRYPTO_ALG_TYPE_CIPHER,
+ CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
+ if (IS_ERR(alg))
+ return ERR_PTR(PTR_ERR(alg));
+
+ inst = crypto_alloc_instance("pcbc", alg);
+ if (IS_ERR(inst))
+ goto out_put_alg;
+
+ inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER;
+ inst->alg.cra_priority = alg->cra_priority;
+ inst->alg.cra_blocksize = alg->cra_blocksize;
+ inst->alg.cra_alignmask = alg->cra_alignmask;
+ inst->alg.cra_type = &crypto_blkcipher_type;
+
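+	/* The u32-based XOR helpers are used for these block sizes, so the
+	 * buffers must be at least 4-byte aligned. */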
+ if (!(alg->cra_blocksize % 4))
+ inst->alg.cra_alignmask |= 3;
+ inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize;
+ inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
+ inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
+
+ inst->alg.cra_ctxsize = sizeof(struct crypto_pcbc_ctx);
+
+ inst->alg.cra_init = crypto_pcbc_init_tfm;
+ inst->alg.cra_exit = crypto_pcbc_exit_tfm;
+
+ inst->alg.cra_blkcipher.setkey = crypto_pcbc_setkey;
+ inst->alg.cra_blkcipher.encrypt = crypto_pcbc_encrypt;
+ inst->alg.cra_blkcipher.decrypt = crypto_pcbc_decrypt;
+
+out_put_alg:
+ crypto_mod_put(alg);
+ return inst;
+}
+
+static void crypto_pcbc_free(struct crypto_instance *inst)
+{
+ crypto_drop_spawn(crypto_instance_ctx(inst));
+ kfree(inst);
+}
+
+static struct crypto_template crypto_pcbc_tmpl = {
+ .name = "pcbc",
+ .alloc = crypto_pcbc_alloc,
+ .free = crypto_pcbc_free,
+ .module = THIS_MODULE,
+};
+
+static int __init crypto_pcbc_module_init(void)
+{
+ return crypto_register_template(&crypto_pcbc_tmpl);
+}
+
+static void __exit crypto_pcbc_module_exit(void)
+{
+ crypto_unregister_template(&crypto_pcbc_tmpl);
+}
+
+module_init(crypto_pcbc_module_init);
+module_exit(crypto_pcbc_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCBC block cipher algorithm");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index d671e8942b1..f5e9da319ec 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -12,6 +12,7 @@
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
+ * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
* 2004-08-09 Added cipher speed tests (Reyk Floeter <reyk@vantronix.net>)
* 2003-09-14 Rewritten by Kartikey Mahendra Bhatt
*
@@ -71,7 +72,8 @@ static char *check[] = {
"des", "md5", "des3_ede", "rot13", "sha1", "sha256", "blowfish",
"twofish", "serpent", "sha384", "sha512", "md4", "aes", "cast6",
"arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
- "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", NULL
+ "khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
+ "camellia", NULL
};
static void hexdump(unsigned char *buf, unsigned int len)
@@ -765,7 +767,7 @@ static void test_deflate(void)
memcpy(tvmem, deflate_comp_tv_template, tsize);
tv = (void *)tvmem;
- tfm = crypto_alloc_tfm("deflate", 0);
+ tfm = crypto_alloc_comp("deflate", 0, CRYPTO_ALG_ASYNC);
if (tfm == NULL) {
printk("failed to load transform for deflate\n");
return;
@@ -964,6 +966,26 @@ static void do_test(void)
test_cipher("ecb(xeta)", DECRYPT, xeta_dec_tv_template,
XETA_DEC_TEST_VECTORS);
+ /* FCrypt */
+ test_cipher("pcbc(fcrypt)", ENCRYPT, fcrypt_pcbc_enc_tv_template,
+ FCRYPT_ENC_TEST_VECTORS);
+ test_cipher("pcbc(fcrypt)", DECRYPT, fcrypt_pcbc_dec_tv_template,
+ FCRYPT_DEC_TEST_VECTORS);
+
+ /* CAMELLIA */
+ test_cipher("ecb(camellia)", ENCRYPT,
+ camellia_enc_tv_template,
+ CAMELLIA_ENC_TEST_VECTORS);
+ test_cipher("ecb(camellia)", DECRYPT,
+ camellia_dec_tv_template,
+ CAMELLIA_DEC_TEST_VECTORS);
+ test_cipher("cbc(camellia)", ENCRYPT,
+ camellia_cbc_enc_tv_template,
+ CAMELLIA_CBC_ENC_TEST_VECTORS);
+ test_cipher("cbc(camellia)", DECRYPT,
+ camellia_cbc_dec_tv_template,
+ CAMELLIA_CBC_DEC_TEST_VECTORS);
+
test_hash("sha384", sha384_tv_template, SHA384_TEST_VECTORS);
test_hash("sha512", sha512_tv_template, SHA512_TEST_VECTORS);
test_hash("wp512", wp512_tv_template, WP512_TEST_VECTORS);
@@ -980,6 +1002,10 @@ static void do_test(void)
HMAC_SHA1_TEST_VECTORS);
test_hash("hmac(sha256)", hmac_sha256_tv_template,
HMAC_SHA256_TEST_VECTORS);
+ test_hash("hmac(sha384)", hmac_sha384_tv_template,
+ HMAC_SHA384_TEST_VECTORS);
+ test_hash("hmac(sha512)", hmac_sha512_tv_template,
+ HMAC_SHA512_TEST_VECTORS);
test_hash("xcbc(aes)", aes_xcbc128_tv_template,
XCBC_AES_TEST_VECTORS);
@@ -1177,6 +1203,28 @@ static void do_test(void)
XETA_DEC_TEST_VECTORS);
break;
+ case 31:
+ test_cipher("pcbc(fcrypt)", ENCRYPT, fcrypt_pcbc_enc_tv_template,
+ FCRYPT_ENC_TEST_VECTORS);
+ test_cipher("pcbc(fcrypt)", DECRYPT, fcrypt_pcbc_dec_tv_template,
+ FCRYPT_DEC_TEST_VECTORS);
+ break;
+
+ case 32:
+ test_cipher("ecb(camellia)", ENCRYPT,
+ camellia_enc_tv_template,
+ CAMELLIA_ENC_TEST_VECTORS);
+ test_cipher("ecb(camellia)", DECRYPT,
+ camellia_dec_tv_template,
+ CAMELLIA_DEC_TEST_VECTORS);
+ test_cipher("cbc(camellia)", ENCRYPT,
+ camellia_cbc_enc_tv_template,
+ CAMELLIA_CBC_ENC_TEST_VECTORS);
+ test_cipher("cbc(camellia)", DECRYPT,
+ camellia_cbc_dec_tv_template,
+ CAMELLIA_CBC_DEC_TEST_VECTORS);
+ break;
+
case 100:
test_hash("hmac(md5)", hmac_md5_tv_template,
HMAC_MD5_TEST_VECTORS);
@@ -1192,6 +1240,16 @@ static void do_test(void)
HMAC_SHA256_TEST_VECTORS);
break;
+ case 103:
+ test_hash("hmac(sha384)", hmac_sha384_tv_template,
+ HMAC_SHA384_TEST_VECTORS);
+ break;
+
+ case 104:
+ test_hash("hmac(sha512)", hmac_sha512_tv_template,
+ HMAC_SHA512_TEST_VECTORS);
+ break;
+
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
@@ -1260,6 +1318,17 @@ static void do_test(void)
des_speed_template);
break;
+ case 205:
+ test_cipher_speed("ecb(camellia)", ENCRYPT, sec, NULL, 0,
+ camellia_speed_template);
+ test_cipher_speed("ecb(camellia)", DECRYPT, sec, NULL, 0,
+ camellia_speed_template);
+ test_cipher_speed("cbc(camellia)", ENCRYPT, sec, NULL, 0,
+ camellia_speed_template);
+ test_cipher_speed("cbc(camellia)", DECRYPT, sec, NULL, 0,
+ camellia_speed_template);
+ break;
+
case 300:
/* fall through */
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index 48a81362cb8..887527bd5bc 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -12,6 +12,7 @@
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
+ * 2006-12-07 Added SHA384 HMAC and SHA512 HMAC tests
* 2004-08-09 Cipher speed tests by Reyk Floeter <reyk@vantronix.net>
* 2003-09-14 Changes by Kartikey Mahendra Bhatt
*
@@ -27,7 +28,7 @@
struct hash_testvec {
/* only used with keyed hash algorithms */
- char key[128] __attribute__ ((__aligned__(4)));
+ char key[132] __attribute__ ((__aligned__(4)));
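+	/* big enough for the 131-byte keys in the RFC 4231 HMAC vectors below */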
char plaintext[240];
char digest[MAX_DIGEST_SIZE];
unsigned char tap[MAX_TAP];
@@ -1002,6 +1003,248 @@ static struct hash_testvec aes_xcbc128_tv_template[] = {
};
/*
+ * SHA384 HMAC test vectors from RFC4231
+ */
+
+#define HMAC_SHA384_TEST_VECTORS 4
+
+static struct hash_testvec hmac_sha384_tv_template[] = {
+ {
+ .key = { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b }, // (20 bytes)
+ .ksize = 20,
+ .plaintext = { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65 }, // ("Hi There")
+ .psize = 8,
+ .digest = { 0xaf, 0xd0, 0x39, 0x44, 0xd8, 0x48, 0x95, 0x62,
+ 0x6b, 0x08, 0x25, 0xf4, 0xab, 0x46, 0x90, 0x7f,
+ 0x15, 0xf9, 0xda, 0xdb, 0xe4, 0x10, 0x1e, 0xc6,
+ 0x82, 0xaa, 0x03, 0x4c, 0x7c, 0xeb, 0xc5, 0x9c,
+ 0xfa, 0xea, 0x9e, 0xa9, 0x07, 0x6e, 0xde, 0x7f,
+ 0x4a, 0xf1, 0x52, 0xe8, 0xb2, 0xfa, 0x9c, 0xb6 },
+ }, {
+ .key = { 0x4a, 0x65, 0x66, 0x65 }, // ("Jefe")
+ .ksize = 4,
+ .plaintext = { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
+ 0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20, // ("what do ya want ")
+ 0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
+ 0x69, 0x6e, 0x67, 0x3f }, // ("for nothing?")
+ .psize = 28,
+ .digest = { 0xaf, 0x45, 0xd2, 0xe3, 0x76, 0x48, 0x40, 0x31,
+ 0x61, 0x7f, 0x78, 0xd2, 0xb5, 0x8a, 0x6b, 0x1b,
+ 0x9c, 0x7e, 0xf4, 0x64, 0xf5, 0xa0, 0x1b, 0x47,
+ 0xe4, 0x2e, 0xc3, 0x73, 0x63, 0x22, 0x44, 0x5e,
+ 0x8e, 0x22, 0x40, 0xca, 0x5e, 0x69, 0xe2, 0xc7,
+ 0x8b, 0x32, 0x39, 0xec, 0xfa, 0xb2, 0x16, 0x49 },
+ .np = 4,
+ .tap = { 7, 7, 7, 7 }
+ }, {
+ .key = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa }, // (131 bytes)
+ .ksize = 131,
+ .plaintext = { 0x54, 0x65, 0x73, 0x74, 0x20, 0x55, 0x73, 0x69,
+ 0x6e, 0x67, 0x20, 0x4c, 0x61, 0x72, 0x67, 0x65, // ("Test Using Large")
+ 0x72, 0x20, 0x54, 0x68, 0x61, 0x6e, 0x20, 0x42,
+ 0x6c, 0x6f, 0x63, 0x6b, 0x2d, 0x53, 0x69, 0x7a, // ("r Than Block-Siz")
+ 0x65, 0x20, 0x4b, 0x65, 0x79, 0x20, 0x2d, 0x20,
+ 0x48, 0x61, 0x73, 0x68, 0x20, 0x4b, 0x65, 0x79, // ("e Key - Hash Key")
+ 0x20, 0x46, 0x69, 0x72, 0x73, 0x74 }, // (" First")
+ .psize = 54,
+ .digest = { 0x4e, 0xce, 0x08, 0x44, 0x85, 0x81, 0x3e, 0x90,
+ 0x88, 0xd2, 0xc6, 0x3a, 0x04, 0x1b, 0xc5, 0xb4,
+ 0x4f, 0x9e, 0xf1, 0x01, 0x2a, 0x2b, 0x58, 0x8f,
+ 0x3c, 0xd1, 0x1f, 0x05, 0x03, 0x3a, 0xc4, 0xc6,
+ 0x0c, 0x2e, 0xf6, 0xab, 0x40, 0x30, 0xfe, 0x82,
+ 0x96, 0x24, 0x8d, 0xf1, 0x63, 0xf4, 0x49, 0x52 },
+ }, {
+ .key = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa }, // (131 bytes)
+ .ksize = 131,
+ .plaintext = { 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20,
+ 0x61, 0x20, 0x74, 0x65, 0x73, 0x74, 0x20, 0x75, // ("This is a test u")
+ 0x73, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x6c,
+ 0x61, 0x72, 0x67, 0x65, 0x72, 0x20, 0x74, 0x68, // ("sing a larger th")
+ 0x61, 0x6e, 0x20, 0x62, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x2d, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x6b, 0x65, // ("an block-size ke")
+ 0x79, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x61, 0x20,
+ 0x6c, 0x61, 0x72, 0x67, 0x65, 0x72, 0x20, 0x74, // ("y and a larger t")
+ 0x68, 0x61, 0x6e, 0x20, 0x62, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x2d, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x64, // ("han block-size d")
+ 0x61, 0x74, 0x61, 0x2e, 0x20, 0x54, 0x68, 0x65,
+ 0x20, 0x6b, 0x65, 0x79, 0x20, 0x6e, 0x65, 0x65, // ("ata. The key nee")
+ 0x64, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x65,
+ 0x20, 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x20, // ("ds to be hashed ")
+ 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x20, 0x62,
+ 0x65, 0x69, 0x6e, 0x67, 0x20, 0x75, 0x73, 0x65, // ("before being use")
+ 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65,
+ 0x20, 0x48, 0x4d, 0x41, 0x43, 0x20, 0x61, 0x6c, // ("d by the HMAC al")
+ 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x2e }, // ("gorithm.")
+ .psize = 152,
+ .digest = { 0x66, 0x17, 0x17, 0x8e, 0x94, 0x1f, 0x02, 0x0d,
+ 0x35, 0x1e, 0x2f, 0x25, 0x4e, 0x8f, 0xd3, 0x2c,
+ 0x60, 0x24, 0x20, 0xfe, 0xb0, 0xb8, 0xfb, 0x9a,
+ 0xdc, 0xce, 0xbb, 0x82, 0x46, 0x1e, 0x99, 0xc5,
+ 0xa6, 0x78, 0xcc, 0x31, 0xe7, 0x99, 0x17, 0x6d,
+ 0x38, 0x60, 0xe6, 0x11, 0x0c, 0x46, 0x52, 0x3e },
+ },
+};
+
+/*
+ * SHA512 HMAC test vectors from RFC4231
+ */
+
+#define HMAC_SHA512_TEST_VECTORS 4
+
+static struct hash_testvec hmac_sha512_tv_template[] = {
+ {
+ .key = { 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b,
+ 0x0b, 0x0b, 0x0b, 0x0b }, // (20 bytes)
+ .ksize = 20,
+ .plaintext = { 0x48, 0x69, 0x20, 0x54, 0x68, 0x65, 0x72, 0x65 }, // ("Hi There")
+ .psize = 8,
+ .digest = { 0x87, 0xaa, 0x7c, 0xde, 0xa5, 0xef, 0x61, 0x9d,
+ 0x4f, 0xf0, 0xb4, 0x24, 0x1a, 0x1d, 0x6c, 0xb0,
+ 0x23, 0x79, 0xf4, 0xe2, 0xce, 0x4e, 0xc2, 0x78,
+ 0x7a, 0xd0, 0xb3, 0x05, 0x45, 0xe1, 0x7c, 0xde,
+ 0xda, 0xa8, 0x33, 0xb7, 0xd6, 0xb8, 0xa7, 0x02,
+ 0x03, 0x8b, 0x27, 0x4e, 0xae, 0xa3, 0xf4, 0xe4,
+ 0xbe, 0x9d, 0x91, 0x4e, 0xeb, 0x61, 0xf1, 0x70,
+ 0x2e, 0x69, 0x6c, 0x20, 0x3a, 0x12, 0x68, 0x54 },
+ }, {
+ .key = { 0x4a, 0x65, 0x66, 0x65 }, // ("Jefe")
+ .ksize = 4,
+ .plaintext = { 0x77, 0x68, 0x61, 0x74, 0x20, 0x64, 0x6f, 0x20,
+ 0x79, 0x61, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20, // ("what do ya want ")
+ 0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x74, 0x68,
+ 0x69, 0x6e, 0x67, 0x3f }, // ("for nothing?")
+ .psize = 28,
+ .digest = { 0x16, 0x4b, 0x7a, 0x7b, 0xfc, 0xf8, 0x19, 0xe2,
+ 0xe3, 0x95, 0xfb, 0xe7, 0x3b, 0x56, 0xe0, 0xa3,
+ 0x87, 0xbd, 0x64, 0x22, 0x2e, 0x83, 0x1f, 0xd6,
+ 0x10, 0x27, 0x0c, 0xd7, 0xea, 0x25, 0x05, 0x54,
+ 0x97, 0x58, 0xbf, 0x75, 0xc0, 0x5a, 0x99, 0x4a,
+ 0x6d, 0x03, 0x4f, 0x65, 0xf8, 0xf0, 0xe6, 0xfd,
+ 0xca, 0xea, 0xb1, 0xa3, 0x4d, 0x4a, 0x6b, 0x4b,
+ 0x63, 0x6e, 0x07, 0x0a, 0x38, 0xbc, 0xe7, 0x37 },
+ .np = 4,
+ .tap = { 7, 7, 7, 7 }
+ }, {
+ .key = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa }, // (131 bytes)
+ .ksize = 131,
+ .plaintext = { 0x54, 0x65, 0x73, 0x74, 0x20, 0x55, 0x73, 0x69,
+ 0x6e, 0x67, 0x20, 0x4c, 0x61, 0x72, 0x67, 0x65, // ("Test Using Large")
+ 0x72, 0x20, 0x54, 0x68, 0x61, 0x6e, 0x20, 0x42,
+ 0x6c, 0x6f, 0x63, 0x6b, 0x2d, 0x53, 0x69, 0x7a, // ("r Than Block-Siz")
+ 0x65, 0x20, 0x4b, 0x65, 0x79, 0x20, 0x2d, 0x20,
+ 0x48, 0x61, 0x73, 0x68, 0x20, 0x4b, 0x65, 0x79, // ("e Key - Hash Key")
+ 0x20, 0x46, 0x69, 0x72, 0x73, 0x74 }, // (" First")
+ .psize = 54,
+ .digest = { 0x80, 0xb2, 0x42, 0x63, 0xc7, 0xc1, 0xa3, 0xeb,
+ 0xb7, 0x14, 0x93, 0xc1, 0xdd, 0x7b, 0xe8, 0xb4,
+ 0x9b, 0x46, 0xd1, 0xf4, 0x1b, 0x4a, 0xee, 0xc1,
+ 0x12, 0x1b, 0x01, 0x37, 0x83, 0xf8, 0xf3, 0x52,
+ 0x6b, 0x56, 0xd0, 0x37, 0xe0, 0x5f, 0x25, 0x98,
+ 0xbd, 0x0f, 0xd2, 0x21, 0x5d, 0x6a, 0x1e, 0x52,
+ 0x95, 0xe6, 0x4f, 0x73, 0xf6, 0x3f, 0x0a, 0xec,
+ 0x8b, 0x91, 0x5a, 0x98, 0x5d, 0x78, 0x65, 0x98 },
+ }, {
+ .key = { 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
+ 0xaa, 0xaa, 0xaa }, // (131 bytes)
+ .ksize = 131,
+ .plaintext = { 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20,
+ 0x61, 0x20, 0x74, 0x65, 0x73, 0x74, 0x20, 0x75, // ("This is a test u")
+ 0x73, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x6c,
+ 0x61, 0x72, 0x67, 0x65, 0x72, 0x20, 0x74, 0x68, // ("sing a larger th")
+ 0x61, 0x6e, 0x20, 0x62, 0x6c, 0x6f, 0x63, 0x6b,
+ 0x2d, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x6b, 0x65, // ("an block-size ke")
+ 0x79, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x61, 0x20,
+ 0x6c, 0x61, 0x72, 0x67, 0x65, 0x72, 0x20, 0x74, // ("y and a larger t")
+ 0x68, 0x61, 0x6e, 0x20, 0x62, 0x6c, 0x6f, 0x63,
+ 0x6b, 0x2d, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x64, // ("han block-size d")
+ 0x61, 0x74, 0x61, 0x2e, 0x20, 0x54, 0x68, 0x65,
+ 0x20, 0x6b, 0x65, 0x79, 0x20, 0x6e, 0x65, 0x65, // ("ata. The key nee")
+ 0x64, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x65,
+ 0x20, 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x20, // ("ds to be hashed ")
+ 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x20, 0x62,
+ 0x65, 0x69, 0x6e, 0x67, 0x20, 0x75, 0x73, 0x65, // ("before being use")
+ 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65,
+ 0x20, 0x48, 0x4d, 0x41, 0x43, 0x20, 0x61, 0x6c, // ("d by the HMAC al")
+ 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x2e }, // ("gorithm.")
+ .psize = 152,
+ .digest = { 0xe3, 0x7b, 0x6a, 0x77, 0x5d, 0xc8, 0x7d, 0xba,
+ 0xa4, 0xdf, 0xa9, 0xf9, 0x6e, 0x5e, 0x3f, 0xfd,
+ 0xde, 0xbd, 0x71, 0xf8, 0x86, 0x72, 0x89, 0x86,
+ 0x5d, 0xf5, 0xa3, 0x2d, 0x20, 0xcd, 0xc9, 0x44,
+ 0xb6, 0x02, 0x2c, 0xac, 0x3c, 0x49, 0x82, 0xb1,
+ 0x0d, 0x5e, 0xeb, 0x55, 0xc3, 0xe4, 0xde, 0x15,
+ 0x13, 0x46, 0x76, 0xfb, 0x6d, 0xe0, 0x44, 0x60,
+ 0x65, 0xc9, 0x74, 0x40, 0xfa, 0x8c, 0x6a, 0x58 },
+ },
+};
+
+/*
* DES test vectors.
*/
#define DES_ENC_TEST_VECTORS 10
@@ -3316,6 +3559,278 @@ static struct cipher_testvec xeta_dec_tv_template[] = {
}
};
+/*
+ * FCrypt test vectors
+ */
+#define FCRYPT_ENC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_enc_tv_template)
+#define FCRYPT_DEC_TEST_VECTORS ARRAY_SIZE(fcrypt_pcbc_dec_tv_template)
+
+static struct cipher_testvec fcrypt_pcbc_enc_tv_template[] = {
+ { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
+ .key = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .klen = 8,
+ .iv = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .input = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .ilen = 8,
+ .result = { 0x0E, 0x09, 0x00, 0xC7, 0x3E, 0xF7, 0xED, 0x41 },
+ .rlen = 8,
+ }, {
+ .key = { 0x11, 0x44, 0x77, 0xAA, 0xDD, 0x00, 0x33, 0x66 },
+ .klen = 8,
+ .iv = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .input = { 0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0 },
+ .ilen = 8,
+ .result = { 0xD8, 0xED, 0x78, 0x74, 0x77, 0xEC, 0x06, 0x80 },
+ .rlen = 8,
+ }, { /* From Arla */
+ .key = { 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87 },
+ .klen = 8,
+ .iv = { 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
+ .input = "The quick brown fox jumps over the lazy dogs.\0\0",
+ .ilen = 48,
+ .result = { 0x00, 0xf0, 0x0e, 0x11, 0x75, 0xe6, 0x23, 0x82,
+ 0xee, 0xac, 0x98, 0x62, 0x44, 0x51, 0xe4, 0x84,
+ 0xc3, 0x59, 0xd8, 0xaa, 0x64, 0x60, 0xae, 0xf7,
+ 0xd2, 0xd9, 0x13, 0x79, 0x72, 0xa3, 0x45, 0x03,
+ 0x23, 0xb5, 0x62, 0xd7, 0x0c, 0xf5, 0x27, 0xd1,
+ 0xf8, 0x91, 0x3c, 0xac, 0x44, 0x22, 0x92, 0xef },
+ .rlen = 48,
+ }, {
+ .key = { 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
+ .klen = 8,
+ .iv = { 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87 },
+ .input = "The quick brown fox jumps over the lazy dogs.\0\0",
+ .ilen = 48,
+ .result = { 0xca, 0x90, 0xf5, 0x9d, 0xcb, 0xd4, 0xd2, 0x3c,
+ 0x01, 0x88, 0x7f, 0x3e, 0x31, 0x6e, 0x62, 0x9d,
+ 0xd8, 0xe0, 0x57, 0xa3, 0x06, 0x3a, 0x42, 0x58,
+ 0x2a, 0x28, 0xfe, 0x72, 0x52, 0x2f, 0xdd, 0xe0,
+ 0x19, 0x89, 0x09, 0x1c, 0x2a, 0x8e, 0x8c, 0x94,
+ 0xfc, 0xc7, 0x68, 0xe4, 0x88, 0xaa, 0xde, 0x0f },
+ .rlen = 48,
+ }, { /* split-page version */
+ .key = { 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
+ .klen = 8,
+ .iv = { 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87 },
+ .input = "The quick brown fox jumps over the lazy dogs.\0\0",
+ .ilen = 48,
+ .result = { 0xca, 0x90, 0xf5, 0x9d, 0xcb, 0xd4, 0xd2, 0x3c,
+ 0x01, 0x88, 0x7f, 0x3e, 0x31, 0x6e, 0x62, 0x9d,
+ 0xd8, 0xe0, 0x57, 0xa3, 0x06, 0x3a, 0x42, 0x58,
+ 0x2a, 0x28, 0xfe, 0x72, 0x52, 0x2f, 0xdd, 0xe0,
+ 0x19, 0x89, 0x09, 0x1c, 0x2a, 0x8e, 0x8c, 0x94,
+ 0xfc, 0xc7, 0x68, 0xe4, 0x88, 0xaa, 0xde, 0x0f },
+ .rlen = 48,
+ .np = 2,
+ .tap = { 20, 28 },
+ }
+};
+
+static struct cipher_testvec fcrypt_pcbc_dec_tv_template[] = {
+ { /* http://www.openafs.org/pipermail/openafs-devel/2000-December/005320.html */
+ .key = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .klen = 8,
+ .iv = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .input = { 0x0E, 0x09, 0x00, 0xC7, 0x3E, 0xF7, 0xED, 0x41 },
+ .ilen = 8,
+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .rlen = 8,
+ }, {
+ .key = { 0x11, 0x44, 0x77, 0xAA, 0xDD, 0x00, 0x33, 0x66 },
+ .klen = 8,
+ .iv = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
+ .input = { 0xD8, 0xED, 0x78, 0x74, 0x77, 0xEC, 0x06, 0x80 },
+ .ilen = 8,
+ .result = { 0x12, 0x34, 0x56, 0x78, 0x9A, 0xBC, 0xDE, 0xF0 },
+ .rlen = 8,
+ }, { /* From Arla */
+ .key = { 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87 },
+ .klen = 8,
+ .iv = { 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
+ .input = { 0x00, 0xf0, 0x0e, 0x11, 0x75, 0xe6, 0x23, 0x82,
+ 0xee, 0xac, 0x98, 0x62, 0x44, 0x51, 0xe4, 0x84,
+ 0xc3, 0x59, 0xd8, 0xaa, 0x64, 0x60, 0xae, 0xf7,
+ 0xd2, 0xd9, 0x13, 0x79, 0x72, 0xa3, 0x45, 0x03,
+ 0x23, 0xb5, 0x62, 0xd7, 0x0c, 0xf5, 0x27, 0xd1,
+ 0xf8, 0x91, 0x3c, 0xac, 0x44, 0x22, 0x92, 0xef },
+ .ilen = 48,
+ .result = "The quick brown fox jumps over the lazy dogs.\0\0",
+ .rlen = 48,
+ }, {
+ .key = { 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
+ .klen = 8,
+ .iv = { 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87 },
+ .input = { 0xca, 0x90, 0xf5, 0x9d, 0xcb, 0xd4, 0xd2, 0x3c,
+ 0x01, 0x88, 0x7f, 0x3e, 0x31, 0x6e, 0x62, 0x9d,
+ 0xd8, 0xe0, 0x57, 0xa3, 0x06, 0x3a, 0x42, 0x58,
+ 0x2a, 0x28, 0xfe, 0x72, 0x52, 0x2f, 0xdd, 0xe0,
+ 0x19, 0x89, 0x09, 0x1c, 0x2a, 0x8e, 0x8c, 0x94,
+ 0xfc, 0xc7, 0x68, 0xe4, 0x88, 0xaa, 0xde, 0x0f },
+ .ilen = 48,
+ .result = "The quick brown fox jumps over the lazy dogs.\0\0",
+ .rlen = 48,
+ }, { /* split-page version */
+ .key = { 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
+ .klen = 8,
+ .iv = { 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87 },
+ .input = { 0xca, 0x90, 0xf5, 0x9d, 0xcb, 0xd4, 0xd2, 0x3c,
+ 0x01, 0x88, 0x7f, 0x3e, 0x31, 0x6e, 0x62, 0x9d,
+ 0xd8, 0xe0, 0x57, 0xa3, 0x06, 0x3a, 0x42, 0x58,
+ 0x2a, 0x28, 0xfe, 0x72, 0x52, 0x2f, 0xdd, 0xe0,
+ 0x19, 0x89, 0x09, 0x1c, 0x2a, 0x8e, 0x8c, 0x94,
+ 0xfc, 0xc7, 0x68, 0xe4, 0x88, 0xaa, 0xde, 0x0f },
+ .ilen = 48,
+ .result = "The quick brown fox jumps over the lazy dogs.\0\0",
+ .rlen = 48,
+ .np = 2,
+ .tap = { 20, 28 },
+ }
+};
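In both FCrypt templates the final entry repeats the previous vector but adds .np = 2 and .tap = { 20, 28 }: tcrypt uses these fields to feed the 48-byte input as two chunks placed on different pages, exercising the cipher walk across a page boundary. A rough userspace sketch of that chunking convention, assuming only that the tap[] sizes sum to .ilen (illustration only, not part of this patch):

/*
 * Illustrative sketch: splitting one input buffer into .np chunks of
 * .tap[] bytes each, as the split-page vectors above describe.
 */
#include <stdio.h>

static void split_input(unsigned int ilen, const unsigned int *tap,
			unsigned int np)
{
	unsigned int i, off = 0;

	for (i = 0; i < np && off < ilen; i++) {
		/* each chunk would be placed on its own page */
		printf("chunk %u: offset %u, length %u\n", i, off, tap[i]);
		off += tap[i];
	}
}

int main(void)
{
	static const unsigned int tap[2] = { 20, 28 };

	split_input(48, tap, 2);	/* 48-byte input, two chunks */
	return 0;
}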
+
+/*
+ * CAMELLIA test vectors.
+ */
+#define CAMELLIA_ENC_TEST_VECTORS 3
+#define CAMELLIA_DEC_TEST_VECTORS 3
+#define CAMELLIA_CBC_ENC_TEST_VECTORS 2
+#define CAMELLIA_CBC_DEC_TEST_VECTORS 2
+
+static struct cipher_testvec camellia_enc_tv_template[] = {
+ {
+ .key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+ 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
+ .klen = 16,
+ .input = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+ 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
+ .ilen = 16,
+ .result = { 0x67, 0x67, 0x31, 0x38, 0x54, 0x96, 0x69, 0x73,
+ 0x08, 0x57, 0x06, 0x56, 0x48, 0xea, 0xbe, 0x43 },
+ .rlen = 16,
+ }, {
+ .key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+ 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10,
+ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 },
+ .klen = 24,
+ .input = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+ 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
+ .ilen = 16,
+ .result = { 0xb4, 0x99, 0x34, 0x01, 0xb3, 0xe9, 0x96, 0xf8,
+ 0x4e, 0xe5, 0xce, 0xe7, 0xd7, 0x9b, 0x09, 0xb9 },
+ .rlen = 16,
+ }, {
+ .key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+ 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10,
+ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+ 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff },
+ .klen = 32,
+ .input = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+ 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
+ .ilen = 16,
+ .result = { 0x9a, 0xcc, 0x23, 0x7d, 0xff, 0x16, 0xd7, 0x6c,
+ 0x20, 0xef, 0x7c, 0x91, 0x9e, 0x3a, 0x75, 0x09 },
+ .rlen = 16,
+ },
+};
+
+static struct cipher_testvec camellia_dec_tv_template[] = {
+ {
+ .key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+ 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
+ .klen = 16,
+ .input = { 0x67, 0x67, 0x31, 0x38, 0x54, 0x96, 0x69, 0x73,
+ 0x08, 0x57, 0x06, 0x56, 0x48, 0xea, 0xbe, 0x43 },
+ .ilen = 16,
+ .result = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+ 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
+ .rlen = 16,
+ }, {
+ .key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+ 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10,
+ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 },
+ .klen = 24,
+ .input = { 0xb4, 0x99, 0x34, 0x01, 0xb3, 0xe9, 0x96, 0xf8,
+ 0x4e, 0xe5, 0xce, 0xe7, 0xd7, 0x9b, 0x09, 0xb9 },
+ .ilen = 16,
+ .result = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+ 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
+ .rlen = 16,
+ }, {
+ .key = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+ 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10,
+ 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+ 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff },
+ .klen = 32,
+ .input = { 0x9a, 0xcc, 0x23, 0x7d, 0xff, 0x16, 0xd7, 0x6c,
+ 0x20, 0xef, 0x7c, 0x91, 0x9e, 0x3a, 0x75, 0x09 },
+ .ilen = 16,
+ .result = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
+ 0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10 },
+ .rlen = 16,
+ },
+};
+
+static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
+ {
+ .key = { 0x06, 0xa9, 0x21, 0x40, 0x36, 0xb8, 0xa1, 0x5b,
+ 0x51, 0x2e, 0x03, 0xd5, 0x34, 0x12, 0x00, 0x06 },
+ .klen = 16,
+ .iv = { 0x3d, 0xaf, 0xba, 0x42, 0x9d, 0x9e, 0xb4, 0x30,
+ 0xb4, 0x22, 0xda, 0x80, 0x2c, 0x9f, 0xac, 0x41 },
+ .input = { "Single block msg" },
+ .ilen = 16,
+ .result = { 0xea, 0x32, 0x12, 0x76, 0x3b, 0x50, 0x10, 0xe7,
+ 0x18, 0xf6, 0xfd, 0x5d, 0xf6, 0x8f, 0x13, 0x51 },
+ .rlen = 16,
+ }, {
+ .key = { 0xc2, 0x86, 0x69, 0x6d, 0x88, 0x7c, 0x9a, 0xa0,
+ 0x61, 0x1b, 0xbb, 0x3e, 0x20, 0x25, 0xa4, 0x5a },
+ .klen = 16,
+ .iv = { 0x56, 0x2e, 0x17, 0x99, 0x6d, 0x09, 0x3d, 0x28,
+ 0xdd, 0xb3, 0xba, 0x69, 0x5a, 0x2e, 0x6f, 0x58 },
+ .input = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
+ .ilen = 32,
+ .result = { 0xa5, 0xdf, 0x6e, 0x50, 0xda, 0x70, 0x6c, 0x01,
+ 0x4a, 0xab, 0xf3, 0xf2, 0xd6, 0xfc, 0x6c, 0xfd,
+ 0x19, 0xb4, 0x3e, 0x57, 0x1c, 0x02, 0x5e, 0xa0,
+ 0x15, 0x78, 0xe0, 0x5e, 0xf2, 0xcb, 0x87, 0x16 },
+ .rlen = 32,
+ },
+};
+
+static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
+ {
+ .key = { 0x06, 0xa9, 0x21, 0x40, 0x36, 0xb8, 0xa1, 0x5b,
+ 0x51, 0x2e, 0x03, 0xd5, 0x34, 0x12, 0x00, 0x06 },
+ .klen = 16,
+ .iv = { 0x3d, 0xaf, 0xba, 0x42, 0x9d, 0x9e, 0xb4, 0x30,
+ 0xb4, 0x22, 0xda, 0x80, 0x2c, 0x9f, 0xac, 0x41 },
+ .input = { 0xea, 0x32, 0x12, 0x76, 0x3b, 0x50, 0x10, 0xe7,
+ 0x18, 0xf6, 0xfd, 0x5d, 0xf6, 0x8f, 0x13, 0x51 },
+ .ilen = 16,
+ .result = { "Single block msg" },
+ .rlen = 16,
+ }, {
+ .key = { 0xc2, 0x86, 0x69, 0x6d, 0x88, 0x7c, 0x9a, 0xa0,
+ 0x61, 0x1b, 0xbb, 0x3e, 0x20, 0x25, 0xa4, 0x5a },
+ .klen = 16,
+ .iv = { 0x56, 0x2e, 0x17, 0x99, 0x6d, 0x09, 0x3d, 0x28,
+ 0xdd, 0xb3, 0xba, 0x69, 0x5a, 0x2e, 0x6f, 0x58 },
+ .input = { 0xa5, 0xdf, 0x6e, 0x50, 0xda, 0x70, 0x6c, 0x01,
+ 0x4a, 0xab, 0xf3, 0xf2, 0xd6, 0xfc, 0x6c, 0xfd,
+ 0x19, 0xb4, 0x3e, 0x57, 0x1c, 0x02, 0x5e, 0xa0,
+ 0x15, 0x78, 0xe0, 0x5e, 0xf2, 0xcb, 0x87, 0x16 },
+ .ilen = 32,
+ .result = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f },
+ .rlen = 32,
+ },
+};
+
/*
* Compression stuff.
*/
@@ -3769,4 +4284,25 @@ static struct hash_speed generic_hash_speed_template[] = {
{ .blen = 0, .plen = 0, }
};
+static struct cipher_speed camellia_speed_template[] = {
+ { .klen = 16, .blen = 16, },
+ { .klen = 16, .blen = 64, },
+ { .klen = 16, .blen = 256, },
+ { .klen = 16, .blen = 1024, },
+ { .klen = 16, .blen = 8192, },
+ { .klen = 24, .blen = 16, },
+ { .klen = 24, .blen = 64, },
+ { .klen = 24, .blen = 256, },
+ { .klen = 24, .blen = 1024, },
+ { .klen = 24, .blen = 8192, },
+ { .klen = 32, .blen = 16, },
+ { .klen = 32, .blen = 64, },
+ { .klen = 32, .blen = 256, },
+ { .klen = 32, .blen = 1024, },
+ { .klen = 32, .blen = 8192, },
+
+ /* End marker */
+ { .klen = 0, .blen = 0, }
+};
+
#endif /* _CRYPTO_TCRYPT_H */
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index 9347eb6bcf6..53e8ccbf0f5 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -21,6 +21,7 @@
#include <linux/crypto.h>
#include <linux/err.h>
+#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/rtnetlink.h>
@@ -47,7 +48,7 @@ static u_int32_t ks[12] = {0x01010101, 0x01010101, 0x01010101, 0x01010101,
* +------------------------
*/
struct crypto_xcbc_ctx {
- struct crypto_tfm *child;
+ struct crypto_cipher *child;
u8 *odds;
u8 *prev;
u8 *key;
@@ -75,8 +76,7 @@ static int _crypto_xcbc_digest_setkey(struct crypto_hash *parent,
if ((err = crypto_cipher_setkey(ctx->child, ctx->key, ctx->keylen)))
return err;
- ctx->child->__crt_alg->cra_cipher.cia_encrypt(ctx->child, key1,
- ctx->consts);
+ crypto_cipher_encrypt_one(ctx->child, key1, ctx->consts);
return crypto_cipher_setkey(ctx->child, key1, bs);
}
@@ -86,7 +86,7 @@ static int crypto_xcbc_digest_setkey(struct crypto_hash *parent,
{
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
- if (keylen != crypto_tfm_alg_blocksize(ctx->child))
+ if (keylen != crypto_cipher_blocksize(ctx->child))
return -EINVAL;
ctx->keylen = keylen;
@@ -108,13 +108,13 @@ static int crypto_xcbc_digest_init(struct hash_desc *pdesc)
return 0;
}
-static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
- struct scatterlist *sg,
- unsigned int nbytes)
+static int crypto_xcbc_digest_update2(struct hash_desc *pdesc,
+ struct scatterlist *sg,
+ unsigned int nbytes)
{
struct crypto_hash *parent = pdesc->tfm;
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
- struct crypto_tfm *tfm = ctx->child;
+ struct crypto_cipher *tfm = ctx->child;
int bs = crypto_hash_blocksize(parent);
unsigned int i = 0;
@@ -142,7 +142,7 @@ static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
offset += len;
crypto_kunmap(p, 0);
- crypto_yield(tfm->crt_flags);
+ crypto_yield(pdesc->flags);
continue;
}
@@ -152,7 +152,7 @@ static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
p += bs - ctx->len;
ctx->xor(ctx->prev, ctx->odds, bs);
- tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, ctx->prev, ctx->prev);
+ crypto_cipher_encrypt_one(tfm, ctx->prev, ctx->prev);
/* clearing the length */
ctx->len = 0;
@@ -160,7 +160,8 @@ static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
/* encrypting the rest of data */
while (len > bs) {
ctx->xor(ctx->prev, p, bs);
- tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, ctx->prev, ctx->prev);
+ crypto_cipher_encrypt_one(tfm, ctx->prev,
+ ctx->prev);
p += bs;
len -= bs;
}
@@ -171,7 +172,7 @@ static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
ctx->len = len;
}
crypto_kunmap(p, 0);
- crypto_yield(tfm->crt_flags);
+ crypto_yield(pdesc->flags);
slen -= min(slen, ((unsigned int)(PAGE_SIZE)) - offset);
offset = 0;
pg++;
@@ -183,11 +184,20 @@ static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
return 0;
}
+static int crypto_xcbc_digest_update(struct hash_desc *pdesc,
+ struct scatterlist *sg,
+ unsigned int nbytes)
+{
+ if (WARN_ON_ONCE(in_irq()))
+ return -EDEADLK;
+ return crypto_xcbc_digest_update2(pdesc, sg, nbytes);
+}
+
static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
{
struct crypto_hash *parent = pdesc->tfm;
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(parent);
- struct crypto_tfm *tfm = ctx->child;
+ struct crypto_cipher *tfm = ctx->child;
int bs = crypto_hash_blocksize(parent);
int err = 0;
@@ -197,13 +207,14 @@ static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
return err;
- tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, key2, (const u8*)(ctx->consts+bs));
+ crypto_cipher_encrypt_one(tfm, key2,
+ (u8 *)(ctx->consts + bs));
ctx->xor(ctx->prev, ctx->odds, bs);
ctx->xor(ctx->prev, key2, bs);
_crypto_xcbc_digest_setkey(parent, ctx);
- tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, out, ctx->prev);
+ crypto_cipher_encrypt_one(tfm, out, ctx->prev);
} else {
u8 key3[bs];
unsigned int rlen;
@@ -218,14 +229,15 @@ static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
if ((err = crypto_cipher_setkey(tfm, ctx->key, ctx->keylen)) != 0)
return err;
- tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, key3, (const u8*)(ctx->consts+bs*2));
+ crypto_cipher_encrypt_one(tfm, key3,
+ (u8 *)(ctx->consts + bs * 2));
ctx->xor(ctx->prev, ctx->odds, bs);
ctx->xor(ctx->prev, key3, bs);
_crypto_xcbc_digest_setkey(parent, ctx);
- tfm->__crt_alg->cra_cipher.cia_encrypt(tfm, out, ctx->prev);
+ crypto_cipher_encrypt_one(tfm, out, ctx->prev);
}
return 0;
@@ -234,21 +246,25 @@ static int crypto_xcbc_digest_final(struct hash_desc *pdesc, u8 *out)
static int crypto_xcbc_digest(struct hash_desc *pdesc,
struct scatterlist *sg, unsigned int nbytes, u8 *out)
{
+ if (WARN_ON_ONCE(in_irq()))
+ return -EDEADLK;
+
crypto_xcbc_digest_init(pdesc);
- crypto_xcbc_digest_update(pdesc, sg, nbytes);
+ crypto_xcbc_digest_update2(pdesc, sg, nbytes);
return crypto_xcbc_digest_final(pdesc, out);
}
static int xcbc_init_tfm(struct crypto_tfm *tfm)
{
+ struct crypto_cipher *cipher;
struct crypto_instance *inst = (void *)tfm->__crt_alg;
struct crypto_spawn *spawn = crypto_instance_ctx(inst);
struct crypto_xcbc_ctx *ctx = crypto_hash_ctx_aligned(__crypto_hash_cast(tfm));
int bs = crypto_hash_blocksize(__crypto_hash_cast(tfm));
- tfm = crypto_spawn_tfm(spawn);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
+ cipher = crypto_spawn_cipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
switch(bs) {
case 16:
@@ -258,7 +274,7 @@ static int xcbc_init_tfm(struct crypto_tfm *tfm)
return -EINVAL;
}
- ctx->child = crypto_cipher_cast(tfm);
+ ctx->child = cipher;
ctx->odds = (u8*)(ctx+1);
ctx->prev = ctx->odds + bs;
ctx->key = ctx->prev + bs;
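The xcbc changes above replace direct pokes at ->__crt_alg->cra_cipher.cia_encrypt with the crypto_cipher_* wrappers. A hedged sketch of how a caller typically drives that single-block cipher interface, using AES purely as an example algorithm (illustration only, not part of this patch):

/*
 * Illustrative sketch: driving the single-block cipher interface that
 * xcbc now uses, with minimal error handling.
 */
#include <linux/crypto.h>
#include <linux/err.h>

static int demo_encrypt_one_block(const u8 *key, unsigned int keylen,
				  const u8 *in, u8 *out)
{
	struct crypto_cipher *tfm;
	int err;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_cipher_setkey(tfm, key, keylen);
	if (!err)
		crypto_cipher_encrypt_one(tfm, out, in);	/* one block in, one block out */

	crypto_free_cipher(tfm);
	return err;
}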
diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
index 43a68398656..31ea405f2ee 100644
--- a/drivers/crypto/geode-aes.c
+++ b/drivers/crypto/geode-aes.c
@@ -457,7 +457,7 @@ static struct pci_driver geode_aes_driver = {
static int __init
geode_aes_init(void)
{
- return pci_module_init(&geode_aes_driver);
+ return pci_register_driver(&geode_aes_driver);
}
static void __exit
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ad92b6a76ee..4f2ffbd81dc 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2335,6 +2335,17 @@ config QLA3XXX
To compile this driver as a module, choose M here: the module
will be called qla3xxx.
+config ATL1
+ tristate "Attansic L1 Gigabit Ethernet support (EXPERIMENTAL)"
+ depends on NET_PCI && PCI && EXPERIMENTAL
+ select CRC32
+ select MII
+ help
+ This driver supports the Attansic L1 gigabit ethernet adapter.
+
+ To compile this driver as a module, choose M here. The module
+ will be called atl1.
+
endmenu
#
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 0878e3df517..33af833667d 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_CHELSIO_T1) += chelsio/
obj-$(CONFIG_CHELSIO_T3) += cxgb3/
obj-$(CONFIG_EHEA) += ehea/
obj-$(CONFIG_BONDING) += bonding/
+obj-$(CONFIG_ATL1) += atl1/
obj-$(CONFIG_GIANFAR) += gianfar_driver.o
gianfar_driver-objs := gianfar.o \
diff --git a/drivers/net/atl1/Makefile b/drivers/net/atl1/Makefile
new file mode 100644
index 00000000000..a6b707e4e69
--- /dev/null
+++ b/drivers/net/atl1/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_ATL1) += atl1.o
+atl1-y += atl1_main.o atl1_hw.o atl1_ethtool.o atl1_param.o
diff --git a/drivers/net/atl1/atl1.h b/drivers/net/atl1/atl1.h
new file mode 100644
index 00000000000..b1c6034e68f
--- /dev/null
+++ b/drivers/net/atl1/atl1.h
@@ -0,0 +1,283 @@
+/*
+ * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
+ * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
+ * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _ATL1_H_
+#define _ATL1_H_
+
+#include <linux/types.h>
+#include <linux/if_vlan.h>
+
+#include "atl1_hw.h"
+
+/* function prototypes needed by multiple files */
+s32 atl1_up(struct atl1_adapter *adapter);
+void atl1_down(struct atl1_adapter *adapter);
+int atl1_reset(struct atl1_adapter *adapter);
+s32 atl1_setup_ring_resources(struct atl1_adapter *adapter);
+void atl1_free_ring_resources(struct atl1_adapter *adapter);
+
+extern char atl1_driver_name[];
+extern char atl1_driver_version[];
+extern const struct ethtool_ops atl1_ethtool_ops;
+
+struct atl1_adapter;
+
+#define ATL1_MAX_INTR 3
+
+#define ATL1_DEFAULT_TPD 256
+#define ATL1_MAX_TPD 1024
+#define ATL1_MIN_TPD 64
+#define ATL1_DEFAULT_RFD 512
+#define ATL1_MIN_RFD 128
+#define ATL1_MAX_RFD 2048
+
+#define ATL1_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i]))
+#define ATL1_RFD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_free_desc)
+#define ATL1_TPD_DESC(R, i) ATL1_GET_DESC(R, i, struct tx_packet_desc)
+#define ATL1_RRD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_return_desc)
+
+/*
+ * Some workarounds require millisecond delays and are run during interrupt
+ * context. Most notably, when establishing link, the phy may need tweaking
+ * but cannot process phy register reads/writes faster than millisecond
+ * intervals...and we establish link due to a "link status change" interrupt.
+ */
+
+/*
+ * wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct atl1_buffer {
+ struct sk_buff *skb;
+ u16 length;
+ u16 alloced;
+ dma_addr_t dma;
+};
+
+#define MAX_TX_BUF_LEN 0x3000 /* 12KB */
+
+struct atl1_tpd_ring {
+ void *desc; /* pointer to the descriptor ring memory */
+ dma_addr_t dma; /* physical address of the descriptor ring */
+ u16 size; /* length of descriptor ring in bytes */
+ u16 count; /* number of descriptors in the ring */
+ u16 hw_idx; /* hardware index */
+ atomic_t next_to_clean;
+ atomic_t next_to_use;
+ struct atl1_buffer *buffer_info;
+};
+
+struct atl1_rfd_ring {
+ void *desc;
+ dma_addr_t dma;
+ u16 size;
+ u16 count;
+ atomic_t next_to_use;
+ u16 next_to_clean;
+ struct atl1_buffer *buffer_info;
+};
+
+struct atl1_rrd_ring {
+ void *desc;
+ dma_addr_t dma;
+ unsigned int size;
+ u16 count;
+ u16 next_to_use;
+ atomic_t next_to_clean;
+};
+
+struct atl1_ring_header {
+ void *desc; /* pointer to the descriptor ring memory */
+ dma_addr_t dma; /* physical address of the descriptor ring */
+ unsigned int size; /* length of descriptor ring in bytes */
+};
+
+struct atl1_cmb {
+ struct coals_msg_block *cmb;
+ dma_addr_t dma;
+};
+
+struct atl1_smb {
+ struct stats_msg_block *smb;
+ dma_addr_t dma;
+};
+
+/* Statistics counters */
+struct atl1_sft_stats {
+ u64 rx_packets;
+ u64 tx_packets;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ u64 multicast;
+ u64 collisions;
+ u64 rx_errors;
+ u64 rx_length_errors;
+ u64 rx_crc_errors;
+ u64 rx_frame_errors;
+ u64 rx_fifo_errors;
+ u64 rx_missed_errors;
+ u64 tx_errors;
+ u64 tx_fifo_errors;
+ u64 tx_aborted_errors;
+ u64 tx_window_errors;
+ u64 tx_carrier_errors;
+
+ u64 tx_pause; /* num Pause packet transmitted. */
+ u64 excecol; /* num tx packets aborted due to excessive collisions. */
+ u64 deffer; /* num deferred tx packets */
+ u64 scc; /* num packets subsequently transmitted successfully w/ single prior collision. */
+ u64 mcc; /* num packets subsequently transmitted successfully w/ multiple prior collisions. */
+ u64 latecol; /* num tx packets w/ late collisions. */
+ u64 tx_underun; /* num tx packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */
+ u64 tx_trunc; /* num tx packets truncated due to size exceeding MTU, regardless of whether truncated by Selene or not. (The name doesn't really reflect the meaning in this case.) */
+ u64 rx_pause; /* num Pause packets received. */
+ u64 rx_rrd_ov;
+ u64 rx_trunc;
+};
+
+/* board specific private data structure */
+#define ATL1_REGS_LEN 8
+
+/* Structure containing variables used by the shared code */
+struct atl1_hw {
+ u8 __iomem *hw_addr;
+ struct atl1_adapter *back;
+ enum atl1_dma_order dma_ord;
+ enum atl1_dma_rcb rcb_value;
+ enum atl1_dma_req_block dmar_block;
+ enum atl1_dma_req_block dmaw_block;
+ u8 preamble_len;
+ u8 max_retry; /* Retransmission maximum, after which the packet will be discarded */
+ u8 jam_ipg; /* IPG to start JAM for collision based flow control in half-duplex mode. In units of 8-bit time */
+ u8 ipgt; /* Desired back to back inter-packet gap. The default is 96-bit time */
+ u8 min_ifg; /* Minimum number of IFG to enforce in between RX frames. Frames with a gap below this IFG are dropped */
+ u8 ipgr1; /* 64bit Carrier-Sense window */
+ u8 ipgr2; /* 96-bit IPG window */
+ u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. Each TPD is 16 bytes long */
+ u8 rfd_burst; /* Number of RFD to prefetch in cache-aligned burst. Each RFD is 12 bytes long */
+ u8 rfd_fetch_gap;
+ u8 rrd_burst; /* Threshold number of RRDs that can be retired in a burst. Each RRD is 16 bytes long */
+ u8 tpd_fetch_th;
+ u8 tpd_fetch_gap;
+ u16 tx_jumbo_task_th;
+ u16 txf_burst; /* Number of data bytes to read in a cache-aligned burst. Each SRAM entry is
+ 8 bytes long */
+ u16 rx_jumbo_th; /* Jumbo packet size for non-VLAN packet. VLAN packets should add 4 bytes */
+ u16 rx_jumbo_lkah;
+ u16 rrd_ret_timer; /* RRD retirement timer. Decrement by 1 after every 512ns passes. */
+ u16 lcol; /* Collision Window */
+
+ u16 cmb_tpd;
+ u16 cmb_rrd;
+ u16 cmb_rx_timer;
+ u16 cmb_tx_timer;
+ u32 smb_timer;
+ u16 media_type;
+ u16 autoneg_advertised;
+ u16 pci_cmd_word;
+
+ u16 mii_autoneg_adv_reg;
+ u16 mii_1000t_ctrl_reg;
+
+ u32 mem_rang;
+ u32 txcw;
+ u32 max_frame_size;
+ u32 min_frame_size;
+ u32 mc_filter_type;
+ u32 num_mc_addrs;
+ u32 collision_delta;
+ u32 tx_packet_delta;
+ u16 phy_spd_default;
+
+ u16 dev_rev;
+ u8 revision_id;
+
+ /* spi flash */
+ u8 flash_vendor;
+
+ u8 dma_fairness;
+ u8 mac_addr[ETH_ALEN];
+ u8 perm_mac_addr[ETH_ALEN];
+
+ /* bool phy_preamble_sup; */
+ bool phy_configured;
+};
+
+struct atl1_adapter {
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device_stats net_stats;
+ struct atl1_sft_stats soft_stats;
+
+ struct vlan_group *vlgrp;
+ u32 rx_buffer_len;
+ u32 wol;
+ u16 link_speed;
+ u16 link_duplex;
+ spinlock_t lock;
+ atomic_t irq_sem;
+ struct work_struct tx_timeout_task;
+ struct work_struct link_chg_task;
+ struct work_struct pcie_dma_to_rst_task;
+ struct timer_list watchdog_timer;
+ struct timer_list phy_config_timer;
+ bool phy_timer_pending;
+
+ bool mac_disabled;
+
+ /* All descriptor rings' memory */
+ struct atl1_ring_header ring_header;
+
+ /* TX */
+ struct atl1_tpd_ring tpd_ring;
+ spinlock_t mb_lock;
+
+ /* RX */
+ struct atl1_rfd_ring rfd_ring;
+ struct atl1_rrd_ring rrd_ring;
+ u64 hw_csum_err;
+ u64 hw_csum_good;
+
+ u32 gorcl;
+ u64 gorcl_old;
+
+ /* Interrupt Moderator timer ( 2us resolution) */
+ u16 imt;
+ /* Interrupt Clear timer (2us resolution) */
+ u16 ict;
+
+ /* MII interface info */
+ struct mii_if_info mii;
+
+ /* structs defined in atl1_hw.h */
+ u32 bd_number; /* board number */
+ bool pci_using_64;
+ struct atl1_hw hw;
+ struct atl1_smb smb;
+ struct atl1_cmb cmb;
+
+ u32 pci_state[16];
+};
+
+#endif /* _ATL1_H_ */
diff --git a/drivers/net/atl1/atl1_ethtool.c b/drivers/net/atl1/atl1_ethtool.c
new file mode 100644
index 00000000000..c11c27798e5
--- /dev/null
+++ b/drivers/net/atl1/atl1_ethtool.c
@@ -0,0 +1,508 @@
+/*
+ * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
+ * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
+ * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+#include <asm/uaccess.h>
+
+#include "atl1.h"
+
+struct atl1_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define ATL1_STAT(m) sizeof(((struct atl1_adapter *)0)->m), \
+ offsetof(struct atl1_adapter, m)
+
+static struct atl1_stats atl1_gstrings_stats[] = {
+ {"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
+ {"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
+ {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
+ {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
+ {"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
+ {"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
+ {"rx_dropped", ATL1_STAT(net_stats.rx_dropped)},
+ {"tx_dropped", ATL1_STAT(net_stats.tx_dropped)},
+ {"multicast", ATL1_STAT(soft_stats.multicast)},
+ {"collisions", ATL1_STAT(soft_stats.collisions)},
+ {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
+ {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
+ {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
+ {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
+ {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
+ {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
+ {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
+ {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
+ {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
+ {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
+ {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
+ {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
+ {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
+ {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
+ {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
+ {"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
+ {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
+ {"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
+ {"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
+ {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
+ {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
+};
+
+static void atl1_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ int i;
+ char *p;
+
+ for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
+ p = (char *)adapter+atl1_gstrings_stats[i].stat_offset;
+ data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+
+}
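ATL1_STAT records each counter's size and offset so atl1_get_ethtool_stats() can walk the table generically. The same sizeof()/offsetof() pattern in a standalone userspace sketch, with a made-up struct purely for illustration (not part of this patch):

/*
 * Illustrative sketch: a description table of field sizes and offsets,
 * read back generically from a hypothetical stats struct.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_stats {
	uint64_t rx_packets;
	uint32_t tx_errors;
};

struct demo_stat_desc {
	const char *name;
	size_t size;
	size_t offset;
};

#define DEMO_STAT(m) { #m, sizeof(((struct demo_stats *)0)->m), \
		       offsetof(struct demo_stats, m) }

static const struct demo_stat_desc demo_descs[] = {
	DEMO_STAT(rx_packets),
	DEMO_STAT(tx_errors),
};

int main(void)
{
	struct demo_stats s = { .rx_packets = 42, .tx_errors = 3 };
	size_t i;

	for (i = 0; i < sizeof(demo_descs) / sizeof(demo_descs[0]); i++) {
		const char *p = (const char *)&s + demo_descs[i].offset;
		uint64_t v = demo_descs[i].size == sizeof(uint64_t) ?
			     *(const uint64_t *)p : *(const uint32_t *)p;

		printf("%s = %llu\n", demo_descs[i].name,
		       (unsigned long long)v);
	}
	return 0;
}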
+
+static int atl1_get_stats_count(struct net_device *netdev)
+{
+ return ARRAY_SIZE(atl1_gstrings_stats);
+}
+
+static int atl1_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_hw *hw = &adapter->hw;
+
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg | SUPPORTED_TP);
+ ecmd->advertising = ADVERTISED_TP;
+ if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+ hw->media_type == MEDIA_TYPE_1000M_FULL) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ ecmd->advertising |=
+ (ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Full);
+ }
+ else
+ ecmd->advertising |= (ADVERTISED_1000baseT_Full);
+ }
+ ecmd->port = PORT_TP;
+ ecmd->phy_address = 0;
+ ecmd->transceiver = XCVR_INTERNAL;
+
+ if (netif_carrier_ok(adapter->netdev)) {
+ u16 link_speed, link_duplex;
+ atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
+ ecmd->speed = link_speed;
+ if (link_duplex == FULL_DUPLEX)
+ ecmd->duplex = DUPLEX_FULL;
+ else
+ ecmd->duplex = DUPLEX_HALF;
+ } else {
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
+ }
+ if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+ hw->media_type == MEDIA_TYPE_1000M_FULL)
+ ecmd->autoneg = AUTONEG_ENABLE;
+ else
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static int atl1_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_hw *hw = &adapter->hw;
+ u16 phy_data;
+ int ret_val = 0;
+ u16 old_media_type = hw->media_type;
+
+ if (netif_running(adapter->netdev)) {
+ printk(KERN_DEBUG "%s: ethtool shutting down adapter\n",
+ atl1_driver_name);
+ atl1_down(adapter);
+ }
+
+ if (ecmd->autoneg == AUTONEG_ENABLE)
+ hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
+ else {
+ if (ecmd->speed == SPEED_1000) {
+ if (ecmd->duplex != DUPLEX_FULL) {
+ printk(KERN_WARNING
+ "%s: can't force to 1000M half duplex\n",
+ atl1_driver_name);
+ ret_val = -EINVAL;
+ goto exit_sset;
+ }
+ hw->media_type = MEDIA_TYPE_1000M_FULL;
+ } else if (ecmd->speed == SPEED_100) {
+ if (ecmd->duplex == DUPLEX_FULL) {
+ hw->media_type = MEDIA_TYPE_100M_FULL;
+ } else
+ hw->media_type = MEDIA_TYPE_100M_HALF;
+ } else {
+ if (ecmd->duplex == DUPLEX_FULL)
+ hw->media_type = MEDIA_TYPE_10M_FULL;
+ else
+ hw->media_type = MEDIA_TYPE_10M_HALF;
+ }
+ }
+ switch (hw->media_type) {
+ case MEDIA_TYPE_AUTO_SENSOR:
+ ecmd->advertising =
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_Autoneg | ADVERTISED_TP;
+ break;
+ case MEDIA_TYPE_1000M_FULL:
+ ecmd->advertising =
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_Autoneg | ADVERTISED_TP;
+ break;
+ default:
+ ecmd->advertising = 0;
+ break;
+ }
+ if (atl1_phy_setup_autoneg_adv(hw)) {
+ ret_val = -EINVAL;
+ printk(KERN_WARNING
+ "%s: invalid ethtool speed/duplex setting\n",
+ atl1_driver_name);
+ goto exit_sset;
+ }
+ if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+ hw->media_type == MEDIA_TYPE_1000M_FULL)
+ phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
+ else {
+ switch (hw->media_type) {
+ case MEDIA_TYPE_100M_FULL:
+ phy_data =
+ MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
+ MII_CR_RESET;
+ break;
+ case MEDIA_TYPE_100M_HALF:
+ phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
+ break;
+ case MEDIA_TYPE_10M_FULL:
+ phy_data =
+ MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
+ break;
+ default: /* MEDIA_TYPE_10M_HALF: */
+ phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
+ break;
+ }
+ }
+ atl1_write_phy_reg(hw, MII_BMCR, phy_data);
+exit_sset:
+ if (ret_val)
+ hw->media_type = old_media_type;
+
+ if (netif_running(adapter->netdev)) {
+ printk(KERN_DEBUG "%s: ethtool starting adapter\n",
+ atl1_driver_name);
+ atl1_up(adapter);
+ } else if (!ret_val) {
+ printk(KERN_DEBUG "%s: ethtool resetting adapter\n",
+ atl1_driver_name);
+ atl1_reset(adapter);
+ }
+ return ret_val;
+}
+
+static void atl1_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+
+ strncpy(drvinfo->driver, atl1_driver_name, sizeof(drvinfo->driver));
+ strncpy(drvinfo->version, atl1_driver_version,
+ sizeof(drvinfo->version));
+ strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
+ drvinfo->eedump_len = ATL1_EEDUMP_LEN;
+}
+
+static void atl1_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+
+ wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
+ wol->wolopts = 0;
+ if (adapter->wol & ATL1_WUFC_EX)
+ wol->wolopts |= WAKE_UCAST;
+ if (adapter->wol & ATL1_WUFC_MC)
+ wol->wolopts |= WAKE_MCAST;
+ if (adapter->wol & ATL1_WUFC_BC)
+ wol->wolopts |= WAKE_BCAST;
+ if (adapter->wol & ATL1_WUFC_MAG)
+ wol->wolopts |= WAKE_MAGIC;
+ return;
+}
+
+static int atl1_set_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+
+ if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ return -EOPNOTSUPP;
+ adapter->wol = 0;
+ if (wol->wolopts & WAKE_UCAST)
+ adapter->wol |= ATL1_WUFC_EX;
+ if (wol->wolopts & WAKE_MCAST)
+ adapter->wol |= ATL1_WUFC_MC;
+ if (wol->wolopts & WAKE_BCAST)
+ adapter->wol |= ATL1_WUFC_BC;
+ if (wol->wolopts & WAKE_MAGIC)
+ adapter->wol |= ATL1_WUFC_MAG;
+ return 0;
+}
+
+static void atl1_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
+ struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;
+
+ ring->rx_max_pending = ATL1_MAX_RFD;
+ ring->tx_max_pending = ATL1_MAX_TPD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rxdr->count;
+ ring->tx_pending = txdr->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int atl1_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
+ struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
+ struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;
+
+ struct atl1_tpd_ring tpd_old, tpd_new;
+ struct atl1_rfd_ring rfd_old, rfd_new;
+ struct atl1_rrd_ring rrd_old, rrd_new;
+ struct atl1_ring_header rhdr_old, rhdr_new;
+ int err;
+
+ tpd_old = adapter->tpd_ring;
+ rfd_old = adapter->rfd_ring;
+ rrd_old = adapter->rrd_ring;
+ rhdr_old = adapter->ring_header;
+
+ if (netif_running(adapter->netdev))
+ atl1_down(adapter);
+
+ rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
+ rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD :
+ rfdr->count;
+ rfdr->count = (rfdr->count + 3) & ~3;
+ rrdr->count = rfdr->count;
+
+ tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
+ tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD :
+ tpdr->count;
+ tpdr->count = (tpdr->count + 3) & ~3;
+
+ if (netif_running(adapter->netdev)) {
+ /* try to get new resources before deleting old */
+ err = atl1_setup_ring_resources(adapter);
+ if (err)
+ goto err_setup_ring;
+
+ /*
+ * save the new, restore the old in order to free it,
+ * then restore the new back again
+ */
+
+ rfd_new = adapter->rfd_ring;
+ rrd_new = adapter->rrd_ring;
+ tpd_new = adapter->tpd_ring;
+ rhdr_new = adapter->ring_header;
+ adapter->rfd_ring = rfd_old;
+ adapter->rrd_ring = rrd_old;
+ adapter->tpd_ring = tpd_old;
+ adapter->ring_header = rhdr_old;
+ atl1_free_ring_resources(adapter);
+ adapter->rfd_ring = rfd_new;
+ adapter->rrd_ring = rrd_new;
+ adapter->tpd_ring = tpd_new;
+ adapter->ring_header = rhdr_new;
+
+ err = atl1_up(adapter);
+ if (err)
+ return err;
+ }
+ return 0;
+
+err_setup_ring:
+ adapter->rfd_ring = rfd_old;
+ adapter->rrd_ring = rrd_old;
+ adapter->tpd_ring = tpd_old;
+ adapter->ring_header = rhdr_old;
+ atl1_up(adapter);
+ return err;
+}
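atl1_set_ringparam() allocates the new rings before freeing the old ones, temporarily swapping the old state back in because atl1_free_ring_resources() only frees whatever is currently installed in the adapter. A generic sketch of that swap pattern, using hypothetical alloc/free helpers (illustration only, not part of this patch):

/*
 * Illustrative sketch: allocate new resources before freeing old ones,
 * swapping states so the free helper sees the old resources.
 */
#include <stdlib.h>

struct demo_owner {
	void *res;
};

/* hypothetical helpers that only operate on the installed state */
static int demo_alloc(struct demo_owner *o)
{
	o->res = malloc(128);
	return o->res ? 0 : -1;
}

static void demo_free(struct demo_owner *o)
{
	free(o->res);
	o->res = NULL;
}

static int demo_resize(struct demo_owner *o)
{
	void *old = o->res, *fresh;
	int err;

	err = demo_alloc(o);	/* try to get new resources before deleting old */
	if (err) {
		o->res = old;	/* mirror the err_setup_ring path above */
		return err;
	}

	fresh = o->res;
	o->res = old;		/* restore the old state so it can be freed */
	demo_free(o);
	o->res = fresh;		/* then install the new state again */
	return 0;
}

int main(void)
{
	struct demo_owner o = { NULL };

	demo_alloc(&o);		/* initial allocation */
	demo_resize(&o);	/* resize using the swap pattern */
	demo_free(&o);
	return 0;
}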
+
+static void atl1_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *epause)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_hw *hw = &adapter->hw;
+
+ if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+ hw->media_type == MEDIA_TYPE_1000M_FULL) {
+ epause->autoneg = AUTONEG_ENABLE;
+ } else {
+ epause->autoneg = AUTONEG_DISABLE;
+ }
+ epause->rx_pause = 1;
+ epause->tx_pause = 1;
+}
+
+static int atl1_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *epause)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_hw *hw = &adapter->hw;
+
+ if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+ hw->media_type == MEDIA_TYPE_1000M_FULL) {
+ epause->autoneg = AUTONEG_ENABLE;
+ } else {
+ epause->autoneg = AUTONEG_DISABLE;
+ }
+
+ epause->rx_pause = 1;
+ epause->tx_pause = 1;
+
+ return 0;
+}
+
+static u32 atl1_get_rx_csum(struct net_device *netdev)
+{
+ return 1;
+}
+
+static void atl1_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
+ memcpy(p, atl1_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int atl1_nway_reset(struct net_device *netdev)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_hw *hw = &adapter->hw;
+
+ if (netif_running(netdev)) {
+ u16 phy_data;
+ atl1_down(adapter);
+
+ if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+ hw->media_type == MEDIA_TYPE_1000M_FULL) {
+ phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
+ } else {
+ switch (hw->media_type) {
+ case MEDIA_TYPE_100M_FULL:
+ phy_data = MII_CR_FULL_DUPLEX |
+ MII_CR_SPEED_100 | MII_CR_RESET;
+ break;
+ case MEDIA_TYPE_100M_HALF:
+ phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
+ break;
+ case MEDIA_TYPE_10M_FULL:
+ phy_data = MII_CR_FULL_DUPLEX |
+ MII_CR_SPEED_10 | MII_CR_RESET;
+ break;
+ default: /* MEDIA_TYPE_10M_HALF */
+ phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
+ }
+ }
+ atl1_write_phy_reg(hw, MII_BMCR, phy_data);
+ atl1_up(adapter);
+ }
+ return 0;
+}
+
+const struct ethtool_ops atl1_ethtool_ops = {
+ .get_settings = atl1_get_settings,
+ .set_settings = atl1_set_settings,
+ .get_drvinfo = atl1_get_drvinfo,
+ .get_wol = atl1_get_wol,
+ .set_wol = atl1_set_wol,
+ .get_ringparam = atl1_get_ringparam,
+ .set_ringparam = atl1_set_ringparam,
+ .get_pauseparam = atl1_get_pauseparam,
+ .set_pauseparam = atl1_set_pauseparam,
+ .get_rx_csum = atl1_get_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_hw_csum,
+ .get_link = ethtool_op_get_link,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_strings = atl1_get_strings,
+ .nway_reset = atl1_nway_reset,
+ .get_ethtool_stats = atl1_get_ethtool_stats,
+ .get_stats_count = atl1_get_stats_count,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ethtool_op_set_tso,
+};
diff --git a/drivers/net/atl1/atl1_hw.c b/drivers/net/atl1/atl1_hw.c
new file mode 100644
index 00000000000..08b2d785469
--- /dev/null
+++ b/drivers/net/atl1/atl1_hw.c
@@ -0,0 +1,718 @@
+/*
+ * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
+ * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
+ * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <asm/byteorder.h>
+
+#include "atl1.h"
+
+/*
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ * hw - Struct containing variables accessed by shared code
+ * return : ATL1_SUCCESS or idle status (if error)
+ */
+s32 atl1_reset_hw(struct atl1_hw *hw)
+{
+ u32 icr;
+ int i;
+
+ /*
+ * Clear the interrupt mask to stop the board from generating
+ * interrupts, and clear any pending interrupt events
+ */
+ /*
+ * iowrite32(0, hw->hw_addr + REG_IMR);
+ * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
+ */
+
+ /*
+ * Issue Soft Reset to the MAC. This will reset the chip's
+ * transmit and receive units and the DMA engine. It will not affect
+ * the current PCI configuration. The global reset bit is self-
+ * clearing, and should clear within a microsecond.
+ */
+ iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
+ ioread32(hw->hw_addr + REG_MASTER_CTRL);
+
+ iowrite16(1, hw->hw_addr + REG_GPHY_ENABLE);
+ ioread16(hw->hw_addr + REG_GPHY_ENABLE);
+
+ msleep(1); /* delay about 1ms */
+
+ /* Wait at least 10ms for all modules to be idle */
+ for (i = 0; i < 10; i++) {
+ icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
+ if (!icr)
+ break;
+ msleep(1); /* delay 1 ms */
+ cpu_relax(); /* FIXME: is this still the right way to do this? */
+ }
+
+ if (icr) {
+ printk (KERN_DEBUG "icr = %x\n", icr);
+ return icr;
+ }
+
+ return ATL1_SUCCESS;
+}
+
+/* EEPROM helper functions
+ *
+ * atl1_check_eeprom_exist
+ * returns 0 if an EEPROM is present
+ */
+static int atl1_check_eeprom_exist(struct atl1_hw *hw)
+{
+ u32 value;
+ value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
+ if (value & SPI_FLASH_CTRL_EN_VPD) {
+ value &= ~SPI_FLASH_CTRL_EN_VPD;
+ iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
+ }
+
+ value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
+ return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
+}
+
+static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
+{
+ int i;
+ u32 control;
+
+ if (offset & 3)
+ return false; /* address is not aligned */
+
+ iowrite32(0, hw->hw_addr + REG_VPD_DATA);
+ control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
+ iowrite32(control, hw->hw_addr + REG_VPD_CAP);
+ ioread32(hw->hw_addr + REG_VPD_CAP);
+
+ for (i = 0; i < 10; i++) {
+ msleep(2);
+ control = ioread32(hw->hw_addr + REG_VPD_CAP);
+ if (control & VPD_CAP_VPD_FLAG)
+ break;
+ }
+ if (control & VPD_CAP_VPD_FLAG) {
+ *p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
+ return true;
+ }
+ return false; /* timeout */
+}
+
+/*
+ * Reads the value from a PHY register
+ * hw - Struct containing variables accessed by shared code
+ * reg_addr - address of the PHY register to read
+ */
+s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
+{
+ u32 val;
+ int i;
+
+ val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
+ MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
+ MDIO_CLK_SEL_SHIFT;
+ iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
+ ioread32(hw->hw_addr + REG_MDIO_CTRL);
+
+ for (i = 0; i < MDIO_WAIT_TIMES; i++) {
+ udelay(2);
+ val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ break;
+ }
+ if (!(val & (MDIO_START | MDIO_BUSY))) {
+ *phy_data = (u16) val;
+ return ATL1_SUCCESS;
+ }
+ return ATL1_ERR_PHY;
+}
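As a usage illustration, the standard PHY identifier registers could be read through atl1_read_phy_reg(); MII_PHYSID1 and MII_PHYSID2 come from <linux/mii.h>, and the printk is only for demonstration (a hypothetical helper, not part of this patch):

/*
 * Illustrative sketch: reading the PHY identifier registers through
 * atl1_read_phy_reg().
 */
#include <linux/kernel.h>
#include <linux/mii.h>

#include "atl1.h"

static void demo_read_phy_id(struct atl1_hw *hw)
{
	u16 id1, id2;

	if (atl1_read_phy_reg(hw, MII_PHYSID1, &id1) == ATL1_SUCCESS &&
	    atl1_read_phy_reg(hw, MII_PHYSID2, &id2) == ATL1_SUCCESS)
		printk(KERN_DEBUG "%s: phy id %04x:%04x\n",
		       atl1_driver_name, id1, id2);
}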
+
+#define CUSTOM_SPI_CS_SETUP 2
+#define CUSTOM_SPI_CLK_HI 2
+#define CUSTOM_SPI_CLK_LO 2
+#define CUSTOM_SPI_CS_HOLD 2
+#define CUSTOM_SPI_CS_HI 3
+
+static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
+{
+ int i;
+ u32 value;
+
+ iowrite32(0, hw->hw_addr + REG_SPI_DATA);
+ iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);
+
+ value = SPI_FLASH_CTRL_WAIT_READY |
+ (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
+ SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
+ SPI_FLASH_CTRL_CLK_HI_MASK) <<
+ SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
+ SPI_FLASH_CTRL_CLK_LO_MASK) <<
+ SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
+ SPI_FLASH_CTRL_CS_HOLD_MASK) <<
+ SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
+ SPI_FLASH_CTRL_CS_HI_MASK) <<
+ SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
+ SPI_FLASH_CTRL_INS_SHIFT;
+
+ iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
+
+ value |= SPI_FLASH_CTRL_START;
+ iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
+ ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
+
+ for (i = 0; i < 10; i++) {
+ msleep(1); /* 1ms */
+ value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
+ if (!(value & SPI_FLASH_CTRL_START))
+ break;
+ }
+
+ if (value & SPI_FLASH_CTRL_START)
+ return false;
+
+ *buf = ioread32(hw->hw_addr + REG_SPI_DATA);
+
+ return true;
+}
+
+/*
+ * atl1_get_permanent_address
+ * returns 0 if a valid MAC address was obtained
+ */
+static int atl1_get_permanent_address(struct atl1_hw *hw)
+{
+ u32 addr[2];
+ u32 i, control;
+ u16 reg;
+ u8 eth_addr[ETH_ALEN];
+ bool key_valid;
+
+ if (is_valid_ether_addr(hw->perm_mac_addr))
+ return 0;
+
+ /* init */
+ addr[0] = addr[1] = 0;
+
+ if (!atl1_check_eeprom_exist(hw)) { /* eeprom exist */
+ reg = 0;
+ key_valid = false;
+ /* Read out all EEPROM content */
+ i = 0;
+ while (1) {
+ if (atl1_read_eeprom(hw, i + 0x100, &control)) {
+ if (key_valid) {
+ if (reg == REG_MAC_STA_ADDR)
+ addr[0] = control;
+ else if (reg == (REG_MAC_STA_ADDR + 4))
+ addr[1] = control;
+ key_valid = false;
+ } else if ((control & 0xff) == 0x5A) {
+ key_valid = true;
+ reg = (u16) (control >> 16);
+ } else
+ break; /* assume end of data when an invalid KEYWORD is encountered */
+ } else
+ break; /* read error */
+ i += 4;
+ }
+
+/*
+ * The following 2 lines are the Attansic originals. Saving for posterity.
+ * *(u32 *) & eth_addr[2] = LONGSWAP(addr[0]);
+ * *(u16 *) & eth_addr[0] = SHORTSWAP(*(u16 *) & addr[1]);
+ */
+ *(u32 *) & eth_addr[2] = swab32(addr[0]);
+ *(u16 *) & eth_addr[0] = swab16(*(u16 *) & addr[1]);
+
+ if (is_valid_ether_addr(eth_addr)) {
+ memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
+ return 0;
+ }
+ return 1;
+ }
+
+ /* no EEPROM exists; see if the MAC address is in SPI flash */
+ addr[0] = addr[1] = 0;
+ reg = 0;
+ key_valid = false;
+ i = 0;
+ while (1) {
+ if (atl1_spi_read(hw, i + 0x1f000, &control)) {
+ if (key_valid) {
+ if (reg == REG_MAC_STA_ADDR)
+ addr[0] = control;
+ else if (reg == (REG_MAC_STA_ADDR + 4))
+ addr[1] = control;
+ key_valid = false;
+ } else if ((control & 0xff) == 0x5A) {
+ key_valid = true;
+ reg = (u16) (control >> 16);
+ } else
+ break; /* data end */
+ } else
+ break; /* read error */
+ i += 4;
+ }
+
+/*
+ * The following 2 lines are the Attansic originals. Saving for posterity.
+ * *(u32 *) & eth_addr[2] = LONGSWAP(addr[0]);
+ * *(u16 *) & eth_addr[0] = SHORTSWAP(*(u16 *) & addr[1]);
+ */
+ *(u32 *) & eth_addr[2] = swab32(addr[0]);
+ *(u16 *) & eth_addr[0] = swab16(*(u16 *) & addr[1]);
+ if (is_valid_ether_addr(eth_addr)) {
+ memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
+ return 0;
+ }
+ return 1;
+}
+
+/*
+ * Reads the adapter's MAC address from the EEPROM
+ * hw - Struct containing variables accessed by shared code
+ */
+s32 atl1_read_mac_addr(struct atl1_hw *hw)
+{
+ u16 i;
+
+ if (atl1_get_permanent_address(hw))
+ random_ether_addr(hw->perm_mac_addr);
+
+ for (i = 0; i < ETH_ALEN; i++)
+ hw->mac_addr[i] = hw->perm_mac_addr[i];
+ return ATL1_SUCCESS;
+}
+
+/*
+ * Hashes an address to determine its location in the multicast table
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr - the multicast address to hash
+ *
+ * atl1_hash_mc_addr
+ * purpose
+ * compute the hash value for a multicast address
+ * hash calculation:
+ * 1. calculate a 32-bit CRC for the multicast address
+ * 2. reverse the bit order of the CRC (MSB to LSB)
+ */
+u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
+{
+ u32 crc32, value = 0;
+ int i;
+
+ crc32 = ether_crc_le(6, mc_addr);
+ crc32 = ~crc32;
+ for (i = 0; i < 32; i++)
+ value |= (((crc32 >> i) & 1) << (31 - i));
+
+ return value;
+}
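The loop above is a plain 32-bit bit reversal of the complemented CRC. The resulting hash value is then consumed by atl1_hash_set() just below: bit 31 picks one of the two hash-table registers and bits 30:26 pick the bit within it. A tiny standalone sketch of that mapping, with an arbitrary example value (illustration only, not part of this patch):

/*
 * Illustrative sketch: mapping a hash value onto the two 32-bit
 * hash-table registers, as atl1_hash_set() does.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hash_value = 0x8c000000u;		/* example value */
	uint32_t hash_reg = (hash_value >> 31) & 0x1;	/* which register */
	uint32_t hash_bit = (hash_value >> 26) & 0x1F;	/* which bit in it */

	printf("register %u, bit %u\n", hash_reg, hash_bit);
	return 0;
}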
+
+/*
+ * Sets the bit in the multicast table corresponding to the hash value.
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ */
+void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
+{
+ u32 hash_bit, hash_reg;
+ u32 mta;
+
+ /*
+ * The HASH Table is a register array of 2 32-bit registers.
+ * It is treated like an array of 64 bits. We want to set
+ * bit BitArray[hash_value]. So we figure out what register
+ * the bit is in, read it, OR in the new bit, then write
+ * back the new value. The register is selected by the top bit
+ * (bit 31) of the hash value, and the bit within that register
+ * by the next five bits (bits 30:26).
+ */
+ hash_reg = (hash_value >> 31) & 0x1;
+ hash_bit = (hash_value >> 26) & 0x1F;
+ mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
+ mta |= (1 << hash_bit);
+ iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
+}
+
+/*
+ * Writes a value to a PHY register
+ * hw - Struct containing variables accessed by shared code
+ * reg_addr - address of the PHY register to write
+ * data - data to write to the PHY
+ */
+s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
+{
+ int i;
+ u32 val;
+
+ val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
+ (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
+ MDIO_SUP_PREAMBLE |
+ MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
+ iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
+ ioread32(hw->hw_addr + REG_MDIO_CTRL);
+
+ for (i = 0; i < MDIO_WAIT_TIMES; i++) {
+ udelay(2);
+ val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ break;
+ }
+
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ return ATL1_SUCCESS;
+
+ return ATL1_ERR_PHY;
+}
+
+/*
+ * Take the L001 PHY out of its power-saving state (hardware bug workaround)
+ * hw - Struct containing variables accessed by shared code
+ * At power-on the L001 PHY is always in the power-saving state,
+ * which forbids gigabit link.
+ */
+static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
+{
+ s32 ret;
+ ret = atl1_write_phy_reg(hw, 29, 0x0029);
+ if (ret)
+ return ret;
+ return atl1_write_phy_reg(hw, 30, 0);
+}
+
+/*
+ * TODO: do something or get rid of this
+ */
+s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
+{
+/* s32 ret_val;
+ * u16 phy_data;
+ */
+
+/*
+ ret_val = atl1_write_phy_reg(hw, ...);
+ ret_val = atl1_write_phy_reg(hw, ...);
+ ....
+*/
+ return ATL1_SUCCESS;
+}
+
+/*
+ * Resets the PHY and makes all configuration take effect
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sets bits 15 and 12 of the MII Control register (for the F001 bug)
+ */
+static s32 atl1_phy_reset(struct atl1_hw *hw)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
+ hw->media_type == MEDIA_TYPE_1000M_FULL)
+ phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
+ else {
+ switch (hw->media_type) {
+ case MEDIA_TYPE_100M_FULL:
+ phy_data =
+ MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
+ MII_CR_RESET;
+ break;
+ case MEDIA_TYPE_100M_HALF:
+ phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
+ break;
+ case MEDIA_TYPE_10M_FULL:
+ phy_data =
+ MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
+ break;
+ default: /* MEDIA_TYPE_10M_HALF: */
+ phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
+ break;
+ }
+ }
+
+ ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
+ if (ret_val) {
+ u32 val;
+ int i;
+ /* pcie serdes link may be down! */
+ printk(KERN_DEBUG "%s: autoneg caused pcie phy link down\n",
+ atl1_driver_name);
+
+ for (i = 0; i < 25; i++) {
+ msleep(1);
+ val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
+ if (!(val & (MDIO_START | MDIO_BUSY)))
+ break;
+ }
+
+ if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
+ printk(KERN_WARNING
+ "%s: pcie link down at least for 25ms\n",
+ atl1_driver_name);
+ return ret_val;
+ }
+ }
+ return ATL1_SUCCESS;
+}
+
+/*
+ * Configures PHY autoneg and flow control advertisement settings
+ * hw - Struct containing variables accessed by shared code
+ */
+s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
+{
+ s32 ret_val;
+ s16 mii_autoneg_adv_reg;
+ s16 mii_1000t_ctrl_reg;
+
+ /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+ mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
+
+ /* Read the MII 1000Base-T Control Register (Address 9). */
+ mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK;
+
+ /*
+ * First we clear all the 10/100 mb speed bits in the Auto-Neg
+ * Advertisement Register (Address 4) and the 1000 mb speed bits in
+ * the 1000Base-T Control Register (Address 9).
+ */
+ mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
+ mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
+
+ /*
+ * Need to parse media_type and set up
+ * the appropriate PHY registers.
+ */
+ switch (hw->media_type) {
+ case MEDIA_TYPE_AUTO_SENSOR:
+ mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
+ MII_AR_10T_FD_CAPS |
+ MII_AR_100TX_HD_CAPS |
+ MII_AR_100TX_FD_CAPS);
+ mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS;
+ break;
+
+ case MEDIA_TYPE_1000M_FULL:
+ mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS;
+ break;
+
+ case MEDIA_TYPE_100M_FULL:
+ mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
+ break;
+
+ case MEDIA_TYPE_100M_HALF:
+ mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
+ break;
+
+ case MEDIA_TYPE_10M_FULL:
+ mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
+ break;
+
+ default:
+ mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
+ break;
+ }
+
+ /* flow control fixed to enable all */
+ mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
+
+ hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
+ hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
+
+ ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = atl1_write_phy_reg(hw, MII_AT001_CR, mii_1000t_ctrl_reg);
+ if (ret_val)
+ return ret_val;
+
+ return ATL1_SUCCESS;
+}
+
+/*
+ * Configures link settings.
+ * hw - Struct containing variables accessed by shared code
+ * Assumes the hardware has previously been reset and the
+ * transmitter and receiver are not enabled.
+ */
+static s32 atl1_setup_link(struct atl1_hw *hw)
+{
+ s32 ret_val;
+
+ /*
+ * Options:
+ * PHY will advertise value(s) parsed from
+ * autoneg_advertised and fc
+ * no matter what autoneg is; we do not wait for the link result.
+ */
+ ret_val = atl1_phy_setup_autoneg_adv(hw);
+ if (ret_val) {
+ printk(KERN_DEBUG "%s: error setting up autonegotiation\n",
+ atl1_driver_name);
+ return ret_val;
+ }
+ /* SW reset, then enable autoneg if needed */
+ ret_val = atl1_phy_reset(hw);
+ if (ret_val) {
+ printk(KERN_DEBUG "%s: error resetting the phy\n",
+ atl1_driver_name);
+ return ret_val;
+ }
+ hw->phy_configured = true;
+ return ret_val;
+}
+
+static struct atl1_spi_flash_dev flash_table[] = {
+/* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SECTOR_ERASE CHIP_ERASE */
+ {"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62},
+ {"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60},
+ {"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7},
+};
+
+static void atl1_init_flash_opcode(struct atl1_hw *hw)
+{
+ if (hw->flash_vendor >= sizeof(flash_table) / sizeof(flash_table[0]))
+ hw->flash_vendor = 0; /* ATMEL */
+
+ /* Init OP table */
+ iowrite8(flash_table[hw->flash_vendor].cmd_program,
+ hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
+ iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
+ hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
+ iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
+ hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
+ iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
+ hw->hw_addr + REG_SPI_FLASH_OP_RDID);
+ iowrite8(flash_table[hw->flash_vendor].cmd_wren,
+ hw->hw_addr + REG_SPI_FLASH_OP_WREN);
+ iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
+ hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
+ iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
+ hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
+ iowrite8(flash_table[hw->flash_vendor].cmd_read,
+ hw->hw_addr + REG_SPI_FLASH_OP_READ);
+}
+
+/*
+ * Performs basic configuration of the adapter.
+ * hw - Struct containing variables accessed by shared code
+ * Assumes that the controller has previously been reset and is in a
+ * post-reset uninitialized state. Initializes the multicast table
+ * and calls routines to set up the link.
+ * Leaves the transmit and receive units disabled and uninitialized.
+ */
+s32 atl1_init_hw(struct atl1_hw *hw)
+{
+ u32 ret_val = 0;
+
+ /* Zero out the Multicast HASH table */
+ iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
+ /* clear the second 32-bit word of the multicast hash table as well */
+ iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
+
+ atl1_init_flash_opcode(hw);
+
+ if (!hw->phy_configured) {
+ /* enable GPHY LinkChange Interrupt */
+ ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
+ if (ret_val)
+ return ret_val;
+ /* make PHY out of power-saving state */
+ ret_val = atl1_phy_leave_power_saving(hw);
+ if (ret_val)
+ return ret_val;
+ /* Call a subroutine to configure the link */
+ ret_val = atl1_setup_link(hw);
+ }
+ return ret_val;
+}
+
+/*
+ * Detects the current speed and duplex settings of the hardware.
+ * hw - Struct containing variables accessed by shared code
+ * speed - Speed of the connection
+ * duplex - Duplex setting of the connection
+ */
+s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
+{
+ s32 ret_val;
+ u16 phy_data;
+
+ /* Read PHY Specific Status Register (17) */
+ ret_val = atl1_read_phy_reg(hw, MII_AT001_PSSR, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED))
+ return ATL1_ERR_PHY_RES;
+
+ switch (phy_data & MII_AT001_PSSR_SPEED) {
+ case MII_AT001_PSSR_1000MBS:
+ *speed = SPEED_1000;
+ break;
+ case MII_AT001_PSSR_100MBS:
+ *speed = SPEED_100;
+ break;
+ case MII_AT001_PSSR_10MBS:
+ *speed = SPEED_10;
+ break;
+ default:
+ printk(KERN_DEBUG "%s: error getting speed\n",
+ atl1_driver_name);
+ return ATL1_ERR_PHY_SPEED;
+ }
+ if (phy_data & MII_AT001_PSSR_DPLX)
+ *duplex = FULL_DUPLEX;
+ else
+ *duplex = HALF_DUPLEX;
+
+ return ATL1_SUCCESS;
+}
+
+void atl1_set_mac_addr(struct atl1_hw *hw)
+{
+ u32 value;
+ /*
+ * 00-0B-6A-F6-00-DC
+ * 0: 6AF600DC 1: 000B
+ * low dword
+ */
+ value = (((u32) hw->mac_addr[2]) << 24) |
+ (((u32) hw->mac_addr[3]) << 16) |
+ (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
+ iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
+ /* high dword */
+ value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
+ iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
+}
diff --git a/drivers/net/atl1/atl1_hw.h b/drivers/net/atl1/atl1_hw.h
new file mode 100644
index 00000000000..100c09c66e6
--- /dev/null
+++ b/drivers/net/atl1/atl1_hw.h
@@ -0,0 +1,951 @@
+/*
+ * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
+ * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
+ * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * There are a lot of defines in here that are unused and/or have cryptic
+ * names. Please leave them alone, as they're the closest thing we have
+ * to a spec from Attansic at present. *ahem* -- CHS
+ */
+
+#ifndef _ATL1_HW_H_
+#define _ATL1_HW_H_
+
+#include <linux/types.h>
+#include <linux/mii.h>
+
+struct atl1_adapter;
+struct atl1_hw;
+
+/* function prototypes needed by multiple files */
+s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw);
+s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data);
+s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex);
+s32 atl1_read_mac_addr(struct atl1_hw *hw);
+s32 atl1_init_hw(struct atl1_hw *hw);
+s32 atl1_set_speed_and_duplex(struct atl1_hw *hw, u16 speed, u16 duplex);
+u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
+void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
+s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
+void atl1_set_mac_addr(struct atl1_hw *hw);
+s32 atl1_phy_enter_power_saving(struct atl1_hw *hw);
+s32 atl1_reset_hw(struct atl1_hw *hw);
+void atl1_check_options(struct atl1_adapter *adapter);
+
+/* register definitions */
+#define REG_PCIE_CAP_LIST 0x58
+
+#define REG_VPD_CAP 0x6C
+#define VPD_CAP_ID_MASK 0xff
+#define VPD_CAP_ID_SHIFT 0
+#define VPD_CAP_NEXT_PTR_MASK 0xFF
+#define VPD_CAP_NEXT_PTR_SHIFT 8
+#define VPD_CAP_VPD_ADDR_MASK 0x7FFF
+#define VPD_CAP_VPD_ADDR_SHIFT 16
+#define VPD_CAP_VPD_FLAG 0x80000000
+
+#define REG_VPD_DATA 0x70
+
+#define REG_SPI_FLASH_CTRL 0x200
+#define SPI_FLASH_CTRL_STS_NON_RDY 0x1
+#define SPI_FLASH_CTRL_STS_WEN 0x2
+#define SPI_FLASH_CTRL_STS_WPEN 0x80
+#define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF
+#define SPI_FLASH_CTRL_DEV_STS_SHIFT 0
+#define SPI_FLASH_CTRL_INS_MASK 0x7
+#define SPI_FLASH_CTRL_INS_SHIFT 8
+#define SPI_FLASH_CTRL_START 0x800
+#define SPI_FLASH_CTRL_EN_VPD 0x2000
+#define SPI_FLASH_CTRL_LDSTART 0x8000
+#define SPI_FLASH_CTRL_CS_HI_MASK 0x3
+#define SPI_FLASH_CTRL_CS_HI_SHIFT 16
+#define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3
+#define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18
+#define SPI_FLASH_CTRL_CLK_LO_MASK 0x3
+#define SPI_FLASH_CTRL_CLK_LO_SHIFT 20
+#define SPI_FLASH_CTRL_CLK_HI_MASK 0x3
+#define SPI_FLASH_CTRL_CLK_HI_SHIFT 22
+#define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3
+#define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24
+#define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3
+#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26
+#define SPI_FLASH_CTRL_WAIT_READY 0x10000000
+
+#define REG_SPI_ADDR 0x204
+
+#define REG_SPI_DATA 0x208
+
+#define REG_SPI_FLASH_CONFIG 0x20C
+#define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF
+#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0
+#define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3
+#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24
+#define SPI_FLASH_CONFIG_LD_EXIST 0x4000000
+
+#define REG_SPI_FLASH_OP_PROGRAM 0x210
+#define REG_SPI_FLASH_OP_SC_ERASE 0x211
+#define REG_SPI_FLASH_OP_CHIP_ERASE 0x212
+#define REG_SPI_FLASH_OP_RDID 0x213
+#define REG_SPI_FLASH_OP_WREN 0x214
+#define REG_SPI_FLASH_OP_RDSR 0x215
+#define REG_SPI_FLASH_OP_WRSR 0x216
+#define REG_SPI_FLASH_OP_READ 0x217
+
+#define REG_TWSI_CTRL 0x218
+#define TWSI_CTRL_LD_OFFSET_MASK 0xFF
+#define TWSI_CTRL_LD_OFFSET_SHIFT 0
+#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7
+#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
+#define TWSI_CTRL_SW_LDSTART 0x800
+#define TWSI_CTRL_HW_LDSTART 0x1000
+#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
+#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
+#define TWSI_CTRL_LD_EXIST 0x400000
+#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
+#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23
+#define TWSI_CTRL_FREQ_SEL_100K 0
+#define TWSI_CTRL_FREQ_SEL_200K 1
+#define TWSI_CTRL_FREQ_SEL_300K 2
+#define TWSI_CTRL_FREQ_SEL_400K 3
+#define TWSI_CTRL_SMB_SLV_ADDR
+#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3
+#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24
+
+#define REG_PCIE_DEV_MISC_CTRL 0x21C
+#define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2
+#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1
+#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4
+#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8
+#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10
+
+/* Selene Master Control Register */
+#define REG_MASTER_CTRL 0x1400
+#define MASTER_CTRL_SOFT_RST 0x1
+#define MASTER_CTRL_MTIMER_EN 0x2
+#define MASTER_CTRL_ITIMER_EN 0x4
+#define MASTER_CTRL_MANUAL_INT 0x8
+#define MASTER_CTRL_REV_NUM_SHIFT 16
+#define MASTER_CTRL_REV_NUM_MASK 0xff
+#define MASTER_CTRL_DEV_ID_SHIFT 24
+#define MASTER_CTRL_DEV_ID_MASK 0xff
+
+/* Timer Initial Value Register */
+#define REG_MANUAL_TIMER_INIT 0x1404
+
+/* IRQ Moderator Timer Initial Value Register */
+#define REG_IRQ_MODU_TIMER_INIT 0x1408
+
+#define REG_GPHY_ENABLE 0x140C
+
+/* IRQ Anti-Lost Timer Initial Value Register */
+#define REG_CMBDISDMA_TIMER 0x140E
+
+/* Block IDLE Status Register */
+#define REG_IDLE_STATUS 0x1410
+#define IDLE_STATUS_RXMAC 1
+#define IDLE_STATUS_TXMAC 2
+#define IDLE_STATUS_RXQ 4
+#define IDLE_STATUS_TXQ 8
+#define IDLE_STATUS_DMAR 0x10
+#define IDLE_STATUS_DMAW 0x20
+#define IDLE_STATUS_SMB 0x40
+#define IDLE_STATUS_CMB 0x80
+
+/* MDIO Control Register */
+#define REG_MDIO_CTRL 0x1414
+#define MDIO_DATA_MASK 0xffff
+#define MDIO_DATA_SHIFT 0
+#define MDIO_REG_ADDR_MASK 0x1f
+#define MDIO_REG_ADDR_SHIFT 16
+#define MDIO_RW 0x200000
+#define MDIO_SUP_PREAMBLE 0x400000
+#define MDIO_START 0x800000
+#define MDIO_CLK_SEL_SHIFT 24
+#define MDIO_CLK_25_4 0
+#define MDIO_CLK_25_6 2
+#define MDIO_CLK_25_8 3
+#define MDIO_CLK_25_10 4
+#define MDIO_CLK_25_14 5
+#define MDIO_CLK_25_20 6
+#define MDIO_CLK_25_28 7
+#define MDIO_BUSY 0x8000000
+#define MDIO_WAIT_TIMES 30
+
+/* MII PHY Status Register */
+#define REG_PHY_STATUS 0x1418
+
+/* BIST Control and Status Register0 (for the Packet Memory) */
+#define REG_BIST0_CTRL 0x141c
+#define BIST0_NOW 0x1
+#define BIST0_SRAM_FAIL 0x2
+#define BIST0_FUSE_FLAG 0x4
+#define REG_BIST1_CTRL 0x1420
+#define BIST1_NOW 0x1
+#define BIST1_SRAM_FAIL 0x2
+#define BIST1_FUSE_FLAG 0x4
+
+/* MAC Control Register */
+#define REG_MAC_CTRL 0x1480
+#define MAC_CTRL_TX_EN 1
+#define MAC_CTRL_RX_EN 2
+#define MAC_CTRL_TX_FLOW 4
+#define MAC_CTRL_RX_FLOW 8
+#define MAC_CTRL_LOOPBACK 0x10
+#define MAC_CTRL_DUPLX 0x20
+#define MAC_CTRL_ADD_CRC 0x40
+#define MAC_CTRL_PAD 0x80
+#define MAC_CTRL_LENCHK 0x100
+#define MAC_CTRL_HUGE_EN 0x200
+#define MAC_CTRL_PRMLEN_SHIFT 10
+#define MAC_CTRL_PRMLEN_MASK 0xf
+#define MAC_CTRL_RMV_VLAN 0x4000
+#define MAC_CTRL_PROMIS_EN 0x8000
+#define MAC_CTRL_TX_PAUSE 0x10000
+#define MAC_CTRL_SCNT 0x20000
+#define MAC_CTRL_SRST_TX 0x40000
+#define MAC_CTRL_TX_SIMURST 0x80000
+#define MAC_CTRL_SPEED_SHIFT 20
+#define MAC_CTRL_SPEED_MASK 0x300000
+#define MAC_CTRL_SPEED_1000 2
+#define MAC_CTRL_SPEED_10_100 1
+#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000
+#define MAC_CTRL_TX_HUGE 0x800000
+#define MAC_CTRL_RX_CHKSUM_EN 0x1000000
+#define MAC_CTRL_MC_ALL_EN 0x2000000
+#define MAC_CTRL_BC_EN 0x4000000
+#define MAC_CTRL_DBG 0x8000000
+
+/* MAC IPG/IFG Control Register */
+#define REG_MAC_IPG_IFG 0x1484
+#define MAC_IPG_IFG_IPGT_SHIFT 0
+#define MAC_IPG_IFG_IPGT_MASK 0x7f
+#define MAC_IPG_IFG_MIFG_SHIFT 8
+#define MAC_IPG_IFG_MIFG_MASK 0xff
+#define MAC_IPG_IFG_IPGR1_SHIFT 16
+#define MAC_IPG_IFG_IPGR1_MASK 0x7f
+#define MAC_IPG_IFG_IPGR2_SHIFT 24
+#define MAC_IPG_IFG_IPGR2_MASK 0x7f
+
+/* MAC STATION ADDRESS */
+#define REG_MAC_STA_ADDR 0x1488
+
+/* Hash table for multicast address */
+#define REG_RX_HASH_TABLE 0x1490
+
+/* MAC Half-Duplex Control Register */
+#define REG_MAC_HALF_DUPLX_CTRL 0x1498
+#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0
+#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff
+#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12
+#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf
+#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000
+#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000
+#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000
+#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000
+#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20
+#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf
+#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24
+#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf
+
+/* Maximum Frame Length Control Register */
+#define REG_MTU 0x149c
+
+/* Wake-On-Lan control register */
+#define REG_WOL_CTRL 0x14a0
+#define WOL_PATTERN_EN 0x00000001
+#define WOL_PATTERN_PME_EN 0x00000002
+#define WOL_MAGIC_EN 0x00000004
+#define WOL_MAGIC_PME_EN 0x00000008
+#define WOL_LINK_CHG_EN 0x00000010
+#define WOL_LINK_CHG_PME_EN 0x00000020
+#define WOL_PATTERN_ST 0x00000100
+#define WOL_MAGIC_ST 0x00000200
+#define WOL_LINKCHG_ST 0x00000400
+#define WOL_CLK_SWITCH_EN 0x00008000
+#define WOL_PT0_EN 0x00010000
+#define WOL_PT1_EN 0x00020000
+#define WOL_PT2_EN 0x00040000
+#define WOL_PT3_EN 0x00080000
+#define WOL_PT4_EN 0x00100000
+#define WOL_PT5_EN 0x00200000
+#define WOL_PT6_EN 0x00400000
+
+/* WOL Length ( 2 DWORD ) */
+#define REG_WOL_PATTERN_LEN 0x14a4
+#define WOL_PT_LEN_MASK 0x7f
+#define WOL_PT0_LEN_SHIFT 0
+#define WOL_PT1_LEN_SHIFT 8
+#define WOL_PT2_LEN_SHIFT 16
+#define WOL_PT3_LEN_SHIFT 24
+#define WOL_PT4_LEN_SHIFT 0
+#define WOL_PT5_LEN_SHIFT 8
+#define WOL_PT6_LEN_SHIFT 16
+
+/* Internal SRAM Partition Register */
+#define REG_SRAM_RFD_ADDR 0x1500
+#define REG_SRAM_RFD_LEN (REG_SRAM_RFD_ADDR+ 4)
+#define REG_SRAM_RRD_ADDR (REG_SRAM_RFD_ADDR+ 8)
+#define REG_SRAM_RRD_LEN (REG_SRAM_RFD_ADDR+12)
+#define REG_SRAM_TPD_ADDR (REG_SRAM_RFD_ADDR+16)
+#define REG_SRAM_TPD_LEN (REG_SRAM_RFD_ADDR+20)
+#define REG_SRAM_TRD_ADDR (REG_SRAM_RFD_ADDR+24)
+#define REG_SRAM_TRD_LEN (REG_SRAM_RFD_ADDR+28)
+#define REG_SRAM_RXF_ADDR (REG_SRAM_RFD_ADDR+32)
+#define REG_SRAM_RXF_LEN (REG_SRAM_RFD_ADDR+36)
+#define REG_SRAM_TXF_ADDR (REG_SRAM_RFD_ADDR+40)
+#define REG_SRAM_TXF_LEN (REG_SRAM_RFD_ADDR+44)
+#define REG_SRAM_TCPH_PATH_ADDR (REG_SRAM_RFD_ADDR+48)
+#define SRAM_TCPH_ADDR_MASK 0x0fff
+#define SRAM_TCPH_ADDR_SHIFT 0
+#define SRAM_PATH_ADDR_MASK 0x0fff
+#define SRAM_PATH_ADDR_SHIFT 16
+
+/* Load Ptr Register */
+#define REG_LOAD_PTR (REG_SRAM_RFD_ADDR+52)
+
+/* Descriptor Control register */
+#define REG_DESC_BASE_ADDR_HI 0x1540
+#define REG_DESC_RFD_ADDR_LO (REG_DESC_BASE_ADDR_HI+4)
+#define REG_DESC_RRD_ADDR_LO (REG_DESC_BASE_ADDR_HI+8)
+#define REG_DESC_TPD_ADDR_LO (REG_DESC_BASE_ADDR_HI+12)
+#define REG_DESC_CMB_ADDR_LO (REG_DESC_BASE_ADDR_HI+16)
+#define REG_DESC_SMB_ADDR_LO (REG_DESC_BASE_ADDR_HI+20)
+#define REG_DESC_RFD_RRD_RING_SIZE (REG_DESC_BASE_ADDR_HI+24)
+#define DESC_RFD_RING_SIZE_MASK 0x7ff
+#define DESC_RFD_RING_SIZE_SHIFT 0
+#define DESC_RRD_RING_SIZE_MASK 0x7ff
+#define DESC_RRD_RING_SIZE_SHIFT 16
+#define REG_DESC_TPD_RING_SIZE (REG_DESC_BASE_ADDR_HI+28)
+#define DESC_TPD_RING_SIZE_MASK 0x3ff
+#define DESC_TPD_RING_SIZE_SHIFT 0
+
+/* TXQ Control Register */
+#define REG_TXQ_CTRL 0x1580
+#define TXQ_CTRL_TPD_BURST_NUM_SHIFT 0
+#define TXQ_CTRL_TPD_BURST_NUM_MASK 0x1f
+#define TXQ_CTRL_EN 0x20
+#define TXQ_CTRL_ENH_MODE 0x40
+#define TXQ_CTRL_TPD_FETCH_TH_SHIFT 8
+#define TXQ_CTRL_TPD_FETCH_TH_MASK 0x3f
+#define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16
+#define TXQ_CTRL_TXF_BURST_NUM_MASK 0xffff
+
+/* Jumbo packet Threshold for task offload */
+#define REG_TX_JUMBO_TASK_TH_TPD_IPG 0x1584
+#define TX_JUMBO_TASK_TH_MASK 0x7ff
+#define TX_JUMBO_TASK_TH_SHIFT 0
+#define TX_TPD_MIN_IPG_MASK 0x1f
+#define TX_TPD_MIN_IPG_SHIFT 16
+
+/* RXQ Control Register */
+#define REG_RXQ_CTRL 0x15a0
+#define RXQ_CTRL_RFD_BURST_NUM_SHIFT 0
+#define RXQ_CTRL_RFD_BURST_NUM_MASK 0xff
+#define RXQ_CTRL_RRD_BURST_THRESH_SHIFT 8
+#define RXQ_CTRL_RRD_BURST_THRESH_MASK 0xff
+#define RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT 16
+#define RXQ_CTRL_RFD_PREF_MIN_IPG_MASK 0x1f
+#define RXQ_CTRL_CUT_THRU_EN 0x40000000
+#define RXQ_CTRL_EN 0x80000000
+
+/* Rx jumbo packet threshold and rrd retirement timer */
+#define REG_RXQ_JMBOSZ_RRDTIM (REG_RXQ_CTRL+ 4)
+#define RXQ_JMBOSZ_TH_MASK 0x7ff
+#define RXQ_JMBOSZ_TH_SHIFT 0
+#define RXQ_JMBO_LKAH_MASK 0xf
+#define RXQ_JMBO_LKAH_SHIFT 11
+#define RXQ_RRD_TIMER_MASK 0xffff
+#define RXQ_RRD_TIMER_SHIFT 16
+
+/* RFD flow control register */
+#define REG_RXQ_RXF_PAUSE_THRESH (REG_RXQ_CTRL+ 8)
+#define RXQ_RXF_PAUSE_TH_HI_SHIFT 16
+#define RXQ_RXF_PAUSE_TH_HI_MASK 0xfff
+#define RXQ_RXF_PAUSE_TH_LO_SHIFT 0
+#define RXQ_RXF_PAUSE_TH_LO_MASK 0xfff
+
+/* RRD flow control register */
+#define REG_RXQ_RRD_PAUSE_THRESH (REG_RXQ_CTRL+12)
+#define RXQ_RRD_PAUSE_TH_HI_SHIFT 0
+#define RXQ_RRD_PAUSE_TH_HI_MASK 0xfff
+#define RXQ_RRD_PAUSE_TH_LO_SHIFT 16
+#define RXQ_RRD_PAUSE_TH_LO_MASK 0xfff
+
+/* DMA Engine Control Register */
+#define REG_DMA_CTRL 0x15c0
+#define DMA_CTRL_DMAR_IN_ORDER 0x1
+#define DMA_CTRL_DMAR_ENH_ORDER 0x2
+#define DMA_CTRL_DMAR_OUT_ORDER 0x4
+#define DMA_CTRL_RCB_VALUE 0x8
+#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4
+#define DMA_CTRL_DMAR_BURST_LEN_MASK 7
+#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7
+#define DMA_CTRL_DMAW_BURST_LEN_MASK 7
+#define DMA_CTRL_DMAR_EN 0x400
+#define DMA_CTRL_DMAW_EN 0x800
+
+/* CMB/SMB Control Register */
+#define REG_CSMB_CTRL 0x15d0
+#define CSMB_CTRL_CMB_NOW 1
+#define CSMB_CTRL_SMB_NOW 2
+#define CSMB_CTRL_CMB_EN 4
+#define CSMB_CTRL_SMB_EN 8
+
+/* CMB DMA Write Threshold Register */
+#define REG_CMB_WRITE_TH (REG_CSMB_CTRL+ 4)
+#define CMB_RRD_TH_SHIFT 0
+#define CMB_RRD_TH_MASK 0x7ff
+#define CMB_TPD_TH_SHIFT 16
+#define CMB_TPD_TH_MASK 0x7ff
+
+/* RX/TX count-down timer to trigger CMB-write. 2us resolution. */
+#define REG_CMB_WRITE_TIMER (REG_CSMB_CTRL+ 8)
+#define CMB_RX_TM_SHIFT 0
+#define CMB_RX_TM_MASK 0xffff
+#define CMB_TX_TM_SHIFT 16
+#define CMB_TX_TM_MASK 0xffff
+
+/* Number of packets received since the last CMB write */
+#define REG_CMB_RX_PKT_CNT (REG_CSMB_CTRL+12)
+
+/* Number of packets transmitted since the last CMB write */
+#define REG_CMB_TX_PKT_CNT (REG_CSMB_CTRL+16)
+
+/* SMB auto DMA timer register */
+#define REG_SMB_TIMER (REG_CSMB_CTRL+20)
+
+/* Mailbox Register */
+#define REG_MAILBOX 0x15f0
+#define MB_RFD_PROD_INDX_SHIFT 0
+#define MB_RFD_PROD_INDX_MASK 0x7ff
+#define MB_RRD_CONS_INDX_SHIFT 11
+#define MB_RRD_CONS_INDX_MASK 0x7ff
+#define MB_TPD_PROD_INDX_SHIFT 22
+#define MB_TPD_PROD_INDX_MASK 0x3ff
+
+/* Interrupt Status Register */
+#define REG_ISR 0x1600
+#define ISR_SMB 1
+#define ISR_TIMER 2
+#define ISR_MANUAL 4
+#define ISR_RXF_OV 8
+#define ISR_RFD_UNRUN 0x10
+#define ISR_RRD_OV 0x20
+#define ISR_TXF_UNRUN 0x40
+#define ISR_LINK 0x80
+#define ISR_HOST_RFD_UNRUN 0x100
+#define ISR_HOST_RRD_OV 0x200
+#define ISR_DMAR_TO_RST 0x400
+#define ISR_DMAW_TO_RST 0x800
+#define ISR_GPHY 0x1000
+#define ISR_RX_PKT 0x10000
+#define ISR_TX_PKT 0x20000
+#define ISR_TX_DMA 0x40000
+#define ISR_RX_DMA 0x80000
+#define ISR_CMB_RX 0x100000
+#define ISR_CMB_TX 0x200000
+#define ISR_MAC_RX 0x400000
+#define ISR_MAC_TX 0x800000
+#define ISR_UR_DETECTED 0x1000000
+#define ISR_FERR_DETECTED 0x2000000
+#define ISR_NFERR_DETECTED 0x4000000
+#define ISR_CERR_DETECTED 0x8000000
+#define ISR_PHY_LINKDOWN 0x10000000
+#define ISR_DIS_SMB 0x20000000
+#define ISR_DIS_DMA 0x40000000
+#define ISR_DIS_INT 0x80000000
+
+/* Interrupt Mask Register */
+#define REG_IMR 0x1604
+
+/* Normal Interrupt mask */
+#define IMR_NORMAL_MASK (\
+ ISR_SMB |\
+ ISR_GPHY |\
+ ISR_PHY_LINKDOWN|\
+ ISR_DMAR_TO_RST |\
+ ISR_DMAW_TO_RST |\
+ ISR_CMB_TX |\
+ ISR_CMB_RX )
+
+/* Debug Interrupt Mask (enables all interrupts) */
+#define IMR_DEBUG_MASK (\
+ ISR_SMB |\
+ ISR_TIMER |\
+ ISR_MANUAL |\
+ ISR_RXF_OV |\
+ ISR_RFD_UNRUN |\
+ ISR_RRD_OV |\
+ ISR_TXF_UNRUN |\
+ ISR_LINK |\
+ ISR_CMB_TX |\
+ ISR_CMB_RX |\
+ ISR_RX_PKT |\
+ ISR_TX_PKT |\
+ ISR_MAC_RX |\
+ ISR_MAC_TX )
+
+/* RFD/RRD and TPD index registers */
+#define REG_RFD_RRD_IDX 0x1800
+#define REG_TPD_IDX 0x1804
+
+/* MII definition */
+/* PHY Common Register */
+#define MII_AT001_CR 0x09
+#define MII_AT001_SR 0x0A
+#define MII_AT001_ESR 0x0F
+#define MII_AT001_PSCR 0x10
+#define MII_AT001_PSSR 0x11
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
+#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
+#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_MASK 0x2040
+#define MII_CR_SPEED_1000 0x0040
+#define MII_CR_SPEED_100 0x2000
+#define MII_CR_SPEED_10 0x0000
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
+#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+
+/* Link partner ability register. */
+#define MII_LPA_SLCT 0x001f /* Same as advertise selector */
+#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
+#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
+#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
+#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
+#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */
+#define MII_LPA_PAUSE 0x0400 /* PAUSE */
+#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */
+#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */
+#define MII_LPA_LPACK 0x4000 /* Link partner acked us */
+#define MII_LPA_NPAGE 0x8000 /* Next page bit */
+
+/* Autoneg Advertisement Register */
+#define MII_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
+#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
+#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
+#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
+#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
+#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
+#define MII_AR_PAUSE 0x0400 /* Pause operation desired */
+#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
+#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
+#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
+#define MII_AR_SPEED_MASK 0x01E0
+#define MII_AR_DEFAULT_CAP_MASK 0x0DE0
+
+/* 1000BASE-T Control Register */
+#define MII_AT001_CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
+#define MII_AT001_CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
+#define MII_AT001_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port, 0=DTE device */
+#define MII_AT001_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master, 0=Configure PHY as Slave */
+#define MII_AT001_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value, 0=Automatic Master/Slave config */
+#define MII_AT001_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define MII_AT001_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
+#define MII_AT001_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
+#define MII_AT001_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
+#define MII_AT001_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
+#define MII_AT001_CR_1000T_SPEED_MASK 0x0300
+#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK 0x0300
+
+/* 1000BASE-T Status Register */
+#define MII_AT001_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
+#define MII_AT001_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
+#define MII_AT001_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define MII_AT001_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
+#define MII_AT001_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
+#define MII_AT001_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
+#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT 12
+#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT 13
+
+/* Extended Status Register */
+#define MII_AT001_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
+#define MII_AT001_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
+#define MII_AT001_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
+#define MII_AT001_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
+
+/* AT001 PHY Specific Control Register */
+#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
+#define MII_AT001_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define MII_AT001_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
+#define MII_AT001_PSCR_MAC_POWERDOWN 0x0008
+#define MII_AT001_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, 0=CLK125 toggling */
+#define MII_AT001_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5, Manual MDI configuration */
+#define MII_AT001_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
+#define MII_AT001_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
+#define MII_AT001_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled all speeds. */
+#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE 0x0080 /* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T RX Threshold), 0=Normal 10BASE-T RX Threshold */
+#define MII_AT001_PSCR_MII_5BIT_ENABLE 0x0100 /* 1=5-Bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
+#define MII_AT001_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */
+#define MII_AT001_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
+#define MII_AT001_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
+#define MII_AT001_PSCR_POLARITY_REVERSAL_SHIFT 1
+#define MII_AT001_PSCR_AUTO_X_MODE_SHIFT 5
+#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
+
+/* AT001 PHY Specific Status Register */
+#define MII_AT001_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
+#define MII_AT001_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
+#define MII_AT001_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
+#define MII_AT001_PSSR_10MBS 0x0000 /* 00=10Mbs */
+#define MII_AT001_PSSR_100MBS 0x4000 /* 01=100Mbs */
+#define MII_AT001_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
+
+/* PCI Command Register Bit Definitions */
+#define PCI_REG_COMMAND 0x04 /* PCI Command Register */
+#define CMD_IO_SPACE 0x0001
+#define CMD_MEMORY_SPACE 0x0002
+#define CMD_BUS_MASTER 0x0004
+
+/* Wake Up Filter Control */
+#define ATL1_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define ATL1_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
+#define ATL1_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
+#define ATL1_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */
+#define ATL1_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
+
+/* Error Codes */
+#define ATL1_SUCCESS 0
+#define ATL1_ERR_EEPROM 1
+#define ATL1_ERR_PHY 2
+#define ATL1_ERR_CONFIG 3
+#define ATL1_ERR_PARAM 4
+#define ATL1_ERR_MAC_TYPE 5
+#define ATL1_ERR_PHY_TYPE 6
+#define ATL1_ERR_PHY_SPEED 7
+#define ATL1_ERR_PHY_RES 8
+
+#define SPEED_0 0xffff
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+#define MEDIA_TYPE_AUTO_SENSOR 0
+#define MEDIA_TYPE_1000M_FULL 1
+#define MEDIA_TYPE_100M_FULL 2
+#define MEDIA_TYPE_100M_HALF 3
+#define MEDIA_TYPE_10M_FULL 4
+#define MEDIA_TYPE_10M_HALF 5
+
+#define ADVERTISE_10_HALF 0x0001
+#define ADVERTISE_10_FULL 0x0002
+#define ADVERTISE_100_HALF 0x0004
+#define ADVERTISE_100_FULL 0x0008
+#define ADVERTISE_1000_HALF 0x0010
+#define ADVERTISE_1000_FULL 0x0020
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */
+#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */
+#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */
+
+/* The size (in bytes) of an Ethernet packet */
+#define ENET_HEADER_SIZE 14
+#define MAXIMUM_ETHERNET_FRAME_SIZE 1518 /* with FCS */
+#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* with FCS */
+#define ETHERNET_FCS_SIZE 4
+#define MAX_JUMBO_FRAME_SIZE 0x2800
+
+#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */
+#define PHY_FORCE_TIME 20 /* 2.0 Seconds */
+
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA */
+#define EEPROM_SUM 0xBABA
+
+#define ATL1_EEDUMP_LEN 48
+
+/* Statistics counters collected by the MAC */
+struct stats_msg_block {
+ /* rx */
+ u32 rx_ok; /* The number of good packets received. */
+ u32 rx_bcast; /* The number of good broadcast packets received. */
+ u32 rx_mcast; /* The number of good multicast packets received. */
+ u32 rx_pause; /* The number of Pause packets received. */
+ u32 rx_ctrl; /* The number of Control packets received, other than Pause frames. */
+ u32 rx_fcs_err; /* The number of packets received with a bad FCS. */
+ u32 rx_len_err; /* The number of packets whose length field does not match the actual size. */
+ u32 rx_byte_cnt; /* The number of bytes of good packets received. FCS is NOT included. */
+ u32 rx_runt; /* The number of packets received that are less than 64 bytes long with a good FCS. */
+ u32 rx_frag; /* The number of packets received that are less than 64 bytes long with a bad FCS. */
+ u32 rx_sz_64; /* The number of good and bad packets received that are 64 bytes long. */
+ u32 rx_sz_65_127; /* The number of good and bad packets received that are between 65 and 127 bytes long. */
+ u32 rx_sz_128_255; /* The number of good and bad packets received that are between 128 and 255 bytes long. */
+ u32 rx_sz_256_511; /* The number of good and bad packets received that are between 256 and 511 bytes long. */
+ u32 rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023 bytes long. */
+ u32 rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518 bytes long. */
+ u32 rx_sz_1519_max; /* The number of good and bad packets received that are between 1519 bytes and the MTU. */
+ u32 rx_sz_ov; /* The number of good and bad packets received that are larger than the MTU and truncated by Selene. */
+ u32 rx_rxf_ov; /* The number of frames dropped due to RX FIFO overflow. */
+ u32 rx_rrd_ov; /* The number of frames dropped due to RRD overflow. */
+ u32 rx_align_err; /* Alignment Error */
+ u32 rx_bcast_byte_cnt; /* The byte count of broadcast packets received, excluding FCS. */
+ u32 rx_mcast_byte_cnt; /* The byte count of multicast packets received, excluding FCS. */
+ u32 rx_err_addr; /* The number of packets dropped due to address filtering. */
+
+ /* tx */
+ u32 tx_ok; /* The number of good packets transmitted. */
+ u32 tx_bcast; /* The number of good broadcast packets transmitted. */
+ u32 tx_mcast; /* The number of good multicast packets transmitted. */
+ u32 tx_pause; /* The number of Pause packets transmitted. */
+ u32 tx_exc_defer; /* The number of packets transmitted with excessive deferral. */
+ u32 tx_ctrl; /* The number of control frames transmitted, excluding Pause frames. */
+ u32 tx_defer; /* The number of packets transmitted that were deferred. */
+ u32 tx_byte_cnt; /* The number of bytes of data transmitted. FCS is NOT included. */
+ u32 tx_sz_64; /* The number of good and bad packets transmitted that are 64 bytes long. */
+ u32 tx_sz_65_127; /* The number of good and bad packets transmitted that are between 65 and 127 bytes long. */
+ u32 tx_sz_128_255; /* The number of good and bad packets transmitted that are between 128 and 255 bytes long. */
+ u32 tx_sz_256_511; /* The number of good and bad packets transmitted that are between 256 and 511 bytes long. */
+ u32 tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023 bytes long. */
+ u32 tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518 bytes long. */
+ u32 tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519 bytes and the MTU. */
+ u32 tx_1_col; /* The number of packets transmitted successfully after a single prior collision. */
+ u32 tx_2_col; /* The number of packets transmitted successfully after multiple prior collisions. */
+ u32 tx_late_col; /* The number of packets transmitted with late collisions. */
+ u32 tx_abort_col; /* The number of transmit packets aborted due to excessive collisions. */
+ u32 tx_underrun; /* The number of transmit packets aborted due to transmit FIFO underrun or TRD FIFO underrun. */
+ u32 tx_rd_eop; /* The number of times a read went beyond the EOP into the next frame area because the TRD was not written in time. */
+ u32 tx_len_err; /* The number of transmit packets whose length field does NOT match the actual frame size. */
+ u32 tx_trunc; /* The number of transmit packets truncated because their size exceeded the MTU. */
+ u32 tx_bcast_byte; /* The byte count of broadcast packets transmitted, excluding FCS. */
+ u32 tx_mcast_byte; /* The byte count of multicast packets transmitted, excluding FCS. */
+ u32 smb_updated; /* 1: SMB Updated. This is used by software as the indication of a statistics update.
+ * Software should clear this bit as soon as the statistics have been retrieved. */
+};
+
+/* Coalescing Message Block */
+struct coals_msg_block {
+ u32 int_stats; /* interrupt status */
+ u16 rrd_prod_idx; /* TRD Producer Index. */
+ u16 rfd_cons_idx; /* RFD Consumer Index. */
+ u16 update; /* Selene sets this bit every time it DMAs the CMB to host memory.
+ * Software is supposed to clear this bit when the CMB information has been processed. */
+ u16 tpd_cons_idx; /* TPD Consumer Index. */
+};
+
+/* RRD descriptor */
+struct rx_return_desc {
+ u8 num_buf; /* Number of RFD buffers used by the received packet */
+ u8 resved;
+ u16 buf_indx; /* RFD Index of the first buffer */
+ union {
+ u32 valid;
+ struct {
+ u16 rx_chksum;
+ u16 pkt_size;
+ } xsum_sz;
+ } xsz;
+
+ u16 pkt_flg; /* Packet flags */
+ u16 err_flg; /* Error flags */
+ u16 resved2;
+ u16 vlan_tag; /* VLAN TAG */
+};
+
+#define PACKET_FLAG_ETH_TYPE 0x0080
+#define PACKET_FLAG_VLAN_INS 0x0100
+#define PACKET_FLAG_ERR 0x0200
+#define PACKET_FLAG_IPV4 0x0400
+#define PACKET_FLAG_UDP 0x0800
+#define PACKET_FLAG_TCP 0x1000
+#define PACKET_FLAG_BCAST 0x2000
+#define PACKET_FLAG_MCAST 0x4000
+#define PACKET_FLAG_PAUSE 0x8000
+
+#define ERR_FLAG_CRC 0x0001
+#define ERR_FLAG_CODE 0x0002
+#define ERR_FLAG_DRIBBLE 0x0004
+#define ERR_FLAG_RUNT 0x0008
+#define ERR_FLAG_OV 0x0010
+#define ERR_FLAG_TRUNC 0x0020
+#define ERR_FLAG_IP_CHKSUM 0x0040
+#define ERR_FLAG_L4_CHKSUM 0x0080
+#define ERR_FLAG_LEN 0x0100
+#define ERR_FLAG_DES_ADDR 0x0200
+
+/* RFD descriptor */
+struct rx_free_desc {
+ __le64 buffer_addr; /* Address of the descriptor's data buffer */
+ __le16 buf_len; /* Size of the receive buffer in host memory, in bytes */
+ u16 coalese; /* Update consumer index to host after the reception of this frame */
+ /* __attribute__ ((packed)) is required */
+} __attribute__ ((packed));
+
+/* tsopu defines */
+#define TSO_PARAM_BUFLEN_MASK 0x3FFF
+#define TSO_PARAM_BUFLEN_SHIFT 0
+#define TSO_PARAM_DMAINT_MASK 0x0001
+#define TSO_PARAM_DMAINT_SHIFT 14
+#define TSO_PARAM_PKTNT_MASK 0x0001
+#define TSO_PARAM_PKTINT_SHIFT 15
+#define TSO_PARAM_VLANTAG_MASK 0xFFFF
+#define TSO_PARAM_VLAN_SHIFT 16
+
+/* tsopl defines */
+#define TSO_PARAM_EOP_MASK 0x0001
+#define TSO_PARAM_EOP_SHIFT 0
+#define TSO_PARAM_COALESCE_MASK 0x0001
+#define TSO_PARAM_COALESCE_SHIFT 1
+#define TSO_PARAM_INSVLAG_MASK 0x0001
+#define TSO_PARAM_INSVLAG_SHIFT 2
+#define TSO_PARAM_CUSTOMCKSUM_MASK 0x0001
+#define TSO_PARAM_CUSTOMCKSUM_SHIFT 3
+#define TSO_PARAM_SEGMENT_MASK 0x0001
+#define TSO_PARAM_SEGMENT_SHIFT 4
+#define TSO_PARAM_IPCKSUM_MASK 0x0001
+#define TSO_PARAM_IPCKSUM_SHIFT 5
+#define TSO_PARAM_TCPCKSUM_MASK 0x0001
+#define TSO_PARAM_TCPCKSUM_SHIFT 6
+#define TSO_PARAM_UDPCKSUM_MASK 0x0001
+#define TSO_PARAM_UDPCKSUM_SHIFT 7
+#define TSO_PARAM_VLANTAGGED_MASK 0x0001
+#define TSO_PARAM_VLANTAGGED_SHIFT 8
+#define TSO_PARAM_ETHTYPE_MASK 0x0001
+#define TSO_PARAM_ETHTYPE_SHIFT 9
+#define TSO_PARAM_IPHL_MASK 0x000F
+#define TSO_PARAM_IPHL_SHIFT 10
+#define TSO_PARAM_TCPHDRLEN_MASK 0x000F
+#define TSO_PARAM_TCPHDRLEN_SHIFT 14
+#define TSO_PARAM_HDRFLAG_MASK 0x0001
+#define TSO_PARAM_HDRFLAG_SHIFT 18
+#define TSO_PARAM_MSS_MASK 0x1FFF
+#define TSO_PARAM_MSS_SHIFT 19
+
+/* csumpu defines */
+#define CSUM_PARAM_BUFLEN_MASK 0x3FFF
+#define CSUM_PARAM_BUFLEN_SHIFT 0
+#define CSUM_PARAM_DMAINT_MASK 0x0001
+#define CSUM_PARAM_DMAINT_SHIFT 14
+#define CSUM_PARAM_PKTINT_MASK 0x0001
+#define CSUM_PARAM_PKTINT_SHIFT 15
+#define CSUM_PARAM_VALANTAG_MASK 0xFFFF
+#define CSUM_PARAM_VALAN_SHIFT 16
+
+/* csumpl defines*/
+#define CSUM_PARAM_EOP_MASK 0x0001
+#define CSUM_PARAM_EOP_SHIFT 0
+#define CSUM_PARAM_COALESCE_MASK 0x0001
+#define CSUM_PARAM_COALESCE_SHIFT 1
+#define CSUM_PARAM_INSVLAG_MASK 0x0001
+#define CSUM_PARAM_INSVLAG_SHIFT 2
+#define CSUM_PARAM_CUSTOMCKSUM_MASK 0x0001
+#define CSUM_PARAM_CUSTOMCKSUM_SHIFT 3
+#define CSUM_PARAM_SEGMENT_MASK 0x0001
+#define CSUM_PARAM_SEGMENT_SHIFT 4
+#define CSUM_PARAM_IPCKSUM_MASK 0x0001
+#define CSUM_PARAM_IPCKSUM_SHIFT 5
+#define CSUM_PARAM_TCPCKSUM_MASK 0x0001
+#define CSUM_PARAM_TCPCKSUM_SHIFT 6
+#define CSUM_PARAM_UDPCKSUM_MASK 0x0001
+#define CSUM_PARAM_UDPCKSUM_SHIFT 7
+#define CSUM_PARAM_VLANTAGGED_MASK 0x0001
+#define CSUM_PARAM_VLANTAGGED_SHIFT 8
+#define CSUM_PARAM_ETHTYPE_MASK 0x0001
+#define CSUM_PARAM_ETHTYPE_SHIFT 9
+#define CSUM_PARAM_IPHL_MASK 0x000F
+#define CSUM_PARAM_IPHL_SHIFT 10
+#define CSUM_PARAM_PLOADOFFSET_MASK 0x00FF
+#define CSUM_PARAM_PLOADOFFSET_SHIFT 16
+#define CSUM_PARAM_XSUMOFFSET_MASK 0x00FF
+#define CSUM_PARAM_XSUMOFFSET_SHIFT 24
+
+/* TPD descriptor */
+struct tso_param {
+ /* The order of these declarations is important -- don't change it */
+ u32 tsopu; /* tso_param upper word */
+ u32 tsopl; /* tso_param lower word */
+};
+
+struct csum_param {
+ /* The order of these declarations is important -- don't change it */
+ u32 csumpu; /* csum_param upper word */
+ u32 csumpl; /* csum_param lower word */
+};
+
+union tpd_descr {
+ u64 data;
+ struct csum_param csum;
+ struct tso_param tso;
+};
+
+struct tx_packet_desc {
+ __le64 buffer_addr;
+ union tpd_descr desc;
+};
+
+/* DMA Order Settings */
+enum atl1_dma_order {
+ atl1_dma_ord_in = 1,
+ atl1_dma_ord_enh = 2,
+ atl1_dma_ord_out = 4
+};
+
+enum atl1_dma_rcb {
+ atl1_rcb_64 = 0,
+ atl1_rcb_128 = 1
+};
+
+enum atl1_dma_req_block {
+ atl1_dma_req_128 = 0,
+ atl1_dma_req_256 = 1,
+ atl1_dma_req_512 = 2,
+ atl1_dam_req_1024 = 3,
+ atl1_dam_req_2048 = 4,
+ atl1_dma_req_4096 = 5
+};
+
+struct atl1_spi_flash_dev {
+ const char *manu_name; /* manufacturer id */
+ /* op-code */
+ u8 cmd_wrsr;
+ u8 cmd_read;
+ u8 cmd_program;
+ u8 cmd_wren;
+ u8 cmd_wrdi;
+ u8 cmd_rdsr;
+ u8 cmd_rdid;
+ u8 cmd_sector_erase;
+ u8 cmd_chip_erase;
+};
+
+#endif /* _ATL1_HW_H_ */
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atl1/atl1_main.c
new file mode 100644
index 00000000000..6655640eb4c
--- /dev/null
+++ b/drivers/net/atl1/atl1_main.c
@@ -0,0 +1,2468 @@
+/*
+ * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
+ * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
+ * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ *
+ * Contact Information:
+ * Xiong Huang <xiong_huang@attansic.com>
+ * Attansic Technology Corp. 3F 147, Xianzheng 9th Road, Zhubei,
+ * Xinzhu 302, TAIWAN, REPUBLIC OF CHINA
+ *
+ * Chris Snook <csnook@redhat.com>
+ * Jay Cliburn <jcliburn@gmail.com>
+ *
+ * This version is adapted from the Attansic reference driver for
+ * inclusion in the Linux kernel. It is currently under heavy development.
+ * A very incomplete list of things that need to be dealt with:
+ *
+ * TODO:
+ * Fix TSO; tx performance is horrible with TSO enabled.
+ * Wake on LAN.
+ * Add more ethtool functions, including set ring parameters.
+ * Fix abstruse irq enable/disable condition described here:
+ * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2
+ *
+ * NEEDS TESTING:
+ * VLAN
+ * multicast
+ * promiscuous mode
+ * interrupt coalescing
+ * SMP torture testing
+ */
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/irqreturn.h>
+#include <linux/workqueue.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/hardirq.h>
+#include <linux/interrupt.h>
+#include <linux/irqflags.h>
+#include <linux/dma-mapping.h>
+#include <linux/net.h>
+#include <linux/pm.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <net/checksum.h>
+
+#include <asm/atomic.h>
+#include <asm/byteorder.h>
+
+#include "atl1.h"
+
+#define RUN_REALTIME 0
+#define DRIVER_VERSION "2.0.6"
+
+char atl1_driver_name[] = "atl1";
+static const char atl1_driver_string[] = "Attansic L1 Ethernet Network Driver";
+static const char atl1_copyright[] = "Copyright(c) 2005-2006 Attansic Corporation.";
+char atl1_driver_version[] = DRIVER_VERSION;
+
+MODULE_AUTHOR
+ ("Attansic Corporation <xiong_huang@attansic.com>, Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
+MODULE_DESCRIPTION("Attansic 1000M Ethernet Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
+
+/*
+ * atl1_pci_tbl - PCI Device ID Table
+ */
+static const struct pci_device_id atl1_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1048)},
+ /* required last entry */
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);
+
+/*
+ * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * atl1_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ */
+static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
+{
+ struct atl1_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* PCI config space info */
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+
+ hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+ hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
+
+ adapter->wol = 0;
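+ /* round the receive buffer length up to the next 8-byte multiple */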
+ adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
+ adapter->ict = 50000; /* 100ms */
+ adapter->link_speed = SPEED_0; /* hardware init */
+ adapter->link_duplex = FULL_DUPLEX;
+
+ hw->phy_configured = false;
+ hw->preamble_len = 7;
+ hw->ipgt = 0x60;
+ hw->min_ifg = 0x50;
+ hw->ipgr1 = 0x40;
+ hw->ipgr2 = 0x60;
+ hw->max_retry = 0xf;
+ hw->lcol = 0x37;
+ hw->jam_ipg = 7;
+ hw->rfd_burst = 8;
+ hw->rrd_burst = 8;
+ hw->rfd_fetch_gap = 1;
+ hw->rx_jumbo_th = adapter->rx_buffer_len / 8;
+ hw->rx_jumbo_lkah = 1;
+ hw->rrd_ret_timer = 16;
+ hw->tpd_burst = 4;
+ hw->tpd_fetch_th = 16;
+ hw->txf_burst = 0x100;
+ hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3;
+ hw->tpd_fetch_gap = 1;
+ hw->rcb_value = atl1_rcb_64;
+ hw->dma_ord = atl1_dma_ord_enh;
+ hw->dmar_block = atl1_dma_req_256;
+ hw->dmaw_block = atl1_dma_req_256;
+ hw->cmb_rrd = 4;
+ hw->cmb_tpd = 4;
+ hw->cmb_rx_timer = 1; /* about 2us */
+ hw->cmb_tx_timer = 1; /* about 2us */
+ hw->smb_timer = 100000; /* about 200ms */
+
+ atomic_set(&adapter->irq_sem, 0);
+ spin_lock_init(&adapter->lock);
+ spin_lock_init(&adapter->mb_lock);
+
+ return 0;
+}
+
+/*
+ * atl1_setup_ring_resources - allocate Tx / RX descriptor resources
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ */
+s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
+{
+ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+ struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1_ring_header *ring_header = &adapter->ring_header;
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+ u8 offset = 0;
+
+ size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
+ tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
+ if (unlikely(!tpd_ring->buffer_info)) {
+ printk(KERN_WARNING "%s: kzalloc failed, size = %d\n",
+ atl1_driver_name, size);
+ goto err_nomem;
+ }
+ rfd_ring->buffer_info =
+ (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);
+
+ /* real ring DMA buffer */
+ ring_header->size = size = sizeof(struct tx_packet_desc) *
+ tpd_ring->count
+ + sizeof(struct rx_free_desc) * rfd_ring->count
+ + sizeof(struct rx_return_desc) * rrd_ring->count
+ + sizeof(struct coals_msg_block)
+ + sizeof(struct stats_msg_block)
+ + 40; /* "40: for 8 bytes align" huh? -- CHS */
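+ /* (the extra 40 bytes presumably cover 8-byte alignment slack for each of the five blocks above) */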
+
+ ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
+ &ring_header->dma);
+ if (unlikely(!ring_header->desc)) {
+ printk(KERN_WARNING
+ "%s: pci_alloc_consistent failed, size = D%d\n",
+ atl1_driver_name, size);
+ goto err_nomem;
+ }
+
+ memset(ring_header->desc, 0, ring_header->size);
+
+ /* init TPD ring */
+ tpd_ring->dma = ring_header->dma;
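+ /* round each ring's DMA address up to an 8-byte boundary */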
+ offset = (tpd_ring->dma & 0x7) ? (8 - (tpd_ring->dma & 0x7)) : 0;
+ tpd_ring->dma += offset;
+ tpd_ring->desc = (u8 *) ring_header->desc + offset;
+ tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count;
+ atomic_set(&tpd_ring->next_to_use, 0);
+ atomic_set(&tpd_ring->next_to_clean, 0);
+
+ /* init RFD ring */
+ rfd_ring->dma = tpd_ring->dma + tpd_ring->size;
+ offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0;
+ rfd_ring->dma += offset;
+ rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset);
+ rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count;
+ rfd_ring->next_to_clean = 0;
+ /* rfd_ring->next_to_use = rfd_ring->count - 1; */
+ atomic_set(&rfd_ring->next_to_use, 0);
+
+ /* init RRD ring */
+ rrd_ring->dma = rfd_ring->dma + rfd_ring->size;
+ offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0;
+ rrd_ring->dma += offset;
+ rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset);
+ rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count;
+ rrd_ring->next_to_use = 0;
+ atomic_set(&rrd_ring->next_to_clean, 0);
+
+ /* init CMB */
+ adapter->cmb.dma = rrd_ring->dma + rrd_ring->size;
+ offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0;
+ adapter->cmb.dma += offset;
+ adapter->cmb.cmb =
+ (struct coals_msg_block *) ((u8 *) rrd_ring->desc +
+ (rrd_ring->size + offset));
+
+ /* init SMB */
+ adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block);
+ offset = (adapter->smb.dma & 0x7) ? (8 - (adapter->smb.dma & 0x7)) : 0;
+ adapter->smb.dma += offset;
+ adapter->smb.smb = (struct stats_msg_block *)
+ ((u8 *) adapter->cmb.cmb + (sizeof(struct coals_msg_block) + offset));
+
+ return ATL1_SUCCESS;
+
+err_nomem:
+ kfree(tpd_ring->buffer_info);
+ return -ENOMEM;
+}
+
+/*
+ * atl1_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ */
+static void atl1_irq_enable(struct atl1_adapter *adapter)
+{
+ if (likely(!atomic_dec_and_test(&adapter->irq_sem)))
+ iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR);
+}
+
+static void atl1_clear_phy_int(struct atl1_adapter *adapter)
+{
+ u16 phy_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->lock, flags);
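+ /* the read of PHY register 19 acknowledges and clears the pending PHY interrupt */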
+ atl1_read_phy_reg(&adapter->hw, 19, &phy_data);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
+static void atl1_inc_smb(struct atl1_adapter *adapter)
+{
+ struct stats_msg_block *smb = adapter->smb.smb;
+
+ /* Fill out the OS statistics structure */
+ adapter->soft_stats.rx_packets += smb->rx_ok;
+ adapter->soft_stats.tx_packets += smb->tx_ok;
+ adapter->soft_stats.rx_bytes += smb->rx_byte_cnt;
+ adapter->soft_stats.tx_bytes += smb->tx_byte_cnt;
+ adapter->soft_stats.multicast += smb->rx_mcast;
+ adapter->soft_stats.collisions += (smb->tx_1_col +
+ smb->tx_2_col * 2 +
+ smb->tx_late_col +
+ smb->tx_abort_col *
+ adapter->hw.max_retry);
+
+ /* Rx Errors */
+ adapter->soft_stats.rx_errors += (smb->rx_frag +
+ smb->rx_fcs_err +
+ smb->rx_len_err +
+ smb->rx_sz_ov +
+ smb->rx_rxf_ov +
+ smb->rx_rrd_ov + smb->rx_align_err);
+ adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov;
+ adapter->soft_stats.rx_length_errors += smb->rx_len_err;
+ adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err;
+ adapter->soft_stats.rx_frame_errors += smb->rx_align_err;
+ adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov +
+ smb->rx_rxf_ov);
+
+ adapter->soft_stats.rx_pause += smb->rx_pause;
+ adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov;
+ adapter->soft_stats.rx_trunc += smb->rx_sz_ov;
+
+ /* Tx Errors */
+ adapter->soft_stats.tx_errors += (smb->tx_late_col +
+ smb->tx_abort_col +
+ smb->tx_underrun + smb->tx_trunc);
+ adapter->soft_stats.tx_fifo_errors += smb->tx_underrun;
+ adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col;
+ adapter->soft_stats.tx_window_errors += smb->tx_late_col;
+
+ adapter->soft_stats.excecol += smb->tx_abort_col;
+ adapter->soft_stats.deffer += smb->tx_defer;
+ adapter->soft_stats.scc += smb->tx_1_col;
+ adapter->soft_stats.mcc += smb->tx_2_col;
+ adapter->soft_stats.latecol += smb->tx_late_col;
+ adapter->soft_stats.tx_underun += smb->tx_underrun;
+ adapter->soft_stats.tx_trunc += smb->tx_trunc;
+ adapter->soft_stats.tx_pause += smb->tx_pause;
+
+ adapter->net_stats.rx_packets = adapter->soft_stats.rx_packets;
+ adapter->net_stats.tx_packets = adapter->soft_stats.tx_packets;
+ adapter->net_stats.rx_bytes = adapter->soft_stats.rx_bytes;
+ adapter->net_stats.tx_bytes = adapter->soft_stats.tx_bytes;
+ adapter->net_stats.multicast = adapter->soft_stats.multicast;
+ adapter->net_stats.collisions = adapter->soft_stats.collisions;
+ adapter->net_stats.rx_errors = adapter->soft_stats.rx_errors;
+ adapter->net_stats.rx_over_errors =
+ adapter->soft_stats.rx_missed_errors;
+ adapter->net_stats.rx_length_errors =
+ adapter->soft_stats.rx_length_errors;
+ adapter->net_stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors;
+ adapter->net_stats.rx_frame_errors =
+ adapter->soft_stats.rx_frame_errors;
+ adapter->net_stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors;
+ adapter->net_stats.rx_missed_errors =
+ adapter->soft_stats.rx_missed_errors;
+ adapter->net_stats.tx_errors = adapter->soft_stats.tx_errors;
+ adapter->net_stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors;
+ adapter->net_stats.tx_aborted_errors =
+ adapter->soft_stats.tx_aborted_errors;
+ adapter->net_stats.tx_window_errors =
+ adapter->soft_stats.tx_window_errors;
+ adapter->net_stats.tx_carrier_errors =
+ adapter->soft_stats.tx_carrier_errors;
+}
+
+static void atl1_rx_checksum(struct atl1_adapter *adapter,
+ struct rx_return_desc *rrd,
+ struct sk_buff *skb)
+{
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
+ if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
+ ERR_FLAG_CODE | ERR_FLAG_OV)) {
+ adapter->hw_csum_err++;
+ printk(KERN_DEBUG "%s: rx checksum error\n",
+ atl1_driver_name);
+ return;
+ }
+ }
+
+ /* not IPv4 */
+ if (!(rrd->pkt_flg & PACKET_FLAG_IPV4))
+ /* checksum is invalid, but it's not an IPv4 pkt, so ok */
+ return;
+
+ /* IPv4 packet */
+ if (likely(!(rrd->err_flg &
+ (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ adapter->hw_csum_good++;
+ return;
+ }
+
+ /* IPv4, but hardware thinks its checksum is wrong */
+ printk(KERN_DEBUG "%s: hw csum wrong pkt_flag:%x, err_flag:%x\n",
+ atl1_driver_name, rrd->pkt_flg, rrd->err_flg);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
+ adapter->hw_csum_err++;
+ return;
+}
+
+/*
+ * atl1_alloc_rx_buffers - Replace used receive buffers
+ * @adapter: address of board private structure
+ */
+static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
+{
+ struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ struct page *page;
+ unsigned long offset;
+ struct atl1_buffer *buffer_info, *next_info;
+ struct sk_buff *skb;
+ u16 num_alloc = 0;
+ u16 rfd_next_to_use, next_next;
+ struct rx_free_desc *rfd_desc;
+
+ next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use);
+ if (++next_next == rfd_ring->count)
+ next_next = 0;
+ buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
+ next_info = &rfd_ring->buffer_info[next_next];
+
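+ /* refill free RFD entries until we hit one that is still marked allocated */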
+ while (!buffer_info->alloced && !next_info->alloced) {
+ if (buffer_info->skb) {
+ buffer_info->alloced = 1;
+ goto next;
+ }
+
+ rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);
+
+ skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
+ if (unlikely(!skb)) { /* Better luck next round */
+ adapter->net_stats.rx_dropped++;
+ break;
+ }
+
+ /*
+ * Make buffer alignment 2 beyond a 16 byte boundary
+ * this will result in a 16 byte aligned IP header after
+ * the 14 byte MAC header is removed
+ */
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb->dev = netdev;
+
+ buffer_info->alloced = 1;
+ buffer_info->skb = skb;
+ buffer_info->length = (u16) adapter->rx_buffer_len;
+ page = virt_to_page(skb->data);
+ offset = (unsigned long)skb->data & ~PAGE_MASK;
+ buffer_info->dma = pci_map_page(pdev, page, offset,
+ adapter->rx_buffer_len,
+ PCI_DMA_FROMDEVICE);
+ rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+ rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
+ rfd_desc->coalese = 0;
+
+next:
+ rfd_next_to_use = next_next;
+ if (unlikely(++next_next == rfd_ring->count))
+ next_next = 0;
+
+ buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
+ next_info = &rfd_ring->buffer_info[next_next];
+ num_alloc++;
+ }
+
+ if (num_alloc) {
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use);
+ }
+ return num_alloc;
+}
+
+static void atl1_intr_rx(struct atl1_adapter *adapter)
+{
+ int i, count;
+ u16 length;
+ u16 rrd_next_to_clean;
+ u32 value;
+ struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1_buffer *buffer_info;
+ struct rx_return_desc *rrd;
+ struct sk_buff *skb;
+
+ count = 0;
+
+ rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);
+
+ while (1) {
+ rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
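+		/* allow one retry below in case the RRD write-back has not finished yet */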
+ i = 1;
+ if (likely(rrd->xsz.valid)) { /* packet valid */
+chk_rrd:
+ /* check rrd status */
+ if (likely(rrd->num_buf == 1))
+ goto rrd_ok;
+
+ /* rrd seems to be bad */
+ if (unlikely(i-- > 0)) {
+ /* rrd may not be DMAed completely */
+ printk(KERN_DEBUG
+ "%s: RRD may not be DMAed completely\n",
+ atl1_driver_name);
+ udelay(1);
+ goto chk_rrd;
+ }
+ /* bad rrd */
+ printk(KERN_DEBUG "%s: bad RRD\n", atl1_driver_name);
+			/* check whether the RFD index needs to be updated */
+ if (rrd->num_buf > 1) {
+ u16 num_buf;
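+				/* expected number of RFD buffers for this packet (ceiling division) */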
+ num_buf =
+ (rrd->xsz.xsum_sz.pkt_size +
+ adapter->rx_buffer_len -
+ 1) / adapter->rx_buffer_len;
+ if (rrd->num_buf == num_buf) {
+ /* clean alloc flag for bad rrd */
+ while (rfd_ring->next_to_clean !=
+ (rrd->buf_indx + num_buf)) {
+ rfd_ring->buffer_info[rfd_ring->
+ next_to_clean].alloced = 0;
+ if (++rfd_ring->next_to_clean ==
+ rfd_ring->count) {
+ rfd_ring->
+ next_to_clean = 0;
+ }
+ }
+ }
+ }
+
+ /* update rrd */
+ rrd->xsz.valid = 0;
+ if (++rrd_next_to_clean == rrd_ring->count)
+ rrd_next_to_clean = 0;
+ count++;
+ continue;
+		} else {	/* current rrd has not been updated yet */
+
+ break;
+ }
+rrd_ok:
+		/* clear the alloc flag on RFDs consumed before this RRD's buffer */
+ while (rfd_ring->next_to_clean != rrd->buf_indx) {
+ rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced =
+ 0;
+ if (++rfd_ring->next_to_clean == rfd_ring->count)
+ rfd_ring->next_to_clean = 0;
+ }
+
+ buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
+ if (++rfd_ring->next_to_clean == rfd_ring->count)
+ rfd_ring->next_to_clean = 0;
+
+ /* update rrd next to clean */
+ if (++rrd_next_to_clean == rrd_ring->count)
+ rrd_next_to_clean = 0;
+ count++;
+
+ if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
+ if (!(rrd->err_flg &
+ (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
+ | ERR_FLAG_LEN))) {
+				/* packet error, no need to pass it upstream */
+ buffer_info->alloced = 0;
+ rrd->xsz.valid = 0;
+ continue;
+ }
+ }
+
+ /* Good Receive */
+ pci_unmap_page(adapter->pdev, buffer_info->dma,
+ buffer_info->length, PCI_DMA_FROMDEVICE);
+ skb = buffer_info->skb;
+ length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);
+
+ skb_put(skb, length - ETHERNET_FCS_SIZE);
+
+ /* Receive Checksum Offload */
+ atl1_rx_checksum(adapter, rrd, skb);
+ skb->protocol = eth_type_trans(skb, adapter->netdev);
+
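+		/*
+		 * The hardware appears to report the VLAN tag with the VID in
+		 * bits 15:4, CFI in bit 3 and the priority in bits 2:0;
+		 * rearrange it into the standard 802.1Q TCI layout (priority
+		 * 15:13, CFI 12, VID 11:0) before handing the frame to the
+		 * VLAN layer.
+		 */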
+ if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) {
+ u16 vlan_tag = (rrd->vlan_tag >> 4) |
+ ((rrd->vlan_tag & 7) << 13) |
+ ((rrd->vlan_tag & 8) << 9);
+ vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
+ } else
+ netif_rx(skb);
+
+ /* let protocol layer free skb */
+ buffer_info->skb = NULL;
+ buffer_info->alloced = 0;
+ rrd->xsz.valid = 0;
+
+ adapter->netdev->last_rx = jiffies;
+ }
+
+ atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);
+
+ atl1_alloc_rx_buffers(adapter);
+
+	/* update the mailbox if any RRDs were processed */
+ if (count) {
+ u32 tpd_next_to_use;
+ u32 rfd_next_to_use;
+ u32 rrd_next_to_clean;
+
+ spin_lock(&adapter->mb_lock);
+
+ tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
+ rfd_next_to_use =
+ atomic_read(&adapter->rfd_ring.next_to_use);
+ rrd_next_to_clean =
+ atomic_read(&adapter->rrd_ring.next_to_clean);
+ value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
+ MB_RFD_PROD_INDX_SHIFT) |
+ ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
+ MB_RRD_CONS_INDX_SHIFT) |
+ ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
+ MB_TPD_PROD_INDX_SHIFT);
+ iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
+ spin_unlock(&adapter->mb_lock);
+ }
+}
+
+static void atl1_intr_tx(struct atl1_adapter *adapter)
+{
+ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+ struct atl1_buffer *buffer_info;
+ u16 sw_tpd_next_to_clean;
+ u16 cmb_tpd_next_to_clean;
+ u8 update = 0;
+
+ sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
+ cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);
+
+ while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
+ struct tx_packet_desc *tpd;
+ update = 1;
+ tpd = ATL1_TPD_DESC(tpd_ring, sw_tpd_next_to_clean);
+ buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
+ if (buffer_info->dma) {
+ pci_unmap_page(adapter->pdev, buffer_info->dma,
+ buffer_info->length, PCI_DMA_TODEVICE);
+ buffer_info->dma = 0;
+ }
+
+ if (buffer_info->skb) {
+ dev_kfree_skb_irq(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ tpd->buffer_addr = 0;
+ tpd->desc.data = 0;
+
+ if (++sw_tpd_next_to_clean == tpd_ring->count)
+ sw_tpd_next_to_clean = 0;
+ }
+ atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);
+
+ if (netif_queue_stopped(adapter->netdev)
+ && netif_carrier_ok(adapter->netdev))
+ netif_wake_queue(adapter->netdev);
+}
+
+static void atl1_check_for_link(struct atl1_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u16 phy_data = 0;
+
+ spin_lock(&adapter->lock);
+ adapter->phy_timer_pending = false;
+ atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+ atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+ spin_unlock(&adapter->lock);
+
+ /* notify upper layer link down ASAP */
+ if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */
+ if (netif_carrier_ok(netdev)) { /* old link state: Up */
+ printk(KERN_INFO "%s: %s link is down\n",
+ atl1_driver_name, netdev->name);
+ adapter->link_speed = SPEED_0;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ }
+ schedule_work(&adapter->link_chg_task);
+}
+
+/*
+ * atl1_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ */
+static irqreturn_t atl1_intr(int irq, void *data)
+{
+ struct atl1_adapter *adapter = netdev_priv(data);
+ u32 status;
+ u8 update_rx;
+ int max_ints = 10;
+
+ status = adapter->cmb.cmb->int_stats;
+ if (!status)
+ return IRQ_NONE;
+
+ update_rx = 0;
+
+ do {
+ /* clear CMB interrupt status at once */
+ adapter->cmb.cmb->int_stats = 0;
+
+ if (status & ISR_GPHY) /* clear phy status */
+ atl1_clear_phy_int(adapter);
+
+		/* clear ISR status, enable CMB DMA and disable interrupts */
+ iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
+
+ /* check if SMB intr */
+ if (status & ISR_SMB)
+ atl1_inc_smb(adapter);
+
+ /* check if PCIE PHY Link down */
+ if (status & ISR_PHY_LINKDOWN) {
+ printk(KERN_DEBUG "%s: pcie phy link down %x\n",
+ atl1_driver_name, status);
+ if (netif_running(adapter->netdev)) { /* reset MAC */
+ iowrite32(0, adapter->hw.hw_addr + REG_IMR);
+ schedule_work(&adapter->pcie_dma_to_rst_task);
+ return IRQ_HANDLED;
+ }
+ }
+
+		/* check for a DMA read/write error */
+ if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
+ printk(KERN_DEBUG
+ "%s: pcie DMA r/w error (status = 0x%x)\n",
+ atl1_driver_name, status);
+ iowrite32(0, adapter->hw.hw_addr + REG_IMR);
+ schedule_work(&adapter->pcie_dma_to_rst_task);
+ return IRQ_HANDLED;
+ }
+
+ /* link event */
+ if (status & ISR_GPHY) {
+ adapter->soft_stats.tx_carrier_errors++;
+ atl1_check_for_link(adapter);
+ }
+
+ /* transmit event */
+ if (status & ISR_CMB_TX)
+ atl1_intr_tx(adapter);
+
+ /* rx exception */
+ if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
+ ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
+ ISR_HOST_RRD_OV | ISR_CMB_RX))) {
+ if (status &
+ (ISR_RXF_OV | ISR_RFD_UNRUN | ISR_RRD_OV |
+ ISR_HOST_RFD_UNRUN | ISR_HOST_RRD_OV))
+ printk(KERN_INFO
+ "%s: rx exception: status = 0x%x\n",
+ atl1_driver_name, status);
+ atl1_intr_rx(adapter);
+ }
+
+ if (--max_ints < 0)
+ break;
+
+ } while ((status = adapter->cmb.cmb->int_stats));
+
+ /* re-enable Interrupt */
+ iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
+ return IRQ_HANDLED;
+}
+
+/*
+ * atl1_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ */
+static void atl1_set_multi(struct net_device *netdev)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_hw *hw = &adapter->hw;
+ struct dev_mc_list *mc_ptr;
+ u32 rctl;
+ u32 hash_value;
+
+ /* Check for Promiscuous and All Multicast modes */
+ rctl = ioread32(hw->hw_addr + REG_MAC_CTRL);
+ if (netdev->flags & IFF_PROMISC)
+ rctl |= MAC_CTRL_PROMIS_EN;
+ else if (netdev->flags & IFF_ALLMULTI) {
+ rctl |= MAC_CTRL_MC_ALL_EN;
+ rctl &= ~MAC_CTRL_PROMIS_EN;
+ } else
+ rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
+
+ iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL);
+
+ /* clear the old settings from the multicast hash table */
+ iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
+ iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
+
+	/* compute each mc address's hash value and put it into the hash table */
+ for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+ hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr);
+ atl1_hash_set(hw, hash_value);
+ }
+}
+
+static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
+{
+ u32 value;
+ struct atl1_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ /* Config MAC CTRL Register */
+ value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
+ /* duplex */
+ if (FULL_DUPLEX == adapter->link_duplex)
+ value |= MAC_CTRL_DUPLX;
+ /* speed */
+ value |= ((u32) ((SPEED_1000 == adapter->link_speed) ?
+ MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
+ MAC_CTRL_SPEED_SHIFT);
+ /* flow control */
+ value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
+ /* PAD & CRC */
+ value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
+ /* preamble length */
+ value |= (((u32) adapter->hw.preamble_len
+ & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
+ /* vlan */
+ if (adapter->vlgrp)
+ value |= MAC_CTRL_RMV_VLAN;
+ /* rx checksum
+ if (adapter->rx_csum)
+ value |= MAC_CTRL_RX_CHKSUM_EN;
+ */
+ /* filter mode */
+ value |= MAC_CTRL_BC_EN;
+ if (netdev->flags & IFF_PROMISC)
+ value |= MAC_CTRL_PROMIS_EN;
+ else if (netdev->flags & IFF_ALLMULTI)
+ value |= MAC_CTRL_MC_ALL_EN;
+ /* value |= MAC_CTRL_LOOPBACK; */
+ iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
+}
+
+static u32 atl1_check_link(struct atl1_adapter *adapter)
+{
+ struct atl1_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ u32 ret_val;
+ u16 speed, duplex, phy_data;
+ int reconfig = 0;
+
+	/* the link status bit in MII_BMSR is latched low, so it must be read twice */
+ atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
+ atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
+ if (!(phy_data & BMSR_LSTATUS)) { /* link down */
+ if (netif_carrier_ok(netdev)) { /* old link state: Up */
+ printk(KERN_INFO "%s: link is down\n",
+ atl1_driver_name);
+ adapter->link_speed = SPEED_0;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ return ATL1_SUCCESS;
+ }
+
+ /* Link Up */
+ ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
+ if (ret_val)
+ return ret_val;
+
+ switch (hw->media_type) {
+ case MEDIA_TYPE_1000M_FULL:
+ if (speed != SPEED_1000 || duplex != FULL_DUPLEX)
+ reconfig = 1;
+ break;
+ case MEDIA_TYPE_100M_FULL:
+ if (speed != SPEED_100 || duplex != FULL_DUPLEX)
+ reconfig = 1;
+ break;
+ case MEDIA_TYPE_100M_HALF:
+ if (speed != SPEED_100 || duplex != HALF_DUPLEX)
+ reconfig = 1;
+ break;
+ case MEDIA_TYPE_10M_FULL:
+ if (speed != SPEED_10 || duplex != FULL_DUPLEX)
+ reconfig = 1;
+ break;
+ case MEDIA_TYPE_10M_HALF:
+ if (speed != SPEED_10 || duplex != HALF_DUPLEX)
+ reconfig = 1;
+ break;
+ }
+
+	/* the negotiated link matches our configured setting */
+ if (!reconfig) {
+ if (adapter->link_speed != speed
+ || adapter->link_duplex != duplex) {
+ adapter->link_speed = speed;
+ adapter->link_duplex = duplex;
+ atl1_setup_mac_ctrl(adapter);
+ printk(KERN_INFO "%s: %s link is up %d Mbps %s\n",
+ atl1_driver_name, netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex ==
+ FULL_DUPLEX ? "full duplex" : "half duplex");
+ }
+ if (!netif_carrier_ok(netdev)) { /* Link down -> Up */
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+ return ATL1_SUCCESS;
+ }
+
+	/* change original link status */
+ if (netif_carrier_ok(netdev)) {
+ adapter->link_speed = SPEED_0;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+
+ if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR &&
+ hw->media_type != MEDIA_TYPE_1000M_FULL) {
+ switch (hw->media_type) {
+ case MEDIA_TYPE_100M_FULL:
+ phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
+ MII_CR_RESET;
+ break;
+ case MEDIA_TYPE_100M_HALF:
+ phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
+ break;
+ case MEDIA_TYPE_10M_FULL:
+ phy_data =
+ MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
+ break;
+ default: /* MEDIA_TYPE_10M_HALF: */
+ phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
+ break;
+ }
+ atl1_write_phy_reg(hw, MII_BMCR, phy_data);
+ return ATL1_SUCCESS;
+ }
+
+	/* auto-negotiation: arm a timer to reconfigure the PHY */
+ if (!adapter->phy_timer_pending) {
+ adapter->phy_timer_pending = true;
+ mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ);
+ }
+
+ return ATL1_SUCCESS;
+}
+
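+/*
+ * Program the RX FIFO / RRD ring high and low watermarks; these are
+ * presumably the thresholds at which the MAC asserts and de-asserts
+ * 802.3x flow control (pause frames).
+ */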
+static void set_flow_ctrl_old(struct atl1_adapter *adapter)
+{
+ u32 hi, lo, value;
+
+ /* RFD Flow Control */
+ value = adapter->rfd_ring.count;
+ hi = value / 16;
+ if (hi < 2)
+ hi = 2;
+ lo = value * 7 / 8;
+
+ value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
+ ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
+ iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
+
+ /* RRD Flow Control */
+ value = adapter->rrd_ring.count;
+ lo = value / 16;
+ hi = value * 7 / 8;
+ if (lo < 2)
+ lo = 2;
+ value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
+ ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
+ iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
+}
+
+static void set_flow_ctrl_new(struct atl1_hw *hw)
+{
+ u32 hi, lo, value;
+
+ /* RXF Flow Control */
+ value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN);
+ lo = value / 16;
+ if (lo < 192)
+ lo = 192;
+ hi = value * 7 / 8;
+ if (hi < lo)
+ hi = lo + 16;
+ value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) |
+ ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT);
+ iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH);
+
+ /* RRD Flow Control */
+ value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN);
+ lo = value / 8;
+ hi = value * 7 / 8;
+ if (lo < 2)
+ lo = 2;
+ if (hi < lo)
+ hi = lo + 3;
+ value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) |
+ ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT);
+ iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH);
+}
+
+/*
+ * atl1_configure - Configure Transmit & Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx/Rx unit of the MAC after a reset.
+ */
+static u32 atl1_configure(struct atl1_adapter *adapter)
+{
+ struct atl1_hw *hw = &adapter->hw;
+ u32 value;
+
+ /* clear interrupt status */
+ iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR);
+
+ /* set MAC Address */
+ value = (((u32) hw->mac_addr[2]) << 24) |
+ (((u32) hw->mac_addr[3]) << 16) |
+ (((u32) hw->mac_addr[4]) << 8) |
+ (((u32) hw->mac_addr[5]));
+ iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
+ value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
+ iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4));
+
+ /* tx / rx ring */
+
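+	/*
+	 * All descriptor rings are carved out of a single coherent allocation
+	 * (ring_header), so they are assumed to share the same upper 32
+	 * address bits; only one HI base address register is programmed.
+	 */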
+ /* HI base address */
+ iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32),
+ hw->hw_addr + REG_DESC_BASE_ADDR_HI);
+ /* LO base address */
+ iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL),
+ hw->hw_addr + REG_DESC_RFD_ADDR_LO);
+ iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL),
+ hw->hw_addr + REG_DESC_RRD_ADDR_LO);
+ iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL),
+ hw->hw_addr + REG_DESC_TPD_ADDR_LO);
+ iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL),
+ hw->hw_addr + REG_DESC_CMB_ADDR_LO);
+ iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL),
+ hw->hw_addr + REG_DESC_SMB_ADDR_LO);
+
+ /* element count */
+ value = adapter->rrd_ring.count;
+ value <<= 16;
+ value += adapter->rfd_ring.count;
+ iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE);
+ iowrite32(adapter->tpd_ring.count, hw->hw_addr + REG_DESC_TPD_RING_SIZE);
+
+ /* Load Ptr */
+ iowrite32(1, hw->hw_addr + REG_LOAD_PTR);
+
+ /* config Mailbox */
+ value = ((atomic_read(&adapter->tpd_ring.next_to_use)
+ & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) |
+ ((atomic_read(&adapter->rrd_ring.next_to_clean)
+ & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
+ ((atomic_read(&adapter->rfd_ring.next_to_use)
+ & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT);
+ iowrite32(value, hw->hw_addr + REG_MAILBOX);
+
+ /* config IPG/IFG */
+ value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK)
+ << MAC_IPG_IFG_IPGT_SHIFT) |
+ (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK)
+ << MAC_IPG_IFG_MIFG_SHIFT) |
+ (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK)
+ << MAC_IPG_IFG_IPGR1_SHIFT) |
+ (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK)
+ << MAC_IPG_IFG_IPGR2_SHIFT);
+ iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG);
+
+ /* config Half-Duplex Control */
+ value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) |
+ (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK)
+ << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) |
+ MAC_HALF_DUPLX_CTRL_EXC_DEF_EN |
+ (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) |
+ (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK)
+ << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT);
+ iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL);
+
+ /* set Interrupt Moderator Timer */
+ iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT);
+ iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL);
+
+ /* set Interrupt Clear Timer */
+ iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER);
+
+	/* set MTU; the extra 4 bytes allow for a VLAN tag */
+ iowrite32(hw->max_frame_size + 4, hw->hw_addr + REG_MTU);
+
+ /* jumbo size & rrd retirement timer */
+ value = (((u32) hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK)
+ << RXQ_JMBOSZ_TH_SHIFT) |
+ (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK)
+ << RXQ_JMBO_LKAH_SHIFT) |
+ (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK)
+ << RXQ_RRD_TIMER_SHIFT);
+ iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM);
+
+ /* Flow Control */
+ switch (hw->dev_rev) {
+ case 0x8001:
+ case 0x9001:
+ case 0x9002:
+ case 0x9003:
+ set_flow_ctrl_old(adapter);
+ break;
+ default:
+ set_flow_ctrl_new(hw);
+ break;
+ }
+
+ /* config TXQ */
+ value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK)
+ << TXQ_CTRL_TPD_BURST_NUM_SHIFT) |
+ (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK)
+ << TXQ_CTRL_TXF_BURST_NUM_SHIFT) |
+ (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK)
+ << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN;
+ iowrite32(value, hw->hw_addr + REG_TXQ_CTRL);
+
+	/* min tpd fetch gap & tx jumbo packet size threshold for task offload */
+ value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK)
+ << TX_JUMBO_TASK_TH_SHIFT) |
+ (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK)
+ << TX_TPD_MIN_IPG_SHIFT);
+ iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG);
+
+ /* config RXQ */
+ value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK)
+ << RXQ_CTRL_RFD_BURST_NUM_SHIFT) |
+ (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK)
+ << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) |
+ (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK)
+ << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) |
+ RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;
+ iowrite32(value, hw->hw_addr + REG_RXQ_CTRL);
+
+ /* config DMA Engine */
+ value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
+ << DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
+ ((((u32) hw->dmaw_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
+ << DMA_CTRL_DMAR_BURST_LEN_SHIFT) |
+ DMA_CTRL_DMAR_EN | DMA_CTRL_DMAW_EN;
+ value |= (u32) hw->dma_ord;
+ if (atl1_rcb_128 == hw->rcb_value)
+ value |= DMA_CTRL_RCB_VALUE;
+ iowrite32(value, hw->hw_addr + REG_DMA_CTRL);
+
+ /* config CMB / SMB */
+ value = hw->cmb_rrd | ((u32) hw->cmb_tpd << 16);
+ iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH);
+ value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16);
+ iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER);
+ iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER);
+
+	/* enable CMB / SMB */
+ value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN;
+ iowrite32(value, hw->hw_addr + REG_CSMB_CTRL);
+
+ value = ioread32(adapter->hw.hw_addr + REG_ISR);
+ if (unlikely((value & ISR_PHY_LINKDOWN) != 0))
+ value = 1; /* config failed */
+ else
+ value = 0;
+
+ /* clear all interrupt status */
+ iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR);
+ iowrite32(0, adapter->hw.hw_addr + REG_ISR);
+ return value;
+}
+
+/*
+ * atl1_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ */
+static void atl1_irq_disable(struct atl1_adapter *adapter)
+{
+ atomic_inc(&adapter->irq_sem);
+ iowrite32(0, adapter->hw.hw_addr + REG_IMR);
+ ioread32(adapter->hw.hw_addr + REG_IMR);
+ synchronize_irq(adapter->pdev->irq);
+}
+
+static void atl1_vlan_rx_register(struct net_device *netdev,
+ struct vlan_group *grp)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
+ u32 ctrl;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ /* atl1_irq_disable(adapter); */
+ adapter->vlgrp = grp;
+
+ if (grp) {
+ /* enable VLAN tag insert/strip */
+ ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
+ ctrl |= MAC_CTRL_RMV_VLAN;
+ iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
+ } else {
+ /* disable VLAN tag insert/strip */
+ ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
+ ctrl &= ~MAC_CTRL_RMV_VLAN;
+ iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
+ }
+
+ /* atl1_irq_enable(adapter); */
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
+/* FIXME: justify or remove -- CHS */
+static void atl1_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+	/* We don't do VLAN filtering */
+ return;
+}
+
+/* FIXME: this looks wrong too -- CHS */
+static void atl1_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ /* atl1_irq_disable(adapter); */
+ if (adapter->vlgrp)
+ adapter->vlgrp->vlan_devices[vid] = NULL;
+ /* atl1_irq_enable(adapter); */
+ spin_unlock_irqrestore(&adapter->lock, flags);
+	/* We don't do VLAN filtering */
+ return;
+}
+
+static void atl1_restore_vlan(struct atl1_adapter *adapter)
+{
+ atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+ if (adapter->vlgrp) {
+ u16 vid;
+ for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+ if (!adapter->vlgrp->vlan_devices[vid])
+ continue;
+ atl1_vlan_rx_add_vid(adapter->netdev, vid);
+ }
+ }
+}
+
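+/*
+ * tpd_avail - number of free descriptors in the TPD ring.  One slot is
+ * always left unused so that a full ring can be distinguished from an
+ * empty one (next_to_use == next_to_clean means empty).
+ */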
+static u16 tpd_avail(struct atl1_tpd_ring *tpd_ring)
+{
+ u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
+ u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
+ return ((next_to_clean >
+ next_to_use) ? next_to_clean - next_to_use -
+ 1 : tpd_ring->count + next_to_clean - next_to_use - 1);
+}
+
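+/*
+ * Fill in the TSO parameter word for a GSO skb.  Returns true if the
+ * hardware should perform TCP segmentation, false for a non-TSO frame,
+ * or a negative errno if the cloned headers could not be expanded.
+ */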
+static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
+ struct tso_param *tso)
+{
+ /* We enter this function holding a spinlock. */
+ u8 ipofst;
+ int err;
+
+ if (skb_shinfo(skb)->gso_size) {
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (unlikely(err))
+ return err;
+ }
+
+ if (skb->protocol == ntohs(ETH_P_IP)) {
+ skb->nh.iph->tot_len = 0;
+ skb->nh.iph->check = 0;
+ skb->h.th->check =
+ ~csum_tcpudp_magic(skb->nh.iph->saddr,
+ skb->nh.iph->daddr, 0,
+ IPPROTO_TCP, 0);
+ ipofst = skb->nh.raw - skb->data;
+ if (ipofst != ENET_HEADER_SIZE) /* 802.3 frame */
+ tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT;
+
+ tso->tsopl |= (skb->nh.iph->ihl &
+ CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT;
+ tso->tsopl |= ((skb->h.th->doff << 2) &
+ TSO_PARAM_TCPHDRLEN_MASK) << TSO_PARAM_TCPHDRLEN_SHIFT;
+ tso->tsopl |= (skb_shinfo(skb)->gso_size &
+ TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT;
+ tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT;
+ tso->tsopl |= 1 << TSO_PARAM_TCPCKSUM_SHIFT;
+ tso->tsopl |= 1 << TSO_PARAM_SEGMENT_SHIFT;
+ return true;
+ }
+ }
+ return false;
+}
+
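+/*
+ * Fill in the custom checksum parameters for a CHECKSUM_PARTIAL skb.
+ * Returns -1 if the checksum start offset is odd (apparently not
+ * supported by the hardware), true otherwise.
+ */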
+static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
+ struct csum_param *csum)
+{
+ u8 css, cso;
+
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ cso = skb->h.raw - skb->data;
+ css = (skb->h.raw + skb->csum) - skb->data;
+ if (unlikely(cso & 0x1)) {
+ printk(KERN_DEBUG "%s: payload offset != even number\n",
+ atl1_driver_name);
+ return -1;
+ }
+ csum->csumpl |= (cso & CSUM_PARAM_PLOADOFFSET_MASK) <<
+ CSUM_PARAM_PLOADOFFSET_SHIFT;
+ csum->csumpl |= (css & CSUM_PARAM_XSUMOFFSET_MASK) <<
+ CSUM_PARAM_XSUMOFFSET_SHIFT;
+ csum->csumpl |= 1 << CSUM_PARAM_CUSTOMCKSUM_SHIFT;
+ return true;
+ }
+
+ return true;
+}
+
+static void atl1_tx_map(struct atl1_adapter *adapter,
+ struct sk_buff *skb, bool tcp_seg)
+{
+ /* We enter this function holding a spinlock. */
+ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+ struct atl1_buffer *buffer_info;
+ struct page *page;
+ int first_buf_len = skb->len;
+ unsigned long offset;
+ unsigned int nr_frags;
+ unsigned int f;
+ u16 tpd_next_to_use;
+ u16 proto_hdr_len;
+ u16 i, m, len12;
+
+ first_buf_len -= skb->data_len;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ tpd_next_to_use = atomic_read(&tpd_ring->next_to_use);
+ buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
+ if (unlikely(buffer_info->skb))
+ BUG();
+	buffer_info->skb = NULL;	/* the skb is attached to the last TPD's buffer_info at the end of this function */
+
+ if (tcp_seg) {
+ /* TSO/GSO */
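+		/*
+		 * Map the protocol headers into a TPD of their own, then
+		 * split the rest of the linear data into MAX_TX_BUF_LEN
+		 * sized pieces, one TPD each.
+		 */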
+ proto_hdr_len =
+ ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+ buffer_info->length = proto_hdr_len;
+ page = virt_to_page(skb->data);
+ offset = (unsigned long)skb->data & ~PAGE_MASK;
+ buffer_info->dma = pci_map_page(adapter->pdev, page,
+ offset, proto_hdr_len,
+ PCI_DMA_TODEVICE);
+
+ if (++tpd_next_to_use == tpd_ring->count)
+ tpd_next_to_use = 0;
+
+ if (first_buf_len > proto_hdr_len) {
+ len12 = first_buf_len - proto_hdr_len;
+ m = (len12 + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
+ for (i = 0; i < m; i++) {
+ buffer_info =
+ &tpd_ring->buffer_info[tpd_next_to_use];
+ buffer_info->skb = NULL;
+ buffer_info->length =
+ (MAX_TX_BUF_LEN >=
+ len12) ? MAX_TX_BUF_LEN : len12;
+ len12 -= buffer_info->length;
+ page = virt_to_page(skb->data +
+ (proto_hdr_len +
+ i * MAX_TX_BUF_LEN));
+ offset = (unsigned long)(skb->data +
+ (proto_hdr_len +
+ i * MAX_TX_BUF_LEN)) &
+ ~PAGE_MASK;
+ buffer_info->dma =
+ pci_map_page(adapter->pdev, page, offset,
+ buffer_info->length,
+ PCI_DMA_TODEVICE);
+ if (++tpd_next_to_use == tpd_ring->count)
+ tpd_next_to_use = 0;
+ }
+ }
+ } else {
+ /* not TSO/GSO */
+ buffer_info->length = first_buf_len;
+ page = virt_to_page(skb->data);
+ offset = (unsigned long)skb->data & ~PAGE_MASK;
+ buffer_info->dma = pci_map_page(adapter->pdev, page,
+ offset, first_buf_len,
+ PCI_DMA_TODEVICE);
+ if (++tpd_next_to_use == tpd_ring->count)
+ tpd_next_to_use = 0;
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ struct skb_frag_struct *frag;
+ u16 lenf, i, m;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ lenf = frag->size;
+
+ m = (lenf + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
+ for (i = 0; i < m; i++) {
+ buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
+ if (unlikely(buffer_info->skb))
+ BUG();
+ buffer_info->skb = NULL;
+ buffer_info->length =
+ (lenf > MAX_TX_BUF_LEN) ? MAX_TX_BUF_LEN : lenf;
+ lenf -= buffer_info->length;
+ buffer_info->dma =
+ pci_map_page(adapter->pdev, frag->page,
+ frag->page_offset + i * MAX_TX_BUF_LEN,
+ buffer_info->length, PCI_DMA_TODEVICE);
+
+ if (++tpd_next_to_use == tpd_ring->count)
+ tpd_next_to_use = 0;
+ }
+ }
+
+ /* last tpd's buffer-info */
+ buffer_info->skb = skb;
+}
+
+static void atl1_tx_queue(struct atl1_adapter *adapter, int count,
+ union tpd_descr *descr)
+{
+ /* We enter this function holding a spinlock. */
+ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+ int j;
+ u32 val;
+ struct atl1_buffer *buffer_info;
+ struct tx_packet_desc *tpd;
+ u16 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use);
+
+ for (j = 0; j < count; j++) {
+ buffer_info = &tpd_ring->buffer_info[tpd_next_to_use];
+ tpd = ATL1_TPD_DESC(&adapter->tpd_ring, tpd_next_to_use);
+ tpd->desc.csum.csumpu = descr->csum.csumpu;
+ tpd->desc.csum.csumpl = descr->csum.csumpl;
+ tpd->desc.tso.tsopu = descr->tso.tsopu;
+ tpd->desc.tso.tsopl = descr->tso.tsopl;
+ tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
+ tpd->desc.data = descr->data;
+ tpd->desc.csum.csumpu |= (cpu_to_le16(buffer_info->length) &
+ CSUM_PARAM_BUFLEN_MASK) << CSUM_PARAM_BUFLEN_SHIFT;
+
+ val = (descr->tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) &
+ TSO_PARAM_SEGMENT_MASK;
+ if (val && !j)
+ tpd->desc.tso.tsopl |= 1 << TSO_PARAM_HDRFLAG_SHIFT;
+
+ if (j == (count - 1))
+ tpd->desc.csum.csumpl |= 1 << CSUM_PARAM_EOP_SHIFT;
+
+ if (++tpd_next_to_use == tpd_ring->count)
+ tpd_next_to_use = 0;
+ }
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use);
+}
+
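+/*
+ * Write the current TPD/RFD producer and RRD consumer indices to the
+ * mailbox register so the hardware knows how far the host has advanced.
+ */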
+static void atl1_update_mailbox(struct atl1_adapter *adapter)
+{
+ unsigned long flags;
+ u32 tpd_next_to_use;
+ u32 rfd_next_to_use;
+ u32 rrd_next_to_clean;
+ u32 value;
+
+ spin_lock_irqsave(&adapter->mb_lock, flags);
+
+ tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
+ rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use);
+ rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean);
+
+ value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
+ MB_RFD_PROD_INDX_SHIFT) |
+ ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
+ MB_RRD_CONS_INDX_SHIFT) |
+ ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
+ MB_TPD_PROD_INDX_SHIFT);
+ iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
+
+ spin_unlock_irqrestore(&adapter->mb_lock, flags);
+}
+
+static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ int len = skb->len;
+ int tso;
+ int count = 1;
+ int ret_val;
+ u32 val;
+ union tpd_descr param;
+ u16 frag_size;
+ u16 vlan_tag;
+ unsigned long flags;
+ unsigned int nr_frags = 0;
+ unsigned int mss = 0;
+ unsigned int f;
+ unsigned int proto_hdr_len;
+
+ len -= skb->data_len;
+
+ if (unlikely(skb->len == 0)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ param.data = 0;
+ param.tso.tsopu = 0;
+ param.tso.tsopl = 0;
+ param.csum.csumpu = 0;
+ param.csum.csumpl = 0;
+
+ /* nr_frags will be nonzero if we're doing scatter/gather (SG) */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ for (f = 0; f < nr_frags; f++) {
+ frag_size = skb_shinfo(skb)->frags[f].size;
+ if (frag_size)
+ count +=
+ (frag_size + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
+ }
+
+ /* mss will be nonzero if we're doing segment offload (TSO/GSO) */
+ mss = skb_shinfo(skb)->gso_size;
+ if (mss) {
+ if (skb->protocol == ntohs(ETH_P_IP)) {
+ proto_hdr_len = ((skb->h.raw - skb->data) +
+ (skb->h.th->doff << 2));
+ if (unlikely(proto_hdr_len > len)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+			/* do we need additional TPDs? */
+ if (proto_hdr_len != len)
+ count += (len - proto_hdr_len +
+ MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN;
+ }
+ }
+
+ local_irq_save(flags);
+ if (!spin_trylock(&adapter->lock)) {
+ /* Can't get lock - tell upper layer to requeue */
+ local_irq_restore(flags);
+ printk(KERN_DEBUG "%s: TX locked\n", atl1_driver_name);
+ return NETDEV_TX_LOCKED;
+ }
+
+ if (tpd_avail(&adapter->tpd_ring) < count) {
+ /* not enough descriptors */
+ netif_stop_queue(netdev);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ printk(KERN_DEBUG "%s: TX busy\n", atl1_driver_name);
+ return NETDEV_TX_BUSY;
+ }
+
+ param.data = 0;
+
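+	/*
+	 * Convert the standard 802.1Q TCI into the hardware tag layout
+	 * (the inverse of the remapping done in atl1_intr_rx).
+	 */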
+ if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+ vlan_tag = vlan_tx_tag_get(skb);
+ vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
+ ((vlan_tag >> 9) & 0x8);
+ param.csum.csumpl |= 1 << CSUM_PARAM_INSVLAG_SHIFT;
+ param.csum.csumpu |= (vlan_tag & CSUM_PARAM_VALANTAG_MASK) <<
+ CSUM_PARAM_VALAN_SHIFT;
+ }
+
+ tso = atl1_tso(adapter, skb, &param.tso);
+ if (tso < 0) {
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (!tso) {
+ ret_val = atl1_tx_csum(adapter, skb, &param.csum);
+ if (ret_val < 0) {
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
+
+ val = (param.csum.csumpl >> CSUM_PARAM_SEGMENT_SHIFT) &
+ CSUM_PARAM_SEGMENT_MASK;
+ atl1_tx_map(adapter, skb, 1 == val);
+ atl1_tx_queue(adapter, count, &param);
+ netdev->trans_start = jiffies;
+ spin_unlock_irqrestore(&adapter->lock, flags);
+ atl1_update_mailbox(adapter);
+ return NETDEV_TX_OK;
+}
+
+/*
+ * atl1_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ */
+static struct net_device_stats *atl1_get_stats(struct net_device *netdev)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ return &adapter->net_stats;
+}
+
+/*
+ * atl1_clean_rx_ring - Free RFD Buffers
+ * @adapter: board private structure
+ */
+static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
+{
+ struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1_buffer *buffer_info;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rfd_ring->count; i++) {
+ buffer_info = &rfd_ring->buffer_info[i];
+ if (buffer_info->dma) {
+ pci_unmap_page(pdev,
+ buffer_info->dma,
+ buffer_info->length,
+ PCI_DMA_FROMDEVICE);
+ buffer_info->dma = 0;
+ }
+ if (buffer_info->skb) {
+ dev_kfree_skb(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ }
+
+ size = sizeof(struct atl1_buffer) * rfd_ring->count;
+ memset(rfd_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rfd_ring->desc, 0, rfd_ring->size);
+
+ rfd_ring->next_to_clean = 0;
+ atomic_set(&rfd_ring->next_to_use, 0);
+
+ rrd_ring->next_to_use = 0;
+ atomic_set(&rrd_ring->next_to_clean, 0);
+}
+
+/*
+ * atl1_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ */
+static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
+{
+ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+ struct atl1_buffer *buffer_info;
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
+
+ /* Free all the Tx ring sk_buffs */
+ for (i = 0; i < tpd_ring->count; i++) {
+ buffer_info = &tpd_ring->buffer_info[i];
+ if (buffer_info->dma) {
+ pci_unmap_page(pdev, buffer_info->dma,
+ buffer_info->length, PCI_DMA_TODEVICE);
+ buffer_info->dma = 0;
+ }
+ }
+
+ for (i = 0; i < tpd_ring->count; i++) {
+ buffer_info = &tpd_ring->buffer_info[i];
+ if (buffer_info->skb) {
+ dev_kfree_skb_any(buffer_info->skb);
+ buffer_info->skb = NULL;
+ }
+ }
+
+ size = sizeof(struct atl1_buffer) * tpd_ring->count;
+ memset(tpd_ring->buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(tpd_ring->desc, 0, tpd_ring->size);
+
+ atomic_set(&tpd_ring->next_to_use, 0);
+ atomic_set(&tpd_ring->next_to_clean, 0);
+}
+
+/*
+ * atl1_free_ring_resources - Free Tx/Rx Descriptor Resources
+ * @adapter: board private structure
+ *
+ * Free all transmit and receive software resources
+ */
+void atl1_free_ring_resources(struct atl1_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
+ struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1_ring_header *ring_header = &adapter->ring_header;
+
+ atl1_clean_tx_ring(adapter);
+ atl1_clean_rx_ring(adapter);
+
+ kfree(tpd_ring->buffer_info);
+ pci_free_consistent(pdev, ring_header->size, ring_header->desc,
+ ring_header->dma);
+
+ tpd_ring->buffer_info = NULL;
+ tpd_ring->desc = NULL;
+ tpd_ring->dma = 0;
+
+ rfd_ring->buffer_info = NULL;
+ rfd_ring->desc = NULL;
+ rfd_ring->dma = 0;
+
+ rrd_ring->desc = NULL;
+ rrd_ring->dma = 0;
+}
+
+s32 atl1_up(struct atl1_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int err;
+ int irq_flags = IRQF_SAMPLE_RANDOM;
+
+ /* hardware has been reset, we need to reload some things */
+ atl1_set_multi(netdev);
+ atl1_restore_vlan(adapter);
+ err = atl1_alloc_rx_buffers(adapter);
+	if (unlikely(!err))	/* no RX buffers were allocated */
+ return -ENOMEM;
+
+ if (unlikely(atl1_configure(adapter))) {
+ err = -EIO;
+ goto err_up;
+ }
+
+ err = pci_enable_msi(adapter->pdev);
+ if (err) {
+ dev_info(&adapter->pdev->dev,
+ "Unable to enable MSI: %d\n", err);
+ irq_flags |= IRQF_SHARED;
+ }
+
+ err = request_irq(adapter->pdev->irq, &atl1_intr, irq_flags,
+ netdev->name, netdev);
+ if (unlikely(err))
+ goto err_up;
+
+ mod_timer(&adapter->watchdog_timer, jiffies);
+ atl1_irq_enable(adapter);
+ atl1_check_link(adapter);
+ return 0;
+
+ /* FIXME: unreachable code! -- CHS */
+ /* free irq disable any interrupt */
+ iowrite32(0, adapter->hw.hw_addr + REG_IMR);
+ free_irq(adapter->pdev->irq, netdev);
+
+err_up:
+ pci_disable_msi(adapter->pdev);
+ /* free rx_buffers */
+ atl1_clean_rx_ring(adapter);
+ return err;
+}
+
+void atl1_down(struct atl1_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ del_timer_sync(&adapter->watchdog_timer);
+ del_timer_sync(&adapter->phy_config_timer);
+ adapter->phy_timer_pending = false;
+
+ atl1_irq_disable(adapter);
+ free_irq(adapter->pdev->irq, netdev);
+ pci_disable_msi(adapter->pdev);
+ atl1_reset_hw(&adapter->hw);
+ adapter->cmb.cmb->int_stats = 0;
+
+ adapter->link_speed = SPEED_0;
+ adapter->link_duplex = -1;
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ atl1_clean_tx_ring(adapter);
+ atl1_clean_rx_ring(adapter);
+}
+
+/*
+ * atl1_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ int old_mtu = netdev->mtu;
+ int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+
+ if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
+ (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+ printk(KERN_WARNING "%s: invalid MTU setting\n",
+ atl1_driver_name);
+ return -EINVAL;
+ }
+
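+	/*
+	 * Round the RX buffer length up to a multiple of 8; the jumbo
+	 * thresholds appear to be programmed in units of 8 bytes.
+	 */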
+ adapter->hw.max_frame_size = max_frame;
+ adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
+ adapter->rx_buffer_len = (max_frame + 7) & ~7;
+ adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
+
+ netdev->mtu = new_mtu;
+ if ((old_mtu != new_mtu) && netif_running(netdev)) {
+ atl1_down(adapter);
+ atl1_up(adapter);
+ }
+
+ return 0;
+}
+
+/*
+ * atl1_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl1_set_mac(struct net_device *netdev, void *p)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
+
+ atl1_set_mac_addr(&adapter->hw);
+ return 0;
+}
+
+/*
+ * atl1_watchdog - Timer Call-back
+ * @data: pointer to the adapter private structure cast into an unsigned long
+ */
+static void atl1_watchdog(unsigned long data)
+{
+ struct atl1_adapter *adapter = (struct atl1_adapter *)data;
+
+ /* Reset the timer */
+ mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
+}
+
+static int mdio_read(struct net_device *netdev, int phy_id, int reg_num)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ u16 result;
+
+ atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result);
+
+ return result;
+}
+
+static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, int val)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+
+ atl1_write_phy_reg(&adapter->hw, reg_num, val);
+}
+
+/*
+ * atl1_mii_ioctl - Handle MII ioctls
+ * @netdev: network interface device structure
+ * @ifr: pointer to the interface request structure
+ * @cmd: ioctl command
+ */
+static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ unsigned long flags;
+ int retval;
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+
+ return retval;
+}
+
+/*
+ * atl1_ioctl - Handle ioctl calls
+ * @netdev: network interface device structure
+ * @ifr: pointer to the interface request structure
+ * @cmd: ioctl command
+ */
+static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return atl1_mii_ioctl(netdev, ifr, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/*
+ * atl1_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ */
+static void atl1_tx_timeout(struct net_device *netdev)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->tx_timeout_task);
+}
+
+/*
+ * atl1_phy_config - Timer Call-back
+ * @data: pointer to the adapter private structure cast into an unsigned long
+ */
+static void atl1_phy_config(unsigned long data)
+{
+ struct atl1_adapter *adapter = (struct atl1_adapter *)data;
+ struct atl1_hw *hw = &adapter->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ adapter->phy_timer_pending = false;
+ atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
+ atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg);
+ atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
+int atl1_reset(struct atl1_adapter *adapter)
+{
+ int ret;
+
+ ret = atl1_reset_hw(&adapter->hw);
+ if (ret != ATL1_SUCCESS)
+ return ret;
+ return atl1_init_hw(&adapter->hw);
+}
+
+/*
+ * atl1_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ */
+static int atl1_open(struct net_device *netdev)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ /* allocate transmit descriptors */
+ err = atl1_setup_ring_resources(adapter);
+ if (err)
+ return err;
+
+ err = atl1_up(adapter);
+ if (err)
+ goto err_up;
+
+ return 0;
+
+err_up:
+ atl1_reset(adapter);
+ return err;
+}
+
+/*
+ * atl1_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the drivers control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ */
+static int atl1_close(struct net_device *netdev)
+{
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ atl1_down(adapter);
+ atl1_free_ring_resources(adapter);
+ return 0;
+}
+
+/*
+ * If the TPD buffer size is equal to 0, PCIE DMAR_TO_INT will assert.
+ * We do a soft reset <0x1400=1> according to the spec, but it seems
+ * that the PCIE or DMA state machine is not reset, so DMAR_TO_INT
+ * asserts again and again.
+ */
+static void atl1_tx_timeout_task(struct work_struct *work)
+{
+ struct atl1_adapter *adapter =
+ container_of(work, struct atl1_adapter, tx_timeout_task);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+ atl1_down(adapter);
+ atl1_up(adapter);
+ netif_device_attach(netdev);
+}
+
+/*
+ * atl1_link_chg_task - deal with a link change event out of interrupt context
+ */
+static void atl1_link_chg_task(struct work_struct *work)
+{
+ struct atl1_adapter *adapter =
+ container_of(work, struct atl1_adapter, link_chg_task);
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->lock, flags);
+ atl1_check_link(adapter);
+ spin_unlock_irqrestore(&adapter->lock, flags);
+}
+
+/*
+ * atl1_pcie_patch - Patch for PCIE module
+ */
+static void atl1_pcie_patch(struct atl1_adapter *adapter)
+{
+ u32 value;
+ value = 0x6500;
+ iowrite32(value, adapter->hw.hw_addr + 0x12FC);
+ /* pcie flow control mode change */
+ value = ioread32(adapter->hw.hw_addr + 0x1008);
+ value |= 0x8000;
+ iowrite32(value, adapter->hw.hw_addr + 0x1008);
+}
+
+/*
+ * On ACPI resume on some VIA motherboards, the Interrupt Disable bit (0x400)
+ * in the PCI Command register is left set, which keeps legacy interrupts
+ * disabled.  This function clears the bit to re-enable them.
+ * Brackett, 2006/03/15
+ */
+static void atl1_via_workaround(struct atl1_adapter *adapter)
+{
+ unsigned long value;
+
+ value = ioread16(adapter->hw.hw_addr + PCI_COMMAND);
+ if (value & PCI_COMMAND_INTX_DISABLE)
+ value &= ~PCI_COMMAND_INTX_DISABLE;
+ iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND);
+}
+
+/*
+ * atl1_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in atl1_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * atl1_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ */
+static int __devinit atl1_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct atl1_adapter *adapter;
+ static int cards_found = 0;
+ bool pci_using_64 = true;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (err) {
+ printk(KERN_DEBUG
+ "%s: no usable DMA configuration, aborting\n",
+ atl1_driver_name);
+ goto err_dma;
+ }
+ pci_using_64 = false;
+ }
+ /* Mark all PCI regions associated with PCI device
+ * pdev as being reserved by owner atl1_driver_name
+ */
+ err = pci_request_regions(pdev, atl1_driver_name);
+ if (err)
+ goto err_request_regions;
+
+ /* Enables bus-mastering on the device and calls
+ * pcibios_set_master to do the needed arch specific settings
+ */
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev(sizeof(struct atl1_adapter));
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+ SET_MODULE_OWNER(netdev);
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->hw.back = adapter;
+
+ adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
+ if (!adapter->hw.hw_addr) {
+ err = -EIO;
+ goto err_pci_iomap;
+ }
+ /* get device revision number */
+ adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + (REG_MASTER_CTRL + 2));
+
+ /* set default ring resource counts */
+ adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
+ adapter->tpd_ring.count = ATL1_DEFAULT_TPD;
+
+ adapter->mii.dev = netdev;
+ adapter->mii.mdio_read = mdio_read;
+ adapter->mii.mdio_write = mdio_write;
+ adapter->mii.phy_id_mask = 0x1f;
+ adapter->mii.reg_num_mask = 0x1f;
+
+ netdev->open = &atl1_open;
+ netdev->stop = &atl1_close;
+ netdev->hard_start_xmit = &atl1_xmit_frame;
+ netdev->get_stats = &atl1_get_stats;
+ netdev->set_multicast_list = &atl1_set_multi;
+ netdev->set_mac_address = &atl1_set_mac;
+ netdev->change_mtu = &atl1_change_mtu;
+ netdev->do_ioctl = &atl1_ioctl;
+ netdev->tx_timeout = &atl1_tx_timeout;
+ netdev->watchdog_timeo = 5 * HZ;
+ netdev->vlan_rx_register = atl1_vlan_rx_register;
+ netdev->vlan_rx_add_vid = atl1_vlan_rx_add_vid;
+ netdev->vlan_rx_kill_vid = atl1_vlan_rx_kill_vid;
+ netdev->ethtool_ops = &atl1_ethtool_ops;
+ adapter->bd_number = cards_found;
+ adapter->pci_using_64 = pci_using_64;
+
+ /* setup the private structure */
+ err = atl1_sw_init(adapter);
+ if (err)
+ goto err_common;
+
+ netdev->features = NETIF_F_HW_CSUM;
+ netdev->features |= NETIF_F_SG;
+ netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
+
+ /*
+ * FIXME - Until tso performance gets fixed, disable the feature.
+ * Enable it with ethtool -K if desired.
+ */
+ /* netdev->features |= NETIF_F_TSO; */
+
+ if (pci_using_64)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->features |= NETIF_F_LLTX;
+
+ /*
+	 * patch for some early revisions of the L1;
+	 * the final version of the L1 may not need
+	 * these patches
+ */
+ /* atl1_pcie_patch(adapter); */
+
+ /* really reset GPHY core */
+ iowrite16(0, adapter->hw.hw_addr + REG_GPHY_ENABLE);
+
+ /*
+ * reset the controller to
+ * put the device in a known good starting state
+ */
+ if (atl1_reset_hw(&adapter->hw)) {
+ err = -EIO;
+ goto err_common;
+ }
+
+ /* copy the MAC address out of the EEPROM */
+ atl1_read_mac_addr(&adapter->hw);
+ memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ err = -EIO;
+ goto err_common;
+ }
+
+ atl1_check_options(adapter);
+
+ /* pre-init the MAC, and setup link */
+ err = atl1_init_hw(&adapter->hw);
+ if (err) {
+ err = -EIO;
+ goto err_common;
+ }
+
+ atl1_pcie_patch(adapter);
+ /* assume we have no link for now */
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = &atl1_watchdog;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
+
+ init_timer(&adapter->phy_config_timer);
+ adapter->phy_config_timer.function = &atl1_phy_config;
+ adapter->phy_config_timer.data = (unsigned long)adapter;
+ adapter->phy_timer_pending = false;
+
+ INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);
+
+ INIT_WORK(&adapter->link_chg_task, atl1_link_chg_task);
+
+ INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_common;
+
+ cards_found++;
+ atl1_via_workaround(adapter);
+ return 0;
+
+err_common:
+ pci_iounmap(pdev, adapter->hw.hw_addr);
+err_pci_iomap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_dma:
+err_request_regions:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/*
+ * atl1_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * atl1_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ */
+static void __devexit atl1_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1_adapter *adapter;
+ /* Device not available. Return. */
+ if (!netdev)
+ return;
+
+ adapter = netdev_priv(netdev);
+ iowrite16(0, adapter->hw.hw_addr + REG_GPHY_ENABLE);
+ unregister_netdev(netdev);
+ pci_iounmap(pdev, adapter->hw.hw_addr);
+ pci_release_regions(pdev);
+ free_netdev(netdev);
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ struct atl1_hw *hw = &adapter->hw;
+ u32 ctrl = 0;
+ u32 wufc = adapter->wol;
+
+ netif_device_detach(netdev);
+ if (netif_running(netdev))
+ atl1_down(adapter);
+
+ atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
+ atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
+ if (ctrl & BMSR_LSTATUS)
+ wufc &= ~ATL1_WUFC_LNKC;
+
+ /* reduce speed to 10/100M */
+ if (wufc) {
+ atl1_phy_enter_power_saving(hw);
+		/* on resume, let the driver re-establish the link */
+ hw->phy_configured = false;
+ atl1_set_mac_addr(hw);
+ atl1_set_multi(netdev);
+
+ ctrl = 0;
+ /* turn on magic packet wol */
+ if (wufc & ATL1_WUFC_MAG)
+ ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
+
+ /* turn on Link change WOL */
+ if (wufc & ATL1_WUFC_LNKC)
+ ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
+ iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
+
+ /* turn on all-multi mode if wake on multicast is enabled */
+ ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
+ ctrl &= ~MAC_CTRL_DBG;
+ ctrl &= ~MAC_CTRL_PROMIS_EN;
+ if (wufc & ATL1_WUFC_MC)
+ ctrl |= MAC_CTRL_MC_ALL_EN;
+ else
+ ctrl &= ~MAC_CTRL_MC_ALL_EN;
+
+		/* turn on broadcast mode if wake-on-broadcast is enabled */
+ if (wufc & ATL1_WUFC_BC)
+ ctrl |= MAC_CTRL_BC_EN;
+ else
+ ctrl &= ~MAC_CTRL_BC_EN;
+
+ /* enable RX */
+ ctrl |= MAC_CTRL_RX_EN;
+ iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ pci_enable_wake(pdev, PCI_D3cold, 1); /* 4 == D3 cold */
+ } else {
+ iowrite32(0, hw->hw_addr + REG_WOL_CTRL);
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0); /* 4 == D3 cold */
+ }
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int atl1_resume(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct atl1_adapter *adapter = netdev_priv(netdev);
+ u32 ret_val;
+
+ pci_set_power_state(pdev, 0);
+ pci_restore_state(pdev);
+
+ ret_val = pci_enable_device(pdev);
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+
+ iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL);
+ atl1_reset(adapter);
+
+ if (netif_running(netdev))
+ atl1_up(adapter);
+ netif_device_attach(netdev);
+
+ atl1_via_workaround(adapter);
+
+ return 0;
+}
+#else
+#define atl1_suspend NULL
+#define atl1_resume NULL
+#endif
+
+static struct pci_driver atl1_driver = {
+ .name = atl1_driver_name,
+ .id_table = atl1_pci_tbl,
+ .probe = atl1_probe,
+ .remove = __devexit_p(atl1_remove),
+	/* Power Management Hooks */
+ /* probably broken right now -- CHS */
+ .suspend = atl1_suspend,
+ .resume = atl1_resume
+};
+
+/*
+ * atl1_exit_module - Driver Exit Cleanup Routine
+ *
+ * atl1_exit_module is called just before the driver is removed
+ * from memory.
+ */
+static void __exit atl1_exit_module(void)
+{
+ pci_unregister_driver(&atl1_driver);
+}
+
+/*
+ * atl1_init_module - Driver Registration Routine
+ *
+ * atl1_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ */
+static int __init atl1_init_module(void)
+{
+ printk(KERN_INFO "%s - version %s\n", atl1_driver_string, DRIVER_VERSION);
+ printk(KERN_INFO "%s\n", atl1_copyright);
+ return pci_register_driver(&atl1_driver);
+}
+
+module_init(atl1_init_module);
+module_exit(atl1_exit_module);
diff --git a/drivers/net/atl1/atl1_param.c b/drivers/net/atl1/atl1_param.c
new file mode 100644
index 00000000000..c407214339f
--- /dev/null
+++ b/drivers/net/atl1/atl1_param.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
+ * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
+ * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/moduleparam.h>
+#include "atl1.h"
+
+/*
+ * This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+#define ATL1_MAX_NIC 4
+
+#define OPTION_UNSET -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED 1
+
+#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }
+
+/*
+ * Interrupt Moderate Timer in units of 2 us
+ *
+ * Valid Range: 10-65535
+ *
+ * Default Value: 100 (200us)
+ */
+static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
+static int num_int_mod_timer = 0;
+module_param_array_named(int_mod_timer, int_mod_timer, int, &num_int_mod_timer, 0);
+MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");
+
+/*
+ * flash_vendor
+ *
+ * Valid Range: 0-2
+ *
+ * 0 - Atmel
+ * 1 - SST
+ * 2 - ST
+ *
+ * Default Value: 0
+ */
+static int __devinitdata flash_vendor[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
+static int num_flash_vendor = 0;
+module_param_array_named(flash_vendor, flash_vendor, int, &num_flash_vendor, 0);
+MODULE_PARM_DESC(flash_vendor, "SPI flash vendor");
+
+#define DEFAULT_INT_MOD_CNT 100 /* 200us */
+#define MAX_INT_MOD_CNT 65000
+#define MIN_INT_MOD_CNT 50
+
+#define FLASH_VENDOR_DEFAULT 0
+#define FLASH_VENDOR_MIN 0
+#define FLASH_VENDOR_MAX 2
+
+struct atl1_option {
+ enum { enable_option, range_option, list_option } type;
+ char *name;
+ char *err;
+ int def;
+ union {
+ struct { /* range_option info */
+ int min;
+ int max;
+ } r;
+ struct { /* list_option info */
+ int nr;
+ struct atl1_opt_list {
+ int i;
+ char *str;
+ } *p;
+ } l;
+ } arg;
+};
+
+static int __devinit atl1_validate_option(int *value, struct atl1_option *opt)
+{
+ if (*value == OPTION_UNSET) {
+ *value = opt->def;
+ return 0;
+ }
+
+ switch (opt->type) {
+ case enable_option:
+ switch (*value) {
+ case OPTION_ENABLED:
+ printk(KERN_INFO "%s: %s Enabled\n", atl1_driver_name,
+ opt->name);
+ return 0;
+ case OPTION_DISABLED:
+ printk(KERN_INFO "%s: %s Disabled\n", atl1_driver_name,
+ opt->name);
+ return 0;
+ }
+ break;
+ case range_option:
+ if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+ printk(KERN_INFO "%s: %s set to %i\n",
+ atl1_driver_name, opt->name, *value);
+ return 0;
+ }
+ break;
+ case list_option: {
+ int i;
+ struct atl1_opt_list *ent;
+
+ for (i = 0; i < opt->arg.l.nr; i++) {
+ ent = &opt->arg.l.p[i];
+ if (*value == ent->i) {
+ if (ent->str[0] != '\0')
+ printk(KERN_INFO "%s: %s\n",
+ atl1_driver_name, ent->str);
+ return 0;
+ }
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ printk(KERN_INFO "%s: invalid %s specified (%i) %s\n",
+ atl1_driver_name, opt->name, *value, opt->err);
+ *value = opt->def;
+ return -1;
+}
+
+/*
+ * atl1_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input. If an invalid value is given, or if no user specified
+ * value exists, a default value is used. The final value is stored
+ * in a variable in the adapter structure.
+ */
+void __devinit atl1_check_options(struct atl1_adapter *adapter)
+{
+ int bd = adapter->bd_number;
+ if (bd >= ATL1_MAX_NIC) {
+ printk(KERN_NOTICE "%s: warning: no configuration for board #%i\n",
+ atl1_driver_name, bd);
+ printk(KERN_NOTICE "%s: using defaults for all values\n",
+ atl1_driver_name);
+ }
+ { /* Interrupt Moderate Timer */
+ struct atl1_option opt = {
+ .type = range_option,
+ .name = "Interrupt Moderator Timer",
+ .err = "using default of "
+ __MODULE_STRING(DEFAULT_INT_MOD_CNT),
+ .def = DEFAULT_INT_MOD_CNT,
+ .arg = { .r =
+ { .min = MIN_INT_MOD_CNT, .max = MAX_INT_MOD_CNT } }
+ };
+ int val;
+ if (num_int_mod_timer > bd) {
+ val = int_mod_timer[bd];
+ atl1_validate_option(&val, &opt);
+ adapter->imt = (u16) val;
+ } else
+ adapter->imt = (u16) (opt.def);
+ }
+
+ { /* Flash Vendor */
+ struct atl1_option opt = {
+ .type = range_option,
+ .name = "SPI Flash Vendor",
+ .err = "using default of "
+ __MODULE_STRING(FLASH_VENDOR_DEFAULT),
+ .def = FLASH_VENDOR_DEFAULT,
+ .arg = { .r =
+ { .min = FLASH_VENDOR_MIN,
+ .max = FLASH_VENDOR_MAX } }
+ };
+ int val;
+ if (num_flash_vendor > bd) {
+ val = flash_vendor[bd];
+ atl1_validate_option(&val, &opt);
+ adapter->hw.flash_vendor = (u8) val;
+ } else
+ adapter->hw.flash_vendor = (u8) (opt.def);
+ }
+}
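
atl1_check_options() resolves each module parameter per board: if the user passed a value for that board index (e.g. "modprobe atl1 int_mod_timer=100,150" for the first two NICs), atl1_validate_option() range-checks it and falls back to the compiled-in default on bad input; otherwise the default is used directly. A hedged sketch of adding one more range-checked per-board parameter along the same lines (all example_* names and EXAMPLE_* constants are hypothetical, the rest is the machinery defined in this file):

	static int __devinitdata example_param[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
	static int num_example_param;
	module_param_array_named(example_param, example_param, int,
				 &num_example_param, 0);
	MODULE_PARM_DESC(example_param, "Example per-board value");

	#define EXAMPLE_DEF 100
	#define EXAMPLE_MIN 50
	#define EXAMPLE_MAX 65000

	/* would be called from atl1_check_options() for board index bd */
	static void __devinit example_check_option(struct atl1_adapter *adapter, int bd)
	{
		struct atl1_option opt = {
			.type = range_option,
			.name = "Example Value",
			.err  = "using default of " __MODULE_STRING(EXAMPLE_DEF),
			.def  = EXAMPLE_DEF,
			.arg  = { .r = { .min = EXAMPLE_MIN, .max = EXAMPLE_MAX } }
		};
		int val = EXAMPLE_DEF;

		if (num_example_param > bd) {
			val = example_param[bd];
			/* resets val to opt.def if it is out of range */
			atl1_validate_option(&val, &opt);
		}
		/* store val in the adapter, as the callers above do */
	}
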
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 32923162179..217a2eedee0 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -184,7 +184,7 @@ static int tlb_initialize(struct bonding *bond)
spin_lock_init(&(bond_info->tx_hashtbl_lock));
- new_hashtbl = kmalloc(size, GFP_KERNEL);
+ new_hashtbl = kzalloc(size, GFP_KERNEL);
if (!new_hashtbl) {
printk(KERN_ERR DRV_NAME
": %s: Error: Failed to allocate TLB hash table\n",
@@ -195,8 +195,6 @@ static int tlb_initialize(struct bonding *bond)
bond_info->tx_hashtbl = new_hashtbl;
- memset(bond_info->tx_hashtbl, 0, size);
-
for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
tlb_init_table_entry(&bond_info->tx_hashtbl[i], 1);
}
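
The bonding change above (and the matching conversions in bond_main.c and slip.c further down) folds a kmalloc()/memset() pair into a single kzalloc() call; behaviour is unchanged, kzalloc() simply hands back already-zeroed memory. The same conversion in isolation:

	/* before: allocate, then clear by hand */
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;
	memset(ptr, 0, size);

	/* after: one call, memory arrives zeroed */
	ptr = kzalloc(size, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;
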
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d3801a00d3d..8ce8fec615b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1343,14 +1343,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
"inaccurate.\n", bond_dev->name, slave_dev->name);
}
- new_slave = kmalloc(sizeof(struct slave), GFP_KERNEL);
+ new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
if (!new_slave) {
res = -ENOMEM;
goto err_undo_flags;
}
- memset(new_slave, 0, sizeof(struct slave));
-
/* save slave's original flags before calling
* netdev_set_master and dev_open
*/
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index dfa035a1ad4..c67f7d3c2f9 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -74,8 +74,6 @@ enum {
#define EEPROM_MAGIC 0x38E2F10C
-#define to_net_dev(class) container_of(class, struct net_device, class_dev)
-
#define CH_DEVICE(devid, ssid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
@@ -434,11 +432,12 @@ static int setup_sge_qsets(struct adapter *adap)
return 0;
}
-static ssize_t attr_show(struct class_device *cd, char *buf,
+static ssize_t attr_show(struct device *d, struct device_attribute *attr,
+ char *buf,
ssize_t(*format) (struct adapter *, char *))
{
ssize_t len;
- struct adapter *adap = to_net_dev(cd)->priv;
+ struct adapter *adap = to_net_dev(d)->priv;
/* Synchronize with ioctls that may shut down the device */
rtnl_lock();
@@ -447,14 +446,15 @@ static ssize_t attr_show(struct class_device *cd, char *buf,
return len;
}
-static ssize_t attr_store(struct class_device *cd, const char *buf, size_t len,
+static ssize_t attr_store(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t len,
ssize_t(*set) (struct adapter *, unsigned int),
unsigned int min_val, unsigned int max_val)
{
char *endp;
ssize_t ret;
unsigned int val;
- struct adapter *adap = to_net_dev(cd)->priv;
+ struct adapter *adap = to_net_dev(d)->priv;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -476,9 +476,10 @@ static ssize_t format_##name(struct adapter *adap, char *buf) \
{ \
return sprintf(buf, "%u\n", val_expr); \
} \
-static ssize_t show_##name(struct class_device *cd, char *buf) \
+static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
+ char *buf) \
{ \
- return attr_show(cd, buf, format_##name); \
+ return attr_show(d, attr, buf, format_##name); \
}
static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
@@ -493,10 +494,10 @@ static ssize_t set_nfilters(struct adapter *adap, unsigned int val)
return 0;
}
-static ssize_t store_nfilters(struct class_device *cd, const char *buf,
- size_t len)
+static ssize_t store_nfilters(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t len)
{
- return attr_store(cd, buf, len, set_nfilters, 0, ~0);
+ return attr_store(d, attr, buf, len, set_nfilters, 0, ~0);
}
static ssize_t set_nservers(struct adapter *adap, unsigned int val)
@@ -509,38 +510,39 @@ static ssize_t set_nservers(struct adapter *adap, unsigned int val)
return 0;
}
-static ssize_t store_nservers(struct class_device *cd, const char *buf,
- size_t len)
+static ssize_t store_nservers(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t len)
{
- return attr_store(cd, buf, len, set_nservers, 0, ~0);
+ return attr_store(d, attr, buf, len, set_nservers, 0, ~0);
}
#define CXGB3_ATTR_R(name, val_expr) \
CXGB3_SHOW(name, val_expr) \
-static CLASS_DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
#define CXGB3_ATTR_RW(name, val_expr, store_method) \
CXGB3_SHOW(name, val_expr) \
-static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
+static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_method)
CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
static struct attribute *cxgb3_attrs[] = {
- &class_device_attr_cam_size.attr,
- &class_device_attr_nfilters.attr,
- &class_device_attr_nservers.attr,
+ &dev_attr_cam_size.attr,
+ &dev_attr_nfilters.attr,
+ &dev_attr_nservers.attr,
NULL
};
static struct attribute_group cxgb3_attr_group = {.attrs = cxgb3_attrs };
-static ssize_t tm_attr_show(struct class_device *cd, char *buf, int sched)
+static ssize_t tm_attr_show(struct device *d, struct device_attribute *attr,
+ char *buf, int sched)
{
ssize_t len;
unsigned int v, addr, bpt, cpt;
- struct adapter *adap = to_net_dev(cd)->priv;
+ struct adapter *adap = to_net_dev(d)->priv;
addr = A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2;
rtnl_lock();
@@ -560,13 +562,13 @@ static ssize_t tm_attr_show(struct class_device *cd, char *buf, int sched)
return len;
}
-static ssize_t tm_attr_store(struct class_device *cd, const char *buf,
- size_t len, int sched)
+static ssize_t tm_attr_store(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t len, int sched)
{
char *endp;
ssize_t ret;
unsigned int val;
- struct adapter *adap = to_net_dev(cd)->priv;
+ struct adapter *adap = to_net_dev(d)->priv;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -584,15 +586,17 @@ static ssize_t tm_attr_store(struct class_device *cd, const char *buf,
}
#define TM_ATTR(name, sched) \
-static ssize_t show_##name(struct class_device *cd, char *buf) \
+static ssize_t show_##name(struct device *d, struct device_attribute *attr, \
+ char *buf) \
{ \
- return tm_attr_show(cd, buf, sched); \
+ return tm_attr_show(d, attr, buf, sched); \
} \
-static ssize_t store_##name(struct class_device *cd, const char *buf, size_t len) \
+static ssize_t store_##name(struct device *d, struct device_attribute *attr, \
+ const char *buf, size_t len) \
{ \
- return tm_attr_store(cd, buf, len, sched); \
+ return tm_attr_store(d, attr, buf, len, sched); \
} \
-static CLASS_DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
+static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_##name, store_##name)
TM_ATTR(sched0, 0);
TM_ATTR(sched1, 1);
@@ -604,14 +608,14 @@ TM_ATTR(sched6, 6);
TM_ATTR(sched7, 7);
static struct attribute *offload_attrs[] = {
- &class_device_attr_sched0.attr,
- &class_device_attr_sched1.attr,
- &class_device_attr_sched2.attr,
- &class_device_attr_sched3.attr,
- &class_device_attr_sched4.attr,
- &class_device_attr_sched5.attr,
- &class_device_attr_sched6.attr,
- &class_device_attr_sched7.attr,
+ &dev_attr_sched0.attr,
+ &dev_attr_sched1.attr,
+ &dev_attr_sched2.attr,
+ &dev_attr_sched3.attr,
+ &dev_attr_sched4.attr,
+ &dev_attr_sched5.attr,
+ &dev_attr_sched6.attr,
+ &dev_attr_sched7.attr,
NULL
};
@@ -836,7 +840,7 @@ static int offload_open(struct net_device *dev)
init_smt(adapter);
/* Never mind if the next step fails */
- sysfs_create_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
+ sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
/* Call back all registered clients */
cxgb3_add_clients(tdev);
@@ -861,7 +865,7 @@ static int offload_close(struct t3cdev *tdev)
/* Call back all registered clients */
cxgb3_remove_clients(tdev);
- sysfs_remove_group(&tdev->lldev->class_dev.kobj, &offload_attr_group);
+ sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
tdev->lldev = NULL;
cxgb3_set_dummy_ops(tdev);
@@ -2420,7 +2424,7 @@ static int __devinit init_one(struct pci_dev *pdev,
else if (msi > 0 && pci_enable_msi(pdev) == 0)
adapter->flags |= USING_MSI;
- err = sysfs_create_group(&adapter->port[0]->class_dev.kobj,
+ err = sysfs_create_group(&adapter->port[0]->dev.kobj,
&cxgb3_attr_group);
print_port_info(adapter, ai);
@@ -2452,7 +2456,7 @@ static void __devexit remove_one(struct pci_dev *pdev)
struct adapter *adapter = dev->priv;
t3_sge_stop(adapter);
- sysfs_remove_group(&adapter->port[0]->class_dev.kobj,
+ sysfs_remove_group(&adapter->port[0]->dev.kobj,
&cxgb3_attr_group);
for_each_port(adapter, i)
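
The cxgb3 hunks above track the sysfs transition from struct class_device to struct device on network interfaces: show/store callbacks gain a struct device_attribute * argument, CLASS_DEVICE_ATTR becomes DEVICE_ATTR, and attribute groups hang off netdev->dev.kobj instead of class_dev.kobj. A hedged sketch of one attribute pair in the new style, using the same to_net_dev() accessor as the converted code (the example_* names are illustrative only):

	static ssize_t example_show(struct device *d, struct device_attribute *attr,
				    char *buf)
	{
		struct adapter *adap = to_net_dev(d)->priv;

		return sprintf(buf, "%u\n", adap->params.mc5.nfilters);
	}

	static ssize_t example_store(struct device *d, struct device_attribute *attr,
				     const char *buf, size_t len)
	{
		/* parse buf, update the adapter under rtnl_lock(), return len or -errno */
		return len;
	}

	static DEVICE_ATTR(example, S_IRUGO | S_IWUSR, example_show, example_store);

	/* registered from probe with something like:
	 *	sysfs_create_group(&adapter->port[0]->dev.kobj, &cxgb3_attr_group);
	 */
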
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index a0806d262fc..2f4b1de7a2b 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -1343,15 +1343,12 @@ static int __init slip_init(void)
printk(KERN_INFO "SLIP linefill/keepalive option.\n");
#endif
- slip_devs = kmalloc(sizeof(struct net_device *)*slip_maxdev, GFP_KERNEL);
+ slip_devs = kzalloc(sizeof(struct net_device *)*slip_maxdev, GFP_KERNEL);
if (!slip_devs) {
printk(KERN_ERR "SLIP: Can't allocate slip devices array! Uaargh! (-> No SLIP available)\n");
return -ENOMEM;
}
- /* Clear the pointer array, we allocate devices when we need them */
- memset(slip_devs, 0, sizeof(struct net_device *)*slip_maxdev);
-
/* Fill in our line protocol discipline, and register it */
if ((status = tty_register_ldisc(N_SLIP, &sl_ldisc)) != 0) {
printk(KERN_ERR "SLIP: can't register line discipline (err = %d)\n", status);
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 135c0987dea..e136bae6197 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3380,7 +3380,7 @@ next_pkt:
}
next_pkt_nopost:
sw_idx++;
- sw_idx %= TG3_RX_RCB_RING_SIZE(tp);
+ sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
/* Refresh hw_idx to see if there is new work */
if (sw_idx == hw_idx) {
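
The tg3 change swaps a modulo for a bit-mask when wrapping the RX return-ring index; that is only valid because TG3_RX_RCB_RING_SIZE(tp) is a power of two, in which case x & (size - 1) equals x % size and the division disappears from the hot receive path. In isolation:

	/* wrap an index on a power-of-two ring without a division */
	#define RING_SIZE 512				/* must be a power of two */

	static inline unsigned int ring_next(unsigned int idx)
	{
		/* same result as (idx + 1) % RING_SIZE, but no divide */
		return (idx + 1) & (RING_SIZE - 1);
	}
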
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index abb8611c5a9..31c97a6591a 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1709,75 +1709,13 @@ static void adjust_link(struct net_device *dev)
if (mii_info->speed != ugeth->oldspeed) {
switch (mii_info->speed) {
case 1000:
-#ifdef CONFIG_PPC_MPC836x
-/* FIXME: This code is for 100Mbs BUG fixing,
-remove this when it is fixed!!! */
- if (ugeth->ug_info->enet_interface ==
- ENET_1000_GMII)
- /* Run the commands which initialize the PHY */
- {
- tempval =
- (u32) mii_info->mdio_read(ugeth->
- dev, mii_info->mii_id, 0x1b);
- tempval |= 0x000f;
- mii_info->mdio_write(ugeth->dev,
- mii_info->mii_id, 0x1b,
- (u16) tempval);
- tempval =
- (u32) mii_info->mdio_read(ugeth->
- dev, mii_info->mii_id,
- MII_BMCR);
- mii_info->mdio_write(ugeth->dev,
- mii_info->mii_id, MII_BMCR,
- (u16) (tempval | BMCR_RESET));
- } else if (ugeth->ug_info->enet_interface ==
- ENET_1000_RGMII)
- /* Run the commands which initialize the PHY */
- {
- tempval =
- (u32) mii_info->mdio_read(ugeth->
- dev, mii_info->mii_id, 0x1b);
- tempval = (tempval & ~0x000f) | 0x000b;
- mii_info->mdio_write(ugeth->dev,
- mii_info->mii_id, 0x1b,
- (u16) tempval);
- tempval =
- (u32) mii_info->mdio_read(ugeth->
- dev, mii_info->mii_id,
- MII_BMCR);
- mii_info->mdio_write(ugeth->dev,
- mii_info->mii_id, MII_BMCR,
- (u16) (tempval | BMCR_RESET));
- }
- msleep(4000);
-#endif /* CONFIG_MPC8360 */
- adjust_enet_interface(ugeth);
+ ugeth->ug_info->enet_interface = ENET_1000_RGMII;
break;
case 100:
- case 10:
-#ifdef CONFIG_PPC_MPC836x
-/* FIXME: This code is for 100Mbs BUG fixing,
-remove this lines when it will be fixed!!! */
ugeth->ug_info->enet_interface = ENET_100_RGMII;
- tempval =
- (u32) mii_info->mdio_read(ugeth->dev,
- mii_info->mii_id,
- 0x1b);
- tempval = (tempval & ~0x000f) | 0x000b;
- mii_info->mdio_write(ugeth->dev,
- mii_info->mii_id, 0x1b,
- (u16) tempval);
- tempval =
- (u32) mii_info->mdio_read(ugeth->dev,
- mii_info->mii_id,
- MII_BMCR);
- mii_info->mdio_write(ugeth->dev,
- mii_info->mii_id, MII_BMCR,
- (u16) (tempval |
- BMCR_RESET));
- msleep(4000);
-#endif /* CONFIG_MPC8360 */
- adjust_enet_interface(ugeth);
+ break;
+ case 10:
+ ugeth->ug_info->enet_interface = ENET_10_RGMII;
break;
default:
ugeth_warn
@@ -1785,6 +1723,7 @@ remove this lines when it will be fixed!!! */
dev->name, mii_info->speed);
break;
}
+ adjust_enet_interface(ugeth);
ugeth_info("%s: Speed %dBT", dev->name,
mii_info->speed);
@@ -4133,6 +4072,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
static int mii_mng_configured = 0;
const phandle *ph;
const unsigned int *prop;
+ const void *mac_addr;
ugeth_vdbg("%s: IN", __FUNCTION__);
@@ -4258,7 +4198,12 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
ugeth->ug_info = ug_info;
ugeth->dev = dev;
- memcpy(dev->dev_addr, get_property(np, "mac-address", NULL), 6);
+
+ mac_addr = get_property(np, "mac-address", NULL);
+ if (mac_addr == NULL)
+ mac_addr = get_property(np, "local-mac-address", NULL);
+ if (mac_addr)
+ memcpy(dev->dev_addr, mac_addr, 6);
return 0;
}
diff --git a/drivers/net/ucc_geth_phy.c b/drivers/net/ucc_geth_phy.c
index 3c86592ce03..6fda6d88be4 100644
--- a/drivers/net/ucc_geth_phy.c
+++ b/drivers/net/ucc_geth_phy.c
@@ -376,6 +376,8 @@ static int marvell_init(struct ugeth_mii_info *mii_info)
ugphy_vdbg("%s: IN", __FUNCTION__);
ucc_geth_phy_write(mii_info, 0x14, 0x0cd2);
+ ucc_geth_phy_write(mii_info, 0x1b,
+ (ucc_geth_phy_read(mii_info, 0x1b) & ~0x000f) | 0x000b);
ucc_geth_phy_write(mii_info, MII_BMCR,
ucc_geth_phy_read(mii_info, MII_BMCR) | BMCR_RESET);
msleep(4000);
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index a138b151009..3a1a958fb5f 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -3,7 +3,7 @@
*
* Character device driver for reading z/VM *MONITOR service records.
*
- * Copyright (C) 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH.
+ * Copyright 2004 IBM Corporation, IBM Deutschland Entwicklung GmbH.
*
* Author: Gerald Schaefer <geraldsc@de.ibm.com>
*/
@@ -22,7 +22,7 @@
#include <asm/ebcdic.h>
#include <asm/extmem.h>
#include <linux/poll.h>
-#include "../net/iucv.h"
+#include <net/iucv/iucv.h>
//#define MON_DEBUG /* Debug messages on/off */
@@ -50,14 +50,13 @@ static char mon_dcss_name[9] = "MONDCSS\0";
struct mon_msg {
u32 pos;
u32 mca_offset;
- iucv_MessagePending local_eib;
+ struct iucv_message msg;
char msglim_reached;
char replied_msglim;
};
struct mon_private {
- u16 pathid;
- iucv_handle_t iucv_handle;
+ struct iucv_path *path;
struct mon_msg *msg_array[MON_MSGLIM];
unsigned int write_index;
unsigned int read_index;
@@ -75,8 +74,6 @@ static unsigned long mon_dcss_end;
static DECLARE_WAIT_QUEUE_HEAD(mon_read_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(mon_conn_wait_queue);
-static u8 iucv_host[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
-
static u8 user_data_connect[16] = {
/* Version code, must be 0x01 for shared mode */
0x01,
@@ -100,8 +97,7 @@ static u8 user_data_sever[16] = {
* Create the 8 bytes EBCDIC DCSS segment name from
* an ASCII name, incl. padding
*/
-static inline void
-dcss_mkname(char *ascii_name, char *ebcdic_name)
+static inline void dcss_mkname(char *ascii_name, char *ebcdic_name)
{
int i;
@@ -119,8 +115,7 @@ dcss_mkname(char *ascii_name, char *ebcdic_name)
* print appropriate error message for segment_load()/segment_type()
* return code
*/
-static void
-mon_segment_warn(int rc, char* seg_name)
+static void mon_segment_warn(int rc, char* seg_name)
{
switch (rc) {
case -ENOENT:
@@ -166,44 +161,37 @@ mon_segment_warn(int rc, char* seg_name)
}
}
-static inline unsigned long
-mon_mca_start(struct mon_msg *monmsg)
+static inline unsigned long mon_mca_start(struct mon_msg *monmsg)
{
- return monmsg->local_eib.ln1msg1.iprmmsg1_u32;
+ return *(u32 *) &monmsg->msg.rmmsg;
}
-static inline unsigned long
-mon_mca_end(struct mon_msg *monmsg)
+static inline unsigned long mon_mca_end(struct mon_msg *monmsg)
{
- return monmsg->local_eib.ln1msg2.ipbfln1f;
+ return *(u32 *) &monmsg->msg.rmmsg[4];
}
-static inline u8
-mon_mca_type(struct mon_msg *monmsg, u8 index)
+static inline u8 mon_mca_type(struct mon_msg *monmsg, u8 index)
{
return *((u8 *) mon_mca_start(monmsg) + monmsg->mca_offset + index);
}
-static inline u32
-mon_mca_size(struct mon_msg *monmsg)
+static inline u32 mon_mca_size(struct mon_msg *monmsg)
{
return mon_mca_end(monmsg) - mon_mca_start(monmsg) + 1;
}
-static inline u32
-mon_rec_start(struct mon_msg *monmsg)
+static inline u32 mon_rec_start(struct mon_msg *monmsg)
{
return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 4));
}
-static inline u32
-mon_rec_end(struct mon_msg *monmsg)
+static inline u32 mon_rec_end(struct mon_msg *monmsg)
{
return *((u32 *) (mon_mca_start(monmsg) + monmsg->mca_offset + 8));
}
-static inline int
-mon_check_mca(struct mon_msg *monmsg)
+static inline int mon_check_mca(struct mon_msg *monmsg)
{
if ((mon_rec_end(monmsg) <= mon_rec_start(monmsg)) ||
(mon_rec_start(monmsg) < mon_dcss_start) ||
@@ -221,20 +209,17 @@ mon_check_mca(struct mon_msg *monmsg)
return 0;
}
-static inline int
-mon_send_reply(struct mon_msg *monmsg, struct mon_private *monpriv)
+static inline int mon_send_reply(struct mon_msg *monmsg,
+ struct mon_private *monpriv)
{
- u8 prmmsg[8];
int rc;
P_DEBUG("read, REPLY: pathid = 0x%04X, msgid = 0x%08X, trgcls = "
"0x%08X\n\n",
- monmsg->local_eib.ippathid, monmsg->local_eib.ipmsgid,
- monmsg->local_eib.iptrgcls);
- rc = iucv_reply_prmmsg(monmsg->local_eib.ippathid,
- monmsg->local_eib.ipmsgid,
- monmsg->local_eib.iptrgcls,
- 0, prmmsg);
+ monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class);
+
+ rc = iucv_message_reply(monpriv->path, &monmsg->msg,
+ IUCV_IPRMDATA, NULL, 0);
atomic_dec(&monpriv->msglim_count);
if (likely(!monmsg->msglim_reached)) {
monmsg->pos = 0;
@@ -251,10 +236,19 @@ mon_send_reply(struct mon_msg *monmsg, struct mon_private *monpriv)
return 0;
}
-static inline struct mon_private *
-mon_alloc_mem(void)
+static inline void mon_free_mem(struct mon_private *monpriv)
+{
+ int i;
+
+ for (i = 0; i < MON_MSGLIM; i++)
+ if (monpriv->msg_array[i])
+ kfree(monpriv->msg_array[i]);
+ kfree(monpriv);
+}
+
+static inline struct mon_private *mon_alloc_mem(void)
{
- int i,j;
+ int i;
struct mon_private *monpriv;
monpriv = kzalloc(sizeof(struct mon_private), GFP_KERNEL);
@@ -267,16 +261,15 @@ mon_alloc_mem(void)
GFP_KERNEL);
if (!monpriv->msg_array[i]) {
P_ERROR("open, no memory for msg_array\n");
- for (j = 0; j < i; j++)
- kfree(monpriv->msg_array[j]);
+ mon_free_mem(monpriv);
return NULL;
}
}
return monpriv;
}
-static inline void
-mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv)
+static inline void mon_read_debug(struct mon_msg *monmsg,
+ struct mon_private *monpriv)
{
#ifdef MON_DEBUG
u8 msg_type[2], mca_type;
@@ -284,7 +277,7 @@ mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv)
records_len = mon_rec_end(monmsg) - mon_rec_start(monmsg) + 1;
- memcpy(msg_type, &monmsg->local_eib.iptrgcls, 2);
+ memcpy(msg_type, &monmsg->msg.class, 2);
EBCASC(msg_type, 2);
mca_type = mon_mca_type(monmsg, 0);
EBCASC(&mca_type, 1);
@@ -292,8 +285,7 @@ mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv)
P_DEBUG("read, mon_read_index = %i, mon_write_index = %i\n",
monpriv->read_index, monpriv->write_index);
P_DEBUG("read, pathid = 0x%04X, msgid = 0x%08X, trgcls = 0x%08X\n",
- monmsg->local_eib.ippathid, monmsg->local_eib.ipmsgid,
- monmsg->local_eib.iptrgcls);
+ monpriv->path->pathid, monmsg->msg.id, monmsg->msg.class);
P_DEBUG("read, msg_type = '%c%c', mca_type = '%c' / 0x%X / 0x%X\n",
msg_type[0], msg_type[1], mca_type ? mca_type : 'X',
mon_mca_type(monmsg, 1), mon_mca_type(monmsg, 2));
@@ -306,8 +298,7 @@ mon_read_debug(struct mon_msg *monmsg, struct mon_private *monpriv)
#endif
}
-static inline void
-mon_next_mca(struct mon_msg *monmsg)
+static inline void mon_next_mca(struct mon_msg *monmsg)
{
if (likely((mon_mca_size(monmsg) - monmsg->mca_offset) == 12))
return;
@@ -316,8 +307,7 @@ mon_next_mca(struct mon_msg *monmsg)
monmsg->pos = 0;
}
-static inline struct mon_msg *
-mon_next_message(struct mon_private *monpriv)
+static inline struct mon_msg *mon_next_message(struct mon_private *monpriv)
{
struct mon_msg *monmsg;
@@ -342,39 +332,37 @@ mon_next_message(struct mon_private *monpriv)
/******************************************************************************
* IUCV handler *
*****************************************************************************/
-static void
-mon_iucv_ConnectionComplete(iucv_ConnectionComplete *eib, void *pgm_data)
+static void mon_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
{
- struct mon_private *monpriv = (struct mon_private *) pgm_data;
+ struct mon_private *monpriv = path->private;
P_DEBUG("IUCV connection completed\n");
P_DEBUG("IUCV ACCEPT (from *MONITOR): Version = 0x%02X, Event = "
"0x%02X, Sample = 0x%02X\n",
- eib->ipuser[0], eib->ipuser[1], eib->ipuser[2]);
+ ipuser[0], ipuser[1], ipuser[2]);
atomic_set(&monpriv->iucv_connected, 1);
wake_up(&mon_conn_wait_queue);
}
-static void
-mon_iucv_ConnectionSevered(iucv_ConnectionSevered *eib, void *pgm_data)
+static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
- struct mon_private *monpriv = (struct mon_private *) pgm_data;
+ struct mon_private *monpriv = path->private;
- P_ERROR("IUCV connection severed with rc = 0x%X\n",
- (u8) eib->ipuser[0]);
+ P_ERROR("IUCV connection severed with rc = 0x%X\n", ipuser[0]);
+ iucv_path_sever(path, NULL);
atomic_set(&monpriv->iucv_severed, 1);
wake_up(&mon_conn_wait_queue);
wake_up_interruptible(&mon_read_wait_queue);
}
-static void
-mon_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data)
+static void mon_iucv_message_pending(struct iucv_path *path,
+ struct iucv_message *msg)
{
- struct mon_private *monpriv = (struct mon_private *) pgm_data;
+ struct mon_private *monpriv = path->private;
P_DEBUG("IUCV message pending\n");
- memcpy(&monpriv->msg_array[monpriv->write_index]->local_eib, eib,
- sizeof(iucv_MessagePending));
+ memcpy(&monpriv->msg_array[monpriv->write_index]->msg,
+ msg, sizeof(*msg));
if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) {
P_WARNING("IUCV message pending, message limit (%i) reached\n",
MON_MSGLIM);
@@ -385,54 +373,45 @@ mon_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data)
wake_up_interruptible(&mon_read_wait_queue);
}
-static iucv_interrupt_ops_t mon_iucvops = {
- .ConnectionComplete = mon_iucv_ConnectionComplete,
- .ConnectionSevered = mon_iucv_ConnectionSevered,
- .MessagePending = mon_iucv_MessagePending,
+static struct iucv_handler monreader_iucv_handler = {
+ .path_complete = mon_iucv_path_complete,
+ .path_severed = mon_iucv_path_severed,
+ .message_pending = mon_iucv_message_pending,
};
/******************************************************************************
* file operations *
*****************************************************************************/
-static int
-mon_open(struct inode *inode, struct file *filp)
+static int mon_open(struct inode *inode, struct file *filp)
{
- int rc, i;
struct mon_private *monpriv;
+ int rc;
/*
* only one user allowed
*/
+ rc = -EBUSY;
if (test_and_set_bit(MON_IN_USE, &mon_in_use))
- return -EBUSY;
+ goto out;
+ rc = -ENOMEM;
monpriv = mon_alloc_mem();
if (!monpriv)
- return -ENOMEM;
+ goto out_use;
/*
- * Register with IUCV and connect to *MONITOR service
+ * Connect to *MONITOR service
*/
- monpriv->iucv_handle = iucv_register_program("my_monreader ",
- MON_SERVICE,
- NULL,
- &mon_iucvops,
- monpriv);
- if (!monpriv->iucv_handle) {
- P_ERROR("failed to register with iucv driver\n");
- rc = -EIO;
- goto out_error;
- }
- P_INFO("open, registered with IUCV\n");
-
- rc = iucv_connect(&monpriv->pathid, MON_MSGLIM, user_data_connect,
- MON_SERVICE, iucv_host, IPRMDATA, NULL, NULL,
- monpriv->iucv_handle, NULL);
+ monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
+ if (!monpriv->path)
+ goto out_priv;
+ rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler,
+ MON_SERVICE, NULL, user_data_connect, monpriv);
if (rc) {
P_ERROR("iucv connection to *MONITOR failed with "
"IPUSER SEVER code = %i\n", rc);
rc = -EIO;
- goto out_unregister;
+ goto out_path;
}
/*
* Wait for connection confirmation
@@ -444,24 +423,23 @@ mon_open(struct inode *inode, struct file *filp)
atomic_set(&monpriv->iucv_severed, 0);
atomic_set(&monpriv->iucv_connected, 0);
rc = -EIO;
- goto out_unregister;
+ goto out_path;
}
P_INFO("open, established connection to *MONITOR service\n\n");
filp->private_data = monpriv;
return nonseekable_open(inode, filp);
-out_unregister:
- iucv_unregister_program(monpriv->iucv_handle);
-out_error:
- for (i = 0; i < MON_MSGLIM; i++)
- kfree(monpriv->msg_array[i]);
- kfree(monpriv);
+out_path:
+ kfree(monpriv->path);
+out_priv:
+ mon_free_mem(monpriv);
+out_use:
clear_bit(MON_IN_USE, &mon_in_use);
+out:
return rc;
}
-static int
-mon_close(struct inode *inode, struct file *filp)
+static int mon_close(struct inode *inode, struct file *filp)
{
int rc, i;
struct mon_private *monpriv = filp->private_data;
@@ -469,18 +447,12 @@ mon_close(struct inode *inode, struct file *filp)
/*
* Close IUCV connection and unregister
*/
- rc = iucv_sever(monpriv->pathid, user_data_sever);
+ rc = iucv_path_sever(monpriv->path, user_data_sever);
if (rc)
P_ERROR("close, iucv_sever failed with rc = %i\n", rc);
else
P_INFO("close, terminated connection to *MONITOR service\n");
- rc = iucv_unregister_program(monpriv->iucv_handle);
- if (rc)
- P_ERROR("close, iucv_unregister failed with rc = %i\n", rc);
- else
- P_INFO("close, unregistered with IUCV\n");
-
atomic_set(&monpriv->iucv_severed, 0);
atomic_set(&monpriv->iucv_connected, 0);
atomic_set(&monpriv->read_ready, 0);
@@ -495,8 +467,8 @@ mon_close(struct inode *inode, struct file *filp)
return 0;
}
-static ssize_t
-mon_read(struct file *filp, char __user *data, size_t count, loff_t *ppos)
+static ssize_t mon_read(struct file *filp, char __user *data,
+ size_t count, loff_t *ppos)
{
struct mon_private *monpriv = filp->private_data;
struct mon_msg *monmsg;
@@ -563,8 +535,7 @@ out_copy:
return count;
}
-static unsigned int
-mon_poll(struct file *filp, struct poll_table_struct *p)
+static unsigned int mon_poll(struct file *filp, struct poll_table_struct *p)
{
struct mon_private *monpriv = filp->private_data;
@@ -593,8 +564,7 @@ static struct miscdevice mon_dev = {
/******************************************************************************
* module init/exit *
*****************************************************************************/
-static int __init
-mon_init(void)
+static int __init mon_init(void)
{
int rc;
@@ -603,22 +573,34 @@ mon_init(void)
return -ENODEV;
}
+ /*
+ * Register with IUCV; the connection to the *MONITOR service itself
+ * is made later, in mon_open()
+ */
+ rc = iucv_register(&monreader_iucv_handler, 1);
+ if (rc) {
+ P_ERROR("failed to register with iucv driver\n");
+ return rc;
+ }
+ P_INFO("registered with IUCV\n");
+
rc = segment_type(mon_dcss_name);
if (rc < 0) {
mon_segment_warn(rc, mon_dcss_name);
- return rc;
+ goto out_iucv;
}
if (rc != SEG_TYPE_SC) {
P_ERROR("segment %s has unsupported type, should be SC\n",
mon_dcss_name);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out_iucv;
}
rc = segment_load(mon_dcss_name, SEGMENT_SHARED,
&mon_dcss_start, &mon_dcss_end);
if (rc < 0) {
mon_segment_warn(rc, mon_dcss_name);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out_iucv;
}
dcss_mkname(mon_dcss_name, &user_data_connect[8]);
@@ -634,14 +616,16 @@ mon_init(void)
out:
segment_unload(mon_dcss_name);
+out_iucv:
+ iucv_unregister(&monreader_iucv_handler, 1);
return rc;
}
-static void __exit
-mon_exit(void)
+static void __exit mon_exit(void)
{
segment_unload(mon_dcss_name);
WARN_ON(misc_deregister(&mon_dev) != 0);
+ iucv_unregister(&monreader_iucv_handler, 1);
return;
}
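
The monreader conversion above is a template for the new in-kernel IUCV API from <net/iucv/iucv.h> that this series introduces: a client registers a struct iucv_handler once, allocates and connects a struct iucv_path per connection, and answers *MONITOR records with iucv_message_reply(). A condensed sketch of that call sequence, drawn from the calls used in this file (error unwinding trimmed; example_handler and example_connect are illustrative names, the callbacks and constants are the ones defined above):

	static struct iucv_handler example_handler = {
		.path_complete   = mon_iucv_path_complete,
		.path_severed    = mon_iucv_path_severed,
		.message_pending = mon_iucv_message_pending,
	};

	static int example_connect(struct mon_private *monpriv)
	{
		int rc;

		rc = iucv_register(&example_handler, 1);	/* once, at module init */
		if (rc)
			return rc;

		monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL);
		if (!monpriv->path)
			return -ENOMEM;

		return iucv_path_connect(monpriv->path, &example_handler, MON_SERVICE,
					 NULL, user_data_connect, monpriv);
	}

	/* per message: iucv_message_reply(path, &msg, IUCV_IPRMDATA, NULL, 0);
	 * teardown: iucv_path_sever() and iucv_unregister(), as mon_close()
	 * and mon_exit() above do.
	 */
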
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 4f894dc2373..8432a76b961 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -3,7 +3,7 @@
* character device driver for reading z/VM system service records
*
*
- * Copyright (C) 2004 IBM Corporation
+ * Copyright 2004 IBM Corporation
* character device driver for reading z/VM system service records,
* Version 1.0
* Author(s): Xenia Tkatschow <xenia@us.ibm.com>
@@ -21,7 +21,7 @@
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
-#include "../net/iucv.h"
+#include <net/iucv/iucv.h>
#include <linux/kmod.h>
#include <linux/cdev.h>
#include <linux/device.h>
@@ -60,12 +60,11 @@ struct vmlogrdr_priv_t {
char system_service[8];
char internal_name[8];
char recording_name[8];
- u16 pathid;
+ struct iucv_path *path;
int connection_established;
int iucv_path_severed;
- iucv_MessagePending local_interrupt_buffer;
+ struct iucv_message local_interrupt_buffer;
atomic_t receive_ready;
- iucv_handle_t iucv_handle;
int minor_num;
char * buffer;
char * current_position;
@@ -97,37 +96,19 @@ static struct file_operations vmlogrdr_fops = {
};
-static u8 iucvMagic[16] = {
- 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
- 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
-};
+static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 ipuser[16]);
+static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 ipuser[16]);
+static void vmlogrdr_iucv_message_pending(struct iucv_path *,
+ struct iucv_message *);
-static u8 mask[] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+static struct iucv_handler vmlogrdr_iucv_handler = {
+ .path_complete = vmlogrdr_iucv_path_complete,
+ .path_severed = vmlogrdr_iucv_path_severed,
+ .message_pending = vmlogrdr_iucv_message_pending,
};
-static u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
-
-
-static void
-vmlogrdr_iucv_ConnectionComplete(iucv_ConnectionComplete *eib, void *pgm_data);
-static void
-vmlogrdr_iucv_ConnectionSevered(iucv_ConnectionSevered *eib, void *pgm_data);
-static void
-vmlogrdr_iucv_MessagePending(iucv_MessagePending *eib, void *pgm_data);
-
-
-static iucv_interrupt_ops_t vmlogrdr_iucvops = {
- .ConnectionComplete = vmlogrdr_iucv_ConnectionComplete,
- .ConnectionSevered = vmlogrdr_iucv_ConnectionSevered,
- .MessagePending = vmlogrdr_iucv_MessagePending,
-};
-
static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);
@@ -176,28 +157,29 @@ static struct cdev *vmlogrdr_cdev = NULL;
static int recording_class_AB;
-static void
-vmlogrdr_iucv_ConnectionComplete (iucv_ConnectionComplete * eib,
- void * pgm_data)
+static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 ipuser[16])
{
- struct vmlogrdr_priv_t * logptr = pgm_data;
+ struct vmlogrdr_priv_t * logptr = path->private;
+
spin_lock(&logptr->priv_lock);
logptr->connection_established = 1;
spin_unlock(&logptr->priv_lock);
wake_up(&conn_wait_queue);
- return;
}
-static void
-vmlogrdr_iucv_ConnectionSevered (iucv_ConnectionSevered * eib, void * pgm_data)
+static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
{
- u8 reason = (u8) eib->ipuser[8];
- struct vmlogrdr_priv_t * logptr = pgm_data;
+ struct vmlogrdr_priv_t * logptr = path->private;
+ u8 reason = (u8) ipuser[8];
printk (KERN_ERR "vmlogrdr: connection severed with"
" reason %i\n", reason);
+ iucv_path_sever(path, NULL);
+ kfree(path);
+ logptr->path = NULL;
+
spin_lock(&logptr->priv_lock);
logptr->connection_established = 0;
logptr->iucv_path_severed = 1;
@@ -209,10 +191,10 @@ vmlogrdr_iucv_ConnectionSevered (iucv_ConnectionSevered * eib, void * pgm_data)
}
-static void
-vmlogrdr_iucv_MessagePending (iucv_MessagePending * eib, void * pgm_data)
+static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
+ struct iucv_message *msg)
{
- struct vmlogrdr_priv_t * logptr = pgm_data;
+ struct vmlogrdr_priv_t * logptr = path->private;
/*
* This function is the bottom half so it should be quick.
@@ -220,15 +202,15 @@ vmlogrdr_iucv_MessagePending (iucv_MessagePending * eib, void * pgm_data)
* the usage count
*/
spin_lock(&logptr->priv_lock);
- memcpy(&(logptr->local_interrupt_buffer), eib, sizeof(*eib));
+ memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
atomic_inc(&logptr->receive_ready);
spin_unlock(&logptr->priv_lock);
wake_up_interruptible(&read_wait_queue);
}
-static int
-vmlogrdr_get_recording_class_AB(void) {
+static int vmlogrdr_get_recording_class_AB(void)
+{
char cp_command[]="QUERY COMMAND RECORDING ";
char cp_response[80];
char *tail;
@@ -258,8 +240,9 @@ vmlogrdr_get_recording_class_AB(void) {
}
-static int
-vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, int action, int purge) {
+static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
+ int action, int purge)
+{
char cp_command[80];
char cp_response[160];
@@ -317,8 +300,7 @@ vmlogrdr_recording(struct vmlogrdr_priv_t * logptr, int action, int purge) {
}
-static int
-vmlogrdr_open (struct inode *inode, struct file *filp)
+static int vmlogrdr_open (struct inode *inode, struct file *filp)
{
int dev_num = 0;
struct vmlogrdr_priv_t * logptr = NULL;
@@ -328,10 +310,7 @@ vmlogrdr_open (struct inode *inode, struct file *filp)
dev_num = iminor(inode);
if (dev_num > MAXMINOR)
return -ENODEV;
-
logptr = &sys_ser[dev_num];
- if (logptr == NULL)
- return -ENODEV;
/*
* only allow for blocking reads to be open
@@ -344,52 +323,38 @@ vmlogrdr_open (struct inode *inode, struct file *filp)
if (logptr->dev_in_use) {
spin_unlock_bh(&logptr->priv_lock);
return -EBUSY;
- } else {
- logptr->dev_in_use = 1;
- spin_unlock_bh(&logptr->priv_lock);
}
-
+ logptr->dev_in_use = 1;
+ logptr->connection_established = 0;
+ logptr->iucv_path_severed = 0;
atomic_set(&logptr->receive_ready, 0);
logptr->buffer_free = 1;
+ spin_unlock_bh(&logptr->priv_lock);
/* set the file options */
filp->private_data = logptr;
filp->f_op = &vmlogrdr_fops;
/* start recording for this service*/
- ret=0;
- if (logptr->autorecording)
+ if (logptr->autorecording) {
ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
- if (ret)
- printk (KERN_WARNING "vmlogrdr: failed to start "
- "recording automatically\n");
-
- /* Register with iucv driver */
- logptr->iucv_handle = iucv_register_program(iucvMagic,
- logptr->system_service, mask, &vmlogrdr_iucvops,
- logptr);
-
- if (logptr->iucv_handle == NULL) {
- printk (KERN_ERR "vmlogrdr: failed to register with"
- "iucv driver\n");
- goto not_registered;
+ if (ret)
+ printk (KERN_WARNING "vmlogrdr: failed to start "
+ "recording automatically\n");
}
/* create connection to the system service */
- spin_lock_bh(&logptr->priv_lock);
- logptr->connection_established = 0;
- logptr->iucv_path_severed = 0;
- spin_unlock_bh(&logptr->priv_lock);
-
- connect_rc = iucv_connect (&(logptr->pathid), 10, iucvMagic,
- logptr->system_service, iucv_host, 0,
- NULL, NULL,
- logptr->iucv_handle, NULL);
+ logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
+ if (!logptr->path)
+ goto out_dev;
+ connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
+ logptr->system_service, NULL, NULL,
+ logptr);
if (connect_rc) {
printk (KERN_ERR "vmlogrdr: iucv connection to %s "
"failed with rc %i \n", logptr->system_service,
connect_rc);
- goto not_connected;
+ goto out_path;
}
/* We've issued the connect and now we must wait for a
@@ -398,35 +363,28 @@ vmlogrdr_open (struct inode *inode, struct file *filp)
*/
wait_event(conn_wait_queue, (logptr->connection_established)
|| (logptr->iucv_path_severed));
- if (logptr->iucv_path_severed) {
- goto not_connected;
- }
-
+ if (logptr->iucv_path_severed)
+ goto out_record;
return nonseekable_open(inode, filp);
-not_connected:
- iucv_unregister_program(logptr->iucv_handle);
- logptr->iucv_handle = NULL;
-not_registered:
+out_record:
if (logptr->autorecording)
vmlogrdr_recording(logptr,0,logptr->autopurge);
+out_path:
+ kfree(logptr->path); /* kfree(NULL) is ok. */
+ logptr->path = NULL;
+out_dev:
logptr->dev_in_use = 0;
return -EIO;
-
-
}
-static int
-vmlogrdr_release (struct inode *inode, struct file *filp)
+static int vmlogrdr_release (struct inode *inode, struct file *filp)
{
int ret;
struct vmlogrdr_priv_t * logptr = filp->private_data;
- iucv_unregister_program(logptr->iucv_handle);
- logptr->iucv_handle = NULL;
-
if (logptr->autorecording) {
ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
if (ret)
@@ -439,8 +397,8 @@ vmlogrdr_release (struct inode *inode, struct file *filp)
}
-static int
-vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) {
+static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
+{
int rc, *temp;
/* we need to keep track of two data sizes here:
* The number of bytes we need to receive from iucv and
@@ -461,8 +419,7 @@ vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) {
* We need to return the total length of the record
* + size of FENCE in the first 4 bytes of the buffer.
*/
- iucv_data_count =
- priv->local_interrupt_buffer.ln1msg2.ipbfln1f;
+ iucv_data_count = priv->local_interrupt_buffer.length;
user_data_count = sizeof(int);
temp = (int*)priv->buffer;
*temp= iucv_data_count + sizeof(FENCE);
@@ -474,14 +431,10 @@ vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) {
*/
if (iucv_data_count > NET_BUFFER_SIZE)
iucv_data_count = NET_BUFFER_SIZE;
- rc = iucv_receive(priv->pathid,
- priv->local_interrupt_buffer.ipmsgid,
- priv->local_interrupt_buffer.iptrgcls,
- buffer,
- iucv_data_count,
- NULL,
- NULL,
- &priv->residual_length);
+ rc = iucv_message_receive(priv->path,
+ &priv->local_interrupt_buffer,
+ 0, buffer, iucv_data_count,
+ &priv->residual_length);
spin_unlock_bh(&priv->priv_lock);
/* An rc of 5 indicates that the record was bigger then
* the buffer, which is OK for us. A 9 indicates that the
@@ -513,8 +466,8 @@ vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) {
}
-static ssize_t
-vmlogrdr_read(struct file *filp, char __user *data, size_t count, loff_t * ppos)
+static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
+ size_t count, loff_t * ppos)
{
int rc;
struct vmlogrdr_priv_t * priv = filp->private_data;
@@ -546,8 +499,10 @@ vmlogrdr_read(struct file *filp, char __user *data, size_t count, loff_t * ppos)
return count;
}
-static ssize_t
-vmlogrdr_autopurge_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) {
+static ssize_t vmlogrdr_autopurge_store(struct device * dev,
+ struct device_attribute *attr,
+ const char * buf, size_t count)
+{
struct vmlogrdr_priv_t *priv = dev->driver_data;
ssize_t ret = count;
@@ -565,8 +520,10 @@ vmlogrdr_autopurge_store(struct device * dev, struct device_attribute *attr, con
}
-static ssize_t
-vmlogrdr_autopurge_show(struct device *dev, struct device_attribute *attr, char *buf) {
+static ssize_t vmlogrdr_autopurge_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
struct vmlogrdr_priv_t *priv = dev->driver_data;
return sprintf(buf, "%u\n", priv->autopurge);
}
@@ -576,8 +533,10 @@ static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
vmlogrdr_autopurge_store);
-static ssize_t
-vmlogrdr_purge_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) {
+static ssize_t vmlogrdr_purge_store(struct device * dev,
+ struct device_attribute *attr,
+ const char * buf, size_t count)
+{
char cp_command[80];
char cp_response[80];
@@ -617,9 +576,10 @@ vmlogrdr_purge_store(struct device * dev, struct device_attribute *attr, const c
static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);
-static ssize_t
-vmlogrdr_autorecording_store(struct device *dev, struct device_attribute *attr, const char *buf,
- size_t count) {
+static ssize_t vmlogrdr_autorecording_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
struct vmlogrdr_priv_t *priv = dev->driver_data;
ssize_t ret = count;
@@ -637,8 +597,10 @@ vmlogrdr_autorecording_store(struct device *dev, struct device_attribute *attr,
}
-static ssize_t
-vmlogrdr_autorecording_show(struct device *dev, struct device_attribute *attr, char *buf) {
+static ssize_t vmlogrdr_autorecording_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
struct vmlogrdr_priv_t *priv = dev->driver_data;
return sprintf(buf, "%u\n", priv->autorecording);
}
@@ -648,9 +610,10 @@ static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
vmlogrdr_autorecording_store);
-static ssize_t
-vmlogrdr_recording_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t count) {
-
+static ssize_t vmlogrdr_recording_store(struct device * dev,
+ struct device_attribute *attr,
+ const char * buf, size_t count)
+{
struct vmlogrdr_priv_t *priv = dev->driver_data;
ssize_t ret;
@@ -675,8 +638,9 @@ vmlogrdr_recording_store(struct device * dev, struct device_attribute *attr, con
static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);
-static ssize_t
-vmlogrdr_recording_status_show(struct device_driver *driver, char *buf) {
+static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
+ char *buf)
+{
char cp_command[] = "QUERY RECORDING ";
int len;
@@ -709,52 +673,63 @@ static struct device_driver vmlogrdr_driver = {
};
-static int
-vmlogrdr_register_driver(void) {
+static int vmlogrdr_register_driver(void)
+{
int ret;
+ /* Register with iucv driver */
+ ret = iucv_register(&vmlogrdr_iucv_handler, 1);
+ if (ret) {
+ printk (KERN_ERR "vmlogrdr: failed to register with "
+ "iucv driver\n");
+ goto out;
+ }
+
ret = driver_register(&vmlogrdr_driver);
if (ret) {
printk(KERN_ERR "vmlogrdr: failed to register driver.\n");
- return ret;
+ goto out_iucv;
}
ret = driver_create_file(&vmlogrdr_driver,
&driver_attr_recording_status);
if (ret) {
printk(KERN_ERR "vmlogrdr: failed to add driver attribute.\n");
- goto unregdriver;
+ goto out_driver;
}
vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
if (IS_ERR(vmlogrdr_class)) {
printk(KERN_ERR "vmlogrdr: failed to create class.\n");
- ret=PTR_ERR(vmlogrdr_class);
- vmlogrdr_class=NULL;
- goto unregattr;
+ ret = PTR_ERR(vmlogrdr_class);
+ vmlogrdr_class = NULL;
+ goto out_attr;
}
return 0;
-unregattr:
+out_attr:
driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
-unregdriver:
+out_driver:
driver_unregister(&vmlogrdr_driver);
+out_iucv:
+ iucv_unregister(&vmlogrdr_iucv_handler, 1);
+out:
return ret;
}
-static void
-vmlogrdr_unregister_driver(void) {
+static void vmlogrdr_unregister_driver(void)
+{
class_destroy(vmlogrdr_class);
vmlogrdr_class = NULL;
driver_remove_file(&vmlogrdr_driver, &driver_attr_recording_status);
driver_unregister(&vmlogrdr_driver);
- return;
+ iucv_unregister(&vmlogrdr_iucv_handler, 1);
}
-static int
-vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) {
+static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
+{
struct device *dev;
int ret;
@@ -803,9 +778,10 @@ vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) {
}
-static int
-vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv ) {
- class_device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
+static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
+{
+ class_device_destroy(vmlogrdr_class,
+ MKDEV(vmlogrdr_major, priv->minor_num));
if (priv->device != NULL) {
sysfs_remove_group(&priv->device->kobj, &vmlogrdr_attr_group);
device_unregister(priv->device);
@@ -815,8 +791,8 @@ vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv ) {
}
-static int
-vmlogrdr_register_cdev(dev_t dev) {
+static int vmlogrdr_register_cdev(dev_t dev)
+{
int rc = 0;
vmlogrdr_cdev = cdev_alloc();
if (!vmlogrdr_cdev) {
@@ -836,9 +812,10 @@ vmlogrdr_register_cdev(dev_t dev) {
}
-static void
-vmlogrdr_cleanup(void) {
+static void vmlogrdr_cleanup(void)
+{
int i;
+
if (vmlogrdr_cdev) {
cdev_del(vmlogrdr_cdev);
vmlogrdr_cdev=NULL;
@@ -855,8 +832,7 @@ vmlogrdr_cleanup(void) {
}
-static int
-vmlogrdr_init(void)
+static int vmlogrdr_init(void)
{
int rc;
int i;
@@ -906,8 +882,7 @@ cleanup:
}
-static void
-vmlogrdr_exit(void)
+static void vmlogrdr_exit(void)
{
vmlogrdr_cleanup();
printk (KERN_INFO "vmlogrdr: driver unloaded\n");
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 52625153a4f..f98fa465df0 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -22,13 +22,6 @@ config CTC
available. This option is also available as a module which will be
called ctc.ko. If you do not know what it is, it's safe to say "Y".
-config IUCV
- tristate "IUCV support (VM only)"
- help
- Select this option if you want to use inter-user communication
- under VM or VIF. If unsure, say "Y" to enable a fast communication
- link between VM guests.
-
config NETIUCV
tristate "IUCV network device support (VM only)"
depends on IUCV && NETDEVICES
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 4777e36a922..bbe3ab2e93d 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -4,7 +4,6 @@
ctc-objs := ctcmain.o ctcdbug.o
-obj-$(CONFIG_IUCV) += iucv.o
obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c
deleted file mode 100644
index 229aeb5fc39..00000000000
--- a/drivers/s390/net/iucv.c
+++ /dev/null
@@ -1,2540 +0,0 @@
-/*
- * IUCV network driver
- *
- * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s):
- * Original source:
- * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
- * Xenia Tkatschow (xenia@us.ibm.com)
- * 2Gb awareness and general cleanup:
- * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
- *
- * Documentation used:
- * The original source
- * CP Programming Service, IBM document # SC24-5760
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-/* #define DEBUG */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/device.h>
-#include <asm/atomic.h>
-#include "iucv.h"
-#include <asm/io.h>
-#include <asm/s390_ext.h>
-#include <asm/ebcdic.h>
-#include <asm/smp.h>
-#include <asm/s390_rdev.h>
-
-/* FLAGS:
- * All flags are defined in the field IPFLAGS1 of each function
- * and can be found in CP Programming Services.
- * IPSRCCLS - Indicates you have specified a source class
- * IPFGMCL - Indicates you have specified a target class
- * IPFGPID - Indicates you have specified a pathid
- * IPFGMID - Indicates you have specified a message ID
- * IPANSLST - Indicates that you are using an address list for
- * reply data
- * IPBUFLST - Indicates that you are using an address list for
- * message data
- */
-
-#define IPSRCCLS 0x01
-#define IPFGMCL 0x01
-#define IPFGPID 0x02
-#define IPFGMID 0x04
-#define IPANSLST 0x08
-#define IPBUFLST 0x40
-
-static int
-iucv_bus_match (struct device *dev, struct device_driver *drv)
-{
- return 0;
-}
-
-struct bus_type iucv_bus = {
- .name = "iucv",
- .match = iucv_bus_match,
-};
-
-struct device *iucv_root;
-
-/* General IUCV interrupt structure */
-typedef struct {
- __u16 ippathid;
- __u8 res1;
- __u8 iptype;
- __u32 res2;
- __u8 ipvmid[8];
- __u8 res3[24];
-} iucv_GeneralInterrupt;
-
-static iucv_GeneralInterrupt *iucv_external_int_buffer = NULL;
-
-/* Spin Lock declaration */
-
-static DEFINE_SPINLOCK(iucv_lock);
-
-static int messagesDisabled = 0;
-
-/***************INTERRUPT HANDLING ***************/
-
-typedef struct {
- struct list_head queue;
- iucv_GeneralInterrupt data;
-} iucv_irqdata;
-
-static struct list_head iucv_irq_queue;
-static DEFINE_SPINLOCK(iucv_irq_queue_lock);
-
-/*
- *Internal function prototypes
- */
-static void iucv_tasklet_handler(unsigned long);
-static void iucv_irq_handler(__u16);
-
-static DECLARE_TASKLET(iucv_tasklet,iucv_tasklet_handler,0);
-
-/************ FUNCTION ID'S ****************************/
-
-#define ACCEPT 10
-#define CONNECT 11
-#define DECLARE_BUFFER 12
-#define PURGE 9
-#define QUERY 0
-#define QUIESCE 13
-#define RECEIVE 5
-#define REJECT 8
-#define REPLY 6
-#define RESUME 14
-#define RETRIEVE_BUFFER 2
-#define SEND 4
-#define SETMASK 16
-#define SEVER 15
-
-/**
- * Structure: handler
- * members: list - list management.
- * structure: id
- * userid - 8 char array of machine identification
- * user_data - 16 char array for user identification
- * mask - 24 char array used to compare the 2 previous
- * interrupt_table - vector of interrupt functions.
- * pgm_data - ulong, application data that is passed
- * to the interrupt handlers
-*/
-typedef struct handler_t {
- struct list_head list;
- struct {
- __u8 userid[8];
- __u8 user_data[16];
- __u8 mask[24];
- } id;
- iucv_interrupt_ops_t *interrupt_table;
- void *pgm_data;
-} handler;
-
-/**
- * iucv_handler_table: List of registered handlers.
- */
-static struct list_head iucv_handler_table;
-
-/**
- * iucv_pathid_table: an array of *handler pointing into
- * iucv_handler_table for fast indexing by pathid;
- */
-static handler **iucv_pathid_table;
-
-static unsigned long max_connections;
-
-/**
- * iucv_cpuid: contains the logical cpu number of the cpu which
- * has declared the iucv buffer by issuing DECLARE_BUFFER.
- * If no cpu has done the initialization iucv_cpuid contains -1.
- */
-static int iucv_cpuid = -1;
-/**
- * register_flag: is 0 when external interrupt has not been registered
- */
-static int register_flag;
-
-/****************FIVE 40-BYTE PARAMETER STRUCTURES******************/
-/* Data struct 1: iparml_control
- * Used for iucv_accept
- * iucv_connect
- * iucv_quiesce
- * iucv_resume
- * iucv_sever
- * iucv_retrieve_buffer
- * Data struct 2: iparml_dpl (data in parameter list)
- * Used for iucv_send_prmmsg
- * iucv_send2way_prmmsg
- * iucv_send2way_prmmsg_array
- * iucv_reply_prmmsg
- * Data struct 3: iparml_db (data in a buffer)
- * Used for iucv_receive
- * iucv_receive_array
- * iucv_reject
- * iucv_reply
- * iucv_reply_array
- * iucv_send
- * iucv_send_array
- * iucv_send2way
- * iucv_send2way_array
- * iucv_declare_buffer
- * Data struct 4: iparml_purge
- * Used for iucv_purge
- * iucv_query
- * Data struct 5: iparml_set_mask
- * Used for iucv_set_mask
- */
-
-typedef struct {
- __u16 ippathid;
- __u8 ipflags1;
- __u8 iprcode;
- __u16 ipmsglim;
- __u16 res1;
- __u8 ipvmid[8];
- __u8 ipuser[16];
- __u8 iptarget[8];
-} iparml_control;
-
-typedef struct {
- __u16 ippathid;
- __u8 ipflags1;
- __u8 iprcode;
- __u32 ipmsgid;
- __u32 iptrgcls;
- __u8 iprmmsg[8];
- __u32 ipsrccls;
- __u32 ipmsgtag;
- __u32 ipbfadr2;
- __u32 ipbfln2f;
- __u32 res;
-} iparml_dpl;
-
-typedef struct {
- __u16 ippathid;
- __u8 ipflags1;
- __u8 iprcode;
- __u32 ipmsgid;
- __u32 iptrgcls;
- __u32 ipbfadr1;
- __u32 ipbfln1f;
- __u32 ipsrccls;
- __u32 ipmsgtag;
- __u32 ipbfadr2;
- __u32 ipbfln2f;
- __u32 res;
-} iparml_db;
-
-typedef struct {
- __u16 ippathid;
- __u8 ipflags1;
- __u8 iprcode;
- __u32 ipmsgid;
- __u8 ipaudit[3];
- __u8 res1[5];
- __u32 res2;
- __u32 ipsrccls;
- __u32 ipmsgtag;
- __u32 res3[3];
-} iparml_purge;
-
-typedef struct {
- __u8 ipmask;
- __u8 res1[2];
- __u8 iprcode;
- __u32 res2[9];
-} iparml_set_mask;
-
-typedef struct {
- union {
- iparml_control p_ctrl;
- iparml_dpl p_dpl;
- iparml_db p_db;
- iparml_purge p_purge;
- iparml_set_mask p_set_mask;
- } param;
- atomic_t in_use;
- __u32 res;
-} __attribute__ ((aligned(8))) iucv_param;
-#define PARAM_POOL_SIZE (PAGE_SIZE / sizeof(iucv_param))
-
-static iucv_param * iucv_param_pool;
-
-MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
-MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
-MODULE_LICENSE("GPL");
-
-/*
- * Debugging stuff
- *******************************************************************************/
-
-
-#ifdef DEBUG
-static int debuglevel = 0;
-
-module_param(debuglevel, int, 0);
-MODULE_PARM_DESC(debuglevel,
- "Specifies the debug level (0=off ... 3=all)");
-
-static void
-iucv_dumpit(char *title, void *buf, int len)
-{
- int i;
- __u8 *p = (__u8 *)buf;
-
- if (debuglevel < 3)
- return;
-
- printk(KERN_DEBUG "%s\n", title);
- printk(" ");
- for (i = 0; i < len; i++) {
- if (!(i % 16) && i != 0)
- printk ("\n ");
- else if (!(i % 4) && i != 0)
- printk(" ");
- printk("%02X", *p++);
- }
- if (len % 16)
- printk ("\n");
- return;
-}
-#define iucv_debug(lvl, fmt, args...) \
-do { \
- if (debuglevel >= lvl) \
- printk(KERN_DEBUG "%s: " fmt "\n", __FUNCTION__ , ## args); \
-} while (0)
-
-#else
-
-#define iucv_debug(lvl, fmt, args...) do { } while (0)
-#define iucv_dumpit(title, buf, len) do { } while (0)
-
-#endif
-
-/*
- * Internal functions
- *******************************************************************************/
-
-/**
- * print start banner
- */
-static void
-iucv_banner(void)
-{
- printk(KERN_INFO "IUCV lowlevel driver initialized\n");
-}
-
-/**
- * iucv_init - Initialization
- *
- * Allocates and initializes various data structures.
- */
-static int
-iucv_init(void)
-{
- int ret;
-
- if (iucv_external_int_buffer)
- return 0;
-
- if (!MACHINE_IS_VM) {
- printk(KERN_ERR "IUCV: IUCV connection needs VM as base\n");
- return -EPROTONOSUPPORT;
- }
-
- ret = bus_register(&iucv_bus);
- if (ret) {
- printk(KERN_ERR "IUCV: failed to register bus.\n");
- return ret;
- }
-
- iucv_root = s390_root_dev_register("iucv");
- if (IS_ERR(iucv_root)) {
- printk(KERN_ERR "IUCV: failed to register iucv root.\n");
- bus_unregister(&iucv_bus);
- return PTR_ERR(iucv_root);
- }
-
- /* Note: GFP_DMA is used to get memory below 2G */
- iucv_external_int_buffer = kzalloc(sizeof(iucv_GeneralInterrupt),
- GFP_KERNEL|GFP_DMA);
- if (!iucv_external_int_buffer) {
- printk(KERN_WARNING
- "%s: Could not allocate external interrupt buffer\n",
- __FUNCTION__);
- s390_root_dev_unregister(iucv_root);
- bus_unregister(&iucv_bus);
- return -ENOMEM;
- }
-
- /* Initialize parameter pool */
- iucv_param_pool = kzalloc(sizeof(iucv_param) * PARAM_POOL_SIZE,
- GFP_KERNEL|GFP_DMA);
- if (!iucv_param_pool) {
- printk(KERN_WARNING "%s: Could not allocate param pool\n",
- __FUNCTION__);
- kfree(iucv_external_int_buffer);
- iucv_external_int_buffer = NULL;
- s390_root_dev_unregister(iucv_root);
- bus_unregister(&iucv_bus);
- return -ENOMEM;
- }
-
- /* Initialize irq queue */
- INIT_LIST_HEAD(&iucv_irq_queue);
-
- /* Initialize handler table */
- INIT_LIST_HEAD(&iucv_handler_table);
-
- iucv_banner();
- return 0;
-}
-
-/**
- * iucv_exit - De-Initialization
- *
- * Frees everything allocated from iucv_init.
- */
-static int iucv_retrieve_buffer (void);
-
-static void
-iucv_exit(void)
-{
- iucv_retrieve_buffer();
- kfree(iucv_external_int_buffer);
- iucv_external_int_buffer = NULL;
- kfree(iucv_param_pool);
- iucv_param_pool = NULL;
- s390_root_dev_unregister(iucv_root);
- bus_unregister(&iucv_bus);
- printk(KERN_INFO "IUCV lowlevel driver unloaded\n");
-}
-
-/**
- * grab_param: - Get a parameter buffer from the pre-allocated pool.
- *
- * This function searches for an unused element in the pre-allocated pool
- * of parameter buffers. If one is found, it marks it "in use" and returns
- * a pointer to it. The calling function is responsible for releasing it
- * when it has finished its usage.
- *
- * Returns: A pointer to iucv_param.
- */
-static __inline__ iucv_param *
-grab_param(void)
-{
- iucv_param *ptr;
- static int hint = 0;
-
- ptr = iucv_param_pool + hint;
- do {
- ptr++;
- if (ptr >= iucv_param_pool + PARAM_POOL_SIZE)
- ptr = iucv_param_pool;
- } while (atomic_cmpxchg(&ptr->in_use, 0, 1) != 0);
- hint = ptr - iucv_param_pool;
-
- memset(&ptr->param, 0, sizeof(ptr->param));
- return ptr;
-}
-
-/**
- * release_param - Release a parameter buffer.
- * @p: A pointer to a struct iucv_param, previously obtained by calling
- * grab_param().
- *
- * This function marks the specified parameter buffer "unused".
- */
-static __inline__ void
-release_param(void *p)
-{
- atomic_set(&((iucv_param *)p)->in_use, 0);
-}
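-
-/*
- * A minimal sketch of how the pool is meant to be used (the pattern
- * every wrapper below follows): grab a block, fill it, issue the CP
- * call, read the results, then hand the block back. The SEVER example
- * is illustrative only.
- */
-#if 0 /* usage sketch, not compiled */
-static ulong example_sever(__u16 pathid, __u8 user_data[16])
-{
- iparml_control *parm = (iparml_control *) grab_param();
- ulong rc;
-
- parm->ippathid = pathid;
- memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
- rc = b2f0(SEVER, parm); /* return code from CP */
- release_param(parm); /* parm must not be touched afterwards */
- return rc;
-}
-#endif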
-
-/**
- * iucv_add_handler: - Add a new handler
- * @new: handler that is being entered into the chain.
- *
- * Places the new handler on iucv_handler_table if no identical handler
- * is found.
- *
- * Returns: 0 on success, !0 on failure (handler already in chain).
- */
-static int
-iucv_add_handler (handler *new)
-{
- ulong flags;
-
- iucv_debug(1, "entering");
- iucv_dumpit("handler:", new, sizeof(handler));
-
- spin_lock_irqsave (&iucv_lock, flags);
- if (!list_empty(&iucv_handler_table)) {
- struct list_head *lh;
-
- /**
- * Search list for handler with identical id. If one
- * is found, the new handler is _not_ added.
- */
- list_for_each(lh, &iucv_handler_table) {
- handler *h = list_entry(lh, handler, list);
- if (!memcmp(&new->id, &h->id, sizeof(h->id))) {
- iucv_debug(1, "ret 1");
- spin_unlock_irqrestore (&iucv_lock, flags);
- return 1;
- }
- }
- }
- /**
- * If we get here, no handler was found.
- */
- INIT_LIST_HEAD(&new->list);
- list_add(&new->list, &iucv_handler_table);
- spin_unlock_irqrestore (&iucv_lock, flags);
-
- iucv_debug(1, "exiting");
- return 0;
-}
-
-/**
- * b2f0:
- * @code: identifier of IUCV call to CP.
- * @parm: pointer to 40 byte iparml area passed to CP
- *
- * Calls CP to execute IUCV commands.
- *
- * Returns: return code from CP's IUCV call
- */
-static inline ulong b2f0(__u32 code, void *parm)
-{
- register unsigned long reg0 asm ("0");
- register unsigned long reg1 asm ("1");
- iucv_dumpit("iparml before b2f0 call:", parm, sizeof(iucv_param));
-
- reg0 = code;
- reg1 = virt_to_phys(parm);
- asm volatile(".long 0xb2f01000" : : "d" (reg0), "a" (reg1));
-
- iucv_dumpit("iparml after b2f0 call:", parm, sizeof(iucv_param));
-
- return (unsigned long)*((__u8 *)(parm + 3));
-}
-
-/*
- * Name: iucv_add_pathid
- * Purpose: Adds a path id to the system.
- * Input: pathid - pathid that is going to be entered into system
- * handle - address of handler that the pathid will be associated
- * with.
- * pgm_data - token passed in by application.
- * Output: 0: successful addition of pathid
- * - EINVAL - pathid entry is being used by another application
- * - ENOMEM - storage allocation for a new pathid table failed
-*/
-static int
-__iucv_add_pathid(__u16 pathid, handler *handler)
-{
-
- iucv_debug(1, "entering");
-
- iucv_debug(1, "handler is pointing to %p", handler);
-
- if (pathid > (max_connections - 1))
- return -EINVAL;
-
- if (iucv_pathid_table[pathid]) {
- iucv_debug(1, "pathid entry is %p", iucv_pathid_table[pathid]);
- printk(KERN_WARNING
- "%s: Pathid being used, error.\n", __FUNCTION__);
- return -EINVAL;
- }
- iucv_pathid_table[pathid] = handler;
-
- iucv_debug(1, "exiting");
- return 0;
-} /* end of add_pathid function */
-
-static int
-iucv_add_pathid(__u16 pathid, handler *handler)
-{
- ulong flags;
- int rc;
-
- spin_lock_irqsave (&iucv_lock, flags);
- rc = __iucv_add_pathid(pathid, handler);
- spin_unlock_irqrestore (&iucv_lock, flags);
- return rc;
-}
-
-static void
-iucv_remove_pathid(__u16 pathid)
-{
- ulong flags;
-
- if (pathid > (max_connections - 1))
- return;
-
- spin_lock_irqsave (&iucv_lock, flags);
- iucv_pathid_table[pathid] = NULL;
- spin_unlock_irqrestore (&iucv_lock, flags);
-}
-
-/**
- * iucv_declare_buffer_cpuid
- * Register at VM for subsequent IUCV operations. This is executed
- * on the reserved CPU iucv_cpuid. Called from iucv_declare_buffer().
- */
-static void
-iucv_declare_buffer_cpuid (void *result)
-{
- iparml_db *parm;
-
- parm = (iparml_db *)grab_param();
- parm->ipbfadr1 = virt_to_phys(iucv_external_int_buffer);
- if ((*((ulong *)result) = b2f0(DECLARE_BUFFER, parm)) == 1)
- *((ulong *)result) = parm->iprcode;
- release_param(parm);
-}
-
-/**
- * iucv_retrieve_buffer_cpuid:
- * Unregister IUCV usage at VM. This is always executed on the same
- * cpu that registered the buffer to VM.
- * Called from iucv_retrieve_buffer().
- */
-static void
-iucv_retrieve_buffer_cpuid (void *cpu)
-{
- iparml_control *parm;
-
- parm = (iparml_control *)grab_param();
- b2f0(RETRIEVE_BUFFER, parm);
- release_param(parm);
-}
-
-/**
- * Name: iucv_declare_buffer
- * Purpose: Specifies the guest's real address of the external
- * interrupt buffer to CP.
- * Input: void
- * Output: iprcode - return code from b2f0 call
- */
-static int
-iucv_declare_buffer (void)
-{
- unsigned long flags;
- ulong b2f0_result;
-
- iucv_debug(1, "entering");
- b2f0_result = -ENODEV;
- spin_lock_irqsave (&iucv_lock, flags);
- if (iucv_cpuid == -1) {
- /* Reserve any cpu for use by iucv. */
- iucv_cpuid = smp_get_cpu(CPU_MASK_ALL);
- spin_unlock_irqrestore (&iucv_lock, flags);
- smp_call_function_on(iucv_declare_buffer_cpuid,
- &b2f0_result, 0, 1, iucv_cpuid);
- if (b2f0_result) {
- smp_put_cpu(iucv_cpuid);
- iucv_cpuid = -1;
- }
- iucv_debug(1, "Address of EIB = %p", iucv_external_int_buffer);
- } else {
- spin_unlock_irqrestore (&iucv_lock, flags);
- b2f0_result = 0;
- }
- iucv_debug(1, "exiting");
- return b2f0_result;
-}
-
-/**
- * iucv_retrieve_buffer:
- *
- * Terminates all use of IUCV.
- * Returns: return code from CP
- */
-static int
-iucv_retrieve_buffer (void)
-{
- iucv_debug(1, "entering");
- if (iucv_cpuid != -1) {
- smp_call_function_on(iucv_retrieve_buffer_cpuid,
- NULL, 0, 1, iucv_cpuid);
- /* Release the cpu reserved by iucv_declare_buffer. */
- smp_put_cpu(iucv_cpuid);
- iucv_cpuid = -1;
- }
- iucv_debug(1, "exiting");
- return 0;
-}
-
-/**
- * iucv_remove_handler:
- * @handler: handler to be removed
- *
- * Remove handler when application unregisters.
- */
-static void
-iucv_remove_handler(handler *handler)
-{
- unsigned long flags;
-
- if ((!iucv_pathid_table) || (!handler))
- return;
-
- iucv_debug(1, "entering");
-
- spin_lock_irqsave (&iucv_lock, flags);
- list_del(&handler->list);
- if (list_empty(&iucv_handler_table)) {
- if (register_flag) {
- unregister_external_interrupt(0x4000, iucv_irq_handler);
- register_flag = 0;
- }
- }
- spin_unlock_irqrestore (&iucv_lock, flags);
-
- iucv_debug(1, "exiting");
- return;
-}
-
-/**
- * iucv_register_program:
- * @pgmname: user identification
- * @userid: machine identification
- * @pgmmask: Indicates which bits in the pgmname and userid combined will be
- * used to determine who is given control.
- * @ops: Address of interrupt handler table.
- * @pgm_data: Application data to be passed to interrupt handlers.
- *
- * Registers an application with IUCV.
- * Returns:
- * The address of handler, or NULL on failure.
- * NOTE on pgmmask:
- * If pgmname, userid and pgmmask are provided, pgmmask is entered into the
- * handler as is.
- * If pgmmask is NULL, the internal mask is set to all 0xff's.
- * When userid is NULL, the first 8 bytes of the internal mask are forced
- * to 0x00.
- * If pgmmask and userid are NULL, the first 8 bytes of the internal mask
- * are forced to 0x00 and the last 16 bytes to 0xff.
- */
-
-iucv_handle_t
-iucv_register_program (__u8 pgmname[16],
- __u8 userid[8],
- __u8 pgmmask[24],
- iucv_interrupt_ops_t * ops, void *pgm_data)
-{
- ulong rc = 0; /* return code from function calls */
- handler *new_handler;
-
- iucv_debug(1, "entering");
-
- if (ops == NULL) {
- /* interrupt table is not defined */
- printk(KERN_WARNING "%s: Interrupt table is not defined, "
- "exiting\n", __FUNCTION__);
- return NULL;
- }
- if (!pgmname) {
- printk(KERN_WARNING "%s: pgmname not provided\n", __FUNCTION__);
- return NULL;
- }
-
- /* Allocate handler entry */
- new_handler = kmalloc(sizeof(handler), GFP_ATOMIC);
- if (new_handler == NULL) {
- printk(KERN_WARNING "%s: storage allocation for new handler "
- "failed.\n", __FUNCTION__);
- return NULL;
- }
-
- if (!iucv_pathid_table) {
- if (iucv_init()) {
- kfree(new_handler);
- return NULL;
- }
-
- max_connections = iucv_query_maxconn();
- iucv_pathid_table = kcalloc(max_connections, sizeof(handler *),
- GFP_ATOMIC);
- if (iucv_pathid_table == NULL) {
- printk(KERN_WARNING "%s: iucv_pathid_table storage "
- "allocation failed\n", __FUNCTION__);
- kfree(new_handler);
- return NULL;
- }
- }
- memset(new_handler, 0, sizeof (handler));
- memcpy(new_handler->id.user_data, pgmname,
- sizeof (new_handler->id.user_data));
- if (userid) {
- memcpy (new_handler->id.userid, userid,
- sizeof (new_handler->id.userid));
- ASCEBC (new_handler->id.userid,
- sizeof (new_handler->id.userid));
- EBC_TOUPPER (new_handler->id.userid,
- sizeof (new_handler->id.userid));
-
- if (pgmmask) {
- memcpy (new_handler->id.mask, pgmmask,
- sizeof (new_handler->id.mask));
- } else {
- memset (new_handler->id.mask, 0xFF,
- sizeof (new_handler->id.mask));
- }
- } else {
- if (pgmmask) {
- memcpy (new_handler->id.mask, pgmmask,
- sizeof (new_handler->id.mask));
- } else {
- memset (new_handler->id.mask, 0xFF,
- sizeof (new_handler->id.mask));
- }
- memset (new_handler->id.userid, 0x00,
- sizeof (new_handler->id.userid));
- }
- /* fill in the rest of handler */
- new_handler->pgm_data = pgm_data;
- new_handler->interrupt_table = ops;
-
- /*
- * Check if someone else is registered with same pgmname, userid
- * and mask. If someone is already registered with same pgmname,
- * userid and mask, registration will fail and NULL will be returned
- * to the application.
- * If no identical handler is found, the new handler is added to the list.
- */
- rc = iucv_add_handler(new_handler);
- if (rc) {
- printk(KERN_WARNING "%s: Someone already registered with same "
- "pgmname, userid, pgmmask\n", __FUNCTION__);
- kfree (new_handler);
- return NULL;
- }
-
- rc = iucv_declare_buffer();
- if (rc) {
- char *err = "Unknown";
- iucv_remove_handler(new_handler);
- kfree(new_handler);
- switch(rc) {
- case 0x03:
- err = "Directory error";
- break;
- case 0x0a:
- err = "Invalid length";
- break;
- case 0x13:
- err = "Buffer already exists";
- break;
- case 0x3e:
- err = "Buffer overlap";
- break;
- case 0x5c:
- err = "Paging or storage error";
- break;
- }
- printk(KERN_WARNING "%s: iucv_declare_buffer "
- "returned error 0x%02lx (%s)\n", __FUNCTION__, rc, err);
- return NULL;
- }
- if (!register_flag) {
- /* request the 0x4000 external interrupt */
- rc = register_external_interrupt (0x4000, iucv_irq_handler);
- if (rc) {
- iucv_remove_handler(new_handler);
- kfree (new_handler);
- printk(KERN_WARNING "%s: "
- "register_external_interrupt returned %ld\n",
- __FUNCTION__, rc);
- return NULL;
-
- }
- register_flag = 1;
- }
- iucv_debug(1, "exiting");
- return new_handler;
-} /* end of register function */
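-
-/*
- * Registration sketch (hypothetical caller; iucv_interrupt_ops_t and the
- * callback typedefs are assumed as declared in iucv.h). Only the
- * callbacks the application actually needs have to be filled in.
- */
-#if 0 /* usage sketch, not compiled */
-static void my_conn_pending(iucv_ConnectionPending *eib, void *pgm_data);
-static void my_msg_pending(iucv_MessagePending *eib, void *pgm_data);
-
-static iucv_interrupt_ops_t my_ops = {
- .ConnectionPending = my_conn_pending,
- .MessagePending = my_msg_pending,
-};
-
-static iucv_handle_t my_handle;
-
-static int my_open(void)
-{
- __u8 pgmname[16] = "MYSRV";
-
- /* NULL userid/pgmmask: match connection requests from any user id */
- my_handle = iucv_register_program(pgmname, NULL, NULL, &my_ops, NULL);
- return my_handle ? 0 : -EIO;
-}
-#endif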
-
-/**
- * iucv_unregister_program:
- * @handle: address of handler
- *
- * Unregister application with IUCV.
- * Returns:
- * 0 on success, -EINVAL, if specified handle is invalid.
- */
-
-int
-iucv_unregister_program (iucv_handle_t handle)
-{
- handler *h = NULL;
- struct list_head *lh;
- int i;
- ulong flags;
-
- iucv_debug(1, "entering");
- iucv_debug(1, "address of handler is %p", h);
-
- /* Checking if handle is valid */
- spin_lock_irqsave (&iucv_lock, flags);
- list_for_each(lh, &iucv_handler_table) {
- if ((handler *)handle == list_entry(lh, handler, list)) {
- h = (handler *)handle;
- break;
- }
- }
- if (!h) {
- spin_unlock_irqrestore (&iucv_lock, flags);
- if (handle)
- printk(KERN_WARNING
- "%s: Handler not found in iucv_handler_table.\n",
- __FUNCTION__);
- else
- printk(KERN_WARNING
- "%s: NULL handle passed by application.\n",
- __FUNCTION__);
- return -EINVAL;
- }
-
- /**
- * First, walk thru iucv_pathid_table and sever any pathid which is
- * still pointing to the handler to be removed.
- */
- for (i = 0; i < max_connections; i++)
- if (iucv_pathid_table[i] == h) {
- spin_unlock_irqrestore (&iucv_lock, flags);
- iucv_sever(i, h->id.user_data);
- spin_lock_irqsave(&iucv_lock, flags);
- }
- spin_unlock_irqrestore (&iucv_lock, flags);
-
- iucv_remove_handler(h);
- kfree(h);
-
- iucv_debug(1, "exiting");
- return 0;
-}
-
-/**
- * iucv_accept:
- * @pathid: Path identification number
- * @msglim_reqstd: The number of outstanding messages requested.
- * @user_data: Data specified by the iucv_connect function.
- * @flags1: Contains options for this path.
- * - IPPRTY (0x20) Specifies if you want to send priority message.
- * - IPRMDATA (0x80) Specifies whether your program can handle a message
- * in the parameter list.
- * - IPQUSCE (0x40) Specifies whether you want to quiesce the path being
- * established.
- * @handle: Address of handler.
- * @pgm_data: Application data passed to interrupt handlers.
- * @flags1_out: Pointer to an int. If not NULL, on return the options for
- * the path are stored at the given location:
- * - IPPRTY (0x20) Indicates you may send a priority message.
- * @msglim: Pointer to an __u16. If not NULL, on return the maximum
- * number of outstanding messages is stored at the given
- * location.
- *
- * This function is issued after the user receives a Connection Pending external
- * interrupt and now wishes to complete the IUCV communication path.
- * Returns:
- * return code from CP
- */
-int
-iucv_accept(__u16 pathid, __u16 msglim_reqstd,
- __u8 user_data[16], int flags1,
- iucv_handle_t handle, void *pgm_data,
- int *flags1_out, __u16 * msglim)
-{
- ulong b2f0_result = 0;
- ulong flags;
- struct list_head *lh;
- handler *h = NULL;
- iparml_control *parm;
-
- iucv_debug(1, "entering");
- iucv_debug(1, "pathid = %d", pathid);
-
- /* Checking if handle is valid */
- spin_lock_irqsave (&iucv_lock, flags);
- list_for_each(lh, &iucv_handler_table) {
- if ((handler *)handle == list_entry(lh, handler, list)) {
- h = (handler *)handle;
- break;
- }
- }
- spin_unlock_irqrestore (&iucv_lock, flags);
-
- if (!h) {
- if (handle)
- printk(KERN_WARNING
- "%s: Handler not found in iucv_handler_table.\n",
- __FUNCTION__);
- else
- printk(KERN_WARNING
- "%s: NULL handle passed by application.\n",
- __FUNCTION__);
- return -EINVAL;
- }
-
- parm = (iparml_control *)grab_param();
-
- parm->ippathid = pathid;
- parm->ipmsglim = msglim_reqstd;
- if (user_data)
- memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
-
- parm->ipflags1 = (__u8)flags1;
- b2f0_result = b2f0(ACCEPT, parm);
-
- if (!b2f0_result) {
- if (msglim)
- *msglim = parm->ipmsglim;
- if (pgm_data)
- h->pgm_data = pgm_data;
- if (flags1_out)
- *flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0;
- }
- release_param(parm);
-
- iucv_debug(1, "exiting");
- return b2f0_result;
-}
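-
-/*
- * Accept sketch (hypothetical ConnectionPending callback; my_handle is
- * the handle from the registration sketch above, and the ippathid field
- * is assumed as declared for the interrupt buffer in iucv.h).
- */
-#if 0 /* usage sketch, not compiled */
-static void my_conn_pending(iucv_ConnectionPending *eib, void *pgm_data)
-{
- __u8 udata[16] = "MYSRV";
- int flags1_out;
- __u16 msglim;
-
- if (iucv_accept(eib->ippathid, 10, udata, 0,
-   my_handle, NULL, &flags1_out, &msglim))
-  printk(KERN_WARNING "mysrv: iucv_accept failed\n");
-}
-#endif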
-
-/**
- * iucv_connect:
- * @pathid: Path identification number
- * @msglim_reqstd: Number of outstanding messages requested
- * @user_data: 16-byte user data
- * @userid: 8 bytes of user identification
- * @system_name: 8 bytes identifying the system name
- * @flags1: Specifies options for this path:
- * - IPPRTY (0x20) Specifies if you want to send priority message.
- * - IPRMDATA (0x80) Specifies whether your program can handle a message
- * in the parameter list.
- * - IPQUSCE (0x40) Specifies whether you want to quiesce the path being
- * established.
- * - IPLOCAL (0x01) Allows an application to force the partner to be on the
- * local system. If local is specified then target class
- * cannot be specified.
- * @flags1_out: Pointer to an int. If not NULL, on return the options for
- * the path are stored at the given location:
- * - IPPRTY (0x20) Indicates you may send a priority message.
- * @msglim: Pointer to an __u16. If not NULL, on return the maximum
- * number of outstanding messages is stored at the given
- * location.
- * @handle: Address of handler.
- * @pgm_data: Application data to be passed to interrupt handlers.
- *
- * This function establishes an IUCV path. Although the connect may complete
- * successfully, you are not able to use the path until you receive an IUCV
- * Connection Complete external interrupt.
- * Returns: return code from CP, or one of the following
- * - ENOMEM
- * - return code from iucv_declare_buffer
- * - EINVAL - invalid handle passed by application
- * - EINVAL - pathid address is NULL
- * - ENOMEM - pathid table storage allocation failed
- * - return code from internal function add_pathid
- */
-int
-iucv_connect (__u16 *pathid, __u16 msglim_reqstd,
- __u8 user_data[16], __u8 userid[8],
- __u8 system_name[8], int flags1,
- int *flags1_out, __u16 * msglim,
- iucv_handle_t handle, void *pgm_data)
-{
- iparml_control *parm;
- iparml_control local_parm;
- struct list_head *lh;
- ulong b2f0_result = 0;
- ulong flags;
- int add_pathid_result = 0;
- handler *h = NULL;
- __u8 no_memory[16] = "NO MEMORY";
-
- iucv_debug(1, "entering");
-
- /* Checking if handle is valid */
- spin_lock_irqsave (&iucv_lock, flags);
- list_for_each(lh, &iucv_handler_table) {
- if ((handler *)handle == list_entry(lh, handler, list)) {
- h = (handler *)handle;
- break;
- }
- }
- spin_unlock_irqrestore (&iucv_lock, flags);
-
- if (!h) {
- if (handle)
- printk(KERN_WARNING
- "%s: Handler not found in iucv_handler_table.\n",
- __FUNCTION__);
- else
- printk(KERN_WARNING
- "%s: NULL handle passed by application.\n",
- __FUNCTION__);
- return -EINVAL;
- }
-
- if (pathid == NULL) {
- printk(KERN_WARNING "%s: NULL pathid pointer\n",
- __FUNCTION__);
- return -EINVAL;
- }
-
- parm = (iparml_control *)grab_param();
-
- parm->ipmsglim = msglim_reqstd;
-
- if (user_data)
- memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
-
- if (userid) {
- memcpy(parm->ipvmid, userid, sizeof(parm->ipvmid));
- ASCEBC(parm->ipvmid, sizeof(parm->ipvmid));
- EBC_TOUPPER(parm->ipvmid, sizeof(parm->ipvmid));
- }
-
- if (system_name) {
- memcpy(parm->iptarget, system_name, sizeof(parm->iptarget));
- ASCEBC(parm->iptarget, sizeof(parm->iptarget));
- EBC_TOUPPER(parm->iptarget, sizeof(parm->iptarget));
- }
-
- /* In order to establish an IUCV connection, the procedure is:
- *
- * b2f0(CONNECT)
- * take the ippathid from the b2f0 call
- * register the handler to the ippathid
- *
- * Unfortunately, the ConnectionEstablished message gets sent after the
- * b2f0(CONNECT) call but before the register is handled.
- *
- * In order for this race condition to be eliminated, the IUCV Control
- * Interrupts must be disabled for the above procedure.
- *
- * David Kennedy <dkennedy@linuxcare.com>
- */
-
- /* Enable everything but IUCV Control messages */
- iucv_setmask(~(AllInterrupts));
- messagesDisabled = 1;
-
- spin_lock_irqsave (&iucv_lock, flags);
- parm->ipflags1 = (__u8)flags1;
- b2f0_result = b2f0(CONNECT, parm);
- memcpy(&local_parm, parm, sizeof(local_parm));
- release_param(parm);
- parm = &local_parm;
- if (!b2f0_result)
- add_pathid_result = __iucv_add_pathid(parm->ippathid, h);
- spin_unlock_irqrestore (&iucv_lock, flags);
-
- if (b2f0_result) {
- iucv_setmask(~0);
- messagesDisabled = 0;
- return b2f0_result;
- }
-
- *pathid = parm->ippathid;
-
- /* Enable everything again */
- iucv_setmask(IUCVControlInterruptsFlag);
-
- if (msglim)
- *msglim = parm->ipmsglim;
- if (flags1_out)
- *flags1_out = (parm->ipflags1 & IPPRTY) ? IPPRTY : 0;
-
- if (add_pathid_result) {
- iucv_sever(*pathid, no_memory);
- printk(KERN_WARNING "%s: add_pathid failed with rc ="
- " %d\n", __FUNCTION__, add_pathid_result);
- return(add_pathid_result);
- }
-
- iucv_debug(1, "exiting");
- return b2f0_result;
-}
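-
-/*
- * Connect sketch (hypothetical caller): open a path to the guest "PEER"
- * on the same VM system. The path may only be used after the
- * ConnectionComplete interrupt for it has been delivered.
- */
-#if 0 /* usage sketch, not compiled */
-static int my_connect(void)
-{
- __u8 udata[16] = "MYSRV";
- __u8 peer[8];
- __u16 pathid, msglim;
- int flags1_out;
-
- memcpy(peer, "PEER    ", 8); /* user ids are blank padded to 8 chars */
- return iucv_connect(&pathid, 10, udata, peer, NULL, 0,
-     &flags1_out, &msglim, my_handle, NULL);
-}
-#endif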
-
-/**
- * iucv_purge:
- * @pathid: Path identification number
- * @msgid: Message ID of message to purge.
- * @srccls: Message class of the message to purge.
- * @audit: Pointer to an __u32. If not NULL, on return, information about
- * asynchronous errors that may have affected the normal completion
- * of this message is stored at the given location.
- *
- * Cancels a message you have sent.
- * Returns: return code from CP
- */
-int
-iucv_purge (__u16 pathid, __u32 msgid, __u32 srccls, __u32 *audit)
-{
- iparml_purge *parm;
- ulong b2f0_result = 0;
-
- iucv_debug(1, "entering");
- iucv_debug(1, "pathid = %d", pathid);
-
- parm = (iparml_purge *)grab_param();
-
- parm->ipmsgid = msgid;
- parm->ippathid = pathid;
- parm->ipsrccls = srccls;
- parm->ipflags1 |= (IPSRCCLS | IPFGMID | IPFGPID);
- b2f0_result = b2f0(PURGE, parm);
-
- if (!b2f0_result && audit) {
- memcpy(audit, parm->ipaudit, sizeof(parm->ipaudit));
- /* parm->ipaudit has only 3 bytes */
- *audit >>= 8;
- }
-
- release_param(parm);
-
- iucv_debug(1, "b2f0_result = %ld", b2f0_result);
- iucv_debug(1, "exiting");
- return b2f0_result;
-}
-
-/**
- * iucv_query_generic:
- * @want_maxconn: Flag, describing which value is to be returned.
- *
- * Helper function for iucv_query_maxconn() and iucv_query_bufsize().
- *
- * Returns: The buffersize, if want_maxconn is 0; the maximum number of
- * connections, if want_maxconn is 1, or an error code < 0 on failure.
- */
-static int
-iucv_query_generic(int want_maxconn)
-{
- register unsigned long reg0 asm ("0");
- register unsigned long reg1 asm ("1");
- iparml_purge *parm = (iparml_purge *)grab_param();
- int bufsize, maxconn;
- int ccode;
-
- /**
- * Call b2f0 and store R0 (max buffer size),
- * R1 (max connections) and CC.
- */
- reg0 = QUERY;
- reg1 = virt_to_phys(parm);
- asm volatile(
- " .long 0xb2f01000\n"
- " ipm %0\n"
- " srl %0,28\n"
- : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
- bufsize = reg0;
- maxconn = reg1;
- release_param(parm);
-
- if (ccode)
- return -EPERM;
- if (want_maxconn)
- return maxconn;
- return bufsize;
-}
-
-/**
- * iucv_query_maxconn:
- *
- * Determines the maximum number of connections that may be established.
- *
- * Returns: Maximum number of connections that can be established.
- */
-ulong
-iucv_query_maxconn(void)
-{
- return iucv_query_generic(1);
-}
-
-/**
- * iucv_query_bufsize:
- *
- * Determines the size of the external interrupt buffer.
- *
- * Returns: Size of external interrupt buffer.
- */
-ulong
-iucv_query_bufsize (void)
-{
- return iucv_query_generic(0);
-}
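-
-/*
- * Query sketch (hypothetical): sanity-check that the statically sized
- * external interrupt buffer is big enough for this machine and report
- * the connection limit.
- */
-#if 0 /* usage sketch, not compiled */
-static void my_check_limits(void)
-{
- if (iucv_query_bufsize() > sizeof(iucv_GeneralInterrupt))
-  printk(KERN_WARNING "iucv: interrupt buffer may be too small\n");
- printk(KERN_INFO "iucv: up to %lu connections\n", iucv_query_maxconn());
-}
-#endif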
-
-/**
- * iucv_quiesce:
- * @pathid: Path identification number
- * @user_data: 16-byte user data
- *
- * Temporarily suspends incoming messages on an IUCV path.
- * You can later reactivate the path by invoking the iucv_resume function.
- * Returns: return code from CP
- */
-int
-iucv_quiesce (__u16 pathid, __u8 user_data[16])
-{
- iparml_control *parm;
- ulong b2f0_result = 0;
-
- iucv_debug(1, "entering");
- iucv_debug(1, "pathid = %d", pathid);
-
- parm = (iparml_control *)grab_param();
-
- memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
- parm->ippathid = pathid;
-
- b2f0_result = b2f0(QUIESCE, parm);
- release_param(parm);
-
- iucv_debug(1, "b2f0_result = %ld", b2f0_result);
- iucv_debug(1, "exiting");
-
- return b2f0_result;
-}
-
-/**
- * iucv_receive:
- * @pathid: Path identification number.
- * @buffer: Address of buffer to receive. Must be below 2G.
- * @buflen: Length of buffer to receive.
- * @msgid: Specifies the message ID.
- * @trgcls: Specifies target class.
- * @flags1_out: Receives options for path on return.
- * - IPNORPY (0x10) Specifies whether a reply is required
- * - IPPRTY (0x20) Specifies if you want to send priority message
- * - IPRMDATA (0x80) Specifies the data is contained in the parameter list
- * @residual_buffer: Receives the address of buffer updated by the number
- * of bytes you have received on return.
- * @residual_length: On return, receives one of the following values:
- * - 0 If the receive buffer is the same length as
- * the message.
- * - Remaining bytes in buffer If the receive buffer is longer than the
- * message.
- * - Remaining bytes in message If the receive buffer is shorter than the
- * message.
- *
- * This function receives messages that are being sent to you over established
- * paths.
- * Returns: return code from the CP IUCV call (always 5 if the receive
- * buffer is shorter than the message);
- * -EINVAL - buffer address is pointing to NULL
- */
-int
-iucv_receive (__u16 pathid, __u32 msgid, __u32 trgcls,
- void *buffer, ulong buflen,
- int *flags1_out, ulong * residual_buffer, ulong * residual_length)
-{
- iparml_db *parm;
- ulong b2f0_result;
- int moved = 0; /* number of bytes moved from parmlist to buffer */
-
- iucv_debug(2, "entering");
-
- if (!buffer)
- return -EINVAL;
-
- parm = (iparml_db *)grab_param();
-
- parm->ipbfadr1 = (__u32) (addr_t) buffer;
- parm->ipbfln1f = (__u32) ((ulong) buflen);
- parm->ipmsgid = msgid;
- parm->ippathid = pathid;
- parm->iptrgcls = trgcls;
- parm->ipflags1 = (IPFGPID | IPFGMID | IPFGMCL);
-
- b2f0_result = b2f0(RECEIVE, parm);
-
- if (!b2f0_result || b2f0_result == 5) {
- if (flags1_out) {
- iucv_debug(2, "*flags1_out = %d", *flags1_out);
- *flags1_out = (parm->ipflags1 & (~0x07));
- iucv_debug(2, "*flags1_out = %d", *flags1_out);
- }
-
- if (!(parm->ipflags1 & IPRMDATA)) { /*msg not in parmlist */
- if (residual_length)
- *residual_length = parm->ipbfln1f;
-
- if (residual_buffer)
- *residual_buffer = parm->ipbfadr1;
- } else {
- moved = min_t (unsigned long, buflen, 8);
-
- memcpy ((char *) buffer,
- (char *) &parm->ipbfadr1, moved);
-
- if (buflen < 8)
- b2f0_result = 5;
-
- if (residual_length)
- *residual_length = abs (buflen - 8);
-
- if (residual_buffer)
- *residual_buffer = (ulong) (buffer + moved);
- }
- }
- release_param(parm);
-
- iucv_debug(2, "exiting");
- return b2f0_result;
-}
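-
-/*
- * Receive sketch (hypothetical MessagePending callback; field names are
- * assumed as declared in iucv.h). my_rx_buffer is assumed to be a buffer
- * of MY_RX_BUFLEN bytes allocated with GFP_DMA so that it lies below 2G,
- * as required above.
- */
-#if 0 /* usage sketch, not compiled */
-static void my_msg_pending(iucv_MessagePending *eib, void *pgm_data)
-{
- ulong resid_buf, resid_len;
- int flags1_out;
-
- if (iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls,
-   my_rx_buffer, MY_RX_BUFLEN,
-   &flags1_out, &resid_buf, &resid_len))
-  printk(KERN_WARNING "mysrv: iucv_receive failed\n");
-}
-#endif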
-
-/*
- * Name: iucv_receive_array
- * Purpose: This function receives messages that are being sent to you
- * over established paths.
- * Input: pathid - path identification number
- * buffer - address of array of buffers
- * buflen - total length of buffers
- * msgid - specifies the message ID.
- * trgcls - specifies target class
- * Output:
- * flags1_out: Options for path.
- * IPNORPY - 0x10 specifies whether a reply is required
- * IPPRTY - 0x20 specifies if you want to send priority message
- * IPRMDATA - 0x80 specifies the data is contained in the parameter list
- * residual_buffer - address points to the current list entry IUCV
- * is working on.
- * residual_length -
- * Contains one of the following values, if the receive buffer is:
- * The same length as the message, this field is zero.
- * Longer than the message, this field contains the number of
- * bytes remaining in the buffer.
- * Shorter than the message, this field contains the residual
- * count (that is, the number of bytes remaining in the
- * message that do not fit into the buffer). In this case
- * b2f0_result = 5.
- * Return: b2f0_result - return code from CP
- * (-EINVAL) - buffer address is NULL
- */
-int
-iucv_receive_array (__u16 pathid,
- __u32 msgid, __u32 trgcls,
- iucv_array_t * buffer, ulong buflen,
- int *flags1_out,
- ulong * residual_buffer, ulong * residual_length)
-{
- iparml_db *parm;
- ulong b2f0_result;
- int i = 0, moved = 0, need_to_move = 8, dyn_len;
-
- iucv_debug(2, "entering");
-
- if (!buffer)
- return -EINVAL;
-
- parm = (iparml_db *)grab_param();
-
- parm->ipbfadr1 = (__u32) ((ulong) buffer);
- parm->ipbfln1f = (__u32) buflen;
- parm->ipmsgid = msgid;
- parm->ippathid = pathid;
- parm->iptrgcls = trgcls;
- parm->ipflags1 = (IPBUFLST | IPFGPID | IPFGMID | IPFGMCL);
-
- b2f0_result = b2f0(RECEIVE, parm);
-
- if (!b2f0_result || b2f0_result == 5) {
-
- if (flags1_out) {
- iucv_debug(2, "*flags1_out = %d", *flags1_out);
- *flags1_out = (parm->ipflags1 & (~0x07));
- iucv_debug(2, "*flags1_out = %d", *flags1_out);
- }
-
- if (!(parm->ipflags1 & IPRMDATA)) { /*msg not in parmlist */
-
- if (residual_length)
- *residual_length = parm->ipbfln1f;
-
- if (residual_buffer)
- *residual_buffer = parm->ipbfadr1;
-
- } else {
- /* copy msg from parmlist to users array. */
-
- while ((moved < 8) && (moved < buflen)) {
- dyn_len =
- min_t (unsigned int,
- (buffer + i)->length, need_to_move);
-
- memcpy ((char *)((ulong)((buffer + i)->address)),
- ((char *) &parm->ipbfadr1) + moved,
- dyn_len);
-
- moved += dyn_len;
- need_to_move -= dyn_len;
-
- (buffer + i)->address =
- (__u32)
- ((ulong)(__u8 *) ((ulong)(buffer + i)->address)
- + dyn_len);
-
- (buffer + i)->length -= dyn_len;
- i++;
- }
-
- if (need_to_move) /* buflen < 8 bytes */
- b2f0_result = 5;
-
- if (residual_length)
- *residual_length = abs (buflen - 8);
-
- if (residual_buffer) {
- if (!moved)
- *residual_buffer = (ulong) buffer;
- else
- *residual_buffer =
- (ulong) (buffer + (i - 1));
- }
-
- }
- }
- release_param(parm);
-
- iucv_debug(2, "exiting");
- return b2f0_result;
-}
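-
-/*
- * Scatter receive sketch (hypothetical): each iucv_array_t entry carries
- * a 31-bit buffer address and a length, matching the copy loop above.
- */
-#if 0 /* usage sketch, not compiled */
-static int my_receive_split(__u16 pathid, __u32 msgid, __u32 trgcls,
-    void *hdr, ulong hdrlen, void *body, ulong bodylen)
-{
- iucv_array_t sg[2];
- ulong resid_buf, resid_len;
- int flags1_out;
-
- sg[0].address = (__u32)(ulong) hdr;
- sg[0].length = hdrlen;
- sg[1].address = (__u32)(ulong) body;
- sg[1].length = bodylen;
-
- return iucv_receive_array(pathid, msgid, trgcls, sg, hdrlen + bodylen,
-     &flags1_out, &resid_buf, &resid_len);
-}
-#endif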
-
-/**
- * iucv_reject:
- * @pathid: Path identification number.
- * @msgid: Message ID of the message to reject.
- * @trgcls: Target class of the message to reject.
- * Returns: return code from CP
- *
- * Refuses a specified message. Between the time you are notified of a
- * message and the time that you complete the message, the message may
- * be rejected.
- */
-int
-iucv_reject (__u16 pathid, __u32 msgid, __u32 trgcls)
-{
- iparml_db *parm;
- ulong b2f0_result = 0;
-
- iucv_debug(1, "entering");
- iucv_debug(1, "pathid = %d", pathid);
-
- parm = (iparml_db *)grab_param();
-
- parm->ippathid = pathid;
- parm->ipmsgid = msgid;
- parm->iptrgcls = trgcls;
- parm->ipflags1 = (IPFGMCL | IPFGMID | IPFGPID);
-
- b2f0_result = b2f0(REJECT, parm);
- release_param(parm);
-
- iucv_debug(1, "b2f0_result = %ld", b2f0_result);
- iucv_debug(1, "exiting");
-
- return b2f0_result;
-}
-
-/*
- * Name: iucv_reply
- * Purpose: This function responds to the two-way messages that you
- * receive. You must identify completely the message to
- * which you wish to reply. ie, pathid, msgid, and trgcls.
- * Input: pathid - path identification number
- * msgid - specifies the message ID.
- * trgcls - specifies target class
- * flags1 - option for path
- * IPPRTY- 0x20 - specifies if you want to send priority message
- * buffer - address of reply buffer
- * buflen - length of reply buffer
- * Output: ipbfadr2 - Address of buffer updated by the number
- * of bytes you have moved.
- * ipbfln2f - Contains one of the following values:
- * If the answer buffer is the same length as the reply, this field
- * contains zero.
- * If the answer buffer is longer than the reply, this field contains
- * the number of bytes remaining in the buffer.
- * If the answer buffer is shorter than the reply, this field contains
- * a residual count (that is, the number of bytes remaining in the
- * reply that do not fit into the buffer). In this
- * case b2f0_result = 5.
- * Return: b2f0_result - return code from CP
- * (-EINVAL) - buffer address is NULL
- */
-int
-iucv_reply (__u16 pathid,
- __u32 msgid, __u32 trgcls,
- int flags1,
- void *buffer, ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f)
-{
- iparml_db *parm;
- ulong b2f0_result;
-
- iucv_debug(2, "entering");
-
- if (!buffer)
- return -EINVAL;
-
- parm = (iparml_db *)grab_param();
-
- parm->ipbfadr2 = (__u32) ((ulong) buffer);
- parm->ipbfln2f = (__u32) buflen; /* length of message */
- parm->ippathid = pathid;
- parm->ipmsgid = msgid;
- parm->iptrgcls = trgcls;
- parm->ipflags1 = (__u8) flags1; /* priority message */
-
- b2f0_result = b2f0(REPLY, parm);
-
- if ((!b2f0_result) || (b2f0_result == 5)) {
- if (ipbfadr2)
- *ipbfadr2 = parm->ipbfadr2;
- if (ipbfln2f)
- *ipbfln2f = parm->ipbfln2f;
- }
- release_param(parm);
-
- iucv_debug(2, "exiting");
-
- return b2f0_result;
-}
-
-/*
- * Name: iucv_reply_array
- * Purpose: This function responds to the two-way messages that you
- * receive. You must identify completely the message to
- * which you wish to reply. ie, pathid, msgid, and trgcls.
- * The array identifies a list of addresses and lengths of
- * discontiguous buffers that contains the reply data.
- * Input: pathid - path identification number
- * msgid - specifies the message ID.
- * trgcls - specifies target class
- * flags1 - option for path
- * IPPRTY- specifies if you want to send priority message
- * buffer - address of array of reply buffers
- * buflen - total length of reply buffers
- * Output: ipbfadr2 - Address of buffer which IUCV is currently working on.
- * ipbfln2f - Contains one of the following values:
- * If the answer buffer is the same length as the reply, this field
- * contains zero.
- * If the answer buffer is longer than the reply, this field contains
- * the number of bytes remaining in the buffer.
- * If the answer buffer is shorter than the reply, this field contains
- * a residual count (that is, the number of bytes remaining in the
- * reply that do not fit into the buffer). In this
- * case b2f0_result = 5.
- * Return: b2f0_result - return code from CP
- * (-EINVAL) - buffer address is NULL
-*/
-int
-iucv_reply_array (__u16 pathid,
- __u32 msgid, __u32 trgcls,
- int flags1,
- iucv_array_t * buffer,
- ulong buflen, ulong * ipbfadr2, ulong * ipbfln2f)
-{
- iparml_db *parm;
- ulong b2f0_result;
-
- iucv_debug(2, "entering");
-
- if (!buffer)
- return -EINVAL;
-
- parm = (iparml_db *)grab_param();
-
- parm->ipbfadr2 = (__u32) ((ulong) buffer);
- parm->ipbfln2f = buflen; /* length of message */
- parm->ippathid = pathid;
- parm->ipmsgid = msgid;
- parm->iptrgcls = trgcls;
- parm->ipflags1 = (IPANSLST | flags1);
-
- b2f0_result = b2f0(REPLY, parm);
-
- if ((!b2f0_result) || (b2f0_result == 5)) {
-
- if (ipbfadr2)
- *ipbfadr2 = parm->ipbfadr2;
- if (ipbfln2f)
- *ipbfln2f = parm->ipbfln2f;
- }
- release_param(parm);
-
- iucv_debug(2, "exiting");
-
- return b2f0_result;
-}
-
-/*
- * Name: iucv_reply_prmmsg
- * Purpose: This function responds to the two-way messages that you
- * receive. You must identify completely the message to
- * which you wish to reply. ie, pathid, msgid, and trgcls.
- * Prmmsg signifies the data is moved into the
- * parameter list.
- * Input: pathid - path identification number
- * msgid - specifies the message ID.
- * trgcls - specifies target class
- * flags1 - option for path
- * IPPRTY- specifies if you want to send priority message
- * prmmsg - 8-bytes of data to be placed into the parameter
- * list.
- * Output: NA
- * Return: b2f0_result - return code from CP
-*/
-int
-iucv_reply_prmmsg (__u16 pathid,
- __u32 msgid, __u32 trgcls, int flags1, __u8 prmmsg[8])
-{
- iparml_dpl *parm;
- ulong b2f0_result;
-
- iucv_debug(2, "entering");
-
- parm = (iparml_dpl *)grab_param();
-
- parm->ippathid = pathid;
- parm->ipmsgid = msgid;
- parm->iptrgcls = trgcls;
- memcpy(parm->iprmmsg, prmmsg, sizeof (parm->iprmmsg));
- parm->ipflags1 = (IPRMDATA | flags1);
-
- b2f0_result = b2f0(REPLY, parm);
- release_param(parm);
-
- iucv_debug(2, "exiting");
-
- return b2f0_result;
-}
-
-/**
- * iucv_resume:
- * @pathid: Path identification number
- * @user_data: 16 bytes of user data
- *
- * This function restores communication over a quiesced path.
- * Returns: return code from CP
- */
-int
-iucv_resume (__u16 pathid, __u8 user_data[16])
-{
- iparml_control *parm;
- ulong b2f0_result = 0;
-
- iucv_debug(1, "entering");
- iucv_debug(1, "pathid = %d", pathid);
-
- parm = (iparml_control *)grab_param();
-
- memcpy (parm->ipuser, user_data, sizeof (parm->ipuser));
- parm->ippathid = pathid;
-
- b2f0_result = b2f0(RESUME, parm);
- release_param(parm);
-
- iucv_debug(1, "exiting");
-
- return b2f0_result;
-}
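-
-/*
- * Quiesce/resume sketch (hypothetical): temporarily stop inbound traffic
- * on a path, for example while the receiver reorganizes its buffers.
- */
-#if 0 /* usage sketch, not compiled */
-static void my_pause_path(__u16 pathid, int pause)
-{
- __u8 udata[16] = "MYSRV";
-
- if (pause)
-  iucv_quiesce(pathid, udata);
- else
-  iucv_resume(pathid, udata);
-}
-#endif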
-
-/*
- * Name: iucv_send
- * Purpose: sends messages
- * Input: pathid - ushort, pathid
- * msgid - ulong *, id of message returned to caller
- * trgcls - ulong, target message class
- * srccls - ulong, source message class
- * msgtag - ulong, message tag
- * flags1 - Contains options for this path.
- * IPPRTY - 0x20 - specifies if you want to send a priority message.
- * buffer - pointer to buffer
- * buflen - ulong, length of buffer
- * Output: b2f0_result - return code from b2f0 call
- * msgid - returns message id
- */
-int
-iucv_send (__u16 pathid, __u32 * msgid,
- __u32 trgcls, __u32 srccls,
- __u32 msgtag, int flags1, void *buffer, ulong buflen)
-{
- iparml_db *parm;
- ulong b2f0_result;
-
- iucv_debug(2, "entering");
-
- if (!buffer)
- return -EINVAL;
-
- parm = (iparml_db *)grab_param();
-
- parm->ipbfadr1 = (__u32) ((ulong) buffer);
- parm->ippathid = pathid;
- parm->iptrgcls = trgcls;
- parm->ipbfln1f = (__u32) buflen; /* length of message */
- parm->ipsrccls = srccls;
- parm->ipmsgtag = msgtag;
- parm->ipflags1 = (IPNORPY | flags1); /* one way priority message */
-
- b2f0_result = b2f0(SEND, parm);
-
- if ((!b2f0_result) && (msgid))
- *msgid = parm->ipmsgid;
- release_param(parm);
-
- iucv_debug(2, "exiting");
-
- return b2f0_result;
-}
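-
-/*
- * Send sketch (hypothetical): a one-way message with default classes and
- * no tag. The buffer should stay valid until the MessageComplete
- * interrupt for the returned msgid has been presented.
- */
-#if 0 /* usage sketch, not compiled */
-static int my_send(__u16 pathid, void *buf, ulong len)
-{
- __u32 msgid;
-
- return iucv_send(pathid, &msgid, 0, 0, 0, 0, buf, len);
-}
-#endif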
-
-/*
- * Name: iucv_send_array
- * Purpose: This function transmits data to another application.
- * The buffer argument is the address of an array of
- * addresses and lengths of discontiguous buffers that hold
- * the message text. This is a one-way message and the
- * receiver will not reply to the message.
- * Input: pathid - path identification number
- * trgcls - specifies target class
- * srccls - specifies the source message class
- * msgtag - specifies a tag to be associated with the message
- * flags1 - option for path
- * IPPRTY- specifies if you want to send priority message
- * buffer - address of array of send buffers
- * buflen - total length of send buffers
- * Output: msgid - specifies the message ID.
- * Return: b2f0_result - return code from CP
- * (-EINVAL) - buffer address is NULL
- */
-int
-iucv_send_array (__u16 pathid,
- __u32 * msgid,
- __u32 trgcls,
- __u32 srccls,
- __u32 msgtag, int flags1, iucv_array_t * buffer, ulong buflen)
-{
- iparml_db *parm;
- ulong b2f0_result;
-
- iucv_debug(2, "entering");
-
- if (!buffer)
- return -EINVAL;
-
- parm = (iparml_db *)grab_param();
-
- parm->ippathid = pathid;
- parm->iptrgcls = trgcls;
- parm->ipbfadr1 = (__u32) ((ulong) buffer);
- parm->ipbfln1f = (__u32) buflen; /* length of message */
- parm->ipsrccls = srccls;
- parm->ipmsgtag = msgtag;
- parm->ipflags1 = (IPNORPY | IPBUFLST | flags1);
- b2f0_result = b2f0(SEND, parm);
-
- if ((!b2f0_result) && (msgid))
- *msgid = parm->ipmsgid;
- release_param(parm);
-
- iucv_debug(2, "exiting");
- return b2f0_result;
-}
-
-/*
- * Name: iucv_send_prmmsg
- * Purpose: This function transmits data to another application.
- * Prmmsg specifies that the 8-bytes of data are to be moved
- * into the parameter list. This is a one-way message and the
- * receiver will not reply to the message.
- * Input: pathid - path identification number
- * trgcls - specifies target class
- * srccls - specifies the source message class
- * msgtag - specifies a tag to be associated with the message
- * flags1 - option for path
- * IPPRTY- specifies if you want to send priority message
- * prmmsg - 8-bytes of data to be placed into parameter list
- * Output: msgid - specifies the message ID.
- * Return: b2f0_result - return code from CP
-*/
-int
-iucv_send_prmmsg (__u16 pathid,
- __u32 * msgid,
- __u32 trgcls,
- __u32 srccls, __u32 msgtag, int flags1, __u8 prmmsg[8])
-{
- iparml_dpl *parm;
- ulong b2f0_result;
-
- iucv_debug(2, "entering");
-
- parm = (iparml_dpl *)grab_param();
-
- parm->ippathid = pathid;
- parm->iptrgcls = trgcls;
- parm->ipsrccls = srccls;
- parm->ipmsgtag = msgtag;
- parm->ipflags1 = (IPRMDATA | IPNORPY | flags1);
- memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
-
- b2f0_result = b2f0(SEND, parm);
-
- if ((!b2f0_result) && (msgid))
- *msgid = parm->ipmsgid;
- release_param(parm);
-
- iucv_debug(2, "exiting");
-
- return b2f0_result;
-}
-
-/*
- * Name: iucv_send2way
- * Purpose: This function transmits data to another application.
- * Data to be transmitted is in a buffer. The receiver
- * of the send is expected to reply to the message and
- * a buffer is provided into which IUCV moves the reply
- * to this message.
- * Input: pathid - path identification number
- * trgcls - specifies target class
- * srccls - specifies the source message class
- * msgtag - specifies a tag associated with the message
- * flags1 - option for path
- * IPPRTY- specifies if you want to send priority message
- * buffer - address of send buffer
- * buflen - length of send buffer
- * ansbuf - address of buffer to reply with
- * anslen - length of buffer to reply with
- * Output: msgid - specifies the message ID.
- * Return: b2f0_result - return code from CP
- * (-EINVAL) - buffer or ansbuf address is NULL
- */
-int
-iucv_send2way (__u16 pathid,
- __u32 * msgid,
- __u32 trgcls,
- __u32 srccls,
- __u32 msgtag,
- int flags1,
- void *buffer, ulong buflen, void *ansbuf, ulong anslen)
-{
- iparml_db *parm;
- ulong b2f0_result;
-
- iucv_debug(2, "entering");
-
- if (!buffer || !ansbuf)
- return -EINVAL;
-
- parm = (iparml_db *)grab_param();
-
- parm->ippathid = pathid;
- parm->iptrgcls = trgcls;
- parm->ipbfadr1 = (__u32) ((ulong) buffer);
- parm->ipbfln1f = (__u32) buflen; /* length of message */
- parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
- parm->ipbfln2f = (__u32) anslen;
- parm->ipsrccls = srccls;
- parm->ipmsgtag = msgtag;
- parm->ipflags1 = flags1; /* priority message */
-
- b2f0_result = b2f0(SEND, parm);
-
- if ((!b2f0_result) && (msgid))
- *msgid = parm->ipmsgid;
- release_param(parm);
-
- iucv_debug(2, "exiting");
-
- return b2f0_result;
-}
-
-/*
- * Name: iucv_send2way_array
- * Purpose: This function transmits data to another application.
- * The buffer argument is the address of an array of
- * addresses and lengths of discontiguous buffers that hold
- * the message text. The receiver of the send is expected to
- * reply to the message and a buffer is provided into which
- * IUCV moves the reply to this message.
- * Input: pathid - path identification number
- * trgcls - specifies target class
- * srccls - specifies the source message class
- * msgtag - specifies a tag to be associated with the message
- * flags1 - option for path
- * IPPRTY- specifies if you want to send priority message
- * buffer - address of array of send buffers
- * buflen - total length of send buffers
- * ansbuf - address of buffer to reply with
- * anslen - length of buffer to reply with
- * Output: msgid - specifies the message ID.
- * Return: b2f0_result - return code from CP
- * (-EINVAL) - buffer address is NULL
- */
-int
-iucv_send2way_array (__u16 pathid,
- __u32 * msgid,
- __u32 trgcls,
- __u32 srccls,
- __u32 msgtag,
- int flags1,
- iucv_array_t * buffer,
- ulong buflen, iucv_array_t * ansbuf, ulong anslen)
-{
- iparml_db *parm;
- ulong b2f0_result;
-
- iucv_debug(2, "entering");
-
- if (!buffer || !ansbuf)
- return -EINVAL;
-
- parm = (iparml_db *)grab_param();
-
- parm->ippathid = pathid;
- parm->iptrgcls = trgcls;
- parm->ipbfadr1 = (__u32) ((ulong) buffer);
- parm->ipbfln1f = (__u32) buflen; /* length of message */
- parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
- parm->ipbfln2f = (__u32) anslen;
- parm->ipsrccls = srccls;
- parm->ipmsgtag = msgtag;
- parm->ipflags1 = (IPBUFLST | IPANSLST | flags1);
- b2f0_result = b2f0(SEND, parm);
- if ((!b2f0_result) && (msgid))
- *msgid = parm->ipmsgid;
- release_param(parm);
-
- iucv_debug(2, "exiting");
- return b2f0_result;
-}
-
-/*
- * Name: iucv_send2way_prmmsg
- * Purpose: This function transmits data to another application.
- * Prmmsg specifies that the 8-bytes of data are to be moved
- * into the parameter list. This is a two-way message and the
- * receiver of the message is expected to reply. A buffer
- * is provided into which IUCV moves the reply to this
- * message.
- * Input: pathid - path identification number
- * trgcls - specifies target class
- * srccls - specifies the source message class
- * msgtag - specifies a tag to be associated with the message
- * flags1 - option for path
- * IPPRTY- specifies if you want to send priority message
- * prmmsg - 8-bytes of data to be placed in parameter list
- * ansbuf - address of buffer to reply with
- * anslen - length of buffer to reply with
- * Output: msgid - specifies the message ID.
- * Return: b2f0_result - return code from CP
- * (-EINVAL) - buffer address is NULL
-*/
-int
-iucv_send2way_prmmsg (__u16 pathid,
- __u32 * msgid,
- __u32 trgcls,
- __u32 srccls,
- __u32 msgtag,
- ulong flags1, __u8 prmmsg[8], void *ansbuf, ulong anslen)
-{
- iparml_dpl *parm;
- ulong b2f0_result;
-
- iucv_debug(2, "entering");
-
- if (!ansbuf)
- return -EINVAL;
-
- parm = (iparml_dpl *)grab_param();
-
- parm->ippathid = pathid;
- parm->iptrgcls = trgcls;
- parm->ipsrccls = srccls;
- parm->ipmsgtag = msgtag;
- parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
- parm->ipbfln2f = (__u32) anslen;
- parm->ipflags1 = (IPRMDATA | flags1); /* message in prmlist */
- memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
-
- b2f0_result = b2f0(SEND, parm);
-
- if ((!b2f0_result) && (msgid))
- *msgid = parm->ipmsgid;
- release_param(parm);
-
- iucv_debug(2, "exiting");
-
- return b2f0_result;
-}
-
-/*
- * Name: iucv_send2way_prmmsg_array
- * Purpose: This function transmits data to another application.
- * Prmmsg specifies that the 8-bytes of data are to be moved
- * into the parameter list. This is a two-way message and the
- * receiver of the message is expected to reply. A buffer
- * is provided into which IUCV moves the reply to this
- * message. The ansbuf argument is the address of an
- * array of addresses and lengths of discontiguous buffers
- * that contain the reply.
- * Input: pathid - path identification number
- * trgcls - specifies target class
- * srccls - specifies the source message class
- * msgtag - specifies a tag to be associated with the message
- * flags1 - option for path
- * IPPRTY- specifies if you want to send priority message
- * prmmsg - 8-bytes of data to be placed into the parameter list
- * ansbuf - address of buffer to reply with
- * anslen - length of buffer to reply with
- * Output: msgid - specifies the message ID.
- * Return: b2f0_result - return code from CP
- * (-EINVAL) - ansbuf address is NULL
- */
-int
-iucv_send2way_prmmsg_array (__u16 pathid,
- __u32 * msgid,
- __u32 trgcls,
- __u32 srccls,
- __u32 msgtag,
- int flags1,
- __u8 prmmsg[8],
- iucv_array_t * ansbuf, ulong anslen)
-{
- iparml_dpl *parm;
- ulong b2f0_result;
-
- iucv_debug(2, "entering");
-
- if (!ansbuf)
- return -EINVAL;
-
- parm = (iparml_dpl *)grab_param();
-
- parm->ippathid = pathid;
- parm->iptrgcls = trgcls;
- parm->ipsrccls = srccls;
- parm->ipmsgtag = msgtag;
- parm->ipbfadr2 = (__u32) ((ulong) ansbuf);
- parm->ipbfln2f = (__u32) anslen;
- parm->ipflags1 = (IPRMDATA | IPANSLST | flags1);
- memcpy(parm->iprmmsg, prmmsg, sizeof(parm->iprmmsg));
- b2f0_result = b2f0(SEND, parm);
- if ((!b2f0_result) && (msgid))
- *msgid = parm->ipmsgid;
- release_param(parm);
-
- iucv_debug(2, "exiting");
- return b2f0_result;
-}
-
-void
-iucv_setmask_cpuid (void *result)
-{
- iparml_set_mask *parm;
-
- iucv_debug(1, "entering");
- parm = (iparml_set_mask *)grab_param();
- parm->ipmask = *((__u8*)result);
- *((ulong *)result) = b2f0(SETMASK, parm);
- release_param(parm);
-
- iucv_debug(1, "b2f0_result = %ld", *((ulong *)result));
- iucv_debug(1, "exiting");
-}
-
-/*
- * Name: iucv_setmask
- * Purpose: This function enables or disables the following IUCV
- * external interruptions: Nonpriority and priority message
- * interrupts, nonpriority and priority reply interrupts.
- * Input: SetMaskFlag - options for interrupts
- * 0x80 - Nonpriority_MessagePendingInterruptsFlag
- * 0x40 - Priority_MessagePendingInterruptsFlag
- * 0x20 - Nonpriority_MessageCompletionInterruptsFlag
- * 0x10 - Priority_MessageCompletionInterruptsFlag
- * 0x08 - IUCVControlInterruptsFlag
- * Output: NA
- * Return: b2f0_result - return code from CP
-*/
-int
-iucv_setmask (int SetMaskFlag)
-{
- union {
- ulong result;
- __u8 param;
- } u;
- int cpu;
-
- u.param = SetMaskFlag;
- cpu = get_cpu();
- smp_call_function_on(iucv_setmask_cpuid, &u, 0, 1, iucv_cpuid);
- put_cpu();
-
- return u.result;
-}
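-
-/*
- * Example mask values (derived from the flag list above): 0xF8 enables
- * all five interrupt classes, while 0xF0 (0xF8 & ~0x08) leaves only the
- * IUCV control interrupts disabled.
- */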
-
-/**
- * iucv_sever:
- * @pathid: Path identification number
- * @user_data: 16 bytes of user data
- *
- * This function terminates an iucv path.
- * Returns: return code from CP
- */
-int
-iucv_sever(__u16 pathid, __u8 user_data[16])
-{
- iparml_control *parm;
- ulong b2f0_result = 0;
-
- iucv_debug(1, "entering");
- parm = (iparml_control *)grab_param();
-
- memcpy(parm->ipuser, user_data, sizeof(parm->ipuser));
- parm->ippathid = pathid;
-
- b2f0_result = b2f0(SEVER, parm);
-
- if (!b2f0_result)
- iucv_remove_pathid(pathid);
- release_param(parm);
-
- iucv_debug(1, "exiting");
- return b2f0_result;
-}
-
-/*
- * Interrupt Handlers
- *******************************************************************************/
-
-/**
- * iucv_irq_handler:
- * @regs: Current registers
- * @code: irq code
- *
- * Handles external interrupts coming in from CP.
- * Places the interrupt buffer on a queue and schedules iucv_tasklet_handler().
- */
-static void
-iucv_irq_handler(__u16 code)
-{
- iucv_irqdata *irqdata;
-
- irqdata = kmalloc(sizeof(iucv_irqdata), GFP_ATOMIC);
- if (!irqdata) {
- printk(KERN_WARNING "%s: out of memory\n", __FUNCTION__);
- return;
- }
-
- memcpy(&irqdata->data, iucv_external_int_buffer,
- sizeof(iucv_GeneralInterrupt));
-
- spin_lock(&iucv_irq_queue_lock);
- list_add_tail(&irqdata->queue, &iucv_irq_queue);
- spin_unlock(&iucv_irq_queue_lock);
-
- tasklet_schedule(&iucv_tasklet);
-}
-
-/**
- * iucv_do_int:
- * @int_buf: Pointer to copy of external interrupt buffer
- *
- * The workhorse for handling interrupts queued by iucv_irq_handler().
- * This function is called from the bottom half iucv_tasklet_handler().
- */
-static void
-iucv_do_int(iucv_GeneralInterrupt * int_buf)
-{
- handler *h = NULL;
- struct list_head *lh;
- ulong flags;
- iucv_interrupt_ops_t *interrupt = NULL; /* interrupt addresses */
- __u8 temp_buff1[24], temp_buff2[24]; /* masked handler id. */
- int rc = 0, j = 0;
- __u8 no_listener[16] = "NO LISTENER";
-
- iucv_debug(2, "entering, pathid %d, type %02X",
- int_buf->ippathid, int_buf->iptype);
- iucv_dumpit("External Interrupt Buffer:",
- int_buf, sizeof(iucv_GeneralInterrupt));
-
- ASCEBC (no_listener, 16);
-
- if (int_buf->iptype != 01) {
- if ((int_buf->ippathid) > (max_connections - 1)) {
- printk(KERN_WARNING "%s: Got interrupt with pathid %d"
- " > max_connections (%ld)\n", __FUNCTION__,
- int_buf->ippathid, max_connections - 1);
- } else {
- h = iucv_pathid_table[int_buf->ippathid];
- interrupt = h->interrupt_table;
- iucv_dumpit("Handler:", h, sizeof(handler));
- }
- }
-
- /* end of if statement */
- switch (int_buf->iptype) {
- case 0x01: /* connection pending */
- if (messagesDisabled) {
- iucv_setmask(~0);
- messagesDisabled = 0;
- }
- spin_lock_irqsave(&iucv_lock, flags);
- list_for_each(lh, &iucv_handler_table) {
- h = list_entry(lh, handler, list);
- memcpy(temp_buff1, &(int_buf->ipvmid), 24);
- memcpy(temp_buff2, &(h->id.userid), 24);
- for (j = 0; j < 24; j++) {
- temp_buff1[j] &= (h->id.mask)[j];
- temp_buff2[j] &= (h->id.mask)[j];
- }
-
- iucv_dumpit("temp_buff1:",
- temp_buff1, sizeof(temp_buff1));
- iucv_dumpit("temp_buff2",
- temp_buff2, sizeof(temp_buff2));
-
- if (!memcmp (temp_buff1, temp_buff2, 24)) {
-
- iucv_debug(2,
- "found a matching handler");
- break;
- } else
- h = NULL;
- }
- spin_unlock_irqrestore (&iucv_lock, flags);
- if (h) {
- /* ADD PATH TO PATHID TABLE */
- rc = iucv_add_pathid(int_buf->ippathid, h);
- if (rc) {
- iucv_sever (int_buf->ippathid,
- no_listener);
- iucv_debug(1,
- "add_pathid failed, rc = %d",
- rc);
- } else {
- interrupt = h->interrupt_table;
- if (interrupt->ConnectionPending) {
- EBCASC (int_buf->ipvmid, 8);
- interrupt->ConnectionPending(
- (iucv_ConnectionPending *)int_buf,
- h->pgm_data);
- } else
- iucv_sever(int_buf->ippathid,
- no_listener);
- }
- } else
- iucv_sever(int_buf->ippathid, no_listener);
- break;
-
- case 0x02: /*connection complete */
- if (messagesDisabled) {
- iucv_setmask(~0);
- messagesDisabled = 0;
- }
- if (h) {
- if (interrupt->ConnectionComplete)
- {
- interrupt->ConnectionComplete(
- (iucv_ConnectionComplete *)int_buf,
- h->pgm_data);
- }
- else
- iucv_debug(1,
- "ConnectionComplete not called");
- } else
- iucv_sever(int_buf->ippathid, no_listener);
- break;
-
- case 0x03: /* connection severed */
- if (messagesDisabled) {
- iucv_setmask(~0);
- messagesDisabled = 0;
- }
- if (h) {
- if (interrupt->ConnectionSevered)
- interrupt->ConnectionSevered(
- (iucv_ConnectionSevered *)int_buf,
- h->pgm_data);
-
- else
- iucv_sever (int_buf->ippathid, no_listener);
- } else
- iucv_sever(int_buf->ippathid, no_listener);
- break;
-
- case 0x04: /* connection quiesced */
- if (messagesDisabled) {
- iucv_setmask(~0);
- messagesDisabled = 0;
- }
- if (h) {
- if (interrupt->ConnectionQuiesced)
- interrupt->ConnectionQuiesced(
- (iucv_ConnectionQuiesced *)int_buf,
- h->pgm_data);
- else
- iucv_debug(1,
- "ConnectionQuiesced not called");
- }
- break;
-
- case 0x05: /* connection resumed */
- if (messagesDisabled) {
- iucv_setmask(~0);
- messagesDisabled = 0;
- }
- if (h) {
- if (interrupt->ConnectionResumed)
- interrupt->ConnectionResumed(
- (iucv_ConnectionResumed *)int_buf,
- h->pgm_data);
- else
- iucv_debug(1,
- "ConnectionResumed not called");
- }
- break;
-
- case 0x06: /* priority message complete */
- case 0x07: /* nonpriority message complete */
- if (h) {
- if (interrupt->MessageComplete)
- interrupt->MessageComplete(
- (iucv_MessageComplete *)int_buf,
- h->pgm_data);
- else
- iucv_debug(2,
- "MessageComplete not called");
- }
- break;
-
- case 0x08: /* priority message pending */
- case 0x09: /* nonpriority message pending */
- if (h) {
- if (interrupt->MessagePending)
- interrupt->MessagePending(
- (iucv_MessagePending *) int_buf,
- h->pgm_data);
- else
- iucv_debug(2,
- "MessagePending not called");
- }
- break;
- default: /* unknown iucv type */
- printk(KERN_WARNING "%s: unknown iucv interrupt\n",
- __FUNCTION__);
- break;
- } /* end switch */
-
- iucv_debug(2, "exiting pathid %d, type %02X",
- int_buf->ippathid, int_buf->iptype);
-
- return;
-}
-
-/**
- * iucv_tasklet_handler:
- *
- * This function loops over the queue of irq buffers and runs iucv_do_int()
- * on every queue element.
- */
-static void
-iucv_tasklet_handler(unsigned long ignored)
-{
- struct list_head head;
- struct list_head *next;
- ulong flags;
-
- spin_lock_irqsave(&iucv_irq_queue_lock, flags);
- list_add(&head, &iucv_irq_queue);
- list_del_init(&iucv_irq_queue);
- spin_unlock_irqrestore (&iucv_irq_queue_lock, flags);
-
- next = head.next;
- while (next != &head) {
- iucv_irqdata *p = list_entry(next, iucv_irqdata, queue);
-
- next = next->next;
- iucv_do_int(&p->data);
- kfree(p);
- }
-
- return;
-}
-
-subsys_initcall(iucv_init);
-module_exit(iucv_exit);
-
-/**
- * Export all public stuff
- */
-EXPORT_SYMBOL (iucv_bus);
-EXPORT_SYMBOL (iucv_root);
-EXPORT_SYMBOL (iucv_accept);
-EXPORT_SYMBOL (iucv_connect);
-#if 0
-EXPORT_SYMBOL (iucv_purge);
-EXPORT_SYMBOL (iucv_query_maxconn);
-EXPORT_SYMBOL (iucv_query_bufsize);
-EXPORT_SYMBOL (iucv_quiesce);
-#endif
-EXPORT_SYMBOL (iucv_receive);
-#if 0
-EXPORT_SYMBOL (iucv_receive_array);
-#endif
-EXPORT_SYMBOL (iucv_reject);
-#if 0
-EXPORT_SYMBOL (iucv_reply);
-EXPORT_SYMBOL (iucv_reply_array);
-EXPORT_SYMBOL (iucv_resume);
-#endif
-EXPORT_SYMBOL (iucv_reply_prmmsg);
-EXPORT_SYMBOL (iucv_send);
-EXPORT_SYMBOL (iucv_send2way);
-EXPORT_SYMBOL (iucv_send2way_array);
-EXPORT_SYMBOL (iucv_send2way_prmmsg);
-EXPORT_SYMBOL (iucv_send2way_prmmsg_array);
-#if 0
-EXPORT_SYMBOL (iucv_send_array);
-EXPORT_SYMBOL (iucv_send_prmmsg);
-EXPORT_SYMBOL (iucv_setmask);
-#endif
-EXPORT_SYMBOL (iucv_sever);
-EXPORT_SYMBOL (iucv_register_program);
-EXPORT_SYMBOL (iucv_unregister_program);
diff --git a/drivers/s390/net/iucv.h b/drivers/s390/net/iucv.h
deleted file mode 100644
index 5b6b1b7241c..00000000000
--- a/drivers/s390/net/iucv.h
+++ /dev/null
@@ -1,849 +0,0 @@
-/*
- * drivers/s390/net/iucv.h
- * IUCV base support.
- *
- * S390 version
- * Copyright (C) 2000 IBM Corporation
- * Author(s):Alan Altmark (Alan_Altmark@us.ibm.com)
- * Xenia Tkatschow (xenia@us.ibm.com)
- *
- *
- * Functionality:
- * To use any of the IUCV functions, a program must first register
- * with iucv_register_program(). Once the program has successfully
- * registered, it can use the other functions.
- * For further reference on all IUCV functionality, refer to the
- * CP Programming Services book, also available on the web
- * through www.ibm.com/s390/vm/pubs, manual # SC24-5760
- *
- * Definition of Return Codes
- * - All positive return codes including zero are reflected back
- * from CP except for iucv_register_program. The definition of each
- * return code can be found in the CP Programming Services book,
- * also available on the web through www.ibm.com/s390/vm/pubs, manual # SC24-5760
- * - Return Code of:
- * (-EINVAL) Invalid value
- * (-ENOMEM) storage allocation failed
- * pgmmask defined in iucv_register_program will be set depending on input
- * parameters.
- *
- */
-
-#include <linux/types.h>
-#include <asm/debug.h>
-
-/**
- * Debug Facility stuff
- */
-#define IUCV_DBF_SETUP_NAME "iucv_setup"
-#define IUCV_DBF_SETUP_LEN 32
-#define IUCV_DBF_SETUP_PAGES 2
-#define IUCV_DBF_SETUP_NR_AREAS 1
-#define IUCV_DBF_SETUP_LEVEL 3
-
-#define IUCV_DBF_DATA_NAME "iucv_data"
-#define IUCV_DBF_DATA_LEN 128
-#define IUCV_DBF_DATA_PAGES 2
-#define IUCV_DBF_DATA_NR_AREAS 1
-#define IUCV_DBF_DATA_LEVEL 2
-
-#define IUCV_DBF_TRACE_NAME "iucv_trace"
-#define IUCV_DBF_TRACE_LEN 16
-#define IUCV_DBF_TRACE_PAGES 4
-#define IUCV_DBF_TRACE_NR_AREAS 1
-#define IUCV_DBF_TRACE_LEVEL 3
-
-#define IUCV_DBF_TEXT(name,level,text) \
- do { \
- debug_text_event(iucv_dbf_##name,level,text); \
- } while (0)
-
-#define IUCV_DBF_HEX(name,level,addr,len) \
- do { \
- debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
- } while (0)
-
-DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
-
-#define IUCV_DBF_TEXT_(name,level,text...) \
- do { \
- char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
- sprintf(iucv_dbf_txt_buf, text); \
- debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
- put_cpu_var(iucv_dbf_txt_buf); \
- } while (0)
-
-#define IUCV_DBF_SPRINTF(name,level,text...) \
- do { \
-	debug_sprintf_event(iucv_dbf_trace, level, text); \
- } while (0)
-
-/**
- * some more debug stuff
- */
-#define IUCV_HEXDUMP16(importance,header,ptr) \
-PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
- "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
- *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
- *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
- *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
- *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
- *(((char*)ptr)+12),*(((char*)ptr)+13), \
- *(((char*)ptr)+14),*(((char*)ptr)+15)); \
-PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
- "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
- *(((char*)ptr)+16),*(((char*)ptr)+17), \
- *(((char*)ptr)+18),*(((char*)ptr)+19), \
- *(((char*)ptr)+20),*(((char*)ptr)+21), \
- *(((char*)ptr)+22),*(((char*)ptr)+23), \
- *(((char*)ptr)+24),*(((char*)ptr)+25), \
- *(((char*)ptr)+26),*(((char*)ptr)+27), \
- *(((char*)ptr)+28),*(((char*)ptr)+29), \
- *(((char*)ptr)+30),*(((char*)ptr)+31));
-
-static inline void
-iucv_hex_dump(unsigned char *buf, size_t len)
-{
- size_t i;
-
- for (i = 0; i < len; i++) {
- if (i && !(i % 16))
- printk("\n");
- printk("%02x ", *(buf + i));
- }
- printk("\n");
-}
-/**
- * end of debug stuff
- */
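
A minimal, illustrative fragment showing how the debug macros above are used
(it assumes the iucv_dbf_* areas have already been registered with
debug_register(), as the drivers do at init time; the pathid argument is
hypothetical):

static void example_trace(u16 pathid)
{
	/* Fixed-string trace entry into the "trace" area at level 3. */
	IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
	/* Formatted entry into the "data" area at level 2. */
	IUCV_DBF_TEXT_(data, 2, "interrupt on pathid %d\n", pathid);
}
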
-
-#define uchar unsigned char
-#define ushort unsigned short
-#define ulong unsigned long
-#define iucv_handle_t void *
-
-/* flags1:
- * All flags are defined in the field IPFLAGS1 of each function
- * and can be found in CP Programming Services.
- * IPLOCAL - Indicates the connect can only be satisfied on the
- * local system
- * IPPRTY - Indicates a priority message
- * IPQUSCE - Indicates you do not want to receive messages on a
- * path until an iucv_resume is issued
- * IPRMDATA - Indicates that the message is in the parameter list
- */
-#define IPLOCAL 0x01
-#define IPPRTY 0x20
-#define IPQUSCE 0x40
-#define IPRMDATA 0x80
-
-/* flags1_out:
- * All flags are defined in the output field of IPFLAGS1 for each function
- * and can be found in CP Programming Services.
- * IPNORPY - Specifies this is a one-way message and no reply is expected.
- * IPPRTY - Indicates a priority message is permitted. Defined in flags1.
- */
-#define IPNORPY 0x10
-
-#define Nonpriority_MessagePendingInterruptsFlag 0x80
-#define Priority_MessagePendingInterruptsFlag 0x40
-#define Nonpriority_MessageCompletionInterruptsFlag 0x20
-#define Priority_MessageCompletionInterruptsFlag 0x10
-#define IUCVControlInterruptsFlag 0x08
-#define AllInterrupts 0xf8
-/*
- * Mapping of external interrupt buffers should be used with the corresponding
- * interrupt types.
- * Names: iucv_ConnectionPending -> connection pending
- * iucv_ConnectionComplete -> connection complete
- * iucv_ConnectionSevered -> connection severed
- * iucv_ConnectionQuiesced -> connection quiesced
- * iucv_ConnectionResumed -> connection resumed
- * iucv_MessagePending -> message pending
- * iucv_MessageComplete -> message complete
- */
-typedef struct {
- u16 ippathid;
- uchar ipflags1;
- uchar iptype;
- u16 ipmsglim;
- u16 res1;
- uchar ipvmid[8];
- uchar ipuser[16];
- u32 res3;
- uchar ippollfg;
- uchar res4[3];
-} iucv_ConnectionPending;
-
-typedef struct {
- u16 ippathid;
- uchar ipflags1;
- uchar iptype;
- u16 ipmsglim;
- u16 res1;
- uchar res2[8];
- uchar ipuser[16];
- u32 res3;
- uchar ippollfg;
- uchar res4[3];
-} iucv_ConnectionComplete;
-
-typedef struct {
- u16 ippathid;
- uchar res1;
- uchar iptype;
- u32 res2;
- uchar res3[8];
- uchar ipuser[16];
- u32 res4;
- uchar ippollfg;
- uchar res5[3];
-} iucv_ConnectionSevered;
-
-typedef struct {
- u16 ippathid;
- uchar res1;
- uchar iptype;
- u32 res2;
- uchar res3[8];
- uchar ipuser[16];
- u32 res4;
- uchar ippollfg;
- uchar res5[3];
-} iucv_ConnectionQuiesced;
-
-typedef struct {
- u16 ippathid;
- uchar res1;
- uchar iptype;
- u32 res2;
- uchar res3[8];
- uchar ipuser[16];
- u32 res4;
- uchar ippollfg;
- uchar res5[3];
-} iucv_ConnectionResumed;
-
-typedef struct {
- u16 ippathid;
- uchar ipflags1;
- uchar iptype;
- u32 ipmsgid;
- u32 iptrgcls;
- union u2 {
- u32 iprmmsg1_u32;
- uchar iprmmsg1[4];
- } ln1msg1;
- union u1 {
- u32 ipbfln1f;
- uchar iprmmsg2[4];
- } ln1msg2;
- u32 res1[3];
- u32 ipbfln2f;
- uchar ippollfg;
- uchar res2[3];
-} iucv_MessagePending;
-
-typedef struct {
- u16 ippathid;
- uchar ipflags1;
- uchar iptype;
- u32 ipmsgid;
- u32 ipaudit;
- uchar iprmmsg[8];
- u32 ipsrccls;
- u32 ipmsgtag;
- u32 res;
- u32 ipbfln2f;
- uchar ippollfg;
- uchar res2[3];
-} iucv_MessageComplete;
-
-/*
- * iucv_interrupt_ops_t: Is a vector of functions that handle
- * IUCV interrupts.
- * Parameter list:
- * eib - is a pointer to a 40-byte area described
- * with one of the structures above.
- * pgm_data - this data is strictly for the
- * interrupt handler that is passed by
- * the application. This may be an address
- * or token.
-*/
-typedef struct {
- void (*ConnectionPending) (iucv_ConnectionPending * eib,
- void *pgm_data);
- void (*ConnectionComplete) (iucv_ConnectionComplete * eib,
- void *pgm_data);
- void (*ConnectionSevered) (iucv_ConnectionSevered * eib,
- void *pgm_data);
- void (*ConnectionQuiesced) (iucv_ConnectionQuiesced * eib,
- void *pgm_data);
- void (*ConnectionResumed) (iucv_ConnectionResumed * eib,
- void *pgm_data);
- void (*MessagePending) (iucv_MessagePending * eib, void *pgm_data);
- void (*MessageComplete) (iucv_MessageComplete * eib, void *pgm_data);
-} iucv_interrupt_ops_t;
-
-/*
- *iucv_array_t : Defines buffer array.
- * Each entry of the array holds a 31-bit address and a 31-bit length.
-*/
-typedef struct {
- u32 address;
- u32 length;
-} iucv_array_t __attribute__ ((aligned (8)));
-
-extern struct bus_type iucv_bus;
-extern struct device *iucv_root;
-
-/* -prototypes- */
-/*
- * Name: iucv_register_program
- * Purpose: Registers an application with IUCV
- * Input: pgmname - program identification
- * userid - machine identification
- * pgmmask - indicates which bits in the pgmname and userid combined will be
- * used to determine who is given control
- * ops - address of vector of interrupt handlers
- * pgm_data- application data passed to interrupt handlers
- * Output: NA
- * Return: address of handler
- * (0) - Error occurred, registration not completed.
- * NOTE: Exact cause of failure will be recorded in syslog.
-*/
-iucv_handle_t iucv_register_program (uchar pgmname[16],
- uchar userid[8],
- uchar pgmmask[24],
- iucv_interrupt_ops_t * ops,
- void *pgm_data);
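
As an illustration of the prototype above, a minimal (hypothetical)
registration could look like the sketch below; the all-ones mask requires an
exact match on program name and user id, as the old netiucv code required:

static void example_conn_pending(iucv_ConnectionPending *eib, void *pgm_data)
{
	/* Accept or sever the pending path here. */
}

static iucv_interrupt_ops_t example_ops = {
	.ConnectionPending = example_conn_pending,
};

static uchar example_mask[24] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};

static iucv_handle_t example_register(uchar pgmname[16], uchar userid[8])
{
	/* A NULL return means registration failed (cause is in syslog). */
	return iucv_register_program(pgmname, userid, example_mask,
				     &example_ops, NULL);
}
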
-
-/*
- * Name: iucv_unregister_program
- * Purpose: Unregister application with IUCV
- * Input: address of handler
- * Output: NA
- * Return: (0) - Normal return
- * (-EINVAL) - Internal error, wild pointer
-*/
-int iucv_unregister_program (iucv_handle_t handle);
-
-/*
- * Name: iucv_accept
- * Purpose: This function is issued after the user receives a Connection Pending external
- * interrupt and now wishes to complete the IUCV communication path.
- * Input: pathid - u16 , Path identification number
- * msglim_reqstd - u16, The number of outstanding messages requested.
- * user_data - uchar[16], Data specified by the iucv_connect function.
- * flags1 - int, Contains options for this path.
- * -IPPRTY - 0x20- Specifies if you want to send priority message.
- * -IPRMDATA - 0x80, Specifies whether your program can handle a message
- * in the parameter list.
- * -IPQUSCE - 0x40, Specifies whether you want to quiesce the path being
- * established.
- * handle - iucv_handle_t, Address of handler.
- * pgm_data - void *, Application data passed to interrupt handlers.
- * flags1_out - int * Contains information about the path
- * - IPPRTY - 0x20, Indicates you may send priority messages.
- * msglim - *u16, Number of outstanding messages.
- * Output: return code from CP IUCV call.
-*/
-
-int iucv_accept (u16 pathid,
- u16 msglim_reqstd,
- uchar user_data[16],
- int flags1,
- iucv_handle_t handle,
- void *pgm_data, int *flags1_out, u16 * msglim);
-
-/*
- * Name: iucv_connect
- * Purpose: This function establishes an IUCV path. Although the connect may complete
- * successfully, you are not able to use the path until you receive an IUCV
- * Connection Complete external interrupt.
- * Input: pathid - u16 *, Path identification number
- * msglim_reqstd - u16, Number of outstanding messages requested
- * user_data - uchar[16], 16-byte user data
- * userid - uchar[8], User identification
- * system_name - uchar[8], 8-byte identifying the system name
- * flags1 - int, Contains options for this path.
- * -IPPRTY - 0x20, Specifies if you want to send priority message.
- * -IPRMDATA - 0x80, Specifies whether your program can handle a message
- * in the parameter list.
- * -IPQUSCE - 0x40, Specifies whether you want to quiesce the path being
- * established.
- * -IPLOCAL - 0X01, Allows an application to force the partner to be on
- * the local system. If local is specified then target class cannot be
- * specified.
- * flags1_out - int * Contains information about the path
- * - IPPRTY - 0x20, Indicates you may send priority messages.
- * msglim - * u16, Number of outstanding messages
- * handle - iucv_handle_t, Address of handler
- * pgm_data - void *, Application data passed to interrupt handlers
- * Output: return code from CP IUCV call
- * rc - return code from iucv_declare_buffer
- * -EINVAL - Invalid handle passed by application
- * -EINVAL - Pathid address is NULL
- * add_pathid_result - Return code from internal function add_pathid
-*/
-int
- iucv_connect (u16 * pathid,
- u16 msglim_reqstd,
- uchar user_data[16],
- uchar userid[8],
- uchar system_name[8],
- int flags1,
- int *flags1_out,
- u16 * msglim, iucv_handle_t handle, void *pgm_data);
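
An illustrative connect call matching the prototype above; the handle is
assumed to come from a prior iucv_register_program, and the msglim_reqstd of
10 and the zeroed user_data/system_name are arbitrary choices similar to what
the old netiucv code passed:

static int example_connect(iucv_handle_t handle, uchar peer_userid[8])
{
	uchar user_data[16] = { 0 };
	uchar system_name[8] = { 0 };
	u16 pathid, msglim;
	int flags1_out;
	int rc;

	rc = iucv_connect(&pathid, 10, user_data, peer_userid, system_name,
			  0, &flags1_out, &msglim, handle, NULL);
	/* Even when rc is 0, the path is usable only after the
	 * ConnectionComplete interrupt has been delivered. */
	return rc;
}
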
-
-/*
- * Name: iucv_purge
- * Purpose: This function cancels a message that you have sent.
- * Input: pathid - Path identification number.
- * msgid - Specifies the message ID of the message to be purged.
- * srccls - Specifies the source message class.
- * Output: audit - Contains information about asynchronous error
- * that may have affected the normal completion
- * of this message.
- * Return: Return code from CP IUCV call.
-*/
-int iucv_purge (u16 pathid, u32 msgid, u32 srccls, __u32 *audit);
-/*
- * Name: iucv_query_maxconn
- * Purpose: This function determines the maximum number of communication paths you
- * may establish.
- * Return: maxconn - ulong, Maximum number of connections the virtual machine may
- * establish.
-*/
-ulong iucv_query_maxconn (void);
-
-/*
- * Name: iucv_query_bufsize
- * Purpose: This function determines how large an external interrupt
- * buffer IUCV requires to store information.
- * Return: bufsize - ulong, Size of external interrupt buffer.
- */
-ulong iucv_query_bufsize (void);
-
-/*
- * Name: iucv_quiesce
- * Purpose: This function temporarily suspends incoming messages on an
- * IUCV path. You can later reactivate the path by invoking
- * the iucv_resume function.
- * Input: pathid - Path identification number
- * user_data - 16-bytes of user data
- * Output: NA
- * Return: Return code from CP IUCV call.
-*/
-int iucv_quiesce (u16 pathid, uchar user_data[16]);
-
-/*
- * Name: iucv_receive
- * Purpose: This function receives messages that are being sent to you
- * over established paths. Data will be returned in buffer for length of
- * buflen.
- * Input:
- * pathid - Path identification number.
- * buffer - Address of buffer to receive.
- * buflen - Length of buffer to receive.
- * msgid - Specifies the message ID.
- * trgcls - Specifies target class.
- * Output:
- * flags1_out: int *, Contains information about this path.
- * IPNORPY - 0x10 Specifies this is a one-way message and no reply is
- * expected.
- * IPPRTY - 0x20 Specifies if you want to send priority message.
- * IPRMDATA - 0x80 specifies the data is contained in the parameter list
- * residual_buffer - address of buffer updated by the number
- * of bytes you have received.
- * residual_length -
- * Contains one of the following values, if the receive buffer is:
- * The same length as the message, this field is zero.
- * Longer than the message, this field contains the number of
- * bytes remaining in the buffer.
- * Shorter than the message, this field contains the residual
- * count (that is, the number of bytes remaining in the
- * message that does not fit into the buffer). In this
- * case b2f0_result = 5.
- * Return: Return code from CP IUCV call.
- * (-EINVAL) - buffer address is pointing to NULL
-*/
-int iucv_receive (u16 pathid,
- u32 msgid,
- u32 trgcls,
- void *buffer,
- ulong buflen,
- int *flags1_out,
- ulong * residual_buffer, ulong * residual_length);
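
iucv_receive is typically issued from the MessagePending handler, as the
removed netiucv code did; an illustrative fragment (the fixed-size buffer and
missing error handling are simplifications) that would be hooked in via the
.MessagePending member of the ops vector:

static void example_msg_pending(iucv_MessagePending *eib, void *pgm_data)
{
	static uchar buf[8192];
	ulong residual_len = 0;
	int rc;

	rc = iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls,
			  buf, sizeof(buf), NULL, NULL, &residual_len);
	if (rc)
		return;	/* CP return code or -EINVAL */
	/* buf now holds the data; residual_len reflects how the buffer
	 * length compared to the message length (see above). */
}
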
-
- /*
- * Name: iucv_receive_array
- * Purpose: This function receives messages that are being sent to you
- * over established paths. Data will be returned in first buffer for
- * length of first buffer.
- * Input: pathid - Path identification number.
- * msgid - specifies the message ID.
- * trgcls - Specifies target class.
- * buffer - Address of array of buffers.
- * buflen - Total length of buffers.
- * Output:
- * flags1_out: int *, Contains information about this path.
- * IPNORPY - 0x10 Specifies this is a one-way message and no reply is
- * expected.
- * IPPRTY - 0x20 Specifies if you want to send priority message.
- * IPRMDATA - 0x80 specifies the data is contained in the parameter list
- * residual_buffer - address points to the current list entry IUCV
- * is working on.
- * residual_length -
- * Contains one of the following values, if the receive buffer is:
- * The same length as the message, this field is zero.
- * Longer than the message, this field contains the number of
- * bytes remaining in the buffer.
- * Shorter than the message, this field contains the residual
- * count (that is, the number of bytes remaining in the
- * message that does not fit into the buffer). In this
- * case b2f0_result = 5.
- * Return: Return code from CP IUCV call.
- * (-EINVAL) - Buffer address is NULL.
- */
-int iucv_receive_array (u16 pathid,
- u32 msgid,
- u32 trgcls,
- iucv_array_t * buffer,
- ulong buflen,
- int *flags1_out,
- ulong * residual_buffer, ulong * residual_length);
-
-/*
- * Name: iucv_reject
- * Purpose: The reject function refuses a specified message. Between the
- * time you are notified of a message and the time that you
- * complete the message, the message may be rejected.
- * Input: pathid - Path identification number.
- * msgid - Specifies the message ID.
- * trgcls - Specifies target class.
- * Output: NA
- * Return: Return code from CP IUCV call.
-*/
-int iucv_reject (u16 pathid, u32 msgid, u32 trgcls);
-
-/*
- * Name: iucv_reply
- * Purpose: This function responds to the two-way messages that you
- * receive. You must identify completely the message to
- * which you wish to reply. ie, pathid, msgid, and trgcls.
- * Input: pathid - Path identification number.
- * msgid - Specifies the message ID.
- * trgcls - Specifies target class.
- * flags1 - Option for path.
- * IPPRTY- 0x20, Specifies if you want to send priority message.
- * buffer - Address of reply buffer.
- * buflen - Length of reply buffer.
- * Output: residual_buffer - Address of buffer updated by the number
- * of bytes you have moved.
- * residual_length - Contains one of the following values:
- * If the answer buffer is the same length as the reply, this field
- * contains zero.
- * If the answer buffer is longer than the reply, this field contains
- * the number of bytes remaining in the buffer.
- * If the answer buffer is shorter than the reply, this field contains
- * a residual count (that is, the number of bytes remaining in the
- * reply that does not fit into the buffer). In this
- * case b2f0_result = 5.
- * Return: Return code from CP IUCV call.
- * (-EINVAL) - Buffer address is NULL.
-*/
-int iucv_reply (u16 pathid,
- u32 msgid,
- u32 trgcls,
- int flags1,
- void *buffer, ulong buflen, ulong * residual_buffer,
- ulong * residual_length);
-
-/*
- * Name: iucv_reply_array
- * Purpose: This function responds to the two-way messages that you
- * receive. You must identify completely the message to
- * which you wish to reply. ie, pathid, msgid, and trgcls.
- * The array identifies a list of addresses and lengths of
- * discontiguous buffers that contains the reply data.
- * Input: pathid - Path identification number
- * msgid - Specifies the message ID.
- * trgcls - Specifies target class.
- * flags1 - Option for path.
- * IPPRTY- 0x20, Specifies if you want to send priority message.
- * buffer - Address of array of reply buffers.
- * buflen - Total length of reply buffers.
- * Output: residual_buffer - Address of buffer which IUCV is currently working on.
- * residual_length - Contains one of the following values:
- * If the answer buffer is the same length as the reply, this field
- * contains zero.
- * If the answer buffer is longer than the reply, this field contains
- * the number of bytes remaining in the buffer.
- * If the answer buffer is shorter than the reply, this field contains
- * a residual count (that is, the number of bytes remaining in the
- * reply that does not fit into the buffer). In this
- * case b2f0_result = 5.
- * Return: Return code from CP IUCV call.
- * (-EINVAL) - Buffer address is NULL.
-*/
-int iucv_reply_array (u16 pathid,
- u32 msgid,
- u32 trgcls,
- int flags1,
- iucv_array_t * buffer,
- ulong buflen, ulong * residual_address,
- ulong * residual_length);
-
-/*
- * Name: iucv_reply_prmmsg
- * Purpose: This function responds to the two-way messages that you
- * receive. You must identify completely the message to
- * which you wish to reply. ie, pathid, msgid, and trgcls.
- * Prmmsg signifies the data is moved into the
- * parameter list.
- * Input: pathid - Path identification number.
- * msgid - Specifies the message ID.
- * trgcls - Specifies target class.
- * flags1 - Option for path.
- * IPPRTY- 0x20 Specifies if you want to send priority message.
- * prmmsg - 8-bytes of data to be placed into the parameter
- * list.
- * Output: NA
- * Return: Return code from CP IUCV call.
-*/
-int iucv_reply_prmmsg (u16 pathid,
- u32 msgid, u32 trgcls, int flags1, uchar prmmsg[8]);
-
-/*
- * Name: iucv_resume
- * Purpose: This function restores communications over a quiesced path
- * Input: pathid - Path identification number.
- * user_data - 16-bytes of user data.
- * Output: NA
- * Return: Return code from CP IUCV call.
-*/
-int iucv_resume (u16 pathid, uchar user_data[16]);
-
-/*
- * Name: iucv_send
- * Purpose: This function transmits data to another application.
- * Data to be transmitted is in a buffer and this is a
- * one-way message and the receiver will not reply to the
- * message.
- * Input: pathid - Path identification number.
- * trgcls - Specifies target class.
- * srccls - Specifies the source message class.
- * msgtag - Specifies a tag to be associated with the message.
- * flags1 - Option for path.
- * IPPRTY- 0x20 Specifies if you want to send priority message.
- * buffer - Address of send buffer.
- * buflen - Length of send buffer.
- * Output: msgid - Specifies the message ID.
- * Return: Return code from CP IUCV call.
- * (-EINVAL) - Buffer address is NULL.
-*/
-int iucv_send (u16 pathid,
- u32 * msgid,
- u32 trgcls,
- u32 srccls, u32 msgtag, int flags1, void *buffer, ulong buflen);
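
A one-way send matching the prototype above; this mirrors the call the old
netiucv transmit path issued (target class, source class, tag and flags of
zero are arbitrary here):

static int example_send(u16 pathid, void *data, ulong len)
{
	u32 msgid;

	/* One-way message: the partner is not expected to reply. */
	return iucv_send(pathid, &msgid, 0, 0, 0, 0, data, len);
}
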
-
-/*
- * Name: iucv_send_array
- * Purpose: This function transmits data to another application.
- * The contents of buffer is the address of the array of
- * addresses and lengths of discontiguous buffers that hold
- * the message text. This is a one-way message and the
- * receiver will not reply to the message.
- * Input: pathid - Path identification number.
- * trgcls - Specifies target class.
- * srccls - Specifies the source message class.
- * msgtag - Specifies a tag to be associated with the message.
- * flags1 - Option for path.
- * IPPRTY- specifies if you want to send priority message.
- * buffer - Address of array of send buffers.
- * buflen - Total length of send buffers.
- * Output: msgid - Specifies the message ID.
- * Return: Return code from CP IUCV call.
- * (-EINVAL) - Buffer address is NULL.
-*/
-int iucv_send_array (u16 pathid,
- u32 * msgid,
- u32 trgcls,
- u32 srccls,
- u32 msgtag,
- int flags1, iucv_array_t * buffer, ulong buflen);
-
-/*
- * Name: iucv_send_prmmsg
- * Purpose: This function transmits data to another application.
- * Prmmsg specifies that the 8-bytes of data are to be moved
- * into the parameter list. This is a one-way message and the
- * receiver will not reply to the message.
- * Input: pathid - Path identification number.
- * trgcls - Specifies target class.
- * srccls - Specifies the source message class.
- * msgtag - Specifies a tag to be associated with the message.
- * flags1 - Option for path.
- * IPPRTY- 0x20 specifies if you want to send priority message.
- * prmmsg - 8-bytes of data to be placed into parameter list.
- * Output: msgid - Specifies the message ID.
- * Return: Return code from CP IUCV call.
-*/
-int iucv_send_prmmsg (u16 pathid,
- u32 * msgid,
- u32 trgcls,
- u32 srccls, u32 msgtag, int flags1, uchar prmmsg[8]);
-
-/*
- * Name: iucv_send2way
- * Purpose: This function transmits data to another application.
- * Data to be transmitted is in a buffer. The receiver
- * of the send is expected to reply to the message and
- * a buffer is provided into which IUCV moves the reply
- * to this message.
- * Input: pathid - Path identification number.
- * trgcls - Specifies target class.
- * srccls - Specifies the source message class.
- * msgtag - Specifies a tag associated with the message.
- * flags1 - Option for path.
- * IPPRTY- 0x20 Specifies if you want to send priority message.
- * buffer - Address of send buffer.
- * buflen - Length of send buffer.
- * ansbuf - Address of buffer into which IUCV moves the reply of
- * this message.
- * anslen - Length of the reply buffer.
- * Output: msgid - Specifies the message ID.
- * Return: Return code from CP IUCV call.
- * (-EINVAL) - Buffer or ansbuf address is NULL.
-*/
-int iucv_send2way (u16 pathid,
- u32 * msgid,
- u32 trgcls,
- u32 srccls,
- u32 msgtag,
- int flags1,
- void *buffer, ulong buflen, void *ansbuf, ulong anslen);
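
The two-way variant differs only in supplying a reply buffer up front; IUCV
fills ansbuf and signals completion through the MessageComplete interrupt.
An illustrative fragment:

static int example_send2way(u16 pathid, void *req, ulong reqlen,
			    void *ans, ulong anslen)
{
	u32 msgid;

	return iucv_send2way(pathid, &msgid, 0, 0, 0, 0,
			     req, reqlen, ans, anslen);
}
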
-
-/*
- * Name: iucv_send2way_array
- * Purpose: This function transmits data to another application.
- * The contents of buffer is the address of the array of
- * addresses and lengths of discontiguous buffers that hold
- * the message text. The receiver of the send is expected to
- * reply to the message and a buffer is provided into which
- * IUCV moves the reply to this message.
- * Input: pathid - Path identification number.
- * trgcls - Specifies target class.
- * srccls - Specifies the source message class.
- * msgtag - Specifies a tag to be associated with the message.
- * flags1 - Option for path.
- * IPPRTY- 0x20 Specifies if you want to send priority message.
- * buffer - Address of array of send buffers.
- * buflen - Total length of send buffers.
- * ansbuf - Address of array of buffers into which IUCV moves the reply
- * of this message.
- * anslen - Total length of reply buffers.
- * Output: msgid - Specifies the message ID.
- * Return: Return code from CP IUCV call.
- * (-EINVAL) - Buffer address is NULL.
-*/
-int iucv_send2way_array (u16 pathid,
- u32 * msgid,
- u32 trgcls,
- u32 srccls,
- u32 msgtag,
- int flags1,
- iucv_array_t * buffer,
- ulong buflen, iucv_array_t * ansbuf, ulong anslen);
-
-/*
- * Name: iucv_send2way_prmmsg
- * Purpose: This function transmits data to another application.
- * Prmmsg specifies that the 8-bytes of data are to be moved
- * into the parameter list. This is a two-way message and the
- * receiver of the message is expected to reply. A buffer
- * is provided into which IUCV moves the reply to this
- * message.
- * Input: pathid - Path identification number.
- * trgcls - Specifies target class.
- * srccls - Specifies the source message class.
- * msgtag - Specifies a tag to be associated with the message.
- * flags1 - Option for path.
- * IPPRTY- 0x20 Specifies if you want to send priority message.
- * prmmsg - 8-bytes of data to be placed in parameter list.
- * ansbuf - Address of buffer into which IUCV moves the reply of
- * this message.
- * anslen - Length of the reply buffer.
- * Output: msgid - Specifies the message ID.
- * Return: Return code from CP IUCV call.
- * (-EINVAL) - Buffer address is NULL.
-*/
-int iucv_send2way_prmmsg (u16 pathid,
- u32 * msgid,
- u32 trgcls,
- u32 srccls,
- u32 msgtag,
- ulong flags1,
- uchar prmmsg[8], void *ansbuf, ulong anslen);
-
-/*
- * Name: iucv_send2way_prmmsg_array
- * Purpose: This function transmits data to another application.
- * Prmmsg specifies that the 8-bytes of data are to be moved
- * into the parameter list. This is a two-way message and the
- * receiver of the message is expected to reply. A buffer
- * is provided into which IUCV moves the reply to this
- * message. The contents of ansbuf is the address of the
- * array of addresses and lengths of discontiguous buffers
- * that contain the reply.
- * Input: pathid - Path identification number.
- * trgcls - Specifies target class.
- * srccls - Specifies the source message class.
- * msgtag - Specifies a tag to be associated with the message.
- * flags1 - Option for path.
- * IPPRTY- 0x20 specifies if you want to send priority message.
- * prmmsg - 8-bytes of data to be placed into the parameter list.
- * ansbuf - Address of array of buffers into which IUCV moves the reply
- * of this message.
- * anslen - Total length of reply buffers.
- * Output: msgid - Specifies the message ID.
- * Return: Return code from CP IUCV call.
- * (-EINVAL) - Ansbuf address is NULL.
-*/
-int iucv_send2way_prmmsg_array (u16 pathid,
- u32 * msgid,
- u32 trgcls,
- u32 srccls,
- u32 msgtag,
- int flags1,
- uchar prmmsg[8],
- iucv_array_t * ansbuf, ulong anslen);
-
-/*
- * Name: iucv_setmask
- * Purpose: This function enables or disables the following IUCV
- * external interruptions: Nonpriority and priority message
- * interrupts, nonpriority and priority reply interrupts.
- * Input: SetMaskFlag - options for interrupts
- * 0x80 - Nonpriority_MessagePendingInterruptsFlag
- * 0x40 - Priority_MessagePendingInterruptsFlag
- * 0x20 - Nonpriority_MessageCompletionInterruptsFlag
- * 0x10 - Priority_MessageCompletionInterruptsFlag
- * 0x08 - IUCVControlInterruptsFlag
- * Output: NA
- * Return: Return code from CP IUCV call.
-*/
-int iucv_setmask (int SetMaskFlag);
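
For instance, the removed interrupt handler re-enables message delivery with
iucv_setmask(~0); restricted to the flag bits defined earlier this corresponds
to:

static void example_enable_all(void)
{
	/* Enable all defined IUCV external interrupt classes (0xf8). */
	iucv_setmask(AllInterrupts);
}
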
-
-/*
- * Name: iucv_sever
- * Purpose: This function terminates an IUCV path.
- * Input: pathid - Path identification number.
- * user_data - 16-bytes of user data.
- * Output: NA
- * Return: Return code from CP IUCV call.
- * (-EINVAL) - Internal error, wild pointer.
-*/
-int iucv_sever (u16 pathid, uchar user_data[16]);
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 3346088f47e..6387b483f2b 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1,7 +1,7 @@
/*
* IUCV network driver
*
- * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
*
* Sysfs integration and all bugs therein by Cornelia Huck
@@ -58,13 +58,94 @@
#include <asm/io.h>
#include <asm/uaccess.h>
-#include "iucv.h"
+#include <net/iucv/iucv.h>
#include "fsm.h"
MODULE_AUTHOR
("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
+/**
+ * Debug Facility stuff
+ */
+#define IUCV_DBF_SETUP_NAME "iucv_setup"
+#define IUCV_DBF_SETUP_LEN 32
+#define IUCV_DBF_SETUP_PAGES 2
+#define IUCV_DBF_SETUP_NR_AREAS 1
+#define IUCV_DBF_SETUP_LEVEL 3
+
+#define IUCV_DBF_DATA_NAME "iucv_data"
+#define IUCV_DBF_DATA_LEN 128
+#define IUCV_DBF_DATA_PAGES 2
+#define IUCV_DBF_DATA_NR_AREAS 1
+#define IUCV_DBF_DATA_LEVEL 2
+
+#define IUCV_DBF_TRACE_NAME "iucv_trace"
+#define IUCV_DBF_TRACE_LEN 16
+#define IUCV_DBF_TRACE_PAGES 4
+#define IUCV_DBF_TRACE_NR_AREAS 1
+#define IUCV_DBF_TRACE_LEVEL 3
+
+#define IUCV_DBF_TEXT(name,level,text) \
+ do { \
+ debug_text_event(iucv_dbf_##name,level,text); \
+ } while (0)
+
+#define IUCV_DBF_HEX(name,level,addr,len) \
+ do { \
+ debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
+ } while (0)
+
+DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
+
+#define IUCV_DBF_TEXT_(name,level,text...) \
+ do { \
+ char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \
+ sprintf(iucv_dbf_txt_buf, text); \
+ debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \
+ put_cpu_var(iucv_dbf_txt_buf); \
+ } while (0)
+
+#define IUCV_DBF_SPRINTF(name,level,text...) \
+ do { \
+	debug_sprintf_event(iucv_dbf_trace, level, text); \
+ } while (0)
+
+/**
+ * some more debug stuff
+ */
+#define IUCV_HEXDUMP16(importance,header,ptr) \
+PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
+ *(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
+ *(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
+ *(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
+ *(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
+ *(((char*)ptr)+12),*(((char*)ptr)+13), \
+ *(((char*)ptr)+14),*(((char*)ptr)+15)); \
+PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", \
+ *(((char*)ptr)+16),*(((char*)ptr)+17), \
+ *(((char*)ptr)+18),*(((char*)ptr)+19), \
+ *(((char*)ptr)+20),*(((char*)ptr)+21), \
+ *(((char*)ptr)+22),*(((char*)ptr)+23), \
+ *(((char*)ptr)+24),*(((char*)ptr)+25), \
+ *(((char*)ptr)+26),*(((char*)ptr)+27), \
+ *(((char*)ptr)+28),*(((char*)ptr)+29), \
+ *(((char*)ptr)+30),*(((char*)ptr)+31));
+
+static inline void iucv_hex_dump(unsigned char *buf, size_t len)
+{
+ size_t i;
+
+ for (i = 0; i < len; i++) {
+ if (i && !(i % 16))
+ printk("\n");
+ printk("%02x ", *(buf + i));
+ }
+ printk("\n");
+}
#define PRINTK_HEADER " iucv: " /* for debugging */
@@ -73,6 +154,25 @@ static struct device_driver netiucv_driver = {
.bus = &iucv_bus,
};
+static int netiucv_callback_connreq(struct iucv_path *,
+ u8 ipvmid[8], u8 ipuser[16]);
+static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
+static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
+static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
+static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
+static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
+static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
+
+static struct iucv_handler netiucv_handler = {
+ .path_pending = netiucv_callback_connreq,
+ .path_complete = netiucv_callback_connack,
+ .path_severed = netiucv_callback_connrej,
+ .path_quiesced = netiucv_callback_connsusp,
+ .path_resumed = netiucv_callback_connres,
+ .message_pending = netiucv_callback_rx,
+ .message_complete = netiucv_callback_txdone
+};
+
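
With the new API a driver no longer registers a program; it allocates an
iucv_path and connects it, passing the iucv_handler whose callbacks are
declared above. A condensed, illustrative sketch of the pattern used further
down in this file (the function name is hypothetical):

static int example_path_start(struct iucv_connection *conn)
{
	int rc;

	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
	if (!conn->path)
		return -ENOMEM;
	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
			       NULL, iucvMagic, conn);
	if (rc) {
		kfree(conn->path);
		conn->path = NULL;
	}
	return rc;
}
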
/**
* Per connection profiling data
*/
@@ -92,9 +192,8 @@ struct connection_profile {
* Representation of one iucv connection
*/
struct iucv_connection {
- struct iucv_connection *next;
- iucv_handle_t handle;
- __u16 pathid;
+ struct list_head list;
+ struct iucv_path *path;
struct sk_buff *rx_buff;
struct sk_buff *tx_buff;
struct sk_buff_head collect_queue;
@@ -112,12 +211,9 @@ struct iucv_connection {
/**
* Linked list of all connection structs.
*/
-struct iucv_connection_struct {
- struct iucv_connection *iucv_connections;
- rwlock_t iucv_rwlock;
-};
-
-static struct iucv_connection_struct iucv_conns;
+static struct list_head iucv_connection_list =
+ LIST_HEAD_INIT(iucv_connection_list);
+static rwlock_t iucv_connection_rwlock = RW_LOCK_UNLOCKED;
/**
* Representation of event-data for the
@@ -142,11 +238,11 @@ struct netiucv_priv {
/**
* Link level header for a packet.
*/
-typedef struct ll_header_t {
- __u16 next;
-} ll_header;
+struct ll_header {
+ u16 next;
+};
-#define NETIUCV_HDRLEN (sizeof(ll_header))
+#define NETIUCV_HDRLEN (sizeof(struct ll_header))
#define NETIUCV_BUFSIZE_MAX 32768
#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
@@ -158,36 +254,26 @@ typedef struct ll_header_t {
* Compatibility macros for busy handling
* of network devices.
*/
-static __inline__ void netiucv_clear_busy(struct net_device *dev)
+static inline void netiucv_clear_busy(struct net_device *dev)
{
- clear_bit(0, &(((struct netiucv_priv *)dev->priv)->tbusy));
+ struct netiucv_priv *priv = netdev_priv(dev);
+ clear_bit(0, &priv->tbusy);
netif_wake_queue(dev);
}
-static __inline__ int netiucv_test_and_set_busy(struct net_device *dev)
+static inline int netiucv_test_and_set_busy(struct net_device *dev)
{
+ struct netiucv_priv *priv = netdev_priv(dev);
netif_stop_queue(dev);
- return test_and_set_bit(0, &((struct netiucv_priv *)dev->priv)->tbusy);
+ return test_and_set_bit(0, &priv->tbusy);
}
-static __u8 iucv_host[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
-static __u8 iucvMagic[16] = {
+static u8 iucvMagic[16] = {
0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};
/**
- * This mask means the 16-byte IUCV "magic" and the origin userid must
- * match exactly as specified in order to give connection_pending()
- * control.
- */
-static __u8 netiucv_mask[] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
-};
-
-/**
* Convert an iucv userId to its printable
* form (strip whitespace at end).
*
@@ -195,8 +281,7 @@ static __u8 netiucv_mask[] = {
*
* @returns The printable string (static data!!)
*/
-static __inline__ char *
-netiucv_printname(char *name)
+static inline char *netiucv_printname(char *name)
{
static char tmp[9];
char *p = tmp;
@@ -379,8 +464,7 @@ static debug_info_t *iucv_dbf_trace = NULL;
DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
-static void
-iucv_unregister_dbf_views(void)
+static void iucv_unregister_dbf_views(void)
{
if (iucv_dbf_setup)
debug_unregister(iucv_dbf_setup);
@@ -389,8 +473,7 @@ iucv_unregister_dbf_views(void)
if (iucv_dbf_trace)
debug_unregister(iucv_dbf_trace);
}
-static int
-iucv_register_dbf_views(void)
+static int iucv_register_dbf_views(void)
{
iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
IUCV_DBF_SETUP_PAGES,
@@ -422,125 +505,111 @@ iucv_register_dbf_views(void)
return 0;
}
-/**
+/*
* Callback-wrappers, called from lowlevel iucv layer.
- *****************************************************************************/
+ */
-static void
-netiucv_callback_rx(iucv_MessagePending *eib, void *pgm_data)
+static void netiucv_callback_rx(struct iucv_path *path,
+ struct iucv_message *msg)
{
- struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
+ struct iucv_connection *conn = path->private;
struct iucv_event ev;
ev.conn = conn;
- ev.data = (void *)eib;
-
+ ev.data = msg;
fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
}
-static void
-netiucv_callback_txdone(iucv_MessageComplete *eib, void *pgm_data)
+static void netiucv_callback_txdone(struct iucv_path *path,
+ struct iucv_message *msg)
{
- struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
+ struct iucv_connection *conn = path->private;
struct iucv_event ev;
ev.conn = conn;
- ev.data = (void *)eib;
+ ev.data = msg;
fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
}
-static void
-netiucv_callback_connack(iucv_ConnectionComplete *eib, void *pgm_data)
+static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
- struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
- struct iucv_event ev;
+ struct iucv_connection *conn = path->private;
- ev.conn = conn;
- ev.data = (void *)eib;
- fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, &ev);
+ fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
}
-static void
-netiucv_callback_connreq(iucv_ConnectionPending *eib, void *pgm_data)
+static int netiucv_callback_connreq(struct iucv_path *path,
+ u8 ipvmid[8], u8 ipuser[16])
{
- struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
+ struct iucv_connection *conn = path->private;
struct iucv_event ev;
+ int rc;
- ev.conn = conn;
- ev.data = (void *)eib;
- fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
+	if (memcmp(iucvMagic, ipuser, 16))
+ /* ipuser must match iucvMagic. */
+ return -EINVAL;
+ rc = -EINVAL;
+ read_lock_bh(&iucv_connection_rwlock);
+ list_for_each_entry(conn, &iucv_connection_list, list) {
+ if (strncmp(ipvmid, conn->userid, 8))
+ continue;
+ /* Found a matching connection for this path. */
+ conn->path = path;
+ ev.conn = conn;
+ ev.data = path;
+ fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
+ rc = 0;
+ }
+ read_unlock_bh(&iucv_connection_rwlock);
+ return rc;
}
-static void
-netiucv_callback_connrej(iucv_ConnectionSevered *eib, void *pgm_data)
+static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
- struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
- struct iucv_event ev;
+ struct iucv_connection *conn = path->private;
- ev.conn = conn;
- ev.data = (void *)eib;
- fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, &ev);
+ fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
}
-static void
-netiucv_callback_connsusp(iucv_ConnectionQuiesced *eib, void *pgm_data)
+static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
{
- struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
- struct iucv_event ev;
+ struct iucv_connection *conn = path->private;
- ev.conn = conn;
- ev.data = (void *)eib;
- fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, &ev);
+ fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
}
-static void
-netiucv_callback_connres(iucv_ConnectionResumed *eib, void *pgm_data)
+static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
{
- struct iucv_connection *conn = (struct iucv_connection *)pgm_data;
- struct iucv_event ev;
+ struct iucv_connection *conn = path->private;
- ev.conn = conn;
- ev.data = (void *)eib;
- fsm_event(conn->fsm, CONN_EVENT_CONN_RES, &ev);
-}
-
-static iucv_interrupt_ops_t netiucv_ops = {
- .ConnectionPending = netiucv_callback_connreq,
- .ConnectionComplete = netiucv_callback_connack,
- .ConnectionSevered = netiucv_callback_connrej,
- .ConnectionQuiesced = netiucv_callback_connsusp,
- .ConnectionResumed = netiucv_callback_connres,
- .MessagePending = netiucv_callback_rx,
- .MessageComplete = netiucv_callback_txdone
-};
+ fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
+}
/**
* Dummy NOP action for all statemachines
*/
-static void
-fsm_action_nop(fsm_instance *fi, int event, void *arg)
+static void fsm_action_nop(fsm_instance *fi, int event, void *arg)
{
}
-/**
+/*
* Actions of the connection statemachine
- *****************************************************************************/
+ */
/**
- * Helper function for conn_action_rx()
- * Unpack a just received skb and hand it over to
- * upper layers.
+ * netiucv_unpack_skb
+ * @conn: The connection where this skb has been received.
+ * @pskb: The received skb.
*
- * @param conn The connection where this skb has been received.
- * @param pskb The received skb.
+ * Unpack a just received skb and hand it over to upper layers.
+ * Helper function for conn_action_rx.
*/
-//static __inline__ void
-static void
-netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb)
+static void netiucv_unpack_skb(struct iucv_connection *conn,
+ struct sk_buff *pskb)
{
struct net_device *dev = conn->netdev;
- struct netiucv_priv *privptr = dev->priv;
- __u16 offset = 0;
+ struct netiucv_priv *privptr = netdev_priv(dev);
+ u16 offset = 0;
skb_put(pskb, NETIUCV_HDRLEN);
pskb->dev = dev;
@@ -549,7 +618,7 @@ netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb)
while (1) {
struct sk_buff *skb;
- ll_header *header = (ll_header *)pskb->data;
+ struct ll_header *header = (struct ll_header *) pskb->data;
if (!header->next)
break;
@@ -595,40 +664,37 @@ netiucv_unpack_skb(struct iucv_connection *conn, struct sk_buff *pskb)
}
}
-static void
-conn_action_rx(fsm_instance *fi, int event, void *arg)
+static void conn_action_rx(fsm_instance *fi, int event, void *arg)
{
- struct iucv_event *ev = (struct iucv_event *)arg;
+ struct iucv_event *ev = arg;
struct iucv_connection *conn = ev->conn;
- iucv_MessagePending *eib = (iucv_MessagePending *)ev->data;
- struct netiucv_priv *privptr =(struct netiucv_priv *)conn->netdev->priv;
-
- __u32 msglen = eib->ln1msg2.ipbfln1f;
+ struct iucv_message *msg = ev->data;
+ struct netiucv_priv *privptr = netdev_priv(conn->netdev);
int rc;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
if (!conn->netdev) {
- /* FRITZ: How to tell iucv LL to drop the msg? */
+ iucv_message_reject(conn->path, msg);
PRINT_WARN("Received data for unlinked connection\n");
IUCV_DBF_TEXT(data, 2,
- "Received data for unlinked connection\n");
+ "Received data for unlinked connection\n");
return;
}
- if (msglen > conn->max_buffsize) {
- /* FRITZ: How to tell iucv LL to drop the msg? */
+ if (msg->length > conn->max_buffsize) {
+ iucv_message_reject(conn->path, msg);
privptr->stats.rx_dropped++;
PRINT_WARN("msglen %d > max_buffsize %d\n",
- msglen, conn->max_buffsize);
+ msg->length, conn->max_buffsize);
IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
- msglen, conn->max_buffsize);
+ msg->length, conn->max_buffsize);
return;
}
conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head;
conn->rx_buff->len = 0;
- rc = iucv_receive(conn->pathid, eib->ipmsgid, eib->iptrgcls,
- conn->rx_buff->data, msglen, NULL, NULL, NULL);
- if (rc || msglen < 5) {
+ rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
+ msg->length, NULL);
+ if (rc || msg->length < 5) {
privptr->stats.rx_errors++;
PRINT_WARN("iucv_receive returned %08x\n", rc);
IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
@@ -637,26 +703,26 @@ conn_action_rx(fsm_instance *fi, int event, void *arg)
netiucv_unpack_skb(conn, conn->rx_buff);
}
-static void
-conn_action_txdone(fsm_instance *fi, int event, void *arg)
+static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
{
- struct iucv_event *ev = (struct iucv_event *)arg;
+ struct iucv_event *ev = arg;
struct iucv_connection *conn = ev->conn;
- iucv_MessageComplete *eib = (iucv_MessageComplete *)ev->data;
+ struct iucv_message *msg = ev->data;
+ struct iucv_message txmsg;
struct netiucv_priv *privptr = NULL;
- /* Shut up, gcc! skb is always below 2G. */
- __u32 single_flag = eib->ipmsgtag;
- __u32 txbytes = 0;
- __u32 txpackets = 0;
- __u32 stat_maxcq = 0;
+ u32 single_flag = msg->tag;
+ u32 txbytes = 0;
+ u32 txpackets = 0;
+ u32 stat_maxcq = 0;
struct sk_buff *skb;
unsigned long saveflags;
- ll_header header;
+ struct ll_header header;
+ int rc;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
- if (conn && conn->netdev && conn->netdev->priv)
- privptr = (struct netiucv_priv *)conn->netdev->priv;
+ if (conn && conn->netdev)
+ privptr = netdev_priv(conn->netdev);
conn->prof.tx_pending--;
if (single_flag) {
if ((skb = skb_dequeue(&conn->commit_queue))) {
@@ -688,56 +754,55 @@ conn_action_txdone(fsm_instance *fi, int event, void *arg)
conn->prof.maxmulti = conn->collect_len;
conn->collect_len = 0;
spin_unlock_irqrestore(&conn->collect_lock, saveflags);
- if (conn->tx_buff->len) {
- int rc;
-
- header.next = 0;
- memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
- NETIUCV_HDRLEN);
+ if (conn->tx_buff->len == 0) {
+ fsm_newstate(fi, CONN_STATE_IDLE);
+ return;
+ }
- conn->prof.send_stamp = xtime;
- rc = iucv_send(conn->pathid, NULL, 0, 0, 0, 0,
+ header.next = 0;
+ memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
+ conn->prof.send_stamp = xtime;
+ txmsg.class = 0;
+ txmsg.tag = 0;
+ rc = iucv_message_send(conn->path, &txmsg, 0, 0,
conn->tx_buff->data, conn->tx_buff->len);
- conn->prof.doios_multi++;
- conn->prof.txlen += conn->tx_buff->len;
- conn->prof.tx_pending++;
- if (conn->prof.tx_pending > conn->prof.tx_max_pending)
- conn->prof.tx_max_pending = conn->prof.tx_pending;
- if (rc) {
- conn->prof.tx_pending--;
- fsm_newstate(fi, CONN_STATE_IDLE);
- if (privptr)
- privptr->stats.tx_errors += txpackets;
- PRINT_WARN("iucv_send returned %08x\n", rc);
- IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
- } else {
- if (privptr) {
- privptr->stats.tx_packets += txpackets;
- privptr->stats.tx_bytes += txbytes;
- }
- if (stat_maxcq > conn->prof.maxcqueue)
- conn->prof.maxcqueue = stat_maxcq;
- }
- } else
+ conn->prof.doios_multi++;
+ conn->prof.txlen += conn->tx_buff->len;
+ conn->prof.tx_pending++;
+ if (conn->prof.tx_pending > conn->prof.tx_max_pending)
+ conn->prof.tx_max_pending = conn->prof.tx_pending;
+ if (rc) {
+ conn->prof.tx_pending--;
fsm_newstate(fi, CONN_STATE_IDLE);
+ if (privptr)
+ privptr->stats.tx_errors += txpackets;
+ PRINT_WARN("iucv_send returned %08x\n", rc);
+ IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
+ } else {
+ if (privptr) {
+ privptr->stats.tx_packets += txpackets;
+ privptr->stats.tx_bytes += txbytes;
+ }
+ if (stat_maxcq > conn->prof.maxcqueue)
+ conn->prof.maxcqueue = stat_maxcq;
+ }
}
-static void
-conn_action_connaccept(fsm_instance *fi, int event, void *arg)
+static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
{
- struct iucv_event *ev = (struct iucv_event *)arg;
+ struct iucv_event *ev = arg;
struct iucv_connection *conn = ev->conn;
- iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
+ struct iucv_path *path = ev->data;
struct net_device *netdev = conn->netdev;
- struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
+ struct netiucv_priv *privptr = netdev_priv(netdev);
int rc;
- __u16 msglimit;
- __u8 udata[16];
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
- rc = iucv_accept(eib->ippathid, NETIUCV_QUEUELEN_DEFAULT, udata, 0,
- conn->handle, conn, NULL, &msglimit);
+ conn->path = path;
+ path->msglim = NETIUCV_QUEUELEN_DEFAULT;
+ path->flags = 0;
+ rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
if (rc) {
PRINT_WARN("%s: IUCV accept failed with error %d\n",
netdev->name, rc);
@@ -745,183 +810,126 @@ conn_action_connaccept(fsm_instance *fi, int event, void *arg)
return;
}
fsm_newstate(fi, CONN_STATE_IDLE);
- conn->pathid = eib->ippathid;
- netdev->tx_queue_len = msglimit;
+ netdev->tx_queue_len = conn->path->msglim;
fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
-static void
-conn_action_connreject(fsm_instance *fi, int event, void *arg)
+static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
{
- struct iucv_event *ev = (struct iucv_event *)arg;
- struct iucv_connection *conn = ev->conn;
- struct net_device *netdev = conn->netdev;
- iucv_ConnectionPending *eib = (iucv_ConnectionPending *)ev->data;
- __u8 udata[16];
+ struct iucv_event *ev = arg;
+ struct iucv_path *path = ev->data;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
-
- iucv_sever(eib->ippathid, udata);
- if (eib->ippathid != conn->pathid) {
- PRINT_INFO("%s: IR Connection Pending; "
- "pathid %d does not match original pathid %d\n",
- netdev->name, eib->ippathid, conn->pathid);
- IUCV_DBF_TEXT_(data, 2,
- "connreject: IR pathid %d, conn. pathid %d\n",
- eib->ippathid, conn->pathid);
- iucv_sever(conn->pathid, udata);
- }
+ iucv_path_sever(path, NULL);
}
-static void
-conn_action_connack(fsm_instance *fi, int event, void *arg)
+static void conn_action_connack(fsm_instance *fi, int event, void *arg)
{
- struct iucv_event *ev = (struct iucv_event *)arg;
- struct iucv_connection *conn = ev->conn;
- iucv_ConnectionComplete *eib = (iucv_ConnectionComplete *)ev->data;
+ struct iucv_connection *conn = arg;
struct net_device *netdev = conn->netdev;
- struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
+ struct netiucv_priv *privptr = netdev_priv(netdev);
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
-
fsm_deltimer(&conn->timer);
fsm_newstate(fi, CONN_STATE_IDLE);
- if (eib->ippathid != conn->pathid) {
- PRINT_INFO("%s: IR Connection Complete; "
- "pathid %d does not match original pathid %d\n",
- netdev->name, eib->ippathid, conn->pathid);
- IUCV_DBF_TEXT_(data, 2,
- "connack: IR pathid %d, conn. pathid %d\n",
- eib->ippathid, conn->pathid);
- conn->pathid = eib->ippathid;
- }
- netdev->tx_queue_len = eib->ipmsglim;
+ netdev->tx_queue_len = conn->path->msglim;
fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
-static void
-conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
+static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
{
- struct iucv_connection *conn = (struct iucv_connection *)arg;
- __u8 udata[16];
+ struct iucv_connection *conn = arg;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
-
fsm_deltimer(&conn->timer);
- iucv_sever(conn->pathid, udata);
+ iucv_path_sever(conn->path, NULL);
fsm_newstate(fi, CONN_STATE_STARTWAIT);
}
-static void
-conn_action_connsever(fsm_instance *fi, int event, void *arg)
+static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
{
- struct iucv_event *ev = (struct iucv_event *)arg;
- struct iucv_connection *conn = ev->conn;
+ struct iucv_connection *conn = arg;
struct net_device *netdev = conn->netdev;
- struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
- __u8 udata[16];
+ struct netiucv_priv *privptr = netdev_priv(netdev);
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
fsm_deltimer(&conn->timer);
- iucv_sever(conn->pathid, udata);
+ iucv_path_sever(conn->path, NULL);
PRINT_INFO("%s: Remote dropped connection\n", netdev->name);
IUCV_DBF_TEXT(data, 2,
- "conn_action_connsever: Remote dropped connection\n");
+ "conn_action_connsever: Remote dropped connection\n");
fsm_newstate(fi, CONN_STATE_STARTWAIT);
fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
-static void
-conn_action_start(fsm_instance *fi, int event, void *arg)
+static void conn_action_start(fsm_instance *fi, int event, void *arg)
{
- struct iucv_event *ev = (struct iucv_event *)arg;
- struct iucv_connection *conn = ev->conn;
- __u16 msglimit;
+ struct iucv_connection *conn = arg;
int rc;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
- if (!conn->handle) {
- IUCV_DBF_TEXT(trace, 5, "calling iucv_register_program\n");
- conn->handle =
- iucv_register_program(iucvMagic, conn->userid,
- netiucv_mask,
- &netiucv_ops, conn);
- fsm_newstate(fi, CONN_STATE_STARTWAIT);
- if (!conn->handle) {
- fsm_newstate(fi, CONN_STATE_REGERR);
- conn->handle = NULL;
- IUCV_DBF_TEXT(setup, 2,
- "NULL from iucv_register_program\n");
- return;
- }
-
- PRINT_DEBUG("%s('%s'): registered successfully\n",
- conn->netdev->name, conn->userid);
- }
-
+ fsm_newstate(fi, CONN_STATE_STARTWAIT);
PRINT_DEBUG("%s('%s'): connecting ...\n",
- conn->netdev->name, conn->userid);
+ conn->netdev->name, conn->userid);
- /* We must set the state before calling iucv_connect because the callback
- * handler could be called at any point after the connection request is
- * sent */
+ /*
+ * We must set the state before calling iucv_connect because the
+ * callback handler could be called at any point after the connection
+ * request is sent
+ */
fsm_newstate(fi, CONN_STATE_SETUPWAIT);
- rc = iucv_connect(&(conn->pathid), NETIUCV_QUEUELEN_DEFAULT, iucvMagic,
- conn->userid, iucv_host, 0, NULL, &msglimit,
- conn->handle, conn);
+ conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
+ rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
+ NULL, iucvMagic, conn);
switch (rc) {
- case 0:
- conn->netdev->tx_queue_len = msglimit;
- fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
- CONN_EVENT_TIMER, conn);
- return;
- case 11:
- PRINT_INFO("%s: User %s is currently not available.\n",
- conn->netdev->name,
- netiucv_printname(conn->userid));
- fsm_newstate(fi, CONN_STATE_STARTWAIT);
- return;
- case 12:
- PRINT_INFO("%s: User %s is currently not ready.\n",
- conn->netdev->name,
- netiucv_printname(conn->userid));
- fsm_newstate(fi, CONN_STATE_STARTWAIT);
- return;
- case 13:
- PRINT_WARN("%s: Too many IUCV connections.\n",
- conn->netdev->name);
- fsm_newstate(fi, CONN_STATE_CONNERR);
- break;
- case 14:
- PRINT_WARN(
- "%s: User %s has too many IUCV connections.\n",
- conn->netdev->name,
- netiucv_printname(conn->userid));
- fsm_newstate(fi, CONN_STATE_CONNERR);
- break;
- case 15:
- PRINT_WARN(
- "%s: No IUCV authorization in CP directory.\n",
- conn->netdev->name);
- fsm_newstate(fi, CONN_STATE_CONNERR);
- break;
- default:
- PRINT_WARN("%s: iucv_connect returned error %d\n",
- conn->netdev->name, rc);
- fsm_newstate(fi, CONN_STATE_CONNERR);
- break;
+ case 0:
+ conn->netdev->tx_queue_len = conn->path->msglim;
+ fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
+ CONN_EVENT_TIMER, conn);
+ return;
+ case 11:
+ PRINT_INFO("%s: User %s is currently not available.\n",
+ conn->netdev->name,
+ netiucv_printname(conn->userid));
+ fsm_newstate(fi, CONN_STATE_STARTWAIT);
+ break;
+ case 12:
+ PRINT_INFO("%s: User %s is currently not ready.\n",
+ conn->netdev->name,
+ netiucv_printname(conn->userid));
+ fsm_newstate(fi, CONN_STATE_STARTWAIT);
+ break;
+ case 13:
+ PRINT_WARN("%s: Too many IUCV connections.\n",
+ conn->netdev->name);
+ fsm_newstate(fi, CONN_STATE_CONNERR);
+ break;
+ case 14:
+ PRINT_WARN("%s: User %s has too many IUCV connections.\n",
+ conn->netdev->name,
+ netiucv_printname(conn->userid));
+ fsm_newstate(fi, CONN_STATE_CONNERR);
+ break;
+ case 15:
+ PRINT_WARN("%s: No IUCV authorization in CP directory.\n",
+ conn->netdev->name);
+ fsm_newstate(fi, CONN_STATE_CONNERR);
+ break;
+ default:
+ PRINT_WARN("%s: iucv_connect returned error %d\n",
+ conn->netdev->name, rc);
+ fsm_newstate(fi, CONN_STATE_CONNERR);
+ break;
}
IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
- IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
- iucv_unregister_program(conn->handle);
- conn->handle = NULL;
+ kfree(conn->path);
+ conn->path = NULL;
}
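The hunk above replaces the old iucv_register_program()/iucv_connect() pair with the path-based API. A minimal connect sketch under the same assumptions (NETIUCV_QUEUELEN_DEFAULT, netiucv_handler and iucvMagic come from this driver); unlike the hunk, it also checks the iucv_path_alloc() result before connecting:

/*
 * Hedged sketch of the path-based connect sequence used in
 * conn_action_start() above; not part of the patch itself.
 */
static int example_connect(struct iucv_connection *conn)
{
	int rc;

	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
	if (!conn->path)
		return -ENOMEM;
	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
			       NULL, iucvMagic, conn);
	if (rc) {
		/* Connect failed: no path was established, release it. */
		kfree(conn->path);
		conn->path = NULL;
	}
	return rc;
}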
-static void
-netiucv_purge_skb_queue(struct sk_buff_head *q)
+static void netiucv_purge_skb_queue(struct sk_buff_head *q)
{
struct sk_buff *skb;
@@ -931,36 +939,34 @@ netiucv_purge_skb_queue(struct sk_buff_head *q)
}
}
-static void
-conn_action_stop(fsm_instance *fi, int event, void *arg)
+static void conn_action_stop(fsm_instance *fi, int event, void *arg)
{
- struct iucv_event *ev = (struct iucv_event *)arg;
+ struct iucv_event *ev = arg;
struct iucv_connection *conn = ev->conn;
struct net_device *netdev = conn->netdev;
- struct netiucv_priv *privptr = (struct netiucv_priv *)netdev->priv;
+ struct netiucv_priv *privptr = netdev_priv(netdev);
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
fsm_deltimer(&conn->timer);
fsm_newstate(fi, CONN_STATE_STOPPED);
netiucv_purge_skb_queue(&conn->collect_queue);
- if (conn->handle)
- IUCV_DBF_TEXT(trace, 5, "calling iucv_unregister_program\n");
- iucv_unregister_program(conn->handle);
- conn->handle = NULL;
+ if (conn->path) {
+ IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
+ iucv_path_sever(conn->path, iucvMagic);
+ kfree(conn->path);
+ conn->path = NULL;
+ }
netiucv_purge_skb_queue(&conn->commit_queue);
fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
-static void
-conn_action_inval(fsm_instance *fi, int event, void *arg)
+static void conn_action_inval(fsm_instance *fi, int event, void *arg)
{
- struct iucv_event *ev = (struct iucv_event *)arg;
- struct iucv_connection *conn = ev->conn;
+ struct iucv_connection *conn = arg;
struct net_device *netdev = conn->netdev;
- PRINT_WARN("%s: Cannot connect without username\n",
- netdev->name);
+ PRINT_WARN("%s: Cannot connect without username\n", netdev->name);
IUCV_DBF_TEXT(data, 2, "conn_action_inval called\n");
}
@@ -999,29 +1005,27 @@ static const fsm_node conn_fsm[] = {
static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
-/**
+/*
* Actions for interface - statemachine.
- *****************************************************************************/
+ */
/**
- * Startup connection by sending CONN_EVENT_START to it.
+ * dev_action_start
+ * @fi: An instance of an interface statemachine.
+ * @event: The event, just happened.
+ * @arg: Generic pointer, casted from struct net_device * upon call.
*
- * @param fi An instance of an interface statemachine.
- * @param event The event, just happened.
- * @param arg Generic pointer, casted from struct net_device * upon call.
+ * Startup connection by sending CONN_EVENT_START to it.
*/
-static void
-dev_action_start(fsm_instance *fi, int event, void *arg)
+static void dev_action_start(fsm_instance *fi, int event, void *arg)
{
- struct net_device *dev = (struct net_device *)arg;
- struct netiucv_priv *privptr = dev->priv;
- struct iucv_event ev;
+ struct net_device *dev = arg;
+ struct netiucv_priv *privptr = netdev_priv(dev);
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
- ev.conn = privptr->conn;
fsm_newstate(fi, DEV_STATE_STARTWAIT);
- fsm_event(privptr->conn->fsm, CONN_EVENT_START, &ev);
+ fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
}
/**
@@ -1034,8 +1038,8 @@ dev_action_start(fsm_instance *fi, int event, void *arg)
static void
dev_action_stop(fsm_instance *fi, int event, void *arg)
{
- struct net_device *dev = (struct net_device *)arg;
- struct netiucv_priv *privptr = dev->priv;
+ struct net_device *dev = arg;
+ struct netiucv_priv *privptr = netdev_priv(dev);
struct iucv_event ev;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
@@ -1057,8 +1061,8 @@ dev_action_stop(fsm_instance *fi, int event, void *arg)
static void
dev_action_connup(fsm_instance *fi, int event, void *arg)
{
- struct net_device *dev = (struct net_device *)arg;
- struct netiucv_priv *privptr = dev->priv;
+ struct net_device *dev = arg;
+ struct netiucv_priv *privptr = netdev_priv(dev);
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
@@ -1131,11 +1135,13 @@ static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
*
* @return 0 on success, -ERRNO on failure. (Never fails.)
*/
-static int
-netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
+static int netiucv_transmit_skb(struct iucv_connection *conn,
+ struct sk_buff *skb)
+{
+ struct iucv_message msg;
unsigned long saveflags;
- ll_header header;
- int rc = 0;
+ struct ll_header header;
+ int rc;
if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
int l = skb->len + NETIUCV_HDRLEN;
@@ -1145,11 +1151,12 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
(conn->max_buffsize - NETIUCV_HDRLEN)) {
rc = -EBUSY;
IUCV_DBF_TEXT(data, 2,
- "EBUSY from netiucv_transmit_skb\n");
+ "EBUSY from netiucv_transmit_skb\n");
} else {
atomic_inc(&skb->users);
skb_queue_tail(&conn->collect_queue, skb);
conn->collect_len += l;
+ rc = 0;
}
spin_unlock_irqrestore(&conn->collect_lock, saveflags);
} else {
@@ -1188,9 +1195,10 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
fsm_newstate(conn->fsm, CONN_STATE_TX);
conn->prof.send_stamp = xtime;
- rc = iucv_send(conn->pathid, NULL, 0, 0, 1 /* single_flag */,
- 0, nskb->data, nskb->len);
- /* Shut up, gcc! nskb is always below 2G. */
+ msg.tag = 1;
+ msg.class = 0;
+ rc = iucv_message_send(conn->path, &msg, 0, 0,
+ nskb->data, nskb->len);
conn->prof.doios_single++;
conn->prof.txlen += skb->len;
conn->prof.tx_pending++;
@@ -1200,7 +1208,7 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
struct netiucv_priv *privptr;
fsm_newstate(conn->fsm, CONN_STATE_IDLE);
conn->prof.tx_pending--;
- privptr = (struct netiucv_priv *)conn->netdev->priv;
+ privptr = netdev_priv(conn->netdev);
if (privptr)
privptr->stats.tx_errors++;
if (copied)
@@ -1226,9 +1234,9 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
return rc;
}
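With the new API the send parameters travel in a struct iucv_message instead of separate tag/class arguments. A minimal sketch, assuming an established path and a linear buffer supplied by the caller:

/*
 * Hedged sketch: send one buffer over a connected IUCV path.
 * "path", "buf" and "len" are assumed to be provided by the caller.
 */
static int example_send(struct iucv_path *path, void *buf, size_t len)
{
	struct iucv_message msg;

	msg.tag = 1;	/* opaque tag, echoed in the completion callback */
	msg.class = 0;
	return iucv_message_send(path, &msg, 0, 0, buf, len);
}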
-/**
+/*
* Interface API for upper network layers
- *****************************************************************************/
+ */
/**
* Open an interface.
@@ -1238,9 +1246,11 @@ netiucv_transmit_skb(struct iucv_connection *conn, struct sk_buff *skb) {
*
* @return 0 on success, -ERRNO on failure. (Never fails.)
*/
-static int
-netiucv_open(struct net_device *dev) {
- fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_START,dev);
+static int netiucv_open(struct net_device *dev)
+{
+ struct netiucv_priv *priv = netdev_priv(dev);
+
+ fsm_event(priv->fsm, DEV_EVENT_START, dev);
return 0;
}
@@ -1252,9 +1262,11 @@ netiucv_open(struct net_device *dev) {
*
* @return 0 on success, -ERRNO on failure. (Never fails.)
*/
-static int
-netiucv_close(struct net_device *dev) {
- fsm_event(((struct netiucv_priv *)dev->priv)->fsm, DEV_EVENT_STOP, dev);
+static int netiucv_close(struct net_device *dev)
+{
+ struct netiucv_priv *priv = netdev_priv(dev);
+
+ fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
return 0;
}
@@ -1271,8 +1283,8 @@ netiucv_close(struct net_device *dev) {
*/
static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
{
- int rc = 0;
- struct netiucv_priv *privptr = dev->priv;
+ struct netiucv_priv *privptr = netdev_priv(dev);
+ int rc;
IUCV_DBF_TEXT(trace, 4, __FUNCTION__);
/**
@@ -1312,40 +1324,41 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
return -EBUSY;
}
dev->trans_start = jiffies;
- if (netiucv_transmit_skb(privptr->conn, skb))
- rc = 1;
+ rc = netiucv_transmit_skb(privptr->conn, skb) != 0;
netiucv_clear_busy(dev);
return rc;
}
/**
- * Returns interface statistics of a device.
+ * netiucv_stats
+ * @dev: Pointer to interface struct.
*
- * @param dev Pointer to interface struct.
+ * Returns interface statistics of a device.
*
- * @return Pointer to stats struct of this interface.
+ * Returns pointer to stats struct of this interface.
*/
-static struct net_device_stats *
-netiucv_stats (struct net_device * dev)
+static struct net_device_stats *netiucv_stats (struct net_device * dev)
{
+ struct netiucv_priv *priv = netdev_priv(dev);
+
IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
- return &((struct netiucv_priv *)dev->priv)->stats;
+ return &priv->stats;
}
/**
- * Sets MTU of an interface.
+ * netiucv_change_mtu
+ * @dev: Pointer to interface struct.
+ * @new_mtu: The new MTU to use for this interface.
*
- * @param dev Pointer to interface struct.
- * @param new_mtu The new MTU to use for this interface.
+ * Sets MTU of an interface.
*
- * @return 0 on success, -EINVAL if MTU is out of valid range.
+ * Returns 0 on success, -EINVAL if MTU is out of valid range.
* (valid range is 576 .. NETIUCV_MTU_MAX).
*/
-static int
-netiucv_change_mtu (struct net_device * dev, int new_mtu)
+static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
{
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
- if ((new_mtu < 576) || (new_mtu > NETIUCV_MTU_MAX)) {
+ if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
return -EINVAL;
}
@@ -1353,12 +1366,12 @@ netiucv_change_mtu (struct net_device * dev, int new_mtu)
return 0;
}
-/**
+/*
* attributes in sysfs
- *****************************************************************************/
+ */
-static ssize_t
-user_show (struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t user_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
@@ -1366,8 +1379,8 @@ user_show (struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
}
-static ssize_t
-user_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t user_write(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct netiucv_priv *priv = dev->driver_data;
struct net_device *ndev = priv->conn->netdev;
@@ -1375,80 +1388,70 @@ user_write (struct device *dev, struct device_attribute *attr, const char *buf,
char *tmp;
char username[9];
int i;
- struct iucv_connection **clist = &iucv_conns.iucv_connections;
- unsigned long flags;
+ struct iucv_connection *cp;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
- if (count>9) {
- PRINT_WARN("netiucv: username too long (%d)!\n", (int)count);
+ if (count > 9) {
+ PRINT_WARN("netiucv: username too long (%d)!\n", (int) count);
IUCV_DBF_TEXT_(setup, 2,
- "%d is length of username\n", (int)count);
+ "%d is length of username\n", (int) count);
return -EINVAL;
}
tmp = strsep((char **) &buf, "\n");
- for (i=0, p=tmp; i<8 && *p; i++, p++) {
- if (isalnum(*p) || (*p == '$'))
+ for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
+ if (isalnum(*p) || (*p == '$')) {
username[i]= toupper(*p);
- else if (*p == '\n') {
+ continue;
+ }
+ if (*p == '\n') {
/* trailing lf, grr */
break;
- } else {
- PRINT_WARN("netiucv: Invalid char %c in username!\n",
- *p);
- IUCV_DBF_TEXT_(setup, 2,
- "username: invalid character %c\n",
- *p);
- return -EINVAL;
}
+ PRINT_WARN("netiucv: Invalid char %c in username!\n", *p);
+ IUCV_DBF_TEXT_(setup, 2,
+ "username: invalid character %c\n", *p);
+ return -EINVAL;
}
- while (i<8)
+ while (i < 8)
username[i++] = ' ';
username[8] = '\0';
- if (memcmp(username, priv->conn->userid, 9)) {
- /* username changed */
- if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
- PRINT_WARN(
- "netiucv: device %s active, connected to %s\n",
- dev->bus_id, priv->conn->userid);
- PRINT_WARN("netiucv: user cannot be updated\n");
- IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
- return -EBUSY;
+ if (memcmp(username, priv->conn->userid, 9) &&
+ (ndev->flags & (IFF_UP | IFF_RUNNING))) {
+ /* username changed while the interface is active. */
+ PRINT_WARN("netiucv: device %s active, connected to %s\n",
+ dev->bus_id, priv->conn->userid);
+ PRINT_WARN("netiucv: user cannot be updated\n");
+ IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
+ return -EBUSY;
+ }
+ read_lock_bh(&iucv_connection_rwlock);
+ list_for_each_entry(cp, &iucv_connection_list, list) {
+ if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
+ read_unlock_bh(&iucv_connection_rwlock);
+ PRINT_WARN("netiucv: Connection to %s already "
+ "exists\n", username);
+ return -EEXIST;
}
}
- read_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
- while (*clist) {
- if (!strncmp(username, (*clist)->userid, 9) ||
- ((*clist)->netdev != ndev))
- break;
- clist = &((*clist)->next);
- }
- read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
- if (*clist) {
- PRINT_WARN("netiucv: Connection to %s already exists\n",
- username);
- return -EEXIST;
- }
+ read_unlock_bh(&iucv_connection_rwlock);
memcpy(priv->conn->userid, username, 9);
-
return count;
-
}
static DEVICE_ATTR(user, 0644, user_show, user_write);
-static ssize_t
-buffer_show (struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct netiucv_priv *priv = dev->driver_data;
+static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct netiucv_priv *priv = dev->driver_data;
IUCV_DBF_TEXT(trace, 5, __FUNCTION__);
return sprintf(buf, "%d\n", priv->conn->max_buffsize);
}
-static ssize_t
-buffer_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct netiucv_priv *priv = dev->driver_data;
struct net_device *ndev = priv->conn->netdev;
@@ -1502,8 +1505,8 @@ buffer_write (struct device *dev, struct device_attribute *attr, const char *buf
static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
-static ssize_t
-dev_fsm_show (struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
@@ -1513,8 +1516,8 @@ dev_fsm_show (struct device *dev, struct device_attribute *attr, char *buf)
static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
-static ssize_t
-conn_fsm_show (struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t conn_fsm_show (struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
@@ -1524,8 +1527,8 @@ conn_fsm_show (struct device *dev, struct device_attribute *attr, char *buf)
static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
-static ssize_t
-maxmulti_show (struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t maxmulti_show (struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
@@ -1533,8 +1536,9 @@ maxmulti_show (struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
}
-static ssize_t
-maxmulti_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t maxmulti_write (struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct netiucv_priv *priv = dev->driver_data;
@@ -1545,8 +1549,8 @@ maxmulti_write (struct device *dev, struct device_attribute *attr, const char *b
static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
-static ssize_t
-maxcq_show (struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
@@ -1554,8 +1558,8 @@ maxcq_show (struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
}
-static ssize_t
-maxcq_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct netiucv_priv *priv = dev->driver_data;
@@ -1566,8 +1570,8 @@ maxcq_write (struct device *dev, struct device_attribute *attr, const char *buf,
static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
-static ssize_t
-sdoio_show (struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
@@ -1575,8 +1579,8 @@ sdoio_show (struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
}
-static ssize_t
-sdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct netiucv_priv *priv = dev->driver_data;
@@ -1587,8 +1591,8 @@ sdoio_write (struct device *dev, struct device_attribute *attr, const char *buf,
static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
-static ssize_t
-mdoio_show (struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
@@ -1596,8 +1600,8 @@ mdoio_show (struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
}
-static ssize_t
-mdoio_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct netiucv_priv *priv = dev->driver_data;
@@ -1608,8 +1612,8 @@ mdoio_write (struct device *dev, struct device_attribute *attr, const char *buf,
static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
-static ssize_t
-txlen_show (struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
@@ -1617,8 +1621,8 @@ txlen_show (struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
}
-static ssize_t
-txlen_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct netiucv_priv *priv = dev->driver_data;
@@ -1629,8 +1633,8 @@ txlen_write (struct device *dev, struct device_attribute *attr, const char *buf,
static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
-static ssize_t
-txtime_show (struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
@@ -1638,8 +1642,8 @@ txtime_show (struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
}
-static ssize_t
-txtime_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct netiucv_priv *priv = dev->driver_data;
@@ -1650,8 +1654,8 @@ txtime_write (struct device *dev, struct device_attribute *attr, const char *buf
static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
-static ssize_t
-txpend_show (struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
@@ -1659,8 +1663,8 @@ txpend_show (struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
}
-static ssize_t
-txpend_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct netiucv_priv *priv = dev->driver_data;
@@ -1671,8 +1675,8 @@ txpend_write (struct device *dev, struct device_attribute *attr, const char *buf
static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
-static ssize_t
-txmpnd_show (struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct netiucv_priv *priv = dev->driver_data;
@@ -1680,8 +1684,8 @@ txmpnd_show (struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
}
-static ssize_t
-txmpnd_write (struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct netiucv_priv *priv = dev->driver_data;
@@ -1721,8 +1725,7 @@ static struct attribute_group netiucv_stat_attr_group = {
.attrs = netiucv_stat_attrs,
};
-static inline int
-netiucv_add_files(struct device *dev)
+static inline int netiucv_add_files(struct device *dev)
{
int ret;
@@ -1736,18 +1739,16 @@ netiucv_add_files(struct device *dev)
return ret;
}
-static inline void
-netiucv_remove_files(struct device *dev)
+static inline void netiucv_remove_files(struct device *dev)
{
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
sysfs_remove_group(&dev->kobj, &netiucv_stat_attr_group);
sysfs_remove_group(&dev->kobj, &netiucv_attr_group);
}
-static int
-netiucv_register_device(struct net_device *ndev)
+static int netiucv_register_device(struct net_device *ndev)
{
- struct netiucv_priv *priv = ndev->priv;
+ struct netiucv_priv *priv = netdev_priv(ndev);
struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
int ret;
@@ -1786,8 +1787,7 @@ out_unreg:
return ret;
}
-static void
-netiucv_unregister_device(struct device *dev)
+static void netiucv_unregister_device(struct device *dev)
{
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
netiucv_remove_files(dev);
@@ -1798,107 +1798,89 @@ netiucv_unregister_device(struct device *dev)
* Allocate and initialize a new connection structure.
* Add it to the list of netiucv connections;
*/
-static struct iucv_connection *
-netiucv_new_connection(struct net_device *dev, char *username)
-{
- unsigned long flags;
- struct iucv_connection **clist = &iucv_conns.iucv_connections;
- struct iucv_connection *conn =
- kzalloc(sizeof(struct iucv_connection), GFP_KERNEL);
-
- if (conn) {
- skb_queue_head_init(&conn->collect_queue);
- skb_queue_head_init(&conn->commit_queue);
- spin_lock_init(&conn->collect_lock);
- conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
- conn->netdev = dev;
-
- conn->rx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
- GFP_KERNEL | GFP_DMA);
- if (!conn->rx_buff) {
- kfree(conn);
- return NULL;
- }
- conn->tx_buff = alloc_skb(NETIUCV_BUFSIZE_DEFAULT,
- GFP_KERNEL | GFP_DMA);
- if (!conn->tx_buff) {
- kfree_skb(conn->rx_buff);
- kfree(conn);
- return NULL;
- }
- conn->fsm = init_fsm("netiucvconn", conn_state_names,
- conn_event_names, NR_CONN_STATES,
- NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
- GFP_KERNEL);
- if (!conn->fsm) {
- kfree_skb(conn->tx_buff);
- kfree_skb(conn->rx_buff);
- kfree(conn);
- return NULL;
- }
- fsm_settimer(conn->fsm, &conn->timer);
- fsm_newstate(conn->fsm, CONN_STATE_INVALID);
-
- if (username) {
- memcpy(conn->userid, username, 9);
- fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
- }
+static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
+ char *username)
+{
+ struct iucv_connection *conn;
- write_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
- conn->next = *clist;
- *clist = conn;
- write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
+ conn = kzalloc(sizeof(*conn), GFP_KERNEL);
+ if (!conn)
+ goto out;
+ skb_queue_head_init(&conn->collect_queue);
+ skb_queue_head_init(&conn->commit_queue);
+ spin_lock_init(&conn->collect_lock);
+ conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
+ conn->netdev = dev;
+
+ conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
+ if (!conn->rx_buff)
+ goto out_conn;
+ conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
+ if (!conn->tx_buff)
+ goto out_rx;
+ conn->fsm = init_fsm("netiucvconn", conn_state_names,
+ conn_event_names, NR_CONN_STATES,
+ NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
+ GFP_KERNEL);
+ if (!conn->fsm)
+ goto out_tx;
+
+ fsm_settimer(conn->fsm, &conn->timer);
+ fsm_newstate(conn->fsm, CONN_STATE_INVALID);
+
+ if (username) {
+ memcpy(conn->userid, username, 9);
+ fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
}
+
+ write_lock_bh(&iucv_connection_rwlock);
+ list_add_tail(&conn->list, &iucv_connection_list);
+ write_unlock_bh(&iucv_connection_rwlock);
return conn;
+
+out_tx:
+ kfree_skb(conn->tx_buff);
+out_rx:
+ kfree_skb(conn->rx_buff);
+out_conn:
+ kfree(conn);
+out:
+ return NULL;
}
/**
* Release a connection structure and remove it from the
* list of netiucv connections.
*/
-static void
-netiucv_remove_connection(struct iucv_connection *conn)
+static void netiucv_remove_connection(struct iucv_connection *conn)
{
- struct iucv_connection **clist = &iucv_conns.iucv_connections;
- unsigned long flags;
-
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
- if (conn == NULL)
- return;
- write_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
- while (*clist) {
- if (*clist == conn) {
- *clist = conn->next;
- write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
- if (conn->handle) {
- iucv_unregister_program(conn->handle);
- conn->handle = NULL;
- }
- fsm_deltimer(&conn->timer);
- kfree_fsm(conn->fsm);
- kfree_skb(conn->rx_buff);
- kfree_skb(conn->tx_buff);
- return;
- }
- clist = &((*clist)->next);
+ write_lock_bh(&iucv_connection_rwlock);
+ list_del_init(&conn->list);
+ write_unlock_bh(&iucv_connection_rwlock);
+ if (conn->path) {
+ iucv_path_sever(conn->path, iucvMagic);
+ kfree(conn->path);
+ conn->path = NULL;
}
- write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
+ fsm_deltimer(&conn->timer);
+ kfree_fsm(conn->fsm);
+ kfree_skb(conn->rx_buff);
+ kfree_skb(conn->tx_buff);
}
/**
* Release everything of a net device.
*/
-static void
-netiucv_free_netdevice(struct net_device *dev)
+static void netiucv_free_netdevice(struct net_device *dev)
{
- struct netiucv_priv *privptr;
+ struct netiucv_priv *privptr = netdev_priv(dev);
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (!dev)
return;
- privptr = (struct netiucv_priv *)dev->priv;
if (privptr) {
if (privptr->conn)
netiucv_remove_connection(privptr->conn);
@@ -1913,11 +1895,8 @@ netiucv_free_netdevice(struct net_device *dev)
/**
* Initialize a net device. (Called from kernel in alloc_netdev())
*/
-static void
-netiucv_setup_netdevice(struct net_device *dev)
+static void netiucv_setup_netdevice(struct net_device *dev)
{
- memset(dev->priv, 0, sizeof(struct netiucv_priv));
-
dev->mtu = NETIUCV_MTU_DEFAULT;
dev->hard_start_xmit = netiucv_tx;
dev->open = netiucv_open;
@@ -1936,8 +1915,7 @@ netiucv_setup_netdevice(struct net_device *dev)
/**
* Allocate and initialize everything of a net device.
*/
-static struct net_device *
-netiucv_init_netdevice(char *username)
+static struct net_device *netiucv_init_netdevice(char *username)
{
struct netiucv_priv *privptr;
struct net_device *dev;
@@ -1946,40 +1924,40 @@ netiucv_init_netdevice(char *username)
netiucv_setup_netdevice);
if (!dev)
return NULL;
- if (dev_alloc_name(dev, dev->name) < 0) {
- free_netdev(dev);
- return NULL;
- }
+ if (dev_alloc_name(dev, dev->name) < 0)
+ goto out_netdev;
- privptr = (struct netiucv_priv *)dev->priv;
+ privptr = netdev_priv(dev);
privptr->fsm = init_fsm("netiucvdev", dev_state_names,
dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
- if (!privptr->fsm) {
- free_netdev(dev);
- return NULL;
- }
+ if (!privptr->fsm)
+ goto out_netdev;
+
privptr->conn = netiucv_new_connection(dev, username);
if (!privptr->conn) {
- kfree_fsm(privptr->fsm);
- free_netdev(dev);
IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
- return NULL;
+ goto out_fsm;
}
fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
-
return dev;
+
+out_fsm:
+ kfree_fsm(privptr->fsm);
+out_netdev:
+ free_netdev(dev);
+ return NULL;
}
-static ssize_t
-conn_write(struct device_driver *drv, const char *buf, size_t count)
+static ssize_t conn_write(struct device_driver *drv,
+ const char *buf, size_t count)
{
- char *p;
+ const char *p;
char username[9];
- int i, ret;
+ int i, rc;
struct net_device *dev;
- struct iucv_connection **clist = &iucv_conns.iucv_connections;
- unsigned long flags;
+ struct netiucv_priv *priv;
+ struct iucv_connection *cp;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
if (count>9) {
@@ -1988,83 +1966,82 @@ conn_write(struct device_driver *drv, const char *buf, size_t count)
return -EINVAL;
}
- for (i=0, p=(char *)buf; i<8 && *p; i++, p++) {
- if (isalnum(*p) || (*p == '$'))
- username[i]= toupper(*p);
- else if (*p == '\n') {
+ for (i = 0, p = buf; i < 8 && *p; i++, p++) {
+ if (isalnum(*p) || *p == '$') {
+ username[i] = toupper(*p);
+ continue;
+ }
+ if (*p == '\n')
/* trailing lf, grr */
break;
- } else {
- PRINT_WARN("netiucv: Invalid character in username!\n");
- IUCV_DBF_TEXT_(setup, 2,
- "conn_write: invalid character %c\n", *p);
- return -EINVAL;
- }
+ PRINT_WARN("netiucv: Invalid character in username!\n");
+ IUCV_DBF_TEXT_(setup, 2,
+ "conn_write: invalid character %c\n", *p);
+ return -EINVAL;
}
- while (i<8)
+ while (i < 8)
username[i++] = ' ';
username[8] = '\0';
- read_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
- while (*clist) {
- if (!strncmp(username, (*clist)->userid, 9))
- break;
- clist = &((*clist)->next);
- }
- read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
- if (*clist) {
- PRINT_WARN("netiucv: Connection to %s already exists\n",
- username);
- return -EEXIST;
+ read_lock_bh(&iucv_connection_rwlock);
+ list_for_each_entry(cp, &iucv_connection_list, list) {
+ if (!strncmp(username, cp->userid, 9)) {
+ read_unlock_bh(&iucv_connection_rwlock);
+ PRINT_WARN("netiucv: Connection to %s already "
+ "exists\n", username);
+ return -EEXIST;
+ }
}
+ read_unlock_bh(&iucv_connection_rwlock);
+
dev = netiucv_init_netdevice(username);
if (!dev) {
- PRINT_WARN(
- "netiucv: Could not allocate network device structure "
- "for user '%s'\n", netiucv_printname(username));
+ PRINT_WARN("netiucv: Could not allocate network device "
+ "structure for user '%s'\n",
+ netiucv_printname(username));
IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
return -ENODEV;
}
- if ((ret = netiucv_register_device(dev))) {
+ rc = netiucv_register_device(dev);
+ if (rc) {
IUCV_DBF_TEXT_(setup, 2,
- "ret %d from netiucv_register_device\n", ret);
+ "ret %d from netiucv_register_device\n", rc);
goto out_free_ndev;
}
/* sysfs magic */
- SET_NETDEV_DEV(dev,
- (struct device*)((struct netiucv_priv*)dev->priv)->dev);
+ priv = netdev_priv(dev);
+ SET_NETDEV_DEV(dev, priv->dev);
- if ((ret = register_netdev(dev))) {
- netiucv_unregister_device((struct device*)
- ((struct netiucv_priv*)dev->priv)->dev);
- goto out_free_ndev;
- }
+ rc = register_netdev(dev);
+ if (rc)
+ goto out_unreg;
PRINT_INFO("%s: '%s'\n", dev->name, netiucv_printname(username));
return count;
+out_unreg:
+ netiucv_unregister_device(priv->dev);
out_free_ndev:
PRINT_WARN("netiucv: Could not register '%s'\n", dev->name);
IUCV_DBF_TEXT(setup, 2, "conn_write: could not register\n");
netiucv_free_netdevice(dev);
- return ret;
+ return rc;
}
static DRIVER_ATTR(connection, 0200, NULL, conn_write);
-static ssize_t
-remove_write (struct device_driver *drv, const char *buf, size_t count)
+static ssize_t remove_write (struct device_driver *drv,
+ const char *buf, size_t count)
{
- struct iucv_connection **clist = &iucv_conns.iucv_connections;
- unsigned long flags;
+ struct iucv_connection *cp;
struct net_device *ndev;
struct netiucv_priv *priv;
struct device *dev;
char name[IFNAMSIZ];
- char *p;
+ const char *p;
int i;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
@@ -2072,33 +2049,27 @@ remove_write (struct device_driver *drv, const char *buf, size_t count)
if (count >= IFNAMSIZ)
count = IFNAMSIZ - 1;;
- for (i=0, p=(char *)buf; i<count && *p; i++, p++) {
- if ((*p == '\n') || (*p == ' ')) {
+ for (i = 0, p = buf; i < count && *p; i++, p++) {
+ if (*p == '\n' || *p == ' ')
/* trailing lf, grr */
break;
- } else {
- name[i]=*p;
- }
+ name[i] = *p;
}
name[i] = '\0';
- read_lock_irqsave(&iucv_conns.iucv_rwlock, flags);
- while (*clist) {
- ndev = (*clist)->netdev;
- priv = (struct netiucv_priv*)ndev->priv;
+ read_lock_bh(&iucv_connection_rwlock);
+ list_for_each_entry(cp, &iucv_connection_list, list) {
+ ndev = cp->netdev;
+ priv = netdev_priv(ndev);
dev = priv->dev;
-
- if (strncmp(name, ndev->name, count)) {
- clist = &((*clist)->next);
- continue;
- }
- read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
+ if (strncmp(name, ndev->name, count))
+ continue;
+ read_unlock_bh(&iucv_connection_rwlock);
if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
- PRINT_WARN(
- "netiucv: net device %s active with peer %s\n",
- ndev->name, priv->conn->userid);
+ PRINT_WARN("netiucv: net device %s active with peer "
+ "%s\n", ndev->name, priv->conn->userid);
PRINT_WARN("netiucv: %s cannot be removed\n",
- ndev->name);
+ ndev->name);
IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
return -EBUSY;
}
@@ -2106,7 +2077,7 @@ remove_write (struct device_driver *drv, const char *buf, size_t count)
netiucv_unregister_device(dev);
return count;
}
- read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags);
+ read_unlock_bh(&iucv_connection_rwlock);
PRINT_WARN("netiucv: net device %s unknown\n", name);
IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
return -EINVAL;
@@ -2114,67 +2085,86 @@ remove_write (struct device_driver *drv, const char *buf, size_t count)
static DRIVER_ATTR(remove, 0200, NULL, remove_write);
-static void
-netiucv_banner(void)
+static struct attribute * netiucv_drv_attrs[] = {
+ &driver_attr_connection.attr,
+ &driver_attr_remove.attr,
+ NULL,
+};
+
+static struct attribute_group netiucv_drv_attr_group = {
+ .attrs = netiucv_drv_attrs,
+};
+
+static void netiucv_banner(void)
{
PRINT_INFO("NETIUCV driver initialized\n");
}
-static void __exit
-netiucv_exit(void)
+static void __exit netiucv_exit(void)
{
+ struct iucv_connection *cp;
+ struct net_device *ndev;
+ struct netiucv_priv *priv;
+ struct device *dev;
+
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
- while (iucv_conns.iucv_connections) {
- struct net_device *ndev = iucv_conns.iucv_connections->netdev;
- struct netiucv_priv *priv = (struct netiucv_priv*)ndev->priv;
- struct device *dev = priv->dev;
+ while (!list_empty(&iucv_connection_list)) {
+ cp = list_entry(iucv_connection_list.next,
+ struct iucv_connection, list);
+ list_del(&cp->list);
+ ndev = cp->netdev;
+ priv = netdev_priv(ndev);
+ dev = priv->dev;
unregister_netdev(ndev);
netiucv_unregister_device(dev);
}
- driver_remove_file(&netiucv_driver, &driver_attr_connection);
- driver_remove_file(&netiucv_driver, &driver_attr_remove);
+ sysfs_remove_group(&netiucv_driver.kobj, &netiucv_drv_attr_group);
driver_unregister(&netiucv_driver);
+ iucv_unregister(&netiucv_handler, 1);
iucv_unregister_dbf_views();
PRINT_INFO("NETIUCV driver unloaded\n");
return;
}
-static int __init
-netiucv_init(void)
+static int __init netiucv_init(void)
{
- int ret;
+ int rc;
- ret = iucv_register_dbf_views();
- if (ret) {
- PRINT_WARN("netiucv_init failed, "
- "iucv_register_dbf_views rc = %d\n", ret);
- return ret;
- }
+ rc = iucv_register_dbf_views();
+ if (rc)
+ goto out;
+ rc = iucv_register(&netiucv_handler, 1);
+ if (rc)
+ goto out_dbf;
IUCV_DBF_TEXT(trace, 3, __FUNCTION__);
- ret = driver_register(&netiucv_driver);
- if (ret) {
+ rc = driver_register(&netiucv_driver);
+ if (rc) {
PRINT_ERR("NETIUCV: failed to register driver.\n");
- IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", ret);
- iucv_unregister_dbf_views();
- return ret;
+ IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
+ goto out_iucv;
}
- /* Add entry for specifying connections. */
- ret = driver_create_file(&netiucv_driver, &driver_attr_connection);
- if (!ret) {
- ret = driver_create_file(&netiucv_driver, &driver_attr_remove);
- netiucv_banner();
- rwlock_init(&iucv_conns.iucv_rwlock);
- } else {
- PRINT_ERR("NETIUCV: failed to add driver attribute.\n");
- IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret);
- driver_unregister(&netiucv_driver);
- iucv_unregister_dbf_views();
+ rc = sysfs_create_group(&netiucv_driver.kobj, &netiucv_drv_attr_group);
+ if (rc) {
+ PRINT_ERR("NETIUCV: failed to add driver attributes.\n");
+ IUCV_DBF_TEXT_(setup, 2,
+ "ret %d - netiucv_drv_attr_group\n", rc);
+ goto out_driver;
}
- return ret;
+ netiucv_banner();
+ return rc;
+
+out_driver:
+ driver_unregister(&netiucv_driver);
+out_iucv:
+ iucv_unregister(&netiucv_handler, 1);
+out_dbf:
+ iucv_unregister_dbf_views();
+out:
+ return rc;
}
module_init(netiucv_init);
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c
index b8179c27ceb..3ccca5871fd 100644
--- a/drivers/s390/net/smsgiucv.c
+++ b/drivers/s390/net/smsgiucv.c
@@ -1,7 +1,7 @@
/*
* IUCV special message driver
*
- * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify
@@ -23,10 +23,10 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/device.h>
+#include <net/iucv/iucv.h>
#include <asm/cpcmd.h>
#include <asm/ebcdic.h>
-
-#include "iucv.h"
+#include "smsgiucv.h"
struct smsg_callback {
struct list_head list;
@@ -39,38 +39,46 @@ MODULE_AUTHOR
("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)");
MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver");
-static iucv_handle_t smsg_handle;
-static unsigned short smsg_pathid;
+static struct iucv_path *smsg_path;
+
static DEFINE_SPINLOCK(smsg_list_lock);
static struct list_head smsg_list = LIST_HEAD_INIT(smsg_list);
-static void
-smsg_connection_complete(iucv_ConnectionComplete *eib, void *pgm_data)
+static int smsg_path_pending(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
+static void smsg_message_pending(struct iucv_path *, struct iucv_message *);
+
+static struct iucv_handler smsg_handler = {
+ .path_pending = smsg_path_pending,
+ .message_pending = smsg_message_pending,
+};
+
+static int smsg_path_pending(struct iucv_path *path, u8 ipvmid[8],
+ u8 ipuser[16])
{
+ if (strncmp(ipvmid, "*MSG ", sizeof(ipvmid)) != 0)
+ return -EINVAL;
+ /* Path pending from *MSG. */
+ return iucv_path_accept(path, &smsg_handler, "SMSGIUCV ", NULL);
}
-
-static void
-smsg_message_pending(iucv_MessagePending *eib, void *pgm_data)
+static void smsg_message_pending(struct iucv_path *path,
+ struct iucv_message *msg)
{
struct smsg_callback *cb;
- unsigned char *msg;
+ unsigned char *buffer;
unsigned char sender[9];
- unsigned short len;
int rc, i;
- len = eib->ln1msg2.ipbfln1f;
- msg = kmalloc(len + 1, GFP_ATOMIC|GFP_DMA);
- if (!msg) {
- iucv_reject(eib->ippathid, eib->ipmsgid, eib->iptrgcls);
+ buffer = kmalloc(msg->length + 1, GFP_ATOMIC | GFP_DMA);
+ if (!buffer) {
+ iucv_message_reject(path, msg);
return;
}
- rc = iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls,
- msg, len, NULL, NULL, NULL);
+ rc = iucv_message_receive(path, msg, 0, buffer, msg->length, NULL);
if (rc == 0) {
- msg[len] = 0;
- EBCASC(msg, len);
- memcpy(sender, msg, 8);
+ buffer[msg->length] = 0;
+ EBCASC(buffer, msg->length);
+ memcpy(sender, buffer, 8);
sender[8] = 0;
/* Remove trailing whitespace from the sender name. */
for (i = 7; i >= 0; i--) {
@@ -80,27 +88,17 @@ smsg_message_pending(iucv_MessagePending *eib, void *pgm_data)
}
spin_lock(&smsg_list_lock);
list_for_each_entry(cb, &smsg_list, list)
- if (strncmp(msg + 8, cb->prefix, cb->len) == 0) {
- cb->callback(sender, msg + 8);
+ if (strncmp(buffer + 8, cb->prefix, cb->len) == 0) {
+ cb->callback(sender, buffer + 8);
break;
}
spin_unlock(&smsg_list_lock);
}
- kfree(msg);
+ kfree(buffer);
}
-static iucv_interrupt_ops_t smsg_ops = {
- .ConnectionComplete = smsg_connection_complete,
- .MessagePending = smsg_message_pending,
-};
-
-static struct device_driver smsg_driver = {
- .name = "SMSGIUCV",
- .bus = &iucv_bus,
-};
-
-int
-smsg_register_callback(char *prefix, void (*callback)(char *from, char *str))
+int smsg_register_callback(char *prefix,
+ void (*callback)(char *from, char *str))
{
struct smsg_callback *cb;
@@ -110,18 +108,18 @@ smsg_register_callback(char *prefix, void (*callback)(char *from, char *str))
cb->prefix = prefix;
cb->len = strlen(prefix);
cb->callback = callback;
- spin_lock(&smsg_list_lock);
+ spin_lock_bh(&smsg_list_lock);
list_add_tail(&cb->list, &smsg_list);
- spin_unlock(&smsg_list_lock);
+ spin_unlock_bh(&smsg_list_lock);
return 0;
}
-void
-smsg_unregister_callback(char *prefix, void (*callback)(char *from, char *str))
+void smsg_unregister_callback(char *prefix,
+ void (*callback)(char *from, char *str))
{
struct smsg_callback *cb, *tmp;
- spin_lock(&smsg_list_lock);
+ spin_lock_bh(&smsg_list_lock);
cb = NULL;
list_for_each_entry(tmp, &smsg_list, list)
if (tmp->callback == callback &&
@@ -130,55 +128,58 @@ smsg_unregister_callback(char *prefix, void (*callback)(char *from, char *str))
list_del(&cb->list);
break;
}
- spin_unlock(&smsg_list_lock);
+ spin_unlock_bh(&smsg_list_lock);
kfree(cb);
}
-static void __exit
-smsg_exit(void)
+static struct device_driver smsg_driver = {
+ .name = "SMSGIUCV",
+ .bus = &iucv_bus,
+};
+
+static void __exit smsg_exit(void)
{
- if (smsg_handle > 0) {
- cpcmd("SET SMSG OFF", NULL, 0, NULL);
- iucv_sever(smsg_pathid, NULL);
- iucv_unregister_program(smsg_handle);
- driver_unregister(&smsg_driver);
- }
- return;
+ cpcmd("SET SMSG IUCV", NULL, 0, NULL);
+ iucv_unregister(&smsg_handler, 1);
+ driver_unregister(&smsg_driver);
}
-static int __init
-smsg_init(void)
+static int __init smsg_init(void)
{
- static unsigned char pgmmask[24] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
- };
int rc;
rc = driver_register(&smsg_driver);
- if (rc != 0) {
- printk(KERN_ERR "SMSGIUCV: failed to register driver.\n");
- return rc;
- }
- smsg_handle = iucv_register_program("SMSGIUCV ", "*MSG ",
- pgmmask, &smsg_ops, NULL);
- if (!smsg_handle) {
+ if (rc != 0)
+ goto out;
+ rc = iucv_register(&smsg_handler, 1);
+ if (rc) {
printk(KERN_ERR "SMSGIUCV: failed to register to iucv");
- driver_unregister(&smsg_driver);
- return -EIO; /* better errno ? */
+ rc = -EIO; /* better errno ? */
+ goto out_driver;
+ }
+ smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL);
+ if (!smsg_path) {
+ rc = -ENOMEM;
+ goto out_register;
}
- rc = iucv_connect (&smsg_pathid, 255, NULL, "*MSG ", NULL, 0,
- NULL, NULL, smsg_handle, NULL);
+ rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ",
+ NULL, NULL, NULL);
if (rc) {
printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG");
- iucv_unregister_program(smsg_handle);
- driver_unregister(&smsg_driver);
- smsg_handle = NULL;
- return -EIO;
+ rc = -EIO; /* better errno ? */
+ goto out_free;
}
cpcmd("SET SMSG IUCV", NULL, 0, NULL);
return 0;
+
+out_free:
+ iucv_path_free(smsg_path);
+out_register:
+ iucv_unregister(&smsg_handler, 1);
+out_driver:
+ driver_unregister(&smsg_driver);
+out:
+ return rc;
}
module_init(smsg_init);
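The exported smsg_register_callback()/smsg_unregister_callback() interface is unchanged apart from the _bh locking. A hedged usage sketch with a hypothetical prefix and handler (a real caller would also include smsgiucv.h):

/* Hypothetical consumer of the callback interface above. */
static void demo_smsg_handler(char *from, char *str)
{
	printk(KERN_INFO "smsg from %s: %s\n", from, str);
}

static int __init demo_init(void)
{
	return smsg_register_callback("DEMO", demo_smsg_handler);
}

static void __exit demo_exit(void)
{
	smsg_unregister_callback("DEMO", demo_smsg_handler);
}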
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index 7196f50fe15..a86a55ccf87 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -828,9 +828,7 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
mutex_unlock(&crypt_stat->cs_tfm_mutex);
goto out;
}
- crypto_blkcipher_set_flags(crypt_stat->tfm,
- (ECRYPTFS_DEFAULT_CHAINING_MODE
- | CRYPTO_TFM_REQ_WEAK_KEY));
+ crypto_blkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
mutex_unlock(&crypt_stat->cs_tfm_mutex);
rc = 0;
out:
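With the CRYPTO_TFM_MODE_* flags removed, the chaining mode is selected through the algorithm name at allocation time rather than via tfm flags. A hedged sketch of the corresponding block-cipher allocation; the "cbc(aes)" template spec is illustrative, not taken from the eCryptfs code:

/* Hedged sketch: the chaining mode now lives in the algorithm name. */
static struct crypto_blkcipher *demo_alloc_cbc_aes(void)
{
	struct crypto_blkcipher *tfm;

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return tfm;
	crypto_blkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
	return tfm;
}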
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index afb64bdbe6a..0f897109759 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -176,7 +176,6 @@ ecryptfs_get_key_payload_data(struct key *key)
#define ECRYPTFS_FILE_SIZE_BYTES 8
#define ECRYPTFS_DEFAULT_CIPHER "aes"
#define ECRYPTFS_DEFAULT_KEY_BYTES 16
-#define ECRYPTFS_DEFAULT_CHAINING_MODE CRYPTO_TFM_MODE_CBC
#define ECRYPTFS_DEFAULT_HASH "md5"
#define ECRYPTFS_TAG_3_PACKET_TYPE 0x8C
#define ECRYPTFS_TAG_11_PACKET_TYPE 0xED
diff --git a/include/asm-avr32/arch-at32ap/at32ap7000.h b/include/asm-avr32/arch-at32ap/at32ap7000.h
index ba85e04553d..3914d7b94ff 100644
--- a/include/asm-avr32/arch-at32ap/at32ap7000.h
+++ b/include/asm-avr32/arch-at32ap/at32ap7000.h
@@ -24,10 +24,12 @@
#define GPIO_PIOB_BASE (GPIO_PIOA_BASE + 32)
#define GPIO_PIOC_BASE (GPIO_PIOB_BASE + 32)
#define GPIO_PIOD_BASE (GPIO_PIOC_BASE + 32)
+#define GPIO_PIOE_BASE (GPIO_PIOD_BASE + 32)
#define GPIO_PIN_PA(N) (GPIO_PIOA_BASE + (N))
#define GPIO_PIN_PB(N) (GPIO_PIOB_BASE + (N))
#define GPIO_PIN_PC(N) (GPIO_PIOC_BASE + (N))
#define GPIO_PIN_PD(N) (GPIO_PIOD_BASE + (N))
+#define GPIO_PIN_PE(N) (GPIO_PIOE_BASE + (N))
#endif /* __ASM_ARCH_AT32AP7000_H__ */
diff --git a/include/asm-avr32/arch-at32ap/gpio.h b/include/asm-avr32/arch-at32ap/gpio.h
new file mode 100644
index 00000000000..fcb756bdaa8
--- /dev/null
+++ b/include/asm-avr32/arch-at32ap/gpio.h
@@ -0,0 +1,27 @@
+#ifndef __ASM_AVR32_ARCH_GPIO_H
+#define __ASM_AVR32_ARCH_GPIO_H
+
+#include <linux/compiler.h>
+#include <asm/irq.h>
+
+
+/* Arch-neutral GPIO API */
+int __must_check gpio_request(unsigned int gpio, const char *label);
+void gpio_free(unsigned int gpio);
+
+int gpio_direction_input(unsigned int gpio);
+int gpio_direction_output(unsigned int gpio);
+int gpio_get_value(unsigned int gpio);
+void gpio_set_value(unsigned int gpio, int value);
+
+static inline int gpio_to_irq(unsigned int gpio)
+{
+ return gpio + GPIO_IRQ_BASE;
+}
+
+static inline int irq_to_gpio(unsigned int irq)
+{
+ return irq - GPIO_IRQ_BASE;
+}
+
+#endif /* __ASM_AVR32_ARCH_GPIO_H */
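A hedged usage sketch of the arch-neutral GPIO calls declared above; note that this AVR32 variant of gpio_direction_output() takes no initial-value argument. The pin and label are illustrative, not taken from a board file:

/* Hypothetical LED helper built only on the declarations above. */
static int demo_gpio_led_on(void)
{
	unsigned int pin = GPIO_PIN_PB(10);	/* arbitrary example pin */
	int ret;

	ret = gpio_request(pin, "demo-led");
	if (ret)
		return ret;
	ret = gpio_direction_output(pin);	/* no initial value in this API */
	if (ret) {
		gpio_free(pin);
		return ret;
	}
	gpio_set_value(pin, 1);
	return 0;
}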
diff --git a/include/asm-avr32/arch-at32ap/irq.h b/include/asm-avr32/arch-at32ap/irq.h
new file mode 100644
index 00000000000..5adffab9a57
--- /dev/null
+++ b/include/asm-avr32/arch-at32ap/irq.h
@@ -0,0 +1,14 @@
+#ifndef __ASM_AVR32_ARCH_IRQ_H
+#define __ASM_AVR32_ARCH_IRQ_H
+
+#define EIM_IRQ_BASE NR_INTERNAL_IRQS
+#define NR_EIM_IRQS 32
+
+#define AT32_EXTINT(n) (EIM_IRQ_BASE + (n))
+
+#define GPIO_IRQ_BASE (EIM_IRQ_BASE + NR_EIM_IRQS)
+#define NR_GPIO_IRQS (5 * 32)
+
+#define NR_IRQS (GPIO_IRQ_BASE + NR_GPIO_IRQS)
+
+#endif /* __ASM_AVR32_ARCH_IRQ_H */
diff --git a/include/asm-avr32/arch-at32ap/portmux.h b/include/asm-avr32/arch-at32ap/portmux.h
index 83c69057132..9930871decd 100644
--- a/include/asm-avr32/arch-at32ap/portmux.h
+++ b/include/asm-avr32/arch-at32ap/portmux.h
@@ -15,12 +15,14 @@
*
* The following flags determine the initial state of the pin.
*/
-#define AT32_GPIOF_PULLUP 0x00000001 /* Enable pull-up */
-#define AT32_GPIOF_OUTPUT 0x00000002 /* Enable output driver */
-#define AT32_GPIOF_HIGH 0x00000004 /* Set output high */
+#define AT32_GPIOF_PULLUP 0x00000001 /* (not-OUT) Enable pull-up */
+#define AT32_GPIOF_OUTPUT 0x00000002 /* (OUT) Enable output driver */
+#define AT32_GPIOF_HIGH 0x00000004 /* (OUT) Set output high */
+#define AT32_GPIOF_DEGLITCH 0x00000008 /* (IN) Filter glitches */
void at32_select_periph(unsigned int pin, unsigned int periph,
unsigned long flags);
void at32_select_gpio(unsigned int pin, unsigned long flags);
+void at32_reserve_pin(unsigned int pin);
#endif /* __ASM_ARCH_PORTMUX_H__ */
diff --git a/include/asm-avr32/checksum.h b/include/asm-avr32/checksum.h
index af9d53f0f5d..4ddbfd2486a 100644
--- a/include/asm-avr32/checksum.h
+++ b/include/asm-avr32/checksum.h
@@ -38,7 +38,7 @@ __wsum csum_partial_copy_generic(const void *src, void *dst, int len,
* passed in an incorrect kernel address to one of these functions.
*
* If you use these functions directly please don't forget the
- * verify_area().
+ * access_ok().
*/
static inline
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
diff --git a/include/asm-avr32/dma-mapping.h b/include/asm-avr32/dma-mapping.h
index 5c01e27f0b4..115813e48fe 100644
--- a/include/asm-avr32/dma-mapping.h
+++ b/include/asm-avr32/dma-mapping.h
@@ -32,6 +32,14 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
return 0;
}
+/*
+ * dma_map_single can't fail as it is implemented now.
+ */
+static inline int dma_mapping_error(dma_addr_t addr)
+{
+ return 0;
+}
+
/**
* dma_alloc_coherent - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
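The new stub always reports success because dma_map_single() cannot fail on AVR32, but drivers should still route mappings through dma_mapping_error() so the same code stays correct on architectures where mapping can fail. A hedged sketch; dev, buf and len are assumed to come from the caller:

/* Hedged sketch: portable error check after a streaming mapping. */
static int demo_map(struct device *dev, void *buf, size_t len,
		    dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(*handle))
		return -EIO;	/* never taken on AVR32, kept for portability */
	return 0;
}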
diff --git a/include/asm-avr32/gpio.h b/include/asm-avr32/gpio.h
new file mode 100644
index 00000000000..19e8ccc77db
--- /dev/null
+++ b/include/asm-avr32/gpio.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_AVR32_GPIO_H
+#define __ASM_AVR32_GPIO_H
+
+#include <asm/arch/gpio.h>
+
+#endif /* __ASM_AVR32_GPIO_H */
diff --git a/include/asm-avr32/irq.h b/include/asm-avr32/irq.h
index f7e725707dd..83e6549d778 100644
--- a/include/asm-avr32/irq.h
+++ b/include/asm-avr32/irq.h
@@ -2,8 +2,12 @@
#define __ASM_AVR32_IRQ_H
#define NR_INTERNAL_IRQS 64
-#define NR_EXTERNAL_IRQS 64
-#define NR_IRQS (NR_INTERNAL_IRQS + NR_EXTERNAL_IRQS)
+
+#include <asm/arch/irq.h>
+
+#ifndef NR_IRQS
+#define NR_IRQS (NR_INTERNAL_IRQS)
+#endif
#define irq_canonicalize(i) (i)
diff --git a/include/asm-avr32/posix_types.h b/include/asm-avr32/posix_types.h
index 2831b039b34..9e255b99963 100644
--- a/include/asm-avr32/posix_types.h
+++ b/include/asm-avr32/posix_types.h
@@ -23,7 +23,7 @@ typedef unsigned short __kernel_ipc_pid_t;
typedef unsigned int __kernel_uid_t;
typedef unsigned int __kernel_gid_t;
typedef unsigned long __kernel_size_t;
-typedef int __kernel_ssize_t;
+typedef long __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
typedef long __kernel_time_t;
typedef long __kernel_suseconds_t;
diff --git a/include/asm-avr32/uaccess.h b/include/asm-avr32/uaccess.h
index 821deb5a9d2..74a679e9098 100644
--- a/include/asm-avr32/uaccess.h
+++ b/include/asm-avr32/uaccess.h
@@ -68,12 +68,6 @@ static inline void set_fs(mm_segment_t s)
#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0))
-static inline int
-verify_area(int type, const void __user *addr, unsigned long size)
-{
- return access_ok(type, addr, size) ? 0 : -EFAULT;
-}
-
/* Generic arbitrary sized copy. Return the number of bytes NOT copied */
extern __kernel_size_t __copy_user(void *to, const void *from,
__kernel_size_t n);
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 5748aecdb41..4e05e93ff68 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -18,8 +18,8 @@ struct module;
struct seq_file;
struct crypto_type {
- unsigned int (*ctxsize)(struct crypto_alg *alg);
- int (*init)(struct crypto_tfm *tfm);
+ unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
+ int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
void (*exit)(struct crypto_tfm *tfm);
void (*show)(struct seq_file *m, struct crypto_alg *alg);
};
@@ -93,7 +93,8 @@ struct crypto_template *crypto_lookup_template(const char *name);
int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
struct crypto_instance *inst);
void crypto_drop_spawn(struct crypto_spawn *spawn);
-struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn);
+struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
+ u32 mask);
struct crypto_alg *crypto_get_attr_alg(void *param, unsigned int len,
u32 type, u32 mask);
@@ -132,11 +133,28 @@ static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
return crypto_tfm_ctx_aligned(&tfm->base);
}
+static inline struct crypto_cipher *crypto_spawn_cipher(
+ struct crypto_spawn *spawn)
+{
+ u32 type = CRYPTO_ALG_TYPE_CIPHER;
+ u32 mask = CRYPTO_ALG_TYPE_MASK;
+
+ return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
+}
+
static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}
+static inline struct crypto_hash *crypto_spawn_hash(struct crypto_spawn *spawn)
+{
+ u32 type = CRYPTO_ALG_TYPE_HASH;
+ u32 mask = CRYPTO_ALG_TYPE_HASH_MASK;
+
+ return __crypto_hash_cast(crypto_spawn_tfm(spawn, type, mask));
+}
+
static inline void *crypto_hash_ctx_aligned(struct crypto_hash *tfm)
{
return crypto_tfm_ctx_aligned(&tfm->base);
diff --git a/include/linux/atmarp.h b/include/linux/atmarp.h
index ee108f9e9cb..231f4bdec73 100644
--- a/include/linux/atmarp.h
+++ b/include/linux/atmarp.h
@@ -6,9 +6,7 @@
#ifndef _LINUX_ATMARP_H
#define _LINUX_ATMARP_H
-#ifdef __KERNEL__
#include <linux/types.h>
-#endif
#include <linux/atmapi.h>
#include <linux/atmioc.h>
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 4aa9046601d..779aa78ee64 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -51,15 +51,9 @@
/*
* Transform masks and values (for crt_flags).
*/
-#define CRYPTO_TFM_MODE_MASK 0x000000ff
#define CRYPTO_TFM_REQ_MASK 0x000fff00
#define CRYPTO_TFM_RES_MASK 0xfff00000
-#define CRYPTO_TFM_MODE_ECB 0x00000001
-#define CRYPTO_TFM_MODE_CBC 0x00000002
-#define CRYPTO_TFM_MODE_CFB 0x00000004
-#define CRYPTO_TFM_MODE_CTR 0x00000008
-
#define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200
#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
@@ -71,12 +65,8 @@
/*
* Miscellaneous stuff.
*/
-#define CRYPTO_UNSPEC 0
#define CRYPTO_MAX_ALG_NAME 64
-#define CRYPTO_DIR_ENCRYPT 1
-#define CRYPTO_DIR_DECRYPT 0
-
/*
* The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
* declaration) is used to ensure that the crypto_tfm context structure is
@@ -148,19 +138,6 @@ struct cipher_alg {
unsigned int keylen);
void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
-
- unsigned int (*cia_encrypt_ecb)(const struct cipher_desc *desc,
- u8 *dst, const u8 *src,
- unsigned int nbytes) __deprecated;
- unsigned int (*cia_decrypt_ecb)(const struct cipher_desc *desc,
- u8 *dst, const u8 *src,
- unsigned int nbytes) __deprecated;
- unsigned int (*cia_encrypt_cbc)(const struct cipher_desc *desc,
- u8 *dst, const u8 *src,
- unsigned int nbytes) __deprecated;
- unsigned int (*cia_decrypt_cbc)(const struct cipher_desc *desc,
- u8 *dst, const u8 *src,
- unsigned int nbytes) __deprecated;
};
struct digest_alg {
@@ -243,11 +220,6 @@ int crypto_unregister_alg(struct crypto_alg *alg);
#ifdef CONFIG_CRYPTO
int crypto_has_alg(const char *name, u32 type, u32 mask);
#else
-static inline int crypto_alg_available(const char *name, u32 flags)
-{
- return 0;
-}
-
static inline int crypto_has_alg(const char *name, u32 type, u32 mask)
{
return 0;
@@ -339,13 +311,18 @@ struct crypto_tfm {
void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};
-#define crypto_cipher crypto_tfm
-#define crypto_comp crypto_tfm
-
struct crypto_blkcipher {
struct crypto_tfm base;
};
+struct crypto_cipher {
+ struct crypto_tfm base;
+};
+
+struct crypto_comp {
+ struct crypto_tfm base;
+};
+
struct crypto_hash {
struct crypto_tfm base;
};
@@ -395,40 +372,11 @@ static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}
-static unsigned int crypto_tfm_alg_min_keysize(struct crypto_tfm *tfm)
- __deprecated;
-static inline unsigned int crypto_tfm_alg_min_keysize(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->__crt_alg->cra_cipher.cia_min_keysize;
-}
-
-static unsigned int crypto_tfm_alg_max_keysize(struct crypto_tfm *tfm)
- __deprecated;
-static inline unsigned int crypto_tfm_alg_max_keysize(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->__crt_alg->cra_cipher.cia_max_keysize;
-}
-
-static unsigned int crypto_tfm_alg_ivsize(struct crypto_tfm *tfm) __deprecated;
-static inline unsigned int crypto_tfm_alg_ivsize(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_ivsize;
-}
-
static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_blocksize;
}
-static inline unsigned int crypto_tfm_alg_digestsize(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_DIGEST);
- return tfm->__crt_alg->cra_digest.dia_digestsize;
-}
-
static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
{
return tfm->__crt_alg->cra_alignmask;
@@ -633,7 +581,7 @@ static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
{
- return tfm;
+ return &tfm->base;
}
static inline void crypto_free_cipher(struct crypto_cipher *tfm)
@@ -809,76 +757,6 @@ static inline int crypto_hash_setkey(struct crypto_hash *hash,
return crypto_hash_crt(hash)->setkey(hash, key, keylen);
}
-static int crypto_cipher_encrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes) __deprecated;
-static inline int crypto_cipher_encrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_encrypt(tfm, dst, src, nbytes);
-}
-
-static int crypto_cipher_encrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv) __deprecated;
-static inline int crypto_cipher_encrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_encrypt_iv(tfm, dst, src, nbytes, iv);
-}
-
-static int crypto_cipher_decrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes) __deprecated;
-static inline int crypto_cipher_decrypt(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_decrypt(tfm, dst, src, nbytes);
-}
-
-static int crypto_cipher_decrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv) __deprecated;
-static inline int crypto_cipher_decrypt_iv(struct crypto_tfm *tfm,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes, u8 *iv)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return tfm->crt_cipher.cit_decrypt_iv(tfm, dst, src, nbytes, iv);
-}
-
-static void crypto_cipher_set_iv(struct crypto_tfm *tfm,
- const u8 *src, unsigned int len) __deprecated;
-static inline void crypto_cipher_set_iv(struct crypto_tfm *tfm,
- const u8 *src, unsigned int len)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- memcpy(tfm->crt_cipher.cit_iv, src, len);
-}
-
-static void crypto_cipher_get_iv(struct crypto_tfm *tfm,
- u8 *dst, unsigned int len) __deprecated;
-static inline void crypto_cipher_get_iv(struct crypto_tfm *tfm,
- u8 *dst, unsigned int len)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- memcpy(dst, tfm->crt_cipher.cit_iv, len);
-}
-
static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
return (struct crypto_comp *)tfm;
@@ -903,7 +781,7 @@ static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
{
- return tfm;
+ return &tfm->base;
}
static inline void crypto_free_comp(struct crypto_comp *tfm)
@@ -934,14 +812,16 @@ static inline int crypto_comp_compress(struct crypto_comp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
- return crypto_comp_crt(tfm)->cot_compress(tfm, src, slen, dst, dlen);
+ return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
+ src, slen, dst, dlen);
}
static inline int crypto_comp_decompress(struct crypto_comp *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int *dlen)
{
- return crypto_comp_crt(tfm)->cot_decompress(tfm, src, slen, dst, dlen);
+ return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
+ src, slen, dst, dlen);
}
#endif /* _LINUX_CRYPTO_H */
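
With crypto_cipher now a type of its own rather than an alias for crypto_tfm,
callers allocate it explicitly and drive it one block at a time. A hedged
sketch of that single-block interface as it stands after this change; the
algorithm name "aes" and the surrounding error handling are illustrative.

#include <linux/crypto.h>
#include <linux/err.h>

static int demo_encrypt_one_block(const u8 *key, unsigned int keylen,
				  const u8 *in, u8 *out)
{
	struct crypto_cipher *cipher;
	int err;

	cipher = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	err = crypto_cipher_setkey(cipher, key, keylen);
	if (!err)
		/* Operates on exactly one block (16 bytes for AES). */
		crypto_cipher_encrypt_one(cipher, out, in);

	crypto_free_cipher(cipher);
	return err;
}
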
diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h
index 99393ef3af3..f3de05c3067 100644
--- a/include/linux/if_packet.h
+++ b/include/linux/if_packet.h
@@ -41,6 +41,7 @@ struct sockaddr_ll
#define PACKET_RX_RING 5
#define PACKET_STATISTICS 6
#define PACKET_COPY_THRESH 7
+#define PACKET_AUXDATA 8
struct tpacket_stats
{
@@ -48,6 +49,15 @@ struct tpacket_stats
unsigned int tp_drops;
};
+struct tpacket_auxdata
+{
+ __u32 tp_status;
+ __u32 tp_len;
+ __u32 tp_snaplen;
+ __u16 tp_mac;
+ __u16 tp_net;
+};
+
struct tpacket_hdr
{
unsigned long tp_status;
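
PACKET_AUXDATA is consumed from userspace: once the option is enabled, every
packet read from an AF_PACKET socket carries a struct tpacket_auxdata as a
control message. A rough userspace sketch (error handling trimmed, buffer
sizes arbitrary; SOL_PACKET comes from the socket headers):

#include <sys/socket.h>
#include <linux/if_packet.h>
#include <string.h>

static void read_one_packet_with_auxdata(int fd)
{
	int one = 1;
	char data[2048];
	union {
		struct cmsghdr align;
		char buf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
	} ctrl;
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl.buf, .msg_controllen = sizeof(ctrl.buf),
	};
	struct cmsghdr *cmsg;

	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));

	if (recvmsg(fd, &msg, 0) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_PACKET &&
		    cmsg->cmsg_type == PACKET_AUXDATA) {
			struct tpacket_auxdata aux;

			memcpy(&aux, CMSG_DATA(cmsg), sizeof(aux));
			/* aux.tp_len: on-wire length, aux.tp_snaplen: captured */
		}
	}
}
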
diff --git a/include/linux/net.h b/include/linux/net.h
index f28d8a2e2c9..4db21e63d8d 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -24,7 +24,7 @@
struct poll_table_struct;
struct inode;
-#define NPROTO 32 /* should be enough for now.. */
+#define NPROTO 33 /* should be enough for now.. */
#define SYS_SOCKET 1 /* sys_socket(2) */
#define SYS_BIND 2 /* sys_bind(2) */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 2e37f501278..1a528548cd1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -589,7 +589,7 @@ extern int dev_open(struct net_device *dev);
extern int dev_close(struct net_device *dev);
extern int dev_queue_xmit(struct sk_buff *skb);
extern int register_netdevice(struct net_device *dev);
-extern int unregister_netdevice(struct net_device *dev);
+extern void unregister_netdevice(struct net_device *dev);
extern void free_netdev(struct net_device *dev);
extern void synchronize_net(void);
extern int register_netdevice_notifier(struct notifier_block *nb);
diff --git a/include/linux/netfilter/Kbuild b/include/linux/netfilter/Kbuild
index 6328175a1c3..43397a414cd 100644
--- a/include/linux/netfilter/Kbuild
+++ b/include/linux/netfilter/Kbuild
@@ -33,6 +33,7 @@ header-y += xt_tcpmss.h
header-y += xt_tcpudp.h
header-y += xt_SECMARK.h
header-y += xt_CONNSECMARK.h
+header-y += xt_TCPMSS.h
unifdef-y += nf_conntrack_common.h
unifdef-y += nf_conntrack_ftp.h
diff --git a/include/linux/netfilter/nf_conntrack_sane.h b/include/linux/netfilter/nf_conntrack_sane.h
new file mode 100644
index 00000000000..4767d6e23e9
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_sane.h
@@ -0,0 +1,21 @@
+#ifndef _NF_CONNTRACK_SANE_H
+#define _NF_CONNTRACK_SANE_H
+/* SANE tracking. */
+
+#ifdef __KERNEL__
+
+#define SANE_PORT 6566
+
+enum sane_state {
+ SANE_STATE_NORMAL,
+ SANE_STATE_START_REQUESTED,
+};
+
+/* This structure exists only once per master */
+struct nf_ct_sane_master {
+ enum sane_state state;
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* _NF_CONNTRACK_SANE_H */
diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h
index 2f4e98b90cc..007af4c2770 100644
--- a/include/linux/netfilter/nf_conntrack_tcp.h
+++ b/include/linux/netfilter/nf_conntrack_tcp.h
@@ -27,6 +27,9 @@ enum tcp_conntrack {
/* This sender sent FIN first */
#define IP_CT_TCP_FLAG_CLOSE_INIT 0x04
+/* Be liberal in window checking */
+#define IP_CT_TCP_FLAG_BE_LIBERAL 0x08
+
#ifdef __KERNEL__
struct ip_ct_tcp_state {
@@ -34,7 +37,6 @@ struct ip_ct_tcp_state {
u_int32_t td_maxend; /* max of ack + max(win, 1) */
u_int32_t td_maxwin; /* max(win) */
u_int8_t td_scale; /* window scale factor */
- u_int8_t loose; /* used when connection picked up from the middle */
u_int8_t flags; /* per direction options */
};
diff --git a/include/linux/netfilter/xt_TCPMSS.h b/include/linux/netfilter/xt_TCPMSS.h
new file mode 100644
index 00000000000..53a292cd47f
--- /dev/null
+++ b/include/linux/netfilter/xt_TCPMSS.h
@@ -0,0 +1,10 @@
+#ifndef _XT_TCPMSS_H
+#define _XT_TCPMSS_H
+
+struct xt_tcpmss_info {
+ u_int16_t mss;
+};
+
+#define XT_TCPMSS_CLAMP_PMTU 0xffff
+
+#endif /* _XT_TCPMSS_H */
diff --git a/include/linux/netfilter_ipv4/ip_nat.h b/include/linux/netfilter_ipv4/ip_nat.h
index bdf553620ca..bbca89aab81 100644
--- a/include/linux/netfilter_ipv4/ip_nat.h
+++ b/include/linux/netfilter_ipv4/ip_nat.h
@@ -16,6 +16,7 @@ enum ip_nat_manip_type
#define IP_NAT_RANGE_MAP_IPS 1
#define IP_NAT_RANGE_PROTO_SPECIFIED 2
+#define IP_NAT_RANGE_PROTO_RANDOM 4 /* add randomness to "port" selection */
/* NAT sequence number modifications */
struct ip_nat_seq {
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index 98d566c5e32..9527296595c 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -272,25 +272,9 @@ ipt_get_target(struct ipt_entry *e)
#include <linux/init.h>
extern void ipt_init(void) __init;
-#define ipt_register_target(tgt) \
-({ (tgt)->family = AF_INET; \
- xt_register_target(tgt); })
-#define ipt_unregister_target(tgt) xt_unregister_target(tgt)
-
-#define ipt_register_match(mtch) \
-({ (mtch)->family = AF_INET; \
- xt_register_match(mtch); })
-#define ipt_unregister_match(mtch) xt_unregister_match(mtch)
-
-//#define ipt_register_table(tbl, repl) xt_register_table(AF_INET, tbl, repl)
-//#define ipt_unregister_table(tbl) xt_unregister_table(AF_INET, tbl)
-
-extern int ipt_register_table(struct ipt_table *table,
+extern int ipt_register_table(struct xt_table *table,
const struct ipt_replace *repl);
-extern void ipt_unregister_table(struct ipt_table *table);
-
-/* net/sched/ipt.c: Gimme access to your targets! Gets target->me. */
-extern struct ipt_target *ipt_find_target(const char *name, u8 revision);
+extern void ipt_unregister_table(struct xt_table *table);
/* Standard entry. */
struct ipt_standard
@@ -315,7 +299,7 @@ extern unsigned int ipt_do_table(struct sk_buff **pskb,
unsigned int hook,
const struct net_device *in,
const struct net_device *out,
- struct ipt_table *table);
+ struct xt_table *table);
#define IPT_ALIGN(s) XT_ALIGN(s)
diff --git a/include/linux/netfilter_ipv4/ipt_TCPMSS.h b/include/linux/netfilter_ipv4/ipt_TCPMSS.h
index aadb39580cd..7a850f94582 100644
--- a/include/linux/netfilter_ipv4/ipt_TCPMSS.h
+++ b/include/linux/netfilter_ipv4/ipt_TCPMSS.h
@@ -1,10 +1,9 @@
#ifndef _IPT_TCPMSS_H
#define _IPT_TCPMSS_H
-struct ipt_tcpmss_info {
- u_int16_t mss;
-};
+#include <linux/netfilter/xt_TCPMSS.h>
-#define IPT_TCPMSS_CLAMP_PMTU 0xffff
+#define ipt_tcpmss_info xt_tcpmss_info
+#define IPT_TCPMSS_CLAMP_PMTU XT_TCPMSS_CLAMP_PMTU
#endif /*_IPT_TCPMSS_H*/
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 4aed340401d..61aa10412fc 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -104,6 +104,25 @@ struct ip6t_entry
unsigned char elems[0];
};
+/* Standard entry */
+struct ip6t_standard
+{
+ struct ip6t_entry entry;
+ struct ip6t_standard_target target;
+};
+
+struct ip6t_error_target
+{
+ struct ip6t_entry_target target;
+ char errorname[IP6T_FUNCTION_MAXNAMELEN];
+};
+
+struct ip6t_error
+{
+ struct ip6t_entry entry;
+ struct ip6t_error_target target;
+};
+
/*
* New IP firewall options for [gs]etsockopt at the RAW IP level.
* Unlike BSD Linux inherits IP options so you don't have to use
@@ -286,24 +305,14 @@ ip6t_get_target(struct ip6t_entry *e)
#include <linux/init.h>
extern void ip6t_init(void) __init;
-#define ip6t_register_target(tgt) \
-({ (tgt)->family = AF_INET6; \
- xt_register_target(tgt); })
-#define ip6t_unregister_target(tgt) xt_unregister_target(tgt)
-
-#define ip6t_register_match(match) \
-({ (match)->family = AF_INET6; \
- xt_register_match(match); })
-#define ip6t_unregister_match(match) xt_unregister_match(match)
-
-extern int ip6t_register_table(struct ip6t_table *table,
+extern int ip6t_register_table(struct xt_table *table,
const struct ip6t_replace *repl);
-extern void ip6t_unregister_table(struct ip6t_table *table);
+extern void ip6t_unregister_table(struct xt_table *table);
extern unsigned int ip6t_do_table(struct sk_buff **pskb,
unsigned int hook,
const struct net_device *in,
const struct net_device *out,
- struct ip6t_table *table);
+ struct xt_table *table);
/* Check for an extension */
extern int ip6t_ext_hdr(u8 nexthdr);
diff --git a/include/linux/netfilter_ipv6/ip6t_mh.h b/include/linux/netfilter_ipv6/ip6t_mh.h
new file mode 100644
index 00000000000..b9ca9a5f74d
--- /dev/null
+++ b/include/linux/netfilter_ipv6/ip6t_mh.h
@@ -0,0 +1,15 @@
+#ifndef _IP6T_MH_H
+#define _IP6T_MH_H
+
+/* MH matching stuff */
+struct ip6t_mh
+{
+ u_int8_t types[2]; /* MH type range */
+ u_int8_t invflags; /* Inverse flags */
+};
+
+/* Values for "invflags" field in struct ip6t_mh. */
+#define IP6T_MH_INV_TYPE 0x01 /* Invert the sense of type. */
+#define IP6T_MH_INV_MASK 0x01 /* All possible flags. */
+
+#endif /*_IP6T_MH_H*/
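
The new ip6t_mh structure encodes a closed Mobility Header type range plus an
inversion flag. A hypothetical helper showing how a match routine might
evaluate it (a sketch only, not the in-tree ip6t_mh match):

#include <linux/netfilter_ipv6/ip6t_mh.h>

static inline int mh_type_matches(const struct ip6t_mh *mhinfo, u_int8_t type)
{
	int in_range = type >= mhinfo->types[0] && type <= mhinfo->types[1];

	/* IP6T_MH_INV_TYPE inverts the sense of the range test. */
	return in_range ^ !!(mhinfo->invflags & IP6T_MH_INV_TYPE);
}
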
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index c3e255bf859..7a8dcb82a69 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -76,8 +76,6 @@ extern struct page * find_get_page(struct address_space *mapping,
unsigned long index);
extern struct page * find_lock_page(struct address_space *mapping,
unsigned long index);
-extern __deprecated_for_modules struct page * find_trylock_page(
- struct address_space *mapping, unsigned long index);
extern struct page * find_or_create_page(struct address_space *mapping,
unsigned long index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index defdeed2064..920d21e021b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2073,6 +2073,8 @@
#define PCI_VENDOR_ID_PASEMI 0x1959
+#define PCI_VENDOR_ID_ATTANSIC 0x1969
+
#define PCI_VENDOR_ID_JMICRON 0x197B
#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360
#define PCI_DEVICE_ID_JMICRON_JMB361 0x2361
diff --git a/include/linux/pfkeyv2.h b/include/linux/pfkeyv2.h
index 265bafab649..d9db5f62ee4 100644
--- a/include/linux/pfkeyv2.h
+++ b/include/linux/pfkeyv2.h
@@ -251,7 +251,8 @@ struct sadb_x_sec_ctx {
#define SADB_X_SPDEXPIRE 21
#define SADB_X_SPDDELETE2 22
#define SADB_X_NAT_T_NEW_MAPPING 23
-#define SADB_MAX 23
+#define SADB_X_MIGRATE 24
+#define SADB_MAX 24
/* Security Association flags */
#define SADB_SAFLAGS_PFS 1
@@ -297,6 +298,7 @@ struct sadb_x_sec_ctx {
#define SADB_X_EALG_BLOWFISHCBC 7
#define SADB_EALG_NULL 11
#define SADB_X_EALG_AESCBC 12
+#define SADB_X_EALG_CAMELLIACBC 22
#define SADB_EALG_MAX 253 /* last EALG */
/* private allocations should use 249-255 (RFC2407) */
#define SADB_X_EALG_SERPENTCBC 252 /* draft-ietf-ipsec-ciph-aes-cbc-00 */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 92cd38efad7..fcd35a210e7 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -187,7 +187,8 @@ struct ucred {
#define AF_LLC 26 /* Linux LLC */
#define AF_TIPC 30 /* TIPC sockets */
#define AF_BLUETOOTH 31 /* Bluetooth sockets */
-#define AF_MAX 32 /* For now.. */
+#define AF_IUCV 32 /* IUCV sockets */
+#define AF_MAX 33 /* For now.. */
/* Protocol families, same as address families. */
#define PF_UNSPEC AF_UNSPEC
@@ -220,6 +221,7 @@ struct ucred {
#define PF_LLC AF_LLC
#define PF_TIPC AF_TIPC
#define PF_BLUETOOTH AF_BLUETOOTH
+#define PF_IUCV AF_IUCV
#define PF_MAX AF_MAX
/* Maximum queue length specifiable by listen. */
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 81480e61346..665412c4f4b 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -699,7 +699,8 @@ enum {
NET_X25_CALL_REQUEST_TIMEOUT=2,
NET_X25_RESET_REQUEST_TIMEOUT=3,
NET_X25_CLEAR_REQUEST_TIMEOUT=4,
- NET_X25_ACK_HOLD_BACK_TIMEOUT=5
+ NET_X25_ACK_HOLD_BACK_TIMEOUT=5,
+ NET_X25_FORWARD=6
};
/* /proc/sys/net/token-ring */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 3cc70d1a350..29d3089038a 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -316,7 +316,7 @@ struct tcp_sock {
struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
- struct tcp_sack_block recv_sack_cache[4];
+ struct tcp_sack_block_wire recv_sack_cache[4];
/* from STCP, retrans queue hinting */
struct sk_buff* lost_skb_hint;
diff --git a/include/linux/wanrouter.h b/include/linux/wanrouter.h
index 2cd05013edf..3add87465b1 100644
--- a/include/linux/wanrouter.h
+++ b/include/linux/wanrouter.h
@@ -516,9 +516,6 @@ struct wan_device {
/* Public functions available for device drivers */
extern int register_wan_device(struct wan_device *wandev);
extern int unregister_wan_device(char *name);
-__be16 wanrouter_type_trans(struct sk_buff *skb, struct net_device *dev);
-int wanrouter_encapsulate(struct sk_buff *skb, struct net_device *dev,
- unsigned short type);
/* Proc interface functions. These must not be called by the drivers! */
extern int wanrouter_proc_init(void);
@@ -527,11 +524,6 @@ extern int wanrouter_proc_add(struct wan_device *wandev);
extern int wanrouter_proc_delete(struct wan_device *wandev);
extern int wanrouter_ioctl( struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg);
-extern void lock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags);
-extern void unlock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags);
-
-
-
/* Public Data */
/* list of registered devices */
extern struct wan_device *wanrouter_router_devlist;
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index 9529ea1ae39..15ca89e9961 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -178,6 +178,9 @@ enum {
XFRM_MSG_REPORT,
#define XFRM_MSG_REPORT XFRM_MSG_REPORT
+ XFRM_MSG_MIGRATE,
+#define XFRM_MSG_MIGRATE XFRM_MSG_MIGRATE
+
__XFRM_MSG_MAX
};
#define XFRM_MSG_MAX (__XFRM_MSG_MAX - 1)
@@ -256,6 +259,7 @@ enum xfrm_attr_type_t {
XFRMA_COADDR, /* xfrm_address_t */
XFRMA_LASTUSED,
XFRMA_POLICY_TYPE, /* struct xfrm_userpolicy_type */
+ XFRMA_MIGRATE,
__XFRMA_MAX
#define XFRMA_MAX (__XFRMA_MAX - 1)
@@ -351,6 +355,19 @@ struct xfrm_user_report {
struct xfrm_selector sel;
};
+struct xfrm_user_migrate {
+ xfrm_address_t old_daddr;
+ xfrm_address_t old_saddr;
+ xfrm_address_t new_daddr;
+ xfrm_address_t new_saddr;
+ __u8 proto;
+ __u8 mode;
+ __u16 reserved;
+ __u32 reqid;
+ __u16 old_family;
+ __u16 new_family;
+};
+
#ifndef __KERNEL__
/* backwards compatibility for userspace */
#define XFRMGRP_ACQUIRE 1
@@ -375,6 +392,8 @@ enum xfrm_nlgroups {
#define XFRMNLGRP_AEVENTS XFRMNLGRP_AEVENTS
XFRMNLGRP_REPORT,
#define XFRMNLGRP_REPORT XFRMNLGRP_REPORT
+ XFRMNLGRP_MIGRATE,
+#define XFRMNLGRP_MIGRATE XFRMNLGRP_MIGRATE
__XFRMNLGRP_MAX
};
#define XFRMNLGRP_MAX (__XFRMNLGRP_MAX - 1)
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 34cc76e3ddb..d27ee8c0da3 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -34,12 +34,13 @@
#include <asm/byteorder.h>
/* This is for all connections with a full identity, no wildcards.
- * New scheme, half the table is for TIME_WAIT, the other half is
- * for the rest. I'll experiment with dynamic table growth later.
+ * One chain is dedicated to TIME_WAIT sockets.
+ * I'll experiment with dynamic table growth later.
*/
struct inet_ehash_bucket {
rwlock_t lock;
struct hlist_head chain;
+ struct hlist_head twchain;
};
/* There are a few simple rules, which allow for local port reuse by
@@ -97,8 +98,7 @@ struct inet_hashinfo {
*
* TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
*
- * First half of the table is for sockets not in TIME_WAIT, second half
- * is for TIME_WAIT sockets only.
+ * TIME_WAIT sockets use a separate chain (twchain).
*/
struct inet_ehash_bucket *ehash;
@@ -369,7 +369,7 @@ static inline struct sock *
}
/* Must check for a TIME_WAIT'er before going to listener hash. */
- sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) {
+ sk_for_each(sk, node, &head->twchain) {
if (INET_TW_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
goto hit;
}
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
new file mode 100644
index 00000000000..04d1abb72d2
--- /dev/null
+++ b/include/net/iucv/af_iucv.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2006 IBM Corporation
+ * IUCV protocol stack for Linux on zSeries
+ * Version 1.0
+ * Author(s): Jennifer Hunt <jenhunt@us.ibm.com>
+ *
+ */
+
+#ifndef __AFIUCV_H
+#define __AFIUCV_H
+
+#include <asm/types.h>
+#include <asm/byteorder.h>
+#include <linux/list.h>
+#include <linux/poll.h>
+#include <linux/socket.h>
+
+#ifndef AF_IUCV
+#define AF_IUCV 32
+#define PF_IUCV AF_IUCV
+#endif
+
+/* Connection and socket states */
+enum {
+ IUCV_CONNECTED = 1,
+ IUCV_OPEN,
+ IUCV_BOUND,
+ IUCV_LISTEN,
+ IUCV_SEVERED,
+ IUCV_DISCONN,
+ IUCV_CLOSED
+};
+
+#define IUCV_QUEUELEN_DEFAULT 65535
+#define IUCV_CONN_TIMEOUT (HZ * 40)
+#define IUCV_DISCONN_TIMEOUT (HZ * 2)
+#define IUCV_CONN_IDLE_TIMEOUT (HZ * 60)
+#define IUCV_BUFSIZE_DEFAULT 32768
+
+/* IUCV socket address */
+struct sockaddr_iucv {
+ sa_family_t siucv_family;
+ unsigned short siucv_port; /* Reserved */
+ unsigned int siucv_addr; /* Reserved */
+ char siucv_nodeid[8]; /* Reserved */
+ char siucv_user_id[8]; /* Guest User Id */
+ char siucv_name[8]; /* Application Name */
+};
+
+
+/* Common socket structures and functions */
+
+#define iucv_sk(__sk) ((struct iucv_sock *) __sk)
+
+struct iucv_sock {
+ struct sock sk;
+ char src_user_id[8];
+ char src_name[8];
+ char dst_user_id[8];
+ char dst_name[8];
+ struct list_head accept_q;
+ struct sock *parent;
+ struct iucv_path *path;
+ struct sk_buff_head send_skb_q;
+ unsigned int send_tag;
+};
+
+struct iucv_sock_list {
+ struct hlist_head head;
+ rwlock_t lock;
+ atomic_t autobind_name;
+};
+
+static void iucv_sock_destruct(struct sock *sk);
+static void iucv_sock_cleanup_listen(struct sock *parent);
+static void iucv_sock_kill(struct sock *sk);
+static void iucv_sock_close(struct sock *sk);
+static int iucv_sock_create(struct socket *sock, int proto);
+static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
+ int addr_len);
+static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
+ int alen, int flags);
+static int iucv_sock_listen(struct socket *sock, int backlog);
+static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
+ int flags);
+static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
+ int *len, int peer);
+static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len);
+static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len, int flags);
+unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
+ poll_table *wait);
+static int iucv_sock_release(struct socket *sock);
+static int iucv_sock_shutdown(struct socket *sock, int how);
+
+void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
+void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
+int iucv_sock_wait_state(struct sock *sk, int state, int state2,
+ unsigned long timeo);
+int iucv_sock_wait_cnt(struct sock *sk, unsigned long timeo);
+void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
+void iucv_accept_unlink(struct sock *sk);
+struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock);
+
+#endif /* __AFIUCV_H */
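
The address family and sockaddr layout above are enough to sketch how an
application on a Linux guest would open an AF_IUCV stream socket. The peer
user id and application name below are placeholders, the userspace mirror of
sockaddr_iucv is an assumption (no exported uapi header existed at this
point), and error handling is kept to the bare minimum.

#include <sys/socket.h>
#include <string.h>

#ifndef AF_IUCV
#define AF_IUCV 32
#endif

/* Mirrors struct sockaddr_iucv from af_iucv.h. */
struct sockaddr_iucv_user {
	sa_family_t	siucv_family;
	unsigned short	siucv_port;	/* Reserved */
	unsigned int	siucv_addr;	/* Reserved */
	char		siucv_nodeid[8];
	char		siucv_user_id[8];
	char		siucv_name[8];
};

static int iucv_connect_example(void)
{
	struct sockaddr_iucv_user addr;
	int fd = socket(AF_IUCV, SOCK_STREAM, 0);

	if (fd < 0)
		return fd;

	memset(&addr, 0, sizeof(addr));
	addr.siucv_family = AF_IUCV;
	/* IUCV names are blank-padded, 8 bytes, no NUL terminator. */
	memcpy(addr.siucv_user_id, "LNXGUEST", 8);	/* placeholder user id */
	memcpy(addr.siucv_name,    "APPSRV  ", 8);	/* placeholder app name */

	return connect(fd, (struct sockaddr *)&addr, sizeof(addr));
}
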
diff --git a/include/net/iucv/iucv.h b/include/net/iucv/iucv.h
new file mode 100644
index 00000000000..746e7416261
--- /dev/null
+++ b/include/net/iucv/iucv.h
@@ -0,0 +1,415 @@
+/*
+ * include/net/iucv/iucv.h
+ * IUCV base support.
+ *
+ * S390 version
+ * Copyright 2000, 2006 IBM Corporation
+ * Author(s):Alan Altmark (Alan_Altmark@us.ibm.com)
+ * Xenia Tkatschow (xenia@us.ibm.com)
+ * Rewritten for af_iucv:
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ *
+ * Functionality:
+ * To use any of the IUCV functions, a program must first register its
+ * handler using iucv_register(). Once registration has completed
+ * successfully, the other functions can be used.
+ * For further reference on all IUCV functionality, refer to the
+ * CP Programming Services book, also available on the web through
+ * www.ibm.com/s390/vm/pubs, manual # SC24-5760
+ *
+ * Definition of Return Codes
+ * - All positive return codes including zero are reflected back
+ * from CP. The definition of each return code can be found in
+ * CP Programming Services book.
+ * - Return Code of:
+ * -EINVAL: Invalid value
+ * -ENOMEM: storage allocation failed
+ */
+
+#include <linux/types.h>
+#include <asm/debug.h>
+
+/*
+ * IUCV option flags usable by device drivers:
+ *
+ * IUCV_IPRMDATA Indicates that your program can handle a message in the
+ * parameter list / a message is sent in the parameter list.
+ * Used for iucv_path_accept, iucv_path_connect,
+ * iucv_message_reply, iucv_message_send, iucv_message_send2way.
+ * IUCV_IPQUSCE Indicates that you do not want to receive messages on this
+ * path until an iucv_path_resume is issued.
+ * Used for iucv_path_accept, iucv_path_connect.
+ * IUCV_IPBUFLST Indicates that an address list is used for the message data.
+ * Used for iucv_message_receive, iucv_message_send,
+ * iucv_message_send2way.
+ * IUCV_IPPRTY Specifies that you want to send priority messages.
+ * Used for iucv_path_accept, iucv_path_connect,
+ * iucv_message_reply, iucv_message_send, iucv_message_send2way.
+ * IUCV_IPSYNC Indicates a synchronous send request.
+ * Used for iucv_message_send, iucv_message_send2way.
+ * IUCV_IPANSLST Indicates that an address list is used for the reply data.
+ * Used for iucv_message_reply, iucv_message_send2way.
+ * IUCV_IPLOCAL Specifies that the communication partner has to be on the
+ * local system. If local is specified no target class can be
+ * specified.
+ * Used for iucv_path_connect.
+ *
+ * All flags are defined in the input field IPFLAGS1 of each function
+ * and can be found in CP Programming Services.
+ */
+#define IUCV_IPRMDATA 0x80
+#define IUCV_IPQUSCE 0x40
+#define IUCV_IPBUFLST 0x40
+#define IUCV_IPPRTY 0x20
+#define IUCV_IPANSLST 0x08
+#define IUCV_IPSYNC 0x04
+#define IUCV_IPLOCAL 0x01
+
+/*
+ * iucv_array : Defines buffer array.
+ * Inside the array may be 31-bit addresses and 31-bit lengths.
+ * Use a pointer to an iucv_array as the buffer, reply or answer
+ * parameter on iucv_message_send, iucv_message_send2way, iucv_message_receive
+ * and iucv_message_reply if IUCV_IPBUFLST or IUCV_IPANSLST are used.
+ */
+struct iucv_array {
+ u32 address;
+ u32 length;
+} __attribute__ ((aligned (8)));
+
+extern struct bus_type iucv_bus;
+extern struct device *iucv_root;
+
+/*
+ * struct iucv_path
+ * pathid: 16 bit path identification
+ * msglim: 16 bit message limit
+ * flags: properties of the path: IPRMDATA, IPQUSCE, IPPRTY
+ * handler: address of iucv handler structure
+ * private: private information of the handler associated with the path
+ * list: list_head for the iucv_handler path list.
+ */
+struct iucv_path {
+ u16 pathid;
+ u16 msglim;
+ u8 flags;
+ void *private;
+ struct iucv_handler *handler;
+ struct list_head list;
+};
+
+/*
+ * struct iucv_message
+ * id: 32 bit message id
+ * audit: 32 bit error information of purged or replied messages
+ * class: 32 bit target class of a message (source class for replies)
+ * tag: 32 bit tag to be associated with the message
+ * length: 32 bit length of the message / reply
+ * reply_size: 32 bit maximum allowed length of the reply
+ * rmmsg: 8 byte inline message
+ * flags: message properties (IUCV_IPPRTY)
+ */
+struct iucv_message {
+ u32 id;
+ u32 audit;
+ u32 class;
+ u32 tag;
+ u32 length;
+ u32 reply_size;
+ u8 rmmsg[8];
+ u8 flags;
+};
+
+/*
+ * struct iucv_handler
+ *
+ * A vector of functions that handle IUCV interrupts. Each function gets
+ * a parameter area as defined by the CP Programming Services and a private
+ * pointer that is provided by the user of the interface.
+ */
+struct iucv_handler {
+ /*
+ * The path_pending function is called after an iucv interrupt
+ * type 0x01 has been received. The base code allocates a path
+ * structure and "asks" the handler if this path belongs to the
+ * handler. To accept the path the path_pending function needs
+ * to call iucv_path_accept and return 0. If the callback returns
+ * a value != 0 the iucv base code will continue with the next
+ * handler. The order in which the path_pending functions are
+ * called is the order of the registration of the iucv handlers
+ * to the base code.
+ */
+ int (*path_pending)(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
+ /*
+ * The path_complete function is called after an iucv interrupt
+ * type 0x02 has been received for a path that has been established
+ * for this handler with iucv_path_connect and got accepted by the
+ * peer with iucv_path_accept.
+ */
+ void (*path_complete)(struct iucv_path *, u8 ipuser[16]);
+ /*
+ * The path_severed function is called after an iucv interrupt
+ * type 0x03 has been received. The communication peer has shut down
+ * its end of the communication path. The path still exists and
+ * remaining messages can be received until an iucv_path_sever
+ * shuts down the other end of the path as well.
+ */
+ void (*path_severed)(struct iucv_path *, u8 ipuser[16]);
+ /*
+ * The path_quiesced function is called after an iucv interrupt
+ * type 0x04 has been received. The communication peer has quiesced
+ * the path. Delivery of messages is stopped until iucv_path_resume
+ * has been called.
+ */
+ void (*path_quiesced)(struct iucv_path *, u8 ipuser[16]);
+ /*
+ * The path_resumed function is called after an iucv interrupt
+ * type 0x05 has been received. The communication peer has resumed
+ * the path.
+ */
+ void (*path_resumed)(struct iucv_path *, u8 ipuser[16]);
+ /*
+ * The message_pending function is called after an iucv interrupt
+ * type 0x06 or type 0x07 has been received. A new message is
+ * available and can be received with iucv_message_receive.
+ */
+ void (*message_pending)(struct iucv_path *, struct iucv_message *);
+ /*
+ * The message_complete function is called after an iucv interrupt
+ * type 0x08 or type 0x09 has been received. A message sent with
+ * iucv_message_send2way has been replied to. The reply can be
+ * received with iucv_message_receive.
+ */
+ void (*message_complete)(struct iucv_path *, struct iucv_message *);
+
+ struct list_head list;
+ struct list_head paths;
+};
+
+/**
+ * iucv_register:
+ * @handler: address of iucv handler structure
+ * @smp: != 0 indicates that the handler can deal with out of order messages
+ *
+ * Registers a driver with IUCV.
+ *
+ * Returns 0 on success, -ENOMEM if the memory allocation for the pathid
+ * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus.
+ */
+int iucv_register(struct iucv_handler *handler, int smp);
+
+/**
+ * iucv_unregister
+ * @handler: address of iucv handler structure
+ * @smp: != 0 indicates that the handler can deal with out of order messages
+ *
+ * Unregister driver from IUCV.
+ */
+void iucv_unregister(struct iucv_handler *handler, int smp);
+
+/**
+ * iucv_path_alloc
+ * @msglim: initial message limit
+ * @flags: initial flags
+ * @gfp: kmalloc allocation flag
+ *
+ * Allocate a new path structure for use with iucv_connect.
+ *
+ * Returns NULL if the memory allocation failed or a pointer to the
+ * path structure.
+ */
+static inline struct iucv_path *iucv_path_alloc(u16 msglim, u8 flags, gfp_t gfp)
+{
+ struct iucv_path *path;
+
+ path = kzalloc(sizeof(struct iucv_path), gfp);
+ if (path) {
+ path->msglim = msglim;
+ path->flags = flags;
+ }
+ return path;
+}
+
+/**
+ * iucv_path_free
+ * @path: address of iucv path structure
+ *
+ * Frees a path structure.
+ */
+static inline void iucv_path_free(struct iucv_path *path)
+{
+ kfree(path);
+}
+
+/**
+ * iucv_path_accept
+ * @path: address of iucv path structure
+ * @handler: address of iucv handler structure
+ * @userdata: 16 bytes of data reflected to the communication partner
+ * @private: private data passed to interrupt handlers for this path
+ *
+ * This function is issued after the user received a connection pending
+ * external interrupt and now wishes to complete the IUCV communication path.
+ *
+ * Returns the result of the CP IUCV call.
+ */
+int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
+ u8 userdata[16], void *private);
+
+/**
+ * iucv_path_connect
+ * @path: address of iucv path structure
+ * @handler: address of iucv handler structure
+ * @userid: 8-byte user identification
+ * @system: 8-byte target system identification
+ * @userdata: 16 bytes of data reflected to the communication partner
+ * @private: private data passed to interrupt handlers for this path
+ *
+ * This function establishes an IUCV path. Although the connect may complete
+ * successfully, you are not able to use the path until you receive an IUCV
+ * Connection Complete external interrupt.
+ *
+ * Returns the result of the CP IUCV call.
+ */
+int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
+ u8 userid[8], u8 system[8], u8 userdata[16],
+ void *private);
+
+/**
+ * iucv_path_quiesce:
+ * @path: address of iucv path structure
+ * @userdata: 16 bytes of data reflected to the communication partner
+ *
+ * This function temporarily suspends incoming messages on an IUCV path.
+ * You can later reactivate the path by invoking the iucv_path_resume function.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16]);
+
+/**
+ * iucv_path_resume:
+ * @path: address of iucv path structure
+ * @userdata: 16 bytes of data reflected to the communication partner
+ *
+ * This function resumes incoming messages on an IUCV path that has
+ * been stopped with iucv_path_quiesce.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_path_resume(struct iucv_path *path, u8 userdata[16]);
+
+/**
+ * iucv_path_sever
+ * @path: address of iucv path structure
+ * @userdata: 16 bytes of data reflected to the communication partner
+ *
+ * This function terminates an IUCV path.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_path_sever(struct iucv_path *path, u8 userdata[16]);
+
+/**
+ * iucv_message_purge
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @srccls: source class of message
+ *
+ * Cancels a message you have sent.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
+ u32 srccls);
+
+/**
+ * iucv_message_receive
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @flags: flags that affect how the message is received (IUCV_IPBUFLST)
+ * @buffer: address of data buffer or address of struct iucv_array
+ * @size: length of data buffer
+ * @residual:
+ *
+ * This function receives messages that are being sent to you over
+ * established paths. This function will deal with RMDATA messages
+ * embedded in struct iucv_message as well.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, void *buffer, size_t size, size_t *residual);
+
+/**
+ * iucv_message_reject
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ *
+ * The reject function refuses a specified message. Between the time you
+ * are notified of a message and the time that you complete the message,
+ * the message may be rejected.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg);
+
+/**
+ * iucv_message_reply
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
+ * @reply: address of data buffer or address of struct iucv_array
+ * @size: length of reply data buffer
+ *
+ * This function responds to the two-way messages that you receive. You
+ * must completely identify the message to which you wish to reply, i.e.
+ * pathid, msgid, and trgcls. Prmmsg signifies that the data is moved into
+ * the parameter list.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, void *reply, size_t size);
+
+/**
+ * iucv_message_send
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
+ * @srccls: source class of message
+ * @buffer: address of data buffer or address of struct iucv_array
+ * @size: length of send buffer
+ *
+ * This function transmits data to another application. Data to be
+ * transmitted is in a buffer. This is a one-way message; the receiver
+ * will not reply to the message.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, u32 srccls, void *buffer, size_t size);
+
+/**
+ * iucv_message_send2way
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @flags: how the message is sent and the reply is received
+ * (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_IPANSLST)
+ * @srccls: source class of message
+ * @buffer: address of data buffer or address of struct iucv_array
+ * @size: length of send buffer
+ * @ansbuf: address of answer buffer or address of struct iucv_array
+ * @asize: size of reply buffer
+ *
+ * This function transmits data to another application. Data to be
+ * transmitted is in a buffer. The receiver of the send is expected to
+ * reply to the message and a buffer is provided into which IUCV moves
+ * the reply to this message.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, u32 srccls, void *buffer, size_t size,
+ void *answer, size_t asize, size_t *residual);
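
Taken together, the calls documented above compose as register, connect, and
then send once the Connection Complete interrupt arrives. A compressed
kernel-side sketch; the demo_* names, the message limit of 10, the NULL
target system, and the blank-padded placeholder peer id are illustrative
assumptions rather than part of this patch.

#include <net/iucv/iucv.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

static void demo_path_complete(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_message msg = { .class = 0, .tag = 1 };
	static char text[] = "hello";

	/* The path may only be used after this Connection Complete callback. */
	iucv_message_send(path, &msg, 0, 0, text, sizeof(text));
}

static void demo_message_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	char buf[64];

	/* Pull in the pending message (IPRMDATA inline data is handled too). */
	iucv_message_receive(path, msg, 0, buf, sizeof(buf), NULL);
}

static struct iucv_handler demo_handler = {
	.path_complete   = demo_path_complete,
	.message_pending = demo_message_pending,
};

static int demo_iucv_connect(void)
{
	struct iucv_path *path;
	u8 userid[8], userdata[16];
	int rc;

	rc = iucv_register(&demo_handler, 0);
	if (rc)
		return rc;

	path = iucv_path_alloc(10, IUCV_IPRMDATA, GFP_KERNEL);
	if (!path) {
		iucv_unregister(&demo_handler, 0);
		return -ENOMEM;
	}

	memset(userid, ' ', sizeof(userid));
	memcpy(userid, "PEERGST", 7);		/* placeholder peer user id */
	memset(userdata, ' ', sizeof(userdata));
	memcpy(userdata, "DEMO", 4);

	rc = iucv_path_connect(path, &demo_handler, userid, NULL,
			       userdata, NULL);
	if (rc) {
		iucv_path_free(path);
		iucv_unregister(&demo_handler, 0);
	}
	return rc;
}
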
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index bd01b4633ee..68ec27490c2 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -45,6 +45,7 @@ union nf_conntrack_expect_proto {
#include <linux/netfilter/nf_conntrack_ftp.h>
#include <linux/netfilter/nf_conntrack_pptp.h>
#include <linux/netfilter/nf_conntrack_h323.h>
+#include <linux/netfilter/nf_conntrack_sane.h>
/* per conntrack: application helper private data */
union nf_conntrack_help {
@@ -52,6 +53,7 @@ union nf_conntrack_help {
struct nf_ct_ftp_master ct_ftp_info;
struct nf_ct_pptp_master ct_pptp_info;
struct nf_ct_h323_master ct_h323_info;
+ struct nf_ct_sane_master ct_sane_info;
};
#include <linux/types.h>
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index 61c62068ca6..bc57dd7b9b5 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -16,6 +16,7 @@ enum nf_nat_manip_type
#define IP_NAT_RANGE_MAP_IPS 1
#define IP_NAT_RANGE_PROTO_SPECIFIED 2
+#define IP_NAT_RANGE_PROTO_RANDOM 4
/* NAT sequence number modifications */
struct nf_nat_seq {
diff --git a/include/net/route.h b/include/net/route.h
index 486e37aff06..1440bdb5a27 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -146,7 +146,8 @@ static inline char rt_tos2priority(u8 tos)
static inline int ip_route_connect(struct rtable **rp, __be32 dst,
__be32 src, u32 tos, int oif, u8 protocol,
- __be16 sport, __be16 dport, struct sock *sk)
+ __be16 sport, __be16 dport, struct sock *sk,
+ int flags)
{
struct flowi fl = { .oif = oif,
.nl_u = { .ip4_u = { .daddr = dst,
@@ -168,7 +169,7 @@ static inline int ip_route_connect(struct rtable **rp, __be32 dst,
*rp = NULL;
}
security_sk_classify_flow(sk, &fl);
- return ip_route_output_flow(rp, &fl, sk, 0);
+ return ip_route_output_flow(rp, &fl, sk, flags);
}
static inline int ip_route_newports(struct rtable **rp, u8 protocol,
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cd8fa0c858a..5c472f255b7 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -802,9 +802,8 @@ static inline void tcp_update_wl(struct tcp_sock *tp, u32 ack, u32 seq)
/*
* Calculate(/check) TCP checksum
*/
-static inline __sum16 tcp_v4_check(struct tcphdr *th, int len,
- __be32 saddr, __be32 daddr,
- __wsum base)
+static inline __sum16 tcp_v4_check(int len, __be32 saddr,
+ __be32 daddr, __wsum base)
{
return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}
diff --git a/include/net/x25.h b/include/net/x25.h
index e47fe440d9d..fc3f03d976f 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -161,6 +161,14 @@ struct x25_sock {
unsigned long vc_facil_mask; /* inc_call facilities mask */
};
+struct x25_forward {
+ struct list_head node;
+ unsigned int lci;
+ struct net_device *dev1;
+ struct net_device *dev2;
+ atomic_t refcnt;
+};
+
static inline struct x25_sock *x25_sk(const struct sock *sk)
{
return (struct x25_sock *)sk;
@@ -172,6 +180,7 @@ extern int sysctl_x25_call_request_timeout;
extern int sysctl_x25_reset_request_timeout;
extern int sysctl_x25_clear_request_timeout;
extern int sysctl_x25_ack_holdback_timeout;
+extern int sysctl_x25_forward;
extern int x25_addr_ntoa(unsigned char *, struct x25_address *,
struct x25_address *);
@@ -198,6 +207,13 @@ extern int x25_negotiate_facilities(struct sk_buff *, struct sock *,
struct x25_dte_facilities *);
extern void x25_limit_facilities(struct x25_facilities *, struct x25_neigh *);
+/* x25_forward.c */
+extern void x25_clear_forward_by_lci(unsigned int lci);
+extern void x25_clear_forward_by_dev(struct net_device *);
+extern int x25_forward_data(int, struct x25_neigh *, struct sk_buff *);
+extern int x25_forward_call(struct x25_address *, struct x25_neigh *,
+ struct sk_buff *, int);
+
/* x25_in.c */
extern int x25_process_rx_frame(struct sock *, struct sk_buff *);
extern int x25_backlog_rcv(struct sock *, struct sk_buff *);
@@ -282,6 +298,8 @@ extern struct hlist_head x25_list;
extern rwlock_t x25_list_lock;
extern struct list_head x25_route_list;
extern rwlock_t x25_route_list_lock;
+extern struct list_head x25_forward_list;
+extern rwlock_t x25_forward_list_lock;
extern int x25_proc_init(void);
extern void x25_proc_exit(void);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index e4765413cf8..16924cb772c 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -252,10 +252,13 @@ struct xfrm_state_afinfo {
xfrm_address_t *daddr, xfrm_address_t *saddr);
int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
+ int (*output)(struct sk_buff *skb);
};
extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
+extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
+extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
@@ -359,6 +362,19 @@ struct xfrm_policy
struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
};
+struct xfrm_migrate {
+ xfrm_address_t old_daddr;
+ xfrm_address_t old_saddr;
+ xfrm_address_t new_daddr;
+ xfrm_address_t new_saddr;
+ u8 proto;
+ u8 mode;
+ u16 reserved;
+ u32 reqid;
+ u16 old_family;
+ u16 new_family;
+};
+
#define XFRM_KM_TIMEOUT 30
/* which seqno */
#define XFRM_REPLAY_SEQ 1
@@ -385,6 +401,7 @@ struct xfrm_mgr
int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
int (*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
int (*report)(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
+ int (*migrate)(struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_bundles);
};
extern int xfrm_register_km(struct xfrm_mgr *km);
@@ -985,6 +1002,16 @@ extern int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst,
struct flowi *fl, int family, int strict);
extern void xfrm_init_pmtu(struct dst_entry *dst);
+#ifdef CONFIG_XFRM_MIGRATE
+extern int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
+ struct xfrm_migrate *m, int num_bundles);
+extern struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m);
+extern struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
+ struct xfrm_migrate *m);
+extern int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
+ struct xfrm_migrate *m, int num_bundles);
+#endif
+
extern wait_queue_head_t km_waitq;
extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid);
@@ -1050,5 +1077,25 @@ static inline void xfrm_aevent_doreplay(struct xfrm_state *x)
xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
}
+#ifdef CONFIG_XFRM_MIGRATE
+static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
+{
+ return (struct xfrm_algo *)kmemdup(orig, sizeof(*orig) + orig->alg_key_len, GFP_KERNEL);
+}
+
+static inline void xfrm_states_put(struct xfrm_state **states, int n)
+{
+ int i;
+ for (i = 0; i < n; i++)
+ xfrm_state_put(*(states + i));
+}
+
+static inline void xfrm_states_delete(struct xfrm_state **states, int n)
+{
+ int i;
+ for (i = 0; i < n; i++)
+ xfrm_state_delete(*(states + i));
+}
+#endif
#endif /* _NET_XFRM_H */
diff --git a/mm/filemap.c b/mm/filemap.c
index 8332c77b1bd..f30ef28405d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -606,26 +606,6 @@ struct page * find_get_page(struct address_space *mapping, unsigned long offset)
EXPORT_SYMBOL(find_get_page);
/**
- * find_trylock_page - find and lock a page
- * @mapping: the address_space to search
- * @offset: the page index
- *
- * Same as find_get_page(), but trylock it instead of incrementing the count.
- */
-struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
-{
- struct page *page;
-
- read_lock_irq(&mapping->tree_lock);
- page = radix_tree_lookup(&mapping->page_tree, offset);
- if (page && TestSetPageLocked(page))
- page = NULL;
- read_unlock_irq(&mapping->tree_lock);
- return page;
-}
-EXPORT_SYMBOL(find_trylock_page);
-
-/**
* find_lock_page - locate, pin and lock a pagecache page
* @mapping: the address_space to search
* @offset: the page index
diff --git a/net/Kconfig b/net/Kconfig
index 7dfc9492069..915657832d9 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -37,6 +37,7 @@ config NETDEBUG
source "net/packet/Kconfig"
source "net/unix/Kconfig"
source "net/xfrm/Kconfig"
+source "net/iucv/Kconfig"
config INET
bool "TCP/IP networking"
diff --git a/net/Makefile b/net/Makefile
index ad4d14f4bb2..4854ac50631 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -47,6 +47,7 @@ obj-$(CONFIG_IP_SCTP) += sctp/
obj-$(CONFIG_IEEE80211) += ieee80211/
obj-$(CONFIG_TIPC) += tipc/
obj-$(CONFIG_NETLABEL) += netlabel/
+obj-$(CONFIG_IUCV) += iucv/
ifeq ($(CONFIG_NET),y)
obj-$(CONFIG_SYSCTL) += sysctl_net.o
diff --git a/net/atm/common.c b/net/atm/common.c
index fbabff49446..a2878e92c3a 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -816,7 +816,8 @@ static void __exit atm_exit(void)
proto_unregister(&vcc_proto);
}
-module_init(atm_init);
+subsys_initcall(atm_init);
+
module_exit(atm_exit);
MODULE_LICENSE("GPL");
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index ea3337ad0ed..a25fa8cb528 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -949,44 +949,29 @@ static ctl_table brnf_net_table[] = {
};
#endif
-int br_netfilter_init(void)
+int __init br_netfilter_init(void)
{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(br_nf_ops); i++) {
- int ret;
-
- if ((ret = nf_register_hook(&br_nf_ops[i])) >= 0)
- continue;
-
- while (i--)
- nf_unregister_hook(&br_nf_ops[i]);
+ int ret;
+ ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
+ if (ret < 0)
return ret;
- }
-
#ifdef CONFIG_SYSCTL
brnf_sysctl_header = register_sysctl_table(brnf_net_table, 0);
if (brnf_sysctl_header == NULL) {
printk(KERN_WARNING
"br_netfilter: can't register to sysctl.\n");
- for (i = 0; i < ARRAY_SIZE(br_nf_ops); i++)
- nf_unregister_hook(&br_nf_ops[i]);
- return -EFAULT;
+ nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
+ return -ENOMEM;
}
#endif
-
printk(KERN_NOTICE "Bridge firewalling registered\n");
-
return 0;
}
void br_netfilter_fini(void)
{
- int i;
-
- for (i = ARRAY_SIZE(br_nf_ops) - 1; i >= 0; i--)
- nf_unregister_hook(&br_nf_ops[i]);
+ nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
#ifdef CONFIG_SYSCTL
unregister_sysctl_table(brnf_sysctl_header);
#endif
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index a9139682c49..7d68b24b565 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -45,7 +45,7 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
if (nlh == NULL)
- return -ENOBUFS;
+ return -EMSGSIZE;
hdr = nlmsg_data(nlh);
hdr->ifi_family = AF_BRIDGE;
@@ -72,7 +72,8 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por
return nlmsg_end(skb, nlh);
nla_put_failure:
- return nlmsg_cancel(skb, nlh);
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
}
/*
@@ -89,9 +90,12 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
goto errout;
err = br_fill_ifinfo(skb, port, 0, 0, event, 0);
- /* failure implies BUG in br_nlmsg_size() */
- BUG_ON(err < 0);
-
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in br_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
err = rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
errout:
if (err < 0)
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c
index e4c642448e1..6afa4d017d4 100644
--- a/net/bridge/netfilter/ebt_ip.c
+++ b/net/bridge/netfilter/ebt_ip.c
@@ -93,6 +93,7 @@ static int ebt_ip_check(const char *tablename, unsigned int hookmask,
return -EINVAL;
if (info->protocol != IPPROTO_TCP &&
info->protocol != IPPROTO_UDP &&
+ info->protocol != IPPROTO_UDPLITE &&
info->protocol != IPPROTO_SCTP &&
info->protocol != IPPROTO_DCCP)
return -EINVAL;
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index a184f879f25..985df82e427 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -96,6 +96,7 @@ ebt_log_packet(unsigned int pf, unsigned int hooknum,
NIPQUAD(ih->daddr), ih->tos, ih->protocol);
if (ih->protocol == IPPROTO_TCP ||
ih->protocol == IPPROTO_UDP ||
+ ih->protocol == IPPROTO_UDPLITE ||
ih->protocol == IPPROTO_SCTP ||
ih->protocol == IPPROTO_DCCP) {
struct tcpudphdr _ports, *pptr;
diff --git a/net/core/dev.c b/net/core/dev.c
index 455d589683e..1e94a1b9a0f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3247,7 +3247,7 @@ void synchronize_net(void)
* unregister_netdev() instead of this.
*/
-int unregister_netdevice(struct net_device *dev)
+void unregister_netdevice(struct net_device *dev)
{
struct net_device *d, **dp;
@@ -3258,7 +3258,9 @@ int unregister_netdevice(struct net_device *dev)
if (dev->reg_state == NETREG_UNINITIALIZED) {
printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
"was registered\n", dev->name, dev);
- return -ENODEV;
+
+ WARN_ON(1);
+ return;
}
BUG_ON(dev->reg_state != NETREG_REGISTERED);
@@ -3280,11 +3282,7 @@ int unregister_netdevice(struct net_device *dev)
break;
}
}
- if (!d) {
- printk(KERN_ERR "unregister net_device: '%s' not found\n",
- dev->name);
- return -ENODEV;
- }
+ BUG_ON(!d);
dev->reg_state = NETREG_UNREGISTERING;
@@ -3316,7 +3314,6 @@ int unregister_netdevice(struct net_device *dev)
synchronize_net();
dev_put(dev);
- return 0;
}
/**
diff --git a/net/core/dst.c b/net/core/dst.c
index 836ec660692..1a53fb39b7e 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -99,7 +99,14 @@ static void dst_run_gc(unsigned long dummy)
printk("dst_total: %d/%d %ld\n",
atomic_read(&dst_total), delayed, dst_gc_timer_expires);
#endif
- mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);
+ /* if the next desired timer is more than 4 seconds in the future
+ * then round the timer to whole seconds
+ */
+ if (dst_gc_timer_expires > 4*HZ)
+ mod_timer(&dst_gc_timer,
+ round_jiffies(jiffies + dst_gc_timer_expires));
+ else
+ mod_timer(&dst_gc_timer, jiffies + dst_gc_timer_expires);
out:
spin_unlock(&dst_lock);
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 1df6cd4568d..215f1bff048 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -331,7 +331,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*frh), flags);
if (nlh == NULL)
- return -1;
+ return -EMSGSIZE;
frh = nlmsg_data(nlh);
frh->table = rule->table;
@@ -359,7 +359,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
return nlmsg_end(skb, nlh);
nla_put_failure:
- return nlmsg_cancel(skb, nlh);
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
}
int fib_rules_dump(struct sk_buff *skb, struct netlink_callback *cb, int family)
@@ -405,9 +406,12 @@ static void notify_rule_change(int event, struct fib_rule *rule,
goto errout;
err = fib_nl_fill_rule(skb, rule, pid, nlh->nlmsg_seq, event, 0, ops);
- /* failure implies BUG in fib_rule_nlmsg_size() */
- BUG_ON(err < 0);
-
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in fib_rule_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
err = rtnl_notify(skb, pid, ops->nlgroup, nlh, GFP_KERNEL);
errout:
if (err < 0)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e7300b6b407..054d46493d2 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -696,7 +696,10 @@ next_elt:
if (!expire)
expire = 1;
- mod_timer(&tbl->gc_timer, now + expire);
+ if (expire>HZ)
+ mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
+ else
+ mod_timer(&tbl->gc_timer, now + expire);
write_unlock(&tbl->lock);
}
@@ -1637,7 +1640,7 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
if (nlh == NULL)
- return -ENOBUFS;
+ return -EMSGSIZE;
ndtmsg = nlmsg_data(nlh);
@@ -1706,7 +1709,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
nla_put_failure:
read_unlock_bh(&tbl->lock);
- return nlmsg_cancel(skb, nlh);
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
}
static int neightbl_fill_param_info(struct sk_buff *skb,
@@ -1720,7 +1724,7 @@ static int neightbl_fill_param_info(struct sk_buff *skb,
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
if (nlh == NULL)
- return -ENOBUFS;
+ return -EMSGSIZE;
ndtmsg = nlmsg_data(nlh);
@@ -1737,7 +1741,8 @@ static int neightbl_fill_param_info(struct sk_buff *skb,
return nlmsg_end(skb, nlh);
errout:
read_unlock_bh(&tbl->lock);
- return nlmsg_cancel(skb, nlh);
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
}
static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
@@ -1955,7 +1960,7 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
if (nlh == NULL)
- return -ENOBUFS;
+ return -EMSGSIZE;
ndm = nlmsg_data(nlh);
ndm->ndm_family = neigh->ops->family;
@@ -1987,7 +1992,8 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
return nlmsg_end(skb, nlh);
nla_put_failure:
- return nlmsg_cancel(skb, nlh);
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
}
@@ -2429,9 +2435,12 @@ static void __neigh_notify(struct neighbour *n, int type, int flags)
goto errout;
err = neigh_fill_info(skb, n, 0, 0, type, flags);
- /* failure implies BUG in neigh_nlmsg_size() */
- BUG_ON(err < 0);
-
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
err = rtnl_notify(skb, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
errout:
if (err < 0)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index e76539a5eb5..9bf9ae05f15 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -320,7 +320,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
if (nlh == NULL)
- return -ENOBUFS;
+ return -EMSGSIZE;
ifm = nlmsg_data(nlh);
ifm->ifi_family = AF_UNSPEC;
@@ -384,7 +384,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
return nlmsg_end(skb, nlh);
nla_put_failure:
- return nlmsg_cancel(skb, nlh);
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
}
static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
@@ -633,9 +634,12 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
err = rtnl_fill_ifinfo(nskb, dev, iw, iw_buf_len, RTM_NEWLINK,
NETLINK_CB(skb).pid, nlh->nlmsg_seq, 0, 0);
- /* failure impilies BUG in if_nlmsg_size or wireless_rtnetlink_get */
- BUG_ON(err < 0);
-
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in if_nlmsg_size */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(nskb);
+ goto errout;
+ }
err = rtnl_unicast(nskb, NETLINK_CB(skb).pid);
errout:
kfree(iw_buf);
@@ -678,9 +682,12 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
goto errout;
err = rtnl_fill_ifinfo(skb, dev, NULL, 0, type, 0, 0, change, 0);
- /* failure implies BUG in if_nlmsg_size() */
- BUG_ON(err < 0);
-
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in if_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
err = rtnl_notify(skb, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
errout:
if (err < 0)
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 40402c59506..5c452a3ec4d 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -479,7 +479,8 @@ static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
ccid3_pr_debug("%s(%p), s=%u, w_init=%llu, "
"R_sample=%dus, X=%u\n", dccp_role(sk),
- sk, hctx->ccid3hctx_s, w_init,
+ sk, hctx->ccid3hctx_s,
+ (unsigned long long)w_init,
(int)r_sample,
(unsigned)(hctx->ccid3hctx_x >> 6));
@@ -1005,7 +1006,7 @@ static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
DCCP_BUG_ON(r_sample < 0);
if (unlikely(r_sample <= t_elapsed))
DCCP_WARN("r_sample=%ldus, t_elapsed=%ldus\n",
- r_sample, t_elapsed);
+ (long)r_sample, (long)t_elapsed);
else
r_sample -= t_elapsed;
CCID3_RTT_SANITY_CHECK(r_sample);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 90c74b4adb7..fa2c982d430 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -72,7 +72,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
tmp = ip_route_connect(&rt, nexthop, inet->saddr,
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
IPPROTO_DCCP,
- inet->sport, usin->sin_port, sk);
+ inet->sport, usin->sin_port, sk, 1);
if (tmp < 0)
return tmp;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 6b91a9dd041..79140b3e592 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1041,7 +1041,7 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
if (final_p)
ipv6_addr_copy(&fl.fl6_dst, final_p);
- err = xfrm_lookup(&dst, &fl, sk, 0);
+ err = xfrm_lookup(&dst, &fl, sk, 1);
if (err < 0)
goto failure;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 63b3fa20e14..48438565d70 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -1024,7 +1024,6 @@ static int __init dccp_init(void)
do {
dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
sizeof(struct inet_ehash_bucket);
- dccp_hashinfo.ehash_size >>= 1;
while (dccp_hashinfo.ehash_size &
(dccp_hashinfo.ehash_size - 1))
dccp_hashinfo.ehash_size--;
@@ -1037,9 +1036,10 @@ static int __init dccp_init(void)
goto out_free_bind_bucket_cachep;
}
- for (i = 0; i < (dccp_hashinfo.ehash_size << 1); i++) {
+ for (i = 0; i < dccp_hashinfo.ehash_size; i++) {
rwlock_init(&dccp_hashinfo.ehash[i].lock);
INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
+ INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].twchain);
}
bhash_order = ehash_order;
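The dccp_init() hunk stops halving the table (TIME-WAIT entries now live in a per-bucket twchain, as in the changes further down) and keeps rounding the bucket count down to a power of two with the same decrement loop. A standalone sketch of that rounding step, assuming nothing beyond standard C:

/* Rounding a bucket count down to a power of two, as dccp_init() does
 * with dccp_hashinfo.ehash_size (standalone sketch, not kernel code). */
#include <assert.h>
#include <stdio.h>

static unsigned long round_down_pow2(unsigned long n)
{
	/* Same loop as the hunk above: decrement until one bit remains. */
	while (n & (n - 1))
		n--;
	return n;
}

int main(void)
{
	assert(round_down_pow2(1000) == 512);
	assert(round_down_pow2(4096) == 4096);
	printf("%lu %lu\n", round_down_pow2(1000), round_down_pow2(4096));
	return 0;
}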
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index ed083ab455b..90b3dfd72b4 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -749,7 +749,7 @@ static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags);
if (nlh == NULL)
- return -ENOBUFS;
+ return -EMSGSIZE;
ifm = nlmsg_data(nlh);
ifm->ifa_family = AF_DECnet;
@@ -768,7 +768,8 @@ static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
return nlmsg_end(skb, nlh);
nla_put_failure:
- return nlmsg_cancel(skb, nlh);
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
}
static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa)
@@ -781,9 +782,12 @@ static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa)
goto errout;
err = dn_nl_fill_ifaddr(skb, ifa, 0, 0, event, 0);
- /* failure implies BUG in dn_ifaddr_nlmsg_size() */
- BUG_ON(err < 0);
-
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in dn_ifaddr_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
err = rtnl_notify(skb, 0, RTNLGRP_DECnet_IFADDR, NULL, GFP_KERNEL);
errout:
if (err < 0)
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 13b2421991b..c1f0cc1b1c6 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -350,7 +350,7 @@ static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
nlmsg_failure:
rtattr_failure:
skb_trim(skb, b - skb->data);
- return -1;
+ return -EMSGSIZE;
}
@@ -368,9 +368,12 @@ static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id,
err = dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id,
f->fn_type, f->fn_scope, &f->fn_key, z,
DN_FIB_INFO(f), 0);
- /* failure implies BUG in dn_fib_nlmsg_size() */
- BUG_ON(err < 0);
-
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in dn_fib_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
err = rtnl_notify(skb, pid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL);
errout:
if (err < 0)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 86400964367..5750a2b2a0d 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1007,7 +1007,7 @@ static int inet_sk_reselect_saddr(struct sock *sk)
RT_CONN_FLAGS(sk),
sk->sk_bound_dev_if,
sk->sk_protocol,
- inet->sport, inet->dport, sk);
+ inet->sport, inet->dport, sk, 0);
if (err)
return err;
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 7b068a89195..0072d79f0c2 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -49,7 +49,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
err = ip_route_connect(&rt, usin->sin_addr.s_addr, saddr,
RT_CONN_FLAGS(sk), oif,
sk->sk_protocol,
- inet->sport, usin->sin_port, sk);
+ inet->sport, usin->sin_port, sk, 1);
if (err)
return err;
if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) {
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 480ace9819f..c4020364096 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1140,7 +1140,7 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags);
if (nlh == NULL)
- return -ENOBUFS;
+ return -EMSGSIZE;
ifm = nlmsg_data(nlh);
ifm->ifa_family = AF_INET;
@@ -1167,7 +1167,8 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
return nlmsg_end(skb, nlh);
nla_put_failure:
- return nlmsg_cancel(skb, nlh);
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
}
static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
@@ -1225,9 +1226,12 @@ static void rtmsg_ifa(int event, struct in_ifaddr* ifa, struct nlmsghdr *nlh,
goto errout;
err = inet_fill_ifaddr(skb, ifa, pid, seq, event, 0);
- /* failure implies BUG in inet_nlmsg_size() */
- BUG_ON(err < 0);
-
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in inet_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
err = rtnl_notify(skb, pid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
errout:
if (err < 0)
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index e63b8a98fb4..be1028c9933 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -314,9 +314,12 @@ void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
err = fib_dump_info(skb, info->pid, seq, event, tb_id,
fa->fa_type, fa->fa_scope, key, dst_len,
fa->fa_tos, fa->fa_info, 0);
- /* failure implies BUG in fib_nlmsg_size() */
- BUG_ON(err < 0);
-
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in fib_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
err = rtnl_notify(skb, info->pid, RTNLGRP_IPV4_ROUTE,
info->nlh, GFP_KERNEL);
errout:
@@ -960,7 +963,7 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*rtm), flags);
if (nlh == NULL)
- return -ENOBUFS;
+ return -EMSGSIZE;
rtm = nlmsg_data(nlh);
rtm->rtm_family = AF_INET;
@@ -1031,7 +1034,8 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
return nlmsg_end(skb, nlh);
nla_put_failure:
- return nlmsg_cancel(skb, nlh);
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
}
/*
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 0017ccb01d6..024ae56cab2 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -455,6 +455,8 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
skb = add_grhead(skb, pmc, type, &pgr);
first = 0;
}
+ if (!skb)
+ return NULL;
psrc = (__be32 *)skb_put(skb, sizeof(__be32));
*psrc = psf->sf_inaddr;
scount++; stotal++;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 77761ac4f7b..8aa7d51e688 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -153,7 +153,7 @@ static int inet_csk_diag_fill(struct sock *sk,
rtattr_failure:
nlmsg_failure:
skb_trim(skb, b - skb->data);
- return -1;
+ return -EMSGSIZE;
}
static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
@@ -209,7 +209,7 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
return skb->len;
nlmsg_failure:
skb_trim(skb, previous_tail - skb->data);
- return -1;
+ return -EMSGSIZE;
}
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
@@ -274,11 +274,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
if (!rep)
goto out;
- if (sk_diag_fill(sk, rep, req->idiag_ext,
- NETLINK_CB(in_skb).pid,
- nlh->nlmsg_seq, 0, nlh) <= 0)
- BUG();
-
+ err = sk_diag_fill(sk, rep, req->idiag_ext,
+ NETLINK_CB(in_skb).pid,
+ nlh->nlmsg_seq, 0, nlh);
+ if (err < 0) {
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(rep);
+ goto out;
+ }
err = netlink_unicast(idiagnl, rep, NETLINK_CB(in_skb).pid,
MSG_DONTWAIT);
if (err > 0)
@@ -775,7 +778,7 @@ next_normal:
struct inet_timewait_sock *tw;
inet_twsk_for_each(tw, node,
- &hashinfo->ehash[i + hashinfo->ehash_size].chain) {
+ &head->twchain) {
if (num < s_num)
goto next_dying;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 8c79c8a4ea5..150ace18dc7 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -212,7 +212,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
write_lock(&head->lock);
/* Check TIME-WAIT sockets first. */
- sk_for_each(sk2, node, &(head + hinfo->ehash_size)->chain) {
+ sk_for_each(sk2, node, &head->twchain) {
tw = inet_twsk(sk2);
if (INET_TW_MATCH(sk2, hash, acookie, saddr, daddr, ports, dif)) {
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 9f414e35c48..a73cf93cee3 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -78,8 +78,8 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
if (__sk_del_node_init(sk))
sock_prot_dec_use(sk->sk_prot);
- /* Step 3: Hash TW into TIMEWAIT half of established hash table. */
- inet_twsk_add_node(tw, &(ehead + hashinfo->ehash_size)->chain);
+ /* Step 3: Hash TW into TIMEWAIT chain. */
+ inet_twsk_add_node(tw, &ehead->twchain);
atomic_inc(&tw->tw_refcnt);
write_unlock(&ehead->lock);
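The inet_timewait_sock.c, inet_hashtables.c and inet_diag.c hunks all stem from one layout change: TIME-WAIT sockets hang off a dedicated twchain head inside each established-hash bucket instead of a mirrored bucket at ehash[i + ehash_size]. A toy illustration of that layout (illustrative types only, not the kernel structures):

/* Sketch of the bucket layout change: each established-hash bucket now
 * carries its own TIME-WAIT list ("twchain") instead of the TIME-WAIT
 * entries living in a second half of the array. */
#include <stdio.h>

struct node { struct node *next; int id; };

struct bucket {
	struct node *chain;	/* established sockets */
	struct node *twchain;	/* TIME-WAIT sockets, formerly bucket[i + size] */
};

static void tw_add(struct bucket *b, struct node *n)
{
	n->next = b->twchain;	/* counterpart of inet_twsk_add_node() */
	b->twchain = n;
}

int main(void)
{
	struct bucket b = { 0 };
	struct node tw = { .next = NULL, .id = 1 };

	tw_add(&b, &tw);
	printf("twchain head id=%d\n", b.twchain->id);
	return 0;
}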
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 476cb6084c7..51c83500790 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1008,7 +1008,8 @@ ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
goto done;
dev = t->dev;
}
- err = unregister_netdevice(dev);
+ unregister_netdevice(dev);
+ err = 0;
break;
default:
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 9d719d664e5..da8bbd20c7e 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -754,7 +754,8 @@ ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
goto done;
dev = t->dev;
}
- err = unregister_netdevice(dev);
+ unregister_netdevice(dev);
+ err = 0;
break;
default:
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 47bd3ad18b7..9b08e7ad71b 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -361,32 +361,6 @@ config IP_NF_TARGET_ULOG
To compile it as a module, choose M here. If unsure, say N.
-config IP_NF_TARGET_TCPMSS
- tristate "TCPMSS target support"
- depends on IP_NF_IPTABLES
- ---help---
- This option adds a `TCPMSS' target, which allows you to alter the
- MSS value of TCP SYN packets, to control the maximum size for that
- connection (usually limiting it to your outgoing interface's MTU
- minus 40).
-
- This is used to overcome criminally braindead ISPs or servers which
- block ICMP Fragmentation Needed packets. The symptoms of this
- problem are that everything works fine from your Linux
- firewall/router, but machines behind it can never exchange large
- packets:
- 1) Web browsers connect, then hang with no data received.
- 2) Small mail works fine, but large emails hang.
- 3) ssh works fine, but scp hangs after initial handshaking.
-
- Workaround: activate this option and add a rule to your firewall
- configuration like:
-
- iptables -A FORWARD -p tcp --tcp-flags SYN,RST SYN \
- -j TCPMSS --clamp-mss-to-pmtu
-
- To compile it as a module, choose M here. If unsure, say N.
-
# NAT + specific targets: ip_conntrack
config IP_NF_NAT
tristate "Full NAT"
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 16d177b71bf..6625ec68180 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -103,7 +103,6 @@ obj-$(CONFIG_IP_NF_TARGET_SAME) += ipt_SAME.o
obj-$(CONFIG_IP_NF_NAT_SNMP_BASIC) += ip_nat_snmp_basic.o
obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
-obj-$(CONFIG_IP_NF_TARGET_TCPMSS) += ipt_TCPMSS.o
obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
index 06e4e8a6dd9..c34f48fe547 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
@@ -50,12 +50,9 @@ static DEFINE_RWLOCK(tcp_lock);
If it's non-zero, we mark only out of window RST segments as INVALID. */
int ip_ct_tcp_be_liberal __read_mostly = 0;
-/* When connection is picked up from the middle, how many packets are required
- to pass in each direction when we assume we are in sync - if any side uses
- window scaling, we lost the game.
- If it is set to zero, we disable picking up already established
+/* If it is set to zero, we disable picking up already established
connections. */
-int ip_ct_tcp_loose __read_mostly = 3;
+int ip_ct_tcp_loose __read_mostly = 1;
/* Max number of the retransmitted packets without receiving an (acceptable)
ACK from the destination. If this number is reached, a shorter timer
@@ -694,11 +691,10 @@ static int tcp_in_window(struct ip_ct_tcp *state,
before(sack, receiver->td_end + 1),
after(ack, receiver->td_end - MAXACKWINDOW(sender)));
- if (sender->loose || receiver->loose ||
- (before(seq, sender->td_maxend + 1) &&
- after(end, sender->td_end - receiver->td_maxwin - 1) &&
- before(sack, receiver->td_end + 1) &&
- after(ack, receiver->td_end - MAXACKWINDOW(sender)))) {
+ if (before(seq, sender->td_maxend + 1) &&
+ after(end, sender->td_end - receiver->td_maxwin - 1) &&
+ before(sack, receiver->td_end + 1) &&
+ after(ack, receiver->td_end - MAXACKWINDOW(sender))) {
/*
* Take into account window scaling (RFC 1323).
*/
@@ -743,15 +739,13 @@ static int tcp_in_window(struct ip_ct_tcp *state,
state->retrans = 0;
}
}
- /*
- * Close the window of disabled window tracking :-)
- */
- if (sender->loose)
- sender->loose--;
-
res = 1;
} else {
- if (LOG_INVALID(IPPROTO_TCP))
+ res = 0;
+ if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
+ ip_ct_tcp_be_liberal)
+ res = 1;
+ if (!res && LOG_INVALID(IPPROTO_TCP))
nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
"ip_ct_tcp: %s ",
before(seq, sender->td_maxend + 1) ?
@@ -762,8 +756,6 @@ static int tcp_in_window(struct ip_ct_tcp *state,
: "ACK is over the upper bound (ACKed data not seen yet)"
: "SEQ is under the lower bound (already ACKed data retransmitted)"
: "SEQ is over the upper bound (over the window of the receiver)");
-
- res = ip_ct_tcp_be_liberal;
}
DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u "
@@ -1105,8 +1097,6 @@ static int tcp_new(struct ip_conntrack *conntrack,
tcp_options(skb, iph, th, &conntrack->proto.tcp.seen[0]);
conntrack->proto.tcp.seen[1].flags = 0;
- conntrack->proto.tcp.seen[0].loose =
- conntrack->proto.tcp.seen[1].loose = 0;
} else if (ip_ct_tcp_loose == 0) {
/* Don't try to pick up connections. */
return 0;
@@ -1127,11 +1117,11 @@ static int tcp_new(struct ip_conntrack *conntrack,
conntrack->proto.tcp.seen[0].td_maxwin;
conntrack->proto.tcp.seen[0].td_scale = 0;
- /* We assume SACK. Should we assume window scaling too? */
+ /* We assume SACK and liberal window checking to handle
+ * window scaling */
conntrack->proto.tcp.seen[0].flags =
- conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM;
- conntrack->proto.tcp.seen[0].loose =
- conntrack->proto.tcp.seen[1].loose = ip_ct_tcp_loose;
+ conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
+ IP_CT_TCP_FLAG_BE_LIBERAL;
}
conntrack->proto.tcp.seen[1].td_end = 0;
diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c
index 9d1a5175dcd..5e08c2bf887 100644
--- a/net/ipv4/netfilter/ip_nat_core.c
+++ b/net/ipv4/netfilter/ip_nat_core.c
@@ -246,8 +246,9 @@ get_unique_tuple(struct ip_conntrack_tuple *tuple,
if (maniptype == IP_NAT_MANIP_SRC) {
if (find_appropriate_src(orig_tuple, tuple, range)) {
DEBUGP("get_unique_tuple: Found current src map\n");
- if (!ip_nat_used_tuple(tuple, conntrack))
- return;
+ if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
+ if (!ip_nat_used_tuple(tuple, conntrack))
+ return;
}
}
@@ -261,6 +262,13 @@ get_unique_tuple(struct ip_conntrack_tuple *tuple,
proto = ip_nat_proto_find_get(orig_tuple->dst.protonum);
+ /* Change protocol info to have some randomization */
+ if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
+ proto->unique_tuple(tuple, range, maniptype, conntrack);
+ ip_nat_proto_put(proto);
+ return;
+ }
+
/* Only bother mapping if it's not already in range and unique */
if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED)
|| proto->in_range(tuple, maniptype, &range->min, &range->max))
diff --git a/net/ipv4/netfilter/ip_nat_helper.c b/net/ipv4/netfilter/ip_nat_helper.c
index ee80feb4b2a..2e5c4bc52a6 100644
--- a/net/ipv4/netfilter/ip_nat_helper.c
+++ b/net/ipv4/netfilter/ip_nat_helper.c
@@ -183,7 +183,7 @@ ip_nat_mangle_tcp_packet(struct sk_buff **pskb,
datalen = (*pskb)->len - iph->ihl*4;
if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
tcph->check = 0;
- tcph->check = tcp_v4_check(tcph, datalen,
+ tcph->check = tcp_v4_check(datalen,
iph->saddr, iph->daddr,
csum_partial((char *)tcph,
datalen, 0));
diff --git a/net/ipv4/netfilter/ip_nat_proto_tcp.c b/net/ipv4/netfilter/ip_nat_proto_tcp.c
index b586d18b3fb..14ff24f53a7 100644
--- a/net/ipv4/netfilter/ip_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_nat_proto_tcp.c
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/random.h>
#include <linux/netfilter.h>
#include <linux/ip.h>
#include <linux/tcp.h>
@@ -75,6 +76,10 @@ tcp_unique_tuple(struct ip_conntrack_tuple *tuple,
range_size = ntohs(range->max.tcp.port) - min + 1;
}
+ /* Start from random port to avoid prediction */
+ if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
+ port = net_random();
+
for (i = 0; i < range_size; i++, port++) {
*portptr = htons(min + port % range_size);
if (!ip_nat_used_tuple(tuple, conntrack)) {
diff --git a/net/ipv4/netfilter/ip_nat_proto_udp.c b/net/ipv4/netfilter/ip_nat_proto_udp.c
index 5ced0877b32..dfd52167289 100644
--- a/net/ipv4/netfilter/ip_nat_proto_udp.c
+++ b/net/ipv4/netfilter/ip_nat_proto_udp.c
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/random.h>
#include <linux/netfilter.h>
#include <linux/ip.h>
#include <linux/udp.h>
@@ -74,6 +75,10 @@ udp_unique_tuple(struct ip_conntrack_tuple *tuple,
range_size = ntohs(range->max.udp.port) - min + 1;
}
+ /* Start from random port to avoid prediction */
+ if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
+ port = net_random();
+
for (i = 0; i < range_size; i++, port++) {
*portptr = htons(min + port % range_size);
if (!ip_nat_used_tuple(tuple, conntrack))
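The tcp and udp unique_tuple hunks above implement IP_NAT_RANGE_PROTO_RANDOM by starting the port search at a random offset and still probing every port in the range exactly once. A standalone sketch of that search order (port_in_use() is a made-up stand-in for ip_nat_used_tuple()):

/* Picking a source port: start at a random offset, then probe every
 * port in [min, min + range_size) exactly once. Standalone sketch. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Stand-in for ip_nat_used_tuple(): pretend even ports are taken. */
static int port_in_use(unsigned int port)
{
	return (port % 2) == 0;
}

static int pick_port(unsigned int min, unsigned int range_size)
{
	unsigned int i, port = (unsigned int)rand();	/* random start offset */

	for (i = 0; i < range_size; i++, port++) {
		unsigned int candidate = min + port % range_size;
		if (!port_in_use(candidate))
			return (int)candidate;
	}
	return -1;	/* every port in the range is busy */
}

int main(void)
{
	srand((unsigned int)time(NULL));
	printf("chosen port: %d\n", pick_port(1024, 64511));
	return 0;
}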
diff --git a/net/ipv4/netfilter/ip_nat_rule.c b/net/ipv4/netfilter/ip_nat_rule.c
index a176aa3031e..e1c8a05f3dc 100644
--- a/net/ipv4/netfilter/ip_nat_rule.c
+++ b/net/ipv4/netfilter/ip_nat_rule.c
@@ -86,7 +86,7 @@ static struct
}
};
-static struct ipt_table nat_table = {
+static struct xt_table nat_table = {
.name = "nat",
.valid_hooks = NAT_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
@@ -99,7 +99,7 @@ static unsigned int ipt_snat_target(struct sk_buff **pskb,
const struct net_device *in,
const struct net_device *out,
unsigned int hooknum,
- const struct ipt_target *target,
+ const struct xt_target *target,
const void *targinfo)
{
struct ip_conntrack *ct;
@@ -141,7 +141,7 @@ static unsigned int ipt_dnat_target(struct sk_buff **pskb,
const struct net_device *in,
const struct net_device *out,
unsigned int hooknum,
- const struct ipt_target *target,
+ const struct xt_target *target,
const void *targinfo)
{
struct ip_conntrack *ct;
@@ -166,7 +166,7 @@ static unsigned int ipt_dnat_target(struct sk_buff **pskb,
static int ipt_snat_checkentry(const char *tablename,
const void *entry,
- const struct ipt_target *target,
+ const struct xt_target *target,
void *targinfo,
unsigned int hook_mask)
{
@@ -182,7 +182,7 @@ static int ipt_snat_checkentry(const char *tablename,
static int ipt_dnat_checkentry(const char *tablename,
const void *entry,
- const struct ipt_target *target,
+ const struct xt_target *target,
void *targinfo,
unsigned int hook_mask)
{
@@ -193,6 +193,10 @@ static int ipt_dnat_checkentry(const char *tablename,
printk("DNAT: multiple ranges no longer supported\n");
return 0;
}
+ if (mr->range[0].flags & IP_NAT_RANGE_PROTO_RANDOM) {
+ printk("DNAT: port randomization not supported\n");
+ return 0;
+ }
return 1;
}
@@ -257,8 +261,9 @@ int ip_nat_rule_find(struct sk_buff **pskb,
return ret;
}
-static struct ipt_target ipt_snat_reg = {
+static struct xt_target ipt_snat_reg = {
.name = "SNAT",
+ .family = AF_INET,
.target = ipt_snat_target,
.targetsize = sizeof(struct ip_nat_multi_range_compat),
.table = "nat",
@@ -266,8 +271,9 @@ static struct ipt_target ipt_snat_reg = {
.checkentry = ipt_snat_checkentry,
};
-static struct ipt_target ipt_dnat_reg = {
+static struct xt_target ipt_dnat_reg = {
.name = "DNAT",
+ .family = AF_INET,
.target = ipt_dnat_target,
.targetsize = sizeof(struct ip_nat_multi_range_compat),
.table = "nat",
@@ -282,27 +288,27 @@ int __init ip_nat_rule_init(void)
ret = ipt_register_table(&nat_table, &nat_initial_table.repl);
if (ret != 0)
return ret;
- ret = ipt_register_target(&ipt_snat_reg);
+ ret = xt_register_target(&ipt_snat_reg);
if (ret != 0)
goto unregister_table;
- ret = ipt_register_target(&ipt_dnat_reg);
+ ret = xt_register_target(&ipt_dnat_reg);
if (ret != 0)
goto unregister_snat;
return ret;
unregister_snat:
- ipt_unregister_target(&ipt_snat_reg);
+ xt_unregister_target(&ipt_snat_reg);
unregister_table:
- ipt_unregister_table(&nat_table);
+ xt_unregister_table(&nat_table);
return ret;
}
void ip_nat_rule_cleanup(void)
{
- ipt_unregister_target(&ipt_dnat_reg);
- ipt_unregister_target(&ipt_snat_reg);
+ xt_unregister_target(&ipt_dnat_reg);
+ xt_unregister_target(&ipt_snat_reg);
ipt_unregister_table(&nat_table);
}
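From here on the netfilter hunks repeat one mechanical conversion: struct ipt_target/ipt_match become struct xt_target/xt_match, gain an explicit .family = AF_INET, and register through xt_register_target()/xt_register_match(). The shape of a converted target, reduced to a hypothetical no-op module (the field names are taken from the hunks; the NOOP target itself is invented for illustration and is not part of this patch):

/* Minimal shape of the ipt_* -> xt_* conversion repeated below:
 * an AF_INET target registered through the x_tables core. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>

static unsigned int
noop_target(struct sk_buff **pskb,
	    const struct net_device *in,
	    const struct net_device *out,
	    unsigned int hooknum,
	    const struct xt_target *target,
	    const void *targinfo)
{
	return XT_CONTINUE;		/* leave the packet alone */
}

static struct xt_target noop_reg = {
	.name	= "NOOP",
	.family	= AF_INET,		/* the field every converted target gains */
	.target	= noop_target,
	.me	= THIS_MODULE,
};

static int __init noop_init(void)
{
	return xt_register_target(&noop_reg);	/* was ipt_register_target() */
}

static void __exit noop_fini(void)
{
	xt_unregister_target(&noop_reg);	/* was ipt_unregister_target() */
}

module_init(noop_init);
module_exit(noop_fini);
MODULE_LICENSE("GPL");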
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index fc1f153c86b..5a7b3a34138 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -216,7 +216,7 @@ ipt_do_table(struct sk_buff **pskb,
unsigned int hook,
const struct net_device *in,
const struct net_device *out,
- struct ipt_table *table)
+ struct xt_table *table)
{
static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
u_int16_t offset;
@@ -507,7 +507,7 @@ check_entry(struct ipt_entry *e, const char *name)
static inline int check_match(struct ipt_entry_match *m, const char *name,
const struct ipt_ip *ip, unsigned int hookmask)
{
- struct ipt_match *match;
+ struct xt_match *match;
int ret;
match = m->u.kernel.match;
@@ -531,7 +531,7 @@ find_check_match(struct ipt_entry_match *m,
unsigned int hookmask,
unsigned int *i)
{
- struct ipt_match *match;
+ struct xt_match *match;
int ret;
match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
@@ -557,7 +557,7 @@ err:
static inline int check_target(struct ipt_entry *e, const char *name)
{
struct ipt_entry_target *t;
- struct ipt_target *target;
+ struct xt_target *target;
int ret;
t = ipt_get_target(e);
@@ -580,7 +580,7 @@ find_check_entry(struct ipt_entry *e, const char *name, unsigned int size,
unsigned int *i)
{
struct ipt_entry_target *t;
- struct ipt_target *target;
+ struct xt_target *target;
int ret;
unsigned int j;
@@ -818,7 +818,7 @@ get_counters(const struct xt_table_info *t,
}
}
-static inline struct xt_counters * alloc_counters(struct ipt_table *table)
+static inline struct xt_counters * alloc_counters(struct xt_table *table)
{
unsigned int countersize;
struct xt_counters *counters;
@@ -843,7 +843,7 @@ static inline struct xt_counters * alloc_counters(struct ipt_table *table)
static int
copy_entries_to_user(unsigned int total_size,
- struct ipt_table *table,
+ struct xt_table *table,
void __user *userptr)
{
unsigned int off, num;
@@ -1046,7 +1046,7 @@ static int compat_table_info(struct xt_table_info *info,
static int get_info(void __user *user, int *len, int compat)
{
char name[IPT_TABLE_MAXNAMELEN];
- struct ipt_table *t;
+ struct xt_table *t;
int ret;
if (*len != sizeof(struct ipt_getinfo)) {
@@ -1107,7 +1107,7 @@ get_entries(struct ipt_get_entries __user *uptr, int *len)
{
int ret;
struct ipt_get_entries get;
- struct ipt_table *t;
+ struct xt_table *t;
if (*len < sizeof(get)) {
duprintf("get_entries: %u < %d\n", *len,
@@ -1151,7 +1151,7 @@ __do_replace(const char *name, unsigned int valid_hooks,
void __user *counters_ptr)
{
int ret;
- struct ipt_table *t;
+ struct xt_table *t;
struct xt_table_info *oldinfo;
struct xt_counters *counters;
void *loc_cpu_old_entry;
@@ -1302,7 +1302,7 @@ do_add_counters(void __user *user, unsigned int len, int compat)
char *name;
int size;
void *ptmp;
- struct ipt_table *t;
+ struct xt_table *t;
struct xt_table_info *private;
int ret = 0;
void *loc_cpu_entry;
@@ -1437,7 +1437,7 @@ compat_check_calc_match(struct ipt_entry_match *m,
unsigned int hookmask,
int *size, int *i)
{
- struct ipt_match *match;
+ struct xt_match *match;
match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
m->u.user.revision),
@@ -1466,7 +1466,7 @@ check_compat_entry_size_and_hooks(struct ipt_entry *e,
const char *name)
{
struct ipt_entry_target *t;
- struct ipt_target *target;
+ struct xt_target *target;
unsigned int entry_offset;
int ret, off, h, j;
@@ -1550,7 +1550,7 @@ static int compat_copy_entry_from_user(struct ipt_entry *e, void **dstptr,
struct xt_table_info *newinfo, unsigned char *base)
{
struct ipt_entry_target *t;
- struct ipt_target *target;
+ struct xt_target *target;
struct ipt_entry *de;
unsigned int origsize;
int ret, h;
@@ -1795,7 +1795,7 @@ struct compat_ipt_get_entries
};
static int compat_copy_entries_to_user(unsigned int total_size,
- struct ipt_table *table, void __user *userptr)
+ struct xt_table *table, void __user *userptr)
{
unsigned int off, num;
struct compat_ipt_entry e;
@@ -1869,7 +1869,7 @@ compat_get_entries(struct compat_ipt_get_entries __user *uptr, int *len)
{
int ret;
struct compat_ipt_get_entries get;
- struct ipt_table *t;
+ struct xt_table *t;
if (*len < sizeof(get)) {
@@ -2052,7 +2052,7 @@ int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
return 0;
}
-void ipt_unregister_table(struct ipt_table *table)
+void ipt_unregister_table(struct xt_table *table)
{
struct xt_table_info *private;
void *loc_cpu_entry;
@@ -2124,7 +2124,7 @@ icmp_checkentry(const char *tablename,
}
/* The built-in targets: standard (NULL) and error. */
-static struct ipt_target ipt_standard_target = {
+static struct xt_target ipt_standard_target = {
.name = IPT_STANDARD_TARGET,
.targetsize = sizeof(int),
.family = AF_INET,
@@ -2135,7 +2135,7 @@ static struct ipt_target ipt_standard_target = {
#endif
};
-static struct ipt_target ipt_error_target = {
+static struct xt_target ipt_error_target = {
.name = IPT_ERROR_TARGET,
.target = ipt_error,
.targetsize = IPT_FUNCTION_MAXNAMELEN,
@@ -2158,7 +2158,7 @@ static struct nf_sockopt_ops ipt_sockopts = {
#endif
};
-static struct ipt_match icmp_matchstruct = {
+static struct xt_match icmp_matchstruct = {
.name = "icmp",
.match = icmp_match,
.matchsize = sizeof(struct ipt_icmp),
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index b1c11160b9d..343c2abdc1a 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -26,6 +26,7 @@
#include <linux/netfilter_arp.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv4/ipt_CLUSTERIP.h>
#include <net/netfilter/nf_conntrack_compat.h>
@@ -247,6 +248,7 @@ clusterip_hashfn(struct sk_buff *skb, struct clusterip_config *config)
switch (iph->protocol) {
case IPPROTO_TCP:
case IPPROTO_UDP:
+ case IPPROTO_UDPLITE:
case IPPROTO_SCTP:
case IPPROTO_DCCP:
case IPPROTO_ICMP:
@@ -329,7 +331,7 @@ target(struct sk_buff **pskb,
if ((*pskb)->nh.iph->protocol == IPPROTO_ICMP
&& (ctinfo == IP_CT_RELATED
|| ctinfo == IP_CT_RELATED+IP_CT_IS_REPLY))
- return IPT_CONTINUE;
+ return XT_CONTINUE;
/* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO,
* TIMESTAMP, INFO_REQUEST or ADDRESS type icmp packets from here
@@ -367,7 +369,7 @@ target(struct sk_buff **pskb,
* actually a unicast IP packet. TCP doesn't like PACKET_MULTICAST */
(*pskb)->pkt_type = PACKET_HOST;
- return IPT_CONTINUE;
+ return XT_CONTINUE;
}
static int
@@ -470,8 +472,9 @@ static void destroy(const struct xt_target *target, void *targinfo)
nf_ct_l3proto_module_put(target->family);
}
-static struct ipt_target clusterip_tgt = {
+static struct xt_target clusterip_tgt = {
.name = "CLUSTERIP",
+ .family = AF_INET,
.target = target,
.targetsize = sizeof(struct ipt_clusterip_tgt_info),
.checkentry = checkentry,
@@ -727,7 +730,7 @@ static int __init ipt_clusterip_init(void)
{
int ret;
- ret = ipt_register_target(&clusterip_tgt);
+ ret = xt_register_target(&clusterip_tgt);
if (ret < 0)
return ret;
@@ -753,7 +756,7 @@ cleanup_hook:
nf_unregister_hook(&cip_arp_ops);
#endif /* CONFIG_PROC_FS */
cleanup_target:
- ipt_unregister_target(&clusterip_tgt);
+ xt_unregister_target(&clusterip_tgt);
return ret;
}
@@ -765,7 +768,7 @@ static void __exit ipt_clusterip_fini(void)
remove_proc_entry(clusterip_procdir->name, clusterip_procdir->parent);
#endif
nf_unregister_hook(&cip_arp_ops);
- ipt_unregister_target(&clusterip_tgt);
+ xt_unregister_target(&clusterip_tgt);
}
module_init(ipt_clusterip_init);
diff --git a/net/ipv4/netfilter/ipt_ECN.c b/net/ipv4/netfilter/ipt_ECN.c
index b55d670a24d..b5ca5938d1f 100644
--- a/net/ipv4/netfilter/ipt_ECN.c
+++ b/net/ipv4/netfilter/ipt_ECN.c
@@ -9,12 +9,14 @@
* ipt_ECN.c,v 1.5 2002/08/18 19:36:51 laforge Exp
*/
+#include <linux/in.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/checksum.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv4/ipt_ECN.h>
@@ -95,7 +97,7 @@ target(struct sk_buff **pskb,
if (!set_ect_tcp(pskb, einfo))
return NF_DROP;
- return IPT_CONTINUE;
+ return XT_CONTINUE;
}
static int
@@ -119,7 +121,7 @@ checkentry(const char *tablename,
return 0;
}
if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR))
- && (e->ip.proto != IPPROTO_TCP || (e->ip.invflags & IPT_INV_PROTO))) {
+ && (e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) {
printk(KERN_WARNING "ECN: cannot use TCP operations on a "
"non-tcp rule\n");
return 0;
@@ -127,8 +129,9 @@ checkentry(const char *tablename,
return 1;
}
-static struct ipt_target ipt_ecn_reg = {
+static struct xt_target ipt_ecn_reg = {
.name = "ECN",
+ .family = AF_INET,
.target = target,
.targetsize = sizeof(struct ipt_ECN_info),
.table = "mangle",
@@ -138,12 +141,12 @@ static struct ipt_target ipt_ecn_reg = {
static int __init ipt_ecn_init(void)
{
- return ipt_register_target(&ipt_ecn_reg);
+ return xt_register_target(&ipt_ecn_reg);
}
static void __exit ipt_ecn_fini(void)
{
- ipt_unregister_target(&ipt_ecn_reg);
+ xt_unregister_target(&ipt_ecn_reg);
}
module_init(ipt_ecn_init);
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c
index c96de16fefa..f68370ffb43 100644
--- a/net/ipv4/netfilter/ipt_LOG.c
+++ b/net/ipv4/netfilter/ipt_LOG.c
@@ -20,7 +20,7 @@
#include <net/route.h>
#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ipt_LOG.h>
MODULE_LICENSE("GPL");
@@ -432,7 +432,7 @@ ipt_log_target(struct sk_buff **pskb,
ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li,
loginfo->prefix);
- return IPT_CONTINUE;
+ return XT_CONTINUE;
}
static int ipt_log_checkentry(const char *tablename,
@@ -455,8 +455,9 @@ static int ipt_log_checkentry(const char *tablename,
return 1;
}
-static struct ipt_target ipt_log_reg = {
+static struct xt_target ipt_log_reg = {
.name = "LOG",
+ .family = AF_INET,
.target = ipt_log_target,
.targetsize = sizeof(struct ipt_log_info),
.checkentry = ipt_log_checkentry,
@@ -471,8 +472,11 @@ static struct nf_logger ipt_log_logger ={
static int __init ipt_log_init(void)
{
- if (ipt_register_target(&ipt_log_reg))
- return -EINVAL;
+ int ret;
+
+ ret = xt_register_target(&ipt_log_reg);
+ if (ret < 0)
+ return ret;
if (nf_log_register(PF_INET, &ipt_log_logger) < 0) {
printk(KERN_WARNING "ipt_LOG: not logging via system console "
"since somebody else already registered for PF_INET\n");
@@ -486,7 +490,7 @@ static int __init ipt_log_init(void)
static void __exit ipt_log_fini(void)
{
nf_log_unregister_logger(&ipt_log_logger);
- ipt_unregister_target(&ipt_log_reg);
+ xt_unregister_target(&ipt_log_reg);
}
module_init(ipt_log_init);
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index d669685afd0..91c42efcd53 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -25,7 +25,7 @@
#else
#include <linux/netfilter_ipv4/ip_nat_rule.h>
#endif
-#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -190,8 +190,9 @@ static struct notifier_block masq_inet_notifier = {
.notifier_call = masq_inet_event,
};
-static struct ipt_target masquerade = {
+static struct xt_target masquerade = {
.name = "MASQUERADE",
+ .family = AF_INET,
.target = masquerade_target,
.targetsize = sizeof(struct ip_nat_multi_range_compat),
.table = "nat",
@@ -204,7 +205,7 @@ static int __init ipt_masquerade_init(void)
{
int ret;
- ret = ipt_register_target(&masquerade);
+ ret = xt_register_target(&masquerade);
if (ret == 0) {
/* Register for device down reports */
@@ -218,7 +219,7 @@ static int __init ipt_masquerade_init(void)
static void __exit ipt_masquerade_fini(void)
{
- ipt_unregister_target(&masquerade);
+ xt_unregister_target(&masquerade);
unregister_netdevice_notifier(&masq_dev_notifier);
unregister_inetaddr_notifier(&masq_inet_notifier);
}
diff --git a/net/ipv4/netfilter/ipt_NETMAP.c b/net/ipv4/netfilter/ipt_NETMAP.c
index 9390e90f2b2..b4acc241d89 100644
--- a/net/ipv4/netfilter/ipt_NETMAP.c
+++ b/net/ipv4/netfilter/ipt_NETMAP.c
@@ -15,6 +15,7 @@
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/x_tables.h>
#ifdef CONFIG_NF_NAT_NEEDED
#include <net/netfilter/nf_nat_rule.h>
#else
@@ -88,8 +89,9 @@ target(struct sk_buff **pskb,
return ip_nat_setup_info(ct, &newrange, hooknum);
}
-static struct ipt_target target_module = {
+static struct xt_target target_module = {
.name = MODULENAME,
+ .family = AF_INET,
.target = target,
.targetsize = sizeof(struct ip_nat_multi_range_compat),
.table = "nat",
@@ -101,12 +103,12 @@ static struct ipt_target target_module = {
static int __init ipt_netmap_init(void)
{
- return ipt_register_target(&target_module);
+ return xt_register_target(&target_module);
}
static void __exit ipt_netmap_fini(void)
{
- ipt_unregister_target(&target_module);
+ xt_unregister_target(&target_module);
}
module_init(ipt_netmap_init);
diff --git a/net/ipv4/netfilter/ipt_REDIRECT.c b/net/ipv4/netfilter/ipt_REDIRECT.c
index 462eceb3a1b..54cd021aa5a 100644
--- a/net/ipv4/netfilter/ipt_REDIRECT.c
+++ b/net/ipv4/netfilter/ipt_REDIRECT.c
@@ -18,6 +18,7 @@
#include <net/protocol.h>
#include <net/checksum.h>
#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/x_tables.h>
#ifdef CONFIG_NF_NAT_NEEDED
#include <net/netfilter/nf_nat_rule.h>
#else
@@ -104,8 +105,9 @@ redirect_target(struct sk_buff **pskb,
return ip_nat_setup_info(ct, &newrange, hooknum);
}
-static struct ipt_target redirect_reg = {
+static struct xt_target redirect_reg = {
.name = "REDIRECT",
+ .family = AF_INET,
.target = redirect_target,
.targetsize = sizeof(struct ip_nat_multi_range_compat),
.table = "nat",
@@ -116,12 +118,12 @@ static struct ipt_target redirect_reg = {
static int __init ipt_redirect_init(void)
{
- return ipt_register_target(&redirect_reg);
+ return xt_register_target(&redirect_reg);
}
static void __exit ipt_redirect_fini(void)
{
- ipt_unregister_target(&redirect_reg);
+ xt_unregister_target(&redirect_reg);
}
module_init(ipt_redirect_init);
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index f0319e5ee43..e4a1ddb386a 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -22,6 +22,7 @@
#include <net/tcp.h>
#include <net/route.h>
#include <net/dst.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv4/ipt_REJECT.h>
#ifdef CONFIG_BRIDGE_NETFILTER
@@ -116,7 +117,7 @@ static void send_reset(struct sk_buff *oldskb, int hook)
/* Adjust TCP checksum */
tcph->check = 0;
- tcph->check = tcp_v4_check(tcph, sizeof(struct tcphdr),
+ tcph->check = tcp_v4_check(sizeof(struct tcphdr),
nskb->nh.iph->saddr,
nskb->nh.iph->daddr,
csum_partial((char *)tcph,
@@ -230,7 +231,7 @@ static int check(const char *tablename,
} else if (rejinfo->with == IPT_TCP_RESET) {
/* Must specify that it's a TCP packet */
if (e->ip.proto != IPPROTO_TCP
- || (e->ip.invflags & IPT_INV_PROTO)) {
+ || (e->ip.invflags & XT_INV_PROTO)) {
DEBUGP("REJECT: TCP_RESET invalid for non-tcp\n");
return 0;
}
@@ -238,8 +239,9 @@ static int check(const char *tablename,
return 1;
}
-static struct ipt_target ipt_reject_reg = {
+static struct xt_target ipt_reject_reg = {
.name = "REJECT",
+ .family = AF_INET,
.target = reject,
.targetsize = sizeof(struct ipt_reject_info),
.table = "filter",
@@ -251,12 +253,12 @@ static struct ipt_target ipt_reject_reg = {
static int __init ipt_reject_init(void)
{
- return ipt_register_target(&ipt_reject_reg);
+ return xt_register_target(&ipt_reject_reg);
}
static void __exit ipt_reject_fini(void)
{
- ipt_unregister_target(&ipt_reject_reg);
+ xt_unregister_target(&ipt_reject_reg);
}
module_init(ipt_reject_init);
diff --git a/net/ipv4/netfilter/ipt_SAME.c b/net/ipv4/netfilter/ipt_SAME.c
index 3dcf2941133..a1cdd1262de 100644
--- a/net/ipv4/netfilter/ipt_SAME.c
+++ b/net/ipv4/netfilter/ipt_SAME.c
@@ -34,6 +34,7 @@
#include <net/protocol.h>
#include <net/checksum.h>
#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/x_tables.h>
#ifdef CONFIG_NF_NAT_NEEDED
#include <net/netfilter/nf_nat_rule.h>
#else
@@ -186,8 +187,9 @@ same_target(struct sk_buff **pskb,
return ip_nat_setup_info(ct, &newrange, hooknum);
}
-static struct ipt_target same_reg = {
+static struct xt_target same_reg = {
.name = "SAME",
+ .family = AF_INET,
.target = same_target,
.targetsize = sizeof(struct ipt_same_info),
.table = "nat",
@@ -199,12 +201,12 @@ static struct ipt_target same_reg = {
static int __init ipt_same_init(void)
{
- return ipt_register_target(&same_reg);
+ return xt_register_target(&same_reg);
}
static void __exit ipt_same_fini(void)
{
- ipt_unregister_target(&same_reg);
+ xt_unregister_target(&same_reg);
}
module_init(ipt_same_init);
diff --git a/net/ipv4/netfilter/ipt_TCPMSS.c b/net/ipv4/netfilter/ipt_TCPMSS.c
deleted file mode 100644
index 93eb5c3c188..00000000000
--- a/net/ipv4/netfilter/ipt_TCPMSS.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * This is a module which is used for setting the MSS option in TCP packets.
- *
- * Copyright (C) 2000 Marc Boucher <marc@mbsi.ca>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-
-#include <linux/ip.h>
-#include <net/tcp.h>
-
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_TCPMSS.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
-MODULE_DESCRIPTION("iptables TCP MSS modification module");
-
-static inline unsigned int
-optlen(const u_int8_t *opt, unsigned int offset)
-{
- /* Beware zero-length options: make finite progress */
- if (opt[offset] <= TCPOPT_NOP || opt[offset+1] == 0)
- return 1;
- else
- return opt[offset+1];
-}
-
-static unsigned int
-ipt_tcpmss_target(struct sk_buff **pskb,
- const struct net_device *in,
- const struct net_device *out,
- unsigned int hooknum,
- const struct xt_target *target,
- const void *targinfo)
-{
- const struct ipt_tcpmss_info *tcpmssinfo = targinfo;
- struct tcphdr *tcph;
- struct iphdr *iph;
- u_int16_t tcplen, newmss;
- __be16 newtotlen, oldval;
- unsigned int i;
- u_int8_t *opt;
-
- if (!skb_make_writable(pskb, (*pskb)->len))
- return NF_DROP;
-
- iph = (*pskb)->nh.iph;
- tcplen = (*pskb)->len - iph->ihl*4;
- tcph = (void *)iph + iph->ihl*4;
-
- /* Since it passed flags test in tcp match, we know it is is
- not a fragment, and has data >= tcp header length. SYN
- packets should not contain data: if they did, then we risk
- running over MTU, sending Frag Needed and breaking things
- badly. --RR */
- if (tcplen != tcph->doff*4) {
- if (net_ratelimit())
- printk(KERN_ERR
- "ipt_tcpmss_target: bad length (%d bytes)\n",
- (*pskb)->len);
- return NF_DROP;
- }
-
- if (tcpmssinfo->mss == IPT_TCPMSS_CLAMP_PMTU) {
- if (dst_mtu((*pskb)->dst) <= sizeof(struct iphdr) +
- sizeof(struct tcphdr)) {
- if (net_ratelimit())
- printk(KERN_ERR "ipt_tcpmss_target: "
- "unknown or invalid path-MTU (%d)\n",
- dst_mtu((*pskb)->dst));
- return NF_DROP; /* or IPT_CONTINUE ?? */
- }
-
- newmss = dst_mtu((*pskb)->dst) - sizeof(struct iphdr) -
- sizeof(struct tcphdr);
- } else
- newmss = tcpmssinfo->mss;
-
- opt = (u_int8_t *)tcph;
- for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
- if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
- opt[i+1] == TCPOLEN_MSS) {
- u_int16_t oldmss;
-
- oldmss = (opt[i+2] << 8) | opt[i+3];
-
- if (tcpmssinfo->mss == IPT_TCPMSS_CLAMP_PMTU &&
- oldmss <= newmss)
- return IPT_CONTINUE;
-
- opt[i+2] = (newmss & 0xff00) >> 8;
- opt[i+3] = (newmss & 0x00ff);
-
- nf_proto_csum_replace2(&tcph->check, *pskb,
- htons(oldmss), htons(newmss), 0);
- return IPT_CONTINUE;
- }
- }
-
- /*
- * MSS Option not found ?! add it..
- */
- if (skb_tailroom((*pskb)) < TCPOLEN_MSS) {
- struct sk_buff *newskb;
-
- newskb = skb_copy_expand(*pskb, skb_headroom(*pskb),
- TCPOLEN_MSS, GFP_ATOMIC);
- if (!newskb)
- return NF_DROP;
- kfree_skb(*pskb);
- *pskb = newskb;
- iph = (*pskb)->nh.iph;
- tcph = (void *)iph + iph->ihl*4;
- }
-
- skb_put((*pskb), TCPOLEN_MSS);
-
- opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
- memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
-
- nf_proto_csum_replace2(&tcph->check, *pskb,
- htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
- opt[0] = TCPOPT_MSS;
- opt[1] = TCPOLEN_MSS;
- opt[2] = (newmss & 0xff00) >> 8;
- opt[3] = (newmss & 0x00ff);
-
- nf_proto_csum_replace4(&tcph->check, *pskb, 0, *((__be32 *)opt), 0);
-
- oldval = ((__be16 *)tcph)[6];
- tcph->doff += TCPOLEN_MSS/4;
- nf_proto_csum_replace2(&tcph->check, *pskb,
- oldval, ((__be16 *)tcph)[6], 0);
-
- newtotlen = htons(ntohs(iph->tot_len) + TCPOLEN_MSS);
- nf_csum_replace2(&iph->check, iph->tot_len, newtotlen);
- iph->tot_len = newtotlen;
- return IPT_CONTINUE;
-}
-
-#define TH_SYN 0x02
-
-static inline int find_syn_match(const struct ipt_entry_match *m)
-{
- const struct ipt_tcp *tcpinfo = (const struct ipt_tcp *)m->data;
-
- if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
- tcpinfo->flg_cmp & TH_SYN &&
- !(tcpinfo->invflags & IPT_TCP_INV_FLAGS))
- return 1;
-
- return 0;
-}
-
-/* Must specify -p tcp --syn/--tcp-flags SYN */
-static int
-ipt_tcpmss_checkentry(const char *tablename,
- const void *e_void,
- const struct xt_target *target,
- void *targinfo,
- unsigned int hook_mask)
-{
- const struct ipt_tcpmss_info *tcpmssinfo = targinfo;
- const struct ipt_entry *e = e_void;
-
- if (tcpmssinfo->mss == IPT_TCPMSS_CLAMP_PMTU &&
- (hook_mask & ~((1 << NF_IP_FORWARD) |
- (1 << NF_IP_LOCAL_OUT) |
- (1 << NF_IP_POST_ROUTING))) != 0) {
- printk("TCPMSS: path-MTU clamping only supported in "
- "FORWARD, OUTPUT and POSTROUTING hooks\n");
- return 0;
- }
-
- if (IPT_MATCH_ITERATE(e, find_syn_match))
- return 1;
- printk("TCPMSS: Only works on TCP SYN packets\n");
- return 0;
-}
-
-static struct ipt_target ipt_tcpmss_reg = {
- .name = "TCPMSS",
- .target = ipt_tcpmss_target,
- .targetsize = sizeof(struct ipt_tcpmss_info),
- .proto = IPPROTO_TCP,
- .checkentry = ipt_tcpmss_checkentry,
- .me = THIS_MODULE,
-};
-
-static int __init ipt_tcpmss_init(void)
-{
- return ipt_register_target(&ipt_tcpmss_reg);
-}
-
-static void __exit ipt_tcpmss_fini(void)
-{
- ipt_unregister_target(&ipt_tcpmss_reg);
-}
-
-module_init(ipt_tcpmss_init);
-module_exit(ipt_tcpmss_fini);
diff --git a/net/ipv4/netfilter/ipt_TOS.c b/net/ipv4/netfilter/ipt_TOS.c
index 18e74ac4d42..29b05a6bd10 100644
--- a/net/ipv4/netfilter/ipt_TOS.c
+++ b/net/ipv4/netfilter/ipt_TOS.c
@@ -13,7 +13,7 @@
#include <linux/ip.h>
#include <net/checksum.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ipt_TOS.h>
MODULE_LICENSE("GPL");
@@ -40,7 +40,7 @@ target(struct sk_buff **pskb,
iph->tos = (iph->tos & IPTOS_PREC_MASK) | tosinfo->tos;
nf_csum_replace2(&iph->check, htons(oldtos), htons(iph->tos));
}
- return IPT_CONTINUE;
+ return XT_CONTINUE;
}
static int
@@ -63,8 +63,9 @@ checkentry(const char *tablename,
return 1;
}
-static struct ipt_target ipt_tos_reg = {
+static struct xt_target ipt_tos_reg = {
.name = "TOS",
+ .family = AF_INET,
.target = target,
.targetsize = sizeof(struct ipt_tos_target_info),
.table = "mangle",
@@ -74,12 +75,12 @@ static struct ipt_target ipt_tos_reg = {
static int __init ipt_tos_init(void)
{
- return ipt_register_target(&ipt_tos_reg);
+ return xt_register_target(&ipt_tos_reg);
}
static void __exit ipt_tos_fini(void)
{
- ipt_unregister_target(&ipt_tos_reg);
+ xt_unregister_target(&ipt_tos_reg);
}
module_init(ipt_tos_init);
diff --git a/net/ipv4/netfilter/ipt_TTL.c b/net/ipv4/netfilter/ipt_TTL.c
index fffe5ca82e9..d2b6fa3f9dc 100644
--- a/net/ipv4/netfilter/ipt_TTL.c
+++ b/net/ipv4/netfilter/ipt_TTL.c
@@ -12,7 +12,7 @@
#include <linux/ip.h>
#include <net/checksum.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ipt_TTL.h>
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
@@ -59,7 +59,7 @@ ipt_ttl_target(struct sk_buff **pskb,
iph->ttl = new_ttl;
}
- return IPT_CONTINUE;
+ return XT_CONTINUE;
}
static int ipt_ttl_checkentry(const char *tablename,
@@ -80,8 +80,9 @@ static int ipt_ttl_checkentry(const char *tablename,
return 1;
}
-static struct ipt_target ipt_TTL = {
+static struct xt_target ipt_TTL = {
.name = "TTL",
+ .family = AF_INET,
.target = ipt_ttl_target,
.targetsize = sizeof(struct ipt_TTL_info),
.table = "mangle",
@@ -91,12 +92,12 @@ static struct ipt_target ipt_TTL = {
static int __init ipt_ttl_init(void)
{
- return ipt_register_target(&ipt_TTL);
+ return xt_register_target(&ipt_TTL);
}
static void __exit ipt_ttl_fini(void)
{
- ipt_unregister_target(&ipt_TTL);
+ xt_unregister_target(&ipt_TTL);
}
module_init(ipt_ttl_init);
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index dbd34783a64..7af57a3a1f3 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -57,7 +57,7 @@
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ipt_ULOG.h>
#include <net/sock.h>
#include <linux/bitops.h>
@@ -132,7 +132,6 @@ static void ulog_send(unsigned int nlgroupnum)
ub->qlen = 0;
ub->skb = NULL;
ub->lastnlh = NULL;
-
}
@@ -314,7 +313,7 @@ static unsigned int ipt_ulog_target(struct sk_buff **pskb,
ipt_ulog_packet(hooknum, *pskb, in, out, loginfo, NULL);
- return IPT_CONTINUE;
+ return XT_CONTINUE;
}
static void ipt_logfn(unsigned int pf,
@@ -363,8 +362,9 @@ static int ipt_ulog_checkentry(const char *tablename,
return 1;
}
-static struct ipt_target ipt_ulog_reg = {
+static struct xt_target ipt_ulog_reg = {
.name = "ULOG",
+ .family = AF_INET,
.target = ipt_ulog_target,
.targetsize = sizeof(struct ipt_ulog_info),
.checkentry = ipt_ulog_checkentry,
@@ -379,7 +379,7 @@ static struct nf_logger ipt_ulog_logger = {
static int __init ipt_ulog_init(void)
{
- int i;
+ int ret, i;
DEBUGP("ipt_ULOG: init module\n");
@@ -400,9 +400,10 @@ static int __init ipt_ulog_init(void)
if (!nflognl)
return -ENOMEM;
- if (ipt_register_target(&ipt_ulog_reg) != 0) {
+ ret = xt_register_target(&ipt_ulog_reg);
+ if (ret < 0) {
sock_release(nflognl->sk_socket);
- return -EINVAL;
+ return ret;
}
if (nflog)
nf_log_register(PF_INET, &ipt_ulog_logger);
@@ -419,7 +420,7 @@ static void __exit ipt_ulog_fini(void)
if (nflog)
nf_log_unregister_logger(&ipt_ulog_logger);
- ipt_unregister_target(&ipt_ulog_reg);
+ xt_unregister_target(&ipt_ulog_reg);
sock_release(nflognl->sk_socket);
/* remove pending timers and free allocated skb's */
@@ -435,7 +436,6 @@ static void __exit ipt_ulog_fini(void)
ub->skb = NULL;
}
}
-
}
module_init(ipt_ulog_init);
diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c
index 7b60eb74788..648f555c4d1 100644
--- a/net/ipv4/netfilter/ipt_addrtype.c
+++ b/net/ipv4/netfilter/ipt_addrtype.c
@@ -16,7 +16,7 @@
#include <net/route.h>
#include <linux/netfilter_ipv4/ipt_addrtype.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
@@ -44,8 +44,9 @@ static int match(const struct sk_buff *skb,
return ret;
}
-static struct ipt_match addrtype_match = {
+static struct xt_match addrtype_match = {
.name = "addrtype",
+ .family = AF_INET,
.match = match,
.matchsize = sizeof(struct ipt_addrtype_info),
.me = THIS_MODULE
@@ -53,12 +54,12 @@ static struct ipt_match addrtype_match = {
static int __init ipt_addrtype_init(void)
{
- return ipt_register_match(&addrtype_match);
+ return xt_register_match(&addrtype_match);
}
static void __exit ipt_addrtype_fini(void)
{
- ipt_unregister_match(&addrtype_match);
+ xt_unregister_match(&addrtype_match);
}
module_init(ipt_addrtype_init);
diff --git a/net/ipv4/netfilter/ipt_ah.c b/net/ipv4/netfilter/ipt_ah.c
index 1798f86bc53..42f41224a43 100644
--- a/net/ipv4/netfilter/ipt_ah.c
+++ b/net/ipv4/netfilter/ipt_ah.c
@@ -6,12 +6,13 @@
* published by the Free Software Foundation.
*/
+#include <linux/in.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/netfilter_ipv4/ipt_ah.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Yon Uriarte <yon@astaro.de>");
@@ -86,8 +87,9 @@ checkentry(const char *tablename,
return 1;
}
-static struct ipt_match ah_match = {
+static struct xt_match ah_match = {
.name = "ah",
+ .family = AF_INET,
.match = match,
.matchsize = sizeof(struct ipt_ah),
.proto = IPPROTO_AH,
@@ -97,12 +99,12 @@ static struct ipt_match ah_match = {
static int __init ipt_ah_init(void)
{
- return ipt_register_match(&ah_match);
+ return xt_register_match(&ah_match);
}
static void __exit ipt_ah_fini(void)
{
- ipt_unregister_match(&ah_match);
+ xt_unregister_match(&ah_match);
}
module_init(ipt_ah_init);
diff --git a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c
index dafbdec0efc..37508b2cfea 100644
--- a/net/ipv4/netfilter/ipt_ecn.c
+++ b/net/ipv4/netfilter/ipt_ecn.c
@@ -9,10 +9,13 @@
* published by the Free Software Foundation.
*/
+#include <linux/in.h>
+#include <linux/ip.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv4/ipt_ecn.h>
@@ -109,8 +112,9 @@ static int checkentry(const char *tablename, const void *ip_void,
return 1;
}
-static struct ipt_match ecn_match = {
+static struct xt_match ecn_match = {
.name = "ecn",
+ .family = AF_INET,
.match = match,
.matchsize = sizeof(struct ipt_ecn_info),
.checkentry = checkentry,
@@ -119,12 +123,12 @@ static struct ipt_match ecn_match = {
static int __init ipt_ecn_init(void)
{
- return ipt_register_match(&ecn_match);
+ return xt_register_match(&ecn_match);
}
static void __exit ipt_ecn_fini(void)
{
- ipt_unregister_match(&ecn_match);
+ xt_unregister_match(&ecn_match);
}
module_init(ipt_ecn_init);
diff --git a/net/ipv4/netfilter/ipt_iprange.c b/net/ipv4/netfilter/ipt_iprange.c
index 5202edd8d33..05de593be94 100644
--- a/net/ipv4/netfilter/ipt_iprange.c
+++ b/net/ipv4/netfilter/ipt_iprange.c
@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ipt_iprange.h>
MODULE_LICENSE("GPL");
@@ -63,22 +63,22 @@ match(const struct sk_buff *skb,
return 1;
}
-static struct ipt_match iprange_match = {
+static struct xt_match iprange_match = {
.name = "iprange",
+ .family = AF_INET,
.match = match,
.matchsize = sizeof(struct ipt_iprange_info),
- .destroy = NULL,
.me = THIS_MODULE
};
static int __init ipt_iprange_init(void)
{
- return ipt_register_match(&iprange_match);
+ return xt_register_match(&iprange_match);
}
static void __exit ipt_iprange_fini(void)
{
- ipt_unregister_match(&iprange_match);
+ xt_unregister_match(&iprange_match);
}
module_init(ipt_iprange_init);
diff --git a/net/ipv4/netfilter/ipt_owner.c b/net/ipv4/netfilter/ipt_owner.c
index 78c336f12a9..9f496ac834b 100644
--- a/net/ipv4/netfilter/ipt_owner.c
+++ b/net/ipv4/netfilter/ipt_owner.c
@@ -15,7 +15,7 @@
#include <net/sock.h>
#include <linux/netfilter_ipv4/ipt_owner.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
@@ -68,8 +68,9 @@ checkentry(const char *tablename,
return 1;
}
-static struct ipt_match owner_match = {
+static struct xt_match owner_match = {
.name = "owner",
+ .family = AF_INET,
.match = match,
.matchsize = sizeof(struct ipt_owner_info),
.hooks = (1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_POST_ROUTING),
@@ -79,12 +80,12 @@ static struct ipt_match owner_match = {
static int __init ipt_owner_init(void)
{
- return ipt_register_match(&owner_match);
+ return xt_register_match(&owner_match);
}
static void __exit ipt_owner_fini(void)
{
- ipt_unregister_match(&owner_match);
+ xt_unregister_match(&owner_match);
}
module_init(ipt_owner_init);
diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c
index 4db0e73c56f..6b97b679617 100644
--- a/net/ipv4/netfilter/ipt_recent.c
+++ b/net/ipv4/netfilter/ipt_recent.c
@@ -12,6 +12,7 @@
* Copyright 2002-2003, Stephen Frost, 2.5.x port by laforge@netfilter.org
*/
#include <linux/init.h>
+#include <linux/ip.h>
#include <linux/moduleparam.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
@@ -24,7 +25,7 @@
#include <linux/skbuff.h>
#include <linux/inet.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ipt_recent.h>
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
@@ -462,8 +463,9 @@ static struct file_operations recent_fops = {
};
#endif /* CONFIG_PROC_FS */
-static struct ipt_match recent_match = {
+static struct xt_match recent_match = {
.name = "recent",
+ .family = AF_INET,
.match = ipt_recent_match,
.matchsize = sizeof(struct ipt_recent_info),
.checkentry = ipt_recent_checkentry,
@@ -479,13 +481,13 @@ static int __init ipt_recent_init(void)
return -EINVAL;
ip_list_hash_size = 1 << fls(ip_list_tot);
- err = ipt_register_match(&recent_match);
+ err = xt_register_match(&recent_match);
#ifdef CONFIG_PROC_FS
if (err)
return err;
proc_dir = proc_mkdir("ipt_recent", proc_net);
if (proc_dir == NULL) {
- ipt_unregister_match(&recent_match);
+ xt_unregister_match(&recent_match);
err = -ENOMEM;
}
#endif
@@ -495,7 +497,7 @@ static int __init ipt_recent_init(void)
static void __exit ipt_recent_exit(void)
{
BUG_ON(!list_empty(&tables));
- ipt_unregister_match(&recent_match);
+ xt_unregister_match(&recent_match);
#ifdef CONFIG_PROC_FS
remove_proc_entry("ipt_recent", proc_net);
#endif
diff --git a/net/ipv4/netfilter/ipt_tos.c b/net/ipv4/netfilter/ipt_tos.c
index 5549c39c785..5d33b51d49d 100644
--- a/net/ipv4/netfilter/ipt_tos.c
+++ b/net/ipv4/netfilter/ipt_tos.c
@@ -8,11 +8,12 @@
* published by the Free Software Foundation.
*/
+#include <linux/ip.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter_ipv4/ipt_tos.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/x_tables.h>
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("iptables TOS match module");
@@ -32,8 +33,9 @@ match(const struct sk_buff *skb,
return (skb->nh.iph->tos == info->tos) ^ info->invert;
}
-static struct ipt_match tos_match = {
+static struct xt_match tos_match = {
.name = "tos",
+ .family = AF_INET,
.match = match,
.matchsize = sizeof(struct ipt_tos_info),
.me = THIS_MODULE,
@@ -41,12 +43,12 @@ static struct ipt_match tos_match = {
static int __init ipt_multiport_init(void)
{
- return ipt_register_match(&tos_match);
+ return xt_register_match(&tos_match);
}
static void __exit ipt_multiport_fini(void)
{
- ipt_unregister_match(&tos_match);
+ xt_unregister_match(&tos_match);
}
module_init(ipt_multiport_init);
diff --git a/net/ipv4/netfilter/ipt_ttl.c b/net/ipv4/netfilter/ipt_ttl.c
index a5243bdb87d..d5cd984e5ed 100644
--- a/net/ipv4/netfilter/ipt_ttl.c
+++ b/net/ipv4/netfilter/ipt_ttl.c
@@ -9,11 +9,12 @@
* published by the Free Software Foundation.
*/
+#include <linux/ip.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter_ipv4/ipt_ttl.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/x_tables.h>
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("IP tables TTL matching module");
@@ -48,8 +49,9 @@ static int match(const struct sk_buff *skb,
return 0;
}
-static struct ipt_match ttl_match = {
+static struct xt_match ttl_match = {
.name = "ttl",
+ .family = AF_INET,
.match = match,
.matchsize = sizeof(struct ipt_ttl_info),
.me = THIS_MODULE,
@@ -57,13 +59,12 @@ static struct ipt_match ttl_match = {
static int __init ipt_ttl_init(void)
{
- return ipt_register_match(&ttl_match);
+ return xt_register_match(&ttl_match);
}
static void __exit ipt_ttl_fini(void)
{
- ipt_unregister_match(&ttl_match);
-
+ xt_unregister_match(&ttl_match);
}
module_init(ipt_ttl_init);
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index e2e7dd8d790..51053cb42f4 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -74,7 +74,7 @@ static struct
}
};
-static struct ipt_table packet_filter = {
+static struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index af293988944..a532e4d8433 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -103,7 +103,7 @@ static struct
}
};
-static struct ipt_table packet_mangler = {
+static struct xt_table packet_mangler = {
.name = "mangle",
.valid_hooks = MANGLE_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index bcbeb4aeacd..5277550fa6b 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -79,7 +79,7 @@ static struct
}
};
-static struct ipt_table packet_raw = {
+static struct xt_table packet_raw = {
.name = "raw",
.valid_hooks = RAW_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 86a92272b05..998b2557692 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -254,8 +254,9 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
if (maniptype == IP_NAT_MANIP_SRC) {
if (find_appropriate_src(orig_tuple, tuple, range)) {
DEBUGP("get_unique_tuple: Found current src map\n");
- if (!nf_nat_used_tuple(tuple, ct))
- return;
+ if (!(range->flags & IP_NAT_RANGE_PROTO_RANDOM))
+ if (!nf_nat_used_tuple(tuple, ct))
+ return;
}
}
@@ -269,6 +270,13 @@ get_unique_tuple(struct nf_conntrack_tuple *tuple,
proto = nf_nat_proto_find_get(orig_tuple->dst.protonum);
+ /* Change protocol info to have some randomization */
+ if (range->flags & IP_NAT_RANGE_PROTO_RANDOM) {
+ proto->unique_tuple(tuple, range, maniptype, ct);
+ nf_nat_proto_put(proto);
+ return;
+ }
+
/* Only bother mapping if it's not already in range and unique */
if ((!(range->flags & IP_NAT_RANGE_PROTO_SPECIFIED) ||
proto->in_range(tuple, maniptype, &range->min, &range->max)) &&
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 98fbfc84d18..dc6738bdfab 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -176,7 +176,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff **pskb,
datalen = (*pskb)->len - iph->ihl*4;
if ((*pskb)->ip_summed != CHECKSUM_PARTIAL) {
tcph->check = 0;
- tcph->check = tcp_v4_check(tcph, datalen,
+ tcph->check = tcp_v4_check(datalen,
iph->saddr, iph->daddr,
csum_partial((char *)tcph,
datalen, 0));
diff --git a/net/ipv4/netfilter/nf_nat_proto_tcp.c b/net/ipv4/netfilter/nf_nat_proto_tcp.c
index 7e26a7e9bee..439164c7a62 100644
--- a/net/ipv4/netfilter/nf_nat_proto_tcp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_tcp.c
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/random.h>
#include <linux/ip.h>
#include <linux/tcp.h>
@@ -75,6 +76,9 @@ tcp_unique_tuple(struct nf_conntrack_tuple *tuple,
range_size = ntohs(range->max.tcp.port) - min + 1;
}
+ if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
+ port = net_random();
+
for (i = 0; i < range_size; i++, port++) {
*portptr = htons(min + port % range_size);
if (!nf_nat_used_tuple(tuple, ct))
diff --git a/net/ipv4/netfilter/nf_nat_proto_udp.c b/net/ipv4/netfilter/nf_nat_proto_udp.c
index ab0ce4c8699..8cae6e063bb 100644
--- a/net/ipv4/netfilter/nf_nat_proto_udp.c
+++ b/net/ipv4/netfilter/nf_nat_proto_udp.c
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/init.h>
+#include <linux/random.h>
#include <linux/ip.h>
#include <linux/udp.h>
@@ -73,6 +74,9 @@ udp_unique_tuple(struct nf_conntrack_tuple *tuple,
range_size = ntohs(range->max.udp.port) - min + 1;
}
+ if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
+ port = net_random();
+
for (i = 0; i < range_size; i++, port++) {
*portptr = htons(min + port % range_size);
if (!nf_nat_used_tuple(tuple, ct))
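The randomization added above (the bypass in nf_nat_core.c plus the per-protocol loops in nf_nat_proto_tcp.c and nf_nat_proto_udp.c) boils down to: start the port search at a random offset instead of the previous port, then walk the whole range once, wrapping with a modulo. A rough standalone userspace sketch of that search loop, not part of the patch — is_used() is a stand-in for nf_nat_used_tuple() and the range values are made up:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Stand-in for nf_nat_used_tuple(): pretend ports below 1030 are taken. */
static int is_used(unsigned int port)
{
        return port < 1030;
}

/* Scan [min, min + range_size) starting from a random offset, wrapping
 * with a modulo, and return the first free port (or 0 if none is free). */
static unsigned int pick_port(unsigned int min, unsigned int range_size)
{
        unsigned int i, port = rand();          /* random starting offset */

        for (i = 0; i < range_size; i++, port++) {
                unsigned int candidate = min + port % range_size;

                if (!is_used(candidate))
                        return candidate;
        }
        return 0;
}

int main(void)
{
        srand(time(NULL));
        printf("picked port %u\n", pick_port(1024, 64));
        return 0;
}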
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index b868ee0195d..7f95b4e2eb3 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -119,7 +119,7 @@ static struct
}
};
-static struct ipt_table nat_table = {
+static struct xt_table nat_table = {
.name = "nat",
.valid_hooks = NAT_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
@@ -226,6 +226,10 @@ static int ipt_dnat_checkentry(const char *tablename,
printk("DNAT: multiple ranges no longer supported\n");
return 0;
}
+ if (mr->range[0].flags & IP_NAT_RANGE_PROTO_RANDOM) {
+ printk("DNAT: port randomization not supported\n");
+ return 0;
+ }
return 1;
}
@@ -290,7 +294,7 @@ int nf_nat_rule_find(struct sk_buff **pskb,
return ret;
}
-static struct ipt_target ipt_snat_reg = {
+static struct xt_target ipt_snat_reg = {
.name = "SNAT",
.target = ipt_snat_target,
.targetsize = sizeof(struct nf_nat_multi_range_compat),
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index 00d6dea9f7f..5a964a167c1 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -32,12 +32,6 @@
#define DEBUGP(format, args...)
#endif
-#define HOOKNAME(hooknum) ((hooknum) == NF_IP_POST_ROUTING ? "POST_ROUTING" \
- : ((hooknum) == NF_IP_PRE_ROUTING ? "PRE_ROUTING" \
- : ((hooknum) == NF_IP_LOCAL_OUT ? "LOCAL_OUT" \
- : ((hooknum) == NF_IP_LOCAL_IN ? "LOCAL_IN" \
- : "*ERROR*")))
-
#ifdef CONFIG_XFRM
static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index a6c63bbd9dd..fed6a1e7af9 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -489,7 +489,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
}
security_sk_classify_flow(sk, &fl);
- err = ip_route_output_flow(&rt, &fl, sk, !(msg->msg_flags&MSG_DONTWAIT));
+ err = ip_route_output_flow(&rt, &fl, sk, 1);
}
if (err)
goto done;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 2daa0dc19d3..baee304a3cb 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2635,7 +2635,7 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
if (nlh == NULL)
- return -ENOBUFS;
+ return -EMSGSIZE;
r = nlmsg_data(nlh);
r->rtm_family = AF_INET;
@@ -2718,7 +2718,8 @@ static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
return nlmsg_end(skb, nlh);
nla_put_failure:
- return nlmsg_cancel(skb, nlh);
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
}
int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b67e0dd743b..5bd43d7294f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2415,10 +2415,11 @@ void __init tcp_init(void)
&tcp_hashinfo.ehash_size,
NULL,
0);
- tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
- for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
+ tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
+ for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
rwlock_init(&tcp_hashinfo.ehash[i].lock);
INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
+ INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
}
tcp_hashinfo.bhash =
@@ -2475,7 +2476,7 @@ void __init tcp_init(void)
printk(KERN_INFO "TCP: Hash tables configured "
"(established %d bind %d)\n",
- tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
+ tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);
tcp_register_congestion_control(&tcp_reno);
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c26076fb890..c6109895bb5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -936,28 +936,58 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
struct tcp_sock *tp = tcp_sk(sk);
unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
+ struct sk_buff *cached_skb;
int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
int reord = tp->packets_out;
int prior_fackets;
u32 lost_retrans = 0;
int flag = 0;
int dup_sack = 0;
+ int cached_fack_count;
int i;
+ int first_sack_index;
if (!tp->sacked_out)
tp->fackets_out = 0;
prior_fackets = tp->fackets_out;
+ /* Check for D-SACK. */
+ if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) {
+ dup_sack = 1;
+ tp->rx_opt.sack_ok |= 4;
+ NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
+ } else if (num_sacks > 1 &&
+ !after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) &&
+ !before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) {
+ dup_sack = 1;
+ tp->rx_opt.sack_ok |= 4;
+ NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
+ }
+
+ /* D-SACK for already forgotten data...
+ * Do dumb counting. */
+ if (dup_sack &&
+ !after(ntohl(sp[0].end_seq), prior_snd_una) &&
+ after(ntohl(sp[0].end_seq), tp->undo_marker))
+ tp->undo_retrans--;
+
+ /* Eliminate too old ACKs, but take into
+ * account more or less fresh ones, they can
+ * contain valid SACK info.
+ */
+ if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
+ return 0;
+
/* SACK fastpath:
* if the only SACK change is the increase of the end_seq of
* the first block then only apply that SACK block
* and use retrans queue hinting otherwise slowpath */
flag = 1;
- for (i = 0; i< num_sacks; i++) {
- __u32 start_seq = ntohl(sp[i].start_seq);
- __u32 end_seq = ntohl(sp[i].end_seq);
+ for (i = 0; i < num_sacks; i++) {
+ __be32 start_seq = sp[i].start_seq;
+ __be32 end_seq = sp[i].end_seq;
- if (i == 0){
+ if (i == 0) {
if (tp->recv_sack_cache[i].start_seq != start_seq)
flag = 0;
} else {
@@ -967,39 +997,14 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
}
tp->recv_sack_cache[i].start_seq = start_seq;
tp->recv_sack_cache[i].end_seq = end_seq;
-
- /* Check for D-SACK. */
- if (i == 0) {
- u32 ack = TCP_SKB_CB(ack_skb)->ack_seq;
-
- if (before(start_seq, ack)) {
- dup_sack = 1;
- tp->rx_opt.sack_ok |= 4;
- NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
- } else if (num_sacks > 1 &&
- !after(end_seq, ntohl(sp[1].end_seq)) &&
- !before(start_seq, ntohl(sp[1].start_seq))) {
- dup_sack = 1;
- tp->rx_opt.sack_ok |= 4;
- NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
- }
-
- /* D-SACK for already forgotten data...
- * Do dumb counting. */
- if (dup_sack &&
- !after(end_seq, prior_snd_una) &&
- after(end_seq, tp->undo_marker))
- tp->undo_retrans--;
-
- /* Eliminate too old ACKs, but take into
- * account more or less fresh ones, they can
- * contain valid SACK info.
- */
- if (before(ack, prior_snd_una - tp->max_window))
- return 0;
- }
+ }
+ /* Clear the rest of the cache sack blocks so they won't match mistakenly. */
+ for (; i < ARRAY_SIZE(tp->recv_sack_cache); i++) {
+ tp->recv_sack_cache[i].start_seq = 0;
+ tp->recv_sack_cache[i].end_seq = 0;
}
+ first_sack_index = 0;
if (flag)
num_sacks = 1;
else {
@@ -1016,6 +1021,10 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
tmp = sp[j];
sp[j] = sp[j+1];
sp[j+1] = tmp;
+
+ /* Track where the first SACK block goes to */
+ if (j == first_sack_index)
+ first_sack_index = j+1;
}
}
@@ -1025,20 +1034,22 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
/* clear flag as used for different purpose in following code */
flag = 0;
+ /* Use SACK fastpath hint if valid */
+ cached_skb = tp->fastpath_skb_hint;
+ cached_fack_count = tp->fastpath_cnt_hint;
+ if (!cached_skb) {
+ cached_skb = sk->sk_write_queue.next;
+ cached_fack_count = 0;
+ }
+
for (i=0; i<num_sacks; i++, sp++) {
struct sk_buff *skb;
__u32 start_seq = ntohl(sp->start_seq);
__u32 end_seq = ntohl(sp->end_seq);
int fack_count;
- /* Use SACK fastpath hint if valid */
- if (tp->fastpath_skb_hint) {
- skb = tp->fastpath_skb_hint;
- fack_count = tp->fastpath_cnt_hint;
- } else {
- skb = sk->sk_write_queue.next;
- fack_count = 0;
- }
+ skb = cached_skb;
+ fack_count = cached_fack_count;
/* Event "B" in the comment above. */
if (after(end_seq, tp->high_seq))
@@ -1048,8 +1059,12 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
int in_sack, pcount;
u8 sacked;
- tp->fastpath_skb_hint = skb;
- tp->fastpath_cnt_hint = fack_count;
+ cached_skb = skb;
+ cached_fack_count = fack_count;
+ if (i == first_sack_index) {
+ tp->fastpath_skb_hint = skb;
+ tp->fastpath_cnt_hint = fack_count;
+ }
/* The retransmission queue is always in order, so
* we can short-circuit the walk early.
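The reworked SACK path above sorts the SACK blocks but has to remember which sorted slot ended up holding the block that came first on the wire, because only that block may update the fastpath hint (tp->fastpath_skb_hint / tp->fastpath_cnt_hint). The bookkeeping is simply: whenever a swap moves the tracked element, move the tracked index with it. A standalone sketch of that idea, not part of the patch — it uses a plain ascending sort on made-up numbers rather than the kernel's sequence-number ordering:

#include <stdio.h>

/* Bubble-sort an array while tracking where the element that started at
 * position 0 ends up -- the same bookkeeping the SACK code uses to know
 * which sorted block corresponds to the first on-the-wire block. */
static int sort_track_first(unsigned int *v, int n)
{
        int i, j, first_index = 0;

        for (i = 0; i < n; i++) {
                for (j = 0; j < n - 1 - i; j++) {
                        if (v[j] > v[j + 1]) {
                                unsigned int tmp = v[j];

                                v[j] = v[j + 1];
                                v[j + 1] = tmp;
                                if (j == first_index)
                                        first_index = j + 1;
                                else if (j + 1 == first_index)
                                        first_index = j;
                        }
                }
        }
        return first_index;
}

int main(void)
{
        unsigned int blocks[] = { 300, 100, 200 };
        int idx = sort_track_first(blocks, 3);

        printf("original first block now at index %d\n", idx);
        return 0;
}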
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 12de90a5047..f51d6404c61 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -191,7 +191,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
tmp = ip_route_connect(&rt, nexthop, inet->saddr,
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
IPPROTO_TCP,
- inet->sport, usin->sin_port, sk);
+ inet->sport, usin->sin_port, sk, 1);
if (tmp < 0)
return tmp;
@@ -502,11 +502,11 @@ void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
struct tcphdr *th = skb->h.th;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- th->check = ~tcp_v4_check(th, len,
- inet->saddr, inet->daddr, 0);
+ th->check = ~tcp_v4_check(len, inet->saddr,
+ inet->daddr, 0);
skb->csum_offset = offsetof(struct tcphdr, check);
} else {
- th->check = tcp_v4_check(th, len, inet->saddr, inet->daddr,
+ th->check = tcp_v4_check(len, inet->saddr, inet->daddr,
csum_partial((char *)th,
th->doff << 2,
skb->csum));
@@ -525,7 +525,7 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
th = skb->h.th;
th->check = 0;
- th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0);
+ th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
skb->csum_offset = offsetof(struct tcphdr, check);
skb->ip_summed = CHECKSUM_PARTIAL;
return 0;
@@ -747,7 +747,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
if (skb) {
struct tcphdr *th = skb->h.th;
- th->check = tcp_v4_check(th, skb->len,
+ th->check = tcp_v4_check(skb->len,
ireq->loc_addr,
ireq->rmt_addr,
csum_partial((char *)th, skb->len,
@@ -1514,7 +1514,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_COMPLETE) {
- if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
+ if (!tcp_v4_check(skb->len, skb->nh.iph->saddr,
skb->nh.iph->daddr, skb->csum)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
return 0;
@@ -2051,7 +2051,7 @@ static void *established_get_first(struct seq_file *seq)
}
st->state = TCP_SEQ_STATE_TIME_WAIT;
inet_twsk_for_each(tw, node,
- &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
+ &tcp_hashinfo.ehash[st->bucket].twchain) {
if (tw->tw_family != st->family) {
continue;
}
@@ -2107,7 +2107,7 @@ get_tw:
}
st->state = TCP_SEQ_STATE_TIME_WAIT;
- tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain);
+ tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
goto get_tw;
found:
cur = sk;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 975f4472af2..58b7111523f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -965,7 +965,8 @@ static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *sk
u32 in_flight, cwnd;
/* Don't be strict about the congestion window for the final FIN. */
- if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
+ if ((TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
+ tcp_skb_pcount(skb) == 1)
return 1;
in_flight = tcp_packets_in_flight(tp);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index cfff930f2ba..8b54c68a0d1 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -629,7 +629,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
{ .sport = inet->sport,
.dport = dport } } };
security_sk_classify_flow(sk, &fl);
- err = ip_route_output_flow(&rt, &fl, sk, !(msg->msg_flags&MSG_DONTWAIT));
+ err = ip_route_output_flow(&rt, &fl, sk, 1);
if (err)
goto out;
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index e23c21d31a5..e54c5494c88 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -23,6 +23,12 @@ static inline void ipip_ecn_decapsulate(struct sk_buff *skb)
IP_ECN_set_ce(inner_iph);
}
+static inline void ipip6_ecn_decapsulate(struct iphdr *iph, struct sk_buff *skb)
+{
+ if (INET_ECN_is_ce(iph->tos))
+ IP6_ECN_set_ce(skb->nh.ipv6h);
+}
+
/* Add encapsulation header.
*
* The top IP header will be constructed per RFC 2401. The following fields
@@ -36,6 +42,7 @@ static inline void ipip_ecn_decapsulate(struct sk_buff *skb)
static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
{
struct dst_entry *dst = skb->dst;
+ struct xfrm_dst *xdst = (struct xfrm_dst*)dst;
struct iphdr *iph, *top_iph;
int flags;
@@ -48,15 +55,27 @@ static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
top_iph->ihl = 5;
top_iph->version = 4;
+ flags = x->props.flags;
+
/* DS disclosed */
- top_iph->tos = INET_ECN_encapsulate(iph->tos, iph->tos);
+ if (xdst->route->ops->family == AF_INET) {
+ top_iph->protocol = IPPROTO_IPIP;
+ top_iph->tos = INET_ECN_encapsulate(iph->tos, iph->tos);
+ top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
+ 0 : (iph->frag_off & htons(IP_DF));
+ }
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ else {
+ struct ipv6hdr *ipv6h = (struct ipv6hdr*)iph;
+ top_iph->protocol = IPPROTO_IPV6;
+ top_iph->tos = INET_ECN_encapsulate(iph->tos, ipv6_get_dsfield(ipv6h));
+ top_iph->frag_off = 0;
+ }
+#endif
- flags = x->props.flags;
if (flags & XFRM_STATE_NOECN)
IP_ECN_clear(top_iph);
- top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
- 0 : (iph->frag_off & htons(IP_DF));
if (!top_iph->frag_off)
__ip_select_ident(top_iph, dst->child, 0);
@@ -64,7 +83,6 @@ static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
top_iph->saddr = x->props.saddr.a4;
top_iph->daddr = x->id.daddr.a4;
- top_iph->protocol = IPPROTO_IPIP;
memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options));
return 0;
@@ -75,8 +93,16 @@ static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
struct iphdr *iph = skb->nh.iph;
int err = -EINVAL;
- if (iph->protocol != IPPROTO_IPIP)
- goto out;
+ switch(iph->protocol){
+ case IPPROTO_IPIP:
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ case IPPROTO_IPV6:
+ break;
+#endif
+ default:
+ goto out;
+ }
+
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto out;
@@ -84,10 +110,19 @@ static int xfrm4_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
(err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
goto out;
- if (x->props.flags & XFRM_STATE_DECAP_DSCP)
- ipv4_copy_dscp(iph, skb->h.ipiph);
- if (!(x->props.flags & XFRM_STATE_NOECN))
- ipip_ecn_decapsulate(skb);
+ if (iph->protocol == IPPROTO_IPIP) {
+ if (x->props.flags & XFRM_STATE_DECAP_DSCP)
+ ipv4_copy_dscp(iph, skb->h.ipiph);
+ if (!(x->props.flags & XFRM_STATE_NOECN))
+ ipip_ecn_decapsulate(skb);
+ }
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ else {
+ if (!(x->props.flags & XFRM_STATE_NOECN))
+ ipip6_ecn_decapsulate(iph, skb);
+ skb->protocol = htons(ETH_P_IPV6);
+ }
+#endif
skb->mac.raw = memmove(skb->data - skb->mac_len,
skb->mac.raw, skb->mac_len);
skb->nh.raw = skb->data;
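With the change above, xfrm4_tunnel_output() no longer assumes an IPv4 inner packet: the outer protocol, the TOS source and the fragmentation handling are chosen from the inner family (IPv4-in-IPv4 keeps IPPROTO_IPIP, IPv6-in-IPv4 uses IPPROTO_IPV6), and xfrm4_tunnel_input() accepts both protocols on decapsulation. A minimal userspace sketch of just the family-to-outer-protocol mapping, not part of the patch:

#include <stdio.h>
#include <sys/socket.h>   /* AF_INET, AF_INET6 */
#include <netinet/in.h>   /* IPPROTO_IPIP, IPPROTO_IPV6 */

/* Pick the outer IPv4 protocol number from the inner packet's family,
 * mirroring the dual-family branch added to xfrm4_tunnel_output(). */
static int outer_protocol(int inner_family)
{
        switch (inner_family) {
        case AF_INET:
                return IPPROTO_IPIP;    /* IPv4-in-IPv4 */
        case AF_INET6:
                return IPPROTO_IPV6;    /* IPv6-in-IPv4 */
        default:
                return -1;
        }
}

int main(void)
{
        printf("AF_INET  -> outer proto %d\n", outer_protocol(AF_INET));
        printf("AF_INET6 -> outer proto %d\n", outer_protocol(AF_INET6));
        return 0;
}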
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index fb9f69c616f..699f27ce62a 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -72,13 +72,11 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
struct dst_entry *dst, *dst_prev;
struct rtable *rt0 = (struct rtable*)(*dst_p);
struct rtable *rt = rt0;
- __be32 remote = fl->fl4_dst;
- __be32 local = fl->fl4_src;
struct flowi fl_tunnel = {
.nl_u = {
.ip4_u = {
- .saddr = local,
- .daddr = remote,
+ .saddr = fl->fl4_src,
+ .daddr = fl->fl4_dst,
.tos = fl->fl4_tos
}
}
@@ -94,7 +92,6 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
for (i = 0; i < nx; i++) {
struct dst_entry *dst1 = dst_alloc(&xfrm4_dst_ops);
struct xfrm_dst *xdst;
- int tunnel = 0;
if (unlikely(dst1 == NULL)) {
err = -ENOBUFS;
@@ -116,19 +113,28 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
dst1->next = dst_prev;
dst_prev = dst1;
- if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
- remote = xfrm[i]->id.daddr.a4;
- local = xfrm[i]->props.saddr.a4;
- tunnel = 1;
- }
+
header_len += xfrm[i]->props.header_len;
trailer_len += xfrm[i]->props.trailer_len;
- if (tunnel) {
- fl_tunnel.fl4_src = local;
- fl_tunnel.fl4_dst = remote;
+ if (xfrm[i]->props.mode == XFRM_MODE_TUNNEL) {
+ unsigned short encap_family = xfrm[i]->props.family;
+ switch(encap_family) {
+ case AF_INET:
+ fl_tunnel.fl4_dst = xfrm[i]->id.daddr.a4;
+ fl_tunnel.fl4_src = xfrm[i]->props.saddr.a4;
+ break;
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ case AF_INET6:
+ ipv6_addr_copy(&fl_tunnel.fl6_dst, (struct in6_addr*)&xfrm[i]->id.daddr.a6);
+ ipv6_addr_copy(&fl_tunnel.fl6_src, (struct in6_addr*)&xfrm[i]->props.saddr.a6);
+ break;
+#endif
+ default:
+ BUG_ON(1);
+ }
err = xfrm_dst_lookup((struct xfrm_dst **)&rt,
- &fl_tunnel, AF_INET);
+ &fl_tunnel, encap_family);
if (err)
goto error;
} else
@@ -145,6 +151,7 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
i = 0;
for (; dst_prev != &rt->u.dst; dst_prev = dst_prev->child) {
struct xfrm_dst *x = (struct xfrm_dst*)dst_prev;
+ struct xfrm_state_afinfo *afinfo;
x->u.rt.fl = *fl;
dst_prev->xfrm = xfrm[i++];
@@ -162,8 +169,18 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
/* Copy neighbout for reachability confirmation */
dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour);
dst_prev->input = rt->u.dst.input;
- dst_prev->output = xfrm4_output;
- if (rt->peer)
+ /* XXX: When IPv6 module can be unloaded, we should manage reference
+ * to xfrm6_output in afinfo->output. Miyazawa
+ * */
+ afinfo = xfrm_state_get_afinfo(dst_prev->xfrm->props.family);
+ if (!afinfo) {
+ dst = *dst_p;
+ err = -EAFNOSUPPORT;
+ goto error;
+ }
+ dst_prev->output = afinfo->output;
+ xfrm_state_put_afinfo(afinfo);
+ if (dst_prev->xfrm->props.family == AF_INET && rt->peer)
atomic_inc(&rt->peer->refcnt);
x->u.rt.peer = rt->peer;
/* Sheit... I remember I did this right. Apparently,
@@ -274,7 +291,7 @@ static void xfrm4_dst_destroy(struct dst_entry *dst)
if (likely(xdst->u.rt.idev))
in_dev_put(xdst->u.rt.idev);
- if (likely(xdst->u.rt.peer))
+ if (dst->xfrm->props.family == AF_INET && likely(xdst->u.rt.peer))
inet_putpeer(xdst->u.rt.peer);
xfrm_dst_destroy(xdst);
}
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 3cc3df0c6ec..93e2c061cdd 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -51,6 +51,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
.family = AF_INET,
.init_flags = xfrm4_init_flags,
.init_tempsel = __xfrm4_init_tempsel,
+ .output = xfrm4_output,
};
void __init xfrm4_state_init(void)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e3854696988..fe5e1d83387 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3117,7 +3117,7 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags);
if (nlh == NULL)
- return -ENOBUFS;
+ return -EMSGSIZE;
put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
ifa->idev->dev->ifindex);
@@ -3137,8 +3137,10 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
}
if (nla_put(skb, IFA_ADDRESS, 16, &ifa->addr) < 0 ||
- put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
- return nlmsg_cancel(skb, nlh);
+ put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0) {
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+ }
return nlmsg_end(skb, nlh);
}
@@ -3155,13 +3157,15 @@ static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags);
if (nlh == NULL)
- return -ENOBUFS;
+ return -EMSGSIZE;
put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
if (nla_put(skb, IFA_MULTICAST, 16, &ifmca->mca_addr) < 0 ||
put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
- INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0)
- return nlmsg_cancel(skb, nlh);
+ INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+ }
return nlmsg_end(skb, nlh);
}
@@ -3178,13 +3182,15 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
nlh = nlmsg_put(skb, pid, seq, event, sizeof(struct ifaddrmsg), flags);
if (nlh == NULL)
- return -ENOBUFS;
+ return -EMSGSIZE;
put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
if (nla_put(skb, IFA_ANYCAST, 16, &ifaca->aca_addr) < 0 ||
put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
- INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0)
- return nlmsg_cancel(skb, nlh);
+ INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+ }
return nlmsg_end(skb, nlh);
}
@@ -3334,9 +3340,12 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh,
err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).pid,
nlh->nlmsg_seq, RTM_NEWADDR, 0);
- /* failure implies BUG in inet6_ifaddr_msgsize() */
- BUG_ON(err < 0);
-
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout_ifa;
+ }
err = rtnl_unicast(skb, NETLINK_CB(in_skb).pid);
errout_ifa:
in6_ifa_put(ifa);
@@ -3354,9 +3363,12 @@ static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
goto errout;
err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0);
- /* failure implies BUG in inet6_ifaddr_msgsize() */
- BUG_ON(err < 0);
-
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
err = rtnl_notify(skb, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
errout:
if (err < 0)
@@ -3426,7 +3438,7 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
if (nlh == NULL)
- return -ENOBUFS;
+ return -EMSGSIZE;
hdr = nlmsg_data(nlh);
hdr->ifi_family = AF_INET6;
@@ -3469,7 +3481,8 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
return nlmsg_end(skb, nlh);
nla_put_failure:
- return nlmsg_cancel(skb, nlh);
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
}
static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
@@ -3507,9 +3520,12 @@ void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
goto errout;
err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
- /* failure implies BUG in inet6_if_nlmsg_size() */
- BUG_ON(err < 0);
-
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
err = rtnl_notify(skb, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
errout:
if (err < 0)
@@ -3533,7 +3549,7 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*pmsg), flags);
if (nlh == NULL)
- return -ENOBUFS;
+ return -EMSGSIZE;
pmsg = nlmsg_data(nlh);
pmsg->prefix_family = AF_INET6;
@@ -3558,7 +3574,8 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
return nlmsg_end(skb, nlh);
nla_put_failure:
- return nlmsg_cancel(skb, nlh);
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
}
static void inet6_prefix_notify(int event, struct inet6_dev *idev,
@@ -3572,9 +3589,12 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
goto errout;
err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
- /* failure implies BUG in inet6_prefix_nlmsg_size() */
- BUG_ON(err < 0);
-
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
err = rtnl_notify(skb, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
errout:
if (err < 0)
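The fill functions above switch from -ENOBUFS plus BUG_ON() to the -EMSGSIZE convention: if the message does not fit, cancel it and return -EMSGSIZE so the caller can WARN (it implies a bug in the size estimate) and drop the skb instead of crashing. A small userspace sketch of that error convention, not part of the patch and using an ordinary string buffer in place of an skb:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Fill a fixed-size message buffer; mirror the netlink convention of
 * returning -EMSGSIZE (rather than aborting) when the buffer is too
 * small, so the caller decides how loudly to complain. */
static int fill_msg(char *buf, size_t len, const char *payload)
{
        if (strlen(payload) + 1 > len)
                return -EMSGSIZE;
        strcpy(buf, payload);
        return 0;
}

int main(void)
{
        char small[8];
        int err = fill_msg(small, sizeof(small), "far too long for the buffer");

        if (err < 0) {
                /* -EMSGSIZE here would imply the size estimate was wrong:
                 * warn and drop the message instead of crashing. */
                fprintf(stderr, "fill failed: %d (size estimate bug?)\n", err);
                return 1;
        }
        puts(small);
        return 0;
}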
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 5c94fea90e9..ecde30140f4 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -178,7 +178,7 @@ ipv4_connected:
if (final_p)
ipv6_addr_copy(&fl.fl6_dst, final_p);
- if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
+ if ((err = xfrm_lookup(&dst, &fl, sk, 1)) < 0)
goto out;
/* source address lookup done in ip6_dst_lookup */
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index b7e5bae0e34..e61116949be 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -79,7 +79,7 @@ struct sock *__inet6_lookup_established(struct inet_hashinfo *hashinfo,
goto hit; /* You sunk my battleship! */
}
/* Must check for a TIME_WAIT'er before going to listener hash. */
- sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) {
+ sk_for_each(sk, node, &head->twchain) {
const struct inet_timewait_sock *tw = inet_twsk(sk);
if(*((__portpair *)&(tw->tw_dport)) == ports &&
@@ -183,7 +183,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
write_lock(&head->lock);
/* Check TIME-WAIT sockets first. */
- sk_for_each(sk2, node, &(head + hinfo->ehash_size)->chain) {
+ sk_for_each(sk2, node, &head->twchain) {
const struct inet6_timewait_sock *tw6 = inet6_twsk(sk2);
tw = inet_twsk(sk2);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 8d918348f5b..2b9e3bb7da6 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -999,7 +999,8 @@ ip6ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
dev = t->dev;
}
- err = unregister_netdevice(dev);
+ err = 0;
+ unregister_netdevice(dev);
break;
default:
err = -EINVAL;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 882cde4b404..e3ec2169583 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1582,6 +1582,8 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
skb = add_grhead(skb, pmc, type, &pgr);
first = 0;
}
+ if (!skb)
+ return NULL;
psrc = (struct in6_addr *)skb_put(skb, sizeof(*psrc));
*psrc = psf->sf_addr;
scount++; stotal++;
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index be7dd7db65d..681bb077eac 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -89,7 +89,6 @@ static int mip6_mh_len(int type)
int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
{
struct ip6_mh *mh;
- int mhlen;
if (!pskb_may_pull(skb, (skb->h.raw - skb->data) + 8) ||
!pskb_may_pull(skb, (skb->h.raw - skb->data) + ((skb->h.raw[1] + 1) << 3)))
@@ -103,31 +102,6 @@ int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
mip6_param_prob(skb, 0, (&mh->ip6mh_hdrlen) - skb->nh.raw);
return -1;
}
- mhlen = (mh->ip6mh_hdrlen + 1) << 3;
-
- if (skb->ip_summed == CHECKSUM_COMPLETE) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
- &skb->nh.ipv6h->daddr,
- mhlen, IPPROTO_MH,
- skb->csum)) {
- LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH hw checksum failed\n");
- skb->ip_summed = CHECKSUM_NONE;
- }
- }
- if (skb->ip_summed == CHECKSUM_NONE) {
- if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
- &skb->nh.ipv6h->daddr,
- mhlen, IPPROTO_MH,
- skb_checksum(skb, 0, mhlen, 0))) {
- LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH checksum failed "
- "[" NIP6_FMT " > " NIP6_FMT "]\n",
- NIP6(skb->nh.ipv6h->saddr),
- NIP6(skb->nh.ipv6h->daddr));
- return -1;
- }
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
if (mh->ip6mh_proto != IPPROTO_NONE) {
LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n",
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index adcd6131df2..cd549aea84f 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -114,6 +114,14 @@ config IP6_NF_MATCH_AH
To compile it as a module, choose M here. If unsure, say N.
+config IP6_NF_MATCH_MH
+ tristate "MH match support"
+ depends on IP6_NF_IPTABLES
+ help
+ This module allows one to match MH packets.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config IP6_NF_MATCH_EUI64
tristate "EUI64 address check"
depends on IP6_NF_IPTABLES
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index ac1dfebde17..4513eab7739 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o
obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
+obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o
# objects for l3 independent conntrack
nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 99502c5da4c..7083e1cfb2f 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -530,7 +530,7 @@ check_match(struct ip6t_entry_match *m,
unsigned int hookmask,
unsigned int *i)
{
- struct ip6t_match *match;
+ struct xt_match *match;
int ret;
match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
@@ -564,14 +564,14 @@ err:
return ret;
}
-static struct ip6t_target ip6t_standard_target;
+static struct xt_target ip6t_standard_target;
static inline int
check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
unsigned int *i)
{
struct ip6t_entry_target *t;
- struct ip6t_target *target;
+ struct xt_target *target;
int ret;
unsigned int j;
@@ -1348,13 +1348,13 @@ icmp6_checkentry(const char *tablename,
}
/* The built-in targets: standard (NULL) and error. */
-static struct ip6t_target ip6t_standard_target = {
+static struct xt_target ip6t_standard_target = {
.name = IP6T_STANDARD_TARGET,
.targetsize = sizeof(int),
.family = AF_INET6,
};
-static struct ip6t_target ip6t_error_target = {
+static struct xt_target ip6t_error_target = {
.name = IP6T_ERROR_TARGET,
.target = ip6t_error,
.targetsize = IP6T_FUNCTION_MAXNAMELEN,
@@ -1371,7 +1371,7 @@ static struct nf_sockopt_ops ip6t_sockopts = {
.get = do_ip6t_get_ctl,
};
-static struct ip6t_match icmp6_matchstruct = {
+static struct xt_match icmp6_matchstruct = {
.name = "icmp6",
.match = &icmp6_match,
.matchsize = sizeof(struct ip6t_icmp),
diff --git a/net/ipv6/netfilter/ip6t_HL.c b/net/ipv6/netfilter/ip6t_HL.c
index 435750f664d..04e500172fb 100644
--- a/net/ipv6/netfilter/ip6t_HL.c
+++ b/net/ipv6/netfilter/ip6t_HL.c
@@ -9,12 +9,13 @@
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv6/ip6t_HL.h>
MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>");
-MODULE_DESCRIPTION("IP tables Hop Limit modification module");
+MODULE_DESCRIPTION("IP6 tables Hop Limit modification module");
MODULE_LICENSE("GPL");
static unsigned int ip6t_hl_target(struct sk_buff **pskb,
@@ -52,10 +53,9 @@ static unsigned int ip6t_hl_target(struct sk_buff **pskb,
break;
}
- if (new_hl != ip6h->hop_limit)
- ip6h->hop_limit = new_hl;
+ ip6h->hop_limit = new_hl;
- return IP6T_CONTINUE;
+ return XT_CONTINUE;
}
static int ip6t_hl_checkentry(const char *tablename,
@@ -79,8 +79,9 @@ static int ip6t_hl_checkentry(const char *tablename,
return 1;
}
-static struct ip6t_target ip6t_HL = {
+static struct xt_target ip6t_HL = {
.name = "HL",
+ .family = AF_INET6,
.target = ip6t_hl_target,
.targetsize = sizeof(struct ip6t_HL_info),
.table = "mangle",
@@ -90,12 +91,12 @@ static struct ip6t_target ip6t_HL = {
static int __init ip6t_hl_init(void)
{
- return ip6t_register_target(&ip6t_HL);
+ return xt_register_target(&ip6t_HL);
}
static void __exit ip6t_hl_fini(void)
{
- ip6t_unregister_target(&ip6t_HL);
+ xt_unregister_target(&ip6t_HL);
}
module_init(ip6t_hl_init);
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index 33b1faa90d7..5587a77b884 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -21,6 +21,7 @@
#include <net/tcp.h>
#include <net/ipv6.h>
#include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
MODULE_AUTHOR("Jan Rekorajski <baggins@pld.org.pl>");
@@ -442,7 +443,7 @@ ip6t_log_target(struct sk_buff **pskb,
ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li,
loginfo->prefix);
- return IP6T_CONTINUE;
+ return XT_CONTINUE;
}
@@ -466,8 +467,9 @@ static int ip6t_log_checkentry(const char *tablename,
return 1;
}
-static struct ip6t_target ip6t_log_reg = {
+static struct xt_target ip6t_log_reg = {
.name = "LOG",
+ .family = AF_INET6,
.target = ip6t_log_target,
.targetsize = sizeof(struct ip6t_log_info),
.checkentry = ip6t_log_checkentry,
@@ -482,8 +484,11 @@ static struct nf_logger ip6t_logger = {
static int __init ip6t_log_init(void)
{
- if (ip6t_register_target(&ip6t_log_reg))
- return -EINVAL;
+ int ret;
+
+ ret = xt_register_target(&ip6t_log_reg);
+ if (ret < 0)
+ return ret;
if (nf_log_register(PF_INET6, &ip6t_logger) < 0) {
printk(KERN_WARNING "ip6t_LOG: not logging via system console "
"since somebody else already registered for PF_INET6\n");
@@ -497,7 +502,7 @@ static int __init ip6t_log_init(void)
static void __exit ip6t_log_fini(void)
{
nf_log_unregister_logger(&ip6t_logger);
- ip6t_unregister_target(&ip6t_log_reg);
+ xt_unregister_target(&ip6t_log_reg);
}
module_init(ip6t_log_init);
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 311eae82feb..278349c1879 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -26,6 +26,7 @@
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/flow.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_ipv6/ip6t_REJECT.h>
@@ -234,7 +235,7 @@ static int check(const char *tablename,
} else if (rejinfo->with == IP6T_TCP_RESET) {
/* Must specify that it's a TCP packet */
if (e->ipv6.proto != IPPROTO_TCP
- || (e->ipv6.invflags & IP6T_INV_PROTO)) {
+ || (e->ipv6.invflags & XT_INV_PROTO)) {
DEBUGP("ip6t_REJECT: TCP_RESET illegal for non-tcp\n");
return 0;
}
@@ -242,8 +243,9 @@ static int check(const char *tablename,
return 1;
}
-static struct ip6t_target ip6t_reject_reg = {
+static struct xt_target ip6t_reject_reg = {
.name = "REJECT",
+ .family = AF_INET6,
.target = reject6_target,
.targetsize = sizeof(struct ip6t_reject_info),
.table = "filter",
@@ -255,12 +257,12 @@ static struct ip6t_target ip6t_reject_reg = {
static int __init ip6t_reject_init(void)
{
- return ip6t_register_target(&ip6t_reject_reg);
+ return xt_register_target(&ip6t_reject_reg);
}
static void __exit ip6t_reject_fini(void)
{
- ip6t_unregister_target(&ip6t_reject_reg);
+ xt_unregister_target(&ip6t_reject_reg);
}
module_init(ip6t_reject_init);
diff --git a/net/ipv6/netfilter/ip6t_ah.c b/net/ipv6/netfilter/ip6t_ah.c
index 46486645eb7..456c76adcbf 100644
--- a/net/ipv6/netfilter/ip6t_ah.c
+++ b/net/ipv6/netfilter/ip6t_ah.c
@@ -15,6 +15,7 @@
#include <net/checksum.h>
#include <net/ipv6.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_ipv6/ip6t_ah.h>
@@ -118,8 +119,9 @@ checkentry(const char *tablename,
return 1;
}
-static struct ip6t_match ah_match = {
+static struct xt_match ah_match = {
.name = "ah",
+ .family = AF_INET6,
.match = match,
.matchsize = sizeof(struct ip6t_ah),
.checkentry = checkentry,
@@ -128,12 +130,12 @@ static struct ip6t_match ah_match = {
static int __init ip6t_ah_init(void)
{
- return ip6t_register_match(&ah_match);
+ return xt_register_match(&ah_match);
}
static void __exit ip6t_ah_fini(void)
{
- ip6t_unregister_match(&ah_match);
+ xt_unregister_match(&ah_match);
}
module_init(ip6t_ah_init);
diff --git a/net/ipv6/netfilter/ip6t_eui64.c b/net/ipv6/netfilter/ip6t_eui64.c
index 4f6b84c8f4a..967bed71d4a 100644
--- a/net/ipv6/netfilter/ip6t_eui64.c
+++ b/net/ipv6/netfilter/ip6t_eui64.c
@@ -12,6 +12,7 @@
#include <linux/ipv6.h>
#include <linux/if_ether.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
MODULE_DESCRIPTION("IPv6 EUI64 address checking match");
@@ -61,8 +62,9 @@ match(const struct sk_buff *skb,
return 0;
}
-static struct ip6t_match eui64_match = {
+static struct xt_match eui64_match = {
.name = "eui64",
+ .family = AF_INET6,
.match = match,
.matchsize = sizeof(int),
.hooks = (1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_IN) |
@@ -72,12 +74,12 @@ static struct ip6t_match eui64_match = {
static int __init ip6t_eui64_init(void)
{
- return ip6t_register_match(&eui64_match);
+ return xt_register_match(&eui64_match);
}
static void __exit ip6t_eui64_fini(void)
{
- ip6t_unregister_match(&eui64_match);
+ xt_unregister_match(&eui64_match);
}
module_init(ip6t_eui64_init);
diff --git a/net/ipv6/netfilter/ip6t_frag.c b/net/ipv6/netfilter/ip6t_frag.c
index cd22eaaccdc..5a5da71321b 100644
--- a/net/ipv6/netfilter/ip6t_frag.c
+++ b/net/ipv6/netfilter/ip6t_frag.c
@@ -14,6 +14,7 @@
#include <net/checksum.h>
#include <net/ipv6.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_ipv6/ip6t_frag.h>
@@ -135,8 +136,9 @@ checkentry(const char *tablename,
return 1;
}
-static struct ip6t_match frag_match = {
+static struct xt_match frag_match = {
.name = "frag",
+ .family = AF_INET6,
.match = match,
.matchsize = sizeof(struct ip6t_frag),
.checkentry = checkentry,
@@ -145,12 +147,12 @@ static struct ip6t_match frag_match = {
static int __init ip6t_frag_init(void)
{
- return ip6t_register_match(&frag_match);
+ return xt_register_match(&frag_match);
}
static void __exit ip6t_frag_fini(void)
{
- ip6t_unregister_match(&frag_match);
+ xt_unregister_match(&frag_match);
}
module_init(ip6t_frag_init);
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c
index 3f25babe044..d2373c7cd35 100644
--- a/net/ipv6/netfilter/ip6t_hbh.c
+++ b/net/ipv6/netfilter/ip6t_hbh.c
@@ -16,6 +16,7 @@
#include <asm/byteorder.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_ipv6/ip6t_opts.h>
diff --git a/net/ipv6/netfilter/ip6t_hl.c b/net/ipv6/netfilter/ip6t_hl.c
index 44a729e17c4..601cc1211c6 100644
--- a/net/ipv6/netfilter/ip6t_hl.c
+++ b/net/ipv6/netfilter/ip6t_hl.c
@@ -8,11 +8,12 @@
* published by the Free Software Foundation.
*/
+#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter_ipv6/ip6t_hl.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter/x_tables.h>
MODULE_AUTHOR("Maciej Soltysiak <solt@dns.toxicfilms.tv>");
MODULE_DESCRIPTION("IP tables Hop Limit matching module");
@@ -48,8 +49,9 @@ static int match(const struct sk_buff *skb,
return 0;
}
-static struct ip6t_match hl_match = {
+static struct xt_match hl_match = {
.name = "hl",
+ .family = AF_INET6,
.match = match,
.matchsize = sizeof(struct ip6t_hl_info),
.me = THIS_MODULE,
@@ -57,13 +59,12 @@ static struct ip6t_match hl_match = {
static int __init ip6t_hl_init(void)
{
- return ip6t_register_match(&hl_match);
+ return xt_register_match(&hl_match);
}
static void __exit ip6t_hl_fini(void)
{
- ip6t_unregister_match(&hl_match);
-
+ xt_unregister_match(&hl_match);
}
module_init(ip6t_hl_init);
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c
index 3093c398002..26ac084adef 100644
--- a/net/ipv6/netfilter/ip6t_ipv6header.c
+++ b/net/ipv6/netfilter/ip6t_ipv6header.c
@@ -18,6 +18,7 @@
#include <net/checksum.h>
#include <net/ipv6.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_ipv6/ip6t_ipv6header.h>
@@ -140,8 +141,9 @@ ipv6header_checkentry(const char *tablename,
return 1;
}
-static struct ip6t_match ip6t_ipv6header_match = {
+static struct xt_match ip6t_ipv6header_match = {
.name = "ipv6header",
+ .family = AF_INET6,
.match = &ipv6header_match,
.matchsize = sizeof(struct ip6t_ipv6header_info),
.checkentry = &ipv6header_checkentry,
@@ -151,12 +153,12 @@ static struct ip6t_match ip6t_ipv6header_match = {
static int __init ipv6header_init(void)
{
- return ip6t_register_match(&ip6t_ipv6header_match);
+ return xt_register_match(&ip6t_ipv6header_match);
}
static void __exit ipv6header_exit(void)
{
- ip6t_unregister_match(&ip6t_ipv6header_match);
+ xt_unregister_match(&ip6t_ipv6header_match);
}
module_init(ipv6header_init);
diff --git a/net/ipv6/netfilter/ip6t_mh.c b/net/ipv6/netfilter/ip6t_mh.c
new file mode 100644
index 00000000000..2c7efc6a506
--- /dev/null
+++ b/net/ipv6/netfilter/ip6t_mh.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C)2006 USAGI/WIDE Project
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Author:
+ * Masahide NAKAMURA @USAGI <masahide.nakamura.cz@hitachi.com>
+ *
+ * Based on net/netfilter/xt_tcpudp.c
+ *
+ */
+#include <linux/types.h>
+#include <linux/module.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <net/mip6.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_ipv6/ip6t_mh.h>
+
+MODULE_DESCRIPTION("ip6t_tables match for MH");
+MODULE_LICENSE("GPL");
+
+#ifdef DEBUG_IP_FIREWALL_USER
+#define duprintf(format, args...) printk(format , ## args)
+#else
+#define duprintf(format, args...)
+#endif
+
+/* Returns 1 if the type is matched by the range, 0 otherwise */
+static inline int
+type_match(u_int8_t min, u_int8_t max, u_int8_t type, int invert)
+{
+ int ret;
+
+ ret = (type >= min && type <= max) ^ invert;
+ return ret;
+}
+
+static int
+match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct xt_match *match,
+ const void *matchinfo,
+ int offset,
+ unsigned int protoff,
+ int *hotdrop)
+{
+ struct ip6_mh _mh, *mh;
+ const struct ip6t_mh *mhinfo = matchinfo;
+
+ /* Must not be a fragment. */
+ if (offset)
+ return 0;
+
+ mh = skb_header_pointer(skb, protoff, sizeof(_mh), &_mh);
+ if (mh == NULL) {
+ /* We've been asked to examine this packet, and we
+ can't. Hence, no choice but to drop. */
+ duprintf("Dropping evil MH tinygram.\n");
+ *hotdrop = 1;
+ return 0;
+ }
+
+ return type_match(mhinfo->types[0], mhinfo->types[1], mh->ip6mh_type,
+ !!(mhinfo->invflags & IP6T_MH_INV_TYPE));
+}
+
+/* Called when user tries to insert an entry of this type. */
+static int
+mh_checkentry(const char *tablename,
+ const void *entry,
+ const struct xt_match *match,
+ void *matchinfo,
+ unsigned int hook_mask)
+{
+ const struct ip6t_mh *mhinfo = matchinfo;
+
+ /* Must specify no unknown invflags */
+ return !(mhinfo->invflags & ~IP6T_MH_INV_MASK);
+}
+
+static struct xt_match mh_match = {
+ .name = "mh",
+ .family = AF_INET6,
+ .checkentry = mh_checkentry,
+ .match = match,
+ .matchsize = sizeof(struct ip6t_mh),
+ .proto = IPPROTO_MH,
+ .me = THIS_MODULE,
+};
+
+static int __init ip6t_mh_init(void)
+{
+ return xt_register_match(&mh_match);
+}
+
+static void __exit ip6t_mh_fini(void)
+{
+ xt_unregister_match(&mh_match);
+}
+
+module_init(ip6t_mh_init);
+module_exit(ip6t_mh_fini);
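The heart of the new ip6t_mh match is type_match(): an inclusive [min, max] range test whose result is flipped with XOR when IP6T_MH_INV_TYPE is set. A standalone copy of that helper with a tiny usage example, not part of the patch:

#include <stdio.h>

/* Range match with an optional inversion, the same shape as the
 * type_match() helper in the new ip6t_mh match above. */
static int type_match(unsigned char min, unsigned char max,
                      unsigned char type, int invert)
{
        return (type >= min && type <= max) ^ invert;
}

int main(void)
{
        /* Match MH types 5..7, then the same range inverted. */
        printf("type 6 in 5..7:          %d\n", type_match(5, 7, 6, 0));
        printf("type 6 in 5..7, invert:  %d\n", type_match(5, 7, 6, 1));
        printf("type 2 in 5..7, invert:  %d\n", type_match(5, 7, 2, 1));
        return 0;
}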
diff --git a/net/ipv6/netfilter/ip6t_owner.c b/net/ipv6/netfilter/ip6t_owner.c
index 4eb9bbc4ebc..43738bba00b 100644
--- a/net/ipv6/netfilter/ip6t_owner.c
+++ b/net/ipv6/netfilter/ip6t_owner.c
@@ -16,6 +16,7 @@
#include <linux/netfilter_ipv6/ip6t_owner.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter/x_tables.h>
MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
MODULE_DESCRIPTION("IP6 tables owner matching module");
@@ -69,8 +70,9 @@ checkentry(const char *tablename,
return 1;
}
-static struct ip6t_match owner_match = {
+static struct xt_match owner_match = {
.name = "owner",
+ .family = AF_INET6,
.match = match,
.matchsize = sizeof(struct ip6t_owner_info),
.hooks = (1 << NF_IP6_LOCAL_OUT) | (1 << NF_IP6_POST_ROUTING),
@@ -80,12 +82,12 @@ static struct ip6t_match owner_match = {
static int __init ip6t_owner_init(void)
{
- return ip6t_register_match(&owner_match);
+ return xt_register_match(&owner_match);
}
static void __exit ip6t_owner_fini(void)
{
- ip6t_unregister_match(&owner_match);
+ xt_unregister_match(&owner_match);
}
module_init(ip6t_owner_init);
diff --git a/net/ipv6/netfilter/ip6t_rt.c b/net/ipv6/netfilter/ip6t_rt.c
index 54d7d14134f..81ab00d8c18 100644
--- a/net/ipv6/netfilter/ip6t_rt.c
+++ b/net/ipv6/netfilter/ip6t_rt.c
@@ -16,6 +16,7 @@
#include <asm/byteorder.h>
+#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_ipv6/ip6t_rt.h>
@@ -221,8 +222,9 @@ checkentry(const char *tablename,
return 1;
}
-static struct ip6t_match rt_match = {
+static struct xt_match rt_match = {
.name = "rt",
+ .family = AF_INET6,
.match = match,
.matchsize = sizeof(struct ip6t_rt),
.checkentry = checkentry,
@@ -231,12 +233,12 @@ static struct ip6t_match rt_match = {
static int __init ip6t_rt_init(void)
{
- return ip6t_register_match(&rt_match);
+ return xt_register_match(&rt_match);
}
static void __exit ip6t_rt_fini(void)
{
- ip6t_unregister_match(&rt_match);
+ xt_unregister_match(&rt_match);
}
module_init(ip6t_rt_init);
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index 2fc07c74dec..112a21d0c6d 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -19,25 +19,6 @@ MODULE_DESCRIPTION("ip6tables filter table");
#define FILTER_VALID_HOOKS ((1 << NF_IP6_LOCAL_IN) | (1 << NF_IP6_FORWARD) | (1 << NF_IP6_LOCAL_OUT))
-/* Standard entry. */
-struct ip6t_standard
-{
- struct ip6t_entry entry;
- struct ip6t_standard_target target;
-};
-
-struct ip6t_error_target
-{
- struct ip6t_entry_target target;
- char errorname[IP6T_FUNCTION_MAXNAMELEN];
-};
-
-struct ip6t_error
-{
- struct ip6t_entry entry;
- struct ip6t_error_target target;
-};
-
static struct
{
struct ip6t_replace repl;
@@ -92,7 +73,7 @@ static struct
}
};
-static struct ip6t_table packet_filter = {
+static struct xt_table packet_filter = {
.name = "filter",
.valid_hooks = FILTER_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index 6250e86a6dd..5f5aa0e5147 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -29,25 +29,6 @@ MODULE_DESCRIPTION("ip6tables mangle table");
#define DEBUGP(x, args...)
#endif
-/* Standard entry. */
-struct ip6t_standard
-{
- struct ip6t_entry entry;
- struct ip6t_standard_target target;
-};
-
-struct ip6t_error_target
-{
- struct ip6t_entry_target target;
- char errorname[IP6T_FUNCTION_MAXNAMELEN];
-};
-
-struct ip6t_error
-{
- struct ip6t_entry entry;
- struct ip6t_error_target target;
-};
-
static struct
{
struct ip6t_replace repl;
@@ -122,7 +103,7 @@ static struct
}
};
-static struct ip6t_table packet_mangler = {
+static struct xt_table packet_mangler = {
.name = "mangle",
.valid_hooks = MANGLE_VALID_HOOKS,
.lock = RW_LOCK_UNLOCKED,
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index b4154da575c..277bf34638b 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -14,25 +14,6 @@
#define DEBUGP(x, args...)
#endif
-/* Standard entry. */
-struct ip6t_standard
-{
- struct ip6t_entry entry;
- struct ip6t_standard_target target;
-};
-
-struct ip6t_error_target
-{
- struct ip6t_entry_target target;
- char errorname[IP6T_FUNCTION_MAXNAMELEN];
-};
-
-struct ip6t_error
-{
- struct ip6t_entry entry;
- struct ip6t_error_target target;
-};
-
static struct
{
struct ip6t_replace repl;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 4ae1b19ada5..c2d8059e754 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -815,7 +815,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
if (final_p)
ipv6_addr_copy(&fl.fl6_dst, final_p);
- if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
+ if ((err = xfrm_lookup(&dst, &fl, sk, 1)) < 0)
goto out;
if (hlimit < 0) {
@@ -1094,10 +1094,19 @@ static void rawv6_close(struct sock *sk, long timeout)
static int rawv6_init_sk(struct sock *sk)
{
- if (inet_sk(sk)->num == IPPROTO_ICMPV6) {
- struct raw6_sock *rp = raw6_sk(sk);
+ struct raw6_sock *rp = raw6_sk(sk);
+
+ switch (inet_sk(sk)->num) {
+ case IPPROTO_ICMPV6:
rp->checksum = 1;
rp->offset = 2;
+ break;
+ case IPPROTO_MH:
+ rp->checksum = 1;
+ rp->offset = 4;
+ break;
+ default:
+ break;
}
return(0);
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 5f0043c30b7..19c906f6efa 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -311,12 +311,21 @@ static inline void rt6_probe(struct rt6_info *rt)
static int inline rt6_check_dev(struct rt6_info *rt, int oif)
{
struct net_device *dev = rt->rt6i_dev;
- if (!oif || dev->ifindex == oif)
+ int ret = 0;
+
+ if (!oif)
return 2;
- if ((dev->flags & IFF_LOOPBACK) &&
- rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
- return 1;
- return 0;
+ if (dev->flags & IFF_LOOPBACK) {
+ if (!WARN_ON(rt->rt6i_idev == NULL) &&
+ rt->rt6i_idev->dev->ifindex == oif)
+ ret = 1;
+ else
+ return 0;
+ }
+ if (dev->ifindex == oif)
+ return 2;
+
+ return ret;
}
static int inline rt6_check_neigh(struct rt6_info *rt)
@@ -2040,7 +2049,7 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtm), flags);
if (nlh == NULL)
- return -ENOBUFS;
+ return -EMSGSIZE;
rtm = nlmsg_data(nlh);
rtm->rtm_family = AF_INET6;
@@ -2111,7 +2120,8 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
return nlmsg_end(skb, nlh);
nla_put_failure:
- return nlmsg_cancel(skb, nlh);
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
}
int rt6_dump_route(struct rt6_info *rt, void *p_arg)
@@ -2222,9 +2232,12 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
goto errout;
err = rt6_fill_node(skb, rt, NULL, NULL, 0, event, pid, seq, 0, 0);
- /* failure implies BUG in rt6_nlmsg_size() */
- BUG_ON(err < 0);
-
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
err = rtnl_notify(skb, pid, RTNLGRP_IPV6_ROUTE, nlh, gfp_any());
errout:
if (err < 0)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 77b7b091143..47cfeadac6d 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -686,7 +686,8 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
goto done;
dev = t->dev;
}
- err = unregister_netdevice(dev);
+ unregister_netdevice(dev);
+ err = 0;
break;
default:
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index c25e930c2c6..dcb7b00a737 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -265,7 +265,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
if (final_p)
ipv6_addr_copy(&fl.fl6_dst, final_p);
- if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
+ if ((err = xfrm_lookup(&dst, &fl, sk, 1)) < 0)
goto failure;
if (saddr == NULL) {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f52a5c3cc0a..15e5195549c 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -736,7 +736,7 @@ do_udp_sendmsg:
if (final_p)
ipv6_addr_copy(&fl.fl6_dst, final_p);
- if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
+ if ((err = xfrm_lookup(&dst, &fl, sk, 1)) < 0)
goto out;
if (hlimit < 0) {
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index 5e7d8a7d641..0bc866c0d83 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -25,6 +25,12 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
IP6_ECN_set_ce(inner_iph);
}
+static inline void ip6ip_ecn_decapsulate(struct sk_buff *skb)
+{
+ if (INET_ECN_is_ce(ipv6_get_dsfield(skb->nh.ipv6h)))
+ IP_ECN_set_ce(skb->h.ipiph);
+}
+
/* Add encapsulation header.
*
* The top IP header will be constructed per RFC 2401. The following fields
@@ -40,6 +46,7 @@ static inline void ipip6_ecn_decapsulate(struct sk_buff *skb)
static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
{
struct dst_entry *dst = skb->dst;
+ struct xfrm_dst *xdst = (struct xfrm_dst*)dst;
struct ipv6hdr *iph, *top_iph;
int dsfield;
@@ -52,16 +59,24 @@ static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
skb->h.ipv6h = top_iph + 1;
top_iph->version = 6;
- top_iph->priority = iph->priority;
- top_iph->flow_lbl[0] = iph->flow_lbl[0];
- top_iph->flow_lbl[1] = iph->flow_lbl[1];
- top_iph->flow_lbl[2] = iph->flow_lbl[2];
+ if (xdst->route->ops->family == AF_INET6) {
+ top_iph->priority = iph->priority;
+ top_iph->flow_lbl[0] = iph->flow_lbl[0];
+ top_iph->flow_lbl[1] = iph->flow_lbl[1];
+ top_iph->flow_lbl[2] = iph->flow_lbl[2];
+ top_iph->nexthdr = IPPROTO_IPV6;
+ } else {
+ top_iph->priority = 0;
+ top_iph->flow_lbl[0] = 0;
+ top_iph->flow_lbl[1] = 0;
+ top_iph->flow_lbl[2] = 0;
+ top_iph->nexthdr = IPPROTO_IPIP;
+ }
dsfield = ipv6_get_dsfield(top_iph);
dsfield = INET_ECN_encapsulate(dsfield, dsfield);
if (x->props.flags & XFRM_STATE_NOECN)
dsfield &= ~INET_ECN_MASK;
ipv6_change_dsfield(top_iph, 0, dsfield);
- top_iph->nexthdr = IPPROTO_IPV6;
top_iph->hop_limit = dst_metric(dst->child, RTAX_HOPLIMIT);
ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
@@ -72,7 +87,8 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
int err = -EINVAL;
- if (skb->nh.raw[IP6CB(skb)->nhoff] != IPPROTO_IPV6)
+ if (skb->nh.raw[IP6CB(skb)->nhoff] != IPPROTO_IPV6
+ && skb->nh.raw[IP6CB(skb)->nhoff] != IPPROTO_IPIP)
goto out;
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
goto out;
@@ -81,10 +97,16 @@ static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
(err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
goto out;
- if (x->props.flags & XFRM_STATE_DECAP_DSCP)
- ipv6_copy_dscp(skb->nh.ipv6h, skb->h.ipv6h);
- if (!(x->props.flags & XFRM_STATE_NOECN))
- ipip6_ecn_decapsulate(skb);
+ if (skb->nh.raw[IP6CB(skb)->nhoff] == IPPROTO_IPV6) {
+ if (x->props.flags & XFRM_STATE_DECAP_DSCP)
+ ipv6_copy_dscp(skb->nh.ipv6h, skb->h.ipv6h);
+ if (!(x->props.flags & XFRM_STATE_NOECN))
+ ipip6_ecn_decapsulate(skb);
+ } else {
+ if (!(x->props.flags & XFRM_STATE_NOECN))
+ ip6ip_ecn_decapsulate(skb);
+ skb->protocol = htons(ETH_P_IP);
+ }
skb->mac.raw = memmove(skb->data - skb->mac_len,
skb->mac.raw, skb->mac_len);
skb->nh.raw = skb->data;
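The tunnel-mode changes above make the outer IPv6 header depend on the family of the inner packet: for IPv6-in-IPv6 the traffic class and flow label are copied and nexthdr is IPPROTO_IPV6, while for IPv4-in-IPv6 they are zeroed and nexthdr becomes IPPROTO_IPIP; the input path accepts both protocol values and picks the matching ECN decapsulation helper. A condensed sketch of the output-side decision (the helper name is hypothetical; the patch open-codes this in xfrm6_tunnel_output):

	/* Hypothetical helper mirroring the inner-family dispatch above. */
	static void xfrm6_fill_tunnel_hdr(struct ipv6hdr *top_iph,
					  const struct ipv6hdr *iph,
					  unsigned short inner_family)
	{
		if (inner_family == AF_INET6) {
			top_iph->priority = iph->priority;
			top_iph->flow_lbl[0] = iph->flow_lbl[0];
			top_iph->flow_lbl[1] = iph->flow_lbl[1];
			top_iph->flow_lbl[2] = iph->flow_lbl[2];
			top_iph->nexthdr = IPPROTO_IPV6;
		} else {		/* AF_INET: IPv4 carried in IPv6 */
			top_iph->priority = 0;
			top_iph->flow_lbl[0] = 0;
			top_iph->flow_lbl[1] = 0;
			top_iph->flow_lbl[2] = 0;
			top_iph->nexthdr = IPPROTO_IPIP;
		}
	}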
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 8dffd4daae9..59480e92177 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -131,13 +131,11 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
struct dst_entry *dst, *dst_prev;
struct rt6_info *rt0 = (struct rt6_info*)(*dst_p);
struct rt6_info *rt = rt0;
- struct in6_addr *remote = &fl->fl6_dst;
- struct in6_addr *local = &fl->fl6_src;
struct flowi fl_tunnel = {
.nl_u = {
.ip6_u = {
- .saddr = *local,
- .daddr = *remote
+ .saddr = fl->fl6_src,
+ .daddr = fl->fl6_dst,
}
}
};
@@ -153,7 +151,6 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
for (i = 0; i < nx; i++) {
struct dst_entry *dst1 = dst_alloc(&xfrm6_dst_ops);
struct xfrm_dst *xdst;
- int tunnel = 0;
if (unlikely(dst1 == NULL)) {
err = -ENOBUFS;
@@ -177,19 +174,27 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
dst1->next = dst_prev;
dst_prev = dst1;
- if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
- remote = __xfrm6_bundle_addr_remote(xfrm[i], remote);
- local = __xfrm6_bundle_addr_local(xfrm[i], local);
- tunnel = 1;
- }
+
__xfrm6_bundle_len_inc(&header_len, &nfheader_len, xfrm[i]);
trailer_len += xfrm[i]->props.trailer_len;
- if (tunnel) {
- ipv6_addr_copy(&fl_tunnel.fl6_dst, remote);
- ipv6_addr_copy(&fl_tunnel.fl6_src, local);
+ if (xfrm[i]->props.mode == XFRM_MODE_TUNNEL) {
+ unsigned short encap_family = xfrm[i]->props.family;
+ switch(encap_family) {
+ case AF_INET:
+ fl_tunnel.fl4_dst = xfrm[i]->id.daddr.a4;
+ fl_tunnel.fl4_src = xfrm[i]->props.saddr.a4;
+ break;
+ case AF_INET6:
+ ipv6_addr_copy(&fl_tunnel.fl6_dst, (struct in6_addr*)&xfrm[i]->id.daddr.a6);
+ ipv6_addr_copy(&fl_tunnel.fl6_src, (struct in6_addr*)&xfrm[i]->props.saddr.a6);
+ break;
+ default:
+ BUG_ON(1);
+ }
+
err = xfrm_dst_lookup((struct xfrm_dst **) &rt,
- &fl_tunnel, AF_INET6);
+ &fl_tunnel, encap_family);
if (err)
goto error;
} else
@@ -208,6 +213,7 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
i = 0;
for (; dst_prev != &rt->u.dst; dst_prev = dst_prev->child) {
struct xfrm_dst *x = (struct xfrm_dst*)dst_prev;
+ struct xfrm_state_afinfo *afinfo;
dst_prev->xfrm = xfrm[i++];
dst_prev->dev = rt->u.dst.dev;
@@ -224,7 +230,17 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
/* Copy neighbour for reachability confirmation */
dst_prev->neighbour = neigh_clone(rt->u.dst.neighbour);
dst_prev->input = rt->u.dst.input;
- dst_prev->output = xfrm6_output;
+ /* XXX: When IPv4 is implemented as a module and can be unloaded,
+ * we should manage the reference to xfrm4_output in afinfo->output.
+ * Miyazawa
+ */
+ afinfo = xfrm_state_get_afinfo(dst_prev->xfrm->props.family);
+ if (!afinfo) {
+ dst = *dst_p;
+ goto error;
+ };
+ dst_prev->output = afinfo->output;
+ xfrm_state_put_afinfo(afinfo);
/* Sheit... I remember I did this right. Apparently,
* it was magically lost, so this code needs audit */
x->u.rt6.rt6i_flags = rt0->rt6i_flags&(RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL);
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index 9ddaa9d4153..60ad5f074e0 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -171,6 +171,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
.init_tempsel = __xfrm6_init_tempsel,
.tmpl_sort = __xfrm6_tmpl_sort,
.state_sort = __xfrm6_state_sort,
+ .output = xfrm6_output,
};
void __init xfrm6_state_init(void)
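Taken together, the two hunks above replace the hard-coded xfrm6_output assignment with a per-family lookup: struct xfrm_state_afinfo gains an output member (set to xfrm6_output for IPv6), and __xfrm6_bundle_create fetches the routine for the family of the state attached to each dst. A minimal sketch of that lookup, mirroring the added code (the error value is an illustrative choice):

	/* Sketch of the per-family output selection added above. */
	static int xfrm_dst_set_output(struct dst_entry *dst)
	{
		struct xfrm_state_afinfo *afinfo;

		afinfo = xfrm_state_get_afinfo(dst->xfrm->props.family);
		if (!afinfo)
			return -EAFNOSUPPORT;	/* illustrative error choice */
		dst->output = afinfo->output;	/* e.g. xfrm6_output for AF_INET6 */
		xfrm_state_put_afinfo(afinfo);
		return 0;
	}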
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 76c661566df..89f283c51df 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -2035,19 +2035,27 @@ static void __exit ipx_proto_finito(void)
ipxitf_cleanup();
- unregister_snap_client(pSNAP_datalink);
- pSNAP_datalink = NULL;
+ if (pSNAP_datalink) {
+ unregister_snap_client(pSNAP_datalink);
+ pSNAP_datalink = NULL;
+ }
- unregister_8022_client(p8022_datalink);
- p8022_datalink = NULL;
+ if (p8022_datalink) {
+ unregister_8022_client(p8022_datalink);
+ p8022_datalink = NULL;
+ }
dev_remove_pack(&ipx_8023_packet_type);
- destroy_8023_client(p8023_datalink);
- p8023_datalink = NULL;
+ if (p8023_datalink) {
+ destroy_8023_client(p8023_datalink);
+ p8023_datalink = NULL;
+ }
dev_remove_pack(&ipx_dix_packet_type);
- destroy_EII_client(pEII_datalink);
- pEII_datalink = NULL;
+ if (pEII_datalink) {
+ destroy_EII_client(pEII_datalink);
+ pEII_datalink = NULL;
+ }
proto_unregister(&ipx_proto);
sock_unregister(ipx_family_ops.family);
diff --git a/net/irda/irias_object.c b/net/irda/irias_object.c
index b1ee99a59c0..2a571b43ebe 100644
--- a/net/irda/irias_object.c
+++ b/net/irda/irias_object.c
@@ -91,6 +91,12 @@ struct ias_object *irias_new_object( char *name, int id)
obj->magic = IAS_OBJECT_MAGIC;
obj->name = strndup(name, IAS_MAX_CLASSNAME);
+ if (!obj->name) {
+ IRDA_WARNING("%s(), Unable to allocate name!\n",
+ __FUNCTION__);
+ kfree(obj);
+ return NULL;
+ }
obj->id = id;
 /* Locking notes : the attrib spinlock has lower precedence
@@ -101,6 +107,7 @@ struct ias_object *irias_new_object( char *name, int id)
if (obj->attribs == NULL) {
IRDA_WARNING("%s(), Unable to allocate attribs!\n",
__FUNCTION__);
+ kfree(obj->name);
kfree(obj);
return NULL;
}
@@ -357,6 +364,15 @@ void irias_add_integer_attrib(struct ias_object *obj, char *name, int value,
/* Insert value */
attrib->value = irias_new_integer_value(value);
+ if (!attrib->name || !attrib->value) {
+ IRDA_WARNING("%s: Unable to allocate attribute!\n",
+ __FUNCTION__);
+ if (attrib->value)
+ irias_delete_value(attrib->value);
+ kfree(attrib->name);
+ kfree(attrib);
+ return;
+ }
irias_add_attrib(obj, attrib, owner);
}
@@ -391,6 +407,15 @@ void irias_add_octseq_attrib(struct ias_object *obj, char *name, __u8 *octets,
attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
attrib->value = irias_new_octseq_value( octets, len);
+ if (!attrib->name || !attrib->value) {
+ IRDA_WARNING("%s: Unable to allocate attribute!\n",
+ __FUNCTION__);
+ if (attrib->value)
+ irias_delete_value(attrib->value);
+ kfree(attrib->name);
+ kfree(attrib);
+ return;
+ }
irias_add_attrib(obj, attrib, owner);
}
@@ -424,6 +449,15 @@ void irias_add_string_attrib(struct ias_object *obj, char *name, char *value,
attrib->name = strndup(name, IAS_MAX_ATTRIBNAME);
attrib->value = irias_new_string_value(value);
+ if (!attrib->name || !attrib->value) {
+ IRDA_WARNING("%s: Unable to allocate attribute!\n",
+ __FUNCTION__);
+ if (attrib->value)
+ irias_delete_value(attrib->value);
+ kfree(attrib->name);
+ kfree(attrib);
+ return;
+ }
irias_add_attrib(obj, attrib, owner);
}
@@ -473,6 +507,12 @@ struct ias_value *irias_new_string_value(char *string)
value->type = IAS_STRING;
value->charset = CS_ASCII;
value->t.string = strndup(string, IAS_MAX_STRING);
+ if (!value->t.string) {
+ IRDA_WARNING("%s: Unable to kmalloc!\n", __FUNCTION__);
+ kfree(value);
+ return NULL;
+ }
+
value->len = strlen(value->t.string);
return value;
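The three irias_add_*_attrib hunks above repeat the same recovery sequence when either the duplicated name or the freshly built value could not be allocated: warn, delete the value if it exists, free the name, free the attribute, and return. A condensed sketch of that shared pattern (the helper is hypothetical; the patch open-codes it in each function):

	/* Hypothetical consolidation of the cleanup repeated above. */
	static int irias_attrib_alloc_failed(struct ias_attrib *attrib)
	{
		if (attrib->name && attrib->value)
			return 0;		/* both allocations succeeded */

		IRDA_WARNING("%s: Unable to allocate attribute!\n", __FUNCTION__);
		if (attrib->value)
			irias_delete_value(attrib->value);
		kfree(attrib->name);
		kfree(attrib);
		return -ENOMEM;
	}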
diff --git a/net/irda/irlan/irlan_common.c b/net/irda/irlan/irlan_common.c
index 2bb04ac0932..310776dd610 100644
--- a/net/irda/irlan/irlan_common.c
+++ b/net/irda/irlan/irlan_common.c
@@ -144,12 +144,18 @@ static int __init irlan_init(void)
/* Register with IrLMP as a client */
ckey = irlmp_register_client(hints, &irlan_client_discovery_indication,
NULL, NULL);
-
+ if (!ckey)
+ goto err_ckey;
+
/* Register with IrLMP as a service */
- skey = irlmp_register_service(hints);
+ skey = irlmp_register_service(hints);
+ if (!skey)
+ goto err_skey;
/* Start the master IrLAN instance (the only one for now) */
- new = irlan_open(DEV_ADDR_ANY, DEV_ADDR_ANY);
+ new = irlan_open(DEV_ADDR_ANY, DEV_ADDR_ANY);
+ if (!new)
+ goto err_open;
/* The master will only open its (listen) control TSAP */
irlan_provider_open_ctrl_tsap(new);
@@ -158,6 +164,17 @@ static int __init irlan_init(void)
irlmp_discovery_request(DISCOVERY_DEFAULT_SLOTS);
return 0;
+
+err_open:
+ irlmp_unregister_service(skey);
+err_skey:
+ irlmp_unregister_client(ckey);
+err_ckey:
+#ifdef CONFIG_PROC_FS
+ remove_proc_entry("irlan", proc_irda);
+#endif /* CONFIG_PROC_FS */
+
+ return -ENOMEM;
}
static void __exit irlan_cleanup(void)
diff --git a/net/iucv/Kconfig b/net/iucv/Kconfig
new file mode 100644
index 00000000000..f8fcc3d1032
--- /dev/null
+++ b/net/iucv/Kconfig
@@ -0,0 +1,15 @@
+config IUCV
+ tristate "IUCV support (VM only)"
+ depends on S390
+ help
+ Select this option if you want to use inter-user communication under
+ VM or VIF. If you run on z/VM, say "Y" to enable a fast
+ communication link between VM guests.
+
+config AFIUCV
+ tristate "AF_IUCV support (VM only)"
+ depends on IUCV
+ help
+ Select this option if you want to use inter-user communication under
+ VM or VIF sockets. If you run on z/VM, say "Y" to enable a fast
+ communication link between VM guests.
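For reference, a minimal configuration fragment that enables the two new options (symbol names as defined above; building them as modules rather than built-in is just an illustrative choice):

	# Illustrative .config fragment
	CONFIG_IUCV=m
	CONFIG_AFIUCV=m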
diff --git a/net/iucv/Makefile b/net/iucv/Makefile
new file mode 100644
index 00000000000..7bfdc853267
--- /dev/null
+++ b/net/iucv/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for IUCV
+#
+
+obj-$(CONFIG_IUCV) += iucv.o
+obj-$(CONFIG_AFIUCV) += af_iucv.o
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
new file mode 100644
index 00000000000..acc94214bde
--- /dev/null
+++ b/net/iucv/af_iucv.c
@@ -0,0 +1,1077 @@
+/*
+ * linux/net/iucv/af_iucv.c
+ *
+ * IUCV protocol stack for Linux on zSeries
+ *
+ * Copyright 2006 IBM Corporation
+ *
+ * Author(s): Jennifer Hunt <jenhunt@us.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <net/sock.h>
+#include <asm/ebcdic.h>
+#include <asm/cpcmd.h>
+#include <linux/kmod.h>
+
+#include <net/iucv/iucv.h>
+#include <net/iucv/af_iucv.h>
+
+#define CONFIG_IUCV_SOCK_DEBUG 1
+
+#define IPRMDATA 0x80
+#define VERSION "1.0"
+
+static char iucv_userid[80];
+
+static struct proto_ops iucv_sock_ops;
+
+static struct proto iucv_proto = {
+ .name = "AF_IUCV",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct iucv_sock),
+};
+
+/* Call Back functions */
+static void iucv_callback_rx(struct iucv_path *, struct iucv_message *);
+static void iucv_callback_txdone(struct iucv_path *, struct iucv_message *);
+static void iucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
+static int iucv_callback_connreq(struct iucv_path *, u8 ipvmid[8], u8 ipuser[16]);
+static void iucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
+
+static struct iucv_sock_list iucv_sk_list = {
+ .lock = RW_LOCK_UNLOCKED,
+ .autobind_name = ATOMIC_INIT(0)
+};
+
+static struct iucv_handler af_iucv_handler = {
+ .path_pending = iucv_callback_connreq,
+ .path_complete = iucv_callback_connack,
+ .path_severed = iucv_callback_connrej,
+ .message_pending = iucv_callback_rx,
+ .message_complete = iucv_callback_txdone
+};
+
+static inline void high_nmcpy(unsigned char *dst, char *src)
+{
+ memcpy(dst, src, 8);
+}
+
+static inline void low_nmcpy(unsigned char *dst, char *src)
+{
+ memcpy(&dst[8], src, 8);
+}
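high_nmcpy() and low_nmcpy() build the 16-byte IUCV user data used when connecting, accepting or severing a path: the partner's application name goes into bytes 0-7 and the local name into bytes 8-15, and the buffer is converted to EBCDIC before it is handed to the path functions. A short sketch of the assembly as done in iucv_sock_connect() below:

	/* Assembly of the connect user data, cf. iucv_sock_connect(). */
	unsigned char user_data[16];

	high_nmcpy(user_data, sa->siucv_name);       /* bytes 0-7:  peer name  */
	low_nmcpy(user_data, iucv_sk(sk)->src_name); /* bytes 8-15: local name */
	ASCEBC(user_data, sizeof(user_data));        /* convert to EBCDIC      */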
+
+/* Timers */
+static void iucv_sock_timeout(unsigned long arg)
+{
+ struct sock *sk = (struct sock *)arg;
+
+ bh_lock_sock(sk);
+ sk->sk_err = ETIMEDOUT;
+ sk->sk_state_change(sk);
+ bh_unlock_sock(sk);
+
+ iucv_sock_kill(sk);
+ sock_put(sk);
+}
+
+static void iucv_sock_clear_timer(struct sock *sk)
+{
+ sk_stop_timer(sk, &sk->sk_timer);
+}
+
+static void iucv_sock_init_timer(struct sock *sk)
+{
+ init_timer(&sk->sk_timer);
+ sk->sk_timer.function = iucv_sock_timeout;
+ sk->sk_timer.data = (unsigned long)sk;
+}
+
+static struct sock *__iucv_get_sock_by_name(char *nm)
+{
+ struct sock *sk;
+ struct hlist_node *node;
+
+ sk_for_each(sk, node, &iucv_sk_list.head)
+ if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
+ return sk;
+
+ return NULL;
+}
+
+static void iucv_sock_destruct(struct sock *sk)
+{
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+}
+
+/* Cleanup Listen */
+static void iucv_sock_cleanup_listen(struct sock *parent)
+{
+ struct sock *sk;
+
+ /* Close non-accepted connections */
+ while ((sk = iucv_accept_dequeue(parent, NULL))) {
+ iucv_sock_close(sk);
+ iucv_sock_kill(sk);
+ }
+
+ parent->sk_state = IUCV_CLOSED;
+ sock_set_flag(parent, SOCK_ZAPPED);
+}
+
+/* Kill socket */
+static void iucv_sock_kill(struct sock *sk)
+{
+ if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
+ return;
+
+ iucv_sock_unlink(&iucv_sk_list, sk);
+ sock_set_flag(sk, SOCK_DEAD);
+ sock_put(sk);
+}
+
+/* Close an IUCV socket */
+static void iucv_sock_close(struct sock *sk)
+{
+ unsigned char user_data[16];
+ struct iucv_sock *iucv = iucv_sk(sk);
+ int err;
+
+ iucv_sock_clear_timer(sk);
+ lock_sock(sk);
+
+ switch(sk->sk_state) {
+ case IUCV_LISTEN:
+ iucv_sock_cleanup_listen(sk);
+ break;
+
+ case IUCV_CONNECTED:
+ case IUCV_DISCONN:
+ err = 0;
+ if (iucv->path) {
+ low_nmcpy(user_data, iucv->src_name);
+ high_nmcpy(user_data, iucv->dst_name);
+ ASCEBC(user_data, sizeof(user_data));
+ err = iucv_path_sever(iucv->path, user_data);
+ iucv_path_free(iucv->path);
+ iucv->path = NULL;
+ }
+
+ sk->sk_state = IUCV_CLOSED;
+ sk->sk_state_change(sk);
+ sk->sk_err = ECONNRESET;
+ sk->sk_state_change(sk);
+
+ skb_queue_purge(&iucv->send_skb_q);
+
+ sock_set_flag(sk, SOCK_ZAPPED);
+ break;
+
+ default:
+ sock_set_flag(sk, SOCK_ZAPPED);
+ break;
+ };
+
+ release_sock(sk);
+ iucv_sock_kill(sk);
+}
+
+static void iucv_sock_init(struct sock *sk, struct sock *parent)
+{
+ if (parent)
+ sk->sk_type = parent->sk_type;
+}
+
+static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
+{
+ struct sock *sk;
+
+ sk = sk_alloc(PF_IUCV, prio, &iucv_proto, 1);
+ if (!sk)
+ return NULL;
+
+ sock_init_data(sock, sk);
+ INIT_LIST_HEAD(&iucv_sk(sk)->accept_q);
+ skb_queue_head_init(&iucv_sk(sk)->send_skb_q);
+ iucv_sk(sk)->send_tag = 0;
+
+ sk->sk_destruct = iucv_sock_destruct;
+ sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
+ sk->sk_allocation = GFP_DMA;
+
+ sock_reset_flag(sk, SOCK_ZAPPED);
+
+ sk->sk_protocol = proto;
+ sk->sk_state = IUCV_OPEN;
+
+ iucv_sock_init_timer(sk);
+
+ iucv_sock_link(&iucv_sk_list, sk);
+ return sk;
+}
+
+/* Create an IUCV socket */
+static int iucv_sock_create(struct socket *sock, int protocol)
+{
+ struct sock *sk;
+
+ if (sock->type != SOCK_STREAM)
+ return -ESOCKTNOSUPPORT;
+
+ sock->state = SS_UNCONNECTED;
+ sock->ops = &iucv_sock_ops;
+
+ sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL);
+ if (!sk)
+ return -ENOMEM;
+
+ iucv_sock_init(sk, NULL);
+
+ return 0;
+}
+
+void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
+{
+ write_lock_bh(&l->lock);
+ sk_add_node(sk, &l->head);
+ write_unlock_bh(&l->lock);
+}
+
+void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
+{
+ write_lock_bh(&l->lock);
+ sk_del_node_init(sk);
+ write_unlock_bh(&l->lock);
+}
+
+void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
+{
+ sock_hold(sk);
+ list_add_tail(&iucv_sk(sk)->accept_q, &iucv_sk(parent)->accept_q);
+ iucv_sk(sk)->parent = parent;
+ parent->sk_ack_backlog++;
+}
+
+void iucv_accept_unlink(struct sock *sk)
+{
+ list_del_init(&iucv_sk(sk)->accept_q);
+ iucv_sk(sk)->parent->sk_ack_backlog--;
+ iucv_sk(sk)->parent = NULL;
+ sock_put(sk);
+}
+
+struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock)
+{
+ struct iucv_sock *isk, *n;
+ struct sock *sk;
+
+ list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q){
+ sk = (struct sock *) isk;
+ lock_sock(sk);
+
+ if (sk->sk_state == IUCV_CLOSED) {
+ release_sock(sk);
+ iucv_accept_unlink(sk);
+ continue;
+ }
+
+ if (sk->sk_state == IUCV_CONNECTED ||
+ sk->sk_state == IUCV_SEVERED ||
+ !newsock) {
+ iucv_accept_unlink(sk);
+ if (newsock)
+ sock_graft(sk, newsock);
+
+ if (sk->sk_state == IUCV_SEVERED)
+ sk->sk_state = IUCV_DISCONN;
+
+ release_sock(sk);
+ return sk;
+ }
+
+ release_sock(sk);
+ }
+ return NULL;
+}
+
+int iucv_sock_wait_state(struct sock *sk, int state, int state2,
+ unsigned long timeo)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int err = 0;
+
+ add_wait_queue(sk->sk_sleep, &wait);
+ while (sk->sk_state != state && sk->sk_state != state2) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (!timeo) {
+ err = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ break;
+ }
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+
+ err = sock_error(sk);
+ if (err)
+ break;
+ }
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk->sk_sleep, &wait);
+ return err;
+}
+
+/* Bind an unbound socket */
+static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
+{
+ struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
+ struct sock *sk = sock->sk;
+ struct iucv_sock *iucv;
+ int err;
+
+ /* Verify the input sockaddr */
+ if (!addr || addr->sa_family != AF_IUCV)
+ return -EINVAL;
+
+ lock_sock(sk);
+ if (sk->sk_state != IUCV_OPEN) {
+ err = -EBADFD;
+ goto done;
+ }
+
+ write_lock_bh(&iucv_sk_list.lock);
+
+ iucv = iucv_sk(sk);
+ if (__iucv_get_sock_by_name(sa->siucv_name)) {
+ err = -EADDRINUSE;
+ goto done_unlock;
+ }
+ if (iucv->path) {
+ err = 0;
+ goto done_unlock;
+ }
+
+ /* Bind the socket */
+ memcpy(iucv->src_name, sa->siucv_name, 8);
+
+ /* Copy the user id */
+ memcpy(iucv->src_user_id, iucv_userid, 8);
+ sk->sk_state = IUCV_BOUND;
+ err = 0;
+
+done_unlock:
+ /* Release the socket list lock */
+ write_unlock_bh(&iucv_sk_list.lock);
+done:
+ release_sock(sk);
+ return err;
+}
+
+/* Automatically bind an unbound socket */
+static int iucv_sock_autobind(struct sock *sk)
+{
+ struct iucv_sock *iucv = iucv_sk(sk);
+ char query_buffer[80];
+ char name[12];
+ int err = 0;
+
+ /* Set the userid and name */
+ cpcmd("QUERY USERID", query_buffer, sizeof(query_buffer), &err);
+ if (unlikely(err))
+ return -EPROTO;
+
+ memcpy(iucv->src_user_id, query_buffer, 8);
+
+ write_lock_bh(&iucv_sk_list.lock);
+
+ sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
+ while (__iucv_get_sock_by_name(name)) {
+ sprintf(name, "%08x",
+ atomic_inc_return(&iucv_sk_list.autobind_name));
+ }
+
+ write_unlock_bh(&iucv_sk_list.lock);
+
+ memcpy(&iucv->src_name, name, 8);
+
+ return err;
+}
+
+/* Connect an unconnected socket */
+static int iucv_sock_connect(struct socket *sock, struct sockaddr *addr,
+ int alen, int flags)
+{
+ struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
+ struct sock *sk = sock->sk;
+ struct iucv_sock *iucv;
+ unsigned char user_data[16];
+ int err;
+
+ if (addr->sa_family != AF_IUCV || alen < sizeof(struct sockaddr_iucv))
+ return -EINVAL;
+
+ if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
+ return -EBADFD;
+
+ if (sk->sk_type != SOCK_STREAM)
+ return -EINVAL;
+
+ iucv = iucv_sk(sk);
+
+ if (sk->sk_state == IUCV_OPEN) {
+ err = iucv_sock_autobind(sk);
+ if (unlikely(err))
+ return err;
+ }
+
+ lock_sock(sk);
+
+ /* Set the destination information */
+ memcpy(iucv_sk(sk)->dst_user_id, sa->siucv_user_id, 8);
+ memcpy(iucv_sk(sk)->dst_name, sa->siucv_name, 8);
+
+ high_nmcpy(user_data, sa->siucv_name);
+ low_nmcpy(user_data, iucv_sk(sk)->src_name);
+ ASCEBC(user_data, sizeof(user_data));
+
+ iucv = iucv_sk(sk);
+ /* Create path. */
+ iucv->path = iucv_path_alloc(IUCV_QUEUELEN_DEFAULT,
+ IPRMDATA, GFP_KERNEL);
+ err = iucv_path_connect(iucv->path, &af_iucv_handler,
+ sa->siucv_user_id, NULL, user_data, sk);
+ if (err) {
+ iucv_path_free(iucv->path);
+ iucv->path = NULL;
+ err = -ECONNREFUSED;
+ goto done;
+ }
+
+ if (sk->sk_state != IUCV_CONNECTED) {
+ err = iucv_sock_wait_state(sk, IUCV_CONNECTED, IUCV_DISCONN,
+ sock_sndtimeo(sk, flags & O_NONBLOCK));
+ }
+
+ if (sk->sk_state == IUCV_DISCONN) {
+ release_sock(sk);
+ return -ECONNREFUSED;
+ }
+done:
+ release_sock(sk);
+ return err;
+}
+
+/* Move a socket into listening state. */
+static int iucv_sock_listen(struct socket *sock, int backlog)
+{
+ struct sock *sk = sock->sk;
+ int err;
+
+ lock_sock(sk);
+
+ err = -EINVAL;
+ if (sk->sk_state != IUCV_BOUND || sock->type != SOCK_STREAM)
+ goto done;
+
+ sk->sk_max_ack_backlog = backlog;
+ sk->sk_ack_backlog = 0;
+ sk->sk_state = IUCV_LISTEN;
+ err = 0;
+
+done:
+ release_sock(sk);
+ return err;
+}
+
+/* Accept a pending connection */
+static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
+ int flags)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct sock *sk = sock->sk, *nsk;
+ long timeo;
+ int err = 0;
+
+ lock_sock(sk);
+
+ if (sk->sk_state != IUCV_LISTEN) {
+ err = -EBADFD;
+ goto done;
+ }
+
+ timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+
+ /* Wait for an incoming connection */
+ add_wait_queue_exclusive(sk->sk_sleep, &wait);
+ while (!(nsk = iucv_accept_dequeue(sk, newsock))){
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (!timeo) {
+ err = -EAGAIN;
+ break;
+ }
+
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+
+ if (sk->sk_state != IUCV_LISTEN) {
+ err = -EBADFD;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ break;
+ }
+ }
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(sk->sk_sleep, &wait);
+
+ if (err)
+ goto done;
+
+ newsock->state = SS_CONNECTED;
+
+done:
+ release_sock(sk);
+ return err;
+}
+
+static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr,
+ int *len, int peer)
+{
+ struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr;
+ struct sock *sk = sock->sk;
+
+ addr->sa_family = AF_IUCV;
+ *len = sizeof(struct sockaddr_iucv);
+
+ if (peer) {
+ memcpy(siucv->siucv_user_id, iucv_sk(sk)->dst_user_id, 8);
+ memcpy(siucv->siucv_name, &iucv_sk(sk)->dst_name, 8);
+ } else {
+ memcpy(siucv->siucv_user_id, iucv_sk(sk)->src_user_id, 8);
+ memcpy(siucv->siucv_name, iucv_sk(sk)->src_name, 8);
+ }
+ memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port));
+ memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr));
+ memset(siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid));
+
+ return 0;
+}
+
+static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len)
+{
+ struct sock *sk = sock->sk;
+ struct iucv_sock *iucv = iucv_sk(sk);
+ struct sk_buff *skb;
+ struct iucv_message txmsg;
+ int err;
+
+ err = sock_error(sk);
+ if (err)
+ return err;
+
+ if (msg->msg_flags & MSG_OOB)
+ return -EOPNOTSUPP;
+
+ lock_sock(sk);
+
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
+ err = -EPIPE;
+ goto out;
+ }
+
+ if (sk->sk_state == IUCV_CONNECTED){
+ if (!(skb = sock_alloc_send_skb(sk, len,
+ msg->msg_flags & MSG_DONTWAIT,
+ &err)))
+ goto out; /* must not return with the socket still locked */
+
+ if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)){
+ err = -EFAULT;
+ goto fail;
+ }
+
+ txmsg.class = 0;
+ txmsg.tag = iucv->send_tag++;
+ memcpy(skb->cb, &txmsg.tag, 4);
+ skb_queue_tail(&iucv->send_skb_q, skb);
+ err = iucv_message_send(iucv->path, &txmsg, 0, 0,
+ (void *) skb->data, skb->len);
+ if (err) {
+ if (err == 3)
+ printk(KERN_ERR "AF_IUCV msg limit exceeded\n");
+ skb_unlink(skb, &iucv->send_skb_q);
+ err = -EPIPE;
+ goto fail;
+ }
+
+ } else {
+ err = -ENOTCONN;
+ goto out;
+ }
+
+ release_sock(sk);
+ return len;
+
+fail:
+ kfree_skb(skb);
+out:
+ release_sock(sk);
+ return err;
+}
+
+static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t len, int flags)
+{
+ int noblock = flags & MSG_DONTWAIT;
+ struct sock *sk = sock->sk;
+ int target, copied = 0;
+ struct sk_buff *skb;
+ int err = 0;
+
+ if (flags & (MSG_OOB))
+ return -EOPNOTSUPP;
+
+ target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb) {
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ return 0;
+ return err;
+ }
+
+ copied = min_t(unsigned int, skb->len, len);
+
+ if (memcpy_toiovec(msg->msg_iov, skb->data, copied)) {
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ if (copied == 0)
+ return -EFAULT;
+ }
+
+ len -= copied;
+
+ /* Mark read part of skb as used */
+ if (!(flags & MSG_PEEK)) {
+ skb_pull(skb, copied);
+
+ if (skb->len) {
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ goto done;
+ }
+
+ kfree_skb(skb);
+ } else
+ skb_queue_head(&sk->sk_receive_queue, skb);
+
+done:
+ return err ? : copied;
+}
+
+static inline unsigned int iucv_accept_poll(struct sock *parent)
+{
+ struct iucv_sock *isk, *n;
+ struct sock *sk;
+
+ list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q){
+ sk = (struct sock *) isk;
+
+ if (sk->sk_state == IUCV_CONNECTED)
+ return POLLIN | POLLRDNORM;
+ }
+
+ return 0;
+}
+
+unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ struct sock *sk = sock->sk;
+ unsigned int mask = 0;
+
+ poll_wait(file, sk->sk_sleep, wait);
+
+ if (sk->sk_state == IUCV_LISTEN)
+ return iucv_accept_poll(sk);
+
+ if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+ mask |= POLLERR;
+
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ mask |= POLLRDHUP;
+
+ if (sk->sk_shutdown == SHUTDOWN_MASK)
+ mask |= POLLHUP;
+
+ if (!skb_queue_empty(&sk->sk_receive_queue) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN))
+ mask |= POLLIN | POLLRDNORM;
+
+ if (sk->sk_state == IUCV_CLOSED)
+ mask |= POLLHUP;
+
+ if (sock_writeable(sk))
+ mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+ else
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+
+ return mask;
+}
+
+static int iucv_sock_shutdown(struct socket *sock, int how)
+{
+ struct sock *sk = sock->sk;
+ struct iucv_sock *iucv = iucv_sk(sk);
+ struct iucv_message txmsg;
+ int err = 0;
+ u8 prmmsg[8] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
+
+ how++;
+
+ if ((how & ~SHUTDOWN_MASK) || !how)
+ return -EINVAL;
+
+ lock_sock(sk);
+ switch(sk->sk_state) {
+ case IUCV_CLOSED:
+ err = -ENOTCONN;
+ goto fail;
+
+ default:
+ sk->sk_shutdown |= how;
+ break;
+ }
+
+ if (how == SEND_SHUTDOWN || how == SHUTDOWN_MASK) {
+ txmsg.class = 0;
+ txmsg.tag = 0;
+ err = iucv_message_send(iucv->path, &txmsg, IUCV_IPRMDATA, 0,
+ (void *) prmmsg, 8);
+ if (err) {
+ switch(err) {
+ case 1:
+ err = -ENOTCONN;
+ break;
+ case 2:
+ err = -ECONNRESET;
+ break;
+ default:
+ err = -ENOTCONN;
+ break;
+ }
+ }
+ }
+
+ if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
+ err = iucv_path_quiesce(iucv_sk(sk)->path, NULL);
+ if (err)
+ err = -ENOTCONN;
+
+ skb_queue_purge(&sk->sk_receive_queue);
+ }
+
+ /* Wake up anyone sleeping in poll */
+ sk->sk_state_change(sk);
+
+fail:
+ release_sock(sk);
+ return err;
+}
+
+static int iucv_sock_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ int err = 0;
+
+ if (!sk)
+ return 0;
+
+ iucv_sock_close(sk);
+
+ /* Unregister with IUCV base support */
+ if (iucv_sk(sk)->path) {
+ iucv_path_sever(iucv_sk(sk)->path, NULL);
+ iucv_path_free(iucv_sk(sk)->path);
+ iucv_sk(sk)->path = NULL;
+ }
+
+ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime){
+ lock_sock(sk);
+ err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0,
+ sk->sk_lingertime);
+ release_sock(sk);
+ }
+
+ sock_orphan(sk);
+ iucv_sock_kill(sk);
+ return err;
+}
+
+/* Callback wrappers - called from iucv base support */
+static int iucv_callback_connreq(struct iucv_path *path,
+ u8 ipvmid[8], u8 ipuser[16])
+{
+ unsigned char user_data[16];
+ unsigned char nuser_data[16];
+ unsigned char src_name[8];
+ struct hlist_node *node;
+ struct sock *sk, *nsk;
+ struct iucv_sock *iucv, *niucv;
+ int err;
+
+ memcpy(src_name, ipuser, 8);
+ EBCASC(src_name, 8);
+ /* Find out if this path belongs to af_iucv. */
+ read_lock(&iucv_sk_list.lock);
+ iucv = NULL;
+ sk_for_each(sk, node, &iucv_sk_list.head)
+ if (sk->sk_state == IUCV_LISTEN &&
+ !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
+ /*
+ * Found a listening socket with
+ * src_name == ipuser[0-7].
+ */
+ iucv = iucv_sk(sk);
+ break;
+ }
+ read_unlock(&iucv_sk_list.lock);
+ if (!iucv)
+ /* No socket found, not one of our paths. */
+ return -EINVAL;
+
+ bh_lock_sock(sk);
+
+ /* Check if parent socket is listening */
+ low_nmcpy(user_data, iucv->src_name);
+ high_nmcpy(user_data, iucv->dst_name);
+ ASCEBC(user_data, sizeof(user_data));
+ if (sk->sk_state != IUCV_LISTEN) {
+ err = iucv_path_sever(path, user_data);
+ goto fail;
+ }
+
+ /* Check for backlog size */
+ if (sk_acceptq_is_full(sk)) {
+ err = iucv_path_sever(path, user_data);
+ goto fail;
+ }
+
+ /* Create the new socket */
+ nsk = iucv_sock_alloc(NULL, SOCK_STREAM, GFP_ATOMIC);
+ if (!nsk){
+ err = iucv_path_sever(path, user_data);
+ goto fail;
+ }
+
+ niucv = iucv_sk(nsk);
+ iucv_sock_init(nsk, sk);
+
+ /* Set the new iucv_sock */
+ memcpy(niucv->dst_name, ipuser + 8, 8);
+ EBCASC(niucv->dst_name, 8);
+ memcpy(niucv->dst_user_id, ipvmid, 8);
+ memcpy(niucv->src_name, iucv->src_name, 8);
+ memcpy(niucv->src_user_id, iucv->src_user_id, 8);
+ niucv->path = path;
+
+ /* Call iucv_accept */
+ high_nmcpy(nuser_data, ipuser + 8);
+ memcpy(nuser_data + 8, niucv->src_name, 8);
+ ASCEBC(nuser_data + 8, 8);
+
+ path->msglim = IUCV_QUEUELEN_DEFAULT;
+ err = iucv_path_accept(path, &af_iucv_handler, nuser_data, nsk);
+ if (err){
+ err = iucv_path_sever(path, user_data);
+ goto fail;
+ }
+
+ iucv_accept_enqueue(sk, nsk);
+
+ /* Wake up accept */
+ nsk->sk_state = IUCV_CONNECTED;
+ sk->sk_data_ready(sk, 1);
+ err = 0;
+fail:
+ bh_unlock_sock(sk);
+ return 0;
+}
+
+static void iucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
+{
+ struct sock *sk = path->private;
+
+ sk->sk_state = IUCV_CONNECTED;
+ sk->sk_state_change(sk);
+}
+
+static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
+{
+ struct sock *sk = path->private;
+ struct sk_buff *skb;
+ int rc;
+
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ return;
+
+ skb = alloc_skb(msg->length, GFP_ATOMIC | GFP_DMA);
+ if (!skb) {
+ iucv_message_reject(path, msg);
+ return;
+ }
+
+ if (msg->flags & IPRMDATA) {
+ skb->data = NULL;
+ skb->len = 0;
+ } else {
+ rc = iucv_message_receive(path, msg, 0, skb->data,
+ msg->length, NULL);
+ if (rc) {
+ kfree_skb(skb);
+ return;
+ }
+
+ skb->h.raw = skb->data;
+ skb->nh.raw = skb->data;
+ skb->len = msg->length;
+ }
+
+ if (sock_queue_rcv_skb(sk, skb))
+ kfree_skb(skb);
+}
+
+static void iucv_callback_txdone(struct iucv_path *path,
+ struct iucv_message *msg)
+{
+ struct sock *sk = path->private;
+ struct sk_buff *this;
+ struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
+ struct sk_buff *list_skb = list->next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&list->lock, flags);
+
+ do {
+ this = list_skb;
+ list_skb = list_skb->next;
+ } while (memcmp(&msg->tag, this->cb, 4));
+
+ spin_unlock_irqrestore(&list->lock, flags);
+
+ skb_unlink(this, &iucv_sk(sk)->send_skb_q);
+ kfree_skb(this);
+}
+
+static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
+{
+ struct sock *sk = path->private;
+
+ if (!list_empty(&iucv_sk(sk)->accept_q))
+ sk->sk_state = IUCV_SEVERED;
+ else
+ sk->sk_state = IUCV_DISCONN;
+
+ sk->sk_state_change(sk);
+}
+
+static struct proto_ops iucv_sock_ops = {
+ .family = PF_IUCV,
+ .owner = THIS_MODULE,
+ .release = iucv_sock_release,
+ .bind = iucv_sock_bind,
+ .connect = iucv_sock_connect,
+ .listen = iucv_sock_listen,
+ .accept = iucv_sock_accept,
+ .getname = iucv_sock_getname,
+ .sendmsg = iucv_sock_sendmsg,
+ .recvmsg = iucv_sock_recvmsg,
+ .poll = iucv_sock_poll,
+ .ioctl = sock_no_ioctl,
+ .mmap = sock_no_mmap,
+ .socketpair = sock_no_socketpair,
+ .shutdown = iucv_sock_shutdown,
+ .setsockopt = sock_no_setsockopt,
+ .getsockopt = sock_no_getsockopt
+};
+
+static struct net_proto_family iucv_sock_family_ops = {
+ .family = AF_IUCV,
+ .owner = THIS_MODULE,
+ .create = iucv_sock_create,
+};
+
+static int afiucv_init(void)
+{
+ int err;
+
+ if (!MACHINE_IS_VM) {
+ printk(KERN_ERR "AF_IUCV connection needs VM as base\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+ cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err);
+ if (unlikely(err)) {
+ printk(KERN_ERR "AF_IUCV needs the VM userid\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ err = iucv_register(&af_iucv_handler, 0);
+ if (err)
+ goto out;
+ err = proto_register(&iucv_proto, 0);
+ if (err)
+ goto out_iucv;
+ err = sock_register(&iucv_sock_family_ops);
+ if (err)
+ goto out_proto;
+ printk(KERN_INFO "AF_IUCV lowlevel driver initialized\n");
+ return 0;
+
+out_proto:
+ proto_unregister(&iucv_proto);
+out_iucv:
+ iucv_unregister(&af_iucv_handler, 0);
+out:
+ return err;
+}
+
+static void __exit afiucv_exit(void)
+{
+ sock_unregister(PF_IUCV);
+ proto_unregister(&iucv_proto);
+ iucv_unregister(&af_iucv_handler, 0);
+
+ printk(KERN_INFO "AF_IUCV lowlevel driver unloaded\n");
+}
+
+module_init(afiucv_init);
+module_exit(afiucv_exit);
+
+MODULE_AUTHOR("Jennifer Hunt <jenhunt@us.ibm.com>");
+MODULE_DESCRIPTION("IUCV Sockets ver " VERSION);
+MODULE_VERSION(VERSION);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(PF_IUCV);
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
new file mode 100644
index 00000000000..1b10d576f22
--- /dev/null
+++ b/net/iucv/iucv.c
@@ -0,0 +1,1619 @@
+/*
+ * IUCV base infrastructure.
+ *
+ * Copyright 2001, 2006 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s):
+ * Original source:
+ * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
+ * Xenia Tkatschow (xenia@us.ibm.com)
+ * 2Gb awareness and general cleanup:
+ * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
+ * Rewritten for af_iucv:
+ * Martin Schwidefsky <schwidefsky@de.ibm.com>
+ *
+ * Documentation used:
+ * The original source
+ * CP Programming Service, IBM document # SC24-5760
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <linux/spinlock.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/cpu.h>
+#include <net/iucv/iucv.h>
+#include <asm/atomic.h>
+#include <asm/ebcdic.h>
+#include <asm/io.h>
+#include <asm/s390_ext.h>
+#include <asm/s390_rdev.h>
+#include <asm/smp.h>
+
+/*
+ * FLAGS:
+ * All flags are defined in the field IPFLAGS1 of each function
+ * and can be found in CP Programming Services.
+ * IPSRCCLS - Indicates you have specified a source class.
+ * IPTRGCLS - Indicates you have specified a target class.
+ * IPFGPID - Indicates you have specified a pathid.
+ * IPFGMID - Indicates you have specified a message ID.
+ * IPNORPY - Indicates a one-way message. No reply expected.
+ * IPALL - Indicates that all paths are affected.
+ */
+#define IUCV_IPSRCCLS 0x01
+#define IUCV_IPTRGCLS 0x01
+#define IUCV_IPFGPID 0x02
+#define IUCV_IPFGMID 0x04
+#define IUCV_IPNORPY 0x10
+#define IUCV_IPALL 0x80
+
+static int iucv_bus_match (struct device *dev, struct device_driver *drv)
+{
+ return 0;
+}
+
+struct bus_type iucv_bus = {
+ .name = "iucv",
+ .match = iucv_bus_match,
+};
+
+struct device *iucv_root;
+static int iucv_available;
+
+/* General IUCV interrupt structure */
+struct iucv_irq_data {
+ u16 ippathid;
+ u8 ipflags1;
+ u8 iptype;
+ u32 res2[8];
+};
+
+struct iucv_work {
+ struct list_head list;
+ struct iucv_irq_data data;
+};
+
+static LIST_HEAD(iucv_work_queue);
+static DEFINE_SPINLOCK(iucv_work_lock);
+
+static struct iucv_irq_data *iucv_irq_data;
+static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE;
+static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE;
+
+static void iucv_tasklet_handler(unsigned long);
+static DECLARE_TASKLET(iucv_tasklet, iucv_tasklet_handler,0);
+
+enum iucv_command_codes {
+ IUCV_QUERY = 0,
+ IUCV_RETRIEVE_BUFFER = 2,
+ IUCV_SEND = 4,
+ IUCV_RECEIVE = 5,
+ IUCV_REPLY = 6,
+ IUCV_REJECT = 8,
+ IUCV_PURGE = 9,
+ IUCV_ACCEPT = 10,
+ IUCV_CONNECT = 11,
+ IUCV_DECLARE_BUFFER = 12,
+ IUCV_QUIESCE = 13,
+ IUCV_RESUME = 14,
+ IUCV_SEVER = 15,
+ IUCV_SETMASK = 16,
+};
+
+/*
+ * Error messages that are used with the iucv_sever function. They get
+ * converted to EBCDIC.
+ */
+static char iucv_error_no_listener[16] = "NO LISTENER";
+static char iucv_error_no_memory[16] = "NO MEMORY";
+static char iucv_error_pathid[16] = "INVALID PATHID";
+
+/*
+ * iucv_handler_list: List of registered handlers.
+ */
+static LIST_HEAD(iucv_handler_list);
+
+/*
+ * iucv_path_table: an array of iucv_path structures.
+ */
+static struct iucv_path **iucv_path_table;
+static unsigned long iucv_max_pathid;
+
+/*
+ * iucv_lock: spinlock protecting iucv_handler_list and iucv_pathid_table
+ */
+static DEFINE_SPINLOCK(iucv_table_lock);
+
+/*
+ * iucv_tasklet_cpu: contains the number of the cpu executing the tasklet.
+ * Needed for iucv_path_sever called from tasklet.
+ */
+static int iucv_tasklet_cpu = -1;
+
+/*
+ * Mutex and wait queue for iucv_register/iucv_unregister.
+ */
+static DEFINE_MUTEX(iucv_register_mutex);
+
+/*
+ * Counter for number of non-smp capable handlers.
+ */
+static int iucv_nonsmp_handler;
+
+/*
+ * IUCV control data structure. Used by iucv_path_accept, iucv_path_connect,
+ * iucv_path_quiesce and iucv_path_sever.
+ */
+struct iucv_cmd_control {
+ u16 ippathid;
+ u8 ipflags1;
+ u8 iprcode;
+ u16 ipmsglim;
+ u16 res1;
+ u8 ipvmid[8];
+ u8 ipuser[16];
+ u8 iptarget[8];
+} __attribute__ ((packed,aligned(8)));
+
+/*
+ * Data in parameter list iucv structure. Used by iucv_message_send,
+ * iucv_message_send2way and iucv_message_reply.
+ */
+struct iucv_cmd_dpl {
+ u16 ippathid;
+ u8 ipflags1;
+ u8 iprcode;
+ u32 ipmsgid;
+ u32 iptrgcls;
+ u8 iprmmsg[8];
+ u32 ipsrccls;
+ u32 ipmsgtag;
+ u32 ipbfadr2;
+ u32 ipbfln2f;
+ u32 res;
+} __attribute__ ((packed,aligned(8)));
+
+/*
+ * Data in buffer iucv structure. Used by iucv_message_receive,
+ * iucv_message_reject, iucv_message_send, iucv_message_send2way
+ * and iucv_declare_cpu.
+ */
+struct iucv_cmd_db {
+ u16 ippathid;
+ u8 ipflags1;
+ u8 iprcode;
+ u32 ipmsgid;
+ u32 iptrgcls;
+ u32 ipbfadr1;
+ u32 ipbfln1f;
+ u32 ipsrccls;
+ u32 ipmsgtag;
+ u32 ipbfadr2;
+ u32 ipbfln2f;
+ u32 res;
+} __attribute__ ((packed,aligned(8)));
+
+/*
+ * Purge message iucv structure. Used by iucv_message_purge.
+ */
+struct iucv_cmd_purge {
+ u16 ippathid;
+ u8 ipflags1;
+ u8 iprcode;
+ u32 ipmsgid;
+ u8 ipaudit[3];
+ u8 res1[5];
+ u32 res2;
+ u32 ipsrccls;
+ u32 ipmsgtag;
+ u32 res3[3];
+} __attribute__ ((packed,aligned(8)));
+
+/*
+ * Set mask iucv structure. Used by iucv_enable_cpu.
+ */
+struct iucv_cmd_set_mask {
+ u8 ipmask;
+ u8 res1[2];
+ u8 iprcode;
+ u32 res2[9];
+} __attribute__ ((packed,aligned(8)));
+
+union iucv_param {
+ struct iucv_cmd_control ctrl;
+ struct iucv_cmd_dpl dpl;
+ struct iucv_cmd_db db;
+ struct iucv_cmd_purge purge;
+ struct iucv_cmd_set_mask set_mask;
+};
+
+/*
+ * Anchor for per-cpu IUCV command parameter block.
+ */
+static union iucv_param *iucv_param;
+
+/**
+ * iucv_call_b2f0
+ * @code: identifier of IUCV call to CP.
+ * @parm: pointer to a struct iucv_parm block
+ *
+ * Calls CP to execute IUCV commands.
+ *
+ * Returns the result of the CP IUCV call.
+ */
+static inline int iucv_call_b2f0(int command, union iucv_param *parm)
+{
+ register unsigned long reg0 asm ("0");
+ register unsigned long reg1 asm ("1");
+ int ccode;
+
+ reg0 = command;
+ reg1 = virt_to_phys(parm);
+ asm volatile(
+ " .long 0xb2f01000\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (ccode), "=m" (*parm), "+d" (reg0), "+a" (reg1)
+ : "m" (*parm) : "cc");
+ return (ccode == 1) ? parm->ctrl.iprcode : ccode;
+}
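iucv_call_b2f0() is the single funnel for all CP IUCV requests: a caller fills the per-cpu parameter block and passes one of the iucv_command_codes. A minimal usage sketch following the pattern of the path functions below (shown for IUCV_SEVER; pathid is a caller-supplied u16):

	/* Typical caller pattern, cf. iucv_sever_pathid() below. */
	union iucv_param *parm;
	int rc;

	parm = percpu_ptr(iucv_param, smp_processor_id());
	memset(parm, 0, sizeof(union iucv_param));
	parm->ctrl.ippathid = pathid;          /* path to sever */
	rc = iucv_call_b2f0(IUCV_SEVER, parm); /* 0 on success, else CP iprcode */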
+
+/**
+ * iucv_query_maxconn
+ *
+ * Determines the maximum number of connections that may be established.
+ *
+ * Returns 0 on success (the result is stored in iucv_max_pathid), -ENOMEM
+ * on allocation failure, or -EPERM if IUCV is not available.
+ */
+static int iucv_query_maxconn(void)
+{
+ register unsigned long reg0 asm ("0");
+ register unsigned long reg1 asm ("1");
+ void *param;
+ int ccode;
+
+ param = kzalloc(sizeof(union iucv_param), GFP_KERNEL|GFP_DMA);
+ if (!param)
+ return -ENOMEM;
+ reg0 = IUCV_QUERY;
+ reg1 = (unsigned long) param;
+ asm volatile (
+ " .long 0xb2f01000\n"
+ " ipm %0\n"
+ " srl %0,28\n"
+ : "=d" (ccode), "+d" (reg0), "+d" (reg1) : : "cc");
+ if (ccode == 0)
+ iucv_max_pathid = reg0;
+ kfree(param);
+ return ccode ? -EPERM : 0;
+}
+
+/**
+ * iucv_allow_cpu
+ * @data: unused
+ *
+ * Allow iucv interrupts on this cpu.
+ */
+static void iucv_allow_cpu(void *data)
+{
+ int cpu = smp_processor_id();
+ union iucv_param *parm;
+
+ /*
+ * Enable all iucv interrupts.
+ * ipmask contains bits for the different interrupts
+ * 0x80 - Flag to allow nonpriority message pending interrupts
+ * 0x40 - Flag to allow priority message pending interrupts
+ * 0x20 - Flag to allow nonpriority message completion interrupts
+ * 0x10 - Flag to allow priority message completion interrupts
+ * 0x08 - Flag to allow IUCV control interrupts
+ */
+ parm = percpu_ptr(iucv_param, smp_processor_id());
+ memset(parm, 0, sizeof(union iucv_param));
+ parm->set_mask.ipmask = 0xf8;
+ iucv_call_b2f0(IUCV_SETMASK, parm);
+
+ /* Set indication that iucv interrupts are allowed for this cpu. */
+ cpu_set(cpu, iucv_irq_cpumask);
+}
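The mask written above is simply the OR of the five interrupt-enable bits listed in the comment:

	u8 ipmask = 0x80 | 0x40 | 0x20 | 0x10 | 0x08;	/* == 0xf8, all classes */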
+
+/**
+ * iucv_block_cpu
+ * @data: unused
+ *
+ * Block iucv interrupts on this cpu.
+ */
+static void iucv_block_cpu(void *data)
+{
+ int cpu = smp_processor_id();
+ union iucv_param *parm;
+
+ /* Disable all iucv interrupts. */
+ parm = percpu_ptr(iucv_param, smp_processor_id());
+ memset(parm, 0, sizeof(union iucv_param));
+ iucv_call_b2f0(IUCV_SETMASK, parm);
+
+ /* Clear indication that iucv interrupts are allowed for this cpu. */
+ cpu_clear(cpu, iucv_irq_cpumask);
+}
+
+/**
+ * iucv_declare_cpu
+ * @data: unused
+ *
+ * Declare an interrupt buffer on this cpu.
+ */
+static void iucv_declare_cpu(void *data)
+{
+ int cpu = smp_processor_id();
+ union iucv_param *parm;
+ int rc;
+
+ if (cpu_isset(cpu, iucv_buffer_cpumask))
+ return;
+
+ /* Declare interrupt buffer. */
+ parm = percpu_ptr(iucv_param, cpu);
+ memset(parm, 0, sizeof(union iucv_param));
+ parm->db.ipbfadr1 = virt_to_phys(percpu_ptr(iucv_irq_data, cpu));
+ rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm);
+ if (rc) {
+ char *err = "Unknown";
+ switch(rc) {
+ case 0x03:
+ err = "Directory error";
+ break;
+ case 0x0a:
+ err = "Invalid length";
+ break;
+ case 0x13:
+ err = "Buffer already exists";
+ break;
+ case 0x3e:
+ err = "Buffer overlap";
+ break;
+ case 0x5c:
+ err = "Paging or storage error";
+ break;
+ }
+ printk(KERN_WARNING "iucv_register: iucv_declare_buffer "
+ "on cpu %i returned error 0x%02x (%s)\n", cpu, rc, err);
+ return;
+ }
+
+ /* Set indication that an iucv buffer exists for this cpu. */
+ cpu_set(cpu, iucv_buffer_cpumask);
+
+ if (iucv_nonsmp_handler == 0 || cpus_empty(iucv_irq_cpumask))
+ /* Enable iucv interrupts on this cpu. */
+ iucv_allow_cpu(NULL);
+ else
+ /* Disable iucv interrupts on this cpu. */
+ iucv_block_cpu(NULL);
+}
+
+/**
+ * iucv_retrieve_cpu
+ * @data: unused
+ *
+ * Retrieve interrupt buffer on this cpu.
+ */
+static void iucv_retrieve_cpu(void *data)
+{
+ int cpu = smp_processor_id();
+ union iucv_param *parm;
+
+ if (!cpu_isset(cpu, iucv_buffer_cpumask))
+ return;
+
+ /* Block iucv interrupts. */
+ iucv_block_cpu(NULL);
+
+ /* Retrieve interrupt buffer. */
+ parm = percpu_ptr(iucv_param, cpu);
+ iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm);
+
+ /* Clear indication that an iucv buffer exists for this cpu. */
+ cpu_clear(cpu, iucv_buffer_cpumask);
+}
+
+/**
+ * iucv_setmask_mp
+ *
+ * Allow iucv interrupts on all cpus.
+ */
+static void iucv_setmask_mp(void)
+{
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ /* Enable all cpus with a declared buffer. */
+ if (cpu_isset(cpu, iucv_buffer_cpumask) &&
+ !cpu_isset(cpu, iucv_irq_cpumask))
+ smp_call_function_on(iucv_allow_cpu, NULL, 0, 1, cpu);
+}
+
+/**
+ * iucv_setmask_up
+ *
+ * Allow iucv interrupts on a single cpu.
+ */
+static void iucv_setmask_up(void)
+{
+ cpumask_t cpumask;
+ int cpu;
+
+ /* Disable all cpus but the first in iucv_irq_cpumask. */
+ cpumask = iucv_irq_cpumask;
+ cpu_clear(first_cpu(iucv_irq_cpumask), cpumask);
+ for_each_cpu_mask(cpu, cpumask)
+ smp_call_function_on(iucv_block_cpu, NULL, 0, 1, cpu);
+}
+
+/**
+ * iucv_enable
+ *
+ * This function makes iucv ready for use. It allocates the pathid
+ * table, declares an iucv interrupt buffer and enables the iucv
+ * interrupts. Called when the first user has registered an iucv
+ * handler.
+ */
+static int iucv_enable(void)
+{
+ size_t alloc_size;
+ int cpu, rc;
+
+ rc = -ENOMEM;
+ alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
+ iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
+ if (!iucv_path_table)
+ goto out;
+ /* Declare per cpu buffers. */
+ rc = -EIO;
+ for_each_online_cpu(cpu)
+ smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
+ if (cpus_empty(iucv_buffer_cpumask))
+ /* No cpu could declare an iucv buffer. */
+ goto out_path;
+ return 0;
+
+out_path:
+ kfree(iucv_path_table);
+out:
+ return rc;
+}
+
+/**
+ * iucv_disable
+ *
+ * This function shuts down iucv. It disables iucv interrupts, retrieves
+ * the iucv interrupt buffer and frees the pathid table. Called after the
+ * last user unregisters its iucv handler.
+ */
+static void iucv_disable(void)
+{
+ on_each_cpu(iucv_retrieve_cpu, NULL, 0, 1);
+ kfree(iucv_path_table);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ cpumask_t cpumask;
+ long cpu = (long) hcpu;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ if (!percpu_populate(iucv_irq_data,
+ sizeof(struct iucv_irq_data),
+ GFP_KERNEL|GFP_DMA, cpu))
+ return NOTIFY_BAD;
+ if (!percpu_populate(iucv_param, sizeof(union iucv_param),
+ GFP_KERNEL|GFP_DMA, cpu)) {
+ percpu_depopulate(iucv_irq_data, cpu);
+ return NOTIFY_BAD;
+ }
+ break;
+ case CPU_UP_CANCELED:
+ case CPU_DEAD:
+ percpu_depopulate(iucv_param, cpu);
+ percpu_depopulate(iucv_irq_data, cpu);
+ break;
+ case CPU_ONLINE:
+ case CPU_DOWN_FAILED:
+ smp_call_function_on(iucv_declare_cpu, NULL, 0, 1, cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ cpumask = iucv_buffer_cpumask;
+ cpu_clear(cpu, cpumask);
+ if (cpus_empty(cpumask))
+ /* Can't offline last IUCV enabled cpu. */
+ return NOTIFY_BAD;
+ smp_call_function_on(iucv_retrieve_cpu, NULL, 0, 1, cpu);
+ if (cpus_empty(iucv_irq_cpumask))
+ smp_call_function_on(iucv_allow_cpu, NULL, 0, 1,
+ first_cpu(iucv_buffer_cpumask));
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block iucv_cpu_notifier = {
+ .notifier_call = iucv_cpu_notify,
+};
+#endif
+
+/**
+ * iucv_sever_pathid
+ * @pathid: path identification number.
+ * @userdata: 16-bytes of user data.
+ *
+ * Sever an iucv path to free up the pathid. Used internally.
+ */
+static int iucv_sever_pathid(u16 pathid, u8 userdata[16])
+{
+ union iucv_param *parm;
+
+ parm = percpu_ptr(iucv_param, smp_processor_id());
+ memset(parm, 0, sizeof(union iucv_param));
+ if (userdata)
+ memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
+ parm->ctrl.ippathid = pathid;
+ return iucv_call_b2f0(IUCV_SEVER, parm);
+}
+
+/**
+ * __iucv_cleanup_pathid
+ * @dummy: unused dummy argument
+ *
+ * Nop function called via smp_call_function to force work items from
+ * pending external iucv interrupts to the work queue.
+ */
+static void __iucv_cleanup_pathid(void *dummy)
+{
+}
+
+/**
+ * iucv_cleanup_pathid
+ * @pathid: 16 bit pathid
+ *
+ * Function called after a path has been severed to find all remaining
+ * work items for the now stale pathid. The caller needs to hold the
+ * iucv_table_lock.
+ */
+static void iucv_cleanup_pathid(u16 pathid)
+{
+ struct iucv_work *p, *n;
+
+ /*
+ * Path is severed, the pathid can be reused immediately on
+ * an iucv connect or a connection pending interrupt.
+ * iucv_path_connect and connection pending interrupt will
+ * wait until the iucv_table_lock is released before the
+ * recycled pathid enters the system.
+ * Force remaining interrupts to the work queue, then
+ * scan the work queue for items of this path.
+ */
+ smp_call_function(__iucv_cleanup_pathid, NULL, 0, 1);
+ spin_lock_irq(&iucv_work_lock);
+ list_for_each_entry_safe(p, n, &iucv_work_queue, list) {
+ /* Remove work items for pathid except connection pending */
+ if (p->data.ippathid == pathid && p->data.iptype != 0x01) {
+ list_del(&p->list);
+ kfree(p);
+ }
+ }
+ spin_unlock_irq(&iucv_work_lock);
+}
+
+/**
+ * iucv_register:
+ * @handler: address of iucv handler structure
+ * @smp: != 0 indicates that the handler can deal with out of order messages
+ *
+ * Registers a driver with IUCV.
+ *
+ * Returns 0 on success, -ENOMEM if the memory allocation for the pathid
+ * table failed, or -EIO if IUCV_DECLARE_BUFFER failed on all cpus.
+ */
+int iucv_register(struct iucv_handler *handler, int smp)
+{
+ int rc;
+
+ if (!iucv_available)
+ return -ENOSYS;
+ mutex_lock(&iucv_register_mutex);
+ if (!smp)
+ iucv_nonsmp_handler++;
+ if (list_empty(&iucv_handler_list)) {
+ rc = iucv_enable();
+ if (rc)
+ goto out_mutex;
+ } else if (!smp && iucv_nonsmp_handler == 1)
+ iucv_setmask_up();
+ INIT_LIST_HEAD(&handler->paths);
+
+ spin_lock_irq(&iucv_table_lock);
+ list_add_tail(&handler->list, &iucv_handler_list);
+ spin_unlock_irq(&iucv_table_lock);
+ rc = 0;
+out_mutex:
+ mutex_unlock(&iucv_register_mutex);
+ return rc;
+}
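iucv_register() is what a protocol such as af_iucv calls once at initialization time, passing a handler whose callbacks receive path and message events. A minimal sketch of the registration pattern (callback names are placeholders; compare af_iucv_handler and afiucv_init() earlier in this patch):

	/* Registration sketch, cf. af_iucv.c above. */
	static struct iucv_handler my_handler = {
		.path_pending     = my_path_pending,
		.path_complete    = my_path_complete,
		.path_severed     = my_path_severed,
		.message_pending  = my_message_pending,
		.message_complete = my_message_complete,
	};

	int err = iucv_register(&my_handler, 0); /* 0: not out-of-order capable */
	if (err)
		return err;	/* -ENOSYS, -ENOMEM or -EIO, see comment above */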
+
+/**
+ * iucv_unregister
+ * @handler: address of iucv handler structure
+ * @smp: != 0 indicates that the handler can deal with out of order messages
+ *
+ * Unregister driver from IUCV.
+ */
+void iucv_unregister(struct iucv_handler *handler, int smp)
+{
+ struct iucv_path *p, *n;
+
+ mutex_lock(&iucv_register_mutex);
+ spin_lock_bh(&iucv_table_lock);
+ /* Remove handler from the iucv_handler_list. */
+ list_del_init(&handler->list);
+ /* Sever all pathids still referring to the handler. */
+ list_for_each_entry_safe(p, n, &handler->paths, list) {
+ iucv_sever_pathid(p->pathid, NULL);
+ iucv_path_table[p->pathid] = NULL;
+ list_del(&p->list);
+ iucv_cleanup_pathid(p->pathid);
+ iucv_path_free(p);
+ }
+ spin_unlock_bh(&iucv_table_lock);
+ if (!smp)
+ iucv_nonsmp_handler--;
+ if (list_empty(&iucv_handler_list))
+ iucv_disable();
+ else if (!smp && iucv_nonsmp_handler == 0)
+ iucv_setmask_mp();
+ mutex_unlock(&iucv_register_mutex);
+}
+
+/**
+ * iucv_path_accept
+ * @path: address of iucv path structure
+ * @handler: address of iucv handler structure
+ * @userdata: 16 bytes of data reflected to the communication partner
+ * @private: private data passed to interrupt handlers for this path
+ *
+ * This function is issued after the user received a connection pending
+ * external interrupt and now wishes to complete the IUCV communication path.
+ *
+ * Returns the result of the CP IUCV call.
+ */
+int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler,
+ u8 userdata[16], void *private)
+{
+ union iucv_param *parm;
+ int rc;
+
+ local_bh_disable();
+ /* Prepare parameter block. */
+ parm = percpu_ptr(iucv_param, smp_processor_id());
+ memset(parm, 0, sizeof(union iucv_param));
+ parm->ctrl.ippathid = path->pathid;
+ parm->ctrl.ipmsglim = path->msglim;
+ if (userdata)
+ memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
+ parm->ctrl.ipflags1 = path->flags;
+
+ rc = iucv_call_b2f0(IUCV_ACCEPT, parm);
+ if (!rc) {
+ path->private = private;
+ path->msglim = parm->ctrl.ipmsglim;
+ path->flags = parm->ctrl.ipflags1;
+ }
+ local_bh_enable();
+ return rc;
+}
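+
+/*
+ * Illustrative sketch, not part of this driver: a path_pending callback
+ * that takes ownership of an incoming path by accepting it. The handler,
+ * callback name and private pointer are hypothetical; returning non-zero
+ * would make the core sever the path again.
+ *
+ *	static int my_path_pending(struct iucv_path *path, u8 ipvmid[8],
+ *				   u8 ipuser[16])
+ *	{
+ *		return iucv_path_accept(path, &my_handler, NULL, my_private);
+ *	}
+ */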
+
+/**
+ * iucv_path_connect
+ * @path: address of iucv path structure
+ * @handler: address of iucv handler structure
+ * @userid: 8-byte user identification
+ * @system: 8-byte target system identification
+ * @userdata: 16 bytes of data reflected to the communication partner
+ * @private: private data passed to interrupt handlers for this path
+ *
+ * This function establishes an IUCV path. Although the connect may complete
+ * successfully, you are not able to use the path until you receive an IUCV
+ * Connection Complete external interrupt.
+ *
+ * Returns the result of the CP IUCV call.
+ */
+int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler,
+ u8 userid[8], u8 system[8], u8 userdata[16],
+ void *private)
+{
+ union iucv_param *parm;
+ int rc;
+
+ preempt_disable();
+ if (iucv_tasklet_cpu != smp_processor_id())
+ spin_lock_bh(&iucv_table_lock);
+ parm = percpu_ptr(iucv_param, smp_processor_id());
+ memset(parm, 0, sizeof(union iucv_param));
+ parm->ctrl.ipmsglim = path->msglim;
+ parm->ctrl.ipflags1 = path->flags;
+ if (userid) {
+ memcpy(parm->ctrl.ipvmid, userid, sizeof(parm->ctrl.ipvmid));
+ ASCEBC(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
+ EBC_TOUPPER(parm->ctrl.ipvmid, sizeof(parm->ctrl.ipvmid));
+ }
+ if (system) {
+ memcpy(parm->ctrl.iptarget, system,
+ sizeof(parm->ctrl.iptarget));
+ ASCEBC(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
+ EBC_TOUPPER(parm->ctrl.iptarget, sizeof(parm->ctrl.iptarget));
+ }
+ if (userdata)
+ memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
+
+ rc = iucv_call_b2f0(IUCV_CONNECT, parm);
+ if (!rc) {
+ if (parm->ctrl.ippathid < iucv_max_pathid) {
+ path->pathid = parm->ctrl.ippathid;
+ path->msglim = parm->ctrl.ipmsglim;
+ path->flags = parm->ctrl.ipflags1;
+ path->handler = handler;
+ path->private = private;
+ list_add_tail(&path->list, &handler->paths);
+ iucv_path_table[path->pathid] = path;
+ } else {
+ iucv_sever_pathid(parm->ctrl.ippathid,
+ iucv_error_pathid);
+ rc = -EIO;
+ }
+ }
+ if (iucv_tasklet_cpu != smp_processor_id())
+ spin_unlock_bh(&iucv_table_lock);
+ preempt_enable();
+ return rc;
+}
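+
+/*
+ * Illustrative sketch, not part of this driver: allocating a path and
+ * connecting it to a peer guest. The peer name and message limit are
+ * hypothetical; iucv_path_alloc/iucv_path_free are declared earlier in
+ * this file. The path may only be used once the path_complete callback
+ * has been delivered.
+ *
+ *	static u8 peer[8] = "LNXPEER ";
+ *
+ *	path = iucv_path_alloc(16, 0, GFP_KERNEL);
+ *	if (!path)
+ *		return -ENOMEM;
+ *	rc = iucv_path_connect(path, &my_handler, peer, NULL, NULL, NULL);
+ *	if (rc)
+ *		iucv_path_free(path);
+ */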
+
+/**
+ * iucv_path_quiesce:
+ * @path: address of iucv path structure
+ * @userdata: 16 bytes of data reflected to the communication partner
+ *
+ * This function temporarily suspends incoming messages on an IUCV path.
+ * You can later reactivate the path by invoking the iucv_resume function.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16])
+{
+ union iucv_param *parm;
+ int rc;
+
+ local_bh_disable();
+ parm = percpu_ptr(iucv_param, smp_processor_id());
+ memset(parm, 0, sizeof(union iucv_param));
+ if (userdata)
+ memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
+ parm->ctrl.ippathid = path->pathid;
+ rc = iucv_call_b2f0(IUCV_QUIESCE, parm);
+ local_bh_enable();
+ return rc;
+}
+
+/**
+ * iucv_path_resume:
+ * @path: address of iucv path structure
+ * @userdata: 16 bytes of data reflected to the communication partner
+ *
+ * This function resumes incoming messages on an IUCV path that has
+ * been stopped with iucv_path_quiesce.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_path_resume(struct iucv_path *path, u8 userdata[16])
+{
+ union iucv_param *parm;
+ int rc;
+
+ local_bh_disable();
+ parm = percpu_ptr(iucv_param, smp_processor_id());
+ memset(parm, 0, sizeof(union iucv_param));
+ if (userdata)
+ memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser));
+ parm->ctrl.ippathid = path->pathid;
+ rc = iucv_call_b2f0(IUCV_RESUME, parm);
+ local_bh_enable();
+ return rc;
+}
+
+/**
+ * iucv_path_sever
+ * @path: address of iucv path structure
+ * @userdata: 16 bytes of data reflected to the communication partner
+ *
+ * This function terminates an IUCV path.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_path_sever(struct iucv_path *path, u8 userdata[16])
+{
+ int rc;
+
+ preempt_disable();
+ if (iucv_tasklet_cpu != smp_processor_id())
+ spin_lock_bh(&iucv_table_lock);
+ rc = iucv_sever_pathid(path->pathid, userdata);
+ if (!rc) {
+ iucv_path_table[path->pathid] = NULL;
+ list_del_init(&path->list);
+ iucv_cleanup_pathid(path->pathid);
+ }
+ if (iucv_tasklet_cpu != smp_processor_id())
+ spin_unlock_bh(&iucv_table_lock);
+ preempt_enable();
+ return rc;
+}
+
+/**
+ * iucv_message_purge
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @srccls: source class of message
+ *
+ * Cancels a message you have sent.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg,
+ u32 srccls)
+{
+ union iucv_param *parm;
+ int rc;
+
+ local_bh_disable();
+ parm = percpu_ptr(iucv_param, smp_processor_id());
+ memset(parm, 0, sizeof(union iucv_param));
+ parm->purge.ippathid = path->pathid;
+ parm->purge.ipmsgid = msg->id;
+ parm->purge.ipsrccls = srccls;
+ parm->purge.ipflags1 = IUCV_IPSRCCLS | IUCV_IPFGMID | IUCV_IPFGPID;
+ rc = iucv_call_b2f0(IUCV_PURGE, parm);
+ if (!rc) {
+ msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8;
+ msg->tag = parm->purge.ipmsgtag;
+ }
+ local_bh_enable();
+ return rc;
+}
+
+/**
+ * iucv_message_receive
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @flags: how the message is received (IUCV_IPBUFLST)
+ * @buffer: address of data buffer or address of struct iucv_array
+ * @size: length of data buffer
+ * @residual: if not NULL, receives the residual buffer or message byte count
+ *
+ * This function receives messages that are being sent to you over
+ * established paths. This function will deal with RMDATA messages
+ * embedded in struct iucv_message as well.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, void *buffer, size_t size, size_t *residual)
+{
+ union iucv_param *parm;
+ struct iucv_array *array;
+ u8 *rmmsg;
+ size_t copy;
+ int rc;
+
+ if (msg->flags & IUCV_IPRMDATA) {
+ /*
+ * Message is 8 bytes long and has been stored to the
+ * message descriptor itself.
+ */
+ rc = (size < 8) ? 5 : 0;
+ if (residual)
+ *residual = abs(size - 8);
+ rmmsg = msg->rmmsg;
+ if (flags & IUCV_IPBUFLST) {
+ /* Copy to struct iucv_array. */
+ size = (size < 8) ? size : 8;
+ for (array = buffer; size > 0; array++) {
+ copy = min_t(size_t, size, array->length);
+ memcpy((u8 *)(addr_t) array->address,
+ rmmsg, copy);
+ rmmsg += copy;
+ size -= copy;
+ }
+ } else {
+ /* Copy to direct buffer. */
+ memcpy(buffer, rmmsg, min_t(size_t, size, 8));
+ }
+ return rc;
+ }
+
+ local_bh_disable();
+ parm = percpu_ptr(iucv_param, smp_processor_id());
+ memset(parm, 0, sizeof(union iucv_param));
+ parm->db.ipbfadr1 = (u32)(addr_t) buffer;
+ parm->db.ipbfln1f = (u32) size;
+ parm->db.ipmsgid = msg->id;
+ parm->db.ippathid = path->pathid;
+ parm->db.iptrgcls = msg->class;
+ parm->db.ipflags1 = (flags | IUCV_IPFGPID |
+ IUCV_IPFGMID | IUCV_IPTRGCLS);
+ rc = iucv_call_b2f0(IUCV_RECEIVE, parm);
+ if (!rc || rc == 5) {
+ msg->flags = parm->db.ipflags1;
+ if (residual)
+ *residual = parm->db.ipbfln1f;
+ }
+ local_bh_enable();
+ return rc;
+}
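+
+/*
+ * Illustrative sketch, not part of this driver: pulling a pending message
+ * into a flat buffer from the message_pending callback. Buffer size and
+ * error handling are hypothetical; oversized messages are rejected here.
+ *
+ *	static void my_message_pending(struct iucv_path *path,
+ *				       struct iucv_message *msg)
+ *	{
+ *		u8 buf[256];
+ *
+ *		if (msg->length > sizeof(buf)) {
+ *			iucv_message_reject(path, msg);
+ *			return;
+ *		}
+ *		if (iucv_message_receive(path, msg, 0, buf, msg->length, NULL))
+ *			return;
+ *		... process buf ...
+ *	}
+ */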
+
+/**
+ * iucv_message_reject
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ *
+ * The reject function refuses a specified message. Between the time you
+ * are notified of a message and the time that you complete the message,
+ * the message may be rejected.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg)
+{
+ union iucv_param *parm;
+ int rc;
+
+ local_bh_disable();
+ parm = percpu_ptr(iucv_param, smp_processor_id());
+ memset(parm, 0, sizeof(union iucv_param));
+ parm->db.ippathid = path->pathid;
+ parm->db.ipmsgid = msg->id;
+ parm->db.iptrgcls = msg->class;
+ parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID);
+ rc = iucv_call_b2f0(IUCV_REJECT, parm);
+ local_bh_enable();
+ return rc;
+}
+
+/**
+ * iucv_message_reply
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
+ * @reply: address of reply data buffer or address of struct iucv_array
+ * @size: length of reply data buffer
+ *
+ * This function responds to the two-way messages that you receive. You
+ * must completely identify the message to which you wish to reply, i.e.
+ * pathid, msgid, and trgcls. The IUCV_IPRMDATA flag signifies that the
+ * reply data is moved into the parameter list.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, void *reply, size_t size)
+{
+ union iucv_param *parm;
+ int rc;
+
+ local_bh_disable();
+ parm = percpu_ptr(iucv_param, smp_processor_id());
+ memset(parm, 0, sizeof(union iucv_param));
+ if (flags & IUCV_IPRMDATA) {
+ parm->dpl.ippathid = path->pathid;
+ parm->dpl.ipflags1 = flags;
+ parm->dpl.ipmsgid = msg->id;
+ parm->dpl.iptrgcls = msg->class;
+ memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8));
+ } else {
+ parm->db.ipbfadr1 = (u32)(addr_t) reply;
+ parm->db.ipbfln1f = (u32) size;
+ parm->db.ippathid = path->pathid;
+ parm->db.ipflags1 = flags;
+ parm->db.ipmsgid = msg->id;
+ parm->db.iptrgcls = msg->class;
+ }
+ rc = iucv_call_b2f0(IUCV_REPLY, parm);
+ local_bh_enable();
+ return rc;
+}
+
+/**
+ * iucv_message_send
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @flags: how the message is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
+ * @srccls: source class of message
+ * @buffer: address of send buffer or address of struct iucv_array
+ * @size: length of send buffer
+ *
+ * This function transmits data to another application. Data to be
+ * transmitted is in a buffer. This is a one-way message; the receiver
+ * will not reply to it.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_send(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, u32 srccls, void *buffer, size_t size)
+{
+ union iucv_param *parm;
+ int rc;
+
+ local_bh_disable();
+ parm = percpu_ptr(iucv_param, smp_processor_id());
+ memset(parm, 0, sizeof(union iucv_param));
+ if (flags & IUCV_IPRMDATA) {
+ /* Message of 8 bytes can be placed into the parameter list. */
+ parm->dpl.ippathid = path->pathid;
+ parm->dpl.ipflags1 = flags | IUCV_IPNORPY;
+ parm->dpl.iptrgcls = msg->class;
+ parm->dpl.ipsrccls = srccls;
+ parm->dpl.ipmsgtag = msg->tag;
+ memcpy(parm->dpl.iprmmsg, buffer, 8);
+ } else {
+ parm->db.ipbfadr1 = (u32)(addr_t) buffer;
+ parm->db.ipbfln1f = (u32) size;
+ parm->db.ippathid = path->pathid;
+ parm->db.ipflags1 = flags | IUCV_IPNORPY;
+ parm->db.iptrgcls = msg->class;
+ parm->db.ipsrccls = srccls;
+ parm->db.ipmsgtag = msg->tag;
+ }
+ rc = iucv_call_b2f0(IUCV_SEND, parm);
+ if (!rc)
+ msg->id = parm->db.ipmsgid;
+ local_bh_enable();
+ return rc;
+}
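+
+/*
+ * Illustrative sketch, not part of this driver: sending a one-way message
+ * on an established path. The target class and tag values are
+ * hypothetical; msg.id is filled in by IUCV on success.
+ *
+ *	struct iucv_message msg;
+ *
+ *	memset(&msg, 0, sizeof(msg));
+ *	msg.class = 0;
+ *	msg.tag = 42;
+ *	rc = iucv_message_send(path, &msg, 0, 0, data, len);
+ */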
+
+/**
+ * iucv_message_send2way
+ * @path: address of iucv path structure
+ * @msg: address of iucv msg structure
+ * @flags: how the message is sent and the reply is received
+ * (IUCV_IPRMDATA, IUCV_IPBUFLST, IUCV_IPPRTY, IUCV_ANSLST)
+ * @srccls: source class of message
+ * @buffer: address of send buffer or address of struct iucv_array
+ * @size: length of send buffer
+ * @ansbuf: address of answer buffer or address of struct iucv_array
+ * @asize: size of reply buffer
+ *
+ * This function transmits data to another application. Data to be
+ * transmitted is in a buffer. The receiver of the send is expected to
+ * reply to the message and a buffer is provided into which IUCV moves
+ * the reply to this message.
+ *
+ * Returns the result from the CP IUCV call.
+ */
+int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg,
+ u8 flags, u32 srccls, void *buffer, size_t size,
+ void *answer, size_t asize, size_t *residual)
+{
+ union iucv_param *parm;
+ int rc;
+
+ local_bh_disable();
+ parm = percpu_ptr(iucv_param, smp_processor_id());
+ memset(parm, 0, sizeof(union iucv_param));
+ if (flags & IUCV_IPRMDATA) {
+ parm->dpl.ippathid = path->pathid;
+ parm->dpl.ipflags1 = path->flags; /* priority message */
+ parm->dpl.iptrgcls = msg->class;
+ parm->dpl.ipsrccls = srccls;
+ parm->dpl.ipmsgtag = msg->tag;
+ parm->dpl.ipbfadr2 = (u32)(addr_t) answer;
+ parm->dpl.ipbfln2f = (u32) asize;
+ memcpy(parm->dpl.iprmmsg, buffer, 8);
+ } else {
+ parm->db.ippathid = path->pathid;
+ parm->db.ipflags1 = path->flags; /* priority message */
+ parm->db.iptrgcls = msg->class;
+ parm->db.ipsrccls = srccls;
+ parm->db.ipmsgtag = msg->tag;
+ parm->db.ipbfadr1 = (u32)(addr_t) buffer;
+ parm->db.ipbfln1f = (u32) size;
+ parm->db.ipbfadr2 = (u32)(addr_t) answer;
+ parm->db.ipbfln2f = (u32) asize;
+ }
+ rc = iucv_call_b2f0(IUCV_SEND, parm);
+ if (!rc)
+ msg->id = parm->db.ipmsgid;
+ local_bh_enable();
+ return rc;
+}
+
+/**
+ * iucv_path_pending
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection pending work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
+struct iucv_path_pending {
+ u16 ippathid;
+ u8 ipflags1;
+ u8 iptype;
+ u16 ipmsglim;
+ u16 res1;
+ u8 ipvmid[8];
+ u8 ipuser[16];
+ u32 res3;
+ u8 ippollfg;
+ u8 res4[3];
+} __attribute__ ((packed));
+
+static void iucv_path_pending(struct iucv_irq_data *data)
+{
+ struct iucv_path_pending *ipp = (void *) data;
+ struct iucv_handler *handler;
+ struct iucv_path *path;
+ char *error;
+
+ BUG_ON(iucv_path_table[ipp->ippathid]);
+ /* New pathid, handler found. Create a new path struct. */
+ error = iucv_error_no_memory;
+ path = iucv_path_alloc(ipp->ipmsglim, ipp->ipflags1, GFP_ATOMIC);
+ if (!path)
+ goto out_sever;
+ path->pathid = ipp->ippathid;
+ iucv_path_table[path->pathid] = path;
+ EBCASC(ipp->ipvmid, 8);
+
+ /* Call registered handler until one is found that wants the path. */
+ list_for_each_entry(handler, &iucv_handler_list, list) {
+ if (!handler->path_pending)
+ continue;
+ /*
+ * Add path to handler to allow a call to iucv_path_sever
+ * inside the path_pending function. If the handler returns
+ * an error remove the path from the handler again.
+ */
+ list_add(&path->list, &handler->paths);
+ path->handler = handler;
+ if (!handler->path_pending(path, ipp->ipvmid, ipp->ipuser))
+ return;
+ list_del(&path->list);
+ path->handler = NULL;
+ }
+ /* No handler wanted the path. */
+ iucv_path_table[path->pathid] = NULL;
+ iucv_path_free(path);
+ error = iucv_error_no_listener;
+out_sever:
+ iucv_sever_pathid(ipp->ippathid, error);
+}
+
+/**
+ * iucv_path_complete
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection complete work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
+struct iucv_path_complete {
+ u16 ippathid;
+ u8 ipflags1;
+ u8 iptype;
+ u16 ipmsglim;
+ u16 res1;
+ u8 res2[8];
+ u8 ipuser[16];
+ u32 res3;
+ u8 ippollfg;
+ u8 res4[3];
+} __attribute__ ((packed));
+
+static void iucv_path_complete(struct iucv_irq_data *data)
+{
+ struct iucv_path_complete *ipc = (void *) data;
+ struct iucv_path *path = iucv_path_table[ipc->ippathid];
+
+ BUG_ON(!path || !path->handler);
+ if (path->handler->path_complete)
+ path->handler->path_complete(path, ipc->ipuser);
+}
+
+/**
+ * iucv_path_severed
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection severed work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
+struct iucv_path_severed {
+ u16 ippathid;
+ u8 res1;
+ u8 iptype;
+ u32 res2;
+ u8 res3[8];
+ u8 ipuser[16];
+ u32 res4;
+ u8 ippollfg;
+ u8 res5[3];
+} __attribute__ ((packed));
+
+static void iucv_path_severed(struct iucv_irq_data *data)
+{
+ struct iucv_path_severed *ips = (void *) data;
+ struct iucv_path *path = iucv_path_table[ips->ippathid];
+
+ BUG_ON(!path || !path->handler);
+ if (path->handler->path_severed)
+ path->handler->path_severed(path, ips->ipuser);
+ else {
+ iucv_sever_pathid(path->pathid, NULL);
+ iucv_path_table[path->pathid] = NULL;
+ list_del_init(&path->list);
+ iucv_cleanup_pathid(path->pathid);
+ iucv_path_free(path);
+ }
+}
+
+/**
+ * iucv_path_quiesced
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection quiesced work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
+struct iucv_path_quiesced {
+ u16 ippathid;
+ u8 res1;
+ u8 iptype;
+ u32 res2;
+ u8 res3[8];
+ u8 ipuser[16];
+ u32 res4;
+ u8 ippollfg;
+ u8 res5[3];
+} __attribute__ ((packed));
+
+static void iucv_path_quiesced(struct iucv_irq_data *data)
+{
+ struct iucv_path_quiesced *ipq = (void *) data;
+ struct iucv_path *path = iucv_path_table[ipq->ippathid];
+
+ BUG_ON(!path || !path->handler);
+ if (path->handler->path_quiesced)
+ path->handler->path_quiesced(path, ipq->ipuser);
+}
+
+/**
+ * iucv_path_resumed
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process connection resumed work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
+struct iucv_path_resumed {
+ u16 ippathid;
+ u8 res1;
+ u8 iptype;
+ u32 res2;
+ u8 res3[8];
+ u8 ipuser[16];
+ u32 res4;
+ u8 ippollfg;
+ u8 res5[3];
+} __attribute__ ((packed));
+
+static void iucv_path_resumed(struct iucv_irq_data *data)
+{
+ struct iucv_path_resumed *ipr = (void *) data;
+ struct iucv_path *path = iucv_path_table[ipr->ippathid];
+
+ BUG_ON(!path || !path->handler);
+ if (path->handler->path_resumed)
+ path->handler->path_resumed(path, ipr->ipuser);
+}
+
+/**
+ * iucv_message_complete
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process message complete work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
+struct iucv_message_complete {
+ u16 ippathid;
+ u8 ipflags1;
+ u8 iptype;
+ u32 ipmsgid;
+ u32 ipaudit;
+ u8 iprmmsg[8];
+ u32 ipsrccls;
+ u32 ipmsgtag;
+ u32 res;
+ u32 ipbfln2f;
+ u8 ippollfg;
+ u8 res2[3];
+} __attribute__ ((packed));
+
+static void iucv_message_complete(struct iucv_irq_data *data)
+{
+ struct iucv_message_complete *imc = (void *) data;
+ struct iucv_path *path = iucv_path_table[imc->ippathid];
+ struct iucv_message msg;
+
+ BUG_ON(!path || !path->handler);
+ if (path->handler->message_complete) {
+ msg.flags = imc->ipflags1;
+ msg.id = imc->ipmsgid;
+ msg.audit = imc->ipaudit;
+ memcpy(msg.rmmsg, imc->iprmmsg, 8);
+ msg.class = imc->ipsrccls;
+ msg.tag = imc->ipmsgtag;
+ msg.length = imc->ipbfln2f;
+ path->handler->message_complete(path, &msg);
+ }
+}
+
+/**
+ * iucv_message_pending
+ * @data: Pointer to external interrupt buffer
+ *
+ * Process message pending work item. Called from tasklet while holding
+ * iucv_table_lock.
+ */
+struct iucv_message_pending {
+ u16 ippathid;
+ u8 ipflags1;
+ u8 iptype;
+ u32 ipmsgid;
+ u32 iptrgcls;
+ union {
+ u32 iprmmsg1_u32;
+ u8 iprmmsg1[4];
+ } ln1msg1;
+ union {
+ u32 ipbfln1f;
+ u8 iprmmsg2[4];
+ } ln1msg2;
+ u32 res1[3];
+ u32 ipbfln2f;
+ u8 ippollfg;
+ u8 res2[3];
+} __attribute__ ((packed));
+
+static void iucv_message_pending(struct iucv_irq_data *data)
+{
+ struct iucv_message_pending *imp = (void *) data;
+ struct iucv_path *path = iucv_path_table[imp->ippathid];
+ struct iucv_message msg;
+
+ BUG_ON(!path || !path->handler);
+ if (path->handler->message_pending) {
+ msg.flags = imp->ipflags1;
+ msg.id = imp->ipmsgid;
+ msg.class = imp->iptrgcls;
+ if (imp->ipflags1 & IUCV_IPRMDATA) {
+ memcpy(msg.rmmsg, imp->ln1msg1.iprmmsg1, 8);
+ msg.length = 8;
+ } else
+ msg.length = imp->ln1msg2.ipbfln1f;
+ msg.reply_size = imp->ipbfln2f;
+ path->handler->message_pending(path, &msg);
+ }
+}
+
+/**
+ * iucv_tasklet_handler:
+ *
+ * This tasklet loops over the queue of irq buffers created by
+ * iucv_external_interrupt, calls the appropriate action handler
+ * and then frees the buffer.
+ */
+static void iucv_tasklet_handler(unsigned long ignored)
+{
+ typedef void iucv_irq_fn(struct iucv_irq_data *);
+ static iucv_irq_fn *irq_fn[] = {
+ [0x01] = iucv_path_pending,
+ [0x02] = iucv_path_complete,
+ [0x03] = iucv_path_severed,
+ [0x04] = iucv_path_quiesced,
+ [0x05] = iucv_path_resumed,
+ [0x06] = iucv_message_complete,
+ [0x07] = iucv_message_complete,
+ [0x08] = iucv_message_pending,
+ [0x09] = iucv_message_pending,
+ };
+ struct iucv_work *p;
+
+ /* Serialize tasklet, iucv_path_sever and iucv_path_connect. */
+ spin_lock(&iucv_table_lock);
+ iucv_tasklet_cpu = smp_processor_id();
+
+ spin_lock_irq(&iucv_work_lock);
+ while (!list_empty(&iucv_work_queue)) {
+ p = list_entry(iucv_work_queue.next, struct iucv_work, list);
+ list_del_init(&p->list);
+ spin_unlock_irq(&iucv_work_lock);
+ irq_fn[p->data.iptype](&p->data);
+ kfree(p);
+ spin_lock_irq(&iucv_work_lock);
+ }
+ spin_unlock_irq(&iucv_work_lock);
+
+ iucv_tasklet_cpu = -1;
+ spin_unlock(&iucv_table_lock);
+}
+
+/**
+ * iucv_external_interrupt
+ * @code: irq code
+ *
+ * Handles external interrupts coming in from CP.
+ * Places the interrupt buffer on a queue and schedules iucv_tasklet_handler().
+ */
+static void iucv_external_interrupt(u16 code)
+{
+ struct iucv_irq_data *p;
+ struct iucv_work *work;
+
+ p = percpu_ptr(iucv_irq_data, smp_processor_id());
+ if (p->ippathid >= iucv_max_pathid) {
+ printk(KERN_WARNING "iucv_do_int: Got interrupt with "
+ "pathid %d > max_connections (%ld)\n",
+ p->ippathid, iucv_max_pathid - 1);
+ iucv_sever_pathid(p->ippathid, iucv_error_no_listener);
+ return;
+ }
+ if (p->iptype < 0x01 || p->iptype > 0x09) {
+ printk(KERN_ERR "iucv_do_int: unknown iucv interrupt\n");
+ return;
+ }
+ work = kmalloc(sizeof(struct iucv_work), GFP_ATOMIC);
+ if (!work) {
+ printk(KERN_WARNING "iucv_external_interrupt: out of memory\n");
+ return;
+ }
+ memcpy(&work->data, p, sizeof(work->data));
+ spin_lock(&iucv_work_lock);
+ list_add_tail(&work->list, &iucv_work_queue);
+ spin_unlock(&iucv_work_lock);
+ tasklet_schedule(&iucv_tasklet);
+}
+
+/**
+ * iucv_init
+ *
+ * Allocates and initializes various data structures.
+ */
+static int iucv_init(void)
+{
+ int rc;
+
+ if (!MACHINE_IS_VM) {
+ rc = -EPROTONOSUPPORT;
+ goto out;
+ }
+ rc = iucv_query_maxconn();
+ if (rc)
+ goto out;
+ rc = register_external_interrupt(0x4000, iucv_external_interrupt);
+ if (rc)
+ goto out;
+ rc = bus_register(&iucv_bus);
+ if (rc)
+ goto out_int;
+ iucv_root = s390_root_dev_register("iucv");
+ if (IS_ERR(iucv_root)) {
+ rc = PTR_ERR(iucv_root);
+ goto out_bus;
+ }
+ /* Note: GFP_DMA is used to get memory below 2G */
+ iucv_irq_data = percpu_alloc(sizeof(struct iucv_irq_data),
+ GFP_KERNEL|GFP_DMA);
+ if (!iucv_irq_data) {
+ rc = -ENOMEM;
+ goto out_root;
+ }
+ /* Allocate parameter blocks. */
+ iucv_param = percpu_alloc(sizeof(union iucv_param),
+ GFP_KERNEL|GFP_DMA);
+ if (!iucv_param) {
+ rc = -ENOMEM;
+ goto out_extint;
+ }
+ register_hotcpu_notifier(&iucv_cpu_notifier);
+ ASCEBC(iucv_error_no_listener, 16);
+ ASCEBC(iucv_error_no_memory, 16);
+ ASCEBC(iucv_error_pathid, 16);
+ iucv_available = 1;
+ return 0;
+
+out_extint:
+ percpu_free(iucv_irq_data);
+out_root:
+ s390_root_dev_unregister(iucv_root);
+out_bus:
+ bus_unregister(&iucv_bus);
+out_int:
+ unregister_external_interrupt(0x4000, iucv_external_interrupt);
+out:
+ return rc;
+}
+
+/**
+ * iucv_exit
+ *
+ * Frees everything allocated from iucv_init.
+ */
+static void iucv_exit(void)
+{
+ struct iucv_work *p, *n;
+
+ spin_lock_irq(&iucv_work_lock);
+ list_for_each_entry_safe(p, n, &iucv_work_queue, list)
+ kfree(p);
+ spin_unlock_irq(&iucv_work_lock);
+ unregister_hotcpu_notifier(&iucv_cpu_notifier);
+ percpu_free(iucv_param);
+ percpu_free(iucv_irq_data);
+ s390_root_dev_unregister(iucv_root);
+ bus_unregister(&iucv_bus);
+ unregister_external_interrupt(0x4000, iucv_external_interrupt);
+}
+
+subsys_initcall(iucv_init);
+module_exit(iucv_exit);
+
+/*
+ * Export all public symbols.
+ */
+EXPORT_SYMBOL(iucv_bus);
+EXPORT_SYMBOL(iucv_root);
+EXPORT_SYMBOL(iucv_register);
+EXPORT_SYMBOL(iucv_unregister);
+EXPORT_SYMBOL(iucv_path_accept);
+EXPORT_SYMBOL(iucv_path_connect);
+EXPORT_SYMBOL(iucv_path_quiesce);
+EXPORT_SYMBOL(iucv_path_sever);
+EXPORT_SYMBOL(iucv_message_purge);
+EXPORT_SYMBOL(iucv_message_receive);
+EXPORT_SYMBOL(iucv_message_reject);
+EXPORT_SYMBOL(iucv_message_reply);
+EXPORT_SYMBOL(iucv_message_send);
+EXPORT_SYMBOL(iucv_message_send2way);
+
+MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
+MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver");
+MODULE_LICENSE("GPL");
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 5dd5094659a..b4e444063d1 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2345,6 +2345,196 @@ out:
return err;
}
+#ifdef CONFIG_NET_KEY_MIGRATE
+static int pfkey_sockaddr_pair_size(sa_family_t family)
+{
+ switch (family) {
+ case AF_INET:
+ return PFKEY_ALIGN8(sizeof(struct sockaddr_in) * 2);
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case AF_INET6:
+ return PFKEY_ALIGN8(sizeof(struct sockaddr_in6) * 2);
+#endif
+ default:
+ return 0;
+ }
+ /* NOTREACHED */
+}
+
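+/*
+ * Sketch of the extension layout assumed by parse_sockaddr_pair() and
+ * ipsecrequests_to_migrate() below (and produced by set_ipsecrequest()
+ * further down): each sadb_x_ipsecrequest is immediately followed by a
+ * source/destination sockaddr pair of the same family, and the requests
+ * come in (old, new) pairs:
+ *
+ *	sadb_x_ipsecrequest (old) | sockaddr src | sockaddr dst
+ *	sadb_x_ipsecrequest (new) | sockaddr src | sockaddr dst
+ */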
+static int parse_sockaddr_pair(struct sadb_x_ipsecrequest *rq,
+ xfrm_address_t *saddr, xfrm_address_t *daddr,
+ u16 *family)
+{
+ struct sockaddr *sa = (struct sockaddr *)(rq + 1);
+ if (rq->sadb_x_ipsecrequest_len <
+ pfkey_sockaddr_pair_size(sa->sa_family))
+ return -EINVAL;
+
+ switch (sa->sa_family) {
+ case AF_INET:
+ {
+ struct sockaddr_in *sin;
+ sin = (struct sockaddr_in *)sa;
+ if ((sin+1)->sin_family != AF_INET)
+ return -EINVAL;
+ memcpy(&saddr->a4, &sin->sin_addr, sizeof(saddr->a4));
+ sin++;
+ memcpy(&daddr->a4, &sin->sin_addr, sizeof(daddr->a4));
+ *family = AF_INET;
+ break;
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case AF_INET6:
+ {
+ struct sockaddr_in6 *sin6;
+ sin6 = (struct sockaddr_in6 *)sa;
+ if ((sin6+1)->sin6_family != AF_INET6)
+ return -EINVAL;
+ memcpy(&saddr->a6, &sin6->sin6_addr,
+ sizeof(saddr->a6));
+ sin6++;
+ memcpy(&daddr->a6, &sin6->sin6_addr,
+ sizeof(daddr->a6));
+ *family = AF_INET6;
+ break;
+ }
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ipsecrequests_to_migrate(struct sadb_x_ipsecrequest *rq1, int len,
+ struct xfrm_migrate *m)
+{
+ int err;
+ struct sadb_x_ipsecrequest *rq2;
+
+ if (len <= sizeof(struct sadb_x_ipsecrequest) ||
+ len < rq1->sadb_x_ipsecrequest_len)
+ return -EINVAL;
+
+ /* old endpoints */
+ err = parse_sockaddr_pair(rq1, &m->old_saddr, &m->old_daddr,
+ &m->old_family);
+ if (err)
+ return err;
+
+ rq2 = (struct sadb_x_ipsecrequest *)((u8 *)rq1 + rq1->sadb_x_ipsecrequest_len);
+ len -= rq1->sadb_x_ipsecrequest_len;
+
+ if (len <= sizeof(struct sadb_x_ipsecrequest) ||
+ len < rq2->sadb_x_ipsecrequest_len)
+ return -EINVAL;
+
+ /* new endpoints */
+ err = parse_sockaddr_pair(rq2, &m->new_saddr, &m->new_daddr,
+ &m->new_family);
+ if (err)
+ return err;
+
+ if (rq1->sadb_x_ipsecrequest_proto != rq2->sadb_x_ipsecrequest_proto ||
+ rq1->sadb_x_ipsecrequest_mode != rq2->sadb_x_ipsecrequest_mode ||
+ rq1->sadb_x_ipsecrequest_reqid != rq2->sadb_x_ipsecrequest_reqid)
+ return -EINVAL;
+
+ m->proto = rq1->sadb_x_ipsecrequest_proto;
+ m->mode = rq1->sadb_x_ipsecrequest_mode - 1;
+ m->reqid = rq1->sadb_x_ipsecrequest_reqid;
+
+ return ((int)(rq1->sadb_x_ipsecrequest_len +
+ rq2->sadb_x_ipsecrequest_len));
+}
+
+static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
+ struct sadb_msg *hdr, void **ext_hdrs)
+{
+ int i, len, ret, err = -EINVAL;
+ u8 dir;
+ struct sadb_address *sa;
+ struct sadb_x_policy *pol;
+ struct sadb_x_ipsecrequest *rq;
+ struct xfrm_selector sel;
+ struct xfrm_migrate m[XFRM_MAX_DEPTH];
+
+ if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
+ ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
+ !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ pol = ext_hdrs[SADB_X_EXT_POLICY - 1];
+ if (!pol) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (pol->sadb_x_policy_dir >= IPSEC_DIR_MAX) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ dir = pol->sadb_x_policy_dir - 1;
+ memset(&sel, 0, sizeof(sel));
+
+ /* set source address info of selector */
+ sa = ext_hdrs[SADB_EXT_ADDRESS_SRC - 1];
+ sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr);
+ sel.prefixlen_s = sa->sadb_address_prefixlen;
+ sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
+ sel.sport = ((struct sockaddr_in *)(sa + 1))->sin_port;
+ if (sel.sport)
+ sel.sport_mask = ~0;
+
+ /* set destination address info of selector */
+ sa = ext_hdrs[SADB_EXT_ADDRESS_DST - 1];
+ pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr);
+ sel.prefixlen_d = sa->sadb_address_prefixlen;
+ sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
+ sel.dport = ((struct sockaddr_in *)(sa + 1))->sin_port;
+ if (sel.dport)
+ sel.dport_mask = ~0;
+
+ rq = (struct sadb_x_ipsecrequest *)(pol + 1);
+
+ /* extract ipsecrequests */
+ i = 0;
+ len = pol->sadb_x_policy_len * 8 - sizeof(struct sadb_x_policy);
+
+ while (len > 0 && i < XFRM_MAX_DEPTH) {
+ ret = ipsecrequests_to_migrate(rq, len, &m[i]);
+ if (ret < 0) {
+ err = ret;
+ goto out;
+ } else {
+ rq = (struct sadb_x_ipsecrequest *)((u8 *)rq + ret);
+ len -= ret;
+ i++;
+ }
+ }
+
+ if (!i || len > 0) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ return xfrm_migrate(&sel, dir, XFRM_POLICY_TYPE_MAIN, m, i);
+
+ out:
+ return err;
+}
+#else
+static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
+ struct sadb_msg *hdr, void **ext_hdrs)
+{
+ return -ENOPROTOOPT;
+}
+#endif
+
+
static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs)
{
unsigned int dir;
@@ -2473,6 +2663,7 @@ static pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
[SADB_X_SPDFLUSH] = pfkey_spdflush,
[SADB_X_SPDSETIDX] = pfkey_spdadd,
[SADB_X_SPDDELETE2] = pfkey_spdget,
+ [SADB_X_MIGRATE] = pfkey_migrate,
};
static int pfkey_process(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr)
@@ -3118,6 +3309,236 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL);
}
+#ifdef CONFIG_NET_KEY_MIGRATE
+static int set_sadb_address(struct sk_buff *skb, int sasize, int type,
+ struct xfrm_selector *sel)
+{
+ struct sadb_address *addr;
+ struct sockaddr_in *sin;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ struct sockaddr_in6 *sin6;
+#endif
+ addr = (struct sadb_address *)skb_put(skb, sizeof(struct sadb_address) + sasize);
+ addr->sadb_address_len = (sizeof(struct sadb_address) + sasize)/8;
+ addr->sadb_address_exttype = type;
+ addr->sadb_address_proto = sel->proto;
+ addr->sadb_address_reserved = 0;
+
+ switch (type) {
+ case SADB_EXT_ADDRESS_SRC:
+ if (sel->family == AF_INET) {
+ addr->sadb_address_prefixlen = sel->prefixlen_s;
+ sin = (struct sockaddr_in *)(addr + 1);
+ sin->sin_family = AF_INET;
+ memcpy(&sin->sin_addr.s_addr, &sel->saddr,
+ sizeof(sin->sin_addr.s_addr));
+ sin->sin_port = 0;
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ else if (sel->family == AF_INET6) {
+ addr->sadb_address_prefixlen = sel->prefixlen_s;
+ sin6 = (struct sockaddr_in6 *)(addr + 1);
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_port = 0;
+ sin6->sin6_flowinfo = 0;
+ sin6->sin6_scope_id = 0;
+ memcpy(&sin6->sin6_addr.s6_addr, &sel->saddr,
+ sizeof(sin6->sin6_addr.s6_addr));
+ }
+#endif
+ break;
+ case SADB_EXT_ADDRESS_DST:
+ if (sel->family == AF_INET) {
+ addr->sadb_address_prefixlen = sel->prefixlen_d;
+ sin = (struct sockaddr_in *)(addr + 1);
+ sin->sin_family = AF_INET;
+ memcpy(&sin->sin_addr.s_addr, &sel->daddr,
+ sizeof(sin->sin_addr.s_addr));
+ sin->sin_port = 0;
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ else if (sel->family == AF_INET6) {
+ addr->sadb_address_prefixlen = sel->prefixlen_d;
+ sin6 = (struct sockaddr_in6 *)(addr + 1);
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_port = 0;
+ sin6->sin6_flowinfo = 0;
+ sin6->sin6_scope_id = 0;
+ memcpy(&sin6->sin6_addr.s6_addr, &sel->daddr,
+ sizeof(sin6->sin6_addr.s6_addr));
+ }
+#endif
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int set_ipsecrequest(struct sk_buff *skb,
+ uint8_t proto, uint8_t mode, int level,
+ uint32_t reqid, uint8_t family,
+ xfrm_address_t *src, xfrm_address_t *dst)
+{
+ struct sadb_x_ipsecrequest *rq;
+ struct sockaddr_in *sin;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ struct sockaddr_in6 *sin6;
+#endif
+ int size_req;
+
+ size_req = sizeof(struct sadb_x_ipsecrequest) +
+ pfkey_sockaddr_pair_size(family);
+
+ rq = (struct sadb_x_ipsecrequest *)skb_put(skb, size_req);
+ memset(rq, 0, size_req);
+ rq->sadb_x_ipsecrequest_len = size_req;
+ rq->sadb_x_ipsecrequest_proto = proto;
+ rq->sadb_x_ipsecrequest_mode = mode;
+ rq->sadb_x_ipsecrequest_level = level;
+ rq->sadb_x_ipsecrequest_reqid = reqid;
+
+ switch (family) {
+ case AF_INET:
+ sin = (struct sockaddr_in *)(rq + 1);
+ sin->sin_family = AF_INET;
+ memcpy(&sin->sin_addr.s_addr, src,
+ sizeof(sin->sin_addr.s_addr));
+ sin++;
+ sin->sin_family = AF_INET;
+ memcpy(&sin->sin_addr.s_addr, dst,
+ sizeof(sin->sin_addr.s_addr));
+ break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case AF_INET6:
+ sin6 = (struct sockaddr_in6 *)(rq + 1);
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_port = 0;
+ sin6->sin6_flowinfo = 0;
+ sin6->sin6_scope_id = 0;
+ memcpy(&sin6->sin6_addr.s6_addr, src,
+ sizeof(sin6->sin6_addr.s6_addr));
+ sin6++;
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_port = 0;
+ sin6->sin6_flowinfo = 0;
+ sin6->sin6_scope_id = 0;
+ memcpy(&sin6->sin6_addr.s6_addr, dst,
+ sizeof(sin6->sin6_addr.s6_addr));
+ break;
+#endif
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_NET_KEY_MIGRATE
+static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
+ struct xfrm_migrate *m, int num_bundles)
+{
+ int i;
+ int sasize_sel;
+ int size = 0;
+ int size_pol = 0;
+ struct sk_buff *skb;
+ struct sadb_msg *hdr;
+ struct sadb_x_policy *pol;
+ struct xfrm_migrate *mp;
+
+ if (type != XFRM_POLICY_TYPE_MAIN)
+ return 0;
+
+ if (num_bundles <= 0 || num_bundles > XFRM_MAX_DEPTH)
+ return -EINVAL;
+
+ /* selector */
+ sasize_sel = pfkey_sockaddr_size(sel->family);
+ if (!sasize_sel)
+ return -EINVAL;
+ size += (sizeof(struct sadb_address) + sasize_sel) * 2;
+
+ /* policy info */
+ size_pol += sizeof(struct sadb_x_policy);
+
+ /* ipsecrequests */
+ for (i = 0, mp = m; i < num_bundles; i++, mp++) {
+ /* old locator pair */
+ size_pol += sizeof(struct sadb_x_ipsecrequest) +
+ pfkey_sockaddr_pair_size(mp->old_family);
+ /* new locator pair */
+ size_pol += sizeof(struct sadb_x_ipsecrequest) +
+ pfkey_sockaddr_pair_size(mp->new_family);
+ }
+
+ size += sizeof(struct sadb_msg) + size_pol;
+
+ /* alloc buffer */
+ skb = alloc_skb(size, GFP_ATOMIC);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ hdr = (struct sadb_msg *)skb_put(skb, sizeof(struct sadb_msg));
+ hdr->sadb_msg_version = PF_KEY_V2;
+ hdr->sadb_msg_type = SADB_X_MIGRATE;
+ hdr->sadb_msg_satype = pfkey_proto2satype(m->proto);
+ hdr->sadb_msg_len = size / 8;
+ hdr->sadb_msg_errno = 0;
+ hdr->sadb_msg_reserved = 0;
+ hdr->sadb_msg_seq = 0;
+ hdr->sadb_msg_pid = 0;
+
+ /* selector src */
+ set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_SRC, sel);
+
+ /* selector dst */
+ set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_DST, sel);
+
+ /* policy information */
+ pol = (struct sadb_x_policy *)skb_put(skb, sizeof(struct sadb_x_policy));
+ pol->sadb_x_policy_len = size_pol / 8;
+ pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
+ pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
+ pol->sadb_x_policy_dir = dir + 1;
+ pol->sadb_x_policy_id = 0;
+ pol->sadb_x_policy_priority = 0;
+
+ for (i = 0, mp = m; i < num_bundles; i++, mp++) {
+ /* old ipsecrequest */
+ if (set_ipsecrequest(skb, mp->proto, mp->mode + 1,
+ (mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE),
+ mp->reqid, mp->old_family,
+ &mp->old_saddr, &mp->old_daddr) < 0) {
+ return -EINVAL;
+ }
+
+ /* new ipsecrequest */
+ if (set_ipsecrequest(skb, mp->proto, mp->mode + 1,
+ (mp->reqid ? IPSEC_LEVEL_UNIQUE : IPSEC_LEVEL_REQUIRE),
+ mp->reqid, mp->new_family,
+ &mp->new_saddr, &mp->new_daddr) < 0) {
+ return -EINVAL;
+ }
+ }
+
+ /* broadcast migrate message to sockets */
+ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL);
+
+ return 0;
+}
+#else
+static int pfkey_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
+ struct xfrm_migrate *m, int num_bundles)
+{
+ return -ENOPROTOOPT;
+}
+#endif
+
static int pfkey_sendmsg(struct kiocb *kiocb,
struct socket *sock, struct msghdr *msg, size_t len)
{
@@ -3287,6 +3708,7 @@ static struct xfrm_mgr pfkeyv2_mgr =
.compile_policy = pfkey_compile_policy,
.new_mapping = pfkey_send_new_mapping,
.notify_policy = pfkey_send_policy_notify,
+ .migrate = pfkey_send_migrate,
};
static void __exit ipsec_pfkey_exit(void)
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 80107d4909c..748f7f00909 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -235,6 +235,19 @@ config NF_CONNTRACK_PPTP
To compile it as a module, choose M here. If unsure, say N.
+config NF_CONNTRACK_SANE
+ tristate "SANE protocol support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && NF_CONNTRACK
+ help
+ SANE is a protocol for remote access to scanners as implemented
+ by the 'saned' daemon. Like FTP, it uses separate control and
+ data connections.
+
+ With this module you can support SANE on a connection tracking
+ firewall.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NF_CONNTRACK_SIP
tristate "SIP protocol support (EXPERIMENTAL)"
depends on EXPERIMENTAL && NF_CONNTRACK
@@ -382,6 +395,32 @@ config NETFILTER_XT_TARGET_CONNSECMARK
To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_TARGET_TCPMSS
+ tristate '"TCPMSS" target support'
+ depends on NETFILTER_XTABLES && (IPV6 || IPV6=n)
+ ---help---
+ This option adds a `TCPMSS' target, which allows you to alter the
+ MSS value of TCP SYN packets, to control the maximum size for that
+ connection (usually limiting it to your outgoing interface's MTU
+ minus 40).
+
+ This is used to overcome criminally braindead ISPs or servers which
+ block ICMP Fragmentation Needed packets. The symptoms of this
+ problem are that everything works fine from your Linux
+ firewall/router, but machines behind it can never exchange large
+ packets:
+ 1) Web browsers connect, then hang with no data received.
+ 2) Small mail works fine, but large emails hang.
+ 3) ssh works fine, but scp hangs after initial handshaking.
+
+ Workaround: activate this option and add a rule to your firewall
+ configuration like:
+
+ iptables -A FORWARD -p tcp --tcp-flags SYN,RST SYN \
+ -j TCPMSS --clamp-mss-to-pmtu
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_MATCH_COMMENT
tristate '"comment" match support'
depends on NETFILTER_XTABLES
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 5dc5574f7e9..b2b5c7566b2 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_NF_CONNTRACK_H323) += nf_conntrack_h323.o
obj-$(CONFIG_NF_CONNTRACK_IRC) += nf_conntrack_irc.o
obj-$(CONFIG_NF_CONNTRACK_NETBIOS_NS) += nf_conntrack_netbios_ns.o
obj-$(CONFIG_NF_CONNTRACK_PPTP) += nf_conntrack_pptp.o
+obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o
obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o
obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o
@@ -44,6 +45,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
# matches
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 626b0011dd8..6fccdcf43e0 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -60,12 +60,9 @@ static DEFINE_RWLOCK(tcp_lock);
If it's non-zero, we mark only out of window RST segments as INVALID. */
int nf_ct_tcp_be_liberal __read_mostly = 0;
-/* When connection is picked up from the middle, how many packets are required
- to pass in each direction when we assume we are in sync - if any side uses
- window scaling, we lost the game.
- If it is set to zero, we disable picking up already established
+/* If it is set to zero, we disable picking up already established
connections. */
-int nf_ct_tcp_loose __read_mostly = 3;
+int nf_ct_tcp_loose __read_mostly = 1;
/* Max number of the retransmitted packets without receiving an (acceptable)
ACK from the destination. If this number is reached, a shorter timer
@@ -650,11 +647,10 @@ static int tcp_in_window(struct ip_ct_tcp *state,
before(sack, receiver->td_end + 1),
after(ack, receiver->td_end - MAXACKWINDOW(sender)));
- if (sender->loose || receiver->loose ||
- (before(seq, sender->td_maxend + 1) &&
- after(end, sender->td_end - receiver->td_maxwin - 1) &&
- before(sack, receiver->td_end + 1) &&
- after(ack, receiver->td_end - MAXACKWINDOW(sender)))) {
+ if (before(seq, sender->td_maxend + 1) &&
+ after(end, sender->td_end - receiver->td_maxwin - 1) &&
+ before(sack, receiver->td_end + 1) &&
+ after(ack, receiver->td_end - MAXACKWINDOW(sender))) {
/*
* Take into account window scaling (RFC 1323).
*/
@@ -699,15 +695,13 @@ static int tcp_in_window(struct ip_ct_tcp *state,
state->retrans = 0;
}
}
- /*
- * Close the window of disabled window tracking :-)
- */
- if (sender->loose)
- sender->loose--;
-
res = 1;
} else {
- if (LOG_INVALID(IPPROTO_TCP))
+ res = 0;
+ if (sender->flags & IP_CT_TCP_FLAG_BE_LIBERAL ||
+ nf_ct_tcp_be_liberal)
+ res = 1;
+ if (!res && LOG_INVALID(IPPROTO_TCP))
nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
"nf_ct_tcp: %s ",
before(seq, sender->td_maxend + 1) ?
@@ -718,8 +712,6 @@ static int tcp_in_window(struct ip_ct_tcp *state,
: "ACK is over the upper bound (ACKed data not seen yet)"
: "SEQ is under the lower bound (already ACKed data retransmitted)"
: "SEQ is over the upper bound (over the window of the receiver)");
-
- res = nf_ct_tcp_be_liberal;
}
DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u "
@@ -1063,8 +1055,6 @@ static int tcp_new(struct nf_conn *conntrack,
tcp_options(skb, dataoff, th, &conntrack->proto.tcp.seen[0]);
conntrack->proto.tcp.seen[1].flags = 0;
- conntrack->proto.tcp.seen[0].loose =
- conntrack->proto.tcp.seen[1].loose = 0;
} else if (nf_ct_tcp_loose == 0) {
/* Don't try to pick up connections. */
return 0;
@@ -1085,11 +1075,11 @@ static int tcp_new(struct nf_conn *conntrack,
conntrack->proto.tcp.seen[0].td_maxwin;
conntrack->proto.tcp.seen[0].td_scale = 0;
- /* We assume SACK. Should we assume window scaling too? */
+ /* We assume SACK and liberal window checking to handle
+ * window scaling */
conntrack->proto.tcp.seen[0].flags =
- conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM;
- conntrack->proto.tcp.seen[0].loose =
- conntrack->proto.tcp.seen[1].loose = nf_ct_tcp_loose;
+ conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM |
+ IP_CT_TCP_FLAG_BE_LIBERAL;
}
conntrack->proto.tcp.seen[1].td_end = 0;
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
new file mode 100644
index 00000000000..eb2d1dc46d4
--- /dev/null
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -0,0 +1,242 @@
+/* SANE connection tracking helper
+ * (SANE = Scanner Access Now Easy)
+ * For documentation about the SANE network protocol see
+ * http://www.sane-project.org/html/doc015.html
+ */
+
+/* Copyright (C) 2007 Red Hat, Inc.
+ * Author: Michal Schmidt <mschmidt@redhat.com>
+ * Based on the FTP conntrack helper (net/netfilter/nf_conntrack_ftp.c):
+ * (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
+ * (C) 2003 Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netfilter.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <linux/netfilter/nf_conntrack_sane.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michal Schmidt <mschmidt@redhat.com>");
+MODULE_DESCRIPTION("SANE connection tracking helper");
+
+static char *sane_buffer;
+
+static DEFINE_SPINLOCK(nf_sane_lock);
+
+#define MAX_PORTS 8
+static u_int16_t ports[MAX_PORTS];
+static unsigned int ports_c;
+module_param_array(ports, ushort, &ports_c, 0400);
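+
+/* Usage sketch, not part of this patch: track saned on non-default ports
+ * with e.g. "modprobe nf_conntrack_sane ports=10000,10001". */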
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+struct sane_request {
+ __be32 RPC_code;
+#define SANE_NET_START 7 /* RPC code */
+
+ __be32 handle;
+};
+
+struct sane_reply_net_start {
+ __be32 status;
+#define SANE_STATUS_SUCCESS 0
+
+ __be16 zero;
+ __be16 port;
+ /* other fields aren't interesting for conntrack */
+};
+
+static int help(struct sk_buff **pskb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+{
+ unsigned int dataoff, datalen;
+ struct tcphdr _tcph, *th;
+ char *sb_ptr;
+ int ret = NF_ACCEPT;
+ int dir = CTINFO2DIR(ctinfo);
+ struct nf_ct_sane_master *ct_sane_info;
+ struct nf_conntrack_expect *exp;
+ struct nf_conntrack_tuple *tuple;
+ struct sane_request *req;
+ struct sane_reply_net_start *reply;
+ int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;
+
+ ct_sane_info = &nfct_help(ct)->help.ct_sane_info;
+ /* Until there's been traffic both ways, don't look in packets. */
+ if (ctinfo != IP_CT_ESTABLISHED &&
+ ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY)
+ return NF_ACCEPT;
+
+ /* Not a full tcp header? */
+ th = skb_header_pointer(*pskb, protoff, sizeof(_tcph), &_tcph);
+ if (th == NULL)
+ return NF_ACCEPT;
+
+ /* No data? */
+ dataoff = protoff + th->doff * 4;
+ if (dataoff >= (*pskb)->len)
+ return NF_ACCEPT;
+
+ datalen = (*pskb)->len - dataoff;
+
+ spin_lock_bh(&nf_sane_lock);
+ sb_ptr = skb_header_pointer(*pskb, dataoff, datalen, sane_buffer);
+ BUG_ON(sb_ptr == NULL);
+
+ if (dir == IP_CT_DIR_ORIGINAL) {
+ if (datalen != sizeof(struct sane_request))
+ goto out;
+
+ req = (struct sane_request *)sb_ptr;
+ if (req->RPC_code != htonl(SANE_NET_START)) {
+ /* Not an interesting command */
+ ct_sane_info->state = SANE_STATE_NORMAL;
+ goto out;
+ }
+
+ /* We're interested in the next reply */
+ ct_sane_info->state = SANE_STATE_START_REQUESTED;
+ goto out;
+ }
+
+ /* Is it a reply to an uninteresting command? */
+ if (ct_sane_info->state != SANE_STATE_START_REQUESTED)
+ goto out;
+
+ /* It's a reply to SANE_NET_START. */
+ ct_sane_info->state = SANE_STATE_NORMAL;
+
+ if (datalen < sizeof(struct sane_reply_net_start)) {
+ DEBUGP("nf_ct_sane: NET_START reply too short\n");
+ goto out;
+ }
+
+ reply = (struct sane_reply_net_start *)sb_ptr;
+ if (reply->status != htonl(SANE_STATUS_SUCCESS)) {
+ /* saned refused the command */
+ DEBUGP("nf_ct_sane: unsuccessful SANE_STATUS = %u\n",
+ ntohl(reply->status));
+ goto out;
+ }
+
+ /* Invalid saned reply? Ignore it. */
+ if (reply->zero != 0)
+ goto out;
+
+ exp = nf_conntrack_expect_alloc(ct);
+ if (exp == NULL) {
+ ret = NF_DROP;
+ goto out;
+ }
+
+ tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+ nf_conntrack_expect_init(exp, family,
+ &tuple->src.u3, &tuple->dst.u3,
+ IPPROTO_TCP,
+ NULL, &reply->port);
+
+ DEBUGP("nf_ct_sane: expect: ");
+ NF_CT_DUMP_TUPLE(&exp->tuple);
+ NF_CT_DUMP_TUPLE(&exp->mask);
+
+ /* Can't expect this? Best to drop packet now. */
+ if (nf_conntrack_expect_related(exp) != 0)
+ ret = NF_DROP;
+
+ nf_conntrack_expect_put(exp);
+
+out:
+ spin_unlock_bh(&nf_sane_lock);
+ return ret;
+}
+
+static struct nf_conntrack_helper sane[MAX_PORTS][2];
+static char sane_names[MAX_PORTS][2][sizeof("sane-65535")];
+
+/* don't make this __exit, since it's called from __init ! */
+static void nf_conntrack_sane_fini(void)
+{
+ int i, j;
+
+ for (i = 0; i < ports_c; i++) {
+ for (j = 0; j < 2; j++) {
+ DEBUGP("nf_ct_sane: unregistering helper for pf: %d "
+ "port: %d\n",
+ sane[i][j].tuple.src.l3num, ports[i]);
+ nf_conntrack_helper_unregister(&sane[i][j]);
+ }
+ }
+
+ kfree(sane_buffer);
+}
+
+static int __init nf_conntrack_sane_init(void)
+{
+ int i, j = -1, ret = 0;
+ char *tmpname;
+
+ sane_buffer = kmalloc(65536, GFP_KERNEL);
+ if (!sane_buffer)
+ return -ENOMEM;
+
+ if (ports_c == 0)
+ ports[ports_c++] = SANE_PORT;
+
+ /* FIXME should be configurable whether IPv4 and IPv6 connections
+ are tracked or not - YK */
+ for (i = 0; i < ports_c; i++) {
+ sane[i][0].tuple.src.l3num = PF_INET;
+ sane[i][1].tuple.src.l3num = PF_INET6;
+ for (j = 0; j < 2; j++) {
+ sane[i][j].tuple.src.u.tcp.port = htons(ports[i]);
+ sane[i][j].tuple.dst.protonum = IPPROTO_TCP;
+ sane[i][j].mask.src.u.tcp.port = 0xFFFF;
+ sane[i][j].mask.dst.protonum = 0xFF;
+ sane[i][j].max_expected = 1;
+ sane[i][j].timeout = 5 * 60; /* 5 Minutes */
+ sane[i][j].me = THIS_MODULE;
+ sane[i][j].help = help;
+ tmpname = &sane_names[i][j][0];
+ if (ports[i] == SANE_PORT)
+ sprintf(tmpname, "sane");
+ else
+ sprintf(tmpname, "sane-%d", ports[i]);
+ sane[i][j].name = tmpname;
+
+ DEBUGP("nf_ct_sane: registering helper for pf: %d "
+ "port: %d\n",
+ sane[i][j].tuple.src.l3num, ports[i]);
+ ret = nf_conntrack_helper_register(&sane[i][j]);
+ if (ret) {
+ printk(KERN_ERR "nf_ct_sane: failed to "
+ "register helper for pf: %d port: %d\n",
+ sane[i][j].tuple.src.l3num, ports[i]);
+ nf_conntrack_sane_fini();
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+module_init(nf_conntrack_sane_init);
+module_exit(nf_conntrack_sane_fini);
diff --git a/net/netfilter/xt_CLASSIFY.c b/net/netfilter/xt_CLASSIFY.c
index 50de965bb10..195e92990da 100644
--- a/net/netfilter/xt_CLASSIFY.c
+++ b/net/netfilter/xt_CLASSIFY.c
@@ -33,9 +33,7 @@ target(struct sk_buff **pskb,
{
const struct xt_classify_target_info *clinfo = targinfo;
- if ((*pskb)->priority != clinfo->priority)
- (*pskb)->priority = clinfo->priority;
-
+ (*pskb)->priority = clinfo->priority;
return XT_CONTINUE;
}
diff --git a/net/netfilter/xt_CONNMARK.c b/net/netfilter/xt_CONNMARK.c
index 0534bfa65cc..795c058b16a 100644
--- a/net/netfilter/xt_CONNMARK.c
+++ b/net/netfilter/xt_CONNMARK.c
@@ -61,7 +61,7 @@ target(struct sk_buff **pskb,
#else
nf_conntrack_event_cache(IPCT_MARK, *pskb);
#endif
- }
+ }
break;
case XT_CONNMARK_SAVE:
newmark = (*ctmark & ~markinfo->mask) |
@@ -78,8 +78,7 @@ target(struct sk_buff **pskb,
case XT_CONNMARK_RESTORE:
mark = (*pskb)->mark;
diff = (*ctmark ^ mark) & markinfo->mask;
- if (diff != 0)
- (*pskb)->mark = mark ^ diff;
+ (*pskb)->mark = mark ^ diff;
break;
}
}
diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c
index a3fe3c334b0..1ab0db641f9 100644
--- a/net/netfilter/xt_CONNSECMARK.c
+++ b/net/netfilter/xt_CONNSECMARK.c
@@ -41,8 +41,7 @@ static void secmark_save(struct sk_buff *skb)
connsecmark = nf_ct_get_secmark(skb, &ctinfo);
if (connsecmark && !*connsecmark)
- if (*connsecmark != skb->secmark)
- *connsecmark = skb->secmark;
+ *connsecmark = skb->secmark;
}
}
@@ -58,8 +57,7 @@ static void secmark_restore(struct sk_buff *skb)
connsecmark = nf_ct_get_secmark(skb, &ctinfo);
if (connsecmark && *connsecmark)
- if (skb->secmark != *connsecmark)
- skb->secmark = *connsecmark;
+ skb->secmark = *connsecmark;
}
}
diff --git a/net/netfilter/xt_MARK.c b/net/netfilter/xt_MARK.c
index 0b48547e8d6..cfc45af357d 100644
--- a/net/netfilter/xt_MARK.c
+++ b/net/netfilter/xt_MARK.c
@@ -31,9 +31,7 @@ target_v0(struct sk_buff **pskb,
{
const struct xt_mark_target_info *markinfo = targinfo;
- if((*pskb)->mark != markinfo->mark)
- (*pskb)->mark = markinfo->mark;
-
+ (*pskb)->mark = markinfo->mark;
return XT_CONTINUE;
}
@@ -62,9 +60,7 @@ target_v1(struct sk_buff **pskb,
break;
}
- if((*pskb)->mark != mark)
- (*pskb)->mark = mark;
-
+ (*pskb)->mark = mark;
return XT_CONTINUE;
}
diff --git a/net/netfilter/xt_SECMARK.c b/net/netfilter/xt_SECMARK.c
index add75219629..f1131c3a9db 100644
--- a/net/netfilter/xt_SECMARK.c
+++ b/net/netfilter/xt_SECMARK.c
@@ -47,9 +47,7 @@ static unsigned int target(struct sk_buff **pskb, const struct net_device *in,
BUG();
}
- if ((*pskb)->secmark != secmark)
- (*pskb)->secmark = secmark;
-
+ (*pskb)->secmark = secmark;
return XT_CONTINUE;
}
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
new file mode 100644
index 00000000000..db7e38c08de
--- /dev/null
+++ b/net/netfilter/xt_TCPMSS.c
@@ -0,0 +1,296 @@
+/*
+ * This is a module which is used for setting the MSS option in TCP packets.
+ *
+ * Copyright (C) 2000 Marc Boucher <marc@mbsi.ca>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_tcpudp.h>
+#include <linux/netfilter/xt_TCPMSS.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
+MODULE_DESCRIPTION("x_tables TCP MSS modification module");
+MODULE_ALIAS("ipt_TCPMSS");
+MODULE_ALIAS("ip6t_TCPMSS");
+
+static inline unsigned int
+optlen(const u_int8_t *opt, unsigned int offset)
+{
+ /* Beware zero-length options: make finite progress */
+ if (opt[offset] <= TCPOPT_NOP || opt[offset+1] == 0)
+ return 1;
+ else
+ return opt[offset+1];
+}
+
+static int
+tcpmss_mangle_packet(struct sk_buff **pskb,
+ const struct xt_tcpmss_info *info,
+ unsigned int tcphoff,
+ unsigned int minlen)
+{
+ struct tcphdr *tcph;
+ unsigned int tcplen, i;
+ __be16 oldval;
+ u16 newmss;
+ u8 *opt;
+
+ if (!skb_make_writable(pskb, (*pskb)->len))
+ return -1;
+
+ tcplen = (*pskb)->len - tcphoff;
+ tcph = (struct tcphdr *)((*pskb)->nh.raw + tcphoff);
+
+ /* Since it passed flags test in tcp match, we know it is
+ not a fragment, and has data >= tcp header length. SYN
+ packets should not contain data: if they did, then we risk
+ running over MTU, sending Frag Needed and breaking things
+ badly. --RR */
+ if (tcplen != tcph->doff*4) {
+ if (net_ratelimit())
+ printk(KERN_ERR "xt_TCPMSS: bad length (%u bytes)\n",
+ (*pskb)->len);
+ return -1;
+ }
+
+ if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
+ if (dst_mtu((*pskb)->dst) <= minlen) {
+ if (net_ratelimit())
+ printk(KERN_ERR "xt_TCPMSS: "
+ "unknown or invalid path-MTU (%u)\n",
+ dst_mtu((*pskb)->dst));
+ return -1;
+ }
+ newmss = dst_mtu((*pskb)->dst) - minlen;
+ } else
+ newmss = info->mss;
+
+ opt = (u_int8_t *)tcph;
+ for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
+ if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
+ opt[i+1] == TCPOLEN_MSS) {
+ u_int16_t oldmss;
+
+ oldmss = (opt[i+2] << 8) | opt[i+3];
+
+ if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
+ oldmss <= newmss)
+ return 0;
+
+ opt[i+2] = (newmss & 0xff00) >> 8;
+ opt[i+3] = (newmss & 0x00ff);
+
+ nf_proto_csum_replace2(&tcph->check, *pskb,
+ htons(oldmss), htons(newmss), 0);
+ return 0;
+ }
+ }
+
+ /*
+ * MSS option not found: add it.
+ */
+ if (skb_tailroom((*pskb)) < TCPOLEN_MSS) {
+ struct sk_buff *newskb;
+
+ newskb = skb_copy_expand(*pskb, skb_headroom(*pskb),
+ TCPOLEN_MSS, GFP_ATOMIC);
+ if (!newskb)
+ return -1;
+ kfree_skb(*pskb);
+ *pskb = newskb;
+ tcph = (struct tcphdr *)((*pskb)->nh.raw + tcphoff);
+ }
+
+ skb_put((*pskb), TCPOLEN_MSS);
+
+ opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
+ memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
+
+ nf_proto_csum_replace2(&tcph->check, *pskb,
+ htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
+ opt[0] = TCPOPT_MSS;
+ opt[1] = TCPOLEN_MSS;
+ opt[2] = (newmss & 0xff00) >> 8;
+ opt[3] = (newmss & 0x00ff);
+
+ nf_proto_csum_replace4(&tcph->check, *pskb, 0, *((__be32 *)opt), 0);
+
+ oldval = ((__be16 *)tcph)[6];
+ tcph->doff += TCPOLEN_MSS/4;
+ nf_proto_csum_replace2(&tcph->check, *pskb,
+ oldval, ((__be16 *)tcph)[6], 0);
+ return TCPOLEN_MSS;
+}
+
+static unsigned int
+xt_tcpmss_target4(struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
+ const struct xt_target *target,
+ const void *targinfo)
+{
+ struct iphdr *iph = (*pskb)->nh.iph;
+ __be16 newlen;
+ int ret;
+
+ ret = tcpmss_mangle_packet(pskb, targinfo, iph->ihl * 4,
+ sizeof(*iph) + sizeof(struct tcphdr));
+ if (ret < 0)
+ return NF_DROP;
+ if (ret > 0) {
+ iph = (*pskb)->nh.iph;
+ newlen = htons(ntohs(iph->tot_len) + ret);
+ nf_csum_replace2(&iph->check, iph->tot_len, newlen);
+ iph->tot_len = newlen;
+ }
+ return XT_CONTINUE;
+}
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+static unsigned int
+xt_tcpmss_target6(struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
+ const struct xt_target *target,
+ const void *targinfo)
+{
+ struct ipv6hdr *ipv6h = (*pskb)->nh.ipv6h;
+ u8 nexthdr;
+ int tcphoff;
+ int ret;
+
+ nexthdr = ipv6h->nexthdr;
+ tcphoff = ipv6_skip_exthdr(*pskb, sizeof(*ipv6h), &nexthdr);
+ if (tcphoff < 0) {
+ WARN_ON(1);
+ return NF_DROP;
+ }
+ ret = tcpmss_mangle_packet(pskb, targinfo, tcphoff,
+ sizeof(*ipv6h) + sizeof(struct tcphdr));
+ if (ret < 0)
+ return NF_DROP;
+ if (ret > 0) {
+ ipv6h = (*pskb)->nh.ipv6h;
+ ipv6h->payload_len = htons(ntohs(ipv6h->payload_len) + ret);
+ }
+ return XT_CONTINUE;
+}
+#endif
+
+#define TH_SYN 0x02
+
+/* Must specify -p tcp --syn */
+static inline int find_syn_match(const struct xt_entry_match *m)
+{
+ const struct xt_tcp *tcpinfo = (const struct xt_tcp *)m->data;
+
+ if (strcmp(m->u.kernel.match->name, "tcp") == 0 &&
+ tcpinfo->flg_cmp & TH_SYN &&
+ !(tcpinfo->invflags & XT_TCP_INV_FLAGS))
+ return 1;
+
+ return 0;
+}
+
+static int
+xt_tcpmss_checkentry4(const char *tablename,
+ const void *entry,
+ const struct xt_target *target,
+ void *targinfo,
+ unsigned int hook_mask)
+{
+ const struct xt_tcpmss_info *info = targinfo;
+ const struct ipt_entry *e = entry;
+
+ if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
+ (hook_mask & ~((1 << NF_IP_FORWARD) |
+ (1 << NF_IP_LOCAL_OUT) |
+ (1 << NF_IP_POST_ROUTING))) != 0) {
+ printk("xt_TCPMSS: path-MTU clamping only supported in "
+ "FORWARD, OUTPUT and POSTROUTING hooks\n");
+ return 0;
+ }
+ if (IPT_MATCH_ITERATE(e, find_syn_match))
+ return 1;
+ printk("xt_TCPMSS: Only works on TCP SYN packets\n");
+ return 0;
+}
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+static int
+xt_tcpmss_checkentry6(const char *tablename,
+ const void *entry,
+ const struct xt_target *target,
+ void *targinfo,
+ unsigned int hook_mask)
+{
+ const struct xt_tcpmss_info *info = targinfo;
+ const struct ip6t_entry *e = entry;
+
+ if (info->mss == XT_TCPMSS_CLAMP_PMTU &&
+ (hook_mask & ~((1 << NF_IP6_FORWARD) |
+ (1 << NF_IP6_LOCAL_OUT) |
+ (1 << NF_IP6_POST_ROUTING))) != 0) {
+ printk("xt_TCPMSS: path-MTU clamping only supported in "
+ "FORWARD, OUTPUT and POSTROUTING hooks\n");
+ return 0;
+ }
+ if (IP6T_MATCH_ITERATE(e, find_syn_match))
+ return 1;
+ printk("xt_TCPMSS: Only works on TCP SYN packets\n");
+ return 0;
+}
+#endif
+
+static struct xt_target xt_tcpmss_reg[] = {
+ {
+ .family = AF_INET,
+ .name = "TCPMSS",
+ .checkentry = xt_tcpmss_checkentry4,
+ .target = xt_tcpmss_target4,
+ .targetsize = sizeof(struct xt_tcpmss_info),
+ .proto = IPPROTO_TCP,
+ .me = THIS_MODULE,
+ },
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+ {
+ .family = AF_INET6,
+ .name = "TCPMSS",
+ .checkentry = xt_tcpmss_checkentry6,
+ .target = xt_tcpmss_target6,
+ .targetsize = sizeof(struct xt_tcpmss_info),
+ .proto = IPPROTO_TCP,
+ .me = THIS_MODULE,
+ },
+#endif
+};
+
+static int __init xt_tcpmss_init(void)
+{
+ return xt_register_targets(xt_tcpmss_reg, ARRAY_SIZE(xt_tcpmss_reg));
+}
+
+static void __exit xt_tcpmss_fini(void)
+{
+ xt_unregister_targets(xt_tcpmss_reg, ARRAY_SIZE(xt_tcpmss_reg));
+}
+
+module_init(xt_tcpmss_init);
+module_exit(xt_tcpmss_fini);
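
For reference, the clamping arithmetic above reduces to newmss = dst_mtu() - minlen, where minlen is the minimal IP header plus the minimal TCP header, and the result is stored into the MSS option high byte first (opt[i+2]/opt[i+3]). A stand-alone C sketch of that calculation; the 1500-byte MTU is only an assumed example value, not something taken from the patch:

#include <stdio.h>

/* Example values only: a typical Ethernet path MTU and the minimal
 * IPv4/IPv6 + TCP header sizes used as "minlen" by the callers above. */
#define EXAMPLE_PATH_MTU 1500
#define IPV4_TCP_MINLEN  (20 + 20)  /* sizeof(struct iphdr) + sizeof(struct tcphdr)   */
#define IPV6_TCP_MINLEN  (40 + 20)  /* sizeof(struct ipv6hdr) + sizeof(struct tcphdr) */

int main(void)
{
	unsigned int mss4 = EXAMPLE_PATH_MTU - IPV4_TCP_MINLEN;  /* 1460 */
	unsigned int mss6 = EXAMPLE_PATH_MTU - IPV6_TCP_MINLEN;  /* 1440 */

	/* The option bytes are written big-endian, as in the loop above. */
	printf("IPv4 clamped MSS: %u (0x%02x 0x%02x)\n", mss4, mss4 >> 8, mss4 & 0xff);
	printf("IPv6 clamped MSS: %u (0x%02x 0x%02x)\n", mss6, mss6 >> 8, mss6 & 0xff);
	return 0;
}
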
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index f28bf69d3d4..bd1f7a2048d 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -414,6 +414,7 @@ hashlimit_init_dst(struct xt_hashlimit_htable *hinfo, struct dsthash_dst *dst,
switch (nexthdr) {
case IPPROTO_TCP:
case IPPROTO_UDP:
+ case IPPROTO_UDPLITE:
case IPPROTO_SCTP:
case IPPROTO_DCCP:
ports = skb_header_pointer(skb, protoff, sizeof(_ports),
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6dc01bdeb76..a6fa48788e8 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -60,6 +60,7 @@
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
+#include <linux/kernel.h>
#include <linux/kmod.h>
#include <net/ip.h>
#include <net/protocol.h>
@@ -200,7 +201,8 @@ struct packet_sock {
#endif
struct packet_type prot_hook;
spinlock_t bind_lock;
- char running; /* prot_hook is attached*/
+ unsigned int running:1, /* prot_hook is attached*/
+ auxdata:1;
int ifindex; /* bound device */
__be16 num;
#ifdef CONFIG_PACKET_MULTICAST
@@ -214,6 +216,16 @@ struct packet_sock {
#endif
};
+struct packet_skb_cb {
+ unsigned int origlen;
+ union {
+ struct sockaddr_pkt pkt;
+ struct sockaddr_ll ll;
+ } sa;
+};
+
+#define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb))
+
#ifdef CONFIG_PACKET_MMAP
static inline char *packet_lookup_frame(struct packet_sock *po, unsigned int position)
@@ -293,7 +305,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct
/* drop conntrack reference */
nf_reset(skb);
- spkt = (struct sockaddr_pkt*)skb->cb;
+ spkt = &PACKET_SKB_CB(skb)->sa.pkt;
skb_push(skb, skb->data-skb->mac.raw);
@@ -512,7 +524,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
skb = nskb;
}
- sll = (struct sockaddr_ll*)skb->cb;
+ BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
+ sizeof(skb->cb));
+
+ sll = &PACKET_SKB_CB(skb)->sa.ll;
sll->sll_family = AF_PACKET;
sll->sll_hatype = dev->type;
sll->sll_protocol = skb->protocol;
@@ -523,6 +538,8 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
if (dev->hard_header_parse)
sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);
+ PACKET_SKB_CB(skb)->origlen = skb->len;
+
if (pskb_trim(skb, snaplen))
goto drop_n_acct;
@@ -582,11 +599,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
else if (skb->pkt_type == PACKET_OUTGOING) {
/* Special case: outgoing packets have ll header at head */
skb_pull(skb, skb->nh.raw - skb->data);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- status |= TP_STATUS_CSUMNOTREADY;
}
}
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ status |= TP_STATUS_CSUMNOTREADY;
+
snaplen = skb->len;
res = run_filter(skb, sk, snaplen);
@@ -1092,7 +1110,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
* it in now.
*/
- sll = (struct sockaddr_ll*)skb->cb;
+ sll = &PACKET_SKB_CB(skb)->sa.ll;
if (sock->type == SOCK_PACKET)
msg->msg_namelen = sizeof(struct sockaddr_pkt);
else
@@ -1117,7 +1135,22 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
sock_recv_timestamp(msg, sk, skb);
if (msg->msg_name)
- memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
+ memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
+ msg->msg_namelen);
+
+ if (pkt_sk(sk)->auxdata) {
+ struct tpacket_auxdata aux;
+
+ aux.tp_status = TP_STATUS_USER;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ aux.tp_status |= TP_STATUS_CSUMNOTREADY;
+ aux.tp_len = PACKET_SKB_CB(skb)->origlen;
+ aux.tp_snaplen = skb->len;
+ aux.tp_mac = 0;
+ aux.tp_net = skb->nh.raw - skb->data;
+
+ put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
+ }
/*
* Free or return the buffer as appropriate. Again this
@@ -1317,6 +1350,7 @@ static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
{
struct sock *sk = sock->sk;
+ struct packet_sock *po = pkt_sk(sk);
int ret;
if (level != SOL_PACKET)
@@ -1369,6 +1403,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
return 0;
}
#endif
+ case PACKET_AUXDATA:
+ {
+ int val;
+
+ if (optlen < sizeof(val))
+ return -EINVAL;
+ if (copy_from_user(&val, optval, sizeof(val)))
+ return -EFAULT;
+
+ po->auxdata = !!val;
+ return 0;
+ }
default:
return -ENOPROTOOPT;
}
@@ -1378,8 +1424,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
int len;
+ int val;
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
+ void *data;
+ struct tpacket_stats st;
if (level != SOL_PACKET)
return -ENOPROTOOPT;
@@ -1392,9 +1441,6 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
switch(optname) {
case PACKET_STATISTICS:
- {
- struct tpacket_stats st;
-
if (len > sizeof(struct tpacket_stats))
len = sizeof(struct tpacket_stats);
spin_lock_bh(&sk->sk_receive_queue.lock);
@@ -1403,16 +1449,23 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
spin_unlock_bh(&sk->sk_receive_queue.lock);
st.tp_packets += st.tp_drops;
- if (copy_to_user(optval, &st, len))
- return -EFAULT;
+ data = &st;
+ break;
+ case PACKET_AUXDATA:
+ if (len > sizeof(int))
+ len = sizeof(int);
+ val = po->auxdata;
+
+ data = &val;
break;
- }
default:
return -ENOPROTOOPT;
}
if (put_user(len, optlen))
return -EFAULT;
+ if (copy_to_user(optval, data, len))
+ return -EFAULT;
return 0;
}
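
The PACKET_AUXDATA plumbing above adds a socket option and a per-packet SOL_PACKET control message carrying struct tpacket_auxdata (as laid out in the include/linux/if_packet.h change in this series). A minimal user-space consumer sketch, assuming an ordinary AF_PACKET raw socket; it needs CAP_NET_RAW and trims error handling:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

int main(void)
{
	int one = 1;
	char frame[2048];
	union {
		struct cmsghdr cm;
		char buf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
	} cbuf;
	struct iovec iov = { .iov_base = frame, .iov_len = sizeof(frame) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = &cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	struct tpacket_auxdata aux;
	int fd = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0)
		return 1;

	/* Ask for the auxiliary data introduced by this patch. */
	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));

	if (recvmsg(fd, &msg, 0) < 0)
		return 1;

	/* Walk the control messages looking for PACKET_AUXDATA. */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_PACKET &&
		    cmsg->cmsg_type == PACKET_AUXDATA) {
			memcpy(&aux, CMSG_DATA(cmsg), sizeof(aux));
			printf("orig len %u, snaplen %u, csum not ready: %d\n",
			       aux.tp_len, aux.tp_snaplen,
			       !!(aux.tp_status & TP_STATUS_CSUMNOTREADY));
		}
	}
	return 0;
}
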
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 01e69138578..4c68c718f5e 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -52,7 +52,7 @@ static struct tcf_hashinfo ipt_hash_info = {
static int ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook)
{
- struct ipt_target *target;
+ struct xt_target *target;
int ret = 0;
target = xt_request_find_target(AF_INET, t->u.user.name,
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index bc116bd6937..3b6e6a78092 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -209,7 +209,7 @@ static void dev_watchdog(unsigned long arg)
dev->name);
dev->tx_timeout(dev);
}
- if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
+ if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo)))
dev_hold(dev);
}
}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 2567b4c96c1..000e043ebd6 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -372,6 +372,20 @@ static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *
return 0;
}
+static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+ struct gnet_dump *d)
+{
+ struct prio_sched_data *q = qdisc_priv(sch);
+ struct Qdisc *cl_q;
+
+ cl_q = q->queues[cl - 1];
+ if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
+ gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
+ return -1;
+
+ return 0;
+}
+
static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
struct prio_sched_data *q = qdisc_priv(sch);
@@ -414,6 +428,7 @@ static struct Qdisc_class_ops prio_class_ops = {
.bind_tcf = prio_bind,
.unbind_tcf = prio_put,
.dump = prio_dump_class,
+ .dump_stats = prio_dump_class_stats,
};
static struct Qdisc_ops prio_qdisc_ops = {
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 459cda258a5..82844801e42 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -143,6 +143,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
(iph->protocol == IPPROTO_TCP ||
iph->protocol == IPPROTO_UDP ||
+ iph->protocol == IPPROTO_UDPLITE ||
iph->protocol == IPPROTO_SCTP ||
iph->protocol == IPPROTO_DCCP ||
iph->protocol == IPPROTO_ESP))
@@ -156,6 +157,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
h2 = iph->saddr.s6_addr32[3]^iph->nexthdr;
if (iph->nexthdr == IPPROTO_TCP ||
iph->nexthdr == IPPROTO_UDP ||
+ iph->nexthdr == IPPROTO_UDPLITE ||
iph->nexthdr == IPPROTO_SCTP ||
iph->nexthdr == IPPROTO_DCCP ||
iph->nexthdr == IPPROTO_ESP)
diff --git a/net/socket.c b/net/socket.c
index 4e396312f8d..5f374e1ff52 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -407,24 +407,11 @@ int sock_map_fd(struct socket *sock)
static struct socket *sock_from_file(struct file *file, int *err)
{
- struct inode *inode;
- struct socket *sock;
-
if (file->f_op == &socket_file_ops)
return file->private_data; /* set in sock_map_fd */
- inode = file->f_path.dentry->d_inode;
- if (!S_ISSOCK(inode->i_mode)) {
- *err = -ENOTSOCK;
- return NULL;
- }
-
- sock = SOCKET_I(inode);
- if (sock->file != file) {
- printk(KERN_ERR "socki_lookup: socket file changed!\n");
- sock->file = file;
- }
- return sock;
+ *err = -ENOTSOCK;
+ return NULL;
}
/**
@@ -1527,8 +1514,9 @@ asmlinkage long sys_sendto(int fd, void __user *buff, size_t len,
struct file *sock_file;
sock_file = fget_light(fd, &fput_needed);
+ err = -EBADF;
if (!sock_file)
- return -EBADF;
+ goto out;
sock = sock_from_file(sock_file, &err);
if (!sock)
@@ -1555,6 +1543,7 @@ asmlinkage long sys_sendto(int fd, void __user *buff, size_t len,
out_put:
fput_light(sock_file, fput_needed);
+out:
return err;
}
@@ -1586,12 +1575,13 @@ asmlinkage long sys_recvfrom(int fd, void __user *ubuf, size_t size,
int fput_needed;
sock_file = fget_light(fd, &fput_needed);
+ err = -EBADF;
if (!sock_file)
- return -EBADF;
+ goto out;
sock = sock_from_file(sock_file, &err);
if (!sock)
- goto out;
+ goto out_put;
msg.msg_control = NULL;
msg.msg_controllen = 0;
@@ -1610,8 +1600,9 @@ asmlinkage long sys_recvfrom(int fd, void __user *ubuf, size_t size,
if (err2 < 0)
err = err2;
}
-out:
+out_put:
fput_light(sock_file, fput_needed);
+out:
return err;
}
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 769cdd62c1b..4d90a179aed 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -86,8 +86,8 @@ static int wanrouter_device_del_if(struct wan_device *wandev,
static struct wan_device *wanrouter_find_device(char *name);
static int wanrouter_delete_interface(struct wan_device *wandev, char *name);
-void lock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags);
-void unlock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags);
+static void lock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags);
+static void unlock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags);
@@ -104,8 +104,8 @@ struct wan_device* wanrouter_router_devlist; /* list of registered devices */
* Organize Unique Identifiers for encapsulation/decapsulation
*/
-static unsigned char wanrouter_oui_ether[] = { 0x00, 0x00, 0x00 };
#if 0
+static unsigned char wanrouter_oui_ether[] = { 0x00, 0x00, 0x00 };
static unsigned char wanrouter_oui_802_2[] = { 0x00, 0x80, 0xC2 };
#endif
@@ -246,6 +246,8 @@ int unregister_wan_device(char *name)
return 0;
}
+#if 0
+
/*
* Encapsulate packet.
*
@@ -341,6 +343,7 @@ __be16 wanrouter_type_trans(struct sk_buff *skb, struct net_device *dev)
return ethertype;
}
+#endif /* 0 */
/*
* WAN device IOCTL.
@@ -799,23 +802,19 @@ static int wanrouter_delete_interface(struct wan_device *wandev, char *name)
return 0;
}
-void lock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags)
+static void lock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags)
{
spin_lock_irqsave(lock, *smp_flags);
}
-void unlock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags)
+static void unlock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags)
{
spin_unlock_irqrestore(lock, *smp_flags);
}
EXPORT_SYMBOL(register_wan_device);
EXPORT_SYMBOL(unregister_wan_device);
-EXPORT_SYMBOL(wanrouter_encapsulate);
-EXPORT_SYMBOL(wanrouter_type_trans);
-EXPORT_SYMBOL(lock_adapter_irq);
-EXPORT_SYMBOL(unlock_adapter_irq);
MODULE_LICENSE("GPL");
diff --git a/net/x25/Makefile b/net/x25/Makefile
index 587a71aa411..a2c34ab6f19 100644
--- a/net/x25/Makefile
+++ b/net/x25/Makefile
@@ -6,5 +6,5 @@ obj-$(CONFIG_X25) += x25.o
x25-y := af_x25.o x25_dev.o x25_facilities.o x25_in.o \
x25_link.o x25_out.o x25_route.o x25_subr.o \
- x25_timer.o x25_proc.o
+ x25_timer.o x25_proc.o x25_forward.o
x25-$(CONFIG_SYSCTL) += sysctl_net_x25.o
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index b5c80b18990..b37d894358e 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -63,6 +63,7 @@ int sysctl_x25_call_request_timeout = X25_DEFAULT_T21;
int sysctl_x25_reset_request_timeout = X25_DEFAULT_T22;
int sysctl_x25_clear_request_timeout = X25_DEFAULT_T23;
int sysctl_x25_ack_holdback_timeout = X25_DEFAULT_T2;
+int sysctl_x25_forward = 0;
HLIST_HEAD(x25_list);
DEFINE_RWLOCK(x25_list_lock);
@@ -846,7 +847,7 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
struct x25_address source_addr, dest_addr;
struct x25_facilities facilities;
struct x25_dte_facilities dte_facilities;
- int len, rc;
+ int len, addr_len, rc;
/*
* Remove the LCI and frame type.
@@ -857,7 +858,8 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
* Extract the X.25 addresses and convert them to ASCII strings,
* and remove them.
*/
- skb_pull(skb, x25_addr_ntoa(skb->data, &source_addr, &dest_addr));
+ addr_len = x25_addr_ntoa(skb->data, &source_addr, &dest_addr);
+ skb_pull(skb, addr_len);
/*
* Get the length of the facilities, skip past them for the moment
@@ -873,11 +875,28 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb,
sk = x25_find_listener(&source_addr,skb);
skb_push(skb,len);
+ if (sk != NULL && sk_acceptq_is_full(sk)) {
+ goto out_sock_put;
+ }
+
/*
- * We can't accept the Call Request.
+ * We don't have any listeners for this incoming call.
+ * Try forwarding it.
*/
- if (sk == NULL || sk_acceptq_is_full(sk))
- goto out_clear_request;
+ if (sk == NULL) {
+ skb_push(skb, addr_len + X25_STD_MIN_LEN);
+ if (sysctl_x25_forward &&
+ x25_forward_call(&dest_addr, nb, skb, lci) > 0)
+ {
+ /* Call was forwarded; don't process it any further */
+ kfree_skb(skb);
+ rc = 1;
+ goto out;
+ } else {
+ /* No listeners, can't forward, clear the call */
+ goto out_clear_request;
+ }
+ }
/*
* Try to reach a compromise on the requested facilities.
@@ -1598,6 +1617,9 @@ void x25_kill_by_neigh(struct x25_neigh *nb)
x25_disconnect(s, ENETUNREACH, 0, 0);
write_unlock_bh(&x25_list_lock);
+
+ /* Remove any related forwards */
+ x25_clear_forward_by_dev(nb->dev);
}
static int __init x25_init(void)
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
index aabda59c824..2b2e7fd689f 100644
--- a/net/x25/sysctl_net_x25.c
+++ b/net/x25/sysctl_net_x25.c
@@ -73,6 +73,14 @@ static struct ctl_table x25_table[] = {
.extra1 = &min_timer,
.extra2 = &max_timer,
},
+ {
+ .ctl_name = NET_X25_FORWARD,
+ .procname = "x25_forward",
+ .data = &sysctl_x25_forward,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
{ 0, },
};
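
The table entry above hooks the new forwarding switch into sysctl; assuming x25_table remains registered under the existing net.x25 tree, it appears as /proc/sys/net/x25/x25_forward. A small C sketch that enables it (equivalent to echoing "1" into that file); the path is inferred, not spelled out in this hunk:

#include <stdio.h>

/* Path assumed from the registration of x25_table under net.x25. */
#define X25_FORWARD_SYSCTL "/proc/sys/net/x25/x25_forward"

int main(void)
{
	FILE *f = fopen(X25_FORWARD_SYSCTL, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Non-zero lets af_x25.c/x25_dev.c try to forward calls and data. */
	fputs("1\n", f);
	fclose(f);
	return 0;
}
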
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index 328d80f000a..f099fd6a7c0 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -67,9 +67,18 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
return x25_rx_call_request(skb, nb, lci);
/*
- * Its not a Call Request, nor is it a control frame.
- * Let caller throw it away.
+ * It's not a Call Request, nor is it a control frame.
+ * Can we forward it?
*/
+
+ if (x25_forward_data(lci, nb, skb)) {
+ if (frametype == X25_CLEAR_CONFIRMATION) {
+ x25_clear_forward_by_lci(lci);
+ }
+ kfree_skb(skb);
+ return 1;
+ }
+
/*
x25_transmit_clear_request(nb, lci, 0x0D);
*/
diff --git a/net/x25/x25_forward.c b/net/x25/x25_forward.c
new file mode 100644
index 00000000000..d339e0c810a
--- /dev/null
+++ b/net/x25/x25_forward.c
@@ -0,0 +1,163 @@
+/*
+ * This module:
+ * This module is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * History
+ * 03-01-2007 Added forwarding for X.25 (Andrew Hendry)
+ */
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <net/x25.h>
+
+struct list_head x25_forward_list = LIST_HEAD_INIT(x25_forward_list);
+DEFINE_RWLOCK(x25_forward_list_lock);
+
+int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
+ struct sk_buff *skb, int lci)
+{
+ struct x25_route *rt;
+ struct x25_neigh *neigh_new = NULL;
+ struct list_head *entry;
+ struct x25_forward *x25_frwd, *new_frwd;
+ struct sk_buff *skbn;
+ short same_lci = 0;
+ int rc = 0;
+
+ if ((rt = x25_get_route(dest_addr)) != NULL) {
+
+ if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
+ /* This shouldn't happen; if it somehow does,
+ * do something sensible
+ */
+ goto out_put_route;
+ }
+
+ /* Avoid a loop. This is the normal exit path for a
+ * system with only one X.25 interface and a default route.
+ */
+ if (rt->dev == from->dev) {
+ goto out_put_nb;
+ }
+
+ /* Remote end sending a call request on an already
+ * established LCI? It shouldn't happen, but handle it just in case.
+ */
+ read_lock_bh(&x25_forward_list_lock);
+ list_for_each(entry, &x25_forward_list) {
+ x25_frwd = list_entry(entry, struct x25_forward, node);
+ if (x25_frwd->lci == lci) {
+ printk(KERN_WARNING "X.25: call request for lci which is already registered!, transmitting but not registering new pair\n");
+ same_lci = 1;
+ }
+ }
+ read_unlock_bh(&x25_forward_list_lock);
+
+ /* Save the forwarding details for future traffic */
+ if (!same_lci){
+ if ((new_frwd = kmalloc(sizeof(struct x25_forward),
+ GFP_ATOMIC)) == NULL){
+ rc = -ENOMEM;
+ goto out_put_nb;
+ }
+ new_frwd->lci = lci;
+ new_frwd->dev1 = rt->dev;
+ new_frwd->dev2 = from->dev;
+ write_lock_bh(&x25_forward_list_lock);
+ list_add(&new_frwd->node, &x25_forward_list);
+ write_unlock_bh(&x25_forward_list_lock);
+ }
+
+ /* Forward the call request */
+ if ( (skbn = skb_clone(skb, GFP_ATOMIC)) == NULL){
+ goto out_put_nb;
+ }
+ x25_transmit_link(skbn, neigh_new);
+ rc = 1;
+ }
+
+
+out_put_nb:
+ x25_neigh_put(neigh_new);
+
+out_put_route:
+ x25_route_put(rt);
+ return rc;
+}
+
+
+int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb)
+{
+ struct x25_forward *frwd;
+ struct list_head *entry;
+ struct net_device *peer = NULL;
+ struct x25_neigh *nb;
+ struct sk_buff *skbn;
+ int rc = 0;
+
+ read_lock_bh(&x25_forward_list_lock);
+ list_for_each(entry, &x25_forward_list) {
+ frwd = list_entry(entry, struct x25_forward, node);
+ if (frwd->lci == lci) {
+ /* The call is established, either side can send */
+ if (from->dev == frwd->dev1) {
+ peer = frwd->dev2;
+ } else {
+ peer = frwd->dev1;
+ }
+ break;
+ }
+ }
+ read_unlock_bh(&x25_forward_list_lock);
+
+ if ( (nb = x25_get_neigh(peer)) == NULL)
+ goto out;
+
+ if ((skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL) {
+ goto out;
+ }
+ x25_transmit_link(skbn, nb);
+
+ x25_neigh_put(nb);
+ rc = 1;
+out:
+ return rc;
+}
+
+void x25_clear_forward_by_lci(unsigned int lci)
+{
+ struct x25_forward *fwd;
+ struct list_head *entry, *tmp;
+
+ write_lock_bh(&x25_forward_list_lock);
+
+ list_for_each_safe(entry, tmp, &x25_forward_list) {
+ fwd = list_entry(entry, struct x25_forward, node);
+ if (fwd->lci == lci) {
+ list_del(&fwd->node);
+ kfree(fwd);
+ }
+ }
+ write_unlock_bh(&x25_forward_list_lock);
+}
+
+
+void x25_clear_forward_by_dev(struct net_device *dev)
+{
+ struct x25_forward *fwd;
+ struct list_head *entry, *tmp;
+
+ write_lock_bh(&x25_forward_list_lock);
+
+ list_for_each_safe(entry, tmp, &x25_forward_list) {
+ fwd = list_entry(entry, struct x25_forward, node);
+ if ((fwd->dev1 == dev) || (fwd->dev2 == dev)){
+ list_del(&fwd->node);
+ kfree(fwd);
+ }
+ }
+ write_unlock_bh(&x25_forward_list_lock);
+}
diff --git a/net/x25/x25_proc.c b/net/x25/x25_proc.c
index a11837d361d..e0470bd8c2f 100644
--- a/net/x25/x25_proc.c
+++ b/net/x25/x25_proc.c
@@ -165,6 +165,75 @@ out:
return 0;
}
+static __inline__ struct x25_forward *x25_get_forward_idx(loff_t pos)
+{
+ struct x25_forward *f;
+ struct list_head *entry;
+
+ list_for_each(entry, &x25_forward_list) {
+ f = list_entry(entry, struct x25_forward, node);
+ if (!pos--)
+ goto found;
+ }
+
+ f = NULL;
+found:
+ return f;
+}
+
+static void *x25_seq_forward_start(struct seq_file *seq, loff_t *pos)
+{
+ loff_t l = *pos;
+
+ read_lock_bh(&x25_forward_list_lock);
+ return l ? x25_get_forward_idx(--l) : SEQ_START_TOKEN;
+}
+
+static void *x25_seq_forward_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct x25_forward *f;
+
+ ++*pos;
+ if (v == SEQ_START_TOKEN) {
+ f = NULL;
+ if (!list_empty(&x25_forward_list))
+ f = list_entry(x25_forward_list.next,
+ struct x25_forward, node);
+ goto out;
+ }
+ f = v;
+ if (f->node.next != &x25_forward_list)
+ f = list_entry(f->node.next, struct x25_forward, node);
+ else
+ f = NULL;
+out:
+ return f;
+}
+
+static void x25_seq_forward_stop(struct seq_file *seq, void *v)
+{
+ read_unlock_bh(&x25_forward_list_lock);
+}
+
+static int x25_seq_forward_show(struct seq_file *seq, void *v)
+{
+ struct x25_forward *f;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(seq, "lci dev1 dev2\n");
+ goto out;
+ }
+
+ f = v;
+
+ seq_printf(seq, "%d %-10s %-10s\n",
+ f->lci, f->dev1->name, f->dev2->name);
+
+out:
+ return 0;
+}
+
static struct seq_operations x25_seq_route_ops = {
.start = x25_seq_route_start,
.next = x25_seq_route_next,
@@ -179,6 +248,13 @@ static struct seq_operations x25_seq_socket_ops = {
.show = x25_seq_socket_show,
};
+static struct seq_operations x25_seq_forward_ops = {
+ .start = x25_seq_forward_start,
+ .next = x25_seq_forward_next,
+ .stop = x25_seq_forward_stop,
+ .show = x25_seq_forward_show,
+};
+
static int x25_seq_socket_open(struct inode *inode, struct file *file)
{
return seq_open(file, &x25_seq_socket_ops);
@@ -189,6 +265,11 @@ static int x25_seq_route_open(struct inode *inode, struct file *file)
return seq_open(file, &x25_seq_route_ops);
}
+static int x25_seq_forward_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &x25_seq_forward_ops);
+}
+
static struct file_operations x25_seq_socket_fops = {
.owner = THIS_MODULE,
.open = x25_seq_socket_open,
@@ -205,6 +286,14 @@ static struct file_operations x25_seq_route_fops = {
.release = seq_release,
};
+static struct file_operations x25_seq_forward_fops = {
+ .owner = THIS_MODULE,
+ .open = x25_seq_forward_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
static struct proc_dir_entry *x25_proc_dir;
int __init x25_proc_init(void)
@@ -225,9 +314,17 @@ int __init x25_proc_init(void)
if (!p)
goto out_socket;
p->proc_fops = &x25_seq_socket_fops;
+
+ p = create_proc_entry("forward", S_IRUGO, x25_proc_dir);
+ if (!p)
+ goto out_forward;
+ p->proc_fops = &x25_seq_forward_fops;
rc = 0;
+
out:
return rc;
+out_forward:
+ remove_proc_entry("socket", x25_proc_dir);
out_socket:
remove_proc_entry("route", x25_proc_dir);
out_route:
@@ -237,6 +334,7 @@ out_route:
void __exit x25_proc_exit(void)
{
+ remove_proc_entry("forward", x25_proc_dir);
remove_proc_entry("route", x25_proc_dir);
remove_proc_entry("socket", x25_proc_dir);
remove_proc_entry("x25", proc_net);
diff --git a/net/x25/x25_route.c b/net/x25/x25_route.c
index 2a3fe986b24..883a848bca5 100644
--- a/net/x25/x25_route.c
+++ b/net/x25/x25_route.c
@@ -119,6 +119,9 @@ void x25_route_device_down(struct net_device *dev)
__x25_remove_route(rt);
}
write_unlock_bh(&x25_route_list_lock);
+
+ /* Remove any related forwarding */
+ x25_clear_forward_by_dev(dev);
}
/*
diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
index 0faab633258..577a4f821b9 100644
--- a/net/xfrm/Kconfig
+++ b/net/xfrm/Kconfig
@@ -24,6 +24,17 @@ config XFRM_SUB_POLICY
If unsure, say N.
+config XFRM_MIGRATE
+ bool "Transformation migrate database (EXPERIMENTAL)"
+ depends on XFRM && EXPERIMENTAL
+ ---help---
+ A feature to dynamically update the locator(s) of a given IPsec
+ security association. This feature is required, for instance, in
+ a Mobile IPv6 environment with an IPsec configuration where mobile
+ nodes change their attachment point to the Internet.
+
+ If unsure, say N.
+
config NET_KEY
tristate "PF_KEY sockets"
select XFRM
@@ -34,4 +45,19 @@ config NET_KEY
Say Y unless you know what you are doing.
+config NET_KEY_MIGRATE
+ bool "PF_KEY MIGRATE (EXPERIMENTAL)"
+ depends on NET_KEY && EXPERIMENTAL
+ select XFRM_MIGRATE
+ ---help---
+ Add a PF_KEY MIGRATE message to the PF_KEYv2 socket family.
+ The PF_KEY MIGRATE message is used to dynamically update the
+ locator(s) of a given IPsec security association.
+ This feature is required, for instance, in a Mobile IPv6
+ environment with an IPsec configuration where mobile nodes
+ change their attachment point to the Internet. Detailed
+ information can be found in the Internet-Draft
+ <draft-sugimoto-mip6-pfkey-migrate>.
+
+ If unsure, say N.
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index f1cf3402e75..248f94814df 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -266,6 +266,23 @@ static struct xfrm_algo_desc ealg_list[] = {
}
},
{
+ .name = "cbc(camellia)",
+
+ .uinfo = {
+ .encr = {
+ .blockbits = 128,
+ .defkeybits = 128,
+ }
+ },
+
+ .desc = {
+ .sadb_alg_id = SADB_X_EALG_CAMELLIACBC,
+ .sadb_alg_ivlen = 8,
+ .sadb_alg_minbits = 128,
+ .sadb_alg_maxbits = 256
+ }
+},
+{
.name = "cbc(twofish)",
.compat = "twofish",
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b7e537fe2d7..fa7ce060b45 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -2236,3 +2236,234 @@ void __init xfrm_init(void)
xfrm_input_init();
}
+#ifdef CONFIG_XFRM_MIGRATE
+static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
+ struct xfrm_selector *sel_tgt)
+{
+ if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
+ if (sel_tgt->family == sel_cmp->family &&
+ xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
+ sel_cmp->family) == 0 &&
+ xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
+ sel_cmp->family) == 0 &&
+ sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
+ sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
+ return 1;
+ }
+ } else {
+ if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
+ return 1;
+ }
+ }
+ return 0;
+}
+
+static struct xfrm_policy * xfrm_migrate_policy_find(struct xfrm_selector *sel,
+ u8 dir, u8 type)
+{
+ struct xfrm_policy *pol, *ret = NULL;
+ struct hlist_node *entry;
+ struct hlist_head *chain;
+ u32 priority = ~0U;
+
+ read_lock_bh(&xfrm_policy_lock);
+ chain = policy_hash_direct(&sel->daddr, &sel->saddr, sel->family, dir);
+ hlist_for_each_entry(pol, entry, chain, bydst) {
+ if (xfrm_migrate_selector_match(sel, &pol->selector) &&
+ pol->type == type) {
+ ret = pol;
+ priority = ret->priority;
+ break;
+ }
+ }
+ chain = &xfrm_policy_inexact[dir];
+ hlist_for_each_entry(pol, entry, chain, bydst) {
+ if (xfrm_migrate_selector_match(sel, &pol->selector) &&
+ pol->type == type &&
+ pol->priority < priority) {
+ ret = pol;
+ break;
+ }
+ }
+
+ if (ret)
+ xfrm_pol_hold(ret);
+
+ read_unlock_bh(&xfrm_policy_lock);
+
+ return ret;
+}
+
+static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
+{
+ int match = 0;
+
+ if (t->mode == m->mode && t->id.proto == m->proto &&
+ (m->reqid == 0 || t->reqid == m->reqid)) {
+ switch (t->mode) {
+ case XFRM_MODE_TUNNEL:
+ case XFRM_MODE_BEET:
+ if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
+ m->old_family) == 0 &&
+ xfrm_addr_cmp(&t->saddr, &m->old_saddr,
+ m->old_family) == 0) {
+ match = 1;
+ }
+ break;
+ case XFRM_MODE_TRANSPORT:
+ /* in transport mode the template does not store
+ any IP addresses, so we just compare mode and
+ protocol */
+ match = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ return match;
+}
+
+/* update endpoint address(es) of template(s) */
+static int xfrm_policy_migrate(struct xfrm_policy *pol,
+ struct xfrm_migrate *m, int num_migrate)
+{
+ struct xfrm_migrate *mp;
+ struct dst_entry *dst;
+ int i, j, n = 0;
+
+ write_lock_bh(&pol->lock);
+ if (unlikely(pol->dead)) {
+ /* target policy has been deleted */
+ write_unlock_bh(&pol->lock);
+ return -ENOENT;
+ }
+
+ for (i = 0; i < pol->xfrm_nr; i++) {
+ for (j = 0, mp = m; j < num_migrate; j++, mp++) {
+ if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
+ continue;
+ n++;
+ if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL)
+ continue;
+ /* update endpoints */
+ memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
+ sizeof(pol->xfrm_vec[i].id.daddr));
+ memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
+ sizeof(pol->xfrm_vec[i].saddr));
+ pol->xfrm_vec[i].encap_family = mp->new_family;
+ /* flush bundles */
+ while ((dst = pol->bundles) != NULL) {
+ pol->bundles = dst->next;
+ dst_free(dst);
+ }
+ }
+ }
+
+ write_unlock_bh(&pol->lock);
+
+ if (!n)
+ return -ENODATA;
+
+ return 0;
+}
+
+static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
+{
+ int i, j;
+
+ if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
+ return -EINVAL;
+
+ for (i = 0; i < num_migrate; i++) {
+ if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
+ m[i].old_family) == 0) &&
+ (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
+ m[i].old_family) == 0))
+ return -EINVAL;
+ if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
+ xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
+ return -EINVAL;
+
+ /* check for duplicate entries */
+ for (j = i + 1; j < num_migrate; j++) {
+ if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
+ sizeof(m[i].old_daddr)) &&
+ !memcmp(&m[i].old_saddr, &m[j].old_saddr,
+ sizeof(m[i].old_saddr)) &&
+ m[i].proto == m[j].proto &&
+ m[i].mode == m[j].mode &&
+ m[i].reqid == m[j].reqid &&
+ m[i].old_family == m[j].old_family)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
+ struct xfrm_migrate *m, int num_migrate)
+{
+ int i, err, nx_cur = 0, nx_new = 0;
+ struct xfrm_policy *pol = NULL;
+ struct xfrm_state *x, *xc;
+ struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
+ struct xfrm_state *x_new[XFRM_MAX_DEPTH];
+ struct xfrm_migrate *mp;
+
+ if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
+ goto out;
+
+ /* Stage 1 - find policy */
+ if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ /* Stage 2 - find and update state(s) */
+ for (i = 0, mp = m; i < num_migrate; i++, mp++) {
+ if ((x = xfrm_migrate_state_find(mp))) {
+ x_cur[nx_cur] = x;
+ nx_cur++;
+ if ((xc = xfrm_state_migrate(x, mp))) {
+ x_new[nx_new] = xc;
+ nx_new++;
+ } else {
+ err = -ENODATA;
+ goto restore_state;
+ }
+ }
+ }
+
+ /* Stage 3 - update policy */
+ if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
+ goto restore_state;
+
+ /* Stage 4 - delete old state(s) */
+ if (nx_cur) {
+ xfrm_states_put(x_cur, nx_cur);
+ xfrm_states_delete(x_cur, nx_cur);
+ }
+
+ /* Stage 5 - announce */
+ km_migrate(sel, dir, type, m, num_migrate);
+
+ xfrm_pol_put(pol);
+
+ return 0;
+out:
+ return err;
+
+restore_state:
+ if (pol)
+ xfrm_pol_put(pol);
+ if (nx_cur)
+ xfrm_states_put(x_cur, nx_cur);
+ if (nx_new)
+ xfrm_states_delete(x_new, nx_new);
+
+ return err;
+}
+EXPORT_SYMBOL(xfrm_migrate);
+#endif
+
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index fdb08d9f34a..91b02687db5 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -183,9 +183,6 @@ static DEFINE_SPINLOCK(xfrm_state_gc_lock);
int __xfrm_state_delete(struct xfrm_state *x);
-static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family);
-static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
-
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
@@ -831,6 +828,160 @@ out:
}
EXPORT_SYMBOL(xfrm_state_add);
+#ifdef CONFIG_XFRM_MIGRATE
+struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
+{
+ int err = -ENOMEM;
+ struct xfrm_state *x = xfrm_state_alloc();
+ if (!x)
+ goto error;
+
+ memcpy(&x->id, &orig->id, sizeof(x->id));
+ memcpy(&x->sel, &orig->sel, sizeof(x->sel));
+ memcpy(&x->lft, &orig->lft, sizeof(x->lft));
+ x->props.mode = orig->props.mode;
+ x->props.replay_window = orig->props.replay_window;
+ x->props.reqid = orig->props.reqid;
+ x->props.family = orig->props.family;
+ x->props.saddr = orig->props.saddr;
+
+ if (orig->aalg) {
+ x->aalg = xfrm_algo_clone(orig->aalg);
+ if (!x->aalg)
+ goto error;
+ }
+ x->props.aalgo = orig->props.aalgo;
+
+ if (orig->ealg) {
+ x->ealg = xfrm_algo_clone(orig->ealg);
+ if (!x->ealg)
+ goto error;
+ }
+ x->props.ealgo = orig->props.ealgo;
+
+ if (orig->calg) {
+ x->calg = xfrm_algo_clone(orig->calg);
+ if (!x->calg)
+ goto error;
+ }
+ x->props.calgo = orig->props.calgo;
+
+ if (orig->encap) {
+ x->encap = kmemdup(orig->encap, sizeof(*x->encap), GFP_KERNEL);
+ if (!x->encap)
+ goto error;
+ }
+
+ if (orig->coaddr) {
+ x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
+ GFP_KERNEL);
+ if (!x->coaddr)
+ goto error;
+ }
+
+ err = xfrm_init_state(x);
+ if (err)
+ goto error;
+
+ x->props.flags = orig->props.flags;
+
+ x->curlft.add_time = orig->curlft.add_time;
+ x->km.state = orig->km.state;
+ x->km.seq = orig->km.seq;
+
+ return x;
+
+ error:
+ if (errp)
+ *errp = err;
+ if (x) {
+ kfree(x->aalg);
+ kfree(x->ealg);
+ kfree(x->calg);
+ kfree(x->encap);
+ kfree(x->coaddr);
+ }
+ kfree(x);
+ return NULL;
+}
+EXPORT_SYMBOL(xfrm_state_clone);
+
+/* xfrm_state_lock is held */
+struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m)
+{
+ unsigned int h;
+ struct xfrm_state *x;
+ struct hlist_node *entry;
+
+ if (m->reqid) {
+ h = xfrm_dst_hash(&m->old_daddr, &m->old_saddr,
+ m->reqid, m->old_family);
+ hlist_for_each_entry(x, entry, xfrm_state_bydst+h, bydst) {
+ if (x->props.mode != m->mode ||
+ x->id.proto != m->proto)
+ continue;
+ if (m->reqid && x->props.reqid != m->reqid)
+ continue;
+ if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
+ m->old_family) ||
+ xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
+ m->old_family))
+ continue;
+ xfrm_state_hold(x);
+ return x;
+ }
+ } else {
+ h = xfrm_src_hash(&m->old_daddr, &m->old_saddr,
+ m->old_family);
+ hlist_for_each_entry(x, entry, xfrm_state_bysrc+h, bysrc) {
+ if (x->props.mode != m->mode ||
+ x->id.proto != m->proto)
+ continue;
+ if (xfrm_addr_cmp(&x->id.daddr, &m->old_daddr,
+ m->old_family) ||
+ xfrm_addr_cmp(&x->props.saddr, &m->old_saddr,
+ m->old_family))
+ continue;
+ xfrm_state_hold(x);
+ return x;
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(xfrm_migrate_state_find);
+
+struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
+ struct xfrm_migrate *m)
+{
+ struct xfrm_state *xc;
+ int err;
+
+ xc = xfrm_state_clone(x, &err);
+ if (!xc)
+ return NULL;
+
+ memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
+ memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));
+
+ /* add state */
+ if (!xfrm_addr_cmp(&x->id.daddr, &m->new_daddr, m->new_family)) {
+ /* care is needed when the destination address of the
+ state is to be updated, as it is part of the lookup triplet */
+ xfrm_state_insert(xc);
+ } else {
+ if ((err = xfrm_state_add(xc)) < 0)
+ goto error;
+ }
+
+ return xc;
+error:
+ kfree(xc);
+ return NULL;
+}
+EXPORT_SYMBOL(xfrm_state_migrate);
+#endif
+
int xfrm_state_update(struct xfrm_state *x)
{
struct xfrm_state *x1;
@@ -1345,6 +1496,26 @@ void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid)
}
EXPORT_SYMBOL(km_policy_expired);
+int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
+ struct xfrm_migrate *m, int num_migrate)
+{
+ int err = -EINVAL;
+ int ret;
+ struct xfrm_mgr *km;
+
+ read_lock(&xfrm_km_lock);
+ list_for_each_entry(km, &xfrm_km_list, list) {
+ if (km->migrate) {
+ ret = km->migrate(sel, dir, type, m, num_migrate);
+ if (!ret)
+ err = ret;
+ }
+ }
+ read_unlock(&xfrm_km_lock);
+ return err;
+}
+EXPORT_SYMBOL(km_migrate);
+
int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
int err = -EINVAL;
@@ -1458,7 +1629,7 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
-static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
+struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
{
struct xfrm_state_afinfo *afinfo;
if (unlikely(family >= NPROTO))
@@ -1470,11 +1641,14 @@ static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned short family)
return afinfo;
}
-static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
+void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
read_unlock(&xfrm_state_afinfo_lock);
}
+EXPORT_SYMBOL(xfrm_state_get_afinfo);
+EXPORT_SYMBOL(xfrm_state_put_afinfo);
+
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 82f36d396fc..079a5d31575 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1632,6 +1632,176 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
return 0;
}
+#ifdef CONFIG_XFRM_MIGRATE
+static int verify_user_migrate(struct rtattr **xfrma)
+{
+ struct rtattr *rt = xfrma[XFRMA_MIGRATE-1];
+ struct xfrm_user_migrate *um;
+
+ if (!rt)
+ return -EINVAL;
+
+ if ((rt->rta_len - sizeof(*rt)) < sizeof(*um))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int copy_from_user_migrate(struct xfrm_migrate *ma,
+ struct rtattr **xfrma, int *num)
+{
+ struct rtattr *rt = xfrma[XFRMA_MIGRATE-1];
+ struct xfrm_user_migrate *um;
+ int i, num_migrate;
+
+ um = RTA_DATA(rt);
+ num_migrate = (rt->rta_len - sizeof(*rt)) / sizeof(*um);
+
+ if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
+ return -EINVAL;
+
+ for (i = 0; i < num_migrate; i++, um++, ma++) {
+ memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
+ memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
+ memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
+ memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
+
+ ma->proto = um->proto;
+ ma->mode = um->mode;
+ ma->reqid = um->reqid;
+
+ ma->old_family = um->old_family;
+ ma->new_family = um->new_family;
+ }
+
+ *num = i;
+ return 0;
+}
+
+static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct rtattr **xfrma)
+{
+ struct xfrm_userpolicy_id *pi = NLMSG_DATA(nlh);
+ struct xfrm_migrate m[XFRM_MAX_DEPTH];
+ u8 type;
+ int err;
+ int n = 0;
+
+ err = verify_user_migrate((struct rtattr **)xfrma);
+ if (err)
+ return err;
+
+ err = copy_from_user_policy_type(&type, (struct rtattr **)xfrma);
+ if (err)
+ return err;
+
+ err = copy_from_user_migrate((struct xfrm_migrate *)m,
+ (struct rtattr **)xfrma, &n);
+ if (err)
+ return err;
+
+ if (!n)
+ return 0;
+
+ xfrm_migrate(&pi->sel, pi->dir, type, m, n);
+
+ return 0;
+}
+#else
+static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct rtattr **xfrma)
+{
+ return -ENOPROTOOPT;
+}
+#endif
+
+#ifdef CONFIG_XFRM_MIGRATE
+static int copy_to_user_migrate(struct xfrm_migrate *m, struct sk_buff *skb)
+{
+ struct xfrm_user_migrate um;
+
+ memset(&um, 0, sizeof(um));
+ um.proto = m->proto;
+ um.mode = m->mode;
+ um.reqid = m->reqid;
+ um.old_family = m->old_family;
+ memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
+ memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
+ um.new_family = m->new_family;
+ memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
+ memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
+
+ RTA_PUT(skb, XFRMA_MIGRATE, sizeof(um), &um);
+ return 0;
+
+rtattr_failure:
+ return -1;
+}
+
+static int build_migrate(struct sk_buff *skb, struct xfrm_migrate *m,
+ int num_migrate, struct xfrm_selector *sel,
+ u8 dir, u8 type)
+{
+ struct xfrm_migrate *mp;
+ struct xfrm_userpolicy_id *pol_id;
+ struct nlmsghdr *nlh;
+ unsigned char *b = skb->tail;
+ int i;
+
+ nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id));
+ pol_id = NLMSG_DATA(nlh);
+ nlh->nlmsg_flags = 0;
+
+ /* copy data from selector, dir, and type to the pol_id */
+ memset(pol_id, 0, sizeof(*pol_id));
+ memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
+ pol_id->dir = dir;
+
+ if (copy_to_user_policy_type(type, skb) < 0)
+ goto nlmsg_failure;
+
+ for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
+ if (copy_to_user_migrate(mp, skb) < 0)
+ goto nlmsg_failure;
+ }
+
+ nlh->nlmsg_len = skb->tail - b;
+ return skb->len;
+nlmsg_failure:
+ skb_trim(skb, b - skb->data);
+ return -1;
+}
+
+static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
+ struct xfrm_migrate *m, int num_migrate)
+{
+ struct sk_buff *skb;
+ size_t len;
+
+ len = RTA_SPACE(sizeof(struct xfrm_user_migrate) * num_migrate);
+ len += NLMSG_SPACE(sizeof(struct xfrm_userpolicy_id));
+#ifdef CONFIG_XFRM_SUB_POLICY
+ len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type));
+#endif
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ /* build migrate */
+ if (build_migrate(skb, m, num_migrate, sel, dir, type) < 0)
+ BUG();
+
+ NETLINK_CB(skb).dst_group = XFRMNLGRP_MIGRATE;
+ return netlink_broadcast(xfrm_nl, skb, 0, XFRMNLGRP_MIGRATE,
+ GFP_ATOMIC);
+}
+#else
+static int xfrm_send_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
+ struct xfrm_migrate *m, int num_migrate)
+{
+ return -ENOPROTOOPT;
+}
+#endif
#define XMSGSIZE(type) NLMSG_LENGTH(sizeof(struct type))
@@ -1653,6 +1823,7 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
[XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
[XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
+ [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
};
#undef XMSGSIZE
@@ -1679,6 +1850,7 @@ static struct xfrm_link {
[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
[XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
+ [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
};
static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
@@ -2285,6 +2457,7 @@ static struct xfrm_mgr netlink_mgr = {
.compile_policy = xfrm_compile_policy,
.notify_policy = xfrm_send_policy_notify,
.report = xfrm_send_report,
+ .migrate = xfrm_send_migrate,
};
static int __init xfrm_user_init(void)